From 25f9aca20c1faaf290c76365fe60d0d363745fd6 Mon Sep 17 00:00:00 2001
From: Xi Yin
Date: Thu, 14 May 2020 12:51:10 -0700
Subject: [PATCH] init release

---
 .gitignore                       |  138 ++++
 .gitmodules                      |    6 +
 CODE_OF_CONDUCT.md               |    9 +
 DOWNLOAD.md                      |   23 +
 INSTALL.md                       |   38 +
 LICENSE                          |   21 +
 MODEL_ZOO.md                     |  264 +++++++
 README.md                        |   49 ++
 SECURITY.md                      |   41 +
 coco_caption                     |    1 +
 docs/oscar.PNG                   |  Bin 0 -> 339659 bytes
 docs/oscar_logo.png              |  Bin 0 -> 121006 bytes
 oscar/__init__.py                |    1 +
 oscar/modeling/__init__.py       |    1 +
 oscar/modeling/modeling_bert.py  |  711 +++++++++++++++++
 oscar/modeling/modeling_utils.py |  671 ++++++++++++++++
 oscar/run_captioning.py          |  882 +++++++++++++++++++++
 oscar/run_gqa.py                 | 1084 ++++++++++++++++++++++++++
 oscar/run_nlvr.py                |  925 ++++++++++++++++++++++
 oscar/run_retrieval.py           |  623 +++++++++++++++
 oscar/run_vqa.py                 | 1222 ++++++++++++++++++++++++++++++
 oscar/utils/__init__.py          |    1 +
 oscar/utils/caption_evaluate.py  |  293 +++++++
 oscar/utils/cbs.py               |  852 +++++++++++++++++++++
 oscar/utils/logger.py            |  102 +++
 oscar/utils/misc.py              |   46 ++
 oscar/utils/task_utils.py        |  442 +++++++++++
 oscar/utils/tsv_file.py          |   85 +++
 oscar/utils/tsv_file_ops.py      |   18 +
 requirements.txt                 |    8 +
 setup.py                         |   47 ++
 transformers                     |    1 +
 32 files changed, 8605 insertions(+)
 create mode 100644 .gitignore
 create mode 100644 .gitmodules
 create mode 100644 CODE_OF_CONDUCT.md
 create mode 100644 DOWNLOAD.md
 create mode 100644 INSTALL.md
 create mode 100644 LICENSE
 create mode 100644 MODEL_ZOO.md
 create mode 100644 README.md
 create mode 100644 SECURITY.md
 create mode 160000 coco_caption
 create mode 100644 docs/oscar.PNG
 create mode 100644 docs/oscar_logo.png
 create mode 100644 oscar/__init__.py
 create mode 100644 oscar/modeling/__init__.py
 create mode 100644 oscar/modeling/modeling_bert.py
 create mode 100644 oscar/modeling/modeling_utils.py
 create mode 100644 oscar/run_captioning.py
 create mode 100644 oscar/run_gqa.py
 create mode 100644 oscar/run_nlvr.py
 create mode 100644 oscar/run_retrieval.py
 create mode 100644 oscar/run_vqa.py
 create mode 100644 oscar/utils/__init__.py
 create mode 100644 oscar/utils/caption_evaluate.py
 create mode 100644 oscar/utils/cbs.py
 create mode 100644 oscar/utils/logger.py
 create mode 100644 oscar/utils/misc.py
 create mode 100644 oscar/utils/task_utils.py
 create mode 100644 oscar/utils/tsv_file.py
 create mode 100644 oscar/utils/tsv_file_ops.py
 create mode 100644 requirements.txt
 create mode 100644 setup.py
 create mode 160000 transformers

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..c9ecf2d
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,138 @@
+# Initially taken from Github's Python gitignore file
+
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+.hypothesis/
+.pytest_cache/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+.python-version
+
+# celery beat schedule file
+celerybeat-schedule
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# vscode
+.vscode
+
+# TF code
+tensorflow_code
+
+# Models
+models
+proc_data
+
+# examples
+runs
+examples/runs
+
+# pyCharm
+.idea/
+
+# local folders
+data
+models
+output
diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 0000000..bef2b03
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,6 @@
+[submodule "transformers"]
+	path = transformers
+	url = git@github.com:huggingface/transformers.git
+[submodule "coco_caption"]
+	path = coco_caption
+	url = git@github.com:LuoweiZhou/coco-caption.git
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000..f9ba8cf
--- /dev/null
+++ b/CODE_OF_CONDUCT.md
@@ -0,0 +1,9 @@
+# Microsoft Open Source Code of Conduct
+
+This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
+
+Resources:
+
+- [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/)
+- [Microsoft Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/)
+- Contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with questions or concerns
diff --git a/DOWNLOAD.md b/DOWNLOAD.md
new file mode 100644
index 0000000..6e03798
--- /dev/null
+++ b/DOWNLOAD.md
@@ -0,0 +1,23 @@
+# Download
+## Datasets
+We provide the extracted image region features, object tags, and the original text annotations for each downstream task.
+```bash
+wget https://biglmdiag.blob.core.windows.net/oscar/datasets/$TASK_NAME.zip
+unzip $TASK_NAME.zip -d $DATA_DIR
+```
+`TASK_NAME` can be one of `coco_caption`, `coco_ir`, `vqa`, `GQA`, `nlvr2`.
+
+## Pre-trained Models
+We provide pre-trained *Oscar* models with Bert-base and Bert-large backbones; their names start with `base` and `large`, respectively.
+```bash
+wget https://biglmdiag.blob.core.windows.net/oscar/pretrained_models/$MODEL_NAME.zip
+unzip $MODEL_NAME.zip -d $MODEL_DIR
+```
+`MODEL_NAME` can be one of `base-vg-labels`, `large-vg-labels`, `base-oid-labels`, `base-no-labels`.
+
+The models are trained with both image region features and object tags. The image region features are extracted by a Faster R-CNN with a ResNet-101 backbone, trained with object and attribute annotations from [Visual Genome](http://visualgenome.org/).
+The object tags come from:
+ 1) the same Visual Genome model (`-vg-labels`), or
+ 2) a model trained on object annotations from [Open Images V5](https://storage.googleapis.com/openimages/web/index.html) (`-oid-labels`), or
+ 3) no object tags at all, serving as a baseline (`-no-labels`).
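+
+For example, a minimal sketch (an illustrative addition, assuming `$DATA_DIR` and `$MODEL_DIR` already exist and there is enough free disk space) that fetches every dataset and pre-trained model in one go:
+```bash
+# Download and unpack all task datasets.
+for TASK_NAME in coco_caption coco_ir vqa GQA nlvr2; do
+    wget https://biglmdiag.blob.core.windows.net/oscar/datasets/$TASK_NAME.zip
+    unzip $TASK_NAME.zip -d $DATA_DIR
+done
+
+# Download and unpack all pre-trained models.
+for MODEL_NAME in base-vg-labels large-vg-labels base-oid-labels base-no-labels; do
+    wget https://biglmdiag.blob.core.windows.net/oscar/pretrained_models/$MODEL_NAME.zip
+    unzip $MODEL_NAME.zip -d $MODEL_DIR
+done
+```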
diff --git a/INSTALL.md b/INSTALL.md
new file mode 100644
index 0000000..a003505
--- /dev/null
+++ b/INSTALL.md
@@ -0,0 +1,38 @@
+## Installation
+### Requirements
+- Python 3.7
+- PyTorch 1.2
+- torchvision 0.4.0
+- CUDA 10.0
+
+### Setup with Conda
+```bash
+# create a new environment
+conda create --name oscar python=3.7
+conda activate oscar
+
+# install PyTorch 1.2
+conda install pytorch==1.2.0 torchvision==0.4.0 cudatoolkit=10.0 -c pytorch
+
+export INSTALL_DIR=$PWD
+
+# install apex
+cd $INSTALL_DIR
+git clone https://github.com/NVIDIA/apex.git
+cd apex
+python setup.py install --cuda_ext --cpp_ext
+
+# install oscar
+cd $INSTALL_DIR
+git clone --recursive git@github.com:xjli/Oscar.git
+cd Oscar/coco_caption
+./get_stanford_models.sh
+cd ..
+python setup.py build develop
+
+# install requirements
+pip install -r requirements.txt
+
+unset INSTALL_DIR
+```
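+
+Two optional checks (illustrative additions, not part of the original setup). The submodule URLs in `.gitmodules` use SSH (`git@github.com:...`), so the recursive clone requires a GitHub SSH key; a standard git workaround is to rewrite those URLs to HTTPS first. The final one-liner confirms that the expected PyTorch build can see the GPU:
+```bash
+# Rewrite SSH GitHub URLs to HTTPS so `git clone --recursive` works without
+# SSH keys (note: this edits your global git config).
+git config --global url."https://github.com/".insteadOf "git@github.com:"
+
+# Sanity check: should print 1.2.0 and True on a correctly configured machine.
+python -c "import torch; print(torch.__version__, torch.cuda.is_available())"
+```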
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..9e841e7
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,21 @@
+    MIT License
+
+    Copyright (c) Microsoft Corporation.
+
+    Permission is hereby granted, free of charge, to any person obtaining a copy
+    of this software and associated documentation files (the "Software"), to deal
+    in the Software without restriction, including without limitation the rights
+    to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+    copies of the Software, and to permit persons to whom the Software is
+    furnished to do so, subject to the following conditions:
+
+    The above copyright notice and this permission notice shall be included in all
+    copies or substantial portions of the Software.
+
+    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+    AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+    OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+    SOFTWARE
diff --git a/MODEL_ZOO.md b/MODEL_ZOO.md
new file mode 100644
index 0000000..2ab19ac
--- /dev/null
+++ b/MODEL_ZOO.md
@@ -0,0 +1,264 @@
+## Table of Contents
+- VQA
+- GQA
+- NLVR2
+- Image/Text Retrieval
+- Image Captioning on COCO
+
+
+## Performance
+Task    | t2i | t2i | i2t | i2t | IC  | IC  | IC   | IC   | NoCaps | NoCaps | VQA      | NLVR2   |
+--------|-----|-----|-----|-----|-----|-----|------|------|--------|--------|----------|---------|
+Metric  | R@1 | R@5 | R@1 | R@5 | B@4 | M   | C    | S    | C      | S      | test-std | test-P  |
+SoTA_S  |39.2 | 68.0|56.6 | 84.5|38.9 |29.2 |129.8 | 22.4 | 61.5   | 9.2    | 70.90    | 53.50   |
+SoTA_B  |48.4 | 76.7|63.3 | 87.0|39.5 |29.3 |129.3 | 23.2 | 73.1   | 11.2   | 72.54    | 78.87   |
+SoTA_L  |51.7 | 78.4|66.6 | 89.4| -   | -   | -    | -    | -      | -      | 73.40    | 79.50   |
+-----   |---  |---  |---  |---  |---  |---  |---   |---   |---     |---     |---       |---      |
+Oscar_B |54.0 | 80.8|70.0 | 91.1|40.5 |29.7 |137.6 | 22.8 | 78.8   | 11.7   | 73.44    | 78.44   |
+Oscar_L |57.5 | 82.8|73.5 | 92.2|41.7 |30.6 |140.0 | 24.5 | 80.9   | 11.3   | 73.82    | 80.37   |
+gain    | 5.8 | 4.4 | 6.9 | 2.8 | 2.2 | 1.3 | 10.7 | 1.3  | 7.8    | 0.5    | 0.42     | 0.87    |
+
+t2i: text-to-image retrieval; i2t: image-to-text retrieval; IC: image captioning on COCO.
+
+For reference, we also release the training logs and outputs.
+
+
+## VQA
+Script to finetune the Oscar base model.
+The base model is trained on the train split and evaluated on the val split, which makes it a good reference point for later comparisons.
+
+Training logs: [eval_logs.json](https://biglmdiag.blob.core.windows.net/oscar/exp/vqa/base/base_9m_ep107_1192k_eu1/application_1575931286052_40649/results/eval_logs.json), [output.txt](https://biglmdiag.blob.core.windows.net/oscar/exp/vqa/base/base_9m_ep107_1192k_eu1/application_1575931286052_40649/results/stdout.txt).
+Final server results: [results.txt](https://biglmdiag.blob.core.windows.net/oscar/exp/vqa/base/results.txt).
+```bash
+python oscar/run_vqa.py -j 4 \
+    --img_feature_dim 2054 --max_img_seq_length 50 \
+    --data_label_type mask --img_feature_type faster_r-cnn \
+    --data_dir datasets/vqa/2k \
+    --model_type bert --model_name_or_path pretrained_models/base-vg-labels/ep_107_1192087 \
+    --task_name vqa_text --do_train --do_lower_case --max_seq_length 128 \
+    --per_gpu_eval_batch_size 256 --per_gpu_train_batch_size 32 \
+    --learning_rate 5e-05 --num_train_epochs 25 \
+    --output_dir results --label_file datasets/vqa/cache/trainval_ans2label.pkl \
+    --save_epoch 1 --seed 88 --evaluate_during_training --logging_steps 4000 \
+    --drop_out 0.3 --weight_decay 0.05 --warmup_steps 0 --loss_type bce \
+    --img_feat_format pt --classifier linear --cls_hidden_scale 3 \
+    --txt_data_dir datasets/vqa/2k
+```
+
+Script to finetune the Oscar large model.
+The large model is trained on the train+val split (`--do_train_val`) and evaluated on the val split, to reproduce the paper's best result.
+
+Training logs: [eval_logs.json](https://biglmdiag.blob.core.windows.net/oscar/exp/vqa/large/ab128_img_large_rr1_ep20_590k_tv_done_good/exp_ab128_img_large_rr1_ep20_590k_tv_0.00003_128_50_dp_0.3_wd_0.05_bce_3linear_s88_abcd/results/eval_logs.json), [output.txt](https://biglmdiag.blob.core.windows.net/oscar/exp/vqa/large/ab128_img_large_rr1_ep20_590k_tv_done_good/exp_ab128_img_large_rr1_ep20_590k_tv_0.00003_128_50_dp_0.3_wd_0.05_bce_3linear_s88_abcd/stdout.txt).
+Final server results: [results.txt](https://biglmdiag.blob.core.windows.net/oscar/exp/vqa/large/results.txt).
+```bash
+python oscar/run_vqa.py -j 4 \
+    --img_feature_dim 2054 --max_img_seq_length 50 \
+    --data_label_type mask --img_feature_type faster_r-cnn \
+    --data_dir datasets/vqa/2k \
+    --model_type bert --model_name_or_path pretrained_models/large-vg-labels/ep_20_590000 \
+    --task_name vqa_text --do_train_val --do_lower_case --max_seq_length 128 \
+    --per_gpu_eval_batch_size 256 --per_gpu_train_batch_size 24 \
+    --learning_rate 3e-05 --num_train_epochs 25 \
+    --label_file datasets/vqa/cache/trainval_ans2label.pkl \
+    --save_epoch 30 --seed 88 --evaluate_during_training --logging_steps 4000 \
+    --drop_out 0.3 --weight_decay 0.05 --warmup_steps 0 --loss_type bce \
+    --save_after_epoch 15 --output_dir results --img_feat_format pt \
+    --classifier linear --cls_hidden_scale 3 --txt_data_dir datasets/vqa/2k
+```
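+
+Both commands above read the answer vocabulary from `--label_file`. A quick sanity check before training (an illustrative addition; it assumes, as the file name suggests, that the pickle stores an answer-to-label dict, so verify against `oscar/run_vqa.py` if in doubt):
+```bash
+# Print the type and size of the answer vocabulary used by run_vqa.py.
+python -c "import pickle; a2l = pickle.load(open('datasets/vqa/cache/trainval_ans2label.pkl', 'rb')); print(type(a2l), len(a2l))"
+```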
+
+
+## GQA
+Script to finetune the Oscar base model.
+
+Training logs: [eval_logs.json](https://biglmdiag.blob.core.windows.net/oscar/exp/gqa/base/ab175_base_ep107_1192k_0.4true_taeb_done_25eps_good/exp_ab175_base_ep107_1192k_0.4true_taeb_b_48_0.00005_165_45_dp_0.3_abce/results/eval_logs.json), [output.txt](https://biglmdiag.blob.core.windows.net/oscar/exp/gqa/base/ab175_base_ep107_1192k_0.4true_taeb_done_25eps_good/exp_ab175_base_ep107_1192k_0.4true_taeb_b_48_0.00005_165_45_dp_0.3_abce/stdout.txt).
+Final server results: [results.txt](https://biglmdiag.blob.core.windows.net/oscar/exp/gqa/base/ab165_img45_1568928610179_62515_test_done_good/results.txt).
+```bash
+python oscar/run_gqa.py -j 4 \
+    --img_feature_dim 2054 --max_img_seq_length 45 \
+    --data_dir datasets/GQA/0.4true \
+    --model_type bert --model_name_or_path pretrained_models/base-vg-labels/ep_107_1192087 \
+    --task_name gqa --do_lower_case --max_seq_length 165 \
+    --per_gpu_eval_batch_size 256 --per_gpu_train_batch_size 48 \
+    --learning_rate 5e-05 --num_train_epochs 5 --output_dir results \
+    --label_file datasets/GQA/questions1.2/trainval_testdev_all_ans2label.pkl \
+    --img_feature_type faster_r-cnn --data_label_type all \
+    --train_data_type all --eval_data_type bal \
+    --label2ans_file datasets/GQA/questions1.2/trainval_testdev_all_label2ans.pkl \
+    --loss_type xe --save_epoch 2 --seed 88 --evaluate_during_training \
+    --logging_steps 4000 --drop_out 0.3 --do_train --weight_decay 0.05 --warmup_steps 0
+```
+
+## NLVR2
+Script to finetune the Oscar base model.
+
+Training logs: [eval_logs.json](https://biglmdiag.blob.core.windows.net/oscar/exp/nlvr2/base/exp_rvln_base_ep107_1192k_wm1w_b72_0.00003_55_40_dp0.3_3mlp_wm10000_abcf_best/results/eval_logs.json), [output.txt](https://biglmdiag.blob.core.windows.net/oscar/exp/nlvr2/base/exp_rvln_base_ep107_1192k_wm1w_b72_0.00003_55_40_dp0.3_3mlp_wm10000_abcf_best/stdout.txt).
+Final server results: [results.txt](https://biglmdiag.blob.core.windows.net/oscar/exp/nlvr2/base/exp_nlvr_base_11123_testall_b24_0.00003_55_43_dp_0.3_mlp_abcj_best/stdout.txt).
+```bash
+python oscar/run_nlvr.py -j 4 \
+    --img_feature_dim 2054 --max_img_seq_length 40 \
+    --data_dir datasets/nlvr2/ft_corpus \
+    --model_type bert --model_name_or_path pretrained_models/base-vg-labels/ep_107_1192087 \
+    --task_name nlvr --do_lower_case --max_seq_length 55 \
+    --per_gpu_eval_batch_size 64 --per_gpu_train_batch_size 72 \
+    --learning_rate 3e-05 --num_train_epochs 20 --output_dir results \
+    --img_feature_type faster_r-cnn --data_label_type all \
+    --train_data_type all --eval_data_type all \
+    --loss_type xe --save_epoch -1 --seed 88 --evaluate_during_training \
+    --logging_steps -1 --drop_out 0.3 --do_train --weight_decay 0.05 \
+    --warmup_steps 10000 --classifier mlp --cls_hidden_scale 3 \
+    --num_choice 2 --use_pair
+```
+
+Script to finetune the Oscar large model.
+
+Training logs: [eval_logs.json](https://biglmdiag.blob.core.windows.net/oscar/exp/nlvr2/large/large_1583307153868_14140/exp_rvln_large_ep55_1618k_b24_0.00002_seq55_img40_dp0.3_2mlp_wm5000_abcj/results/eval_logs.json), [output.txt](https://biglmdiag.blob.core.windows.net/oscar/exp/nlvr2/large/large_1583307153868_14140/exp_rvln_large_ep55_1618k_b24_0.00002_seq55_img40_dp0.3_2mlp_wm5000_abcj/stdout.txt).
+Final server results: [results.txt](https://biglmdiag.blob.core.windows.net/oscar/exp/nlvr2/large/large_1583307153868_14140/exp_nlvr_large_1583307153868_14140_testall_b24_0.00003_55_43_dp_0.3_mlp_abck/stdout.txt).
+```bash
+python oscar/run_nlvr.py -j 4 \
+    --img_feature_dim 2054 --max_img_seq_length 40 \
+    --data_dir datasets/nlvr2/ft_corpus \
+    --model_type bert --model_name_or_path pretrained_models/large-vg-labels/ep_55_1617000 \
+    --task_name nlvr --do_lower_case --max_seq_length 55 \
+    --per_gpu_eval_batch_size 64 --per_gpu_train_batch_size 24 \
+    --learning_rate 3e-05 --num_train_epochs 20 --output_dir results \
+    --img_feature_type faster_r-cnn --data_label_type all \
+    --train_data_type all --eval_data_type all \
+    --loss_type xe --save_epoch -1 --seed 88 --evaluate_during_training \
+    --logging_steps -1 --drop_out 0.3 --do_train --weight_decay 0.05 \
+    --warmup_steps 5000 --classifier mlp --cls_hidden_scale 2 \
+    --num_choice 2 --use_pair
+```
+
+
+
+## Image Text Retrieval
+Script to finetune the Oscar base model (4 V100s with 16GB memory):
+```bash
+python oscar/run_retrieval.py \
+    --model_name_or_path pretrained_models/base-vg-labels/ep_67_588997 \
+    --do_train \
+    --do_lower_case \
+    --evaluate_during_training \
+    --num_captions_per_img_val 20 \
+    --eval_caption_index_file minival_caption_indexs_top20.pt \
+    --per_gpu_train_batch_size 64 \
+    --learning_rate 0.00002 \
+    --num_train_epochs 20 \
+    --weight_decay 0.05 \
+    --save_steps 5000 \
+    --add_od_labels \
+    --od_label_type vg \
+    --max_seq_length 70 \
+    --output_dir output/
+```
+
+Script to finetune the Oscar large model (8 V100s with 32GB memory):
+```bash
+python oscar/run_retrieval.py \
+    --model_name_or_path pretrained_models/large-vg-labels/ep_7_816000 \
+    --do_train \
+    --do_lower_case \
+    --evaluate_during_training \
+    --num_captions_per_img_val 20 \
+    --eval_caption_index_file minival_caption_indexs_top20.pt \
+    --per_gpu_train_batch_size 16 \
+    --learning_rate 0.00001 \
+    --num_train_epochs 30 \
+    --save_steps 5000 \
+    --add_od_labels \
+    --od_label_type vg \
+    --max_seq_length 70 \
+    --output_dir output/
+```
+
+Script to run inference on the COCO 1K test set:
+```bash
+python oscar/run_retrieval.py \
+    --do_test \
+    --do_eval \
+    --test_split test \
+    --num_captions_per_img_val 5 \
+    --eval_img_keys_file test_img_keys_1k.tsv \
+    --cross_image_eval \
+    --per_gpu_eval_batch_size 64 \
+    --eval_model_dir your_model_for_evaluation # could be a base or large model
+```
+
+Script to run inference on the COCO 5K test set:
+```bash
+python oscar/run_retrieval.py \
+    --do_test \
+    --do_eval \
+    --test_split test \
+    --num_captions_per_img_val 5 \
+    --eval_img_keys_file test_img_keys.tsv \
+    --cross_image_eval \
+    --per_gpu_eval_batch_size 64 \
+    --eval_model_dir your_model_for_evaluation # could be a base or large model
+```
+
+
+## Image Captioning on COCO
+Script to finetune the Oscar base model (4 V100s with 16GB memory):
+1) First train with cross-entropy loss:
+```bash
+python oscar/run_captioning.py \
+    --model_name_or_path pretrained_models/base-vg-labels/ep_67_588997 \
+    --do_train \
+    --do_lower_case \
+    --evaluate_during_training \
+    --add_od_labels \
+    --learning_rate 0.00003 \
+    --per_gpu_train_batch_size 64 \
+    --num_train_epochs 30 \
+    --save_steps 5000 \
+    --output_dir output/
+```
+2) Finetune with CIDEr optimization:
+```bash
+python oscar/run_captioning.py \
+    --model_name_or_path your_checkpoint_from_cross_entropy \
+    --do_train \
+    --do_lower_case \
+    --evaluate_during_training \
+    --add_od_labels \
+    --learning_rate 0.000005 \
+    --per_gpu_train_batch_size 16 \
+    --num_train_epochs 5 \
+    --scst \
+    --save_steps 2000 \
+    --output_dir output/
+```
+
+Script to finetune the Oscar large model (8 V100s with 32GB memory):
+1) First train with cross-entropy loss:
+```bash
+python oscar/run_captioning.py \
+    --model_name_or_path pretrained_models/large-vg-labels/ep_7_816000 \
+    --do_train \
+    --do_lower_case \
+    --evaluate_during_training \
+    --add_od_labels \
+    --learning_rate 0.00001 \
+    --per_gpu_train_batch_size 32 \
+    --num_train_epochs 30 \
+    --save_steps 5000 \
+    --output_dir output/
+```
+2) Finetune with CIDEr optimization:
+```bash
+python oscar/run_captioning.py \
+    --model_name_or_path your_checkpoint_from_cross_entropy \
+    --do_train \
+    --do_lower_case \
+    --evaluate_during_training \
+    --add_od_labels \
+    --learning_rate 0.000005 \
+    --per_gpu_train_batch_size 8 \
+    --num_train_epochs 5 \
+    --scst \
+    --save_steps 2000 \
+    --output_dir output/
+```
+
+Script to run inference on the COCO test set:
+```bash
+python oscar/run_captioning.py \
+    --do_test \
+    --do_eval \
+    --test_yaml test.yaml \
+    --per_gpu_eval_batch_size 64 \
+    --num_beams 5 \
+    --max_gen_length 20 \
+    --eval_model_dir your_model_for_evaluation # could be a bert base or large model
+```
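+
+A note on GPUs (an illustrative addition, not from the original zoo): the commands above are single-process launches, so the number of GPUs they use is controlled by which devices are visible, and the effective batch size typically scales with the number of visible devices. A minimal sketch that pins the base captioning run to four specific GPUs:
+```bash
+# Make only the first four GPUs visible to the training process;
+# all other flags stay exactly as in the scripts above.
+CUDA_VISIBLE_DEVICES=0,1,2,3 python oscar/run_captioning.py \
+    --model_name_or_path pretrained_models/base-vg-labels/ep_67_588997 \
+    --do_train --do_lower_case --evaluate_during_training --add_od_labels \
+    --learning_rate 0.00003 --per_gpu_train_batch_size 64 \
+    --num_train_epochs 30 --save_steps 5000 --output_dir output/
+```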
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..0b91642
--- /dev/null
+++ b/README.md
@@ -0,0 +1,49 @@
+# Oscar: Object-Semantics Aligned Pre-training for Vision-and-Language Tasks
+## Updates
+05/15/2020: Released pretrained models, datasets, and code for finetuning on downstream tasks.
+
+## Introduction
+This repository contains the source code necessary to reproduce the results presented in the paper [Oscar: Object-Semantics Aligned Pre-training for Vision-Language Tasks](https://arxiv.org/abs/2004.06165).
+We propose a new cross-modal pre-training method, **Oscar** (Object-Semantics Aligned Pre-training), which leverages **object tags** detected in images as anchor points to significantly ease the learning of image-text alignments. We pre-train Oscar on a public corpus of 6.5 million text-image pairs and fine-tune it on downstream tasks, setting new state-of-the-art results on six well-established vision-language understanding and generation tasks. For more on this project, see the [Microsoft Research Blog post](https://www.microsoft.com/en-us/research/blog).
+
+
+## Performance
+Task    | t2i | t2i | i2t | i2t | IC  | IC  | IC   | IC   | NoCaps | NoCaps | VQA      | NLVR2   |
+--------|-----|-----|-----|-----|-----|-----|------|------|--------|--------|----------|---------|
+Metric  | R@1 | R@5 | R@1 | R@5 | B@4 | M   | C    | S    | C      | S      | test-std | test-P  |
+SoTA_S  |39.2 | 68.0|56.6 | 84.5|38.9 |29.2 |129.8 | 22.4 | 61.5   | 9.2    | 70.90    | 53.50   |
+SoTA_B  |48.4 | 76.7|63.3 | 87.0|39.5 |29.3 |129.3 | 23.2 | 73.1   | 11.2   | 72.54    | 78.87   |
+SoTA_L  |51.7 | 78.4|66.6 | 89.4| -   | -   | -    | -    | -      | -      | 73.40    | 79.50   |
+-----   |---  |---  |---  |---  |---  |---  |---   |---   |---     |---     |---       |---      |
+Oscar_B |54.0 | 80.8|70.0 | 91.1|40.5 |29.7 |137.6 | 22.8 | 78.8   | 11.7   | 73.44    | 78.36   |
+Oscar_L |57.5 | 82.8|73.5 | 92.2|41.7 |30.6 |140.0 | 24.5 | 80.9   | 11.3   | 73.82    | 80.05   |
+gain    | 5.8 | 4.4 | 6.9 | 2.8 | 2.2 | 1.3 | 10.7 | 1.3  | 7.8    | 0.5    | 0.42     | 0.55    |
+
+t2i: text-to-image retrieval; i2t: image-to-text retrieval; IC: image captioning on COCO.
+
+
+## Download
+We release the pre-trained models and datasets for downstream tasks. Please check [DOWNLOAD.md](DOWNLOAD.md) for details.
+
+## Installation
+Check [INSTALL.md](INSTALL.md) for installation instructions.
+
+## Model Zoo
+Check [MODEL_ZOO.md](MODEL_ZOO.md) for the scripts to run each downstream task and the expected performance.
+
+## Citations
+Please consider citing this paper if you use the code:
+```
+@article{li2020oscar,
+  title={Oscar: Object-Semantics Aligned Pre-training for Vision-Language Tasks},
+  author={Li, Xiujun and Yin, Xi and Li, Chunyuan and Hu, Xiaowei and Zhang, Pengchuan and Zhang, Lei and Wang, Lijuan and Hu, Houdong and Dong, Li and Wei, Furu and Choi, Yejin and Gao, Jianfeng},
+  journal={arXiv preprint arXiv:2004.06165},
+  year={2020}
+}
+```
+
+## License
+Oscar is released under the MIT license. See [LICENSE](LICENSE) for details.
diff --git a/SECURITY.md b/SECURITY.md
new file mode 100644
index 0000000..f7b8998
--- /dev/null
+++ b/SECURITY.md
@@ -0,0 +1,41 @@
+
+## Security
+
+Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/).
+
+If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://docs.microsoft.com/en-us/previous-versions/tn-archive/cc751383(v=technet.10)), please report it to us as described below.
+
+## Reporting Security Issues
+
+**Please do not report security vulnerabilities through public GitHub issues.**
+
+Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://msrc.microsoft.com/create-report).
+
+If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://www.microsoft.com/en-us/msrc/pgp-key-msrc).
+
+You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://www.microsoft.com/msrc).
+
+Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue:
+
+  * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.)
+  * Full paths of source file(s) related to the manifestation of the issue
+  * The location of the affected source code (tag/branch/commit or direct URL)
+  * Any special configuration required to reproduce the issue
+  * Step-by-step instructions to reproduce the issue
+  * Proof-of-concept or exploit code (if possible)
+  * Impact of the issue, including how an attacker might exploit the issue
+
+This information will help us triage your report more quickly.
+
+If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://microsoft.com/msrc/bounty) page for more details about our active programs.
+
+## Preferred Languages
+
+We prefer all communications to be in English.
+
+## Policy
+
+Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://www.microsoft.com/en-us/msrc/cvd).
+
\ No newline at end of file
diff --git a/coco_caption b/coco_caption
new file mode 160000
index 0000000..de6f385
--- /dev/null
+++ b/coco_caption
@@ -0,0 +1 @@
+Subproject commit de6f385503ac9a4305a1dcdc39c02312f9fa13fc
diff --git a/docs/oscar.PNG b/docs/oscar.PNG
new file mode 100644
index 0000000000000000000000000000000000000000..76c8757d9c4f816c4a7cd7c28ff9f468cff4df34
GIT binary patch
literal 339659
[base85-encoded binary image data omitted]
zLp9vOUNsnE2HI#Ra<1ECdhptuX#b>~QCH**9W%^CX?5 zUEVS6yu|dZn=a5Y*lCz>A-m4UWzg#?rq?q>+9Q({o9jemOvH1uBMI`(_k1n7T7up9 zwz6BL<&L;&yY@0g4W+yHbr z!JfUV4>%s;R!t3o3;6ks>DZloNb;X$RyK81Ne##2*UT(Yx1ha}%RL86wY>hVtvOfb zMx85et?NC4rh#oDgvOjhH;TK68>hVownD1CFVaok0Yl%lw@|c&17m`@^q%4LnF8fC zO~{Tk>-tt?<%|bktlIh(5pgbGGL#!K9gcqNV4W|&RPl2bgx8$Dbqo~eMk|SlcgvzG z-VqU&ihei5bG8SjJUin7lNB+0kng%N9ljMcIgdGkJYb#DUsjL%zFPhleJ>PSX9<=G zm~&;NA9Pp0E-V)qHk^z3ID%*5J@F}jo>{F`?Fo(Nd za#75@jFuz>1Dl6%CcA-ym&~|6q&Fd(WMJ%g{_{60`_R&?GP_pXN_NAufFWzoIq9}4 z_46{btjp*r;{bk)l@r`E$wXIUHIrgN$E9D*H2gy+nR~xZV*-*c_&|mE)`S~(Q45Z< zyh{6wFMD7L=+RztY-;wog12Jp5sSZ;mMZD$Is_v;sTc^|%EM{Z!btdcg@Dnfz{l0K zvCQ8TOfs|{8{O6oGfIGhqI0r9nkik`2F3*<^cDn~z=rsX@+@emg&*L}%2&OP>+f*L zeeLn&HfqjnU-2h+ktQcV)ZOL58{Z``$bAeLtGYV7@I25n8J-bqBQ`nfX*8Tc^I5Ie z!V8aWV>udL^ef%C3K(bx88n;Zbc}fhW;U|oqxY%DXLPo-Xh+U_fXPnQ^7yD&^pTlG zwe-x%(JA_Ibxd!2!M$H^;cW>02&xMSmZV+!Xd#Ph^dBLz8Q1l2KdyGm;^cYdW@XyI zG7sa_d2-NLOI1#4vwj6}^(Mr{obKH6xGDc^8)Ksa;eU~9)NYjN2SoqpjjyXQvwutL zLgTNli&%(dL$uA_$0fxP09})1u%4QNBNSRiThg^YqdExvq9;VXkkh$^2l-Q2UF*JE(9MD7Y(@DUFrlrg&$IoF{A^b?s8Vd@5TF9)L^VYlWnYzV zXQ7(j+kqcO@|NpN?C)gfYPZs~qF$hEJa#n;;YCiP)}RC~c|XC~?SxzIBWv2-Rdyzb zJey~uV1oxaR?p^w3e#j2YWAl2JpoL%X^2sf9LZOKEJT4!+!M@)(@|;WUy=`z)YfAv z)+cFZ7#9C(!r7f!>Dlt{T{CSct5E~9e>sZ|D_)wOs>BDM+ETks1>7dTC53G)+#*b| zu70vhxN({enpVH(P;vEm@;e4iOl9~~z2EI_W%51Fd!YugZ-)s;e6k`G8uMz*0;XGD@PTfpVTYbR_u3zE9yqXgNQLA&0_()|mU zbs`Y&QS4e@eYL@&<(a;FlhE$B;N`7yz;mJgqsQkOYC>wtmr#y>l7l9l#1x`z#7$^HfKt%(NBck7r@WT%PQIidf1w25OHxQLBx$_CXtPGw9vB z*%=#`0bBpktLP(&P#kZHp*8NVN>`T|p!xGfQDt&?v4P7uO)K4yU0Ww~i`&!svpzV= zm%@S(`UwcJ>#{3f-uHv7_Jb%{jVFku#qS*;`wZ^~YZa+T=grY43Vpk;7uw z?q8YLa3|BuxScu)?o&0B%$OZZft$+9KGt1L<5tl=uN1Kr_>d*a7)Db^s){Y|-YeSo z8(V*O!n*y0!m)sxoqx)6cNa_Q-h_m~s1K{_MxDAirNAU2YPVl-VY;SL!w1X@QQtrR zbQ8H89q-mDG}_A5$8@_wyc*;?3D8!w_u$DUQ9j=?Bh|`yV18APcV8iRDG!c+LQZJ$ zit$d^|970i&bGVA_4Qb65ImGW(o!1;*JhAAxtZ9{xZyx7;evx8i}ZP;4j8Q;V~F8> zkqRb-GwGZK|KL^$+!^=n^yF8}Y1F5s^27aVs1(dRm28jLL zc<4VT^!Z(rEfI#(5p3*Fk?X_7^wL`1vTYWc@t)w7aHDlvJSwH(a&>{?nKA!Gw%$k1 z2*i9|k5HI4=Ia}hIO#Um5OPW8>?@6mT~B9M8&&RUvdgW*U>Se@`gb?)p{h5+3cdy* zOBS-@%;>T@T$wk%ov#9x62)#!N0mJaE7eeObdS#u(%T)f()EAn3-K-Qf=})lJ=CaK zyApkbX(NHK|FU2#`}hLf;%vTGux!oLoZp2v+Tl`6r*b+>7R^#PhT*_rrbX6bQhqa# z+V8KDmIy_jQTnL**_{B!a2Js4%+!GOr+lPj4>dk@$T2Kvp93zt5X^tz--m*A?rXqV zweWbFad|=3XO+C;{^K!sZd&hT>~QV*u4m<0x!dckh_%k~Hwr(6V|zK&;n=>^@qF{* zB;k~Ug4)7I(@&nx5>irym)8NNJIg_fhKqVzZvseDOtYU4TXyt;4#vymvjzSf2E(lg zKNuo8hOs){_7fkJ)u=n05gsE{GM>q{{f9VSiUld~Zwu$@+Yzu0!&?qa^q#|af4)>BDB_Z4)~?uHvMZ4M1P zsolFvEki%8EU}US^2?_0ea_h_fGo6vFB)_kb0%czk+QlEf+NJwpoZQi<(s)%om@}C zG?DgC@JgppC%ZJ~X-rxYx>JRPymGwFqdW7ZvDtlF;nMhcxAZINDL#KZePM3DeAI~? 
z4+6C-)`7r23rK-$Mw6kPrzc$@LkAQ9msR!NE)PXdz}Y-nc2NTR42V-?7hJ>W(qks1 zBAfO2t(08CDcTBo|NORFJT&8LOPXw(MdMkIV#(kYxD}zZ3DxmQJKz`Zd%8u%)tmQz z;v#{m8gmDbEYiS*&QJxi-*T(bs5bk@#qB2K1&Bewwn1tgs`FKtVNW+a7%6Rgf1Y8p zuv&VsBr7yd2B~O#Ekm35hNNu8tPrBQv4euo3SZB_7z0ACsX+mo5CrdU!K0Hju%yqF z`arlodD}BF?E+-9!k|B4G&HL$Bm+`^!^S9g)^d=^Tp8yrCz3H=$1LbHZP$jX&&Cpz zaQI5-y?Z<~^P{6on!}?Q#}HPtt9oG$c$m(Jp5^YB&}Qyicw<}>Jn2wtB0;|4&bTmh z6_T#e#uf-w)Sx+kC(-mz8aJURU(Eo(Z<Yq#yH`3RV52QSST&R^3Cm#by?U6XMpE9!4p=c)#dU9W-3w z$BK$Q%#C{lcdpqE{)u`uydLTt70Eu@^1h6GF?o;Y>r9}Ej*^Tf%?JV2xw?B?hA9Ll zV2?Q7_5-Y?IN{rg>FFNx?mulMV+9!nehVq#C*QR;ExG^q%zFauEenzu5w<+oSnFt6 z?vLUv<^~^<_$8L*(B!f|Gs1JtRdTeld6^Iv$iEYtND=WMm1%ap3B@Vsr4D6i{hRhU zFQ~d%uHSt?&%NBlTgf}v@?F%UvBToBpm&Y&cT@w}JWW8Fj!(UyAlYq~9Ux%7R+>_f z7czR`xZbgwW4Dw|aZWW2P^{-~X%)5Cp$O6zDIXR0+eBKGnu6`jGrK$(Ibo1!tFqWf zEca*jHGf4jGoNzi9$VI1L9OBHsB#&^J2kLO;fXjC20p-A_xAUEpd*k|ik$M$9-YNG z`WJuQ-PXL#3h|R@0`}py_q%t-iwPh9J?`#Lx1@?yyR`4GUE|}`o>sEokS|H>pUwhP z(g*|9Dx$HT!Ca3e&X8?K$DhuWT0rP2Tl(Jt&#lI^|Ib^WQTot29%=x6aBv9EX!ihlXbUIf?S5 zBE1!0(v>C8Qfv&GeTZ|CSw@>)?;+;kCADL+0=&<|{2`+I{`HqZYyAr<*5XEphGd z7rV=7J@Ia-mLmg~WzIX0oEA>1(dzFCkhSNYqgc9!M(U^zsX0=li+wJ7?(M19b1PF| z>6;k1j08ObXX2l%!5;2yYzQXsh6^_O$&u#}8iv$_3E;_@U3=fSxDmo<`lsM@Cx(iG zTWN7;Wo=DBw%uDu8^)5x?>^_=8k7FIv&Pr->MJ4ag3zqN^Eg+&&7=$H6L>*Wa!6&s6DGfx4Zsv-Q5Eqqloy+irLMp_lxn zsxz`H`xtg>aKIumBgV*CBY5zqDf6f1yw~BaB;--FQde2&jBT~1Ds>A$hFNK1ggYj? z=L+h#p7*xqL9x#@LXqiTd`ds;l=Cj}s+JBx01Gkd*hc8aXrcqyj>d)$KfBW)7PQ$f z*xr)bd^SvFgSS#Q%8)3jP%E2*yFr*`DTuLmM8_UjRaY)hAdFm|1z{Cph(C72ZVlLX zx0pL1V7WO`>f6l-s~5})MhAef`15>U*|sQ~J9(Eoh3TxD;JGhqLu+1F%1?21p>4}@ z28s?#C!EJ90O+W$kE}-l8o1GDAGI}5&3X5&u^wF#qN+8RmE#wsc6Sr`GT?Q}A8B}L zMa<0a|9(^ha#%_M@NMq&)G~0YSngSd^RC`apU^FkUHv97auD7}6rXxuq#(v?bhK}; zn%<|lvIMH2!NSh<()~^tIEAB^ zYqB1}?f5foE9uqZ!-W%$jxufO=IdQJO=4mzlXQD)XSezVx35ss@5}-^u9(;g;KV+92b6VA;F!BZ2+IK7 znHyrplv(?WfIr(B#eH#S*8{?v*D0;p+f`7+>C}#qE^`NR;ym-dS(usV>;3`swUdF^ z77xSihAoU7@$7gh*=;7xdYKfw)}~TUTVW@pS0(jy8V`;_pMDWSR(`BaFkbqY`Zd4OC$&v2 zpYhO;KI;jizpHyiH%qcWze;IVMc;jSAvPT#M=A*q*8e2jx#^p4X0^0MHFu9kS+b8I z07O{>KI0$d*$)D;I)j%Zq%PK_Fql_L z-y<^ocd!hi;1hsy=UI{JJ~5MQCzS_hp4Ado`zf|O@GYDF0Yvg0J1lhlT<%7DTrhCr zgLIdsKZim3{v7GpMQ@3_?$6p`J>xecD4*+m!d*Rw5_2JGt`>fc39@3q8mJ_&d1R2B zMp`rCC&raZkcv9!BUybWHs}nFGhjn~U!o0`5u*86pU_a=KNT90q>3Vg+6zRMP^SA= zf;sJ>!pDJjgpZn1<3uRd4* zdy+jPwJOv26<3ubD_=mFz8p@qf%i-UvP z&xDqq-Tw6PmI2ArzB_+0u=74Qn4;&_ny4u@Lv+;I>yLNK9b|}w&TQp%NeBI*^Z5KI zD_78~?M|ARc5!pyWJoT`C@6!G6KvL6S&KR(adKm8EYE2{C3ne2fdRD9$10OJ6-m-P zzIubgazAi{uJt5{FPGayPP((Jtij&eUM#7U?9ySJXoX^XMzm4tZle9U!#$06znx-$ zQeaAKbiWz>7<2ArT?%^y_(IO3Ca70su%yID)M<7c`yJl&$-j;*wN2_v*Wk+BI@ayq{>e*xzbGX&QY-IN8VAmwP$bqgv zzO}bYB!7F}mQt?uX2%jd;%YAcGACG$jU_o>M_aG5e;Sh+(W_h3M|?T0iY5MeXFmHG z0Wb&9$T%@3a2dd=$MI&6vwSnvm^~}KaW`JYgG@v~ErzoS#ytHqc05qN8r0L)D4jOcctLPf~vRz^Uay!&-LiLDxYh zD`q4i%21b)+2<0J$gS4&lRgCnn@j zU%m*>>y{c@*Y=uqXCG7NA6=|+TH~*`oQEhCEg_hRS9!x94NwgcM>S0Fr(s>*JItwU zZS-fIuek1*GwyyvrDD*#ZUR*Kqi?$kOm<)75w_SG2EL6$m|AIg_iq1h&Ia@RwkVMA( z89y@3dX;17|0}NQmx=QD(5IWsPsXWl#ZqemOrK_5OperkEh2+ztv{%hEFzbv<>t10 zB~9z%()gRV;~291wEB&7p%?*o*O}KVsUpC|N3JgzN{^6z%D<79etOJkIzU`VkPL7f z{ZxnFXDos-Z#!jus>(>6ounKClt+4sLIB>p&3tIQ@53Ek!P5HZ&3!8d>LJC+uO5Y1 zw{XY!GnlUdb+x42YbJmRv+q3ox4fFTA~H7A%I$p6+TdKiT1+@6Ui-a3K)z*~5!QHD?rOLomxZ4P3Ldjv=D!$!MTj=t2o1l{_PpJ$}EeqB#%#wSt+Y}X6} z(b%Ft&o2;nUDN2)XF-QufnX@Qdh0|xE$E%q+(%;;E#r!z*z2O9%e`PuQe3BZy9WsA zIYy&JhO)L$2`4A_BDDViwC{LBe&Ce^5v`eYpUK@2h`Kf~kM}<+QG&m-TR85d2Mm5( z@QLJ%o4a?deZ%@J(Dow+_?=W!VJ{qqLJ@;bzN0Y0Y3wH6C8w!Sz+cq9*w!Qt&L}!% z6{60ntx`=Ej3fJ>eM(KXFST9Qun#-WZq} 
zsiGVO>H{X3?S%^J%Qc$~rx$~^M2|7wCwri+aY>u#5iu+_wFEKSP_UV2{J3}d3?|2f zY8im53~0ylb@Gzks~#{Gnv?7vmc)Kytg66)9VgQ>rlYVw_O@txyaz3x2G7lo45Ljr zqFGR1r>fyp(%HLEB6lGlH}QFTC_Ayrn<|QKm@{q|AJx~FF87_xftmW~e+qHX`(kWU z*JaijsO&I1b0ux%?lb{nm zP#9BN*8tex%O9z{Sp6=oR&x+Y%E+*q$KG3z|DkI#tsU&Qc9FLn1se3)eObnDI@~5v z^q+VI`YNPc8*E3~hqoKstX_4i;scPM5cS)eE%|^dgF?$d`L!NXS4BfF{{0I^z-UbA zqH%%-DJ5#BG%4<&&nRQA^fcw7A@$u^FD1y{M;LtX(|hloc(-r7i5dyBGTw4WOI+Cx z0c0MWP>QR%e-|(@BE4l!MT>1D4k7zR>HGAal_rYum+8LP5?kfalk%)WqbcLh<6Zj- z$kDTr`(BIKfk#d;2Rkh2bIeMSosO|%|AKQc4|FzkpcNhQ=Tn8&D`yhSGBEWy3!3{_ z?{vQ%eg5>>);3>%A$kX%=9iWedQqY8=v5ctVM&EB;&|?6B^W0f`Wok?H~(XD3z-4Y zKPX|FQiu)$=KQo6j6nF|If0dNzgbjtI@e*^cfr$SKKopLw8>X>=mcN?wMeHJ{!pBJ zKr%p}k+F`QamH05Fy9Vs92+@9s$2NFSVlL%Lz)y7Oz@CnxLXdv z%bht8J%~rstDldL4;_Shkk%?W22jyMVw;eIx7T}NbL4pZ*HChuMJM2*j0_2s z+tGPe+e@BxunV4WPsjpxxIBN{^l~c@ekM&jL*nayf1{20f{#mnscU{{f7xA$N5i>q zHjRYc>ma3cZ=$zNW5Z+4=z?yU7wm0Paq#u;QK>IcZ^jL`GyHCx>avK`6haQ2#lkg$ z{3ADrv(f8hhZgvwcm7Kz?Gqu3pH^vd{Wr?u`Bp@{dPu{)KXIVSdAP5+)1OVb&Ln+@ zeOGkCo3}THD2T^Z;nZUE8Uhz-{~C4SxsU)?_<>0X=S5>W_xFAK_SP*gUAQ^!_l@q3 z+4XQdWGt_4aq|__s$kCge#fgCPYFS1uP8+FlKHP$@(8KP9Y4)t)$Nz*ql-9a`Nz>ismls{5nwfyz0{e*zn`q@b*EIFq>yj*LIwPw(C)@xIF=e8PRuMr2N0GMpJj=kP3&^n|0 zB9Qc?ZbU)WWQ?B_@>qVznWRon0GthO!Zb_{TCc1EM*3r+!6H&C;!&#&eyrf~q_lvE z2Ob-AiGZbdb8`%%3|fpfh^94wzkR@M*@(AP4uqVD(VBMwsbSQD7ig0eI#FH!g68|9 zC)l*M3Re;Hh+mZ#Mk)#gcx=a|BK6mf0Gyo6{2D%~_;=~}cc4?yaF-|o)M-FEurS9s zl`z0oRi?9pVU^=qD=88XknL-$IB~}5Zn09==+X#rumXi3612I3|1=4^lL6eowKtng z0D!{2B{L-Ixi&yJbW2raKrseQagW+o$bw8@N%2d0z;c(&&<5QZUEJCD3INcqO0k_! zD~=5-;6WjvxkvEom`D?rG@^)4~mH=?TO3s#y;d_1JCpl zRr3)GmIa!jB!u>SbsTEm{laTWB0hjoX_N9FsqvhV=M1OAQv0`VIN9+|O9>1d5^AC5 zgX7qT@_-!dYTXK^)87JIwVcEu+OOC4SGrxe^%S99u%ac%c`UO@?L8T9LHsZ z)J_r!BiNx~TX#h9AE8j!!Z#p?)Vn0VLJ<;EvYOB(9z%BAZA+LB%pb=NxyYKV5dFj> zVimo#-ju9otW~{kI4DEkkCXkG4gN$Z)L|@7j(*ih>Io;c%g9w-Tx=ISz(yrLnvOuu zH0W>i227;n3cc)v*id*Q<(^RnmRYCMV%5BqFL)uiG2(sJx&Kz8qBHAPlKzf!s|^(4 z?-DmhJKFnwq?~vgE1*pgc|#?B4zb)4R{awIJj(p6+4W3s?nd{$$Z#aBEOyaJYMzT| z-i;%Ec+2doxIFlLR%oVx9pS6@b~QM5vO&kwT8tYm{t0O9eD&8-REkpw$J#CEp;N}p zS;Hq9>nibX3ckL$lum}SB$lz3i&%fjU(c#g05q79ZfqZMy1_~oNbg|fBGP{y`z&z}j*SZuPT`+9vAtaJ{D+knhFqYpi=CcAf@w=v$hnIO~JW$$1FCZv|W`v~Ov zga=hl4E>yE+An(*DR^6D!M*H#Q0FZgs{(79sEY?DK>k};867vGcyo!&b}w51DD>E{y5X2d3&L zB0I-q^jt5Vo@9ukDhp9jrS~Upx&twDCg{+mK#J}H8)(?c>fyD*hOK`;sXkY##i)>_<8N?CDr2@Q<@$^<+F0>9mG=fyed5 zxm6_XCZ(;tOZmDp>;hGecY{V5L%A(4zaA1p_j^UYPcld}8o;iXE`o^vb1OM1?AL)l!>xI2`XWq33hmyEXImvc=^-ixiggS7jwh^b2u;F z6vH0+dI#9hd%V5Ng#TpjqNIrrH>uDNaj`V+x2FXGJxY>o2=S4K8_ z1=Y2Yy-$RMuv)R8HHS-ip<{197$Q&-l9;N)4Y-i@!n$>FlYb&`a-&A}0}!hB=-PM7XOf<1+{*&E>`m)gHE% z=_&WbHtwHGimbC+h00Uc~w=910hfm*+;n8?EBF-mYVNHLQ#wn3i`yo;1Tpvd3yR+Hv+y1)7 za@cyO07wSq=bi#Set;MiI&DnV{QbHH!cXU^Y@2pu!Ni%oeZUR6CZZPoDM!09Y#=)J%?#PGy$F=_eh^&*L(uHc!; zPt;>`b8b}&+l!6R1=`nNhJ2FEwCOOTsN9e>%W5DSh?xhx&MlN_-}VoI!mtPcbjV-; zTt?d#SI(s~8Zv~qFW@}Na+Gh)lBpckOTHl(iZYGobm3y_!&lnc%s*z}y-;M>!Ixb* zz-iHf$&G}$SX6H!t*If|{9&ui$C08;@imM+(YoHqZA<|(E56v99|gogD3gkWw=Job zrWl}|+$h7}_3JM9XkxppduX&yy-Ks9k+FY%G&cnxT!kq%hOfuhBSL}j2;q5`56acU z7%AtnK^9}jtr~Y3h%~w;rP|u0Iu`G}C;-Pvsx=auD92JS+kb#53G6D{lv&@uS{s+N z1e!K3Rq?3k?RbM55+f_m1fQ(e}2z6p7AG+p*>UwM7 zrC;QV3V(M0+xJnbQ(GPvTj}o2w#l*d?8%$WhmYMSEMID zT7`U?;k`$4=?qyGPn#w27zaO@LQba`hWZ@6nqb<#MytKfQ2b{udaKd*0_VyqiMZ29#wx#k`mHs1;ZG_utj-r6tPqAAZAl>EM>uxBOV)vA+Yau9S+V%dPKvn@N$I zw+=FuL&wk)cZ!79vNE~7a?T&wSd|3T_n;_=35_`=)mn=oH+X{d+RCw8% z2I>9w)_6HixAepc{gtj~Nmi1oPfFV2&T2&9X!Ct+L=t>_*+Nj2-fu3ssdebO;Lb_o z>k6y6=`O#Xl%F3@j64%LIa-nT)l7SnnDMOy=5nt7+P=m}qD(%&_%9kcYO@c!J>3%L 
zc8zHJ7rRDVxk*??r>8XuJf!XGI_yGfwE@jj<78y|r3J~KGMC_KPLo~5b-Qb8W!k7X zNGh4-{|;Xk&6#bJD4&B9{mWN#tEsl2NUb$YzoDJx2m@9 z9t|+5H5jzd342IX)I*6bLmpgD7ht9Lv;Y*yNjhFaFmNVBfkL5qEk%4sXr0TIK%Qn_oxQr%+qe~PRal=x|x=w=>3 zsp)1W<2l@JvGddi-pyP^33I*7zs$4~DsWt;d&4vtpZI6;{?fPj?CO{!p&X*#!vWi4 z7zuX5nwQl4Oz9f?I$Y`1f04EirDWQUr)%yv_)~mdOWf74;reV-WI<+o$K%1b!oR24 z=4e06tTkNIPGLR0I0*hz?tk}1H{=0``!&0LQuVah@mZWkXU|8B$A!?27S$;|!iDfd zo}ik8ySWj*!Ut7fah5|-nB1|5M+=JaLv3%iVL0QPxrDe+SrKIWZXj%dEH}l3A+tAO zt0R@%<;@~LAg+%z2S{`&X?I3{uM`1bYw(JZ==mHVAjAceA@lW4qlH9*Ew891oQxatKvpI22?OasEzlupN`f5TUHu`ZH|V*dRKQ=x#sNa_bx z1J9zmy-+uzW!)ih{*$$pFN@lwlM1h8l--8Ur0WoT0==TmwAZeSBCm9-=oQ7u>S^1D z83m+QLIX&`MTRJ{RW#7&?RV#8`!$NACgKeMuU-Lz^)5Jq4DwiR9|AlQN6k0;K`urZ zKy^F;MNqSEL02-Lhj2KHQuJ4i*;T6_!8$lUHD5igb#(BxpUsw%&koOXdGM8%iMs-l z3;6il$-keDTCa#frDlDVPP@&(E3EyJdG1Ti=7?TJ7PQA_?Jq&*)nKK3Tn7FsKRp&; zP6&7$V8t!v3NTlj~3>?U-~FkRbiV$jSpt>`g@fMjxLX>e~t1c5REu@|~#Xb<{*?I5jbPz}y7&Jt`C6#5}5w50l` zT(j%G(VX&1>+_^eDc-p7bnH-GExr0>)^qKp@1)Cv!qf9-(wcd^6^gWqwsKhyQZHSaU=Y4nD9Q;3D_7?^U&umkpt4_PlNKGG){iQc1jimG)9`r0v<1n$=Ib! z7$03ESiI-e&b=j%t_Jx;?Z`--ZYacC`HfBLWn%h%%r7oLVWz)w&^Al`xe&SWRhCYm zyLGR!uNW(@q%l(D3`G{l)t%m`S2S%f`Utny7L03NpU;4dFppo8Nx!^%zj|f7^J~D0 z!m_nwT0JQ*)Tm+g9eCHt$y0{P^r-$5A-;J-z329cTFxZiM=4((I+X6&bXK>ciC*(n zH+ete8lea<(Bo}MEupCJ4s2IIR}O@c4b$lmF~-7^!Zw#iGKl2?-%(MRMuWEa=s0;E zTgFU&*P}Yi6Zj)7j(-T|)@l>l$IdXzIyA%r;%DUcb=rmDWR_=Q?V}#Ea*BU|*(nBs zq!P{K9l)mvGY!trnL=>5*Fq@J#Yx3bmk-zA)Cw;c*euS1c{WT;!Dx8XdtTYKb^YaL zawLJL#oSN<*>&b9pNZYUs_)<3WOhRx**Hqc7Edq9r- z7A+^BEf=&4uU@%@#JY47W8zT4D`$a8b>o)Yg&fYE`{%4$dU@KP`K;onj~b`G%;6@k z9dC;kLRe)5py-`P+;t|iX9KyV-%2T36k;?grt?2*Fb>}c_CtKD`As|7GFj5>3nGjevbP{rK+73^X67AOr$u1ckmNB+@a+g~uk>7Ycr+AsNCgH*eePnD{{#yx1nsH${Kh*JwH+2ET)G|UPEHa8M){}p+} zZCU-Gxg7GjqhI~O!LM;)d|LFp^1q3VMn)T{FNn5UP%cB1p%^xHVnrqZ zXbdgrWHojytrb@nrErgGVP@FwYMp~#V9oIqVII#tcuNuqcwoEi!4RHv`-K~7;MP2kuRL06N?qzUq5yd~D>q%%OcaMJ$8SO7D?W{j~epGmti{wH9~ z+yihh)65s#w2VXFxOqQAI^=HjvjskX0y4up*mGFuUwSMWpMhMj)ofwpq8?$O?=%N= zCj%z^YL|QR!oQTbO&~c&?FDdpR^wz-yx|P2F0t_TKYjJf#uXh7KpOskDlc%k7ElO0 zk?wU@6D_cBHTR&mPOUW5C0j_!%HRDQ(D;nOiU{r$wUkJgZI_ee zz-B_HFj~7V`=(y%HbrLpN0?{!nN{s1<;PPfc(6;yGTxHzqPT}Qb+o|5B0Ay(9V;e>`#a5 zH=Aj{D+Zc>h{u^xigbnVkn-$@L8J^&1(v^)+@p&odtcOUhinAi@Ll;GoIzId0ndl> zX~at*ZNOrK8+ci@GqFR%rx_lf+kqQfZSD~H`=Wy3IZjene@DvzJ817)N8nPatKv9* zh{iw9)q)E#qKwRJ@lv}67wnBh*CWt50n1*B6kWxzKcfK&WFD586!%pc1#KMLI`iLO zZ25nczPGLayYvB}*WWILH(b;z6wuqZZ^iDCM=5n>^zo*h%BXWA4cxKjQ=TFp-2UDm zGN%C3xN`Ec!La~`Hq~O4mDbsfvaZD@O-c`CE1Vz@!n{Hn+b zoB|SsEd1#4@IAzM_9f2Eli!&6d_4o5l0du|id~*saifkuXbV>G#B}x^3-EQwT0;fN z1nW}CKw!_{b=}Bw-Mmh7ywt#o|HYLFy);Hw+=adq*Y2%u{q<_{n5u5F`||V6q1D_S zWDeG0i+is2gS1)McxKOTr{hUrXkU_6W~3|qOMjf3PCH#~&elfGQ$rw-|9~2+O(!k* znT`OuoMo0=%NeN2?C~~jc4K@Yv@+zQmk@Pl?6Pw-xhKz50(Z))Y?Bnr@Bxk!#f*>= zS@ln})$^nNK(cJIlI5BIW2n9CEV&H;4#yR&T4BH1UAI=Rk?*gkSru`%NhP z(rb|F-zf51UL6gkpw-6#C&`3|@5^AxC&Q0(~iIoVcwH|_}HUq}3${8#UXNFm zwWUYW3R8epP@@3(n$*}A$mRiHaYUev-9Xo{_A8$UCkNA&0_S`QwkiQNUug-DGUW8J zbW}S1bO*!Xc$^N)rTgdyO-={H1o1{>w#d1Si_aE_vsa9h1+<{tL(wA4b>ckYjq$B6 z%a)3(+-$(;7blA$37bUDaOGuVo^aw+Xp_K-biZrb8qs=lk4U{9ErYm>3L;JL(ls|I zU2d^pT7NF^;Px@{1dWoeiPP$C0)h@dHMQepaZ+=M6rAffVcmxV@BD-vu-v%})C~iE zsPhOq?!hd3yUlzO0Qg-N-uO;nea~ubY;RFviwI8z37Aui9%7Dk?m^Fq>fP-{)3F2Y z?oL0=834ByQSEU<-RmjF!ia=J)&R{KIT{Z!(DU;o=!cfYWBJ<-CkdVMI5l<@piVMQ zt_Fm_{W>?*yen}>Fb3$DaGcs|E@?exIB-0qunCg^Nwwj5m2ieST&l_82D?AJxTAdDeqxQm)XQ*a?V@z^fyO{ChKVpmj;^A6~&jHiD)@MrAcRN*h1VtBAV&AP#%Bvq z<&k$O%=K{eQH|F*wZk@!sZy0~T@nR(%mfBYw-2v>dws}C)n}9y!P_{c5EduhlxFB9 zhEP_I18D&v*(27xnY)3c87yYff~Ez;NnC$zv=c 
z`JP5301-=0bRV-ug<#dxmmW&6EF6(+pnJb6BW_KG0SvtR^Mnb_Y_&*q3-V1@rxg91 z|JnlCK~95Wx*aI$$0@leqX&FgjKuulE#!jAGtR*w)(2XCqo3hLgS=u&K;{ZXRUh@2 zE9z-BOO~ciYvSH|#MK2Bjv*SxBW++B1KGJ+DtL-BNW!T<0kusTJ}Ne9O8p@lx(O{C zr(HIfeftS3$H;Y2M!Q>Jj>V0mT?#TN-5Sw=m1^lp+mNB-;4?vt=rT!sX5 zjS%cKDm5em-3y%vs+mQdj#?YG-uqEfl;?}xy^;$WW+1Qp#);AP%EE&K<9RDu>t(|V zoKpz`D4hfWFIs7I40Rbr0ladpHo>P;f}~Ys+Wr|x`b>>J0W6^+KP{oxk_!YC|M%@2 z0IM~|RHS1Xg~QSS0I|u6F4#Na z&EVki!_zDli(Q483=!7+BM?)n*c#J+T~F}DYU$e6QPgPM)BC%8V!aVTF91w^bO$m8 zGLL#(P$VpTc{keh+U3+P>0apeH3J1tJsqNAGw#SrEKbdxw^bGoH7r0yi

NcJ$v| zYmp3r@(umLN@@*3Fy1Xl8GLkwG1-Ty*Zbnb9V6 zH^CQ{u*E%+Zo(q1L0ia&cr`|&KbRYjLLmqhdPNR_SRYO3lWJ+AUDE){n5>y|_3*)Uu45O~wp7#mwS7r*GCx>MyT zI~P<_F~s7p$L_2T;Uu=rjn;BNGwG*bOYXtMxSYmGqgd7H&8BgnM-05 z8SHLy`q$GPxS?6%gHVe3KVFKpMt{+NYxuwxHYH1ui@+GFBIO@e3V1;lTf}}=D37Q5 zPUk)ME8Km)c{w=lZ~-hSlarf!`Jv=HyL6A~?8^^+mV_nG-kj#pwIuwb`!Iox>mdJd zjd$hJ*7u@-dg68W2`4CYzX_=HLZj6+I9Nkyt~PPSI{Yw!wFl~$KhQ%(J__Bv4&}WA=Xyf-##>X* z`4+?@{Vlu0w~VI9O3YQ=w_kv%_k^3lQY?%?zM(DJlv5Ux5bU(Oi4yQ?Ggpm4!UOm3 zCRB1mT*qjb4t*`_H`u{)1vN@d&2DUCCQuLyMQYgEP4>wW)^IQc zV>7pu#&{6BEvStVh~2&{Q?Q@VUpGrsw+zWW zQLNCYukJ)3C$7EkUp(;%NRpJsm6K15;M{mK#5AMGC(Hu2R-s5m%Imtp7uadF zP?!Rem3UNWosX8 zzpAFfg|oAScn+7q-@Wn|>G-VPKl`Ex6qiPYGMa zNLF?+&gXT?Ld8y}U{xpr6Oz8cWyYl$N`R7XP_Qw4Ll70aww0y7MSQZ}G{$aWzMcrQ z{>_%F4(qxjp@2 zXjpRTK~SZB9S%p4CveBE(%oht=hL+Aa++GG@!JtD7{D%0Ria-2#82eU@HpJr1uh$ zCS5`ZJ#1;AqaY=OB3()dy@m3w#Al!Lj`N=L^Bd>;@i=slVYu&m-E&>@nsd!H*PJKE z%ez?$KZ82EtzyhoCKq*6=!Z#RU)=oF2HOjV)H7~K-$Usz*Gdw1uld+TogAH+4qN}0 z{`jvQ$aR}Z;`3bs%YlvZ%gT@;S-$K{o!|yUDlW_yY#y`tPAX398aO!LzGAW0byi*Z z^-gqBpKizd(!a>rA+4P{Q)KHlW&e(~vJ?m`1uUiWKmRev%i6xpNaEK{xwy#76m)7u zZ8q}O%(>vv^iahITy~N!&B?51>88?pzvz$c&8@v9Eaygg>ufA;C}p_T`I;ih=yq+h zaLdg3JI+Gc=IHi$g=Sdfe0F8Kx-gD>pS~`j9W^!VH;UJ-pV_DydU4cjbB8TFa|gG! zed|EG_AqVUy@`;OOLf4KO|IC-M$n*?;AXRzX zQ=YXqVD7MJVVBt6CD|S~zbPN>)Q>VIc|+0imHY&?8uZ#cs97*!vt+k=d58t{AdwVx zl`qMV=|L7H!Wjen{7IIhs;$9EyADrG>C?X0bCyxJ&nH>EaBQ|@`R zXHK_5-&ZDXn~qF*CFUu2y^-pFo(hWk*V!F?I|a-`P%Palh^~Hs5a4Aa5vS{NMQduI zjoM7k4gq$12)&FK@5G}5=R;W9@Fb5`J_hCnllPHUsi~=dqD7)&BM-_x|A}el`*kFS zIgB^^dIavz^?Gf$bS{>@>zSNf;)|IeElE;nrV)|y@@PO~<_nuuQIX ztDx_Ua;&(j?k%|Qf`OV!u{FskS4OZYCXS!0B669H=!n>WH{fO&xzr1FO~lE* zyB^hQvoNn6d1?0c0Vl47fFoB$?6oS1Vsx+v88R7uZWiM>n09CCL)TH4Ua&B+g|p_# zCU7-)`_HkPb$l#>k4$c~6Pk{y#NX`%*3D@4)K_Usjw_CEBWne4OIII;$0L>cdPM(r z0nP;fT&w@HnC*FXwU{|3%X^VlX3iRKS%B9}_AVo^Odzb}EtYlWbPJ_Lbw{j~Ja&Im zoI44ovn|q!o&4tumthUe*QKr@t|#2ETn4Zg(!Oo@&`V1(i)Ok>_LAeIBG>Wo#c9NQ z7NVYFtKSVe?E;p{A}E3 zVXq-KP}=AEbUcm|m45i`PU?@+URb0%2MY!_t!eoqI8t=B8S&DVgO%X@czUmi4BrsV zpg~{Hpao^^4EpBoj84Fb>X_?d{9<8fw+Z=^ZYJ&6wKJezc9ZSsboF%E4#+QJ++(fu z<|ufztrvhoKoQ~7_xGU9xx6G*6qB3f)UA6NaK79so#WK)U0EMOef=iCi}YTBbe=-4 zn>;viIK*}V*Rr&}lL5#GR~9iJHNBCCQk?5#86xf##L6U3tf4m}CFhcK<$7~kq$c*R z^10A0#6YK6=ay7WYh&ESiec*IDML+<+`978jsH$?x(Mnx@v6t@^Zp&@d&3Vsddq^0 zrNcXGiane+`?THS523i%K}GT zEz>deDQ`UpV*5V%uGlDUT9dbsyq$Psj8Anozbonhz5d<+ zC+(a!tcJ0h6SMtN>qUxUx$wJJ|KwMgJofzbaN`>~8juO&g=mz$&%B538BN zws^zU62tvQMni0nX>y7z({8H}Z*t5h1vWpmZAlmYy($0J2Qs^EXL~tG%A+hK8mNN! zEH8m=Xd%Fl0JR+TJO+o4{OX-u4Mt0nWOLn8r}hP8_$(jq%Dsb)w%W=~OQlsQ>axd* zTc%OCQO0syR>>!J>x+zaBkP5{#b)Zffb>qQQDC;|2@0;JwL+<0ZiKoLSquNsf0U|6 z*z(5ms%u35y~6*-2n7nm5gEb~Wixfcsjm!_dLBAb>_Lj~fTs*qUf=Lm)Fo||eJ6hU zWu=*L)#B}rXY1DW5W>a04UAj{q*y_HYQLUA(t>HW&yqV2E%}&xYHdf6mq2oBmXA^K zn5IAA%u^SZv-G*Zr}b&6w6+KIrQoP4Sv~~y(9UrggzUYn{ju5x3Ac9kBUTKdKw<{) z$6348G`SWt&70-eme9jdV))1^CMax&l^1vOG@+g`fJA<(Y~D-b8g>CJv(2WSPocoS z^}c=P#B=kyVSXNS3_dY7-?wnP+uoL_()Mj1_WJbp=hg?qCC*39h!BEhEQWl99&Uwg z!{0nLKw;6oF@^&w{3a(!KyK;pNIRQ1yV>VEjlIJ2x> zLm7LZKDF8ItH9u~d-oRIMEsr0maJYe#3HfrcPW@W6oi*>w|bm}E))LU?eRthx4|7F z!vWR2+q129FB9i|)TixFtz)Eornm>8C!Eqe@*x+y-gx|O--}UZJtx>LfoC? 
z&0C^_d)5XiSGzTCzg8cjHPzHy|pS!M`1rBva8ky(Kmqj!p}j~g3^JlV?AuD6WY zAj2DComCa5wsW?=5`GZ7R3CeJ?EmVfG_{=$2yMem@>4HH|sui@Q$?M-72XaX2a1rGdw3O$DkWK7%WSwdFL+QYT*eN z`MV0u$NvYCV(l&SaPJykC^R;E*4;5>{Hl4~0}D?1`q-k%n(T^}`~rmmSYTp0#QqI0 zp4K(7awy_l{J9;bsiW&_HwFaxPoJdiVodr=BGBs0Ms2j!#U_a$H+mknW|I6 zyz>etdBxZ~xUgO!%7^bMcuS5R^jE_u6{i87x@A;x%f$fe*+K+*F%-)L)KtsBh&{I1 z=W23qzWVs$H#)a*_`-D3Tv!fHI=Qj{73*D7V12eyu-M#Xd{+l$v{m)j2+TW$& z9Fg9W)l!2uxp_Ap#w&@|Peb9EgtZf^Nwz7xZs|9#;+gr2-xa7OOg1A*w5M)Qd*-xA zbGu;+)H+Esn)7qC^iNnnD}p3>5U3xGrk`f@x=te#U8dxpeck?s7Bo&>A-=`e-1{*`CE~@}I%#4($cO(dyRZ5$n$vW};zdbe%e*aO?V9)Vt94R+9TqHo9VLR$*}?1W z+q^a@DpI;0A6;%ad&Mf-sh~ih95?Q}RQ!aCM$4`&qzI*In%ikD0sX6^+|NqbcRRzx{jIwWGOjv9ZTi^R+@+HfN_qY)s*B()ChC~(*IX$U)iL$ey9H#1 zMti86rh*Ci+vkCB{M;-12S8p6m`lh!%cKW8fmwn+_i`rU>C0K|rNV9g^21n3kwGz0 z;hWLz@_%p9g8MA;{6DvVjcPGZ#EU>;t(co0%srz;4gaBHDihrjiH-}=`dbiZ?|w7> z7g3zj8>w38?6zfX9MuZa105@JC4(FK>1}H+B>xW|f{ULD{L3=?><6?uIN9{3I+;F; zvhykvRc6U2_0JK0J^L@Kv8(r>U=JZ}_`zJRl+3$o9}!5@ORc}NC%|DG7 z$Efd~n&kU5{2)lZ%WXO-6=zMu)^?BYZyh^z_43bLi=2N7@$A`DaDR~m-egFz);~w` zZ)9~A${S&=-nafwshs+H_FvW)|JMz`;Nt%G+rQEK|E)B(j? z^zFjOe?svepYBPn&L&#gBhIgaGH?lU0*;^vf_luiE1TN{R|nm9lzbg~6iAQA$HuJvT&o7t6+5zrw#hWF;F$)X67#{Y%1e_&4U3>XX3KYx?e z%{ngm|5B}gz$xb!uG3s@CdNM_}Ia*fAS>i7>g=Z&wTivK^|U&swOMgQqqkgPXF zdFTK1Xx{v0YM}Q&{epgo=KsFo)c->L-=GD^>i_>bo_+gExPxV7nSq6v z01}SF=usJEO`_0zNYQaklFBoIaNz|sshIqwx{LwhvX_4m2>26AppWLqaH_UDW|fIYSu zxapHGMI|r8&dajPYy|Rz!kM10EZhAjR?nZe>83vX%`KrzrPPPdTln*PC|>-nBOz3C ze>q0T0ESTXsCKJjK708+t8}XvOlydxJ0z%ZWPd!9RMt{DBA@p|?yVtvUs^mQds9KH zKog2pZxKV}YF1>9)U_5kt?MiPe0o?x@d+tamcKNWEB3B`{TdlH71wh||Ef_cpOmW7 z<1p2qPYY0l{+el(4-rSprCeaptvIY#p0Eis_Hfhr>UB0h>fb%4ri%KQ6e@uiT))oJ zCRSzrmcjd}{vFxnIx=c@@i%WbVWQ?#Q#}_t`Y^aihO>6NThLhPZm4Fv`xkQ=2DNnB zrAEE^&5ndZ_Xkjc>ZHy+9+*oJUAudqAv@x*=C5jz4DtPCNAH5--n^O7 zFTdSwdNA^4`z~TAC#~u+5K9&iOLW?WeJ(S&hFsob_xTI?Lq?8gW}9ahfsjt+bmLtP z9fpa@NORX7P^UnrI;C7+zv9-h0Kb^=L{s+2LHLT~6KC+>u6;odMmQQ~KWBRG*f5pK zIG7^>bgK8Y#5r?{nFa^z`fmCN3R3NzsQd+iQmVQm+{$%2-)#ZakJ3irGa}erj z2HPh{c_(=28f7*zVv>v0;^k?_0)}Yv-Jh77+_tOgvb~;-Zh73p4RyCMSCeB|2zF{R zS}zZCKQSMUF0(Y2&1>1Xk2GgctvK9^OJg_vX`?`qcWC zLeUE&7UWmmcggVv*tt~0M2*yGCN22r96}a*MSf%ZQF6P+1L#sUpG1pdT?T?wrZ(p~~&OB_k5ix1?fk z$`1_8$9CB=C_`CBoTh8V+O!6r85&Q>+42c1=A;{(ssDr6rp0&ZF4P?y4CIPR*3vLA z?^Xgo3p#tBCbwrZWTJ0bsFg=jZZ8N!d@7lXFKO5O>1>?&IP;qcEV+#XJkkJadEUv% ziUHi0>w52Sk$pXa6&2JG6-V~XIm42@|FD-5d}!VaMpX2o;;Len9bfFCKGeMq(xv#W z%Qhzyz4f^!!?=b|{3;a>XR;|nN~I1-lQJYf%8;CPH8KJn{i4T#=wywC!m7cg3z2CnF<5rFj!VnS$b`C`8W zr33)sFF=+3RXVl!2Hy0|dBai=6iJwYd1H69SV*bkIkwk;UOmNENJ0p0YamAj^UW*SCwSzh%bA#!=1q7{G@O#m!ziml)mlT z>#tvW&Z-Ags1PjdvnG5&-@A3T`hvpH+t{+}(%cv)-SL*oFs~eiqmFBSJ7ZqTMX?=8 zZCK2SESK^C*w?npH}}KRy|*O%X0vn~9~YC{)g2#ch(*B!sHkW%97$|usQ7deXfo*3 zeTUtVDlDr)tjOGLsn`KmK+ceCDkU6!#3B^iwJ=uko|y<`|DhAf?U1@f!VV)v<$zwX zJH8x=26L|kb@x=J=oP9@ACXD{@mhf*6~vc-Ucr@7Wv@pM)=B zZ#EGuX{7v)4|ke^*JG}0E_cgJbTv+_4w|_d(dp`193269HjU*;JheJo5c%AIGdEY< z66peZCAkvn&YjGqH!~lmG)1}6?1+dk+Pcp=B@tEp=qNYB?>l4d6J&S!Aq;Kj#16`f zlMgm}o2rTn?{X@MRIoMq4qqp`>ggRlWHIf{!x7wj6Q}(e%3B^sKwDe#_7B)ZfxZFz zw#ES6b^~fStYOAoRJx%tG^44zWXns}n$L}_O!W$nxLxtN%#+YxP7GvH{5a$lvwyh5 zO$-E;3RQ8+`uktnE+gH8yEEj5D1pBZ5|WZfN{GXhh(&z+w)TQ+R;z=_@j~w~KN5bP zYra}+f4a8P`T21t@jZIIQg+P6Z${E@F4>{cyf>!tPt0O*F!6O7Ty7+J=Uo{%eJ3E# zRgO(LpA(b?7!n~#l@C3TPmws)j2s%Ra~N7OQ{|Jexb#y#6w* z&S%Y9hcA7Di5Y%^UmDyHb1jp#p~plP;e4#_ERT6V#TRC$pmzAF^j5`D+3Rx z1je3Y{9Dv}KHW`bds&W0ADa-ad63TfwB_RUtaHUMLY0oml%$GLKp8@l*E>Yo&lAxZixb zSI69f%$6sYI)ifi3DmyhHFtmtqy{Jcy$0*?Se zRnvj=%OezQRZy|DN5vM|@hKHsN)&7jQLu&TqhPD_C$_$Qq0s!`vw^cp6pz4>A|~!` 
zibnt(9U!Kk%=NTB+*T}HPU*OA!~*es^h~+#6Beg2_t&B(DBe*$dV_Ow?>qkbWs_Ji z1p8}2E^*e?C2A&BavE*Z|!x0Kf9sLkjx!d}pC6#GbQ3dsj zid0;&4gP|sug+eckt472j9IyTgs3G}Uf%kGL2~b%j`*Ke4@-4M78Cohyn{P$Zc=zf z*oU!z6t+pRfwNi^`(9BRw7$Z_IR5Z#>9E|G^qSJsq>)FxRh}&qhKbe$kk%ODZmwt^zirA@kY;vUJ`B`>s_n` zo(sPpIhyVR>&3k2t`<<}bS2AE|J%e}w)V#j0aA@UU4qA(LJ_uaF?*aFZ{;>L_@tob zz*lt=0BajC`dO{|WEiL<<5u{m3d)k$pD!~EDVxk2Rij4sc}zUC<+I6qxR$}`DCOmjbKsxICWaqO|ow2VVDfdA{P(7&=>qZi=S@j4trU zGpXL2M=1b%Ce?czV7j?U1$&JiAkwQ;_xDVx^&T9oMkIhwGk&&#;?uk_D1%e%%t}0X z#iu`C5%?{HFF#+=u&j&X{T>x2QN7>3I0%kC29E;}@TWNBk0=f~OX*?`nA~jw3*@M; zfgs2v@<~_e@EaD?<$tD{zb_VUOQLt;YF7E2rV_c`1-Gc;`c-BBZWVY;h_;8_Q3V zKW0=!9Xx_E`2C-Q7XpKSK^?q=VG(uk0YY<>!Gq4mICd0eCY+muC^IonnF-`#dUW`W zq}Z65&ZUKX?kXpfG{|^l{%HyKH81S*{&jDx?VdWQ{>7ZwJJG(5H#X4Y^NEF7Ogulg z*T!8r(D{aB++9{U*st~w%ITC*X*Tp(Eg`%-(#I}z;}=*cjxe3(?BLPS!%wQMlwR>O zWiQCGo#^if&xh{iya$%OjOxxTssJZDjH+{m3cyCSps35SN})k29vg-B@J3ox?es7% z3jl88Lup{5#B-@C*KM>+6@mf~LZ}^;Y^vfQ4q+|LG!z2AL_VMaYo$nyNoi9mD|3jS zq;L&BNDpvLS+B5pthm^KFb|$P0PfH`A@^m@zxH-EZZk%liIBhXKkJ5`1aV6jOaHx` zV$%vCXLol-QUgHmCsM4g;!kHqD0Wj`OxlEYZ%RHXGeOjhdEM&wtOF3dmn?;T1SMd$ zYLbI%ix7ufLssl|2EFsh0IqPo&2+30$Rc%nuRQ_Z(AH6^9|bj{KeWzX6HkTDfI4E@ z$#v%=OR@VM*n<}Z=4IwLR&GkfTwi)$>@={3zfo%yTy+52y2LHm^s-71#?@N4gx1;2 z=N1;sUD(5`_@JY!@5r%rrnC+%@p9Xp*X+pY{i6=wT2*3F&gb-7n7Z^!Ju$Z(Q0a4n z9wb&<-&y{%T4#R|Q+pAt@`NE&-+?FuR(WMqo_GTsQbT7dI6bngEh(BM91hk#El;U1 zuGW~PtdbW6ff10Tdh-RP`c%(h63~PohH4V09*m@kLa3gDl93Pvn}k)N911oy41i)K zQHc<^Y+Il8sS6+Z0%JgK{^Y;7V24Vn2W#`6S6|_UkPEc#O!i5UN*0dg$eM`~CD&ur z{*fJG4x4<_&nkOwcR=0KGE#Ussk=@))wI971ntL_VN0spu@IoSpzXZ4_hQm^iLlWj zpvyzpI5M83IJ!daZE!AkPT?fBg_TywadSA8 ziadVG4PU(@vFlyC9^xGnbmU*RY{&k!GS9=w;}4y&2#7+*?eW1eF65d%=CO>55#RQO zrc1&J1a@ieO0F^S%IOc@Hyr&-%n3V!spJmf?UBaGL1^kz%bO9l$+sU8RK?#__#=*; z_?@}p^(>BxSS&snGt-xMx`#XF<$zP(v^H-SLX+U@?fQP6_l}oGe<;jvMP=L&8~Hws z9fXhM#zw$5h|BQ(ZGzGE;4*(Pd6xg4%}`@c-J%8i;qt4qmRcrp#siOgHxD)a+`1%| z$P)pCIsE$fKWe|Xt_-?hnr(J3P?|Dou=F;do)2aKdg2`LwZFUR9EW)8Fwkr`7^ug?*@!>S)dMl zvpvI)2kIFSu-<-`3UzUjQy$r4V?xMi=hh;V?IWjt=O?v+?%T7dGU6JkkQf~z{mSR9 zoORB?tg) zpKrTj&`gC;XvItCA!7NI$y1hBNW9&CaJeo6V(l2DHyt^F zKFu@^BZBIohJ$*ZxZUbZQZvKXlXK_oM95EGGLSr!SKuS-zxn;BW>Hqc?m9R!hE=GhOZL!9VwBsOGMSl;lFaaR9>B)AO3JZYjeSpE?N42Jr{E z7B}(u-XGxO#9AnbbU8|d`cs@7vudVA1J9G^|lX?NXk@9Pp< z*?IAFVjd}mmK-{)z3hXj;3;L}Sm6lzRn%uE9~@F2}`Mb_--9IT}B^R#_%+3sdPlk?iQ&z^_*ti_m5%*5Nn`L|S1 z2G7H7H(yE_2OMLy;oocI`3g|gdfT&+we5S;TFG7$h@()2qXtc9WJO=%*If>FS$9nR zUSGdwXNX*Jz0&8_U`y!qoD%m6M*_3iCZ&_e`WqTz+}TppaK!@5CQqBLRbr}zvla9b zW2cl@##m}_UUIe;72|g9?MY?XZF=59cz>s_h{H<(dUe|khx#m> zq0us}3JJtYtUyVsynht)w6&e>@m0M4(v;TZ#eiHlcy0@pHDo?ku^(8!!{y&dGHMH< zHgU{N*4I}moW46U2> z7eP;ek4fbw-iykeE7B|WllDu0Achg3;bAQc0nQT4N?xSl* zF7DlI)ZEb6D0pPpse(!;zVbt&hb;U=SwDEgxq_sERdrFsrR&nfOfF98)@0aiN_6J; z3zxi1J8o7UJk{iq?LYE|a8~I88(Nl zx^$83T8dLr18Sb5K7%V?h{byS(nKXMdX!#o7sh8}4!^O1BSh66**MIN8Zkgn=DPc( z3W`vTr=Y7*AZr{rB`eHMo=}!vNKJRD=NCOCnX8bLRMpSCX1={{Ggwf~-uOgLZgD@D zxFs_|??noo)~uc(KVPt2O1T+*v)tV;0<+V>9iWD1>+kE`KCC1Suf7xXdQ{k~royRI zn&c$I@q-UF=h-0|2pdehwLR`fvg00LUEE!xVGn4TthQ4wh+Z}(TlWPJquz}XkH~#1 zUp&1f*g!Q`|4(mrkPdkk0S#OA^~8}4XjpmC&Rc)aZpi?QifB){%MN3?NooDLl}|h4 za=rLzh(R-sxsGVT`f?Aqabx>1H#e=#YxE`4rMOg?}r7ta|Gh?fs=qu;h=&Y zuciQYR}{p8N zP98?zcMX=4wDO-~G?VX$me*{xbnQOxqqD#B8PokW5uYV(35w;>n|5mvEybWft~I(I{|J#P~$hOAUdA6W$78ZZvD5^{Ga6>rL7P83acl*x#yGsACt zPHhD`uZ&htGTNY#ScgS{u&rI;4pIgN>4A8mBS05>mXEj3E?mC>4^ms4!h)_(q1W_0 zmwqIF{&6W$^RaR9{+Eop#%?QZRIW6=#qtgszuw^8S~6Zb#c2FeBit4&11h>}W9;q{yIO$wRWA`M$Oz( ztKln#0lM=EMAXhi)@xlJ5lHk+>OU4r?KiWOx`Ix;g>-h!7y9oYCO`!x+Jo{_3CAQ_ z_zDh$2af)-+4AADr;WJo#@XjKY%RNhhJSg{BLe(UWO&O02 
zkjLMK8R&^2T*mGGka!y%so1zUFhO9qDJ{>*?JMJFh=3 zORa;(P?RPBzDLK)zvvF3>$m)$->Z@{JGwnM%ub`*BdXM*76h+*+Z4)#dsN7=F)^Yd zHRJPio~3sSeK_rV8Riu1RX;%~KhC-ZiU^#{=I4%#fxFr!K5iPB5UW6; zNTnfuaUN;)d@!G}4U#eQE}#EOYQREwIGK}wKyHs8Nq@fznxCQSH*{K(99pd+MB%HW z>*xSfz{I+mgy?e)@5?Mi%X0gi?i#Wz0n;gA_UK{2bFM_xRM-T_#ab%Z4f_yotFJw? z7KmnL0ct<+ZmRbNTYGkaVFA+o`{7AJbGPt5>qkY+hY8PT-9TdH^LP*h><(#cvpEGF z5($m3n$U03JE%8Nk;*SI0#?nImmsm8q&(taLg6H+fMQ<9D$ga26yp@*mJg-@=vjOv z6GISpxD_3X7*O$WE5a+mrsZu$RRZ4(6kg)Nz4c;TE~Z+dNyPGYM^C5E>&bJUMP|H4 zu4#*O-B3E(4PmJDjuhx+^jt4)dX>!YGbB0$dIq}o?#N4alt&X0>P)FSqwY=$=Ownj zCOC{c$L495Q}f$Ea@q@fixNXUoV;OGLG{6PYj>f(VW!IT-;)2O@MZMmRIPAvLhbtD$JLxzSR`Cj? z8D>T6Q8UyU!R`QDPH^(ZPK^Fp+%gyLU{( z!!EJ4JF2cZqyT^z2@Uj$t@;PXm4IAqNfC-;Q@vgV5_79Hw)}$U?pe)9zD7GSQ;bdi zq`=X*2v4%%kk9}K2*MQiv<{BoR}E~uw>8R?PQou(k? zB&KnB;Oth(o^^~l>i&I2E?1v}01{J>;7US5b<+q7q{OLz{jipBLLmdSw=qwvJU-PD z7APouvV8p23HPFj7C+V5gtE2vK`;B(Q>tXiafIxWi&L_8d8r5I<_n2yRv$E+q(6*J zG>|{0UrbE5=B4Aq4$%%D+w~J&VJlqDpxAXm%{yI{;pAJhNQ?aFa0sIEImTR%hKG%> zz{pi0O(G`UCJ>p9lnlrWGIZ5qgvK&$2w%x-dLbx=a%KjaNzmmvqN%nEVM!*dQwtmoPjpaU=?3A`? zTBxMhL(sdIvjqtSY+#gcs8stGnL)p1iZ!R@6U9N8?U=sClRP6dK;lff@Fp5}wy(tG zqOe;7*udNMD+jxn1%j4V{@E1-7+Q0>(r~}c^86Ui|8%8>OO>B zN(=LS*l?V5DBTka$^n78nxNWT`*<`A#xqokSq=#s>T>?j zIK7XM-{|;BnV4D0;Nr+=`tpH71W&mUn^~G?pn`%we0OS{l4L@|~S#DkE zY&6e*dLgo68FOrZiCozxKNv07ZZv2w6Jri2F#JZt@xJ}$Bh9`vmrBKhkKHLo5w{of zqeV9BQ&dnDBr!}h@$0^)pW{s1D(ldT{+SUK6g;wN#$#(ovuFYe8c5ruj4Xf>zgcs2 ztn_xHoKbr^B5kPIIEz5v1(Cd z9p~d0S3!NX!HuxH)>(nDXYt|YdyJ?ECKZCNMR z*n=5Oi@8%ih@Cdo|Ey)iz=JOp&NZDEoxYKWhc$6kIvA`hCK$Q_J5zbdpcK=p zHxlu#ek6#V+r=F?55)CH)m35mI)cpx9_y*MlbJa|jc@+ix&WOHtyD21AY>xfd7;+F zPyGTJQrF=EfxD8f>zocbA9trJ!$Fpj(9s70K;jpC4z7r{wt$j86XWb5o4!&*dfADGxMy?En=ijK1cJ*(wfFEvsQPP4+#oAr8JOJ-! zYTROZ5m2k+wMg#lma0d<(s_cw0m!*D%K3KzgD{XpUuf9i^;GAg3YW4PsPIJbuP(ov zB6R~A_9)g*54ggE4~oWd^eJ|R0yC_kymredh&%s5UtM3F5Hr}?rmbGR$Iw9IQ>jH6n zZ6WC6qnT3BnKv;q18v~mq11CEw`!FN`k)PXTnvd`j#TnTF=bTo)(C^;mfDa7Xw0@T z-&;w^;n>*iL%~-M^QMV0qVh00r-4JgR2kO{Q8SE4*T9Wwx3EaXC!`T>>-^z;2a zU+%DHzi5%v2(v0Xzhl-Bj{|2T+y8xp#U2>6nuqy4N>S0yO>H)eP^29NvpJc&X+lkr zh5}FvsKuo)uqoDcDX&E$@K%uOsvZ+L^eZ^^B&yWqgPvvfe2~Wlf%fr&WArnabgmxa0SYPb=vO9ka+MO1uRWH`RMMF9%sk zY3k@FK2?1T`O98$=kYMowFt_Dpxu^sDSTM`>3TU=CA;|Bd3cwnr(vb)a>R8_#~Z<+ zCjTA6s|T(tXfbL`UHG^LZ?!#yrumbd_ecHrp%I81D|h^6DthnA1M|Fvzp;eYHgw72T#h4f zSAr9H{LOFR&@=W0pcu42eB31*6bJN^+b$S~h}95G$8U$WsRaQ?+a}eA%y!8ugD~^J z39*$`j!tl&{y=}4y!*D_lzeFuY$}Qu%VYx*`1U9U*bg2p#Hd1x738x;#o9fW9M{#qaY93IqCWw_fZLDny5s>g@X3y-c;jZ zvnz)Cg_>{l9)jTW&?9ozv0=2*sRKWn+#Yn;xm_)*z4*2bp}BfF?{xlpT< zJTPaWnfTk14E}2^wUEsD`xq}B1bM_R5cj4w-RhpEa6{aGiE2ZlyGV{k27w)}4aMk0 za)yJMvs=lS95LqsV=KRw^czTw(O_$hRLo(Gb%BTD@?OgDRdYZ5YxUUW;>z)k@v<;E zG(u|SIYC}XNj_tBF>`x_Orn)OhtS<_>b^JOU)wRx>X%+2ziOX)^%Ys9$SkH=%h%@1E|4`$OdrTIP1R-g(&%rpE6iy?M*80nYU$@6inI2?oCB z2RT>|6dp;F)e$btyZB$4nVRV3S6sR$crJP=7lhn7(bP@8P+CFL(Sa`-_$76(k?yne z%(7P*MaaXtmgiQ9v*}D20wl+@tmu-X`JS?QNFa0 zAkJww4`UwkdOfDNf`6i>QM+`pQ4Ml5a+oF0#)AhGmt4POtPk4eiT2do9N=Oz5u zb#h(~XZ!pp-V?{aAK1R=EMo$DUT1#mnRk@|i&qHHLelf~vq%|k7n@WS4_o}ETJ3TM z7P9@utI@nf;tt3{p2ye@#ttqe&=s>mQ#??U;v#4GM5gW?vgSN2Ik@7MS3sZ2_4R8| zuPJMv&E0FFHtkDHnmDt08g_mVsL2~WN}}Zdh0CqAXIr}Tj>2uP!A#&sp4ct;i?sUf z5hsufvVn_T?r3mLZYvMu^fPj02la-xdo0&RuRWY7NwP}>1?Cyc*^wzJJ33b1DyO=G zm#eBWB95Vm-0p5UTz=FDwl>wW z-Oi63Hnv1uTKP1$Yu?TpFvq8&8XkZ~ZnwJs#)B7TN;7!4ja(j14JItvZQLU2mDszo#SN ztHY`dOU#WRn=cDE+bR*53T5%S&Rp!KhD4Lqsm7#x=AZThPkS!hnYGW?j;zo@a98FV z#KgFwPs9o;*Vm>3;G%vINZmo?0yIo(T?gX1FXR0g46RUk}l{mKX zzT4l)L_)TBKVWWq$2{=BJ*iF*BgMaRuc{DEn< znt7!d!Nra+wxi_m-IK;|=~2wi#^y%XXf=;lk3w>C2Kjl9>d><%P=?=v7~fdgGrm$C 
zEjuE;IIB1K=Xl>0*&f-DUd7{sIj@7yl7>m+Z1S=2uJfla!G?VoUvX^j{|IDx_S8i5 zRWL&!R8Mfqh3?Jiyuajbaua>96C}&H99eG3!|%y1qx9E=IKtj1cswUrdNKA3_8bDG zYz$*ZtU3WAn(6 z^M&gBxs~0d-W$&NN)p*>r&;n%K)}|O4umVy>vqTB4-UP1gx_x5Jy*527#KDzp^cya zcE@)1+GFnx*dA->A2F+`r53nJ{4I&SNSMlJ#WGG+v4$(Oqx)P-o#Gas6Eu#GmnF62 z&c+ivpDnf|&OR9n=EjCfvoD@||06_8{QM<4$kg}GfzRJ+ZSwK`re%%vx|UE+Ubng^ zS2*S$&TYhUE}h+t*ve21U!i9Rkvd;*IVM|i@m!6r03I9&=qtRG$|Uy3!81G35)Z15 z#SdA_7aVs{Ap+rRL#H1REMRi11e!$9$mxnHF9!TR@Y?j0=<=@r@wG?S?|KCU9E*$` z@E_s!BMH}^jBgt}KUGNDxsKVJ+2_jHQr~RF9PQ2?f7AsoCxc>D)#U|^yKa#LKAV=K z^5-PSP?GNJs-v}5-+QBwd-J7kB5Bh0*2m?&?Fw%NeG6+9)I9t7ANPH^>BJ*ePsWaB zB?}yjTigmA7a^D}$^TmX-q%?XzKOB2p_N0hH&9@ZfMp7$kBPQ?_2J-kM~CLoSG1ClS z=e75l5Xe+;&z;oIrpK@8V-#2~+KCx@inD)|QiI6;@YA;Y9y}`gsPs|k$ukYN)wX+G zGwx)KG$;%2sCewk6g4MHJIq*Jz8^K8>R|dl_r=)p3+L`T@S_3a-MOubQwHl;eVpM` z8!jNW^0e_bR_f4xdJ)GRziQ4^o-WIRDL4giyxKuba_a;WXqz zcU4gN_t<*?2}}Hv;{7Kgg~wdExhwCtj&@fGDf|-hoseFicDmVV4S&DaSw^S$er{*v zn{QmfI;%H|M*M#7O*0KeMmC0=eL^Dlcia8|#^e0QHd=(^ksXEL5clJQ+36(n#XR_NP*gx?a`OnUKhf1*s)h03+vhlKWY}V&WttziD7a12B)4Z{~#B1>TyL;lQ z*S>FoQ)tucvz@YDD`_0dw>+0w-*G7j3D4=K$-Xqm)0x~{=zlJ-9DI>P*HUB6o;jTN zLsWgUF8kDUo3aG<5HsqOIJ~mEHofll+S*T&KKI9GEjXks{fC{_?{>{mAydD82Q?}V zkKBb6CqVzvwMIJeHf6=oN#6Urs+(VUeHkw_U9_n4UQ>E^T`}*JM#xS}S@V8q??(Y7 zh^f=EY>TLBYO>uW{g_=kop@!`q zebeGP_7?{Vy$|Dw0*28&xLcPljR}_>f4@^LfBz5lHO@^1WB~WHavA}9vG^7j;+0Y4 zoqlmnBXi+Tq`!XMBf}^AeB&aHI5XD8?j0U<50^P5jyk2v2FZGr1)R%0|9_}^%iy|} zWL?k}Z82EPXo1C&Ee4C3EJlkNEoO@uEk=u(nVFfHnVG?F`E;LiyYJlh=1jyy^voZ{ zmiAIrRwaFvm6>ZT@&2^Cl3Ka6|LpFiwcC-dwX8K=!p$gik5w)pc81~tKjbm{K3vE- z!~(4k#aSetT89ZjlM+5Z};o|@ss(WjW6G-xcNd8rh1)aPdGkjxdcaE zDyO6F)9uN8twnOP+fW?se6uq%i{)}4!sMN-rtJ4+YwBmAm>&*IhE}U2gipJ#ZQVQX z3(ilLeX{x@bnk-cre|h;k`3=GHLPBWIqI|6>kfo(T{;I5Ey~OUbqKHZS^>gYmUHGq z0=S&`pOm~Al}rSHiEyymca_nL41GrPxmkU$kWv>(nP-JfMcws>B-Uk7VnPiBdUrf7 zcW~G^%En~KswVe*t~qo5@hY&kX9$e>Vpu-QOYEbM1SOOR7rQb9RR|xAfcU5-k9|+< z2scYTTjbSQb{2J<&I+zc(oMqui6oSiUzx=GYLxSgQnC5Y_PD8aKT!gr5{(ZtBLQ;* z(`LB!h`JVKsK@ACx|EwRitZQRf$rDS=M-Hn8q_Y*;MGP!Sy6M+PBHhfv%K(`IuPRp zdr$Lx#QpPTy)Gp`iZ$v6DT*X29|4NfyCK1KQ9O0u48i`xJ%1NLYbkS5e>1cWa+G72 z(}un6@|5|u{S`?X%syrKr3d*Zj5F$6>RQydA5qZIh-oUUE)s>##Z%hu3DEW^Mg{O> ze1CMCRcNHN&CjLwJva0r@6fc?5pwv?YoV;7-tfCQX5#fYJ_xME&r8d(b~}=T8Du_t zSh~Mk3LMK~8%3I-u)&k#(ZzjW_IM(9z+~4po+rU`dX2{WA`daz-SA;lY8uJrm1LfL zP@tF;ES6;3VY8cHSe&W%i~vb+BpBh_PiZ%(i387n4R~OgEINWp&)=EE4YuVQ-A~E?a^*=ghAh{uS?=;2YDQ=S4pHU?{67#Aq4N7t3%xtvxo6P?~7OWClmn z3T|{2C7tP4sy$D^>p$j&hkq?wbU!*~&PS+-Ktu{!i$FvRLW}vv3yzEWrgyvQ;sZU# zsnDJABik}y@=O`Uk;{^}xscq@0)wXT$wm@ZXj^HdHe8TZ-+YJ^{u;S#t#s zdRO;&G*8R?H?w_7MvYu(`=Beul%maNwBS25uj=HE8pm={Wh4Rlx%Kr#=1UL3Gb7!V z zyL#mCMNotoc*Uno#?cH1QY>|5HjeI}l)r8e{<;s~a?8<$9z1J(dS{ik;BSAl`?KA& zzIucqVb8q10GEECB?<<}cvl4Ou;%K*1y7;=vmh7%UEd-eZkO7&F4HjTd7X_Cj^mBP zwpE|3A2)?kfcGO4OkM)XF8B9a5f@{guUE5C{VPAMwpanbIX8L77X?^Y3E7x*SygN# z0FO1XUbo~i0w=uS4wticA4b(hW2WjtV`4Y{aFR=MF>pTmf*soKr`oCYoRovI`Nbo; zzm2ZguZ%NWW?)PYW|gD#NWMytPv)@S;Hd$)BGdpB$i1up{yDM<@(BHC=CcZ3?)hE#5cVnNOLbO# zrJ7CF=j(jGm4LqGI=Y!2g+$Lv-!c6^uLww#o%%pwF*eX~#iAK_Md7!x--zvpkFI*H zdB>CtZ`epO>yZ3~%Tleye2Xh9B0h&Nv(f1KZ$JtZ3vaKEbhmc%46Nk+=Xruka(}bp zxVG`TmmIhOpt*Ef&O7MrL=G35LR>%%J{y2pixWIwANFic7C$uu(LpERl5*RkyH=MF zK5B*YKkpKFNcpvOMa3tI=7B1MpfTQGctFu|0hh)OApMloySxwEs1Geb$(y*AvU%5= zg%)7b?<#Aawiy<*%QMWT?)F9qS3Eg^s0{NrmmR_{P(DqV^bfDi4?pjZ{#0(Gn&Drs z%?oU{cK}32^-Lce^dMTSgFYlbr8mZ*MD=Tr#qnG5AgnguVs+JMcIaMe;fZ7?`atoCJKd|0!$L4$O6}I*~@nYEy65^qJRf~z;(f$#}?1;2FNt;L|)=T~=`@RI;041O9J-`HT@0yQlr*n(%&km?a$qbF?Nk@FJl4fW-0; z%4)swDfaH#_3dAF5Bi|1;EHvdFcnnwOqjJc+L!KDT5S&cv1t|b{O$LY^3N}kP2m6U 
zuI#U_i+^=PB=&{>1|X4t`Tf71iR44{(f=R6L1yaLi46U(C;#;m6g$Aef9uis=Meow z0f52(tH0vEe!|`cf`b3!H^e@8fUo|yKAwO6qkd%9Z+89bIlMYLI>h0GymViA z2{HaVYx@QILYl1STiu%Oh%VyVrh?urK1O+trA=;}pne-z)SjlQ>liXYf(M{jW?A z%zu|4XQDttF&rG6nAbplfP~5PJZOSIf(}8ev^zKyFwD2s2I&D`#1E|GHNFy}{a;9E z{q}rswy|O}|TL3XmzyI&EHqZxd)ZKS614O%= zp>xNqguLk-Lx_@^RubX&pj?cL(R+BM1IDXEn-j8eS8zCKo&uqe1L1>z7p}XMFkIFT zFE)S7eJ9Mgy>oLkXSEV^hRFMgtOhzVD6;#Mn%a7Q$3EtZxaVh^)Z)}w`me09nlk9C zXF7C1G}9NJQc$^N(@TZ2u@5s$9(WAcgg<^DlGAI{pS$BOv=A&F?zL}}0o<4b6*=+L z;|9J^F12+Qo7lid%84J)D`>EK76w#uyWTE0zFTws&Esn`L4kyNW8hpw^s4Ar8S>$u zgZ>`bM|HxId~r;WbMXZ+?jYGvtqtH!-Usp8ac!Sz%88vbA6RxQ`BFpvZm;373#n_+V)n|HUu6yQ5z_e^4MH zXr<@p=O<1N@ZqmTO%f_q7X@hzoqi$_ytVyxkr$hao75PmJ2ns>^ZH%-w*dT7fEW_Y zmt1I3Ol*5ENnWd1D?==x5AewmZBE``PIco2XEPeG8=`Iec710jCneXNVt?)#Fwx6H za9XFJ>r%^Sc%ui~#S1~J51D(62E5_&01?-FRNnKvtg^Re-q5_WxT<36jgt%zz@%UE zu*g*wZj_vOUEcrZ@+3Vnf=uR6A{h@p?A)VYLG= zRofYR@pfJ^|4RK%@M@PJ=km(u=lYCeEZ7A&>_0>ljrb&eigSU!;q2G=nH}C_^cV57 zFy4>dIrqP6T@e83jtofJ>WiRm`TQFBaFL-hCr|I{aK*2&&;mU9=lGL+ImVds)losW zOa>>$gJ`_(LT>BVys#IV7q%VqxbVN3Zb*{PVt*|gv z&fP5GVeGNx3;0h$Z-4>)0z-hx%84@E~8Dh^Z-CIk1BH5k#01 zdm`VD{Kr~6UkVr&fOCh{N{1&F5Pq$+kXW3Ig(3vxXcfQ5UTNXxuI_a!J^de349L6; z31!^R#hj(BmR9S8{?}^0Jf;WVR;N!%BpRnbVZf%irGi``|~6gum%^6VR)MiDN9Kj|9lt zUt9JHCG?bJY$QMXN{)77%X`yxnfk1ftYx5AHc43bdtY6wxZPeYR2CYjT6p1vWRLqO zX=oH0Q!B^80jX-*1GGOz%d^M+L96o*ev=Xt^Bq9SVPUR!X!Qdt{&S`lxKx3`w_7Z~pFC?ZbL7gpCZI**JEw1ZzMRE>;RU?z7jeRr z!P2(hrV$D~6Ia*(Qy{~5`ti8IT^zUNU#7@u)0kMj)ze{O)Yuu@=zF!H45$X&i8x_1 zFH84M#L5{qSr%Ym@+yz$f6Eh}&;1-C@AON{zF|5s(_b3`kkOjCauVR*@LZrJ4xtMz zX|Mzzh}JOU?icFKX_52^xhcM{`Zwa0_wZanC6lb zrVdQ|mxMR%b?!3d0GyhkUO;+jr{Myalf$e3hwi^pgRKsp&UFHMvesC&24ERr@^uA= z7Xv`ynJAHw@&@U=|4^;JhI`tIK(Kib;H87}*e49k)mNr`K}r5S9g;B0xst;e#vj_8 z`r?4~?|a85AmB3@LgY%C^XgTn(5En3`Aia4{GRP`{rWOO z!Da0=LmTYh=~_ow*`#iI1G|9xTYLUo>7C33L0)mHbbjd3Tp6{da=k+zv$$vbA1v^x zs;p2xa96)(D1?U^pv<}MC^&u`0>n?h{YoHAs-_`G9JlIU1>;V7f-bMPMY?)xB?#;C z%R(bRqgz}hVfjA@Ge1--yl$mcfckHAE!5j&O~S2*`%T5N=*#^{7(n7#YNn>Ns%xro zU(*jl(jNPa|}Bqc|HV^e6j(Nn7qdn3c)j z`au3BUfkqYB`;{efvXK|DqcoFL739}2iOMsDPn}IvXhugKtl)pG=3}bDKzw@pmw?b zPm1T4l?{wj7|aW;e9Y(0<{g8#Zwa6gFXM94CLLCzwI*M=Hn4YzPsXHXl^4s#nTI82 zNvGv!nTO@ol-0Ju*Z%+u0VUfh>O|4b9Ti+uYjPAvc?)^od z5bwO=ud!PM+}BZesqy=8d7@uCPS24pB4-_cj0&hJFOITyzg;B`KabmO;n&D^7w?N2 zU7;SvU)>=1hV2v(GDbs-6gLZJ@2?!0+T0qE5Fhv8+tBX<5#-J$c4ax){4o{?DN>(} zA#?=2QkshkD9W@>o_+t~;;Hyg(O>_=iu}22lK~P2C-(5szghaS_KHWwT*dnR(MeG< zjSZtv&tG@&euIHI0b0TbhG9a^r(zZ>!8pP%DDFe(r@l@}M=tZQJnh0)=!B$Dua*ps z;%nh9ok6Vn{d@`WGZ{PtpNlSSE*r8?I|c1xX0^pW(E+*gSw*iG1;vCzN$-kU|LR{b zlh1+)M^gR`t=X$&2UwFAInH_`HVi4*AMjoAhV4ITmooFsez77kx1Z4|$a)KRP;;6M zGoH=_^Vle#@ks~{_0va2=Smj4Pxe2{N8mAQ^F;Pwbjn1zQb|JGZYD z{+W#btZ9zCZtA0r^pxDVM7q2LQ{j~scv~ryf6dXkgw2@Ikxx0?HGHZ?lTi2OYL~Y2 z**T;_`_|3Q(Jmvm9%3L$4%)%iDo394Q5y!1;b?dSwERjM(S@FcSZ?lm}fo zmA4$ftzgXLtCc50^!s=}z3qNpei0*hQ&?S_({ZTv7m546WTT7SM1=ZEE0kK=!tTvk zG;=Mbp!oE!K&}vH@ZH*CM4OhnzZLH?Omh?HYim8qSMIjAXZY&AMe0q~&bFXYt5mN& z$I63rH3X8wv7>-lLU*A2G-?OK1>mIS#FL&!+Cr%dFO$%6PhX z+g>?eFYekl&Inrq=1MhtJM!i-i%P-#V01(gE`){6i6*mNqJH27El!%-)v#($q|gkmoQOL;3LV&{< z?mAv**-u;W>5+)no=fANZ?=c4Jz4Z4rI6O{TXoO3GV%M-1nLdNMZ$NNDrR3Sy5`J? 
zS#|x(sQ-3}|K!4NSN=uS75|?`xULOqfQ$5)LBIc3`?4D>hiRp?`QEqZt5mr5J^GED zEb5Er(uet$W(=7z`mO~Z+l!Hayzf@@8&yc*8%KAwVpkO3Wy05w;#oJYu8M5jT|v^c zMIAj-Qx~_)X6K^|HBMX4IL&sZ>~tq5Jrgzd^ApKF_9a2aBZvZu%@nYmc#(vr^Z|X9 z&$Lmf#y*?SM(l#*VEW7ws0>9UFsYCe#zQdid&)KFx&}t9rtyZ%S!#59tK#<$3<$alAaHOl zRaj5BnR~dP0x9W1LLr!XL$f9Gp8G_Ics#z$d+9whm|wdGs`kd&t1t;#J!Gd6pwZFM z?Me#d9#drjocJdk8p=ET?Pvl^WoU}x4QOY6II~M z;KPbk3TBDdGsNj)V`Bj)T~^Q=;@Wi4ldSG_HY%R}3g7y$^mhFUfASe+{pI9-bCSf( z&JHo{tD+aP#E0|OD8I?esrDn##X)S|#qPmQeT5_WsD*KN#e?7cQf&B*lu3dzUhu7g zOR6sm`0b|xDd?RZR=SI zkh|pN=;|1ur!CYWroY|@Jzs@5dRg&;w?07JKRw-FpB%B(d7@%Sl?m%<(U>xmGSX5& z&8W&5Q%ZNG%!(Il&HHdCD{nqj^_{OK({F^xtbtx1&|e=$_S)N8d+R)Zi7w_P{Nk)F zR`K2&>+ngabAo{t zA6j2OghH>e`4(Ah?vgus<_p9W{+4gWA+v?6^BN4~?VOr{w>7gZudadU!(dt8p2X15 zTJzt!8nXFj@ScaQ3GaRhW&KG0{`4#WNN;Feg?m0?INv3Av$Mo_ZEV9GqoeQXsR?Tj z)4bn*c4>AoemiPz+VPr$KF?bArfF|mM@E^$p_)QRQxDb<6C#7E1Ll)lZ9a6(=cqOm zai0-6!o9#z8s2LRUu`)9yto&tC|u1DOd`*{+S${IEsl;`SjvZwX|K84fyjF zI#hINoZ&L7uHawYE?e=4y6V36PkFl2o?LzXI>=XvK#loR5@NtmQbcA9o<@TWNxhCI zOtDB=_agfJyyy9w@HDn?d85&O0d zgDB!QLgLe)csStQY+>({KfWj>d6eV+a&g85Dy>YLYh8KhO3vDusX45TEx8S2-)Gj5 z5#U3j7>^(b$#_8T#tP{*>?;qnV^*V`63#w*uP2&l9^-?0#e3mK(H{6EO zjZ?8wBZ-^N)pl^E;kXi*P{aynN54o=NB7xX)>a^Q)^;)Y5N@F!I=0q=_i8;Gb3+!Un}Xd14h6q?ww@-5Q*;W9_1*1UkLdMC}@ z)n@yiu~@AMO0%VEoF@yeB$ZiKP9843oe1!z?$Cc8oBnUGqdYD$b?i}FQJ;mZwbXz)X<7SX~xnQ-NVnaG}J*cCnHw^B8{9$?^4H+jUtka)fGe* zeiBbWrY?9SsAKr~DPE|kr3b&ar%F(L=eG;b`!V++lbZsIS)xlec_U;Ki$XykLhm3H zHif4g#!8dZr;OJFf6eU~>h<*$q1pZhK22*ryfMXi+Q65Dw-fz=F^w|#j!*1cIajxV zx;>B5$Wi+Sx6Ib%!obtdv+;3NwGmbkX0D=})4& z1naOjC?C!p?>a9JO6Hr~V3b&6NY>_H23X|B)AoJfjyS*1MS!QLK(6w_xH*B{uVbAr zP`N!^vbeeEFt2jgK*h5Tca4sU4-ZT6n?!OZRc^{A;{;to@WxO)>Iq9@#m(mGN!YT~ zel*Z?XeBhsDxyD6tr6+nce>N{;mf$C+O%b!SmAe_qU^ z$6*2+kKPHHbW+~-T_E|?jq;w#D`7>;PZJkoqt~|YVpL*mqXjd)r^X0TS4bHFcI#R8 z-BDPxwQ@GpA}OsX(=a;Z#cMvbbl>DE2HtWFcKld+``0x$$ggg;n)e1WFOnMHZHLrOFN09%}&4E2x0sJ3^= zv=GKDJUraM!~+5}IY?fA$8}n{lEGQ`IK@_++gS@Mt1>D%)auDOBL;(M$nG2XCozba z8%y`$g5o!ul<(dW0#+o}?T+F60;c&FfJ>^a-qKkf^hP;LsmTBEtBrx)eqtkG?Z$sU z;gLTC-8DRDXMRZ2(fWYEHmx3%dh(F)^&Jg9HN3AgdZizRzqKM%l9PsytSqNn8M4@ybbj zC+hF%M)q=t6K<&$`nasZE*gis>s>5sME0OYI@!g)IEs^4;bmMh*wu&*XZ^e`tXraCo-a2Vrl>1M z_;AOn$j5q*R$D9hHR`Hm66WspXp6~1)s7VP7NFO#RhZ-4O38F2Rel~Gv+bLF_qDhf zy0h|&=P>DY_0b1KTMM3*q@*r(OGO+g^04ELa<{K z4-Vm|UJx4>QFT5lN?0H6^Q3UP!m^fr?ch9X_5&_Di(-y10x_d;(`S9E~}SbtP-UCW8ZFTay8w{~3V; zC-~ze^d~BJuUDAW>iff2F8(?x<6|oO%$g664J9LjIc#CoJ~f1)?V!n{%T0@7$O&`c zk!w75(RDZrkEf;iW3r-=1P1Pkgt3l@&q2u< zHtrL;_EjhjLT9gBrN=agoHmTCHi~PqG@&Qr=gbUPb{-p zKl4Qqqk}SNg(8cynm|)cjpLIOK_&=c(9j5b<8d)O5Pw&qL2Nq5{Ta~b4t>$iQx&d9 z;=}2xUcS&Eh$d-a`%=Q%Vd{?6olk7#`FNOhjvHCbMdup_icUwexZyOS9gl~fX@+fu zMXC`NNlO^$S+R~xqbr=$@Mf#Cb!_jX>T3P;G*fw%Fje$C4i$!_YT769K*f}HJ(pmZ zGz0OuLIf&m+kSozrz&zNzB@Z!DrzXfJq8^KiAO%Ug07Mwwb3Jp)xzbe;n{S}=DD3S zlDMnVN`Y`ULQ-NpwZ)p@KV0huN>itThM_%u=aY%1J0MU>d(UeVz(Ax3CY)0BONs4s zJ+K=c9-=bg0?y6_%S$Rt*9Qr8+B_I^s-d5bW_5cCWBW(Oe)#A!Rz&sF_X`OLVdLQ9 zX5SpiDtz(YomFBNoLyb;y2aMKJdgX0JC>$H+smOtv6yc&5Tj3=Jg64ql}X4cAl_c2 zorhe07s*F$=1<+A$|YvKiPL)WZ|bBNhD_@SKS!{Qi(~Jkm`3;5I6X(W@pUZcbo1uk zAl#*uX#rc=K;Qn*wntw^YC(T8r94SGiB6Y+cpiCsfPR4JXw%Ga@M0}f4{8$ai?5Bn z4mQliV3c#>_KdeqF>@4&ECT@{nl6;|2+`0*J1sDYPp5Ng z9UFdveMp`ff9+9z4_|s76HXm|f^tYwvPXlzwzF##6geB4%bTklm|cxTZ<4-R*ZlCf z>q@{kdU?HjnSAEFx+qL@_2W8rbV6NiOj(Ui>uyH*ktVa)G95PO=KV{~)n$C`RqUux zN<>?ASesqis}8~BZDz^}3eH(Xr~k*8YJJ@&TZveYJ0G#XqyKdR>W}Q&(T`ZZQ1{v$ z#~9{o&1lpZ(^(%`(HbrgRaVt~F%fCIb6&2jO)bFF)>y#HPSQiD)j6z9oiSKgK-#KN zpj`v@pH$swGdwQFVfMYtDcUso!M(w~`o$J9?ZLUmG-cL(&2F5-QnLce*JMD#Z%?Nr zF3qS-dnXO&aQtlrLmx~PGyX)Zz8`zTX~K`K@CE19N_=d~{E!-917G$+vMINbbt87{ 
zS0(v%lM*ClTZ{U|1fup4f*J$J`8EqwOAWTLx>C*V26y!dg%~d^M_h;noQ^4Tl4CpJ zE;VIzp4!JigbRK-L$2I97VVizzxfJ1+;DteZ#c~KzM-;~U^DvF_Sho&_3Z-rgw+YJ z^K)chUnpWmMnqNBgZ#o$K}}5r@j-FGq4|%V(n+RI1RZFu0FYEOX|}E!IKvQ=Dq@iV zQ9U5Xt@!W7{SuSY@uz2myVmAV;dULgul#f7Ydr`YEH%E9CL3-&2ozlmX6)2gyCarb z9d;Kjx!UMSF-u=>>*?un#l=8oQO}^%==4(FriJn&At->1vuJzaCE&O!vfIHWu2#y~ z<{;s!@EAB3-la}J8Ne)TwTJ4f+Zl0WPz=FnE|exHuKO+Y zcqOQ=2aGPisWy82$ufapfZ_mcKkf6Q7S@~s!KWn`b~!iPCohZuOjDAvgdEi5P|}Ng zrqOd`^|fcmNJiUG1l+B7c*9ip4bqmF^(-M0ZmNwnQ;M%P5-4UbTYF=f;CQj|Z2;m~ zm*3v#=EC0CMo)a%#!a18M-FCMSK15b_{5BN_Pq7IYZDO(4K%ii7-b2ih`j{@>|l}X z)Uy9<+JfJSCg7H~u$n2?>0?WrPdNO!Yg&D#{qJ`0rA?+=XN&kJ~5k*A)E9W_Y352IP~k*EQMhV$w57p;V~T z+CNE4IEBD-p*Y5q@)7YzY_Re;Bsp&NRg#W66kNGzZpisOk0}$RaFARrEU*d+JFPbb z6HH9_)a4h`tC8Hnr6gl-tqOTnXO$dDhV98aR9wacIh5rOu3&naRSdJau^%UGJSD+R zG{H4#u?|hva#hGzr{>}Y1(SjedgbkNY^NJ;TCY{J_4Z9*DTbtHFpk8|j!Y$X7_pO3 z`vhbbwT44%$D_mZJ{fp_EPIP@Q7~LhF|ZV9VV@@%9h0jzX(om*MQbdT28K^-p^}g& zpe%y)gTkz{e;D~ldRR^k1QqhPka0jQyhBIA1SvAwy3dUn#>Tmq#r(^)uDRMw$P_2$ z*5*cy)64#c<@q|SITnZYg`FF=8TCN$5)&0A0`FBC$B)8^8Pm$R4Rl+PC)vTeG6>TU z2bt6z@3_Tg6JmA477AFt1)3XrV>jxCO<-FDhO6MROOo^JkD%ll2Qu)R0a3^ti(HDO zpYIni@Uf-$hi|*DB^|4?T{DRn5YQ(O`;nLe>UsoK!acvtI!ubz?-zFTt-fJ=MKm^} z?(;{0v%c-r(P(tRi615GdtP;b;9TsO>V&vm)YHje#Dh8%(BX0Z=!Qotn-cZ?p~_JE zY3c~T3~#yap4l}bn=dXu4HU82>{7B=YWd{Oa&(LMwU13bJQ1miIWJi7Rn{ijj(-#Y zLL6dOb>8|nBj*T+UFQ|1NEVUB4i3cRl9TN!Hx=WvGrsW=6im$WDCx*P_4ky5LIEV~ z>`)bqI@aIHef<^`*cR1AuQ1AMKY9DjD5$95boS1oIXKLgS5(AQN9~?yTUsJAGc$|n zB_v^YjEjQg5kBgemcqG;FxRuIi$5{>HH9E>tsJkLTfr!U%$yoH#O2jhB$fh+RaR0h zbc)L$d992fKu$#T6ex=54aQ|uLT2`igYu}1wA2VwlT%SRSb;I;4BO;nL^K~iKQ3Dq*3 zE;r?)W}!LjoD3)7YRP}Q$7;VZINIQ3X^(nqzoX--iB~(@tF08ynY2|byIU59S7+02 zjTQFM6@8m6LMUnvV%l#%lYJ67^&)p)y83e+bX3p-g#551Q{T0dx*gekmg1s_W&%o~ zKwc)@Bv#nPuCaLeXZcFnMdlO+?j_U{)imRgYd-lTiVu3JQH16&Rz(BlIKR-2$&mtz zc2%4X!UbYNAjP+QCbX0TRi}z7%*Bvciq!*Cla({F3SiTe$jismpp|`K$`doevM#!G zN=p1?=_x+O?C7i%zGrhVMoMFMh5dc4FoR3K4jAUHoA|QL_Kg;m8V7c@MO+m0LnJ~Z z5kY^@5MSkH4 zE4H5*p@9*Yl`Hc-yDHK63{J;sE%Z&bNJ2bm zXG_lIg`+@qTM-a7?+-!}5HMlEaQLKiR0zpdZt&^WQZvGRiBi^oYf0y>6!k^5LSCJ@s*i>G+~7tZ&~QfD(Q@Vc3TIkx zTB>@vMAGSet*V+4DvO0iKyR;$le6n*b#*uyo(DD#Wku=|kPqMA)POQ=k^FFMMCPf2 zz>rH3l}KFvW?B@?!%%M(Ogm)?onM_MY&E$EB6~5~^6p?t7*N_`;jA2Aj^$7-eZj_p z`oAd&ck+rk!@zrRx0S^*8f{YBbl{(w=B%=1Amya0)$lw!^{>^Jm%p zP!BxA9}3$Uv~5Vy_`mB55jPt4#!>RXLL&JTmISVqr}QP7cE{5!<;oAAJk%D%qKN63~EuAy+dW)0(VuC=MfD-FhsOEp3-kM)bw)_kIeRl1>?bow5h>#cP;wOZYvm@3f7^xTseh#5c{)OHds{an=1nZl!pqP*N+vSFWGM(|n%Rd{wJAf~Nm*?4^4*p>Het%caGyUMzn7I+wF5I-pf-iBrL8V528sQyI!w%kWN zC4`PWsu#KYlY|`>J3LEnGs~;d+TAWkMMt`p)X-=li(*90#UjX3$Dh-sl50F_H?)_{ zY+C%uOthTeHW)ui{$xDTJR;7HqT0|olXeN$qQT2};8MUK1^1f|&4x({jKmGa27@!2 z8+k$JSF@CBZ1agkj0k4+xy@L4(8X&}{Ker8bG^$3MEKcSu!&#dc_nK=RVxS0M}I!6 zV^oIpVs9cx&S?xIogLt6vdxSdsI3Xyqt|NR!KB-Yo%;#^ayp&9Q+$<1U6`b$r1iZ6 z2pMJ0Zb3g{W^UC#f*=xwX0mIuUZd=A6U(0@5Tk4EMtiOkZ5c`$@I5j%uvpY?G7zGj zA33$3=PSyYKJnQZBS&v#j<0A%$cx-+FL+BAYqG#$N}qKWbS2AaA^RYImCz@5$s%IL z%GmHYN3sqmOULyVa6?7`ior1mh;Y%4ecFi#At0PPmgdmuN^7m{s|50)%?-M<=;+Zu zu@4y2jdWcX>Q*bnXdBAo$BiWh&A{#c%xHcC=ej7Dmi#@2@~hpnfsW1DM-68~IJ&e| zUpNt#jjK9(u=Hjqw-spWB=NJA>*vHSwqL_?DL-=aqzSr% z^q$Ifd-$j4lu4MWv1eHfv3PZgvp;;`AZ+DuYWtf)e}&-Kd9D+o+gTq~&Xfu^5=8p5 z>;giAhC=7%_ygVo8*1KMjh>VQN8^;n1ddg}t~F|mt4Su83MS<0kI0=ue|SvAZ1!;B z=n-o&RV1O{6Kj3;f}h=vBuSa@I}8iMcDQzVYFX3^V9e3O6H$ku*aw#TqPG0<%?O8E zG~+SGv;qBSe3ga=Epd`afZS)$QoI>SY^98_;md8qP8lI@=a+&Q5GIjz1)_Tpg+I&l+)ocfqhjEOolItFB>OOH2siUy`f%dyJ+{$^(514 zAXO?xW2@rM!JZYBCN~Vaw3U#mzPgM`Nb#dA`)zDs=#TDL6y&q@^Xt`HZJXZVX;11-usLXtf(b3 zJH}-Nf~wLVzWvaPMBWWb$wr>W=*!6B!Nm+O>ncUyW^=U0Z+%$DIgdcHdR+K=@el@g 
zzeF8>emD0uGS1za8EBSqm-7JJr>{oMIztj_vIY7`G?h0upslyW^mMp>9Ufp%5fifTgAr9t{;cF6P6+>v%LzN zCPgjTGpS0;R;@>y#@>uz6mi;JMRB|SJkY}Ct`8bbU8JDq*B=W+PTVd)b+y^Nx5VSE zeKL$Zo_>4KlAOkFkDM2N))fUf1M6Q7(=%~dM7-bz=zf!T>xhaqa#%S`F?^Mr`o9x6 z|BpA3IAmAwwO#!i5&Q<*dI?M(l)VL9u#QDk`LuyO88w=0wH268n6ug*1~fQ&7INsi z!G)i}V;&Ak){Nd&a+4EHN#SFcnuy+e8{B z$q48tO)oljtD$vO0$5*A)_O)(IE0=%TEl!6DaSi%*sJj$@m{OmG{wnFK|Z)~8~14d z1~p+H{1&~gpMk`AGVv`!A2^t5XEK6G?>>q`-|#7#5I5%4X&+3!2TJoO98MO(TAuFo z?p9k=S}bW7bL&XCnBJ9!D}9TkVdTMK2fE19Q43iJaDO<$nPAGo)x5&U%5km5BSY=d za2Qh$%pE2074Flyu7?56sl%M@Z%s)>v@08N@QNij46P81aP{sRmWWwt!KygS%AH4O ziZwaFdlqz{&l!YueMy-%xskVzz^xpWo$A>-#)iO&UwNQm&V?e=Mo?aM)`&cxl0IKZPMdQ%r0zvm~B&N(ck#X2(vWit7c zIC)uYY!#Cu@-r_3c}aPs^C4^LymR4&&CwhbeykR9MbT3p&uMc!u7tN(hb@9=(FI7p z_47O(fNUTGkj#4q^mHkQTiEQ)kw#NEzN3mb>Wv%<8Jm=LRv+prD57WNv#FoLST5gB zU@=~b#n|gp{yzcp$ej6M_+X?y1JJiM@znw*!jwv5W}O#ORpnMYcIK(7Kv?0zSmqnQ zb0mtmd$rtYUI5-0O#dwweew1*-bpWb$R75z7O5tE;3rv{y|JFu+9xQc91)v0_O8$XxxBMo2h>J;w<=Pu=$MY2k>a`MgUx51=PvkcN4L4;=eb@$u~3qtSEr0)?~PfL3Wf<^@jTjQ)0*!g&_VrBPuyh zHE<*L1|*5ajPsZ&!sKWq2=PF|)Q@U%q0=u8DvHqEoUDB2AWqik=M7VXzv6Bt%)|XNTZ@s zeCv_Fc0)Z8am*85Y;=f91&U}HLIuOY$|`*cJr31hNV%pI3~(l*=)p`4TX5PPXTn>a z4e@lpfITm}`(F0Tg<0}8LgxG?S@~jg=IVY9C;@FM8AIU4O=5=BWX(TL6@0c|Jf;tLX=>SAo+01X+k>dig+v9(%( zj}okeZ-LT#Cti}vJ?~tOR~j%tw-&tq^Iq4Tyb+XYH@t$=1fdFt}{L@rDzw zgx#27>KV$wEvlQzYAX5`4)UWhiQfLLRJL?iGTcwk^w8I~J%bOa^UbCNwX{dQUI=jI zu41@KjxjnOLR?Am?y>~t;g`s^3Vumh?dnJcqlf)=bDwMH^!4jah$cK}bc=;a)R<-s zW4OX9TCHE7 z@nIHYIb=6kYVo{1m%cS}U?NCU% z{<9RU3MOk^r_d+qS>IfP3zZpn5P8=UoPH|EI?4Tn2o5644;w` zM`aar)Q?!-=uDcJU13sAcI27|zJ$3rKjpEK-Z?8msMm{k=O;t=yC+xoha*+(%2wcn z8huT3V;jkLId`p3Dvv{`#Fb+PZ7=HDGN0)W_z`nb&Qw@LKXS_@WQGt$l_mK zqEL^#Vq`&Lu4H1K)G%Hwfme0>#pydzFSI*zCSMwI#Dh&-eu7W>;O)!>@yrHZRe$!a zLeX88L^(4EayqCOfaw=rr`nOoD9yG$Cc;4Tj1}uO4F0c9yRLA~HSp^|oY|R|dfC0!Mzx8}FiyS8H z$Jmro>pvi@9s$_upm5WE>vYNzR%{bX9MSmQs+JmA6rG?XHrJ12 z>={%&kR{lq327%mwLp|Umm`B~5f)+c@__wqw3o3oQg^kJ%r2dbz4MP!@yV7Ul`{V_5`lUgPRD@?5FMc}QCZ^^Fcyd*Y^ znT4X0>LCd!3xsVhtM*;(2%GIQG@Vv+An@+PO+PdG&6&B??yz{R9z~Pa{fZy?C1%ao zWLI|lk_z+qe3j(G3EVQLQN@OjZ6O?p8IY)C!k|S2~$fN3i|qEKynzQdFZuHMi~qR z94R{u#^F{dG9d`URqQGH+P=cK?#FvD?qeX)#%s$fBj{8P;`eSw#@O^Z z4@3sS_oU?HUzlJA#G?Agq=0OEtidb22|4@#)e#dXLuFPT4;zZo6txhuG4u8f1m=p` zx_4tInjvGBDIK`+(wi@sV_QvDJoV`0Pk6sNjqaB~Wt)==UGpt2C>Zt%8MbO1rdL_A zX((jap~1t*Xygwh+>_V?#15h{uS%O>#mHM`#52UUs*Lnt1|&prMAx;^=w(8pk`Vo! 
zc)29w!XVGWt0l2c^#esQ^?-z+b+Zr%Ue4}^c@my>muRO&qk9bqB5 zL)&*z2SyQsthQuqhoqgl@4yfgkoCAxR0-sx4oV~7@2Es=Q+@R*5Xd+}MN(GS1l1#i z5*lE=rG$wFh4C33wxaekgFeb|XV)K*@FmIE%?KymDZ~V*XOYjo1*!&(CGNUI@xI#a zJY>*zf!kjo=7-q|y5=Go?uz`Rrho_w@}NYp5ljl9%A~Ey`Cp8^Q*_*K)c)PrYHX)X z8rxPI+nCrk8{3-LHXCc=#9i5y&CG!(m~p~;GoPV{N&daT`CJ_7%#a@~q0;wtQk@+Px+n{P+Cqva=|j7c zh^l7?`jhO~Lb%7B+YskgmlH%V48Qz-vc47wlmy#I9J4Y&c&l`wQp(; zpJnC~HIbu*y|AJiA-nN#45ZcaINo=!b+=Ep^8Y3;C;=Escx)f!mkIRJ$gaGQ8A-!^ z3!?leSH4znGWX2-r4lTdX7RlC1O}hYT zcNtHDQ=3R(Z?Wf<7k4@v3Nov#FSDfQl7&g4Y;ec|I3Xk*ru;f zj*7>2OY70WSMaA8hwb!-5*BW>hdtbLrfg_O4FP|%RKK1xcV|=Rh)9F3J>|X!{wPF) z2cIhsKMo}r=h!d(LoGNXMie-RC%zUdmRN~1+jXCCUK6A_{4?A+BhUm*AURaKK(G&= zSWEDD5-I*(N~4B!bylG0Al72)dgy$Ynf`lRT54DU?-()9CXTcdGkCOOnh*d?N{bjY zg#qSf2e?=x;R$_wJAT{gVYy-YgqkS*2&XM130XWI&Z~XF8vj<_D`!vPBipoN%>u&+ zYjs+XIS zr}QDdNtmNmVO}X4LbpC+IQ)Z@Ng2~@Vu50Oc5r)zXcI?rAwZj4^gWA3O$A$>CZq1p ze9%o%K63^)G!8Fl$E-Zy3ogSmpLO4wA0)rc{LUG-Qck;eh0Gknb<14xgX^?dUL!fb zp6kv@0g^i|lt=5Y+6{*70{{Y(yUa;5%$sfuI<`gFEETSej&d02?=SHGw15?NoS{id zBY_YR)Rb_^NwT7!RM*Lg5r(^Dba!s0E7XAc`Ft<2z4nJ1ybjCWV4!ih#rSSa3#eic zXqi4!HW@DLDEehqG%MlQJ}#L|%9F0&7{E9L@5m}14^o9*j`#u;6eU8zxU>qCc4s>g zz_cV*Icdfh&6X>2Mc!su_<_bI*P1CG(3lUa8_mAIqq$Mk^P_%UrvelytQA~{(SKh2 zz9uh`r)l;B(rm~{!3#jb3GdP)+7t67;LNuOej|wbV2`39UtuE*|>dsTge8_ zY?7^QDu=C}epOY&Da^0=Iw;Zhopw@8-;YyQmxvh<*scl(hDfRW7X<#_Y=~22-^$9P zga1?_d1ao?=othR95ajatdTFPqUG=sTu&2Go)z-J*0;2eT(gTMHbNLx4O8~(Vw2Mp zVlc`XU(k^EDPh(>s-Od^1IQX3!|T$cJ<`gh9XN!`9;ow%S)8T+O$NPgozx4&NX3GN#eC%;hiMS)G zs*GCsR(iMo!ras+fa4`i1w<3J)n#@zlHODFU@cF|u)zgoYNQW-Dm0IZ>BRbldsAVG zWyccMLiz^L!XsY`>)L$Z-RqCfOac>lk$JM_SJdNb9%03yUk%rf0l+fOuU;8@7$?;+ z^A0NW``!S6IRPglZ9mOuGg3pnl|HN)XIOqC=a=e~4-2|G5xso03MYPMhAG^bL6Prc zE-5ZzN-b!=oP3mk*x^~K|1IdDAYUi(WI-K?X%Q1^;BcGz3mW2O8Xsks&`72ucnixD zG$K;A`9y0&Rmfv;`HDI&J-8wV%4Zi!n^&E`X!535$QB7(1kC9Gd?sM;>_YdDNCQV zM(O)a&ceE4FuFyu>Cb#&Nd;EQ0d-kzwJ1P@BogT^AuSCG7x$YikXfWA=zv)3*IA#T z;>ttX|0?h=rc(n@@r2yi`Lna9S*pKIm<~$cJp5i$wQZGE3IYYsIOMVtbhn?vvttRw zo{%prz_YZhhBv560Z;It2`kCQZK|4V3W%t{wjc4P3o7w7Yw|t`w4ax=FR|@T;p3tg z7{J(f%&Kqvu~^^aR72Mae(-%^y9+!(U%7W-y5EO3lm~oIRi`mDlSiiuyofjCz)z4Q zkZ$G@HjO9L-VLlX(}r18*Pz&#Bsdf!3EIs4z$NhiM;iTk_ZbvAMyW{n0au3lDL@nJ zsfwDQWf2`*xGK$E7W2DpNTqIp00MXKXSl;3aVwI0d5D z^+>1hb7I>`1@3jp#U-dg3w?+Xn~c)oQ7?khi?f@dr$b=x{*?IB8#S1*jcmUHwjNyk z32nJYLK&`OsX%j$m-Su$etfDBm|II&Kmmn8j2}FH?~$CgU(iYhHAjbdu!EUo49d^v z{c1%gmRFpQXrIXX1=p~1HLYGDPi9VO7dQHaFN19wX^K+mV3D6 zT~n6_T{cf}cbdXZk{U!FDbx-i;pPma`UK?9bzFnX$3F(Il!)uHr3T{735ipez5rIePg);Alt~U9ErZbaBHJ z^!vxG-{Xb;nv)Y$td|1Z(vH@`PA~g~+}E@ZpF9XlJ~Jne8XK~VN3lMsnS<6e9h94n z+8I;QJ7@TnpK7o(7e09dG00O#AtVlOckY(~YC^;{Fr|fno|>Ua6_C%PzW7TD#Z84{ z-&GtqJ|RmeIFy88SQ8`sMqElp z;Be}s?`=iE!V(yoo?Q*(DMYD`;V7$3_(&}_ec(|0{)1K+;Kvw=c!x)I_J7JiuaTu6 zC)mteECaRj_$i?zevZP|w*-c?(;?@)Akj;qh4p+;iR<7~tZI`+#L`H+vPo5Ks1B^i zOD3nl_D>A|mDK)uwi3QM!j56bf$umOD(%818JI~cYU{FHRt~v06W6bLfxvAMBN-b8 zVO0pO`o!Uz>xM3-FZnu7@_z6*sp3XvW@L0h)fm>i5F*-?2`2eL8R%T&vN3&GRnYWf zlalpegt}^yfc4K;;*z(WPZ8ti+`QXkf513U$oPC&NX++~5}5aa3oH|a`4N+Ftw;#t zzK~Ft!H&h50RqtaqVfs2LbtEvw@?vgE>$OlwRHNN(A60D8Kc9;9SW^+znvk5V=S7c~O zalkWP^M7&&^Z1MF2_M&dIwYw(pE$HPB_$6LJ}t&~23bj0PnwL=Nb0C~EJCnfQF3Qg z>Jt}pL8@QWpPy*F8ROhEC~IGFKFvI@w3$feh!X&cw6oFULgM2bADonPuJZ-i*ysqU zWaP#Kb>Qp_^r_+9+tBBd$m%aDgohcwT#Remp_GW5&5 z9`3Z2dOO-kAh}Ie4wqaa(p!#5c>^HFl7bR2F9XC4_=0)xK}*Vnf;g-2bu(dMc(Jpp z+y^B_%Njqq_I`4ORkCNpqcx6Rv!`vc$<_u>Nx@CEQ0}@Jbx`J?c8*JbN4cWLh5=ILz zeB(k0_%?Uh0y$`Oxu@4}?EABhMFHJ7>ILm4Fq@PPKXfxoczg_{zcWyAoC#4XA{T#` zeyGoUS0x-+5&6QbP*qFYTXW*NJgXX!K5OnuEP(hZ7AE{zFC=m_vt`^cX29&YPl?C6 z-@|Nnp3CC+hM5=#5U9}#C?mlaMPQDNTH#N4)uS)ZAZk-4A|@kP7c@-&#_dO 
[GIT binary patch literal data omitted — base85-encoded PNG contents for the repository's image assets (docs/oscar.PNG, docs/oscar_logo.png); not human-readable]
zY;{Dvg{=lF%afNxs~=hTaKR229+wM*-X4ns0ff#>8yzHtEgane=TioXRalIs`br|u zQN4|~1Dvhy7#|lQxAgB(Pp7m4=xbO2Yz&zbx8@xg3acqP@GNiWOy3i!i3)L{98UCT z;@B5*vRx9e^GkumV1NJab?Dzd^1r2C6t3*I^WXM1W?Q~yW7R?56v%yeKAfx9JK?-f zq6@rueoSeyBf+GH{cI@3+h{w|eQv$NSxo^82^0d*vS6Qp55U61>LQbeffSn+HBK^G zA~F}Dr+azT^d21owv;^eXF@PjIJ4=+?rg$* ztWR>4`lQV6t*BW`RNhBseF{uINf52WD$U*tUzk2FD4J;;sWV$Ub+5NoAR^FW6FFdy zoUT&>!ym4oe%R3LtMg=mcd-my0skgpw)?mkFfP_RTOy&ygmyxDKxR1rPYxB)H%d*I z!dILb*SC*g^G%d&&0s5}QHnJEWI?9md@k?PptCpTPMbh*XvcRo0Z-TRz#5$_1^4)8 zD6sP|xs?XN*E7ra)zk)vjg%9jKHFBi0Z%|Xj;+WfQCK1iAET+4z<3ttU&4Hz#FTuP zH~W*H?<_EY+vOjc*kv1QuI#+HG;ZhP0LX9Q0EDeLp27k>XEM<{I;~$mHPI96XT6-* zg@`xT%hp@%XwB(jEKY2FMOB`*A5^fz4So!$gh`!+gZ1&RHo1}@A14Y(2<)4`xuw{Y z&|$^5zX2>*K4A(=acW#W)SXpmCxNa|FlZ{4(us@JtB@CWsUpne&NAD*&vd4B z0%mxQ~~CL+o@Jy)sC)(|QL&P;*27^1HJw0dr!*_mm{`PM_kE z zw2vqpb3qItM-;)e^JLh2$)CbJ_@OV@Lx1PVN#-Flh-QIC=^x9yJ;n%>$AhAl7LWwR zbbNkBX}03p|MiVCo)WSWffXLTq>pS`!Rjd@>1}9mMv5Cy9H{ zmvUWB{2E6-l)W{iGC-kYT|FV>Pb+t8FOR%2uIqkGt6m;G@&+|jgl$^ui5wJ8_uYO;Ai5oocCJlr$%RcLA5o@}7J06B*|TUR#(yFO$F*NDR( zgn6u9K~Cb+__jtO^dc@!e{Cnri4K=lO9BgIAkNK%0BeA=VUT==)uW2So)#x z9UHnm93-%UriSjVzh1rQDqgE+VPb48w!imarS?Z;c*e-orOWBByL|MIA>h&hL!k?8%%J>j&*9Y$qt(1bMPz442uxpVLkaM9gNW6z5Zz5y&_LZF>9i~S`yzzF zxFg}3QD1zamqEkLUp^_U%`Ahrw*EaEcwnO}%**pNRB%Jq5RzH&4B(FxYmwcW>p*Yl zR)(4&WDQ6=TV~r-gt;|(xX)MU;2<0?itIp5<(TJTO;Q{~L(Wcf(f+BCm9v#sH@a5(eC71?U`qZulVUk#IiPY3nR@)-lcBJuN3 zvZcXzca%D-BZS|}AowU_Ml+L)X~Pso4Cy>{>qD#ldd#2r0gbnnL+$or2G=Gt-QcUI zn$yX=VuR=PF09!v2XI1_B5Z+la$^RP(6E2cKRg2Zs5{ZXFbsu4D~jMqoCEYPLyE?kL_~H=i;PsT5D%~DVW9zK4em)tL&y$TW znyCOz&+G|a*QXOT&7$7s89vS8uE}QRFSfEI!IvlJnRa(HUX5*Aw^Az{w}?YyE~0=K zX#aoc;jGy+5iqb<25UB0=82tY7He0~(5RF|0FiCBSmbfoDqAVl_Yz$Ng(44k(MSu7 z3yFo%J)gj9MCBSKL4^)6mCWM>D`nSgP5Q_*?gdMWAvTNU;?hh8vu0Lm_0mY7))P}$6yu`r`qC*QDGG^) zAuUi)Y?SA46z7_6NIjyacw$Q3rVX0fV{Js}g07WsF!7nPh}R2BI4D90`{Lpsp@7^eE^5BL%#{ z5K9NXF&N_oKB}B8A*Kxr#HcainMs-H%T-z}l&Ew!=ok?$X*vI``}m+p4i`0AeGq@3 z>JE@z98>QCJX^tLu=*ZY6dmilK_bc5Ci10$`_7UJ=5LB>T9YtT#;w`1|ED#>Wj2kb z#Hsy}73X1L!s_(usJh~|Cp$1lGDM+K#>R*KaLFJlmnwFo{2HBNd`a!Ye;d@f0?5)0 zQO%mup+RaeknjFz^AfyLrLi8BjP80KECJHe#H||I{p8FEROE_=y6427E|ryh+e~r~ zLlx7S_-{)STRo}cVE5ZSTc}Meu+OCVG=mm-|6h*W{fn1}2ebn?i5l^sUt|A2%a%i;t_U=%x#hI+ zfH2}gliYFhliX{ybz(HI6lQH?!6Wy8HGL?pU6%;+3h5-LYLQQcA<9uO^7SEcEb7Ek zb-5qan3pJmV>-hBE#M(U|Dm&PtEU%vvQRTF{GUi5!Df+rJ=0WI4$dT)vM&!~4p ze{+e>Et0fy_yu(&;E&8kOYahu0m6LEc!_&p(#1dGSjH=gk~d-UU%r8SpABUAho}Fs z`mVx3W?!H~jMk89l6#0HxHH($PC%sg*}sY_qpuiHhurs7^8G3;S){34G^t;-NneqL zw9^yLNCxg#jcl6U{U>fY?BBnZm+T#xIU?Qge`)7EK9xbJ^Yztb2A3NXZm(kB*zWs; z81)92!<`<~A59BeyPN;bGIG>!XDu9=Pwz?{6aA9 zUqnE*4$Ytb^=(ZONuH!VBzpGo>@Xyi{vqWiEh+)bC}ZW{>5Z> z<(68?wXL=rSF8uEeX`0(cLeNphFBSxs8Kskjz2e$R{*j@m;L5!oCB>r+j$f4rf}hJ zp508`L+eyvm2*mmGbUg_bGUSwmne?oZ$M&NBv41B`>Tk2pHcL@t|0kee}TPzZ3Vl% zy*ylPdR}a8xc+DmiIOB7p*`@r9-$1I;{Pp^cyqU;xAPkqK|Ce!R&K;n(uX{jk`g8j zRO1t}cIH8^ETujNH~d%85Xon1imEvMkxurZ@DJh9HBjU0^06{@Cj#R>=Z*}l%FMwk z07F~!h8J5MUYIIH*KLxUNs)xe(J~+rM*-=94!vPB(>){yPL05k2>x%vYfiE*th+yl zOTM154qQU^z#t%qs;kqn6Sf32ZF;~FdEcURK3_{ylu6Drql^L+3ja;vq{4_%8^D2o zd(^{uK_TG5hrPkOvyEKr?#XK+e6=1($sKrGSU?!f#VIkupUkISwF)=Km{4x%&1*HomB1YPo%pY zWV+6?CtqlDg+!!am#3}Z6%b6a6bDW2_{F+Dmyf;y;!yHL*w{{S7cTJ-?fWdurw9Tw z1153TfMiWGZ3*8t2JJtE)jnxBX&l=!HJX<@Wy!zCA9(~KmScSH3y3UWU4Hk8R7*Xr ziYH+>g!YD5l(|O=WVzA;Tl+DW?grR`ukh$S@_dQj*p}wbM+M}G7p829eLqQZniZGp zWWCOA#4Z!L_)WwcmTNm;n1a{SQPHpaHQNQ%#IEP-@VM$-h1@jnm%F83yLsWmT3m>c z_>)ane$YvKr#+E)9P=*8NJ=0Ua^~1oa>cZIy#Q_yi0qHXg0MFe0I%1@E)=KZc&TI zA{9oBG&3~9m{t_wKdJ_{mL#5*My~L$K{CyLN{MhMxCbqUw(>!Y~=oS)xnl 
z+c|YVqDH_Ak#FT{Z@&@wpOY}8h;MPW3Z(U{r3YB~AM#Y$yOqJoTB$a)#lu9wY^bk` z1%;$Yj>gPR)G2z2ear{>w+YSyE(y$)QHS1oNW>+qZ>Tu#=x*wysmN*PzS3N%nRFo6 z5@UcbEoct1_EAD0fwoz;fpv~QS6c>fhw0h5PuLYn+D3oP$h4r94*l#>WF%osn&AC= zMzY+_cd>*nvZ$5V&Yv(0LGmbYI#SKV~+weqRQh439NT6`>3Tx@XU zI&rdu(WK>yLgQQ80Rt^NsEp(hT!lA^f z2gpLhnK389b@?;`;o9KDEM&DEgD@kZ^UwgT##VHy1QeljsL;=#rH?HpoO3YrMq$dE3QigB3+>(kl1$j#3iyiV}fW&EBPgN}&iB+T*Dp1z39Q zL=DMMG8)&dJQvBO{e=*)V8MVcqhq)f25nqtE&Hu&3&bZ|xFi0Xc17(N7$ep9sa{z7 z54zhCH{2+MK@y-4#{HA7>!fSyKXhTX$PM2&Pa2jI$BwuWkVK;*X_2$2qh5GQlamPU zB6ICqFvF1S`U;aPuT;(+w{BS|43Bd@r~tard!!p`6bGMrT2_{Ns5hwn{R>X!K4@p5 z&h+Z>;HfbV$uL|RpxO0@_Xqc}bp^#$MSJ|luDi9T!QCeS&!mq4>e>6IYW5+~j(7yE_dkKYD#;YI~>@t;61^K}k zIYh=-hAuo-epTtB{=@!cQs8wyypan#=G-QwRNT8kV(SZ^KN`}7nGTO80LS+4Uj&A-UI9;w7GVQMyUkLDw~g8}fPZqGnf4 zy&^prNajl@lh9K4>lZ(uW0c51pxCI059j#T z(+Obyzy(@cl;jWPCHD)Z9U}|il^SOIyg7HhiD@shd7`@X&R<-Rn5Fs{D8R;&1H8X@ za>a#cqKp3K+I{B0V>`W!D93}ztucl!Wz-~Dm4IqNtGKO860S&2j1T6w(Sc0L``dI44)Fz!T`H&X&- z+yyp{%)3lnpVq*mT(e1k410qp;T{{c@G9w@A{FiTKTE#1Y~yWzKOWRvX2rSO;{fsKmzO(B>>F*`~|~Dqg|V7O<_?{ zaux~AHaH+;&{&iWLnJ*5nFnC8=;5yeve#>#*RpLudv#^Rs~rL+5qWM-IUn$U3Pn_5 zMo^@IArj1k50h_)@`UiU6U8umP^f%zyP1IB_z8nzGA1S8HmKjou1!LHcZ+xlN^PU} z+C@F`!{5vlCP#}s&U$->!J}MorFmdVI6Mm`2u9p`dHA7*(lb8$f7pA^s3zNWYjjz` z0!ZjxLP9SRq&J1okzS;$1d-kZ4D}UI2tD*rl_E%$7CMMvAXGtWXew2DZ=rk-uJx|7 z_Zs_rWB)$m{Dm>#x!ZNmYtDIHccKiP-AV^r3(kS)5qn3R&EI((Tq)+GWfmP)uqT0> zC4+jL&a$1ma;qQ&DI4Stf5qxcca+~j>nXGlY;?lVFHtFCBv;?L3 zJOMfnY&-8-pC?!1Kqti$Fi(arU=P<50vYgY$LE5<9}j^L4Rmq{yr0Ch?mjzZ-wOif zYwqlBTvC`Bg(82ve3b@*Zu~ekn;uAQe}%z0<2NcmB|XdDEbKLH_Odjh=$;4JcdlLi zI^{K?p;tx8pfSfvSzWwv@bsBuL}#_3AG$7nCD@#+cp)WL1)a?whY{AW@eVgoW)%dR zq5gt`lZItzmEg>`9vv(ac&~@`a9_?bzr{LC8e%l70Dy(>KtcmK1%T-n;*p3h?!9?b zMP^vf%xj0y(vL5mC||v&2qqrmEcurXif1Kja?b7+kG;}Sx<>W8n_Go&ahWS~$e9+Czm!W#&kZG zHxF0g3w3{kOQCMKL?>wedDJSfBt(F+bZ%=R?P$}^bR@F|k&o{j^SJ5Kaw;ZwymSRH z?L|rYX9*|fZ3T(D_cuO|asCj2kpHQEOxoesqC8y3eYmSJY-LA`aoPAhw=lojq2ZMt zHpHUhwzKfa*K9%hmP>!kgI|4@6Fd0v!Zb1eD(;&bPzFcg*&41<*3QMuY=>@IMO@}Z zuG_q%vDR=Xs=pm6KN!)&=_E?<7n*o@fdKTVKOOTMFvDSb_t7tOz=A0?ZyWU!gwVB+ znc&GF9Re39&d~SvTrJW;>vaVPpg?rZm0;VhcX)u^_6jw}@uk1)hKalRw)`r$9vGtL zTS9oDyxoaSQhq5KtRi}omPhJwiSY#tSWi~vR2Y?)n+d*Q(>s5&|JhQwX5)4j-{Hwl zzace@vNYi6_jSESP2HE7Y^XWe(9J&S4Q?I{0{g(1X5!)9g;Ydk2P@y}-1bD-%!sBszeQ1yKy%=${++5w46Hk@9 zUMtS#EdeYuFZNWWb7kfId`QnY$2AGt+1e1Tbg8`M!~%roSFXR!Z#9Z@r2zC+->kRT z)uwDXOLh{1ns#Sgh4EPBmWP&Cb6!Nh&bBYh9+XNj_cF>G8eo-2DcH~MWGF|k7LEi4 z%1&qF7I+Ql_m^dJz-+0g2!3&(eDOP}HMNfuSmsGGewPh7YZQ;G>CJ^EYaUudeLX~^gedlPlF}i1Cn)j zkyPG73(ll}=B}7D`vc8!_#>Di0sg#d9!Ne(oQqiuAD4c!XLP@Ha}!AT#&=-<-ee(@ z0^mf{n9bgiZg$A&h(IHv$e!=vqwM!68Js}t#je^e_;qrm3NT*lT;P{#RYFAqP$+2Z zH0%V5{_1R;1=p^Wy5NeWsqvks3fX}ugXh(jaBjeewcO~7f>W;i9&fE619@r^K66{= zN<6Jj`g8whfyY|{1#my||M-QZOXkIHwsSvrqLmU0(ZH_kA^cC@vFCgA%I)Ys#rinHIL9^jS$y2>~d%uqKIRshxQEY40q z^+3@*#eGDpSScF^^~K|>=!tcej1U>p;*t9o>rbHqiD+*PCfNcmXM$3NOW#*1IHc&F zAo-~CbUTnyzMB)|!0)h4cPg{#Pj__1wD)<-e{w8p^pZyR@V10m$y;i$!{nuAw_R318zvXT#B`TP>nAqXHB>EJ)lS!*iKkq zk5~;}+#I*f`Zx=Aya9Goqb@Hieg3YduE&=MxM^jVHo)1C0wM1m<@+A{ZVhBF z@43svko$}7HVztSfWx8En{>6D`7^cyeF$-p&R4iRz0Uln$!*J|a1NE|f)W8G9ITyx z{3Z~UTH3j?{DR7wkJV34HUiJhwtye~n270nX1&ZC>0eiDjX8J>iN}V0;-u_ENOuc0 zA1Wx(i4edY&F6D6*xWZy{q*y(348EY(|>ZlnZR>drHW?>|FYV|`nF;k4WO|MSzH-5 zFg_H&=bJ1=hKprmhj*M{{;b>M$4ZpM5*Q{CPK#;8!-UvFYGD`S2N&I4PFg~M{ZTKT zC!UXfB6R72-_~Fa8}V4sdEEJPBaIZ7dZ? 
z-8}#NTGiTec9MByFpwi&QbmI5VU#Yw9XzvK1Qym@VoDypDu4L$z%jv-+j#Z}9X4Le zu-KQOuiMcBq!Wsq#I24PH+u~TPFzIk#Qr|>E#tf=D!&_XWyKGJv!nnDm!DSs53fpZ%Ak zlAWb(3ulPQjTZ@>r0g-kgsbX)Hn~a^fQ_HK3$lMD+c6dYld1)v5a$PhSx8!m$$L>FzKI>ELNc>EAOA*UrEo2#vvxuiyDI*U)xzG{~3hT#UNTuNaIy)jE;KR zFBtw%5QCd%&&m`)dELqiK&^9(`0aWve0b2wp?qB{8<5^6kO0$^ZJ4@1caiC zb2}H{$|AsABwD^0tGV6_=4YEI^44|Uy_aC?mS2fnjycfoWl(she z6E|^8F5C>=vBhUQSy^tP9-2=f6&;pA-iT6A%>`jPe4V3A)%GIFwVV^LiF@E^l)>K# zFc`lI-UWiRjK1K|)o>JO9t7gXuPH`BMa4@!VFZ z#?}iPx%|D`b1bsqF+k3cbPPQg0GrhP+$XgLr()u(RmB*OTpCLtq8u$U>f`b)Nnv6O zjRN`4rly--5uG|t;^rikV54e(2ij9Gke;9J{5j)*mq={@mP zg|xf#>eDXKf{KP7)1an1jdr1g=yccneUE>S|8LDjz_ zT}x~UztUYQJ>$c_g|?b!e@a_;3R808lmOA@&;%}0xYn22X2@r4I@M;sbfz%*OJSOjnUW7aF$8BWF?e$p& zEgQpx2UJ>8wLK^5GvarpI_{W7ohUrY*wtnfgW58%jI-Ggq`G`J*#j%i+4$svFS8d z&pz%TN>tlj8R0rMB}FiL`nK63FJql2-coUC6}+$nJ4tFF;&RF4{k+M$%_Q+pW17NR z1Mm`iI5Qww)ZUurLy~3QY)3I!ewGZ_Kzgnq2RZabNY3)nQPa<2OL0sz4 z(H7D7UU|0Ux&8|YeuczSJeRnegdHq3Y473s9>wNfa@i9bhehmA+h@sg37>gJxzkR?s?d*1oc{{Yf5KYG-v!@F ziA`KHRp!Bea3r8sI$v_K?vxsMRAdEP_7$(?54kkj2h;r2&sPb7c0 zWazzmKLhU}kHoz_@vBape{+j&^NhH`hf-gz40q%#0|=*?SN)rnM7rQL=-!jx?~22fr@uabuA(ym2+Se;oDO! zS0b=|)70!b{{vj~>8trfW7oB=OYN^DYVxjTdQ*JHos#@p7@AMET9>%4Ng@At!vAM6 z|E_l9>3Zzs@=xvf0%m4wUFZZOX`MFzbH80rahpd1L#9M$u4fJMx8drpm6!u0{_ ziSPcZZVYjMO}~sq>LqDMG>g8OL8g@F&3x@FqoWu$`O?@$+rD(&`ANX+bP)wXbr5h0 z=`*nVscD=S>hsxrvN`c>?DqpSyc!_68 zomK!4WS#aum@4Y-!i$YKMf@V)s=W;CJKYGy3p8uD|)4*E&wNowa=Sw<(l)Hw>)w$O@L;nshWdr zZ^)l)23zQebS2m421s%u$y!&v_PSRFm3cZOI=DyeR~{{%D4R8(pZOXl?@lb0-Idpv z6Gji`IwGCtp7OH(otythR9>v3V8>lsy21TcRmYZN_TTGim{BivRa25o*tE zislbjZ~WY>i{ak^a7qsFcl4OYJZuC29hmx$aSDh;F0^k8OQ!^4G0Vl z;G`)2H`X=)G65v~|H_bfv7vGg|Nl?@U(^Nte_&QiTjk4GTIEYwaYDu~=jyhO*IWf> zYn~WRH?E-){L*$ZxCFxPMK{{LEa%SFp?^5h{^I zG(*GES3FAFA`KblM#~wyeNl!P05QS^$irTny=3P!MmBXYl9{o;u85EF4ef|msYJL@ z7WZ+zc^ow!7;HEmBBT1B^>CeQcifcQ8Gl-a-S3+>nXhB&nLjDBsk;fgF5eVz$0}bxw>~PXPEth?#I`kk9T}7WW6G}*m)GAyV=z+ z=DjSy{Ay3u5NjxT-DrN2%)NB?;c{&KepUbpVD@`Cv+QAmFz?xMRvu{w{+CB4#g!s2 zN`=m~TEHq6=Q8cuekaHICwW`~Ly?a;B|}k@ab@|A-gRt-c3WRJ!ep@?>ADiW^b{p0SMvjp;!bzMq4yob_G1zx1MC}$8ioJr;KWVD=VEZ~qA%TgcY zL}JxJzIIfaBmT))+UVW9|GJ9xBRj4@RPY4NaVjR$y~ibLix-=Ee)ipOb(^X89@@70v_5mLaC zJm@Jn-Bqf=Fm`H6yKiYy)Hfa|Kyqz#^!*Q<6bbv1E~QVdi-*zSyNyq!OTWi2O&!;? 
z5rPNCyaJ*a=|Y9w80m_(S)mA-eXIU~k?(7xQ~S5HCnyn^iF3=noJ*;o~q38snnk<`#ecd(RPfq zcq`PmKC-a+&W7kbiaV!a5Mjud$ETb&UzPIQ_-oY5W@<0RB=Mi@XW6~=0gRg`(Ak0+ z{SrP#d8E=_xEn=M{z3`NNfdlb{ES{2{C*|FcNZwD7-yrh*(#vW!8ANt6*&u-D& zkg}OPgX-g|HI#v8tK+lUKVZvASxwhpD%pv8*$f{-OQzm5nVR70^pX^jS~jP&C{^%B zH5RC$zs=Cjp0rj`09!+Wib;d9XB`Yl6#C+^^s3f3I>hY!>C&)ESad_{limAE#2NN^ zfm>F=vRux;HjgM+7IAi365=7T4L<<~=DW zRS#UQmSu((1FO7ke-mq^*^yb~kVp+tz)(WFOqPU4S9n#=ufGyi;1R__hYoQIf1W= zSV4WGmaqStyW33}f}vb@a?HAQR={d~+25`5$(3fQnhwEn2@mPtD)98{p1c$CwnP3~ z*V6GCUsbrUKSR60f1TFPK|1?EEKc*xF1{vTPCn)s8t|cBc#7L#h|9cTs>z=a^-h{+ zA6RSJNoM1}Pi&peUxq$XV||!;3rBAf71emwmGqcMKck)0zu)SsvNJSkTD{@rbYg|B z&ppe}WeF^e#%2(Htd86#rZCKU<^$(H`49hR`Qrqm=4yl1L@=WuoVvLX&{*Wr!x!I{ zY`O!cqSc2TtcOBOR4p4w%x(!JEUtie9-8VSv}9apYGSjYS>$_sI`XWCF(vr&T8^(VQe7w z)_y!2Yh08ef3fl%DT{dn>my^IbFO6rn><-Xbj#E%9+1oXM*of)i|UT`m3gQXMmY7M z-M8p(J!7g#&No+ELI>=z9fFn%whG?kAor)(Btxx-9(PKFJ;`@vjaPZ>d^0lR*Qe++ zhk$eA`@hHalwQa0)o5qWVQ{Iy`f(aLB>l(oUBy)Y40w}8c4lxnGt=3=2ws_jSnwK* zadlHk-ma4$2N^DpDplCOm?#&u9IrSw?udl_QPWtj_GMwN z8q?t6&64hC)~j+}S42*NbW7|FhtFE1_7+Ev;!o>FUD*(ha>d!HG>yll1eo4(ZN^nj zFvj-0PYhk0$;%<*7A$22&c%C?8Sl_D?c6U* z7)#X&mbXz{RKs4u6?=JkGV{+AV)9gz%Ksr5`s~w6Me}@iYqP_60^ae&Wu>ou<2)$2 zUR2*f=-jc8;N=;E-5+|~@K<9#p{ROdGN8yW_r<9s=)lSgt)GL}GZ z592TIS06WezTjwtHxiV zO~9}&Ai)0W|M&>Ce9}@-!CSC6x?j>JPRBffNU%<@NpLH($;}P|Iy*8|-ezN-2`}s0 z(9O3X-fKYj<2s$OX=FT z+_Uf4dQVf)0Lmt5Y>;m<)^($hJqG8&bb9~#?$Tcg^E*|Z)i<)5>c3Ab+hrA#?3mu` z&l;vbF-lje5Mwd@xw}B!7|5?3g{f+Lfa`gjmGGzI#`fKNfmoQU5?XUE4Y=57+$W4{$t9=x3GiG%HAH+e<)-A-d>MKX z+~pEp8O!T;ro?{%jaW~-=UYw>Ce(TL1-NQRzJSofhJYBhfx zexK~}Bx!Wj2|H}a90Fxv`W*~UqnfMRZGCZ`@rk$pLA1-XFY75ul#!VZt+VfPwAPfT zl-Ei`v;{F+kxpF3$|}n^_*h6(abFm15nE&brLV&@@ zYk#to#d-14D?q-62i!um%F;JwtFy{pCuK0^IMqCVF`84CUGlAoA+;)CfPI5)t^d4` zwSi(e^02oX?E#^k=*xS0Ft3?RS3ura&dR!9B9Nswd#A-l5-5TCd;#fpe2mzQM+&#@ zwEosj21->b2tc_Zl9;@@qE#X}QQyE}n)x*Tn7!!kO=MZA5ooq_6y`$N6+Zh)gK6HM z!yo;^p(1nN(xT{~Z6p)`wXDd^l+oJ%r@-KX(|GpAk!rle#^U}-rJopgjvQO=8~)EL z4Wqv*jlFo?H0BtuC?2~dIqte=w(SHC%3CKSsXo(A0vrbdZV)b37nbRe{{B_>Sg&*_ zS5u=RpxY~0=2IDuv~(MYEv5$uJv8WSU>sxv8e7xl0``6_ z5|vC@E%h4g{YqnT!u1*S(3tJxWLxi94ky1E*$O4K5Etf7L-btfhE)t;76;H#%nEb{+!A&OBj02D#AdrSgnTn(;@tTYR8uEiZK-)UQ5Yf+xzzNP=W5$(7yac z7R6p>_>Y3yh{Y_O!EZ^Ybf0Wyt~pM7kk>Wz-L;)iD~>dYK+pQ?ex=3toRMsG?Dc=$ zxy(`J(f_m`CB6!Z?bK9;^HBaZH(&xWqp2YWWcIe##xwehzZ**s+{j)*(0Es^S)E37 zQOW(#M>-$_tA0vXHi?@JF*Dz~0<>?q=rz9%u0jkMYKB1Vaw4mDzn7fr?2eG4dX03O z;M4K5g+kb~l#Xwo3(c=Es4%2Wb+|m6XdNxz`wX3u^OS1y8##gr=F@#F@z5@@_j$K=NC>=Xz+c5 z_*>>Zt1rb$A-29pk~zX@ag_U`3WV2~rT`8}tNcQ8TJ5$Xl6wn4GkPElyn}$`QN+t% zS9c!7ec>O`g?#@l-N=-L>(^Tmth_t;bgE>$THVYyy3;{{ux`lP5{aC~P&-Q3C?}Uz zm5Yx|3(Q(*;>-!-fd&+{Wb2WvR|p(;?dwgAiKX*nGm6_U0_8Esdj%!=+2P12K+E{A zz2S|*YOCF&YuXlrd91I`?j}(;dh_=SLT=Q=JHo*Y%|d1w#N4%*hlZIaP5T8Zpb2KS zknV<&!U7ZU9aN*Ws&pR+od881zqF5J3`~S&8rbB9UN2Y^YmsSC$*`?bdvyn|uZoqk7oL`DRAb6wv0sG`f#z&?B)Zd0P+r@ImHYLPQ{Kt@4#=>0xkIux*GnOB zC1^LzgE;rZ-g#V66RiUS!hmWK7aml!DX$g|E#uN+H; zBfv8AYb{~^`O|u!)K2vv!Q9_A^g*L8=&;CHu%6nK9Wh#RT0cbJvy!XE0sN- zM*OK7fyX~~>pqq@W4-1wQM)P1nr@HV^0`5aqkqFhIW};;1!qOkm{Ir|>2zQi0|3%jEtNiQ*^9Tz{sqZH} z!U4e&rl7eFs#?;Sd3iQ7JX`$EO?8qnlB{DE9q1Jm=fez!Pr0&SGzo(K7fE14^%>qP zNu8MrrKt!;FpSNAn1~EEJSq*_9ae{!Wx|Zjj0eVE@~WA6qsUjZtbC<#{qCA$uKW+@ zlXtc)AGMBDejM|fhQ!;=S};SE?aCsfwj?_zyzh*#q4YQMPQ`c6Qp!PH#a&Pe@*PI; z|6UMT40z@b-==)&Cy6nJ=Oi;_-xV=cialN$5`z-xrv>7dUyz4054PhYz#Bm+n)!UG845)jb=B5p3UK5W{5#v)Eh{Xpy zngkUCiBUNSpW7>5z28ZH1DC?!6z9n^n2VI1D%$@e>!b)@U=S!;GlU6UPViiQJhmX8 zpdaHw`<7=*B_spL;|f(M@mRL{%cwip>K}1&zG7>#`LO~d;_Cr4OkU~AM$@|dhrg3DmpaYnJ1>I_Dz?}iEtDqm|Tt{HMue=Jr1v+8$$9~R~SG<&~Fnuz2;JbRS}&C 
zb#O%(wU;!iQ{n@HN1!F_se5w*$C@xW3snH=^cXf$^y5?3#ph;#r}3{C10Q6-Y%!^$ z9CW*GA{^FFY3J9U^RwUvI2V4yrY8!WG14aDePB#~Oi5)egMx_4vO=9 ziQ-bG!;>p<5T8`V1;lL||N5#dx_KZ!9^h?X<>Bxq_I_aQ+KrA6-|@Wa3FY(e`0xnj zhCB-Jc33e4Jqo#*)QK3hrA)n@p!uQ6Ip*7fg2L3)v)%6|kQI?)z9g>Rff0*H(3j#7 z3--4<&%+Q~ykJav=nk$-g%&xBPri>N1fBbJU<1;j;?6b>lmLP!scKed*y>7kd3~JeFYPkZa`h`f7o5!NXxZa!iqFW{{O{}wi ztw(+$;JB{>Uex%Jx7T=&)~BqI_U1NziN&62!dOuCTpfGdMZKVVW@i9w8XVB{v3Dw5 zvhC)gGRLbvICE!s>v)NS>%%lF6OnE1Fmo@YZ4&6BFy#H}2TDv2wlvqVr~u)eDxcHq z9TADC8h!}rMzyS^;H8oSG}6`~H5fO`Ti4xt>Q29`b-CdRHEkhTa)HtFGfpf)hIK*A zM3-*tKRLCUXJ?K!KY#@w6$2BmUR5gcIy0#@K5qsrDWO1Fs&)UCc*0f4U{aVgI7#j0 zJ!s6DWRZ(W?T#VI$i^!rC5di=JM}}#--V5Ffw2{PFK`w_as{t5H%EcmV)$JTgfL~T z%%t@DX!{7DXLnea&Yr3z&9z4mu@V+u*znJyv65=186Z#pk?5Ux^?7m_KZKjlfK}tt zG7FR&waU2!!`NwPaiKM13(WgYOizP`Pb948PFM@%5ttr#Bd|4T)xXK zJ6zbEMVTaJq>+=p^}BhIn4C@vm@_YU9PMD=m*1a}vn)<8?|Owo`7dGfMLYW2TAmJDN~SD`pf02%UcfWftm~@_9@Bs=3O(s9FpPhOg?Hk%z0=#N*TR=*Btq0-CR_ zc~(+;aP@#fX?U}S^0#J2e*RU$T&c?WD{fc+m=Bckx_myHxrVU3gFU5N-=iP+4{TDIaETgGJRcEk{5 zFeZ^d4?ErX$tn@rpJz@P)GWba_Hr5B-7Hr&oHwr<^v$9E6C^H{{iX>DhI`CVk?4`Y zOX)bb=OVO(Zp>wcg6-rrEGz$-HE)(l7E_g5+{r5eD=;{n8Zm8*edfWI4=7a>e|n=n z6o-68{KhGY4I(vrOAnOA30oOaV&{(5;4n$6F7z6YaSsp=f}RtfSSdraT(m9>s*UwZ zDw$c`+WHFPc`O)p&iEw+2-B0|i&5c#Weh<*89lg8PmU)oi4wrWP-bcBnQTJReG?fA zeI^ix;nHj5^X=2H=P=7{a{|<1^09WIg=yMuiTnNWyQ^MabbVP6l+b~F!?J*w?O~BSoAK;>molM&E(B3woKgQO2S>%@*5BUWT3l3Qinis-wy!IF;FEmR zE=+Uj(elj4;Uac3HuW?(|HQ|{%7Y;@iI@BS%;jo2>VLsd;zrw1<<)ucN~u>1vdrJ^ zTvBg`D1s}g@l_rPpK{sv@D8}7l%NE5c_!pAZJeovw|&g841UEuddQ|!v8!g(Z3H{@ zp^lsk9X%(5XG_k^$9;bQevKNdRyp?m(vGju*<5bml}e{X_R1xF*q|IH+q8Ch@O?e@SL!DOy0ppOuglm52t)ooT9QioJ z0ejCFXw8aOx95>xMvU_kn+r(g(0X|qU&N-UmIdB0aR6K zwj$~i$F}&N`|cRu7@8mAggEkRg=c_Bbu4jq=8KBeWkI=DQx zQ!hB%Nt>t_%Fd+5bikvg;*#IwY=$W`7bxRB*yD!^@(AV@HHY`?I)E`v=cY`NXB)K! 
zwJaYZU&Dn(VqWXkutW&!I8n7e>K5wkSMMrENy7GFSiNRpS|5vOYqy3iv6!u?XXz`A zdG;IZ7Uhs>;}RiYaZSCSzwhCXmkdK-3lPuInhVM}9W0w+c2-HtLcqDuie^*kyzoeh zzraM-4Ahf;C!oQtdYHYe+edYpcJ-dJ_V~qEbC=<5JYHV3O_2M<_Gv44U z(Zn>I@29#p$l+5YtVnXrF*!4)>lpy+^`G2{U=;J0g<9yFETAdm%~PURTjs8+IRX71 zn_HzJCgxV8b2c79HW1;sxC}}?&qZ(UzX&xAq+uw|^$~bAa&>jME56eZ9FJ<}82AXk zPNj>#^IcA|n#I#TK{2BvPv&&;6r+3i8N2pw$OU@VSstUU(O$8(Tt})fdQdjHY#I?E z4SS?p0ox+1S7V*TYPeg~bD`sU`d3+gdVPrs69o^cn&nP`nSLwiy!JU&T=V=LubU5_ z^#QUA2HEM_P;1gfZ7xYOI2!6Hht5Dbd!>OUh%Ck ztP+2S5U>lSJnc0ZA&R0kud?I0cKBvJ^1~3_w%v`&Kjv_V#tNHiP}fzYAPfWN7KuRA zM5uD^oWB{$k4lUzgTHIYj{2I%^2XZjp;<(7RXMtjcsL~HhjbQiZm6=VKGO2w8jP>1 z4yiiQHcV9Ydg4^fD8gW7qA@7=Fq6qaZw`)0K8RlUSf0?x#yn9N)y*pEvLJO8$%ao4 z_k;dQ+g0pJaFVmR4N*ul4J4#bf3r*$Zc5wZnpbB-6dO!bxzGFOM_3XKnetc}ta+?( zt9w)9yuVW6)CsfTHk09>GDdF`IM?D1QgutG&%RXHkmm+gu=aZVF#k~Ae6?uhj|JA; z7(&$whrCzf+kJrummh)G*;fzK27_bP%AKG*AWXQzbNJl}UG5l3RfP0j7$U9%;D~r~ zONDgIY~}VsPOKv>jm7)Xyq6PIpMWbFVB^zNE}xsUNl4I+$U{ab$Z2RfncKE#bVJc^ z!K=L#cnGJ;uibX_f%`sdu8U0A-h?)fqflzBVfNYD*-k^l&@h#^C(yk$DIZ0=H(&PP>a0-YHYTcS z@(PypiAr_3Y&I5>xV=Ns_#p%)`u4*TqQ;?UE0tM10s+Gga$_Gf++nnx8h3Fi?sc8 z{q0}tv*JTCtW5<68VhZ5T(qYm$K1$ynUDIp5+)n=f)MxADqXWcy$-BmS&^uRDwfc= zi8kc|$!Ze>V!*Yio&}lnw})KWDUp~?cd#>~hw~7+Va2fIETWJFESF0BY5t)Ou)tha zV6|ZlQx(gWRuG|mve#$WS)~*VLUVbJ@_tR7s!^8(B^DPEH42VG61M0J`wD(vDTnn^ zd^D8=6obSc)}09O0r$s+F4Imi|099}xI-23vBw`>FOxgWVP>t)vJ%t&bcYW=79dRK zTe^HXHH;68ln7%L!?#^p^Q2VCZ7#7jwr<;cm3CtEIFwYwfkLTBOq~e-2f^1#z{^tI zSRsy7eNfJ>CQh?zJJG#_Xi__pZvh|?PW0=P|CBo=7gcf27??d$xQ|#JH zqmK9>AQ;0{(9B4Qzm3!$iISPgZo)oXP+hS)mg^8sa<_@h7{v@MPVAZ8`K^Xi!Ksg= zRK5R1p{Jdas%TePrHeCk(*AwF^!j#l?1B1=`m8p3WSI?9$Ovhh0hrhSnNv26Sa;P{2qNuEt0mNnyYRyl|xdf zXU+K$JsT7B}*8@cB`KuW(MtyO3J?H$2s!bZ556 zdIvT{rGR0WFloI^R$WIra+{UF7kU%VfKJcN4`t0~YAn@Mf`(j@^5#yw$z6ir{Jx65 zf^ppd^)hy+l3D|h=*C|Af2;9eB3X!L3;%|UFBgzBKxu&mP@f?cQQ<#!Q=1dt=eT;1 z>Z`nZmTt^;`}cR69UO(eUV5jP3+q_e>gI!<&HW&igMCvLCqvoqDrD_In~Y&gbZxF9 zkMTE~lGDT8T`eeW-qrq5g6QqjdTeFJ46Wj1{gdgCr>5f(u8xY?pq|{~do4GxZo^6y zRf%VWa!qI-zP7WLai9fGfZ>eWKUq=X=R73^VdyY_SciLQn4yc^EHlhIq{Nv8r3$bF z;Mfd8Lh(JI{KXmlF&%h%pdu;qb=XrtW(YfF&;|N{lf}P9zOa5DlheBDrub>n`?45Z z==-wjKvz#I(Nq%be9Uq?MuPM99it^^y||sn4-%=hh3@X229cnp-XQ>gtM(E%w&%r z<4wsh(l;9P+BOTtC@P$LJOKel!J(y*(*ZthO)`or+{v5%vad1jG)&|Y`$nfS^0lbW z--@ZItZ%I;&)nLvM-MYB8U}D#$e}+})-~Yjr9wFmmFoK%oD}&Pf6GBX+9opq_hnLv zC*}e^(>aQDa8>a{fI53W*=tP%9XY`>MooLSKN!_>|4Vsb(jaKY$ z4*u30!IXO6r3Cl=$9TO^wkOf~#nk(Tvx0#SiWD!uZNKGODD=E*{Z6H&Ldozu0YRJC z6PKvqYjF(rkN1Gr!@v!wRcO#~Ay{hey7HtJvt>s*%l<(mn`lk{|3r`_V@{oZ5r^5sDX#41;({4=4uE z9x2VSqLB+%iBC2|nrnuH$MS_fT#HsN7;!n85!Z^LV$D@qH5Sh{ORCU7=Iut;_+&+FC4CjtQ08}syvvTx{A-9ome zu*h6e^qU;O`ig|gQH)ro)xStEEra23Qu@E%6^oSgF9>^CTEhgv(gNX_=o=oP*@|UJ zrYyZ{p06U8U%Pp{ePZ{^F{H#I3|=BI7+PX$R3!Q&vgn6oeR67z;JVX5?j!$D;X=b2 zsnqJg8v;^d$`M*`s+K3=PEfwt_=9EpU8aGlW-pScjo_5Yyh1sYdpJ}w|XhBFVvmoi2<#RBagE45JZ#LQ9hIdTElHP2LPV^=Yi?*f+{jP+w zBpgILA5KR!j)QK@Z?VAQ3xOiZ)jVLX%Ae*UkjR||nR z!#R3EeUu%g;8v0k%VFio2ZXVKW`Y-5FV-!VYlE`2D1}_qf-DU2N^);CL9B+M|$?k*F2P1b?pU z`>P?j^B#$xy>gxen?}r}k9Lg8cn6UNx%UZxF=C72H%E%Fn0aZ+2Dp09?OQsFiD$t> zN78$ZfX{=MkU{xIBes5Sv54sYL)@*l%l6SvuaZPwqt8|7%*S-q^dyx^_T7Uxh*in0 z#XYX0b4O{Hw+D9}wQqdw8!oz65$NrU#JSn#W|{!v>1P6cW3z2Z;Vj?8NWwz-@tOPU za0SkdM`f1BBUN9l0v64mKV>U4C=EN3Ya8~{n5zV=ffZ$LIct3i3^ssgoqf)TIdF8j zHb|8-4h46N+fUSAD=y9KHc9}=YRoZHdiH=Y?6KoeSlRpJ3Q>Wg06F^0@IX!dbEqPA_Stlrp%yI?<5v2vStEEntcc`xHX9G4{j|9w+g);#W>!5)U-Ab zp0rl~?4b_7p8Ni@O0i76(8f}1Y>UJ0h6MNn3c|IWylWOBM)SLelh;H|eYtKwl{tPN z*bP0f@jedYpDP>ml!%kvYhmi+#aL{D=d);_#i*Hjl@KMZ4U3ctINp`S z%C0oKgIWlT$Ly-|@0qm)g-i{E7@8{7N4uK8KV0sLazQ{zB;NIv|6wggT`*o4EpRVS 
z%l}q4#SpPDzad$6gQ~`FTF6{~p4YJB&IS+Qt;~lvWOS4km3ys+rgBY~>vD3mqehn7+xXmN+OxEBeu z!EWB~+&}01%orIXJIUV9T5GN;+~b5CD+-LnS>}6yq%qb@O5&_xNuas%K;y|BwFIV< zdmjY{y-5e`E0r=N1wdFyev9A~VHmG3Q4+-)5`Yms&e$U96BYJRY05ev9aU0Ks^o8O zJ>^o+@jGW&km0OG786tI;_j4sm9`f@#f6zoP-;QJsPdWu&$=nj4B!v2#N|%<~ z>-7$`=j;9RjK+&9-a4lmm<}Hngeo2opJ_dLO;$GQ*ZW5;&kHS+ho%IqAHBRNiuUXyiYITg< zmPTv^WVu9N+0qjuo>Y1JIyV22a)&Ri4a4G5d;br&f|5ANC6GdLg(%f$@a2WpR5=7U z+4b4DDG9YU@|-wbELSifN+N!1pk~x;liu~=eIRt6Z#kHx4x-=_NL_7A37(KT~IXGwX?BiN1B4E*Cp+vm&$++&+6eNN~OmjfS zKUE<4A2GJ(jtT@Ub&$>bFLRsTh7L1hKd^T|54)IBkR1&9V@d;$l&0LbZ;>h^$(4r0 zn8q9|w`}7w?g)iVzXT8|I+2_mVhC3pj-!FHqWbB)E^Bmy-RiW{mWq?fdoZ2)mO06{ z(}vOMEdJv_V>!DPVk9Ug$6pG0;e6xunRUQ2PuF7nxM1#rm(H~2HsFa+PmtWf=*&E3 z1sQmWI=l6rWS8?;$o6ohv#&e`mIZk;fHu4l7u`zuvOim<4*awFNbi$&uTr%V>as;3 zAv%@Wgh@ykPfQEz1=o4TSOYX>(xAE1(?{Zxrk)w+O52l7QsahvNeuq8zSn8~NdHRRD zW9mrY^&arc%%teR`5z&cGvs_Y@!A;end);RYGQ4d%K_tqT;jL??)KkmN|w|fNVQ@b#$L|Ny&SP};VIVk38?ykfkSJYBS?R-jbKsE7V=`pZUIS(w|+(zXg;807zC38-A(t6^M zD{e^9gL1x^4KE=|Co4=NsUG1UXhs3C9Cx8lSiF1s;zRh#{CZ4`Fbm8yw&_H%{tWYz z&T@A_cEDiu9Q&c(MBa~w=w_E~FG_*df^PzojXnCQB1`@53uR;dm2Vnszpj-O9eam` zsGH>rDaECwL0OO2yu_coDTr5gvxJQH)Ucs8#xbwBmNXFr`oLs^`U}dV`Fz!N6@+(S z!bus%$m^m{#`v#haGTOYk5=E(g;BnwAxa4yqG~R}wC`pMW?5T(4Kk&aGJmdVmo!F; ztvR2UJluj}mM=D@*98o_0Di)16*-gH-FC!7=z`XCNKwoc6YRb-&V+Ei6c z`kDz9ORfaT@xCYqcMVaek#<}5 zOQ4wLpgM3RO{)ue@rQOzA+IFXVPbESCa()JUge_h2-#0T1v%eK{CZ^w5Q{xpcw$oz zTF!xx*P<@nJ4CA*Zmxn&ZIrwDev9%MdgU+6Ul`8!%q{n|2K`S-m>~QA+fH!dzlXvS zm=7=6-^BFk%ay$51M)E$xt6hMmqA(p9MIwq(WD^l{vf4mNI2V;%zGbWZpVMLENNzZ zi($5caeHd$48K2fAeDjA zNRjw1UBjwwvWGXTxSTG|IpqYuj;B;xTNN()hp_?%IPUtcYxKeB>V=$kHJXd@8c)x_ z2xyb>WeYmri^ZQA0M5!t|7dzzqs=8_@;MJ|;fdQ)X~ zRI@PpnlE+srjm8%`)>^Yop1lSW^*aXqhEMnWNC3&d1@XIQ0Ux%U1`yre&8Kr&U4mz@?BmK68*DsHsgq6!gYKRtr92IQ@6^rlSA!MH33P zfzABG{)@+KKqo(Oa}2EL4D%~Fuo9vscNkfon9ar(2q5$dOb>cM3hs}!nVD(Ci zu9vHl5mn;ieq3RoSf{{GTBgPbwBS|&W!?94)CljM*o44r*bQTQ`9g6N<%L5T?%-I*JSTUe01;o zDAkSf;so1mI>nuxZy1BFT~6w(`Ii@06|YTUE8}&`oz=!Q;wrbutF`0nP6+#OdScVl z>M_NXo$#qQK9`foxD}t*mj^sf=CNva`7azVet9!6f(#z$&Ctr57hU&jCJwVLzSeZ( zcx^A|WMZ8qE!su!2Q^T-D;6psVFiP+g?ZECW#O~b9tQMVjv~W8rs|-qi{;7L#=iPc zoVihkSF1SBvm`$IZ}_|B^%gaFy4D{}Zz%@9P{(+EzdC1xG)aH!T49dPQUnL;v9o;o zzHCd+#P4TfO%=)>ji+g-C_5_zL8iZxV2LsQWd4LWiC-Y{q&6Ou3+=Yl2?6( zA)8{H{fh_d>AyXf4^n4_sdYLU(LBU?Dj{JRKVmr&(op;7+Vk5E zU#05}2yIkC&bMgE*G*227_u7h(Ih)_ESa}liq|CU9#z9k*CJFv zW5oQ!@}&64PG4ZtAL&AuHa1rh>Q-(S(;|=Z4a@x>Z3G78{~S@Icw(H(OU|7~z)2QM z*Oh<1mh&=#x8wWwnmy&dQ)?ws0ReuHar;X@Dcin>l82LhkxCL!au(zXiMA5@4gKi` zFlEgEM5D)^jLa5rAZ5w`ZYxVuVH3(f3~75c4FJFKB>$+9#Mlc`FZ z9lbmK$2t|f$@*U}T&b<(w)6(v%*sEpFRkmK3(=l50vI2n85Xs^KO8EURjP(9e9oU* z=k>!cp0nJ}HuJw727OA#@Az~P6{&T6wt1*OM}>&`F{$)0$+}76GuQ9bcfF2FL#W%G z4yW~^B$4#vP~ZBU2F_eAV~S}FW9HUFJz1m-%{A+b70|J^vuxMIY^|rZm6d?f(YdVKWp%TCQJnurUxf*Czc$KGE`r1m3n+hL0JxRr_v|O_Ev~k({ANHs zWYY^5W-J@%Ak;}a6hW+Hb81{lp9;r5hdZnp5h}UgPgC)KH&xV4!F2>`V0uzqsT3Ya z(@;pm#^U(}RS~c*Iuis~Db2^w6~9VwW-lA)_d#+xTLeia7iz;)5HvP@fxk~N(UQdJ z6O@rDG%0`T{o-4U4>GPSR=PA4X#?xa^*=r78TX8}ui~Ls+#-IjN^jc>nNH~dBVP7k z3SKuo20=QHB?9$pj{%3+39avEJY0ewP3%_G{NrD?s->{0gN8zYf*?LB!iM~E8N{vJ z!$-A;ljAv&4AE8;b8+!Eqa)J#UeeQKPSjL;QFDl>J>R$BZTY_tHb}bHzRo+q=n#;eslEysX z{u!!3;WpXWNpgK*`ECk%d?n28%bi`Vx{5nH>9|(>9BjZdP?YYXzyIhvi)+9o52ch@Oo=p5I0m_in{k2H~VU=Nf`nzUTD`*pO zBqW_!dCW;w6+@Bw2lRO?a_uRwh+K2btj>o(=^WoJl zbrb7S%Zm{39pSLuvY+R-54Hj(#y9#;xG=OSpi1->c}hA?!dJAxDFz2sG-o~h(cD*U z3hFRds^?%*gMb`Mq7;&qD3F(v-Zcxyx~7KNWv}*2qGUWB|8XW*^xm6qh7a-pNVCdI z%cX`kw2D(8u$7QMzAQ>{v@*KeTn}yilNstOZw>piol?>b543G609|&mBPB}2GY{`8 zGKM@@@Qz=3k#!}k>231rq`An5`6GDqvY3DSok$TinwZ+&m-02uZNn^w_DH|A`XOJ9 
zq{o5d1BCcp)kHD=`9G8Ihbf1HMwUGjQ(Vq7KSEhEZNa8K;OGEc)=l_AXLEMF&DvFGr#p*Qwy=CabY|1_UF4o}~iiIH6++Bjmha!^IW za6URGviEz+J)Lew>!4bGBy7d65KMta|3Lfi1kXKW3s)CUZ=gb{(Ro+K-9c$4nbM-r zxSZ;5k_=sw))W2wv$gtvaei({X|uOI*WKaP1uVcUU}j|#-;2-;)nuBf3i~m0{LXSw z0h&=i%iVwZo1ZGJd-@#hl%H3hnuXTJ>yXt{A6we4Zv|IP>RXd`tc+*Oq24DdMMq*J z;JL-4k^y?`No^qFr9l#O`Uha-+`CTNkq7+~)%|3(h9Yx7cR+p2hty708AD(e@*lAn z^nC9_!hK-F$*8Y>sz8zzq<2IjrMlP>i3o4H+xVdz;PcrlG)4RpnyM}?to%h z!}8X#R*Lc?b?Zp+o9B1e@FF}C2&Kthl z|M%ZFqBj4Im?)OrB^K5=={xG5RB4(Iw4a4M5y_{e^vnq@?kV#H8g8u0Xu?jjm3n*c zCoGfNn^kSD!XnNIPC=~*?&bqrIi>!>@m{+vCj zwsgI|ulhy?9n6>H(7(W|celiD3URvNNc!F1Oi^Th-5&}OE-s%i7tr1~m=sx$pkxCl zB_qdl;=;9TEW8bX#*=b+L_}f)&;6IPm#C_X`&irwwG)7;NP{9^Q=J5H&9WmrZS0%h z%BpG~3w2tQ42GUPLmgQDbtaSKsAMDwY6pH8d{uF55uP+7rQfUKG)~!M7)&Q zPdsWz+-oWJGu#t5d0(C|ciQONg*hd3^T7p(LE?G$_izLMH0aeXXXOf7@o%KM++_rd zQ_gF^@1yBFra`JVsp_n{z^qP7R}y4DE0?3^@q2cbA5@RZeoEJdV*u%@bIe%dg4vrn zInUxB-??wZ>(S6835XUXJ9r^EfjL_|4IuEHp#Au+v(;)YlL~&yVQf_LP6xGbWCytZ zIoR-hWBGAUK@c7bSNiwA9SW^y(>u`7=6^oFgc;QX`w*u~YTI)%HK_~d8FI3-J_D}P z8`oF94JgaOvF?*Js2!8`@}a2Q{}uIQWQ|J)d>6$p|n+S*d3ZV7F^z9p}35=(z5Ct?5x!}nC2 z!gvn)>vFj-N4c@r+#f>w9247v>WOtB@c=Q^QV<5HE>m6hZ*bB0diy+F^b!4y0u?Hg zjy{Soes^h#guI`d5%kl)MgG|KEbGyAN= zayRnh+gb%uZ-M+&HcWtVct=V{fuKg;^P*TabhmO#*Tu?osvgTOrK!V6=7^^P$axAO zc9jJI8|xB}^Y~}+CZ!1~!|nq728>;gSXA<$hMd*82IY*~2{emTX>$xmmo!W#QsNGW zY-(v-rqB^FOj^i}P4MiSw7$*&_dQW*MgA39icig^x@q`!Q5Igc(StUcuiU~rJ!0DE zqsPg8CyZ>)Es{KwrBTPTl-ux!tr^OlB~@j+hEtQ8^O!<5uSMKzk;)=B;Grn34f*&0 zL7iC^j$aN@dlUEb5aDHBZ(eZ0Fa{)t^YG>^(EV?+ zw7Ja}*~9~8K~;MKd?92geUQT*pw(qHxEv-MBh`k{E#Qv%vMlm;XBuK?%{a)21 z2USOsM(MXzogCA7br7wiNsedTgA+e{AXPWAWKS{yTTx7kNlkqFGg;i-8gn)@JOB(H z*hn2yqGU`7yX`XaKV{_f!eZ6RLSLQ=I<$ygpQB+)M@e6D+fqrF)aO6Va@g37wL+)`l{&y% zpvRlS3hX3F%D|w1JX12xG*L!rsjaHxMU01EGj)wgz#%(m+aBe+^X6Pjxe@>FXDWvE zRcP)*-T9nabp_B&o6%gj_+%0Di^I@cj_%b)HeiZqH5w+#(l=HzJLg__-}CzK8cAm* zd0LaAYjE`e|9m8({OCg410c2>)OtAE_?bTZKJtX4nY>20{F7QM&yn-q<`jkg>@~SL zEv=}(^t=nZ){w0fN3@!$F8F0rYxS6Z-Eyne!DN$$57O07wldq@xCQO*%MoWIj}Kl#!uYDbFzeYXn8-oJ)4&8b`^IoJb(pur(EtW~$2VdW3K zS+IcbQMQ>)Ac2!DQlU$}-{aW^`mE8Cs zeHE`lCWz+Pk`SOie`iI}YYMtgUQ5K9kb?1w6g{aAEzTz+QwXr|edPwAz&s;M4+0T)gz^i|;Ndfg$e>hnvP!Qu31 z&ZdQ5>MGFW07YQRo;DD%&2|v_WvY#IwvW3a{4dup#Vv{?t5htTl(&%jq)IzrY4jg2 zMK0A+t^~K8Vf6&-R6~Z`mGts+RpSWK6ynm2CxV+x6)&zz6@T`}<9BN();y7@QWz0U zM3>$vE$lx$tw;Y=T46w>kAJN(p-H{T;|$E{sXSiVU%h!`j?IK7zxbq(-}{8 z2?JL-&XpLvrk+w|4X zOSv>JW!+DK7eLOE!7z>O)#cD+@MTAOlna|z%AfLdON6=5j>Z%4ZU+gB0N>Nu#y#@q zTw9LF%g$j3sE*3d=|TPYL!dEPi^B|Updc{K!B>Bj%NAp*X%pGb#iThPk~9SuX0gSK zY(hOy>jAFwxj)+H2U8ycDV&|12-c^TUAIIbuXe@aO!yV^+%ZwW5@b)0Jp6J4WQIF4 zytj>P0;T==q#`hjzD8lsvSgf%AA{f7F@8(n(qCYT<&a@Y~X~>Nw7T z{441+;gxbZ0zJd+LsLWgk!bx>o2m_mzOk}KL<6QOVu>x*y(o{nge0E6+j2Zr65ch* zYKpLhk6}_qi<7hdPfe^Weu#ftx)=+~kTLua-J-sMuy`0_(=LLT0U%z0Xw0Y`ZNw^{ z%!FEG0k4RJ0js09VUR+dMTBtb9uz~6R_dW}T0vOXRvzHTjA?zyfU4?5T2UZL(YrP4 z`OIlSHp3cCt|buPWF`M`qJ&b~Xh~rQ$S;PBhdwryBK@{rjFk>ehlHi}9m&2DnsF*0 zq^X`}xDlgMT#6)rkJ(giiv^fV>J>TiP+S}0Wpn3}DmB6o@sl`fz& zRnmbZztnv&cSBjtYmhsIuHz4YoO?#*F^yVY7cIw9;Oy#i;P`XQK0Y_fC46f55^7Cs z&E*%j8R#~vC*!gdDo0W2qEp~6T}qqv!dNNU(lQdgdq}2DWzt7jxd}#q2eWbr$#$BR zA`#n-oNy&D`qiNm20a=)R~(%&Y4=TreZl}q^?Qy4cHhd65j z4bf6c(Jkb~ab>f|+->_hhSn<}u&$jPY3J2U2`g30;hKuF7csUv1SDpeQF?jY;d^HG zql`bR?LTJ}Uq&6pugT|SG9PBGKZCv+ZX0@|B9klss_DC$j~U#JOf9o)vTZu1Bk=+I z8m4zu3*3;Vhjan&h|0!gKe>cdmPe)C_4i+|kvf$oZLA-3eV(fTO*y_EifnRU$kV--L}%=a2-oVbiH?tFZ{$I|fYc-6EZ4aG?V(Bs<7CS|!@AJ=+|FUs zgwnEB(;Zpm7@(%3v(FAV1zJC4|4y`aBryv1wG&c49u8q6*|Il&JSdJ)z5r<=Y@b$Q zOy19JX<_&HSz}S3g{}0sB;Co8@~l8;;ORksg?fDIb@Jd5*|(!(L5G!^GDL`7)wPa+ zT#r)~L^~SViUR^|~WDkd8v^bDy 
zdyt!F>>4K4;x>cda6Dmf9%D7Er_+b)3RLCEvqn$K##!h0=wI>z67k;;MZK|wic}wq z=z*%~s20;}Be&<>FAqiQN`t*t6W^4MmD);UoH8D^G|WW)<4QXpj~Kks!04rI;0wZW zj2nRR3x2jdLcUQW&bfk<_U+3W z`oL>t>HSzqYHz1nxe|seSAjD4P)(s)N7FKOPR-!&4_Cd3t=u+@{?t_gLibwa!cGgb zJJ#_rr0l_ott{I}TFOm~4$rGw1Laj&;#PU!Y%mBdmCEAlkwP!r!R+Pkg z^l#o|jEf#IyrnL}spMV^vc(i_2Re^|(34cD4v>;jIqlXQ2qzgsw303eqQ|UPBmsIZ z7+D}^*PljWfMW&2U51Bn=m-QS6@8}f(`5VFImx-|MTg}*jrlKS<;>EUI-;29>E z+f*4FARqK5MN)heChvvdeUYZKy;rQjwk1g)s=#15Cmb6 z2snGNKb8DaTU~$kAoVbizX$+m4-ng&rm;fZfl(h8iR2(pN-HlhF;qPnMc2GhPXVg6 zji5KgDVdp>1AHM)^Pzpt1-_@f_7izjL~!uwjpV8K>1x;L^7d+$*hNljXaNlL7jCb_9PlnLOpd|ee6Dxxj$NF)5fIJr>#qI;b+`;6TH;E!k|>`OpI-WpwF!E z=kinC_YxiTA1Vs%BV2h)U->1!?kPC7)qfe>{%>>fU~R$dYxi9J2CA!A&0hNkLH{e8 zxy#}(-n>}EiZEGf9+3mjY1F}1xr`Lp^d%GwNUSyO^Q<4^3??+5oyN*yc)atvAuDb2 z2H&kcV)f$t;jL$Brr?p{k@}eFJCGyp0{~R=qN|TEv38bkGL2@`lBaDoBrmnzr)Twm z{){%Et+U}<6Gy>MCU6+VtC5#f2s>Bms-H13^fMn}arAZMb7xD#^!q=czu3cgumbTZ z1=>e{zJ$p=#uc)Q?pxUH<0DJcjV&npvN9GYeF%`P-qBnmL;ZTdZk-R-C!02I@$T-| z-(xdn*hx31e(n0@o__qB=6VVL%XssF_8>dNRFl<^QI+=1Ieud8&7KGzc@g0kPLEyd ztcxJ;GMdZ9?x|R`gCx@``3D|rvu2TmM1j~l+T8l%ziO&^ky7;@lvZMo!>zhA9wFn! z^5O-&8@k1Q`6%q8Cx==@2}@GZ z{X3;~uf1iz>dpMQ;wPNQqA$6T%QGkGlC6M224R z5~E@Y<7{G(Nc;S9EaEGwou|$*aKm6WUp?Gfjm;4}-Xf_n|IBD^GgWgL!#v;3xVzJ(GQ!hxQoUKy| z^tj@!K|8@xpGyLh$cp9S3Zdk$gdT+RQGI!<&`lP73*70Nwm^H&a$zyf_&NwnYbPyjEP-$4 z36ecV{I_BvyxcLneBUMhn|zvkuhE3zpMi=F`l+&3mnL)|$a=EKF~hyZT=&3)}dLaW1msQET?#vqk!CQi7d6Se-g!8W_ z-tcCuulBVk<+Ve5{&y1 zDqF&{3BN^xzG85mE(OlI>U)^2TQbV6;WsW4 zc^5SQi3k{2D97`ZO@Ka3kMc1*)2f!t>})W0D9=Z5 zq5hT56%WD3Rbe%hQ%Q-m9%H$n8j7C-Q=UoPiiLog>0jRX-R1=OT3Oh{CHo_=_{`>eAQM!?Z%`%tg$I6l&0~v^n;;6}rZBTcf5u^XeVTD1V4PXV3wTX!_T<% zekrt1yab}{UwosG{T2TQM%hat3kB2*S7EOYbCuv*|ObsKRiz?(ywPRVZu8u zs2>Ef#Fi9{%lzSQSsQ3U`lsD;8sg;FE0Racjz(NhRj{;+PUaRe;OIXEny>6gev&c@ zei6jd#b;EJmr)C(5ah5Qo9UtO!($FyC-`c53oOf;qfW$1?khrf$> z$!UfkG&gFA+YJFKOh z*F1Y=9&vH%zp5A&bcywBZpN?7h@pP#d=p8|ZXIUze1T=?PVhuo!ymuN2EM?wRjm`w za9a1?W0Q`KN3ugSrRoilYu#`ax}|q(<7K6=Kbj*(pPvnkX0it*spKW@)%T?=I|*y;AiGCs*Wg25ThL<@ctw`6Jpkdl|n6Lq8M{a zd-LUVBVzn;Sog8%@aK*-UjkB(e(FwXlk4HJcKr%ZGO5lpS-GZ{cMNG?>*%yFL%)797p6lQDq5n0ufU1BA9e zi>;Z5M&T@k1SR})-d)a7QLg$a%^sh0B-E02BCs}!r)w8NZK>BH<4#kBbt4V{@UXW(EnJUeG9|yv{5y*zefCJ}{E+>I7%Rij zKqDj2X&uNS>>GkRil1oyVrX=ftDNgdXD9n&oA}OlD#KX7F|oVNUyhTRjFGIxJ}WWb z{HOb8hk!F173i7wghAAfsDu}o5gf*2umEqsW?KpjbBpjDkwi4PUfc*BQVo8*2MlGN zjDtrc-)Yi?hj<*ZSy2y!Q+m{b@S!hVneAZC@^p8T)g`g2RWBwpI=%%CU%bMb9oNtF ze-Ug_`)seY*+@Mlr=FdI1x2Vv529J`@C8C6jI9>?fd%!ySfU|qj)LJ32qCu1n3jWsNjz_?Ak-~FI|^|K6w{8@Al@; z4pp3pigq>hKGGU?i}1=c9c@p0gjCsJuVE!fKkK=j9Qyt!>S#S+uC><=?S%s1?C`l^ zMX@&=`mc6)-Hk6GQ|~7oUN=$ro>Ux~^lV-IxNr;M@6CI`S_*INcMe{bE=rJct@27^&s~ zR*dn&-_O#G9!h(Z{?~@^9wAP@79~7Mboq9{0Ij-+Q5wR|tMhzG((Y1ynK3A&a(>CHB;Ne@wJjH0K0!w%j_{Y2^|P& zI14R2Ty(AcU>fzDpU7fHSnMjOV=p@gHr)PC+-;O}*G9v>Os8x28SgJCI8vrO?w>h!Il^5=&baWX}CmtBYN6UoSccWV#d<~qEt z_;@q4Q!+X;EfUjCsI{WFjR-HxoQdn6S|}V*&gdIja`v$#`4^!@nNzM33RL=Q9w)`S zn;Tx&K&fJ?A*^7=$HW%8lvjJjUh{k^M*%+Pyf?vm{f8b*WbR*g z!>!NALy*i7))W->NWy_+Q~u6Lm^T*8;GbAK8pQ^K$5P1B6*5vb+?-~xY^65GK)ZU! 
z$tL8+!UBRKTE^Ow-+Z&Fu+F-kb`q+`0O`I{{z61Z^rZ|Rq+mDiWMRyZK9j24=8KUB zJ_kMaET!yY8MMz@C1GeE5!F`2_w2jdo!v^kBxRJEDU@yeo&EPu$5`bA0Ss6kg-cC=FQ5NapTl%qUBn#V@pD zT_W9J6x;+c>2dU?eaNLC&U9Hb8_`_r7qUv&Qq334>NDGV{W(7}1}nw-2(R%t3#hv0 z4Lg#j#hT^s+jGucnY3A5|Gp~f=zO!Vq1$znwA@LND1cv>-LER%R#K+l#=m073XM1< zSRk#ZkXRg+h6o|xmiaOswcvWE14~xARRCbYxczGvlrOsivk-Dz=pno{C5bmz+nA^D4%|;l46~Tm zpsQ|pz&~X$R}1l9V#$8~oY5GU6q{8(2g@F-z4V}+lFN{Tuc33%0|H_Mf02%2u8kZb z@{?P6^?}582miSII4|r<%fN)Zj0vcvEV2wBFhx?c8QwHJ?r3b zy(=lc{8igG?BuWPDHNEZVz9@RYY*pmncUO*^|7xwAzJ~L?Z_&7tPg$pFwRQ)u#$uq zsl1NXSgZ9#7{Zl~pRlvnEtEUwhvmvlu*dCw0O+9Hm|54RO++V{J`l0$b4V~&NIr&)i!)7SgHf1kmD86i zLw7^Fs&>}1-n_A8_5;^nrJy>$z#}wQkOAJs-CKisg@*a-JAw)A zwdIjpfYkQV)6K0kGC`l(MR&yMXPm}UEPv}h{5^*Uz`nKcQ{+zP6&dJTtAHtDl7@?- zz!ai!t4uno9ilP88&bY%TG1qLJM+wE{@>TehIYRsVS*d_;6<|~i}@0s5@u_K#G}{+ z5fadUqs@Ni-SxiO!(X{38ApRE>bH7nIsx4U5B5nR-^!6dSt#C@cZE0_wq?sl36yaA zh>>SWS!e6^#P4~XZHaY3L#odP;jkX+0{$P08eq{VK7fYN9 zVz<02Nsmv?rSsVtBTeVEsZjaqRso} zrNq7WMfPRhM6@_Vmk=JR~lu;+?S ze~|?5SfkD=kUVa2Lu#h=XU657a8wkn<%?R=y9m^FZ9!RR!xS8*>%A(yUVB4DFX~*D zDtXN<#)P`)ePBxiexFEJnLC-?RFObYb9{U~Idv5DDuhf6V_V2HN{xQaZRC6z=g*F5 zZ*J$~$`fFl%gW}50&JMh1T3}Olq<8Wlh9iGF3&7DCHR>b!>$aC$E-~sQj_Zk_Md($ zHp)Lw!^po-u74xLm6v*u+Efb{ck4W+=Fr`K&2eZ}K0*+XT25Dy<;4;@P~LlzZ|UQR z^x>X(D_T=$g`90wD0@i#c{6y%HbPK85#~6_KK5wusYn#}lUAbUNaw*)%q7lD8E1=) zJm!3sjQtzCgeO^BAvUn1z6|9SWY8R?9TS@3nv-dA{bu6Py!dj}q zutxSzsUL!Z$XQ!1jO=gNzT$$MH?-uyvu%q+3G~-(_9~n;bAxTW^!!kR&HR70wRkogMf5(WGC-ccna45#Qq$;Hn1 zs|!u}LQbBMs}>b47D}Rq-3Gj!V+^5A*B?#;+S4VMDzZ*^!qlGeTe~rwX48Nkdx&_3 zZ+r~n7cj)L>BJai9e!v+GAvp+x(Ga2e{B}Ucacq1|Cw>*P}o!DxmeFTK*!F`=ypI`hJfL)h+(H! zi@`5(GON0PIOhuIrydQks+lc&nYeYCkq3O=fX129pw=8=8nc{r^|GN%L;%mrtmBY% z&xPXwvd6+2rZ`>qVDOXxGnJyYo*xFU{SPw{=c9pMZRf09;)>tqdNkg56VWLMR(f+j zp6=YIFP^fb*JLl&A_aS6zwrd!>r_5UF4}io^u#B!tLOXv|vOtQM~HK_qTjaQY%7fX(TutT95aJn&iV7slf6@ z8@@Bolze+=HguKJ;l~x*_KdiYmUSA=@+Q2Oz2$XW4B>k%Ci4Q zg2x$0z`BlTBZ7)PL~`~SRKyu7x&*dQU6Gj~o$Ws~+`rj9ywEmZ8R^&VvEBXPX)^dz z5%%L-=VNY#qbMr++slvIHz6A@kN#%5jA9{g8`GT2@zY~fNTcKJN8qDDqrw@gGtXLu za%ixAZGD3p49{CVTE$S}Txk|}0@{jXLWX`u6OY;cfRi7rbQjkBH2h}<$6?|%iR z5np+0}Kn;vsee(h4$ap_3YR`>frAoxtPf;bQ6!#HjuTp5-kjOQrn>_$bI_^ z9S@u#S!$@r+wZ(p21}Ux6(uuglL;?q6@Qu>zSJy4V>}VBcf}p&ec}88-{Ktu3Po5r zl%8BOVn{|SjO#HDeq|lRPVxc9%*vC#BsR&Zw>#R+7#wMPpBq$(pXl;?)fX?8fO<2O zSH#fuPa!^?s3j90?NGE4|Ld_ddDs*s7B99*KN^c(en(XDx3dgIL#OZ>b3$t{XUW}7 zHbS40ke_&m=jb^8N#4VNMP{r?g57H&~S+yC|qNJuL= zw1Nl%QqnC7C@n~rFm!jr020#OC7nZeg91aBbSaWUchB$fob!F(zW}alu4nJH?sb3G zPMs55Z?}wmg@Bc0E_q2kN=+fD3N!jDrOYUDK-mAZVRwe!vR_J#P<);fol*7bqks9A zPX&Disr2*X>XgJ$>*!6!Yst+$dW{z9jL-!%>pX||L|UJ$OXOu+Ut01p!c@&={KNz= z?KU!eaV8rQ%!FZJIXs5D zcFwep`h12C48zkl@SLFZIU|o}9@&!*7E09URK^gXPtvFA7lGx2MeX|I(HPMwJ|Fr6 zgw#hI2HjusJd+z~Zgz=o&AYlAdwD6YujK!pTIt&yQ68GzsJpELLFT$-jg$=xD%y8b z|JF~+%q`aGV;0lP-Q#40%j@Rd=Ov#SGel;t!4<;xo?F<8G=FC2M^xSMwJ|$>RgNkj zy|(BCN;c+F<#6D){|AyhfR6@53kuG%Ro4lli8ffc;}{fhv|EpOf>VLFXWnmgh0^!@ z(~aqwzsd^0B5PxbCx&Exdn<2Bv@W;>Wul29qEdDywP%#*3fELe7U8j{XPuZzzb=_B zc8XmpDv01cPEd(7p1QIm-bR~W?+mbhpv?^!@|A^bVa9?TC4*mgeTHS&Q*UOOOx4j4 zGQOwRO7I_nW&CcIsF8u5|Lw*(0xjT3at{;hORa*e`TDx@m?C!vYaXNP2@=M2A3GTB zft=V{iH#l%Z{z5D6HtLmb6NW&+a41m1>pBdi8wZfioV74^Y>uc;5_!&XQP`vo9H%> zcfl8&grJ0rL+$TX&lZl6t_@4N7fit8WnX&~2I_+NI0>2a=;yf}OfxQ39ET?!`hF-y z34ee$pL|N4?2NZg;nKYct-E^kv@zt=;Fm*@`avH;p0ScuIECCQJRFqVX4tw|ACKJT9A zL1iRQ9i{t<#H3#Cc!exPrZ+t4ve~v`F@aO^ z?F=V02TkPBx}biqcaHp!YpS1nXQziw(0&X){!s!=ZRplEIs^`SS5T|%lYrJtfGPbp zRqAovGt9kpKBZghxg!BDF)8tZ6Q( zC50$Cz-OZ6tJN2;@iA2TB)oQ{b?ola{WdG@3n9`Hlx@o7G!~+y&8A_HJWDs0=hxsM 
zh9Xb?c=}RkX#5!WKDL6@#~QQx3$Ph!fSv-r?2iU}4Ar3OsAhyTGHv|BCX{j{b0-+W$+UoLkgZ83K#=AA+hXC)lV93PY1go zb2m4X02t#tY)wwZ|+lz*$;8fhpfn6QMU?uB#J+sHmbcw>k63bVUr!wPb*LDwD*3# za7?eo1)PwHlWvo$IQo#L5Laa;bVjNvJ`AaDK7t{VAWv8z$b{Y9HA5se;Oiarzj**! zFmoAK%Qfzo{ORK)mEc0gD(UQ({?N;%EHngXWm+OzKb-}(v9&vX^&cN9GwgW$Z#B9! zmgw*wT;;}OmlN$@JHEhN)Hl02vqR{$RF{h=jIjsn_a>S16finQd)#@iOMnM>s@34#?-3xsp&j{h@J>XP+pBcRJ6ZA)tp3YO<7}q!Qz)FF{9Vo zx0{x+KlECj%XhR?d@oPM?!I%+;J{d6ABKthBF2hY?T-4pUkdx!)%@ico!|mx^u`v` zzCF)=(nN5sDMyXb;zlXS9hO^c{r}N(z8rK|zxe9hbD(~aMGf2EAwD&#zZmHVvR0^n zMRDbl)FEDY8((8`!HiLa2SUq017Z38YW8Bitz;w2u5UrTwjtC)9G%bK=yM6?&842~ z-8pE}twnXedp3+NVSZrmM^ex{vQB6Lfw6s&W~~d9K~Kkfj=mNk{{f~8da)mF$bgO* z)Ob}UhWcRYqs!35~@!W42?L$VJ-^&6_rjyHAo` z>r65S-1S?HAUmf}o}gw|_Ou*(nj4v04NGwq=^7AY?LB4;UY{nHX*lsIPcE`oB?BlW zeF_uzoL#{BM)J&r$c)xXt`$M4RyiaV)xi4~_q+tC;izsd)byB1G@rsbv*PfCmk!yD zHFRuqJL@Oqmwi-?TYCWX%35+pYEL)-3S4M!_A`&~c~7Eg7|{i4d=6i7TN{B~Y4kpQ z)OW3rH>i70R_?1$R(@ib43-;kek5R$833jI_^tQd{LRAk0-ecFcxNn`P1bNqa&UBA z;x(KQD!~(jSEWe}s4xBYsRCXVScvCGee^X)*wUakm}8*)w`3gCmwpst2JnuG7Bqf} zbg+auF;J4Y`is#CeerLwIU+Nx75Fy=d!48=^f@Z3y8N|?Z5dO+kVfLT=u_g&C8BG>3Azw4BE*lWIo|?;*0@aM zy$#k_fwAgNb;7#7=w|q7E_0fxm zK~m^(Eblfi>^CS~>LQH;E@~h$I_jk=N^~5pCxhGP@nB+sq#H)h@G` zxE)VEEpyCOdghzdi5u$RE(Zd|qdV87m0JhHe(N0FA5qzvQYj7yzf=9N^SvR;NU0Ze z1v>xo;6<4E>QZd_|>eWkGD##ntsJ@PYk>52VmAfqvP^d?*N&|o# zg&I3KV#-m|X3!o-!g|n3w-a=`#KCRed463nNp(y_FN^jp1ut`10ZflBgGb3lk2(I0 z#>7l)i+pQ)BI)kIQR8l7)k@GamG!K!NjD>z zNWi2p;zIw*ZH_~f8lde}$c$HSG^S>;xJdsUn-MuD+h5Yp+SjL9;K?L=F-!y$TPB{5 zgnvG3S=>$5aK}EH{Yr}dCdu783FGlmn2Ch;^`Nt;BB4QECQ!!ZMYtjJ`^8#(!Q|f~ z#tbbX2yV$5d+P033C3ZpC~NDfAwwa*&Z=wGNK!ag3|od&m+XKgMw97%r6ZkP>hHiu zh?r_zq;*x|2%8sX>yG#3f54rX3RXJ;_PV>yjjLPly_UpY+OxlUvvVQzpYu5dJ8t6NC!Cp# zzYSUfkFWRc%bl48eY#5ho#K?gtz|a&hsy`LkuQ1E5?~-ho;U<=Q`^Kp{C-3p^_zzo zL;10_0mCiD?v6sdv0}pFtEE!V#1FCkPvXgI=YyXIk+E?zN%iPdh8R~S-J!#Km`WlS zu(e{bz}UCg>;xuYYqZd>>lZOBj}7CxrGchK#Y;NK2(P;Guk}u@~ja z9*Af#X%KhpS}K-gPvt8PFVE(acWu4LC9>|nt6u0UOl45*n-vIl4r$eQeVszzy3|y? 
zdW_wKo4RpSPotVjCu@2;Vm&{=X$EQ$^W^JKw4?mFb2(HsTA@crTt~e*3s=z*Zg)Lu zS-jo$+xX$neN~cl(v|sZBf!Z=x?44>5%nM}P1joI@va}%0$lj?kPB)_?*GgZRv)}9 z3xUBT9w$*$mz4K=-qGhbZL6UWZnx`Ek~vZU2nEX`XFWRnGB<`8aMUrTu*c;Z-5-+XR`$^0>=dae4{K-J@L z=t)~-+OcgHGs>x>d2^Yt<&u1Nr8nz+M(7xvPzQHUQX&F&#d$dwLeoOowdI#>u{?2~ zCE6btJJ&fkmGpSDFR~dl5UMF7ul-!fm6E<5x%~3QD0_)bhI^X{ldvM)p~Zr9fB0N7 z+|8+B1D`PN{i!rCPL7X6&Kk%yXXZ-*;=_+Fr!Emdtc!cPmGQi2=TaJV>-Dq6q+o0A zrOA&wS{(4RTV?EC3IT3sS0-6kVS0`(r6CcvH=ZgS5V6V-)3%=DZ|(%~8EIE(PcTT&#M8wr%>8r?2gDA7f;7}S`J|E# zQ%mca1d z7^TWF8J>-bNOF7YDaQW!B~&8FDcV05Zb%tI^{a$HGf!5gd?TZZsy@l z$=F+69xBQagJ+eD&)7SWMay`4ZgYQ%j&QCN)R}p6_E_TS?ZE6Qe6w$6W<+Z1i<0@K zxbCGU`MrFXHb(YiO&M>dIqmDQz~TC|rltL4Y{n^prK}z^5FU|T*O{4fg=LfIr$24E zl)c|xLRZq;YAx9LZT&`u9gX{98~+K9xj6LyEwmK2OwffuZXnK^Kj$NpRlD_$4_sHN z-9=q=^YxK<$OMn|wFk_4Ml)S;3{{XS7|IoLG@Fjx9N>`4WJ#tD_k0K**!#Ym|FfMp zx52xx#m^B|@L@Te2p>lIBic3C)sr)f7ldFU90ehUDO$VL zV3xqBtHMQb=0Yj)Rbz4@MxLkD3{RaMeX766GZBn&d`^O{#{1T!As@Gd*l?wuNFTcX zzrISyxmZ#Bd!tcK16Uv@IS2vDyC;&UX-?_(O-APNGpH+2LVDvUHEnF-CRl4_}->{w)Adw3aI+%>M>FYc@JDv~aDNBw>;0;Q9jger?971}=nMd@L zU7S1}7qGcHH9?9ohc|2&qgfkbWf>gFRE$*f6%}lF65NYUp-)attEesrY7QfJCUlYW zzL1py1lbawpH0k9yD@bnP3($4Fe z7$R(w+TcfhBX}bsxSnxpGFqco0|hiF_7{Orv0lkVvm2HSQrd3poR>S;(!zaq>j#9; z>A!1)iv8rVl=yV&hBDEN&c<)EuP6^+20)*kJnsjSIG1NYIyLfXox5&SMh}tv< zXbL1f<(!+wNLP)So4!sbfl3Jjx`~2GTEMY)L%2_a7ftGbH`QVR3VSu=rb%_<#xXJf zGKPiw%Y`<(-8m^ATH9A+yv8iwO3}?AQ@TE5!RhE~*g6rTNgK>PnkR_fu?onKi zx@{Y#z1CuiDoi#la=2!k#*mMD?ywaot1DLTttz~t01wHJ4z6}{)p$q zw7XRAF!K~LE14Oxunxth>*ucSLL6o8h5z2E9?7VoTJidDo5p88@x*@dS?(CWc5{M> zM)LU#rx15)RF4U{QoZ6&X0No_IVXt(??B$acoFsj4et~U5jPW7h#T(EUI0H& ziFam}qU z(0E{3b=la|XcPfkFiED$GYNu0kTe%7_J;oKI=>!7LK;Fp?;xI>Ub@1(EYqlR^usWT z=q;M}*FBwqBNHNsMB3*{#boWvrRN2TKij$eqS1&*4Vzi4yvexF4i^XM7>6isKm6Q? zOXrAJU#5+-&C|6OK;yx{xMtvIr_#c)N3T1%kr$P=%M2$xjki4fI`#Lalpis!rRO4s z_mL-N7}d6Rm~QC@mL)?IgU$Ac#U@th83^O|1VW=-It-BO1B%0RfIxYXz^#1YUQnCPn!|lDFagfzUi9dTCY6l+m6NA(4K;1n-9}Cv>XN{CQu72mb>MZ>( zA;MucSxS@q_b zM8}S(vDH)S*+wlK#&if6%4sx!8E3M`V^&VJJEsqB3OVt#JI^xR-6|2jdG-Y${nj8O7{$puBg z4*dn7+CqwHw3Hv*Mqqhx!pUeM#>rXU#ZYPIlhsN+r{;$mr;WS3<200Au3&G=5Uw14 z!v}QkFVe+s{39CMm>Xknz4{YkYCn>=amgn%@(q6mVczGq{rV=Q04fDFfNarn-sD=b z+KergxTs>(f_`c0NM#|Y7^O1E%D|#@);i^!W4Ag!o8_u1sO{Q~$+FjO#{18NHzTJS zD(L1UxQ7@`*A{{Ec*xFeQ#RFVG&!8~=+r@e9~Chw3ZonLz+SnJk>EbNztrHjXvbAS z&7J-ot{qLVKo>$fM0sf_Dn+hJa1Vcft1`C9obV_`adxy~(Tt-qxdc6HFto_!w6Dbt zM5QeX(NBX_O2tDr5o>sadHpU^vPjso6f|n2q&czh%KB06UiI7d2#D)aL2kJ`fHN zHXh`zjmMpPk^YbrXNf5f^M^jT#W1l8N%!M;#^6arnlMPU3s^rgHx2aKoA#=j;TONV zn!*Xu%BIkGDjPhk2y&f-l)JL%e!U!(XwH)xm?zhRbAd_H_l>I8`M}X7dLGb_8Ug&t zo{S#zDbRDOu^-J=^@Q0kfC~l{2K3%tj{}BHXB4sHg6sjGOOm^hk8_5ch%O5DXnY!8 zJGN$GJ9)xlGsSKFOib@EXaC9*ol#g58*$O=27GsQkr{3D%YD^F2$zkrSN2#StMiM- zH*!=pfzRY9`T}NMM)Yn^)%TKu370sEEDIyv$nrVgp{c|SSFCPhdr$j?I#@Y$C(gDa zjnd?^8YPj&S;OZ1864=14NZc*ZP|X^&7VH{^P_}4fs1d5q>gz1ak>9N&<2R1x`AWx z&pC%xdKK0nNT;Lta9v1(x-qF!AuMCN*1La<7eh8h^MzlUfV8)MHRZXRjQe$NiU zoT8sc=Vk&Sl*a<0d8+h~%_m%FaIkEleWWDRo-d~S%Zs4Us)x|W$P$?0qet^?R1&5~ zDYU&iGeodxNj2YS(qE#%g3_B@b>M*k5bqvW&_KY2ja`>-_LnG;D)ZaD5=mF3;p;hK zQXKD^F$Pb0mW4FrrjdU*zA|kN%MTnw+|#dfRO0FifM&ircAqihS1gUgNY&mH9sPJb zBfRMSOJV(A!US=t6YvVUO|Xe7z1DnLelLtH?b!NHm_LMG>y8p68yw+Qe0Vvsb3?t3 zPK}u|B*Bdoe$0sOY)YTH&oj(&gZt%u!fb>`e^SiTIW0XfGefsgQg&|Gy|x0EIFZh!9HPX_1P0p#shz4jbGD%H>aHz$2EeA~*6Z z9=s(OvJ2{A#nXt$(f7|?Bmn=*h#0&n| za~$e6J7A3q$zE9z;J_#VHz05srMyigbTQB9aVZTcC6p8G9tw0TP|r5))FKZi6uTLQ zNx8GX6GU9G2U70-p-_Yggi^_b=E7H?JTcZ1FK7FlGl_qEqrm&aQ@kC576%4dxfqaM zZy>H-tQP4W{a?XAiXV6I zX*z9HZ`~%V^Jr&8A2YW8-b6wXDZXiXxL%|%@|oW~PT_+k{rR;fy89RF8Q+)BWk2q{ z{{aP%$fttz*GWTXOacqzY@6 zoqC)kk*Y7 
zUq`!sAQK`1gNiqgB8RKXSe4xRlNBE~463sErVi|GU6`(#AT`6^Wd<83LKksDxC%D@ z1-8OIs1D9AU`3r%3!0QPnC#Ot{jjTwp)tVLG+!RQg#NnVH`(C3ym30*5qjpdR7md2 zu-|ev!8Zazj<4i0t0upN+A6-~#5r}Rhisp457Obp;MwaoJ_P38JeY0EEIR)Z>}i^9 z5e>VC-CGioExh+Kr@Fn#a0(w1>=s8+$Oik^(XV-IC5D1HJIOM=A}1pZoE*KGLsNSz&=h%`{OOYkJD|0&y!0|`3o1iIP70uj7(EcZ2qohk{_1UjK4^jy?1C zYHuu!W%J6&QK5No((i|SB;CdvDQ#yxO?enjCsJtgEcWjqF(Y~%7=$hn4AwWL77eK} z+rCUOK5wRR_-6ff;CbP=5>QdmHXi`b_2m01dPr>S_(!u)7^V7;H-N2hrken|=TFbs zMHFeQC9W~sD+!V@Jo6g$C>m;(q_gqj|J-Hs=c70+@E7{H3!c5Erm;}Qre~&>32mX( zjt0h+AcSWDh|KGq4nSv0f3-?}%t+{1oj#fn&iCouA{RTOER~T znbyED&!v-8cqhTV$2%MZFA9+Gy=PbK(!!B%c9}B9)XRp&RRse>(KxHhH3lN*7l$Nh@r>)9RyT~17=gdr+t}(YVrTFKi_MYFVD|=p}q;v@SSo&S|Ii> zdZVpt5zLFp*;Af`pOa@qY1N7Fk4TN)Mr@b;>6mdrx0&>SJ{P3^KY{yoD1eBLoj|Vers5{u@15hf>8w>NvaYtu ztzm2w^KuQtkcuO`wFIIBusdLOrmkgru2*{h{Uzj}Rc0`*8{GE-$(MLLSn)ANO$vn=?q0%{4d<<8!GL1M%!sHY(ZR zhK8~8ez{#=p&K?q=XC{hQLyTEVWC1@mL} zKVEK5%@85KHLTOm0|N8|Ves!#5wHkB$9waV?2M zh^kSgrUy}IuxphYC(1oa)ItI>W4>}kyO2eDxU+elRKwqTC1WL`$=`a=TkfeTyvHEC z68B%`LN`x8KdK>~sKDXN*p7Ky&h((kj*^+yb`xig-z=ZXBLr7~`oMF!hGDeJmnt)P zCr8+!cx!_|EGPf3pc!K4hG)rQ!8N>LnM7Y3c+3Qo(Bm*^_w&ah$(rmemY*N>%I9-H zZV>q-d`2luW(o(O1+?i$!5`VFGey~nPlh?bAipBZ7D~pWs0U&!lPV_Tk(i@4w65+!RRYg$(6cEA&wOoGh zH#utZz$yQ-m-KT`B9BD2;npWr#I1cA?v}lDYbckwUOlDJRPU+LaCpxMVU&`tp<|Jq zsn3r5d029xhsiqLIGOk}HfjLKNo)(X@^SG|tozi&S0ep`<$-8Y+?RVw@w?Crho{`W z*Q8sf!^?ffE z=(l#5u^<;geXIK096V#RqtJ`HLzOKAhaDU1eaP5&-sjkMJQEf2BQxJw>q*ZE=PyT> z7;#`*H1{w4Kf!h21b-O`Y7lRl_AX|{q&!;cGm(YjhZt%BeJLw3+dm<+9NjcqAi z)7xdo|41QXY_~C3;@n_5bZMr_x6;M^`MT%nq?TxNK~13C-##KMQ{|QkD%%44d3~a! zq`8tzymcl=K&ynw3_Z8vDfl3r@o-B;p9~O;dgS68OTtUIV%(5zhrITsc`hs}n}g5= zZ2934VkIN53&#*r_nRk*EO9C<`&powvwD;H1sUP%@Bc!%Q4M(X84=CyVxr{s{oqTB z_J9B=*Mca*Ih{w9gh@`E`(YbJ%~`P>kFx3Bt=>)LWm)V{42Y1tknO zVBPuHquN_m0)K5hZX_gOtc`%eE3N70fg~j_L7h!QGgr*s1!b~KRT?T}G0B#C&MZw~ zbfjSBn?F=+Xy#3cmw|_%DoTs|{C$Tvc_{OiadP#kq%wl3k3?fn%-ryf$O~G@~ z3ifNz4}FsD6Z)Q~g~qE;*SWV~@=I5q?Mmy;+?FlXhGgHGb~T>&@x9`|;LKar|B-R> zADFHnZnUFg=h}r&d+vANyHOhPjh(-zOMar`%+upez~2*-rhsz-*o;8cpkoT`M^zjA z!pXq%=(v}uHzBxwsy`N2kNUV$sm!9r*N@k#L}9$3uXu_#`?W)VH%O=c<~}VNn(v-S z0ew-{?@q%W4hw|B(jIEu0@1WXtEI243W`l-k5HL2`-n~2uZ}J_IB~xR#6fR32PtJu zBnWSo+JB(H3Mjd~ZJvUG)b}RVW3Ha%3?K%mD!P1d%QM;F6hGA6$9XQlwwX)*`!(R^ z5@31ED0L_PCT35SKdbCD8x#4fob5}=TNz3VvL+j+{%uX7&I+0fCG5EGnS_n(iNdU_ zj6dI?e>G?%uoX92y%$2uV#@-q>S57~g(!^me1lt4?U}La)#S0dCWDz>N`;AGhdDWl z*$WTjGanr#5xVhXE(By&sbP$!4APP1c1uo_YQj)!g|5{V6I4E2Pi`T4RU`L$Qr7EOplc5maB)fIq1^LxV`z(F<)M?8*&+S#Wr?XS}YuaTC4qile zZF>Qf8sfKY9~PYcO{w)XseL*4=vaU?6mk2N-p{RE+ZcXX&Jq}1h$l~5AHgD08+2lKKKsBdoattcSsqlS6OK-&`q@pjDU(w$=*vVnDmtMrnwB(xC8UZde4Z}Tb!rAidA2h9_k%F$keml zh$_e6jT6zs#d^J^%`pz5E>P}M1&8MUloffMrVoX$mom^0_QNk~V!!*qjEa=~^iyR(d+vzT`pyZ_{a6GTg^}80h z)1^PL$@v98X&c(~v>0X_uf@CyAl_m`xN^P3l)k42kjqxX!92(Ejfp9v)l6wTy!#XX zOV;P1!*p2J0SzJTXoTri%A+h#9;On0`AG*y8ZbYTow2GCAN}ZeDFEEw*AahC-IlCXz&dckHiIbmq2Vk+aB#gZ)?{!N=pN<&#rarQ0sk*)es#6wuE{hFN zkh@_}e+&jQ#>Lh8+Q(Jjf{a3~WxeG*rIfg1;tT$F(ziGY7 zr|tm&G=*acmhX88z?bh?>GMQ;-iy@G)!`=9C>i?rA^S0n$=gS_^}CzkkBj7PV#<0v zarjvi?`MgShE{7|Sp7(l`3 zV;_=HGJhyiu3o0{VmW?*F_c0736)WuCxCAVt4IcK{ib2}V|4+MK-mOi^;Vf%2eea$d=?=v2y5hW%U zMj|Rzh`-HfRY(wPMjoNTjAe)mgTMq~CZHaWP<8n`*auUB1Csg;Kf%{v?7iZt^OWJ# zz*cil;Tw5oPJi=gCJ!K>(=~EPGw%K3D_j%%_e)=8|ZQ zxz5XMBYnytL;2n4YZYC-G1?b)c3o{8fN7W#$OwpZqsC8j|g;A|Jdj= z&EnXyh%{ajXdHp$$=EGq^)2wVw#Fq09GkP>#;+^I8T||!j47f^Hd)7FR;#!AeWfPN zje%Ga`Vx8Uk{QNdc_1PBoTpyMyNv(*<~Q6(&3M(UkMI|AF{Fy+tf;zf;NVrk^0L8#5lv z4A!kN+`x6*NYofW^xtM@?nP&4tf(oroKZ2TZrObzGCOV&t?|Y+wbQvYIz-av^+{oBj^2*uOzm1uhyjvG`g@4zn7b; 
zeS0_(^PQm#+N}~Bn4B@&*7R9kD!z{Q%_!r#d}w+nzDe%AsG~myYU;3JDw6_g=m;vo zc@#BV?BY%H*^Q1#u?|1TFI<<(9M|h{6?u)<7hsq8-csBa^eht%k+sd(%fuE3s2bAE zWtFYkih_Lix4&O#tmYf8x;JKN8bi~2m%enwe6ibT;5RvsYCQ8~aq+z&+*+9!JGE`o z|QV#Cy+o{oL&A z4V)@M=D2J##g`c>cf>SNOO>QYb?fY;j80bHT%O5GldJm~&_im%0AltgmuPn38B31b zvjle9w-ABl)o{W0$eSs!<(txSWxOsbkPN0CNUeJ!u>OFB;k{x+2L}xbnEUVLPI_pv z2GxNQ3P5y0(ju~$#h5at`Uaa%E6h5Y7e;WY#iCW*-Za2T4+Ad4;q4uPtXv$86E*p_=6i5VSmTC zaDIG!Ek!pc^LrvalyEJD=K9;i**5S<4}vCgXb{p(D_k+RGw~#Oj5#C!77qenqUG0o z;)KSz1F-AiP~@(o#!${qCTUlX7*gou`0TbninS7wk$zJyrc3?yc*!}!L$ltR+#6cDPc9XqPznrr_yF=VCC${?*;5FEU)bndDvKc~KvHPTQm~Dk9iI=-Kn3MO|TmFQy2qaas`2pIYG>yFA?^ zxJKjkr73OdDV=3@0!m@X*tG|t-iMRh&c=hvAX_*I)1si_mW}_Sfm1_NlI6K=(iFOK zZPKj>`qx7H^48Hk&u?ZtKlGQbE|$)Ylm59luZI86XqUvVLWg11V1l1_kKw&A7MGeX z@l2;3YP8st6;&i!BgbpPpw~)~9Oiy{|2ydONtugSZyc=*#lPiOHTF=u?B(HlApd$r zB5N9xAgMyjtX*J(MpULE*2LwMfGW6-8r*;y9Y!rg{alZZ$|$SE_be^A%Y`1GNV0j# zTiezbrq_9v84M{vLLlQ{*FhjR-(c#B^OqYaNz=iiumRA7Nar+a*2AGa_RtR@S~izD zd43!M-I?paiTItOOmmv1g|msby=FI)L3=OBa`0q1d)iuSKrzSxEvjdo5FitLuV7H~ zEdanLN!Je+EQZZgeA=OU!$Y%4%1NB_3S?r}!dFcWlGJm2nBVc7&^D~7#yo5lL>VHD zX!tJ-p3Axpo!3}<`z@EjS~zE6ZFo$JZ>;i{yf=bAF{T}6Q@iEf{mI3BV@x^Nwz2Rz zXJQ{&F>##cD+-R!WPTr>^^BeJS0c;3FoKew06LFOeOay}C!%Oyoz!tduC4ELSMup> zIM>!zc=xttmY;q69*g3M$=euf?@EF=9(ZTCi6ahUZyPHo#2<=jYl4o@cbme_M*CbVBp`|uKWXz05U|~Vvdnk$UrT4$xVgXzOHD5hJ zMW9v?HSi;Xh<0^DTl$keHaaDQSX)lFYqXrxaTphJ%;r&p?=?3oSELF*rY_pfIu_+B zJ~|MYYs(DKW_>u`*Fq?&jMH(>BOE=eo$~*! zMX|eb{wMOtr_^glSG=2H8MbJB9r(9qh-2zxcYQ48`A@l)YfVPnH&cs4l*Ax@3HMA- z!e5TGUr)8v2h~bnWZp(huD=57S(x&X3+H^FQ97R(wtGoSWm4?`SH7iB`L|6D9v2&*v^|kA;2JD5REXL2_;^k89VoDwTZ`tAylb%pH}LFq^X1=w zQ@MA3(`Vv``aV*Soh7dX)6=vT&^mzIRtFGr1=qxxe>0vw5Ta5q!~^-=Zpn~bhziK& zH1{ArHLu*N_~+S0RBVfE^;4WId!%9NWH``XoA-LhuPud+1^D1xr*#vR^l!A zdFpP`enS7E+lSXwrfDT$;3?V|Y&0x*f}luTHsK@1yy83iXP+A|Of$UtQBnLyToIV#$xqD8b= zW(u6&t>OoZuqUC;VFsx)J9#>2SS|@ ziCaLOwTe^{Z!M-ZDdHS%P@#)pp>NB9>l3eAwmY=}Aaun)iMCp{ZXg*pCFoC!$m z7|&cKxS+mrWDqCrd$bfm9PbHW$Q5zsCHo7Tiw~z@+GF{ERwS92&w~2#?M6PBw7kk) zb-Je&J|CVcfG;j;Qgumd4o%>Cv9<-(Tdr0%>-t8b1Qqu$=2*nJf(aGw;U}ert&=laZbK2u9{m7?7>UMzQ*Eu z{#(#f15Y#^cJgUB_;R_FndcQ>z`9CLb1Pn|bxo9hV8Cw%dUTwsQJ;mv!a>!Bq1dGO zTE&Qk+-2=6utfLMKA5}+uQ{?%ubInI`bm`XZ~7q9_CJkZPnk*DD@1BUA zH=ziXx3UY3+5M(?KW$xbJDMdrLaJ}XtA8cH)RNmd1m*RrC?DQCXDs*$@Lo> zPW%QTq|fCs5#x5)PtfvlCAnem1mLGxlqTE@w}@k3@veU>T-?-Nv7VPEjirQ7<;et& z1v`7bjShR~2do?=xMzqWVibPt62<v~So2miGn+M*`)Bor9ISHoWt>&+xc~ zc;q1MVgGpRJ_8_`a*D#wmlbF`0;)rfd;y?xCqPL&kF*}G)&OUlU?J}C$5fWH<$)nd z?Q;0bCgjT|F3P2^LY2_R&1Ie#7Rq_K3;K<05Lbo@y(#G%Em(U+b0y;jB6cv*(LPVWt{yH<`NFLB=4bf@d|!&|@K#j1OU)UW(-qs%_uz zAaG3NR+mNRdm5VU2J5en?OB5`vio6;j$IlYwP&qlAU^7Ivwn+Qr7jWXWbC+#6UZbU zlT;sEaLDyT2pqjWgO(=$LY$|E+`uz2p+Zw{@N@aIWZl>Yn(ZIcW62NEp#smJ*e?L` z5-$@ENEoEcdw?wwV7T=2W5V`ev_-ex{F@9#NM`TZ{vZQyxd`5FMnR}Sj1o17Q@&U4 zfaX-9GD*pEob@xRsDr?JS;6E!<1?InD?y$P%OyeV#;VZ$@vR;L_51u=R6BHO_*a^P zwfZQ*?3NIVp4ipPtZllqHKPOVGR=Z0wJ>10+x=hSy-SzQF(j|IW^j~F&Z)T;@{Lob z$qOehI+;l$jyY;zd&ABl7GvxBgC)1Q_RH*g<^T_nP7nyA<_eSxBHG%gRAspyIA=ndnld=%jnrd##iQF~{?fWQ*I4(q+aMqjKol54Gcdg?i4- z{<~rgrZk7Yj4@t53;dO~T|CAttP1q_r&yX8hPV;eQki%u!`CR9?5KTp{tL&?c5x@a z?2?`qOromNZaN^{W}#T8;e1^B!-3s;nyl!EJMk0G7&Z|{PEyrJ0R>Ui?+S|nh?e|D zws3w`ch!9uW=ZfA7^j9W_d^4qd}hZNPv~-Jf(CTy2GC3}ZVvcg1cuKJSRS8a-NGr0u0f$HTpZTxc%pE1cb5U7kMw89t!UsK?N_;+v`K zh1@8ao#MKS)^)A8FW+8ozKzwMY`A3jMnC`P@@^@v=zYxR8?!PU-CL8gr4&cVR)4El zu`=Y3?#&(Jj@Y1CD^R3fXB&&?5?Do4SKqcD8oyHS>EieEyM5?K?mVd**fzUeHLQ6T zW34O9pCpE(WrPp7z*QQUV`nldg%|F1t81Ugvsw8QJNqg>A9=()2=cb8gSc!e?C}#S z$@I-GoI-C??=o^dBbR|uNs035%rx@lJ+RzOT!Z(ZS+6C^e7|pbq+&>dU!O3`NMyTr 
[base85-encoded binary data for docs/oscar.PNG omitted]

literal 0
HcmV?d00001

diff --git a/docs/oscar_logo.png b/docs/oscar_logo.png
new file mode 100644
index 0000000000000000000000000000000000000000..e7aca84c9f7608f56b9da2d2f76fdc991bb44ac0
GIT binary patch
literal 121006
[base85-encoded binary data for docs/oscar_logo.png omitted]
zE@eBX>;K?;-^k~84?=TRgi~I<$j-Z~kcrD#aB!8ygY84Ne>l0YpWSzFtXaDbq&2^v z6B{*hG-AA|u^Dsw&0Du5Qu}#anv12Wy}Bs5o6(~ROU4qAULj(~e#BZ@`OP7>(Cr`n zNPOgDA5Ei-0|ySq-FN*YzVem7$82^z(M4W~C!Tx??(0BC8u75!D5F!Zs{({>r~Lt1g67My02Rh(&B8pW9t#*I&FPIXm^=zP{v)?Iun{+?+d99;L$`gylP+dfl*hv8+~Z%1#yeepU*WA0lv8G#aQ;jviRCpz7iK-d~tm0 z)1QtdAh%I)of0@KnKPF4Y05v0R2VU}I(BUbsewETy#c@DouO#5+C$KXepAlM47jFp z-y$ptLt!`x5Z0(TjPq~mzMp&c*?5qUawpgqv!~X9%=%)!4%Fy_z+4a@5m(G<|{2>w$jzCs5nNP1pAHQ3-nu~DP^X1#<cEv)0v!DquWk$>xn4u^2tf*+S$aU1M0YdWh>@Tq#_;vt8a@ z2#wjy!-_@dh8>p5s~s}QZEXbYC3Cc4a`t09aTGSYEFuG=;>#^)I3aue^AMVwVi#6c znzCMa(M1*W`vOTQ4pcX^7o;|C?!4sujIJ+-v5D*Yu08Y2)0y4cdJHQ3B043D8*ZRD zA~#{>#fFq~(2&H9FM>*Mej)xBKh&8h6b8^n|{iT z1@$fkuV{1ubFL`%Fr4g`En73=fYDpz>W&>h77su2FxGI-$Mx4;7ytf0{D-uxY~a~q zB8qj=(R$d3Va&&r*s^J3=8V&+6rNqjwnYAJ)bfCMn@-I;u3H0yRDsYk`UGpK>xqPf z@F|>HdN8x!ib0dv;a4Nw7~Wcoq@_0Aa}_>kwIHq&2IywFYI-sl@o#;yCBFOJ>e#l| zs63T=K$^mS5v8Zt1`@x0a&bX4em-y5bS!r4h7fLQK%Z70Gp5zVweM+;CD(Pu+{v8& zxDxcj#VdpqB^15mGh#}9N*Kul7oA7`N_*Z@{B503VqKuQhRqH zG-scelX?Q*DLvk+YfcUiTMvk)a61>uxfW*eUult;;cxomCcqD=3^? zj6b-8{9+m^%mh(-&?+v}YS0a6It%@{4iPi*OJDk8{OOI? zbPhrNp4OedF2X_nAPEW04sHq&EkH$VcisJySi62*{Kx6VCI4G zAjxf;H}%iIt_R8CyAC|?8>CiD+nM@XtYhI?qI-!Pw1EtAKd^_~ck0AVtz;&Nr=NHt zI^oVPUeH3|;fQl4qyOsSzEZEn=ZQEBfBN?J7}?qwciz`btX0xA5wEaIj02)cX$CEb zl;o{^FWLdAA*MXeZGA_338~c{yEGixSW9}9rueq;Pg;4e2cFVcz}2|R1s0ausS}UfS6Ce_K$`ohQLy6 zDRS8d2euPEkBDsbnl-TRyYiupeAf%VL|l0hn6^K@L|!6PS$tE`3`fj14te!NB9w@{ zbj;R|OCya05`boCQEbDIi&E^Dp|_;?VDQC(aSy)w_WfKQ3WIw%cns>|#$Z{xbjfwG z2tT*Sh`0IVlTYS>Xr_e!{@?!$gm)l*4}GTkv(y>74*Zj7FwMNg#(u<^HTbV-;Nv~H zPOMGN{ouxb^Z6~#fVwlHx*)casFWadA287_N{W9~AFRU=tiw1bg zywx%*ep57Q7!AdUUI2Z?&R6^5&U<^}z~Q?1-QR1D#S7Z;VWTy;8}p0tcp8x?zBc$T zXac-xJt7PadFl~vi@jMOhvJIO7bk*$%P|Yj#rC#GX9t;Cvoiol<6jC9Kdv(-O{>LM zvxXqVC(;G!=+Pq>%#EUF**m&d9ivgnC=oh*2oZn#Vf6fp9a%ex1uRU9L2|$Acb3Y# z3ZXfXi5S=vW{nmw22ZZQ;&(-hzPp(U-5GzmV2z8zv4|qCI8bq>mX&f_`o*b-8-ul>5&z{pL{k00 zUw$f}y;AdQ-;{a3+l%u(8rO@PZ@MYkX3W6P=C0(7&6@DV|L=>*1^n*s{a)$?%DHmQ z46!tR!lZb0|3O5L`=D4^E7qYJ>a?6z>(2EoANvy5lD%)Y$1ve$V`IY^U^akC!TTIFZ*jb8~fy4h0t;qi)n4UPd zjW~im1oS4#u&b8GC&;66jwTW@@<&5WXG|H>!&=wG{OU%mmz&V3@<0{H5TqsI=wp9^ zwhE*n7cMd>;$kII^1ePqh+SaV=a%;%fvJg^Gir#=Qyo*M)W)Rob!2)*veQt5ZV&O@ zIQ-d~aXp|j_9ux_FH~~lGC4dy|%6W}PWH=4yY~w5n1nPKH ztfrW&(xIfh1is|N2KVwepU=b$CFusjq4JyZR%U?x;(zv)j=3I&a%S#V_ftvO zDbhQL%gVj?-4iQTt;D=M;@T^!~laqO>!2=Plyt@l_$u(o|uBPDgmojp?4|9d1U`f04k}-QBSb(dFaI zBvje={4Vj_fN{mNiMkRr5VfQ>&YjbRcogoGSzq{@@8_9q9GAx75@3BfR|03b;#|qYdDaP}7Y#5u`kK zZdI&aADNp_BenNlcO3*>LCjgb^^G1qmS7R1;>e++#B?lD&!ouM`L^oB&?OS=9}T~p znewhfXaZnzNFo>jP2e0{L{R29)9*_bTgot>;XX~mcfGP36XD~j7n3@9;_)Y->YjsQ z*%s~mp1?$fwjnU9XE6Y;1_iTb&cw-gR{D79dnU3hN&$vtKgx}po29(?oA#gyf?|u&obQivWE8|OF{9^oHfA**8KUg;I`l<|{eok8~Kkz*MX$O;wt}7k2 zU3a;)A#hbb|C{%pHv{U-*Km>tD>;`ZI& z;hVSzL?;4ERSG{b$NWs6Y_h0@2`fJF)l36b`0RBf1*&3hs_0x(eNW7oj@5KyU7my6 zIq%E>V%CtdSYIn)u%=F)j-9{;#F8hH*cYKmn7ED(=WOnccO61=@?CGipc}C8k;>r2M5t(MYs+v< z>E9*%R6wnOV85SSZ0qe__0HCR%WkA2(ql{%KoLgty{!i?eT1t8fzTS`N9hxpCjzDBs>sCDO^0 zy~u)~{!D@}5t?&K$-Rh9W*?SzPdxcVtX{W)R427@`Negy=mH$3ML=*~d8h-hu7z+b z2bH=f_MQiC9`{`Gw>p5ihupDKM;wno`nRN+xuz!8Y-k`hVHKgTi2Jy`C$_$#J{?5n zGv)J@>6Gi}P2^VmCK5sxQ>;I5kgYK;bFdve)Dw?A(;4eFkV$(}P29S)DHdMb0r5;6 z6V8tsBay((`Hakr8M&R8lq%6Mw}52fu*kGsh1H4Us)94jW_T>cj6O>rdO>nkb-aLG z5my)Ocgb^YcNA2B4Ni=6rLpvSXDhqi20$9*q41+XSUQs^i70Q8&wAfdLs6c zMP%Q;y&$xS)NF69qZ-4Jri`wnY5xgSGfJ-%J7zLr=gvAA&)M1=?>dC$WF{5pBD#`8 zistk?c5F|hk1{~9&u9Q*B!0tTbwN39!-BB{0!0dnMHCy(nl&@SEVa#;K|H}>Nv$V2 zWINRJoAUOZ0o@yB&z>C@FIp6jKmKG!VENX!zD1_q_r%m`(@yHRA+W|1O=ZXSEf7Bk 
z@xQ_UjpyqiW`rxuMNYc^H(yRa1MV-2Z~2)I-B=^~UE)w;!#m2|s(yRT>X+lGryffu z`U}skk1L6_IAuKX88PVKe1{{EyRcIK=LO#RBB$aT|8o0_%xqk-Bxe018mi;UOS@vh zqCTSW*Tt62b#cd?HL>%@MBu?{u?D|1tFcP!u;e-7HA=}Bm7j}yiu>f^JT&C`cA|^= z-jAx|>F1iTwyZ*bPC)c=wJ~KI!)5O5N!tLkhs-5fQaxN- zmB^C`ts`Pc6BfcoL^6A|(L#)BWB`ad^3dt`d$y37bN;SEXeBaJtawVX5I|`}eKWH* zvM}e6cE!{tI|-}w)KgEz^UuGKkv{}dVbeMk9nNfP!wh^;%$YMgebnUA`b%ZA_J9A) zhqv_%7}(fs3WgsuqquQ9zlRv|KmF(bjE*en;OxKLeG3H3L_{vzi5inZnGI~LpA-a+ zoA#72_O>4VueNi=wow;lX7PeM8*JH_+!>tCL89e6_UNOrYRxK4v??w)ijF~h&FSyHaZYD)`9@=0!aOw1d5NB zeGuCYdzVar5^Smf@aK(Kd{Jy#o=dIa#Fjj9Oat|i)!?3&@Ijn{=1)W<{ z6=Tz~urZp3k;oLT>BP}0Vo-vxJLTqTq8W8a+ibGaFQ|zNh=?*_Y)2eN|9ALUZEW3H z8_zx49M8Sj6T1(QN%}Z>3_Eac26@#o(KFjfc6v)ahAO>?Yd8vwE`nhKlhG zQ((f+CZt+gic5I-O7ne^vC3_Va$M)Lh1A{^2(6?5<;;9}FnQPvEjk0PAVSAJEEDI? zI~OtGD56O`7Ej=?={1#}E`(I$ILzl4EVw8ZLCxz6=C!h&q4oZ{d@l}!H$R#R5$mq* zujF)dR$h0$%a;%Ny zZuD91(?%$NxwEqPyW;#VxPK#jknyMCUvIZVRNhPd|1Zggkx7Kj?Dg#{|%OGp8w z@RsNYeU|zmOD~p*eIT)Js&8+X8)=AbYy0BmRRPysi{mtP$jTw!+q4}64~&7TI*2R+ z`&V1bp^aW#k*Iogi-+_;tM;P&mOV(MmN$BDey~T>3v$q;zk{|q57m&)i9;X6Y+V2D zI7)kc1X;!h46dmUCnrTjkdtp}7eL zze;PVQ*AQiCvGE3x6w6BQ}Sctb2Y&l zc7qMOy3F=K?Fo1u>jlCrz|g)FpF~U!QqIgv<#=kqw)QQb=eO)BRaleJH+rk(Y|H`= zOmVNz*CqUO6R_1oy<4{tXA%+S1CQ6n2_%$9NL(tX$uMZB=s`6Z1Jo-fc~DhP`&BrD z!;yiw*`0+LR%DINvY}S4m6TW5>+jgp9?v{KEZ%<`;?LGDkS00keiArNcM`g9Jq=Vu zyVRXiK!3CVmQy=Q1Q$8Dp46cg%*4>uDc0!B{r&YsXq8zjH;C3p2_OLC&<&QiviL51 z3#{^jixv=hU>)Mc=K&(*7XXFc?{nwO&aBd=GV%MbcPS>Y+;mQl2a#zZ4Cj>zFgaLE z%qfCWCy=GGQH>0s%v{7}P+;R>`by-5O|%HiLrcr2a*a+kpRSdPW`ik<)JkIFLZamR zoP9AV>qd-t%s61Z%^<W= z-5($Q=*KdMbXkM7xp6fP8I2pp3C8*=;;>`Kjz9!;XC5_OF*5cbV#rL?ng`nJ>qptn zJN13=UMpeAYvr1|CySF50|wyBtmldvjqYKvTi4!FKC6@N=1rSO!g)W3_MvE-Gc1-a zMdvge=UqgETJnh!if5eTVxDbBU$H(AkjSi?+`Q|y)x`r3HpW9wM66x~fkh^Z=3ygZ z{-QRB)l2Yc8=nSY!_^1DA-SfTHgAYEtJZKntjCG^)yyj>5vEIyW{Ui0I2D9uUqn!{ zhk4lhp2v&>a#Xx{KqS>aPLP8zua%*D4y(l~;vlN`(}_F9r2arXS%*3^K3a|;QapOp z;LX4ruvRptDF}O$E{Qaq9ZMn}IIwCsy$qlX7bnd3*#>Rtk*=Ss^IpgGty{f`h7`3cp4vr*8mV^oj)=@RE2S;E}ux;~Z5SlI&Mhh9V-|0r#Z~Jp5 z%daOwV_MVyf=%M`lvMJmCeB6E0_PRugAB(_08nUggFn#b8#NL>Z&WEZN`j@ ziD(@@GkCL}s@}KDwZZ)?4~A3qO+d`ze6TY`R0j3kzI{h*-mIa_4&qmOSXZ)GGq={4 zbvjv=+$qJ!{=b7Dx9CeAWBIe+<$Uvuk5dg&nyFniFw(8@y*;hbJ93R zE5!Bd*MlfcmovZtmRx{@hxyRv&!39}^?8}C+ddYK?)^Q?e%sBmJ^Nrvd5N2sURs{n z4K}kme}JFbfBBO?&OQ!$I(MfBLMuqukZDGH7YQCd)PaeA5iGgBj)MuVGo$mf@onSR zwG5s|$5?)U?Re!5IC2#+f*9n|C4!tAtsDCb;AI^K3%~DYKZ{Ldq#ia5pR>y&&YOv> zOud;11yP;ny=$B$!4jB)^AKrtVM*9q+ZYc&(Hj5!{dS^@-~im+fQY#*u7JzFZt1l# zd;Sdc;3SA`8Jp3BloY5Ba!=@@v3(ms6IQN@rwEz$6xlDfpg+~{p$B3aRT6$|a^-z- z1koS(m**0)Dt+>^CsNVBDiM|+K_b{1@ zy!*un*h4x_q*_()#HmKQR4f&pZLPY{11HBi&Gryudxi$O8F5v<9Vs9LHwF5C` zb&a7Q+C)6 zxhC3X%t&9f0u+i-ovyy(*LTj}{<&fvL>h8lT5qmfw;plArp&Kq@MHmTV9dp4%TcmZ z+;Bj@|M}xaD2vF7?;P|V3W^I`T8hv(qY+6cXZPLA(jp$Yurkb#{SrahuAhr&11Rk| zSQI^Kji{dMV;}oi_S;Zoih|4+rxof3kX**f+`S76uXP~3b(#3_cYf!0(s;wvGUmE7 zk*9hB>oX!tzW8j!efw*2*K4o6Hvaiv{v{1WjOPCFPktiW+Sq+Pq662 z9DqlNCen@ArK!0Houwlx?)ph&`S8HJzNV^ohZ{swiOfV8 zA~Xpp4XsMuna}&EzpZ0kAAjtLICi)lZlNKrS{yN^1B>Pl**ks zrj*a-q=`u7-4cybA4rbt&RrD<(o6@)YYQW*?f?A@TQ6&*YSe}8=tnj6RUP0F54lN8Iz zlyI>*T)G0Fj0p18zyC`76I_{}4TW?rh|EK5F8RqC5GR%8Y)pPx_t|_e510ac1XRaf za5JWP(1LTrhV^;C?m;C>tSS$bWYD@D!8g~SdpPTaoQHYk$kbS2FJ|v|KiLrf_>C^Ixi`lpix;73bV^A}E@GAYg0zx_|&jyryI7tY=X;Nq&|(n}$@2otq+D+oH; z4ftO{>c9+rDxwCt4}}5ynvKGd`I+An(TH+Da_YMTSP>gf)48H!eDiyvp{&BYi|31b z1XIT-vK3kKmh;H@iAF7ds{y$yw(n!TMiQTL?6@l6THWID z%sS$kF&qNW^|fv#V*1>=j#@YtgbMBPyiVHoDPR3#$?lzzKfe|T&56vH(8y;eQ~>na z&{B%ijPUVSpZi=!TX&PqIA=y&cinrDpP!eM`q1azN#6zBa!$&AdtjTtZ3i5f9LvUy 
z8#5Jz#u?r_09$T2t*>yVPX7HDeJh_Y-tFJ`@#FI4lU~hUFs>;?1@UEn6mjhb;q3yk@7S>u@#4-*E2Fqo zB$ixaJ{=r{uf%6R``LJwgY|vydtY2~*=11eqm$D+Rqv8pBkhb_?CxDVIFt`YE1SBh z2jtIYH*kV;UC!gF>UgJJ8w?AB*IVSJXy3!l>#mOkqjMu-cOD8|RJl+CBiEo}E75zL z_kH`wmOQB{7GFlx{IMsH)+vTl2PfK-ASn-Sk7fRGt{)+E)1%MU#y@?tBlhfXitBNn z|M(XTJWJmag7Uq9rJ{ZgVhcbd9Mur%SOV#)t3Zetip`iYExrR` zyzGVDh}5d_C##Js>$?%x)kga<9x%E-h}cXYsG^yN9uQy;datTzH()XC)!+mzBKB`O zP1}FPj)}OuRv;Y3g7zuVhn)c#iQF&|$zdpSI7-gI0RuS+ zP2pPt_85j=9p**jj8*KIVf;X9C7hgT*nj6DG}bFOjEm0VdGI(nr4=JfmEV20c(3vAoQvzKB3paFr4&CU7DJMmA>3H|o~F_7|h5ZDId6K(*3nIT(h zYH0B=QN*Sg(9PM_HiP$RAT!eFz+E}7&YhFv=Morw9}18|uMPfv@aLg@I9uQQ`|Fz0 zcT4{@KFdXlu%}F(lKy8Nh;BSHjlV)9R_AYXI@=cr$%FMD|M4HVT(L4=y;v2Vdv0SaxS%sy>Np@3 zH~O4sJ-RXxTaEC_uw}+$lk-pATSQ5u~@nG<-1ZD+tLbCHBJ{V z)dK!*$@_8;`L+1&c-~?;c?Z75%Fg!Vj_lfN--DO__{t+pb}`?Bt9V;}F2|K80X-DqXs}@%LUmy3j&u+=`hV#^ z2gv9Ac<>H`bC`%2?wskl`N5*orq7v5v=klgxA_tAJ4y38&5tPvFxQj3{86D z$T34k0TZB|3^Yu;_9Ewvr;;f0SEW(HO{HH#Q1;WR38b8oM6i2pbvN7=K4mk8qh~%5 z+jhc{VT>ZDHGNV~+<0R>8QW10(7zV+l1;;p;U3o;%>*SL1?N@|N7)0XTZOoM`g90j zl6`izW7fax6;j0%T(|97>`ycL@;cNLMp)8pzXyb-^)n20Wxey(Gydlth0q**u@Q=m z<9DfmL}Y#`=F?C8#rxY4t7)P3*MI%jv7Q6T0KqpdT|$_uS-HXbZ+;tp$W0~SR*FC) zC!r3=ylbmgt!5)^O);6yrwQcTn}!tVw@d;$ZJN>jry*{eoCmi0p}a3QL~#I@8>VdM zOuoD42G>9Mvt!8@ghye%&0(l@6ja^dQjW`aeP%SNHn*LJrA8hFR z-^5h1m z8;?Csc&PeOaocS-6Zzs24(t)!;eDr`Yuhi!!A<$uzFW#eyZqhtvgb7zMVvEzcHDgP zt+5&1?Bh@TEPnb>Rm`1P8}q03MMH&SP0L1n#nANePwoiE=%&sXby;<7JMsh!07+I)StRk96!aV&@nlhIBiCrKV;j_C7Q_CbI zKw49pTYW(OP+CC8z#~o%Yn*P%s^%~eiBw8yXW2*FH z>mjsQTU$5WZEf^yMHIYaOSo5y~jm;Y;W}^;WOkU);^#)7<6UQ{;+`5}>7|8EzGUs+&6W>LE

VthDmSYxx2Ogar|f1}koMsY64SIcDF7Bi}Ea zvs;rh^}5JBEnssDgXV(z%egGu^Sc{1=Z4SvlT9Fd!YwzyFRs4&s(AXTr{h7Qiabx& zh5!9O|8Mb!|L%|C5C8BFGk2PO%;&6(&xXsIcFv60uy!@`Z~*FFI7a8t=y<$`g8}i; zJFz%U_owTpm{B65%vMp3r<{viQ||Y>v#);5XZZ@8I%oJo@o(SW{UqId7IEQDQkXPh z*$81V5o7>Ts6sE*>wNJ~^9Un8sx#}w*nzlJao~6@h{YJJ-EreB7sYM2+)C)Cu{^_l zn)u6F4Jp~5O1~7(pQuEPFOjpbqXCrgQxeiS7V1$asC&o_H!O)2E0@QMFYJt!*Hp)} z30VE5-c{dm^#pdPkNX}th6N>@5^bsu zm_2i5%$+wc_q4GTbzAY>{W4?5j99p6VfO3A7njBAmyskuLctpS+pMHJfQ1Ui=>HRqH!=E-?xmig>jRoAs6(P@eo*O*G?c0Yivc1-TTgDd3z%?2kq!90Oq1OVRW&edY@WF+)DB3<7#qo#;R;*kZ z&p!8Din|mg3KSwRMW!OJc>ub(z0B3-0e{kAYR3|hl|4LlXUlo8&GK{kM9y*Li@zz# z?1GEZO7IXmC+FMj+!Z8C{Y=Sg!(I~MQG zK5)Bt6_?f3bS2V^BgH*VOg>ROme2Y}*m3Qh8xJLQhB{xT4h_y}9QstR4;{DP2anyl z3h?Voz3Y-C*TwvE=f$0O{y1Jn9QjwD`=4=;tPFqrU;k?~L#g_FS!WH0=y29&^5m(p zb<-wb87`APi6n6$=eu=7coYrT)Z3~!=VjNNlVV)ux)%P6uC?#2+7nD=gw;+pIAE_EwDtGCdHLKa%Fai z15>mnHv>5o-Av?~HFdFkb)X9#A2%$y35Wa1X_S%Shi1lW5z^vBnC9tgg=ll% zX~k>bI+-8)GYjdHMBSmo2Z(&~d_3~t6SyTk3DVmXKmJJP>EcI6ZeYT$b{W0WY#{ldM&&`~yH)vKSV zX8Xd0y0qxSC-W?ow=F_T0Aa(p0n!ncO|Ez$sn=qrPnwdPMDH)3;r%E{Iz8Y&^O?`Y zEGV1iW+&3+dS?`YV?Uew8Iahs&prpB@6GWEup;R5&pSVnnAUt!hY56yyF5J0gT(jp z+Up&8-tVTT4{mS~lZF!eIH>oq<9zv9MC&1BWDxt?Un*;qn>DwB7b*w4AMF$W%0s0N z3$L06jwIe2YU*)PZOX$UW9a235*Z1uij(wB(^0dCNMiroQ2EYvxjcUZ8nR9PWKrL0 z6bd(%QAQNk>aS)|G^;zp#7PsUlZ7Jf!=dczb=IdkJgy zs<2vfO%O41SoQQ2oSDWpj>ls=eV#xzQYpDVxpDv}eRbL|lN5Ioh+Q$=wr#jrfY{_x z&1TSoQ@Zz?gHSW^MHg1bd2>4vfq@Ft#V8u1uKYvY>dpG#G(<3+7&ok28<>?x%$YYg zF1Tm`;%Q9F`B&=B^6=&Ha&LIn7+TAaXLpdn{NYC)NtYI3-?OKQ;M1)bjy=Nl>#n;l zK7h__K18IF0a?SG3ye5~p(WSfKrGNF(2I4)zJmnzon{IZIZ*f>h`C}n_!-e1bJyeC zpqF}nWnFyx8+~#AgY2oACfrZ1i`(CSTP(cfQrrZlrv-NDGSbL-Hhkppm_289?j_xh zv>3JSrp75T@;xJB&a88>P`n`S`te<%zucF&gJv#v!fID=>oM+xJzwkhflk0TQJa5l~;DC==4;u|?~ zM9xF5d1X!gY~NE9-bBSb%G4+qS|AUeGZ@TGyPG~&%;DZ>8d?S(K@BhK?b4aj( zxt@jhCOE;inX_W~3(vvbn)w=E9}rvW0XdZ4*jn~?;jDjf2$iYNww!y39)r(%XnBw+ zW|T844nsx8AT|CgZaRXw-^$g?9lQ25Lnn7(85L0cqFhs&Q_FBKj zC3D+a;*tdsV@Kd*jEIieq{;&E7qg1dI@^F{1aV<4c&IiuZItV2%xFJ`LKDHdQwE@P zVBdJwDD^PpomSAUpZmZfvFLgjynH`q=pNuZ;fgd?`TalmgRH+Aql=su4t`w`=ASzU z@!s^6;Ev>_u!%qL5hA-b|@r5qXuF@h%i)^^v&-~B^%JaT_~w2l}TpZxfT z}AVdf;xN{al#HLt;WpHb;T8z zCzqKV0|P2HaRJoj_^e}SNdO;o-FrM(QnYjM0RBZ^A*;B>+4J9~58BFb;$< zCy1!NA`U!rBKzv*&@p=g7KG=*If>9UY|;GNooJ3FH+5gVapsyaco|u2>`MFgui)ys zD&szwddRI?acS7NG4+r+PnB79gLSd-6s7Ehi!zeTMHgKZ@5LBo5=mbj-_Uur4U4WH z*K^-K=JI9qoPYPXe;XH~U%33rD~cQVYq#VQS`f*!^3s*WK)7yx9a5{R3uB#fj{6^c z8+-u2Wd8@oFFX+m0+{g!IV;zqED0ZGKkn>hp0^Xa>c05)KYtg_cpXURAPFR~v}>** z19(GB8jXH&e0@xr+7U;3>c~dj5u3KOgV?HK{+uSF_wX5!Y!7~3hIw)!)bR*`&Q&#t zOt>-~%poGzZ{Hqp3Bxi+VJo_?oKF{zl5ady`O0&>u>~&ijyvv%2kyT=iJ^kS^zH)2 zz1K|Gj(snB?E5%#)TL^0HClf}I;ZPE4gO}MMz`YNKRtGCcrmuFCj?YS15yY5<^X;w zQvW9caaZ-=w7g?KesbTbkH;SCj1l7|#-~Us`v?E__ha1nk&Iu-61&d+@=79tXYF5( z$sY2(Eajf`lKR=<1VC5}{IygN&JX_Ohw>IwF5^wQa`RPO)lNvEp!u(tue)tA~Dqt>Q?im`oYfG zWhZsugJ0fK8GyXERLxs|*Z~fHxmW`Lb&C3#3@NIIprkxB8AI`l^}q3 z3Nmwa9tc`1K13vnwY)3FxZ?852+4Cr=4F#ZV}c3*78qV<5#R+Tb9+fuZbnJGZQG6% z8EUAZ4$RWOG4dPr3y~C&MI&Lmo9n7$Bro$Y?Sr%0OQu0Ljz>anpaf1MU=#sO8#&Zk zxqoy+O-yX4i}3`yYvQ+zO29KG5dH1P6u5&E(&*lGh#UEt!+<1% z>tA}q4JhQdrBeP2U-&|tf8hnM(a#x8WGv$1T|2hN5!%hn*O>7eZN-y&=$Oj-f3@$$ zoCt!&{6duE0cE-u^1%&4W_j?U-{j}mfTiMhzWq<}t?zs{cJ4V4Jyo3$HdSypW^ArO zCsi8@&+m>YBk^~Fuz7MtAZiD8;m$@jG{q$g`{JC*h#RqJEY3gRQ<$slh3t<2B;m!H z9VI8&-hJJODjN%3m&8v2(z&J*IF=Gdv>W8FR|mgRUw72>c*xPklI-EN{w*<^LG8qV6ylvm82jg2sn_3dZ@kaHF4jAO>xit zJ&0LHWBz|zeEQR$#I0r=XP_-=s4)P~_M+5Hmi_mdW$+#-t`vWj07zK<;UE1Q;*9Pk zG}pcH-G6Ez7A6Gq9EgMB7%2891IUnev|P?Btv_g)l5ILBKM#87RQ-`+ z*}_5n$YYPCf0F=k71Y$_ix=lc>^~r=?{uYXrK|IUe 
zMx}tUV!t}aa`VElaGFFHeI1y4eurpTyJlm2?WP-7yOD_TCP~k_9J`ynsjyknuoH zj)L#iI2c!uV76&!>mR21c&TzJk$fj|2z(Wif*R>K4RcQD00K3mx}UdXBxg>Hny zU%Pq@PWhLn&L|(?S3{>YfQV}V_$r9J_-%+Pq)8sxUzyM9(zsU@aUt-i00%`jmM1PpU(RtXVW*#(hV*&C%v zI?_ut3nQv5Y$2HykYf3e{V$*4hPPCPuJT)|?9Dvv0u*w1CW1wCu!9$9aGl<#c;;Iu z+`sFOC==h^tIP9fWNqp;z%U~|KJO#t!TfV*2k zG>=0Ou3Wh)DamWDzB;2-7#Y!a!p4p!Q z31OUougZ*WK?rZghH-#v;gU%61E``4meSTm2T_Gd1PQTIO?32nB*v33 z1F_AkjXf`c*j_jq?T0Y_v(V-*Ec8)DwtUVYV7Nv*G4yAF%7<*{+orqmg{_~J_$y~5}aqem0a*0w5hWSg$98LSs9 zSP)M>^%Qj+i0}X42XX8B-w*cz_f;w0CkHVW1Cdt{#~mL@2q6L+cSGG5$f2CW*D;o| z(zE<-Ps*VxN|Lj&)S0Q9@f5!m{p|rxHm%l&TP>_){F{_C?Z)Ij_8= z{Eg<`03tha!hLp>%+467058zNoG;Xqrs+JvHJ|sD?l6QlGdMvSp3#m-$L5N*gyCXb zT*~5a8cHes5FwWL`J)82T#j=B*;n*W9U;ic`@n@!i=oTGqi~~7^&)~Ll-H!$as85| z5Lg#vPCXvxx$FToI=2PlO$%qPc+N=_2`ip25<#M%><9PTb~fzOe&#ok%z5XX8@Igw z#`ycc-4ahd*AbVmsE+HdVhtfCl9=Qiw$Pk^EAJvXE}(GhCNvhqi!ozC9~(@qb1Wm^ zMD@%>*mdd!ZJ)k1w+>4SWY0ti0QYQZu$7_CvR}4yX3DP=p*ivW@?Z{<5)qwhXmVxG zKJyHbKK?Rw8polUJ=B^JbLi-LHON4tfipZmr|Qb-uhF;*rwO|hzK5WplT!aez+(*@ zj*#?Dk)q0H4KKz+$$#L3AB_2P=ix6koH-DPPhS#PX-5V8?pJn0-K|Qe>YY1wrB8^z z3zS(2Z*}-LwT`HV`z5B`L9L7;5z)n?2z7%7?)dRtcQRM|F(jypOD=4V zPk*8=uDqlpMmKh3I3?l5bSGN*wO7^0+D-MbW&?*G2yxQ5+PLTfxU=EvQzfa2gMDC9 zfEm8X#VE=W8R?AOO?ao_BWvPSLQL)7kFL&sOQ-RkrXj4aGb>yQ2a2K_2&CNs_ix%0 z`(M5aWKc|X!m)K#I-oI;`lGyu>t(4&T3~X^E_B1yVClvNta7uhifKG;R-sqkwv#9! z&l?)6I+onDAQmmU1UTdz1AeTt=mQ2@SQDx;Q0Hq-IM0_O7;ve(n>%+dBHIPH?#zPA zCw3^$TFSBIX9sR!@kWmw5m#P$aXj|O>{!3@1)`K7^1XmaHVuf`ne!Z7Wq$GzFY&lr zya=o;vd7YKjP53NDNfwAdtWR-e^%p}k!Jw}1O(3r&SZ_9v`Xer^f12?CiW&t5k3_2 zduE{gSA)<10v9UT(^GI|{x-NAK6E&_vcLbkzaz=s=0t+Rlm0_D6C~Dv!Yb5%*`@5K z3nJCDY18I-4n(yEie99qvRvPl(u7$ZjDY9?uo&lVEw4<3CtF@D>c9w`fq^A1=0i(9 zMK+4ZUIJO}Ks@4s%w+aIScA^0ad;hoLt1*og>x<(7VVR3Vm%1p@IJ&Z=(1+EkQ&8J z%Rk!~hUiUBt%?I*R_SaGGs^h^ga+gffTF1zdCj_nr9O?8g}3$Sa*OVP`|My?1zE!JPeS9#Uiv3PiQaZMLE6m@q-`Ufo0|%d=8gZ+%)>9<~1K&Kh3oD;}Q8tcxZr7 zuB#8^Fj(J2ZXh-zFCZ8=(vb4pWq*`=IU z#`5Hok0GMHGq!Br5>3F^;)V6`JHJ&MS6|itL1(mP3$KH;}-Z^(1<=mIwZ7*A`{4Vm4Adwi&AM6porPVBFfI$VC;J8~_ z3eF^-%|5^t@@3`PhFH5E#7@4v%NAc6Q*oY7aU}If>{}$*l;qrxLCq6SJQ*7|Y)H6O ziqi~?nS1Oa+!73!ef2fhWM+weq&8>_E}uHQnkv#`&AuQOEx07styl+l*a@;EL%?iR z0Bpd#FW>l6LEVlQWM!Y~Sj#Fn-C@Jz4h!e(!-p|XW*;WkZ6MSR2vq2%xMfUv{JEC++_`AZ_Aqp z-o57#2iL~qU2zQRZw6M53r06{z+v9a2BB%xM6MqjymIeWGVg9W(jJG<*X>{19WA3f zV$$LPf~5*8tCn-BW6n4@xRnjD<%uJ4?0{Bww1qC}p+_F({Z-P&-@no&3$UlXH4K9ALrWjm-n^(BUHxA}e!d2hWy=cmvj85@hX2 z9XjC9L=!pG9s-LPA}UbCEFZ55gox-Q(Rqy%*%PE@Sy!bY4?@ z@*@Ffb}U*r3~P`S^h(XUXddvq_W=|84%EQO_Q4JJKnzynOpUt=K3w2D^f&NU%*g<_ zaS@nJdY@`iRLIYY7n=Z=v7^aMy|F#RGaWg40PEM$K$^xjw#(TwP!SwdL+;Jl=$MSa zBRuA@&>#O3Gy*)8~#t0&WT$GV)it~YcX-G%phaY|@zJ%f1vge)$t_e>?e?0Sg zcs~qW9mW-68wN<5Hf~JhcN=hT(14r|tFix-6rnm#7

sH3nOQ3vlvBIH zzMey{?6(J+5jU1$dG*2zFOcem*lft4odW^0aX40o(;8yjT+*F`#A;iaTN=^|Y2QYy zs`fxZaaeYOM2;WDk(k$nCStlZW1W=vqq6zvukqxjqjSg|1ej%*n*_JAH@Usp(1K|R zu0GE*m1_fcp8UyLn=y1bpA5r=_NbUAkD)9)p9cjudUQ+4j~huO131QK_8mu@(-X(` zdjKLf^#+^dtqN+p3MKovMMV9WG$OWu!1u0&ghzRO07nY^7=k2%Oo}1f)xafVh zn)B$oEnD^?se{e|rk1cd3sF;X$C>%th+%*_xxIM!ZU=c9(V^J%Mg2~n`bfoH#~|3$ z#dwIRXP}d3rW0qV7Wbthbt!8PF?%P{E(p^~cNvd+_Zid!k70Ry2tUI~_!-`OOLbgv z9D^|lg#cOg{My%Spt2$P#BhdB4*QM7HryR!MiPpNyKFO0Xy2@mq7~r$`R8Jtx-?$d z^GfV9hymC0P7ttRYnQ~l+4C|2NiS9vB1{vyZrQjc4(!_-6UTIu z?xqvYU(l4B1c)6gNM+AHqzbz+#$ewX*>rbcOwDh zdEz~JmhxRLw;c{>*^A4f`>Va^;wF-u_&v#O=9=aFSR-@oXZ~w=G;`K$M3!S>-I{Im zMbRO;I3kFo;fHr~&Xpw*5MQZ*b|YHATTQ425ZFM3ks_u8Uo<$|1~|r?UhY-;;F;%3 zac~jV4y497oWF8{t@BKkUkXAik(q-?MX3jGj(T7i|z z*Q{A&n7#Stm_2J&%Djsi8K~o}eo6wYQi*$|EG19~I8t4@2pE|5Rs7#_VYtzLtHZkt zBzEN$S0q(lZuGLA)4dBeWph&JTcNhpyBO-~RVXkiI*~^m;>3}YF~T^nAx6w3gcCkn zPGdf+f`9=WgW5Z^8P9xjE!7Mw#ENo8mhp`kfe3*X9xB0gfG8+>VOv9szZAdvOZof2 zm7({S-^HDU&SLPD@^kTi(!6uY9yT$a+T0K*_**zc*tl-SVqIt#B&_k52Vv`ExV4*x zN9(w{*!uL5=tfN6f$UO0F#T{o@{teYH&f(mDqEK8^|0^&`Ot?xRPkLa=Bd2rt-m9x zq<(X5rQr^a-nsysgwUAP8Vo9oH@Tk!P^WJZn&KJZt+HBgy(?#;2A7?e!K3Y?Tt-2p zu|DPDN1qLhd+)vX5`S@9)Ptoiy=Yk6dJ{%RjqOp_$pMTWR#@%hP%j~<56+;Id)KU0 zucTFJUrZ(jqDefPux>OhBd`@8y&h%$RUj*6Z1?< zluh55#xy2!9wiR65R~xAZdd*|AD%m&Gd|}tT55jhJKsrzB;8rcbJqLv;quu;qLWD0 zITiyPM2@>z3#WxG0Ev=&Ht7ZL&+u3k-5>SzK{U6t^uX1!2kFG|<4BAkFx@?J-9GKQ zO6=%I>zas!T~q%R$kF~*UR*2t$zyM+oOOg|rv-v?;+_1{74I?9$6x-%XX9y*hTi8g zriKQ(>dGs_1f1qC(`wLre_6$YMI@{I`gypegNI(tG10Z`M{8>f&d&=;_IFiGh8iz* zE3dbI0)>>hX^Kn;WL7iUYf4TtkeX*NlM#;djQmFMy8xpU_4#yaPzRSat1N z)gJpd6J4CEBRFvMBRhd;XcU%+6G3RAxP18acJ5bjpXMTaKdzS}NCx zb!Ra}z0`a!dXgLqqNuvzRWapqGGh;~i46}QiT1-f`*z05D_11^+{6Z5zg5tc`Y{dfN5(#}8s0z#O5I$`CJhX`(fQP=UK$_m+cjCUjw zqPCec<2@L8-THxBaq^!JQU{*)?WrV)-AhJ~wK$O91E;q)T0x+9-Q7S8R&>>^q;MJA zi6wSdj2W*LCVc=#Yipa+H&TR*mWgpFg@Jg6t9*IIatP;x6{KY^Tm{CP^DdI-Zu;*( zXF-B^*|KFRHr3&~JZ}?W6pvB`9}+IHXr4GOi6`>g;@W_5Pt^&eAs|kit$RIZa^5R# z+DcLlW}h&K0jCKf$P=__bbdvPMc??#BZlEpzuH08>Uw@t{91&iqn@06$7-+7Tsf-< ztwdyYS4u7?bx_~SMR1M`QmQHbm%sew^z)HIb>b&ZoIpy3TQK}Mzv9g5aQax5!ot4s*gVQ zXqtt0v?Fe!?mEm3#}FN1!UYX6{2UOGMiJIk>0Q1J>T53==3UD=V#hK-t=_!;2HV_&m~q|Vj+lUWbP)p| zjahTGA}4h%cAr-dkoyWcl2>QoD>v0kY;H`#$6&2h(=Z}7Ja8zEAMDO>UM~?dP*L^G zM91H=e^+eVzn&-#^J3}sOVYI1`mEOepXuVkQx6Ab&0UCOH2fG^lv*^@H6%iF(-(Q8 zm_H7G9~6Fu!qw0^ektGGsLZpXHGTLDZKM#54rcRG;ICl%?ta+|x7mWO_F{YqFIjXL zYeqRv#e#?&si)NMkb_U+zF1dS8% z;tGtRb~ME>4*&WF#?ykeE(Gei=VDC3x*tA_g(O6)7PiT`Fc*Afnz5E9l&i?zDNM;=5(uGxIEB7=Yp6`XAz-oMC-k55K zQYUE3`7Fb%WL(%97F!eNn{K+{O>sRZwexzv z`ZidQjSF@{9f`ShFlV^<1fCHSfj5S>`JunA(x(;+*| zK_!sqLg(+l=+8kA*H+B>r(?V_A48Q^axwKC9n{}S-3Jjtt=!%oRZ#V_C*!u z002M$Nklwbnq>;BF(s(Asbb>f7H88~+Nnia8s*oL@Z!j&L9myY?T46TeiC>kR$PQc*9(m}YxcetRCECnt z_TAxV>^c!6>Uv@Vl96*pqURnroG3WB!lY;wzd8t-T?cVLT3dyF5wS92R`pICb`S_; ziw41P_|YTwJ#*5F*j#I6&q86xbIHFa74G*6TQG|purJ-33;=IHkiXbn)m;AIO7V{& zTbCa?;hg#q6*dt}p}C2uJ*;03*t9rnL`?dII`1d7m|uEWm)79~B!)QX>TARg7K={K z%*J70Z149>U}8mRY2`@v1dpH0jYfrqi$_6FWT$czuWjdzUOD3kO`t0|GdEz#@}8<= zV(uT`aVP0EK9~Mfid&qEH2hey6zb|yKDRPALl*H=m7Sq$TI1*^_TYmL6NzF&iULKj z0)ruqF1h4l^kkQ247bwU-rk*J&0o_a$6)~A)og&rAA2G>GR=Bx0ivl`{}EIn4lpN& z_x4819*{bIDz(5z71S_iK>}e;1r6rr=H1PHXiHD*ei@VRL-^|;R@6VN4elmqjX%RN zu#iI&I7?uAP3PXg1+5_!jqC%dY{vv&GyFnNN8nO6=QtdtXW(=_4wK%B)c+ZuS=3rNH z*H1k8M24=aJ>C?XR%~Z(A3?i~g;^6hq)xSK2UYuKKe`u=9wS;ir-YVi8P;lOWtBbS zy;`o7p@E9Ua_m6gFL4hH?Oy+P`-WkX{nWRp)ZJu1nRfT@`t|EUYFofpJuwrcbl!ZT zh;TOcbjr&?_AnfmD_4|5HAlzcy>DMNChQmhLD)6J+0C6@8&fBPLbPqqoAQbNuKeN; z%o)168IwB+@QYqbE6 z6FC)_hWRM^>>0r93?MVR&NWlms57>XlM3H`rhHC|Y=d>Hs~&;j4QUJ*Ayu$0KI`mc 
z12k7w<+JWfyP__znM7{K+iO$2-_?z~Lq1cqYcH)wF7fzL2m+XbgmJj_f@mvng;I>N z{*(IqCcc~%geHs#?1Pb+6Oo0U`}qJ0*DwCf{~O=@<~LIbn@*AZroqRpx7>_Ew=L!9 zXJsS1sB~s%-5Q`g^zb9-$)3;1{p!w~XdP=sVwWI;SBF>X$jav{BdZLr7@vz|$d@7+ z=gaBqWN0H(uB_j4c-ZMG;M?=-ULZH?c4jhj?8=(ld9LHDy-4Ke`dNxQE zoJS4s4cyy2ny^&(=L{PUm5h%2)$N_cJM4-U3_i{qX?=*@L$WLeHkNU7fQ1`-013rmEsjP)9iU=9FE^P_v>p8rZdyEH&zP^t4L9isE3h`)Q-u2#cm=b86vK=NF5ZpH2}Nk z&8d#*lSxs+n#qkx78<-3v|Ws~d>Vwebx&Qa*;-h_O@tsjZ*Fgl8VUEJyxd7hn6RGL zQ_4PJPy{?IUVJ=0{E4Rc$8R5vpZ???3`H8_6CeL5R*mx#VYyd(Aq<6`924VmUGhb{ z5|cF?{@XWggaH3x{LA-$94p8WKfIwYUr5d{V2Ff`2v4S3R)b z%6xgy>im6feNSA`LU=Edd+wOh5ciQe`mx>Bv6eIE+yAKOuOqI!{Hhp+f!V}qQ)3=c zJsw4hql73yL!h9PXHNo&t=VS?4d2_^+A_VAoRO2~BYA($qdjvc_GTb*g9>Q$F>*9I zQ4Lm*<#VdApI9%)p3h{H1Ao}eEE#ZX)(C3a?~3-OL6Z*@*3kCJd9CPLY1K2Hi$$ee z=f~tk2P%H+pUx;k6ZcRs27H_GkE4Je=YnH;AEPu0|JSkWYOm zvW_9op0Te?YPmmLXl2ft-;2z4?%tK66Wbb(1<+z*Dqc+RQ6oa=$e4g}ijD~U{^lGU zf$CX}UhH}NMmBBPoPl*2V)45GrWqFR^|gBESF;gB{#aRYl2?yuikA(qH@BSB02>58mH0n#dV#jd5%rln<2Nbdbk6P`5RMppZu^h9IZt^y|Eo)p75B zu-OU_*%qRFbVB{AxSuknHD*Coj%QQU6En|J(E=F|G3|u!ADd!PGtS!A)!pQ?ms61&DCw&=Q z>o5PaqjH~WNRx+DWiJ%trK5~NeRCQ#;TJWcmAHX3iSs#(7>2;U1XU5-%$1@xkq4Hl zRltqtZR5s1%+k`PlM>N$jCvbM$^o9nUvq%$$z^dg`9@|W&`VT?G5G(Pn zd-ld1cm9Aq-WebL=*JSN6-iyy2Nvz*Ud~>+SJi!X#Y)1Red}A_CN}52aS(sg^9ap$ z^Tg)3aa?_j#iOhd){`TfkrcWyVF77*7Ij97*rZZq=Y*~)VzZ08wgrH&U- zY8`GEiV>^%u1?P9_>s1XfA&-O_N8^KFQs59UiTcNj%^`TT&du+;iG-EsX2kBnoLy0Y6*^I8wm8{3 zfZl)q1F?GTnv?<49Tdl-Qa^SnCir9U(-@4%PSq2aPO;=G_@-$Esc)0qo`;mz-H^>7 ztuYvSbV7|E?BD6d^iLHZ`}^zk`Q;6j;((S&-Uf1A{wiWVxH88x zbJk2Ef`2OBf9uUL0;O{MA+iqd*cRK@tc)!$y%?i6ZivYZ==2zWL$+IRUvG95eQrM- z>`NfC1Bh$tak?Eo1(6Wpjf(M}T(fK9<}zvM7@TvvPqfCyM-FGcG-F&gque-wh;{_e z@9x?aKg8$pqaXPQ#ywML>EyajH)Z+-Zr!#uMaVbc2KT>4XAcI16yA8m=A0wt8JP1{ET8LbFXpbT=Id1;2y$Xs0+Pi-%sv-voMF z@^I!bML#za1BOq26l4Q=cmG4X@o~Kqv;3p+k&k{D>*ULm*r1TA~^ z`Sb<5=VuQ@=aEBk8A$BI6RP8S;*2)vZUB;U1`=bXG{PqbN`9`4A!`>Q@w46wC{$uW zGom{(K)1pn{p{rz;;+B(`81-t;imUxSgYUu%*LDf&Z$`J736J2UpHDU>FTVn5HoRR%6ogCr#$8t@b|P|QdF zyl^!oS8$k(oM*y<#DR!Vmd@i&V!PZ!5|7rS)3pXso{4h#CW7rDMNo~_T~wlk=S-Bh zjL;lt`AV*e2`h^eo`p*e=4)U1YW(freko01#nOsW79qyF@x~i77;GuZ^BJEMES$Uz zP}ReUQ8tFSWrOd@y5Hm8ji@poQu<2-f>;tL2_-e7B z)38bu7L1;OR58O-&DJ#5e{Ak9SJSsQ6ru1NQg}8Z&w5X8hFbKmoIel2B^5a5$}dadUkChd&~d?}sr$npjqH%6HdY zYgdVuyY9N1&B%oJPujpE>WIyXZ3_C495(ZaM50CAKq)BKb~gLL1N#t#V!i{+__FH> zW5tRNJo2mh#Q3DD?Nf%jvR{=c7d|xz*tT_RhWOGjXxxO_zy>EN_zv7jTnveExtKx# zmr$Mh;iD%S(OVlGpB!6=;pfb%hD)O@S$+Qlhi{}@tnr&?S-&YyN7KJ$^r z7><7V{s#{re*6hp8&<{p-+x0axqeB^m_aN=^j8h&2eryJ1lDqhv#uCcNz;|w9B{`H zPzu;rEc_gZdLQmcS6wtZ=FdDqXfV%PT`Ks;c)-o4Ei5G*B+*KsZzS4Puhc>i2p|1` z!N-xK$Haw;3>Al@32~~#ooRJi$)(eX^=~g6o>rdanOZhj)Kh+Y{qJuHp_Pct2|JZp ze!@H}Y`XQ&f9`+Ax4-l4l&_2EG-8AblTxEo;pJ9OSIdTD57h%326r z`45E91$VP$Ya^n5#hJ|0s0LgraEWP#;C&U!Z%{k0cRqv>iDlt3xfttU(dv7<;#|)9 z<_`f21U42%9w)j`Lt94Zi1R_ebZk!*Tui zhWPlzs#r+h8bI16pw=i$@}pP6%DJhG?Ua2z4VXF~a-*~GtNifTI*dmm?q0h(zWwcQ zkfD2ITzdHx8MAiI%(i$AYh4on@7=i@YfxN!kRZwVn{7h)nuxS3_a#HNI0pmVk3@+^ zO2^}=C!b1xweifu)mISXmCV(Big(qHafpdzl080EpWW-8;mkuX4x#NBgX|}?SRI)w zY|nrGw=^}k#G*?tgIk|UM3S{h@SE|wsU9nC3;@zn)@Oiwrhrm;&`+-08!T@fp$R-} zzQK(P;8}Q6PWz{S`hUld@4Pby17zm{7-!EsRE>oE;b9@Z()D5xW==Nul!0S!f`e3hLVi*|3Jglp#$#A^+sQzpGGBEJLRlKgx#Zgn-dYDc1G91ym z!}zta#)>dL^{G$AAN{+3hmX#beokZXx|fZ3O{0O@rk0pA=lmG;Z>GnLg_p*|-~3u^ zUWSKhWv|37!{8C=(O z<$EsR;v#1d1Oq^VIa5rM7$i!hvTVyqZpUu7J)L{r)V!{G{idov^vryinQw2Z-g`4s z)!pvgc6Zy7EGv@|MN)xcki?t`W)K7kBIhu_|2p>o2NxH_1w~1gH;Eh0J!glt_g-tS z9Q`7sN}X~_pLJh>^|dh%%uQ}_fLe2a;k1RhcDi1f5YF0bth;Y~{ToE6kWzhY=|T)8 z(Xy;2OUO8_pvK;f$gP5bOs7wsIz^x3M2nc^K8ulC91L=%R(5NhV6z~KuHQqaB4m{c zwxgc^48H_cX{=kVujIk>i&w7VRW@ 
z>((d`gTk$W0eIH$mnBc6kCEzp4q3K@%0kbHTR;}p*n&iy_M>YyqytL)I&;<>{P-&1 zE^$D=tWaCmeSXR~2uMvL?q3oyiG6*uCK`;^H>wZge7R$Srq23AXra)dXwh{6MTsT< z_y6sG^)NW?D2jD$DUtD?!ogLM=5-qNdlM57e!fLWsTWCwehmW+-Me7E6rAaIbr=Xf zAxgE z?*oIAF+@J;Fcv?s)rwG9zw^(3YlnAiM^+E)5)TRnD*CRKBg2v&{?4MeJJQ@`?L9(_ zk^+#DrIt`a8wxG1Ajw?wGTwlhi{N+F%I!Q-Fq3QnUn<4m2862<0WHS!a2Kd0c=S=? zDPAYh% zgHn4W7e&@M!)aXUYvh0}ah`~pa!F+{-f(Y)dn5Vz&*J-#ls%L{9~=;hk#<>)tXW06 zkvK^NO%{$*&hN*r+Qfq*RtoOh(w1RoFZ8=#CT@o|uVx6Y1j|UcHH9_v ztv9?X)sk>0nogMh?+_SBbh?Z_db{2y+^zpRDa zbcaZ|i`6mi9*A&k@zetQ7VBmO9H&f2RLc-tN`9%y(c3Sn+0ekF0@)}Mfo}^@lqN>_6e!oUJwg^Yb?^M5aa{$l9 z;6|dxEDqH!2BjV6;B^D_=(AuVf7JZK67=dM{lxF>LftvL+E@>M>9baU2 zs|80y;c%Yv4h-Y-vE3PSC4DN9cvPKJ5$l=~u?0W7p6oCG@-J;WRzXsXt5eFtEPdJ3 z%8KEEddI0tMW2-~?8O&fE={|0ERWS=nAWru=-CGP|SO`a$bgl@iPU~H;(xwzKJJ#B5$KlLmxFS*6H^2E! z`@=u{!(n2UR)x`<7)*>v7&DfjpZwwP```|3e_e}c)TYs<6R&$6WOfoU=S6gPy*f4W zFNR{VVjcliA42wyI7_C+zWJiILEYsweEV^{6hI0UoP#4Pm3Jh|VW!t&NZlX7| zy6eWGMGG)cKt`2V^SvBPm4EIfqzwv9HfvV3J^b*)DfPM%!6_D}_DN06EUfIFVNTV9 zks>qZR>B%x!V)%zO|A{CKN**dGDJ#xZls3K=o8)6`fy1Eun43W;X2hX{01RYiX5G^ zMNodPJ;TgmF=A4Tn1nNo2Jvmd> z?u+9U8rOUHSs~L@p~R3w=+rXM?d9MvHg{%^)lAJI6F{6uGI2XXN|&Gg8$M%=q%Xq3 z|GNZ9-k67*2x~^m#!;=_h5S_@NctCiNGu4))oYjS`4?ZY z7hiY<)6Hgk_yLU>=Vb4RM+`5C zmpCmCvdlO_T&#tl;p@PrgtnT4vu93Ye{jY7s~;r=&^wP)Z$O&DJ)?TU&!ak%^kvcn z%?C_p+#@{!&#=z0^U|69U;pR-LH?dM17gNPQa0wtIHh`8164wixxBKymccG-BkOec z#nW_R!yukQG71=0HT|5Mrtb;>F(d%@rU}a_p*X?;tTgkin~%? zQacjQD0!?JOuNl7wV~K&(VT2Ur-wCTKg28_`S(|z`I`L!#B3&FMi<+p2H2R|k}n9J zS-2@wFI;XLe(ML;MS7Q0d)^EBOaEo+(F7+Zo#3So%<|>Ho7jwTBA!fJmT$Ax7g#ni zhFz?a4xj{7mB_t9NZR%2?|E~G`|sH(I?Hf@C^+72?bmu;*Sn7Zd{VH>pGMYMa>aPz zg4@vjo^k&z z1t-qM0aZpfar6s;pO+3tcRr(u4t?@B`mAmUQHpT+DoY?JA2vS~etMC-wc)hjklSGgOFfO zO9@**YPo}Xi98Z6Np%%EM$MSZWMo+nqCRPcIr7^RRR#7js*!^S4%&|GJ8ky-Im7|Y z4;Tr?pyJ~T$H^tFmvGI5V^_fIichF*=Q_j&-t)=52&H)$2DaeF7qCh$h9Omp@^FXfhsbn# zW?+fG3s{FTnU-n)4fI43AwGz&&pj*2KCMB8KvYe9R)*x)kxa<)Piy(x1iKy9%_f=? 
zacCpR^^wB|9o9NL+sE@?m&m7N;7Sq_1F_r<97ONo4xqjtF~$DLlE6*^d0z!U^@=x zU9hAp;k<~G4z9`iw2MH+hrqQj*mJ+Yj-jE+p4gg+3&VgdszrqA6iIajb*N?S5&aX) z#uObA!4H907dHspi3SEqU32I|a=RTzmsg2IqRLjnQEpiOkllB`;$l`IVF^f$2Y7(O zDXo$Yi+x!5${GI}26j?U=uhHyxrHEchx-fgj>eNtnG``2z?>~)OwQ6q+6aCdvS0k- z7xw(~&w1(tU0btyHM)qcL^Q8(AgfbXF>`<6m%n835LyZu_1=B^+?Piorxfh>flV7d zbW1ve(2`O&gdh}0PDURJ_A8y8uE~*AqD~=}N@UPkw^6o>XsHfaugMZpBFezv0}&z< zG8c|aA)Sgv$Y|8%<`EcZY+uBp^X3KgRmb4W#2IO^-j8L~kAC!HTe@`VZHfP#j5mwL zJoBD)R=D;ddvD(fm%R%u`Wj**Ny%I?J;wtxmqWnzKLFWKAjTYCYTGyEHBQo6~uJ?@Y+=proL+P=F~t4TF65#QE z@PMX7;m5RgWI22O;w4W#^ZM(rlf7g&Ipx~zsYlD~kN=>9_^0@CijwN!z=Qe+LZe>( z^l_Zd`W?K;nMn2m9pH@&ME6;6mtB&sv|WSazG}yho&=T$?a@abvTr>54DmBpAbAjl z5;v&660{qs+)Rw6l$WppdkHZ)ipnqPgdgb0*>*d|BUKkCP0&J;7 z+rMQ%^}fhw&#tjYAAQ7iW&*Z~e72r@{&{p{7f67)&dr_`q*rFj4?eib6VsIuGcg@v zruqb!27D>rdG}pAc<7K@Qi=$P!&rfM@j-mbl-4{KF-Ww|-~^&miT;REfC0;Uq^>!J z-lJ8iMW{oQU4#kpyy9$3?Bm0t#o@^w{-B;D70rC2gdal~gx5dscWoN=?Fv3^Md=Pc$%|sU(@<{SQ81Z@&9BC(x5v zMB-akMmVo&aK1R!=93gMlM}uq5NgDm3Ys5VJM)yX_y$e-W%yo=G#r4VT26*2dZ@%e-O?uSA!U71{CN}97MC1~M2OU!D{t1A}l2(R8d2m{r@z;BjFmB0k zk#R*k+}R-xa@pt>3k%8RhSWeDS{%#DOvKrtzgqIMNu&A9zB2p1Pqg2{j1y{H{gVhz zMj;x5xJ#u|xz()uWaJ>E$$z*Ui4im5bbPjebz4WKUB1dYhX} zz68$x^u|fwVsL0`|H4I*${s*cK!ESZu{2!IT2MSjaf%|y;w~hq)PVdgy7=#M550cc zn+7b`V1;`a_nFsUdmUq$etdVA5)>VAUVa6KhFm{b#S&|#x2MZ4oxO;W%Te2hiT@EK z3pX0CV)!v+k3U*Tx}-j2!dSjJanI7~!Vh$8;sAO67u`ih|FwE5U0 zk0M7U{}3#sI&}@JujgNQf#@6;U5_QbS`(I0`>`6FHLHdJFLiNXI-S=&vO4_WgT0PG z8cDODNF<`@ixw@i2RK11E@WXrUP>x2R#xH>8K20x`UFH(#sD&(7T#u*5Lqnz(BI5P zA5iL?oyHq1<&lx6{v_})WM3TN9r_*?Et+44a;19!$8Jon3GbusDg4v-zyAY!{D~)6 zP;p?~GlA!F?$_l1%ee6hT9#_!lv* zcnAwa5gbK?E`b}9C7F^|MlHMip;RvbRH;{Fb*R)USK$VAI@F|8SI=}&&{8;zVyqXX zw@834m?dt~gou!xJy+*`L=q{rahmWrWcoePrUC~qE-kQmh!_=zP|*)~P1PfyGV+Ty zh*pFaz5@O9Q)l85Y`F^xxT-5b2|uZ3-8q@b3+uR(ckv`wi868sv-n2D1{&Se%BimJ z`R2EtMX#T2d*E3A>aYF^UE({OxI*!(tk*Ja`}S@2FaP|{_FLcnHYWYwVNq75-wh3# zzf$~C7R+C26_?AbwYv#~7o3EZQ!OZ(ui)Oqg}eO5i4>&UhEuPMobsnI(HzC;2K`I) zfzL{ms7&cf8Kjt_-g)&zo#SSSs^u^F0*AFT=w4+-`=cKtuH}H1Oz7QUBGhZ8>y zUz^~A2)QyZDC-0sB}jO2;x;;r8{QX2CMEc6OGb z#=;1WG+avtZO+slDVyM%xHqmF8~!%-gX1pX(t|Ng7YSRlFtg7B zu@ykbrdRdgE7xnQ7G>JG3puvyy*zvU%^^ge{kG%H?WAuiv6+8XjdW%T^DB&_(P2Y` zA=KPCb8);r>Eg#SMDP;HOIjg)n&?8hB|UijPC*K&IqQl~Z4#2<2W|m@4-<4)% z-P*MtMn)S`zBVtt^a`0=&)RwnL^f{R;Ne;X9OxMF_D1}*!g`b6h3csqF5#^0c>8VV z1eN+*EV5!RZbI)SeOLlrDO_4{5fDe_*jxyVzG%Vb zGC|9Ur8tjh9)t|k<#CIXAazU|kUN{%g_k>ftN};q4os2zs8CZUll(Y?Eks#fNi>k@ zxr(q)Bz;a`uAk6`7lLq#IBs)y{DZ3HzYj+JxrC$gp<4>Q#Ukgsa=ghrUpgOI-0ggO zr~Tjm<-d61LK)gbiLFB8$j?eJ{D1!Yf5#&1rv1tP_@|s0r}}-~OfUiZIJRg`_{+{9 z)kbl(O)Z^iE%i73paW5DZ7n&mo@T?=IZ1;up_j340#Io{yOEY5N)xRg?fTx_ESN zJL2s99K^a1VOFrQ!$Vym9AI{CKFaeze-`cX=lNNq^|@!hbeL6$xG;r`ILLtTD)LXE zyLc27<|B3KLhOW7d}Ef)slxC;$cx-s65II(13zI0#36vVrmbLDIP!`L0t%4AMEW!B zB`?}nBMTz9c9&nZoO;1$b?0IjC=KEaj^u!7CDjB*R%}(JeNiveb-jqt2&YV{O!9Ws z?dTTx$|AH1XY1JzjC{n8wF|Os>lQ@0Fbb_Lnb-f+(0TDow5_(2|E|ukRCw`E7O1?lM^0ZH_@Cd2SiwM6|joJTf{uIv2ovOW4bSnbqaT}k+4~|Ue z3+tLF`2Fb2dI%{M5UK)J^>?2c^mH*RTZ%X=r`XQzXt$G#0730%SJ>^kSvUKuFDiX6@6ft}5IX@^#RCw>&b@rzacZm>#7J}!rFM2t3 z9>c2fC^0`3dA}HUl~VF-72#Cs7LJHmL`1c3#;oI{wm;La3_)s!OhnLH)BT8I<ZxYeB|uNT}|4TyFr$T^Gg^|L;;ZPg9!M~z*&w$Gvr0&lrG z3~(4w;PHj%^YRo$qdR6BSM`%trk(YKuD6SRat6rG9k8-u^q9ni96*sJ2~0MfkMUy< z2Wutn%-}HX(Lgvni)#)}^68I8BiG?q01@ZfMkeuoI6~Ey$Xm8-fD;RlLC{D$toZ?y z^n3Q~apMuOb=?vh@w`mZMa=%_PyX0u z&zbGg=sQf*e@+LJ&6DRL{Icg#1PhJIs;%cjP({h&&|P3D%dI zm(kF9^smwhU&NY>A4x^B#Qyw0|7WY6Gso|Z!a$OQYgjby-0`N@C+l1JUlwcQ5=kWo z?ktXi;*;w@>u7+L*Tm5yEHT)y#XP|A>!2-gjC>6tPIsO=u8H1 zB4bZ>4cJ10Rae8&Xde%_B#QN+4X6lM&z{4Y5AwCC 
z3v#$FEiD6==Oe{vb&;biHeG^7o#~b|37RG|*wE36Ljrf?$PofwcJR{>C(}aM-YFpR zEKX=wK#aSfzByQiJ>pKN6Uk_!^>=ZiednEbiMRH?%T{$Fo`xy(!w+xvuvLm;G3?)y zcrZXBfwhj@0HoJDckUcnRpk^7pf$nFKyh`B&APw9imJ2h;`^P}-qh#1p;~;*$~m=b z2?P@!?a|0*t%n8(pu~3K%Cu-^l;qk28#dabI3v$rw%kwNId29!0jEfR`XPi}I1@LJg;@^U)yTkSL)^+Cs+u5)#eDtwavbSH^pnqpcNrZcbt*Pw z9~+_?0;%iIjp&zk!%hj?>b23YpduS>-Se!3tv(MaXI^oNv8r zL5J6L=#Vb*Ba##@!5AX&kMW-;e2H`F%+FC)a-A8OpRgo0%}A@^4|nrD7=vmg+798b z2pGM&kSy11D2y6(kopyR->(yaUzyW|(6lB3U&oUefV_lZL%m;{H-Db}r$7I50Thpru{|6V+FF~58Cppc@-hfond^|nooSEATsZ;OyE({DUKe8y!d}TKHtumy z6-Q8J*FucWQvsTVNM=?pUkVpWDkct3@9piuY8kQeMz{=OC_1zu5`(Pk)-KlQm453+ zWRQm?@sx6c2Ip{(a3SlKL%Wx7&3)2AXf10lqC|;H5NY)L?U`8O1&7gDgjB<*5b>zE z7;ex;@p;#Kuo{#1#V74s^7DdIj$lsv5_^iqXVZ8kYKrr19d_jsqG`mBLx>vnFDL<_ z8H5u9RnvFI!)GzZ9^r(;vqt2j5j}uID#yy2Ljo`LQtl*ydl0ERPA6V5qrFT#G5+$n9fnWg}{(%QK+wcD9M{XRmd)IDEabF`u(oyH$ zh+qeYgkGVyi=e>WHiZRT1GhMhcZIi-NHSc$qk2b&wEv~UyT8t!Istm&O>^eOcS!K@(|paO+oki!#nn*)r`8Pj(CrS@1sH2Q zL$nVaPPCpRUe_V3nG6f+;d~AQB4%42+3MVRav!^K(mr%xpB+WqU06_rm=RGUewi|e zD&SD2P3+Fph?b&jI=PGad^B(%B zD|V&9I1u3WQd4M)YuX$E!#-fbk7T1K28V_|4i#E%QH=G7_!&ctA!0jfMG3B-#Sdpg*5RuM%yHdJFb?rieRD>e~zs+BpR0zIh!y zLD;E$agw-3IJ_J&=&(vZ{Q+@f0VG4cyg1|lvb^dZhw`XKZ~MicT}SerCZV?X!OtXml5JATC_MsKoosmr&hs7x&w(O_7iqWoYDIA z>pd&1hp^(_=m{xWSNK`KpFDLMVz?acd4 zx|2OQ4CCh8#^wK!T&!@#2g*MN;*f7UiU``cDgUCU`-U{#O&EvN=IU&cDJ9#oP? zrF~ekY?(d#Ti+tH^#SJucJF!zC(}lkb9ch7Um`7oGFRs^>18bBO2m>=Xs?)g$Oi^7 zhd*pTW4wtE*#+*s3MbY9<&t$}7suo#B5>$*?Te9tX>r_p?>hV4-~Ew2ymbpmwIo1N zK`&wTrDX2%MV7cU{FWHe@OORidU;m_30a#;>>LQ9M%|XimjuKG{B%o5{G#_ya81i$ zcwVAyC7z3c&?**YT32I%UHY)iI@nkT4;}PFnht93Bf4&6y-SQNHqp|hk2*XdJ!H*S&3e2zi5(F#9@&l-aOCWIpn zUi(<%R0JMbqA5Ly_J#JBa&s+Pwi4CCLw4ob&mGzA+23vd^7B0V;g69zOyN#+XM;r8 zZ)(K&hBX@_=hsvYmc%@uevGIfmKnUOPLRc28?lcgQCo;Rw08BmwLJ$z7Si4@pH-NY z=jvbuck_7_(I&O-I(6VG^Lq6&57Lpz0Rm==b%kU@&SY@3zGhgzlDve)uO+@jm-)z! 
z9l1ArCargE9v)SO>h@0d2}DhLJ`vp9d`>68&Tr8RMFcjExP9gFUMoj}b?$t-M`2RX z0!emsNObO>d$qp`SWntpVu-r&sp~`RKbevqL8Hb9tVY!@?njGTom0_**W#CP!Dbw1 z*Q{CNVn{7`l_;QtD4&6^TURB*~m{Po7O%LGV{LP9~=doeVGSC$ZxB9!>;9#AQS@0Njn@h*?c7hzXE}gAE_ldt&_lk>5r13;5F{PQ}T1&NT2m{sv;t% z8SrTFz(l9@$rLGkm4-t7cDRkV(2^@nMYxjhN%=ikFR6&L-%h;UxVA^v^7(xW_n0=nmI6H_k^_Ng;rv_L+uTJ*qEdw^(zL1n)2nCLQ%^r> zr#Q^+A@xdQbFaPh2FMEs^6!5q*A`TFTTeUAx0g9^byF)G(iFl=DO<908(h*_RNHAR zzqEm)z73Bje2Ru7((CDy7?t^9T+=x;lHtqO^r%f6KJyv8%g;?*j$)PuSXD$qa?rro zA_nIpLQ=}%OK|tQc*gglE>Omd^{(_=lOv!hsI>HFeCARfrr>$XU#Dc9;2{@7gMOuh zltpAN*QS>DTlKUO+xuaMox$BeA?386v>Vk|t$%T6I$(Ek2<+|a#rOyjCd6$rC2fL6 zyD5u0;)-klsuGCz%;wFVYjftz;ZzC{5|T>*I%OqXRLG!h+qZjamS(qdDX~YkY_(OZ zSE1`E_5qK1mNpgPS*K=&&uVN8@-gW?M_8O9%HYbX@5{GRyx3y|y_Bw|uVuh4?d!H{ zM__AFN|$nSo{zXu5%Sf|#8AdGr!A&M7Fs{5jb#r$Z0o-A4LGye^l=d)F=(M=A@pB320DYgtPZp)*T{lnq|AD72`9eiu(;yzBC< zY(e0a}b?By4JVNJZ3AZ8RHn=)mpl~zn~XX;!O;KBX~_D*sgpZ9mI zZx>f-aZ&t@^(v9Fte0ES2OuwX_W%Gu07*naROTa&RoE_xj%6gJ)F3MeU-wl+v@OK6 zJa+t)T_slIzyCW@gsa*1-S1@L6wKj_Yn>pgEcP6BVpx?*TqQIS95{j&@BB4H=$da8 zua~;R`PVi5gmeo@2J_2dF)uF%qK1|Uj$*)ZmhSo-iWX-8Y#JBB=MChqi2PDA=HTWO zSeY^a{!2ZOc)-UO_E=*M#~)t1^Y;Lb-Y*ff7BX{mig@8rkDA8Mz``@!<~*8)K>LH} z+H?RM>LzsHJYvMMRk{!()}6m#4{|+PG>!tI_4mOf_4IV&QwKZmV~gH(`_B^+G+zil z3PDwWql?6UqN<^BhO+bu*kUDzf$1|c`Z?9B+0Z>rWQKj^)mMn2c9@0L<;3tYLT1gI zHy77#mDdW!C`d-ralePHy#bHgk*b?4wiHuIhWt6Gs`Icyj~y0dZ6Zk)jP z>P(k)V4^F%Sst9uQXHryIz+U8+oH+x6~wDp>FM+5*{UbM!uwKmC+X>pf^m;Dqr`~H z8&`MkoEu_DpCW?t^>2Q|zCy~83s}-Al#-&UoWYY`$snbW7iZ&JpFU-=2@62L74tD9 znE+lup}z~~NRZvkgxoKV|B8ytYzvC9=tPGjD^Xv0Bi-;%8s797(;=!WTqG*JSQNbo z!G$GyU1sKr%CZ`6JY5St!9jE1+I7nsTBNg6m-G_0aEI@r%C{@DF}yB^*E7#JnDoLZ zsUwC(S?R1St6ETCSB|yos)MWoap4modJiYM2`75!%7d*n%Cq~ znJmNxf}X%%fjITiEst3HE%ass&)cc9XRNKO$KHGgi9uJsty&YaQ|CEM4)nN%BOt** zK>8rHK`mMbkrM8ik$H>!IL^o9d&4@ycRi-)8tv%v+EG1ZLoA9AoeB981jEO|+vifm zorxP3j?p0@#(IKQSPK_)>?=Y7lZ9_u5Qa)bMg0}rf*wc5c=rZC3z0Q?vzfh1h%gbM zeDwS!B+Ctp2%6+Hayy8j(zer#)zmf0Mfu3t~IztwNx--iqz-8LLB@B&KHgw zM2!*D6xKZP`-B9|t4ILW8wGITB6q;?n^KEN)F@&k^K@}(A#lg*La^WzKkbqev(q4J zT~id14IA$F=nP?$k!T%i^BS=m-^F4~+)Ln-Cge_&dSyqI&dwv%NWPU2Xf}pgBli}D zBFFme%<&HLPX)ea%!8Ex+~s7Ko{Rau7WBx-qAjNW90%wtwmxCAS7Oz62gFR~`yb)b z@NfV2ZzeN&5f8ylFp!Q-$w#04*0(Ssc@PA!+P;tRMh6*?TQNjXh%OOuRsoLD7j4gl zSM6f!QOiigKIDv%D8R!%3p$)eac&*J+AML=uO+`o1g0o1GJJS}n43Pw$u}i^7Xf~R z1IT;`yzWm%CP}vsf9)r1!Hsr|2b#_Yb2t3UXZt~YmbeQH=bUVSPti9bdYrN%+iup8 z&zDoY3^(pswaN`ZrsH>~LtAo@HsL0E(Csh3WXGg?V=g6ToI=*&3LMW%A&4>r5u6o) zTuSkD;g#IhXZs!00^$)z*$*6w2v4MJ0gVmU7z=x?e2R!0zj9}?)-iOLT8T~=g95=_I8*2L;U%ok5kz{YV-D+xu4fV7 zPDQ-HT2QKnY$$3kp_~RF&Ou+Iy->@+KDyIBr2Cu`C=Q7O^hXjUG$a_G#jFu;^j@}a z8Gb!^B-TWE&BTN|q#Am%cM#l?gj)QLnh=Y0{Z z_)Iq?Tapoq#v+0y%Sff-Qu>%70*`xt^_aL~o)MAj!|13_gB`UJ_&PxuyCahx2!W$_ z-}wAa+oQ@nPUmj)!PQO(nvgX&b7S(IBai9lW5_x zf8$9YJd{AwwqrI!y&NrNdsMQB+N*Kl`Vw{Jg-Ed%ybKmVQr+97vT|mp30c*b0jh+<| z+H`TSz?TzzVI?Nz^YiS=p*B}mE0VafMbsc(^|cvkOLL94m+Lt0ZWwhYQWgBU=E(JK)d} z@3X0DYG>QGzWp7nXshixoX_9GGE|)FxjHh7#T51keJ|pwsa5zeu_wJ~3HZP0WKRwh zAxxvhE36^>P2c_C$04t)uZy{$FNjS=M4Z>ukI7xo`qahb>sr0~=`Iz87enObT$!X6 zGTXl;zdfAL<+C?~WTb@!Ap17;RCpjom+n$uMyjDD#_v9C?2A@>_uAu+8$&}58j?{?qD)B8 zR9CRTptK-eSu%NV5{+m9FIcqDty9!5W$KkuFS)48!JMNgWYmhqxNZ9zoMvy@+O?~l zE0{5TdZ1HCB7`y)IePS%N7R>TxZ03dGpy0ua^RtqUYv(-8Q@Du00%j3)eUxA4B(ft z7}k}OL=k+I!=&WWcg2hgo=;;%stsbPNzZC61foTgY{!Ga509eGNp@D2EhTwu zk!}7oa`e?@*zs4I$iG&ETL-#d)<|{{nX=JG)}P?`z{yevjkn+4VfWs5FKdTQnpmrv z*LDtE@9x@ZAMV?W2{L+2xUne^{3#?el*myr8}ke0a|Up z!2|N)G>c2ikuKbX(AC-81q)mstRDL_z($K)Pt4Iy=*LX_W-F&b%tT8P07}^< z{d_kIji?=uVgqQ}I^sx-IE2HoakfF^h>1S?Ns=#ZEB(p1#e;uyO}X);>Cl+`fXr_} 
zAb1fs{w3h!hIf)w@lR1r<}O?`sZpd!(5RCJ<%zDz38yApXv)T`a8@xULVg3{(zH0E zBAIY9rTf{lXRqzpu>;DUhgj`dw-zgM#d5fkN5-JjpF&=VptazdK7h4fxL|WYs-pfO zrzSm8`9dN-gM`8|ME;$E#o0Jcw%ZBh_Mo7^Qjospu$l6aH56g0JB#!%WpTK=@WB-C zk|7J=B_OD23zu463BrGWVx+wkkBwYnk}qDkXfMC?k~=ReCf@CekYxR}YjATfy3?w} zdOFodi{_%NV5%*z*DRoKoLY-5ER})9f{C z){<$tmKcxEdS0cvbGT7(SdjtCO2U}Qf^`pXOTGq4x+R=+nUaGb24r7$TJdk zOD7ft zuv(ORkRqKZd%&d&b*uqm4$hh5nYTUFkd_ySSvrx0aQzC3unLjwKE&C7&4KPX5{Mjv z+ZG}|%f_dw3r-m_KW2#GejoEgnirzahcK6(RNRIlzkAF^zW38tj(J=M^J+rJ6gJT$ zi=ObOfSpR%zJE#a;s{+ZaKfq;MP5EK6`X;ZDK6}+ngP}wCXNsi5f3`gJ*qoZe^WGY zhnL*24BVssaE>Fgf6UYVXmr`Yu(T9+9oD^~@QbkNkS}IO*lLhmX<0cCGWnMwHkF^5 z^vs^j6F3Vd;T-9?-h@xHhBw)g7C{4!KEdil0KA}?aB)jfK9h*~M_Hf>7o<$p3Jnw` zXnI1|r2A6x%=P!*htCS3auCVLYAm{dG(Zu?58>k8LF9FXi2ABmYa(b9aRz!DnN2I- z&sB(?kmA%Od6r+H#TfKYugo{5`k1V*F2H{b$K!hZ&gh4V4~y4qCMqAGD{<#A$L4p8 ze$o9Je+5XsY&tp=`aKy$tY_tOb_fSs5eAiso)^rIlV?uP$$zYr=n~0Ad5Pv!EwS3l zC3d0lBrr-%DEr7Pd-mEe_hoA2b2k&_2`i0HOg)%(Db?$by?(FkgoVa3T6L zVPmab;l5A11fW!#aE#pqMaIGr6b%kkVCigPIrg|Oukv<;I8EY5Cus7)%~Fg>A|`o2 zDe_fJ%#<9CrAqekclsawdHRlU_NO3L(m8ZB*gAd=%b%^DUgDKeiT(AOfE<-x4Maz6q)uT_aA+}1b#Yo| zv1CoSFtogg`!}Q73h^Tv)AvyW8T&(W^8Ml5>RtXJ`uilsAqR0D7$l_B z)q}R}@N;&(?TWXqJ?Yp?Z((9WO{u}OW+ zB(M22hx2Q38wiO1fzvOOB~E?_(PJg)gJgMJf8{cVwQj2>xo zGHBA_NYa&(68w*{K!c9c!pZk7J-ieqJMQS5M4yck5>36n3(?L*XG-)syP3&>jYDk^ zQDdYp(K^QcoZ|Krm%PpK`WMsHxV3vcF<(Tq$&oOKen z6K#mdLIKh+;c+=pYYy~JqD;LjZfXaj`}_ieK#CQdWJ!yl1q*?0GAM}>-PAa;DK$Y# zsu@N~kjT-gZc=BaB63i!C>Ob~n3E@5?bOxP5hdhJlEa;Gt4VKL6Gt2K=Fj)Up!4U? zbt--VXX=g)OoM5sVqU)Gld4?FA@x11Tkl;r2kqSHHk;Q;R@GdH7dn%{d`_=7|V{a9F@xpvrI{pcUhxz{ja#F!CWUU~Ly5sV;xLj?@;89b-L-3qn10h1E?#0Y z2_7!-WVGmzD!)@9F^|&jK4=0=fYh{qKaCP4B-;5p>?&$W(Ug9xEXu*P04I7dZY^9# zC;Lg_zf45P&S?q<-9e|KZEC$tSinibjLtaVb9M{y=Ru;{H{oLRz#QCT<|5L@Xhd$P zIwU$4l`{W`tG<8m_Uv{&Pt_T1uK4k7`0a-Y%U9aHZ}6YPdsm zbYxp=heO?&8d-7&{&ORGXaz&p;oUPkpc0bN(PT?n1WklYrv=G8l^S3OA03elE)SPE zbCI3no1*Iy7YYC&5OkAn^{D04k*bKr{`liVt~9<3vIuiNzcUn#Ra{sx zM9mSDWc+Nn#`*?py3uHrGW3Ewf*?mdjb#%PT_og z3PV>rOeWFgTz^+JCCBRPNP3Gn?H(dtfbhJ#+846GkRPY8fR7`&J9lwT3DIa-tYd|> zlf>vGDPle%W~|oE+BK+Js=;@{#)&qiw{$2afl6yMyeGN4ll>mfgyVGj*V;}>7>KO6 z>U{in%ggDn=A9y1p^xewKYQO|aCIHc#Lc~>H9*WxEPacR)CK!N5LF8j0YlK*F@e#( z!L}jS^U?TEX}_e2m4haouyJD1z#K0}no;o(t|Z+#@mGmhl`=%e zFq7fZ1g9X?$ap7~MDNv6AJO=I(cH)v?IZ>yzl69*&b@N>dbnmpivIlg&rbdq`a}I4 zJ*{@&+zxx~qkp#ZH%>s@q(4+&6aAi`&$n-vjmz^r_!?f`+ii_!yIj8zCk;sH>>Sbq z5z31sgOdGcf|MqLkHlhmrDS86mTfoA;y4O%?7^C=mp?y@&}E%2l_E*|AzBp8MdG$z zqMzsx7Qj=zON=$~7JcT6_L(i@!fs8g9<&SR zGwce+4jl!o0q~qG4F_}@k|{rZ0_*X4gqR3-sh{a~DZcH8twXq**14FjhV3y$? zXK&w%*=Y{qi>mW%-RgcT23DEa0=Vu>NLU+KeMq+xwBh|uG#>UTxi4eCC)%W)=GIKq zLIE9+m1tQh{nZWw9?p^JoeBgQ{|8=fZH?KDrl3~sAI1Fjn{=Pgwa!8P?q{pJbb$AJ zf=AMRO^=`nSUSP;=hjP_$fS~w&yAGg+T{L^XD(j6;07H_mo9ZSGa2TpMUh2h@%aAx zdpu&ufEEnbC6>I01d9(nwArT3EQgR0+Y8c94rz=|+U+{rnK(f~r35qmY!cv&S7H#f zlIdC2h6UTv<}Sp~LnO@0F@GevQ%RMGh;_4=Z(xCS1;uPrZ@109*=lP6eiAQ~^yJvD z+Vl$%%}(dkD$!%QMFs}*(FRwMW#D#Y04~V$2i@)_{RZI0F`Yo{*Zn~U+-sw)U+_4H zWjum8QH9apgQcu1BAi!m)!ExeUa)shzJl981BVsZWuM{bGoR#hKrp)6`|L(tuXQ!) 
zT!?6uC~cI;#B@c4 z7$8!5309tRD{+LS2L*fnr1w^O5ao!=!7nKn4o!Gj2Uty#pelREK_X0uxG9~Ca(wAH zIXr=WQ$X}a&q5jy6>$j%NjRYW^j@U6yr!wG{`a_8h_l5)@ ze7|jL8j}w?(Zgs*6Lm#}h59)Mg5IaYHpKHI5|8e0>PGT4XdBjN+ML<=-F5>L`X{J+ z)wk_H>L1hUd*o5=DB*P9KM*IhAN*md2z+u0Cs5<5O-TF3U(XbK0FAXk%RHJ?Qhx#nDI(KBnmHZ6u^mXCPK#QRjaMPu-QK9 zc+J{}ZeWd=YnAai)^#o0ZnY6v1B9t|^Q+KPNW2sl?_~IeuF|ck%LvtTwa*lc_()Ti zH4wXP4(Tpt;$U5Xyw)86A*R}`n!qj=vWVC%D z#wKgQHEY+pu27Lx(kUxetgv-hI7&A&UKzwev;%Yeu3m)O(UJ@cpMU^X{m5JD8jjh| z-~WG2#v%_dea2=@S;VP2FaQY_egv-3dWS#jRHjOUMWI(M$Xhr|iz|EQ#A{eyp2B$* zU1wr`haXM#fekVF!p16#*))6*FCBoJLcFX=l*4ozdaw(7fJtT%X8Cg35E4w3B-n(P zk=$C*bk-F5fj$t3L?A10Q~00%?9cq5Re&Ty#!b=U8W3B9fepE54U30e98wW)RW zn_9F^GTHO;pUQ?14E7fis}id>xPIx$RD3M~0>MTSnJzLq$b37CP(FF2bTY*SWIhBY z2_O4N64{?A81fxcZ;%U1Ogx=_59Uz&RL5WNC9lC>l>5-V;}v;6D(y*}z};mM@ zsVa?9BgB3e@e&)*x6iMR+ot;n9hDEh<00iD8;}B!{Uq|QkEzOVA5;C9bWQU!-u>E= zS-kYaIf}8lek0RvDB==*FDW78NELp*TI-HK=^u3uP@f2%(n5*2-NcUJIw^X@6jfnM zQHkHGaJSDT<4|1+ccmyhf`wc??xdto&{R7Ei=p6lC8>~;R-Z2O-iMf>AIl{bnVE|M zKk>w49wlEL4@(I9ZWQaU6YFm8-hEijv}$e~CsvXi^zp|Yv$^Qx#I-dHp0(n8y5YcR zk6T*%o9xClqR)3?AOVoYndM?BrS(2uQQdjCT`8gG0QMNo^*!VZ<3x{R=P~)AWfS#) zT_JIk+?+cIxzgUtrek42$M9MYkhZh=G}6@Eh*DdG1M{~@6fF#y!GH7FZzA3}?|GxN zxo=ly6OrQUMB)$;O!h$=d5)+X-Gm`(y=t8bWq!LNASO1JCR7%rjaVDLjZW?4<4Y#f~}x>6+&+Aj7xQW zG<5oGF@XXGZP}t*B&p>0ggDYcDg47T(47FbiFGy7ub`jdtNI$wf%c(dU5U?$5q>L< z)!>pvAlgb|Ok&-79I@j)s3>IUFqA>!UyVUjL*K&+!}st)bdce1iMyk1`%ij?{wM*b z;HLV;qPme96ieAZ_M;2l(*&o&;cCO$TwArI&$2Uu$VhVK5dlz>>XBf4rzM2R?Vi`1 zyAcWv_VuF+Yr!Z4zc|6M9P9F`sSq?~Fa`@ZkhoD}5wR8_?Cp<}2Tg>K4Dh(E%$PC5 zW5s&g4mV~JT&r|Mijt)LCl2jSO8NvXF$tOYCefk;ll}&UPTjn5!^H^FZ%O2$NE1rx znH-N%liLgNd*h959xt#J@u6l!1T-;|xoYb6!rM>N*{-|FLsPGzJST)79F`FAJBt!L(-MY{N0;Vt2(*p7GvjVrN zC*s2VkH`IrW{wHUEhAX%63nmXVyPGlIZFK$Hs%kE`=YyKw!985?N9&1fAG`3{PJYZ zpQ^|e;(z$@kL@djm6F&obPb~JUg=-7`HO3C)U6`t96n{e+aP9GCF)?`(b$g& zvCpQhW&L_JsiEW$;rAF~#}b^WC9+hmHyJ7&CE2V}#UvNOUzU6O_mK3nxrtCt(>!UX zPMO-6;~zwlYx-h+A6fUpc%TL54|2u|0l9%EM9@@(6LhcOB$ItviE7Ifk{vpS&{vut zKP>XQa%&A{#O>Ub0{i=a;&=A;-2a5z2*_LnENoMCX$HmBX+HUS+Y&zKH>oeQ>D(iT zqo4ghcXpqxT8y#T;a=N)qu(AWCNU`@NLg>XqqXX%LG@eX3FlP(@Qx(6uIq_DjcQw? zpb~M15_yWI#UPfM5H#^@{UGzBT{*VxIvGJa`fWLWkefFR+4M^JaRqx_(w}Z}j`sPsSuBf%j$z;FB2ay4DX~uP&`l^kg2v?d#A(5@NW%&8Yk~q}`4}f? 
z`R7RN@Zf_F*un+#-1yGO@v8!H=w5Ug#1>1kD7mYHQ@XKTF?Z$PkKW;hadS@5Ymy z+K&XuJ>@@vAOz^r4vqU(p|9#k7Cvf4Q%UbKALqeZqLARE-4efOH`*>Ef?R=Rp;%*Y zVWh#^e9}mi15G*J6zXgH_U*Qv%-*N*PgBZq`7$Zk^8>)}$)}#Om89Y*CCWs&fksyy z%!>%ex$DR6(hUMUYvAK`2N-yX5(cjMRR*iM=vutmhfL<$kuBRG~#&*i}*&4)|Kc(*wZ+Owqyn(Gb9pw24}OflSq|KZ{Lc#9|4gY?-0Bg;>1)f^RoF_j17GCNz9M`b z`BZ7|X#qgzfOIf7jzc(wL(pUp!W0p|kr<93MJH%H;&-yw+ekI?{yxmrPj%S?TiBO* zpnoI@zP5QzZD_vrT?lhLk=u9>)pOGMKs~u7c+b!6wk;3F?d^98>|+kQFCt3&lj*@h zm##aJ7MAd}t(HYeMh2UrDb+1N1SOn%!9y4?;AXi){fC3^jgjdck>16g0{a!BxqY{K z2>6^~k8Y0H>J><+;^YQHgdVOOJ+Gp^-ARcOvSB=)sv_0TSUrspYq$|{<8@O1Fdc}M zm6$a!*%w>_eIVlr>*3jMV07VXhSgofnHqPg*$}fibLTof3uaIdf%9Vz-6ZIIFRGl} z9NdrYz+_*|rbW;+;2}7f@gjpu$-8aa8?Faa^pDl6SG&F^1ip`t6+A#a@4ol0?fF0_ zS<=@71ex-$TX!%1We+-n*Qrjv2nsDE(|t#b8nF(;2{M!TamLyln=&tsuMvv;s{^EyL1aqMOif%idbK=)92ZO_g2)siKoldEK+d1( zhJZwwMo)FN@t7^0b+1(x&!Et5^OH87FU?iP7Ly@c&Y555-thWus$YCgx z^>X{Q!a8AR$Z|H9P1|u3P7iZpxr!)JXD)LWEebAusqQ>Pv)c_J@Rc{`nm-mdmtwUMAk{opBC$ znPj~lB3pRec5ctI2i9UtfR4+F8X`GKwnWTyco7~^i)q7`wzXCjW=lO5M>#-PzxfM# z?6HRn>?i-)WiMXqvf6?o`*NX_5QLp#{ku*L4pMm!2jEb&2jusfaxEn;(v<%`LNr9s z2C<@~O={?6=q_%hKQ{^9`}_?K{0*emLE`Z6=3IO7kzSipOu|uuK?M7X`n1>h8>w@0 z>t<5VG7cY#5HT>g(FBKf3z!4)k|F{nPs139zGk?{lvxn*3uc4+;m8^1sq-A)PI*|!7-J~cOU;#@c zsG>>5^+g~2PlP7JIXJBxfs!KP7nbmvF6$Qk{rH78)ncOeWNM*KAE5Ar$e zn%@RYlj`u>!k63qrhCGECEpib59^7(M}7->%DnNR>juQ^qYmq8MfZW8qH79bCVMUb}kbvWwPr2#FRMjRbKTb6nJzCxV96 zuYwTEz$TgiH~QRf(&u2QCg7S_8!lDw&s^7FdC>t)`g0wIL`1SS9 z!09+Z){;k$_St{?|FUd9xeeE^BY?x=0d!rsMa24COs91hZWGOIFs~Cp7LMIrI3P}H zQ%E=y98;FxYg-@8u!BdkZ1=u?d+tiFO`Dx-Yx7aOb7%`l3dD^4IhcX@yxJ2vO3|)J zr9_G%hqBNF=kuMb5QR)pV+dkD7yF6wp94Z~r=C}CX4_9MUAJbE@@`pQVBdXqz-G_v zw#**tqK_lv3;;~EcU;nf6SaF24rBF!(1F*9MOBiSh$79GVDA?5W9Bmf?aK-zCa|U-_D*p zYt<*Jv1}x#64A(2#I?C034`ruBx7(FGW2}^N$_%Ai59(*3dYfOutvuAEx3Ku>+pi` ztAL;9Vg%MiPe~E=3VIv-oIF^r4qw9O30={NEBlF`xQP5jRgvHP>9k)oq2c0+8qZ8r zfqE)EmN>Hy&b(nw?F}Q3=tp(dSIo%eaX)F?5k&6yX{=#A<37j_Ax!+<+ufMPZ_$4E zT%_@g-vSIlQav1g8jki7H9t_E=d$6ZFVFE)eCAOQ3s#FFay~HiH|+a3%_w$WA>!r+ z^5}^ZxP{a}#vvkTQ6#2l8CBD#x!a0Fj>>hG&7aKJwHJigwFiEai{lCg7;sChP2b}h zHRJaG0Y@QmyL>QpXzeE#SzZB#4+>lho{>IDYS+@zE}J$ZAM3dsTfM#r&bP}BoxoV* zwAsA58MX?&(qfD_YHQHpkia$rYhAg6Xt@tht3E`Fz^`kG>ypF_szD(IQ46?vX)YEm z>a{O_HODSp&$J_#JMG^kLY|gs%Mp#LgL31EBgs}EV`mc+Er#P)lWNMNjHoR*b6NZ; z*(F~*d1xHM(4#{#drn3Z_5Ak6tv>rFVukW%_be;1AAZ|x#X=0v@aNN$!V#(>c=s!b z@BH{_7VSfZsukRYyTFCZG71raCk7@`tnvzACr*{&golgJT6F*z9Xpj}2icQ-h@Hw5cC;S=@IIIB| zAhMAGg?z_)Tk|ZhTq3Wa<{S`J3>`;*H+|w{$C{z&MHBs56s$Tim;B)DcAHzZ+}6(B z0^$lx-9L-gy$u%Bg&QaB?PI?roYWB~Zn{2tjwS(8frd-kkt z*tBufh$JR2B};QICrd@G5fO8}8K5GlT*4JDhUPuA-VH6H{rnDLmk73# zM74c=_?7l$W9$Uc&*{c_9iuDN0jE@e^kn4(*ecTB4VR?aVO@)Md1+j zKOX|Sd2_#Azm{b`{r5h5|3)_W!*kt_|<8vSS=!?Bsw(VBGy>PYPuE3G5T$XP?{{EnCyuZh?A<~!}c>fb! 
z_x^`3!P-i9HHS~7es?^d!14MVE&d$mxqHPd; z=VaMCZM~WH!G6LH;wn{CSYr1R>1qDF`2n-ylR`VxqsTFGcToWPf+Ax4!rUxgvMkz# zv_IdTpwXTN6V;vh;C#G8o+oS9&Yf=ExcPxiR!LNLnOufNw`p?s?b~N(&YZ=%sn_y2 zxjz2bqvSwaOZMND_V(Lv+o@Bh_&aTvE?vQteUCe__MnfDsHVH4+uE4y0!~R1FRJ8~ zl3|)qM3O7}i5p}p<*NRTDZy$Ub9wlJ5OG+d_eQUWzenGJOMTM?Cnyp-&aGO;CJ_SD zS?1F#zCk%rUb%J7-Z=EH_EFt#tPQ(pmp1hm8P2EI3$;UI$uLU(o~i?>?wJVg(ci;5 zRS*BQse9V7@;pc|;r;E_*Uon0L|KeMNA((FDq7Z9Z&vj*S!UARMi8YE_7bzZ-H_ZbW|x^wNhmj-MH`gMs5pPrIoi{&PSal?=Y z2vEh8VA#QDeFX;nj9))!XkaRbjr7crKw84*iuH?4n!SQJduIbNB;gR(t|+wM{%+j1 zZfLiXY=R`ftqGm--(eawAYvSRx|^>7aU7{pc;2t6F~%SY*3uHQ^B0NBsO;?kt9sgi zO)bUWkogjvB}(mgeGMao;W)6r(B9cSWUW|5KJ>s6+qiK(+1n*b6a-W$ui&z4N)pST zBvgU8o=6yXSjH!4-k!)&DnfzNF&^Fj`m>+8s8P|y7cXAy%5nk0iJm&d%r?S-eT@0N zQp!k_`0zsyVFa=kZ}Ae6!WCg9Q(?+^cI{dN93OdpD6$MwRC%Q@>B0q|@pP}1VZb5t ze9dk=Up_)C7)>~f&L50Pm2fxpBU50)7jNd~974Cl>?3_yq`vC}LMJuU}f|v^zSG=DVLX&Uo-s!sT9^D+`!R8L z;FTF5>hFAu`OnC)=YQ30uit3H**V9)Hk54}$rfEied4r4AjcF`0Y-%RAxv(yPMugX zE^%#wQ~f}e?ZW(h$BkY)+>F6TUZ!naQ)J)y=8$c^x1Icq=oIOL>!rQ=#CoExVcbeR z$wX*OKknedNd3XXei&dM^bm^c+=UE$$rv@`no^caCUS^7fu#os#iiqd4uAvrA!@%g zHpj{1SZv47Ubj4K1fG22OSWwJvV_AlumB!*^+gi?My)&o4iTWU`!y{yPe(m7TFiP3yPRkm* z@5YH9{v!N-TY?uECsW-|UZq>739W@SV=L>UAH}((10^%Ft#%WclxIs+D9CVKOh9<> znG_s&TA>-F(WfYnm&YLFJ)VfBqU$VW7c)wfuIvxe9|?J@?chmXo%rkS-@Gz<&ugr~ zTm&NxAmE>YQ#Vo?iD;GWTZm5)c|=LFM-c`@S~xss2#z4hLF9FqVNmTLc?&W9#wV+0 zMi1ej%)WIc&-Q0M~WzmAjxoKkS_#6;SaqW$bZ}QA=^u|qQ-7G9Q-9WKA2Cgo4ohQ71zg8VI+8}RnD z=@|rxAjBE)lYTkNaXvNiS)5r*%hHpg&xQh&G$_jdNS+J3@3HML*2tt-B6Yvybwh$yTzpwkiz z#E^WMRACwW@kpmYj2&>Ce*fg__Wr5YJw9S@Kk|yOOt$Lqx#af{e!89tF5&Od-@?z~ zZ^?D)d-z*;Z}eL7XMI=Es(1Kla5{|!)%3MvWmVT_^%!a>y-P2;Gi6ebW56+GelBqv zNh36!Q@z%0P(|>ds_~*3Nm0T+Mt;&G0g?((iq^h@ZA(uyx^Nd8RU2K#2@0|*Hy0h^ zLWJU-iEgCP6?|d84UbQ-)+A8`9bfZ046FEs;F=OR$^}FqREVcR0wu_@(WeXeI#0t~ zs4AC4LM(KE?dLZMs`TIxT%ED}3G{CBr*+u0M>6r%5~r492M@S}?WnK2I zujV63%C#3?#f{)-zx~aLX4_SU!3z2DmZQ%{AYfBD^i*aO*AV0%V_b~Nn&4!bAdJme zG@j%AV9StwaFg&(S1;nj<>})TM0sN>i98Yd$wUX=B1} zrzB`iz0kjd;D+~^58r-K|Si1Z-KMqQH? 
zn~QAjyaP-tV{#TjdXL0@&<0OYr-+wClG-=Up*gaS60}rLrKsqOdP&o-e@@hb!`LwL zVoc$05+@01OXMUq6b5a;S^GnxEbZd}&f$(0q0u@}36>Z0$~}VSkRoRwIxK*zSunHT z=8{@vh9vI#jRs5`TdN+c~(TCjkpw?uT!>p z^4{TpHV5XSjBs-t<17P9({?!Olc#g+jU5^G>b5@2`*ZBR^=s_MKm4I>T)%;cFCsPc z!&?$l)M2Mjo*-*^5Gt;ivVd} zAi7<^tH-ZEaq#BNoy&=6E{m7dn`Y61U9e!G%_5dz3n5W@SR9u>ME+h@Y^C#&eUr0D z_xySk^#r**(K8emvGr_|r)QasPHow&`)pD58k<^DWBJ)doZ{uqInr}YaPYKFXY#ot zBXZ|WyGTIaqZjuP5&vUrXt_kJMlyF(zuF1PNFAezNm!%q3hzxk@=4zADzQ@6=>M2H zweh&lrtU{Hqr|MOS9`4GG7()MV!g<1`Uw^+dM+_Y!4!hXE+W3-qHLVyaePG)t+Rv5 zC}Nhft>C`fy=qf?xFI&N{>ey5h96-N?%`*-&kN1#92g>y0Q_83NxTJLezRUfMC(v; zSb&`2P{Sc>y@v=@q3{O!;9!}kE;toUm|Q@a&J^om1WsXHTGKkLIK;W=&4Kwv_~KYN zRu;5d)jgSZ?^=*B!F^Ajj@hx}aXW%B$;mT@r8QZ#Z*|(a`c^w~j%@Ys#|W02MVP*9 zn?D;w4Hq}3wjbRaqRHF=LNFmtqqPE}m26zsVat}p?8(QnZPzr@xv&VWNM2b5ZNB1VD>r8ZQdDu9VV2Qb~qQExXmth+=_Sjqqa3xXx<5)WmLJR`2 zw$_*kHVd4AY1xKi(8P!}`UFs-SY!lFck7ep)CnN4h9=v^OZ;7i$nse!V+snfK&Rgq zZsp@N9kY)PW)jMfL&GulDcSKhY+h$S`0jTx2HA+Hkqiv#SfcCwFhb|foU!-b-R8V6b&K@U_ zq(WgyY^JbQ(vu~s2r5eBRZ;raL*47)^sZj5w@z~KoPWRF7UaX0k}^goai4Y7H3m5* z8W``oR*OpX_wZWqOYj=K7QHumPxv`eWlvw1UGKZ(BFK~V2d(g6F{hbIt17L=>wkuo zukbhgCMAV zg#6KIJly0m)+hoA&3&NI3TqHo!>RjPQ0?OlFYH@%T;a9Ry-IvTN&#N!`~TT{?;ty~ z>#*~pb4H_)2DuSu2AG_O6F7mxVWvotT8ScM$z{uBdDmrEdH?tR>81h3}POVlXFDIAm`8ky3slJ{(k3sU-#>W*Np}SgGs!=>-W9yyWfrHo^#K+C-kwP z*;$J|R8lhHx>~euq$ENpAe8JwhZZti7Z*4kw zs4DH+Q=7JK$6q4nZRhdx*@2JGQ}|8XwsVB}Ws}rU4X03*Rv@-+UtOElu1**?r?g_p zymZ~-(bTqtv%81-(ni7&J#n}iGmdH^XH3NBB$>pa9;A1kAMDq zPT#B*18olA⪼IigXGk7RBm)Y&L)WV!{BuG7a@tAX>!1IhuByUGM+^KmbWZK~#(b zqeL%Ru?lU*Po*z@@r&tJ-1ar0f^XdhaD||RNdD~E)9LLu-$=&}9|`Xv3s3-@k9Q&<7erD;F=91Zp*!6iIp}b|q=U zhV|*bd+$rXdHQJp@H}>D7#|+0Np*`5KNi&ZucX}f`yF9v7$puBIlBALr&C==(>|Q{ zR--Udg%7RDa6#e`wBlVfg-~lUoQAO3%#I`HY&55b5<$-vei>R7me$m8qT1#^I}T~P+8(@T4A zzd*JS(t$G|TSc3UyBjRwu@pIoICQw%i$X{oXgMDzl*mt>UD33*E@25jNRX-b#_^sX~a%#kp zmEf>iy!yAUnm{R}lA{mp2`kwkd*0TVw(l6ntN&O!24OqjJw#X(?7{G*xEJ4pO?bFy zYpF?#I4fI*k@xcDm1*^g>a=pHPI)I%Ga|~HZ=N8`m0gsRgBz^$KNWCj^AHcj@W!948vQn2z+-~I=`g9;K z*?t=;c&Q0AHazNCHe>1p^C0RogBc5%eVt1W)ia1UpEt{-yuHPB7hjjYlIJhIU;3MT z{>v8C^oO)2?xWO+SkPwzE%%&WwE^Rr1qUJ#nB%wY_hX;SB%7R#2n&-e6)g540tlYo z#kuKGxHQ%AV+qqH%57R$la_2Gm~52@(*mRLG9?#o71uq}_~c_#^n4VD#+`WE50--Z zU2t1&x;g7tQEf^sx`^lJS+98%Ns72dGBE}-jAc;~AKA+(LLCrIPcs;>@0s@w&&%XD zvh{Be)cI}mS=WSqB5aGqWgH)dm4s=Dcv{(n&S^YCJnh&DXOW;FqyRhMMC2YKU~i+S zQ;GPm8HJ1aI2d2PcsOm^FfTp)Ad&2k;SK&k4GJXHeB(^_Frsc8TMzYNsE-RZMQ!6d z`j}V5ZM3@?bAy#cbXW<2TYB9D-jS*yc$kL3#jQrPNH|Bd^-!K*J=B03#iHX7Ip`}e zOJf`&;6rV3v8b>w@e>ckI^ur(BYzH#5;{RN2$|RR(|?Y${e;8I^712|I1br$_AFb^ zdeoW7*A0c_Tp}+~#?)A5YZql4+I0+0bl*Y4+Q9WTH{dB}d8%r_n+LMg4eM5=ThVfF zU%Mf#S+^c#k@m336nD9?%Wzn>y1QXWUU~k7^!#t0W)p+M_r*)o0wf*_7cXIhuqZXb zoxK-vqkXuDpwR-mhFlks0V?tiPH`F`O7ti@uK-*gJ1P7xo7!L{bAmSobY-*7c+>ZK zac0_yMvOKj8y}dTmTZQ@(G^X!b$a_|ENI#T-0v-Yaa8g@e3$j;rA8v_F(GVi7hENLeshgLg6aeM# za&t%lis}M!7vUl5fqVrVTAr|J%@#dxR9_!#iqZOLVfWO6r;Cjcribs51L54iqdFZr z21kO+rUST8J9!+X4*Y|j!xv!>;arALwLi~B=Ry~p-2t3EAb4tQfHQ;8HPxahfxYA6 zIguQez-ZbzRd)?x3MAZ{%IiLBQJ=wZ>`hv7g_6v#>gxA62e{?5%k`XEU=qMK9 z86|jG;$8ZYb8)e!w2b&?SsWc%DDSF6id2>}F~o*LD4t+C(F8|XiJUgZ$l;8d3Pm$P zeFK<7>_p2Naig%k8TVCx^vB;$D^{;WEZfcou89aCm{ZgfYfvei%m)4N$SU{9qNMKZ z`HuAJ^UtT}pZ-k{oyCh5rxv&}30j-n8E0s!?d$*ay`(Y(jpFRTgIgvEzz$6e7}u)s zD;_INzOSg!ovIzmkmKYmlxCmtw_=MoacX?})ETrJ+5I3ks3iEJvp#L`XDs2v@St43 zi^sR$;XcW2=j1C>gHn&nqc%eMsM;I=P_Dd-?)iT4 zcwdxZ@`kz0J&FR#`F;4HGvqyYbg25sBad7Zmrl4I#4P3bkz-+cQpKWH<5%T}{0u1y zpXx&V{5bu|a0Tm8M8+py@AfgqGw(a!Zfe{l)e>88V#27^Y={tHHK2SkWr$svD-dty z0f^su8ASuprX;<6n(iueD671&{+7;z+}GaY!5N5tGeI-L4(@6^+evt`N<4OUe2 
z_v5(6E6GhF$@TU1hZOSccfN~yZDY)*DDJxmM@}is-y6RhhZNTz3mr>~g9Cloolj5{ z^)SrlVA#o);W4RZLk*I)MZ*lb05d-eYTBWtF?|3mx5Z$``s5lFOzbU~n(hj*E1~{CU8c&>tnR-SpVn|MuT?i4n_yAi*52IFQZ0Ct3wR%1inJL7F89jI zO({w{LwEtkk@`P2Qrn_ErLkJGyfI#=*Af%&QAPNZkBJo`GdDn^+w^LjH8$hvX$ZqA z-0ADf^MnB(RpQ_vB}2Hz#l~$p)y4G;)fT2ovZNadPf~!)PzlSy>ym824H3H<6Uhw2I)TR&Zhz!?n zJpFVm+N!lR8Mv9XBOD>vt5sMCo4;T6yJ}YZDkH32Ou%WzHl_sHckX-|=sTAN@whZg zN)KTz(wA=c(xw5}S=&J&P{7QWjxO#yJJ|zfI#l_aO%kEj!%Z-&XSx>)uvLII4>Q+? ztr}49h^T)!CDY=VDl42H=HPI=CWGPCbPm;<5{qKL1t>ER>RCkR0FwHNHD`A@$F|Bw z(@2ct!Kr;tu0tgu?GJj9ty#M^f+yFZJ9lF1#1f7v4Sk$$jW<&ey`W@#j^$ZS;VE^E z1;hlhp$>#-RNv^Bk%^;)@;P~7De50%$#iPbT{%FI`^psxUU`ov8=u<kQv5?QTH zgF3Z`=&3cGF}wn^^0*(W{f!n*kC(0fz0+PNrYyPPq02v+%PAxm!EKVbvC9)MbZAdh z_iJ}sQ&YFKuOx0!+b(k})R|nTT3=6E=i5orV|mR6>`Mk6OUvd{6fk7qR9l)bK!!{p zuwQCuBL0o{?q$C0n=ik&Pns$0x94GBgO`tq}qneO-1(B^6IgP>v&Cqn?XC7m6dG#Xs>fMhnzx>KIqSJDX*shTO?p$1O_xk{k0FkQ!Y*V=DUnUEPIX^EB;O>uCuaeFbsygXQrfL=HH>IGqP8ND>xGil+ zY|K;DlFnh{8@Q?5;&)&D?xRf;AU3^!B++mH!QGA(7PP=!G7%15d%lun?O89|E0lR=v#UZ#}?Xa zhOT5x1u&UHY0CGems6eS?~_j5XL^H)g%17gf~Xp$Vg`}U%6Jw4Yy4cE!9v_ufHqH0 z#fL5-Iaw#+2uGQGsy!r%E=riIS_{rC?gv)}w`JCgzkmD9$QbXU?*z2|&{|8Lpte&b zutME;Oh%m{@+1tdkNaJPAeEj3nE}*!CSbipK0IwzN>rxp5j<2e>VofkB_y#mjO?gN zO9~>;D1!$5ixgmRl5(Gs>rP;zE!7edOk{DOPpUG^nJDq(dP{JTUMD9Ri%8b(Q~+;`I^(RsdfyF0a9Blwb`8OA;AC`R=RHy`~W=* z06ULWi7VMzC0o^}z@QeS1SCUUu^vbZN{yTpA$QL0Yge!5^j^@Z8B4BkP6THIwEd_N zO+*;wm`3PbiBEd-xLPCoZU3_L zm}#tl*&?0}(_OCsZF(ra`Wp#Lm4cvl84gYLqY{o_;ldqWRT#GfAf>3D6UV~y@j7n( zdhjXf2s}zCnK*NtnIIG%Of^nQY85#2^3n#N%^*A=AqMZ?3^)Y03Bfs`b*vC{oaCxR zO1`=1r)>lPq4o$vGj<3hO+m=tV?R{MnAzOCb9y00d-OMgn$6AuL22LecV24=k((A)gMdM!q;#Fn7WXOHtNj0g*P&^B0Y{+ zNdqm)(9nf6NO~*d*aSPZD(&{ylF(ol2HD+!w zi>d}K|6)cG=eHT_OYTRt7v6ITbX7DF*H|;8M`>GtQFF44Dg@fPy*`*gk|TJJR_IWA zYgrCGa$ZY|_A-m3X4(|wWImfcmzQ?u9H_2DX8(}|p)v2{_At>~1!(a|=kHGeHPsY) z_Kg8*mW0q@L?W5Q^GbJj_24EbjmZnx2o3_8oRpIf6bKcfF1bp2CP_B9RJCGooT&C@ zz#`|Zi%AS{3NS_Opmes}O2ABb0aXHG1_xk8_U{@YNlf zxxpYDxWCNt0ZJ|+TocB6xlstd9vFjzg@`QKBdCTdmCx?o9wx$?zZq4vG~Xj|;le;9 zDj2&&B@Hww7SB{YZeB)hZqix2VDMz_)KmoXrz8b^2u#wRbRx4SqZad?`5w{~=cwgS zniA-;MkNP|d!97E6kt(W%Gpse;AO`rzy}7lnQP7GD06G_@L&RQj#A2!DK(?NjBcBt z1<)C`BY5f95gArEFDD&`zMwVl$D0gj(shmA>`y)OSpR;$$Nhe^R{`2R80DDU5e%9^ zO)!g~Pl!iJN)An6VuodDDmW`79;BY-I7K)iWz{4O_3Kg2Mk%T$NfpphO=%lCOGp@z za}oj)xCEXIIsh3HJ)}H{Ltab^MyVnt%~H}J>SA;?T{S)EdouvA?@ zaM@u)kgCP4a83NHfmS&pED?L>`<#9t`jY?Lg9lMHS&hIQj=<*xAe$n}>KewD zstLIkOrF37vnQj+7M_;O>umqZ$7J558Mgom=lHdnO-dgxfy_OJrJK-vNViIA?4^(E zoDhH&YEHlnMl$G3V4GIW*HZ8aeA2s;BkuMBXte9OYCQtAyq`3j>$2Gg@8ef)^?+9Y zK7YIT{#Jl?kDi&6qqq}aQ7s6F#Vfc#JpLdQCsn8`ka8?4nr&RUfS}*a1d_rb7Rd}L zO3tuIXLdhxU=qa)R~ISmEBOhxBR4sAqjzN?Xt zW+8^!uVD|UX|ML!66jME;xa(Rf*GErDpGrvvVo>sO z^1I1ZIU%!3!HH~~6=1=4;jjVN=s^|4BDWZ-yrRy_ssjlj04In8A_xM2Aej8w_^T9` z0myqYoioTdziX2`z%Ax`Kv@Q)Oov{=7P49<5(yw)?(Au`RU=T1z-jW3s;bm}3}Jy~ zf}12Js411Hev~HFsHm-wNz8O9O)6apdXdU4=qjrN1Ejc4rnCurn4hCzhn(}g&P^Jg zMd!DmP0x2LH09zV1U6xjo^aBxaOWisUn$fPb{6qD8IS_2*WSK255 z7ZMMXuQng0875-ZWyBnIlJknmS@v)J>}>-l}{xJT^ItjT_ZrfQ8;yJ`fg z5oj2Jp@H+zrCLbooi{R!{sXL(Ya6|OB?0iLoi$E$rc0@VmC z%n{J7Uz1y`33O4ZtE>;X$F3C_8iD&h0u`X$_aU!tSdBn60@VmqBXBQ9paQgeF{)LY)d*B0 zP>nz}0{49cDnPsMLtfpm8i8sAsu8G0;9iVC1!(tTRI4_t5vWF>8i8sA?)wN-fOg-9 zyt-jE0@VmqBT$XNy%>QC(C)>kR&7=zP>nz}0@VoI_YtT7?Y<9rb;D`|su8G0pc;XD vF#;8!-HTDJ+N?&P8i8sAsu8&FBk=zLA4#a6%&a#k00000NkvXXu0mjfcYDlp literal 0 HcmV?d00001 diff --git a/oscar/__init__.py b/oscar/__init__.py new file mode 100644 index 0000000..3dc1f76 --- /dev/null +++ b/oscar/__init__.py @@ -0,0 +1 @@ +__version__ = "0.1.0" diff --git a/oscar/modeling/__init__.py 
diff --git a/oscar/modeling/__init__.py b/oscar/modeling/__init__.py
new file mode 100644
index 0000000..3dc1f76
--- /dev/null
+++ b/oscar/modeling/__init__.py
@@ -0,0 +1 @@
+__version__ = "0.1.0"
diff --git a/oscar/modeling/modeling_bert.py b/oscar/modeling/modeling_bert.py
new file mode 100644
index 0000000..aee8cb3
--- /dev/null
+++ b/oscar/modeling/modeling_bert.py
@@ -0,0 +1,711 @@
+# Copyright (c) 2020 Microsoft Corporation. Licensed under the MIT license.
+
+from __future__ import absolute_import, division, print_function, unicode_literals
+import logging
+import math
+import torch
+from torch import nn
+import torch.nn.functional as F
+from torch.nn import CrossEntropyLoss, MSELoss
+from transformers.pytorch_transformers.modeling_bert import (BertEmbeddings,
+        BertSelfAttention, BertAttention, BertEncoder, BertLayer,
+        BertSelfOutput, BertIntermediate, BertOutput, BertModel,
+        BertPooler, BertLayerNorm, BertPreTrainedModel,
+        BertPredictionHeadTransform)
+from .modeling_utils import CaptionPreTrainedModel
+from ..utils.cbs import ConstrainedBeamSearch, select_best_beam_with_constraints
+
+logger = logging.getLogger(__name__)
+
+
+class CaptionBertSelfAttention(BertSelfAttention):
+    """
+    Modified from BertSelfAttention to add support for output_hidden_states.
+    """
+    def __init__(self, config):
+        super(CaptionBertSelfAttention, self).__init__(config)
+
+    def forward(self, hidden_states, attention_mask, head_mask=None,
+            history_state=None):
+        if history_state is not None:
+            # keys/values attend over the cached history plus the new tokens;
+            # queries are computed for the new tokens only
+            x_states = torch.cat([history_state, hidden_states], dim=1)
+            mixed_query_layer = self.query(hidden_states)
+            mixed_key_layer = self.key(x_states)
+            mixed_value_layer = self.value(x_states)
+        else:
+            mixed_query_layer = self.query(hidden_states)
+            mixed_key_layer = self.key(hidden_states)
+            mixed_value_layer = self.value(hidden_states)
+
+        query_layer = self.transpose_for_scores(mixed_query_layer)
+        key_layer = self.transpose_for_scores(mixed_key_layer)
+        value_layer = self.transpose_for_scores(mixed_value_layer)
+
+        # Take the dot product between "query" and "key" to get the raw attention scores.
+        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
+        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
+        # Apply the attention mask (precomputed for all layers in BertModel forward() function)
+        attention_scores = attention_scores + attention_mask
+
+        # Normalize the attention scores to probabilities.
+        attention_probs = nn.Softmax(dim=-1)(attention_scores)
+
+        # This is actually dropping out entire tokens to attend to, which might
+        # seem a bit unusual, but is taken from the original Transformer paper.
+        attention_probs = self.dropout(attention_probs)
+
+        # Mask heads if we want to
+        if head_mask is not None:
+            attention_probs = attention_probs * head_mask
+
+        context_layer = torch.matmul(attention_probs, value_layer)
+
+        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
+        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
+        context_layer = context_layer.view(*new_context_layer_shape)
+
+        outputs = (context_layer, attention_probs) if self.output_attentions else (context_layer,)
+        return outputs
+
+
+class CaptionBertAttention(BertAttention):
+    """
+    Modified from BertAttention to add support for output_hidden_states.
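+
+    Example (an illustrative sketch, not part of the original code; the config
+    values and tensor shapes below are assumed)::
+
+        import torch
+        from transformers.pytorch_transformers.modeling_bert import BertConfig
+
+        config = BertConfig(hidden_size=768, num_attention_heads=12)
+        attn = CaptionBertAttention(config)
+        hidden = torch.rand(2, 1, 768)     # the newly generated position only
+        history = torch.rand(2, 20, 768)   # cached hidden states of earlier positions
+        # additive mask over [history; new]: 0.0 keeps a position, -10000.0 masks it
+        mask = torch.zeros(2, 1, 1, 21)
+        out = attn(hidden, mask, history_state=history)
+        assert out[0].shape == (2, 1, 768)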
+ """ + def __init__(self, config): + super(CaptionBertAttention, self).__init__(config) + self.self = CaptionBertSelfAttention(config) + self.output = BertSelfOutput(config) + + def forward(self, input_tensor, attention_mask, head_mask=None, + history_state=None): + self_outputs = self.self(input_tensor, attention_mask, head_mask, history_state) + attention_output = self.output(self_outputs[0], input_tensor) + outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them + return outputs + + +class CaptionBertEncoder(BertEncoder): + """ + Modified from BertEncoder to add support for output_hidden_states. + """ + def __init__(self, config): + super(CaptionBertEncoder, self).__init__(config) + self.output_attentions = config.output_attentions + self.output_hidden_states = config.output_hidden_states + self.layer = nn.ModuleList([CaptionBertLayer(config) for _ in range(config.num_hidden_layers)]) + + def forward(self, hidden_states, attention_mask, head_mask=None, + encoder_history_states=None): + all_hidden_states = () + all_attentions = () + for i, layer_module in enumerate(self.layer): + if self.output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + history_state = None if encoder_history_states is None else encoder_history_states[i] + layer_outputs = layer_module( + hidden_states, attention_mask, head_mask[i], + history_state) + hidden_states = layer_outputs[0] + + if self.output_attentions: + all_attentions = all_attentions + (layer_outputs[1],) + + # Add last layer + if self.output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + outputs = (hidden_states,) + if self.output_hidden_states: + outputs = outputs + (all_hidden_states,) + if self.output_attentions: + outputs = outputs + (all_attentions,) + return outputs # outputs, (hidden states), (attentions) + + +class CaptionBertLayer(BertLayer): + """ + Modified from BertLayer to add support for output_hidden_states. 
+ """ + def __init__(self, config): + super(CaptionBertLayer, self).__init__(config) + self.attention = CaptionBertAttention(config) + self.intermediate = BertIntermediate(config) + self.output = BertOutput(config) + + def forward(self, hidden_states, attention_mask, head_mask=None, + history_state=None): + attention_outputs = self.attention(hidden_states, attention_mask, + head_mask, history_state) + attention_output = attention_outputs[0] + intermediate_output = self.intermediate(attention_output) + layer_output = self.output(intermediate_output, attention_output) + outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them + return outputs + + +class BertImgModel(BertPreTrainedModel): + """ Expand from BertModel to handle image region features as input + """ + def __init__(self, config): + super(BertImgModel, self).__init__(config) + self.embeddings = BertEmbeddings(config) + self.encoder = CaptionBertEncoder(config) + self.pooler = BertPooler(config) + + self.img_dim = config.img_feature_dim + logger.info('BertImgModel Image Dimension: {}'.format(self.img_dim)) + self.img_feature_type = config.img_feature_type + if hasattr(config, 'use_img_layernorm'): + self.use_img_layernorm = config.use_img_layernorm + else: + self.use_img_layernorm = None + + if config.img_feature_type == 'dis_code': + self.code_embeddings = nn.Embedding(config.code_voc, config.code_dim, padding_idx=0) + self.img_embedding = nn.Linear(config.code_dim, self.config.hidden_size, bias=True) + elif config.img_feature_type == 'dis_code_t': # transpose + self.code_embeddings = nn.Embedding(config.code_voc, config.code_dim, padding_idx=0) + self.img_embedding = nn.Linear(config.code_size, self.config.hidden_size, bias=True) + elif config.img_feature_type == 'dis_code_scale': # scaled + self.input_embeddings = nn.Linear(config.code_dim, config.code_size, bias=True) + self.code_embeddings = nn.Embedding(config.code_voc, config.code_dim, padding_idx=0) + self.img_embedding = nn.Linear(config.code_dim, self.config.hidden_size, bias=True) + else: + self.img_embedding = nn.Linear(self.img_dim, self.config.hidden_size, bias=True) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + if self.use_img_layernorm: + self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.img_layer_norm_eps) + + self.apply(self.init_weights) + + def _resize_token_embeddings(self, new_num_tokens): + old_embeddings = self.embeddings.word_embeddings + new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens) + self.embeddings.word_embeddings = new_embeddings + return self.embeddings.word_embeddings + + def _prune_heads(self, heads_to_prune): + """ Prunes heads of the model. + heads_to_prune: dict of {layer_num: list of heads to prune in this layer} + See base class PreTrainedModel + """ + for layer, heads in heads_to_prune.items(): + self.encoder.layer[layer].attention.prune_heads(heads) + + def forward(self, input_ids, token_type_ids=None, attention_mask=None, + position_ids=None, head_mask=None, img_feats=None, + encoder_history_states=None): + if attention_mask is None: + attention_mask = torch.ones_like(input_ids) + + if token_type_ids is None: + token_type_ids = torch.zeros_like(input_ids) + + # We create a 3D attention mask from a 2D tensor mask. 
+        # Sizes are [batch_size, 1, 1, to_seq_length]
+        # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
+        # this attention mask is simpler than the triangular masking of causal attention
+        # used in OpenAI GPT; we just need to prepare the broadcast dimension here.
+        if attention_mask.dim() == 2:
+            extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
+        elif attention_mask.dim() == 3:
+            extended_attention_mask = attention_mask.unsqueeze(1)
+        else:
+            raise NotImplementedError
+
+        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
+        # masked positions, this operation will create a tensor which is 0.0 for
+        # positions we want to attend and -10000.0 for masked positions.
+        # Since we are adding it to the raw scores before the softmax, this is
+        # effectively the same as removing these entirely.
+        extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype)  # fp16 compatibility
+        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
+
+        # Prepare head mask if needed
+        # 1.0 in head_mask indicate we keep the head
+        # attention_probs has shape bsz x n_heads x N x N
+        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
+        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
+        if head_mask is not None:
+            if head_mask.dim() == 1:
+                head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
+                head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)
+            elif head_mask.dim() == 2:
+                head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)  # We can specify head_mask for each layer
+            # switch to float if needed + fp16 compatibility
+            head_mask = head_mask.to(dtype=next(self.parameters()).dtype)
+        else:
+            head_mask = [None] * self.config.num_hidden_layers
+
+        embedding_output = self.embeddings(input_ids, position_ids=position_ids,
+                token_type_ids=token_type_ids)
+        if encoder_history_states:
+            assert img_feats is None, "Cannot take image features while using encoder history states"
+
+        if img_feats is not None:
+            if self.img_feature_type == 'dis_code':
+                code_emb = self.code_embeddings(img_feats)
+                img_embedding_output = self.img_embedding(code_emb)
+            elif self.img_feature_type == 'dis_code_t':  # transpose
+                code_emb = self.code_embeddings(img_feats)
+                code_emb = code_emb.permute(0, 2, 1)
+                img_embedding_output = self.img_embedding(code_emb)
+            elif self.img_feature_type == 'dis_code_scale':  # left scaled
+                code_emb = self.code_embeddings(img_feats)
+                img_embedding_output = self.img_embedding(code_emb)
+            else:
+                img_embedding_output = self.img_embedding(img_feats)
+                if self.use_img_layernorm:
+                    img_embedding_output = self.LayerNorm(img_embedding_output)
+
+                # add dropout on image embedding
+                img_embedding_output = self.dropout(img_embedding_output)
+
+            # concatenate two embeddings
+            embedding_output = torch.cat((embedding_output, img_embedding_output), 1)
+
+        encoder_outputs = self.encoder(embedding_output,
+                extended_attention_mask, head_mask=head_mask,
+                encoder_history_states=encoder_history_states)
+        sequence_output = encoder_outputs[0]
+        pooled_output = self.pooler(sequence_output)
+
+        # add hidden_states and attentions if they are here
+        outputs = (sequence_output, pooled_output,) + encoder_outputs[1:]
+        return outputs
+
+
+def instance_bce_with_logits(logits, labels, reduction='mean'):
+    assert logits.dim() == 2
+    loss = F.binary_cross_entropy_with_logits(logits, labels, reduction=reduction)
+    if reduction == 'mean':
+        loss *= labels.size(1)
+    return loss
+
+
+class ImageBertForSequenceClassification(BertPreTrainedModel):
+    """
+    Modified from BertForSequenceClassification to support oscar training.
+    """
+    def __init__(self, config):
+        super(ImageBertForSequenceClassification, self).__init__(config)
+        self.num_labels = config.num_labels
+        self.loss_type = config.loss_type
+        self.config = config
+        if config.img_feature_dim > 0:
+            self.bert = BertImgModel(config)
+        else:
+            self.bert = BertModel(config)
+        self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+        if hasattr(config, 'classifier'):
+            if not hasattr(config, 'cls_hidden_scale'):
+                config.cls_hidden_scale = 2
+
+            if config.classifier == 'linear':
+                self.classifier = nn.Linear(config.hidden_size,
+                        self.config.num_labels)
+            elif config.classifier == 'mlp':
+                self.classifier = nn.Sequential(
+                    nn.Linear(config.hidden_size, config.hidden_size * config.cls_hidden_scale),
+                    nn.ReLU(),
+                    nn.Linear(config.hidden_size * config.cls_hidden_scale, self.config.num_labels)
+                )
+        else:
+            self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)  # original
+        self.apply(self.init_weights)
+
+    def init_code_embedding(self, em):
+        self.bert.code_embeddings.weight.data = em.clone()
+
+    def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None,
+            position_ids=None, head_mask=None, img_feats=None):
+        outputs = self.bert(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
+                attention_mask=attention_mask, head_mask=head_mask, img_feats=img_feats)
+        pooled_output = outputs[1]
+
+        pooled_output = self.dropout(pooled_output)
+        logits = self.classifier(pooled_output)
+
+        outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
+        if labels is not None:
+            if self.num_labels == 1:  # doing regression
+                loss_fct = MSELoss()
+                labels = labels.to(torch.float)
+                loss = loss_fct(logits.view(-1), labels.view(-1))
+            else:
+                if self.loss_type == 'kl':
+                    # KL Loss: https://github.com/uclanlp/visualbert/blob/master/pytorch_pretrained_bert/modeling.py
+                    loss_fct = torch.nn.KLDivLoss(reduction="batchmean")
+                    log_softmax = torch.nn.LogSoftmax(dim=-1)
+                    # 3129 matches the VQA answer-vocabulary size used by this codebase
+                    reshaped_logits = logits.contiguous().view(-1, 3129)
+                    reshaped_logits = log_softmax(reshaped_logits)
+                    loss = loss_fct(reshaped_logits, labels.contiguous())
+                elif self.loss_type == 'bce':  # [VQA]
+                    loss = instance_bce_with_logits(logits, labels)
+                else:  # cross_entropy [GQA, Retrieval, Captioning]
+                    loss_fct = CrossEntropyLoss()
+                    loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+            outputs = (loss,) + outputs
+        return outputs
+
+
+class ImageBertForMultipleChoice(BertPreTrainedModel):
+    """
+    Modified from BertForMultipleChoice to support oscar training.
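+
+    Example (an illustrative shape walk-through only; every tensor below is
+    assumed, not taken from the training code)::
+
+        import torch
+
+        batch_size, num_choices, seq_len, hidden = 2, 4, 16, 768
+        input_ids = torch.randint(0, 100, (batch_size, num_choices, seq_len))
+        # each choice is scored by the shared encoder, so choices are flattened first
+        flat_input_ids = input_ids.view(-1, input_ids.size(-1))  # (8, 16)
+        # after encoding, the pooled [CLS] vectors are re-grouped per example so
+        # the classifier sees all choices of one example at once
+        pooled_output = torch.rand(batch_size * num_choices, hidden)
+        reshaped = pooled_output.view(-1, num_choices * hidden)  # (2, 3072)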
+ """ + def __init__(self, config): + super(ImageBertForMultipleChoice, self).__init__(config) + self.loss_type = config.loss_type + if config.img_feature_dim > 0: + self.bert = BertImgModel(config) # ImageBERT + else: + self.bert = BertModel(config) # original BERT + + self.dropout = nn.Dropout(config.hidden_dropout_prob) + if hasattr(config, 'classifier'): + if not hasattr(config, 'cls_hidden_scale'): config.cls_hidden_scale = 2 + if config.classifier == 'linear': + self.classifier = nn.Linear(config.num_choice*config.hidden_size, self.config.num_labels) + elif config.classifier == 'mlp': + self.classifier = nn.Sequential( + nn.Linear(config.num_choice*config.hidden_size, config.hidden_size*config.cls_hidden_scale), + nn.ReLU(), + nn.Linear(config.hidden_size*config.cls_hidden_scale, self.config.num_labels) + ) + else: + self.classifier = nn.Linear(config.num_choice*config.hidden_size, self.config.num_labels) # original + + self.apply(self.init_weights) + + def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, + position_ids=None, head_mask=None, img_feats=None): + num_choices = input_ids.shape[1] + + flat_input_ids = input_ids.view(-1, input_ids.size(-1)) + flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None + flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None + flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None + + flat_img_feats = img_feats.view(-1, img_feats.size(-2), img_feats.size(-1)) if img_feats is not None else None + + if isinstance(self.bert, BertImgModel): + outputs = self.bert(flat_input_ids, position_ids=flat_position_ids, token_type_ids=flat_token_type_ids, + attention_mask=flat_attention_mask, head_mask=head_mask, img_feats=flat_img_feats) + else: + outputs = self.bert(flat_input_ids, position_ids=flat_position_ids, token_type_ids=flat_token_type_ids, + attention_mask=flat_attention_mask, head_mask=head_mask) + pooled_output = outputs[1] + + pooled_output = self.dropout(pooled_output) + + # reshaped_pool_output + reshaped_pool_output = pooled_output.view(-1, self.config.num_choice*(pooled_output.shape[1])) + logits = self.classifier(reshaped_pool_output) + + outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here + + if labels is not None: + if self.loss_type == 'bce': + loss = instance_bce_with_logits(logits, labels.view(-1, self.config.num_labels)) + else: + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits, labels) + outputs = (loss,) + outputs + return outputs + + +class BertForImageCaptioning(CaptionPreTrainedModel): + """ + Bert for Image Captioning. 
+ """ + def __init__(self, config): + super(BertForImageCaptioning, self).__init__(config) + self.config = config + self.bert = BertImgModel(config) + self.transform = BertPredictionHeadTransform(config) + bert_embedding_weight = self.bert.embeddings.word_embeddings.weight + self.decoder = nn.Linear(bert_embedding_weight.size(1), + bert_embedding_weight.size(0), bias=False) + self.loss = nn.CrossEntropyLoss(reduction='mean') + self.drop_worst_ratio = 0.2 + + def forward(self, *args, **kwargs): + is_decode = kwargs.get('is_decode', False) + if is_decode: + return self.generate(*args, **kwargs) + else: + return self.encode_forward(*args, **kwargs) + + def encode_forward(self, input_ids, img_feats, attention_mask, masked_pos, masked_ids=None, + token_type_ids=None, position_ids=None, head_mask=None, + is_training=True, encoder_history_states=None): + outputs = self.bert(input_ids, img_feats=img_feats, attention_mask=attention_mask, + position_ids=position_ids, token_type_ids=token_type_ids, + head_mask=head_mask, + encoder_history_states=encoder_history_states) + sequence_output = outputs[0][:, :masked_pos.shape[-1], :] + + if is_training: + # num_masks_in_batch * hidden_size + sequence_output_masked = sequence_output[masked_pos==1, :] + transformed_output_masked = self.transform(sequence_output_masked) + class_logits = self.decoder(transformed_output_masked) + masked_ids = masked_ids[masked_ids != 0] # remove padding masks + masked_loss = self.loss(class_logits.float(), masked_ids) + outputs = (masked_loss, class_logits,) + outputs[2:] + else: + class_logits = self.decoder(self.transform(sequence_output)) + outputs = (class_logits,) + outputs[2:] + return outputs + + def prepare_inputs_for_generation(self, curr_ids, past=None): + # NOTE: if attention is on, it should be the token used to mask words in training + mask_token_id = self.mask_token_id + batch_size = curr_ids.shape[0] + mask_ids = torch.full( + (batch_size, 1), mask_token_id, dtype=torch.long, device=curr_ids.device + ) + + def _slice(t, start, end): + if t is None: + return t + assert t.shape == (batch_size, self.max_seq_len + self.od_labels_len) + return t[:, start: end] + + def _remove_elements(t, start, end): + if t is None: + return t + assert t.shape == (batch_size, self.max_seq_len + self.od_labels_len) + return torch.cat([t[:, :start], t[:, end:]], dim=1) + + if past is None: + input_ids = torch.cat([curr_ids, mask_ids], dim=1) + + curr_len = input_ids.shape[1] + full_len = self.max_seq_len + self.od_labels_len + self.img_seq_len + assert self.full_attention_mask.shape == (batch_size, + full_len, full_len) + + def _remove_rows_cols(t, row_start, row_end, col_start, col_end): + t00 = t[:, :row_start, :col_start] + t01 = t[:, :row_start, col_end:] + t10 = t[:, row_end:, :col_start] + t11 = t[:, row_end:, col_end:] + res = torch.cat([torch.cat([t00, t01], dim=2), torch.cat([t10, t11], + dim=2)], dim=1) + assert res.shape == (t.shape[0], t.shape[1]-row_end+row_start, + t.shape[2]-col_end+col_start) + return res + + seq_start = curr_len + seq_end = self.max_seq_len + attention_mask = _remove_rows_cols(self.full_attention_mask, seq_start, + seq_end, seq_start, seq_end) + + masked_pos = _remove_elements(self.full_masked_pos, seq_start, seq_end) + token_type_ids = _remove_elements(self.full_token_type_ids, seq_start, seq_end) + position_ids = _remove_elements(self.full_position_ids, seq_start, seq_end) + img_feats = self.img_feats + + if self.add_od_labels: + assert self.od_label_ids.shape[1] == self.od_labels_len + input_ids = 
+                input_ids = torch.cat([input_ids, self.od_label_ids], dim=1)
+        else:
+            last_token = curr_ids[:, -1:]
+            # The representation of the last token should be re-computed, because
+            # it depends on both self-attention context and input tensor
+            input_ids = torch.cat([last_token, mask_ids], dim=1)
+            start_pos = curr_ids.shape[1] - 1
+            end_pos = start_pos + input_ids.shape[1]
+            masked_pos = _slice(self.full_masked_pos, start_pos, end_pos)
+            token_type_ids = _slice(self.full_token_type_ids, start_pos, end_pos)
+            position_ids = _slice(self.full_position_ids, start_pos, end_pos)
+
+            img_feats = None
+            assert past[0].shape[0] == batch_size
+            if self.prev_encoded_layers is None:
+                assert start_pos == 1  # the first token after BOS
+                assert past[0].shape[1] == 2 + self.od_labels_len + self.img_seq_len
+                # reorder to [od_labels, img_feats, sentence]
+                self.prev_encoded_layers = [
+                        torch.cat([x[:, 2:, :], x[:, :start_pos, :]], dim=1)
+                        for x in past]
+                s2s = self.full_attention_mask[:, :self.max_seq_len,
+                        :self.max_seq_len]
+                s2i = self.full_attention_mask[:, :self.max_seq_len,
+                        self.max_seq_len:]
+                i2s = self.full_attention_mask[:, self.max_seq_len:,
+                        :self.max_seq_len]
+                i2i = self.full_attention_mask[:, self.max_seq_len:,
+                        self.max_seq_len:]
+                self.full_attention_mask = torch.cat(
+                        [torch.cat([i2i, i2s], dim=2),
+                        torch.cat([s2i, s2s], dim=2)],
+                        dim=1)
+            else:
+                assert start_pos > 1
+                assert past[0].shape[1] == 2
+                self.prev_encoded_layers = [torch.cat([x, p[:, :-1, :]], dim=1)
+                        for x, p in zip(self.prev_encoded_layers, past)]
+
+            attention_mask = self.full_attention_mask[:,
+                    self.od_labels_len+self.img_seq_len+start_pos: self.od_labels_len+self.img_seq_len+end_pos,
+                    :self.od_labels_len+self.img_seq_len+end_pos]
+
+        return {'input_ids': input_ids, 'img_feats': img_feats,
+                'masked_pos': masked_pos, 'attention_mask': attention_mask,
+                'token_type_ids': token_type_ids, 'position_ids': position_ids,
+                'is_training': False,
+                'encoder_history_states': self.prev_encoded_layers}
+
+    def get_output_embeddings(self):
+        return self.decoder
+
+    def generate(self, img_feats, attention_mask, masked_pos, token_type_ids=None,
+            position_ids=None, head_mask=None, input_ids=None, max_length=None,
+            do_sample=None, num_beams=None, temperature=None, top_k=None, top_p=None,
+            repetition_penalty=None, bos_token_id=None, pad_token_id=None,
+            eos_token_ids=None, mask_token_id=None, length_penalty=None, num_return_sequences=None,
+            num_keep_best=1, is_decode=None,
+            add_od_labels=False, od_labels_start_posid=None,
+            use_cbs=False, fsm=None, num_constraints=None,
+            min_constraints_to_satisfy=None, use_hypo=False,
+            ):
+        """ Generates captions given image features
+        """
+        assert is_decode
+        batch_size = img_feats.shape[0]
+        self.img_seq_len = img_feats.shape[1]
+        self.max_seq_len = max_length
+        self.mask_token_id = mask_token_id
+        self.prev_encoded_layers = None
+        # NOTE: num_keep_best is not equivalent to num_return_sequences
+        # num_keep_best is the number of hypotheses to keep in beam search
+        # num_return_sequences is the repeating times of input; coupled with
+        # do_sample=True it can generate more than one sample per image
+        self.num_keep_best = num_keep_best
+
+        vocab_size = self.config.vocab_size
+        if not use_cbs:
+            num_fsm_states = 1
+        else:
+            b, num_fsm_states, f1, v = fsm.shape
+            assert b==batch_size and v==vocab_size and f1==num_fsm_states
+
+        self.add_od_labels = add_od_labels
+        # avoid position_ids collision of caption and od labels
+        self.od_labels_start_posid = max(od_labels_start_posid, self.max_seq_len)
+        if self.add_od_labels:
+            # get od labels part from input_ids
+            assert input_ids.shape[0] == batch_size
+            od_label_ids = input_ids[:, self.max_seq_len:]
+            self.od_labels_len = input_ids.shape[1] - self.max_seq_len
+            self.od_label_ids = self._expand_for_beams(od_label_ids, num_beams,
+                    num_fsm_states)
+            input_ids = None
+        else:
+            self.od_labels_len = 0
+            self.od_label_ids = None
+            assert input_ids.shape == (batch_size, self.max_seq_len)
+            input_ids = None
+
+        if input_ids is None:
+            input_ids = torch.full(
+                (batch_size, 1), bos_token_id, dtype=torch.long, device=next(self.parameters()).device
+            )
+        else:
+            assert input_ids.dim() == 2, "Input prompt should be of shape (batch_size, sequence length)."
+            assert input_ids.shape[0] == batch_size, "Input batch size must match image features"
+
+        if position_ids is None:
+            position_ids = torch.arange(self.max_seq_len, dtype=torch.long, device=input_ids.device)
+            posids_len = self.max_seq_len
+            if self.add_od_labels:
+                od_labels_posids = torch.arange(
+                        self.od_labels_start_posid,
+                        self.od_labels_start_posid + self.od_labels_len, dtype=torch.long, device=input_ids.device)
+                position_ids = torch.cat([position_ids, od_labels_posids])
+                posids_len += self.od_labels_len
+            position_ids = position_ids.unsqueeze(0).expand([batch_size, posids_len])
+
+        cur_len = input_ids.shape[1]
+        assert num_return_sequences == 1, 'not supported num_return_sequences != 1'
+        effective_batch_size = batch_size
+
+        self.img_feats = self._expand_for_beams(img_feats, num_beams, num_fsm_states)
+        self.full_attention_mask = self._expand_for_beams(attention_mask, num_beams, num_fsm_states)
+        self.full_masked_pos = self._expand_for_beams(masked_pos, num_beams, num_fsm_states)
+        self.full_token_type_ids = self._expand_for_beams(token_type_ids, num_beams, num_fsm_states)
+        self.full_position_ids = self._expand_for_beams(position_ids, num_beams, num_fsm_states)
+        self.full_head_mask = self._expand_for_beams(head_mask, num_beams, num_fsm_states)
+
+        if not use_cbs:
+            if num_beams > 1:
+                output = self._generate_beam_search(
+                    input_ids,
+                    cur_len,
+                    max_length,
+                    do_sample,
+                    temperature,
+                    top_k,
+                    top_p,
+                    repetition_penalty,
+                    pad_token_id,
+                    eos_token_ids,
+                    effective_batch_size,
+                    length_penalty,
+                    num_beams,
+                    vocab_size,
+                )
+            else:
+                output = self._generate_no_beam_search(
+                    input_ids,
+                    cur_len,
+                    max_length,
+                    do_sample,
+                    temperature,
+                    top_k,
+                    top_p,
+                    repetition_penalty,
+                    pad_token_id,
+                    eos_token_ids,
+                    effective_batch_size,
+                )
+        else:
+            assert self.num_keep_best == 1, 'not supported n_best > 1 for CBS'
+            searcher = ConstrainedBeamSearch(eos_token_ids, max_length,
+                    num_beams, use_hypo=use_hypo)
+            curr_ids, sum_logprobs = searcher.search(
+                input_ids,
+                None,
+                self._decode_step,
+                fsm,
+            )
+            curr_ids, sum_logprobs = select_best_beam_with_constraints(
+                curr_ids,
+                sum_logprobs,
+                num_constraints,
+                min_constraints_to_satisfy,
+            )
+            # (batch_size, n_best, max_len), (batch_size, n_best)
+            output = (curr_ids.unsqueeze(1), sum_logprobs.unsqueeze(1))
+
+        return output
+
+    def _expand_for_beams(self, x, num_beams, num_fsm_states):
+        num_expand = num_beams * num_fsm_states
+        if x is None or num_expand == 1:
+            return x
+
+        input_shape = list(x.shape)
+        expanded_shape = input_shape[:1] + [num_expand] + input_shape[1:]
+        x = x.unsqueeze(1).expand(expanded_shape)
+        # (batch_size * num_beams, ...)
+        x = x.contiguous().view([input_shape[0] * num_expand] + input_shape[1:])
+        return x
+
+    def _do_output_past(self, outputs):
+        return len(outputs) > 1
diff --git a/oscar/modeling/modeling_utils.py b/oscar/modeling/modeling_utils.py
new file mode 100644
index 0000000..0763a60
--- /dev/null
+++ b/oscar/modeling/modeling_utils.py
@@ -0,0 +1,671 @@
+# Copyright (c) 2020 Microsoft Corporation. Licensed under the MIT license.
+
+from __future__ import absolute_import, division, print_function, unicode_literals
+
+import torch
+import torch.nn.functional as F
+
+from transformers.pytorch_transformers.modeling_bert import (BertConfig,
+        load_tf_weights_in_bert, BERT_PRETRAINED_MODEL_ARCHIVE_MAP)
+from transformers.pytorch_transformers.modeling_utils import PreTrainedModel
+
+
+class CaptionPreTrainedModel(PreTrainedModel):
+    """ Expand base class for image captioning modeling.
+    """
+    config_class = BertConfig
+    pretrained_model_archive_map = BERT_PRETRAINED_MODEL_ARCHIVE_MAP
+    load_tf_weights = load_tf_weights_in_bert
+    base_model_prefix = 'bert'
+
+    def __init__(self, config, *inputs, **kwargs):
+        super(CaptionPreTrainedModel, self).__init__(config, *inputs, **kwargs)
+
+    def prepare_inputs_for_generation(self, input_ids, **kwargs):
+        return {"input_ids": input_ids}
+
+    def _do_output_past(self, outputs):
+        has_output_past = hasattr(self.config, "output_past") and self.config.output_past
+        has_mem_len = hasattr(self.config, "mem_len") and self.config.mem_len
+
+        if has_output_past and not has_mem_len and len(outputs) > 1:
+            return True
+        elif has_mem_len and self.config.mem_len > 0 and len(outputs) > 1:
+            return True
+
+        return False
+
+    def generate(
+        self,
+        input_ids=None,
+        max_length=None,
+        do_sample=None,
+        num_beams=None,
+        temperature=None,
+        top_k=None,
+        top_p=None,
+        repetition_penalty=None,
+        bos_token_id=None,
+        pad_token_id=None,
+        eos_token_ids=None,
+        length_penalty=None,
+        num_return_sequences=None,
+    ):
+        r""" Generates sequences for models with a LM head. The method currently supports greedy or penalized greedy decoding, sampling with top-k or nucleus sampling, and beam search.
+
+        Adapted in part from `Facebook's XLM beam search code`_.
+
+        .. _`Facebook's XLM beam search code`:
+            https://github.com/facebookresearch/XLM/blob/9e6f6814d17be4fe5b15f2e6c43eb2b2d76daeb4/src/model/transformer.py#L529
+
+
+        Parameters:
+
+            input_ids: (`optional`) `torch.LongTensor` of shape `(batch_size, sequence_length)`
+                The sequence used as a prompt for the generation. If `None` the method initializes
+                it as an empty `torch.LongTensor` of shape `(1,)`.
+
+            max_length: (`optional`) int
+                The max length of the sequence to be generated. Between 1 and infinity. Default to 20.
+
+            do_sample: (`optional`) bool
+                If set to `False` greedy decoding is used. Otherwise sampling is used. Default to greedy sampling.
+
+            num_beams: (`optional`) int
+                Number of beams for beam search. Must be between 1 and infinity. 1 means no beam search. Default to 1.
+
+            temperature: (`optional`) float
+                The value used to modulate the next token probabilities. Must be strictly positive. Default to 1.0.
+
+            top_k: (`optional`) int
+                The number of highest probability vocabulary tokens to keep for top-k-filtering. Between 1 and infinity. Default to 50.
+
+            top_p: (`optional`) float
+                The cumulative probability of parameter highest probability vocabulary tokens to keep for nucleus sampling. Must be between 0 and 1. Default to 1.
+
+            repetition_penalty: (`optional`) float
+                The parameter for repetition penalty.
+                Between 1.0 and infinity. 1.0 means no penalty. Default to 1.0.
+
+            bos_token_id: (`optional`) int
+                Beginning of sentence token if no prompt is provided. Default to 0.
+
+            eos_token_ids: (`optional`) int or list of int
+                End of sequence token or list of tokens to stop the generation. Default to 0.
+
+            length_penalty: (`optional`) float
+                Exponential penalty to the length. Default to 1.
+
+            num_return_sequences: (`optional`) int
+                The number of independently computed returned sequences for each element in the batch. Default to 1.
+
+        Examples::
+
+            tokenizer = AutoTokenizer.from_pretrained('distilgpt2')   # Initialize tokenizer
+            model = AutoModelWithLMHead.from_pretrained('distilgpt2')    # Download model and configuration from S3 and cache.
+            outputs = model.generate(max_length=40, bos_token_id=tokenizer.bos_token_id, eos_token_ids=tokenizer.eos_token_id)  # do greedy decoding without beam search
+            print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True)))
+
+            tokenizer = AutoTokenizer.from_pretrained('openai-gpt')   # Initialize tokenizer
+            model = AutoModelWithLMHead.from_pretrained('openai-gpt')    # Download model and configuration from S3 and cache.
+            input_context = 'The dog'
+            input_ids = torch.tensor(tokenizer.encode(input_context)).unsqueeze(0)  # encode input context
+            outputs = model.generate(input_ids=input_ids, do_sample=True, num_beams=5, num_return_sequences=3, temperature=1.5)  # generate 3 independent sequences using beam search decoding (5 beams) with sampling from initial context 'The dog'
+            for i in range(3):  # 3 output sequences were generated
+                print('Generated {}: {}'.format(i, tokenizer.decode(outputs[0][i], skip_special_tokens=True)))
+
+            tokenizer = AutoTokenizer.from_pretrained('distilgpt2')   # Initialize tokenizer
+            model = AutoModelWithLMHead.from_pretrained('distilgpt2')    # Download model and configuration from S3 and cache.
+            input_context = 'The dog'
+            input_ids = torch.tensor(tokenizer.encode(input_context)).unsqueeze(0)  # encode input context
+            outputs = model.generate(input_ids=input_ids, max_length=40, temperature=0.7, bos_token_id=tokenizer.bos_token_id, eos_token_ids=tokenizer.eos_token_id, num_beams=3)  # generate sequences using greedy beam search decoding (3 beams)
+            print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True)))
+
+            tokenizer = AutoTokenizer.from_pretrained('ctrl')   # Initialize tokenizer
+            model = AutoModelWithLMHead.from_pretrained('ctrl')    # Download model and configuration from S3 and cache.
+            input_context = 'Legal My neighbor is'   # "Legal" is one of the control codes for ctrl
+            input_ids = torch.tensor(tokenizer.encode(input_context)).unsqueeze(0)  # encode input context
+            outputs = model.generate(input_ids=input_ids, max_length=50, temperature=0.7, repetition_penalty=1.2)  # generate sequences using greedy search
+            print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True)))
+
+        """
+
+        # We cannot generate if the model does not have a LM head
+        if self.get_output_embeddings() is None:
+            raise AttributeError(
+                "You tried to generate sequences with a model that does not have a LM Head. "
+                "Please use another model class (e.g. `OpenAIGPTLMHeadModel`, `XLNetLMHeadModel`, `GPT2LMHeadModel`, `CTRLLMHeadModel`, `T5WithLMHeadModel`, `TransfoXLLMHeadModel`)"
+            )
+
+        max_length = max_length if max_length is not None else self.config.max_length
+        do_sample = do_sample if do_sample is not None else self.config.do_sample
+        num_beams = num_beams if num_beams is not None else self.config.num_beams
+        temperature = temperature if temperature is not None else self.config.temperature
+        top_k = top_k if top_k is not None else self.config.top_k
+        top_p = top_p if top_p is not None else self.config.top_p
+        repetition_penalty = repetition_penalty if repetition_penalty is not None else self.config.repetition_penalty
+        bos_token_id = bos_token_id if bos_token_id is not None else self.config.bos_token_id
+        pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id
+        eos_token_ids = eos_token_ids if eos_token_ids is not None else self.config.eos_token_ids
+        length_penalty = length_penalty if length_penalty is not None else self.config.length_penalty
+        num_return_sequences = (
+            num_return_sequences if num_return_sequences is not None else self.config.num_return_sequences
+        )
+
+        if input_ids is not None:
+            batch_size = input_ids.shape[0]  # overridden by the input batch_size
+        else:
+            batch_size = 1
+        if isinstance(eos_token_ids, int):
+            eos_token_ids = [eos_token_ids]
+
+        assert isinstance(max_length, int) and max_length > 0, "`max_length` should be a strictly positive integer."
+        assert isinstance(do_sample, bool), "`do_sample` should be a boolean."
+        assert isinstance(num_beams, int) and num_beams > 0, "`num_beams` should be a strictly positive integer."
+        assert temperature > 0, "`temperature` should be strictly positive."
+        assert isinstance(top_k, int) and top_k >= 0, "`top_k` should be a positive integer."
+        assert 0 <= top_p <= 1, "`top_p` should be between 0 and 1."
+        assert repetition_penalty >= 1.0, "`repetition_penalty` should be >= 1."
+        assert isinstance(bos_token_id, int) and bos_token_id >= 0, "`bos_token_id` should be a positive integer."
+        assert isinstance(pad_token_id, int) and pad_token_id >= 0, "`pad_token_id` should be a positive integer."
+        # use all(...) so the element-wise check is actually evaluated (a bare
+        # generator expression is always truthy)
+        assert isinstance(eos_token_ids, (list, tuple)) and all(
+            e >= 0 for e in eos_token_ids
+        ), "`eos_token_ids` should be a positive integer or a list/tuple of positive integers."
+        assert length_penalty > 0, "`length_penalty` should be strictly positive."
+        assert (
+            isinstance(num_return_sequences, int) and num_return_sequences > 0
+        ), "`num_return_sequences` should be a strictly positive integer."
+
+        if input_ids is None:
+            input_ids = torch.full(
+                (batch_size, 1), bos_token_id, dtype=torch.long, device=next(self.parameters()).device
+            )
+        else:
+            assert input_ids.dim() == 2, "Input prompt should be of shape (batch_size, sequence length)."
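+
+        # Illustrative usage sketch (assumes a BertTokenizer instance named
+        # `tokenizer` and prompt ids `prompt_ids`; not executed here). A minimal
+        # greedy call looks like:
+        #
+        #   output_ids, logprobs = model.generate(
+        #       input_ids=prompt_ids, max_length=20, num_beams=1,
+        #       bos_token_id=tokenizer.cls_token_id,
+        #       pad_token_id=tokenizer.pad_token_id,
+        #       eos_token_ids=[tokenizer.sep_token_id])
+        #
+        # The helpers below return token ids of shape
+        # (batch_size, num_keep_best, max_len) and per-sequence log-probabilities
+        # of shape (batch_size, num_keep_best).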
+
+        # current position and vocab size
+        cur_len = input_ids.shape[1]
+        vocab_size = self.config.vocab_size
+
+        if num_return_sequences != 1:
+            # Expand input to num return sequences
+            input_ids = input_ids.unsqueeze(1).expand(batch_size, num_return_sequences, cur_len)
+            input_ids = input_ids.contiguous().view(
+                batch_size * num_return_sequences, cur_len
+            )  # (batch_size * num_return_sequences, cur_len)
+            effective_batch_size = batch_size * num_return_sequences
+        else:
+            effective_batch_size = batch_size
+
+        if num_beams > 1:
+            output = self._generate_beam_search(
+                input_ids,
+                cur_len,
+                max_length,
+                do_sample,
+                temperature,
+                top_k,
+                top_p,
+                repetition_penalty,
+                pad_token_id,
+                eos_token_ids,
+                effective_batch_size,
+                length_penalty,
+                num_beams,
+                vocab_size,
+            )
+        else:
+            output = self._generate_no_beam_search(
+                input_ids,
+                cur_len,
+                max_length,
+                do_sample,
+                temperature,
+                top_k,
+                top_p,
+                repetition_penalty,
+                pad_token_id,
+                eos_token_ids,
+                effective_batch_size,
+            )
+
+        if num_return_sequences != 1:
+            for i in range(len(output)):
+                output[i] = output[i].view(batch_size, num_return_sequences, -1)
+        return output
+
+    def _decode_step(self, input_ids, past):
+        model_inputs = self.prepare_inputs_for_generation(input_ids, past=past)
+        outputs = self(**model_inputs)  # (batch_size * num_beams, cur_len, vocab_size)
+        token_len = outputs[0].shape[1]
+        if self.od_labels_len == 0:
+            next_token_idx = token_len - 1
+        else:
+            if token_len == 2:
+                assert self._do_output_past(outputs)
+                next_token_idx = 1
+            else:
+                next_token_idx = token_len - self.od_labels_len - 1
+
+        next_token_logits = outputs[0][:, next_token_idx, :]  # (batch_size * num_beams, vocab_size)
+        assert outputs[0].shape[1] == model_inputs['input_ids'].shape[1]
+
+        # if model has past, then set the past variable to speed up decoding
+        if self._do_output_past(outputs):
+            past = outputs[1]
+        return next_token_logits, past
+
+    def _generate_no_beam_search(
+        self,
+        input_ids,
+        cur_len,
+        max_length,
+        do_sample,
+        temperature,
+        top_k,
+        top_p,
+        repetition_penalty,
+        pad_token_id,
+        eos_token_ids,
+        batch_size,
+    ):
+        """ Generate sequences for each example without beam search (num_beams == 1).
+            All returned sequences are generated independently.
+        """
+        assert self.num_keep_best == 1, 'cannot generate >1 sentences in greedy search'
+        # current position / max lengths / length of generated sentences / unfinished sentences
+        unfinished_sents = []
+        cur_unfinished = input_ids.new(batch_size).fill_(1)
+
+        # log of scores for each sentence in the batch
+        logprobs = []
+
+        past = None
+
+        while cur_len < max_length:
+            model_inputs = self.prepare_inputs_for_generation(input_ids, past=past)
+            outputs = self(**model_inputs)
+            if cur_len == 1:
+                token_len = 2 + self.od_labels_len
+                next_token_idx = 1
+            else:
+                assert cur_len > 1
+                if not self._do_output_past(outputs):
+                    token_len = cur_len + 1 + self.od_labels_len
+                    next_token_idx = cur_len
+                else:
+                    token_len = 2
+                    next_token_idx = 1
+
+            assert outputs[0].shape[1] == token_len
+            next_token_logits = outputs[0][:, next_token_idx, :]
+
+            # if model has past, then set the past variable to speed up decoding
+            if self._do_output_past(outputs):
+                past = outputs[1]
+
+            # repetition penalty from CTRL paper (https://arxiv.org/abs/1909.05858)
+            if repetition_penalty != 1.0:
+                for i in range(batch_size):
+                    for previous_token in set(input_ids[i].tolist()):
+                        # if score < 0 then the repetition penalty has to be multiplied to reduce the previous token probability
+                        if next_token_logits[i, previous_token] < 0:
+                            next_token_logits[i, previous_token] *= repetition_penalty
+                        else:
+                            next_token_logits[i, previous_token] /= repetition_penalty
+
+            if do_sample:
+                # Temperature (higher temperature => more likely to sample low probability tokens)
+                if temperature != 1.0:
+                    next_token_logits = next_token_logits / temperature
+                # Top-p/top-k filtering
+                next_token_logits = top_k_top_p_filtering(next_token_logits, top_k=top_k, top_p=top_p)
+                # Sample
+                next_token = torch.multinomial(F.softmax(next_token_logits, dim=-1), num_samples=1).squeeze(1)
+            else:
+                # Greedy decoding
+                next_token = torch.argmax(next_token_logits, dim=-1)
+
+            # Compute scores
+            _scores = F.log_softmax(next_token_logits, dim=-1)  # (batch_size, vocab_size)
+            _scores = torch.gather(_scores, -1, next_token.unsqueeze(-1))  # (batch_size, 1)
+            logprobs.append(_scores)  # (batch_size, 1)
+            unfinished_sents.append(cur_unfinished)
+
+            # update generations and finished sentences
+            tokens_to_add = next_token * cur_unfinished + pad_token_id * (1 - cur_unfinished)
+            input_ids = torch.cat([input_ids, tokens_to_add.unsqueeze(-1)], dim=-1)
+
+            #for t in input_ids:
+            #    print(self.tokenizer.convert_ids_to_tokens(t.tolist()))
+
+            for eos_token_id in eos_token_ids:
+                cur_unfinished = cur_unfinished.mul(tokens_to_add.ne(eos_token_id).long())
+            cur_len = cur_len + 1
+
+            # stop when there is an EOS token in each sentence, or if we exceed the maximum length
+            if cur_unfinished.max() == 0:
+                break
+
+        # add eos_token_ids to unfinished sentences
+        if cur_len == max_length:
+            input_ids[:, -1].masked_fill_(cur_unfinished.to(dtype=torch.bool), eos_token_ids[0])
+
+        logprobs = torch.cat(logprobs, dim=1)
+        unfinished_sents = torch.stack(unfinished_sents, dim=1).float()
+        sum_logprobs = (logprobs * unfinished_sents).sum(dim=1)
+
+        # pad to the same length, otherwise DataParallel will give error
+        pad_len = max_length - input_ids.shape[1]
+        if pad_len > 0:
+            padding_ids = input_ids.new(batch_size, pad_len).fill_(pad_token_id)
+            input_ids = torch.cat([input_ids, padding_ids], dim=1)
+
+        # (batch_size, n_best, max_len), (batch_size, n_best)
+        return input_ids.unsqueeze(1), sum_logprobs.unsqueeze(1)
+
+    def _generate_beam_search(
+        self,
+        input_ids,
+        cur_len,
+        max_length,
+        do_sample,
+        temperature,
+        top_k,
+        top_p,
+        repetition_penalty,
+        pad_token_id,
+        eos_token_ids,
+        batch_size,
+        length_penalty,
+        num_beams,
+        vocab_size,
+    ):
+        """ Generate sequences for each example with beam search.
+        """
+        # Expand input to num beams
+        input_ids = input_ids.unsqueeze(1).expand(batch_size, num_beams, cur_len)
+        input_ids = input_ids.contiguous().view(batch_size * num_beams, cur_len)  # (batch_size * num_beams, cur_len)
+
+        # generated hypotheses
+        num_keep_best = self.num_keep_best
+        generated_hyps = [
+            BeamHypotheses(num_keep_best, max_length, length_penalty, early_stopping=False) for _ in range(batch_size)
+        ]
+        # NOTE: Expand >1 words to leave some spare tokens to keep the
+        # beam size, because some sentences may end here and cannot expand
+        # in the next level
+        TOPN_PER_BEAM = 2
+
+        # scores for each sentence in the beam
+        beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device)
+        beam_scores[:, 1:] = -1e9
+        beam_scores = beam_scores.view(-1)  # shape (batch_size * num_beams,)
+
+        # cache compute states
+        past = None
+
+        # done sentences
+        done = [False for _ in range(batch_size)]
+
+        while cur_len < max_length:
+            model_inputs = self.prepare_inputs_for_generation(input_ids, past=past)
+            outputs = self(**model_inputs)  # (batch_size * num_beams, cur_len, vocab_size)
+            if cur_len == 1:
+                token_len = 2 + self.od_labels_len
+                next_token_idx = 1
+            else:
+                assert cur_len > 1
+                if not self._do_output_past(outputs):
+                    token_len = cur_len + 1 + self.od_labels_len
+                    next_token_idx = cur_len
+                else:
+                    token_len = 2
+                    next_token_idx = 1
+
+            assert outputs[0].shape[1] == token_len
+            scores = outputs[0][:, next_token_idx, :]  # (batch_size * num_beams, vocab_size)
+            assert outputs[0].shape[1] == model_inputs['input_ids'].shape[1]
+
+            # if model has past, then set the past variable to speed up decoding
+            if self._do_output_past(outputs):
+                past = outputs[1]
+
+            # repetition penalty (from CTRL paper https://arxiv.org/abs/1909.05858)
+            if repetition_penalty != 1.0:
+                for i in range(batch_size * num_beams):
+                    for previous_token in set(input_ids[i].tolist()):
+                        # if score < 0 then the repetition penalty has to be multiplied to reduce the previous token probability
+                        if scores[i, previous_token] < 0:
+                            scores[i, previous_token] *= repetition_penalty
+                        else:
+                            scores[i, previous_token] /= repetition_penalty
+
+            if do_sample:
+                # Temperature (higher temperature => more likely to sample low probability tokens)
+                if temperature != 1.0:
+                    scores = scores / temperature
+                # Top-p/top-k filtering
+                scores = top_k_top_p_filtering(
+                    scores, top_k=top_k, top_p=top_p, min_tokens_to_keep=2
+                )  # (batch_size * num_beams, vocab_size)
+                # Sample [TOPN_PER_BEAM] next words for each beam (so we have some spare tokens and match output of greedy beam search)
+                next_words = torch.multinomial(F.softmax(scores, dim=-1),
+                        num_samples=TOPN_PER_BEAM)  # (batch_size * num_beams, TOPN_PER_BEAM)
+                # Compute next scores
+                _scores = F.log_softmax(scores, dim=-1)  # (batch_size * num_beams, vocab_size)
+                _scores = torch.gather(_scores, -1, next_words)  # (batch_size * num_beams, TOPN_PER_BEAM)
+                next_scores = _scores + beam_scores[:, None].expand_as(_scores)  # (batch_size * num_beams, TOPN_PER_BEAM)
+                # Match shape of greedy beam search
+                beam_indices = torch.arange(num_beams) * vocab_size
+                beam_indices = beam_indices.repeat(batch_size, TOPN_PER_BEAM).to(next_words.device)
+                next_words = next_words.view(batch_size, TOPN_PER_BEAM * num_beams)  # (batch_size, TOPN_PER_BEAM * num_beams)
+                next_words = next_words + beam_indices
+                next_scores = next_scores.view(batch_size, TOPN_PER_BEAM * num_beams)  # (batch_size, TOPN_PER_BEAM * num_beams)
+            else:
+                # do greedy beam search
+                scores = F.log_softmax(scores, dim=-1)  # (batch_size * num_beams, vocab_size)
+                assert scores.size() == (batch_size * num_beams, vocab_size)
+                # Add the log prob of the new beams to the log prob of the beginning of the sequence (sum of logs == log of the product)
+                _scores = scores + beam_scores[:, None].expand_as(scores)  # (batch_size * num_beams, vocab_size)
+                # re-organize to group the beam together (we are keeping top hypothesis across beams)
+                _scores = _scores.view(batch_size, num_beams * vocab_size)  # (batch_size, num_beams * vocab_size)
+                next_scores, next_words = torch.topk(_scores, TOPN_PER_BEAM * num_beams, dim=1, largest=True, sorted=True)
+
+            assert next_scores.size() == next_words.size() == (batch_size, TOPN_PER_BEAM * num_beams)
+
+            # next batch beam content
+            # list of (batch_size * num_beams) tuple(next hypothesis score, next word, current position in the batch)
+            next_batch_beam = []
+
+            # for each sentence
+            for batch_ex in range(batch_size):
+
+                # if we are done with this sentence
+                done[batch_ex] = done[batch_ex] or generated_hyps[batch_ex].is_done(next_scores[batch_ex].max().item())
+                if done[batch_ex]:
+                    next_batch_beam.extend([(0, pad_token_id, 0)] * num_beams)  # pad the batch
+                    continue
+
+                # next sentence beam content
+                next_sent_beam = []
+
+                # next words for this sentence
+                for idx, score in zip(next_words[batch_ex], next_scores[batch_ex]):
+
+                    # get beam and word IDs
+                    beam_id = idx // vocab_size
+                    word_id = idx % vocab_size
+
+                    # end of sentence, or next word
+                    if word_id.item() in eos_token_ids or cur_len + 1 == max_length:
+                        generated_hyps[batch_ex].add(
+                            input_ids[batch_ex * num_beams + beam_id, :cur_len].clone(), score.item()
+                        )
+                    else:
+                        next_sent_beam.append((score, word_id, batch_ex * num_beams + beam_id))
+
+                    # the beam for next step is full
+                    if len(next_sent_beam) == num_beams:
+                        break
+
+                # update next beam content
+                if cur_len + 1 == max_length:
+                    assert len(next_sent_beam) == 0
+                else:
+                    assert len(next_sent_beam) == num_beams
+
+                if len(next_sent_beam) == 0:
+                    next_sent_beam = [(0, pad_token_id, 0)] * num_beams  # pad the batch
+                next_batch_beam.extend(next_sent_beam)
+                assert len(next_batch_beam) == num_beams * (batch_ex + 1)
+
+            # sanity check / prepare next batch
+            assert len(next_batch_beam) == batch_size * num_beams
+            beam_scores = beam_scores.new([x[0] for x in next_batch_beam])
+            beam_words = input_ids.new([x[1] for x in next_batch_beam])
+            beam_idx = input_ids.new([x[2] for x in next_batch_beam])
+
+            # re-order batch
+            input_ids = input_ids[beam_idx, :]
+            input_ids = torch.cat([input_ids, beam_words.unsqueeze(1)], dim=-1)
+
+            # re-order internal states
+            if past:
+                reordered_past = []
+                for layer_past in past:
+                    # get the correct batch idx from layer past batch dim
+                    # batch dim of `past` and `mems` is at 1st position
+                    reordered_layer_past = [layer_past[i].unsqueeze(0).clone().detach() for i in beam_idx]
+                    reordered_layer_past = torch.cat(reordered_layer_past, dim=0)
+                    # check that shape matches
+                    assert reordered_layer_past.shape == layer_past.shape
+                    reordered_past.append(reordered_layer_past)
+                past = tuple(reordered_past)
+
+            # update current length
+            cur_len = cur_len + 1
+
+            # stop when we are done with each sentence
+            if all(done):
+                break
+
+        # visualize hypotheses
+        # print([len(x) for x in generated_hyps], cur_len)
+        # globals().update( locals() );
+
# !import code; code.interact(local=vars())
+        # for ii in range(batch_size):
+        #    for ss, ww in sorted(generated_hyps[ii].hyp, key=lambda x: x[0], reverse=True):
+        #        print("%.3f " % ss + " ".join(self.dico[x] for x in ww.tolist()))
+        #    print("")
+
+        # select the best hypotheses
+        tgt_len = torch.ones(batch_size, num_keep_best, dtype=torch.long)
+        logprobs = torch.zeros(batch_size, num_keep_best,
+                dtype=torch.float).fill_(-1e5).to(input_ids.device)
+        all_best = []
+
+        for i, hypotheses in enumerate(generated_hyps):
+            best = []
+            hyp_scores = torch.tensor([x[0] for x in hypotheses.hyp])
+            _, best_indices = torch.topk(hyp_scores,
+                    min(num_keep_best, len(hyp_scores)), largest=True)
+            for best_idx, hyp_idx in enumerate(best_indices):
+                conf, best_hyp = hypotheses.hyp[hyp_idx]
+                best.append(best_hyp)
+                logprobs[i, best_idx] = conf
+                tgt_len[i, best_idx] = len(best_hyp) + 1  # +1 for the <EOS> symbol
+
+            all_best.append(best)
+
+        # generate target batch, pad to the same length
+        decoded = input_ids.new(batch_size, num_keep_best, max_length).fill_(pad_token_id)
+        for batch_idx, best in enumerate(all_best):
+            for best_idx, hypo in enumerate(best):
+                decoded[batch_idx, best_idx, : tgt_len[batch_idx, best_idx] - 1] = hypo
+                decoded[batch_idx, best_idx, tgt_len[batch_idx, best_idx] - 1] = eos_token_ids[0]
+
+        return decoded, logprobs
+
+
+def top_k_top_p_filtering(logits, top_k=0, top_p=1.0, filter_value=-float("Inf"), min_tokens_to_keep=1):
+    """ Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
+        Args:
+            logits: logits distribution shape (batch size, vocabulary size)
+            if top_k > 0: keep only top k tokens with highest probability (top-k filtering).
+            if top_p < 1.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).
+                Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
+            Make sure we keep at least min_tokens_to_keep per batch example in the output
+        From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
+    """
+    if top_k > 0:
+        top_k = min(max(top_k, min_tokens_to_keep), logits.size(-1))  # Safety check
+        # Remove all tokens with a probability less than the last token of the top-k
+        indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
+        logits[indices_to_remove] = filter_value
+
+    if top_p < 1.0:
+        sorted_logits, sorted_indices = torch.sort(logits, descending=True)
+        cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
+
+        # Remove tokens with cumulative probability above the threshold (tokens with 0 are kept)
+        sorted_indices_to_remove = cumulative_probs > top_p
+        if min_tokens_to_keep > 1:
+            # Keep at least min_tokens_to_keep (set to min_tokens_to_keep-1 because we add the first one below)
+            sorted_indices_to_remove[..., :min_tokens_to_keep] = 0
+        # Shift the indices to the right to keep also the first token above the threshold
+        sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
+        sorted_indices_to_remove[..., 0] = 0
+
+        # scatter sorted tensors to original indexing
+        indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove)
+        logits[indices_to_remove] = filter_value
+    return logits
+
+
+class BeamHypotheses(object):
+    def __init__(self, n_hyp, max_length, length_penalty, early_stopping):
+        """
+        Initialize n-best list of hypotheses.
+        """
+        self.max_length = max_length - 1  # ignoring bos_token
+        self.length_penalty = length_penalty
+        self.early_stopping = early_stopping
+        self.n_hyp = n_hyp
+        self.hyp = []
+        self.worst_score = 1e9
+
+    def __len__(self):
+        """
+        Number of hypotheses in the list.
+        """
+        return len(self.hyp)
+
+    def add(self, hyp, sum_logprobs):
+        """
+        Add a new hypothesis to the list.
+        """
+        score = sum_logprobs / len(hyp) ** self.length_penalty
+        if len(self) < self.n_hyp or score > self.worst_score:
+            self.hyp.append((score, hyp))
+            if len(self) > self.n_hyp:
+                sorted_scores = sorted([(s, idx) for idx, (s, _) in enumerate(self.hyp)])
+                del self.hyp[sorted_scores[0][1]]
+                self.worst_score = sorted_scores[1][0]
+            else:
+                self.worst_score = min(score, self.worst_score)
+
+    def is_done(self, best_sum_logprobs):
+        """
+        If there are enough hypotheses and none of the hypotheses being generated
+        can become better than the worst one in the heap, then we are done with this sentence.
+        """
+        if len(self) < self.n_hyp:
+            return False
+        elif self.early_stopping:
+            return True
+        else:
+            return self.worst_score >= best_sum_logprobs / self.max_length ** self.length_penalty
+
+
+
+
diff --git a/oscar/run_captioning.py b/oscar/run_captioning.py
new file mode 100644
index 0000000..2671be1
--- /dev/null
+++ b/oscar/run_captioning.py
@@ -0,0 +1,882 @@
+# Copyright (c) 2020 Microsoft Corporation. Licensed under the MIT license.
+
+from __future__ import absolute_import, division, print_function
+import argparse
+import base64
+import os.path as op
+import random, time, json
+import numpy as np
+import torch
+from torch.utils.data import Dataset, DataLoader, RandomSampler, SequentialSampler
+from tqdm import tqdm
+
+from oscar.utils.logger import setup_logger
+from oscar.utils.tsv_file import TSVFile
+from oscar.utils.tsv_file_ops import tsv_writer
+from oscar.utils.misc import (mkdir, set_seed,
+        load_from_yaml_file, find_file_path_in_yaml)
+from oscar.utils.caption_evaluate import (evaluate_on_coco_caption,
+        evaluate_on_nocaps, ScstRewardCriterion)
+from oscar.utils.cbs import ConstraintFilter, ConstraintBoxesReader
+from oscar.utils.cbs import FiniteStateMachineBuilder
+from oscar.modeling.modeling_bert import BertForImageCaptioning
+from transformers.pytorch_transformers import BertTokenizer, BertConfig
+from transformers.pytorch_transformers import AdamW, WarmupLinearSchedule, WarmupConstantSchedule
+
+
+class CaptionTSVDataset(Dataset):
+    def __init__(self, yaml_file, tokenizer=None, add_od_labels=True,
+            max_img_seq_length=50, max_seq_length=70, max_seq_a_length=40,
+            is_train=True, mask_prob=0.15, max_masked_tokens=3, **kwargs):
+        """Constructor.
+        Args:
+            yaml_file: path to a yaml file with all required data (image features, captions, labels, etc.).
+            tokenizer: tokenizer for text processing.
+            add_od_labels: whether to add object detection labels from the yaml file to BERT.
+            max_img_seq_length: max image sequence length.
+            max_seq_length: max text sequence length.
+            max_seq_a_length: max caption sequence length.
+            is_train: train or test mode.
+            mask_prob: probability to mask an input token.
+            max_masked_tokens: maximum number of tokens to be masked in one sentence.
+            kwargs: other arguments.
+ """ + self.yaml_file = yaml_file + self.cfg = load_from_yaml_file(yaml_file) + self.root = op.dirname(yaml_file) + self.label_file = find_file_path_in_yaml(self.cfg['label'], self.root) + self.feat_file = find_file_path_in_yaml(self.cfg['feature'], self.root) + self.caption_file = find_file_path_in_yaml(self.cfg.get('caption'), self.root) + + assert op.isfile(self.feat_file) + if add_od_labels: assert op.isfile(self.label_file) + if is_train: assert op.isfile(self.caption_file) and tokenizer is not None + + self.label_tsv = None if not self.label_file else TSVFile(self.label_file) + self.feat_tsv = TSVFile(self.feat_file) + if self.caption_file and op.isfile(self.caption_file): + with open(self.caption_file, 'r') as f: + self.captions = json.load(f) + + self.tokenizer = tokenizer + self.tensorizer = CaptionTensorizer(self.tokenizer, max_img_seq_length, + max_seq_length, max_seq_a_length, mask_prob, max_masked_tokens, + is_train=is_train) + self.add_od_labels = add_od_labels + self.is_train = is_train + self.kwargs = kwargs + self.image_keys = self.prepare_image_keys() + self.key2index = self.prepare_image_key_to_index() + self.key2captions = self.prepare_image_key_to_captions() + + def get_valid_tsv(self): + # based on the order of file size + if self.label_tsv: + return self.label_tsv + if self.feat_tsv: + return self.feat_tsv + + def prepare_image_keys(self): + tsv = self.get_valid_tsv() + return [tsv.seek(i)[0] for i in range(tsv.num_rows())] + + def prepare_image_key_to_index(self): + tsv = self.get_valid_tsv() + return {tsv.seek(i)[0] : i for i in range(tsv.num_rows())} + + def prepare_image_key_to_captions(self): + if self.is_train: + key2captions = {key: [] for key in self.image_keys} + for cap in self.captions: + key2captions[cap['image_id']].append(cap['caption']) + return key2captions + + def get_image_index(self, idx): + if self.is_train: + img_cap_pair = self.captions[idx] + img_key = img_cap_pair['image_id'] + return self.key2index[img_key] + return idx + + def get_image_key(self, idx): + img_idx = self.get_image_index(idx) + return self.image_keys[img_idx] + + def get_image_features(self, img_idx): + feat_info = json.loads(self.feat_tsv.seek(img_idx)[1]) + num_boxes = feat_info['num_boxes'] + features = np.frombuffer(base64.b64decode(feat_info['features']), np.float32 + ).reshape((num_boxes, -1)) + return torch.Tensor(features) + + def get_caption(self, idx): + if self.is_train: + img_cap_pair = self.captions[idx] + return img_cap_pair['caption'] + return "" + + def get_od_labels(self, img_idx): + od_labels = None + if self.add_od_labels: + label_info = json.loads(self.label_tsv.seek(img_idx)[1]) + od_labels = " ".join([l['class'] for l in label_info]) + return od_labels + + def get_caption_file_in_coco_format(self): + cap_file = op.splitext(self.caption_file)[0] + '_coco_format.json' + return cap_file + + def get_captions_by_key(self, key): + assert self.is_train, "cannot get captions for inference" + return self.key2captions[key] + + def __getitem__(self, idx): + img_idx = self.get_image_index(idx) + img_key = self.image_keys[img_idx] + features = self.get_image_features(img_idx) + caption = self.get_caption(idx) + od_labels = self.get_od_labels(img_idx) + example = self.tensorizer.tensorize_example(caption, features, text_b=od_labels) + return img_key, example + + def __len__(self): + if self.is_train: + return len(self.captions) + return self.get_valid_tsv().num_rows() + + +class CaptionTSVDatasetWithConstraints(CaptionTSVDataset): + r""" + Providing inputs for 
inference with Constrained Beam Search
+
+    nms_threshold: float, optional (default = 0.85)
+        NMS threshold for suppressing generic object class names during constraint filtering:
+        for two boxes with IoU higher than this threshold, "dog" suppresses "animal".
+    max_given_constraints: int, optional (default = 3)
+        Maximum number of constraints which can be specified for CBS decoding. Constraints are
+        selected based on the prediction confidence score of their corresponding bounding boxes.
+    """
+
+    def __init__(
+        self, yaml_file,
+        nms_threshold=0.85,
+        max_given_constraints=3, **kwargs
+    ):
+        super().__init__(yaml_file, **kwargs)
+        boxes_tsvpath = find_file_path_in_yaml(self.cfg['cbs_box'], self.root)
+        constraint2tokens_tsvpath = find_file_path_in_yaml(self.cfg['cbs_constraint'], self.root)
+        tokenforms_tsvpath = find_file_path_in_yaml(self.cfg['cbs_tokenforms'], self.root)
+        hierarchy_jsonpath = find_file_path_in_yaml(self.cfg['cbs_hierarchy'], self.root)
+
+        self._boxes_reader = ConstraintBoxesReader(boxes_tsvpath)
+        self._constraint_filter = ConstraintFilter(
+            hierarchy_jsonpath, nms_threshold, max_given_constraints
+        )
+        self._fsm_builder = FiniteStateMachineBuilder(self.tokenizer,
+                constraint2tokens_tsvpath, tokenforms_tsvpath,
+                max_given_constraints)
+
+    def __getitem__(self, index):
+        img_key, example = super().__getitem__(index)
+
+        # Apply constraint filtering to object class names.
+        constraint_boxes = self._boxes_reader[img_key]
+
+        candidates = self._constraint_filter(
+            constraint_boxes["boxes"], constraint_boxes["class_names"], constraint_boxes["scores"]
+        )
+        num_constraints = len(candidates)
+        fsm, nstates = self._fsm_builder.build(candidates)
+
+        return img_key, example + (fsm, num_constraints, )
+
+
+class CaptionTensorizer(object):
+    def __init__(self, tokenizer, max_img_seq_length=50, max_seq_length=70,
+            max_seq_a_length=40, mask_prob=0.15, max_masked_tokens=3,
+            is_train=True):
+        """Constructor.
+        Args:
+            tokenizer: tokenizer for text processing.
+            max_img_seq_length: max image sequence length.
+            max_seq_length: max text sequence length.
+            max_seq_a_length: max caption sequence length.
+            is_train: train or test mode.
+            mask_prob: probability to mask an input token.
+            max_masked_tokens: maximum number of tokens to be masked in one sentence.
+        """
+        self.tokenizer = tokenizer
+        self.is_train = is_train
+        self.max_img_seq_len = max_img_seq_length
+        self.max_seq_len = max_seq_length
+        self.max_seq_a_len = max_seq_a_length
+        self.mask_prob = mask_prob
+        self.max_masked_tokens = max_masked_tokens
+        self._triangle_mask = torch.tril(torch.ones((self.max_seq_len,
+            self.max_seq_len), dtype=torch.long))
+
+    def tensorize_example(self, text_a, img_feat, text_b=None,
+            cls_token_segment_id=0, pad_token_segment_id=0,
+            sequence_a_segment_id=0, sequence_b_segment_id=1):
+        if self.is_train:
+            tokens_a = self.tokenizer.tokenize(text_a)
+        else:
+            # fake tokens to generate masks
+            tokens_a = [self.tokenizer.mask_token] * (self.max_seq_a_len - 2)
+        if len(tokens_a) > self.max_seq_a_len - 2:
+            tokens_a = tokens_a[:(self.max_seq_a_len - 2)]
+
+        tokens = [self.tokenizer.cls_token] + tokens_a + [self.tokenizer.sep_token]
+        segment_ids = [cls_token_segment_id] + [sequence_a_segment_id] * (len(tokens) - 1)
+        seq_a_len = len(tokens)
+        if text_b:
+            # pad text_a to a fixed length, so the text_b tokens always start
+            # at the same position (simplifies inference).
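+            # Illustrative layout sketch (assuming the default max_seq_a_len=40
+            # and a 5-token caption):
+            #   [CLS] w1 w2 w3 w4 w5 [SEP] [PAD]*33 | tag_1 ... tag_k [SEP]
+            # i.e. the object-tag tokens (text_b) always begin at index
+            # max_seq_a_len, which is what od_labels_start_posid relies on.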
+            padding_a_len = self.max_seq_a_len - seq_a_len
+            tokens += [self.tokenizer.pad_token] * padding_a_len
+            segment_ids += ([pad_token_segment_id] * padding_a_len)
+
+            tokens_b = self.tokenizer.tokenize(text_b)
+            if len(tokens_b) > self.max_seq_len - len(tokens) - 1:
+                tokens_b = tokens_b[: (self.max_seq_len - len(tokens) - 1)]
+            tokens += tokens_b + [self.tokenizer.sep_token]
+            segment_ids += [sequence_b_segment_id] * (len(tokens_b) + 1)
+
+        seq_len = len(tokens)
+        if self.is_train:
+            masked_pos = torch.zeros(self.max_seq_len, dtype=torch.int)
+            # randomly mask words for prediction, ignore [CLS]
+            candidate_masked_idx = list(range(1, seq_a_len))  # only mask text_a
+            random.shuffle(candidate_masked_idx)
+            num_masked = min(max(round(self.mask_prob * seq_a_len), 1), self.max_masked_tokens)
+            num_masked = int(num_masked)
+            masked_idx = candidate_masked_idx[:num_masked]
+            masked_idx = sorted(masked_idx)
+            masked_token = [tokens[i] for i in masked_idx]
+            for pos in masked_idx:
+                if random.random() <= 0.8:
+                    # 80% chance to be a [MASK] token
+                    tokens[pos] = self.tokenizer.mask_token
+                elif random.random() <= 0.5:
+                    # 10% chance to be a random word ((1-0.8)*0.5);
+                    # randint is inclusive at both ends, so subtract 1
+                    i = random.randint(0, len(self.tokenizer.vocab) - 1)
+                    tokens[pos] = self.tokenizer._convert_id_to_token(i)
+                else:
+                    # 10% chance to remain the same (1-0.8-0.1)
+                    pass
+
+            masked_pos[masked_idx] = 1
+            # pad masked tokens to the same length
+            if num_masked < self.max_masked_tokens:
+                masked_token = masked_token + ([self.tokenizer.pad_token] *
+                        (self.max_masked_tokens - num_masked))
+            masked_ids = self.tokenizer.convert_tokens_to_ids(masked_token)
+        else:
+            masked_pos = torch.ones(self.max_seq_len, dtype=torch.int)
+
+        # pad on the right for image captioning
+        padding_len = self.max_seq_len - seq_len
+        tokens = tokens + ([self.tokenizer.pad_token] * padding_len)
+        segment_ids += ([pad_token_segment_id] * padding_len)
+        input_ids = self.tokenizer.convert_tokens_to_ids(tokens)
+
+        # image features
+        img_len = img_feat.shape[0]
+        if img_len > self.max_img_seq_len:
+            img_feat = img_feat[0 : self.max_img_seq_len, ]
+            img_len = img_feat.shape[0]
+        else:
+            padding_matrix = torch.zeros((self.max_img_seq_len - img_len,
+                img_feat.shape[1]))
+            img_feat = torch.cat((img_feat, padding_matrix), 0)
+
+        # prepare attention mask:
+        # note that there is no attention from image regions or labels back to
+        # the caption: the caption uses a triangular (causal) mask, and letting
+        # image tokens attend to caption tokens would leak future caption
+        # information back through the image features, while the caption itself
+        # has full attention on the image.
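+        # Sketch of the block structure built below (rows attend to columns;
+        # C = caption, L = object labels, R = image regions):
+        #
+        #          C         L    R
+        #     C  triangle    1    1
+        #     L     0        1    1
+        #     R     0        1    1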
+        max_len = self.max_seq_len + self.max_img_seq_len
+        attention_mask = torch.zeros((max_len, max_len), dtype=torch.long)
+        # C: caption, L: label, R: image region
+        c_start, c_end = 0, seq_a_len
+        l_start, l_end = self.max_seq_a_len, seq_len
+        r_start, r_end = self.max_seq_len, self.max_seq_len + img_len
+        # triangle mask for caption to caption
+        attention_mask[c_start : c_end, c_start : c_end].copy_(self._triangle_mask[0 : seq_a_len, 0 : seq_a_len])
+        # full attention for L-L, R-R
+        attention_mask[l_start : l_end, l_start : l_end] = 1
+        attention_mask[r_start : r_end, r_start : r_end] = 1
+        # full attention for C-L, C-R
+        attention_mask[c_start : c_end, l_start : l_end] = 1
+        attention_mask[c_start : c_end, r_start : r_end] = 1
+        # full attention for L-R:
+        attention_mask[l_start : l_end, r_start : r_end] = 1
+        attention_mask[r_start : r_end, l_start : l_end] = 1
+
+        input_ids = torch.tensor(input_ids, dtype=torch.long)
+        segment_ids = torch.tensor(segment_ids, dtype=torch.long)
+
+        if self.is_train:
+            masked_ids = torch.tensor(masked_ids, dtype=torch.long)
+            return (input_ids, attention_mask, segment_ids, img_feat, masked_pos, masked_ids)
+        return (input_ids, attention_mask, segment_ids, img_feat, masked_pos)
+
+
+def build_dataset(yaml_file, tokenizer, args, is_train=True):
+    if not op.isfile(yaml_file):
+        yaml_file = op.join(args.data_dir, yaml_file)
+        assert op.isfile(yaml_file)
+
+    if is_train:
+        return CaptionTSVDataset(yaml_file, tokenizer=tokenizer,
+            add_od_labels=args.add_od_labels, max_img_seq_length=args.max_img_seq_length,
+            max_seq_length=args.max_seq_length, max_seq_a_length=args.max_seq_a_length,
+            is_train=True, mask_prob=args.mask_prob, max_masked_tokens=args.max_masked_tokens)
+    if args.use_cbs:
+        dataset_class = CaptionTSVDatasetWithConstraints
+    else:
+        dataset_class = CaptionTSVDataset
+    return dataset_class(yaml_file, tokenizer=tokenizer,
+        add_od_labels=args.add_od_labels, max_img_seq_length=args.max_img_seq_length,
+        max_seq_length=args.max_seq_length, max_seq_a_length=args.max_gen_length,
+        is_train=False)
+
+
+def save_checkpoint(model, tokenizer, args, epoch, global_step):
+    checkpoint_dir = op.join(args.output_dir, 'checkpoint-{}-{}'.format(
+        epoch, global_step))
+    mkdir(checkpoint_dir)
+    model_to_save = model.module if hasattr(model, 'module') else model
+    save_num = 0
+    while (save_num < 10):
+        try:
+            model_to_save.save_pretrained(checkpoint_dir)
+            torch.save(args, op.join(checkpoint_dir, 'training_args.bin'))
+            tokenizer.save_pretrained(checkpoint_dir)
+            logger.info("Save checkpoint to {}".format(checkpoint_dir))
+            break
+        except Exception:
+            save_num += 1
+    if save_num == 10:
+        logger.info("Failed to save checkpoint after 10 trials.")
+    return checkpoint_dir
+
+
+def compute_score_with_logits(logits, labels):
+    logits = torch.max(logits, -1)[1].data  # argmax
+    scores = logits == labels
+    return scores
+
+
+def train(args, train_dataset, val_dataset, model, tokenizer):
+    args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
+    train_sampler = RandomSampler(train_dataset)
+    train_dataloader = DataLoader(train_dataset, sampler=train_sampler,
+        batch_size=args.train_batch_size, num_workers=args.num_workers)
+
+    if args.max_steps > 0:
+        t_total = args.max_steps
+        args.num_train_epochs = args.max_steps // (len(train_dataloader) // \
+            args.gradient_accumulation_steps) + 1
+    else:
+        t_total = len(train_dataloader) // args.gradient_accumulation_steps \
+            * args.num_train_epochs
+
+    # Prepare optimizer and scheduler
+    no_decay = ['bias',
'LayerNorm.weight'] + grouped_parameters = [ + {'params': [p for n, p in model.named_parameters() if not \ + any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay}, + {'params': [p for n, p in model.named_parameters() if \ + any(nd in n for nd in no_decay)], 'weight_decay': 0.0} + ] + optimizer = AdamW(grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) + if args.scheduler == "constant": + scheduler = WarmupConstantSchedule( + optimizer, warmup_steps=args.warmup_steps) + elif args.scheduler == "linear": + scheduler = WarmupLinearSchedule( + optimizer, warmup_steps=args.warmup_steps, t_total=t_total) + else: + raise ValueError("Unknown scheduler type: {}".format(args.scheduler)) + + if args.n_gpu > 1: + model = torch.nn.DataParallel(model) + + logger.info("***** Running training *****") + logger.info(" Num examples = %d", len(train_dataset)) + logger.info(" Num Epochs = %d", args.num_train_epochs) + logger.info(" Batch size per GPU = %d", args.per_gpu_train_batch_size) + logger.info(" Total train batch size (w. parallel, & accumulation) = %d", + args.train_batch_size * args.gradient_accumulation_steps) + logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) + logger.info(" Total optimization steps = %d", t_total) + + if args.scst: + scst_criterion = ScstRewardCriterion() + logger.info(" SCST training...") + + global_step, global_loss, global_acc =0, 0.0, 0.0 + model.zero_grad() + eval_log = [] + best_score = 0 + for epoch in range(int(args.num_train_epochs)): + for step, (img_keys, batch) in enumerate(train_dataloader): + batch = tuple(t.to(args.device) for t in batch) + + if not args.scst: + model.train() + inputs = {'input_ids': batch[0], 'attention_mask': batch[1], + 'token_type_ids': batch[2], 'img_feats': batch[3], + 'masked_pos': batch[4], 'masked_ids': batch[5] + } + outputs = model(**inputs) + loss, logits = outputs[:2] + masked_ids = inputs['masked_ids'] + masked_ids = masked_ids[masked_ids != 0] + batch_score = compute_score_with_logits(logits, masked_ids) + batch_acc = torch.sum(batch_score.float()) / torch.sum(inputs['masked_pos']) + else: + loss = scst_train_iter(args, train_dataset, model, scst_criterion, img_keys, batch, tokenizer) + batch_acc = scst_criterion.get_score() + + if args.n_gpu > 1: + loss = loss.mean() # mean() to average on multi-gpu parallel training + if args.gradient_accumulation_steps > 1: + loss = loss / args.gradient_accumulation_steps + loss.backward() + torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) + global_loss += loss.item() + global_acc += batch_acc + if (step + 1) % args.gradient_accumulation_steps == 0: + global_step += 1 + scheduler.step() + optimizer.step() + model.zero_grad() + if global_step % args.logging_steps == 0: + logger.info("Epoch: {}, global_step: {}, lr: {:.6f}, loss: {:.4f} ({:.4f}), " \ + "score: {:.4f} ({:.4f})".format(epoch, global_step, + optimizer.param_groups[0]["lr"], loss, global_loss / global_step, + batch_acc, global_acc / global_step) + ) + + if (args.save_steps > 0 and global_step % args.save_steps == 0) or \ + global_step == t_total: + checkpoint_dir = save_checkpoint(model, tokenizer, args, epoch, global_step) + # evaluation + if args.evaluate_during_training: + logger.info("Perform evaluation at step: %d" % (global_step)) + evaluate_file = evaluate(args, val_dataset, model, tokenizer, + checkpoint_dir) + with open(evaluate_file, 'r') as f: + res = json.load(f) + best_score = max(best_score, res['CIDEr']) + res['epoch'] = epoch + 
res['global_step'] = step + res['best_CIDEr'] = best_score + eval_log.append(res) + with open(args.output_dir + '/eval_logs.json', 'w') as f: + json.dump(eval_log, f) + return global_step, global_loss / global_step + + +def scst_train_iter(args, train_dataset, model, scst_criterion, img_keys, batch, tokenizer): + cls_token_id, sep_token_id, pad_token_id, mask_token_id = tokenizer.convert_tokens_to_ids( + [tokenizer.cls_token, tokenizer.sep_token, tokenizer.pad_token, + tokenizer.mask_token] + ) + inputs = {'is_decode': True, + 'input_ids': batch[0], 'attention_mask': batch[1], + 'token_type_ids': batch[2], 'img_feats': batch[3], + 'masked_pos': batch[4], + 'do_sample': False, + 'bos_token_id': cls_token_id, + 'pad_token_id': pad_token_id, + 'eos_token_ids': [sep_token_id, pad_token_id], + 'mask_token_id': mask_token_id, + # for adding od labels + 'add_od_labels': args.add_od_labels, 'od_labels_start_posid': args.max_seq_a_length, + + # hyperparameters of beam search + 'max_length': args.max_seq_a_length, + 'num_beams': 1, + "temperature": args.temperature, + "top_k": args.top_k, + "top_p": args.top_p, + "repetition_penalty": args.repetition_penalty, + "length_penalty": args.length_penalty, + "num_return_sequences": 1, + "num_keep_best": 1, + } + + model.eval() + with torch.no_grad(): + greedy_res_raw, _ = model(**inputs) + greedy_res_raw.squeeze_(1) # batch_size * max_len + + model.train() + inputs['do_sample'] = True + sample_res_raw, sample_logprobs = model(**inputs) + sample_res_raw.squeeze_(1) + sample_logprobs.squeeze_(1) + assert sample_logprobs.requires_grad == True + assert sample_res_raw.requires_grad == False + + def _ids_to_captions(all_ids): + captions = [] + for ids in all_ids: + c = tokenizer.decode(ids.tolist(), skip_special_tokens=True) + captions.append(c) + return captions + + greedy_res = _ids_to_captions(greedy_res_raw) + sample_res = _ids_to_captions(sample_res_raw) + gt_res = [train_dataset.get_captions_by_key(k) for k in img_keys] + + loss = scst_criterion(gt_res, greedy_res, sample_res, sample_logprobs) + return loss + + +def get_predict_file(output_dir, yaml_file, args): + cc = ['pred'] + # make sure it works with/without / in end of the path. + data = op.basename(op.join(args.data_dir, '')[:-1]) + split = op.basename(yaml_file) + assert split.endswith('.yaml') + split = split[:-5] + cc.append(data) + cc.append(split) + cc.append('beam{}'.format(args.num_beams)) + cc.append('max{}'.format(args.max_gen_length)) + if args.add_od_labels: + cc.append('odlabels') + if args.num_keep_best != 1: + cc.append('best{}'.format(args.num_keep_best)) + if args.use_cbs: + cc.append('cbs{}'.format(args.min_constraints_to_satisfy)) + if args.output_hidden_states: + cc.append('hidden') + return op.join(output_dir, '{}.tsv'.format('.'.join(cc))) + + +def get_evaluate_file(predict_file): + assert predict_file.endswith('.tsv') + fpath = op.splitext(predict_file)[0] + return fpath + '.eval.json' + + +def get_evaluate_method(predict_file): + if 'nocaps' in op.basename(predict_file): + return 'nocaps' + else: + return 'coco' + + +def evaluate(args, val_dataset, model, tokenizer, output_dir): + assert op.isdir(output_dir) + predict_file = get_predict_file(output_dir, val_dataset.yaml_file, args) + if op.isfile(predict_file): + logger.info('Skip predict. {} already exists'.format(predict_file)) + else: + test(args, val_dataset, model, tokenizer, predict_file) + + evaluate_file = get_evaluate_file(predict_file) + if op.isfile(evaluate_file): + logger.info('Skip evaluation. 
{} already exists'.format(evaluate_file)) + return evaluate_file + + eval_method = get_evaluate_method(predict_file) + if eval_method == 'coco': + gt_file = val_dataset.get_caption_file_in_coco_format() + result = evaluate_on_coco_caption(predict_file, gt_file, outfile=evaluate_file) + else: + split = 'val' if 'val' in op.basename(val_dataset.yaml_file) else 'test' + result = evaluate_on_nocaps(split, predict_file, + data_dir=args.data_dir, evaluate_file=evaluate_file) + logger.info("evaluation result: {}".format(str(result))) + return evaluate_file + + +def test(args, test_dataset, model, tokenizer, predict_file): + args.test_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) + test_sampler = SequentialSampler(test_dataset) + cache_file = predict_file + + test_dataloader = DataLoader(test_dataset, sampler=test_sampler, + batch_size=args.test_batch_size, num_workers=args.num_workers) + + cls_token_id, sep_token_id, pad_token_id, mask_token_id, period_token_id = \ + tokenizer.convert_tokens_to_ids( [tokenizer.cls_token, + tokenizer.sep_token, tokenizer.pad_token, tokenizer.mask_token, '.'] + ) + model.eval() + + def gen_rows(): + time_meter = 0 + # restore existing results for long running inference tasks + exist_key2pred = {} + tmp_file = cache_file + '.tmp.copy' + if op.isfile(tmp_file): + with open(tmp_file, 'r') as fp: + for line in fp: + parts = line.strip().split('\t') + if len(parts) == 2: + exist_key2pred[parts[0]] = parts[1] + + with torch.no_grad(): + for step, (img_keys, batch) in tqdm(enumerate(test_dataloader)): + is_exist = True + for k in img_keys: + if k not in exist_key2pred: + is_exist = False + break + if is_exist: + for k in img_keys: + yield k, exist_key2pred[k] + continue + batch = tuple(t.to(args.device) for t in batch) + inputs = {'is_decode': True, + 'input_ids': batch[0], 'attention_mask': batch[1], + 'token_type_ids': batch[2], 'img_feats': batch[3], + 'masked_pos': batch[4], + 'do_sample': False, + 'bos_token_id': cls_token_id, + 'pad_token_id': pad_token_id, + 'eos_token_ids': [sep_token_id, pad_token_id], + 'mask_token_id': mask_token_id, + # for adding od labels + 'add_od_labels': args.add_od_labels, 'od_labels_start_posid': args.max_seq_a_length, + + # hyperparameters of beam search + 'max_length': args.max_gen_length, + 'num_beams': args.num_beams, + "temperature": args.temperature, + "top_k": args.top_k, + "top_p": args.top_p, + "repetition_penalty": args.repetition_penalty, + "length_penalty": args.length_penalty, + "num_return_sequences": args.num_return_sequences, + "num_keep_best": args.num_keep_best, + } + if args.use_cbs: + inputs.update({'use_cbs': True, + 'fsm': batch[5], + 'num_constraints': batch[6], + 'min_constraints_to_satisfy': args.min_constraints_to_satisfy, + }) + tic = time.time() + # captions, logprobs + outputs = model(**inputs) + time_meter += time.time() - tic + all_caps = outputs[0] # batch_size * num_keep_best * max_len + all_confs = torch.exp(outputs[1]) + + for img_key, caps, confs in zip(img_keys, all_caps, all_confs): + res = [] + for cap, conf in zip(caps, confs): + cap = tokenizer.decode(cap.tolist(), skip_special_tokens=True) + res.append({'caption': cap, 'conf': conf.item()}) + if isinstance(img_key, torch.Tensor): + img_key = img_key.item() + yield img_key, json.dumps(res) + + logger.info("Inference model computing time: {} seconds per batch".format(time_meter / (step+1))) + + tsv_writer(gen_rows(), cache_file) + return predict_file + + +def restore_training_settings(args): + assert not args.do_train + assert 
args.do_test or args.do_eval
+    # restore training settings, check hasattr for backward compatibility
+    train_args = torch.load(op.join(args.eval_model_dir, 'training_args.bin'))
+    if hasattr(train_args, 'max_seq_a_length'):
+        max_od_labels_len = train_args.max_seq_length - train_args.max_seq_a_length
+        max_seq_length = args.max_gen_length + max_od_labels_len
+        args.max_seq_length = max_seq_length
+        logger.warning('Override max_seq_length to {} = max_gen_length:{} + od_labels_len:{}'.format(
+            max_seq_length, args.max_gen_length, max_od_labels_len))
+
+    override_params = ['max_seq_a_length', 'do_lower_case', 'add_od_labels',
+            'max_img_seq_length', 'img_feature_dim',
+            'img_feature_type']
+    for param in override_params:
+        if hasattr(train_args, param):
+            train_v = getattr(train_args, param)
+            test_v = getattr(args, param)
+            if train_v != test_v:
+                logger.warning('Override {} with train args: {} -> {}'.format(param,
+                    test_v, train_v))
+                setattr(args, param, train_v)
+    return args
+
+
+def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--data_dir", default='datasets/coco_caption', type=str, required=False,
+                        help="The input data dir with all required files.")
+    parser.add_argument("--train_yaml", default='train.yaml', type=str, required=False,
+                        help="yaml file for training.")
+    parser.add_argument("--test_yaml", default='test.yaml', type=str, required=False,
+                        help="yaml file for testing.")
+    parser.add_argument("--val_yaml", default='val.yaml', type=str, required=False,
+                        help="yaml file used for validation during training.")
+    parser.add_argument("--model_name_or_path", default=None, type=str, required=False,
+                        help="Path to pre-trained model or model type.")
+    parser.add_argument("--output_dir", default='output/', type=str, required=False,
+                        help="The output directory to save checkpoints and test results.")
+    parser.add_argument("--loss_type", default='sfmx', type=str,
+                        help="Loss function types: support kl, x2, sfmx")
+    parser.add_argument("--config_name", default="", type=str,
+                        help="Pretrained config name or path if not the same as model_name.")
+    parser.add_argument("--tokenizer_name", default="", type=str,
+                        help="Pretrained tokenizer name or path if not the same as model_name.")
+    parser.add_argument("--max_seq_length", default=70, type=int,
+                        help="The maximum total input sequence length after tokenization. "
+                             "Sequences longer than this will be truncated, "
+                             "sequences shorter will be padded.")
+    parser.add_argument("--max_seq_a_length", default=40, type=int,
+                        help="The maximum sequence length for the caption.")
+    parser.add_argument("--do_train", action='store_true', help="Whether to run training.")
+    parser.add_argument("--do_test", action='store_true', help="Whether to run inference.")
+    parser.add_argument("--do_eval", action='store_true', help="Whether to run evaluation.")
+    parser.add_argument("--do_lower_case", action='store_true',
+                        help="Set this flag if you are using an uncased model.")
+    parser.add_argument("--mask_prob", default=0.15, type=float,
+                        help="Probability to mask an input token during training.")
+    parser.add_argument("--max_masked_tokens", type=int, default=3,
+                        help="The max number of masked tokens per sentence.")
+    parser.add_argument("--add_od_labels", default=False, action='store_true',
+                        help="Whether to add object detection labels or not.")
+    parser.add_argument("--drop_out", default=0.1, type=float, help="Drop out in BERT.")
+    parser.add_argument("--max_img_seq_length", default=50, type=int,
+                        help="The maximum total input image sequence length.")
+    parser.add_argument("--img_feature_dim", default=2054, type=int,
+                        help="The image feature dimension.")
+    parser.add_argument("--img_feature_type", default='frcnn', type=str,
+                        help="Image feature type.")
+    parser.add_argument("--per_gpu_train_batch_size", default=64, type=int,
+                        help="Batch size per GPU/CPU for training.")
+    parser.add_argument("--per_gpu_eval_batch_size", default=64, type=int,
+                        help="Batch size per GPU/CPU for evaluation.")
+    parser.add_argument("--output_mode", default='classification', type=str,
+                        help="output mode, support classification or regression.")
+    parser.add_argument("--num_labels", default=2, type=int,
+                        help="num_labels is 2 for classification and 1 for regression.")
+    parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
+                        help="Number of update steps to accumulate before backward.")
+    parser.add_argument("--learning_rate", default=3e-5, type=float, help="The initial lr.")
+    parser.add_argument("--weight_decay", default=0.05, type=float, help="Weight decay.")
+    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam.")
+    parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
+    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup steps.")
+    parser.add_argument("--scheduler", default='linear', type=str, help="constant or linear.")
+    parser.add_argument("--num_workers", default=4, type=int, help="Workers in dataloader.")
+    parser.add_argument("--num_train_epochs", default=40, type=int,
+                        help="Total number of training epochs to perform.")
+    parser.add_argument("--max_steps", default=-1, type=int,
+                        help="Total number of training steps. Overrides num_train_epochs.")
+    parser.add_argument('--logging_steps', type=int, default=20, help="Log every X steps.")
+    parser.add_argument('--save_steps', type=int, default=-1,
+                        help="Save checkpoint every X steps. Will also perform evaluation.")
+    parser.add_argument("--evaluate_during_training", action='store_true',
+                        help="Run evaluation during training at each save_steps.")
+    parser.add_argument("--no_cuda", action='store_true', help="Avoid using CUDA.")
+    parser.add_argument('--seed', type=int, default=88, help="random seed for initialization.")
+    parser.add_argument('--scst', action='store_true', help='Self-critical sequence training')
+    # for generation
+    parser.add_argument("--eval_model_dir", type=str, default='',
+                        help="Model directory for evaluation.")
+    parser.add_argument('--max_gen_length', type=int, default=20,
+                        help="max length of generated sentences")
+    parser.add_argument('--output_hidden_states', action='store_true',
+                        help="Turn on for fast decoding")
+    parser.add_argument('--num_return_sequences', type=int, default=1,
+                        help="repeating times per image")
+    parser.add_argument('--num_beams', type=int, default=5, help="beam search width")
+    parser.add_argument('--num_keep_best', type=int, default=1,
+                        help="number of hypotheses to keep in beam search")
+    parser.add_argument('--temperature', type=float, default=1,
+                        help="temperature in softmax for sampling")
+    parser.add_argument('--top_k', type=int, default=0,
+                        help="filter distribution for sampling")
+    parser.add_argument('--top_p', type=float, default=1,
+                        help="filter distribution for sampling")
+    parser.add_argument('--repetition_penalty', type=float, default=1,
+                        help="repetition penalty from CTRL paper (https://arxiv.org/abs/1909.05858)")
+    parser.add_argument('--length_penalty', type=float, default=1,
+                        help="beam search length penalty")
+    # for Constrained Beam Search
+    parser.add_argument('--use_cbs', action='store_true',
+                        help='Use constrained beam search for decoding')
+    parser.add_argument('--min_constraints_to_satisfy', type=int, default=2,
+                        help="minimum number of constraints to satisfy")
+    args = parser.parse_args()
+
+    global logger
+
+    args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
+    args.n_gpu = torch.cuda.device_count()
+
+    output_dir = args.output_dir
+    mkdir(output_dir)
+
+    logger = setup_logger("vlpretrain", output_dir, 0)
+    logger.warning("Device: %s, n_gpu: %s", args.device, args.n_gpu)
+    set_seed(args.seed, args.n_gpu)
+
+    # Load pretrained model and tokenizer
+    config_class, model_class, tokenizer_class = BertConfig, BertForImageCaptioning, BertTokenizer
+    if args.do_train:
+        assert args.model_name_or_path is not None
+        config = config_class.from_pretrained(args.config_name if args.config_name else \
+            args.model_name_or_path, num_labels=args.num_labels, finetuning_task='image_captioning')
+        if args.scst:
+            # avoid using too much memory
+            config.output_hidden_states = True
+        tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name \
+            else args.model_name_or_path, do_lower_case=args.do_lower_case)
+        config.img_feature_dim = args.img_feature_dim
+        config.img_feature_type = args.img_feature_type
+        config.hidden_dropout_prob = args.drop_out
+        config.loss_type = args.loss_type
+        model = model_class.from_pretrained(args.model_name_or_path,
+            from_tf=bool('.ckpt' in args.model_name_or_path), config=config)
+    else:
+        checkpoint = args.eval_model_dir
+        assert op.isdir(checkpoint)
+        config = config_class.from_pretrained(checkpoint)
+        config.output_hidden_states = args.output_hidden_states
+        tokenizer = tokenizer_class.from_pretrained(checkpoint)
+        logger.info("Evaluate the following checkpoint: %s", checkpoint)
+        model = model_class.from_pretrained(checkpoint, config=config)
+
+    model.to(args.device)
+    logger.info("Training/evaluation parameters %s", args)
+    if args.do_train:
+        train_dataset = build_dataset(op.join(args.data_dir, args.train_yaml), tokenizer, args)
+        val_dataset = build_dataset(op.join(args.data_dir, args.val_yaml),
+            tokenizer, args, is_train=False)
+        global_step, avg_loss = train(args, train_dataset, val_dataset, model, tokenizer)
+        logger.info("Training done: total_step = %s, avg loss = %s", global_step, avg_loss)
+
+    # inference and evaluation
+    if args.do_test or args.do_eval:
+        args = restore_training_settings(args)
+        test_dataset = build_dataset(op.join(args.data_dir, args.test_yaml),
+            tokenizer, args, is_train=False)
+        if args.n_gpu > 1:
+            model = torch.nn.DataParallel(model)
+
+        if not args.do_eval:
+            predict_file = get_predict_file(checkpoint, test_dataset.yaml_file, args)
+            test(args, test_dataset, model, tokenizer, predict_file)
+            logger.info("Prediction results saved to: {}".format(predict_file))
+        else:
+            evaluate_file = evaluate(args, test_dataset, model, tokenizer,
+                checkpoint)
+            logger.info("Evaluation results saved to: {}".format(evaluate_file))
+
+if __name__ == "__main__":
+    main()
diff --git a/oscar/run_gqa.py b/oscar/run_gqa.py
new file mode 100644
index 0000000..a702028
--- /dev/null
+++ b/oscar/run_gqa.py
@@ -0,0 +1,1084 @@
+# Copyright (c) 2020 Microsoft Corporation. Licensed under the MIT license.
+
+from __future__ import absolute_import, division, print_function
+
+import argparse
+import glob
+import logging
+import os
+import copy, time, json
+
+import sys
+sys.path.insert(0, '.')
+
+import numpy as np
+import torch
+import torch.nn as nn
+from torch.utils.data import (Dataset, DataLoader, RandomSampler, SequentialSampler, TensorDataset)
+from torch.utils.data.distributed import DistributedSampler
+import _pickle as cPickle
+
+from oscar.modeling.modeling_bert import ImageBertForSequenceClassification
+from transformers.pytorch_transformers import WEIGHTS_NAME, BertTokenizer, BertConfig
+from transformers.pytorch_transformers import AdamW, WarmupLinearSchedule, WarmupConstantSchedule
+
+from oscar.utils.misc import set_seed
+from oscar.utils.task_utils import (_truncate_seq_pair, convert_examples_to_features_vqa,
+                        output_modes, processors)
+
+logger = logging.getLogger(__name__)
+
+MODEL_CLASSES = {
+    'bert': (BertConfig, ImageBertForSequenceClassification, BertTokenizer),
+}
+
+
+log_json = []
+
+
+def _load_dataset(args, name):
+    processor = processors[args.task_name]()
+    labels = processor.get_labels(args.label_file)
+
+    if name == 'train':
+        if args.train_data_type == 'bal':
+            examples = processor.get_train_examples(args.data_dir, 'gqa_bal_qla_train.json')  #[0: debug_size]
+        else:
+            examples = processor.get_train_examples(args.data_dir, 'gqa_all_qla_train.json')  #[0: debug_size]
+    elif name == 'val':
+        if args.eval_data_type == 'bal':
+            examples = processor.get_dev_examples(args.data_dir, 'gqa_bal_qla_val.json')  #[0: debug_size]
+        else:
+            examples = processor.get_dev_examples(args.data_dir, 'gqa_all_qla_val.json')  #[0: debug_size]
+    elif name == 'train+val':  # deprecated
+        if args.data_label_type == 'mask':
+            examples = processor.get_train_examples(args.data_dir, 'train+val2014_qla_mrcnn.json')
+        else:
+            examples = processor.get_train_examples(args.data_dir, 'train+val2014_qla.json')
+    elif name == 'test':  # test-submission
+        if args.data_label_type == 'bal':
+            examples = processor.get_test_examples(args.data_dir, 'gqa_all_qla_submission.json')
+        else:
+ examples = processor.get_test_examples(args.data_dir, 'gqa_all_qla_submission.json') + elif name == 'test-dev': # test-dev set + if args.data_label_type == 'bal': + examples = processor.get_dev_examples(args.data_dir, 'gqa_bal_qla_testdev.json') + else: + examples = processor.get_dev_examples(args.data_dir, 'gqa_all_qla_testdev.json') + + return examples, labels + + +def _load_img_features(args): + t_start = time.time() + if args.img_feature_type == 'faster_r-cnn': + if args.img_feature_dim == 2048: # object features + feat_file_name = 'gqa_img_frcnn_feats_obj.pt' + else: # object + spatial features + feat_file_name = 'gqa_img_frcnn_feats.pt' + else: + feat_file_name = 'gqa_img_frcnn_feats.pt' + img_features = torch.load(os.path.join(args.data_dir, feat_file_name)) + t_end = time.time() + logger.info('Info: loading {0:s} features using {1:.2f} secs'.format(feat_file_name, (t_end - t_start))) + + return img_features + + +class GQADataset(Dataset): + """ GQA Dataset """ + + def __init__(self, args, name, img_features, tokenizer, label_pos_feats=None): + super(GQADataset, self).__init__() + assert name in ['train', 'val', 'test-dev', 'test', 'train+val'] + + #t_start = time.time() + #if args.img_feature_type == 'faster_r-cnn': + # if args.img_feature_dim == 2048: # object features + # feat_file_name = 'gqa_img_frcnn_feats_obj_{}.pt'.format(name) + # else: # object + spatial features + # feat_file_name = 'gqa_img_frcnn_feats_{}.pt'.format(name) + #else: + # feat_file_name = '{}_img_feats.pt'.format(name) + #self.img_features = torch.load(os.path.join(args.data_dir, feat_file_name)) + #t_end = time.time() + #logger.info('Info: loading {0:s} features using {1:.2f} secs'.format(feat_file_name, (t_end-t_start))) + + self.img_features = img_features + self.label_pos_feats = label_pos_feats + self.output_mode = output_modes[args.task_name] + self.tokenizer = tokenizer + self.args = args + self.name = name + + self.examples, self.labels = _load_dataset(args, name) + self.label_map = {label: i for i, label in enumerate(self.labels)} + + if self.args.load_fast: + self.features = self.tensorize(args, cls_token_at_end=bool(self.args.model_type in ['xlnet']), # xlnet has a cls token at the end + cls_token=self.tokenizer.cls_token, + sep_token=self.tokenizer.sep_token, + cls_token_segment_id=2 if self.args.model_type in ['xlnet'] else 0, + pad_on_left=bool(self.args.model_type in ['xlnet']), # pad on the left for xlnet + pad_token_segment_id=4 if self.args.model_type in ['xlnet'] else 0) + else: + pass + + logger.info('%s Data Examples: %d' % (name, len(self.examples))) + + def tensorize(self, cls_token_at_end=False, pad_on_left=False, + cls_token='[CLS]', sep_token='[SEP]', pad_token=0, + sequence_a_segment_id=0, sequence_b_segment_id=1, + cls_token_segment_id=1, pad_token_segment_id=0, + mask_padding_with_zero=True): + + # debug: + debug_size = 500 + features = [] + + for (ex_index, example) in enumerate(self.examples[0: ]): + if len(example.label) == 0: continue + if ex_index % 10000 == 0: logger.info("Tensorizing example %d of %d" % (ex_index, len(self.examples))) + + tokens_a = self.tokenizer.tokenize(example.text_a) + + tokens_b = None + if example.text_b: + tokens_b = self.tokenizer.tokenize(example.text_b) + # Modifies `tokens_a` and `tokens_b` in place so that the total length is less than the specified length. 
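+                # _truncate_seq_pair trims one token at a time from whichever
+                # sequence is currently longer, so with max_seq_length = 128 it
+                # stops once len(tokens_a) + len(tokens_b) <= 125.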
+ # Account for [CLS], [SEP], [SEP] with "- 3" + _truncate_seq_pair(tokens_a, tokens_b, self.args.max_seq_length - 3) + else: + # Account for [CLS] and [SEP] with "- 2" + if len(tokens_a) > self.args.max_seq_length - 2: + tokens_a = tokens_a[:(self.args.max_seq_length - 2)] + + tokens = tokens_a + [sep_token] + segment_ids = [sequence_a_segment_id] * len(tokens) + + if tokens_b: + tokens += tokens_b + [sep_token] + segment_ids += [sequence_b_segment_id] * (len(tokens_b) + 1) + + if cls_token_at_end: + tokens = tokens + [cls_token] + segment_ids = segment_ids + [cls_token_segment_id] + else: + tokens = [cls_token] + tokens + segment_ids = [cls_token_segment_id] + segment_ids + + input_ids = self.tokenizer.convert_tokens_to_ids(tokens) + + # The mask has 1 for real tokens and 0 for padding tokens. Only real tokens are attended to. + input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids) + + # Zero-pad up to the sequence length. + padding_length = self.args.max_seq_length - len(input_ids) + if pad_on_left: + input_ids = ([pad_token] * padding_length) + input_ids + input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask + segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids + else: + input_ids = input_ids + ([pad_token] * padding_length) + input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_length) + segment_ids = segment_ids + ([pad_token_segment_id] * padding_length) + + assert len(input_ids) == self.args.max_seq_length + assert len(input_mask) == self.args.max_seq_length + assert len(segment_ids) == self.args.max_seq_length + + # image features + img_feat = self.img_features[example.img_key] # torch + #img_feat = self.img_features.item().get(example.img_key) # numpy + if img_feat.shape[0] > self.args.max_img_seq_length: + img_feat = img_feat[0:self.args.max_img_seq_length, ] + if self.args.max_img_seq_length > 0: + input_mask = input_mask + [1 if mask_padding_with_zero else 0] * img_feat.shape[0] + # segment_ids += [sequence_b_segment_id] * img_feat.shape[0] + else: + if self.args.max_img_seq_length > 0: + input_mask = input_mask + [1 if mask_padding_with_zero else 0] * img_feat.shape[0] + # segment_ids = segment_ids + [sequence_b_segment_id] * img_feat.shape[0] + padding_matrix = torch.zeros((self.args.max_img_seq_length - img_feat.shape[0], img_feat.shape[1])) + img_feat = torch.cat((img_feat, padding_matrix), 0) + if self.args.max_img_seq_length > 0: + input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_matrix.shape[0]) + # segment_ids = segment_ids + [pad_token_segment_id] * padding_matrix.shape[0] + + if self.args.output_mode == "classification": + label_id = [self.label_map[l] for l in example.label] + score = example.score + elif self.args.output_mode == "regression": + label_id = float(example.label) + else: + raise KeyError(self.args.output_mode) + + if ex_index < 5: + logger.info("*** Example ***") + logger.info("guid: %s" % (example.guid)) + logger.info("tokens: %s" % " ".join([str(x) for x in tokens])) + logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) + logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask])) + logger.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids])) + logger.info("label: %s (id = %s)" % (example.label, label_id)) + logger.info("score: %s (score = %s)" % (example.score, score)) + + new_scores = target_tensor(len(self.labels), label_id, score) + #features.append(InputFeat(input_ids=input_ids, input_mask=input_mask, 
segment_ids=segment_ids, label_id=label_id, score=score, img_feat=img_feat)) + features.append((torch.tensor(input_ids, dtype=torch.long), + torch.tensor(input_mask, dtype=torch.long), + torch.tensor(segment_ids, dtype=torch.long), + torch.tensor([label_id[0]], dtype=torch.long), + torch.tensor(new_scores, dtype=torch.float), img_feat)) + + return features + + def tensorize_example(self, example, cls_token_at_end=False, pad_on_left=False, + cls_token='[CLS]', sep_token='[SEP]', pad_token=0, + sequence_a_segment_id=0, sequence_b_segment_id=1, + cls_token_segment_id=1, pad_token_segment_id=0, + mask_padding_with_zero=True): + + tokens_a = self.tokenizer.tokenize(example.text_a) + + tokens_b = None + if example.text_b: + txt_b_arr = example.text_b.split(';') + txt_label_ixs = [] + for txt_b_ix, txt_b_ele in enumerate(txt_b_arr): + tokens_b_ele = self.tokenizer.tokenize(txt_b_ele) + txt_label_ixs.extend([txt_b_ix] * len(tokens_b_ele)) + txt_b = example.text_b.replace(';', ' ').strip() + tokens_b = self.tokenizer.tokenize(txt_b) + assert len(tokens_b) == len(txt_label_ixs) + + # Modifies `tokens_a` and `tokens_b` in place so that the total length is less than the specified length. + # Account for [CLS], [SEP], [SEP] with "- 3" + _truncate_seq_pair(tokens_a, tokens_b, self.args.max_seq_length - 3) + txt_label_ixs = txt_label_ixs[0:len(tokens_b)] + + # original + #if example.text_b: + # txt_b = example.text_b.replace(';', ' ').strip() + # tokens_b = self.tokenizer.tokenize(txt_b) + # _truncate_seq_pair(tokens_a, tokens_b, self.args.max_seq_length - 3) + else: # Account for [CLS] and [SEP] with "- 2" + if len(tokens_a) > self.args.max_seq_length - 2: + tokens_a = tokens_a[:(self.args.max_seq_length - 2)] + + tokens = tokens_a + [sep_token] + segment_ids = [sequence_a_segment_id] * len(tokens) + + if tokens_b: + tokens += tokens_b + [sep_token] + segment_ids += [sequence_b_segment_id] * (len(tokens_b) + 1) + + if cls_token_at_end: + tokens = tokens + [cls_token] + segment_ids = segment_ids + [cls_token_segment_id] + else: + tokens = [cls_token] + tokens + segment_ids = [cls_token_segment_id] + segment_ids + + input_ids = self.tokenizer.convert_tokens_to_ids(tokens) + + # The mask has 1 for real tokens and 0 for padding tokens. Only real tokens are attended to. + input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids) + + # Zero-pad up to the sequence length. 
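+        # Illustration of the padding below (hypothetical ids, max_seq_length = 8):
+        #   tokens      : [CLS] is the dog white ? [SEP]
+        #   input_ids   : [id_0, id_1, id_2, id_3, id_4, id_5, id_6, 0]
+        #   input_mask  : [1,    1,    1,    1,    1,    1,    1,    0]
+        #   segment_ids : [0,    0,    0,    0,    0,    0,    0,    0]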
+ padding_length = self.args.max_seq_length - len(input_ids) + if pad_on_left: + input_ids = ([pad_token] * padding_length) + input_ids + input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask + segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids + else: + input_ids = input_ids + ([pad_token] * padding_length) + input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_length) + segment_ids = segment_ids + ([pad_token_segment_id] * padding_length) + + assert len(input_ids) == self.args.max_seq_length + assert len(input_mask) == self.args.max_seq_length + assert len(segment_ids) == self.args.max_seq_length + + # image features + if self.args.img_feature_type.startswith('dis_code'): + img_feat = self.img_features[example.img_key] + + if self.args.img_feature_type == 'dis_code_ln': # for discrete code image representation + img_feat = img_feat.reshape(-1, img_feat.shape[0]) + + if self.args.img_feature_type == 'dis_code_t': # transposed + input_mask = input_mask + [1 if mask_padding_with_zero else 0] * 64 + else: + input_mask = input_mask + [1 if mask_padding_with_zero else 0] * img_feat.shape[0] + else: + img_feat = self.img_features[example.img_key] #[:, 0:self.args.img_feature_dim] # torch + + if img_feat.shape[0] > self.args.max_img_seq_length: + img_feat = img_feat[0:self.args.max_img_seq_length, ] + if self.args.max_img_seq_length > 0: + input_mask = input_mask + [1 if mask_padding_with_zero else 0] * img_feat.shape[0] + # segment_ids += [sequence_b_segment_id] * img_feat.shape[0] + else: + if self.args.max_img_seq_length > 0: + input_mask = input_mask + [1 if mask_padding_with_zero else 0] * img_feat.shape[0] + # segment_ids = segment_ids + [sequence_b_segment_id] * img_feat.shape[0] + padding_matrix = torch.zeros((self.args.max_img_seq_length - img_feat.shape[0], img_feat.shape[1])) + img_feat = torch.cat((img_feat, padding_matrix), 0) + if self.args.max_img_seq_length > 0: + input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_matrix.shape[0]) + # segment_ids = segment_ids + [pad_token_segment_id] * padding_matrix.shape[0] + + if self.args.output_mode == "classification": + if (example.label is None): + label_id = [0] + score = [0] + elif len(example.label) == 0: + label_id = [0] + score = [0] + else: + label_id = [self.label_map[l] for l in example.label] + score = example.score + elif self.args.output_mode == "regression": + if len(example.label) == 0: + label_id = 0 + else: + label_id = float(example.label) + else: + raise KeyError(self.args.output_mode) + + if self.args.img_feature_type in ['dis_code', 'dis_code_t']: + img_feat = img_feat.type(torch.long) + elif self.args.img_feature_type in ['dis_code_ln']: + #img_feat = img_feat.reshape(-1, img_feat.shape[0]) + img_feat = img_feat.type(torch.float) + + return (torch.tensor(input_ids, dtype=torch.long), + torch.tensor(input_mask, dtype=torch.long), + torch.tensor(segment_ids, dtype=torch.long), + torch.tensor([label_id[0]], dtype=torch.long), + torch.tensor([label_id[0]], dtype=torch.long), + img_feat, + torch.tensor([example.q_id], dtype=torch.long)) + + def __getitem__(self, index): + if self.args.load_fast: + example = self.features[index] + else: + entry = self.examples[index] + example = self.tensorize_example(entry, + cls_token_at_end=bool(self.args.model_type in ['xlnet']), # xlnet has a cls token at the end + cls_token=self.tokenizer.cls_token, + sep_token=self.tokenizer.sep_token, + cls_token_segment_id=2 if self.args.model_type in 
['xlnet'] else 0, + pad_on_left=bool(self.args.model_type in ['xlnet']), # pad on the left for xlnet + pad_token_segment_id=4 if self.args.model_type in ['xlnet'] else 0) + return example + + def __len__(self): + return len(self.examples) + + +def trim_batch(batch): + """ new batch func + :param batch: + :return: + """ + print('batch size', len(batch)) + + batch_size = len(batch) + batch_tensors = [] + for ele in batch[0]: + print(ele.shape, ele.size()) + zero_tensor = torch.zeros(([batch_size] + list(ele.size()))) + batch_tensors.append(zero_tensor) + + for b_id, b in enumerate(batch): + print(b_id, len(b)) + for ele_id, ele in enumerate(b): + print(ele_id, ele.shape) + batch_tensors[ele_id][b_id] = ele + return batch_tensors + + +def train(args, train_dataset, eval_dataset, model, tokenizer): + """ Train the model """ + args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) + train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) + train_dataloader = DataLoader(train_dataset, num_workers=args.workers, sampler=train_sampler, batch_size=args.train_batch_size) #, collate_fn=trim_batch) + + if args.max_steps > 0: + t_total = args.max_steps + args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 + else: + t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs + + # Prepare optimizer and schedule (linear warmup and decay) + no_decay = ['bias', 'LayerNorm.weight'] + optimizer_grouped_parameters = [ + {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay}, + {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} + ] + optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) + scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=t_total) + + if args.fp16: + try: + from apex import amp + except ImportError: + raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") + model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) + + # multi-gpu training (should be after apex fp16 initialization) + if args.n_gpu > 1: + model = torch.nn.DataParallel(model) + + # Distributed training (should be after apex fp16 initialization) + if args.local_rank != -1: + model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True) + + # Train! + logger.info("***** Running training *****") + logger.info(" Num examples = %d", len(train_dataset)) + logger.info(" Num Epochs = %d", args.num_train_epochs) + logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) + logger.info(" Total train batch size (w. 
parallel, distributed & accumulation) = %d", + args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1)) + logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) + logger.info(" Total optimization steps = %d", t_total) + + global_step = 0 + tr_loss, logging_loss = 0.0, 0.0 + model.zero_grad() + #train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]) + set_seed(args.seed, args.n_gpu) # Added here for reproductibility (even between python 2 and 3) + + best_score = 0 + best_model = { + 'epoch': 0, + 'model_state': model.state_dict(), + 'optimizer_state': optimizer.state_dict() + } + + for epoch in range(int(args.num_train_epochs)): + #for epoch in train_iterator: + #epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) + total_loss = 0 + total_norm = 0 + count_norm = 0 + + t_start = time.time() + for step, batch in enumerate(train_dataloader): + #for step, batch in enumerate(epoch_iterator): + model.train() + batch = tuple(t.to(args.device) for t in batch) + inputs = {'input_ids': batch[0], + 'attention_mask': batch[1], + 'token_type_ids': batch[2] if args.model_type in ['bert', 'xlnet'] else None, # XLM don't use segment_ids + 'labels': batch[3], + 'img_feats': None if args.img_feature_dim == -1 else batch[5]} + outputs = model(**inputs) + + #loss = outputs[0] # model outputs are always tuple in pytorch-transformers (see doc) + loss, logits = outputs[:2] + + if args.n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu parallel training + + if args.gradient_accumulation_steps > 1: + loss = loss / args.gradient_accumulation_steps + + if args.fp16: + with amp.scale_loss(loss, optimizer) as scaled_loss: + scaled_loss.backward() + torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) + else: + loss.backward() + total_norm += torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) + count_norm += 1 + + #batch_score = compute_score_with_logits(logits, batch[4]).sum() + #train_score += batch_score.item() + + tr_loss += loss.item() + total_loss += loss.item() + if (step + 1) % args.gradient_accumulation_steps == 0: + scheduler.step() # Update learning rate schedule + optimizer.step() + model.zero_grad() + global_step += 1 + + if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:# Log metrics + if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well + logger.info("Epoch: %d, global_step: %d" % (epoch, global_step)) + eval_result, eval_score = evaluate(args, model, eval_dataset, prefix=global_step) + if eval_score > best_score: + best_score = eval_score + best_model['epoch'] = epoch + best_model['model'] = copy.deepcopy(model) + + logger.info("EVALERR: {}%".format(100 * best_score)) + logging_loss = tr_loss + + #if args.max_steps > 0 and global_step > args.max_steps: + # epoch_iterator.close() + # break + + t_end = time.time() + logger.info('Train Time Cost: %.3f' % (t_end-t_start)) + + # evaluation + logger.info("Epoch: %d" % (epoch)) + eval_result, eval_score = evaluate(args, model, eval_dataset, prefix=global_step) + if eval_score > best_score: + best_score = eval_score + best_model['epoch'] = epoch + best_model['model'] = copy.deepcopy(model) + #best_model['optimizer'] = copy.deepcopy(optimizer.state_dict()) + + # save checkpoints + if 
args.local_rank in [-1, 0] and args.save_epoch > 0 and epoch % args.save_epoch == 0: # Save model checkpoint + output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(epoch)) + if not os.path.exists(output_dir): os.makedirs(output_dir) + model_to_save = best_model['model'].module if hasattr(model, 'module') else best_model['model'] # Take care of distributed/parallel training + + save_num = 0 + while (save_num < 10): + try: + model_to_save.save_pretrained(output_dir) + torch.save(args, os.path.join(output_dir, 'training_args.bin')) + tokenizer.save_pretrained(output_dir) + break + except: + save_num += 1 + logger.info("Saving model checkpoint {0} to {1}".format(epoch, output_dir)) + + epoch_log = {'epoch': epoch, 'eval_score': eval_score, 'best_score':best_score} + log_json.append(epoch_log) + + with open(args.output_dir + '/eval_logs.json', 'w') as fp: + json.dump(log_json, fp) + + logger.info("PROGRESS: {}%".format(round(100*(epoch + 1) / args.num_train_epochs, 4))) + logger.info("EVALERR: {}%".format(100*best_score)) + logger.info("LOSS: {}%".format(total_loss / len(train_dataset))) + + with open(args.output_dir + '/eval_logs.json', 'w') as fp: + json.dump(log_json, fp) + + if args.local_rank in [-1, 0]: # Save the final model checkpoint + output_dir = os.path.join(args.output_dir, 'best-{}'.format(best_model['epoch'])) + if not os.path.exists(output_dir): os.makedirs(output_dir) + model_to_save = best_model['model'].module if hasattr(model, 'module') else best_model['model'] # Take care of distributed/parallel training + + save_num = 0 + while (save_num < 10): + try: + model_to_save.save_pretrained(output_dir) + torch.save(args, os.path.join(output_dir, 'training_args.bin')) + tokenizer.save_pretrained(output_dir) + break + except: + save_num += 1 + logger.info("Saving the best model checkpoint epoch {} to {}".format(best_model['epoch'], output_dir)) + + return global_step, tr_loss / global_step + +def evaluate(args, model, eval_dataset=None, prefix=""): + # Loop to handle MNLI double evaluation (matched, mis-matched) + eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,) + eval_outputs_dirs = (args.output_dir, args.output_dir + '-MM') if args.task_name == "mnli" else (args.output_dir,) + + #if args.n_gpu > 1: model = torch.nn.DataParallel(model) # debug: single-gpu or multi-gpus + + results = [] + t_start = time.time() + for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs): + if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]: os.makedirs(eval_output_dir) + + args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) + # Note that DistributedSampler samples randomly + eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset) + eval_dataloader = DataLoader(eval_dataset, num_workers=args.workers, sampler=eval_sampler, batch_size=args.eval_batch_size) + + # Eval! 
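+        # Accuracy below is plain exact match: the argmax over the answer
+        # logits is compared against the single gold label id per question.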
+ logger.info("***** Running evaluation {} *****".format(prefix)) + logger.info(" Num examples = %d", len(eval_dataset)) + logger.info(" Batch size = %d", args.eval_batch_size) + eval_loss = 0.0 + nb_eval_steps = 0 + preds = None + out_label_ids = None + num_data = 0 + correct_num = 0 + + for batch in eval_dataloader: + #for batch in tqdm(eval_dataloader, desc="Evaluating"): + model.eval() + batch = tuple(t.to(args.device) for t in batch) + + with torch.no_grad(): + inputs = {'input_ids': batch[0], + 'attention_mask': batch[1], + 'token_type_ids': batch[2] if args.model_type in ['bert', 'xlnet'] else None, # XLM don't use segment_ids + 'labels': batch[3], + 'img_feats': None if args.img_feature_dim == -1 else batch[5]} + outputs = model(**inputs) + tmp_eval_loss, logits = outputs[:2] + + eval_loss += tmp_eval_loss.mean().item() + + #logger.info('logits: %s, batch[3]: %s' % (str(logits.shape), str(batch[3].shape))) + #logger.info('correct: %s' % (str(logits.argmax(1) == batch[3].view(-1)))) + + correct = logits.argmax(1) == batch[3].view(-1) + correct_num += correct.sum().item() + num_data += logits.size(0) + + # debug + #val, idx = logits.max(1) + #logger.info('idx: %s, batch[4]: %s' % (str(idx.shape), str(batch[3].shape))) + #for i in range(idx.size(0)): + # logger.info('idx: %d, pred: %d, real: %d' % (idx[i].item(), eval_dataset.labels[idx[i].item()], batch[3][i].item())) + + nb_eval_steps += 1 + + acc = float(correct_num) / len(eval_dataloader.dataset) + + logger.info("Eval Results:") + logger.info("Eval Accuracy: %.3f" % (100*acc)) + logger.info("Eval Loss: %.3f" % (eval_loss)) + + t_end = time.time() + logger.info('Eva Time Cost: %.3f' % (t_end - t_start)) + + return results, acc + +def test(args, model, eval_dataset=None, prefix=""): + # Loop to handle MNLI double evaluation (matched, mis-matched) + eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,) + eval_outputs_dirs = (args.output_dir, args.output_dir + '-MM') if args.task_name == "mnli" else (args.output_dir,) + + label2ans = cPickle.load(open(args.label2ans_file, 'rb')) + logger.info('label2ans: %d' % (len(label2ans))) + + results = [] + t_start = time.time() + for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs): + if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]: os.makedirs(eval_output_dir) + + args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) + # Note that DistributedSampler samples randomly + eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset) + eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) + + # Eval + logger.info("***** Running Test {} *****".format(prefix)) + logger.info(" Num examples = %d", len(eval_dataset)) + logger.info(" Batch size = %d", args.eval_batch_size) + + for batch in eval_dataloader: + #for batch in tqdm(eval_dataloader, desc="Evaluating"): + model.eval() + batch = tuple(t.to(args.device) for t in batch) + + with torch.no_grad(): + inputs = {'input_ids': batch[0], + 'attention_mask': batch[1], + 'token_type_ids': batch[2] if args.model_type in ['bert', 'xlnet'] else None, # XLM don't use segment_ids + 'labels': None, + 'img_feats': None if args.img_feature_dim == -1 else batch[5]} + outputs = model(**inputs) + logits = outputs[0] + + val, idx = logits.max(1) + #logger.info('idx: %s, batch[6]: %s' % (str(idx.shape), str(batch[6].shape))) + + for i in range(idx.size(0)): + result = {} + 
result['questionId'] = str(batch[6][i].item()) + result['prediction'] = label2ans[eval_dataset.labels[idx[i].item()]] + results.append(result) + + #logger.info('q_id: {0}, answer: {1}'.format(result['question_id'], result['answer'])) + + with open(args.output_dir + ('/{}_results.json'.format(eval_dataset.name)), 'w') as fp: + json.dump(results, fp) + + t_end = time.time() + logger.info('# questions: %d' % (len(results))) + logger.info('Test Time Cost: %.3f' % (t_end - t_start)) + + +def load_and_cache_examples(args, task, tokenizer, evaluate=False): + processor = processors[task]() + output_mode = output_modes[task] + + label_list = processor.get_labels(args.label_file) + + t_start = time.time() + examples = processor.get_dev_examples(args.data_dir) if evaluate else processor.get_train_examples(args.data_dir) + + #img_features = torch.load(os.path.join(args.data_dir, 'val_img_feats.pt' if evaluate else 'train_img_feats.pt')) + #img_features = torch.load(os.path.join(args.data_dir, 'val_img_frcnn_feats.pt' if evaluate else 'train_img_frcnn_feats.pt')) + img_features = np.load(os.path.join(args.data_dir, 'val_img_frcnn_feats.npy' if evaluate else 'train_img_frcnn_feats.npy')) + + features = convert_examples_to_features_vqa(examples, img_features, label_list, args.max_img_seq_length, args.max_seq_length, + tokenizer, output_mode, + cls_token_at_end=bool(args.model_type in ['xlnet']), # xlnet has a cls token at the end + cls_token=tokenizer.cls_token, + sep_token=tokenizer.sep_token, + cls_token_segment_id=2 if args.model_type in ['xlnet'] else 0, + pad_on_left=bool(args.model_type in ['xlnet']), # pad on the left for xlnet + pad_token_segment_id=4 if args.model_type in ['xlnet'] else 0) + + #if args.local_rank in [-1, 0]: + # logger.info("Saving features into cached file %s", cached_features_file) + # torch.save(features, cached_features_file) + t_end = time.time() + logger.info('Info: loading features using %.5f secs' % (t_end-t_start)) + + + # Convert to Tensors and build dataset + all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long) # batch*max_seq_len + all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long) + all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long) + if output_mode == "classification": + labels = torch.tensor([f.label_id[0] for f in features], dtype=torch.long) + targets = torch.tensor([target_tensor(len(label_list), f.label_id, f.score) for f in features], dtype=torch.float) + + if args.img_feature_dim > 0: # change here + t_start = time.time() + img_feat_np = np.zeros((labels.shape[0], args.max_img_seq_length, args.img_feature_dim)) + for f_id, f in enumerate(features): + img_feat_np[f_id] = f.img_feat + + img_feats = torch.from_numpy(img_feat_np) + + #img_feats = torch.empty((labels.shape[0], args.max_img_seq_length, args.img_feature_dim)) + #for f_id, f in enumerate(features): + # img_feats[f_id] = f.img_feat + + t_end = time.time() + logger.info('Info: convert image tensor features using %.5f secs' % (t_end - t_start)) + + #img_feats = torch.stack([f.img_feat[:,-args.img_feature_dim:] for f in features]) + #img_feats = torch.stack([f.img_feat for f in features]) + #img_feats = img_feats.type(torch.long) + + #print('targets:', targets.shape) + print('img_feats:', img_feats.shape) + elif output_mode == "regression": + all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.float) + + if args.img_feature_dim == -1: + dataset = TensorDataset(all_input_ids, all_input_mask, 
all_segment_ids, labels, targets)
+        else:
+            dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, labels, targets, img_feats)
+    return dataset
+
+def target_tensor(num_labels, labels, scores):
+    """ create the target vector from the labels and their scores """
+    target = [0] * num_labels
+    for i, l in enumerate(labels):
+        target[l] = scores[i]
+
+    return target
+
+
+def main():
+    parser = argparse.ArgumentParser()
+
+    ## Required parameters
+    parser.add_argument("--data_dir", default=None, type=str, required=True,
+                        help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
+    parser.add_argument("--model_type", default=None, type=str, required=True,
+                        help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()))
+    parser.add_argument("--model_name_or_path", default=None, type=str, required=True,
+                        help="Path to pre-trained model or shortcut name")
+    parser.add_argument("--task_name", default=None, type=str, required=True,
+                        help="The name of the task to train selected in the list: " + ", ".join(processors.keys()))
+    parser.add_argument("--output_dir", default=None, type=str, required=True,
+                        help="The output directory where the model predictions and checkpoints will be written.")
+    parser.add_argument("--label_file", type=str, default=None, help="Label Dictionary")
+    parser.add_argument("--label2ans_file", type=str, default=None, help="Label to Answer Dictionary")
+
+    parser.add_argument("--data_label_type", default='bal', type=str, help="bal or all")
+    parser.add_argument("--train_data_type", default='bal', type=str, help="bal or all")
+    parser.add_argument("--eval_data_type", default='bal', type=str, help="bal or all")
+    parser.add_argument("--loss_type", default='kl', type=str, help="kl or xe")
+
+    parser.add_argument("--spatial_dim", default=6, type=int, help="spatial_dim")
+
+    parser.add_argument("--max_label_pos_length", default=45, type=int, help="The maximum total input label position sequence length.")
+
+    ## Other parameters
+    parser.add_argument("--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name")
+    parser.add_argument("--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name")
+    parser.add_argument("--cache_dir", default="", type=str, help="Where do you want to store the pre-trained models downloaded from s3")
+    parser.add_argument("--max_seq_length", default=128, type=int,
+                        help="The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.")
+    parser.add_argument("--do_train", action='store_true', help="Whether to run training.")
+    parser.add_argument("--do_train_val", action='store_true', help="Whether to run training on train+val (deprecated).")
+    parser.add_argument("--do_eval", action='store_true', help="Whether to run eval on the dev set.")
+    parser.add_argument("--do_test", action='store_true', help="Whether to run test on the test set.")
+    parser.add_argument("--do_test_dev", action='store_true', help="Whether to run test on the test-dev set.")
+    parser.add_argument("--evaluate_during_training", action='store_true', help="Run evaluation during training at each logging step.")
+    parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.")
+
+    parser.add_argument("--drop_out", default=0.1, type=float, help="Drop out for BERT.")
+    parser.add_argument("--classifier", default='linear', type=str, help="linear or mlp")
+    parser.add_argument("--cls_hidden_scale", default=2, type=int, help="cls_hidden_scale: for classifier")
+
+    parser.add_argument("--max_img_seq_length", default=30, type=int, help="The maximum total input image sequence length.")
+    parser.add_argument("--img_feature_dim", default=2054, type=int, help="The Image Feature Dimension.")
+    parser.add_argument("--img_feature_type", default='faster_r-cnn', type=str, help="faster_r-cnn or mask_r-cnn")
+    parser.add_argument("--code_voc", default=512, type=int, help="dis_code_voc: 256, 512")
+    parser.add_argument("--code_level", default='top', type=str, help="code level: top, bottom, both")
+
+    parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.")
+    parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.")
+    parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
+                        help="Number of update steps to accumulate before performing a backward/update pass.")
+    parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
+    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
+    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
+    parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
+    parser.add_argument("--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.")
+    parser.add_argument("--max_steps", default=-1, type=int, help="If > 0: set total number of training steps to perform. Overrides num_train_epochs.")
+    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
+
+    parser.add_argument('--logging_steps', type=int, default=50, help="Log every X update steps.")
+    parser.add_argument('--save_steps', type=int, default=50, help="Save checkpoint every X update steps.")
+    parser.add_argument('--save_epoch', type=int, default=5, help="Save checkpoint every X epochs.")
+    parser.add_argument("--eval_all_checkpoints", action='store_true',
+                        help="Evaluate all checkpoints starting with the same prefix as model_name and ending with a step number")
+    parser.add_argument("--no_cuda", action='store_true', help="Avoid using CUDA when available")
+    parser.add_argument('--overwrite_output_dir', action='store_true', help="Overwrite the content of the output directory")
+    parser.add_argument('--overwrite_cache', action='store_true', help="Overwrite the cached training and evaluation sets")
+    parser.add_argument('--seed', type=int, default=42, help="random seed for initialization")
+
+    parser.add_argument('--fp16', action='store_true', help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
+    parser.add_argument('--fp16_opt_level', type=str, default='O1',
+                        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
+                             "See details at https://nvidia.github.io/apex/amp.html")
+    parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
+    parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.")
+    parser.add_argument('--server_port', type=str, default='', help="For distant debugging.")
+
+    parser.add_argument("--philly", action='store_true', help="Use Philly: reset the output dir")
+    parser.add_argument("--load_fast", action='store_true', help="Load Tensor Fast")
+    parser.add_argument('-j', '--workers', default=0, type=int, metavar='N', help='number of data loading workers (default: 0)')
+
+    #args = '--data_dir ../vqa/ban-vqa/data/qal_pairs --model_type bert --model_name_or_path bert-base-uncased --task_name vqa_text ' \
+    #       '--do_train --do_eval --do_lower_case --max_seq_length 40 --per_gpu_eval_batch_size 16 --per_gpu_train_batch_size 16 --learning_rate 2e-5 ' \
+    #       '--num_train_epochs 20.0 --output_dir ./results/vqa_text --label_file ../vqa/ban-vqa/data/cache/trainval_ans2label.pkl ' \
+    #       '--save_steps 5000 --overwrite_output_dir --max_img_seq_length 1 --img_feature_dim 565 --img_feature_type dis_code '
+
+    #args = '--data_dir ../vqa/ban-vqa/data/qal_pairs --model_type bert --model_name_or_path bert-base-uncased --task_name vqa_text ' \
+    #       '--do_train --do_eval --do_lower_case --max_seq_length 40 --per_gpu_eval_batch_size 16 --per_gpu_train_batch_size 16 --learning_rate 2e-5 ' \
+    #       '--num_train_epochs 20.0 --output_dir ./results/vqa_text --label_file ../vqa/ban-vqa/data/cache/trainval_ans2label.pkl ' \
+    #       '--save_steps 5000 --overwrite_output_dir --max_img_seq_length 10 --img_feature_dim 565 --img_feature_type other '
+
+    #args = parser.parse_args(args.split())
+
+    args = parser.parse_args()
+
+    if args.philly:  # use philly
+        logger.info('Info: Use Philly, all the output folders are reset.')
+        args.output_dir = os.path.join(os.getenv('PT_OUTPUT_DIR'), args.output_dir)
+        logger.info('OUTPUT_DIR: %s', args.output_dir)
+
+    #if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir:
+    #    raise ValueError("Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(args.output_dir))
+
+    if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train: logger.info("Output Directory Exists.")
+
+    # Setup distant debugging if needed
+    if args.server_ip and args.server_port:
+        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
+        import ptvsd
+        print("Waiting for debugger attach")
+        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
+        ptvsd.wait_for_attach()
+
+    # Setup CUDA, GPU & distributed training
+    if args.local_rank == -1 or args.no_cuda:
+        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
+        args.n_gpu = torch.cuda.device_count()
+    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
+        torch.cuda.set_device(args.local_rank)
+        device = torch.device("cuda", args.local_rank)
+        torch.distributed.init_process_group(backend='nccl')
+        args.n_gpu = 1
+    args.device = device
+
+    # Setup logging
+    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
+                        datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
+    logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
+                   args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)
+
+    # Set seed
+    set_seed(args.seed, args.n_gpu)
+
+    # Prepare the GQA task
+    args.task_name = args.task_name.lower()
+    if args.task_name not in processors:
+        raise ValueError("Task not found: %s" % (args.task_name))
+
+    processor = processors[args.task_name]()
+    args.output_mode = output_modes[args.task_name]
+    label_list = processor.get_labels(args.label_file)
+    num_labels = len(label_list)
+    logger.info('Task Name: {}, #Labels: {}'.format(args.task_name, num_labels))
+
+    # Load pretrained model and tokenizer
+    if args.local_rank not in [-1, 0]:
+        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab
+
+    args.model_type = args.model_type.lower()
+    config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
+    config = config_class.from_pretrained(
+        args.config_name if args.config_name else args.model_name_or_path,
+        num_labels=num_labels, finetuning_task=args.task_name,
+    )
+    tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case)
+
+    # discrete code
+    config.img_feature_dim = args.img_feature_dim
+    config.img_feature_type = args.img_feature_type
+    config.code_voc = args.code_voc
+    config.hidden_dropout_prob = args.drop_out
+    config.loss_type = args.loss_type
+    config.classifier = args.classifier
+    config.cls_hidden_scale = args.cls_hidden_scale
+    config.spatial_dim = args.spatial_dim
+
+    # load discrete code
+    if args.img_feature_type in ['dis_code', 'dis_code_t']:
+        logger.info('Load discrete code from: {}'.format(args.data_dir))
+        t_start = time.time()
+        train_code = torch.load(os.path.join(args.data_dir, 'vqvae', 'train.pt'))
+        t_end = time.time()
+        logger.info('Load time: %.3f' % (t_end - t_start))
+
+        if args.code_level == 'top':
+            config.code_dim = train_code['embeddings_t'].shape[0]
+            config.code_size = train_code['feats_top'][list(train_code['feats_top'].keys())[0]].shape[0]
+        elif args.code_level == 'bottom':
+            config.code_dim = train_code['embeddings_b'].shape[0]
+
config.code_size = train_code['feats_bottom'][list(train_code['feats_bottom'].keys())[0]].shape[0] + elif args.code_level == 'both': + config.code_dim = train_code['embeddings_t'].shape[0] + train_code['embeddings_b'].shape[0] + + model = model_class.from_pretrained(args.model_name_or_path, from_tf=bool('.ckpt' in args.model_name_or_path), config=config) + if args.img_feature_type in ['dis_code', 'dis_code_t']: + logger.info('Initializing the code embedding with {}'.format(args.code_level)) + if args.code_level == 'top': + model.init_code_embedding(train_code['embeddings_t'].t()) + elif args.code_level == 'bottom': + model.init_code_embedding(train_code['embeddings_b'].t()) + + if args.local_rank == 0: + torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab + + model.to(args.device) + + logger.info("Training/evaluation parameters %s", args) + + # load image features + img_features = _load_img_features(args) + label_pos_feats = None + + #if args.do_eval: + eval_dataset = GQADataset(args, 'val', img_features, tokenizer, label_pos_feats) + #eval_dataset = GQADataset(args, 'test-dev', img_features, tokenizer) # test-dev as val + + if args.do_test: + test_dataset = GQADataset(args, 'test', img_features, tokenizer, label_pos_feats) + + if args.do_test_dev: + test_dev_dataset = GQADataset(args, 'test-dev', img_features, tokenizer, label_pos_feats) + + # Training + if args.do_train: + train_dataset = GQADataset(args, 'train', img_features, tokenizer, label_pos_feats) + global_step, tr_loss = train(args, train_dataset, eval_dataset, model, tokenizer) + logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) + + # Training on train+val + if args.do_train_val: # depreciated + train_dataset = GQADataset(args, 'train+val', img_features, tokenizer) + global_step, tr_loss = train(args, train_dataset, eval_dataset, model, tokenizer) + logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) + + # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained() + if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0): + # Create output directory if needed + if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir) + + logger.info("Saving model checkpoint to %s", args.output_dir) + # Save a trained model, configuration and tokenizer using `save_pretrained()`. 
They can then be reloaded using `from_pretrained()` + #model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training + #model_to_save.save_pretrained(args.output_dir) + + tokenizer.save_pretrained(args.output_dir) + + # Good practice: save your training arguments together with the trained model + torch.save(args, os.path.join(args.output_dir, 'training_args.bin')) + + # Load a trained model and vocabulary that you have fine-tuned + #model = model_class.from_pretrained(args.output_dir) + #tokenizer = tokenizer_class.from_pretrained(args.output_dir) + #model.to(args.device) + + + # Evaluation + #results = {} + if args.do_eval and args.local_rank in [-1, 0]: + checkpoints = [args.output_dir] + if args.eval_all_checkpoints: + checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True))) + logging.getLogger("pytorch_transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging + logger.info("Evaluate the following checkpoints: %s", checkpoints) + + for checkpoint in checkpoints: + global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else "" + model = model_class.from_pretrained(checkpoint, config=config) + model.to(args.device) + result, score = evaluate(args, model, eval_dataset, prefix=global_step) + #result = dict((k + '_{}'.format(global_step), v) for k, v in result.items()) + #results.update(result) + + # Test-Dev + if args.do_test_dev and args.local_rank in [-1, 0]: + checkpoints = [args.output_dir] + if args.eval_all_checkpoints: + checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True))) + logging.getLogger("pytorch_transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging + logger.info("Evaluate the following checkpoints: %s", checkpoints) + + for checkpoint in checkpoints: + global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else "" + model = model_class.from_pretrained(checkpoint, config=config) + model.to(args.device) + result, score = evaluate(args, model, test_dev_dataset, prefix=global_step) + #test(args, model, test_dev_dataset, prefix=global_step) + + # Test-Submission + if args.do_test and args.local_rank in [-1, 0]: + checkpoints = [args.output_dir] + if args.eval_all_checkpoints: + checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True))) + logging.getLogger("pytorch_transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging + logger.info("Evaluate the following checkpoints: %s", checkpoints) + + for checkpoint in checkpoints: + global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else "" + model = model_class.from_pretrained(checkpoint) + model.to(args.device) + test(args, model, test_dataset, prefix=global_step) + + +if __name__ == "__main__": + main() diff --git a/oscar/run_nlvr.py b/oscar/run_nlvr.py new file mode 100644 index 0000000..9f427b3 --- /dev/null +++ b/oscar/run_nlvr.py @@ -0,0 +1,925 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# Copyright (c) 2020 Microsoft Corporation. Licensed under the MIT license. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Finetuning the library models for sequence classification on NLVR2 (Bert, XLM, XLNet).
+Debug:
+CUDA_VISIBLE_DEVICES=0,1,2,3 python ./examples/run_nlvr_baselines.py -j 4 --img_feature_dim 2054 --max_img_seq_length 45 --data_label_type mask --img_feature_type faster_r-cnn --data_dir ../data/nlvr/nlvr2/feats_deb --model_type bert --model_name_or_path bert-base-uncased --task_name nlvr --do_train --do_lower_case --max_seq_length 45 --per_gpu_eval_batch_size 16 --per_gpu_train_batch_size 8 --learning_rate 5e-5 --num_train_epochs 2 --output_dir results/nlvr_test --save_epoch 1 --seed 88 --evaluate_during_training --logging_steps 10 --drop_out 0.3 --weight_decay 0.05 --warmup_steps 4000 --loss_type xe --save_steps -1 --use_pair --eval_data_type all --train_data_type all --classifier mlp
+
+# load pre-trained model
+CUDA_VISIBLE_DEVICES=0 python ./examples/run_vqa_baselines.py --data_dir ../vqa/ban-vqa/data/qal_pairs --model_type bert --model_name_or_path ../vqa/ban-vqa/data/qal_pairs/vqa_txt_model/ep_36 --task_name vqa_text --do_train --do_eval --do_lower_case --max_seq_length 96 --per_gpu_eval_batch_size 48 --per_gpu_train_batch_size 48 --learning_rate 2e-5 --num_train_epochs 20.0 --output_dir ./results/vqa_text --label_file ../vqa/ban-vqa/data/cache/trainval_ans2label.pkl --save_steps 5000 --overwrite_output_dir --max_img_seq_length -1 --img_feature_dim -1
+
+# test
+CUDA_VISIBLE_DEVICES=0,1,2,3 python ./examples/run_vqa_baselines.py --img_feature_dim 2054 --max_img_seq_length 45 --data_dir ../vqa/ban-vqa/data/qal_pairs --model_type bert --model_name_or_path ./models/results/best-17 --task_name vqa_text --do_eval --do_lower_case --max_seq_length 128 --per_gpu_eval_batch_size 64 --per_gpu_train_batch_size 64 --learning_rate 5e-5 --num_train_epochs 40 --output_dir ./models/results --label_file ../vqa/ban-vqa/data/cache/trainval_ans2label.pkl --save_steps 50000 --img_feature_type faster_r-cnn --data_label_type mask --eval_all_checkpoints --do_test --label2ans_file ../vqa/ban-vqa/data/cache/trainval_label2ans.pkl
+
+"""
+
+
+from __future__ import absolute_import, division, print_function
+
+import argparse
+import glob
+import logging
+import os
+import random, copy, time, json
+
+import sys
+sys.path.insert(0, '.')
+
+import numpy as np
+import torch
+import torch.nn as nn
+from torch.utils.data import (Dataset, DataLoader, RandomSampler, SequentialSampler, TensorDataset)
+from torch.utils.data.distributed import DistributedSampler
+from tqdm import tqdm, trange
+import _pickle as cPickle
+
+from transformers.pytorch_transformers import (WEIGHTS_NAME, BertConfig, BertTokenizer)
+from transformers.pytorch_transformers import AdamW, WarmupLinearSchedule
+
+from oscar.modeling.modeling_bert import ImageBertForMultipleChoice, ImageBertForSequenceClassification
+
+from torch.optim import Adamax
+from oscar.utils.task_utils import (_truncate_seq_pair, output_modes, processors)
+
+logger = logging.getLogger(__name__)
+
+ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in (BertConfig,)), ())
+
+MODEL_CLASSES = {
+    'bert': (BertConfig,
ImageBertForSequenceClassification, BertTokenizer), +} + + +log_json = [] + +def _load_dataset(args, name): + processor = processors[args.task_name]() + labels = processor.get_labels() + + if name == 'train': + examples = processor.get_train_examples(args.data_dir, args.use_label_seq, 'nlvr2_train.json') + elif name == 'val': + if args.eval_data_type == 'bal': + examples = processor.get_dev_examples(args.data_dir, args.use_label_seq, 'nlvr2_balanced_dev.json') + elif args.eval_data_type == 'unbal': + examples = processor.get_dev_examples(args.data_dir, args.use_label_seq, 'nlvr2_unbalanced_dev.json') + else: + examples = processor.get_dev_examples(args.data_dir, args.use_label_seq, 'nlvr2_dev.json') + elif name == 'test1': # test-submission + if args.data_label_type == 'bal': + examples = processor.get_test_examples(args.data_dir, args.use_label_seq, 'nlvr2_balanced_test1.json') + elif args.eval_data_type == 'unbal': + examples = processor.get_test_examples(args.data_dir, args.use_label_seq, 'nlvr2_unbalanced_test1.json') + else: + examples = processor.get_test_examples(args.data_dir, args.use_label_seq, 'nlvr2_test1.json') + elif name == 'val+test1': + if args.eval_data_type == 'bal': + examples = processor.get_dev_examples(args.data_dir, args.use_label_seq, 'nlvr2_balanced_dev.json') + elif args.eval_data_type == 'unbal': + examples = processor.get_dev_examples(args.data_dir, args.use_label_seq, 'nlvr2_unbalanced_dev.json') + else: + examples = processor.get_dev_examples(args.data_dir, args.use_label_seq, 'nlvr2_dev_test1.json') + + return examples, labels + +def _load_img_features(args): + t_start = time.time() + if args.img_feature_type == 'faster_r-cnn': + if args.img_feature_dim == 2048: # object features + feat_file_name = 'nlvr2_img_frcnn_feats.pt' + else: # object + spatial features + feat_file_name = 'nlvr2_img_frcnn_feats.pt' + else: + feat_file_name = 'nlvr2_img_frcnn_feats.pt' + img_features = torch.load(os.path.join(args.data_dir, feat_file_name)) + t_end = time.time() + logger.info('Info: loading {0:s} features using {1:.2f} secs'.format(feat_file_name, (t_end - t_start))) + + return img_features + + +class NLVRDataset(Dataset): + """ NLVR2 Dataset """ + + def __init__(self, args, name, img_features, tokenizer): + super(NLVRDataset, self).__init__() + assert name in ['train', 'val', 'test1', 'val+test1'] + + self.img_features = img_features + self.output_mode = output_modes[args.task_name] + self.tokenizer = tokenizer + self.args = args + self.name = name + + self.examples, self.labels = _load_dataset(args, name) + self.label_map = {label: i for i, label in enumerate(self.labels)} + + logger.info('%s Data Examples: %d' % (name, len(self.examples))) + + def tensorize_example(self, example, cls_token_at_end=False, pad_on_left=False, + cls_token='[CLS]', sep_token='[SEP]', pad_token=0, + sequence_a_segment_id=0, sequence_b_segment_id=1, + cls_token_segment_id=1, pad_token_segment_id=0, + mask_padding_with_zero=True): + + tokens_a = self.tokenizer.tokenize(example.text_a) + + tokens_b = None + if example.text_b: + text_b = example.text_b['left'] + ' ' + example.text_b['right'] + tokens_b = self.tokenizer.tokenize(text_b) + # Modifies `tokens_a` and `tokens_b` in place so that the total length is less than the specified length. 
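+            # NLVR2 pairs one statement with two images; the 'left' and
+            # 'right' parts of text_b were merged above into a single tokens_b
+            # before the usual pair truncation.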
+ # Account for [CLS], [SEP], [SEP] with "- 3" + _truncate_seq_pair(tokens_a, tokens_b, self.args.max_seq_length - 3) + else: + # Account for [CLS] and [SEP] with "- 2" + if len(tokens_a) > self.args.max_seq_length - 2: + tokens_a = tokens_a[:(self.args.max_seq_length - 2)] + + tokens = tokens_a + [sep_token] + segment_ids = [sequence_a_segment_id] * len(tokens) + + if tokens_b: + tokens += tokens_b + [sep_token] + segment_ids += [sequence_b_segment_id] * (len(tokens_b) + 1) + + if cls_token_at_end: + tokens = tokens + [cls_token] + segment_ids = segment_ids + [cls_token_segment_id] + else: + tokens = [cls_token] + tokens + segment_ids = [cls_token_segment_id] + segment_ids + + input_ids = self.tokenizer.convert_tokens_to_ids(tokens) + + # The mask has 1 for real tokens and 0 for padding tokens. Only real tokens are attended to. + input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids) + + # Zero-pad up to the sequence length. + padding_length = self.args.max_seq_length - len(input_ids) + if pad_on_left: + input_ids = ([pad_token] * padding_length) + input_ids + input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask + segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids + else: + input_ids = input_ids + ([pad_token] * padding_length) + input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_length) + segment_ids = segment_ids + ([pad_token_segment_id] * padding_length) + + assert len(input_ids) == self.args.max_seq_length + assert len(input_mask) == self.args.max_seq_length + assert len(segment_ids) == self.args.max_seq_length + + # image features + if self.args.img_feature_type.startswith('dis_code'): + img_feat = self.img_features[example.img_key] + + if self.args.img_feature_type == 'dis_code_ln': # for discrete code image representation + img_feat = img_feat.reshape(-1, img_feat.shape[0]) + + if self.args.img_feature_type == 'dis_code_t': # transposed + input_mask = input_mask + [1 if mask_padding_with_zero else 0] * 64 + else: + input_mask = input_mask + [1 if mask_padding_with_zero else 0] * img_feat.shape[0] + else: + img_key_left = example.img_key['left'] + img_key_right = example.img_key['right'] + img_feat_left = self.img_features[img_key_left] + img_feat_right = self.img_features[img_key_right] + img_feat = torch.cat((img_feat_left, img_feat_right), 0) + if img_feat.shape[0] > 2*self.args.max_img_seq_length: + img_feat = img_feat[0: 2*self.args.max_img_seq_length, ] + if self.args.max_img_seq_length > 0: + input_mask = input_mask + [1 if mask_padding_with_zero else 0] * img_feat.shape[0] + # segment_ids += [sequence_b_segment_id] * img_feat.shape[0] + else: + if self.args.max_img_seq_length > 0: + input_mask = input_mask + [1 if mask_padding_with_zero else 0] * img_feat.shape[0] + # segment_ids = segment_ids + [sequence_b_segment_id] * img_feat.shape[0] + padding_matrix = torch.zeros((2*self.args.max_img_seq_length - img_feat.shape[0], img_feat.shape[1])) + img_feat = torch.cat((img_feat, padding_matrix), 0) + if self.args.max_img_seq_length > 0: + input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_matrix.shape[0]) + # segment_ids = segment_ids + [pad_token_segment_id] * padding_matrix.shape[0] + + if self.args.output_mode == "classification": + if (example.label is None): + label_id = [0] + score = [0] + else: + label_id = [example.label] #[self.label_map[l] for l in example.label] + score = example.score + elif self.args.output_mode == "regression": + if len(example.label) == 0: + 
+                label_id = 0
+            else:
+                label_id = float(example.label)
+        else:
+            raise KeyError(self.args.output_mode)
+
+        if self.args.img_feature_type in ['dis_code', 'dis_code_t']:
+            img_feat = img_feat.type(torch.long)
+        elif self.args.img_feature_type in ['dis_code_ln']:
+            # img_feat = img_feat.reshape(-1, img_feat.shape[0])
+            img_feat = img_feat.type(torch.float)
+
+        return (torch.tensor(input_ids, dtype=torch.long),
+                torch.tensor(input_mask, dtype=torch.long),
+                torch.tensor(segment_ids, dtype=torch.long),
+                torch.tensor([label_id[0]], dtype=torch.long),
+                img_feat,
+                torch.tensor([example.q_id], dtype=torch.long))
+
+    def tensorize_example_pair(self, example, cls_token_at_end=False, pad_on_left=False,
+                               cls_token='[CLS]', sep_token='[SEP]', pad_token=0,
+                               sequence_a_segment_id=0, sequence_b_segment_id=1,
+                               cls_token_segment_id=1, pad_token_segment_id=0,
+                               mask_padding_with_zero=True):
+
+        tokens_a = self.tokenizer.tokenize(example.text_a)
+
+        choices = []
+        for choice_key in example.img_key:
+            tokens_b = None
+
+            if example.text_b:
+                tokens_b = self.tokenizer.tokenize(example.text_b[choice_key])
+                # Modifies `tokens_a` and `tokens_b` in place so that the total length is less than the specified length.
+                # Account for [CLS], [SEP], [SEP] with "- 3"
+                _truncate_seq_pair(tokens_a, tokens_b, self.args.max_seq_length - 3)
+            else:
+                # Account for [CLS] and [SEP] with "- 2"
+                if len(tokens_a) > self.args.max_seq_length - 2:
+                    tokens_a = tokens_a[:(self.args.max_seq_length - 2)]
+
+            tokens = tokens_a + [sep_token]
+            segment_ids = [sequence_a_segment_id] * len(tokens)
+
+            if tokens_b:
+                tokens += tokens_b + [sep_token]
+                segment_ids += [sequence_b_segment_id] * (len(tokens_b) + 1)
+
+            if cls_token_at_end:
+                tokens = tokens + [cls_token]
+                segment_ids = segment_ids + [cls_token_segment_id]
+            else:
+                tokens = [cls_token] + tokens
+                segment_ids = [cls_token_segment_id] + segment_ids
+
+            input_ids = self.tokenizer.convert_tokens_to_ids(tokens)
+
+            # The mask has 1 for real tokens and 0 for padding tokens. Only real tokens are attended to.
+            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
+
+            # Zero-pad up to the sequence length.
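+            # Worked example (illustrative, not from the original patch), with
+            # max_seq_length = 8, right padding, and a bert-base-uncased vocab:
+            #   tokens      -> ['[CLS]', 'a', 'dog', '[SEP]']
+            #   input_ids   -> [101, 1037, 3899, 102, 0, 0, 0, 0]
+            #   input_mask  -> [1, 1, 1, 1, 0, 0, 0, 0]
+            #   segment_ids -> [0, 0, 0, 0, 0, 0, 0, 0]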
+            padding_length = self.args.max_seq_length - len(input_ids)
+            if pad_on_left:
+                input_ids = ([pad_token] * padding_length) + input_ids
+                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
+                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
+            else:
+                input_ids = input_ids + ([pad_token] * padding_length)
+                input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
+                segment_ids = segment_ids + ([pad_token_segment_id] * padding_length)
+
+            assert len(input_ids) == self.args.max_seq_length
+            assert len(input_mask) == self.args.max_seq_length
+            assert len(segment_ids) == self.args.max_seq_length
+
+            # img
+            img_key = example.img_key[choice_key]
+            img_feat = self.img_features[img_key]
+
+            if img_feat.shape[0] > self.args.max_img_seq_length:
+                img_feat = img_feat[0: self.args.max_img_seq_length, ]
+                if self.args.max_img_seq_length > 0:
+                    input_mask = input_mask + [1 if mask_padding_with_zero else 0] * img_feat.shape[0]
+                    # segment_ids += [sequence_b_segment_id] * img_feat.shape[0]
+            else:
+                if self.args.max_img_seq_length > 0:
+                    input_mask = input_mask + [1 if mask_padding_with_zero else 0] * img_feat.shape[0]
+                    # segment_ids = segment_ids + [sequence_b_segment_id] * img_feat.shape[0]
+                padding_matrix = torch.zeros((self.args.max_img_seq_length - img_feat.shape[0], img_feat.shape[1]))
+                img_feat = torch.cat((img_feat, padding_matrix), 0)
+                if self.args.max_img_seq_length > 0:
+                    input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_matrix.shape[0])
+                    # segment_ids = segment_ids + [pad_token_segment_id] * padding_matrix.shape[0]
+
+            choices.append((tokens, input_ids, input_mask, segment_ids, img_feat))
+
+        if self.args.output_mode == "classification":
+            if example.label is None:
+                label_id = [0]
+            else:
+                label_id = [example.label]  # [self.label_map[l] for l in example.label]
+        elif self.args.output_mode == "regression":
+            if len(example.label) == 0:
+                label_id = 0
+            else:
+                label_id = float(example.label)
+        else:
+            raise KeyError(self.args.output_mode)
+
+        choice_input_ids = [choice[1] for choice in choices]
+        choice_input_mask = [choice[2] for choice in choices]
+        choice_input_segs = [choice[3] for choice in choices]
+        choice_input_imgs = [choice[4] for choice in choices]
+
+        choice_img_feats = torch.stack(choice_input_imgs)
+
+        return (torch.tensor(choice_input_ids, dtype=torch.long),
+                torch.tensor(choice_input_mask, dtype=torch.long),
+                torch.tensor(choice_input_segs, dtype=torch.long),
+                torch.tensor(label_id[0], dtype=torch.long),
+                choice_img_feats,
+                torch.tensor([example.q_id], dtype=torch.long))
+
+    def __getitem__(self, index):
+        entry = self.examples[index]
+        if self.args.use_pair:
+            example = self.tensorize_example_pair(entry,
+                cls_token_at_end=bool(self.args.model_type in ['xlnet']),  # xlnet has a cls token at the end
+                cls_token=self.tokenizer.cls_token,
+                sep_token=self.tokenizer.sep_token,
+                cls_token_segment_id=2 if self.args.model_type in ['xlnet'] else 0,
+                pad_on_left=bool(self.args.model_type in ['xlnet']),  # pad on the left for xlnet
+                pad_token_segment_id=4 if self.args.model_type in ['xlnet'] else 0)
+        else:
+            example = self.tensorize_example(entry,
+                cls_token_at_end=bool(self.args.model_type in ['xlnet']),  # xlnet has a cls token at the end
+                cls_token=self.tokenizer.cls_token,
+                sep_token=self.tokenizer.sep_token,
+                cls_token_segment_id=2 if self.args.model_type in ['xlnet'] else 0,
+                pad_on_left=bool(self.args.model_type in ['xlnet']),  # pad on the left for xlnet
+                pad_token_segment_id=4 if self.args.model_type in ['xlnet'] else 0)
+        return example
+
+    def __len__(self):
+        return len(self.examples)
+
+
+def set_seed(args):
+    random.seed(args.seed)
+    np.random.seed(args.seed)
+    torch.manual_seed(args.seed)
+    if args.n_gpu > 0:
+        torch.cuda.manual_seed_all(args.seed)
+
+def train(args, train_dataset, eval_dataset, model, tokenizer):
+    """ Train the model """
+
+    args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
+    train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
+    train_dataloader = DataLoader(train_dataset, num_workers=args.workers, sampler=train_sampler, batch_size=args.train_batch_size)
+
+    if args.max_steps > 0:
+        t_total = args.max_steps
+        args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
+    else:
+        t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
+
+    # Prepare optimizer and schedule (linear warmup and decay)
+    no_decay = ['bias', 'LayerNorm.weight']
+    optimizer_grouped_parameters = [
+        {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay},
+        {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
+    ]
+    if args.optim == 'AdamW':
+        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
+    elif args.optim == 'Adamax':
+        optimizer = Adamax(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
+    scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=t_total)
+
+    if args.fp16:
+        try:
+            from apex import amp
+        except ImportError:
+            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
+        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
+
+    # multi-gpu training (should be after apex fp16 initialization)
+    if args.n_gpu > 1:
+        model = torch.nn.DataParallel(model)
+
+    # Distributed training (should be after apex fp16 initialization)
+    if args.local_rank != -1:
+        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True)
+
+    # Train!
+    logger.info("***** Running training *****")
+    logger.info("  Num examples = %d", len(train_dataset))
+    logger.info("  Num Epochs = %d", args.num_train_epochs)
+    logger.info("  Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
+    logger.info("  Total train batch size (w. parallel, distributed & accumulation) = %d",
+                args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1))
+    logger.info("  Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
+    logger.info("  Total optimization steps = %d", t_total)
+
+    global_step = 0
+    tr_loss, logging_loss = 0.0, 0.0
+    model.zero_grad()
+    # train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0])
+    set_seed(args)  # Added here for reproducibility (even between python 2 and 3)
+
+    best_score = 0
+    best_model = {
+        'epoch': 0,
+        'model_state': model.state_dict(),
+        'optimizer_state': optimizer.state_dict()
+    }
+
+    for epoch in range(int(args.num_train_epochs)):
+        # for epoch in train_iterator:
+        # epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
+        total_loss = 0
+        total_norm = 0
+        count_norm = 0
+
+        t_start = time.time()
+        for step, batch in enumerate(train_dataloader):
+            # for step, batch in enumerate(epoch_iterator):
+            model.train()
+            batch = tuple(t.to(args.device) for t in batch)
+            inputs = {'input_ids': batch[0],
+                      'attention_mask': batch[1],
+                      'token_type_ids': batch[2] if args.model_type in ['bert', 'xlnet'] else None,  # XLM doesn't use segment_ids
+                      'labels': batch[3],
+                      'img_feats': None if args.img_feature_dim == -1 else batch[4]}
+            outputs = model(**inputs)
+
+            # loss = outputs[0]  # model outputs are always tuple in pytorch-transformers (see doc)
+            loss, logits = outputs[:2]
+
+            if args.n_gpu > 1:
+                loss = loss.mean()  # mean() to average on multi-gpu parallel training
+
+            if args.gradient_accumulation_steps > 1:
+                loss = loss / args.gradient_accumulation_steps
+
+            if args.fp16:
+                with amp.scale_loss(loss, optimizer) as scaled_loss:
+                    scaled_loss.backward()
+                torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
+            else:
+                loss.backward()
+                total_norm += torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
+                count_norm += 1
+
+            # batch_score = compute_score_with_logits(logits, batch[4]).sum()
+            # train_score += batch_score.item()
+
+            tr_loss += loss.item()
+            total_loss += loss.item()
+            if (step + 1) % args.gradient_accumulation_steps == 0:
+                scheduler.step()  # Update learning rate schedule
+                optimizer.step()
+                model.zero_grad()
+                global_step += 1
+
+                if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:  # Log metrics
+                    if args.local_rank == -1 and args.evaluate_during_training:  # Only evaluate on single GPU, otherwise metrics may not average well
+                        logger.info("Epoch: %d, global_step: %d" % (epoch, global_step))
+                        eval_result, eval_score = evaluate(args, model, eval_dataset, prefix=global_step)
+                        if eval_score > best_score:
+                            best_score = eval_score
+                            best_model['epoch'] = epoch
+                            best_model['model'] = copy.deepcopy(model)
+
+                        logger.info("EVALERR: {}%".format(100 * best_score))
+                    logging_loss = tr_loss
+
+            # if args.max_steps > 0 and global_step > args.max_steps:
+            #     epoch_iterator.close()
+            #     break
+
+        t_end = time.time()
+        logger.info('Train Time Cost: %.3f' % (t_end - t_start))
+
+        # evaluation
+        logger.info("Epoch: %d" % (epoch))
+        eval_result, eval_score = evaluate(args, model, eval_dataset, prefix=global_step)
+        if eval_score > best_score:
+            best_score = eval_score
+            best_model['epoch'] = epoch
+            best_model['model'] = copy.deepcopy(model)
+            # best_model['optimizer'] = copy.deepcopy(optimizer.state_dict())
+
+        # save checkpoints
+        if args.local_rank in [-1, 0] and args.save_epoch > 0 and epoch % args.save_epoch == 0:  # Save model checkpoint
+            output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(epoch))
+            if not os.path.exists(output_dir): os.makedirs(output_dir)
+            model_to_save = best_model['model'].module if hasattr(model, 'module') else best_model['model']  # Take care of distributed/parallel training
+
+            save_num = 0
+            while (save_num < 10):
+                try:
+                    model_to_save.save_pretrained(output_dir)
+                    torch.save(args, os.path.join(output_dir, 'training_args.bin'))
+                    tokenizer.save_pretrained(output_dir)
+                    break
+                except Exception:
+                    save_num += 1
+            logger.info("Saving model checkpoint {0} to {1}".format(epoch, output_dir))
+
+        epoch_log = {'epoch': epoch, 'eval_score': eval_score, 'best_score': best_score}
+        log_json.append(epoch_log)
+
+        with open(args.output_dir + '/eval_logs.json', 'w') as fp:
+            json.dump(log_json, fp)
+
+        logger.info("PROGRESS: {}%".format(round(100 * (epoch + 1) / args.num_train_epochs, 4)))
+        logger.info("EVALERR: {}%".format(100 * best_score))
+        logger.info("LOSS: {}%".format(total_loss / len(train_dataset)))
+
+    with open(args.output_dir + '/eval_logs.json', 'w') as fp:
+        json.dump(log_json, fp)
+
+    if args.local_rank in [-1, 0]:  # Save the final model checkpoint
+        output_dir = os.path.join(args.output_dir, 'best-{}'.format(best_model['epoch']))
+        if not os.path.exists(output_dir): os.makedirs(output_dir)
+        model_to_save = best_model['model'].module if hasattr(model, 'module') else best_model['model']  # Take care of distributed/parallel training
+
+        save_num = 0
+        while (save_num < 10):
+            try:
+                model_to_save.save_pretrained(output_dir)
+                torch.save(args, os.path.join(output_dir, 'training_args.bin'))
+                tokenizer.save_pretrained(output_dir)
+                break
+            except Exception:
+                save_num += 1
+        logger.info("Saving the best model checkpoint epoch {} to {}".format(best_model['epoch'], output_dir))
+
+    return global_step, tr_loss / global_step
+
+def evaluate(args, model, eval_dataset=None, prefix=""):
+    # Loop to handle MNLI double evaluation (matched, mis-matched)
+    eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,)
+    eval_outputs_dirs = (args.output_dir, args.output_dir + '-MM') if args.task_name == "mnli" else (args.output_dir,)
+
+    # if args.n_gpu > 1: model = torch.nn.DataParallel(model)  # debug: single-gpu or multi-gpus
+
+    results = []
+    t_start = time.time()
+    for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):
+        if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]: os.makedirs(eval_output_dir)
+
+        args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
+        # Note that DistributedSampler samples randomly
+        eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
+        eval_dataloader = DataLoader(eval_dataset, num_workers=args.workers, sampler=eval_sampler, batch_size=args.eval_batch_size)
+
+        # Eval!
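+        # Sketch of what the loop below computes (added for clarity, not in the
+        # original patch): per-batch argmax accuracy, accumulated so that
+        #   acc = sum(logits.argmax(1) == labels) / len(eval_dataset)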
+ logger.info("***** Running evaluation {} *****".format(prefix)) + logger.info(" Num examples = %d", len(eval_dataset)) + logger.info(" Batch size = %d", args.eval_batch_size) + eval_loss = 0.0 + nb_eval_steps = 0 + num_data = 0 + correct_num = 0 + + for batch in eval_dataloader: + #for batch in tqdm(eval_dataloader, desc="Evaluating"): + model.eval() + batch = tuple(t.to(args.device) for t in batch) + + with torch.no_grad(): + inputs = {'input_ids': batch[0], + 'attention_mask': batch[1], + 'token_type_ids': batch[2] if args.model_type in ['bert', 'xlnet'] else None, # XLM don't use segment_ids + 'labels': batch[3], + 'img_feats': None if args.img_feature_dim == -1 else batch[4]} + outputs = model(**inputs) + tmp_eval_loss, logits = outputs[:2] + eval_loss += tmp_eval_loss.mean().item() + num_data += logits.size(0) + + #logger.info('logits: {}, batch[3]: {}'.format(logits.shape, batch[3].shape)) + + val, idx = logits.max(1) + batch_acc = torch.sum(idx == batch[3].view(-1)).item() + #logger.info('idx: {}, batch[3].view(-1):{}, batch_acc: {}'.format(idx.shape, batch[3].view(-1).shape, batch_acc)) + correct_num += batch_acc + + # correct = logits.argmax(1) == batch[3].view(-1) + # correct_num += correct.sum().item() + + nb_eval_steps += 1 + + acc = float(correct_num) / len(eval_dataloader.dataset) + + logger.info("Eval Results:") + logger.info("Eval Accuracy: {}".format(100*acc)) + logger.info("EVALERR: {}%".format(100 * acc)) + logger.info("Eval Loss: %.3f" % (eval_loss)) + + t_end = time.time() + logger.info('Eva Time Cost: %.3f' % (t_end - t_start)) + + return results, acc + +def test(args, model, eval_dataset=None, prefix=""): + # Loop to handle MNLI double evaluation (matched, mis-matched) + eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,) + eval_outputs_dirs = (args.output_dir, args.output_dir + '-MM') if args.task_name == "mnli" else (args.output_dir,) + + label2ans = cPickle.load(open(args.label2ans_file, 'rb')) + logger.info('label2ans: %d' % (len(label2ans))) + + results = [] + t_start = time.time() + for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs): + if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]: os.makedirs(eval_output_dir) + + args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) + # Note that DistributedSampler samples randomly + eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset) + eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) + + # Eval + logger.info("***** Running Test {} *****".format(prefix)) + logger.info(" Num examples = %d", len(eval_dataset)) + logger.info(" Batch size = %d", args.eval_batch_size) + + for batch in eval_dataloader: + #for batch in tqdm(eval_dataloader, desc="Evaluating"): + model.eval() + batch = tuple(t.to(args.device) for t in batch) + + with torch.no_grad(): + inputs = {'input_ids': batch[0], + 'attention_mask': batch[1], + 'token_type_ids': batch[2] if args.model_type in ['bert', 'xlnet'] else None, # XLM don't use segment_ids + 'labels': None, + 'img_feats': None if args.img_feature_dim == -1 else batch[5]} + outputs = model(**inputs) + logits = outputs[0] + + val, idx = logits.max(1) + #logger.info('idx: %s, batch[6]: %s' % (str(idx.shape), str(batch[6].shape))) + + for i in range(idx.size(0)): + result = {} + result['questionId'] = str(batch[6][i].item()) + result['prediction'] = label2ans[eval_dataset.labels[idx[i].item()]] + 
+                    results.append(result)
+
+                    # logger.info('q_id: {0}, answer: {1}'.format(result['question_id'], result['answer']))
+
+    with open(args.output_dir + ('/{}_results.json'.format(eval_dataset.name)), 'w') as fp:
+        json.dump(results, fp)
+
+    t_end = time.time()
+    logger.info('# questions: %d' % (len(results)))
+    logger.info('Test Time Cost: %.3f' % (t_end - t_start))
+
+
+def main():
+    parser = argparse.ArgumentParser()
+
+    ## Required parameters
+    parser.add_argument("--data_dir", default=None, type=str, required=True,
+                        help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
+    parser.add_argument("--model_type", default=None, type=str, required=True,
+                        help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()))
+    parser.add_argument("--model_name_or_path", default=None, type=str, required=True,
+                        help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS))
+    parser.add_argument("--task_name", default=None, type=str, required=True,
+                        help="The name of the task to train selected in the list: " + ", ".join(processors.keys()))
+    parser.add_argument("--output_dir", default=None, type=str, required=True,
+                        help="The output directory where the model predictions and checkpoints will be written.")
+
+    parser.add_argument("--data_label_type", default='bal', type=str, help="bal or all")
+    parser.add_argument("--train_data_type", default='bal', type=str, help="bal or all")
+    parser.add_argument("--eval_data_type", default='bal', type=str, help="bal or all")
+    parser.add_argument("--loss_type", default='kl', type=str, help="kl or xe")
+    parser.add_argument("--use_layernorm", action='store_true', help="use_layernorm")
+    parser.add_argument("--use_label_seq", action='store_true', help="use_label_seq")
+    parser.add_argument("--use_pair", action='store_true', help="use_pair")
+    parser.add_argument("--num_choice", default=2, type=int, help="num_choice")
+
+    ## Other parameters
+    parser.add_argument("--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name")
+    parser.add_argument("--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name")
+    parser.add_argument("--cache_dir", default="", type=str, help="Where do you want to store the pre-trained models downloaded from s3")
+    parser.add_argument("--max_seq_length", default=128, type=int,
+                        help="The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.")
+    parser.add_argument("--do_train", action='store_true', help="Whether to run training.")
+    parser.add_argument("--do_eval", action='store_true', help="Whether to run eval on the dev set.")
+    parser.add_argument("--do_test", action='store_true', help="Whether to run test on the test set.")
+    parser.add_argument("--evaluate_during_training", action='store_true', help="Run evaluation during training at each logging step.")
+    parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.")
+
+    parser.add_argument("--drop_out", default=0.1, type=float, help="Dropout for BERT.")
+    parser.add_argument("--classifier", default='linear', type=str, help="linear or mlp")
+    parser.add_argument("--cls_hidden_scale", default=2, type=int, help="cls_hidden_scale: for classifier")
+
+    parser.add_argument("--max_img_seq_length", default=30, type=int, help="The maximum total input image sequence length.")
+    parser.add_argument("--img_feature_dim", default=2054, type=int, help="The Image Feature Dimension.")
+    parser.add_argument("--img_feature_type", default='faster_r-cnn', type=str, help="faster_r-cnn or mask_r-cnn")
+    parser.add_argument("--code_voc", default=512, type=int, help="dis_code_voc: 256, 512")
+    parser.add_argument("--code_level", default='top', type=str, help="code level: top, bottom, both")
+
+    parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.")
+    parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.")
+    parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
+                        help="Number of update steps to accumulate before performing a backward/update pass.")
+    parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
+    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
+    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
+    parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
+    parser.add_argument("--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.")
+    parser.add_argument("--max_steps", default=-1, type=int,
+                        help="If > 0: set total number of training steps to perform. Overrides num_train_epochs.")
+    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
+    parser.add_argument("--optim", default='AdamW', type=str, help="optim: AdamW, Adamax")
+
+    parser.add_argument('--logging_steps', type=int, default=50, help="Log every X update steps.")
+    parser.add_argument('--save_steps', type=int, default=50, help="Save checkpoint every X update steps.")
+    parser.add_argument('--save_epoch', type=int, default=5, help="Save checkpoint every X epochs.")
+    parser.add_argument("--eval_all_checkpoints", action='store_true',
+                        help="Evaluate all checkpoints starting with the same prefix as model_name and ending with a step number")
+    parser.add_argument("--no_cuda", action='store_true', help="Avoid using CUDA when available")
+    parser.add_argument('--overwrite_output_dir', action='store_true', help="Overwrite the content of the output directory")
+    parser.add_argument('--overwrite_cache', action='store_true', help="Overwrite the cached training and evaluation sets")
+    parser.add_argument('--seed', type=int, default=42, help="random seed for initialization")
+
+    parser.add_argument('--fp16', action='store_true', help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
+    parser.add_argument('--fp16_opt_level', type=str, default='O1',
+                        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
+                             "See details at https://nvidia.github.io/apex/amp.html")
+    parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
+    parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.")
+    parser.add_argument('--server_port', type=str, default='', help="For distant debugging.")
+
+    parser.add_argument("--philly", action='store_true', help="Use Philly: reset the output dir")
+    parser.add_argument('-j', '--workers', default=0, type=int, metavar='N', help='number of data loading workers (default: 0)')
+
+    args = parser.parse_args()
+
+    if args.philly:  # use philly
+        logger.info('Info: Use Philly, all the output folders are reset.')
+        args.output_dir = os.path.join(os.getenv('PT_OUTPUT_DIR'), args.output_dir)
+        logger.info('OUTPUT_DIR: %s', args.output_dir)
+
+    if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train: logger.info("Output Directory Exists.")
+
+    # Setup distant debugging if needed
+    if args.server_ip and args.server_port:
+        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
+        import ptvsd
+        print("Waiting for debugger attach")
+        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
+        ptvsd.wait_for_attach()
+
+    # Setup CUDA, GPU & distributed training
+    if args.local_rank == -1 or args.no_cuda:
+        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
+        args.n_gpu = torch.cuda.device_count()
+    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
+        torch.cuda.set_device(args.local_rank)
+        device = torch.device("cuda", args.local_rank)
+        torch.distributed.init_process_group(backend='nccl')
+        args.n_gpu = 1
+    args.device = device
+
+    # Setup logging
+    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
+                        datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
+    logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
+                   args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)
+
+    # Set seed
+    set_seed(args)
+
+    # Prepare GLUE task
+    args.task_name = args.task_name.lower()
+    if args.task_name not in processors:
+        raise ValueError("Task not found: %s" % (args.task_name))
+
+    processor = processors[args.task_name]()
+    args.output_mode = output_modes[args.task_name]
+    label_list = processor.get_labels()
+    num_labels = len(label_list)
+    logger.info('Task Name: {}, #Labels: {}'.format(args.task_name, num_labels))
+
+    # Load pretrained model and tokenizer
+    if args.local_rank not in [-1, 0]:
+        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab
+
+    args.model_type = args.model_type.lower()
+    config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
+    if args.use_pair:
+        model_class = ImageBertForMultipleChoice
+    config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path, num_labels=num_labels, finetuning_task=args.task_name)
+    tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case)
+
+    # new config: discrete code
+    config.img_feature_dim = args.img_feature_dim
+    config.img_feature_type = args.img_feature_type
+    config.code_voc = args.code_voc
+    config.hidden_dropout_prob = args.drop_out
+    config.loss_type = args.loss_type
+    config.use_layernorm = args.use_layernorm
+    config.classifier = args.classifier
+    config.cls_hidden_scale = args.cls_hidden_scale
+    config.num_choice = args.num_choice
+
+    model = model_class.from_pretrained(args.model_name_or_path, from_tf=bool('.ckpt' in args.model_name_or_path), config=config)
+
+    if args.local_rank == 0:
+        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab
+
+    total_params = sum(p.numel() for p in model.parameters())
+    logger.info('Model Parameters: {}'.format(total_params))
+
+    model.to(args.device)
+
+    logger.info("Training/Evaluation parameters %s", args)
+
+    # load image features
+    img_features = _load_img_features(args)
+
+    # if args.do_eval:
+    eval_dataset = NLVRDataset(args, 'val', img_features, tokenizer)
+
+    if args.do_test:
+        test_dataset = NLVRDataset(args, 'test1', img_features, tokenizer)
+
+    # Training
+    if args.do_train:
+        train_dataset = NLVRDataset(args, 'train', img_features, tokenizer)
+        global_step, tr_loss = train(args, train_dataset, eval_dataset, model, tokenizer)
+        logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
+
+    # Saving best-practices: if you use default names for the model, you can reload it using from_pretrained()
+    if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
+        # Create output directory if needed
+        if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir)
+
+        logger.info("Saving model checkpoint to %s", args.output_dir)
+        # Save a trained model, configuration and tokenizer using `save_pretrained()`. They can then be reloaded using `from_pretrained()`
+        # model_to_save = model.module if hasattr(model, 'module') else model  # Take care of distributed/parallel training
+        # model_to_save.save_pretrained(args.output_dir)
+
+        tokenizer.save_pretrained(args.output_dir)
+
+        # Good practice: save your training arguments together with the trained model
+        torch.save(args, os.path.join(args.output_dir, 'training_args.bin'))
+
+        # Load a trained model and vocabulary that you have fine-tuned
+        # model = model_class.from_pretrained(args.output_dir)
+        # tokenizer = tokenizer_class.from_pretrained(args.output_dir)
+        # model.to(args.device)
+
+
+    # Evaluation
+    # results = {}
+    if args.do_eval and args.local_rank in [-1, 0]:
+        checkpoints = [args.output_dir]
+        if args.eval_all_checkpoints:
+            checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True)))
+            logging.getLogger("pytorch_transformers.modeling_utils").setLevel(logging.WARN)  # Reduce logging
+        logger.info("Evaluate the following checkpoints: %s", checkpoints)
+
+        for checkpoint in checkpoints:
+            global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else ""
+            model = model_class.from_pretrained(checkpoint, config=config)
+            model.to(args.device)
+            result, score = evaluate(args, model, eval_dataset, prefix=global_step)
+            # result = dict((k + '_{}'.format(global_step), v) for k, v in result.items())
+            # results.update(result)
+
+    # Test-Submission
+    if args.do_test and args.local_rank in [-1, 0]:
+        checkpoints = [args.output_dir]
+        if args.eval_all_checkpoints:
+            checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True)))
+            logging.getLogger("pytorch_transformers.modeling_utils").setLevel(logging.WARN)  # Reduce logging
+        logger.info("Evaluate the following checkpoints: %s", checkpoints)
+
+        for checkpoint in checkpoints:
+            global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else ""
+            model = model_class.from_pretrained(checkpoint)
+            model.to(args.device)
+            # test(args, model, test_dataset, prefix=global_step)
+            result, score = evaluate(args, model, test_dataset, prefix=global_step)
+
+
+if __name__ == "__main__":
+    main()
\ No newline at end of file
diff --git a/oscar/run_retrieval.py b/oscar/run_retrieval.py
new file mode 100644
index 0000000..d2bbf9b
--- /dev/null
+++ b/oscar/run_retrieval.py
@@ -0,0 +1,623 @@
+# Copyright (c) 2020 Microsoft Corporation. Licensed under the MIT license.
+
+from __future__ import absolute_import, division, print_function
+import argparse
+import os
+import os.path as op
+import random, json
+import numpy as np
+import torch
+import torch.nn as nn
+from torch.utils.data import Dataset, DataLoader, RandomSampler, SequentialSampler
+from tqdm import tqdm
+
+from oscar.utils.logger import setup_logger
+from oscar.utils.misc import mkdir, set_seed
+from oscar.modeling.modeling_bert import ImageBertForSequenceClassification
+from transformers.pytorch_transformers import BertTokenizer, BertConfig
+from transformers.pytorch_transformers import AdamW, WarmupLinearSchedule, WarmupConstantSchedule
+
+
+class RetrievalDataset(Dataset):
+    """ Image/Text Retrieval Dataset """
+    def __init__(self, tokenizer, args, split='train', is_train=True):
+        """
+        tokenizer: tokenizer to process caption text.
+        args: configuration parameters including max_seq_length, etc.
+        split: used to infer the data used for training or testing.
+            All files are in .pt format of a dictionary with image keys and
+            image features (pytorch tensors), captions (list of str, support multiple
+            captions per image), labels (list of dictionary or str of all labels).
+        """
+        super(RetrievalDataset, self).__init__()
+        feature_file = op.join(args.data_dir, '{}_img_{}_feats.pt'.format(split, args.img_feature_type))
+        caption_file = op.join(args.data_dir, '{}_captions.pt'.format(split))
+        self.features = torch.load(feature_file)
+        self.captions = torch.load(caption_file)
+        self.img_keys = list(self.features.keys())
+        if not type(self.captions[self.img_keys[0]]) == list:
+            self.captions = {k: json.loads(self.captions[k]) for k in self.img_keys}
+        assert len(self.features) == len(self.captions), \
+            "the length of image features and captions does not match!"
+
+        if args.add_od_labels:
+            label_file = op.join(args.data_dir, '{}_{}_labels.pt'.format(split, args.od_label_type))
+            self.labels = torch.load(label_file)
+
+        if is_train:
+            self.num_captions_per_img = args.num_captions_per_img_train
+        else:
+            self.num_captions_per_img = args.num_captions_per_img_val
+            if args.eval_img_keys_file:
+                # select a subset of image keys for evaluation, e.g. COCO 1k and 5k.
+                # eval_img_keys_file is a list of image keys saved in a tsv file.
+                with open(op.join(args.data_dir, args.eval_img_keys_file), 'r') as f:
+                    img_keys = f.readlines()
+                self.img_keys = [int(k.strip()) for k in img_keys]
+                self.features = {k: self.features[k] for k in self.img_keys}
+                self.captions = {k: self.captions[k] for k in self.img_keys}
+                if args.add_od_labels:
+                    self.labels = {k: self.labels[k] for k in self.img_keys}
+
+            if args.eval_caption_index_file:
+                # hard negative image/caption indexes for the retrieval re-rank setting.
+                # useful for the mini val set to monitor performance during training.
+                # However, it cannot be used together with cross image evaluation.
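+                # Assumed layout (illustration only, inferred from the lookup in
+                # get_image_caption_index below): caption_indexs maps each image
+                # key to a ranked list of candidate (img_key, cap_idx) pairs from
+                # a first-stage retriever, e.g.
+                #   {184613: [[184613, 0], [318556, 2], ...], ...}
+                # so index i selects the i-th candidate caption to re-rank.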
+                self.has_caption_indexs = True
+                assert not args.cross_image_eval
+                caption_index_file = op.join(args.data_dir, args.eval_caption_index_file)
+                self.caption_indexs = torch.load(caption_index_file)
+                if not type(self.caption_indexs[self.img_keys[0]]) == list:
+                    self.caption_indexs = {k: json.loads(self.caption_indexs[k]) for k in self.img_keys}
+            else:
+                self.has_caption_indexs = False
+        self.is_train = is_train
+        self.output_mode = args.output_mode
+        self.tokenizer = tokenizer
+        self.max_seq_len = args.max_seq_length
+        self.max_img_seq_len = args.max_img_seq_length
+        self.args = args
+
+    def get_image_caption_index(self, index):
+        # return img_idx to access features and [img_key, cap_idx] to access caption
+        if not self.is_train and self.args.cross_image_eval:
+            img_idx = index // (self.num_captions_per_img * len(self.img_keys))
+            cap_idx = index % (self.num_captions_per_img * len(self.img_keys))
+            img_idx1 = cap_idx // self.num_captions_per_img
+            cap_idx1 = cap_idx % self.num_captions_per_img
+            return img_idx, [self.img_keys[img_idx1], cap_idx1]
+        if not self.is_train and self.has_caption_indexs:
+            img_idx = index // self.num_captions_per_img
+            cap_idx = index % self.num_captions_per_img
+            img_key1, cap_idx1 = self.caption_indexs[self.img_keys[img_idx]][cap_idx]
+            return img_idx, [img_key1, cap_idx1]
+        img_idx = index // self.num_captions_per_img
+        cap_idx = index % self.num_captions_per_img
+        return img_idx, [self.img_keys[img_idx], cap_idx]
+
+    def get_label(self, index):
+        img_idx, cap_idx = self.get_image_caption_index(index)
+        return 1 if self.img_keys[img_idx] == cap_idx[0] else 0
+
+    def get_od_labels(self, img_key):
+        if self.args.add_od_labels:
+            if type(self.labels[img_key]) == str:
+                od_labels = self.labels[img_key]
+            else:
+                od_labels = ' '.join([l['class'] for l in self.labels[img_key]])
+            return od_labels
+
+    def tensorize_example(self, text_a, img_feat, text_b=None,
+                          cls_token_segment_id=0, pad_token_segment_id=0,
+                          sequence_a_segment_id=0, sequence_b_segment_id=1):
+        tokens_a = self.tokenizer.tokenize(text_a)
+        if len(tokens_a) > self.args.max_seq_length - 2:
+            tokens_a = tokens_a[:(self.args.max_seq_length - 2)]
+
+        tokens = [self.tokenizer.cls_token] + tokens_a + [self.tokenizer.sep_token]
+        segment_ids = [cls_token_segment_id] + [sequence_a_segment_id] * (len(tokens_a) + 1)
+        seq_a_len = len(tokens)
+        if text_b:
+            tokens_b = self.tokenizer.tokenize(text_b)
+            if len(tokens_b) > self.max_seq_len - len(tokens) - 1:
+                tokens_b = tokens_b[: (self.max_seq_len - len(tokens) - 1)]
+            tokens += tokens_b + [self.tokenizer.sep_token]
+            segment_ids += [sequence_b_segment_id] * (len(tokens_b) + 1)
+
+        seq_len = len(tokens)
+        seq_padding_len = self.max_seq_len - seq_len
+        tokens += [self.tokenizer.pad_token] * seq_padding_len
+        segment_ids += [pad_token_segment_id] * seq_padding_len
+        input_ids = self.tokenizer.convert_tokens_to_ids(tokens)
+
+        # image features
+        img_len = img_feat.shape[0]
+        if img_len > self.max_img_seq_len:
+            img_feat = img_feat[0: self.max_img_seq_len, :]
+            img_len = img_feat.shape[0]
+            img_padding_len = 0
+        else:
+            img_padding_len = self.max_img_seq_len - img_len
+            padding_matrix = torch.zeros((img_padding_len, img_feat.shape[1]))
+            img_feat = torch.cat((img_feat, padding_matrix), 0)
+
+        # generate attention_mask
+        att_mask_type = self.args.att_mask_type
+        if att_mask_type == "CLR":
+            attention_mask = [1] * seq_len + [0] * seq_padding_len + \
+                             [1] * img_len + [0] * img_padding_len
+        else:
+            # use a 2D mask to represent the attention
+            max_len = self.max_seq_len + self.max_img_seq_len
+            attention_mask = torch.zeros((max_len, max_len), dtype=torch.long)
+            # full attention of C-C, L-L, R-R
+            c_start, c_end = 0, seq_a_len
+            l_start, l_end = seq_a_len, seq_len
+            r_start, r_end = self.max_seq_len, self.max_seq_len + img_len
+            attention_mask[c_start: c_end, c_start: c_end] = 1
+            attention_mask[l_start: l_end, l_start: l_end] = 1
+            attention_mask[r_start: r_end, r_start: r_end] = 1
+            if att_mask_type == 'CL':
+                attention_mask[c_start: c_end, l_start: l_end] = 1
+                attention_mask[l_start: l_end, c_start: c_end] = 1
+            elif att_mask_type == 'CR':
+                attention_mask[c_start: c_end, r_start: r_end] = 1
+                attention_mask[r_start: r_end, c_start: c_end] = 1
+            elif att_mask_type == 'LR':
+                attention_mask[l_start: l_end, r_start: r_end] = 1
+                attention_mask[r_start: r_end, l_start: l_end] = 1
+            else:
+                raise ValueError("Unsupported attention mask type {}".format(att_mask_type))
+
+        input_ids = torch.tensor(input_ids, dtype=torch.long)
+        attention_mask = torch.tensor(attention_mask, dtype=torch.long)
+        segment_ids = torch.tensor(segment_ids, dtype=torch.long)
+        return (input_ids, attention_mask, segment_ids, img_feat)
+
+    def __getitem__(self, index):
+        if self.is_train:
+            img_idx, cap_idxs = self.get_image_caption_index(index)
+            img_key = self.img_keys[img_idx]
+            feature = self.features[img_key]
+            caption = self.captions[cap_idxs[0]][cap_idxs[1]]
+            od_labels = self.get_od_labels(img_key)
+            example = self.tensorize_example(caption, feature, text_b=od_labels)
+
+            # select a negative pair
+            neg_img_indexs = list(range(0, img_idx)) + list(range(img_idx + 1, len(self.img_keys)))
+            img_idx_neg = random.choice(neg_img_indexs)
+            if random.random() <= 0.5:
+                # randomly select a negative caption from a different image.
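+                # Sketch of the resulting training pair (added for clarity, not
+                # in the original patch): each index yields one positive and one
+                # hard negative example,
+                #   positive: (caption_i, features_i)                 -> label 1
+                #   negative: (caption_j, features_i), j != i, or
+                #             (caption_i, features_j)                 -> label 0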
+                cap_idx_neg = random.randint(0, self.num_captions_per_img - 1)
+                caption_neg = self.captions[self.img_keys[img_idx_neg]][cap_idx_neg]
+                example_neg = self.tensorize_example(caption_neg, feature, text_b=od_labels)
+            else:
+                # randomly select a negative image
+                feature_neg = self.features[self.img_keys[img_idx_neg]]
+                od_labels_neg = self.get_od_labels(self.img_keys[img_idx_neg])
+                example_neg = self.tensorize_example(caption, feature_neg, text_b=od_labels_neg)
+
+            example_pair = tuple(list(example) + [1] + list(example_neg) + [0])
+            return index, example_pair
+        else:
+            img_idx, cap_idxs = self.get_image_caption_index(index)
+            img_key = self.img_keys[img_idx]
+            feature = self.features[img_key]
+            caption = self.captions[cap_idxs[0]][cap_idxs[1]]
+            od_labels = self.get_od_labels(img_key)
+            example = self.tensorize_example(caption, feature, text_b=od_labels)
+            label = 1 if img_key == cap_idxs[0] else 0
+            return index, tuple(list(example) + [label])
+
+    def __len__(self):
+        if not self.is_train and self.args.cross_image_eval:
+            return len(self.img_keys) ** 2 * self.num_captions_per_img
+        return len(self.img_keys) * self.num_captions_per_img
+
+
+def compute_score_with_logits(logits, labels):
+    if logits.shape[1] > 1:
+        logits = torch.max(logits, 1)[1].data  # argmax
+        scores = logits == labels
+    else:
+        scores = torch.zeros_like(labels).cuda()
+        for i, (logit, label) in enumerate(zip(logits, labels)):
+            logit_ = torch.sigmoid(logit)
+            if (logit_ >= 0.5 and label == 1) or (logit_ < 0.5 and label == 0):
+                scores[i] = 1
+    return scores
+
+
+def compute_ranks(dataset, results):
+    labels = np.array([dataset.get_label(i) for i in range(len(dataset))])
+    similarities = np.array([results[i] for i in range(len(dataset))])
+    if dataset.has_caption_indexs:
+        num_captions_per_img = dataset.num_captions_per_img
+    else:
+        num_captions_per_img = len(dataset.img_keys) * dataset.num_captions_per_img
+    labels = np.reshape(labels, [-1, num_captions_per_img])
+    similarities = np.reshape(similarities, [-1, num_captions_per_img])
+    i2t_ranks, t2i_ranks = [], []
+    for lab, sim in zip(labels, similarities):
+        inds = np.argsort(sim)[::-1]
+        rank = num_captions_per_img
+        for r, ind in enumerate(inds):
+            if lab[ind] == 1:
+                rank = r
+                break
+        i2t_ranks.append(rank)
+    if not dataset.has_caption_indexs:
+        labels = np.swapaxes(labels, 0, 1)
+        similarities = np.swapaxes(similarities, 0, 1)
+        for lab, sim in zip(labels, similarities):
+            inds = np.argsort(sim)[::-1]
+            rank = num_captions_per_img
+            for r, ind in enumerate(inds):
+                if lab[ind] == 1:
+                    rank = r
+                    break
+            t2i_ranks.append(rank)
+    return i2t_ranks, t2i_ranks
+
+
+def save_checkpoint(model, tokenizer, args, epoch, global_step):
+    checkpoint_dir = op.join(args.output_dir, 'checkpoint-{}-{}'.format(epoch, global_step))
+    mkdir(checkpoint_dir)
+    model_to_save = model.module if hasattr(model, 'module') else model
+    save_num = 0
+    while (save_num < 10):
+        try:
+            model_to_save.save_pretrained(checkpoint_dir)
+            torch.save(args, op.join(checkpoint_dir, 'training_args.bin'))
+            tokenizer.save_pretrained(checkpoint_dir)
+            logger.info("Save checkpoint to {}".format(checkpoint_dir))
+            break
+        except Exception:
+            save_num += 1
+    if save_num == 10:
+        logger.info("Failed to save checkpoint after 10 trials.")
+    return
+
+
+def train(args, train_dataset, val_dataset, model, tokenizer):
+    args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
+    train_sampler = RandomSampler(train_dataset)
+    train_dataloader = DataLoader(train_dataset, sampler=train_sampler,
+                                  batch_size=args.train_batch_size, num_workers=args.num_workers)
+
+    if args.max_steps > 0:
+        t_total = args.max_steps
+        args.num_train_epochs = args.max_steps // (len(train_dataloader) // \
+            args.gradient_accumulation_steps) + 1
+    else:
+        t_total = len(train_dataloader) // args.gradient_accumulation_steps \
+            * args.num_train_epochs
+
+    # Prepare optimizer and scheduler
+    no_decay = ['bias', 'LayerNorm.weight']
+    grouped_parameters = [
+        {'params': [p for n, p in model.named_parameters() if not \
+            any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay},
+        {'params': [p for n, p in model.named_parameters() if \
+            any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
+    ]
+    optimizer = AdamW(grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
+    if args.scheduler == "constant":
+        scheduler = WarmupConstantSchedule(optimizer, warmup_steps=args.warmup_steps)
+    elif args.scheduler == "linear":
+        scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=t_total)
+    else:
+        raise ValueError("Unknown scheduler type: {}".format(args.scheduler))
+
+    if args.n_gpu > 1:
+        model = torch.nn.DataParallel(model)
+
+    logger.info("***** Running training *****")
+    logger.info("  Num examples = %d", len(train_dataset))
+    logger.info("  Num Epochs = %d", args.num_train_epochs)
+    logger.info("  Batch size per GPU = %d", args.per_gpu_train_batch_size)
+    logger.info("  Total train batch size (w. parallel, & accumulation) = %d",
+                args.train_batch_size * args.gradient_accumulation_steps)
+    logger.info("  Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
+    logger.info("  Total optimization steps = %d", t_total)
+
+    global_step, global_loss, global_acc = 0, 0.0, 0.0
+    model.zero_grad()
+    log_json = []
+    best_score = 0
+    for epoch in range(int(args.num_train_epochs)):
+        for step, (_, batch) in enumerate(train_dataloader):
+            model.train()
+            batch = tuple(t.to(args.device) for t in batch)
+            inputs = {
+                'input_ids': torch.cat((batch[0], batch[5]), dim=0),
+                'attention_mask': torch.cat((batch[1], batch[6]), dim=0),
+                'token_type_ids': torch.cat((batch[2], batch[7]), dim=0),
+                'img_feats': torch.cat((batch[3], batch[8]), dim=0),
+                'labels': torch.cat((batch[4], batch[9]), dim=0)
+            }
+            outputs = model(**inputs)
+            loss, logits = outputs[:2]
+            if args.n_gpu > 1:
+                loss = loss.mean()  # mean() to average on multi-gpu parallel training
+            if args.gradient_accumulation_steps > 1:
+                loss = loss / args.gradient_accumulation_steps
+            loss.backward()
+            torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
+            batch_score = compute_score_with_logits(logits, inputs['labels']).sum()
+            batch_acc = batch_score.item() / (args.train_batch_size * 2)
+            global_loss += loss.item()
+            global_acc += batch_acc
+            if (step + 1) % args.gradient_accumulation_steps == 0:
+                global_step += 1
+                scheduler.step()
+                optimizer.step()
+                model.zero_grad()
+                if global_step % args.logging_steps == 0:
+                    logger.info("Epoch: {}, global_step: {}, lr: {:.6f}, loss: {:.4f} ({:.4f}), " \
+                        "score: {:.4f} ({:.4f})".format(epoch, global_step,
+                        optimizer.param_groups[0]["lr"], loss, global_loss / global_step,
+                        batch_acc, global_acc / global_step)
+                    )
+
+                if (args.save_steps > 0 and global_step % args.save_steps == 0) or \
+                        global_step == t_total:
+                    save_checkpoint(model, tokenizer, args, epoch, global_step)
+                    # evaluation
+                    if args.evaluate_during_training:
+                        logger.info("Perform evaluation at step: %d" % (global_step))
+                        test_result = test(args, model, val_dataset)
+                        eval_result = evaluate(val_dataset, test_result)
+                        rank_accs = eval_result['i2t_retrieval']
+                        if rank_accs['R@1'] > best_score:
+                            best_score = rank_accs['R@1']
+                        epoch_log = {'epoch': epoch, 'global_step': global_step,
+                                     'R1': rank_accs['R@1'], 'R5': rank_accs['R@5'],
+                                     'R10': rank_accs['R@10'], 'best_R1': best_score}
+                        log_json.append(epoch_log)
+                        with open(args.output_dir + '/eval_logs.json', 'w') as fp:
+                            json.dump(log_json, fp)
+    return global_step, global_loss / global_step
+
+
+def test(args, model, eval_dataset):
+    args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
+    eval_sampler = SequentialSampler(eval_dataset)
+    eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler,
+                                 batch_size=args.eval_batch_size, num_workers=args.num_workers)
+
+    logger.info("Num examples = {}".format(len(eval_dataset)))
+    logger.info("Evaluation batch size = {}".format(args.eval_batch_size))
+    model.eval()
+    results = {}
+    softmax = nn.Softmax(dim=1)
+    for indexs, batch in tqdm(eval_dataloader):
+        batch = tuple(t.to(args.device) for t in batch)
+        with torch.no_grad():
+            inputs = {
+                'input_ids': batch[0],
+                'attention_mask': batch[1],
+                'token_type_ids': batch[2],
+                'img_feats': batch[3],
+                'labels': batch[4]
+            }
+            _, logits = model(**inputs)[:2]
+            if args.num_labels == 2:
+                probs = softmax(logits)
+                result = probs[:, 1]  # the confidence to be a matched pair
+            else:
+                result = logits
+            result = [_.to(torch.device("cpu")) for _ in result]
+            results.update({idx.item(): res.item() for idx, res in zip(indexs, result)})
+    return results
+
+
+def evaluate(eval_dataset, test_results):
+    i2t_ranks, t2i_ranks = compute_ranks(eval_dataset, test_results)
+    rank = [1, 5, 10]
+    i2t_accs = [sum([_ < r for _ in i2t_ranks]) / len(i2t_ranks) for r in rank]
+    logger.info("I2T Retrieval: {:.4f} @ R1, {:.4f} @ R5, {:.4f} @ R10".format(
+        i2t_accs[0], i2t_accs[1], i2t_accs[2]))
+    eval_result = {"i2t_retrieval": {"R@1": i2t_accs[0], "R@5": i2t_accs[1], "R@10": i2t_accs[2]}}
+    if t2i_ranks:
+        t2i_accs = [sum([_ < r for _ in t2i_ranks]) / len(t2i_ranks) for r in rank]
+        logger.info("T2I Retrieval: {:.4f} @ R1, {:.4f} @ R5, {:.4f} @ R10".format(
+            t2i_accs[0], t2i_accs[1], t2i_accs[2]))
+        eval_result["t2i_retrieval"] = {"R@1": t2i_accs[0], "R@5": t2i_accs[1], "R@10": t2i_accs[2]}
+    return eval_result
+
+
+def get_predict_file(args):
+    cc = []
+    data = op.basename(op.join(args.data_dir, '')[:-1])
+    if data != 'coco_ir':
+        cc.append(data)
+    cc.append(args.test_split)
+    if args.add_od_labels:
+        cc.append('wlabels{}'.format(args.od_label_type))
+    return op.join(args.eval_model_dir, '{}.results.pt'.format('.'.join(cc)))
+
+
+def restore_training_settings(args):
+    assert not args.do_train and (args.do_test or args.do_eval)
+    train_args = torch.load(op.join(args.eval_model_dir, 'training_args.bin'))
+    override_params = ['do_lower_case', 'img_feature_type', 'max_seq_length',
+                       'max_img_seq_length', 'add_od_labels', 'od_label_type']
+    for param in override_params:
+        if hasattr(train_args, param):
+            train_v = getattr(train_args, param)
+            test_v = getattr(args, param)
+            if train_v != test_v:
+                logger.warning('Override {} with train args: {} -> {}'.format(param, test_v, train_v))
+                setattr(args, param, train_v)
+    return args
+
+
+def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--data_dir", default='datasets/coco_ir', type=str, required=False,
+                        help="The input data dir with all required files.")
+    parser.add_argument("--model_name_or_path", default=None, type=str, required=False,
+                        help="Path to pre-trained model or model type. Required for training.")
+    parser.add_argument("--output_dir", default='output/', type=str, required=False,
+                        help="The output directory to save checkpoints and test results.")
+    parser.add_argument("--loss_type", default='sfmx', type=str,
+                        help="Loss function types: support kl, sfmx")
+    parser.add_argument("--config_name", default="", type=str,
+                        help="Pretrained config name or path if not the same as model_name.")
+    parser.add_argument("--tokenizer_name", default="", type=str,
+                        help="Pretrained tokenizer name or path if not the same as model_name.")
+    parser.add_argument("--max_seq_length", default=70, type=int,
+                        help="The maximum total input sequence length after tokenization. "
+                             "Sequences longer than this will be truncated, "
+                             "sequences shorter will be padded. "
+                             "This number is calculated on the COCO dataset. "
+                             "If adding object detection labels, the suggested length is 70.")
+    parser.add_argument("--do_train", action='store_true', help="Whether to run training.")
+    parser.add_argument("--do_test", action='store_true', help="Whether to run inference.")
+    parser.add_argument("--do_eval", action='store_true',
+                        help="Whether to run performance evaluation. "
+                             "Do not activate when running inference on a dataset without gt labels.")
+    parser.add_argument("--test_split", default='test', type=str, help='data split name.')
+    parser.add_argument("--eval_img_keys_file", default='', type=str,
+                        help="image key tsv to select a subset of images for evaluation. "
+                             "This is useful in 5-folds evaluation. The topn index file is not "
+                             "needed in this case.")
+    parser.add_argument("--eval_caption_index_file", default='', type=str,
+                        help="index of a list of (img_key, cap_idx) for each image. "
+                             "This is used to perform re-rank using hard negative samples. "
+                             "Useful for the validation set to monitor performance during training.")
+    parser.add_argument("--cross_image_eval", action='store_true',
+                        help="perform cross image inference, i.e. each image with all texts from other images.")
+    parser.add_argument("--add_od_labels", default=False, action='store_true',
+                        help="Whether to add object detection labels or not.")
+    parser.add_argument("--od_label_type", default='vg', type=str,
+                        help="label type, support vg, gt, oid")
+    parser.add_argument("--att_mask_type", default='CLR', type=str,
+                        help="attention mask type, support ['CL', 'CR', 'LR', 'CLR']. "
+                             "C: caption, L: labels, R: image regions; CLR is full attention by default. "
+                             "CL means attention between caption and labels. "
+ "please pay attention to the order CLR, which is the default concat order.") + parser.add_argument("--do_lower_case", action='store_true', + help="Set this flag if you are using an uncased model.") + parser.add_argument("--drop_out", default=0.1, type=float, help="Drop out in BERT.") + parser.add_argument("--max_img_seq_length", default=50, type=int, + help="The maximum total input image sequence length.") + parser.add_argument("--img_feature_dim", default=2054, type=int, + help="The Image Feature Dimension.") + parser.add_argument("--img_feature_type", default='frcnn', type=str, + help="Image feature type.") + parser.add_argument("--per_gpu_train_batch_size", default=32, type=int, + help="Batch size per GPU/CPU for training.") + parser.add_argument("--per_gpu_eval_batch_size", default=64, type=int, + help="Batch size per GPU/CPU for evaluation.") + parser.add_argument("--output_mode", default='classification', type=str, + help="output mode, support classification or regression.") + parser.add_argument("--num_labels", default=2, type=int, + help="num_labels is 2 for classification and 1 for regression.") + parser.add_argument("--num_captions_per_img_train", default=5, type=int, + help="number of positive matched captions for each training image.") + parser.add_argument("--num_captions_per_img_val", default=5, type=int, + help="number of captions for each testing image.") + parser.add_argument('--gradient_accumulation_steps', type=int, default=1, + help="Number of updates steps to accumulate before backward.") + parser.add_argument("--learning_rate", default=2e-5, type=float, help="The initial lr.") + parser.add_argument("--weight_decay", default=0.05, type=float, help="Weight deay.") + parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam.") + parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") + parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup.") + parser.add_argument("--scheduler", default='linear', type=str, help="constant or linear.") + parser.add_argument("--num_workers", default=4, type=int, help="Workers in dataloader.") + parser.add_argument("--num_train_epochs", default=20, type=int, + help="Total number of training epochs to perform.") + parser.add_argument("--max_steps", default=-1, type=int, + help="Total number of training steps. Override num_train_epochs.") + parser.add_argument('--logging_steps', type=int, default=20, help="Log every X steps.") + parser.add_argument('--save_steps', type=int, default=-1, + help="Save checkpoint every X steps. 
+                        help="Save checkpoint every X steps. Will also perform evaluation.")
+    parser.add_argument("--evaluate_during_training", action='store_true',
+                        help="Run evaluation during training at each save_steps.")
+    parser.add_argument("--eval_model_dir", type=str, default='',
+                        help="Model directory for evaluation.")
+    parser.add_argument("--no_cuda", action='store_true', help="Avoid using CUDA.")
+    parser.add_argument('--seed', type=int, default=88, help="random seed for initialization.")
+    args = parser.parse_args()
+
+    global logger
+    mkdir(args.output_dir)
+    logger = setup_logger("vlpretrain", args.output_dir, 0)
+
+    args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
+    args.n_gpu = torch.cuda.device_count()
+    set_seed(args.seed, args.n_gpu)
+    logger.warning("Device: %s, n_gpu: %s", args.device, args.n_gpu)
+    logger.info('output_mode: {}, #Labels: {}'.format(args.output_mode, args.num_labels))
+
+    config_class, tokenizer_class = BertConfig, BertTokenizer
+    model_class = ImageBertForSequenceClassification
+    if args.do_train:
+        config = config_class.from_pretrained(args.config_name if args.config_name else \
+            args.model_name_or_path, num_labels=args.num_labels, finetuning_task='ir')
+        tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name \
+            else args.model_name_or_path, do_lower_case=args.do_lower_case)
+        config.img_feature_dim = args.img_feature_dim
+        config.img_feature_type = args.img_feature_type
+        config.hidden_dropout_prob = args.drop_out
+        config.loss_type = args.loss_type
+        model = model_class.from_pretrained(args.model_name_or_path,
+            from_tf=bool('.ckpt' in args.model_name_or_path), config=config)
+    else:
+        checkpoint = args.eval_model_dir
+        assert op.isdir(checkpoint)
+        config = config_class.from_pretrained(checkpoint)
+        tokenizer = tokenizer_class.from_pretrained(checkpoint)
+        logger.info("Evaluate the following checkpoint: %s", checkpoint)
+        model = model_class.from_pretrained(checkpoint, config=config)
+
+    model.to(args.device)
+    logger.info("Training/evaluation parameters %s", args)
+    if args.do_train:
+        train_dataset = RetrievalDataset(tokenizer, args, 'train', is_train=True)
+        if args.evaluate_during_training:
+            val_dataset = RetrievalDataset(tokenizer, args, 'minival', is_train=False)
+        else:
+            val_dataset = None
+        global_step, avg_loss = train(args, train_dataset, val_dataset, model, tokenizer)
+        logger.info("Training done: total_step = %s, avg loss = %s", global_step, avg_loss)
+
+    # inference and evaluation
+    if args.do_test or args.do_eval:
+        args = restore_training_settings(args)
+        test_dataset = RetrievalDataset(tokenizer, args, args.test_split, is_train=False)
+        checkpoint = args.eval_model_dir
+        assert op.isdir(checkpoint)
+        logger.info("Evaluate the following checkpoint: %s", checkpoint)
+        model = model_class.from_pretrained(checkpoint, config=config)
+        model.to(args.device)
+        if args.n_gpu > 1:
+            model = torch.nn.DataParallel(model)
+
+        pred_file = get_predict_file(args)
+        if op.isfile(pred_file):
+            logger.info("Prediction file exists, skip inference.")
+            if args.do_eval:
+                test_result = torch.load(pred_file)
+        else:
+            test_result = test(args, model, test_dataset)
+            torch.save(test_result, pred_file)
+            logger.info("Prediction results saved to {}.".format(pred_file))
+
+        if args.do_eval:
+            eval_result = evaluate(test_dataset, test_result)
+            result_file = op.splitext(pred_file)[0] + '.eval.json'
+            with open(result_file, 'w') as f:
+                json.dump(eval_result, f)
+            logger.info("Evaluation results saved to {}.".format(result_file))
+
+
__name__ == "__main__": + main() diff --git a/oscar/run_vqa.py b/oscar/run_vqa.py new file mode 100644 index 0000000..cd19ce6 --- /dev/null +++ b/oscar/run_vqa.py @@ -0,0 +1,1222 @@ +# Copyright (c) 2020 Microsoft Corporation. Licensed under the MIT license. + +from __future__ import absolute_import, division, print_function + +import argparse +import glob +import logging +import os +import copy, time, json +import base64 + +import sys +sys.path.insert(0, '.') + +import numpy as np +import torch +import torch.nn as nn +from torch.utils.data import (Dataset, DataLoader, RandomSampler, SequentialSampler, TensorDataset) +from torch.utils.data.distributed import DistributedSampler +import _pickle as cPickle + +from oscar.modeling.modeling_bert import ImageBertForSequenceClassification +from transformers.pytorch_transformers import WEIGHTS_NAME, BertTokenizer, BertConfig +from transformers.pytorch_transformers import AdamW, WarmupLinearSchedule, WarmupConstantSchedule + +from oscar.utils.misc import set_seed +from oscar.utils.task_utils import (_truncate_seq_pair, convert_examples_to_features_vqa, + output_modes, processors) + +logger = logging.getLogger(__name__) + +MODEL_CLASSES = { + 'bert': (BertConfig, ImageBertForSequenceClassification, BertTokenizer), +} + + +log_json = [] +debug_size = 500 + + +def _load_dataset(args, name): + processor = processors[args.task_name]() + labels = processor.get_labels(args.label_file) + + if name == 'train': + if args.data_label_type == 'mask': + if args.use_vg: + #examples = processor.get_train_examples(args.data_dir, 'train2014_vg_qla_mrcnn.json') + examples = processor.get_train_examples(args.txt_data_dir, 'train2014_vg_qla_mrcnn.json') + else: + examples = processor.get_train_examples(args.txt_data_dir, 'train2014_qla_mrcnn.json') + else: + examples = processor.get_train_examples(args.txt_data_dir, 'train2014_qla.json') + elif name == 'val': + if args.data_label_type == 'mask': + if args.use_vg_dev: + examples = processor.get_dev_examples(args.txt_data_dir, 'vg_qla_mrcnn.json') + else: + examples = processor.get_dev_examples(args.txt_data_dir, 'val2014_qla_mrcnn.json') + else: + examples = processor.get_dev_examples(args.txt_data_dir, 'val2014_qla.json') + elif name == 'train+val': + if args.data_label_type == 'mask': + examples = processor.get_train_examples(args.txt_data_dir, 'train+val2014_qla_mrcnn.json') + #examples = processor.get_train_examples(args.data_dir, 'train+val2014_qla_mrcnn.json') + else: + examples = processor.get_train_examples(args.txt_data_dir, 'train+val2014_qla.json') + elif name == 'test2015': + if args.data_label_type == 'mask': + examples = processor.get_test_examples(args.data_dir, 'test2015_qla_mrcnn.json') + else: + examples = processor.get_test_examples(args.data_dir, 'test2014_qla.json') + elif name == 'test-dev2015': + if args.data_label_type == 'mask': + examples = processor.get_test_examples(args.data_dir, 'test-dev2015_qla_mrcnn.json') + else: + examples = processor.get_test_examples(args.data_dir, 'test2014_qla.json') + + return examples, labels + + +class VQADataset(Dataset): + """ VQA Dataset """ + + def __init__(self, args, name, tokenizer): + super(VQADataset, self).__init__() + assert name in ['train', 'val', 'test-dev2015', 'test2015', 'train+val'] + + self.args = args + self.name = name + + # load image features + t_start = time.time() + self.img_feature_file = None + self.img_feat_offset_map = None + + if args.img_feature_type == 'faster_r-cnn': + if args.img_feat_format == 'pt': + if args.img_feature_dim == 
2048: # object features
+ self.img_features = torch.load(os.path.join(args.data_dir, '{}_img_frcnn_obj_feats.pt'.format(name)))
+ else: # object + spatial features
+ if args.use_vg_dev:
+ self.img_features = torch.load(os.path.join(args.data_dir, 'train+val_img_frcnn_feats.pt'))
+ else:
+ self.img_features = torch.load(os.path.join(args.data_dir, '{}_img_frcnn_feats.pt'.format(name)))
+ elif args.img_feat_format == 'tsv':
+ self.load_img_tsv_features()
+ elif args.img_feature_type == 'mask_r-cnn':
+ self.img_features = torch.load(os.path.join(args.data_dir, '{}_img_mask_rcnn_feats.pt'.format(name)))
+ elif args.img_feature_type.startswith('dis_code'): #in ['dis_code', 'dis_code_t']: # discrete code
+ self.img_features = torch.load(os.path.join(args.data_dir, 'vqvae', '{}.pt'.format(name)))['feats_{}'.format(args.code_level)]
+ else:
+ self.img_features = torch.load(os.path.join(args.data_dir, '{}_img_feats.pt'.format(name)))
+ t_end = time.time()
+ logger.info('Info: loading {0} features using {1:.2f} secs'.format(name, (t_end-t_start)))
+
+ self.output_mode = output_modes[args.task_name]
+ self.tokenizer = tokenizer
+
+ self.examples, self.labels = _load_dataset(args, name)
+ self.label_map = {label: i for i, label in enumerate(self.labels)}
+
+ if self.args.load_fast:
+ # note: tensorize() has no positional `args` parameter; it reads self.args internally
+ self.features = self.tensorize(cls_token_at_end=bool(self.args.model_type in ['xlnet']), # xlnet has a cls token at the end
+ cls_token=self.tokenizer.cls_token,
+ sep_token=self.tokenizer.sep_token,
+ cls_token_segment_id=2 if self.args.model_type in ['xlnet'] else 0,
+ pad_on_left=bool(self.args.model_type in ['xlnet']), # pad on the left for xlnet
+ pad_token_segment_id=4 if self.args.model_type in ['xlnet'] else 0)
+ else:
+ pass
+
+ logger.info('%s Data Examples: %d' % (name, len(self.examples)))
+
+
+ def tensorize(self, cls_token_at_end=False, pad_on_left=False,
+ cls_token='[CLS]', sep_token='[SEP]', pad_token=0,
+ sequence_a_segment_id=0, sequence_b_segment_id=1,
+ cls_token_segment_id=1, pad_token_segment_id=0,
+ mask_padding_with_zero=True):
+
+ # debug:
+ debug_size = 500
+ features = []
+
+ for (ex_index, example) in enumerate(self.examples[0: ]):
+ if len(example.label) == 0: continue
+ if ex_index % 10000 == 0: logger.info("Tensorizing example %d of %d" % (ex_index, len(self.examples)))
+
+ tokens_a = self.tokenizer.tokenize(example.text_a)
+
+ tokens_b = None
+ if example.text_b:
+ tokens_b = self.tokenizer.tokenize(example.text_b)
+ # Modifies `tokens_a` and `tokens_b` in place so that the total length is less than the specified length.
+ # Account for [CLS], [SEP], [SEP] with "- 3"
+ _truncate_seq_pair(tokens_a, tokens_b, self.args.max_seq_length - 3)
+ else:
+ # Account for [CLS] and [SEP] with "- 2"
+ if len(tokens_a) > self.args.max_seq_length - 2:
+ tokens_a = tokens_a[:(self.args.max_seq_length - 2)]
+
+ tokens = tokens_a + [sep_token]
+ segment_ids = [sequence_a_segment_id] * len(tokens)
+
+ if tokens_b:
+ tokens += tokens_b + [sep_token]
+ segment_ids += [sequence_b_segment_id] * (len(tokens_b) + 1)
+
+ if cls_token_at_end:
+ tokens = tokens + [cls_token]
+ segment_ids = segment_ids + [cls_token_segment_id]
+ else:
+ tokens = [cls_token] + tokens
+ segment_ids = [cls_token_segment_id] + segment_ids
+
+ input_ids = self.tokenizer.convert_tokens_to_ids(tokens)
+
+ # The mask has 1 for real tokens and 0 for padding tokens. Only real tokens are attended to.
+ input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
+
+ # Zero-pad up to the sequence length. 
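+ # Illustrative example (not from the original code), assuming the BERT
+ # defaults used above (pad_token=0, mask_padding_with_zero=True) and
+ # max_seq_length=8 with tokens [CLS] w1 w2 [SEP]:
+ #   input_ids   -> [cls, w1, w2, sep, 0, 0, 0, 0]
+ #   input_mask  -> [1,   1,  1,  1,   0, 0, 0, 0]
+ #   segment_ids -> [0,   0,  0,  0,   0, 0, 0, 0]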
+ padding_length = self.args.max_seq_length - len(input_ids) + if pad_on_left: + input_ids = ([pad_token] * padding_length) + input_ids + input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask + segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids + else: + input_ids = input_ids + ([pad_token] * padding_length) + input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_length) + segment_ids = segment_ids + ([pad_token_segment_id] * padding_length) + + assert len(input_ids) == self.args.max_seq_length + assert len(input_mask) == self.args.max_seq_length + assert len(segment_ids) == self.args.max_seq_length + + # image features + img_feat = self.img_features[example.img_key] # torch + #img_feat = self.img_features.item().get(example.img_key) # numpy + if img_feat.shape[0] > self.args.max_img_seq_length: + img_feat = img_feat[0:self.args.max_img_seq_length, ] + if self.args.max_img_seq_length > 0: + input_mask = input_mask + [1 if mask_padding_with_zero else 0] * img_feat.shape[0] + # segment_ids += [sequence_b_segment_id] * img_feat.shape[0] + else: + if self.args.max_img_seq_length > 0: + input_mask = input_mask + [1 if mask_padding_with_zero else 0] * img_feat.shape[0] + # segment_ids = segment_ids + [sequence_b_segment_id] * img_feat.shape[0] + padding_matrix = torch.zeros((self.args.max_img_seq_length - img_feat.shape[0], img_feat.shape[1])) + img_feat = torch.cat((img_feat, padding_matrix), 0) + if self.args.max_img_seq_length > 0: + input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_matrix.shape[0]) + # segment_ids = segment_ids + [pad_token_segment_id] * padding_matrix.shape[0] + + if self.args.output_mode == "classification": + label_id = [self.label_map[l] for l in example.label] + score = example.score + elif self.args.output_mode == "regression": + label_id = float(example.label) + else: + raise KeyError(self.args.output_mode) + + if ex_index < 5: + logger.info("*** Example ***") + logger.info("guid: %s" % (example.guid)) + logger.info("tokens: %s" % " ".join([str(x) for x in tokens])) + logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) + logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask])) + logger.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids])) + logger.info("label: %s (id = %s)" % (example.label, label_id)) + logger.info("score: %s (score = %s)" % (example.score, score)) + + new_scores = target_tensor(len(self.labels), label_id, score) + #features.append(InputFeat(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=label_id, score=score, img_feat=img_feat)) + features.append((torch.tensor(input_ids, dtype=torch.long), + torch.tensor(input_mask, dtype=torch.long), + torch.tensor(segment_ids, dtype=torch.long), + torch.tensor([label_id[0]], dtype=torch.long), + torch.tensor(new_scores, dtype=torch.float), img_feat)) + + return features + + def tensorize_example(self, example, cls_token_at_end=False, pad_on_left=False, + cls_token='[CLS]', sep_token='[SEP]', pad_token=0, + sequence_a_segment_id=0, sequence_b_segment_id=1, + cls_token_segment_id=1, pad_token_segment_id=0, + mask_padding_with_zero=True): + + tokens_a = self.tokenizer.tokenize(example.text_a) + + tokens_b = None + if example.text_b: + tokens_b = self.tokenizer.tokenize(example.text_b) + # Modifies `tokens_a` and `tokens_b` in place so that the total length is less than the specified length. 
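+ # Illustrative example: with max_seq_length=10 the shared token budget
+ # is 10 - 3 = 7, and _truncate_seq_pair trims the longer of
+ # tokens_a/tokens_b one token at a time until the pair fits.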
+ # Account for [CLS], [SEP], [SEP] with "- 3" + _truncate_seq_pair(tokens_a, tokens_b, self.args.max_seq_length - 3) + else: + # Account for [CLS] and [SEP] with "- 2" + if len(tokens_a) > self.args.max_seq_length - 2: + tokens_a = tokens_a[:(self.args.max_seq_length - 2)] + + tokens = tokens_a + [sep_token] + segment_ids = [sequence_a_segment_id] * len(tokens) + + if tokens_b: + tokens += tokens_b + [sep_token] + segment_ids += [sequence_b_segment_id] * (len(tokens_b) + 1) + + if cls_token_at_end: + tokens = tokens + [cls_token] + segment_ids = segment_ids + [cls_token_segment_id] + else: + tokens = [cls_token] + tokens + segment_ids = [cls_token_segment_id] + segment_ids + + input_ids = self.tokenizer.convert_tokens_to_ids(tokens) + + # The mask has 1 for real tokens and 0 for padding tokens. Only real tokens are attended to. + input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids) + + # Zero-pad up to the sequence length. + padding_length = self.args.max_seq_length - len(input_ids) + if pad_on_left: + input_ids = ([pad_token] * padding_length) + input_ids + input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask + segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids + else: + input_ids = input_ids + ([pad_token] * padding_length) + input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_length) + segment_ids = segment_ids + ([pad_token_segment_id] * padding_length) + + assert len(input_ids) == self.args.max_seq_length + assert len(input_mask) == self.args.max_seq_length + assert len(segment_ids) == self.args.max_seq_length + + # image features + if self.args.img_feature_type.startswith('dis_code'): + img_feat = self.img_features[example.img_key] + + if self.args.img_feature_type == 'dis_code_ln': # for discrete code image representation + img_feat = img_feat.reshape(-1, img_feat.shape[0]) + + if self.args.img_feature_type == 'dis_code_t': # transposed + input_mask = input_mask + [1 if mask_padding_with_zero else 0] * 64 + else: + input_mask = input_mask + [1 if mask_padding_with_zero else 0] * img_feat.shape[0] + else: + if self.args.img_feat_format == 'pt': + img_feat = self.img_features[example.img_key] #[:, 0:self.args.img_feature_dim] # torch + elif self.args.img_feat_format == 'tsv': + img_features = self.get_img_feature(str(example.img_key)) + img_feat = torch.from_numpy(img_features) + + if img_feat.shape[0] > self.args.max_img_seq_length: + img_feat = img_feat[0:self.args.max_img_seq_length, ] + if self.args.max_img_seq_length > 0: + input_mask = input_mask + [1 if mask_padding_with_zero else 0] * img_feat.shape[0] + # segment_ids += [sequence_b_segment_id] * img_feat.shape[0] + else: + if self.args.max_img_seq_length > 0: + input_mask = input_mask + [1 if mask_padding_with_zero else 0] * img_feat.shape[0] + # segment_ids = segment_ids + [sequence_b_segment_id] * img_feat.shape[0] + padding_matrix = torch.zeros((self.args.max_img_seq_length - img_feat.shape[0], img_feat.shape[1])) + img_feat = torch.cat((img_feat, padding_matrix), 0) + if self.args.max_img_seq_length > 0: + input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_matrix.shape[0]) + # segment_ids = segment_ids + [pad_token_segment_id] * padding_matrix.shape[0] + + if self.args.output_mode == "classification": + if (example.label is None): + label_id = [0] + score = [0] + elif len(example.label) == 0: + label_id = [0] + score = [0] + else: + label_id = [self.label_map[l] for l in example.label] + score = example.score + elif 
self.args.output_mode == "regression": + if len(example.label) == 0: + label_id = 0 + else: + label_id = float(example.label) + else: + raise KeyError(self.args.output_mode) + + new_scores = target_tensor(len(self.labels), label_id, score) + + # features.append(InputFeat(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=label_id, score=score, img_feat=img_feat)) + if self.args.img_feature_type in ['dis_code', 'dis_code_t']: + img_feat = img_feat.type(torch.long) + elif self.args.img_feature_type in ['dis_code_ln']: + #img_feat = img_feat.reshape(-1, img_feat.shape[0]) + img_feat = img_feat.type(torch.float) + + return (torch.tensor(input_ids, dtype=torch.long), + torch.tensor(input_mask, dtype=torch.long), + torch.tensor(segment_ids, dtype=torch.long), + torch.tensor([label_id[0]], dtype=torch.long), + torch.tensor(new_scores, dtype=torch.float), + img_feat, + torch.tensor([example.q_id], dtype=torch.long)) + + def __getitem__(self, index): + if self.args.load_fast: + example = self.features[index] + else: + entry = self.examples[index] + example = self.tensorize_example(entry, + cls_token_at_end=bool(self.args.model_type in ['xlnet']), # xlnet has a cls token at the end + cls_token=self.tokenizer.cls_token, + sep_token=self.tokenizer.sep_token, + cls_token_segment_id=2 if self.args.model_type in ['xlnet'] else 0, + pad_on_left=bool(self.args.model_type in ['xlnet']), # pad on the left for xlnet + pad_token_segment_id=4 if self.args.model_type in ['xlnet'] else 0) + return example + + def __len__(self): + return len(self.examples) + + # tsv feature loading + def load_img_tsv_features(self): + self.check_img_feature_file() + self.check_img_feature_offset_map() + + def check_img_feature_file(self): + if self.img_feature_file is None: + img_feature_path = os.path.join(self.args.img_feat_dir, '{}_img_frcnn_feats.tsv'.format(self.name)) + t_s = time.time() + self.img_feature_file = open(img_feature_path, 'r') + t_e = time.time() + logger.info("Open {} image time: {}".format(self.name, (t_e - t_s))) + + def check_img_feature_offset_map(self): + """ load the image feature offset map """ + if self.img_feat_offset_map is None: + img_feature_path = os.path.join(self.args.img_feat_dir, '{}_img_frcnn_feats_offset_map.json'.format(self.name)) + t_s = time.time() + self.img_feat_offset_map = json.load(open(img_feature_path)) + t_e = time.time() + logger.info("Load {} images: {}, time: {}".format(self.name, len(self.img_feat_offset_map), (t_e - t_s))) + + def get_img_feature(self, image_id): + """ decode the image feature """ + self.check_img_feature_file() + self.check_img_feature_offset_map() + + if image_id in self.img_feat_offset_map: + img_offset = self.img_feat_offset_map[image_id] + self.img_feature_file.seek(img_offset, 0) + arr = [s.strip() for s in self.img_feature_file.readline().split('\t')] + num_boxes = int(arr[1]) + feat = np.frombuffer(base64.b64decode(arr[2]), dtype=np.float32).reshape((-1, self.args.img_feature_dim)) + return feat + + return None + + +def instance_bce_with_logits(logits, labels, reduction='mean'): + assert logits.dim() == 2 + + loss = nn.functional.binary_cross_entropy_with_logits(logits, labels, reduction=reduction) + if reduction == 'mean': + loss *= labels.size(1) + return loss + + +def compute_score_with_logits(logits, labels): + logits = torch.max(logits, 1)[1].data # argmax + one_hots = torch.zeros(*labels.size()).cuda() + one_hots.scatter_(1, logits.view(-1, 1), 1) + scores = (one_hots * labels) + return scores + + +def 
trim_batch(batch): + """ new batch func + :param batch: + :return: + """ + print('batch size', len(batch)) + + batch_size = len(batch) + batch_tensors = [] + for ele in batch[0]: + print(ele.shape, ele.size()) + zero_tensor = torch.zeros(([batch_size] + list(ele.size()))) + batch_tensors.append(zero_tensor) + + for b_id, b in enumerate(batch): + print(b_id, len(b)) + for ele_id, ele in enumerate(b): + print(ele_id, ele.shape) + batch_tensors[ele_id][b_id] = ele + return batch_tensors + + +def train(args, train_dataset, eval_dataset, model, tokenizer): + """ Train the model """ + #if args.local_rank in [-1, 0]: tb_writer = SummaryWriter() + + args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) + train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) + train_dataloader = DataLoader(train_dataset, num_workers=args.workers, sampler=train_sampler, batch_size=args.train_batch_size) #, collate_fn=trim_batch) + + if args.max_steps > 0: + t_total = args.max_steps + args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 + else: + t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs + + # Prepare optimizer and schedule (linear warmup and decay) + no_decay = ['bias', 'LayerNorm.weight'] + optimizer_grouped_parameters = [ + {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay}, + {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} + ] + optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) + #scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=t_total) # original + + if args.scheduler == "constant": + scheduler = WarmupConstantSchedule(optimizer, warmup_steps=args.warmup_steps) + elif args.scheduler == "linear": + scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=t_total) + + if args.fp16: + try: + from apex import amp + except ImportError: + raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") + model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) + + # multi-gpu training (should be after apex fp16 initialization) + if args.n_gpu > 1: + model = torch.nn.DataParallel(model) + + # Distributed training (should be after apex fp16 initialization) + if args.local_rank != -1: + model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True) + + # Train! + logger.info("***** Running training *****") + logger.info(" Num examples = %d", len(train_dataset)) + logger.info(" Num Epochs = %d", args.num_train_epochs) + logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) + logger.info(" Total train batch size (w. 
parallel, distributed & accumulation) = %d",
+ args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1))
+ logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
+ logger.info(" Total optimization steps = %d", t_total)
+
+ global_step = 0
+ tr_loss, logging_loss = 0.0, 0.0
+ model.zero_grad()
+ #train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0])
+ set_seed(args.seed, args.n_gpu) # Added here for reproducibility (even between python 2 and 3)
+
+ best_score = 0
+ best_model = {
+ 'epoch': 0,
+ 'model': copy.deepcopy(model), #model.state_dict(),
+ 'optimizer_state': optimizer.state_dict()
+ }
+
+ #eval_dataset = load_and_cache_examples(args, args.task_name, tokenizer, evaluate=True)
+
+ for epoch in range(int(args.num_train_epochs)):
+ #for epoch in train_iterator:
+ #epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
+ total_loss = 0
+ train_score = 0
+ total_norm = 0
+ count_norm = 0
+
+ if args.adjust_dp and epoch>=3:
+ logger.info("change dropout ratio {} to 0.3".format(args.drop_out))
+ if hasattr(model, 'module'):
+ model.module.dropout.p = 0.3
+ model.module.bert.dropout.p = 0.3
+ model.module.bert.embeddings.dropout.p = 0.3
+ else:
+ model.dropout.p = 0.3
+ model.bert.dropout.p = 0.3
+ model.bert.embeddings.dropout.p = 0.3
+
+ if args.adjust_loss and epoch>=args.adjust_loss_epoch:
+ logger.info("\t change loss type from kl to bce")
+ model.loss_type = 'bce'
+
+ # debug
+ #epoch = 20
+ #global_step = epoch*math.ceil(len(train_dataset)/(args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1)))
+
+ t_start = time.time()
+ for step, batch in enumerate(train_dataloader):
+ model.train()
+ batch = tuple(t.to(args.device) for t in batch)
+ inputs = {'input_ids': batch[0],
+ 'attention_mask': batch[1],
+ 'token_type_ids': batch[2] if args.model_type in ['bert', 'xlnet'] else None, # XLM doesn't use segment_ids
+ 'labels': batch[4],
+ 'img_feats': None if args.img_feature_dim == -1 else batch[5]}
+ outputs = model(**inputs)
+
+ #loss = outputs[0] # model outputs are always tuple in pytorch-transformers (see doc)
+ loss, logits = outputs[:2]
+
+ #loss = instance_bce_with_logits(logits, batch[4])
+
+ if args.n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu parallel training
+
+ if args.gradient_accumulation_steps > 1:
+ loss = loss / args.gradient_accumulation_steps
+
+ if args.fp16:
+ with amp.scale_loss(loss, optimizer) as scaled_loss:
+ scaled_loss.backward()
+ torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
+ else:
+ loss.backward()
+ total_norm += torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
+ count_norm += 1
+
+ batch_score = compute_score_with_logits(logits, batch[4]).sum()
+ train_score += batch_score.item()
+
+ tr_loss += loss.item()
+ if (step + 1) % args.gradient_accumulation_steps == 0:
+ scheduler.step() # Update learning rate schedule
+ optimizer.step()
+ model.zero_grad()
+ global_step += 1
+
+ if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: # Log metrics
+ if args.local_rank not in [-1, 0]:
+ torch.distributed.barrier()
+
+ if args.local_rank in [-1, 0] and args.evaluate_during_training:
+ #if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise 
metrics may not average well + logger.info("Epoch: %d, global_step: %d" % (epoch, global_step)) + eval_result, eval_score, upper_bound = evaluate(args, model, eval_dataset, prefix=global_step) + if eval_score > best_score: + best_score = eval_score + best_model['epoch'] = epoch + best_model['model'] = copy.deepcopy(model) + + logger.info("EVALERR: {}%".format(100 * best_score)) + + if args.local_rank == 0: + torch.distributed.barrier() + + logging_loss = tr_loss + + #if args.max_steps > 0 and global_step > args.max_steps: + # epoch_iterator.close() + # break + + # evaluation + logger.info("Epoch: %d, global_step: %d" % (epoch, global_step)) + eval_result, eval_score, upper_bound = evaluate(args, model, eval_dataset, prefix=global_step) + if eval_score > best_score: + best_score = eval_score + best_model['epoch'] = epoch + best_model['model'] = copy.deepcopy(model) + #best_model['optimizer'] = copy.deepcopy(optimizer.state_dict()) + + # save checkpoints + if (args.local_rank in [-1, 0]) and (args.save_epoch>0 and epoch%args.save_epoch == 0) and (epoch>args.save_after_epoch): + output_dir = os.path.join(args.output_dir, 'checkpoint-{}-{}'.format(epoch, global_step)) + if not os.path.exists(output_dir): os.makedirs(output_dir) + model_to_save = best_model['model'].module if hasattr(model, 'module') else best_model['model'] # Take care of distributed/parallel training + + save_num = 0 + while (save_num < 10): + try: + logger.info("Saving model attempt: {}".format(save_num)) + model_to_save.save_pretrained(output_dir) + torch.save(args, os.path.join(output_dir, 'training_args.bin')) + tokenizer.save_pretrained(output_dir) + break + except: + save_num += 1 + logger.info("Saving model checkpoint {0} to {1}".format(epoch, output_dir)) + + epoch_log = {'epoch': epoch, 'eval_score': eval_score, 'best_score':best_score} + log_json.append(epoch_log) + if args.local_rank in [-1, 0]: + with open(args.output_dir + '/eval_logs.json', 'w') as fp: + json.dump(log_json, fp) + + logger.info("PROGRESS: {}%".format(round(100*(epoch + 1) / args.num_train_epochs, 4))) + logger.info("EVALERR: {}%".format(100*best_score)) + + t_end = time.time() + logger.info('Epoch: %d, Train Time: %.3f' % (epoch, t_end - t_start)) + + #if args.max_steps > 0 and global_step > args.max_steps: + # train_iterator.close() + # break + + if args.local_rank in [-1, 0]: # Save the final model checkpoint + with open(args.output_dir + '/eval_logs.json', 'w') as fp: + json.dump(log_json, fp) + + output_dir = os.path.join(args.output_dir, 'best-{}'.format(best_model['epoch'])) + if not os.path.exists(output_dir): os.makedirs(output_dir) + model_to_save = best_model['model'].module if hasattr(model, 'module') else best_model['model'] # Take care of distributed/parallel training + + save_num = 0 + while (save_num < 10): + try: + model_to_save.save_pretrained(output_dir) + torch.save(args, os.path.join(output_dir, 'training_args.bin')) + tokenizer.save_pretrained(output_dir) + break + except: + save_num += 1 + logger.info("Saving the best model checkpoint epoch {} to {}".format(best_model['epoch'], output_dir)) + + return global_step, tr_loss / global_step + + +def evaluate(args, model, eval_dataset=None, prefix=""): + # Loop to handle MNLI double evaluation (matched, mis-matched) + eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,) + eval_outputs_dirs = (args.output_dir, args.output_dir + '-MM') if args.task_name == "mnli" else (args.output_dir,) + + #if args.n_gpu > 1: model = torch.nn.DataParallel(model) 
# debug: single-gpu or multi-gpus
+
+ results = []
+ t_start = time.time()
+ for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):
+ if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]: os.makedirs(eval_output_dir)
+
+ args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
+ # Note that DistributedSampler samples randomly
+ eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
+ eval_dataloader = DataLoader(eval_dataset, num_workers=args.workers, sampler=eval_sampler, batch_size=args.eval_batch_size)
+
+ # Eval!
+ logger.info("***** Running evaluation {} *****".format(prefix))
+ logger.info(" Num examples = %d", len(eval_dataset))
+ logger.info(" Batch size = %d", args.eval_batch_size)
+ eval_loss = 0.0
+ nb_eval_steps = 0
+ preds = None
+ out_label_ids = None
+ num_data = 0
+ score = 0
+ upper_bound = 0
+ results_dict = {}
+
+ for batch in eval_dataloader:
+ #for batch in tqdm(eval_dataloader, desc="Evaluating"):
+ model.eval()
+ batch = tuple(t.to(args.device) for t in batch)
+
+ with torch.no_grad():
+ inputs = {'input_ids': batch[0],
+ 'attention_mask': batch[1],
+ 'token_type_ids': batch[2] if args.model_type in ['bert', 'xlnet'] else None, # XLM doesn't use segment_ids
+ 'labels': batch[4],
+ 'img_feats': None if args.img_feature_dim == -1 else batch[5]}
+ outputs = model(**inputs)
+ tmp_eval_loss, logits = outputs[:2]
+
+ eval_loss += tmp_eval_loss.mean().item()
+
+ # batch_score = compute_score_with_logits(logits, batch[4]).sum()
+ batch_score = torch.sum(
+ compute_score_with_logits(logits, batch[4]), 1)
+ # update results_dict
+ results_dict.update(
+ {qa_ind: score for qa_ind, score in
+ zip(batch[6].view(-1).tolist(), batch_score.tolist())}
+ )
+ score += batch_score.sum().item()
+ #upper_bound += (batch[4].max(1)[0]).sum().item()
+ num_data += logits.size(0)
+
+ # debug
+ #val, idx = logits.max(1)
+ #logger.info('idx: %s, batch[4]: %s' % (str(idx.shape), str(batch[3].shape)))
+ #for i in range(idx.size(0)):
+ # logger.info('idx: %d, pred: %d, real: %d' % (idx[i].item(), eval_dataset.labels[idx[i].item()], batch[3][i].item()))
+
+ nb_eval_steps += 1
+
+ #if preds is None:
+ # preds = logits.detach().cpu().numpy()
+ # out_label_ids = inputs['labels'].detach().cpu().numpy()
+ #else:
+ # preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
+ # out_label_ids = np.append(out_label_ids, inputs['labels'].detach().cpu().numpy(), axis=0)
+
+ score = score / len(eval_dataloader.dataset)
+ upper_bound = upper_bound / len(eval_dataloader.dataset)
+
+ logger.info("Eval Results:")
+ logger.info("Eval Score: %.3f" % (100*score))
+ logger.info("EVALERR: {}%".format(100*score))
+ logger.info("Eval Upper Bound: %.3f" % (100*upper_bound))
+ # with open(os.path.join(args.data_dir, 'val_results.json'),
+ # 'w') as f:
+ # json.dump(results_dict, f)
+
+ t_end = time.time()
+ logger.info('Eval Time Cost: %.3f' % (t_end - t_start))
+
+ #eval_loss = eval_loss / nb_eval_steps
+ #if args.output_mode == "classification":
+ # preds = np.argmax(preds, axis=1)
+ #elif args.output_mode == "regression":
+ # preds = np.squeeze(preds)
+ #result = compute_metrics(eval_task, preds, out_label_ids)
+ #results.update(result)
+
+ #output_eval_file = os.path.join(eval_output_dir, "eval_results.txt")
+ #with open(output_eval_file, "w") as writer:
+ # logger.info("***** Eval results {} *****".format(prefix))
+ # for key in sorted(result.keys()):
+ # logger.info(" %s = %s", key, str(result[key]))
+ # 
writer.write("%s = %s\n" % (key, str(result[key]))) + + return results, score, upper_bound + + +def test(args, model, eval_dataset=None, prefix=""): + # Loop to handle MNLI double evaluation (matched, mis-matched) + eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,) + eval_outputs_dirs = (args.output_dir, args.output_dir + '-MM') if args.task_name == "mnli" else (args.output_dir,) + + label2ans = cPickle.load(open(args.label2ans_file, 'rb')) + logger.info('label2ans: %d' % (len(label2ans))) + + results = [] + t_start = time.time() + for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs): + if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]: os.makedirs(eval_output_dir) + + args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) + # Note that DistributedSampler samples randomly + eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset) + eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) + + # Eval + logger.info("***** Running Test {} *****".format(prefix)) + logger.info(" Num examples = %d", len(eval_dataset)) + logger.info(" Batch size = %d", args.eval_batch_size) + + for batch in eval_dataloader: + #for batch in tqdm(eval_dataloader, desc="Evaluating"): + model.eval() + batch = tuple(t.to(args.device) for t in batch) + + with torch.no_grad(): + inputs = {'input_ids': batch[0], + 'attention_mask': batch[1], + 'token_type_ids': batch[2] if args.model_type in ['bert', 'xlnet'] else None, # XLM don't use segment_ids + 'labels': None, + 'img_feats': None if args.img_feature_dim == -1 else batch[5]} + outputs = model(**inputs) + logits = outputs[0] + + val, idx = logits.max(1) + #logger.info('idx: %s, batch[6]: %s' % (str(idx.shape), str(batch[6].shape))) + + for i in range(idx.size(0)): + #logger.info('idx: %d, batch: %d' % (idx[i].item(), batch[6][i].item())) + result = {} + result['question_id'] = batch[6][i].item() + result['answer'] = label2ans[eval_dataset.labels[idx[i].item()]] #label2ans[idx[i].item()] + results.append(result) + + if len(results) % 2000 == 0: + logger.info("PROGRESS: {}%".format(round(100*len(results)/len(eval_dataset), 4))) + #logger.info('q_id: {0}, answer: {1}'.format(result['question_id'], result['answer'])) + + with open(args.output_dir + ('/{}_results.json'.format(eval_dataset.name)), 'w') as fp: + json.dump(results, fp) + + t_end = time.time() + logger.info('# questions: %d' % (len(results))) + logger.info('Test Time Cost: %.3f' % (t_end - t_start)) + + +def load_and_cache_examples(args, task, tokenizer, evaluate=False): + processor = processors[task]() + output_mode = output_modes[task] + + label_list = processor.get_labels(args.label_file) + + t_start = time.time() + examples = processor.get_dev_examples(args.data_dir) if evaluate else processor.get_train_examples(args.data_dir) + + #img_features = torch.load(os.path.join(args.data_dir, 'val_img_feats.pt' if evaluate else 'train_img_feats.pt')) + #img_features = torch.load(os.path.join(args.data_dir, 'val_img_frcnn_feats.pt' if evaluate else 'train_img_frcnn_feats.pt')) + img_features = np.load(os.path.join(args.data_dir, 'val_img_frcnn_feats.npy' if evaluate else 'train_img_frcnn_feats.npy')) + + features = convert_examples_to_features_vqa(examples, img_features, label_list, args.max_img_seq_length, args.max_seq_length, + tokenizer, output_mode, + cls_token_at_end=bool(args.model_type in ['xlnet']), # xlnet has a cls 
token at the end
+ cls_token=tokenizer.cls_token,
+ sep_token=tokenizer.sep_token,
+ cls_token_segment_id=2 if args.model_type in ['xlnet'] else 0,
+ pad_on_left=bool(args.model_type in ['xlnet']), # pad on the left for xlnet
+ pad_token_segment_id=4 if args.model_type in ['xlnet'] else 0)
+
+ #if args.local_rank in [-1, 0]:
+ # logger.info("Saving features into cached file %s", cached_features_file)
+ # torch.save(features, cached_features_file)
+ t_end = time.time()
+ logger.info('Info: loading features using %.5f secs' % (t_end-t_start))
+
+
+ # Convert to Tensors and build dataset
+ all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long) # batch*max_seq_len
+ all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
+ all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
+ if output_mode == "classification":
+ labels = torch.tensor([f.label_id[0] for f in features], dtype=torch.long)
+ targets = torch.tensor([target_tensor(len(label_list), f.label_id, f.score) for f in features], dtype=torch.float)
+
+ if args.img_feature_dim > 0: # change here
+ t_start = time.time()
+ img_feat_np = np.zeros((labels.shape[0], args.max_img_seq_length, args.img_feature_dim))
+ for f_id, f in enumerate(features):
+ img_feat_np[f_id] = f.img_feat
+
+ img_feats = torch.from_numpy(img_feat_np)
+
+ #img_feats = torch.empty((labels.shape[0], args.max_img_seq_length, args.img_feature_dim))
+ #for f_id, f in enumerate(features):
+ # img_feats[f_id] = f.img_feat
+
+ t_end = time.time()
+ logger.info('Info: convert image tensor features using %.5f secs' % (t_end - t_start))
+
+ #img_feats = torch.stack([f.img_feat[:,-args.img_feature_dim:] for f in features])
+ #img_feats = torch.stack([f.img_feat for f in features])
+ #img_feats = img_feats.type(torch.long)
+
+ #print('targets:', targets.shape)
+ print('img_feats:', img_feats.shape)
+ elif output_mode == "regression":
+ all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.float)
+
+ if args.img_feature_dim == -1:
+ dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, labels, targets)
+ else:
+ dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, labels, targets, img_feats)
+ return dataset
+
+def target_tensor(length, labels, scores):
+ """ create the soft target vector of size `length` from labels and scores """
+ target = [0]*length
+ for i, l in enumerate(labels):
+ target[l] = scores[i]
+
+ return target
+
+
+def main():
+ parser = argparse.ArgumentParser()
+
+ ## Required parameters
+ parser.add_argument("--data_dir", default=None, type=str, required=True,
+ help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
+ parser.add_argument("--txt_data_dir", default=None, type=str, required=True,
+ help="The input text data dir. 
Should contain the .json files (or other data files) for the task.") + + parser.add_argument("--model_type", default=None, type=str, required=True, + help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys())) + parser.add_argument("--model_name_or_path", default=None, type=str, required=True, + help="Path to pre-trained model or shortcut name") + parser.add_argument("--task_name", default=None, type=str, required=True, + help="The name of the task to train selected in the list: " + ", ".join(processors.keys())) + parser.add_argument("--output_dir", default=None, type=str, required=True, + help="The output directory where the model predictions and checkpoints will be written.") + parser.add_argument("--label_file", type=str, default=None, help="Label Dictionary") + parser.add_argument("--label2ans_file", type=str, default=None, help="Label to Answer Dictionary") + + parser.add_argument("--img_feat_dir", default=None, type=str, help="The input img_feat_dir.") + parser.add_argument("--img_feat_format", default='pt', type=str, help="img_feat_format: pt or tsv.") + + parser.add_argument("--data_label_type", default='faster', type=str, help="faster or mask") + parser.add_argument("--loss_type", default='kl', type=str, help="kl or xe") + parser.add_argument("--use_vg", action='store_true', help="Use VG-QA or not.") + parser.add_argument("--use_vg_dev", action='store_true', help="Use VG-QA as validation.") + + ## Other parameters + parser.add_argument("--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name") + parser.add_argument("--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name") + parser.add_argument("--cache_dir", default="", type=str, help="Where do you want to store the pre-trained models downloaded from s3") + parser.add_argument("--max_seq_length", default=128, type=int, + help="The maximum total input sequence length after tokenization. 
Sequences longer than this will be truncated, sequences shorter will be padded.")
+ parser.add_argument("--do_train", action='store_true', help="Whether to run training.")
+ parser.add_argument("--do_train_val", action='store_true', help="Whether to run training on train+val.")
+ parser.add_argument("--do_eval", action='store_true', help="Whether to run eval on the dev set.")
+ parser.add_argument("--do_test", action='store_true', help="Whether to run test on the test set.")
+ parser.add_argument("--do_test_dev", action='store_true', help="Whether to run test on the test-dev set.")
+ parser.add_argument("--evaluate_during_training", action='store_true', help="Run evaluation during training at each logging step.")
+ parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.")
+
+ parser.add_argument("--drop_out", default=0.1, type=float, help="Drop out for BERT.")
+ parser.add_argument("--adjust_dp", action='store_true', help="Adjust Drop out for BERT.")
+
+ parser.add_argument("--adjust_loss", action='store_true', help="Adjust Loss Type for BERT.")
+ parser.add_argument("--adjust_loss_epoch", default=-1, type=int, help="Adjust Loss Type for BERT.")
+ parser.add_argument("--classifier", default='linear', type=str, help="linear or mlp")
+ parser.add_argument("--cls_hidden_scale", default=2, type=int, help="cls_hidden_scale: for classifier")
+
+ parser.add_argument("--hard_label", action='store_true', help="Soft Label or Hard Label.")
+
+ parser.add_argument("--max_img_seq_length", default=30, type=int, help="The maximum total input image sequence length.")
+ parser.add_argument("--img_feature_dim", default=2054, type=int, help="The Image Feature Dimension.")
+ parser.add_argument("--img_feature_type", default='faster_r-cnn', type=str, help="faster_r-cnn or mask_r-cnn")
+ parser.add_argument("--code_voc", default=512, type=int, help="dis_code_voc: 256, 512")
+ parser.add_argument("--code_level", default='top', type=str, help="code level: top, bottom, both")
+
+ parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.")
+ parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.")
+ parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
+ help="Number of update steps to accumulate before performing a backward/update pass.")
+ parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
+ parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
+ parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
+ parser.add_argument("--scheduler", default='linear', type=str, help="constant or linear.")
+ parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
+ parser.add_argument("--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.")
+ parser.add_argument("--max_steps", default=-1, type=int, help="If > 0: set total number of training steps to perform. 
Override num_train_epochs.")
+ parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
+
+ parser.add_argument('--logging_steps', type=int, default=50, help="Log every X update steps.")
+ parser.add_argument('--save_steps', type=int, default=-1, help="Save checkpoint every X update steps.")
+ parser.add_argument('--save_epoch', type=int, default=5, help="Save checkpoint every X epochs.")
+ parser.add_argument('--save_after_epoch', type=int, default=-1, help="Save checkpoint after epoch.")
+ parser.add_argument("--eval_all_checkpoints", action='store_true',
+ help="Evaluate all checkpoints starting with the same prefix as model_name and ending with a step number")
+ parser.add_argument("--no_cuda", action='store_true', help="Avoid using CUDA when available")
+ parser.add_argument('--overwrite_output_dir', action='store_true', help="Overwrite the content of the output directory")
+ parser.add_argument('--overwrite_cache', action='store_true', help="Overwrite the cached training and evaluation sets")
+ parser.add_argument('--seed', type=int, default=42, help="random seed for initialization")
+
+ parser.add_argument('--fp16', action='store_true', help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
+ parser.add_argument('--fp16_opt_level', type=str, default='O1',
+ help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
+ "See details at https://nvidia.github.io/apex/amp.html")
+ parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
+ parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.")
+ parser.add_argument('--server_port', type=str, default='', help="For distant debugging.")
+
+ parser.add_argument("--philly", action='store_true', help="Use Philly: reset the output dir")
+ parser.add_argument("--load_fast", action='store_true', help="Load Tensor Fast")
+ parser.add_argument('-j', '--workers', default=0, type=int, metavar='N', help='number of data loading workers (default: 0)')
+
+ #args = '--data_dir ../vqa/ban-vqa/data/qal_pairs --model_type bert --model_name_or_path bert-base-uncased --task_name vqa_text ' \
+ # '--do_train --do_eval --do_lower_case --max_seq_length 40 --per_gpu_eval_batch_size 16 --per_gpu_train_batch_size 16 --learning_rate 2e-5 ' \
+ # '--num_train_epochs 20.0 --output_dir ./results/vqa_text --label_file ../vqa/ban-vqa/data/cache/trainval_ans2label.pkl ' \
+ # '--save_steps 5000 --overwrite_output_dir --max_img_seq_length 1 --img_feature_dim 565 --img_feature_type dis_code '
+
+ #args = '--data_dir ../vqa/ban-vqa/data/qal_pairs --model_type bert --model_name_or_path bert-base-uncased --task_name vqa_text ' \
+ # '--do_train --do_eval --do_lower_case --max_seq_length 40 --per_gpu_eval_batch_size 16 --per_gpu_train_batch_size 16 --learning_rate 2e-5 ' \
+ # '--num_train_epochs 20.0 --output_dir ./results/vqa_text --label_file ../vqa/ban-vqa/data/cache/trainval_ans2label.pkl ' \
+ # '--save_steps 5000 --overwrite_output_dir --max_img_seq_length 10 --img_feature_dim 565 --img_feature_type other '
+
+ #args = parser.parse_args(args.split())
+
+ args = parser.parse_args()
+
+ if args.philly: # use philly
+ logger.info('Info: Use Philly, all the output folders are reset.')
+ args.output_dir = os.path.join(os.getenv('PT_OUTPUT_DIR'), args.output_dir)
+ logger.info('OUTPUT_DIR: %s', args.output_dir)
+
+ #if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and 
args.do_train and not args.overwrite_output_dir:
+ # raise ValueError("Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(args.output_dir))
+
+ if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train: logger.info("Output Directory Exists.")
+
+ # Setup distant debugging if needed
+ if args.server_ip and args.server_port:
+ # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
+ import ptvsd
+ logger.info("Waiting for debugger attach")
+ ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
+ ptvsd.wait_for_attach()
+
+ # Setup CUDA, GPU & distributed training
+ if args.local_rank == -1 or args.no_cuda:
+ device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
+ args.n_gpu = torch.cuda.device_count()
+ else: # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
+ torch.cuda.set_device(args.local_rank)
+ device = torch.device("cuda", args.local_rank)
+ torch.distributed.init_process_group(backend='nccl')
+ args.n_gpu = 1
+ args.device = device
+
+ # Setup logging
+ logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
+ datefmt = '%m/%d/%Y %H:%M:%S', level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
+ logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
+ args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)
+
+ # Set seed
+ set_seed(args.seed, args.n_gpu)
+
+ # Prepare the VQA task
+ args.task_name = args.task_name.lower()
+ if args.task_name not in processors:
+ raise ValueError("Task not found: %s" % (args.task_name))
+
+ processor = processors[args.task_name]()
+ args.output_mode = output_modes[args.task_name]
+ label_list = processor.get_labels(args.label_file)
+ num_labels = len(label_list)
+ logger.info('Task Name: {}, #Labels: {}'.format(args.task_name, num_labels))
+
+ # Load pretrained model and tokenizer
+ if args.local_rank not in [-1, 0]:
+ torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
+
+ args.model_type = args.model_type.lower()
+ config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
+ config = config_class.from_pretrained(
+ args.config_name if args.config_name else args.model_name_or_path,
+ num_labels=num_labels, finetuning_task=args.task_name,
+ )
+ tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case)
+
+ # discrete code
+ config.img_feature_dim = args.img_feature_dim
+ config.img_feature_type = args.img_feature_type
+ config.code_voc = args.code_voc
+ config.hidden_dropout_prob = args.drop_out
+ config.loss_type = args.loss_type
+ config.classifier = args.classifier
+ config.cls_hidden_scale = args.cls_hidden_scale
+
+ # load discrete code
+ if args.img_feature_type in ['dis_code', 'dis_code_t']:
+ logger.info('Load discrete code from: {}'.format(args.data_dir))
+ t_start = time.time()
+ train_code = torch.load(os.path.join(args.data_dir, 'vqvae', 'train.pt'))
+ t_end = time.time()
+ logger.info('Load time: %.3f' % (t_end - t_start))
+
+ if args.code_level == 'top':
+ config.code_dim = train_code['embeddings_t'].shape[0]
+ config.code_size = train_code['feats_top'][list(train_code['feats_top'].keys())[0]].shape[0]
+ elif args.code_level == 
'bottom': + config.code_dim = train_code['embeddings_b'].shape[0] + config.code_size = train_code['feats_bottom'][list(train_code['feats_bottom'].keys())[0]].shape[0] + elif args.code_level == 'both': + config.code_dim = train_code['embeddings_t'].shape[0] + train_code['embeddings_b'].shape[0] + + model = model_class.from_pretrained(args.model_name_or_path, from_tf=bool('.ckpt' in args.model_name_or_path), config=config) + if args.img_feature_type in ['dis_code', 'dis_code_t']: + logger.info('Initializing the code embedding with {}'.format(args.code_level)) + if args.code_level == 'top': + model.init_code_embedding(train_code['embeddings_t'].t()) + elif args.code_level == 'bottom': + model.init_code_embedding(train_code['embeddings_b'].t()) + + if args.local_rank == 0: + torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab + + model.to(args.device) + + logger.info("Training/evaluation parameters %s", args) + + #if args.do_eval: + eval_dataset = VQADataset(args, 'val', tokenizer) + + if args.do_test: + test_dataset = VQADataset(args, 'test2015', tokenizer) + + if args.do_test_dev: + test_dev_dataset = VQADataset(args, 'test-dev2015', tokenizer) + + # Training + if args.do_train: + #train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, evaluate=False) + train_dataset = VQADataset(args, 'train', tokenizer) + global_step, tr_loss = train(args, train_dataset, eval_dataset, model, tokenizer) + logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) + + # Training on train+val + if args.do_train_val: + train_dataset = VQADataset(args, 'train+val', tokenizer) + global_step, tr_loss = train(args, train_dataset, eval_dataset, model, tokenizer) + logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) + + # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained() + if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0): + # Create output directory if needed + if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir) + + logger.info("Saving model checkpoint to %s", args.output_dir) + # Save a trained model, configuration and tokenizer using `save_pretrained()`. 
They can then be reloaded using `from_pretrained()` + #model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training + #model_to_save.save_pretrained(args.output_dir) + + tokenizer.save_pretrained(args.output_dir) + + # Good practice: save your training arguments together with the trained model + torch.save(args, os.path.join(args.output_dir, 'training_args.bin')) + + # Load a trained model and vocabulary that you have fine-tuned + #model = model_class.from_pretrained(args.output_dir) + #tokenizer = tokenizer_class.from_pretrained(args.output_dir) + #model.to(args.device) + + + # Evaluation + #results = {} + if args.do_eval and args.local_rank in [-1, 0]: + checkpoints = [args.output_dir] + if args.eval_all_checkpoints: + checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True))) + logging.getLogger("pytorch_transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging + logger.info("Evaluate the following checkpoints: %s", checkpoints) + + for checkpoint in checkpoints: + global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else "" + model = model_class.from_pretrained(checkpoint, config=config) + model.to(args.device) + result, score, upper_bound = evaluate(args, model, eval_dataset, prefix=global_step) + #result = dict((k + '_{}'.format(global_step), v) for k, v in result.items()) + #results.update(result) + + # Test-Dev + if args.do_test_dev and args.local_rank in [-1, 0]: + checkpoints = [args.output_dir] + if args.eval_all_checkpoints: + checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True))) + logging.getLogger("pytorch_transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging + logger.info("Evaluate the following checkpoints: %s", checkpoints) + + for checkpoint in checkpoints: + global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else "" + model = model_class.from_pretrained(checkpoint) + model.to(args.device) + test(args, model, test_dev_dataset, prefix=global_step) + + # Test + if args.do_test and args.local_rank in [-1, 0]: + checkpoints = [args.output_dir] + if args.eval_all_checkpoints: + checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True))) + logging.getLogger("pytorch_transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging + logger.info("Evaluate the following checkpoints: %s", checkpoints) + + for checkpoint in checkpoints: + global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else "" + model = model_class.from_pretrained(checkpoint) + model.to(args.device) + test(args, model, test_dataset, prefix=global_step) + + +if __name__ == "__main__": + main() diff --git a/oscar/utils/__init__.py b/oscar/utils/__init__.py new file mode 100644 index 0000000..3dc1f76 --- /dev/null +++ b/oscar/utils/__init__.py @@ -0,0 +1 @@ +__version__ = "0.1.0" diff --git a/oscar/utils/caption_evaluate.py b/oscar/utils/caption_evaluate.py new file mode 100644 index 0000000..5699253 --- /dev/null +++ b/oscar/utils/caption_evaluate.py @@ -0,0 +1,293 @@ +# Copyright (c) 2020 Microsoft Corporation. Licensed under the MIT license. 
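+#
+# This module bundles the caption-evaluation helpers: COCO-style metric
+# computation, TSV-to-COCO result conversion, an SCST (self-critical sequence
+# training) reward criterion, and remote nocaps evaluation via the EvalAI CLI.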
+
+from collections import OrderedDict, defaultdict
+import json
+import os.path as op
+from pprint import pprint
+import torch
+import re
+import subprocess
+import tempfile
+import time
+from typing import Dict, Optional
+
+from coco_caption.pycocotools.coco import COCO
+from coco_caption.pycocoevalcap.eval import COCOEvalCap
+from coco_caption.pycocoevalcap.cider.cider import Cider
+
+CiderD_scorer = Cider(df='corpus')
+
+
+def evaluate_on_nocaps(split, predict_file, data_dir='data/nocaps/', evaluate_file=None):
+ '''
+ NOTE: Put the auth file in folder ~/.evalai/
+ '''
+ if not evaluate_file:
+ evaluate_file = op.splitext(predict_file)[0] + '.eval.json'
+ if op.isfile(evaluate_file):
+ print('{} already exists'.format(evaluate_file))
+ with open(evaluate_file, 'r') as fp:
+ metrics = json.load(fp)
+ return metrics
+
+ image_info_file = op.join(data_dir,
+ 'nocaps_{}_image_info.json'.format(split))
+ image_info = json.load(open(image_info_file))
+ open_image_id2id = {}
+ for it in image_info['images']:
+ open_image_id2id[it['open_images_id']] = it['id']
+ predictions = []
+ cap_id = 0
+ with open(predict_file, 'r') as fp:
+ for line in fp:
+ p = line.strip().split('\t')
+ predictions.append(
+ {'image_id': open_image_id2id[p[0]],
+ 'caption': json.loads(p[1])[0]['caption'],
+ 'id': cap_id})
+ cap_id += 1
+ if split == 'test':
+ print('Are you sure you want to submit the test split result at: {}'.format(predict_file))
+ import ipdb; ipdb.set_trace()
+ nocapseval = NocapsEvaluator(phase=split)
+ metrics = nocapseval.evaluate(predictions)
+ pprint(metrics)
+ with open(evaluate_file, 'w') as fp:
+ json.dump(metrics, fp)
+ return metrics
+
+
+def evaluate_on_coco_caption(res_file, label_file, outfile=None):
+ """
+ res_file: TSV file, each row is [image_key, json format list of captions].
+ Each caption is a dict, with fields "caption", "conf".
+ label_file: JSON file of ground truth captions in COCO format. 
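+ outfile: optional output path; when given, the metrics dict is written
+ there as JSON instead of being printed.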
+ """ + assert label_file.endswith('.json') + if res_file.endswith('.tsv'): + res_file_coco = op.splitext(res_file)[0] + '_coco_format.json' + convert_tsv_to_coco_format(res_file, res_file_coco) + else: + raise ValueError('unknown prediction result file format: {}'.format(res_file)) + + coco = COCO(label_file) + cocoRes = coco.loadRes(res_file_coco) + cocoEval = COCOEvalCap(coco, cocoRes, 'corpus') + + # evaluate on a subset of images by setting + # cocoEval.params['image_id'] = cocoRes.getImgIds() + # please remove this line when evaluating the full validation set + cocoEval.params['image_id'] = cocoRes.getImgIds() + + # evaluate results + # SPICE will take a few minutes the first time, but speeds up due to caching + cocoEval.evaluate() + result = cocoEval.eval + if not outfile: + print(result) + else: + with open(outfile, 'w') as fp: + json.dump(result, fp, indent=4) + return result + + +def convert_tsv_to_coco_format(res_tsv, outfile, + sep='\t', key_col=0, cap_col=1): + results = [] + with open(res_tsv) as fp: + for line in fp: + parts = line.strip().split(sep) + key = parts[key_col] + if cap_col < len(parts): + caps = json.loads(parts[cap_col]) + assert len(caps) == 1, 'cannot evaluate multiple captions per image' + cap = caps[0].get('caption', '') + else: + # empty caption generated + cap = "" + results.append( + {'image_id': key, + 'caption': cap} + ) + with open(outfile, 'w') as fp: + json.dump(results, fp) + + +class ScstRewardCriterion(torch.nn.Module): + CIDER_REWARD_WEIGHT = 1 + + def __init__(self): + self.greedy_score = None + super().__init__() + + def forward(self, gt_res, greedy_res, sample_res, sample_logprobs): + batch_size = len(gt_res) + + # must keep order to get evaluation for each item in batch + res = OrderedDict() + for i in range(batch_size): + res[i] = [sample_res[i]] + for i in range(batch_size): + res[batch_size + i] = [greedy_res[i]] + + gts = OrderedDict() + for i in range(batch_size): + gts[i] = gt_res[i] + for i in range(batch_size): + gts[batch_size + i] = gt_res[i] + + _, batch_cider_scores = CiderD_scorer.compute_score(gts, res) + scores = self.CIDER_REWARD_WEIGHT * batch_cider_scores + # sample - greedy + reward = scores[:batch_size] - scores[batch_size:] + self.greedy_score = scores[batch_size:].mean() + + reward = torch.as_tensor(reward, device=sample_logprobs.device, dtype=torch.float) + loss = - sample_logprobs * reward + loss = loss.mean() + return loss + + def get_score(self): + return self.greedy_score + + +class NocapsEvaluator(object): + r""" + Code from https://github.com/nocaps-org/updown-baseline/blob/master/updown/utils/evalai.py + + A utility class to submit model predictions on nocaps splits to EvalAI, and retrieve model + performance based on captioning metrics (such as CIDEr, SPICE). + + Extended Summary + ---------------- + This class and the training script together serve as a working example for "EvalAI in the + loop", showing how evaluation can be done remotely on privately held splits. Annotations + (captions) and evaluation-specific tools (e.g. `coco-caption `_) + are not required locally. This enables users to select best checkpoint, perform early + stopping, learning rate scheduling based on a metric, etc. without actually doing evaluation. + + Parameters + ---------- + phase: str, optional (default = "val") + Which phase to evaluate on. One of "val" or "test". + + Notes + ----- + This class can be used for retrieving metrics on both, val and test splits. 
However, we
+    recommend avoiding it for the test split (at least during training): the number of allowed
+    submissions to the test split on EvalAI is very small and can be exhausted within a few
+    iterations, whereas the number of submissions to the val split is practically unlimited.
+    """
+
+    def __init__(self, phase: str = "val"):
+
+        # Constants specific to EvalAI.
+        self._challenge_id = 355
+        self._phase_id = 742 if phase == "val" else 743
+
+    def evaluate(
+        self, predictions, iteration: Optional[int] = None
+    ) -> Dict[str, Dict[str, float]]:
+        r"""
+        Take the model predictions (in COCO format), submit them to EvalAI, and retrieve model
+        performance based on captioning metrics.
+
+        Parameters
+        ----------
+        predictions: List[Prediction]
+            Model predictions in COCO format. They are a list of dicts with keys
+            ``{"image_id": int, "caption": str}``.
+        iteration: int, optional (default = None)
+            Training iteration where the checkpoint was evaluated.
+
+        Returns
+        -------
+        Dict[str, Dict[str, float]]
+            Model performance based on all captioning metrics. Nested dict structure::
+
+                {
+                    "B1": {"in-domain", "near-domain", "out-domain", "entire"},  # BLEU-1
+                    "B2": {"in-domain", "near-domain", "out-domain", "entire"},  # BLEU-2
+                    "B3": {"in-domain", "near-domain", "out-domain", "entire"},  # BLEU-3
+                    "B4": {"in-domain", "near-domain", "out-domain", "entire"},  # BLEU-4
+                    "METEOR": {"in-domain", "near-domain", "out-domain", "entire"},
+                    "ROUGE-L": {"in-domain", "near-domain", "out-domain", "entire"},
+                    "CIDEr": {"in-domain", "near-domain", "out-domain", "entire"},
+                    "SPICE": {"in-domain", "near-domain", "out-domain", "entire"},
+                }
+
+        """
+        # Save predictions as a json file first.
+        _, predictions_filename = tempfile.mkstemp(suffix=".json", text=True)
+        with open(predictions_filename, "w") as f:
+            json.dump(predictions, f)
+
+        submission_command = (
+            f"evalai challenge {self._challenge_id} phase {self._phase_id} "
+            f"submit --file {predictions_filename}"
+        )
+
+        submission_command_subprocess = subprocess.Popen(
+            submission_command.split(),
+            stdout=subprocess.PIPE,
+            stdin=subprocess.PIPE,
+            stderr=subprocess.STDOUT,
+        )
+
+        # This terminal output will have the submission ID we need to check.
+        submission_command_stdout = submission_command_subprocess.communicate(input=b"N\n")[
+            0
+        ].decode("utf-8")
+
+        submission_id_regex = re.search("evalai submission ([0-9]+)", submission_command_stdout)
+        try:
+            # Get an integer submission ID (as a string). ``re.search`` returns
+            # None on a failed match, so ``.group`` raises AttributeError then.
+            submission_id = submission_id_regex.group(0).split()[-1]  # type: ignore
+        except AttributeError:
+            # Very unlikely, but submission may fail because of some glitch. Retry for that.
+            return self.evaluate(predictions)
+
+        if iteration is not None:
+            print(f"Submitted predictions for iteration {iteration}, submission id: {submission_id}.")
+        else:
+            print(f"Submitted predictions, submission_id: {submission_id}")
+
+        # Placeholder stdout for a pending submission.
+        result_stdout: str = "The Submission is yet to be evaluated."
+        num_tries: int = 0
+
+        # Query every 10 seconds for result until it appears.
+        while "CIDEr" not in result_stdout:
+
+            time.sleep(10)
+            result_stdout = subprocess.check_output(
+                ["evalai", "submission", submission_id, "result"]
+            ).decode("utf-8")
+            num_tries += 1
+
+            # Raise error if it takes more than 5 minutes (30 tries x 10 s).
+            if num_tries == 30:
+                raise ConnectionError("Unable to get results from EvalAI within 5 minutes!")
+
+        # Convert result to json.
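+        # ``evalai submission <id> result`` prints a JSON list with one entry
+        # per domain, e.g. [{"in-domain": {...}}, {"near-domain": {...}},
+        # {"out-domain": {...}}, {"entire": {...}}]; it is re-keyed into a
+        # single dict below.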
+        # NOTE: ``json.loads`` accepts no ``encoding`` argument on Python 3
+        # (deprecated and removed in 3.9); ``result_stdout`` is already str.
+        metrics = json.loads(result_stdout)
+
+        # keys: {"in-domain", "near-domain", "out-domain", "entire"}
+        # In each of these, keys: {"B1", "B2", "B3", "B4", "METEOR", "ROUGE-L", "CIDEr", "SPICE"}
+        metrics = {
+            "in-domain": metrics[0]["in-domain"],
+            "near-domain": metrics[1]["near-domain"],
+            "out-domain": metrics[2]["out-domain"],
+            "entire": metrics[3]["entire"],
+        }
+
+        # Restructure the metrics dict for better tensorboard logging.
+        # keys: {"B1", "B2", "B3", "B4", "METEOR", "ROUGE-L", "CIDEr", "SPICE"}
+        # In each of these, keys: {"in-domain", "near-domain", "out-domain", "entire"}
+        flipped_metrics: Dict[str, Dict[str, float]] = defaultdict(dict)
+        for key, val in metrics.items():
+            for subkey, subval in val.items():
+                flipped_metrics[subkey][key] = subval
+
+        return flipped_metrics
+
diff --git a/oscar/utils/cbs.py b/oscar/utils/cbs.py
new file mode 100644
index 0000000..b5a792e
--- /dev/null
+++ b/oscar/utils/cbs.py
@@ -0,0 +1,852 @@
+# Copyright (c) 2020 Microsoft Corporation. Licensed under the MIT license.
+# Copyright (c) 2019, Yufei Wang, Karan Desai. Licensed under the MIT license.
+# Code is modified from https://github.com/nocaps-org/updown-baseline
+
+import anytree
+import base64
+import json
+import numpy as np
+import os.path as op
+import torch
+from typing import Callable, Dict, List, Optional, Tuple
+
+from oscar.modeling.modeling_utils import BeamHypotheses
+
+StepFunctionType = Callable[
+    [torch.Tensor, List[torch.Tensor]], Tuple[torch.Tensor, List[torch.Tensor]]
+]
+
+
+def _enlarge_single_tensor(t, batch_size, num_fsm_states, beam_size):
+    # shape: (batch_size * beam_size, *)
+    _, *last_dims = t.size()
+    return (
+        t.view(batch_size, 1, 1, *last_dims)
+        .expand(batch_size, num_fsm_states, beam_size, *last_dims)
+        .reshape(-1, *last_dims)
+    )
+
+
+class ConstrainedBeamSearch(object):
+    r"""
+    Implements Constrained Beam Search for decoding the most likely sequences conditioned on a
+    Finite State Machine with specified state transitions.
+    """
+
+    def __init__(
+        self,
+        eos_token_ids: List[int],
+        max_steps: int = 20,
+        beam_size: int = 5,
+        per_node_beam_size: Optional[int] = None,
+        use_hypo: bool = False,
+        tokenizer=None,
+    ):
+        self._eos_token_ids = eos_token_ids
+        self.max_steps = max_steps
+        self.beam_size = beam_size
+        self.per_node_beam_size = per_node_beam_size or self.beam_size
+        self.num_keep_best = 1
+        self.length_penalty = 1
+        self.use_hypo = use_hypo
+        self.tokenizer = tokenizer
+
+    def search(
+        self,
+        start_predictions: torch.Tensor,
+        start_state: List[torch.Tensor],
+        step: StepFunctionType,
+        fsm: torch.Tensor,
+    ) -> Tuple[torch.Tensor, torch.Tensor]:
+        r"""
+        Given a starting state, a step function, and an FSM adjacency matrix, apply Constrained
+        Beam Search to find most likely target sequences satisfying specified constraints in FSM.
+
+        .. note::
+
+            If your step function returns ``-inf`` for some log probabilities
+            (like if you're using a masked log-softmax) then some of the "best"
+            sequences returned may also have ``-inf`` log probability. Specifically
+            this happens when the beam size is smaller than the number of actions
+            with finite log probability (non-zero probability) returned by the step function.
+            Therefore if you're using a mask you may want to check the results from ``search``
+            and potentially discard sequences with non-finite log probability.
+
+        Parameters
+        ----------
+        start_predictions : torch.Tensor
+            A tensor containing the initial predictions with shape ``(batch_size, )``.
These are
+            usually just ``@@BOUNDARY@@`` token indices.
+        start_state : ``List[torch.Tensor]``
+            The initial state passed to the ``step`` function. Each tensor in the state list
+            should have shape ``(batch_size, *)``, where ``*`` means any other
+            number of dimensions.
+        step : ``StepFunctionType``
+            A function that is responsible for computing the next most likely tokens, given the
+            current state and the predictions from the last time step. The function should accept
+            two arguments. The first being a tensor of shape ``(group_size,)``, representing the
+            index of the predicted tokens from the last time step, and the second being the
+            current state. The ``group_size`` will be ``batch_size * beam_size * num_fsm_states``
+            except in the initial step, for which it will just be ``batch_size``. The function is
+            expected to return a tuple, where the first element is a tensor of shape
+            ``(group_size, vocab_size)`` containing the log probabilities of the tokens for the
+            next step, and the second element is the updated state. The tensor in the state should
+            have shape ``(group_size, *)``, where ``*`` means any other number of dimensions.
+
+        Returns
+        -------
+        Tuple[torch.Tensor, torch.Tensor]
+            Tuple of ``(predictions, log_probabilities)``, where ``predictions``
+            has shape ``(batch_size, num_fsm_states, beam_size, max_steps)``
+            and ``log_probabilities`` has shape ``(batch_size, num_fsm_states, beam_size)``.
+        """
+        # shape: (batch_size, num_fsm_states, num_fsm_states, vocab_size)
+        batch_size, num_fsm_states, _, vocab_size = fsm.size()
+
+        # generated hypotheses
+        generated_hyps = [
+            [BeamHypotheses(self.num_keep_best, self.max_steps, self.length_penalty, early_stopping=False)
+             for _ in range(num_fsm_states)]
+            for bb in range(batch_size)
+        ]
+
+        # List of (batch_size, num_fsm_states, beam_size) tensors. One for each time step. Does not
+        # include the start symbols, which are implicit.
+        predictions: List[torch.Tensor] = []
+
+        # List of (batch_size, num_fsm_states, beam_size) tensors. One for each time step. None for
+        # the first. Stores the index n for the parent prediction.
+        backpointers: List[torch.Tensor] = []
+
+        # Calculate the first timestep. This is done outside the main loop because we are going
+        # from a single decoder input (the output from the encoder) to the top `beam_size`
+        # decoder outputs per FSM state. On the other hand, within the main loop we are going
+        # from the `beam_size` elements of the beam (per FSM state) to `beam_size`^2 candidates
+        # from which we will select the top `beam_size` elements for the next iteration.
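+        # Layout note: the search keeps ``batch_size * num_fsm_states * beam_size``
+        # hypotheses in parallel. Flat tensors below use exactly that ordering and
+        # are only reshaped to ``(batch_size, num_fsm_states, beam_size, ...)``
+        # when the top candidates per FSM state are selected.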
+ + curr_ids = ( + start_predictions.expand(batch_size, self.beam_size*num_fsm_states) + .reshape(batch_size*self.beam_size*num_fsm_states, 1) + ) + # shape: start_class_log_probabilities (batch_size, vocab_size) + start_class_logits, state = step(curr_ids, start_state) + start_class_log_probabilities = torch.nn.functional.log_softmax(start_class_logits, dim=-1) + start_class_log_probabilities = start_class_log_probabilities[:batch_size, :] + vocab_size = start_class_log_probabilities.size(-1) + + start_state_predictions = start_class_log_probabilities.view( + batch_size, 1, vocab_size + ).expand(batch_size, num_fsm_states, vocab_size) + + start_state_predictions = start_state_predictions.masked_fill( + (1 - fsm[:, 0, :, :]).to(dtype=torch.bool), float("-inf") + ) + + # (batch_size, num_fsm_states, beam_size) + start_top_log_probabilities, start_predicted_classes = start_state_predictions.topk( + self.beam_size + ) + # shape: (batch_size, num_fsm_states, beam_size) + last_log_probabilities = start_top_log_probabilities + + predictions.append(start_predicted_classes.view(batch_size, -1)) + + log_probs_after_end = torch.full((1, vocab_size), float("-inf")).to( + start_predictions.device + ) + log_probs_after_end[:, self._eos_token_ids] = 0.0 + + #state = { + #key: _enlarge_single_tensor(value, batch_size, num_fsm_states, self.beam_size) + #for (key, value) in state.items() + #} + + step_state_mask = fsm.view( + batch_size, num_fsm_states, num_fsm_states, 1, vocab_size + ).expand(batch_size, num_fsm_states, num_fsm_states, self.beam_size, vocab_size) + + curr_len = curr_ids.shape[1] + for timestep in range(self.max_steps - curr_len - 1): + # shape: (batch_size * beam_size * num_fsm_states, ) + last_predictions = predictions[-1].reshape( + batch_size * self.beam_size * num_fsm_states + ) + cur_finished = (last_predictions==self._eos_token_ids[0]) + for eos_token in self._eos_token_ids[1:]: + cur_finished = (cur_finished | (last_predictions==eos_token)) + if cur_finished.all(): + break + + curr_ids = torch.cat([curr_ids, last_predictions.unsqueeze(-1)], dim=1) + + class_logits, state = step(curr_ids, state) + class_log_probabilities = torch.nn.functional.log_softmax(class_logits, dim=-1) + #last_predictions_expanded = ( + #last_predictions.view(-1) + #.unsqueeze(-1) + #.expand(batch_size * num_fsm_states * self.beam_size, vocab_size) + #) + cur_finished_expanded = ( + cur_finished.unsqueeze(-1) + .expand(batch_size * num_fsm_states * self.beam_size, vocab_size) + ) + + cleaned_log_probabilities = torch.where( + #last_predictions_expanded == self._eos_token_ids, + cur_finished_expanded, + log_probs_after_end, + class_log_probabilities, + ) + cleaned_log_probabilities = cleaned_log_probabilities.view( + batch_size, num_fsm_states, self.beam_size, vocab_size + ) + + device = start_predictions.device + restricted_predicted_classes = torch.LongTensor( + batch_size, num_fsm_states, self.beam_size + ).to(start_predictions.device) + restricted_beam_log_probs = torch.FloatTensor( + batch_size, num_fsm_states, self.beam_size + ).to(start_predictions.device) + restricted_beam_indices = torch.LongTensor( + batch_size, num_fsm_states, self.beam_size + ).to(start_predictions.device) + + expanded_last_log_probabilities = last_log_probabilities.view( + batch_size, num_fsm_states, self.beam_size, 1 + ).expand(batch_size, num_fsm_states, self.beam_size, self.per_node_beam_size) + + for i in range(num_fsm_states): + # shape (batch_size, num_fsm_states, self.beam_size, vocab_size) + state_log_probabilities = 
cleaned_log_probabilities
+
+                state_log_probabilities = state_log_probabilities.masked_fill(
+                    (1 - step_state_mask[:, :, i, :, :]).to(dtype=torch.bool), -1e20
+                )
+                top_log_probabilities, predicted_classes = state_log_probabilities.topk(
+                    self.per_node_beam_size
+                )
+                summed_top_log_probabilities = (
+                    top_log_probabilities + expanded_last_log_probabilities
+                )
+                # shape: (batch_size, old_num_fsm_states * beam_size * per_node_beam_size)
+                reshaped_summed = summed_top_log_probabilities.reshape(batch_size, -1)
+
+                # shape: (batch_size, old_num_fsm_states * beam_size * per_node_beam_size)
+                reshaped_predicted_classes = predicted_classes.reshape(batch_size, -1)
+
+                if not self.use_hypo:
+                    # shape (batch_size, beam_size)
+                    state_beam_log_probs, state_beam_indices = reshaped_summed.topk(self.beam_size)
+                    # shape (batch_size, beam_size)
+                    state_predicted_classes = reshaped_predicted_classes.gather(1, state_beam_indices)
+                else:
+                    # shape (batch_size, beam_size*per_node_beam_size)
+                    candidate_beam_log_probs, candidate_beam_indices = reshaped_summed.topk(
+                        self.beam_size*self.per_node_beam_size, sorted=True, largest=True)
+                    # shape (batch_size, beam_size*per_node_beam_size)
+                    candidate_predicted_classes = reshaped_predicted_classes.gather(1, candidate_beam_indices)
+                    next_batch_beam = []
+                    for batch_ex in range(batch_size):
+                        next_sent_beam = []
+                        for word_id, beam_id, log_prob in zip(candidate_predicted_classes[batch_ex],
+                                candidate_beam_indices[batch_ex],
+                                candidate_beam_log_probs[batch_ex]):
+                            if word_id.item() in self._eos_token_ids:
+                                # Integer (floor) division here: true division (``/``)
+                                # yields a float tensor, which cannot be used as an index.
+                                generated_hyps[batch_ex][i].add(
+                                    curr_ids[batch_ex * self.beam_size*num_fsm_states + beam_id // self.per_node_beam_size, :].clone(),
+                                    log_prob.item()
+                                )
+                            else:
+                                next_sent_beam.append((word_id, beam_id, log_prob))
+                            if len(next_sent_beam) == self.beam_size:
+                                break
+                        assert len(next_sent_beam) == self.beam_size
+                        next_batch_beam.extend(next_sent_beam)
+                    state_predicted_classes = torch.tensor([x[0] for x in next_batch_beam],
+                            device=device).reshape(batch_size, self.beam_size)
+                    state_beam_indices = torch.tensor([x[1] for x in next_batch_beam],
+                            device=device).reshape(batch_size, self.beam_size)
+                    state_beam_log_probs = torch.tensor([x[2] for x in next_batch_beam],
+                            device=device).reshape(batch_size, self.beam_size)
+
+                restricted_predicted_classes[:, i, :] = state_predicted_classes
+                restricted_beam_indices[:, i, :] = state_beam_indices
+                restricted_beam_log_probs[:, i, :] = state_beam_log_probs
+
+            restricted_predicted_classes = restricted_predicted_classes.view(batch_size, -1)
+            predictions.append(restricted_predicted_classes)
+
+            # Floor division maps each flat candidate index back to its parent
+            # beam; true division (``/``) would produce a float tensor that
+            # breaks the integer ``gather`` in ``track_back_state`` below.
+            backpointer = restricted_beam_indices // self.per_node_beam_size
+            backpointers.append(backpointer.view(batch_size, -1))
+
+            last_log_probabilities = restricted_beam_log_probs.view(batch_size, num_fsm_states, -1)
+
+            def track_back_state(state_tensor):
+                _, *last_dims = state_tensor.size()
+                # shape: (batch_size, beam_size, *)
+                expanded_backpointer = backpointer.view(
+                    batch_size, num_fsm_states * self.beam_size, *([1] * len(last_dims))
+                ).expand(batch_size, num_fsm_states * self.beam_size, *last_dims)
+
+                # shape: (batch_size * beam_size, *)
+                return (
+                    state_tensor.reshape(batch_size, num_fsm_states * self.beam_size, *last_dims)
+                    .gather(1, expanded_backpointer)
+                    .reshape(batch_size * num_fsm_states * self.beam_size, *last_dims)
+                )
+            # reorder states
+            if state is not None:
+                state = tuple(track_back_state(value) for value in state)
+            curr_ids = track_back_state(curr_ids)
+
+        last_predictions = 
predictions[-1].reshape( + batch_size * self.beam_size * num_fsm_states + ) + curr_ids = torch.cat([curr_ids, last_predictions.unsqueeze(-1)], dim=1) + # Reconstruct the sequences. + # shape: [(batch_size, beam_size, 1)] + reconstructed_predictions = [predictions[-1].unsqueeze(2)] + + # shape: (batch_size, beam_size) + cur_backpointers = backpointers[-1] + + for timestep in range(len(predictions) - 2, 0, -1): + # shape: (batch_size, beam_size, 1) + cur_preds = predictions[timestep].gather(1, cur_backpointers).unsqueeze(2) + + reconstructed_predictions.append(cur_preds) + + # shape: (batch_size, beam_size) + cur_backpointers = backpointers[timestep - 1].gather(1, cur_backpointers) + + # shape: (batch_size, beam_size, 1) + final_preds = predictions[0].gather(1, cur_backpointers).unsqueeze(2) + + reconstructed_predictions.append(final_preds) + + # shape: (batch_size, beam_size, max_steps) + all_predictions = torch.cat(list(reversed(reconstructed_predictions)), 2) + all_predictions = all_predictions.view(batch_size, num_fsm_states, self.beam_size, -1) + assert (all_predictions == curr_ids.reshape(batch_size, num_fsm_states, + self.beam_size, -1)[:,:,:,1:]).all() + + if self.use_hypo: + decoded = all_predictions.new(batch_size, num_fsm_states, 1, + self.max_steps).fill_(self._eos_token_ids[0]) + scores = last_log_probabilities.new(batch_size, num_fsm_states, + 1).fill_(-1e5) + for batch_ex in range(batch_size): + for i in range(num_fsm_states): + beam = all_predictions[batch_ex, i, 0, :] + log_prob = last_log_probabilities[batch_ex, i, 0] + generated_hyps[batch_ex][i].add( + beam.clone(), + log_prob.item() + ) + hyps = generated_hyps[batch_ex][i].hyp + assert len(hyps) == 1 + score, sent = hyps[0] + decoded[batch_ex, i, 0, :len(sent)] = sent + scores[batch_ex, i, 0] = score + all_predictions = decoded + last_log_probabilities = scores + + # pad to the same length, otherwise DataParallel will give error + pad_len = self.max_steps - all_predictions.shape[-1] + if pad_len > 0: + padding_ids = all_predictions.new( + batch_size, num_fsm_states, self.beam_size, + pad_len).fill_(self._eos_token_ids[0]) + all_predictions = torch.cat([all_predictions, padding_ids], dim=-1) + + return all_predictions, last_log_probabilities + + +def select_best_beam_with_constraints( + beams: torch.Tensor, + beam_log_probabilities: torch.Tensor, + given_constraints: torch.Tensor, + min_constraints_to_satisfy: int, +) -> Tuple[torch.Tensor, torch.Tensor]: + r""" + Select the best beam which satisfies specified minimum constraints out of a total number of + given constraints. + + .. note:: + + The implementation of this function goes hand-in-hand with the FSM building implementation + in :meth:`~updown.utils.constraints.FiniteStateMachineBuilder.build` - it defines which + state satisfies which (basically, how many) constraints. If the "definition" of states + change, then selection of beams also changes accordingly. + + Parameters + ---------- + beams: torch.Tensor + A tensor of shape ``(batch_size, num_states, beam_size, max_decoding_steps)`` containing + decoded beams by :class:`~updown.modules.cbs.ConstrainedBeamSearch`. These beams are + sorted according to their likelihood (descending) in ``beam_size`` dimension. + beam_log_probabilities: torch.Tensor + A tensor of shape ``(batch_size, num_states, beam_size)`` containing likelihood of decoded + beams. + given_constraints: torch.Tensor + A tensor of shape ``(batch_size, )`` containing number of constraints given at the start + of decoding. 
+ min_constraints_to_satisfy: int + Minimum number of constraints to satisfy. This is either 2, or ``given_constraints`` if + they are less than 2. Beams corresponding to states not satisfying at least these number + of constraints will be dropped. Only up to 3 supported. + + Returns + ------- + Tuple[torch.Tensor, torch.Tensor] + Decoded sequence (beam) which has highest likelihood among beams satisfying constraints. + """ + batch_size, num_states, beam_size, max_decoding_steps = beams.size() + + best_beams: List[torch.Tensor] = [] + best_beam_log_probabilities: List[torch.Tensor] = [] + + for i in range(batch_size): + # fmt: off + valid_states = [ + s for s in range(2 ** given_constraints[i].item()) + if bin(s).count("1") >= min(given_constraints[i], min_constraints_to_satisfy) + ] + # fmt: on + + valid_beams = beams[i, valid_states, 0, :] + valid_beam_log_probabilities = beam_log_probabilities[i, valid_states, 0] + + selected_index = torch.argmax(valid_beam_log_probabilities) + best_beams.append(valid_beams[selected_index, :]) + best_beam_log_probabilities.append(valid_beam_log_probabilities[selected_index]) + + # shape: (batch_size, max_decoding_steps) + return (torch.stack(best_beams).long().to(beams.device), + torch.stack(best_beam_log_probabilities).to(beams.device)) + + +def load_wordforms(wordforms_tsvpath): + wordforms = {} + with open(wordforms_tsvpath, "r") as fp: + for line in fp: + parts = line.strip().split('\t') + wordforms[parts[0]] = parts[1].split(',') + return wordforms + + +class ConstraintBoxesReader(object): + r""" + A reader for annotation files containing detected bounding boxes. + For our use cases, the detections are from an object detector trained using Open Images. + """ + def __init__(self, boxes_tsvpath): + self._image_key_to_boxes = {} + with open(boxes_tsvpath, 'r') as fp: + for line in fp: + parts = line.strip().split('\t') + img_key = parts[0] + labels = json.loads(parts[1]) + boxes, class_names, scores = [], [], [] + for box in labels: + boxes.append(box['rect']) + class_names.append(box['class'].lower()) + scores.append(box['conf']) + boxes = np.array(boxes) + scores = np.array(scores) + self._image_key_to_boxes[img_key] = {"boxes": boxes, "class_names": class_names, "scores": scores} + + def __len__(self): + return len(self._image_key_to_boxes) + + def __getitem__(self, image_key): + # Some images may not have any boxes, handle that case too. + if image_key not in self._image_key_to_boxes: + return {"boxes": np.array([]), "class_names": [], "scores": + np.array([])} + else: + return self._image_key_to_boxes[image_key] + + +class ConstraintFilter(object): + r""" + A helper class to perform constraint filtering for providing sensible set of constraint words + while decoding. + + Extended Summary + ---------------- + The original work proposing `Constrained Beam Search `_ + selects constraints randomly. + + We remove certain categories from a fixed set of "blacklisted" categories, which are either + too rare, not commonly uttered by humans, or well covered in COCO. We resolve overlapping + detections (IoU >= 0.85) by removing the higher-order of the two objects (e.g. , a "dog" would + suppress a ‘mammal’) based on the Open Images class hierarchy (keeping both if equal). + Finally, we take the top-k objects based on detection confidence as constraints. + + Parameters + ---------- + hierarchy_jsonpath: str + Path to a JSON file containing a hierarchy of Open Images object classes. 
+ nms_threshold: float, optional (default = 0.85) + NMS threshold for suppressing generic object class names during constraint filtering, + for two boxes with IoU higher than this threshold, "dog" suppresses "animal". + max_given_constraints: int, optional (default = 3) + Maximum number of constraints which can be specified for CBS decoding. Constraints are + selected based on the prediction confidence score of their corresponding bounding boxes. + """ + + # fmt: off + BLACKLIST: List[str] = [ + "auto part", "bathroom accessory", "bicycle wheel", "boy", "building", "clothing", + "door handle", "fashion accessory", "footwear", "girl", "hiking equipment", "human arm", + "human beard", "human body", "human ear", "human eye", "human face", "human foot", + "human hair", "human hand", "human head", "human leg", "human mouth", "human nose", + "land vehicle", "mammal", "man", "person", "personal care", "plant", "plumbing fixture", + "seat belt", "skull", "sports equipment", "tire", "tree", "vehicle registration plate", + "wheel", "woman", "__background__", + ] + # fmt: on + + REPLACEMENTS: Dict[str, str] = { + "band-aid": "bandaid", + "wood-burning stove": "wood burning stove", + "kitchen & dining room table": "table", + "salt and pepper shakers": "salt and pepper", + "power plugs and sockets": "power plugs", + "luggage and bags": "luggage", + } + + def __init__( + self, hierarchy_jsonpath, nms_threshold, max_given_constraints + ): + def __read_hierarchy(node, parent=None): + # Cast an ``anytree.AnyNode`` (after first level of recursion) to dict. + attributes = dict(node) + children = attributes.pop("Subcategory", []) + + node = anytree.AnyNode(parent=parent, **attributes) + for child in children: + __read_hierarchy(child, parent=node) + return node + + # Read the object class hierarchy as a tree, to make searching easier. + self._hierarchy = __read_hierarchy(json.load(open(hierarchy_jsonpath))) + + self._nms_threshold = nms_threshold + self._max_given_constraints = max_given_constraints + + def __call__(self, boxes: np.ndarray, class_names: List[str], scores: np.ndarray) -> List[str]: + + # Remove padding boxes (which have prediction confidence score = 0), and remove boxes + # corresponding to all blacklisted classes. These will never become CBS constraints. + keep_indices = [] + for i in range(len(class_names)): + if scores[i] > 0 and class_names[i] not in self.BLACKLIST: + keep_indices.append(i) + + boxes = boxes[keep_indices] + class_names = [class_names[i] for i in keep_indices] + scores = scores[keep_indices] + + # Perform non-maximum suppression according to category hierarchy. For example, for highly + # overlapping boxes on a dog, "dog" suppresses "animal". + keep_indices = self._nms(boxes, class_names) + boxes = boxes[keep_indices] + class_names = [class_names[i] for i in keep_indices] + scores = scores[keep_indices] + + # Retain top-k constraints based on prediction confidence score. + class_names_and_scores = sorted(list(zip(class_names, scores)), key=lambda t: -t[1]) + class_names_and_scores = class_names_and_scores[: self._max_given_constraints] + + # Replace class name according to ``self.REPLACEMENTS``. + class_names = [self.REPLACEMENTS.get(t[0], t[0]) for t in class_names_and_scores] + + # Drop duplicates. + class_names = list(set(class_names)) + return class_names + + def _nms(self, boxes: np.ndarray, class_names: List[str]): + if len(class_names) == 0: + return [] + + # For object class, get the height of its corresponding node in the hierarchy tree. 
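+        # (``anytree`` defines a node's height as the number of edges on the
+        # longest path down to a leaf, so leaf classes such as "dog" have
+        # height 0 while generic ancestors such as "mammal" sit higher.)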
+ # Less height => finer-grained class name => higher score. + heights = np.array( + [ + anytree.search.findall(self._hierarchy, lambda node: node.LabelName.lower() in c)[0].height + for c in class_names + ] + ) + # Get a sorting of the heights in ascending order, i.e. higher scores first. + score_order = heights.argsort() + + # Compute areas for calculating intersection over union. Add 1 to avoid division by zero + # for zero area (padding/dummy) boxes. + x1, y1, x2, y2 = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3] + areas = (x2 - x1 + 1) * (y2 - y1 + 1) + + # Fill "keep_boxes" with indices of boxes to keep, move from left to right in + # ``score_order``, keep current box index (score_order[0]) and suppress (discard) other + # indices of boxes having lower IoU threshold with current box from ``score_order``. + # list. Note the order is a sorting of indices according to scores. + keep_box_indices = [] + + while score_order.size > 0: + # Keep the index of box under consideration. + current_index = score_order[0] + keep_box_indices.append(current_index) + + # For the box we just decided to keep (score_order[0]), compute its IoU with other + # boxes (score_order[1:]). + xx1 = np.maximum(x1[score_order[0]], x1[score_order[1:]]) + yy1 = np.maximum(y1[score_order[0]], y1[score_order[1:]]) + xx2 = np.minimum(x2[score_order[0]], x2[score_order[1:]]) + yy2 = np.minimum(y2[score_order[0]], y2[score_order[1:]]) + + intersection = np.maximum(0.0, xx2 - xx1 + 1) * np.maximum(0.0, yy2 - yy1 + 1) + union = areas[score_order[0]] + areas[score_order[1:]] - intersection + + # Perform NMS for IoU >= 0.85. Check score, boxes corresponding to object + # classes with smaller/equal height in hierarchy cannot be suppressed. + keep_condition = np.logical_or( + heights[score_order[1:]] >= heights[score_order[0]], + intersection / union <= self._nms_threshold, + ) + + # Only keep the boxes under consideration for next iteration. + score_order = score_order[1:] + score_order = score_order[np.where(keep_condition)[0]] + + return keep_box_indices + + +class FiniteStateMachineBuilder(object): + r""" + A helper class to build a Finite State Machine for Constrained Beam Search, as per the + state transitions shown in Figures 7 through 9 from our + `paper appendix `_. + + The FSM is constructed on a per-example basis, and supports up to three constraints, + with each constraint being an Open Image class having up to three words (for example + ``salt and pepper``). Each word in the constraint may have several word-forms (for + example ``dog``, ``dogs``). + + .. note:: Providing more than three constraints may work but it is not tested. + + **Details on Finite State Machine Representation** + + .. image:: ../_static/fsm.jpg + + The FSM is representated as an adjacency matrix. Specifically, it is a tensor of shape + ``(num_total_states, num_total_states, vocab_size)``. In this, ``fsm[S1, S2, W] = 1`` indicates + a transition from "S1" to "S2" if word "W" is decoded. For example, consider **Figure 9**. + The decoding is at initial state (``q0``), constraint word is ``D1``, while any other word + in the vocabulary is ``Dx``. Then we have:: + + fsm[0, 0, D1] = 0 and fsm[0, 1, D1] = 1 # arrow from q0 to q1 + fsm[0, 0, Dx] = 1 and fsm[0, 1, Dx] = 0 # self-loop on q0 + + Consider up to "k" (3) constraints and up to "w" (3) words per constraint. We define these + terms (as members in the class). + + .. 
code-block:: + + _num_main_states = 2 ** k (8) + _total_states = num_main_states * w (24) + + First eight states are considered as "main states", and will always be a part of the FSM. For + less than "k" constraints, some states will be unreachable, hence "useless". These will be + ignored automatically. + + For any multi-word constraint, we use extra "sub-states" after first ``2 ** k`` states. We + make connections according to **Figure 7-8** for such constraints. We dynamically trim unused + sub-states to save computation during decoding. That said, ``num_total_states`` dimension is + at least 8. + + A state "q" satisfies number of constraints equal to the number of "1"s in the binary + representation of that state. For example: + + - state "q0" (000) satisfies 0 constraints. + - state "q1" (001) satisfies 1 constraint. + - state "q2" (010) satisfies 1 constraint. + - state "q3" (011) satisfies 2 constraints. + + and so on. Only main states fully satisfy constraints. + + Parameters + ---------- + tokenizer: BertTokenizer + wordforms_tsvpath: str + Path to a TSV file containing two fields: first is the name of Open Images object class + and second field is a comma separated list of words (possibly singular and plural forms + of the word etc.) which could be CBS constraints. + max_given_constraints: int, optional (default = 3) + Maximum number of constraints which could be given while cbs decoding. Up to three + supported. + max_words_per_constraint: int, optional (default = 3) + Maximum number of words per constraint for multi-word constraints. Note that these are + for multi-word object classes (for example: ``fire hydrant``) and not for multiple + "word-forms" of a word, like singular-plurals. Up to three supported. + """ + + def __init__( + self, + tokenizer, + constraint2tokens_tsvpath, + tokenforms_tsvpath, + max_given_constraints, + max_words_per_constraint = 4, + ): + self._tokenizer = tokenizer + self._max_given_constraints = max_given_constraints + self._max_words_per_constraint = max_words_per_constraint + + self._num_main_states = 2 ** max_given_constraints + self._num_total_states = self._num_main_states * max_words_per_constraint + + self._wordforms: Dict[str, List[str]] = load_wordforms(tokenforms_tsvpath) + self._constraint2tokens = load_wordforms(constraint2tokens_tsvpath) + + def build(self, constraints: List[str]): + r""" + Build a finite state machine given a list of constraints. + + Parameters + ---------- + constraints: List[str] + A list of up to three (possibly) multi-word constraints, in our use-case these are + Open Images object class names. + + Returns + ------- + Tuple[torch.Tensor, int] + A finite state machine as an adjacency matrix, index of the next available unused + sub-state. This is later used to trim the unused sub-states from FSM. + """ + assert len(constraints) <= self._max_given_constraints + fsm = torch.zeros(self._num_total_states, self._num_total_states, dtype=torch.uint8) + + # Self loops for all words on main states. 
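+        # Sub-states intentionally get no self loops: once inside a multi-word
+        # constraint, decoding must either emit the next constraint token or be
+        # reset to the originating main state (wired up in ``_connect``).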
+ fsm[range(self._num_main_states), range(self._num_main_states)] = 1 + + fsm = fsm.unsqueeze(-1).repeat(1, 1, self._tokenizer.vocab_size) + + substate_idx = self._num_main_states + for i, constraint in enumerate(constraints): + fsm, substate_idx = self._add_nth_constraint(fsm, i + 1, substate_idx, constraint) + + return fsm, substate_idx + + def _add_nth_constraint(self, fsm: torch.Tensor, n: int, substate_idx: int, constraint: str): + r""" + Given an (incomplete) FSM matrix with transitions for "(n - 1)" constraints added, add + all transitions for the "n-th" constraint. + + Parameters + ---------- + fsm: torch.Tensor + A tensor of shape ``(num_total_states, num_total_states, vocab_size)`` representing an + FSM under construction. + n: int + The cardinality of constraint to be added. Goes as 1, 2, 3... (not zero-indexed). + substate_idx: int + An index which points to the next unused position for a sub-state. It starts with + ``(2 ** num_main_states)`` and increases according to the number of multi-word + constraints added so far. The calling method, :meth:`build` keeps track of this. + constraint: str + A (possibly) multi-word constraint, in our use-case it is an Open Images object class + name. + + Returns + ------- + Tuple[torch.Tensor, int] + FSM with added connections for the constraint and updated ``substate_idx`` pointing to + the next unused sub-state. + """ + #words = constraint.split() + words = [] + for w in constraint.split(): + words.extend(self._constraint2tokens[w]) + #TODO: set max_words_per_constraint + #assert len(words) <= self._max_words_per_constraint + if len(words) > self._max_words_per_constraint: + words = words[:self._max_words_per_constraint] + connection_stride = 2 ** (n - 1) + + from_state = 0 + while from_state < self._num_main_states: + for _ in range(connection_stride): + word_from_state = from_state + for i, word in enumerate(words): + # fmt: off + # Connect to a sub-state for all tokens in multi-word constraint except last. + if i != len(words) - 1: + fsm = self._connect( + fsm, word_from_state, substate_idx, word, reset_state=from_state + ) + word_from_state = substate_idx + substate_idx += 1 + else: + fsm = self._connect( + fsm, word_from_state, from_state + connection_stride, word, + reset_state=from_state, + ) + # fmt: on + from_state += 1 + from_state += connection_stride + return fsm, substate_idx + + def _connect( + self, fsm: torch.Tensor, from_state: int, to_state: int, word: str, reset_state: int = None + ): + r""" + Add a connection between two states for a particular word (and all its word-forms). This + means removing self-loop from ``from_state`` for all word-forms of ``word`` and connecting + them to ``to_state``. + + Extended Summary + ---------------- + In case of multi-word constraints, we return back to the ``reset_state`` for any utterance + other than ``word``, to satisfy a multi-word constraint if all words are decoded + consecutively. For example: for "fire hydrant" as a constraint between Q0 and Q1, we reach + a sub-state "Q8" on decoding "fire". Go back to main state "Q1" on decoding "hydrant" + immediately after, else we reset back to main state "Q0". + + Parameters + ---------- + fsm: torch.Tensor + A tensor of shape ``(num_total_states, num_total_states, vocab_size)`` representing an + FSM under construction. + from_state: int + Origin state to make a state transition. + to_state: int + Destination state to make a state transition. + word: str + The word which serves as a constraint for transition between given two states. 
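+            All registered word-forms of ``word`` (e.g. singular/plural forms
+            from the word-forms TSV) receive the same transition.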
+ reset_state: int, optional (default = None) + State to reset otherwise. This is only valid if ``from_state`` is a sub-state. + + Returns + ------- + torch.Tensor + FSM with the added connection. + """ + wordforms = self._wordforms.get(word, [word]) + #wordform_indices = [self._vocabulary.get_token_index(w) for w in wordforms] + wordform_indices = self._tokenizer.convert_tokens_to_ids(wordforms) + + for wordform_index in wordform_indices: + fsm[from_state, to_state, wordform_index] = 1 + fsm[from_state, from_state, wordform_index] = 0 + + if reset_state is not None: + fsm[from_state, from_state, :] = 0 + fsm[from_state, reset_state, :] = 1 + for wordform_index in wordform_indices: + fsm[from_state, reset_state, wordform_index] = 0 + + return fsm + diff --git a/oscar/utils/logger.py b/oscar/utils/logger.py new file mode 100644 index 0000000..9deee1c --- /dev/null +++ b/oscar/utils/logger.py @@ -0,0 +1,102 @@ +# Copyright (c) 2020 Microsoft Corporation. Licensed under the MIT license. + +import logging +from logging import StreamHandler, Handler, getLevelName +import os +import sys + + +# this class is a copy of logging.FileHandler except we end self.close() +# at the end of each emit. While closing file and reopening file after each +# write is not efficient, it allows us to see partial logs when writing to +# fused Azure blobs, which is very convenient +class FileHandler(StreamHandler): + """ + A handler class which writes formatted logging records to disk files. + """ + def __init__(self, filename, mode='a', encoding=None, delay=False): + """ + Open the specified file and use it as the stream for logging. + """ + # Issue #27493: add support for Path objects to be passed in + filename = os.fspath(filename) + #keep the absolute path, otherwise derived classes which use this + #may come a cropper when the current directory changes + self.baseFilename = os.path.abspath(filename) + self.mode = mode + self.encoding = encoding + self.delay = delay + if delay: + #We don't open the stream, but we still need to call the + #Handler constructor to set level, formatter, lock etc. + Handler.__init__(self) + self.stream = None + else: + StreamHandler.__init__(self, self._open()) + + def close(self): + """ + Closes the stream. + """ + self.acquire() + try: + try: + if self.stream: + try: + self.flush() + finally: + stream = self.stream + self.stream = None + if hasattr(stream, "close"): + stream.close() + finally: + # Issue #19523: call unconditionally to + # prevent a handler leak when delay is set + StreamHandler.close(self) + finally: + self.release() + + def _open(self): + """ + Open the current base file with the (original) mode and encoding. + Return the resulting stream. + """ + return open(self.baseFilename, self.mode, encoding=self.encoding) + + def emit(self, record): + """ + Emit a record. + + If the stream was not opened because 'delay' was specified in the + constructor, open it before calling the superclass's emit. 
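+        The stream is then closed again after each record (see the comment at
+        the top of this class), so partial logs stay visible on fuse-mounted
+        Azure blobs.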
+        """
+        if self.stream is None:
+            self.stream = self._open()
+        StreamHandler.emit(self, record)
+        self.close()
+
+    def __repr__(self):
+        level = getLevelName(self.level)
+        return '<%s %s (%s)>' % (self.__class__.__name__, self.baseFilename, level)
+
+
+def setup_logger(name, save_dir, distributed_rank, filename="log.txt"):
+    logger = logging.getLogger(name)
+    logger.setLevel(logging.DEBUG)
+    # don't log results for the non-master process
+    if distributed_rank > 0:
+        return logger
+    ch = logging.StreamHandler(stream=sys.stdout)
+    ch.setLevel(logging.DEBUG)
+    formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s: %(message)s")
+    ch.setFormatter(formatter)
+    logger.addHandler(ch)
+
+    if save_dir:
+        fh = FileHandler(os.path.join(save_dir, filename))
+        fh.setLevel(logging.DEBUG)
+        fh.setFormatter(formatter)
+        logger.addHandler(fh)
+
+    return logger
+
diff --git a/oscar/utils/misc.py b/oscar/utils/misc.py
new file mode 100644
index 0000000..6b6df2e
--- /dev/null
+++ b/oscar/utils/misc.py
@@ -0,0 +1,46 @@
+# Copyright (c) 2020 Microsoft Corporation. Licensed under the MIT license.
+
+import errno
+import os
+import os.path as op
+import yaml
+import random
+import torch
+import numpy as np
+
+
+def mkdir(path):
+    # if it is the current folder, skip.
+    if path == '':
+        return
+    try:
+        os.makedirs(path)
+    except OSError as e:
+        if e.errno != errno.EEXIST:
+            raise
+
+
+def set_seed(seed, n_gpu):
+    random.seed(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    if n_gpu > 0:
+        torch.cuda.manual_seed_all(seed)
+
+
+def load_from_yaml_file(yaml_file):
+    with open(yaml_file, 'r') as fp:
+        # safe_load avoids arbitrary object construction and the PyYAML 5.1+
+        # warning about calling yaml.load() without a Loader.
+        return yaml.safe_load(fp)
+
+
+def find_file_path_in_yaml(fname, root):
+    # returns None when fname is None
+    if fname is not None:
+        if op.isfile(fname):
+            return fname
+        elif op.isfile(op.join(root, fname)):
+            return op.join(root, fname)
+        else:
+            raise FileNotFoundError(
+                errno.ENOENT, os.strerror(errno.ENOENT), op.join(root, fname)
+            )
+
diff --git a/oscar/utils/task_utils.py b/oscar/utils/task_utils.py
new file mode 100644
index 0000000..67b510f
--- /dev/null
+++ b/oscar/utils/task_utils.py
@@ -0,0 +1,442 @@
+# Copyright (c) 2020 Microsoft Corporation. Licensed under the MIT license.
+
+from __future__ import absolute_import, division, print_function
+
+import csv, json
+import logging
+import os
+import sys
+from io import open
+import _pickle as cPickle
+import torch
+
+logger = logging.getLogger(__name__)
+
+
+class InputInstance(object):
+    """A single training/test example for simple sequence classification."""
+
+    def __init__(self, guid, text_a, text_b=None, label=None, score=None, img_key=None, q_id=None):
+        """Constructs an InputInstance.
+
+        Args:
+            guid: Unique id for the example.
+            text_a: string. The untokenized text of the first sequence. For single
+                sequence tasks, only this sequence must be specified.
+            text_b: (Optional) string. The untokenized text of the second sequence.
+                Must be specified only for sequence pair tasks.
+            label: (Optional) string. The label of the example. This should be
+                specified for train and dev examples, but not for test examples.
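+            score: (Optional) float or list of floats. Soft label score(s)
+                accompanying ``label`` (e.g. VQA answer confidences).
+            img_key: (Optional) key used to look up the image region features
+                for this example.
+            q_id: (Optional) question id, kept so test predictions can be
+                written in the submission format.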
+ """ + + self.guid = guid + self.text_a = text_a + self.text_b = text_b + self.label = label + self.score = score + self.img_key = img_key + self.q_id = q_id + + +class InputFeat(object): + """A single set of features of data.""" + + def __init__(self, input_ids, input_mask, segment_ids, label_id, score, img_feat): + self.input_ids = input_ids + self.input_mask = input_mask + self.segment_ids = segment_ids + self.label_id = label_id + self.score = score + self.img_feat = img_feat + + +class DataProcessor(object): + """Base class for data converters for sequence classification data sets.""" + + def get_train_examples(self, data_dir): + """Gets a collection of `InputExample`s for the train set.""" + raise NotImplementedError() + + def get_dev_examples(self, data_dir): + """Gets a collection of `InputExample`s for the dev set.""" + raise NotImplementedError() + + def get_labels(self): + """Gets the list of labels for this data set.""" + raise NotImplementedError() + + @classmethod + def _read_tsv(cls, input_file, quotechar=None): + """Reads a tab separated value file.""" + with open(input_file, "r", encoding="utf-8-sig") as f: + reader = csv.reader(f, delimiter="\t", quotechar=quotechar) + lines = [] + for line in reader: + if sys.version_info[0] == 2: + line = list(unicode(cell, 'utf-8') for cell in line) + lines.append(line) + return lines + + +class VQATextProcessor(DataProcessor): + """ Processor for the VQA Text data set. """ + + def get_train_examples(self, data_dir, file_name='train2014_qla.json'): + """ See base class.""" + + lines = json.load(open(os.path.join(data_dir, file_name))) + return self._create_examples(lines, "train") + + #return self._create_examples(self._read_tsv(os.path.join(data_dir, "train2014_qla.tsv")), "train") + + def get_dev_examples(self, data_dir, file_name='val2014_qla.json'): + """ See base class.""" + + lines = json.load(open(os.path.join(data_dir, file_name))) + return self._create_examples(lines, "dev") + + #return self._create_examples(self._read_tsv(os.path.join(data_dir, "val2014_qla.tsv")), "dev") + + def get_test_examples(self, data_dir, file_name='test2015_qla.json'): + """ See base class.""" + + lines = json.load(open(os.path.join(data_dir, file_name))) + return self._create_examples(lines, "test") + + def get_labels(self, label_file): + """ See base class.""" + + ans2label = cPickle.load(open(label_file, 'rb')) + return list(ans2label.values()) + #return ["entailment", "not_entailment"] + + def _create_examples(self, lines, set_type): + """Creates examples for the training and dev sets.""" + + examples = [] + for (i, line) in enumerate(lines): + if set_type!='test' and len(line['an']) == 0: continue + + guid = "%s-%s" % (set_type, str(i)) + text_a = line['q'] + text_b = line['o'].replace(';', ' ').strip() #line['o'] + label = None if set_type.startswith('test') else line['an'] + score = None if set_type.startswith('test') else line['s'] + img_key = line['img_id'] + q_id = int(line['q_id']) if set_type.startswith('test') else 0 + examples.append(InputInstance(guid=guid, text_a=text_a, text_b=text_b, label=label, score=score, img_key=img_key, q_id=q_id)) + return examples + +class VQATextAProcessor(DataProcessor): + """ Processor for the VQA Text data set. 
""" + + def get_train_examples(self, data_dir, file_name='train2014_qla.json'): + """ See base class.""" + + lines = json.load(open(os.path.join(data_dir, file_name))) + return self._create_examples(lines, "train") + + #return self._create_examples(self._read_tsv(os.path.join(data_dir, "train2014_qla.tsv")), "train") + + def get_dev_examples(self, data_dir, file_name='val2014_qla.json'): + """ See base class.""" + + lines = json.load(open(os.path.join(data_dir, file_name))) + return self._create_examples(lines, "dev") + + #return self._create_examples(self._read_tsv(os.path.join(data_dir, "val2014_qla.tsv")), "dev") + + def get_test_examples(self, data_dir, file_name='test2015_qla.json'): + """ See base class.""" + + lines = json.load(open(os.path.join(data_dir, file_name))) + return self._create_examples(lines, "test") + + def get_labels(self, label_file): + """ See base class.""" + + ans2label = cPickle.load(open(label_file, 'rb')) + return list(ans2label.values()) + + def _create_examples(self, lines, set_type): + """Creates examples for the training and dev sets.""" + + examples = [] + for (i, line) in enumerate(lines): + if set_type!='test' and len(line['an']) == 0: continue + + guid = "%s-%s" % (set_type, str(i)) + text_a = line['q'] + text_b = None # line['o'] # or None + label = None if set_type.startswith('test') else line['an'] + score = None if set_type.startswith('test') else line['s'] + img_key = line['img_id'] + q_id = int(line['q_id']) if set_type.startswith('test') else 0 + examples.append(InputInstance(guid=guid, text_a=text_a, text_b=text_b, label=label, score=score, img_key=img_key, q_id=q_id)) + return examples + +class GQAProcessor(DataProcessor): + """ Processor for the GQA data set. """ + + def get_train_examples(self, data_dir, file_name='train2014_qla.json'): + """ See base class.""" + + lines = json.load(open(os.path.join(data_dir, file_name))) + return self._create_examples(lines, "train") + + #return self._create_examples(self._read_tsv(os.path.join(data_dir, "train2014_qla.tsv")), "train") + + def get_dev_examples(self, data_dir, file_name='val2014_qla.json'): + """ See base class.""" + + lines = json.load(open(os.path.join(data_dir, file_name))) + return self._create_examples(lines, "dev") + + #return self._create_examples(self._read_tsv(os.path.join(data_dir, "val2014_qla.tsv")), "dev") + + def get_test_examples(self, data_dir, file_name='test2015_qla.json'): + """ See base class.""" + + lines = json.load(open(os.path.join(data_dir, file_name))) + return self._create_examples(lines, "test") + + def get_labels(self, label_file='trainval_testdev_all_ans2label.pkl'): + """ See base class.""" + + ans2label = cPickle.load(open(label_file, 'rb')) + return list(ans2label.values()) + + def _create_examples(self, lines, set_type): + """Creates examples for the training and dev sets.""" + + examples = [] + for (i, line) in enumerate(lines): + if set_type!='test' and len(line['an']) == 0: continue + + guid = "%s-%s" % (set_type, str(i)) + text_a = line['q'] + text_b = line['o'] # or None + label = None if set_type.startswith('test') else line['an'] + score = 0 + img_key = line['img_id'] + q_id = int(line['q_id']) if set_type.startswith('test') else 0 + examples.append(InputInstance(guid=guid, text_a=text_a, text_b=text_b, label=label, score=score, img_key=img_key, q_id=q_id)) + return examples + +class NLVRProcessor(DataProcessor): + """ Processor for the NLVR data set. 
""" + + def get_train_examples(self, data_dir, use_label_seq=True, file_name='nlvr2_train.json'): + """ See base class.""" + + lines = json.load(open(os.path.join(data_dir, file_name))) + return self._create_examples(lines, "train", use_label_seq) + + #return self._create_examples(self._read_tsv(os.path.join(data_dir, "train2014_qla.tsv")), "train") + + def get_dev_examples(self, data_dir, use_label_seq=True, file_name='nlvr2_dev.json'): + """ See base class.""" + + lines = json.load(open(os.path.join(data_dir, file_name))) + return self._create_examples(lines, "dev", use_label_seq) + + #return self._create_examples(self._read_tsv(os.path.join(data_dir, "val2014_qla.tsv")), "dev") + + def get_test_examples(self, data_dir, use_label_seq=True, file_name='nlvr2_test1.json'): + """ See base class.""" + + lines = json.load(open(os.path.join(data_dir, file_name))) + return self._create_examples(lines, "test", use_label_seq) + + def get_labels(self, label_file=None): + """ See base class.""" + + #ans2label = cPickle.load(open(label_file, 'rb')) + #return list(ans2label.values()) + return [0, 1] + + def _create_examples(self, lines, set_type, use_label_seq=True): + """ Creates examples for the training and dev sets. """ + + examples = [] + for (i, line) in enumerate(lines): + guid = "%s-%s" % (set_type, str(i)) + text_a = line['q'] + text_b = line['o'] if use_label_seq else None + label = line['label'] #None if set_type.startswith('test') else line['label'] + score = 0 + img_key = line['img_id'] #[line['img_left'], line['img_left']] + q_id = 0 #int(line['q_id']) if set_type.startswith('test') else 0 + examples.append(InputInstance(guid=guid, text_a=text_a, text_b=text_b, label=label, score=score, img_key=img_key, q_id=q_id)) + return examples + + +def convert_examples_to_features_vqa(examples, img_feats, label_list, max_img_seq_length, max_seq_length, + tokenizer, output_mode, + cls_token_at_end=False, pad_on_left=False, + cls_token='[CLS]', sep_token='[SEP]', pad_token=0, + sequence_a_segment_id=0, sequence_b_segment_id=1, + cls_token_segment_id=1, pad_token_segment_id=0, + mask_padding_with_zero=True): + """ Loads a data file into a list of `InputBatch`s + `cls_token_at_end` define the location of the CLS token: + - False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP] + - True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS] + `cls_token_segment_id` define the segment id associated to the CLS token (0 for BERT, 2 for XLNet) + """ + + label_map = {label:i for i, label in enumerate(label_list)} + + features = [] + #debug: + debug_size = 500 + + for (ex_index, example) in enumerate(examples[0: ]): + if len(example.label) == 0: continue + if ex_index % 10000 == 0: + logger.info("Writing example %d of %d" % (ex_index, len(examples))) + + tokens_a = tokenizer.tokenize(example.text_a) + + tokens_b = None + if example.text_b: + tokens_b = tokenizer.tokenize(example.text_b) + # Modifies `tokens_a` and `tokens_b` in place so that the total + # length is less than the specified length. + # Account for [CLS], [SEP], [SEP] with "- 3" + _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3) + else: + # Account for [CLS] and [SEP] with "- 2" + if len(tokens_a) > max_seq_length - 2: + tokens_a = tokens_a[:(max_seq_length - 2)] + + # The convention in BERT is: + # (a) For sequence pairs: + # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] + # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 + # (b) For single sequences: + # tokens: [CLS] the dog is hairy . 
+        # The convention in BERT is:
+        # (a) For sequence pairs:
+        #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
+        #  type_ids:   0   0    0    0    0      0    0   0   1  1  1   1  1   1
+        # (b) For single sequences:
+        #  tokens:   [CLS] the dog is hairy . [SEP]
+        #  type_ids:   0    0   0  0    0   0   0
+        #
+        # Where "type_ids" are used to indicate whether this is the first
+        # sequence or the second sequence. The embedding vectors for `type=0`
+        # and `type=1` were learned during pre-training and are added to the
+        # wordpiece embedding vector (and position vector). This is not
+        # *strictly* necessary since the [SEP] token unambiguously separates
+        # the sequences, but it makes it easier for the model to learn the
+        # concept of sequences.
+        #
+        # For classification tasks, the first vector (corresponding to [CLS])
+        # is used as the "sentence vector". Note that this only makes sense
+        # because the entire model is fine-tuned.
+        tokens = tokens_a + [sep_token]
+        segment_ids = [sequence_a_segment_id] * len(tokens)
+
+        if tokens_b:
+            tokens += tokens_b + [sep_token]
+            segment_ids += [sequence_b_segment_id] * (len(tokens_b) + 1)
+
+        if cls_token_at_end:
+            tokens = tokens + [cls_token]
+            segment_ids = segment_ids + [cls_token_segment_id]
+        else:
+            tokens = [cls_token] + tokens
+            segment_ids = [cls_token_segment_id] + segment_ids
+
+        input_ids = tokenizer.convert_tokens_to_ids(tokens)
+
+        # The mask has 1 for real tokens and 0 for padding tokens. Only real
+        # tokens are attended to.
+        input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
+
+        # Zero-pad up to the sequence length.
+        padding_length = max_seq_length - len(input_ids)
+        if pad_on_left:
+            input_ids = ([pad_token] * padding_length) + input_ids
+            input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
+            segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
+        else:
+            input_ids = input_ids + ([pad_token] * padding_length)
+            input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
+            segment_ids = segment_ids + ([pad_token_segment_id] * padding_length)
+
+        assert len(input_ids) == max_seq_length
+        assert len(input_mask) == max_seq_length
+        assert len(segment_ids) == max_seq_length
+
+        # Image features: img_feats is a numpy object array wrapping a dict
+        # from image key to a (num_regions, feat_dim) feature matrix.
+        img_feat = img_feats.item().get(example.img_key)
+        if img_feat.shape[0] > max_img_seq_length:
+            # Too many regions: keep the first max_img_seq_length.
+            img_feat = img_feat[0:max_img_seq_length, ]
+            if max_img_seq_length > 0:
+                input_mask = input_mask + [1 if mask_padding_with_zero else 0] * img_feat.shape[0]
+        else:
+            # Too few regions: pad with all-zero rows and mask them out.
+            if max_img_seq_length > 0:
+                input_mask = input_mask + [1 if mask_padding_with_zero else 0] * img_feat.shape[0]
+            padding_matrix = torch.zeros((max_img_seq_length - img_feat.shape[0], img_feat.shape[1]))
+            img_feat = torch.cat((img_feat, padding_matrix), 0)
+            if max_img_seq_length > 0:
+                input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_matrix.shape[0])
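+
+        # At this point input_mask covers both modalities: max_seq_length
+        # entries for the text (1 = real token, 0 = padding) followed by
+        # max_img_seq_length entries for the image regions, padded the same
+        # way. segment_ids stays text-length; the region features themselves
+        # are handed to the model separately via img_feat.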
%s)" % (example.label, label_id)) + logger.info("score: %s (score = %s)" % (example.score, score)) + + features.append(InputFeat(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=label_id, score=score, img_feat=img_feat)) + return features + + +def _truncate_seq_pair(tokens_a, tokens_b, max_length): + """Truncates a sequence pair in place to the maximum length.""" + + # This is a simple heuristic which will always truncate the longer sequence + # one token at a time. This makes more sense than truncating an equal percent + # of tokens from each, since if one sequence is very short then each token + # that's truncated likely contains more information than a longer sequence. + while True: + total_length = len(tokens_a) + len(tokens_b) + if total_length <= max_length: + break + if len(tokens_a) > len(tokens_b): + tokens_a.pop() + else: + tokens_b.pop() + + +processors = { + "vqa_text": VQATextProcessor, + "vqa_text_a": VQATextAProcessor, + "gqa": GQAProcessor, + "nlvr": NLVRProcessor +} + +output_modes = { + "vqa_text": "classification", + "vqa_text_a": "classification", + "gqa": "classification", + "nlvr": "classification" +} + +GLUE_TASKS_NUM_LABELS = { + "vqa_text": 3129, + "vqa_text_a": 3129, + "gqa": 1853, + "nlvr": 2 +} diff --git a/oscar/utils/tsv_file.py b/oscar/utils/tsv_file.py new file mode 100644 index 0000000..3563bb1 --- /dev/null +++ b/oscar/utils/tsv_file.py @@ -0,0 +1,85 @@ +# Copyright (c) 2020 Microsoft Corporation. Licensed under the MIT license. + +import logging +import os +import os.path as op + + +def generate_lineidx(filein, idxout): + idxout_tmp = idxout + '.tmp' + with open(filein, 'r') as tsvin, open(idxout_tmp,'w') as tsvout: + fsize = os.fstat(tsvin.fileno()).st_size + fpos = 0 + while fpos!=fsize: + tsvout.write(str(fpos)+"\n") + tsvin.readline() + fpos = tsvin.tell() + os.rename(idxout_tmp, idxout) + + +class TSVFile(object): + def __init__(self, tsv_file, generate_lineidx=False): + self.tsv_file = tsv_file + self.lineidx = op.splitext(tsv_file)[0] + '.lineidx' + self._fp = None + self._lineidx = None + # the process always keeps the process which opens the file. + # If the pid is not equal to the currrent pid, we will re-open the file. 
+
+
+class TSVFile(object):
+    def __init__(self, tsv_file, generate_lineidx_if_missing=False):
+        self.tsv_file = tsv_file
+        self.lineidx = op.splitext(tsv_file)[0] + '.lineidx'
+        self._fp = None
+        self._lineidx = None
+        # Remember the pid of the process that opened the file. If the current
+        # pid differs (e.g. after a fork in a DataLoader worker), the file is
+        # re-opened so that workers do not share a file position.
+        self.pid = None
+        # Generate the lineidx file if it does not exist yet. The flag is named
+        # so it does not shadow the module-level generate_lineidx() above.
+        if not op.isfile(self.lineidx) and generate_lineidx_if_missing:
+            generate_lineidx(self.tsv_file, self.lineidx)
+
+    def __del__(self):
+        if self._fp:
+            self._fp.close()
+
+    def __str__(self):
+        return "TSVFile(tsv_file='{}')".format(self.tsv_file)
+
+    def __repr__(self):
+        return str(self)
+
+    def num_rows(self):
+        self._ensure_lineidx_loaded()
+        return len(self._lineidx)
+
+    def seek(self, idx):
+        self._ensure_tsv_opened()
+        self._ensure_lineidx_loaded()
+        try:
+            pos = self._lineidx[idx]
+        except Exception:
+            logging.info('{}-{}'.format(self.tsv_file, idx))
+            raise
+        self._fp.seek(pos)
+        return [s.strip() for s in self._fp.readline().split('\t')]
+
+    def seek_first_column(self, idx):
+        self._ensure_tsv_opened()
+        self._ensure_lineidx_loaded()
+        pos = self._lineidx[idx]
+        self._fp.seek(pos)
+        return read_to_character(self._fp, '\t')
+
+    def __getitem__(self, index):
+        return self.seek(index)
+
+    def __len__(self):
+        return self.num_rows()
+
+    def _ensure_lineidx_loaded(self):
+        if self._lineidx is None:
+            logging.info('loading lineidx: {}'.format(self.lineidx))
+            with open(self.lineidx, 'r') as fp:
+                self._lineidx = [int(i.strip()) for i in fp.readlines()]
+
+    def _ensure_tsv_opened(self):
+        if self._fp is None:
+            self._fp = open(self.tsv_file, 'r')
+            self.pid = os.getpid()
+
+        if self.pid != os.getpid():
+            logging.info('re-open {} because the process id changed'.format(self.tsv_file))
+            self._fp = open(self.tsv_file, 'r')
+            self.pid = os.getpid()
diff --git a/oscar/utils/tsv_file_ops.py b/oscar/utils/tsv_file_ops.py
new file mode 100644
index 0000000..f520aef
--- /dev/null
+++ b/oscar/utils/tsv_file_ops.py
@@ -0,0 +1,18 @@
+# Copyright (c) 2020 Microsoft Corporation. Licensed under the MIT license.
+
+import os
+from .misc import mkdir
+
+
+def tsv_writer(values, tsv_file_name, sep='\t'):
+    mkdir(os.path.dirname(tsv_file_name))
+    tsv_file_name_tmp = tsv_file_name + '.tmp'
+    with open(tsv_file_name_tmp, 'wb') as fp:
+        assert values is not None
+        for value in values:
+            assert value is not None
+            # Accept both bytes and arbitrary objects in each column.
+            row = sep.join(x.decode() if type(x) == bytes else str(x) for x in value) + '\n'
+            fp.write(row.encode())
+    # Write-then-rename keeps readers from ever seeing a half-written file.
+    os.rename(tsv_file_name_tmp, tsv_file_name)
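+
+# A minimal round-trip sketch (paths and values are hypothetical):
+#
+#   from oscar.utils.tsv_file_ops import tsv_writer
+#   from oscar.utils.tsv_file import TSVFile
+#
+#   tsv_writer([["img_0", "feat_a"], ["img_1", "feat_b"]], "tmp/demo.tsv")
+#   tsv = TSVFile("tmp/demo.tsv", generate_lineidx_if_missing=True)
+#   assert tsv[1] == ["img_1", "feat_b"]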
+ """ + return open(op.join(script_dir, fname)).read() + + +def find_version(fname): + version_file = readme(fname) + version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", + version_file, re.M) + if version_match: + return version_match.group(1) + raise RuntimeError("Unable to find version string.") + + +setup( + name="oscar", + version=find_version("oscar/__init__.py"), + url='https://github.com/xjli/Oscar', + description="Oscar for vision and language tasks", + long_description=readme('README.md'), + packages=find_packages(), + classifiers=[ + 'Intended Audience :: Developers', + "Programming Language :: Python", + 'Topic :: Software Development', + ] +) diff --git a/transformers b/transformers new file mode 160000 index 0000000..067923d --- /dev/null +++ b/transformers @@ -0,0 +1 @@ +Subproject commit 067923d3267325f525f4e46f357360c191ba562e