From 1ade07733db910917cdb31c2a310d03ba937746e Mon Sep 17 00:00:00 2001 From: Victor Bittorf Date: Wed, 8 Sep 2021 14:57:34 -0700 Subject: [PATCH] Oirginal Release. History squashed. --- .flake8 | 3 + .gitignore | 145 + .gitmodules | 6 + CODE_OF_CONDUCT.md | 80 + CONTRIBUTING.md | 31 + LICENSE | 191 ++ LICENSE.md | 176 ++ README.md | 131 + benchmarks/dlrm/ootb/CODE_OF_CONDUCT.md | 5 + benchmarks/dlrm/ootb/CONTRIBUTING.md | 36 + benchmarks/dlrm/ootb/Dockerfile | 15 + benchmarks/dlrm/ootb/README.md | 389 +++ .../dlrm/ootb/bench/dlrm_s_benchmark.sh | 147 + .../dlrm/ootb/bench/dlrm_s_criteo_kaggle.sh | 32 + .../dlrm/ootb/bench/dlrm_s_criteo_terabyte.sh | 32 + benchmarks/dlrm/ootb/bench/run_and_time.sh | 19 + benchmarks/dlrm/ootb/cython/cython_compile.py | 26 + benchmarks/dlrm/ootb/cython/cython_criteo.py | 55 + benchmarks/dlrm/ootb/data_loader_terabyte.py | 368 +++ benchmarks/dlrm/ootb/data_utils.py | 1292 +++++++++ benchmarks/dlrm/ootb/dlrm_data_caffe2.py | 843 ++++++ benchmarks/dlrm/ootb/dlrm_data_pytorch.py | 1309 +++++++++ benchmarks/dlrm/ootb/dlrm_s_caffe2.py | 1703 +++++++++++ benchmarks/dlrm/ootb/dlrm_s_pytorch.py | 2511 +++++++++++++++++ benchmarks/dlrm/ootb/extend_distributed.py | 603 ++++ benchmarks/dlrm/ootb/input/dist_emb_0.log | 3 + benchmarks/dlrm/ootb/input/dist_emb_1.log | 3 + benchmarks/dlrm/ootb/input/dist_emb_2.log | 3 + benchmarks/dlrm/ootb/input/trace.log | 1 + .../ootb/kaggle_dac_loss_accuracy_plots.png | Bin 0 -> 547424 bytes benchmarks/dlrm/ootb/mlperf_logger.py | 118 + benchmarks/dlrm/ootb/optim/rwsadagrad.py | 122 + benchmarks/dlrm/ootb/requirements.txt | 8 + benchmarks/dlrm/ootb/test/dlrm_s_test.sh | 47 + .../dlrm/ootb/test/dlrm_s_test_fbgemm_gpu.sh | 51 + benchmarks/dlrm/ootb/tools/visualize.py | 1030 +++++++ .../dlrm/ootb/tricks/md_embedding_bag.py | 81 + .../dlrm/ootb/tricks/qr_embedding_bag.py | 185 ++ benchmarks/dlrm/ubench/README_comms.md | 5 + .../dlrm/ubench/dlrm_ubench_comms_driver.py | 130 + 
.../dlrm/ubench/dlrm_ubench_train_driver.py | 122 + benchmarks/rnnt/ootb/inference/QSL.py | 71 + benchmarks/rnnt/ootb/inference/README.md | 116 + .../rnnt/ootb/inference/accuracy_eval.py | 53 + .../rnnt/ootb/inference/environment.yml | 128 + .../rnnt/ootb/inference/loadgen/.clang-format | 2 + .../ootb/inference/loadgen/CMakeLists.txt | 68 + .../rnnt/ootb/inference/loadgen/README.md | 105 + .../ootb/inference/loadgen/README_BUILD.md | 32 + .../rnnt/ootb/inference/loadgen/README_FAQ.md | 88 + .../inference/loadgen/benchmark/.gitignore | 2 + .../inference/loadgen/benchmark/README.md | 10 + .../inference/loadgen/benchmark/repro.cpp | 302 ++ .../ootb/inference/loadgen/benchmark/run.sh | 21 + .../inference/loadgen/benchmark/run_debug.sh | 21 + .../ootb/inference/loadgen/bindings/c_api.cc | 168 ++ .../ootb/inference/loadgen/bindings/c_api.h | 90 + .../inference/loadgen/bindings/python_api.cc | 397 +++ .../loadgen/demos/py_demo_multi_stream.py | 92 + .../demos/py_demo_multi_stream_free.py | 92 + .../loadgen/demos/py_demo_offline.py | 88 + .../inference/loadgen/demos/py_demo_server.py | 85 + .../loadgen/demos/py_demo_single_stream.py | 93 + .../ootb/inference/loadgen/docs/src/BUILD.gn | 33 + .../ootb/inference/loadgen/docs/src/README.md | 34 + .../inference/loadgen/docs/src/doxygen.cfg | 2495 ++++++++++++++++ .../loadgen/docs/src/doxygen_footer.html | 26 + .../loadgen/docs/src/doxygen_header.html | 49 + .../docs/src/doxygen_html_generator.py | 37 + .../loadgen/docs/src/doxygen_layout.xml | 211 ++ .../loadgen/docs/src/doxygen_stylesheet.css | 1629 +++++++++++ .../docs/src/loadgen_integration_diagram.dia | Bin 0 -> 1943 bytes .../loadgen/docs/src/mlperf_icon.png | Bin 0 -> 4632 bytes .../docs/src/mlperf_logo_horizontal_color.svg | 55 + .../loadgen/generated/version_generated.cc | 93 + .../loadgen/issue_query_controller.cc | 619 ++++ .../loadgen/issue_query_controller.h | 211 ++ .../rnnt/ootb/inference/loadgen/loadgen.cc | 1644 +++++++++++ 
.../rnnt/ootb/inference/loadgen/loadgen.h | 96 + .../loadgen/loadgen_integration_diagram.svg | 85 + .../rnnt/ootb/inference/loadgen/logging.cc | 1096 +++++++ .../rnnt/ootb/inference/loadgen/logging.h | 808 ++++++ .../ootb/inference/loadgen/query_sample.h | 61 + .../inference/loadgen/query_sample_library.h | 75 + .../rnnt/ootb/inference/loadgen/setup.py | 79 + .../inference/loadgen/system_under_test.h | 72 + .../ootb/inference/loadgen/test_settings.h | 352 +++ .../loadgen/test_settings_internal.cc | 679 +++++ .../loadgen/test_settings_internal.h | 193 ++ .../ootb/inference/loadgen/tests/BUILD.gn | 25 + .../ootb/inference/loadgen/tests/README.md | 42 + .../ootb/inference/loadgen/tests/basic.cc | 335 +++ .../inference/loadgen/tests/loadgen_test.h | 199 ++ .../loadgen/tests/loadgen_test_main.cc | 33 + .../loadgen/tests/perftests_null_sut.cc | 236 ++ .../loadgen/tests/perftests_null_sut.py | 71 + .../loadgen/tools/mlperf-trace.ipynb | 441 +++ .../rnnt/ootb/inference/loadgen/utils.cc | 126 + .../rnnt/ootb/inference/loadgen/utils.h | 70 + .../rnnt/ootb/inference/loadgen/version.cc | 85 + .../rnnt/ootb/inference/loadgen/version.h | 39 + .../inference/loadgen/version_generator.py | 126 + .../inference/optional_harness_ck/README.md | 303 ++ .../rnnt/ootb/inference/pytorch/Dockerfile | 46 + .../rnnt/ootb/inference/pytorch/LICENSE | 204 ++ benchmarks/rnnt/ootb/inference/pytorch/NOTICE | 5 + .../ootb/inference/pytorch/configs/rnnt.toml | 77 + .../rnnt/ootb/inference/pytorch/dataset.py | 159 ++ .../rnnt/ootb/inference/pytorch/decoders.py | 122 + .../rnnt/ootb/inference/pytorch/helpers.py | 123 + .../rnnt/ootb/inference/pytorch/metrics.py | 67 + .../inference/pytorch/model_separable_rnnt.py | 214 ++ .../ootb/inference/pytorch/preprocessing.py | 39 + benchmarks/rnnt/ootb/inference/pytorch/rnn.py | 109 + .../inference/pytorch/scripts/docker/build.sh | 3 + .../pytorch/scripts/docker/launch.sh | 32 + .../pytorch/scripts/download_librispeech.sh | 28 + 
.../inference/pytorch/scripts/evaluation.sh | 92 + .../inference/pytorch/scripts/inference.sh | 104 + .../pytorch/scripts/inference_benchmark.sh | 84 + .../pytorch/scripts/preprocess_librispeech.sh | 51 + .../ootb/inference/pytorch/scripts/train.sh | 113 + .../pytorch/scripts/train_benchmark.sh | 130 + .../ootb/inference/pytorch/utils/__init__.py | 0 .../pytorch/utils/convert_librispeech.py | 82 + .../pytorch/utils/download_librispeech.py | 76 + .../inference/pytorch/utils/download_utils.py | 69 + .../pytorch/utils/inference_librispeech.csv | 5 + .../pytorch/utils/librispeech-inference.csv | 2 + .../inference/pytorch/utils/librispeech.csv | 8 + .../pytorch/utils/preprocessing_utils.py | 77 + benchmarks/rnnt/ootb/inference/pytorch_SUT.py | 124 + benchmarks/rnnt/ootb/inference/run.py | 110 + benchmarks/rnnt/ootb/inference/run.sh | 90 + .../rnnt/ootb/inference/third_party/pybind | 1 + benchmarks/rnnt/ootb/inference/user.conf | 6 + benchmarks/rnnt/ootb/train/Dockerfile | 56 + benchmarks/rnnt/ootb/train/LICENSE | 204 ++ benchmarks/rnnt/ootb/train/NOTICE | 5 + benchmarks/rnnt/ootb/train/README.md | 192 ++ benchmarks/rnnt/ootb/train/common/__init__.py | 0 benchmarks/rnnt/ootb/train/common/audio.py | 214 ++ .../rnnt/ootb/train/common/data/__init__.py | 1 + .../ootb/train/common/data/dali/__init__.py | 13 + .../train/common/data/dali/data_loader.py | 143 + .../ootb/train/common/data/dali/iterator.py | 112 + .../ootb/train/common/data/dali/pipeline.py | 212 ++ .../ootb/train/common/data/dali/sampler.py | 109 + .../rnnt/ootb/train/common/data/dataset.py | 233 ++ .../rnnt/ootb/train/common/data/features.py | 276 ++ .../rnnt/ootb/train/common/data/helpers.py | 22 + .../rnnt/ootb/train/common/data/text.py | 50 + benchmarks/rnnt/ootb/train/common/helpers.py | 241 ++ benchmarks/rnnt/ootb/train/common/metrics.py | 59 + .../rnnt/ootb/train/common/optimizers.py | 257 ++ benchmarks/rnnt/ootb/train/common/rnn.py | 88 + benchmarks/rnnt/ootb/train/common/sampler.py | 143 + 
.../rnnt/ootb/train/common/tb_dllogger.py | 179 ++ .../rnnt/ootb/train/common/text/LICENSE | 19 + .../rnnt/ootb/train/common/text/__init__.py | 32 + .../rnnt/ootb/train/common/text/cleaners.py | 94 + .../rnnt/ootb/train/common/text/numbers.py | 99 + .../rnnt/ootb/train/common/text/symbols.py | 19 + .../train/configs/baseline_v3-1023sp.yaml | 80 + .../rnnt/ootb/train/docker-compose.yaml | 17 + benchmarks/rnnt/ootb/train/eval_model.py | 74 + benchmarks/rnnt/ootb/train/inference.py | 314 +++ benchmarks/rnnt/ootb/train/mlperf/__init__.py | 0 benchmarks/rnnt/ootb/train/mlperf/logging.py | 74 + benchmarks/rnnt/ootb/train/requirements.txt | 10 + benchmarks/rnnt/ootb/train/rnnt/config.py | 117 + benchmarks/rnnt/ootb/train/rnnt/decoder.py | 126 + benchmarks/rnnt/ootb/train/rnnt/loss.py | 88 + benchmarks/rnnt/ootb/train/rnnt/model.py | 275 ++ benchmarks/rnnt/ootb/train/rnnt_layers.svg | 1 + benchmarks/rnnt/ootb/train/run_and_time.sh | 52 + .../train/scripts/create_sentencepieces.sh | 19 + .../rnnt/ootb/train/scripts/docker/build.sh | 3 + .../rnnt/ootb/train/scripts/docker/launch.sh | 32 + .../train/scripts/download_librispeech.sh | 31 + .../rnnt/ootb/train/scripts/inference.sh | 62 + .../ootb/train/scripts/inference_benchmark.sh | 23 + .../train/scripts/preprocess_librispeech.sh | 54 + benchmarks/rnnt/ootb/train/scripts/train.sh | 110 + .../rnnt/ootb/train/scripts/train_bench.sh | 84 + .../rnnt/ootb/train/scripts/train_debug.sh | 96 + .../rnnt/ootb/train/scripts/train_refactor.sh | 85 + benchmarks/rnnt/ootb/train/tests/Dockerfile | 14 + .../rnnt/ootb/train/tests/requirements.txt | 1 + .../dataset/test_rnnt_wordpiece_tokenizer.py | 52 + benchmarks/rnnt/ootb/train/train.py | 617 ++++ benchmarks/rnnt/ootb/train/utils/__init__.py | 0 .../ootb/train/utils/convert_librispeech.py | 81 + .../ootb/train/utils/download_librispeech.py | 72 + .../rnnt/ootb/train/utils/download_utils.py | 68 + .../train/utils/inference_librispeech.csv | 5 + .../rnnt/ootb/train/utils/librispeech.csv | 8 + 
.../ootb/train/utils/preprocessing_utils.py | 76 + benchmarks/run_all.sh | 19 + benchmarks/run_dlrm_ootb_infer.sh | 57 + benchmarks/run_dlrm_ootb_train.sh | 57 + benchmarks/run_dlrm_ubench_train_allreduce.sh | 60 + benchmarks/run_dlrm_ubench_train_alltoall.sh | 60 + .../run_dlrm_ubench_train_embeddingbag.sh | 36 + benchmarks/run_dlrm_ubench_train_linear.sh | 36 + benchmarks/run_rnnt_ootb_infer.sh | 69 + benchmarks/run_rnnt_ootb_train.sh | 68 + benchmarks/run_xlmr_ootb.sh | 28 + benchmarks/setup_rnnt.sh | 93 + benchmarks/xlmr/ootb/xlmr.py | 137 + benchmarks/xlmr/ootb/xlmr_extra.py | 31 + docs/DLRM.md | 109 + docs/RNNT.md | 139 + docs/adding_benchmarks.md | 6 + docs/getting_started.md | 21 + fb5logging/fb5logger.py | 93 + fb5logging/loggerconstants.py | 32 + fb5logging/result_summarizer.py | 249 ++ fb5logging/test_mllog.py | 16 + param | 1 + 220 files changed, 36806 insertions(+) create mode 100644 .flake8 create mode 100644 .gitignore create mode 100644 .gitmodules create mode 100644 CODE_OF_CONDUCT.md create mode 100644 CONTRIBUTING.md create mode 100644 LICENSE create mode 100644 LICENSE.md create mode 100644 README.md create mode 100644 benchmarks/dlrm/ootb/CODE_OF_CONDUCT.md create mode 100644 benchmarks/dlrm/ootb/CONTRIBUTING.md create mode 100644 benchmarks/dlrm/ootb/Dockerfile create mode 100644 benchmarks/dlrm/ootb/README.md create mode 100755 benchmarks/dlrm/ootb/bench/dlrm_s_benchmark.sh create mode 100755 benchmarks/dlrm/ootb/bench/dlrm_s_criteo_kaggle.sh create mode 100755 benchmarks/dlrm/ootb/bench/dlrm_s_criteo_terabyte.sh create mode 100755 benchmarks/dlrm/ootb/bench/run_and_time.sh create mode 100644 benchmarks/dlrm/ootb/cython/cython_compile.py create mode 100644 benchmarks/dlrm/ootb/cython/cython_criteo.py create mode 100644 benchmarks/dlrm/ootb/data_loader_terabyte.py create mode 100644 benchmarks/dlrm/ootb/data_utils.py create mode 100644 benchmarks/dlrm/ootb/dlrm_data_caffe2.py create mode 100644 benchmarks/dlrm/ootb/dlrm_data_pytorch.py create 
mode 100644 benchmarks/dlrm/ootb/dlrm_s_caffe2.py create mode 100644 benchmarks/dlrm/ootb/dlrm_s_pytorch.py create mode 100644 benchmarks/dlrm/ootb/extend_distributed.py create mode 100644 benchmarks/dlrm/ootb/input/dist_emb_0.log create mode 100644 benchmarks/dlrm/ootb/input/dist_emb_1.log create mode 100644 benchmarks/dlrm/ootb/input/dist_emb_2.log create mode 100644 benchmarks/dlrm/ootb/input/trace.log create mode 100644 benchmarks/dlrm/ootb/kaggle_dac_loss_accuracy_plots.png create mode 100644 benchmarks/dlrm/ootb/mlperf_logger.py create mode 100644 benchmarks/dlrm/ootb/optim/rwsadagrad.py create mode 100644 benchmarks/dlrm/ootb/requirements.txt create mode 100755 benchmarks/dlrm/ootb/test/dlrm_s_test.sh create mode 100644 benchmarks/dlrm/ootb/test/dlrm_s_test_fbgemm_gpu.sh create mode 100755 benchmarks/dlrm/ootb/tools/visualize.py create mode 100644 benchmarks/dlrm/ootb/tricks/md_embedding_bag.py create mode 100644 benchmarks/dlrm/ootb/tricks/qr_embedding_bag.py create mode 100644 benchmarks/dlrm/ubench/README_comms.md create mode 100644 benchmarks/dlrm/ubench/dlrm_ubench_comms_driver.py create mode 100644 benchmarks/dlrm/ubench/dlrm_ubench_train_driver.py create mode 100644 benchmarks/rnnt/ootb/inference/QSL.py create mode 100644 benchmarks/rnnt/ootb/inference/README.md create mode 100644 benchmarks/rnnt/ootb/inference/accuracy_eval.py create mode 100644 benchmarks/rnnt/ootb/inference/environment.yml create mode 100644 benchmarks/rnnt/ootb/inference/loadgen/.clang-format create mode 100644 benchmarks/rnnt/ootb/inference/loadgen/CMakeLists.txt create mode 100644 benchmarks/rnnt/ootb/inference/loadgen/README.md create mode 100644 benchmarks/rnnt/ootb/inference/loadgen/README_BUILD.md create mode 100644 benchmarks/rnnt/ootb/inference/loadgen/README_FAQ.md create mode 100644 benchmarks/rnnt/ootb/inference/loadgen/benchmark/.gitignore create mode 100644 benchmarks/rnnt/ootb/inference/loadgen/benchmark/README.md create mode 100644 
benchmarks/rnnt/ootb/inference/loadgen/benchmark/repro.cpp create mode 100644 benchmarks/rnnt/ootb/inference/loadgen/benchmark/run.sh create mode 100644 benchmarks/rnnt/ootb/inference/loadgen/benchmark/run_debug.sh create mode 100644 benchmarks/rnnt/ootb/inference/loadgen/bindings/c_api.cc create mode 100644 benchmarks/rnnt/ootb/inference/loadgen/bindings/c_api.h create mode 100644 benchmarks/rnnt/ootb/inference/loadgen/bindings/python_api.cc create mode 100644 benchmarks/rnnt/ootb/inference/loadgen/demos/py_demo_multi_stream.py create mode 100644 benchmarks/rnnt/ootb/inference/loadgen/demos/py_demo_multi_stream_free.py create mode 100644 benchmarks/rnnt/ootb/inference/loadgen/demos/py_demo_offline.py create mode 100644 benchmarks/rnnt/ootb/inference/loadgen/demos/py_demo_server.py create mode 100644 benchmarks/rnnt/ootb/inference/loadgen/demos/py_demo_single_stream.py create mode 100644 benchmarks/rnnt/ootb/inference/loadgen/docs/src/BUILD.gn create mode 100644 benchmarks/rnnt/ootb/inference/loadgen/docs/src/README.md create mode 100644 benchmarks/rnnt/ootb/inference/loadgen/docs/src/doxygen.cfg create mode 100644 benchmarks/rnnt/ootb/inference/loadgen/docs/src/doxygen_footer.html create mode 100644 benchmarks/rnnt/ootb/inference/loadgen/docs/src/doxygen_header.html create mode 100644 benchmarks/rnnt/ootb/inference/loadgen/docs/src/doxygen_html_generator.py create mode 100644 benchmarks/rnnt/ootb/inference/loadgen/docs/src/doxygen_layout.xml create mode 100644 benchmarks/rnnt/ootb/inference/loadgen/docs/src/doxygen_stylesheet.css create mode 100644 benchmarks/rnnt/ootb/inference/loadgen/docs/src/loadgen_integration_diagram.dia create mode 100644 benchmarks/rnnt/ootb/inference/loadgen/docs/src/mlperf_icon.png create mode 100644 benchmarks/rnnt/ootb/inference/loadgen/docs/src/mlperf_logo_horizontal_color.svg create mode 100644 benchmarks/rnnt/ootb/inference/loadgen/generated/version_generated.cc create mode 100644 
benchmarks/rnnt/ootb/inference/loadgen/issue_query_controller.cc create mode 100644 benchmarks/rnnt/ootb/inference/loadgen/issue_query_controller.h create mode 100644 benchmarks/rnnt/ootb/inference/loadgen/loadgen.cc create mode 100644 benchmarks/rnnt/ootb/inference/loadgen/loadgen.h create mode 100644 benchmarks/rnnt/ootb/inference/loadgen/loadgen_integration_diagram.svg create mode 100644 benchmarks/rnnt/ootb/inference/loadgen/logging.cc create mode 100644 benchmarks/rnnt/ootb/inference/loadgen/logging.h create mode 100644 benchmarks/rnnt/ootb/inference/loadgen/query_sample.h create mode 100644 benchmarks/rnnt/ootb/inference/loadgen/query_sample_library.h create mode 100644 benchmarks/rnnt/ootb/inference/loadgen/setup.py create mode 100644 benchmarks/rnnt/ootb/inference/loadgen/system_under_test.h create mode 100644 benchmarks/rnnt/ootb/inference/loadgen/test_settings.h create mode 100644 benchmarks/rnnt/ootb/inference/loadgen/test_settings_internal.cc create mode 100644 benchmarks/rnnt/ootb/inference/loadgen/test_settings_internal.h create mode 100644 benchmarks/rnnt/ootb/inference/loadgen/tests/BUILD.gn create mode 100644 benchmarks/rnnt/ootb/inference/loadgen/tests/README.md create mode 100644 benchmarks/rnnt/ootb/inference/loadgen/tests/basic.cc create mode 100644 benchmarks/rnnt/ootb/inference/loadgen/tests/loadgen_test.h create mode 100644 benchmarks/rnnt/ootb/inference/loadgen/tests/loadgen_test_main.cc create mode 100644 benchmarks/rnnt/ootb/inference/loadgen/tests/perftests_null_sut.cc create mode 100644 benchmarks/rnnt/ootb/inference/loadgen/tests/perftests_null_sut.py create mode 100644 benchmarks/rnnt/ootb/inference/loadgen/tools/mlperf-trace.ipynb create mode 100644 benchmarks/rnnt/ootb/inference/loadgen/utils.cc create mode 100644 benchmarks/rnnt/ootb/inference/loadgen/utils.h create mode 100644 benchmarks/rnnt/ootb/inference/loadgen/version.cc create mode 100644 benchmarks/rnnt/ootb/inference/loadgen/version.h create mode 100644 
benchmarks/rnnt/ootb/inference/loadgen/version_generator.py create mode 100644 benchmarks/rnnt/ootb/inference/optional_harness_ck/README.md create mode 100755 benchmarks/rnnt/ootb/inference/pytorch/Dockerfile create mode 100644 benchmarks/rnnt/ootb/inference/pytorch/LICENSE create mode 100644 benchmarks/rnnt/ootb/inference/pytorch/NOTICE create mode 100644 benchmarks/rnnt/ootb/inference/pytorch/configs/rnnt.toml create mode 100644 benchmarks/rnnt/ootb/inference/pytorch/dataset.py create mode 100644 benchmarks/rnnt/ootb/inference/pytorch/decoders.py create mode 100644 benchmarks/rnnt/ootb/inference/pytorch/helpers.py create mode 100644 benchmarks/rnnt/ootb/inference/pytorch/metrics.py create mode 100644 benchmarks/rnnt/ootb/inference/pytorch/model_separable_rnnt.py create mode 100644 benchmarks/rnnt/ootb/inference/pytorch/preprocessing.py create mode 100644 benchmarks/rnnt/ootb/inference/pytorch/rnn.py create mode 100755 benchmarks/rnnt/ootb/inference/pytorch/scripts/docker/build.sh create mode 100755 benchmarks/rnnt/ootb/inference/pytorch/scripts/docker/launch.sh create mode 100755 benchmarks/rnnt/ootb/inference/pytorch/scripts/download_librispeech.sh create mode 100755 benchmarks/rnnt/ootb/inference/pytorch/scripts/evaluation.sh create mode 100755 benchmarks/rnnt/ootb/inference/pytorch/scripts/inference.sh create mode 100755 benchmarks/rnnt/ootb/inference/pytorch/scripts/inference_benchmark.sh create mode 100755 benchmarks/rnnt/ootb/inference/pytorch/scripts/preprocess_librispeech.sh create mode 100755 benchmarks/rnnt/ootb/inference/pytorch/scripts/train.sh create mode 100755 benchmarks/rnnt/ootb/inference/pytorch/scripts/train_benchmark.sh create mode 100644 benchmarks/rnnt/ootb/inference/pytorch/utils/__init__.py create mode 100644 benchmarks/rnnt/ootb/inference/pytorch/utils/convert_librispeech.py create mode 100644 benchmarks/rnnt/ootb/inference/pytorch/utils/download_librispeech.py create mode 100644 
benchmarks/rnnt/ootb/inference/pytorch/utils/download_utils.py create mode 100644 benchmarks/rnnt/ootb/inference/pytorch/utils/inference_librispeech.csv create mode 100644 benchmarks/rnnt/ootb/inference/pytorch/utils/librispeech-inference.csv create mode 100644 benchmarks/rnnt/ootb/inference/pytorch/utils/librispeech.csv create mode 100644 benchmarks/rnnt/ootb/inference/pytorch/utils/preprocessing_utils.py create mode 100644 benchmarks/rnnt/ootb/inference/pytorch_SUT.py create mode 100644 benchmarks/rnnt/ootb/inference/run.py create mode 100755 benchmarks/rnnt/ootb/inference/run.sh create mode 160000 benchmarks/rnnt/ootb/inference/third_party/pybind create mode 100644 benchmarks/rnnt/ootb/inference/user.conf create mode 100755 benchmarks/rnnt/ootb/train/Dockerfile create mode 100644 benchmarks/rnnt/ootb/train/LICENSE create mode 100644 benchmarks/rnnt/ootb/train/NOTICE create mode 100644 benchmarks/rnnt/ootb/train/README.md create mode 100644 benchmarks/rnnt/ootb/train/common/__init__.py create mode 100644 benchmarks/rnnt/ootb/train/common/audio.py create mode 100644 benchmarks/rnnt/ootb/train/common/data/__init__.py create mode 100644 benchmarks/rnnt/ootb/train/common/data/dali/__init__.py create mode 100644 benchmarks/rnnt/ootb/train/common/data/dali/data_loader.py create mode 100644 benchmarks/rnnt/ootb/train/common/data/dali/iterator.py create mode 100644 benchmarks/rnnt/ootb/train/common/data/dali/pipeline.py create mode 100644 benchmarks/rnnt/ootb/train/common/data/dali/sampler.py create mode 100644 benchmarks/rnnt/ootb/train/common/data/dataset.py create mode 100644 benchmarks/rnnt/ootb/train/common/data/features.py create mode 100644 benchmarks/rnnt/ootb/train/common/data/helpers.py create mode 100644 benchmarks/rnnt/ootb/train/common/data/text.py create mode 100644 benchmarks/rnnt/ootb/train/common/helpers.py create mode 100644 benchmarks/rnnt/ootb/train/common/metrics.py create mode 100644 benchmarks/rnnt/ootb/train/common/optimizers.py create mode 100644 
benchmarks/rnnt/ootb/train/common/rnn.py create mode 100644 benchmarks/rnnt/ootb/train/common/sampler.py create mode 100644 benchmarks/rnnt/ootb/train/common/tb_dllogger.py create mode 100644 benchmarks/rnnt/ootb/train/common/text/LICENSE create mode 100644 benchmarks/rnnt/ootb/train/common/text/__init__.py create mode 100644 benchmarks/rnnt/ootb/train/common/text/cleaners.py create mode 100644 benchmarks/rnnt/ootb/train/common/text/numbers.py create mode 100644 benchmarks/rnnt/ootb/train/common/text/symbols.py create mode 100644 benchmarks/rnnt/ootb/train/configs/baseline_v3-1023sp.yaml create mode 100644 benchmarks/rnnt/ootb/train/docker-compose.yaml create mode 100644 benchmarks/rnnt/ootb/train/eval_model.py create mode 100644 benchmarks/rnnt/ootb/train/inference.py create mode 100644 benchmarks/rnnt/ootb/train/mlperf/__init__.py create mode 100644 benchmarks/rnnt/ootb/train/mlperf/logging.py create mode 100755 benchmarks/rnnt/ootb/train/requirements.txt create mode 100644 benchmarks/rnnt/ootb/train/rnnt/config.py create mode 100644 benchmarks/rnnt/ootb/train/rnnt/decoder.py create mode 100644 benchmarks/rnnt/ootb/train/rnnt/loss.py create mode 100644 benchmarks/rnnt/ootb/train/rnnt/model.py create mode 100755 benchmarks/rnnt/ootb/train/rnnt_layers.svg create mode 100755 benchmarks/rnnt/ootb/train/run_and_time.sh create mode 100644 benchmarks/rnnt/ootb/train/scripts/create_sentencepieces.sh create mode 100755 benchmarks/rnnt/ootb/train/scripts/docker/build.sh create mode 100755 benchmarks/rnnt/ootb/train/scripts/docker/launch.sh create mode 100755 benchmarks/rnnt/ootb/train/scripts/download_librispeech.sh create mode 100755 benchmarks/rnnt/ootb/train/scripts/inference.sh create mode 100755 benchmarks/rnnt/ootb/train/scripts/inference_benchmark.sh create mode 100755 benchmarks/rnnt/ootb/train/scripts/preprocess_librispeech.sh create mode 100755 benchmarks/rnnt/ootb/train/scripts/train.sh create mode 100755 benchmarks/rnnt/ootb/train/scripts/train_bench.sh create 
mode 100755 benchmarks/rnnt/ootb/train/scripts/train_debug.sh create mode 100644 benchmarks/rnnt/ootb/train/scripts/train_refactor.sh create mode 100644 benchmarks/rnnt/ootb/train/tests/Dockerfile create mode 100644 benchmarks/rnnt/ootb/train/tests/requirements.txt create mode 100644 benchmarks/rnnt/ootb/train/tests/rnnt/dataset/test_rnnt_wordpiece_tokenizer.py create mode 100644 benchmarks/rnnt/ootb/train/train.py create mode 100644 benchmarks/rnnt/ootb/train/utils/__init__.py create mode 100644 benchmarks/rnnt/ootb/train/utils/convert_librispeech.py create mode 100644 benchmarks/rnnt/ootb/train/utils/download_librispeech.py create mode 100644 benchmarks/rnnt/ootb/train/utils/download_utils.py create mode 100644 benchmarks/rnnt/ootb/train/utils/inference_librispeech.csv create mode 100644 benchmarks/rnnt/ootb/train/utils/librispeech.csv create mode 100644 benchmarks/rnnt/ootb/train/utils/preprocessing_utils.py create mode 100755 benchmarks/run_all.sh create mode 100755 benchmarks/run_dlrm_ootb_infer.sh create mode 100755 benchmarks/run_dlrm_ootb_train.sh create mode 100644 benchmarks/run_dlrm_ubench_train_allreduce.sh create mode 100755 benchmarks/run_dlrm_ubench_train_alltoall.sh create mode 100755 benchmarks/run_dlrm_ubench_train_embeddingbag.sh create mode 100755 benchmarks/run_dlrm_ubench_train_linear.sh create mode 100755 benchmarks/run_rnnt_ootb_infer.sh create mode 100755 benchmarks/run_rnnt_ootb_train.sh create mode 100755 benchmarks/run_xlmr_ootb.sh create mode 100644 benchmarks/setup_rnnt.sh create mode 100644 benchmarks/xlmr/ootb/xlmr.py create mode 100644 benchmarks/xlmr/ootb/xlmr_extra.py create mode 100644 docs/DLRM.md create mode 100644 docs/RNNT.md create mode 100644 docs/adding_benchmarks.md create mode 100644 docs/getting_started.md create mode 100644 fb5logging/fb5logger.py create mode 100644 fb5logging/loggerconstants.py create mode 100644 fb5logging/result_summarizer.py create mode 100644 fb5logging/test_mllog.py create mode 160000 param diff 
--git a/.flake8 b/.flake8 new file mode 100644 index 0000000..5e97f52 --- /dev/null +++ b/.flake8 @@ -0,0 +1,3 @@ +[flake8] +max-line-length=120 +ignore = E203,E305,E402,E721,E741,F401,F403,F405,F821,F841,F999,W503,W504 diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..d826437 --- /dev/null +++ b/.gitignore @@ -0,0 +1,145 @@ +# Data files +.data + +# Results folders +run_kaggle_pt/ +results/ + +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 
+# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 0000000..8b32f06 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,6 @@ +[submodule "param"] + path = param + url = https://github.com/facebookresearch/param.git +[submodule "benchmarks/rnnt/ootb/inference/third_party/pybind"] + path = benchmarks/rnnt/ootb/inference/third_party/pybind + url = https://github.com/pybind/pybind11.git diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 0000000..08b500a --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,80 @@ +# Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to make participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, sex characteristics, gender identity and expression, +level of experience, education, socio-economic status, nationality, personal +appearance, race, religion, or sexual identity and orientation. 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or + advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies within all project spaces, and it also applies when +an individual is representing the project or its community in public spaces. +Examples of representing a project or community include using an official +project e-mail address, posting via an official social media account, or acting +as an appointed representative at an online or offline event. Representation of +a project may be further defined and clarified by project maintainers. 
+ +This Code of Conduct also applies outside the project spaces when there is a +reasonable belief that an individual's behavior may have a negative impact on +the project or its community. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at . All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html + +[homepage]: https://www.contributor-covenant.org + +For answers to common questions about this code of conduct, see +https://www.contributor-covenant.org/faq diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000..1665402 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,31 @@ +# Contributing to proxyworkloads +We want to make contributing to this project as easy and transparent as +possible. + +## Pull Requests +We actively welcome your pull requests. + +1. Fork the repo and create your branch from `main`. +2. If you've added code that should be tested, add tests. +3. If you've changed APIs, update the documentation. +4. Ensure the test suite passes. +5. Make sure your code lints. +6. If you haven't already, complete the Contributor License Agreement ("CLA"). + +## Contributor License Agreement ("CLA") +In order to accept your pull request, we need you to submit a CLA. 
You only need +to do this once to work on any of Facebook's open source projects. + +Complete your CLA here: <https://code.facebook.com/cla> + +## Issues +We use GitHub issues to track public bugs. Please ensure your description is +clear and has sufficient instructions to be able to reproduce the issue. + +Facebook has a [bounty program](https://www.facebook.com/whitehat/) for the safe +disclosure of security bugs. In those cases, please go through the process +outlined on that page and do not file a public issue. + +## License +By contributing to proxyworkloads, you agree that your contributions will be licensed +under the LICENSE file in the root directory of this source tree. \ No newline at end of file diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..bb84d29 --- /dev/null +++ b/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2021 Meta Platforms, Inc. and its affiliates. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/LICENSE.md b/LICENSE.md new file mode 100644 index 0000000..2bb9ad2 --- /dev/null +++ b/LICENSE.md @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS \ No newline at end of file diff --git a/README.md b/README.md new file mode 100644 index 0000000..f2e38c0 --- /dev/null +++ b/README.md @@ -0,0 +1,131 @@ +# Proxy Workloads + +These benchmarks represent important workloads. The faster these benchmarks are, the happier owners of important workloads are. The maintainers, updates, and rules in this benchmark suite all exist to keep the connection between the people running these benchmarks and the people running the original workloads. + +The key things to know: +- These benchmarks are directly connected to real workloads run every day +- The main metric is throughput, subject to some constraints such as latency or max batchsize +- Data is often synthetic, though we have safeguards to ensure correctness +- There are special requirements when improving these benchmarks - it's not "anything goes" +- This includes benchmarks (runnable on 1 device, multiple devices, clusters) and microbenchmarks + + +To get started running the benchmark suite right away on a V100: + + cd proxyworkloads/benchmarks + ./run_all.sh + + +## The Suite + +This suite captures benchmarks across multiple devices, across multiple precisions, and includes microbenchmarks. We organize the suite so each benchmark result is identified as: + + Benchmark = Models + Implementation + Mode + Configuration + +### Models +This suite contains the following benchmarks: +- Recommendation: DLRM +- Text: XLM-R (WIP) +- Vision: CVT (Planned) +- Text: OSCAR (Planned) +- Speech: RNN-T (WIP) +- Video: Resnext-3D (Planned) +- Image: Regnet-Y (Planned) + +### Implementation + +Each benchmark comes in three different implementations: +- Out Of The Box (OOTB): indicates the performance that is provided by the libraries and frameworks. Code is written like a regular AI engineer / researcher would write the code, not like a systems/hardware specialist would write the code. 
+- Optimized: Represents the best possible performance which can be reached; the code is tuned, re-written (and perhaps even mangled) by hardware and software experts +- Microbenchmarks: benchmarks which look at a specific component of dev, computer or cluster. These are highly unique and specialized in their purpose. + +### Modes + +For OOTB and optimized implementations, the modes are Inference and Training. For Microbenchmarks, the mode is the specific kind of microbenchmark being run. + +### Configurations + +Each implementation comes in multiple configurations. Each configuration looks at the benchmark in a different way, such as: +- The model and data scaled to different number of devices: e.g. 1, 8, multiple node +- Different precisions and numeric formats +- Different variants of the models, representing possible different layers or sizes the model might be run at. + +## Results + +Running one or more benchmarks on a specific machine or cluster produces a results table. Below are example results which you may get. + +|Model |Implementation|Mode |Config |Batch Size|Score |Units| +|-------------------------|--------------|------------|-------------------|----------|------|-----| +|Recommend: DLRM |OOTB |Training |A.1dev-embed32-fp32|1024 |570.16|ex/s | +|Recommend: DLRM |OOTB |Inference |A.1dev-embed4-fp32 |1024 |61.85*|ex/s | +|Recommend: DLRM |Micro |MLP/Linear |linear_A.1dev |256 |7.08 |TF/s | +|Recommend: DLRM |Micro |EmbeddingBag|emb_A.1dev |65536 |537.80|GB/s | +* = missed latency target + +Notice the following in this table: +- Each row is one Benchmark run with a batch size (`Model + Implementation + Mode + Config` at a given batch size). More on batch size in Suite Design. +- All rows in the same table are run on the same machine. Benchmarks from different hardware must appear in different result tables. +- Some results have a `*` denoting that they missed the latency target. More on latency targets in Suite Design. 
+- You may report multiple batch sizes for the same benchmark, they appear as different lines in the table. + + +### Results by System Scale +We look at all the results to understand the broader picture of performance. + +** For systems that can't run the full model: ** Microbenchmarks give us a picture into potential performance and early indicators of where to explore more. + +** For single device systems: ** For training, single device configurations and microbenchmarks can indicate trends in overall cluster performance; microbenchmarks run on the cluster paired with single device results can indicate if single device performance is in fact the bottleneck. For inference, single inference is often easily parallelizable across multiple devices, the single device benchmarks are a very good indicator of real performance. This has the added advantage of being quick and easy for debugging and experiments. + +** For multiple device, single node: ** For Training, multidevice configurations give good insight into how single nodes perform within a cluster - this can be combined with microbenchmarks on the cluster to predict overall performance. For inference, this is a great reflection of actual workloads. This has the added advantage of being quick and easy for debugging and experiments. + +** For Clusters: ** Running these benchmarks on a cluster gives the best indication of performance for Training but does not add additional information for Inference. The downside is, obviously, these runs are more costly to set up and run. + + +### How Results are Consumed +There are two broad comparisons that can be done: hardware-to-hardware and OOTB v. Optimized. + +- System to System: Compare two tables generated by two different systems to understand their differences +- OOTB v. Optimized: Look at one table, one system, and understand the gap between the software (compilers, frameworks, and libraries) and what might be possible if the software was improved. 
+ +Generally, consuming results is specific to the situation. Different goals will result in placing different priorities and weights when evaluating results so there isn't a one size fits all approach here. It's up to the people and situation. + + +## Suite Design +We are very specific about how these benchmarks must be run and optimized in order to maintain our goal: ** improvements to these benchmarks connect directly to improvements in important internal workloads **. Where our methodology may seem arbitrary or cumbersome, it is in service of maintaining the connection to the source. + +### Ownership, Versions & Updates +Each Benchmark (`Model + Implementation + Mode + Config`) is connected with an actual owner of an actual workload who endorsed the benchmark. The owner is the arbiter of changes, updates, and methodology for the benchmark. It is exceptionally frustrating to see benchmarks change while you are working on them. It sucks, and we version our benchmarks to help with bookkeeping. Ultimately, our goal here is to reflect the current state of what people care about - unfortunately this means (sometimes too frequently) bumping versions to ensure we are offering the best proxy to the world. + +### Convergence and Accuracy +The gold standard in understanding how the system works is measuring convergence and accuracy of the model in the end-to-end context. Unfortunately, as shown by MLPerf, this is exceptionally costly, burdensome and slow. 
We do not place an emphasis on convergence and accuracy for the following reasons: +- We don't allow significant changes to model code (see "Improving the Benchmark Score"), so we don't expect people to be breaking convergence +- We limit the data types and precisions to ones we understand and are known to be viable +- We (will) offer the ability to verify correctness (possibly through real data or through statistical analysis on synthetic data) +- We lean on benchmarks in MLPerf which has a similar suite of models and submissions to MLPerf are required to test correctness. + +Overall, we aim to allow benchmarking at the granularity which is usable by people in their projects, representative of the actual workloads, and not overly cumbersome or expensive. It's a compromise. + +### Data +As discussed in Convergence and Accuracy, we are not an accuracy or convergence benchmark. This frees us up to use synthetic data which significantly improves usability and time-to-results for this suite. + +We may choose to use real data, or data derived from real data, where we cannot generate proper synthetic data. + +### Batch Sizes +Generally speaking, the bigger the batch size the better the throughput but the longer the time to converge and the higher the latency. When running these benchmarks, people will want to see: +- The benchmark run at specific known batch sizes (where the convergence is understood) to allow for predicting and modeling +- The benchmark at the batch size which gives the best throughput, subject to either (a) a maximum batchsize for which the model will converge, or (b) a latency requirement for requests. + +### Latency Limits +Inference benchmarks come with latency limits and the goal is to provide the best QPS while hitting the latency limit. Some inference benchmarks may reflect user facing operations where latency is key. Some inference benchmarks may reflect background jobs where throughput is key - so the latency limit is very high in these cases. 
+ +## Improving the Benchmark Score +The bigger the score, the better - but there are limits on how to get there. The limits depend on the implementation (Out-Of-The-Box OOTB, Optimized, or Microbenchmark). + +- Out-Of-The-Box (OOTB): Improvements must come in through libraries, frameworks, and new hardware. No changing the model code (special exceptions for non-optimizing changes which enable porting to new hardware). +- Optimized: No holds barred - make the system shine. Just keep in mind everything you do, you're asking the actual people who run the workloads to do it too if they're going to realize that performance. You'll need to describe what changes you made, so keep track. +- Microbenchmarks - Implement the same operation as defined, and make it as fast as possible. + +## License + +This is released under the APACHE 2 license. Please see the [`LICENSE`](LICENSE) file for more information. + diff --git a/benchmarks/dlrm/ootb/CODE_OF_CONDUCT.md b/benchmarks/dlrm/ootb/CODE_OF_CONDUCT.md new file mode 100644 index 0000000..0f7ad8b --- /dev/null +++ b/benchmarks/dlrm/ootb/CODE_OF_CONDUCT.md @@ -0,0 +1,5 @@ +# Code of Conduct + +Facebook has adopted a Code of Conduct that we expect project participants to adhere to. +Please read the [full text](https://code.fb.com/codeofconduct/) +so that you can understand what actions will and will not be tolerated. diff --git a/benchmarks/dlrm/ootb/CONTRIBUTING.md b/benchmarks/dlrm/ootb/CONTRIBUTING.md new file mode 100644 index 0000000..cc013a1 --- /dev/null +++ b/benchmarks/dlrm/ootb/CONTRIBUTING.md @@ -0,0 +1,36 @@ +# Contributing to DLRM +We want to make contributing to this project as easy and transparent as +possible. + +## Pull Requests +We actively welcome your pull requests. + +1. Fork the repo and create your branch from `master`. +2. If you've added code that should be tested, add tests. +3. If you've changed APIs, update the documentation. +4. Ensure the test suite passes. +5. Make sure your code lints. +6. 
If you haven't already, complete the Contributor License Agreement ("CLA"). + +## Contributor License Agreement ("CLA") +In order to accept your pull request, we need you to submit a CLA. You only need +to do this once to work on any of Facebook's open source projects. + +Complete your CLA here: <https://code.facebook.com/cla> + +## Issues +We use GitHub issues to track public bugs. Please ensure your description is +clear and has sufficient instructions to be able to reproduce the issue. + +Facebook has a [bounty program](https://www.facebook.com/whitehat/) for the safe +disclosure of security bugs. In those cases, please go through the process +outlined on that page and do not file a public issue. + +## Coding Style +* 4 spaces for indentation rather than tabs +* 80 character line length +* in general, please maintain a consistent style with the rest of the code + +## License +By contributing to DLRM, you agree that your contributions will be licensed +under the LICENSE file in the root directory of this source tree. diff --git a/benchmarks/dlrm/ootb/Dockerfile b/benchmarks/dlrm/ootb/Dockerfile new file mode 100644 index 0000000..0e4b750 --- /dev/null +++ b/benchmarks/dlrm/ootb/Dockerfile @@ -0,0 +1,15 @@ +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +ARG FROM_IMAGE_NAME=pytorch/pytorch:1.3-cuda10.1-cudnn7-runtime +FROM ${FROM_IMAGE_NAME} + +ADD requirements.txt . +RUN pip install -r requirements.txt + +RUN pip install torch==1.3.1 + +WORKDIR /code +ADD . . diff --git a/benchmarks/dlrm/ootb/README.md b/benchmarks/dlrm/ootb/README.md new file mode 100644 index 0000000..7096b83 --- /dev/null +++ b/benchmarks/dlrm/ootb/README.md @@ -0,0 +1,389 @@ +Deep Learning Recommendation Model for Personalization and Recommendation Systems: +================================================================================= +*Copyright (c) Facebook, Inc. 
and its affiliates.* + +Description: +------------ +An implementation of a deep learning recommendation model (DLRM) +The model input consists of dense and sparse features. The former is a vector +of floating point values. The latter is a list of sparse indices into +embedding tables, which consist of vectors of floating point values. +The selected vectors are passed to mlp networks denoted by triangles, +in some cases the vectors are interacted through operators (Ops). +``` +output: + probability of a click +model: | + /\ + /__\ + | + _____________________> Op <___________________ + / | \ + /\ /\ /\ + /__\ /__\ ... /__\ + | | | + | Op Op + | ____/__\_____ ____/__\____ + | |_Emb_|____|__| ... |_Emb_|__|___| +input: +[ dense features ] [sparse indices] , ..., [sparse indices] +``` + More precise definition of model layers: + 1) fully connected layers of an mlp + + z = f(y) + + y = Wx + b + + 2) embedding lookup (for a list of sparse indices p=[p1,...,pk]) + + z = Op(e1,...,ek) + + obtain vectors e1=E[:,p1], ..., ek=E[:,pk] + + 3) Operator Op can be one of the following + + Sum(e1,...,ek) = e1 + ... + ek + + Dot(e1,...,ek) = [e1'e1, ..., e1'ek, ..., ek'e1, ..., ek'ek] + + Cat(e1,...,ek) = [e1', ..., ek']' + + where ' denotes transpose operation + +Cite [Work](https://arxiv.org/abs/1906.00091): +``` +@article{DLRM19, + author = {Maxim Naumov and Dheevatsa Mudigere and Hao{-}Jun Michael Shi and Jianyu Huang and Narayanan Sundaraman and Jongsoo Park and Xiaodong Wang and Udit Gupta and Carole{-}Jean Wu and Alisson G. 
Azzolini and Dmytro Dzhulgakov and Andrey Mallevich and Ilia Cherniavskii and Yinghai Lu and Raghuraman Krishnamoorthi and Ansha Yu and Volodymyr Kondratenko and Stephanie Pereira and Xianjie Chen and Wenlin Chen and Vijay Rao and Bill Jia and Liang Xiong and Misha Smelyanskiy}, + title = {Deep Learning Recommendation Model for Personalization and Recommendation Systems}, + journal = {CoRR}, + volume = {abs/1906.00091}, + year = {2019}, + url = {https://arxiv.org/abs/1906.00091}, +} +``` + +Related Work: + +On the [system architecture implications](https://arxiv.org/abs/1906.03109), with DLRM as one of the benchmarks, +``` +@article{ArchImpl19, + author = {Udit Gupta and Xiaodong Wang and Maxim Naumov and Carole{-}Jean Wu and Brandon Reagen and David Brooks and Bradford Cottel and Kim M. Hazelwood and Bill Jia and Hsien{-}Hsin S. Lee and Andrey Malevich and Dheevatsa Mudigere and Mikhail Smelyanskiy and Liang Xiong and Xuan Zhang}, + title = {The Architectural Implications of Facebook's DNN-based Personalized Recommendation}, + journal = {CoRR}, + volume = {abs/1906.03109}, + year = {2019}, + url = {https://arxiv.org/abs/1906.03109}, +} +``` + +On the [embedding compression techniques (for number of vectors)](https://arxiv.org/abs/1909.02107), with DLRM as one of the benchmarks, +``` +@article{QuoRemTrick19, + author = {Hao{-}Jun Michael Shi and Dheevatsa Mudigere and Maxim Naumov and Jiyan Yang}, + title = {Compositional Embeddings Using Complementary Partitions for Memory-Efficient Recommendation Systems}, + journal = {CoRR}, + volume = {abs/1909.02107}, + year = {2019}, + url = {https://arxiv.org/abs/1909.02107}, +} +``` + +On the [embedding compression techniques (for dimension of vectors)](https://arxiv.org/abs/1909.11810), with DLRM as one of the benchmarks, +``` +@article{MixDimTrick19, + author = {Antonio Ginart and Maxim Naumov and Dheevatsa Mudigere and Jiyan Yang and James Zou}, + title = {Mixed Dimension Embeddings with Application to Memory-Efficient 
Recommendation Systems}, + journal = {CoRR}, + volume = {abs/1909.11810}, + year = {2019}, + url = {https://arxiv.org/abs/1909.11810}, +} +``` + +Implementation +-------------- +**DLRM PyTorch**. Implementation of DLRM in PyTorch framework: + + dlrm_s_pytorch.py + +**DLRM Caffe2**. Implementation of DLRM in Caffe2 framework: + + dlrm_s_caffe2.py + +**DLRM Data**. Implementation of DLRM data generation and loading: + + dlrm_data_pytorch.py, dlrm_data_caffe2.py, data_utils.py + +**DLRM Tests**. Implementation of DLRM tests in ./test + + dlrm_s_test.sh + +**DLRM Benchmarks**. Implementation of DLRM benchmarks in ./bench + + dlrm_s_criteo_kaggle.sh, dlrm_s_criteo_terabyte.sh, dlrm_s_benchmark.sh + +Related Work: + +On the [Glow framework](https://github.com/pytorch/glow) implementation +``` +https://github.com/pytorch/glow/blob/master/tests/unittests/RecommendationSystemTest.cpp +``` +On the [FlexFlow framework](https://github.com/flexflow/FlexFlow) distributed implementation with Legion backend +``` +https://github.com/flexflow/FlexFlow/blob/master/examples/cpp/DLRM/dlrm.cc +``` + +How to run dlrm code? 
+-------------------- +1) A sample run of the code, with a tiny model is shown below +``` +$ python dlrm_s_pytorch.py --mini-batch-size=2 --data-size=6 +time/loss/accuracy (if enabled): +Finished training it 1/3 of epoch 0, -1.00 ms/it, loss 0.451893, accuracy 0.000% +Finished training it 2/3 of epoch 0, -1.00 ms/it, loss 0.402002, accuracy 0.000% +Finished training it 3/3 of epoch 0, -1.00 ms/it, loss 0.275460, accuracy 0.000% +``` +2) A sample run of the code, with a tiny model in debug mode +``` +$ python dlrm_s_pytorch.py --mini-batch-size=2 --data-size=6 --debug-mode +model arch: +mlp top arch 3 layers, with input to output dimensions: +[8 4 2 1] +# of interactions +8 +mlp bot arch 2 layers, with input to output dimensions: +[4 3 2] +# of features (sparse and dense) +4 +dense feature size +4 +sparse feature size +2 +# of embeddings (= # of sparse features) 3, with dimensions 2x: +[4 3 2] +data (inputs and targets): +mini-batch: 0 +[[0.69647 0.28614 0.22685 0.55131] + [0.71947 0.42311 0.98076 0.68483]] +[[[1], [0, 1]], [[0], [1]], [[1], [0]]] +[[0.55679] + [0.15896]] +mini-batch: 1 +[[0.36179 0.22826 0.29371 0.63098] + [0.0921 0.4337 0.43086 0.49369]] +[[[1], [0, 2, 3]], [[1], [1, 2]], [[1], [1]]] +[[0.15307] + [0.69553]] +mini-batch: 2 +[[0.60306 0.54507 0.34276 0.30412] + [0.41702 0.6813 0.87546 0.51042]] +[[[2], [0, 1, 2]], [[1], [2]], [[1], [1]]] +[[0.31877] + [0.69197]] +initial parameters (weights and bias): +[[ 0.05438 -0.11105] + [ 0.42513 0.34167] + [-0.1426 -0.45641] + [-0.19523 -0.10181]] +[[ 0.23667 0.57199] + [-0.16638 0.30316] + [ 0.10759 0.22136]] +[[-0.49338 -0.14301] + [-0.36649 -0.22139]] +[[0.51313 0.66662 0.10591 0.13089] + [0.32198 0.66156 0.84651 0.55326] + [0.85445 0.38484 0.31679 0.35426]] +[0.17108 0.82911 0.33867] +[[0.55237 0.57855 0.52153] + [0.00269 0.98835 0.90534]] +[0.20764 0.29249] +[[0.52001 0.90191 0.98363 0.25754 0.56436 0.80697 0.39437 0.73107] + [0.16107 0.6007 0.86586 0.98352 0.07937 0.42835 0.20454 0.45064] + [0.54776 
0.09333 0.29686 0.92758 0.569 0.45741 0.75353 0.74186] + [0.04858 0.7087 0.83924 0.16594 0.781 0.28654 0.30647 0.66526]] +[0.11139 0.66487 0.88786 0.69631] +[[0.44033 0.43821 0.7651 0.56564] + [0.0849 0.58267 0.81484 0.33707]] +[0.92758 0.75072] +[[0.57406 0.75164]] +[0.07915] +DLRM_Net( + (emb_l): ModuleList( + (0): EmbeddingBag(4, 2, mode=sum) + (1): EmbeddingBag(3, 2, mode=sum) + (2): EmbeddingBag(2, 2, mode=sum) + ) + (bot_l): Sequential( + (0): Linear(in_features=4, out_features=3, bias=True) + (1): ReLU() + (2): Linear(in_features=3, out_features=2, bias=True) + (3): ReLU() + ) + (top_l): Sequential( + (0): Linear(in_features=8, out_features=4, bias=True) + (1): ReLU() + (2): Linear(in_features=4, out_features=2, bias=True) + (3): ReLU() + (4): Linear(in_features=2, out_features=1, bias=True) + (5): Sigmoid() + ) +) +time/loss/accuracy (if enabled): +Finished training it 1/3 of epoch 0, -1.00 ms/it, loss 0.451893, accuracy 0.000% +Finished training it 2/3 of epoch 0, -1.00 ms/it, loss 0.402002, accuracy 0.000% +Finished training it 3/3 of epoch 0, -1.00 ms/it, loss 0.275460, accuracy 0.000% +updated parameters (weights and bias): +[[ 0.0543 -0.1112 ] + [ 0.42513 0.34167] + [-0.14283 -0.45679] + [-0.19532 -0.10197]] +[[ 0.23667 0.57199] + [-0.1666 0.30285] + [ 0.10751 0.22124]] +[[-0.49338 -0.14301] + [-0.36664 -0.22164]] +[[0.51313 0.66663 0.10591 0.1309 ] + [0.32196 0.66154 0.84649 0.55324] + [0.85444 0.38482 0.31677 0.35425]] +[0.17109 0.82907 0.33863] +[[0.55238 0.57857 0.52154] + [0.00265 0.98825 0.90528]] +[0.20764 0.29244] +[[0.51996 0.90184 0.98368 0.25752 0.56436 0.807 0.39437 0.73107] + [0.16096 0.60055 0.86596 0.98348 0.07938 0.42842 0.20453 0.45064] + [0.5476 0.0931 0.29701 0.92752 0.56902 0.45752 0.75351 0.74187] + [0.04849 0.70857 0.83933 0.1659 0.78101 0.2866 0.30646 0.66526]] +[0.11137 0.66482 0.88778 0.69627] +[[0.44029 0.43816 0.76502 0.56561] + [0.08485 0.5826 0.81474 0.33702]] +[0.92754 0.75067] +[[0.57379 0.7514 ]] +[0.07908] +``` + 
+Testing +------- +Testing scripts to confirm functional correctness of the code +``` +./test/dlrm_s_test.sh +Running commands ... +python dlrm_s_pytorch.py +python dlrm_s_caffe2.py +Checking results ... +diff test1 (no numeric values in the output = SUCCESS) +diff test2 (no numeric values in the output = SUCCESS) +diff test3 (no numeric values in the output = SUCCESS) +diff test4 (no numeric values in the output = SUCCESS) +``` + +*NOTE: Testing scripts accept extra arguments which will be passed along to the model, such as --use-gpu* + +Benchmarking +------------ +1) Performance benchmarking + ``` + ./bench/dlrm_s_benchmark.sh + ``` + +2) The code supports interface with the [Criteo Kaggle Display Advertising Challenge Dataset](https://labs.criteo.com/2014/02/kaggle-display-advertising-challenge-dataset/). + - Please do the following to prepare the dataset for use with DLRM code: + - First, specify the raw data file (train.txt) as downloaded with --raw-data-file= + - This is then pre-processed (categorize, concat across days...) to allow using with dlrm code + - The processed data is stored as *.npz file in /input/*.npz + - The processed file (*.npz) can be used for subsequent runs with --processed-data-file= + - The model can be trained using the following script + ``` + ./bench/dlrm_s_criteo_kaggle.sh [--test-freq=1024] + ``` + + + +3) The code supports interface with the [Criteo Terabyte Dataset](https://labs.criteo.com/2013/12/download-terabyte-click-logs/). + - Please do the following to prepare the dataset for use with DLRM code: + - First, download the raw data files day_0.gz, ...,day_23.gz and unzip them + - Specify the location of the unzipped text files day_0, ...,day_23, using --raw-data-file= (the day number will be appended automatically) + - These are then pre-processed (categorize, concat across days...) 
to allow using with dlrm code + - The processed data is stored as *.npz file in /input/*.npz + - The processed file (*.npz) can be used for subsequent runs with --processed-data-file= + - The model can be trained using the following script + ``` + ./bench/dlrm_s_criteo_terabyte.sh ["--test-freq=10240 --memory-map --data-sub-sample-rate=0.875"] + ``` + - Corresponding pre-trained model is available under [CC-BY-NC license](https://creativecommons.org/licenses/by-nc/2.0/) and can be downloaded here + [dlrm_emb64_subsample0.875_maxindrange10M_pretrained.pt](https://dlrm.s3-us-west-1.amazonaws.com/models/tb0875_10M.pt) + + + +*NOTE: Benchmarking scripts accept extra arguments which will be passed along to the model, such as --num-batches=100 to limit the number of data samples* + +4) The code supports interface with [MLPerf benchmark](https://mlperf.org). + - Please refer to the following training parameters + ``` + --mlperf-logging that keeps track of multiple metrics, including area under the curve (AUC) + + --mlperf-acc-threshold that allows early stopping based on accuracy metric + + --mlperf-auc-threshold that allows early stopping based on AUC metric + + --mlperf-bin-loader that enables preprocessing of data into a single binary file + + --mlperf-bin-shuffle that controls whether a random shuffle of mini-batches is performed + ``` + - The MLPerf training model is completely specified and can be trained using the following script + ``` + ./bench/run_and_time.sh [--use-gpu] + ``` + - Corresponding pre-trained model is available under [CC-BY-NC license](https://creativecommons.org/licenses/by-nc/2.0/) and can be downloaded here + [dlrm_emb128_subsample0.0_maxindrange40M_pretrained.pt](https://dlrm.s3-us-west-1.amazonaws.com/models/tb00_40M.pt) + +5) The code now supports synchronous distributed training, we support gloo/nccl/mpi backend, we provide launching mode for [pytorch distributed launcher](https://pytorch.org/docs/stable/distributed.html#launch-utility) and 
Mpirun. For MPI, users need to write their own MPI launching scripts for configuring the running hosts. For example, using pytorch distributed launcher, we can have the following command as launching scripts: +``` +# for single node 8 gpus and nccl as backend on randomly generated dataset: +python -m torch.distributed.launch --nproc_per_node=8 dlrm_s_pytorch.py --arch-embedding-size="80000-80000-80000-80000-80000-80000-80000-80000" --arch-sparse-feature-size=64 --arch-mlp-bot="128-128-128-128" --arch-mlp-top="512-512-512-256-1" --max-ind-range=40000000 +--data-generation=random --loss-function=bce --round-targets=True --learning-rate=1.0 --mini-batch-size=2048 --print-freq=2 --print-time --test-freq=2 --test-mini-batch-size=2048 --memory-map --use-gpu --num-batches=100 --dist-backend=nccl + +# for multiple nodes, user can add the related argument according to the launcher manual like: +--nnodes=2 --node_rank=0 --master_addr="192.168.1.1" --master_port=1234 +``` + + +Model checkpoint saving/loading +------------------------------- +During training, the model can be saved using --save-model= + +The model is saved if there is an improvement in test accuracy (which is checked at --test-freq intervals). + +A previously saved model can be loaded using --load-model= + +Once loaded the model can be used to continue training, with the saved model being a checkpoint. +Alternatively, the saved model can be used to evaluate only on the test data-set by specifying --inference-only option. + + +Version +------- +0.1 : Initial release of the DLRM code + +1.0 : DLRM with distributed training, cpu support for row-wise adagrad optimizer + +Requirements +------------ +pytorch-nightly (*11/10/20*) + +scikit-learn + +numpy + +onnx (*optional*) + +pydot (*optional*) + +torchviz (*optional*) + +mpi (*optional for distributed backend*) + + +License +------- +This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree. 
diff --git a/benchmarks/dlrm/ootb/bench/dlrm_s_benchmark.sh b/benchmarks/dlrm/ootb/bench/dlrm_s_benchmark.sh new file mode 100755 index 0000000..c6a75e2 --- /dev/null +++ b/benchmarks/dlrm/ootb/bench/dlrm_s_benchmark.sh @@ -0,0 +1,147 @@ +#!/bin/bash +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +#check if extra argument is passed to the test +if [[ $# == 1 ]]; then + dlrm_extra_option=$1 +else + dlrm_extra_option="" +fi +#echo $dlrm_extra_option + +cpu=1 +gpu=1 +pt=1 +c2=1 + +ncores=28 #12 #6 +nsockets="0" + +ngpus="1 2 4 8" + +numa_cmd="numactl --physcpubind=0-$((ncores-1)) -m $nsockets" #run on one socket, without HT +dlrm_pt_bin="python dlrm_s_pytorch.py" +dlrm_c2_bin="python dlrm_s_caffe2.py" + +data=random #synthetic +print_freq=100 +rand_seed=727 + +c2_net="async_scheduling" + +#Model param +mb_size=2048 #1024 #512 #256 +nbatches=1000 #500 #100 +bot_mlp="512-512-64" +top_mlp="1024-1024-1024-1" +emb_size=64 +nindices=100 +emb="1000000-1000000-1000000-1000000-1000000-1000000-1000000-1000000" +interaction="dot" +tnworkers=0 +tmb_size=16384 + +#_args="--mini-batch-size="${mb_size}\ +_args=" --num-batches="${nbatches}\ +" --data-generation="${data}\ +" --arch-mlp-bot="${bot_mlp}\ +" --arch-mlp-top="${top_mlp}\ +" --arch-sparse-feature-size="${emb_size}\ +" --arch-embedding-size="${emb}\ +" --num-indices-per-lookup="${nindices}\ +" --arch-interaction-op="${interaction}\ +" --numpy-rand-seed="${rand_seed}\ +" --print-freq="${print_freq}\ +" --print-time"\ +" --enable-profiling " + +c2_args=" --caffe2-net-type="${c2_net} + + +# CPU Benchmarking +if [ $cpu = 1 ]; then + echo "--------------------------------------------" + echo "CPU Benchmarking - running on $ncores cores" + echo "--------------------------------------------" + if [ $pt = 1 ]; then + outf="model1_CPU_PT_$ncores.log" + outp="dlrm_s_pytorch.prof" + echo 
"-------------------------------" + echo "Running PT (log file: $outf)" + echo "-------------------------------" + cmd="$numa_cmd $dlrm_pt_bin --mini-batch-size=$mb_size --test-mini-batch-size=$tmb_size --test-num-workers=$tnworkers $_args $dlrm_extra_option > $outf" + echo $cmd + eval $cmd + min=$(grep "iteration" $outf | awk 'BEGIN{best=999999} {if (best > $7) best=$7} END{print best}') + echo "Min time per iteration = $min" + # move profiling file(s) + mv $outp ${outf//".log"/".prof"} + mv ${outp//".prof"/".json"} ${outf//".log"/".json"} + + fi + if [ $c2 = 1 ]; then + outf="model1_CPU_C2_$ncores.log" + outp="dlrm_s_caffe2.prof" + echo "-------------------------------" + echo "Running C2 (log file: $outf)" + echo "-------------------------------" + cmd="$numa_cmd $dlrm_c2_bin --mini-batch-size=$mb_size $_args $c2_args $dlrm_extra_option 1> $outf 2> $outp" + echo $cmd + eval $cmd + min=$(grep "iteration" $outf | awk 'BEGIN{best=999999} {if (best > $7) best=$7} END{print best}') + echo "Min time per iteration = $min" + # move profiling file (collected from stderr above) + mv $outp ${outf//".log"/".prof"} + fi +fi + +# GPU Benchmarking +if [ $gpu = 1 ]; then + echo "--------------------------------------------" + echo "GPU Benchmarking - running on $ngpus GPUs" + echo "--------------------------------------------" + for _ng in $ngpus + do + # weak scaling + # _mb_size=$((mb_size*_ng)) + # strong scaling + _mb_size=$((mb_size*1)) + _gpus=$(seq -s, 0 $((_ng-1))) + cuda_arg="CUDA_VISIBLE_DEVICES=$_gpus" + echo "-------------------" + echo "Using GPUS: "$_gpus + echo "-------------------" + if [ $pt = 1 ]; then + outf="model1_GPU_PT_$_ng.log" + outp="dlrm_s_pytorch.prof" + echo "-------------------------------" + echo "Running PT (log file: $outf)" + echo "-------------------------------" + cmd="$cuda_arg $dlrm_pt_bin --mini-batch-size=$_mb_size --test-mini-batch-size=$tmb_size --test-num-workers=$tnworkers $_args --use-gpu $dlrm_extra_option > $outf" + echo $cmd + 
eval $cmd + min=$(grep "iteration" $outf | awk 'BEGIN{best=999999} {if (best > $7) best=$7} END{print best}') + echo "Min time per iteration = $min" + # move profiling file(s) + mv $outp ${outf//".log"/".prof"} + mv ${outp//".prof"/".json"} ${outf//".log"/".json"} + fi + if [ $c2 = 1 ]; then + outf="model1_GPU_C2_$_ng.log" + outp="dlrm_s_caffe2.prof" + echo "-------------------------------" + echo "Running C2 (log file: $outf)" + echo "-------------------------------" + cmd="$cuda_arg $dlrm_c2_bin --mini-batch-size=$_mb_size $_args $c2_args --use-gpu $dlrm_extra_option 1> $outf 2> $outp" + echo $cmd + eval $cmd + min=$(grep "iteration" $outf | awk 'BEGIN{best=999999} {if (best > $7) best=$7} END{print best}') + echo "Min time per iteration = $min" + # move profiling file (collected from stderr above) + mv $outp ${outf//".log"/".prof"} + fi + done +fi diff --git a/benchmarks/dlrm/ootb/bench/dlrm_s_criteo_kaggle.sh b/benchmarks/dlrm/ootb/bench/dlrm_s_criteo_kaggle.sh new file mode 100755 index 0000000..867d8c0 --- /dev/null +++ b/benchmarks/dlrm/ootb/bench/dlrm_s_criteo_kaggle.sh @@ -0,0 +1,32 @@ +#!/bin/bash +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +# +#WARNING: must have compiled PyTorch and caffe2 + +#check if extra argument is passed to the test +if [[ $# == 1 ]]; then + dlrm_extra_option=$1 +else + dlrm_extra_option="" +fi +#echo $dlrm_extra_option + +dlrm_pt_bin="python dlrm_s_pytorch.py" +dlrm_c2_bin="python dlrm_s_caffe2.py" + +echo "run pytorch ..." +# WARNING: the following parameters will be set based on the data set +# --arch-embedding-size=... (sparse feature sizes) +# --arch-mlp-bot=... 
(the input to the first layer of bottom mlp) +$dlrm_pt_bin --arch-sparse-feature-size=16 --arch-mlp-bot="13-512-256-64-16" --arch-mlp-top="512-256-1" --data-generation=dataset --data-set=kaggle --raw-data-file=./input/train.txt --processed-data-file=./input/kaggleAdDisplayChallenge_processed.npz --loss-function=bce --round-targets=True --learning-rate=0.1 --mini-batch-size=128 --print-freq=1024 --print-time --test-mini-batch-size=16384 --test-num-workers=16 $dlrm_extra_option 2>&1 | tee run_kaggle_pt.log + +echo "run caffe2 ..." +# WARNING: the following parameters will be set based on the data set +# --arch-embedding-size=... (sparse feature sizes) +# --arch-mlp-bot=... (the input to the first layer of bottom mlp) +$dlrm_c2_bin --arch-sparse-feature-size=16 --arch-mlp-bot="13-512-256-64-16" --arch-mlp-top="512-256-1" --data-generation=dataset --data-set=kaggle --raw-data-file=./input/train.txt --processed-data-file=./input/kaggleAdDisplayChallenge_processed.npz --loss-function=bce --round-targets=True --learning-rate=0.1 --mini-batch-size=128 --print-freq=1024 --print-time $dlrm_extra_option 2>&1 | tee run_kaggle_c2.log + +echo "done" diff --git a/benchmarks/dlrm/ootb/bench/dlrm_s_criteo_terabyte.sh b/benchmarks/dlrm/ootb/bench/dlrm_s_criteo_terabyte.sh new file mode 100755 index 0000000..5a4ee94 --- /dev/null +++ b/benchmarks/dlrm/ootb/bench/dlrm_s_criteo_terabyte.sh @@ -0,0 +1,32 @@ +#!/bin/bash +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +# +#WARNING: must have compiled PyTorch and caffe2 + +#check if extra argument is passed to the test +if [[ $# == 1 ]]; then + dlrm_extra_option=$1 +else + dlrm_extra_option="" +fi +#echo $dlrm_extra_option + +dlrm_pt_bin="python dlrm_s_pytorch.py" +dlrm_c2_bin="python dlrm_s_caffe2.py" + +echo "run pytorch ..." 
+# WARNING: the following parameters will be set based on the data set +# --arch-embedding-size=... (sparse feature sizes) +# --arch-mlp-bot=... (the input to the first layer of bottom mlp) +$dlrm_pt_bin --arch-sparse-feature-size=64 --arch-mlp-bot="13-512-256-64" --arch-mlp-top="512-512-256-1" --max-ind-range=10000000 --data-generation=dataset --data-set=terabyte --raw-data-file=./input/day --processed-data-file=./input/terabyte_processed.npz --loss-function=bce --round-targets=True --learning-rate=0.1 --mini-batch-size=2048 --print-freq=1024 --print-time --test-mini-batch-size=16384 --test-num-workers=16 $dlrm_extra_option 2>&1 | tee run_terabyte_pt.log + +echo "run caffe2 ..." +# WARNING: the following parameters will be set based on the data set +# --arch-embedding-size=... (sparse feature sizes) +# --arch-mlp-bot=... (the input to the first layer of bottom mlp) +$dlrm_c2_bin --arch-sparse-feature-size=64 --arch-mlp-bot="13-512-256-64" --arch-mlp-top="512-512-256-1" --max-ind-range=10000000 --data-generation=dataset --data-set=terabyte --raw-data-file=./input/day --processed-data-file=./input/terabyte_processed.npz --loss-function=bce --round-targets=True --learning-rate=0.1 --mini-batch-size=2048 --print-freq=1024 --print-time $dlrm_extra_option 2>&1 | tee run_terabyte_c2.log + +echo "done" diff --git a/benchmarks/dlrm/ootb/bench/run_and_time.sh b/benchmarks/dlrm/ootb/bench/run_and_time.sh new file mode 100755 index 0000000..e241d80 --- /dev/null +++ b/benchmarks/dlrm/ootb/bench/run_and_time.sh @@ -0,0 +1,19 @@ +#!/bin/bash +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
+# +#WARNING: must have compiled PyTorch and caffe2 + +#check if extra argument is passed to the test +if [[ $# == 1 ]]; then + dlrm_extra_option=$1 +else + dlrm_extra_option="" +fi +#echo $dlrm_extra_option + +python dlrm_s_pytorch.py --arch-sparse-feature-size=128 --arch-mlp-bot="13-512-256-128" --arch-mlp-top="1024-1024-512-256-1" --max-ind-range=40000000 --data-generation=dataset --data-set=terabyte --raw-data-file=./input/day --processed-data-file=./input/terabyte_processed.npz --loss-function=bce --round-targets=True --learning-rate=1.0 --mini-batch-size=2048 --print-freq=2048 --print-time --test-freq=102400 --test-mini-batch-size=16384 --test-num-workers=16 --memory-map --mlperf-logging --mlperf-auc-threshold=0.8025 --mlperf-bin-loader --mlperf-bin-shuffle $dlrm_extra_option 2>&1 | tee run_terabyte_mlperf_pt.log + +echo "done" diff --git a/benchmarks/dlrm/ootb/cython/cython_compile.py b/benchmarks/dlrm/ootb/cython/cython_compile.py new file mode 100644 index 0000000..ffacf08 --- /dev/null +++ b/benchmarks/dlrm/ootb/cython/cython_compile.py @@ -0,0 +1,26 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +# +# Description: compile .so from python code + +from __future__ import absolute_import, division, print_function, unicode_literals + +from setuptools import setup +from Cython.Build import cythonize +from distutils.extension import Extension + +ext_modules = [ + Extension( + "data_utils_cython", + ["data_utils_cython.pyx"], + extra_compile_args=['-O3'], + extra_link_args=['-O3'], + ) +] + +setup( + name='data_utils_cython', + ext_modules=cythonize(ext_modules) +) diff --git a/benchmarks/dlrm/ootb/cython/cython_criteo.py b/benchmarks/dlrm/ootb/cython/cython_criteo.py new file mode 100644 index 0000000..46a0b7d --- /dev/null +++ b/benchmarks/dlrm/ootb/cython/cython_criteo.py @@ -0,0 +1,55 @@ +# Copyright (c) Facebook, Inc. 
and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +# +# Description: run dataset pre-processing in standalone mode +# WARNING: These steps are required to work with Cython +# 1. Instal Cython +# > sudo yum install Cython +# 2. Please copy data_utils.py into data_utils_cython.pyx +# 3. Compile the data_utils_cython.pyx to generate .so +# (it's important to keep extension .pyx rather than .py +# to ensure the C/C++ .so no .py is loaded at import time) +# > python cython_compile.py build_ext --inplace +# This should create data_utils_cython.so, which can be loaded below with "import" +# 4. Run standalone datatset preprocessing to generate .npz files +# a. Kaggle +# > python cython_criteo.py --data-set=kaggle --raw-data-file=./input/train.txt +# --processed-data-file=./input/kaggleAdDisplayChallenge_processed.npz +# b. Terabyte +# > python cython_criteo.py --max-ind-range=10000000 [--memory-map] --data-set=terabyte +# --raw-data-file=./input/day --processed-data-file=./input/terabyte_processed.npz + +from __future__ import absolute_import, division, print_function, unicode_literals + +import data_utils_cython as duc + +if __name__ == "__main__": + ### import packages ### + import argparse + + ### parse arguments ### + parser = argparse.ArgumentParser( + description="Preprocess Criteo dataset" + ) + # model related parameters + parser.add_argument("--max-ind-range", type=int, default=-1) + parser.add_argument("--data-sub-sample-rate", type=float, default=0.0) # in [0, 1] + parser.add_argument("--data-randomize", type=str, default="total") # or day or none + parser.add_argument("--memory-map", action="store_true", default=False) + parser.add_argument("--data-set", type=str, default="kaggle") # or terabyte + parser.add_argument("--raw-data-file", type=str, default="") + parser.add_argument("--processed-data-file", type=str, default="") + args = parser.parse_args() + + 
duc.loadDataset( + args.data_set, + args.max_ind_range, + args.data_sub_sample_rate, + args.data_randomize, + "train", + args.raw_data_file, + args.processed_data_file, + args.memory_map + ) diff --git a/benchmarks/dlrm/ootb/data_loader_terabyte.py b/benchmarks/dlrm/ootb/data_loader_terabyte.py new file mode 100644 index 0000000..cf0db71 --- /dev/null +++ b/benchmarks/dlrm/ootb/data_loader_terabyte.py @@ -0,0 +1,368 @@ +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + + +from __future__ import absolute_import, division, print_function, unicode_literals + +import os +import numpy as np +from torch.utils.data import Dataset +import torch +import time +import math +from tqdm import tqdm +import argparse + + +class DataLoader: + """ + DataLoader dedicated for the Criteo Terabyte Click Logs dataset + """ + + def __init__( + self, + data_filename, + data_directory, + days, + batch_size, + max_ind_range=-1, + split="train", + drop_last_batch=False + ): + self.data_filename = data_filename + self.data_directory = data_directory + self.days = days + self.batch_size = batch_size + self.max_ind_range = max_ind_range + + total_file = os.path.join( + data_directory, + data_filename + "_day_count.npz" + ) + with np.load(total_file) as data: + total_per_file = data["total_per_file"][np.array(days)] + + self.length = sum(total_per_file) + if split == "test" or split == "val": + self.length = int(np.ceil(self.length / 2.)) + self.split = split + self.drop_last_batch = drop_last_batch + + def __iter__(self): + return iter( + _batch_generator( + self.data_filename, self.data_directory, self.days, + self.batch_size, self.split, self.drop_last_batch, self.max_ind_range + ) + ) + + def __len__(self): + if self.drop_last_batch: + return self.length // self.batch_size + else: + return math.ceil(self.length / self.batch_size) + + +def 
_transform_features( + x_int_batch, x_cat_batch, y_batch, max_ind_range, flag_input_torch_tensor=False +): + if max_ind_range > 0: + x_cat_batch = x_cat_batch % max_ind_range + + if flag_input_torch_tensor: + x_int_batch = torch.log(x_int_batch.clone().detach().type(torch.float) + 1) + x_cat_batch = x_cat_batch.clone().detach().type(torch.long) + y_batch = y_batch.clone().detach().type(torch.float32).view(-1, 1) + else: + x_int_batch = torch.log(torch.tensor(x_int_batch, dtype=torch.float) + 1) + x_cat_batch = torch.tensor(x_cat_batch, dtype=torch.long) + y_batch = torch.tensor(y_batch, dtype=torch.float32).view(-1, 1) + + batch_size = x_cat_batch.shape[0] + feature_count = x_cat_batch.shape[1] + lS_o = torch.arange(batch_size).reshape(1, -1).repeat(feature_count, 1) + + return x_int_batch, lS_o, x_cat_batch.t(), y_batch.view(-1, 1) + + +def _batch_generator( + data_filename, data_directory, days, batch_size, split, drop_last, max_ind_range +): + previous_file = None + for day in days: + filepath = os.path.join( + data_directory, + data_filename + "_{}_reordered.npz".format(day) + ) + + # print('Loading file: ', filepath) + with np.load(filepath) as data: + x_int = data["X_int"] + x_cat = data["X_cat"] + y = data["y"] + + samples_in_file = y.shape[0] + batch_start_idx = 0 + if split == "test" or split == "val": + length = int(np.ceil(samples_in_file / 2.)) + if split == "test": + samples_in_file = length + elif split == "val": + batch_start_idx = samples_in_file - length + + while batch_start_idx < samples_in_file - batch_size: + + missing_samples = batch_size + if previous_file is not None: + missing_samples -= previous_file['y'].shape[0] + + current_slice = slice(batch_start_idx, batch_start_idx + missing_samples) + + x_int_batch = x_int[current_slice] + x_cat_batch = x_cat[current_slice] + y_batch = y[current_slice] + + if previous_file is not None: + x_int_batch = np.concatenate( + [previous_file['x_int'], x_int_batch], + axis=0 + ) + x_cat_batch = 
np.concatenate( + [previous_file['x_cat'], x_cat_batch], + axis=0 + ) + y_batch = np.concatenate([previous_file['y'], y_batch], axis=0) + previous_file = None + + if x_int_batch.shape[0] != batch_size: + raise ValueError('should not happen') + + yield _transform_features(x_int_batch, x_cat_batch, y_batch, max_ind_range) + + batch_start_idx += missing_samples + if batch_start_idx != samples_in_file: + current_slice = slice(batch_start_idx, samples_in_file) + if previous_file is not None: + previous_file = { + 'x_int' : np.concatenate( + [previous_file['x_int'], x_int[current_slice]], + axis=0 + ), + 'x_cat' : np.concatenate( + [previous_file['x_cat'], x_cat[current_slice]], + axis=0 + ), + 'y' : np.concatenate([previous_file['y'], y[current_slice]], axis=0) + } + else: + previous_file = { + 'x_int' : x_int[current_slice], + 'x_cat' : x_cat[current_slice], + 'y' : y[current_slice] + } + + if not drop_last: + yield _transform_features( + previous_file['x_int'], + previous_file['x_cat'], + previous_file['y'], + max_ind_range + ) + + +def _test(): + generator = _batch_generator( + data_filename='day', + data_directory='./input', + days=range(23), + split="train", + batch_size=2048, + drop_last=True, + max_ind_range=-1 + ) + t1 = time.time() + for x_int, lS_o, x_cat, y in generator: + t2 = time.time() + time_diff = t2 - t1 + t1 = t2 + print( + "time {} x_int.shape: {} lS_o.shape: {} x_cat.shape: {} y.shape: {}".format( + time_diff, x_int.shape, lS_o.shape, x_cat.shape, y.shape + ) + ) + + +class CriteoBinDataset(Dataset): + """Binary version of criteo dataset.""" + + def __init__(self, data_file, counts_file, + batch_size=1, max_ind_range=-1, bytes_per_feature=4): + # dataset + self.tar_fea = 1 # single target + self.den_fea = 13 # 13 dense features + self.spa_fea = 26 # 26 sparse features + self.tad_fea = self.tar_fea + self.den_fea + self.tot_fea = self.tad_fea + self.spa_fea + + self.batch_size = batch_size + self.max_ind_range = max_ind_range + self.bytes_per_entry = 
(bytes_per_feature * self.tot_fea * batch_size) + + self.num_entries = math.ceil(os.path.getsize(data_file) / self.bytes_per_entry) + + print('data file:', data_file, 'number of batches:', self.num_entries) + self.file = open(data_file, 'rb') + + with np.load(counts_file) as data: + self.counts = data["counts"] + + # hardcoded for now + self.m_den = 13 + + def __len__(self): + return self.num_entries + + def __getitem__(self, idx): + self.file.seek(idx * self.bytes_per_entry, 0) + raw_data = self.file.read(self.bytes_per_entry) + array = np.frombuffer(raw_data, dtype=np.int32) + tensor = torch.from_numpy(array).view((-1, self.tot_fea)) + + return _transform_features(x_int_batch=tensor[:, 1:14], + x_cat_batch=tensor[:, 14:], + y_batch=tensor[:, 0], + max_ind_range=self.max_ind_range, + flag_input_torch_tensor=True) + + def __del__(self): + self.file.close() + + +def numpy_to_binary(input_files, output_file_path, split='train'): + """Convert the data to a binary format to be read with CriteoBinDataset.""" + + # WARNING - both categorical and numerical data must fit into int32 for + # the following code to work correctly + + with open(output_file_path, 'wb') as output_file: + if split == 'train': + for input_file in input_files: + print('Processing file: ', input_file) + + np_data = np.load(input_file) + np_data = np.concatenate([np_data['y'].reshape(-1, 1), + np_data['X_int'], + np_data['X_cat']], axis=1) + np_data = np_data.astype(np.int32) + + output_file.write(np_data.tobytes()) + else: + assert len(input_files) == 1 + np_data = np.load(input_files[0]) + np_data = np.concatenate([np_data['y'].reshape(-1, 1), + np_data['X_int'], + np_data['X_cat']], axis=1) + np_data = np_data.astype(np.int32) + + samples_in_file = np_data.shape[0] + midpoint = int(np.ceil(samples_in_file / 2.)) + if split == "test": + begin = 0 + end = midpoint + elif split == "val": + begin = midpoint + end = samples_in_file + else: + raise ValueError('Unknown split value: ', split) + + 
output_file.write(np_data[begin:end].tobytes()) + + +def _preprocess(args): + train_files = ['{}_{}_reordered.npz'.format(args.input_data_prefix, day) for + day in range(0, 23)] + + test_valid_file = args.input_data_prefix + '_23_reordered.npz' + + os.makedirs(args.output_directory, exist_ok=True) + for split in ['train', 'val', 'test']: + print('Running preprocessing for split =', split) + + output_file = os.path.join(args.output_directory, + '{}_data.bin'.format(split)) + + input_files = train_files if split == 'train' else [test_valid_file] + numpy_to_binary(input_files=input_files, + output_file_path=output_file, + split=split) + + +def _test_bin(): + parser = argparse.ArgumentParser() + parser.add_argument('--output_directory', required=True) + parser.add_argument('--input_data_prefix', required=True) + parser.add_argument('--split', choices=['train', 'test', 'val'], + required=True) + args = parser.parse_args() + + _preprocess(args) + + binary_data_file = os.path.join(args.output_directory, + '{}_data.bin'.format(args.split)) + + counts_file = os.path.join(args.output_directory, 'day_fea_count.npz') + dataset_binary = CriteoBinDataset(data_file=binary_data_file, + counts_file=counts_file, + batch_size=2048,) + from dlrm_data_pytorch import CriteoDataset + from dlrm_data_pytorch import collate_wrapper_criteo_offset as collate_wrapper_criteo + + binary_loader = torch.utils.data.DataLoader( + dataset_binary, + batch_size=None, + shuffle=False, + num_workers=0, + collate_fn=None, + pin_memory=False, + drop_last=False, + ) + + original_dataset = CriteoDataset( + dataset='terabyte', + max_ind_range=10 * 1000 * 1000, + sub_sample_rate=1, + randomize=True, + split=args.split, + raw_path=args.input_data_prefix, + pro_data='dummy_string', + memory_map=True + ) + + original_loader = torch.utils.data.DataLoader( + original_dataset, + batch_size=2048, + shuffle=False, + num_workers=0, + collate_fn=collate_wrapper_criteo, + pin_memory=False, + drop_last=False, + ) + + 
assert len(dataset_binary) == len(original_loader) + for i, (old_batch, new_batch) in tqdm(enumerate(zip(original_loader, + binary_loader)), + total=len(dataset_binary)): + + for j in range(len(new_batch)): + if not np.array_equal(old_batch[j], new_batch[j]): + raise ValueError('FAILED: Datasets not equal') + if i > len(dataset_binary): + break + print('PASSED') + + +if __name__ == '__main__': + _test() + _test_bin() diff --git a/benchmarks/dlrm/ootb/data_utils.py b/benchmarks/dlrm/ootb/data_utils.py new file mode 100644 index 0000000..bf76dff --- /dev/null +++ b/benchmarks/dlrm/ootb/data_utils.py @@ -0,0 +1,1292 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +# +# Description: generate inputs and targets for the DLRM benchmark +# +# Utility function(s) to download and pre-process public data sets +# - Criteo Kaggle Display Advertising Challenge Dataset +# https://labs.criteo.com/2014/02/kaggle-display-advertising-challenge-dataset +# - Criteo Terabyte Dataset +# https://labs.criteo.com/2013/12/download-terabyte-click-logs +# +# After downloading dataset, run: +# getCriteoAdData( +# datafile="", +# o_filename=kaggleAdDisplayChallenge_processed.npz, +# max_ind_range=-1, +# sub_sample_rate=0.0, +# days=7, +# data_split='train', +# randomize='total', +# criteo_kaggle=True, +# memory_map=False +# ) +# getCriteoAdData( +# datafile="", +# o_filename=terabyte_processed.npz, +# max_ind_range=-1, +# sub_sample_rate=0.0, +# days=24, +# data_split='train', +# randomize='total', +# criteo_kaggle=False, +# memory_map=False +# ) + +from __future__ import absolute_import, division, print_function, unicode_literals + +import sys +# import os +from os import path +from multiprocessing import Process, Manager +# import io +# from io import StringIO +# import collections as coll + +import numpy as np + + +def convertUStringToDistinctIntsDict(mat, 
convertDicts, counts): + # Converts matrix of unicode strings into distinct integers. + # + # Inputs: + # mat (np.array): array of unicode strings to convert + # convertDicts (list): dictionary for each column + # counts (list): number of different categories in each column + # + # Outputs: + # out (np.array): array of output integers + # convertDicts (list): dictionary for each column + # counts (list): number of different categories in each column + + # check if convertDicts and counts match correct length of mat + if len(convertDicts) != mat.shape[1] or len(counts) != mat.shape[1]: + print("Length of convertDicts or counts does not match input shape") + print("Generating convertDicts and counts...") + + convertDicts = [{} for _ in range(mat.shape[1])] + counts = [0 for _ in range(mat.shape[1])] + + # initialize output + out = np.zeros(mat.shape) + + for j in range(mat.shape[1]): + for i in range(mat.shape[0]): + # add to convertDict and increment count + if mat[i, j] not in convertDicts[j]: + convertDicts[j][mat[i, j]] = counts[j] + counts[j] += 1 + out[i, j] = convertDicts[j][mat[i, j]] + + return out, convertDicts, counts + + +def convertUStringToDistinctIntsUnique(mat, mat_uni, counts): + # mat is an array of 0,...,# samples, with each being 26 categorical features + + # check if mat_unique and counts match correct length of mat + if len(mat_uni) != mat.shape[1] or len(counts) != mat.shape[1]: + print("Length of mat_unique or counts does not match input shape") + print("Generating mat_unique and counts...") + + mat_uni = [np.array([]) for _ in range(mat.shape[1])] + counts = [0 for _ in range(mat.shape[1])] + + # initialize output + out = np.zeros(mat.shape) + ind_map = [np.array([]) for _ in range(mat.shape[1])] + + # find out and assign unique ids to features + for j in range(mat.shape[1]): + m = mat_uni[j].size + mat_concat = np.concatenate((mat_uni[j], mat[:, j])) + mat_uni[j], ind_map[j] = np.unique(mat_concat, return_inverse=True) + out[:, j] = 
ind_map[j][m:] + counts[j] = mat_uni[j].size + + return out, mat_uni, counts + + +def processCriteoAdData(d_path, d_file, npzfile, i, convertDicts, pre_comp_counts): + # Process Kaggle Display Advertising Challenge or Terabyte Dataset + # by converting unicode strings in X_cat to integers and + # converting negative integer values in X_int. + # + # Loads data in the form "{kaggle|terabyte}_day_i.npz" where i is the day. + # + # Inputs: + # d_path (str): path for {kaggle|terabyte}_day_i.npz files + # i (int): splits in the dataset (typically 0 to 7 or 0 to 24) + + # process data if not all files exist + filename_i = npzfile + "_{0}_processed.npz".format(i) + + if path.exists(filename_i): + print("Using existing " + filename_i, end="\n") + else: + print("Not existing " + filename_i) + with np.load(npzfile + "_{0}.npz".format(i)) as data: + # categorical features + ''' + # Approach 1a: using empty dictionaries + X_cat, convertDicts, counts = convertUStringToDistinctIntsDict( + data["X_cat"], convertDicts, counts + ) + ''' + ''' + # Approach 1b: using empty np.unique + X_cat, convertDicts, counts = convertUStringToDistinctIntsUnique( + data["X_cat"], convertDicts, counts + ) + ''' + # Approach 2a: using pre-computed dictionaries + X_cat_t = np.zeros(data["X_cat_t"].shape) + for j in range(26): + for k, x in enumerate(data["X_cat_t"][j, :]): + X_cat_t[j, k] = convertDicts[j][x] + # continuous features + X_int = data["X_int"] + X_int[X_int < 0] = 0 + # targets + y = data["y"] + + np.savez_compressed( + filename_i, + # X_cat = X_cat, + X_cat=np.transpose(X_cat_t), # transpose of the data + X_int=X_int, + y=y, + ) + print("Processed " + filename_i, end="\n") + # sanity check (applicable only if counts have been pre-computed & are re-computed) + # for j in range(26): + # if pre_comp_counts[j] != counts[j]: + # sys.exit("ERROR: Sanity check on counts has failed") + # print("\nSanity check on counts passed") + + return + + +def concatCriteoAdData( + d_path, + d_file, + 
npzfile, + trafile, + days, + data_split, + randomize, + total_per_file, + total_count, + memory_map, + o_filename +): + # Concatenates different days and saves the result. + # + # Inputs: + # days (int): total number of days in the dataset (typically 7 or 24) + # d_path (str): path for {kaggle|terabyte}_day_i.npz files + # o_filename (str): output file name + # + # Output: + # o_file (str): output file path + + if memory_map: + # dataset break up per fea + # tar_fea = 1 # single target + den_fea = 13 # 13 dense features + spa_fea = 26 # 26 sparse features + # tad_fea = tar_fea + den_fea + # tot_fea = tad_fea + spa_fea + # create offset per file + offset_per_file = np.array([0] + [x for x in total_per_file]) + for i in range(days): + offset_per_file[i + 1] += offset_per_file[i] + + ''' + # Approach 1, 2 and 3 use indices, while Approach 4 does not use them + # create indices + indices = np.arange(total_count) + if data_split == "none": + if randomize == "total": + indices = np.random.permutation(indices) + else: + indices = np.array_split(indices, offset_per_file[1:-1]) + + # randomize train data (per day) + if randomize == "day": # or randomize == "total": + for i in range(len(indices) - 1): + indices[i] = np.random.permutation(indices[i]) + print("Randomized indices per day ...") + + train_indices = np.concatenate(indices[:-1]) + test_indices = indices[-1] + + # randomize train data (across days) + if randomize == "total": + train_indices = np.random.permutation(train_indices) + print("Randomized indices across days ...") + + indices = np.concatenate((train_indices, test_indices)) + # no reordering + # indices = np.arange(total_count) + ''' + ''' + # Approach 1: simple and slow (no grouping is used) + # check if data already exists + recreate_flag = False + for j in range(tot_fea): + filename_j = trafile + "_{0}_reordered.npy".format(j) + if path.exists(filename_j): + print("Using existing " + filename_j) + else: + recreate_flag = True + # load, reorder and 
concatenate data (memmap all reordered files per feature) + if recreate_flag: + # init reordered files (.npy appended automatically) + z = np.zeros((total_count)) + for j in range(tot_fea): + filename_j = trafile + "_{0}_reordered".format(j) + np.save(filename_j, z) + print("Creating " + filename_j) + + for i in range(days): + filename_i = d_path + npzfile + "_{0}_processed.npz".format(i) + with np.load(filename_i) as data: + X_cat_t = np.transpose(data["X_cat"]) + X_int_t = np.transpose(data["X_int"]) + y = data["y"] + size = len(y) + # sanity check + if total_per_file[i] != size: + sys.exit("ERROR: sanity check on number of samples failed") + # setup start and end ranges + start = offset_per_file[i] + end = offset_per_file[i + 1] + # print(filename_i) + # print("start=" + str(start) + " end=" + str(end) + # + " diff=" + str(end - start) + "=" + str(total_per_file[i])) + + for j in range(tot_fea): + filename_j = trafile + "_{0}_reordered.npy".format(j) + fj = np.load(filename_j, mmap_mode='r+') + if j < tar_fea: + fj[indices[start:end]] = y + elif tar_fea <= j and j < tad_fea: + fj[indices[start:end]] = X_int_t[j - tar_fea, :] + else: + fj[indices[start:end]] = X_cat_t[j - tad_fea, :] + del fj + else: + print("Reordered fea files already exist, skipping ...") + + # check if data already exists + recreate_flag = False + for i in range(days): + filename_i = d_path + npzfile + "_{0}_reordered.npz".format(i) + if path.exists(filename_i): + print("Using existing " + filename_i) + else: + recreate_flag = True + # split reordered data by files (memmap all reordered files per feature) + # on the day boundary del the file object and memmap again + if recreate_flag: + for i in range(days): + filename_i = d_path + npzfile + "_{0}_reordered.npz".format(i) + size = total_per_file[i] + X_int_t = np.zeros((den_fea, size)) + X_cat_t = np.zeros((spa_fea, size)) + # setup start and end ranges + start = offset_per_file[i] + end = offset_per_file[i + 1] + print("Creating " + 
filename_i) + # print("start=" + str(start) + " end=" + str(end) + # + " diff=" + str(end - start) + "=" + str(total_per_file[i])) + + for j in range(tot_fea): + filename_j = trafile + "_{0}_reordered.npy".format(j) + fj = np.load(filename_j, mmap_mode='r') + if j < tar_fea: + y = fj[start:end] + elif tar_fea <= j and j < tad_fea: + X_int_t[j - tar_fea, :] = fj[start:end] + else: + X_cat_t[j - tad_fea, :] = fj[start:end] + del fj + + np.savez_compressed( + filename_i, + X_cat=np.transpose(X_cat_t), # transpose of the data + X_int=np.transpose(X_int_t), # transpose of the data + y=y, + ) + else: + print("Reordered day files already exist, skipping ...") + ''' + ''' + # Approach 2: group days + # check if data already exists + recreate_flag = False + for j in range(tot_fea): + filename_j = trafile + "_{0}_reordered.npy".format(j) + if path.exists(filename_j): + print("Using existing " + filename_j) + else: + recreate_flag = True + # load, reorder and concatenate data (memmap all reordered files per feature) + if recreate_flag: + # init reordered files (.npy appended automatically) + z = np.zeros((total_count)) + for j in range(tot_fea): + filename_j = trafile + "_{0}_reordered".format(j) + np.save(filename_j, z) + print("Creating " + filename_j) + + group_day = 3 # e.g. 
8, 4 or 3 + group_num = days // group_day + file_group = [i*group_day for i in range(group_num)] + [days] + for ii in range(group_num): + # for last may be group_size != group_num, therefore reset it below + group_size = file_group[ii + 1] - file_group[ii] + X_cat_t = [0]*group_size + X_int_t = [0]*group_size + y = [0]*group_size + start = [0]*group_size + end = [0]*group_size + for ig in range(group_size): + i = file_group[ii] + ig + filename_i = d_path + npzfile + "_{0}_processed.npz".format(i) + # setup start and end ranges + start[ig] = offset_per_file[i] + end[ig] = offset_per_file[i + 1] + # print(filename_i) + # load a group of files + with np.load(filename_i) as data: + X_cat_t[ig] = np.transpose(data["X_cat"]) + X_int_t[ig] = np.transpose(data["X_int"]) + y[ig] = data["y"] + # sanity check + if total_per_file[i] != len(y[ig]): + sys.exit("ERROR: sanity check on number of samples failed") + # print("start=" + str(start) + " end=" + str(end) + # + " diff=" + str(end[ig]-start[ig]) + "=" + str(total_per_file[i])) + + for j in range(tot_fea): + filename_j = trafile + "_{0}_reordered.npy".format(j) + fj = np.load(filename_j, mmap_mode='r+') + for ig in range(group_size): + if j < tar_fea: + fj[indices[start[ig]:end[ig]]] = y[ig] + elif tar_fea <= j and j < tad_fea: + fj[indices[start[ig]:end[ig]]] = X_int_t[ig][j - tar_fea, :] + else: + fj[indices[start[ig]:end[ig]]] = X_cat_t[ig][j - tad_fea, :] + del fj + else: + print("Reordered fea files already exist, skipping ...") + + # check if data already exists + recreate_flag = False + for i in range(days): + filename_i = d_path + npzfile + "_{0}_reordered.npz".format(i) + if path.exists(filename_i): + print("Using existing " + filename_i) + else: + recreate_flag = True + # split reordered data by files (memmap all reordered files per feature) + # on the day boundary del the file object and memmap again + if recreate_flag: + for ii in range(group_num): + # for last may be group_size != group_num, therefore reset it 
below + group_size = file_group[ii + 1] - file_group[ii] + X_cat_t= []; X_int_t = [] + for ig in range(group_size): + i = file_group[ii] + ig + X_int_t.append(np.zeros((den_fea, total_per_file[i]))) + X_cat_t.append(np.zeros((spa_fea, total_per_file[i]))) + y = [0]*group_size + start = [0]*group_size + end = [0]*group_size + + for j in range(tot_fea): + filename_j = trafile + "_{0}_reordered.npy".format(j) + fj = np.load(filename_j, mmap_mode='r') + # load a group of files + for ig in range(group_size): + i = file_group[ii] + ig + # setup start and end ranges + start[ig] = offset_per_file[i] + end[ig] = offset_per_file[i + 1] + # load data for the group of files + if j < tar_fea: + y[ig] = fj[start[ig]:end[ig]] + elif tar_fea <= j and j < tad_fea: + X_int_t[ig][j - tar_fea, :] = fj[start[ig]:end[ig]] + else: + X_cat_t[ig][j - tad_fea, :] = fj[start[ig]:end[ig]] + del fj + + for ig in range(group_size): + i = file_group[ii] + ig + filename_i = d_path + npzfile + "_{0}_reordered.npz".format(i) + print("Creating " + filename_i) + np.savez_compressed( + filename_i, + X_cat=np.transpose(X_cat_t[ig]), # transpose of the data + X_int=np.transpose(X_int_t[ig]), # transpose of the data + y=y[ig], + ) + else: + print("Reordered day files already exist, skipping ...") + ''' + ''' + # Approach 3: group features + # check if data already exists + group_fea = 5 # e.g. 
8, 5 or 4 + group_num = tot_fea // group_fea + if tot_fea % group_fea != 0: # sanity check + sys.exit("ERROR: the group_fea must divided tot_fea evenly.") + recreate_flag = False + for jn in range(group_num): + filename_j = trafile + "_{0}_reordered{1}.npy".format( + jn, group_fea + ) + if path.exists(filename_j): + print("Using existing " + filename_j) + else: + recreate_flag = True + # load, reorder and concatenate data (memmap all reordered files per feature) + if recreate_flag: + # init reordered files (.npy appended automatically) + z = np.zeros((group_fea, total_count)) + for jn in range(group_num): + filename_j = trafile + "_{0}_reordered{1}".format( + jn, group_fea + ) + np.save(filename_j, z) + print("Creating " + filename_j) + + for i in range(days): + filename_i = d_path + npzfile + "_{0}_processed.npz".format(i) + with np.load(filename_i) as data: + X_cat_t = np.transpose(data["X_cat"]) + X_int_t = np.transpose(data["X_int"]) + y = data["y"] + size = len(y) + # sanity check + if total_per_file[i] != size: + sys.exit("ERROR: sanity check on number of samples failed") + # setup start and end ranges + start = offset_per_file[i] + end = offset_per_file[i + 1] + # print(filename_i) + # print("start=" + str(start) + " end=" + str(end) + # + " diff=" + str(end - start) + "=" + str(total_per_file[i])) + + for jn in range(group_num): + filename_j = trafile + "_{0}_reordered{1}.npy".format( + jn, group_fea + ) + fj = np.load(filename_j, mmap_mode='r+') + for jg in range(group_fea): + j = jn * group_fea + jg + # print("j=" + str(j) + " jn=" + str(jn) + " jg=" + str(jg)) + if j < tar_fea: + fj[jg, indices[start:end]] = y + elif tar_fea <= j and j < tad_fea: + fj[jg, indices[start:end]] = X_int_t[j - tar_fea, :] + else: + fj[jg, indices[start:end]] = X_cat_t[j - tad_fea, :] + del fj + else: + print("Reordered fea files already exist, skipping ...") + + # check if data already exists + recreate_flag = False + for i in range(days): + filename_i = d_path + npzfile + 
"_{0}_reordered.npz".format(i) + if path.exists(filename_i): + print("Using existing" + filename_i) + else: + recreate_flag = True + # split reordered data by files (memmap all reordered files per feature) + # on the day boundary del the file object and memmap again + if recreate_flag: + for i in range(days): + filename_i = d_path + npzfile + "_{0}_reordered.npz".format(i) + size = total_per_file[i] + X_int_t = np.zeros((den_fea, size)) + X_cat_t = np.zeros((spa_fea, size)) + # setup start and end ranges + start = offset_per_file[i] + end = offset_per_file[i + 1] + print("Creating " + filename_i) + # print("start=" + str(start) + " end=" + str(end) + # + " diff=" + str(end - start) + "=" + str(total_per_file[i])) + + for jn in range(group_num): + filename_j = trafile + "_{0}_reordered{1}.npy".format( + jn, group_fea + ) + fj = np.load(filename_j, mmap_mode='r') + for jg in range(group_fea): + j = jn * group_fea + jg + # print("j=" + str(j) + " jn=" + str(jn) + " jg=" + str(jg)) + if j < tar_fea: + y = fj[jg, start:end] + elif tar_fea <= j and j < tad_fea: + X_int_t[j - tar_fea, :] = fj[jg, start:end] + else: + X_cat_t[j - tad_fea, :] = fj[jg, start:end] + del fj + + np.savez_compressed( + filename_i, + X_cat=np.transpose(X_cat_t), # transpose of the data + X_int=np.transpose(X_int_t), # transpose of the data + y=y, + ) + + else: + print("Reordered day files already exist, skipping ...") + ''' + + # Approach 4: Fisher-Yates-Rao (FYR) shuffle algorithm + # 1st pass of FYR shuffle + # check if data already exists + recreate_flag = False + for j in range(days): + filename_j_y = npzfile + "_{0}_intermediate_y.npy".format(j) + filename_j_d = npzfile + "_{0}_intermediate_d.npy".format(j) + filename_j_s = npzfile + "_{0}_intermediate_s.npy".format(j) + if ( + path.exists(filename_j_y) + and path.exists(filename_j_d) + and path.exists(filename_j_s) + ): + print( + "Using existing\n" + + filename_j_y + "\n" + + filename_j_d + "\n" + + filename_j_s + ) + else: + recreate_flag 
= True + # reorder across buckets using sampling + if recreate_flag: + # init intermediate files (.npy appended automatically) + for j in range(days): + filename_j_y = npzfile + "_{0}_intermediate_y".format(j) + filename_j_d = npzfile + "_{0}_intermediate_d".format(j) + filename_j_s = npzfile + "_{0}_intermediate_s".format(j) + np.save(filename_j_y, np.zeros((total_per_file[j]))) + np.save(filename_j_d, np.zeros((total_per_file[j], den_fea))) + np.save(filename_j_s, np.zeros((total_per_file[j], spa_fea))) + # start processing files + total_counter = [0] * days + for i in range(days): + filename_i = npzfile + "_{0}_processed.npz".format(i) + with np.load(filename_i) as data: + X_cat = data["X_cat"] + X_int = data["X_int"] + y = data["y"] + size = len(y) + # sanity check + if total_per_file[i] != size: + sys.exit("ERROR: sanity check on number of samples failed") + # debug prints + print("Reordering (1st pass) " + filename_i) + + # create buckets using sampling of random ints + # from (discrete) uniform distribution + buckets = [] + for _j in range(days): + buckets.append([]) + counter = [0] * days + days_to_sample = days if data_split == "none" else days - 1 + if randomize == "total": + rand_u = np.random.randint(low=0, high=days_to_sample, size=size) + for k in range(size): + # sample and make sure elements per buckets do not overflow + if data_split == "none" or i < days - 1: + # choose bucket + p = rand_u[k] + # retry if the bucket is full + while total_counter[p] + counter[p] >= total_per_file[p]: + p = np.random.randint(low=0, high=days_to_sample) + else: # preserve the last day/bucket if needed + p = i + buckets[p].append(k) + counter[p] += 1 + else: # randomize is day or none + for k in range(size): + # do not sample, preserve the data in this bucket + p = i + buckets[p].append(k) + counter[p] += 1 + + # sanity check + if np.sum(counter) != size: + sys.exit("ERROR: sanity check on number of samples failed") + # debug prints + # print(counter) + # 
print(str(np.sum(counter)) + " = " + str(size)) + # print([len(x) for x in buckets]) + # print(total_counter) + + # partially fill the buckets + for j in range(days): + filename_j_y = npzfile + "_{0}_intermediate_y.npy".format(j) + filename_j_d = npzfile + "_{0}_intermediate_d.npy".format(j) + filename_j_s = npzfile + "_{0}_intermediate_s.npy".format(j) + start = total_counter[j] + end = total_counter[j] + counter[j] + # target buckets + fj_y = np.load(filename_j_y, mmap_mode='r+') + # print("start=" + str(start) + " end=" + str(end) + # + " end - start=" + str(end - start) + " " + # + str(fj_y[start:end].shape) + " " + # + str(len(buckets[j]))) + fj_y[start:end] = y[buckets[j]] + del fj_y + # dense buckets + fj_d = np.load(filename_j_d, mmap_mode='r+') + # print("start=" + str(start) + " end=" + str(end) + # + " end - start=" + str(end - start) + " " + # + str(fj_d[start:end, :].shape) + " " + # + str(len(buckets[j]))) + fj_d[start:end, :] = X_int[buckets[j], :] + del fj_d + # sparse buckets + fj_s = np.load(filename_j_s, mmap_mode='r+') + # print("start=" + str(start) + " end=" + str(end) + # + " end - start=" + str(end - start) + " " + # + str(fj_s[start:end, :].shape) + " " + # + str(len(buckets[j]))) + fj_s[start:end, :] = X_cat[buckets[j], :] + del fj_s + # update counters for next step + total_counter[j] += counter[j] + + # 2nd pass of FYR shuffle + # check if data already exists + for j in range(days): + filename_j = npzfile + "_{0}_reordered.npz".format(j) + if path.exists(filename_j): + print("Using existing " + filename_j) + else: + recreate_flag = True + # reorder within buckets + if recreate_flag: + for j in range(days): + filename_j_y = npzfile + "_{0}_intermediate_y.npy".format(j) + filename_j_d = npzfile + "_{0}_intermediate_d.npy".format(j) + filename_j_s = npzfile + "_{0}_intermediate_s.npy".format(j) + fj_y = np.load(filename_j_y) + fj_d = np.load(filename_j_d) + fj_s = np.load(filename_j_s) + + indices = range(total_per_file[j]) + if randomize 
== "day" or randomize == "total": + if data_split == "none" or j < days - 1: + indices = np.random.permutation(range(total_per_file[j])) + + filename_r = npzfile + "_{0}_reordered.npz".format(j) + print("Reordering (2nd pass) " + filename_r) + np.savez_compressed( + filename_r, + X_cat=fj_s[indices, :], + X_int=fj_d[indices, :], + y=fj_y[indices], + ) + + ''' + # sanity check (under no reordering norms should be zero) + for i in range(days): + filename_i_o = npzfile + "_{0}_processed.npz".format(i) + print(filename_i_o) + with np.load(filename_i_o) as data_original: + X_cat_o = data_original["X_cat"] + X_int_o = data_original["X_int"] + y_o = data_original["y"] + filename_i_r = npzfile + "_{0}_reordered.npz".format(i) + print(filename_i_r) + with np.load(filename_i_r) as data_reordered: + X_cat_r = data_reordered["X_cat"] + X_int_r = data_reordered["X_int"] + y_r = data_reordered["y"] + print(np.linalg.norm(y_o - y_r)) + print(np.linalg.norm(X_int_o - X_int_r)) + print(np.linalg.norm(X_cat_o - X_cat_r)) + ''' + + else: + print("Concatenating multiple days into %s.npz file" % str(d_path + o_filename)) + + # load and concatenate data + for i in range(days): + filename_i = npzfile + "_{0}_processed.npz".format(i) + with np.load(filename_i) as data: + if i == 0: + X_cat = data["X_cat"] + X_int = data["X_int"] + y = data["y"] + else: + X_cat = np.concatenate((X_cat, data["X_cat"])) + X_int = np.concatenate((X_int, data["X_int"])) + y = np.concatenate((y, data["y"])) + print("Loaded day:", i, "y = 1:", len(y[y == 1]), "y = 0:", len(y[y == 0])) + + with np.load(d_path + d_file + "_fea_count.npz") as data: + counts = data["counts"] + print("Loaded counts!") + + np.savez_compressed( + d_path + o_filename + ".npz", + X_cat=X_cat, + X_int=X_int, + y=y, + counts=counts, + ) + + return d_path + o_filename + ".npz" + + +def transformCriteoAdData(X_cat, X_int, y, days, data_split, randomize, total_per_file): + # Transforms Criteo Kaggle or terabyte data by applying log 
transformation + # on dense features and converting everything to appropriate tensors. + # + # Inputs: + # X_cat (ndarray): array of integers corresponding to preprocessed + # categorical features + # X_int (ndarray): array of integers corresponding to dense features + # y (ndarray): array of bool corresponding to labels + # data_split(str): flag for splitting dataset into training/validation/test + # sets + # randomize (str): determines randomization scheme + # "none": no randomization + # "day": randomizes each day"s data (only works if split = True) + # "total": randomizes total dataset + # + # Outputs: + # if split: + # X_cat_train (tensor): sparse features for training set + # X_int_train (tensor): dense features for training set + # y_train (tensor): labels for training set + # X_cat_val (tensor): sparse features for validation set + # X_int_val (tensor): dense features for validation set + # y_val (tensor): labels for validation set + # X_cat_test (tensor): sparse features for test set + # X_int_test (tensor): dense features for test set + # y_test (tensor): labels for test set + # else: + # X_cat (tensor): sparse features + # X_int (tensor): dense features + # y (tensor): label + + # define initial set of indices + indices = np.arange(len(y)) + + # create offset per file + offset_per_file = np.array([0] + [x for x in total_per_file]) + for i in range(days): + offset_per_file[i + 1] += offset_per_file[i] + + # split dataset + if data_split == 'train': + indices = np.array_split(indices, offset_per_file[1:-1]) + + # randomize train data (per day) + if randomize == "day": # or randomize == "total": + for i in range(len(indices) - 1): + indices[i] = np.random.permutation(indices[i]) + print("Randomized indices per day ...") + + train_indices = np.concatenate(indices[:-1]) + test_indices = indices[-1] + test_indices, val_indices = np.array_split(test_indices, 2) + + print("Defined training and testing indices...") + + # randomize train data (across days) + if 
randomize == "total": + train_indices = np.random.permutation(train_indices) + print("Randomized indices across days ...") + + # indices = np.concatenate((train_indices, test_indices)) + + # create training, validation, and test sets + X_cat_train = X_cat[train_indices] + X_int_train = X_int[train_indices] + y_train = y[train_indices] + + X_cat_val = X_cat[val_indices] + X_int_val = X_int[val_indices] + y_val = y[val_indices] + + X_cat_test = X_cat[test_indices] + X_int_test = X_int[test_indices] + y_test = y[test_indices] + + print("Split data according to indices...") + + X_cat_train = X_cat_train.astype(np.long) + X_int_train = np.log(X_int_train.astype(np.float32) + 1) + y_train = y_train.astype(np.float32) + + X_cat_val = X_cat_val.astype(np.long) + X_int_val = np.log(X_int_val.astype(np.float32) + 1) + y_val = y_val.astype(np.float32) + + X_cat_test = X_cat_test.astype(np.long) + X_int_test = np.log(X_int_test.astype(np.float32) + 1) + y_test = y_test.astype(np.float32) + + print("Converted to tensors...done!") + + return ( + X_cat_train, + X_int_train, + y_train, + X_cat_val, + X_int_val, + y_val, + X_cat_test, + X_int_test, + y_test, + ) + + else: + + # randomize data + if randomize == "total": + indices = np.random.permutation(indices) + print("Randomized indices...") + + X_cat = X_cat[indices].astype(np.long) + X_int = np.log(X_int[indices].astype(np.float32) + 1) + y = y[indices].astype(np.float32) + + print("Converted to tensors...done!") + + return (X_cat, X_int, y, [], [], [], [], [], []) + + +def getCriteoAdData( + datafile, + o_filename, + max_ind_range=-1, + sub_sample_rate=0.0, + days=7, + data_split='train', + randomize='total', + criteo_kaggle=True, + memory_map=False, + dataset_multiprocessing=False, +): + # Passes through entire dataset and defines dictionaries for categorical + # features and determines the number of total categories. 
+ # + # Inputs: + # datafile : path to downloaded raw data file + # o_filename (str): saves results under o_filename if filename is not "" + # + # Output: + # o_file (str): output file path + + #split the datafile into path and filename + lstr = datafile.split("/") + d_path = "/".join(lstr[0:-1]) + "/" + d_file = lstr[-1].split(".")[0] if criteo_kaggle else lstr[-1] + npzfile = d_path + ((d_file + "_day") if criteo_kaggle else d_file) + trafile = d_path + ((d_file + "_fea") if criteo_kaggle else "fea") + + # count number of datapoints in training set + total_file = d_path + d_file + "_day_count.npz" + if path.exists(total_file): + with np.load(total_file) as data: + total_per_file = list(data["total_per_file"]) + total_count = np.sum(total_per_file) + print("Skipping counts per file (already exist)") + else: + total_count = 0 + total_per_file = [] + if criteo_kaggle: + # WARNING: The raw data consists of a single train.txt file + # Each line in the file is a sample, consisting of 13 continuous and + # 26 categorical features (an extra space indicates that feature is + # missing and will be interpreted as 0). 
+ if path.exists(datafile): + print("Reading data from path=%s" % (datafile)) + with open(str(datafile)) as f: + for _ in f: + total_count += 1 + total_per_file.append(total_count) + # reset total per file due to split + num_data_per_split, extras = divmod(total_count, days) + total_per_file = [num_data_per_split] * days + for j in range(extras): + total_per_file[j] += 1 + # split into days (simplifies code later on) + file_id = 0 + boundary = total_per_file[file_id] + nf = open(npzfile + "_" + str(file_id), "w") + with open(str(datafile)) as f: + for j, line in enumerate(f): + if j == boundary: + nf.close() + file_id += 1 + nf = open(npzfile + "_" + str(file_id), "w") + boundary += total_per_file[file_id] + nf.write(line) + nf.close() + else: + sys.exit("ERROR: Criteo Kaggle Display Ad Challenge Dataset path is invalid; please download from https://labs.criteo.com/2014/02/kaggle-display-advertising-challenge-dataset") + else: + # WARNING: The raw data consist of day_0.gz,... ,day_23.gz text files + # Each line in the file is a sample, consisting of 13 continuous and + # 26 categorical features (an extra space indicates that feature is + # missing and will be interpreted as 0). 
+ for i in range(days): + datafile_i = datafile + "_" + str(i) # + ".gz" + if path.exists(str(datafile_i)): + print("Reading data from path=%s" % (str(datafile_i))) + # file day_ + total_per_file_count = 0 + with open(str(datafile_i)) as f: + for _ in f: + total_per_file_count += 1 + total_per_file.append(total_per_file_count) + total_count += total_per_file_count + else: + sys.exit("ERROR: Criteo Terabyte Dataset path is invalid; please download from https://labs.criteo.com/2013/12/download-terabyte-click-logs") + + # process a file worth of data and reinitialize data + # note that a file main contain a single or multiple splits + def process_one_file( + datfile, + npzfile, + split, + num_data_in_split, + dataset_multiprocessing, + convertDictsDay=None, + resultDay=None + ): + if dataset_multiprocessing: + convertDicts_day = [{} for _ in range(26)] + + with open(str(datfile)) as f: + y = np.zeros(num_data_in_split, dtype="i4") # 4 byte int + X_int = np.zeros((num_data_in_split, 13), dtype="i4") # 4 byte int + X_cat = np.zeros((num_data_in_split, 26), dtype="i4") # 4 byte int + if sub_sample_rate == 0.0: + rand_u = 1.0 + else: + rand_u = np.random.uniform(low=0.0, high=1.0, size=num_data_in_split) + + i = 0 + percent = 0 + for k, line in enumerate(f): + # process a line (data point) + line = line.split('\t') + # set missing values to zero + for j in range(len(line)): + if (line[j] == '') or (line[j] == '\n'): + line[j] = '0' + # sub-sample data by dropping zero targets, if needed + target = np.int32(line[0]) + if target == 0 and \ + (rand_u if sub_sample_rate == 0.0 else rand_u[k]) < sub_sample_rate: + continue + + y[i] = target + X_int[i] = np.array(line[1:14], dtype=np.int32) + if max_ind_range > 0: + X_cat[i] = np.array( + list(map(lambda x: int(x, 16) % max_ind_range, line[14:])), + dtype=np.int32 + ) + else: + X_cat[i] = np.array( + list(map(lambda x: int(x, 16), line[14:])), + dtype=np.int32 + ) + + # count uniques + if dataset_multiprocessing: + for j in 
range(26): + convertDicts_day[j][X_cat[i][j]] = 1 + # debug prints + if float(i)/num_data_in_split*100 > percent+1: + percent = int(float(i)/num_data_in_split*100) + print( + "Load %d/%d (%d%%) Split: %d Label True: %d Stored: %d" + % ( + i, + num_data_in_split, + percent, + split, + target, + y[i], + ), + end="\n", + ) + else: + for j in range(26): + convertDicts[j][X_cat[i][j]] = 1 + # debug prints + print( + "Load %d/%d Split: %d Label True: %d Stored: %d" + % ( + i, + num_data_in_split, + split, + target, + y[i], + ), + end="\r", + ) + i += 1 + + # store num_data_in_split samples or extras at the end of file + # count uniques + # X_cat_t = np.transpose(X_cat) + # for j in range(26): + # for x in X_cat_t[j,:]: + # convertDicts[j][x] = 1 + # store parsed + filename_s = npzfile + "_{0}.npz".format(split) + if path.exists(filename_s): + print("\nSkip existing " + filename_s) + else: + np.savez_compressed( + filename_s, + X_int=X_int[0:i, :], + # X_cat=X_cat[0:i, :], + X_cat_t=np.transpose(X_cat[0:i, :]), # transpose of the data + y=y[0:i], + ) + print("\nSaved " + npzfile + "_{0}.npz!".format(split)) + + if dataset_multiprocessing: + resultDay[split] = i + convertDictsDay[split] = convertDicts_day + return + else: + return i + + # create all splits (reuse existing files if possible) + recreate_flag = False + convertDicts = [{} for _ in range(26)] + # WARNING: to get reproducable sub-sampling results you must reset the seed below + # np.random.seed(123) + # in this case there is a single split in each day + for i in range(days): + npzfile_i = npzfile + "_{0}.npz".format(i) + npzfile_p = npzfile + "_{0}_processed.npz".format(i) + if path.exists(npzfile_i): + print("Skip existing " + npzfile_i) + elif path.exists(npzfile_p): + print("Skip existing " + npzfile_p) + else: + recreate_flag = True + + if recreate_flag: + if dataset_multiprocessing: + resultDay = Manager().dict() + convertDictsDay = Manager().dict() + processes = [Process(target=process_one_file, + 
name="process_one_file:%i" % i, + args=(npzfile + "_{0}".format(i), + npzfile, + i, + total_per_file[i], + dataset_multiprocessing, + convertDictsDay, + resultDay, + ) + ) for i in range(0, days)] + for process in processes: + process.start() + for process in processes: + process.join() + for day in range(days): + total_per_file[day] = resultDay[day] + print("Constructing convertDicts Split: {}".format(day)) + convertDicts_tmp = convertDictsDay[day] + for i in range(26): + for j in convertDicts_tmp[i]: + convertDicts[i][j] = 1 + else: + for i in range(days): + total_per_file[i] = process_one_file( + npzfile + "_{0}".format(i), + npzfile, + i, + total_per_file[i], + dataset_multiprocessing, + ) + + # report and save total into a file + total_count = np.sum(total_per_file) + if not path.exists(total_file): + np.savez_compressed(total_file, total_per_file=total_per_file) + print("Total number of samples:", total_count) + print("Divided into days/splits:\n", total_per_file) + + # dictionary files + counts = np.zeros(26, dtype=np.int32) + if recreate_flag: + # create dictionaries + for j in range(26): + for i, x in enumerate(convertDicts[j]): + convertDicts[j][x] = i + dict_file_j = d_path + d_file + "_fea_dict_{0}.npz".format(j) + if not path.exists(dict_file_j): + np.savez_compressed( + dict_file_j, + unique=np.array(list(convertDicts[j]), dtype=np.int32) + ) + counts[j] = len(convertDicts[j]) + # store (uniques and) counts + count_file = d_path + d_file + "_fea_count.npz" + if not path.exists(count_file): + np.savez_compressed(count_file, counts=counts) + else: + # create dictionaries (from existing files) + for j in range(26): + with np.load(d_path + d_file + "_fea_dict_{0}.npz".format(j)) as data: + unique = data["unique"] + for i, x in enumerate(unique): + convertDicts[j][x] = i + # load (uniques and) counts + with np.load(d_path + d_file + "_fea_count.npz") as data: + counts = data["counts"] + + # process all splits + if dataset_multiprocessing: + processes = 
[Process(target=processCriteoAdData, + name="processCriteoAdData:%i" % i, + args=(d_path, + d_file, + npzfile, + i, + convertDicts, + counts, + ) + ) for i in range(0, days)] + for process in processes: + process.start() + for process in processes: + process.join() + + else: + for i in range(days): + processCriteoAdData(d_path, d_file, npzfile, i, convertDicts, counts) + + o_file = concatCriteoAdData( + d_path, + d_file, + npzfile, + trafile, + days, + data_split, + randomize, + total_per_file, + total_count, + memory_map, + o_filename + ) + + return o_file + + +def loadDataset( + dataset, + max_ind_range, + sub_sample_rate, + randomize, + data_split, + raw_path="", + pro_data="", + memory_map=False +): + # dataset + if dataset == "kaggle": + days = 7 + o_filename = "kaggleAdDisplayChallenge_processed" + elif dataset == "terabyte": + days = 24 + o_filename = "terabyte_processed" + else: + raise(ValueError("Data set option is not supported")) + + # split the datafile into path and filename + lstr = raw_path.split("/") + d_path = "/".join(lstr[0:-1]) + "/" + d_file = lstr[-1].split(".")[0] if dataset == "kaggle" else lstr[-1] + npzfile = (d_file + "_day") if dataset == "kaggle" else d_file + # trafile = d_path + ((d_file + "_fea") if dataset == "kaggle" else "fea") + + # check if pre-processed data is available + data_ready = True + if memory_map: + for i in range(days): + reo_data = d_path + npzfile + "_{0}_reordered.npz".format(i) + if not path.exists(str(reo_data)): + data_ready = False + else: + if not path.exists(str(pro_data)): + data_ready = False + + # pre-process data if needed + # WARNNING: when memory mapping is used we get a collection of files + if data_ready: + print("Reading pre-processed data=%s" % (str(pro_data))) + file = str(pro_data) + else: + print("Reading raw data=%s" % (str(raw_path))) + file = getCriteoAdData( + raw_path, + o_filename, + max_ind_range, + sub_sample_rate, + days, + data_split, + randomize, + dataset == "kaggle", + memory_map + 
) + + return file, days + + +if __name__ == "__main__": + ### import packages ### + import argparse + + ### parse arguments ### + parser = argparse.ArgumentParser( + description="Preprocess Criteo dataset" + ) + # model related parameters + parser.add_argument("--max-ind-range", type=int, default=-1) + parser.add_argument("--data-sub-sample-rate", type=float, default=0.0) # in [0, 1] + parser.add_argument("--data-randomize", type=str, default="total") # or day or none + parser.add_argument("--memory-map", action="store_true", default=False) + parser.add_argument("--data-set", type=str, default="kaggle") # or terabyte + parser.add_argument("--raw-data-file", type=str, default="") + parser.add_argument("--processed-data-file", type=str, default="") + args = parser.parse_args() + + loadDataset( + args.data_set, + args.max_ind_range, + args.data_sub_sample_rate, + args.data_randomize, + "train", + args.raw_data_file, + args.processed_data_file, + args.memory_map + ) diff --git a/benchmarks/dlrm/ootb/dlrm_data_caffe2.py b/benchmarks/dlrm/ootb/dlrm_data_caffe2.py new file mode 100644 index 0000000..0bda2ac --- /dev/null +++ b/benchmarks/dlrm/ootb/dlrm_data_caffe2.py @@ -0,0 +1,843 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +# +# Description: generate inputs and targets for the dlrm benchmark +# The inpts and outputs are generated according to the following three option(s) +# 1) random distribution +# 2) synthetic distribution, based on unique accesses and distances between them +# i) R. Hassan, A. Harris, N. Topham and A. 
Efthymiou "Synthetic Trace-Driven +# Simulation of Cache Memory", IEEE AINAM'07 +# 3) public data set +# i) Criteo Kaggle Display Advertising Challenge Dataset +# https://labs.criteo.com/2014/02/kaggle-display-advertising-challenge-dataset +# ii) Criteo Terabyte Dataset +# https://labs.criteo.com/2013/12/download-terabyte-click-logs + + +from __future__ import absolute_import, division, print_function, unicode_literals + +import bisect +import collections + +# others +# from os import path +import sys + +import data_utils + +# numpy +import numpy as np + +# pytorch +import torch +from numpy import random as ra +from torch.utils.data import Dataset + + +# Kaggle Display Advertising Challenge Dataset +# dataset (str): name of dataset (Kaggle or Terabyte) +# randomize (str): determines randomization scheme +# 'none': no randomization +# 'day': randomizes each day's data (only works if split = True) +# 'total': randomizes total dataset +# split (bool) : to split into train, test, validation data-sets + + +class CriteoDatasetWMemoryMap(Dataset): + def __init__( + self, + dataset, + max_ind_range, + sub_sample_rate, + randomize, + split="train", + raw_path="", + pro_data="", + ): + # dataset + # tar_fea = 1 # single target + den_fea = 13 # 13 dense features + # spa_fea = 26 # 26 sparse features + # tad_fea = tar_fea + den_fea + # tot_fea = tad_fea + spa_fea + if dataset == "kaggle": + days = 7 + elif dataset == "terabyte": + days = 24 + else: + raise (ValueError("Data set option is not supported")) + self.max_ind_range = max_ind_range + + # split the datafile into path and filename + lstr = raw_path.split("/") + self.d_path = "/".join(lstr[0:-1]) + "/" + self.d_file = lstr[-1].split(".")[0] if dataset == "kaggle" else lstr[-1] + self.npzfile = self.d_path + ( + (self.d_file + "_day") if dataset == "kaggle" else self.d_file + ) + self.trafile = self.d_path + ( + (self.d_file + "_fea") if dataset == "kaggle" else "fea" + ) + + # get a number of samples per day + total_file 
= self.d_path + self.d_file + "_day_count.npz" + with np.load(total_file) as data: + total_per_file = data["total_per_file"] + # compute offsets per file + self.offset_per_file = np.array([0] + list(total_per_file)) + for i in range(days): + self.offset_per_file[i + 1] += self.offset_per_file[i] + # print(self.offset_per_file) + + # setup data + self.split = split + if split == "none" or split == "train": + self.day = 0 + self.max_day_range = days if split == "none" else days - 1 + elif split == "test" or split == "val": + self.day = days - 1 + num_samples = self.offset_per_file[days] - self.offset_per_file[days - 1] + self.test_size = int(np.ceil(num_samples / 2.0)) + self.val_size = num_samples - self.test_size + else: + sys.exit("ERROR: dataset split is neither none, nor train or test.") + + # load unique counts + with np.load(self.d_path + self.d_file + "_fea_count.npz") as data: + self.counts = data["counts"] + self.m_den = den_fea # X_int.shape[1] + self.n_emb = len(self.counts) + print("Sparse features= %d, Dense features= %d" % (self.n_emb, self.m_den)) + + # Load the test data + # Only a single day is used for testing + if self.split == "test" or self.split == "val": + # only a single day is used for testing + fi = self.npzfile + "_{0}_reordered.npz".format(self.day) + with np.load(fi) as data: + self.X_int = data["X_int"] # continuous feature + self.X_cat = data["X_cat"] # categorical feature + self.y = data["y"] # target + + def __getitem__(self, index): + + if isinstance(index, slice): + return [ + self[idx] + for idx in range( + index.start or 0, index.stop or len(self), index.step or 1 + ) + ] + if self.split == "none" or self.split == "train": + # check if need to switch to next day and load data + if index == self.offset_per_file[self.day]: + # print("day_boundary switch", index) + self.day_boundary = self.offset_per_file[self.day] + fi = self.npzfile + "_{0}_reordered.npz".format(self.day) + # print('Loading file: ', fi) + with np.load(fi) as data:
+ self.X_int = data["X_int"] # continuous feature + self.X_cat = data["X_cat"] # categorical feature + self.y = data["y"] # target + self.day = (self.day + 1) % self.max_day_range + + i = index - self.day_boundary + elif self.split == "test" or self.split == "val": + # only a single day is used for testing + i = index + (0 if self.split == "test" else self.test_size) + else: + sys.exit("ERROR: dataset split is neither none, nor train or test.") + + if self.max_ind_range > 0: + return self.X_int[i], self.X_cat[i] % self.max_ind_range, self.y[i] + else: + return self.X_int[i], self.X_cat[i], self.y[i] + + def _default_preprocess(self, X_int, X_cat, y): + X_int = torch.log(torch.tensor(X_int, dtype=torch.float) + 1) + if self.max_ind_range > 0: + X_cat = torch.tensor(X_cat % self.max_ind_range, dtype=torch.long) + else: + X_cat = torch.tensor(X_cat, dtype=torch.long) + y = torch.tensor(y.astype(np.float32)) + + return X_int, X_cat, y + + def __len__(self): + if self.split == "none": + return self.offset_per_file[-1] + elif self.split == "train": + return self.offset_per_file[-2] + elif self.split == "test": + return self.test_size + elif self.split == "val": + return self.val_size + else: + sys.exit("ERROR: dataset split is neither none, nor train nor test.") + + +def collate_wrapper_criteo(list_of_tuples): + # where each tuple is (X_int, X_cat, y) + transposed_data = list(zip(*list_of_tuples)) + X_int = torch.log(torch.tensor(transposed_data[0], dtype=torch.float) + 1) + X_cat = torch.tensor(transposed_data[1], dtype=torch.long) + T = torch.tensor(transposed_data[2], dtype=torch.float32).view(-1, 1) + + batchSize = X_cat.shape[0] + featureCnt = X_cat.shape[1] + + lS_i = [X_cat[:, i] for i in range(featureCnt)] + lS_o = [torch.tensor(range(batchSize)) for _ in range(featureCnt)] + + return X_int, torch.stack(lS_o), torch.stack(lS_i), T + + +# Conversion from offset to length +def offset_to_length_convertor(lS_o, lS_i): + def diff(tensor): + return tensor[1:] - 
tensor[:-1] + + return torch.stack( + [ + diff(torch.cat((S_o, torch.tensor(lS_i[ind].shape))).int()) + for ind, S_o in enumerate(lS_o) + ] + ) + + +def unpack_batch(b, data_gen, data_set): + return b[0], b[1], b[2], b[3], torch.ones(b[3].size()) + + +def read_dataset( + dataset, + max_ind_range, + sub_sample_rate, + mini_batch_size, + num_batches, + randomize, + split="train", + raw_data="", + processed_data="", + memory_map=False, + inference_only=False, + test_mini_batch_size=1, +): + # split the datafile into path and filename + lstr = raw_data.split("/") + d_path = "/".join(lstr[0:-1]) + "/" + d_file = lstr[-1].split(".")[0] if dataset == "kaggle" else lstr[-1] + # npzfile = d_path + ((d_file + "_day") if dataset == "kaggle" else d_file) + # trafile = d_path + ((d_file + "_fea") if dataset == "kaggle" else "fea") + + # load + print("Loading %s dataset..." % dataset) + nbatches = 0 + file, days = data_utils.loadDataset( + dataset, + max_ind_range, + sub_sample_rate, + randomize, + split, + raw_data, + processed_data, + memory_map, + ) + + if memory_map: + # WARNING: at this point the data has been reordered and shuffled across files + # e.g. day__reordered.npz, what remains is simply to read and feed + # the data from each file, going in the order of days file-by-file, to the + # model during training. 
+ train_data = CriteoDatasetWMemoryMap( + dataset, + max_ind_range, + sub_sample_rate, + randomize, + "train", + raw_data, + processed_data, + ) + + test_data = CriteoDatasetWMemoryMap( + dataset, + max_ind_range, + sub_sample_rate, + randomize, + "test", + raw_data, + processed_data, + ) + + train_loader = torch.utils.data.DataLoader( + train_data, + batch_size=mini_batch_size, + shuffle=False, + num_workers=0, + collate_fn=collate_wrapper_criteo, + pin_memory=False, + drop_last=False, # True + ) + + test_loader = torch.utils.data.DataLoader( + test_data, + batch_size=test_mini_batch_size, + shuffle=False, + num_workers=0, + collate_fn=collate_wrapper_criteo, + pin_memory=False, + drop_last=False, # True + ) + + return train_data, train_loader, test_data, test_loader + + else: + # load and preprocess data + with np.load(file) as data: + X_int = data["X_int"] + X_cat = data["X_cat"] + y = data["y"] + counts = data["counts"] + + # get a number of samples per day + total_file = d_path + d_file + "_day_count.npz" + with np.load(total_file) as data: + total_per_file = data["total_per_file"] + + # transform + ( + X_cat_train, + X_int_train, + y_train, + X_cat_val, + X_int_val, + y_val, + X_cat_test, + X_int_test, + y_test, + ) = data_utils.transformCriteoAdData( + X_cat, X_int, y, days, split, randomize, total_per_file + ) + ln_emb = counts + m_den = X_int_train.shape[1] + n_emb = len(counts) + print("Sparse features = %d, Dense features = %d" % (n_emb, m_den)) + + # adjust parameters + def assemble_samples(X_cat, X_int, y, max_ind_range, print_message): + if max_ind_range > 0: + X_cat = X_cat % max_ind_range + + nsamples = len(y) + data_size = nsamples + # using floor is equivalent to dropping last mini-batch (drop_last = True) + nbatches = int(np.floor((data_size * 1.0) / mini_batch_size)) + print(print_message) + if num_batches != 0 and num_batches < nbatches: + print( + "Limiting to %d batches of the total % d batches" + % (num_batches, nbatches) + ) + nbatches = 
num_batches + else: + print("Total number of batches %d" % nbatches) + + # data main loop + lX = [] + lS_lengths = [] + lS_indices = [] + lT = [] + for j in range(0, nbatches): + # number of data points in a batch + print("Reading in batch: %d / %d" % (j + 1, nbatches), end="\r") + n = min(mini_batch_size, data_size - (j * mini_batch_size)) + # dense feature + idx_start = j * mini_batch_size + lX.append((X_int[idx_start : (idx_start + n)]).astype(np.float32)) + # Targets - outputs + lT.append( + (y[idx_start : idx_start + n]).reshape(-1, 1).astype(np.int32) + ) + # sparse feature (sparse indices) + lS_emb_indices = [] + # for each embedding generate a list of n lookups, + # where each lookup is composed of multiple sparse indices + for size in range(n_emb): + lS_batch_indices = [] + for _b in range(n): + # num of sparse indices to be used per embedding, e.g. for + # store lengths and indices + lS_batch_indices += ( + (X_cat[idx_start + _b][size].reshape(-1)).astype(np.int32) + ).tolist() + lS_emb_indices.append(lS_batch_indices) + lS_indices.append(lS_emb_indices) + # Criteo Kaggle data it is 1 because data is categorical + lS_lengths.append( + [(list(np.ones(n).astype(np.int32))) for _ in range(n_emb)] + ) + print("\n") + + return nbatches, lX, lS_lengths, lS_indices, lT + + # adjust training data + (nbatches, lX, lS_lengths, lS_indices, lT) = assemble_samples( + X_cat_train, X_int_train, y_train, max_ind_range, "Training data" + ) + + # adjust testing data + (nbatches_t, lX_t, lS_lengths_t, lS_indices_t, lT_t) = assemble_samples( + X_cat_test, X_int_test, y_test, max_ind_range, "Testing data" + ) + # end if memory_map + + return ( + nbatches, + lX, + lS_lengths, + lS_indices, + lT, + nbatches_t, + lX_t, + lS_lengths_t, + lS_indices_t, + lT_t, + ln_emb, + m_den, + ) + + +def generate_random_data( + m_den, + ln_emb, + data_size, + num_batches, + mini_batch_size, + num_indices_per_lookup, + num_indices_per_lookup_fixed, + num_targets=1, + round_targets=False, + 
data_generation="random", + trace_file="", + enable_padding=False, +): + nbatches = int(np.ceil((data_size * 1.0) / mini_batch_size)) + if num_batches != 0: + nbatches = num_batches + data_size = nbatches * mini_batch_size + # print("Total number of batches %d" % nbatches) + + # inputs and targets + lT = [] + lX = [] + lS_lengths = [] + lS_indices = [] + for j in range(0, nbatches): + # number of data points in a batch + n = min(mini_batch_size, data_size - (j * mini_batch_size)) + + # generate a batch of dense and sparse features + if data_generation == "random": + (Xt, lS_emb_lengths, lS_emb_indices) = generate_uniform_input_batch( + m_den, ln_emb, n, num_indices_per_lookup, num_indices_per_lookup_fixed + ) + elif data_generation == "synthetic": + (Xt, lS_emb_lengths, lS_emb_indices) = generate_synthetic_input_batch( + m_den, + ln_emb, + n, + num_indices_per_lookup, + num_indices_per_lookup_fixed, + trace_file, + enable_padding, + ) + else: + sys.exit( + "ERROR: --data-generation=" + data_generation + " is not supported" + ) + # dense feature + lX.append(Xt) + # sparse feature (sparse indices) + lS_lengths.append(lS_emb_lengths) + lS_indices.append(lS_emb_indices) + + # generate a batch of target (probability of a click) + P = generate_random_output_batch(n, num_targets, round_targets) + lT.append(P) + + return (nbatches, lX, lS_lengths, lS_indices, lT) + + +def generate_random_output_batch(n, num_targets=1, round_targets=False): + # target (probability of a click) + if round_targets: + P = np.round(ra.rand(n, num_targets).astype(np.float32)).astype(np.int32) + else: + P = ra.rand(n, num_targets).astype(np.float32) + + return P + + +# uniform distribution (input data) +def generate_uniform_input_batch( + m_den, + ln_emb, + n, + num_indices_per_lookup, + num_indices_per_lookup_fixed, +): + # dense feature + Xt = ra.rand(n, m_den).astype(np.float32) + + # sparse feature (sparse indices) + lS_emb_lengths = [] + lS_emb_indices = [] + # for each embedding generate a
list of n lookups, + # where each lookup is composed of multiple sparse indices + for size in ln_emb: + lS_batch_lengths = [] + lS_batch_indices = [] + for _ in range(n): + # num of sparse indices to be used per embedding (between + if num_indices_per_lookup_fixed: + sparse_group_size = np.int32(num_indices_per_lookup) + else: + # random between [1,num_indices_per_lookup]) + r = ra.random(1) + sparse_group_size = np.int32( + max(1, np.round(r * min(size, num_indices_per_lookup))[0]) + ) + # sparse indices to be used per embedding + r = ra.random(sparse_group_size) + sparse_group = np.unique(np.round(r * (size - 1)).astype(np.int32)) + # reset sparse_group_size in case some index duplicates were removed + sparse_group_size = np.int32(sparse_group.size) + # store lengths and indices + lS_batch_lengths += [sparse_group_size] + lS_batch_indices += sparse_group.tolist() + lS_emb_lengths.append(lS_batch_lengths) + lS_emb_indices.append(lS_batch_indices) + + return (Xt, lS_emb_lengths, lS_emb_indices) + + +# synthetic distribution (input data) +def generate_synthetic_input_batch( + m_den, + ln_emb, + n, + num_indices_per_lookup, + num_indices_per_lookup_fixed, + trace_file, + enable_padding=False, +): + # dense feature + Xt = ra.rand(n, m_den).astype(np.float32) + + # sparse feature (sparse indices) + lS_emb_lengths = [] + lS_emb_indices = [] + # for each embedding generate a list of n lookups, + # where each lookup is composed of multiple sparse indices + for i, size in enumerate(ln_emb): + lS_batch_lengths = [] + lS_batch_indices = [] + for _ in range(n): + # num of sparse indices to be used per embedding (between + if num_indices_per_lookup_fixed: + sparse_group_size = np.int32(num_indices_per_lookup) + else: + # random between [1,num_indices_per_lookup]) + r = ra.random(1) + sparse_group_size = np.int32( + max(1, np.round(r * min(size, num_indices_per_lookup))[0]) + ) + # sparse indices to be used per embedding + file_path = trace_file + line_accesses, list_sd, 
cumm_sd = read_dist_from_file( + file_path.replace("j", str(i)) + ) + # debug print + # print('input') + # print(line_accesses); print(list_sd); print(cumm_sd); + # print(sparse_group_size) + # approach 1: rand + # r = trace_generate_rand( + # line_accesses, list_sd, cumm_sd, sparse_group_size, enable_padding + # ) + # approach 2: lru + r = trace_generate_lru( + line_accesses, list_sd, cumm_sd, sparse_group_size, enable_padding + ) + # WARNING: if the distribution in the file is not consistent with + # embedding table dimensions, below mod guards against out of + # range access + sparse_group = np.unique(r).astype(np.int32) + minsg = np.min(sparse_group) + maxsg = np.max(sparse_group) + if (minsg < 0) or (size <= maxsg): + print( + "WARNING: distribution is inconsistent with embedding " + + "table size (using mod to recover and continue)" + ) + sparse_group = np.mod(sparse_group, size).astype(np.int32) + # sparse_group = np.unique(np.array(np.mod(r, size-1)).astype(np.int32)) + # reset sparse_group_size in case some index duplicates were removed + sparse_group_size = np.int32(sparse_group.size) + # store lengths and indices + lS_batch_lengths += [sparse_group_size] + lS_batch_indices += sparse_group.tolist() + lS_emb_lengths.append(lS_batch_lengths) + lS_emb_indices.append(lS_batch_indices) + + return (Xt, lS_emb_lengths, lS_emb_indices) + + +def generate_stack_distance(cumm_val, cumm_dist, max_i, i, enable_padding=False): + u = ra.rand(1) + if i < max_i: + # only generate stack distances up to the number of new references seen so far + j = bisect.bisect(cumm_val, i) - 1 + fi = cumm_dist[j] + u *= fi # shrink distribution support to exclude last values + elif enable_padding: + # WARNING: disable generation of new references (once all have been seen) + fi = cumm_dist[0] + u = (1.0 - fi) * u + fi # remap distribution support to exclude first value + + for (j, f) in enumerate(cumm_dist): + if u <= f: + return cumm_val[j] + + +# WARNING: global define, must be 
# WARNING: global define, must be consistent across all synthetic functions
cache_line_size = 1


def trace_generate_lru(
    line_accesses, list_sd, cumm_sd, out_trace_len, enable_padding=False
):
    """Generate a synthetic trace of out_trace_len memory references.

    Stack distances are drawn from the (list_sd, cumm_sd) distribution.
    line_accesses is treated as an LRU stack and is mutated in place:
    a re-referenced line is moved to the most-recently-used end.
    """
    max_sd = list_sd[-1]
    num_unique = len(line_accesses)
    num_new = 0  # how many new (previously unseen) references were emitted
    ztrace = []
    for _ in range(out_trace_len):
        sd = generate_stack_distance(
            list_sd, cumm_sd, max_sd, num_new, enable_padding
        )
        mem_ref_within_line = 0  # floor(ra.rand(1)*cache_line_size) #0
        # generate memory reference
        if sd == 0:  # new reference: recycle the oldest line to the MRU end
            line_ref = line_accesses.pop(0)
            line_accesses.append(line_ref)
            num_new += 1
        else:  # existing reference: move it to the MRU end (LRU update)
            line_ref = line_accesses[num_unique - sd]
            line_accesses.pop(num_unique - sd)
            line_accesses.append(line_ref)
        ztrace.append(np.uint64(line_ref * cache_line_size + mem_ref_within_line))
    return ztrace


def trace_generate_rand(
    line_accesses, list_sd, cumm_sd, out_trace_len, enable_padding=False
):
    """Like trace_generate_lru, but a re-referenced line keeps its stack
    position (random replacement instead of LRU reordering)."""
    max_sd = list_sd[-1]
    num_unique = len(line_accesses)  # !!!Unique
    num_new = 0
    ztrace = []
    for _ in range(out_trace_len):
        sd = generate_stack_distance(
            list_sd, cumm_sd, max_sd, num_new, enable_padding
        )
        mem_ref_within_line = 0  # floor(ra.rand(1)*cache_line_size) #0
        # generate memory reference
        if sd == 0:  # new reference
            line_ref = line_accesses.pop(0)
            line_accesses.append(line_ref)
            num_new += 1
        else:  # existing reference; stack is NOT reordered here
            line_ref = line_accesses[num_unique - sd]
        ztrace.append(np.uint64(line_ref * cache_line_size + mem_ref_within_line))
    return ztrace


def trace_profile(trace, enable_padding=False):
    """Profile a memory-reference trace into a stack-distance distribution.

    Returns (rstack, stack_distances, line_accesses) where
      rstack          - final LRU stack of cache lines,
      stack_distances - per-reference stack distance, most recent first
                        (0 marks a first-time access),
      line_accesses   - unique lines in first-touch order, most recent first.
    """
    rstack = []           # S
    stack_distances = []  # SDS
    line_accesses = []    # L
    for x in trace:
        r = np.uint64(x / cache_line_size)
        depth = len(rstack)
        try:  # found
            i = rstack.index(r)
            # WARNING: I believe below is the correct depth in terms of meaning
            # of the algorithm, but that is not what seems to be in the paper
            # alg. -1 can be subtracted if we defined the distance between
            # consecutive accesses (e.g. r, r) as 0 rather than 1.
            sd = depth - i  # - 1
            stack_distances.insert(0, sd)
            # move r from its position to the top of the stack
            rstack.pop(i)  # rstack.remove(r)
            rstack.insert(depth - 1, r)
        except ValueError:  # not found
            sd = 0  # -1
            stack_distances.insert(0, sd)
            line_accesses.insert(0, r)
            rstack.insert(depth, r)

    if enable_padding:
        # WARNING: as the ratio between the number of samples and the
        # cardinality of a sample grows, generating a new sample becomes ever
        # less likely, so obtaining full distribution support takes long for
        # long traces with small cardinality.  Pad the number of new samples
        # to be on par with the average number of samples per line.
        num_sd = len(stack_distances)
        c = max(stack_distances) if stack_distances else 0
        # FIX: guard c == 0 (all-unique or empty trace) -- previously this
        # raised ZeroDivisionError in np.ceil(l / c)
        if c > 0:
            stack_distances = stack_distances + [0] * int(np.ceil(num_sd / c))

    return (rstack, stack_distances, line_accesses)


# auxiliary read/write routines
def read_trace_from_file(file_path):
    """Read a trace (binary uint64 or ", "-separated text, per the global
    args.trace_file_binary_type flag).  Returns None on failure."""
    try:
        with open(file_path) as f:
            if args.trace_file_binary_type:
                array = np.fromfile(f, dtype=np.uint64)
                return array.astype(np.uint64).tolist()
            line = f.readline()
            return [np.uint64(el) for el in line.split(", ")]
    except Exception as e:
        # FIX: report the actual problem instead of always claiming the file
        # was not provided
        print("ERROR: could not read input trace file %s (%s)" % (file_path, e))
        return None


def write_trace_to_file(file_path, trace):
    """Write a trace (binary uint64 or text, per args.trace_file_binary_type)."""
    try:
        if args.trace_file_binary_type:
            with open(file_path, "wb+") as f:
                np.array(trace).astype(np.uint64).tofile(f)
        else:
            with open(file_path, "w+") as f:
                s = str(trace)
                f.write(s[1 : len(s) - 1])  # strip the surrounding brackets
    except Exception as e:
        print("ERROR: could not write output trace file %s (%s)" % (file_path, e))


def read_dist_from_file(file_path):
    """Read (unique_accesses, list_sd, cumm_sd) from a 3-line dist file."""
    try:
        with open(file_path, "r") as f:
            lines = f.read().splitlines()
    except Exception as e:
        print("Wrong file or file path: %s (%s)" % (file_path, e))
        # FIX: previously execution fell through and crashed with a NameError
        # on `lines`; re-raise the original error instead
        raise
    # read unique accesses
    unique_accesses = [int(el) for el in lines[0].split(", ")]
    # read cumulative distribution (elements are passed as two separate lists)
    list_sd = [int(el) for el in lines[1].split(", ")]
    cumm_sd = [float(el) for el in lines[2].split(", ")]
    return unique_accesses, list_sd, cumm_sd


def write_dist_to_file(file_path, unique_accesses, list_sd, cumm_sd):
    """Write the three distribution lists, one ", "-separated line each."""
    try:
        with open(file_path, "w") as f:
            for seq in (unique_accesses, list_sd, cumm_sd):
                s = str(seq)
                f.write(s[1 : len(s) - 1] + "\n")  # strip brackets
    except Exception as e:
        print("Wrong file or file path: %s (%s)" % (file_path, e))
Distributions") + parser.add_argument("--trace-file", type=str, default="./input/trace.log") + parser.add_argument("--trace-file-binary-type", type=bool, default=False) + parser.add_argument("--trace-enable-padding", type=bool, default=False) + parser.add_argument("--dist-file", type=str, default="./input/dist.log") + parser.add_argument( + "--synthetic-file", type=str, default="./input/trace_synthetic.log" + ) + parser.add_argument("--numpy-rand-seed", type=int, default=123) + parser.add_argument("--print-precision", type=int, default=5) + args = parser.parse_args() + + ### some basic setup ### + np.random.seed(args.numpy_rand_seed) + np.set_printoptions(precision=args.print_precision) + + ### read trace ### + trace = read_trace_from_file(args.trace_file) + # print(trace) + + ### profile trace ### + (_, stack_distances, line_accesses) = trace_profile( + trace, args.trace_enable_padding + ) + stack_distances.reverse() + line_accesses.reverse() + # print(line_accesses) + # print(stack_distances) + + ### compute probability distribution ### + # count items + l = len(stack_distances) + dc = sorted( + collections.Counter(stack_distances).items(), key=operator.itemgetter(0) + ) + + # create a distribution + list_sd = list(map(lambda tuple_x_k: tuple_x_k[0], dc)) # x = tuple_x_k[0] + dist_sd = list( + map(lambda tuple_x_k: tuple_x_k[1] / float(l), dc) + ) # k = tuple_x_k[1] + cumm_sd = [] # np.cumsum(dc).tolist() #prefixsum + for i, (_, k) in enumerate(dc): + if i == 0: + cumm_sd.append(k / float(l)) + else: + # add the 2nd element of the i-th tuple in the dist_sd list + cumm_sd.append(cumm_sd[i - 1] + (k / float(l))) + + ### write stack_distance and line_accesses to a file ### + write_dist_to_file(args.dist_file, line_accesses, list_sd, cumm_sd) + + ### generate correspondinf synthetic ### + # line_accesses, list_sd, cumm_sd = read_dist_from_file(args.dist_file) + synthetic_trace = trace_generate_lru( + line_accesses, list_sd, cumm_sd, len(trace), 
args.trace_enable_padding + ) + # synthetic_trace = trace_generate_rand( + # line_accesses, list_sd, cumm_sd, len(trace), args.trace_enable_padding + # ) + write_trace_to_file(args.synthetic_file, synthetic_trace) diff --git a/benchmarks/dlrm/ootb/dlrm_data_pytorch.py b/benchmarks/dlrm/ootb/dlrm_data_pytorch.py new file mode 100644 index 0000000..9c4fa89 --- /dev/null +++ b/benchmarks/dlrm/ootb/dlrm_data_pytorch.py @@ -0,0 +1,1309 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +# +# Description: generate inputs and targets for the dlrm benchmark +# The inpts and outputs are generated according to the following three option(s) +# 1) random distribution +# 2) synthetic distribution, based on unique accesses and distances between them +# i) R. Hassan, A. Harris, N. Topham and A. Efthymiou "Synthetic Trace-Driven +# Simulation of Cache Memory", IEEE AINAM'07 +# 3) public data set +# i) Criteo Kaggle Display Advertising Challenge Dataset +# https://labs.criteo.com/2014/02/kaggle-display-advertising-challenge-dataset +# ii) Criteo Terabyte Dataset +# https://labs.criteo.com/2013/12/download-terabyte-click-logs + + +from __future__ import absolute_import, division, print_function, unicode_literals + +# others +from os import path +import sys +import functools +import bisect +import collections + +import data_utils + +# numpy +import numpy as np +from numpy import random as ra +from collections import deque + + +# pytorch +import torch +from torch.utils.data import Dataset, RandomSampler + +import data_loader_terabyte +import mlperf_logger + + +# Kaggle Display Advertising Challenge Dataset +# dataset (str): name of dataset (Kaggle or Terabyte) +# randomize (str): determines randomization scheme +# "none": no randomization +# "day": randomizes each day"s data (only works if split = True) +# "total": randomizes total dataset +# split 
(bool) : to split into train, test, validation data-sets +class CriteoDataset(Dataset): + + def __init__( + self, + dataset, + max_ind_range, + sub_sample_rate, + randomize, + split="train", + raw_path="", + pro_data="", + memory_map=False, + dataset_multiprocessing=False, + ): + # dataset + # tar_fea = 1 # single target + den_fea = 13 # 13 dense features + # spa_fea = 26 # 26 sparse features + # tad_fea = tar_fea + den_fea + # tot_fea = tad_fea + spa_fea + if dataset == "kaggle": + days = 7 + out_file = "kaggleAdDisplayChallenge_processed" + elif dataset == "terabyte": + days = 24 + out_file = "terabyte_processed" + else: + raise(ValueError("Data set option is not supported")) + self.max_ind_range = max_ind_range + self.memory_map = memory_map + + # split the datafile into path and filename + lstr = raw_path.split("/") + self.d_path = "/".join(lstr[0:-1]) + "/" + self.d_file = lstr[-1].split(".")[0] if dataset == "kaggle" else lstr[-1] + self.npzfile = self.d_path + ( + (self.d_file + "_day") if dataset == "kaggle" else self.d_file + ) + self.trafile = self.d_path + ( + (self.d_file + "_fea") if dataset == "kaggle" else "fea" + ) + + # check if pre-processed data is available + data_ready = True + if memory_map: + for i in range(days): + reo_data = self.npzfile + "_{0}_reordered.npz".format(i) + if not path.exists(str(reo_data)): + data_ready = False + else: + if not path.exists(str(pro_data)): + data_ready = False + + # pre-process data if needed + # WARNNING: when memory mapping is used we get a collection of files + if data_ready: + print("Reading pre-processed data=%s" % (str(pro_data))) + file = str(pro_data) + else: + print("Reading raw data=%s" % (str(raw_path))) + file = data_utils.getCriteoAdData( + raw_path, + out_file, + max_ind_range, + sub_sample_rate, + days, + split, + randomize, + dataset == "kaggle", + memory_map, + dataset_multiprocessing, + ) + + # get a number of samples per day + total_file = self.d_path + self.d_file + "_day_count.npz" + with 
np.load(total_file) as data: + total_per_file = data["total_per_file"] + # compute offsets per file + self.offset_per_file = np.array([0] + [x for x in total_per_file]) + for i in range(days): + self.offset_per_file[i + 1] += self.offset_per_file[i] + # print(self.offset_per_file) + + # setup data + if memory_map: + # setup the training/testing split + self.split = split + if split == 'none' or split == 'train': + self.day = 0 + self.max_day_range = days if split == 'none' else days - 1 + elif split == 'test' or split == 'val': + self.day = days - 1 + num_samples = self.offset_per_file[days] - \ + self.offset_per_file[days - 1] + self.test_size = int(np.ceil(num_samples / 2.)) + self.val_size = num_samples - self.test_size + else: + sys.exit("ERROR: dataset split is neither none, nor train or test.") + + ''' + # text + print("text") + for i in range(days): + fi = self.npzfile + "_{0}".format(i) + with open(fi) as data: + ttt = 0; nnn = 0 + for _j, line in enumerate(data): + ttt +=1 + if np.int32(line[0]) > 0: + nnn +=1 + print("day=" + str(i) + " total=" + str(ttt) + " non-zeros=" + + str(nnn) + " ratio=" +str((nnn * 100.) / ttt) + "%") + # processed + print("processed") + for i in range(days): + fi = self.npzfile + "_{0}_processed.npz".format(i) + with np.load(fi) as data: + yyy = data["y"] + ttt = len(yyy) + nnn = np.count_nonzero(yyy) + print("day=" + str(i) + " total=" + str(ttt) + " non-zeros=" + + str(nnn) + " ratio=" +str((nnn * 100.) / ttt) + "%") + # reordered + print("reordered") + for i in range(days): + fi = self.npzfile + "_{0}_reordered.npz".format(i) + with np.load(fi) as data: + yyy = data["y"] + ttt = len(yyy) + nnn = np.count_nonzero(yyy) + print("day=" + str(i) + " total=" + str(ttt) + " non-zeros=" + + str(nnn) + " ratio=" +str((nnn * 100.) 
/ ttt) + "%") + ''' + + # load unique counts + with np.load(self.d_path + self.d_file + "_fea_count.npz") as data: + self.counts = data["counts"] + self.m_den = den_fea # X_int.shape[1] + self.n_emb = len(self.counts) + print("Sparse features= %d, Dense features= %d" % (self.n_emb, self.m_den)) + + # Load the test data + # Only a single day is used for testing + if self.split == 'test' or self.split == 'val': + # only a single day is used for testing + fi = self.npzfile + "_{0}_reordered.npz".format( + self.day + ) + with np.load(fi) as data: + self.X_int = data["X_int"] # continuous feature + self.X_cat = data["X_cat"] # categorical feature + self.y = data["y"] # target + + else: + # load and preprocess data + with np.load(file) as data: + X_int = data["X_int"] # continuous feature + X_cat = data["X_cat"] # categorical feature + y = data["y"] # target + self.counts = data["counts"] + self.m_den = X_int.shape[1] # den_fea + self.n_emb = len(self.counts) + print("Sparse fea = %d, Dense fea = %d" % (self.n_emb, self.m_den)) + + # create reordering + indices = np.arange(len(y)) + + if split == "none": + # randomize all data + if randomize == "total": + indices = np.random.permutation(indices) + print("Randomized indices...") + + X_int[indices] = X_int + X_cat[indices] = X_cat + y[indices] = y + + else: + indices = np.array_split(indices, self.offset_per_file[1:-1]) + + # randomize train data (per day) + if randomize == "day": # or randomize == "total": + for i in range(len(indices) - 1): + indices[i] = np.random.permutation(indices[i]) + print("Randomized indices per day ...") + + train_indices = np.concatenate(indices[:-1]) + test_indices = indices[-1] + test_indices, val_indices = np.array_split(test_indices, 2) + + print("Defined %s indices..." 
% (split)) + + # randomize train data (across days) + if randomize == "total": + train_indices = np.random.permutation(train_indices) + print("Randomized indices across days ...") + + # create training, validation, and test sets + if split == 'train': + self.X_int = [X_int[i] for i in train_indices] + self.X_cat = [X_cat[i] for i in train_indices] + self.y = [y[i] for i in train_indices] + elif split == 'val': + self.X_int = [X_int[i] for i in val_indices] + self.X_cat = [X_cat[i] for i in val_indices] + self.y = [y[i] for i in val_indices] + elif split == 'test': + self.X_int = [X_int[i] for i in test_indices] + self.X_cat = [X_cat[i] for i in test_indices] + self.y = [y[i] for i in test_indices] + + print("Split data according to indices...") + + def __getitem__(self, index): + + if isinstance(index, slice): + return [ + self[idx] for idx in range( + index.start or 0, index.stop or len(self), index.step or 1 + ) + ] + + if self.memory_map: + if self.split == 'none' or self.split == 'train': + # check if need to swicth to next day and load data + if index == self.offset_per_file[self.day]: + # print("day_boundary switch", index) + self.day_boundary = self.offset_per_file[self.day] + fi = self.npzfile + "_{0}_reordered.npz".format( + self.day + ) + # print('Loading file: ', fi) + with np.load(fi) as data: + self.X_int = data["X_int"] # continuous feature + self.X_cat = data["X_cat"] # categorical feature + self.y = data["y"] # target + self.day = (self.day + 1) % self.max_day_range + + i = index - self.day_boundary + elif self.split == 'test' or self.split == 'val': + # only a single day is used for testing + i = index + (0 if self.split == 'test' else self.test_size) + else: + sys.exit("ERROR: dataset split is neither none, nor train or test.") + else: + i = index + + if self.max_ind_range > 0: + return self.X_int[i], self.X_cat[i] % self.max_ind_range, self.y[i] + else: + return self.X_int[i], self.X_cat[i], self.y[i] + + def _default_preprocess(self, X_int, 
X_cat, y): + X_int = torch.log(torch.tensor(X_int, dtype=torch.float) + 1) + if self.max_ind_range > 0: + X_cat = torch.tensor(X_cat % self.max_ind_range, dtype=torch.long) + else: + X_cat = torch.tensor(X_cat, dtype=torch.long) + y = torch.tensor(y.astype(np.float32)) + + return X_int, X_cat, y + + def __len__(self): + if self.memory_map: + if self.split == 'none': + return self.offset_per_file[-1] + elif self.split == 'train': + return self.offset_per_file[-2] + elif self.split == 'test': + return self.test_size + elif self.split == 'val': + return self.val_size + else: + sys.exit("ERROR: dataset split is neither none, nor train nor test.") + else: + return len(self.y) + + +def collate_wrapper_criteo_offset(list_of_tuples): + # where each tuple is (X_int, X_cat, y) + transposed_data = list(zip(*list_of_tuples)) + X_int = torch.log(torch.tensor(transposed_data[0], dtype=torch.float) + 1) + X_cat = torch.tensor(transposed_data[1], dtype=torch.long) + T = torch.tensor(transposed_data[2], dtype=torch.float32).view(-1, 1) + + batchSize = X_cat.shape[0] + featureCnt = X_cat.shape[1] + + lS_i = [X_cat[:, i] for i in range(featureCnt)] + lS_o = [torch.tensor(range(batchSize)) for _ in range(featureCnt)] + + return X_int, torch.stack(lS_o), torch.stack(lS_i), T + + +def ensure_dataset_preprocessed(args, d_path): + _ = CriteoDataset( + args.data_set, + args.max_ind_range, + args.data_sub_sample_rate, + args.data_randomize, + "train", + args.raw_data_file, + args.processed_data_file, + args.memory_map, + args.dataset_multiprocessing + ) + + _ = CriteoDataset( + args.data_set, + args.max_ind_range, + args.data_sub_sample_rate, + args.data_randomize, + "test", + args.raw_data_file, + args.processed_data_file, + args.memory_map, + args.dataset_multiprocessing + ) + + for split in ['train', 'val', 'test']: + print('Running preprocessing for split =', split) + + train_files = ['{}_{}_reordered.npz'.format(args.raw_data_file, day) + for + day in range(0, 23)] + + test_valid_file 
# Conversion from offset to length
def offset_to_length_converter(lS_o, lS_i):
    """Convert per-table offset tensors into per-sample length tensors.

    lS_o[f] holds each sample's start offset within lS_i[f]; appending
    len(lS_i[f]) and taking adjacent differences yields the number of
    indices per sample.
    """
    def diff(tensor):
        return tensor[1:] - tensor[:-1]

    return torch.stack(
        [
            diff(torch.cat((S_o, torch.tensor(lS_i[ind].shape))).int())
            for ind, S_o in enumerate(lS_o)
        ]
    )


def collate_wrapper_criteo_length(list_of_tuples):
    """Collate (X_int, X_cat, y) tuples into (X_int, lS_l, lS_i, T), emitting
    per-sample lengths instead of offsets for the sparse features."""
    transposed_data = list(zip(*list_of_tuples))
    X_int = torch.log(torch.tensor(transposed_data[0], dtype=torch.float) + 1)
    X_cat = torch.tensor(transposed_data[1], dtype=torch.long)
    T = torch.tensor(transposed_data[2], dtype=torch.float32).view(-1, 1)

    batchSize = X_cat.shape[0]
    featureCnt = X_cat.shape[1]

    lS_i = torch.stack([X_cat[:, i] for i in range(featureCnt)])
    lS_o = torch.stack(
        [torch.tensor(range(batchSize)) for _ in range(featureCnt)]
    )

    return X_int, offset_to_length_converter(lS_o, lS_i), lS_i, T


def make_criteo_data_and_loaders(args, offset_to_length_converter=False):
    """Build (train_data, train_loader, test_data, test_loader) for Criteo.

    Three paths:
      1. mlperf + memory_map + terabyte + bin loader: pre-packed binary files
         (created on demand by ensure_dataset_preprocessed), batches produced
         by the dataset itself (hence batch_size=None in the DataLoader);
      2. mlperf + memory_map + terabyte: streaming per-day npz loader;
      3. default: in-memory CriteoDataset with a collate function emitting
         offsets (or lengths when offset_to_length_converter=True).
    """
    if args.mlperf_logging and args.memory_map and args.data_set == "terabyte":
        # more efficient for larger batches
        data_directory = path.dirname(args.raw_data_file)

        if args.mlperf_bin_loader:
            lstr = args.processed_data_file.split("/")
            d_path = "/".join(lstr[0:-1]) + "/" + lstr[-1].split(".")[0]
            train_file = d_path + "_train.bin"
            test_file = d_path + "_test.bin"
            # val_file = d_path + "_val.bin"
            counts_file = args.raw_data_file + '_fea_count.npz'

            if any(not path.exists(p) for p in [train_file,
                                                test_file,
                                                counts_file]):
                ensure_dataset_preprocessed(args, d_path)

            train_data = data_loader_terabyte.CriteoBinDataset(
                data_file=train_file,
                counts_file=counts_file,
                batch_size=args.mini_batch_size,
                max_ind_range=args.max_ind_range
            )

            mlperf_logger.log_event(key=mlperf_logger.constants.TRAIN_SAMPLES,
                                    value=train_data.num_samples)

            train_loader = torch.utils.data.DataLoader(
                train_data,
                batch_size=None,
                batch_sampler=None,
                shuffle=False,
                num_workers=0,
                collate_fn=None,
                pin_memory=False,
                drop_last=False,
                sampler=RandomSampler(train_data) if args.mlperf_bin_shuffle else None
            )

            test_data = data_loader_terabyte.CriteoBinDataset(
                data_file=test_file,
                counts_file=counts_file,
                batch_size=args.test_mini_batch_size,
                max_ind_range=args.max_ind_range
            )

            mlperf_logger.log_event(key=mlperf_logger.constants.EVAL_SAMPLES,
                                    value=test_data.num_samples)

            test_loader = torch.utils.data.DataLoader(
                test_data,
                batch_size=None,
                batch_sampler=None,
                shuffle=False,
                num_workers=0,
                collate_fn=None,
                pin_memory=False,
                drop_last=False,
            )
        else:
            data_filename = args.raw_data_file.split("/")[-1]

            train_data = CriteoDataset(
                args.data_set,
                args.max_ind_range,
                args.data_sub_sample_rate,
                args.data_randomize,
                "train",
                args.raw_data_file,
                args.processed_data_file,
                args.memory_map,
                args.dataset_multiprocessing
            )

            test_data = CriteoDataset(
                args.data_set,
                args.max_ind_range,
                args.data_sub_sample_rate,
                args.data_randomize,
                "test",
                args.raw_data_file,
                args.processed_data_file,
                args.memory_map,
                args.dataset_multiprocessing
            )

            # NOTE: days 0-22 train / day 23 test are hard-coded (Terabyte)
            train_loader = data_loader_terabyte.DataLoader(
                data_directory=data_directory,
                data_filename=data_filename,
                days=list(range(23)),
                batch_size=args.mini_batch_size,
                max_ind_range=args.max_ind_range,
                split="train"
            )

            test_loader = data_loader_terabyte.DataLoader(
                data_directory=data_directory,
                data_filename=data_filename,
                days=[23],
                batch_size=args.test_mini_batch_size,
                max_ind_range=args.max_ind_range,
                split="test"
            )
    else:
        train_data = CriteoDataset(
            args.data_set,
            args.max_ind_range,
            args.data_sub_sample_rate,
            args.data_randomize,
            "train",
            args.raw_data_file,
            args.processed_data_file,
            args.memory_map,
            args.dataset_multiprocessing,
        )

        test_data = CriteoDataset(
            args.data_set,
            args.max_ind_range,
            args.data_sub_sample_rate,
            args.data_randomize,
            "test",
            args.raw_data_file,
            args.processed_data_file,
            args.memory_map,
            args.dataset_multiprocessing,
        )

        collate_wrapper_criteo = (
            collate_wrapper_criteo_length
            if offset_to_length_converter
            else collate_wrapper_criteo_offset
        )

        train_loader = torch.utils.data.DataLoader(
            train_data,
            batch_size=args.mini_batch_size,
            shuffle=False,
            num_workers=args.num_workers,
            collate_fn=collate_wrapper_criteo,
            pin_memory=False,
            drop_last=False,  # True
        )

        test_loader = torch.utils.data.DataLoader(
            test_data,
            batch_size=args.test_mini_batch_size,
            shuffle=False,
            num_workers=args.test_num_workers,
            collate_fn=collate_wrapper_criteo,
            pin_memory=False,
            drop_last=False,  # True
        )

    return train_data, train_loader, test_data, test_loader
# uniform ditribution (input data)
class RandomDataset(Dataset):
    """Synthetic DLRM dataset.

    Each __getitem__ returns one WHOLE batch (X, lS_o, lS_i, T), so it should
    be used with batch_size=1 and one of the collate_wrapper_random_*
    functions.  When cache_size is set, batches are produced through the
    lru_cache'd generators keyed by index % cache_size, which bounds the
    number of distinct cached batches.
    """

    def __init__(
        self,
        m_den,
        ln_emb,
        data_size,
        num_batches,
        mini_batch_size,
        num_indices_per_lookup,
        num_indices_per_lookup_fixed,
        num_targets=1,
        round_targets=False,
        data_generation="random",
        trace_file="",
        enable_padding=False,
        reset_seed_on_access=False,
        rand_data_dist="uniform",
        rand_data_min=1,
        rand_data_max=1,
        rand_data_mu=-1,
        rand_data_sigma=1,
        rand_seed=0,
        cache_size=None,
    ):
        # compute batch count; when num_batches is forced, data_size is
        # recomputed so that every batch is full
        nbatches = int(np.ceil((data_size * 1.0) / mini_batch_size))
        if num_batches != 0:
            nbatches = num_batches
            data_size = nbatches * mini_batch_size

        # save args (recompute data_size if needed)
        self.m_den = m_den
        self.ln_emb = ln_emb
        self.data_size = data_size
        self.num_batches = nbatches
        self.mini_batch_size = mini_batch_size
        self.num_indices_per_lookup = num_indices_per_lookup
        self.num_indices_per_lookup_fixed = num_indices_per_lookup_fixed
        self.num_targets = num_targets
        self.round_targets = round_targets
        self.data_generation = data_generation
        self.trace_file = trace_file
        self.enable_padding = enable_padding
        self.reset_seed_on_access = reset_seed_on_access
        self.rand_seed = rand_seed
        self.rand_data_dist = rand_data_dist
        self.rand_data_min = rand_data_min
        self.rand_data_max = rand_data_max
        self.rand_data_mu = rand_data_mu
        self.rand_data_sigma = rand_data_sigma
        self.cache_size = cache_size

    def reset_numpy_seed(self, numpy_rand_seed):
        """Reseed numpy so the same random batches can be replayed."""
        np.random.seed(numpy_rand_seed)
        # torch.manual_seed(numpy_rand_seed)

    def __getitem__(self, index):
        if isinstance(index, slice):
            return [
                self[idx] for idx in range(
                    index.start or 0, index.stop or len(self), index.step or 1
                )
            ]

        # WARNING: reset seed on access to first element
        # (e.g. if same random samples needed across epochs)
        if self.reset_seed_on_access and index == 0:
            self.reset_numpy_seed(self.rand_seed)

        # number of data points in this batch (the last batch may be short)
        n = min(self.mini_batch_size, self.data_size - (index * self.mini_batch_size))

        # FIX: initialize cache_key up front instead of probing `locals()`
        # after the fact
        cache_key = None

        # generate a batch of dense and sparse features
        if self.data_generation == "random":
            if self.cache_size is None:
                gen = generate_dist_input_batch.__wrapped__  # uncached draw
            else:
                gen = generate_dist_input_batch  # lru_cache'd
                cache_key = index % self.cache_size
            (X, lS_o, lS_i) = gen(
                self.m_den,
                # tuple because lru_cache needs hashable arguments
                # (assumes ln_emb is a numpy array -- TODO confirm callers)
                tuple(self.ln_emb.tolist()),
                n,
                self.num_indices_per_lookup,
                self.num_indices_per_lookup_fixed,
                rand_data_dist=self.rand_data_dist,
                rand_data_min=self.rand_data_min,
                rand_data_max=self.rand_data_max,
                rand_data_mu=self.rand_data_mu,
                rand_data_sigma=self.rand_data_sigma,
                cache_key=cache_key,
            )
        elif self.data_generation == "synthetic":
            (X, lS_o, lS_i) = generate_synthetic_input_batch(
                self.m_den,
                self.ln_emb,
                n,
                self.num_indices_per_lookup,
                self.num_indices_per_lookup_fixed,
                self.trace_file,
                self.enable_padding
            )
        else:
            sys.exit(
                "ERROR: --data-generation=" + self.data_generation + " is not supported"
            )

        # generate a batch of targets (probability of a click); cached under
        # the same key as the inputs when caching is enabled
        if cache_key is not None:
            T = generate_random_output_batch(
                n, self.num_targets, self.round_targets, cache_key
            )
        else:
            T = generate_random_output_batch.__wrapped__(
                n, self.num_targets, self.round_targets
            )

        return (X, lS_o, lS_i, T)

    def __len__(self):
        # WARNING: __getitem__ produces whole batches, hence num_batches
        # rather than data_size
        return self.num_batches


def collate_wrapper_random_offset(list_of_tuples):
    """Unwrap the single (X, lS_o, lS_i, T) batch tuple, stacking offsets."""
    (X, lS_o, lS_i, T) = list_of_tuples[0]
    return (X,
            torch.stack(lS_o),
            lS_i,
            T)


def collate_wrapper_random_length(list_of_tuples):
    """Unwrap the single (X, lS_o, lS_i, T) batch tuple, converting the
    offsets into per-sample lengths."""
    (X, lS_o, lS_i, T) = list_of_tuples[0]
    return (X,
            offset_to_length_converter(torch.stack(lS_o), lS_i),
            lS_i,
            T)


def make_random_data_and_loader(args, ln_emb, m_den,
                                offset_to_length_converter=False, cache_size=None,
                                ):
    """Build (train_data, train_loader, test_data, test_loader) of synthetic
    random batches; both datasets are configured identically."""
    train_data = RandomDataset(
        m_den,
        ln_emb,
        args.data_size,
        args.num_batches,
        args.mini_batch_size,
        args.num_indices_per_lookup,
        args.num_indices_per_lookup_fixed,
        1,  # num_targets
        args.round_targets,
        args.data_generation,
        args.data_trace_file,
        args.data_trace_enable_padding,
        reset_seed_on_access=True,
        rand_data_dist=args.rand_data_dist,
        rand_data_min=args.rand_data_min,
        rand_data_max=args.rand_data_max,
        rand_data_mu=args.rand_data_mu,
        rand_data_sigma=args.rand_data_sigma,
        rand_seed=args.numpy_rand_seed,
        cache_size=cache_size,
    )  # WARNING: generates a batch of lookups at once

    test_data = RandomDataset(
        m_den,
        ln_emb,
        args.data_size,
        args.num_batches,
        args.mini_batch_size,
        args.num_indices_per_lookup,
        args.num_indices_per_lookup_fixed,
        1,  # num_targets
        args.round_targets,
        args.data_generation,
        args.data_trace_file,
        args.data_trace_enable_padding,
        reset_seed_on_access=True,
        rand_data_dist=args.rand_data_dist,
        rand_data_min=args.rand_data_min,
        rand_data_max=args.rand_data_max,
        rand_data_mu=args.rand_data_mu,
        rand_data_sigma=args.rand_data_sigma,
        rand_seed=args.numpy_rand_seed,
        cache_size=cache_size,
    )

    collate_wrapper_random = (
        collate_wrapper_random_length
        if offset_to_length_converter
        else collate_wrapper_random_offset
    )

    train_loader = torch.utils.data.DataLoader(
        train_data,
        batch_size=1,
        shuffle=False,
        num_workers=args.num_workers,
        collate_fn=collate_wrapper_random,
        pin_memory=False,
        drop_last=False,  # True
    )

    test_loader = torch.utils.data.DataLoader(
        test_data,
        batch_size=1,
        shuffle=False,
        num_workers=args.num_workers,
        collate_fn=collate_wrapper_random,
        pin_memory=False,
        drop_last=False,  # True
    )
    return train_data, train_loader, test_data, test_loader
def generate_random_data(
    m_den,
    ln_emb,
    data_size,
    num_batches,
    mini_batch_size,
    num_indices_per_lookup,
    num_indices_per_lookup_fixed,
    num_targets=1,
    round_targets=False,
    data_generation="random",
    trace_file="",
    enable_padding=False,
    length=False,  # length for caffe2 version (except dlrm_s_caffe2)
):
    """Pre-generate all batches up front (eager counterpart of RandomDataset).

    Returns (nbatches, lX, lS_offsets, lS_indices, lT) where each list holds
    one entry per batch.
    """
    nbatches = int(np.ceil((data_size * 1.0) / mini_batch_size))
    if num_batches != 0:
        nbatches = num_batches
        data_size = nbatches * mini_batch_size

    lT = []
    lX = []
    lS_offsets = []
    lS_indices = []
    for j in range(0, nbatches):
        # number of data points in this batch (last batch may be short)
        n = min(mini_batch_size, data_size - (j * mini_batch_size))

        # generate a batch of dense and sparse features
        if data_generation == "random":
            (Xt, lS_emb_offsets, lS_emb_indices) = generate_uniform_input_batch(
                m_den,
                ln_emb,
                n,
                num_indices_per_lookup,
                num_indices_per_lookup_fixed,
                length,
            )
        elif data_generation == "synthetic":
            (Xt, lS_emb_offsets, lS_emb_indices) = generate_synthetic_input_batch(
                m_den,
                ln_emb,
                n,
                num_indices_per_lookup,
                num_indices_per_lookup_fixed,
                trace_file,
                enable_padding
            )
        else:
            sys.exit(
                "ERROR: --data-generation=" + data_generation + " is not supported"
            )
        lX.append(Xt)
        lS_offsets.append(lS_emb_offsets)
        lS_indices.append(lS_emb_indices)

        # generate a batch of targets (probability of a click)
        lT.append(generate_random_output_batch(n, num_targets, round_targets))

    return (nbatches, lX, lS_offsets, lS_indices, lT)


@functools.lru_cache(maxsize=None)
def generate_random_output_batch(n, num_targets, round_targets=False, cache_key=None):
    """Random click-probability targets of shape (n, num_targets).

    cache_key participates only in the lru_cache key: calls sharing a key
    return the identical cached tensor.  Use .__wrapped__ for uncached draws.
    WARNING: the cache is unbounded; callers must bound the key space.
    """
    if round_targets:
        # np.round preserves float32, so a second astype is unnecessary
        P = np.round(ra.rand(n, num_targets).astype(np.float32))
    else:
        P = ra.rand(n, num_targets).astype(np.float32)

    return torch.tensor(P)


# uniform ditribution (input data)
def generate_uniform_input_batch(
    m_den,
    ln_emb,
    n,
    num_indices_per_lookup,
    num_indices_per_lookup_fixed,
    length,
):
    """One batch: dense features (n, m_den) plus per-table sparse lookups.

    Returns (Xt, lS_emb_offsets, lS_emb_indices); when length is True the
    "offsets" list actually holds per-lookup lengths (caffe2 layout).
    """
    # dense feature
    Xt = torch.tensor(ra.rand(n, m_den).astype(np.float32))

    # sparse feature (sparse indices)
    lS_emb_offsets = []
    lS_emb_indices = []
    # for each embedding table generate n lookups, each composed of
    # (possibly) multiple sparse indices
    for size in ln_emb:
        lS_batch_offsets = []
        lS_batch_indices = []
        offset = 0
        for _ in range(n):
            # number of sparse indices for this lookup
            if num_indices_per_lookup_fixed:
                sparse_group_size = np.int64(num_indices_per_lookup)
            else:
                # random between [1, num_indices_per_lookup]
                r = ra.random(1)
                sparse_group_size = np.int64(
                    np.round(max([1.0], r * min(size, num_indices_per_lookup)))
                )
            # sparse indices to be used per embedding
            r = ra.random(sparse_group_size)
            sparse_group = np.unique(np.round(r * (size - 1)).astype(np.int64))
            # np.unique may have removed duplicates, so refresh the size
            sparse_group_size = np.int32(sparse_group.size)
            if length:  # caffe2 layout stores lengths, not offsets
                lS_batch_offsets += [sparse_group_size]
            else:
                lS_batch_offsets += [offset]
            lS_batch_indices += sparse_group.tolist()
            offset += sparse_group_size
        lS_emb_offsets.append(torch.tensor(lS_batch_offsets))
        lS_emb_indices.append(torch.tensor(lS_batch_indices))

    return (Xt, lS_emb_offsets, lS_emb_indices)
per embedding + r = ra.random(sparse_group_size) + sparse_group = np.unique(np.round(r * (size - 1)).astype(np.int64)) + # reset sparse_group_size in case some index duplicates were removed + sparse_group_size = np.int32(sparse_group.size) + # store lengths and indices + if length: # for caffe2 version + lS_batch_offsets += [sparse_group_size] + else: + lS_batch_offsets += [offset] + lS_batch_indices += sparse_group.tolist() + # update offset for next iteration + offset += sparse_group_size + lS_emb_offsets.append(torch.tensor(lS_batch_offsets)) + lS_emb_indices.append(torch.tensor(lS_batch_indices)) + + return (Xt, lS_emb_offsets, lS_emb_indices) + + +# random data from uniform or gaussian ditribution (input data) +@functools.lru_cache(maxsize=None) +def generate_dist_input_batch( + m_den, + ln_emb, + n, + num_indices_per_lookup, + num_indices_per_lookup_fixed, + rand_data_dist, + rand_data_min, + rand_data_max, + rand_data_mu, + rand_data_sigma, + cache_key = None, +): + # dense feature + Xt = torch.tensor(ra.rand(n, m_den).astype(np.float32)) + + # sparse feature (sparse indices) + lS_emb_offsets = [] + lS_emb_indices = [] + # for each embedding generate a list of n lookups, + # where each lookup is composed of multiple sparse indices + for size in ln_emb: + lS_batch_offsets = [] + lS_batch_indices = [] + offset = 0 + for _ in range(n): + # num of sparse indices to be used per embedding (between + if num_indices_per_lookup_fixed: + sparse_group_size = np.int64(num_indices_per_lookup) + else: + # random between [1,num_indices_per_lookup]) + r = ra.random(1) + sparse_group_size = np.int64( + np.round(max([1.0], r * min(size, num_indices_per_lookup))) + ) + # sparse indices to be used per embedding + if rand_data_dist == "gaussian": + if rand_data_mu == -1: + rand_data_mu = (rand_data_max + rand_data_min) / 2.0 + r = ra.normal(rand_data_mu, rand_data_sigma, sparse_group_size) + sparse_group = np.clip(r, rand_data_min, rand_data_max) + sparse_group = 
np.unique(sparse_group).astype(np.int64) + elif rand_data_dist == "uniform": + r = ra.random(sparse_group_size) + sparse_group = np.unique(np.round(r * (size - 1)).astype(np.int64)) + else: + raise(rand_data_dist, "distribution is not supported. \ + please select uniform or gaussian") + + # reset sparse_group_size in case some index duplicates were removed + sparse_group_size = np.int64(sparse_group.size) + # store lengths and indices + lS_batch_offsets += [offset] + lS_batch_indices += sparse_group.tolist() + # update offset for next iteration + offset += sparse_group_size + lS_emb_offsets.append(torch.tensor(lS_batch_offsets)) + lS_emb_indices.append(torch.tensor(lS_batch_indices)) + + return (Xt, lS_emb_offsets, lS_emb_indices) + + +# synthetic distribution (input data) +def generate_synthetic_input_batch( + m_den, + ln_emb, + n, + num_indices_per_lookup, + num_indices_per_lookup_fixed, + trace_file, + enable_padding=False, +): + # dense feature + Xt = torch.tensor(ra.rand(n, m_den).astype(np.float32)) + + # sparse feature (sparse indices) + lS_emb_offsets = [] + lS_emb_indices = [] + # for each embedding generate a list of n lookups, + # where each lookup is composed of multiple sparse indices + for i, size in enumerate(ln_emb): + lS_batch_offsets = [] + lS_batch_indices = [] + offset = 0 + for _ in range(n): + # num of sparse indices to be used per embedding (between + if num_indices_per_lookup_fixed: + sparse_group_size = np.int64(num_indices_per_lookup) + else: + # random between [1,num_indices_per_lookup]) + r = ra.random(1) + sparse_group_size = np.int64( + max(1, np.round(r * min(size, num_indices_per_lookup))[0]) + ) + # sparse indices to be used per embedding + file_path = trace_file + line_accesses, list_sd, cumm_sd = read_dist_from_file( + file_path.replace("j", str(i)) + ) + # debug prints + # print("input") + # print(line_accesses); print(list_sd); print(cumm_sd); + # print(sparse_group_size) + # approach 1: rand + # r = trace_generate_rand( + # 
def generate_stack_distance(cumm_val, cumm_dist, max_i, i, enable_padding=False):
    """Sample a stack distance from the empirical CDF (cumm_val, cumm_dist).

    cumm_val holds the distinct stack-distance values (ascending) and
    cumm_dist the matching cumulative probabilities. i is the number of new
    references generated so far and max_i the largest stack distance; while
    i < max_i the support is shrunk so we never sample a distance deeper than
    the number of lines already seen.
    """
    u = ra.rand(1)
    if i < max_i:
        # only generate stack distances up to the number of new references seen so far
        j = bisect.bisect(cumm_val, i) - 1
        fi = cumm_dist[j]
        u *= fi  # shrink distribution support to exclude last values
    elif enable_padding:
        # WARNING: disable generation of new references (once all have been seen)
        fi = cumm_dist[0]
        u = (1.0 - fi) * u + fi  # remap distribution support to exclude first value

    for (j, f) in enumerate(cumm_dist):
        if u <= f:
            return cumm_val[j]
    # FIX: if floating-point error leaves cumm_dist[-1] slightly below u the
    # original fell off the loop and returned None (crashing callers that
    # index with it); fall back to the largest stack-distance value instead.
    return cumm_val[-1]


# WARNING: global define, must be consistent across all synthetic functions
cache_line_size = 1


def trace_generate_lru(
    line_accesses, list_sd, cumm_sd, out_trace_len, enable_padding=False
):
    """Generate a synthetic memory trace with LRU reuse semantics.

    WARNING: line_accesses is reordered in place — the referenced line is
    moved to the back, mirroring an LRU stack.
    Returns a deque of np.uint64 memory references of length out_trace_len.
    """
    max_sd = list_sd[-1]
    l = len(line_accesses)
    i = 0
    ztrace = deque()
    for _ in range(out_trace_len):
        sd = generate_stack_distance(list_sd, cumm_sd, max_sd, i, enable_padding)
        mem_ref_within_line = 0  # floor(ra.rand(1)*cache_line_size) #0

        # generate memory reference
        if sd == 0:  # new reference
            line_ref = line_accesses[0]
            del line_accesses[0]
            line_accesses.append(line_ref)
            mem_ref = np.uint64(line_ref * cache_line_size + mem_ref_within_line)
            i += 1
        else:  # existing reference: read at depth l - sd, move to the back
            line_ref = line_accesses[l - sd]
            mem_ref = np.uint64(line_ref * cache_line_size + mem_ref_within_line)
            del line_accesses[l - sd]
            line_accesses.append(line_ref)
        # save generated memory reference
        ztrace.append(mem_ref)

    return ztrace


def trace_generate_rand(
    line_accesses, list_sd, cumm_sd, out_trace_len, enable_padding=False
):
    """Generate a synthetic memory trace WITHOUT LRU reordering.

    Unlike trace_generate_lru, an existing reference is read at depth l - sd
    but deliberately not moved to the back of line_accesses.
    Returns a list of np.uint64 memory references of length out_trace_len.
    """
    max_sd = list_sd[-1]
    l = len(line_accesses)  # !!!Unique
    i = 0
    ztrace = []
    for _ in range(out_trace_len):
        sd = generate_stack_distance(list_sd, cumm_sd, max_sd, i, enable_padding)
        mem_ref_within_line = 0  # floor(ra.rand(1)*cache_line_size) #0
        # generate memory reference
        if sd == 0:  # new reference
            line_ref = line_accesses.pop(0)
            line_accesses.append(line_ref)
            mem_ref = np.uint64(line_ref * cache_line_size + mem_ref_within_line)
            i += 1
        else:  # existing reference
            line_ref = line_accesses[l - sd]
            mem_ref = np.uint64(line_ref * cache_line_size + mem_ref_within_line)
        ztrace.append(mem_ref)

    return ztrace
# auxiliary read/write routines
def read_trace_from_file(file_path):
    """Read a memory trace (list of np.uint64) from file_path.

    NOTE(review): depends on the module-global `args` (defined only when this
    file runs as a script) for trace_file_binary_type — confirm before calling
    from library code. On failure prints an error and returns None.
    """
    try:
        with open(file_path) as f:
            if args.trace_file_binary_type:
                array = np.fromfile(f, dtype=np.uint64)
                trace = array.astype(np.uint64).tolist()
            else:
                line = f.readline()
                trace = list(map(lambda x: np.uint64(x), line.split(", ")))
            return trace
    except Exception:
        print(f"ERROR: trace file '{file_path}' is not available.")


def write_trace_to_file(file_path, trace):
    """Write a memory trace to file_path (binary or ', '-separated text).

    NOTE(review): also depends on the script-level `args` global; best-effort,
    errors are reported but swallowed.
    """
    try:
        if args.trace_file_binary_type:
            with open(file_path, "wb+") as f:
                np.array(trace).astype(np.uint64).tofile(f)
        else:
            with open(file_path, "w+") as f:
                s = str(list(trace))
                f.write(s[1 : len(s) - 1])
    except Exception:
        print("ERROR: no output trace file has been provided")


def read_dist_from_file(file_path):
    """Read (unique_accesses, list_sd, cumm_sd) written by write_dist_to_file.

    Line 1: unique line accesses, line 2: stack-distance values, line 3: the
    cumulative distribution — all ', '-separated.
    """
    try:
        with open(file_path, "r") as f:
            lines = f.read().splitlines()
    except Exception:
        # FIX: the original printed a literal "{file_path}" (missing f-prefix)
        # and then crashed with an unrelated NameError on the unbound `lines`;
        # report the path and re-raise the real error.
        print(f"{file_path} Wrong file or file path")
        raise
    # read unique accesses
    unique_accesses = [int(el) for el in lines[0].split(", ")]
    # read cumulative distribution (elements are passed as two separate lists)
    list_sd = [int(el) for el in lines[1].split(", ")]
    cumm_sd = [float(el) for el in lines[2].split(", ")]

    return unique_accesses, list_sd, cumm_sd


def write_dist_to_file(file_path, unique_accesses, list_sd, cumm_sd):
    """Write the profiled distribution as three ', '-separated text lines."""
    try:
        with open(file_path, "w") as f:
            # unique_accesses
            s = str(list(unique_accesses))
            f.write(s[1 : len(s) - 1] + "\n")
            # list_sd
            s = str(list_sd)
            f.write(s[1 : len(s) - 1] + "\n")
            # cumm_sd
            s = str(list(cumm_sd))
            f.write(s[1 : len(s) - 1] + "\n")
    except Exception:
        print("Wrong file or file path")
argparse.ArgumentParser(description="Generate Synthetic Distributions") + parser.add_argument("--trace-file", type=str, default="./input/trace.log") + parser.add_argument("--trace-file-binary-type", type=bool, default=False) + parser.add_argument("--trace-enable-padding", type=bool, default=False) + parser.add_argument("--dist-file", type=str, default="./input/dist.log") + parser.add_argument( + "--synthetic-file", type=str, default="./input/trace_synthetic.log" + ) + parser.add_argument("--numpy-rand-seed", type=int, default=123) + parser.add_argument("--print-precision", type=int, default=5) + args = parser.parse_args() + + ### some basic setup ### + np.random.seed(args.numpy_rand_seed) + np.set_printoptions(precision=args.print_precision) + + ### read trace ### + trace = read_trace_from_file(args.trace_file) + # print(trace) + + ### profile trace ### + (_, stack_distances, line_accesses) = trace_profile( + trace, args.trace_enable_padding + ) + stack_distances.reverse() + line_accesses.reverse() + # print(line_accesses) + # print(stack_distances) + + ### compute probability distribution ### + # count items + l = len(stack_distances) + dc = sorted( + collections.Counter(stack_distances).items(), key=operator.itemgetter(0) + ) + + # create a distribution + list_sd = list(map(lambda tuple_x_k: tuple_x_k[0], dc)) # x = tuple_x_k[0] + dist_sd = list( + map(lambda tuple_x_k: tuple_x_k[1] / float(l), dc) + ) # k = tuple_x_k[1] + cumm_sd = deque() # np.cumsum(dc).tolist() #prefixsum + for i, (_, k) in enumerate(dc): + if i == 0: + cumm_sd.append(k / float(l)) + else: + # add the 2nd element of the i-th tuple in the dist_sd list + cumm_sd.append(cumm_sd[i - 1] + (k / float(l))) + + ### write stack_distance and line_accesses to a file ### + write_dist_to_file(args.dist_file, line_accesses, list_sd, cumm_sd) + + ### generate corresponding synthetic ### + # line_accesses, list_sd, cumm_sd = read_dist_from_file(args.dist_file) + synthetic_trace = trace_generate_lru( + 
line_accesses, list_sd, cumm_sd, len(trace), args.trace_enable_padding + ) + # synthetic_trace = trace_generate_rand( + # line_accesses, list_sd, cumm_sd, len(trace), args.trace_enable_padding + # ) + write_trace_to_file(args.synthetic_file, synthetic_trace) diff --git a/benchmarks/dlrm/ootb/dlrm_s_caffe2.py b/benchmarks/dlrm/ootb/dlrm_s_caffe2.py new file mode 100644 index 0000000..8e3ed74 --- /dev/null +++ b/benchmarks/dlrm/ootb/dlrm_s_caffe2.py @@ -0,0 +1,1703 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +# +# Description: an implementation of a deep learning recommendation model (DLRM) +# The model input consists of dense and sparse features. The former is a vector +# of floating point values. The latter is a list of sparse indices into +# embedding tables, which consist of vectors of floating point values. +# The selected vectors are passed to mlp networks denoted by triangles, +# in some cases the vectors are interacted through operators (Ops). +# +# output: +# vector of values +# model: | +# /\ +# /__\ +# | +# _____________________> Op <___________________ +# / | \ +# /\ /\ /\ +# /__\ /__\ ... /__\ +# | | | +# | Op Op +# | ____/__\_____ ____/__\____ +# | |_Emb_|____|__| ... |_Emb_|__|___| +# input: +# [ dense features ] [sparse indices] , ..., [sparse indices] +# +# More precise definition of model layers: +# 1) fully connected layers of an mlp +# z = f(y) +# y = Wx + b +# +# 2) embedding lookup (for a list of sparse indices p=[p1,...,pk]) +# z = Op(e1,...,ek) +# obtain vectors e1=E[:,p1], ..., ek=E[:,pk] +# +# 3) Operator Op can be one of the following +# Sum(e1,...,ek) = e1 + ... 
+ ek +# Dot(e1,...,ek) = [e1'e1, ..., e1'ek, ..., ek'e1, ..., ek'ek] +# Cat(e1,...,ek) = [e1', ..., ek']' +# where ' denotes transpose operation +# +# References: +# [1] Maxim Naumov, Dheevatsa Mudigere, Hao-Jun Michael Shi, Jianyu Huang, +# Narayanan Sundaram, Jongsoo Park, Xiaodong Wang, Udit Gupta, Carole-Jean Wu, +# Alisson G. Azzolini, Dmytro Dzhulgakov, Andrey Mallevich, Ilia Cherniavskii, +# Yinghai Lu, Raghuraman Krishnamoorthi, Ansha Yu, Volodymyr Kondratenko, +# Stephanie Pereira, Xianjie Chen, Wenlin Chen, Vijay Rao, Bill Jia, Liang Xiong, +# Misha Smelyanskiy, "Deep Learning Recommendation Model for Personalization and +# Recommendation Systems", CoRR, arXiv:1906.00091, 2019 + +from __future__ import absolute_import, division, print_function, unicode_literals + +import functools + +# others +import operator +import time +import copy + +# data generation +import dlrm_data_pytorch as dp + +# numpy +import numpy as np +import sklearn.metrics + +# onnx +# The onnx import causes deprecation warnings every time workers +# are spawned during testing. So, we filter out those warnings. 
+import warnings +with warnings.catch_warnings(): + warnings.filterwarnings("ignore", category=DeprecationWarning) + try: + import onnx + import caffe2.python.onnx.frontend + except ImportError as error: + print('Unable to import onnx or caffe2.python.onnx.frontend ', error) + +# from caffe2.python import data_parallel_model + +# caffe2 +from caffe2.proto import caffe2_pb2 +from caffe2.python import brew, core, dyndep, model_helper, net_drawer, workspace + +""" +# auxiliary routine used to split input on the mini-bacth dimension +def where_to_split(mini_batch_size, ndevices, _add_leftover=False): + n = (mini_batch_size + ndevices - 1) // ndevices # ceiling + l = mini_batch_size - n * (ndevices - 1) # leftover + s = [n] * (ndevices - 1) + if _add_leftover: + ls += [l if l > 0 else n] + return ls +""" + + +### define dlrm in Caffe2 ### +class DLRM_Net(object): + def FeedBlobWrapper(self, tag, val, add_prefix=True, split=False, device_id=-1): + if self.ndevices > 1 and add_prefix: + if split: + # split across devices + mini_batch_size = val.shape[0] + # approach 1: np and caffe2 operators assume the mini-batch size is + # divisible exactly by the number of available devices + if mini_batch_size % self.ndevices != 0: + sys.exit("ERROR: caffe2 net assumes that the mini_batch_size " + + str(mini_batch_size) + + " is evenly divisible by the number of available devices" + + str(self.ndevices)) + vals = np.split(val, self.ndevices, axis=0) + """ + # approach 2: np and caffe2 operators do not assume exact divisibility + if args.mini_batch_size != mini_batch_size: + sys.exit("ERROR: caffe2 net was prepared for mini-batch size " + + str(args.mini_batch_size) + + " which is different from current mini-batch size " + + str(mini_batch_size) + " being passed to it. 
" + + "This is common for the last mini-batch, when " + + "mini-batch size does not evenly divided the number of " + + "elements in the data set.") + ls = where_to_split(mini_batch_size, self.ndevices) + vals = np.split(val, ls, axis=0) + """ + # feed to multiple devices + for d in range(self.ndevices): + tag_on_device = "gpu_" + str(d) + "/" + tag + _d = core.DeviceOption(workspace.GpuDeviceType, d) + workspace.FeedBlob(tag_on_device, vals[d], device_option=_d) + else: + # feed to multiple devices + for d in range(self.ndevices): + tag_on_device = "gpu_" + str(d) + "/" + tag + _d = core.DeviceOption(workspace.GpuDeviceType, d) + workspace.FeedBlob(tag_on_device, val, device_option=_d) + else: + # feed to a single device (named or not) + if device_id >= 0: + _d = core.DeviceOption(workspace.GpuDeviceType, device_id) + workspace.FeedBlob(tag, val, device_option=_d) + else: + workspace.FeedBlob(tag, val) + + def FetchBlobWrapper(self, tag, add_prefix=True, reduce_across=None, device_id=-1): + if self.ndevices > 1 and add_prefix: + # fetch from multiple devices + vals = [] + for d in range(self.ndevices): + if tag.__class__ == list: + tag_on_device = tag[d] + else: + tag_on_device = "gpu_" + str(0) + "/" + tag + val = workspace.FetchBlob(tag_on_device) + vals.append(val) + # reduce across devices + if reduce_across == "add": + return functools.reduce(operator.add, vals) + elif reduce_across == "concat": + return np.concatenate(vals) + else: + return vals + else: + # fetch from a single device (named or not) + if device_id >= 0: + tag_on_device = "gpu_" + str(device_id) + "/" + tag + return workspace.FetchBlob(tag_on_device) + else: + return workspace.FetchBlob(tag) + + def AddLayerWrapper(self, layer, inp_blobs, out_blobs, + add_prefix=True, reset_grad=False, **kwargs): + # auxiliary routine to adjust tags + def adjust_tag(blobs, on_device): + if blobs.__class__ == str: + _blobs = on_device + blobs + elif blobs.__class__ == list: + _blobs = list(map(lambda tag: 
on_device + tag, blobs)) + else: # blobs.__class__ == model_helper.ModelHelper or something else + _blobs = blobs + return _blobs + + if self.ndevices > 1 and add_prefix: + # add layer on multiple devices + ll = [] + for d in range(self.ndevices): + # add prefix on_device + on_device = "gpu_" + str(d) + "/" + _inp_blobs = adjust_tag(inp_blobs, on_device) + _out_blobs = adjust_tag(out_blobs, on_device) + # WARNING: reset_grad option was exlusively designed for WeightedSum + # with inp_blobs=[w, tag_one, "", lr], where "" will be replaced + if reset_grad: + w_grad = self.gradientMap[_inp_blobs[0]] + _inp_blobs[2] = w_grad + # add layer to the model + with core.DeviceScope(core.DeviceOption(workspace.GpuDeviceType, d)): + if kwargs: + new_layer = layer(_inp_blobs, _out_blobs, **kwargs) + else: + new_layer = layer(_inp_blobs, _out_blobs) + ll.append(new_layer) + return ll + else: + # add layer on a single device + # WARNING: reset_grad option was exlusively designed for WeightedSum + # with inp_blobs=[w, tag_one, "", lr], where "" will be replaced + if reset_grad: + w_grad = self.gradientMap[inp_blobs[0]] + inp_blobs[2] = w_grad + # add layer to the model + if kwargs: + new_layer = layer(inp_blobs, out_blobs, **kwargs) + else: + new_layer = layer(inp_blobs, out_blobs) + return new_layer + + def create_mlp(self, ln, sigmoid_layer, model, tag): + (tag_layer, tag_in, tag_out) = tag + + # build MLP layer by layer + layers = [] + weights = [] + for i in range(1, ln.size): + n = ln[i - 1] + m = ln[i] + + # create tags + tag_fc_w = tag_layer + ":::" + "fc" + str(i) + "_w" + tag_fc_b = tag_layer + ":::" + "fc" + str(i) + "_b" + tag_fc_y = tag_layer + ":::" + "fc" + str(i) + "_y" + tag_fc_z = tag_layer + ":::" + "fc" + str(i) + "_z" + if i == ln.size - 1: + tag_fc_z = tag_out + weights.append(tag_fc_w) + weights.append(tag_fc_b) + + # initialize the weights + # approach 1: custom Xavier input, output or two-sided fill + mean = 0.0 # std_dev = np.sqrt(variance) + std_dev = 
np.sqrt(2 / (m + n)) # np.sqrt(1 / m) # np.sqrt(1 / n) + W = np.random.normal(mean, std_dev, size=(m, n)).astype(np.float32) + std_dev = np.sqrt(1 / m) # np.sqrt(2 / (m + 1)) + b = np.random.normal(mean, std_dev, size=m).astype(np.float32) + self.FeedBlobWrapper(tag_fc_w, W) + self.FeedBlobWrapper(tag_fc_b, b) + # approach 2: caffe2 xavier + # W = self.AddLayerWrapper( + # model.param_init_net.XavierFill, + # [], + # tag_fc_w, + # shape=[m, n] + # ) + # b = self.AddLayerWrapper( + # model.param_init_net.ConstantFill, + # [], + # tag_fc_b, + # shape=[m] + # ) + + # initialize the MLP's momentum for the Adagrad optimizer + if self.emb_optimizer in ["adagrad", "rwsadagrad"]: + # momentum of the weights + self.FeedBlobWrapper( + "momentum_mlp_{}_{}".format(tag_layer, 2 * i - 1), + np.full((m, n), 0, dtype=np.float32) + ) + # momentum of the biases + self.FeedBlobWrapper( + "momentum_mlp_{}_{}".format(tag_layer, 2 * i), + np.full((m), 0, dtype=np.float32) + ) + + # save the blob shapes for latter (only needed if onnx is requested) + if self.save_onnx: + self.onnx_tsd[tag_fc_w] = (onnx.TensorProto.FLOAT, W.shape) + self.onnx_tsd[tag_fc_b] = (onnx.TensorProto.FLOAT, b.shape) + + # approach 1: construct fully connected operator using model.net + fc = self.AddLayerWrapper( + model.net.FC, [tag_in, tag_fc_w, tag_fc_b], tag_fc_y + ) + # approach 2: construct fully connected operator using brew + # https://github.com/caffe2/tutorials/blob/master/MNIST.ipynb + # fc = brew.fc(model, layer, tag_fc_w, dim_in=m, dim_out=n) + layers.append(fc) + + if i == sigmoid_layer: + # approach 1: construct sigmoid operator using model.net + layer = self.AddLayerWrapper(model.net.Sigmoid, tag_fc_y, tag_fc_z) + # approach 2: using brew (which currently does not support sigmoid) + # tag_sigm = tag_layer + ":::" + "sigmoid" + str(i) + # layer = brew.sigmoid(model,fc,tag_sigmoid) + else: + # approach 1: construct relu operator using model.net + layer = self.AddLayerWrapper(model.net.Relu, tag_fc_y, 
tag_fc_z) + # approach 2: using brew + # tag_relu = tag_layer + ":::" + "relu" + str(i) + # layer = brew.relu(model,fc,tag_relu) + tag_in = tag_fc_z + layers.append(layer) + + # WARNING: the dependency between layers is implicit in the tags, + # so only the last layer is added to the layers list. It will + # later be used for interactions. + return layers, weights + + def create_emb(self, m, ln, model, tag): + (tag_layer, tag_in, tag_out) = tag + emb_l = [] + weights_l = [] + vw_l = [] + for i in range(0, ln.size): + n = ln[i] + + # select device + if self.ndevices > 1: + d = i % self.ndevices + else: + d = -1 + + # create tags + on_device = "" if self.ndevices <= 1 else "gpu_" + str(d) + "/" + len_s = on_device + tag_layer + ":::" + "sls" + str(i) + "_l" + ind_s = on_device + tag_layer + ":::" + "sls" + str(i) + "_i" + tbl_s = on_device + tag_layer + ":::" + "sls" + str(i) + "_w" + sum_s = on_device + tag_layer + ":::" + "sls" + str(i) + "_z" + weights_l.append(tbl_s) + + # initialize the weights + # approach 1a: custom + W = np.random.uniform(low=-np.sqrt(1 / n), + high=np.sqrt(1 / n), + size=(n, m)).astype(np.float32) + # approach 1b: numpy rand + # W = ra.rand(n, m).astype(np.float32) + self.FeedBlobWrapper(tbl_s, W, False, device_id=d) + # approach 2: caffe2 xavier + # with core.DeviceScope(core.DeviceOption(workspace.GpuDeviceType, d)): + # W = model.param_init_net.XavierFill([], tbl_s, shape=[n, m]) + # save the blob shapes for latter (only needed if onnx is requested) + + # initialize the embedding's momentum for the Adagrad optimizer + if self.emb_optimizer == "adagrad": + self.FeedBlobWrapper("momentum_emb_{}".format(i), + np.full((n, m), 0), add_prefix=False, device_id=d) + elif self.emb_optimizer == "rwsadagrad": + self.FeedBlobWrapper("momentum_emb_{}".format(i), + np.full((n), 0), add_prefix=False, device_id=d) + + if self.save_onnx: + self.onnx_tsd[tbl_s] = (onnx.TensorProto.FLOAT, W.shape) + + # create operator + if self.weighted_pooling is not 
None: + vw_s = on_device + tag_layer + ":::" + "sls" + str(i) + "_v" + psw_s = on_device + tag_layer + ":::" + "sls" + str(i) + "_s" + VW = np.ones(n).astype(np.float32) + self.FeedBlobWrapper(vw_s, VW, False, device_id=d) + if self.weighted_pooling == "learned": + vw_l.append(vw_s) + grad_on_weights = True + else: + grad_on_weights = False + if self.save_onnx: + self.onnx_tsd[vw_s] = (onnx.TensorProto.FLOAT, VW.shape) + if self.ndevices <= 1: + PSW = model.net.Gather([vw_s, ind_s], [psw_s]) + EE = model.net.SparseLengthsWeightedSum( + [tbl_s, PSW, ind_s, len_s], [sum_s], + grad_on_weights=grad_on_weights + ) + else: + with core.DeviceScope( + core.DeviceOption(workspace.GpuDeviceType, d) + ): + PSW = model.net.Gather([vw_s, ind_s], [psw_s]) + EE = model.net.SparseLengthsWeightedSum( + [tbl_s, PSW, ind_s, len_s], [sum_s], + grad_on_weights=grad_on_weights + ) + else: + if self.ndevices <= 1: + EE = model.net.SparseLengthsSum( + [tbl_s, ind_s, len_s], [sum_s] + ) + else: + with core.DeviceScope( + core.DeviceOption(workspace.GpuDeviceType, d) + ): + EE = model.net.SparseLengthsSum( + [tbl_s, ind_s, len_s], [sum_s] + ) + emb_l.append(EE) + + return emb_l, weights_l, vw_l + + def create_interactions(self, x, ly, model, tag): + (tag_dense_in, tag_sparse_in, tag_int_out) = tag + + if self.arch_interaction_op == "dot": + # concatenate dense and sparse features + tag_int_out_info = tag_int_out + "_info" + T, T_info = model.net.Concat( + x + ly, + [tag_int_out + "_cat_axis0", tag_int_out_info + "_cat_axis0"], + axis=1, + add_axis=1, + ) + # perform a dot product + Z = model.net.BatchMatMul([T, T], tag_int_out + "_matmul", trans_b=1) + # append dense feature with the interactions (into a row vector) + # approach 1: all + # Zflat = model.net.Flatten(Z, tag_int_out + "_flatten", axis=1) + # approach 2: unique + Zflat_all = model.net.Flatten(Z, tag_int_out + "_flatten_all", axis=1) + Zflat = model.net.BatchGather( + [Zflat_all, tag_int_out + "_tril_indices"], + tag_int_out + 
"_flatten" + ) + R, R_info = model.net.Concat( + x + [Zflat], [tag_int_out, tag_int_out_info], axis=1 + ) + elif self.arch_interaction_op == "cat": + # concatenation features (into a row vector) + tag_int_out_info = tag_int_out + "_info" + R, R_info = model.net.Concat( + x + ly, [tag_int_out, tag_int_out_info], axis=1 + ) + else: + sys.exit("ERROR: --arch-interaction-op=" + + self.arch_interaction_op + " is not supported") + + return R + + def create_sequential_forward_ops(self): + # embeddings + tag = (self.temb, self.tsin, self.tsout) + self.emb_l, self.emb_w, self.emb_vw = self.create_emb( + self.m_spa, self.ln_emb, self.model, tag + ) + # bottom mlp + tag = (self.tbot, self.tdin, self.tdout) + self.bot_l, self.bot_w = self.create_mlp(self.ln_bot, self.sigmoid_bot, + self.model, tag) + # interactions + tag = (self.tdout, self.tsout, self.tint) + Z = self.create_interactions([self.bot_l[-1]], self.emb_l, self.model, tag) + + # top mlp + tag = (self.ttop, Z, self.tout) + self.top_l, self.top_w = self.create_mlp(self.ln_top, self.sigmoid_top, + self.model, tag) + # debug prints + # print(self.emb_l) + # print(self.bot_l) + # print(self.top_l) + + # setup the last output variable + self.last_output = self.top_l[-1] + + def create_parallel_forward_ops(self): + # distribute embeddings (model parallelism) + tag = (self.temb, self.tsin, self.tsout) + self.emb_l, self.emb_w, self.emb_vw = self.create_emb( + self.m_spa, self.ln_emb, self.model, tag + ) + # replicate mlp (data parallelism) + tag = (self.tbot, self.tdin, self.tdout) + self.bot_l, self.bot_w = self.create_mlp(self.ln_bot, self.sigmoid_bot, + self.model, tag) + + # add communication (butterfly shuffle) + t_list = [] + for i, emb_output in enumerate(self.emb_l): + # split input + src_d = i % self.ndevices + lo = [emb_output + "_split_" + str(d) for d in range(self.ndevices)] + # approach 1: np and caffe2 operators assume the mini-batch size is + # divisible exactly by the number of available devices + with 
core.DeviceScope(core.DeviceOption(workspace.GpuDeviceType, src_d)): + self.model.net.Split(emb_output, lo, axis=0) + """ + # approach 2: np and caffe2 operators do not assume exact divisibility + ls = where_to_split(args.mini_batch_size, self.ndevices, _add_leftover=True) + with core.DeviceScope(core.DeviceOption(workspace.GpuDeviceType, src_d)): + emb_output_split = self.model.net.Split( + emb_output, lo, split=lp, axis=0 + ) + """ + # scatter + y = [] + for dst_d in range(len(lo)): + src_blob = lo[dst_d] + dst_blob = str(src_blob).replace( + "gpu_" + str(src_d), "gpu_" + str(dst_d), 1 + ) + if src_blob != dst_blob: + with core.DeviceScope( + core.DeviceOption(workspace.GpuDeviceType, dst_d) + ): + blob = self.model.Copy(src_blob, dst_blob) + else: + blob = dst_blob + y.append(blob) + t_list.append(y) + # adjust lists to be ordered per device + x = list(map(lambda x: list(x), zip(*self.bot_l))) + ly = list(map(lambda y: list(y), zip(*t_list))) + + # interactions + for d in range(self.ndevices): + on_device = "gpu_" + str(d) + "/" + tag = (on_device + self.tdout, on_device + self.tsout, on_device + self.tint) + with core.DeviceScope(core.DeviceOption(workspace.GpuDeviceType, d)): + self.create_interactions([x[d][-1]], ly[d], self.model, tag) + + # replicate mlp (data parallelism) + tag = (self.ttop, self.tint, self.tout) + self.top_l, self.top_w = self.create_mlp(self.ln_top, self.sigmoid_top, + self.model, tag) + + # debug prints + # print(self.model.net.Proto(),end='\n') + # sys.exit("ERROR: debugging") + + # setup the last output variable + self.last_output = self.top_l[-1] + + def __init__( + self, + m_spa, + ln_emb, + ln_bot, + ln_top, + arch_interaction_op, + arch_interaction_itself=False, + sigmoid_bot=-1, + sigmoid_top=-1, + save_onnx=False, + model=None, + test_net=None, + tag=None, + ndevices=-1, + forward_ops=True, + enable_prof=False, + weighted_pooling=None, + emb_optimizer="sgd" + ): + super(DLRM_Net, self).__init__() + + # init model + if model is 
None: + global_init_opt = ["caffe2", "--caffe2_log_level=0"] + if enable_prof: + global_init_opt += [ + "--logtostderr=0", + "--log_dir=$HOME", + "--caffe2_logging_print_net_summary=1", + ] + workspace.GlobalInit(global_init_opt) + self.set_tags() + self.model = model_helper.ModelHelper(name="DLRM", init_params=True) + self.test_net = None + else: + # WARNING: assume that workspace and tags have been initialized elsewhere + self.set_tags(tag[0], tag[1], tag[2], tag[3], tag[4], tag[5], tag[6], + tag[7], tag[8], tag[9]) + self.model = model + self.test_net = test_net + + # save arguments + self.m_spa = m_spa + self.ln_emb = ln_emb + self.ln_bot = ln_bot + self.ln_top = ln_top + self.arch_interaction_op = arch_interaction_op + self.arch_interaction_itself = arch_interaction_itself + self.sigmoid_bot = sigmoid_bot + self.sigmoid_top = sigmoid_top + self.save_onnx = save_onnx + self.ndevices = ndevices + self.emb_optimizer = emb_optimizer + if weighted_pooling is not None and weighted_pooling != "fixed": + self.weighted_pooling = "learned" + else: + self.weighted_pooling = weighted_pooling + # onnx types and shapes dictionary + if self.save_onnx: + self.onnx_tsd = {} + # create forward operators + if forward_ops: + if self.ndevices <= 1: + return self.create_sequential_forward_ops() + else: + return self.create_parallel_forward_ops() + + def set_tags( + self, + _tag_layer_top_mlp="top", + _tag_layer_bot_mlp="bot", + _tag_layer_embedding="emb", + _tag_feature_dense_in="dense_in", + _tag_feature_dense_out="dense_out", + _tag_feature_sparse_in="sparse_in", + _tag_feature_sparse_out="sparse_out", + _tag_interaction="interaction", + _tag_dense_output="prob_click", + _tag_dense_target="target", + ): + # layer tags + self.ttop = _tag_layer_top_mlp + self.tbot = _tag_layer_bot_mlp + self.temb = _tag_layer_embedding + # dense feature tags + self.tdin = _tag_feature_dense_in + self.tdout = _tag_feature_dense_out + # sparse feature tags + self.tsin = _tag_feature_sparse_in + 
def parameters(self):
    """Return the underlying caffe2 ModelHelper."""
    return self.model

def get_loss(self):
    """Fetch the scalar training loss, summed across devices."""
    return self.FetchBlobWrapper(self.loss, reduce_across="add")

def get_output(self):
    """Fetch the model output, concatenated across devices."""
    return self.FetchBlobWrapper(self.last_output, reduce_across="concat")

def create(self, X, S_lengths, S_indices, T):
    """Feed the first batch, then build and instantiate the compute graph."""
    self.create_input(X, S_lengths, S_indices, T)
    self.create_model(X, S_lengths, S_indices, T)

def create_input(self, X, S_lengths, S_indices, T):
    """Feed dense, sparse and (zeroed) target blobs ahead of graph creation."""
    # dense features
    self.FeedBlobWrapper(self.tdin, X, split=True)
    # record blob shape for later (only needed if onnx export is requested)
    if self.save_onnx:
        self.onnx_tsd[self.tdin] = (onnx.TensorProto.FLOAT, X.shape)

    multi_device = self.ndevices > 1
    for i in range(len(self.emb_l)):
        # sparse blobs live on the device that owns embedding table i
        d = i % self.ndevices if multi_device else -1
        prefix = "gpu_" + str(d) + "/" if multi_device else ""
        len_s = prefix + self.temb + ":::" + "sls" + str(i) + "_l"
        ind_s = prefix + self.temb + ":::" + "sls" + str(i) + "_i"
        self.FeedBlobWrapper(len_s, np.array(S_lengths[i]), False, device_id=d)
        self.FeedBlobWrapper(ind_s, np.array(S_indices[i]), False, device_id=d)
        # record blob shapes for later (only needed if onnx export is requested)
        if self.save_onnx:
            self.onnx_tsd[len_s] = (onnx.TensorProto.INT32, (len(S_lengths[i]),))
            self.onnx_tsd[ind_s] = (onnx.TensorProto.INT32, (len(S_indices[i]),))

    # target blob is fed as zeros of the right shape at creation time
    if T is not None:
        zeros_fp32 = np.zeros(T.shape).astype(np.float32)
        self.FeedBlobWrapper(self.ttar, zeros_fp32, split=True)
        if self.save_onnx:
            self.onnx_tsd[self.ttar] = (onnx.TensorProto.FLOAT, T.shape)
the interactions + offset = 1 if self.arch_interaction_itself else 0 + num_fea = len(self.emb_l) + 1 + tril_indices = np.array([j + i * num_fea + for i in range(num_fea) for j in range(i + offset)]) + self.FeedBlobWrapper(self.tint + "_tril_indices", tril_indices) + + # create compute graph + if T is not None: + # WARNING: RunNetOnce call is needed only if we use brew and ConstantFill. + # We could use direct calls to self.model functions above to avoid it + workspace.RunNetOnce(self.model.param_init_net) + workspace.CreateNet(self.model.net) + if self.test_net is not None: + workspace.CreateNet(self.test_net) + + def run(self, X, S_lengths, S_indices, T, test_net=False, enable_prof=False): + # feed input data to blobs + # dense features + self.FeedBlobWrapper(self.tdin, X, split=True) + # sparse features + for i in range(len(self.emb_l)): + # select device + if self.ndevices > 1: + d = i % self.ndevices + else: + d = -1 + # create tags + on_device = "" if self.ndevices <= 1 else "gpu_" + str(d) + "/" + len_s = on_device + self.temb + ":::" + "sls" + str(i) + "_l" + ind_s = on_device + self.temb + ":::" + "sls" + str(i) + "_i" + self.FeedBlobWrapper(len_s, np.array(S_lengths[i]), False, device_id=d) + self.FeedBlobWrapper(ind_s, np.array(S_indices[i]), False, device_id=d) + + # feed target data to blobs if needed + if T is not None: + self.FeedBlobWrapper(self.ttar, T, split=True) + # execute compute graph + if test_net: + workspace.RunNet(self.test_net) + else: + if enable_prof: + workspace.C.benchmark_net(self.model.net.Name(), 0, 1, True) + else: + workspace.RunNet(self.model.net) + # debug prints + # print("intermediate") + # print(self.FetchBlobWrapper(self.bot_l[-1])) + # for tag_emb in self.emb_l: + # print(self.FetchBlobWrapper(tag_emb)) + # print(self.FetchBlobWrapper(self.tint)) + + def MSEloss(self, scale=1.0): + # add MSEloss to the model + self.AddLayerWrapper(self.model.SquaredL2Distance, [self.tout, self.ttar], "sd") + 
self.AddLayerWrapper(self.model.Scale, "sd", "sd2", scale=2.0 * scale) + # WARNING: "loss" is a special tag and should not be changed + self.loss = self.AddLayerWrapper(self.model.AveragedLoss, "sd2", "loss") + + def BCEloss(self, scale=1.0, threshold=0.0): + # add BCEloss to the mode + if 0.0 < threshold and threshold < 1.0: + self.AddLayerWrapper(self.model.Clip, self.tout, "tout_c", + min=threshold, max=(1.0 - threshold)) + self.AddLayerWrapper(self.model.MakeTwoClass, "tout_c", "tout_2c") + else: + self.AddLayerWrapper(self.model.MakeTwoClass, self.tout, "tout_2c") + self.AddLayerWrapper(self.model.LabelCrossEntropy, ["tout_2c", self.ttar], "sd") + # WARNING: "loss" is a special tag and should not be changed + if scale == 1.0: + self.loss = self.AddLayerWrapper(self.model.AveragedLoss, "sd", "loss") + else: + self.AddLayerWrapper(self.model.Scale, "sd", "sd2", scale=scale) + self.loss = self.AddLayerWrapper(self.model.AveragedLoss, "sd2", "loss") + + def sgd_optimizer(self, learning_rate, + T=None, _gradientMap=None, sync_dense_params=True): + # create one, it and lr tags (or use them if already present) + if T is not None: + (tag_one, tag_it, tag_lr) = T + else: + (tag_one, tag_it, tag_lr) = ("const_one", "optim_it", "optim_lr") + + # approach 1: feed values directly + # self.FeedBlobWrapper(tag_one, np.ones(1).astype(np.float32)) + # self.FeedBlobWrapper(tag_it, np.zeros(1).astype(np.int64)) + # it = self.AddLayerWrapper(self.model.Iter, tag_it, tag_it) + # lr = self.AddLayerWrapper(self.model.LearningRate, tag_it, tag_lr, + # base_lr=-1 * learning_rate, policy="fixed") + # approach 2: use brew + self.AddLayerWrapper(self.model.param_init_net.ConstantFill, + [], tag_one, shape=[1], value=1.0) + self.AddLayerWrapper(brew.iter, self.model, tag_it) + self.AddLayerWrapper(self.model.LearningRate, tag_it, tag_lr, + base_lr=-1 * learning_rate, policy="fixed") + # save the blob shapes for latter (only needed if onnx is requested) + if self.save_onnx: + 
self.onnx_tsd[tag_one] = (onnx.TensorProto.FLOAT, (1,)) + self.onnx_tsd[tag_it] = (onnx.TensorProto.INT64, (1,)) + + # create gradient maps (or use them if already present) + if _gradientMap is not None: + self.gradientMap = _gradientMap + else: + if self.loss.__class__ == list: + self.gradientMap = self.model.AddGradientOperators(self.loss) + else: + self.gradientMap = self.model.AddGradientOperators([self.loss]) + + # update weights + # approach 1: builtin function + # optimizer.build_sgd(self.model, base_learning_rate=learning_rate) + # approach 2: custom code + # top MLP weight and bias + for w in self.top_w: + # allreduce across devices if needed + if sync_dense_params and self.ndevices > 1: + grad_blobs = [ + self.gradientMap["gpu_{}/".format(d) + w] + for d in range(self.ndevices) + ] + self.model.NCCLAllreduce(grad_blobs, grad_blobs) + # update weights + self.AddLayerWrapper(self.model.WeightedSum, + [w, tag_one, "", tag_lr], w, reset_grad=True) + # bottom MLP weight and bias + for w in self.bot_w: + # allreduce across devices if needed + if sync_dense_params and self.ndevices > 1: + grad_blobs = [ + self.gradientMap["gpu_{}/".format(d) + w] + for d in range(self.ndevices) + ] + self.model.NCCLAllreduce(grad_blobs, grad_blobs) + # update weights + self.AddLayerWrapper(self.model.WeightedSum, + [w, tag_one, "", tag_lr], w, reset_grad=True) + # update embeddings + for i, w in enumerate(self.emb_w): + # select device + if self.ndevices > 1: + d = i % self.ndevices + # create tags + on_device = "" if self.ndevices <= 1 else "gpu_" + str(d) + "/" + _tag_one = on_device + tag_one + _tag_lr = on_device + tag_lr + # pickup gradient + w_grad = self.gradientMap[w] + # update weights + if self.ndevices > 1: + with core.DeviceScope(core.DeviceOption(workspace.GpuDeviceType, d)): + self.model.ScatterWeightedSum([w, _tag_one, w_grad.indices, + w_grad.values, _tag_lr], w) + else: + self.model.ScatterWeightedSum([w, _tag_one, w_grad.indices, + w_grad.values, _tag_lr], w) + 
+ # update per sample weights + if self.weighted_pooling == "learned": + for i, w in enumerate(self.emb_vw): + # select device + if self.ndevices > 1: + d = i % self.ndevices + # create tags + on_device = "" if self.ndevices <= 1 else "gpu_" + str(d) + "/" + _tag_one = on_device + tag_one + _tag_lr = on_device + tag_lr + # pickup gradient + w_grad = self.gradientMap[w] + # update weights + if self.ndevices > 1: + with core.DeviceScope( + core.DeviceOption(workspace.GpuDeviceType, d) + ): + self.model.ScatterWeightedSum( + [w, _tag_one, w_grad.indices, + w_grad.values, _tag_lr], w + ) + else: + self.model.ScatterWeightedSum( + [w, _tag_one, w_grad.indices, w_grad.values, _tag_lr], w + ) + + def adagrad_optimizer(self, learning_rate, + T=None, _gradientMap=None, sync_dense_params=True, + epsilon=1e-10, decay_=0.0, weight_decay_=0.0): + # create one, it and lr tags (or use them if already present) + if T is not None: + (tag_one, tag_it, tag_lr) = T + else: + (tag_one, tag_it, tag_lr) = ("const_one", "optim_it", "optim_lr") + + # approach 1: feed values directly + # self.FeedBlobWrapper(tag_one, np.ones(1).astype(np.float32)) + # self.FeedBlobWrapper(tag_it, np.zeros(1).astype(np.int64)) + # it = self.AddLayerWrapper(self.model.Iter, tag_it, tag_it) + # lr = self.AddLayerWrapper(self.model.LearningRate, tag_it, tag_lr, + # base_lr=-1 * learning_rate, policy="fixed") + # approach 2: use brew + self.AddLayerWrapper(self.model.param_init_net.ConstantFill, + [], tag_one, shape=[1], value=1.0) + self.AddLayerWrapper(brew.iter, self.model, tag_it) + self.AddLayerWrapper(self.model.LearningRate, tag_it, tag_lr, + base_lr=-1 * learning_rate, policy="fixed") + # save the blob shapes for latter (only needed if onnx is requested) + if self.save_onnx: + self.onnx_tsd[tag_one] = (onnx.TensorProto.FLOAT, (1,)) + self.onnx_tsd[tag_it] = (onnx.TensorProto.INT64, (1,)) + + # create gradient maps (or use them if already present) + if _gradientMap is not None: + self.gradientMap = 
_gradientMap + else: + if self.loss.__class__ == list: + self.gradientMap = self.model.AddGradientOperators(self.loss) + else: + self.gradientMap = self.model.AddGradientOperators([self.loss]) + + # update weights + # approach 1: builtin function + # optimizer.build_sgd(self.model, base_learning_rate=learning_rate) + # approach 2: custom code + # top MLP weight and bias + for i, w in enumerate(self.top_w): + # allreduce across devices if needed + if sync_dense_params and self.ndevices > 1: + grad_blobs = [ + self.gradientMap["gpu_{}/".format(d) + w] + for d in range(self.ndevices) + ] + self.model.NCCLAllreduce(grad_blobs, grad_blobs) + # update weights + self.model.Adagrad( + [ + w, + "momentum_mlp_top_{}".format(i + 1), + self.gradientMap[w], + tag_lr + ], + [w, "momentum_mlp_top_{}".format(i + 1)], + epsilon=epsilon, + decay_=decay_, + weight_decay_=weight_decay_ + ) + + # bottom MLP weight and bias + for i, w in enumerate(self.bot_w): + # allreduce across devices if needed + if sync_dense_params and self.ndevices > 1: + grad_blobs = [ + self.gradientMap["gpu_{}/".format(d) + w] + for d in range(self.ndevices) + ] + self.model.NCCLAllreduce(grad_blobs, grad_blobs) + # update weights + self.model.Adagrad( + [ + w, + "momentum_mlp_bot_{}".format(i + 1), + self.gradientMap[w], + tag_lr + ], + [w, "momentum_mlp_bot_{}".format(i + 1)], + epsilon=epsilon, + decay_=decay_, + weight_decay_=weight_decay_ + ) + + # update embeddings + for i, w in enumerate(self.emb_w): + # select device + if self.ndevices > 1: + d = i % self.ndevices + # create tags + on_device = "" if self.ndevices <= 1 else "gpu_" + str(d) + "/" + _tag_one = on_device + tag_one + _tag_lr = on_device + tag_lr + # pickup gradient + w_grad = self.gradientMap[w] + # update weights + def add_optimizer(): + self.model.Unique( + w_grad.indices, + ["unique_w_grad_indices", "remapping_w_grad_indices"] + ) + self.model.UnsortedSegmentSum( + [w_grad.values, "remapping_w_grad_indices"], + "unique_w_grad_values" + ) 
+ + if self.emb_optimizer == "adagrad": + self.model.SparseAdagrad( + [ + w, + "momentum_emb_{}".format(i), + "unique_w_grad_indices", + "unique_w_grad_values", + _tag_lr + ], + [w, "momentum_emb_{}".format(i)], + epsilon=epsilon, + decay_=decay_, + weight_decay_=weight_decay_ + ) + + elif self.emb_optimizer == "rwsadagrad": + self.model.RowWiseSparseAdagrad( + [ + w, + "momentum_emb_{}".format(i), + "unique_w_grad_indices", + "unique_w_grad_values", + _tag_lr + ], + [w, "momentum_emb_{}".format(i)], + epsilon=epsilon, + decay_=decay_, + weight_decay_=weight_decay_ + ) + + if self.ndevices > 1: + with core.DeviceScope(core.DeviceOption(workspace.GpuDeviceType, d)): + add_optimizer() + else: + add_optimizer() + + # update per sample weights + if self.weighted_pooling == "learned": + for i, w in enumerate(self.emb_vw): + # select device + if self.ndevices > 1: + d = i % self.ndevices + # create tags + on_device = "" if self.ndevices <= 1 else "gpu_" + str(d) + "/" + _tag_one = on_device + tag_one + _tag_lr = on_device + tag_lr + # pickup gradient + w_grad = self.gradientMap[w] + # update weights + if self.ndevices > 1: + with core.DeviceScope( + core.DeviceOption(workspace.GpuDeviceType, d) + ): + self.model.ScatterWeightedSum( + [w, _tag_one, w_grad.indices, + w_grad.values, _tag_lr], w + ) + else: + self.model.ScatterWeightedSum( + [w, _tag_one, w_grad.indices, w_grad.values, _tag_lr], w + ) + + def print_all(self): + # approach 1: all + print(workspace.Blobs(), end='\n') + for _, l in enumerate(workspace.Blobs()): + print(l) + print(self.FetchBlobWrapper(l)) + # approach 2: only summary + # for param in self.model.params: + # self.model.Summarize(param, [], to_file=1) + # self.model.Summarize(self.model.param_to_grad[param], [], to_file=1) + + def print_weights(self): + for _, l in enumerate(self.emb_w): + # print(l) + print(self.FetchBlobWrapper(l, False)) + if self.weighted_pooling == "learned": + for _, l in enumerate(self.emb_vw): + # print(l) + 
print(self.FetchBlobWrapper(l, False)) + for _, l in enumerate(self.bot_w): + # print(l) + if self.ndevices > 1: + print(self.FetchBlobWrapper(l, False, device_id=0)) + else: + print(self.FetchBlobWrapper(l)) + for _, l in enumerate(self.top_w): + # print(l) + if self.ndevices > 1: + print(self.FetchBlobWrapper(l, False, device_id=0)) + else: + print(self.FetchBlobWrapper(l)) + + def print_activations(self): + for _, l in enumerate(self.emb_l): + print(l) + print(self.FetchBlobWrapper(l, False)) + for _, l in enumerate(self.bot_l): + print(l) + print(self.FetchBlobWrapper(l)) + print(self.tint) + print(self.FetchBlobWrapper(self.tint)) + for _, l in enumerate(self.top_l): + print(l) + print(self.FetchBlobWrapper(l)) + + +def define_metrics(): + metrics = { + 'loss': lambda y_true, y_score: + sklearn.metrics.log_loss( + y_true=y_true, + y_pred=y_score, + labels=[0,1]), + 'recall': lambda y_true, y_score: + sklearn.metrics.recall_score( + y_true=y_true, + y_pred=np.round(y_score) + ), + 'precision': lambda y_true, y_score: + sklearn.metrics.precision_score( + y_true=y_true, + y_pred=np.round(y_score) + ), + 'f1': lambda y_true, y_score: + sklearn.metrics.f1_score( + y_true=y_true, + y_pred=np.round(y_score) + ), + 'ap': sklearn.metrics.average_precision_score, + 'roc_auc': sklearn.metrics.roc_auc_score, + 'accuracy': lambda y_true, y_score: + sklearn.metrics.accuracy_score( + y_true=y_true, + y_pred=np.round(y_score) + ), + # 'pre_curve' : sklearn.metrics.precision_recall_curve, + # 'roc_curve' : sklearn.metrics.roc_curve, + } + return metrics + + +def calculate_metrics(targets, scores): + scores = np.concatenate(scores, axis=0) + targets = np.concatenate(targets, axis=0) + + metrics = define_metrics() + + # print("Compute time for validation metric : ", end="") + # first_it = True + validation_results = {} + for metric_name, metric_function in metrics.items(): + # if first_it: + # first_it = False + # else: + # print(", ", end="") + # metric_compute_start = 
time_wrap(False) + try: + validation_results[metric_name] = metric_function( + targets, + scores + ) + except Exception as error : + validation_results[metric_name] = -1 + print("{} in calculating {}".format(error, metric_name)) + # metric_compute_end = time_wrap(False) + # met_time = metric_compute_end - metric_compute_start + # print("{} {:.4f}".format(metric_name, 1000 * (met_time)), + # end="") + # print(" ms") + return validation_results + + +if __name__ == "__main__": + ### import packages ### + import sys + import argparse + + ### parse arguments ### + parser = argparse.ArgumentParser( + description="Train Deep Learning Recommendation Model (DLRM)" + ) + # model related parameters + parser.add_argument("--arch-sparse-feature-size", type=int, default=2) + parser.add_argument("--arch-embedding-size", type=str, default="4-3-2") + parser.add_argument("--arch-mlp-bot", type=str, default="4-3-2") + parser.add_argument("--arch-mlp-top", type=str, default="4-2-1") + parser.add_argument("--arch-interaction-op", type=str, default="dot") + parser.add_argument("--arch-interaction-itself", action="store_true", default=False) + # activations and loss + parser.add_argument("--activation-function", type=str, default="relu") + parser.add_argument("--loss-function", type=str, default="mse") # or bce + parser.add_argument("--loss-threshold", type=float, default=0.0) # 1.0e-7 + parser.add_argument("--round-targets", type=bool, default=False) + parser.add_argument("--weighted-pooling", type=str, default=None) + # data + parser.add_argument("--data-size", type=int, default=1) + parser.add_argument("--num-batches", type=int, default=0) + parser.add_argument("--data-generation", type=str, default="random") # or synthetic or dataset + parser.add_argument("--rand-data-dist", type=str, default="uniform") # uniform or gaussian + parser.add_argument("--rand-data-min", type=float, default=0) + parser.add_argument("--rand-data-max", type=float, default=1) + 
parser.add_argument("--rand-data-mu", type=float, default=-1) + parser.add_argument("--rand-data-sigma", type=float, default=1) + parser.add_argument("--data-trace-file", type=str, default="./input/dist_emb_j.log") + parser.add_argument("--data-set", type=str, default="kaggle") # or terabyte + parser.add_argument("--raw-data-file", type=str, default="") + parser.add_argument("--processed-data-file", type=str, default="") + parser.add_argument("--data-randomize", type=str, default="total") # or day or none + parser.add_argument("--data-trace-enable-padding", type=bool, default=False) + parser.add_argument("--max-ind-range", type=int, default=-1) + parser.add_argument("--data-sub-sample-rate", type=float, default=0.0) # in [0, 1] + parser.add_argument("--num-indices-per-lookup", type=int, default=10) + parser.add_argument("--num-indices-per-lookup-fixed", type=bool, default=False) + parser.add_argument("--num-workers", type=int, default=0) + parser.add_argument("--memory-map", action="store_true", default=False) + # training + parser.add_argument("--mini-batch-size", type=int, default=1) + parser.add_argument("--nepochs", type=int, default=1) + parser.add_argument("--learning-rate", type=float, default=0.01) + parser.add_argument("--print-precision", type=int, default=5) + parser.add_argument("--numpy-rand-seed", type=int, default=123) + parser.add_argument("--sync-dense-params", type=bool, default=True) + parser.add_argument("--caffe2-net-type", type=str, default="") + parser.add_argument("--optimizer", type=str, default="sgd", + help="""This is the optimizer for embedding tables.""") + parser.add_argument( + "--dataset-multiprocessing", + action="store_true", + default=False, + help="The Kaggle dataset can be multiprocessed in an environment \ + with more than 7 CPU cores and more than 20 GB of memory. 
\n \ + The Terabyte dataset can be multiprocessed in an environment \ + with more than 24 CPU cores and at least 1 TB of memory.", + ) + # inference + parser.add_argument("--inference-only", action="store_true", default=False) + # onnx (or protobuf with shapes) + parser.add_argument("--save-onnx", action="store_true", default=False) + parser.add_argument("--save-proto-types-shapes", action="store_true", default=False) + # gpu + parser.add_argument("--use-gpu", action="store_true", default=False) + # debugging and profiling + parser.add_argument("--print-freq", type=int, default=1) + parser.add_argument("--test-freq", type=int, default=-1) + parser.add_argument("--test-mini-batch-size", type=int, default=-1) + parser.add_argument("--test-num-workers", type=int, default=-1) + parser.add_argument("--print-time", action="store_true", default=False) + parser.add_argument("--debug-mode", action="store_true", default=False) + parser.add_argument("--enable-profiling", action="store_true", default=False) + parser.add_argument("--plot-compute-graph", action="store_true", default=False) + # mlperf logging (disables other output and stops early) + parser.add_argument("--mlperf-logging", action="store_true", default=False) + # stop at target accuracy Kaggle 0.789, Terabyte (sub-sampled=0.875) 0.8107 + parser.add_argument("--mlperf-acc-threshold", type=float, default=0.0) + # stop at target AUC Terabyte (no subsampling) 0.8025 + parser.add_argument("--mlperf-auc-threshold", type=float, default=0.0) + args = parser.parse_args() + + if args.dataset_multiprocessing: + assert float(sys.version[:3]) > 3.7, "The dataset_multiprocessing " + \ + "flag is susceptible to a bug in Python 3.7 and under. " + \ + "https://github.com/facebookresearch/dlrm/issues/172" + + ### some basic setup ### + # WARNING: to obtain exactly the same initialization for + # the weights we need to start from the same random seed. 
+ np.random.seed(args.numpy_rand_seed) + + np.set_printoptions(precision=args.print_precision) + if (args.test_mini_batch_size < 0): + # if the parameter is not set, use the training batch size + args.test_mini_batch_size = args.mini_batch_size + if (args.test_num_workers < 0): + # if the parameter is not set, use the same parameter for training + args.test_num_workers = args.num_workers + + use_gpu = args.use_gpu + if use_gpu: + device_opt = core.DeviceOption(workspace.GpuDeviceType, 0) + ngpus = workspace.NumGpuDevices() # 1 + print("Using {} GPU(s)...".format(ngpus)) + else: + device_opt = core.DeviceOption(caffe2_pb2.CPU) + print("Using CPU...") + + ### prepare training data ### + ln_bot = np.fromstring(args.arch_mlp_bot, dtype=int, sep="-") + if args.data_generation == "dataset": + if args.num_workers > 0 or args.test_num_workers > 0: + print("WARNING: non default --num-workers or --test-num-workers options" + + " are not supported and will be ignored") + if args.mini_batch_size != args.test_mini_batch_size: + print("WARNING: non default ----test-mini-batch-size option" + + " is not supported and will be ignored") + + # input and target from dataset + + train_data, train_ld, test_data, test_ld = \ + dp.make_criteo_data_and_loaders( + args, + offset_to_length_converter=True, + ) + + nbatches = args.num_batches if args.num_batches > 0 \ + else len(train_ld) + + nbatches_test = len(test_ld) + + ln_emb = train_data.counts + m_den = train_data.m_den + + # enforce maximum limit on number of vectors per embedding + if args.max_ind_range > 0: + ln_emb = np.array(list(map( + lambda x: x if x < args.max_ind_range else args.max_ind_range, + ln_emb + ))) + ln_bot[0] = m_den + + else: + if args.num_workers > 0 or args.test_num_workers > 0: + print("WARNING: non default --num-workers or --test-num-workers options" + + " are not supported and will be ignored") + if args.mini_batch_size != args.test_mini_batch_size: + print("WARNING: non default ----test-mini-batch-size 
option" + + " is not supported and will be ignored") + + # input and target at random + ln_emb = np.fromstring(args.arch_embedding_size, dtype=int, sep="-") + m_den = ln_bot[0] + train_data, train_ld, test_data, test_ld = dp.make_random_data_and_loader(args, ln_emb, m_den, \ + offset_to_length_converter=True, + ) + nbatches = args.num_batches if args.num_batches > 0 else len(train_ld) + nbatches_test = len(test_ld) + # table_feature_map = {idx : idx for idx in range(len(ln_emb))} + + ### parse command line arguments ### + m_spa = args.arch_sparse_feature_size + ln_emb = np.asarray(ln_emb) + num_fea = ln_emb.size + 1 # num sparse + num dense features + m_den_out = ln_bot[ln_bot.size - 1] + if args.arch_interaction_op == "dot": + # approach 1: all + # num_int = num_fea * num_fea + m_den_out + # approach 2: unique + if args.arch_interaction_itself: + num_int = (num_fea * (num_fea + 1)) // 2 + m_den_out + else: + num_int = (num_fea * (num_fea - 1)) // 2 + m_den_out + elif args.arch_interaction_op == "cat": + num_int = num_fea * m_den_out + else: + sys.exit("ERROR: --arch-interaction-op=" + + args.arch_interaction_op + " is not supported") + arch_mlp_top_adjusted = str(num_int) + "-" + args.arch_mlp_top + ln_top = np.fromstring(arch_mlp_top_adjusted, dtype=int, sep="-") + # sanity check: feature sizes and mlp dimensions must match + if m_den != ln_bot[0]: + sys.exit("ERROR: arch-dense-feature-size " + + str(m_den) + " does not match first dim of bottom mlp " + str(ln_bot[0])) + if m_spa != m_den_out: + sys.exit("ERROR: arch-sparse-feature-size " + + str(m_spa) + " does not match last dim of bottom mlp " + str(m_den_out)) + if num_int != ln_top[0]: + sys.exit("ERROR: # of feature interactions " + + str(num_int) + " does not match first dim of top mlp " + str(ln_top[0])) + + # test prints (model arch) + if args.debug_mode: + print("model arch:") + print("mlp top arch " + str(ln_top.size - 1) + + " layers, with input to output dimensions:") + print(ln_top) + + print("# of 
interactions") + print(num_int) + print("mlp bot arch " + str(ln_bot.size - 1) + + " layers, with input to output dimensions:") + print(ln_bot) + print("# of features (sparse and dense)") + print(num_fea) + print("dense feature size") + print(m_den) + print("sparse feature size") + print(m_spa) + print("# of embeddings (= # of sparse features) " + str(ln_emb.size) + + ", with dimensions " + str(m_spa) + "x:") + print(ln_emb) + + print("data (inputs and targets):") + for j, inputBatch in enumerate(train_ld): + lX_j, lS_l_j, lS_i_j, lT_j = inputBatch + print("mini-batch: %d" % j) + print(lX_j) + print(lS_l_j) + print(lS_i_j) + print(lT_j) + + ### construct the neural network specified above ### + # WARNING: to obtain exactly the same initialization for + # the weights we need to start from the same random seed. + # np.random.seed(args.numpy_rand_seed) + ndevices = min(ngpus, args.mini_batch_size, num_fea - 1) if use_gpu else -1 + flag_types_shapes = args.save_onnx or args.save_proto_types_shapes + flag_forward_ops = not (use_gpu and ndevices > 1) + with core.DeviceScope(device_opt): + dlrm = DLRM_Net( + m_spa, + ln_emb, + ln_bot, + ln_top, + args.arch_interaction_op, + arch_interaction_itself=args.arch_interaction_itself, + sigmoid_bot=-1, + sigmoid_top=ln_top.size - 1, + save_onnx=flag_types_shapes, + ndevices=ndevices, + # forward_ops = flag_forward_ops + enable_prof=args.enable_profiling, + weighted_pooling=args.weighted_pooling, + emb_optimizer=args.optimizer + ) + # load nccl if using multiple devices + if args.sync_dense_params and ndevices > 1: + dyndep.InitOpsLibrary("//caffe2/caffe2/contrib/nccl:nccl_ops") + # set the net type for better performance (dag, async_scheduling, etc) + if args.caffe2_net_type: + dlrm.parameters().net.Proto().type = args.caffe2_net_type + # plot compute graph + if args.plot_compute_graph: + graph = net_drawer.GetPydotGraph( + dlrm.parameters().net, + "dlrm_s_caffe2_graph", + "BT" + ) + graph.write_pdf(graph.get_name() + ".pdf") + # 
test prints + if args.debug_mode: + print("initial parameters (weights and bias):") + dlrm.print_weights() + + # add training loss if needed + if not args.inference_only: + with core.DeviceScope(device_opt): + # specify the loss function + nd = 1.0 if dlrm.ndevices <= 1 else 1.0 / dlrm.ndevices # 1 + if args.loss_function == "mse": + dlrm.MSEloss(scale=nd) + elif args.loss_function == "bce": + dlrm.BCEloss(scale=nd, threshold=args.loss_threshold) + else: + sys.exit("ERROR: --loss-function=" + args.loss_function + + " is not supported") + + # define test net (as train net without gradients) + dlrm.test_net = core.Net(copy.deepcopy(dlrm.model.net.Proto())) + + # specify the optimizer algorithm + if args.optimizer == "sgd": + dlrm.sgd_optimizer( + args.learning_rate, sync_dense_params=args.sync_dense_params + ) + elif args.optimizer in ["adagrad", "rwsadagrad"]: + dlrm.adagrad_optimizer( + args.learning_rate, sync_dense_params=args.sync_dense_params + ) + else: + sys.exit("""ERROR: Select an optimizer for + embedding tables : 'sgd', 'adagrad', + or 'rwsadagrad' """) + + # init/create + X, lS_l, lS_i, T = next(iter(train_ld)) # does not affect the enumerate(train_ld) in the main loop + dlrm.create(X, lS_l, lS_i, T.int()) + + ### main loop ### + best_gA_test = 0 + best_auc_test = 0 + total_time = 0 + total_loss = 0 + total_accu = 0 + total_iter = 0 + total_samp = 0 + k = 0 + + print("time/loss/accuracy (if enabled):") + while k < args.nepochs: + j = 0 + for j, inputBatch in enumerate(train_ld): + # forward and backward pass, where the latter runs only + # when gradients and loss have been added to the net + time1 = time.time() + lX_j, lS_l_j, lS_i_j, lT_j = inputBatch + lT_j = lT_j.int() if args.loss_function == "bce" else lT_j + dlrm.run(lX_j, lS_l_j, lS_i_j, lT_j) + + time2 = time.time() + total_time += time2 - time1 + + # compte loss and accuracy + Z = dlrm.get_output() # numpy array + T = lT_j.numpy() + ''' + # debug prints + print("output and loss") + print(Z) + 
print(dlrm.get_loss()) + ''' + mbs = T.shape[0] # = args.mini_batch_size except maybe for last + A = np.sum((np.round(Z, 0) == T).astype(np.uint8)) + total_accu += 0 if args.inference_only else A + total_loss += 0 if args.inference_only else dlrm.get_loss() * mbs + total_iter += 1 + total_samp += mbs + + # print time, loss and accuracy + should_print = ((j + 1) % args.print_freq == 0) or (j + 1 == nbatches) + should_test = ( + (args.test_freq > 0) + and (args.data_generation in ["dataset", "random"]) + and (((j + 1) % args.test_freq == 0) or (j + 1 == nbatches)) + ) + if should_print or should_test: + gT = 1000. * total_time / total_iter if args.print_time else -1 + total_time = 0 + + gA = total_accu / total_samp + total_accu = 0 + + gL = total_loss / total_samp + total_loss = 0 + + str_run_type = "inference" if args.inference_only else "training" + print( + "Finished {} it {}/{} of epoch {}, {:.2f} ms/it,".format( + str_run_type, j + 1, nbatches, k, gT + ) + + " loss {:.6f}".format(gL) + ) + total_iter = 0 + total_samp = 0 + # debug prints + # print(Z) + # print(T) + + # testing + if should_test and not args.inference_only: + # don't measure training iter time in a test iteration + if args.mlperf_logging: + previous_iteration_time = None + + test_accu = 0 + test_loss = 0 + test_samp = 0 + + if args.mlperf_logging: + scores = [] + targets = [] + + for i, testBatch in enumerate(test_ld): + # early exit if nbatches was set by the user and was exceeded + if nbatches > 0 and i >= nbatches: + break + + # forward pass + + lX_test_i, lS_l_test_i, lS_i_test_i, lT_test_i = testBatch + lT_test_i = lT_test_i.int() if args.loss_function == "bce" else lT_test_i + dlrm.run(lX_test_i, lS_l_test_i, lS_i_test_i, lT_test_i, test_net=True) + + Z_test = dlrm.get_output() + T_test = lT_test_i.numpy() + + if args.mlperf_logging: + scores.append(Z_test) + targets.append(T_test) + else: + # compte loss and accuracy + L_test = dlrm.get_loss() + mbs_test = T_test.shape[0] # = 
mini_batch_size except last + A_test = np.sum((np.round(Z_test, 0) == T_test).astype(np.uint8)) + test_accu += A_test + test_loss += L_test * mbs_test + test_samp += mbs_test + + # compute metrics (after test loop has finished) + if args.mlperf_logging: + validation_results = calculate_metrics(targets, scores) + gA_test = validation_results['accuracy'] + gL_test = validation_results['loss'] + else: + gA_test = test_accu / test_samp + gL_test = test_loss / test_samp + + # print metrics + is_best = gA_test > best_gA_test + if is_best: + best_gA_test = gA_test + + if args.mlperf_logging: + is_best = validation_results['roc_auc'] > best_auc_test + if is_best: + best_auc_test = validation_results['roc_auc'] + + print( + "Testing at - {}/{} of epoch {},".format(j + 1, nbatches, k) + + " loss {:.6f}, recall {:.4f}, precision {:.4f},".format( + validation_results['loss'], + validation_results['recall'], + validation_results['precision'] + ) + + " f1 {:.4f}, ap {:.4f},".format( + validation_results['f1'], + validation_results['ap'], + ) + + " auc {:.4f}, best auc {:.4f},".format( + validation_results['roc_auc'], + best_auc_test + ) + + " accuracy {:3.3f} %, best accuracy {:3.3f} %".format( + validation_results['accuracy'] * 100, + best_gA_test * 100 + ) + ) + else: + print( + "Testing at - {}/{} of epoch {},".format(j + 1, nbatches, 0) + + " loss {:.6f}, accuracy {:3.3f} %, best {:3.3f} %".format( + gL_test, gA_test * 100, best_gA_test * 100 + ) + ) + + # check thresholds + if (args.mlperf_logging + and (args.mlperf_acc_threshold > 0) + and (best_gA_test > args.mlperf_acc_threshold)): + print("MLPerf testing accuracy threshold " + + str(args.mlperf_acc_threshold) + + " reached, stop training") + break + + if (args.mlperf_logging + and (args.mlperf_auc_threshold > 0) + and (best_auc_test > args.mlperf_auc_threshold)): + print("MLPerf testing auc threshold " + + str(args.mlperf_auc_threshold) + + " reached, stop training") + break + + j += 1 # nbatches + k += 1 # nepochs + + 
# test prints + if not args.inference_only and args.debug_mode: + print("updated parameters (weights and bias):") + dlrm.print_weights() + + # build onnx model from caffe2 + if args.save_onnx: + pnet = dlrm.parameters().net.Proto() + inet = dlrm.parameters().param_init_net.Proto() + value_info = dlrm.onnx_tsd # None + # debug prints + # print(value_info) + + # WARNING: Why Caffe2 to ONNX net transformation currently does not work? + # 1. ONNX does not support SparseLengthsSum operator directly. A workaround + # could be for the Caffe2 ONNX frontend to indirectly map this operator to + # Gather and ReducedSum ONNX operators, following the PyTorch approach. + c2f = caffe2.python.onnx.frontend.Caffe2Frontend() + dlrm_caffe2_onnx = c2f.caffe2_net_to_onnx_model(pnet, inet, value_info) + # check the onnx model + onnx.checker.check_model(dlrm_caffe2_onnx) + + # save model to a file + with open("dlrm_s_caffe2.onnx", "w+") as dlrm_caffe2_onnx_file: + dlrm_caffe2_onnx_file.write(str(dlrm_caffe2_onnx)) + + # build protobuf with types and shapes + if args.save_proto_types_shapes: + # add types and shapes to protobuf + __TYPE_MAPPING = { + onnx.TensorProto.FLOAT: caffe2_pb2.TensorProto.FLOAT, + onnx.TensorProto.UINT8: caffe2_pb2.TensorProto.UINT8, + onnx.TensorProto.INT8: caffe2_pb2.TensorProto.INT8, + onnx.TensorProto.UINT16: caffe2_pb2.TensorProto.UINT16, + onnx.TensorProto.INT16: caffe2_pb2.TensorProto.INT16, + onnx.TensorProto.INT32: caffe2_pb2.TensorProto.INT32, + onnx.TensorProto.INT64: caffe2_pb2.TensorProto.INT64, + onnx.TensorProto.STRING: caffe2_pb2.TensorProto.STRING, + onnx.TensorProto.BOOL: caffe2_pb2.TensorProto.BOOL, + onnx.TensorProto.FLOAT16: caffe2_pb2.TensorProto.FLOAT16, + onnx.TensorProto.DOUBLE: caffe2_pb2.TensorProto.DOUBLE, + } + + pnet = dlrm.parameters().net.Proto() + arg = pnet.arg.add() + arg.name = "input_shape_info" + for i in pnet.external_input: + if i in dlrm.onnx_tsd: + onnx_dtype, shape = dlrm.onnx_tsd[i] + t = arg.tensors.add() + t.name = i + 
t.data_type = __TYPE_MAPPING[onnx_dtype] + t.dims.extend(shape) + else: + print("Warning: we don't have shape/type info for input: {}".format(i)) + # debug print + # print(pnet) + + # export the protobuf with types and shapes + with open("dlrm_s_caffe2.proto", "w+") as dlrm_s_proto_file: + dlrm_s_proto_file.write(str(pnet)) + + """ + # export the protobuf with types and shapes as well as weights + # see https://github.com/pytorch/pytorch/issues/9533 + #save + net = dlrm.parameters().net + params = dlrm.parameters().params + init_net, predict_net = mobile_exporter.Export(workspace, net, params) + with open("dlrm_s_caffe2.predict", "wb") as dlrm_s_predict_file: + dlrm_s_predict_file.write(predict_net.SerializeToString()) + with open("dlrm_s_caffe2.init", "wb") as dlrm_s_init_file: + dlrm_s_init_file.write(init_net.SerializeToString()) + #load + net_def = caffe2_pb2.NetDef() + init_def= caffe2_pb2.NetDef() + with open("dlrm_s_caffe2.predict", "rb") as dlrm_s_predict_file: + net_def.ParseFromString(dlrm_s_predict_file.read()) + print(net_def) + with open("dlrm_s_caffe2.init", "rb") as dlrm_s_init_file: + init_def.ParseFromString(dlrm_s_init_file.read()) + print(init_def) + """ diff --git a/benchmarks/dlrm/ootb/dlrm_s_pytorch.py b/benchmarks/dlrm/ootb/dlrm_s_pytorch.py new file mode 100644 index 0000000..1774eb4 --- /dev/null +++ b/benchmarks/dlrm/ootb/dlrm_s_pytorch.py @@ -0,0 +1,2511 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +# +# Description: an implementation of a deep learning recommendation model (DLRM) +# The model input consists of dense and sparse features. The former is a vector +# of floating point values. The latter is a list of sparse indices into +# embedding tables, which consist of vectors of floating point values. 
+# The selected vectors are passed to mlp networks denoted by triangles, +# in some cases the vectors are interacted through operators (Ops). +# +# output: +# vector of values +# model: | +# /\ +# /__\ +# | +# _____________________> Op <___________________ +# / | \ +# /\ /\ /\ +# /__\ /__\ ... /__\ +# | | | +# | Op Op +# | ____/__\_____ ____/__\____ +# | |_Emb_|____|__| ... |_Emb_|__|___| +# input: +# [ dense features ] [sparse indices] , ..., [sparse indices] +# +# More precise definition of model layers: +# 1) fully connected layers of an mlp +# z = f(y) +# y = Wx + b +# +# 2) embedding lookup (for a list of sparse indices p=[p1,...,pk]) +# z = Op(e1,...,ek) +# obtain vectors e1=E[:,p1], ..., ek=E[:,pk] +# +# 3) Operator Op can be one of the following +# Sum(e1,...,ek) = e1 + ... + ek +# Dot(e1,...,ek) = [e1'e1, ..., e1'ek, ..., ek'e1, ..., ek'ek] +# Cat(e1,...,ek) = [e1', ..., ek']' +# where ' denotes transpose operation +# +# References: +# [1] Maxim Naumov, Dheevatsa Mudigere, Hao-Jun Michael Shi, Jianyu Huang, +# Narayanan Sundaram, Jongsoo Park, Xiaodong Wang, Udit Gupta, Carole-Jean Wu, +# Alisson G. Azzolini, Dmytro Dzhulgakov, Andrey Mallevich, Ilia Cherniavskii, +# Yinghai Lu, Raghuraman Krishnamoorthi, Ansha Yu, Volodymyr Kondratenko, +# Stephanie Pereira, Xianjie Chen, Wenlin Chen, Vijay Rao, Bill Jia, Liang Xiong, +# Misha Smelyanskiy, "Deep Learning Recommendation Model for Personalization and +# Recommendation Systems", CoRR, arXiv:1906.00091, 2019 + +# TERMS: +# +# qr_ quotient-remainder trick +# md_ mixed-dimension trick +# lS_i Indices used as inputs to embedding bag operators. Indices determine +# which embeddings to select. +# lS_o Offsets used as inputs to embedding bag operators. Offsets determine how +# the selected embeddings are grouped together for the 'mode' operation. 
+# (Mode operation examples: sum, mean, max) + +from __future__ import absolute_import, division, print_function, unicode_literals + +import argparse + +# miscellaneous +import builtins +import datetime +import json +import sys +import time +import itertools +import traceback + +# onnx +# The onnx import causes deprecation warnings every time workers +# are spawned during testing. So, we filter out those warnings. +import warnings + +# data generation +import dlrm_data_pytorch as dp + +# For distributed run +import extend_distributed as ext_dist +import mlperf_logger + +# numpy +import numpy as np +import optim.rwsadagrad as RowWiseSparseAdagrad +import sklearn.metrics + +# pytorch +import torch +import torch.nn as nn +from torch._ops import ops +from torch.autograd.profiler import record_function +from torch.nn.parallel.parallel_apply import parallel_apply +from torch.nn.parallel.replicate import replicate +from torch.nn.parallel.scatter_gather import gather, scatter +from torch.nn.parameter import Parameter +from torch.optim.lr_scheduler import _LRScheduler +from torch.utils.tensorboard import SummaryWriter + +try: + import fbgemm_gpu + from fbgemm_gpu import split_table_batched_embeddings_ops + from fbgemm_gpu.split_table_batched_embeddings_ops import ( + CacheAlgorithm, + PoolingMode, + OptimType, + SparseType, + SplitTableBatchedEmbeddingBagsCodegen, + IntNBitTableBatchedEmbeddingBagsCodegen, + ) +except (ImportError, OSError): + fbgemm_gpu_import_error_msg = traceback.format_exc() + fbgemm_gpu = None + +try: + import apex +except (ImportError, OSError): + apex_import_error_msg = traceback.format_exc() + apex = None + +try: + import torch2trt + from torch2trt import torch2trt +except (ImportError, OSError): + torch2trt_import_error_msg = traceback.format_exc() + torch2trt = None + +# mixed-dimension trick +from tricks.md_embedding_bag import PrEmbeddingBag, md_solver + +# FB5 Logger +import pathlib +from os import fspath +p = 
pathlib.Path(__file__).parent.resolve() / "../../../fb5logging"
+sys.path.append(fspath(p))
+from fb5logger import FB5Logger
+import loggerconstants
+
+# quotient-remainder trick
+from tricks.qr_embedding_bag import QREmbeddingBag
+
+with warnings.catch_warnings():
+    warnings.filterwarnings("ignore", category=DeprecationWarning)
+    try:
+        import onnx
+    except ImportError as error:
+        print("Unable to import onnx. ", error)
+
+# from torchviz import make_dot
+# import torch.nn.functional as Functional
+# from torch.nn.parameter import Parameter
+
+exc = getattr(builtins, "IOError", "FileNotFoundError")
+
+
+def time_wrap(use_gpu):
+    if use_gpu:
+        torch.cuda.synchronize()
+    return time.time()
+
+
+def dlrm_wrap(X, lS_o, lS_i, use_gpu, device, ndevices=1):
+    with record_function("DLRM forward"):
+        if use_gpu:  # .cuda()
+            # lS_i can be either a list of tensors or a stacked tensor.
+            # Handle each case below:
+            if ndevices == 1:
+                lS_i = (
+                    [S_i.to(device) for S_i in lS_i]
+                    if isinstance(lS_i, list)
+                    else lS_i.to(device)
+                )
+                lS_o = (
+                    [S_o.to(device) for S_o in lS_o]
+                    if isinstance(lS_o, list)
+                    else lS_o.to(device)
+                )
+        return dlrm(X.to(device), lS_o, lS_i)
+
+
+def loss_fn_wrap(Z, T, use_gpu, device):
+    with record_function("DLRM loss compute"):
+        if args.loss_function == "mse" or args.loss_function == "bce":
+            return dlrm.loss_fn(Z, T.to(device))
+        elif args.loss_function == "wbce":
+            loss_ws_ = dlrm.loss_ws[T.data.view(-1).long()].view_as(T).to(device)
+            loss_fn_ = dlrm.loss_fn(Z, T.to(device))
+            loss_sc_ = loss_ws_ * loss_fn_
+            return loss_sc_.mean()
+
+
+# The following function is a wrapper to avoid checking this multiple times in the
+# loop below.
+def unpack_batch(b): + # Experiment with unweighted samples + return b[0], b[1], b[2], b[3], torch.ones(b[3].size()), None + + +class LRPolicyScheduler(_LRScheduler): + def __init__(self, optimizer, num_warmup_steps, decay_start_step, num_decay_steps): + self.num_warmup_steps = num_warmup_steps + self.decay_start_step = decay_start_step + self.decay_end_step = decay_start_step + num_decay_steps + self.num_decay_steps = num_decay_steps + + if self.decay_start_step < self.num_warmup_steps: + sys.exit("Learning rate warmup must finish before the decay starts") + + super(LRPolicyScheduler, self).__init__(optimizer) + + def get_lr(self): + step_count = self._step_count + if step_count < self.num_warmup_steps: + # warmup + scale = 1.0 - (self.num_warmup_steps - step_count) / self.num_warmup_steps + lr = [base_lr * scale for base_lr in self.base_lrs] + self.last_lr = lr + elif self.decay_start_step <= step_count and step_count < self.decay_end_step: + # decay + decayed_steps = step_count - self.decay_start_step + scale = ((self.num_decay_steps - decayed_steps) / self.num_decay_steps) ** 2 + min_lr = 0.0000001 + lr = [max(min_lr, base_lr * scale) for base_lr in self.base_lrs] + self.last_lr = lr + else: + if self.num_decay_steps > 0: + # freeze at last, either because we're after decay + # or because we're between warmup and decay + lr = self.last_lr + else: + # do not adjust + lr = self.base_lrs + return lr + + +# quantize_fbgemm_gpu_embedding_bag is partially lifted from +# fbgemm_gpu/test/split_embedding_inference_converter.py, def _quantize_split_embs. 
+# Converts SplitTableBatchedEmbeddingBagsCodegen to IntNBitTableBatchedEmbeddingBagsCodegen +def quantize_fbgemm_gpu_embedding_bag(model, quantize_type, device): + embedding_specs = [] + if device.type == "cpu": + emb_location = split_table_batched_embeddings_ops.EmbeddingLocation.HOST + else: + emb_location = split_table_batched_embeddings_ops.EmbeddingLocation.DEVICE + + for (E, D, _, _) in model.embedding_specs: + weights_ty = quantize_type + if D % weights_ty.align_size() != 0: + assert D % 4 == 0 + weights_ty = ( + SparseType.FP16 + ) # fall back to FP16 if dimension couldn't be aligned with the required size + embedding_specs.append(("", E, D, weights_ty, emb_location)) + + q_model = ( + split_table_batched_embeddings_ops.IntNBitTableBatchedEmbeddingBagsCodegen( + embedding_specs=embedding_specs, + pooling_mode=model.pooling_mode, + device=device, + ) + ) + q_model.initialize_weights() + for t, (_, _, _, weight_ty, _) in enumerate(embedding_specs): + if weight_ty == SparseType.FP16: + original_weight = model.split_embedding_weights()[t] + q_weight = original_weight.half() + weights = torch.tensor(q_weight.cpu().numpy().view(np.uint8)) + q_model.split_embedding_weights()[t][0].data.copy_(weights) + + elif weight_ty == SparseType.INT8: + original_weight = model.split_embedding_weights()[t] + q_weight = torch.ops.fbgemm.FloatToFused8BitRowwiseQuantized( + original_weight + ) + weights = q_weight[:, :-8] + scale_shift = torch.tensor( + q_weight[:, -8:] + .contiguous() + .cpu() + .numpy() + .view(np.float32) + .astype(np.float16) + .view(np.uint8) + ) + q_model.split_embedding_weights()[t][0].data.copy_(weights) + q_model.split_embedding_weights()[t][1].data.copy_(scale_shift) + + elif weight_ty == SparseType.INT4 or weight_ty == SparseType.INT2: + original_weight = model.split_embedding_weights()[t] + q_weight = torch.ops.fbgemm.FloatToFusedNBitRowwiseQuantizedSBHalf( + original_weight, + bit_rate=quantize_type.bit_rate(), + ) + weights = q_weight[:, :-4] + 
scale_shift = torch.tensor( + q_weight[:, -4:].contiguous().cpu().numpy().view(np.uint8) + ) + q_model.split_embedding_weights()[t][0].data.copy_(weights) + q_model.split_embedding_weights()[t][1].data.copy_(scale_shift) + return q_model + + +def create_fbgemm_gpu_emb_bag( + device, + emb_l, + m_spa, + quantize_bits, + learning_rate, + codegen_preference=None, + requires_grad=True, +): + if isinstance(emb_l[0], PrEmbeddingBag): + emb_l = [e.embs for e in emb_l] + if isinstance(emb_l[0], nn.EmbeddingBag): + emb_l = [e.weight for e in emb_l] + Es = [e.shape[0] for e in emb_l] + + if isinstance(m_spa, list): + Ds = m_spa + else: + Ds = [m_spa for _ in emb_l] + + if device.type == "cpu": + emb_location = split_table_batched_embeddings_ops.EmbeddingLocation.HOST + compute_device = split_table_batched_embeddings_ops.ComputeDevice.CPU + else: + emb_location = split_table_batched_embeddings_ops.EmbeddingLocation.DEVICE + compute_device = split_table_batched_embeddings_ops.ComputeDevice.CUDA + pooling_mode = PoolingMode.SUM + cache_algorithm = CacheAlgorithm.LRU + + sparse_type_dict = { + 4: SparseType.INT4, + 8: SparseType.INT8, + 16: SparseType.FP16, + 32: SparseType.FP32, + } + codegen_type_dict = { + 4: "IntN", + 8: "Split" if codegen_preference != "IntN" else "IntN", + 16: "Split" if codegen_preference != "IntN" else "IntN", + 32: "Split", + } + + codegen_type = codegen_type_dict[quantize_bits] + quantize_type = sparse_type_dict[quantize_bits] + if codegen_type == "IntN": + # Create non-quantized model and then call quantize_fbgemm_gpu_embedding_bag + fbgemm_gpu_emb_bag = SplitTableBatchedEmbeddingBagsCodegen( + embedding_specs=[ + ( + E, # num of rows in the table + D, # num of columns in the table + split_table_batched_embeddings_ops.EmbeddingLocation.HOST, + split_table_batched_embeddings_ops.ComputeDevice.CPU, + ) + for (E, D) in zip(Es, Ds) + ], + weights_precision=SparseType.FP32, + optimizer=OptimType.EXACT_SGD, + learning_rate=learning_rate, + 
cache_algorithm=cache_algorithm, + pooling_mode=pooling_mode, + ).to(device) + if quantize_type == quantize_type.FP16: + weights = fbgemm_gpu_emb_bag.split_embedding_weights() + for i, emb in enumerate(weights): + emb.data.copy_(emb_l[i]) + + elif quantize_type == quantize_type.INT8: + # copy quantized values upsampled/recasted to FP32 + for i in range(len(Es)): + fbgemm_gpu_emb_bag.split_embedding_weights()[i].data.copy_( + torch.ops.fbgemm.Fused8BitRowwiseQuantizedToFloat(emb_l[i]) + ) + elif quantize_type == quantize_type.INT4: + # copy quantized values upsampled/recasted to FP32 + for i in range(len(Es)): + fbgemm_gpu_emb_bag.split_embedding_weights()[i].data.copy_( + torch.ops.fbgemm.FusedNBitRowwiseQuantizedSBHalfToFloat( + emb_l[i], + bit_rate=quantize_type.bit_rate(), + ) + ) + fbgemm_gpu_emb_bag = quantize_fbgemm_gpu_embedding_bag( + fbgemm_gpu_emb_bag, quantize_type, device + ) + else: + fbgemm_gpu_emb_bag = SplitTableBatchedEmbeddingBagsCodegen( + embedding_specs=[ + ( + E, # num of rows in the table + D, # num of columns in the table + emb_location, + compute_device, + ) + for (E, D) in zip(Es, Ds) + ], + weights_precision=quantize_type, + optimizer=OptimType.EXACT_SGD, + learning_rate=learning_rate, + cache_algorithm=cache_algorithm, + pooling_mode=pooling_mode, + ).to(device) + + weights = fbgemm_gpu_emb_bag.split_embedding_weights() + for i, emb in enumerate(weights): + emb.data.copy_(emb_l[i]) + + if not requires_grad: + torch.no_grad() + torch.set_grad_enabled(False) + + return fbgemm_gpu_emb_bag + + +# The purpose of this wrapper is to encapsulate the format conversions to/from fbgemm_gpu +# so parallel_apply() executes the format-in -> fbgemm_gpu op -> format-out instructions +# for each respective GPU in parallel. 
+class fbgemm_gpu_emb_bag_wrapper(nn.Module): + def __init__( + self, + device, + emb_l, + m_spa, + quantize_bits, + learning_rate, + codegen_preference, + requires_grad, + ): + super(fbgemm_gpu_emb_bag_wrapper, self).__init__() + self.fbgemm_gpu_emb_bag = create_fbgemm_gpu_emb_bag( + device, + emb_l, + m_spa, + quantize_bits, + learning_rate, + codegen_preference, + requires_grad, + ) + self.device = device + self.m_spa = m_spa + # create cumsum array for mixed dimension support + if isinstance(m_spa, list): + self.m_spa_cumsum = np.cumsum([0] + m_spa) + if not requires_grad: + torch.no_grad() + torch.set_grad_enabled(False) + + def forward(self, lS_o, lS_i, v_W_l=None): + + # convert offsets to fbgemm format + lengths_list = list(map(len, lS_i)) + indices_lengths_cumsum = np.cumsum([0] + lengths_list) + if isinstance(lS_o, list): + lS_o = torch.stack(lS_o) + lS_o = lS_o.to(self.device) + lS_o += torch.from_numpy(indices_lengths_cumsum[:-1, np.newaxis]).to( + self.device + ) + numel = torch.tensor([indices_lengths_cumsum[-1]], dtype=torch.long).to( + self.device + ) + lS_o = torch.cat((lS_o.flatten(), numel)) + + # create per_sample_weights + if v_W_l: + per_sample_weights = torch.cat( + [a.gather(0, b) for a, b in zip(v_W_l, lS_i)] + ) + else: + per_sample_weights = None + + # convert indices to fbgemm_gpu format + if isinstance(lS_i, torch.Tensor): + lS_i = [lS_i] + lS_i = torch.cat(lS_i, dim=0).to(self.device) + + if isinstance(self.fbgemm_gpu_emb_bag, IntNBitTableBatchedEmbeddingBagsCodegen): + lS_o = lS_o.int() + lS_i = lS_i.int() + + # gpu embedding bag op + ly = self.fbgemm_gpu_emb_bag(lS_i, lS_o, per_sample_weights) + + # convert the results to the next layer's input format. + if isinstance(self.m_spa, list): + # handle mixed dimensions case. + ly = [ + ly[:, s:e] + for (s, e) in zip(self.m_spa_cumsum[:-1], self.m_spa_cumsum[1:]) + ] + else: + # handle case in which all tables share the same column dimension. 
+ cols = self.m_spa + ntables = len(self.fbgemm_gpu_emb_bag.embedding_specs) + ly = ly.reshape(-1, ntables, cols).swapaxes(0, 1) + ly = list(ly) + return ly + + +### define dlrm in PyTorch ### +class DLRM_Net(nn.Module): + def create_mlp(self, ln, sigmoid_layer): + # build MLP layer by layer + layers = nn.ModuleList() + layers.training = self.requires_grad + for i in range(0, ln.size - 1): + n = ln[i] + m = ln[i + 1] + + # construct fully connected operator + LL = nn.Linear(int(n), int(m), bias=True) + + # initialize the weights + # with torch.no_grad(): + # custom Xavier input, output or two-sided fill + mean = 0.0 # std_dev = np.sqrt(variance) + std_dev = np.sqrt(2 / (m + n)) # np.sqrt(1 / m) # np.sqrt(1 / n) + W = np.random.normal(mean, std_dev, size=(m, n)).astype(np.float32) + std_dev = np.sqrt(1 / m) # np.sqrt(2 / (m + 1)) + bt = np.random.normal(mean, std_dev, size=m).astype(np.float32) + # approach 1 + LL.weight.data = torch.tensor(W) + LL.weight.requires_grad = self.requires_grad + LL.bias.data = torch.tensor(bt) + LL.bias.requires_grad = self.requires_grad + # approach 2 + # LL.weight.data.copy_(torch.tensor(W)) + # LL.bias.data.copy_(torch.tensor(bt)) + # approach 3 + # LL.weight = Parameter(torch.tensor(W),requires_grad=True) + # LL.bias = Parameter(torch.tensor(bt),requires_grad=True) + layers.append(LL) + + # construct sigmoid or relu operator + if i == sigmoid_layer: + layers.append(nn.Sigmoid()) + else: + layers.append(nn.ReLU()) + + # approach 1: use ModuleList + # return layers + # approach 2: use Sequential container to wrap all layers + return torch.nn.Sequential(*layers) + + def create_emb(self, m, ln, weighted_pooling=None): + # create_emb parameter description + # + # ln parameter: + # ln is a list of all the tables' row counts. E.g. [10,5,16] would mean + # table 0 has 10 rows, table 1 has 5 rows, and table 2 has 16 rows. + # + # m parameter (when m is a single value): + # m is the length of all embedding vectors. 
All embedding vectors in all + # embedding tables are created to be the same length. E.g. if ln were [3,2,5] + # and m were 4, table 0 would be dimension 3 x 4, table 1 would be 2 x 4, + # and table 2 would be 5 x 4. + # + # m parameter (when m is a list): + # m is a list of all the tables' column counts. E.g. if m were [4,5,6] and + # ln were [3,2,5], table 0 would be dimension 3 x 4, table 1 would be 2 x 5, + # and table 2 would be 5 x 6. + # + # Key to remember: + # embedding table i has shape: ln[i] rows, m columns, when m is a single value. + # embedding table i has shape: ln[i] rows, m[i] columns, when m is a list. + + emb_l = nn.ModuleList() + v_W_l = [] + for i in range(0, ln.size): + if ext_dist.my_size > 1: + if i not in self.local_emb_indices: + continue + n = ln[i] + + # construct embedding operator + if self.qr_flag and n > self.qr_threshold: + EE = QREmbeddingBag( + n, + m, + self.qr_collisions, + operation=self.qr_operation, + mode="sum", + sparse=True, + ) + elif self.md_flag and n > self.md_threshold: + base = max(m) + _m = m[i] if n > self.md_threshold else base + EE = PrEmbeddingBag(n, _m, base) + # use np initialization as below for consistency... 
+ W = np.random.uniform( + low=-np.sqrt(1 / n), high=np.sqrt(1 / n), size=(n, _m) + ).astype(np.float32) + EE.embs.weight.data = torch.tensor(W, requires_grad=self.requires_grad) + else: + EE = nn.EmbeddingBag(n, m, mode="sum", sparse=True) + # initialize embeddings + # nn.init.uniform_(EE.weight, a=-np.sqrt(1 / n), b=np.sqrt(1 / n)) + W = np.random.uniform( + low=-np.sqrt(1 / n), high=np.sqrt(1 / n), size=(n, m) + ).astype(np.float32) + # approach 1 + EE.weight.data = torch.tensor(W, requires_grad=self.requires_grad) + # approach 2 + # EE.weight.data.copy_(torch.tensor(W)) + # approach 3 + # EE.weight = Parameter(torch.tensor(W),requires_grad=True) + if weighted_pooling is None: + v_W_l.append(None) + else: + v_W_l.append(torch.ones(n, dtype=torch.float32)) + emb_l.append(EE) + return emb_l, v_W_l + + def __init__( + self, + m_spa=None, + ln_emb=None, + ln_bot=None, + ln_top=None, + arch_interaction_op=None, + arch_interaction_itself=False, + sigmoid_bot=-1, + sigmoid_top=-1, + sync_dense_params=True, + loss_threshold=0.0, + ndevices=-1, + qr_flag=False, + qr_operation="mult", + qr_collisions=0, + qr_threshold=200, + md_flag=False, + md_threshold=200, + weighted_pooling=None, + loss_function="bce", + learning_rate=0.1, + use_gpu=False, + use_fbgemm_gpu=False, + fbgemm_gpu_codegen_pref="Split", + inference_only=False, + quantize_mlp_with_bit=False, + quantize_emb_with_bit=False, + ): + super(DLRM_Net, self).__init__() + + if ( + (m_spa is not None) + and (ln_emb is not None) + and (ln_bot is not None) + and (ln_top is not None) + and (arch_interaction_op is not None) + ): + # save arguments + self.ntables = len(ln_emb) + self.m_spa = m_spa + self.use_gpu = use_gpu + self.use_fbgemm_gpu = use_fbgemm_gpu + self.fbgemm_gpu_codegen_pref = fbgemm_gpu_codegen_pref + self.requires_grad = not inference_only + self.ndevices_available = ndevices + self.ndevices_in_use = ndevices + self.output_d = 0 + self.add_new_weights_to_params = False + self.arch_interaction_op = 
arch_interaction_op + self.arch_interaction_itself = arch_interaction_itself + self.sync_dense_params = sync_dense_params and not inference_only + self.loss_threshold = loss_threshold + self.loss_function = loss_function + self.learning_rate = learning_rate + if weighted_pooling is not None and weighted_pooling != "fixed": + self.weighted_pooling = "learned" + else: + self.weighted_pooling = weighted_pooling + # create variables for QR embedding if applicable + self.qr_flag = qr_flag + if self.qr_flag: + self.qr_collisions = qr_collisions + self.qr_operation = qr_operation + self.qr_threshold = qr_threshold + # create variables for MD embedding if applicable + self.md_flag = md_flag + if self.md_flag: + self.md_threshold = md_threshold + + # If running distributed, get local slice of embedding tables + if ext_dist.my_size > 1: + n_emb = len(ln_emb) + if n_emb < ext_dist.my_size: + sys.exit( + "only (%d) sparse features for (%d) devices, table partitions will fail" + % (n_emb, ext_dist.my_size) + ) + self.n_global_emb = n_emb + self.n_local_emb, self.n_emb_per_rank = ext_dist.get_split_lengths( + n_emb + ) + self.local_emb_slice = ext_dist.get_my_slice(n_emb) + self.local_emb_indices = list(range(n_emb))[self.local_emb_slice] + + # create operators + self.emb_l, self.v_W_l = self.create_emb(m_spa, ln_emb, weighted_pooling) + if self.weighted_pooling == "learned": + self.v_W_l = nn.ParameterList(list(map(Parameter, self.v_W_l))) + + self.bot_l = self.create_mlp(ln_bot, sigmoid_bot) + self.top_l = self.create_mlp(ln_top, sigmoid_top) + + # quantization + self.quantize_emb = False + self.emb_l_q = [] + self.quantize_bits = 32 + + # fbgemm_gpu + self.fbgemm_emb_l = [] + self.v_W_l_l = [self.v_W_l] if self.weighted_pooling else [None] + + self.interact_features_l = [] + + # specify the loss function + if self.loss_function == "mse": + self.loss_fn = torch.nn.MSELoss(reduction="mean") + elif self.loss_function == "bce": + self.loss_fn = torch.nn.BCELoss(reduction="mean") 
+ elif self.loss_function == "wbce": + self.loss_ws = torch.tensor( + np.fromstring(args.loss_weights, dtype=float, sep="-") + ) + self.loss_fn = torch.nn.BCELoss(reduction="none") + else: + sys.exit( + "ERROR: --loss-function=" + self.loss_function + " is not supported" + ) + + def prepare_parallel_model(self, ndevices): + device_ids = range(ndevices) + # replicate mlp (data parallelism) + self.bot_l_replicas = replicate(self.bot_l, device_ids) + self.top_l_replicas = replicate(self.top_l, device_ids) + + # distribute embeddings (model parallelism) + if self.weighted_pooling is not None: + for k, w in enumerate(self.v_W_l): + self.v_W_l[k] = Parameter( + w.to(torch.device("cuda:" + str(k % ndevices))) + ) + if not self.use_fbgemm_gpu: + for k, w in enumerate(self.emb_l): + self.emb_l[k] = w.to(torch.device("cuda:" + str(k % ndevices))) + else: + self.fbgemm_emb_l, self.v_W_l_l = zip( + *[ + ( + fbgemm_gpu_emb_bag_wrapper( + torch.device("cuda:" + str(k)), + self.emb_l[k::ndevices] + if self.emb_l + else self.emb_l_q[k::ndevices], + self.m_spa[k::ndevices] + if isinstance(self.m_spa, list) + else self.m_spa, + self.quantize_bits, + self.learning_rate, + self.fbgemm_gpu_codegen_pref, + self.requires_grad, + ), + self.v_W_l[k::ndevices] if self.weighted_pooling else None, + ) + for k in range(ndevices) + ] + ) + self.add_new_weights_to_params = True + self.interact_features_l = [self.nn_module_wrapper() for _ in range(ndevices)] + + # nn_module_wrapper is used to call functions concurrently across multi-gpus, using parallel_apply, + # which requires an nn.Module subclass. 
+ class nn_module_wrapper(nn.Module): + def __init__(self): + super(DLRM_Net.nn_module_wrapper, self).__init__() + def forward(self, E, x, ly): + return E(x, ly) + + def apply_mlp(self, x, layers): + # approach 1: use ModuleList + # for layer in layers: + # x = layer(x) + # return x + # approach 2: use Sequential container to wrap all layers + return layers(x) + + def apply_emb(self, lS_o, lS_i): + # WARNING: notice that we are processing the batch at once. We implicitly + # assume that the data is laid out such that: + # 1. each embedding is indexed with a group of sparse indices, + # corresponding to a single lookup + # 2. for each embedding the lookups are further organized into a batch + # 3. for a list of embedding tables there is a list of batched lookups + + if self.use_fbgemm_gpu: + # Deinterleave and reshape to 2d, so items are grouped by device + # per row. Then parallel apply. + ndevices = len(self.fbgemm_emb_l) + lS_o_l = [lS_o[k::ndevices] for k in range(ndevices)] + lS_i_l = [lS_i[k::ndevices] for k in range(ndevices)] + ly = parallel_apply( + self.fbgemm_emb_l, list(zip(lS_o_l, lS_i_l, self.v_W_l_l)) + ) + # Interleave and flatten to match non-fbgemm_gpu ly format. + ly = [ly[i % ndevices][i // ndevices] for i in range(self.ntables)] + else: + ly = [] + for k, sparse_index_group_batch in enumerate(lS_i): + sparse_offset_group_batch = lS_o[k] + + # embedding lookup + # We are using EmbeddingBag, which implicitly uses sum operator. 
+ # The embeddings are represented as tall matrices, with sum + # happening vertically across 0 axis, resulting in a row vector + # E = emb_l[k] + + if self.v_W_l[k] is not None: + per_sample_weights = self.v_W_l[k].gather( + 0, sparse_index_group_batch + ) + else: + per_sample_weights = None + + if self.quantize_emb: + if self.quantize_bits == 4: + E = ops.quantized.embedding_bag_4bit_rowwise_offsets + elif self.quantize_bits == 8: + E = ops.quantized.embedding_bag_byte_rowwise_offsets + QV = E( + self.emb_l_q[k], + sparse_index_group_batch, + sparse_offset_group_batch, + per_sample_weights=per_sample_weights, + ) + + ly.append(QV) + else: + E = self.emb_l[k] + V = E( + sparse_index_group_batch, + sparse_offset_group_batch, + per_sample_weights=per_sample_weights, + ) + + ly.append(V) + + # print(ly) + return ly + + # using quantizing functions from caffe2/aten/src/ATen/native/quantized/cpu + def quantize_embedding(self, bits): + + n = len(self.emb_l) + self.emb_l_q = [None] * n + for k in range(n): + if bits == 4: + self.emb_l_q[k] = ops.quantized.embedding_bag_4bit_prepack( + self.emb_l[k].weight + ) + elif bits == 8: + self.emb_l_q[k] = ops.quantized.embedding_bag_byte_prepack( + self.emb_l[k].weight + ) + elif bits == 16: + self.emb_l_q[k] = self.emb_l[k].half().weight + else: + return + self.emb_l = None + self.quantize_emb = True + self.quantize_bits = bits + + def interact_features(self, x, ly): + + if self.arch_interaction_op == "dot": + # concatenate dense and sparse features + (batch_size, d) = x.shape + T = torch.cat([x] + ly, dim=1).view((batch_size, -1, d)) + # perform a dot product + Z = torch.bmm(T, torch.transpose(T, 1, 2)) + # append dense feature with the interactions (into a row vector) + # approach 1: all + # Zflat = Z.view((batch_size, -1)) + # approach 2: unique + _, ni, nj = Z.shape + # approach 1: tril_indices + # offset = 0 if self.arch_interaction_itself else -1 + # li, lj = torch.tril_indices(ni, nj, offset=offset) + # approach 2: custom 
+ offset = 1 if self.arch_interaction_itself else 0 + li = torch.tensor([i for i in range(ni) for j in range(i + offset)]) + lj = torch.tensor([j for i in range(nj) for j in range(i + offset)]) + Zflat = Z[:, li, lj] + # concatenate dense features and interactions + R = torch.cat([x] + [Zflat], dim=1) + elif self.arch_interaction_op == "cat": + # concatenation features (into a row vector) + R = torch.cat([x] + ly, dim=1) + else: + sys.exit( + "ERROR: --arch-interaction-op=" + + self.arch_interaction_op + + " is not supported" + ) + + return R + + def forward(self, dense_x, lS_o, lS_i): + if ext_dist.my_size > 1: + # multi-node multi-device run + return self.distributed_forward(dense_x, lS_o, lS_i) + elif self.ndevices_available <= 1: + # single device run + return self.sequential_forward(dense_x, lS_o, lS_i) + else: + # single-node multi-device run + return self.parallel_forward(dense_x, lS_o, lS_i) + + def distributed_forward(self, dense_x, lS_o, lS_i): + batch_size = dense_x.size()[0] + # WARNING: # of ranks must be <= batch size in distributed_forward call + if batch_size < ext_dist.my_size: + sys.exit( + "ERROR: batch_size (%d) must be larger than number of ranks (%d)" + % (batch_size, ext_dist.my_size) + ) + if batch_size % ext_dist.my_size != 0: + sys.exit( + "ERROR: batch_size %d can not split across %d ranks evenly" + % (batch_size, ext_dist.my_size) + ) + + dense_x = dense_x[ext_dist.get_my_slice(batch_size)] + lS_o = lS_o[self.local_emb_slice] + lS_i = lS_i[self.local_emb_slice] + + if (self.ntables != len(lS_o)) or (self.ntables != len(lS_i)): + sys.exit( + "ERROR: corrupted model input detected in distributed_forward call" + ) + + # embeddings + with record_function("DLRM embedding forward"): + ly = self.apply_emb(lS_o, lS_i) + + # WARNING: Note that at this point we have the result of the embedding lookup + # for the entire batch on each rank. 
We would like to obtain partial results + # corresponding to all embedding lookups, but part of the batch on each rank. + # Therefore, matching the distribution of output of bottom mlp, so that both + # could be used for subsequent interactions on each device. + if self.ntables != len(ly): + sys.exit("ERROR: corrupted intermediate result in distributed_forward call") + + a2a_req = ext_dist.alltoall(ly, self.n_emb_per_rank) + + with record_function("DLRM bottom nlp forward"): + x = self.apply_mlp(dense_x, self.bot_l) + + ly = a2a_req.wait() + ly = list(ly) + + # interactions + with record_function("DLRM interaction forward"): + z = self.interact_features(x, ly) + + # top mlp + with record_function("DLRM top nlp forward"): + p = self.apply_mlp(z, self.top_l) + + # clamp output if needed + if 0.0 < self.loss_threshold and self.loss_threshold < 1.0: + z = torch.clamp(p, min=self.loss_threshold, max=(1.0 - self.loss_threshold)) + else: + z = p + + return z + + def sequential_forward(self, dense_x, lS_o, lS_i): + # process dense features (using bottom mlp), resulting in a row vector + x = self.apply_mlp(dense_x, self.bot_l) + # debug prints + # print("intermediate") + # print(x.detach().cpu().numpy()) + + # process sparse features(using embeddings), resulting in a list of row vectors + ly = self.apply_emb(lS_o, lS_i) + # for y in ly: + # print(y.detach().cpu().numpy()) + + # interact features (dense and sparse) + z = self.interact_features(x, ly) + # print(z.detach().cpu().numpy()) + + # obtain probability of a click (using top mlp) + p = self.apply_mlp(z, self.top_l) + + # clamp output if needed + if 0.0 < self.loss_threshold and self.loss_threshold < 1.0: + z = torch.clamp(p, min=self.loss_threshold, max=(1.0 - self.loss_threshold)) + else: + z = p + + return z + + def parallel_forward(self, dense_x, lS_o, lS_i): + ### prepare model (overwrite) ### + # WARNING: # of devices must be >= batch size in parallel_forward call + batch_size = dense_x.size()[0] + ndevices = 
min(self.ndevices_available, batch_size, self.ntables) + device_ids = range(ndevices) + # WARNING: must redistribute the model if mini-batch size changes(this is common + # for last mini-batch, when # of elements in the dataset/batch size is not even + if self.ndevices_in_use != ndevices: + self.ndevices_in_use = ndevices + self.prepare_parallel_model(ndevices) + elif self.sync_dense_params: + # When training, replicate the new/updated mlp weights each iteration. + # For inference-only, this code should never run. + self.bot_l_replicas = replicate(self.bot_l, device_ids) + self.top_l_replicas = replicate(self.top_l, device_ids) + + ### prepare input (overwrite) ### + # scatter dense features (data parallelism) + # print(dense_x.device) + dense_x = scatter(dense_x, device_ids, dim=0) + # distribute sparse features (model parallelism) + if (self.ntables != len(lS_o)) or (self.ntables != len(lS_i)): + sys.exit("ERROR: corrupted model input detected in parallel_forward call") + + lS_o = [ + lS_o[k].to(torch.device("cuda:" + str(k % ndevices))) + for k in range(self.ntables) + ] + lS_i = [ + lS_i[k].to(torch.device("cuda:" + str(k % ndevices))) + for k in range(self.ntables) + ] + + ### compute results in parallel ### + # bottom mlp + # WARNING: Note that the self.bot_l is a list of bottom mlp modules + # that have been replicated across devices, while dense_x is a tuple of dense + # inputs that has been scattered across devices on the first (batch) dimension. + # The output is a list of tensors scattered across devices according to the + # distribution of dense_x. + x = parallel_apply(self.bot_l_replicas, dense_x, None, device_ids) + # debug prints + # print(x) + + # embeddings + ly = self.apply_emb(lS_o, lS_i) + # debug prints + # print(ly) + + # butterfly shuffle (implemented inefficiently for now) + # WARNING: Note that at this point we have the result of the embedding lookup + # for the entire batch on each device. 
We would like to obtain partial results + # corresponding to all embedding lookups, but part of the batch on each device. + # Therefore, matching the distribution of output of bottom mlp, so that both + # could be used for subsequent interactions on each device. + if self.ntables != len(ly): + sys.exit("ERROR: corrupted intermediate result in parallel_forward call") + + t_list = [scatter(ly[k], device_ids, dim=0) for k in range(self.ntables)] + + # adjust the list to be ordered per device + ly = list(map(lambda y: list(y), zip(*t_list))) + # debug prints + # print(ly) + + # interactions + z = parallel_apply(self.interact_features_l, list(zip(itertools.repeat(self.interact_features),x,ly))) + # debug prints + # print(z) + + # top mlp + # WARNING: Note that the self.top_l is a list of top mlp modules that + # have been replicated across devices, while z is a list of interaction results + # that by construction are scattered across devices on the first (batch) dim. + # The output is a list of tensors scattered across devices according to the + # distribution of z. 
+ p = parallel_apply(self.top_l_replicas, z, None, device_ids) + + ### gather the distributed results ### + p0 = gather(p, self.output_d, dim=0) + + # clamp output if needed + if 0.0 < self.loss_threshold and self.loss_threshold < 1.0: + z0 = torch.clamp( + p0, min=self.loss_threshold, max=(1.0 - self.loss_threshold) + ) + else: + z0 = p0 + + return z0 + + def print_weights(self): + if self.use_fbgemm_gpu and len(self.fbgemm_emb_l): + ntables_l = [ + len(e.fbgemm_gpu_emb_bag.embedding_specs) for e in self.fbgemm_emb_l + ] + for j in range(ntables_l[0] + 1): + for k, e in enumerate(self.fbgemm_emb_l): + if j < ntables_l[k]: + print( + e.fbgemm_gpu_emb_bag.split_embedding_weights()[j] + .detach() + .cpu() + .numpy() + ) + elif self.quantize_bits != 32: + for e in self.emb_l_q: + print(e.data.detach().cpu().numpy()) + else: # if self.emb_l: + for param in self.emb_l.parameters(): + print(param.detach().cpu().numpy()) + if isinstance(self.v_W_l, nn.ParameterList): + for param in self.v_W_l.parameters(): + print(param.detach().cpu().numpy()) + for param in self.bot_l.parameters(): + print(param.detach().cpu().numpy()) + for param in self.top_l.parameters(): + print(param.detach().cpu().numpy()) + + +def dash_separated_ints(value): + vals = value.split("-") + for val in vals: + try: + int(val) + except ValueError: + raise argparse.ArgumentTypeError( + "%s is not a valid dash separated list of ints" % value + ) + + return value + + +def dash_separated_floats(value): + vals = value.split("-") + for val in vals: + try: + float(val) + except ValueError: + raise argparse.ArgumentTypeError( + "%s is not a valid dash separated list of floats" % value + ) + + return value + + +def inference( + args, + dlrm, + best_acc_test, + best_auc_test, + test_ld, + device, + use_gpu, + log_iter=-1, +): + test_accu = 0 + test_samp = 0 + + if args.mlperf_logging: + scores = [] + targets = [] + + if args.fb5logger is not None: + fb5logger = FB5Logger(args.fb5logger) + fb5logger.header("DLRM", 
"OOTB", "eval", args.fb5config, score_metric=loggerconstants.EXPS) + + for i, testBatch in enumerate(test_ld): + # early exit if nbatches was set by the user and was exceeded + if nbatches > 0 and i >= nbatches: + break + + if i == args.warmup_steps and args.fb5logger is not None: + fb5logger.run_start() + + X_test, lS_o_test, lS_i_test, T_test, W_test, CBPP_test = unpack_batch( + testBatch + ) + + # Skip the batch if batch size not multiple of total ranks + if ext_dist.my_size > 1 and X_test.size(0) % ext_dist.my_size != 0: + print("Warning: Skiping the batch %d with size %d" % (i, X_test.size(0))) + continue + + # forward pass + Z_test = dlrm_wrap( + X_test, + lS_o_test, + lS_i_test, + use_gpu, + device, + ndevices=ndevices, + ) + ### gather the distributed results on each rank ### + # For some reason it requires explicit sync before all_gather call if + # tensor is on GPU memory + if Z_test.is_cuda: + torch.cuda.synchronize() + (_, batch_split_lengths) = ext_dist.get_split_lengths(X_test.size(0)) + if ext_dist.my_size > 1: + Z_test = ext_dist.all_gather(Z_test, batch_split_lengths) + + if args.mlperf_logging: + S_test = Z_test.detach().cpu().numpy() # numpy array + T_test = T_test.detach().cpu().numpy() # numpy array + scores.append(S_test) + targets.append(T_test) + else: + with record_function("DLRM accuracy compute"): + # compute loss and accuracy + S_test = Z_test.detach().cpu().numpy() # numpy array + T_test = T_test.detach().cpu().numpy() # numpy array + + mbs_test = T_test.shape[0] # = mini_batch_size except last + A_test = np.sum((np.round(S_test, 0) == T_test).astype(np.uint8)) + + test_accu += A_test + test_samp += mbs_test + + if args.fb5logger is not None: + fb5logger.run_stop(nbatches - args.warmup_steps, args.mini_batch_size) + + if args.mlperf_logging: + with record_function("DLRM mlperf sklearn metrics compute"): + scores = np.concatenate(scores, axis=0) + targets = np.concatenate(targets, axis=0) + + metrics = { + "recall": lambda y_true, 
y_score: sklearn.metrics.recall_score( + y_true=y_true, y_pred=np.round(y_score) + ), + "precision": lambda y_true, y_score: sklearn.metrics.precision_score( + y_true=y_true, y_pred=np.round(y_score) + ), + "f1": lambda y_true, y_score: sklearn.metrics.f1_score( + y_true=y_true, y_pred=np.round(y_score) + ), + "ap": sklearn.metrics.average_precision_score, + "roc_auc": sklearn.metrics.roc_auc_score, + "accuracy": lambda y_true, y_score: sklearn.metrics.accuracy_score( + y_true=y_true, y_pred=np.round(y_score) + ), + } + + validation_results = {} + for metric_name, metric_function in metrics.items(): + validation_results[metric_name] = metric_function(targets, scores) + writer.add_scalar( + "mlperf-metrics-test/" + metric_name, + validation_results[metric_name], + log_iter, + ) + acc_test = validation_results["accuracy"] + else: + acc_test = test_accu / test_samp + writer.add_scalar("Test/Acc", acc_test, log_iter) + + model_metrics_dict = { + "nepochs": args.nepochs, + "nbatches": nbatches, + "nbatches_test": nbatches_test, + "state_dict": dlrm.state_dict(), + "test_acc": acc_test, + } + + if args.mlperf_logging: + is_best = validation_results["roc_auc"] > best_auc_test + if is_best: + best_auc_test = validation_results["roc_auc"] + model_metrics_dict["test_auc"] = best_auc_test + print( + "recall {:.4f}, precision {:.4f},".format( + validation_results["recall"], + validation_results["precision"], + ) + + " f1 {:.4f}, ap {:.4f},".format( + validation_results["f1"], validation_results["ap"] + ) + + " auc {:.4f}, best auc {:.4f},".format( + validation_results["roc_auc"], best_auc_test + ) + + " accuracy {:3.3f} %, best accuracy {:3.3f} %".format( + validation_results["accuracy"] * 100, best_acc_test * 100 + ), + flush=True, + ) + else: + is_best = acc_test > best_acc_test + if is_best: + best_acc_test = acc_test + print( + " accuracy {:3.3f} %, best {:3.3f} %".format( + acc_test * 100, best_acc_test * 100 + ), + flush=True, + ) + return model_metrics_dict, is_best + + 
+def run(): + ### parse arguments ### + parser = argparse.ArgumentParser( + description="Train Deep Learning Recommendation Model (DLRM)" + ) + # model related parameters + parser.add_argument("--arch-sparse-feature-size", type=int, default=2) + parser.add_argument( + "--arch-embedding-size", type=dash_separated_ints, default="4-3-2" + ) + # j will be replaced with the table number + parser.add_argument("--arch-mlp-bot", type=dash_separated_ints, default="4-3-2") + parser.add_argument("--arch-mlp-top", type=dash_separated_ints, default="4-2-1") + parser.add_argument( + "--arch-interaction-op", type=str, choices=["dot", "cat"], default="dot" + ) + parser.add_argument("--arch-interaction-itself", action="store_true", default=False) + parser.add_argument( + "--weighted-pooling", type=str, choices=["fixed", "learned", None], default=None + ) + + # embedding table options + parser.add_argument("--md-flag", action="store_true", default=False) + parser.add_argument("--md-threshold", type=int, default=200) + parser.add_argument("--md-temperature", type=float, default=0.3) + parser.add_argument("--md-round-dims", action="store_true", default=False) + parser.add_argument("--qr-flag", action="store_true", default=False) + parser.add_argument("--qr-threshold", type=int, default=200) + parser.add_argument("--qr-operation", type=str, default="mult") + parser.add_argument("--qr-collisions", type=int, default=4) + # activations and loss + parser.add_argument("--activation-function", type=str, default="relu") + parser.add_argument("--loss-function", type=str, default="mse") # or bce or wbce + parser.add_argument( + "--loss-weights", type=dash_separated_floats, default="1.0-1.0" + ) # for wbce + parser.add_argument("--loss-threshold", type=float, default=0.0) # 1.0e-7 + parser.add_argument("--round-targets", type=bool, default=False) + # data + parser.add_argument("--data-size", type=int, default=1) + parser.add_argument("--num-batches", type=int, default=0) + parser.add_argument( + 
"--data-generation", type=str, default="random" + ) # synthetic or dataset + parser.add_argument( + "--rand-data-dist", type=str, default="uniform" + ) # uniform or gaussian + parser.add_argument("--rand-data-min", type=float, default=0) + parser.add_argument("--rand-data-max", type=float, default=1) + parser.add_argument("--rand-data-mu", type=float, default=-1) + parser.add_argument("--rand-data-sigma", type=float, default=1) + parser.add_argument("--data-trace-file", type=str, default="./input/dist_emb_j.log") + parser.add_argument("--data-set", type=str, default="kaggle") # or terabyte + parser.add_argument("--raw-data-file", type=str, default="") + parser.add_argument("--processed-data-file", type=str, default="") + parser.add_argument("--data-randomize", type=str, default="total") # or day or none + parser.add_argument("--data-trace-enable-padding", type=bool, default=False) + parser.add_argument("--max-ind-range", type=int, default=-1) + parser.add_argument("--data-sub-sample-rate", type=float, default=0.0) # in [0, 1] + parser.add_argument("--num-indices-per-lookup", type=int, default=10) + parser.add_argument("--num-indices-per-lookup-fixed", type=bool, default=False) + parser.add_argument("--num-workers", type=int, default=0) + parser.add_argument("--memory-map", action="store_true", default=False) + # training + parser.add_argument("--mini-batch-size", type=int, default=1) + parser.add_argument("--nepochs", type=int, default=1) + parser.add_argument("--learning-rate", type=float, default=0.01) + parser.add_argument("--print-precision", type=int, default=5) + parser.add_argument("--numpy-rand-seed", type=int, default=123) + parser.add_argument("--sync-dense-params", type=bool, default=True) + parser.add_argument("--optimizer", type=str, default="sgd") + parser.add_argument( + "--dataset-multiprocessing", + action="store_true", + default=False, + help="The Kaggle dataset can be multiprocessed in an environment \ + with more than 7 CPU cores and more than 
20 GB of memory. \n \ + The Terabyte dataset can be multiprocessed in an environment \ + with more than 24 CPU cores and at least 1 TB of memory.", + ) + # inference + parser.add_argument("--inference-only", action="store_true", default=False) + # quantize + parser.add_argument("--quantize-mlp-with-bit", type=int, default=32) + parser.add_argument("--quantize-emb-with-bit", type=int, default=32) + # onnx + parser.add_argument("--save-onnx", action="store_true", default=False) + # gpu + parser.add_argument("--use-gpu", action="store_true", default=False) + parser.add_argument("--use-fbgemm-gpu", action="store_true", default=False) + parser.add_argument( + "--fbgemm-gpu-codegen-pref", + type=str, + choices=["Split", "IntN"], + default="Split", + ) + # torch2trt + parser.add_argument("--use-torch2trt-for-mlp", action="store_true", default=False) + # distributed + parser.add_argument("--local_rank", type=int, default=-1) + parser.add_argument("--dist-backend", type=str, default="") + # debugging and profiling + parser.add_argument("--print-freq", type=int, default=1) + parser.add_argument("--test-freq", type=int, default=-1) + parser.add_argument("--test-mini-batch-size", type=int, default=-1) + parser.add_argument("--test-num-workers", type=int, default=-1) + parser.add_argument("--print-time", action="store_true", default=False) + parser.add_argument("--print-wall-time", action="store_true", default=False) + parser.add_argument("--print-accumulated-time", action="store_true", default=False) + parser.add_argument("--debug-mode", action="store_true", default=False) + parser.add_argument("--enable-profiling", action="store_true", default=False) + parser.add_argument("--plot-compute-graph", action="store_true", default=False) + parser.add_argument("--tensor-board-filename", type=str, default="run_kaggle_pt") + # store/load model + parser.add_argument("--save-model", type=str, default="") + parser.add_argument("--load-model", type=str, default="") + # mlperf logging 
(disables other output and stops early) + parser.add_argument("--mlperf-logging", action="store_true", default=False) + # stop at target accuracy Kaggle 0.789, Terabyte (sub-sampled=0.875) 0.8107 + parser.add_argument("--mlperf-acc-threshold", type=float, default=0.0) + # stop at target AUC Terabyte (no subsampling) 0.8025 + parser.add_argument("--mlperf-auc-threshold", type=float, default=0.0) + parser.add_argument("--mlperf-bin-loader", action="store_true", default=False) + parser.add_argument("--mlperf-bin-shuffle", action="store_true", default=False) + # mlperf gradient accumulation iterations + parser.add_argument("--mlperf-grad-accum-iter", type=int, default=1) + # LR policy + parser.add_argument("--lr-num-warmup-steps", type=int, default=0) + parser.add_argument("--lr-decay-start-step", type=int, default=0) + parser.add_argument("--lr-num-decay-steps", type=int, default=0) + + parser.add_argument("--precache-ml-data", type=int, nargs='?', default=None, const=sys.maxsize) + parser.add_argument("--warmup-steps", type=int, default=0) + # FB5 Logging + parser.add_argument("--fb5logger", type=str, default=None) + parser.add_argument("--fb5config", type=str, default="tiny") + + global args + global nbatches + global nbatches_test + global writer + args = parser.parse_args() + + if args.dataset_multiprocessing: + assert float(sys.version[:3]) > 3.7, ( + "The dataset_multiprocessing " + + "flag is susceptible to a bug in Python 3.7 and under. 
" + + "https://github.com/facebookresearch/dlrm/issues/172" + ) + + if args.mlperf_logging: + mlperf_logger.log_event(key=mlperf_logger.constants.CACHE_CLEAR, value=True) + mlperf_logger.log_start( + key=mlperf_logger.constants.INIT_START, log_all_ranks=True + ) + + if args.weighted_pooling is not None: + if args.qr_flag: + sys.exit("ERROR: quotient remainder with weighted pooling is not supported") + if args.md_flag: + sys.exit("ERROR: mixed dimensions with weighted pooling is not supported") + if args.quantize_emb_with_bit in [4, 8]: + if args.qr_flag: + sys.exit( + "ERROR: 4 and 8-bit quantization with quotient remainder is not supported" + ) + if args.md_flag: + sys.exit( + "ERROR: 4 and 8-bit quantization with mixed dimensions is not supported" + ) + if args.quantize_emb_with_bit in [4, 8, 16] and ( + not fbgemm_gpu or not args.use_fbgemm_gpu + ): + extra_info = "" + if not fbgemm_gpu: + extra_info += "\nfbgemm_gpu module failed to import.\n\n" + fbgemm_gpu_import_error_msg + if not args.use_fbgemm_gpu: + extra_info += "--use-fbgemm-gpu not set. " + + if not args.inference_only: + sys.exit( + "ERROR: Training quantized embeddings requires fbgemm_gpu. " + + extra_info + ) + elif args.use_gpu: + sys.exit( + "ERROR: Quantized embeddings on GPU requires fbgemm_gpu. " + extra_info + ) + elif args.quantize_emb_with_bit == 16: + sys.exit( + "ERROR: 16-bit quantized embeddings requires fbgemm_gpu. " + extra_info + ) + + assert args.quantize_emb_with_bit in [ + 4, + 8, + 16, + 32, + ], "only support 4/8/16/32-bit but got {}".format(args.quantize_emb_with_bit) + + if args.use_gpu: + assert torch.cuda.is_available(), "No cuda device is available." 
+ if args.use_fbgemm_gpu: + assert fbgemm_gpu, ("\nfbgemm_gpu module failed to import.\n\n" + fbgemm_gpu_import_error_msg) + use_gpu = args.use_gpu + use_fbgemm_gpu = args.use_fbgemm_gpu + + ### some basic setup ### + np.random.seed(args.numpy_rand_seed) + np.set_printoptions(precision=args.print_precision) + torch.set_printoptions(precision=args.print_precision) + torch.manual_seed(args.numpy_rand_seed) + + if args.test_mini_batch_size < 0: + # if the parameter is not set, use the training batch size + args.test_mini_batch_size = args.mini_batch_size + if args.test_num_workers < 0: + # if the parameter is not set, use the same parameter for training + args.test_num_workers = args.num_workers + + if not args.debug_mode: + ext_dist.init_distributed( + local_rank=args.local_rank, use_gpu=use_gpu, backend=args.dist_backend + ) + + if use_gpu: + torch.cuda.manual_seed_all(args.numpy_rand_seed) + torch.backends.cudnn.deterministic = True + if ext_dist.my_size > 1: + ngpus = 1 + device = torch.device("cuda", ext_dist.my_local_rank) + else: + ngpus = torch.cuda.device_count() + device = torch.device("cuda", 0) + print("Using {} GPU(s)...".format(ngpus)) + else: + device = torch.device("cpu") + print("Using CPU...") + + ### prepare training data ### + ln_bot = np.fromstring(args.arch_mlp_bot, dtype=int, sep="-") + # input data + + if args.mlperf_logging: + mlperf_logger.barrier() + mlperf_logger.log_end(key=mlperf_logger.constants.INIT_STOP) + mlperf_logger.barrier() + mlperf_logger.log_start(key=mlperf_logger.constants.RUN_START) + mlperf_logger.barrier() + + if args.data_generation == "dataset": + train_data, train_ld, test_data, test_ld = dp.make_criteo_data_and_loaders(args) + table_feature_map = {idx: idx for idx in range(len(train_data.counts))} + nbatches = args.num_batches if args.num_batches > 0 else len(train_ld) + nbatches_test = len(test_ld) + + ln_emb = train_data.counts + # enforce maximum limit on number of vectors per embedding + if args.max_ind_range > 0: 
+ ln_emb = np.array( + list( + map( + lambda x: x if x < args.max_ind_range else args.max_ind_range, + ln_emb, + ) + ) + ) + else: + ln_emb = np.array(ln_emb) + m_den = train_data.m_den + ln_bot[0] = m_den + else: + # input and target at random + ln_emb = np.fromstring(args.arch_embedding_size, dtype=int, sep="-") + m_den = ln_bot[0] + train_data, train_ld, test_data, test_ld = dp.make_random_data_and_loader( + args, ln_emb, m_den, cache_size=args.precache_ml_data + ) + nbatches = args.num_batches if args.num_batches > 0 else len(train_ld) + nbatches_test = len(test_ld) + + assert args.num_batches > args.warmup_steps, (f"Change --warmup-steps={args.warmup_steps} to be lower than --num-batches={args.num_batches}.") + + args.ln_emb = ln_emb.tolist() + if args.mlperf_logging: + print("command line args: ", json.dumps(vars(args))) + + ### parse command line arguments ### + m_spa = args.arch_sparse_feature_size + ln_emb = np.asarray(ln_emb) + num_fea = ln_emb.size + 1 # num sparse + num dense features + + if args.use_fbgemm_gpu: + assert m_spa % 4 == 0, ( + f"{m_spa} % 4 is not 0, but fbgemm_gpu requires the embedding dim " + + "(--arch-sparse-feature-size number) to be evenly divisible by 4." 
+ ) + + m_den_out = ln_bot[ln_bot.size - 1] + if args.arch_interaction_op == "dot": + # approach 1: all + # num_int = num_fea * num_fea + m_den_out + # approach 2: unique + if args.arch_interaction_itself: + num_int = (num_fea * (num_fea + 1)) // 2 + m_den_out + else: + num_int = (num_fea * (num_fea - 1)) // 2 + m_den_out + elif args.arch_interaction_op == "cat": + num_int = num_fea * m_den_out + else: + sys.exit( + "ERROR: --arch-interaction-op=" + + args.arch_interaction_op + + " is not supported" + ) + arch_mlp_top_adjusted = str(num_int) + "-" + args.arch_mlp_top + ln_top = np.fromstring(arch_mlp_top_adjusted, dtype=int, sep="-") + + # sanity check: feature sizes and mlp dimensions must match + if m_den != ln_bot[0]: + sys.exit( + "ERROR: arch-dense-feature-size " + + str(m_den) + + " does not match first dim of bottom mlp " + + str(ln_bot[0]) + ) + if args.qr_flag: + if args.qr_operation == "concat" and 2 * m_spa != m_den_out: + sys.exit( + "ERROR: 2 arch-sparse-feature-size " + + str(2 * m_spa) + + " does not match last dim of bottom mlp " + + str(m_den_out) + + " (note that the last dim of bottom mlp must be 2x the embedding dim)" + ) + if args.qr_operation != "concat" and m_spa != m_den_out: + sys.exit( + "ERROR: arch-sparse-feature-size " + + str(m_spa) + + " does not match last dim of bottom mlp " + + str(m_den_out) + ) + else: + if m_spa != m_den_out: + sys.exit( + "ERROR: arch-sparse-feature-size " + + str(m_spa) + + " does not match last dim of bottom mlp " + + str(m_den_out) + ) + if num_int != ln_top[0]: + sys.exit( + "ERROR: # of feature interactions " + + str(num_int) + + " does not match first dimension of top mlp " + + str(ln_top[0]) + ) + + # assign mixed dimensions if applicable + if args.md_flag: + m_spa = md_solver( + torch.tensor(ln_emb), + args.md_temperature, # alpha + d0=m_spa, + round_dim=args.md_round_dims, + ).tolist() + if use_fbgemm_gpu: + for m in m_spa: + assert m % 4 == 0, ( + "Found an incompatible embedding dim in m_spa. 
" + + f"{m} % 4 is not 0, but fbgemm_gpu requires the " + + "embedding dim to be evenly divisible by 4." + ) + + # test prints (model arch) + if args.debug_mode: + print("model arch:") + print( + "mlp top arch " + + str(ln_top.size - 1) + + " layers, with input to output dimensions:" + ) + print(ln_top) + print("# of interactions") + print(num_int) + print( + "mlp bot arch " + + str(ln_bot.size - 1) + + " layers, with input to output dimensions:" + ) + print(ln_bot) + print("# of features (sparse and dense)") + print(num_fea) + print("dense feature size") + print(m_den) + print("sparse feature size") + print(m_spa) + print( + "# of embeddings (= # of sparse features) " + + str(ln_emb.size) + + ", with dimensions " + + str(m_spa) + + "x:" + ) + print(ln_emb) + + print("data (inputs and targets):") + for j, inputBatch in enumerate(train_ld): + X, lS_o, lS_i, T, W, CBPP = unpack_batch(inputBatch) + + torch.set_printoptions(precision=4) + # early exit if nbatches was set by the user and has been exceeded + if nbatches > 0 and j >= nbatches: + break + print("mini-batch: %d" % j) + print(X.detach().cpu()) + # transform offsets to lengths when printing + print( + torch.IntTensor( + [ + np.diff( + S_o.detach().cpu().tolist() + list(lS_i[i].shape) + ).tolist() + for i, S_o in enumerate(lS_o) + ] + ) + ) + print([S_i.detach().cpu() for S_i in lS_i]) + print(T.detach().cpu()) + + global ndevices + ndevices = min(ngpus, args.mini_batch_size, num_fea - 1) if use_gpu else -1 + + ### construct the neural network specified above ### + # WARNING: to obtain exactly the same initialization for + # the weights we need to start from the same random seed. 
+ # np.random.seed(args.numpy_rand_seed) + global dlrm + dlrm = DLRM_Net( + m_spa, + ln_emb, + ln_bot, + ln_top, + arch_interaction_op=args.arch_interaction_op, + arch_interaction_itself=args.arch_interaction_itself, + sigmoid_bot=-1, + sigmoid_top=ln_top.size - 2, + sync_dense_params=args.sync_dense_params, + loss_threshold=args.loss_threshold, + ndevices=ndevices, + qr_flag=args.qr_flag, + qr_operation=args.qr_operation, + qr_collisions=args.qr_collisions, + qr_threshold=args.qr_threshold, + md_flag=args.md_flag, + md_threshold=args.md_threshold, + weighted_pooling=args.weighted_pooling, + loss_function=args.loss_function, + learning_rate=args.learning_rate, + use_gpu=use_gpu, + use_fbgemm_gpu=use_fbgemm_gpu, + fbgemm_gpu_codegen_pref=args.fbgemm_gpu_codegen_pref, + inference_only=args.inference_only, + quantize_mlp_with_bit=args.quantize_mlp_with_bit, + quantize_emb_with_bit=args.quantize_emb_with_bit, + ) + + # test prints + if args.debug_mode: + print("initial parameters (weights and bias):") + dlrm.print_weights() + + # In dlrm.quantize_embedding called below, the torch quantize calls run + # on cpu tensors only. They cannot quantize tensors stored on the gpu. + # So quantization occurs on cpu tensors before transferring them to gpu if + # use_gpu is enabled. + if args.quantize_emb_with_bit != 32: + dlrm.quantize_embedding(args.quantize_emb_with_bit) + + if not args.inference_only: + assert args.quantize_mlp_with_bit == 32, ( + "Dynamic quantization for mlp requires " + + "--inference-only because training is not supported" + ) + else: + # Currently only INT8 and FP16 quantized types are supported for quantized MLP inference. 
+ # By default we don't do the quantization: quantize_{mlp,emb}_with_bit == 32 (FP32) + assert args.quantize_mlp_with_bit in [ + 8, + 16, + 32, + ], "only support 8/16/32-bit but got {}".format(args.quantize_mlp_with_bit) + + if args.quantize_mlp_with_bit != 32: + assert not use_gpu, ( + "Cannot run dynamic quantization for mlp " + + "with --use-gpu enabled, because DynamicQuantizedLinear's " + + "forward call calls 'quantized::linear_dynamic', which cannot " + + "run with arguments from the 'CUDA' backend." + ) + if args.quantize_mlp_with_bit in [8]: + quantize_dtype = torch.qint8 + else: + quantize_dtype = torch.float16 + dlrm.top_l = torch.quantization.quantize_dynamic( + dlrm.top_l, {torch.nn.Linear}, quantize_dtype + ) + dlrm.bot_l = torch.quantization.quantize_dynamic( + dlrm.bot_l, {torch.nn.Linear}, quantize_dtype + ) + + # Prep work for embedding tables and model transfer: + # Handling single-cpu and single-gpu modes + # NOTE: This also handles dist-backend modes (CLI args --dist-backend=nccl, + # --dist-backend=ccl, and --dist-backend=mpi) because in these modes each + # process runs in single-gpu mode. For example, if 8 processes are launched + # running dlrm_s_pytorch.py with --dist-backend=nccl --use-gpu, each process + # will run in single-gpu mode, resulting in 8 gpus total running distributed + # training or distributed inference if --inference-only is enabled. 
+ if dlrm.ndevices_available <= 1: + if use_fbgemm_gpu: + dlrm.fbgemm_emb_l = nn.ModuleList( + [ + fbgemm_gpu_emb_bag_wrapper( + device, + dlrm.emb_l if dlrm.emb_l else dlrm.emb_l_q, + dlrm.m_spa, + dlrm.quantize_bits, + dlrm.learning_rate, + dlrm.fbgemm_gpu_codegen_pref, + dlrm.requires_grad, + ) + ] + ) + if use_gpu: + dlrm = dlrm.to(device) + if dlrm.weighted_pooling == "fixed": + for k, w in enumerate(dlrm.v_W_l): + dlrm.v_W_l[k] = w.cuda() + else: + # Handing Multi-gpu mode + dlrm.bot_l = dlrm.bot_l.to(device) + dlrm.top_l = dlrm.top_l.to(device) + dlrm.prepare_parallel_model(ndevices) + + if args.use_torch2trt_for_mlp: + if torch2trt and use_gpu and args.inference_only and args.quantize_mlp_with_bit == 32: + bot_l_sample_input = torch.ones([1, ln_bot[0]], dtype=torch.float32).cuda() + top_l_sample_input = torch.ones([1, ln_top[0]], dtype=torch.float32).cuda() + dlrm.bot_l = torch2trt.torch2trt(dlrm.bot_l, (bot_l_sample_input,)) + dlrm.top_l = torch2trt.torch2trt(dlrm.top_l, (top_l_sample_input,)) + elif torch2trt is None: + sys.exit("\ntorch2trt module failed to import.\n\n" + torch2trt_import_error_msg) + else: + error_msg = "ERROR: When --use-torch2trt-for-mlp is enabled, " + if not use_gpu: + error_msg += "--use-gpu must be enabled, " + if not args.inference_only: + error_msg += "--inference-only must be enabled, " + if args.quantize_mlp_with_bit != 32: + error_msg += "--quantize-mlp-with-bit must be disabled. " + error_msg = error_msg[:-2] + "." 
+ sys.exit(error_msg) + + # distribute data parallel mlps + if ext_dist.my_size > 1: + if use_gpu: + device_ids = [ext_dist.my_local_rank] + dlrm.bot_l = ext_dist.DDP(dlrm.bot_l, device_ids=device_ids) + dlrm.top_l = ext_dist.DDP(dlrm.top_l, device_ids=device_ids) + else: + dlrm.bot_l = ext_dist.DDP(dlrm.bot_l) + dlrm.top_l = ext_dist.DDP(dlrm.top_l) + + if not args.inference_only: + # specify the optimizer algorithm + opts = { + "sgd": torch.optim.SGD, + "rwsadagrad": RowWiseSparseAdagrad.RWSAdagrad, + "adagrad": apex.optimizers.FusedAdagrad + if apex + else torch.optim.Adagrad, + } + + parameters = ( + dlrm.parameters() + if ext_dist.my_size == 1 + else [ + { + "params": [ + p + for emb in ( + [e.fbgemm_gpu_emb_bag for e in dlrm.fbgemm_emb_l] + if use_fbgemm_gpu + else dlrm.emb_l_q + if dlrm.quantize_bits != 32 + else dlrm.emb_l + ) + for p in emb.parameters() + ], + "lr": args.learning_rate, + }, + # TODO check this lr setup + # bottom mlp has no data parallelism + # need to check how do we deal with top mlp + { + "params": dlrm.bot_l.parameters(), + "lr": args.learning_rate, + }, + { + "params": dlrm.top_l.parameters(), + "lr": args.learning_rate, + }, + ] + ) + optimizer = opts[args.optimizer](parameters, lr=args.learning_rate) + lr_scheduler = LRPolicyScheduler( + optimizer, + args.lr_num_warmup_steps, + args.lr_decay_start_step, + args.lr_num_decay_steps, + ) + + # Guarantee GPU setup has completed before training or inference starts. 
+ if use_gpu: + torch.cuda.synchronize() + + ### main loop ### + + # training or inference + best_acc_test = 0 + best_auc_test = 0 + skip_upto_epoch = 0 + skip_upto_batch = 0 + total_time = 0 + total_loss = 0 + total_iter = 0 + total_samp = 0 + + if args.mlperf_logging: + mlperf_logger.mlperf_submission_log("dlrm") + mlperf_logger.log_event( + key=mlperf_logger.constants.SEED, value=args.numpy_rand_seed + ) + mlperf_logger.log_event( + key=mlperf_logger.constants.GLOBAL_BATCH_SIZE, value=args.mini_batch_size + ) + + # Load model is specified + if not (args.load_model == ""): + print("Loading saved model {}".format(args.load_model)) + if use_gpu: + if dlrm.ndevices_available > 1: + # NOTE: when targeting inference on multiple GPUs, + # load the model as is on CPU or GPU, with the move + # to multiple GPUs to be done in parallel_forward + ld_model = torch.load(args.load_model) + else: + # NOTE: when targeting inference on single GPU, + # note that the call to .to(device) has already happened + ld_model = torch.load( + args.load_model, + map_location=torch.device("cuda") + # map_location=lambda storage, loc: storage.cuda(0) + ) + else: + # when targeting inference on CPU + ld_model = torch.load(args.load_model, map_location=torch.device("cpu")) + dlrm.load_state_dict(ld_model["state_dict"]) + ld_j = ld_model["iter"] + ld_k = ld_model["epoch"] + ld_nepochs = ld_model["nepochs"] + ld_nbatches = ld_model["nbatches"] + ld_nbatches_test = ld_model["nbatches_test"] + ld_train_loss = ld_model["train_loss"] + ld_total_loss = ld_model["total_loss"] + if args.mlperf_logging: + ld_gAUC_test = ld_model["test_auc"] + ld_acc_test = ld_model["test_acc"] + if not args.inference_only: + optimizer.load_state_dict(ld_model["opt_state_dict"]) + best_acc_test = ld_acc_test + total_loss = ld_total_loss + skip_upto_epoch = ld_k # epochs + skip_upto_batch = ld_j # batches + else: + args.print_freq = ld_nbatches + args.test_freq = 0 + + print( + "Saved at: epoch = {:d}/{:d}, batch = 
{:d}/{:d}, ntbatch = {:d}".format( + ld_k, ld_nepochs, ld_j, ld_nbatches, ld_nbatches_test + ) + ) + print( + "Training state: loss = {:.6f}".format( + ld_train_loss, + ) + ) + if args.mlperf_logging: + print( + "Testing state: accuracy = {:3.3f} %, auc = {:.3f}".format( + ld_acc_test * 100, ld_gAUC_test + ) + ) + else: + print("Testing state: accuracy = {:3.3f} %".format(ld_acc_test * 100)) + + print("time/loss/accuracy (if enabled):") + + if args.mlperf_logging: + # LR is logged twice for now because of a compliance checker bug + mlperf_logger.log_event( + key=mlperf_logger.constants.OPT_BASE_LR, value=args.learning_rate + ) + mlperf_logger.log_event( + key=mlperf_logger.constants.OPT_LR_WARMUP_STEPS, + value=args.lr_num_warmup_steps, + ) + + # use logging keys from the official HP table and not from the logging library + mlperf_logger.log_event( + key="sgd_opt_base_learning_rate", value=args.learning_rate + ) + mlperf_logger.log_event( + key="lr_decay_start_steps", value=args.lr_decay_start_step + ) + mlperf_logger.log_event( + key="sgd_opt_learning_rate_decay_steps", value=args.lr_num_decay_steps + ) + mlperf_logger.log_event(key="sgd_opt_learning_rate_decay_poly_power", value=2) + + tb_file = "./" + args.tensor_board_filename + writer = SummaryWriter(tb_file) + + # Pre-cache samples. 
+ if args.precache_ml_data: + for _ in (test_ld if args.inference_only else train_ld): + pass + + ext_dist.barrier() + with torch.autograd.profiler.profile( + args.enable_profiling, use_cuda=use_gpu, record_shapes=True + ) as prof: + + if not args.inference_only: + + if args.fb5logger is not None: + fb5logger = FB5Logger(args.fb5logger) + fb5logger.header("DLRM", "OOTB", "train", args.fb5config, score_metric=loggerconstants.EXPS) + + k = 0 + while k < args.nepochs: + if args.mlperf_logging: + mlperf_logger.barrier() + mlperf_logger.log_start( + key=mlperf_logger.constants.BLOCK_START, + metadata={ + mlperf_logger.constants.FIRST_EPOCH_NUM: (k + 1), + mlperf_logger.constants.EPOCH_COUNT: 1, + }, + ) + mlperf_logger.barrier() + mlperf_logger.log_start( + key=mlperf_logger.constants.EPOCH_START, + metadata={mlperf_logger.constants.EPOCH_NUM: (k + 1)}, + ) + + if k < skip_upto_epoch: + continue + + if args.print_accumulated_time: + accum_time_begin = time_wrap(use_gpu) + + if args.mlperf_logging: + previous_iteration_time = None + + for j, inputBatch in enumerate(train_ld): + if j == 0 and args.save_onnx: + X_onnx, lS_o_onnx, lS_i_onnx, _, _, _ = unpack_batch(inputBatch) + + if j < skip_upto_batch: + continue + + if k == 0 and j == args.warmup_steps and args.fb5logger is not None: + fb5logger.run_start() + + X, lS_o, lS_i, T, W, CBPP = unpack_batch(inputBatch) + + if args.mlperf_logging: + current_time = time_wrap(use_gpu) + if previous_iteration_time: + iteration_time = current_time - previous_iteration_time + else: + iteration_time = 0 + previous_iteration_time = current_time + else: + t1 = time_wrap(use_gpu) + + # early exit if nbatches was set by the user and has been exceeded + if nbatches > 0 and j >= nbatches: + break + + # Skip the batch if batch size not multiple of total ranks + if ext_dist.my_size > 1 and X.size(0) % ext_dist.my_size != 0: + print( + "Warning: Skiping the batch %d with size %d" + % (j, X.size(0)) + ) + continue + + mbs = T.shape[0] # = 
args.mini_batch_size except maybe for last + + # forward pass + Z = dlrm_wrap( + X, + lS_o, + lS_i, + use_gpu, + device, + ndevices=ndevices, + ) + + if ext_dist.my_size > 1: + T = T[ext_dist.get_my_slice(mbs)] + W = W[ext_dist.get_my_slice(mbs)] + + # loss + E = loss_fn_wrap(Z, T, use_gpu, device) + + # compute loss and accuracy + L = E.detach().cpu().numpy() # numpy array + # training accuracy is not disabled + # S = Z.detach().cpu().numpy() # numpy array + # T = T.detach().cpu().numpy() # numpy array + + # # print("res: ", S) + + # # print("j, train: BCE", j, L) + + # mbs = T.shape[0] # = args.mini_batch_size except maybe for last + # A = np.sum((np.round(S, 0) == T).astype(np.uint8)) + + with record_function("DLRM backward"): + # Update optimizer parameters to train weights instantiated lazily in + # the parallel_forward call. + if dlrm.ndevices_available > 1 and dlrm.add_new_weights_to_params: + + # Pop any prior extra parameters. Priors may exist because + # self.parallel_model_is_not_prepared is set back to True + # when self.parallel_model_batch_size != batch_size. + # Search "self.parallel_model_batch_size != batch_size" in code. + if "lazy_params" in optimizer.param_groups[-1].keys(): + optimizer.param_groups.pop() + + # dlrm.v_W_l_l is a list of nn.ParameterLists, one ParameterList per gpu. + # Flatten the list of nn.ParameterList to one nn.ParameterList, + # and add it to the trainable params list. 
+ lazy_params = nn.ParameterList() + if dlrm.weighted_pooling == "learned": + lazy_params.extend( + nn.ParameterList( + [p for p_l in dlrm.v_W_l_l for p in p_l] + ) + ) + if dlrm.use_fbgemm_gpu: + lazy_params.extend( + nn.ParameterList( + [ + emb + for emb_ in dlrm.fbgemm_emb_l + for emb in emb_.fbgemm_gpu_emb_bag.parameters() + ] + ) + ) + lazy_params_dict = optimizer.param_groups[0] + lazy_params_dict["lazy_params"] = True + lazy_params_dict["params"] = lazy_params + optimizer.param_groups.append(lazy_params_dict) + dlrm.add_new_weights_to_params = False + # Run "[[t.device.type for t in grp['params']] for grp in optimizer.param_groups]" + # to view devices used by tensors in the param groups. + + # scaled error gradient propagation + # (where we do not accumulate gradients across mini-batches) + if ( + args.mlperf_logging + and (j + 1) % args.mlperf_grad_accum_iter == 0 + ) or not args.mlperf_logging: + optimizer.zero_grad() + # backward pass + E.backward() + + # optimizer + if ( + args.mlperf_logging + and (j + 1) % args.mlperf_grad_accum_iter == 0 + ) or not args.mlperf_logging: + optimizer.step() + lr_scheduler.step() + + if args.mlperf_logging: + total_time += iteration_time + else: + t2 = time_wrap(use_gpu) + total_time += t2 - t1 + + total_loss += L * mbs + total_iter += 1 + total_samp += mbs + + should_print = ((j + 1) % args.print_freq == 0) or ( + j + 1 == nbatches + ) + should_test = ( + (args.test_freq > 0) + and (args.data_generation in ["dataset", "random"]) + and (((j + 1) % args.test_freq == 0) or (j + 1 == nbatches)) + ) + + # print time, loss and accuracy + if should_print or should_test: + gT = 1000.0 * total_time / total_iter if args.print_time else -1 + total_time = 0 + + train_loss = total_loss / total_samp + total_loss = 0 + + str_run_type = ( + "inference" if args.inference_only else "training" + ) + + wall_time = "" + if args.print_wall_time: + wall_time = " ({})".format(time.strftime("%H:%M")) + + print( + "Finished {} it {}/{} of epoch 
{}, {:.2f} ms/it,".format( + str_run_type, j + 1, nbatches, k, gT + ) + + " loss {:.6f}".format(train_loss) + + wall_time, + flush=True, + ) + + if args.print_accumulated_time and ext_dist.my_rank < 2: + current_unix_time = time_wrap(use_gpu) + ext_dist.orig_print( + "Accumulated time so far: {} for process {} for step {} at {}".format( + current_unix_time - accum_time_begin, + ext_dist.my_rank, + j + 1, + current_unix_time, + ) + ) + + log_iter = nbatches * k + j + 1 + writer.add_scalar("Train/Loss", train_loss, log_iter) + + total_iter = 0 + total_samp = 0 + + # testing + if should_test: + epoch_num_float = (j + 1) / len(train_ld) + k + 1 + if args.mlperf_logging: + mlperf_logger.barrier() + mlperf_logger.log_start( + key=mlperf_logger.constants.EVAL_START, + metadata={ + mlperf_logger.constants.EPOCH_NUM: epoch_num_float + }, + ) + + # don't measure training iter time in a test iteration + if args.mlperf_logging: + previous_iteration_time = None + print( + "Testing at - {}/{} of epoch {},".format(j + 1, nbatches, k) + ) + model_metrics_dict, is_best = inference( + args, + dlrm, + best_acc_test, + best_auc_test, + test_ld, + device, + use_gpu, + log_iter, + ) + + if ( + is_best + and not (args.save_model == "") + and not args.inference_only + ): + model_metrics_dict["epoch"] = k + model_metrics_dict["iter"] = j + 1 + model_metrics_dict["train_loss"] = train_loss + model_metrics_dict["total_loss"] = total_loss + model_metrics_dict[ + "opt_state_dict" + ] = optimizer.state_dict() + print("Saving model to {}".format(args.save_model)) + torch.save(model_metrics_dict, args.save_model) + + if args.mlperf_logging: + mlperf_logger.barrier() + mlperf_logger.log_end( + key=mlperf_logger.constants.EVAL_STOP, + metadata={ + mlperf_logger.constants.EPOCH_NUM: epoch_num_float + }, + ) + + # Uncomment the line below to print out the total time with overhead + # print("Total test time for this group: {}" \ + # .format(time_wrap(use_gpu) - accum_test_time_begin)) + + if ( + 
args.mlperf_logging + and (args.mlperf_acc_threshold > 0) + and (best_acc_test > args.mlperf_acc_threshold) + ): + print( + "MLPerf testing accuracy threshold " + + str(args.mlperf_acc_threshold) + + " reached, stop training" + ) + break + + if ( + args.mlperf_logging + and (args.mlperf_auc_threshold > 0) + and (best_auc_test > args.mlperf_auc_threshold) + ): + print( + "MLPerf testing auc threshold " + + str(args.mlperf_auc_threshold) + + " reached, stop training" + ) + if args.mlperf_logging: + mlperf_logger.barrier() + mlperf_logger.log_end( + key=mlperf_logger.constants.RUN_STOP, + metadata={ + mlperf_logger.constants.STATUS: mlperf_logger.constants.SUCCESS + }, + ) + break + if k == 0 and args.fb5logger is not None: + fb5logger.run_stop(nbatches - args.warmup_steps, args.mini_batch_size) + + if args.mlperf_logging: + mlperf_logger.barrier() + mlperf_logger.log_end( + key=mlperf_logger.constants.EPOCH_STOP, + metadata={mlperf_logger.constants.EPOCH_NUM: (k + 1)}, + ) + mlperf_logger.barrier() + mlperf_logger.log_end( + key=mlperf_logger.constants.BLOCK_STOP, + metadata={mlperf_logger.constants.FIRST_EPOCH_NUM: (k + 1)}, + ) + k += 1 # nepochs + if args.mlperf_logging and best_auc_test <= args.mlperf_auc_threshold: + mlperf_logger.barrier() + mlperf_logger.log_end( + key=mlperf_logger.constants.RUN_STOP, + metadata={ + mlperf_logger.constants.STATUS: mlperf_logger.constants.ABORTED + }, + ) + else: + print("Testing for inference only") + inference( + args, + dlrm, + best_acc_test, + best_auc_test, + test_ld, + device, + use_gpu, + ) + + # profiling + if args.enable_profiling: + time_stamp = str(datetime.datetime.now()).replace(" ", "_") + with open("dlrm_s_pytorch" + time_stamp + "_shape.prof", "w") as prof_f: + prof_f.write( + prof.key_averages(group_by_input_shape=True).table( + sort_by="self_cpu_time_total" + ) + ) + with open("dlrm_s_pytorch" + time_stamp + "_total.prof", "w") as prof_f: + 
prof_f.write(prof.key_averages().table(sort_by="self_cpu_time_total")) + prof.export_chrome_trace("dlrm_s_pytorch" + time_stamp + ".json") + # print(prof.key_averages().table(sort_by="cpu_time_total")) + + # plot compute graph + if args.plot_compute_graph: + sys.exit( + "ERROR: Please install pytorchviz package in order to use the" + + " visualization. Then, uncomment its import above as well as" + + " three lines below and run the code again." + ) + # V = Z.mean() if args.inference_only else E + # dot = make_dot(V, params=dict(dlrm.named_parameters())) + # dot.render('dlrm_s_pytorch_graph') # write .pdf file + + # test prints + if not args.inference_only and args.debug_mode: + print("updated parameters (weights and bias):") + dlrm.print_weights() + + # export the model in onnx + if args.save_onnx: + """ + # workaround 1: tensor -> list + if torch.is_tensor(lS_i_onnx): + lS_i_onnx = [lS_i_onnx[j] for j in range(len(lS_i_onnx))] + # workaound 2: list -> tensor + lS_i_onnx = torch.stack(lS_i_onnx) + """ + # debug prints + # print("inputs", X_onnx, lS_o_onnx, lS_i_onnx) + # print("output", dlrm_wrap(X_onnx, lS_o_onnx, lS_i_onnx, use_gpu, device)) + dlrm_pytorch_onnx_file = "dlrm_s_pytorch.onnx" + print("X_onnx.shape", X_onnx.shape) + if torch.is_tensor(lS_o_onnx): + print("lS_o_onnx.shape", lS_o_onnx.shape) + else: + for oo in lS_o_onnx: + print("oo.shape", oo.shape) + if torch.is_tensor(lS_i_onnx): + print("lS_i_onnx.shape", lS_i_onnx.shape) + else: + for ii in lS_i_onnx: + print("ii.shape", ii.shape) + + # name inputs and outputs + o_inputs = ( + ["offsets"] + if torch.is_tensor(lS_o_onnx) + else ["offsets_" + str(i) for i in range(len(lS_o_onnx))] + ) + i_inputs = ( + ["indices"] + if torch.is_tensor(lS_i_onnx) + else ["indices_" + str(i) for i in range(len(lS_i_onnx))] + ) + all_inputs = ["dense_x"] + o_inputs + i_inputs + # debug prints + print("inputs", all_inputs) + + # create dynamic_axis dictionaries + do_inputs = ( + [{"offsets": {1: "batch_size"}}] + if 
torch.is_tensor(lS_o_onnx) + else [ + {"offsets_" + str(i): {0: "batch_size"}} for i in range(len(lS_o_onnx)) + ] + ) + di_inputs = ( + [{"indices": {1: "batch_size"}}] + if torch.is_tensor(lS_i_onnx) + else [ + {"indices_" + str(i): {0: "batch_size"}} for i in range(len(lS_i_onnx)) + ] + ) + dynamic_axes = {"dense_x": {0: "batch_size"}, "pred": {0: "batch_size"}} + for do in do_inputs: + dynamic_axes.update(do) + for di in di_inputs: + dynamic_axes.update(di) + # debug prints + print(dynamic_axes) + # export model + torch.onnx.export( + dlrm, + (X_onnx, lS_o_onnx, lS_i_onnx), + dlrm_pytorch_onnx_file, + verbose=True, + use_external_data_format=True, + opset_version=11, + input_names=all_inputs, + output_names=["pred"], + dynamic_axes=dynamic_axes, + ) + # recover the model back + dlrm_pytorch_onnx = onnx.load("dlrm_s_pytorch.onnx") + # check the onnx model + onnx.checker.check_model(dlrm_pytorch_onnx) + total_time_end = time_wrap(use_gpu) + + +if __name__ == "__main__": + run() diff --git a/benchmarks/dlrm/ootb/extend_distributed.py b/benchmarks/dlrm/ootb/extend_distributed.py new file mode 100644 index 0000000..1f2c8a5 --- /dev/null +++ b/benchmarks/dlrm/ootb/extend_distributed.py @@ -0,0 +1,603 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
+# +import builtins +import os +import sys + +import torch +import torch.distributed as dist +from torch.autograd import Function +from torch.autograd.profiler import record_function +from torch.nn.parallel import DistributedDataParallel as DDP + + +try: + import torch_ccl +except ImportError as e: + # print(e) + torch_ccl = False + +try: + import torch_ucc +except ImportError as e: + torch_ucc = False + + +my_rank = -1 +my_size = -1 +my_local_rank = -1 +my_local_size = -1 +alltoall_supported = False +a2a_impl = os.environ.get("DLRM_ALLTOALL_IMPL", "") + +myreq = None + + +def env2int(env_list, default=-1): + for e in env_list: + val = int(os.environ.get(e, -1)) + if val >= 0: + return val + return default + + +def get_my_slice(n): + k, m = divmod(n, my_size) + return slice( + my_rank * k + min(my_rank, m), (my_rank + 1) * k + min(my_rank + 1, m), 1 + ) + + +def get_split_lengths(n): + k, m = divmod(n, my_size) + if m == 0: + splits = None + my_len = k + else: + splits = [(k + 1) if i < m else k for i in range(my_size)] + my_len = splits[my_rank] + return (my_len, splits) + + +def init_distributed(rank=-1, local_rank=-1, size=-1, use_gpu=False, backend=""): + global myreq + global my_rank + global my_size + global my_local_rank + global my_local_size + global a2a_impl + global alltoall_supported + + # guess MPI ranks from env (works for IMPI, OMPI and MVAPICH2) + num_mpi_ranks = env2int( + ["PMI_SIZE", "OMPI_COMM_WORLD_SIZE", "MV2_COMM_WORLD_SIZE", "WORLD_SIZE"] + ) + if backend == "" and num_mpi_ranks > 1: + if torch_ccl and env2int(["CCL_WORKER_COUNT"]) > 0: + backend = "ccl" + elif use_gpu and dist.is_nccl_available(): + backend = "nccl" + elif dist.is_mpi_available(): + backend = "mpi" + else: + print( + "WARNING: MPI multi-process launch detected but PyTorch MPI backend not available." 
+ ) + backend = "gloo" + + if backend != "": + # guess Rank and size + if rank == -1: + rank = env2int( + ["PMI_RANK", "OMPI_COMM_WORLD_RANK", "MV2_COMM_WORLD_RANK", "RANK"], 0 + ) + if size == -1: + size = env2int( + [ + "PMI_SIZE", + "OMPI_COMM_WORLD_SIZE", + "MV2_COMM_WORLD_SIZE", + "WORLD_SIZE", + ], + 1, + ) + if not os.environ.get("RANK", None) and rank != -1: + os.environ["RANK"] = str(rank) + if not os.environ.get("WORLD_SIZE", None) and size != -1: + os.environ["WORLD_SIZE"] = str(size) + if not os.environ.get("MASTER_PORT", None): + os.environ["MASTER_PORT"] = "29500" + if not os.environ.get("MASTER_ADDR", None): + local_size = env2int( + [ + "MPI_LOCALNRANKS", + "OMPI_COMM_WORLD_LOCAL_SIZE", + "MV2_COMM_WORLD_LOCAL_SIZE", + ], + 1, + ) + if local_size != size and backend != "mpi": + print( + "Warning: Looks like distributed multinode run but MASTER_ADDR env not set, using '127.0.0.1' as default" + ) + print( + "If this run hangs, try exporting rank 0's hostname as MASTER_ADDR" + ) + os.environ["MASTER_ADDR"] = "127.0.0.1" + + if size > 1: + if local_rank == -1: + my_local_rank = env2int( + [ + "MPI_LOCALRANKID", + "OMPI_COMM_WORLD_LOCAL_RANK", + "MV2_COMM_WORLD_LOCAL_RANK", + "LOCAL_RANK", + ], + 0, + ) + else: + my_local_rank = local_rank + my_local_size = env2int( + [ + "MPI_LOCALNRANKS", + "OMPI_COMM_WORLD_LOCAL_SIZE", + "MV2_COMM_WORLD_LOCAL_SIZE", + ], + 1, + ) + if use_gpu: + if my_local_size > torch.cuda.device_count(): + print( + "Not sufficient GPUs available... 
local_size = %d, ngpus = %d" + % (my_local_size, torch.cuda.device_count()) + ) + sys.exit(1) + torch.cuda.set_device(my_local_rank) + dist.init_process_group(backend, rank=rank, world_size=size) + my_rank = dist.get_rank() + my_size = dist.get_world_size() + if my_rank == 0: + print("Running on %d ranks using %s backend" % (my_size, backend)) + if hasattr(dist, "all_to_all_single"): + try: + t = torch.zeros([4]) + if use_gpu: + t = t.cuda() + dist.all_to_all_single(t, t) + alltoall_supported = True + except RuntimeError as err: + print("fail to enable all_to_all_single primitive: %s" % err) + if a2a_impl == "alltoall" and alltoall_supported == False: + print( + "Requested DLRM_ALLTOALL_IMPL=%s but backend %s does not support it, use scatter/gather based alltoall" + % (a2a_impl, backend) + ) + a2a_impl = "scatter" + if a2a_impl != "": + print("Using DLRM_ALLTOALL_IMPL=%s" % a2a_impl) + else: + my_rank = 0 + my_size = 1 + my_local_rank = 0 + my_local_size = 1 + print_all( + "world size: %d, current rank: %d, local rank: %d" + % (my_size, my_rank, my_local_rank) + ) + myreq = Request() + + +class Request(object): + def __init__(self): + self.req = None + self.tensor = None + self.WaitFunction = All2All_Scatter_Wait + + def wait(self): + ret = self.WaitFunction.apply(*self.tensor) + self.req = None + self.tensor = None + return ret + + +class All2All_ScatterList_Req(Function): + @staticmethod + def forward(ctx, a2a_info, *inputs): + global myreq + batch_split_lengths = ( + a2a_info.global_batch_partition_slices + if a2a_info.global_batch_partition_slices + else a2a_info.local_batch_num + ) + table_split_lengths = ( + a2a_info.global_table_wise_parition_slices + if a2a_info.global_table_wise_parition_slices + else [a2a_info.local_table_num] * my_size + ) + gather_list = [] + req_list = [] + for i in range(my_size): + for j in range(table_split_lengths[i]): + out_tensor = inputs[0].new_empty( + [a2a_info.local_batch_num, a2a_info.emb_dim] + ) + scatter_list = ( + 
list(inputs[j].split(batch_split_lengths, dim=0)) + if i == my_rank + else [] + ) + req = dist.scatter(out_tensor, scatter_list, src=i, async_op=True) + gather_list.append(out_tensor) + req_list.append(req) + myreq.req = req_list + myreq.tensor = tuple(gather_list) + myreq.a2a_info = a2a_info + return myreq.tensor + + @staticmethod + def backward(ctx, *grad_output): + global myreq + for r in myreq.req: + r.wait() + myreq.req = None + grad_inputs = myreq.tensor + myreq.tensor = None + return (None, *grad_inputs) + + +class All2All_ScatterList_Wait(Function): + @staticmethod + def forward(ctx, *output): + global myreq + ctx.a2a_info = myreq.a2a_info + for r in myreq.req: + r.wait() + myreq.req = None + myreq.tensor = None + return output + + @staticmethod + def backward(ctx, *grad_output): + global myreq + a2a_info = ctx.a2a_info + grad_output = [t.contiguous() for t in grad_output] + batch_split_lengths = ( + a2a_info.global_batch_partition_slices + if a2a_info.global_batch_partition_slices + else [a2a_info.local_batch_num] * my_size + ) + per_rank_table_splits = ( + a2a_info.global_table_wise_parition_slices + if a2a_info.global_table_wise_parition_slices + else [a2a_info.local_table_num] * my_size + ) + grad_inputs = [ + grad_output[0].new_empty([ctx.a2a_info.batch_size, ctx.a2a_info.emb_dim]) + for _ in range(a2a_info.local_table_num) + ] + req_list = [] + ind = 0 + for i in range(my_size): + for j in range(per_rank_table_splits[i]): + gather_list = ( + list(grad_inputs[j].split(batch_split_lengths, dim=0)) + if i == my_rank + else None + ) + req = dist.gather(grad_output[ind], gather_list, dst=i, async_op=True) + req_list.append(req) + ind += 1 + myreq.req = req_list + myreq.tensor = grad_inputs + return tuple(grad_output) + + +class All2All_Scatter_Req(Function): + @staticmethod + def forward(ctx, a2a_info, *inputs): + global myreq + batch_split_lengths = ( + a2a_info.global_batch_partition_slices + if a2a_info.global_batch_partition_slices + else 
a2a_info.local_batch_num + ) + table_split_lengths = ( + a2a_info.global_table_wise_parition_slices + if a2a_info.global_table_wise_parition_slices + else [a2a_info.local_table_num] * my_size + ) + input = torch.cat(inputs, dim=1) + scatter_list = list(input.split(batch_split_lengths, dim=0)) + gather_list = [] + req_list = [] + for i in range(my_size): + out_tensor = input.new_empty( + [a2a_info.local_batch_num, table_split_lengths[i] * a2a_info.emb_dim] + ) + req = dist.scatter( + out_tensor, scatter_list if i == my_rank else [], src=i, async_op=True + ) + gather_list.append(out_tensor) + req_list.append(req) + myreq.req = req_list + myreq.tensor = tuple(gather_list) + myreq.a2a_info = a2a_info + ctx.a2a_info = a2a_info + return myreq.tensor + + @staticmethod + def backward(ctx, *grad_output): + global myreq + for r in myreq.req: + r.wait() + myreq.req = None + grad_input = myreq.tensor + grad_inputs = grad_input.split(ctx.a2a_info.emb_dim, dim=1) + myreq.tensor = None + return (None, *grad_inputs) + + +class All2All_Scatter_Wait(Function): + @staticmethod + def forward(ctx, *output): + global myreq + ctx.a2a_info = myreq.a2a_info + for r in myreq.req: + r.wait() + myreq.req = None + myreq.tensor = None + return output + + @staticmethod + def backward(ctx, *grad_output): + global myreq + assert len(grad_output) == my_size + scatter_list = [t.contiguous() for t in grad_output] + a2a_info = ctx.a2a_info + batch_split_lengths = ( + a2a_info.global_batch_partition_slices + if a2a_info.global_batch_partition_slices + else a2a_info.local_batch_num + ) + table_split_lengths = ( + a2a_info.global_table_wise_parition_slices + if a2a_info.global_table_wise_parition_slices + else [a2a_info.local_table_num] * my_size + ) + grad_input = grad_output[0].new_empty( + [a2a_info.batch_size, a2a_info.emb_dim * a2a_info.local_table_num] + ) + gather_list = list(grad_input.split(batch_split_lengths, dim=0)) + req_list = [] + for i in range(my_size): + req = dist.gather( + 
scatter_list[i], + gather_list if i == my_rank else [], + dst=i, + async_op=True, + ) + req_list.append(req) + myreq.req = req_list + myreq.tensor = grad_input + return grad_output + + +class All2All_Req(Function): + @staticmethod + def forward(ctx, a2a_info, *inputs): + global myreq + with record_function("DLRM alltoall_req_fwd_single"): + batch_split_lengths = a2a_info.global_batch_partition_slices + if batch_split_lengths: + batch_split_lengths = [ + m * a2a_info.emb_dim * a2a_info.local_table_num + for m in batch_split_lengths + ] + table_split_lengths = a2a_info.global_table_wise_parition_slices + if table_split_lengths: + table_split_lengths = [ + a2a_info.local_batch_num * e * a2a_info.emb_dim + for e in table_split_lengths + ] + input = torch.cat(inputs, dim=1).view([-1]) + output = input.new_empty( + [ + a2a_info.global_table_num + * a2a_info.local_batch_num + * a2a_info.emb_dim + ] + ) + req = dist.all_to_all_single( + output, input, table_split_lengths, batch_split_lengths, async_op=True + ) + + myreq.req = req + myreq.tensor = [] + myreq.tensor.append(output) + myreq.tensor = tuple(myreq.tensor) + a2a_info.batch_split_lengths = batch_split_lengths + a2a_info.table_split_lengths = table_split_lengths + myreq.a2a_info = a2a_info + ctx.a2a_info = a2a_info + return myreq.tensor + + @staticmethod + def backward(ctx, *grad_output): + global myreq + with record_function("DLRM alltoall_req_bwd_single"): + a2a_info = ctx.a2a_info + myreq.req.wait() + myreq.req = None + grad_input = myreq.tensor + grad_inputs = grad_input.view([a2a_info.batch_size, -1]).split( + a2a_info.emb_dim, dim=1 + ) + grad_inputs = [gin.contiguous() for gin in grad_inputs] + myreq.tensor = None + return (None, *grad_inputs) + + +class All2All_Wait(Function): + @staticmethod + def forward(ctx, *output): + global myreq + with record_function("DLRM alltoall_wait_fwd_single"): + a2a_info = myreq.a2a_info + ctx.a2a_info = a2a_info + myreq.req.wait() + myreq.req = None + myreq.tensor = None + 
table_split_lengths = ( + a2a_info.table_split_lengths + if a2a_info.table_split_lengths + else a2a_info.local_table_num + * a2a_info.local_batch_num + * a2a_info.emb_dim + ) + outputs = output[0].split(table_split_lengths) + outputs = tuple( + [out.view([a2a_info.local_batch_num, -1]) for out in outputs] + ) + return outputs + + @staticmethod + def backward(ctx, *grad_outputs): + global myreq + with record_function("DLRM alltoall_wait_bwd_single"): + a2a_info = ctx.a2a_info + grad_outputs = [gout.contiguous().view([-1]) for gout in grad_outputs] + grad_output = torch.cat(grad_outputs) + grad_input = grad_output.new_empty( + [a2a_info.batch_size * a2a_info.local_table_num * a2a_info.emb_dim] + ) + req = dist.all_to_all_single( + grad_input, + grad_output, + a2a_info.batch_split_lengths, + a2a_info.table_split_lengths, + async_op=True, + ) + myreq.req = req + myreq.tensor = grad_input + return (grad_output,) + + +class AllGather(Function): + @staticmethod + def forward(ctx, input, global_lengths, dim=0): + if not isinstance(global_lengths, (list, tuple)): + global_lengths = [global_lengths] * my_size + + assert len(global_lengths) == my_size + assert global_lengths[my_rank] == input.size(dim) + local_start = sum(global_lengths[:my_rank]) + + output_size = list(input.size()) + + ctx.dim = dim + ctx.local_start = local_start + ctx.local_length = global_lengths[my_rank] + + input = input.contiguous() + if dim == 0: + out_len = sum(global_lengths) + output_size[dim] = out_len + output = input.new_empty(output_size) + gather_list = list(output.split(global_lengths, dim=0)) + else: + gather_list = [torch.empty_like(input) for _ in range(my_size)] + gather_list = [] + for length in global_lengths: + output_size[dim] = length + gather_list.append(input.new_empty(output_size)) + + dist.all_gather(gather_list, input) + + if dim != 0: + output = torch.cat(gather_list, dim=dim) + + return output + + @staticmethod + def backward(ctx, grad_output): + # print("Inside 
All2AllBackward") + dim = ctx.dim + start = ctx.local_start + length = ctx.local_length + + grad_input = grad_output.narrow(dim, start, length) + + return (grad_input, None, None) + + +class All2AllInfo(object): + pass + + +def alltoall(inputs, per_rank_table_splits): + global myreq + batch_size, emb_dim = inputs[0].size() + a2a_info = All2AllInfo() + a2a_info.local_table_num = len(inputs) + a2a_info.global_table_wise_parition_slices = per_rank_table_splits + ( + a2a_info.local_batch_num, + a2a_info.global_batch_partition_slices, + ) = get_split_lengths(batch_size) + a2a_info.emb_dim = emb_dim + a2a_info.batch_size = batch_size + a2a_info.global_table_num = ( + sum(per_rank_table_splits) + if per_rank_table_splits + else a2a_info.local_table_num * my_size + ) + + if a2a_impl == "" and alltoall_supported or a2a_impl == "alltoall": + # print("Using All2All_Req") + output = All2All_Req.apply(a2a_info, *inputs) + myreq.WaitFunction = All2All_Wait + elif a2a_impl == "" or a2a_impl == "scatter": + # print("Using All2All_Scatter_Req") + output = All2All_Scatter_Req.apply(a2a_info, *inputs) + myreq.WaitFunction = All2All_Scatter_Wait + elif a2a_impl == "scatter_list": + # print("Using All2All_ScatterList_Req") + output = All2All_ScatterList_Req.apply(a2a_info, *inputs) + myreq.WaitFunction = All2All_ScatterList_Wait + else: + print( + "Unknown value set for DLRM_ALLTOALL_IMPL (%s), " + "please use one of [alltoall, scatter, scatter_list]" % a2a_impl + ) + return myreq + + +def all_gather(input, lengths, dim=0): + if not lengths: + lengths = [input.size(0)] * my_size + return AllGather.apply(input, lengths, dim) + + +def barrier(): + if my_size > 1: + dist.barrier() + + +# Override builtin print function to print only from rank 0 +orig_print = builtins.print + + +def rank0_print(*args, **kwargs): + if my_rank <= 0 or kwargs.get("print_all", False): + orig_print(*args, **kwargs) + + +builtins.print = rank0_print + +# Allow printing from all rank with explicit print_all +def 
print_all(*args, **kwargs): + orig_print(*args, **kwargs) diff --git a/benchmarks/dlrm/ootb/input/dist_emb_0.log b/benchmarks/dlrm/ootb/input/dist_emb_0.log new file mode 100644 index 0000000..7a8c1b7 --- /dev/null +++ b/benchmarks/dlrm/ootb/input/dist_emb_0.log @@ -0,0 +1,3 @@ +1, 2, 3, 4, 5, 6 +0, 1, 3, 4, 5 +0.55, 0.64, 0.82, 0.91, 1.0 diff --git a/benchmarks/dlrm/ootb/input/dist_emb_1.log b/benchmarks/dlrm/ootb/input/dist_emb_1.log new file mode 100644 index 0000000..7a8c1b7 --- /dev/null +++ b/benchmarks/dlrm/ootb/input/dist_emb_1.log @@ -0,0 +1,3 @@ +1, 2, 3, 4, 5, 6 +0, 1, 3, 4, 5 +0.55, 0.64, 0.82, 0.91, 1.0 diff --git a/benchmarks/dlrm/ootb/input/dist_emb_2.log b/benchmarks/dlrm/ootb/input/dist_emb_2.log new file mode 100644 index 0000000..7a8c1b7 --- /dev/null +++ b/benchmarks/dlrm/ootb/input/dist_emb_2.log @@ -0,0 +1,3 @@ +1, 2, 3, 4, 5, 6 +0, 1, 3, 4, 5 +0.55, 0.64, 0.82, 0.91, 1.0 diff --git a/benchmarks/dlrm/ootb/input/trace.log b/benchmarks/dlrm/ootb/input/trace.log new file mode 100644 index 0000000..4d33e55 --- /dev/null +++ b/benchmarks/dlrm/ootb/input/trace.log @@ -0,0 +1 @@ +1, 2, 3, 4, 5, 3, 4, 1, 1, 6, 3 diff --git a/benchmarks/dlrm/ootb/kaggle_dac_loss_accuracy_plots.png b/benchmarks/dlrm/ootb/kaggle_dac_loss_accuracy_plots.png new file mode 100644 index 0000000000000000000000000000000000000000..aaa51f36fce8018cb97d581d7c14f7815cf5df14 GIT binary patch literal 547424 zcmeFZWmuGJ+cvC7Bds8av>-@GjC4yV3?SX0NK1E%vb?R$TGKkl_{Yu0i+XXd)j^N9V}kNvoU73HOIvB|NoUAu<+ROX4&wQIyb zuU*3s#Y6$`l)r~{f&W~$SCSULR?tVW2L6C$Bco}5?HZ##^55(C0;b!Q_*nH zfXeY3T3fQccxC<4h|R^)2CTkzP0)oO{I8{v!wYH`%hy)+{4PSYS1b6z|0CaKr=`AH z;$R^}s{vJ{maw)nqK2?>vpu2}#-^sG7PNb1%&+uB>W`0uKMB#AIyl(yv$H!pJF_`+ zv02-huygS7@v%SRWas2$1uIzXU9B8mxUgE;)BXOEzkSaWBYQ(TGaCmpYb$Ex_r7>( z?dTvxON;!WKmYlCoepNk|MN>$_J7`GAJ!1d!yTL~Vk?-;=8QELEc0_)? 
zs+F08FsI4U*G%R>lN(GjKKXNe@2+&kM)0j z?|-jXG_`iH277I1X86>~!N?B$*dL$%+Xepf6@Pq}AUkrq{<3|)&*th~a5%!)g6w}D zv@mwSU;+KLYogblJ`q!KxxP8&e1sUDyS#k&<}Ecgo6ha8?^5w793L`kN#B!fWGxLd zC)_~fjPP${Y4JF|nI}m{JZH5E(YB@2LT!0>Lx=jyP4B{_!~I@H^}eUulT{HB_A`eO zBHZA_`BrS@KMC;th}ezTC~T;gjbRg+qe8)?esk?#zKE*%6zJAWJ55-Ma4(2N$) zH+%itPhCS1(R}^2l{@k9@{;P#f3VyUo2B9GS1)hcAx2cLJeia9wd<$^qW|&*?lO(0 z_^+2-{cV(r=?%T&=|MKgRpCy-1w*0s-UFU9HHlo(Z zjPoDdh$@=Y2g>v1IBT^~1S%my%Obzt^YpQp*aMV*xy{H!n3bFDWK8l{ic)6|JZ<2p zBZs^HY$X-wM#T{Sw^s@iorO!_TZVJg#l0@Z^6Y}bUo!pMU#z^vyZqVkEk~WD4&`S< zntx*n|JqLe{9E{2l=o|vjn6VW{)65650=QI-5J`5jPw0}b9tr7*9_gXiw2oE|Lt-7 zC)@Ex`Fh(P|I)I-e{duJ!4lR7-#$;11|L1o8Cs*;$7SF1@;i!|Z;=A+zS@K`A`@h<*zb^i-+5Hon`s*hBlO_K(yZ>O5{_&Fkn%zIKseim( zf4BI*X7?X#(m!7EU$gr=OZvx~^iP)j|K03LEgLzNU;VE2$z^5}oTsaH`vfxO6Ha=0 zW=hS5GC=KaQCN47Eap32W-%(7PMynoUo_P+d%dr&$bO~I^JFb^#{EZKVf~5u#o2N7 zYJT~+xwdtk<3zb&)nXX*EhhC2@wR3x8s`t05{Z9UNBc~ox#-6TFk$f%Bv0{+S|KFh znUi@C*(!n^>hU**5^6iW2M9Z9s5j>A!JTGxq5bK3g*?vhN7I%8@YOcz?w)crJCa_+kk3{Xz{3xu|HtdAeEzS_sJ>F;9s=-Pf_ z5qj$%GcVM%-%hJBF>`TgL~AUA%ztr3>@l5cvM*x$7~h@^Q2ih(bo$sLXeg_(i58+=ihg0isA8$$F48F zutvA=t51TZL2p(_75Ym?X+!B#J}YO)9k+{IaJcQ6vaXK)FiG-!Y&~5|UGJ5*j`7Co zLffj!Xr|uNF%F?HX7CV@T5vfOFJWr#h(|{MixM*xzJ3xJS18Z){K>a|b*VjpCpp2Y zNocFe2USj5tk#M8f+DrLP?oZT_%78G=~_U1FQ>exOEo zVcr4!*51Y_`zJo!Nu{GGbv-hy!s?u_dJ|SPTlW1zq`}#!`<>NXeY|u;eat9$sBG1% zUlu-y)F<9>+(1}HKp&5Joo+#DGt~?A_UUg=A69P^;NY*~J~oppL_aJj?qU|h#KDYw zxHGJ*uvOg6h7g)OD?G!cRabfJKIcdHMm;7{S^bkc7)8lljVkObtDuGEM zN%;KY^amB^tB&>D!ut7l4-;MHgJFomKJKy_^zF9$oUXqXE1`aLvwE}d9u~;(W-S|E zo@^8n(-vX*c%TV3xVR4QHq0DAd)WjGDp<3w@QcPWhvnnB+{fjy+Xykk0)MN z&ILG6ImXm08n)4(B6-noD;RzO&GgNG^&@cc`E;U5b{oTv7SY2iNP8(Yj(gprG?Y0R zREB7AJvf_Rb3K4nzG%4}D!jx?)Vv2NQC4 z5~}iCOOH7@{Lyf{9H;R4QA(o2chc*q9t1nb*BddYe*GEC{vf492T=ByNLl4h$WT4J zjGi<|1YjGH9)kC3cvgZsQGUBfvE{C1x zx97ElO^wqQc}&HRhD+h!_Y8NL$}ya?sy|*9^F`KBZmK)GQNcZf3S+@dzOzP+`#!|CIvB0@5Kc6 zB&KQl6-Mu&f9CIC^$Pkhg?EpMakdBx?68;Ag9@i#bw)AT(Q3T>`y<$8a%8|geBOcR 
zY=#94O*91C<7h6>eXDGAvdTVxX;ie3E{wOSI#?8yTuYSByO<-rXixqJYa}y)kZILrp022hw(fy664e*9N7SN8#rS1#xNFDx-!=qp z$>h1brYvtQDThkXp_|-i;y1Y)A>24=J+_7q?YW6Etd7p@gbdQ;>dDXr8tR&*mMIlp4M+N3|R zEJMlz^&P=yLDyp8SbfOS@$Slu+eW@>Dq#u)ZTKDS4Y#FaK6jCm)zsJ2zYY2EZriwQ zzAUj=CI!6aXg;K{452k)Suq7dM9H+<25Ze`u_}(hizdIP5e%xKQm9~ejC^|VF_*q9 zd3Cc@@It*E)^pc#Kciely5BAY_W+i`qo1Jk1%}FbWiAXRv@x_GkT7jRo#2!DE^b~w z_XORci0Zu76os)aacUIc*Bxi?XL*z6n!y+2G4|PQgT2{KqYnZB^oZq~V4d(No+}dW zy(5BC)Uwy^yoKyj3b{>!EVDIHQDJqTz=sI!0E7d8EgygK;j>@%xHw)J%7DgQEHvN? z9~tbe4j8t*KL){-j)N|jm{ESxVF(J&T`v^#-Rox>TL2I=0K))S=1Hor@bO{<-aUSn zib*?UD#K!o|5!vCM_|Kb12zX~I%Z)m-K-UGhapkIOt0(x?1_mcgv=F{(l2zliOAQcUk2grVmIk>(s5L`I)B3#Ej&L;b*gOt;V9DR z2~2gaQ&AF-tAF@y27jT+6;*I#{J3YBAp3fYD zSR_vgp8(*x;qv@PZeH;-9Zd(SQ5*g9AyOW;y1l^{%>kafy*%L^u;=tdP0T2m+&*s@ z@iC#)PYM4TnvhQU2zcXTD5e?abp<_SwkF6=kK%foWcP5izTfEZDo1$~jGgRCCE1+^ zq`eV+h~x6>J-ka*bnK68{f!ykM*a*~A&5yO*dPRRN#ApfEOUTegWjMX)v$5{EPGukNUir5b%@Une$41coHMk%AV$HZM*`xC8Lx?+9ZL7j7wF7S zGJ_%U;A};|g}&JyUWvDtpZw||LB;QoRX-H|D4D0$x<63}30R;L?u+5lY(?Kt=Aa=g z#WF5YM!+H<*C`3K^4pl$|465Q%Tfuz$40R_wsC2`JxmSAV&K&D12I}M>k|VC<}F6x zYqsTSpy@<-J&}->I`0qf6iorGJl4pV7UX?5pk~M?R0G)2hO1w@`X$6^+>EioPR@!T zOTnOSAK^Y9%xKkc?zDm05LKE4Ffn))c4ba<-t2OElL zLeXR+G#63Rv7F4=D?|OCmo;#%AawRD6U;_Ew;!fUmthAt=JxORL$3`uCn;nqvJ@*C>P*hs@D#{J? 
zoABghHr?u$+l$uTr*TpjEtdBwe543J^H@)VfNH+%gbWWLJLL}*T>>QUzWKGiM;8+r zttl|$ou^$F+Cobxvpi1^Ql&!2UqwLkw1-fws+OXG-JjUD-^=9l3cN`+n6F*I?M{dR zN}wliF#JfeP2UCo;gWyc=|?f($2Uzk2Psf$InBe|(r&4O8!veF1Vr1NXK#d!3UhpW zsNv=)f^?-?p~`x{kvIj;ql{4w-auqbd`1j62mn<9!tDrL_zA!g#a6TgKFb8(J$8E` z=FXk>W8Oau)4SeoNsGKem%RaLk&B}R87qiDdfOhe&5Y+^=R0QS?Tv>{mnOh zV5s08f48TAcauweh-cJ4_)*mr}OqsMVUO-^V1ng5Qr<#f7w|R#ld&76)n~}ZY zB}^RvQ6&QL$|!3i18c2f@F*U-*Mh|qWRFhLpI_x!TA}v(avU4e8kYWD5)K1F4$;ZIAImv_}8G@u~0}na}GviY- z$iHTrJTQJmTKH_r+@wJLC_Es1G^g7H{3c5}OBnw8k&P?FZ1Ae++mTMr7ty3)yfQL0 zEWyjOU0)3Ra)#-2*%%P-3jtn4z^vJmj4$CjV!Uws9U1z;DL>WACGit{OSeA#BI3x|}hi{TiQE(T40SUF51F>n3SfhV`LLh0}vAi)UlI9Rk~x3xF#E^|^8H3L2?SvU4K>Iw!g&1k#fE z3W&s^>4x}NJ&D9^OU7AuiS+k3LZX?JS^ z*5a~0)dZ%K*M8f0AZR^b+D_gmDTVZC96!H1l+Lj0C^h>*=-~7E^L*V|P*&KaQ$n~* zgvKsFaTN99zJU9|>-5y+ujA&qmO$wONncy$%nb>sm!`(>j8n_3V!Ow0PuHuFen;5j z=$`ZBPk@TN0TSm=aj;hjE}yjPt~(gdb^bBUJX-6z3ql~|K2hXPe^Ncn=w_UE8R*1# zSkaEmYmdjP>{qDii$UqdWq&X@EbcF^t~JsMQX{wR{1aUl$G#l4Vbd5*~(tGP?$7{;gijO*aWWiy=MllfW7HyW3$YzL4lAVS`_Z|~YFliEM%VLen3o1AqY9BdlvSR$i|Pmadn z_m^EuWyxY853_v$iIU;iA1H=LW5Kte5EOF*++b!f8uFD|09(SS zM1-MTd~Mm;{-bU>`+4;7gybfwkVagdNqA{B@S5E1J5S)x!$29@Q7ORH`z$EHJ;A04 zD=OSY3>yoFyoK`Vx^u&+E`9w4gCQuIhOTwU^mOFd@q!X0^v3&5Jx554l=kY9Mh%oi+hAAr@atLJ7DV zkh=Jf4Y9k5(7)#TTi7_-Oads~$kKs+_cg&2OcL&!gYUD&hWCe4x1s!NJ+DA%cQISp ziNnTTaj-ms3WKk${IR zlShWcW~h+FG@TWhn(oE3&X09nZ2ZdeTH^o;O!Rabf9L2G+Q`mK98B+o;*z+XmYk#! 
zb~sC0m}5IQoiCO`a=($)yRJskB#Ka@raw5jmQu5qDCWYBuNIuC_ zW}Z3owh)(R7nWnf1%{Fhv#Kt1iNG-L}PB zI^-RVp$>|;ny)vpmVa-u8@S|<@vSK~5_LD57TGEo(^()YBroVRfG8L*-&{>Chb@0y ze{TLR8Ov{IvHfStuG`o2Kn-Z82C>Zj;tcq3kbvg^Gp;%bb2Gv+C{ISfX) zH?j%qT9L>RvSp%!KHs#DVGp}oWIlbXu_tkP>!#Cih2ICh<_7ZBkY*z#7zj2;Af z7^AtXGnBy5u>$IJD^^)v#Te&So(436Fy$dTaS2ZQ+(>0TvS(E;89j-b1vadvBuh8& zc#|0)y<-LWtvLw!yT@2Kyre3Da<>;*p7UL0t$|-3) zEr(NQW&LpRukVm}Q?UThF5UYpt5<*^L-Yj0{3NoM)Dw%Q7g$b!${tW$Z@$^re0Sc9ftMSRpl@ptTkmkHikVW>2az2T&#qYeH{`NG06s#zIY+~ zcD3zzZ29yY96%gD!L#yS7Q6kjd~UB|)9nD)r0v{Vg=^)0V%5?GQ;_?a&B$o!<}ugX zIOXq9s`R*G4U2}=3D3o3$qH|t{o72pa<49N38_xg@eT_ES3msl<3*?a&TONtwR~^3 zz>taT48{6LuDVXwTJs228kYMI6;F^WLeoSuFg;zQ6k-f$Fkgul)=H7mV>g*tg|QE} zBj*84-}xwvJFm2hIadzG%_LI<_upd`)P5{{y2*vBf)~T4fhbzZF*7b8JQwn7J4S8K zaTF7JMa~>c^l~G405l`xHv)%Td`Sm;*pXyzbEP-j?a{?$j(ug~C ze`{hCmZ6se3%AD)zMGdD-WG1yy2MrZwH4^S#B4vt5{E+^2m|>iB^w`+P4KL^)mo z|9g|g2~qczPOYpC=Y081FY(qN>d=A$4&+-80-*iFFB7G_O1w=dYgAc)_PRV!;E1bK z(A;Woq_eM#LX^bU`e%-xW>~`?3oUs7q>EbeXp;Po&rX`-7*xlm3 zNxREiX}8Xj*DZ8+j>#Iwm^PC?3~FljpwG@aQ3+@H9{fnIZ@T}I>?$i`0IBDRBF-W* zM|ZG&{MBiHv@>V>Lk?oU5YJ~{yd*091GM;by2=79rb{rWJc=^nkyT$A<$WWMsE2%v z=@jE0LTU49Ywg%wcAy|aa5v~si~<05*(dM=wUeacW3m5egwMtKsdh*<<6=&yaV{Zl zP`=}`5?7#t$hkwnrfS>XG=Qm&VH+1iNf#>nt5KL^L8XNy-66}i5ZBwZU)bx9XpK0U z2lno1ZUmx(Hd!n{Tmgn8rFlQ-KHnew?$}F6&NmM-xIvprkR$D6N4=^#FvBT+eXe^T zqXM!jAy&fTV7q#QjVnW(-+4QaDLZ!=@jh9k4?gS4)oV3Xz2=`xCW6Vd!D*3b5p{9n zbT?76DCunRJy)M1)3BoD-4ef002~a0Sw2^dT+f;Adsgd`(V5s~8l9_zQ!(w9TgI3E zN*8dhCcUuR9CCWBOX|<;qUX!k2X7sAF9LclbRsv|rUd<=syD#dqq`v$8RQO5rzPZV zSQ>PJH@JS>kxz?6#xjJUrEDYqs28+bScJM#&_AT!sD?$h@;YzL6b6Rl&_zf=VKOhv zRkA#y`}SGSd;M+v)t^*Qm4p?CGUI=hSrX)Ho<@$mo)qLUF#Qxu{>vsOv`)|GS4oC*HD_Wi?uMSZ?&b z-6$+U8KTPuVxJ9P&ADWlwr20-5z^w+!t@Zxkd_{g3~4f&M+m3S4DbmP)R;`00nbRb zZlY`lo{H1iWUF!Sf&JUJ0@1DzsW)nq_rg?01`o=UE0Ah>%O^(9?J3t!E&Cc!`j}Iw zEZZ`W<7a$TLPT<*u5Y`1yN=-jdLT@P_W%jbU?)dbg)nvdnNTDf94OzRIsyfBQXkdD z4hElks3xQdv|PUfLTzm*Q$2Le8?}tzf&s#*ybzrqw|=wsXWQNVkph0=V^AKIq$dxl 
zDHC`fPC1V`6>d5%$C{W2X$di@OpT5q@ms!xg5)h8Ic~mPzyeelOFkg+8U+ax~ z_$8#?-+7Ud+qsVvc5-OQPYO7`8*?WJZ;h96H9w@O*EM#e)NFUr-qo*4TiLf$ zOy$luiQb1Z-RQ0ZK`xC9IyMb#D8IM!{cBv;}c_|w+xdo=cK^mIi)A8a4H!CcvvmgdZwApGU%WbW+X)~yk5nuQL&OF+s&dm{S_vh4ymm<7Omdjm2dKXB-C9cXzMulgh)Bv$Tb z1(3(gDk}(qoLyL%Kh^{pooC97e>tUoLl~cbH5r5~^)kRCKQv{72NN^agO-(f1)wEL zzBRiW-FQz6+eKD`$-)*c^*%Y3pyFeP zw=+p_j3!IuT>NYpc0B8gPei+6ABB{v*mbk=SdN$4F0`%P=-GeoGVDVFXfUtylQqc{ zSH+Q_$FXyrG$1o%Ld3;|PMl&@Z?7_2vid`cfYb$B1n?K!62a;!_p>AWB_s0Mg*&?C)r*EkUZOUx z*rcA_PMXol{1h|Wy5*N&HXc5Dbybr7St56^h$eA*c8M+69(Pbs3}5>96uvLM-TTx1 zlMKU}^5e_mx57pzk#w0)jiQ8&9?G74n>>wlT(v7qTI;3^8#7N(o-5AO$xz@!QGLcJ zPmjN??s*!zesonOdo(-GoXv;*)x7;I8i9Em{ngoASd+)x_f6W*uCzQm`bbU)A)FrQ~D$4VL79>?cI(#V{4i;jHK6VRvFG+d|AQ+B(7-%>mvcKENG3 z1HBcBO(H#bRtLzd&OkpT)?*;%14zV?HzPJ@U3Q@DtPdDnYU5wx!GYW3Wx27(pk{u| z*F1hQqOO0wmni}{%=a@T$euoJxzFiwWD}YjJfZG*ufmXRu9Hu0RFJ4v@Mi z2tNrR559|52WEoIGz=%_xF?_6hX&~1FoQelEK*Oxgi6x*Egf6o8gPU5t|lZk2|DjR z@2ULaK}$bew@1~2a*$gqCWbWOLM){ThkPZEr^q;kNc`fk;j+wZh#_9a$Tbv#koo?w z9SX*Xid`La=ec$B_aE5W6KB-~=KS9}N=!`~pLM~>P&>NV8374XfpP>z z7lBsqUB5%zXe#$jjtxDmR{VodmR0{$M z9`#bI+X(S?xg9}VaUst3ZGbF=d|(Ao17b3;n-3a^ywg!zv&~-Q=9go4-O+>NW@R|o z1#e2=R@a`#nyUG5Te9zlbxBl>Dp>S2L%$YJ=4Dz|9U0@ukdPRh3LImPD`0hkZ;PIv+GNFeSZGQ+wmnN4b&U6vd09LI}p1MX6A(immKzt z6=>PU2<)DjBl@yC>L5$Zo?V0wVk#OsQp#XeKSxWf*BYMZr#rbWM;~QUr!T~XJ~9fb z=*J9Gm=iyJa4+V5mYYe1`{7mt`~>@&<`o~cBQ5GJ`}!j149n2%Je$VuEhug1ap*-{ zFXr&Qi*%N}Z&m{s@JU&|1S05?2Wa<`H}ld|o{`4Vtpgnsm9=J(&+TZ}pN^<}S>7g( z;&$6xUG#+9PTbG04j^Ovp)sa4pAvO_V#3$+^8a9k_3G-$ox1< z6UaCPOpU!e&w|=cqz1F|%x{`cvZ*%bS3&DJBfWmg_35Q|AnYXf_P*Z6eOW=2jmvmu zH*~he-;gJO?z43946FG0koe?cvqZqnY^DSq+88}wX8(l={gz(lk5xZ2$DWftI6N_9 zwcCg7S@30eOIs|1OX=D3QceoDNQS*Y8JN+@uA)H|jTUxO`^ynm_BAEK^`2 z>kMj5>SG`Nxn6qDuJ9O1WU;Mw>+F5-t;t=hrA3lN52v)5XGwPPs^^|v&&E#5%h%nF z+ke;FWTnrNip|86?7p|yCKairL>m8iK=xHA@Xv!Qdx3Y`m7GqD*$eKc)lh9Gzq{gO+Zi3QL5Jm z2`53 zBPG?i;0KH8F?!EpWdcmGSOq1_ZENBIkVAag0zFpS8kHul;SSKsGN;@C3Hhn8VtGDd 
z)Bi((X4yXIhy@=4*;%c+%W)G9yWI2?EHEcj&5Xakwk5Q6WM_C0MXKDYxRc>UQdebgEHBko5bj74TQ>Sal1o@$+&qhHFn^nww zlWd_F$hKSTZ_RFZOdbtwYMks3X)W%!j<#f4t{sIh$9RnZWtN@7oQL&TdSvU^)^y-O zlC@{Lh;PtU8*x^S`be7!%q()P{EmB_C|LNzdra*EWm-wa6do?G!g9kFcl<~aYYm~= zpeA3=$yCr}(w;%6dG>D}v66Up%^j{pfgBGdMxt5%rxEP;u(V5bN#g5F`l9P9yLIp6 z2Mc5pbO47oG;ON^8eAMp7w*mQyKwZ&j-qfUN zl3?W7Pjq-f7q1c=h-Z?Tv1CzDB}Et1qkAyz{sWBc!sZYkL2E~Fn|KPGa=v2>I(BI} z#*%)oz@d^{0{@5{SMOxnvYunWs`_l!^HfkP>p&8K1kVjAjF3{sM(mv2xTmkTC;7%{ zYdm($>wJF{U}z}UB>9&r9>{#(WGP6S8kzkNLj>7Xs}U`mvg>#2G8lVHG|H5GwDSA90hsp2o_Gg24 zP0B$J6SOp#LP{otwi@=7-{1U0bbzE0-@Fk4op;7z$=rRWkhjvyeh10WF+H;TnPe3s zWA2|SZ=%laF=5=&N><>@d#(wh)JqV*P+QJ(L`M5x8(DAnWxoa$Q=C=0!~Ewl8)|i`Iwz3{kn)=tT|4jLa+x-oMz;1fhnMpaDXqmgSHIa0V$^<>Q^#E-ks0?!_{|wMZZ0>nZ$@9_`6&!d6c8s_wNL=iZS0#^IwvKnqQ*NwXm zTgLHJ{U+P@)^k}Ajk31-kt|_9-`v1E+R(quIW`=kcnSLJfftH&!`Z6V_`@v>7oiwb|9568?S{*m> z#mlFhCeov|m^E-oxCJWq(`^VEb>MdlEGTd70v)U_sDOHPdk7%iyZs{i`VNnIRB?gz zJMI<0-y`);=~UhYv^_~8Gjj9=l;;gS>u(ElfX+BmGfN8V=gNGnrX1C$OTaco8aq#! z;|uPHTJ#|0D&&-?pf-&JMSYXom`d=T(A#7kVV275#%E7;r!Tt7_Rmh%G6uW(Err_I zD~!~_5|6K^#LB;DH?noA$vi)`c^8gxl}-M5*^vfJo?o?THo#sx9r5rx%yFP{&Vkv{ zzb!eW4_|!t~SK0 zm)0s6CTu{1NUmL;`pR9(G(-8jCo;aAHypjP_`2zjG|kx(z;JsO-1~(bKwhLv7Sl&q z-&%@r+sf8hDahlG`{H=>#pGTvLQa#e0d#&49dqA+0rW|oEHNg!G-)jWe{<=gfj55T zt~T!YNYiLu^vG;5?W+54*@CvAVRXBNzo0M?#k}Z?|#JgqlJy9GSU#&0TMR)Ku8przZSH-3vp^#?G1 z5YCHK4NH8N48$bilgo|fE{z~A2xB4}d4ao=Hr}UbJfo*b;qWc8gePcs>JpRfz~Hf&v%gu0Ie*xM-~W{ z(5XT1TxmxlLy$VSKN#MNCJ8$L&6xo8hutElWBonfDnEwZ6M)4r$j6(aONl=dbf9Ns zQcrSSCbN(WT`LP<#IPzIkV0SrNB9D%eU7pa5NpTSn z$Thk(PLTUqRSN2wMz=TEPTtl)YWKcrv9kk4VV6jj<%O~Pufzgy~Rx#&iTUv51dsrUlHwH|r3fIcaFH0z61M`v8Dc3o1L60p5wTwdZqCjt;y zH4w;S+2+NmkknOi@dyC{Cr~zWHPg``C02kHp!Lz30`075JvTEh9hyXaW1cC8A!2#n zM%Zo}Hp}^ZY^F#C6kv9d{aDVBMCExN*VsG63^%Df%+lj7yYyp>a@9zTPd>915a z$pb(-G_eYixd8nLpG)F9j|{#6PV3NnBPAUWNyy$&${SA7{3Okjg9+=1-B$@hBjk5t zI7p)Ltf^37!DjA2R06+a*GKuuwCrhwSAGm>sD0Soab)CI zxm_Z^ol|!(<9(TPilBM{6gGf6ZRl}006KVRvqgZk_~Wrx5kUFQM3gmx)EO4kY;!g* 
zh_{8^vzUs+6&rnJabF9c5Op0o16o;RdFVJ`IAz52zyM+;wp1qH9j}O*#9jiVgB@TB zVi?$m;A)iD&-WHws?CmGW!5wG=A|-zKsZ20^kpQL%LL{#N3elUWMSY^xKq5}7PE|@ z#2dcXjin!(ZFbAygw8l|=bS{A2RD3;~ECriwlw39kYR`Ea+2=k@nTP_h+? z0WdD~iRLcS+YiY_ylTnrkiHCkeeR}A?|%g#V5)UBu0U+u`x!t^<8m6khj~SBV`1Xv zu+AZvc$JUsjI1`$R@wJ|2QF$1RSp1!ecj?G*)s~qRKfPpHBUgj=G|u&depqMlXLVS5S$RVHcVag!HIV-obF9OKjc7prpCn?|)~)BX+exH{ zhI6%m<0K0eHaZ5OfSX9+!XY#EB+_W2aW1 z4bm=&lzb=}B3K-LsQeu{NzWuf+1+z@(J= z9I(sRQ_PLE7~fubjU$ZsK60Axs>_Qc4E8{{Z`Ul7Pbx+Qm8J#yoJ~yOdx}~{EK=5X zP*V~=T2m&$10^QOd}q=&w{lXk%iY5hGON2xF?X}+;23D2Ji%Vq;lrN-uZI+{d zWl}?f^;?1$evzdFo$;8$BxknGE^)^nbvy|y>T8Z-b-i4mAoN~o_XaazJ{eg#XCXaeFiV}EV7^Sid^k%+7_uWLy#bmL z^piRxk#Qi{7LZxp>n06WLrN5%mgM*ZAxvRGgZ!lF5Ifj6AkXH6a$7I7;8GNOEfw@B zWeGfKX6Ko-k=V$~d3FXMKObQEk+VCXk#YD@0&n2K5dVkIquXw>)1W8;JzFUvm7u;2 zFjDZO2}hT8``4Fjw$(nD+}+4ur1=(o7~1q~U#H+XwPzgYxm zOCXRUWQ0~XMQomk#`7@+zNK3mdUK+oTk4B8GDcPIhug*mDKzAvg=^ux6Mn)enH!hL z)=pSKJ_36BqoDz!|Ksvp59RK2GEjNU&37s7 zZPhce3m=zzC|&Wv55xf){H=heRxJJo28FUUG1@;+Suaa0m_f9E?94UyLSrSCd*d>L z3HGOt0WW|YS>?)%s9gY~fzF`ODm`!NxA-FP-a!ko0{w><%B9Go1+=NE;kUfGKgxx! 
zrAm|T$ePR#kAd)IFI?z-Hok?XebkQLcXAP9jpL#!@d1 z%nFYS`2?})*CoAICBrfTwhXS^688hnuu2``YYn--74C;L;=`#%qZr-CL_>`^Qgbjv zH%FZjm8Q>n7DC8j3Z#O&n=59FH{^DlOads$z-(jPQq&8Rpz*%{>~ z=eQf?idhs}S$QKQNXb|8-X)yN^{TC<$AfU7|3tfY7|;TtCC(dg^(U5eEPq$iO#-ua>NB=BO9Jup@N9Tx^Obrf%O-yhRSB!sQqa#D*H zw?2fr{_IR|jKD+^1Yoei5*wOuHP?Xpj1V4Eha6q-jOlQ5fX)ekJid{kbp!HRgU%1) zWC2tWK<{g6d&4d2E0EoO=XYHLok_<&z7-9|cX_v@?a}Zlcz4bZXLw{s!9)YM{aLh& zDObO$#9r`Fj;&p=qGRN9HfbidaL~K&gjEIv`_0Za5Qj z4;4eawtdL$Ei$TrP|1|vluM9?-s)yihUf89*fPiOeKU|rV;U_M@NjUqK;5JwN* ziN<0pT*S+FMvZ8sH@`@dRQ5=TzSFaWhnNhoh@BvPeGK7=Y)Fb-G~+Cn&dues7H|GK zSV@fBFlnug93na6#+^RN6&1+v7DZOqwwl;=Q$rU+rQY*@Wq-h=D@fV}hiV(jYGbai z@Oa)-E}iI;wWd>$>a7C$#}`2ba={1?CSv1wb|IF-)eTZF*W~r^o@I&^Ch=j@)2lvP zQ4#mbvHGIG7u#XWC~s$MH`8T3jBUo0$$G>zVV}^C+3SE^!!8R%h~wT}2MXLs69coY zIUkri4_(r(o-Q^kWdL~Db3nlGyX+Wj%-t%m)wjqmi*~eti9$`YW3Aci_LS_6ySZyj zf{-&{Nc;|liFg8kB!V#)Fb+05b9v!h^2E&x!4e4?bKd#VXA4x9rnnzaqg_^I!eVwV+u2^g0*v8vF(T&QuSqn*0skjhu% z#YqX!Pxc4ku3ye+ywq$Z!8wsEk@Zze5aHd|MPk3|Z6#`AbnH48?}dENVb|OQWb&x4 zvo5{u6ofnC44^$ytkImNUQvJ3bz=*c`yabsqsSA+k?+lGqWVL6+fJgCn3_Opv^sy; z`mnI}C97cwYP8*{{p;@i5){2IGaRoGgx<;xajt}>^vHDh6*@j;{JVz@mKKBIqNs7} zMq#7dNU*-*l~DLTC&M~aan+q@12}dX(y{%Jj*TLJ9!Gwn;JnW6#p{n==M@m5IBCa} zpzKHz2pY%+ObNV*_*f z=|Ok|4(Sp(f(B&sJ#a9v4<>pq!A-4ibT1=vXh6D0PX18>?LiL5ZfFtE`I|pV3-}(} zX-+QHJD;m((p!U+*z|#pvT_|zw_Ct`2PkgnjOpHZlRw35Jde%v6OK5eN9{`z5qV*u z7ss8r3nT!248pT2wKJ zbDg7HVHu@raOb@w9pjJ!X^RtF=bxp~Uy{F}gYzbY7nXt9jL+nNUWeS!rccY8NHWwb zNV;Bm3OxOqh*e7B70KJpDl3L`Go2E~zk}}tS(xCF88FYT98b#FTs_ovS0fe3&qYkM zI>x8Mrb3{P0GQpBb9iR_8|9S@smq}R{t)Y_y19ZRgwid-)q4A!J|%eOYn14TV8i9i zWCE{L^rCGA?VTHEe*Qhbk6SOA7J1x$#{FOtA2|3tm`~J>XJtYvaF%d%0QLr`>lcvS z85kQ)R@x%Ta8TC1vU-L$Ad?SRTrE1|h*UIfQeB7Vo_y>YBX5nV5_qijc`MMad9_Gt-3fA9l%)C!BfTr2QZ|pXV-ZjVGEsWUi0*6yXFldFJRnp z0&6rc0GXSQ!TjZVq>JmotwKQcRG@1~W9NoM1^7~eiL*HyXoP_6d<#HpW4-is)d)@L zFR)6g zABn8J!(LQ*1vK7EDxVGfncp2tMF8^XK?;i`63ZbTD^Me|D3V0IdLbY|()WaVR`bB$ zBczvYbhTr+8aRtH)GQ1Le>^qsl{h)NLUV8}faVrJMnF6;t@>J!ghrzg;W)`>1Qv(_ 
zD4-_nF#n`j1oq41eRNtMeo7|^Cii}UDREg}cThrzZMc%tLy&5ag5jGJVnK2K;E5g`f*%>U z5mX$X=NtpTxj*j%LMCc^xxyw0kIO5$Lbq4FpVrqipEnx2Y}4S2oE4$FtR&_^#HJWN zwePh-e2d%m)@5;FedlMp{X8M5KERjkp7EZ6tbVf)y&DwgcCom5&ooUVLo#LKkv{$6 z`K3jCKWI(mm6*4;T8Hh}S$nhLR(YY1|Msj(e=wH~Qs@6+?=8csT(`F2C4v$vf&xoI zk(8DWK~PaT1STCyhjd6IDo6+@T?#0nq;!YC1SvsMT1f%vN!L4W)cvfr-o4%L@%{RK zJbzXk3iF=#HLeloILA45lq21(k&@h4LEzVc{y0^*)0B#j$zsbDH0S}G(;n`f1h!A+ zTO)$j4H2m#;pF~6gzR%>0Fb%6l=}{hnYX{$z19{bxQWZSP-V~TjyVB5H;InjnRf~Ry`rhHKc0)x{BA@-2TWubl@UBGm*`YkEm@O8JU-c)zcuEN-M0n)`Fve)D zGS2P8MKE7VI+uM~q%$7I%7C`kY10pRw1fdIZ(FQjVqckcYz>T6YSI>l9v4ZKN&^)I zW;7)2Q4DtU@^GgUny8J3@>5bJPK_08KD-)9%FnJ7<}TErVe=m6e!o!^GP4L^EIIn@ zhR=X>Hl$vc=L!bVlN!nlA7KJeR?qr|dF<4H?E4c;%PUGvt+yyF3t&V#?p)W|obkpG z!dhfJ3MOh;pea){>bdhcg!b5{S-lb{+vgPMjPT6?Q2QwB_g(8CBQO1m4{zPnL_;rU z%Lz)W%&^s2NhURLd`YqhFo@x5X9l?skcIM+5uKXtR#)Ci`eI$Tp`xZ2h zv`NQ0Ly|LzhPP#J&|R(|zyIErFU&@OJ7TgB5FX#Tqoh!YIi0?0H6$>{$ky3yp#8SA z^;iWv*=~h%*;!NM?9ROquXJ6@W1e|UkyEh};HaE`Y};lB(#^H}>pDn2Wp|~0_W^V= z-?baPU4=?X!Rp}NUtyCY*jP|0v7BWCs!{FA2TC5#x$9GEw|cu*@H1CMq;&iJl5Zr~ z;XJ-NDOrcBI-bo3^{_ar!0Dyc?U}?c5jVm+1yLq54m04TFJns2Fn%!9C9u68q`DIq z|KkA1e_lu3NY~#SV2492?X9PNabilV3i}K9t7XCrg6Q*4Ep$pswt1<~iS_eHoES6h z&K2Y}0)8FuA1nPP2|br9Zj$Nn_)%&ouJ`JB?EzGwd?sGk7EOZ3Nqa?A6=)y4qNt;4 z5WVKG(|d3Gp_DDEa6z&s($P*nLS=k#O-0Y@d6c@ofA#Jp#{mpNj7^oC=QR&+p}Y!n z_q}z9TtM$qt2dlvtxK=7ybfpW#fV7|0OQ52*jpb&8eR#cBOp=nuxE^Pp=!s<-y|%kKF)VeQke#x+GVGOx5?X1X4k29p7tD>_G1=V(%Ss=%Tz8wL@3J?h{_ z){(zt-EmrWXy~-5R>Fz_lEAT+=RQbMN8DMT@p5QC9?yR7tfK~`sd)3ktA`o#$=i{w zNXB%1$K=f4y(`4b(iG@`tqF5I8T2v~JiLuKS;n3UN{XH^3tiKhM%h;b@v>zp?aJMg z8!c5jqQki1J%T8&8TjKr`NSFJ_j;(3YphjnhdT~Dyijms?s^f-_6qq>q_FnGIeJ;- zGa@LpSfc5X9afv07a}l+!xxkUrkiu%Pf8&CA>tHMNo%DYAe%M|NE9c(&kdfsBuCbm zk;FY1t;KI__0U*VGB5ienb&yUE%Rj+#u^$!=V1`E`>isgZ1fUTMrvJKkSM3YyBaQY z?5cP_K+dnq7Mkc-SbsxnRhd_De2ynip=;=-zPbC0^XYR*esrJxGR*%ZFW7>FSA#oe zaZsaf`1nG_>PIMxHbIlj@}(_#Y<9ovcolx6o9cLjeKr(STjw+CTVzV8%k=LR4?97K zv=LBjDJv2NU7)h));NCai4V`8XKC(%S_>8biNUAZ3VvITkEG3!-WE7;MTO>AEKM>L 
zw-0;|*@WI;iiN5gEP_m!_=Vj3x}n6inK41rjzr0lxE=^4(-31`ih6lY*pN47EM8H; zT4M_Mpqd2$OK_QO^w;OlRI6G+-+}+t*Pe7yG6fUXlN&7b&jg{i;{f*h9o~w|_ddS^ zH3J9G_QF5TH(XRd^bGdAzRt9-aSs^>Xj#>R7diU7s?+ zPEh!&Y2AM{3Z0p!X}z&ue4shk!h;As!7%C#7<+M5XEFqdp3|4azeH8jn?D6|(A0s2(Ygj=D@!aTk8LL%7B{HwvvTtABQJ#hK?%eceK9t0v)}=;2A0rR|j9B zVVJFkSfAE*;u75kXC>Ts8XQUin?%7>HCxR5QRs`n-=4hCe=~!e#R`}bG$}}%URE`M zPw^;UodteB#B|4y%Z=}u#S_A>!a0s9bxB~ia#jXp3Z`&Bo$2YGQxI8G8Ol}ioy&It zjkFZAl>SFR0M6*mV3{bf9dGR(W7y<1DEe+xR6i5druoiy!fmxG3}#S9_XMGHz6iECyZZ6;5kU7jU<%{@r?1kjmmx!rxo zd6riIz`hTyjYg8C$7kgI231ryK|RReyk^%c_JWS<#?E#hko^>JN!?)rsOb}p7H>e0 zQ2u>T$KeAZBgqLNHJOSEal0zuNjT1Q(i6Wy$t|Z_d<2q%l~w2Ws@`TUpj5reEu4zh zN$4IfOE%?d~T#06V+cb*P52#N$#{{-|u?WxZHVLqsAtid|6HU*#S`WkGDL> zIay6;+!K7S`CU5EJ`lIme5t6ukG0RYU%Sil#B_Vk1l2?g1x?$)+tnlQpC}9DXcZ9} zak5;xnA|&9$v7{Xvn~A$^kDv4RXqy?5aryTGVb$X7mMGszZB*=zO{Te>7&}Mg+RqQ zx6sFwpz-kFOh14q*y6yCiDQuS3bMqIy%f|HZ5!BNPQK=qqH+Qzfp63~j&+aj$L()x zM7)7Rcej}DUVZwdjw!y6ZSYB+Iy}X@)L&0==f_h3Z3`$=Y3v{H;F%Ol#6V#Tx{-Wy zHoR0j;Iu&!?8X@`B7WbZ)Z1G5#%-=J!*M29f0)O9N|t*Uq#E0ezD32@3BbjFl7G7^ zkD;pTjf_26n{S%Nt+u69!cR3oO%hpyfD^-S{j%`g;K62l+1vUWQ1NUQE^8)M!{@dW zn8pnd+LRSDv^AOrpsgHG_vaY+h#u4T0;%`_5bbSzK@xwMp-8hGc z`avnzI8*>c9QAeqZsQjhZTAv;PnZbwLW|sDcn<3FeEB^c|IFJZy_J}^eo!X2mo<@Z zev-H_GkDz4|=TAt8lAUSognEnQ(5 zR>{~K{9`%ciQ3@wTNJ#uyYBGjC305c>5ch`2W$0Ij3S5}Fv>8Ku_vlU$3xpVhY|+$ zf_B=+WGC^}Q+rjNkFTI{AP`-E`aH z-s|e6R_LX5mp>smxy_P7BL#F%@@(jq<|o3Fp!}u5Glg`^@3rGzjjweX&hxk=ll(9} zyVs$7BYAHcrH8Wp(VzeF=z&p92w^`B;i0kO;DJ)sICLh1WVDqPb%AYWs}-eb&NP<_ z#abP?)iHCioRPwybKDI>;gos6FN`C4>A7v@@u$?891@|t;`$qLH|5Xef!YQlJ7{cJ zso(L=@?-%wKdBS~Os=bN$3KdUd+<1w!?Sa5o`DRWhD3aW9G-<7o&6W24PN<~J-?Mp z%d!wPWmtEmA?Oh*9%QH<8~8>c@)}6V`SsbIEvO!3U+#37HEEt|&$5`T8l&E=^>Iqje>(FrrZL-i`ss%9= zXW;!|XH3;1@DON>&h`bCzBTvLZ9afpx(hRoh&s{R4asGhEBV}7nq5~jm3mYUm-bvZ zk5$s4uU1-d6j`QeDHmx@{mpqOrh)S)#ERS;NbAk? 
z9e0DO;iHm9i%TV!UBZFpXqZYHSP9CXnFyq?lAtn-=Go&d`lRPP<+=Bi=MOJc3XGaG z=mi)F@@PYv8C3l*6Cv!AXa9?dlsxQY?fYdSX6}BE5Ku_5Ol+{zUhLRm5Thjc4p$EN z)6vIxHA37#OUNSeJ2X};ta?6haw}dW$8&FHN@E43&)%$SiO1Bw!!=b9UKT$c>X`)7 z2fZJljxY1IK!mPbpC@?-2kvg>08AO-v*g8VWe+${mxGOKzlj_(`@8xZjYdpzo6jZA zyFloO;;go&#r69hQYtGOPtV*Z3 z-mH=#^Z1H}Mk`Qbfe`dP)*?G*MT{J83n;|V&)1CUw1!ckN*uAed}ew56Xf0QA5G$I zeO(-Uz7-6>WKd?@EFSCfioUU1mmI&2V-KqBZbE|Be(5yq|*#!*^Uf)@QqlfuH#Evc{dONp+I@FVT5 zqK-tUT5b(1Vi-B#Y8sp-*684nZQJzdL8&1u*XMG!8PNOa${9j%Fs>|zq3P@6owjEd zdgj~Zo;!w5Re7o!v2GMt+Pe41R}ud75-rSuErXSWNB~QBeS7h5T>$FC2*%49xigTK znt6+6XXyiFQ|Ne)J17}_tZ3PwLm{ebD4Lrf^rQkKwmfDF7MYA3!j)y7v6eMYo;B=6 zD1CaZPzr;Pc3Rf~5_BPO&Bs2~xEapk<9mlcv#e=ibR@8SYrpN0qz2hYHGzuxW?!A$nTm zA?ch2=?enh)Y^5P=4~cfyu=koT{HF{ET=|#aAi<&E7S0 z07MF>#JMA^Cf^$wZ(8ZmgU5j`Oc;YqWC}GIckBA z=UwKC%45ez?&()#1w`x&LW?7Q~ ztG73i!5gy}&7S*|#7<0mYSPz7l&m_fw1(1^=dE>{kW%x?p;55e;_j0nmqNy@?q+v{-AUy?}ynz-W0Vh z?J{=y!`9OCF_0mYladrYxOnGC6!dG7-NqN+2bEt;x&4aZjtf+>lRm|*!qdq}G zm1frYTwtxh2DrmHQgzl-h1w1YdCc9hL61jb@<#M;4(PsvS)1fGgmj$fqC)Ck($MSr z>Sn;Rir5{3_7kO!TQ3D~tTR7|xzDplk!|11dfoT*c6iHnU|V9|tXW|iyUPavZ1MVY1BfO(;b{}(Aeq?@d z6w&{2#P436I7hh+cT_rhUO8;^7-t;;Ue}-IHEF(%KD26)h*mo2L*xSCEYeNk(j^z! 
zoY2nh=eEk?zM*gL-tJmAU;j|@-1zImaA#8^+QC4!c>z$a2A)ZSziAinn>d}mtBd&f z5>V*&s^rE}S_79^#BKd)@@w{8n^yOzam|ujaTcr^6Qdw?qt&+bmZ&0DTl3Mw&J(9H z!*AwXK^j{p1ak^+(GY$XBEtaR-0>WvC@|yP%JmvIRiaX{c*LF*AEY1@P>9NcJGR{eWh!*pZQKCPYY_$&C6W-3i?bCUe z*>5yXu2dxaMx?#!Mk9ibe0D@X=>fzPYL>Ga&J}s(mkcaT*uuWA(JUe0F8lk1b zK1TfXaW%B>fGsnqvTejSSW2oEFX=oKg(F@JG#+!BIqAW!ntM7z;*~H?d=CJ~=i;>) zg;Tvx0!?}dqVUx%N^2ROPinf^GM7i~u+S?#zL&AxDqhPGxzs-Z(r8DH9{cuW5||XJ z9Ewjt?ourj!Kun}8$43DKJS4hbCGclh8)1!Lz7PTgGt!631O(imJ8U(M6sehdj^j` z?o_yYZpyKR9W>djH?9^ORLuU*xK$J~ZuPb;_b`@r;?|2OrX9!^1VfqEwZW7T>EUcB z-<3>jJEke%(d^1OavFMY9Ve_UWLyGC%-r4sZ}u98>^opmb_hT8>CDMJCtpCliXyY~ z`Wkbwn0=JnWRaedi0+}kUJ21ux z*p5NL=?2;Z7L;nI8Lh4vxQc>*E6Iwmk$dL^f&sHv44Xcg52W`$x&R*jF_Wc?s6(;U z@KsQ-lT6}ihGBH}0PNTdNF#&SA`}Hu8N_*az6+ z;uqCo{j`^{M33OnJY@J#dHvzTSCY?1QgBkRZ%Shw!J>PRMEC4^^Tw)y*GKEBd|(Ik z^SQ~7Oovpwgz>esinQ;B345*KcrVzxR3$bfpG686sX<+m#;et&GP1?uYx=q=bb zBSNE=h3A*;mTM=tIE!=dHZ2sQ?oLSDTSZn2bP5<#(zfQwHmR?{fbJA9Lz(p$dLzHz zC_ipl6%{ph&11nnp&Y86z|}56TU>OQZ?T(Q74@jrZ#NVs zWo;}s7%>M=0LdUn)?{olhRFHC6N!o}q2P5tTs92>`6pw6y7d@jte(Cls>vNh8k|1g zeZqxkItQ9AK00lKkmQiX+003E~H#+688n)HP} zeHtmMMduF_sGWHCdR#iruoKbJTRu8Gj>l^DwUxmKO=61~%?*Mo(!R{u-@#jnM8m+ecl$!V6sn`V`#Qnc1ZaZ3|w->c_@ zeAjO1oCw+Z(!95s)3-m{aW47t5iD$69y#il&Lo8A1E$8@R-D;&y9O*ZZ-7$&OV9)7 zA^X$<>87P-IXR8kgFn2}aR#;t3k4^cCCk_2?H)|*>rF8`Noz?slb{L<{RJn*Q;F=G zZ@Z9;_D-#^sg5beUrvkX9ZQDl0K7OiiW$m0HG zQmBN|sqibVS*HC5v=g4kwHgoa24YOJE0vq#$7f5bg1WWF;WOZ7xTM$2n zj}dupdA6k%y?w5cOm#cl)v30?%_m!{(X| zU`a%uDxGDLO+~H~6oMQh?-m4?-OEFBocu?F>qGEnM~2wTzY}{G(q~YKdb&65UY`Q} zbVO6BK-Swy4Xx!qj(J)bE|${Z6z$0oFd(5HFA41 z!-$${polj%^mp7?#swB=g{X8VmE3yoe>8){Q`iX@Q(z{Wj& zc&~l&qm@sMunVVAzHWh0y^|sju4uZ<>z#8fKwlc&U6cyAVkTeuIeD1Uz8Nh(6&@&8 zw!~XRQnhr?XU=3<-uivKJ7d}`4pt2c26H$%wG$!FJAp&>p6X+fg;ovf>C=?ethm zU`#DhFo^=!py}%%noIXTRv&cEfL+iUt3zf>e!DrwG7JqBJ>7`D7gRCyf-~a5a%tta zb%a_C7*A2~eRmt((G|X$UM9_@g*McEXTmjCqP`pOae!qMII!}9li?g*TP=mv5(Tec z_>ZrGY7t{7pLpl**R_4e&6QWUQ?jz~w}{l;5CO0~Oo%xwEvVp+>pe|Jy|+F4N_20SMU$M11_G 
z6;*5#V&1Y{Evj0H$l3jJT-2{Tu{B*)>)zXtYU447uX83Y4XOf`SExLP7fP zUnzB0P#RJIJ7bu%2mi7Yt<28nA1{ADhz$vErCwkAY5Lkj1+v2W@xzqYIxnYbqh(#S zAExi4jVqnBd)5vgDGYfeTF;jK3rSn6@6gB9E3f9+pOaY?U9dk4XaOPEf`?Z33Tt_N zgrR~~{S5NIisdz+i!bdNNeR~08Z_`Ugi-L3--rq+= z+?1e@3hLb$EYPpu=vTbRO=Cx!$(b+3+ECs%b}qq)PgWIOeiZ30BmF&e#92F_Q~C7g zn=np>fwhrsE82Y_Ld9X;)ryOoUU2=ER4881un_#N^T~PFH?^b|OsZ=HW?eh{y06JU zxdI^60P^L&Kn#0F5(av4x{d zdRbFnyU=L>H2Voo2{cj!3%hjz^vnFS) zCV^+Vc|I>@G%S2k*M2J8fG~3L*4B7Z)2M#L%u~V13H>;_R;DPyJF#9tS(D-{+|MxS zLY`kWbgbi?_A;~G?xNOw&8DxH5y;Au5T=*KZ&E}4rtRE9XTF6t&a*Iwd5eC&88JGc zwYB`$CRQ1HXOp)!XLLqCIj|gDxVUlU^Uv77Q_@apCwTtAaY!%bng$&bHr;V(T#M^Z zRz#1uZv^zhS5Pb!t6;E&EQ?*c8K5OCNpTEJ?VMrGp;Jb{zNh3|j<=#MD$xAOT$P(3 zk8<;a_?9FZ7kjTWs_N`=!OThMmdv(Z5v5h<(V-3t{?JN%LFZukLd@lNt*Z27<08-K zGXh$XUYc_lWgNQ$gDz*(jE(S2`s|%rptmgyj_x~E^a?1O#Gs*v)u>wqeAh8SFZ9)u z&HhV^ezz4o=~YwRY&X#s7ilp(x&`PVrOOy2XbZlzN_uQ?fj?G(`y$J5sNkj<>-qmgZMS>hsTSAka77 z4p&j)%57qZt0(A$>Bv3V_CUfUu@B*I-E(E1E<>A&H0Nw~MY^;!-zt~Z2~^8*@t9BO ziXDLMZ`(04#3bx}H$lY6xgu1C2dA`!GZu7vC`h${Ag)97KJFPQG1N%bwoP=5VCKn; zL6RW(&SFJ9ZXhzkop6__^iOm!D9Dyb1n_sNjU>2l+_4pnJ6KjL6oX$9QueH?V9tcf zZNx}^F!gJ3;RZ{S_2dCH-JbP6Q#_B3WcMU(xgsj8u`@no=Y-l_&c^ZT;GK+#4ntxj zJ6y%7$MB76A4Xb52`0wUUs?dV;D=Xkz7m{Vc_Q6ZD7YV0YUm|xT(qKg3*|-FAHOK2 zc~@R=-K*ys0e|B!ch^KEF9>diL<_~+*!oe3-socV;-BKiVJXD;q($fjkYT1_XkmrnO|2T?H zbL#-t=PQyiMje(Pk3)7sC3JpV+@_?aLqne;NH%aSr{7q9?)K>>8GRt}p${L!LWE*~ zbWLh&1hRg9(G4)8lrdWzLvo9JnGk7yVC$cx6nLjN4b6c;5V3ZmqN$$)5ph^gW4G(Y zwErq}Q`NMC(Tk`GnQJ7gw&k34hsy)qng~;#S~dN%L}C4^H;~prlj5s%(NcMlwQkTn zs4gibU@xJ*&fc{p$oFEQVJ=~}4R{+)5{o01eV~Le7MmQI>rdnZ>zy!7VKmKk5w^@51vMFQ{|JL ztG)afFUXyLWeok1XgaABa!wKJbz zdYk_#Dk?dth}hPR(iLri;93a&AW(fxaCsLkpfc4Ob4KE(u;s<<9NSR7H(AL8uozJe z+KM9;F19N5M8xwAycgLQrM#tympys`ch(R%KQ`*@Wvy-Jd*U4Nadq)sgb{0rBEDer z$$PW%wssV7mV@wVw+GVMR$dngtkqtw9WXe%{Ab{|8}6=={mBiew$j$#IqeL?cec6P zjnxNZokS7_e>7zG1OE^NAa(>;2eXnJM))ve=U(I>#-z?lx^dH%g@oV0HnBm9%o}67 zOGd^rQSd=WT5CANz|-Aq1sDP%3zx?=&px@fTW|-#$CnQScKprOLdXbMMHYude71C(?31x9RQJ5!EOI`O<~z&#mA6G=slt7PUFh 
zYJ7L-Yj18J>OEkRv|kGI7)G5b9ev*m-{E^KfD#=cJBFrc|B7IwS{%F9bXXrU5Ij9E zA~aI2YN{e@9k$FXfh65jq<(t1I>4WcY10p7s#bg;iyR^n!d zt3so=wQE6N$dnHau3IYRxpnpaXGHgA6v(Dq;FpVaVGpJ71Zm#Do~8d%kQJ5e`lG}+ zf|bQYom$Mh5vWT99;qgj_SyJ-4ne-kiSK%}Svd~`C%0W=m9ulw25?WmdHtX~Tu47X z0IFIT__FQA|5>R}b9g(*T5nVcCFvYylZr_-K!j4!UIExAhdq%$JpeCa44;;9WIeK} zJ$FV{ft8rL!`&N^%rz9VuJG0tvtGUb1d@OcxFs@857g;8wz1>~dq*W)|5^MlsogWyc!cP7DaS9ZOF(e#ryQPP^<-|vI#5RH`#1o3g!oP`o8`8f@ViJE6q-t zAx~y{(;GHeklwd|W*^JP%*)fDM#-UJ2REf!>3b5{8YKmR z+!((^#w@j{2)FxYjhPnNt%$?w?B|z1tk2J7v}Wp3pGz{B}gn0E&8Tl#i0|4}(e%XC;~yjNdd5j)mcoMX=aBytRRoJ@D)Q z`fTtwWT+b#b?xqzv7OvPz%K|cU2k~BABd5_OG_8aM>Ye-7j11@y!iQp9ft7EzgL?Hgq^Ot}Jl$k1y+XBAc3)oYEM$^b!!(u{(X?S&I6rI?(6Ev4SVMfA!nv{Pp zrrq86TSpRl3%Q;hvPbSSwRQKBdY9n}SkBeHAwIN|zdWdz)X#$rJt1P;Hx9Q;nsO5}GQKBIafC}4oQWg7Vovli7LavHeWGL5V) zISxq$1R+)Jdq6Em64%%ve^qT}@wN zX^}HOMQ{(6SfB^R9e|g*;0By1vK(_3#0W>BZ;h-t2*rWkaLL#%q;CsPDdEQ%mSCm{ zA-5*dxo^^#?$BS+FK$v*r0{`#(O6ce?S7Lvo{fm9F}dK2%^x;kjEfT%>ysb3Go z)s_vD7^BFU(dq+8J;z2zD&5*&T-ktis?LyLf=*Z43dQ44A}O8uS(|}g zTvPj>mM#`ZKxyB~=`)JinC)R%*wd^iMmca)IVJ$)d`|#o&LMABz5Ph;(B#B!;Mnah z&^M-1NT;u`v%(u#e_U4t_?h-bfpPX1Pw)ZwZ+@-meGFKx1R1r`PHW~^ujNunKNP>w!DLKo~fe#j^`k7$+*W3ST>bMT5pLaOBj6Wz) zY>ohK$DT*>6Kvea56aIzIpQtP2(H2t9b$AfZaxqj$HwPSVah%La>mmQ@1L(+7ffm;w2vL2`uHML zSO2qrSQK?fwpj3J<-}))a#nnJfw)KPn}ZZlj7PA7PQ!)AML80izIo79lvKftz@5lj zD+qoNJ<{7x<_acz1Zxr-P;d_L$hN>1U{!lF<_Mlf5Z)()Tr7wIQ52Tn4wfq(~Gtr0T@+iut)X>6Zl-)6!=_fWZD51jo5D+ia$LSxYAiN|7EM=EWJ zKEu@NX?QlcokJ-FU*td0V@<+Ko%G10|Ht#vltfLKK(aB6{K#r8HAhGsoxl@kV5!m4qbGA7U!^mG2LlyO!WGAnq7*rx<1o`Go6 zz4s4sr1s&0&#pb3Oa90n@0U>za5BvIMn26&_Np%E0gq5B7)b*)72!k(7S8I>DC&Z< zA8Gs>5Qf;N7~4-rROr|5&?^?gd72CTFk$4^>U}xz z3L8WiUr-7adWNk1u*S0lteOR;kk((=+u^{6xdX@NV&!)Q! 
zk^vykE)njA{d#?2*zHS=_i*|BMd_pWFzRE;o-m>ep}5j{c}y3&rHiB0EB9Ub%dEkx zKM;EY37&(W%w4Q3>MsSBK9z}tzYJv-8!2ERH%1pN`*gx+YN(Ee^h8sUkoI=uhtgHYaM3L#k-+{qoYhcB8R9VJGSNW z8mZVS<>WSy%b7{j!pECt!tZa`PJbbG6mrayVkmmVV1Bxb!~ghlaOTHNDcZpcKMSW4 znzGU=@V1Rf?PCfZq_eDF*i#}lf~$xwF?}guYoZDJdCfXzuqz6(y5_m482)L>fBp4; zzmXeAio_2OA&>7)QotcYYbG|L9P~7<;nGf4P!R0HwWd)k^j!U{!e1m zv4*2x#>e0VGoH$$Nkz4+Hkhx|zzG4mYQ>%R50h>@0b=>gTI_gsP6WUc|MP?YaLJl_ zVzSnAi?6paOTfvc5cS*zYMl@)Y(Sch56xbWz~XRQzyJmT=|YxGz}AY_pDtDfl5tca zusM`!$STU&?zb?)8_#Vb??5|11xxLa5n$N(xBz4&pcA?H7+CpSLw!b}fAb{pM;%7k zr0m+xbtn0S*SeLF5vOG|+4v}~d(fvWAVz>-z#uH|HrU_rqYYLE&V!K9glijCqag<7 z_ck{fy!w$SIM;u8A;3Z?qra&j zG?NT#o>SS9*Od8-!~RFCen*Fk)I7*Os^{yoJr=#0(02LolV|hP0+uuY(PrND zD9@{4{9RbDxjnK`g)DxK9KDR?Iv!Dks3iF)L}ib&+F&1(n3#vBxSjXJmJZnxEaIwy zIf1$40u?^vns|wwIIdcWWnRQ@zIjb3>t$dmf*#{p>@tp~(=xT67oqv*eBDOav&3D@ zTCoDExaH^x=evu*>YZa*g@hWB&s;7Tm&S5^JpUz|rADng309*lgG!v4Al zy=ouHwG}D^8h|CM6UnX61rqVxbv{yw=l7ehCME%m1SgeD?g%eHtw1lb9c&|GMZS^1 zZk)3zBFegW0!A4CGqeQ`4u@_<$#AWdH#BxGNtqC5U4A1Q1%TW?F3sPp_>RBXcAQedeKPGGT3c+rhwI{cTOcm^Py{T;rrCS$O34BwpC&0-FL2~rdtDA^W z50nStnPo5=14YSjrCXfBb%f!FsC_a_j^m)yUb>p+UAp$4-Vj{uM0~(%P>GJga!BtR zDu%QjWOq+PnD54CgAj+Q3((q*P6OdTb0O|2<#DMdOPt!N-YXQ18k!QGZscz55htZ zn@s;So?kw%8Mk1ozMpGQy=NP1d1)MaqF0C80ZfHSHV^dbbEM%n(MTKCuI?}H-PVGGnLeuP=7xHd8LqvWqo`rAGE z*NNR}B60@L1dGn&&My*zx*O~tVn5SCfkWd9Hk%6ri56u`;AObmAF*d*0Fn-Lyt|=iIGdK)#|&>j{2O@i_lFR3gaReQ z-eN7w6GW>TE$q4~pGtK>=)3YFEEz_4LtSp!WlEoyk3T!7s1GGGnV!3?YZ!^D`d45K zAS<-GVyL92uf938rh50c2aoG;&RceK4hn90P$4b4)d#E0TitEL-DlsX1+*Y1yr-}q zstVpZicUuo*wS2kHFM4bj;UgIZDEKEf}gx9kRUuEuo(UqdpAeyy{mA+mq=mfiRkXd z;p*u0AwaIW1Y;Z?4*)BO^#wA!h4|Q+_){s?z(WJVu}EbTsQksa5Zk+2md~jEsZ#zz zNsyTtfu!8O1m+G>`T`11cBnhiX{GYbDwJ|5_EF^{^@^SQ*U~SFVGZ} zA}oTq7XB0qQW<&cJZSf)lF{mHXnc`~x5)go~TP?X{Mn z2jGI}3)k|&dUJJwHofj3hbr{m6A;ebcSQzAqc45-+^zz za<;`ok!m)p_ILgS_w+Yuthvm5c~=1>+nE+mv*(%txm6Q8z5&zMvhu?w-Z1l2>Tm|c z_vAT9J3+I|E>zHZqymhL64aE4dgBaX!ekV&kFcL$@PBX=y!d`ISw!zh_`P63T<0*k 
zfX$ud3DYvMiy|=0--rZ6>?n`b0l393LNrkS~@}-*HC?y|2LHSH*c6o zfO`*edx*b!uGG+B^FJ>(K_jdi7*O5=&JL+^K4KxQ z5e@F?zTeKtYof2$P>$w~XP?d6jEp>vAp7hfIoqs0$I zmTN=r=S9K!{2m}ay4_udq%)}WyI~LfeK$~TVEn-X`49IFy3l}q>4s)0Daa;*z!J-F z983=-leHv|&wPj}ysaf+;A$|yyN4NjV*`#4T=^b2a>%Fd-+E?~>R&qs5^ji?*WOB0 zwJg?yF0|bRl`?KIy{t+zIv2eyNe)8I1P1u03At3vuJ-NVH04lw8?*TA}PKVq} zVZln~)Hqn_H-Lz=LYl&FO1g<;HpE>NC#f8}Z^QNZ4CYUDroVct%^ImclNsS+>F4NH z>MsL;t>_e6`uS3Cvy4`}n}-K_ z_I4srZ+Gk=4gpt84BAR|+Dr6ir`|;3i7I7R+{=AdJuTsn zTktejz>K(%V@nl1WdHr++Wh4l-x=b=L-E0Q|93Tb+LgKsyyo4A?G|CV8VokHVJSpk zz6*sgb62GT^av2yumG0qKaG$D@38voDic!IU4XdoU+Us*Bo^eD>=#zT@{bt#OjtJ= z6JJC+Zd5vhf)MO5ImkePn5tBiEM~6H8pJo@9_qntuQcfOAg1?P!2G+yUOgkX9wZ-vs^o#%20H0vHI=m^U zeQ}%m%v&vET%!6o-5oICcF?zcp)6IlDN^Z^#RnCb^UGyq2;^>^FNvK#F$S*T7MjD; z_Nx!sEg_vPSYYG?RYWpGXa+*&F4rfd2XG||cAn0T;UkOJZ=4cL7SKEOpI?YrITq|y z(=dOVGQgG!_Y{R&uWgB4g-lAJua(XWfDYz%M2`C;vbq!|#}H3|NiXVjjK>LPsX16} z8Z9vELarg?2ZZE2rXY|0CusL}x?&FAybhKor3K=Q8Y16Fj6hPdeIu}|bEa^^bA?P0 zlKNrj-FASY31%QQ+!K5Uwqdm+wH_7e|F0gI5?e~9 zxf~-B%At;A6wECF(5qn#BooN<2jOV7^O(c@t_4vFu7FII9#n_i8L{s+!GSs!L;&p= z{e2amC9?yXRKZpm!vFD{6J_x0cN7h@OIw*t3*y;Cdg!tIxO(gA0SQV%CpWUb?R|mJ}0p|8WkmPu>3@{Ey6g*eLYR7;(7;)-bkO|5fHfJucFDBd2A<-Mrs+);dm2Zp- zFj(J${LXSxewN}tKUWMEXA)&sbK5;mvlchkGXds^0DVxv!=wrk)?_FopHd3xl9>Q(=25>i+sCiIjejRH0 zFvA7f{reXV8m*BpBTv26kz-{K$shD9TdzonEdmA`!+u%6-p}u;4(Kl6#{ha*izq`t zGGbCIG2XldPTB(!HcGyG>Ul29WXW=I1Hnz-kW5cXVT=U>7GJhB`gn7#0XW&AD2vS8 z27n8wI4hq6yJZ8>v}ihuIw8!X3bUHYHul6+u^z$f2Pi?_Yj#^#?aI*Mil<*H-MPTP_6Fxba> zax44jKH{_O>OtPCV;3q=m#P^4CDH1NnoP~m4Ry?HlvAVPoKug8%NEk_%}Ae^lMq$O zCNrB~qq|>QHIq)2ugjb4m9LT`bVVg3dgPR?VsJav|j?RIJjg_Jo(?IyuPP zPwl;FscPp`zmpM2_~BS##sC-AvfHThu1k^_uv1lmo6F^sWgc4taEYQRi|3ls@Ds_d zva$&t9v~rH@3_9plfG5WqG=a@g{N~YvnmaD(X%5LP7 z<+`yzb^ilZ*0rr$xSJi;kIjP|w>xr(-Vw#^{P;ilIKHeU5!7yK^9%K$=(PcN*q4LM zT6B|!kvd6Ns#s)B1GW&%KSRRo{{lw-Znc-)JMTufE|Zk*>}RN5KdY1_ZBFE8p{D&F zpBExTO<1@2`+N08pg!Wd;{N7VcaA)kOlA#`pJHQN5hop{`1vvYRG`;-e!@J&Zld}t zP)^W^o>#Jk-5x^#n>3xgwVbS#x^Fl*CeG!errwjJ;lFuvs;_FjP$S=%zUQTF!#P@s 
zU{+Sd!^xeuB^KoD(Zc-}gaJ)g%G_o|wl>M|Y&oE1$fk%|{jUA_j_>vu<|83GQtOVq3lCoI&wnP;d;bB>o#Xiu!d8SED zP57AwA}jLhSX{RbHM^dVb?LA}&v4N!epnoPwMP2Dy9J6WAmueF2(37~vy3tF& zcq=Q+D6lHLDe!@rQjC~d?b2wHe|xh3So8?Ki7r$o;VQ)b7}Zij=3&l5`sRN|R9J6g zA$cDH_MBe{HP29n()kPb`9>*m)$Zs>`jo4F;v z>Upp;sz7=Ozs|r=0KcwWJnOA7fSx5qo41L29_S?h^5)PSIgn@Nt@xA0Ns<2X`vB65W zQqYwGtpdpNbn~PxJ`av=Ga}uDyDJPhNvQ5kS-T2z0oTxp$Ib;Z<4J3S<`J$w`XHk4!Q9y~R}#E-33MbdlUkoR?Sb zFC;xVFXXkqn_Miq)ue!c42GOjnXu+FlGS)BlVB?&Gf!IS3()IjJAe0mu$hTYcWL=6 zF|y>tSZ0dbF8n5=hmKtqVHo(?3ZS7IauOpMmuv0{26ldQ`5-a^7=5i7Sw^H7bZGj2 zO5!QKVQp;6@wgZw`5fsq+Qj=%r_RLtrOR>fxsYxG719m#zr#);V*Eh;znAx?f3%j~ zyF1rH&$Q>ucS_Od+ z|5C91Up4rP#9nCoR5-Z|JxI6zc?0l&r2h6qDeUYyL0d>$|uli@2_ z17;IbivZ$Wf-z`B4`O)?J=5Ci=4ua>zV&~r^si&8{%+p~s7WAAHVeAB+H3zlf?eG2gm|0tCVs z3QSx>>|=!I<3GlLlm=+xhr?yN3ThZ!)_GKdc7|n!UB-uqEYwmWW|{>LNwd( zJN=o9S6QA|M!B6#1qE{-)OLrKeZv~OFNhioV#I{ z4FKI`ZkuKaFf=6f%J;X?yC`+Hvjj(ekehkQZhgW|d8g`xq@2eUI;bvt^VRo9V6o(3 zA*^>Sr}zx@P#+#;qWaJcye20gv9oiJ5_JXgQshxfD|( z5r#WAuQn#Iiuz%^rWNanE5FL8Tl#~J=OIwBlt4qoo`Ldj@jB4Qii(!cNQapQ9RNsC zMW)1Y_S=tiS=I%R4*CLCE2cj#0s=BRb@|)oBIyl#n{$=p*7+bz$`tk6X7xiS{d{#6 ze==v@vJd)fRd@{&3PL?QnUYm(LGE?Zl>sz6Q+mCD0GsJ2fn?1j2mqPV&$R5nPpYBF zD-4+bTmSt(uwJQ0Cg7v>pwf9sXy!FGW;5++u~cH!NFoyBII;vKYAO!)eylBqkg!f6{fL{Z<#lgm|;{d2ZqQS_39&nwCU`qY}_V9Nu-!8BtUXV$6JYfskenI`E z8i2>+P?J~SIl;`P3qa9Xq(^~M?h{!lFe@=j12yuto9>3tA>g!j3LSh_09mric`PvA z6UbYRI{J^|BejnpK2nGgu}J`~h5+_FBdE|YpZQST;c>JHT-~_NtVih%~YE>OVxBLVbT;o6@gjHe&PI&}SAzwUpl)ZDw^4Z=D`QMXlq zYv~Hh0TgwvrxG+5o7DRrcG?bo(4RT+PB-0Z0V2`%&ZGHf99ri2&Kk}7fE7jiaEiodBKRfxw;n7kzq zVrT2k#e5E0f&GPw;eV_H{WU&rAxf7Cg#y0Gldv|NUbxht(!PZ_Drrd9Sp2c|2^!=< zi9Mk?oUe?{0nl@}FE$;J+cEbT0gh*?UfDM3{5kzmT|j0Ciag0QreegR-@;QpetvXI z)lM#F|D(2qTMxb;@x2_#%oI&ONe7X;9mdVWNZ{@UB=^B2iMp7WP?kDCph1&TqM5}V z6z$?g)}+fns!u5AzP%>}TzjD;Ac6(>h*CWF>`Arv1p5T!V1V`?OfQJ?j0uDKXpR{; zJ40FTR1Uo`*mtGZGEi>+Ti^aah(i2Bq{MnWk5!NEyg1QMDn24O0NnfSIr5;3zxL?F zJT`!-!^XZ&o^)*#18{ybZU!)%?5?49L8XbXC|Eycf4-miMf6@Gzs+!leDsSvaXcId 
zgj|!|&k~IMBisLv+@MCqpImZ3-%0FHVn07E%7YP;4Sslm;!PYhS2JI(4Ek@z``K{!jn!j~4IGh4}|>>i^zZrIfx+CO$EE0>S9uo=ig%dnLbkC%S9 z_aW$B@9R!ACwj*lvX6)@8_dsXox>I1N;`OR`bvzpe&H>`Gqq=^$GZ=##x16IeWtgM znFPg!t9QLIyDa;@_4`>q3$xMy*95qT&lHA|2}5`Uv|;~K?Cy7ex;4;c^KZG3)G7ZN zk_a?;5m(uXvh;t|WBbitrLtfn|CS4Z?{B2KH9%)*Y$5yadDhoTT*d#6)t3Ud^M+k2 zORx&r9rkZ})&vOT%)epvPhQ$t-PcMEjy?%D|4q-zi>UjnwAJrg>&bfk^|Kd>P9;pu z;CV&=9hL^3)eP!Me(N_4^}l`Wvf*i7L?h1J?ccCHPae1(yG8q-5B@LxbgK$H=-ZX9 z)qj(LM4%V=|J&35?ypjg#O>H~Be1c*DDD312J)w4`<*rX)3N>5{`%9g{mn1`Gt2jH zb8IZepDWTUtMU-}*$!K8Wpv?*cx6eu6x^p9%m-SybCmUie-z#3L+T9mp|8Dc3D%)YE*V5TK zu_mh-ug~$bg?(oqj4!RB>&x|!;0+Juj1IFVh5K#QNVzXSW-BLO^Ih~`Yo11fQZ zG&8i~;&?3{;#04&^7-Rm9{Uv09QA05_VHUu`U6bu?(yuxn@+sS(i4ZZ9A~#qYHU~& zd9N=t>m*Rf^}^^0|NaVatNRzyxVEwhtQH9D4E*ieO)2-f7F!u5oL{=TnYziZxFmKK z*L(RrdYWh#aOkNFXRzVps~;OxUAhK>Ptm6i^j$LlZ}{@hY?HrOB+Gp@9hCays9jQK zR!MDD-egs<%74Tc8}}T)R@pR7>G(Km8eoi8TyQDy-lhy!+d_3-n9x|+T7%=7JOG0C^u7x2sm#>&t?@{JVO;cSNQarHf<{z zCU5z|kH^BwavNtYqVf0l`6;5)SME6%9xXvy1aFa= zP@cW4MnqAslR<>gSprry-BqA@Cd*#>XJ@^~eOY4Y!ec1(yzjxUFM0UhTrc$k`qNxQ zW9E~{0Q1Gl93%4zd(8X1bCn2dN?Ogwuaz({RmvRyf-AgGC#4_S_wBWFY3cYSHi{d2 zWxRKZE(o2?qpK0X6uZ7sC2UW3B_(SXEscemPa+QC=8EC(;_Q-b?>L5Byrz>FjO{dE zjH|drMy6X7H&NU5e&ivZ>|8B5}b^hkq1rdSueC>qRTsQp!>eH%3 z#M)(Y1h`S|H^1AWYnK(Wo8ffYrkWn45m8M+%n^)Do}SE+Ms*4NQ|?=Mv3c3`b< zJiKxtO-ko^aPgSCr67KspBe9>U|=xblc_*w;I$W#Kq(mcG)(ogpULOCse_-DEF4Aa zUum_AT%jul?btBPrFV1cX3(b1qOUB!pIzJPKJ86U z%fW@S+PCWKdPWX7os5kG-)SGGB(cD7W>ouReBh07p3QnE1LQRn` zDhKUVUZQd z)>kCFknwc$QQhM#hT2oE&^G(lE5B);;A0?A*oZ@ux81LDurJ21ZSU)R)?Jr|=%Xsv z#nmQfB-6xj4z#+Cwj^5ouW;B)do}R1&K;z0s+Z4ADv84}&|c`sd5%FMplx2E#*2Uw z@e1!y)TGu{$1D-CFQ2QgKCpn&8629uHIKT>`~dn{7rOkZ*6(2S%kU*h8@2q*E3_aR z{o=I9@%v98<#BP)BWHkJUHYTZ`=J`qYb#3=rEw4+AX=gl@1t(l)Or9%(Bg0IMkMx;6^cG*)XJK!h8x65q z{5Oz=*!@D1ZCsD<6j4u>gSbyhP)#pFSQ#2_eG*d^JCIQ0b*P=YAGTu|_$s4F|Ak^+ z>#dne?$7+XR>~glFA6f|@Ix}pn%pb}Y2AN(l!_fEnT7L}>n@SaTnh%y7^CTEsJK#;%6j25Q!duFjiPRD7QhhA4v?8t7 z_|y=wCQ1Etg+wLr8H@u{L+)*wW3xI}A;z;+sG*}}tQ!ui_`s$<Atq@L~61!}}b$H=jp_I7~ 
zj4=;qoPE=a)1-W+h4@EHtS`*?44YGqv@Z=D^=B#mORV; z{{A7}m7+-2KoP`q?43IUbTWc(GyjE?;mehm)+zak!KTqIdAmVTH}gVnlk>U z)m?7pv(~hSO!)gsnabJCnXyohdksQG_AWBXe9cuy7<0--@>_}y#-NkgV{Z~sF}$Q; zeB7{oe6QZd$NrQmPh;-XlzCV?5?N^dhA+Q~xsfoeLa-G_oB<)NnyHuo(;T$j{R<8u z9NIQ|zG@neic2(u(mjJ7(89|J^mjY<;k^!#HLR6)kL%@Od(`|^{$j7I#Grx<7i58zO zTZ;5DZZ&nd#!m>l6ow;WzVr-pyFgE3r+KT^%Q`983bRaVE8Z^%*IskICUi(h`E=;{ zTpd@QU3n~(QZ2F)7a}5mOY~#w50_i@@|1PGXQn0fFsrXHV~bEZ8`)IY*(w6tH|ydN z5rpz21R0r#@?IQ54CYAcjZ*3ewQauu?yb)~)#kU$?*4OH4SH1E_nm8^rDe{+v6A%is=x@_&FT4QXDw5k@aetG zWk71OXy$u59v zLTXy2yoNSc%#aZ!ev<5>(`vv&*Hs0$U>1?wl4#5EL< zrDN8oLL)8$1c}DgBRMBGH@A$B!#DNP=U_uE7Z($xEm76|tjE8ZK;I%Grd7gGEiw1# zakI8ok83!z2qd3B2X9lH$iE}@v&|pjXrW1jF&{p(Qw<$2rwy-zyvnF*85Vvou#zZF zC0b~YjiI0)c)yF?k*v(^_H6e*Se(#san)HefiYk(ZPpHG*%ib0u@cd#E!q<9)dd4p zUCdWeW0Q=RxT=5NuXOGV^d21HhZHDU%2)K2UDFMJRQtKE5Ru=v@T4^Vo?~U-;gc_8 zD?EW*G_G7LRyd>Vk%gOIEPB~&qVFtON4GMOJW>8xE$n)aSV2MIgmME=dZw3`m)F(R z<=bo&ro_aQ10r8AMrI6XS^zPoyi&;Q$ol334WgzK%<$-t(Dus1)^_#y2y@KB!U7e} zD=t1!Q3O0%#`gC1Dk@5VSmV?RV_oh5zc6&)2T%qQVvB`(SL_($XN7i3RVTf8Q>);UZD+ptI-}G)B}gQdG0zll-F|Pw zxU;04KU3^c(RW6OK4W`S5^@=i$gGro0=XN;<5txGOB^BOPiD9&=8olT+FThbKp=vzazbv0V)r z*$Hdf5v&y{=CbB*P)wh2$#PfH)rG8-ITsWt{A@hkfnAKPtTvqc!hMh2XqJ}dLH=6I z4a?PTIavNits;`G#%nF|p{v%u-f|ivc5_j?Og=piSyovrd0zt=kaeJHVCi6xom$gu zS|ewi0Krt`GA}BFuULFU>&;PzeEY=3R;dSLy?+ynG}PK0e~&LH5XbhNa{T?D33ujG!XC<-2|{JSS@ zn592Hj$vtmwJ!`3L%XsH`{~=;`+e7=13UZ>&%s86Cw6IVATfivl99z{$Kd;Gz1uW< z+TJM_Z}$8$G~$M!Kp8fPwG>LyiSk+7nsO)@5cP_?4HeHKLB-oO$`Ie)!2&SsdRgUuy4cjd0td2N8zs(G?{I$s?21=JdaMMXQvFx<+B zqP78M&L?zrg>FgnY=+_2V&TH06Bd#J|JkUhC@vqgqPTc~->9P`wb$>SKSDah%RYl> zBeA|1bYUDvW;nNb&TBm3d{F+x9k=1fAR#q3%emg-`S`w?8ai%%B6jjd|IpcWv(~h+ zUbGF}8MRi`RCqO_Z9#mf|Hq2cb9@ripfskHerx>dRqS&elu*BL_j0bSW3bP#0P!h> zpn}f8kHy4te$tA4XMGG=c1so0X2cP`5-TM4S!uhAoLU~X>VyWvA{j7>`k|;EewFq7 zZA5nsX3BkrPtxPK)4+YILLxhtKVz2HbViupsx_&Z26=sT3X`wm@Nrkk0nK#&$0XOr z-k6O?UgM_AX4_6|91P|+OKR+RPJg7xh|oyk>$pBqO+`g@{u2FYr9*1ZETFy<1Kk;! 
z7R7~f8io zLFZE{M_yS&Qj>gNk{lIP`l5wojuRd8rs?ExMGD@(zYOO24*^pnCpqF6urb!| zFD8QN?&pYYFg9Hc=q%II5B=nS$^Q7=vi{8I)rea<29~MYHB9xa#|by-n3+ZFyn^$_ zSp^3S)Sid4j_KqG*wEFsWABHW=Bh)M;^t%SeI8U!mOJNKS|DZtH4w#%)Dl4Mdd@m| z>?p`hXV^Uj-mRuNGDmcOne6ub{Me7y7c&sbykXBo6mf$1oq5i=(ArLGa$$Lt z+`byVp_9qLvRwd?@mVpC^;!0agH#Q?8l{-EYvMoB?jywua_KdSx`*_?wB0ju^5%XB z2HEc&I1gPNl+i|fnb|m)Pk=C}JU7&{sXg=>qqXS9LL3(jEwglyyX?y%152&6OWAy8 zMCnlxw?)}mN#L&s^MHodabBu9V9}xzv03Rd%ZkzYAUJ|eo<=&cXxLk zonbIKTm*gjyQ!t4H_5|?iDi+Ll~*G&t;%?B?z77m`#5TEF*(cAcg>9*27DLmZTjkX zHOFd8vrlK)F{Ki!QuP3ZfieZHx`*&fb%t^(kn1sU6&DO0)(wS1s4bFY?=<%M=4y&? zb*PaUrhPd)JagsB5&MVXDxI)cKHA_2QQe>!YJKSjk3x8?be;VVo2RX38qYFXp6^EcK0htsY`MzgFH@@uwbOAZd27|nGzF! zgL-JtBK-7HOhqKxl{cV@^-lBB+^a8F_5cU*m?ZwKEN@Y-RB=UorXp-yVO-((429U` zPnl53E-*cdYXkxN$%HnR0rB8z0|rU3N63x?ZRRrE*T3Oq|EymF=;!zE*6lK_Z89>l zHKFp5an?0;d;|n)7H!XOB}L{z3%1v{k4S4=W$mC2$?e} z9`B8`$@sbHj_KMvR3!E)O?10yRY$0BX>UfbZx1m z6&C6e4{(zCO7kJQ^H}VqRiQzX3960WymL(}#=L7Tl;jC+T$Y#>M#n|}e7ahwQl-ap z>(!3npUDG@F?;e`q7+|PXNU&T)w_2iMF!xui$HBh` z#+Y2rvHu12mUepd)XVy32x*_f;V9aL{Ozw#c%s&BgRRxs&yyd~0ZN$2j!Q)HUrbo* zuHjZ0JIKi}#oLCsLE%}R@6nOdSU(DLS2qgkTyIe4cSn9bh@!|=#wq4I@DJvc8)@yB z)hiU@>5ka`8lioo@FHT!fssq@=I(X;>nXf3t3w! 
z_?e3-Mc=ft-5Zk<%3h@Xd~pXQvL;xbRpM0?Sd~8g-Pbg6Hh~!L>S^UAA9r2VIvK*{ zF01j`nc92E&37aATiuW9^_zioM_cw|^~U9TS7_=v1|e1+xxv05RNH^_AayEL%w`Hz zYmFVtc(7o{e2L`?w?X}Gk1}CJW~7te^%ru2WglmurRykge}(Mg!F~sy<&(rM{M7LG z;W74>P+Uo?yVGOCn87=7O2tnqaE<-Lk}%uaQL==FJ>%224eqeg<~QuDp%OoR!lnZP zz-$Lv6p&hZKgDJ4D`+>Kv8x2Fg~N4>-+F!4mFYYs{+(Ui%1ZNyTFvOSz;6}KFYEaO zFkxi6?6~meP*gDLlfEBnPS%$omKjTAL(f?zzxcg6W`6HwzRUaEDr0>+OBb(GQO>>F zP;{h5G;bMBPP*JT-?#GC+SaLcM!kIHS)(vC4?V^zFZP(g5JR4I= zYNC>RdvA?b;zn)8%f;gZ&VlPmUY71Xs>dFqI;V$3yrlDId+tS!lne9pE_i-)5(($s zxuy-bam9v6i?#6g$X>$HnwDr}Yq|9*Ka5$%>0KXn_)y-V6*ys@+-lU?f|60-s78)C z5^S5^l%qFWX7Xpga@&t;uNk6(4en$6ks!BO@4FfMs!XMZL)?DAhkfJ4Rcf!VblmJG8%44;vTBp0IcusN`U3qiPp@xFzkK*@rZMSSyUlH+GAr zb~tgi>SYUBxxKQEy1lo=(~1@gE^E_2J1X4j@YHbWHZ;6M1^wnrfwOhLl269aPCOb% zH;!3G!^|pNd@U%1Pxp6q@;7B zxg24nVK*wQYSxSu{>T@?<%^n$8RMh0AoshwQv2+t@NB6_)1i@&JGZcd_u~7rKZ5hg zcuWpC%HKXP@%V9`5Yx?Q>mL$Ac6bd4qHIT*8&z0q+pe0NH9nkQct0{1W2>vKt&K~Z zK9DIK9;?C)8{m3>{rUwcFMf%a|7MOA^EzfJ_Hlu=CO?B;{*wwTdOo5>5ky%a-#hil zE3AnMIE!ml#F=k`qK)ly>P5J$T7P<`r(GVVJ&4;R+tbK=v*~vTJ$6Xx@$xYk0(I`= z9mIKXA)y(|P2O2l*0)6G&-1$n7Ta~*k{?ZJ$Hg)qZg-M##YQ|ZAW4#_ubvn|1IXOR zvJ%0Y$3vMwFKMds=JtRAbyc(TqxZ+$%TFMDloAhz*^IcgvPW*%6j*I(ZNsOuEG8_b z2CeT!TGP#yqQy0z@p^8`ab0o_Z51i%uSdm{RVeXp2X-whXzO|2c}*I#xpmoH;;*m=@`t)V%I>U&Pr_NJvu=|ZV!<*Y*xhgj)xTxFxc9t%&-g)D6eeuby8KT~d1XmI^A7gg9h6of^A`j5fzve=K0Xs6bR^y!cUm%NKRa-lW`Lj$Y=F}58! 
zi8JM<;)b0`v}G>5;mxCN4E8rx-hsMn?xdzw&xCvraCA1-R_=Dh+~8-1)Wzk5@}nig}3b3>vCnc0HO4_uT9f zjqR>$tEzMifKH=cdsWPb(r4QwW!~pSx4Qzjr<9L^utCyq>v>wwlRtT?A~~1wM0g?( zRUlRvu&Y$DCN9$Lotib#T1g^9v#wti0h2?T}r;c0Q%yLyR#OPKu{!n4Pv1)&RH& zr}2&KrHCPUW~MPrF+77-Y*Pl}i7L7q+Uxn)IB!=f1y#nhv&Y$mHuS!+^APR{U89+s zzFkXS-yXH+v%IF>(ib|Uv+&-oJAXugkZZ;sZn8Hv7kGSVSzAQFA*lsmwRKmt><4bP zDZU;SlJ-pgvOzeAiU070GPAYWIkb|gu%x@nj=aatzQY=2vSg8Iwb}c;UsK~|^Q)W~ zpE#^lHCxQeVF$4}e*oR>N7F3RPu|PkEpBb7?rA)9!(w;pw;-8om&anH99Fv0V`?4X;tkzyX~?K!Y&<Prv`BxZ*L!lr~;eV=pV z7v9N8Su55gy<~ivZS*Q@k<%rF+n4E)3CH>fO3*PQ!@TDs#D&DaH1fq4N>;aCx2a{J8N+IY`hF zy7H-(xy7>OMY4^TyF(9GO7A*jr-ZKHt4U*>TP+C=wfMpIb~L;(stHRu5|e(8y&`O- z@`lU)cVV7k1&yoZ39X-4BtM$3=%vT65hWVsGo#Ll3bHGsUv%ifUBl*M3(|w~Wi(WJ zJz4s&1TZyo?;L43;LE6Iw^U7OE6aOr#)ByBg({aPtpar%Gs%>EjH@ zxiGg-w{6eqG#uDpTosOg5JME@Zf8Y=XcDbgeGq$qw%X_#HaxcuTn0K`9eGJv*_9oF2|CL^z-Olbq}f^}<1IqSo%LK8W^ z?Aqy*f9b~3=F{Z!E20(UoomY4o7mPpmOoIDOn%a@Kbq`E)w?Z^FWgpeQuKbMs**nV zM#4`2TUvU;!-}KDO9UgGT5LcDKzY3=Ekf(!afnBD1d>#g%Kb49wjshKSBe>qZrjy4 zF+(jIIQbZZyZVNvF^IBS%(~@vfmimW3D2s{aEI$g+X+{-!iORnstemjvYO|7)|@ed zIxr*0a_^VcQM1b#I|W)&aC_G2*loZlur7iiUa)KBBK2{$61AaLWH_1~31Yg)<)zCRcHB!HmkK~C zx50P;ZO&(s_kGqK2X#nvRt|gNW=G7$08!X0pR!WA)R#iVJRLIpbph@4Rx`9Whf9?a z_u@Ni#!cGO0QL-Z`PHjoWMn6ru&abA(BBcvfLs|7u#*xQc?A{T@9CaE}X`f6hbLO8%@cchskh7IueokW< zvC(u}(-nCT#c@>S^snSoExSTZgw?Tsn`*MJ0 zI(cEh{j847A5JCeJbYvwTo)s0i=j3$MqaR4l|sMD?){6YOSI_0rCq&VUCCx^*bwcD z5_F!5L)6IuHo8M#k8Z==ut^f3M(4N1v&AYon7u>%Ea?WhO38{`6ZtOvXq5VN{||Qd z7=l-mIAMW&!Nzy@XqRGm#UEnxHOBPN&#yO_jr3Z##{zoSdfhbB&ELaV3s?ly{t|UNmi_fJsxwedf-SH z4!gfUuimKZDfG?V4;E=4aAfQ?&vSLl@$`}iHA7E1UH;XTIe>o0)O?4A581EXz%f)B zec-qF@uRFgYY>ZEjLO7|ZBZVD`#nqB{eB7Yde(>*|J1Sd6v;dJEJ2pmz5yazSW;}P zE6_kVQ8e}ONte*KZ7S#ijQ+Z)bWbS-$)>@tJt02u`{H&L?~SkW@49jBE+t-Aujw?HpQ7 zI}IPu+m&6@7*#kFPHTg@(Hl4fAF>yn((l29|Z>S_u@o@e4{7hrud7_$gJDdTPrOh<#l?*}6ma=U=^Q0CfFz5R2 zw^8RE5oZNeY@OSTHPSs#OUGFsx-z$rd;P&$Q}N~fQBSNZf$j-|<;F_=JpK?nyflcS z#0dIw<&5~%T(2FX(6VDI%C}z2Q?VLd#>NfDM{UdIpEfuT@z7aTMV2ndWR^w$OxNX7 
zteT_C_Hm3w3}^Y0jNiTt+P{hBBT~}f!$)2khK5OPRHIwvKYsj}3a?0h{Gt-hkf7{q zn+!hz$hQIcc!evi;6Q)>I*`oLR23hp8d>%Ra%7ou#a*Y|exrqaMqTQTfuQ6oHDVJt zdi%Yjx97EUAKFA6IH$@pV1_otiUOZsIIHuyDo^^`yLFc+v@2dtS+axvXmztmL$4xZ ze2Mk-c7r^I$KnFBG4)OU`xbX*8J9GeNwj|zH>7Xpe8kG-qPEhhlyROC-qt}C3AUZ$ z!-+%=_*s8zYCLB@d-mlb%8}Kin6yLC6b#3X>`0R{msx5Q zoSE+94SYIOe$}DCud;k(lZ`1k@5xsX2Z;)F?%l`5uO#l3+VNdWgph%7tI7jXV3%QQ zKgz7_LttviEq#x_k^gch>4|h!Wv^^91ZNN=5k54PIPfYX<0+<(Up(u%1vW?Zl#RD$ zVm4W!M0Gi{=gWg6{TPwQk=FNzY@W*7m7xmaI-u6MuAd+2wqbT;<}J^zZ3t> zI-s)~T1N;Cdp5Au;kIkSqLb=e<+I;5z-*Z3CMRxfT2os(6``XoyKn8`hsl3Dw$**X z^v7IMKRh{*E}1pz1y0re-v0Kqf>GWuUDvI?gqy;A(7pAHJ{r%vLaJA83XAhw1@H`@ zL_`Yg3kLF|nyeSY3ldx~mBSK*0T*AFLs~Y$LcEnyoYbIMa{P9HQbXLJt8noH7u3MO zz@(jr#oI12>yZ=E>{$xGs)`DJVPQ|8GWYT0$C8q_sr;_4uIAE`0J=J8Vm$gjIGAy% zgcc+hgs0R-uYle#3XfB+P77p!{*sl%NAfikP_T1w98~%dDo;dpffk)n$&2?$_L#L~ zQFq@JgXt|_naL-4oYd6RUXpQ#GOS_U6)q4DX%=d7Wjp<7mSyO+;_ zX86p^%rY0TdZ|F?$hMB^!Q#_)c6Q#`?|^nhy)n2oC(Up3Svlq#nZ@_MWqqinDVW=f>K$+LEh=R546wJ@>T(2)P?U`8zi%}9x*ehuUb2HOE!RKx z?mZAq>4e{8_J>r5vtOqh=DJ(>eQu(8HZh;kz@{@_kxbkC8m=+#@E~g|vG|#l9Z6N0 zWBzK?YwWtM*M}c^xvqYYfuio6X8i$GeY4|yQn|qv;{E`@f%*W3K+?A9fBXn#Y1I$2lVQx zAyd|S&6<3c*B^qOwytgJRld?H+(Yv%1ZS3 z7ZX4Kka-=Uh_6?uNmyzNu~p`_HV(+>_u>rfPq?5L>T!~V5%q7Y0av-Ri%WW1HnV~=uOhs);3?6u5r!J2u}4o+6Q`}4RH2{iIifaa68mqFEj7EpRKag zb?Nh%C9LEZ(y5Q}W3{IQTWYo2WjX_^58kc5N@@>&ec&TD%?A1WSU4b&Qen+0DHsED z`bpbmz8r{aFJK`fV4@R{!g(((?&d4xse5^G@A;$~5PneWlnu zI(~58i@Ce_Y!zG39dBBE{oI9A?Gi8=_IIO${w5dNQ@a%XMVGqi+;sfb_!Nh>_d;Sz z3(`ZVSdy>yzilwPSOr@z*J1v6OTO%h%$C;oM-&&IOoEY7N+GRErJtUXe!PTm!Bq3^ zncU~k`?(j5qAkLkLkC54Q{!jI3>30<`@=cZOy7)Owl>`yCGCx8FM+HKvSo=N$g?I) zx#O(6WtLf^6fo2&3w+79b5(r4T4TESy)jxt?hwDe(^RQmF0we#a65gl`{}8)xk>V- z9Dky9%!r#@kW#5xo-F?*d~YNC6;|P=?)$)@+y2({?KbPnG@gR!f%OinQeMP^n*paa zx_7Q=ZdKMonnD&{RmpRcp)9(f?2$jmj;Hgzv5ByBB3Bf`@@>h@JSzPsO`o_+j7 zX#RkP!#JbEHvgKHXElXe^%`A+`cnKa@a1JXOu)+1n@YB}2xvwM#nQo=xax~nj<@&Y z%O9d*mH9v|TS!pm1;RRKlZv_cA%DYY#6(uf^e{S8%HB?MBy`s@?^;d_>TnjOtMGM9 
zk45)!J>{er&!nZqH2jp!wxiQZJTGGF+KpX>GSD`@6@SWeOfY_#inaM^FSm~}wYKZ8 zuI<>Vk+wm8@~I1^X$FiI6+?khpz7eYoUF0Ljjr~c{K&(%sy;-_#^V{LRO}@)DO4S) zVm*}t=V;vjXx%eCIlz@U6Iv#}1bX?R@H#E7j2>*p6xhz!AkC1(Lw^WLRCWtU!*eg~ z{2Ywr(%)|jY}qzV*0t-oi@9cXV+9Mha)mBb@}@^Y$Wd@^fu<)78#n1$*iC8XuExt{ zRi>K`o-fZZv+U+Ru;50%7Y6(04`E?6ZQT31{&zi1Lcr%T-k550lcR6HI0@aMx;#}6<~ zzH|QHFI>~wQ{GLq z+_oJP9y{o9sUEiFT=+SJK0BZ%rG4(koa1r>5yjKjLo+1;hPE>ZcqVDFY+n7InwwGqYD};hY$V&a-=a+1T(Xc|=&QV5A`$%I4+uFe_@4|d; zM=%Smm_^yyXU_qXAH3ZU0vaKWiM(CCwX2Ka_$YBF|32@%i&k%If4(43wY5vW`*p$f zW-q?Uc_wYJQI1=3mcyi~Vz$n3cu}_@0bQ8C{j48RXg4SsAly{q+b0or`b4`jOF0pA zVF@3O0x_jR2;vzg5^nh6ER_BJeXK9rK0Ch$jLX>Gb;_}c-1u-=7l?B#0!4FG#^Heh zmmdC+B%n&0uT$7hkp8Zsq!jbGaY~iR)raDyh*>C1$yJGrF@0+D4ZnuM{agi7jYs3U zSI{QODW1wi8XEFLOtV?>?^v=`W3_@pLPEkAJl}QC7X*}Dx|owl3EKV!WnE|1jHD!x z^U_MKE}C`e?`@fk@wd}=eD5H!2!r>|8F=4WMah`e^(76#_ZVvT9JTcY@cTGJAM^1m zF5_KZy%ZNVijUhK*P7nk|7;(W^!}KUBWG}m6?V*CQS7j1eB3VjG&Xia;>_JMoZm&U zrpjRaNC>mX5>xrHPQSo(;n+EHy0KM|LY7!w>e%;UKU@6uzYok*{hOYqA88oNM$}^} z`)^X-{Bhh4VZ6Jx2!|lLVRl@JVA|?6Y%8Ty?js9lGKTLBFIx9XDY0Dt78sXl9kA=m zgWY-8YV>@_svDbk;|p?-zw0VaIsbt^m%Kq{U#ROkiLI)={)&NUfaV8$ch|t2%1~Yz zOAiB8OsNcQv3e@gExV7zI{Y_&`r+p})eZKq<{oUh`Fx$?ELzs{`Pt66&?(kSo5u?Y znqXkFkNo~Dw4OVIzxtXQmDJ%A9h5mUZ^J@0Rc~(V_+8^H8kQT#7-viMF?o}umX zk8XI1*|6m>N=NwD;*R=9d!N@+!+Z#h3|)v79+LmS{ut2<=^GxeFbTUYwqPC!FQeyp zc+l#*e6dVZY5IzevuM|=4-$oEXCz-K1Yr-%O%tYyD^pQgM@`3ixzH{4@sMM+udk+Z zc1QA<%ciHa)5H4dhGk!Qhulsn3>*rjcp1^=(j9c0C(k9oG*R?gn7jEMo2aw7YmNpN zhT*!?F+|mPi*i#gGQ!M4Z|PQ>HZo`Hsy-R7&R%`KsLZC(k9qsscj zHCD-e`2;u;2~ob|*8oJAy`{IBZXdCiQ}}MY1p>3kX93f#Cn8Wko>J>K&0puxFKx!$ zCc6NCeQ*;Fb?;8#Yuo1}-64vfX1nFr7N45xfKyc8jEQNSj@V0h<-fXWYeNonXD*Gw z+B1w@|q zA=z11PrIL%&k-p1AX$^U<~WXCkG@Im1!g7WQ#w6zqrOL5C$xG*IUSg}(vphx%HN<8 zEFFe(*y$K5W$>kxOBM@{rdmb0BQX2k>DbuRXBh)Dy7qN4hdHmyGd~rTD5XhM$5`*; z^v=uA*1S8D8JE?}F{DNLNXyQ^w5e!WHC4QC7;RHlHGUZ$(-AwE%PZ+=r-an4dkRxV}OZR!Ed&cjkNIpJpyQ6j@`AE@BvuJ}XoC#_vw zt_C^?J(}s8mc??sI%4+rj2&2fo^PF%ap8jjn~+je3d!?KGdt=#7;1xXyE3O&lO`xg 
zMXoHPb~b?XUk<%MXpV2SpPTDqvCD}oIIzJ+UvH6*m~x9L?&%EZMI!wHIEFm;>yWaS zt9bjWRorG&Z>-6-TIlV=UyW{$4V$s(Go%`KNb_XYKk>`u5qurBONThO)rcyQ)v`$R zGf}Zj7+uBm9mv$Pu>qUCpo1;R2Li|xb9qu_)emY8_db3;cGax*c9dT{NGsDkKGY0O zPbUjET1T%{4s}&#2>%ajZxs}0phatB9mFqBwx(8~f%LP45zE*} z7b&2`%{;KocN3ei-s*=T5bg_uF7>b2o0#3*-KTt69r&G6=5JxfWm+|T^~ED2ayn5x zM*pgA!PgM4bOL!L{Y1$JT}VZg4fr;(jAU~U(vNMu@V9T`e_2ZYmp9J;vbS_p_{JmL zVEe=x1Mr%Q-32kQXsizJbw3FisK6nS7agDWYfVio+x0IM-m`j1(C(^SdkMmmc#?!? zN`03otY9TIdOG?+R;5yRER+FOFId7Z?1T8Z!o_hYDsLv)bB18adkLln=ug35ue%qM zzjOI}%0;ZR+tyMy<+Xij({VH94y{L3r=uhHcw{RnhJ{?kdsXb=%uF;jG)DC?Px%Qx z(;hIhiI&M@6O95*38K+{)9 z)KcGLehr5HBS_9TpO{_<9~-aAUb@u*rH|#`1);bowrr>4pZ#!HHeX}R8)B&nATcoGPKHlpL=@8vE@CWJS z+!Y7$>33@edrGK@orHH5Ze(-kROih3#oBAIy3+X3a?TW+ltkKFu+^dp;)&g?F<~Fd z%O<-WN_wgn%4Av!x6h0sPoW_gk75Ck`-UG=f-#gk*GhL2pXfzIF1S^PSWTGqV74>4?lzw+jglh?1aj~!)` zb8{9+T8>Ny;YxiE<{kgT;=v@g^P<(lH@VnkQ|OpgBdEkrF?)*Eu5FTbYL%(=UE0_y z2~Y|+uUd*#b#jJ86OH%EObVw;uV?zO3h_;4#bt`+PpjE?#CPe`XpO^*kZ;+LyfBRm z1cas>pG={a`@*XPd=`~Rn-}8`<-bCHADFi<>@-i`)OAApIcjl%M`JNS_uQlN?h0&+ z$wUhk(?>H!LgmY@D;`l19W+|^asOgpaBF=8XI1^Ax1I`Mb{Oz5L;<|YdW`)zJB)f< z!`^_>Z+@#=C?LcV(X@1|T7AY7&u;&8V-Y5)fu0V5ZQ%BmJ9<^Q`RKRAA0^WLbrg5jtr#?tR%MLR>?ZDcJ+9@D>nK$ zu}b7;qg6}lEeBXhcI3%_8_nk6Uu7qtWy<)V?Z#ZjUar}9C@Jd5Q$&EYiJXdzE6%Hz zhNn-Gfrsr!(_!gk?Y5mFC8|o@A+vu9jjz!)x zW9huN^7^O6#l@|yd|u~}vb&qnjXEnUtCkkYSC(%H2M`Fxz`%ruz|&+@xptk*IrIOh zLDtcBU2pU1i*fw_K|e6SqAp<ida!IA~hd&eDoPpV7r(X^Q_5PFF~TeItj{!n1-v4f&NU?!QiTA(UR^P-}~ zD)Z&OVs4*n*g8n=A2%h-X=eRGry$WY-43N7)1+`(XL&~Nh^)Xe%|tFe7WCXZ&F#LZ z==6Y~PVQym7z13y=4E9uE#%3=H)R*O=sadFEBFd!sFUT9ynfUdgIhLJ;@F-BaZOvt z4euT1^VP>6)K0lPJ;dfj6;ANp*7u|T^$zK;m;JPs()mRC^AMK?tH4wtlGu9^U)EQ~ z^c(^cvb=yJh-wF%j1vXy-0?1Qp#Sr^WT}8Iy=>_WCE+k(paf7g%>ncQ zX|_;?jxA9jX61e%|=(Ei7gE``@V?bU}&_f}g zu0%!xNx%9W9_D=gW10MR1p=$&x|ox}^aLwb_~QIxl`>8`zhZMY0$kAj27klTjDVVOw{okpeYVm$(9B$WeL20)!>DiXnAafaHC3ss8A+r1 z=FU@Fek4tm-8)}It>2fkj%{S`bYyudC9JF#vt74N$y1CKYh!1dIF-Ydb|+WAa`Iy> z`yi^G9)FOWjmnFyvvRRCqikv(s&WLxY>j{IWa--<)T0#v5Zb+Pxu4OD!wutLoInaJ 
z*WO~EF4=d@3oLSo8I!BHoXucS>tAqdTq6TKwmnx+^%6y(;>+G@Z))E20yHP{^VuWW z1ip(@7?+~D3^vQAOln##Xr4FJP4CmrJ@giQ*|3(HaXXtV_$nqYq3fz%>=1r~TT(2; zZ?0cVdw$nkvtH7$*h7b zM{Cs0R`U_M+4kru;K~=utbL6vGn)HHfW8ibocPJMsX|g}c2vWX3f8sH`{|Cp=XNcl zC*Nx*GS1q4dfmK_cas1e!<=pWZA&0uh_C?eZKt}2R`X`u1b2AGAoj(z_n(zsySqz7 z&#ljyl|&DayyT*LS}H`-&t6N8j=so3^O8D81bW<`t=11Ja_?e+3mO_4vJ7X;+$DoE z^(AIEV`6dyTAQ0cDe@}^Sp1*o!2%^nSlb^{^v{~E=JfPm2FH%i*D?V!m?_th+jL_a zaiG|tEHQO(I}dow-KgGeA?*kT^E6JRw1MAS(~|qii$Czqh}b=E;>%bJ+y)uJ(JnU7 zJhU^-z3aqHIw-K9idy%8(F}9km85;}^<=Cxz4i5Cl})@@XWMe;Sh6(n@QVU_a9 zM}sjB2T(Zfd<0Pw3o_X>k0$PFj-`}Xz#b0xnQr}KKhmE{a2WO2#?zi-MRMwQ2Tet5 z=G`;KjQQ_y$yUaUz4*U&z1lvKE5kcc%paxA*+jL*M6mP%$S441(nES-Q#!^wzoERx zxgw#xw|f$9^+t(a`;y~opWhgX^)?8nXQ$6S@415?PZ#5++0+82nUtKh;79UaiX2>) zCb!C~WsA>?bqgJG*K*KU&D;qjz+oQl&$DE(H*Rfri=S`gjh~lflhQr3LzjMx+zhFH zGx4oRs&cZ9`NCUK$gV_}X61U#AjLczc>ii?&4zuLpjizkKTA<4VdYU_xr}e@CGcxS z@{wFE4no*G99!DBa&CDLy`IILjh0*}tgrf}U%LJ-YHI4YnY5|sMdDOMQ~GlRvE*tj z7E7;b0IGU>Vk0RM@UYZ~sbi*`_ZlGbfxJhYf_QZ5c$;FHwq02*6=sB(m7hXPP<=^u zFN*$mGk1f+(kgy_`{!A|ukX-3HhHA zY{SyV?SNcI%@j177QQ}5MKRADf=+Qg4GFB|WjBulwSsI!m+=2|x#{h@)B{LW?mMI<^ia0xAUiBO`;(1_mz>ZNaj6jK&}e~t{{9tOPfbkBx0(7s521bpcWiDYLQKqb~nK(^PgEA zWKk&aMB6#`@gjcQL$cTDYdk(q?yxFUN`j21m6!{)?RSFb3uM1E->cfsexp!myxWgw z*~%2l){QqftMm4{Kjpt#dMOMmarM;S2R_z>LlYQ7EjXVrw=S*=+7=Fs2Lr%llFbw~ z3ZY0mT5nZ8_AKDK{E=B*)j*xH$I>{-SLyay#!Ve*%wp#HAgsF;&%bx0M1ZwxW0n9< z;eVLZzKb1*AwJ26Zq@lqT{7P?x-h3Y10WbKck}BIpy1-yUyUt506>&bEyAT8?T?aV zQ4l$L(nu=ByE1=qR+~Q{DrqX+yZvy++Su};OF53M_6Fk9Mk<$iGzUNI7mR06xp6qf zTejBbW#=xT%d5e*vQ;B_eK|NgIO0`VSz4XeUpJCETVN@n{nGEjy}Hs9Bb3ivX6do} z@Id-2p7>aY zYQgr6Y9uuFZ&8XWd+9;W(hR|q<*m0qXn6~goPG1r*J8KYlfvs_Rgh^tJyM;jtsb!~ z)?PP!fT$9XqgV5t#`B#Cf&X8YfWSbU)R37ScTq--wmmr56TenUHwe3hRhB~2%e#cW zB0AT#n?N2Ih#+tCS((SFh`;zK?G8@Wagkf@*5nIYgG;1IeOY7mk`?<-D(&3?KTBIw z`0gku&UJkyX_;KnFxp09{~PTTiqV+iSjIyReC@5CN|`o?!?476;}S4cN-<9fn5|0^ za+`fv3G*!@r=ZyFLS~#PfOsETA#J|@Vxa(A2s>lRZ!%MK{q3&dmRX&bk?|bTpUVv? 
zrXJ(k4*z$o{e=UxjXXR*U#|!0$cAlhqK|b#%-$M6w;y_|g_2~YK(KpnD0Y3PY$DyH zhx5t8C8P)afWPnVv|*j03h1RV5Q=t8_5Y=x{cryH|3zMT7is&>XMQWYbo!PgkzhaB zulTd2cumo%E~T#%+B?oz9z@m|FbF0c`)Uh~xcYiD&iz!WjIqeoJ{jnnOeDA6E9k+6<@l8l^l5N%JM#5CLAxI_#J*ht&wzj|VK5^2Q^I{( zAdYmzAs$?jcF;uAQjG>43pjoC)k7X0aTk2CK?|Ss?)qHj!(%`Hv{JEBCAo%*spp_z zb?nAibJq9F8nG)ukwmd63RcKJUEoFx8Vc+S&pYk-Z9SCWPB*l=GItz~BV#uoq%Ml2 zrXL#`vdoh9gsXoFu~3+bNzx_Tgxql-L)c&hNF__AclK4IAbvEkwkrhu)%J3|s?xA> z4$Di9q+{hSyriI~X8eFU8+J4Fa8MXR%cyI@tHiO(vHHFx=F>l5=XDigGU>Iew;zG( z2AOjNSE%6)ZjGAy=(1-R<|@=e{f<-Bb@r0+h#t!r>D~{PxW~Z6JnWC1sbj4NCvNq6 zPero~wA05K21!FzdRKo#vWSi0_`S-r$j?bt4QBHk9pZ{0$pFt<=BESC=_1-{=wfeu z6tiC!H5!zHuiG`d4u|dY>8|Sy^Kb1og07fCY~owBFx0bO1rSWMMG1u8TMrjbWYj;ORf z{Bx+I7=QlX>vaDwD-Kb1-S<*i~kv5BgB#L3+& zsh_RfurwWWmX-3JOwN#K-zAN(oEC2!=yHQu9GpRHNVr`x9lFbyY!-5d4T$*OPGhLI zANccGv^eDfn0 z!j$?5_h~RM&=wKw!Zfk4$L>kTwDA2!#TXKWNcDGUw#g~e zrZ6_ZeR6;{@7zckiI{_&45>@i2~*;#r)o?9T!rC92|mDF&qVq{c6R84Gq1dk3)A`u z^#XideIofY?X5dbML`eU=V=phJ|q3|l3O zwe?pV%>+&oxSqwIPQ>VfOC|OYo4Wdhmf%68vY64KRszY1SXf7B(u_Fy&uo~bq-s)T zYuIecM#TI6<>+6b*}qGD#ydl*)984z(z&KX&b3AwU$Kv9fxGq<=A|ntvP&e2_J(=1 z-vc`fIO9pwHVe{qq`1Tsq|v7Lgnn*W23~VC4S#R+) zTN?lTQWf+o#kbyBKMd>|DsU~FmH2zKP$@G|3TBj@byVtO-f!PoPY$(k|Zil@>euKUi$HOUzfoK~-Y@%jh_%QVRuY~?#BVamT-s67NWmZmK zUmvQ&Qmj_laCa!dlF)!2Lc;jCbOG1bzBwJX1xW;Mc=$lt;=5H{MpM`!<6Dc%`5S~| zk>P$sQ(1StKQ5_r+-5R}$<(Kfo(f5Di`&{(;#ZWEq(TDv5Sr0DBwfPEb_4E)oR4h9 z{>J)T1Q46=bweOg1O~=+9~-ow)qSxI;5Gk~uL&$De-+(HBz)np>pcS+D1Dg{m33=8 z8mQc~Z$i4X9ddiVHN~JMM$R&PavS4>5xWX(DDLav{f5l9672oex}&Z2@(-Y6Cz^XA zL7?boE3%!~AiWE97xH4hospj{t)D^<>C2W{Tm(0Tsh+(X2yC|<9ei!|<1HR)?z)g* zS}_pC0dPgs})y2T^{Q=Q#P?0oT`pFNGS(cPPV_r14iRGRX3M*B22+dGx-ioWHr zW23#A;SBa?@5N023%qDZGA}E~^XGriNCkhu{l>iOH#_Y1MO ze%MZRWq>>_>Xuz?!`~6We!P5)7&X`O)&$_CcC~ohT<4V%lvZwUO(7W>%|ibEd`8p6 zNh;Rw0==Z?q>b~z;_c-Y8`m6Dd_;~FCb6z}?yrX=#vkC85N6KvWK$s@b1%yD@_8d> zp~q*T#-}P724m=DO*c9%nyjRZw5Mz2zIEHR#+oP@nULz%^=@i?CFp)0V4w)x3eFB# z4i#x4$z((N3@cc5UMfwD2}~Zk(wgMm5j!qaguN&gcC3BWbs2nf*t^J_?s{1UbF-Si 
zF>7XOLhkIRHW;74yr}H)iQ&(TWbM#Pg?;hhH5dIfwp$p_W@j6vLKN4h1{W2{oxK!E z7uQGAJf3x;E1`*n+LB<(fX7OF91xw^->pk>m7jP1Ep-+^Q2ypGDUq%z9?er0`UqFD zLPQP_rn-%#1wy<+Mt`lV22Sy4NUN1A)&=I(*NnkSYFOosru__9HspGB`YZMOD17E+ z1G^}$t&glg03gr1>UZE|U#jY;@N?=JaSc9RGP7|XGGnXISqhik7V9{l1H>#;bj!=j ztAF$+oOE$<@$~c*9UYxd*;Pn{xRG|=wEwl53*o=ow@jkt_*PqiJK3zXGhA_*H z4{$k~exhYPKsuqze_#3x3tG_fIh&LY5ZDqzifv6)NB<-$4=en~+~VQcsQV(%fFZFgW((MX}EG7DrhK1cEp^<8#@U>cpeE{UXSQ{jVKA0`mD9S?QyC6sPnj)8x(bH> z7jcoBTVb5^2|pug$guP(?UJu)$D18 zMrQ#2Zu{KZ_dOLL_~I$?`Vj8;cizbS;DefG&Tie|Z~4|p0<7a3__%D~gP0FVToTgS ztt#sZ3)lb>VhO2OjBvZ%86KmU4io*u{>|vrc{tB!M08DS3a%FWsdQcXCWXB!bG6)! zH3c4{6V_oa(AGW)pU>0Y#CQGR3SR@cUv9io>|i#A1C>015AEHD6DQQP|t=)C+t$lbfCtc`SZ!cFq6;^ z^2Z(npsuU4$@wd9*K^C4>kVXHsSHpEDiZK7u_rRsc)DV1lSeQ--rEU+I3@LMgT{4` zV+PaD*+PvXC~I!v)Pd1}SrTd1=fQBrv#SrEaA8}NVNg4Bu7>4jHP+hinu9skZp_Da z+jX9o%^Xfq_+~fn8l2iL0gLZ^ZIAm=)~)aB%X&QP#n`Co1QV`?;w7n*1XyhfLO<2P zcQegk5!%g6eOZgwJdlk$yNh>>4|;-*EV>o48~=>2eNakqYRUVEgP^V*L8-z|M36NQy1Dc&ZGbQjW!kZzb<(N0eu#Uyu%w%tQBr<}MKzTW-}C zYgM-sP6FFfL?7hLx+E3o!`qsmpxRPU=5OxHUX){w;W%(8>$uSwSiRp}<6$0PU+&;; z>Cv@>^c^S%Te$S(-Y8vJTM||Mt_eU$!lNRywV(TdX|cn}njvVyqV*V3q-;Vc%(S^D zA2E;O?b1;|^CgIhckC=`Z%cqw1A2nTlpJmoM5@j)={zW(IhnJk7@ESo7V8_AY~JzU zB9n4`sM=C^iax|iU=wI`lm!>&s!wbx4tSOLnL0i_z|RSrxu|89Ug!CEb#vp$nI|GD zYIyoQKo+G*5l|lya?l_K&BDsUQu_jNXK(y5j-2$rOLf>Edp)Tc2l~9;1_r`CkMN!~ z{=+o7Uw2ALoyC4ndDQoQe_vU$aYFO^M&!-j|WA z-I4lSIo)(|HCEft{BWFGUq>_EJ=H_)^xt%Y`|r-qU0rs+)(Fs9j+(HJ9!q&AuP&~O zb91`$C%O<_uj*x1a~ULr_b}w+2zKqO*6y~yzuH}{zPFUwDiZJ1dwOA40%_?~oyA~F z=TUrCALcc#=Bz7*7grxPkA{wk*B-k1do9O8K-~c|q3d&drB_aOQ8E0tV{(kXc`ffQ z$BJ)_Uz_WL2I&~iVXuz`zLcG>5~;*ef=(j=)Yfa?#@<>IbGzx z^=*7z*H-@N-)3}km~4oq$4Mmhha1A%3y@*`i2CIy`LZ)KT8_~Zvg%e8ovS#a)?N;s z2w*(8cjs5AWB4~6uH@GJ-OfKGwQ~L@A3nG|G>Oi>XrQ}IhDfQzo=QRdar4Va4YwOR zjFIXp7HDR^P>mHK2AH43Re(5ygpob4z!iE}Hse($&T?SqV6J;g{$%d-+)_25E}0yA zHrM?HJRq6qehyVYg{Rih!Rh70M_9H?t2yQuFLj2E5?$!(RU&N7kVaS}QMyzq8ucj$ z(XB?-wKDUF+nTZvqe?6UzsjPZ2Y&R>rkpm;hRcS~T&IbKAoZHS{tADjjvNX~k+^`h 
zVUW2efl~Ijg!?^pSm~iru3@ZiURi!Ib|CL7uKxL>JhCP@-IF@vM;uW86{Gd=m7Uoi zEwo^+paVma5CRtpg2anlhT>_wCg13V+9A5JnS#RDKgO?5R2zKiimz|mx$kzA;2Xq> zHxt|O3NxVxc4vR_W-g3iXW$FmA4!IdcAYH-oH1`e0B=8+>rWw3Pwh&BPHKk;_#Yh7 z($bKOlZUWyXZ4R^6&00u{uPt|Y!Ue%dyJ6A1{eT`)Y#JcgZ#qvNf&JwBveK&OBrRM z@w-1a;)E9qC3c<#p}IO^mIZLE0Gj_+^|)~C#*_glKw3CK8@JIQ6P*;W-RRJJUr^Ux zD%%B4LSf&9wS!_K5A4rX&#_u1FBe!01KH}oa5Se$i^>v9S}GdP zaX8i2-TzsMg~0!L&fVOXlTx3we2uP?e8#J~W(Rm!G^Lo=NFt5MK2oaRVMfI5_s043 zzAsq0ksM`uwg?S#a?YO~n9U(g@A-IXn&f}89|=ELe>Vx0jg#JygnN#0C@FWjQ>LQY zpwv{B#0rj;jxRE`1FVb6VKuaKh(!uPgSn6%z7LC{$EU1dq1+-%dUaS@9OVyD6x-FV z11Ec(mS0L2bY-Xh><-ne=JPg%v6+2I)<}z$xvJIN-Mph}Xf*k=`UeldE|Z zbyAe0*<>Pp(E`onN8vm>^LjpC>(^sOkIo;KY%;4bq_bweceGntweL64U9=D@YOx3c ze-@)~GJS6<@5E5mdnC*6MGzKBIiR3d!TI*IN;dkw~b^*5dsks`ztAkMc$&fjcj z7-wY|JuL0YMo5L03e&TM*v-L##sN(C@3gabEP!&^krvnosBUuF%S=u zuQl{;P+*`r(WR&!tD``v)c~!KONmV zkUaOXAyaV`%gIt@jkj*BnNRpzdXg0hLSsQt)m?&K)ZkY(em3rCmArq`Y3!N$o zlPf7?7|BNyPy%;Jgc}O{F1@PcW zR7$73)*xMRhJF6qzysK1@6NI+CX3nwG4ZYbZLO-ZjNJz#QlO<#_e*N@<9`UfS;V z^tgk{83ln?YUuPJ$yu@8dH9=r|4|5BzZCXWYBTT7rS=p zuDEQE4qRf*jI|f@NG%k_tkkcoj_->3&6fh$tJ1P^HPv|EnvDL$^ofUm^Rrd^h|9_| zY0za%O<>8jFpM!MF52Y}cFT?>;V%br3V{6+D8`G@6)QGn!?&xa(po>-QFO%X6n`U- zFYDQc_ZS@azNkG{Dv^D74CnC-Maay@$m)6eSQiRr;yc>?^aECOgvs;AE|4$#$8xbS zkldk$%vOBS!#u~p^q!HIsf}{}WI`fPvEo^>rmz%Hxd@l=bTOX)y1UzY%i99 z(yM~7rG%@m+if31wjkE(mhpS)>GgFV-TNp>Fjw+_s$czkZOHqrWDAsvFYiwI0FHZ8 z)sdq6%q>|EQ;QJ0S17rm9Z+|O=Ta^HHjQj#VLqD5Mqu;R+gqJRvw+V%MVAF(w%{kR zKCctDRLSCjYQjMFl3u<0GA`;*zyILL;>A(57zp4;yi6@2pl*4 z#3aiZZK9`|gv}%!)Xj@U%+%^q(7Y6mWFU~5{sUC&rug=m4+JG#dk7j{#5axdz9F3 z)@#dIsAd2ZobNlBKf&6lKauE3Ii>ZjbO~IcEY(XIVE{OeTHi=#qHgFLjNAtakf`8y zPD4kVf1gTN#N71Sl&L73ayE4E-rLfFLwh2hme7r$$#BP; z4c#x{Iw+2gS&LU)dn|rbaWhPA=fyyQ1#NHDbfG-%T{ci-kLBf*Tl;r~w$e#Ce;DsQ z*e)ibNa9s71vpTQ zG#T-{5Xt4u9-*tD@cywh!QPPh*3DR#|tbd87KvYSls$uYzXQ{|lHBGBYiSAIjEMC3nY zjjqami;kjE%KMz)UW8YlQW~tXDS@Qy^G%^ws z(`7D33VkK-N27K%zt@C(NVA}bEIbABv8sm+j6TLasRN=r516{1 
z0O0R4W|1f2Q@-{UfbQZXFb%jcr~re3>MCD0q}s&YZg?X%^_r$=svYq-b!mW|Qpm;X zrlRn}4Fl<1-fQY7F@}N_ovR1tVy^DzRs^;2aB-2DXdD;Fsr=>GvAaM_>ombm39ude{6LMjK6k8)SOU7P|HXK77)&)_5`~{j_1z-x zSXryY#1$Od(!fF?dR!Y075BEW|Pg8dDX$4<8jU#%R^cWoCjH^Z@JxzQ$qF3Nxi4`&0e*k!{P>A&wWjSn- z;Se5z;L!7X8h7N#%okMSWc7S;ei@2i51{?nu7S@0Fw;*A^SebOZ=zy)L zH)%%3WG3r~*VA+tzNTBw@twA}AB*jR3~!7290Hf7Zc#dN^+`HYxfKl+-gF&)%6^jV%G8JLVE-Q^;reflZ=1@L>`L3)jb?(M_9pd^xN=bb_5@cjP^BjG<``eWA_hk6VOb@K%^=cOcv{Qf%fteG{-HQ+N!r2* z))k;7MZ6G8{$`Gr2!2Fw+%&3z<-9wA(t~;)#-f>3r5LEM z*lPtWsvILOH=(1iOnNbh4DmNauSUiRbiX9D*s}~p)SzCtXW6Px&8KMH{rFy974tRS zLjCRgVW2hXf1G;{n@QpQpp;XPMqctTU2~s=GZ@rP3R9H(rYt>`p%%O53Lkgxq{(#Y zI3>ZxX&2C)wHl7YT{cY8yrV?RE9B$B!oi(lupt`(U{wp(8dK6m?DS*XC@ZfdX^tko zYy53{O$uFKC@4VodYNj*@A!Hns~}UAS)UfL^w%a0n0hY)f06_?Ks^~~%U`r_qJ}5X z(NeP7uqYLu8SO5(E1eV?(;XG#C2icV+Sn65olF%c2w8#img}qAIQiYPjmj+`kfQ?n z(StldjcxagR`a#V?t?mlrWCztm5Fz&1WXvx#X0=va=^9G=lh%lK!rL=-$tZH%-k30%^U%{sTt><4K`mfR5oBb}9Ez8Tb=UYxTV7d9_W@p?-fYy#JDQ=J~_Zw)) zdQY@Voa0fnAuLzm0}G)=4rjD~_Z(-#yvDR;kC7GAT5;I$17zDwm3uHj>Ozo+;jl2m zu{I!b;FB*btkcC1N+j(RY;q|iASoaHw$Lfi;1JBv_}VX3|BN0!3w z*JnI3Bwt39UqB(3(a`c)umERDoi?EEdA54A!Us_PVfHTN)Tx1f`O`;z6jb-ro5S=B zyst9M;D|CWPXp1gZtV~3@?L{SwbE)T^5~$}F5q+ZCy;BCC@q1$)lT%+^A32utr&Yp z+n?=VOBOC>+V>8;?Jhr7d70yVH|pj9+Al9VPegFjEw&;Fg8cJr@8{8! 
z>l(6Ak~4MRn_d9iH1byfNu)NT!%|88-7^=)<8?I|j;h%GQ~K=@k?_-vksUNm4o7pd z#jnF+?(B3-oSS~D{Fs3TsSZmT!PjZ`t1}`09O4%4?2zz?DigXGU#-`>p}{Kf*Dl+G z-lmp|2k#HV)zw_{_Nr`4yr>_U%i0(Q3gXlQSI7+w7VzT+wWB@NmNxRUc@HhDS-?9H z=&LB1-EoVaK3T5Qz*lIUX({xp#wCQXVDdrDM`*bvqk=vRsTxY_Ao}poom}(bW6)(O zE*F{=e6MMA`}kHebGH1|j&{Yx&nrA(w?j+BqkKu!3roq8yKuYmKfo(HDofLQmn{GS z%=~FA-r2m>O(8Gi=+qdhFOV-04AnI|R4u<21vyAYbH=O;N5td{t>52ErQ(eT|2MWu zh_?dw1N`^n`^C>n7iOs`hewP9!9VLOJv~pu7$7m$$Nd&~U$@!~sWrCO*O5h-%|BaX zLBo)}7h?+bCyD4EQ#;rqjx;ZJ2Q`O-sp2_d7vpYf1FQxdUf)JunUpVq;-?HY%w~Dw z^*GRI3N!w&E4BIpyT=hpC>+?zQOZ@g5Zvgn8tKlW|G~kT^X;-XoqUMe;lmWvrp?q3YA!EbP3j-!i0cMvM36fT4=>su zObgbkt3$oT)2w2OxYWv{S8Fc-C+#vKi>-{r!B%pW?3bR@!DV-T`&qr*%;FTGdOJs1 zMa6#Jk2%K0N2xbGO;v)tE`)>_{Zzreu}6Fp6;K?8DMVjSX!Rs!SJmG_Eh7P`Y%Z&i zi;g-rUuaydt!_mpT{!2cPonx`Rl2+S!^eYsQvF(ruj$VbzXbr{XZ*vV&= zLpx32ZFgt3crnbSc3c_(;0ZVtguEKJXIkWDia_Sb$X8^^Ys_squdn9E+D@n@ymS-u z33WWP$*pOaIUm=6r}A4Njruc$wbFzdYahS)^cAsTKKc;K!)T$fQguF#PMI8u$ z2(H3O_z1sv8dpn;2Ol1)I;%k(7nHHJWlcC5!P5@G&2vMCWk;j-M`fUrhKs_f}s#T2UlInVz`2j&i> zPl+&*wNb`h2BSUssFnP&NU&zrWtGgl>@)clI4w)OJch-H?j|n_=r7A*Na}hg?@0PL zq!tJz{-XF)Uhc1+l67_W2s65h*^`6i(@uGt3M+bhLL;b?TCgU&ALUQKTL778iaW5U zk9`q5zLgN50XeyWpI&?WiXlN%C8Bo$J&oJB`Q3Y6Zs}htQu&*pemw zW`Iu9z1zIVM}~)GZFRxvCvV8>SKOb;AliUg($1+stTr4Af~|SxFBb~efl33x(gE9! zLq7-u6md>A6+g|IbMJ78l#0PfPCPx*Ws4(5Vc>a*XZ~kJzhGu*N$YL)?~4rpt<9K? 
z5~sL_O3FPWM#qbbJ~Lt?zGnW>(<}6wK-{!Q-r#qJg!sk|fBt5algKSfed4WM7&?#$ zi{Yu&SaNuessQAbb_(&T1hkUY4JeyLi#_dD6(|dUxyDixQwZ3&JrTz^~m7j;@$Iwr{RXm>?O$cz#4K)d&1dse=4u5Z7gjFg?t`^e9RQJdV)!luU}oP=h=L7BDdN#waLDIK!fZD8A|NU^*37 z+;yawkxThA8eUDK=MZ#g-)J_)#A(3EOl&+tHczcdHh-L%Z#D*SAKB>t}TzTUZWB#}?9@YR|c%oI(&ELXeU0 z=hg#3piU8LZXJ$%M78!`xMWwhWLo(UBuV+GIhm&Qp5e3S{z=N^voOsc&3A`RjUP&n z9A5rXPu%BuC&vN_9m>{P2#XEz-EwydLP7axyq zeNMgmi@sr>hCWks#-n-8TVuZgBuoyHdsMT^P!Y)wTN29#z7*YVT(Eew>%ad{fIcCx zWFH{+YxI-4{h;qilRD5l)=`^mu87#!s%4AfkoO!o0ZQvhnWj8{JPFHmWqr>maFAcK zFa~zpWDWvk69DI`oZ`G3BgP&~=x=C1)?0m-k=OnA5%0~hrg?A1&wX}GiELr#3|8s^ zHr{%uDq}BAo2_=HmEdh(r0D%G>|0W!Y$XFZ6}vIc`Rb2n+fBci+~#}?Y>>$WqWN;c zRZxRac89CsAffgS_qvNdJ(Qg*==bwbX0O{FD3n)ux1G!-xRX^=8bIbp#A`r z32!LTCAnaUd08c3!Ia??xO(UVsaGGHae>t-leKBBsS?(W@8+SgGz9o1K+O51U}C1e z;GG+CFTLf17ZEJUbp5sSj93-1yDlddqji$9RA|436SA-2zEz#XDe!f5+yha8C-LaI zm zMp*fqVq6~p>-hoWv-m4@sNok43`p-fyxU7!k|ZMcC zn*Zvr-d9A26P~3R!XrI~^B;GIL`R>Q8*hs$5;}?eN~-=7H_`v&l|iNn9XqJJucM`i zZfDuWRO{gU`6Le4<@%>T+qJt4ZNtpUv-M9=2efyni;Z$0O`at;9;6c=X1g`Y*2!qCQP@1Ig@`K|4R?@`9H+}Myp@Fj23*#d6Ou~^(1KFfv!AAgLBW216( zWpn9XIVUs^kyD9&pgl^Uva*8Zt`y_P~6x}*W1j9oZ?3qQjfvk#-n;B za4r)ND=W1N^dqE!qN5whwAkKZg22Siw#+wiw8}IbnKR&w46@X1k-m(2PT@|=U9*XJ z3l;)9Nj67QmXSQlZSP;)gt-k*@OI9(&n0zT_d-};1T}RcHc$g$4h%eiN8zH{g9&%iEllq| z8TV1O%MUY5rjr;(OR&*+zsqy$lY{DIV@oVs>phCFVh@QvCLK@VsHOk*;`OWE1hkNg zAw-9fnGdV{QRt<{{JhqE6B}&p?H<#RffLfm<5Pzcw{Y<@Uk@ca0vS;>AbWmm!Ewh} zJPEQXjkJ-*CdlxCMofh2Nlq)cC{Ay>Kg-#FZQy%+ES~#)KvDQPz4aHBQ#*f}{!T z>E=WG8u?$@0Sp$fsLQRr&H}Qj)`!+(717zdIU+DvqRC=401fXO3fOIRx8Ue9?#~aJ z&ao4z$w#DDJvxuRpqb42NZ8@EAq_HMgY@^lQjRV!_s)w2W4^7BSYt#+_G!8?z84H! 
zn(T1?I=9$12kl6lRJS}hsiIk$@|BjbE&4jPswUY@@j*BYxUtu{&?^Ms!et2+$NSyl zO2OI~m05K=czP5d&mzT(m>TsSN|pK1a^HA&2UlJ2Q_#C)&e9GH1_oMAD`$ zTWe`g($a3oiR62!iINi;MZ}V6&M6Fb?dgut9fNuccG1g-@EOnogR|4f=?6{jJBelz zSgTy#I-z-p0*Ckoz;lW=PB%-GtJF?T42I(jFXH3;Jjsqy>c~SJ8`3lbPeivY3uMwszkEk(lJ1Gn4|hWitHjmV^%_isBB;UQ zqO?}JgY+_Li&yaGw>iv6(Q8t(E=R$PGS8kmEy5cGv&K+FJ(Y)wWrixCXc2 zF2UV{1a}DT5+u00yL*B=1b4R^cXxMpcgyT&s;B4e@5j{i+x-)&D5S27bu2(5 z--Dm`^KyueN`X`G_6SFL?cV1ZPJ{+af79sEy>z&HPRuKZ%Kq9t;k{Ctj6H~sRX|vu zhCOeILWI4CgD2VkYJ3A&=9Y>2VDvQhjKGb;9Al*b%C`?Uzn+ihyzvrTBs;yK-M+aw z4!uj8Mu_)y?$T*UO{j!-9tMvfrsHm!9^ksKTAZ<$_@BG`4WN;wpanFw-RIv=vwS-L zF+H|ySEc*hj&VNG5G6>kN+wcWgEhFQe!VXGYtf_Fvo9;rc3Ol z4L-TLe_G}@^syIvdg(e}`6gruXkd66ab~|$t8MjMYmd9m4j|@% z0-wAhVo%s&gMXK4B9K^mH#O1fxg2b;k4WH*!ltA$k_uJqN;&QtT$KTa?dxG(>VQ4; z`N6HyiaVsrfVCpVD6Uh;wBSE3@b?aADvjCD+a*a67tUs?ffuX5jnwBRNi(u=CT_2l z@E+gAh2?6$p^qB71B$tKxN*ox4Z;3lU14&J;){(v7Lvb0RW=VeTLOV~sMCnoR7in; zqbw>?!mDbhAmCW4*$G)dOs@gOjFbKX+GEN-*2OkF(1D1ACmN{&_C@Abj=uc@0rs=O z05OJe*BzxFX%{QWwJWDTeJftX5>GvbMj-EWlby!~9&uv(Pl!wjus8Q02gkrt(vu}>4K){E8@*i?F#!A}#~*f!Q(I>*wzUoWLCIsJG#&cPVDR1iM~ zpQ`YZ5}zu9M=^~iB+7#3Jp^pEzwdgUb=C(Js;PgjSg@?-#7|qjcsX1EF5V9thmD)p za|@T7?+n$dgFBSJp#t*mAsj+>wCw!Sh1zuW;DO8Nqf&x6`MTOZKQP?2opo^7byLE( z{?@jv`k3|TN@Crlyt-tSOKLFZe-&1`)l*NNYFcNk+Q$_5mBSp$`J-hSx{bd!ZBMD6 zz5aWoSwBLtn-K!I-36m1cysXM91eb2L(Eb}I+p!Id^nlU^Oq6|$dIzMpww{-M)H(| zRCqJNEfF#z0!g8S@pY~??ZT!K;`$bUSM8k^C>K*_D*);?7Ku_Ghe7UlWR2}ipx{miwZB5NFHHcUc1`9+w_E| z@eAi%#4FF9SJNhnkq6#%EI9O0!6PN4TA*%Jm=VS2h!1i4arKdVAo0u-w2$nYQ7nWg zx`scrmrLIB6MZEQVQ=!aozFsQ!yCK!;i-Eu+y*>Af#IdkrhNQ;mw$D$ywxtr#bn%G z(;Y-5vp0fl1jjTRA7k3J8*24z#e*K2r%SP*jO2XugK3lVR{G`m=!dY%@qrsH!Isx# zYXOo};(0=82UFZg>-!$7D7qOXO3U=q1sTvfZ$m@JGRzd$TKJAN_nm|h=VzA&$*`}3 zcnBwrj(HV7ha92@2zSxa!%9_{<0f+o>2G2xcX3V4T{3g(r4p(omyUge&yd^O#z!yx zqfAm)6(2c^o3!}nsP3^Ja4nRXdq=j;JZ2P>irK~`u};aCP+YNe=tck zr+XYwH=+1sCrOlhnCKF?8ow2NDB`{x*f%_<6$5WkE5~QXCA=8+Mi7x&|3&Pg)OFk| z2_@l{nFr~yTY5xq{*M9^90rN+fre1u_|}!^)`;hAK9Bz49jQhb*v(&HkMa(+b!wSS 
z@{mvvkshPQ1MLYHmmo_;nw<}Hl@2q1SmzTk#kaV-_F9iNE{QC<>AW4JuEpp%%1b2M zviK)?6EZTykHeRtZS~oFwwgBi!9_3eZTfnhJx$A<#Ck?eC4)~k1*!)}?wDnOAQ{sL zIXW3rURhob^4W;@?~;J6o_;of)XXqa>$<@`VH#ig; zg_p;0KC{{P)isoPy}C>T4}Z9r>r>BCKf?Ft=iR^er!9}bLXgAEY|U+>8MZ3b<9wO- zBDki@zt`pkAEC4FhlPVj3Qu--w5%+pd_7NJ{g`l0?D8>35wqs4y?saBgkM*|p*9si`9e7WXc)R5;W5+>xR0-gWU@Q!k za811*fvYBmA(x9mWS)taqT*YDA{TkFvrk>vot5#{(ioR}XfSU+!^dXlrR{sBrx?7W z_cdT6bBQ-=S26TZoz&Ar=xDf+?ouV)t(~5mxokre<$dNk$-HhL5VHisStMI3x3tRF zABnHLLxWaG!*T82F>ZMQzo^pB5cCOQ(6gE7qe8cAvqI9t6&`uDDZLkYO-W_p%>U$t zBl-mVP4`O4+>GC!Tg@>_4$r#=d5^G$_Oa?U<0adZdeh0J{W~sO&^Q>fKK;*)jez3V zrAQ;hTMAxQ&GM5l!aOP?jd{ztS0llvqSpm8Sl;-DVKCj^jD1g=G@Omx-AckHGzrpG zQLdnxp>nk7Ppg|yBOFPhgx)B4VvJK`qmXCO&=^wd3-(518dDopjF`2yQF!Tx$-m3c zC<_QN1WzV7cWEW`^uAL13$Y8__=;uZD^0`B7_3+itlWMO!|nIS=V$z2qdy&` z9jU}qgzbKYDulmmC`lC&W-jl?Rw8+&a(JHzzYRF(;kQXYs{a8cW&gEkdDR zbsH(>Etqz2;e~gE4D_X5J|WsJ&HqZjJDLt1AH5j7%iAhJ#E{fU%m{R9Y2tduly5PH zRtU7i`l#9Vjv(8)P0=wHJqnB&9M9|S-#zSe=S3RyqB~W0qF6+>8{SXY4S~gwEKOY= zzvC4YO3+~isid`*N=i}M&z2-(yAJ{Bm6I{RI@9$h9SD)$n0^tJ1jFY-Zo!0mMyDQ) z*qWK0%?5d25GI?xPl{n=wRle?KwonXXI<3l-;3O?WhD~|SLz1}$EM491-kR%zIyR% z4C$YNSQcY8%;z&;GQbvTHS>Tq(e;~Lz=#>K z3+mYKfiHyew0P_mb5uCjb5!^8rBh<-&X=SOj>lAUn%*tAf84uM#A*R7h0Q4!H0D|@ z4GS4CTdG1%76j>n+?A6p>K70y?fEI()2!Y3;y$=F8~!7D30!Sg$JTjoY~$0XX)nz$ z=lyc`;z9vg+F6+i!5>D9_5rFCe3~3i#B(aJ~vGSBO=} zaWr$86`|qG=lCAR5zR@l@T&e6yB_1lQ9~g}P@;}qjCLenM(bvamDJ!~zH#8a9THuR zZ^XAgd7&X2zAeEuq*4zby@5Cp_?=&jbgW_(TintoIzPgBE@hs(m;VWc>}FK6XLXa0 zcX+aCQv^QW&w;{LBptqaS?pU8FbmJ6eK`oXV~ZH_f38U1tL~bf&h3kU(b7*w&fDYZ zht<>eM0wW}Vld3dM#;5G#BWPhk5eu^@9@|!{JB*jx>8|je!;~6cE;(Di_|{2u8y60 z1UUXb=j`GcpPCc0>9gT4qSvEr`LPHHa>v+h3)tJ7Riia>+q<1v-oC|HiwEi-Rpo?52CLgV} z4dIk&q!D4H>M$&o{NELXpJL@I3P;ClOF=OUxs28-=MwaJlJE^OdS48$OR2aU@zd7x zrioZUWk917V7&T!Ig767#qf#@bnpq@EA>J${a*H*Gb`^vp@TIXOE=jq%_;YDl1!1 zG}&Jh;=K^mvIQ73WETRYru_~0sR~hlZZ>{fcf#ZJtMXPV{Kp7AN_{0cV;6gNbfYBl zfF%sp$`=uC_C!ft*abHiG%|&lNME)fY}vO<4rv8*6*Ee@lY_HeFW$lsjR7=l&;kUWHfvqQ5Bejf%ooJAPo|2pvx}CpJxiNw2WB*{ZpP 
z+oU!fCVUODtCCqwP`IHPE;dz<%khelnpDd-ti;(U8f6O+oBBr!5{H?N!xb>QDw3}e zW)5B>&^f-HO1%CBwfz{~|7Q&CkJ!YbpC% z|1n-Qiuh1}fX%_+S+=#%X`95Tt(Qy~5yL3VhYz`$oAO($uDDiponjT(*-0sDvM$pe zaVz@pbYp0#lJ6p6PDnd2C2W$M7I#-bN4vf|mYMssed)aCQaJQ5L!mnXdOVqFBJJ~- zgp9XiFQrc9(c6_`f6}awXb5OaXta;i2qkN-=l+=wnl3vl<}5JE%!IY^G-A8(Zo$d9 zuSLP*{pxf|>Fp6)SWU+hX9YK8wxf}7e4d^%vEGfHl%%%TjN3Z`r1uWz)A1k8Me)DPgH1>NLsZg&pHvyo#b zFIlQ69V;r<$XtSa=mdqhT;y)D;Z`GzG5)+ zMVx1_{`nY4+g%st$SpeiOCKXuyy3gNQQ%hd;dWl%UuQ)1CuD2Wx5H%x-f?H_J7jDS z^mF{AUuz-f&8!iL07zJOqi6ypqnd;m2yZZ^>Y=VvXlrZD-xCJx`@7I=VtrS*zsErr zqOZzfC@0B?HgTOM8Ju?j0!X$5V4xLaQ8|oX_dmJm z#F67R6~D_XQBu$TeGNz@A&29v&9DMJWn!VLU$4NgP+EW7!r|=kF@H7n=!B$>NAdsP zkhawk;_iZv;ElRL+Xe3iV7fk!YcAUgG;CZ49Q==l@oNM;NXanmg(WyYd-r)No{GvF zBUkWRv(a&6nZ$9rdW8xN9o@rfN$O$06MIV?1l=(MhQ&jeL1=o{$=OxOHRRrk6}8uw zWTKQR3kh{4na>#!f+eARdD#r70qo356!N=7SF;CfapR9_s$CWaDlM3xQk>Ek0bI(^E*rQOS5@d!RsdD zDb=aiSSOED_p_6NoT+5v3HsSGb|{e48z7ZY`@#KV~TXxhz*vTVucqPEfi&|L`SZvJ`J|g?bzx{YDXA z%xYr1HeHbM)A(^b{SGTP<<~jm89NT2-y&Tzj6nT5n8$Fa1+NJMpTCxlfwRpbo9HWA z)#n75Io6tRvtPJ(G!WQ?HlV-F&I{3p(SZ$T7EO z>G0PCu|56M^5|gbM`c$U8UtokWu`O`#$sNBzCy{~Sh9niCQ;|V*P;les_^K~V{4O6 z8%~$^o=yyn(wEixb!aCCS!3D<2Je2WK_(K0f_3AGg#y$;MQQHkh+buJbnEgY7JSY; z#sUbg7_NhVmH`*TaYlY(^;F2%;ef#x178diDop`5SW5^Uvni~c&Y!T}pBab#t*eZm zC-lSls=B(wn|b~#LSTZXzP|6K_alRD>sUhhzF*3uu*ohKR)~tn>4!`#;-A)JikB_^vx7M>%;b$~3%TWw!?s|m zScbP!#ehVGK-4Yx3>k0FcLBd={QU>-g+IBgDGiXDrjG)9VV#VXheo%%H5! 
zO_E#A9xP41yx2uG3pA^q7md35ey_fFBPz@I=eclH9=3tdc%Q6O*EztIy)?!J=8QSb z_|0$J)I|6;p)$&pl}vSA_OgvGd=)klZY=f4Kbql51HE5m5b6Jpj7dj-GGEP-3W2;O zVRhyvEOjp8uT&_u{C;s?TvJg!JqCLy)vBD(M1A~$hqZ7YVbAYsbkzNwo|5sXz86DY zke`$;SOdyG<5PybD<*~l#gfhI%jd~8y+3~4zL!UR8w@X@Z1g(5EOh7}1(B_h zJr7<7ZtAY7SPMrs8Q>=IMIt0srn=~M9JA$QHg9>|cLPDZ?TRz#0R@k>^BSyK1TrM~ z{aCt|BU=r0v&Q6KcuDvRP;8objh*FiZ~DLT3xFI}EXZk{KK-=jaF|+6jX(r(;fv5(^sk1eGWk>3QGJv#_w(zyNiS z-T=U=Chk*u-4QVOZWIRScEFZv+0YUCopADZOaR_{pT*(;+w} z|DXy0v2J90jf?gaK5;1}jOZ5^3vEup%nSrOV=baI=`#9vsK>e+GPb7bY17hLk~0ev zZR@z?g{M7(*#aelrPo!y3ZapVw}pubNRqty7q#;MhaMbW7Ai1g2i)-Pg8n~R2&ibB zkni3D)WKF)Qt zamsxeN{3;_4^4jZQz%E%f~BKRFS=_?gO-FdTeidae9A@~7{vHbn{%y!fV*SpH*-d9 zMa@oMUu|lst;zA2PkAAVhb4`qL*}7_e@}ajIEou41z63aswjO*C{esH_|eZCxehT@ zh$5I$i3P7n=MW@hqd7r?i&j;E-SkporQYlH*I$1WYOaS|5Sj$T^OSFm?`T0SOsO@_ zYaM`bNjZFGEgdLw2!sDY*v!hE-3c;6X&q&>Am=pt)IK;5XEl@ko+B9@_MkMIKfcvP z?pQ}=V0)oqHf3)#h-Lz@rjPh|#3bS0K0+|vmA){IrFoiX&AaiXnq$BLGBaOXEHtLl zhTUq}b2Q%1`%?bBQ=?G?h0rC%FR2L|{d3EMe0P5ATR~ruy4NKF%wDo!mzLw2zo?z( z4i%Qo`GfP0PWtE7Mch7++n2=7aeq&RrU7qQdkCEo6_Wm>LMrtb=14nZ(Jw+?+%d(^ z*T(-21#OFj_Udj{VH2pWhRhcF{(S~u97r#iTUbDFMr9uWV$-1TL*?m*`*lA~yxnO< z6VMM6EiA0-n`C{jtfB&S15`>ju!8$^1_uWN&Fctcc5B{u(-Pj^y6+$;R@EHWnF1;0 zLfLqemVX`2yGTp@zRp@+?wo$aE$%I7Qu?ypr06AGt zn|9pCydL8dXbCq+UXvD4!Dv&>vFNLui@{h8{n#q4NPDaB#WSY?g;bAgP+H3{Y6 z+na>gIpM^^P**4;zYo0>Mg9u_wT`IxcCwnS@H#SaN3$Mk;p$ zV)sbzj(EW1brto;3PxAJVrZBIE+@HtSZM|AkXyj{AG%q73%Gz4Ih^QD zblqBIcm%Do5Xip3Z-}MovxC8@vX`6@;L4sY=9i3%e|@1n%#-aE!M0VFPiJMS$8p1j z4=9zC79XAmN?{^`1q|g;3@=Q3$PUZN(e+=-F|!Vhah3p1wlPGDpL&RBdgVDFAd--CmdN{OQb-X)TA_%>mt7T7GGOmiMu!s#Fg=*k? zB%HRnC_0&U+3yBhz)4J4g}~uUKYZu%dS^3itH>s{>Q%JWHEv>rr}e_9?lqk5oJV6g z&K^tVm#51DLJN$-@MM=_bsykjcY|? 
zcwDtitvg;F+GC$_TBFfwIe}6}2+h*ok&TP%5vXU4w-*%bJk0W(p!`JdX&?A6!;XJ^ zA($0WMbvbk3SCB=JeLF%^U~fg<3XghFu}oOPZa51)5a68f)X4>AtB*C6jWtafjAO= ze7aM(hpuv|w30M)HMCC@vD37$*9p?9g(dI_OIFgFKfh zIasQf3&0zeng*Xi#Jh0s61Aos(yf;Heco-cluva+Q;?uc&q&>{+cTU*1tN@v%rN|TW z;FjA9ZQ$4(jRU)b%$l&R;Ubfe%tq!7mHh`|Q*)Cl?k{?kD+UKB6aj3MNqf~y3FwC> zhL-w$Z6LJnKIvc$`rdM?P2>Z{WHgCJ9g?Hsq|fGq?mNFv97GG^Wtn_@yL;DkG$Ef4 z5(VkQ;w0Zc%5$%Fhio!7-uc&!6CPuh?I~XF3Pa)|e-a3;?#@$=?e2%UA{bc1l|51{ z^oPMeK338{@`fQW&w8TvALjn|jlTZzl|mYy-_=*LCfW)^oS>r_A4JJQ z!63aZS`HfkD3r~=YGHO^v|Ex^$OyBRwh?g1vqW$M;goK;s{-xbdmW=huj7hy*0dGEme}=Yc*&W+5^clIIY-G-h1U#QOHqX=&k& zQb?rDuAPiH0W4@ghPJQ3&?Zrp+%1K7b^oq;T-&NMwE)HPVWpmPt-$sU_a^36;DP-fm4=sdcf9ocv)HNL8e-DtCUL@Ti+8LEQD;mOXi%?58e_ zD|qayAw49r4uAC!h=Kj;|SFjhOCtnFpgM1;nHu z13f!He>eHA4q7jAqy6401cLl90T3Rto}*>cV)|lW%5=dT>NrR<^SBRFy~)g~39B85 zJ>2l*$8c*{&lKSVr0{NappG(fBfkT_xNT|0%QW&LvGpb=)?>QJc^0CNW%j*E+(X~J zJwNRZ9Q(B~pTAX;_S~`P2hG(dax&FyV|PEf6%xt6!6~Raz$Y-fer_3uA`>BKws%J> z?AJ2gF*)BT!b=2?5KQw}0Resl9##5{-0O`YQbBpV^w$m<%Mwb?ke;ICcGt1|uJ0tK zS6kwC)IAE?o_}IwkOizsg?EY9NWahpA= z8~gQPiOJ_Wa(a(A%$a%}xG{cKKOlDBi*MTSDEmK$rb>vIka{fV%3^s>=tkFkAS$|w zfS33K84od6t~`HN2y~<0fjgel);Y&A?hG;=s(G&lI?7qw_s0_P5}`et2z5r zQ&@xawEtN!BFK&fk}gi_v~>J%ylMn6@}Z)ZCpDf`1USaXx8D`aO-KCsg*A$B^}F7r z&d`JOo^SJP8m8aWLT4+FeiNtTXs2N=`v62}TSe1paIL{Xv!Vb}HsB=_Y|Q8)Mf z)$ur8!w(N@`7++g(wBeYP+>n23vrUNz___IH=`0UMW+VMFykasnA7Kn;Jm!e@MYDL zzQ>(W1`6HyUHb*G_;V5bvUBxvcOS;EJrJ?GhD5;o+m&;VgA>(s^Sq|{?Fl~i66HFe zDSo!vylZ{Yw92*34rkZH&TF)*LdlzIsKzb^a819O%2Y)CM8eEY*!J^=aP1xy+rJe< z(}_!*C3vyuElH<9|3rS_$MR0Hu?2=VPNHA#BalBN2Cnsh_KTa!al~uKx3oC1-U8C? 
zwO66=&0-n*H$3dV=jF`s)LQ9c+<YN}p3lqRz3=ODR7$lEHIB@;wY#Ijp05=awaXoQ1uASb zqJ{9SlhGjRV_3%1Pe#4t#(j1gV?EhA>VawF;f}l5@-Yy78?{Z|FU@x6^ZEBe%NPfJ zg~2?C#m;v|jy5iPt8=g+^udFCkWl!7Avpj_kdSl0i80oPmY^ zjG!UFCGj!m9`gV90sYs9mw65$!>$KDozuFsoZc|8I_2sx<09d75{ieq$ zgJQD<>J{^uo$vdpewlE}en6);@IIX10>E=%%cjQ0#oe$(ZbCLMudlvW-LQb>5$3lJ z0-DJUuMmH!103Ba(BlT*{hYE%$BIUcQNQ=&IyHH2Ztf)iZ4?q1J==es&4$#wMi>+) z2$+c$97*M7jAMR~Zw?2sH5DH}@z}>WhfwD>!iUO!f=0IRZJSUN?wWvv6$(Ws^bpav zB{D8dzB?Y5hF@1(`Z>|wZp!o~<9WEV>?_5jRwr7rGy4h<->STXmkM)n-XsCOuD#~` zhQ1y<+S8xnSy5rkIK01cvmw;XRzIX%M*v#g5PY_)m4mowFP~)^zSiCB@1z-e9keXovVgKHx_LJN85CIBD^MZ9HQhe(CaeDqjRV^Jhg6vyk}8 zoxLHa?B!k*vnE0`U}1)Ux{%Bu!>HbG*c?@Pjo{Q#wLbw*OmC})V?@`fZ-$ZeG?oRE zyC~{fbnjL~GTkQS(2uicAQWLf5fmUjStpN`wz{<+7 z8D0oDuQ&AYzin~n5(Wpd21AAyjO2`Bvr(9$EG;VmEyiF}7z9UjRmOuY+usLg2Ux~w z!4;P`Z8ENxTSubX@a&!gsvwU&rdsI1DjB^kD`H0F_!(!2=Z+%AIkZ^SHoMxQoG@X} zTrcZdP~OC_Kt+{HFCdDcpmWB-MhS0me;su%iVnZJL2HCFpXOU$& zzWBM3NB%eOnVkVll`}30{+V0lsD3`#LRbCt58cq3F=%eoC^#x)oysi{Mw}UEI7S=Z z+oLmbM%(ZFC11rU*u+C26WI$<=J~>8C7+*7z=^p-d&_s%hQJt12^aFra(?utmp6ieU~@_p)Dj zGRdU|={7+AQ(g_q{2?+@xV3$PfEd|kEdz|KI4*Swg&{EXL~fH9<0pa6JGvQ!INfVr zeEiWTB1k+6g$%91Hcp)Yb(;qE6)cXTg2x{f^ZqfZxVJ~AZBjAFtt6u-*$m#h+)IJR*r?hzgyInDtCx20VU$`g%Fv6@q=)^i_tRe*mHRMO06o-*@d z39dP7?tye!CYXknP{j4w3K`t zCoi0+mD?8QzF$)T2;N9&V-L-mHAF}e#$NkA!VD5d3#rM3<}koInxdoQF*zqq9D;uU zmf{)B1pAQ&{Wu2WB569mr#Crjo_$T+8Go91_=B&iK{5x_+>IC0;vF`z4#B zPW5(Q8gn_dGI4UIH#D+Z9kyD;1iCo)+b zCU_3J95rkl{)=uRqU+x}z&R+w%%U{NXaNzWA@!)jVWwV!tX=9ETKxXNwJ-QOD*o9S zNXU8KN7nv9Dc5gl7;Q#b=sO5AFeej%(?C(srWSYyNiecDS&~pV$VlkT0eHwdZ5|negv()Kdi#YC}eE{g@q{ zf%*2HKBnygHE zwJ~bzV*KPK#G?!W6u&I>CDvs+>cHH}c=8A_w|>Bw7dE{PGjldZMmPPvz9n>?i}LYn z@)o$9#{?LSJ%4-~R?sH13l|ub73qbp?n3uZ7%QR|uYsXb-`k~7$3{pdF1?F<7^g{A zh`eUe5^a;TdZ6JiE@vUmY}f37533QAqAZs=x!~cF=3SlS7y-__FhA6z-=BH9nfJY6 zoYWOvZ`N#XIvwpGle^+xn>OP-oU8%H?W-#T{hJGzfJ*uA^`~i`KJa9nZw<*M%j<1n z_}s*Z@bRd%uH+6?m+cThn6VN__XW2dfZktwvtRbDU_4Nc*=!#9hDp5*_}%}l*Od>n 
zmX_95PFW0}&06!Uf}n16PG(|azPNZSk@u=~QwdrHAu6gJuyE44LJ2myV76p5UeJn8KJJn9M)Mk-bsHAR zIQHFB7KFn4{f`NPCzW3t%X8Aq8_Q14xkXZAXBVf|LVmD&N14Q|_}#MQD5HVjR9v82 z4`;*~49G_uzw;J_urGdf*PCR5@c$uB{Phna@4OE)F7p5@9JR(1*r*&A(l`w6h2xdW zEN=jw?ph>Ozzr)#oFM8yz_%MPYDs6D&*vUvp7Tzc4Z@;za3DHcL*s175Y0AkbYp-m+o(_ou z3_T`bG?-&DBjf-A4kn81cF%yIa08HPW_L~sWeT}+UpJz1Wuv<9d>SjiS4cJHZUg#D zp&o@=WxuJbM73I5$T)(Y1c9|=-9X4vuPy*bTJM`j&xb^HY>6^V8Bpw)vRu` z7c~DP^LmG;02%e&(C!8<1j^+r*BV${^UrP8bBD$Zaay})^tRWuvzgY;+x%$Vs@Hp9 zxlfyqGn$fx`v8D4`JF8n$%E;QV2Vt%SkK8c`L(8D8pIsJEykDNcHqmYJ{hh4X=zHA z@jx}>aQYLIg=Q@f*=OhGO`js_){DGuTtA|QF*{SuWA~k%wQtf(w-mC37`;OvpQzpR zB;3)nBl7C#1EBMm-80(fJvdldV&ADfBNmCq%c#U{Ufs)m&CcOK>a!4FtgYGzi3R_6`ubuGGqcCS$Y;~x?lI;vYcy|FQf^7M4%>!PouRO58~Jv9oZ z-0z3cnHSHTu}$5-gud-0X}~y#?X!RffbSsz9@;Y_UY+8Skyx!v@AIuov8e5%ldwA= z3$`5K&>~aNj>U>nS)@O-8G3MiBAP{fa8qFLU!fX=NvLduygsfdppV>7RmG=N^%T@O z_Xkz51w=qevm09J2k#cbM(37$=)cyM z=l&Tivj#>N%~=zmG%dTz)Gcf?`Y?9!Meh|P+h?LkloOScyOUGlG%+=9Y*R2u>mZ!`G zIERKadZli6hAgF=pgy4}%RQyPzs%61wgaqekKO0ptag)ye8Ft<{Z@L3jA9)F)?$vU z(<)|83jf2K6A<0n^l02Zfb1XIX_F2!Gc`r;u9BBV`VATq_jpspH8-y+ z1rG{T-sxRbNYB`Cg{z|qB1d0){Z;*i(FHsP;bzfybC8iM)~1&GcpO<+CoJqaC|IBZVL#wPCF zpU5ME*XIb6bY>F*he(zsX~R20P%dNiV?L5YvmOifs{0=hZHLQc57&kE)0nkmOy>Un zE@BmW%T{DY)H1gR?|Y~9_NFD(=sa%e&%&n5y79A9XQ)m^?gzQBdQV$kmqRGv%yT&?u$d1`8k`29!%K$ti>I?ite!DGzH)n+Cnu-fm4 zIwz0Z!N5GcmUS0W?OXzAvepkP-M6#VB1bfJU1zN7jg9wEUB_{bq6~+Ke_jJu|FpS1 zahH~z%V^i1fF3QDYZ)E`qQkANs*lHmOzn<96hOs#w*Ou*SgwfQXNcUO3oy=p1LlF| zN4+^uTe=Fhl@~(ug0?!>iHR6By5*$Fj*nnASD>A5!q?Qy^nQX31&stk)DI37oQ#g+LmpSFB;-R}imBIHRbIAE%&u>@b$ z%-#gCls9T!t#Z0fZ0del?4hSz(&(437lqN(;Bt1Y7bSQE#JPsvV#{;RY?)V3{cwKI zL`KvR7xM2$MEDy4P%*rPQbV_e^EWUt7c7#Ey4%c9WOzk}tbpR8E^ue(&H$%daAUeD zIn!t!^zG}l@ae5fgb@)%O-TPD8{9TGae~oM63p48`p46V{#2VE)nnf0qv)yilfEU` z+V@1X^D>FuG|o<1v8Lz^A(nOvV7>8CSr|~;*tFWeV`!|=@TIFZXTym0X7uuv1BGwD z*ua|4Zrb9eqpv=CqMktsPkCONXzmcO!%PdLVClP?-l!sepUA|`#;%e)KtPafk}h4c z%iOZ~OnAYrYKX@d+dwPG)75mA?6d4{k3>d4*250k6;&HJYNVTv&ja!VBXraX#FrZr 
z>@2)n{cSg!Praf~TA=5xf+AbO!E>jkSxE(^k?W?fbox#64P@%K%e!R4 z)b#RdpA`rR8M9MKU%rpZxR8IV?=_#H5yUmj?}F6Xm{s*zRj6;+f@J!dIS}&tr3s0+ zl_3CBRnUyCS-#49v|c!ccp;|ioZMvWIxn+!vEWEFk!LqcdC{|TWw7^xc)4HmxM7{+ z%E3BgqP7qSR;j<#&{Oi=3)4aH@|Q3oaI~TnAqtKObNP}uBmX&OLQr8~|9R&AKR@WI z#?SzHI1BstO27^1H(F`_EKZD0U@pnaBfCDEnE`&+%AMocqDb{{4$$!M@Qx}HQkb>6 zj11!Fwzv);tpPH?(#qP}8c;eP!tniHzLh_kNY40_nF4K{*f>JFmr1KSr_*q`Ng02V zdJV!z;EDJTN4}2j{riEycL!yAVYgah)p)`_%!HkoZ&r2B1dON3HPVZD3?$kNkGm`1P4<`SvcpW}H_!e_s~&L`{)TsF=Jj@C4b zg;i<)a+%J6GWT>=RyR|R6=AZFAGNYl!ceK}*VDT+{%GHoL_|&UIgr~qUztcJim|Bc zT+u-65@yn66|#2M(g8ccLHMZ5;rIF*U!t(iYI8%QR~sB*p@=yXjo#Fik^OSXGDQ)I z-=bk#&8%*$;oMB%zN)0B6-Lq)0WV=p5p6AhDY&zR*z1pF{oQGD8#X}11_A-+RP16_ z8--3)jVT44t|B`gwSMPAYI2k14{qN8(*vcVX-c0<@4hIzE3(Y&4EUybG%urSR znmLS4P#jMh@oW{%D{Yn|hG-docq$s=0$=Wc7w^!jXX|`?1U2)NTchPX5Qmt8&leG* z@~W8Ouf-IP4*Tj84^?_R?y|i!rm2RI9^zvb%m)8i8ukaP!2lH0VX<2ZrD9GT*pQqp zKhwJE4c2Yql&zcz#ts7t0tkOX^-i7s*zSr5N+p8H_?yYK)*a@!MUQ)6e=jP{^j*M$ z_h(a9E0HI2Ro^!Iu}|@Cp0XBK7+kSpD8CKUe>2B@lVf_3j`x@6;8{6lh}gL}3&z_K zx9!KMwFfblbs9)V(Is2mZJ^i?ZWF<`=2unY=B*<%X&<2=G-9FVJl^g=MTCQ)eJ+7Q zmAQ^@EveT94*Y-bkjJ6B0o&HTd!R`8vh|eF9x+n*yXVmqY2#1UR1JCzV`Jm6MQm(r ztqQdUEu?6m=wwHglCs_Tceg=taj^l9ksQ-p%Vo#w5^c2j?Z@jSEKE!>HQMrm(*Jwj zGj^ab%{}g$-&9F?QKx8!>C04q6&Czj&G>;8Rvv%;cSB#44(mAsp>>45R!dmA z)NJ0?^$0zWg=YGUiWV5La;)->*_PAt1eiOu=xHN2j18TpA5J@nYG*An5sPtX

6?_XREd z;gAooFf0eG_rj7G*ah>&-b$IYTy!=y9SsURR<%npBR(RU5>IN3DG|*=A^}?Ge2f_G zshFpy{b2Ml(>UPOqzNzk3M0^y22U+_<-b<5R_QoU=*!qH7_90H#Rm*{)6fHDDsr}( zv`?3Q^?|3zdYo0#SMJhgCEz2Ixv13oTmuXn5BDZQXI;iJkr-pj-8}tPg5vFxDiL4TnT6sw(D%q(^H@FV9|A-b1|UC~TZca!=ssB>SLif#4QtcCw; z&%`%d`7Mx`U0L|X2U2_?&H>7!)=XrZTbuD&OusoM25@V*fs0eP99k`^7yEA>{TN99 zOg`62{Z2jpE1tuQ#Kc5$8^apC%K~uJGHcBk_APdhOB)CTx`e;?^Ya^$9NfJp_P#?a z$5Pj*GnI2~>b0f)`c+p0vGkutpn#oZN+$gduamhFt9XcidC5dkKt!;E0&kC@CDbVU zy517o43%d_>j+T$k?P6$mx<9Lj+lQh3aeJlTnL_9G+zv7(s9j&L4)Gd7H8fExsU^v zt5g&aEtNgzMRRhY6$Fn5*~;`(Y{4RG&nQAu)OeGc_v>rEa_t$!r$rrqqVovuE}n@U zmdNF6R*SGiu&=3mGZ!qR$x6i`J%?k6H0y(3;juYS#<%1o7Ckc*7J(42DuyJItVkNW z7yT86w||V4V%nP_Ob*&NU|YJ?yI&*}Jo~xT2%&2%0NOf@q#YD5dMHUC6L}NzV|e#F zi{@~+3VyhI5(q}&4igXYFq$7_2SH;I8uX8aIAT4StN1=yb6NzPl#Ze{#E#mL6Pb1d z-w>tm9Q%I|LurjBM~in(Z(3D|JVMZ@3y;xP_U`aJE72c7Fn8{2wYR}}B48%Wq;1)I zKd|?iYNqi#B{_}J&jliak3j`_6~7?rk@-zvd|k~+J!^M;UZa@G%H@%nlo!+Om^QXh z`L>mW$V&9Oe)Kk`Xn<_BU3xV z-s5zS0C^O)QP;vy%nUAH$7(uHaUu-YTrFF54ExJxH+N1Ofyn!QI{6S-88qTX2HA zOK^90cX!v|5FGBz-c|S1c{yMGkMOc4i!n!Uy-Dx?EiU;teDTx!a`|#I>5%tVe%C_O z+?#8LMIkgSHS`aKd)qcBJPy(c$D-Bul9ZHuzsY*9>ThUh7$ovZr}ckwx!URP*+G}( zsx}(MYA^?!XeivbsG9lzr+9qQgW{8gt4QcPx1lP7z)Z0Uw8~f=2fvX1WvzL7Y3j{2B$3V#X(rA{)skX7xM!Z#*VH+<~9CY3b2f}Idv6`sfL zZ>oKKM-LA^QTW}SD$vu!p9*pj0&$IM%%k8-G$E0PtLa{sn-HXwsl}q2>;1PGPG+gG z%5&&^`kA9tFON9K{kf7$Q4U(P!mHo})`S^%@=O0<9^Yer_U~LPKhUma zP|sl`WKDO4T&8N|Y2SCsnDo(TGJI_4e0jcH$@s8kGWs+En_Y^Hu~OiaQLKekMjV8Y zJrp^B(YzA?F7>mZ_lH%SK~KE5VxLnoo&!Ibe%^=gnPd)eKXYICcM&Frxm#LW2T|%j zDRXhBxVCXh)8Ehr8lcoaAA~(WJ^8Z0;|}Hrjb&i;mIw7mKBxV`VsFQk9EzP@4PYX; zy9$m%EJSXu`tzcGg`Js6yb%-l^-ANs*z(i& zgwYB0zvczyn+#ic(o7zn>!0jmt&Ti%--#h=2ZXsj1|Mv)GV8A^83{YRkv<`yU4{YZN7`@ubjcEB9`CWmz z-4aqZop0E~gpY&p=t8IP+&Ql+L#If1V#C%ccreCyZ!KQ9peest;#wzu)wE2b^J` zMBCT9MVs{&r-QK+CnqOBY}*FJg5?rm2OKL~(;+1gjA;z7@0fZ39GNBa=859qz{mY> z`2Zf+l6$VrUhuBM02!W$tz4Uj_qYU_QkRNNpGDDI5aY?e!8v*Vbf~P{T1d*B|1Dp+ zbR`n2g0zX8s@iMU-K{EgLZU;)F*%EfT$j&7dUCpaeN+=XKmogq*=THBrHt;?P#aSt 
zhWU>eHNhZ_#UJtT)7Eb?p|?pTbLOesXp(t6?ecow2F7wc2ozRw;I3V?Vq^Sv zwC{2K{wO>=VNmYeit_1})lH)aOKEJ&t#o1cl8k=9&?5M`K z46PhRr-?Ny3;BI*0p|@W8#XOkA|q(HtGm}26Z@A%<&LbEWjU&0AME#eRx!Qq)-=f@ zmQ^`fvy@rHhx-j%nU(i;p9pGZX4bXxNruJ#0AU8Pwws+Xpo3)nZQ)Pw0r$GLu-{!+ zqtN#Iv>wOQ9$t&VxR*qsI!6PtFayrdCP7N1Xnk1T2@v+nR3RC~wSX$FkL zMK^>57u!c`4q(+{*kFNcF+A8GdY|&x%#X`WTQBg75DiCmw*4u?wu!-~h^1hIy#cCG z{)!wHetdutNfCtVZt-2NWN4h2 zm;fM+fO;aBO0T-Q`hNl%T|-nb{%Of9Y`;;mMt0-U*8n1_Mj|t>;|k9hUOS5rhwA)+ z%jxI@vU={Q)W3{|0)%@`PO@*rM?o(9Py1`i$8@ zA=_cWYBsq|4b;!D(6?NAh=ggyFbQdf);~8Va?cZyH$?9EerzNhA+NdczAi^*C*cXV zC1*}m9!Jic(SEEv{$YXZ=JzejkCwImGYRbFaga|jM^q`eNS+j77DlAnGs1}ZF9ygH zC|Hh66D+*0n$LVD$nQNK3B|>Q*lpm;M`hTE0QFuNi5x=DX<>)ig^k*vs+RT>GyWX$#Gt863JR+vBVdC6G0uSRXb zbq8t-@3+Kk8ZI8OO1&RvR;g42JGv0otee?qxtungX;rndGUItQu*6R3vxCJ>8a-NF z;+LY*2hfgX`C@=L`}XDJsSzfYR5%Kk(g?_L#GG?~J#%{!4(LlPxu&2tDQo*-4)Grl z#6&SX6AI3zNK{U8;mf{#?GkTqw$c*`H2OBqx|Vl_;(_& z!_ZQ+ixwuyCT`n=>?#7L?ROW&jgm;_(|Z=&2$f5I=tG@fTlfQEaMKNSq}Yft9w@bi z*eoCD!mY?c9qOy*ZNZhB9^+YDAq(l^s&Bo|V6js-1zMA+eazFwLfi0X&;MDVN1c z5UuVO94Vqi`EkD>O zLhg|T9oOw32I>Z}|EMkB;nd3i@K&;Y!Mv?0=Tw=#I(DBdNGOXM1XugM@$@@0%bC@` zs=THfPO<&Dz{{qB4w^Z3kXhSl&V{4~m8rQoG)euk?_{r$ucZdu9ORcxw09WQXskX zu0}F*X_GdpXW^e!h;UWAE2da3JZ1VhmV2>XK9W(V!2T&6==v#SQAD*j2?xF>IwmY7 zcAeASnJ&2aep4_7x4I0A^f;padfyNRwc-yVQ;5*=m0U6gD4<6lA4TU~#?}Nx_oU2( z?>_cO=B@5cb)wEeCF1?|T0=j&ncKZge0wAUS%w1pRv7KZrsF^uZP!ybVCIW7AS<@T zOm?z?d0X8;YMmf&>^^u@#sG^P?ZT!e(l%to{NFoRW}e45Ki=PV^EW3XIUMZAw37@w z>@T3Ig=?97zSQR%n3O9lha;~s(D*#Vg$^!QmWEj1q<`GTgR*Ig(NEcFOX`Q$<)+n% z|51?g8N3AkxaBxcP<)EI*Q!``oRG-rt<2Ri9>9AtM3W%WDJil@;#260`@@T|_eID! 
zZy&dYQgpM#p?gg&tc{HAi^DI7*p&piY_!PD`?>~^lSoG2parFU*I`GHFbMtEQb+bi z%lsR^a5;^#x$XzXpyQq2H{x9fiz@TDDLq(pW`PZ~xgCGbKJao}Hhm}(@)T_nm1S$oZ6J{_&sinSU`Asc#n$Jy~Bl z*>fXH_-&ttLqy~=$$iiDxEHtc2ju-o#>?w7ohuA1=Y;OmDuz#8B^8bOyoBte4g}qizDmn@8`$X zn>McX0s>>2jn?ZOUe6WUt=V?hd&7zzyxGt5$6QP~`Ud|&6(Ldh`IOT^S1LWCZN7Kx z^en??F3*gpI=5m8wM}|!nK~>u`_J9%Iw`|Su4Xk+PPW$0;GwdUafmYn36`r&8`_bU zkV{AW5zXL|{N^KL$6VQWRQr6Lkm5iIS2}XC_Q}}4hK@=ofuM3-e&~6vh2u}hvVx}4 zdXZd07JLgpln8brSQfDRlUCGecNB<6!!_r*?~mhL`E5N@Z4yM-szbQO)g(jfUclhW z;1w!kKi~ShWA&j7eo;`cb;9D}mCraS#$a7om=zWM(*K~!Oq`fz8I1a3EKuhsk%2Cx0 z`(i6#+);~ay^){X*h!0x&2KPO=3n%+MPd!EBb7BXiO!l`SYIw%yb;6Gy&$q|zuZQG zmB{gl5kiv;xoblHfD9u2xuS^_o7&}epJQf64!!ASN2&>3v%NX-0~E`dDy>+vwLFyt zP6OD+O4V-hEYVtE&P{C$RqNp&$oErxB-&jN_Z&JW^eOesElg`C2el|G71={M zuS|tG)03Ifx9uUT%xeY6@j%37Ks@+RhrM!@>gnlewQ7{Vd0Z6L)YxBav;&bwfZ_+zi{i4fCMl7Tk&7T7 zZq2~0an{XnfN|3its1wYd>kOow<3*Iw>n_ zwuY8g#6tdn9=UG)pJpzGlbwjz>+O70hW;=R#|SRpWAl`)e%a@_B-)wd4n;?Hg(LI7D8Zn1GFUV2u@h zs8Xp{P|Z#TL?!*UPD!re(F{TX#|&u@rk}H{Z@kLq5%ltQK@-us_=0+ZAYoyu&5jRB zHSmOc_+JHs6D!fP&pF?ZltRZ6PVoqGVQ|cyN%VskR`hNX*na^CTQ} zNk8W{>#vwwU+Hz~7&7(&HvB3FYw)-l@k$3qbj{zN#CcDQI-9mWa%fxc8w)lzUazBd z;JDrPiM5xsU8m=UacuxZ7k{4HAQr5p(Ixl%^Uoz9Q>7T5F_r$W^2S<0{XG7ASf4>bw>PcGbI|YMn?q z8(Rp&?qB@b)B?ru^8&`ANT&t|&4rK!tPgPPR{YK1R+Z#5EY1W@U>_bEm{l6KH$PTf z+G%pR)EI+eE2sCqDCsbK9x5u8d0CaT5MWZvdH@{U{wv6k+9za&+?3bPARdHr>^Gu z^~afU>lWQ;+Y6m|XQBoy-f(GmdW6Q1O>05NV{r3bxhj3UG{RDLx!|UbUKu3$ZURfZ z(OZ?Sw;3ZQJ@sA@z&u0u9+hn0x<+vfbcU19oFZ}tIlb^DYI&l2+qwqIA(*{Wt^(j)}qSKR?iQXNE6wZw!b*``Oej5 z%xwIi7F-IYF+KCW$eJ-+K=glN$yYV3vaw7StRB9W#>!eTRJIvS16EOtP0Lt!%~kG?JneB&q7YW~npl!ye3WeP4qm{Tj^;Xnl0yFf}-(G$Jn?Ky^)+xBl3-do=6$Qra3)7bzpwMroz9T?eGA zcQ@W4Ln+d1Omdh3AgnJ!#eLa55HSceIR1PYHWKkI*&1sL z=&Yx-Z%h0LAMNbCI#z#F<7585>FVFE^eL*eIepwIR_3|6fuu`C8yd&wzWdQg%ql+W ztxN`o?@p{_$4FQHIqUcKRhMbIqmGOoE()%g4N_K6Pb(^$<~e=ddivYL$|gL;_1&kv-Xawi0KuXk7nNLS2{e9Uq8d# zGsbog^%Fexs9PkX{iwWMcc_c2zK5Uo*8>R*qc3x!&> 
z@_VpE%0r@MpXdYcal@@HG|9VQV{R49ba_sS>F(RixhSvso1wvAWk_qsim~g36L{p60|`BQ*4>`1I=m#`5GYELLaEjQ=KE(|3q^B^<>fq4L=$AAef zXs-A1npfHekIZU(kZ4Suk3c&1Xk(#4jrFc(`W$+?_$6B6c2}i4_m-F|f*_F?j>|_Q z%h`0eOt<`04;+}xRpa+k-;w>zAe^SyiXW=QvYZL*mu6D!ZgC7LhHTV=FO$Flq`jgw zZRlz3Cz4X zYGAdM@#HvvlRIWLxGcXn_7nRMf(egUg$ok3e!tGe>Gty18D27NRT_kNRrq4r>mS1n z4Kln0V{%@VtN!K+9bU9`Q3S!?+7uMde-%YoKnsKSgax*Qtrdjs@QqIE=G_|){QaDZ z;`g!9bX7q94bmUgoYjf%gN4MR>+zNZ>F9gBc?>qTk90+I8f;bkQgrKvu6Mz8)0WjX z+~g)Zw2prEsuS?RkUAQE^}eG_;YJYUFpgVc-;KhiN)!XTlpO>LjX3FQ41U%xjtOu4 z`Od|(wGTbxw^fzJN@$j3tPqY3KLpa!&7J~lj!VEcvF}Mtz;&#Bz?JAk*?UvFpoX|L^cbO@I= ztT)=+UZ3t-T`o3iO=S7`!MVy%+DS-A_-qlQnl78S{1LOS!|6L7Hasq>{imm=&F{K0 z|IkrVHUlGF)S)9qAfW^N-S^P9{GS2XSEUSy*}QKLq^9mU`$bva9PckToSd9N^~CTM zzkZ2}iRljaqSL6?81g}SJslSbbf}bmKo-fQCz4~9_=5l*&`pfLVf*{>ncAyqids++ zO%-aZl(s3l=kW`%FQJfgznR) zPFMdZ!GUzU6#Q9$FL(BM%iMlu8~lfiut9+1io=bd_r~Hx{rxA}5=ehY7-x&m7?d^) z?N?p_YF&tbYN;DP-L9V$+~hU+^@S&t+mpA`+{{L1#EA+6x?7dv(uW_Qi;(@VNKy+O z&p38Gp<(&tX&J`#u=gm^ccCwz2=!zmQIvmqD(bJGB;4fw;%FZU@awyA!NZe!$+m9& z(%+TuPKR(c?F;*j6H^t{(>=$?GWESeKa)>BvR@YasK{xED(ob-oq>*)n670~<)igl z;@H*W#pL*oyjq*By&B+_Nv5D|X`k=1F%TT=>T+fxt#Mv@=v$KGcV(e!7HoMARPPrx z9W)ZD!a?BI3Y4cxs5BS3o$vO;J^wT$4nD1or&_hTGi!8HxpnlTP@u1#8@WLTb&lYuSTL`Z1Qz5V;~Am5f?IUu@6rSI74L zVA^!YY}BEJp0j#~#s=6+NK0Pz4pj7Cb4mbVG+5I;L zxQb%wohVcMe1}w+wWrMAtlCG$nRAf=oyXTZ2b*qsVuESSEz9@>CE{TMI#65v)q9e( znS^yka|&u_jy;{m-bw-yvl%};(V`c7vJ|(XsRUdetROn&xagQ6I0gc7M(|=rnO5tT zzBVPdbOz}@62ewkw{X!Nl}90x!+JH2E}7fDr>6amuj0$t)mhg>*LB*e@t$BhJ+YFx z&l;XysVx+-U>=G@Jw3APdV3M`IlfNLS>&wY<(&ZmIh=)Ee{F5;!9~Utrgp*$&J(eJ zR-v|(C@$9=Chg$*!qlx3#&*owi9Um8a+q6LL8D=tx;N7a3k&Zg4#!c1W3LZf3LQ8t zYATtR5b`uy=R~pP~`iqiXHxfyZ-#od5^!w{97@b+aCPy40 z8dFqZ(Jg7KjMis|N4k&Zn2)nF?ryuy?(FD)hW#UEX!s?Se%7?+^GN%{*Y{L%ox3@! 
zyFEm;sj067+&nY9?CkI>gT~H8RFwxG%~w4`p3*jqtoC7?TG@D5;R`>Q<fqvq8a(^D4^DDT)R(r)|8%}IGwh&bp= zS>DuhbbB6P#HBV>{sS@WX1b{Z{j0KlDTzQGW&1Po3bxc8x6+2ogPj>5IEqW{Mc>p3 z-3$n};o~A3SJ2al1jE=W25~MTz$Ak}M21EdAHRI+8fZlI6Diw1Lks@fHz z%3dHllR+H*EQ;_Wjr3XMv*j}iiRINUdF}mRHRJ*N(*Qz2(Y<4OM^@!$PWH$?D;X~Z z3pb&m@XHkv;fit7i5QPT%}#z?ykQ2@2M6+!MnYPlqE=lAn&&V(_(d9oWp_uS#UU(g znGnGZMBb~daTWh~N1+YJ{X)EGm8(Vd+qH*{$-z&WSVAF>>yA1t(}wdU$|>-E>f-#r zDOg7N1HiPQ*R*m<)eKClW@S-g;?sc_+j3S!3e4xxuMvp(qGMr^zaq{v*id@zQXt*L z&;@&3Wd=5O&}p{T64K4$!26A26-?j`f>g8|2`m{+-M6`Y1u!2ZRi&iHbg47PUKP<> zoUuqMf9S2o$HAz+Y`b<7wI2I-?hgzO*21U5b&q44X1!HiB-=4(HNj~+i`10XZvTD zqQbvH=++)t)pe3N`H_+4)o@Em|13-yw$Zw5Om-9Qm9SLAbu_{qq|rpIy^fZunptw{ zDXI?D8bW(_+de8uJZ4MhAEV*D^vgZ9wU^qGSk;*pT*vuBjj7ufjncOE_Bd>D0npTn zICJ~>neo1ny77kw;UC;@>D*cAhC-^gWj{C31ZuF~8`W)a<(SJ?SXtU8A0wf^VKq8YLLcTcQIvdaFXn_lk*g4h}$RsI{4_&W2q!%b3GZ5>+-fyvVexS32FE!fxgPy3%C$}nFv3P?z zkNIZL?J=P}VncC;ua+2^hTw2NK4^#Deq#p~(RY8BJ=wZ_`Pzgf)`+3g?FcBJ6)aYc z=0iB>x!%j+Y#mR}zp$jEo#ta#aKLZJh}&6x=_eL?heja|WTX6i%2Kb1+4#;>X7To- z)9>R^_)~?YpK>LDY5xa{yvIQ>JSPQ9KZ#$ZH(krQ;3jHl48FxM`+ zalBtMMN)o5isK+K%**ZZUDE-@cE z?>9VjfrZ0-xnnzSHwV>fZ~_7XU6RSnJ(6qcQva5N{Nhr(=sjXZ(_5t)2nKH{?wq^# z3T7lsp_u7kY@9!m&;`MkaWCzy2U$tkQ=VB@uYle_v31&}zbyEf+P5Yt$6+ZA1T7lt zys~MF3t4HAWAOtP?6Uhmch;cU-AU=E>L7Vogf!QH9~ao(J|&);@X|GH|I$|FYIq&K zkM?J;tuv_g)_vN`PX;TL2PYsG^DJOiNBmuRNjylGmX0@}QmvaMmPUmcCy4v?HlIU0 zCUe8q`l*vB9jPWolYY!~r?AaujR$$&22PY2PX_KpQ#zaI0M`f$?|HnXpIu%r%EN=9 zw*9__CJJWE^U*1<9(T{okj&mxk48y0Ed{Qj;>F~3L!=fuwtNIp;*qL-lz6{=XKZhk zZiLIn8G-}6Zc!JP6jQT$-JSo*7pk3snztN*Y(V-`)M2uBdy6aPLu?p!&?lz$Vt)>1 z_=kHQrrae$m542%G<%Re*Vdd#X~Y=mt0OmXoV!v&#y{l!-j=+LmZ9!^XRa(>Q@~(BZd?!uWtvE z>1iJ!$V+EZ_z=b3=+e?00kLoE7BPJOuSn+$?oUA^OLFK~F}0CgY$8 zkHcz{9nNP4bvB8jNMxV84fu+=q7mlyoMc&g^D1?nkfBb!pGS84=91B>oS_;CNCY3V zQy4#Mh?6Jf^%)@p0biY-T(!3GkM7LvaJIJ*->8_?O1q;c?Qn8zT?0BDO;PRRYqr(E zIF*VB%upf2$Up+$&cKA-V8 z*64A`K^wm<9-ng|S(;_cb8-nzVYsDoV+)JOf&;jhS|_Fq5>4qec2@{TPBzzICBJ`m z#JVU&w}26j5bBk#wLZ_Qn_;cz@cJ@wvj 
zJ6yLPrL{!!e|mDSKD1T)L^6O%Q9Mt2M(ObF!(Sgd@ONy@eFi(PEEXj}Wz*2ROu-q+ z@uj}*(C22$gLy0klq#jsrzCr?5TEK?LixOcKkhRU-12(vm>)z+=Xs_gsmdIG!sMhq zcCUmD=b=8U%(=5tu(R<}TNVkycvdO#`Y3$S0O{rRn=?9drl2Db%rY!W+kRSbNfm^| z{Dry)E_M&Zgx@#feF<^yDMqQJ=$6C*ML3k0^hZHegSM_y6sUN~p7Dj~{8sVn`{n~)* zlz({mn1!ErZAK5P2>x5~SAJZI`CCFe6F3^${`xut^7a_WRL#g0Y?iE1n*|i9S;V_- z;_WcFJ9SdaRHfP0S5ri>lCF@%g60o=Ru6s8gKYejQ;3eEHa?nv21`ZY!)eb##px|C zc5v+ID``Yv{l&vp(LFGNLlxwQhGihUGIkfxl%{MW#P9aD4rxr?tCI6vH8670hu%C*KPAt_E?g+ zd$)j_b?2F_VO2{VGP|&?)6d*}AqlPP;d(fFIlj#t;!e!$Bp#D38}67&x05o01biCc z8=|u4%qIfZB1NcjagZWn^p#ZbsqT2Ey4nhwC0z{1`kzY#k_0tdvDZsOklOwo#%YH{ z@49UH9JJo+O=~>~`4V zrJbbYC@`t1ODU!tn7$M=C(Fjf#Pp?4uLP!{ohA!fF09|o%QRS3fZlA@SeZcBUAKFu`OOz7qKg+B7bcMS}jY_{EmV zOZ*anzgfxR}4 zRQ4}lmkG1aB&?_%7tNHLe>L?}m$07qS+=9+LDbP%2AV`iLtv|4DFK0|^ENj0iADi{ z;qAB3?!VUDF2Sxx2agAH=UxZxO_c^X)YOqCXUUG(xi<$ei58Nvb-m_P2;EaNLY0&J z^#g-}r+6rBGm8lQAO*8&X?B&_0aXyZUe2sZPKtfuryn#iq}NEJ=2+ya^f?WViY8(r zc*9v&Yn|6$d(b&`IIQR5$-66%jKJIQ!@^EE_;Y_Q1l_*ld9$5$TM07wMUBBKVW6qX z8BqYLF1sTN*L>pvz5f}{*yJ|(#X~cWFBe{*-}zRKrU;L2S1(f(fW=faweU29GwN9URU|7*W%I${t^BL}k z>!Z%P6|2T|6=miA6`Q8*U_|=17}~Ia{QNHsYFZJ=4FnvHp=2?INjcg7`b-&f2xe(I zFL%Ax6Ac2GG`v47gj7z+u?U*$Mm5d$9Ngz>=4mbY4q)rJH`vmVr{8|>^C);I)A?zZ zna4wZm3cE$g)ABp#;o<@VE$@ZOZHv7jmBS&z$Cx1IGxAD+ zDyR=F#NKlwj?!P>C4IfAwJn;uXk2_h|D~OV%v5ct+MO6M?!f>npJ-FF^atxCNO(%7 z))`AE@S!^x)J&XGU!}XeqlAUXZ|pC5YV5!Bqi=qetr0kBDdKm7RCLI6fZ|1u+3kD9_@iZ+pkNZ!+FBVB?sNy-YXn5Ks zbTgT0v4nG~>?G*yk*Yd?9EjGiT^{C9WDv(uO5BI5%NcpeNeMqza^D_LEs-O1*8TA; z{N4qT;IB{6Nf-g?>FQ_*pXUFkQM0h4xvH1K#JM;QHDGMQam=G4rw2P*wtC8*rz8Ov0Nv&D*uK)urE#wD>vn+BaK?T;6v z(JF7#q=G|Wy`n8Xd`#gzTTfBWKRl%Ao=_}SD|J9R<*H3lQ4!E|^zFibK3s11$_*L{ zC}EX70|T`HcRwUgdl|I<%gf7O!}Hn>)!SA83eVevK?4gl^=S~cj&8UX*hXg=^gwxe zxluSO1|}u}mupNtkZP<-{{LfI6!d;iD$R)1OGPes#v8^eODB+IPl85m3->}`3bSjp z8Nq7oDY)T715~&$j42T%vZ6Yyyj+SU4?`5 zi!LOj!7z72l->BrlEK22rtSp@UDOAh)-6r^{3=gJoT44;VMLZRj~N#u>c6Sj)K*0C z?v5jW-;s5g^N+NNPwyZsW@cQc!OPHAv2e0~S}&?Xz$c#nSi3o$d~9vyz}keG*Ri1f 
z!>?n@gySe$jcYi}FYU?)V@p8Voj;raJs3ZvtZ5a1m^N?+vxxl;-+r!TXOF|=ur0dk z)AhK}4uK}x(S7ODPvpAXt1yf9g-32VJ~#|xU1OG1erA_eA_GyeP%si;eH9hI&0#f8n)X&XQaN~9 zHaGTfRS{=9Y+6~`NHb;c8)T(n7eY0OSqNTop1ba~Na6Hb8$E}ZM`%+yIC+{hTVK6E zd9!IkWmffP7i*R;Z_rui9jcMG!Zc*7-D;n1+qGI(g*8pmw!;TKf3q;ndOEhh)tB-u zTd;mOnM5?TGsi~-p9Fno%+~vw^3R&6|H*IVledp(JM=>xOk`++Ktvxu+5>@F0VLzL z?2^dP(4+HnKu%l+j%zCrlYPD4)GS91;X5M4#%?qRd~Nt}N*yM$(W6pGxTerH`>YKlJ*p~g@|*WaUtL}8-IWp{yInL-uNe#!>?SS;nSo}$ zZRi6tJCGl#-kZ6_mc zmW$O3x#ll9CRcobG6E!2W)XSXtFVv)-#w+zn~VA{=u83j97r=sm>`;Y(w9eL1zcnPKH07=kPmU`b)r) zQf>VL>Z{8vW<3u4#~Y$x7^;;A4Nu6mv(ubYDxJaTU#p$!1CmuRaSu!FeTdkEUb-`r z&~FhSaadn8Swthiw4lSX+3&mw>0{|RJ28rs6mbB?6$B%Om<*v zid*#x`7*WlLZt?O{zn*ugaHB$0~^~1umf!M$+D?>% z&6NgG`R1bBy}3yw0;{R33SYKvTuZCjP*y(vx!v_BCxnk>>|=g+B#>+G^*uTo0o2AxOxHtn z+g{2#9}jGDJJdz-+%UUp1jWk-u*(Q`)Xn*7=#ye_U(v&j79=pD`^y!Cu`$xGx1Xz~ z`*<2KG$%~o>acO@#mqvcJni4af!=F`G+;M~J}28ts>0I}zscJ};KvuL`T?7^-MfD; z&VTup0q4r6YpfFp+y)JoAgi)@yqH$vCSv0c=SI{LT&~&~_gqVSCjtQdPAN&uR}}1_ z#Fl0UHIafgSf8F6?p(U8)zq)hyt&9L6vrl#_RT4oeaUKJ_p9OAs#Cv3-uR<4DJ-m{XF~W3LskTSN!amO@2W2zb_n;v9W3S5Ju-mXEZ%8I! 
z45fjJ;8Z6tPAyGMu+<;4bWl6}K4Q`$gAxJqnKOz*@9_I)KMVF)?CsO7lVsTP-*ao1 z-Lh}F%kWzn69ka9`ps8sOpiO&Xx>Lxl;_ZIhQrlEzr@o?(P3eWop7_P`{>uEDKQ=7 zT*?YV;gHh7n&Nw_7xjL7NW)WKPBa?Xe$x-9N#-FJXhtDCB}0xsa!S|>wfHNpc3qKx zA#l&;5YZ;UdMe>g1wBQk9G)IN7EGOL+P$)y*@mcZytiN#XJz5IcV};Y+_`f@_TiV* z_S>!%migrm<|yOmnJtBUa_@1f8;UA$yjj|ivEiKpgsfuuJt_#GZ~dQE&_ zK2y((4h}BJ<$|h_#GWEgW}EEIoWqwNcB!?~crL5M@$+fP!OxNK}OAFH#wjukM}rm*m#zJ(RiKT(!r}rm1e9WzdECjyEF=qnF+KI70*Ds4>nUjDV%mEgN+=Pt=axqTCwqohJcG`r){VP zQ9Mny^;q#*%=4?ZMxR=+;xu7t;2T2>WgIa~`UaE!vs%dqC4&Hg6~3S0vawz04v0Vw zvZDgO5M`rKcmdGrPhVhO-nwAvgb^pL7t>;&Lq#dl!X zBQFs1`GHQ+B9ZM(!uQvv!}bl{cjPMC2H()UK-RmW#hrho-UOvF7%M_5%k}+JiN}ks zRX@;-{Ic`wg-4x_52Mg!T8<>w^vx$5lLx^3WnU zg8by(fH}mI3Prj-4XFoorOe5{{0s0p%oWEiK#5h6Pe(;1k`rQW`!9~lmBtDKMXVfB z6y++LVQgy>vw?|3-<;@{>T>nvk8)>A-d*RvUSfF|8QHmU+xvPow#$?pYqvLJIYq#9 zs{i667&#~i%1-ZnW^Y}=uOI-U1F^XGpUh-AGkJR9X7PBzsqZ2cUy=D0Jyo%pW%MSiCaiaJ;o0VT55apR=vk)OCnPYyJ7( zQdKW9E&;OyfvCn7;Z|LBEcT@MoVkqCpogTF{Bx(xi9_fag+)0>KFCgcshr4Fm1z~! zanZR@cn#24HO#8Fv}qjUA>K?x&&;mFaoz?8*Tdrp85TMWL~r726;YG(<4szu+gju` zn3V)_W#ud7!qDxn%DR~YL^|i{3KsM5dGpg|&T-WyJToAbS8ewWWlp0bW;u5 z5|-&{Z+RS=oRM;xQ3Kp#rFCX;0yb`yububm)c2B%u(LDG2kg=V7>(q^TfJqr7*+>J zK`HSpLQpbyad)VC1vUTf!T!tP9U_F_uCD!?o5#a%fYsXJ_fmDH)v-yI7mUFA?X6?V zIpB_A-Sl+e=LeR-@s!9-A*b)tBSLlyl!1a-)DknsTy(Z<+<_7^@solXZXK|Q!T>MVJu-aCZ`MZ2HH$-aP1Q1Y z-k49zp<7FJ2v$_zPV{rIU}N=q0sc9tR6^&eEg9ZDh>qIlm-l-}hsj8{znTUkQ?aQ0 zkBNrnb*O=!89FUQ(}PVd?WcK>z8`QMR&HWHTBoz=e zQPL>s4EcQVrWpB@B@9w%kBeJnuB4nHlipECB2cdl1 zQfQHRl36%*YBrIUe2_NShzZ;oqRIrA*(&&~Y2Rb-r83!B6be1ORlXMA8Ge+hQ_i-} ziVyIlG~N>)8;-tSu-vYPcyF`mnQe>$tPW)qHWMEAlZpxWF}6GQoPJ~c9uk9r&Fm}P zl}l$#U;W^a;=61PDPT|5@I3^ieR${N`8kb4V49ozx-2mGf?21^vps?TTW|_klii-u7>PiccX1 zHxgW|kHfn^JA*{gzseNwe$%QZFm06v0b{jkk@@LQZH~_$_M*3Z{y)y%GOF!%Yxixj z;!>=*wYWPJr#L~2dvSNS;w|3d?(XjH?oM!bPq1+EyzgH7><{~l^RD$VV~`9+^1ttS z&EGX;jF9|j;~B6vmgE$uZ_%_!dj4`#RWX&d+2J3>>krjKgzdY+vff$~T4=~7 zJc0T-xxmGir;%tDQrIND>1%BwIe?Vk#mjY;ZI#}!%JiBr?ruyKp}OEq!>zxqy)G5r 
z-9r_qhG>mn=t&$j$)Xu?e1z~?mITr~en9rh0cu0%^lhIvYOwIwc?{V(Kf)5QGhGBX z6jGf+?V6gJ>25oVfLEBZvjaQlV+G4-X8)RIWn5x{r#AmbfPlgJkCtE;OU8|aO#fByVYB06)xwQ_9X zGch+mb>WX2!NJ38bUB`dHr-~(#To=NUj?L`tuZ0fK_4%&Z6|9oF1;m?{?0oZWk)QY{3-xp?ln-3e$kCRvi$6fO{#IQo z6JsGdkRrvfDk_PRE9JOLzMBQ&OkAY=iarXd&VxJ+ShBk{xH3gIc~@Khq_A>njFKpJyp&-~0TPIA)tQ4klm^S`Ayi zxkkGvhk{$07I`%+TzqBC@wf~fA;7QVdz&#ScO zbHLvR(L~uxQ2ax4v}8p%>b3PO)vUWZhrId=LgKLDy0-cuc&9oVvpaxEUx04sIniYE zZS}oUZ{14t7~(RCkO0o9xGSx8dFph31A#- zJG_i;!WT^Knm#cye|XD;Zz65C!(N|^`v{Ey(8(R-U1w05PDXU|m;4i9e{i-ps=(<* zPFiTejp5Ru`-DWAXbyd3F0vw=TISol94FmwnB6*-(#qAuoFVAMN}SQO4dC6i2oRW3 z3miIkD}xViOGjK+p0MKfc~UweAuD_F)_;YiRO=h<8ZI_JKSzt*?6~#s!51z>j&Pn= zCO($sE?x_Js4yzjGw2l_;BflvX8$%`^Hb8TZM&@N2@L=9k!ksXu+w9FXydk$4Pwkj z;O~JH=`5buxpR66UZGWh^*6iD<>bbHImr|CLvc9~*|anjG$lLP^f#Jw-8*Wzex^qYOW-z=~coIQmV21n`8>V2vI z*}J0RDblLWbN>S>#S>SJ5Bh|IP>BRmT<2Ic;3-*%{yr2W=1~27F*QFz$Q%7P<0>@R z0lKHA6gRY%f-2$ z3ZRz%vHjmr^8@l0xFD^R8j%Ne^|ci4NW72=5~o|NLS&(i_kK>DJYt04AyRrrPSPyp0Y2KDFh#O4k-u~JXiWHKyBVvsT<7>d;k_=*DG$g zedj`X%Ix%R(=M4+pX!zKG4#pIgWq=HX`xw?@I0^%StAW~65FF=cy#2@N}_L6hNm{L zj)gI}EgI&k=TG+}t}dl^HxlIoIsrhFvzP6HiwZJlc-xAFLLl^Ew|yx8S$DD6@BJ7; zzV1MCoZI4ppFAk@|MXgCxs;Fn zl7?Q+mUJ!|c8!L3pLP57ZLac04b*NU2L?|yuWta2bf0DYgLbe#F zP+49y0JVYSuJICB^rRyO?mxeg2Rf9{bktxk5a};y`+pPx8o3)cueh-|I?8P?ygTuH(v-0^6z0Z8 zYqN9E_%el`N@V@)?f9oraaU1)u~TqXU(Og!>(?r4zN=lv7iRD}>4Jw(fW^KO3n{}d zbfC0SPej@Jy$7A$+#;zdpmN}Xdd46YU6bO8wRIP9A)72<6^dF1^S;i7_p&ik=Ja~Y z>6w5FE>-^%C%-&s2lyDIY#69lsu5gx{VFt7sr0RRP9qEI)V^{v>BJLq-aXGO)PI;m zuYF^flJ|RvzE<4iN7pUv==Eg1#d^wx%1awNWV@e(1tW~P+`Eiq(fkCkZi$lbv95l; zOV;QcL;umckE07M{onBVDr1qot`@52DY%UJU*M7zlhx;*r>%42TqLwA3&F8)t^WNN zO`WoPhC~@?F#zR3FW`v>dJ#WW0`2gdV&*>y#2LlZcX<~XEnpYFr(1Jqn^=6D;D8Rn zdH1!+8>CUW=miB)46K#@W44=6;Fu=o?&k9ptH^UBMp@t1QP=P)+@zDnFx^l|$hE(w z9H;Mk<6!{owmbZu9qiGm#M91C&taS_&>cZ#c7l+~{tFzu$i zt8IOY_`oaB$1|zhuBR=MCMdo-tSw@R3Im)b+Bj)H5VmzDW8F)LY191IEC3d^D6KPS zwg$8|07g@Z<{Z9-Zddiah1gk~Xy#XVb*4d(t&55tH{)GrBIF)?|*u 
zQ-p(7F$iKkz)bv`^^5;!okP2#;W>IsQ4L0t>nA-1zYbm^)~$r4Y-D2+CU+H9FGm;) zI#1l<>4Jv7P{KZ0lna(b>=oJDeI38m1@f$SnKkT4#X-!fYqilq_s6#4kY};FA+O*rv;`YP&Uam#qX>B$KLyIi*I{~uzOTAN0m4F> zb-}rv4wMJ#bNars3p1X>fY9on=6fYJx#w$F+D*R!+R;}kmYtUsq;f>~AewiS;%?j`WdjmNy@4bbMi$X)gZZ?4SsOLT zom_Td*>IFw;qThdwnoy1dRwpnKb@&Qt>%>pSj4~82E&ig`3ZiCZiS|gTM{rpZ}l{c z*W%*rma(e=<8bzbFDY_vCk9=E+8SFzKr1%CH?ik7_n&Ab&n7FVX*C_r6SoyCdb3Yw zE$3u{h9!LTX&1()HX)vFmv89hQaw0J(R_iU8lR?DC%IVayB^yvNEk#%);nG~Ub`yK zb)QSikP8LN${XwsSnkeO?>g~uXUM;t^L?}#R%AN zV-NNI9{#5&_>BUNp^$T()+2o+;*}yXmK8}t50!2XcM_ls zZs*oqQJCI|5!J}YB)6$nKJaW8LIy~w+urVOOt4QQ;sq5S86o3Yip=2Kz2*8_TYJfgiC&kOLTJvDJFqIk*p2@0i3GUmZi+P;F|xskykx zr~MJLc0+djJ1FcGjZQ;-0ogR5r4p$kxTy>rv|jFI=8m2hAa|0zAg`eC>aH!m`#1L} z-kyX`g>=}>5L^uTCp#TrG*tzE#R;?-h4oVH2!^Rryhb1(5}e(UOw>3%#t^vnPFPL1 zd3B+TGM;vOpC@4P<9kmSOUHn!4W4cLOHw|io%}%6$ETO=rj=dxj$3BfWO2>62HXq` z&|etNFhXbodByxb4La-1ci0XpH};Ej=X|%B(VjI=B2de+`WLrs4S|;Y&5N)VdG)$W z2NcMVC$vvWR{(6^Ym?jOLv^WO7lPs=T&gAvwktk^gO~RR-T- z1PZaCj!^84wygUDIQ79_q@Ey_Z;2JcPPWwA4_SZz)?_{RL62y2zt#2iCedERqWi2s zx2u6*A}pU5E$i`r*OC`sB`voFT7YPF8izWt%3(VLivN(9I<@tt_Z3Sz{gqp#H1V0=DQ<0jQ7onU==vDZ zeYOP7lVjs{na@{*${r3f$ZrPr?14$Lw-OQ#GD(w;@_PwJQlIJju`b`UzXtA|^vN$< z5qY@tGsk||Qyelj1r-U;Jzd%nDKt8)1I_$&7!`_9LmAze5T>nWdm_MpD1ZK;WC~g^ z&{5wlz=u3s@ycU(*VoMbv8%pyp9 zLWj(~M9KBE*R+7a(rg2L>RfICO@A4h7M|3<*%C2etpp}dZz_Jl?GG%d_y}N0-V87L z+|SlwCG$%eP_<(9;ZPIvHv6hCD1-F)+m|8Rj7H`W7Q3a z$)9_LQKT`sTP5ONv|!n}0lvU)&Go>+>Z+>bEY!E@hWR607-+2#d#b*@7Eg@AY;LN4 z#h|+tvs1R0>0B`)mLc5M?AuGmBN2$lx1&JaBS}`@Ygo;k`moUJFrNnXbJc2s`?aZR z!9PD#6(%e0i~qwUtUxKf@9vzp7X}@s>=7UpM^qV8>8!(0&sD>nsjadNvH)CIfWBDL zcUx$9w}?xmq9zi4w}BKK{Bf3}1G2eI&BJq;o+2+T4dZvaM{($L_;r&H9q9XPd8!Ta4g8EN;>UMHZmWU-E`5__1#lXo>&i{g73S{J2krU66-uuRxy8va5_rz z=8ejn=W}gF6+qQ?M)pE|QMk0w`m}b(`Bfst@C}Sno82UI0pf>%Xc8tZh*`BJ?~-g9 z{K4cnT~6QZg4f+{8>>(U%B-g#9rT?O7m+`oM7=|tuNiK{MnkQ)BSJTJDfHD4at={f z;G05bUCH2&8%C%61n$aHf#rqxwKAKNWPuGsoY%*pkT@5ukWt)%M*B#`c_DRh6QQNt_rSLkIF{m? 
z!u-80)DJtuKnVB7fegdf?l4Jl5D5M&@I|Rn(XOdD0Ekc~n?k45&Vl99pw;m{E~ZJX zU*LlJlTz7LO~o;+{#<(@{_1l#TR@~%5&&8O0pwE1=zrp}I1gZ!dpWC~cD?XfEB_uE%0%=G8x2(| z#WxQPZr3MWPer+G{={Or-W`7(q=NQ@ja6W*ZkPJlt*rj-z4(C_z#a)yV4F~?5e?v% ztWZZIJIGTzOS*5uNBP_^Q=l$uQDKuc!@$;B=&lsQJ>8vkMeTUv@9nB6@Y{@;QQ3?4 zurSTvTWt!Pc}(6$eJ*X$&~`#T-|iXvXpIR{R{+xzT_1Bai&Cz|vCQfHI}pA-ssm~A74JQXX4MoKW%4p6GW<(` z@w4G!Nn?QpBVYsa_)uQ&|Kv|0WeN>VO_DS?p0Xv79kfY%h+PK6KR0FO$A)c0-w%x$ z*j{Ph9>gfFmm)F~t4&DAC=GaxNpWE{W{|~?-eJI*UagSW#YjX8|0&UY!hD>zJ})5rhvX;5!|vIY>jCU)>O`uG0{hcvg>PK@ z1b23C=&cq7jy363r$V!Cj|Qhcb_F?=0;M*^QQT_@{$W_mevF5k_Mg~+LeEVOAJq?} zZI^nJ*5x+vEVPJJ#G^}Bn=Uc+6`u6XCHOU?`1I$D<2#3nL)mBih>)jdL4;vN3HDP? zZ;<<*RI2UQVlFGybZS1SrGl!?H3#3zYbz!O_Anh5NxcaDdpn)){(`3f==}gNu3Rc` z7w*hobX(w1VtmFZkEn;GExR`PZtc3dx4znBw`&_!!%z^wlAe9pS#2Tr5eMu;Bfb=~!uvA^L%I-rv|F#1zwr?W4R zF!Zt2Ki;IMm^>yH8(7&VyXDU^F~YoUIQ>zyPqmOhM3+d_SJzstHjCTb=PD1x=_^4F zpzII7!nR}o5Wa4uI7OU>DU`<*qQJ#5(yxP;v;h7BxZFl=n$Fia(qxE-&{q{#YT^A7 zNLW=cJ5<3Rr+^7n#B^zomD#n~DbmP9gE50P5%mPxlu6I&R5fq7OJ99IPq9uo?W;~& zy6e5wQivrwr!FL1;CbwLgu!O=q^a&pSR+|zc68eC?3)$Vb2>6inL9;^K1&mD{Vv%a z@`?vRvd7Ti%HMTm?nxR`#+F<;cjH$8WBLhUf57WUF%T5VW?e#9@kz@*`cuF%&UV3Q zdGk~@rlR>F)6wdjH{4C4)ueXzF^XBk~MPdr*rS~JG2A~ zmfYt`EfAuWtDl+y$z(*W_)qahFy_3r$*eb<|L~08mA@K%2Czgb>TPx%>vOerHfvXm zky#P516i+p+OynzL8acuUvCn@p{wbJ9eZrzmq@X6y{L;_mo!2jun^Ta@9v0pA1ZuA#eOrsx{q?-9#x>KIpE;>m2=y z$El>X1HMx>sy!Y*t(RFDR9?DD)!%FLe_H0>f4}|hnFa&pd;A*S0_h5_|1D8!aOBk; zS5_|*-bHO7vR9nz6V)%BA|cJFx&xz2ih}eXDG*j}Iw=&PJSu z%8@L&hMq`ccfZ?6^NAuKlHHUg?d(3`i&vDW4%b!jXg>e#xve55L8GAP(%8CG#p!*F z-}z=qe{*y~N!WyyKh0#A09fz_&=A38SooyTIFt_il`vuM^ld6cnSF$}GAzA}vdca7D5xw-!*@Aw}a#sJlz?pD18(IfOUZyYjLgdhq)kea3QF%zl(ZP{zP z1HKybr;G+yf7=0W^*Qe?3v8s^A$;&<)4;8I(gG=H{(On+qR~?C8Na{DgKQn{C^#eY z9ey<3yONV(AsJ}2mI_8Ay4N;MfW}G)_<*}08z-~|)LDPL@QlC!UvLvYs(s1)hpH@{ zoM-e~uU@43ns4XNt@R=<-lI|6>7P91wG0h%(F6I%xS1>S!ICRPtL=pvscO5>8v(n# zJoe*XWBoalEPczY%SndNCXOzD+Oj-3nSDB>#X*)oV(~i3<`EYR&tnm7dhC_gM2Aip z+?Fn5wSG;V>2wWm(yuK)*E0yjn!FoT=B(G7lK@!ZvugnP1&$$PBo~51QpWR-lp_k1 
z<`c4PKwp{nW0mJmu@UwgG@X4g3ZtC&e`A?*SYYXfIwjagBtn0BkvC|)O z;Nd8xg?AG(D!^CCwwBG&0{_%(<8rgDmW>qo5mYr0bK17_ogZ7kk0`7;k-H=8y!G%) z0}W^RQ<^?Xgmjl@D2RIC`%sc%FPvH$-s#uY><-FsB6?)6MIRX4;@!F-xKS0mG8GZk z_QAGODe)>ctU(>jP&AvmqN&&2=SDeF6!;+YPb3lXl_$-dw_u$b`ur|T{ildU)~{EU zX~ukq%ndWW{eo$oxYD#SH1G@fB@s%!=w!pLf;_4K=Fp}#CF}Q z<6;Rt#4?90m+yZZNP9sB)76{q9)lY6p5SGh*b~;R-^_)c(Hm;6nd%MlkH5s}hw2oa zG(L1%WSkgUaQvqn{htFWvQ;xVv201Lr~&3%}1Ta7BP!FPyVS`NS7gotfGXufYkum7drWWy z40X9z!kiUJ=g=mw1m{jRZ>Y3`#lKTAg|11%Hs-jW;?2SoJ58zuCVb4c#c1Aohi`yU z(s>r{Z*P$4gteEW1+RWf=el&1JjWtp4;88S+4ily23DA(uzFa zRAv(7XG90qS*`Tarqkk#ZkLTccJ^)@KitjC60)H@QZs=o!;r$iJ3$#5+Jr>UcH-6n z`j{iquIW(~a1#hqg_?XHf+*~7vwq|wbqX>Nnn{95u9hIriASvleud9N?vGnY-vrVQ zxxS(tGFw!+u=r2;JH)ZB@ZY=FuFr7p$O?UsHJnkw|aKH;RX!it)1m9pM%u131x{`c%s`DU+-QquVmNTyw!Lg->Dgg?t`%WU;ILP8m!RM z(7G=A$7zq(1#-hh{XtPu+@|i#u~~zMjT4<13w(D{i>x@9SZtPt36*(ihWV8L`QtzR z8?k(a!hsOyf<;nQeS4i&G}#%l+Xw6oMv*cXHAu~(n}&LqnwdA(pn`TwE7oE!NL3=%DUP20Dw*WDt*yjAT-*QfUIcRt_MGQ+d%6+Jt*3 z>zJ)hEqz|QE7T!uQ`s}nEo_(iw`k5PyJt5HEd4_@gTutT+Gc+)`xoT3K~OPMysAD| zX-;SNN_<+!MR#S)2DNQe{fwK#Y-s6o8u*9dNs|nycP2&5my7ssQFJ6N$TqIvt0LB? 
zNU2qMkj>%@vkLXVT9`=IOS3o2Wu=_A-()Mm zN(GB@+7mHf!efczJeIQ~<51ppUV*&br7BtLwMvfujlJ}w%J`h@wa4E?L&~=Bbty#L z1}dW@PQAE&_Q5Yx^!~ePJ`HV)kZbYGs7+Sa_O`@@4)3H<>`pp)2C~%t^5!&yrGB24 zGWdB%BUr_Y9yyb~*=bXn&khSGrdMvTgyU2_=<>O;B5O% zE1}_UVy=PDA3~hT-N(st8>u-SkDembjfs5PC9niUecBSaguiS#Px=lv9N`)jUPsEI zzD_2D-T6)~_M>cg?-KXSsVRu}HCg|*!6=IyT3$Rrt=1dNgdg(+X%@b4pFPtHyGY)) zfHYaWbt`@sA9KXC^ss_qJVUE<8VjsG)&`3^Vd~F?3SsYE(9KU%oAr2USHyrNvBV4j zRvM6BKHiB`wCy-XSf~t!yRj&bS}OgVFUW{qGmGwmR4UQLOSqA-Ycvf;M_;ji82cev ztnQIaIYk8PrJ)Nk{8imS)%!T*5g-~k(^M6xJ`)2QOb&UDsBdM zN}sDC$JoFRG#44PF`=+~g&i87uAKGlN)42x#I{Z?*Wjr?R1r%x2G}4ljI}#*|FW2^ zyeD9H>1jXJBL26J*BthM0X!bzHbP*IIPOHUE)woQq^y+1oCelJkw%pOW_{Z)&W^u| zm*KWBdOZ2<*fDKg^k_CIsiSs3bmMT*_$TVz@6F3qz4w>(TivHy%n>`q%NZA*+{|jR z*!4OmA$gOpVHrNer5?jD`@E>(`)+rVPCjvY(V6R`%18D7&Ytd_!z6QUDN%t*R6@6$ zzwTIm#kk;vc;+8R^%wT-s7jf3Le_egvXZ{~${)aLJa|Ad-@lwA@Y^`ovtsc|Am&pO zdX8CJUV7O7>}o6;cAVx`L=F&H$XVsFbD8wz)^rrv2bH!nUnY7s*h8=UMsR+)yHhv< zJw~g>xRcLqavjjizX5R0h%mrLXA||mMxg#^tP%GYT0FgU1&C_f zme|Ib3atvcUJmN)U`Tj6r^W85zC}Y6TCl2n)-HV2=fk20><%*|V3kBU@9Fc&5rk3I z*RJI@R<2!v391kWmJ2zm0PSx?P}0zp{`T`eoCe#UXUb=BLV2>bzVd!&2sQTN$a!LAnJ9$N zQQ3L;$?ta@ma_GIAvDKI5Pv`hmB!?IRF{79_zJ6M#^OgQ!H@LbOKMq00nRq)-))VA z0;2x~>Q+O$71zqZV%O@xWC)!P@vN%Q^62p47`i=ODK=iqWjHB!9lE$D0St{=u}r~h z`HB`(y;Fi!uMRjZVP>zkw;-XXb@A~ocfR>OOtOOF*1}Xoe#l0azmmmg276;%_f+-E z;)f^rgF00yv(dV}P$jj9nH#HsMGcFT2$KjIn(rHUWVQ#TvH2}GX88!J;eWugdaF!F%cdeS=pC1B*F4>=F5?;{KW2RhgFGH=a8Mrt z`MKT-ygzjMY?9{$G1o0Dy4Tza{CRY~#yW&h!2s-=3Kf=^u8yNP>iwE(G(LSL--j?< zmse?A^)}+`X;Kd9Z-1YX2e6`Jj(7nOCbCJ9ik@aN5}&80s>gYSFEi!ZvV__Dir-p( zO(~A4%6wQ*hRa?B$YH)!jHvLAyU#!HKemdx>t)x%y}6b8B9N#}KC(ck;wUe~?dEjd z+dMt=H{ury={{ek!J2xn91HR2nup*;*qBLyU~var$<}plUnHuf@5|HowYb(AwnA2y zDytrczJqQsOopVPLf~#y*lU>Uvc>RR*^+jz*v0>idT+w8_cMncDCV*>mA{0W8oVAo z^+q}JnJ!V~|r_W8&=eWYp! 
zSo;&Bk#+|N=^!^Cse1s}*Xmt=Zr4nq95hNQLk+~(veUfxIu(8jh_qjd8Snqp_SA`; z3Xx~Npk8RS+2#eq_K$;i2w@B zz-jub5zI@NHeplkYAQAKW0?xJz8C?ecuDTn2NqkJuWxd+F&eo!HPl+n9?}E4-w~YG z8>U9gmDUQX_)jcVbUwQBy(SDOTCH)Ou_qjzII(!s>>>GUq7!e7*uP6N86SKk4tF6B zNH$pFcZjLygc6y5osEAt_5>s&m9MLh9HO5l(0-RDrN_6RdOWU?B2NlQS4;MvSuW>z z{vs#taEyZt__DcWn#K@}ySFti@dYAMe`z(A(kqG6Q_X5XFwSR3y4GZ^>qYsjW|=>s zqxUcZD)?f`yb-FQ1$VCZ;oQz1n_-tsLMkK62oD*C%F-+5H)n?>mXB8rT2AzLT1vGG zgJynbG1YIYGo!85sZE0vteA4up#vMEACSdB#gWqI@I-Kg8X4-oi``y1TzT&M4AOr- zE6hbStLKpdh4-IiS~L{L>BgfXI6)@;IiL5&NPj9;N&l1eo3i7~6TYW6m2Hkn_dpp- z0a%?v;{OnEkP$#)3?AMNtDdHOOg^>Rc8Hl*xeo+0zu;A+B#WCR2;hZ7Gj9hkOw5f| z_^?EpR-K_^$KeWLId@osq4_Y9H4$S=*| z+22*~sfS^K{hY!mns^>RI;QIpZJ1>raqeeGmC`XjZ@XN|G0RH%nWTLgfcw-`ahdrw zV%0D@lP)T>AX7sCz9Ex!rQJjJl;hXRn{Nkiz*UdQZ z?-90cYYA&*vRPU1!kFGBJv8=NXY#p$`%4T`ZW-Iq2wwT8l8Hhg$u5%AIfhz=P$FX@(}`i2p=CmoCktOogCQw2=73T!6~yiju50gmKrK>tGAB;Dk)!3v zQ##bMqZi{LA5L`xt6;7*HX+f+A;ZHy5*i1T{kXzfGStN(h<&9qK z3+TAe$qI+(_9eAkml2aZ^Q9K=kX`e|uu5vpJfphNc)#=%kIO{kUzImLlMfCaB&4W( z4S;SDHrbY_&5?G94D$Z08KWbTM|f9h#nlBvB=fN(dL`fO3}tE{Adh+b*l<0FLL%BJ zaX(19R`iJome5m-h%0-WY+2AqL^Mzz%oV3Vl*)O4<|hlIqZR3uj_RdCe(wrv zSer`F4aegw|6#hxKVbN=yVe+-zd=zKj*HcWHRNrAeh*EX4lYfXd?Ej3FZvUOBI z?da|=_Ae$Z?mkbycGJw``I3+H9}G;tt)Q!uh5CKn!*6eqNX3`kDrTS@YS@xETsIe0 z)Ka&zu6yq9bK%K>MAH}k(a^4XNt>?16}`>(wq1g#Cd!UV#G|zfFPY)dQ>lD)$p0-v zO@7PQT4`P#T4vQ?v=txhEyRVMI(jJeyW4vX>gcFpZCpQtDoZ{0tqf+gjo>L5nz2uB z5Rh&QvW>1n1}WwekcPfhC% zFg7a&I67(AHET>Ju4i`W;17$oIIcH6Zi-Rq?iE*zG+IN9_&|29j7|7|BO0sU8))4I zuWiFdl0gw0mt%?i`zR)O(z@%ebhP6t%QMu&qc(aL?}aqsaG9^KtyUHKNj%BI%>jV&8GUl_n-hu4G@6D7JkfL+6Z*o&cf9al z$qk`+i{uw9{V7a3Tu7lj_pPg$bUCZX|4+>wk>u_|$|W89%0KCHIqj{+UQ3pF>_`)I zvW6pt;h05t!N2T1F7v~+dy)pmN(H7hPwH){>c8r4XV%FZA{x(IlXiCE8_ObLG~KsZ zf0`UHoy_e+hxU*W{Ae|(;B}gu=A`~)jBnQKV^&4HrZ{2wo8oA}>h#MW{OHLSr^juo z1Q=KA1xFrjKP2kf+iVgsDZ1vOdFJ(nWe8ohp(%B4M4hhMA;6YCPeQl2F=}USjj8N6 zu&jQ%Q+#F*d0Xw<(#JeOg=l;mB4@cVAmp8UROxplN`R|=tEoFu8$pjrf|bjg7m5X= z5xWIP9H5yHI*sVgUL{?`m6)!DWJhdj@BG7wKqq+@f8Mc#nB-p>4=(ycw?=XGi}O3X 
zO&321ROrg_sCl#p=(jXp2dS=|sDbVdB&DSOX=ayw*Yr1~qd9&>px>R!i-t|~L>7)1 znv}3sqama_XCW;nXi>i+SW?9FwON;Rc^D6?mHn2R!_O?Zv~%IqPXT!o57OPn)>2dud$>x*=OVSmEVSswE(8T&=fz>UD&c5KwU1Hu8RK zU+Z%Ac?Buiro@-a409?zp}o+jWe~Y6ElfZv=*gQI(1e%jobYgp4r(}9;Y#v%%Zk9n zrk@=w(wsM(7cILq5DFU2`!sxFe|J+Z-_8xmE1igKBjx0+LWk9M*1Uc8g(%@q4mV2t zZK$GVBlz2 zu5I_@EcM|iw$A)Ch!>mEWU2iyxK8XLy!CCvwBzXQ69x0%*Mc=LU2FP#Oo63#uL`K~ z3-_L@*7d1xoUviUf>vOx&65-xWc#Mib)6*7I#_A{?Cw*znpk>1tC5y^sn3Sp*yaaWs2?_)Gsl{lZLs~gHQ>FiH?dpWs9;S^lcmpZ=iHZNsB1vE;f!soBtQC3KEd{b7VH!*$jT@K+{f#wrO((Z&Y0svP)gJNtK| z%c1X{+r;W+tyIt13UvV5VPCuD@meP-X(lPbvgR6p_LAvnU8No@DaC%Ow`c)a>Wqw5 z9#C)C^~#xX?zAJd^@}o6eyle)rM$AYo|@HKHUp6E#%s2V0CFm9|6Fz=G+*8sKlthG zHeOVzUf7@gjKBMw$1Z03`{&!VTl0-%E>@$*!qGf`(DM*TjI=HN0CKHoL0FL_`2K?& z-*O{bU(Z5lUmE4&9e)Q=7h6;-|Bv45_rqTa=nUh1E~77*@P&v+t~id$-6mf@xO6#% zbk6`cm~I7650;_p0vdDwq3hpD7(;d^jzGeD=d8l!rvzs?jCoEe^IeuTRf7AZsPnB{zDT6{XX zR7dVFmx`Z$m~?myUO%>0TQpj)M{1m3zG?sEALDzIS|Pu5AAdyQ@e{~iSgG{G`{8tLAwJK$BfB))||gR(^@ZJ<6=?6gh9z z=`jeOSMcIQB+ADv54o9z2gmql5rS0?%MU8`zhig8eA5ou`A6gZ^TjA6ciUH8tbA#> zwpDwxa|G#cyatzLY+!e!9Y_j`fd0O9rzp`KCEWOWtIv0OV#jCA6fsD_E`$YfNr{U- zBr=SqKe^FbtYC`Luf(DW+KQ{b5LvCI;klk}+snDVcu83!b>D+} z63?*Lv4PD}7v}H$&Y`2&!r#{=^-9a7P5T&c+ax{oJ#T$SMEx6~Irs!miXU+n33B-D z7Iz7;p_pNJ3k8hK>gCGfpA{pXimu_nR|%L5ct?us!uJYYDr$tkDks6zGjok;#Bc#>Er8BgVn}+kC|3uhj#~YSl5q1mn^)^ zh5&-Sw%4|SP0lY>tUL;n<*wlU#`T|yHg4fal08nr_T6JiNcx^`_@AbjBTJb>vqAXy zZr-*EdEN=%;jE3aKBi|3z@Ra?F`H5konoj}7nPJQ4%rlzr}9i7M`+{;Bt?1I`h^x8 zA)dOz672;5PjLsv=^-N3gFVbq>G6d^9lt&g9D8O;gsn8h64+yRts>e6r@Ur=t;IN1 z(ug(PpNq%R4=5)-k8PC9o1B1Fgg5g@t%gp|;O{(2KRTrgOWj~{{R_d=Qv{@Q-gc1@ z*XAwjJB`C9-CV59K$y!q6!c904C>=8?>Z5pS>*xJ)+G#BRcx&WH`bf%8=i||0{5R6 z8gYilQDKWDYdviH#I0{tHuT06n$4;FoK9*DxDf|He`DhsR2!Og$E8 zlU+{))f`Fb0W9xoAwg+@mD)M9oO_LH!ruK&yJ5d@=TocECx$w^LX-%cM9mVrZ0vzL zO=0PQQ-~^N)$POA3!64ckdzc!*N(4A&7Ix=UL16U*U87X@T_aue7K=?^AK@p0=-Nh zJOQo5%X;T%c22~z1$Qmpix}Qb!F|!#M6Mn8(Ps_sHU*OJ6P2gCi@Q!Km15Izgk|eg z$vzuuYpcy8+VE3Y(^p#i`|%2cxm9r8-nKd?W8CFJ{vLmqr{uo5@L^~f0_;P~Tdc=a 
zk(Du6O&aRJu_trU>Gh4!J;>0xZfGyYMz`r zedzxy$wwkB^5QQ-(oCV9`k&(LX*)P{HTd-n%r4!dX1T*@{ogN`68sziYHRe5=b|Yo z7G+HsqY7NMbU(UWCL<&sDQmv@t*i7D_2k;VY*ov#zDWaEp(>>)j0@S>5o^3-=Zu*H zKao{pi?uzk8QrW!o){>#x!P%F{tAN3EH@V&Of}sRY_#b=Y8~Q8tL#>I732^bU}Nda z{jO3M_5dh;+Md+c4Wx;_a-1pv(x3x&xjF(25yg`1v=#}W+fx%nqSwB5L8kwH6Y5ZN zd!UW}2jl&Qi80t0dUlOC1l!& zTi^(7_9-II(#p?rf#si?wHW1l9**6{>ict#2e|q!KZN^2jY8?Uv3EJlSpTI%(*>Zb z;}P6x{@5!~>JA&UF4Tde_5_7X#YI1FWqi-;SIwIP@y_{d2(}`!e?HvN0B&|^88q5d z`EM$2RG`>u2*L)$IaEv%1j{0)?k%28G+EPq?+Mj6roz3pNj-fzq5EaC5BJL$DBO9t z);8(}yj=~J=VRs(6YVm+Epp=d0qZGY4TVZZAIT{yeGey(hC(=qXxC1GSGXKYP4)?% zz2~Fk>(-}+eMDN*3}3u?e2$3(0P4b{xU9hIl{_bn<)1PoZ^O+0SRnItrheHb9Ap|p zMMJE|FRlKnHIhmfrbgO{T(rd;rbXxFzj8dm`{x8dAQ)puI zfZQ9jP!P4AOZ}lz{AJeZGtQNF za}lg0TE`$AiR1N?8Q+`VT<{uf!J3#p&G%F$Tzl&|6c>1^t3WyylA0WKq9=t&URl_T z(4Up3({V+|1FC(TXDTl)fBQmqc~I+L2Z+*uG0g5tb$s#DHE?k5kke(fnUjC%hJRg_ z|NnyOzn%ol05>WnZ7gLv_pRb?xjG=XBud7+_lVzk&PujvqED7+-GecpSCc6tnT;i! z*g9Gpd|8&@XSC8K>4h8i@js#Qj?(^6x*O!5AX@SYd~y;3_YFEhf3&FHT~NW)Lyw{Q z2{Kpi)$xQ(Oe5Ly`HfHq?JwANQ!9wDKUcOtkGdXe+#V72zP^JQ^|(_Acj*;aA_Q0v zMUP?`242dZbD1b^f)CxUAWkAmNZ)r#k^YpP`DrEqK+lz(y@94#8HwKniYWhL&k9cKV4Mf>dOt$C$D1#4EI) zM;D^)Gm+qzpOZZn{a0v@Y?F_JNKSK;2)&91?KOdoetDH4EFr->y2>TwJgz=B)hg2r zEcm>&W7jj{Ipp5nB%i_DJ_QwDDa|DPIT-W9O z=X}=7koty;jy7@EA+WsAPC7`S?|s1;8ARbIZ%iJaKPmJ@2-*>lB41+Nab+};{pq4= z(Guh2xw_YEn-MO`T-S!wImodew-=6YM(DqS8&>BlU*fqVMgC?u9Dd;SF`B5(X(~e(_xn?*kp|S^Wk}5C zIYjNT%@7mSyqC;EkFh>F0a^>)m%qW(`V>b)Ron+1sKEo~XLVk5C$l_qc7rNG6}q{U zr5mnC#C5H0r12rT*%id2w#=EA#QN5_S7IAK4BL1kVmB?u1oGFYFHuhXvF{%5+{k_Y z(^wFIPVLYvdnI9HsaPwo)`hE?xX&jSXNq^Uq1!ZxS$) zrYDQ1dL&3B8Gzw~mPT@AFLM&q8-FMqH_#hIWm?hnJG`rmv;hO{;Ww13sX`AtHI0%b zGgKY+hjd&#(FnE8a7!Y9V&)9QVwl`6Q$WG$qiW;(Xscm$it=G;?&#yJ^fY~zu3+6; z=`YrU43=J?cz$p#u#QRFwu4yP!ei|DE;IR`9s#)FN`f!atohpQ7t8+-XI~jqSGO&j z5Htk0U?I3maCdiicX!v|1P$))?#_k)!QFl1?(Xo`ch0@_{@!z{R#CNf?H|77=oXL!c!I zd(t&>S@O|r6V7@0>9&%`|M>R1UDl??Jg&_=zKx$2j#x5r0;;=mSzjf68-Ud^hD}vc ze2s+5GKw{A)$YUFW{{2O1pI_7xOoPgt|dPj2w)3zuUvJ*{ULOi{#D(_|*_ 
zVq6B3e&*@=YMP+%_#g1oID+!~g74PmWu`%=#oG~Go(mHegQzbdKj%R=Hh{<~!AQU? z>i&5lF_yxn9}o4j!Sevp(6Vf|aMZl_*&6bVp*R5cej{J>{zUNF-GIYLtGgoJPjLK1-|*P${&ga@BQD%C*m6v;`f+ z;*`XSq)D5kNtz{ShtbBL-xDk%v$wSm9b86xJw88yw+%SQkRBC32h3lXm2d3ueeHZ9 zw&Som;2Q$klUbZQUM}xaRZ-?452|r;CV7p3xCY*v%$z_~!z*}GxyFtd-%vRwbQez_s;Pc4o=q;i>t>Zy&1LmGG- za{jDrPi<73aj$@7m@0^~ziImBs=QYQol~8cPAt#Le7B)fr}wQH4Z8R$P1n8Oc1;h4 zr!qs^dsai6OtqkTQ4>xEaGf}N9V2+{YMB2iW+N5Y0K{Pz07n14dXQw&7qhCunscbr zS}BPwfZN{JBmY*6@qS!P*7?SVK*9%rZoQo2L{wJ3PU?cjFt}F-Q9Dqt$Rw z8aN!Mo2rz^Suk5SeHm@X2@%o_l!A9 zFrCt&9Q5w)n9@UY#N+(CzS`p;!zvTFdcVLFqE%R3mOp*Vr^R~vn768qi2PPnGjwbF zz?ii2gcqf{aQg-5JOR0t%CXGoUw)BVuU+d2If7Eb05&7(y4Blrox_ZGdaJ-^3gZ$g zxu7n$?hno>jz!wJi3o$*wPt>YP5NU7iK6g!(s=ou`a%6kTl6c(^xzNJR5z*JZH@=2 zy>B{BTeG*tf--nO@Q@C@2JdZaRAV~FvE9@4tOkdsEws>I@GdJ z*-mGP#pX<&n)JTtWprD`ioEq!wd;v1a?rQJt8g5(>7Fmgl z-8u?;UAzwROHa-h}c~rNkmMQX6F#~m0K@}@$ zay3oq%_rb!Own=+tU{iI_YYdvTe}_2^=7C0As_c)tRSIhD=!bBVC=y5x-${v5VCpB zZKSI59nVd)uCzYsSVq?jg<%ehg^}ajilz};EswE}73uiz&q@s%(Dk+N1?F(f^A@lh za+P#$IezbL6kLD^9M|M#7N_U97?9Mhj=bTTtX$Bk9?PO3I3@IQ&xQVdFLfv*z&OqR zZ<`ooc>k;AFufvrtf4!@QV9-xkq=}IFVkk|WK@AmlofRl`QP+v^5)?UiW%z*Pk~X# z1Er8Bt45~W&x2&7D-;Q7t_Cy14i&Xy{a3RjyHe$0-i2QcV~R2*epAS4H$;e;1q#(; z(yUjuZPPcMfWQ0_jxPve?~_>X)ix*;vMu3E2dA-Hg0&kSQSnxfeE(Dkr8Bxr^;kt?+JRU77p9|s zvFoy05dLM;%WQUgQ9?dmNei8>I;#lu$BsvZK;rqI<>ouS=FgSF#4>OLfge+hw+0?> zX8?uC?PH)ap~|EV3@F=f;&bk?MS~@RQC1=oFyuaVeCmao1?|;`)NC}n&1jG8c&$v9 z%{K1XEX4VhcSJQu`p*@+jd*u2Gf>`o363A>-xALG-^O9$C{kZ9xdEyd%F%m*rM>|` zu#N+hfh1;%1o48YltTGSsa+78qEZK4r znzsUN5SD(IHZ*t~K}T|P38_)ODx|jeO^@t>Z8NTScJ5BguOfI`Ggdw`ceek;n(4x1 zgbs`^GZx^a$~NQ~4m2O?wE35Q;ybU>+O_SyNASTTUwXQ|d}wWaebo~~y%E&>0%fP3 zwRYogDJfIEUET#(xD9A=ER?R(WfAVbfcn3R>3q_%@D&f&U)ig-jTPI}g1|flHW`j2znsrUl2gWlz z$#N0eag;JFZf>hQCVsG7D(-2}xAFLo=g6G)lQfqc58;|<-211ce$sebo-i1R#8O-b zCwBVqc{KZBmC+j2<+-?s0^3zSX@lV?gMFFRl3#v`KJG+|vW@i%^Ky8hxuqBKpEb_3 zshd4S^BA9o1)K+^gMJ%&BHU%%CmHK%&?G0hEWgv1k_Fkn``Z6&)ARqC=M8jnc?-Lz zOzXZ)*;#lYiL$Qye%(RVrrnrxi@jDvFKHk=c>~?=%BLYTu@qx 
zWo|~p*T$E5#N_rz{Rkp2VcGiE=;ve6QndUcHPji)#-rL#iVv3MO>7RRD~{SNBgKS@ z&WoG3P{C>u0>%D0SR6j>{qXxlUnRJ3D6@aT(?&>*=pZI{-KghF3TMp`w_j?_{sJ;Z zS*l;SHA&fH4u3&3R(6EXeo&7WxQH5F=%R3J(?D_2z@X;lkDV3nMvUKv0^p%VDZE7$ zL-cpqu`sXRE~Fd53Aic5p0OUOJLl zF|($t+w9a-+rLZQBrjwRQMuRqLPw68CeKb;rO!Shz_m`XZ9l9V(PG|EQH5~3o7pg_Ke#!J9+mIv>PCojRL_6y9Qp1*UxsS)(A z-MRJvN_&C_oI3TAlhJKEvmdL~G1+@-^qc%0~oGW-J8?D@X7@Y#WmOP*h8wBmbH_6xm~sAzO1wbI|v# zL85h3cCrr*Z(@?aPrw&;e%||HM?{~ea{4o`b_?P7{6cQTM>u!U zMZPzqxtaZCGoB5NTZ0iu5EVuiqy5X?NRtF)s5NLKrpHXarE}c;o0CvFjj+ga=+DlJ ziUat~Gt?|A!v7|9OkpH6S;} zk8Xm9RH|JcE)rM8^YOo~yG;7-^RabsoqrDs?&L4&-+Vu4pAOYsKiWtL#cvt9)AO`a z$mILZt{2!&&X7JNiJ^Wgs4-m7SOSQb(1$Vimh%=QWEjZQ(;q*(G)qU!X=fA9-e3~A z2<*|)Hwf)~rHO(LEDIda?y2l4P#eZxBA1{mwEjc=wOvJ2b9sCOnS!de5D?b|td`D{ zsOVsj^#jNcPn_RjI`_DL#(!N!)bV;H1m#R4Q1P)f(WSGwljG0dk#eXsFmtRUyX&q9^t>6D|Qz+_I*BrzMnt5wROpjO(zpXIYsrRa42+$qp~7f_-48FROh`L=-~4tuW4c2ksPH`#JRdI{rD^dL3?GG*tBBx? zmV1wRbjM(CQ8P%6l`s?om`YXHO=H)XA7CDzCxFg>~%{Y4?4!1#>C zjI)n;l=WdjwFsh3>wZB}U{E_IT-$h;5+<76DwDUBnr#c08Dw@R)s)r~0vg>8k*IV@ z4*1K=Bk{fZ)z*ij9RXY3G{t%iq`pmzAb)wJ$$8q*M_8(fw z@h)w%_BG~&i*eYSVydA_%QT7I>B~l$qcAnhp+5nJ1ayj}ic#gJvG}v7u3Nse0z4CW z%XT%t6WzP$_dlGz6cvG>D~m=#Z!=__D*m)%`X#*2(4ZBp&{db0n;I7zRYHPfqRD~J zF6L+1yjSOXBNL2>o${rTEOIThc~ca88fHP?7ALK~h;9T`aL|1*t?SU*BSyu%3HMHC zDP5D!uQDlO&|$w61Q6f}oX$e7r4vMfGa&id^5Ym;XGF zbg#%$7@GCx;R-d!TbWL{Ds8VGmzmGV01tGgJWtweEM#7X@Ygh_%hN%h?1NQjDxWg^ujyo2p`2xu$j;;=PQ8v`l_1eh zHt}v^A_G^;00C;nBSx3W(>Tk{f6aY9XEbKL+Pk@y!I^!w;PtDill?9yDOIr^Qpec& zD+|4tz8YnRdS+7A`YiQ1eWcxLHn+8Wm_%-yz5Z??`96g~eS@~5!dui}3kVaytl%j6 zB8#6m)uoj$ugx)wy$s;L)Guk`^v*4BiH3Hjy)|(moH%MwXvu9r;wY5?BGkXqkc-j$ zYnogSxxlpWf>qn+c#*1rgsV5F6p(wcmZx$XAf92=XTa+v^yQsJx>+y0OQj>V7>q^* ztvfKd87V}6W&1wf>;%_dqIQuLKh{0=%MuZ0_+AKa1c?x0tlnx9*rxUWyiEzgx%pMd zd|5~Pd;5xu{PcNR6UA}duY2;GTSog8_iJqt8%V64`j5U+`WzRKdJ|8nJQw2REIL>a z^{IESp{U;0b=)H(6&9}Csyo73{wM~dldMGVIlGI9yyK3?j(i?c{YdDdodz`74=s#& z$E-B+{ZAOrO@76ru`BYp!#2zt+;HXK8Vt*NByk%%cZKR2sQ%-hUPD@=+yFiUvPgj8 
z>5of$waQxEml92^nNXSSoYDsrbd=znn18fwuYq17fww=lH!N`Ad6!r`Z-d$>TsSvDp8w0PKU!4ADh zV?*rfXq@BP_15vc2V}1f-meXI3#b^jYlaNzqT0byoc&lwvL4cW)iQ^^x}j2HQvWSR zGyg8X4a>~j*JXu&YGzIb_9r?*f_#4h%eGT*Wr5x{%1%}G5v0_lpIVtu>E5#xx%?W0 zJV`DD#MvJ8hkjrYMDjRpl<@0Ku_)Cb>(#sYG9S{f{z}RJxfQ!`bH=Ui_wX!oWVoAG zIIyZ3zfm4oSA;dRA2V8mqr!ysje>WtK9xB>qv$Ps=8yD(}gBTKu} zl~kBfjB=f)excc|fh{^qFodZoZ7X~^Si~yk(zlD(?_jFAsmuF3r$Lw)vMu*FcYZBl z!}orB$qqtH8G1X4HT5TVc;7BJy=fa^Hb09(0;1j8!1|e)G$v*)-pgjSTgbbKB)qbU zYXc^%sIj)E3_9sc*}1u6WFaWo2nBhgabUy39%G%BM z4+dGU^#jV1a3uqqD~;%UlTK;q1>CKBq~&4vf6c;ZsLN;`;NgTwDh)}iY6qylPo(dmW+CRo1eFuwtyLP zOqUM#tb>A#BVzSoa1yV&Q_y+>Su1GWS7_GkfNKiyLHIHa!*(RFP(BV1!Zim9RZX7g z1n}YgP?5L#@=7j{PsjJeD9yz&?n2RmW4|h1DZQ+T;G??P_Ic6vH6N1{PKfmRmt^ev z@}?upt(omA57wP~!LQZ7q|~gKZB$Z^8b0}(f(aUW5zXQLjp6}h^MB5=`ZxE$ABtS! zAUZoB`CRHC{A%m`Z0k9n7q>OB^`v=J?Nb~j|u-VH3QoVod& z*zlN&=}X8vU2VnPgo8dP+S3YhvWZ--cgy1c)KUER<0*o+LGVBCSm^MY22rtLVG-72 zuNS#-5U8dnW^~j!v&p6tH!T?j4UJ}4tE|T_ap6XF1(((yDZ<+`7{=?R2J-#ShdZ@o zbpFnf7y<5=#g!Gkhuc4ZPFu?Cgf`Bj!kTz=A%VE2kOjNJkxi1AMK^+gx%DdvW&>7!_obe3fNnc2O>ppMf{Heh|1F_l<4h%yWwa?i1N%l?pV_$^Ut18Jdv# zrtrr)jZ+C*I>Wl%tkaF9eTE(@H^$W?E*;>y)Vl4j{%J~yL z(DMKZ1C-mttO+c%qGDMhi z$sC8<(}yg*!TL=rz)LOOMqDdrr}>dDBIfIpjHA!JXpJ`?-z+h?F%}hDyE}+6ox5&Ib4g9HRe>0)k^Dv+| z6EZbr+*|E8zi)&d5nDFi)(s%=h%v#nRS0e-=3lXOwkH*D6l7bgRC-L?1>Ai;7xiDO zz3t)q!;eAWy7THB62e*v|DFxinb+8(R4OxU@y>$_Eo-4VQ$9iv5cAuno~@D>nCe-s zw`F31sE>Pj76B{xOzMTcon2i%i z^P8CrOs$r4=;8?>-(XstmJS?(UiG(G8!E)kA?fZnSAU69KOtZf9$>u1A z=O8xZ=fAN&aX@g0^*{6s+K8yu{QZSX6pBp&XXSPgZr9pc|>t1I+7HQyT#`}hJyEAbmnwJJr*El z&O|!1_95fH{U&kBYRqoM>>w%kAZ7e5)0Pa{emfMp4&ur=i6Z%MZT-S&jKx(bw7pyb zC|1nVaeKti{E1zB?%QsAQt&hY7k23tdkYSgdK^xubm&&Y-7T)Wn9};$E_iLkB$BXt z#7{pnNBms^a2oI{8)ezITfEr#$BlLRly82;tKh+ze>jXxd-wuq)O=zTmPdQUzoc4H z>;f7ebV+ILrMbN?Kka89=ah!OFTdqA=IZ~f6+?vb2I%iG<13wgH9vT}bT0ri(JJ{C z&*RDvSNG@-?)%3xp?i*Qn#V*q#%^mqMO$^lOQ-$@t?G4r%c>P{%e)BqHC2pNrf;rs zT7PDC#CBLV*F>GA{MoZmVdi3F4H(ooDzi%LagKjXhDKFi%9Wp??l@}DdAe+BTP30b 
z(%+J22&Ub9#j!sEVI8%@VXLfBo#Sr$AiHNnj_Jio?26=99X_T2;PS4BqSdN0pQ$&(i#!l%rLE{(R;kE6$m z#|wp@h55;TYO(%-SZ;PBeG@#qg3jzm1YHp-ZO@YOr-7C3SEt7R!}C-AU9>)+m}B@a z?|FM#iA)cu(t6~@>pdHGEv!dEU^gNqwL8u0==?`+W2GCUG zju?(qV|+qA(4Nh3HPnDjlQhsfOEL2?iPXt_Yep#(tQal4n>+g9B^U@|*5dC@x=RIR zs&$KbyVAeBo^5|hAQgCAw+t3LXY55odHQSp+ym6vyRjg_dhp4q_1t9?NV zZxzQ4(j0dCT-th6*!8!*8(R^vjAOTj3!!?fWLO_Vrg&mY&&N-jBV@6D)=Cr%Xuc;! z;QP)NjYsa_QLp};j-p=y-x6M$btZ51vQJc^UB();*+ViS62wbn0Ly_H;fJUyyM^hR zhwFng&jlrGgn;6kvk<_5FQ!eYwJjJ03~K|-cBR97ch`CqG5F+vYm&VwB64DZXwZ#^ z;f6yv(V0&))n8wbLU{Kf%MhU@n<>T88$fgmFZ-|03!3z=5&dzK#s}^&Zw=n^;rgnB z+dpgV3n2gMNI?eZQm)s(NnQWdgTqq63__nIzgkei9!;V|`i{2z4fq(C^hj|?SqjP0 zSb6#&-#|mM@&C9<{Ue7inj;%@g~r!2wHSyaMZ|jY@^r3Hlrqt#F+Iu1F&VmTEy!CO z>BZ)YRz|BN8gwd7*1Pxx5_E;$kf;Nw@ubak_jTWR=`(LnH8JOJzOB%kv9dOB<|^0y z5SZ%0_D3rPCS)QN5;KgCzCxQCGJ*qK$m(emtut^??mMCMg@5MGiG1-~|BiL6*ruRK zNUQ*M5v_GA>-scZb`q(*{-_yr+7g3IcqZ<0Q))c|{VFTafr5@KpNL_65n$Y_I$xKU zwP0MG{PCq#Je#c~*Uizdw{s3~$1)6uq!65~0;VWS8?ROO1ITE}^bFz@sE>U2lg>dB z^sfF`M2ktH$u`G;3;nbia6Q(6{hq_;`R8#)43U*kFV40o71PDSb>^1_CYsZijYOSuv{%x2?Mo_h8 zkD;D$y;DU}#x7;L&Mh@#u{~|p7U1rTCd7&~i`i~CaL1=dX#f%>408CM$-%jC$b`KeIJe=@83yJtF9Ft=4iY-A_z@hZU3M6#U!1Z*@%_YrPtc z>U2-*RN#|s?*$r9sOY~+2A>ne>7}1NpVMFGhyN0rIm<$fS$ZC4RP@&gA@0PVix|eh zYEkmcC#8YSOwW^`K+^%wh+Ns3AQ;Rvl=<_Y7Xaz-Qa1T^dx|qKP$;e&!avjg9)^~G z;iJ=1bRwQe4$kAck7Q`O!B36W-{Zx%bQT|=Vj7xGL!9_Ez*ugJU`>V z%ANcvjvnTAQ};BfTqs$94Zb8-O)^FK_=|t=96sR`ahbXio2LQJ_~Y7PRRqw#SJ1-G z;|HF}rt9YX5b@@l9M=k+s&YFfnrNHoeUgqvsL1XF~sZc70E?Xtl%-SZ+lm2Pvk znM>PL0*M*GD$2q8*d^X3Z0?ZQBP)(qiS<@{*Lb{}If5|JZ%4Baj-s=U-_2PycVd%j zb-aPTX#+q~g^y9lA1@8F25G;qa3wj^UpIC5LMeAw+NZhSSUC<>9))T;#Lrk+zhEY^ zy|A)B^*WAcDtH?bkImiV^jdj-5_5&taq%|_7&D)46QLrFv>J7QbMxJJ*s7LP;6xu! 
zI`m_{i7X)$E4@gxU)pYo6GFVp4_Y>6(!2Iq9KgUM@v`RMAG}e<9*H~T7O@ov zD^3GmQ2KSYgI6dRH_R0WQ}Qno?bhB+0_41q4>wm@0jruVXuL{a{X;aq+Fk1lG`Z}u zs&aK#D5g+P-jFRpmTezFJ8gdwgVIEIhe-bd5y)c7VSokQ)hs>ccUkY#qd=B|rtzGK z6H&ybYi~Y^I%`Auk=utcUWPVP{KK%BD|7o74f)J9cA`BO-HQ< zAxKe6u{5CrQSI9`Qd}`FEKk2+A6Gl`SmcugJK;C!Al-5D5k#wNR)bb7(GK%P%a2({ zxlKcWeB}ccxjPEh6CfYWPz}5{kJxINE(I6E)#=T+X>Te3CT{)HMPlYMUbq=*}*aJ?z48e^~-_nbH(~+ z8_nL-NgF_6rn$0nPILDK{-4%Gn(`n#_YlEKMr2wt%ry1I^;i(+^Q=q#84{I94Rplm zRo8HxrO$mVkf1s#_^o(Z{k?MmiqGsVcdl>3z47jJg_y%bk_q5?Z(hfD&De0K-0-0v zE2z+|2zNlyfwT}fIWT=DCpR@ zyO}j%-5QQ0?NW_^u(W?bTQj7X1`^g%hD^ru_hj?KS|sp8NWhZbfW6q}pjW-l*BoQ# z8bNPnpeu9c`?HfWsl633q|Wrx)V>Mo$W z7n#0d@#2WvR!;*Sdr+EgZ;plHm@#z!RQ(#kTVggPj!b;jCOr#B8U5F)b1se=8^Y!| zhf%!VpG>V%uyJi#$#LnU?VLOoHenI74>$F^iaL1*{t5k)H&iM5pCP_~fT)znNqi&F zw$eo%hxmy6;Xl3njbUfqkKP?Q>Wg{IJ^fbY`D7{I(!j(fx_g{B*blB_ zm+oAA51aN1%{}hrv^Yq?M10m1r1u*}7`)S+MTOeklpeyKrFr3~VODe+daj~iT;N$|h1-P8@Q2hDzSMPFt_|rc8rCrbo5FSS zlf;3kW2z_0(fyqQM;6eyCetZMj6Dtf0!>wpVUxi{b`Ls7!b&E4%i1dYYcyA<7!_W3 z!n_u{IFh^;3=8{{{}|1l-u-X;gUMs-&KGR%re23RrHWbkta)&MZdv|#vMnNh_3L7Q z{gSU^G%1t%O5goaJCdE<+VC?WO7(u|etu`8|6jZDsi;kP`}?c3wGo6*#TsVdN*3ma z;`)~x(HPLkx})C{e||H0`_#FspEEt%66Y(MVR7eHtIt?`73qxf`f^O%RIY=Rgg_e> z3;lW;U>Co+5DV}{rInIe6*yS5wao}Ha5K61^W|)v$LZy9=Fg5=OnXjRdV?QPpT=$ zesl`q#Snj;U)+Z3!D=!Or@OvQm>CX@F(!#toA0-wbkbm_SB%Egl(iX$-s2nSubgf1 zyodaw1T?PL;;w)F`A^pwTGMZR`n@`zkladHDGm@`amj)UagSav`|OuKHql6oY0aX6 zfyr`tHn{hj48e_lsw?ga5)tZ09nWNAuoR`t^7PZE%BiE2XZKt9t}ymv-YO}pk(#l` zV%5K00-En38^Zsr>JrjF?_c;fKK`fc8~DDzQu@ykv@fK;Z!DJsi|%2v;A|8+EA{;I znI>><%sm`YlS$6bj_@1Ff~?QBWOjA^Ad-z-)nZi^Ay(KEb((Yw@zfLVZ8oa?wEhXEnCDjS2v67r`}0aDbdGEU9FKx|p1k5`tWI;0mf5#@b>?Ca=CnAqY*dMT4|a ztih$@aYrp@g(W8Cxpzh}&ZU_&Oy^Bm@1e#g(do^p(JHhOpTHzDnrF+wC*LU9KqhJK zEU)ayS5L7ae7k$ieXG2*ZbVdpA7FoefF&WBl*}$qu0bLJZvNwo-wL(Fu{4#wkF)d= zQmdZyjwXh_!N83LH=B_=J)rdvZ;iNhq3$s%?6y#r$|oM$~!$!_ap zG%%p>IVR5xGAyLZU!v%aYmRV1%>uafGm(d`K93T#bnVHu1h@sTYBR@3A%`n}t{}9I4XuMAaV}355^Y`XYwJ6!yoB=V 
zrF`5G0K<0Gm~a`rkUprD5~;pjxzHb_>eC(e=S~D}T9!s@T$R%%Zj-9io97O^S*e%K z=0~E6uQ)q(&(-UAR~^()aGcDP;V!G4pp~i`Ui=>%L0N56uu&LM{hng_ z_2-wE61lW|s_KhkH=W7}B0dRxfaL!GxrO=%X}QI`=EFZ=<3GFXKck6@;v899$(%pw zsN~uQf#A1cij;nzNG}x{?c&MMml|MIP-xWGPa7i-{<%9*;0TD`zM`rY^;B>f=vGW&TyVtf=T$fg3nhCx zH`S$qLoj2nhQ#N;ld+P*Y)iR z-U5iGz(rMxs}+-qi{Em8SAr(zk@73oF+A3KVD9@IHP|BRYs;L8jy4 zw*}pmXQ3)`vD6kCH^qOLyS)Szj48>H^{NvU(P!{aYfR&cLFjs-az|wSasvs<+O3!% z#m1W6GN}!qiAs{kUCEzt%BRia9zg2F{JJqITh89)L9Yt84~7eloub8N8x}rR=9ORyR}15P2^}zHazXjK>JW&RkcTRL0REo^3tZ@sIfp7A zy3zmV_TL}=pZ;!;3*>OZW$RY+;e81nLTjuHs{ktueOqIikYue#pODyJ42!j~NMzza z60_LOMV)wmwNsKXsSG@x9@{0L=Xi?2g;AHW?BZ+MuaPSIgI}?t(ihb%&wv+A6Il(B zNFWK_QH(SeC)To^3sQ<-xS?B8{w6o0rF~>_tc0|KH`1T}LoiKu>}Y#TrDjgr(+DBv zARb3g7d4NIhN%3QtjolYq0C2L?{P_H?nZH}$NRa}$dOm4-=R4olfBLk zVoDQnLQ(4|sVc%sfQD2bcyA~7Ab~aLHZ*c%BfOfJ)kk0Y5J1%wFI0U!#wlQTYRSIy z_P|S+&%f@|Le!;EyNFIfegXT&a4XEPPgN7^6^tH5cyU=R};XK9j=HZ-C{V4BFJXRORtf$)EFHo{&bX z3b&xKsXr_C+;WVfKU-fCH_nRewPY`EZrzaf%_h$;xO5%$>wH=&*O;?vaXufafb@%l z$QXmgUq>t5d`%RctoQAE%ajVD`?Pj5>mX9L!)7&6r}L!GC^`ekiiY@oKaKDe~A{spCQo+{QmIN=Bvv& zrHdi|YjNi6!j`=9EhTYbB>sE5{XebJe=YKVzL{eKS2N6S%O@*GF*k3T25#iyNCU@` z>qtw$lwdXb=S&aYyBEq$-*~>}uJbf%px*RXIjGeh+tJ%}y>qf)6$K6?JmEoQ2a;w< zn&r$jgu%pkS+2rapBdthXJwd8l6RUIJs0ymfxq>M^rfG19nn=@^%cy9XY_K**y)mq z*;Xzt{6v<+7BBmwvWa*cpJp0!3?(LY;$|DnB(JGM zt&yxpiLJqwT=x_L-)09^6N>Tg@U`Y{aFbNlt9&|2$i3q>F2or`P;dNz2|SYJl%Iav zSeJN8yO|!zAY0B;GtPRUZ;@Z}aymN+x#^$Eo^Du>EYzU;_Kds(-c_IHO1cPBxG z!(YL^6sx4n-ya5l4HR>8_j4DB&DOVMi2u%!6~dtHQt(vjF%N(K{|+H6@=!l<0*ypDkjUZUcmNOEp+=B#=;q{g`1Db|*;)4%`R z6fmEWcg{7`bw}KX%Hvo-hxlzZCv2by&?InvC3ou*ueUtl0e3x8+fsV`E?9FsxhiyJ zDLTj5{!8#=%M?BZ0 zrv+~cmQtxhzp4 zknc(bs;p2a+tCu}B+!YVf0~}8B06!N0Uup{!p(OykRpvdp^H8iAYbPyDcy*Jp54^YBe!vBcQu?_2R&tw|2HoX@0be0|VZ}O_zl`mz>#pCSLG(%eK z@Nq1UWp?Yyk*-oX?~9+cCAd$o&Fhy_1)T`2ImQY8wKXOd1=-)P_N)-B3v;?YmQk2j zXC%?b*bii?uz+rKM%RMB14P6#;fBtmt>V~&Hn@*os|cLHz@e>l^!V25Zec8ryV%~g zRO#7!CJEI40j>lza*V^;%nbKwLdQ~s=;zjy;yY|2#LEVAc<+huuB8B?M zeEPZ1o<}{mn|jO! 
zT)sOvi$?%3G2{C_5vi8C<%~f>p;8M^_E@;v(LM;)KvVDy3N?AMLwGGNL$~{j%F&Gz zGI~+{T!7+bA=UDN-2MZI06ZiORTs*>tSDkr?pVSXORhS13ptH@-iaPK(FhOgTu(4& z22BlA1a?H`n|dU7_Va!TziTuNwK?-k9}(43$l$_gN4TthPH87W`pkhZF;k8qyaII( z8LB5Nr|@?l_38;1CPQRz@roLd$&>N(@dicO9`R8JS9sbk7PBI2;Co_IxwMi9xegEV}>)NNIn5~U$u9^HSh8EccPO)e_fa{+vx7k}Co<9@FUc!G5d7yN4}TZ!tm zm^r5$$L(-E&)Hp!31`A0!UB)uVFqzIFy}QDY0cIw@m&3B%9lctO4*(Pw&m@^jKLGQ z7|NB(CPQc!2E#|VXLF--E?nQ)D@MU^VhSftEA;qP#UJliYta(P4w=$H3U_)XCcPm= z#=&$ts-9GJ?Il%1GrI;G?!f4!LpCTLWLar(=OGui#3Ki-xh?1O_S-lb#nF>Ie!j}_ zKuh)z7pvuR ztU4{aOk8WlBrR~R$MsxPWn&?prB0(R$PpjV1>)JY zE#6haWH@v1aB3A=PJc&_{=HPq9L2U+SFO7Gl|C1HyJ9dYJcb~}cdQjEikEcF;Ctep zA9$_a(kpm@ps1b0!hUaJ{N(BJ%4ot@wLCmFyn21C6~94KyB~#bztobwFii&U=FE`T zqtz2-ntR6R_v9}7-12glw7BEVy57{j|AH$!Pork$l!!%FR_33KsD#mz+qk0SWAn^x z8-w_C)FJz?Zywu!Dxa-fF}Nfqx#}p!3^vUpmxwvd(wsSE)Gcz<(@V~_MpT6$Avrny zYs!1-53t{tV@O{M=94V4ylEfnnkJ;KD|bDljv%Mc;;5_Z%(_;tPtoSD&y@R3u4~$d z+pq97elh`bWLC#FogGD7En@8a{Sm*R7-FdW*DF6#J6MTfQ&%>vkXyE*R!N=!K>XB+707d4SU=R;~j?NI<& zUE@b{%QE00G-Ly-7$a#8tp}+|`G4?~}|lz2q487wF2i?&1?+R&fj7_3a!}^KcJY+sQ|d z$J*fBJDv9w&$(JO7CBt2%g_%!5P!^8)iaYP;H+cWsx*)S5@*cnB6MJ2?yC*d5>W)~ zG9L#*K6{pi&Fiz?q~_SpNpTPPsdyN2WoeDK#PWvx^e%4YIht-;+<#LW9Ne#J(v}}Q zl2yEhv||jQFz%gd*tLbAYKHyj@5^lcKx1nGl1QGSJ)^?>H}duEfz*8V>wOJU*q;6` zw(DgSf2!o^pUEp3N_#U>Ss*`m*N9c9%}9r{%*8U(lZp=M8ncH-+@=dx z%pf7pxfvw0!uelB1#eR2mOh@%_$9xD3mD8U)|R&c^d%8YpYOcHiDbbVF@!Air0>e>X@wZEHOvfMc3vaHp0HO-%-Sa zbVaP(KV9^fi!wfC$+&>6y(NleuG_k+l$JtqDK4c*@j`Kz0>O&}4^Z6QwMdJ*6?cc=PM}aMP~5$^ zLkR>56u95o``&%_y5pWZ_B#JElKjXRdB1ng`Pkel)ej2THJ(S=R9Ce9~nM%fl4ZfuoFn6O=H> z+v3sKIrn9r9q3cwPhfSh>t@a$A1a7{`Z`|U9i9rVpx1*)E4S2X50+2aJ@p*2sCuP0 z7*+n`!jF-@V4o)%mOnuYMwDsJT>#kI#3ZAOYbG&zGW>J%4st1|J(Z=67c;{O;48w5 zh|XryfUfGXUZ<)+>;lbyrRm$-SR1#sELGM)YUzpAPqJH)XAP8zZ()Ao7e;UxtmH zIIG337ZO4lGg3@@T^Gl;bd{`9bG+ziMLk9o_!xmLf<#G=VQ}wz>LiI70k4o^{i}p= zvs~+x#r%L_zO58iGt?$FOv(iYr9-SB(K)>?#ws7!kMQu$IV=5lw&p^yan{p<>c=m> z$c)TJcxMF{{KQEn9*rm;2F4T->)p-wT#?5)g_eN8N*D)EZbr44tJ#=q$LNAVrP5g|Xl;sAb6*S|{ 
zsQJ{zxdaKZyFhYniT%}kz9&g#bVkim|m6=}(-PGoYR zZ_et0F5X0Efqhe$u$umXh;|SBWm&nfl39mam%16a_MLmnz0sFW!n2SmYpsvHW45{k z$MFk6wa)}_SojAv^hlH(&YZAcBN7_F^m_JXaIW&j>s8LsZ53xYPYRfiM(|E9_LOK% zNO4WMq7A6D$e0(i5dqt=D_VI@f?V@&jNap*@de)4<_&!Dp$MWGk3wC3PURGE!57_i zkAs&pjJ!NuSrdlxzpim`n=wgcNILTFym#P_Ni5H&kpuhLh`M(AG$+Q~UWUJiQ%Os@ zDES?VnJHL}Fx`l5GTBoID5Nf?DdmPaERcg^M=qhB8Sgp}sHa07v?uajbgKY2W@QVn z|7iv7W3NgewVeldSK$h`eY?KTVW!GF02mI0_|DFku?}gLSwH@;`LZ|LDIUPGY-#ou zvb;wQd0SAcE`%K7fjg$}Q;rJ=ac!ZJBnj^G=l{8MRLb8{wQl9i^6- z2?dWHy{uU{W}1|2lRSDS4>icd9apeZmH?y9X>)#f!3ZsS&nKEb?RB$P?IDfS#C}8J z=1Q@nbXIX{r%=%CT%GiVi0nX+q_suu?K55ps@hDi`q}wkDg!h)2I>?}qFJDi3pr*%*yVZ%p8WS)RTB){m`qUJ*@NPCg*PZv5 zdHFgCJvf`eKa0p8&LP_?PW-$=C8q22o?be}6;uk3BKO%U%85u$MH1%Ud0eMNKo?$k zDUnfEl!BkIY#g~B-lsqBXKeW?H6W71Ce-BZ)3;i1@}&~?bMgtwl`AiQn_=5)OC7QGsi%eY@Gt;3U=%Nh08`Z_ zavyRL#ZEuqs#gqh|q9-(8yvy!V!NIX+C!|s36GO7(xF1UpyCU+cl~1Q1FSu)z~v;DmjKb{vI;gXn>usY9wGr4uXo#xtA`PN^yO=3>=$~p*s50)** zoo^IA7yx<9>JiMt6eAb~#?9PRO05*@G=$_{rIj+^pZPyjc2GlP_?C)r`f1yyg`>@* zeUtkUr^{cniPCX*rQ9zHnZ>TTqBFOb+Yh!BuNg)9Kj*zoo_IPM{muMFvtuN=Q?Gbs z^{X)`RzVzeDk(an+UJw0C3)wb>Gv#HoSfjzMb_5(y*@$o_(jthto!Uw5^OvMU3_XO z#sORR@!wY@SaPE9$XR=fG+x#$VV9Clvaf2aK2tg!Hl*4zSObWJMeoU##mWybb)sv1 zrRq6qcdsDx0!RMNMfLJ}XP-X$L^&tb(>^dq;(o<&1@|z$k^?LY$;j^Tvn=+~uStL3 zLGfwN1@bt3^0xX=R;CWg=VVSV9POm3tAOrZ7VQ7Ty(t#b$6oiI_uz?DnH>Ni^pCHW z^%cn$`4a}`6F^6SE>$Q-#i5Q5Mk8CMq4m`YSS(#LMwxND9xJ=Gs~go*RKn7AyM;~? 
z{D++Q9eF3slcZTAPE1kR@5fV;iak6nWWuH?=_9s{h|h{f`|#bM7h0H@EaUV{>v%Ji6`0D$BY`?X$2 zbaa?(aw!txZXz8OJeFKe8o>ZCoB6c4^pvbs<#g|vrha(=jXnkV6q!U znric^&dYD_K;Pg^WC_<7Pn+0`2||tx3W-uTZtm4TE%%I19IL$}i!*5g5VLgUWV)NC zg)1#F4@fVVov7r|3YTAOJmOiu5}gr=FLd`%wpDs>>QP9N{s}%%>PFGWRoKUxRV;gk z0+;|F*;Kmiafep}+CTb$aZS|p^KYe#2V{=ye_y2V$20D*k7Br&PZHi>PFsSK$tpTd zOVf9cKZ}41FKvO68`Ii)Szk-bDW)gg{lPCOeU-2x+eS`Xce`XG@cld0z%2V^^$~Xi zwY(&viT<--uK8oxp$;OR#-^_k;ATG0+szDZK+7T%G%u9BOo72@kMN`)P~-p@&^utU zgB03b8?XS8ys>T5NW|)@8y~9TV4UnMbsL^4mjC`5H2BtJXp6hOyHw?8d(eJN3qruF zo0qyoTUug(n3UQ3FlDKe8#cY__7+2ixJb@#QmNHM`81P``*`vNroT1n@1?Hlo4MVZ)+@{sSJFY-Kn{ukI>GPGu?qG6|*wNbW#=|V3ikuEwjkcUB z#m;mnPU85;=b%x()1SY8N(Q6CT=hdJ-g~f{k3~y^_&{dKL92-i_gFbz{+{<+m8AC< zRo8J<`3k4Z?v!+~dZTw@_~r0dSmfkYh5IvryyXOt7n~SAFp;Oq_JC!BZmp}{zgn+3 z&uYhi?(K%c^ir;gxq3?9MCzf;w(Lh>qpruN_ilrAXMXNHWE0`DI#)WytBMc-sMjj@ z$*!u!j~fm^lxTAABB&sL=V)SXtn4s=tWJ2s0{0-gl}kKoxqQN95AL-h6Rr`JOzGQ- z>9ApHFQ=MVtd9z|DBX1(2Pd59S*&I`uSCXpsl-8===O?+CyLz6IhghRU%!Y7k-E2g zzJTht?&%dr*G|_mqni}4K9Np&=@q+Av4Gf;iG_=$oV0iDUCalUC=Az++_#&Ntp&AD zlo%B_4YsBeFzm8oAl_O814S^DMx0v8C>{5%H`JSB!ZQ0!wE-%@KRSp_@Rf%M3aV)%|RRK3WjYX9&yr)*v(S=)2H>r}EAE-zo1}U(W?>DCR*$cejB1rKL!{R-yBAqW#Rw9(0ve7)U zZrvYWWqPW*i{p%XQ0PZFSKt)Ug+3?OR&^7$QQ7?T3E;wZ7^_gP2ma zMD^{`Z+{zijf8d{5hFlaVNu+W%bsNx1R9|IvDP$ry;w~olGvfnLIx5o0Y-Y~h3H=^ zx(ewLV^v8M{e7x~ z=|JA5PcV=>=H%=JMr{=A_MR|wjbS~-;dq^rkd{6$HkC#R0H|Aum4hV%A9+r}TjSGR zXD)tMdv*wRKc24yaJei zWuFv-v6n2rWDd~oz)zC1Q77YMt>xup?MPgkK8Wj|?#bw*XBi6~V{8sa$_Z05uPQd{ z**KHjD05-*ztvS5zGaR2nA*uP*!iW+dM`YMiI469vM|1J%xjoBbe4|;ri$TFTrQH`Suh2C2@*%6HOh9DYC(Wj1{_IPn{=)we6-NoP#gZC z>7^MT0vHQBvmgA}f4@ftbb`|@iIcBX3)oever-9D4zy|d)0=uO0p8yjDI|sAc2Nre z;V0zALx#4jyx}Eto11X+`8%?a^5bM@C5P99Oq*7g=UgM7Aa>-J8Z6efi|T#W-Z&{O zUAo}>!7XJm1845M;#Bs~m5elNqu>#emx+3sXzf;pE8B^oZ>>re2+r%1CvA93T4`UX zYmnU>>lV?*9K_VYHm19TrYEh`;i@2f;` zpMeR{+}Y8SDRZv?IW$iAA8w>8EvM_t_EO2RnWy)G?OOmgxtr?Q2DR$;9=&_UJ#I4YUW&TNP0D>)V`GN=F*8x`nhL0r{XG{BmrxSYPh z=8t^m4Ulsz&L~3uG<+IWxxeWx0B9m~7>79_dCGAd*`33qexlk>E_wy^j;)ug;CYj7 
zAYYJSBXF)+J1jmY8xYyT%ul`<0;;km^LfH{;1jds>ke}MdY92vn|a$F*4_hv$WEhW zScj3t+Qn-Oh3=XvHbcxJZopVyNwL^+sgzaySzU*FvHBM=q|-PY(`SUBx|l<{!>dFW z7M?AI{4?GFBjJ_b14s!6(fB&x<#= z5&iW;>(b)xrtts1l>Zh1Z0QEDYBt`m#)Gv2VBtc>$Fm+bpy1hBh9g_+_BczWDjeM^(mJk;=P@ z$8C;&RxO<`X|p`fdIjN{2F1DxI%Sk_HeQyQ8n*zHd;u$&Lpc$bAPv@FaTfv_0qE^{ zz|$tblJIFC#u+m=K9!cCH;!a1U^6|MGiLbeyeI3%^_lc!6I|=DkLGO0&9Gp%-RmEK zp2cV&7AS>@Z`88ONYLCla83*>p3P>cm2EprNB(5!=S=-Hj6FSNc{XXx>9+=NioD04 zY6HYoz{4k}pa|LbiAtLLd8&V;hlC2_J5VsM8qdSVm!zzaq#QZqjHB zY~BOIz41LR{_KrrKwMNt?W}1tgt^8-pmlLpnN;tgqmqHdzwb*MVl9Wb z{u4OJc^#OuA%^3h-%OT@_4nZ{#{_X;`+>c~^zrOAdYdw)@{1q8NvDdPUwFPm^q7el zd4)G})~k{j3{J&aa+3-tf)TOXWY2L9){RtN+F8qeQi({_n2j#VU7=OOdF-`xMdg-= zsXbKqsLX)4iX%~0qrX%9*L(DVjQ5>I4Q&Q@^J?ZA-jO$wl0LQ z>o7>XpJJ@nODAC(Y}Dr~5LY$5-1iFhDFSsv7gW2oJBZ5~H`yA$d2(TFQDs#f*SIuX zsMCn!FO(0Mqc!PV>V@+~n=1XJ2y7FI9F4BHpesn{OTqUt$xi~0Bsw5E%^%C!tIZ<>cjL3YvhDIEU?hmAH$L_HtPMO>ToCN-*RE6V$ z4%QUlgp%0*lS#*w5Cs^);a{))nCAdlAoM%fsn#IC^EAGtDI#J^s)&lK1dIwUP7F0A zL!$7#1lEPHRc+XKkCCO>;R>X-`YL2`=IXDpWRWELB@}Co5mW+-zvir_BWCybQtd?+ zENIR`))Jc*1ZR_fhVW1tfykcoVDL1}p7~YG?VxzV(H>f`Tl2c9I7mImx$o zN3b(6Ax^9JTt)zlp*Gc-Ef7M2{!a)4!?DbF;AP20VIKDqnO`h3P<0o-56}d*R`-p68 z1w=^}w)G-UsFfL*ALv>gjBc-nEFbSm?)n1_OClN|n>Wrl$B8x=AzFT8*@oQcvT{TE zXYSuBn~q&LR8L(bm_g60)}%9wY}02`B!;wkq30T3ntglv6Jvkzu^S~eHYxh$pxb6f z$0e5nh*rt!W!hc(u43X&u^zWkLW+0tfYd)F2L)3>#{p%EX ztX4`qc>T4I0!|g!!gv64mqw(tThm411zfjkds#@gGP3vuLmImt?FRJXobJl_3NNWr z0q&&#Q0_MCS9r&ZnPmmr>P#YE!#%&Lck>s-dnpnNn|Jhbay(_`Rd%2;7DW?cBzXCk z`nETS+8eVh?2Y#KQJ+5o`+;mCw0XO%NhMQ(im-truf&Oq$^NQaYwzs^LZX(b;P6bV z?RHn)k-kL1;a3PwEqaCY)dDjL2R(!<NmA`~_rnoLa`x?0}%@Z*LCYoDyZ!X&G=&xxZB8?5!7AQ_&RJm$jCe7B_zNNc^V zyy$p$$O!ALl)~daU;Fesdagi+(l!e!-KjN2*$C-+lmu!gd>FAu3#&bso(q<_C8rkc zY>-!*>-D)z%@79HLcKi#RUGtIa+-wsJWYLicPG;+O=aY%KB}?Af1EpCdFW6uoCyh1 zd(+~R<}vXWs764iai9`*R#+s1-6OH0g;QtVx@?{OBzDi1w7+||rsCc%QZkgc&PkfC z#w~9@!q+c_$+r!nn~I{IpSD@HGZVpp>HX|sKqv*^mlIDYVyVj#{ z9b_%_zA#n-U7M`bU7HwIGm*7twX2Gdpg$eNOdH_@Ku%n`dI7{Uxh&+$=W#7ol9!7T 
zT_FnDq~x-0HFTbuFs9Eva{V9P`c$>l(ItK%+49?{)Rp7UE3lYIfV!l| zoEGITWsWlakKM<_LKnzfufLqh|ETi{OrZfbSam(jP!caUMs=R}nE6-{$#N{>?>4@xorPC}oL?5r zH?z1D3vP>Qof7sn1#F(E>gTLxChn9HJTy#jw`#GS4xy&sf2vI`kp z=fcJ+(`~J+BcxN+F0bkXQwMt&f*9o=8%t9SUo3*Z<5e}PglcE#d{>GsuWPOTeS}@K zzwMX946S(6#mOuxji)Gt5eWJ!Tij0c@I~{kH#2>AM@x*ied8WJ>T!xqwIGvwHaoA)hKUTg)?NxNGne6rvg;FHkGddwS;kAiVHf^WMgMq@oMsNVgdD#_mB) zuSo?@BdtyJw@r;{Gi@MuPImP}#WUxUKp0CIy)MSlFU}2B|Ir}h=T^qws77NMABw99 z)3}M8pCe()CYwWHG-Lw?>n%H^b-T$N`#v!YIcj;GoMp4wX6;8W4+Xl+p1>Nk*8J$^ zR`?uy%K2(6iX8TB5U&b3nIy6N`k(JZ6iuq*f+oeu5jt8G*Dn>)j%aEuq^vzg)YjS|I<3x?M zl&2&vv_@VsgJWWoj7M8}$E@aA)-6NgFM{vuqEGW8q`LhobjXZj<85*A{&Jf5kiPLi29^e*XwRT%*g-kSx-L2e!b-1KUJ1i*u;iAPToku`5H~W1)V_xN_zP zwzA2$Be(<@Dnh-u3ttU+{Zy~?4m_4xoKg(VgB@DU7=!oqjHXttopocp_*r7)Crfo| zD~O8ko>MHVf0xO#37rIr@|&_XFEtLAPbbU6vNdm8_~iHm{9Vx>5@6L)Ga8yJC7|Mcys(=3^6Y>4hMOaIs}nTDpgYsa|Z$r z`=-zgL}aSEKQeE-LkO^^ION=V9vtV8-el!jil--(aWMrJm2v-cc*v%DItL)~c-J4l zi?L1F$4Ub@IIS4?+S=B3xl5y2Ouj6S;~}Mbfvs(N)Yz|zD`V;+Ocl(lcwp&wTDp;L z(^1I}IT9Rv&#i-J+sK6eWh8N@g2F=Nft~UcT4rl?N6#5x$rYMFV(WwasOiS3{(&Q=*bMhgg*HnKq2(cgl`x8Kb1B%3_bJ^-#WCG zS}0J=VenKy0N2p*@&ed~$@&&pS5B3l8NS8RZ-WPj8iL0{}>&wC_=BaF?Ek z?+TPzqJmNgA&juX*er> zp;zL7tbC-30U$%7zEnG=98tm0pd}o^))(DU-qOh0S}H=H*c2D#B%J7;oS6O8%aQa=Tk>FU{%Ptr?g~pXnBEZk zwtYuZSt4p1)@<)qyCwVZDJe6wj^Wj?A+9H|`Lc09lk>95MS)^^lIN9Z>iLKeXoOrS ztRcNg-~rd=mQHxC{Qz<$dsSZeDdt#1Cz<0Dwq2$F4%tA*fE&}q73Vbz{?9)V6c({_ zwg9OD0@f+}0@JCcF*pD14CcO1N7;byc(T~qm)&b*o?oMG_-?xk4&&uTLOquNIj3#< z=T2$6w0N@ZD?r4q1`rnalw!_JZ(aiGBG40NclHEL6d_`po|jh4SeSQXVdb1EU88^^ z1EoDBZladq2mRv@St4(ltw*R?%^8seIU&CKY&ouRzPTKkB=b!msyrvhHL|gul5b|W zL}z;%+yEurKC69bRQ+~gq~xLJZhvh-@$7bh_S@K&b?m~Hb_jcl-RG1r#SXR+57wcNl89u?&@8k8w!eEVgMk!-6S{| zdxjYEimq0IuG*?&19!a0W4$u7hE%H`%Y2@aCCv!?-%7S)1pY^rQzmZ{C!K$dBme1( zaYa<&&T+nqWHs9IyX_(T0GegR&<1x>D7t85;LBvWRo~pn$O*ywu!>!=Bb%+F_k3c;|e80I7MQvx3ajr z6>@i6x!qDxs8dU*`i$*xR^{JyAV&PF*?(@Gof#ifL(HQJ;1lk@5rtYCv@spAs&~DZ zd|UC{d#1U*Vto=eGp0{jE?lXNVD)9ro~m}(uefJhOW2y}K)&sR?oAujj`@9ehr`Oc z?t9R`~Hbv~6 
zsi2@UIW*PLnAVjv*Jb+tBTXM`8M~abZI)N_agP{4qu7!;sU`#&G}U?cj#~&bzNDZ- zGgsCxs5S76)z48B=*J^ItZS=>>so|+z*%hd=_PeHo{cr=^jT{c=}M(GJgv7vG%iDQ z^*&pDH>;)35@A`DB2svRnU*(3{k>|+TB1tXqWy>(Xp^tQ@uGR({4(b9N+cL7OJLY zb;9oN)4ii#5jTlX6E(C|rKIkA3y0}-ILca*ZOD|Ab+d{+Y`eI51G7WFq-IvO}3+% zB6UT5p*F`&z4n_-y@Bh?tmKE*@va@IlX4jcE)|&Enhk5aJgq4Jr_8sozcNfs8183U z1DqHpXQ0<4$bB;dU>Z=Yr9tc!)!Mi&LbjFfpne7alwM;BV1kT3W zrQPd!6;}0M{9LN`U-X8o1mcfMxqb$sx2w$rAr>7uUt8KNgz%g?yI$Ejo(asjA&& zv8;$V+NGm5r^w44X9<4y^1j%1Y2ZHe05c8MO(kGZqgyU$*~?_vr$E5rrbTp(s*{){ z3s`>V0x@-m?qDY+Z)6bfh{0~;lEBn5eTA|n9Z!?7o-{wjLPzZRSIi?FmLBedPlPN3 zY)uUjCfF1eitL?}c$4C*v4pna01O9M+lhxoB0C&g&)o5aEfi1e>V5jQ76hxz`{$Rt zAXC*@7qhv%pRd9z7U?UtzPxe`#&EARy~~?PX{8Lz30T=@q{7HmjubsLpvf3 zH{2!4SHdJ4e7QI@RF7`90u$>$-<35MC(f#_okn>s^$c9Kb=_At96)X&+VJdq!S)M? z_#_T2(2fO}i`R+kuRrRuI~(bO(=ZOy}u4-g&`zQ-~v0ilFMr#W^VUX78qlJJt|1*QT6)_=ime_lO~7`R`>UMdG> zgem+-v+2KMDSC9^bZ9*G>d zdkOG%J#L#LtuQZkPO`EVh0Rb%@W2xmM1kjySadgv9j?KESm(l43p{{Zp1dH7{^EOhx;t zP{!Pgxq1g+3TjOGqz7x}nh}s|HH68Om6m1n3olWxOI?e52`V}MSz=}G=nMrb-3c0Z z^7J=iDMouj00LIt=^=7g(l7`65$`zS^w1;?s7LnH1j7YjMOG|0n2*Plcix>BUJL5U zbSR#uH9LQrGPR_DvWFid{?y2}Ugv$&E270yX*{obaPhmul@9XJl<_R?gv-YiY!L5{GDx^Xy$Q&Y(7Pivjg#^S&G}<>pR6x zNl-=)x?gWIN)pVjT;=)Xy1QZg?(t|bUhY&Idkrp=$*^c<`BZti;K%gY4w`cj#f3Cj zT!*QZH2rOQeL?@OX@hwuM=v+5VT`FP2}sa*v0AVy zO1|Tcj68on@A*M%X)901_IFZvTyW{#fUMhnOHMs*yV@F}S#XH{me;Rr(%NXv+I^<$ zo&Y+V$|XmbezR14q$N^x`c^`HO$|JFY;wC$0H}ZtSsQa?#WCate(yGFW;eU@Z(SqS zG{&3HoHSmXjfpCsiXyB`SgFny(H^n#2fztIfC>&_DB zZ}`&iU6^n6PsXURZSPVblf=qK!BsC_&go1Rlv9@|oC3V&C;6DsBbo^g#+Q3nzE@%q z0C(ll=~BW#XzJoSY~T@a8{%+Ra)cjG*xnczTsrR-XEn{b`LVu1=DvjFeFfdRGJyXB zmZvzytimopI6_yHW6FUa&yO0?Fe+*Az#r-QnDui{IABb64Eh*1^Xer!XF!eSG^3&v z%3lzOOfppdhsQV5EpPOn{N;nc)U^LDbwzv%y#*#gRBLwT6jNd|g=4pmF=HQd%0hVn zU~vLXMSVq?!_)iRbWwC>M*tBX@mWmA`63@Zi?F{5k!eDd86?0pQz0yn=EMc~@LS+T ztY+HEQ--=kXHFK?4$iRq^<^CYGijnVi152QO*~P7N!>KFb!D~vQ}(( z5)a6OryWnUyn#HRe(x4PcaDT0jum~XjJgNdO{RTFQuM1yNz~65@^{F}><7Pq*b7XL 
zvBuxXbXtjl_=V>8IBXDJ{O}NY_t2P=uwTlrkH5WqIN82Q5OkUF#6rEQ1~_w&%$%%! zLpMZ0s_3>SS+>!b35z^zK^bc&$NP5F|?DR_eQWV=`5$kOgc{$h65iYY!trLPXDh-EtXHR}s9 zc9^@pas+%3rhr$82SpYPy0MniuX?>iRjeDVlLNwDp}IM05g`dR{ILV0oFF1a%U1d= zbW+r)ybFjKkt?!I%5VAq3Lq>#2D;^?ZxC^uzuzq@qf@AXkK6q6$UNeX?3S#qoEG!5 z7l_nK52PuzriL3|DK{R-@$uLjJ+0U;`c2K3qC%3?osAz!!LqxjyldDJUcq;_SvJALo4rA zQJC2b#oJZwczH(IsSNAgLFiT}@9DfP;k~ol#O`n`3<;^-@X96kn7R1<_Ok`wX?ST$ z+*bh;XEY?}+DV@BA_YM1yEm_l5s?+qAQS1E>>b8GqdyWX z+cbP`rKkwnt$a3_B&=Z)WMCBzELq89P};-_`HlkEk6Kr6C^LR|U!6G4sPZZs)JcF> z0FV?2b9Xmk%_qVFQ2a8lv(G=ev6d;SUS+VlW5EWPH01&nlV9;<#zNvO4$Rv0CwVg% z4R^0kVf4v8vQKy1W^zem^LUAJs!l{WKyM`t zo&Uu2m%GOGJqqPf0I(%|$ddeT_soK?1HTnfAxGiPv-a!lc;ybZ-)5|dWv3JvVABaR z->;L)H#RArl{J+XyCUMgGm6e1;xI#S4RsxjmOr{eD2B%qB6!CEJW}Ik5NmIA;==hf za`}grJPA0d-`cwfd~Ni8jPRO!P17WTtX6=w!_SEO8G zb4h{cX))4jV}nhmYNSBYoEsej4z{q#I8YvY2o1?=|gs^RVtOrZ(=&9 zxsZQqT;HMqokY2zMlVPSiZs65WR&%6A8dxj6}T2r?QO+MT9t&wqKH z3Ht+0?X6}P2W2lQ2`fgI=2UNmZ^ny|*wbygIh_65Z0I(S=QL)0mWIq1(-Py++SIzA zpShy45IEG5qjDf|Hb6FyJ(~$l{H~=kRkXd|j-@)*eNd+KakY_kcV*xASSuX_>x$DJ zYgY%IdRrU%)az%BW|!T^qw3nE$IVsij7t6$N8iImJrH%Tm1>A&_#Y@0brZkhWfxR- z1a2lf9fP;6j;-}a&i5Pj3dkT(mFi(~<6D(_*-vG>^`1LntnO}Ws#ihD^tQq;n98;+ zy%WzCqqX8HjT;v|BOA{;6pQ9gK2jl%T)>weL+?+Dwt(z9a1%%YyJDWLBs-Ho7$B`< zH>dPezj=K{&6*umH&h61+RJt5%u0f}jE&|9bWYWXmf>DBy*>tJQV`t+1Zh7Lz#hdc zO-{ErlL64R^+}TS?UH6_^Ze4n&vV=XZ9)t1=*O-dUmu78<4HIw|LTgGW=EjINGLeg z@WuQMq$)QR(erX`a;`kPgZ8K@{1~H#y<1VBmBpd=5J)ZPF0_o28Y7s@pgZQx@nX?S zA?LivD6cP){p2eEyGEBXj|2S~36St^TzEfy+Asj1p;dsGx(3AuwlZJ!(_cee$YReR zgT+7N20ESz@e*b!vo|96;jUG0Pm*LeqNAloqnoGGojBV|Hit@Ft?J0hz`=&*?Hh)= zoHr(dX{nlwEuoNXMRP8J{&ix8#Lt;PhG3p!(D7s_=nGl9Vinsr@AVwOKajnswGySp z=uTOvHZtzvTp*SzQN)_^@2HG;?h5`7-%``|W1{H5|E@Xt@856W3VdaTKCQo{xj4ke zbJt=`Jb>RcTQZk)RISnH>s^+nQ?l#V`CGds+eNQoU{`cW%<278o8RQvvMrK(#X#-L zXJ4^@9r-FU8qdVf`_|1Sp_q?lX1+brZTF>CrGLi*_HlClxK*JLe_N$ej!RHT+0Vk(DKkj*!DZJv6;5R|*wIHK0 z45crdMz&;A8pvW`yIbdJVQG%onAJn$453=gq;vsXJ&^g6@mYCeo#cq)YbGwbhv_nlvvGZR;kYnabG#WPfwdoVWNJHQ^{0<8U3Tgtn#2=80 
zhK{5methg!(K{bu2?yJPfGHFJuL4q?0FPU;T5VCycRPMhBgzbpEhI5ld+ph08CHwc z{0W9%$+~&N;%X25%!zu{Cil;Qeb)FsQP{;hw{0=y*%uju4MKVvJ0(Q#HpkC5A?a~5 zQb(K-?bd1j?X%@OmG)#l((O07sLtfP7dh5cS1tgpPWEyyTEa1=O^}wPRooa5DxS2g z^K29+n2?F+StzN?eVtT^_`@&bS=tjx>)jW3%3}2KWDyKDt|(WNgO~bieyASk5`P&l z`N8OPzo>Cb4$L;&{_@2BpKzQ1|CcVWTd7+882Ek#JkGUCoRF9L87 z7AzvSpnm6f7Z5Z=v^0qH#-dur1@x{8VZTI1=Nzu8Jlpfo`Vb<|bFhzg_pGdveO>RC zUU+H26a+?b?)8zU&;5qi6g?wuD`X3b+vPeWi`;Ue#I<@)#;?eRpOWLf{7~X_VT+93 zfzJ&QfIip&;dXvN(8*h>$~4!EE*l%mNsw;^r}mZ|$mD##E9}aks=n|~Dn@32FMrPL zMP`C#!v|T$-9Z+5(V;+PVBt$H5Gc#Lo1^>oLH1?*pQ7DXJQpAj=raM0g$bZMY`Ikg zQjz=dQ2&Aqsek$7VQshM=abF9z~2A=f9YSJCeVp@#?U9DbHj?j_jDDFIStm?#d9CR zbte*8qKpQ6a#m77XF&XxBg2e*U6W-nO z+=M}Yee0Ru_RmeqX#Qt(Dl|uBm+XFQHQP%-FI>g8GVW1l^}9p!&2Hh9_LarM_&ZwW zsslA=fMR1j!fGT-gsGWexss85KS~Df{L(pM9}veWWW~|0y({Zxn0cHC}YD)BG@_5zG3&!{{-%NJUb&GfRRdl z!RiZGx@H~4|lh>*lgntVZyDobSG(hhomgUt#_6^ zc582P_3I4cXh1-A=o3@Se=VH&X6gFIVQ%O)Ayzh5vXt; zpf;8gyH&Y+%|K~x)cf4l>v*T}W{`89&HB{aPG8 z2ZXL}{^{nZK`3$Qd(9)^kDDSN$j({;J>UzT&FB!K0UpcrLK1!r)rx(r7X&&(~k= z{#I$-^=EvNcsig$^ ze&OG~``^FZ>q7Z{q&@?zoqC&4sp0YkQunj^G{wKZBs-~;n>@s(*kaXLj{WUvQtNNU zAiBse22=rpvdsi;-c>IJU<|=O3o4!m+&RdQ!c#>>d2JNsGt2J!bir6JdGH@I3Y-hV zo{*Z~6mJcFeRMp-vkl6Xh}gph@nlyzm2C{!`}bxs;s8leb^_X~`TF>liPmi{{Y8^b z_;L*)drx+_=Sis*z6-*>Z-0OOa$fd{@3s$!i&3t0$~<-pwO>}jV}+RMZt*t)LKdsu znAg?Vx!riXc?wx901K|@t%4zZ<2)m=s8wFCFFOfO9nvRr%jF#i9(#sw2d`KQFt9F1wCf2ZCO6l0y&GuJGfb0Vc1RR6HYu9cXQMOc zJWb~t7#Ioo?z^pW$L@3G{~k$1pGkmXA@R|dvw8I=8hqm2s$gKG!UifGSLPEpeUyKl z3-Me&j2}5YurrFDT8&CO#d1f0({`ce)3 zz4(fMwdq5P6jFh7>N{1Mb1EfV1&XjGP{OU$$ zv{8mb+El;vjiO$MG%E0rj%{K^& zD9JxiEMAMN?TtC2vHttNC2mL@hD00pyAvJTJ#)}8 zvFa7Nb#=eZ5pWIcOPgcUe7KqGI%(a_X16t~U!d{_RxUjEv0QRYm7@5sgZ5u{(f|8? 
z$=pS0RElaVYmh5g$(V&FO>2pXurpo^kwaHC-}YTHC>FJIoWX((+ZRd zSLYd6N~z4DI)4N|_ArFB-2HG**s6@+xGmo}uu2P`ZF)C*5Tj_moq3^*!aVnMk*?Vt z6w_GP5KcuKk9W;$o{7qcrEufuH=k*cz?hk6j?v7EOPy(Qea$B2f7So8L@&vLJw4uS zVy{NU44;K5RVSgjdld8)8#FX0S+MpDbh)^bdh)Y*ddP#0CuzyBb?Iis!tE3BVE2i2 zga5A(gziNXRLLQ+a*jRefd0CTj!nmz+fk;1*Mi%@ly1fc2&XD;C9_yn&Y`moq06#< z&hazZpWMxS3Dv-+^FVd;mrain?^wWh4`W8SH8$VN;+9bokC`z{6DVCGvVi0KNE^>9 z8S0quzbJd_xG2|mUEHODgoucUw6uVLf2sm^~ch^u0Bm~K!rKB7hK^g{-?ixaB z5SRgm?vCHP_St)%{rfJ~Ip_QD3p0Fp?&rDh>%Ok*eqL?YHzn$;+X@8CeD$3lj!Yol zs;EO~v6C(w$SA!u=MkT7Tk=4jr@x1tM3YWj0#(JYs_N=a+X=<R*31MUT zwXxT6nD%_a1?+hR>6(2|wDP(!FY+}HHO}!!4N=)A+W%Q$Zu=8|{P!<7iQ$j)*G!E@ zMYfjYk(-3iviUc~Ww0Kv!i<$aeYfjLl7(YLsON{!YD+S>S4spw6cl$kT^XstiAjM-+2Fl{nwzapG0Ix+0_R z)!eUcIwCI2FZ0>yadm=wF08-wY0#S=Qh+Z>$F#6?qR%M5w)kVeMtQ_N>;jXxGBoG5nhI5S>f}JvUE;u-;lq zGe`IUW&HjyVT^+R(eKV>#5nC+BN*B;O4*x2oSd=?KFA14Y#sU`QU_Lx zEGv_dz`GfH8`H~AbV))g`6C9`OA&M)?O;MZvt#GsdtHKFH{z0Qe0->wV>&@&sgR$(D=ach110mSs@(52Z(RV8^`!ZBQ^8>Dt0)3-!p#?@BhlkkTi)BcSyTgZ)jr%`vrY| zdMVOyb~+tNJP}>`fV`H*fv&#J8aLEBHy#$PwAA)K#m+2aGH>-zSIo)yGmG7*?pZV) zDuLXsZP3;Ax28l`5e$?Vk24{!2LrZ9`|bgf)CZPKrowh6ixM*K)1 zYJfXICX1B8Vrm%#N_|A}Zbp#nRM`}F2nOXO)Ss;XC4nG1#`JaOS>gBgdJ-plL-NHt znN(~Y81bjjO8S9tzaEWB^&9+#vWE zL=)L^Krl3-$vf*~_M@x&WpEjdSehY)X)t0vD&En}fA0k6Ksj0DD-E}3+kJGb^<+k# zP?_frY2Vk%K^^rajsS)(5i>-mrw?HXLD)A6fkcoFmPH1#EB?bg7utPxmXb3M#j}JX zc}s;0G<@}EI@0hGl|DU6?e#lQ115zO=3!=DrMWrnMkgDU&7HzekUqOVAN2S2CH(BB z_Sx*Ba2(%gTs*lSXrIhsAY|Nn2hEtWm6w~0|eg}yK5wWQ#iO7AQq!y0EtIp{9wtPRs6!nZH7h{~{z=0-6@Nf2HR2_V* zvYhp9Z|FzYSK{?1u@jW&=t_hV1L4_1adfdq^HHUKy>Erh+QUXA38DFh>{PY))6Zbb z@`jw$SBif={cuFBPm0N87_GietSk0^X02jpnkQH8I*1=xkBYh@AW2`?@8;tXVDrUW z&$zkVr3C<8m+aM%=us*4dFR!s8~))lqST%y9d&n2xrpjtd@)&6-B{gp=4^BEAqzXviW$%R{ZoQA*zRrUv6=sOEg0xkpe6KlH&1_tf3~pbE&T4T%*D7dR+vI`zXugxU4}!f z@?N-@V$M1|6vy?hG#e=WeBmSp;`>C@_29b8GMUQTYwrc*lg>F`^AACIlAIu1H?YOa zcU{<)^}W_YU3s9{a8XcRTISO$3k-Mwr~WYG0KY6v@M~2LU+*{0p4Z(e*9~4CHK>!8 zYn}R+)rr)M^-)v90wN=?1op(6FM($zzZt<_mLy5T<`e2p7=J zKH@X*@NgnmNUtVB 
z1~wuHcOT4j^qA@=g;J2o%hb=a6F>(Quib3eB!4^9Gn_2#SvSV;$Mfzi<#>H5)1Z*a zwBw80H06jDFG-i@F8fFl-!7sfKRGo2#U-H1`j5i=q3it^RJ#1?`szbs($w-(7SSKF z^hoK2Hwz1^9*&T-gfM$Qz~8?3+jqE3HQnfnZF}7av)?7N@Fv%}G$4^nc_h_x0VCR+ zF@#|h_;7^_ZJTMhtaU4UWm=3w8`BQQP|bhWQAe;qk{ot$ciiMH}46Cn_2Api_ zqCKmv?q~1^qytI6VQy*2fpK_hC9)C~32iiRa1zQrY7B7n+nG`@^V=hFIWwmX1Pga? zddJx1akA#EApQZIags`?IC2IAHModm4cFRDbEEqNT~`LuK&oVF)ubHy0&Z&~R~IM! zd-k(6e7w92Of~RaTIzo%(pTW$srmUXQjWi=MzJaZY~Xqrr+_OI3rVv|!8L z5V@{ben$f+hapTpCy{AzI%<%A^G~bZIw^r^MRbc0tCg`^r26B^D-m!NBGl^ zB-jsEqb6|wugTP<{YS)6w^v4UOsz#l=56b-VhFDb*D<;7UnYCmDOd!lQ-`i6akEp@)Ep z<$mf#wUm;8=&^sn!fP3Ke|jcwW~{as zHoW-myxjHe8xy+Cwya)e+PsF8)378mp1Z|%sthbM@^8^+mUo$psEoD?@*UTUp$)l- zrEblPqCAgjy;BxL*G{wd*976hiqeKxIWxhO7e;I`g+T+`N%x}Mn#y{W9}trW!ZlqF z6>!xy4);YsCUPY^u&h+GuQB60^aJB_*gkSy?$SKV(rjG1QtmAr4o%M<2E+@?u^twx3+vVnmTV|Bb=73->zAp@~UB^#>lh1vm; z6EVo#g6@v~$rGw6f~N1Y@s8e#F10)ISxxvv_ABp}6DXqMNfF-lzDs{T2cbxMTe&!X z^4?ZXorbVt5k5m%LEGZ3uq^+g8Ml>z)FPaD__okdBdh<_(bOUDZ4mlD>CS(NC-C$< zscg6KhY9=?RJ1CpVO0rQBY1!^;)%iQps z5}TMEa=uQ$PWvBxr2ORC>EV!{(2{3u#1XSxpF?ot3+oEyfEFZtPntEmN$#kEcDk0{42DL7eF=I4oD@e=C8`lj&Bc)4Ba-IxF zuO^?;>7$Eh3BzjL-x_F1-Fb&MT0(-gT_GXkK?k~cAi}?Q4iXDbKV3&d{SKOkHZ*o- zsujI&tgWr-Wg_3nq-2fIK(&AvKTQ|z-;%Zb9MfDxwfTsKCts5<%FUCedYH1kz)%mob6ymzrk8>Y zd?VMjzZhTI`F5{wbX`WK*$B#as=!*z@=KHBeNDVwr0;w5{V{5g0IH!#%^bwmnFr`n z_5gH0w}3iR9hO^8V4|>E7M?@x47-^F9Qu2X8hCZ#CY!zo=7i2dN3hUe+S8ZijMqu8 zv&gAsHyXuHgug^;IK1nNb;gs6CFD{`*HX#7^k<%z07~e7jthw@WT;NVIh>7?z_u7kXeJwh;>Kop$jnV z`4i|gv*^Me;<5^*Bj1cc42<9fpF*tngB*6NUYo9JrKm=*z>+^JqLgb6K4W|0-x92r z#}XMx|2^JNZrs&s~pPg&yv&{oV#R>t& zY(lXT+x9?raJ0Xbsp}mK#E6VdqZZYfdIqch@=>RRS2rN)wJQYd4%wJd_nnAg&^u(7 zgMV{8z7lghQ1q;;ePye-#z>ge4fig*n((>IIY*SbPjj|09eT_)E-x=MdX}!QRbOpQ z_ZMihsqRi?JtlA1f5N>JH>ez-V;rz}!R{#b>u_M=807y-@C(X5mB4YDU#N^N#VXzR z9?-)RzKxDuYH#iX0+T7dff2B*vcYHSb4FTC5H!8pma$sv9*T(_q0qKU_bLq~^!LWC zP2&#`;eLNl!1sfaApGSR#kMJ~^DSu%Qh&z<;&Vnkp12Iw(qc;{AthyNB%1m7fKW4< z4yIKY$bVb)xjl!iS>6>^%r%?dvM`@`W|mTYH<#Mc6rWx#>6x%D=Ih6So=k2T70yEJ 
z%d@1|sjmBzk9sREcO!f)n)fO+0N0Fz@lyY9wKq0j*J@X2@vXDXuAi@f%|~2! zZE4t;H3gYR9Ike83#%ogeF**8Yf}mQ1nDP|1T`K(gAFf=Jr$!!e)e2fA1Ck7o%OoB znRtN?x;iEY$GTNxEz4)j)}4vN!h(pRkI3a(#v3l>_xFg?Cr9@a9X747Zzc)S+i+>K zjsVzwKdimlP39oIv7MmLSxn48{1v3$^s1+RA9~P4%V5z>_ag6U2RDK1ZP`b{kr?Mp z@V%oj56@TzML5BwMAz#$GOG!SnUWUV{8BU?2{8(ghVX9gDTb|0EfCKJ<{SZ{w zo(*h{DXIt}qvF_?$9qvrqmUU9&jD06T%ZCoam|+iP;`s-Wk@(Bzt| zA7YC?*;J8Fcy65vXd2v3H?P%@Ax4J{&3+nWakFLRMqDboO8W-)h^Ef9P}wgDJlT4Q z`Hjv^xd(SVPE|G|M2w7dmMGn$T%yO-S4FD;C2A?&^{{=#X(m=H`Al>u$-HIcoq2F) z-xUMlH1E}ezEV`p&M%L8c97hbKCRzl%zD@$uTle0?byBL9M(dIUJt{|&q8)CGS+MF zJnr|buZ25O(ztj@t&4W`s8158YoR9yQh#+ zI;9aSf^(weH(#8@qW0t~(1+iiWva5)Y29-E&SQY{cQNcrR!0m&Sl5PKHgsD2==6%$ zEC%z%Z%(oc!ehkF3v5;;lGCG}K4?A|5_W_iX-eXpVLfzhX8k2STT~Xr@oz*u`R+wb zSW^!}mk)y=9QPRzC=FX5A{F31>}&Bbv4t5!_XF`_RTc8g(_e$qcc<$I(vkffasgLA zh>K9lV~qI_=7+c|_u`3xTa^^E5H@2fREbJU_12L=wam6sE)Z3|x-}|jNfWo#XMA~$ z_m8k}1jP{ct@*fGqK7co=SbeU?$`aI;tBC5QKGOZJIx!& zpQytwk7$lP{<`Yel_qYN;DLHHLT0~B075K6+t0fzA|KLt0Whu z%&9VIJ!|+Naj*|lTI(YZH}WR=q2gG{hZ`c$4&g z@+dj~F=yZScq8d7kCIkQT9AE&KUSe9!uNM9E+tLvE-1ZBbVAVVrBc{9@nkUD_MmeG z>_OOcvr&j(Ch3#h2tE3As%qu zy+XtBn+TTn;+Bt?UR28PyZRuuTyyAAfIL=xYpUoub4+6Y7nQ^>D0_tcF8PQ zXPp*``C>KE{lf)9p*!OO0Vp*`Kbw%zY>8#w9?aqphxeamOC!2H*J|!G7P_Rl(>(r~r?nPG%N#BwW84UzVvC zAA>z?i*2sAc6#<${zIa05<}GdbY@?F2_lWsRm$B6bVz;8-^PC0SEB$cK81J$$f(i_ zmd#Yr9gv8lOSZD8`UC@cjV;{u(Nz+9TJRX&nSE0&v9~c$*5>1>on&2#o>UUx2QonnBCnXIujK&k4ORkA!JRFTi(x;&Y zQ)^4u(qF2+18B>+y0aJ-dY4&|9a;mVc0Glnbebr){!*Dqo0R=|t2V!2z{)Q0x)jHx zJiUXGn5~}T*Lb(BJmlk_H6Y5nmTNMBGc&pq4$jVzATIDAt{eEQN+^TlxVne{rj!Z@ zUF9!7nt{fSAhZ77#M>1%6Gh!L0hcW|7cK3|YuWbg24#9njUHR)gO2{h7n)!&cpuf! 
z&=7&wMOcEywMwV@0}YcgH8r)h-I%WDD0?C)LD%wg!z*@dj)QcWw2W%`^nwy6R+ZTI zOz6ROiDF;YD-{^ADxTsRWd|zYk+%r72+Qhk+)rNezKEC}R|jl#?ILq>?d+CA?ZsUB zJuL$IKON6ajE5wjFB}g40jW}4O7rKgkLh{DgDy@ztwM59r#a-(p^nm4xwCF9@XDO$ z+*y<|Lk}kLe06gc-^0&WdtvJaAv#-}*@6>assF;06F0w9s@81DKi<61qI$(nJ1GP= zTYQ_Vaea+cBH&&h3YwZru^0tHG!fjgtK8m1-D3T9oe{lNr zM8=Hb(pYWzxdHnCu05tX>uSQ~sC;f#%vdG%Bl6=$k7K}lai?9Xc{)`b{zE2OSGn&L zrHl-#ewuJ%T@_u`Odw=kA6Cf|8A!&4SNZ1E$8HxvV9US7yc+%qy87{ux*y$B zjJ?NMq5XAepHhUyl1wD2e-Xa*;ja%+KI+zKrsE(5|9QzEzD}w1zdM?Fw%tu1Sxq|u z#vP2)=K#pXcggPI?U(P_2;bT#Xu7e(YD>lJ>5BgXPP2}<@<+d&l9}Fmx{l2lO5{ME zn$_exMsq|kGkSo%%kzNSd!>JG_uPz^&w^pPiVX-E#)F8Zrqo^;AsDDig_9nZT!u)q zviG?;cOrxaQ#yQ#jxSj*%!M|ynowpcH9Nh@V{JZkxOxROo6u6znN()q5`-Ul0H_VN zd%=-hZl^ci?n795BAiBsxL@kOHXt$gdIe7irbQn4V_uRkHPfKpTs_(1ipBTj zp;t}78ktyFMg~QC{$4HrCvaF5*+&wtsN_UMaX(c*ZibI@E+&V@OF=A&ye}~=zDCks%@t{EahZ0-j?WR%%b1y&^}y)P;JLCfnIK1`hN`Ih<>}tS&GiYy zuct8yI#rMKJs3nW_AHi7NY92gQZD@)bu}3$_SIbC0yZ%Ot#P$;A)CIP;&Omu%yZ7O z6@<&%*O%;-q$9B#r?yB=s6HPaqiOu~@fy^vL?dDL4ymYcWh_@ZtnB|}Hx zBxkg@51cA*pBM~|>5J}zQ)j)O8YpN^Q<}giSB5lwds}S+`M~S~>7Cgz_|iWxLx1)w zSxHI5-rvwYbttNIi)Q$!L zoJsk~Nl1F`vO6WW?5I4puH24IRKX#&_U#!ZgM1Uik15`*#?ZKAH^J=Mt5{0Q=YFYM z7sJdVRbw8rEm5TUpMwq0d)`{1_HCd*o0JN{X~)?>#@z5_ooM27+d&}>V*Y#P(#QW|4!G zPsQQ2u(0qtuXj*Cx~R02zzs`s#AZ*PbvFG6ws)>kJ(5q}5B6hWc)E9J8Gl(GlcCyt z8U2v5JFeUqeh|AlTezEd*k(zos=(>1j-p?@_^dSQL++&x8FtMU!EH`p;e*Ii06+Am zOXzNm&lz81wBHXnK?y7VetjB;pP67tH-aCNeW^rydW7lxRXKF?rY4Oi2lfQdnOyk$u#JSG8=_Op*tbE|N3WQLF`jvB zRf~_cN)F#x$oqo}BhYkLL71-)kjJz;y$xWX&~(F=+_==o#T5J+XW2P4TvIyDdStj$ zOpJizA}^r~a6`O**e|Lw!D=z(W0wXPwotU{Z?tSaG=T%ljB`Yok3-9>o)r3Su;>)( z(rIoUP;>54gJHz+)-#vRMZIgs*niM68s+kEZ?Hayh7Gc86RdWS-PWr}klv5w_=Qzv z2oD~@lol6%-s>nWE$vISwpK&_`A)FDe+!TuEw=!AH<<&x{oS-Xnq&14`xujLpBNTd zp&UVyCE!fg)oV7hniU}CxjolVpL#Zfu7a)Q0+3`qT307{W6HV?u+c zvogABmcE`rXhMb)IZWyg99>CJIA4p{q?Kj<5fu^MuBe)DVkrYGTh8cmrTXaT0@79U{ zMvVQN)76Qslj}55@BO@WPeb_4fB^0Pq6c2c1Zo_Wu|FVhSH8CJ^aFI(e%o}QCqJ9C 
zdrV2Bc>PGu`r<@2C~;2}A=7x$8ZP%1(oWU(idTxnbeHl(B>ny^v%`1rwgRsgy@IN$O?H%P!C#uzZ6ZVLmM^KP{dO zcsmwCF-n+Prk_vaTyYRD&PKlosHc16EpJv2L(>*Q>UK^ap;?Cm-TNHADn~p7+-+-s zwGFY^DAK5DomXR&z(xa5NXJz8^=`&hw!Q6r8Kc{{e?*f4g|8i4yt2#_e81^_4)gB{ z=37`4OX;s$TMzfvRPO@sJTNymj|9E6WXE2~s&*@(x!w&cB2G$3D4;V~1I&g70=P+k z*K}^%=6_{Lz}a{T1?OubU035QSUv>mds}>Uu7axNf%qhiL|*q@=v(He^gQ~Dpedin z2z%)pm}|KkSZD!gA;X4T_>=RW#1lip*Wc&2jq^N*DC2Wx;!}57l2;SseG-eIC6QC_ z%o)N4F;uZLp(E1|dPSQZO-B1;qJ7pH0tt=8cN1m17a%%@mLN>PLF)9Va;ZITs`8^= zn96|Ny*0Zy)br826&rw@&J@jRKcriLC55~O~Y}*bkrsPrz=}s4IV$Wca zIn(c#QXG#Xqyuq=tzA@LKez!qLb`p}m$p&@Qf8Jq=Wo9iQ=SbEA%|x28b+meV$CK- z<$Eylk@|C*$-n+;Xml-993v9rMCG1DV%V_hu(*T~!m)X45{A5@ZvHYU0PchJA83NO zxFO4tK@*ghe`22T?kqY|6=xCmdlq^u6F*3?Kz`$F&e1r_XV93cKgK$#A-s&1K_)q* z*LMh*WZPJdB~mIMlP33cdUfv4vpZh?64@aF1kauV?TQ$~C#G+}d7$qK;vdR>i1e{l z1`lL6^?vKm`bv9N^*%#yVj`r9?rgV+w%+zRXyL~yev8JHdEiqc5~yo0Z$*ugNWE+( zC&(?^JrwN~Nzjh!%$(2a;7(O#d50G7uEcX4ivH7K3D@7&{bSGSa(jZIMy^1Jm9bCA@9KEObTn86LpMV`wmYI-zIZfDy!dOR-)x(#;R z4*3k8S=sDui;q6>_O`~NR|d^JvB)q~=S)_K18>Ksei+}FF?28+$7i~?JV6!3Ziv3r zp7~hG-e(mPcUeosX!VtbB{Zpfp|rx(z>*8tD0oQ=TrIkNmO z)l^(JsMr^)Wy(I_%71E#Gd>tuL-nPYl16M1xLaF9(m_BB@?|L8z2rQ%ymG7BusPlb zo$4YDG7Akg^|k9KzDqFw9nhaKbq# z_Cy{1;S}RKesDNaqgMVI_x3Q|_VlX{Rt=5A{za~xcS#?SdFY3_&8Mc^01-r1qxp71 zzcZp{C6=<7Qod>pb)3qJZQfm&bYmqcso>kw*j~(Zzkwn3q>qK{@Ui!_yv<#DW!~69 zpQ53+obol5M$&JKo+0Ap&E~$|x1jvVfs@DZI&-5-Q*Hl(ZG+4m@yXfJU6k{8cKaSU&c6%;$;vA$r{ z1|ko$`XS4u$s*;e9?ft1oo)n)T1Hf35wF#8(`j`_eNksKiy60g#AkH=r%QcNDLqV? 
zCU20eG$Nj(5TSUzLF%_n#kQuVo{k&efQFgY-(PXl`E$IRf4Dx) z&>{2038r9)5PorMS{o|8bFn(QdaP7;PKFmrC9GL5?e1R9s-q>yTB}u}O)C$bV~k4W zv1Dx&o#J4z4p@DA_5_2$xHR@hQP0%$ul|&}MZmZ{SH2kG3xu{StC3R22*Rc?&)9Sd zOR-X&^ES6oMB$5Nd_^Nngp9h4%rH;Dt4d)xCRXY7m}bjd`G6_klIaIR7y&F=pb$Lq zC{hUC3v@sz70YPl#2qb%r{arRhYuIxl#$Df>h`HF{$ER}Uh9MG zo>6JZ9OiO>JreEPPj_B67V%pu#veEywXYV)fDM-OIvH}`To>Azhg$3hz_6vndIL-93+URZhOv z&fV&{oFW@M2q>fYrTLDW@U|5l@x;ox+HKrHPJ8?A>--oOZ}0=$B)O2Lo9l}{LH;!R zu{`-cx#8+shxwuz-(TNNisVQEmc^b_C5Z91qjM|SBk^15zJfyT>S1Py`TMWth6y@A zpFXmZm)djL&tcp`BHV4wL`eAUf=O2xT9PvLiCvDPsN{-`lN=ilE6Hfuhf;#l{lv>kbp^tVYVihbkDJO6-mB7!@6QO+{}5!A?#yy0_ziyw*-5D z?LBmi4cW{dGOshyL=9T&%&cR43K9OcpF~& zYnO#&zJ?1hGK!_JQJ7{Rt#hGkrKp}yz7#xJ@k(`GzB4zk$Q<~144XfEMu^p$c?IV! z9d`T{MB9Qv4aM4TA@by=IU!f%towNB%#IY-85kYZbF-P>l>G?L=;vyWp=h?97#WFH zy@t{2^B9V(`i8SkMxg$mU0oM&53Jg@|Bd_3 zs82JFhm@YWX^6I`$m0W_`R4dW>27JrhfZYdH6}V%v%~sGN-C-5rCpr*{MSM$mtXpx z&=#97exz_$y*G}U=B?z+3$2-jqbZzze-h7#a|xCXLE32umdkOOwrxJW+9v<6y8w<* ztotDX8EQSxr5w@IjWa&RR8*kIdMRW&_1622*Js#6t*7ctjvpAhyK*^kcA1L(*Od0d zK<>k^0HZqVbfUkuI59K=b^;G7<`f0^;vgC+W{DqU6NHs})d-_vK!9>K(DH z9biE1-PN6|>x&&2b8=FW*cmGR;&{tdfHE~TwRXY(f^VuI0C=h9*f-@gnaJlR)w&p} z_e5$>9qSfD8ybYneNjDee>pN_28muP{ehlVXhlMh^9c502JXh^@{yQ2ZNYdR-KA%jZ33p@+Rb{o zJZt;iuA#q4iMch>^pq@F8f!Ta-}m81rmJiq@p%5;8{WZY#NHlFN?NRjwW|zI*yG^a zk*P!s-uDI5N?FeE!Qr4*RsVhB39$dg>_YQy8-CFX44?=NW{C$(l^OJ9e^wDsh?BTj zNptky>m-#|TL5+$xLA{2zs_mL@TgI$`D`Uk7!j9!lstr3u+sZrMUkT( zwNc&z{C8c%sv_ADu?~dxibgm@|HjTKzVAU31$epA&z=@PV)x_ROh*nFlN)*zVH{6L zf$6ny)Ce@EZfa=Y{K|lSS&6pu9`r*$hOxJ`Rv#*z1vuYAke``@^N4_+@vS5L)lR9A z(e80MbMyX>JsL5=v+RurZ&lV9)AG1B4gnnqLh2X(K1~8ZuF&4_V8cY*`J;@vW$hzb z@SM*Y#VTz0F;~{3xL@d<0b@+ zR%8;nIz;gC@%5QnSL;IWltkX2F$;^L0I@Ek;LP&!$i?s(J%J}D9^%@Wx+vkL1_r`fyRq-TFBb7(F`p>P5Jf_VVx>KS7&h;?l@vVIB?EnL= zVC-q<4(UxTUN>mpFNq^mHQZ6tUHrQGlmKVt5UK{9yjWPEkx`AO=bFR}Dud$Z=Ie>o z#b=-F-zBLWCR{rcSFe7~@cRhQYa=kng2jz}nG zWDF6sWwlh<6($F#edd!5?sx@$w+$&AYuC zQv_aY=?z{*szB({bgHL7G zTTTW9-Vearg9#~;Q)n^-+{Bo0Y!{U$0lN)>4E5WH!HGaY5m^0I(Pc3wUK+K$<3ypH 
zZct#Rs^PJ^OI>(0Aae0zq~6#Gq1oqK4cAT@n<1kv^E*8SwpYb&Qh+h@>DKYbrS%x! zAJb`8yl-`!7sV8=peSU+0DS9SVMS1FK1G+IdEIXzXh#2BMBX@ruoM-5$&qb+=(%|46p9k>Alo2tH{< z&iM(5NX6%5-g6<~=4t?!DLy>9wx*^i$lBV#S6E3s^W}cb3k=M*Nqm(pQy9ldC zQN%|}@mLq?w|=>9$am^8~1dVd#)+v8NwOE@5 zN-#t+8I*JxZgNzgl|#YHXQNVXGIw`q=PD0#F=nMg${Q&xhvgW2NVPIBm z+h`2ld8+;0_~%Tu?ZLr;sbJ=W4P(Avmf!KTQ@nhQy_8gtP1V=05+wE84I35%DcN{3 z+bSuqO@O&L8sA+>8JS<6uA9JUGR?!>za5<0Gdb||_Z;4)(3ld<*B5MMU+E1r1$F12 z!&NxtpJ;O6Rz0xPTQf#|VpoAp$#`SbBxU464J9q+%;U#;v#6EV2q@DMaH4(vy?NQM zeOV$TB02yAjI-}~r0cxmhe0BpuX^OjF-Q6>`m;G<&AMiPon_4_!gpn3uFX4`WWebn zOyB$3R;g$X%%5%~!VFwd;B17c1*`=92KA$wd7cY4^=zOjEMhlrQxr#50(~tybo?D{ zEtsINer7?v7;ns=V%;gNX*?W{AW*p~gmk3wDEpIxRaJ8QUJxMvT#QgH7W~6PtW`=R z(3E(38@<|OybEM1b`}=zrEq2d)e>#XFsB>QUyh_3p36WupqWoyRUF&A z;}9Fg9QKfv^&}7tLwNPj@b%>3W_2mR#gY2WA3Z&d-ucv&=+naE_$$Mu7YQofYY|92NG6 z>d9N@)<6FsV!ab@X0ZQ4m=^?coNuVk$;sJ1{HPBCTplbeEX>T70N?OTan+x8TzYoQ z^Y(e9|LvdnpBqD9^{zvC&#PkLhMpv=2}E~sxF)Q&qSNP;A@|+-t_THOZybjuL)UOQ z@^$HJHjXx{m`C}{*b6mcZ6Qxy3B_lgc+n|g2&I9sg<~y$lxOvQ$F3buM{ljlDQ$;S z`fmK6Udg%)6_YzwkxlPyUdD(>QX7;QT`Nv~bV=p~EQhO+sv0t=8r${_auz#_%7cRz zOi~krDpPe3po8}s)Mo=68$Nq*{{SZm1sj^NC{){9g2cqb4;+??X>9mOdd7NuzQs2H z_Lcg^@AufXp!~jICP?Wt`rz}8Iduw;_CWGnH>K3$f7psws(-oub_5B0WZPB5+a7iE z$NHWV&Cb>6FywkomgrOr$2eOM+EXk~a1s2&SNzYl{Ld9m#|oU6-839kHL`kd($vj1 zy7vsW=JuIM`q$IJ!3&Bt@ZDj8;o7mig17;coc9kQ_T)5+=mvR0<(|k=6-S3Jg<+3I zd}@ID$gW6(FTb`WkbcstH8e4t%GnH~AL(rAjPNti@73jhBOoO6>xVQY(etV=)r5vx z9bXFee9{?|lLepRoJS|es>2&Do%4Y4RVo_M!F0PXe#xvVV^-j}nzC-%`pu@ez1}5Z z_gJnhyzq&9hQ>rZ+u#A(nLpZlT_p+QJ^{86izY{`zh>L*4c8sZzQx#Qo;-F`&R_m? 
zJHsLI)d=sk|C?W&JH#NYe@wZzpq;>G#_!9obC0bhLsQ=;E7xGWqn$xMLRT56u<6nDm1euqpVNLE&|9TT{0L$e2D;u&#L;7BDhkO}C(jdp~7o!CsUbc{{xRvRX{I0D&x(_~kv3#c*ccNBNKJvJDbWiscNCVrZJ0R>>|y~qCz(Oy za(T#^*)2LQbU*Q=l!C6qbGdT{52(2!^+u+)u~%=U;vfGhF$GMe&AJPh{6n_*hmLd@ zH-XY22Sg(*5O`m{!49gd8Z*i7y=MGEU1xR1BQ}mf#9I499b?>b+~>JR{iX>#QB5YG zXbYydMYOMsUV^UD!B3KgM{)^T7#b#9o?xE%2W&(voaY-)TUJ!4m$1__I?>@&f>Hw6C9$eBgtUeCwnYaETxYVy%aV2?i0={&55=?*^g8cc^L;?D4F4<_ z^$|<{_!GOF&*<9e`|K#)gqydmf-GZC0v;`OIW;RHiFetehEXDtN7-gF%`@o4NKo&7AobGUP@3 z9vu;W+x9yYUHbA0Gr$aMB01ZPOf@zzWwN3eFwp&0_3Ri-rS9S+;)pzTkN@?tN4mGGMz@0dXKS9)j(miq933OjS#We5b=v8ArrbsujE87U=lkd+K zgNepSiXR~)2{?Ddr`6}Vx?))w)=p9SPk;jT;R<;pq3?TIzg}QH%CG`IRkdHpTIS8gpLI3Yc=GVe z@_L?;P(SE9@D}U~SqifHjNH$5n2N-z{GHd+-&9S|B=M-&e+=Lf1i6x=To<6WQGcX$PJ_dfU#w6&?CU&9)Xq@55^CV?^srnF)hdm0Y?=e3m_hTIbW=gCilZMBbLjxI?-h(zlEYu z@r^{}+sckSvLWMAh{)L#X@;KJF$`^Cedwd}A~Hd^luKqTR}ZnYN_nhq>zXmyk7P=|F(}9bpCb> z+G7?hKD-a8pII-j&bHe9HRF)QF!Qmr4r($|Cg)nOugYSUn~m-}1Do&FKZ3^EZN|QJ z@I0h1#b}tSYb5T-JxsLYBX9Y^!jN6N%%9)YmsA@tGirLnu31Zx2o}FzdN%ezK(SH? 
zISLwV$^hxy6GfB=%Y$d5<9;uEgVVThbXiIohlYXYlM_fEmBmbQbDmHO< z8pk!8-!sxg{rjErSg2)6-KWf6pN#W)8sLUNT5~&iSW9*oaMxO^&hz@ZL#6iMkcPPr zLL9Ow6XB`ukWN-WT^8!aP`%uFmQ9=2&CA~Q==nb?-{-M^O%;vAhvfb{nJb<1ZkS5V zSgnviH{An`55Fj}wd==c z-v1wEZygr(w)PM2jRGo)goLD$(jp=q3P=e^cSuS%149WYASpvh4k8T$f^?&F!_YBw z3^l|sG`wHW6Th>c4QKoQ!*$_+taYzDK6ei|fpN`nxoghW>fT7)?LcPIO!(i-LW*Vl zh{7s#<&q*N&6h_Gh=0`i*)9)s;3$XhvS0%fi`qVrE(1M90?`V*(JuQZq7bc2a9t)P zFAcNFd3)7Msq)7n4+-XMEVJad@c1Z#qm8T3*K1L0k}hed?Ywh1bVAk=(>Yew)l#4LfJg2=;*Ezl zefB8IYKKZ|oo$kdSMx}Uwu5(gIcx-IEz%txACjXo9DZnirUi`U+W*SwjM92I41L`j z+0S%x$cnN09>}v2JPf2lEkF2m0P3FI^tPL=yQmv2uhqizVh_X?^mW^LT@9rc}q$n@A4t(tl2C~*Ap;z{T_BZ@O9Su;!DeC_pN~uws zgdgaXUfC@HfNQPj#VXyTt>!Z53(dxWT+mFB=rfkw{BBhRvteYJkhfjY+y0w=Eq&}9 zWkOzS;ZG^@Wj#mzu>=txf;DFC8(1%D;+JDIg*u|5L3HK9HKKRo;cUZHnen%%1?o)OUwdI6N%Sd8-6qppKRq_{4HdK;0dQXV z6uu!uO?ze#kx$W@P_cfCInUoEzJdw<+*iJiRez?9 zbugd1`Q}Y+Q-O{Rk}eSZq@9qHGb%xtT}k{(Cl~0n-JnHcz9%(rUV)b{*L-PRSd;kA z&Uah9`Go6_<}Rk|z`6?Wz4Iui4n)5@PxlJOOVhbpSI}BjUla*}sO-YNy%%uFNRN$J z$w)v~&*pnWYy{02jjGh_Wi8=>BKv4j2B$vIEn-*`gY0s$B6c|VjZt~#0RtNHv+aIa zm1UL#=y@Q-cA6!v`qN@bv`%HO>#wQ^`E}_A5b9LRMldsxxQb0+6DUgJZ}2jR?Vqgg zTyXK(wP*`rPDla<+5x>~!*>&U@6Wdr*<#u&u^WaQn(i`t(Do)j0D=}l>SXa@$J6*K zB3h|Td_bJ9irOT=bdw?*@j?kxF+?b9HU%>MKGSrVJa{MN{%3O_hG^S#eTk^b_Vc%X z;S)umWwS|J<^ETQlB?y;&~tF&GjF2@IX&brB3bDt3f^>bUOC)Gyn*aj5BtTv1)R&W zz?Y-!Kdx7Ee%r_o@%3_l`kJ?*yhDP>>~3tu6gO0V5z`VG!CUSgOSTtsK2z_u-2#oE zXUXbxU&KVo0pSs-`YkC36*ig?_b2lcSOgAvt^rh)3kMJ zswu9KZ7P2#Ul>+ghP|RlZ4ZNb^lKK6mdz;uovFN{6}7r)jHZ!K3^8dyy-!4t^`SQ+ zj*n(LW$eD)iw9eXlCa3W$30r*L#ZU537K+; z7l*y@BPG128OOxNo7_O!orFwV3d~y@r_`z36P9V6{H?zXgWb?on79D|&7WgO$^GZ| z7df?a6UD2O89yY85v;g3l^ouMp-#I!50hqJ)pZD;a4vk{!=)$Gt?kGgpomd2HmWu) znp^Ov74A+pc#2SSS+GPXw>b<-|lq?IGuBdP(1u` zzdNJ3a6R8QcUk7nQfdlqRH*?`yw2L>Zf0(fPcxOGaz!HFI6JT;22|S&G9_kl>CPry z4eC`}o>!rKj0MaFwkmjihk#R|)kvVd7rFUaeC=TuPr@BrMf7$1P^O)xlU zEwDWOTmG}}&f2uKK4}J^`dZUf|1Tf?AT6w!#R2oU=F)Px>OGN}K=~Am_B_&L3H5^! 
z-I??$W@y=WhD9K0DL8NLfM4JGLW$(|=3a zcwEJ3Wc4dMTndI3&3L}nr|WJil;iy86h}Dfa5|R@Xq}K>a^1)Ow7Fqb$+wd+#Q*&IpM=0GBsv?A;$&Hy-@`%Q(j;whM9Sv{F=!#2qm6ZjZNJ5c82X~U?e1!?KIoJJagAmZ-lTPqjl(v zC;BNg)X%~Xk~Za`$*Rv>Uwsitg%L_c)qAaJbxnVKv1;9yWO@_di^A(ja}GqS& zP#rO2nB`g_xGX(#p?X&Ut>yNL^m&=1{OUgLLjHVXZv$fWa{?cVy{%GYHNh)PgdC>- zr47s$v~UwR6x6`1`JB%`4li)22UMvDvsF640|l>{wK3K0Zab!O=WQ5UDP9a+K^}hBb6}^UieK(>-)7N%*oQq}U`H@uI=gy|~POl!kEfS$W{7 zJ!*9|VqD2_WZug2aSgq6AM451Q8cTZ=PE--nM3Qhiz=@`6ArXEm9fys`}4gZpUt!p z`;GIq3qcwp?Sl}BsgxC=<>n`a*xzNurO^g1vyy4Ht&E1`OfTo(tCE6+}t^|$-~i~Bz9V{H{QNZ1ivM$^mw z&F_rB-Lp<2U)KbxP*WZ8S;y*COIjg|X{t_Z*9af9&geaJxYF#5%F1g>8~Z^v4A(`D z+OQMdz%u9vCkW?Ty4WTw-~E)hj6*|1-a3D?V6UnBu2hbbK>v4f1!yW>j+e2+ z`n;Uj!S`|sU-Da8*XfjPdq}OB4V-&1VCyAYZ~3ugqx;e(#}l@M^8Jo$W7!}**Lsoh z;w(+Q){t;~-a2UoO7oeAAUcBzuTQtY!*Tk>J@x`}tY6KKNqswnOmgL&e3CRq;bJ4h zIY(NqeK;aRZz7u85ZcVGxJTLgTuUTl#6$Yl#3X2Nme*j_exY}KBz@{lX>Z|;Gt}y` zSsih_Jc(38wOs@{j8U6~()td@nI;8PAE)_wO*TuRH3HAJzU{YO_>C@%m+~^XK&v-Nqx@(C;UyRYJd|+kUA( z2wuU=$iqB`=*5kR$?To1Jfwmxi=3OLEz(&gy{BShtAs1O=+;;OiJ~`Hf>1%PR>Wg# zk;wB?dm}cTwSmJFg9DI7Y1kDvLh{|}J+mCp+=|D-KTjvKLkEm@2erA?+o!g+IVpco zd1gLS=}_s5B*7$05>{2JsMRs)DBJ@TAk{%daR0gwTUKXGCv#3nnT}|S$OEqV9+dDC zSnCfz;s#qg>ZykB8J%%>L6fSb5#GFYIu_qYswY!tX+$Q8&|F6$7RNiwX^)EW+3NzC!CY)wIb$!U0I^9|tz&=AK&Wsti)b z6A`UxXG05lRL5w1HnjV_j?|5QVRa|!XT@WwvZk|G19vaXW|~FLWoFXx!Ye%&!+vm0 zEkQ`=jc@;8OMTjEOfXqIAsik#&FkFLR7>AmP8N|!3~r+_Y{IVSz&^7Z*vL&lIh0k? 
z@RY5&0z_hE!lZit@wI&ndm65!RPT4uY=1;k1L^N&x#QpHrr}WvmB2v)?QTIbuDc93mA4UY&dRCVq^bay+Ao*E?UzCa%Wa;Gw z*FFl?3pPBSbrYs<^1HDbm1bfwUHencrW5GnekE907n?=^$(604ujyHy$<0=EQC zJ9FKpc8rqWAy1;bmg^wd^Z|4jdG1drKz^;N5jYFY*3egx{nO&<^-X5i$Yf3ye6hFl zQAZJ{dUGCP#7YeI7+DBhVH`y7e>pM6nE`gfh^TJe_)T^)635ER^8HrFvqZWqh={}7 zYpLFu*09H8o^jjT;`Dssn$QPb>7Q3O)wnI6p9ShYq&q2>#Nq5$?a$90@n-1YIU*iO z>sp#p(+HQcj&F;WD$W@x?eR(TG=(M_M5V=qvk;iosmz0XS?mVxjtHH(pV(Qx6ecDc z=lPn1=ohoQ2HP4pQgWEA9-43-jFdD_Q9kWySfX}K6}BmT>$l$71&U0eUofmtn6nQ_Y1{n0pie< zAI&(dvtt>WEYL@|RE@E3zQ%U4U{uDmHfwfnsh9ag%U4ub#>IBZ0u#Cw#W*jVE8IjJ zL)iu=m-$keBFULqeW)onCk|xq?Yz<>KV1=tQ?L7~JYA=_QBXF#uFnyt&aKYMJQ;B9 z>U)o{>o^ z6fJ~@idKu8n{F@4dK>^N(a$AK9 zIjYSkBIbIBQ%6f(7Em3Yk7*nuQ`V3B&f?U|wyueg#!k_= z_65Z;a=U6Wu@T3HvSGFPPWucs(nHuinP`c%_e;(4x7Suq<%=G#ed@{IT0_oHj#1rT zGs#Ws9{x7&M3$~pq+@>vv-pUn%BFxZf#p(9Jr@Uld25rD{$GO4sqAhug|k0yjcdvw^pS5C zP2u@6M$?@lW7C${uj_lO?u@MLOLoNZA3m1TFdf|#Z;4EBwFr4+rF5U0mde-k0?p=O z5z5Vcxg}Fi2M0QYW!tz?2Iwh{VI5!f&Ycs8Cz{>UK}LGfe-5)gDtM6h3nO&~J6^Mo zv=O5wyn^4E{yavT!{Lhtzx&=eto`@HK2A)yxLNWJ@07c?Yy`1?=HB?_IXQYR_LTL2 zMCv^z#$KGLXh(sxqDE;Vr2%$iuX!5uVirNatjLK>SsawHMU3ws9+*sO-sjs)DSmpv zGZXK1(>f!(+XW}W)>YrGC};DPOC{AKQkY&@mTlbovBI+S2O73xrXNFdRRlU{sFH&% zb_Z0_BvE!{$(-m3?>HH+wcLaia@y+WRsFeH6?MvYK@;|JEKsAnM9;5a-EOQQbRsYL zy=#-)eH6{wue?(#!mA+@G)oSnQ1)LzLU3Wi0j%$swwG_CVTCGca@w?ZT|;A||Z zfEmWC+etfnMomWp_1u`l-}yr2RDc{7qAYs`ba7jOgLc0zPY_<_HIp;^>G7%er{(WH zJpuVGtaU4)!P@STUPQI1vv-lByXBXaWE1K8@R$Jo@Q0qgrQG;Eo;{Y)(#&kfDZGXJ zGx5&QlOop&H&K_^%rw3T(tLdyFJ!!VqofJP7~*^~)SnB%g8$K0_rBM8ImR%h=o#wq zF7*kznvI$AX2i9F_WKxjn*%}R_coF8^)IS&rf6tOgHfxYdNhWD*?WqjFXgy+Ql_6U zNIPx7CyO<0r>q)Ym^Hh5e!6j`gr{YV7BrDxZV~@Ri3g+DBO~RLH#_C`HkvS72+_LD z>AL;8m*$T3D`=@$sIW~X;do?Iw89(whP{W@~YYB7xxJ6gGK?tjR zIFLt(^4WS;_U1b`$u(GA{a)WFGLiId-achd{!X!qKnu-NX)0@QSzTFiN`8;!iCg~X zw|DW)C7Sm8XyB;!sHCVDo~%Lc&{g8HK$fNFin$I2`66-_ zT)R|T8LWS@Df0QseLTuoE|!jpyIqhzRy+Cne$c2et^R>iNxr(t7Z9y{+^kcJTRpAz zQHF>#?BqcS2Agnn`13?%OKO^$VhL^!q14meSQ6(9i-Pqi^**ft&UmLBYj6wW5vl%X 
zzKib|YaZ#{2>wYCaGU^y+N3SfK$BtB0Td$5NsT06e*bql{ny1lU!Th2mbgwp{sRll zKt6p~O=@#6)U6&kdV5Z2ZtzlZ_&*}T?}BArUK;cjf4C~tJWk~3n5N71zBgDXIT0PQ znEsC0r`-~dO-#VzP~P5dOpi>Nx2}ZUp?mJDp?l5wjxX~DBc+%_Z!=w_kQBGW(&L$e z9jWsTHhd(?YHDt|Jt2$hhQt z>yxcB0zPKcea>`gX_fe5e4lNKTU;$TPBsNsvptsS^Jlc{5v17VZ9&adEK z=h}}osp2Icm)m}r(6Qj^B$0QLja`%<(A3Ugq0bhgu@n0W$$K&8)BZf4Zmt}{N%8ufkn9#E^*0Us9H+F-oO(yL_`LE;kyP}?oQEjLO9aXdYHwZIS>lizL84T zG4?#q#bdLGU<1!h2WFKAzh(CtR3dk@4?s&jQ6dhXrq5>9dD6B7S#zmAPrQ3Sxj2T5z!zslFX)ur z$+4=OFFX;kdo$=%rRtB%d86ISZ>WgM^jyT@!;Qttch#OL`VY!iB(AnqfAu&R2(^lf zhwEA8JS>zRk?y3w6T0_Ni%8I)n-FyFyR#bRAIP{WyDnb`JcrwhjH zurdZ_XawxGo|}%sZZf;pFmKsjF{Qpvq?e#!XYkIslK1z$FZmoDI=cD=b^%>W{hj>P z9tn)&Pr^qEKh^rkzkW4MVmXY)lXIWYA9B2ANSl6)?RD$B+BqZlzT+JkG?L_}yBVsj*!v>IJXBvh&c^{cv=>RsC|^g}ox^fEUV^_^gKISIghlMr|U38@8Hy3P)~0cAv)UVVK4)0#LfmP!V4;qCwDod5l;Uou&ml6|i)-XD+A*yvuS zwxs^*#HCY}Hb(#7l6xaEcl+;v|6JR_xYLc*@KeZEaDlgO8vl+aUdrw!GYMe@oguh$ z3{I+1h!`9hBuwFd;@kV)Sb2T@bMnG-{n3DnbBXF3Ks>5S%sF0(;3%~N8eDr2w$#{; zTU2GG`0e`FNewMJq@swZhgS86l&yj9!}`|Ir~)-5rPRi?2RGUo)mi$|Z_~1UZ0+)T z_*ee>U%Zfeoyx#c#`VVfjR&8!v2;NB3)liaR0SqX&wuMlbF6P|+J=r%FTY}sz@~2407zWA@2qtQ6JzXi ziA;ne4H6F>3o;Q) zUg1pq`+xMH4SU4KvI1}MBxXnbXs6Oy0VVAXUYG`R1ZB@;k2)EVDwSaZw>|sba@0tn zR8a-dF@tO82%E!AX=;2&PrTlGbxzh%i4?0zn)pH%GZRLoQ~@}U|B!dA8Tu^nsC8=- z71+kbjeAh@E1Y44U_KfX!E7(ybQ&iY_*NwCvinNoZr;1(R#2`A_feX`wWP~k9PyDG zui;6&kr2ubeZTj-bzgd?q~>~m2OF(sH=gJM_Q@ig3u4Anuj)55m+~y7DV1X2&{s$C zGb+Zuqa)YS1+GgSskac~(uNW|T(jR3^EN@e$j9av7RkVjq|D3Qqv` z8CO+&2wKCSeJ_MrAXkN#v4d6q3Nd^4@Bt+~V0$0m!$xZjk%aOaRMBK*jS>bvxWZ5P z3-9A&S*HM(jbMAC0{A9OxkwH)TWj&S2m#Cyuw^$mE|Mln*alMa?&u2AoMtKaPezl5 zx`^xql|%@qyu zeM(qXBu9Z=K{v`X{3T0s&=`@@6>j^>={GR}UUVzPD~#KfDp<4;Idqm7x!-#n<1t}T z;9eUYu|@pGY4v(c3Ad~wC#%oW=^(>R_N&xU;2*qoIM~;&2WGspJI>JRy2GGdV*2Fr zaQR=ozF%T$yZH^z{hsCSDi-~g*4Gn9sKKxd4^G5UtCibrI#dI>*U=WjSYP2zJ-VxX zBAy+TUA_@vcNot|4qcmuec_{Z(<{0DmC^t%l*Im=d%x$RKxCC%SuQUoFBg+p3%hvc zUu?@3Ud-(fDbUTcZ(pJMKeGTFqkyYdwo!3*oOyD^JtEOyWfIt0PN|jzV!C>C{D)X_ 
zEzJx-kUKTSG?wuSz1qm$-{33#jJ5;tW}iG4kL34~aX9^vgY0>a+symd$Guc6Zr}7P zE8E1Z-@~q>0ikNFsjOXu`~{}7`3{e-xmC=H`C8u$anh4-7QVybDH$2qL)hlHn$ou6 z$kH&xWBXAg_0fEaPpx-{YQ@W~G5Y!WC&o313T7ZTyHw+27~`HE3>x`}VeH6UX*kFUZgZ<$QSl5Z zj$iV<*XrIsBNR_yeK{;oaq+x> zpkGRpmc?t{?E5Bd_4Tb*q_@+{x`?NxY$oOQAl23>9#*tLGu~4`z~P0xsA($2Lam1d z#exe?=Qrgo$Gy!2k23rA8bqj_fw`edCv&@>G2sEtx#fwKFkO7OYrK+7X`(OpywgW{`SHpnt5;}QN`?v$7 zf=VvsWEGP=;=Us($Jm#$;qi~%QDYc&s|5|SW`5^>a%+CeW(-_2G<;U?M2N{cf4;Qt5!w(iR`Ps&CWA;;ZHA)vI;gz<93T`t z9Ms3`O-!;+ItO3H&Vhe;gYVqJxp7G>dJLrey zjPgnXe2~gBi?&GZRS9z!^_>X>3{z=w8nu(g(B&81V50P?t97wQtc!*jqJUM8z49@{ zu8X=#x)0N2TOU6~lH9bD z>fPh79nUHppTj=JYM88F`4qaU7+cz^|2Dj=b?ga&0w_G#@&D82l9Vj@5UP6p7%TQA*cUjjRkI=`|fk_ zP?Fl9twET@$&k z-=m^j^)~32-jGG^y`)$6%?FY+A2Cut^mRM~^wc=Btl$yvEwivi#yERdb>00~mIwVe z^_4EhsJL6tTjGOTt6ED9uk}>|bG8*j3&4U)%3Fz5kfO$9row)nqtDx(7yUK5fIaf~ zu1EwVT&}l{v!=zUk?EhORXc}I=w#ofT`x>bgn>G}FSoFbO~k8vN}IT-ryD?)9#>X8 zl97E%p~U4AXQ1uxU`28#ELVjuS#H(|g_WWAdwJ{RqG8#nRMMc)Exgl&$vRc*{JZt% z;x=to9Sg)ux%KpY+?tso{)mW^2Aauoy7Nc-n84XkP@XI0t{5nP)xlcW5awFJ?M9WO zFbOzEI?g)0b;i+KWGFye0fxoQW^BKi11$ZOn-@p!ND}oXn-YFT9_&^#(9=c5$A^mJ zJ7L&+Dyq@hdOhMKPehSn9lJU5ohRC6hj|?pLwPy8_X}Y728F77d@B5P=d>BDf%o3v zT;wJn6Yf=F|8JUpF|{#~qnu7ZUS4f60_O0xp7j_#@d5Kxpwm0E7e;Lp#{gXmWQe| z&nD(_v?>?AAth6 zhfPbJNf+9EV?U|mk4eyBVow})frY0;{7X8{xC;ykS0YEie~kdZBl1yHfH)3bGZE?5 zU^ZU>EoJ;~SfEcVSSVYk93+tBXx};BO*uxpeNgjss`mx@{WmFTKFO@Yq)SHxPc(EE zG?ZVm%{PtGIwafz^(4$UjztW=WH3{(Kb1@m(Yh<>@6uDHIIB;lDzEvn#&d2cKo{G*i3b&sb+$^LM>b( z7F>ThA`E@>yxAma=7|OzsC3Z7Iyi938WbFBiJ<9#PG52ec$#NRDNzJIGExrQyKwtdn z(9SHM{=jRmPghZ>I zVwkvJTqR8Z_OuVAu@UEKEj}yXneVZLm_~-m9LH8;TY}K8!UIh=Uh;9t*Tc#yo_V~m zIRMi?x3+)U8gUIkx?ZGJBh_v7tnq3s0g$4!?R3K`>*aCh4Z^-slQa2x<4D?^;8eq9 zAa`1rDEA!YM`uQRB{m5Kq=l3q>&9(vKT~h8e&J%i#D+$d-jzT9Zy(_2yvti-6SXO` z_c+WT0uua4C50lP0j3~UntXlkM%8s7<=uujWJ)*XYq6`&0&NUWBwK+)*}_dSYsKft ze2^*LA%u=p?pf5D4wy)W1U?~wU4qM%HDH{{DX>m?4*J~{^%${kVnYs)>hw47n*RAa zW_yZMt1K7BmN9WBA{qRL6?pF~K@(nM4LdVGp;V@<2$3i?3Sy&>^C%cGC<0I=JWRB1 
zhp{6XIMoH7*D@1}?`IKhNiXQBQ?4vVH*67CX}fP zvY0Q6Z+i20PFg_b?iwqRJq}datnP}Zdva~FeT-w@K?@!0;0w#BmYG$omI!V44A+&V z?ntk)B%ys=m|zM+Xu*9XM{bGXMoSa1rotSw-rtx6N9RFFxM<~IOSv=qCQ}e?b9w6 zhX0CUm0!xR#Nb^bqQJL#%BUQV0fuyCH0*Js+`9YENBrZ@8gc$S(Ze73o>MEaN(Qgr zL?+RfurHmc#P2VksIGjj5E(NOp$It`ElZZkO{UTUe*ns!i9jA_M;22Bw~LBfTl8#( z$1^|~>hSdtfqRNwE$aA_L|1F>2e^Qc^!NpJbC{RC+d{9!JGNC*aJT+mVU_re)3bdM zg-!gTY-shjl1H<$vN4Kxf#Q3iCDDL{v}bmX$m;zt2z#CzV^PC)XanYz7(z?^Wf<4- zAPR^)bYlP1qk$6g;plYPaRAJ^Kn@q@R8;1-?KHbO8`qIQzad}31?RS*-QM)`~qekg8B!dn*}XLPJuRr%!1 z^vTs|wVesTI^U6&PFz)w(%Ps*1uDs2gzrb(|1H_t!@6+KuP@`Vvfy9%oHzI*6>Buc z&~hJnhYEeLxSEc#tCY7z&`j3VXPfd7xh!D|FD!euQa@Z;(%t$26cPfpMT)8+Y%R3T zl3_~_D^G~7R zR((!1z`Ln><=OQJf$Xhj>eGXSSgh)|KiNdx_&PLqIqQGhZV|hKB&_wLC0I7JL-XN9 zKd4Vd>#X3IEejutY4l5-yON>UhDWY0dnNvRK1?g|t#=9c8!;xagqB0-IQ}7E31LAw zY8k=+T3Q?{kHwewmMDQyI?TA5U(KhcFmcq1qxmL>lgSO!V)49ZwP<9|d{7_kH~0jt zIh(n32P5HSaP&QtnpJZ%VMj*!!M?>R*|F*h!^8~H-6F5N%`N9aj@7fn*|Bnxlt{g= z)_K3Gq~C6y?8u>f*_MdG%r{@k$;Nt1StH)umGe`n8hbdNDyvX5{r;*#rFY^Hzv3+P zcM~VO75#=7Yto~*46nZ7zX|576ZX%wBt#F1p$6aXoRM?FvU9X|o77S6&|&(ghDoN{bEM@H#a z|K;0efCvWd!WSk8W%kqc*U}AdUoEfy56`yW@r$L@l_2c#DFP~DN^zqlLv}@EH5TNN ziy2c4yL0Q)7l)Rer3^Q}Xrf49;Nid?43)>&+m4jum!{dGb(m?%jO;AFaASR-PF!%s zRT%sERSf&*^}yf(1b@yu z*t+s$s|J{OO%d7xY(=rFoWm&^^}JCOq6$-JF=*^qAgl-5ZRQZs0dLF zW`x1&*MVevb*eDOQQk;7Gf=1r6fk9az1THb$aiXb952*If|va&?$*l;c(x|zmLaxl z=N{xQ$)~@ZvXOu&Eemu*gq@S*&!l%0Wgm`lRG82T7FRn_|AQyAt_unZo*(#LRMgZ= z5}`&*3ko*!@(XuQ&SWE0)b~F!r)rg14FmIkpV>`4I#(Tnhn&9o8K{sV=sHnq$z$GK zc{m>`FE1Z$Lu7ahCqPMLe@KYd8)i_aqlVpgv1;AqkKIP&Y18?$=O(cH=KOn~sI_D- z|3VlrRgGK~M?(G=i8jc*&;?ICft|sRJ{_MuYdOkwh9pak?)NrK7vOsOTvzex2+R<=lK;TcuK~X3t}E*1(V^3+x&Go2$tM zc!0N8_XG5P&ifFyKlO|82D@^^S&nw^|C_j6`rD2DM3um>bNi~LI6p&YSJ!542oKG8u}M%)t$wlbhp7IE ziE8f?3@~L*Pm;4{qr~uRP16pfgNi3eSoTM_>lYVu1bkuqR>7e+Gv9B6Y0u5gZ{`5$ z#Gr~LH{kP6oi?QJM{VmyHka4hzUsMVHuMSz>VD*`aY}1m+L5h)Ase@F6AaP?T=VV@ zW2&R=8OZ3DWQ4xx03_ipE2}FUhU6%eEB#0qFQ0?teN2AktrwFU>wV#URVT}Vgzfl# zF~D30Cexs6Aiw|No~wQnS3z9uC(Wn2pKpSk;weM_Zr< 
zA&=d~yJDyPCHJ89XPC)!`l70|QHUEb1TNKOP&OqcC9cxD8sIhSqPme_PvKNtTCc}F z#zqT|aITR7&4rsbxAXEu8)1l%NU-j?CBqw%ZnB`RceXL@=uyjJP$p zXD>j2JdKp$xdafTpHf$qJkb+)ZF3JDatie7xbd@64AxLl++!m_j-XjB)z(7xFlgjH zS@rHNbz##zh`e;7`G3${?Hqubb6)pwt{7(`2nE&z$Ao)D)Aj;Ms;g-8-yghIQ*#j~ zkgU;f^X9BW4~`o8PCK$FzjguUKKbLND+iIiYB=97;(Pj(9G5kO?$*1|(9rN;^4ing zu7fG(NC9KoHsj(x-}Alc>FL(NyFS|iz~FWYRh)na{ryDsM;9YdS9poMPs#lE8AD7v zn-H$k;QplGMvxMvZcaNyBvn`?2AXbA@pv}wq`~$y1I#{gG>WP+#^>FZJ8C}VIiJ*P zwysiCUS&Hz(DCn8tZ!#%|df7vQFrNonN zxnH)*ik;0qIq7O_{9oY?zvsljOiVw9>InS9{Pgr6^=-gRQedL6lJ0yolS1|R@w)F` z(#$C^PPQJHJzRf0TtrH$Bb3W&Sbyp{bCSB$NM`7z+#EhT7es5IsmVOC8RBGI97#wP zV0dvnnwXV9AGgC%F}1r@gQ9poAbVK7UdWSm8|QgiF|XIKe*TQRuOW-Orp(zi&!o@Z z(8!tSeyhGd#w2@GtD$T|028bmwdWloVka50C(>Hk5CFiF^eqFFL(F`K*h(9Ox$jNK zJ)&?gh41erl#kguI_#RfGW1l;Qkf{^rIw6Y%qR`2!V(dZ@0+i*h9!m%T2=zh#r^;V zH+~|0O)ctH-FGMR)oOIll@X->jT3#l#jaoN0?Y>u4-X&tkg*4h+s-I2A4x88-!a}( z3zL$Vwl1t{rE;m61|d9F%;NQ#aF!46`X2tI1T0tm$rcYTs&XNM_H0#}M#+9>vCjv? zL;sMA3-y7_fLzq8GPu3OYkyeN_i!#KJ%%s!n^{-fN~-5pjH)8{ZchN;jMv_Xu01bZ zditRsE)78vP}dmnU^`Tr52)ed<#kqIp7Op1&p`7e6P8p8^lNYjn z(lPgiI6tOoDU`-A6iWZpS2}BG_w3_eK8!b*Q~Zis1Zo|e?P_qEYNB${ROD8Qj$BfM zfjsf!8LHA&+7wtQQ#N3?w}h+OztoByVSfv`QbhsfEohHB?ABW$l+PbXUsu5(hmioF znz=Q_^bbOdt7_C5cK>X`=Rn$USpt}yU%9i;mPBuS+AD;5|Kg$ZHm=xV(`Y@0*O@_- zG_$JVY}3BNc9KbOhax{p#&o*c)p@y_o6f%BtejhPe@Jx!yYA&;sNre4GMvqyO|#IY z0J*ubQM;NRs#aE^r+V*hBqd+(SWwf08wC?X$6J9KZNwr+(CU0Kz5z!TSqCL&e)28v zte49f$|r_ED9Rp+GB8o%9*aywv?I&wt)C;y!eR**@MUJ~yUJf($)N<~tXN*b8?QZ7 z^q8>}u~;Uzt+=y)2LCf^##NpL#EulZVwqUaby$7b(u+OMs;3E9o~h@1w!a+g!M^qf zKp!{2b<%|Cm`P5E9zV$ftYj^B#N7zNwHye9i2l)9M0l~s_NKkBCp=5xj zRrz_*B3rxIIw{Mh;y}8(eW$KNWMaaTHknViq$@@9MtSKlegoc`EcwMn6oh8)%gSf8 zj4#Cn|6(?A&R^ONPbtB(+u&celJv~H=nm)~F<6jfT20LfAV%C!@Q?R5vs2s>fllrs zt>Pb4$s(TR$)#3~YeTtl_)uVh(KELk7>u{o__YDBRHwC#goNZ}e{t49Zgy(*0m3sp zIWI47=5(2_HKu9y#)i#}!ozPI164XgUxg^DiD673_AqCDQ$6(u%08X$yT-F`K3K-y zT2CbP+DBjv4GAU(Cygh3)&T7Id{hQ<$vywK0Q_1i1vHfadz-JoO^IT2zoc 
zi;_lJz8bowtprX&czMbAFE7<7=hkbukQ$U~H2fVwziRBp8cV2;0b1RIOKrNi++Q?M z?&abIV|1LFOAXKbHiL*SZ@2g_D&PTB7=k_+g-&AST7mU-P~=8ZN5}&OhHaIEYI1qtwy)+C3!>M?Hw#k{%s;NSSAJB)K(wHZhQA_7AIlODO7l@_ zXTE>JOrHj@fQ>Zh)^N(6cz0m$1Q`iQ0)6Y4UKE@E{{DU(*odA-v~np}Qa>>~N$2P4u-XOOG*^oqXTFY}Nyx^7qfk6i-)#GV z53)`Wb`Oh`m~Xd67xsEj)gUI+@iA{$tiV7BXhFQM8M}2%c>5KuE!2lY!x0_zAILiWjN399o}0NVK-7yF;xXrc zV1{mcQrpxyD=O6A%)gomOzVI|llDb$Rg(>AziQlu6;K_u$M?s!=Fr;XI{z1KZo3!bnX>s(~DT&ai?*g^#iQ==eze7 zq7~O*LSqB{d#=PW56?p~=(d}{&3PtN>`x}p$kJEB1~ zb$)$QYA!n>Tb&)=^VdEfiXCPCkVoyk(qjNh(;`!;`8NYk`_8nSJTWHnRXKXQBus*@ zZ^g*eW9{YHHy!;_Yr9^O8Sb^;l~_(sQ_>wP1cJD)hJpf*W>1UL1a35O&_8Ct9#W$f zPR?5e$sl;i{&Ad60`mho2U>5m@}6tYXYPgf#EOrlHx_OZGXV=wF8Bc#M@Z;I%}Mlh z`{%Ds_1I*3+iUqnc7THSN_(_=wgUi0@)C29sRew*Gpn}52FmhwIN=q96E1&FDUiYlic(Wtk#d&xCp z9I8_9moP4*yHVm9L#Km6s`l_b6*SKAx^uF!gR_ygF~HLiWHLP z0ZD$rK5|l>_m-EvGc5cSP5}D@D==Jmp|BOejKI_mg&_sL9y|g|o;D#elD-_rV363J znAJ3HL{b;yuvGlnoY6hBV5%M95GsEsqRv-)<)TNFSt~bAehWnec)+YkoNxJ(~PBV~>?Nts@xja{=3z051awwD~}& zie1Ld;$iwVBYI`leZH$-f+9pPTAowEub`r5J$y6lEG&dkKxoD7XW*UIK?~8n1TNpB zu3xwjuK+P)M)^S1S|Y>C~5M#cx8K<)IX{i4A5 ziyopdrKLGuAiz@h$jeo)CnwEws#bDzfrgi7fXMOc@GHS%JYYK08v}Eoek_4q9Ey15 zBdAAtwc!0rVO7y@`(7OQ0{gd@Pe@vd{lpuoWJL=L3QmWMd>C)pMQ79j0m*b(d&u=- zH3I|AK}iz>Wi^|QdxQhuL4Xwm>^-4;pXf;e|JNv0Wo1VI2()G=W{I31E*MAue~i6# zSd?41H@rnqKqW*_q)QqEq#Nn(4h4qJp@+5zX$BA}rG`d22Sn+V?hxtjj&HHgj`N-E zcEA74<#i1{v!1o?_{F_`gz+n?Uk0ojN- za)7r!MpS&0glc`Vx4d9Vvs1GB9kiZps$Nthelx@*hpENTtMAp+vgY;$NqiCw-Wmc6;hgZLmX*x~+w#v=AQ*dhr z#SL3q+xv#@-$A-GF+5+CGNOd;^-2kEH{yK_z~(N*8guV_$5vgi*|IF&v=8phIQa3o zLBRB@VgSri!nNx`c3tTNjDnx2y1zE9%LR9}fhA7L(WQKC#j31qv<&em)XOTf;LWXz zZ98*$Cy&VjrS8rYVJ@4hPql;Lr{hFx;}%nA2S4mA?A9EDgrClJ2tSvZf>|4QJI_5Y zQ!psVl)t26% zF{*yiK#ooGloYex8PfXl@ukKWRs+H#<@fL3>+0&V&wYU&vv&&9pY_G?PGdB)8q5yQ zY}f@Tt#%~JbtN%=k1M$CHPrkRH-)g>{tMxbU&!<;?7@BIe$T8qURIQwV&oca1qHlb zBU}DFY@WXW+5Tu3PEnJ+c4Runl>xkdTgChRqX>7rsd)3`V~|30cO={?0A5>4t1)f6+Yto;&cUWDO}}C--?d z8#>nPS>`ks%W=u&K}UELa+LcjAj%(w&|T&%-E`(DZd@aHFFX22r-DyOUmdf%)0u_d zCfwDMc_V%J2D#d;g 
z$9@+e7|bjRUslZ%6jrY=$WiyImrfa!T0~}i(1Pz6dajol%cze87u13~HDkX@xWSdY zSJXgl{edQVVGqD>FX=G2qQDhsrW0R4Y_?%pih+2v!|AWOm8d3>dyl8VwJ2A!P$)l@ zw0(b?3oNdVA_UA1>qegAhh^KPtnpNyy2O19;3BXmWJ4)M!&G-zRmDXM&&e;F@6Sue z36j2tL7fNmJq5l}RdauX^)WZJ(FUp-b{XA$cos+o2t3+mmQ0M{B(_zc8=)}@UmZo=W~9dI8HYuBlGS}&{jvfS=sPYRz)4sSBF z)@60b-e)&K^8NVw1*n?drKGS@rrz3kiY;i$6iUpyGaqL}z=@;;tV>~{&&e`?$G0^c zU(XhUYo9~r8VJ2MoKjYYgD!UB>Kk_jNtHxJMeD;z&gKO3Z4aWu3cK%X8LISEo7fK& zkKrn=W*)dY>tfyO@14K6j**&O@GPO@hm*&3@9qrAT_N{!rMa}A1?HvXbKY&_;_mTU zZagXOv~-IBL?EP&J8^k9dFiAv+_m;JE0xZtURdSfcEdgN8!xvDhG$FgQT*4AB#Gqf zue-8r-)k%hw5Jtve0pXJ{akLAc8-s8=?OU#n>}B$_H~=By#9#%y^)K{`YAg0wh5(= zHrB|H&6D5!ySu&XQLzS%s&6=K5_-~sC^VCH6;?dxK+K`Q{^CV=)(07CjbQsdKK-$~ z*`G69Yz7^9;A(vV|NP}gpwY|igvXa4NMY05!z39?ZUbFaH}`d|7b5lGC6C*)WBtv} z{``~wb%rg__kAf&sWKiaHjOuQn{ar9OiDp&?QpJ)8<2>uq4YwYhm`LAq`UljY)2d?Jr7Ol~oOK}s8I z-F9Q5io>vs4ztLeE?`S1(2d(}x=zsN8mLPpBqRd1jCox2MQW#=J7Sr)i`*$e-Su=K z;#Ocqhy6q)n-Ae^TmfjDhQC@upeCuq=XalQ3{O0=PQlHVSg<{mA& zZOILP^rk%PbFnxl>0!B;m-1rJ_sXuU)IMWhdnf7e80{#-`f*vmaNM?B+>uB@dwcsH zHudfdM&RXf@FgR6y)%ix+{K0s_`54xv>{ zq=TUosj=~#u9b8dXj~z~h7bV}|D@0ci#&w)t9ffL4W(Q2IqU&GsEe ztiY^D;-K&rO=3SoaP#A3KWtG4%xDbJ@t?=C-y*7I$OthXvU(b$rukOPn3TV8!Af^xUb2Sv`*zM~^(Sv87 z_u^?kp<<^@6(4;-=DAkX%p*j{#B@)iMN-)-u!ha-v4cNzP0d;YJZ@qB)RR=XOijVw zK+V3WAahyi%=3`j&Uf<#S)J3l{Sb>@eIftG!O4f^8k=cC(>6|Mqlh&uEn#ZS>u9_{ zy5hX}Y2jtjY5ZkS-bKJr?bo`8$*|N7!llvm@qeA01Xj(Tf+AwA=@W(Hb$x1-Dy9#) z-i9P@>C&_<+afRibA?3i`X3w|cy1zx6UlQt>JQ2aoD}To>T+^U4mb4N4)OGaCwfOm9_m~?UkyFXzx?VXh(A{5No-{& zF>F-mDPsQ)=zE<7p!srgM^WCui(PZ!V|})Ba)uZuN_MqMLC5>|oQcy)<9`h)9x10h z!H7ag9|5G3QTK5qb$%W*Pl*qit7{}>-Gp9>N^eQdb9}{B4K5`JOJ9< zebTFaZ!e6Hqq&wL*rlsq{0F#I*9UZe%`29GK{%d|0|&;!!s78EG0Vl}+T_i5c!Oh~ zx7Tf+yDcN#Br+Hn$ms%u=_s?vKWIl-OPmi_v!ZKPx#tIxph@5sF*2lP1@1i}M@kpg zeCmj$o?dO{$6Az+lHqE5H0qnYSv4K+)}6?^n-CGoyPF)HWIMIWlE6s)=+O#(AS%-* z+wdlD8-4!C$!AGc*|3gNDCVtT;=3tNoH|}`-i*PFt{>$xA2sYqoRTcNu+!viT*%Cw zk=}q`mgs*xd`A$>RFEZhT37JoZN(7V&Ej9R!bOY`_iEyrV#;Q|J^0~H^%6I1kSmrOH>@=a 
zB5=qSowK_(N38IfWLZnu>!X9G)5{1I<1ozz!BjAJXzTJ1F4j)DzDtK=d z)9<-A{+eKPxLGCkwVBJ>!XClJ#bmF?=6(A_`_VAo2S!CXt6M@&;`A-%no9gmukM=u zYP@ZGf{2($&9j30{@Qmxl_=ULhL(Amw7s(aPohBi7SNEef}fIm%oO!uywFJ4snNU& zAJoHNs zaOU z^RIR{9X86`UTi>3CuehkU%zpx@w}A**9u+0r=Y*Nw14^Jj{EKYGV=QyB5;9uEph5* z^-4}IJA0T8i$?6a?R4EaO`JX@?^j-4-e!u^!_7;uay72&d=4`rGUuGLs+E9rg*X~G zHR?|R95$4D%0m1u9^SOi>3T3e<3rqxXNnvwEG(p)R&YnYfUl0H>lH-%cz5sKz$Ue^ zQKifHJoWT7>{bki+FhE$L*K$`%^+=qF8x1e0aP+_EnL{wPhL*v*-5_0)=>%|N#f6V zHszUoK#Hd%o##G^hY>&6-&0c})^gFZb}}tFrZbEvT{^30z7YNej0QKlb}2c~UWlg^ zCSr;?qVsZiIk2;?pYgxKN1&wt9U?A(>U^Ep-P_v(w|FN|xwbRwmcDyp+2?oddPuDf zZ~L;dBBO^4F95Q^-NI2XJdd`XciiyYU!k$Ah^?7W0DnMU_b~&&U=b3ap)QY#~gzCrkfHxP=l?@!m4h&~mza zw%WDYQU9xa%3xxE>wFKa&79Ni^`4WRlHG`6`!)s!;r*$&fe#8(aX<8ree1Vp)fh94 z+(~O%bf%NSR_Q-h08tyjAtU#p%;7OJN83N@FE2bz>|4S} z$Y4tpCjG?`@$nw#`@_PZnr@ydlr?P5tRZK-JMA{@EgR9*hrZTP^x(80D#qBZ#Bl$~ zuG#1Lq0nM4QoT;(`L7K|KP^WwA3}t;|Y8Ns8I1ZX(f@krAVQx0^re?P_)%uJ2r= zlwIvMNIk3rIU1n&)3F^dA9TaT(4G+~XyQX^ut#^@Y3D;Gb5wTWBV#~l8GF)kpqXZB z)KJXoJrs$VZSW@JuuiU!Fa@62w-Yjeb*`%GLi=8ul+P^F)poxRXK;oPr~uUEk(b;gWMCgiohG6~e_-dfu@BxIy%5dRk#zI=Skgl@wGj;csY=myPjin8Yo|={xB$r(vakJ`659*5t zyJvrW+zrrU1~snxeJC|3I6M)no=H>bjjzTu!aX*1lIb{`#-kt8ayf5kd%P)aFK~9NAJ?{b<+wltfo}L~hd3ksY6v-58eTjMZ?%k8PT54Rk@p4-z z=Am3HS^-FxU!$~M-ni4(*Jo2cny15|(_QiD(_?;q&!ul4ueiXj>yv|XI3Dv03oLr& zHiJ1DIyyQee9qrWyw0tn+YM2%NXuK7^YypixxnVP-f;F`PB;z7@fWWabF6yTyZ6LS z9>)&CT=VP*xo^H5>fx}zFer#nSuR)7aUK?XgY%2fctj!i(v6PuY*&B0xl#DeaMNaa!8F^VcC`oUk_*Yh$)Wj?Ck8ODO4^0`dtVOYmQ*W zg#nVoiww0!ToxPG8}<|P?jN-?uV~e74o-K8x>cFYW15%kCiUypT1NWVK%>>9mj-nL ze*ymb)uUiskcxPt;slakGcVfg<{_zDRla_uI{B~N%YOHhKB6M3T+u#yjBtuyn}(yx zaxz3MELY%Y4JO<5>Q&`9i)C!o+9_}RY<>sa1j11zLn|o#@0t$qpKfXSxxmix(}YkW zDQBSfMprO-k>&5Q{$(`L|6W#QE|h>U?N0Oj#l7-8qh8NTMZr2*aC!!+b`o!3Y;X7b zj3gUv4Ks}ti)}KWC%9VZw4Hu09*(aW_OJ*rWX^uB_ur3}Xz1IN6)w_<&ck$rbHBT4 zEpI-TO!AjM=#N$O|8|SGjn$RM;KZZ2%mrqxDH9Xpez*urysYk!(U@G9HrvEseDYu< zZ7K{Ym9Y%A>h9)JHlgE8y_aqqs)9tY=(E zwrlia|3AFS9cqWAK#4FrF)w|y_e}!(>w50`)>=dRw$o?tkn}PbcdRqGb7Jf`K57k+ 
zjBBkP$hd^PRgxa~CD8HPHD9#W<$bM?6Lj$qzb;dQq2alF|X%(SFA|bNODe^roMk%U)$YZc8^O zxP{zBcUhG3e{aM8OVf({25NyDe$5o<;;;NVHu#WXflDk&|8=&mj-Ln~k8>j>f}5X8 zC-+E;oPy%wf>9Te{^*{2oO_Yvnb~77OO}Gjb9Q%3k(_(w5uC zGS43!Rl0VSq?~E3M(b$|5p8qNj#LfJMl{YfT7@#rzWU|+zC@Gdm&(30pF>B*L67o7 zy1>?17KzdodvE{u6a9vAfCkMQTJDmqFhmSb+C=jaDjE2``1+lJ4hr}2Figu0OY(gH zNsG`GVFm=NV1wG&B}StB>W3)HUW3BqFj~K(!;`Ll{0|#V#+ucc&R^}Q zKl+e)O>){oX<*PST-{sQKM&@?-eeqmJniy?JNx-KwOYGf(I6jErcC!}kSFHfr9EIH zvVsi?Yj}PiC6f6WtIqHSv@X&+e-GPJ* zuY@*)QsqJLa!hBLB0jy&Vse6tOwfjUPc{HE&e!GWWlhG+L_JNxQr1+H_|Wp8%C8ck z$cL+g%FyD74zK$CRV0S7Z_<sT&Wq-{uu4`bL4tePOZxg=ILqES|t*ZnD zF`(EN7Z*1{xplBM3RLA7UD+1+f4*|MVH94e;JsCBP%cZ<_dqu$(^KRqkEfNK*R=CD zWpf7g|9$rT@0$jNKpOK%8Y_gwAJnvUpfoq~InEI9F9nXJ7cWqjeKTZZtcDA`zhKbb zCg%ABkONQyH&oNfJV@rU9qY@GJGXkw%)EYiv3XfsQsPZYv)=JTEyep_^aa|jIjHN# zgpRs8JTY;i((wmS7gSjErpm>!fF`e~$2GP3#pLgqW@TJ{@n_pURea`f+Ba_c+5AoI z<9*d3=k>u{t?4>%fKgDTpW;p-(N$EdDN1BcLg?^E{?-|9Y0X%COl$qc-VYm@q!+^k%KFRH0R?lf0E8o!tn+lkg$d8ddU~2(tGkXBTW*5j1 zYnC$;P7V z`fcMl6gw-Rf&tXN8xOJMtt%$&C7_{Oslp#bvNMUhAGRyMQTw|ygjb1lM;dnsnP$-H zHDAZ#+r=WtPg$II(3%qUwpU%({8_pZuM@Fs=7tRPv}#H#DIJ!&>Z*s^x08fyv56x+ z`xc4pYq!D43p%SH6a1g|^SK5jwJGmZpUuvAjsPr@YR^Nv6fMd&Bw)y*UwZ*o93B-l zTT*w33EHEQL(JrZb_RlER~db1IE}DQ9}GFb=6Vir8DL4Q2HG#sMq8trF-J?c5baivNwEF)IJLGrK z9!&3fXkBflVo%@}#Nb5=Z8G8ie%vUH$SHPeq_M9PL^O^Gd7q5PEO9EbxvUIau|6ZW zD?bR=nU&D%u#Bj6Wyr;05%YXA6DzF2jqBJ4Y@7FP7kBbl$Vz3$6iO`%6qa_v)$j3V z`1(5mkh7zLRvPb3Se`)**KKo*CVFsG)46MWFMR~HA4Q^X8pgkKQ2pzBptWu7V(jyb ziYGlITzPx3^(|E2 zd(=^d<@)=!rw8lTZ``J1P?f4z%ja*sSm_ z_E$48F|jxn17O`yg(oSx^i2Q+z6_|90nbz1z11GWsFkPvaky&Xy>%?BQF2n!diK0U z=(zk)S=bDr=x`pYj42YrcRnfIBxzJCd$uI@Ow^OeQ<lsYJKj=_g?yh@v#qTKwZKhw4vxvmrSXc*mBCyP4AQCh+6{Tz08P5@YAP|S zYAA+Ex&h64`&iGl4dlIm2BV%#9Nsyq=v98#`2HJuDIy;%5Rh%wov+zRF8dH%ivNaPi|c0qqad@}LG2EP5Bxp@Ed8?XTzN z@ZKL>kdBW#-CL%*h29bO32b+dWqB*B6#$18dZB4)ZtO$dpR7Z5<+W^jeW5UBW%Qr* zlt;9Wp>wTQ#lojg-xi+(KcL!*K7dz&ph)9Iag}*bGN>TrD1?OSRMI3ha?~+;Y(`;9 zvShrwogDQ*KzZfl16nU87M214J{@jMN`*X7If;GQC-%yXm2Oy?>hEZ%!VnQ{4x(WU 
z=^R2QxPk@yGi|o$_-|peIDeg`KQ~?=s()XIy{z49wm#aI??y*QW3}!L{z!mI;0nFT zaxT=&*9Ci+l*i%2Jd*r>=8=dvW1xE6qpoHV()46h3Q4 zQDN#RsvV-d!<6S8<-4H%C~o?`_V?mI=pqud(t-JVi5EG}1cm)j<1mnN+Apc03UUck zk}jOHWc{Z=Hqh&8nYb*zefIVT)lGO-Uhqt@)X{^~D$gkFV%PPt8AG8zCR8YEqF_K= zjJt{W{5^IT;`?>x#m`UncRlzZU+(HG90rj2YS2~#u zhZ3`{&V(YB@|xdRd67kq^;MYv*zAAhg+IbC`2y-yC(-}1O3{8hY>-b+Eaqf88^6E% zL`z>RwT?Su9W5wkW$8cK2zs;^yqK!(1rF|?P{B@s*tK_4B2|UKK?d(10I(ckleHct zDq#g*fJ`$6tHQEhGN|!O=A;Ykt;u)esvAFj%skXY#YLdr0s9-+%(OvyPr$6Jo~PZL zliH#!D%t?3;(7DI>o>6gL~Ra%w4nxGaLpF&ldn7Q5JTg45^NnEi{yW3gGI3#%xMd+ z7PYD??Ru5(8Gub@SYek$Zfb50G&>p_?v^#A2S4UXuDpElF;GxVtH$O==+PGr3=Amm zK|pz6JzNk&q>hBS9P-o7l%N)6`<;IF|6>>6M5FL}5KxUZbS=BAX{Ytm4BF8&q*v$n zdxmzNt_Uw+2JI_K3m0u`HTdBtyq9foBO*uJy>Ec+VE&0HySL8gE@ zInnrP^zqnNFK6Cjsr>96Bo#&+IeLBp%pTIkxsJU-K|zRQSk9T)NWLC7Ro4BXLZeGy zhX)Z|&PQH}{41W4K+)3xEG;8%7Z3qrFk=(&9?2hp&H;;5fQ}Hcyu3`C ze zeuIBssMKb(7y$l@iB5{(ea~~-B42a`3V3Zb6WIrjypM`I8)qZau5q>X___zkj?aGA zgh~Qmj=-z1;eWM%66Cj6_xN1%H53^D8lhMHG8=&{lQ$}`Ilf3-Gt`2BWDxsmo{UyB zBGQ)waDKvnuc5mWUNa5kn4#z{EiJuq=ivnKR0^4!AI!2=um)0nGMu`N%ic&B{M?aM zi@9bl1UPhh!8`^*^KRlcN-ix$X=#h$g2}4IWX6{X*8;Gw*aYjCo zzCrA780~YeR&yXr=ja%|h^sJ`7RCgAUaHaO zeH9;{2DCq`hD@*Bynb-y(OBqCV$@_8V$s~0Xj!$}^C?YP#lQ+oB)* zXbz2D9y_-U0J!XX&^blAO1=0-21zpX_||2X2B>+?VfgXmxl>tXq2)&k@2P#~(m9W7 zXt1@~jL>!Pv<-Jg;*Uczm^!8bJB65nt>!}soYaO5+zfz_p(}EPt|pP7A74-Z{lDkN z4uoq{e=Cmn*?7#njZan_Ek zKoNl|#tBgFS5E3v!b$o{AfrkYcuW4ZnZ1@FZEOm=_prTcj`s~{BvxuBaNC}x zXzo1Zh0alh3sB1JBwO7mqE|xveM>^1h-hTmrxD}k=)ApWLS4pyrCHrfG+dU1_A8Dm z@K+^vyIKB03%a9{IT~z<*6U~>f|u@= zzN{LTta@_MhgQ`jskZS2(D?KIcW9Qp)uO&a2<)9msvzg5hlfIKHB~|oR7%ayo3vt- z2eM1=^vH#UjYzFItW6=1cy1(opN1-(lXZW6GpgqsKd8s1JU6{({BLg5U;5~brGiKf z;EyOTlVjEWG3!E)lM>s-dq7696X(4%*9Lm^hfgdbqWYL>^x0Rg^Ivuka zJt~cL@&Fxsip0WR?eRha5P#S@eKWu^R)hTz|10=Qe1=9EW=9j&CK|SUOe#nVh}ms7 zQ|&U7jd*2<_UfPa?4rwy5WX=9R5LdXRO4ooI(ta>gp6)p8T&0G!pYv};&>5E`MA;u z#BkR`AQ;^s8?bv*f0fQ0KTwwk zy;eQxK#4es8Ut~vDOu1nL~Vq=6(t&0_D+9KrCy$rja5X^K5ue%vc`=u&Qut|EhuQf 
z%caeWlr3q_6nIzmtl3gdX*MEI&+#LeN^^-KAf=-Wd|>7PUfm6l*-09kHVv`C0Nifg{Thu!)8-|k#&Mj)52B7M52uwzkg+jWY zK|(k_3EliOX8Vj*HEgWXF)c)|BP;!GMOBq)TU5Uyf0y~As^0fZXlHG-DGw63ZpC)! z+$^F`nEdeYj}iiIehtVJOEH6Ff`C$y74J8gfiYO6UP;f3Y zc2Y=0;?%%^dVT49?pH9W#T}TsMm?nE9_dEV9=OKVnzCK%z-yztaV0tl2V*mgM|Dka z7;xR6p+Ru7Z;~N@Hjrn_NTk{3YBUu~cpjSz<*1;v>l$>zNa=d{~xUQVsTeG2z# zK`Y7bR$Q+DoQ9gsvgd;B(`+}bAeUCpo9 z=3++SssQFOWaL%~~@JF9io_!$3Pq zEurPMoUi1M^gLv6VP|KDgBU@R!o|xwdQl{sQ0(Vd+*nom$fZ`8>u^xZ2uK1}BBe-G zbk(@l<;#m4zKg{ZiV?6?f}uG8)`%haQblj{1R{!YY7Rd(3u!N&d!3wayQaVc9Z^*E zaeVF{CmAwC=Z^dMvOi)_@hnNKS*~LZQ~Pr*Lzn=JPOThu;@-_SVj{5~i_x_AK3yxd zJTr-`Po9AP_S;j(z-Q{ zx?MRY=a^7mrn6pblHz)1qRXT&WZu5ZVp{b8JD9ATAQ7q)TV!JHn5d9FO}^k)nca8~ z0%JmIX}Lu&WrpJ@+_dScchf@qQNejEqip$&{f_ncZ_ffpJsFRK zh`nsHENlH4^pLga#Bg3bx!QcRv4ziVF;G}WcB_VBD=?2p<)2~ZMphd>YVo5wLXHZ}&TB1oVU0LC}ivKcgT z=n%uvqvA41k>6%KE$Pn1wsWHz76 zN~>~SUKTwsE-3oP!Lp|IbTCPs0}sZPBVX{2<4V^J9E_)jrimw+33h#_t|!Zga6*nu z00Y_i623b0WYchel8$_+s5k$DKatqd#=s3{2o zkk>RsL-U$GXcIs+W3r4b zGOE5V%7S*9Hb*m`!AKNni{>1TNgpZQYLg?ah|-{%voGn(^n&n$gi+(XoZ8rxursfb ztji(A)DiWKu@H#swCa+WA?xt9E zLXFvQAHoJterDf2r3&nQc`&g zo~I>)%1OEL;`SSvokcLyFuhUG_GB&j;xxZcov!*2IE3|#1OT{Phy)YYsBQa>4jIAG zu#Gw!soAKO^ReY}V*(|Dq(@f|4Sa{?9noRXqZlLi?`R%)@57;SG(^Xsd5Sjb?y3R8 zU?$wEk9f3-rHKtES5Ecyvf(-J4Ix<%C0XU}45JT^4M9g*Jgje&oLC7)y%pAM@X1ib zx)q;&wHeBbKOQ0Hg48v?V_n@Gc~@`Bg7Z7J=ueJ0H;2ER56QT>t3uGi@-{#g&-%$= zkK8}03LNy8ez!H-pA7*8-b!-2W*)e7;x$6#bybaJJd^aO2`z8YamB8Pbx)KG>w zG6b6tTVwX)_-%FV8;}&{xb~O(r3wzgJWLLwv_09O(&#lJ zr^kL!aERC`E#vhZfeW%098mWAB~T2Yfg=Pg3BdYUuzBcfLKH03noUC$(v;tGg-TN9 z;8cgB5W2eh#At_72n-bBILB?r;~!UsFr&u z$!5J4yfZHZT()Q5vaSShTYMAQ$c*}CtscGR?1A!>bTZ-v?h{O?9!>n{h&m>tW}Y(T zj&;?1J95K{-I<{Sz2nI`__Z-bABsG+a1vCj8~>UqHzPxS7}!*DO|)~_g*Pb-nXp$9 z-JhHAPrSnaF}n1@E3ijrOC>j+UL`~k<>r@r*`75ntB(w#RC+~U-e`M>ut)C4f+lFA zgxXjDOfOgntYMN%Ujb|IkjsY0Z9_lacSxGTF-2NaJBCvif}<(C3HbKCXMvO-my)SJ zv{xqdeFtVDnf8!*90c`79kAQA7k5V4(+7Q%rMhPGW>=7kTQ`$-h@V~!+VMFn`5PlA zq|+IUCo9%p2RUw2NLfY)Qz8L|O+|6HFC0aOa(u_~W(paa=pnae3Ri*>AW>_Vv(JDV 
zm%*}4RjqjT_xfo7Ba1gt#qFB2Hm-8rNvq*0FS7kM;g0FMD2Lp{TzJ9`r}~BB()4H3 z=i>nR_Oz_^IX~G!kCVrzDg?s{76E7$IeMRf)sSi0gxXDufIvAc29ZR)$(@KjPEUe9 znAxETj1c|CI8W3KQVst1*7^&L{X$8e9nOvp15m?=K!W@k?NcH4?GkyJ@pKtHeyRP< z>3YD3$aaD4Palb6+ivr_|DEZf&1l^jl(DVzgeeqglDDfW3m`8qZaf_jjQ7(|iPG`8 zbI>P9xE`D08-oeT%$IjD#utDUq_9Va{!nFBXxcr>iV@pksX`-@_7E!35yz@kumP~S z^3FS4TG5?`@#}io1dW-Ui>Y7~#{V?G?zpXOB@ep0+ocbnfkrinH!njNS-t_>>-nlM z=HB(6-NJC~R_o($=`zvXz%7=*+-lPlrA_fvA_`@=7$V0n)%Gv}NcEMi>?SHzf?ipa z0I(1kvZEu#JHQQgy8w!bQDJ~=0v7|k>~#!g!`N+=L$j{;8FOouEyYM@y@j0EQJ)x0 z&@jHfi}8#jag&hHW7yOE>mBaavyOJUbk%h*&E}o3@5R-q6(HBAUoCr+sxZG6`HApN zI7$bC#LMR%>ZH*|*;$QR=e&vS_b;!nqEe>=4PCfQYeY3LyZA5RO9xLC^gGF26GXTH zQAaH2{)b{QZxAo{_(a36G}b#$`oLIs??6Ys=88H8PqGst37&1G!pCm$J4`V)zTiB8$FPMI^qP$-wPt zDOx1Eb{xf=y|7^$k0GOT_#*`d|3XKuBxyJDsli+9xbPt)Gxf3SD>V|TEijI-vI3$R zxOIlhyRc`YjnN7xz-lpa5@~g+2E<}{ju_Jf$_{ta`EIC}ZFIcs7jIbiqK2RFiJ?ck z8Nw@&uw+*qXoD4mc~*;1&U%H1s3^;i(#UWtzK*#)TG)nwE)gEbhiW-ye$1y-Jdm3u+ zil^lCbWsW~>0H!uxvq_j+tqZj@^uKg+l%_xtKY!Q=t&;ytzlpsh2_{JY6;7_%3x-l zy`vX~Js8}3Q3^nT`;7H@vYc%}WdhkglpPywok7hJRnAX^8N?@@j6Tc(Q3*o_y`JK* zZ2_12)(ki@@l+r~MO@DG`7(147pV=gPT%dako75fFXUZaWO+|AoOCiF;kt_AGU-LI)TAC$FFTTVumf$2;RiFFvU

&jnrj7tKnSbvuv4m{OC&5DP$7tu zK`J+oBh2X1T0c)C7r%)5o?*VF2y9n9`i%j_mD5$2d!~(o?U<>b6sn$ZJ zU)fg^mtXL=&zDvt+P^Ylp8Sc;j#71Iu!mdW%vp_zW)E{W%v%cKRkOaFy-FE%R;cK* zKDJ*_vw`{aaBpuD=!zIim~JA8*s~LRdhHHn0B!s+#TzUL>~UaZgsc5ReX1^*aMV>E1vh6v_IUOj0EHieGN6u z$g(qoD1U;eO?&rp*2_NwWuV`D|5s6cAWs-K#(&@VP@&=f3KKqfVb0y>vGcY&UZe43 zzKj--RR|_7(9px07Id8d!g-VG`0j&Q=V_8RJd;?F9F6qn7|*oN5Gg*;LSEAgDYpi+ z`%4P(jP9%fsS2CsOwq{ar*v6*6$McLsT5cA$jc9+FHIM`Gibr;CLiLi152rV%4e-d zOv+==@%*!Zv4lIvK1E`2-b5=(w~K#IDzMYD3j%o&@o9c98O&E%u#@&^3dWahobCe1 zji}M631G4>%JF*llga^MY@_*hNiO3j!`eisG3OTWl}<&wrr$P(-55aVygjfP-5+jm zjy%ZGE}Ff9kPB}R2m7>(_sW(;F)?%qGc?TD34vOH{e?9`p(EaUQjTn#SKt#L#d`9T z2fwfyfIRc$bM+PN>2Xr|>pmriTXVYk{R#MuF0ezJZ`pXc<0p0bWdCpw_*>jR zf=(3}+{8>hXAs!G>?-8)9Mf2Ee8Ur}2jH{>O6VJ&4B>0H-vT0`r&*8_6AUqmR>As! z6U=peNm3dcV^K;J0LpOd?TgZ&PTg}B8y-@PYe>%&*;(GUVF<&@VluCy<=Nj8-Sq|=uft}0VfXxBT4)N!CR0nA=XlyTSZ%+lO zXh~^lX~~5hr@~11n;OGv$shy1Y!zYO2B>zFmZ!j&BzRyQ@M0PYW_2vNyddp$g3VWK zT#all#A^#5fBNQjFSbs#b2cel8ZdDCM{s(zt4Ry<+`h>7cLCh+zp*kq|4{xgo2hs$ z?X@Q&O!v{++ojnV;Kehax|i?t`zphWcZvV+4bURr(B_0YUrw3LJ*lLBQ5Z&{_{00l z|9%4AZu_J@s5og?dU>CH`04mtX|T zfG4}hj;Y{MSnJv3RqarGMezX_nDSNV8U&hH$^G?`4KILbfY~rTDfUx)>ap>s!-Yon zQ@O%IkSU_ZVlNVUCZ;Fc56H;ur*7xv^~|U$sw69 z2;H2;q@qsl8TkDS8B z7&7$s@CO>M*gD66uOFYJnqo0}rrGx!D%unCB75Dem;X6AQ9M9oz7y`GSg^f+Hrd;2 zAnZ9>1j)#BVLua~YtLDnn_~jioGMKh>#=BA*3xF}i`GjCHmavcsi+DZ-in6ljpgf8 zxmtx+U;x00h*|INgVAgZo;1;1XRBa{Kj1Im$xlF|7Bn(iO1u?Ykvm^hd~XYw_5mbv zdUocO@uIW%OtVeSXC1JK?w1$mNF@W}#!PtfU-sa6{b$1$X@|A7?1eL%xM!eBkg7+X z1mWh7Kk4f3DtXt&rwtfr?Das#Vs|E{B0gT^{26oG{NF;^tJgiRHIt}jrFi=MwBaj( zlTFia?GW?9Q2*R54~*N6C)5m(*CRx;w@*AJZ%6_~QaX7TV~?k*shlEFBPyEcGs z1`=olm5jF6=O*)sFYsmy0Zx8_q?qm)$a^iqmgYh4|L!Gwl{7*zsWb9h%zYO9 ztS1G+RT=7^N~&x|*)w!Oh=NNy+w(C?ax)hMCK}$lC-#pZ=O^NTYPT^>Ey%f@JMFr7 zxw*YJQ>vGluP&{)G*mve?@bhE$tvc)Gchp%<_ynW5eBBcFJ8RBNv7XZprw)63qQMX zRa(e|vq2%*-@(NB=OFP}S`Ot()&n2)0x!$t9DvoakVx1KLTB}bF8D*xEDyFcg_4$9 z4O#&}4v_N5+hq?$ou0e4t(JqL7IdvmB>*^FKrTRW64<%r2>;NUwO9EGFaj$X^;RpG 
z_l=RtmLJT6KVRhJDKXfmTdz;D<7l*$qNj5)HVfo%-XyftG4d~j-#XPPr&r^>IiK9D zN<~qf|MB38N{C4`tfG%v&RpCB(za&{o$>9=(eRn0BloLJ)XRd?8-6OzZ>O_guUBEW zV+rwbT?9VzD~7<-?z)n}dw#bK0F&8Gc^-_KpPm{tdR3n90Ikl|u(ym0Fe3DW_}L`T z!PR$;ejo<-v=GtU9ZX;WUagf=sedZy$seIIP}5F1sNl)?PR3E;B{hGcY2yjZ6Ha`G zlkk-yssW76eCPP18H_1w`Z5_@WgX#SJdaodt?aZHgow)~g4YeOZ6hC_-gMG$4w4Ed z&1daPh=_0i8j=L-HwY$S*;U#cqxw41J;I{I#<338v%Z-?T&g*J5a9cVF}W0Jt(ArM zzC8^WzBnRro7nz*9oyaNai9VrBJ)pa7dVS;d@h4n*K}&uTUi+omf0~d90{|7U;=)D zKzepgN3cAo@MnDevj~JU`b(DewM;wr^@>T^B_sDUC=F?8=qYMaTf~C`H|sXB*c+F} zv*fMCXK>83ra7WqvZKNITNxCPU>Y|~GUU*bwztkY2RW7TSRc3%IE{J%Z%6-f4PZ!Y zH-%4@sC3Lnh$D9Z3JXV1KySSAVX-;j4a!9*m6mk#t%5m49IiHFr81;84}sYqm+d~V zLC+JCIAMKSBjJUnSB2GFK^b6VUV(0zY_fZFedU$pqTc5mZdHua0ESKsRlBTqcFIHI z>gcfz?f`Opx-Msgn&i(}046t96nVbC0=g`^m|>s<0!L?m-0H`agx^-jO(V>Hkw zQq-GwCvdaA3+>bVgf@471{^qp9rv#PkFoELr?UV5FA=2@X{hYIi^^UlnJ0VCkd%>? zkaQYG6v;?t8HbEx%Qz^?9=DNEMsjfM%b^_&H=pj`f9LM;xSexd*ZX>n=j-`= zzV`C#=O$}lP>4t)V75lTH2cy7AO*Kqzh2NQ>o9CEDg{ecl-R^63A$0=?2^nC=Yw3t zGc~4#99fntL;k_-eh<#y{uq14_Ic^0Of^B^5xgh22x~&jCt=%!;K0z?f4{sKX|pV+ zI=eAW7s*JE`e&sd*|{Y|bd~so7j4=a4x|cwxVJf3{|DvauqR64^qcX*PrQo%>P|Bd z@UZxh+PNsRn@!cvGnPFSR8-pUEii}K>hFu}wigV$>b;!r{l;VngRBs#7dr3gYI((g zsU`w}SBaSM1IiO+%195bC>&^u~* zDFDV>eV&UwU}*ky={jfKSsF#zGl|a=cGrz1h8qV5+E^QkU>$tHVMTEtV!}x2aCopg zCp$aD6v8y^jTQdWHH19>^AcOYT<9azK?EZ}fSn`Onpg5_SZZM~ucElT67Cd1p%w;- zoy_ANH-kc^%JWldj_!DB0hxr=(bSFU)F>Q?d}1yaXkOjvDag!x&R#Hcu?fP>ToRXd zS~6)UQp#EcI6+5;7_kG`^q5ZKmF9^vJ=QHsVLX(!%vbn`vpAh{ez3RVj_)29(d~EE zH(4Cz^_i#bqVe)N*5SHz%whav8vR>QT+y!I$6db4K-OcPEgiuN-K)N>A?nLx4BSk)d|I7OD@C0ZM-suW zN3k5BJ2ZFM%4$Wt*+`~07v_T5tBw5JUrQl@g{9IcsVp&iPWQh-7MKKg&M!ct*ZXrF zCkH!`2gSi zgLs;46U*=qmRy-%j{d|{#&@FlWkHlN%}V3O#|HE$^KlsY{gE@yy9fR`1^YdSNS)RUz-yr+s<#CDo;G^+ zecvwJUA!ah8lHC7sNLhWL$e8$tZb|n9Ur*2q7^Zz++l2#FJ_KeJ|F@)&7-M$Bwcbw zw>HYS?##VX6gKT}a$i+RiMYfOktg48{37&v5Mdss56%@agGzJ*MmC{1fDhD|pzjuh zzWaTBdhY8WXpq*YD<}YA&+f!LuwXRQ>t2NUTF1gFZII1JV61X_00%Wq7GX(%6ope} 
zDiX*N$W|J0wL!cex0|6g6tn>QE%t`EVFv|Gz~>X_uKw*54&iIcMwkkob3ZEbe?>5~ zBP+vL2v-MpN(nN@N_+LHvWw>&`t94dphhhJga#m_mMoEFthT8Hcth@hrGRA;&#ctK zX#3buqQJCtDetE1&>#1>@SLg1GF~UCxi8F~5 zNKFbjtcf)?61n6Hcck}_9Ty+y6U7-jx|45wOX=@ebbfl*q>s0^ z$k0!WLYHUYiE3d?=}W95^jVwl^N${%h3rHn%`VqBR`g6)ePJ>Z0c!xcg#}J<@qY(M zl33irvQ-jyWLv~hJ!zOu+nE;Iahb)Y;Pjq#sMo*-MV;@~(hMGGu21`6xB(I&H*CUV z6X(Cq50P0CUsAXe)vH?sVlF0fYGk(;P_dV*Qq>HCZ$nY=<{-Cr^%qsS=Ij$!+c;)R zs6iVOombvcG|Vu~^`wy$)sz2N0E_;d|McMHMnX~&OvMgmOJClS9TE%WoCAT@fLQH@3} zsi@lPPpB+%Y)C-O=Qz^L=tN(1V}eVHSt$V~mf5CLFd5=0W3LO&$<77lyQIgLM?$ss zZE#*r)2rv>HBchwr^-RKFdrB0o4`?*U7<)4F|Qswrl0b~NrSE7_{mH6)or1B2M(b2 z-T#X5S&lIgotbJsNNTR&e9K^|Y&xeUmw+OOQYLpWb0D#a{#Gto7E zJi6115eBZ$_MHZpYgg%FjA*PmRP{Wqne&Hy8TLOSQuwRWdU-rtT3h=Ajui`K*c!Zw ztWQnGLoVJ>xRDM;HMBZ0Kt-F$vc00I|=sJu$ot!@bDU*FM0B znwxW;dQ@3$?9-EV5u6d;SMv^UsY+Nq)Cr)TL)NlVH~dC>45PxaOpQmPGL+53QjHA_ zdll=IWJTUK))jB;wEk#Nry;fvpa-YX77N+&Ol ze=rj1@m+)8K>32Nw@|wO(#46kxWiyFIBwi!Xc+9>jvkEC7QyWbttOnl&++>UHnlW?0WnBidjOjbn=|A`)j^_DT8Cnxw&fZQ)nwLt2X)H5p>F_JgMIlf z{yLf>?z(Ot0gAk(@)IEyZUz+%n5eOufx758cT95bsOak;!{6Jt+2Lhf9L?k@J-elg)p7zZvX7CRi)OvvX}ND zC!4pPINu_xuDmO-%9gYD`B;rCdPt?t(ah{KbiZbW{-zwnb9+WX(QcCDT>^8=LDwPG zsFNeM5a+nX#0>Oozkv!M`_(k!ZawS_lV>PPFNQo1z99w>Me-uF zRjY&_v{l&@X;h48#p6Ztuem7uH*W(=Xh(SXdP_JGwS?jy`$YA2%aQEfB9v7kcRH&; zj-ZPq_$3ME9q|eZ3bljX0RDLb|HGStK7&Kgt~PlDj1jx@ZC4OKJns81Ro2=B))t{^ zy!04+5I@7)T?`vJ8e|enfM@H^SCkwVC++9so^z@|iz;+&)cEFVqEfcz6c9?RCqF{C z1o;aa-b4f13Xi!FvkW;J^pOMdzFA|R9rtta6XhQ!tdZY$aj&cUcz0}5)czg?B&A+O=H_W;DarbP}Y%^*$sW>V~1%=Hx`#WEW2?eR&x0sbqlp0C>x0|ZTzCeb? 
zrXm)4Va&lp&wHl=1|%U))+;s$-{AeA1)nkVS;jp?fNTg)D)J#f1h1CN}%R2{?!s->qicLD9f3@U0~0DhYEoDTFxIH5okxgps``Fm&*Z_rvL5OgTvD&{rt>_j|%1qT7|o{ z8lkE)Qb@OAT~KnA@3$iErN${yp01=Bg7(F|1e9!JT{*)**vc?akd;%yC`T}v7t4{< zBtU*nRuiGNJ^HAaJPh*;=wgFM=)V`Zm6!8QwA*sa%*NTKWH5zScbR)I2-&I z7xxFig}#=|Hm<<2!_iT`jSe>OfR%K5G5tX58|%fbfcV3lZcmun)d&VXwwI-dA|=sq ztsJ}L*?@kOPs`@u_@L}sIGAh&`rx&elDN|Raj@5CYJ)7-|v z+L#ia.m=H#~(xWKp_?ZUigee$spbU(cwoxq`!i0E2j96!%6N*w$$qssZncH(ua z>C8F1Tk0e_e@V@TtlHwI%|ob-w|HpV;W5W~ZLG)3JyK6<41G}vih(j~BVikQ2Q*G@Uz_5ukeYn#gN$mTjcP z7ju1nz|6mvCNO_LW%ubOFyaKhV0#OpBvlZ!cS`2NyUXRA`#{M|UlT zYf~hmdy=(*Jq<1q91;>ev@qm-&$(G6Es^ZQNKuq&VMAWVAIw;mY;67Zx-h>EFFtK` z=@A>1(-1hVy|>czySO^MU1s*h4_NLO>jt#^0aIVWmPz06b=m6uR^R#rVH)lMqO1+@ z?I(@e&dt5{+{v9puRG+8XHB|R63fB>cD&`ICRY?N@&Fe3n*A&tQ}@Iz(Q>U5I0stR zClVsfO4H4z5A*58xPCLcyo=`@yYP-Z#51HV->_I&bb}25`pj>#tK9>(!9r$d> zqS!sUilZsYUEFRtl1)t9pA_xVo36e{X0WaSfY4u=f%f_r065p6P0z{6!4))vo;8Hp zzvS@Lg__>`_wNG>yg=b;nSk*pEW?@kGPb3SA4QUMl!^b`L!C#phfHR7>(u8e5QH-u zj9MddE=aL37AMiMY~d~{o3`y-6s5jnA;w%|Uq@Bkb^UPmX)ap_fMJ9&M3PKlT{1I3=^` zaAP;O+ptKX@k(=?%r=SP{lqVo;`T_Yyd?MSW!nFOC`@Te@vQyb_Haf=cjh_17Tztw z3wnB23HUuwh)+w)$$3X>fV|k`!<6Ue2ZxD{dFDFp9vvZKmJ9d>JA*=B6N8Zur(fwz z88Qe;45c_Oe0^?2&8LFVQXx9@g+|Rct;fMC5t2)QUblSt><6riVT9m4{e~2 z%OP`x^D!sQ<)C11+QG`H)0+$w>sh^4Lz&AlhYFA#g2y2#4_*G@O$*cWYO&F2rMRT9 z*0K5?jS>fRgB(n2Tvh5^+z$v`>$LIN!p9eUJD`YV6nd*9!~|OJ)u{-O?R&K(C}iJB zKg?>W12GMTXElJjsjb-TAOzs&Q zQ}*5C$=iz9eM%qzEuX4~m7h^3%wLcB}dr97@)y$Y5!mPS_;jKaFoa5`tECR#Cp38G-}y8;ny zCSoT~R|(`2#Eyw~Lko&vCyJjB)9p|-_cRp2H`+BHq?*oG<+HtTpLu;U~Q6N}XP%wiGrXlH$k-X;26AM`S6}F%EXrGc@E`3wejSC^4^T8KC{18t<5I*Q2ki`z$mR z90ShLgyo1^n;PvM=ZN2dEHyr?XeQ)<@ zM-R!HaIr*U3ENuDH=s#a0@09dM$prz%$(Vhv(^nh&*~COZni_h4=oy~=DN)>fDkh@P5M}jd)fzbOQEi!?j`#B4jd^L^5Lpjz z9jon%9}jN0uk7gVa**jq@cx-f_G7?{>`q*O+D_#IAt%{pyJ!YN-dv(le9EYH(LC>R zhWhe_6@>2uBYnZVLLNZ@6QddE7GhT;k!-z3ueJ{FSfkv~LQiuQ;bf$0rd*P&^?-W* zc@sxhVL^CF7>}NSRbvzYk+nJ7p)&&)FW7frdC(-nwA;|6Hkh0H9!}r{FLTaJj7@VK zI=qTeaYoBw}^41Xi8WWmx6!8)o({R0CFj#qP9ehIeu 
zlrdHH4=E&@R3+~@7tt%k6Jt?#zh4OGx;^oaCt%<_-Zct#gCfY*Lb^E z2O-_n24(-(=u|Xm!C1J-i-{$NuZMDs#3r{v7!IT~cRYWWzr62MZVy!^fBWuPG}( zRLVV47yuhJ$a&8Gc6`3i(7~M-fEcwRri-X3hxas!%_~WkM43EO<7*rXKZUAf_fZUG zv=~+D{=4fCKUm}2*oEa z?lF-0GLdnOa-H93L zAz8C{mcg)5q{c!ZLYZx8_-mKMA`Tb@Q&ZkF)ioZ*ljP`1$`hfzc~N#tP1gel=ZS5y_6{TEJvY@7tGtSPd zx8hM#8#_ViC6@lBP+Q+SN6ynT+`dWML?NpZSA2%x5j{9J$|3u&X~I98AEtw*4+Ygl z9-k}!3ZzKz9XXFaa>7&^6vc?RK=vRoan)jZ7DGpf-i*$P?sakZOd{@=B^_Ab>W}T0 zj_W47DNcviM;a^`1+gBvcu?e3(G%=(HZD9I)+;+KF<1JISa0XZR_he1lMs%bf2s#SY(R6 zg>vc@AP+&wFtuSDeTpxoGAw_VR9qmB1*}^px|i5X?7GL6M^)1v{S+mtP=$`0P~ zvh>8x=y(2y0dhO%2Ltd?km_o}84fB(u57vj|A9)M;Yze|!RP#x)@#y?|HC!>E7SG* zlT|RN=g^sj-4M{oyMdzw#M;`7^2@hxuLCmqSbU}$$R+Ii=Y+u72-0*Kan~kp=$j~w zPC;SW8}N>Li$)G@d)M@%Zj75@=yir;7;WuF`dV(AotL`k6hhs_yq-UtfJVm&VvOTx zuiSLCo0~i}I47F3FNcS_`O!|It#124YSV`~ckdjJ$tC(y6}Y%`t9F9o=qY)>Oh4&M z28Toj6GnU|kf}IuW!n zr;}pVs6KW6cdYa88<5O}hDL4`NEA>g6x`mf9G(e{JS-kK zL*xc`kdTtmY{lP|nc)0|3U{&FZ*4-x3T3pCRL1M#POJZbv2 z=B})Iqrl0)?Fqa1)C%X3HFQwy=^%%*lgL9y3jbgmQTxTX+{)_>d^LpADqiZCjyvbc ze*TVM`swY&1M>a8%#WV|+evdE{oCo9-K9_2FQQab(!Z$&Y_hOaDbaVCDrQ;<)=3Js zAFUrAZp^i%y;9Vw9=JA(euUeg=fQLod*t9E%O>g{zwHy8H`@w*BxA|hrV&-MI#;+@ zO^j^RTa$i>#1$%IXh80JRemzPyZfe!2o6&Y9C@o=B*-nkF`^gEQQef!Q#T2AP^}Zw#biUsfUnC;Yweb2R zrla86Q~O~y4#&PEOf0a-_~G=qZ1;aSc_chF9CywXxa$PI7FUmsZCS13bF zrBNQzI(9ut7%qjjx200HKjYWmzj=BXC`ni%UE4}KpvnjF3qws2u&TNWove(FjbSZW z9sZR=e>+t8z~DAcsyXlk?eWvQ;}!1jsMdaP;QhnH4bE3gMjeHFQh3`F4WdK}mxX(c zB5A`ItvYMZIDepE-|9`cBW;1a$wTke6j+;~@A>+r3M{(}lGZiU)vta?a|8Iz5b#=s z2jvY+{f8Ma;jTnP?Hc6+EE*T&B9wr&?YJg9-X2_I|;30@N#YmbxvSR?6ER_q=mt`@IL> zd<1`gLMI$RBIV+^!ro<-gC-7UUt8?wZOZolV|b4#Oj{`JYTDD0C53|suDAFo%X zF~f+m!Z`l}L0G0KLJaZGV{GoIqdRRIf!Bw}XzR25uOIS~0n+i2Z=zSyHT4vfI!cWY3`LiGoo+I*jpO&*rx% z7HkMsPa7aq;{_XU@biYUCmMCA4mIOSTtM$9ozkIWIM5n6mq4<%L#xQ{MQ@*0L1L=* zISD>Wh`Li@QUS%|GFI9dmU;_hwtu?Saj%GBjSH7sqrdb6;#&~M!|E&*^sx&wY8V~C zt!zy<-#kb=C(y_MX|VdqIu3kP0gUR0O38gZt7_-iMaU4&=(jJ`B{-g_y0~}nw@^3; z*aZYr_80LC_ 
zE=`>?83c3#@vMV>q*2(j7_2bBnn4j|ucoFp;?U_Ytx0SlszG1nNK!}OZ5P{$M)mK@ zioa9!<>Y(Tk$HD1@C1R4cqO2R2Y+fv-1-WBW(+#6G-> zd12xHt^?q>&FsPA>;ZiP5I?SsNRFS6dr6eY>P?v-k8RC;m6er@{e1K6Z+Bs{di(yV zA7r2~Ms)ImExus=e+t&JDn9|E?jB%u13yrBzD)xxiNLnu!wHInV??_+RbhdQfi_{^ z}@r?ghjrVKJIC)t9eN-=~YAPqk9SiNiqRykTcflP8 zrdF8}R98I2W0vRl`WGr1^}*PcA8AbvQU|%Vy?W#p=}Ju}@c5)k2R^@E1efS@yl(5Z z<(G>MVQmKd6gKL{f>Xf;@a-&6h0sD;(q?&Wr0M0r#=dSa(Twt1U&>~2Hv9ZS(9`Vk zSs~(Ag+SZaDZ7{37H6XIby63*==$Mm0_hIQy6h;+p)t`O?HhLe`~SF?Q@VXOaWqd# zcOB>p>xf@@uEHxxqv5pXnWP+%4QnYqysSH5tN^D3%ad9Hj6|4Af(e{1fnhcqK+~PFC5)F z2*Qm=_s+#hs9#25bL1rIA_DpzWpV%YvwHsh*rLBt%D;E>httzVUk+{zwtbr20;f}D zf!NXuE0Jd##rOf3y#b@=DQJ4(&3=uGtbfN9QL%u_r6kiZ9itnj{o>VE5?|f7fq5r3 z`TT^}hH@Do1fLznSkiU8o^0tg@~`kK9VbqN!sI|4f_)VMJZq~^0U$GM7?|v~x&v_- zaqWi^m=Cf*8c5kM0_`t(W zD7gIkVE#l!%O9?OJ29661C5irkZrd`hs|FvA9%L*#t7NR#5`XCtzWG`{eoRfDFlAN z!YAHX)`fLJ_TG;!|J2UedrpVXxadYx3BGt?AD;Kjp4UQ{j#E2Ec~3gC{SN?P^SR>> z!va6(oR7m?04yYWfsO*X-O|d+N3b4R(69aXC_wwL0PJ3`nHqNd^xv1O85j4hxogt1 z;0av<3hTOf=!>mr&ghXZ=(yCh5F?$5_vpq`zdzv3TaD)W{xZYpvAZYol=UUBMYPPa zSV_tSs_m_IEgepot`L%EU}k zYe)ZoEt0-Vo{*3LWVm7AbM}G-3~aSPM&JPf0sTD`lS|Mz!Merw;NUh7<^(Y3YpSag zdmVU)$d5@C!Gf;pC+c4=ccse~F77Lzu`uAYejyQ+cPzA>am4lPS^Da&ISc<>ew+aD z9-4ch4$og71#{@<3uBxd9GUhzyCK{^7k0R0+S&))AK9x{Us283=U?k+q+`c)WXOmK zpmV8BB(AmcTAQ0Rz9tj~d?Za&R&b)W`=tHa^^`fGC(gxPiMe>&-o=k~V5KaogOKuK>~z~{-q~?yX$DD> z-Tr%~hI6TBJ2Xo?_PDr5cNJRpKF`snvAk*IbDGy_zvxy$75}ubO4fNo&%9y!R^XW91ER)7X zYY1A^Kd4fhw|Dasn(H83t|oz1Yqf52V_$~^`za%;e+)`w}cLLn+9E--+77L)KjNAVo&p9re z7`RM`StSrc1CE?zT6KanH3ZX3YhdgE8OGwRJBVRE(<@yh9#DykOV%u7x%kRj&fs$S z;t@yp=m;MIGY6&BWnONaHQScqGh#vg}*r7>LjnC6ROSSEmd!g|0wf7c7eNrcpH~ z%9IrNL7e%&HO$YdJo$AwltrMRN9dI#6C`BL|++H zb;sNJnR(x>o(^SzU4;bZ5lWq&L+;J=4)`yd<)I0?@i=4kC6Y@R?Fqc5pGcbSD@n{7- zm{CrH*B4otlmw+OH~KA8#flVf`|9a~CcS6Y2BIVK=HuubPTRyokEgSy{pJSc|8)WR zc`K27{7Samh}9~+`zd190`4_1(Aj199y&3kSc9VrHsJtNlMXDrN6e`q6-&nQ2OS*m 
zL*}N56p>^hI_2SEDfH?U-FXfMW+>~d(mKaNe0p*fEl3FVa^3&sMcnF|@*`y-?ku7=$Iz+!t{9a)`a!uuzRT5nuBjm=&8>_1hY)0mvZ;T^UM$%daD*5}~ho_V5bUK`wz7xdWlGdccUBlQ|8D8bF~# zypCXp(N2><4s$>TKkbHEada*z@rZUxn89?!*xZNR!;?`or`5Zi+dYzUMccJz3Z_y)U4u0fJiY zg)2}sNh>IN-wtUb7#&SRVY%O#)y|(jJm2oawk0I^@z1r?ema z>L3Tt!sy|kLr^^l^-bgVa_1yT-aQ%o*Oww%rBVlKJng43M@+0k&l?r}&06jEPtv!@ zl2cdTS82T);6jOitpw&WLXY3)kPR<#8CY=(;Euiw94`W2(@@TjsToDJ6R4J3gl6q= z-f-!xk3p@G`09@w&=bA$esQ;rFz>c^jQ-d@_W!IJeRfkHIs~3fh4y`=(0`OIkEX)T z9CFJW-&Lx%fG>rixILk_=ZcTZeimf)XX zgxw^;qEnh|vPf@|gTa=})FO#U5F6a!0d}rZ(3c7*9bNufjTRukHl+d7Qur-h%(;}l z%5iPaTCYts5auIBzv`n3&aJna{LeY%FE1b&U;n&&A{|x}AKG04Zn|8)JEhtjx~(yL zVGTO4SAoC4SVZy>?^xK_v$*>MT9~N)ECO+a-~KA8@x+*LInl=eqa#{;EI-|$hh^&V zUP+%Ot@zU0hnRkU?3+hYTAU2#X#<~&F1hSgS6Phl#3Eo3aJBpo@dmT>0xvhix;4qn z_$*YIEMIrBIV4KheG=AtAyQvK@RlY+Q@IwE3bk|f7TFe57`&WvM1BwiI2janyZ?U$ zxXWk&Sez%iU4TL~)FbmUJ2^L`omTwjH06cklnLZ!#KH1$nNVnaJf#tBY`>1eVVK{U z7i#SJ8pLSz$enR9=LgaGm%F%`Dv~duEt&{>r$Ht;8{mnRX0eQV&G^F$pk5AkKvH`Z#=V@RgAg*tg2-%x|s;Ft_X`))tL)LCow|rKA#$zdoJoDDdh1VflXc; z4Vpop*k;HOT`=+Htmn2a53^)VrCEGb&Peo*)fhV!@Pv`;FhQo zhM4us|GRmA|Na!G5Br4;(&_0B$9^LcxJdYzc-uVAJFo86Ff!y@d%=e37(Y+_>-B8$ z$L>k1sXbwIXJ~_rUAKS(Yn&Sl?>Ct5z)h2Ja@R2OnfqAqxvnpTG=1*qmJ+m0P)W6i zM48~#z83TPHZhfaIiZHH-O=4C4T?A;P7kG(QGOsb9r)uL0q+Jer95~YEW*wmFyMV~ zkCZ|Z(OckT_e+=asj6yT?VBs;;j6>v&b{u4YE_Chj(+&3%1qN^yT-CfUKyv|HlTiF zw&cu5?h0$=J`X*pnU*=J2u_sgziQXJWXErGnxQ37m|1YI3D@ie(dlIi3*rH2k{Rwx zn?2F#iwqlRyGQjEKh^xD(Rpj;)-QKy0`^k#E;94*@Ep6tI9L&Ct!dG}8PcxYa;YliL0G0+T8qtQNR| z!aA8eEZy*$calf(^Z)6U`%#5+nE+;_iV=bmp7x-pfb7*M=$qU2Wss6Hni*k4vsVUxuj=K?w%wNi!ki$f2qEv|I?i*|194U)r&ao ztk1Q!wyrEr*#~xhl#HEv$eqC<_@cwB8p!EFx4umm^t#tc64??n9U?13VevB^1eb`W z^U=mRKLTapde^%AAb}ZXn#$bBH`D7iJ%{*214S6?iRF<>SJwTw z2p#K5;(8Z%jr!KE(p95nBLf1I0$dPCemunAKE-vDKkJZAUnn>@vlHZ}>=6YEe8CS4 zu*MRAR>wd!c!A#2BJjT5MKH8{HSaxHVNCp%I!4lRZf(MRDfB;Qt&y9>UWH#}=VB$)7;xfNPJ%Q--FJzs&&a zD}Bz9rD<$z-f&twk0;5PD3~b^1WsXSap4?_akM%|Hakg-1c; zoQ+r$fKE>J?h7HnF1kUHli1|aVWgw0n+!B2h9@w|S=1@W@6*)JjOvBylre<}?uK~x 
zuMzC$b^IOQf*266LJZ~;E3L4$EBxf?jy@{@h5+i#TMWbOlr9dc@PSks9RZcDFbfz`qVH>Ev3|M~80e|m>!4N*q+wSu{F%BO3A_gjGm z;|aR&>>P?3lp&PF&>rk>{5&TU_Z*7q2uF7fWv8wO9>*omS=Bw&P??I}B%J)~ZvB&! z&ypP(QaU?_3NV6-FT1b$AKd(+_}BYXHPzKO!Lb9FfF&S+?$QFS6WA9y0NZ`RPW_cm z&GG!H#7ENKh_v-DzmDDk44LrHLF)fM&3iKmK5pSnBt`S-n>|9)Yq zjQc?l!4F7m-({Gyxr5*~FeFvNd88R$*^d-@*%mc0lN_h)CSwVPfzFFLyXRS;A?B-i zgt63r?`MBb=jZDr8iJpxg9n&>W6OE-b+J+4qIqfU*@^G|B~RG8icwg*~4V zC=}M{-i=Cl2%Xm`=#yKoiBPaCb~lbPV>*&#{#YRci)cR)tY3SPt)uOx&4lk(R#8Em zwj8Sg>kb8vE|vf&EjTek?bEaeO-dOt9fTT%BfIVX7whc%J<249^E6(3AloSCvdW4l z=NLCB0uWt;DLPuk%N({S2z-1+ycP-nmQw}pOV^qo>y|5uC9FI7Ish6^aIf>Fp5W`kKML)bfK`Mrkmb$fd{kdw4|?`a=7s!_={)oFTH@eLuE! zOjs2LL#OKRiI%Wh#`y31k6+EefF!{JvEwdnYGIaL%#^wCpRtjV*QBn)omf!$`=qF) z+!nLu^oA(A`^Kn5x-+2Ee@qs@zuq&;>UZ6Maq0y?(FA*#Rs%+$? zKkSbWH8su2%skN?CI?$4J)fQ-da`Rgy>tvkKBh|`cttq5?-fik+pEO>AGt4B*%Z`^ z3u4+y&nAj%+u@yxpIH0;2vH}{XU(H~H7i^-Z!Hzic@&7tltsR9{WBh-$-KSdmcr?x zlYr39_-KF7Yh}!kz;;K_k%BnOm9R`5i zC!(;8Tde2a44qVG??f4201H~6^uSWb&t7rAW4b=2Flb%r{q)Ijh3$KlA&BX4U_j0b zFkfN){I~7b8U9eH0W&9f7e@{*$Eqnq_zn-R1neSM5eDznALk^?teudn8Gbvk^lr3! 
z(Es8H{^+{o_nt2bVJg&8`y-6}5b zt;*kfP*EJ};tiL>{mXysz2l_?hURLoR#8aI1vO^DI!!>H#~l2kuD{&{UC7DF8B*Y< z2qvPYI5AOFFQtn2lbJAl62+0$&gw(c99zJ$D%kSBLS=Fi5zKPyVRJU@t&NV2ErVZ< zZ1{x2H7#lJt>t$Ll;#U*NS`4&WdM{ZCX_^iEmD!AJm|R)t3wE*GBDIXeDT$1F|N|P zJ?`7)-#K0Gfvx~#%Gs8 zuKmTVNcs<@y_p2;|Ca!R?EC`7+c_w(kZecc-4_Fv5U}R)@_5BN0||RI-38C?(paDkfp)IeY`X4 z-Me>cZ9$5!V2QIp+$~cBxE7e%8Ufg>cD$^J;xxQvOHqZnbN%cd;6OeOZd;yE{b(;5 zeEH|}`Fd@yA-IS}8&ADC8$WaS1$5zxON+23F??}(xIV%OY-Y#ml%s$-gr|MDPNsPn zeHMsMryw#y?XLoiysacUc&g;;)uj4n5&;#!J;mt)zqZxekMbxx{FF8{lPDK@xnpGm zqJT(&5D3cAI}mKvbWsPG1WV8L!O`cQ%XAgkKxtd9&x2yyvG{4nnNx>7>bUpYMfctl z!FlIBq8*$I>xf?8DpH<7lhWOkrc$&;D_YY*`wuT{YRHS!&v97X!gC4en@9n#<`TiR zPn$W`eE$RO7yC0S_KCr7VZRt-Q#J=T7;&*5O~ulkvC#$JoQdMQc@VW1V9KRX(}y?+ zU%YW6KOhOIzRids;l@y7Zr>{<+8U`)Rogx!#Lac&udhI+t$Z)@{W}%dO`q{A(A|TU zzadJ*30R9j5DGfGf8kiTO)oSNh$7jNZLXGQ99kM1seE|00)E#5o8=&aY3s`g`;Y>6 z1$7RE4UFo&zJJTJvR~E|ut=E&S-}!;6TGrdr=dze>(YbUq~I&te(y)y1|xakX-RUn zrv~t}i_m$DXTdp*fx;3_OD{lJVE&vw2iwyES9`1h15xi-1cSL%a2QQod~y!#Ut*p= ze|5Z$jmVEtRdalPX-`3L7*gdP8gDDsCx~nbaP2SMHXHfb;CvRlMwD8m#dpF2n)s0X zf`0bDP+NiFZXj#_Ky4*mBCJALj3bKY7sD5Tw=mj)r62n2d@fFTJ2!7?y`+a&h#IDV zB~4~zOXTH_f;5)LYE|(#fFC$IDC;i8PfC=>48EIeqg^XX?|4Ju$STR z-vm49v%7O`Ki&X$aO0to5UfO|1Rm<aq9#=DUOcr* z{u><5weK^Y@lY61i5aHL5!)+>5rP`kv1snvEKrx?e_xXXNu10e4lFFp79VArySp!2 zW1DTrez^w=+j zOIr!I2#Mj1ATi;1T%$@>8F>eR&uwdz3N=mEQLNdnt%Pj#*1Zl}^ghy^MD_OGDwZSF z)R^rL{sAujI&h+WHlTMT08Zz@!(>oHIbz18!dvHtpGWmRl-ect^EqukyI|dZd#@D( z;H&sQd{}$xN>_Gb%P?fG@7-mbja_#x)NMGe!okQjIMk^Sw`{cQK44iSnSnwX3vw=a zT(2R*fXEB5Q0K7^6VI<@_%)^CU;X_Z@|xS1R%(!~$YA}0EcF{$&_Ocb3t3Wdo;w-BQ$Kr8EGB*e1o zo}O2R8*vJfK3I}h%K=yF)sye@<@S`myI3fNjkZt~$XdAv%#&$3NAh)-(rQz<|E$k>k4Uj z4G7K1*bt0h!T$cfSK*^$LdcRiuyPl57?7oMuW*gG$~(ltF*Di`8+J)Is@ovbHIyE2 znHc^h@R=je7A}O)T>euxNJo*YVito)2j|^ELNX1*=i4A!Z0XFOVhv>ih*!?N`3+z)Kw2%Y%!p#~9L6lw6ac?2rXbYR`W`wNuz)*ajy|wVY=KoPKZ(i2jU8~l8 zslccL(nf@c(@-$05+KP1a6EvTa`@oE!k$SOjMNThmpm38@CDMrXW-O@%VqxM%M7~4 z2k*QI1f5)DtyJkwqW3^>R&cUy!*hBkF>KsWv>@ 
z0a8@F?BUY|`nnLlkT;2{FT`o}SOp*6tGda7?ci=dY-!sJFMxu9-G#J4{MVZ=Z?cAd zMWqIujK5j8=LZ?xk4sfG;RB(;dEEL6-p$q4@$zu@RJV>Ppg3xn4m;Unzuj9ZC5D|E zQM<&?v1ml8r`o@{OQNTzhvr1pWxt%K^DE$NeEarnp-BW)U%st@qM{-Sg#tKius7bN ztX5dyK#^tLp@@T_HYtE>fRPG$_D9>=i)xSel^#~{fta=#Oj)IMyBJ%88BEda;#wmtBOb8PR;C?gqLI;go`my z{lb|&FTRhIX}@y0qKd07%BSgATE+S|G@?#PT%2=LLWJX+4|(&`t7vJM?`*6umcgTT zg;*VYImN^avI_!10s>YBjFIKu>r4U%R%16-FG$WCy`BP-2gbw-aC@J|NUbTL80pE5 zX^5J*6B1!x12Jk2=$NG4>+gY?=nOo24w)`z(P;3wk@#A9U`O%$$F$t8MPRZ}6VOmy zEet+%Aesn`Hj-=42HMmNvV8%Fve#-bI8eqR3s%dYLFz}!yl-E>NG%N>uqGX733A4R ze?1sm8p8&i?5Ya;4KQ^$Sg^EW3=Kv$JqJzZ%oSvfjSUJ1TbK%ZZbUlg&}%&a=D!f| zcb-sZ07>io5Ye2!bx%>=^=QZ?;BY3dq|_>Q|MC4Llo(89&IX$I9UFS~rWd#H;>u6W z4JA4Cp{b2xjeK*RrVu?$(eNopyX!X2HePL?>uqDYJY*I98`s}>E<=djm3XK3mFDHQ*ntr1RxelrE3n8Vh*|LAf%BDkvE{DCz79HyGFp3$C_uWw2k#=H z^Z;Lw41)qNW{I$o02tw8-ZgfOA_2lDz^D_?5l1?{=hR5L1+*Txvc!bQ?h8Sj<4}`M zHhHb*)Ca~wSH58M;-lJejow};h62Txfj?Y*+ztv5=+7PDC4UfWvsy3MRIL%V%b7;; zvI~OWf`uTGnBNx2`*6cS(=6i|f`PpS>Xa4Nlc!WF7NUtY`q8g#Jv+Ir?|T z#KDUF{f^XN1Q@?+q3|j){GG^PM?Mb&WqR@L!6z5izXp2AUOV+(0kwUJ$q4j)Yl~1g zI4|vv&MlDWY}dJ7eVEiGYWz*pGP~qr$%g9ZMSD(Kam?PD5fQ^(*)&OaR$1k0L%)gD znJnWyg3RdT3u+Gpc?AFlUN2R8>#rRBaJLlk#LmDHSPs3RfTtJ9=Fu4EzauzGG{Y~w zw1}z1llRReI^lLa=_Ie7qTY}S#P=nuEuCOpCKXSMsk3-x^tM39RsWi+tn|z%U(1bTm`1%maCfS=9DWPwcOql?CxRbInJ(WAaru-Uq6 z8zQZax6*}NWC6!PIuyoMV)|6^?AOx8M8fexC35`#kmg?|z;??%O%%x;~%x z`!$avXn`=bOC^(e8M3cN`^zv%#6utrhx$)y<>SbWZj8tNgve0T?m0wRZ-8R+1h%#( zC^V<<$7dgGRbdVDUNb1?uif$fvB|v=E~QL&xPg>?0MZ!{|AGK#{iI8dyCo~B{vN|G zFn42$e}Scq$=1YJy`XJ*%zWkTPVM1tEx;-*iYp!@x4S3~qMDp+3uNf>h?_guwogt4 zVY|4~Hs>Tr-$d8Jg*z#8!S3Gns?z8f9;tpR3uYO{VeF4W@=DbGYvWJHH-xvJKg>Gc zjB8mU-Tov!tVe$lZkh_=@3!_I_T-x>6Y~;o?FV&@6tT&~huU0SVXdoHpSCkSgn5jNMd(0ZpMM=h@LeKjsbm?n8U<;Uu-Ez4H|C_!;L~xoA6+lA} zlS-=+r=!84e|#+Z&F6G>7Xtr_>>K(~Iogr2v6R1m>0+anbB8})qD*gkBzSyh*X0PJ znGzB;V$X5nd`er zii^gO#U4Z!Bg@HXbkpY-1a!Pra86Blk3Yv#pxDr3>t z4(WOIe%^xTWHm<`s){8Vjg$nqqcjc!lTYH8!AOaiv&)oih?~k+T$kjoS`};9Lfe!b5uoY+g_#VV!(21(1 
znSEB6j^M7&*}CDx+BIE&PEPc&J_>N*Nel*6!BKltxoQ5dt|2!W_i3w(?8*hdX`3KiUkIjxrOShsm4VLs$Xey4 zDcW>(iN6E`^~fP}(ou}^QlwdLmE#bqnv9dR&+qew&}qBANL@$GDBd`)W7Lox z4_mRuRM)YPCzn^gC0^Nri@dA9oTKS(0z*?hEMAzbuN+=f<5jK|n)B3_jr+JcB50vf zvr$(1LZ_;6@^wE88=D7YABU3w=5G!=Ibt%Sy@;qa;PR1*KRQuV#*z(tXQ4(T%%$%u zD#A$;@0hkDLm15Wm`=RH;Fj@5e<(w^AhpIx#Wtp0@#8h62n*jMem@$#WC9k20p9Aj z;llBHU3~}5GpFTR|7NQ3a=rldx*lL?z#SVhxEPg?kPsh#)hZvQg#@XC9F6YV}n7VHqp_KP%)-LO}&D%87WrU-^?+gbug^?8r;Uv z$)FKrCz)LYLxyFlt>0-@szh90rRqWcMN|QrVuE{--A~Ks{Nf&_+2Iy0ztgg9SSp6W z2fwnw_lwGCb$t{4|9A?*Ws;`TGkt7hRi~~!AU2Vt);}PrPRAwxJ>NJhO!!n9PYRD{ zxTJC%2TpwUh{9y-!fUEoBwLVAa?rHUxtCbcR++$;E^8sDE-8X2egMauxVjNAzoGt0$7gVr4858Dwe~l zyeffE%RUcnTFa8EDvKiYa%%ks(%SPF;5;cYOpq2H&_ z`704cW4hPDiMoab%sI+PYu0~a#|`uN?Mh35U3ku6KP-inixmjOb6};XNni)aE(ZnU zn;fsyw&LQuyIywT;RGJ=yW`cv63=+D#f}1`uV?HD?;2%*J)5q8BKP%esS=R&LLf{{ z&TD2m6ucq3@IkZ^jl+qZXEW1{F34;z4?Ev3oLq&=Du3x72>$_fc7X2Yo{Wd08<>hl zpx1v0!N3SOBttyz#%5h=lF{L131yPZR5?vu4Z#ilGMjk88h+)L^@XuoPYB(BFI!tf zgJFbLLd%5hnC-FMhwy(=au_wkhRbwfaH2D>nNQ9R8}V%~_5y{n%jV@T^uEY$n?xwt z;|*N~lIBj^V`quk7$hjTL2bp@c;0*eB{!AGS+A{&%NOR`cc=A#=$N+nQK!cC0q6Nn z-j>IE?f!r3k>FMFttT_JH}%cJlBLV`tV2l$`Xu?I=$CSEU6^UA{WQ=(C+5?W(}>B)NH1whuWBB5Q|nZLw~UqkdDR&`>7D71Y4HzTRo;W zzg58LS_9bk@RFlk z?!7h(wIx~N4D^^ouA)1Rl7xeo&53DA6h{>pyTLBA#n-F=hNM6>{4ky0c1YV%e#53w z0{sQ{ZHjA_4|G*|e%XHR_$x5^t~Y znzUYWyY=+|?1O6*B_QBZ*`8NdvOzKiK`yq0xP%b`rjrl)pD$hdG8S9}#7(%))s~|q zR??zMdeAs%5!6t~${Dx>U0*N<%eYU}x!^VqS0ZbXZbL8M+7Bjemo+~r+1!7#%YC>} zRtwf51AU?J0i{7g(<^QifXN7&x&7F~7mRap#b3DUBpK^YXQWfJe&Y+>M>DVK zYCZ{&Y%90{JRS0Xrl^16x?Z!iXJ0V3XLOuxJS0nR#elm~8+{60<}LdJH#K#$l`Q=* zZO`_p6nVt_7btNOp9+VGV;H1)i7e;y;Y{*t-N1-^-XIAzg_X5q)O9X*Bk6-Xpa7RH z=7e*;HP+MCC(}UlBI&B2^Cgga&qcy;-{oGYY2H$9bJUL6ud^XNb#ym&0BP)KX$hGN zQOz};)mPFp-q9$p(h%%5=uqjEifmc56HsrYK27dVnRnftpVEGq-G3Ai9^WNUApF6Ff7F+{_fki#KS zFuXhGr2)IUPg!v*>GXdLApai3!1W-B)6f)pSWX*F8P?$(@lww%k(lkHuwe4KLh`@_ zM~Qh{Y`7o<5`EYVFKvB|F|C!pRlvBavGQRVZMvQJv!4^>K4gbX4Y#| 
z0()29&H&=s(Qy^YuT5vUz?3Zq*kZeWlP0fD8gI2;{<(np_zeINPC66>Uuht`!6?0k}F|@iQk<}6JI>#xD8q<7jD{CGE?O4cT`eRqD_~Y9hWW< z0e16XF*006)3u-KSK0fVJaB7r>@xbRGu1B6VO;~_G?Tk_Anw@1_$nnv7DKHe3RJ?FYW`d$~5AubG<^j}@+3hEV&9?GNU zJ6WBfm2}j*)Sz@v&i@HiGCH4F@{_gs%(Xct5KW8N$Yq@$Iuo=Ne^d3!!bJZNX^>_$ z0Aku{fpGmbC<*s21uw}n;z-^(99_v?1s@p7DQ&%E%21Lg+eXOK()ih&V zAi8A3jK^Pc*3tq?2udn9NCci2ow1JQkB2!hB}kAdvlb%v(yL$O%v*uS0G=&Dg9r(u zspn_N?yJo<#?e@K!40npEi2^YF-?rrM%Bu}XXAIo~++%x>H9P;!I^moi( zuk}TZcp0HFc7B){ptNXPDx|-DWK7(Vemx6w^}HJlQEuQQ=pHL_3fpK3w5ajwOdhnB z#sqh}M&t~hRcDi7!M46jU1xHKGV6SHf?QN5o6i-e;ip&c*(<(PFwTS$rmZkd1VqD&N)jD;Z>5&?>+Dc8j_*p(~PWvp+FKh=B^wmWX2`PW6 zt$?r`l-^QaWIns*hP#Tmf8u>N;QJ%4SZ+OAtATz#;`Sr zsMvL&k?5^CwUEakNVq~@C14!{Zae2RnKETosq48I^F&&p((I5(*6=vJli)4PPJZUP zy0WsyPPqn`roJ$LoMp-xyp!zTWw-dcZ52IC23{z2CIe2tA&GCjoflf@y8?UHjY~eF zXKxX`R88-z{=EdR-8XtbC-6txJaWi@sG?@vTcC?ds`%yTvFqE3f^#F89zPH2+BNoS zbKV=V~hKjiC(}4h9f4Z1pN| zW|{cR6Y**khD?!daMjWT7vm{Keg_9ee$`N9M9RAA9i}DwfB@lB_|j#$y@&3T>zZ?@ za^~vOYs3t?Wx{8)C^-MdO8EK-f6J$))f36rZ&EMdTYB;B;lr$En%lACvac!LEvNJ3 z146T(Nw-0kKAbOW#^l5rmH>`2>8~~0J?i?51cAm&KpUi-F&1L7*X9sff3r^XmSQJB zwQ#ezOP&&v8&1jR3i*Ex>e-U*;F$=3%sTZ+3y_yR&dkpaSZ<=Uo$eSMm0&W#K8Bow zV!Cae-z%60bSqum88f-=_6*SkZ5&%)x-$T7gCrf$>hakhp7YV}?d%`>0+ec!ysg>& z{}ZbFKguZFMTU`rhKJ$m2NPK>q@$ssoDoqCN@<^f`p?}AVJdXJrI8A$Etot5-s(cL zlul*A?-Awi$HyPj>B(`!f%8Zz?ex!aD8|co8iI5^o_ffdnjH!Eg?d(99lDQ}eQb2+ zux&^)h){g^3&-+P{`9i~`=ps)n8%nDH*8>@ZwaN_Ffgv_0_W*P5|NU`vzda(a%`e-p7aatVK9B8}=>dm@JRo!q28`GkE zwKq4610k)2$A3YVKD|tdk`4woq0|DExR1^;lh&KS zcS3L7&F;i**Wa=nd?o8JITyY??U|xsPBFQlmo2!h0(-;p)2Di>y>(M-*5-i}S%0a- zTLtXb4h<-~1~pCNOQ5*iUA@HrrRK24qRcEMR;K*E`)%DBgGp7AZf-hHsWVKOimpD= zfz!Oz;C#hJsH5jnmCT+(Tf)vYAZp?TG$@veF_*eL+KNG;>6Ln63y@!k`^_Ko0Jp#- zAM;9E1gO6>*^CbMwxvrpfRvN`dJu|P0NSqf44vu?HK5I+qs`BNRpuyu)ll!O(}&SW z=K3BzhofZG=|Up$qXry*q_t&y6n@-ct3{zIK=lDh?Rx3JaoIi{k69EdQ&r2 zW*F}(Y049WR3QH;99)U383`V|Pw1|Gb%O=$CDgptA>+@k{N*l{ty9eosWkTqjSxC9 zr^tSJ6bh@tll}KE5=!Wf82>8SI zwY00nHX}P=utOr{Qj_Q9y5%%YG&Cu?cz&>&`8}s)XC!q;R_*Q=KghOvg8bCPJQv}B 
zZEa_pxS_c}-*oQFibv6APJok^ae#TP31Wf~?j)M_7j))Nfc2E!JDOVBo2-S~aBOl!Cc z;nhEOI~rN9V#9WptrelSoeeDH5xw_wnJ8|`kX6E+8Afp(mx-I4g!Jh~D%p8P`SLD8 zLA_qnp>>D2h1WldBiUgR%?2{zxoXV-yjfAYqAt{y#{9~MZ5X_1j-Y&%N)%v?KtyXs zvwgb{e1(>xVp0Qp^_mjhC%78$e@7g6-@L^4irdXH< z;tp-tc;5Y}X{70WmqkphCrD+9>bYazLJrS$m3}Gg~(R537xJmCs_ zkQy;R{m-29W0)Ngvw6L|sA!Rk-5-S{HB#EOon#w# zY;I-ZSMhO8Z&tQAMd8a_waFI0@nu65*uL>rfRC>|`fq78q!fOfr3!JV&k$G|U1ILP zT5^BFFsWGnr<;6b6(*m~L^-bYEMtcj(;;I>ozI62XU>>$h&uTRV0P8w*GO6o@{1RD zmohRknmsIyO9_nN>yU%x6cH}I577r+tyF!NH*DbQ4&ykUUZmx+ocw!0D9#jMXtG#|%lsg|T0 z(gbXxiBJ_3J|ZG|u$ZlcKO}qCA5s_%TC%gx{x;@@JkJ*2pJ&!*7%A?q+&<&OxODs7 z!|XL0tX;3gWR_DZE^AyN=-8|IcUaP>36HX@(T|K0O7i?$r zGu$WlP8Oh!3D@7Yfm$Pxw5Evl+y~-|586-lIh1+b|3cx9U*FoM)6cOa|GZqSP<{G{lO@(5y!#FiIpS z6*|_~c9fb0YBo-MRMtJMg{`u_px;!}HTO*PVL8(T8T}nGN9{7coPh;IH$XOk2y@_% z32zK1CMI6k)Tl2F(9y~w_X}H(Wah{T$W$m*d(!HsqcBXREfrM4k&rp^1R5rx)_6XG zKz2P&Z~NOO1f>lGMUmcMEDU&N;lgSY3PP znI}m>jsIVMXoTvLHTOWNyncMPQg}X{C;D39u20w7pN#S?-y>ep80$8lrRMN?7F54; z>3lo0hH@E4(<#UOp1XRPK_!Lzd;l8NxVzD*FCyt*QX8O^8cJ*qXH;JM^bItImekJ6Pefui3*3Vw%JNHx zMNVT!mB?m3e{nK-M#93&gctfy<=xQ>1Y)U)E&uvB}`?pd#cj* zDmAr!kf7N~-9>UDVq(_VtS&xlP)kD}RDqSm>E2Aa(Q2lv#_+J#DbcXAD+Ibl?e(A? 
zn%mk`*;TM0w=K-7z8um<-YtqDd|C1wJM-ki0_U0fY6V+Vj=;Uj>g^of$Kd=6_={Ke z(|sBSKcFBKE|5O!+|JH|z>2kX4=)jte1!RkEa6_>Xy?y0q;=dE6!`YH0~>w3gX zm{_vOAIA#uXVw&(H~XRVrsZ9T!XUr84dffj@_;;Y5faKEUs2tNcOhoBGE5!%wanWs zb7w*c{bd%j19Ug0dYb6UD{WP)i$6L#L$Wbk0II~tw{G$B4xl6k0tc?-aE6A38Rktq z(l=WgGY>WO`1ar;z*lZ1HZ=3@u~lwuXMVpyNhi%@9y)Jb%fYQ-oa~F!x+ zf2Yqc$5-}aOxYnikr4!I6%ckhcdr*hURu;Ei?b*DCNt9$vIuy^lCI&EzPzgd%b&th zo=_Rvhrx7i%#2y{NICf*p8**+go&CZazd>#7m}GOBWge|=QZ;hrNctmDhNbFA^Bu$ z^c{#|F?7+_+}ePxX&*9C12UDd79p2WaAOJrLb4MmAF7Kp-0tu1-_c#n&IUV7tDH-F zBS)W;1j{qmJ9jKwB!8{M-#jEg0EXkFvz(LqgkI@D^H_D(rGMU-B={HrKi`(u%yV24 z)J~5Zg|LH5*3$JWeLh-V9C+8*3fR(@f0c3VB`7m6|BUN=+}D$~NM3#pn=(!tcZpq} z*UIvqEos4fBTccBf1!P@v7a4glsECMLcMtToh*G`(JcBavs4w_4OgJVIj4w)S`3@s zd+zq@a*usim+7WQxv$pNUdSw&=O6bFUkKtVq3X4tw!de2@51$8NBH6G0)|c&6e_^U zsH$#zzm!cXwp6cvJppRR6uNy3r30+OvvklnSjS?(?Q>bw2n^>_j}R?>;m22dcfXPA za`o9upnprA8<55;ogcu`tQG9klnavLTU;oh)7(30tV6fEMoy+abwN$C!KAFY$BCCj z`_ku+!FYceX^-F~GaAU34nyPS^U9cr2#3$7xpVZYcYxQT1X36`f_pIzAvf9ye)Hd9 z_5^)OjBCKdQfYKhFB}R;=V9N3-I(6>uDGe(*ty6q%k6{kvS9A38&mN}!ywR1Ze1~z zZ@I0x2A%RZ1G?d&Z(^a**3S6My(D8B2VpHp{qRJ1c`(3x$0ZnX5KLc2 zo2to(=;Pk-~F|4Ep<^cMb# zTyraXT!i(E?SL{t$7SV|jY8C|`>xFCY>>fi_?A~(uu$#1YumbIpvIJ^%}{A>FR_5i zvP3MEO?ms>`_tBQ%V=O11JbM)Xu9tXB z7c(FI75FSMudc8`)hAdG=7-By5%uzz@j#7xWvR0$NL>+$8~8+keYdQ8esqQ4^7fL;oS&*&Q@u=c7A@s>udD5(d*`w+poTp{gh#^Hd>s8 zT5n_V66>KsLD@U{n$N`2p0!m3E*;~XK^FPh=$XrOHnO&h$1Hz(>tY6q} zN*arAq{Z6UMdD0#0FA|+fbnM+gyGei*dMTX&wzJiGWqtq??nCQ1s1xrXZ<1H!Yb`)PkRhXRe0Irel(F^3{K)v?y>!$YpvfPFqc+Fz7Z(!y^ z++?H8tBxoOIVVkR=}w(%Yd3H>9Ec~;gB@(T70nH)EF_3u!?zP0yHGxP2)lkWZC+ex zXw7APb(v;WBj* z#X4QG`gIUegq?PyD{^X&<1H-zV0DZ+F&g;9lJs~-Ey9~?(mo12KkyxB77HgIN57!D zUicz!UMNR3Sk6^hS-B)B(g(eC_jZ04pIl|6ajb*;riHZ~a$JfP)B$_Y{jH_%MKLE$ zcM<5Y{am*Y3!BbI#lVl)mE8wZ@jX1e)mg`Jx6U!JI<@ZF#r8ge-}G4fth?*^i8CI2 zjR_-qmC}60>6|^+ul&umM|&0qoSK+xkNvu+mqevrhv#VJ;y~gfMgE?At>p(GTKOdt zPk12Ffr3>d{WF-JE!HVV(|VI!+#Y2jS(xTM7WcwSvfj&Z#;;582M6`6MAxifm}{=~ z0nyWX8pOit-nLc0UOUc0*>;ia=iV&jE(qjKS5Mdn#Xh7fl2X^4oFtYaCTD5JU{)T* 
z-hK64yAB=x!|Ppt1dVCgJQJ@5{h-C6HH45a|NV?z!)KwdqIO?b}>%Ff=sm`PK&AF#;-Bm@ib$z-H}phMJA9 zpVG68?BYeFL>oGh)Uu_bq21u+=O?6FW;5-_UR}_#?Xo8KJWJ)vVdt8?F=Ore(lNr( zOo1+4{l;Hjvrkeqchh<1Zi_aMnHr2b7A9I4zprrt26&{Zyb_CBNsH0?37Qk-Gb1>p zG-8Wk2vyqa%bTvwW^NEWC9?s~{Y;mfvj&E1#@w43&vbukN6{`0~>);V3o*$AZJ z*NP8dQh~F|y#5GvOc;}KMfYb^vdc3qP0R9}9G1F~jfI8QI}Jd<#R|H+LPe0RE!p$l zc3@*5^kzYv;|t>uv1NS>3CuUUa7|j8E;#PO$IK;|vp0iZ5QqGNcWwUj7krMAk{rrX zS3@7mICK<8_PGy%MGrbX$jtfMs}_ z_BzrbP%)R+byHBQ)XD`!y%g#XLBeweZ%4uNjOvZ`@SK<`kS7fkscV}J?Eb>&%tJgd zbgjy)a<^95zKkJsCNH3K!_2Tc(qVAryVA@|@m!S$rsz03-m3{iKmP*F!EASpjwUP; z+Szi;WdLZY0|d(SZ6DwDY!V?@H$p?T9wysznhZ<-%Ge z`-$YI@;^g*fzn(-YfsDN7zo!yB)9x20IRgd^MToKY#%g3U_?4>v7doy2I0x;!c`X1 z1bM^5bAhUC9qty`9KLbv_Lq|86&%*MdFp9PwnAbl4c5~jvx=% zViF{@y6%)z&BB7mNpI^3>wp#x(sZJ-%^i^UyzF^;OK6i}H=y>g34pBofytw}@JMD# zU*L6>xm1Jc*jllMcd3NMFCRJTCVl>l6Qdnni)K2r#ZW?LhM9ldD8v8%V5TK7Qc_%D zEy-&sY(Pd@Fx=x@u6MadOm?&;BC!b_k!QT<<>N9qaf0DQhg)BS4nRNGJYG|Hm|%SM z2E((mGlE-5Pp8?gfSYr$7iB@PdcD_f1E~t%bFH(;zpP=?mpAhK=&{y7!jCMTE}%AO z8Egfcy8w98j2;=d=s}}VK&v>^GD;^wXod?kfctHOnXZ);pYA&4td_?o_G9IsDJp*d zs)q^zR5;GFJj1t8#1z)73mtANu=X!I#Z)~gankxcw6?d{^BVFm32DCeBju$UV(2hel~XfLiZL}0J-_k?Few?X<~e+%sg^O3@r6le#$qMN}Nh^$QWZu zEbdPz3Z|e>VzkOKIA<+iA?6_e<{zWgZiuID`^Yd#`Z|a9&Y)#}3vK?x`yA#nGG}sk zoS?%kmfgxAR_g_NVHH-olV#7b?k(Z=?H-3 zpY57U1eI4+L-|qQsE(P?604z2LT(7J7^t}tpzV71fOy;c?cVFK$x%?NDVLH3moOW- zyKg^YawBQp3pS_-GtH%+9RD%lc0nFAO@Fk(5ZDJUVu=U%8O~uG%Hl z{hp@WO23r(Z{(cu5#;wp{`^kh5_wthoVH0e{$9`U{pgor#_qVTK`>KmF1pg6e_6Pc zhjL~nmc)LkLiBu_3q$H%y&rjbc^9>zMpDmCj?UT+;vYJ_9rZQq)zjqZ$|T|w?EA{W zsZ@-aO)k~U-VI;VK@01ZT16M%oMXTiet`>pvGct@YAZ*N5(C{|Jc5EXc>>|T@-&OY z?b3~HC&5K$1MzE!mPD8Gmy5g0G<*$3#zjz29nQic0MMHQhlWY!MQH2OZw}rz*E}(T z`T_Vfql~x3uh+F*upvyXatYMBl??sflJ9aI)s)ebsf-X3vCujy#+4n({>8a=uL_m_ zfLzVYDm8!`58qFQIdZ>Zj6e8e#tOc6Ij_g}4tLkE@odu%hFVnCc_w1>n%03F|E??k zKc0M=D+GAA8V8m&)3r?DU{V=#^N1fW-b>m#RIN6mR4t)|TCGC6;z{lnmZHn~$}Zrz zK@ZJhiYr?^F!trLL9jQAO$i(4E`X+1Hq>x;ZRUnsvCZ(uZyz6uhE#Avm`2oTYtKTz`RGkMLFBZ}(e 
zq+#w%G$!D6j<{B>sl`bSk3DDXN7DJx>Pmjb2?QH~TWPSPYxP3lcdMbSNSqdFpuyUy z+hPP(j004zYTvW;MxC+kq4yUiSS$(^@(GBp{jtKp@vnF>xa6X7+H#nJ;BBWh|15Pd z9Y13l~bH$V{swYmigfn*v_E9G82r0qsxQ}qMaOkp9&SFG@;SOeD zVNqeXkA%1$I_>h9330K@Atg8H2)cbg5-%ReXio73#v8zn_5yZ&^=yh5pJOLT!W(A5 z-rX|!wA$@s(kvbG38)}Q84cW;{Di2DcUD017pol>VCm9gXfarvS=BWeNV~*wW{72T zu1}%&3};zaol{IIN*`LGwTez#74|+OOzmHH=rOAPaE4>E*Dg#ik~ym`Vm>b}eA!gqHkJw} zeZ!txFLoet-KOZ zYIe|~J$Uwi`*Z*IEg^h(G}_Y@5NvrUo`;D*+wtLwRrT;R=k0Gh&yWW^Z;7!IbFQQ=@@KbKp_-EWTdsT^bUqURb?QXEJtd5!8>$%K0ubzY5MCA>ccIG zV?*Z6xWb>yd)upY7iFj_jrmrhXw`M}(58V&ImcMv4BWm=p4h-O8IMMzHvl_~8vA_O zybJ4;fBP;GoQT3H7n~Q%e;VzBbkDIlkxoLTnqN^i=B^ZZa^%RVNo_H74|pVRdJe^b z-VSYg^7`2wJpJ5Z!-1B&+FrbRmm($uvgC1cTz0NQeIFl+%3|48&a@4a{KfrEQ}z2- zj{5y8*Uq6h8yOStVbi}BlICqYu9O#ZQ z7aCEB9Kpy=Bxt2vof7VIn)0T`{h9m5?FLVnUa zA@d#lu4`s4t5FJ4Vjhc#pByuDk?l6#INH1;ZUua@nnU=!xMPJA5^7WiT3vo8v~vH8}Bjx%oP5s~$&nZ1VK@c-o{ zp+OPs-D{-y{mWXq?`~W8DRW$-myJ@KTPP-i8xLLem~|TW!#Glx3lg}n4t|!__e)98 zmTf(3po2>+%N%)t9)caCgnaD@Ls!nq{e5UoFtX_uM?{N9L`njkuE684)u0A;_)R|7{y&oXp)c_HJK zYnf)cQ@*PN=J)KPN7TGj=|kl@4R&aaD(4To+}~kMb%)B?W{wTLQejSbn!l%?-i{!N zH2%_=Yv;PE>vNFCu`v_D-r2ZG^=Lp+r%ajkZxO3rPso~yn7H2vt zpht8dZyXS>?BkY9So}rE%$j_$6T^g0L-qa3tgR<0l8u@3%UTM)E*>n>Sdt+HUCc2? 
zzBkU=;eCtl)n^{YSv_7WNp1a5`_=qP7Qw~cwX7@1zV*k3ymy0UZYAq9mj7MrTt>)F zgz?NUBIWB8qbpM1s0s`W%q@wB!%YtYBM?m-KuE@=IY1+E>T=jkN}9Vs84*-WzV?Ze z-Nc2qRT{N~A`!Ln6BYpWr_w=bg+HhDI8z|@CXx`7I4EUJqI&ZwlFO%7z|yzGl#&TS zoR^Flo6q01)n%rTp6ZDNfT{_>QtUzuPhwj0+#*FL@n&; zD>X)|NN*ib!@-=fu)5D|JBCIgo*vC3+ICSLd`tWt5jd;;QS&cXo#K4ZbA2Z9qt#T4 z?Tr6=sLfRHm_5q+Q@QVOs;xM5SrM4{p&SxNUjknDUh7lR)6$osGc}9EhLt35%+2?$ zZ#~0`{qgc1IZ90w^^+SuGi;AbwT$g`t!H0-BTFyMdD1WU6hpfXb)tPBbcAKAhd;!k ziV~V)AeiS6_HF@Kx#$?4eVMH)pB#3LI?sLiHW9}{4U~?8GW|Xgl?_w+ z$V=A@M%LXFxs=aJXOBj!dBE<7!yd8FVI{`G<_<4~aZW4?Y01#9U%!C1RAPUKJsoGG z7E82e=&*Y@2b{7eDf;~8;$qKCbk>c~!hQ=<-f)*y2uJ>U1u21{!Y65qan5LfFNcu8 zYnr^*&ECp^s+3o%eblcMJwo@-J^tqs@LYiaxXEWN$s;9}>YAF-EP{}ZH&OBY+;)X>mO`+k=$b`c|187^@5eSBz3yMLA*VD_Q*JBXY0 zEV7ocnwpqkwECKuLFZc8yZad={%?U&T6y5Pjl0-_*VfYV#@`={!GvZ?w0sD6cvX^? zN)N(m3>F(^q%JkR@HP1bckFJ@64OL4f`6Rm9xkvQ6C5a3vtf#wx`qwUeK@8Y>@p1O zxEOJ-Jp(g7Y}+`1xYd~_fBy8B$~7;Zr9Q#30aX< zzb5x-OmTEU6Cgv8l9n!<7ni&5S=F`o%|vehw^8*jlNY3RZeO4oE6>cx2wti|(#Q5Y z6;j?A6cBzS@F>8{ER3;Q4kd&Zv%WH+g9ooWRdh`LLpqmS{4(mtoaeJCTt&X$0_rhl zTe*$(Z|P)sGzJM{;o(BjsjEG?`*#~D3Mk$cM2&-@3!O8>+*KJ75y9bk(NQ^Wky{&7 zC2+{PL_6r#dQ?wtVq{Fv#$p^l*>23jVT-O{Q7-1-taX@Ei=IF;>TaLa9<6Y29m7bB zg=Oh+%&{GwJ=mH!6HONxiN%G|#l9EnoB}x9NPKA51k@0&PTNTb=&U|a&?5N*pgg+I zz{Z9iu#SEbsf#jxZ(dRpZftHFJs&|~;T2jY0o{E3D4%-zt~QGkczYU+hZbnz#uA2vlF?5lMVj(&TxtqD)HAIs5|iweVw5Hp3t9G9{HZOUN_j>UTd+?5P(;7& zwR3&Hf)x5K$Bn)|qrXrpyN`4t1#V@{2RX$E4X*V#nWZ0(5Ml}&PwBBV)N+ukUCi}s zV~*P`eNZ-gxvMm?%|@MB@h?4>c#Pi@uHIQ>!0u5PQOrIo+#*UiZbRc4;n6$i>{<#_Au8ySv+3?4-!8*Q_L0S9SaP*opUGMF}Rt zZPQ}|O^zLE-~z8@mWH0TS*xcfXi4NQyW=>o@hlj$Q_keTOx#o)IGfs^x${-IJ2BV|rY z58!gT|H_K|{w*CY1TTb?$V_UJ(8t?{lCPo6qK~kQ87`wPnErFA5k}R42cH4Fd3;VH z`D8}1-p@zkDX#(IGiS$A=xCT?{l!t9o$(ouHMwaZtmo(7EjYS=xTKA;(}}FuKTYAy zVt?|FyAc#|Eck#J`hO9l7u8A1$(e^>*l+iu-eSk|)Do>?;=2&Szj4-q}XY#Ph_uj=W#d_0IZ=^gyZ%y^m5 zCs3}ABYm@3t>xE;Z}CH^R%I8W?-S`TE*fYlEAmCWl z!3@yGhKAetJl@oNqN|a@FDKWgCMUOKGHMoH8${0Xk>wFzJL;xYv}UQttiSrkuut4g 
zJUh|)i)~zF_PXtELMOuL%ICjucjaV4cHzH#wxo8vlP6COa+a5uBh|==Sjt(>2V5c0 z0@Bldla3BlzjDOCX*bxg3h=qC`jbX>z%|?n56IUHe2XC9BfK$jcgz)9(5H}ytqj#VJe*ghCf(0We9p>t`=I|>*z1wdub%e)y=`rQlZ&h+542m~RP=**?7{yh;eD3j zD?k>j7hq_I$hAaXo3|=zYH(|LfZcbHQ7Rnb#(KT~#r&ORIjZR;Qt1HkNF^Zug4=?X z&%bSAp?k_eKEKUGrbydpk6cXk&>=G66WeJM{_B5!t|NF}=K+&^(mTv3LI7@xGRQ-+ zeiF~;B>3ayc|;vS^F0vx{bKrBmra&Ea?g9~uFN@`^E2S(7OtPZol1FuB=Qc^;5#At zjhPa~Yk$o+J%ZG79p>V2~2=H{=9HJ*nate>vo1xg$Itl?O7<|~#AP0B9hA62ywj9rp;IB}HEM19h?qe3hfbVv_PpKod+3i5MG`&wX;W{qZ>l<^(d05WitG|)ZzaO&_B7R6A z2<=nR&@jtmjQv%kzs`}E#b~!NU@G&1GyY%Yp2CXwAWAQRIHmPogb{-01d`6uk27+g zI~x7Q10D=em#JuK4nR{W-{Qv`Y;1WU0V4vkSUJ~;$MeydC$7e+@m4qH;Y+Bj=QxC| zy~wQndukBjJ9rB2H~j4V!A2wFu{2mBL)n;g{{`ec68?VHoEZ(;cUM$2B>F(D^SyX^>#Y-6 zdT@--LB}ZFb~P+QTc9K;xwp=Tf*TRE;1#Yq%y%-uyP3KEDd|*JuO7jBk?eW=Rd?n% z$7i=Nn&zam|DThGuv_;#1Y&5_pti>#^AJ)a-B?6TrY%RA6nG7!wbN%lquG_!!JTge8fk3|N@ScurdEd{L+Hg|_4iB9-2wccox z%+y0SYFb5i#Xi>QEPT_LTKW|Icg3I=DV)-`+K1^r;TyeQLcn@T6%=X@$f_LBksdj^ zM38gtu&OpD<{JAlNS!c5^{^n;+#9eOu#El`ck7Y2Rll~L&4*8UI65kCcvN$Jim2pU zxIR&}S11rzDwa61FkH|wrPlf~I4vCgc|5VyKThwM@8ACx!n+zt5E2P6`VORZ9zs$d zWcapWH)6P~@42i@g6j+kiKSh0eIw3^JgiC#z@W-m_>Eor2`84ZYs+Dbei|T(7n*=Qv=>5VG)#gQEW< z6CTkvy!JCKJ-pRFoADpH}N>YwS|PNH$9qQyu!f0rcigO1PB+XjPpWnm;{Mb z`i2LTzasQ^>BDc! 
zSm_QZE|m@0+ZQXht|Z1;gj}$-N&jRX#L?jgf2ZdEuN-hoGD)*%4i12ohi-TOX)X{B zCNMg$zU_*Es|GYU?jS`s_x~z$ZyL$|So)YaE^dH^%%sSi zaQ&kL2v~Zu_rA?T332J;MxqHeprczML8}uk_w+8L)>S&8^yw@DEJ7pm^{{SnF=n7P z9e^?rSDY#YDXQZ{!NUtgeynQe6lC%aKE28AXG-UuHE60FA?I9dEe*u@)&C(qKuFRM zcz7SJ6--;4C1A^UCHQb`__9~?d75`VG+3D(oZ+a7$i$J}5YMQGDeB8W0mt2ibL_|$b&fpV%fD1crX1184<`&{I`(bKi+@RP2tzKcGhMl zEUz11cfDGcJv|n8D;;$bkDRfus#BfyEK?+~_`$ms+Sztde4OAMZAP!Iv0qKq2*$aM zllo3hWeM61?p;Jd{aWGX`+y14g?4G(q#TC%PI;%SfA%82U(j}|(K9xlhSvDV#pB&c z7lJ%@@4s|*XnTEQY<0X;;ho`UEywwwa{dnz38y@Mr{4Wf<~Gd$V-ENFZ|$Z0J5(88 zfno3~u$d@eG%#=l;Ass4a@vQIi-8s&YZP&NtpjYCQZ~lsaYW4I zamfSluUJH1s5DUV?Ts~%wFqfzv%_m|xcnar+21AI5DMO60D<46`Bb^!N7%qixB1;3 zC4id1rX=VNUxRe{DDVQg~~*c!n@ zE_|C36zU8}E)ySBqg;C>r+!K%z=r?&%k_wx;*sCSrsJF+yf^7T+2eHT_a)rFUuP^k z)>UVsa5?0xENrvyu1SZg^lEcp2_|HrI|#jtU|RHpEt9G@AunT$E_d^buHNK4M6~dT zy*Wh-DQ5qN^#F_a@4q`sOpI|XE@3pw#_@#ZSK+}C0JSe)xuUZ06imWWyw{i=&ABtD z4e}7%!V3m=Hw$!{YHP2$t9ND-R>I^Qh?W-Q(a2dck@dJ63&)d=$C2Fxz9)1=T*tqKi$_82 z4#?;Vgk_eqXEnZYp)TD!;CxI%bv3v9HQtzPMDy=F@BiMLr0K_a%Lz9pVM+*nDt_t- z#NoMJq+uCCUq_MYN+}e3Ttxj=qImTx3FFQV6nB*m@*qPCy*~HbyZR5IMKn(stawgb zt$>OVbC~UB@fT>?lmRxn)(MQAr~z{>-(=;KbWM|7^#HIOE-AjPCfkc8E-_<52buRw zJ#*$YCP_%`v}>E1lxh95H2Vb0Ng6!+>ZZ(|Tb%Xs`#)>u|6YnV7W#b~y(=X*iFL4@ zKpdq01fkKAuv2L}Eqy_pj!AW3t_VEmRB?CJfZCf|^|@k(Wt}0kQZbH#svqqiTB)Zt z|8bq3N6UG*4B4I<6fdGq+AykC0IDLVK~u(Sj589@chQ>jJ#7mC!8n1^z~Y_EojXb} zUC$kS!l9F3slv?6tT-dR1)lD&^0aBMLsA5WVr1U_x$@dmEUA|7zRnM#y6&l;H^?u^ zW94o8!>&9AiT3-zNINUPYt`aY#ygmfJ7oKEM}`}7?A`?wSWb9fp-1TAi6 z5A^iuxY9D$gE$!j#E?SyE%@gHkyKZ-#CnRutoPc_tX7g2QfcP`Wo2$`dY&;_I*xN8 zqVY{j3xjzp%fRpW4A42Hju{a>#P{g&*y`9+}#+wf+vgC97Kv&{~s~s|E`a}e()@@3i`7jY7FSNVP=gAErgafQ_vN3)bN<~CQwQh zg9!%wQCz-p3efC5MCKd$!q+#bY(63367BbgIG&JTX15j;b$jSse@skKAH?lr>N~~F zuOq>JgYoy@`R`@xjs8}YSD%nQ5s2%_4?wt32=1Ps%{E#yl^k0YAB|O-^9spTco09w zo$o0X)*)o(y|!F8?fIvJ79q6bq`_!gFgcnYqZRjSQ4UjXc2HZ->G}K_z`&k|E6|p- zEY_@vJBwHez!&Da*eDy=SA5XYpG(Ws>!cGF9I2g)edMWxtdzy@D(GV6 
z7nqkViL)vMiY!D>0HkM-6UlqST~jas6ug@KH>x@%sV*zNh!>Nyr$>n9^y&mV$bWf`Sm_=i0?-6 z6yf`p{vj(MXhj2Z8ypFW-k`+X0MG+DC1%=ZLHO$zsa@O!a1ERZod6N4iERVTTPNQ* zwAC%byyP++ofsbC`^43y;-2#`-+Rj>-cwwGktG|xnJ#X-kG|>s2}4W`1ux#;dJX-( z9fi?n8Aab#L9LocXdlxS5fYN1#ax%C{<+W_$Yh{Ssf1w_4r|kEA2F%g+n+wgP5E$l zlWXvjfVNrXcjK5r#`URt{)~UP(Pz-}Lnh;b)FZzRX%-8l=@iR0B9pNo4=;I48`iJ} z8j(8EHb_=?pef}4)X?xlsRfJ%0Qq-nTG}<)y67q(%v{5}d^U{o6Z!8P$S9$>DfI(jQ z;$wH?#p~X+*IdLPWMs;J`&mvfPj0~__cfddQ(^pqZ&@+D4bL86NOUUP_uQe|PWw0XZ~mh!l*mtD+|j=2a&}-z88)w>1cs$E}!7 zp898Q_M1oMFFXvEmVB1O&-mnT2gpcCDbTJlcE$I=i~txxVU)w3d5PG>F2rZHrxct- z5TR5u0^Wcl57+cH=r}_TGcST_<8?i)kW%=U7r?&LBP6*Eoz=0w{iC4IJc7qU8=Ah4 zZ$Dr>t1-4pPp5Q+bB-yeb}FACsIBk(AgLeWZyEr9t=DN<0>zk@Mf7Rtd3Ut%ox7~e znVFcNgDcR!VL1)9A0Q^LWC@Q94@YcFpo)h`2@Y@zgJ$I)YxA^KrnLuI1Hg7mS<9Cj;W)nEQ)!;w+IcT~Vjb#|zm!d(>kVf*(J|J^4&ChF3x3ZVV(&+kIhn_f0@ za-kIc!ye*OTiObzc9Pii<4KfZA_L4LjTUmPXibFl=fVb}9PfCptwyTR1mwYb(So}= zL~j>B2!ar~eMBwe5cJ@(V6T9_5#i3N_+quu!{2@VzyEI75f+291!+G?<5VWpnrk`I zFsISD5w>%(w+oXFIM0G*zr)U4VaGa%{HJ(PJfATW0}Pf%z5p6?mo~bAD#MiFg$U~f<=D=U|HQ&+CqGsdl7>lnJS z`yD?B6)zY9!mftQw**t?2P|ujGCn>_31dKt(cyL2Qp&0n4} z%A$h3iHz~ZwrTOd80LCPekFavGfL&d?$pEm%KQF9^lZ2yKuVV6-8WfSEX;OYvTsNe z9KV85U@r0DHG~G?PzfEsF zcpAo3@HKy$^j*5*DQ9&$IQDG24j&L9dUK8B~`(b66xOGoYdC z&8>t7{>5a|m1p-Sv7_wVmn6b=FPJ*nlJX6hL<;-}%ph=@uMxS-%>47p4+4AP8l6uc zr(v}F$JZNO&X5=8e=wlD`?B3SDTA5C1}(UanWtoTxgkGt5Sk!Qb9b}Z?-F-hzI8Vg_Hqdx_qq{obK~Oz!{b}=ZJVXJsA#5 z+HxXxOy1ApAfK%>lK=&^iKiHTGe(sOtt#d&Uv7i$M2F~0sHSrP%$^TGWTV8y#NrGv znPozDGPD-#9gz7Qa__FyIV-0iCELMX<$)3M=5qpO?BX2`Hj)df@^aQ~7*JjGf2q1H0Jtp8OqF!o~1o_+N}d1kD)lyFA!dqQmlMS3HBh+G}1X^6M71 z?h*atPsj~zjJ(-x9m7zdPFdU$AgH$~!^BSv&~-~{%6u)%*tJA?BZSW#<2~Fa{Hzkw z)w*MVg(K6aDspglcNcik4Td+NsSa~N>gwyUzr`cG?=VPOUcaU(6ogwea2u@ZS1Em& zc7~jSBHhK~^A1-+zUINRu~leODUy8wmU6q~bv&s#SKazYzU$=}9erG?omsU@X z-?cnAUNSx!;3=Nm$m1@A-HG6qF+2{ocBZ0e%09lnk2+F9ORfRjVNzb^m<5oXfl9QJ zy?t@3+-+&@dIhYnH!tjkU6_Hv0JYcC^0NSkg@#J67Are@>yGcPU&t;zzZp*zakbsH zfC35oA@%e7k)dYY$^|w%3GLHt3dh!1-j+ 
zlv~5fDyPyyVrLLblI?l+K4XlgJuS&Iz8r?QZ1$*K=^)oD$BQl(F0xQlMU%zEFwoKU z%wet=J;hKF-VZ-d_O#x0z$BRT;~bgYx#K^>1WB>3iN=D<6K-*Tg9#=s>8L@Yd^38lfE<2&HV{kISx$Wnjg+Ojtq~q8V z<>!oBk>8AGwvP5}J+nW_!RW3r9GEe@tuYuSRKlX>PBx1Csmt1Ke=_AZz0c#HKP4)X z$UmcE2YNT9-^!I-3#X<`8B)WR;=z-V!5CW?tek>E&+obRBh8BOWuSXvQlmDYTiL7n zjWFH)01v@n##a$N;9$SB!fhVlw{)7eeeidftXu|Bx8IdZ#S8cM->Y`x)(^kd^7%D0 zs#ARv6nZe~Z91b9#)#FmWULX{d6LL1bI&F^9lhiBudvlhrsVqBk7ZkGVBA zFRT8_BJAWi^TW6_LK^4bq093Yd-Zm`Wd~wZG*N+@=5T_eQ)TZGaPISqRj^-nz38pW zK&BVAnMmc1AEmPmkM`F`ItB&_L7K3DI=hrjX%l z9Eb|?zQ3QuExO&4RB};HH2S4uZH`aEE`fPTE8|WX6w^IVg?DENeNXK? z#Z|$&#|!AXz@}9dc5$rVSnB6#{Pr!x;vss`iT);*Dm%k(^fv<~9qNFh&_D1>sAM$P zJ=V?_&!-A(h6kiGR-(dN?1j#|TG!bUuI)4jw_K3^yee8@$@Zw6gB*_bdCswaEl@3z z4Mg#{6XIl}xqrSOP5kOBG)NXC%@!66c3bIUo&qW8(+*DZln?}sG40w2p^{y1&8|28 zLM59`3R+XUDmrh>xRmJ@hqP|wkw4-sr}R>vNd|0i5uwvDGQPr}1iysmK$~a0vvEFz zd>ou%$bJIXH|!3U4QPISR7s%WCbBVcuBJy(miG(ePJR}6Or$&8P_{cW&7Ms3XFTQ| zp6tDT*+x`tbhU(>`1s_>!vC~$4yncZF)ryCz`=w|?&iTOd@03wCh8n8K*R~|;raw&N^z;Nd&mRe5#i~~YJl7B=@h*biIHDaa?FGU}A$YUv zfcOCjf)_YMXNtMy(xPmrDj@4O)-+6prW#6oS4A7fV|V`Y1C#8=m6~xqhR3OhZ`LIL zD71|XI<&_qPyFdYS(Yq&!6MBNF0?*OzAX{O}tp(qEY+4`K+Ind1|+i1LglP{@- zzt=tseeR5PWp23$pa6Fkg2x9J4UT)N_q>&|@*6^>!d)(RLS@FwPl_~29!ObDtdh)e z#pDLHnTY>p0@6Qfk=@JCbjyG4_+Uv$4R61a_kA?(7&1n}G`FXn5~+-$y>k)bzu>lH zxjvY+v9r5%*YJW>AYjtU7OZ2ShtPQWyel@Z33L1^XJRKfj#sTdFFmsMg|B~xRa-U( z-1fX{Dm~rn@LB_p#cZuifQ^tE1yWc;8J>z{781ET8w)YOlaRhG8^!zNgT0WR+7=%d zSI1P zPQ(D_HC?4ozXtDEk=D34CubQdU4@A&V`qo~-MF#_jT~6k>IMh6Ql)n{DGd{9E=1WCz4y<#Ynd&V zsdw}E5y%Pb8#m!o2uZ8=eNB#0+Gxnh>HYnZKtE}dUaL21sfsgkLhs7Q-ddI#+(QpxA!gSLHUy^pfH+c~wQ?T^Tg^tHsyYIa#TqA6O6#VN@R)xqo zt3dZv_an6HiiD%Gth?+%_JVo(JT+$&qSxkQH8!7v(^3d5@kp*{r$zm8dGgt5k@J9| zao10BDt0)7uZ1c-iibRg+4(md=zoM0tS84y2lhdA4HnJ{6yA4+jT_ifjYKMUD-ZVM zIvtdEdH^Ja?JVct9zPps2%{KwK!r^jHPxN{6Z*5}kdmoZS_;>6qYP+T=2h<6FS}TAl4#foo7UL9`OP*`$xd5~a5 zcApRfIo&Ot9+V2H1thnl8~dQ|*dF%4SB4G){A#f123wj%B$-AW=0Ws**(3}fNI6!1 z`LR1KRPZ?OzCDTGJY^6wc-7g@`hUmh{^LOkQ=?%3c8<(fNICJOIWi?^ktxEiE$W7S 
z&NnU|%d>(OwgB)TP7|%ElarIc5xqz0P1uSJJ&vn19Abht(@L~!y+16$yM(ny?REze zFZ`<|SykuTBF^dvP|dMq0sI@?dr5)o{JA4{qNSTxQxaDhX5KJ^8#J&azo(6Mrp&pj zt|`!t=ZucvkzT7MAIdcfWY>5U8EP*Dxnz<}0*eW!Ndu?JT1Pcd7%JfeF&1UsagtZx z(Q|$qqyW9krur;O>L`i7Fv)-3`QLvB8a;3+eF0VD6X>Sb5a^q)fOAsVt- zl{rfVh1Y59ze*kZTv^nUPtQ*Q2UFY#v5`=F(|i5exl#7plnbEc??=HvqXWtHH9*It zrKAKQ6#+Q2zGxH@Jg7NRAD>V5{LbNR|8PE1TSe`$l7048nm~T?pRoTb)>sDp5h8Xp z0H*R-uCg?o?s_DeBf10#)kFK+_wT>w)mBnUz<{ISgIL*(kU4ueOJ4!eF|MK+bL@w< zo*fhKMSAX<@p;?ChUsE{rcdVb*605agK7z037CTB8h|E1#i#Yohk}RNZBW_FEdtb( zo0HQnDNqk91a$t$tlQRLWXvrrVQxHD9AK*=iJ60xVRk0a1OZvUCo>s}x_qJ6IA4-g z1D1z;zFcOmZrGQkxz+-G;3z*kjHCRoJywK7>QTE-Nq6Hx7u*gAt;Na9Pl9uB=E2EI zOVOaN0EQU1G*LeOOsGBzk`n}i%#WQA3Fix)Y6eHkOt3D9HmO$|0>W&cr;6t{p~C5O zKLE3Bc%ctd7R^vD_V;=?edqm1`qBN| z6|u9Nr~i@Q!{nz&MG`^wj`rBI0XEU1_pXWEy@t`mT?HuGBqBH6Q%IjMs+O?!bvwKbT({B`v{;9O6x!{a z^MW82g$~4dZ%S-n3g(ck15?NfM8LhK-1#5Ac{R|qFO$pNSQ6!;{Ad6ai_e=LI$V#( zHDD%7+dGM@lOiI&c^m)9F-h5ULpQq?l^0CMbS{e>Z;6(u@IH!lHd%H6cA0YNYrURasbo~s}k^0z02??T|kIfa>=x9Okd#DR$ z=t>T@@-4&0#s;<)L%4C+1>^AJJVslc$&Hq2nhN6}NM;vQcYgygGb*^v(#58Kg~r|iYgRJ?E*yy{Bun&Q;U^i#DR%fuoH zjtbsL`)gAsHZRW_vU&BD#t3hd^#_`ok=_`w`FaknlYNF%X%6d(z9m? 
zupE!mq6%s1w_^$#?&{&}+~qj-r)v;F+JT6S2wHQ6X1U$1tDtwm4%BOD5NDAe(Tf=z zAZxt3p2q!4w(_5){hPx|q>68;`ML}wkDnx|X}LNF`szvHS&$R!T_Z1f@%!uk>o$PU z4?Z(s^u%=KN@j+gU9qNtfdO=Fq1^yZN;7oqm&4>yw}wv8f(|3WJYf2y z7|!O-<`&5qVzEnlYs6DaN_a{EVmI?~3?&cd4%R0xRpt^WQw7f4qwQvp&0# zgD^hX5lEh{0fjXOVFs`Bp8$3_f$$e~WxNr$RT&iMs;?fWL-9xzO#AzC7%cML33_G| zKz2m7hdgKkdNg1ZEt<$d@qvV!oi&nc0ng;nrRrBb6B2tqUw0e}82UE%_1ersyra2b z1o9X(LMS`44rz+qurY&sSFSKC;=0Kb$Ukvlet(|`MlphMH{)us;NQ{Yx1R{=(Gn9A z_h+&(B-gsXo#6P^p-@$1mk@K`V{?1P#l|LM{QG|RY(*hz({3DD@qNO;RnEwzU7ntUq^ zH1Fv6Q!hOhd((hkq5L@5Dqy`8zLO_F$6k^(I-HgQX3`azz=P-fBRvK091z(AZ?v?; zCnO{Q52YV0F!WcrLtf9EV3xn1FK6*OzRF&=w3wDXy)?o@^ymepbo^zS(3*93sDa*3 zf~nj_L!6-K7JqTNSK*{8stPnB8F}?!%cpdW3HRj7KkP*Tbf_T*fyV0Y?hX=)uv+d) zkdOl1^fj3iXuo0Vl?>^(`uY?d6HQNH0?Uyt9=?7VF65O_P=h~)9&}iy zNCyo{mlVQ!#7zU$c28L^sEw@hD-DY0!#$n|ff7O{cKv_s!2fxiv9f4!IjLq*d~1<)rWM;?k%(yjx@_)?yGb=&X+1fklO;fCa zIh$Q`G^j2Gf9gLSg4EBdexN0ekwmQazK_hHfNojAoXutj`O&dN6-!lrPpuuhn!q<- zZ?k1G2Or(I2h|071wr8tr;d7_zcM505e%uc^(g@wLkT4@U;9~hGbp*ze&d1vkJr(H zG3)RQ{6Daa+`hF>o4s$)>L6QBiGO_WRa>gV?*z#I__x2JcoqpDO>gC_Uk=>I>*ZAM zRm^RC{D%Yd7p4Ge$mfO6vgItqLcre9KgGk@(jVOZP#RSPBdNP+aTLmj{|&mc_!#y*ao*JHAvqX0wiz4&YwAdG~JGnP(oM znL*K!?h0gI(Cj%IKj%O!&4GNumQt3?+|}RrmRsQ4%|E;-!{eoYJ-eUacEM8&ztrXpqZ)28O30%tB-B$t}#v%JQ1-`~f7Hx##*@6;O()zY+nt z3B1s_wUlz_2!Z-U3~-`rWi6S!8E^zg92R>TI*aXcUJA(@cJJz?xxyiH@F4*gQ(oR; zVE%$N#mWy!uoy!K$6y?sfI=;R=mo}@pXawV1(F)R#Z5)5_IuQqFVeHsXanKb4gsy1 z8m4=x*N*;Pf``x1pvzbeGp6#~bU?2GzrNi)8vg)%7Mq}{1CGeX`M`O!?atBzVW#mb zcHAP*JfXeDzXr-JRw_A(vI> z7>60H3a(ALrS_Gv#nUigaZFK8fJ?XPnWcP$Fec9FS&icHbg!EGHV!K?Ru24|k5mJW z9Y;gQBA^z2_CFtDrLlLxYtr1*^t6RD?46rqgO&*)w)DZpAR55q9eo89)N?hNn#CNO zF01D%H^1Bx^$=z_n`H7$%$7?qpN8mE2{A3I#)IaN-;aEw7>0sayaB=K95dQeTG5=wd$qbHRV#f-sKT zyLa!XCkn(EgRp488|nqR^pxzoYf}L4t;~Vj%Z`yZ97y4OH@H8Fo_)y zZl0uNSLx8s;0Caz&WBF`;Oir5eabqa>L9hPkGA4rMI1Nf*^9qf4ga}%Y8FeNJA0Pi z+yN6WmrLJ2n9~RQR!Shjq3@dSM|3{b*b4S@+K)x8%=j*(_miEL42&eA{2=q6U+_O4 zT3!fW7+>B0NpTJYzhDRs!wP^*d@Tf;2nSa;L{1)_j_^p&Wu)FkPJ~Gi$Dp@2A7rN 
zsRXL?b72_iRgRu`yBln`{^uh9^GM{CVoyJDa9xQQ5)I&kp;fs}e9T0qne#9v1Rqvz zttm`a(ty_;TaA8lE%kI~@9QhegV%HlldB8#E*^EET1v4cC46Noz)Iu^zvD&A-wHbD z&nMPj$?biKu^?bR=V;zJ7hj58Ht}thL zj^*pX>Q22%1yT4GgrgV8i+@`cYd5`W&i1A)B6XleuP=+>(AD(pm>U~YmZ8i`hXnA0 zFbnelat7;-z{tTYFI(957T-MWxcGWTVqT_)DTsXa{QB_B@1n}T9&8?)yg6?<;#&pN zysM2sv%dkNr3(Rhjyu3hz^zCokGB2jj`2|zS@Q6{lXU1+O{Cwe1QAC}uQbYtjodmS zsGwu*3b{E69vr;0IIsXa@}knVx!&I1#d44);MnD#vgUS^j2diBCRs}@oG-XvThvrw zwsZKJw>EUMYM62c&O2>CL!{*=$RB7wuFn~AgzJXZMlMB(o}47=mO=9<9A6!O2~=C6 zVpfaU4>mGKZTQ$5=s=7m6|9B$OgulYSz26dKi^;c!_i|w(YqAFU4*b!17CT(g2_+* zwDWv|NAAh)XyM|qRW^+R7OVEliXTL2O5^z-eNU*(V^&W4wY&YQswzT1-fHx%m)R0r zB7QQXa&%Ni$2yA+hO5dCG5wKwBTF?IF1&W&C$_e>_Tv$4>hmj)&P<+0Tf^=7#Jx>l zuzH94Y>B7%7XsdLjMKP>uJEb=jCxj7AuMsL$)^Qo`og8wKU32{^QUFVeafu>Tj_(M zvty!1Y049kkqyJHD}k6|hX-VM`;1t3BClt+4E7em$xahgU<=bs#%ozYcov8(Y$msD z`KZB*$%QXw({q?QjO2rgj#%1Nw&-wib}-H)D0m*aqCvIull4;r6e;8VkK8Wqaegc} zSr)T$crtVE)!-u)9-_lnwl$6$s2ata?12G~1TBAb>4iaH$b@M-JksBVrYQ}f7Fv{4 zY{g>@xV*ZxrSin>I_bbvpTjq@`WeQ;$icOl_{^YnfL1z>^-lEylupC>CHE!m3F}j{ zf^K$C96F{3oIe}V|0hoU&j&^wH&C{Q8ZU8_CnryVI~L;eY&+5dR!OT9f*Zw7f>gyD zpU=qo{p<0S$jdGak9utmDx47ieB>dXj+OTmx2L`+!zvk=dqL@)ot@&a21|i=U7N?n z#TDek=7x9Xe7{AhjYF;l%Y2rZ>XC=H%GvWbo-WSznn&Fv2oaGm>LjI5IZ!EGm6Fb zTRBMh`Bbp07h4%zCMdoB0oEpm$l@CwqE@;VJv0%_Rjf|+a573A_CoX973|vQAzgT*(M5Ihg;VranGKtql&ad@UVvuwE@W_6LZ zuML3eIRpN*`F8jXEIj^T`yG*T7j!<9uyc~6MOHPe;E|oQNg<9SL%>5Z>Vf+FjHi!A zYbVL#L5w~RuA&F?f(_90rIkKSrF|)igF`&5%ADx@x~%L%PXDB-;Ju*J&Np~fuf$kr z?@5gfV$OX?JUV8bMh6?VO2|nxL6NhRi{2sa@qBQ&a7_VNzhVY-(1O|o zf!qY|RO(W9#Q<=Y1wiT^#4F*)_*(kHg?MRQo>QmE-uCdVjLdd#5;dlde?4+Pp<_MA z;0XT)HIhrqO-xUZ(r(#A1?c0>NeC_nX+?gWL$@syc7E`ZuiQCUV}$vG1Fp}Ju~MB7 zi60<+a9A9k^ww<#5`Z$6?cR5Mmqm~9@$uRlA7zoC1jnqCZbxT6$;A4LE4^j2o=@eU zANqnIy14mN!BL1fRX9ya>%91Em%;zAB^FduQ&9N~755E@#nNJ^cTP2K+-|Q?kz1dp z!hdnZ?#ZJ;QUa*lm*hx6>ForhEQH*V`#$fL_e;%04^_^ zpBV(R+k%UNge9=sxeRd{)@0j1yxzK1*dmWA6pp)jcqF-tL=uk&&|s}ws>Y;}1)qkU zpG#VPaJWrFGrGaBu)MyP51A4ZQ>SnH*sAbMH?bR`#}He`j)S?WR)vI9^mtcHz>)6| 
z&O0VBT1o?3gk`Wk+JnMu5PDnhAG|pE)$hU+m@#xkJ#P}C3>lyG!&Y{8biO!PA{kk! zE_Ls2Icm^`$||q*baou^co=U|Ykhjl#ytv0KtCjnI$$iiSYcvp?7j)uiBtV$)bpRE z-?fXa#z5-00q4*chOO?ef+X-#CneqPdZdnD>4}t9DvOMID(T>&KlR~@-1_xRTG;(Z zWR%Cz(y_`GqI4EpCm?eE09ukZ7_5Q;3j7D|oL$}BZ)H(HL}@vQ8iGkV?j;r86t=1@ z4-IaGVOguYZF`wf@hn926Ac;VbUizAy~f(OaeSLfPf98R;=G4caZoVkg1m=PpX*+i zYdQ230n?={?(LihKT1nv#c7ptyZnTj?EKjU|F2?L)(KT_;{*!Xc&_mCMz^KPT1W`u zm=ONGw*GtBg#-&PN^QTga~ZQ{G`dJYh}AVU7nSx>6SpKe+Kf?^qEhSdz11Rspe>2%?|NRT#~;62?^0O99)wp%@IQ8~xnOiwW)jzMvrf+C zwNxm>ES~e}$s1@7yvrlnb`;F5oK%WsOx|^|y?`4zVh18W64v@zz*jDCX6^AS{d+Ng zdZIZp0_C7vgy4T~brIN0H$Ya8EDe0;Ns)s!odYh{{asVrzS5-kg{FgLTkv`XK`dY6 zPj*h}m=f`K7-ye(8vCIb%8zzNy z3uqB`DpzKpKlJ3u3XBVMpB)-r2mTbBX3^E8B@OFnOH1e*Rl-aRq(E_$(p`O(5-q?q zO`5YMI_){zzL6N?Kb!FQqlZA?R-vSP*ims5*&gjYhAL$teR)c>lcZ=W)oV^4_xC3(G9CVD^{7_sEl)u_K6~bmY{CQskQz&d8lUHYyYP}|miqV;y z_S6w;*_y`(6Gwff@%_9si#8u5*B|W*_oYzzycMY67 zm*E0T^3bpGMzWC(<*P57bvVn5l zWuxJmb6!t14-a7wNO;bvf?goPv;x4! z0MP&HbG*5vHlpc8I7#4kCV;%cCdz-yC3C#yYnQ3hmfMkU{F$&FNm6pM??nH}AaKf` zLvH|atjlUVm6&=bZ^kN~3Xw;3X^hLReLlSIm?T9TxQ61{ zwRG5+cLw{8=YTxdi&Hbw?Zs7l@2xjCxStUS7ntPq6%q~X?C;Lo^!?U;p|$A9Wi-!( z;H6>#`w*^$h#o2O1lMU=e#D5`n>DJo8-REXtu5-_C@E@9RG~ypcI_Y&90PX=4~;um zd2FwY_i|Fvv8-*}@F@jZ!ku;qW|lojjK0C3@^ON z$DiBXGYqK~qmC96A=kq4zwGlOAze(P7t4u@cr_MblCp&cpZXDcPYbhJUA0xLN=7oFFRw z2so19A-)@zN#}D^III=~E`&1Ca{)+3oh|c9Xg7lfom~0?+NTB#;|>QDy19%aS)kwWvWv>JXD+nYy8iwM7+g4(A7`|B%FkC6Fw4@1?VX)o z6KaJ`(j{5mwhTl_cuC+1uQhlrv(!k(?D$^@UT6EIoxi*Q{wpg@$Elq%8$f;O&}1tF zRtFIEyV~3Hieg1#+i;v5#`}mJy2dfP##2Wk8Z*?+A6cUjOh%)Lc;n$x?SdE++s-ay zpuhhH*fA`YgKP*7d0~`pC0?d_oZ+HD&j$-L4M$f=ULBFqB*H5ja8p{mzxK`~V+-0V z&b1f4!IH!XPHNB=;gG@r$s5y2@l7zX4tUmcy4Yc-tkK*~Fsuvd-l`O(DZatOQ4uxH zmHFl9g1Q7%1p{e6Vd4w!?beW4JO^w&8VnMgot-ypYMFjW_oA<-2|NERh#aK!9kEFX z4iyt*CQi!4>dlI?MKmU@{U7MRj1CJZMmScWExUjf{uzaOjy}vH>V5fTtqnZ%4R+qoyVsB}KAQYL-0ZjI;S~>*09B#RM zV*Nr}=~LN-Ua=_SRBn+&i(_hGU3|o^lrv`=(H<~cJVy=)uwuBzG4U4|3i+a5U!~05 zGTBU;@~F|fJtq%4mRmZ|>xEDoP>+^@k1HN<&R}j5h0Ep-q=R4IXypXdNz4TaP*Y}!1jH&WfDXoYn3 
z7l%L=wU1|UnU6Z6@#M>z%1lk`ENZuab4m3|flj5D+qr zOU`ZHzu6YEe{C~k^)?vN1C^p2gsfl?;Jex0-VQJ;GwfFj`7XN{=k-$+A{10-`iaWa zn)Eq>9Bocn6G8AYvvF{|6!BtKDr%61@z&w5jRGgZ?nZ)1KoLyEgJd}-v|IEJb=}RyS zruHuswuzRYX{zxO=}$6g!}clvqk0auEb`Gd4j{g9gn5Vyo2#r6Y1-v{Pc0n4Qn283 zlh` zItzt9O|T5Qfjl1 z5RP~Z)Jh0)06z%2n91$HYuo9_bXkLmWM3g?rupg7J_LY0H&HuXi#@uyOb<^*xL(h_ zz;Di+f7oyW|A2J!e=d30H3Fh*wzytQF>#I9&GsCe@9P4ScXFU+7EO#Cv{m7`cxV)4 zMX$e#vO}1$fMvcEEQb*+z+~A$YR<5V)kKhc8i2?}%-hogI2+Whg%1S=b-UqMjR8YT za9{SeIMoXTBRix?y@GW|aPrxvp*SjK!NJMq$j&#CJDx_e`IaY(F2!2tCvi!ElF1VV z$H^Q=#BD6-LzQ?@bWWxBI;Td2AD%VVpc%Yvb6jcUG0N@JzGCM56nsc3q00{lcm6Pc zz0giiGXl~gno;}`s2?rF{my5XW8MHBi-q%yMqgUFf869riFok1?+7T+jkWR#K19#cteBjObIlepwV1Xt_R>+D3apfCmRt# zC-Y3`^r~$NgCXZ(5l%RaP|_|u>P=ZIvfeqkpNf%|G5Vw93_!b08%QuPpGP0^)Qjh@ zx%&+v90>9r?NYFmfRRp`i`WHMsoxrdr`XNM!?%DHgd$2J7p$=F!3kpZzpZ>Q*Tb`K z);Sn7yHR^p>(CSn7SMp=u=B!hPR}e%Lt2DsTvuTH798&IAJWPnB!qf-IHL8V$;dDd zm@CF+Ea-VtkX$Q4g(})Tg0-lrcOZr|Mp-wHC}(>wA$LBIbGYqnRnz)0#Z(JNlwYAVnHDd2iKBwU=Po8$Xq>C5JLi*lau`RwZ3r~fO3 z|L05c!ATg2W25K+b7cJks{KW8&hR^z&M!pU_C5+4259h{Z{BzRiPZ{F)TR5``Bp^} zV?hQ&c0MWv&pog#i_PWsQLqiNYfF5EwL}UmzrVMEW%7rjD-kEeRdxYP>olNO!-OXYlg=w&LfL)e=Pf^oJW!tKK}8*P1|U#0CeG|t z6i@iz5~_p$Y4aP}|Hjz=^Y5)sxmkyG8%#y?m)SqjUj#)AP0*YTxQ3KL(eMdT5Rvrq znwi|NlB@fTg&-X`>LQnr^9AfGv+w;z&=%(B@8DSLT# z_LA#<^Fa?-&szzm@CYSQKxQjVo-~oxz z^v#Hi8*&ITmkKZ~v0ZsJEp($b-tK)Iz13^>x(>3Hy=WJMYTpG4!s3LZV3B84vN$X{ z6}umx2?x+LG8!&D7)t69KhCD^sZ#@zx*Mn3x%F{^jfng`ZR3K{R)Z$dybjH_g=2Bi z#@@7%2{}w97uV@?xRheKSrKDNtpR|1o_C;IX|1e3WI{W*jm)i;?$DP*K%K>jhIq48 z9&8+(OfG|2)w+T)_;T4+WA&CQ5yA|Fwd2b&jeYydFFWA?cfM2}e1om;&<#-XR(NY- z)ixXReknSiRP009g}3d~x_iYhxoz6n(vB)bVozfLP!1Z$3c!civYzI2)UaT?=U7Z0G zoy>y5NA5G|9!JA*9Lt0>xpQDjq_P(Vm*Yfp%uxNYFs#|jZoNcS4#D92;5faddT2s3 z*^6f7P`bX7>xjnB`ifF}GAK3NH=Nc!j8 zjp;G6QaXhTopETb0RCk2l~3LAp0B(A&*<}i-iw{P{VG`RBO^@NuV3gYX{DK@kxU`QB$DO2z84n@{O)`o3(OJDA-M7 z5G4`YfTeUgroC*mr`C2uHr)EEnT zuS)Fw%~7cOi`rl4hSrStr`Xs$@Ln?>B5o8v^cPSI$1-D=LDTgn(*h`-i@?LJVBQrP 
zGcYf<7bUosEP}_I@avj>Vi=Rr*f>P|bd>xswpw#f^zbOkPx7j*^Z_j`R!FL_!{<@2 z?w*42)ssKyI`XX?9^5!|l^`jFq%K&8K|}k=tHvFe8=SY+4R-XqlM+It3CqZ|hGeCb z^g=Q@UAa^3orSeiG7K+9SLwfp3*-wNaWs_VArw5@XqC7+{T%57gKj4~f~51jbS~cr z>v$aKj7`pzy8{IS-%sbrvRr-~Ek8fsuK)4Ze+yxaa#@$jq;G5??LM8|k#pgGWnYOt zo}Lj!vx(y?C-xmz;-DIe5^8*7Gb{gn;lS8T(D%ioR9qEkO4(%G! zx3HlRSJTrmefKqe+02C5GkHd_LHnDGR>6l1EoJsoN2W3tUp0*FuNPKgB$pvP9T?%0 z!|X`OZ=3dfQ0ut$0_EAHB{}y+gA~foFu%blyrQ9~QGcoCKgqyvK1@!d z!NBib81=aZsj<_d2i0jS2Wh#^FDH++ymeV7wIbNplJKOzMj_&VFcyJOV)F}?!P_T=542AX}n0$h1sX~Noh%5@)SzF0+&eJdsV;&4{ zEEf=i&kq+))3-drfPmv!eviIT1vFPz#gE2hfCVw&@YFXy@qqArPZY-Y2kRD3m+c7M zE)(CBwW!GE-Kg=Z(L3R+!}}@zh^uVL$1pZFcG)N$!dQC>rbHjPotgMHDd&M%UO^qY z0gbawNW)4B+y~Bd2MEhBqIeYZ5O}XI?#$s7#aa1S(n+#6t z8ulyf67o%ZqadEvs=r>Lltq@=!>`P|naSuHFlTR1l$8W3N!cOb&i>(_?NN zXOfbeNn}HAn>H-4qC=@9e^jZ&M2#v;zh85Pk;m;@$b%8yatIXLbljOwMS=#c9Q&IN zProL|(SSYNcV=>^^58Nmk_cigggL-+`pD>IW6Lm%;j0%gXRQx=yH%9L5b-6pSY>0+ zU@6HuSkQ#Zj;cH9@P>Y)dL6w{UlJFk>rrq-5-J=!fNoW+*;)*P*pm>s4}y?nViQdY zqW$125B)N;s%%Sz%W(K}%*zx`aYk{H9Y@oWrB>@;5SV|pS%9DURIhP+d)#!%6?IWi z`>2c$ItI1+9AZ3MFd!X{;YT3z07mE<_>8>>AiN~Mj)p7KGj3X8`RzS#xvzELr(=h+ zCXTsrpMsvwnXr%O^@`M-W8Iq{@lxIn@mD5pH+p?abSg-Gv=r>0DG=>!VsJ6(aON6> zFq~6}HHBkozz7+7NMhdH8x3?QqPo&)8_AJ%ttFvDE zytyfP_=yA5Kw-Y`uqI>4u$5#5=$BA2qIl7=;{mcl`Bnoz9(b*tX5+eHXEZRB*<_aH z;;70I+T&`t5ZolUK1oq5eQ36e6op14Pn#VF&xA~aFSn^dF!Y#RT_#lS-GlaB8f^4A zV6TBm&s?FQ3wphVuC&&Z&1a#$@JOduy@K)%KYGjvfaQm)>P5t;;`YKVo>^S(Wz4)s zXkePE2HEEs$}O`-s)_^iTNW+N6EF+IVx@6nJmO{9NB?Hp!*L;ayunz0Ks5s1)BY%f zP!Ejjp)gObsMv=fAhg!5=#5LZJ|*Y64RqZ{#>?)e-liw5^&l#|fx??r!D&c9wIl~8 zrls@n%XU-}mR+D6E4=O|OWX2uIC#YILbwBM_GWgC3RQaPLH%aO_!qfOq3ymukTs0{ zA7ftuRb}4pJ0e&JiU=qvARukfty0n*Qi4c_NXG^dQ3L@cl?LgQZcsX;yCpZ>o%h*( z=R4=#a}V1yYt5`#<6?N@|33K(LN16>mSA`U&+!n_IX{R&DyEQCa*pM^XogI5Lur8w z*eKxIcMM}WQ2Hs00$st8Nz)EPhBlX(2Pl}w`gF6~dSJ9v zv_N^#5y~J^wQ|4byuhkEY6NE8$_)?-LCNCg8yH$5a25bD`&Br*s(VgKmD>+-dZW)1 
z4twp^ZQ;fDKceu&w-!r{56@4U`WtD74dv02j}bTanVY%Hu@03o$efxz;N@>%etZ({x|FWTTFQ~(|p*Vh+%zyAhCq-IQFrpUD{p7O{;#{u%w-c0*6=G3NL zv6w+Mrd{Zdh9`j(ZRCT!1K}Zjk(TSMck+T;98SL=cNvCH5(%R)?>*@QI3}oBSOAsd z3Y3#`HYFL(I5}@TbU4Dz7kumR`*TqT?$`(poPtZ?GBZre3^|R_tZDloZ4B8)M@Hfs z8M-&w%v+Wla3Y6x-LPAHTbhvt8!A$#K3*s!pUGmQB)+0$K@M39N`Y_{LkOUnPv0-gf7|b2SBO$U6aD;mOIv9=v&Mr#&{I7)k8BdyZL> z(LFO4ul9cyYSVQ-$u^$x6BWtgWOk$_?xaJL(&SiB%Xk#~5H(3au{}%2j+OzsD9JYf zf;DbyK02h((7;r^5A|7~mIXAUMmOB~X}%|*8f#%i7rOb)I%zb0)_-CuMjk!;)gD#I zi7-;Pn6i@%m#SlBXLY7TOG>yUXMh2YyA>b@NyPkg6Y?I2&3}b%i)rmO*wLYSLKG9x zkA!nsV{E?{B}O>m3;3pI5CRE39Z^AL#ez0+?zfiu=`>Y24ye+aXXJS%K?z)rT)5lGCueyF&pe*e$r5nW=knTJiD^?04W8F2~c} zZui8h!(F+#=xXlTu(Qyr9%{O#zSbR+eI#%a{nfahh+F%)`L?gfUkGkZQ0Pua@;h$> z$>ZQw@eVK;rU4aB7Jm@pFpQG)hmALb)pr=7NIjD@_o*tI6ZTmy9fA~B9rgay;iP*G zK8$nh{JY&REWY{Cue%b0)cQ=}C^NEmGO)Ua4BK%oaI74*(pfsG4QDvF5)4zy0ZTO( z&&4LkwHUOU^W|H20gwvVKRzHvpfGK~@Jy14eA<)(iumeHcb}v1)gk+YPs+aP`z-fH z-iBU!iaML{wC_ev8}-r`9C>iuwSZW}+EtpWw?HcW;g?RB|DIJf5v%d;M1VXUt@Gw6 zv8+G$tqSptOWfsF1n5eZWtzJn*#yFaE3m+I1*&PQz`>$!wYLj%;VlJKRaGO?tFyN; zH0E>0Bd=$4g{L>XMaysuVu4!OhLQRr5W>pE$~00rT?P|+@;xn_HuD~pwi{`)SGM>5 zKhw1TcC+m*Sh&C^Wy~z1O0IE+t&s~Py2YNf2HqkmQ9XUmlLvD)^=&ijz+Qdj^x#7x zRjE=^;RWy0Np3N>AzjeiyD1!VaN#3~_S6`iT3%}6Eja+ZmCoe5SZ5a&dk&!kkX*2N z;BLdi4}})Yms3hg_Wwi7AX>uQSxivB;giM8cu}NQOkxFhPOZ&-;*00tJ&Jy@IlX+L z0X#Oj@hzCjJbHGRJi2Dh^7LlY99h1lixAaNgw}>hhs_w3$EgPv^~7xG++n`&IukjT zs+?v4*bU*uVQvZIN*Cd2mjtN@UR$8R zjUg)75zrDe&uQf(7G%?hrnnrWjW?zWr2KTA9@Qiq(A`K4g`1N?b8{#f4sMpsJu78#3#)a3r=$6NXGfW}8FBcd#!4z{%+kJ$K4 zD&Yh*WJc(~nM8Mlpj_^|27qrB+HdL=meH_U&fL;m3PJ67{-@E}tP}<;1rw8WSuuIs zg|bpNwX2<2v=oU3nsuVJOHH&8kDuEKjWwNpC-da?G3m{#03iUXAo?qaNxOi0?5Mw+ z=9U;_y9G}B0ryrqc=Q|m_`LAwu@Ze3h}!1Lk&#Ga8yL?1pJN(*FE0BHw#t&7 zBvoKHyf567diZ_UYbJPO;^&e`SRd2y=7QT$SUbtbMsv;bY5Oq~m$~C5Um|o>1QZzV zBXn0A=a{wQIn^qtau2qj8T`)&1l4~z0-BI{1-W2@X(t+^v(iMpL)O0^>67DJf1VAp zEIAW#Bk~n{A%d=BxP8JzK*zexf5X?7`3=wic;x>5Bw$H&fMqftR_&ui8&);)2(auJ zo@f?R84&-67!3H_Eht~so!^mA%~MIU*UeOuSy?ZpnP)5C7q?rA+x_j*qJh&kMur}4 
z0@v`vf}meB6sJw+tJ&v?(j^6eahuMbSNmP!^J6L-(Rx%9-?WspRS`l$RM=3S zPX$mPtNiKh=Fg36&w z$#JW}6@NyD&BzUD z?33(r-4@BHM$TUImDImp7p{AYEnHU@tkMa961_Qd`?Q+0#8;?fD5Cq}#j8fSKZ@|m zE9~autIR9ijHY|+Sq27MXc!1*>XvlYOvdMXMZ4nM47Q_hUKA}E$j9EuC!zXjK4NCQ zv0faE?>|h~aXS9_Prz{>VUp{*pTrqx=FR#gsEb4l4T>^|DMrS!QR&>Z*=^M4e1+s! zn}y^Ka}9}(Gt-@Fjafo-Hdg0Yqq3_o6~;D4Gj{p&250|q518Ln(dT(a7xG`n_rE`q zpfq?nuMjz{A~8o;v>z8oa>Yb+M~eqiP%~!RcSxzk1I_3&3HUDRNL~Cm6|CQf0_k-gpvzS{k|4&H;sn z`NsV);DCG~ZJKMLQviU!zXx08u}uqq=v1yq#Q^E$R?hC3Ugi&D1b$N-D#&>bPr%y$ zPg&`|p7HksOpZdPP7)@iuT1N$oa*w$=tI>2DpVbaExyJaRD2eFkNMJuK(~v2#{fxG zk!=@io6*yK%|nxod8`jEK3Tl=^_ZW{woFAP-~8n&+Luv9;Ad_|eCf{(LnBS(B5xj{ z`H=*e)G@cDEQ*3FS9V23#V zVZ3f%ER*RmJw!R+XU9XeO25CAe}_KLz82!WVD_O8GA-1m#_wyTPM`YCW@5}OJFQyS zFHbjzqI|^M_Pe4{0@Z+md-v|$Yk*mu0o*l;11ihAlm`$aW!T4jl;j9U>6S+U%n79l zo{~ZzDRYICYc<9LH|&9-4A_lJ3_rfx2zHQbAeRhCoz?t4ZR}&-hB=&NGB%_3?i587 zvj~v&2!KoC7_eA9s+PdJ!CsWg5SVKDi{RW?zr(tSa;Wm{LXk_MZ$zuRDlN6QUX8UK zQuXAH^*XfUT5Ew$^|X8C|V)4vuvxMY^;W3ZYS*wHY=n8-L}ua$fHP^u&tKGk)% zVL%n3S|_;1$YzmT7S6H_=e_Ar6!Q93kJjj$C@To^P2iB5FN?x>o=eX zRN`euJ17KJ<;g*1TeP5{tL@SLmVf02HVbU7L}0pO-^#O)jw`9^x03XWDn;qlVZ$gZ z>xd>-U)=C$AzP$_(@g85+no)WVU^Q$S(``0Spfu=CloRTH4&qngkm#j@fCkE`0ejB zpyBd_t+r1h(HVVlNpmS)g49rrh+r2GgZE)C2YGRSY{dGP!gJDlMJxt>Gt1r%kOTh9CO z_H)P6CVP$+7uSZy45@x#lEY~4#^nai9>YFn;}`nl3fsN`?+EJtR=fh5#VpfTWm=jp z?sFksmK*>Ye9-_J#NlH>3cjg_B`yqsHB0Tx=iZlX^=f#e79{wOefVq`jj*FHE*%Qr z8#`Vadt+_BV$!h-14O~C6xpSwIFY2Kf}fBm4i^jAqq`4+u<`l5H3xZ$mSc4wI-$zF zsc&0xlet)*U0U`vM-t;g(QGpR1cVp_7OY5u8>zU<8*LI1eeAG1I3-fVXk3gh?J+Lx z(S;$?R<=KaDXIY_vQG}!_Q&rotAm#yC^-OFdhbFrd!Ob8Y}q)I2S`2?O74IT#s!J< zE1OCu$dkipK)3L|I)Kh~qr>0T=5TlI?77PqWXQVDpzvUT^d=?WhNX7|2&tk3Nw|{B ztuZE_+d6qThGe9q@vT-L)ssE7ADi~wSZ2L%{aCs>u#h%wzm7HG-N!a_wXa5ggkG%~ z8Ue)4&s@sTb^NFs&CB{RGnm-$Hdtg~6T~&|&YeDR9i)QNpZTel$s&*h6t%R-M%O{5 z93@u;(Z|yRwOFp$&d*|2&$&%{xl_lZs^F^>ubXyZX6AC5&XI{jEJwb_n`#t6p}H`> zYYs%J*RgeWAGKeoXDykHTX8}1!s<7x`X@Bg6Ym8NbwNEBZXF*3u8e~NsZy>kXfWF# z5M2~++(931DZtsGM)I}n_=5%`9Vy<$b0eFxM>zy@i(!3Kp*ag7@zD3l{iW~uPJ#(5 
zM&=M;imzCF=cGC9#kc!nDvZpgkIW@M%oqe)ik)$M|CXgI!?|C$2!tjMKYHMgOgPlFjG}`#1g>$-h!V)vYq9<7N+(2%zCQI4m zt$JJ7!i|f^lHKkVl^8r1iqd(gdTnQ1cD%pvGqXw&IC6rfiao<1Xu{%h08&^e{QP_B zal89pQPacf&oM`kYk;@LOLx~oIBVBeQpJ+-mxgUkeIWlqef{lq=3}Np+~gbOIwlQ8 zyT2Dp63`E-{v6vUd&eL7G?_6ET z^fd3zaL=f*`x-S!NvZT*r7 zUV%t+=T|!*9#6Jl(Gshn_Z(y!$3JJ#ZJwj{IJ4&>;L_}cM_r}06iEo%i=8ddUwSr^ zN-ZND&tO+1T-(st*hEg?QC}-I)63e1%t6n#`Hu*aI5-0lq%l!^YAG`UJtIQ_b-WmQ zJnN38S=~M?+QEYLvv?WF^;Ta@6hK;BA6eq->x;?)yQ47Xv;6ht-ncmZ5`gF5y>E%+ z%a}cvy^uPY-|qPDq&XbIkW@jz`!;yY-cXV<3?EqzWJ(A zd;~nA%Q_k9>3p#5sltP|((mk$rGMYa{*!LGnYk9%;3R+aWqY53E_}ZW6ENFq6!4fl z3jyxGQ49a80RPY1Yxl#*-x|;q;L&-J9Wg2_^1>zp4F?uT{s6a0oaZ^~q2IS^AW7dz z@==yH@L_@ZZe4=p5M?AFL4%2OT&x@hQPa-A#<$co1^yt{*U+#62osdQ!0n>^15}*_ z1^`!dcz|gPHg?zjj8^i5loR!)Vp4aPva&KSo@d+%iz0V^#&yj+=>ez&om=y!oQzY& z|FB4s^*70#tGD2$!cBaFrZS{BYD2|UQbA%KT~mn~6e^{#uvq{%YkWi|T7ZApJb@qh z{;NH9jdMqJoE6z9^l=`=UW1qnICVWLl=cOOnh6lzx;O>{Wg>khu~+sUXHZt$pmkME z4W91@wxM9N>7Y{Q3Z1?#`z}} zQ3S+092^`^T>zY+AO#S_vGIJhh;9hx*t-P4`0JUFLG(%1t9ALc&l=RqVi8Y^l|KzU8KL>pN7h! z>dbl4m!O_@PLk*x=EZX8hZewfOVi6J&j-e}BJ-Opn(Fw`Wjl)$@%uRb^^8fR8r*9Q zkIEosq@x$a_|sG`lDoI$i&X54Q1@u+vvZe=$h|$iGe&V}nUz!1K59V4tDlyD+}xfg zXz3gdju=HD24UBtr~3dU3u0ff5&6hIi3H3H{uKVOk!iRLa6?pS1WKzn{8pf0dx(n? 
zUaNk%WQX^KD-~W3jz(Co`LGJ503eIvj1J zj>P6@C7}NZtGfp>%BjW4SD5yiVs43UH1e~hp%w8?3VuD8S2EZ&u4np0+S1N$8$=mt z&bz_+rlz8r`YcfhmT`sPYgwlER%|gpQhM98Q6ms>??1l_e1Qu&-JttYOI!Z% z+Kp82*V$c~BE>z=nNTqDVsz7Vhzt)&?bby^QC%aO`_v&IX@h<(QO9{% zz;ZtysV#0;6<#4M?@Zh&jdwC%OPBrhl}>y(mB0rpMUzJOPwwJ|R2JS2dC}boR;if7 z%j-|We*KcNXi6!;jTel;)a`2h7Y=KX|p zpX`a@&ZSFNtX~07Mubx{y%E~4WgJ_si=;w*DGCn{FP7_}JWHGx=6tH#Ts(M>xCflA zt<4%60R^JE=G)_?#7kB85b_1=D&!f$Hwmu@#)$yB~D zv4fnde5AT2@>%`}V>oD@6Zhc4L@4oeW#5f$7g{Bc*A5<;%7QX!nSORzx^TWFf&^Mjv}J^op5pZ{JA*e$2s; zo+87O(^K&g>X;_K71GTXmClk+OH{SYQPM*s-uk~()c=3?h+Qm)mkx-7aJk^H(auf< z?)S+7`D#_i95QC!jCP5^y~cxib6|wUY@619Rbyp)WlTrDol(LO^yzf(-L~_+TDne4 zLqqQu5E^RVAwmSI+p>5`8Y1o-zC8p*CHIq7g#7CI%412JBio-w8Zb3~A2VH#m2lY)_x}EQ4FX!gw zrqtHAvdZRD)FNf1=rQw|AAm88l)z@?$q_3pttfeljfSSzomqaH*-tK>p!Vy-?*L9G z3s**=Y6T7?0rDff9+nE~bSn*+ z6|^IJUm7Tevrs19N^1DVWihC$Q%@MovqVdVaQZfCU-iZ*lJ&k@R+0#7t-MJN<92a? z-P5~d&OK8d+#!jt`xTxH2EGlCdtxK;rZYrB^jn8n@0|ld8bjMK3Z)YIa~`qR{O7q^ zwo>QTYyDZ*9!oK`N~h3{r!qB%yDD#{8namL_h4$&zDU?18r@90<9IDelGAf1G%)#T zZGuBr7S%IbDWa3Ha)(kP#)U1XwNFL!T$rZs7;lYSzn9H%!mC!)r<}A=E12NKSAXIz za~Mi>?3~gSswgNkiddYkB!tsfoHQYbzeeG6qF*Ce?A7)xf2xKHr}FjT5f(n`jksUJ z;@f+_xUpPb7nR7XlE&M4SdIO{-eB*VG*9S^;luI|CvK^al&<}E--mxi-+u*K_tzNR zo9E(FzMYMGqhvMXO{G?wTvlp1Cg50jjlCSa71ukyi6w}jgg4+{Bu5xH$Pt_JwfUOx z8G|{dueWzV78sTvK|!tqYBq6RoSdAh#Mdf+SOM)ML$f&Nog4NNI3+y=V>KIM0uBJ0 zae$4qTUZem=4ZDXpvXvY7$__;ofY-xUIZV?W zM%ODE`&rbKln;+|)TN^?*RNxHjXfU*FTqM?4g$;m@09oRA3p2L)H?3EDr=ow6E1FE z^mgAK*Vx$jSf`fIG87yaGu(*Oh_b}Xl6IpdZn<75h4#sI?b`er_eP=)wq@onC9P}F zo~)G1WqgV{lkq1}?~-c5U75?~%I5m2!mCjKN)KidG&D35H8qdDO!fqeyC0RC@FalJ zFcBf4cdZCFDQ801r)n(@US18pk`6)1;UMDUzV4T8+iZin!teQBjGnUPVCSkKLmXrQG4PzHINB+Z+7H(l`Sb}J6>-Hk*i%_m+J~u|$Oak;!;)qqW8B5``UTv$KXD`<)#NW%V5e+l#%M2EKQ`qHS9#1Oo z1}bxR+;%`56?jvfpZfb`f_Bd_%x)!Z&2=Hc8Worg=M&W9C|lo$X>*Gc3?bT#p531x ziCWi~)L9K>=2kt@$(3NTFzHg=S4GhGA|zBDxJ zh!|=tGW>?T+(KFY0pY~1d*Ls4c;itMRf^2TJd+*?*9sU3CzX*TBS2p*FOO|6xgV)A zij%-RM!^+y_?sI{r4*_{nCV=CxuDz!L#HR-+=+qvUs$X6*&pCe@}mh_BJOv>Q|#Y6 
zY*D3gAd@{}g@2j1c8b-cJRs8`O!E*%7e)T#7&0*bZX8YHF7%}AGI?oy`K!$Cu@<#E zdfPc6xicnC9_!sLKS5`%NtTb1QBppgks^3-%w@y3Zy~rO&I&Y8noDEzCMVgOcpraF z{)$gPKvRoy6wep=296%<7BK~PX3Yn35?u1q)1}}}Y~9^Q@VkA?Ml`;B*3u5hkhyTZ zgQO*dg(y`lQp0t%m~@^Cty|-NgvO?e4%mK1GE$vLw(rItXH({S!s{}s9 zAk1h$Mjm_h)9eOFvaA)3SlEM!AfIA|XBJ8xFyo zN8vs1SBm0nX>gy-*?oAvg!KMUF~L{A4~hy|$~ALn57EEQrjpDLji3zfdmW#MD8_qI zC??JHqqU{)zCm|#g_0q0ykk&)zYa(_zH)6=)^r3km#}oXmpec@%b=2_eFVzIO=9ma z-_*PBdDJ&q@SJsz=jRZ3Gs;R!2fs>$Q)suSmcqzh^VRFs*x{iIeGV8jr;x>u+wg!n zCZjB>2`Q5-oyWNMIWcN!vUi!OlJ}UMD2w~PosneP-rKNwIb1`N9z}|-hNS)VqQ$-| z>4v`DBvsmXgFFH}(0p$Hx~~WejERA!u+U*4ovU=)bILuN^U^9UE@ z&cMCMX4e05_4m(pd>L6;H;{Xp!gJJEeB%tZykUkAmC#BVZ)vkhPb;o`)oEq?pCRPbceSPPLnBVBi{u-lUttLw z%Rr4lIWfaqbYHW3J67@>&PXcD3jrVlcy}RS`e@h^mT{7nDCM)uP_X5Alp=x?)xnH zfWlj>_%{t(ck5Q8{v!9fjW0+RZn>FFsG=9R`xcl!&wogeOmg(`OP&1RM4l>w)!->p znT`cp-;czPolKnA%l{HRQQgaZFVBy3-P5n4%JyR`P zuPKNg#@h!V6FdEtHt@;yf10U}3)IH_1&=?~i|A(bv#OG(RBXeV>RVjg)B+NT+?nrL zK*^ZF>A!@Hoe(=btZ5|@$FK(_lB$`&Aqecz(a}kMWs?bc{WGRgPe^gggxj;VP1Qic zs<1vhOn2-{SoCMD`L+G{q7q#%ulz_v2G25ME|J@}wNS;#C;yAa`j*%0w(0ibNcU^R zvv~!p35lGah^+o3)KmU(i|ve}k+FUczq{kcOeA7^Ktx1jUGZe5Uy@wDP$YKc^_jEX zYrt4wkkA+L`ziPM#W)#{4H!LM$*kBn+2eL7A8qq1ulZV2TT5{Ly0Dno(30I&qmNgz zbrhIT&s_bPihX%$XxF3mfQ%(eFGV)Q@bdiE8)Uj+l@q`1D=7*>$;S%%^t(AF^YTqd z2P#dgtxIY8nM7-aHBRJTRJx^G^Io^=+a{oqO!}&@&sE5>MmjppNPm~j$Yl9$yIn84 zQ(9ebh4-AzUksp7b?@8PS?PxZ116uivY>Mt)mJwBi?QPct(bS0?mrf!ye}If1X$&n z)<{z}21y!^ER;c)bOe|D13D_IM`WxlEGp;I=wJ0Ht76eN5nz>16uDn=0^`~(0A%;5 zVZCC%*gf;&IhC|3Zg+ob;jcNwVx;+VZMDK@wGUHbR5iVZCWqYOZF*)RPF(X21`UrR z?z`=T@VkvYoO|IV1w+!q3@Z5rAXj#a~JV7RcWXd@2 zfAIh?F77tTI0zR}F*jw%ccnd#*2couI^|gW$%D6~AVF7SrYX)rp{oNH3E<9k$B+yk zpSoZ>QUKu;qo0ju0lmCl9thrhi|5;+1aM%loCMmau@to?`GD)U0MBDEHI8kg%Dslc zso5W=`%#_$-h-s{qQ%|qvUzztdRqs=tVXBbz8Qtm1K%B5@InuE4Jr%HJpU-oYb+yw z9#LB?Voh6g#Z3MR9@-Jnzv7zQ1qVrwp3T9CVsDp@7tz}J7t!YXTTG~1oJe(Uge`eL zgfXa#$3P8F$dJ|BH5vxj85)It#Ow@meu69ojrjv+3awZ~ns=d(NzwQq3R=7g>a>9= 
z&`&6E$~}i&JFnd$YU8)Ry_D9sa5mwC1IW^$Hf!)iT0jCXDA_-u5qn-hH&ndp_nJOJ|J6dgkeS)yWW6@%T#x0O_KiPh&FUM9e}XT+ zH3(>G5X5@mV3lq&04hmspo{+9dB?)PJ}@GJbOQHBr;Y9Nft(iyJ1ea8^!LJBpwWV+ zPyv}tea=I7Dm%1#yrH4tD&ww>MPVrfmkIA0NhNGCVc}%twG5r(MKMdG=>GV}Q|<31Dvm5Ya}O_8RHX+bbj7i_@zuC=qDxi)sA*W0RM{Ue%Lp5xm?oeqZjy4;OO|6jWcax&^up7!c(6}0i( zRE6)w&wOudcs5+i|K3c^@@BY8IO5#hqO^doZ`HjTe z*J;t0BMq1sy(5jUj8G-1?gbljvV+dInNQXTYa_l~=voR*a@k)T7&}ZF*x*TJrRj^F}kM*zV~Z5LZ}p~%Xegve$m2x*{+|H zXST?tL*Lm5WLY;)RH+v_tlw|;$V2z+{^PVr-P#o>54*A0mDczOeOImZP>aUgeF4&>|;&*a_tqVIrEdaGO5;A#jyLy~kg^ zBB<&wlz&mJrB1TVnCci)@X>q14*$cn>Y`=^!s7_~v)&$<)K@7@p%k_&HV?Eo|d8o2(7y`oX!pfGp^45tq~DAB3kv- z=}O~NU#P@Z^+H#_D<%C?Yu1y}JP01nVM5hv%|*KO56Blgn0_X5KQ`-G*f6%7B5`ay z%Y%5~8dSVwohcujZQn;0Lzi-xa@(nmMNb%y}m(lEH z<_Qje9YScY7(47~-v3J=*wX%iWWw1Y2K#2sd+TU3yyrjc<(IC)z~LnW7ICcxMMXvV z%*|gKIfCsf<#--+v*!20av3g^L~gg8ENv7qi1S9i!+JG9fxWI%k!RHUw(91$OWGvM zt^H0sXI^HYxqVc4iXg-gFzi3roNwvvdh^Z)?W0YVhDnskw->rFGY=A4@moV~i7UyD z*vD^%G7oBO5yz2;YP(cAdGqO+SQoU$uVLlVSvxoGC{BG%avWMi8q$m%iy;{{cpWxy z2@!`egA=61UYGk@zAQ9DC;pnsPr zoMIEr7~?nQ;?cLabnRdCb1t^3)*pRNW~9?7(T*q2x1k_@FwefWl(QAFAf0iLj4xcH z@v0?g#OZFxI0n* zw)WKOYKAXmX4#q&&BJ|}3W=3pd7YH#4Cb4Q!!I9NduUVZ9~5Ecw)%rb4k0yw_@M1e1M_1b74zfRUj=_)HpruLNVEkv{6I znrO#l{aTjLN#hp&Yc!mXd!2RdV3Yh{BA-Q@2?@67r(bdRwKIy$+*f4ILX`M~%_jMg zD@$y`8%J#VVTX1hqabr`k$LWG+u{%zq>rFLma1jDgMs_K3ggYyq(y9UWu~-&Ez$uk zE>p9`q8*!Fu9mk>gqwRV%ZLRGtsq`RWWv#Fp>H*4KVk^fF%}nhHD8QJeIM1oXn$$% z^ovEM+gE3N!#MZuqW0mxl`d-X-cjFZ-v~7{s+)e;@Wo z4%4CD0^X`Uu+z43w`=Su=dDTORHs{dS7?77o2$F>#X32om zOqYheCAT4b)~7zg{71Lq0}7s6B}QOEOmoH1OB=NLi7JY^ zG`^r@+`6CN@w_ii9;d7d68|r)%kSHlxJYr!wJo#NWG2EWS(N4ERzXaBaip|w$u22A z9%WaSG{yFaZ|G{5OK|F3&sQ0DH!oFT;?wUBaupqXff@LP9|DDcvk}bpP*yzNnAN&z zSG&ck6>d8cGe!91v$unq)^okISySy*N6n~ja}{ylMmsSqe}2wUUfGkt?pf&XbX^yv z4T^B|bxM{ICJlG{M6~H`y{&J?tj)%4(9PJn=j<2%#JuGlQk;RkJnTLux+Cts&n%~; z#P*^oC?FspRnq`mh^hHOLKS5H1AY+rvF%bXQlGwY9aRq-3pdcuPo7P&f2s z0*HN3GK^Er(sozD-vi;4p6GgQkOcPg^(9cOSpczzn`}mMG3sG!Zbu6`APrH@cH7Sx 
zW~~w{85;zB{A|DB$duXp;YrYQCEzUN0Us-qCW2mLXF8a5l1vUO?_fL<&#Q+5FS3bvAJy|Fs2F%k^ zUPgjM$1u$BK|?~tFeEs5NpSm`NC8S#7VtZ~ZpSXRgP_1EJiP=q90{>7U%h7uRv1+G zT@QCb_N+_)(%cS>aAH|ZA#6tC;?$lN;BspENko*5nq|~5{biLcj&UwU1QjBaJu#4W zre8XRp9_wT70g(Se9MltAYlFZrJL4ZlP&VeLE5ZWq)+*Zua%j4x`20^{9@z4N~^?1 zapZi5B8ipRZj)-_@It?vgkHngYO&U6na9tzuE0ogK6iOE6)WsMo#=0+>dH3Ra+4Bt zeikNrPUWYa&RWN|D@+dN!sDB! z>)L!{$9t9hP%~%BO`dJdAQovj&V9MG+c>uBjlb>a5go>9sI!VGitc*?_(lS%fqzn* zIXpakYau={(QaiF0Y0(<(X8TOddcze!g$31b#0&0TYIb2%-B@7S#o>>VdC zh2Y!yX_eT_ff7^vH<0_V27^)5^3$mtV8Er|<(CkP0{8(L!>eI zi~^xF-@7MDvpvVIv2dtteFoiC{zxH=N!(EyJL3lxALWH%JIAJ@uEJz|L$^{(p}u#t z&wsEE_9#C3D15nWz*KNv?MU8huZgYju^BRr^C?qgn<{RP*;sH1&+eI#xds28O_j3j z?^(CUG@j~vd0niy^$p$c{s&`A>Ng;QhjU)|Bq!l4l+^i=g-1oPM6bJ@?6|FGb`xS1 zeiQW_EHDqhj)#+7xeSuaF9E2YY=6c#pk(i@BE$)R8CiPcwQ!@cuUK z@zKX;@}5RS- z&~S6gcWJ0vup&R8OVg^*K@2)uay@{m46~%bk08Ia(xN4p>R8l3W}vT5DG7nxlN0IiEJd#7J!#RO5!N(#*RSQL=^CI zS0v1H2V5GnL$7}(Hth{^Ojmj&8>er*m_bxB&j7xZ*o38D!5|ue3ehzUGwuH~Bog}nP28gD88<)`` za#7#;Ss87=+&DnN*CIIIh?BC(8I;iQm*-NCVOh5Ju4^tx=QnEOrIiAK1d_6H3L~++ zygcl1rDNV`J0`|yy8KX>@Bp!?Evbr(cXP)3y@3YHCi|b||1Wxy;8JR3O z$xkgs2j!E`cFpyOW!;@<;eV#HnJ&ITh$KL)zZ~a%PMRjT3EKqj-B8oM)CrL`!qyv0 zjr?-sJi(Hyga(cqSFoZNxO0Y(R?d$ZdvWeD@8aDQK|4iA9>V~L2w-bJ={yH$6G(f} z`in2YY?usk)Vy~cz$joNO6C8yANGc!hJfg5EIqb^IF%4%AiV-gaO~DIQuIkcj|APB zg6<4X-=@Vb(}JGxX5^#OM>voW6={T|S5g9yMKBX7_%Tb#OqDLWmFFo)(bRh7uKxn; zkBZ)V?UPI1#3MyTTzgH&1^t9-mxz6vSScVi)%jDf(s3$f`QJg8jNA-bCJ{$=Zf9??=$c-iWo&OLNRP3f1N@Kvh zKP{*I`GI&M+tdeB3&9=o>bI-^d4X7ej!g~Q>{P*H-F zUT5@W`azXy5VL>75R9SYigz3&eWo6|!Ldm3h!xX3&+$HBad0iS6^TRB6>eWe-TCdo=YmH7GB+b+7JG7`}u4S3;(d)R(&twM-agFLe5Od42L9 zT(WcQ8?ykpq3bqWLADuiUhrR(D^vjomD^@kE=41Ym}NR&)dhO9T5f$bu>-yoOoH}N z<@>YO&p}P$u1I@#z05%8#EvUTPTjNAOl!MgBVdTb=hE~qjs$`-F3Zro240<|azp*0 zSOm%Y`>K|{IooBLhaFOW70Wc)6qcXXwOZ7vW?P_cIC=TYw`)r@ux-KCLTjRbHQ0G2 znkP$)B3z&Q`OBIg8kg_s3Z4v+J(bR~KJXTzOZ0NKPMK5w)QQpyoeTMF zz$>D@uC9q%;Q-hYfrD|nxdYdSrkcbU%>WLf85J0rvDQ6OR>nu4q@1RrpuuJUTB)#K 
zm;4MypB2DQP$-OIcUjJPndOCWswSYOK-a>`VSks+8hi`;lWy%Z;k6DMG|W9n_EJ6`#z(w%hrJy&l! zXUp;Qa34YE;mGaMJF^5<**;m5JF=IwDdq-CInrMsERvAzesYNpxGPFYAGExB9$Jon zX0RYj*A7IxzIiQ#xCXvvy%f6CWa^}-sJK#j;%W>^lmOyly?mkBv%K+eIFIMp zff#RHNAmVE-Eo+FDqE+rWuv1m-|-%QMBR8kRtV32#xAm+z$mY(sMwammo)ps%VSDk zE=n|4f*N+)K*iPzSVILC$|f6FVX?wv@D9_?X3unC8@3Y`76v!+1a(+TevsuOQQLl6 z=8#=I{t1Jgp8mL?ezF({u_CouY@6U#y)*Z13-L8bzK>-2)q6SCS~G0+Kt8m;-Tfbo z#DA=+pyCo@Cb)ZJbMk!x-#ydt{X+xS@X@sDuM_fP^OSD-bA#!5zt$}$rj6D2T6GF$ zBJ2&~_+~9FM>IA!_Vo(u`}jI+bl-^bf7kQQIJqOM688LvNn7Ck-TN1JTO(DL_ z%FJ?fJ+o|@iCKGBX>pg^>AARI$;f$*W`tZy@PE9$y0@Il zVUZhD64wExk6U4U{L~-ZV3oSM`oO?I+*Q@<3n*DPez45x%?M8(32qB0WG$Ww@E6!` zpm-uH8~wnr6*xnSD}c3^isDn3AjHNcnj*Uo@k0fc8048x@x{rc;_F(DPXw9Rch@)zBIv+NOGK0-e4n|74pM2x7GbUqGemA~o&bi?bi* zXef6rv?vmL?@tB1=&Jp2QSXGRqKYYWHy7cVfLu*(rsxa*e zEOc-8YYa<~r`QJr``iK&5CSo&swh>l#fmZCwODZpDhzi zQ^E_fKN*UfxH@lJv<$L6``n_fxZPPTGVlw~82 zSEBQrO!E?kYVSQa6xCr@xAbLW=cS%+GGs%ZyAqP=P_Gfd+J=|k6cusQYTi6@Q$xft zgKcWmt?F^hkrm|X=a0OD@Z)vO`m5S+sYm{9;lFjoY5f8ASVhO$LA}C5V2l)xOCBU` z9pcTbFC_S)Ib9bbS~0*Mq9pF>4(PI8#86O6KI~k|0VhA8T2>w`^VagLi|~=Rfi7xI zR5_~Pt3O%-3?tZ=dn#zU1k-lbsFcIuPj-?04Gu}WZPI2JTtLjbWMtaEhCl-cm_#a1 zV@ElLTfejVhK+u2{DNAmoSM4tz>ZGYyIt%bcOJ_23dhZuDB^>)WnP8-uDWkwp{qv} zVr-KN?Q`1}!&((PD>?b<;hjyT3Qxaw9Soo6U0)h(!wT&buI_Y{TSV10ddi271#4vY z@9l>XwPZL;UhK6X61!ZYHUF-Em3uKRT~)$v`8Z?McL5ecx+hbyRrYmLM}~59ZMg{Y zP^3{w7MH3-An{BH|3}})`vjPeA7RV)GD-&#ldDT-9f+HZ|H_V98gooKG-a|fhVJP4bk4&}d95zStPGs{@|QnUOG7<$eId0A<~EPBKWFi< zh_rG|e(Xt8t*@{5X@D-X%o-_UY_z*N4y93G$yU+0HE&fpPdPW$F#Qzz;o2RTk1D^0 zD#V?K`ehdlGtN`NY5}z}9kcATqRY-H2Y~MNjwva%P~9UeqF`;4quZq}7RfY@PQoHq zuGtE?@YpP1)JUa9M74VpcG3#jOmM4aQ+o5C3cfl&m zwHeriWp3n?keV_s;3z z_7-pSw{& zH|=3d4W7ESR37KA5cX6~&=C8Bw)q<@Y*N4PX}SrIyQALK%M&g)&Z1V9_tEd^f?q!& z_|s2x{7Gor1e1c0Fup=GqS{+wmY z0V~Hz4+d%oi<)AeBLJd3;qTPYotmVb#mSLbl-T#FUqb52cUjAKvSkw3-ZP_1jA_67 znEawWa`SAaOXaI+Q)EBaztvexgexa{#Hke1x&Ylmb8E{pVdGh4* z3}8q%Lo_Ttm&Dxz^+gaB{2==9;m>6GZxu(|8hf+e&sp(q-hK8XPQ>RLKK}GK+sq|9 zo{zdwc@7~~Uml2yp$q3%gh3iUdg8F 
z#{C^~7J4SDTG6o3j5LRXftxx@k!wO%JRW8hE6qOCh)zp4)Sj2nT51tov<~rV(opnS zcA6X$j5bFO|5n>b;S^NN4l~`c^cKIPWi3JR*}$wbwo3nb)fg-dV3$0M3x#h^w88!& z!HP2zSwr@+kJm}5$f7Efh|#H$@y{Q0lehzN^A&gwt~cMjrzZ@ctZ6)34L)%dyv5<- z4NwB1N_8Z#^7z>f#UkiyRDxn|IVdkrksNwU456G*A*HHHt=WsvXE_+-j(~yI`zsV7 z?}Y99zw5@8x}CUMVF0Y_)4S`wI&Me#;DZVBr51ge6NYmPGrdun{Yiy~&D~dJoAt2o zeP%m7YeG787s7ZabQZM)bRPtOMy$EfSl#@_FXmStaF!Tra$2ILdS!l-+iBH6_k(|SuFq5h9x8f1b_}6d}jMFi%+b3 zIkoUaE7!o6k1lBYV5PG1<@h7|T)@6{((r-(%=#N`d70tt=k+QX$F8?CH?Lv+k`>sC z@6PbS$r{32HuhOztN!bD=Fw|}6TvM2e8%`){7BsC(ip8SQ>bb+itX0A=q!q=0!0pe zqJs~Hy<<9JWG(aoTB&r;YGE{P*T}kJB*Dwn&d{xJ<)nm8SVU>EO+m8Hr&+~qv9UkS z5hM0I4o(5yM}@WY^fhCKI>yNhKCrBll%zez>0n&Ot0&P(;Kbkd&bh!c(ts0S>c{wt~V01p&>^_qb#xDEAl5 z2RPXW*-mbga=4Mv(X3;V1M^C*vQNjU-uus#$`1P$SJB@#x&-4%w_%IIW=n5_`#;0* zUY7d1on>(?c&TVoS61~+{3L-ZQpQ5UQ?n=ZmRnb2QxlKVrqLFePw?aGpT>d!`n*O+ zF(KTUDl6owSVKn7z>r$;Jp0uwhW5&u4*k?))mfvSNP5I912gmVQF%%JW6kI*T9l8J z?ZXZ;-d@q_$sn89Uk?um3=9knCHnXOXSZD$(K?>VgqXcX;gR6Ri(`+sE}QueMTtx% z@`<{13H^lRRe_7q*E>^LsSwvP_Z5OQVu=^p8SEeZLh&}Mu8Dul_2bJLs24Q2W8jD= zNitk1 z;@tZ{b?x44!31m6=XZXb5ea^RGd?-mFCqjm*s0{oX4O+-`rQ5xVP6>+)f;WC2%<=- zfYOQr(xP+=BFfO+2uODf0|Ubl0tx~mCEeZKj0%W!Gt|&sL)XCY9=!K{d*%O4e@e`G z&a?L3Yp=Dpgnf_L&I4YvE^1yUPh#JHri2oW!Y-1q`{dC+5bejy>9De_GvuXQ#S zmRz8tN6lyRbtx-58?;G4Jr~nuSGUUKp4tlR3GT19H@s*8Hhpz3vciwiK-fM5l5!Wo z$^dMWK`ch=p2afQ0=}0mLrAb zD_k;u#O70G!F`UJEBNuvea-2>$0~sR8YU7w36dh zZda@&0cKmf*^a{Az@m=Y#C`1sM6wHTTz}+q>t^2>`c!|5P|!KRt7TE=7g=RVBD(P<_8}Nzk96gYb|_~^*)&L%ZPIklCq+wSG@b4 z<+?{=g>SAqe{P{2pqKi6v-`}^#F!22YxdN58yCRYhm|e}waDYV!4B+k%Q7q6xK1c_ z>y4*DSnp;*{jwptcQP?*X}nYg7F#qlTy!2k5pX!bwgb#@ zWw%)b!)1*=4lXhxL}Ve&SrLaqkj8}PSJV}clt8#Z@QR+ibt!patAggYR@v3=EOYaa+=$>Y|b2B8>biM3OFzTWyH@*8_O`b-hu|W8z zJ6kU1c987zBJOsZ;HE?I=+Fyut8wj(*Lk&x_JH)1GVOjwZC11>po}oG8PO)7iWryO zd_)9u@dZjg72hd~vIBbkT@*;AWqO-@S#ShqB)n`Gl+?;W#bbv)L!ef`!(NiJSd-Ulf0+Z8k@#WJe0e|yfsf<2=J3MR|ZMgRQAz^rU zxJm*rP2f}gqb>i7lsO{JhWMfF+YSPi-pB5YUREdbb5fe0lIw@oNd-US%q8kKATSTe 
zE@D4oSNESL-2mm{ZMfEp|1teGHT!?F00KFAdvb%eQ`gnn@dY{Q>wIU17D(yqtW<P!X>GPTy%=%;~{yGC``fKn;D`{wC9Rt=2 z;!Sa-6HQfK&-cWE09+-!_Sy4zimhW@&JK~0(%Uoii(fxD{r*-}HsWnJfkCU#O}`?O zvP|bPQxqMtA19=^8oBYuW1+B65jDxe%{esvP>F`YO>^ke@5yc19VC|kzZXX@4-@2b z)2_CO3iaIzfPSW@G&g+y4l-v@129_Bl!c>80Q&?j8(09nvvtl-0zOEbfpzgLbpE#C zgqBChGyR>=;tC0!j_GIm2ThenQBY^~bgU{{H?z=@cpRTD|?_?fVPik2Zl0Na?)Yn>PWG<$#|4()L!o`|Tc}h{R!JMu#ya_NsjugZq zHl6Etf9Wi0IYm&g1pGPPUgnl!`5GO)bRo=cB?Bzq%%jPIv*Hd64xWqCw2*MGA|h2C z8EDG&B+eubH93ML+3jR&K_S&G^~>0E_=RzR8?Qy*BR;;7>7|P#$?H3qEMUUVPO^8+ z6QD*H@n>#MJ;zCFczsI4Cg#FxHFPU>{L$CAxHY&LKJYpW4Ha_uO5vs(-$v|rL2FjOP#Em|aG{;%uTB~@I==0{%o|a~i=+F7+$;a{B zhB3qrGe7e3{7i3b3=?xvr+2Dh1JT#DIHw#xr71GtT?}rkk_8;pSyH%$Bcz_+FJ&Vs zMYlv8Z0lXyl~WUJVh!^ZlmK98A==ysh7b14$LeA>ipLK+$zH+y6~ z&uJ~%@6SQYc~A4qO_)7xgfI9&y=*OJCEWat43uM*aZ_0??=R7t> zV)MS=OVO_lcy>Pehl0CkZ|3=7Kjj{14}8^nZJvAlxFyY3Gj7g)h2w6ObCD*lG*UBC z|F9E(%}whrAXyK}A_|*4^=IMINACp{er}W*q=k2JedkL%p13%rX`}a`9Epnpabo8z z0f&&z&(hN;ZNKd^I73u9XV}U;UR2`?5pl067}8rr3S5EkxOd%vfveKm#UitH>qQZo zjU9nJ6M&^Hq7Cp2o4fNX=|;Zp2o!>Bv7>KVDFPd*fVSnp3LulPL!7$;kp&=lcLU3U zr;(9irxUxlrJX)PsS2<4Lg$4!1He_#JD3jvcyF#$F{eD1kT1ST0Py%Z!I!QVIC{nD zjjY^ugrY))C|H$w)p6+Y$iAzk(GJ%{qx?RtW(Ka%>sO_io!E&fj8}5o`khGC)|y<) zwm&~1uMJP+7Fv&j6D^1HfoQ_ORr7M|(0%6!M(KPGS{i{32{IV3I>wsm(I>vQ8k3?L zpeD@9O3WxSYaUv%F=<{}du%_Vf+sDCl}IPj8@HSwUtrouR(b9~MD3KvbOrXz z6b;P24to~8&>W`+SVqy4p98)HGxScG*sbyNcr%^$0VIqw5b(9dJ#Yy2H83o>c&hI) zGIXZx`1;?K-&&pbsD;aTg=J9wcLC*j%km%^cTMHNdZ2efz>gpo1Z=?(Tm!kPc+=*e z7XXn(d#{E~5-=}-{e&TnmK^-+{>R{;AhO$OY|{jaO^d}s<$N+uPMb!_N&Ol8v===* z8?|!(;{p0tm47S)Xei)Mj}3uiz~GdIFU6oYpg;l@p($A%7#{j=yOr!ojyh^li0Wmd(?n z0yAS6Czuw;e^GfCFnEoMyXU-LEBSSV-d>;|=qh-wb=ryXacXgG=_v%^gxo7Ox_*>O z`!j3>&?{W65L3Ftu2=J1R8*f1m{1@AME1pSf(c;eUloHnAb1M;lqT%H`(gUC;$DG6 z@>D}Z1JGjB7PAI4c5CM!0gtwcaYyna_Nq;F8XcEb6_t|9c4EC9uTyjJ>aecDGq)mL zWarqGhK5R+nUB8;$DMT&ZUc7{7_bu2@33ylQck}K?mlTZ1{uH?ZCyU@OxoR6WpaU; z*(`s58{xUwB4EAu<}RSeLOtvGmf-NKfbAv^%+sY!BC6TgA4S=Eq&U6%=S!jPAz99Y 
zp)U*Mf-yzian%by&K4;VHtj5&VC$^JmiI&3aB9U#KTWlh@tc!50v~Jj#id_Q4Yx7H z3X^=3S+Jd`H95Fjy88*S$su5KvEn1U@j(|zx`fPgp|7M{G`|`09MK)vHTKww2U!^y z8l4my)&PynIME(mJ#77hpiCSvAXsaelt^dT(mIme4FMf zxthe|=ioIp>8F$O(AIsU<^V<}rVeB*U?em%%MxL*zW{ckCBqJ5MFtu@AYh{skyjPK z3^u@N{ffnHy|9{?UQBPTgSx%FT?GP=Ju13ZHWO9yR!CIbRD)*-42lwp;Oz4Y0RyZ2 z;tW=M2CuvltXvL~Q96q;MogDS6ptInG-_8bB7Npq+c@_aMd4hs{9u2JG$x$3K&}E! zxg(UQ=m=Xrdiy1WM|2MxsuC!}{S?f(u>c-0IoUA>Wvm?AzbtwHNRC_&d1UD4e+lBm?jRwiPQd~1r&v^hTb5SN8oqws-& zg#C+&o~AcsBbPm@@)S*ApyxxIWmH~m5kR-Ck7-&Hzq=$uy8vx(-EM$2h5!gKr+TKl z@4vdP22s9xGc09HufN=*uF|l8iGgMMt7uL+Z0fDbiq6I;ETqTTE{|l!Il3$9r8Iyr zk++T;=z9D22}m}@w6GX{a;Au@Iz_vzdWheEF2wQ$o+p_NY3YPry*zyGumq3^Z;cdA z0P_cGfZe)u=OfarEFsJ($c+8o7^_<4$Q30Q1<)z9GIMvt6NQlheyTRDCVb4fA_(PB;cle zDs@##OnM6hgrkz0w{L(>^RvEkeNR2LABJ2O1AXI+j59H(9N!oW5yhBRb7nk zNztqT-}G_-dq!%0QC%KD1F2No@e@Ff4O3cQvH{yX38Z^17G*@V|cVC$xa9 zIMKU5hkk(hz4F%fb$Rk$Jk4u>J20@w`r;2_e1e_)?6rQS;PT>0?(Ql*`&6>4(mdtp zE=_+=4@3-u4Go+ghidpRdbvIx^{LW_^H(H_Lm6*1qK{nm-#Hf-PC_{iYO-UB8yx=`B;|P$q-g&VO zjKtS2HTI9Z+(Mb1?4`42#b<16%p~{y5&bE!18}PevlR#svA{oln=_V~V@`6DTpOtk zKo#xKW=`2~cty041=`{9WcHLJhGkyi;NWGgxuXVgt2-w7G z4{g7i2gy6?PjtekC8?bFLW_zW~T^c;@fY1TJGCBc=_g`Bs2a$BzwJQADTk5`FS@SCEI4}tqUs1nR$M#ky zf=ND_%Ww*qTk%M+(({iU(k-&2pZ##vmUN}dFA{SzPV!2Cl91p4%9g(humsmjNuhDN zeyeG`{3)Op8D+oTp+r6=q_1*gz1&0?;i^~-5M}Cqi$2we@p|Sx^s<2>Mx=ZGAd2K; zpHoePy=Un%%6?GlPhm7!*G;Yo3r z3%0ULT&->wwM(O&pRs-`*W7C0=YZ*_p(v`Nq3PG*B|Yj)z>`jxi1rBH!wHdfEC$AI zzAqo*uwUVwQqj-N@4Ro3NO0Z0r|B#4`d%#wnvf;p>Axeqg8lX6Ui0#?g`39I6P)^W zf1X5H=(0xxHW6o_Ea9JY@cWsvQyHwKN^=AVATU7dNBW&@TPUp`{UWe^+*yD|r3b{- zs5VOmFUw&|0xUX!prj9w37vlqiwp@1qZji$223b^G510Eg^3|)_o{mr)so}KtaxAJ z6#&BAOQGGDE8D81RT8beDMTLc{yNOTN`S|pWx0*g92U`Z;v%S$K zRLvPKb7tF?oSf@Nb+>q;bT)={XRfZ~CVUOhZQKGU55S40pa zC6WiIh-%e)Nh97}YL@>0eV~~Na#F@(V4^~AR~I`62QO8~?KT5uP&$w~U5=9i1hVyj z_-ANn2*{Oympe#yJOfZ;Ei3>Es^CpmDG`l&*UbWr{K3^P!NK)_x((g1*F(yDI@m(C z!^+A!-r!!I!vol*Hc|D}bshW#yw>k_!y8_dPi2ZxMhs_>s0>oOy=^4B`;Xu5e|~MC 
zi3!XN{Pq~|)@3LD7)zx7Bz~w;-ML9Fe5A2>DqQs?*2x^{pHJL<>{JX*JlD=teUOni z&){^VZp*omSm89-Urn*u%`)UBQUxFJCC8`cFEQXw$z5Wo6tn<5AU`RrFI>JAtEFdX z!xA|s(Hv5_19?OU3li;6Gco_rbFm{&G_r_DfJ`&C!S=g*W58|tlKtKt0u zkwEQiV7v;yHX(NiPGa8Uyx9wLwAf7gvpq3ybe9d+>g~s*z^iz%yV-Dili^hZMUvjr3W_(FY;^LOL-UJvG zQ6|kD+)h5%o90Vx6n>+c5)&!2FSmkZSt|Dr^wLp%%i*prE~f8hp%r#K*hQDW<(U;+ zN9>nI_kHnrsaF!)^TsgPHhk6|B~v>ou)rq+d(o)VXt@B}Vnlbkk}A?1(vRkV&|LYT zz_hllUVUa-*&DFs@bMu!DnIFYux#thkL2>W`jxQWeS$WUI!({DiJfxx_O9R6ykT&? zY0H|8s`xgeAIrp}pvc}dG~Y&fY za2c<7IREn{Hwj9J62eyhKES?fKJmXRw&L%qCgag`inxI5cm|D!#6)t>WfTwBlIR%8 zrSKKPp2M&|`#RitmW*}Ypw4sMe+_wJE0Q7iiV@sVp57i;8VzMikqL99t6I7n3Wrk1u0-oFn0e_sV2W}iGcuJmuK zrWH5&vP^53P1Al=Z4*`Fg8%E8T+amF8rl15m+ux&>^1QE`CvD#NkN?uB!NdH)Amle zjgAhe6G6A(+U9n^S&Xn_Z5N#T%3-;Z`UtlyY_u9(TvD&*WbqM-$Q$u`F9IWi>-I+ z4<`3S;+T>I(i(J6jR&jN@{GtXZj+Ion2dFtQf@0J3$&ViVMR zYT*=#rUSVWk0gvtmLa{MsJQFRD;{#U=rkCeRO^Nkgm`yt4c)A?))m(XbWc`MLw z1WbX)PJNQ;p#C1#@&bKsS{&-sbrNwg6RlHD?KOkqo33BQoEAHG!+9U&oZ-lvLj;g! zK~@|wV>NkaA~JNUSG#>HPUf~c0j;orc*}+`@1H{l3$~zCIYLC?ReXU48s=*?GyZ2S~Pfz zGb>XKX>PzClIbH3;V^tk0#co+ays2?uyQ~y@0y-lUF>gltU`|W_&^vQi$@w{f)5%- zN*GN~nr!!|D;8*y3#PJh{b%18n2 z(w=5*#A1De?^;E#ml#i)IZhjQo_ViA_-w`R>qyE|LpHkJ`?9+{gl(BFOKB17aPZ#8 zY|6r^^3&ix#=0(UGV&tzRS{oMJ6Y%!F^5K&w8673BCZm{cRk!eM9 zO90^!j??Y}`l-4#Y*_Q4ks~=vU@2^)1nwGU)EBY@%96DvFCV(ob5d;8pfKzl8EO{7 z7evI?3&i>~G)(Y7NFzyzp3T=Pc`5>yq^`>oOztmvqRh!8eOPimIX!cd(jbFl>S#(3U)K2{ zN{B?C4jV&cRR4t{8mhS^AXWx3PQ)av-X?5z_`JGr)#Cp!nm5ctZ=$Li(kVh<<>Xj+ zfa2_jG-_3m>;G~(DUTXN4}D^_f5RDHAi}dBpCP|Vr;~lha`1G|omwiGCrcpbJUFm$ zpPEGZT@mBOVZrfhzQ?~(At;;FWADwSYRX2IUlo0-P5Ow_@|Zx4yf#9U##u0I>7kIz z+527FMCSM>(d5SvuFMZ*1zuuwD&{C}9e#aIu_(Gg>;6~i{MMeFW9d8Y@wHRCa>aXr zF&Pr}2cE@)I|oB6flaB3IX3wJnR7X7q~sK=(S8I|MbU?F>!~LjoX&h3NT-i`61gC}KYw#F^!lu;9j2t+ zbpBCTE@EmX&wo9bfn7XIVO2P+0(GXbf5T7{Pn+ClYR1n(7hI{gBVHBLZK)6#Af{>Ma&s91&xHeKoX6dKC(~bga$Ne%35UToe-QA( z-N-jHt@i`f*AAZT;PX!)ye3^cg?|PI+11;(NnSJ!pY@AgPY#~qPXx&gR;^~rZ_aA- z3(v4>+P!=HVepSfrjRv=XTP?iewI4-9?iClYhUnT_%Jo}Eo9~Nga}A^D=A(S@D(Oo 
zjwaR}-mo6-{o&(w_SH#~FN-m~&@0S+El19~Ob~l+jpN0Yo_{iTRB)ogCnL1Ksq^$p zT@1*ASshr!b@d}!E|XB!R?pJRAJ0A->Q(Al#I*f48t7kxL3@vSA8pa&kk)wP3Pi6C zKh`<tB! zUNia&=j$x%K#)fbJiA0{Kt_G`*ou1nY+Afynr2yNJ5JnO{fDFEyt8Vr7IxdUH!n6+ zTXEN&TAlM}Ni>_`hZ-I9q7jW}zLnqHEf$TaM0HS5v(yutgx?8Km50MZmrV#6!V*IVpOsRf@G`e2mo?Eyg>&tH=xc~EU)krAk25u>3 z10UJqVEMRv{_yHb(_7*tX-?Fp<;`9{V0k_6qrL__sOu+##(cHIFT%uX9jteWk(-?N zZ`|$COx7tqKhBtC0ry(JdFf(#o<@KAyZ!`!IyCv@iExyt$5;+U9p@7wQLf>tEsB#` zZBW5x0Fq6-R=4)^nxqus-BM?gsa9yuZrdSL1B z5I;cbAl)P3y(-$MYL8qc=*M7C)bI`8Y503TaSukr5>N_fjc+pd8%x!CRm60kC;Pg{ zPmYs%3$AY*B%&ENYsu;L%&8n>fFz_W!*Qizkn&C&32RLGj%_9bm@i9e=VL=+G1odj zEJpdKWm5rTq*QeDGyJB_JKghi=G(%YvQ&Sor+=3l0!O&MFesR?xPdo7DoZohm*DF6 zm(;@e`UhzIL$Zm>7$%U8BRZbHzba}l%f^22w2gAzd9_k|_if2qUV-1?{ivqnoIC5x zN0E8FhlpqcAf|G4-F8c)N(MErj^?}`(Kt}4RN=5v_0Y0+pAbB%^=>N?dc{f=D90BT z&q`~h>sn=SkypIl8fu@M-B+L{o_L*6Q$Bq-;>S>)X6$MvS-wYW_DH?~fq_~9`yu_* z@8zQG0{b8O`7AC!948UW5UAzE{tH7aO}F{vVn4{hjNv9UKiU7N4j#WWQQiebUHDh^ zs*71lklirAPv?CuY{D76 zT#FHzge4p7#E-9JJ-mMiGAe1UWn%g*hrDk2A3Yb%7E`aGyc&Gta)F|^d;flLo9{K; zxLgUHvN7X@1iDv;p28$*Bg}*A%dOpli)EFw#F}Uk$JVdWKg&-T?Su*(b_?%?_|^Yz z9glM#iRq7bv4sXi_k~4yfGJz1+$( z2L(|5wh8Coog{0599$raaHa`6SeQy*s`Kk=RJ~)7{gM5z&&^Z>n#pJHZ9b1LgzCvM zRmzT3Z7H08m5TZE+qnIewm)(0N7qzoAWtyOirH<(-k?69#aGRV7q4M-X<8etcJ9ZE zS}l}qphp!NY1um6G%3l6OGssaac|>kyIAT^OxdUnP}UJt0a?<&VSz{Uda@xqL@m=D zNDY!Dxc}_)Ar(cE?p&D)_&C1a#$DBVJ_RoU_V|iA(VIVXr`zj;lz70`pCr+=9lkT z?*-;`q#>Dm zn3%J%>~W@Y9fIcgl>r*8YL&KE>TK%24@6?-%Pq1Pb>7uX5{~r4dFG;6M85s@*pA}W zH{;QE8^NPnJ8h@)YnLn;6?-3tKF46Zrgn13$QGx%Li$KOk`UyM#@r}V1)zR`4%~DI zR+!?}Tw&=rfa8Xh@)Ne!Fs<8F2uYKbx+lO;>^5b9h&RR z28#(FiJx}ufobq2=`1_(<@uWqOtTzNxo2@Y6UKjMie8XTauqVCbpBGMo)y`uKGv~Z z1>Y$-_|2bt!J6p~n^?;4QjTnycQ=6A9=}QFUlk@EHVqwtP|`G(E2YJ8z%(K~& zO{VIN<1&qad3f;~9u~miU^gvS4X>4>|8IUk zA(C0V76(-g!@;q?IQyxC%8&-?raPr+qfV#`ZoMTVx z<(VNSp7~i4!Ic*Qpx*R=n-CChga_O)A8|GB(|y)rc+IH(?6!>V`td7F9eb1{*f^=0$lqwD#vk}&J8L{_yWP3fJ>Qz&QZV;RmH zeBssPdQCHZELKyn!o&09#HiuS*C`KSm+_RY(VAzl-oySOD1U{*yaP~;R5%4*^wOI* 
zVl2WwTKL&KCXQF~23b%a%-CE^Elzb$tA2So<6K^atABb8R_$0<(e(3<q4!2mh+h1bAczc;A2rOY$x#Pa;4A*8I13rINEjS0;~rs-y2 zJ@gk_6QxRZ*Vnw{xaRvNx?@C`$f<>Hee$(m=4wg|ZrPRmO_kkCW3u8*o?hr4=IuQL z!5$+WCN2}OJ`>69avD2@^!4ju&E=0s8_=>m9B->B>{((4YgcE4l0z-Iqczw)p|BOH0{r-tZ-m= zj-yV3bW#TNLnz-Bzl1E0_}Cw-2g=^5U2o=zOE6G3 z-BnAXfI)!)l|Xv*?*j$Cmpr?-oc(=2W8uFC^eROL^UO30QlHjow~~m96oVqRMC^4P$f@&uTe*RsNXYbD`56#=zz0|2_$kBidR+)SJ7(?k4EQ)s>`6T zsgPx^B55=(Yp|*H=ttM8_~|UB<}x8#CvKLidb3e;v1c@>LJ^N8)^QD#8h`ei;tsju zH937KJm{J8yG{Q&y)oS0x-lR>g4I)4wyYWYI$rR}PgI31<{=z*Y(GR$Lsk-w6o&yW zfgd>s0aHrVj%o}nHNmWDoRhdu^oFBMCfLd-w2DvgO}}Rf?(_T2*FUfVkW67auv)bb z$>&bhtbSgd=Ak~SQ2Ub{D+==ec4LU^bm*r1VK!v&DEw&G=Cns(j&9^bun3X))zFr+ zI+#BjMfC+ZrYP2&D^C@GGA4@m@owTa_39VEHv_DHC-u0EN}mlB$6D%KyZm7#U$J9{nWH46$u!%@*M-{kWZ zB2LQ8O_ENA`A%*O=6HZ@p|SvSVgt73P3SQFo{Pcb``3M0G)kCGRG_z?(1&|k4>2{TOnp~8Lo%YkSBN0|Wxm+dBCEpCs{>ZIWOF^s7m2Ha* zEY*NN$x-+k1oSg43xEte@9mA$=kd@9;0L}>6tBp1I;Cx3xF%-~L?N1KF+*%m)S|t< zS@J=oxSYL_CA>GFwyAJ0Ln_K=dLDq<+#dR|)#Fpg5V94bBw)>yGj?Gbp%J%)dX5Cj zpw;ItKbCFNC~DMpmWv*QE&C^_(0Yt`73zp5G9)8{(|&b-v|q2heU~{%PsO#<{Ul4Hjmu_K$|~NAi7|oby9xndDadqPvF8W$>%4+d zud{4kf6OkeLfDTNaW&6G{XL}2Z?-Yr;7T1=8&aE@?+{bO zU@{m7yzR_>)}^&l2V=mYnW7kQP-?^GRI_Vcpzl0g9W+==YWmB=AX4jEXk#KS9@Oyo z1EeN?=Z=p%b%KoIc6HbrXNA>>KHMA73-Ejj^(r~3TgZ(0{=r&f*UFdQO`3yjz^WX# z``p~za{7qFpH#M06XH%i9%I!opS8l5Rj;jB^Fj2aBB0x|d0!o>K`YOHU{20xu^UlM zjhz5Nr54i}G-UMMXc%$$FnSPD-%@WEZS#3oDTe!F(5XR8U5vuu5SI#_LzHln7sDyG z_}L>4+ZF9SMqd6A+`I|n;6<6--a;Vn03f=ZVels-*)`Ymv#bh{$sg6$+~S@D;CsYw z`amThm;5R!mO)Y5SXQsR>UF=@%LsF!$+r?Ck=)1d)b%cy9o(s1%@mWug7pSa~F~a@ClVVs0-m4Yk z`h`XitKO4~XsbN=;3F%k(#Fk&>W}?ZN2{xwxmfp}wmvkrJ$>mSYCw8K zQ|3%Fky{xx|NW&))SrnL2^vzxZ(z>`mu9PsTyDy4p4Npra(G5K6%OzOA*cZPER8$K zqF1Kzig*8-%jW4or-)silIM?l>r(S!IU@R8`HH5A(LUM_CQ`|4>s|XQ@rImC{uECA z?RBA2XM*`3;y((Lfx-kHG}$HYAI;$Q#ue)!8Zr~~z&Fy~*KYKdsT!M>nv6uOUDqE| zv-;3@c7L6yxYi3(t>q}^SXL*&ZL+LbJZk6Fc|O78{={&M>Zf9=HFkzVTl>5Zy<>Nh zZ28A_&79gG^UY*af#G_&0( zU?bo8+^Y}Q+dEI<$-GdWN9zCdMa|_?AZZA^Dlx<{Z?<*=`foTsH+_8 
z?mcOIRDs8W=a`&4DOhf+UiX5x{R|E<7duyuwwgB_v+Iyr+rgZDL4_Gac%8gI$;@m| zd+2ts4s)%B+Gxd5(6EoMttHMreECR0!@lpasS-k8Ln3@0pqEBI|7HuUpy5yPcM5z_ zk4D)$YohZyK&s8i{>J<0gg!o&k(7k76#`L=xj21XbzL90tk}sj(rc}96CW&Em~Fk^ z&ExWz@i}tqfE~3eq`{;WVhR(ELOE;KpB}FWhh;o~+i<}x@&krdF0}O9ejdP?-8flL zBBx%kY@h|#II*7s*hRsZq>vDl~7_`v8q%zdGDL zy^i=zLlfA!Kj4uk``_d%selyD2IcsJiNA7}AC%-uwqVev^E&-XV~xGB4k&n+30+r# z3RK*$Gk)t-Ox*tj$|%=ZbZ*{zNOTkG7a?3(+UTU1)jfF(@prqx+*pTbGMcE1duh45 zXc}a$u?)}Q?}e(2YaFP-g1iho55=`#QqXj8IjSnT8#e8A3u30RW(XS&>BQuOs>?=* zYFdtVS?xmaMZ41wmk0ZV{K41pJ?sI(!<>ub)=j7-vFR7XQ=8rZQPaG=-dqMeD9h*b zMJG(ZiVx1LYjugu#12cbt#(yqM$*wcZH@Vq_%A@d458G%kSMOGtx`v>;G3-q5dl!- z&8@|uc@bq~t)l|Umc$41eIGtbc-D=~I_x|8{)l$Od?xNlF*U7k2NfLG*V16j2;L`G zpdpS1v>j0|w=(z;LE7qg!-1LIwB^ZJUWh!9Wbjri+6u?mA`@#;q@HEDAlh*aN^4W1P{dptYs z60Ng}*YOYKY_)JAv-4;K*wDYA#sSvvTUjZTj>WXykO@MSS*p2%!m&}^xfin!g2M{B zzsV%FdM_o~AMqElIh}LCPG2swb7<8`(@h_owj#|pJFVVIVqCU9Rb)j7s0|r%q5jGHG1p=osmU8FUoIX=Kf(Qh9NR7y;M@U?arlnN$o^ zqnl7vB(W5r*%zJkW5OfPHAYyHH^!dJWQN|mx&~p!!wnI8mO7YY3e^rZS`@$kk>=_z zAoZ4rxHBX3LS7+==n{kzh^_}11sus)Oh$d1CoD!GdB?|(-#*vpXBpYvdV;!u$}Cqz zv(`2V*>|npHHeRTqGHUu;HR^sGJ?3Y2#W?bFkmGXt+;^!w{J5|r`5RnmL!4Xh0=f%+b zf-*Ff8hmRhB}04i-#<9LtF@n6GYJ_#VCN{ScME!xZ_vesn{(oD-}R+NqpXE8ayBie zUOWT_?5s8s|J_tF4rJA~PH$E|I+&a$mFZy<0@SS~x=XV9N&Zm0fT|8fuKxa6ZRPT2 zm5e&Zs%clcgB%pq_v(iUi6|j|7EkefytpRz2UJ z>-I~9YZ(*DkKh(J!_E?$fSKcFan=Ue>bxzLz?)s#0!%+NHqx+2^Jl*tE z*fYFBy-YNe8zz~nShgDpnXfiJ{}^She98F!liBC-xUQgj9`T|OXcPz+($n&Jh>ASA z;{RV3Ko+c*O%vIBn9IKiOyI;AL{9dBRIH}~nO_pt1x?||%hD2H$IzWkxbSsrhhBVa zovRwMHagBQye>Ezu6we%8;7kM3Oc-K>Y9ov9#D#*!c%Q~?-yEc_L{8@zKEqut})Xr zB(5eSOghDe9(Z(Qw2#TqC@M^-ukM1#sn{o6Cwq~=12tz$JnLN#_6?jJsQOthULe9+ zXtn=-s|?3L32}eic_(Y`4ce1}f>Td(Ww+zy@Bp}z78{GmIWmY|;yD4bpW0(SvU=OT zp7EirrA};yo^jG~MHN`nG_eR^uwQQs2HUD;>DqgUpZ23ovar3QM^+u=)Ti<)J;m@= z1ct`1ZQ}oC0q}=Rrwb)>w*jr67~Zx%ZT;nNJr$2(%T#H&aX@zS_r9Ptt%>y@tsaI)so5`~l~?b_#@IDeC_Y?eOGF_AJ&oJ^JGO_P_FhIQpH7u`UcUL2jH zUE@Pf@8dZ(ciZbF^3jYu@iAy)YoI=gwx86kvNiNa=?~BHb);=3Sgj9?R3Uzp*10b6 
zxfKF(n#ppQOEzEC#uL}IK3%0WI^zT<%ZZV9!?RuOF%{ZnxivLh8Su((`0yfscb~a6oXkN8W2>6io$i zW6L2cB?jAkFpCX-={L-Iijnw4($#ckUc-Y}BI`H5jhmnbjmM+dgMid7L zgPyQ!&OkcEQD^?WIc?fX$xxFQuhn9q$?Fx7#{$)5w#p2_mVr6mN z1_U6)`cN#Azy5;tm|FVO&O|?W>bAhb^!FsNA@!+&JU^2zh|;9@bWGAqLtOHy&f+NPhG-_($?_ZcS&s3Cm+yX z9mp?V8=aJ$>lrU$=A%G+A1IsG$Z9YHRl0{!sxsP(T^}vZ*^XQ@C%F=3~;v~zj2hruhTol9NcnUII z0_MN(+|c!GTKS}Qo0xAo663C%UAQKWcG+`|9&x!woLK7sn_Y0sEv?Ekluaqw+VY!y z5=up6>J1c2%XHv?^+oE*SSU_$J8NT2v0g2pMB1u!R2Eu$P6H+B*qIAyH9+JP1Zq@m z7m4(Hepgp2r|{Ql_}Rgb z!Iin93>m&K$_IP5H!bPCTAJ&mFCNqxt$eRT-}Jbv3O4~{Gxc8XZ2JW~d?9t>JhPTm z?qM$mK@kA&T;zUHp?C1W(#YXa9jk~nVz&S*+3aDOZAE*!;Bz*kEp@c>^E#tPh@ZV9 zmsj-&s}QMM$0?9{2v=-PBYMP^Ekmm7@C8uukY@hhv#MPtbcBfkrB5a9S^(r5(EhQ; zQHOK=?L8lwXQVLF6aG7zBp`4$RoRopk^B%pnQ|p8DPf>o@g#2Bf4ns_a<|k>lee$` z#hK+?iC}?w@hq3mm8&0)#49`|E80A6-e@dSHDCU!lA@9lQA1 z&7XDfF$fO#Sjq}5Q(F9#bh}J`rEmT|WKkX8B?cflAB2-j6X99-byhEHD2?#!{#8x} zi3Ii(O)BRr+xFaTld`u*9A*+lT-fhrJm5RC?h)%Cks1B)v+ma|bH8lY1&txW<$Y!) z{uy(zaqzl0|M@1uBiDI%h{V-kZ|;aIXmUyWu5Djp!3%Ors+-@gEf%qfH+5Ho4W9Rbz}W4o4ocNy6OT;6Wt2CRI2m&E zh5FyLnnM7tRooN&tnxgKcK|Fk+xNcgLs8Rr$g_?}&DO zV*hQ}TBKE@r^BxG5g4{gOmLc{i_S!4`JW_pPVjz8FybLwkq>>@q0z6wvU5{AoVa=+ z5sr8 z`NcEeEu>XUoi2(vX-u)SKc1ZK7QxV9Psx?}`!06H115-CR$r9}qm!mj5o24QbZn2NT*I%lug1-_l#qJFYX? za^6=&ANr@&8%*O7A%?~b?vmoQC78GDrtjXt=!F7y?z1)c=XT~mK?=5@24Szmf|D}Y zE`uWv#*brJMyAtS)2w1mg&XvKpbPFAfb0ddJ4Cu+WBJV1g52<{RzxVr_n;4TdzSa1mL?rx1k@ZheE6P%{;#`)^pGjqTB&9(Wn z>nVzcXYak<^~x%H=Xn`|&ZXK9CPMT40}YS9)$$ZBPp zdfugl(fcTzZP0LZ^7ZTk6#5XeLLq6MWSESz4uJO~s9_i&gY)fiD;G)FQk7_U7i&X5 zry*NH==Je+ut5^5WSY$ei3+hQq_S%Jpwp?Qf+L}MDl`69fgDR;gdoYz_kx=UUX6Lj z>W({)-~>Nyl|BxaS^!OS*9f|p%*Z9P)lUiKJe#wuTfHYJc*p6hXer2ZwOgK_RJ*z! 
zwrWc1k@qDbXSX1Jcsu$iW;WGJk3}lAI1W&g6%I|%oJXs9>0m-@tzys z!TN$HgK1gsHTPPg$VFdrpC7~IB|$2+8lIPi-^R+BaOC(q{d$JoQtq_iWph%YAfYXl zhh@^|0+;}NmBeexcbR*|*eVSxg#D?Q8lM1y+DZAXy*f?X1_eUZaJx@SNJm?cDp@B{ za0jNm3B|VY?6VDw&hI`FB6u0+j~jW48Yg0Sd$K{VhA%M8Qm*TLxT5|w!KbH$*rvGc z*g!a;=%7YU@t=ZA4&di8`+~YtO_)FnsjFVO6uu#r~3|BS!hp5sgl#>qAuI5 zX>W06fpJ!ZB|sAz`uA+?&~v>&6VNG3|t)EcmzsgZ?F+tkU>Pv5P&HxB_~ z7M1?7u<*8^KnZA%Plm9HXO^3%zLIZD$e3c`ox1&kiEuwK*|65@d^$k~_{(y-xd@5!~OA%uBN4EHfN4%mhMF}NV{`txP)_W>duBIL)g1O zQXoMFW98Sb)6-*({W#m9yuHSD@%SJX`dmw*&-%)JK(O$lUDEc&N`DHiP+w}yOhX^GaX`a3H;2%ks{*!ykXTJJd5%ga| zB#66uIv(^nYA5U4X6R(ZPUoj%zdAkS6F=Tr5)=$0z8R`*ok4b$vg_+_-)^WXajJh; zv}|pC--!^n^>T(&6AWuvDFEscx0zJS?UM#!)PCAbfvr51-QW7y&^M3~omN?LvSs&P z&tSH8&w+Fsw}he0PoAIJeV$m^?;7q}YV-Zhw)W7l%s>h3RV3toO@YFCM~N9aO_e8L zSru0uPfe}G`W^RA&C9pq&X*~9`x28fUaW~)=&4-sqG@W39IF%Ck(pIMCQwa7#MYjq z<=K2o+IAQr9X=`ceI-(ra+cOM=B-`Q>yVP??p8Feiv>b0e|9Mv%BtD}JqDmhneuB}>g`MSDC(SvT2A^;J8; z;$4jUd%D3L8(`Q0)0b%nnbt95}_pT2YXT#Hu`y}*2E>-gjbf$C}> zr!Ii~cD1ziJuSH$Y~$)ZCC&_a7HX@EhdVx%EUjzk!qt1FO70E5&GzyxGoZ~*kW4s@ z^u$dWCfCr`f7()MeTrP9^yFRKR}G=?@mqfw@OsW(GYn4j>D=5aVdwNKY~X+&U_bN> znQ#`w<)! 
z(>rE$l-Ez$DABZg`$ZmV3u^T{r&xIN+d{eL`I(W4t_|dJ(&FXztLbn2N_k&ub9>8` zyxy+AH;drnVosPU)$>W4kW~sXxZA~=N59DAV@f2i@w>5{S;*h zB4j|%cwrESsg69O>AC2)cPPJ&C-ZNPhCdK)v$7KurJuy=Yrp>|FY%vb=bLHB@B9Vq zzdB#+@3niGO&vXeE1Tpho=ot#@Uf-hl{|Tmi)5HV%D#SW$-=&}_?@T* zQV1;rsMD5SIi3?2nTBOe<5f<`2PUG0cVNGw1XVPhcx&apURp(RnU{ zx2k+HsBJh&(-qm4OC+0e_>Wk4s8b9+qP~ZC30#QA4%GP$R}Ef}s}Ej)rGYSKI4kIt zu)as}$Z--B>~!a7>SabVx--FXuh9S5k;zLo?`l|N%7hIpK3&a^0je^0K`wbs`lou7 zLYb(^SniDgEOuW?d>)t-JnT51t~bBnsgI*E1Ug{~DG2`2R9*DL9HWjODJpDWWF8S*`Z-BihY^oYG zJjXa5o0@>XCdnuVoZa9Dk<)7V|VO@PLF&oAlcIaJRaI68{;L70>M76~?61}94g;%DT0YvZ$`*cg~l4Dhb>qQ-U2rDFz4NvgM%-f^H-) ztKC)>8(FM5zCqK88iUUJ(hRmMAF6~mg5kuS&c?{mW^H`^jiivng(6(q4s$}uz~!=$7ZvG zJlThs6-ME*@kE3WnL4}v&+0;CKuX1AY+AJIxEg|t7zKMWZhurCZ_zf|EzZD3uPq*} zvK?}{{ie)s=pOl_62LvPg0SB&tvzgrU7u2}zm@M|a9HYCW;dGWth5lf8?IU zWBu38J*A?rURegFUWb7_&;PXT{qs}fZ&xRt|9Ki9P=Wt;u=v-ph+w!*$GP%Ksjo`u zxv}pvjiN*~n@VgOrM<-s;>~-X8h-bV33ECCUdUR(JG5`j0pjmV#vtueO}GoxFi2kB zPpERbZxg^J;iGktBlBW8P7}v(kMLGhSOi{up$PRI7pnBx6(`5Ep7^A9~h zeA%uqx{hx*8iq32jH{<(3*-vJTfH|d^q$fF3>K`*q{B)>0%K!tT3+Aa@#%3lOc9Yx;a2>KN`rqt9i30^IowG*P%o+Y{0s3*TcO_~eTI z2fJ5Y!0oNdJmG>s9oi7O)2VGeeMWPyixm!G z+ACLh5n#vOfi!3_WF9`d)j*qhkbHJqNqD$~mH!#+_q0^#Mb)2ZN&N6f9(N5m(81GW zV~_k!Q`I(3__Iqtvi^K4$RFU%X5Y=}uDOnZqPz%)$5d#IZX-e-6X?<$3bE(|4h*g+ z9)I?K7B=n2Gb47*GFp$U)pcTd_%H<+bjM%@6GA*`k(H0!YWahsr=)%^SRmdTXN|er z?m6O%jNs{A&7`?1ULlUme>D=Li1ugM{g+R95?QO$s(se~;XqLm_D8O-D_aXrM*3^z z@yH^kl(=>pdX$N8{Tx}alwE-tO}2wv3%P}4o(|geP0Aa%U=u@UXZrfzDvr*XV?XDy z$AMLdvf4pxn2mczO#jr^Ou@PgCXa6a^n&0qh3}=2ZBh^zF ze`m{xakb5#ZfC$JkkJ)g7A43H;sz=it_=N;@!>3sCwP*W>oM;x{F{TbT3yEmO`h*D zs*=hKK$Qmj&d;>GQ?MY0H+j~#EtYqje@~_*ROwKgH~NsY9QvcIP_|G~@gd7>bGi#K z#-I^Gh9=HcrY8n>CgCJr3NJA(kas6sD*$IJ`BT26O^imyFkqIw+m;!En7!UQX)Tt! 
zQQ01p)EqIPpk&rj7*sz`V6oP^>`b{4(>*d2l7#vk`cVoA58rI9sqX^X@KgiTR_Snn zF_T9P|M>&8P%(Fv_u|D+wB0G}Mi6lQw4ydK>c3d4_Ivy+Qheje_qbp%oA0%!kFty( z)jWj>9XtWBNu&id)Yn|ZC`dk#w1&MZeTc9bv1rmalNPUv;p0{4p{$J(NTv) zB+)0SU#YTriuRF6lK-aY{NG4 zAbA@SC6z2$1U7=?-c|=0rUyv{h}2=9M}+V0A?E@ zVH0=hW|oTsoc@_)cn#dW#aRusf~=Km8Isu9jbK*7Hu9l@$6#N+7^{XD8{KT&TS^r1ol z;o!)+dDC+X8~wThFQWh$XEJYwM>wtGN+~O-EVY{FRf;3AwXW-uA=Tu2ljH|GlYEcW zDh^DlXo;m7FxK`Lwn*@M;-i0}XENGjoknqz-d~kp3*8>bZ#fW26Ut^NKZIX9&rfES zIu{qiSd}5U&oEqw#OV9q#~1tL1y!1v=*)RqRl0J6#$^lC0bj2(EB)1?qE#D%y*glt zF2BF9J9Mp5vNyGoZt~w_a3As1!Tk}CV9u7PHQg6Ik&^?PJzjgt+t^;|6J!+5v#XrC zuVc?{ss-r($1j&G_(JaOHj@xQntE>EUuWkAIu2`*ZY5~5?zs2Vj$XSh7gXH+o&dyG zAA9E$=JBJB!Q)MF9ary@9<&n|>?Z!mHI_MzA>m4jAbUL}q&ueeHb*Y-Vku-CI@@FQ0H}({Z+^@5ufi<>v9JX>xNS^qdO^K zd=^;s@ImzRuKy5%zOf>{!lB|v)O*_<1A2aaAc#8(G+sX2nU!7*$iIqQYtEfdfx$cR zSw$=o2B;Gj(9{k3m8r7MZW6diJmNO;+Ei0RQC(QLjcx0Vt@Z1}#|LU)#6WE0rpeN^ z%H3g`*)4TiF@M>RX~rbaI^Wg=zt->|OP?DEqrED749%y@uI8;`eUc>)Sx{oLMhYNf zb#2dp=2_w6@u=#HP}9&z*@DFGm&4XC%>lcwx~~&=kat2HQQoM4oprT>-zc%vGW7n6 zcblWcc+Zb%#bR5EyG;0#19@2k-DV*$o*<7#u?z@&_7_cE=<>7_Z9dpsq?ov*azNX`M3=W2QEmWWjH>(SdyexPWY;p4X#6TrB%$}Mcr040#eYP50OD?v2@G>o9y zyg6w<6WdUnLe6MaAZj-TGb~4}S_`qiL+p4n>^7Y?qyf2H6rmfe^W#*1FZe2*bRDEl zbeFC^^J__9qofp8@?s~Tg9cicnLi^aSQGSUKhT(YmB&FS?AGAhvZ;a5BNh37^Ne+pa{0Xjl}q+hbLXoQU7Fx zI&JwX5g5R?k5|iK{UB*N7WiPdm`b)LUn_Ov87vF-03p+#sBj;fx(7H}X|^)AOs)3A z-e;PH`hxEz7%~l?*&bGcA*;L5(980yElBex@>DZk{r>8(3)ae}(#Dq`QskuRnI2HM zlz5bQz_jbGNa6-*={dl-kTsCrhX~@^Ydl7vHz~ZJ$)M@Nw+dA zbOkfaTTWq^snK3D%Xb;_m}DA~W65x3^3hMlCAH*yrz1}u`D`(qwChzFK(ZEd0)ZVx zAPh{HM7-X9?;p8ZA$$X`_#w!NBu{@You#@6#^7^d@==AKh_E1esXNclT?2%ilbDdZ zejmzw8m*8Ss#R!tI*kV2%~jP#^bs(MaPqqV$?UgG*|ke!E(U2{UeTb|I)XiePwKChV@`QFwMiJAHJ`H=>eUeH<08{UXDMYv6(gl^Dah za%FZA?`Ok55_f=4^(}(Iwv<2wLCWeUbDqEATmKq4rOJprG!~>;yC3~m9s!sf0gY>5 z+PWB!nO&9F%OirYgytDE63gqwci6NR>w8*{eo$Opt3KvL03)xQ6a@SL7~m0oHy6K- zp5onG_o!w&Q%1lA(v;Iz-0m!@>!lpBu&zJrNqOKbo|)Yu6yNQ4sd$~Jd!9y1PDmuV z{er4wV@`|D!*Ol%@Qdz(DD@sEQu 
zJ1jZ_Bfs!a0faPT&Xt^=|8YM@jM(0lD&x%jHY#9$_HA^3bvmFR;Ei3BKcuD(SYMZx zeMr;-;wJJvu*7%IE{VkQj&A_m?DDi~)}_Ya`p_Wf6a1qrhg+<|t785>?@Tx@!PbL5 zOB!LPl%}3Iva&}aQQFVl_vw|HxLDHkg2Uu!p^^Jg*c2tw`wH&1jpT=X$rzUSor$lA zDL{(-K9Sj)RH$#_kWGfi+nxFBeHuBRJn{5Ha=#D**^~$*H=yzy7040i^xZ~DWbz$O z4Fkf`Ei-Ss=Vqu_2lO1IyG!3_v;|qR^DW!V(^cE*z!P-!h3C4TV-?22fhkjyMbU1k z*UR0f0@hrNc*o~3!?L4Sa+E57pDHrIjxDl7_!>>!K(lnh4o@3ycNeN7i4s18*ix#0 z{`HW^T8i{+L%4eKa#$A-&puUHe>w;3Lu!3>m*$fOdS>45rRc^gtVPHo@<`sRV>IGU z@AoZjA~p8>XjRwEtP-rr!lCxf`BnLr|DyTGP~c1Jr654KFQzzz05Zs9A9X2^*mwm< zw#N%|h05??NY0~c4j|qg4wM+WIG}+rlqjrVJ23i?k#`02ep&3?acm>CBK9XbU%D^> z0#?4~*s@es)ng&6HEJA!cjCPl`Rqb}pq57Wfg8SSTUamb=RxvN(cPa9jGYsSeVl+J zqOLH!K}WElzuYVA)a`EA!pw;Nr-PR)X%N-zzZYJw6W^$k^hwhFGj#a>kSO9nW|{j3 z=3iO!pS;W>Rt06q1AaVD$!9{Yq|UPwi7F2OU9rK|E?pB`X@&7C?2|V#%1i(}2%>hY zI8-Pda{2ON>OMm4@W<%8sqXd9Fe9m;9z`-Qb?OF=0ps799W`4qar9=b;ZH`}lf1Go z;s*^nv9qJPYDf1SJ~i@a$HvenwHn`{0eatSTXS0)4UHlWPC>SJDQ1_zbf}Qa;wY7A zT&5+hc;{Z_{muPHAlpZ_%*i}QaP_cGXiKMI9$|K;$#?TEEu0opCULeAT9s{)gl`rS zKXG-QtH9_-uvq^q?A>a9MpA_?PS~9jiBVrMsYD2Q&rjYW!Y!v6cu=g$k zxkN+9DHOy_Nr@o(Rirovz8d~XS?M_*_6v=pRU3QFDzDkj_8e41M5;I6QDtE5@I)oi zS1QuA?W4y}`UEs_DT|{t*awZ{LJni@9p@SBoheNIZRZ-{XmkwrO(-vUJ0G@8i)~F% zHT5GGNX{k}0)8;WS10zxMe1b)Q{|wkH9b;ReL%8!OZd`U3~hED?F;Ec=(x_$^zoRK z&EJ!oSHF}rQ!~>|(w>6hBX(EV@fa;T0x_&|>=ti^p9-q0uj{jw0*De2A?WmtT4{?NtA`C3&Ems;IyzTT;em)(Sq3-RN zM|zt1PlJUIx41Jbp-FblSXH%5Hw##&OuGn%#sjtE`jFlIkj@G@^rm%DxQ&$QfRBQP zH118zSDBNSmkx``<4w4^LdjuCyXSJ1>_OftKHl z7%aO7E}$!_y94Q*Ij8cp2yYaJHEdC{L?XC3c`cI<3b8%cbdA>!-@UR@n)by{$;i)l zuijDApmpTF*Gu=1SnuBPyxL^w1M9aE-q;5D`vl3%7KaqVO~S5C-S{}ED8JqzHDOhv zww?FLkp~~h-$~Ejexgyq#Cd}#{uLD)flB20_dovr!2tv9nyAs?Qt9c(`;mOqzhD1< zZu;LJer{vGk%w(h20YEf1??$f+> zY%E^n5P-erMqud`JHfz9C2wzB{tJ#?p|jcq-k7j@;Z9i(M4r&Cz2F7-o!k#}nt83K zKX2>_GfC%|jF!LZB#))-o!=0ak8dJLwEEc%5mVF;am}H%by#^ zLsT8jGryM#cql0}!phdBg?#F*=uq)k;{JH69u^qSHXZZFB5mdd`?m6IB-Y~}5IOsP zgiuioXZ^{|A-)QE++8;9kd$vOC0sJvMe2b=%)h*hB&{>AH zU+r_RTGukE0ZD1wCPhC{aOX31a(h&o7I3E!Ffe~KyiT9aib<71*_}L~&Pw+_M=5j2 
zTth~zU>XY~zv6@)=3N)fKQHfpQ++LJhf?kyiS?9UJSQTsp)%9FVF4S2CVAyNUENi5 zb`o)(Hhu`xu*(iQtXA`puQbS%%Wi2)`q(FtS*C0Bf&6n`Nq@vU2ZYAr+MjTjk^xz*Tjv_n)a%h zVw>PhHC6YL+)8>}Aohu+M}NFGj$Y<}?u`F=)BoBt5@>pf z2rKw2X!z9swWpLmB32jJCR?s6bX0)u7akj$a3Wk_>Z+w0cDvBVk1wJ)BZ!*n1Ol+uSe8-|;d)9sKj zYDChi`23z$F{7@XcNlTIS^7g2|h z!!9e%n(T4;=QQ;Z^bo3u5yK>H^BrM`v(pRxPtQUuI+mV((R!~wZ7;M=M@M>iL*m~R z1?}|z;(!RU_?u^?HHQaFbN>OtSFw~fNS%N%|;h~{ibZN%yh*TuoK(loXi-3a&HtdJH$hFPO ziI=~E@1ETn*?;WuL4xDbZG(0Fya!uO$i9;ZT3mT49DhA6aa}EwFkB{*oESdtYA{PQ zTiVL`UGilq!S-h+_d$)}w{He&!3YgTGgAF1e(}Ux;)LH^mf1j5&X47LmLK+{*LJmV zrxFvnadK55-Hkc+zxx98=l%Ugga!9_+ZgEfBtCr3zdz5rGBXqz4QGBEyQ<1BzKa|n z*Z*<%w#4Wav)?a^L3g_h6Wx0RpX8MgO%(3_LzN*J;N9DA-uTOArlv1J8VvGGh|xUi9jjFFJFYyJ(qEoL|%6f6L#`16uHjk z=v#A?xlg(}k6HWDt36}<&K`6-BFvp(qf*psM@XzN^BXE&pO$Kb8!7dAIZemqK+jUR zM^?Qp;%Q!;=)=+jjAh`BNe5n{$Zt*yS98hCb0`XNUDs!mgCFcjJ1WS*-^JPm+~s$Y zr}*4fdxs7m>zH<1C5pc7Dp$ouAIG649i0qQ5(%|$IDZHm-szE&c)nq#@G{{mJVcn= zVq7;aR@Wt#vb;j-qMWy0)-^>=T$rL1vyU1qF+IdO-F(*ORNp=rCDO{Jh23 ztP9zp)&m(g^O9S6R<1Rx61lFf%Z1Z7>?XcB1vk5NW+P_bp;qu{Y~Re(+GXY0kN#C} zr?l~OTwnaoyw|J04m8BNi-L$Rck*?1?qxU6gJoix$mW9V)5ufEhdy#=$GQc!;s!oc zMaN(At;_%HxMxI8+{&TIq_%k@&%F%yd<_7LI{O5i@j>X=D7zS0g)`Jt@2aIkF<2&> z=bx=~1S*F}G`-u#Y!yh%w0(5^=0~#s_q_gJ>-@h9f!ZsCTf-C?L|L=Yzu}?v(pO)Z z8)fUX1bV8_ON<(KYRcfhKdNZ4D?nP+)!VOH9_hZMkm83Ha#L!Y&t3O36uh6=5c}i& z==*P4p_M};nZUfZ-&%2{vOwvFa6|+%By$JN%ZCv*hRaE_eRO7YaR9k5O{y-My)e{- z4Gz>PGxfBCa0^sI>9C7qD==ZHf;VkU6_BK-YckiomxSENvbqMM#dg+nW>#KL%kgBT zY%Gs(aJm`9S$urB{~40s7Xj{aT@e_ePdayN5)`|!@d2biw<3tUE(M~8Gd#2#TO1cT z%k~Cx(XyNE6P1%0%(b3tiAQx*9|&gHDwbBZQlU803_R|q<4Vbd|E?Qv?a=mkMSP?= zz=3{_-a0;nQl$N;G@~Y*lkYL>9=m?-mF{5Q-K}t zLW4bhoNKRUGX3o9?bE@qe6Fxmdb>1U_>6KhlcL?|EYFMlK5buFB72aQsB&3QX!A(lB>}DblbgcvJ({51m%s;$k@3-Px#ubW`WEA-cbCn(BE0{Q=H0}%Kh>ikmEMv-^@uN^=@6Z5*N{Rzwn%&3qS z*X8_vYb$S_(w9YQ&?v`C)#gU~Qu@|+vP5ZC^0I`ezqSdq#jwHIe!?F)6&fB!xn~>u z<(p7kqs!ZG25v(&b#v~8p?7Ygd)}JS&-e6$QVo*Vm{-#U-9#!rXF#RI+l=xshNu{y zk~9@E>bfp94JcuAqT$}>UFSz*i;_P0oawoXcGWo5ggZezD~5?hSO;2>g3V=ayIo63 
z>;_ZyJZ)*1co()LhObKR30!7!HD@Bs_N)8)PlK1bxoU7JQ`;QG=FZ_Wgk%hwCkxuc zjKZX~e7#2Y0uGPuL{e+sJOz53^vx=0zshK=ycK@pcIxho@~jIrjWl>GzjkDK-X5%+ z^=W(4odDZTJ$B-`qEWhI^YV{yckT(zG6H=3ZTH2t-6*eVxDkn*LTVM=d9Ju>N$kL^ zD%W1Fonxy&C9^I=w*MDj5=hY55LQ9u{JYqpe3=S0rb^|ITeWGcdgmSvx7_NSeV-~d znp{Xot?c|0k@|4gRj2S}p)I}wUg)ocAulNYC7&^>%*@GgA=UISg$maJW$Y)xBKO1+ z$V)>K*jE|4;xsHZ&O;XVz&0!-UGYLz;sKCN%I){!gc=rDyzQe+aE)QzTosSuoc8$5 zS)VYY>odc^@5ED%u*GK`-7nqn!5ezQNXH#52;*LT*q2Cd>>_h_b@WL`B@y>XyM-gxHLn`oFB>SawHVY?!w@d7sxr%f(-IqvM*tDDlU z7iN;&7T593CF|Xq#bChgN*ox8t8F=5$#&oX=6Ddq@*Ln$w+(?p-~%>-Ss&L z;1Lg%*kv^Hy0BhzBR2akN_f|{oSjYzik;z^a0;bl!c^mkoXq#YuiU%OU&aVpLGBg8 z%dLMDo`VkD-0SQOW7V=r|LM8^?s?ns4=zZq_oo`V;*b9qi0?o2RwQ+GC||bWuYK_q zRgc_%=TeS1VPyX*h<>_K*=2wR2Td*5y$Tq2?D^1vXw-lldYhwP`eN%#zW$bFirnUl z_SnSByzI}$=bjj!z5$EhKq07AKdq0Z-|DnnRogauuviS@9OoYRFEX(R=nf9Be! zg(ulG{V2U#fywG1nfXlA(m>$HaV@l2=>VO5t*vwN$(&+*#2GzARAPh+cKdMiz)bHt z58Wogai*d&W3Vn3A@gFYV3TT-CPETdLlrSXiD-$ObpY)}dsO7Kek;LQgC9c%7~&xx z9TLda$yh60qEjVj9d1#rBa-*xG$%UtpiDeB|Jyp3D7S=`?xNoeeaB#cE!#WvBHR{FD+!G{(BYxKexg&QvIhQ zva(KG{8umFZzAXpTu(BWnIj-cK5t{`eQqoqa*4i&fg%LJ;95sT*YJ-=X!v8VB1P0~ zQcm$k!X%^uK6XOe9;&dOc^y?_PA#L4Wt?9I7;H)|aE*2B?&-|?n`O?7#uAzqp^$vQ z4sK>uTsY!wU{_-Q5dt}OhEPkJI%mgm7ug0$fWa`_FyhF?Yo^toWi(WXzMZbeWx=r& zlFWyF!)xwBs^8od8i2RxL_~l6PSAq%L!d8q%id4kATM^fJDSmoK z>jI8mQQpKN@}-W+YUeA=QOQJa)wDB*dO-cizvRXYH+?jKT_{u=O z5paZ4S?>+$N>s)6#DOQe-AA_z99uu^lerHD!^bK)J9Hd=@P~1k)?|&!_W0ZLx{%B0 zuhO+VK>EZ+QZ##?9unfT!RVG=557-Rw!Gcy#B*#MmA0hwopVp-S=dp*q2;R?gfkc2 zkA&uZMr;6^^vzDYjG*T9CHtXM@LYUHOjChFq#z%MbP0BI@s#rslDUxW2-$S!9Lw;y zQ+PJ?>^hs2d`{4xFuLvV>UuTZNnFi)gz;>+wyx>*XU5TRRX88z8}|Nv`=_KznE3c< zWDGT1PzsxVJ?Ytu7a8SJn{`@*UY_nMlQbvtLx#PMg^=?kO4EhUitV^0)souGm$0Ja zDDL|?#4KJEn&J;Ai7A_J_AoAAc%Bs>^oP(uo^D1Opp8>D!AFBzahL8L1;w+ZM8P<) zR@K`jg3fVG9K+-ax&0TKx+xtp)fyr>`km~Vl(HrwHa}7}wQ)EB&!%aSzSHwriI6lB z z*5T&@^>WyP<&1x%<69lVxT!Kb!7)c@F90&Rw?6M{WurtZ74bL z#*VP+H^rhK59HTgSPAO&s?$HjyRxj#|1o$HYUV`s$hLK^v)f?|uyUo+_8X#IsAcf0 
z%tieJDrGCjXNazl4Wmk^5~qDF&*}(MXZu^9(e4jKg!y*sFSr*^r#7(vzBm4Au>oZE zbN0=f+i{xT)83pD)5L$j{Qpz5|A^jVVhWR(5&yf`&2##9Wt4?XUaZavQkHn}wwY2c zgUsZ5e4(4`@4WdSUMP}ZzIW9wNdccoH060K;6c(2+9h$&h0;3X-{ zAUg}^o8!@Lj5mk{r$ZUd0l!K$wL%;Y9X?)cqhxfYjc;sGXH#F#cA>HbZL%zj?^ zd{?tsq7Cd7#fZtNQgOmy28Vt>-60s4(HX<|ELnUyK=@}xW4!T)CY;XkTgWhgnii6L z-?y-9VUP@fn5JJh=cOvZZ$~^4G!1G{F0zb`(CWJ!Qt`>qk!uo-uKBUcMU!nGABA#q z)m@n1F3&BF+n@*(LEVJ8yI{bJNSU4OQbtY5H~WWY2`M;9+=CVcVSDF zTW`=W3t<5u=KLIdEq#KF&pzwk1E~~x{5$7vhm1U1LMPmJVP6T7a8$_{e=LcLTR+`B zc}xfC1KZ>0@=AFpeyVhnLx}IsAH*~Zz&A%COB16FCON%(4fi)Y?kRJ8!}IYhIn@KH zWW8SRR?|FZBd~{wqw)(A{IMWLg+$U1QPqx1iwMJBp%ip_#{>G}i2A6`gmQ@7{&pJK z0BNkEvsUaKMvCa{mp{0)Um58N_4C0vrvvH30`a&AzFe0BcQ}9kiK%M2s+4``<`Gpf zplZ;bW49g-z+4F@yT}+o;(3*H-6*xYlQJYJ*qnQ)`wWfj-9PDe^q3KrO=PvGW^56H zQt&7d|4+N7i4^f&XBdcA-IDq5k5rO=uJcx^>$e+ZL)7S@h0_IV*+BO-3;R^1GQPmD z+}1l6d+p1jcNS;i5~LcGD}k~0T=t^-c3EIY)x>mqGRE05O)(S%_TODqk+w69w#yM_ z7SSOM1$_VyaB#{m`z4H%;ilS-Gti*w*Rqe293xe%ixqjmq!;IfXt$O2=2ECi8i5Oe ztpM$J>GcefcoZ=Uc=xZ{ao+}tTV116%iiJZoE_+5?ZK)V4*h%kq2nzdd6u5*x-str zr;?iZ*OA;+R4X7Wvm=!LU%9CUlh3TQh0%+AEt81j2r@RIG=c+;>!yz9UihrWK@OnR zB{_qx4=bHu)3`$JmlVV;=+8OGq=rmv7L!0Y^<9C1iqo#?h0v3->iSY6Zh?)T`$DzB zyzcSze1s=~YQ?6?v)eTd_zgNgM>LcxI`$Mkvz0?1UT69?_5iJE15MjXsJTH;cANDN z#}UGv65O{hp9rvgZ8~hbbT_?Kttj`95H{#cf4&mlTD}N(_{4!IQy4d}JrK17wnp02xc2HU3XNjHU3WBTFae5G(;xB0*jOpPA2p>-%!GYI}o zJz3lGs5p$9%F{R=lu%X~QemP2ZOrXZ_M?0G45TJ=!BJED@8&R^q{3e&`5%2LMw=UNhYVMM;?HPvq`!_tn8r4=A z*h)22^$O}@|fW9-wTalB~^Fe(13F8 zwebEeMe(vGXV zY~$&rIW5~4o>14)gq}Mck9FYSNs<=P?sQgr7!wN7E>+SLX?JTbqjguKaW8kkDJErn z<5GU&w4H%!#5 zOcBnWVqKNBj#Y1@A13DCPcw(5OR&fOES1AYlb6HY?OvL1cdxNzBe@_oFk=?7AU#hQ z!ER#z^cL&a3Us)$*AquRSQzYi_6+DeK5CBRS%jl|u_#>dFx6#-d$|M)bYxnV45PIG z;`X}_yZ!^te4@5oKSED3i)qW=2diaDfVGIh@Q$i@L5_Ek~`BVbsT?K z4PD}+osmX6CP=ktBbZ@A(GN*0V#4uH3lRCK=5zkuqiD;5$PAk3L5sHfb!&%*j}QI7 zG1Q+0#9ue@YZhhJs_xbAY5)IA$1|V#hm}{=18+9jf5SRd#h>|_z7KLQ7RD==GBhnu zd!@>bj}jf+$DeB;koex>leSlgHQ=UjRpK*6yw2&9SC~j@mch@nR?AAwrD;h9n6f}~ 
zSNS$7`GePMujlEuz)LT~WqwLN^C6ijmmb=wrq@cuF5hgvkihoz8yf z(n!sgkSp2G5kiCmyZST~)vQK8R3TQ89P9UNJ03BmX@*UNFrHVQNNEE*SXFjGE#Rb+YOIKXSy(bVstPU1&`V%(u;w)1P3 z#GGz!w5Hr(>Tz|G?)?<@c`K0<^H4`UKs%9KwGu}WDygq^bsK@FEyx?BP#nKu3<4d; zxZ#I=l^gbxeVHJA>Vz}lLW)$SX30jKQn1haaW-G`T29}u>GC$Cngrucso{0Loul^$ zq|*JpUBXs3mTx+Cg?#oV0lSra-0iojz7StjnXKhg zQEjujho3dItd8`UG3d-frJf&toG#;UQaWI|+bSu)e71FFjWJ_;EbjQ-b}rc8RNR6H1%pr(o`pJqw>jvKdQd!T1PE;h=x5S zWHfF-EEvPG>y26~(BiLNfaE^5tZnrwua6AnmCCxR-`l0x4ERvARrLrgV;+|8)<0x9 zLLbHUCUmVcA$B*HLB#{+-8ao@=@3Vq-a7yK>CF9jo5Z&0!FL5smA3shrm19ddDRyB>JX z(LWz;s9r&p{?4y5dr&*OdvF&?W*kpA)X%TF^!P=4bZ?yYUQ+_{e5g*+1-d@=y(J_& zPDcs4m2hZ^6nLl3*y3N;8{peKK(~;kXosY|dC@1fqbQO5&ok?P-k08?p`w#WfurF? z{%+)GIzj&>p=n+Cv!a@hmv6jg;A?{&UO--Td)rve7+W>n%`?U{w-oYMoc2P#n&b*TgqD2}rILS&c5pI|<$GG_O>;vlQ=3dS8-iOL zYnayKdn+^M4cO&ohmQ1nTOwKVOrBq~)sSr6Q`e_I=&(manP|*(s`^@6^;xqE=1zag zBK%V3V3#!5c>Kad`)#miE2i*1e!X8p^H%@)!6edl`Z`KR3$(w68 zh1(KbEL!B!FQG+djfg(}!EHDVBbqyq-_fx9ba6E*Xbe8`5SE8l>EL5Y`-k{1-~~mm z{-b5Nfz>SfzE}dxx*evF0(gb8D;<4i6a+CLks;cBrD;p}p!uvFlIeolEq>5+fohXVoY0{AsAW> zj~Faf$OwO{Lu4=vHsKYUtBTPTnt6=$9;up#Vy2sdisdPE+(_$+m;3Sw^$N=f`^}j@ zRNxb0kjCAFgX~zKc;x5PJI(|zv>ydvk`;gv*lK<2m%8Qr68gvE&<;*>Ob7H$2Xjic>K%AZG@!6Y_+nBmtrOqR%-Xj_5&AYaY^=)AwM$-S zV1I+_WzI0Sg~4~;Q7_;%ay8(Rff}PV)VU3?n?llNoDZ-7pWLw{lk;N|wyydD*6%Zl zU2^kfIzJ*4zj?>-pO#H{+3M!oFOJ0bOMoNP1GL2?4#p!lnKrEg!YYO-r}KncWa;g0 z-o`G>(6)4F(+T+G14oyKt9(5d+sOOic|+TVTC*npEJ;&Z$=m@cnRmp&2}My=zN_mO zn@aF&m!QJkO6fCpUN|m+e_d`qsZsA<3x8a%K{V8>H$OzCLCS6(W#U>xn3#KJ$a5*#y6JZbL?rAW&H={99uQVcwHd0%Jj8qd3_Z_WZANU@aq9` zo!%T09wU;ci%EmgQ2s%{p>yvjk2ae9w+5Ax;!cj9vE%#!qk=t-(cML}UO1#QtL6R- z3=yyl-yiK74z<(|8U3{vr$SQd6mVZ+4!%KK8bZqI0``l=>xE~#);1yacUT(DZeC(h zOV*;3oA>5M7Km%N+yJF7OX0*E`O05JlQYJ!;Q~M4N}P=HquA_BM^c*R>m##%8h~CR zsqpr*H{Q{qNr&)|fmgpzS-5wsUQM0|l2pwgyAIZ^m&uCMtPsc9B553toJ|&3#jD#2 z?p$p`DEv&isKciLs|T@T7MGk{LHC5l4B*Rs%ZSc7Q4kM7=2q_Be*<)efM4Tp%F3)i zmR=wnj<^NOaKmYj>Xi7NW2qd%UNXm$mQmReY!;5aUfJE##qk4(Cfi)m>3OXBzKvDh zWatwlEi2lLmD18ppnzQ?faa-E_qLU(>yFkKfi3S 
z@xI=*bT`b!1kVL!WA^kU^?v=Nw(;UI1U9?iZeHzZ!hYDRfY7iurjuG3?rL$nJ679V z>BV=2)>rKTMRm&-5q3#7+R+rBJUl2=Abqk<@49%!ovFB=cC(JE_%#6SwCiT?%Wbr> z8*WN9&DKP=gU|`-Z|Cus+CP%Uu8o{XTWgt=o2+xip49Os^}9@0vzJN&+>zL}67dV# zt=u0@+q!BdJR~S)2~!o(Zho!WuRrg%85+Clu}4TC7QXW}h$qW4tFeYsA^qv(%Kv%; zeU{VP#71>BJ59dUJ|6lj!yT(mSOV4ODB_)eRo_#4+MSk*Acn&eTK*-LsnNdM5Wn`% z9hZF31r&GgBM1K!m4!SN7Zcs?rcQ>X-9%{lnv1kyUu?Vv9>ty56CoE~h>T^MR zx@3rpXd2EYZaao6jYvA)f|$vS@~P9o*5HntZ)lNtDBUCdgf;rx6iJZ9qUEFmN^R=Y zQqR*+cwNduuqRAi`M$l9cc`GA=PGp`3Yt3-VUG84yCi~Y2A!p^rveDDKK zHZ!&KzZ*j3VvtC0Z8@D%2ya?DuFE_pA^*|A!OrI+E;q4!ue$&K?~@v;k2r;Fbusps zxaPXxLBgP1?Awaz#qM!C+poesZN*zbCN~*y3S;q3*(%9IM_3l(w==P78IBdc4b<(J zXem#&N*8P6*QPIW66+UG64XHf4*oHY5h7n(yS@sG-W72kj7_zWax9N(qz)opHvI+2 zoFRk*Rkd$131s17eoRoNisHO5FK=TcXV@R0R%pBBaX;e{EgZ z)8eQZ=mkmC86n;&@QSX~s)h8{DAQT3aky?lwB=Y$b^1b34iMVWdHHL6y9(>Iz^2!#A91F-KD7F+OaVEU=1oT0d%bV}wE2OzC{==u zT*bI4eCpOyvltYd>>}W?_=d;xqWz&JS7jrhp#4R}S(j^0hK>`uG9ky3`=z3ue2(xS zsEzXCVc8AGE^P3pDc;$OWDm^Og1Uo{g}Q(V9&7^^3(S@{PXuG<|Q8imJ!ntVPlEx6Z8 zI#-pXFX%Sh$6j8hK)PT+wue;5Cem)ns@y}OVN&`1E~{1_h&vozJe{20ooT{DH~F)> zCVE1$4C2mmB$f^>da56f6=^VM>#sXEnKNH`^)6=;$#|tA_?Y$_bq{SL3N1yuFK=Cq z2Z`T*>xCSf&SNwLChrI)`rUB&ED!6z&S#n47aY1NK-C?kPM3!K?%VH?BMU3xOkdS#nMTg zw0 zyUAoHIMX6IP1dF{1?WZ+bLV@TEaTKGAm%aDZle2TT^Rg5ut-Qqc%72uT?Fm2Ybz&J zQn=9hW~_)3K--W_iCj~Dd_8ZR>^VzwpI-b7npa7Jm(=7t`Jr)Z2qA!$kYRy1cfVOSTzP|tyq(4L`q5cQE0GcQjs^A%pZ+BQbJCL9H8&*7P~ z&`bl>bwnK?=TRFE9GJFVyjlT@8d5G_)$=n5)WWj8JWSw?vh2L}OFmzeeOu`%^9_rE zGWIB_9b*@>TO6NR5YWAQ3w45FYc}=UqWfpCMtpc{k2g{h@GxFxA+>$iYaNL|y=Rty)v8Nk-6vJvUpyUl^wRjSZUiOw>yA9$T(_1CDw$pZ$1 zfzAY{Hgl8gEg6Qd#%EAKZVp;|l+O@XoLAL8CSbW{pL}R@5dZ*D>x+^zD@5Q=WzVq~vQ($y75Qqs}Ylm3GftfBV3X7U=i zwKi#D;-9l1J3fS;vd^z1t+}Fd1zJP;sNhdlNoxFW9tC2<^CGFEjjd!DVmI&yH~0lc zSh>ZXQXZy8c+%dCfl9PHbcZ4q{OY&`>Y!}Egjt(okSj#{<-3*A%Iue(s4?J`|DF%3tOz@N}H6 zHXr*#sALGj2#poNKF#*z5|`pu?*{yr*v^RGI0@ze6alI0sYsSw&E-eCk4z~*{xael zo5}AmQKQCD@)Ip=1+jjkyO1vWKu~$5%*8zQ{q-uGbaGf}#yPF@!;xgYQKej9=zMe_ 
zSTa|5U6djVH2ZFhz-I3_qZW8>Dj9=Wkn)Uzz+bMSN=`;_Z$EVKA8WOHvmP@=) z^^uEkp7xgH;(`|d-UO6er$Wm>c}&>e%Xbdd1#L2+qR^yadj-pUf!ArM|-1 z%Jq#xOM5?5xpI9KvnQjTzt$m?`O533+5Fi-b^kP9Io+*(YWs;6@}<7*ZW`sPreE@d|FS$I9+%93 zrj{G_69R+hnXf1zTXC_n6C(h;+KDx?`dM)B@v}sq1+0tSa7w#!}-Ee)K&pX3+FP ztv+9=u}6Sfg;|2BmsK)NTTe5;0+Snopu`zJ-< zfsFRH&(|Vie35g{FSuLt3^sxr4TeuIQUJ0?=J4#6m>SB;qu13v2P zpsa$qU4!*qt{fF9N4dI6cQJaE@!>ukiy_N7ka%GMlZhHzHXcP>r5~5lli`CJ8~^-# zs`^LsyuEoD!zGv8{W_~JKMj|gJmY`^moBFoYVe|1%cU!z>1$XBHb^!5+A{fL?Oso> zR8=+GDFa~UZ`?iuuTirFEp^1(F%K&hQW0uIs$PwRi&~rl9lpQn@uPp)m1MWF z*Y2+z^phMJZiTV`+OluwsPe2$@*bd>)_+24!&Xl+v2mt)aXH+1!bb@Dj1CV0Djm~U z=HPZHK5!l#i;*n+NG6L1y}o3U!ZDX><2}d)nX1{~(RK8jN~|>80cAq~Q zb)i4`cq^xCQC=jESt^l9J&E7W+yyY92lalpZ0l3*2{Yok>UW#}?r5y>=%M-@b{aJeIuRyfx;-!@`g6AriNnFP_ zbHZSCx~WXv&}x_4;Q>HmFw(h1^S?O6hB8!&O|t9HGDJ=)eS~3$)EkbsWK0gqZjHOX zS2;NECBiJUmaJuXLJ#a-hNc88r6{L#Zkz#oGM(tG85J#WSUYp6#L67g^RO9rN#}12 zPI3SY?!1`jQDC~LGi6f);#fPgQQK=j<*Z8Kj#*0(e^@+2#2b}r&nmU+cXvj+DvUdQ zvW_{^%sSve*9%Utkl_2>AZ}@g;jD&bxDoF=439%MLymg27}vr;I=@j z=?Mx~$!R*V{$X*^jKxv^C&8b&_$T5w0U$uKH4oO0jxYCU)YBC5+jgw??ME>_mEw~%rF^nQE&Z1QMnRGQkYwOV*t$#Iu zCulb`$pX=86Ig4{FlUNFM8LMl_Qr+F@hQ@vE z2(eMuPMgzeHN(e&H?jcMWjVy3OBm|N8aGGv&-rz=-56;@zR_}R<@v^e*W-5 zceZk+U+pIHPPH$&y>8q6-dKF3_*1{be%!;&pqT&e*!6Lx93 z%y13m)eRNAU|v87Jzeqv>AUc)HlI(_6a^K6N)&nDvB;DUn>xWA+&GUjvNtcx0rld) z*~@)$EQ9XZUh)s+&uTouse^o-+ZnnD)$SsDx=3NL6uuzXe%&v{pegm4pII%T1225m z_Hr0r`nEF6B6|kv80J>pN$Cjng{4#C59=Q!U-7JF^^Ey8PHCe`XeALbe1=Ov+~w>R zsWmeVZ8m*EB$U@;!CIp#)_<&eY~qn6VbuPu0L4xZ5L z_<%));>m|&kyREqO>WPiC>zjYbfl0z(nF2@iVR@dT~3mGb+Al^kz!z=Ukqp?jO>i8 z$2JdooLzXF^ncqVs`Z4gy!*GNT0WgVyNd3k5nB zH*}}9e~OE-A@9KxnR|7w(Z^xFUn(1{EhZXt)jEuk5AxTa#w2c=j{#;L#i3$Qu&jt zFxX=s{l&81!3|TzhZOovvJ=nU^T#%g`{}?n)4gO1!qFr;J3WN^MrD^(TI)&IZt~?} z1X3=4LjQD=&4*COzw0`{47Yw4`aDhnU~h5u_^m$HHgCg-S}LmlOykDsw@-_i0M&nAm#c zCx;~^Dss=2Wj#AzIO24Uwd=?e_e2o`DnKM`96R=PgjIhqlAq;q%Q8V$s>4|d!vRxF zFqaeUb-~82*{HL{57@OJWpLmsNU1}bt%3e&@3m%=nYA^HEp(nzB}%T|1lO7ko3Dv2 
z>g6^{p@n1XN+_4RLWp{c+1S;^WcjpwO9Y6SrxI+`lJ0wKG(#QW)MPD_8fRp_?RM0U z)rN;DlSkIv8GXb+aP?C_pvy?~ALJ~?S{g_ zdr7K_Mn7+D`N)4zA#3z&<98BAi?*%7Xy0+E*{W(g1M#n*717EOZV$D`7?uPlVpFoj z#7VPC&S_vbTrZ#?iIVotK_8=awrBRh;_1fp4_9VChcOh_8E}OPtjq|@e66Bk1w5k9 z1oqstA%=NAUfWFg5kQL67h=Qmrvgv+7c|fBL^>bF&N=l&voDJ)#u0xSomh-5?zWMY z5=)OSWtHe{zme?se%TLIvN*Lr)21kuEkfHFjDRhgT(p5y)eGU=FHfKb^BCjJYVRH9WnViH<z4hvB!J%v#y-^ThS;(3;VLMJL;B6RKxuo~n z%>Bko*AnwWAG0)DQQd$H+OrL9Bk&{3|fJr1w2tte;f{qmvJ8<@=!lt zkFUXKP+37JacJsKDJ&!o+A~jGGlb^=L_X14<<%o4gpU~@fvCQ3AK0JhyyBWHq0a5^& zJz={q_jW=T0U|wt&MLL0pmVBH{p)T83i8KNfO>9K3+RdpNn& zYGKC#ZuD5~4cjM8B4P&mNmoh|OmPh)DD268j*KUMEVj5rEF)=qpXNon=8kXlpT@cu*zWoFa{<08 z{MQaeE!#S+_D~rC(4TE9jr|c}rt%(>yrY%=5GcpiiF&xk3e(Wu1~$V>Bo7Z3QwWmu zOT~)KG4k>a1Gb*`_j`O^dq3?-d;Af4`YC)?t$t2rkA6{lHh&#i#&>uXQF=26$8iX0 zTq)^xHlSH?$D3IR9#o+{+4dIQ&= zR=j=16}BQa{MS`xp>W2+-(-N{6A+(G&_lbT5aL8&(&B6`*e{{ zMh@(!*Ym_IG&mOC!mA^V<7vrQ>u8y>!Lk=Td$EMD@|3VnEq4HL-k%Sh0vEW`l0{;v zolDRv=wAG(_L0%O?HYir@N#pWB+2N>)$wpU5IpXbY+lSFxU6d0S!bq_hx~^{*FnCH z^8}IbB=nUbv*|y&ZjwN%BnS*zv1>0h{mUu?4n=H;0ek>xSF2>n!#M(~A1*_euVw?| z7Ga}sC|kRmW*YwJx3}uouFV>R4iU>X>NKNMf0=IA``TJ|SZvk@+je$iVxr!Zn^a$N zg+1m~K9Nm8}L9=6^Lmu~v{l-~q<_8atSqgG15v&dD# zAX&<)hsm?hx>Cl)hquJzn5<}t&e+7klUrjoCD6f&T@FY6IF`;%PVMw;x4REhJ>()$ zUuJ@dnY`Gtn(^#VH7dnbfpv5LnEEd;*HS}WrKB3G(YLIp_J#hn%;+u*8e%TGD(Dt# zV#1W`$vmOuo%vfJ%75E^zLtB{|>_}qAqCOb}`xw04AfrDStU4i3k?IN!80TgrceQ} zIm)Utc6u4AX~7e!Qx+|NmvMJZj~E)pN%$k#+UY3R8Y+e8L)TDzVz?i3_`_uFmDAp1 zX7^I4_zY~)Lf2&U;E$U;3m*kCup&%TQ#ROSF=|~((di>hr4Gdn6p)w?Zg9q;nZMy6 z$GNVO5I{1?_^PZ1h6?|+K(aJ$-=refoNqYPZSHR|j}5$E7kn%6d@I;6Cmu(mricu< z9HCrgSgfhydGbeK20`bs-5@LH$a3UNR9i(KhZ01kf@k+4#VNy5vpHfxL_>qeGMHUz z49Tr_g=y2y)cw0B-XLWzwlM={l=DD(M-T0}gau$WjnHr3ruk0#WMBHa+Gtqde-SEU zkZakzllY5*ye;m#_-XJqU(;yYCN1t*m6|~42{pl;Z=AZ(mj^iz#YSjd7CO$X0^1?H zj{V*uv5Qa!AYw~P_`l|~G-91Hi>Te^zu_DX`BcpTskZ{Lg7||R%~j-v+qV+f=LQ2tWEtU>~4V)t^DcV z<=Fpy$#vXFdA}Wb&)r#?IQ`2%K*ErKzu{l3-JIfRoOqb~H0_^g1xnNXIH{AqYipJI zTUH4%Bx_%Ai5*fXrGXF^n3@u$uOAJ0_;fr@5h2 
zJ}P|7*s|;@t|sQ{aQma=zxYzipO8T2FDrxY>YY+q;6Ch&##gBsP_tu)U5?NWvN)@# zL#iGS2@~UG(pY(BbH<~N<;E9vf48IOA|djdLB=@8(xcFr5|aH?Fn--0bp=uWOa0Ug|?AmCVZiczqIA4>=KFzo~JT=EVsM8WSV&L_b<{t6%&$^7A z1~DcL@*p*9fmG@E`PJ_|>$^UAEA~CLG4ei!GEtehQLx~<-y z%gAfhI8*x9vSeIla1RmQq^(-KI_5c?4wJ&PCRSw^u*rDzIQ_sG!8A(oa_wleQD5*~ zMZ|iZb?*(0qG(t_B=T$rP?pl_OomVUt*AkI%seJxd<5oCSM8ELt_O&;ac3W_Zf)jq z5cdR{z?OUhxIqNfAXrt3D{=o8&Z1cuD=aV0FL6MD<9iec%2ioIoe)+iR$y+^q^NrU zi_z@UwK7ti0vE{?swjq6%dcOT^5Ke{N>Hs!pShN^T~``z1d^a-7TJKN4H1mLp5LcCu+qdZI7A^! z6=QqIX_-$BpB<8(616-zU-XRKc~q}cfX%m87BAGxxU%4j&`OFlgm4CX%13@a)5bo)s8=Ad4)&f97l+P6UR$613Sb)J67d-=MBQ@1o|vpEW<@%t4xkA62SU(gs&@(pc zZ9i>oD&RGu=s;^2h6P#uQBAP4OE@Wb-+JvIji!=6+clF5U^+AWLC`E#fWyDO@bxfS)obJ5BsIME7aMD`Hk%r@VzdqzDaUdnW~&h zxrE!x&cQ=iob_W4rgxdcvUxCWA?q6e0j_q>?YBJG*At#R_1LBoBzDBV zhao$?P(AkME3Gc#-z;YqP#KA}-#f*#v!xOx1X0-k?%YU zO4oiAu;TZ7;Y?%a}olKMg*OA6kj8p&+V^k9pqzDZ%On!2B+WXBvJv@WV#x@BqJB+9P zwjixr(NJHw%HBKu`g3&h-kAR%%k=-c>NRA;YnF#BBYT|JxB&OPzpwi9_dMKW3+8?% z7Ni7Vyh0a!^W4V}oOpOA`?AWNahLg}DtL3K$+bj=?_LW7J72SiJ%Qnpc$6z6!)_uO zyK4nUjMe)*NrjC``e_ZG`b?TXG#xI8Kgw7UM%@kyQb45l3LQ!KgVK-;Y~qEwc~Dpoym~nRb-V>^C=4Xx2 zh~buOW|c*qPlIKNe zU)|)x-dKy%&^uE@-V;g~qI=uUa2)Di#RVm&VRSj@ekdWh_pSMIrUp12=S$Zat1vUz z26FQG%G%XMRhDiC0c%=;E9F4rg>@HFLq&mQ;MV6jK>!zRF(nlFX{S{E3RtgQ`3vv; zvmkOOKE3IaZxDoXSzJ4{&_^LISN7hYW8SNBu}R-)NSxZC$@-wUNsQX9A!}g%)^d>) zCQbyjC#n7z=dPHUT91sU0aK31z%n{HwgaevOB#5+$t>oXTUSiW5>h)%QwnG?r3g2m zo66}#{d2|_mHjhkC`jFK8g%(Aset8aHzTmwljdDE)}{i-1Y%-OShP?#>U^aMY@RH7 z)P17%Xda?eEs@}+t}Gy0op>J|@9-K-zz4eV#U12O&^KF5G$$%Z7PH}=zVI8iqI8nU z=fn05m%qGmD0}SD{?Vl^Nm(m6c?$>XH0$`jsWH?vd!`QH5VW@rFyEl9M5|*U5Lm3|R3sl!Uc93V$W-&r6=;m6e6<*e z7n>@1h+wUGKEOK*#Ibil+nz_{4mw+S?%m# zC1a}jyb%03tQk#BvaI7~+$bqW*^Z5^b+*O*SFix2U+2SP+O;Bw!=83ZPYYd?g>JC$e38`0fMbf;mT-F8W~_*{)r zGV}vxIJT4Z`f`VFqIItQ#uGBbsWq4bJY08fp}7ZCajkxa6Z!8>IiI6g@63%iGhmn| zZUT?3OqTzBZ~A*E>VKV8tgq033egcK>4V^Z{NE=OLzZ?&c2cmt9;h0e+A>oeW(4Wi zX2DxjfHWyz#{8sSUtT)(f0Zp8#vvS}ZV`x(5$}v5td;9bF&+dcAKmW!+Tq?5(CMHh$Tm7(8pMtio1ei_5E9bkwM}i?S-* 
z;x3#Om^>VjoL9Y1S}kXYZu5;2L$Qe5m9pczGKUB*XRQ9-335g~5EjNZBAYC&JJjX! z;?&#iK)C=?^PhT5GoE(6&wDoP$23wo3%uwgd2B^{^4RIsl~@lq{F z+vRjUK#6)Q;Z)auq8psdh-jCUC}JATn8|FYb`#Vq_>P1FZz60Wa;}HYaMCqvLasD! z(BZ#$QN1}(vWz`E4=_V-ET?p_Ju9jl53M+qz86bL%L=M=1SMi|&kf%T;t5xC8n4^( zrka7Vh>KRexf2)J$slkI+j$O9%|wxJUNL|o{&uRAe2Qetp6;Ugj1kARCL@{p5&@6slhmX?0Mzs)JH?}nn$BYLz|GkkQ zdS&%aO<Sywhx1O!?h1En^9#W5NHkG@6Cv2Fl#jgjZaiBkX771YQo&vhbWG_nP^zM zv&wjOe0_b&q9M`)L6lvsUGF5m_ro*evpfCV;srsKH5LvJ+_21imoph4&C-LqSeKvKaCdpan||c!8^fB{4rKH(Srr<PbkEfgBARs7RfZ^1B0Ci}_I^3h zCzaBC3n7R^2cW4pWRuY{TL)UttXjq-TIgMF0vlqWEw|-%ONYL9lp&NlV?=fY;ej<< zZi+OPwegm+QvQ2*D?vREU77en|Jwec9sz?(`d@~z|Nn-kDFV6K4q=DIE+B#SPfvXP zk?h@$;M{JdbKoL&PbD$$DrR3rG>PB{OpGazJtHF0ER?RL1@9=(7`!Yd7!wIvz}p;) zPj@76@4PK=Bc6XQzli{ARN0V{QcyI&-3i8!1qd?4X=IDP)Js7A4TI6p5(9I-PK}FeYZ5KpauX zjj4-9Qu$usvTjhbq+U7GNPT(~Z|T!wb9#K&#TOgvm^3E=UZ}75+klW|J9Gl^sApZnv>>s#g~VV4o|q0 zty;b{3PC}h7jBUihR@phL->F6Cbc|0vnUEAeHZM73#01QhF>xt0W&>>@g~c|9H;u_7^`% z+V^7neuSzF1b_Yo_l4%#bMYI?g@uO>qtm6<^;+@H98?v$p&vQOa&x3h+??6m=qRcz z=!rf_(z?0$w42p~-p7k!FPsKxci21n<@9mV)|&OBy$DYRb5;9T3zZ_8$GvX(J0Xeg z8m~C92o+0DHHF~Fd19qIJ>A~{N9&lTn$HCXB|+XAh)1n-v~kw^Shh~*cFOlbbI;F$ zLJpOmLzda09mnV0lf@Cwv7+;GdBB}Nf1A0d8L)yR=AMZJ#FT_}+(kG=!iGA^J?qO$ zwO!!e=}ypCVaMz7lE!?}EBhnACB=gSMS}62E?lPaJxZDBLj+IknFqQF#k~fu6M>aG z4$xY&-@mByb~I#$pG9&nc>*f&HumLZO_k~nS=oUQ6uwVxHGZG}oIS|jXWXG;g47g)Si>03 zizu;m@E+R1W7FuK!Y^YdR};&6SJ}%Ey!IRGBP8ln z6)|#WJO!Jv)w-D7$~W`{q%o*JU?KP6x@x~%d{@$ z3=N5wDVI#KGE-@@=90*JW0JsphX6h}O1z__9lFDf?{j^%u312=hhea5_}nD>MFdZL zTkuB#cGS37=112X*{8|c!lB{7a}Oxa2cte?U~yPX8XoP(UR>gaVli|7`U0g&G}!rv z(BC8*Z3;Vr1x`y7=tb!aUGDZn1oUCpC>lwP(wCFV3I!?9RGj2^PPk8X<}_}R zC-Jq8Yhp%;ST9Ybr5QvUWtoDLYjR^0CBcyAM>9a?|wMr0TvP z-ah|k@d#0N=ypSkD>G$^$4p?QZOM7w>hdIpY|VK4ntOAhb?+NHVe$Aa;?aj`Ht%G% zv|BnvQN57c{`k9+eT$~eYodsx`;@3foxjk8^VE|Pt)*VmO;vLY?eq(MK>XL^T(o!f z8;3Hd=8P(pelWGR>1Y{)DV^0WN=h58?(|+YMk6e~pA$t46ZssYLCP28Tx;*l4Q4_$ z^qMWC7W-7H5K&SM&r}ki){5Nxa5KTDoaJ(+?^@#n)*teG@xLYnO2Ne4CJZxtXvve3(;j^);Jri)1aP#@h}xQSGg{4O301 
z8)-Qs)0|-}=ID9dv3#7#6HC0hRsq>P&_YctZ_wwL#2)KAmq@~5q=V6ZZquQoTaOWj zChB=Rnb5^1_+91B`Q_{0mn8P=A9*WAuP-BkO*GV6%M_(jINm*T&-Zbq85d7ohWxW! zQ-_7mmQQgms!ir1wEwkt{I!K>LtGy0Q&H(3gXLS>~#L^4d*k^W>9Gu@_zwkZsOaI{psLDCMMg-u%3A&yOzt zp-|b|86I(3o0-GeI%;<%>_VF5e4Q?+_twzc{F{OuIh^m)qmy|M(|Yb}+Xy4Jx6+vu zOYTPH;IyzJye{s*MF85wH?h(X-2YnlrWX zM!fJqbo;)=T~N?x{$y%5dh7rD($q{b$@6rco<%H30+~XC4OddH;!4%zr!uQ%7#4!d zn(-REpD+2C{4$+{vt0e6=k%wOc1M|SOU$j#y~JYFz)da@7K=`} z&9P4whbos`@!(+4(!dh9E#4B{TnhKM4R9HckK|Jjtg7o$i1Qo5^wIKxJL;pyo=ojZ z^DeN@#eq1i-uVcJa0DugE^%1j-{#WMbpX|*b6PS;Zu#&w>flO7Ic$NQb`TJsbP7M6 zbsThQbCn{^{cKO=^ByeAB^%Q(Oo5oXW;s=X6~2r)?}gbu?r}Avy%#ruXec^k1$W@7 z)`)YPXRsa_zr%%Ew-z^!oAjbXiJA+Ht(OKd*83byf(g%(+oV4fGlSyVNB z2epORaieL}JflZrqb-U5u`8{7C_2Yz<;@-Jo`PsxF+KbG3JYyTv4JJNN9}$$vL7kTeJ!zM#n~n{?Y{ugrc5z_5+d3PSLh|&0dwx3W$W)k@)sKW zj}L3hAh%U+Ryg?#PmmBWFfdka2M9afpG$g3z0eD{PY-5`6gE5`2p`uMfZlKY_|~UB zEJF8-`b_PQ*@ZbWLT~KkGhx)BM6LP8zE#9oUdItXO{1=mQR z-)|9+Lhjr_S&z=%ql%QK(+m^kOUld^V~CnD0mk)D#Quwz?@ zlQ<>DVCYtZFcA$v5$oa7u*-E-lz3{L@^X2K*WjqO-qc@sfLK$vHos2gbkK zuD)0?x9_4Sx;->%rc2RZc_sYO33rt*@J{pTLZSKE#uRiuPKHL#;A3E#JCJ<$f}qiP z%lZJffUOF&&`BUkt2Iwqi(YhKYf8j7c6fPxNKMsad8u-;?rQY?vRE^%c%}sfH6$j) zJ%oDXU^RBKkrC8zgU)p;*DyGrNNl52=Ga%E);SyK?le#(FT1KD^Q+m`h`q!3i88Gi zpkPuM4OCXqFuwSBgpO?r=-C+lNF>SV90zd#Jx((kM1myFqyKCxrhSbY(|3GGxCeEZ zZ~zL_oxYwY>uZFxZ>e+U+Q{<`J3;p(O`8uOZFX#7 z%ER%<#=DA!d0ZQ5T_PhL?Rs$4Z%Q9v+Od-SGAkv;OuDt);erbjr!!;##hZhK+@2+v z073_n*yyx;V@xRjj^r|Js^~snqKuOhk>M}eXvCmFxxYH*PybR*nVBsM#HX%Id}HHs!v}36?b))1xE^i zzj7_jBIPMS`C%AwSwVT1XHx2etif9>A4gk1qCP-Jch~o>CrfXEqCj(dezghB8;`)h)Mliyhyi?d>7*HZ+#mewRQWT~arV zq=}59vUvpulYLr9>!56gREZYc*W3(J#L(n0nAcDCZ*BvA#ZcC$Z2(_!;?~#w9P~ z>$E5-p>%od!sJ;-?lF)hMSeV>U0=_jP7zJdW4Yn;C^Q%!os zYMnVY&`KHNymu_LK3+9ef=y-pF$XG?9PcK=W-#9g;uge8G+0D zJwTi!P0|5t5^ttJWwDjR&?j04x@+gxACDS}24j_s2`?ciHu))9tB>D{B=S<5hNUPP z!&>(H1RuxYxmZ?F;kB-6zvRm3mHejquwj1(H|sgmo^)Si)-eh z+GOeICx0z1yMh$ibdwo2Z1U>)P6RzyTB&*Utsz z5cYpJ$-%ui%5-rk9-BQ}wR9He9+1^FO?ODSTxx$04{uj*RYq#-` 
zH6tNXs$_E6o0(6bP~9~K>g_dQm-y4nMzyt}B_8ok`|o7^=ZD~5!Yz5ai>ix|{#D+u zK}J0mm+5^h9b{P_j3wIUrZ^Od(aSTqxpUQ8lz7SIkPW)_P72bVzS;;37PcQ)+aCVJ zIDPv}i9t=xXIwvE;kc>DL-6NGspE{!M8CxOX9Ycs? z-Wx&5*}Zp*H8y?lAi@aJGPKrHvrI#dF=VGBg?1)_P7F{Q34&FX#+@xQv}6*e(`kEe zs3=wg*}dro88WJ}W<1GsZ%ofERt|&CE6f{O&6gg^CKVKMIb?;7IHfXvmE&a-c@6gW zS$B8VygyoJGjkt4Yu|YvCuViKWoD7MD@|R^WvrhSj$)UabyR;Q>9$$&oPMob<6OUm zI9x)(hZllD3u^w=&^V2yytu4;!w|f1d1a|FiPSoJU_H^T*9>i{8-45yCaT11NQZ6E zV|<0b`T-iI?z+Vz=LGl7XT)kUmUA*43-OM*RtE))WPDDdW68#6=vM5YcPwj6#`6u` zok5SW9{Co+~C)@}pZsq4AAo(zxL9%C9pvH`tj&r&e6$U?d}o?P&J9h*LM zj`O{{D{slSB%*pN73Pw2`5rTYHo2F5QX`o$LH&-vYuG^Po zjT&^D5aqH~LZZR!$T9p48%6AwQM>UMcx)h8k`jg37Bx>h3Z{Cs()#s8(_5lCVw3m% zVl&{ZuRvGUnrX;@JBeIX-=5}uZo%s;Kzh}=vx$Al)EO+C@+Iy1&ehT+yB%2 z)pHIV2G&|Y{yA-HLs*P<;-G`y-r!iaKprs=AU(84O z{-QsP2H?yauYcXrni_tqWb6eZmdr76rbSn$0X8AJ+PyAFdc{}j6_6`!R8`qxyc-y> z#T*+dc$|dM zBIDaFbw$BU;^mgAz%H!ML16LDSV*ee}5E>i(6jW%_v&=Wa+~;<=8i_?ECOXc!$lSQTq^w&ci0ucr%MRO_+WQ-btbXls+ zJL3>n79@?095ujSZ{tZu4R)fM;0c;Ds}c_;sWcS7jL z82>Kyh#XGZeo}EcRO^zH!>;?BH@zp7(#6yF^ccY4_!hQlsteSjmWp!r@Nb-*GP+`+ zx8NM|9#8D!^c{AQgZZ=f(KghA6mq+^jH~Ac-wRg#Sl?)`&7_4kMQ@9=(KiLk8c#O5 zy&!-TsEC=)Y57~r+o7?bj?{|%xWSw#IluHBx^;cRo{y9x@lv8N;{wpFz-qdT2=!M# z<)1&S^y3f9DqjqHN>!`nH6h-%qKna@blY+;Exy_x5uQNS6B65cAjF=o{~}d zwOp86RR0#TwkVV}6=Rr{Y1h()>ui#3dH1#=$T|}(kOHo5EY?H)bQGX8z5ax;k06(U z%wf2XT=9!GFD!D}Vg@{+T2vF0OJc6BqZ@}-ff~<@?gZssXX1*v22;AZnGm;{kS-bv zH&)AS$D_B3+@70&!w@-NesCKl7%LoRw~00zmDVOYdI)n{V0seJiNlg#LF5p70Joe* zNBi1hB1UghKm9D;wbrba0r53{-adlCyzp71gHG<-EalEEt2tcHV$(EMCBAv#(<6fz3Yw(WdJ6 zvvuONfNrB*!U$c4!9bv{&(5OF>5_h{uu$@EF6E-!Ii6f3wP$tCGr>dS_jg75?^xzP z{M2a<&3h=IQ!yaoYnd;st-anIh?sYKy4mF~7D<5n>C>N#4DZv$svNPX&}PkckEg{d z{jDXsV(XYb4)gg=-bN{T*NksjvXY6+T`w-7V^9{*8?nQyfS%54K+qbebkM_ABee36kEBJ#Yu^HOIu8K}HK zAv-{DbNex?vV^0_!0(Z071JmZA%V4R6b338l@s>K^c=Xjqhsf)6d@IBe=_wc{b;kaO3FVh9HwGD}m@P9XHrMo-A7en1+c6&-)p2Ia+$Gc#ZeF zt|1i=%iu(T<$8cW)NWSnFj*0%QeFKa^ILed?$#IHc3|RUH&X%-15?KVnd(5S;*6pA 
zq0yv)3((+%dTaMX@Rj0b`cal?tZK$iYBL~dego&Y9NiF5zlG>|Q>vUcNX1qh@0n;K zXuUwQruyg#`A$6FLZJZCLC=K`c7b0juJ2A3uner zY%8Ku;_&wdK6;Ug*FP?ztlG(*_sC zn$jIp%ETy{rR7hM8-BBH8b7RsQ8L?g^EOm1WhJTpk$3_fIDY7UH=;zmK?@CP{60MB z)o%_|c;qqGH%)wl`)(z*r0d^g;$`QLNbu1pD@YC1z4wlzh0t|P zAp<}~F2bbBX>FvK(euQq?9*I}I=i(-ijai=x2&PUf}So z7Ay8)tk-l|W`1B<>=(Jt@v)TkPs(oY#$zA*SF?|BBV zh;fCa41fOrCfPp#P3^D0h|;g{G?t;ahYg_Y4U}cmfOMVSw1%0}N=` z*w_N&q6F?2>lU<2rfRQ1D@kMfutYAxi*j@$8{Qz#h4KUK2D=zqYU)Dg3nMlpt54Ik z{|0qB(^Ps%JN~on7R(-N$?JN};p;Fv?Al zAWl0D)Oca=yh$7yz4S+5Eyk9%YnQ(;mIZhIz|?0eszKLLVBSe;M?ailaG1`2Hc>i6 z#hBDD=`wCoh|^w#{Y9(aCnKm&sHYl44n>%n(|2PP?^#FO%;~ z#(X~Im2(602oiZ<+wq^SE*s?!l)PR#Xfop0QsDpLxW!{9hjHFPotPN`H@7r{a#O*g=O>KB+P~nS^w}1j8c5h(fG>PJE>j z6x@S36vc6KwZA)xi$XE)3cEORB{JxWgSf9}{yM;cUdvG$nC&sr0U0oJbyFU_{giCqiyK^rDrYh4lA z7|1bsfbmi>@=|$n=Qr#b^j1&M??TzDQsnYv@sp^udq5$27b9kbnYiyh4WL3guVp|~ zm(%6odF@*6tbakrf#!Np>BF~Y%_|%v^(L3qy)CVy51`1hk`+*!0FzwefD%|F88v>7 z8P>V7hhtm+u_V=+1cYYqj(6{lcg5-Nvw5^AEZJ9SVghW>MK*os+Q$p*+VIRBs4Fgn z=s(fs^1KO|HC5(2rtKLG<|g{&nu#i|g)-k~&vYtc>H5pZoe=XW{Hu zW(zN!dD763;*n?0yBfN-Oqk<3yH(E7sn>V|a1%-kSDFy`3+Q;4@2L4p<-Xr4!8rQQ zf}Kp%tOJ#j>>XtL1SRvY27P_=gQWY~Gj~ND89-N-H)LY#ooy~%)Nxex8Sl{8qdwdK zCSo%BLOn6Far|ksodfYCZE)h*qDHZ-HNx|;$T9pGM(oZQG%3~iL+BlbJI0;||9C9Z zZ;Vf{Ym3y_c}J_PhqA=@RTc15itnAn9%%Kt{j7g-CThB^G~b*&Co^B}t%kdT^#7bOu4!`~jfCPJl$n`_u(n7M*U?ceq0f7FBjQC2j=5o+(>uET6oKnVKM z($eo+!NdU?c$k=VmMis*joi8v!z$XYdzHF9+dDgSw6u>(myd6+Hxt%PdsyxHQV4(P z!)QcA%sM_p2E)U{X0~>Z@M7QHP86oLwF^oAR)>TUY!Jc;(E_YtOCWG8u5PWEyvbZm zl~_@7*5Pjv#(q+et}8nrvb|oE`))3Mqpzyaei~(XbGc=tT$R;k2=WlNQ=Vs*ZKxEs z+%U;$AhRJ1<{F1k^sc`OKS4XdQ*B~R6IyH7p7)!NXKf>SM#)p$1c(bNc;*kDN)a&{ z9am2N3<_{TC*CY+_mIT)5bZj=P2<4LmMCe1P`x;OABXzXdOkxJb8HzQOE>@Zb7ps? 
zyC`6WQ!N%P3~Y|Cd%3FU3fxno4-!ys=3R6Yi#cWSPy}}&>FbdE1YO)ioJSk!MoqJh zQf<7@Kmm#`{nxI*rWbQW>%nkiG2TgYn^gYw$M1HAEjhY9@7X8$&N1b7O8%5Ui(%V3 zWpz}GMg07Sv$+B~$#C><$!LPjZ!P#@_X5Uc@Z$z&UCR+-VP^*qHv=ct7pd5M-jF*= zcMBad`sdtoOig``+{GH3>q*fUW22D6XTf{W|y?z7{OVkr)TT>{qBkuVEB!Eyi#u`>Z5o$V3uE?13b)+ z=kZJJJyQ$ZK0*FApra+|(8je_62V)IX4$M~;dE4Ax~J=4fFDw(g8y#bQ$)S={_2)i z5WmI;AA*tZJJcH)z5-D(IrOqNL??`34X6meZqV+LN@JbIA@dR$hM@4APpupcsN!2` z7CbhvpqyyLb4wP|7_{8)4ULH+{SmJO-@VM~aU=J40rRL7E|fygG@DVO*Yu}=CA#Y~ zXqw06OJlE3FCuDq@Gu|ITDdE49+ZAD#wS!qX=$&BNe4*=@yK&#O!m@Z z7AX}(Q#gZtOV4b|$XL$E;{&DbZzsk6P6Ga0*f%ZSOo18nySNt3Q8`z4^Dzd z{jONaF*zOLmGGz>d5)9|I;yaV7r93!k}+FC`+Rddc8$h1aqBwT5A#UN{wpn|L#dh3 zy5?p^H)*14XOU(!X}C<9aMRex!I$-ZSOGTJmtb?{^TH2dC2;N1*nS9hh?xZejj5%~ zsBbP~Wc$e#1NDy=tBp31Ca>+JVrV#hRu{@&G7_Mo@kMka$1>6@6_h`G=NV49O4-vC zqfaN$Y`fau1BV=<=YD#fEn7OS2oh$>tT)+>L<3VLU0OVuxggMs{?N?@(GivO zlZbYpPDFkf&?ZAElhum^yoJNMG%Z_J#wPy5W6DVLUe=Idn0+mOjJselA~h8p0zexU$g8k+8(sQJ}WLR zE>2u!Si`#83D_vdo|pZn(DN6ODuQJD9@vu~XBs#R>mTm+KL1`;wYj5U8%_}VWZm|Wj8NO^8 zVMNxO6(Zcy5jsqK<1?_}+UoQo*WWUtUZ*FUy3u98Ldc>kI|&u$R_j8pDdH3M8_%VT z8nxB6>$V&EHqKf;P?(AM{;IijU9P?7<-_UETLf{RC5!7Mye+c{Cr7t^+hj_(&zsc7ZU}HwMoM5=S=K!*(J4x!J#_0Z@oYU7a5TMH_#YUX3ZOs(S>_H5w>Bit0Cq4?1C zggy;L$q01h3K<6?=g#NIicQlRN9wI${Ji06fT6CQ4M_b7$)%`sOl^$m-e;s=fIc&F z66JrN17x!`{^DEmx_K}vR&m173>2G4=t4b`k*!=N=s3Sen%JSd?mifjS)WYc8%b{2 z`jwQP#u-yx@?;8^oRYEq142I25P@2x8K1Fb;-$X$GQD^y3dd;GT1p74Z0IxbuIZG2 z7%YGbk##&m7DV%Q%NR=$e>C7NV||g{_R~!@r{g{QCUBYpRa9;&*^~NcTmJ0@f4X< z=G&Gq6keHl4M_#7x|tQrj!Zv1qXMVR#2DE}-4btvl?NhX$J`dRu?9KYM# zlsN2Qm3)4F9CgdxFECyzJ2AIybi4a7YIR}Bz$vljnOI^%z}bAoHGB3ttJK-#7Tjj_ zaPS*(Wsmfa@;{Kks}iP3CwkE?nTi>?qvDtU;|T`POfn*%&8#q1YcJ+ph}E#(hjSl@9z z1p8=E!*v^So5V$`w|qGnUsamT`27yifTc~k^!CAD^ysNB+r{fDjpGQM#d{PlZU{I? 
z?+vASdz@$e9Up;s@Dm*7z@B(Ro_^_YMsm44+nF;8vZtXwER{#gfv3dYCWfn&>0!K-MD)#r7cUHZXElPxI?DAOcQYzX`<_B|_b7R^J$5d<`Dd{Gsg}Yj@l@={1UubfNFSvIcSJ+S3H^+T z#%CI#dlVlC{H+o3>PvrddZD-|-qfRloshxPZCJ0S^$DA&oaLJL{_fY|Olprnj!{-5 z-!ySoHdi}C7nvR8i)6u;hOX{J8HCXSuV;Cek3CqbYxbIF))n(qrH1$8g|T$(SN3=4 z<*?^g{xO$BW0i#QRpFMnmMO{*M#>NAaQ-Hary=>~$@HedV>dxZcUqUNiWHTr`)9<% z^yStwv<#cx?qBR;7%33#+JyL<<&Xy~%$LufV^YXT%L;$f?c#}uc$SrR-AGCfR}yVz ztU2drxHX)OvS`eIe9%=j=cx@!)Q+N+`!BTZtp#lnz68&5F$z>cTeka~T>mrD`X6+{ zzkbK73{IZ$DPN!*S#Gp@#>Gip`qNdkPU}yo06p(!WQ4|oyY`)5?$2%xCJxNi6&Asm zJekA%5lK)`u%h)Ep+g+{-`})K2he`VSh#I>FrfxW|F+IF7mh^^zwyUEqY(M36Li$> z%2yU~woNuR`oZnXSCsQDhMzQJG-VJyr$I-7BxR>ot3D(&Ga9V|(^K{BFtM}FE4{x-Nt7nzfEK(vl%S-~Y!-G&F1x>p-sKIPAAViayp5mH09@cr zrHG1~B87iTxSy`~U1qGj2wktk=Nq15cO2Y?adINo_U8+{{9v`-r#Xu*fzTX2jmGVG zvZ+c~)%lUP_q*2-m&Z692^Dd68{b6nC+Zz~(wtSIcH{fG6nzW|J+GwqT%J|~Aq`LL zc0jFYhNC-TPLG56eUpdOhMD=Jk@>lRO1~{~iCL-7gy@gT5JHEMQ=HD2Y$YWQPUbD) zNzpqiteUXi_^DAH8*a%Q{Y0&vAB{nQFJLnkhQzfiw>(Al20O$Jf{w2E%IM>HSH^q% z7=Ab`?%cveNIF}6SmoJPEW$$o`Dlbb84epBT+HR(E*sK#0MPvk%Pw?m!*XP7up&@} zv)sK=z;!kLqT%3|k9PU59#Z$Mn^-XGznK=|WEgNDY09o251Mh61a%sHJR*X1Wr2CE zub6z%-I-l+E+*K(JgK(X-ry|YYh4xKlo=`NvbuhJsv$Zf;!1@CJM5VP7~JXFO%|@% zeQxLA*qF^#*6uZEm9z8Z32s?m(R(iyLBDWf)%%38DbH%bcz$**65+raT~sGFAB0hv zS(H~ip-i4WT3F3zAmaA8=H}|We=B22NAhU1^C!xTtJR9HEn%WG6gNM^BW3a<_V>wl z#Pphoj`eC#%X3TzS7OPdjx9v2V+T!8k#m=5l13gbGx7$EdD`8rpiV=5Dti4ZIjTp6 zL{2DPh^`NXvLpTnJ+;*^12r}sx3hjOd`XHw91)n!Lk;H7EcviXhvCmxT!{^>3pmTQ zyKD!*;>GB=3;I70MUHP%GVO#$Q>$)Z$98GN{>*io7^Pf;ODzX3M6oYIG4KI0tpx5o zfAYmX>FVT@@PLx+y4KU6ae}ui{2Q7E@bSe#r9Uv4K@nSO;OpWb z^*`Jmbn#DB*)9z-$4%2+D9ed>RRP04>M)aNw+GRcY9>$=cV$OOA0eYtSRQ&J^J6;V z1!8D>?gY<38)7}UcK(on((ArMq!N`#T7PBzDP6=syZA&$lapCq{oSG`(5*F}Nh&0{ zeX9Q;{`LiI`e#b!K~%=PJx=MiyY*i%0c!c85)3hC)z46b{AuZhPY!alt2kqhp6SNu z%eva}r)P-YJF}Gvo7}ZiUrU+f7ZqarcnZ7F|Dcw*tIvA|EWQLep zBVwO_v3;*h^#ogaQo%Uds~4bA?&ARc_c!6GKT9l?r{DL+ck(3Bo_$N)8hML#T~_Gx zu`?Show*G6#hGVA@lL8yrPa@nes$qGU2NOar$NMo@pd!oB#!cDe*>>@kgNxXyC6aI 
ztB5+zThM8-T5prt>HK)1Z4;xNDpoSoRm*;lqU}1fCAqMc_5(x4Cy(9fF@}kpkm79P zAGw8qJfwQZm$LhH4xplIW5FNWRERzmtUgc1R>ueXyO5i2L(di}_S$Y=}X zo%|VT0q;)yY8RPOET?j1?z@R+?NPDc;@wl3P;skT`6PNM#=wfyI6KGK*;B7+t=JnWyK(<( z?=h(PjP6v_w7Ygu5-b;Dw1*E^=;Ej-AHIciF;%?1Nr3~N2HgiQ`B|GKDawXK5tlC5WaJHgS zv5+zrVO^Nf9?1~>_bdQG&t%-C)!_wgcLoZIKx^u;+9>w&$Eh zV37#n7!`qv>5A2O#pRIukrwBqi(oc>^pn3;f&ieiehh~;Yg^;dsX&nN1O+mE`18;+^IHa&{* zY}7&Ccf{~{+55k&JI|65NN>KM9L!yU>I)f%=8;+c2Fk@Hvu znf1kmp9vQ2q?qEq_sTkBkshOJBxlg}OBQ_U-2yk_3-V*oa+$5o?@cl>-zJ0_kA!Ya zr-#LYpDHPDO>b;@UDp`f+J4ZfP%B2iUEWb9;`$>ES8{tKnQLekfu*iC>NcwdT2q*_ z!N8yq;yvSxXczz~r#8sx4*Ow+6@|TA$roL_7~dUP1st~2XA&=3d2`hV0Oe0Su-Zvf zI9=E6KhLGj7_2>>w-x)^>}5% zK^C7<)YLE_p@!_r9}}x`OR$D$K0;Nt;i2R?_iWmB`0hQ(^Z_pAUkVNy?WB%AbDLe> zcN`JJe}|?2Yar0~UcqPo&RWQlNp-)wo3diWU4J^PYI_W4vcCI^v7xI??nFcb#?XRnT`45E*rB!0^BBX`3oqH0-0Du}QJpD@i%G7%?1lLMs4Pp~*9Mh2kKn ztx+b$h11Us)9E0e-`1uwrOvW$82{7UOM9gwz~y_*;k>^o)__{Bows}(J1Lq37gH>m z*J|e`I0GnwpKWxO?swaY=Y=U7@v_ce?*(M@(MEci&LZ`IF@oS;YK8wOoarBqRF0&b zIjY<(fy7GIp{&{yw*?wi@$1_g@gQx;MTuqYpr%h9E;iz&ogiEYL}fcvd|)^oV&W{R zybHNea?xW4P-veab!j53Yk&8+4JrC>ut)fm<+_l}wEtJ2* z;^C9ml~w=~em>0=1@EivWkJG2Y|m?*q41|9IH7u$jiAJ9H5y2B#d(mNN>hAy=oN0C z8y;wVfOST0Nh0cll3k9QKt7fgi&+6@!N9J=d3nJqIY+uKx2KorGykarwkMF z<~+RoCV_>3EBO*_o7`>yEl?P`G6yW9asBZu7$$z9{@4(O$jyA5jDbcYYwh^jTJlj4 zyX?+I`m;wJPeGfzwV{{fezic>?oEWT*9uToVZbZg+7Nr4 zi)*xTV`v_4LVzJB>vLfWxQXC8>BYL~>ycoTW=?QT`GupkNzm8~Qet_AQw3<6*d;T; zyd>kw2iy;v&2Lv$akadlxOKWgvfNbz8UJnqPSSl2czZz^+smn-%Xs*%b#cVHAJbOY zksnrEN%U7=1-jKFO2!5_1U!+86cPB%H_8Ie#iZ?GyP+}(+}fbD7HmGCx#J8DfscsE z`~DplL8H?#mi#Ot*rpd}^54YH|K)`EIw=UxStZe2K`Uey77nF6iv%HE*-^K-KL|RV z;t;P=N?wpf@$0d2v{MKN}BPM9GQni z^f-7Y1=AVN&pkgfP(toBSoy#yO1e=nGQFHLSY8ap`tvyC+eI<=R0zVs;P5sMQ9}^W$;sI_^VeUao;Sr6f*pSZxjF9G~EGsAl z%H0Rbc%u*9-=cjTl;>36JaFJ@*)h%R)Fdy$qqj0ZUO~7MDta66(Wg^ye(|H{?^_i~ z;N#?%5rr^bl?|W~2=RgXu_tss2o;W?VMLENPJ|>GY^lrn!NUKQ_DMw#7H^uIP zdP8ukU49y;wm3x@+eyhbaBs^ws>L=~Fi8zr)p;uKV06N^_BFawP;hIo;Yw#pi9W@_ 
z(N`VUH66H-_C$J0m#Y(zKsYVEhTO4kJNNd!cSz(9wh0vvm2&0|&pmA)bHp3-el{Ifhrn4c3p9;X0V!O>m68&X6io*FW#OQNysO zh~kt#M;n#0#wjv1^mGs*Fy|6=;W2o`E$ZU2yh!Z7PK|GvBDlq`P28CPIpyzz_CF{r z|G0eK*%2DulU`(MqP^>oKR(?)zr45?2}cnL1VkGe8Y-74mEGT;Iq7PYw}J)FiuUKw ziiIPGijH^Qj>YHhVx0)L1T~ahjOzzok`QJpbJ5c!K)CNZ zSVhoXM}##Qh{GM!@#TAyXjdmR-*~n*N#q!~@NLIkPrbE$4ZBDucIM9eihw|wTE1#q zYOuZeTc!Pnl(Fkp^E8C&GWcD#;+=>McO9TIeaV#-T(X{gWT6M;=h~0D^7+_Al_w9F z^v&$UN@BmQZb%Cb$`1Vr&vL9^uiz4P&OgG4Gk~Wh7e%wSRTjOH`P!015aZ;|gupj@ zk48b2cGvRlJYP@Im>>5Bg>t$>wPIy8neeNIhcP?A^9E0Ep~=(i!C?hLh}?aW3T|PY z$#Lv2tOVPP%)j*7E4pmovmn&HJ_eRhikj8|7`wPF9*K&g8LRJ08HMG5#zbHZ6heo8O}6W}CF^ z6+8BcY$9tHA#83uLN@tcM$>(p`68l1m) zq>C-8AnE3fc#>7ExP}6H^yNL^BI7CRyoUmt-S`-n6YI%1yEoq~u+t=H7Z=+f4H1N5 zzOJ7`@{_jOUSy(yC>u7VIgF-sG^L2T0=Lu{@512~odT=)T*9UK|uBKgYfy)NPiJRWzyzY7+#kH2J z7|H`-gbsF&|AiGZp%HE)?niGU&xvRKM$@w3eJG+NnMQT^EDCKj?gnqy+1cFOzYYbx z>5WTpi*F{Zen#UC_ipRe95IVw*^=B`@`3@gO9&75gVsfG_!#ScBL|F>2{v3_zCf({eL*Wxm_hg7Y zetT-_J343+d8+HwByMV6IAbD?^V{=4LO}DjLG8D0Zk=vU=1rMc(lTw};&1li8^9Ejko`H^n*12|Ci_j%~dQ1XRoQEE1| z%Tobe(^!iCGv)XHOWpj298(g0t=F?(QSdMZ<2rF9W|xUuJf!V~(yrTj7btKi=Gx9c z)XkW_o}MOYqlcshW^r*b3K74{Nm&E<<1qo~Wm-}j8~pDVUNofdcPWEQ9z@x{+Lyld zUWU!zTc*Z@+i?j<_*A$l3RQTRCvCGg*bOtoK+;NnBFu-~bL=skQiqT23%(1x4{T!T zXw;FKTe_wii-`JMKTaT^Ht0yI z@*r%2gVL-m0#IYvcdCvjyoGiICd2fJo`1gRW$!nJ^f`S5NypZo5rc)cmY`#uMLI3_ z&uue2iM|(*esqMiHAZJd1y9T<*7-v9l|x;*c3YoG2RPH;z;G6e$U+{jps2VHOkLV{r=nD(qHT; zXI4s5tS<`tHQ+Z8?;nT4i;2~+<$GTr#nSOBbOhlsmo*^(D@682n8``w6GZs+l$rJ8 zFq58hK^Brs@H2m}5!$=b0A;{o85qou@tKRXF1q|RFE1z}9{PMdpRn5E)!d(m4!H5T^Hpw&t z7Pf1*h-7sciE1x(uyW`|DQQ7F!>Sj^piU2MJ_ZCoe}}-mqfPD(SM<;8jn0$a7bXse znEM0q(nNUAkI0@Ijn2#_+BTKlb(*;EayGuQ2LReX3tA>}DVHHbms1jAx9Z1b@ zZR}phm9j^XK{6HWtObLb){bx|_;`4URuh|caS{pK90Xz@=Lg|N+v}pQuNcl_*D0K% zL%(}gVk!<0k?EfSi%o-)8i_I1D@geb-z-dm$mNWav}IRm7c?B678yTpCtr9zhg5b* zg>PbuLi;R5le(devuuSED-;<=M1SG&YT;0sot*m&15dR}k-)|CQ4>0c2zEE13#m&~ zsxv+ZCV0>19O>s)j%oOgL~Irr_LgYV9UK&gPz!vxug{|I!_GN6QKxz>M%tH**Lces 
zHk2_iM(m?uIy>S<^r~Mb2$tt};bfI6&U<1KkLa1Y9HsgJv>Nt($~6I?8LzE(ue>`4 z{CKzTV)Be@4$I@8dBdk1B75eo_;fDRFn%;X3mPCqx{et#hd4Z2@9X5>1HBj`+}H(> zb|K>rq`y>6Epw`L8q`|~j0$dudX_yitdRYrF`G^nl|;8$84&}%vSPA%wt}JLHa1E# z&qwu{XfDeyDIFmDe0Fd+>d%}u+SGIqb}kGmsTER^_xf3CTGov>3Wa;)rarnf$TdlO zm9^3lJlRK6%5JLYeslfX1S(9YejEJB=24 zj59p%lZ?~1%z3K;HQNX2?&B{+h=}-}pCo?%yt&4)#&pk!_`LEBVynm(swF>&KyLZl zf-;3IH(oSA)qY`)1(2fbC>)_t0FXv+_NbHY<}@G6pf{>Xa*{|HT4tBafLm(L=jhIx z5{Ub}_jC~d{OmSGtJ@&z4LS`D34e!=_~W|elx@BpTRg>#AW-i zN5z1RS@->*`>6gHhW4*AiVE844a(W0y9&WL3q+R)1p$GC?W5#9J2-smOwNEI%&E{# ztb5{#&=Wr?p@$#iMf%y!P}v6OcSy1$L?CxnU1$rQvf-fTK@tj=spDg_us**A#2!dR zQOx?Aw8mF=*>Fyw4)`zFnTFhc-5wkaWCNK3142+g*IjZE$OiWBj*C+`XQP0Q&ywia z%H{grX=y9*ES{cD=?14v>>0B%9yDkuZs+1a*#4>n_|Tk4Fi2ERYI4drvA z6UZlnih_A7XGgMxZKsgcnX$lziqL?fLLS_ywwt zTRF{G`2lT-NDd+CR9Ie3TDrX3ysUd+R+0+CRJ3EMt__eHSUFrewrMB0b1c35om)muc@cb8vRbZ7bT zM)0bz8gZ=Sf=7TPKRP2A(}po@7j-%%EDXVBV~8H(1+R(AUb}${R849|i8PA0FDk1d zZ@8AA7?W%~L|?o4$1mk;W<|A1u9Z!x*Egn&@yfgo;-c$o1o(>6C@%?h<}Nm@bK3zQ zl>z%ebqdshvGBX1@;ae*+J@}>E018N^Dtm84n}@?7xpIMGgCucv)#UC6QjLw?wW~? zq)6v-HP-x9$Du9ZPDS?E*xeYXb3)kKz)uLtMT@4{ShLw zoGq1{TwK@_qS?>&TD|Dl8EAZYZ7n_Y%q=oW?JIKHR=m#Iw;_}3`~tZehtS+)kI2qI z@REnC44buYRsyk9ZA~q|oNEv|h@a{nVGRQPSe#LbH}fkgzPygoAg!nT)>mXTLj2S( zo223ca*V>0v@2G03Z;V=;-|fQ!2a`hn^_iMrXV3vJ4jwp^j{U`KbDWb9}2fngQwNB z3@#V_L9qSy0JZa|DhN}N3kSs9J`upWzu4%o+mjGPGTrdFk@BkI2;{qHwI5Anp^Tvc z4=&>7UfkHoxOsyf=>LhZ`~ z%Rg=*+pb0U?24GJ5#A@j%T zq;Qx%QXwLfHox%T-0OR@_hm)p7=sB*j$KH*V?kz)7#)-YmHp9;ku%F%IdiiG8ZSKiZ)+5| zUly0-J3@Hf%UOJH%!{$*T2!Jy_>s2inuI({C|%C97*_d3J>oHDnJzZ}q} z&In}2K^~H<9@ST(>CCx=wBM}LbL&zD7 zW-2wUCNJAVXGG;(G35MPB+R$mli-k9;uz*z8j;sDz~iazKzGzj^7YF~O3GgpG2xb? 
zdOEJ)Ysf2wKZmDQ<^3@+02~k9`3c8aMykd)55e$>KOCU5n}^-`dA9q;pDZ^^*adM< z2gs9{AztkDO6&zmN=_rUmSTa`?Fp91Lrr)AcJsC{BZhu>LIrWl;mU-I(Zv4qziONj zRepK&JF-3a=Dz4FEq7sC8_kOWxD(Bn9oEuPD7_B`0}#Y83UopXh&N|WDR4mII{JjR zx2OZCAu@&u%LbNY7L(^e^)NIq5@6I^C8TZpfDyT>hctI4$-0;cEa-}G*f85Pu;1Z( zzbBd(&Po~$r`_}H#C*3D!V~P@_^OIsE3mxp`~_>Q9wdDbe&^C6Xt0u)(9RnsHcVg| zs-T!}hkA1%PlvamJgB~~in~BivYf0jxZad2q8c^;8VG9iy1Y#WXm_16%P=eZvf?4t zmc7zk=#uKJ60)81H49Hlcbpx2-% z9~LE`!Qj+o=^QasQ}zJPA^$a$&QZ+RId#%r0dT5%Ii^T3Y3Dvd%1__;#Z}5gq{ON6 z(>wVl@KZVYzwjlVY zoULoKD#{-FwHdS2he}Ylz4tp}%|rs}A!v92nOzNz?r!F~<+1m1Rlhh5NFfur@(sCaWUTZ(u4xba5hK9zwT?~lTl*#r zQ3jE(9w+(yPEZIOAA()ZmZ`!T@BHPitST=vypa}uaIOa%n&>NqB!X|p74#$|1Njh@ zIJKw*=VDvdT1K>{UccW!CH^i;O<}{p&a3g4D~yTc970OODO)a+MBEwpUJr>9uD|Z0 z^6NpQ4AU{6*Kw2is#vFlxXHHPU+itiyRZ@KdZ;PRQle($z{BwP_v86loTJoRLb_wFSH1!e*h>?9{5(c>gZ! z)eM)PAN9D&HdOUQLJB%nb0_p-|45R3GPNm=F!cz$$7*=(C5%JQc4Z^e zX4sJ{Vjyt3NU6PzZ_Ww&9nxxn^ow9HqqIK9Llo&Udp(n;Ly%yygt55hHc#8bU>7=K z($^nf9gS`vpxA}{dVT}5D%raKZs(3a+N7HoOE8cWB4O*HHJB-q)>h)cS=*)hJ~h2y z?0XPr556%4%msfFrokfYDNjwBuKqZqsdghJ7}1P@zY_gP==vL#Zz{i2Z;m#&kDZrx zhwO(vEUm?YwyLO$9m<~COHBMO`#wj=%N}{yGVi|lzM4u~aqj1HFh=RfK-#%&eW^Z; zPfo3Ya{T_2$gqo-U`A4#Z$$QDf{l2DB^YNVO)@?*kB#jZ8hkgqMEsSg3*Y434(fin z9&NxOXMo3k;U7T3%XatAxvE~wYcJZC^X8M5s-6M8V(oryy8EV+jt58RTfZpBlibbH z3=MRsMa$!ZEs_s^MAg?^PlpN&y93wS&wV@}Cd(`sw0gO*j zFg@oz^@>eYG`rL4Dk5S3jxIh?UmdD+O?ae`fWupxYt2jlkDWaW&D381;{>N5t}+XT z8R9H$iVQz7v0$#b7uV|OZ?D(uK^P?bEq~JQ-!nW;sJ*Kp;k~`Rkp*6>ef}`C<3h$7_ z2(5z=Eb9nFgf8Ed;ZBmd$h`6{ur`b?Frn$T_e=)kTY>R+sPUC968jQb@nxTl$%bAC z9mm@`K@0>HGc`sVKah*^I*Y{f`-?-dPg$zkTFLu5tG}U6jmC6#3Mtwzj>fqOfo{U# zAB$Spb|XjpU-$=hvHxFOkW=7*>0Q;T*QD$EV?0gIwg@Wt|0YN3!edtO9#H)FR*<-@eDSF?KcvvC?bIHT{sZj+Tt%Y-lq3NDt# zAi3oV4bFsYUW?uDLrvEdMP5yA0y7^MkxmU@ACx~g-X4*THwu4FJKraLO>w60ht?ap z+wr>}9BvW&HmP}^!Jyx^vz8kc)arL14plMP(SR^WK?8}WMiqbYmVX`@H2ZwtpWvc@ z^44me99-9b;%2ME73Bqq-Ft?o`~|NfWeWz&QnApwJ|xcUTxhn%pW<=$^VPc0H%%)XIJw1`MFSACEq)5K8-L& z3Xe|){B^+-o_qouDidv?UY&!iRjq`R7;6ngoYpnX;%1K&fG^Ij?uYkEd}Byg*9CvR 
zX-3R(SF}9ovTbnE>w2Yf=`3LnjM8!t32s7VvqFgZ%-n)kom`lWzA4ImGX z3R`qM8K5EhN1#Z5k=Bgg+rv5SQp;Q%3QcCc-i~c_D^yunS{D$;ieE){lQeGxgi5A` z0jp)f8ynTGws(Exjm5vTL1mJfxGg@0;$L-27&;CqZN3Vw8! zGPn6P+HSss=kpODBMJFdu76$zzArGy z5!{1a!;>~FE-qp}~vVu@yAJNw|e+d74 zoh}D7MiN)(oF5J%gKDsEu~H-HUP2g7(2Owwca@tF8LK9(N!l<@Rru%lQL>y{jpxE)SdJZz3I(*5Qx2N-NhG zDq?DGvJ>$iOJaNwXghQtJUau`7?$s~-DVCIaxUs$WmGQY@7Ty{*BLGQXC(k;Djv?E zvo6g2#XC{sLF*;SSK_+y(-%bQA-CF9ZCi_XK#P=n=p|A{ExqdRpryO8UWtdB#nPyb zH_Ocpv|$c4Qb-*e+$8w+Snh?ubPI1?QZ7w4A?hs?27a=XqYM5iq>m6!$io5<+=D$X z3zcHJhW5}ly?f5qm0-S%ib}wW({@RZ%lV%SPdM*3_lJJJ;xG%#UPy+HIE_a`| zZSyTU6nv=&I_*hp9)Ct6J~qhF{JER@Vh90AYfI z8ePraO(C_5JvWxp+6#Y2t$eg~jwtVnFi1YAky3dz(8+jG(N~W0G1)gYY0I#r;s*E; zjJ{uHX|lq^dn@D)y}yC3O?C4zjfxo=b6-us_ZgKwT@EH6uumVk zTpt+C4i>NlsBMDnc_1V@n^2z z7T0F#(ITGrOX;{_*89+1fhqWs6Q|9CiSp}x30bYCW?LrMr( z!u`a-T;bb5(7WDD19-QUV8YiaVnN->Bv8q%9-(JhG!B<85A`&!jkMs42efw536hB2?m_E|7N1Bzz*E z6L`NszF)nACD`ES*P=ZVJI`Y1STOUbBqV(3Ib03ga6pJWW8jYMXcu}fq4;{aENPyG zew@8{XTt7np=O()UsrEVX@%lT;&S0HVVoM0G(uBy;pV)%@rEX2I;6P?VnQx6(ZyWm z>xh~AH5(~{_X{QV`SyT9I&J?L?T8 zmPLj;{X`Vtd}KF&Hn3toXNc5U&Ny%nd>J5fmK})va#}&4c!t%8QB^}U22bfQn~!=zYHln7Tq&W4A)&{_^m$Ta7^UiFWG98acf5$1`Ye1T zRvMNEz9EXnMW=XmBS$ZGbfE3q44x_hhv($BCZnyFgNI?s@2`O2zSOt-=N%hV3recj zj*h1nMN~H|EF$g(Rmy`!lb+u=SnAR17T?B3-I@r2wzLJ-@ee19JRTHIQw$!(v5Ia z2UNysup2-ZhrONM&#EKm zGNK|JR4a{Wbt?D1mOhUYff6>50c(nev#hn+h5?;>%x8v*6auL58vx;ERr_6xYolag~PEYqR7+}F{Q2mI-A z%vbo4ZhwxIpD1S|KLst@^pj?)JBd!+5mr{bIiJ6eHyb};L2W$EC21kI6(>Kf-AAc& z6kD#u7_mL&{nmRe-X>BPZRNZcIAlwpx|RC3y0Hf(H#hVFA)cwr#VG#ISN~H%$wGv^ zlKEu)=|0>g^leMkk|*uLQAO&%QX9F}Lu<cgWi&o6vE1@QvV&znw*HY}flsSaq8@#IxL#Otvo*KHPYHa3!#P_v3xJS=B{ep{1X^<1pf@*WbQUaQg`h zQJ9}@Jtj2ofm_HGX!`pz0rtUR8p?^&9xUtCxgG6>0Qp6vI>+bYhHu&7{6s0W`<;!2 z+b0OBCnQ!~_Z90F^zcifkda~EV1gxyEUYO;P;NN@v!L!C-lCmAr262dS?|p%AG5}n za-yuFH2~doj74fz`2I`be4x4xIz=DAjT4kh2S!j%s;cWY4{K+QmnUbJ>rj=W^8m6{ zBFV!qaF6}%Z`Higmh;3e_}KG!cz`ATYoWxE+L9KN>mb9TH=@6|iOD;t@Hrvj1js4z48ad@wt8J`jqrQXi@Gi8eB4d^F 
zckgvt9R#5b9A_!l2aU}vjF#UxjMgMyu=o~UC#o}^sv>wKJ`BDPX-=ZaUPb~FjbAIwKX+mX^Nw4?Vb;3(~+t=I_)OoxYm7bX`VL{KI!>pZ~u1jvDBnj8lsVixgYMU^ZGkZk~sI$qPehzg&bXUGD+#MEWKdq^M0(? ztMokE5#k8BxYh7aBGQrXNl`OhPsu0zZp~zQw9{?tKIpXgu4ky*+M0ui^9XE1k8!qfK!`M^DeYf`}* z25hXlBcrIZhaGm1Aj0GmL6XF7i%h$`fqF)nq5`;Kd1|}|$#Z`#U7y3q^V1#j&7QZ7 zU)+HVNt-@g-8_Wn6DoW=w!JCcq|A>2QaJ%DeQK4PFfD9>vF)v5D@SbYKh}c&py7Qh zN>-3x2blyt(?B7Zg*acPpo1p-wBK3^T>^U|ff9+}sRkP@JS!I*<+nr2RHQ^aukBVj zoYe{mFPNi468AvCyH#Uo8FqA;VE@I(lrls~|87!mlET4Sig8wW?`LppnH*tJ9vX$` z+gc>V6uAzs`#0I|&^Wc&o;;(kkbez6zph3D4w8UR*Qi6A>e=nfDQmlC;(Xzfu?Tk0 zNU!s9{*S-)f4|nss?LUiMvlH*E+`=?BJ6Ui0Lccj;p1o+_c8D}?($o4LJZ!*~*TRK)YY&00r!*Ol#E{(z zwfWjQ3P~kOudn4d%lRQXwa-2q>`{4Ey@JyVKg*&<)8-Dfe^s9OGf>?MbKwpyPe0xE6aAwz_?=W=k25>(Ne!9YpGWbR z@@Qp*w2M~T)dzx}8*kz)-@M9($JSIC-mr`7`YB-Ly_@cn@nQk?)D0$TV6l zpt}WN#y0Z*w!OhSvdf%!YNql%Er{A7qyP;T8F`Uiv)!ZqVX8=5L*s8;Ty8lcocZ2p zUO6+(;#}QPYl3iNLPBD>Z~(>i!rSe7=yXj*c_;(a(OqLzFt8!SX89Tg@XkE=mH-v`JZK zXR!+~=5zcVWG@Ejdx0({SO&#~%V32L?sIztnmS8WgaT}`O;0lqSj_DtD>*4i!Ob4` z*wp)FW;$Uj`{&!=B-V?ZlZ0@(FQGyiFcQ+_9X5bs6%Njtq>nzKZgWLK6g-}#)lU$t zEe*2IMnM}4@;LT8Tlzw#r4v`N{GWTu^VPCH5#;2A$VApjn?+DS@`NoyKbENMbTAeE z#6h4zKhKh0o*YFzu_^v!t|!&5bbV?JqbgLn0-4tw)s_mkU58t(@#JX4)b>PIVE*mL z#oggTJF9Igdr@L#OHoW3TnDrU(bk2pJ3L%d<+~?frJl5BEzLOTMCGBPN4ostTBYgY z!(gVgv22lo%6Mqe6$4+haePcveRxd34&A2Zg>Z}%rB~o_nEe(%pB)Gy=~5YK!e3w& zLMq5(SX5(YlJf7nSch&6NAm`#e`U&0EJq3cqS_MLRfO;w_!D1v9Q5A93)Ck1O`*(q zq&k!J1UwGI2gW6eT3E_U8t1rqN^hbo7+P}dnmtTusOE97L#2IWBj4+q@_#kb9EgpEkJ9tu8MT&Va%y*a|=YQt9`jl;<6B7(^b@byIU)Riz*M2 zeN?So<2%P=7x+ zcfjpkFIXvM*}8drf9i6)#KY+!B%6PdRF$fc>Ub}$5iG&Vd;P0L;r!U|Lu*ni0_AWfpt^Hv~H`@agEcl zk~6%Rm>9~bxP(M0s`)nPva>P}C*r_#tLt|PY3UzbU0r(g9gARlXXklB_WHDp3Y+?5D(*|mI$PIZa6 z^a2S{hR*1N#o%B_~aMIZH?rZf?1#l=lwpWn$4Owthm>U z&Hb}5+1w<~&M%T8LSa^(y41@_%e_Z9IcZc#k#4hy6AZ1XcXPBrd5id9-S~QM&*MB2 z63*OyW?4q3#lK`}a`%Y0;FBcMSi-_bn-mMNr~J{U^P=M<=f^#(Ed0^O^JGIR8MdlW zA-%Iwpj72XN|SQ9X{X9%eyXT|NVemrfC>O?!w3mGbCb79-)$zqxd5o#mi-%RpxxMN 
z@5MUbW1aIA+rYZrQr+Gw#Atxwx*=HG?$x)2#johToyg^QOf*mk7wO}1G$ROd7p=^L z!;0}7pA>Zlj|0t;xFq1Q=3P$thPbNpzNRS5GfXbS?aG$$+p)f>(R52gUfW4JbSpo| zdeYeWR6c9RmRe5OF?@b9r@e$iN2zT)7WQ}o)(yDjA@rxsY4-Q#!g=s&`Q4vS?_zwf zUhTN%qtz%<qZM0Re? zSP|nm+4Z`RD6Y#Ni@&IgmVCX$yo8o_JNuk1fJ(*Ei^mgbNNSz*!|LC&0KT+I*+cgB z*5bVq8PQwDw3p0u$&r~Kj-NpQQscKqv;X_@Y9 zF}=D1vv`yI9?i|$fdi|EHq69Y!0m`$^1KvH=0YZH-*X`IJn-=eNt$_M?kcfzfjURv ziTZN*Sn35y-V`dTjZMp{{KB={p?4geU*m6K#{Drbg5^L;C#z4G+@J7NC{Lm`FV7S< zzO;AzqJ8yWT2G<@n%i-`A-eAxm~tOqxxW9~{_5fq%hD=2-UsccwG8p%{ELtKCoA{= z`2D*!?GszO=`*y&#N8J}+&i$%+YI0hR)eE>Z$j8}7$-9Y^ziPku8!tQlh_SqpQ{H6 zL10688Ha^N&9Be~fTSOf^GTlQ78t@~Guv1x5-P1qD1RVeEef-=E-EQ+?;I z7KV7Og;ynSlyZ49TZ=ca$49>X9xqNvJ}{pOg4UWrPJNu3TVY_*JxJLIbHa?5CCE9y zF&N>l3Y$!l5k)qIZOk(0Y8qumI+6%3kfAveD+r6@+v9s`KkMm|_gM!mUwVdL@BtgE zp4%-A!*l(US1-XQNjA)E>B^av^;T!eT{7-*_N6wFROnrFNeQ=@Cxey_*`Do+;uqDK zuuR+F64|1j?ZEVtSMT1p^dwtM+FNAd*;Sk61?g`{DAI_>C#6^g@IZo!GVi-}dZjnA zIW;2?w0C2fpcd_2E?>Zgu4hrAsWxu2g-r3?uX+5jW*kH3`eygfJ=j!S*~iTCj;;-v z?#X?s*f*EK6_GwCy>#UzDBRrh@tkbhImhEmnrcce_$BJ3>W;U=8p=E$lR*0EyPhQ! z%KU_ObC^K$huuYTnip$#M#k{G}# z2LyBRVA=V*0}R4v`vh&*VD*Ie=ashG_yYBVSv;bHFZQH_js_l0w(2(2#|c!A1(>np z$enr&E)09S4uIn01nDh5ZfQDo)Q%o(u#k!8~%~ zz}X1A*+BgDuQ{+9i#Y=@Rn^LC7lF>AY64L0L;4j;kAXhh3~wVrg->8U?R8UtM+B+- zQYeIO7Q4Ju72JTvlAa3aws5UJ%AI6wRv3yM2%QQ0xfZRQ++p3(raB>sr@z1dG(On3)uG=FVSNtHDmyz*_3~VEyOCIpa_3@acL6oO9@)p%dCf|zG|)?5b}&7jPN1yu zD}X3yhZm(?t_z=s;urtGtB>ZKXl@-vrbW1gr{!tl z`FeB)S>M2&Ja_ew*}&r^%*U_Uh3NAENZl>~-%Dn=GI!upLc4mAAKU=R_o z_=ER0JceB4pAmy-^m8=9rwvksf9$co7$$sry*m%S`6CKzA<#tS<}fdE3n7QN(u4Qj zu3K5|Cy37Bb0(Ec&aIiM`ez;LuDx3yKWygo*7cd|{6^E`h!wLYUSU{Flx&P=w(9qB!w zl}x9m6xu2K36gO|IH#Z^1{*C>6zy=-Ql7&?DA6}atuld0H`{A%~S7>@3m^y2wtMF;$! 
z4-8>f?3ksiX%eW@cij`X9;}r4Pid73-sT(D63L;`AEN>{lX9|`9?O<;V&(L7+NvQ*F?X|B>=DNQ zI;4M)D$s+JGt#%Q;Ko`E3H~cw{;y~0zgRTET3A-E#XQaN~>`iAR{F1XaU-UyZe?%2<#>XkS^*^@3%(62bwh!?A(v8$VnH zf#6PNBtiWAC=ryRmW^Y}K-e}@qIUM{@>cHN&y%zB3Ni=!rjCbwf2V68m~}wsUJxtl-OhxMfyqgL z5D8fR&&rHK`5k>$5sFq%1LcNUt`Jx;*9%74dR4+UFMpcb-7HJQgeqs|^MR0h83~El zCI;=QgX^jq&y$Pn#s|`SWmRmLyut};d}!Q#0ZpGlJdZAKC&RkYy2wX!ime_-OcGbw zr90K9zqzCPA?l{YrEfSo%GFOQMj3#6>+~L64BWwU7G@j*wDTFH9xtbdkV0Uw;H8Nb zf^@tekBp&GqlTC}p`*Z{JhVa|4rw`~WAcYj(vaXHAzW;~HE08#Dmjp%zMhgFEP?Gu zRkWObFq=${cRY@d?-J_X->uP^Q3w9qzdB?SJD?HDBcQAFd#gv!cQO{x z+9xe1w5m;we0~vTEn#`EJCVwah7`e zC($Yvy_F`q+-Xmj+X?349DsE`x-_i?mUR&;)?JA#vrFQn7qw2yB(#kpX+mpbE7%bh zTSugi*0Vm(KQFANyDpoW45;XICoT&DSMzExg27gXrkmZWkFAG>tCb%4cAr=jSJ$mXrB`E=w77~pSm7*Vz?c(Yv3f2?fU zO^9$4y!v?@Y?ClF0mO76QIw#jD*hH>dKOJLWPt?VRHK%XZwaV(#*u5I5GmVqk!-Q~ z%gV*4dL_Jw9FU+*)CUd64b=S?n;(ti` z;LDN*qlhw-lZ)jo!-gm`y&Srsv7lgJ{xKfy`IrLiw*ZF}W9c|e&*aMlnvZ4h0sjBE zArMO6&$Jo_TTb|g5!1O7g;RarEzU;orQaU6SoE-b+q^JaoLq_)n}I@AU1)3Vn`*Cc zzN)D_ipN|BHChQu47?Vp@x+@_I|f|oec>m9m9fzre$To?q~&ApCO|LHvv+=&urau!B(Dwr`%HU3nyC?QZ4=xy zl!FIQM6}~IcbaG)ELHGwQr?0a2?JHd+w5x)5O^K_QRZ{In!eH$dv%K&I$pe@Mz2xJ zy}dT?RsTuRweT-@AZ4B1OrY4f=x01WDp%O}?HO6j_+Q6t%!`N?5=@M{ZDJzrZ3OPG8ybtec=cSkhvoAsbLTUk}x2@G=lx@paKkujJ4`WMG3mmz# zD7_>q&Ob&lC~QLXdqkXnCNPXEaaG`#E}$B@N8A?mk1;Lmo55+TdXYHl^g!3*vpuwBcJ7U4!izNyhQ`T=!>!Ma^7|ug&re z>kpd|Rs0L==H)dB{O#Cs*UjVH#UC6vZwg{;4=_wpX72M3e8HP9Hk4yUdaqtQWPLM% z>e&5ew&-lKEy#Xap5pLsiM6gW=!+cT@KK59-veYdR^&iHaacQre**)*XJxYrrePXh zrq9RT_q|(8hpzu8sr{d}o-J)k;^&#AX77>1{%4`Nm2v>bd2vtpV-TJVHlN2`2u-`8 z+)ViV40&*3L62)-lVUv|Gy2xUO^c&DtCsS|HJ+dsH^ z9h=+cd$W>%OCD)%U!U(4xlF}3rA=UzFShtYZ;e2Q zv`W7+JQ9b!)(?&qnwbj=d+st)qoGjw((WJ$>vTpHbxc{E5fnw`lz>@SDC>K6o*Y$6 zQa1l3fw|8-N>;`!qpcaxwzxZs*6C4lcy<~m{s`A~o_DDifzPK4+6&h7Ls6lr^N(Ft zEC>orK|-brDE>1hf8Hzw&1*BIB9q$2Y1tSiE4=P}_4#GzZd_HcBkgzH9PCQ$LP7_c{}?lsjq5d>v&ROI9Pt0I-D$gRjfr>*EtEzMgCfso4C@Cw_|qFLYwU7VG>=aC8M3hq?tqd3B8u%E^x1C z>J!iQrgy3zXEOtzG`utA!`4vFer)YJjk?D56bNjQ(i$x#ZcH3WP|o2ki1<4Dh|I>8 
z0Sin+{a24r726eRF>ES{2kPw3Oeo_ODv0@ib_W0XW!JKN*BQXc^y7?6+z&*z-tCoNot%)!+j5-H6&kFhc00Ggok(1HZ6{dIr zo#p!4NhO>WV6V2|1Pvt33;K8*pCD3S2y1$6!?(r3BcC79n%y>^kP5pANULnCS=ni7 zf-3)Bcz12(Csxw0dHl^zjNxSaak$d+PBkP?++8r7q+H2Q;k%HG(KN^9N{oShM1+Pt z@+G}ZM;nw9N}MK~YV-|1H@y%>3fQhJ+5@&`?J|D#gEsV#!yyxy>hI1&PaLW!IFYqh zZ`wg|?bbwx{|@5`Sc2H~sfY8~tO*XYu5?S<2hcZBiDV&g71^a!lh`2ZV0;{b02aQCKW!lZxFxHNl{T2Cpl8&#^kt~dIBg8Yj8`DQbKc-7 z?(J`fj9>9CCdd}|ubAJHP;7JklNfH8Ux1Y{!1EwwL%-xfyGUS(jvo8t1HGUccxkXs z^n9X(9;Zw0KC`8-T3%@VzODzdA;b%WedWropH({^RY>c&9-nyhUUJs_5M0{>i_TMr zlB59RgRzydN1rieM7jFw?seRTJzq?)jxn@onm=5Ga8ikdWPfqFv3ma&=nj_cg#F=1 z4LSEWesDv4BB}b2DkKVGz&#PlB_OI(W~LUofhW)77di#uo+%%u(YsPQ6c1>kK1MIJL%zSEBT+2e!gsCWgjIq~j-@@+4Z zB+xLC!rne(xMrzHT?a2=clKZXZg60u48b zvK!U7`p(NQVTf+7xfWK}tNmMhh1MT9ENf+UonzgAl;QtE_P%L?5x)wfTpn)P$M*Ps zNdNDu9oZB!#Np5*Dd5ZYnpJvq^wqQk(eg6eIFjYTSTcC3GZxL>CDH45S&s0lcS4=@ zsK2*2S}L?CLGs6z46Vtv2;#V-Yi7qFGE4{ zpFj7q0kGO3AI{3mtii(CKCnrn*N}eMn4ZrMb(Dpa*=ymZL_x{s(-~Lqe5O$C@I}+@ zyyK0cc&-M~x>Kcf=jmr}m#IosQND@KSA^bRs93~LieembwqGf$FtXI#<-xUlkZw!_ zPPW^G`>y`=YpnET-Y}-WT%-%w2Q~&Q0?`|h8XVA#f7rPHw)g|vD_^`s&9wIO2jh7| zad+yVs8%Nuc~CRo4>nx_h>c^NU9dT}UX6O|SUg)feDB#Wny#}+X=st?y-ggwjRc0U za4Ah#HWI0V#Szmx>%!kf3w8(POQ*#TiY0n_EeM0WTQ%zuw=cT6Q+2DW=Rn>7DS*kK8~CWQ6$b^PU}3aYGH~}n zx2X*grQ9xurYZJSCTVIrK+{?*Yn?lFBZpJ5%A zj9J(^iuh2Oe<15IiOQp`Pvdl?j^F94y8C_XgA?gv@-frGy z-qsdV(Zanw5bEV>aAPwEHX6mo3;EGieTE(OAo%?jz)G7hp0`Mjfu9~`>$h~ zsm=OgzwgAwkl35u>~$k!ifg7Z=3I~(XpJW$VtpV{nd;K*WiiJAn;^2>7cIm7; zXIX3J9q1Z`1mwABi8p`@i{|7s@|C0kXO`p2X+li^vyMaIguewfeFhGeG)}g$0xw>YcKPeN^5P+ z@&N)Q8#(|D@}VLgH1z|4GHo3Or{Y&xLg_ZLnP6M)u*t%1ev7{(g35RaS|qh(07JFJqX^yMChBa1arW+o-OM9~t3fY%3MCjr z!qQu(@dNC5bH$XL^LX)MgZO6KzRPM+h;#v~ti286YV<%DYEM41U`w0+rq~c&i4PXv zB;8Odu+7|{uLqJ`3~fyjOdZq5 zX(QY#(09RuB79s14qz=r0Z*N_E)1*MIT2L*;O>Z+K(usuky6T5^5^kHC{vVfD$3q2 z{>9}CX?8ET9k_;tacf+B{1UADx7hn1Gglbj?YcTZBN}g|va$n^`HAyAF3Ej!TR^=P2yw|9pFY&li8ly4TH&F~_(@UGqX)FrjbE!Nk1? 
z)$1_O4ivOejn|oG#D+A&?CX_nZ!sNIMQnXjW}Bp;mFncOTYMieO@~pL=N8JamjMCg zi)SOJFrzOn)L-Df_KMn_{; zkud4D1ZvlNN3cB0LqjnZg*s2%*oSZiSPHsiJt;MWsGNMVefM6 zt!o|uzm(W7$i!r8l#)GuUz;@Y(CNcQ%s^yl2?6H=hxfoGVI!|Q(_#qPYwELjwl@gH z?Vl%d9#o^eWv&|1kZC_We@ld0Nin$U>>fr(e)d@Z3xNvllY|+8pp6-wJY-u8j6^B+ zb|WIP4eH#GV}0*v)#ebF7pdY0C5OwOPEoN`tMa_~QhZ*paR`3BZD;V*M~ObADfA-? zMtwiJDxa%)yubeQ_d30>8RfPP(md&M?{tIfa|>pKd_*)$B@=JLSQ$C>r>4KMwQeqSdsk zBlm(Y;~l1i>{Dl@59;5MjDPLE8wtCkC>TnA$8^tUh(q8o{X=i~H@`j86tst<%d=&Z z+~G^XTpDXWOm1Vlrf9XMM;%ZN3rc45kH~&iG$&o9?wH@tAd5M1-|$ZtYmg>!)q`jv z^g+8GkJjjJyB1uRLKZ_U#nx1`gvg5&V?&$#C=6v$N5%PED+oG-u(GkQO!j75NIbGP zmTS`Ymsm7q_o6A!PB+5G1GRDXG8(dsr8tjjm1@TyU{we?-$Z~;cL&oD5zu@K=2jV* z@9Gio7_$E(M#jOxAx$dAi6v45gLxTwOUvzun3i^<zCWA&wYFPo-; zYX()4U-b7Oyzy@NO$RqmE)`>fhgWsXg~eoFz#-QQH{3A83H%vkawO^kFf3t3+IH0FKvRPsn5Zig!1V) zb;6jl!-eslZgVJ=7{%i-5*FI`92QL}pYua7A_+3nz2S-G&QvOh%jp}Zl?-~Yd|njB zmeFhlC^Pd-Gt}(?G2_0u3F>%0ngm5c)cxB+UrSRmAh~5{)avHSw#fcMoF|#rP0%iYdrB&IeO(scnDr zNQ8P)7dw_IdLi@K%_-M>QsD5FTgReZ_7Tv{yEPq2VOkLCDR;w{Blmsm-Zv80i(>@* z9aeuPny`;5I-dJjxapj3FazIQ%SpeSEE}gWpN{kTP(W(Or35%p^YT6f?Z_dz) zO+)GL_Ux5Lb0ecwc7f!lk3U1037O_+6&N2y&NqjIcZ6RVv~8hbM8}vdM{aRPcg98$yUyu&YMgPj9AvZ(kryQ5jkDYfa?=)@ z_o>bucRwRWrj74>fTM|>ZRo&k$>LUBSyL&Uf|ZBJInp^9Z@ZgxtWLUXOn0+>H55_|qUO+fc@2j(IPiTFQ5+twU+PEVc=p80f|_9P8$MyG(+ap**|orRDOIQ>9ghnaq@o#w zVHt)}9>FWUX6{+x*LDVRmYnPY`=EoBofTw?ewFFEv-r{T8$m^*niB|7@?>-uV`R^k zafcd(EqEJKF)bX&E2_=(@9Yo^(bsRPwA8-6+ghq%PNF+9I;_q^!C5nknD~nE$jvc9 z$)D;erO#&%ntRa?HfZncK(u0sQxU@5CT5`KuHlp@4@zELX8*Napw8O@y=&_n^ji{^QZSY;*B!c<@1 zM~Wuj^XYbszJ!J64M02N-Bl__#C0Egw`Pd-#{PKVaTY`f&ZIFh&&tqz<;eufadtVv zKD!DeMR!lBbqN9442OW2DFCa-0(BOzlu3G~n_8FT8+RS@(!9QZs{O!^XGNM5=?9*T z-;$sLD9TFW_FOeP^g;_r)DqIH4T&D@c$*(t_qEAh+h zv?Q8SZyhH47C%@pZ6;w7I+Ej~ZGzWt33~PnYVb%r@Eo3Cdf+vl*KI{DJ&f9RaK_NY zsP_%t5ah{{%zy?EG5_6sx$uvt?O%VGyT&>1d=Y`TyDrc@DS;HU@_gm^RDzZ1oRj(> zb@Ad{H%fsb&om{hfP(wHc3?rjAsuZ8hN|$fPsexadSO=1G|=@3?#8m(S6UpMnN%`@ zF+#5!Q(|-rB`h=ToYo%4c~qoxc4Vf)$Jg5Z{>C8X?c0z!>kK!sKcY_2;-J}6gb~VY 
zciL9-)#kG!%}>nq-;2ieIc|;?xxS4i_^-m1nn|x44T!l?`|hA;Vo&{Xnnx=>9CQGk zp;h9peRG~HziNoEOS*QBY}O84tnPQ2gl^KWeBkoHhq};<$j_X)?k~e%NSAH%e>^;Q z9S8Zj97VrqpNE|V@>NdHzm!(YIS&Q8@y!u&4WpAvxtv!tFrkq|WQvK6C49)p}yrf4|6EDdXxue;76Zl+$EI+)R=~t2GC3Wflr$pF4R){*v@DXqt_s4)3CWH zyR%QI>yAByxySFa^v#u%`eB8}0FQ-N|0wK>rAF#ARy4G!SF-zY59hrZr$rT{PCIvn zjE+#T=4(-vI(2R0-h@5OL3*pXi(1RpcV4DjzLG~!$t|a;;tCoz#d|(hs}~-GLk4_3 z`qy8!WIOl?>}B4TywkPjbIkr}9o--(DQ~2x+G>)VYweZz9qX2$>K9M8ZhF^1ly#&v zYzYzc6df!3QXjGiY|J@b7(p=axa;9|v5!_4y$shp_{hjf$ZBaS08w{KKK9=rrj-Ht zU0xoSwvRaZNyu|u_YOMLqTN@-aR3d)M(XDxhn-nt25JCi-9oa3Owm<*Pjr zv?;T;s2*I*d|%GToZdS_5UKv?59Gb&v7G5$sG2C9=ld4!dhgM|8K!Tygi9yj?2^X9`DzplRPv%QI0CfPMaN%Ii_Cp-kh)y5e%@MQE zZILWUKnr{@4|W?$sMJ*dU1KHJg}2NSaq-Xj&(xC@0Wk^7K65+wU^I5`^T(^y8jnc|5D38lkiK%&`PkS@@74Y&hL!EMT5(ZfA?pyYV^X?ha zHFtLW{BL zoDJ2H0~UXLveFv<7ObrBb+%6l#-i!mF2cYqz?*If`YIkSfBA(ZMa}o4VVqz~Bc+l$ zqs=}a6?1I07g4*x_f@Lj`G&^96D~Gl-IaOc{OBbMwsHG_Q{J}TBP{%TyYcs_Ab$=$o9z~yT z_`zOwf*p%T5HsS)@r?{Spy)eES@R}*;_P!XetGoFtlDgTk&CYs@wx}yTw`42eoH+2bEke2riyP#Jc$3vK&X(6TDX_ z#-N73`k7F6^wq1SQB*!wcvMYFpPHo*MTL|FaA_8`{ilui4R~LDi5VwSO)B!^7!Kw& zGKNUZ+25sTfx}~Y_CAc=_PE^Bh`ZOPlW!mve|a;kDFlGHu2Kx@qpUB_4!&~Ds8S0?P-x)NJt)#l@RFLzq1$A9w6dE0Hh+^qx#7iZqYUe6Y#bm(h`FA8F5$$a#8rqjEgJ1@=|u;hQF9XJK$4dblr z_X`!Fl?>mN6PLy#l>^i;%z8#9`sgw0EEXGIr3;M|U&dKiDJ`X4_pZcyAv^KuPTMJ|3a5JU^ zo$S!8iXtwrE`VlS+J?vuD^zeuB&* z-TC~(z2h`R!IeUBeQ04pa-~>IkZR4aHq!mQeCj)2cuaD&PK49GqS`snMS9`TAT}Fw zk$-O-Sibn8qa&|RVnWa2vMwi?;F(5w~=`+Fa~Ww|wG-&lpBt ztZkZvOs}68`HtLrT)vckC_|V2L|Trr0%mE?v0{o zY%OM@x{tVKi#-|lzI(X8)h%XUrWjk!O+Kde3Pb&cYSEJl%@?BvtHJONAJ0B(W`Cmm z40>}sVY3mBy}nEdJ<&AiODWVR?k#}_+1^LL4%XB| zfd*H)YnhX|^mH(A_h~pgbcARZIsunzhYT#*+Pl z*Fz}ti^wt6TJF|~9jpZ;C{w-D?uj1UI!zEy+d<%?HHvyZM^HPTI4nn8Q7NWXyT zO*u97Bz>Na+)TCvf1-<2aiZeSVd2vW$Xm>A#zxY`&yxZoY<%}m^-NbD^m{RuN&Y{v-E1!GoZ7TNC;apj$H3)g84Q5$A zW0X#}=9^)_;?l*M7Mv1FX7aeDW3iIZN4ND@4Cm1{oIUZ~PsX2i{X!W|%~U)z4YQ>{ zQ_6N5PXjR=YFo=hj|?ZDP5{rjsAOw%pV1KoTO9pCX;MUNac7$BoiWSq>?jvs*9tepc!w=rEA<% 
zb&0+TJv?Y9r;)n9I${>3!5rZi>M91U=r3_#-%S(-v?~%fZZ~SY!ZF0QT;OEl>Ym7* zuKutK+Nb(Wjhq9+3i$WMLs_fDg4MVdR}AeJ9_AgsL~_Czv(4}DKHb~C$=AbHR$SV< zK7Iq$$T0FA#`nZvA};)L_UW98ib~F`-Vp5sUmGm!5tQ|yrJPQh@qCRw<)jNxd;28{ zRm)cPOHUE|qa?_>59T|g096szA1eW*BG8L|)nJ&rJ}1jGHZET(&aEG?le4Lx3}RV| z!!#tATU<*CPUHuMdm=_4M?mY z+I-(fe)%M>D)pea2sQ6HH$yzy6#|SI<4|lFj4mLpEH)6ZWsF79Mwrf*_JvCzU%&a@ zp@5`@v0ojTa@Qplg|Bm>BelGsG@t#Eu@?lN1_c<#yX52&>Xwg#>c$g#FxGM5okzDG z+!pn3?=R@%PmkMyDvRMuGfgvMM30Vk;4U2rl(*OQTcOW_I*|jZ07Z-q7WMnGcF9iN z%R+^6tFfD1{aYi>H!@!vl#WbMwJ0Pippg}lIbq{Bd z8x{L!#2^-DpAqYgVz8t3Qq6g6^3@Z+$TMLgs0Bxl4xp&qHBILcRapDAykja}Ru z*5OKUG^XaGWiN#ke7HjwOq_)-^>JI|qnU11rV_;VCVQ(k*C|y1y`{|N$I?`C=AwN1 zDy#H}cZDNroed~m;h8@CH%s;l9(%WOn0bOqQ7@7nfiV%)XK9QJRETx1Bf63g2Q-Q^ z!liJ+@5b(f`c*ejHor-Q2@4eqes%_vqYY!N<8WHe9(Tkgx3ygD>N1x!Nb>x4nL#JE zK8vdY>r5tEK~4_xK7zwbBXv11j^xTRoCI&zwE!t6)HymHq#2hC(QG>#$kaOv05?4WgqeCc&j zN=w6XsvK<=t{>PQ)jP3U=dhY-_)rE~BK%Z|ge3y<@zVk+h}x?d|?!Yxr`XT;J*if4`(=WLo30 z>Yd=;U>FYO5^ac^A9IVB{4+OYbFME1MRyr|K2SdG>T+B0!R~8(P~DtmR8Utt`X?8F zzfGpW{pF29;*I>O9PH1L4VjP;n2?JRfna)ES6L*uZJj|9?sK3uhf|E`=c-V(@KD7y znG&hh1Lyp`Zs}pglDE{=pW=*@5;+~+s{6Q8BAy2U=Y3G%=-4M7XzPYv7(6L(=K17QNlCS+ZXi+uw*Q;`A~cscz*k<=9htPSk~{(J_il6`NABzRb%8M z0uB#9Gn8e~Mtg0MQBSHDRu@ZQt}g4s6U-v@x_H|iC{hRPK{L>(U@m+5kJXY!??XbH zjlrd79iY-C^gC!B?im$bYfgXTuL@I1yllz2WFg)$K`oY7tww_OFli?@Z4(>0f%Uie zcg>N+l$S)Wc-+L!QP|YizY&rBwK)6FpQGJi^k-RQrFh^1M1EODwt#>0OJq6wDD=2L zKvfoTXiRn`0&(Bc(EJ#&A|3`W1II^En!Hl*(?5PrNj`vzl zS{79E*qEL%GrmVFKQaGQ1QJ+6l{5!8P48t!jl9-2ATu#-E$?Z$-@8izw4OYV+EJ z@0Xk9jt-M_0$azr1-cW2j6@O0396ZI)_rAg*B06VM3fZzQcY27K|{4+V6P^Vdt< zjY{lela3PAyzlSI632KaLDxXpkhI$u;+%JqWMc*?iu@LJ@qv*eb_CSlJnx|?%6tV? z)o-;Pui4l+8`{>RF?h*!z_W1|KIEV-#hU8Z)yymTFh5x*<(wG3H&m2RpTgpW8Pv{0TD%o2oG&}#ry0;6V&)>R9KbO(!sgVCpV&LU9O*0WG! 
zKO=+`ASq#pKv@#Ppr$T1@@tHv>r^irKWII)1?f=M(r|~JI|5omt@;j~o=)~!$>WS9 z_wVyPo?P4ntQ<*BCwXl&@5V5C_&Di>A{OMvItEBMYuJ|K;-j>xaXr`}t>49Z|i%aZCT)SI!T*@!S*enV#=<Wtm?KEV~tF>IlwtX`{)wdr|58SrfT8o>v#ut-@ zbK*1+gdthE!B{SdjWDr6il^6(4KDi1Sg=zFT-l6u6Vj=PpGePCoY}W40n8GVw(}Dz z7IRw%rOU##Rvvr%iTG|MO&78#?7AjZ@#b02pLnVle0U?9gY6TKeJjJ~&w#-jFJ8#= zUSpj}&7jtM|3poU-pe1j8E-12pZ9Z=_FJqf?Zb`dZ<0<*a(Xh}>Cem#pHFAa61dun zD@uB{>c|MERgg3l=j$^Az9ZX{!}P(0jco2lW}3b=j|@wF%m@%_U8l)ZFJF3Nr*yAM z3QDGcbHougo$Y)PQkF38qD|uzzv}R&wD*0inb3V{cQd;lWk9nm!-BE36({Q}n^CSjp@v4ktKZgz*3YpnTSVzBo?0)Q=P*o3N zp~r-Duh42Z%y>!TCI=s^px>)KCiJJ|6LwZ3i;%3LFf6DwlR1ytt*7`ySXdtq?r!hZ zs{M0r6XSsAzf`YJUrNOb^ZT%@ubzUi8G}{ZD96yg4 zzE~IMzJWN5s~r2Wy@8g)<`=pa!SS<~bd<2v_%j7DAS)?5QbWV-&;ytzKRK7No}SEy zoJ@Qhv!6;Ipu_z}2S^Yomy0|#^<$1ga9 zz%ZB<Kj^7TfrfD4x&=mPfs`GRAnEh2hmvMQ0pHRtH#vYyVC9~00VYLN$#z5z zjwq}S?uO2z4#a|sBa=}?|97VE^L9WqLv?6pcz}~S@vJ46h9{Tf6=MMiLM72`J}`P5 zu^zsxAtR?BjIf}YFtFIw+y0ong^9Gfj!c_3Euf@?8fqsL!yxtBxGi$;v3BQ{sJhea zF7-2LdtE=ik$}rhY7XQ22b(nM&D#bC{gwep`a8!P&u8Q8H^%H68e4OveGhhJr#I8i zM&IN+B_@?u46&EfyeQC`tgD@v2g-2`Z*^DEVjZH{p2h6*aOin2M`Ab`z7kI1pG$h@ z=7}A|3S)omhvh#YG=Bp-@}`FE@azL~d;`s|VlaR8#IlJk_zGFva@x;F=lNosdl&Nw z2?jy=C?3}YVThGgvFl>yVX;A{5@%7ZhAYo0d7jd&TecnG%mmjKq~iZ;lyWdak($T5 zVwTT-?HT;bx|lu(C2`C!ZDv;t@H`I6;gOfgiGF*+3s&XsP2JCuyjT#{L^*f`T)`Bg zyJ8yL5KKx%OJ%C%;*bEp7b|6i>qswi-Y1#%m%HWW^mJ!ZQM{5x!-V@R$hS}ZbW&~I zC9OG1yiC+=-GAcZY)?P#fNjI>9NsGLnY1PhEW$lP-lgqP4oh1g7lSyt~em|W`Mq%@l7S9$W zOYo6f=kdrK_HS=}bK|#<33>nagVqC?-V<9?>fitJ-#+@+xBAfx#$D3nZ`);dypP=F zTKPh=!=v_q!Y8Tr9o*#C#Y3}k4)$c0Cum^kZ))t|miusL&d=Gp(YAYENzbC#Mj%{z5r{laHrT$}+KaP<3v zk#|jTWOOXqifokb7*%&^m5hrr+Whyp@jh~ zlH-2OIL#HS^!rDF`Y;)tOVlIeoe#%;{WM`SjFFS|=?yCBzl`J;?@pKZ2wB+0!eGko z6_k|v_49#G<5O7>p9lYH68~Q5&7AuNn=#uM2lA`Y6Brb7e-ntm?uSP?x=gQ{eaUN* zzy9{;y7~_#*E5 zsmp}JkQ+P2^s0r=&Hs8*$@gS>qhJJoDGz^LN;EPm5nIZRCCQX0!oQq#ItLISTYcl# z*9#=?I-F7%OJ?PV#1bwFQoSoq$g&i#r1|)8WW@E;4s`bTR9Zr2GQMYgVlY&uy-};x zW^l#X>ni{%nf*31*U$I*uiqq0gZVK@PQ~^kHBsv1sh#XlrAF%~5=MjJXK7J|tiSv| 
z;RdYl_IkiL-7lZ`*J2WO!t%qJ7ok;CYeBOUO{}#)qQJqyIo_VKL;UMN{`LIj4_|>o zjXKeP3I8jh`S+T6P$6J28wbBrq~iPAEB^M)K)HB2J*A+3f9v{T|LkQR5{T(b?IFpI zS8VG)`Y0I-%x^?6?0-2R-c;DW-PdA0oPV3_^@shx)?E%MeMxfdiTKYZ{XbYLLV2JU z8Hf$>uRkYO&pZRDgjBoF#QzUd`Rn=zl3!ZKAV$>x!5n_J1Ri~0Uh)+!_y4Uq{+~wH z5(3VpglMGC)w92LH+hY~yfQR&e|>kLJZ z%WO8rY8d2?!@|AWB>t=r( z<&#mf$*QwY?x(ZKj_Xw?>qs9ryC+E-kZHo~q z)W4o{V=*03aEw*fbWE|LkNKV`_)*R-irdwR^)riRS5%*APqL^mo^2oKKn~sxjoPTb zIM)l;!20p=<44fb9Qpd01*6y%`^!rREFq38!c`;wZ9pEn9u`l`tTKhxFe#N3@Jnr1 z2EMi0(6>jiNDr@7?sTY?n5fuf@Oj?ma^9z+ruL_QeApXzZnF?$-^36)TUb~Kb~;C^ zn!DVl5mR{;j8azqs$s>K17Ma#naXqZ!3H4!A`iE-V@wQ;zH{%<5>vGk(fbS-SXlcT zlhxZMojez(K&BE;-A>jHjuzs4X6*#nw!mG^i7p81WwQa>+uIH#*-wuwWdXZ1aJ*Rd4Lu;lz zWJ>^*s}J0B91=d$8b+<=-D^J^@!VzHc2KZ2#%^Tk>q4wCV0>ut!k+p_TjelO>6j40 z(mkE@iQ8#DLjN`@KJMNp_)CGzb3a=llb4gD*ZUSiCmQlwoatAmHQfW?IqdjH*A9PM zBwjgBv%h?_aa42bu@8aU;d;deerK{DD(7a+{@Q3-Wr&p3@$QmrsryN(yEw+KP(-g3 z2X*EWax2TE3xl5I1`tSarU=Z(%bBnpdL>zG*G8C(t~?oi5;e4+b0VT-uQql74e}a! zjG9%L?##Du850Qy__3@!(S)pFw!1W-2|u@dvN&354n9lLP3H)haiBBmOF!CK5RRA) z;Gl_S!)1cI>itGk;KsNno7>lEkee{AFe`(3)qu2ecGzsZ9QhIb)7XrU4z+LLwhTv$ z4bja>W&nwRP-_Si^b_F5Oco7J^J-nvC_ZtxEd~q$voic_2N^HXy zlX4~lsIBSB|8o#qocC5^D_V7En!NZ>g z-Lfr=I%y7Eo1Vyu7VS76k&w+I7HdD@=y@C2HxlNmaX>IYm`j|u{#tj);{wxRyOosZ zaLSXld2_lxipd!3aufwR&4=IH!f8LTpl^K(rA*RY=!i*Tg$*W=bOJmqiQn;^S1m73 zoaD{Fo!viG@b6mDV+1(SInabh*DJ|tq%n(nWXtz&-@Zknt)z?1s3;TgLD^~r0x;{q z(6BO)qgH029pX2*+Y!fek;ZdoXp@niZm`^!AsNZ|f&S{n0HG5#X90%45AJ8gIcK> z*Iu8zK%=>h;p$Mqq}4nQS|y%;d}syr}%?v0DDF-y&_ARM8y4$mhnP8 z@$@2O{nHZjN$wC>A7!Auv+%!^IrYg3pB=bg*gw(Ce_0DXnBZ_Z+Wj0D*@MX}%Eg8npm;ulpJN6&PcBqA9N8+|diP!hhg z_TBQ?@dC@c5a@oqlx7YVJzW3@b$#ITqRPrsK%D3bBm~F-fRg53bLS@;7f{nYnS%;o zI0cgmalLfBj7w&k03^*-S1BqG(o-TlT@sThbKH52N?cUJ6ia=KcXPVW3^k?6F9Po& z6v-=XHbHlwM72l|^@*QqshP%Zc#3N9Vzi3Kq5M$3wr4eA#E+a|6R_vqyaeS>e52vJ zt+x^=7doGe@a|$#RaPHvV;0OYBM`hn$JtDvEJdyJ$sIcecS9ZrVdmxntfoiMN`>^? 
zd|g|EpuEsetI?|4dKhrL9~@A`7dnz3?5tyF29uVWjkCjQ=g3R3{m4j*_0@8<9HrDn;Xa&_Ss%?* z%sTAPZcGyL&m2}wnAbnr{=tM}tbm8;6=$<6v)oeB)ay+!w=H9ott@=2Ub@Gb%vM)z z+;GsuSTeUehaXMC?QslH?b&Y5{JF(NGKZ=XnniG@(Pg& zI{Cg?H~6m$`R~}+zu|Oz>>bYiAsqtOU0D}vp2J#H;P;g}r20Al&g~o*roIF3LBPPx zV9*tBvA7QI3$5p=M|~{FRga2`io;CRx8i*~O%{RSB{#7tCPoseALqIsL@b^tp1=ku z%eqc?d!;Y-v@Z65YW_$=43}5AEjC%@3LY9>AFm+Q=z03@f&3SEkb&@O_lIlzgYd7y zV>-9f!{jUg?2k>gG-f;!w<;yM*>Wj zX+ZF{B!p%F^m+*{KSJf&m2oL4op3VVfF108Tv&y1B*#E~4^9(_GU6ixK$4-5JC#wx0{=x8{i;)S+gLsz(dVO+_H!*_t z!Ap&Rx;orI5mjcgev)r8TnL=YAG%8oi69sI-rVrIZP{$cDx3Dk2{I$pd=_Ps5!TMo z!Ag{oY7f*vy*IMB`-qIV;HvQ}GUjRonNhHg-jeKG*ZTIQfhI<^@B652@ zD$gk}h+Jp8z3Acmr9e$wG`pV)Cq_Wr2g>qufVEGcp)o!wmcfJpS*{OR(-;P*DRguD|6Yvd0OFQxybowf-n z9BcN;rSeoRP~Y9y#uvjBQ>nt>3Zcrwp+2adJo6j55>ouGGI-fxrg6Q2H^(|rWI9@M zc@qa`aP48-`BGfaYiHoA2LX7N2i+gJIEuUYaO`C?p>@6;E^7_Aer3m?&Ig_->|{pX zQ(ov&2>K2}5f;xaFp#c-f5}83JdQ`~det+3Mi)r@~~5vZoMl( zpl5D6Ib?UieW#tJIRFo5&wQkK3EUj;Knl-~KH)i=FZ|tyz`u!4PM)i|h3XdsSWN?7 zT?yuGfOnOelze^WXp_mKKTBCUl94?24qWL%tg{(s@bDq<#fK^#tz9|*#CIt2bpWRe z2-!ze0v-A>nhwXA`IeI>|)1PJ@dP! 
z*FrBlS+5XKG3T`H5hZQR(wq&azuy_l#bFq`8-8-s>CO$VQ!qFTd@RrbbUd(kKqWMG z;?jVSjFx1>7D_++ld^3E`YTL`BN^BIUTL!I)Mk+!~lpbKm6em>Ylbtl9B(+fc>R9FSOpYpD{sr-vrU0^ZUoVu1By^Sv@g(1QlDM*%_HS1jW1AJy={ zngP}1&R8PY&y`#XP#_qMxFfDv&-a8!0dVKjsP2b9!nK-c*MKnvfipjSxh5cJ>%in< zX|T`wF?gfkVKdG0`Jmt>&IA02(I`s?hDWA!O~|eX_{~uyA!gSPa1!qPc(B^rYOJi$ ztYXb>wV?UZ*SUniw@^9h0P(rqu-;bW=(k&!xD}|%3ou-%avOlWi1jE~E~AZ;XY2N| zRY2-lCLDD7B<`@=tv=M#blGYo77qhXrpw{9kNe5!WTFHxXh|=w+DJk?k3<{xrGJpK z6hTnnPCfupM@ov!32DO1u-bR5jwR^%@d~(TXtLCN(!#|I#9~LlPq`Wul2OdAm1}zq z=+c+4DD=lC_T_Y)0Jc;s?${%MW-g$Z3!HM)~d}k9dPX8csPvAT^w3*e5 z*2$+yAw$Z@Ihuo&)h!!A^q8RYX`}WM54e8$Qc&bHud3*(K>>vh>wzNh*MS$_sSOz5 zq4{Go`KqZ~)WSiX_I&kw{GwRQ@$cAx;4z!kTnn9m_OzY*n*@MCT#t7V1;6&DNlO>q zJp?!Gjujiq7Y73fX7-6||9zw)EVz3EK+u&Ca(O!dIWLV=XZ|#ZGG8Vzd~mHqIBBm^ z>w&2?0q*dok?2d;d(SQvaQv++;69IX049-fn7!miHAxaeDyFKn@{1EbLN1!Qs<{c>6q_hP-Z%1j&^X zaD|xZK(@+|?hlGdqxKAjtK3Q^==t62#bN0VqsdUdcsR|{zEp))dK4brOWu)j`>w6Q zQB97;;-oepknrZtYw1ZVSBYQGeKLuxaH*>vxfb+XPNq)*7=FngeneL(#y`#B70(35 z0!z6Yy?ec-yn0wN@|XF(P=zv@rwmWCIu*qixm35eb4_1(A5&j`+ov-waEV= zKvmPTATI;~X#(>@$y1#ZV3V|5_xeE~_~P?@w5!+n7b58rUBB^&)n9L_CH7e|!LENO3G>+3@b4th`yMK-3{9-lX&<;=r0DGqe zkUNMU`}+EVFBX6_R;=xS3P_q9T|hIBFJuBwK_H&%XfBvRzvFIck#1`cowTK;CBU4{ zAUXsJ56;g{2ml~bl$^%C%7|ROR$xYi70!~>e{un+0VP*N98+P-4_z77kHh<>)z;P) zY=_oSf}Cd$%rAiV0_hu2V0xuF){2w4ZM$(9UVmpAR1ap{0f_%H*a*_%#6N-~6Ye_H zc_D(-sWqz+>yjMVN=)e*$i_&&7qR-#$-KYm`Tb#Xih;wa}c531i^owmBxMDj`jk9^*XOZ;Qi_BToa@4G6Xl;C&; zxE0kEAgR&2q2(ESc{wPGvAw($hx7ibj1?{@>vm+)D@;tB0Vx7E5ImOG481Duc)dQ> z6^^4^}^Vy*cbwhXD8i(1k0S;k2~+Yz-uUGR7AC7&^W%=t~(I8nzEG zU7I)g^YHxCKFEjzD-D6ouCBk#F9AoZ;b1UBK8<6e(t2~Mc59B{;;*ro;64zm_hdjlCwlf z4@fln|99fqmjUn{`;YeaIXykSPn_E#E^9;qtgk>KbW1|c1q7NmK)uM;9FBQqfz^+1 z4yj>VAjJkUCl4j9K%yJvAswMnDmq9B&ns6S_b2$^m)CdPf=9V5)Ul7^ur~v4CMVHV zj=?`TK#~B-4x|^AcCF-9R~hwZ9y(omRh1F++(0eQ9$9?fxJ*e$j5L6}qzS7!OJo8F zUC%au`V?u4_7rcfLaz<*0oYEPNL{OguYh`g_^qz>$D9Xa)}kETtNHao^r2DHQ)9_< zZM47dd0|JeJ>uqwB;4H#BLC8QA)5D+Or 
zz@Vf>8tD=wMCk@8X%r+BQCd70`}=RNK*t~}54Dkdb_?jWbbBlvHPEb`m>2xKS2c<0hHSRsMiRD*K+3{(<%0PkJk#p^Yu2X^sf0-_T*?TxmNo~|8;)r=*UdEi z0D887hVao(C6$R_UvNK3?k&P*)Hw~E*=ONraTpjF$eotnLzTc1`qInwtz2{IAUF8o z@J7;4@00-hao?Pxu>2fKz+FB8jK&9IBgk$`RfW}|4P(vLq^zzU8FVZ6EQWEQbRCxd zI)X@#{K|ci_R5>s*C(I?a7)7I7cJjKuzV5yeYcGPBqZLL zpw%$|V5Nk&u}K_zqvH3GJ~q@ugzBNE2D_BYsGBwn2Bbi96=I!gYA94-e?d1lF97@e zB+#*Xt8!qD@^7%`4mn>RAAzmAM5%A%12~y(lMTS{RP3%aj^9UWdw?slPpVyp`UVk; zruy0vpYt|?CZ&tAme$j7H$uKI+j#}Nmdbbq&5@B z#>Q6bYEM4d`~pQF6)oq}G+>E8j{EcT8FgRngT zJBNe74OhP$Oa#irwY<-^dTDE|^n%TOQ_Bx51N1a58cW^*DR7^_VgeNiTJI<;7nDV> zT*pJJE8STMCfI6tD&neIRg&x_-$zIH3P#vacVoGG1q6!9*2E33rd8H=B~H)X?yW4g znHy`RSGkj8rhXT?iRD2xH>}Qxx~i7Wd|A3k&2(Fhxu^iZC;*pTIE)o zlw*=!uCBLOp)Af5RCo(}d^*;H*TYL*R`$jB-XQKl$Y!XP%M7t&#hhM5iUi} zaC$tkjV9D7X;dOGap+h?_#=BY@OygE*@M!lY- zB||RcL>hZ)nC>m7n6vjysVpyf3UZ-#a!8tjgW8&k1U91{UmtmlMUC#UI6&0|1jY?IzqNPhxyZKR?2(!Wk9jWhtJHgRUH-Q8!=V zdcQG1VGe{ctZp03v?oH#11&KZY4uOW=wpOpbI)g&073%rD(QRedWW2{zwuJ10JwCl zPqS+;`a4%?qxkwuAASP|p}b+{`YmGQ15TT1Pv#I9f;4XNu03v>S!=+2D}5z+?o|x= z+5TL`zVZI<)^b{Gll{cUmw@X*JGoo??cT4rcQ*lBgcw(M-7!rub7l`fAsy;2uwcv4 z;ADL`I(z^-bKPNe+P>8y$xK{a909(i2=F1D6dVc{2rHkRy+&L92{7sa*II>o_?y?S zDWh8jodD5YE5kHA>I*%qs!I1`1Va!Q^g`Zv4!bQj^LUkZ&nV^e2L@=GGBGh_%$;to zxakg{W(!ukxI2&@s1=wLRiEe5xp5b8x;{!(=AJ3!ryab|Kksm)5HpFqB*2GBIQ6aBm2t7mDu47#jBE5%l!($(pM5s^Iud@DU1BXKJf@xs z*8}i%!p-xjSR|~QH7qVyAj3$Qgtl@;QxrGH#W&)IL~}oX34Fx^hg`SFE*ij7;QFc& z(5=uAA;DZYw>Ng#t<|g>Qj5^pC)1jPmIwD|w_{_k66{B-JeQlmT$_Pw6H0=5bReA! 
z7#o|?o7>B#?}pX^ECldOe!dAmj^Sw}a=Y-0q45nkM?^1ji-qLm`vy{yM&36XloOo(0#w2qVduIavOI9>|@hjgF9uQ7gQlTn0ixJv3DP+h)T=?2IH zn)1~>u&P^=p>@DkKoa_B6IgG+HI2Wr9y{1=J#Yg517TtN4B7Ur2zI97YeA^czTkd| zo{UOi(kni zs`!h25FM+L?~=wy-TJu!IK++pgA6p15Ph(}co3a81T%3RRwnAj>p!W(X1`KL2F1e2 z&BU9qbgzVXpe;1AJH;_7I|9gE{%=j!fS9y9SoDC=F70U(q>f00f=)nd4f#VSsmfb3 z`bjLWJ*0&%)?W*7TLhu@LuBNTA9d(Ul$a>Os<*50`-nI90o0Q$6Qi1|CoJhzZ7=%l z%rfBGJYBy+%N=@rv6tAbkx(F%MT^`9^9I-z`XkTv@h!tf0fg-TrMj&=wq?C!WY3I8 zRdgB3jQH2g2Md>{OtUPME$A7LJha4i0f3YxPfEfd6pS!0e+U>s1R`Zf3N6TD=%fc8 z-hlmi>Nk%Dso(Pl*vhZCY-afnzVXv3z2hrdP+Bt+T$p=*l0vpEw`6taGrxSADdKEvx|Q695wFhq|=!vAIp3H21@6;3 za23h?t||I_bLqKLtug$tPgg}}e(e;0EQB=YSzvb-Iu5{IVZUqHbSc4o?;mavY+D43 z$9@a7VtmZ#viue1;9k+ zFWnC&!(cV;o+lpAL^v%FnTD7+28V`v-Rl8*5`bii>=uV(vDu@}X#$Ref4j{eF7z>A zk!+_woo^;^OAFhA`obo3B6_m4wA9rHp=bwQM!D^}<(rWL74K?ikJKT+PB8&F0*VEr z>_{I0AaD>TAn**YZ_|hT#J};<$8fj<>2hCOV#}auUgVFo&aaEJ@^s>R#f_z?W>9ug zd#U6#NXN<^=qXDv%xDO&G%CAJ}Z)9tSRd-ep_})H`vE8p;7b*DtBFlFSzhv*+ z1on~3iqT(?{>AIELhzd$UzPs`Ui+|Q;UfiH*4DT5p^LwGU1mt9sCFpzuzT@SzjPF3 z3%M-U=zQ*9y)HjE!-Iu)f`7@L2~fHsm(`kUoBWH{RfL>UQoFQYJ*56{S-TIrzXtu{ zb#cHceK2mO^oxh|xA8f20e>5xLy!D#1| zvHf<(^XTtC31ZQRO~>9$cyJ8On@URPvb6`Jky$3?jb#Sh@JF<)7rPnbsuD4K>0{5t zQ8J<9-lPn}#m8^2SkrZF<+O`&mR^tM&D9Usi{`h7GD&X3&iF*ob>n{*38ADo>ge#z zlH*qcnr~>m9Rob7cYZa1;6qtoofD=WM|$NBEb>{bJM@E?OBqJXPN>@r$3c0;|kHVHf|4;S8(4 z8*f>2V1Do~e$E84%6wl%elch?AcrFnL1)bPi=XomS!Jg|zh6u_KJmbjV3+Au{>9IE zfvoaYA?Yv1!ml5~kuZ%;&-m5Y^S9wSB-e8PZFmkdmA?(oq1pUz!*fW;_&ee`tZ4At z@EjU3{x&?n4bP$3`ftN?n05Ib@f>Ph4#{hO8=l{W=eObcXI1ofjprwJ_IHivFoW^C z#`CX7>CbNYca7(;!}Ytkd}t8!+wdH2c-)^i=PEM#VFI}N9jIIYt=dl|^Xnv=m0X%5 z{nX*a9QQ6Y%0ZuffFAPO%pw4Dv%EHucu{`r?$v5~|Uq6QsOf@0gd_ z)Sa^%1pYJ7wqrme6C}&4PHnF*AlOgSg1#xhG4g`?3TZ)&9YBA90f@+DerbAzu(qF{ z6;Oj`m0jP}h z0OBj0OWf(fLjZkOY&DzHX6^zWj2PI@WgQLv8g@fAptQr*JenJ`XHwWjuCY=t<6;GJ zbZxOHf3{oyl6<+(9-p7d7&fvo1!`0ifa8V$sRukImOIrujQogjyw>g*v9{|XaDn!U zTAee~rHH(MuDaD?SFX_g4dLS+J$53}q6%Gi?KBQ%VT$iJ#lsPO?p+#`gH7kSZB6#l 
zZo{vZ0pf1>wz%zs>pbLK^QF$f=#G0^RkjQi@^65&!(dAW5plqRQbhBfi!zPJRjcJ# zHOuc(203EW_%emV%AftX1|zlCle>?&pQLcQ%yXWT$E9%c$#71W=bbP_GR|ImajfQP zqjaL_LO&X!MCy4e} zlllF~uCD+MJHy44f2Xgt1++zH2j~nQ!cr+0*}c2>rAA$`=1uC)el6vv61zo3N(Ze+(KR$hb_L*X=p7fzwJue1F8z&+8detLEe&uf&rINOs&mXl?n$?eS^bc%hYoX z^fBSE%6m|t} zvpVPkj|Z=X5`$JIaQk~YWR4OmwOXkkKIp=I3=^3v09+14cSuTMZ}$tomMxtFs7hq! zczxjb<AM29B?Xs5A~RqC4|Id3w;y1yl{Hl!xUXUsOm!1c-GxKE)fy*pK-DL-i8Z zd{JXwK#Q3Utj zgLoZbf3&?>;u_=zR$!2Vw2zVeMW-*^BR(TJ=YYntyY+3ve%z0Nn`J64HL_IiuBkC?-S>s*y7e@9o@?tqsw+6rP5iiVO z1%u?7Qw?v~sK~lFT@<3rG+kw!S)@1a$MGEd^r4qYu!P&EV5_j;t1*n#4D=!;o0U-i-{WZq@%)}OQrZT-h8Q#R2T)6?_ z+ci&#>v~*3Ot}VUfS#_rh{tYHwDTK67zvj29If`+SQy|)y)cOWAA*ea^@Q*p}IO|cGJDO}1=CMdwp1NmJST&*>r?S5xONRdv$ zR!o+PAX>BqEcJ4={KF`lZXX85uQPki<}TNO)R@u?bZD2&Y6_9VuVfZMA&{3n(%eCW zLXvUn=rL`5Xk!k7YNi|xufu8~sBFYn8(|z*S7htx z{&&pAzqq-hD9yA>@f3TAqVlp5BJsO0pt81szBZ`NJPPR z{l<_TB6VUvDaA3w3fvldzpIDXBKHp{N<-Hq2TL#?;MoJ(zp)m~wR5A98u=6AFCj|6 z6LGziCT?2@{1EW394-q$2knMaW#7!uD14`rfiC?psZaRga9K(o{_WJXOyKpwJd5N= zD~Zyie5)vqP>?erT6~D`6RYKmv?^TWCLv6TvwCAO`zHB`X_?$4P&yudJn!`rb3IE4 z_dvRmN|W#5%EV^g32ZV>!1vor)vScvF;lgD{0&@iR&P#sgGI%9u!^QnW+N%>m#yOee!ZS48nT z=;ox5VSoeOTrhVf?J{{=Ljj1uf&jI4Futaa5eGSPwVOoB>iyX;QdVERj;PNB-d>-2@0QKsTMaeAa?)JMuw` zPfwn5=E2m8!(0aOq7zt%X=-D?p@E3IjWe?H0+q=nsqh zQnVoQ+{VJ}sV-S8CynU<^3a%8^M950NUB% zn~0Lh{kfq{Ld+wyOX`IdK!IJZUj=fUORq=~oH||oo90nmHVkR$E0=ldH<4|?f$gs} ztP?0T#-&`y1_-SwSpk|M?F$lxKZw~6a~U0_rgkq z0t1PTy;R@**W<}!lZ58Xh{OUSjsrwh^;uV7oABS>!lPgg>QOV!XdfiHblUnhI|tzcx=^{(ua%04A^R{a^-!z6AWjIP;P6Q4nlm zaRM4;e61*%oYz=a-bkJdmf^uQT%dP;V*McHvA_viPP> zy(R6F=crXZpI~Z}S2d7=)?p*3s(h5AbdtJQwshVp>$E27%kf*MhU6=W_d;lkdOTb| z#8I4dYHQ_vZh%rgElOB(aWhUU@>1NZ%{U4I;AYURkK)Z%t0LHhIIV@(-``uO_^1+Es~|{Z-?n++F7WucSFFD= zs5@{E!$ArO{4X(sU9YxRl(LJ5a|N)Xd@_wJ&XL{hKEEs=meHsWVQ2`6Ec++OviVdZ%GCY8Y`#Y#*0=^2p8Er zQMj6+_ciybJFbK`E$3#$tFhfyT<4B1Fiq_WA->n%B}IvE3R090dQFx%hb$f(E1zlT zsTiAHHZQIs-Y{~hQ+i7gd=XnBd)yUcO;2jB~mjZV@BPw>!Dbqb&uO)t}8zEppyz;g;8=DFiQ_f5JX5#38gN~r+pV3 
zu9htXH95)FaCFytJaQ}xNN#Fd`HELAbb@p@8~)jn6=91-bNzGJG? z{XojAh$S_tFR7iAr}wHutt-R1S&zSTKL9Tb1$`FC4j`sq;_PBWR#5N*x> ztJpyx5YywGF&~JLOFh54g-UYK0@BM6EMzQ9a918r)7X?%qZ~u`AOt*6#Zj>~f^YH< z!8Nm}<5+zndfyy^w$Up`<;G>vNW*|KdvHxtVZy3U3x)7u>eOFr@)|r7uy>gQ3po<@ zwn1d(w(Y!y4Y5Jd-Wp7n&<}pP4I$Pz(@AWyGyLgEJV1JenNW3iTsBHZ7C_#hl=jB; z5n-Azi`@Va8JPo#JKvr>jeQusXaQQ$!Tp0UJ_Pa}gOajbEL?k-z=93hckKeBn)^uj zbK;bdcfoNGCMsKaH&Wh7yPJzlA1W9c?yE&J$$oz#(OnwmN^Mh2e+s*vwH)u!8nm>9 zAMy|;wK*+1j@%jVDN(z0*+~jrOSMW7g8z+}y?OmKxQqG$i0Tha$^diq8+Eo|fSNVWN#Hseywr2s9?t0m*0A zeb<8xSFd`HI!x(ko+8RzqZLDSh>bV~EpDP~L;QM7!6}F%Rt^cmUeKU3}(&2d6=0WJ$h7I#yn+fXwe0+$RP9g z+M3%hRxi39!Y0}NrV6M^>_UCa zRL^%Top*Qn)+Ryt<(Ts~pV%u)J}=r`%W~y~d{}~@5MoTk+mg}UpAM)5b-`R1RTSI+ zKIA$~$*7-T&UV^bnZ~;eN_T30rh`tv${^2aR;AWxo2?+hFbUrmbdkF-(h|nLT*@y6f;*zH zWR%%XtKhz59HU{|m&p1-7=js)jn?IgY)QkrougAv8(|`E^sx4<{w^}HgH6RSFu#(0 zz~ma3`O6Im&EIUWFx{)el??@`--{qmN3QlnaI?#fEz}?|UtkmUA>>un^rH$>KqPK- z;M`NWfLk5}w}iJVuPMckzpfsQ{hb+%6i{Hd-F%Xho|In=TK5-R7~V$@XAiy2vrOrf zDU@;qn)8R9TgokVa`jVW8eLF6MN>AjC`HL;?lzeiy@P-HB>Uy$#qZ~dSse>848dwlFc%+4Ut zu;aqB&+2V1jY0tNZhtiESiAxfwM&vZ+-#=fTlQI6ZCZMk+urwS9<J4uRbUhp6xZXw2R_He}!Fr}Nd=$b7lEf2vFwjn!)ptN2Pq^H3dQL1%WHa%` z*do@7MhywO!zT7MwU_2F;A#-FjZSh?IIlHmaGYL>a`5%V(-36bz(?=!E}y{|!QZ)6 ze(W&J`1dtH5u2{r5>J94m@CqKP`?1Q8q=hlFKlK)BZ84 zqr>;fLyl-Nc)ieu7>C+YjwqV{IqvoCxh$4^bk|peU)@633#Up_a;|e9UV=X29w12~Aqc*c_%LXtCfy`_1fSdUEbfN=Y@Og1J!%eDYPbX&l`}UTSKCt{aioD4y;4i9D<$ZT5hY# zk9|l_C|vpTLB(XLVNZn!vlF6>-KAG;3sRSdFo|tkpP=+U@4m}tdzJbIOk(C| z%uSF6kp1)$@DVvTT9|iv?ulS;vm=mYad`u(T*e1d19)LCU%8VE>(+Y9S6c>(d+B>`QTK z<=sFE@m!}oAuZ)_xh+)$c~=)}T8@-Gy;bMcI$_}lvOmj0#Xdth5@Wz2zFKy(NuQ5k zWQ~MhNx9|glvSTV+h|YudOpNFSGdSv_yEX=LUjXeqkDXj>?<_%c1_-pnS*We3c>0; z>BuIu z-f}>z#)!m=9Z6_PbqkHTLG40b%3c!OHU(K!^WV8auc>i|jU2(|h8RY|oz`W`IHw0U zXUrQ0JXtsuKn%YQWv$!YSNA(-ER3L7p^H_e4aPXGVUy7{UgG#+?PpVjhjyZ6*1-rW zLxxZ#bGp9(BCuCMatsV?enA4{!DA2$`Ni;p))bd*`j)FdOXjZkMu+V~2=rXxSH-cz z$e>DA@L_-0i+=Gax5;{T@^ql+anJmNj$f4%RkjE_`tO2S? 
z&qr>?Kw8(hf$l}8wK3bp*xx{^IdJD~+V|>&oa&*44mOm9G{c1ESsGmOQ@cJgPE>qf zEa=vd!InKA8li`Lj?GAMcVCzD&hm1779=Ia8jGMWLdErr7^ZNQO*wdQc=Nk*-U5zC0^G9rbWewC~of+l2q@kD*#c=Ed1 z7GBoHVC0B`;?4EaPCE!S5H&p*4|J|VyD!JNnC&bR!;tu_-(O}l-z@PF1rnBW9ke1Be_|0!)QlxZ|UK)p>{RU3?rzY#Lp1@S#zvAEHX_TU#X zrS?IWCqC!UKC=;21v5kp*Nh)PeG$=+Tz=>kx*ToWZ-|sL&0W4lz#KZSjMC8)6Q@y3VxSJ!wNb~rqD$H=|zzgpU(|P zHw+A^gf_rg^2=o8*VGlfM)7J86Ide&DxKXq`%vyPQGKN$HT$%|EY;y~iMdKhB*Az* z3GJ>8wS`D?70S<=3LYy;DjI9x5Jj%_874L`p6`s zWvsGMzYh?)`*0{6_EoZez~k$p&&V`=&N=3dc=mWd_Ef#2qr*D(m|r)_H_Ez6HeknF3?yQ$qFCWcyJhAtx)WIT;VR{W8mtL-#kY9DA(c%FQH zEYs>9R13uIiEK%m`PSe1BO36KJ1z7Zj1wMGA9nYJQmwL%>H`L3_M>=6Dc?#*J5lcv39@iIp_`>cj`ga6;T9?t48%iN)W{igSdLSpZrRDQHDe^_u2 znRWr4@Cf922cbcTB-V%+sUhuXz|s}ax+d1NKR;-MW0!&?=OHCd#` zn{hLW96gLFOxwCgtaHHv6iERnu@k!p@F%tiyj8W_N+=9mhUu;reULXRe5+ypc>6Wz zXSMO`f*ZB0jh`uJII6XpcF)K@e(6;XWc`=tp(xa|^d9TB>gVig>;spcA;X$^YB}_dyF*zCCL(B#et_ghYcwyF2h|qpVaG9vJYE8-=2hKhZ8~K( zfLugIPQL?GiXf&E7BL#Xut+S3?PZLPlV(4vgbHH3Y&SGSFqjl48M2`qxen5%x3nQL zYPO*|0njvo^QduIsxE;F-aN|ERJ*kL09%XYgd*WsQ)<1w75GmxD6SnHg+o6z;~T_$|WgFGla!)0H)Nh0G`2_WlMMy&)x{D1raIznr<*e%Ft(bg z0|1&PyQ?BB=pNix<>(VDHZBB~2<)sc#LHIkHV=Nek3!=P@y{3>%u|yV8!_THP?3qm z?Tdv4pey<4Rz2%F#9UCtKXKAEQa$qEEHcygG^4V|mdD};t5;nW`&3_czqL|un5i=d zUjB*Tm$HI8i_7v;XceOc;^3sx@6{|B6~E|CIv4GC6&gMk>(H(wg@KmF&7D6p1(NPC z{`NC;*crOsnLkv+I>b!>y_YUR1{y(?l=)-V|DBWZpWdSgfSh+Iz6`&5>VE&8!$aZU z>+^ek4v)kC-&&vP+p*H8kD~aU4g<&pq{|3=s40fub+iGQ2kyGsCJLYuGFF36br4&& zyOvnbwD&$dYkC^xd_FRa0=ndY___`gLJ@6nCH2qNH&G|rai{uE;7OQjVi__8J-JN! zSDh|awaTF{B2SZ9qa8Ue-bm{96Oxa=m31LDLI0Js48C}s9LvAksAf7uwnf*h<~T0e zjt!CpBW|Ru($G@mX zrOhY!k4Jn`)baxEDAM&68p4>uRKk+fO``OFJo=FABlHLokK?-|l|kJ)?vP+;S=OA^ zzA*jhIHr+?zWrH~O*BV0bLef`F7%&s*&Kz?vkwZZ#tWi5d8i?X3hwEKQPL30o!5lf|w=B*P#UOR}w1t;+|KAq&)&M_4mre6I`kl z9hSyr7@l3UrFuztrDIlt-cUTL%U`HH;MQA;*#Gz!osPH?vk5k~^e?%N<`$~ctEO~! 
z=*!_>2eg#RCRA1>-9e+c6y|HL?K-xt^g}nr8pd|3wy2F{U?3ARmW2=jROJY+Hi4r> zthgopu^#dnipK`f6Wx1Fv1|b6vjHK?epiC-Lx2}MLQ7;|wx&oCmV-gxD5{=@tznf5 zU&0~e%^G!3C%wXg81oyeq>fjz?UuJnh>H@K!UbIg#gATj%sdfr)Tm4YBT@7HsmxZI zAZQt7tUiqb7=~VU(r(GAIR>W7>8uBRVF2A@JU-~GS-mf6v6y7wE|Fm1xw zfaWu?GA@Hhq}h92EWxeqk0<~6vVqSg>r1ht$1yl*4s1^|5&94^y}V2jOUM+Yj*nhq z7vGGs5FY?_l|J-KYI)ND?Vks&*FpdxSc}47^Q{17uORRQBc(gHXBsRSCAQB`MEaTkQF#Xt@0tj8u`E= z_Vg(Qs*^{5wScmKS7<^{(J<1NUnYm5VqwUlHa1%ifKdRHKw$nxT~2`cwCYL*p+x!q zX;ESc;gCT_q(qM}8xjp~MRBb`0=W;Uz`pf#NJr8UEcya_Ti**u`>P+J5!4WEbB;~c z;38#E2(56+OG!L=`iRG}KU^FQ0|mjTbBS zA*NaJ+kqQ}YpR@)4yg~Td`ZJw4|y8J>TvM{?anS7>Y&*Sk`8&Q|2ds*wfKfdtTJw* zB`Uv7ACROr9!1^5q&cdV6q;V{13~A^!5J>Pr2|w%VtZ+B#+S$#%r78>i))DPcyR1^ z`n$kHmFc$lod$m}LQGoaPEZ?y&Pp2~^{Yq^s_I~D4mp^nCB2HAVZmIayV=U~?N(vl zd{A`=3giUy&=4HSDQ^jfK_|)ga1cp!=vK=w2Q?JBsgIEyzlfPgdE-O!`Jh4(%ZAac zn`42I=!p_3PuK#*s#DS^#OS;3uq7o5k<%&+7 zcBt8Y#sG!KD7jYS+wZI{-hC~I=j;0Z>bC1$P-h;kKc^ezWsZX!U#N9(@wAM?u@Afc zKu`5?wi0J)#s-u|8Pn{-eBET_wQrRX6eUQjxq1n{8M(Yh?h(Lc$Z)QK=5(z!H=aas zwBZ_UXdbn~yeR#}dr7^)>hd3Mr7^}P(7d|GFDQp%P)`qX$`464B^O{$l-9tPSl2kDsgY0$i!HzC-#6!gLRtOotdXLsjjl0YkSFyAsqMGHcQi31BhbkW`_$ZphD_zG3t zahTetoEyvGVoDDatrj2$cZ3XA5C;YVJd1&`b9Yp>?p50FBr$@D9i_?P(-@Br()k`li^?;yd_l45l2}qGGgDjwCxFF2Hx2~F-9lz)@auy(RKEy{ zPlXIA;n%mYTeb>{-wJK>#IHvyh&J<-_tzxqk)X!x;@TmUh;nj$rvNzlXNujiqfheRYR4 zsZ=rPrnTtNKV2;oqWbY|IVBQQeSNfFRgqs&EK7CveRoHOEPUITnvYJbntqHReA`+h zX7I-f+ha-P(&z9gaZ!_`qe$!ur{kVhIXaB9tp(Y^G@qpt6-Y;Rb^@?5NUtaErl&6o zla_z!`xB&2vkEb(y@h@aNEc)ue+og~B52nDexwxJ0L9xfX!!t}z|V+qmKaxJ?}_RvW#}cvxp@V8 z8x%duB3U>u6*5Jg=3x#vg5puE!Pt`l825{#&}cMT^Be*t)dDWAUkLgURLy!Faest~ zETh~1AD7Xax9rWAw3{dgl9Z*~9413Ny0*+{I}e zML(?9wVKR{04hB-4Rji&dcqw%3E!|{DuLG@QNHpKUp~jt=}Y7*yEXaExtoO?3j1M^BQ0HQJ*kUlshtn{-0*!Pa}t0H%t#}j4DN5(S2 zz$qejWtb3}cgjD!uQ#czV17jX7uC4O-fh0hU zQ+*8MUezB?2E(A-A~-sY($`Jg+|)_j)aP&o-zcRYUGM~de9OxokfBJxZ{Q7-TUv&9 za=p5t8e{t7uDPePRHSJs!6~U~(S)XZ60EWWp&F=aQ3a)Y2ClJ$>X^OKqzSt9kid>P z^n-Pu27S=H$=r-8GdL^j4A~#o@I?&;Rhb212+XihM~*+Ia|uaXo6E_xnI~97#E!i% 
zJ|{^3-`J}DI2u4E5nY@MtgJw-DtQ?H7}7$~NkUA3D?fLb100~DvcXNt2`gx-@xg&W zPk9%4&r!h6C3+$GBP>a0!IS8&dbQ3E`J?>-U>!Agv-7PsxA}mp zG;Ojg>8kkV#s7`*CPDXbL~Sz}a(+&4#ZqV&=!mLI@IvgY7#{d{55lnrFT~XEuQBW_ zCmTEz|Ch|?FH;*N$-#@E#aB&)JUNZJ-AzCPWi<42+Zr^tM}fCSaxO_8`pFzfvnM4D z=9>uXdMaVeLtq7nkwM_q3;?(_rxrMDF}6@T09coCOXPLkWHLP_%0F#`YqJ%=FjEiX*WYm`O&`u1!V8A+MU%j<73|~xbQD2oQC!vexVbz& z4oQBjGs@8-YJtXW{8$~P$k(p1@OUrag_;xg^1H=aSRP^)O)@2<8NtBV`F~?^Y7mR_ z`K%|4I2dGx3qFwawCnsx_LQ&3;HsATakp6dsW;%)dzLNS@C3rpc&kLh@>Cj`9DN$K zk;O5e@8~%Xc|Z9J17Zm;`<0&41Tn;tbl;-FiqCYRj8!!63JVdI{181{J7E?yb(#|@ zPvEx+GY$~)C4%|yKG6noAaCh?_E{c6q+PQaYZZ+05A@6?U``59oxsMxq;@vwvA|@q z)P`YJNl5`_sATl|%l?R$`@&qH+f77a01eUQYW4=0M(y3Q4wZ)g+tu1U>v^^qEOZ!> z8ACpt;D(ac7PPymkeB^$xiY+VJTkzwG>K(R^CS@5k}WFDrVID?2YDlWa`Q9ftp*xOt4 zB6n_6$1&lZ#fde*^b}g9u4<&H8?GnOPCVI}M6R6e? zAp(V7V#@$&!CUBk!#&i3t+&~zwSN^TwpgU>s74KshB+5dpN}PJy-rP)U`}uXYCR$d z@tvt)+CQ;y+Evwm4|e*nb%zwRt+hX=&S|o-=inmos6v4_{l%lXoRJWZGG#V@DJb8i zsrjJ=u>bVId_yqfxMBn_m@4`qni;LAwK31>kKIvC6Q}P^8MhOnU(QJJIf63DRGAD_HI|qm z1I(vFc{Ehx=LtA;DqnV$f9+R8yqa^&h3o9?^qiC3Wp9d`{rV`kZR^UNS zv`7+pw}i9Jae!b3%)$h||E8b0Q#`O_bLck!1XK(WdKQdn27LDvphGJr0wfV53W^qF z7L`Yl!UW)!kSqmP|YfL@PRDqn;-i5jcm zCjE&kQf$KXNz}~+H<|T#{qjWUMb7wf2r}1-cW{W&&tT;mxU1*LO7+pkCZINrSqRn( z2D9S{2>2g~;hU?cniAWbr=<+1cvzj*u=eTun>ChHLhSzhQGB!??c$Bdd{gecrbCY2heJDex!6y z(}&jm34I zvu@E-=*PLeZ!1J~CDugw0%1hI?Qbx7iw)Fkd{?UE&$K=V97>-P43%w zABNijs6cDWa!zCp@Zv-JTmTz;azHzvjpBh%J6;f}KDbRhA0>;CQ^-V3_-K_GrR)=&A3W$J5-uEg;=_+=qHBN8y$1 zf3sd8W>iw81f=XHy&qp*fO|>_V}Vg1vF-+WfQhkUtE0VC4lk7ipbNMQ05WcXp&$Sh zZz9eF19wEg2G|KOShUexyDL>`<%)>Qbs8enk2NS(?aXL2Z^AscJ?X+`-E%?fgNvNia(i6QUuoh-U&o)t_A-vOG zzkNPbM^o8|!1wgjho~2l_j2ZOD1+wx2sZBW1Od0y}&Ha3mE|t z5)z%T{QP_y8=Kj>#v^zsmk_TxREl^_+kf$zc{KR?JhfW;A7+BM@KC6KY*d`NP@yUv zvJe8_o4b!R0>y3+z4P=K>*lYp!>{MC)D)y|xqx=cO$ddO#0=l}3E}Sj%hBHHqNITA z&BW|p-G0+2Qd!`32@4pvv!H`OWWKin?58j6Fpo0_h84nq<9y(}H+Q>2UI2(2jY)^n zp%S~$r+a2lmM*B1a6KTp4NO+Z!5LJ(u4z4!BT|!m2(*X;ji(0#M3lq>V^_8>1f3Bp 
zNW=9twItZ9xBdozZosMCD90%VHj<)}(kp{tc6m6Q4CPJOw4qG~PtC(*ecv_CiwV|0 zV^^x42=HZzc#KX%+LM;&-r8sWTCkbQa>GKeFuaby)(&7qG z=7V)lrnUZbBxZ7u|mIgURXnV-q!ZBL_YLQzW<0Yeint? zVW({TKDM;Z_16R&@JYPWA5}@~5|UAKx{0NK4<4_WIKc|LuPjL*UvN8O>V?6j5RSv;)W=D*bk_ zQqYS68^jR&GIW^}U0Ao7Cp2aVsZ}LQz&aLJA@?E(w z%vkzU!cT;lF&@A>W0&rpCyYYD5UsfuRwa7|9|et*_;XR)L)5KfltE4e?`WvUx-0H3 zddR=1(LV3P`2lAxT((L_Jvh*tam(8Uod%_@o*Cb=ihSr|tk6#Hh$Pi|GwH%hSrVEY zb$6jlkLzZh%Z}0arQf7t&%eJeOn)UdU8&;*nz2Jhjg%-z{MbXmK(9GG9{_N{j4CZ) zZlJ}Aj-6!0j^YvEXWXw`Pm^)?L>+;|i4?;K>Y%(wePlAseT7Lz!Lstpy0({#bVTkr zReQ*nYUqjLOX%gjA)yUbbm}9!MGtQ>4OVZ*(6iu))p0X=EpBzi&7c@v@CuXBYhJ-C zZej$mQEX9oI2d4G{SglTY_t9dnK6zNV*7kyMuNm2t_sU1J4t1r>=EqapY5c-vkQVE z;Ep#k$+evP4_p0DJO6L|R{;6Ae3~lze>*mR_WYD)8|NhJWi|hXcAE#;RbD8|#ZO(u3`R-N(ccsjWXd1Tu z_mBSn{WAaf3j+d!F)Ra6*8_Uzs#%Wv5$MpTtK}mkYXreO7Z)2l3X($ZUV1XnZM zTq%+)9Tg1>HW)mP+EDaL4C+sos%f|W6bJioQH2Vr+cFUNp8Jwe_Ciwr3g#O6|9JGM z?FmRu7k?wBewHo6;)JvdU^&} z-*%`-N%=WB&^UK>cXxMn_00rnf@?`~>;F<4HqO#$1Yw`@|1#T1iuxbU8@vY%hX7xW z{qgxbYb> zL{`V}T2fZ-(g1_z!r0{#l8@RpDgCaIn19;5IA{I|d1{T6gd*1R<~SA!|73u{C(iiq z@0zFdBZYU(Z-q!0*Piv`xXlo7qRw#ell|ipG6jm^SIrDc-^;&b<(3|Q2A}U(PSX~S zd`1Xr{PGPFeb(XE%L?5iuaT!_IhyWyARiG{qsnOBfqZ4~F}*81<49bY-NA@{#5qMP|Kq4-*(02y}~u* zU5~1n7X`n+%lu$B_xn@3S3;eAzQ4;E)t!J9c~_+y<7g7{l(Hbk{sZKx8wUoAIs?dW zVx&e}Wc51+*Gy_OX>taV%Da;~FS!-Q-D*jLEOV0HwSNtX%))RF4=2eTzdZc9Z!}oQ zxE2dm2LD;@NyErC`^U91s;-Zbr&2~fZK8c&zs-xGLXW?%AO17Fli$}*ul1dx+O?KH z){j#f`O&QQIf?B--+!xbLZ9XL_3Qh_)E-8B|4OpUPe;!pznVy2Dgq9@K%U!j^3`!H5&w!y8zjgJsyU*OL6!pW)TwfH zyYKHUYVe8*MV@-%SNg0G`RMFt%b3yO$WugQwhxKF|KY=(FELTbQ&av$^OE0x)BFXr z$k)hICr+rNbA10peEFrAc;u<=|A)Qzj^}#s|Hlz!R5nGj^OjYzijch%MK)z*=S}vG zl9lW|GFn22tcb`~WMr0+m7T0N@89F4<6P%DSLeDu*Y&x6x7+u7yM6yTr<0T3UgPVP>Ge8Ox(0g}5BAq3-D24qTm2MxJFgfjUJ z9Y!gX4he0qcp24X?DQpsJ?0QbspMd%!zjxOU-%%NLFH%ug$%uikC{IbFY9wq`$&?p zHw|@>;1Nd0;&g-&udwU_J9{Sn*CI0{n|%?`7# z?_oa#*V?ft2B=o)-K&%CQ~?jqc%zX9i!4|QH${5%4bqNXIud;Fo<7xADO3Qw){_yg z1xPzab%#Zog*LE^EVx$PwSX*G4L2|oy?RhfPcLc9^ig@aF0=9`bCcVIntHo{mvu38 
z6|n^R=Y@5}+d5~$)r%)&{|$)m7w4z;y~Y^>`x50=RErK*o|UpsngAt zxu?;mj7lLpzBV zFe>KnzmF--pWBXvsQ$Unn_;4CrjQjWw8QZWJ2-^wBQnxNWj7RKINLm8pZC0?KZ?&YdIf)X}>W0i$FRxz3Sw80Gl*FF0rP{9T75z|f{3S%QIOL7 zJcCRXc4x;+M~F#jq&{$_-@N=I?G1EqAjH0r@i;FpFF#-VMkHE6P*CvDp+lVtwzjr_ zwlw#Mz)zM;*ym&Cgig%v=FGU!@<$B)uZ~fqUHgj;=m+IdanE%S$)gXq6+0kp_aZ)c z>W$eJWytudN8^(p*a#xC3>t4OwELKbW!-62#q)l^GQl;KL{ zE<`AMk!I&XM2L}elTZeVh~?Bl^snyRIH|AtvMLH07hLTwyD~EUud778Ez6aDkt3x4 zrIXbRnP~V)qPzY(6OB5@dzGP}BV>4H90hDm z)?=kgc@g>Zb%@uxHhjRf;IEfOHa%>#dt1BMXAgeIg12rp*1vYkJWrv7fc?>0C;%PH zI;@{Qdp4)1Gj*w*%f%m^LsOnff=@UhumpH@8~=Q;IM7G_R&d>Pt8zQO5(H z9ztKDmbW_iGI8Ky(iDiqLu>SSg^DMr;-ngms)aEiU$o#{wQ|oeO zbnt-dd~l{|(cyc)&yQMRGP#%DEd;5L>V0~m$l{z#xhjRa_lo=S338G~#R{hAtZO>G zd_yv@zFhUX{exWL#CwoL!n*M#m`WlVXPA3azZqLuuuA?33`#6flv3o1Z(AA%i0P>o z5j(Y!`GF{9^Wu786;%CznKN64sa(=i6SQJ|T-%#J<52q(*=Y9#1+?`M${fU(<* z1DW#_h!9AAuUY&jryTPC{Cdv&5C!Pi%OT+-7qdxPQ_qXaGsV$j!EB2wMf%+aWpMa0 z|4gR1^dSF%fQL*0pUDW>Q4h!9&};mH4fR*wA-$VOKmPDUaQ*=)l#$-!Pb^e|moRC5 z5iQs+?;UQdJB^baBPhd{r5ZdzMZipT1h0%r(!oFml}CwsFOfkdnL!A3$`GkjF1Wvy zr=Qd3-UAwlM|}6KW8BZh^1mu!u#X1lWbIJHe+MU0x5LYw<|&mq%lKb`VsLHY+jYkh zj{SE|Rg^Gp&-Ylphspn)Q{PtzzMYkF!2bgj|9wF9|8zj{y~=`Zi`G2Tq(|Wp4rSscIz)J{T$IO?+e8PfYid9=Y@tZbY&8< zYYso8=ZJeaT__p>G%=oXGbzqQ-mZO3@P$!2j_A?BLh)dr#PB}!lH$Z`t7RUCHJl}F zz#gMSg%%oY^ezM*`oC_lqa#&J0f#J%kMJR6GS)H%frU_72DmyIJ7df~(Pd19#{3+D zYl|sX#A|LtU2C%nxTY!UZ8Q!H0bNz8U!sWmVfw#s5%Q&Zf8Q<`WfV(2dH%f3>-c2@ z1KW{isrzWdGRo*PFW)m507w!qX?b4wPKsyJOHo3mA95W}QQ6`2wXFG-Bz+uwGXS9?~KxyW^@W}lzVT`uYSFO0dn0KXqM!omqW zY5T9YOaG&3c=06w{?#SMB=C)aM-?YqWMZp3&uep}xrK*IXI|~B_uEBoX`OlA(YJyN zg}J6>qHW9M*Nfm66+x8RuUq(|C+4Ha?jGtWjGUOSZP)cFGefluO|S8ZNVO=%R&cpC zZ<6eOqiUZ%LLTHBeanrB)6*&(OhIFUzUbU395i@)&wAKQrwQ$nIFb3z{Ae@U<-=s2 zbG{jS%g__E`^V#vU&yA3!ulV_?-Xv!MXlHh5>e}kiP#u!mq*8{KQD4q>dfbT=A7j# zMn2&7&IW6-3Vc8e0B97upW+@N-tFDrs%eUCR^9!r7%I#d@>`#3yx+$gGU63}{JR3>j}P6&l^u|CS`&&zd2iWLk}v(I@;! 
z9-r(zAJR&M{t_}?x06fW!s#$@h_>lrCg1Bud!9ZB5}a9QAXoC_5*1W@8}9hfkPtXz zfz51tZ#?(xW_FSS<$d!%?h5u301GkY&Yk_Y4}BHx274aZ>T^{mN1noknbkp0jn{6Q zr!pg^qL&t_xA z1unFZS-xqCo#<0T78yzi43C4?KoDOL8kJ)oq25WKXzM4_>P}}FrXG5V&L%+S;I%}n zV09J6PPIUi#jizAMR69yo&~e|oT{8lw!c;xurVaUc}ObBCIH=C9xX3$A$CMGL45jM&oiBMpi43P&$5L{5gJrQ>1J9w8jjt+~1)%L;&HvW1z zk{^+r`Iu7ESN)olPM#cZ?3*zsd6YvmcP}$l%jeS-ta0(`*?P8++5p!tD%}N0VTiY| z)5R<*`5mCdh95tFPIwuH;$&D1+V&T7)>Cp$pSJHS$jHmH!>lhMzGIKlyiY32nW--1 zs3w7PYCn)ekDqr36INqLU19eMXFtCVCR{*XRx-Q=Ox+~JpQgxM6^L}8j740F9KbUY zR4R^&X(_31&I|!lF!KRiPkj1EIyqY+N;|K<76~L z#P5wy)<|^9gtL-kkJS2ya(=H?ruFbW+AQ<)W2FsA9ZS#opEikh9DS2$6znNw`)dHK+R~L?NergG$`GZ!tz1$As+$!9jQaV#a4@a7;wJyyWP! zq>5|6+-jkGHV^EkCgJ|fgVey*_W2+N3~5-y69p_Ixkq@81XBSSZwW4Lm`JjL1AhGc z7%Ik{A;+VXh<_+))uLQ@HbK_9bzSFij!o3GIJ>dpF+1n-xaw|4p8#!b?YOto91K~> z+Vl7mDpALkfmht9=1asSI`B~MMD)XH{&NwauTkyd&d36l+f~rl2%04YL{g|@znB|E z%t#nrVI6O?xb3jmUUZ?AJ8{%@lKXO3aT6w0F8^}4S+GkC)k=kqTaIf{s=3~EZ{b>O5(oo7nT764#6tpC#*!Q5{s$04!Ik+A_-n7fIk!^_%8iz1BU5*B zy%`}01D3_p_I`8mc&n?cCjHB1AYQ08JUK4B3Q%#=eMwn)%_~=K0Y$xO`5UH=^B!xB z6Qg{SB3yQkZ%1!6JK593P7mik`qUG@)aIUO6*fH_owejFaB(enTJX5=Ar@cBOgxpV zVHBV8B~d@xGCkNH?9oMK9{o-}yFahJLee?)ph%dhCaD8ALO|GU@@$x4&b#Fj$DGP~ z!FG&ca#CPs{3s8T-jx5w5I4VOrOrjo=clWaA_WEx|NIZj)DfL7>yu{v7nt&V^%b_k zf+%E1K98|LBvBIiU|H!^8W_KZemQJUiDvh5zwF zCOE@|jEI{aX3AO0iC+psj@fDPDib-LskC{WuB1!f1wwG}2;cNjfxO2!Js@Z+JZAeiR-l)&j zR6EO_dtzdmVEpA8m~a<^rXyOlE#b_&ky=m@16je*=bnvn6ZlkB0Uz~1bJg?EHa_} z<``s~p|W;(iM8Ck{CzmXDTa_`I(%mKOm_;Wa{n0&iH{((-Y|*TNe&5)s>qElSVmQHAJt7TDvn0 z>rItw;O~IAxC!m=Z!rK)nbL>Pnu7hpCy&``i2rEreKK*)VL2au`XpidSfg_f_)J@b zizNCt7r-gl6LKj{Clu2KpPcB8OFHYAbL>c8eVacX!e(=0E)%)W4=d5LphS%BQK9F+u?9mMfFj$4P~ z@y)3opXC|!#_|Llca;(sbh5mFXbd*)W`5GIj-G(S2%#BI4?p+uhI@$g-0|}%@?=6d z|Is~s+6?#bNoO&KxA}Y3?&mz!v>L ze{>Q^#>_f@2WzWkqPsxFdkhihv$2U?~Hfx{byAwYY54R zCu4eeOXu{4rr2nxl1m)a-|m29%jr}CR{3->{P+T@uL{2a$%!dBGibAjR|}6&*o#|a zB<83NyQzUB@C7uKn-s0^ov>->dVaBsWd%vDrE(rR^TTFN+Y#` zW9DJNR~JApuy~VLyP_Q7OT(Atm~v{s$n)5`;VSHWP_)m3-CSi}BK8CYDzdM`svO1| 
zf~5OmD4D2I*6@F}KEz#0ty?&xvGMA$&)kf%c@{@G!xX2g6xkm{OmSjBSqfwhi@pd&q)&|a1C`g9A6k zSf$|n!h83HH^&1JXPg0udx2x+{?Jh95aS3XZ~uFqmNBuOPXT9Zs{labr=bPl1oA z*H?8!0YigV?MsQR34W?$WVDcC5A49oIZPN>Nv@NC&+$hnQT0ZV)n1@XM66g0%PRRJ zLv%faKM_){!&+jy0>pR7FA!-M7m`toMJ$sc3u9GDE8mcTP$6&71I#d8sVl=^a6k-I zhgCl{8aePS;4mkM?6(qT&m4$3y>1aZXqdC*$=<@gqaHucGbZo!F7!{w zUti#^*eM|Ul>0AsNtfaY`4p({jQXUV5f&Tvbf%AzARl&fP73T5{X?U$1C7F#pn%Pm zMbw~c{7A~=C}|+)u-h?yoUbm({=bHb5y!#1=-5}J+Pn;*>mtXCVP(?JUzU(=${ub_ z^)Oz#r~jnMX6r8@Xt3!@!`h$vbQ}yL-r{KG>AOJ&6{sf&!XuN=mLnT0lLx|MdN6_$ zotb{bgWRY^h%wn1LJxp*Q4$29z1cJg1k>is>J;E!1a@atUh)Hzin_W%curVmBp@Kn zQ1z252rF4mdQ|j0*Xu1bQ(mZn)#UI%y4^@&yois)_Nwq z>Vh&&lGh3SrZm=gX0ys>NL+VLb^RkiA-3a#tJjwa>WN0cm8*M`S* zy|q-B&3AcYhVqRGIrd>RWRmGOa8Km^z9YypqxgM!z(U9IU~j9S*6(du#iZ))%&waz zoc5C7ooEN5A$7IC*yJJN`97DK>5msYYGVjd2p9)<2s`N2V&)NvfXWhq&M}(U9t9FlnM0;YZt>;;=%hCX)H)wzK+ zcsU899g7HYAAHBoC0d(+8(Ee6D&6U8GG1O@O&Es;+xP74TAIA`ky;}1CP&4>ZS zXifRWvW4nIEr|xGF}@a+sJl+;Ap448=N=kjA16y70M}9F^H6XGwL5$4z}P{ zurt5^5Z!q7(GngKcX+}Pukdt#ZdXyvVjZGilRPUNMcha_*qVIyjE~nUIET!`CxSqksRTZlY8Ipj(pu(q>_s0RRQU_^h1E<)PY{>O|zv zoQ8U+)&=DGK2>i`61ZW2T<#oK+l=UWOgpl&W^d56!$aE|E<}IrNL#R_AG`{OYOn6C zTQ5u7Np?@^L(Zigt%^%kmWj`9cOqByn68Nk}N~q%m##MZ9$#};kQw9XFT+~ zAHuw#TIJQB;#(>Kcyb123$UzNfO}SD-bRPbPoINZVj{@7h7V*m7_zv-^$B!zyda*` zEe6^wh~9$B{1r54m9>Byx(?XL5S|*62f1p!(}L3L574R7BVwKDVSd-v`S_*lK=^2I zvfu4q)=pJ+8kfCu*?&9H*&z^6?0!Z(ZtO8$$a0I=m} znCczpY+X<)Y&>+E)9}hM{v&^TmNm+eN$=%Q+>wiAMC)n!6I(sVjL@2zp)c5uHQR)P z;-4^ecy2DNV!5@FO)!X?;K0aE&ucFap9yG%7+~y0yc=l?4U%hIUazDeK70VRwO8-o zO+CGR_}moY8&rlia|m<1%;E=P&muTahM8fy)WPm-Z%fdR&p|3y3Wf`9W>Qkhbxk_Y ztID$LM83b%q-_!TiO<@RaUQ^hBzoosaL@$b=5}|!H=<-JkVa2X6lR0A@R^p40bV*+I1#nN@gaL%Xu8-aK5Za|FDmuonGn_Uhpu$pz~$C zGd@E?(UM(ZPH$$=Fx(A9M?F1E$|kRWK}~yjirg^b%!B~#zS|IQz%=sx(I}0UiX%wlWIu47jJ`0g@*+@ zmvp=6f|w>3WkWav7p?FLU0j$wL@?|m+0ark!HLsO(p0}E(VJxdBXbp?ePJw)Y1l)o zdFvc7TaCeT(=W#~V+?U20bEqh%lQ2ht2>jMTN$fcM>}7Y1MOiHI(apW2FPyY z9Ti}Ai8rH=O}yvPXoSq5ZwzuOzsX$0 
zII+mQO{wbx#7B?&oBNsTy|h||D8(Gh5_C8_Rf^7UMfNvlt!Mp3zxL8cES-bA~+)qQ$;)Wl{bu;&hMM1Vkg( zs9L(kSi)hq7dRw+F6uS3f z=i0DC08Kw^d=e_@=?4pKzz6r;Kc|y`#IlCLD2xR5l#yxY&2#nEx`Vx=_j(=UX8!i< zOK$D9V2P*1`HHA>3pjt4(fYOc(dZA*IQA8$FC~p&a@ORvclKMmUsn>+wFO`biGqGp zpIu0NiI?}JwNf@nnuSj4$PnmMB>9ZA$0)EMslP~SqG=0yKUg}dLPyhBRRJV^x#IlvGNKVgWi zpD=_|N1~zi8u2-hQ^Qof_A9>MrFaFRDyA!sCruSVHXmScZX(4(dzoNs;vY`9b1exY z3}0)h7RV&t3$A$J$keIkxE|tMf2S_BQsJYz&}+0mrV|V{H>*E23E>gOQ$$o-=0Rp} zLC?6}!t%jRxK+;B9c@ib);I&*(p4ibukBhOCiCbD0B5xfU*{?-TJ_Avg=oanBNuN1 z7YGWzs4_*~-MXfZp;Ia$0uH#JNrW=a+YMD6i}esAOf9_}08XGTrhd`?{+Z5AY3~5p z9WXXXri+Ak^BAX2z!FwIZ1C z9}*y0QvmR~03;(@O6HJ`;r{>w3Hc>I18-cn>L-xw0U>CK)u(Lq4CR|E0w5JL-?S9O z>y6#@`vk!korU&eWLjnv~&VbNFwu`j%?a&Kae7)V>Wef+a zSbY&pehcRpox=^~tN_CZt4v4H<1lQGc8GSU^^rI?540ma_Og7O9u@-!;fm__xKq^? z%|V>dtE3x!UBd~AOj21=@^3tp8Sr%tqUG+>37&imv(H3AxDTNz8`DT~Y{=Ap##>+Q zT4t#74#x?If=SUoim*850`((<%?ymRzSu#$@Q%bQux;AmQhkKJ*MdM5^xsQ1l(`St z^u0g^32vG15v)R{s`gh|qdBM}WV8IZmgE}WjogCK94ro)r;Sfm?m(vjRxQIQG)$Ef zll#ScQ)r||93s;3kq1AndKtt2RInFd*r(t{Fe*<2JU5GC9_a-;@ z3RlK->g;#j3hnpgc!WX7Rjk1M?JCwf((dUqE(dBz@hMy><2-uMUVMPm-z9Y}>F#Rd zK)VTl9+Y^4rn{>x(0yfAL-(a9r?RBhw<2oh4KuPKycuG6SRADr-fKpW)@B#npci4} z7$7udz6r24wU_0qe8y*Pdg#F6dz2jJr#~y*h*EdTQQ6h#@5whVPoR-*3F|ZiuJg@T z0zuImWgWIT&}s`vZswrbi+LzT!&8NFJ>L^ojY@1jA#uK7%*~0-GnBc<{X9;YD~Z8m zqWA~Y=6SHMBmV~G6wN-T5%f;jvXgBTW#0>}p!(T(^NwiPVOliz#LsKuos)odf+D`k znp$7oIkN`X&giu}E%Zpd_jsZLOU*<0?^N;03YcmF%zh&m0@a8OeB;RHD3!=>BpjCl z6IGgCkqi*<)nCe9_7@0oJz2$#DUuO5n~vkuh{y_9Gjq~5ASXMImS^b(zIkbWpS$4bTffHhS0;WUk9WmF+b z_(}+L@`JCIz7=)+=x?FW(*V2}?D3(rGU9SR7#zo^r93JhVUo(WNZaIHTsg*unX zfe0IkGf-6T&$1ooc1Lf(Rh^lgg>H$xmm$n>oW`dW{FWIJ_#Pk%rP~lX{uxYmw&e2J z>8l;AXsBEPq`3>6El?|9sz9TglXIdvnSvD$ER|!#GO9{eN1&u~g(ytmvVUEs`2tPz zgvA|Tl)+KJg|+R?8v^?OiF|cL=7ltEL*=*5@Z-=m5g{hINZ{fDM$ZDMxgj?pq$}V& z-X!A8OhBv8R4o!=B+skM+q=wg%2;xTR|zin5C?Tjo_Lp+bBU0id3ouRxdoWy^1nDI z9UVbLVhQskg(93EC5Bo$mJk=3o8zkwD(%n@qb(mTcj_4uDbW_Xm*wU0cD=p`z(C#uw}G=GZ*7TID)fAO0BrLP(-<`Oc8Q_WtmJa@scv1^ 
z1<&EAelJnoLYpBUMi(=7O*KEV;!xdVbzGgOQ@3V5f)5SP@a6(AB|2=uhepPmO;Rhh z`0ssate3!tMq=i1W(jC0h}m0qiiy}g#;&XCjJ6@2gWm>ih+ft5v0WNjcE=}|Dj_9}sE+yY-!z=J^JRJ=>au8YIynzMI{?jcl zIe{EyU6@RZm%R&1@ckN=kUlAkJM7sLMYkXZM5AkLm2=1Zu3ORiI3+}8r2uf`tsNt- zS@~3i`0$;LDyEPnsT|j_EVCXau-)KRSNnV04fqjZ9Rj}8&Og3=DlF9QVQ*d-N`@VD z40<`4cn(X)=RzzvfuBPz%#9i*7>{FC2Avzo7PS_`j+8ptf*OMX~m?@6^? zdY!ry-i}~Zo6?QJgVeiU`~4s3wFJPnat8!iO9b!H{arqyq#Mbpp*WY<$qav?F#9Y6 ze^E+77w6BfClH^iwrEx!+vTl&BD!qWE5HN2!e9L1fA0&vgatWq6}bPE?*;A^#IP{b zGt!m+udoXx(~sf<#l0NavH3vID_BB-25RuN*A^oz_K~&8bgG(LXtKJ~SB1YTwf~+{ zQtUS8!9ZqEggb)BDMda)0!IogqV%L#_UN<6Xd;95oFT59y$3F6Q*mGnWk+$ey8vRU_~3Z>;}O2Z$epV1_hur!48?51%0*V(0k3SMcoF9wSXzm?k z6!-l2_)=NX<3cz7?@*`q6Bv4LOkjGTi_gYLDdiH81q;r|Fg#FZ;T4S`s^8;Azib}= zB|W={E(0WJBeZ32OAEE4)N?O!%mnH0RoSQ?QoM@w+#flp_9SQ@5geKf5id6X0wMZ0 zy-Q3}5bYOxyDM1`R{;Qtn5Q5KY2(X!n~0NfHB;>)dD)(+0mn?>pKM$Hs80M3JVhVl zBLck~m0xfVe&t5;*LpCe9>fYsZ#49P-zy|5vOT{aSUdk0--!3n-eI0XIPg_(X7>Ld zUo6D>VusaMlAUmI@bM|LUz9aw@_$QySE!ct#HAK7-1WuXSBaSV`J(;6`5uaxBODdr zD%ysqe8Ei&1fuTH3^(637_I|F?8a}3*k1|6e{C*Cb*J`rpt=+I*<7LNFoqOFSHPCW zP4$XzIKUo)Q*1U`x_ly5uP5J#FYFkkLm|nrKegjj#HLGwj~4}$trg(?;rrit|Cl~# zgdtrRP|d*vp(QofKWZKR75)AFjweFwN1p4PjXlg4fE5t{z@8`M3F6_NO^k0@u z5vn7trv1xxq@**}q7fDzau*om8nv7fA&Z149Ua|BA<`n$Tj?cRq< zet8jZT!w2BMsW!jcLi_R@CUdyi9uuvXs#WQJ~CQZjgjG;Q$&3g5YeVbsCxO zwY7!4G3)mwcGkNtO2n{d7UsCXTMnLIbHi00Fz4(Cs{YN-lG=5dBfs(Vb25M;>9{)6 z4Hwd~`)N0LEjo_g<2Za=%oR8%cN8*wJsrlufzoQI{3WLE9S{}sZw63{uz~XF+#fv! 
zr8NG|X%IOKWGzskuscDT+)(`W?`K)Bo^M;`D2?J?AN)k*k*+E3a@hBAdDGo?Xz$M) z5zj}8`CnDSj+W7B984p?=WFgE%iJPzp`RyZ( z{?a;2O|4Gn$7wHXfIt9ov0N>**{o%K%dH7# zJXU97ZapeqAZpBC*(`Ou;L2{pMjbKn3Y?ci_1CT*#xw}Oc36CKK)$p9HjH;6CjjU` z+mIHH5p=xJNYFHq#?6lK&o@_RIa-u-5~s9JUh@E*3G7giAc0}nsKcnb0JAGNsfq!& ziq%{7GqU}Ki5iwEvkPjk=UHbxCPt8<6^M6U3mzjY3uXglHQ=FScD~Y+Hr(Cf#(WF&@0XoKWNvj_}(WUj?X!)Dv z+V4Ae?GVJ`XduEhe$TaxnVjR^B2f!^O&viw_S#x) zO$4VVMv(oK1m+7Vxpish+7%+6Mt9k=^)K+~BRPUi?$Z!vtfqi4*UOG0h+h=3Cs?=* zpX&q)s5fw?a;H_T6ui#kK^7f$V`~H}E}Y8XGI&XAT<-oHgmyo$=auVinv0340<6K< z)^-kJ!+YSJ7rCqc4v28B)sWJI*N+AAZFV?jv<~(oLf75~&CFO~nG3gJx!4kHlpupQ zG28|L!HvBt_K;TuUfpu3q*Z94L1J*+Q!?any&j?b#$&+d6$KNh;y!FgD({&P?^?y( ztB#|M+=FQT(Oyq<736RuAjhi@p=}wcN76PRIhwL3pL^o`*0-Vw2J?LX%GkI%$b#O6 zM{K^CE58z8W=JRhHfNT31H{6K;rqZ!83V7Ij%^Oc@{thT{;%r39VrZ>@=k4c+sQ@D zNlXX-mTkSJh)=+bLW-4Tp!}{R+od!K7s)o!<7k1dCt_4^9!Ux6#*i5 z^40a(IXHEX{S*lbS9K2KzFQE6?D@(3;l@Z^8txOi2%8g-VrmemcX)AS!LdN*WI3gO* z^zNGv=E{pOy0``0x9x>&8(5to*BilBpJ;MmMP_GaAfFj;;nDH!3iJRRoo7MbkO~aV zC!b)j#1KadoRmcEp8EJFmB6#PyP+-3*(%e-*p2`Wzvn(z7d)qqosd4eH!&OQ`)Y^m zIjgsoNV4oXt{)PTV%b$8aKz`ZslL?Ig~qz~@_g}a$!K5CEshX^sxsc+1PF~v&tbbN zVMX?>h4RQ5^HBYFh>Lvdz)Cg2vg|YY(jCBx2jG2hXy$wr(48e#X5&h5SY9f6cLBfqtd73YZe8IOD^^xX1<_-jw$v9zMet?U0 zbot_r*%zOn7V`rG<^Z0l)tSbZ?-0I;y-t2*L=^tS3OZlvdjqWA-GB)|>+pPe%KQ_^ zHh#7*(d9UNY}6+d{^m#|Yl0HH>U4)6L`{h6HpFB#GNxkYSK(FtQ8BVT-vd!ki?t+< z)Z-qDu%oej>u;E!0vt>mu#80(rXc0hZj{^21dw6CCwq(nzvrH#!aiuOrofJr82m4c z`_n|X1i#y?WE7=xA0gWNAf-6@G}K>}|6%euN}6VDMUp*EVFCoJ3bHXyY231%tlZ<= zuGgS$i$Hi6B5bRIi`(}iGdDHTx~{qUglb}A3qVXHt2$5*-N&njDe+Fxa@fZHEDjq4^b54S1v2PG4PYp=@Gs0J{{zLL0AanT!B0!{FBGZSJPC4 z!9+BUq<)<;TDBieJprM<+&2k@_nI9bV0{kEs{Kv`caEQJmnwof7qi zJW7lKG1$T6nB3=)?61%p?_t|idgB92fiU(fyww{i3!WU>ye%%EwEzMTYe+suaKvNb z{Pa|;p4}2{A;XCR^n?;n7yE__&<~JAwV9b2wR6*11?Qq^c^1FiWu)=4AwQ6J^~J-W z%#(8w(7(Aq;-)2??tT{Ma>i#2_*_f0&Gs~*bb>QLP*N;1e znPbeTw~tyRf1z%^W$q1dI;ccHaBZ~Nd$ZCeLpv9vUHiMWpL!FVdY6GJ_xH}vAC5Ld zpNZtE#9xm}3mpVG#5hOiczMNX(xxj>!fkMVr!5UUIQ;=!h^V@OO8O8fE+&JRrspTA 
z#9g!yt02#L;MIvc_5969#ScyWx%*+Q=|4z=SjN6kssPFE2DoD#J4%XYc4D!KBMov- zyL~he2_W|&uc?f?NSKSog?#MQPXJ7>F1=&K0^?SZ1lD9JKKc4m&d}8EP?Xf+6C399 z=N4A$)A*+Xr?W8K4y3I+=#nmy#%`WYVJQ-G0t{Cg@X=j1&ifbqfB@<)=#BfTq0iW= z?tpYv3_+EKyjI%IYQE2TIKeTi92Zk7;mO#&6+p8{IqUd!-31A@_2;wBLKLtFM`D?$ zlQmqV*60gu^?E(2&$=|WC|3WYfz>AMccpDOskBbSLD21c(H{V^L%2q{OB7IGe@<9{ zRyiL z{)gSLshFq#6P{=aRW{dYG22|gZ8T_)NO)KJ9K8bF@dR;jVXpADTT?gH1PhMW){hwb z!xOqu_1eqZ8SuCWd5ujB)fSqyz%DMzC;YtocVsb!h@+TeUIrF^hFQyUXT4H-+`SQ8 z-MyiAo{)+IswXD_cfJBC_q~f}*U{%IU%ws}eF@&6ZUYuW5kL;ddE!5Ve z*&YTq79eFml=@I8NpM?0$>8`A(|+!<2z0^ODej_Ca!%`|5`&qEw~+$%9ZTId*^YaR zLwMsoj8aaIJO#++{u&`YoKItG_3m2;A`E{f$Fdi8^6euVIWkmzp=?E_syI6qFbTN1 zmL3vJ6Jc=xyV0V{$x7nxXxsdfcS3jv35v}R0gc^@oZc?N0VD|zkL#A1&jy92_p0?vCRUa+!_O)4hBmF`;ql9k9Mpd<)sG~MPt4jW(nrVi z?Sx!!sW%4X7%!|*cn%rQB$Gc(IaKG>a&=LAY@fgl^#Onq0meDn-N zULzl2(WkOMOOeuTzB!+B0tP%R3`lU}eHIisQUqKVKVz2ln?O4UvN+nZMfCg$m=M$P zCeYms3Ho?}gHDil^9z?oSZtFh?vZ-`NU=M_KK_t7u;z$qo9$<>F(g$5!f|1Rm8PvJ zE+_%rc^b&(_3vQ_S=9P6MJFg*Z!m6ov@ky~sk_q@ zN4*YK3+4pl*J5@aWdYr?XzoBy;tteg!2xfb5@+CiZ%99wg90O)JPk{0KBUp@B;L6` z(fN91vV-&(*Yl0p#%%e0)UR~ZKLb~0rDCE4gm7_5*GP_Q6f8nR{=Kxws{5&bH!76% zX2Ven*La|rX}@LmvyQI>StEH&l>zjk)}IQqd+XtBUqFPX=_MsSlu?h;3#>GFDPHYanMve~WyV;u4*^6PDLHoLmE`oyw$bwfoN~CyO5crGv`-E>=FaF_cc8 zG%EJZb!U0ry|kF6EtWWh`4oQaY$x{NYe0^SJeOqO3`6eHNuRdC63or4+;yI-656g! 
z>~ZV&BaKW$9=GNtOIikwRylCE+$mI12P;3u*fTsWl;c@sC~9a`wBo6J`oVo1j$6o$ zg1cs$;p$V-%;@SlmCME;PNz;0AbgNw>9=tM&H6X*yGq@Ue+uIAlsMb&m0m3VCe%?b z+wf5_#e?pO;okjED|v*#BAvt!=0cCQW@>jBerJaS3SYoL+^ii^nL`8AaD#5~&7zuucDA<`Mmd&VuT{JKLU^>&^PpTwmwJ;_`1*3*#nCxpjDnwb`yHdt zkdnA;HxD7u!_GT*RLBrGDcy7GeFlUoTc@A8*~Q4%JALSLz!3L(1*u64XDL|mIVb<) z62*CDMXK|kEN1fI4IeRrq^AX&k=Iq#ZwFp~xkLYB(D5P&pvFKSw*Z((`g@<1hatSn zBL|%(5j#UNbr!0NccAz_Hr>5Z;(xwTiy6DR08fGR-Bh%{x0BZn`DKC{Q4T0BgptUS z3ck~+Xv!P?#ZO$6`~>X{-I@Zc9R1d z$2BFjDm#8xR*XN>MF`TMKE5s_E2xPF5YhLey;o>qsJh_traXe=o+gI8R(Ffy7&EiAwvc9d^W!CXGppshB)qSTH4=6P(z7&}`3gR2C{J*87i&qX z>&_ZY&yQ*-<^-<#cS>QDN@=2%_@6&tyKQQ{-7;*T-dtE*siCiX+#iF3&u*^~KYqR(rE3yokc_J=RL<@xT zU`PRhF z&3ps`B+T5AY6cHqq*JXNC5yvrkX6Ue_Qk6|7Rks)a)kB*r?L7qg;9-~SAZ(Q>)u`%&rP)ze5<`umZ-p%%*l}J3rp~&8`Q#py)TeSs@`n+}Z_7>=%Ux*hu_mSn zGCjf)sZRBX2~~ej){+gmnQvY3vu|8j>bmxUg<_06`XPa=OfU^OBPBU!x+LyhQ-Y!! z7pBX|A9U#zTQs@$t1?wRrJKI822+HVo^5wnRH*8;uA?$_cS#o-+6P`f3)R@P?Rlx< z2DLBjdp*czR{?oa?XL!Y5Rxqo@I@IIxp2e~8&LMaV~Gp`08Ok1P^tw9lsIvap8x(H z*rHe%BetV8)gD;IL$;b*Ahp(2vzRN9YsOVj^$<5w=mOHPAr0(t`uzOp%nO!m2*3pd zrI9a>uPw|%gxx7ino{Lv;u!o+rKN43`N1;iWiNhecyKN8;$~P!JD~3p;BrzJ7kx5H zq#>hMSj$f-P1o3ufVb*<=Q`;HnXJo<0}PDrQyD`sPayQ&^=y^pqHeRlMu^7BK#=lE zn`mWbx|703%zpn)&lTPIsRK>A(TRg{>WK#p$L8K<%RM$1qKr1T>hL&MckGmsaQrl~&rG zja-s>x>Y=$n_e7H)vUT*e3jtQl}Gs5SC(VuxmaRMb+7WFc;51PSd(+MboMK8#Xn!| z7^#|+%ev^zZ$(HpCFkc_YF1wQAyeK5==Rfz=UOI{9o5R~T78OUzf~IW-5;;}{xLF) zyfdb|Wr^aGg0%`?g8Wvac$m{WIB-ihU$*zKklpKfam7C1M2SrMI9J0y#_88w&xYI!>x~(noM4k*`wBbt9N2PwoI#{ z&T+r>p&QlH>f|oT5FZUXa6VbRxYBs2Y=T*Vlxce`6y8P^>z3I4 zy0dB36+z>wx`wPM9?}cuI4qI4oV6M0RLsOpM=-_lzj{r{AM75=e|{;AA?6H(3>%R; zvMf`80QVW(3xd&h2v~R#PR9c;dd+A~OHUNy0pw&;E{>+^x+@slt84un;CrU7D{SW) zhO>NtyKnVPrf3Fl$-)-ogc~E+oi)sD2A|6=jm&p#0HR0W=~cDl=bFZkV2(FWvh7R} zgYk5!q8i>vo~n8~fj*GLM4G>NX}^m3fw(?JsR}_WZxN~du%Lm?I*T->g*YotJM#_j z)7HKw*_-P&g!e$>Z=kz>XfUqRhjv)JZm7IRk#&id1WSHc5gSrGd_~(paA*M9jm$cyFmwtYy-mS|tW`sp z-FbUPP8_s{wtxoSJpHiS!>qW&;2MvXGxs`yEif84u&pAnU 
z!B}3P{qzlOHnQFygv0)f2PbmgZ*Q$#JQvCkd7X7+{1m5*I&bA-?f5BTK|B7a*IB}G zu!Bj)4dr=f(UCfoq^c|Ka>3G#u=%j0KJ+;y8b~+vW8PmQHajrJJHm$9J6n%Li9Mm` z|7-89!>UZTH{cOF5yS$C0hA6&X;h?ZKtNiM4bn>+u{BoEgvWeCPY)`@^{|ui;YmyWjVDpJ&Cr?sc!Gdvbq?hO%JS*kP7Vh`B%J zsDT3Sd^t1kI7{YK%arSzy6JM$O{Znd84u8iN6}xL&hb*Z=Gl6XlM&-%)RDs~$IX%` z#IKsSkk2XzR`-*p+~##g^R#KA=gKJULw-|dY!tlvvfuuz49_2b;yY~BO3rvlRi}{?Q?d)bfUbCsT=#}18poYWw2pKV?*J8NLd+2lF2bAa3otjAy1px7<_BtKe zYn*O({uXewC-@HppSX^<5ju@Khnu%7ik|PI?7RvsA@YSC!(e4lbC{1tW9LUmr{(!O zFF(W9`IKX}Zi!~SCrh+t#|3xxR_>1w@!pdT9KYb0bglk|YEwj*mrgeus~~|rJm>cr3dodW+itT)HM{Vq9l9^j_ZaFA>=Vg`o`t13tpH?o=gCjI zbvPZL1RcB&x8G!AB14AXe7V0To4v*kP&|LG;GO0+bI_@qRUwkc$-xOaF;#qRte+GP z8}8(skmhcCCt2-QaDZ2L{~R>R*<7cx8w45$i}`5t-qig+O5;AyF{Z&cb+?Jr!04-d13I&2CK%5plfwJ zgNhx}I)d*T!;6uhsTcF|1gb%TO_x)oZk-bp3FGUy6p^ss^3XLW-cBf7>n@X;$PtM! zGQy-IWrF{0{q32UIYYhZoyGEPHrGy^i!#{>uTtyPQhm~85*$Xj3B^Ye6hqZm<&iUy zha&R4nlhj#h6kHlOmRmVt6XJqg7Ir0MGGGt6cAoF*Pt23^zjr)8R`tHRg@gd$XLiR7 zsoyJ3KQKL3X5;S?G0MrbTcJ2kdmkdt!%=<^Pu4T#u-ggu2r;=J4^hj=b71D1ivUFo ze!p~nyMu(G@zI`rZ?^9_vR~*O15IkO6UZA1zKUz0Ns8bxRk2At6T1Z^92t|~EV^(N zlUS4@*!iqfJY*P5kyI9>H&VYB-jjx8=IHe(BXKPc zMp}rNJbn|oyp>yQE-Qg>8WNPYAf+go>EDoI$g^R|+XS&*!WjXFfqdBPuCJGN^A&*$ z;7-ghrM=e>qISx*ud>;kr^5utIvID`|GrTNz^ifZG9(4^JBFwieZp>3bXV8w~izycLM!`v{@SK_q<)uM*tuaR9h#cpD*oiXmmat z9J{p`OOXvm%c!#6$Ht{zr$fz8Y8mmgpa}v>CSgrU`Ae|B7DCOFV9KJF<6fNO44Tf1 zYI-5MC3L%xRHeTZm4Nd^Pa12gy*$qg(gf*aiMB%%X|p3Y1O1&5_rUSeise#pd^KKa z>Mf&D3@hW&2iK0>l{-!3PjxHmIqxo&Ljl*eyd{3Pb*F^Ok!uf|7!@^j?jE>jM1Kv^ zCYKn=YRyCXM`2VJ1`h*$N@&X4tZLP{o(pWh$C zqe^s>G-W|2{K{Oc?p4DNnf@0XM?UB|g>IDSOu0oY1axFO!is2OGt6^wqnzJJLQ9Rn|0&Rs6{wQY5(1 z`iJQvn62r@PqF2gD&3J5jGgtQ$UHX!!|org#1mnNwvf|v0s#Qn3rnaJon9l_c2iqI z_6xar0*SLP>eGooX()K>n-+j|UGvRq*S6zO$u0*^UHF=4cTvH!#z*aD!8yQ-ho;O{ zh{Rb0bpRjngleY!%V3L<2y;#KkC1wcL6-FyuQZ?TU!PZdW_RyUquo96;gh6YwN$Hf zEH;Pf?(m2z8&Fkvj6W%K8hicPdowa;tEg6v2toyzL@3RMS>+O7Yj?11u9cCw2tC+{ zp#m?M+LM(0ZG2L$S&GIRfIwDkp|n{{v3X~eyL+eEZQmdN7) 
z>&Hhlj9d4}?vc^i^)I}|?ZVxo0dpf(OMWaKZ-m_DzOWZIvWK)D)X=#Ky~{!Skj~do zQl1rLH?q~t4UEc5QAKUD?0Lg!j}?~$c{~)fx?i~{r_U+4E`8?TcC4i$6>mpOp7WG+ zU@wt+Ww+Ohuy{m0n%vRrHlhXW3M z4$jZK9e^>ljE>4^67zH4k%!cFoRfSq8a*Oq+8{>$O7p@|gq6-f)(VfOijWtq2F z3X|l)oVM$Ax9%dqn5qp@Es(&uv{6W5%A#dx<~VN-M=*5bCCyO;;O4ig)VaElCMQ>$8_) zZPBt^juRigjI=`er-Q5{fBeu7w_!;S@BsFjDf9aET>BlyaPi2u*ix@`YrXbw|IH5> z!Cv8=Gk0TKz8#bQmlq^c5?8byH#C&^+q?b0-_gi%h@J93oAUjS*Li>7LA~~?$45vu z?#KW8jr{BT#a(xTUrTlh-TOaYXM9V~)FF-&k`n*nt^bF|uGJ&5acN_DVtn?0yN|B_SxVK{<7D8ZJ%G;XQM&caD4vLKmFQ1zqZeR z+9N;NP`|d%ukG_|`}`%~{}np_#AfWL`4u|<{LK7+w|%B8lL?Q1NW3oFBF#Bb=;ni# z&ZAJF0->gUXaV4hOdW#s42iartZP|`)Aw#puUaBE}MR%DU2X>g6TxigmewD z653y0Md%ldfRYR5NO?^}VfFG8F!GS-x_Wri^_Tpc6t4B`>rfllq7SU=4jTYO zHZHDb1FHNteAp%BHDINSNTC)=XB9PQn)0ms5-&V13_@V-68~XA{u)zfd-3+p_$9Zj zTaH>|ls2+P#cWI)N9gv4vcwej?YF{CYDC4quZ(}1+9YRCjo!6!i=%Tqm zg&a+*U`K=*YgA0yr& z5^|JtKS$aD(9)~-hqNs+K1k}0E~5?-v=~TN7RF1%AS?t2LZ?f8Z>LT(-4>9^(3Lx5 zU5NqVvIG;!)LK^{iitsCS`|1)BoGa3gx(f2odWg^&L$ z^=b_(@E#>Wgr-L~I_>VGc0*`TZN4d7){<_5MBY~T2_qoss%PJ*h6iHWbIGSwTi|hX zK%z3XSmqqN6_%}nFAva1XjytZg#)S-uSzW_`*hny+Ypn^z31@oR~%r&F5RNAf=zt= z+T|DDZ)V*_gB>DHvHOCmRrg0eOEb7@$&MiYw3zlFP&EGb9=)l42i!&KMj6e{@2_h> zh6or``Yks)AD``!*+;=Q46kN**o~H-BcfJ2S9nH!#6qlfmc|=|*!gCRs`s3laa)1@ z-P9K2Ft*GF6=*C)e__buz5%goF2 z{?qNl0Z}-V64P$<#UVd}x7Pt{VcUmM$PuU>J+lUb&~VVi@>~CTYyDxe-}!2~Nn0)H z{<8fnR15d=HAl%(#O}BuPCsOP%+RMFiq0N@U&{_6%}*FnWe(Uf^u-yewDi0M)kok# z968#DDhZ@=tS5n%c!D@e*II)CS|M%Q)QVqiT;ug$ETRkNUFrVC)5X>bIW*pv8#xo0 zvSmBnzb|eBQ*CVZkS<6Qt!66Ki7%+|wVK;*cK8&2 zaSI5;;g+7yLc^XL#@rlDvVeWga0zMOAD&RhrHtET zSA^k~jszI*D+{&ZhQX5GZ)tYd)JW&()J7OJbH_2;%9H*&__%yE^P^As5)UFfKCu~C zWNZ_GW;+57Hk-vKB87F-rw)cTjN7F8%cpnQ?KTxK_UO6ArNH%%_=9o%c2kycQ}+(} zQ}I)omy56)ZCiuAYx$6!JrQu?`@ZxNS0cULn0b#qcfhr4QBR3HBbD!uk5KNa>9Pa~ zJG2VSJKaXRlVwdB|B6Jf9Y!)u*irt^RBxnesvMWvg=6S z<)JYzKX><5>e{Wq4^ao;IfcBVnbuz*_e4saEK6cmt67M7EFk`JDIu~xPA9g%_w2v8zJlP5s|;;v2Q<$)4?#NirItxrpMjTCMB zO7zDc61;Smx|8SoBMU9M1zG4EVB?wz>yE$CvHNhYTWtXJ858nrz%?p9=KOt|5EMM2 
z9o8DOX5tglWMf@kEbFnEw!G~Z7M&vrMH2yljy}GFHMNLzeCIuDqK@AEU4F>vysUsD zqCQxd=h8&H{;jTm3F1+b-~>$>;m_XQL82-hRX_iR^aKlq92dqR0ds{MDiJ*Cw==_| z4;6vQs*ZKxjkg}de&@@uMgqXJLu}#79@H5L{bg>?uzckWc+Vp&hn`8@2h-sLLvjQi zN86#7)HN>>xI3q&8Zl|b-Z9J8^pZkiJ9v145C*Fi|QPXi?&a4bOWO# zD&|s3d_DsqY_&pv<>CBCDXlT^P6e<_Jr?RCtEV(84BV9R2IJz9{=A9U%H1(7LOe_b zrs^o>KGc

5Ih#eD{CrtqC0cVlX!Qw9U&KsX5Kd?ZZDLIB#Azux!c|(;`L!jG)*zz)hGKa~xgoW7n zauP)pSWnx~J(;YeloTp^exi5nYo1V5FLX-<_pqbw|+9 zZ+4#rZiB{%NXbMeF#V~+G9{l5tBt$ffF{-aVcb5JlKa$M24SOS`?WuSjZCk|M|}lq zN!a1`+xEk~6s2mR(Ms<{>!5nxiZ~ug3Fp}t-C*DiA78ioATE|!_n_v>5LM7v?Yi{r zBaB+;2D4fg9%pkzE-O{ZUOr_w|LZ&3!=bP0P`THodUCehH+D*)8^-D&r31t3L$Z;< zc4AK#a7ylR3(o>G?za<~-T6fep*(M=q8YuvE3;sE#JZoqD#_-K>i1W;!9v4XN`kYL z6=R_wAS%>&c!fMa=GbP&ylX8j7>s#&V8LF8x$Fb;yDKD%LT#vI*OE!`8dAt1(m!J| z)?Ii?+Pk!w73+GBEvwmMab(gKbshr+HzIkA!0S(P7?z4k3inytQKE1w_rH@O3M5!zT#c7}Lm)DRdCaYX$o=z<^H@d7*6GJ6v zcRAMwrReaI$IeSSE=(YnIv6OMiS0+raK&#o~9J`JZ7IOvXpt(TF&F8*OFzQyX9dlBP9 zMVMSS&#n`uLrQq&kB{hbw|nB;rwQ_mkBlRjizG86%=DGIf!@CF2xnA zGB2kjHC+qWYU#Exk3OiUleB)Y;Z2bc5vK9HO&7Z3^_R^=abwRPDc;>Hj0incC!c9; zn+IFi69$o&3v`?GgZ#sxwniXcUmw5K>K69meoyYwC=NFM1$u)dz=n`ECR1?tOP;&}<^tgpK*#kTW2n1Iho1$%;FfTm9oVp`9>=O^nNB(|RQHb- z_da4!>V(h*oaX3D&W<+7pSnv86!i)zU850mmaq-Qf9;IA2YRJXFLllq1mi5)4SWS0 zfG5b|8Pbl1D^&MhG}XDKekD6fZEz~!8YL}j1i3APkHLni z&?*-b_P(i}@J_`!I#9h8nt13g2~3>nW?}bjADcr;39F#_(bI(hD*D#WF;5wD$ag+M!n2vsvNza?oMT6>42O*1+EYc{?k7w6X^da#7fgsfGcT| z|8D5*VZ^vgTP~5OQ^+1x`@{Lexx<0^%s8O99pJ_zqFGss6-N(s>&r#xM8SW;BE{{> zQwXuxmn_SF3MNuD$nXkI{Djw?hoUJ4lM%ek0jRn%OS|;vZt&WkYsWzIm)CPml~Y3r>2;Q{ zX4VA|wFZGq>|F7|yu}zLrtkKTkp&G530%Pm0I-s_*Tn@>8PD_Pv8htzz47nA1*zta zf5jf-*JF8mjgGVIY6^IC->kkVpxduegA-_mSux>TlFq-Fv}TBt0-q+?_WdpUZ>4qR zpMRRHq@waIEBU%}y!&>5R)lc=-AC;_Ga>R0x8#}Ip6wqq!0zRvQ#hF}FaipIHK)3L z2W^pc1j&&@Esg|th3<#V88Z-wRP{R)?~6x%Y+6*vp3f!)M$Y#v9cM{MIYYn83SgLd z)jtk!i-6(_Y8*^skv|zR6_!;oQ%k{2C&q2#$jwd3bGa5eu>x0m&&9l;3 z0>tS;kHH39NV(R&elY8Z1V6q>enH3Z&T%wtrg_pC`nMEj_iJw|F|gQ{2YNq6RPX&I za?4JHJ*O}#Pu+UeDB760JH?{hy+D)O{O(M+l&Ga!!Dof-gUh6|=ANbXHV59s3dtrg1a%>HF+Z@T-!Th6)fr&R zZs&r2U0>aMg_99uM$Di^lwVtY-ofdJ3_kVx6!cf%4Jt0fnGuqB*pI&Rp8nFG?*@Ve zeWmQc7M9yV=jWWfLIv^e>2QRRLkC7(irF z^U9FM)1aNUV5@yItLIblZ|m~S9DNHEyu_EGL~-D(dHK$B9IRvp3IfgM`mn)uW|&51 z3L^j!YtlUa?L*>9BoMKw(qh8bdA*E45U<)?Znf-!`k=QxAK_^P#^-Sdaa|+-bIc0N 
zK?Tf~*N-OWpjqda#5!g1lJ2ZaJvEZ$n1VEOUZB6<$#v3l9jjHyp5a%`F4E27P1djVDHWqH;I zZn?Yxkdc!=!;F*}Ar_N4mB7S6mY=BnfVcPJSVLMGZ*Lm08UG5}f@yD=*EXEX10Xt5 zk+GgV{(UD$C2pAkSI&#q;pUD8U8!QBTik&nvz%!Ypw}pNUS@zzVT470Ue?1emA?=A z=Q06J?pKO7r!|>-6uD%Sd!oGKalI>4O<)@gg{%Z?8i!o4EI+R<0HNyUNfR`D;p>{| zsv!+3ShM?79fh8&@|c!)Z?11I@nN(bgocUIhVx?WpcGsHae;=l>S~MAMXrdVv*>Ne zV|EkYn+z}E$4U=?AlhXga-U;1(gjt}T^^k58#*WHkGqde>*n~4EAS@~6@queZ@fb% zHGbDQNVLD4U}e!(OJeLl<`h#^K%xcoAJGod)b#3$5^zm%Ek zZ*hY|Ums^&Lg`p(Ex3T@uR$Fzke#r18k)1Oc!VOI*@85cDnJnf1R8PSoj?cI1Hfcl zeN3LA!{bO;x$%PU@Um51tpCQFF%=8vIJe^PLE%1g{eL!Nh8G;Wo}8A zSi`BWyxtTdq%IZ?;8(#^G)He6m=L=8@=n!Bm?MojL6Mi<_bkNpktiTm$LoFmSb@PE zLeL{2kBNSVRP|BAZghIb1u+=Z<7SQix>Z1i&zN4e^?7^11aN+ zYdZ&ifO@7$guS{2%{+`;j%HW{pi>8qInvF~P0O!HU0DFY&)(8+{(T~74A~{dFR7sc z+wzgIq@{nxkcy}O)LZ<`!Yk5DM+I*P1RnuM?%yh0#S0s!T| z6T1jW{|G{DBHBsdc_nOD+p8{|b^-a_x z+^D>v!RCU)VhiX4HrQC^WtACSD&~Rsw`F0H8BS6!xvfjyZd6O705F|mbroN9=K$p4 zy1DNYG}Cj3^soc5{i6Mwx9utQD$?QNjeggxw+I6O5e!n^=dC|Ej)xw3XNb2(pm!8X zM=Wi3P@Cgl!sxGmBQCckfho8-ktw@p+gwY^izh5-4Se=w}S z%s}pIkehP@G#q4;**f0e*m>ZyeFY%8iW*s0ogp!0`Z!Fh!JU57W=|T0tqaJ(>AcqD zx0K!+>hVyfZQ#zU)s;n%&@6#eb17DA*%V|au?4!_3u8Go&Rqo&bNNlo@d-ANEPHfQ zbiD|ebsa0wM;gKWh4WQNGYWKZu-tfH>#x75bw@r*75wYCXQEo0Lkomj;B2izFtc*^oh+2xtL6MsI z-+hivAQ_?5_lM-9xK;i323-o9lJ2;_pPx-kn0woIxbw%y^7e%oY>)KSeXccH+LKj@ zOd!i$N9kH)Yo(A=#~ttwsrlG^}>B?7sc0d zyP7lXby$}-686n?qMF-1@vaWouvlf2E}difdn_vx2jK5H)KhKH&`&dIv|1|?>apG~?7K_CwF;6EH&7POdA z&Ww)n-sO~{As_x0qepf zHK1_`#e93PWW_`IEHa`ibKU;5qR@j2>ikJ66w^Px!?l%wtOTCd&(>tDuC1w6a7b0j z)x3OqzB#P{wdc(Jua-cYXi2x3j1ps!Fj`vhuJkjOxR zWY7*H(uaKDWui1xNg&I#O2grTDqpOl|G7hiFK-q|9x&Y#fVCnyZermt;t!wdW`+F3@IYHA6SRe&aSq)7d7U7-86sF0(-{cI^iRhmDOVYs%-KQ24RYc|zb$`7|<&~A$LL`xZQNsOmsKKKO?iD(oUujtDnaBIT zcw*cXF!W)l*xYpz9asu#dfhcRf}W^jQtFe8npOZWXJpe#U*n|Y@Mv%LpPM^bM@P`(+Kx9Nr5P^U5ywyET)7&Q(zt$+Itn&y~2 zXThH=0*rMG29T|sSHg{lOFb7|Y^f#8bm@L6Sqf#a-CcBPcoLtt?7H_U6^VG`@YuAyqs>LGM}(@~-5$vAXoz7QnA{K?M$_CdFT% z{#M$sfs(Yrr0mF$g_joJ!_tA*(moy{JDthl-e;M}q@jXcXf=qpQz4ueJp|ydI{YCo 
zUwc-{U8RJP2hy9@J0cz>8uil%y;)i|jPI=On+oc%oPZo3CTFMdA_4+BxWu^v5N5KzVUDj6XP4-v>*P+*_4^luTP1KFeF@e@Yq7JwF^S3?=N=SWi%X z2Hye{qWN+v4Q}wEm5&)fm;{HjyWp_Qn-HTEsYiHtsHqSh4Kk&&Jr@CZsb~lg2?x?w z%K~%Y^lPZT)iKpLYZ*f)DB&GriF#${)dwf9N`H&t*6&61C>hG|s&|i1W@j}8@yp;F z(x*%n1lU5D@SDR3aY1L{N4vwgd=Vl6c$H4s?KF{B0MzAJ0h~C# za~_J%4q%v|Qm<~^Bv*|Jd%HPaU4al+-yIStBW4y#h%3?|{*mgl0Abq#sB@bzyMF$3 zn}m+*onMuOxtkM;FEK%yCv4}@h+=q=Cd~J)9HG#cYT%-~^T8Mdl+JaVG${Ap?C&J<^I4?qL(2D|xvHN?A-{jJX0Euw86@26f<2Z$SU*7tQR z{_*Ks3MEI*9A@8;Pam`L^y>0tcKWInNcpXx;1BS(l}dfZ z?$nNPq+BEX7Tgr{ls34z$Mk{4MNQt5Endj!K-+JGY5M_URJ^ZDfx@^0RGN&LE2t#z z4x^xN3Fn3SDJQ2B6OVFUe2o&vqwkOo7_n8Z0{s+{N|}E?Y8RH)>vFNe;X-~|e&l5H z{5!8aD9lN z{orbxD0@{1cRxs1`{~p}r9sR#E||LbR_^>>t#>^>Fu%dThw>1Gllnvs?0mY@f-{L# zJOgdnV>=nbAK-?c`pj#r7E4rv0U*yF6ah7D!(I;V+2*AgC=5>_ri6%YmYtfBDXKDo zTV4dM--3n4WaHey&UYwpuK$GEdjZ~{R}DGl2TqX6YoBAWVr%Sf`?oQ9ajEUaHO|7n zgC_j2p6kd#?cgo|%ox=1k9e}W&&kdxYM!@qUzz^z?hR<4?r>7uc*Mqvy0KRrMPAuB37+X#78WIX$i&h zDAIJg@h~TNSE`t^(PgABG2U*h=nj6g=BFM_2h3c>*#~=~u=v)80uDLK{ph62PT0Ry zAIy?yf_3WCW^VL@OvDem1jERUgN21TW&JppAm0WyF@HFJO}|>OeesYab2t~8e zBZV_`qZ3dITmlJ!YM!Ctjv=3sVajLjd7}LxQW&6qk2k!ZErtr}En8OmlI=5SUx6t{ zrNUQjV2%$8+4K)J{(UNqj=mNP?Dou+ki6_6Ep6ja-BSskXt!(5EErTcXy>QtqTSxu zJ<7Va+p~8=@Zg}b;RHpwZZXU&1&TyLb)r-@fTg8AAj!_ag2kzvv3}Ly3<{d7;AXT5 zZ}rr=lJyHA%n-@Kx`T(PP!v8wSHV+I{zQTKl502$<%spAbS2X(GwyEyGZR8m9)SvUz*vopYwX@}E4 zzu;mvcw^*kV%xL;HT3xF(H+8cck2)iL}c8@dz7%(VyHWwW7mR(_2%%aCyG*olu!G3 zWTAClQf9A7>&;L>6mV#8B_GIA$yNj81cHB^;{%Fo!7_g;gt z9kBCvpC2>>ClhbStV_Mq6qK^0@#i^8T$%xvatAW#458bkc=eCOj5=9_Iu19_7se?D zfec!3Gu8okW1~Ix=d26BEFiK%rmqd85124Ez`Xr4^PngYAfoNs8!I}EYo%~Z>_zYn zxSPUsJkrR5<>cl4dHoDa8##2AjN=V6}Dt{;K>{Stya`%hZI|GZg3>0q-6 zRA;tFv*Ce9>EvTMsJ{RZ#i1OJ=Cls=WQz*qW~QTr)_|zm5uz2*yfzv8^^`_LgIOa6I`dPWn9Dd zVg7<~EQH)KvSFiw_CQxCY4d|vURlnEPRh@B+0_&A#6V{}GovO0N6lAnz~(bD8!n|{ zC1C&@y9CUFOjvV|aNQgltlPl&&x^DR`48pOriGbVqn{JMo~D)y4aC}6ZG(YuK|r>t zu9~a?|GB!rL(G0OxQ=`3-N~NPY8H?A7mAVRn+EoDrH{W*^AeWVaIyXQBS7a7VxMUe 
zB+1?jayBjyjFw!ps*{OH0Zz~=fZ@(?_)zV$4);QafmZU+I2e=1In3@afmisPn2vDL z1$G@1Vhs?|PqrY(s;e4}=7^fuV9ez&Ce@ils~UKKMnwT+rKy4AK*`g@Y2rj@E+EHz z>3S1_*FnHBzVnnVpE%bdCn`0C@O3!71ZoT+ti~~e28E5I6W09_c z5veiBZF6cy)K)Z%=i_x^MNjcM%Ri4%=Yp|Bh7eWgj}@EzF7gb3*p}Z^;B_L!*uIc8VgU?Kq;wA##w;On?Ke<$>VO-pJTG*^HJCo}v z$WoA#etTo5|26>_t>ENo_S{O6)PnVXZ<|sGS1(w9Yk3n0*LBQr(QzguH852Gx&{!T z1I!EAz2pkkba~dwp2dXoli;{e-vd-2{H7E4YXe=EC9@L0U9qR7*bd}gDROW?#UPmaxgFL=L-;f6Z{~Oiv|*~Coqb^rVZF=Wa^v( z1ZoGWUmm`%UP=45-VSFFfU+NfL9@0Z;Gg*HKOqbTS1NkrY=GbQb1dKC7d-d7#}&Uwvep`Uhqv>v{S5Vh^i&HB z*H(`MJap@`7ErZN}5JEh2#( zZABObEzN^lXV(<~zvn*Ryiyb;bUI|6C9B%VRQyZEge8&gpPDhPWSf%x>qY*%zj?h1 zWw)U})}zhe?&iO_Q0)bf^uSIxf=KW`C3ye0%L?Q_`Cl(VgoQ_HyP%)Hp2_jwJj}Nw z3Kr(*uZ3A}&i=iMzZT{n_UNyL`L!_nX});2W?v%P=FPYxivIce{r3mm6GVIhx7Ufb z@MK-%{Ifz1BVL$SLkQXcUH!wteX~CUQ|13R1NzVQ*E_*W(Ygt5_s_raAMWAbmOwC{ z>zjrAc6WbcBYyf%G%Qi|Vbi=3|KlThDJJjvZ@>CSM^L(JV9MBCVGTL^>3#qB;qSj3 zRnM~dfAf4qThfRG^ELJB*OG_7`2GLe7yQ~PfAgAtZI!>8b8)_Y})EO920bchhbBl^=H2Yt=2Cx_A*xV$)xwDK~tHciXN8ml>_00xJsopI 0: + momentum_update = make_sparse(grad_values.pow(2).mean(dim=1), True) + state['momentum'].add_(momentum_update) # update momentum + std = state['momentum'].sparse_mask(momentum_update.coalesce()) + std_values = std._values().sqrt_().add_(group['eps']) + p.data.add_(make_sparse(grad_values / std_values.view(std_values.size()[0], 1), False), alpha=-clr) + + else: + state['sum'].addcmul_(grad, grad, value=1.0) + std = state['sum'].sqrt().add_(group['eps']) + p.data.addcdiv_(grad, std, value=-clr) + + self.momentum_initialized = True + + return loss diff --git a/benchmarks/dlrm/ootb/requirements.txt b/benchmarks/dlrm/ootb/requirements.txt new file mode 100644 index 0000000..b198a12 --- /dev/null +++ b/benchmarks/dlrm/ootb/requirements.txt @@ -0,0 +1,8 @@ +future +numpy +onnx +pydot +torch +torchviz +scikit-learn +tqdm diff --git a/benchmarks/dlrm/ootb/test/dlrm_s_test.sh b/benchmarks/dlrm/ootb/test/dlrm_s_test.sh new file mode 100755 index 0000000..e504545 --- /dev/null +++ 
b/benchmarks/dlrm/ootb/test/dlrm_s_test.sh @@ -0,0 +1,47 @@ +#!/bin/bash +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +# +#WARNING: must have compiled PyTorch and caffe2 + +#check if extra argument is passed to the test +if [[ $# == 1 ]]; then + dlrm_extra_option=$1 +else + dlrm_extra_option="" +fi +#echo $dlrm_extra_option + +dlrm_py="python dlrm_s_pytorch.py" +dlrm_c2="python dlrm_s_caffe2.py" + +echo "Running commands ..." +#run pytorch +echo $dlrm_py +$dlrm_py --mini-batch-size=1 --data-size=1 --nepochs=1 --arch-interaction-op=dot --learning-rate=0.1 --debug-mode $dlrm_extra_option > ppp1 +$dlrm_py --mini-batch-size=2 --data-size=4 --nepochs=1 --arch-interaction-op=dot --learning-rate=0.1 --debug-mode $dlrm_extra_option > ppp2 +$dlrm_py --mini-batch-size=2 --data-size=5 --nepochs=1 --arch-interaction-op=dot --learning-rate=0.1 --debug-mode $dlrm_extra_option > ppp3 +$dlrm_py --mini-batch-size=2 --data-size=5 --nepochs=3 --arch-interaction-op=dot --learning-rate=0.1 --debug-mode $dlrm_extra_option > ppp4 + +#run caffe2 +echo $dlrm_c2 +$dlrm_c2 --mini-batch-size=1 --data-size=1 --nepochs=1 --arch-interaction-op=dot --learning-rate=0.1 --debug-mode $dlrm_extra_option > ccc1 +$dlrm_c2 --mini-batch-size=2 --data-size=4 --nepochs=1 --arch-interaction-op=dot --learning-rate=0.1 --debug-mode $dlrm_extra_option > ccc2 +$dlrm_c2 --mini-batch-size=2 --data-size=5 --nepochs=1 --arch-interaction-op=dot --learning-rate=0.1 --debug-mode $dlrm_extra_option > ccc3 +$dlrm_c2 --mini-batch-size=2 --data-size=5 --nepochs=3 --arch-interaction-op=dot --learning-rate=0.1 --debug-mode $dlrm_extra_option > ccc4 + +echo "Checking results ..." +#check results +#WARNING: correct test will have no difference in numeric values +#(but might have some verbal difference, e.g. 
due to warnnings) +#in the output file +echo "diff test1 (no numeric values in the output = SUCCESS)" +diff ccc1 ppp1 +echo "diff test2 (no numeric values in the output = SUCCESS)" +diff ccc2 ppp2 +echo "diff test3 (no numeric values in the output = SUCCESS)" +diff ccc3 ppp3 +echo "diff test4 (no numeric values in the output = SUCCESS)" +diff ccc4 ppp4 diff --git a/benchmarks/dlrm/ootb/test/dlrm_s_test_fbgemm_gpu.sh b/benchmarks/dlrm/ootb/test/dlrm_s_test_fbgemm_gpu.sh new file mode 100644 index 0000000..c699043 --- /dev/null +++ b/benchmarks/dlrm/ootb/test/dlrm_s_test_fbgemm_gpu.sh @@ -0,0 +1,51 @@ +#!/bin/bash +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +# +#WARNING: must have fbgemm_gpu module to run these tests. + +echo -e "\nConsistency test: fbgemm_gpu -compared-with- PyTorch emb ops" +dlrm_base_config_="python dlrm_s_pytorch.py --arch-sparse-feature-size=172 --arch-mlp-bot=1559-2500-2500-172 --arch-mlp-top=2000-2000-2000-1 --arch-embedding-size=213728-213728-213728-213728-213728-213728-213728-213728 --mini-batch-size=64 --num-indices-per-lookup-fixed=1 --num-indices-per-lookup=16 --num-batches=1 --nepochs=3 --debug-mode" + +for weighted_pooling in '' ' --weighted-pooling=fixed' ' --weighted-pooling=learned'; +do + dlrm_base_config=$dlrm_base_config_$weighted_pooling + + echo -e "\n======================================================" + echo "Testing 32-bit embeddings" + + dlrm_config="$dlrm_base_config" + echo "---GROUND TRUTH--- using PyTorch emb ops on CPU" + echo "$dlrm_config" + $dlrm_config > aaa1 + echo "---COMPARISON--- using fbgemm_gpu on CPU" + echo "$dlrm_config --use-fbgemm-gpu" + $dlrm_config --use-fbgemm-gpu > aaa2 + echo "diff GT & COMP (no numeric values in the output = SUCCESS)" + diff aaa1 aaa2 + + echo "---GROUND TRUTH--- using PyTorch emb ops on GPU" + echo "$dlrm_config --use-gpu" + $dlrm_config 
--use-gpu > bbb1 + echo "---COMPARISON--- using fbgemm_gpu on GPU" + echo "$dlrm_config --use-gpu --use-fbgemm-gpu" + $dlrm_config --use-fbgemm-gpu --use-gpu > bbb2 + echo "diff GT & COMP (no numeric values in the output = SUCCESS)" + diff bbb1 bbb2 + + echo -e "\n======================================================" + echo "Testing 8-bit quantized embeddings, inference only" + dlrm_config="$dlrm_base_config --inference-only --quantize-emb-with-bit=8" + + echo "---GROUND TRUTH--- using PyTorch emb ops on CPU" + echo "$dlrm_config" + $dlrm_config > ccc1 + + echo "---COMPARISON--- using fbgemm_gpu on CPU" + echo "$dlrm_config --use-fbgemm-gpu" + $dlrm_config --use-fbgemm-gpu > ccc2 + echo "diff GT & COMP (no numeric values in the output = SUCCESS)" + diff ccc1 ccc2 +done diff --git a/benchmarks/dlrm/ootb/tools/visualize.py b/benchmarks/dlrm/ootb/tools/visualize.py new file mode 100755 index 0000000..f16504c --- /dev/null +++ b/benchmarks/dlrm/ootb/tools/visualize.py @@ -0,0 +1,1030 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +# +# +# This script performs the visualization of the embedding tables created in +# DLRM during the training procedure. We use two popular techniques for +# visualization: umap (https://umap-learn.readthedocs.io/en/latest/) and +# tsne (https://scikit-learn.org/stable/modules/generated/sklearn.manifold.TSNE.html). +# These links also provide instructions on how to install these packages +# in different environments. +# +# Warning: the size of the data to be visualized depends on the RAM on your machine. 
+# +# +# Connand line examples: +# +# Full analysis of embeddings and data representations for Criteo Kaggle data: +# $python ./tools/visualize.py --data-set=kaggle --load-model=../dlrm-2020-05-25/criteo.pytorch-e-0-i-110591 +# --raw-data-file=../../criteo/input/train.txt --skip-categorical-analysis +# --processed-data-file=../../criteo/input/kaggleAdDisplayChallenge_processed.npz +# +# +# To run just the analysis of categoricala data for Criteo Kaggle data set: +# $python ./tools/visualize.py --data-set=kaggle --load-model=../dlrm-2020-05-25/criteo.pytorch-e-0-i-110591 \ +# --raw-data-file=../../criteo/input/train.txt --data-randomize=none --processed-data-file=../../criteo/input/kaggleAdDisplayChallenge_processed.npz \ +# --skip-embedding --skip-data-plots +# +# +# The following command line arguments are available to the user: +# +# --load-model - DLRM model file +# --data-set - one of ["kaggle", "terabyte"] +# --max-ind-range - max index range used during the traning +# --output-dir - output directory, if not specified, it will be traeted from the model and datset names +# --max-umap-size - max number of points to visualize using UMAP, default=50000 +# --use-tsne - use T-SNE +# --max-tsne-size - max number of points to visualize using T-SNE, default=1000) +# --skip-embedding - skips analysis of embedding tables +# --umap-metric - metric for UMAP +# --skip-data-plots - skips data plots +# --skip-categorical-analysis - skips categorical analysis +# +# # data file related +# --raw-data-file +# --processed-data-file +# --data-sub-sample-rate +# --data-randomize +# --memory-map +# --mini-batch-size +# --num-workers +# --test-mini-batch-size +# --test-num-workers +# --num-batches +# --mlperf-logging + +import os +import sys +import argparse +import numpy as np +import umap +import hdbscan +import json +import torch +import math +import matplotlib +import matplotlib.pyplot as plt +import collections + +from sklearn.metrics import accuracy_score +from sklearn.metrics 
import f1_score +from sklearn.metrics import precision_score +from sklearn.metrics import recall_score + +from sklearn import manifold + +import dlrm_data_pytorch as dp +from dlrm_s_pytorch import DLRM_Net + + +def visualize_embeddings_umap(emb_l, + output_dir = "", + max_size = 500000, + umap_metric = "euclidean", + cat_counts = None, + use_max_count = True): + + for k in range(0, len(emb_l)): + + E = emb_l[k].weight.detach().cpu().numpy() + print("umap", E.shape) + + # create histogram of norms + bins = 50 + norms = [np.linalg.norm(E[i], ord=2) for i in range(0,E.shape[0])] +# plt.hist(norms, bins = bins) +# plt.title("Cat norm hist var. "+str(k)) + hist, bins = np.histogram(norms, bins=bins) + logbins = np.logspace(np.log10(bins[0]),np.log10(bins[-1]),len(bins)) + + plt.figure(figsize=(8,8)) + plt.title("Categorical norms: " + str(k) + " cardinality " + str(len(cat_counts[k]))) + plt.hist(norms, bins=logbins) + plt.xscale("log") +# plt.legend() + plt.savefig(output_dir+"/cat-norm-histogram-"+str(k)+".png") + plt.close() + + if E.shape[0] < 20: + print("Skipping small embedding") + continue + + n_vis = min(max_size, E.shape[0]) + min_cnt = 0 + +# reducer = umap.UMAP(random_state=42, n_neighbors=25, min_dist=0.1) + reducer = umap.UMAP(random_state=42, metric=umap_metric) + + if use_max_count is False or n_vis == E.shape[0]: + Y = reducer.fit_transform(E[:n_vis,:]) + else: + + # select values with couns > 1 + done = False + min_cnt = 1 + while done == False: + el_cnt = (cat_counts[k] > min_cnt).sum() + if el_cnt <= max_size: + done = True + else: + min_cnt = min_cnt+1 + + E1= [] + for i in range(0, E.shape[0]): + if cat_counts[k][i] > min_cnt: + E1.append(E[i,:]) + + print("max_count_len", len(E1), "mincount", min_cnt) + Y = reducer.fit_transform(np.array(E1)) + + n_vis = len(E1) + + plt.figure(figsize=(8,8)) + + linewidth = 0 + size = 1 + + if Y.shape[0] < 2500: + linewidth = 1 + size = 5 + + if cat_counts is None: + plt.scatter(-Y[:,0], -Y[:,1], s=size, 
marker=".", linewidth=linewidth) + else: + #print(cat_counts[k]) + n_disp = min(len(cat_counts[k]), Y.shape[0]) + cur_max = math.log(max(cat_counts[k])) + norm_cat_count = [math.log(cat_counts[k][i]+1)/cur_max for i in range(0, len(cat_counts[k]))] + plt.scatter(-Y[0:n_disp,0], -Y[0:n_disp,1], s=size, marker=".", linewidth=linewidth, c=np.array(norm_cat_count)[0:n_disp], cmap="viridis") + plt.colorbar() + + plt.title("UMAP: categorical var. " + str(k) + " (" + str(n_vis) + " of " + str(E.shape[0]) + ", min count " + str(min_cnt) + ")") + plt.savefig(output_dir + "/cat-" + str(k) + "-" + str(n_vis) + "-of-" + str(E.shape[0]) + "-umap.png") + plt.close() + + +def visualize_embeddings_tsne(emb_l, + output_dir = "", + max_size = 10000): + + for k in range(0, len(emb_l)): + + E = emb_l[k].weight.detach().cpu() + print("tsne", E.shape) + + if E.shape[0] < 20: + print("Skipping small embedding") + continue + + n_vis = min(max_size, E.shape[0]) + + tsne = manifold.TSNE(init="pca", random_state=0, method="exact") + + Y = tsne.fit_transform(E[:n_vis,:]) + + plt.figure(figsize=(8, 8)) + + linewidth = 0 + if Y.shape[0] < 5000: + linewidth = 1 + + plt.scatter(-Y[:,0], -Y[:,1], s=1, marker=".", linewidth=linewidth) + + plt.title("TSNE: categorical var. 
" + str(k) + " (" + str(n_vis) + " of " + str(E.shape[0]) + ")") + plt.savefig(output_dir + "/cat-" + str(k) + "-" + str(n_vis) + "-of-" + str(E.shape[0]) + "-tsne.png") + plt.close() + + +def analyse_categorical_data(X_cat, n_days=10, output_dir=""): + + # analyse categorical variables + n_vec = len(X_cat) + n_cat = len(X_cat[0]) + n_days = n_days + + print("n_vec", n_vec, "n_cat", n_cat) +# for c in train_data.X_cat: +# print(n_cat, c) + + all_cat = np.array(X_cat) + print("all_cat.shape", all_cat.shape) + day_size = all_cat.shape[0]/n_days + + for i in range(0,n_cat): + l_d = [] + l_s1 = [] + l_s2 = [] + l_int = [] + l_rem = [] + + cat = all_cat[:,i] + print("cat", i, cat.shape) + for d in range(1,n_days): + offset = int(d*day_size) + #print(offset) + cat1 = cat[:offset] + cat2 = cat[offset:] + + s1 = set(cat1) + s2 = set(cat2) + + intersect = list(s1 & s2) + #print(intersect) + l_d.append(d) + l_s1.append(len(s1)) + l_s2.append(len(s2)) + l_int.append(len(intersect)) + l_rem.append((len(s1)-len(intersect))) + + print(d, ",", len(s1), ",", len(s2), ",", len(intersect), ",", (len(s1)-len(intersect))) + + print("spit", l_d) + print("before", l_s1) + print("after", l_s2) + print("inters.", l_int) + print("removed", l_rem) + + plt.figure(figsize=(8,8)) + plt.plot(l_d, l_s1, "g", label="before") + plt.plot(l_d, l_s2, "r", label="after") + plt.plot(l_d, l_int, "b", label="intersect") + plt.plot(l_d, l_rem, "y", label="removed") + plt.title("categorical var. 
"+str(i)) + plt.legend() + plt.savefig(output_dir+"/cat-"+str(i).zfill(3)+".png") + plt.close() + + +def analyse_categorical_counts(X_cat, emb_l=None, output_dir=""): + + # analyse categorical variables + n_vec = len(X_cat) + n_cat = len(X_cat[0]) + + print("n_vec", n_vec, "n_cat", n_cat) +# for c in train_data.X_cat: +# print(n_cat, c) + + all_cat = np.array(X_cat) + print("all_cat.shape", all_cat.shape) + + all_counts = [] + + for i in range(0,n_cat): + + cat = all_cat[:,i] + if emb_l is None: + s = set(cat) + counts = np.zeros((len(s))) + print("cat", i, cat.shape, len(s)) + else: + s = emb_l[i].weight.detach().cpu().shape[0] + counts = np.zeros((s)) + print("cat", i, cat.shape, s) + + for d in range(0,n_vec): + cv = int(cat[d]) + counts[cv] = counts[cv]+1 + + all_counts.append(counts) + + if emb_l is None: + plt.figure(figsize=(8,8)) + plt.plot(counts) + plt.title("Categorical var "+str(i) + " cardinality " + str(len(counts))) + # plt.legend() + else: + E = emb_l[i].weight.detach().cpu().numpy() + norms = [np.linalg.norm(E[i], ord=2) for i in range(0,E.shape[0])] + + fig, (ax0, ax1) = plt.subplots(2, 1) + fig.suptitle("Categorical variable: " + str(i)+" cardinality "+str(len(counts))) + + ax0.plot(counts) + ax0.set_yscale("log") + ax0.set_title("Counts", fontsize=10) + + ax1.plot(norms) + ax1.set_title("Norms", fontsize=10) + + plt.savefig(output_dir+"/cat_counts-"+str(i).zfill(3)+".png") + plt.close() + + return all_counts + + +def dlrm_output_wrap(dlrm, X, lS_o, lS_i, T): + + all_feat_vec = [] + all_cat_vec = [] + x_vec = None + t_out = None + c_out = None + z_out = [] + p_out = None + + z_size = len(dlrm.top_l) + + x = dlrm.apply_mlp(X, dlrm.bot_l) + # debug prints + #print("intermediate") + #print(x[0].detach().cpu().numpy()) + x_vec = x[0].detach().cpu().numpy() + all_feat_vec.append(x_vec) +# all_X.append(x[0].detach().cpu().numpy()) + + # process sparse features(using embeddings), resulting in a list of row vectors + ly = dlrm.apply_emb(lS_o, lS_i, 
dlrm.emb_l) + + for e in ly: + #print(e.detach().cpu().numpy()) + all_feat_vec.append(e[0].detach().cpu().numpy()) + all_cat_vec.append(e[0].detach().cpu().numpy()) + + all_feat_vec= np.concatenate(all_feat_vec, axis=0) + all_cat_vec= np.concatenate(all_cat_vec, axis=0) + +# all_features.append(all_feat_vec) +# all_cat.append(all_cat_vec) + t_out = int(T.detach().cpu().numpy()[0,0]) +# all_T.append(int(T.detach().cpu().numpy()[0,0])) + + z = dlrm.interact_features(x, ly) + # print(z.detach().cpu().numpy()) +# z_out = z.detach().cpu().numpy().flatten() + z_out.append(z.detach().cpu().numpy().flatten()) +# all_z[0].append(z.detach().cpu().numpy().flatten()) + + # obtain probability of a click (using top mlp) +# print(dlrm.top_l) +# p = dlrm.apply_mlp(z, dlrm.top_l) + + for i in range(0, z_size): + z = dlrm.top_l[i](z) + +# if i < z_size-1: +# curr_z = z.detach().cpu().numpy().flatten() + z_out.append(z.detach().cpu().numpy().flatten()) +# all_z[i+1].append(curr_z) +# print("z append", i) + +# print("z",i, z.detach().cpu().numpy().flatten().shape) + + p = z + + # clamp output if needed + if 0.0 < dlrm.loss_threshold and dlrm.loss_threshold < 1.0: + z = torch.clamp(p, min=dlrm.loss_threshold, max=(1.0 - dlrm.loss_threshold)) + else: + z = p + + class_thresh = 0.0 #-0.25 + zp = z.detach().cpu().numpy()[0,0]+ class_thresh + + p_out = int(zp+0.5) + if p_out > 1: + p_out = 1 + if p_out < 0: + p_out = 0 + +# all_pred.append(int(z.detach().cpu().numpy()[0,0]+0.5)) + + #print(int(z.detach().cpu().numpy()[0,0]+0.5)) + if int(p_out) == t_out: + c_out = 0 + else: + c_out = 1 + + return all_feat_vec, x_vec, all_cat_vec, t_out, c_out, z_out, p_out + + +def create_umap_data(dlrm, data_ld, max_size=50000, offset=0, info=""): + + all_features = [] + all_X = [] + all_cat = [] + all_T = [] + all_c = [] + all_z = [] + all_pred = [] + + z_size = len(dlrm.top_l) + print("z_size", z_size) + for i in range(0, z_size): + all_z.append([]) + + for j, (X, lS_o, lS_i, T) in enumerate(data_ld): + 
+ if j < offset: + continue + + if j >= max_size+offset: + break + + af, x, cat, t, c, z, p = dlrm_output_wrap(dlrm, X, lS_o, lS_i, T) + + all_features.append(af) + all_X.append(x) + all_cat.append(cat) + all_T.append(t) + all_c.append(c) + all_pred.append(p) + + for i in range(0, z_size): + all_z[i].append(z[i]) + +# # calculate classifier metrics + ac = accuracy_score(all_T, all_pred) + f1 = f1_score(all_T, all_pred) + ps = precision_score(all_T, all_pred) + rc = recall_score(all_T, all_pred) + + print(info, "accuracy", ac, "f1", f1, "precision", ps, "recall", rc) + + return all_features, all_X, all_cat, all_T, all_z, all_c, all_pred + + +def plot_all_data_3(umap_Y, + umap_T, + train_Y = None, + train_T = None, + test_Y = None, + test_T = None, + total_train_size = "", + total_test_size = "", + info = "", + output_dir = "", + orig_space_dim = 0): + + size = 1 + colors = ["red","green"] + + fig, (ax0, ax1, ax2) = plt.subplots(1, 3) + fig.suptitle("UMAP: " + info + " space dim "+str(orig_space_dim)) + + ax0.scatter(umap_Y[:,0], umap_Y[:,1], s=size, c=umap_T, cmap=matplotlib.colors.ListedColormap(colors), marker=".", linewidth=0) + ax0.set_title("UMAP ("+str(len(umap_T))+" of "+ total_train_size+")", fontsize=7) + + if train_Y is not None and train_T is not None: + ax1.scatter(train_Y[:,0], train_Y[:,1], s=size, c=train_T, cmap=matplotlib.colors.ListedColormap(colors), marker=".", linewidth=0) + ax1.set_title("Train ("+str(len(train_T))+" of "+ total_train_size+")", fontsize=7) + + if test_Y is not None and test_T is not None: + ax2.scatter(test_Y[:,0], test_Y[:,1], s=size, c=test_T, cmap=matplotlib.colors.ListedColormap(colors), marker=".", linewidth=0) + ax2.set_title("Test ("+str(len(test_T))+" of "+ total_test_size+")", fontsize=7) + + plt.savefig(output_dir+"/"+info+"-umap.png") + plt.close() + + +def plot_one_class_3(umap_Y, + umap_T, + train_Y, + train_T, + test_Y, + test_T, + target = 0, + col = "red", + total_train_size = "", + total_test_size = "", + info 
= "", + output_dir = "", + orig_space_dim = 0): + + size = 1 + + fig, (ax0, ax1, ax2) = plt.subplots(1, 3) + fig.suptitle("UMAP: "+ info + " space dim "+str(orig_space_dim)) + + ind_l_umap = [i for i,x in enumerate(umap_T) if x == target] + Y_umap_l = np.array([umap_Y[i,:] for i in ind_l_umap]) + + ax0.scatter(Y_umap_l[:,0], Y_umap_l[:,1], s=size, c=col, marker=".", linewidth=0) + ax0.set_title("UMAP, ("+str(len(umap_T))+" of "+ total_train_size+")", fontsize=7) + + if train_Y is not None and train_T is not None: + ind_l_test = [i for i,x in enumerate(train_T) if x == target] + Y_test_l = np.array([train_Y[i,:] for i in ind_l_test]) + + ax1.scatter(Y_test_l[:,0], Y_test_l[:,1], s=size, c=col, marker=".", linewidth=0) + ax1.set_title("Train, ("+str(len(train_T))+" of "+ total_train_size+")", fontsize=7) + + if test_Y is not None and test_T is not None: + ind_l_test = [i for i,x in enumerate(test_T) if x == target] + Y_test_l = np.array([test_Y[i,:] for i in ind_l_test]) + + ax2.scatter(Y_test_l[:,0], Y_test_l[:,1], s=size, c=col, marker=".", linewidth=0) + ax2.set_title("Test, ("+str(len(test_T))+" of "+ total_test_size+")", fontsize=7) + + plt.savefig(output_dir+"/"+info+"-umap.png") + plt.close() + + +def visualize_umap_data(umap_Y, + umap_T, + umap_C, + umap_P, + train_Y, + train_T, + train_C, + train_P, + test_Y = None, + test_T = None, + test_C = None, + test_P = None, + total_train_size = "", + total_test_size = "", + info = "", + output_dir = "", + orig_space_dim = 0): + + # all classes + plot_all_data_3(umap_Y = umap_Y, + umap_T = umap_T, + train_Y = train_Y, + train_T = train_T, + test_Y = test_Y, + test_T = test_T, + total_train_size = total_train_size, + total_test_size = total_test_size, + info = info, + output_dir = output_dir, + orig_space_dim = orig_space_dim) + + # all predictions + plot_all_data_3(umap_Y = umap_Y, + umap_T = umap_P, + train_Y = train_Y, + train_T = train_P, + test_Y = test_Y, + test_T = test_P, + total_train_size = total_train_size, 
+ total_test_size = total_test_size, + info = info+", all-predictions", + output_dir = output_dir, + orig_space_dim = orig_space_dim) + + + # class 0 + plot_one_class_3(umap_Y = umap_Y, + umap_T = umap_T, + train_Y = train_Y, + train_T = train_T, + test_Y = test_Y, + test_T = test_T, + target = 0, + col = "red", + total_train_size = total_train_size, + total_test_size = total_test_size, + info = info+" class " + str(0), + output_dir = output_dir, + orig_space_dim = orig_space_dim) + + # class 1 + plot_one_class_3(umap_Y = umap_Y, + umap_T = umap_T, + train_Y = train_Y, + train_T = train_T, + test_Y = test_Y, + test_T = test_T, + target = 1, + col = "green", + total_train_size = total_train_size, + total_test_size = total_test_size, + info = info + " class " + str(1), + output_dir = output_dir, + orig_space_dim = orig_space_dim) + + # correct classification + plot_one_class_3(umap_Y = umap_Y, + umap_T = umap_C, + train_Y = train_Y, + train_T = train_C, + test_Y = test_Y, + test_T = test_C, + target = 0, + col = "green", + total_train_size = total_train_size, + total_test_size = total_test_size, + info = info + " correct ", + output_dir = output_dir, + orig_space_dim = orig_space_dim) + + # errors + plot_one_class_3(umap_Y = umap_Y, + umap_T = umap_C, + train_Y = train_Y, + train_T = train_C, + test_Y = test_Y, + test_T = test_C, + target = 1, + col = "red", + total_train_size = total_train_size, + total_test_size = total_test_size, + info = info + " errors ", + output_dir = output_dir, + orig_space_dim = orig_space_dim) + + # prediction 0 + plot_one_class_3(umap_Y = umap_Y, + umap_T = umap_P, + train_Y = train_Y, + train_T = train_P, + test_Y = test_Y, + test_T = test_P, + target = 0, + col = "red", + total_train_size = total_train_size, + total_test_size = total_test_size, + info = info + " predict-0 ", + output_dir = output_dir, + orig_space_dim = orig_space_dim) + + # prediction 1 + plot_one_class_3(umap_Y = umap_Y, + umap_T = umap_P, + train_Y = train_Y, + 
train_T = train_P, + test_Y = test_Y, + test_T = test_P, + target = 1, + col = "green", + total_train_size = total_train_size, + total_test_size = total_test_size, + info = info + " predict-1 ", + output_dir = output_dir, + orig_space_dim = orig_space_dim) + +def hdbscan_clustering(umap_data, train_data, test_data, info="", output_dir=""): + + clusterer = hdbscan.HDBSCAN(min_samples=10, min_cluster_size=500, prediction_data=True) + umap_labels = clusterer.fit_predict(umap_data) + train_labels, _ = hdbscan.approximate_predict(clusterer, train_data) + test_labels, _ = hdbscan.approximate_predict(clusterer, test_data) + + fig, ((ax00, ax01, ax02), (ax10, ax11, ax12)) = plt.subplots(2, 3) + fig.suptitle("HDBSCAN clastering: "+ info ) + + # plot umap data + umap_clustered = (umap_labels >= 0) + umap_coll = collections.Counter(umap_clustered) + print("umap_clustered", umap_coll) +# print("umap_data", umap_data.shape) +# print("~umap_clustered", umap_clustered.count(False), ~umap_clustered) + ax00.scatter(umap_data[~umap_clustered, 0], + umap_data[~umap_clustered, 1], + c=(0.5, 0.5, 0.5), + s=0.1, + alpha=0.5) + ax00.set_title("UMAP Outliers " + str(umap_coll[False]), fontsize=7) + ax10.scatter(umap_data[umap_clustered, 0], + umap_data[umap_clustered, 1], + c=umap_labels[umap_clustered], + s=0.1, + cmap="Spectral") + ax10.set_title("UMAP Inliers " + str(umap_coll[True]), fontsize=7) + + # plot train data + train_clustered = (train_labels >= 0) + train_coll = collections.Counter(train_clustered) + ax01.scatter(train_data[~train_clustered, 0], + train_data[~train_clustered, 1], + c=(0.5, 0.5, 0.5), + s=0.1, + alpha=0.5) + ax01.set_title("Train Outliers " + str(train_coll[False]), fontsize=7) + ax11.scatter(train_data[train_clustered, 0], + train_data[train_clustered, 1], + c=train_labels[train_clustered], + s=0.1, + cmap="Spectral") + ax11.set_title("Train Inliers " + str(train_coll[True]), fontsize=7) + + # plot test data + test_clustered = (test_labels >= 0) + test_coll = 
collections.Counter(test_clustered) + ax02.scatter(test_data[~test_clustered, 0], + test_data[~test_clustered, 1], + c=(0.5, 0.5, 0.5), + s=0.1, + alpha=0.5) + ax02.set_title("Tets Outliers " + str(test_coll[False]), fontsize=7) + ax12.scatter(test_data[test_clustered, 0], + test_data[test_clustered, 1], + c=test_labels[test_clustered], + s=0.1, + cmap="Spectral") + ax12.set_title("Test Inliers " + str(test_coll[True]), fontsize=7) + + plt.savefig(output_dir+"/"+info+"-hdbscan.png") + plt.close() + + +def visualize_all_data_umap(dlrm, + train_ld, + test_ld = None, + max_umap_size = 50000, + output_dir = "", + umap_metric = "euclidean"): + + data_ratio = 1 + + print("creating umap data") + umap_train_feat, umap_train_X, umap_train_cat, umap_train_T, umap_train_z, umap_train_c, umap_train_p = create_umap_data(dlrm=dlrm, data_ld=train_ld, max_size=max_umap_size, offset=0, info="umap") + + # transform train and test data + train_feat, train_X, train_cat, train_T, train_z, train_c, train_p = create_umap_data(dlrm=dlrm, data_ld=train_ld, max_size=max_umap_size*data_ratio, offset=max_umap_size, info="train") + test_feat, test_X, test_cat, test_T, test_z, test_c, test_p = create_umap_data(dlrm=dlrm, data_ld=test_ld, max_size=max_umap_size*data_ratio, offset=0, info="test") + + print("umap_train_feat", np.array(umap_train_feat).shape) + reducer_all_feat = umap.UMAP(random_state=42, metric=umap_metric) + umap_feat_Y = reducer_all_feat.fit_transform(umap_train_feat) + + train_feat_Y = reducer_all_feat.transform(train_feat) + test_feat_Y = reducer_all_feat.transform(test_feat) + + visualize_umap_data(umap_Y = umap_feat_Y, + umap_T = umap_train_T, + umap_C = umap_train_c, + umap_P = umap_train_p, + train_Y = train_feat_Y, + train_T = train_T, + train_C = train_c, + train_P = train_p, + test_Y = test_feat_Y, + test_T = test_T, + test_C = test_c, + test_P = test_p, + total_train_size = str(len(train_ld)), + total_test_size = str(len(test_ld)), + info = "all-features", + 
output_dir = output_dir, + orig_space_dim = np.array(umap_train_feat).shape[1]) + + hdbscan_clustering(umap_data = umap_feat_Y, + train_data = train_feat_Y, + test_data = test_feat_Y, + info = "umap-all-features", + output_dir = output_dir) + +# hdbscan_clustering(umap_data = np.array(umap_train_feat), +# train_data = np.array(train_feat), +# test_data = np.array(test_feat), +# info = "all-features", +# output_dir = output_dir) + + print("umap_train_X", np.array(umap_train_X).shape) + reducer_X = umap.UMAP(random_state=42, metric=umap_metric) + umap_X_Y = reducer_X.fit_transform(umap_train_X) + + train_X_Y = reducer_X.transform(train_X) + test_X_Y = reducer_X.transform(test_X) + + visualize_umap_data(umap_Y = umap_X_Y, + umap_T = umap_train_T, + umap_C = umap_train_c, + umap_P = umap_train_p, + train_Y = train_X_Y, + train_T = train_T, + train_C = train_c, + train_P = train_p, + test_Y = test_X_Y, + test_T = test_T, + test_C = test_c, + test_P = test_p, + total_train_size = str(len(train_ld)), + total_test_size = str(len(test_ld)), + info = "cont-features", + output_dir = output_dir, + orig_space_dim = np.array(umap_train_X).shape[1]) + + print("umap_train_cat", np.array(umap_train_cat).shape) + reducer_cat = umap.UMAP(random_state=42, metric=umap_metric) + umap_cat_Y = reducer_cat.fit_transform(umap_train_cat) + + train_cat_Y = reducer_cat.transform(train_cat) + test_cat_Y = reducer_cat.transform(test_cat) + + visualize_umap_data(umap_Y = umap_cat_Y, + umap_T = umap_train_T, + umap_C = umap_train_c, + umap_P = umap_train_p, + train_Y = train_cat_Y, + train_T = train_T, + train_C = train_c, + train_P = train_p, + test_Y = test_cat_Y, + test_T = test_T, + test_C = test_c, + test_P = test_p, + total_train_size = str(len(train_ld)), + total_test_size = str(len(test_ld)), + info = "cat-features", + output_dir = output_dir, + orig_space_dim = np.array(umap_train_cat).shape[1]) + + # UMAP for z data + for i in range(0,len(umap_train_z)): + print("z", i, 
np.array(umap_train_z[i]).shape) + reducer_z = umap.UMAP(random_state=42, metric=umap_metric) + umap_z_Y = reducer_z.fit_transform(umap_train_z[i]) + + train_z_Y = reducer_z.transform(train_z[i]) + test_z_Y = reducer_z.transform(test_z[i]) + + visualize_umap_data(umap_Y = umap_z_Y, + umap_T = umap_train_T, + umap_C = umap_train_c, + umap_P = umap_train_p, + train_Y = train_z_Y, + train_T = train_T, + train_C = train_c, + train_P = train_p, + test_Y = test_z_Y, + test_T = test_T, + test_C = test_c, + test_P = test_p, + total_train_size = str(len(train_ld)), + total_test_size = str(len(test_ld)), + info = "z-features-"+str(i), + output_dir = output_dir, + orig_space_dim = np.array(umap_train_z[i]).shape[1]) + + +def analyze_model_data(output_dir, + dlrm, + train_ld, + test_ld, + train_data, + skip_embedding = False, + use_tsne = False, + max_umap_size = 50000, + max_tsne_size = 10000, + skip_categorical_analysis = False, + skip_data_plots = False, + umap_metric = "euclidean"): + + if not os.path.exists(output_dir): + os.makedirs(output_dir) + + if skip_embedding is False: + + cat_counts = None + + cat_counts = analyse_categorical_counts(X_cat=train_data.X_cat, emb_l=dlrm.emb_l, output_dir=output_dir) + + visualize_embeddings_umap(emb_l = dlrm.emb_l, + output_dir = output_dir, + max_size = max_umap_size, + umap_metric = umap_metric, + cat_counts = cat_counts) + + if use_tsne is True: + visualize_embeddings_tsne(emb_l = dlrm.emb_l, + output_dir = output_dir, + max_size = max_tsne_size) + + # data visualization and analysis + if skip_data_plots is False: + visualize_all_data_umap(dlrm=dlrm, train_ld=train_ld, test_ld=test_ld, max_umap_size=max_umap_size, output_dir=output_dir, umap_metric=umap_metric) + + # analyse categorical variables + if skip_categorical_analysis is False and args.data_randomize == "none": + analyse_categorical_data(X_cat=train_data.X_cat, n_days=10, output_dir=output_dir) + + + +if __name__ == "__main__": + + output_dir = "" + + ### parse arguments 
### + parser = argparse.ArgumentParser( + description="Exploratory DLRM analysis" + ) + + parser.add_argument("--load-model", type=str, default="") + parser.add_argument("--data-set", choices=["kaggle", "terabyte"], help="dataset") +# parser.add_argument("--dataset-path", required=True, help="path to the dataset") + parser.add_argument("--max-ind-range", type=int, default=-1) +# parser.add_argument("--mlperf-bin-loader", action="store_true", default=False) + parser.add_argument("--output-dir", type=str, default="") + parser.add_argument("--skip-embedding", action="store_true", default=False) + parser.add_argument("--umap-metric", type=str, default="euclidean") + parser.add_argument("--skip-data-plots", action="store_true", default=False) + parser.add_argument("--skip-categorical-analysis", action="store_true", default=False) + + # umap relatet + parser.add_argument("--max-umap-size", type=int, default=50000) + # tsne related + parser.add_argument("--use-tsne", action="store_true", default=False) + parser.add_argument("--max-tsne-size", type=int, default=1000) + # data file related + parser.add_argument("--raw-data-file", type=str, default="") + parser.add_argument("--processed-data-file", type=str, default="") + parser.add_argument("--data-sub-sample-rate", type=float, default=0.0) # in [0, 1] + parser.add_argument("--data-randomize", type=str, default="total") # none, total or day or none + parser.add_argument("--memory-map", action="store_true", default=False) + parser.add_argument("--mini-batch-size", type=int, default=1) + parser.add_argument("--num-workers", type=int, default=0) + parser.add_argument("--test-mini-batch-size", type=int, default=1) + parser.add_argument("--test-num-workers", type=int, default=0) + parser.add_argument("--num-batches", type=int, default=0) + # mlperf logging (disables other output and stops early) + parser.add_argument("--mlperf-logging", action="store_true", default=False) + + args = parser.parse_args() + + print("command line 
args: ", json.dumps(vars(args))) + + if output_dir == "": + output_dir = args.data_set+"-"+os.path.split(args.load_model)[-1]+"-vis_all" + print("output_dir:", output_dir) + + if args.data_set == "kaggle": + # 1. Criteo Kaggle Display Advertisement Challenge Dataset (see ./bench/dlrm_s_criteo_kaggle.sh) + m_spa=16 + ln_emb=np.array([1460,583,10131227,2202608,305,24,12517,633,3,93145,5683,8351593,3194,27,14992,5461306,10,5652,2173,4,7046547,18,15,286181,105,142572]) + ln_bot=np.array([13,512,256,64,16]) + ln_top=np.array([367,512,256,1]) + + elif args.dataset == "terabyte": + + if args.max_ind_range == 10000000: + # 2. Criteo Terabyte (see ./bench/dlrm_s_criteo_terabyte.sh [--sub-sample=0.875] --max-in-range=10000000) + m_spa=64 + ln_emb=np.array([9980333,36084,17217,7378,20134,3,7112,1442,61, 9758201,1333352,313829,10,2208,11156,122,4,970,14, 9994222, 7267859, 9946608,415421,12420,101, 36]) + ln_bot=np.array([13,512,256,64]) + ln_top=np.array([415,512,512,256,1]) + elif args.max_ind_range == 40000000: + # 3. 
Criteo Terabyte MLPerf training (see ./bench/run_and_time.sh --max-in-range=40000000) + m_spa=128 + ln_emb=np.array([39884406,39043,17289,7420,20263,3,7120,1543,63,38532951,2953546,403346,10,2208,11938,155,4,976,14,39979771,25641295,39664984,585935,12972,108,36]) + ln_bot=np.array([13,512,256,128]) + ln_top=np.array([479,1024,1024,512,256,1]) + else: + raise ValueError("only --max-in-range 10M or 40M is supported") + else: + raise ValueError("only kaggle|terabyte dataset options are supported") + + # check input parameters + if args.data_randomize != "none" and args.skip_categorical_analysis is not True: + print("Incorrect option for categoricat analysis, use: --data-randomize=none") + sys.exit(-1) + + dlrm = DLRM_Net( + m_spa, + ln_emb, + ln_bot, + ln_top, + arch_interaction_op="dot", + arch_interaction_itself=False, + sigmoid_bot=-1, + sigmoid_top=ln_top.size - 2, + sync_dense_params=True, + loss_threshold=0.0, + ndevices=-1, + qr_flag=False, + qr_operation=None, + qr_collisions=None, + qr_threshold=None, + md_flag=False, + md_threshold=None, + ) + + # Load model is specified + if not (args.load_model == ""): + print("Loading saved model {}".format(args.load_model)) + + ld_model = torch.load(args.load_model, map_location=torch.device("cpu")) + dlrm.load_state_dict(ld_model["state_dict"]) + + print("Model loaded", args.load_model) + #print(dlrm) + + z_size = len(dlrm.top_l) + for i in range(0, z_size): + print("z", i, dlrm.top_l[i]) + + # load data + train_data = None + test_data = None + + if args.raw_data_file is not "" or args.processed_data_file is not "": + train_data, train_ld, test_data, test_ld = dp.make_criteo_data_and_loaders(args) + + analyze_model_data(output_dir = output_dir, + dlrm = dlrm, + train_ld = train_ld, + test_ld = test_ld, + train_data = train_data, + skip_embedding = args.skip_embedding, + use_tsne = args.use_tsne, + max_umap_size = args.max_umap_size, + max_tsne_size = args.max_tsne_size, + skip_categorical_analysis = 
args.skip_categorical_analysis, + skip_data_plots = args.skip_data_plots, + umap_metric = args.umap_metric) + diff --git a/benchmarks/dlrm/ootb/tricks/md_embedding_bag.py b/benchmarks/dlrm/ootb/tricks/md_embedding_bag.py new file mode 100644 index 0000000..7c4071a --- /dev/null +++ b/benchmarks/dlrm/ootb/tricks/md_embedding_bag.py @@ -0,0 +1,81 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +# +# Mixed-Dimensions Trick +# +# Description: Applies mixed dimension trick to embeddings to reduce +# embedding sizes. +# +# References: +# [1] Antonio Ginart, Maxim Naumov, Dheevatsa Mudigere, Jiyan Yang, James Zou, +# "Mixed Dimension Embeddings with Application to Memory-Efficient Recommendation +# Systems", CoRR, arXiv:1909.11810, 2019 +from __future__ import absolute_import, division, print_function, unicode_literals +import torch +import torch.nn as nn + + +def md_solver(n, alpha, d0=None, B=None, round_dim=True, k=None): + ''' + An external facing function call for mixed-dimension assignment + with the alpha power temperature heuristic + Inputs: + n -- (torch.LongTensor) ; Vector of num of rows for each embedding matrix + alpha -- (torch.FloatTensor); Scalar, non-negative, controls dim. 
skew + d0 -- (torch.FloatTensor); Scalar, baseline embedding dimension + B -- (torch.FloatTensor); Scalar, parameter budget for embedding layer + round_dim -- (bool); flag for rounding dims to nearest pow of 2 + k -- (torch.LongTensor) ; Vector of average number of queries per inference + ''' + n, indices = torch.sort(n) + k = k[indices] if k is not None else torch.ones(len(n)) + d = alpha_power_rule(n.type(torch.float) / k, alpha, d0=d0, B=B) + if round_dim: + d = pow_2_round(d) + undo_sort = [0] * len(indices) + for i, v in enumerate(indices): + undo_sort[v] = i + return d[undo_sort] + + +def alpha_power_rule(n, alpha, d0=None, B=None): + if d0 is not None: + lamb = d0 * (n[0].type(torch.float) ** alpha) + elif B is not None: + lamb = B / torch.sum(n.type(torch.float) ** (1 - alpha)) + else: + raise ValueError("Must specify either d0 or B") + d = torch.ones(len(n)) * lamb * (n.type(torch.float) ** (-alpha)) + for i in range(len(d)): + if i == 0 and d0 is not None: + d[i] = d0 + else: + d[i] = 1 if d[i] < 1 else d[i] + return (torch.round(d).type(torch.long)) + + +def pow_2_round(dims): + return 2 ** torch.round(torch.log2(dims.type(torch.float))) + + +class PrEmbeddingBag(nn.Module): + def __init__(self, num_embeddings, embedding_dim, base_dim): + super(PrEmbeddingBag, self).__init__() + self.embs = nn.EmbeddingBag( + num_embeddings, embedding_dim, mode="sum", sparse=True) + torch.nn.init.xavier_uniform_(self.embs.weight) + if embedding_dim < base_dim: + self.proj = nn.Linear(embedding_dim, base_dim, bias=False) + torch.nn.init.xavier_uniform_(self.proj.weight) + elif embedding_dim == base_dim: + self.proj = nn.Identity() + else: + raise ValueError( + "Embedding dim " + str(embedding_dim) + " > base dim " + str(base_dim) + ) + + def forward(self, input, offsets=None, per_sample_weights=None): + return self.proj(self.embs( + input, offsets=offsets, per_sample_weights=per_sample_weights)) diff --git a/benchmarks/dlrm/ootb/tricks/qr_embedding_bag.py 
b/benchmarks/dlrm/ootb/tricks/qr_embedding_bag.py new file mode 100644 index 0000000..290d795 --- /dev/null +++ b/benchmarks/dlrm/ootb/tricks/qr_embedding_bag.py @@ -0,0 +1,185 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +# +# Quotient-Remainder Trick +# +# Description: Applies quotient remainder-trick to embeddings to reduce +# embedding sizes. +# +# References: +# [1] Hao-Jun Michael Shi, Dheevatsa Mudigere, Maxim Naumov, Jiyan Yang, +# "Compositional Embeddings Using Complementary Partitions for Memory-Efficient +# Recommendation Systems", CoRR, arXiv:1909.02107, 2019 + + +from __future__ import absolute_import, division, print_function, unicode_literals +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.nn.parameter import Parameter +import numpy as np + + +class QREmbeddingBag(nn.Module): + r"""Computes sums or means over two 'bags' of embeddings, one using the quotient + of the indices and the other using the remainder of the indices, without + instantiating the intermediate embeddings, then performs an operation to combine these. + + For bags of constant length and no :attr:`per_sample_weights`, this class + + * with ``mode="sum"`` is equivalent to :class:`~torch.nn.Embedding` followed by ``torch.sum(dim=0)``, + * with ``mode="mean"`` is equivalent to :class:`~torch.nn.Embedding` followed by ``torch.mean(dim=0)``, + * with ``mode="max"`` is equivalent to :class:`~torch.nn.Embedding` followed by ``torch.max(dim=0)``. + + However, :class:`~torch.nn.EmbeddingBag` is much more time and memory efficient than using a chain of these + operations. + + QREmbeddingBag also supports per-sample weights as an argument to the forward + pass. This scales the output of the Embedding before performing a weighted + reduction as specified by ``mode``. 
If :attr:`per_sample_weights`` is passed, the + only supported ``mode`` is ``"sum"``, which computes a weighted sum according to + :attr:`per_sample_weights`. + + Known Issues: + Autograd breaks with multiple GPUs. It breaks only with multiple embeddings. + + Args: + num_categories (int): total number of unique categories. The input indices must be in + 0, 1, ..., num_categories - 1. + embedding_dim (list): list of sizes for each embedding vector in each table. If ``"add"`` + or ``"mult"`` operation are used, these embedding dimensions must be + the same. If a single embedding_dim is used, then it will use this + embedding_dim for both embedding tables. + num_collisions (int): number of collisions to enforce. + operation (string, optional): ``"concat"``, ``"add"``, or ``"mult". Specifies the operation + to compose embeddings. ``"concat"`` concatenates the embeddings, + ``"add"`` sums the embeddings, and ``"mult"`` multiplies + (component-wise) the embeddings. + Default: ``"mult"`` + max_norm (float, optional): If given, each embedding vector with norm larger than :attr:`max_norm` + is renormalized to have norm :attr:`max_norm`. + norm_type (float, optional): The p of the p-norm to compute for the :attr:`max_norm` option. Default ``2``. + scale_grad_by_freq (boolean, optional): if given, this will scale gradients by the inverse of frequency of + the words in the mini-batch. Default ``False``. + Note: this option is not supported when ``mode="max"``. + mode (string, optional): ``"sum"``, ``"mean"`` or ``"max"``. Specifies the way to reduce the bag. + ``"sum"`` computes the weighted sum, taking :attr:`per_sample_weights` + into consideration. ``"mean"`` computes the average of the values + in the bag, ``"max"`` computes the max value over each bag. + Default: ``"mean"`` + sparse (bool, optional): if ``True``, gradient w.r.t. :attr:`weight` matrix will be a sparse tensor. See + Notes for more details regarding sparse gradients. 
Note: this option is not + supported when ``mode="max"``. + + Attributes: + weight (Tensor): the learnable weights of each embedding table is the module of shape + `(num_embeddings, embedding_dim)` initialized using a uniform distribution + with sqrt(1 / num_categories). + + Inputs: :attr:`input` (LongTensor), :attr:`offsets` (LongTensor, optional), and + :attr:`per_index_weights` (Tensor, optional) + + - If :attr:`input` is 2D of shape `(B, N)`, + + it will be treated as ``B`` bags (sequences) each of fixed length ``N``, and + this will return ``B`` values aggregated in a way depending on the :attr:`mode`. + :attr:`offsets` is ignored and required to be ``None`` in this case. + + - If :attr:`input` is 1D of shape `(N)`, + + it will be treated as a concatenation of multiple bags (sequences). + :attr:`offsets` is required to be a 1D tensor containing the + starting index positions of each bag in :attr:`input`. Therefore, + for :attr:`offsets` of shape `(B)`, :attr:`input` will be viewed as + having ``B`` bags. Empty bags (i.e., having 0-length) will have + returned vectors filled by zeros. + + per_sample_weights (Tensor, optional): a tensor of float / double weights, or None + to indicate all weights should be taken to be ``1``. If specified, :attr:`per_sample_weights` + must have exactly the same shape as input and is treated as having the same + :attr:`offsets`, if those are not ``None``. Only supported for ``mode='sum'``. + + + Output shape: `(B, embedding_dim)` + + """ + __constants__ = ['num_categories', 'embedding_dim', 'num_collisions', + 'operation', 'max_norm', 'norm_type', 'scale_grad_by_freq', + 'mode', 'sparse'] + + def __init__(self, num_categories, embedding_dim, num_collisions, + operation='mult', max_norm=None, norm_type=2., + scale_grad_by_freq=False, mode='mean', sparse=False, + _weight=None): + super(QREmbeddingBag, self).__init__() + + assert operation in ['concat', 'mult', 'add'], 'Not valid operation!' 
+ + self.num_categories = num_categories + if isinstance(embedding_dim, int) or len(embedding_dim) == 1: + self.embedding_dim = [embedding_dim, embedding_dim] + else: + self.embedding_dim = embedding_dim + self.num_collisions = num_collisions + self.operation = operation + self.max_norm = max_norm + self.norm_type = norm_type + self.scale_grad_by_freq = scale_grad_by_freq + + if self.operation == 'add' or self.operation == 'mult': + assert self.embedding_dim[0] == self.embedding_dim[1], \ + 'Embedding dimensions do not match!' + + self.num_embeddings = [int(np.ceil(num_categories / num_collisions)), + num_collisions] + + if _weight is None: + self.weight_q = Parameter(torch.Tensor(self.num_embeddings[0], self.embedding_dim[0])) + self.weight_r = Parameter(torch.Tensor(self.num_embeddings[1], self.embedding_dim[1])) + self.reset_parameters() + else: + assert list(_weight[0].shape) == [self.num_embeddings[0], self.embedding_dim[0]], \ + 'Shape of weight for quotient table does not match num_embeddings and embedding_dim' + assert list(_weight[1].shape) == [self.num_embeddings[1], self.embedding_dim[1]], \ + 'Shape of weight for remainder table does not match num_embeddings and embedding_dim' + self.weight_q = Parameter(_weight[0]) + self.weight_r = Parameter(_weight[1]) + self.mode = mode + self.sparse = sparse + + def reset_parameters(self): + nn.init.uniform_(self.weight_q, np.sqrt(1 / self.num_categories)) + nn.init.uniform_(self.weight_r, np.sqrt(1 / self.num_categories)) + + def forward(self, input, offsets=None, per_sample_weights=None): + input_q = (input / self.num_collisions).long() + input_r = torch.remainder(input, self.num_collisions).long() + + embed_q = F.embedding_bag(input_q, self.weight_q, offsets, self.max_norm, + self.norm_type, self.scale_grad_by_freq, self.mode, + self.sparse, per_sample_weights) + embed_r = F.embedding_bag(input_r, self.weight_r, offsets, self.max_norm, + self.norm_type, self.scale_grad_by_freq, self.mode, + self.sparse, 
per_sample_weights) + + if self.operation == 'concat': + embed = torch.cat((embed_q, embed_r), dim=1) + elif self.operation == 'add': + embed = embed_q + embed_r + elif self.operation == 'mult': + embed = embed_q * embed_r + + return embed + + def extra_repr(self): + s = '{num_embeddings}, {embedding_dim}' + if self.max_norm is not None: + s += ', max_norm={max_norm}' + if self.norm_type != 2: + s += ', norm_type={norm_type}' + if self.scale_grad_by_freq is not False: + s += ', scale_grad_by_freq={scale_grad_by_freq}' + s += ', mode={mode}' + return s.format(**self.__dict__) diff --git a/benchmarks/dlrm/ubench/README_comms.md b/benchmarks/dlrm/ubench/README_comms.md new file mode 100644 index 0000000..5a76db0 --- /dev/null +++ b/benchmarks/dlrm/ubench/README_comms.md @@ -0,0 +1,5 @@ +# dlrm_ubench_comms_driver.py runs /param/train/comms/pt/comms.py. + +# Note +If /param is empty, change to that directory and run: +git submodule update --init --recursive diff --git a/benchmarks/dlrm/ubench/dlrm_ubench_comms_driver.py b/benchmarks/dlrm/ubench/dlrm_ubench_comms_driver.py new file mode 100644 index 0000000..e157fc0 --- /dev/null +++ b/benchmarks/dlrm/ubench/dlrm_ubench_comms_driver.py @@ -0,0 +1,130 @@ +import argparse +import contextlib +import io +import itertools +import os +import pathlib +import subprocess +import sys +from itertools import product +from os import fspath + +# param ubenches +p = pathlib.Path(__file__).parent.resolve() / "../../../param/train/compute/pt" +sys.path.append(fspath(p)) +import dataset +import pytorch_emb as kemb +import pytorch_gemm as kgemm +import pytorch_linear as klinear + +# FB5 Logger +p = pathlib.Path(__file__).parent.resolve() / "../../../fb5logging" +sys.path.append(fspath(p)) +import loggerconstants +from fb5logger import FB5Logger + + +def main(): + parser = argparse.ArgumentParser(description="comms.py driver") + parser.add_argument( + "--size", + type=str, + default="small", + ) + parser.add_argument( + "--backend", + 
type=str, + default=("nccl"), + choices=["nccl", "gloo", "mpi", "ucc", "xla"], + ) + parser.add_argument( + "--collective", + type=str, + default=("all_to_all"), + choices=["all_to_all", "all_reduce"], + ) + parser.add_argument("--fb5logger", type=str, default=None) + args = parser.parse_args() + + if args.size not in ["small", "medium", "large"] and not ( + args.size.isdigit() and int(args.size) > 0 + ): + sys.exit("The --size argument provided is not a valid positive integer.") + + lookup = { + "small": 2200 if args.collective == "all_reduce" else 134000000, + "medium": 9944 if args.collective == "all_reduce" else 244000000, + "large": 22372 if args.collective == "all_reduce" else 544000000, + str(2200): "small" if args.collective == "all_reduce" else 2200, + str(9944): "medium" if args.collective == "all_reduce" else 9944, + str(22372): "large" if args.collective == "all_reduce" else 22372, + str(134000000): "small" if args.collective == "all_to_all" else 134000000, + str(244000000): "medium" if args.collective == "all_to_all" else 244000000, + str(544000000): "large" if args.collective == "all_to_all" else 544000000, + } + (x, y) = (args.size, lookup.get(args.size, args.size)) + (size, name) = (x, y) if args.size.isdigit() else (y, x) + + master_ip = "localhost" + num_compute_per_collective = 100 + mm_dim = 1000 + num_iter = 100 + + cmd = f""" + --f 2 + --n {num_iter} + --master-ip {master_ip} + --master-port 22565 + --collective {args.collective} + --b {size} + --e {size} + --num-compute {num_compute_per_collective} + --mm-dim {mm_dim} + --backend {args.backend} + """ + sys.argv = cmd.replace("\n", " ").replace(" ", "").split() + + print("") + comms_abs_dir_path = str( + pathlib.Path(__file__).absolute().parents[3].resolve() / "param/train/comms/pt" + ) + sys.path.append(comms_abs_dir_path) + from comms import main as comms_main + + fb5logger = FB5Logger(args.fb5logger) + fb5logger.header( + "DLRM", + "UBENCH", + "train", + "comms_" + 
args.collective.replace("_", "") + "_" + name, + score_metric=loggerconstants.GBPS, + ) + + comms_stdout = io.StringIO() + with contextlib.redirect_stdout(comms_stdout): + fb5logger.run_start() + comms_main() + + output = comms_stdout.getvalue().split("\n")[-3:] + output = [_.split("\t") for _ in output] + output[1].insert(4, "") + output[0][4] = "Latency(us):" + output[0].insert(5, "p50") + output[0].pop(7) + output[0].pop(0) + output[1].pop(0) + extra_metadata = {} + for a, b in zip(output[0], output[1]): + extra_metadata[a.lstrip()] = b.lstrip() + fb5logger.run_stop( + num_batches=num_iter, batch_size=None, extra_metadata=extra_metadata + ) + + print(comms_stdout.getvalue()) + print("-- Pretty Format --") + for a, b in zip(output[0], output[1]): + print("{:<15s}{:>4s}".format(a.lstrip(), b.lstrip())) + + +if __name__ == "__main__": + main() diff --git a/benchmarks/dlrm/ubench/dlrm_ubench_train_driver.py b/benchmarks/dlrm/ubench/dlrm_ubench_train_driver.py new file mode 100644 index 0000000..15f407c --- /dev/null +++ b/benchmarks/dlrm/ubench/dlrm_ubench_train_driver.py @@ -0,0 +1,122 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
+ +import sys +import pathlib +from os import fspath +# param ubenches +p = pathlib.Path(__file__).parent.resolve() / "../../../param/train/compute/pt" +sys.path.append(fspath(p)) +import dataset +import pytorch_gemm as kgemm +import pytorch_emb as kemb +import pytorch_linear as klinear + +# FB5 Logger +p = pathlib.Path(__file__).parent.resolve() / "../../../fb5logging" +sys.path.append(fspath(p)) +from fb5logger import FB5Logger +import loggerconstants + +if __name__ == "__main__": + + import argparse + + parser = argparse.ArgumentParser( + description="Measuring the Compute Kernel Performance Using PyTorch" + ) + parser.add_argument('--warmups', type=int, default=10, help="warmup times") + parser.add_argument('--steps', type=int, default=100, help="repeat times") + parser.add_argument('--device', type=str, choices=['cpu', 'gpu', 'tpu'], required=True, help='valid devices') + parser.add_argument("--fb5logger", type=str, default=None) + + subparsers = parser.add_subparsers(title='kernels', dest='kernel') + subparsers.required = True + + parser_emb = subparsers.add_parser('emb', help='measure EmbeddingBag performance') + parser_emb.add_argument('-d', '--dataset', default='B') + parser_emb.add_argument("--randomseed", type=int, default=0) + parser_emb.add_argument("--usexlabag", action='store_true', help='use xlabad instead of embeddingbag') + parser_emb.add_argument("--alpha", default=0.0, help="Zipf param. 
Use uniform if == 0.0") + + parser_linear = subparsers.add_parser('linear', help='measure mlp performance') + parser_linear.add_argument('--optimizer-type', default='sgd', help='Optimizer: SGD', choices=['sgd']) + parser_linear.add_argument('-t', '--dtype', default='float', help="data type", choices=["float", "float16", "bfloat16"]) + parser_linear.add_argument('-d', '--dataset', default='small') + + # FB5 Logging + + args=parser.parse_args() + + print("Measuring the performance of ", args.kernel, " on device = ", args.device) + print("Steps = ", args.steps, " warmups = ", args.warmups) + + #fb5 logging header + if args.fb5logger is not None: + fb5logger = FB5Logger(args.fb5logger) + + if args.kernel == 'emb': + print("with emb dataset ", args.dataset) + global_bytes = 0 + global_elap = 0 + if args.fb5logger is not None: + fb5logger.header("DLRM", "UBENCH", "train", args.kernel + "_" + args.dataset, score_metric=loggerconstants.GBPS) + fb5logger.run_start() + if args.dataset == 'A': + run_dataset = dataset.emb_A + elif args.dataset == 'B': + run_dataset = dataset.emb_B + elif args.dataset == 'small': + small_dataset = [ (4800000, 56, 34, 2048), + (4800000, 56, 34, 4096),] + run_dataset = small_dataset + else: + import ast + run_dataset = ast.literal_eval(args.dataset) + for i in range(len(run_dataset)): + features, embdim, nnz, batch = run_dataset[i] + elap, total_bytes = kemb.run_single(args, features, embdim, nnz, batch) + elap /= args.steps + total_bytes /= 1.0e6 + global_bytes += total_bytes + global_elap += elap + if args.fb5logger is not None: + extra_metadata={"GB/s": global_bytes / global_elap / 1.0e3, "ELAP": global_elap, "BYTES": global_bytes} + fb5logger.run_stop(args.steps, batch, extra_metadata=extra_metadata) + else: + print("with linear dataset ", args.dataset, ", Data type: ", args.dtype) + global_flops = 0 + global_elap = 0 + if args.fb5logger is not None: + fb5logger.header("DLRM", "UBENCH", "train", args.kernel + "_" + args.dataset, 
score_metric=loggerconstants.TFPS) + fb5logger.run_start() + if args.dataset == 'A': + run_dataset = dataset.mlp_A + elif args.dataset == 'small': + small_dataset = [ (18, 1024, 1024, 1024, 128), + (18, 1024, 1024, 1024, 256),] + run_dataset = small_dataset + else: + import ast + run_dataset = ast.literal_eval(args.dataset) + for i in range(len(run_dataset)): + layer_num, input_size, hidden_size, output_size, batch_size = run_dataset[i] + elap, loss = klinear.run_single( + args, layer_num, input_size, hidden_size, output_size, batch_size + ) + elap /= args.steps + + flops = batch_size * ( + hidden_size * hidden_size * layer_num + + hidden_size * input_size + + hidden_size * output_size + ) + # Forward 2x and Backward 4x + flops *= 6 + global_flops += flops + global_elap += elap + if args.fb5logger is not None: + extra_metadata={"TF/s": global_flops / global_elap / 1.0e12, "ELAP": global_elap, "FLOPS": global_flops} + fb5logger.run_stop(args.steps, batch_size, extra_metadata=extra_metadata) diff --git a/benchmarks/rnnt/ootb/inference/QSL.py b/benchmarks/rnnt/ootb/inference/QSL.py new file mode 100644 index 0000000..3848ca3 --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/QSL.py @@ -0,0 +1,71 @@ +import sys +import os +from os import fspath +import pathlib +sys.path.insert(0, fspath(pathlib.Path(__file__).parent.resolve() / "./pytorch")) + +from parts.manifest import Manifest +from parts.segment import AudioSegment + +import numpy as np + +import mlperf_loadgen as lg + + +class AudioQSL: + def __init__(self, dataset_dir, manifest_filepath, labels, + sample_rate=16000, perf_count=None): + m_paths = [manifest_filepath] + self.manifest = Manifest(dataset_dir, m_paths, labels, len(labels), + normalize=True, max_duration=15.0) + self.sample_rate = sample_rate + self.count = len(self.manifest) + perf_count = self.count if perf_count is None else perf_count + self.sample_id_to_sample = {} + self.qsl = lg.ConstructQSL(self.count, perf_count, + self.load_query_samples, + 
self.unload_query_samples) + print( + "Dataset loaded with {0:.2f} hours. Filtered {1:.2f} hours. Number of samples: {2}".format( + self.manifest.duration / 3600, + self.manifest.filtered_duration / 3600, + self.count)) + + def load_query_samples(self, sample_list): + for sample_id in sample_list: + self.sample_id_to_sample[sample_id] = self._load_sample(sample_id) + + def unload_query_samples(self, sample_list): + for sample_id in sample_list: + del self.sample_id_to_sample[sample_id] + + def _load_sample(self, index): + sample = self.manifest[index] + segment = AudioSegment.from_file(sample['audio_filepath'][0], + target_sr=self.sample_rate) + waveform = segment.samples + assert isinstance(waveform, np.ndarray) and waveform.dtype == np.float32 + return waveform + + def __getitem__(self, index): + return self.sample_id_to_sample[index] + + def __del__(self): + lg.DestroyQSL(self.qsl) + print("Finished destroying QSL.") + + +# We have no problem fitting all data in memory, so we do that, in +# order to speed up execution of the benchmark. +class AudioQSLInMemory(AudioQSL): + def __init__(self, dataset_dir, manifest_filepath, labels, + sample_rate=16000, perf_count=None): + super().__init__(dataset_dir, manifest_filepath, labels, + sample_rate, perf_count) + super().load_query_samples(range(self.count)) + + def load_query_samples(self, sample_list): + pass + + def unload_query_samples(self, sample_list): + pass diff --git a/benchmarks/rnnt/ootb/inference/README.md b/benchmarks/rnnt/ootb/inference/README.md new file mode 100644 index 0000000..27fbabd --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/README.md @@ -0,0 +1,116 @@ +# 1. Problem +Speech recognition accepts raw audio samples and produces a corresponding +character transcription, without an external language model. + +# 2. Directions + +Open `run.sh`. Set the stage variable to "-1". Set "work_dir" to a +path backed by a disk with at least 30 GB of space. 
Most space is used +by loadgen logs, not the data or model. You need conda and a C/C++ +compiler on your PATH. I used conda 4.8.2. This script is responsible +for downloading dependencies, data, and the model. + +Run `./run.sh` from this directory. Note that stage 3 runs all of the +scenarios for the reference implementation, which will take a long +time, so you may want to exit before then. + +As you complete individual stages, you can set the variable "stage" to +a higher number for restarting from a later stage. + +# 3. Dataset/Environment +### Publication/Attribution +["OpenSLR LibriSpeech Corpus"](http://www.openslr.org/12/) provides over 1000 hours of speech data in the form of raw audio. +We use dev-clean, which is approximately 5 hours. We remove all samples with a length exceeding 15 seconds. + +### Data preprocessing +Log filterbanks of size 80 are extracted every 10 milliseconds, from +windows of size 20 milliseconds. Note that every three filterbanks are +concatenated together ("feature splicing"), so the model's effective +frame rate is actually 30 milliseconds. + +No dithering takes place. + +This is not typical preprocessing, since it takes place as part of the +model's measured runtime, not before the model runs. + +### Test data order + +Look at dev-clean-wav.json generated by run.sh.
It looks like this: + +``` +[ + { + "files": [ + { + "channels": 1, + "sample_rate": 16000.0, + "bitrate": 16, + "duration": 6.59, + "num_samples": 105440, + "encoding": "Signed Integer PCM", + "silent": false, + "fname": "dev-clean-wav/2277/149896/2277-149896-0000.wav", + "speed": 1 + } + ], + "original_duration": 6.59, + "original_num_samples": 105440, + "transcript": "he was in a fevered state of mind owing to the blight his wife's action threatened to cast upon his entire future" + }, + { + "files": [ + { + "channels": 1, + "sample_rate": 16000.0, + "bitrate": 16, + "duration": 7.145, + "num_samples": 114320, + "encoding": "Signed Integer PCM", + "silent": false, + "fname": "dev-clean-wav/2277/149896/2277-149896-0001.wav", + "speed": 1 + } + ], + "original_duration": 7.145, + "original_num_samples": 114320, + "transcript": "he would have to pay her the money which she would now regularly demand or there would be trouble it did not matter what he did" + }, + ... +] +``` + +The data is loaded into memory. Then all samples with a duration above +15 seconds are filtered out. Then the first object in the array is +assigned query id 0, the second is assigned query id 1, etc. The +unfiltered file is uploaded to the directory containing README in case +you do not want to recreate this file. + +# 4. Model +This is a variant of the model described in sections 3.1 and 6.2 of: + +@article{, + title={STREAMING END-TO-END SPEECH RECOGNITION FOR MOBILE DEVICES}, + author={Yanzhang He, Tara N. Sainath, Rohit Prabhavalkar, Ian McGraw, Raziel Alvarez, Ding Zhao, + David Rybach, Anjuli Kannan, Yonghui Wu, Ruoming Pang, Qiao Liang, Deepti Bhatia, Yuan Shangguan, + Bo Li, Golan Pundak, Khe Chai Sim, Tom Bagby, Shuo-yiin Chang, Kanishka Rao, Alexander Gruenstein}, + journal={arXiv preprint arXiv:1811.06621}, + year={2018} +} + +The differences are as follows: + +1. The model has 45.3 million parameters, rather than 120 million parameters +1. 
The LSTMs are not followed by projection layers +1. No layer normalization is used +1. Hidden dimensions are smaller. +1. The prediction network is made of two LSTMs, rather than seven. +1. The labels are characters, rather than word pieces. +1. No quantization is done at this time for inference. +1. A greedy decoder is used, rather than a beamsearch decoder. This greatly + reduces inference complexity. + +# 5. Quality +### Quality metric +7.452253714852645% Word Error Rate (WER) across all words in the output text of +all samples less than 15 seconds in length in the dev-clean set, using a greedy +decoder and a fully FP32 model. \ No newline at end of file diff --git a/benchmarks/rnnt/ootb/inference/accuracy_eval.py b/benchmarks/rnnt/ootb/inference/accuracy_eval.py new file mode 100644 index 0000000..ea81792 --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/accuracy_eval.py @@ -0,0 +1,53 @@ +#!/usr/bin/env python + +import argparse +import array +import json +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "pytorch")) + +from QSL import AudioQSL +from helpers import process_evaluation_epoch, __gather_predictions +from parts.manifest import Manifest + +dtype_map = { + "int8": 'b', + "int16": 'h', + "int32": 'l', + "int64": 'q', +} + +def get_args(): + parser = argparse.ArgumentParser() + parser.add_argument("--log_dir", required=True) + parser.add_argument("--dataset_dir", required=True) + parser.add_argument("--manifest", required=True) + parser.add_argument("--output_dtype", default="int64", choices=dtype_map.keys(), help="Output data type") + args = parser.parse_args() + return args + +def main(): + args = get_args() + labels = [" ", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", "'"] + qsl = AudioQSL(args.dataset_dir, args.manifest, labels) + manifest = qsl.manifest + with open(os.path.join(args.log_dir, "mlperf_log_accuracy.json")) as fh: + 
results = json.load(fh) + hypotheses = [] + references = [] + for result in results: + hypotheses.append(array.array(dtype_map[args.output_dtype], bytes.fromhex(result["data"])).tolist()) + references.append(manifest[result["qsl_idx"]]["transcript"]) + + references = __gather_predictions([references], labels=labels) + hypotheses = __gather_predictions([hypotheses], labels=labels) + + d = dict(predictions=hypotheses, + transcripts=references) + wer = process_evaluation_epoch(d) + print("Word Error Rate: {:}%, accuracy={:}%".format(wer * 100, (1 - wer) * 100)) + +if __name__ == '__main__': + main() diff --git a/benchmarks/rnnt/ootb/inference/environment.yml b/benchmarks/rnnt/ootb/inference/environment.yml new file mode 100644 index 0000000..4958247 --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/environment.yml @@ -0,0 +1,128 @@ +name: mlperf-rnnt +channels: + - pytorch + - conda-forge + - defaults +dependencies: + - _libgcc_mutex=0.1=main + - absl-py=0.9.0=py36_0 + - blas=1.0=mkl + - bzip2=1.0.8=h7b6447c_0 + - ca-certificates=2020.4.5.1=hecc5488_0 + - certifi=2020.4.5.1=py36h9f0ad1d_0 + - cffi=1.14.0=py36h2e261b9_0 + - cmake=3.14.0=h52cb24c_0 + - cudatoolkit=10.1.243=h6bb024c_0 + - cudatoolkit-dev=10.1.243=h516909a_3 + - expat=2.2.6=he6710b0_0 + - freetype=2.9.1=h8a8886c_1 + - gdb=8.3.1=py36h497da48_1 + - intel-openmp=2020.0=166 + - jpeg=9b=h024ee3a_2 + - krb5=1.17.1=h173b8e3_0 + - lame=3.100=h14c3975_1001 + - ld_impl_linux-64=2.33.1=h53a641e_7 + - libcurl=7.69.1=h20c2e04_0 + - libedit=3.1.20181209=hc058e9b_0 + - libffi=3.2.1=hd88cf55_4 + - libgcc-ng=9.1.0=hdf63c60_0 + - libgfortran-ng=7.3.0=hdf63c60_0 + - libpng=1.6.37=hbc83047_0 + - libssh2=1.9.0=h1ba5d50_1 + - libstdcxx-ng=9.1.0=hdf63c60_0 + - libtiff=4.1.0=h2733197_0 + - mad=0.15.1b=he1b5a44_0 + - mkl=2020.0=166 + - mkl-include=2020.0=166 + - mkl-service=2.3.0=py36he904b0f_0 + - mkl_fft=1.0.15=py36ha843d7b_0 + - mkl_random=1.1.0=py36hd6b4f25_0 + - ncurses=6.1=hf484d3e_1002 + - ninja=1.9.0=py36hfd86e86_0 + - 
numpy=1.18.1=py36h4f9e942_0 + - numpy-base=1.18.1=py36hde5b4d6_1 + - olefile=0.46=py_0 + - openssl=1.1.1g=h516909a_0 + - pillow=7.0.0=py36hb39fc2d_0 + - pip=20.0.2=py36_1 + - pycparser=2.20=py_0 + - python=3.6.10=h0371630_0 + - python_abi=3.6=1_cp36m + - pytorch=1.5.0=py3.6_cuda10.1.243_cudnn7.6.3_0 + - pyyaml=5.3.1=py36h7b6447c_0 + - readline=7.0=hf8c457e_1001 + - rhash=1.3.8=h1ba5d50_0 + - setuptools=46.1.3=py36_0 + - six=1.14.0=py36_0 + - sqlite=3.31.1=h7b6447c_0 + - tk=8.6.8=hbc83047_0 + - torchvision=0.6.0=py36_cu101 + - wheel=0.34.2=py36_0 + - xz=5.2.4=h14c3975_4 + - yaml=0.1.7=had09818_2 + - zlib=1.2.11=h7b6447c_3 + - zstd=1.3.7=h0b5b093_0 + - pip: + - ascii-graph==1.5.1 + - attrs==19.3.0 + - audioread==2.1.8 + - autopep8==1.5.1 + - backcall==0.1.0 + - chardet==3.0.4 + - coverage==5.0.4 + - decorator==4.4.2 + - entrypoints==0.3 + - flake8==3.7.9 + - grpcio==1.28.1 + - idna==2.9 + - importlib-metadata==1.6.0 + - inflect==4.1.0 + - ipdb==0.13.2 + - ipython==7.13.0 + - ipython-genutils==0.2.0 + - jedi==0.16.0 + - joblib==0.14.1 + - librosa==0.7.2 + - llvmlite==0.31.0 + - markdown==3.2.1 + - mccabe==0.6.1 + - more-itertools==8.2.0 + - numba==0.48.0 + - onnx==1.6.0 + - onnxruntime==1.2.0 + - packaging==20.3 + - pandas==0.24.2 + - parso==0.6.2 + - pexpect==4.8.0 + - pickleshare==0.7.5 + - pluggy==0.13.1 + - prompt-toolkit==3.0.5 + - protobuf==3.11.3 + - ptyprocess==0.6.0 + - py==1.8.1 + - pycodestyle==2.5.0 + - pyflakes==2.1.1 + - pygments==2.6.1 + - pyparsing==2.4.7 + - pytest==5.4.2 + - python-dateutil==2.8.1 + - pytz==2019.3 + - requests==2.23.0 + - resampy==0.2.2 + - scikit-learn==0.22.2.post1 + - scipy==1.4.1 + - soundfile==0.10.3.post1 + - sox==1.3.7 + - tensorboard==2.0.0 + - toml==0.10.0 + - tqdm==4.31.1 + - traitlets==4.3.3 + - typing-extensions==3.7.4.2 + - unidecode==1.1.1 + - urllib3==1.25.8 + - wcwidth==0.1.9 + - werkzeug==1.0.1 + - wrapt==1.10.11 + - zipp==3.1.0 +prefix: /cb/home/daniel/ws/miniconda3/envs/mlperf-rnnt + diff --git 
a/benchmarks/rnnt/ootb/inference/loadgen/.clang-format b/benchmarks/rnnt/ootb/inference/loadgen/.clang-format new file mode 100644 index 0000000..f08c9c2 --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/loadgen/.clang-format @@ -0,0 +1,2 @@ +BasedOnStyle: Google +Standard: Cpp11 diff --git a/benchmarks/rnnt/ootb/inference/loadgen/CMakeLists.txt b/benchmarks/rnnt/ootb/inference/loadgen/CMakeLists.txt new file mode 100644 index 0000000..7865287 --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/loadgen/CMakeLists.txt @@ -0,0 +1,68 @@ +cmake_minimum_required(VERSION 3.1) + +project(mlperf_loadgen) + +# The mlperf_loadgen version. +set(mlperf_loadgen_VERSION_MAJOR 1) +set(mlperf_loadgen_VERSION_MINOR 1) +message("mlperf_loadgen v${mlperf_loadgen_VERSION_MAJOR}.${mlperf_loadgen_VERSION_MINOR}") + +# Set build options. NB: CXX_STANDARD is supported since CMake 3.1. +if (NOT MSVC) +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O3 -W -Wall") +endif() +message(STATUS "Using C++ compiler flags: ${CMAKE_CXX_FLAGS}") +set(CMAKE_CXX_STANDARD "14") +message(STATUS "Using C++ standard: ${CMAKE_CXX_STANDARD}") +message(STATUS "Using static linker flags: ${CMAKE_STATIC_LINKER_FLAGS}") +message(STATUS "Using shared linker flags: ${CMAKE_SHARED_LINKER_FLAGS}") + +# Output directory for libraries. +set(LIBRARY_OUTPUT_PATH ${CMAKE_BINARY_DIR}) +message(STATUS "Using output path: ${LIBRARY_OUTPUT_PATH}") + +# Detect Python to use for generating source file with version info. +# NB: PythonInterp has been deprecated since CMake 3.12 +# but it works with earlier versions of CMake. +find_package(PythonInterp) +message(STATUS "Using Python interpreter: ${PYTHON_EXECUTABLE}") + +# Generate source file with version info. +execute_process(COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/version_generator.py ${CMAKE_BINARY_DIR}/version_generated.cc ${CMAKE_CURRENT_SOURCE_DIR}) + +# Add source files. 
+set(SOURCE + ${CMAKE_CURRENT_SOURCE_DIR}/bindings/c_api.h + ${CMAKE_CURRENT_SOURCE_DIR}/bindings/c_api.cc + ${CMAKE_CURRENT_SOURCE_DIR}/issue_query_controller.cc + ${CMAKE_CURRENT_SOURCE_DIR}/loadgen.cc + ${CMAKE_CURRENT_SOURCE_DIR}/logging.cc + ${CMAKE_CURRENT_SOURCE_DIR}/logging.h + ${CMAKE_CURRENT_SOURCE_DIR}/test_settings_internal.cc + ${CMAKE_CURRENT_SOURCE_DIR}/test_settings_internal.h + ${CMAKE_CURRENT_SOURCE_DIR}/utils.cc + ${CMAKE_CURRENT_SOURCE_DIR}/utils.h + ${CMAKE_CURRENT_SOURCE_DIR}/version.cc + ${CMAKE_CURRENT_SOURCE_DIR}/version.h + ${CMAKE_BINARY_DIR}/version_generated.cc +) + +include_directories(${CMAKE_CURRENT_SOURCE_DIR}) + +add_library(mlperf_loadgen STATIC ${SOURCE}) +target_link_libraries(mlperf_loadgen) + +if(WIN32) +set (LIBS "") +else() +set (LIBS pthread) +endif() + +add_executable(benchmark benchmark/repro.cpp) +target_link_libraries(benchmark PUBLIC mlperf_loadgen ${LIBS}) + +# Install library and headers. +install(TARGETS mlperf_loadgen + DESTINATION ${CMAKE_INSTALL_PREFIX}/lib) +install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/ + DESTINATION ${CMAKE_INSTALL_PREFIX}/include FILES_MATCHING PATTERN "*.h") diff --git a/benchmarks/rnnt/ootb/inference/loadgen/README.md b/benchmarks/rnnt/ootb/inference/loadgen/README.md new file mode 100644 index 0000000..e5329a1 --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/loadgen/README.md @@ -0,0 +1,105 @@ +# Overview {#mainpage} + +*Note:* A compiled html version of this document is hosted online +[here](https://mlperf.github.io/inference/loadgen/index.html). + +## Introduction + +* The LoadGen is a *reusable* module that *efficiently* and *fairly* measures + the performance of inference systems. +* It generates traffic for scenarios as formulated by a diverse set of experts + in the [MLPerf working group](https://mlperf.org/about). +* The scenarios emulate the workloads seen in mobile devices, + autonomous vehicles, robotics, and cloud-based setups. 
+* Although the LoadGen is not model or dataset aware, its strength is in its + reusability with logic that is. + +## Integration Example and Flow +The following is a diagram of how the LoadGen can be integrated into an +inference system, resembling how some of the MLPerf reference models are +implemented. +

+ +## Useful Links +* [FAQ](@ref ReadmeFAQ) +* [LoadGen Build Instructions](@ref ReadmeBuild) +* [LoadGen API](@ref LoadgenAPI) +* [Test Settings](@ref LoadgenAPITestSettings) - + A good description of available scenarios, modes, and knobs. +* [MLPerf Inference Code](https://github.com/mlcommons/inference) - + Includes source for the LoadGen and reference models that use the LoadGen. +* [MLPerf Inference Rules](https://github.com/mlcommons/inference_policies) - + Any mismatch with this is a bug in the LoadGen. +* [MLPerf Website](www.mlperf.org) + +## Scope of the LoadGen's Responsibilities + +### In Scope +* **Provide a reusable** C++ library with python bindings. +* **Implement** the traffic patterns of the MLPerf Inference scenarios and + modes. +* **Record** all traffic generated and received for later analysis and + verification. +* **Summarize** the results and whether performance constraints were met. +* **Target high-performance** systems with efficient multi-thread friendly + logging utilities. +* **Generate trust** via a shared, well-tested, and community-hardened + code base. + +### Out of Scope +The LoadGen is: +* **NOT** aware of the ML model it is running against. +* **NOT** aware of the data formats of the model's inputs and outputs. +* **NOT** aware of how to score the accuracy of a model's outputs. +* **NOT** aware of MLPerf rules regarding scenario-specific constraints. + +Limiting the scope of the LoadGen in this way keeps it reusable across +different models and datasets without modification. Using composition and +dependency injection, the user can define their own model, datasets, and +metrics. + +Additionally, not hardcoding MLPerf-specific test constraints, like test +duration and performance targets, allows users to use the LoadGen unmodified +for custom testing and continuous integration purposes.
+ +## Submission Considerations + +### Upstream all local modifications +* As a rule, no local modifications to the LoadGen's C++ library are allowed +for submission. +* Please upstream early and often to keep the playing field level. + +### Choose your TestSettings carefully! +* Since the LoadGen is oblivious to the model, it can't enforce the MLPerf +requirements for submission. *e.g.:* target percentiles and latencies. +* For verification, the values in TestSettings are logged. +* To help make sure your settings are spec compliant, use +TestSettings::FromConfig in conjunction with the relevant config file provided +with the reference models. + +## Responsibilities of a LoadGen User + +### Implement the Interfaces +* Implement the SystemUnderTest and QuerySampleLibrary interfaces and pass + them to the StartTest function. +* Call QuerySampleComplete for every sample received by + SystemUnderTest::IssueQuery. + +### Assess Accuracy +* Process the *mlperf_log_accuracy.json* output by the LoadGen to determine + the accuracy of your system. +* For the official models, Python scripts will be provided by the MLPerf model + owners for you to do this automatically. + +For templates of how to do the above in detail, refer to code for the demos, +tests, and reference models. 
diff --git a/benchmarks/rnnt/ootb/inference/loadgen/README_BUILD.md b/benchmarks/rnnt/ootb/inference/loadgen/README_BUILD.md new file mode 100644 index 0000000..095a8d8 --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/loadgen/README_BUILD.md @@ -0,0 +1,32 @@ +# Building the LoadGen {#ReadmeBuild} + +## Prerequisites + + sudo apt-get install libglib2.0-dev python-pip python3-pip + pip2 install absl-py numpy + pip3 install absl-py numpy + +## Quick Start + + pip install absl-py numpy + git clone --recurse-submodules https://github.com/mlcommons/inference.git mlperf_inference + cd mlperf_inference/loadgen + CFLAGS="-std=c++14 -O3" python setup.py bdist_wheel + pip install --force-reinstall dist/mlperf_loadgen-0.5a0-cp36-cp36m-linux_x86_64.whl + python demos/py_demo_single_stream.py + +This will fetch the loadgen source, build and install the loadgen as a python module, and run a simple end-to-end demo. The exact *.whl filename may differ on your system, but there should only be one resulting whl file for you to use. + +A summary of the test results can be found in the *"mlperf_log_summary.txt"* logfile. + +For a timeline visualization of what happened during the test, open the *"mlperf_log_trace.json"* file in Chrome: +* Type “chrome://tracing” in the address bar, then drag-n-drop the json. +* This may be useful for SUT performance tuning and understanding + debugging the loadgen. + +To build the loadgen as a C++ library, rather than a python module: + + git clone https://github.com/mlcommons/inference.git mlperf_inference + cd mlperf_inference + mkdir loadgen/build/ && cd loadgen/build/ + cmake .. && cmake --build . + cp libmlperf_loadgen.a .. 
diff --git a/benchmarks/rnnt/ootb/inference/loadgen/README_FAQ.md b/benchmarks/rnnt/ootb/inference/loadgen/README_FAQ.md new file mode 100644 index 0000000..c1093a6 --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/loadgen/README_FAQ.md @@ -0,0 +1,88 @@ +# LoadGen FAQ {#ReadmeFAQ} + +## Q: The LoadGen does not match the MLPerf specification. Who is right? +**A:** +The MLPerf spec is *always* right. +Please file a LoadGen bug so it may be resolved. + +## Q: How can I file a bug? +**A:** +On GitHub: https://github.com/mlcommons/inference/issues/new + +## Q: Can I make local modifications to the LoadGen for submission? +**A:** +No. To keep the playing field level, please upstream any local +modifications you need to make. Ideally upstream such changes behind a runtime +flag or via an abstract interface the client can implement. This will help +with testability. + +## Q: Where can I find the results of a test? +**A:** +By default, the loadgen will output an *mlperf_log_summary.txt* file +that summarizes the target metrics and constraints of the test, along with +other stats about the run. + +*Note:* LogSettings also has a flag to forward the results to stdout and +there's an outstanding TODO to make this more programmable. + +## Q: The reference implementation for \<*some_model*\> prints out results of its own. Are those for submission? +**A:** +They are not. The LoadGen results are the ground truth for submission +results since they will work even for systems that forgo the python bindings. +If you notice a bug in the LoadGen's results, please file a bug or submit a +patch. + +## Q: I'm getting linker errors for LoadgenVersion definitions. Where is *version_generated.cc*? +**A:** +If you have a custom build setup, make sure you run the *version_generator.py* +script, which will create the cc file you are looking for. The official build +files that come with the LoadGen do this for you out of the box. + +## Q: What is this *version_generator.py* script?
+**A:** +The LoadGen records git stats (if available) and the SHA1 of all its +source files (always) at build time for verification purposes. This is easy +to circumvent, but try your best to run *version_generator.py* correctly; +ideally integrated with your build system if you have a custom build. +The intention is more to help with debugging efforts and detect accidental +version missmatches than to detect bad actors. + +## Q: How do I view the *mlperf_log_trace.json* file? +**A:** +This file uses the [Trace Event Format] +(https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU/edit) +to record a timeline of all the threads involved. +You can view the file by typing [chrome://tracing](chrome://tracing) into +Chrome's address bar and dragging the json file there. +This file zips well and you can drag the zip file directly into +[chrome://tracing](chrome://tracing) too. +Please include zipped traces (and the other logs) when filing bug reports. + +## Q: What is the difference between the MultiStream and MultiStreamFree scenarios? +**A:** +MultiStream corresponds to the official MLPerf scenario for submissions; +it has a fixed query rate and allows only one outstanding query at a time. +MultiStreamFree is implemented for evaluation purposes only; it sends queries +as fast as possible and allows up to N outstanding queries at a time. You may +want to use MultiStreamFree for development purposes since small improvements +in performance will always be reflected in the results, whereas MultiStream's +results will be quantized. + +## Q: Why is the code littered with so many lambdas? My eyes hurt. +**A:** +Lambdas are a convenient and efficient way to ship arbitrary data + deferred +logic over to the logging thread without much boilerplate. +Much of the loadgen is built on top of the logging utilities. +Thus the lambdas. (Sorry about the eyes.) + +## Q: What C++ version does the LoadGen target? +**A:** +It currently targets and requires C++14. 
It should compile with recent +versions of clang, gcc, and msvc. + +## Q: What dependencies does the LoadGen code have? +**A:** +The C++ code has no external dependencies. The loadgen itself, logging +utilities, and unit test utilities are built solely on the C++ Standard Library. +The python bindings, however, do require +[pybind11](https://github.com/pybind/pybind11). diff --git a/benchmarks/rnnt/ootb/inference/loadgen/benchmark/.gitignore b/benchmarks/rnnt/ootb/inference/loadgen/benchmark/.gitignore new file mode 100644 index 0000000..e792c8e --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/loadgen/benchmark/.gitignore @@ -0,0 +1,2 @@ +loadgen_build +build \ No newline at end of file diff --git a/benchmarks/rnnt/ootb/inference/loadgen/benchmark/README.md b/benchmarks/rnnt/ootb/inference/loadgen/benchmark/README.md new file mode 100644 index 0000000..24e8729 --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/loadgen/benchmark/README.md @@ -0,0 +1,10 @@ +Note: please install jemalloc first. See: http://jemalloc.net/ +Command: bash run.sh <0=Basic,1=Queue> + +Experiments: +- On Intel(R) Xeon(R) CPU E5-1650 v4 @ 3.60GHz +- Basic SUT : 500-600k i/s +- Basic SUT + jemalloc: 800-900k i/s (`bash run.sh 800000 0`) +- Queued SUT (2 complete threads) + jemalloc: 1.2-1.3M i/s (`bash run.sh 1200000 1 2 2048`) +- Queued SUT (2 complete threads) + jemalloc + server_coalesce_queries: 1.4-1.5M is/ (`bash run.sh 1400000 1 2 512 1`) +- Basic SUT + jemalloc + server_coalesce_queries + 4 IssueQueryThreads: 2.4-2.5M is/ (`bash run.sh 2400000 0 2 512 1 4`) diff --git a/benchmarks/rnnt/ootb/inference/loadgen/benchmark/repro.cpp b/benchmarks/rnnt/ootb/inference/loadgen/benchmark/repro.cpp new file mode 100644 index 0000000..8b4bc8a --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/loadgen/benchmark/repro.cpp @@ -0,0 +1,302 @@ +/* + * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include +#include +#include +#include +#include +#include +#include +#include + +#include "loadgen.h" +#include "query_sample_library.h" +#include "system_under_test.h" +#include "test_settings.h" + +class QSL : public mlperf::QuerySampleLibrary { + public: + ~QSL() override{}; + const std::string& Name() const override { return mName; } + size_t TotalSampleCount() override { return 1000000; } + size_t PerformanceSampleCount() override { return TotalSampleCount(); } + void LoadSamplesToRam( + const std::vector& samples) override {} + void UnloadSamplesFromRam( + const std::vector& samples) override {} + + private: + std::string mName{"Dummy QSL"}; +}; + +class BasicSUT : public mlperf::SystemUnderTest { + public: + BasicSUT() { + // Start with some large value so that we don't reallocate memory. + initResponse(10000); + } + ~BasicSUT() override {} + const std::string& Name() const override { return mName; } + void IssueQuery(const std::vector& samples) override { + int n = samples.size(); + if (n > mResponses.size()) { + std::cerr << "Warning: reallocating response buffer in BasicSUT. Maybe " + "you should initResponse with larger value!?" 
+ << std::endl; + initResponse(samples.size()); + } + for (int i = 0; i < n; i++) { + mResponses[i].id = samples[i].id; + } + mlperf::QuerySamplesComplete(mResponses.data(), n); + } + void FlushQueries() override {} + void ReportLatencyResults( + const std::vector& latencies_ns) override{}; + + private: + void initResponse(int size) { + mResponses.resize(size, + {0, reinterpret_cast(&mBuf), sizeof(int)}); + } + int mBuf{0}; + std::string mName{"BasicSUT"}; + std::vector mResponses; +}; + +class QueueSUT : public mlperf::SystemUnderTest { + public: + QueueSUT(int numCompleteThreads, int maxSize) { + // Each thread handle at most maxSize at a time. + std::cout << "QueueSUT: maxSize = " << maxSize << std::endl; + initResponse(numCompleteThreads, maxSize); + // Launch complete threads + for (int i = 0; i < numCompleteThreads; i++) { + mThreads.emplace_back(&QueueSUT::CompleteThread, this, i); + } + } + ~QueueSUT() override { + { + std::unique_lock lck(mMtx); + mDone = true; + mCondVar.notify_all(); + } + for (auto& thread : mThreads) { + thread.join(); + } + } + const std::string& Name() const override { return mName; } + void IssueQuery(const std::vector& samples) override { + std::unique_lock lck(mMtx); + for (const auto& sample : samples) { + mIdQueue.push_back(sample.id); + } + // Let some worker thread to consume tasks + mCondVar.notify_one(); + } + void FlushQueries() override {} + void ReportLatencyResults( + const std::vector& latencies_ns) override{}; + + private: + void CompleteThread(int threadIdx) { + auto& responses = mResponses[threadIdx]; + size_t maxSize{responses.size()}; + size_t actualSize{0}; + while (true) { + { + std::unique_lock lck(mMtx); + mCondVar.wait(lck, [&]() { return !mIdQueue.empty() || mDone; }); + + if (mDone) { + break; + } + + actualSize = std::min(maxSize, mIdQueue.size()); + for (int i = 0; i < actualSize; i++) { + responses[i].id = mIdQueue.front(); + mIdQueue.pop_front(); + } + mCondVar.notify_one(); + } + 
mlperf::QuerySamplesComplete(responses.data(), actualSize); + } + } + void initResponse(int numCompleteThreads, int size) { + mResponses.resize(numCompleteThreads); + for (auto& responses : mResponses) { + responses.resize(size, + {0, reinterpret_cast(&mBuf), sizeof(int)}); + } + } + int mBuf{0}; + std::string mName{"QueueSUT"}; + std::vector> mResponses; + std::vector mThreads; + std::deque mIdQueue; + std::mutex mMtx; + std::condition_variable mCondVar; + bool mDone{false}; +}; + +class MultiBasicSUT : public mlperf::SystemUnderTest { + public: + MultiBasicSUT(int numThreads) + : mNumThreads(numThreads), mResponses(numThreads) { + // Start with some large value so that we don't reallocate memory. + initResponse(10000); + for (int i = 0; i < mNumThreads; ++i) { + mThreads.emplace_back(&MultiBasicSUT::startIssueThread, this, i); + } + } + ~MultiBasicSUT() override { + for (auto& thread : mThreads) { + thread.join(); + } + } + const std::string& Name() const override { return mName; } + void IssueQuery(const std::vector& samples) override { + int thread_idx = mThreadMap[std::this_thread::get_id()]; + int n = samples.size(); + auto& reponses = mResponses[thread_idx]; + if (n > reponses.size()) { + std::cout + << "Warning: reallocating response buffer in MultiBasicSUT. Maybe " + "you should initResponse with larger value!?" 
+ << std::endl; + initResponse(samples.size()); + } + for (int i = 0; i < n; i++) { + reponses[i].id = samples[i].id; + } + mlperf::QuerySamplesComplete(reponses.data(), n); + } + void FlushQueries() override {} + void ReportLatencyResults( + const std::vector& latencies_ns) override{}; + + private: + void initResponse(int size) { + for (auto& responses : mResponses) { + responses.resize(size, + {0, reinterpret_cast(&mBuf), sizeof(int)}); + } + } + void startIssueThread(int thread_idx) { + { + std::lock_guard lock(mMtx); + mThreadMap[std::this_thread::get_id()] = thread_idx; + } + mlperf::RegisterIssueQueryThread(); + } + int mBuf{0}; + int mNumThreads{0}; + std::string mName{"MultiBasicSUT"}; + std::vector> mResponses; + std::mutex mMtx; + std::vector mThreads; + std::map mThreadMap; +}; + +int main(int argc, char** argv) { + assert(argc >= 2 && "Need to pass in at least one argument: target_qps"); + int target_qps = std::stoi(argv[1]); + std::cout << "target_qps = " << target_qps << std::endl; + + bool useQueue{false}; + int numCompleteThreads{4}; + int maxSize{1}; + bool server_coalesce_queries{false}; + int num_issue_threads{0}; + if (argc >= 3) { + useQueue = std::stoi(argv[2]) != 0; + } + if (argc >= 4) { + numCompleteThreads = std::stoi(argv[3]); + } + if (argc >= 5) { + maxSize = std::stoi(argv[4]); + } + if (argc >= 6) { + server_coalesce_queries = std::stoi(argv[5]) != 0; + } + if (argc >= 7) { + num_issue_threads = std::stoi(argv[6]); + } + + QSL qsl; + std::unique_ptr sut; + + // Configure the test settings + mlperf::TestSettings testSettings; + testSettings.scenario = mlperf::TestScenario::Server; + testSettings.mode = mlperf::TestMode::PerformanceOnly; + testSettings.server_target_qps = target_qps; + testSettings.server_target_latency_ns = 10000000; // 10ms + testSettings.server_target_latency_percentile = 0.99; + testSettings.min_duration_ms = 60000; + testSettings.min_query_count = 270000; + testSettings.server_coalesce_queries = 
server_coalesce_queries; + std::cout << "testSettings.server_coalesce_queries = " + << (server_coalesce_queries ? "True" : "False") << std::endl; + testSettings.server_num_issue_query_threads = num_issue_threads; + std::cout << "num_issue_threads = " << num_issue_threads << std::endl; + + // Configure the logging settings + mlperf::LogSettings logSettings; + logSettings.log_output.outdir = "build"; + logSettings.log_output.prefix = "mlperf_log_"; + logSettings.log_output.suffix = ""; + logSettings.log_output.prefix_with_datetime = false; + logSettings.log_output.copy_detail_to_stdout = false; + logSettings.log_output.copy_summary_to_stdout = true; + logSettings.log_mode = mlperf::LoggingMode::AsyncPoll; + logSettings.log_mode_async_poll_interval_ms = 1000; + logSettings.enable_trace = false; + + // Choose SUT + if (num_issue_threads == 0) { + if (useQueue) { + std::cout << "Using QueueSUT with " << numCompleteThreads + << " complete threads" << std::endl; + sut.reset(new QueueSUT(numCompleteThreads, maxSize)); + } else { + std::cout << "Using BasicSUT" << std::endl; + sut.reset(new BasicSUT()); + } + } else { + if (useQueue) { + std::cout << "Using MultiQueueSUT with " << numCompleteThreads + << " complete threads" << std::endl; + std::cerr << "!!!! MultiQueueSUT is NOT implemented yet !!!!" + << std::endl; + return 1; + // sut.reset(new MultiQueueSUT(num_issue_threads, numCompleteThreads, + // maxSize)); + } else { + std::cout << "Using MultiBasicSUT" << std::endl; + sut.reset(new MultiBasicSUT(num_issue_threads)); + } + } + + // Start test + std::cout << "Start test..." << std::endl; + mlperf::StartTest(sut.get(), &qsl, testSettings, logSettings); + std::cout << "Test done. Clean up SUT..." << std::endl; + sut.reset(); + std::cout << "Done!" 
<< std::endl; + return 0; +} diff --git a/benchmarks/rnnt/ootb/inference/loadgen/benchmark/run.sh b/benchmarks/rnnt/ootb/inference/loadgen/benchmark/run.sh new file mode 100644 index 0000000..62559c1 --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/loadgen/benchmark/run.sh @@ -0,0 +1,21 @@ +#!/usr/bin/bash +# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +echo "Building loadgen..." +if [ ! -e loadgen_build ]; then mkdir loadgen_build; fi; +cd loadgen_build && cmake ../.. && make -j && cd .. +echo "Building test program..." +if [ ! -e build ]; then mkdir build; fi; +g++ --std=c++11 -O3 -I.. -o build/repro.exe repro.cpp -Lloadgen_build -lmlperf_loadgen -lpthread && \ +LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.2 build/repro.exe $1 $2 $3 $4 $5 $6 diff --git a/benchmarks/rnnt/ootb/inference/loadgen/benchmark/run_debug.sh b/benchmarks/rnnt/ootb/inference/loadgen/benchmark/run_debug.sh new file mode 100644 index 0000000..ba63727 --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/loadgen/benchmark/run_debug.sh @@ -0,0 +1,21 @@ +#!/usr/bin/bash +# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +echo "Building loadgen in Debug mode..." +if [ ! -e loadgen_build ]; then mkdir loadgen_build; fi; +cd loadgen_build && cmake -DCMAKE_BUILD_TYPE=Debug ../.. && make -j && cd .. +echo "Building test program in Debug mode..." +if [ ! -e build ]; then mkdir build; fi; +g++ --std=c++11 -O0 -g -I.. -o build/repro.exe repro.cpp -Lloadgen_build -lmlperf_loadgen -lpthread && \ +gdb --args build/repro.exe $1 $2 $3 $4 $5 $6 diff --git a/benchmarks/rnnt/ootb/inference/loadgen/bindings/c_api.cc b/benchmarks/rnnt/ootb/inference/loadgen/bindings/c_api.cc new file mode 100644 index 0000000..9de41da --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/loadgen/bindings/c_api.cc @@ -0,0 +1,168 @@ +/* Copyright 2019 The MLPerf Authors. All Rights Reserved. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "c_api.h" + +#include + +#include "../loadgen.h" +#include "../query_sample.h" +#include "../query_sample_library.h" +#include "../system_under_test.h" +#include "../test_settings.h" + +namespace mlperf { +namespace c { +namespace { + +// Forwards SystemUnderTest calls to relevant callbacks. +class SystemUnderTestTrampoline : public SystemUnderTest { + public: + SystemUnderTestTrampoline( + ClientData client_data, std::string name, IssueQueryCallback issue_cb, + FlushQueriesCallback flush_queries_cb, + ReportLatencyResultsCallback report_latency_results_cb) + : client_data_(client_data), + name_(std::move(name)), + issue_cb_(issue_cb), + flush_queries_cb_(flush_queries_cb), + report_latency_results_cb_(report_latency_results_cb) {} + ~SystemUnderTestTrampoline() override = default; + + const std::string& Name() const override { return name_; } + + void IssueQuery(const std::vector& samples) override { + (*issue_cb_)(client_data_, samples.data(), samples.size()); + } + + void FlushQueries() override { (*flush_queries_cb_)(); } + + void ReportLatencyResults( + const std::vector& latencies_ns) override { + (*report_latency_results_cb_)(client_data_, latencies_ns.data(), + latencies_ns.size()); + } + + private: + ClientData client_data_; + std::string name_; + IssueQueryCallback issue_cb_; + FlushQueriesCallback flush_queries_cb_; + ReportLatencyResultsCallback report_latency_results_cb_; +}; + +} // namespace + +void* ConstructSUT(ClientData client_data, const char* name, size_t name_length, + IssueQueryCallback issue_cb, + FlushQueriesCallback flush_queries_cb, + ReportLatencyResultsCallback report_latency_results_cb) { + SystemUnderTestTrampoline* sut = new SystemUnderTestTrampoline( + client_data, std::string(name, name_length), issue_cb, flush_queries_cb, + report_latency_results_cb); + return reinterpret_cast(sut); +} + +void DestroySUT(void* sut) { + 
SystemUnderTestTrampoline* sut_cast = + reinterpret_cast(sut); + delete sut_cast; +} + +namespace { + +// Forwards QuerySampleLibrary calls to relevant callbacks. +class QuerySampleLibraryTrampoline : public QuerySampleLibrary { + public: + QuerySampleLibraryTrampoline( + ClientData client_data, std::string name, size_t total_sample_count, + size_t performance_sample_count, + LoadSamplesToRamCallback load_samples_to_ram_cb, + UnloadSamplesFromRamCallback unload_samples_from_ram_cb) + : client_data_(client_data), + name_(std::move(name)), + total_sample_count_(total_sample_count), + performance_sample_count_(performance_sample_count), + load_samples_to_ram_cb_(load_samples_to_ram_cb), + unload_samples_from_ram_cb_(unload_samples_from_ram_cb) {} + ~QuerySampleLibraryTrampoline() override = default; + + const std::string& Name() const override { return name_; } + size_t TotalSampleCount() override { return total_sample_count_; } + size_t PerformanceSampleCount() override { return performance_sample_count_; } + + void LoadSamplesToRam(const std::vector& samples) override { + (*load_samples_to_ram_cb_)(client_data_, samples.data(), samples.size()); + } + void UnloadSamplesFromRam( + const std::vector& samples) override { + (*unload_samples_from_ram_cb_)(client_data_, samples.data(), + samples.size()); + } + + private: + ClientData client_data_; + std::string name_; + size_t total_sample_count_; + size_t performance_sample_count_; + LoadSamplesToRamCallback load_samples_to_ram_cb_; + UnloadSamplesFromRamCallback unload_samples_from_ram_cb_; +}; + +} // namespace + +void* ConstructQSL(ClientData client_data, const char* name, size_t name_length, + size_t total_sample_count, size_t performance_sample_count, + LoadSamplesToRamCallback load_samples_to_ram_cb, + UnloadSamplesFromRamCallback unload_samples_from_ram_cb) { + QuerySampleLibraryTrampoline* qsl = new QuerySampleLibraryTrampoline( + client_data, std::string(name, name_length), total_sample_count, + 
performance_sample_count, load_samples_to_ram_cb, + unload_samples_from_ram_cb); + return reinterpret_cast(qsl); +} + +void DestroyQSL(void* qsl) { + QuerySampleLibraryTrampoline* qsl_cast = + reinterpret_cast(qsl); + delete qsl_cast; +} + +// mlperf::c::StartTest just forwards to mlperf::StartTest after doing the +// proper cast. +void StartTest(void* sut, void* qsl, const TestSettings& settings) { + SystemUnderTestTrampoline* sut_cast = + reinterpret_cast(sut); + QuerySampleLibraryTrampoline* qsl_cast = + reinterpret_cast(qsl); + LogSettings default_log_settings; + mlperf::StartTest(sut_cast, qsl_cast, settings, default_log_settings); +} + +void QuerySamplesComplete(QuerySampleResponse* responses, + size_t response_count) { + mlperf::QuerySamplesComplete(responses, response_count); +} + +void QuerySamplesCompleteResponseCb(QuerySampleResponse* responses, + size_t response_count, ResponseCallback response_cb, + ClientData client_data) { + mlperf::QuerySamplesComplete(responses, response_count, + [client_data, response_cb] (QuerySampleResponse* response) { + response_cb(client_data, response); + }); +} + +void RegisterIssueQueryThread() { mlperf::RegisterIssueQueryThread(); } + +} // namespace c +} // namespace mlperf diff --git a/benchmarks/rnnt/ootb/inference/loadgen/bindings/c_api.h b/benchmarks/rnnt/ootb/inference/loadgen/bindings/c_api.h new file mode 100644 index 0000000..cf1a859 --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/loadgen/bindings/c_api.h @@ -0,0 +1,90 @@ +/* Copyright 2019 The MLPerf Authors. All Rights Reserved. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +/// \file +/// \brief A C API wrapping the C++ loadgen. Not tested. Needs work. +/// \details The C API allows a C or Python client to easily create +/// a SystemUnderTest without having to expose the SystemUnderTest class +/// directly. +/// ConstructSUT works with a bunch of function poitners instead that are +/// called from an underlying trampoline class. + +#ifndef SYSTEM_UNDER_TEST_C_API_H_ +#define SYSTEM_UNDER_TEST_C_API_H_ + +#include +#include + +#include "../query_sample.h" +#include "../test_settings.h" + +namespace mlperf { + +namespace c { + +/// \brief Optional opaque client data that creators of SUTs and QSLs can have +/// the loadgen pass back to their callback invocations. +/// Helps avoids global variables. +typedef uintptr_t ClientData; + +typedef void (*IssueQueryCallback)(ClientData, const QuerySample*, size_t); +typedef void (*FlushQueriesCallback)(); +typedef void (*ReportLatencyResultsCallback)(ClientData, const int64_t*, + size_t); +typedef void (*ResponseCallback)(ClientData, QuerySampleResponse*); + +/// \brief SUT calls this function to report query result back to loadgen +void QuerySamplesComplete(QuerySampleResponse* responses, + size_t response_count); + +void QuerySamplesCompleteResponseCb(QuerySampleResponse* responses, + size_t response_count, + ResponseCallback response_cb, + ClientData client_data); + +/// \brief Create an opaque SUT pointer based on C callbacks. +void* ConstructSUT(ClientData client_data, const char* name, size_t name_length, + IssueQueryCallback issue_cb, + FlushQueriesCallback flush_queries_cb, + ReportLatencyResultsCallback report_latency_results_cb); +/// \brief Destroys the SUT created by ConstructSUT. 
+void DestroySUT(void* sut); + +typedef void (*LoadSamplesToRamCallback)(ClientData, const QuerySampleIndex*, + size_t); +typedef void (*UnloadSamplesFromRamCallback)(ClientData, + const QuerySampleIndex*, size_t); + +/// \brief Create an opaque QSL pointer based on C callbacks. +void* ConstructQSL(ClientData client_data, const char* name, size_t name_length, + size_t total_sample_count, size_t performance_sample_count, + LoadSamplesToRamCallback load_samples_to_ram_cb, + UnloadSamplesFromRamCallback unload_samples_from_ram_cb); +/// \brief Destroys the QSL created by ConsructQSL. +void DestroyQSL(void* qsl); + +/// \brief Run tests on a SUT created by ConstructSUT(). +/// \details This is the C entry point. See mlperf::StartTest for the C++ entry +/// point. +void StartTest(void* sut, void* qsl, const TestSettings& settings); + +/// +/// \brief Register a thread for query issuing in Server scenario. +/// \details This is the C entry point. See mlperf::RegisterIssueQueryThread for the C++ entry +/// point. +/// +void RegisterIssueQueryThread(); + +} // namespace c +} // namespace mlperf + +#endif // SYSTEM_UNDER_TEST_C_API_H_ diff --git a/benchmarks/rnnt/ootb/inference/loadgen/bindings/python_api.cc b/benchmarks/rnnt/ootb/inference/loadgen/bindings/python_api.cc new file mode 100644 index 0000000..140604e --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/loadgen/bindings/python_api.cc @@ -0,0 +1,397 @@ +/* Copyright 2019 The MLPerf Authors. All Rights Reserved. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +/// \file +/// \brief Python bindings for the loadgen using pybind11. + +#ifndef PYTHON_BINDINGS_H +#define PYTHON_BINDINGS_H + +#include + +#include "../loadgen.h" +#include "../query_sample.h" +#include "../query_sample_library.h" +#include "../system_under_test.h" +#include "../test_settings.h" +#include "pybind11/functional.h" +#include "pybind11/pybind11.h" +#include "pybind11/stl.h" +#include "pybind11/stl_bind.h" + +namespace mlperf { + +namespace { + +using IssueQueryCallback = std::function)>; +using FastIssueQueriesCallback = + std::function, std::vector)>; +using FlushQueriesCallback = std::function; +using ReportLatencyResultsCallback = std::function)>; + +// Forwards SystemUnderTest calls to relevant callbacks. +class SystemUnderTestTrampoline : public SystemUnderTest { + public: + SystemUnderTestTrampoline( + std::string name, IssueQueryCallback issue_cb, + FlushQueriesCallback flush_queries_cb, + ReportLatencyResultsCallback report_latency_results_cb) + : name_(std::move(name)), + issue_cb_(issue_cb), + flush_queries_cb_(flush_queries_cb), + report_latency_results_cb_(report_latency_results_cb) {} + ~SystemUnderTestTrampoline() override = default; + + const std::string& Name() const override { return name_; } + + void IssueQuery(const std::vector& samples) override { + pybind11::gil_scoped_acquire gil_acquirer; + issue_cb_(samples); + } + + void FlushQueries() override { flush_queries_cb_(); } + + void ReportLatencyResults( + const std::vector& latencies_ns) override { + pybind11::gil_scoped_acquire gil_acquirer; + report_latency_results_cb_(latencies_ns); + } + + protected: + std::string name_; + IssueQueryCallback issue_cb_; + FlushQueriesCallback flush_queries_cb_; + ReportLatencyResultsCallback report_latency_results_cb_; +}; + +class FastSystemUnderTestTrampoline 
: public SystemUnderTestTrampoline { + public: + FastSystemUnderTestTrampoline( + std::string name, FastIssueQueriesCallback fast_issue_cb, + FlushQueriesCallback flush_queries_cb, + ReportLatencyResultsCallback report_latency_results_cb) + : SystemUnderTestTrampoline(name, nullptr, flush_queries_cb, + report_latency_results_cb), + fast_issue_cb_(fast_issue_cb) {} + ~FastSystemUnderTestTrampoline() override = default; + + void IssueQuery(const std::vector& samples) override { + pybind11::gil_scoped_acquire gil_acquirer; + std::vector responseIds; + std::vector querySampleIndices; + for (auto& s : samples) { + responseIds.push_back(s.id); + querySampleIndices.push_back(s.index); + } + fast_issue_cb_(responseIds, querySampleIndices); + } + + private: + FastIssueQueriesCallback fast_issue_cb_; +}; + +using LoadSamplesToRamCallback = + std::function)>; +using UnloadSamplesFromRamCallback = + std::function)>; + +// Forwards QuerySampleLibrary calls to relevant callbacks. +class QuerySampleLibraryTrampoline : public QuerySampleLibrary { + public: + QuerySampleLibraryTrampoline( + std::string name, size_t total_sample_count, + size_t performance_sample_count, + LoadSamplesToRamCallback load_samples_to_ram_cb, + UnloadSamplesFromRamCallback unload_samples_from_ram_cb) + : name_(std::move(name)), + total_sample_count_(total_sample_count), + performance_sample_count_(performance_sample_count), + load_samples_to_ram_cb_(load_samples_to_ram_cb), + unload_samples_from_ram_cb_(unload_samples_from_ram_cb) {} + ~QuerySampleLibraryTrampoline() override = default; + + const std::string& Name() const override { return name_; } + size_t TotalSampleCount() { return total_sample_count_; } + size_t PerformanceSampleCount() { return performance_sample_count_; } + + void LoadSamplesToRam(const std::vector& samples) override { + pybind11::gil_scoped_acquire gil_acquirer; + load_samples_to_ram_cb_(samples); + } + void UnloadSamplesFromRam( + const std::vector& samples) override { + 
pybind11::gil_scoped_acquire gil_acquirer; + unload_samples_from_ram_cb_(samples); + } + + private: + std::string name_; + size_t total_sample_count_; + size_t performance_sample_count_; + LoadSamplesToRamCallback load_samples_to_ram_cb_; + UnloadSamplesFromRamCallback unload_samples_from_ram_cb_; +}; + +} // namespace + +/// \brief Python bindings. +namespace py { + +uintptr_t ConstructSUT(IssueQueryCallback issue_cb, + FlushQueriesCallback flush_queries_cb, + ReportLatencyResultsCallback report_latency_results_cb) { + SystemUnderTestTrampoline* sut = new SystemUnderTestTrampoline( + "PySUT", issue_cb, flush_queries_cb, report_latency_results_cb); + return reinterpret_cast(sut); +} + +void DestroySUT(uintptr_t sut) { + SystemUnderTestTrampoline* sut_cast = + reinterpret_cast(sut); + delete sut_cast; +} + +uintptr_t ConstructFastSUT( + FastIssueQueriesCallback fast_issue_cb, + FlushQueriesCallback flush_queries_cb, + ReportLatencyResultsCallback report_latency_results_cb) { + FastSystemUnderTestTrampoline* sut = new FastSystemUnderTestTrampoline( + "PyFastSUT", fast_issue_cb, flush_queries_cb, report_latency_results_cb); + return reinterpret_cast(sut); +} + +void DestroyFastSUT(uintptr_t sut) { + FastSystemUnderTestTrampoline* sut_cast = + reinterpret_cast(sut); + delete sut_cast; +} + + +uintptr_t ConstructQSL( + size_t total_sample_count, size_t performance_sample_count, + LoadSamplesToRamCallback load_samples_to_ram_cb, + UnloadSamplesFromRamCallback unload_samples_from_ram_cb) { + QuerySampleLibraryTrampoline* qsl = new QuerySampleLibraryTrampoline( + "PyQSL", total_sample_count, performance_sample_count, + load_samples_to_ram_cb, unload_samples_from_ram_cb); + return reinterpret_cast(qsl); +} + +void DestroyQSL(uintptr_t qsl) { + QuerySampleLibraryTrampoline* qsl_cast = + reinterpret_cast(qsl); + delete qsl_cast; +} + +void StartTest(uintptr_t sut, uintptr_t qsl, + mlperf::TestSettings test_settings) { + pybind11::gil_scoped_release gil_releaser; + 
SystemUnderTestTrampoline* sut_cast = + reinterpret_cast(sut); + QuerySampleLibraryTrampoline* qsl_cast = + reinterpret_cast(qsl); + LogSettings default_log_settings; + mlperf::StartTest(sut_cast, qsl_cast, test_settings, default_log_settings); +} + +void StartTestWithLogSettings(uintptr_t sut, uintptr_t qsl, + mlperf::TestSettings test_settings, + mlperf::LogSettings log_settings) { + pybind11::gil_scoped_release gil_releaser; + SystemUnderTestTrampoline* sut_cast = + reinterpret_cast(sut); + QuerySampleLibraryTrampoline* qsl_cast = + reinterpret_cast(qsl); + mlperf::StartTest(sut_cast, qsl_cast, test_settings, log_settings); +} + +using ResponseCallback = std::function; + +/// TODO: Get rid of copies. +void QuerySamplesComplete(std::vector responses, ResponseCallback response_cb = {}) { + pybind11::gil_scoped_release gil_releaser; + mlperf::QuerySamplesComplete(responses.data(), responses.size(), response_cb); +} + +PYBIND11_MODULE(mlperf_loadgen, m) { + m.doc() = "MLPerf Inference load generator."; + + pybind11::enum_(m, "TestScenario") + .value("SingleStream", TestScenario::SingleStream) + .value("MultiStream", TestScenario::MultiStream) + .value("MultiStreamFree", TestScenario::MultiStreamFree) + .value("Server", TestScenario::Server) + .value("Offline", TestScenario::Offline); + + pybind11::enum_(m, "TestMode") + .value("SubmissionRun", TestMode::SubmissionRun) + .value("AccuracyOnly", TestMode::AccuracyOnly) + .value("PerformanceOnly", TestMode::PerformanceOnly) + .value("FindPeakPerformance", TestMode::FindPeakPerformance); + + pybind11::class_(m, "TestSettings") + .def(pybind11::init<>()) + .def_readwrite("scenario", &TestSettings::scenario) + .def_readwrite("mode", &TestSettings::mode) + .def_readwrite("single_stream_expected_latency_ns", + &TestSettings::single_stream_expected_latency_ns) + .def_readwrite("single_stream_target_latency_percentile", + &TestSettings::single_stream_target_latency_percentile) + .def_readwrite("multi_stream_target_qps", + 
&TestSettings::multi_stream_target_qps) + .def_readwrite("multi_stream_target_latency_ns", + &TestSettings::multi_stream_target_latency_ns) + .def_readwrite("multi_stream_target_latency_percentile", + &TestSettings::multi_stream_target_latency_percentile) + .def_readwrite("multi_stream_samples_per_query", + &TestSettings::multi_stream_samples_per_query) + .def_readwrite("multi_stream_max_async_queries", + &TestSettings::multi_stream_max_async_queries) + .def_readwrite("server_target_qps", &TestSettings::server_target_qps) + .def_readwrite("server_target_latency_ns", + &TestSettings::server_target_latency_ns) + .def_readwrite("server_target_latency_percentile", + &TestSettings::server_target_latency_percentile) + .def_readwrite("server_coalesce_queries", + &TestSettings::server_coalesce_queries) + .def_readwrite("server_find_peak_qps_decimals_of_precision", + &TestSettings::server_find_peak_qps_decimals_of_precision) + .def_readwrite("server_find_peak_qps_boundary_step_size", + &TestSettings::server_find_peak_qps_boundary_step_size) + .def_readwrite("server_max_async_queries", + &TestSettings::server_max_async_queries) + .def_readwrite("offline_expected_qps", + &TestSettings::offline_expected_qps) + .def_readwrite("min_duration_ms", &TestSettings::min_duration_ms) + .def_readwrite("max_duration_ms", &TestSettings::max_duration_ms) + .def_readwrite("min_query_count", &TestSettings::min_query_count) + .def_readwrite("max_query_count", &TestSettings::max_query_count) + .def_readwrite("qsl_rng_seed", &TestSettings::qsl_rng_seed) + .def_readwrite("sample_index_rng_seed", + &TestSettings::sample_index_rng_seed) + .def_readwrite("schedule_rng_seed", &TestSettings::schedule_rng_seed) + .def_readwrite("accuracy_log_rng_seed", + &TestSettings::accuracy_log_rng_seed) + .def_readwrite("accuracy_log_probability", + &TestSettings::accuracy_log_probability) + .def_readwrite("print_timestamps", &TestSettings::print_timestamps) + .def_readwrite("performance_issue_unique", + 
&TestSettings::performance_issue_unique) + .def_readwrite("performance_issue_same", + &TestSettings::performance_issue_same) + .def_readwrite("performance_issue_same_index", + &TestSettings::performance_issue_same_index) + .def_readwrite("performance_sample_count_override", + &TestSettings::performance_sample_count_override) + .def("FromConfig", &TestSettings::FromConfig, "FromConfig."); + + pybind11::enum_(m, "LoggingMode") + .value("AsyncPoll", LoggingMode::AsyncPoll) + .value("EndOfTestOnly", LoggingMode::EndOfTestOnly) + .value("Synchronous", LoggingMode::Synchronous); + + pybind11::class_(m, "LogOutputSettings") + .def(pybind11::init<>()) + .def_readwrite("outdir", &LogOutputSettings::outdir) + .def_readwrite("prefix", &LogOutputSettings::prefix) + .def_readwrite("suffix", &LogOutputSettings::suffix) + .def_readwrite("prefix_with_datetime", + &LogOutputSettings::prefix_with_datetime) + .def_readwrite("copy_detail_to_stdout", + &LogOutputSettings::copy_detail_to_stdout) + .def_readwrite("copy_summary_to_stdout", + &LogOutputSettings::copy_summary_to_stdout); + + pybind11::class_(m, "LogSettings") + .def(pybind11::init<>()) + .def_readwrite("log_output", &LogSettings::log_output) + .def_readwrite("log_mode", &LogSettings::log_mode) + .def_readwrite("log_mode_async_poll_interval_ms", + &LogSettings::log_mode_async_poll_interval_ms) + .def_readwrite("enable_trace", &LogSettings::enable_trace); + + pybind11::class_(m, "QuerySample") + .def(pybind11::init<>()) + .def(pybind11::init()) + .def_readwrite("id", &QuerySample::id) + .def_readwrite("index", &QuerySample::index) + .def(pybind11::pickle( + [] (const QuerySample &qs) { // __getstate__ + /*Return a tuple that fully encodes state of object*/ + return pybind11::make_tuple(qs.id, qs.index); + }, + [] (pybind11::tuple t) { // __setstate__ + if (t.size() != 2) + throw std::runtime_error("Invalid state for QuerySample"); + /* Create a new C++ instance*/ + QuerySample q; + q.id = t[0].cast(); + q.index = t[1].cast(); 
+ return q; + })); + + pybind11::class_(m, "QuerySampleResponse") + .def(pybind11::init<>()) + .def(pybind11::init()) + .def_readwrite("id", &QuerySampleResponse::id) + .def_readwrite("data", &QuerySampleResponse::data) + .def_readwrite("size", &QuerySampleResponse::size) + .def(pybind11::pickle( + [] (const QuerySampleResponse &qsr) { // __getstate__ + /* Return a tuple that fully encodes state of object*/ + return pybind11::make_tuple(qsr.id, qsr.data, qsr.size); + }, + [] (pybind11::tuple t) { // __setstate__ + if (t.size() != 3) + throw std::runtime_error("Invalid state for QuerySampleResponse"); + /* Create a new C++ instance*/ + QuerySampleResponse q; + q.id = t[0].cast(); + q.data = t[1].cast(); + q.size = t[2].cast(); + return q; + })); + + // TODO: Use PYBIND11_MAKE_OPAQUE for the following vector types. + pybind11::bind_vector>(m, "VectorQuerySample"); + pybind11::bind_vector>( + m, "VectorQuerySampleResponse"); + + m.def("ConstructSUT", &py::ConstructSUT, "Construct the system under test."); + m.def("DestroySUT", &py::DestroySUT, + "Destroy the object created by ConstructSUT."); + + m.def("ConstructFastSUT", &py::ConstructFastSUT, + "Construct the system under test, fast issue query"); + m.def("DestroyFastSUT", &py::DestroyFastSUT, + "Destroy the object created by ConstructFastSUT."); + + m.def("ConstructQSL", &py::ConstructQSL, + "Construct the query sample library."); + m.def("DestroyQSL", &py::DestroyQSL, + "Destroy the object created by ConstructQSL."); + + m.def("StartTest", &py::StartTest, + "Run tests on a SUT created by ConstructSUT() with the provided QSL. " + "Uses default log settings."); + m.def("StartTestWithLogSettings", &py::StartTestWithLogSettings, + "Run tests on a SUT created by ConstructSUT() with the provided QSL. 
" + "Accepts custom log settings."); + m.def("QuerySamplesComplete", &py::QuerySamplesComplete, + "Called by the SUT to indicate that samples from some combination of" + "IssueQuery calls have finished.", pybind11::arg("responses"), pybind11::arg("response_cb") = ResponseCallback{}); +} + +} // namespace py +} // namespace mlperf + +#endif // PYTHON_BINDINGS_H diff --git a/benchmarks/rnnt/ootb/inference/loadgen/demos/py_demo_multi_stream.py b/benchmarks/rnnt/ootb/inference/loadgen/demos/py_demo_multi_stream.py new file mode 100644 index 0000000..141b27a --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/loadgen/demos/py_demo_multi_stream.py @@ -0,0 +1,92 @@ +# Copyright 2019 The MLPerf Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================= + +"""Python demo showing how to use the MLPerf Inference load generator bindings. +""" + +from __future__ import print_function + +import threading +import time + +from absl import app +import mlperf_loadgen +import numpy + + +def load_samples_to_ram(query_samples): + del query_samples + return + + +def unload_samples_from_ram(query_samples): + del query_samples + return + + +# Processes queries in 3 slices that complete at different times. 
+def process_query_async(query_samples, i_slice): + time.sleep(.001 * (i_slice + 1)) + responses = [] + samples_to_complete = query_samples[i_slice:len(query_samples):3] + for s in samples_to_complete: + responses.append(mlperf_loadgen.QuerySampleResponse(s.id, 0, 0)) + mlperf_loadgen.QuerySamplesComplete(responses) + + +def issue_query(query_samples): + threading.Thread(target=process_query_async, + args=(query_samples, 0)).start() + threading.Thread(target=process_query_async, + args=(query_samples, 1)).start() + threading.Thread(target=process_query_async, + args=(query_samples, 2)).start() + + +def flush_queries(): + pass + + +def process_latencies(latencies_ns): + print("Average latency: ") + print(numpy.mean(latencies_ns)) + print("Median latency: ") + print(numpy.percentile(latencies_ns, 50)) + print("90 percentile latency: ") + print(numpy.percentile(latencies_ns, 90)) + + +def main(argv): + del argv + settings = mlperf_loadgen.TestSettings() + settings.scenario = mlperf_loadgen.TestScenario.MultiStream + settings.mode = mlperf_loadgen.TestMode.PerformanceOnly + settings.multi_stream_target_latency_ns = 100000000 + settings.multi_stream_samples_per_query = 4 + settings.multi_stream_max_async_queries = 2 + settings.min_query_count = 100 + settings.min_duration_ms = 10000 + + sut = mlperf_loadgen.ConstructSUT( + issue_query, flush_queries, process_latencies) + qsl = mlperf_loadgen.ConstructQSL( + 1024, 128, load_samples_to_ram, unload_samples_from_ram) + mlperf_loadgen.StartTest(sut, qsl, settings) + mlperf_loadgen.DestroyQSL(qsl) + mlperf_loadgen.DestroySUT(sut) + + +if __name__ == "__main__": + app.run(main) diff --git a/benchmarks/rnnt/ootb/inference/loadgen/demos/py_demo_multi_stream_free.py b/benchmarks/rnnt/ootb/inference/loadgen/demos/py_demo_multi_stream_free.py new file mode 100644 index 0000000..a603059 --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/loadgen/demos/py_demo_multi_stream_free.py @@ -0,0 +1,92 @@ +# Copyright 2019 The MLPerf Authors. 
All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================= + +"""Python demo showing how to use the MLPerf Inference load generator bindings. +""" + +from __future__ import print_function + +import threading +import time + +from absl import app +import mlperf_loadgen +import numpy + + +def load_samples_to_ram(query_samples): + del query_samples + return + + +def unload_samples_from_ram(query_samples): + del query_samples + return + + +# Processes queries in 3 slices that complete at different times. 
+def process_query_async(query_samples, i_slice): + time.sleep(.001 * (i_slice + 1)) + responses = [] + samples_to_complete = query_samples[i_slice:len(query_samples):3] + for s in samples_to_complete: + responses.append(mlperf_loadgen.QuerySampleResponse(s.id, 0, 0)) + mlperf_loadgen.QuerySamplesComplete(responses) + + +def issue_query(query_samples): + threading.Thread(target=process_query_async, + args=(query_samples, 0)).start() + threading.Thread(target=process_query_async, + args=(query_samples, 1)).start() + threading.Thread(target=process_query_async, + args=(query_samples, 2)).start() + + +def flush_queries(): + pass + + +def process_latencies(latencies_ns): + print("Average latency: ") + print(numpy.mean(latencies_ns)) + print("Median latency: ") + print(numpy.percentile(latencies_ns, 50)) + print("90 percentile latency: ") + print(numpy.percentile(latencies_ns, 90)) + + +def main(argv): + del argv + settings = mlperf_loadgen.TestSettings() + settings.scenario = mlperf_loadgen.TestScenario.MultiStreamFree + settings.mode = mlperf_loadgen.TestMode.PerformanceOnly + settings.multi_stream_target_latency_ns = 100000000 + settings.multi_stream_samples_per_query = 4 + settings.multi_stream_max_async_queries = 2 + settings.min_query_count = 100 + settings.min_duration_ms = 10000 + + sut = mlperf_loadgen.ConstructSUT( + issue_query, flush_queries, process_latencies) + qsl = mlperf_loadgen.ConstructQSL( + 1024, 128, load_samples_to_ram, unload_samples_from_ram) + mlperf_loadgen.StartTest(sut, qsl, settings) + mlperf_loadgen.DestroyQSL(qsl) + mlperf_loadgen.DestroySUT(sut) + + +if __name__ == "__main__": + app.run(main) diff --git a/benchmarks/rnnt/ootb/inference/loadgen/demos/py_demo_offline.py b/benchmarks/rnnt/ootb/inference/loadgen/demos/py_demo_offline.py new file mode 100644 index 0000000..c152530 --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/loadgen/demos/py_demo_offline.py @@ -0,0 +1,88 @@ +# Copyright 2019 The MLPerf Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================= + +"""Python demo showing how to use the MLPerf Inference load generator bindings. +""" + +from __future__ import print_function + +import threading +import time + +from absl import app +import mlperf_loadgen +import numpy + + +def load_samples_to_ram(query_samples): + del query_samples + return + + +def unload_samples_from_ram(query_samples): + del query_samples + return + + +# Processes queries in 3 slices that complete at different times. 
+def process_query_async(query_samples, i_slice): + time.sleep(3 * (i_slice + 1)) + responses = [] + samples_to_complete = query_samples[i_slice:len(query_samples):3] + for s in samples_to_complete: + responses.append(mlperf_loadgen.QuerySampleResponse(s.id, 0, 0)) + mlperf_loadgen.QuerySamplesComplete(responses) + + +def issue_query(query_samples): + threading.Thread(target=process_query_async, + args=(query_samples, 0)).start() + threading.Thread(target=process_query_async, + args=(query_samples, 1)).start() + threading.Thread(target=process_query_async, + args=(query_samples, 2)).start() + + +def flush_queries(): + pass + + +def process_latencies(latencies_ns): + print("Average latency: ") + print(numpy.mean(latencies_ns)) + print("Median latency: ") + print(numpy.percentile(latencies_ns, 50)) + print("90 percentile latency: ") + print(numpy.percentile(latencies_ns, 90)) + + +def main(argv): + del argv + settings = mlperf_loadgen.TestSettings() + settings.scenario = mlperf_loadgen.TestScenario.Offline + settings.mode = mlperf_loadgen.TestMode.PerformanceOnly + settings.offline_expected_qps = 1000 + + sut = mlperf_loadgen.ConstructSUT( + issue_query, flush_queries, process_latencies) + qsl = mlperf_loadgen.ConstructQSL( + 1024, 128, load_samples_to_ram, unload_samples_from_ram) + mlperf_loadgen.StartTest(sut, qsl, settings) + mlperf_loadgen.DestroyQSL(qsl) + mlperf_loadgen.DestroySUT(sut) + + +if __name__ == "__main__": + app.run(main) diff --git a/benchmarks/rnnt/ootb/inference/loadgen/demos/py_demo_server.py b/benchmarks/rnnt/ootb/inference/loadgen/demos/py_demo_server.py new file mode 100644 index 0000000..75aa82f --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/loadgen/demos/py_demo_server.py @@ -0,0 +1,85 @@ +# Copyright 2019 The MLPerf Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================= + +"""Python demo showing how to use the MLPerf Inference load generator bindings. +""" + +from __future__ import print_function + +import threading +import time + +from absl import app +import mlperf_loadgen +import numpy + + +def load_samples_to_ram(query_samples): + del query_samples + return + + +def unload_samples_from_ram(query_samples): + del query_samples + return + + +def process_query_async(query_samples): + time.sleep(.001) + responses = [] + for s in query_samples: + responses.append(mlperf_loadgen.QuerySampleResponse(s.id, 0, 0)) + mlperf_loadgen.QuerySamplesComplete(responses) + + +def issue_query(query_samples): + threading.Thread(target=process_query_async, + args=[query_samples]).start() + + +def flush_queries(): + pass + + +def process_latencies(latencies_ns): + print("Average latency: ") + print(numpy.mean(latencies_ns)) + print("Median latency: ") + print(numpy.percentile(latencies_ns, 50)) + print("99 percentile latency: ") + print(numpy.percentile(latencies_ns, 99)) + + +def main(argv): + del argv + settings = mlperf_loadgen.TestSettings() + settings.scenario = mlperf_loadgen.TestScenario.Server + settings.mode = mlperf_loadgen.TestMode.PerformanceOnly + settings.server_target_qps = 100 + settings.server_target_latency_ns = 100000000 + settings.min_query_count = 100 + settings.min_duration_ms = 10000 + + sut = mlperf_loadgen.ConstructSUT( + issue_query, flush_queries, process_latencies) + qsl = mlperf_loadgen.ConstructQSL( + 1024, 128, load_samples_to_ram, 
unload_samples_from_ram) + mlperf_loadgen.StartTest(sut, qsl, settings) + mlperf_loadgen.DestroyQSL(qsl) + mlperf_loadgen.DestroySUT(sut) + + +if __name__ == "__main__": + app.run(main) diff --git a/benchmarks/rnnt/ootb/inference/loadgen/demos/py_demo_single_stream.py b/benchmarks/rnnt/ootb/inference/loadgen/demos/py_demo_single_stream.py new file mode 100644 index 0000000..53efa42 --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/loadgen/demos/py_demo_single_stream.py @@ -0,0 +1,93 @@ +# Copyright 2019 The MLPerf Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================= + +"""Python demo showing how to use the MLPerf Inference load generator bindings. 
+""" + +from __future__ import print_function + +import array +import threading +import time + +from absl import app +import mlperf_loadgen +import numpy + + +def load_samples_to_ram(query_samples): + del query_samples + return + + +def unload_samples_from_ram(query_samples): + del query_samples + return + + +def process_query_async(query_samples): + """Processes the list of queries.""" + time.sleep(.001) + responses = [] + response_array = array.array( + "f", [0, 1, 7, 8, 15, 16, 31, 32, 63, 64, 127, 128, 254, 255]) + response_info = response_array.buffer_info() + response_data = response_info[0] + response_size = response_info[1] * response_array.itemsize + for s in query_samples: + responses.append( + mlperf_loadgen.QuerySampleResponse( + s.id, response_data, response_size)) + mlperf_loadgen.QuerySamplesComplete(responses) + + +def issue_query(query_samples): + threading.Thread(target=process_query_async, + args=[query_samples]).start() + + +def flush_queries(): + pass + + +def process_latencies(latencies_ns): + print("Average latency: ") + print(numpy.mean(latencies_ns)) + print("Median latency: ") + print(numpy.percentile(latencies_ns, 50)) + print("90 percentile latency: ") + print(numpy.percentile(latencies_ns, 90)) + + +def main(argv): + del argv + settings = mlperf_loadgen.TestSettings() + settings.scenario = mlperf_loadgen.TestScenario.SingleStream + settings.mode = mlperf_loadgen.TestMode.PerformanceOnly + settings.single_stream_expected_latency_ns = 1000000 + settings.min_query_count = 100 + settings.min_duration_ms = 10000 + + sut = mlperf_loadgen.ConstructSUT( + issue_query, flush_queries, process_latencies) + qsl = mlperf_loadgen.ConstructQSL( + 1024, 128, load_samples_to_ram, unload_samples_from_ram) + mlperf_loadgen.StartTest(sut, qsl, settings) + mlperf_loadgen.DestroyQSL(qsl) + mlperf_loadgen.DestroySUT(sut) + + +if __name__ == "__main__": + app.run(main) diff --git a/benchmarks/rnnt/ootb/inference/loadgen/docs/src/BUILD.gn 
b/benchmarks/rnnt/ootb/inference/loadgen/docs/src/BUILD.gn new file mode 100644 index 0000000..865bc4d --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/loadgen/docs/src/BUILD.gn @@ -0,0 +1,33 @@ +generated_doxygen_out_dir = + get_path_info(".", "gen_dir") + "/.." + +loadgen_doxygen_sources = [ + "doxygen.cfg", + "doxygen_footer.html", + "doxygen_header.html", + "doxygen_layout.xml", + "doxygen_stylesheet.css", + "loadgen_integration_diagram.dia", + "mlperf_icon.png", + "mlperf_logo_horizontal_color.svg", + "README.md" +] + +source_set("loadgen_doxygen_sources") { + sources = loadgen_doxygen_sources +} + +source_set("doxygen_html_generator_script") { + sources = [ "doxygen_html_generator.py" ] +} + +action("generate_doxygen_html") { + script = "doxygen_html_generator.py" + args = [ rebase_path(generated_doxygen_out_dir, root_build_dir), + rebase_path("../..") ] + outputs = [ generated_doxygen_out_dir ] + deps = [ ":loadgen_doxygen_sources", + ":doxygen_html_generator_script", + "../..:mlperf_loadgen_sources_no_gen", + "../..:docs" ] +} diff --git a/benchmarks/rnnt/ootb/inference/loadgen/docs/src/README.md b/benchmarks/rnnt/ootb/inference/loadgen/docs/src/README.md new file mode 100644 index 0000000..d5cf5fe --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/loadgen/docs/src/README.md @@ -0,0 +1,34 @@ +# Generating the HTML docs {#ReadmeHtmlDocs} + +This document is generated from inline docstrings in the source and +various markdown files checked into the git repository. If you've +checked out the code, you can generate this documentation. + +*Prerequisite:* You must have [doxygen](http://www.doxygen.nl) installed +on your system: + +## With gn / ninja + +If you are using the gn build flow, you may run: + + ninja -C out/Release generate_doxygen_html + +* This will output the documentation to out/Release/gen/loadgen/docs/gen and +avoid polluting the source directory. 
+ +## Manually + +Alternatively, you can manually run: + + python docs/src/doxygen_html_generator.py <out_dir> <loadgen_root> + +* If <loadgen_root> is omitted, it will default to ".". +* If <out_dir> is also omitted, it will default to "./docs/gen". + +## Hosting + +A version of this doc is currently hosted online at +https://mlperf.github.io/inference/loadgen/index.html + +To update the hosted version, submit a PR to the +[mlperf.github.io](https://github.com/mlperf/mlperf.github.io) repository. diff --git a/benchmarks/rnnt/ootb/inference/loadgen/docs/src/doxygen.cfg b/benchmarks/rnnt/ootb/inference/loadgen/docs/src/doxygen.cfg new file mode 100644 index 0000000..fc05853 --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/loadgen/docs/src/doxygen.cfg @@ -0,0 +1,2495 @@ +# Doxyfile 1.8.13 + +# This file describes the settings to be used by the documentation system +# doxygen (www.doxygen.org) for a project. +# +# All text after a double hash (##) is considered a comment and is placed in +# front of the TAG it is preceding. +# +# All text after a single hash (#) is considered a comment and will be ignored. +# The format is: +# TAG = value [value, ...] +# For lists, items can also be appended using: +# TAG += value [value, ...] +# Values that contain spaces should be placed between quotes (\" \"). + +#--------------------------------------------------------------------------- +# Project related configuration options +#--------------------------------------------------------------------------- + +# This tag specifies the encoding used for all characters in the config file +# that follow. The default is UTF-8 which is also the encoding used for all text +# before the first occurrence of this tag. Doxygen uses libiconv (or the iconv +# built into libc) for the transcoding. See http://www.gnu.org/software/libiconv +# for the list of possible encodings. +# The default value is: UTF-8. 
+ +DOXYFILE_ENCODING = UTF-8 + +# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by +# double-quotes, unless you are using Doxywizard) that should identify the +# project for which the documentation is generated. This name is used in the +# title of most generated pages and in a few other places. +# The default value is: My Project. + +PROJECT_NAME = "LoadGen Guide" + +# The PROJECT_NUMBER tag can be used to enter a project or revision number. This +# could be handy for archiving the generated documentation or if some version +# control system is used. + +PROJECT_NUMBER = + +# Using the PROJECT_BRIEF tag one can provide an optional one line description +# for a project that appears at the top of each page and should give viewer a +# quick idea about the purpose of the project. Keep the description short. + +PROJECT_BRIEF = + +# With the PROJECT_LOGO tag one can specify a logo or an icon that is included +# in the documentation. The maximum height of the logo should not exceed 55 +# pixels and the maximum width should not exceed 200 pixels. Doxygen will copy +# the logo to the output directory. + +PROJECT_LOGO = $(MLPERF_LOADGEN_SRC_PATH)/docs/src/mlperf_logo_horizontal_color.svg + +# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path +# into which the generated documentation will be written. If a relative path is +# entered, it will be relative to the location where doxygen was started. If +# left blank the current directory will be used. + +OUTPUT_DIRECTORY = $(MLPERF_DOXYGEN_OUT_PATH) + +# If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub- +# directories (in 2 levels) under the output directory of each output format and +# will distribute the generated files over these directories. 
Enabling this +# option can be useful when feeding doxygen a huge amount of source files, where +# putting all generated files in the same directory would otherwise causes +# performance problems for the file system. +# The default value is: NO. + +CREATE_SUBDIRS = NO + +# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII +# characters to appear in the names of generated files. If set to NO, non-ASCII +# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode +# U+3044. +# The default value is: NO. + +ALLOW_UNICODE_NAMES = NO + +# The OUTPUT_LANGUAGE tag is used to specify the language in which all +# documentation generated by doxygen is written. Doxygen will use this +# information to generate all constant output in the proper language. +# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese, +# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States), +# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian, +# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages), +# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian, +# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian, +# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish, +# Ukrainian and Vietnamese. +# The default value is: English. + +OUTPUT_LANGUAGE = English + +# If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member +# descriptions after the members that are listed in the file and class +# documentation (similar to Javadoc). Set to NO to disable this. +# The default value is: YES. + +BRIEF_MEMBER_DESC = YES + +# If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief +# description of a member or function before the detailed description +# +# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the +# brief descriptions will be completely suppressed. 
+# The default value is: YES. + +REPEAT_BRIEF = YES + +# This tag implements a quasi-intelligent brief description abbreviator that is +# used to form the text in various listings. Each string in this list, if found +# as the leading text of the brief description, will be stripped from the text +# and the result, after processing the whole list, is used as the annotated +# text. Otherwise, the brief description is used as-is. If left blank, the +# following values are used ($name is automatically replaced with the name of +# the entity):The $name class, The $name widget, The $name file, is, provides, +# specifies, contains, represents, a, an and the. + +ABBREVIATE_BRIEF = "The $name class" \ + "The $name widget" \ + "The $name file" \ + is \ + provides \ + specifies \ + contains \ + represents \ + a \ + an \ + the + +# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then +# doxygen will generate a detailed section even if there is only a brief +# description. +# The default value is: NO. + +ALWAYS_DETAILED_SEC = YES + +# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all +# inherited members of a class in the documentation of that class as if those +# members were ordinary class members. Constructors, destructors and assignment +# operators of the base classes will not be shown. +# The default value is: NO. + +INLINE_INHERITED_MEMB = NO + +# If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path +# before files name in the file list and in the header files. If set to NO the +# shortest path that makes the file name unique will be used +# The default value is: YES. + +FULL_PATH_NAMES = YES + +# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path. +# Stripping is only done if one of the specified strings matches the left-hand +# part of the path. The tag can be used to show relative paths in the file list. 
+# If left blank the directory from which doxygen is run is used as the path to +# strip. +# +# Note that you can specify absolute paths here, but also relative paths, which +# will be relative from the directory where doxygen is started. +# This tag requires that the tag FULL_PATH_NAMES is set to YES. + +STRIP_FROM_PATH = + +# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the +# path mentioned in the documentation of a class, which tells the reader which +# header file to include in order to use a class. If left blank only the name of +# the header file containing the class definition is used. Otherwise one should +# specify the list of include paths that are normally passed to the compiler +# using the -I flag. + +STRIP_FROM_INC_PATH = + +# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but +# less readable) file names. This can be useful is your file systems doesn't +# support long names like on DOS, Mac, or CD-ROM. +# The default value is: NO. + +SHORT_NAMES = NO + +# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the +# first line (until the first dot) of a Javadoc-style comment as the brief +# description. If set to NO, the Javadoc-style will behave just like regular Qt- +# style comments (thus requiring an explicit @brief command for a brief +# description.) +# The default value is: NO. + +JAVADOC_AUTOBRIEF = NO + +# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first +# line (until the first dot) of a Qt-style comment as the brief description. If +# set to NO, the Qt-style will behave just like regular Qt-style comments (thus +# requiring an explicit \brief command for a brief description.) +# The default value is: NO. + +QT_AUTOBRIEF = NO + +# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a +# multi-line C++ special comment block (i.e. a block of //! or /// comments) as +# a brief description. This used to be the default behavior. 
The new default is +# to treat a multi-line C++ comment block as a detailed description. Set this +# tag to YES if you prefer the old behavior instead. +# +# Note that setting this tag to YES also means that rational rose comments are +# not recognized any more. +# The default value is: NO. + +MULTILINE_CPP_IS_BRIEF = NO + +# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the +# documentation from any documented member that it re-implements. +# The default value is: YES. + +INHERIT_DOCS = YES + +# If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new +# page for each member. If set to NO, the documentation of a member will be part +# of the file/class/namespace that contains it. +# The default value is: NO. + +SEPARATE_MEMBER_PAGES = NO + +# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen +# uses this value to replace tabs by spaces in code fragments. +# Minimum value: 1, maximum value: 16, default value: 4. + +TAB_SIZE = 4 + +# This tag can be used to specify a number of aliases that act as commands in +# the documentation. An alias has the form: +# name=value +# For example adding +# "sideeffect=@par Side Effects:\n" +# will allow you to put the command \sideeffect (or @sideeffect) in the +# documentation, which will result in a user-defined paragraph with heading +# "Side Effects:". You can put \n's in the value part of an alias to insert +# newlines. + +ALIASES = + +# This tag can be used to specify a number of word-keyword mappings (TCL only). +# A mapping has the form "name=value". For example adding "class=itcl::class" +# will allow you to use the command class in the itcl::class meaning. + +TCL_SUBST = + +# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources +# only. Doxygen will then generate output that is more tailored for C. For +# instance, some of the names that are used will be different. The list of all +# members will be omitted, etc. 
+# The default value is: NO. + +OPTIMIZE_OUTPUT_FOR_C = NO + +# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or +# Python sources only. Doxygen will then generate output that is more tailored +# for that language. For instance, namespaces will be presented as packages, +# qualified scopes will look different, etc. +# The default value is: NO. + +OPTIMIZE_OUTPUT_JAVA = NO + +# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran +# sources. Doxygen will then generate output that is tailored for Fortran. +# The default value is: NO. + +OPTIMIZE_FOR_FORTRAN = NO + +# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL +# sources. Doxygen will then generate output that is tailored for VHDL. +# The default value is: NO. + +OPTIMIZE_OUTPUT_VHDL = NO + +# Doxygen selects the parser to use depending on the extension of the files it +# parses. With this tag you can assign which parser to use for a given +# extension. Doxygen has a built-in mapping, but you can override or extend it +# using this tag. The format is ext=language, where ext is a file extension, and +# language is one of the parsers supported by doxygen: IDL, Java, Javascript, +# C#, C, C++, D, PHP, Objective-C, Python, Fortran (fixed format Fortran: +# FortranFixed, free formatted Fortran: FortranFree, unknown formatted Fortran: +# Fortran. In the later case the parser tries to guess whether the code is fixed +# or free formatted code, this is the default for Fortran type files), VHDL. For +# instance to make doxygen treat .inc files as Fortran files (default is PHP), +# and .f files as C (default is Fortran), use: inc=Fortran f=C. +# +# Note: For files without extension you can use no_extension as a placeholder. +# +# Note that for custom extensions you also need to set FILE_PATTERNS otherwise +# the files are not read by doxygen. 
+ +EXTENSION_MAPPING = + +# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments +# according to the Markdown format, which allows for more readable +# documentation. See http://daringfireball.net/projects/markdown/ for details. +# The output of markdown processing is further processed by doxygen, so you can +# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in +# case of backward compatibilities issues. +# The default value is: YES. + +MARKDOWN_SUPPORT = YES + +# When the TOC_INCLUDE_HEADINGS tag is set to a non-zero value, all headings up +# to that level are automatically included in the table of contents, even if +# they do not have an id attribute. +# Note: This feature currently applies only to Markdown headings. +# Minimum value: 0, maximum value: 99, default value: 0. +# This tag requires that the tag MARKDOWN_SUPPORT is set to YES. + +TOC_INCLUDE_HEADINGS = 1 + +# When enabled doxygen tries to link words that correspond to documented +# classes, or namespaces to their corresponding documentation. Such a link can +# be prevented in individual cases by putting a % sign in front of the word or +# globally by setting AUTOLINK_SUPPORT to NO. +# The default value is: YES. + +AUTOLINK_SUPPORT = YES + +# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want +# to include (a tag file for) the STL sources as input, then you should set this +# tag to YES in order to let doxygen match functions declarations and +# definitions whose arguments contain STL classes (e.g. func(std::string); +# versus func(std::string) {}). This also make the inheritance and collaboration +# diagrams that involve STL classes more complete and accurate. +# The default value is: NO. + +BUILTIN_STL_SUPPORT = NO + +# If you use Microsoft's C++/CLI language, you should set this option to YES to +# enable parsing support. +# The default value is: NO. 
+ +CPP_CLI_SUPPORT = NO + +# Set the SIP_SUPPORT tag to YES if your project consists of sip (see: +# http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen +# will parse them like normal C++ but will assume all classes use public instead +# of private inheritance when no explicit protection keyword is present. +# The default value is: NO. + +SIP_SUPPORT = NO + +# For Microsoft's IDL there are propget and propput attributes to indicate +# getter and setter methods for a property. Setting this option to YES will make +# doxygen to replace the get and set methods by a property in the documentation. +# This will only work if the methods are indeed getting or setting a simple +# type. If this is not the case, or you want to show the methods anyway, you +# should set this option to NO. +# The default value is: YES. + +IDL_PROPERTY_SUPPORT = YES + +# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC +# tag is set to YES then doxygen will reuse the documentation of the first +# member in the group (if any) for the other members of the group. By default +# all members of a group must be documented explicitly. +# The default value is: NO. + +DISTRIBUTE_GROUP_DOC = NO + +# If one adds a struct or class to a group and this option is enabled, then also +# any nested class or struct is added to the same group. By default this option +# is disabled and one has to add nested compounds explicitly via \ingroup. +# The default value is: NO. + +GROUP_NESTED_COMPOUNDS = NO + +# Set the SUBGROUPING tag to YES to allow class member groups of the same type +# (for instance a group of public functions) to be put as a subgroup of that +# type (e.g. under the Public Functions section). Set it to NO to prevent +# subgrouping. Alternatively, this can be done per class using the +# \nosubgrouping command. +# The default value is: YES. 
+ +SUBGROUPING = YES + +# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions +# are shown inside the group in which they are included (e.g. using \ingroup) +# instead of on a separate page (for HTML and Man pages) or section (for LaTeX +# and RTF). +# +# Note that this feature does not work in combination with +# SEPARATE_MEMBER_PAGES. +# The default value is: NO. + +INLINE_GROUPED_CLASSES = NO + +# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions +# with only public data fields or simple typedef fields will be shown inline in +# the documentation of the scope in which they are defined (i.e. file, +# namespace, or group documentation), provided this scope is documented. If set +# to NO, structs, classes, and unions are shown on a separate page (for HTML and +# Man pages) or section (for LaTeX and RTF). +# The default value is: NO. + +INLINE_SIMPLE_STRUCTS = NO + +# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or +# enum is documented as struct, union, or enum with the name of the typedef. So +# typedef struct TypeS {} TypeT, will appear in the documentation as a struct +# with name TypeT. When disabled the typedef will appear as a member of a file, +# namespace, or class. And the struct will be named TypeS. This can typically be +# useful for C code in case the coding convention dictates that all compound +# types are typedef'ed and only the typedef is referenced, never the tag name. +# The default value is: NO. + +TYPEDEF_HIDES_STRUCT = NO + +# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This +# cache is used to resolve symbols given their name and scope. Since this can be +# an expensive process and often the same symbol appears multiple times in the +# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small +# doxygen will become slower. If the cache is too large, memory is wasted. 
The +# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range +# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536 +# symbols. At the end of a run doxygen will report the cache usage and suggest +# the optimal cache size from a speed point of view. +# Minimum value: 0, maximum value: 9, default value: 0. + +LOOKUP_CACHE_SIZE = 0 + +#--------------------------------------------------------------------------- +# Build related configuration options +#--------------------------------------------------------------------------- + +# If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in +# documentation are documented, even if no documentation was available. Private +# class members and static file members will be hidden unless the +# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES. +# Note: This will also disable the warnings about undocumented members that are +# normally produced when WARNINGS is set to YES. +# The default value is: NO. + +EXTRACT_ALL = NO + +# If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will +# be included in the documentation. +# The default value is: NO. + +EXTRACT_PRIVATE = YES + +# If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal +# scope will be included in the documentation. +# The default value is: NO. + +EXTRACT_PACKAGE = YES + +# If the EXTRACT_STATIC tag is set to YES, all static members of a file will be +# included in the documentation. +# The default value is: NO. + +EXTRACT_STATIC = YES + +# If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined +# locally in source files will be included in the documentation. If set to NO, +# only classes defined in header files are included. Does not have any effect +# for Java sources. +# The default value is: YES. + +EXTRACT_LOCAL_CLASSES = YES + +# This flag is only useful for Objective-C code. 
If set to YES, local methods, +# which are defined in the implementation section but not in the interface are +# included in the documentation. If set to NO, only methods in the interface are +# included. +# The default value is: NO. + +EXTRACT_LOCAL_METHODS = NO + +# If this flag is set to YES, the members of anonymous namespaces will be +# extracted and appear in the documentation as a namespace called +# 'anonymous_namespace{file}', where file will be replaced with the base name of +# the file that contains the anonymous namespace. By default anonymous namespace +# are hidden. +# The default value is: NO. + +EXTRACT_ANON_NSPACES = NO + +# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all +# undocumented members inside documented classes or files. If set to NO these +# members will be included in the various overviews, but no documentation +# section is generated. This option has no effect if EXTRACT_ALL is enabled. +# The default value is: NO. + +HIDE_UNDOC_MEMBERS = NO + +# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all +# undocumented classes that are normally visible in the class hierarchy. If set +# to NO, these classes will be included in the various overviews. This option +# has no effect if EXTRACT_ALL is enabled. +# The default value is: NO. + +HIDE_UNDOC_CLASSES = NO + +# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend +# (class|struct|union) declarations. If set to NO, these declarations will be +# included in the documentation. +# The default value is: NO. + +HIDE_FRIEND_COMPOUNDS = NO + +# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any +# documentation blocks found inside the body of a function. If set to NO, these +# blocks will be appended to the function's detailed documentation block. +# The default value is: NO. + +HIDE_IN_BODY_DOCS = NO + +# The INTERNAL_DOCS tag determines if documentation that is typed after a +# \internal command is included. 
If the tag is set to NO then the documentation +# will be excluded. Set it to YES to include the internal documentation. +# The default value is: NO. + +INTERNAL_DOCS = NO + +# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file +# names in lower-case letters. If set to YES, upper-case letters are also +# allowed. This is useful if you have classes or files whose names only differ +# in case and if your file system supports case sensitive file names. Windows +# and Mac users are advised to set this option to NO. +# The default value is: system dependent. + +CASE_SENSE_NAMES = YES + +# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with +# their full class and namespace scopes in the documentation. If set to YES, the +# scope will be hidden. +# The default value is: NO. + +HIDE_SCOPE_NAMES = NO + +# If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will +# append additional text to a page's title, such as Class Reference. If set to +# YES the compound reference will be hidden. +# The default value is: NO. + +HIDE_COMPOUND_REFERENCE= NO + +# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of +# the files that are included by a file in the documentation of that file. +# The default value is: YES. + +SHOW_INCLUDE_FILES = YES + +# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each +# grouped member an include statement to the documentation, telling the reader +# which file to include in order to use the member. +# The default value is: NO. + +SHOW_GROUPED_MEMB_INC = NO + +# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include +# files with double quotes in the documentation rather than with sharp brackets. +# The default value is: NO. + +FORCE_LOCAL_INCLUDES = NO + +# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the +# documentation for inline members. +# The default value is: YES. 
+ +INLINE_INFO = YES + +# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the +# (detailed) documentation of file and class members alphabetically by member +# name. If set to NO, the members will appear in declaration order. +# The default value is: YES. + +SORT_MEMBER_DOCS = YES + +# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief +# descriptions of file, namespace and class members alphabetically by member +# name. If set to NO, the members will appear in declaration order. Note that +# this will also influence the order of the classes in the class list. +# The default value is: NO. + +SORT_BRIEF_DOCS = NO + +# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the +# (brief and detailed) documentation of class members so that constructors and +# destructors are listed first. If set to NO the constructors will appear in the +# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS. +# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief +# member documentation. +# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting +# detailed member documentation. +# The default value is: NO. + +SORT_MEMBERS_CTORS_1ST = NO + +# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy +# of group names into alphabetical order. If set to NO the group names will +# appear in their defined order. +# The default value is: NO. + +SORT_GROUP_NAMES = NO + +# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by +# fully-qualified names, including namespaces. If set to NO, the class list will +# be sorted only by class name, not including the namespace part. +# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. +# Note: This option applies only to the class list, not to the alphabetical +# list. +# The default value is: NO. 
+ +SORT_BY_SCOPE_NAME = NO + +# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper +# type resolution of all parameters of a function it will reject a match between +# the prototype and the implementation of a member function even if there is +# only one candidate or it is obvious which candidate to choose by doing a +# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still +# accept a match between prototype and implementation in such cases. +# The default value is: NO. + +STRICT_PROTO_MATCHING = NO + +# The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo +# list. This list is created by putting \todo commands in the documentation. +# The default value is: YES. + +GENERATE_TODOLIST = YES + +# The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test +# list. This list is created by putting \test commands in the documentation. +# The default value is: YES. + +GENERATE_TESTLIST = YES + +# The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug +# list. This list is created by putting \bug commands in the documentation. +# The default value is: YES. + +GENERATE_BUGLIST = YES + +# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO) +# the deprecated list. This list is created by putting \deprecated commands in +# the documentation. +# The default value is: YES. + +GENERATE_DEPRECATEDLIST= YES + +# The ENABLED_SECTIONS tag can be used to enable conditional documentation +# sections, marked by \if ... \endif and \cond +# ... \endcond blocks. + +ENABLED_SECTIONS = + +# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the +# initial value of a variable or macro / define can have for it to appear in the +# documentation. If the initializer consists of more lines than specified here +# it will be hidden. Use a value of 0 to hide initializers completely. 
The
+# appearance of the value of individual variables and macros / defines can be
+# controlled using \showinitializer or \hideinitializer command in the
+# documentation regardless of this setting.
+# Minimum value: 0, maximum value: 10000, default value: 30.
+
+MAX_INITIALIZER_LINES = 30
+
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at
+# the bottom of the documentation of classes and structs. If set to YES, the
+# list will mention the files that were used to generate the documentation.
+# The default value is: YES.
+
+SHOW_USED_FILES = YES
+
+# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This
+# will remove the Files entry from the Quick Index and from the Folder Tree View
+# (if specified).
+# The default value is: YES.
+
+SHOW_FILES = YES
+
+# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces
+# page. This will remove the Namespaces entry from the Quick Index and from the
+# Folder Tree View (if specified).
+# The default value is: YES.
+
+SHOW_NAMESPACES = YES
+
+# The FILE_VERSION_FILTER tag can be used to specify a program or script that
+# doxygen should invoke to get the current version for each file (typically from
+# the version control system). Doxygen will invoke the program by executing (via
+# popen()) the command <command> <input-file>, where <command> is the value of
+# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file
+# provided by doxygen. Whatever the program writes to standard output is used as
+# the file version. For an example see the documentation.
+
+FILE_VERSION_FILTER =
+
+# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
+# by doxygen. The layout file controls the global structure of the generated
+# output files in an output format independent way. To create the layout file
+# that represents doxygen's defaults, run doxygen with the -l option. 
You can +# optionally specify a file name after the option, if omitted DoxygenLayout.xml +# will be used as the name of the layout file. +# +# Note that if you run doxygen from a directory containing a file called +# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE +# tag is left empty. + +LAYOUT_FILE = $(MLPERF_LOADGEN_SRC_PATH)/docs/src/doxygen_layout.xml + +# The CITE_BIB_FILES tag can be used to specify one or more bib files containing +# the reference definitions. This must be a list of .bib files. The .bib +# extension is automatically appended if omitted. This requires the bibtex tool +# to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info. +# For LaTeX the style of the bibliography can be controlled using +# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the +# search path. See also \cite for info how to create references. + +CITE_BIB_FILES = + +#--------------------------------------------------------------------------- +# Configuration options related to warning and progress messages +#--------------------------------------------------------------------------- + +# The QUIET tag can be used to turn on/off the messages that are generated to +# standard output by doxygen. If QUIET is set to YES this implies that the +# messages are off. +# The default value is: NO. + +QUIET = NO + +# The WARNINGS tag can be used to turn on/off the warning messages that are +# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES +# this implies that the warnings are on. +# +# Tip: Turn warnings on while writing the documentation. +# The default value is: YES. + +WARNINGS = YES + +# If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate +# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag +# will automatically be disabled. +# The default value is: YES. 
+ +WARN_IF_UNDOCUMENTED = NO + +# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for +# potential errors in the documentation, such as not documenting some parameters +# in a documented function, or documenting parameters that don't exist or using +# markup commands wrongly. +# The default value is: YES. + +WARN_IF_DOC_ERROR = YES + +# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that +# are documented, but have no documentation for their parameters or return +# value. If set to NO, doxygen will only warn about wrong or incomplete +# parameter documentation, but not about the absence of documentation. +# The default value is: NO. + +WARN_NO_PARAMDOC = NO + +# If the WARN_AS_ERROR tag is set to YES then doxygen will immediately stop when +# a warning is encountered. +# The default value is: NO. + +WARN_AS_ERROR = NO + +# The WARN_FORMAT tag determines the format of the warning messages that doxygen +# can produce. The string should contain the $file, $line, and $text tags, which +# will be replaced by the file and line number from which the warning originated +# and the warning text. Optionally the format may contain $version, which will +# be replaced by the version of the file (if it could be obtained via +# FILE_VERSION_FILTER) +# The default value is: $file:$line: $text. + +WARN_FORMAT = "$file:$line: $text" + +# The WARN_LOGFILE tag can be used to specify a file to which warning and error +# messages should be written. If left blank the output is written to standard +# error (stderr). + +WARN_LOGFILE = + +#--------------------------------------------------------------------------- +# Configuration options related to the input files +#--------------------------------------------------------------------------- + +# The INPUT tag is used to specify the files and/or directories that contain +# documented source files. You may enter file names like myfile.cpp or +# directories like /usr/src/myproject. 
Separate the files or directories with +# spaces. See also FILE_PATTERNS and EXTENSION_MAPPING +# Note: If this tag is empty the current directory is searched. + +INPUT = $(MLPERF_LOADGEN_SRC_PATH) + +# This tag can be used to specify the character encoding of the source files +# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses +# libiconv (or the iconv built into libc) for the transcoding. See the libiconv +# documentation (see: http://www.gnu.org/software/libiconv) for the list of +# possible encodings. +# The default value is: UTF-8. + +INPUT_ENCODING = UTF-8 + +# If the value of the INPUT tag contains directories, you can use the +# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and +# *.h) to filter out the source-files in the directories. +# +# Note that for custom extensions or not directly supported extensions you also +# need to set EXTENSION_MAPPING for the extension otherwise the files are not +# read by doxygen. +# +# If left blank the following patterns are tested:*.c, *.cc, *.cxx, *.cpp, +# *.c++, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, +# *.hh, *.hxx, *.hpp, *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, +# *.m, *.markdown, *.md, *.mm, *.dox, *.py, *.pyw, *.f90, *.f95, *.f03, *.f08, +# *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf and *.qsf. + +FILE_PATTERNS = *.c \ + *.cc \ + *.cxx \ + *.cpp \ + *.c++ \ + *.java \ + *.ii \ + *.ixx \ + *.ipp \ + *.i++ \ + *.inl \ + *.idl \ + *.ddl \ + *.odl \ + *.h \ + *.hh \ + *.hxx \ + *.hpp \ + *.h++ \ + *.cs \ + *.d \ + *.php \ + *.php4 \ + *.php5 \ + *.phtml \ + *.inc \ + *.m \ + *.markdown \ + *.md \ + *.mm \ + *.dox \ + *.py \ + *.pyw \ + *.f90 \ + *.f95 \ + *.f03 \ + *.f08 \ + *.f \ + *.for \ + *.tcl \ + *.vhd \ + *.vhdl \ + *.ucf \ + *.qsf + +# The RECURSIVE tag can be used to specify whether or not subdirectories should +# be searched for input files as well. +# The default value is: NO. 
+ +RECURSIVE = YES + +# The EXCLUDE tag can be used to specify files and/or directories that should be +# excluded from the INPUT source files. This way you can easily exclude a +# subdirectory from a directory tree whose root is specified with the INPUT tag. +# +# Note that relative paths are relative to the directory from which doxygen is +# run. + +EXCLUDE = depot_tools + +# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or +# directories that are symbolic links (a Unix file system feature) are excluded +# from the input. +# The default value is: NO. + +EXCLUDE_SYMLINKS = NO + +# If the value of the INPUT tag contains directories, you can use the +# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude +# certain files from those directories. +# +# Note that the wildcards are matched against the file with absolute path, so to +# exclude all test directories for example use the pattern */test/* + +EXCLUDE_PATTERNS = + +# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names +# (namespaces, classes, functions, etc.) that should be excluded from the +# output. The symbol name can be a fully qualified name, a word, or if the +# wildcard * is used, a substring. Examples: ANamespace, AClass, +# AClass::ANamespace, ANamespace::*Test +# +# Note that the wildcards are matched against the file with absolute path, so to +# exclude all test directories use the pattern */test/* + +EXCLUDE_SYMBOLS = + +# The EXAMPLE_PATH tag can be used to specify one or more files or directories +# that contain example code fragments that are included (see the \include +# command). + +EXAMPLE_PATH = + +# If the value of the EXAMPLE_PATH tag contains directories, you can use the +# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and +# *.h) to filter out the source-files in the directories. If left blank all +# files are included. 
+
+EXAMPLE_PATTERNS = *
+
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
+# searched for input files to be used with the \include or \dontinclude commands
+# irrespective of the value of the RECURSIVE tag.
+# The default value is: NO.
+
+EXAMPLE_RECURSIVE = NO
+
+# The IMAGE_PATH tag can be used to specify one or more files or directories
+# that contain images that are to be included in the documentation (see the
+# \image command).
+
+IMAGE_PATH = $(MLPERF_LOADGEN_SRC_PATH)/docs/src
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should
+# invoke to filter for each input file. Doxygen will invoke the filter program
+# by executing (via popen()) the command:
+#
+#   <filter> <input-file>
+#
+# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the
+# name of an input file. Doxygen will then use the output that the filter
+# program writes to standard output. If FILTER_PATTERNS is specified, this tag
+# will be ignored.
+#
+# Note that the filter must not add or remove lines; it is applied before the
+# code is scanned, but not when the output code is generated. If lines are added
+# or removed, the anchors will not be placed correctly.
+#
+# Note that for custom extensions or not directly supported extensions you also
+# need to set EXTENSION_MAPPING for the extension otherwise the files are not
+# properly processed by doxygen.
+
+INPUT_FILTER =
+
+# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
+# basis. Doxygen will compare the file name with each pattern and apply the
+# filter if there is a match. The filters are a list of the form: pattern=filter
+# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how
+# filters are used. If the FILTER_PATTERNS tag is empty or if none of the
+# patterns match the file name, INPUT_FILTER is applied. 
+# +# Note that for custom extensions or not directly supported extensions you also +# need to set EXTENSION_MAPPING for the extension otherwise the files are not +# properly processed by doxygen. + +FILTER_PATTERNS = + +# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using +# INPUT_FILTER) will also be used to filter the input files that are used for +# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES). +# The default value is: NO. + +FILTER_SOURCE_FILES = NO + +# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file +# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and +# it is also possible to disable source filtering for a specific pattern using +# *.ext= (so without naming a filter). +# This tag requires that the tag FILTER_SOURCE_FILES is set to YES. + +FILTER_SOURCE_PATTERNS = + +# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that +# is part of the input, its contents will be placed on the main page +# (index.html). This can be useful if you have a project on for instance GitHub +# and want to reuse the introduction page also for the doxygen output. + +USE_MDFILE_AS_MAINPAGE = + +#--------------------------------------------------------------------------- +# Configuration options related to source browsing +#--------------------------------------------------------------------------- + +# If the SOURCE_BROWSER tag is set to YES then a list of source files will be +# generated. Documented entities will be cross-referenced with these sources. +# +# Note: To get rid of all source code in the generated output, make sure that +# also VERBATIM_HEADERS is set to NO. +# The default value is: NO. + +SOURCE_BROWSER = YES + +# Setting the INLINE_SOURCES tag to YES will include the body of functions, +# classes and enums directly into the documentation. +# The default value is: NO. 
+ +INLINE_SOURCES = NO + +# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any +# special comment blocks from generated source code fragments. Normal C, C++ and +# Fortran comments will always remain visible. +# The default value is: YES. + +STRIP_CODE_COMMENTS = YES + +# If the REFERENCED_BY_RELATION tag is set to YES then for each documented +# function all documented functions referencing it will be listed. +# The default value is: NO. + +REFERENCED_BY_RELATION = NO + +# If the REFERENCES_RELATION tag is set to YES then for each documented function +# all documented entities called/used by that function will be listed. +# The default value is: NO. + +REFERENCES_RELATION = NO + +# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set +# to YES then the hyperlinks from functions in REFERENCES_RELATION and +# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will +# link to the documentation. +# The default value is: YES. + +REFERENCES_LINK_SOURCE = YES + +# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the +# source code will show a tooltip with additional information such as prototype, +# brief description and links to the definition and documentation. Since this +# will make the HTML file larger and loading of large files a bit slower, you +# can opt to disable this feature. +# The default value is: YES. +# This tag requires that the tag SOURCE_BROWSER is set to YES. + +SOURCE_TOOLTIPS = YES + +# If the USE_HTAGS tag is set to YES then the references to source code will +# point to the HTML generated by the htags(1) tool instead of doxygen built-in +# source browser. The htags tool is part of GNU's global source tagging system +# (see http://www.gnu.org/software/global/global.html). You will need version +# 4.8.6 or higher. 
+# +# To use it do the following: +# - Install the latest version of global +# - Enable SOURCE_BROWSER and USE_HTAGS in the config file +# - Make sure the INPUT points to the root of the source tree +# - Run doxygen as normal +# +# Doxygen will invoke htags (and that will in turn invoke gtags), so these +# tools must be available from the command line (i.e. in the search path). +# +# The result: instead of the source browser generated by doxygen, the links to +# source code will now point to the output of htags. +# The default value is: NO. +# This tag requires that the tag SOURCE_BROWSER is set to YES. + +USE_HTAGS = NO + +# If the VERBATIM_HEADERS tag is set the YES then doxygen will generate a +# verbatim copy of the header file for each class for which an include is +# specified. Set to NO to disable this. +# See also: Section \class. +# The default value is: YES. + +VERBATIM_HEADERS = YES + +# If the CLANG_ASSISTED_PARSING tag is set to YES then doxygen will use the +# clang parser (see: http://clang.llvm.org/) for more accurate parsing at the +# cost of reduced performance. This can be particularly helpful with template +# rich C++ code for which doxygen's built-in parser lacks the necessary type +# information. +# Note: The availability of this option depends on whether or not doxygen was +# generated with the -Duse-libclang=ON option for CMake. +# The default value is: NO. + +CLANG_ASSISTED_PARSING = YES + +# If clang assisted parsing is enabled you can provide the compiler with command +# line options that you would normally use when invoking the compiler. Note that +# the include paths will already be set by doxygen for the files and directories +# specified with INPUT and INCLUDE_PATH. +# This tag requires that the tag CLANG_ASSISTED_PARSING is set to YES. 
+
+CLANG_OPTIONS = -I ../third_party/pybind/include --std=c++14
+
+#---------------------------------------------------------------------------
+# Configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all
+# compounds will be generated. Enable this if the project contains a lot of
+# classes, structs, unions or interfaces.
+# The default value is: YES.
+
+ALPHABETICAL_INDEX = YES
+
+# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in
+# which the alphabetical index list will be split.
+# Minimum value: 1, maximum value: 20, default value: 5.
+# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
+
+COLS_IN_ALPHA_INDEX = 5
+
+# In case all classes in a project start with a common prefix, all classes will
+# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag
+# can be used to specify a prefix (or a list of prefixes) that should be ignored
+# while generating the index headers.
+# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
+
+IGNORE_PREFIX =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the HTML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output
+# The default value is: YES.
+
+GENERATE_HTML = YES
+
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: html.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_OUTPUT = html
+
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each
+# generated HTML page (for example: .htm, .php, .asp).
+# The default value is: .html. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_FILE_EXTENSION = .html + +# The HTML_HEADER tag can be used to specify a user-defined HTML header file for +# each generated HTML page. If the tag is left blank doxygen will generate a +# standard header. +# +# To get valid HTML the header file that includes any scripts and style sheets +# that doxygen needs, which is dependent on the configuration options used (e.g. +# the setting GENERATE_TREEVIEW). It is highly recommended to start with a +# default header using +# doxygen -w html new_header.html new_footer.html new_stylesheet.css +# YourConfigFile +# and then modify the file new_header.html. See also section "Doxygen usage" +# for information on how to generate the default header that doxygen normally +# uses. +# Note: The header is subject to change so you typically have to regenerate the +# default header when upgrading to a newer version of doxygen. For a description +# of the possible markers and block names see the documentation. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_HEADER = $(MLPERF_LOADGEN_SRC_PATH)/docs/src/doxygen_header.html + +# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each +# generated HTML page. If the tag is left blank doxygen will generate a standard +# footer. See HTML_HEADER for more information on how to generate a default +# footer and what special commands can be used inside the footer. See also +# section "Doxygen usage" for information on how to generate the default footer +# that doxygen normally uses. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_FOOTER = $(MLPERF_LOADGEN_SRC_PATH)/docs/src/doxygen_footer.html + +# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style +# sheet that is used by each HTML page. It can be used to fine-tune the look of +# the HTML output. 
If left blank doxygen will generate a default style sheet. +# See also section "Doxygen usage" for information on how to generate the style +# sheet that doxygen normally uses. +# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as +# it is more robust and this tag (HTML_STYLESHEET) will in the future become +# obsolete. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_STYLESHEET = + +# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined +# cascading style sheets that are included after the standard style sheets +# created by doxygen. Using this option one can overrule certain style aspects. +# This is preferred over using HTML_STYLESHEET since it does not replace the +# standard style sheet and is therefore more robust against future updates. +# Doxygen will copy the style sheet files to the output directory. +# Note: The order of the extra style sheet files is of importance (e.g. the last +# style sheet in the list overrules the setting of the previous ones in the +# list). For an example see the documentation. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_EXTRA_STYLESHEET = $(MLPERF_LOADGEN_SRC_PATH)/docs/src/doxygen_stylesheet.css + +# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or +# other source files which should be copied to the HTML output directory. Note +# that these files will be copied to the base HTML output directory. Use the +# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these +# files. In the HTML_STYLESHEET file, use the file name only. Also note that the +# files will be copied as-is; there are no commands or markers available. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_EXTRA_FILES = $(MLPERF_LOADGEN_SRC_PATH)/docs/src/mlperf_icon.png \ + $(MLPERF_LOADGEN_SRC_PATH)/loadgen_integration_diagram.svg + +# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. 
Doxygen +# will adjust the colors in the style sheet and background images according to +# this color. Hue is specified as an angle on a colorwheel, see +# http://en.wikipedia.org/wiki/Hue for more information. For instance the value +# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300 +# purple, and 360 is red again. +# Minimum value: 0, maximum value: 359, default value: 220. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_HUE = 220 + +# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors +# in the HTML output. For a value of 0 the output will use grayscales only. A +# value of 255 will produce the most vivid colors. +# Minimum value: 0, maximum value: 255, default value: 100. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_SAT = 127 + +# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the +# luminance component of the colors in the HTML output. Values below 100 +# gradually make the output lighter, whereas values above 100 make the output +# darker. The value divided by 100 is the actual gamma applied, so 80 represents +# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not +# change the gamma. +# Minimum value: 40, maximum value: 240, default value: 80. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_GAMMA = 80 + +# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML +# page will contain the date and time when the page was generated. Setting this +# to YES can help to show when doxygen was last run and thus if the +# documentation is up to date. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_TIMESTAMP = NO + +# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML +# documentation will contain sections that can be hidden and shown after the +# page has loaded. 
+# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_DYNAMIC_SECTIONS = YES + +# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries +# shown in the various tree structured indices initially; the user can expand +# and collapse entries dynamically later on. Doxygen will expand the tree to +# such a level that at most the specified number of entries are visible (unless +# a fully collapsed tree already exceeds this amount). So setting the number of +# entries 1 will produce a full collapsed tree by default. 0 is a special value +# representing an infinite number of entries and will result in a full expanded +# tree by default. +# Minimum value: 0, maximum value: 9999, default value: 100. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_INDEX_NUM_ENTRIES = 50 + +# If the GENERATE_DOCSET tag is set to YES, additional index files will be +# generated that can be used as input for Apple's Xcode 3 integrated development +# environment (see: http://developer.apple.com/tools/xcode/), introduced with +# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a +# Makefile in the HTML output directory. Running make will produce the docset in +# that directory and running make install will install the docset in +# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at +# startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html +# for more information. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_DOCSET = NO + +# This tag determines the name of the docset feed. A documentation feed provides +# an umbrella under which multiple documentation sets from a single provider +# (such as a company or product suite) can be grouped. +# The default value is: Doxygen generated docs. +# This tag requires that the tag GENERATE_DOCSET is set to YES. 
+ +DOCSET_FEEDNAME = "Doxygen generated docs" + +# This tag specifies a string that should uniquely identify the documentation +# set bundle. This should be a reverse domain-name style string, e.g. +# com.mycompany.MyDocSet. Doxygen will append .docset to the name. +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_BUNDLE_ID = org.doxygen.Project + +# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify +# the documentation publisher. This should be a reverse domain-name style +# string, e.g. com.mycompany.MyDocSet.documentation. +# The default value is: org.doxygen.Publisher. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_PUBLISHER_ID = org.doxygen.Publisher + +# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher. +# The default value is: Publisher. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_PUBLISHER_NAME = Publisher + +# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three +# additional HTML index files: index.hhp, index.hhc, and index.hhk. The +# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop +# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on +# Windows. +# +# The HTML Help Workshop contains a compiler that can convert all HTML output +# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML +# files are now used as the Windows 98 help format, and will replace the old +# Windows help format (.hlp) on all Windows platforms in the future. Compressed +# HTML files also contain an index, a table of contents, and you can search for +# words in the documentation. The HTML workshop also contains a viewer for +# compressed HTML files. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. 
+ +GENERATE_HTMLHELP = NO + +# The CHM_FILE tag can be used to specify the file name of the resulting .chm +# file. You can add a path in front of the file if the result should not be +# written to the html output directory. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +CHM_FILE = + +# The HHC_LOCATION tag can be used to specify the location (absolute path +# including file name) of the HTML help compiler (hhc.exe). If non-empty, +# doxygen will try to run the HTML help compiler on the generated index.hhp. +# The file has to be specified with full path. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +HHC_LOCATION = + +# The GENERATE_CHI flag controls if a separate .chi index file is generated +# (YES) or that it should be included in the master .chm file (NO). +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +GENERATE_CHI = NO + +# The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc) +# and project file content. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +CHM_INDEX_ENCODING = + +# The BINARY_TOC flag controls whether a binary table of contents is generated +# (YES) or a normal table of contents (NO) in the .chm file. Furthermore it +# enables the Previous and Next buttons. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +BINARY_TOC = NO + +# The TOC_EXPAND flag can be set to YES to add extra items for group members to +# the table of contents of the HTML help documentation and to the tree view. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. 
+ +TOC_EXPAND = NO + +# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and +# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that +# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help +# (.qch) of the generated HTML documentation. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_QHP = NO + +# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify +# the file name of the resulting .qch file. The path specified is relative to +# the HTML output folder. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QCH_FILE = + +# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help +# Project output. For more information please see Qt Help Project / Namespace +# (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace). +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_NAMESPACE = org.doxygen.Project + +# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt +# Help Project output. For more information please see Qt Help Project / Virtual +# Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual- +# folders). +# The default value is: doc. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_VIRTUAL_FOLDER = doc + +# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom +# filter to add. For more information please see Qt Help Project / Custom +# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom- +# filters). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_CUST_FILTER_NAME = + +# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the +# custom filter to add. 
For more information please see Qt Help Project / Custom +# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom- +# filters). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_CUST_FILTER_ATTRS = + +# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this +# project's filter section matches. Qt Help Project / Filter Attributes (see: +# http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_SECT_FILTER_ATTRS = + +# The QHG_LOCATION tag can be used to specify the location of Qt's +# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the +# generated .qhp file. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHG_LOCATION = + +# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be +# generated, together with the HTML files, they form an Eclipse help plugin. To +# install this plugin and make it available under the help contents menu in +# Eclipse, the contents of the directory containing the HTML and XML files needs +# to be copied into the plugins directory of eclipse. The name of the directory +# within the plugins directory should be the same as the ECLIPSE_DOC_ID value. +# After copying Eclipse needs to be restarted before the help appears. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_ECLIPSEHELP = NO + +# A unique identifier for the Eclipse help plugin. When installing the plugin +# the directory name containing the HTML and XML files should also have this +# name. Each documentation set should have its own identifier. +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES. 
+ +ECLIPSE_DOC_ID = org.doxygen.Project + +# If you want full control over the layout of the generated HTML pages it might +# be necessary to disable the index and replace it with your own. The +# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top +# of each HTML page. A value of NO enables the index and the value YES disables +# it. Since the tabs in the index contain the same information as the navigation +# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +DISABLE_INDEX = NO + +# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index +# structure should be generated to display hierarchical information. If the tag +# value is set to YES, a side panel will be generated containing a tree-like +# index structure (just like the one that is generated for HTML Help). For this +# to work a browser that supports JavaScript, DHTML, CSS and frames is required +# (i.e. any modern browser). Windows users are probably better off using the +# HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can +# further fine-tune the look of the index. As an example, the default style +# sheet generated by doxygen has an example that shows how to put an image at +# the root of the tree instead of the PROJECT_NAME. Since the tree basically has +# the same information as the tab index, you could consider setting +# DISABLE_INDEX to YES when enabling this option. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_TREEVIEW = YES + +# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that +# doxygen will group on one line in the generated HTML documentation. +# +# Note that a value of 0 will completely suppress the enum values from appearing +# in the overview section. +# Minimum value: 0, maximum value: 20, default value: 4. 
+# This tag requires that the tag GENERATE_HTML is set to YES. + +ENUM_VALUES_PER_LINE = 4 + +# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used +# to set the initial width (in pixels) of the frame in which the tree is shown. +# Minimum value: 0, maximum value: 1500, default value: 250. +# This tag requires that the tag GENERATE_HTML is set to YES. + +TREEVIEW_WIDTH = 250 + +# If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to +# external symbols imported via tag files in a separate window. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +EXT_LINKS_IN_WINDOW = NO + +# Use this tag to change the font size of LaTeX formulas included as images in +# the HTML documentation. When you change the font size after a successful +# doxygen run you need to manually remove any form_*.png images from the HTML +# output directory to force them to be regenerated. +# Minimum value: 8, maximum value: 50, default value: 10. +# This tag requires that the tag GENERATE_HTML is set to YES. + +FORMULA_FONTSIZE = 10 + +# Use the FORMULA_TRANPARENT tag to determine whether or not the images +# generated for formulas are transparent PNGs. Transparent PNGs are not +# supported properly for IE 6.0, but are supported on all modern browsers. +# +# Note that when changing this option you need to delete any form_*.png files in +# the HTML output directory before the changes have effect. +# The default value is: YES. +# This tag requires that the tag GENERATE_HTML is set to YES. + +FORMULA_TRANSPARENT = YES + +# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see +# http://www.mathjax.org) which uses client side Javascript for the rendering +# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX +# installed or if you want to formulas look prettier in the HTML output. 
When +# enabled you may also need to install MathJax separately and configure the path +# to it using the MATHJAX_RELPATH option. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +USE_MATHJAX = NO + +# When MathJax is enabled you can set the default output format to be used for +# the MathJax output. See the MathJax site (see: +# http://docs.mathjax.org/en/latest/output.html) for more details. +# Possible values are: HTML-CSS (which is slower, but has the best +# compatibility), NativeMML (i.e. MathML) and SVG. +# The default value is: HTML-CSS. +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_FORMAT = HTML-CSS + +# When MathJax is enabled you need to specify the location relative to the HTML +# output directory using the MATHJAX_RELPATH option. The destination directory +# should contain the MathJax.js script. For instance, if the mathjax directory +# is located at the same level as the HTML output directory, then +# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax +# Content Delivery Network so you can quickly see the result without installing +# MathJax. However, it is strongly recommended to install a local copy of +# MathJax from http://www.mathjax.org before deployment. +# The default value is: http://cdn.mathjax.org/mathjax/latest. +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest + +# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax +# extension names that should be enabled during MathJax rendering. For example +# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_EXTENSIONS = + +# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces +# of code that will be used on startup of the MathJax code. 
See the MathJax site +# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an +# example see the documentation. +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_CODEFILE = + +# When the SEARCHENGINE tag is enabled doxygen will generate a search box for +# the HTML output. The underlying search engine uses javascript and DHTML and +# should work on any modern browser. Note that when using HTML help +# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET) +# there is already a search function so this one should typically be disabled. +# For large projects the javascript based search engine can be slow, then +# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to +# search using the keyboard; to jump to the search box use + S +# (what the is depends on the OS and browser, but it is typically +# , / + + + + + + diff --git a/benchmarks/rnnt/ootb/inference/loadgen/docs/src/doxygen_header.html b/benchmarks/rnnt/ootb/inference/loadgen/docs/src/doxygen_header.html new file mode 100644 index 0000000..91d214b --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/loadgen/docs/src/doxygen_header.html @@ -0,0 +1,49 @@ + + + + + + + + + +LoadGen: $title +$title + + + +$treeview +$search +$mathjax + +$extrastylesheet + + +
+ + +
+ + MLPerf + + +
+
$projectname +  $projectnumber +
+
$projectbrief
+
+ + + +
$projectbrief
+ + + + +
$searchbox
+ + +
+ + diff --git a/benchmarks/rnnt/ootb/inference/loadgen/docs/src/doxygen_html_generator.py b/benchmarks/rnnt/ootb/inference/loadgen/docs/src/doxygen_html_generator.py new file mode 100644 index 0000000..405ac1e --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/loadgen/docs/src/doxygen_html_generator.py @@ -0,0 +1,37 @@ +# Copyright 2019 The MLPerf Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================= + +## \file +# \brief A script that sets the environment variables expected by doxygen.cfg. +# \details This can be run manually without any arguments, but also allows a +# build system to customize the output directory. + +import os +import sys + + +def generate_doxygen_html(doxygen_out_dir, loadgen_root): + os.environ["MLPERF_LOADGEN_SRC_PATH"] = loadgen_root + os.environ["MLPERF_DOXYGEN_OUT_PATH"] = doxygen_out_dir + os.popen("doxygen " + loadgen_root + "/docs/src/doxygen.cfg") + + +def main(argv): + doxygen_out_dir = "./docs/gen" if len(argv) < 2 else argv[1] + loadgen_root = "." 
if len(argv) < 3 else argv[2] + generate_doxygen_html(doxygen_out_dir, loadgen_root) + + +main(sys.argv) diff --git a/benchmarks/rnnt/ootb/inference/loadgen/docs/src/doxygen_layout.xml b/benchmarks/rnnt/ootb/inference/loadgen/docs/src/doxygen_layout.xml new file mode 100644 index 0000000..1fc5a9c --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/loadgen/docs/src/doxygen_layout.xml @@ -0,0 +1,211 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/benchmarks/rnnt/ootb/inference/loadgen/docs/src/doxygen_stylesheet.css b/benchmarks/rnnt/ootb/inference/loadgen/docs/src/doxygen_stylesheet.css new file mode 100644 index 0000000..3bd6126 --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/loadgen/docs/src/doxygen_stylesheet.css @@ -0,0 +1,1629 @@ +/* The standard CSS for doxygen 1.8.13 */ + +body, table, div, p, dl { + font: 400 14px/22px Roboto,sans-serif; +} + +p.reference, p.definition { + font: 400 14px/22px Roboto,sans-serif; +} + +/* @group Heading Levels */ + +h1.groupheader { + font-size: 150%; +} + +.title { + font: 400 14px/28px Roboto,sans-serif; + font-size: 175%; + font-weight: bold; + margin: 10px 2px; + color: #135384; +} + +h2.groupheader { + border-bottom: 1px solid #879ECB; + color: #354C7B; + font-size: 150%; + font-weight: normal; + margin-top: 1.75em; + padding-top: 8px; + padding-bottom: 4px; + width: 100%; +} + +h3.groupheader { + font-size: 100%; +} + +h1, h2, h3, h4, h5, h6 { + -webkit-transition: text-shadow 0.5s linear; + -moz-transition: text-shadow 0.5s linear; + -ms-transition: text-shadow 0.5s linear; + -o-transition: text-shadow 0.5s linear; + transition: 
text-shadow 0.5s linear; + margin-right: 15px; + color: #135384; + +} + +h1.glow, h2.glow, h3.glow, h4.glow, h5.glow, h6.glow { + text-shadow: 0 0 15px cyan; +} + +dt { + font-weight: bold; +} + +div.multicol { + -moz-column-gap: 1em; + -webkit-column-gap: 1em; + -moz-column-count: 3; + -webkit-column-count: 3; +} + +p.startli, p.startdd { + margin-top: 2px; +} + +p.starttd { + margin-top: 0px; +} + +p.endli { + margin-bottom: 0px; +} + +p.enddd { + margin-bottom: 4px; +} + +p.endtd { + margin-bottom: 2px; +} + +/* @end */ + +caption { + font-weight: bold; +} + +span.legend { + font-size: 70%; + text-align: center; +} + +h3.version { + font-size: 90%; + text-align: center; +} + +div.qindex, div.navtab{ + background-color: #EBEFF6; + border: 1px solid #A3B4D7; + text-align: center; +} + +div.qindex, div.navpath { + width: 100%; + line-height: 140%; +} + +div.navtab { + margin-right: 15px; +} + +/* @group Link Styling */ + +a { + color: #3D578C; + font-weight: normal; + text-decoration: none; +} + +.contents a:visited { + color: #4665A2; +} + +a:hover { + text-decoration: underline; +} + +a.qindex { + font-weight: bold; +} + +a.qindexHL { + font-weight: bold; + background-color: #9CAFD4; + color: #ffffff; + border: 1px double #869DCA; +} + +.contents a.qindexHL:visited { + color: #ffffff; +} + +a.el { + font-weight: bold; +} + +a.elRef { +} + +a.code, a.code:visited, a.line, a.line:visited { + color: #4665A2; +} + +a.codeRef, a.codeRef:visited, a.lineRef, a.lineRef:visited { + color: #4665A2; +} + +/* @end */ + +dl.el { + margin-left: -1cm; +} + +pre.fragment { + border: 1px solid #C4CFE5; + background-color: #FBFCFD; + padding: 4px 6px; + margin: 4px 8px 4px 2px; + overflow: auto; + word-wrap: break-word; + font-size: 9pt; + line-height: 125%; + font-family: monospace, fixed; + font-size: 105%; +} + +div.fragment { + padding: 0px; + margin: 4px 8px 4px 2px; + background-color: #FBFCFD; + border: 1px solid #C4CFE5; +} + +div.line { + font-family: monospace, fixed; + 
font-size: 13px; + min-height: 13px; + line-height: 1.0; + text-wrap: unrestricted; + white-space: -moz-pre-wrap; /* Moz */ + white-space: -pre-wrap; /* Opera 4-6 */ + white-space: -o-pre-wrap; /* Opera 7 */ + white-space: pre-wrap; /* CSS3 */ + word-wrap: break-word; /* IE 5.5+ */ + text-indent: -53px; + padding-left: 53px; + padding-bottom: 0px; + margin: 0px; + -webkit-transition-property: background-color, box-shadow; + -webkit-transition-duration: 0.5s; + -moz-transition-property: background-color, box-shadow; + -moz-transition-duration: 0.5s; + -ms-transition-property: background-color, box-shadow; + -ms-transition-duration: 0.5s; + -o-transition-property: background-color, box-shadow; + -o-transition-duration: 0.5s; + transition-property: background-color, box-shadow; + transition-duration: 0.5s; +} + +div.line:after { + content:"\000A"; + white-space: pre; +} + +div.line.glow { + background-color: cyan; + box-shadow: 0 0 10px cyan; +} + + +span.lineno { + padding-right: 4px; + text-align: right; + border-right: 2px solid #0F0; + background-color: #E8E8E8; + white-space: pre; +} +span.lineno a { + background-color: #D8D8D8; +} + +span.lineno a:hover { + background-color: #C8C8C8; +} + +.lineno { + -webkit-touch-callout: none; + -webkit-user-select: none; + -khtml-user-select: none; + -moz-user-select: none; + -ms-user-select: none; + user-select: none; +} + +div.ah, span.ah { + background-color: black; + font-weight: bold; + color: #ffffff; + margin-bottom: 3px; + margin-top: 3px; + padding: 0.2em; + border: solid thin #333; + border-radius: 0.5em; + -webkit-border-radius: .5em; + -moz-border-radius: .5em; + box-shadow: 2px 2px 3px #999; + -webkit-box-shadow: 2px 2px 3px #999; + -moz-box-shadow: rgba(0, 0, 0, 0.15) 2px 2px 2px; + background-image: -webkit-gradient(linear, left top, left bottom, from(#eee), to(#000),color-stop(0.3, #444)); + background-image: -moz-linear-gradient(center top, #eee 0%, #444 40%, #000 110%); +} + +div.classindex ul { + 
list-style: none; + padding-left: 0; +} + +div.classindex span.ai { + display: inline-block; +} + +div.groupHeader { + margin-left: 16px; + margin-top: 12px; + font-weight: bold; +} + +div.groupText { + margin-left: 16px; + font-style: italic; +} + +body { + background-color: white; + color: black; + margin: 0; +} + +div.contents { + margin-top: 10px; + margin-left: 12px; + margin-right: 8px; +} + +td.indexkey { + background-color: #EBEFF6; + font-weight: bold; + border: 1px solid #C4CFE5; + margin: 2px 0px 2px 0; + padding: 2px 10px; + white-space: nowrap; + vertical-align: top; +} + +td.indexvalue { + background-color: #EBEFF6; + border: 1px solid #C4CFE5; + padding: 2px 10px; + margin: 2px 0px; +} + +tr.memlist { + background-color: #EEF1F7; +} + +p.formulaDsp { + text-align: center; +} + +img.formulaDsp { + +} + +img.formulaInl { + vertical-align: middle; +} + +div.center { + text-align: center; + margin-top: 0px; + margin-bottom: 0px; + padding: 0px; +} + +div.center img { + border: 0px; +} + +address.footer { + text-align: right; + padding-right: 12px; +} + +img.footer { + border: 0px; + vertical-align: middle; +} + +/* @group Code Colorization */ + +span.keyword { + color: #008000 +} + +span.keywordtype { + color: #604020 +} + +span.keywordflow { + color: #e08000 +} + +span.comment { + color: #800000 +} + +span.preprocessor { + color: #806020 +} + +span.stringliteral { + color: #002080 +} + +span.charliteral { + color: #008080 +} + +span.vhdldigit { + color: #ff00ff +} + +span.vhdlchar { + color: #000000 +} + +span.vhdlkeyword { + color: #700070 +} + +span.vhdllogic { + color: #ff0000 +} + +blockquote { + background-color: #F7F8FB; + border-left: 2px solid #9CAFD4; + margin: 0 24px 0 4px; + padding: 0 12px 0 16px; +} + +/* @end */ + +/* +.search { + color: #003399; + font-weight: bold; +} + +form.search { + margin-bottom: 0px; + margin-top: 0px; +} + +input.search { + font-size: 75%; + color: #000080; + font-weight: normal; + background-color: #e8eef2; +} 
+*/ + +td.tiny { + font-size: 75%; +} + +.dirtab { + padding: 4px; + border-collapse: collapse; + border: 1px solid #A3B4D7; +} + +th.dirtab { + background: #EBEFF6; + font-weight: bold; +} + +hr { + height: 0px; + border: none; + border-top: 1px solid #4A6AAA; +} + +hr.footer { + height: 1px; +} + +/* @group Member Descriptions */ + +table.memberdecls { + border-spacing: 0px; + padding: 0px; +} + +.memberdecls td, .fieldtable tr { + -webkit-transition-property: background-color, box-shadow; + -webkit-transition-duration: 0.5s; + -moz-transition-property: background-color, box-shadow; + -moz-transition-duration: 0.5s; + -ms-transition-property: background-color, box-shadow; + -ms-transition-duration: 0.5s; + -o-transition-property: background-color, box-shadow; + -o-transition-duration: 0.5s; + transition-property: background-color, box-shadow; + transition-duration: 0.5s; +} + +.memberdecls td.glow, .fieldtable tr.glow { + background-color: cyan; + box-shadow: 0 0 15px cyan; +} + +.mdescLeft, .mdescRight, +.memItemLeft, .memItemRight, +.memTemplItemLeft, .memTemplItemRight, .memTemplParams { + background-color: #F9FAFC; + border: none; + margin: 4px; + padding: 1px 0 0 8px; +} + +.mdescLeft, .mdescRight { + padding: 0px 8px 4px 8px; + color: #555; +} + +.memSeparator { + border-bottom: 1px solid #DEE4F0; + line-height: 1px; + margin: 0px; + padding: 0px; +} + +.memItemLeft, .memTemplItemLeft { + white-space: nowrap; +} + +.memItemRight { + width: 100%; +} + +.memTemplParams { + color: #4665A2; + white-space: nowrap; + font-size: 80%; +} + +/* @end */ + +/* @group Member Details */ + +/* Styles for detailed member documentation */ + +.memtitle { + padding: 8px; + border-top: 1px solid #A8B8D9; + border-left: 1px solid #A8B8D9; + border-right: 1px solid #A8B8D9; + border-top-right-radius: 4px; + border-top-left-radius: 4px; + margin-bottom: -1px; + background-image: url('nav_f.png'); + background-repeat: repeat-x; + background-color: #E2E8F2; + line-height: 1.25; + 
font-weight: 300; + float:left; +} + +.permalink +{ + font-size: 65%; + display: inline-block; + vertical-align: middle; +} + +.memtemplate { + font-size: 80%; + color: #4665A2; + font-weight: normal; + margin-left: 9px; +} + +.memnav { + background-color: #EBEFF6; + border: 1px solid #A3B4D7; + text-align: center; + margin: 2px; + margin-right: 15px; + padding: 2px; +} + +.mempage { + width: 100%; +} + +.memitem { + padding: 0; + margin-bottom: 10px; + margin-right: 5px; + -webkit-transition: box-shadow 0.5s linear; + -moz-transition: box-shadow 0.5s linear; + -ms-transition: box-shadow 0.5s linear; + -o-transition: box-shadow 0.5s linear; + transition: box-shadow 0.5s linear; + display: table !important; + width: 100%; +} + +.memitem.glow { + box-shadow: 0 0 15px cyan; +} + +.memname { + font-weight: 400; + margin-left: 6px; +} + +.memname td { + vertical-align: bottom; +} + +.memproto, dl.reflist dt { + border-top: 1px solid #A8B8D9; + border-left: 1px solid #A8B8D9; + border-right: 1px solid #A8B8D9; + padding: 6px 0px 6px 0px; + color: #253555; + font-weight: bold; + text-shadow: 0px 1px 1px rgba(255, 255, 255, 0.9); + background-color: #DFE5F1; + /* opera specific markup */ + box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15); + border-top-right-radius: 4px; + /* firefox specific markup */ + -moz-box-shadow: rgba(0, 0, 0, 0.15) 5px 5px 5px; + -moz-border-radius-topright: 4px; + /* webkit specific markup */ + -webkit-box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15); + -webkit-border-top-right-radius: 4px; + +} + +.overload { + font-family: "courier new",courier,monospace; + font-size: 65%; +} + +.memdoc, dl.reflist dd { + border-bottom: 1px solid #A8B8D9; + border-left: 1px solid #A8B8D9; + border-right: 1px solid #A8B8D9; + padding: 6px 10px 2px 10px; + background-color: #FBFCFD; + border-top-width: 0; + background-image:url('nav_g.png'); + background-repeat:repeat-x; + background-color: #FFFFFF; + /* opera specific markup */ + border-bottom-left-radius: 4px; + 
border-bottom-right-radius: 4px; + box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15); + /* firefox specific markup */ + -moz-border-radius-bottomleft: 4px; + -moz-border-radius-bottomright: 4px; + -moz-box-shadow: rgba(0, 0, 0, 0.15) 5px 5px 5px; + /* webkit specific markup */ + -webkit-border-bottom-left-radius: 4px; + -webkit-border-bottom-right-radius: 4px; + -webkit-box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15); +} + +dl.reflist dt { + padding: 5px; +} + +dl.reflist dd { + margin: 0px 0px 10px 0px; + padding: 5px; +} + +.paramkey { + text-align: right; +} + +.paramtype { + white-space: nowrap; +} + +.paramname { + color: #602020; + white-space: nowrap; +} +.paramname em { + font-style: normal; +} +.paramname code { + line-height: 14px; +} + +.params, .retval, .exception, .tparams { + margin-left: 0px; + padding-left: 0px; +} + +.params .paramname, .retval .paramname { + font-weight: bold; + vertical-align: top; +} + +.params .paramtype { + font-style: italic; + vertical-align: top; +} + +.params .paramdir { + font-family: "courier new",courier,monospace; + vertical-align: top; +} + +table.mlabels { + border-spacing: 0px; +} + +td.mlabels-left { + width: 100%; + padding: 0px; +} + +td.mlabels-right { + vertical-align: bottom; + padding: 0px; + white-space: nowrap; +} + +span.mlabels { + margin-left: 8px; +} + +span.mlabel { + background-color: #728DC1; + border-top:1px solid #5373B4; + border-left:1px solid #5373B4; + border-right:1px solid #C4CFE5; + border-bottom:1px solid #C4CFE5; + text-shadow: none; + color: white; + margin-right: 4px; + padding: 2px 3px; + border-radius: 3px; + font-size: 7pt; + white-space: nowrap; + vertical-align: middle; +} + + + +/* @end */ + +/* these are for tree view inside a (index) page */ + +div.directory { + margin: 10px 0px; + border-top: 1px solid #9CAFD4; + border-bottom: 1px solid #9CAFD4; + width: 100%; +} + +.directory table { + border-collapse:collapse; +} + +.directory td { + margin: 0px; + padding: 0px; + vertical-align: top; 
+} + +.directory td.entry { + white-space: nowrap; + padding-right: 6px; + padding-top: 3px; +} + +.directory td.entry a { + outline:none; +} + +.directory td.entry a img { + border: none; +} + +.directory td.desc { + width: 100%; + padding-left: 6px; + padding-right: 6px; + padding-top: 3px; + border-left: 1px solid rgba(0,0,0,0.05); +} + +.directory tr.even { + padding-left: 6px; + background-color: #F7F8FB; +} + +.directory img { + vertical-align: -30%; +} + +.directory .levels { + white-space: nowrap; + width: 100%; + text-align: right; + font-size: 9pt; +} + +.directory .levels span { + cursor: pointer; + padding-left: 2px; + padding-right: 2px; + color: #3D578C; +} + +.arrow { + color: #9CAFD4; + -webkit-user-select: none; + -khtml-user-select: none; + -moz-user-select: none; + -ms-user-select: none; + user-select: none; + cursor: pointer; + font-size: 80%; + display: inline-block; + width: 16px; + height: 22px; +} + +.icon { + font-family: Arial, Helvetica; + font-weight: bold; + font-size: 12px; + height: 14px; + width: 16px; + display: inline-block; + background-color: #728DC1; + color: white; + text-align: center; + border-radius: 4px; + margin-left: 2px; + margin-right: 2px; +} + +.icona { + width: 24px; + height: 22px; + display: inline-block; +} + +.iconfopen { + width: 24px; + height: 18px; + margin-bottom: 4px; + background-image:url('folderopen.png'); + background-position: 0px -4px; + background-repeat: repeat-y; + vertical-align:top; + display: inline-block; +} + +.iconfclosed { + width: 24px; + height: 18px; + margin-bottom: 4px; + background-image:url('folderclosed.png'); + background-position: 0px -4px; + background-repeat: repeat-y; + vertical-align:top; + display: inline-block; +} + +.icondoc { + width: 24px; + height: 18px; + margin-bottom: 4px; + background-image:url('doc.png'); + background-position: 0px -4px; + background-repeat: repeat-y; + vertical-align:top; + display: inline-block; +} + +table.directory { + font: 400 14px 
Roboto,sans-serif; +} + +/* @end */ + +div.dynheader { + margin-top: 8px; + -webkit-touch-callout: none; + -webkit-user-select: none; + -khtml-user-select: none; + -moz-user-select: none; + -ms-user-select: none; + user-select: none; +} + +address { + font-style: normal; + color: #2A3D61; +} + +table.doxtable caption { + caption-side: top; +} + +table.doxtable { + border-collapse:collapse; + margin-top: 4px; + margin-bottom: 4px; +} + +table.doxtable td, table.doxtable th { + border: 1px solid #2D4068; + padding: 3px 7px 2px; +} + +table.doxtable th { + background-color: #374F7F; + color: #FFFFFF; + font-size: 110%; + padding-bottom: 4px; + padding-top: 5px; +} + +table.fieldtable { + /*width: 100%;*/ + margin-bottom: 10px; + border: 1px solid #A8B8D9; + border-spacing: 0px; + -moz-border-radius: 4px; + -webkit-border-radius: 4px; + border-radius: 4px; + -moz-box-shadow: rgba(0, 0, 0, 0.15) 2px 2px 2px; + -webkit-box-shadow: 2px 2px 2px rgba(0, 0, 0, 0.15); + box-shadow: 2px 2px 2px rgba(0, 0, 0, 0.15); +} + +.fieldtable td, .fieldtable th { + padding: 3px 7px 2px; +} + +.fieldtable td.fieldtype, .fieldtable td.fieldname { + white-space: nowrap; + border-right: 1px solid #A8B8D9; + border-bottom: 1px solid #A8B8D9; + vertical-align: top; +} + +.fieldtable td.fieldname { + padding-top: 3px; +} + +.fieldtable td.fielddoc { + border-bottom: 1px solid #A8B8D9; + /*width: 100%;*/ +} + +.fieldtable td.fielddoc p:first-child { + margin-top: 0px; +} + +.fieldtable td.fielddoc p:last-child { + margin-bottom: 2px; +} + +.fieldtable tr:last-child td { + border-bottom: none; +} + +.fieldtable th { + background-image:url('nav_f.png'); + background-repeat:repeat-x; + background-color: #E2E8F2; + font-size: 90%; + color: #253555; + padding-bottom: 4px; + padding-top: 5px; + text-align:left; + font-weight: 400; + -moz-border-radius-topleft: 4px; + -moz-border-radius-topright: 4px; + -webkit-border-top-left-radius: 4px; + -webkit-border-top-right-radius: 4px; + 
border-top-left-radius: 4px; + border-top-right-radius: 4px; + border-bottom: 1px solid #A8B8D9; +} + + +.tabsearch { + top: 0px; + left: 10px; + height: 36px; + background-image: url('tab_b.png'); + z-index: 101; + overflow: hidden; + font-size: 13px; +} + +.navpath ul { + display: flex; + flex-flow: row wrap; + justify-content: flex-start; + align-items: center; + font-size: 11px; + background-image:none; + background-repeat:repeat-x; + background-position: 0 -5px; + height:auto; + line-height:30px; + color:#8AA0CC; + border:solid 1px #C2CDE4; + overflow:hidden; + margin:0px; + padding:0px; +} + +.navpath li +{ + list-style-type:none; + float:left; + padding-left:10px; + padding-right:15px; + background-image:url('bc_s.png'); + background-repeat:no-repeat; + background-position:right; + color:#364D7C; +} + +.navpath li.navelem a +{ + height:32px; + display:block; + text-decoration: none; + outline: none; + color: #283A5D; + font-family: 'Lucida Grande',Geneva,Helvetica,Arial,sans-serif; + text-shadow: 0px 1px 1px rgba(255, 255, 255, 0.9); + text-decoration: none; +} + +.navpath li.navelem a:hover +{ + color:#6884BD; +} + +.navpath li.footer +{ + display: flex; + flex-flow: row wrap; + justify-content: flex-start; + align-items: center; + flex-grow: 1; + list-style-type:none; + float:none; + padding-left:10px; + padding-right:15px; + background-image:none; + background-repeat:no-repeat; + background-position:right; + color:#364D7C; + font-size: 8pt; +} + +div.summary +{ + float: right; + font-size: 8pt; + padding-right: 5px; + width: 50%; + text-align: right; +} + +div.summary a +{ + white-space: nowrap; +} + +table.classindex +{ + margin: 10px; + white-space: nowrap; + margin-left: 3%; + margin-right: 3%; + width: 94%; + border: 0; + border-spacing: 0; + padding: 0; +} + +div.ingroups +{ + font-size: 8pt; + width: 50%; + text-align: left; +} + +div.ingroups a +{ + white-space: nowrap; +} + +div.header +{ + background-image:url('nav_h.png'); + 
background-repeat:repeat-x; + background-color: #F9FAFC; + margin: 0px; + border-bottom: 1px solid #C4CFE5; +} + +div.headertitle +{ + padding: 5px 5px 5px 10px; + color: #135384; +} + +dl +{ + padding: 0 0 0 10px; +} + +/* dl.note, dl.warning, dl.attention, dl.pre, dl.post, dl.invariant, dl.deprecated, dl.todo, dl.test, dl.bug */ +dl.section +{ + margin-left: 0px; + padding-left: 0px; +} + +dl.note +{ + margin-left:-7px; + padding-left: 3px; + border-left:4px solid; + border-color: #D0C000; +} + +dl.warning, dl.attention +{ + margin-left:-7px; + padding-left: 3px; + border-left:4px solid; + border-color: #FF0000; +} + +dl.pre, dl.post, dl.invariant +{ + margin-left:-7px; + padding-left: 3px; + border-left:4px solid; + border-color: #00D000; +} + +dl.deprecated +{ + margin-left:-7px; + padding-left: 3px; + border-left:4px solid; + border-color: #505050; +} + +dl.todo +{ + margin-left:-7px; + padding-left: 3px; + border-left:4px solid; + border-color: #00C0E0; +} + +dl.test +{ + margin-left:-7px; + padding-left: 3px; + border-left:4px solid; + border-color: #3030E0; +} + +dl.bug +{ + margin-left:-7px; + padding-left: 3px; + border-left:4px solid; + border-color: #C08050; +} + +dl.section dd { + margin-bottom: 6px; +} + + +#projectlogo +{ + text-align: center; + vertical-align: bottom; + border-collapse: separate; +} + +#projectlogo img +{ + border: 0px none; +} + +#projectalign +{ + vertical-align: middle; +} + +#projectname +{ + font: 200% Tahoma, Arial,sans-serif; + margin: 0px; + padding: 2px 0px; +} + +#projectbrief +{ + font: 120% Tahoma, Arial,sans-serif; + margin: 0px; + padding: 0px; +} + +#projectnumber +{ + font: 50% Tahoma, Arial,sans-serif; + margin: 0px; + padding: 0px; +} + +#top { + border-bottom: 1px solid #5373B4; +} + +#titlearea +{ + flex-grow: 1; + padding: 0px; + margin: 0px; + width: auto; + border-bottom: none; +} + +#main-nav { +} + +#main-menu { + display: flex; + flex-flow: row wrap; + justify-content: flex-start; + align-items: center; + 
background-image: none; + min-width: 770px; +} + +.ui-resizable-e { + height: 100%; + background-repeat: repeat-y; +} + +.image +{ + text-align: center; +} + +.dotgraph +{ + text-align: center; +} + +.mscgraph +{ + text-align: center; +} + +.plantumlgraph +{ + text-align: center; +} + +.diagraph +{ + text-align: center; +} + +.caption +{ + font-weight: bold; +} + +div.zoom +{ + border: 1px solid #90A5CE; +} + +dl.citelist { + margin-bottom:50px; +} + +dl.citelist dt { + color:#334975; + float:left; + font-weight:bold; + margin-right:10px; + padding:5px; +} + +dl.citelist dd { + margin:2px 0; + padding:5px 0; +} + +div.toc { + padding: 14px 25px; + background-color: #F4F6FA; + border: 1px solid #D8DFEE; + border-radius: 7px 7px 7px 7px; + float: right; + height: auto; + margin: 0 8px 10px 10px; + width: 200px; +} + +div.toc li { + background: url("bdwn.png") no-repeat scroll 0 5px transparent; + font: 10px/1.2 Verdana,DejaVu Sans,Geneva,sans-serif; + margin-top: 5px; + padding-left: 10px; + padding-top: 2px; +} + +div.toc h3 { + font: bold 12px/1.2 Arial,FreeSans,sans-serif; + color: #4665A2; + border-bottom: 0 none; + margin: 0; +} + +div.toc ul { + list-style: none outside none; + border: medium none; + padding: 0px; +} + +div.toc li.level1 { + margin-left: 0px; +} + +div.toc li.level2 { + margin-left: 15px; +} + +div.toc li.level3 { + margin-left: 30px; +} + +div.toc li.level4 { + margin-left: 45px; +} + +.inherit_header { + font-weight: bold; + color: gray; + cursor: pointer; + -webkit-touch-callout: none; + -webkit-user-select: none; + -khtml-user-select: none; + -moz-user-select: none; + -ms-user-select: none; + user-select: none; +} + +.inherit_header td { + padding: 6px 0px 2px 5px; +} + +.inherit { + display: none; +} + +tr.heading h2 { + margin-top: 12px; + margin-bottom: 4px; +} + +/* tooltip related style info */ + +.ttc { + position: absolute; + display: none; +} + +#powerTip { + cursor: default; + white-space: nowrap; + background-color: white; + 
border: 1px solid gray; + border-radius: 4px 4px 4px 4px; + box-shadow: 1px 1px 7px gray; + display: none; + font-size: smaller; + max-width: 80%; + opacity: 0.9; + padding: 1ex 1em 1em; + position: absolute; + z-index: 2147483647; +} + +#powerTip div.ttdoc { + color: grey; + font-style: italic; +} + +#powerTip div.ttname a { + font-weight: bold; +} + +#powerTip div.ttname { + font-weight: bold; +} + +#powerTip div.ttdeci { + color: #006318; +} + +#powerTip div { + margin: 0px; + padding: 0px; + font: 12px/16px Roboto,sans-serif; +} + +#powerTip:before, #powerTip:after { + content: ""; + position: absolute; + margin: 0px; +} + +#powerTip.n:after, #powerTip.n:before, +#powerTip.s:after, #powerTip.s:before, +#powerTip.w:after, #powerTip.w:before, +#powerTip.e:after, #powerTip.e:before, +#powerTip.ne:after, #powerTip.ne:before, +#powerTip.se:after, #powerTip.se:before, +#powerTip.nw:after, #powerTip.nw:before, +#powerTip.sw:after, #powerTip.sw:before { + border: solid transparent; + content: " "; + height: 0; + width: 0; + position: absolute; +} + +#powerTip.n:after, #powerTip.s:after, +#powerTip.w:after, #powerTip.e:after, +#powerTip.nw:after, #powerTip.ne:after, +#powerTip.sw:after, #powerTip.se:after { + border-color: rgba(255, 255, 255, 0); +} + +#powerTip.n:before, #powerTip.s:before, +#powerTip.w:before, #powerTip.e:before, +#powerTip.nw:before, #powerTip.ne:before, +#powerTip.sw:before, #powerTip.se:before { + border-color: rgba(128, 128, 128, 0); +} + +#powerTip.n:after, #powerTip.n:before, +#powerTip.ne:after, #powerTip.ne:before, +#powerTip.nw:after, #powerTip.nw:before { + top: 100%; +} + +#powerTip.n:after, #powerTip.ne:after, #powerTip.nw:after { + border-top-color: #ffffff; + border-width: 10px; + margin: 0px -10px; +} +#powerTip.n:before { + border-top-color: #808080; + border-width: 11px; + margin: 0px -11px; +} +#powerTip.n:after, #powerTip.n:before { + left: 50%; +} + +#powerTip.nw:after, #powerTip.nw:before { + right: 14px; +} + +#powerTip.ne:after, 
#powerTip.ne:before { + left: 14px; +} + +#powerTip.s:after, #powerTip.s:before, +#powerTip.se:after, #powerTip.se:before, +#powerTip.sw:after, #powerTip.sw:before { + bottom: 100%; +} + +#powerTip.s:after, #powerTip.se:after, #powerTip.sw:after { + border-bottom-color: #ffffff; + border-width: 10px; + margin: 0px -10px; +} + +#powerTip.s:before, #powerTip.se:before, #powerTip.sw:before { + border-bottom-color: #808080; + border-width: 11px; + margin: 0px -11px; +} + +#powerTip.s:after, #powerTip.s:before { + left: 50%; +} + +#powerTip.sw:after, #powerTip.sw:before { + right: 14px; +} + +#powerTip.se:after, #powerTip.se:before { + left: 14px; +} + +#powerTip.e:after, #powerTip.e:before { + left: 100%; +} +#powerTip.e:after { + border-left-color: #ffffff; + border-width: 10px; + top: 50%; + margin-top: -10px; +} +#powerTip.e:before { + border-left-color: #808080; + border-width: 11px; + top: 50%; + margin-top: -11px; +} + +#powerTip.w:after, #powerTip.w:before { + right: 100%; +} +#powerTip.w:after { + border-right-color: #ffffff; + border-width: 10px; + top: 50%; + margin-top: -10px; +} +#powerTip.w:before { + border-right-color: #808080; + border-width: 11px; + top: 50%; + margin-top: -11px; +} + +@media print +{ + #top { display: none; } + #side-nav { display: none; } + #nav-path { display: none; } + body { overflow:visible; } + h1, h2, h3, h4, h5, h6 { page-break-after: avoid; } + .summary { display: none; } + .memitem { page-break-inside: avoid; } + #doc-content + { + margin-left:0 !important; + height:auto !important; + width:auto !important; + overflow:inherit; + display:inline; + } +} + +/* @group Markdown */ + +/* +table.markdownTable { + border-collapse:collapse; + margin-top: 4px; + margin-bottom: 4px; +} + +table.markdownTable td, table.markdownTable th { + border: 1px solid #2D4068; + padding: 3px 7px 2px; +} + +table.markdownTableHead tr { +} + +table.markdownTableBodyLeft td, table.markdownTable th { + border: 1px solid #2D4068; + padding: 3px 7px 
2px; +} + +th.markdownTableHeadLeft th.markdownTableHeadRight th.markdownTableHeadCenter th.markdownTableHeadNone { + background-color: #374F7F; + color: #FFFFFF; + font-size: 110%; + padding-bottom: 4px; + padding-top: 5px; +} + +th.markdownTableHeadLeft { + text-align: left +} + +th.markdownTableHeadRight { + text-align: right +} + +th.markdownTableHeadCenter { + text-align: center +} +*/ + +table.markdownTable { + border-collapse:collapse; + margin-top: 4px; + margin-bottom: 4px; +} + +table.markdownTable td, table.markdownTable th { + border: 1px solid #2D4068; + padding: 3px 7px 2px; +} + +table.markdownTable tr { +} + +th.markdownTableHeadLeft, th.markdownTableHeadRight, th.markdownTableHeadCenter, th.markdownTableHeadNone { + background-color: #374F7F; + color: #FFFFFF; + font-size: 110%; + padding-bottom: 4px; + padding-top: 5px; +} + +th.markdownTableHeadLeft, td.markdownTableBodyLeft { + text-align: left +} + +th.markdownTableHeadRight, td.markdownTableBodyRight { + text-align: right +} + +th.markdownTableHeadCenter, td.markdownTableBodyCenter { + text-align: center +} + + +/* @end */ diff --git a/benchmarks/rnnt/ootb/inference/loadgen/docs/src/loadgen_integration_diagram.dia b/benchmarks/rnnt/ootb/inference/loadgen/docs/src/loadgen_integration_diagram.dia new file mode 100644 index 0000000000000000000000000000000000000000..569089f243e4584e12134caf36d078248cb50af1 GIT binary patch literal 1943 zcmV;I2Wa>oiwFP!000021MOW~kJ>mCexF|v(Z^*%?A+VVRH|0H`_NS@ZTA@&;(=^K z%qE@-FZ1`E8Yu$2geet%`O_5|KoELebsi z%|r2;%nC!ZlBE;Yw`heI2}a-AjT`Lc``wIhaZ)wB*^)G5P3Uf0Ytmwe|2=9`v`Sf{ zcy<5g6Q&d=Z}M&x2M_j|I@{`qZcwrcrY`CE+X92`!J@1ncod# zzB&Ukoj9D{bH?S?T7@X^u#N35LbQ4e1Du|j%;h#MmexhH*}3apZP)YC1Yx?3(C+jQ zs*PG~p_qn@!%&I?u}m4G?JXs@6`By|h%ElWOF9sHw)t9<=huvA2scX-$810>6usRN z2G?|$tN7Zfm>;S{shL+c$#7Ei^y48u)e5#LdZHEmM@NSc8_v`I-O+r{zq(=`{}z$w zE)yKktB{8}`&cl=fv|WiT-zl}Lq(^^}07u#{+OG`2 zDW)xX(g}WQi&RNWoBZ)x`@4$)=r0D2Xfd!4V&DlE14oI05`#uD_>y4lOG1`7O-Z7pK)e*VN(y930qiXW 
zmQXE#=65kLffO;YM5=+K`=yr$l{}1E9!N@+KFHGt=<2qT2)Pnr_LT@=PJzgD0dk2j zxJ1BAB7DIT(S1BWI$`jXB8XOmk2qegHz@h0#6Ywd0G9=ZN(v-Ofi*z30EA}?#vxm< z#b*lwrN9*^1(uQm;ZndZ6OIEV1X8jBwTB>83hW`W0`X}9ILZrvDIqZcYAZp>L37zH zLBa1hR&pRAFJRzTYA~1@{Ka@RKuN&F;8Pgm1~qVctAQ~!1&V>swE*gl>A`oI46ty) zbU;T&SdKxc7{HduXN6dUXQ`jxEeq2nl>pA{;VK0 z%OC`>HUkJ@zRyhlGlHa zx7dUg$xC}Dd~F*-#!n+!+qM)hNxT2FVJ<3S8YeLgQUfaVQOUd%Ghf}#?{ibj+iH2c zKJOI&NamjbD{X-zsl)&pLp;z$B-=qt=XZzb4s^_S(AN#Xb3+!2xu!RoTfVhVWR)rg z6ds0Ua|P-U0#gy8Fe2;-_u7wJU0lO8Olcz=qP$ov=Vi5xob(RGjhynNR2Vnhs5K1T ztNO-~+~?!T3O?WvYd~2tBzg^KnpLmmYe3oJG#JUInWGkgWG(_#C!>lzM!5(C`&VAu zJQQ_xp_^uz4@dWn>~{XlpQ9FoR4UXw{}dOElZ$#6TT7s#BPLQrq=@L`+C7LLNhg22mH|rR`$qOSNHg%6&&$TR;UiFrG=x`O|EO*#A<)W$@v?*Ve7+H dUb*O!zt4VH`2ME!%ft6K{{zrB$(5Fa002n3teyY> literal 0 HcmV?d00001 diff --git a/benchmarks/rnnt/ootb/inference/loadgen/docs/src/mlperf_icon.png b/benchmarks/rnnt/ootb/inference/loadgen/docs/src/mlperf_icon.png new file mode 100644 index 0000000000000000000000000000000000000000..95321896d3e467b923909c3654a4260346df5b9f GIT binary patch literal 4632 zcmb_gXHXMNyG2n1{1g!clqO9CBp9Uz2+B(h0@6F7BQRAaZ%{ulwU0f8Ck0`^@Z~bDr7Rvwt?}v4Qp_)*Gx03=Efabu^4m z>+!#Xndx*kspxh;E!TrIErU#8Zb6}r0j>-WU0_bGH+6j+-Cd1c9bFLq-L5JO4D484 zjfbZ2@zs3&08VYLr}rz)yNSCgFmgQeeHw9Q1eeE<(8)3h7k6`{og2>kvKC*(FKO^J z5eV$fjChu2;2`1j#U2~VGnny)Xm)IQvn=Xf#l6~A8TAK>pG-{`EH+$Q!`>Q8h%vkq z6rDWF5O?A4cmQYkJN~~7kgq0V{Ag5f`8x#@9Wx!a1SkQI%6R2@OxAA2Vq7bSB2YpbCM3ct@U0s2<-Lx1%cM>r zwzF{wFH}(Yx}S{v)uUm5#9$ZXMUlI5f;lo0Tt}hWMgM@!;u4VO6+CD^V}JOla{PpX zwtMN%ELZ8yvLK3y_CZ1Mq8jX$wO>9fYz*a|&=U~pAT!MNR7NXwpXU;@VAWjMJFl;u zZgEB}y1V3IwpP_S?E_{}g=C*@p6=dW8BKXt5_0qFCW`32_A=9!RcRfH^_5v8v^_HH z6vnCDp!d@sz)?jsi^)qBDn`P35;UO}ZWR<=WsEUExnU9_G^X4ym6;+U@Tq-eYo%Ir*L*Cs;M!{r^~um8-8JDSIRo*AwoK6t1Vi(M%Ozy zBCY{!XrUFtH8)1_m1`6%sIirN;1oQ!ZTJMXVJ;k$eJ*Rc^OBh+uk<@e5q;gredU zu@fU{oHe`eOw{cbgxuZjF^pQ0+j#9FCowRQgP%C+d0Rbs9;iQacd*Uf$-h$Vl{2r@ zcbZ#{lQetm=SQV-O)3RFd}f`D)7uwH;JaEbCX~&GKh{@f@`y=hZ#}*{5*9w;R4YCz 
z_IT=GPL!{=z^Ys?%aDSb)(&qs8gx@Rs((?N`g7adRCvmd8!N!;@6`w>!W?Nam1=~e zwjYUwN2R<@z3TCU_bz8{bV_&t=hzz){?UIB4XuRsBcX7<*Hpu(TCp)FzVRq3?iz`a zi1J_=0_P{0=OGowqL1&*?b=D*r#saFS+-`vB_gUHP{{zM{UxNk_JlnP`PnL{uy?Ih5N7)PST#s)OaJ(n zeC_JXP1;ILY)2+nYUYlLa9AUI+smA=K}=e~vWL3O><4blSZlNb3AbA93!7x>RLxx+ z@EUiV9jZY`HN@v&5sgda*1j$@;ten{IOWPvXY9z{?<6`y>_@bNv>;{BTA$8wv8yVG zsEHa{ps&tAKZ>X`!>oX9YN0$0_9oF_^IiPj?|_+&Lf|(41=CD(yi^4 zZP_sp%min08u!cIQK!|gq%3yQv~WIQf_pCsWb=wx{%(@*R*fBa2!JX!~o9gBOw(P@8bM;c45q;pU& zyzy>t@0KgISap7uY9^-KwfQHc4TwV0^7{K$P;XDhO!wPTL&to>BG$|bCwUb^**%way6-jA@9mHwhlASDol51mA~L2Rg6yf4`zTDJE3Mbl*IPAP2Rr*~gbH)f1**^toGpVIi5 z+!L`Af3>(Pp&;7OTh99}aJ_?lG#|YyMzU59X?(@zwJfG}RFs8mfZ(V2*UCrWqu|B# z)^9d?KQ1qzHw4E<^!$v?_l0S}Tem2l1#*e~Tw5T$rJXTdDcG=3_`qGc;2ntfd7`GV zP)36t5cFm|+9ZW;uz0t90v-ckqfcl(i*oQ>W|UYCnp7x&)sC8RIE^A@-L zL%rY;exe>M6)NO3jX%~Yx&OwkCRzJEKij#{M%rF~qlP~2n?pCGWE-u7UMs50!T!c= z(g3KJzRR_bk<2ky3z5;53NHbWB)h=~?L7G$=a}T}D*SXZfiri~bZ4lYM|$QF87fYw z|GZc)FXdCE^xZfe<|8?19oHtw^s=8{VJC!ZX=c_(-|lv@GKbXX$h=Iu(pn=Rt9VZX z*K}^5#iOtgBqH22J10yyOKQ~Iu&|26S2Xl-Lo?T?HG`S#?P_0;9ohF-w6N5EWF(&M zT;e6$b>PQkAO7z)W_W828wsL3J!4E$d}3`P!%iw`Tl3}5x~P77p5uxpY0G=QB0b6* znH4p|3nj(~Zsln8=9V=RZeOUUQdm>7*&+WP%Iyf$@v0#bYv_8pdNayUSvq{m)YU*z z5Ip9&oOQ{?to3XJa~eU7m{Jz?>|On=xgIJMSJ*Om$C zRp`2-5k8!fI=KXB=Wa$+nxFjVeyZO-U+wMCov;0Mx+v^6WOu-CE+T z23%sqZSSP3Zz^dWyi<=R(Re){%5~K(JkFAcoKJRjB|%9(0%a{%7OaTQg=fW6n4vQH zS>mfsv+jrXve@Gp-Sj;7a%-Zb8p^KV9Q-K%!(I{{)~Ir(&J$!9L`?yBE{ies>^9Wd z2KHT#vE?nFHC}0#5~=`aP%xcNY@NrtWSGh?q7>JBgrqaSnX(c^wZthc$DeE5-Ke8D zBwk@2dE;}x>Nu4CZaC-AmCUNdVMO(aV#1V{m5P1DQ7+% ztVB6}^0wGm6dJ!zoHHQizP2=2s_0jmV<~Q2-~51%T1v-20{aki$?ZY_5;4DPqM*Y9 zt0=)4NKL&hk5t+BB4in+q_N_GX{ico@iGSADlJocjYqt=Yj3;nM-gtRUK40YqQtz2 zOV2r+!1Y{I0CkFDm37F5^PhF$JHu*5O-g8cYndQx zLGxbkb<055Rb}p?F-cNDT*HqEt*kGu+>mlzJcWn-=%8XfdqOFqSo1t(gUjTA3hS9- zJw^rWuFNJ>wYc#w9@N(Oxr-HBWB|1I-QvNyL>ox9Y0n9X6Oc~IQIcA=vv#B+TsMcc!% znDl&;FjyVGND3qNNc878JAHrdRfXPdF}6Xfy&ib9+-!DJBdYnv~^k>f3=bK+-uQw!8)~jT@z9|dj3WiR3LJTjpYK_1B;V8>q 
z{+Yt_y@X|sUw|lpltZmJh;x4$KQ;Xg}D=}RWz`iS@@GRQG=^d8E z8@v#NQJ}lZ)#T&coew~O<9MBM&sqIA|EuIAm4G3?%li|A|cTI7STR z9vwx5{9LBvly87=ZRWx)S8Lka)t_(HFYH35xPA2?;uQB?&HdWIFx6H&GP#L%9Y9$% zh?60m#qK`O4*Im5CkW`OB(2DXula*s9BE|!R;M+wU*FG z^%k=-em^e{xYq`YT@~5!>(n3YgvRDi)Yn1ky!QbRxj2 z78AZZTw~?#dDhiTFx@zc8Fnz>U0;8tTjOM_;=W;Z8N!B{IMJD>=bl + + + + + + + + + + + + + + + + + + + + diff --git a/benchmarks/rnnt/ootb/inference/loadgen/generated/version_generated.cc b/benchmarks/rnnt/ootb/inference/loadgen/generated/version_generated.cc new file mode 100644 index 0000000..56c5603 --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/loadgen/generated/version_generated.cc @@ -0,0 +1,93 @@ +// DO NOT EDIT: Autogenerated by version_generator.py. + +#include + +namespace mlperf { + +const std::string& LoadgenVersion() { + static const std::string str = "1.1"; + return str; +} + +const std::string& LoadgenBuildDateLocal() { + static const std::string str = "2021-10-19T21:48:17.599620"; + return str; +} + +const std::string& LoadgenBuildDateUtc() { + static const std::string str = "2021-10-19T21:48:17.599647"; + return str; +} + +const std::string& LoadgenGitRevision() { + static const std::string str = "b41cf1d6b0"; + return str; +} + +const std::string& LoadgenGitCommitDate() { + static const std::string str = "2021-10-05T08:52:18-07:00"; + return str; +} + +const std::string& LoadgenGitStatus() { + static const std::string str = R"LGVG_RSLD( M ../speech_recognition/rnnt/run.sh)LGVG_RSLD"; + return str; +} + +const std::string& LoadgenGitLog() { + static const std::string str = R"LGVG_RSLD(b41cf1d6b05e70057c62c8da5f65c9a110819ffb Add MLCube support for translation benchmark (#1022) +077f823ece09e37d9d40540d8acd504bf138e880 Improve memory usage of RNN-T encoder StackTime module (#1015) +5128211f4ced39a50fb4c66d9b436ffacf7f4704 Dockerfile update (#1020) +215c057fc6690a47f3f66c72c076a8f73d66cb12 Update backend_tf.py (#1019) +ca250e4d37b3171a3c59b2d6b45d27884bcd8a24 Add TensorFlow intra and inter op 
threading to BERT tf_SUT (#1008) +a77ac37d07145d9f3123465a8fd18f9ebbde5d6a use size_t in ArgValueTransform for vector (#1004) +d29092298f5075b234eee21352a85c094a636e71 Format numbers (#999) +f3967a6cbdde8581520ae7f483db83e97721c5c1 Update python_api.cc (#991) +b1452de454bcf99c9dde1e50670887d0557b1841 Revert change to 0.5 seeds, add settings for 1.1 (#988) +90243c8bd745b808e5f0c2bd4593491a623cd595 Update checker with 1.1 RNG seeds (#986) +b11a1eae042b1637b5f199bb0e4e477fbbc1daaa Update with 1.1 seeds (#985) +e27236f0849eb8cd7f707c53b6422c2fcfd2d0b7 Added pytorch model for ResNet50 pytorch inferene and updated resnet50-pytorch profile (#982) +79919f17d609472a6d27e2396200e90e8cc90e4e slight modifications (#981) +df372335d124dcb0f4d4ba18a022206d449dcb15 update CONTRIBUTING +672d8b32169ef7551dfd77bbe84caf9fe80616dc update LICENSE +889f6080b69759342444f1aa97b2cb571fddc5b5 @dkorchevgithub adding lines suggested by @EtoDemerzel0427 to fix "good" count (#965))LGVG_RSLD"; + return str; +} + +const std::string& LoadgenSha1OfFiles() { + static const std::string str = R"LGVG_RSLD(012aad77e5206c89d50718c46c119d1f3cb056b2 /.clang-format +52f09b641b5693821a85daf5d8277b8e00ced162 /CMakeLists.txt +5748d70fda253921fcbbc8b71ebdc4219ad4549d /README.md +7c578c34d97c55ebecb5dd88e5d56a7ac299d197 /README_BUILD.md +4e3da1409225b92d93e86f8379aecd043eae6e54 /README_FAQ.md +9816f5b050ec2c3386824f1c07238b9fb7bde1e3 /bindings/c_api.cc +fce6628f717113ed402dc4f49396593694f29989 /bindings/c_api.h +9080d1779ecb10b8ef509dd9bf42d28f0bd8611b /bindings/python_api.cc +07d85ea5fefade8d3d66ca832b783bf90e5bce7d /demos/py_demo_multi_stream.py +2f941e949770a2ea9ad6d6eea2f33fc19150edcd /demos/py_demo_multi_stream_free.py +73c60cf12cfb69619249e637f9bcad18a47cda6c /demos/py_demo_offline.py +02a6ddbe8b51bde2cfed32abeccdb7fdd9cfe4e7 /demos/py_demo_server.py +3e7d786882099ac784bf878f1cde9dd3db2f6d9f /demos/py_demo_single_stream.py +51609e637eb47121bf34af468e4e8d06cb37e667 /issue_query_controller.cc 
+0365e32a3b80091113f871e0de1a80c6c66cbe11 /issue_query_controller.h +3c48f3b9299047a6000902759bc2f18ffcdcfa30 /loadgen.cc +1d985e62e8f8dd5523f8d3a1ed404529afcd221e /loadgen.h +47f748307536f80cfc606947b440dd732afc2637 /loadgen_integration_diagram.svg +48658fd3bc872f178fca86ea8ed2e3025f195d71 /logging.cc +384e30e0ef566a1d03d925726d5a1dda08ea8b22 /logging.h +a879f127c4bb64d131056942734ea1a6ba9c3033 /query_sample.h +61feb478b15668f48245c5083d559dee5de1c082 /query_sample_library.h +9fced62cc3af5e3b31b8e13f38cf35ba0564fe3c /setup.py +744482c84336420adbbe31ef8c2b39612c6ca3f8 /system_under_test.h +34eec1f8fb4a2db273b715686580f19cffa2f3a1 /test_settings.h +b45f0d4d28d4c49c33bf2575eff613ad8ca99154 /test_settings_internal.cc +81603212f824ceefbf2d0fe88d2ef94c67d28f6e /test_settings_internal.h +efde030897afdc372f974901eef2ed59bcedcbeb /utils.cc +40775e32d619ea6356826ae5ea4174c7911f6894 /utils.h +cbec2a5f98f9786c8c3d8b06b3d12df0b6550fa0 /version.cc +9d574baa64424e9c708fcfedd3dbb0b518a65fcc /version.h +fd7ffb94e1e84161264c44e175e551e5fffb6a09 /version_generator.py)LGVG_RSLD"; + return str; +} + +} // namespace mlperf diff --git a/benchmarks/rnnt/ootb/inference/loadgen/issue_query_controller.cc b/benchmarks/rnnt/ootb/inference/loadgen/issue_query_controller.cc new file mode 100644 index 0000000..2be942c --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/loadgen/issue_query_controller.cc @@ -0,0 +1,619 @@ +/* Copyright 2019 The MLPerf Authors. All Rights Reserved. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +/// \file +/// \brief Implements IssueQueryController and other helper classes for +/// query issuing. + +#include "issue_query_controller.h" + +#include + +namespace mlperf { + +void RegisterIssueQueryThread() { + loadgen::IssueQueryController::GetInstance().RegisterThread(); +} + +/// \brief Loadgen implementation details. +namespace loadgen { + +QueryMetadata::QueryMetadata( + const std::vector& query_sample_indices, + std::chrono::nanoseconds scheduled_delta, + ResponseDelegate* response_delegate, SequenceGen* sequence_gen) + : scheduled_delta(scheduled_delta), + response_delegate(response_delegate), + sequence_id(sequence_gen->NextQueryId()), + wait_count_(query_sample_indices.size()) { + samples_.reserve(query_sample_indices.size()); + for (QuerySampleIndex qsi : query_sample_indices) { + samples_.push_back({this, sequence_gen->NextSampleId(), qsi, + sequence_gen->NextAccLogRng()}); + } + query_to_send.reserve(query_sample_indices.size()); + for (auto& s : samples_) { + query_to_send.push_back({reinterpret_cast(&s), s.sample_index}); + } +} + +QueryMetadata::QueryMetadata(QueryMetadata&& src) + : query_to_send(std::move(src.query_to_send)), + scheduled_delta(src.scheduled_delta), + response_delegate(src.response_delegate), + sequence_id(src.sequence_id), + wait_count_(src.samples_.size()), + samples_(std::move(src.samples_)) { + // The move constructor should only be called while generating a + // vector of QueryMetadata, before it's been used. + // Assert that wait_count_ is in its initial state. + assert(src.wait_count_.load() == samples_.size()); + // Update the "parent" of each sample to be this query; the old query + // address will no longer be valid. + // TODO: Only set up the sample parenting once after all the queries have + // been created, rather than re-parenting on move here. 
+ for (size_t i = 0; i < samples_.size(); i++) { + SampleMetadata* s = &samples_[i]; + s->query_metadata = this; + query_to_send[i].id = reinterpret_cast(s); + } +} + +void QueryMetadata::NotifyOneSampleCompleted(PerfClock::time_point timestamp) { + size_t old_count = wait_count_.fetch_sub(1, std::memory_order_relaxed); + if (old_count == 1) { + all_samples_done_time = timestamp; + all_samples_done_.set_value(); + response_delegate->QueryComplete(); + } +} + +void QueryMetadata::WaitForAllSamplesCompleted() { + all_samples_done_.get_future().wait(); +} + +PerfClock::time_point QueryMetadata::WaitForAllSamplesCompletedWithTimestamp() { + all_samples_done_.get_future().wait(); + return all_samples_done_time; +} + +// When server_coalesce_queries is set to true in Server scenario, we +// sometimes coalesce multiple queries into one query. This is done by moving +// the other query's sample into current query, while maintaining their +// original scheduled_time. +void QueryMetadata::CoalesceQueries(QueryMetadata* queries, size_t first, + size_t last, size_t stride) { + // Copy sample data over to current query, boldly assuming that each query + // only has one sample. + query_to_send.reserve((last - first) / stride + + 2); // Extra one for the current query. + for (size_t i = first; i <= last; i += stride) { + auto& q = queries[i]; + auto& s = q.samples_[0]; + query_to_send.push_back({reinterpret_cast(&s), s.sample_index}); + q.scheduled_time = scheduled_time + q.scheduled_delta - scheduled_delta; + q.issued_start_time = issued_start_time; + } +} + +void QueryMetadata::Decoalesce() { query_to_send.resize(1); } + +/// \brief A base template that should never be used since each scenario has +/// its own specialization. +template +struct QueryScheduler { + static_assert(scenario != scenario, "Unhandled TestScenario"); +}; + +/// \brief Schedules queries for issuance in the single stream scenario. 
+template <> +struct QueryScheduler { + QueryScheduler(const TestSettingsInternal& /*settings*/, + const PerfClock::time_point) {} + + PerfClock::time_point Wait(QueryMetadata* next_query) { + auto tracer = MakeScopedTracer([](AsyncTrace& trace) { trace("Waiting"); }); + if (prev_query != nullptr) { + prev_query->WaitForAllSamplesCompleted(); + } + prev_query = next_query; + + auto now = PerfClock::now(); + next_query->scheduled_time = now; + next_query->issued_start_time = now; + return now; + } + + QueryMetadata* prev_query = nullptr; +}; + +/// \brief Schedules queries for issuance in the multi stream scenario. +template <> +struct QueryScheduler { + QueryScheduler(const TestSettingsInternal& settings, + const PerfClock::time_point start) + : qps(settings.target_qps), + max_async_queries(settings.max_async_queries), + start_time(start) {} + + PerfClock::time_point Wait(QueryMetadata* next_query) { + { + prev_queries.push(next_query); + auto tracer = + MakeScopedTracer([](AsyncTrace& trace) { trace("Waiting"); }); + if (prev_queries.size() > max_async_queries) { + prev_queries.front()->WaitForAllSamplesCompleted(); + prev_queries.pop(); + } + } + + { + auto tracer = + MakeScopedTracer([](AsyncTrace& trace) { trace("Scheduling"); }); + // TODO(brianderson): Skip ticks based on the query complete time, + // before the query synchronization + notification thread hop, + // rather than after. 
+ PerfClock::time_point now = PerfClock::now(); + auto i_period_old = i_period; + PerfClock::time_point tick_time; + do { + i_period++; + tick_time = + start_time + SecondsToDuration(i_period / qps); + Log([tick_time](AsyncLog& log) { + log.TraceAsyncInstant("QueryInterval", 0, tick_time); + }); + } while (tick_time < now); + next_query->scheduled_intervals = i_period - i_period_old; + next_query->scheduled_time = tick_time; + std::this_thread::sleep_until(tick_time); + } + + auto now = PerfClock::now(); + next_query->issued_start_time = now; + return now; + } + + size_t i_period = 0; + double qps; + const size_t max_async_queries; + PerfClock::time_point start_time; + std::queue prev_queries; +}; + +/// \brief Schedules queries for issuance in the single stream free scenario. +template <> +struct QueryScheduler { + QueryScheduler(const TestSettingsInternal& settings, + const PerfClock::time_point /*start*/) + : max_async_queries(settings.max_async_queries) {} + + PerfClock::time_point Wait(QueryMetadata* next_query) { + bool schedule_time_needed = true; + { + prev_queries.push(next_query); + auto tracer = + MakeScopedTracer([](AsyncTrace& trace) { trace("Waiting"); }); + if (prev_queries.size() > max_async_queries) { + next_query->scheduled_time = + prev_queries.front()->WaitForAllSamplesCompletedWithTimestamp(); + schedule_time_needed = false; + prev_queries.pop(); + } + } + + auto now = PerfClock::now(); + if (schedule_time_needed) { + next_query->scheduled_time = now; + } + next_query->issued_start_time = now; + return now; + } + + const size_t max_async_queries; + std::queue prev_queries; +}; + +/// \brief Schedules queries for issuance in the server scenario. 
+template <> +struct QueryScheduler { + QueryScheduler(const TestSettingsInternal& /*settings*/, + const PerfClock::time_point start) + : start(start) {} + + PerfClock::time_point Wait(QueryMetadata* next_query) { + auto tracer = + MakeScopedTracer([](AsyncTrace& trace) { trace("Scheduling"); }); + + auto scheduled_time = start + next_query->scheduled_delta; + next_query->scheduled_time = scheduled_time; + + auto now = PerfClock::now(); + if (now < scheduled_time) { + std::this_thread::sleep_until(scheduled_time); + now = PerfClock::now(); + } + next_query->issued_start_time = now; + return now; + } + + const PerfClock::time_point start; +}; + +/// \brief Schedules queries for issuance in the offline scenario. +template <> +struct QueryScheduler { + QueryScheduler(const TestSettingsInternal& /*settings*/, + const PerfClock::time_point start) + : start(start) {} + + PerfClock::time_point Wait(QueryMetadata* next_query) { + next_query->scheduled_time = start; + auto now = PerfClock::now(); + next_query->issued_start_time = now; + return now; + } + + const PerfClock::time_point start; +}; + +IssueQueryController& IssueQueryController::GetInstance() { + // The singleton. + static IssueQueryController instance; + return instance; +} + +void IssueQueryController::RegisterThread() { + // Push this thread to thread queue. + auto thread_id = std::this_thread::get_id(); + size_t thread_idx{0}; + { + std::lock_guard lock(mtx); + thread_idx = thread_ids.size(); + thread_ids.emplace_back(thread_id); + } + + LogDetail([thread_id, thread_idx](AsyncDetail& detail) { +#if USE_NEW_LOGGING_FORMAT + std::stringstream ss; + ss << "Registered IssueQueryThread[" << thread_idx + << "]. thread ID : " << std::hash()(thread_id); + MLPERF_LOG(detail, "generic_message", ss.str()); +#else + detail("Registered IssueQueryThread[" + std::to_string(thread_idx) + + "]. thread ID : ", + std::to_string(std::hash()(thread_id))); +#endif + }); + + // Start test. 
+ while (true) { + // Wait until the main thread signals a start or the end. + { + std::unique_lock lock(mtx); + cond_var.wait(lock, [this]() { return issuing || end_test; }); + // The test has ended. + if (end_test) { + break; + } + } + + // Start issuing queries. + if (thread_idx <= num_threads) { + IssueQueriesInternal(num_threads, thread_idx); + { + std::lock_guard lock(mtx); + thread_complete[thread_idx] = true; + } + cond_var.notify_all(); + } + + // Wait until all issue threads complete. + { + std::unique_lock lock(mtx); + cond_var.wait(lock, [this]() { return !issuing; }); + } + } +} + +void IssueQueryController::SetNumThreads(size_t n) { + // Try waiting for IssueQueryThreads() to registered themselves. + std::unique_lock lock(mtx); + const std::chrono::seconds timeout(10); + num_threads = n; + cond_var.wait_for(lock, timeout, + [this]() { return thread_ids.size() >= num_threads; }); + // If the number of registered threads do not match the settings, report an + // error. + if (num_threads != thread_ids.size()) { + LogDetail([this](AsyncDetail& detail) { +#if USE_NEW_LOGGING_FORMAT + std::stringstream ss; + ss << "Mismatch between settings and number of registered " + << "IssueQueryThreads! settings.server_num_issue_query_threads = " + << num_threads << " but " << thread_ids.size() + << " threads registered."; + MLPERF_LOG_ERROR(detail, "error_runtime", ss.str()); +#else + detail.Error( + "Mismatch between settings and number of registered ", + "IssueQueryThreads! settings.server_num_issue_query_threads = ", + num_threads, " but ", thread_ids.size(), " threads registered."); +#endif + }); + } +} + +template +void IssueQueryController::StartIssueQueries(IssueQueryState* s) { + // Get the state. + state = s; + state->start_for_power = std::chrono::system_clock::now(); + state->start_time = PerfClock::now(); + + if (scenario != TestScenario::Server || num_threads == 0) { + // Usually, we just use the same thread to issue queries. 
+ IssueQueriesInternal(1, 0); + } else { + // If server_num_issue_query_threads is non-zero, issue queries on the + // registered threads. + // Tell all threads to start issuing queries. + { + std::unique_lock lock(mtx); + issuing = true; + thread_complete.assign(num_threads, false); + } + cond_var.notify_all(); + // Wait until all issue threads complete. + { + std::unique_lock lock(mtx); + cond_var.wait(lock, [this]() { + return std::all_of(thread_complete.begin(), thread_complete.end(), + [](bool in) { return in; }); + }); + issuing = false; + } + cond_var.notify_all(); + } +} + +template void IssueQueryController::StartIssueQueries< + TestScenario::MultiStream>(IssueQueryState* s); +template void IssueQueryController::StartIssueQueries< + TestScenario::MultiStreamFree>(IssueQueryState* s); +template void IssueQueryController::StartIssueQueries( + IssueQueryState* s); +template void IssueQueryController::StartIssueQueries( + IssueQueryState* s); +template void IssueQueryController::StartIssueQueries< + TestScenario::SingleStream>(IssueQueryState* s); + +void IssueQueryController::EndThreads() { + // Tell all the issue threads to end. + { + std::lock_guard lock(mtx); + end_test = true; + } + cond_var.notify_all(); +} + +template +void IssueQueryController::IssueQueriesInternal(size_t query_stride, + size_t thread_idx) { + // Get all the needed information. + auto sut = state->sut; + auto& queries = *state->queries; + auto& response_logger = *state->response_delegate; + + // Some book-keeping about the number of queries issued. + size_t queries_issued = 0; + size_t queries_issued_per_iter = 0; + size_t queries_count = queries.size(); + + // Calculate the min/max queries per issue thread. + const auto& settings = *state->settings; + const size_t min_query_count = settings.min_query_count; + const size_t min_query_count_for_thread = + (thread_idx < (min_query_count % query_stride)) + ? 
(min_query_count / query_stride + 1) + : (min_query_count / query_stride); + const size_t max_query_count = settings.max_query_count; + const size_t max_query_count_for_thread = + (thread_idx < (max_query_count % query_stride)) + ? (max_query_count / query_stride + 1) + : (max_query_count / query_stride); + + // Create query scheduler. + const auto start = state->start_time; + QueryScheduler query_scheduler(settings, start); + auto last_now = start; + + // We can never run out of generated queries in the server scenario, + // since the duration depends on the scheduled query time and not + // the actual issue time. + bool ran_out_of_generated_queries = scenario != TestScenario::Server; + // This is equal to the sum of numbers of samples issued. + size_t expected_latencies = 0; + + for (size_t queries_idx = thread_idx; queries_idx < queries_count; + queries_idx += query_stride) { + queries_issued_per_iter = 0; + auto& query = queries[queries_idx]; + auto tracer1 = + MakeScopedTracer([](AsyncTrace& trace) { trace("SampleLoop"); }); + last_now = query_scheduler.Wait(&query); + + // If in Server scenario and server_coalesce_queries is enabled, multiple + // queries are coalesed into one big query if the current time has already + // passed the scheduled time of multiple queries. + if (scenario == TestScenario::Server && + settings.requested.server_coalesce_queries) { + auto current_query_idx = queries_idx; + for (; queries_idx + query_stride < queries_count; + queries_idx += query_stride) { + auto next_scheduled_time = + start + queries[queries_idx + query_stride].scheduled_delta; + // If current time hasn't reached the next query's scheduled time yet, + // don't include next query. + if (last_now < next_scheduled_time) { + break; + } + queries_issued_per_iter++; + } + if (queries_idx > current_query_idx) { + // Coalesced all the pass due queries. 
+ query.CoalesceQueries(queries.data(), current_query_idx + query_stride, + queries_idx, query_stride); + } + } + + // Issue the query to the SUT. + { + auto tracer3 = + MakeScopedTracer([](AsyncTrace& trace) { trace("IssueQuery"); }); + sut->IssueQuery(query.query_to_send); + } + + // Increment the counter. + expected_latencies += query.query_to_send.size(); + queries_issued_per_iter++; + queries_issued += queries_issued_per_iter; + + if (scenario == TestScenario::Server && + settings.requested.server_coalesce_queries) { + // Set the query back to its clean state. + query.Decoalesce(); + } + + if (state->mode == TestMode::AccuracyOnly) { + // TODO: Rate limit in accuracy mode so accuracy mode works even + // if the expected/target performance is way off. + continue; + } + + auto duration = (last_now - start); + if (scenario == TestScenario::Server) { + if (settings.max_async_queries != 0) { + // Checks if there are too many outstanding queries. + size_t queries_issued_total{0}; + if (multi_thread) { + // To check actual number of async queries in multi-thread case, + // we would have to combine the number of queries_issued from all + // issue threads. + { + std::lock_guard lock(state->mtx); + state->queries_issued += queries_issued_per_iter; + queries_issued_total = state->queries_issued; + } + } else { + queries_issued_total = queries_issued; + } + size_t queries_outstanding = + queries_issued_total - + response_logger.queries_completed.load(std::memory_order_relaxed); + if (queries_outstanding > settings.max_async_queries) { + LogDetail([thread_idx, queries_issued_total, + queries_outstanding](AsyncDetail& detail) { +#if USE_NEW_LOGGING_FORMAT + std::stringstream ss; + ss << "IssueQueryThread " << thread_idx + << " Ending early: Too many outstanding queries." 
+ << " issued " << queries_issued_total << " outstanding " + << queries_outstanding; + MLPERF_LOG_ERROR(detail, "error_runtime", ss.str()); +#else + detail.Error("IssueQueryThread ", std::to_string(thread_idx), + " Ending early: Too many outstanding queries.", + "issued", std::to_string(queries_issued_total), + "outstanding", std::to_string(queries_outstanding)); +#endif + }); + break; + } + } + } else { + // Checks if we end normally. + if (queries_issued >= min_query_count_for_thread && + duration >= settings.target_duration) { + LogDetail([thread_idx](AsyncDetail& detail) { +#if USE_NEW_LOGGING_FORMAT + MLPERF_LOG( + detail, "generic_message", + "Ending naturally: Minimum query count and test duration met."); +#else + detail( + " Ending naturally: Minimum query count and test duration met."); +#endif + }); + ran_out_of_generated_queries = false; + break; + } + } + + // Checks if we have exceeded max_query_count for this thread. + if (settings.max_query_count != 0 && + queries_issued >= max_query_count_for_thread) { + LogDetail([thread_idx, queries_issued](AsyncDetail& detail) { +#if USE_NEW_LOGGING_FORMAT + std::stringstream ss; + ss << "IssueQueryThread " << thread_idx + << " Ending early: Max query count reached." + << " query_count " << queries_issued; + MLPERF_LOG_ERROR(detail, "error_runtime", ss.str()); +#else + detail.Error("IssueQueryThread ", std::to_string(thread_idx), + " Ending early: Max query count reached.", "query_count", + std::to_string(queries_issued)); +#endif + }); + ran_out_of_generated_queries = false; + break; + } + + // Checks if we have exceeded max_duration. + if (settings.max_duration.count() != 0 && + duration > settings.max_duration) { + LogDetail([thread_idx, duration](AsyncDetail& detail) { +#if USE_NEW_LOGGING_FORMAT + std::stringstream ss; + ss << "IssueQueryThread " << thread_idx + << " Ending early: Max test duration reached." 
+ << " duration_ns " << duration.count(); + MLPERF_LOG_ERROR(detail, "error_runtime", ss.str()); +#else + detail.Error("IssueQueryThread ", std::to_string(thread_idx), + " Ending early: Max test duration reached.", "duration_ns", + std::to_string(duration.count())); +#endif + }); + ran_out_of_generated_queries = false; + break; + } + } + + // Combine the issuing statistics from multiple issue threads. + { + std::lock_guard lock(state->mtx); + state->ran_out_of_generated_queries |= ran_out_of_generated_queries; + // In Server scenario and when max_async_queries != 0, we would have set + // state->queries_issued when we check max_async_queries in the loop. + if (!(scenario == TestScenario::Server && settings.max_async_queries != 0 && + multi_thread)) { + state->queries_issued += queries_issued; + } + state->expected_latencies += expected_latencies; + } +} + +} // namespace loadgen + +} // namespace mlperf diff --git a/benchmarks/rnnt/ootb/inference/loadgen/issue_query_controller.h b/benchmarks/rnnt/ootb/inference/loadgen/issue_query_controller.h new file mode 100644 index 0000000..e5cf8fd --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/loadgen/issue_query_controller.h @@ -0,0 +1,211 @@ +/* Copyright 2019 The MLPerf Authors. All Rights Reserved. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +/// \file +/// \brief Declare IssueQueryController and other helper classes for +/// query issuing. 
+ +#ifndef MLPERF_LOADGEN_ISSUE_QUERY_CONTROLLER_H_ +#define MLPERF_LOADGEN_ISSUE_QUERY_CONTROLLER_H_ + +#include "loadgen.h" +#include "logging.h" +#include "query_sample.h" +#include "system_under_test.h" +#include "test_settings_internal.h" +#include "utils.h" + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace mlperf { + +namespace loadgen { + +struct SampleMetadata; +class QueryMetadata; + +/// \brief Every query and sample within a call to StartTest gets a unique +/// sequence id for easy cross reference, and a random number which is used to +/// determine accuracy logging when it is enabled. +struct SequenceGen { + uint64_t NextQueryId() { return query_id++; } + uint64_t NextSampleId() { return sample_id++; } + uint64_t CurrentSampleId() { return sample_id; } + double NextAccLogRng() { return accuracy_log_dist(accuracy_log_rng); } + void InitAccLogRng(uint64_t accuracy_log_rng_seed) { + accuracy_log_rng = std::mt19937(accuracy_log_rng_seed); + } + + private: + uint64_t query_id = 0; + uint64_t sample_id = 0; + std::mt19937 accuracy_log_rng; + std::uniform_real_distribution accuracy_log_dist = + std::uniform_real_distribution(0, 1); +}; + +/// \brief An interface for a particular scenario + mode to implement for +/// extended hanlding of sample completion. +struct ResponseDelegate { + virtual ~ResponseDelegate() = default; + virtual void SampleComplete(SampleMetadata*, QuerySampleResponse*, + PerfClock::time_point, const ResponseCallback&) = 0; + virtual void QueryComplete() = 0; + std::atomic queries_completed{0}; +}; + +/// \brief Used by the loadgen to coordinate response data and completion. +struct SampleMetadata { + QueryMetadata* query_metadata; + uint64_t sequence_id; + QuerySampleIndex sample_index; + double accuracy_log_val; +}; + +/// \brief Maintains data and timing info for a query and all its samples. 
+class QueryMetadata { + public: + QueryMetadata(const std::vector& query_sample_indices, + std::chrono::nanoseconds scheduled_delta, + ResponseDelegate* response_delegate, SequenceGen* sequence_gen); + QueryMetadata(QueryMetadata&& src); + + void NotifyOneSampleCompleted(PerfClock::time_point timestamp); + + void WaitForAllSamplesCompleted(); + + PerfClock::time_point WaitForAllSamplesCompletedWithTimestamp(); + + /// \brief Coalesce multiple queries into one query. + /// When server_coalesce_queries is set to true in Server scenario, we + /// sometimes coalesce multiple queries into one query. This is done by moving + /// the other query's sample into current query, while maintaining their + /// original scheduled_time. + void CoalesceQueries(QueryMetadata* queries, size_t first, size_t last, + size_t stride); + + /// \brief Set a coalesced query back to its original state. + void Decoalesce(); + + public: + std::vector query_to_send; + const std::chrono::nanoseconds scheduled_delta; + ResponseDelegate* const response_delegate; + const uint64_t sequence_id; + + // Performance information. + + size_t scheduled_intervals = 0; // Number of intervals between queries, as + // actually scheduled during the run. + // For the multi-stream scenario only. + PerfClock::time_point scheduled_time; + PerfClock::time_point issued_start_time; + PerfClock::time_point all_samples_done_time; + + private: + std::atomic wait_count_; + std::promise all_samples_done_; + std::vector samples_; +}; + +/// \brief A state object for communications between the controller and its +/// caller. +struct IssueQueryState { + // Information from caller to controller. + SystemUnderTest* sut; + std::vector* queries; + ResponseDelegate* response_delegate; + const TestSettingsInternal* settings; + TestMode mode; + // Information from controller to caller. 
+ std::chrono::system_clock::time_point start_for_power; + PerfClock::time_point start_time; + bool ran_out_of_generated_queries; + size_t queries_issued; + size_t expected_latencies; + // The lock to modify this state (in multi-thread case). + std::mutex mtx; +}; + +/// \brief Controls the query issuing part. +/// This controller handles both the cases if the user registers or does not +/// register IssueQueryThreads. It is implemented as a singleton, and is NOT +/// thread-safe (i.e. users should not call StartTest() on multiple threads). +/// It is thread-safe with regard to IssueQueryThreads. +class IssueQueryController { + public: + /// \brief Get the controller instance singleton. + static IssueQueryController& GetInstance(); + + /// \brief Don't allow copy. This is a singleton. + IssueQueryController(IssueQueryController const&) = delete; + void operator=(IssueQueryController const&) = delete; + + /// \brief Register an IssueQueryThread. + /// It is blocking until the entire test ends. + void RegisterThread(); + + /// \brief Set number of IssueQueryThreads and wait for thread registration. + /// If for any reason the number of registered threads do not match the + /// specified number, it prints out an error. + void SetNumThreads(size_t n); + + /// \brief Kick off the query issuing. + /// The query issuing will be done on the current thread if there is no + /// registered IssueQueryThreads or if it is not in Server scenario. + template + void StartIssueQueries(IssueQueryState* s); + + /// \brief Notify the IssueQueryThreads to end. + void EndThreads(); + + private: + /// \brief Hide constructor. This is a singleton. + IssueQueryController() {} + + /// \brief The internal helper which actually issues queries. + /// This should be called by the thread(s) which issues queries. + template + void IssueQueriesInternal(size_t query_stride, size_t thread_idx); + + /// \brief The issue query state. 
+ IssueQueryState* state; + /// \brief Locks for communications across IssueQueryThreads and the main + /// thread. + std::mutex mtx; + std::condition_variable cond_var; + /// \brief Thread ids of the registered IssueQueryThreads. + std::vector thread_ids; + size_t num_threads{0}; + /// \brief Whether the threads should be actively issuing queries. + bool issuing{false}; + /// \brief Flags for each IssueQueryThread to mark that it is done. + std::vector thread_complete; + /// \brief Whether the threads can end now. + bool end_test{false}; +}; + +} // namespace loadgen + +} // namespace mlperf + +#endif // MLPERF_LOADGEN_ISSUE_QUERY_CONTROLLER_H_ diff --git a/benchmarks/rnnt/ootb/inference/loadgen/loadgen.cc b/benchmarks/rnnt/ootb/inference/loadgen/loadgen.cc new file mode 100644 index 0000000..f01d62c --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/loadgen/loadgen.cc @@ -0,0 +1,1644 @@ +/* Copyright 2019 The MLPerf Authors. All Rights Reserved. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/
+
+// NOTE(review): This chunk is a unified-diff fragment ('+' prefixes) whose
+// extraction stripped angle-bracket text: the bare '#include' lines below and
+// templates such as 'std::vector' / 'std::chrono::duration_cast' / 'template'
+// are missing their '<...>' arguments. Restore them from upstream before
+// attempting to build.
+#include "loadgen.h"
+
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "issue_query_controller.h"
+#include "logging.h"
+#include "query_sample.h"
+#include "query_sample_library.h"
+#include "system_under_test.h"
+#include "test_settings.h"
+#include "test_settings_internal.h"
+#include "utils.h"
+#include "version.h"
+
+namespace mlperf {
+
+/// \brief Loadgen implementation details.
+namespace loadgen {
+
+/// \brief A random set of samples in the QSL that should fit in RAM when
+/// loaded together.
+struct LoadableSampleSet {
+  std::vector set;
+  const size_t sample_distribution_end;  // Excludes padding in multi-stream.
+};
+
+/// \brief Generates nanoseconds from a start time to multiple end times.
+/// TODO: This isn't very useful anymore. Remove it.
+struct DurationGeneratorNs {
+  const PerfClock::time_point start;
+  // Elapsed time from |start| to |end|; the duration_cast target was
+  // presumably std::chrono::nanoseconds (template argument stripped by
+  // extraction) — confirm against upstream.
+  int64_t delta(PerfClock::time_point end) const {
+    return std::chrono::duration_cast(end - start)
+        .count();
+  }
+};
+
+/// \brief ResponseDelegate implementation templated by scenario and mode.
+template
+struct ResponseDelegateDetailed : public ResponseDelegate {
+  double accuracy_log_offset = 0.0f;
+  double accuracy_log_prob = 0.0f;
+
+  // Logs per-sample trace/accuracy data when a sample's response completes.
+  void SampleComplete(SampleMetadata* sample, QuerySampleResponse* response,
+                      PerfClock::time_point complete_begin_time,
+                      const ResponseCallback& response_cb) override {
+    // Using a raw pointer here should help us hit the std::function
+    // small buffer optimization code path when we aren't copying data.
+    // For some reason, using std::unique_ptr wasn't moving
+    // into the lambda; even with C++14.
+    std::vector* sample_data_copy = nullptr;
+    // Cycle the sample's accuracy_log_val by accuracy_log_offset, wrapping
+    // back into [0, 1); the sample's data is copied for the accuracy log when
+    // the result is <= accuracy_log_prob, or always in AccuracyOnly mode.
+    double accuracy_log_val =
+        sample->accuracy_log_val + accuracy_log_offset < 1.0
+            ? sample->accuracy_log_val + accuracy_log_offset
+            : sample->accuracy_log_val + accuracy_log_offset - 1.0;
+    if (mode == TestMode::AccuracyOnly ||
+        accuracy_log_val <= accuracy_log_prob) {
+      // If a response_cb callback is provided, data only needs to reside on
+      // the host *after* calling it. Note that the callback is blocking and
+      // will likely involve a memcpy from accelerator to host.
+      if (response_cb) {
+        response_cb(response);
+      }
+      // TODO: Verify accuracy with the data copied here.
+      uint8_t* src_begin = reinterpret_cast(response->data);
+      uint8_t* src_end = src_begin + response->size;
+      sample_data_copy = new std::vector(src_begin, src_end);
+    }
+    Log([sample, complete_begin_time, sample_data_copy](AsyncLog& log) {
+      QueryMetadata* query = sample->query_metadata;
+      DurationGeneratorNs sched{query->scheduled_time};
+
+      if (scenario == TestScenario::Server) {
+        // Trace the server scenario as a stacked graph via counter events.
+        DurationGeneratorNs issued{query->issued_start_time};
+        log.TraceCounterEvent("Latency", query->scheduled_time, "issue_delay",
+                              sched.delta(query->issued_start_time),
+                              "issue_to_done",
+                              issued.delta(complete_begin_time));
+      }
+
+      // While visualizing overlapping samples in offline mode is not
+      // practical, sample completion is still recorded for auditing purposes.
+      log.TraceSample("Sample", sample->sequence_id, query->scheduled_time,
+                      complete_begin_time, "sample_seq", sample->sequence_id,
+                      "query_seq", query->sequence_id, "sample_idx",
+                      sample->sample_index, "issue_start_ns",
+                      sched.delta(query->issued_start_time), "complete_ns",
+                      sched.delta(complete_begin_time));
+
+      if (sample_data_copy) {
+        log.LogAccuracy(sample->sequence_id, sample->sample_index,
+                        LogBinaryAsHexString{sample_data_copy});
+        delete sample_data_copy;
+      }
+
+      // Record the latency at the end, since it will unblock the issuing
+      // thread and potentially destroy the metadata being used above.
+      QuerySampleLatency latency = sched.delta(complete_begin_time);
+      log.RecordSampleCompletion(sample->sequence_id, complete_begin_time,
+                                 latency);
+    });
+  }
+
+  void QueryComplete() override {
+    // We only need to track outstanding queries in the server scenario to
+    // detect when the SUT has fallen too far behind.
+    if (scenario == TestScenario::Server) {
+      queries_completed.fetch_add(1, std::memory_order_relaxed);
+    }
+  }
+};
+
+/// \brief Selects the query timestamps for all scenarios except Server.
+/// Returns a callable yielding a fixed inter-query period of 1/qps.
+template
+auto ScheduleDistribution(double qps) {
+  return [period = std::chrono::duration_cast(
+              std::chrono::duration(1.0 / qps))](auto& /*gen*/) {
+    return period;
+  };
+}
+
+/// \brief Selects the query timestamps for the Server scenario.
+template <>
+auto ScheduleDistribution(double qps) {
+  // Poisson arrival process corresponds to exponentially distributed
+  // interarrival times.
+  return [dist = std::exponential_distribution<>(qps)](auto& gen) mutable {
+    return std::chrono::duration_cast(
+        std::chrono::duration(dist(gen)));
+  };
+}
+
+/// \brief Selects samples for the accuracy mode.
+/// Yields every stride-th index in [0, sample_count) exactly once, in a
+/// shuffled order; std::vector::at() throws if more samples are requested
+/// than were generated.
+template
+auto SampleDistribution(size_t sample_count, size_t stride, std::mt19937* rng) {
+  std::vector indices;
+  for (size_t i = 0; i < sample_count; i += stride) {
+    indices.push_back(i);
+  }
+  std::shuffle(indices.begin(), indices.end(), *rng);
+  return [indices = std::move(indices), i = size_t(0)](auto& /*gen*/) mutable {
+    return indices.at(i++);
+  };
+}
+
+/// \brief Selects samples for the performance mode.
+/// Yields uniformly random indices in [0, sample_count - 1], with repeats.
+template <>
+auto SampleDistribution(size_t sample_count,
+                        size_t /*stride*/,
+                        std::mt19937* /*rng*/) {
+  return [dist = std::uniform_int_distribution<>(0, sample_count - 1)](
+             auto& gen) mutable { return dist(gen); };
+}
+
+/// \brief Generates queries for the requested settings, templated by
+/// scenario and mode.
+/// \todo Make GenerateQueries faster.
+/// QueryMetadata is expensive to move; either reserve queries in advance +/// so the queries vector doesn't need to grow. And/or parent samples to their +/// queries only after all queries have been generated. +/// \todo For the server scenario only, scale the query timeline at the end so +/// the QPS as scheduled is equal to the QPS as requested. +template +std::vector GenerateQueries( + const TestSettingsInternal& settings, + const LoadableSampleSet& loaded_sample_set, SequenceGen* sequence_gen, + ResponseDelegate* response_delegate) { + auto tracer = + MakeScopedTracer([](AsyncTrace& trace) { trace("GenerateQueries"); }); + + auto& loaded_samples = loaded_sample_set.set; + + // Generate 2x more samples than we think we'll need given the expected + // QPS in case the SUT is faster than expected. + // We should exit before issuing all queries. + // Does not apply to the server scenario since the duration only + // depends on the ideal scheduled time, not the actual issue time. + const int duration_multiplier = scenario == TestScenario::Server ? 1 : 2; + std::chrono::microseconds gen_duration = + duration_multiplier * settings.target_duration; + size_t min_queries = settings.min_query_count; + + size_t samples_per_query = settings.samples_per_query; + if (mode == TestMode::AccuracyOnly && scenario == TestScenario::Offline) { + samples_per_query = loaded_sample_set.sample_distribution_end; + } + + // We should not exit early in accuracy mode. + if (mode == TestMode::AccuracyOnly || settings.performance_issue_unique || + settings.performance_issue_same) { + gen_duration = std::chrono::microseconds(0); + // Integer truncation here is intentional. + // For MultiStream, loaded samples is properly padded. + // For Offline, we create a 'remainder' query at the end of this function. 
+ min_queries = loaded_samples.size() / samples_per_query; + } + + std::vector queries; + + // Using the std::mt19937 pseudo-random number generator ensures a modicum of + // cross platform reproducibility for trace generation. + std::mt19937 sample_rng(settings.sample_index_rng_seed); + std::mt19937 schedule_rng(settings.schedule_rng_seed); + + constexpr bool kIsMultiStream = scenario == TestScenario::MultiStream || + scenario == TestScenario::MultiStreamFree; + const size_t sample_stride = kIsMultiStream ? samples_per_query : 1; + + auto sample_distribution = SampleDistribution( + loaded_sample_set.sample_distribution_end, sample_stride, &sample_rng); + // Use the unique sample distribution same as in AccuracyMode to + // to choose samples when either flag performance_issue_unique + // or performance_issue_same is set. + auto sample_distribution_unique = SampleDistribution( + loaded_sample_set.sample_distribution_end, sample_stride, &sample_rng); + + auto schedule_distribution = + ScheduleDistribution(settings.target_qps); + + std::vector samples(samples_per_query); + std::chrono::nanoseconds timestamp(0); + std::chrono::nanoseconds prev_timestamp(0); + // Choose a single sample to repeat when in performance_issue_same mode + QuerySampleIndex same_sample = settings.performance_issue_same_index; + + while (prev_timestamp < gen_duration || queries.size() < min_queries) { + if (kIsMultiStream) { + QuerySampleIndex sample_i = settings.performance_issue_unique + ? sample_distribution_unique(sample_rng) + : settings.performance_issue_same + ? same_sample + : sample_distribution(sample_rng); + for (auto& s : samples) { + // Select contiguous samples in the MultiStream scenario. + // This will not overflow, since GenerateLoadableSets adds padding at + // the end of the loadable sets in the MultiStream scenario. + // The padding allows the starting samples to be the same for each + // query as the value of samples_per_query increases. 
+ s = loaded_samples[sample_i++]; + } + } else if (scenario == TestScenario::Offline) { + // For the Offline + Performance scenario, we also want to support + // contiguous samples. In this scenario the query can be much larger than + // what fits into memory. We simply repeat loaded_samples N times, plus a + // remainder to ensure we fill up samples. Note that this eliminates + // randomization. + size_t num_loaded_samples = loaded_samples.size(); + size_t num_full_repeats = samples_per_query / num_loaded_samples; + uint64_t remainder = samples_per_query % (num_loaded_samples); + if (settings.performance_issue_same) { + std::fill(samples.begin(), samples.begin() + num_loaded_samples, + loaded_samples[same_sample]); + } else { + for (size_t i = 0; i < num_full_repeats; ++i) { + std::copy(loaded_samples.begin(), loaded_samples.end(), + samples.begin() + i * num_loaded_samples); + } + + std::copy(loaded_samples.begin(), loaded_samples.begin() + remainder, + samples.begin() + num_full_repeats * num_loaded_samples); + } + } else { + for (auto& s : samples) { + s = loaded_samples[settings.performance_issue_unique + ? sample_distribution_unique(sample_rng) + : settings.performance_issue_same + ? same_sample + : sample_distribution(sample_rng)]; + } + } + queries.emplace_back(samples, timestamp, response_delegate, sequence_gen); + prev_timestamp = timestamp; + timestamp += schedule_distribution(schedule_rng); + } + + // See if we need to create a "remainder" query for offline+accuracy to + // ensure we issue all samples in loaded_samples. Offline doesn't pad + // loaded_samples like MultiStream does. 
+ if (scenario == TestScenario::Offline && mode == TestMode::AccuracyOnly) { + size_t remaining_samples = loaded_samples.size() % samples_per_query; + if (remaining_samples != 0) { + samples.resize(remaining_samples); + for (auto& s : samples) { + s = loaded_samples[sample_distribution(sample_rng)]; + } + queries.emplace_back(samples, timestamp, response_delegate, sequence_gen); + } + } + + LogDetail([count = queries.size(), spq = settings.samples_per_query, + duration = timestamp.count()](AsyncDetail& detail) { +#if USE_NEW_LOGGING_FORMAT + MLPERF_LOG(detail, "generated_query_count", count); + MLPERF_LOG(detail, "generated_samples_per_query", spq); + MLPERF_LOG(detail, "generated_query_duration", duration); +#else + detail("GeneratedQueries: ", "queries", count, "samples per query", spq, + "duration", duration); +#endif + }); + + return queries; +} + +/// \brief Provides performance results that are independent of scenario +/// and other context. +/// \todo Move to results.h/cc +struct PerformanceResult { + std::vector sample_latencies; + std::vector query_latencies; // MultiStream only. + std::vector query_intervals; // MultiStream only. + size_t queries_issued; + double max_latency; + double final_query_scheduled_time; // seconds from start. + double final_query_issued_time; // seconds from start. + double final_query_all_samples_done_time; // seconds from start. +}; + +/// \brief Issues a series of pre-generated queries. +// TODO: Templates for scenario and mode are overused, given the loadgen +// no longer generates queries on the fly. Should we reduce the +// use of templates? +template +PerformanceResult IssueQueries(SystemUnderTest* sut, + const TestSettingsInternal& settings, + const LoadableSampleSet& loaded_sample_set, + SequenceGen* sequence_gen) { + // Create reponse handler. 
+ ResponseDelegateDetailed response_logger; + std::uniform_real_distribution accuracy_log_offset_dist = + std::uniform_real_distribution(0.0, 1.0); + std::mt19937 accuracy_log_offset_rng(settings.accuracy_log_rng_seed); + response_logger.accuracy_log_offset = + accuracy_log_offset_dist(accuracy_log_offset_rng); + response_logger.accuracy_log_prob = settings.accuracy_log_probability; + + // Generate queries. + auto sequence_id_start = sequence_gen->CurrentSampleId(); + std::vector queries = GenerateQueries( + settings, loaded_sample_set, sequence_gen, &response_logger); + + // Calculated expected number of queries + uint64_t expected_queries = + settings.target_qps * settings.min_duration.count() / 1000; + uint64_t minimum_queries = + settings.min_query_count * settings.samples_per_query; + if (scenario != TestScenario::Offline) { + expected_queries *= settings.samples_per_query; + } else { + minimum_queries = settings.min_sample_count; + } + + expected_queries = + expected_queries < minimum_queries ? minimum_queries : expected_queries; + + if (settings.accuracy_log_sampling_target > 0) { + response_logger.accuracy_log_prob = + (double)settings.accuracy_log_sampling_target / expected_queries; + } + auto sequence_id_end = sequence_gen->CurrentSampleId(); + size_t max_latencies_to_record = sequence_id_end - sequence_id_start; + + // Initialize logger for latency recording. + GlobalLogger().RestartLatencyRecording(sequence_id_start, + max_latencies_to_record); + + // Create and initialize an IssueQueryState. + IssueQueryState state{ + sut, &queries, &response_logger, &settings, mode, {}, {}, false, 0, + 0, {}}; + auto& controller = IssueQueryController::GetInstance(); + + // Set number of IssueQueryThreads and wait for the threads to register. + controller.SetNumThreads(settings.requested.server_num_issue_query_threads); + + // Start issuing the queries. + controller.StartIssueQueries(&state); + + // Gather query issuing statistics. 
+ const auto start_for_power = state.start_for_power; + const auto start = state.start_time; + const auto ran_out_of_generated_queries = state.ran_out_of_generated_queries; + const auto queries_issued = state.queries_issued; + const auto expected_latencies = state.expected_latencies; + + // Let the SUT know it should not expect any more queries. + sut->FlushQueries(); + + if (mode == TestMode::PerformanceOnly && ran_out_of_generated_queries) { + LogDetail([](AsyncDetail& detail) { +#if USE_NEW_LOGGING_FORMAT + MLPERF_LOG_ERROR( + detail, "error_runtime", + "Ending early: Ran out of generated queries to issue before the " + "minimum query count and test duration were reached. " + "Please update the relevant expected latency or target qps in the " + "TestSettings so they are more accurate."); +#else + detail.Error( + "Ending early: Ran out of generated queries to issue before the " + "minimum query count and test duration were reached."); + detail( + "Please update the relevant expected latency or target qps in the " + "TestSettings so they are more accurate."); +#endif + }); + } + + // Wait for tail queries to complete and collect all the latencies. + // We have to keep the synchronization primitives alive until the SUT + // is done with them. + auto& final_query = queries[queries_issued - 1]; + std::vector sample_latencies( + GlobalLogger().GetLatenciesBlocking(expected_latencies)); + + // Log contention counters after every test as a sanity check. + GlobalLogger().LogContentionAndAllocations(); + + // This properly accounts for the fact that the max completion time may not + // belong to the final query. It also excludes any time spent postprocessing + // in the loadgen itself after final completion, which may be significant + // in the offline scenario. 
+ PerfClock::time_point max_completion_time = + GlobalLogger().GetMaxCompletionTime(); + auto sut_active_duration = max_completion_time - start; + LogDetail([start_for_power, sut_active_duration](AsyncDetail& detail) { + auto end_for_power = + start_for_power + + std::chrono::duration_cast( + sut_active_duration); +#if USE_NEW_LOGGING_FORMAT + MLPERF_LOG_INTERVAL_START(detail, "power_begin", + DateTimeStringForPower(start_for_power)); + MLPERF_LOG_INTERVAL_END(detail, "power_end", + DateTimeStringForPower(end_for_power)); +#else + detail("POWER_BEGIN: ", "mode", ToString(mode), "time", + DateTimeStringForPower(start_for_power)); + detail("POWER_END: ", "mode", ToString(mode), "time", + DateTimeStringForPower(end_for_power)); +#endif + }); + + double max_latency = + QuerySampleLatencyToSeconds(GlobalLogger().GetMaxLatencySoFar()); + double final_query_scheduled_time = + DurationToSeconds(final_query.scheduled_delta); + double final_query_issued_time = + DurationToSeconds(final_query.issued_start_time - start); + double final_query_all_samples_done_time = + DurationToSeconds(final_query.all_samples_done_time - start); + + std::vector query_latencies; + std::vector query_intervals; + if (scenario == TestScenario::MultiStream || + scenario == TestScenario::MultiStreamFree) { + query_latencies.resize(queries_issued); + query_intervals.resize(queries_issued); + for (size_t i = 0; i < queries_issued; i++) { + query_latencies[i] = DurationGeneratorNs{queries[i].scheduled_time}.delta( + queries[i].all_samples_done_time); + if (i < queries_issued - settings.max_async_queries) { + // For all queries except the last few, take into account actual + // skipped intervals to the next query. + query_intervals[i] = + queries[i + settings.max_async_queries].scheduled_intervals; + } else { + // For the last queries, use query latency to guess if imaginary + // queries issued at the end would have skipped intervals. 
+ query_intervals[i] = + std::ceil(settings.target_qps * + QuerySampleLatencyToSeconds(query_latencies[i])); + } + } + } + + return PerformanceResult{std::move(sample_latencies), + std::move(query_latencies), + std::move(query_intervals), + queries_issued, + max_latency, + final_query_scheduled_time, + final_query_issued_time, + final_query_all_samples_done_time}; +} + +/// \brief Wraps PerformanceResult with relevant context to change how +/// it's interpreted and reported. +/// \todo Move to results.h/cc +struct PerformanceSummary { + std::string sut_name; + TestSettingsInternal settings; + PerformanceResult pr; + + // Set by ProcessLatencies. + size_t sample_count = 0; + QuerySampleLatency sample_latency_min = 0; + QuerySampleLatency sample_latency_max = 0; + QuerySampleLatency sample_latency_mean = 0; + + /// \brief The latency at a given percentile. + struct PercentileEntry { + const double percentile; + QuerySampleLatency sample_latency = 0; + QuerySampleLatency query_latency = 0; // MultiStream only. + size_t query_intervals = 0; // MultiStream only. + }; + // Latency target percentile + PercentileEntry target_latency_percentile{settings.target_latency_percentile}; + PercentileEntry latency_percentiles[6] = {{.50}, {.90}, {.95}, + {.97}, {.99}, {.999}}; + +#if defined(_WIN32) || defined(WIN32) || defined(_WIN64) || defined(WIN64) + // MSVC complains if there is no explicit constructor. 
+ // (target_latency_percentile above depends on construction with settings) + PerformanceSummary(const std::string& sut_name_arg, + const TestSettingsInternal& settings_arg, + const PerformanceResult& pr_arg) + : sut_name(sut_name_arg), settings(settings_arg), pr(pr_arg){}; +#endif + void ProcessLatencies(); + + bool MinDurationMet(std::string* recommendation); + bool MinQueriesMet(); + bool MinSamplesMet(); + bool HasPerfConstraints(); + bool PerfConstraintsMet(std::string* recommendation); + void LogSummary(AsyncSummary& summary); + void LogDetail(AsyncDetail& detail); +}; + +void PerformanceSummary::ProcessLatencies() { + if (pr.sample_latencies.empty()) { + return; + } + + sample_count = pr.sample_latencies.size(); + + QuerySampleLatency accumulated_latency = 0; + for (auto latency : pr.sample_latencies) { + accumulated_latency += latency; + } + sample_latency_mean = accumulated_latency / sample_count; + + std::sort(pr.sample_latencies.begin(), pr.sample_latencies.end()); + + target_latency_percentile.sample_latency = + pr.sample_latencies[sample_count * target_latency_percentile.percentile]; + sample_latency_min = pr.sample_latencies.front(); + sample_latency_max = pr.sample_latencies.back(); + for (auto& lp : latency_percentiles) { + assert(lp.percentile >= 0.0); + assert(lp.percentile < 1.0); + lp.sample_latency = pr.sample_latencies[sample_count * lp.percentile]; + } + + // MultiStream only after this point. + if (settings.scenario != TestScenario::MultiStream && + settings.scenario != TestScenario::MultiStreamFree) { + return; + } + + // Calculate per-query stats. 
+ size_t query_count = pr.queries_issued; + assert(pr.query_latencies.size() == query_count); + assert(pr.query_intervals.size() == query_count); + std::sort(pr.query_latencies.begin(), pr.query_latencies.end()); + std::sort(pr.query_intervals.begin(), pr.query_intervals.end()); + target_latency_percentile.query_latency = + pr.query_latencies[query_count * target_latency_percentile.percentile]; + target_latency_percentile.query_intervals = + pr.query_intervals[query_count * target_latency_percentile.percentile]; + for (auto& lp : latency_percentiles) { + lp.query_latency = pr.query_latencies[query_count * lp.percentile]; + lp.query_intervals = pr.query_intervals[query_count * lp.percentile]; + } +} + +bool PerformanceSummary::MinDurationMet(std::string* recommendation) { + recommendation->clear(); + const double min_duration = DurationToSeconds(settings.min_duration); + bool min_duration_met = false; + switch (settings.scenario) { + case TestScenario::Offline: + min_duration_met = pr.max_latency >= min_duration; + break; + case TestScenario::Server: + min_duration_met = pr.final_query_scheduled_time >= min_duration; + break; + case TestScenario::SingleStream: + case TestScenario::MultiStream: + case TestScenario::MultiStreamFree: + min_duration_met = pr.final_query_issued_time >= min_duration; + break; + } + if (min_duration_met) { + return true; + } + + switch (settings.scenario) { + case TestScenario::SingleStream: + *recommendation = + "Decrease the expected latency so the loadgen pre-generates more " + "queries."; + break; + case TestScenario::MultiStream: + *recommendation = + "MultiStream should always meet the minimum duration. 
" + "Please file a bug."; + break; + case TestScenario::MultiStreamFree: + *recommendation = + "Increase the target QPS so the loadgen pre-generates more queries."; + break; + case TestScenario::Server: + *recommendation = + "Increase the target QPS so the loadgen pre-generates more queries."; + break; + case TestScenario::Offline: + *recommendation = + "Increase expected QPS so the loadgen pre-generates a larger " + "(coalesced) query."; + break; + } + return false; +} + +bool PerformanceSummary::MinQueriesMet() { + return pr.queries_issued >= settings.min_query_count; +} + +bool PerformanceSummary::MinSamplesMet() { + return sample_count >= settings.min_sample_count; +} + +bool PerformanceSummary::HasPerfConstraints() { + return settings.scenario == TestScenario::MultiStream || + settings.scenario == TestScenario::MultiStreamFree || + settings.scenario == TestScenario::Server; +} + +bool PerformanceSummary::PerfConstraintsMet(std::string* recommendation) { + recommendation->clear(); + bool perf_constraints_met = true; + switch (settings.scenario) { + case TestScenario::SingleStream: + break; + case TestScenario::MultiStream: + ProcessLatencies(); + if (target_latency_percentile.query_intervals >= 2) { + *recommendation = "Reduce samples per query to improve latency."; + perf_constraints_met = false; + } + break; + case TestScenario::MultiStreamFree: + ProcessLatencies(); + if (target_latency_percentile.query_latency > + settings.target_latency.count()) { + *recommendation = "Reduce samples per query to improve latency."; + perf_constraints_met = false; + } + break; + case TestScenario::Server: + ProcessLatencies(); + if (target_latency_percentile.sample_latency > + settings.target_latency.count()) { + *recommendation = "Reduce target QPS to improve latency."; + perf_constraints_met = false; + } + break; + case TestScenario::Offline: + break; + } + return perf_constraints_met; +} + +void PerformanceSummary::LogSummary(AsyncSummary& summary) { + ProcessLatencies(); 
+ + summary( + "================================================\n" + "MLPerf Results Summary\n" + "================================================"); + summary("SUT name : ", sut_name); + summary("Scenario : ", ToString(settings.scenario)); + summary("Mode : ", ToString(settings.mode)); + + switch (settings.scenario) { + case TestScenario::SingleStream: { + summary(DoubleToString(target_latency_percentile.percentile * 100, 0) + + "th percentile latency (ns) : ", + target_latency_percentile.sample_latency); + break; + } + case TestScenario::MultiStream: { + summary("Samples per query : ", settings.samples_per_query); + break; + } + case TestScenario::MultiStreamFree: { + double samples_per_second = pr.queries_issued * + settings.samples_per_query / + pr.final_query_all_samples_done_time; + summary("Samples per second : ", samples_per_second); + break; + } + case TestScenario::Server: { + // Subtract 1 from sample count since the start of the final sample + // represents the open end of the time range: i.e. [begin, end). + // This makes sense since: + // a) QPS doesn't apply if there's only one sample; it's pure latency. + // b) If you have precisely 1k QPS, there will be a sample exactly on + // the 1 second time point; but that would be the 1001th sample in + // the stream. Given the first 1001 queries, the QPS is + // 1000 queries / 1 second. 
+ double qps_as_scheduled = + (sample_count - 1) / pr.final_query_scheduled_time; + summary("Scheduled samples per second : ", + DoubleToString(qps_as_scheduled)); + break; + } + case TestScenario::Offline: { + double samples_per_second = sample_count / pr.max_latency; + summary("Samples per second: ", samples_per_second); + break; + } + } + + std::string min_duration_recommendation; + std::string perf_constraints_recommendation; + + bool min_duration_met = MinDurationMet(&min_duration_recommendation); + bool min_queries_met = MinQueriesMet() && MinSamplesMet(); + bool perf_constraints_met = + PerfConstraintsMet(&perf_constraints_recommendation); + bool all_constraints_met = + min_duration_met && min_queries_met && perf_constraints_met; + summary("Result is : ", all_constraints_met ? "VALID" : "INVALID"); + if (HasPerfConstraints()) { + summary(" Performance constraints satisfied : ", + perf_constraints_met ? "Yes" : "NO"); + } + summary(" Min duration satisfied : ", min_duration_met ? "Yes" : "NO"); + summary(" Min queries satisfied : ", min_queries_met ? 
"Yes" : "NO"); + + if (!all_constraints_met) { + summary("Recommendations:"); + if (!perf_constraints_met) { + summary(" * " + perf_constraints_recommendation); + } + if (!min_duration_met) { + summary(" * " + min_duration_recommendation); + } + if (!min_queries_met) { + summary( + " * The test exited early, before enough queries were issued.\n" + " See the detailed log for why this may have occurred."); + } + } + + summary( + "\n" + "================================================\n" + "Additional Stats\n" + "================================================"); + + if (settings.scenario == TestScenario::SingleStream) { + double qps_w_lg = (sample_count - 1) / pr.final_query_issued_time; + double qps_wo_lg = 1 / QuerySampleLatencyToSeconds(sample_latency_mean); + summary("QPS w/ loadgen overhead : " + DoubleToString(qps_w_lg)); + summary("QPS w/o loadgen overhead : " + DoubleToString(qps_wo_lg)); + summary(""); + } else if (settings.scenario == TestScenario::Server) { + double qps_as_completed = + (sample_count - 1) / pr.final_query_all_samples_done_time; + summary("Completed samples per second : ", + DoubleToString(qps_as_completed)); + summary(""); + } else if (settings.scenario == TestScenario::MultiStream || + settings.scenario == TestScenario::MultiStreamFree) { + double ms_per_interval = std::milli::den / settings.target_qps; + summary("Intervals between each IssueQuery: ", "qps", settings.target_qps, + "ms", ms_per_interval); + for (auto& lp : latency_percentiles) { + summary(DoubleToString(lp.percentile * 100) + " percentile : ", + lp.query_intervals); + } + + summary(""); + double target_ns = settings.target_latency.count(); + double target_ms = target_ns * std::milli::den / std::nano::den; + summary("Per-query latency: ", "target_ns", + settings.target_latency.count(), "target_ms", target_ms); + for (auto& lp : latency_percentiles) { + summary( + DoubleToString(lp.percentile * 100) + " percentile latency (ns) : ", + lp.query_latency); + } + + summary(""); 
+ summary("Per-sample latency:"); + } + + summary("Min latency (ns) : ", sample_latency_min); + summary("Max latency (ns) : ", sample_latency_max); + summary("Mean latency (ns) : ", sample_latency_mean); + for (auto& lp : latency_percentiles) { + summary( + DoubleToString(lp.percentile * 100) + " percentile latency (ns) : ", + lp.sample_latency); + } + + summary( + "\n" + "================================================\n" + "Test Parameters Used\n" + "================================================"); + settings.LogSummary(summary); +} + +void PerformanceSummary::LogDetail(AsyncDetail& detail) { +#if USE_NEW_LOGGING_FORMAT + ProcessLatencies(); + + // General validity checking + std::string min_duration_recommendation; + std::string perf_constraints_recommendation; + bool min_duration_met = MinDurationMet(&min_duration_recommendation); + bool min_queries_met = MinQueriesMet() && MinSamplesMet(); + bool perf_constraints_met = + PerfConstraintsMet(&perf_constraints_recommendation); + bool all_constraints_met = + min_duration_met && min_queries_met && perf_constraints_met; + + MLPERF_LOG(detail, "result_validity", + all_constraints_met ? 
"VALID" : "INVALID"); + if (HasPerfConstraints()) { + MLPERF_LOG(detail, "result_perf_constraints_met", perf_constraints_met); + } + MLPERF_LOG(detail, "result_min_duration_met", min_duration_met); + MLPERF_LOG(detail, "result_min_queries_met", min_queries_met); + if (!all_constraints_met) { + std::string recommendation; + if (!perf_constraints_met) { + recommendation += perf_constraints_recommendation + " "; + } + if (!min_duration_met) { + recommendation += min_duration_recommendation + " "; + } + if (!min_queries_met) { + recommendation += + "The test exited early, before enough queries were issued."; + } + MLPERF_LOG(detail, "result_invalid_reason", recommendation); + } + + auto reportPerQueryLatencies = [&]() { + for (auto& lp : latency_percentiles) { + std::string percentile = DoubleToString(lp.percentile * 100); + MLPERF_LOG(detail, + "result_" + percentile + + "_percentile_num_intervals_between_queries", + lp.query_intervals); + MLPERF_LOG(detail, + "result_" + percentile + "_percentile_per_query_latency_ns", + lp.query_latency); + } + }; + + // Per-scenario performance results. + switch (settings.scenario) { + case TestScenario::SingleStream: { + double qps_w_lg = (sample_count - 1) / pr.final_query_issued_time; + double qps_wo_lg = 1 / QuerySampleLatencyToSeconds(sample_latency_mean); + MLPERF_LOG(detail, "result_qps_with_loadgen_overhead", qps_w_lg); + MLPERF_LOG(detail, "result_qps_without_loadgen_overhead", qps_wo_lg); + break; + } + case TestScenario::MultiStreamFree: { + double samples_per_second = pr.queries_issued * + settings.samples_per_query / + pr.final_query_all_samples_done_time; + MLPERF_LOG(detail, "result_samples_per_second", samples_per_second); + reportPerQueryLatencies(); + break; + } + case TestScenario::MultiStream: { + reportPerQueryLatencies(); + break; + } + case TestScenario::Server: { + // Subtract 1 from sample count since the start of the final sample + // represents the open end of the time range: i.e. [begin, end). 
+ // This makes sense since: + // a) QPS doesn't apply if there's only one sample; it's pure latency. + // b) If you have precisely 1k QPS, there will be a sample exactly on + // the 1 second time point; but that would be the 1001th sample in + // the stream. Given the first 1001 queries, the QPS is + // 1000 queries / 1 second. + double qps_as_scheduled = + (sample_count - 1) / pr.final_query_scheduled_time; + MLPERF_LOG(detail, "result_scheduled_samples_per_sec", qps_as_scheduled); + double qps_as_completed = + (sample_count - 1) / pr.final_query_all_samples_done_time; + MLPERF_LOG(detail, "result_completed_samples_per_sec", qps_as_completed); + break; + } + case TestScenario::Offline: { + double samples_per_second = sample_count / pr.max_latency; + MLPERF_LOG(detail, "result_samples_per_second", samples_per_second); + break; + } + } + + // Detailed latencies + MLPERF_LOG(detail, "result_min_latency_ns", sample_latency_min); + MLPERF_LOG(detail, "result_max_latency_ns", sample_latency_max); + MLPERF_LOG(detail, "result_mean_latency_ns", sample_latency_mean); + for (auto& lp : latency_percentiles) { + MLPERF_LOG(detail, + "result_" + DoubleToString(lp.percentile * 100) + + "_percentile_latency_ns", + lp.sample_latency); + } +#endif +} + +void LoadSamplesToRam(QuerySampleLibrary* qsl, + const std::vector& samples) { + LogDetail([&samples](AsyncDetail& detail) { +#if USE_NEW_LOGGING_FORMAT + MLPERF_LOG(detail, "loaded_qsl_set", samples); +#else + std::string set("\"["); + for (auto i : samples) { + set += std::to_string(i) + ","; + } + set.resize(set.size() - 1); + set += "]\""; + detail("Loading QSL : ", "set", set); +#endif + }); + qsl->LoadSamplesToRam(samples); +} + +/// \brief Generates random sets of samples in the QSL that we can load into +/// RAM at the same time. 
+// Shuffles all QSL sample indices with qsl_rng, then partitions them into
+// sets of settings.performance_sample_count samples (plus a possibly-smaller
+// final set). In the MultiStream scenarios each set is additionally padded
+// with copies of its own first (samples_per_query - 1) entries so queries can
+// take contiguous slices without running off the end.
+// NOTE(review): template arguments appear stripped by patch extraction here
+// (e.g. 'std::vector' without element types); restore from upstream.
+std::vector GenerateLoadableSets(
+    QuerySampleLibrary* qsl, const TestSettingsInternal& settings) {
+  auto tracer = MakeScopedTracer(
+      [](AsyncTrace& trace) { trace("GenerateLoadableSets"); });
+
+  std::vector result;
+  std::mt19937 qsl_rng(settings.qsl_rng_seed);
+
+  // Generate indices for all available samples in the QSL.
+  const size_t qsl_total_count = qsl->TotalSampleCount();
+  std::vector samples(qsl_total_count);
+  for (size_t i = 0; i < qsl_total_count; i++) {
+    samples[i] = static_cast(i);
+  }
+
+  // Randomize the order of the samples.
+  std::shuffle(samples.begin(), samples.end(), qsl_rng);
+
+  // Partition the samples into loadable sets.
+  const size_t set_size = settings.performance_sample_count;
+  const size_t set_padding =
+      (settings.scenario == TestScenario::MultiStream ||
+       settings.scenario == TestScenario::MultiStreamFree)
+          ? settings.samples_per_query - 1
+          : 0;
+  std::vector loadable_set;
+  loadable_set.reserve(set_size + set_padding);
+
+  for (auto s : samples) {
+    loadable_set.push_back(s);
+    if (loadable_set.size() == set_size) {
+      result.push_back({std::move(loadable_set), set_size});
+      loadable_set.clear();
+      loadable_set.reserve(set_size + set_padding);
+    }
+  }
+
+  if (!loadable_set.empty()) {
+    // Copy the size since it will become invalid after the move.
+    size_t loadable_set_size = loadable_set.size();
+    result.push_back({std::move(loadable_set), loadable_set_size});
+  }
+
+  // Add padding for the multi stream scenario. Padding allows the
+  // starting sample to be the same for all SUTs, independent of the value
+  // of samples_per_query, while enabling samples in a query to be contiguous.
+  for (auto& loadable_set : result) {
+    auto& set = loadable_set.set;
+    for (size_t i = 0; i < set_padding; i++) {
+      // It's not clear in the spec if the STL deallocates the old container
+      // before assigning, which would invalidate the source before the
+      // assignment happens. Even though we should have reserved enough
+      // elements above, copy the source first anyway since we are just moving
+      // integers around.
+      QuerySampleIndex p = set[i];
+      set.push_back(p);
+    }
+  }
+
+  return result;
+}
+
+/// \brief Opens and owns handles to all of the log files.
+/// The four streams (summary/detail/accuracy/trace) are opened eagerly in the
+/// constructor; callers must check CheckOutputs() before relying on them.
+struct LogOutputs {
+  LogOutputs(const LogOutputSettings& output_settings,
+             const std::string& test_date_time) {
+    std::string prefix = output_settings.outdir;
+    prefix += "/" + output_settings.prefix;
+    if (output_settings.prefix_with_datetime) {
+      prefix += test_date_time + "_";
+    }
+    const std::string& suffix = output_settings.suffix;
+
+    summary_out.open(prefix + "summary" + suffix + ".txt");
+    detail_out.open(prefix + "detail" + suffix + ".txt");
+    accuracy_out.open(prefix + "accuracy" + suffix + ".json");
+    trace_out.open(prefix + "trace" + suffix + ".json");
+  }
+
+  // Returns true iff all four log streams opened successfully; logs a
+  // message to stderr for each stream that failed.
+  bool CheckOutputs() {
+    bool all_ofstreams_good = true;
+    if (!summary_out.good()) {
+      all_ofstreams_good = false;
+      std::cerr << "LoadGen: Failed to open summary file.";
+    }
+    if (!detail_out.good()) {
+      all_ofstreams_good = false;
+      std::cerr << "LoadGen: Failed to open detailed log file.";
+    }
+    if (!accuracy_out.good()) {
+      all_ofstreams_good = false;
+      std::cerr << "LoadGen: Failed to open accuracy log file.";
+    }
+    if (!trace_out.good()) {
+      all_ofstreams_good = false;
+      std::cerr << "LoadGen: Failed to open trace file.";
+    }
+    return all_ofstreams_good;
+  }
+
+  std::ofstream summary_out;
+  std::ofstream detail_out;
+  std::ofstream accuracy_out;
+  std::ofstream trace_out;
+};
+
+/// \brief Find boundaries of performance settings by widening bounds
+/// exponentially.
+/// \details To find an upper bound of performance, widen an
+/// upper bound exponentially until finding a bound that can't satisfy
+/// performance constraints. i.e. [1, 2) -> [2, 4) -> [4, 8) -> ...
+template +std::pair FindBoundaries( + SystemUnderTest* sut, QuerySampleLibrary* qsl, SequenceGen* sequence_gen, + PerformanceSummary l_perf_summary) { + // Get upper bound + TestSettingsInternal u_settings = l_perf_summary.settings; + find_peak_performance::WidenPerformanceField(&u_settings); + + LogDetail( + [l_field = find_peak_performance::ToStringPerformanceField( + l_perf_summary.settings), + u_field = find_peak_performance::ToStringPerformanceField( + u_settings)](AsyncDetail& detail) { +#if USE_NEW_LOGGING_FORMAT + MLPERF_LOG(detail, "generic_message", + "FindBoundaries: Checking fields [" + l_field + ", " + + u_field + ")"); +#else + detail("FindBoundaries: Checking fields [" + l_field + ", " + u_field + + ")"); +#endif + }); + + std::vector loadable_sets( + loadgen::GenerateLoadableSets(qsl, u_settings)); + const LoadableSampleSet& performance_set = loadable_sets.front(); + LoadSamplesToRam(qsl, performance_set.set); + + PerformanceResult u_pr(IssueQueries( + sut, u_settings, performance_set, sequence_gen)); + PerformanceSummary u_perf_summary{sut->Name(), u_settings, std::move(u_pr)}; + + qsl->UnloadSamplesFromRam(performance_set.set); + + std::string tmp; + if (!u_perf_summary.PerfConstraintsMet(&tmp)) { + return std::make_pair(l_perf_summary, u_perf_summary); + } else { + return FindBoundaries(sut, qsl, sequence_gen, u_perf_summary); + } +} + +/// \brief Find peak performance by binary search. 
+/// \details The found lower & upper bounds by the function 'FindBoundaries' are +/// used as initial bounds of binary search +template +PerformanceSummary FindPeakPerformanceBinarySearch( + SystemUnderTest* sut, QuerySampleLibrary* qsl, SequenceGen* sequence_gen, + const LoadableSampleSet& performance_set, PerformanceSummary l_perf_summary, + PerformanceSummary u_perf_summary) { + if (find_peak_performance::IsFinished(l_perf_summary.settings, + u_perf_summary.settings)) { + return l_perf_summary; + } + + const TestSettingsInternal m_settings = + find_peak_performance::MidOfBoundaries(l_perf_summary.settings, + u_perf_summary.settings); + + LogDetail([l_field = + find_peak_performance::ToStringPerformanceField( + l_perf_summary.settings), + u_field = + find_peak_performance::ToStringPerformanceField( + u_perf_summary.settings), + m_field = + find_peak_performance::ToStringPerformanceField( + m_settings)](AsyncDetail& detail) { +#if USE_NEW_LOGGING_FORMAT + MLPERF_LOG( + detail, "generic_message", + "FindPeakPerformanceBinarySearch: Testing the mid value of bounds [" + + l_field + ", " + u_field + "): " + m_field); +#else + detail( + "FindPeakPerformanceBinarySearch: Testing the mid value of bounds [" + + l_field + ", " + u_field + "): " + m_field); +#endif + }); + + PerformanceResult m_pr(IssueQueries( + sut, m_settings, performance_set, sequence_gen)); + PerformanceSummary m_perf_summary{sut->Name(), m_settings, std::move(m_pr)}; + + std::string tmp; + if (m_perf_summary.PerfConstraintsMet(&tmp)) { + return FindPeakPerformanceBinarySearch( + sut, qsl, sequence_gen, performance_set, m_perf_summary, + u_perf_summary); + } else { + return FindPeakPerformanceBinarySearch( + sut, qsl, sequence_gen, performance_set, l_perf_summary, + m_perf_summary); + } +} + +/// \brief Runs the performance mode, templated by scenario. 
+template +void RunPerformanceMode(SystemUnderTest* sut, QuerySampleLibrary* qsl, + const TestSettingsInternal& settings, + SequenceGen* sequence_gen) { + LogDetail([](AsyncDetail& detail) { +#if USE_NEW_LOGGING_FORMAT + MLPERF_LOG(detail, "generic_message", "Starting performance mode"); +#else + detail("Starting performance mode:"); +#endif + }); + + // Use first loadable set as the performance set. + std::vector loadable_sets( + loadgen::GenerateLoadableSets(qsl, settings)); + const LoadableSampleSet& performance_set = loadable_sets.front(); + LoadSamplesToRam(qsl, performance_set.set); + + // Start PerfClock/system_clock timers for measuring performance interval + // for comparison vs external timer. + auto pc_start_ts = PerfClock::now(); + auto sc_start_ts = std::chrono::system_clock::now(); + if (settings.print_timestamps) { + std::cout << "Loadgen :: Perf mode start. system_clock Timestamp = " + << std::chrono::system_clock::to_time_t(sc_start_ts) << "\n" + << std::flush; + } + + PerformanceResult pr(IssueQueries( + sut, settings, performance_set, sequence_gen)); + + // Measure PerfClock/system_clock timer durations for comparison vs + // external timer. + auto pc_stop_ts = PerfClock::now(); + auto sc_stop_ts = std::chrono::system_clock::now(); + auto pc_duration = std::chrono::duration_cast( + pc_stop_ts - pc_start_ts) + .count(); + auto sc_duration = std::chrono::duration_cast( + sc_stop_ts - sc_start_ts) + .count(); + float pc_sc_ratio = static_cast(pc_duration) / sc_duration; + if (settings.print_timestamps) { + std::cout << "Loadgen :: Perf mode stop. 
systme_clock Timestamp = " + << std::chrono::system_clock::to_time_t(sc_stop_ts) << "\n" + << std::flush; + std::cout << "Loadgen :: PerfClock Perf duration = " << pc_duration + << "ms\n" + << std::flush; + std::cout << "Loadgen :: system_clock Perf duration = " << sc_duration + << "ms\n" + << std::flush; + std::cout << "Loadgen :: PerfClock/system_clock ratio = " << std::fixed + << std::setprecision(4) << pc_sc_ratio << "\n" + << std::flush; + } + + if (pc_sc_ratio > 1.01 || pc_sc_ratio < 0.99) { + LogDetail([pc_sc_ratio](AsyncDetail& detail) { +#if USE_NEW_LOGGING_FORMAT + std::stringstream ss; + ss << "PerfClock and system_clock differ by more than 1%! " + << " pc_sc_ratio: " << pc_sc_ratio; + MLPERF_LOG_ERROR(detail, "error_runtime", ss.str()); +#else + detail.Error("PerfClock and system_clock differ by more than 1\%! ", + "pc_sc_ratio", pc_sc_ratio); +#endif + }); + } else if (pc_sc_ratio > 1.001 || pc_sc_ratio < 0.999) { + LogDetail([pc_sc_ratio](AsyncDetail& detail) { +#if USE_NEW_LOGGING_FORMAT + std::stringstream ss; + ss << "PerfClock and system_clock differ by more than 0.1%! " + << " pc_sc_ratio: " << pc_sc_ratio; + MLPERF_LOG_WARNING(detail, "warning_generic_message", ss.str()); +#else + detail.Warning("PerfClock and system_clock differ by more than 0.1\%. ", + "pc_sc_ratio", pc_sc_ratio); +#endif + }); + } + + sut->ReportLatencyResults(pr.sample_latencies); + + PerformanceSummary perf_summary{sut->Name(), settings, std::move(pr)}; + LogSummary([perf_summary](AsyncSummary& summary) mutable { + perf_summary.LogSummary(summary); + }); + // Create a copy to prevent thread hazard between LogSummary and LogDetail. + PerformanceSummary perf_summary_detail{perf_summary}; + LogDetail([perf_summary_detail](AsyncDetail& detail) mutable { + perf_summary_detail.LogDetail(detail); + }); + + qsl->UnloadSamplesFromRam(performance_set.set); +} + +/// \brief Runs the binary search mode, templated by scenario. +/// \details 1. 
Check whether lower bound from user satisfies the performance +/// constraints, 2. Find an upper bound using the function 'FindBoundaries' +/// based on the lower bound, 3. Find peak performance settings using the +/// function 'FindPeakPerformanceBinarySearch'. note: Since we can't find a +/// lower bound programmatically because of the monotonicity issue of Server +/// scenario, rely on user's settings. After resolving this issue, we can +/// make the function 'FindBoundaries' find a lower bound as well from some +/// random initial settings. +template +void FindPeakPerformanceMode(SystemUnderTest* sut, QuerySampleLibrary* qsl, + const TestSettingsInternal& base_settings, + SequenceGen* sequence_gen) { + LogDetail([](AsyncDetail& detail) { +#if USE_NEW_LOGGING_FORMAT + MLPERF_LOG(detail, "generic_message", "Starting FindPeakPerformance mode"); +#else + detail("Starting FindPeakPerformance mode:"); +#endif + }); + + if (scenario != TestScenario::MultiStream && + scenario != TestScenario::MultiStreamFree && + scenario != TestScenario::Server) { + LogDetail([unsupported_scenario = ToString(scenario)](AsyncDetail& detail) { +#if USE_NEW_LOGGING_FORMAT + MLPERF_LOG_ERROR(detail, "error_invalid_config", + find_peak_performance::kNotSupportedMsg); +#else + detail.Error(find_peak_performance::kNotSupportedMsg); +#endif + }); + return; + } + + LogDetail( + [base_field = find_peak_performance::ToStringPerformanceField( + base_settings)](AsyncDetail& detail) { +#if USE_NEW_LOGGING_FORMAT + MLPERF_LOG( + detail, "generic_message", + "FindPeakPerformance: Check validity of the base settings field: " + + base_field); +#else + detail( + "FindPeakPerformance: Check validity of the base settings field: " + + base_field); +#endif + }); + + // 1. Check whether the lower bound came from user satisfy performance + // constraints or not. 
+ std::vector base_loadable_sets( + loadgen::GenerateLoadableSets(qsl, base_settings)); + const LoadableSampleSet& base_performance_set = base_loadable_sets.front(); + LoadSamplesToRam(qsl, base_performance_set.set); + + PerformanceResult base_pr(IssueQueries( + sut, base_settings, base_performance_set, sequence_gen)); + PerformanceSummary base_perf_summary{sut->Name(), base_settings, + std::move(base_pr)}; + + // We can also use all_constraints_met to check performance constraints, + // but to reduce searching time, leave it up to whether the settings satisfy + // min duration & min queries or not to users. + std::string msg; + if (!base_perf_summary.PerfConstraintsMet(&msg)) { + LogDetail([msg](AsyncDetail& detail) { +#if USE_NEW_LOGGING_FORMAT + std::stringstream ss; + ss << "FindPeakPerformance: Initial lower bound does not satisfy " + << "performance constraints, msg: " << msg; + MLPERF_LOG_ERROR(detail, "error_runtime", ss.str()); +#else + detail.Error( + "FindPeakPerformance: Initial lower bound does not satisfy " + "performance constraints, msg: " + + msg); +#endif + }); + + sut->ReportLatencyResults(base_perf_summary.pr.sample_latencies); + + PerformanceSummary perf_summary{sut->Name(), base_settings, + std::move(base_perf_summary.pr)}; + LogSummary([perf_summary](AsyncSummary& summary) mutable { + perf_summary.LogSummary(summary); + }); + // Create a copy to prevent thread hazard between LogSummary and LogDetail. + PerformanceSummary perf_summary_detail{perf_summary}; + LogDetail([perf_summary_detail](AsyncDetail& detail) mutable { + perf_summary_detail.LogDetail(detail); + }); + + qsl->UnloadSamplesFromRam(base_performance_set.set); + + return; + } + + // Clear loaded samples. + qsl->UnloadSamplesFromRam(base_performance_set.set); + + // 2. Find an upper bound based on the lower bound. 
+ std::pair boundaries = + FindBoundaries(sut, qsl, sequence_gen, base_perf_summary); + PerformanceSummary l_perf_summary = boundaries.first; + PerformanceSummary u_perf_summary = boundaries.second; + + LogDetail( + [l_field = find_peak_performance::ToStringPerformanceField( + l_perf_summary.settings), + u_field = find_peak_performance::ToStringPerformanceField( + u_perf_summary.settings)](AsyncDetail& detail) { +#if USE_NEW_LOGGING_FORMAT + MLPERF_LOG(detail, "generic_message", + "FindPeakPerformance: Found boundaries: [" + l_field + ", " + + u_field + ")"); +#else + detail("FindPeakPerformance: Found boundaries: [" + l_field + ", " + + u_field + ")"); +#endif + }); + + // Reuse performance_set, u_perf_summary has the largest 'samples_per_query'. + std::vector loadable_sets( + loadgen::GenerateLoadableSets(qsl, u_perf_summary.settings)); + const LoadableSampleSet& performance_set = loadable_sets.front(); + LoadSamplesToRam(qsl, performance_set.set); + + // 3. Find peak performance settings using the found boundaries + PerformanceSummary perf_summary = FindPeakPerformanceBinarySearch( + sut, qsl, sequence_gen, performance_set, l_perf_summary, u_perf_summary); + + // Print-out the peak performance test setting. + LogDetail([field = find_peak_performance::ToStringPerformanceField( + perf_summary.settings)](AsyncDetail& detail) { +#if USE_NEW_LOGGING_FORMAT + MLPERF_LOG(detail, "generic_message", + "FindPeakPerformance: Found peak performance field: " + field); +#else + detail("FindPeakPerformance: Found peak performance field: " + field); +#endif + }); + + sut->ReportLatencyResults(perf_summary.pr.sample_latencies); + + LogSummary([perf_summary](AsyncSummary& summary) mutable { + perf_summary.LogSummary(summary); + }); + // Create a copy to prevent thread hazard between LogSummary and LogDetail. 
+ PerformanceSummary perf_summary_detail{perf_summary}; + LogDetail([perf_summary_detail](AsyncDetail& detail) mutable { + perf_summary_detail.LogDetail(detail); + }); + + qsl->UnloadSamplesFromRam(performance_set.set); +} + +/// \brief Runs the accuracy mode, templated by scenario. +template +void RunAccuracyMode(SystemUnderTest* sut, QuerySampleLibrary* qsl, + const TestSettingsInternal& settings, + SequenceGen* sequence_gen) { + LogDetail([](AsyncDetail& detail) { +#if USE_NEW_LOGGING_FORMAT + MLPERF_LOG(detail, "generic_message", "Starting accuracy mode"); +#else + detail("Starting accuracy mode:"); +#endif + }); + + std::vector loadable_sets( + loadgen::GenerateLoadableSets(qsl, settings)); + + for (auto& loadable_set : loadable_sets) { + { + auto tracer = MakeScopedTracer( + [count = loadable_set.set.size()](AsyncTrace& trace) { + trace("LoadSamples", "count", count); + }); + LoadSamplesToRam(qsl, loadable_set.set); + } + + PerformanceResult pr(IssueQueries( + sut, settings, loadable_set, sequence_gen)); + + { + auto tracer = MakeScopedTracer( + [count = loadable_set.set.size()](AsyncTrace& trace) { + trace("UnloadSampes", "count", count); + }); + qsl->UnloadSamplesFromRam(loadable_set.set); + } + } +} + +/// \brief Routes runtime scenario requests to the corresponding instances +/// of its templated mode functions. 
+struct RunFunctions { + using Signature = void(SystemUnderTest* sut, QuerySampleLibrary* qsl, + const TestSettingsInternal& settings, + SequenceGen* sequence_gen); + + template + static RunFunctions GetCompileTime() { + return {(RunAccuracyMode), + (RunPerformanceMode), + (FindPeakPerformanceMode)}; + } + + static RunFunctions Get(TestScenario run_time_scenario) { + switch (run_time_scenario) { + case TestScenario::SingleStream: + return GetCompileTime(); + case TestScenario::MultiStream: + return GetCompileTime(); + case TestScenario::MultiStreamFree: + return GetCompileTime(); + case TestScenario::Server: + return GetCompileTime(); + case TestScenario::Offline: + return GetCompileTime(); + } + // We should not reach this point. + assert(false); + return GetCompileTime(); + } + + Signature& accuracy; + Signature& performance; + Signature& find_peak_performance; +}; + +} // namespace loadgen + +void StartTest(SystemUnderTest* sut, QuerySampleLibrary* qsl, + const TestSettings& requested_settings, + const LogSettings& log_settings) { + GlobalLogger().StartIOThread(); + + const std::string test_date_time = CurrentDateTimeISO8601(); + + loadgen::LogOutputs log_outputs(log_settings.log_output, test_date_time); + if (!log_outputs.CheckOutputs()) { + return; + } + + GlobalLogger().StartLogging(&log_outputs.summary_out, &log_outputs.detail_out, + &log_outputs.accuracy_out, + log_settings.log_output.copy_detail_to_stdout, + log_settings.log_output.copy_summary_to_stdout); + + if (log_settings.enable_trace) { + GlobalLogger().StartNewTrace(&log_outputs.trace_out, PerfClock::now()); + } + + LogLoadgenVersion(); + LogDetail([sut, qsl, test_date_time](AsyncDetail& detail) { +#if USE_NEW_LOGGING_FORMAT + MLPERF_LOG(detail, "test_datetime", test_date_time); + MLPERF_LOG(detail, "sut_name", sut->Name()); + MLPERF_LOG(detail, "qsl_name", qsl->Name()); + MLPERF_LOG(detail, "qsl_reported_total_count", qsl->TotalSampleCount()); + MLPERF_LOG(detail, "qsl_reported_performance_count", 
+ qsl->PerformanceSampleCount()); +#else + detail("Date + time of test: ", test_date_time); + detail("System Under Test (SUT) name: ", sut->Name()); + detail("Query Sample Library (QSL) name: ", qsl->Name()); + detail("QSL total size: ", qsl->TotalSampleCount()); + detail("QSL performance size*: ", qsl->PerformanceSampleCount()); + detail("*TestSettings (performance_sample_count_override) can override"); + detail("*Refer to Effective Settings for actual value"); +#endif + }); + + TestSettings test_settings = requested_settings; + // Look for Audit Config file to override TestSettings during audit + const std::string audit_config_filename = "audit.config"; + if (FileExists(audit_config_filename)) { + LogDetail([](AsyncDetail& detail) { +#if USE_NEW_LOGGING_FORMAT + MLPERF_LOG_WARNING(detail, "warning_generic_message", + "Found Audit Config file (audit.config)." + " Overriding TestSettings from audit.config file."); +#else + detail( + "Found Audit Config file (audit.config)." + " Overriding TestSettings from audit.config file."); +#endif + }); + std::string audit_scenario = loadgen::ToString(test_settings.scenario); + // Remove Spaces from the string + RemoveValue(&audit_scenario, ' '); + const std::string generic_model = "*"; + test_settings.FromConfig(audit_config_filename, generic_model, + audit_scenario); + } + + loadgen::TestSettingsInternal sanitized_settings( + test_settings, qsl->PerformanceSampleCount()); + sanitized_settings.LogAllSettings(); + + auto run_funcs = loadgen::RunFunctions::Get(sanitized_settings.scenario); + + loadgen::SequenceGen sequence_gen; + switch (sanitized_settings.mode) { + case TestMode::SubmissionRun: + run_funcs.accuracy(sut, qsl, sanitized_settings, &sequence_gen); + run_funcs.performance(sut, qsl, sanitized_settings, &sequence_gen); + break; + case TestMode::AccuracyOnly: + run_funcs.accuracy(sut, qsl, sanitized_settings, &sequence_gen); + break; + case TestMode::PerformanceOnly: + run_funcs.performance(sut, qsl, 
sanitized_settings, &sequence_gen); + break; + case TestMode::FindPeakPerformance: + run_funcs.find_peak_performance(sut, qsl, sanitized_settings, + &sequence_gen); + break; + } + + loadgen::IssueQueryController::GetInstance().EndThreads(); + + // Stop tracing after logging so all logs are captured in the trace. + GlobalLogger().StopLogging(); + GlobalLogger().StopTracing(); + GlobalLogger().StopIOThread(); +} + +void AbortTest() { + loadgen::IssueQueryController::GetInstance().EndThreads(); + GlobalLogger().StopLogging(); + GlobalLogger().StopTracing(); + GlobalLogger().StopIOThread(); +} + +void QuerySamplesComplete(QuerySampleResponse* responses, + size_t response_count, const ResponseCallback& response_cb) { + PerfClock::time_point timestamp = PerfClock::now(); + + auto tracer = MakeScopedTracer( + [](AsyncTrace& trace) { trace("QuerySamplesComplete"); }); + + const QuerySampleResponse* end = responses + response_count; + + // Notify first to unblock loadgen production ASAP. + for (QuerySampleResponse* response = responses; response < end; response++) { + loadgen::SampleMetadata* sample = + reinterpret_cast(response->id); + loadgen::QueryMetadata* query = sample->query_metadata; + query->NotifyOneSampleCompleted(timestamp); + } + + // Log samples. + for (QuerySampleResponse* response = responses; response < end; response++) { + loadgen::SampleMetadata* sample = + reinterpret_cast(response->id); + loadgen::QueryMetadata* query = sample->query_metadata; + query->response_delegate->SampleComplete(sample, response, timestamp, response_cb); + } +} + +} // namespace mlperf diff --git a/benchmarks/rnnt/ootb/inference/loadgen/loadgen.h b/benchmarks/rnnt/ootb/inference/loadgen/loadgen.h new file mode 100644 index 0000000..cb2f4cb --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/loadgen/loadgen.h @@ -0,0 +1,96 @@ +/* Copyright 2019 The MLPerf Authors. All Rights Reserved. 
+Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +/// \file +/// \brief Provides the entry points for a SUT to start a test and respond +/// to issued queries. + +#ifndef MLPERF_LOADGEN_LOADGEN_H_ +#define MLPERF_LOADGEN_LOADGEN_H_ + +#include +#include + +/// \brief Contains the loadgen API. +namespace mlperf { + +struct QuerySampleResponse; +class QuerySampleLibrary; +class SystemUnderTest; +struct TestSettings; +struct LogSettings; + +using ResponseCallback = std::function; + +/// \addtogroup LoadgenAPI Loadgen API +/// @{ + +/// +/// \brief SUT calls this to notify loadgen of completed samples. +/// \details +/// * The samples may be from any combination of queries or partial queries as +/// issued by \link mlperf::SystemUnderTest::IssueQuery +/// +/// SystemUnderTest::IssueQuery \endlink. +/// * The SUT is responsible for owning and allocating the reponse data. The +/// loadgen will copy the response data if needed (e.g. for accuracy mode). +/// + If no response callback is provided, the response data must remain valid +/// for the entire duration of this call. +/// + The response callback is untimed; it is called for each response in +/// responses after the loadgen records the completion time and before the +/// loadgen copies the response data. The response callback enables the +/// loadgen to simulate response data being stored in accelerator DRAM. 
+/// After the response callback is called, response data must reside on the +/// host so that the loadgen can copy it. Submitters must seek prior approval +/// to use this feature of loadgen (refer to +/// https://github.com/mlcommons/inference_policies/blob/master/inference_rules.adoc#5-load-generator). +/// * All calls to QuerySampleComplete are thread-safe and wait-free bounded. +/// + Any number of threads can call QuerySampleComplete simultaneously. +/// + Regardless of where any other thread stalls, the current thread will +/// finish QuerySampleComplete in a bounded number of cycles. +/// + Note: If a callback is provided, the SUT must ensure that the callback +/// is also thread-safe and wait-free bounded for the above to hold. +void QuerySamplesComplete(QuerySampleResponse* responses, + size_t response_count, const ResponseCallback& response_cb = {}); + +/// +/// \brief Starts the test against SUT with the specified settings. +/// \details This is the C++ entry point. See mlperf::c::StartTest for the +/// C entry point. +/// +void StartTest(SystemUnderTest* sut, QuerySampleLibrary* qsl, + const TestSettings& requested_settings, + const LogSettings& log_settings); + +/// +/// \brief Aborts the running test. +/// \details This function will stop issueing new samples to the SUT. StartTest +/// will return after the current inference finishes. Since StartTest is a +/// blocking function, this function can only be called in another thread. +void AbortTest(); + +/// +/// \brief Register a thread for query issuing in Server scenario. +/// \details If a thread registers itself, the thread(s) is used to call SUT's +/// IssueQuery(). This function is blocking until the entire test is done. The +/// number of registered threads must match server_num_issue_query_threads in +/// TestSettings. This function only has effect in Server scenario. +/// This is the C++ entry point. See mlperf::c::RegisterIssueQueryThread for the +/// C entry point. 
+/// +void RegisterIssueQueryThread(); + +/// @} + +} // namespace mlperf + +#endif // MLPERF_LOADGEN_LOADGEN_H_ diff --git a/benchmarks/rnnt/ootb/inference/loadgen/loadgen_integration_diagram.svg b/benchmarks/rnnt/ootb/inference/loadgen/loadgen_integration_diagram.svg new file mode 100644 index 0000000..17dd1b4 --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/loadgen/loadgen_integration_diagram.svg @@ -0,0 +1,85 @@ + + + + + + + + +Model + Dataset + + + +Pre Processor + + + +Post Processor + + + +Benchmark + + + +Backend + + + +LoadGen + + + + + + + + + + + + + + + + + + + + + + + +1 + + + +2 + + +3 + + +5 + + +4 + + + +LoadGen Logs + + + + + +6 + + + + + + + \ No newline at end of file diff --git a/benchmarks/rnnt/ootb/inference/loadgen/logging.cc b/benchmarks/rnnt/ootb/inference/loadgen/logging.cc new file mode 100644 index 0000000..3341747 --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/loadgen/logging.cc @@ -0,0 +1,1096 @@ +/* Copyright 2019 The MLPerf Authors. All Rights Reserved. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +/// \file +/// \brief Implements a logging system with a central IO thread that handles +/// all stringification and IO. +/// \details Log-producing threads only submit lambdas to be executed on the +/// IO thread. 
+/// All producers and consumers use lock-free operations that guarantee +/// forward progress independent of a) other stalled threads and b) where +/// those threads are stalled. +/// Each thread uses a double-buffering scheme to queue its logs. One buffer +/// is always reserved for writes and the other is reserved for reads. +/// A producing thread sends requests to the IOThread to swap the buffers +/// and the IOThread does the actual read/write swap after it has finished +/// reading the buffer it was working on. + +#include "logging.h" + +#include +#include +#include +#include +#include +#include +#include + +#if defined(_WIN32) || defined(WIN32) || defined(_WIN64) || defined(WIN64) +#define WIN32_LEAN_AND_MEAN +#define NOMINMAX +#include +#include +#define MLPERF_GET_PID() _getpid() +#else +#include +#define MLPERF_GET_PID() getpid() +#endif + +// Use system-level TID for tracing. This enables correlation with other +// performance tools that are not aware of C++ std::thread::id. +#if defined(__linux__) +#include +#define MLPERF_GET_TID() syscall(SYS_gettid) +#elif defined(_WIN32) || defined(WIN32) || defined(_WIN64) || defined(WIN64) +#define MLPERF_GET_TID() GetCurrentThreadId() +#elif defined(__APPLE__) +#define MLPERF_GET_TID() \ + std::hash{}(std::this_thread::get_id()) +#else +// TODO: std::this_thread::id is a class but MLPERF_GET_TID() assigned to +// uint64_t +#define MLPERF_GET_TID() std::this_thread::get_id() +#endif + +#include "utils.h" + +namespace mlperf { +namespace logging { + +namespace { + +uintptr_t SwapRequestSlotIsWritableValue(size_t id) { + // LSB of 1 indicates that this isn't a pointer. + // MSBs encode the id to detect collisions when a slot in + // |thread_swap_request_slots_| is reused for a different id and the request + // for the previous id is very slow. + return (id << 1) | 0x1; +} + +bool SwapRequestSlotIsReadable(uintptr_t value) { + // Valid pointers will not have their lsb set. 
+ return (value & 0x1) != 0x1; +} + +constexpr size_t kMaxThreadsToLog = 1024; +constexpr std::chrono::milliseconds kLogPollPeriod(10); + +/// \brief How many log entries to pre-allocate per thread to help avoid +/// runtime allocation. +constexpr size_t kTlsLogReservedEntryCount = 1024; + +constexpr auto kInvalidLatency = std::numeric_limits::min(); + +} // namespace + +const std::string& ArgValueTransform(const bool& value) { + static const std::string v_true("true"); + static const std::string v_false("false"); + return value ? v_true : v_false; +} + +char Bin2Hex(uint8_t four_bits) { + char number = '0' + four_bits; + char letter = ('A' - 10) + four_bits; + return four_bits < 10 ? number : letter; +} + +const std::string ArgValueTransform(const LogBinaryAsHexString& value) { + if (value.data == nullptr) { + return "\"\""; + } + std::string hex; + hex.reserve(value.data->size() + 2); + hex.push_back('"'); + for (auto b : *value.data) { + hex.push_back(Bin2Hex(b >> 4)); + hex.push_back(Bin2Hex(b & 0x0F)); + } + hex.push_back('"'); + return hex; +} + +#if USE_NEW_LOGGING_FORMAT +const std::string ArgValueTransform(const std::string& value) { + return std::string("\"") + value + std::string("\""); +} + +const std::string ArgValueTransform(const char* value) { + return std::string("\"") + std::string(value) + std::string("\""); +} + +const std::string ArgValueTransform(const std::vector& value) { + std::string s("["); + for (auto i : value) { + s += std::to_string(i) + ","; + } + s.resize(s.size() - 1); + s += "]"; + return s; +} + +const std::string ArgValueTransform( + const std::map& value) { + std::string s("{"); + for (const auto& i : value) { + s += "\""; + s += i.first; + s += "\":\""; + s += i.second; + s += "\","; + } + s.resize(s.size() - 1); + s += "}"; + return s; +} + +const std::string ArgValueTransform(const float value) { + if (value == std::numeric_limits::infinity()) { + return "Infinity"; + } + else if (value == -std::numeric_limits::infinity()) { 
+ return "-Infinity"; + } + else if (std::isnan(value)) { + return "NaN"; + } + return std::to_string(value); +} + +const std::string ArgValueTransform(const double value) { + if (value == std::numeric_limits::infinity()) { + return "Infinity"; + } + else if (value == -std::numeric_limits::infinity()) { + return "-Infinity"; + } + else if (std::isnan(value)) { + return "NaN"; + } + return std::to_string(value); +} +#endif + +ChromeTracer::ChromeTracer(std::ostream* out, PerfClock::time_point origin) + : out_(out), origin_(origin) { + WriteTraceEventHeader(); +} + +ChromeTracer::~ChromeTracer() { + WriteTraceEventFooter(); + out_->flush(); +} + +void ChromeTracer::WriteTraceEventHeader() { + // Times and durations are converted from nanoseconds to microseconds, use + // 3 decimal digits to preserve precision. + *out_ << std::fixed << std::setprecision(3) << "{\"traceEvents\":[\n"; +} + +void ChromeTracer::WriteTraceEventFooter() { + *out_ << "{\"name\":\"LastTrace\"}\n" + << "],\n" + << "\"displayTimeUnit\":\"ns\",\n" + << "\"otherData\":{\n" + << "\"ts\":" << Micros(origin_.time_since_epoch()).count() << ",\n" + << "\"version\":\"MLPerf LoadGen v1.0\"\n" + << "}\n" + << "}\n"; +} + +void AsyncLog::SetCurrentPidTid(uint64_t pid, uint64_t tid) { + current_pid_ = pid; + current_tid_ = tid; +} + +void AsyncLog::SetLogFiles(std::ostream* summary, std::ostream* detail, + std::ostream* accuracy, bool copy_detail_to_stdout, + bool copy_summary_to_stdout, + PerfClock::time_point log_origin) { + std::unique_lock lock(log_mutex_); + if (summary_out_ != &std::cerr) { + std::string warning_summary; + if (log_warning_count_ == 0) { + warning_summary = "\nNo warnings encountered during test.\n"; + } else if (log_warning_count_ == 1) { + warning_summary = "\n1 warning encountered. See detailed log.\n"; + } else if (log_warning_count_ != 0) { + warning_summary = "\n" + std::to_string(log_warning_count_) + + " warnings encountered. 
See detailed log.\n"; + } + + std::string error_summary; + if (log_error_count_ == 0) { + error_summary = "\nNo errors encountered during test.\n"; + } else if (log_error_count_ == 1) { + error_summary = "\n1 ERROR encountered. See detailed log.\n"; + } else if (log_error_count_ != 0) { + error_summary = "\n" + std::to_string(log_error_count_) + + " ERRORS encountered. See detailed log.\n"; + } + + *summary_out_ << warning_summary << error_summary; + if (copy_summary_to_stdout_) { + std::cout << warning_summary << error_summary; + } + } + if (summary_out_) { + summary_out_->flush(); + } + if (detail_out_) { + detail_out_->flush(); + } + if (accuracy_out_ != &std::cerr) { + WriteAccuracyFooterLocked(); + accuracy_out_->flush(); + } + summary_out_ = summary; + detail_out_ = detail; + accuracy_out_ = accuracy; + if (accuracy_out_ != &std::cerr) { + WriteAccuracyHeaderLocked(); + } + copy_detail_to_stdout_ = copy_detail_to_stdout; + copy_summary_to_stdout_ = copy_summary_to_stdout; + log_origin_ = log_origin; + log_error_count_ = 0; + log_warning_count_ = 0; +} + +void AsyncLog::StartNewTrace(std::ostream* trace_out, + PerfClock::time_point origin) { + std::unique_lock lock(trace_mutex_); + if (trace_out) { + tracer_ = std::make_unique(trace_out, origin); + } else { + tracer_.reset(); + } +} + +void AsyncLog::StopTrace() { + std::unique_lock lock(trace_mutex_); + tracer_.reset(); +} + +void AsyncLog::LogAccuracy(uint64_t seq_id, const QuerySampleIndex qsl_idx, + const LogBinaryAsHexString& response) { + std::unique_lock lock(log_mutex_); + if (!accuracy_out_) { + return; + } + *accuracy_out_ << (accuracy_needs_comma_ ? 
",\n{ " : "\n{ "); + LogArgs(accuracy_out_, "seq_id", seq_id, "qsl_idx", qsl_idx, "data", + response); + *accuracy_out_ << " }"; + accuracy_needs_comma_ = true; +} + +void AsyncLog::Flush() { + { + std::unique_lock lock(log_mutex_); + if (summary_out_) { + summary_out_->flush(); + } + if (detail_out_) { + detail_out_->flush(); + } + if (accuracy_out_) { + accuracy_out_->flush(); + } + } + + { + std::unique_lock lock(trace_mutex_); + if (tracer_) { + tracer_->Flush(); + } + } +} + +void AsyncLog::WriteAccuracyHeaderLocked() { + *accuracy_out_ << "["; + accuracy_needs_comma_ = false; +} + +void AsyncLog::WriteAccuracyFooterLocked() { *accuracy_out_ << "\n]\n"; } + +void AsyncLog::RestartLatencyRecording(uint64_t first_sample_sequence_id, + size_t latencies_to_reserve) { + std::unique_lock lock(latencies_mutex_); + assert(latencies_.empty()); + assert(latencies_recorded_ == latencies_expected_); + latencies_recorded_ = 0; + latencies_expected_ = 0; + max_latency_ = 0; + max_completion_timstamp_ = PerfClock::now(); + latencies_first_sample_sequence_id_ = first_sample_sequence_id; + latencies_.reserve(latencies_to_reserve); +} + +void AsyncLog::RecordSampleCompletion(uint64_t sample_sequence_id, + PerfClock::time_point completion_time, + QuerySampleLatency latency) { + std::unique_lock lock(latencies_mutex_); + + max_latency_ = std::max(max_latency_, latency); + + max_completion_timstamp_ = + std::max(max_completion_timstamp_, completion_time); + + if (sample_sequence_id < latencies_first_sample_sequence_id_) { + // Call LogErrorSync here since this kind of error could result in a + // segfault in the near future. +#if USE_NEW_LOGGING_FORMAT + std::stringstream ss; + ss << "Received completion for an old sample." 
+ << " Min expected id: " << latencies_first_sample_sequence_id_ + << " Actual id: " << sample_sequence_id; + MLPERF_LOG_ERROR_SYNC(GlobalLogger(), "error_runtime", ss.str()); +#else + GlobalLogger().LogErrorSync( + "Received completion for an old sample.", "Min expected id", + latencies_first_sample_sequence_id_, "Actual id", sample_sequence_id); +#endif + return; + } + + const size_t i = sample_sequence_id - latencies_first_sample_sequence_id_; + + if (latencies_.size() <= i) { + // TODO: Reserve in advance. + latencies_.resize(i + 1, kInvalidLatency); + } else if (latencies_[i] != kInvalidLatency) { + // Call LogErrorSync here since this kind of error could result in a + // segfault in the near future. +#if USE_NEW_LOGGING_FORMAT + MLPERF_LOG_ERROR_SYNC(GlobalLogger(), "error_runtime", + "Attempted to complete a sample twice."); +#else + GlobalLogger().LogErrorSync("Attempted to complete a sample twice."); +#endif + + // Return without recording the latency again to avoid potentially + // ending the test before the SUT is actually done, which could result + // in a segfault. + // If the SUT recorded the wrong sample, the test will hang and see + // the error above. + return; + } + + latencies_[i] = latency; + latencies_recorded_++; + if (AllLatenciesRecorded()) { + all_latencies_recorded_.notify_all(); + } +} + +std::vector AsyncLog::GetLatenciesBlocking( + size_t expected_count) { + std::vector latencies; + { + std::unique_lock lock(latencies_mutex_); + latencies_expected_ = expected_count; + all_latencies_recorded_.wait(lock, [&] { return AllLatenciesRecorded(); }); + latencies.swap(latencies_); + } + + if (latencies.size() != expected_count) { + // Call LogErrorSync here since this kind of error could result in a + // segfault in the near future. +#if USE_NEW_LOGGING_FORMAT + std::stringstream ss; + ss << "Received SequenceId that was too large." 
+ << " expected_size: " << expected_count + << " actual_size: " << latencies.size(); + MLPERF_LOG_ERROR_SYNC(GlobalLogger(), "error_runtime", ss.str()); +#else + GlobalLogger().LogErrorSync("Received SequenceId that was too large.", + "expected_size", expected_count, "actual_size", + latencies.size()); +#endif + } + + size_t invalid_latency_count = 0; + for (auto l : latencies) { + if (l == kInvalidLatency) { + invalid_latency_count++; + } + } + if (invalid_latency_count != 0) { + // Call LogErrorSync here since this kind of error could result in a + // segfault in the near future. +#if USE_NEW_LOGGING_FORMAT + std::stringstream ss; + ss << "Encountered incomplete samples at the end of a series of queries." + << " count: " << invalid_latency_count; + MLPERF_LOG_ERROR_SYNC(GlobalLogger(), "error_runtime", ss.str()); +#else + GlobalLogger().LogErrorSync( + "Encountered incomplete samples at the end of a series of queries.", + "count", invalid_latency_count); +#endif + } + + return latencies; +} + +PerfClock::time_point AsyncLog::GetMaxCompletionTime() { + return max_completion_timstamp_; +} + +QuerySampleLatency AsyncLog::GetMaxLatencySoFar() { + std::unique_lock lock(latencies_mutex_); + return max_latency_; +} + +/// \brief Records a single thread using thread-local storage and submits +/// entries to the central Logger. +/// +/// \details This setup allows for each log entry to be added: +/// * With forward-progress guarantees. (i.e.: no locking or blocking +/// operations even if other threads have stalled.) +/// * Without expensive syscalls or I/O operations, which are deferred to +/// the central Logger. 
+class TlsLogger { + public: + TlsLogger(std::function forced_detatch); + ~TlsLogger(); + void ForcedDetatchFromThread() { forced_detatch_(); } + + void Log(AsyncLogEntry&& entry); + void SwapBuffers(); + + std::vector* StartReadingEntries(); + void FinishReadingEntries(); + bool ReadBufferHasBeenConsumed(); + size_t MaxEntryVectorSize() { return max_entry_size_; } + + uint64_t Pid() const { return pid_; } + uint64_t Tid() const { return tid_; } + + void RequestSwapBuffersSlotRetried() { + swap_buffers_slot_retry_count_.fetch_add(1, std::memory_order_relaxed); + } + + size_t ReportLogCasFailCount() { + size_t c = log_cas_fail_count_.load(std::memory_order_relaxed); + log_cas_fail_count_.fetch_sub(c, std::memory_order_relaxed); + return c; + } + + size_t ReportSwapBuffersSlotRetryCount() { + size_t c = swap_buffers_slot_retry_count_.load(std::memory_order_relaxed); + swap_buffers_slot_retry_count_.fetch_sub(c, std::memory_order_relaxed); + return c; + } + + void TraceCounters(); + + private: + using EntryVector = std::vector; + enum class EntryState { Unlocked, ReadLock, WriteLock }; + + // Accessed by producer only. + size_t i_read_ = 0; + + // Accessed by producer and consumer atomically. + EntryVector entries_[2]; + std::atomic entry_states_[2]{{EntryState::ReadLock}, + {EntryState::Unlocked}}; + std::atomic i_write_{1}; + + std::atomic log_cas_fail_count_{0}; + std::atomic swap_buffers_slot_retry_count_{0}; + + // Accessed by consumer only. 
+ size_t unread_swaps_ = 0; + size_t i_write_prev_ = 0; + uint64_t pid_; + uint64_t tid_; + size_t max_entry_size_ = kTlsLogReservedEntryCount; + + std::function forced_detatch_; +}; + +Logger::Logger(std::chrono::duration poll_period, + size_t max_threads_to_log) + : poll_period_(poll_period), + max_threads_to_log_(max_threads_to_log), + thread_swap_request_slots_(max_threads_to_log * 2) { + const size_t kSlotCount = max_threads_to_log * 2; + for (size_t i = 0; i < kSlotCount; i++) { + std::atomic_init(&thread_swap_request_slots_[i], + SwapRequestSlotIsWritableValue(i)); + } +} + +Logger::~Logger() { + // TlsLoggers might outlive this Logger when loaded as a python module. + // Forcefully make all currently registered TlsLoggers orphans. + std::unique_lock lock(tls_loggers_registerd_mutex_); + TlsLogger* tls_logger_prev = nullptr; + (void)tls_logger_prev; // Avoid unused error in release builds. + while (!tls_loggers_registerd_.empty()) { + TlsLogger* tls_logger = *tls_loggers_registerd_.begin(); + // Otherwise, this is an infinite loop. + assert(tls_logger != tls_logger_prev); + tls_loggers_registerd_mutex_.unlock(); + tls_logger->ForcedDetatchFromThread(); + tls_loggers_registerd_mutex_.lock(); + tls_logger_prev = tls_logger; + } +} + +void Logger::RequestSwapBuffers(TlsLogger* tls_logger) { + auto tls_logger_as_uint = reinterpret_cast(tls_logger); + assert(SwapRequestSlotIsReadable(tls_logger_as_uint)); + size_t id, slot; + uintptr_t slot_is_writeable_value; + // The compare_exchange below should almost always succeed. + // The compare_exchange may fail if a recycled slot is still actively used + // by another thread, so we retry with subsequent slots here if needed. + // Since the slot count is 2x the expected number of threads to log, + // the CAS should only fail at most 50% of the time when all logging threads + // happen to be descheduled between the fetch_add and CAS below, which is + // very unlikely. 
+ id = swap_request_id_.fetch_add(1, std::memory_order_relaxed); + slot = id % thread_swap_request_slots_.size(); + slot_is_writeable_value = SwapRequestSlotIsWritableValue(id); + while (!thread_swap_request_slots_[slot].compare_exchange_strong( + slot_is_writeable_value, tls_logger_as_uint, std::memory_order_release)) { + id = swap_request_id_.fetch_add(1, std::memory_order_relaxed); + slot = id % thread_swap_request_slots_.size(); + slot_is_writeable_value = SwapRequestSlotIsWritableValue(id); + tls_logger->RequestSwapBuffersSlotRetried(); + } +} + +void Logger::RegisterTlsLogger(TlsLogger* tls_logger) { + std::unique_lock lock(tls_loggers_registerd_mutex_); + if (tls_loggers_registerd_.size() >= max_threads_to_log_) { +#if USE_NEW_LOGGING_FORMAT + MLPERF_LOG_ERROR_SYNC((*this), "error_runtime", + "Warning: More TLS loggers registerd than can be " + "active simultaneously."); +#else + LogErrorSync( + "Warning: More TLS loggers registerd than can " + "be active simultaneously.\n"); +#endif + } + tls_loggers_registerd_.insert(tls_logger); +} + +// This moves ownership of the tls_logger data to Logger so the +// exiting thread can exit immediately, even if all the logs of the +// exiting thread haven't been processed. +void Logger::UnRegisterTlsLogger(std::unique_ptr tls_logger) { + OrphanContainer::iterator orphan; + { + std::unique_lock lock(tls_logger_orphans_mutex_); + tls_logger_orphans_.emplace_front(std::move(tls_logger)); + orphan = tls_logger_orphans_.begin(); + } + + // Only remove the TlsLogger from the registry after adding to orphans so + // CollectTlsLoggerStats doesn't have any gaps in coverage. + { + std::unique_lock lock(tls_loggers_registerd_mutex_); + tls_loggers_registerd_.erase(orphan->get()); + } + + // This will flush the logs of |tls_logger| and mark it for destruction. + // Deferring destruction via orphans_to_destroy helps avoid use-after-frees + // when the IOThread calls FinishReadingEntries. 
+ (*orphan)->Log([this, orphan](AsyncLog&) { + CollectTlsLoggerStats(orphan->get()); + orphans_to_destroy_.push_back(orphan); + }); +} + +void Logger::CollectTlsLoggerStats(TlsLogger* tls_logger) { + tls_total_log_cas_fail_count_ += tls_logger->ReportLogCasFailCount(); + tls_total_swap_buffers_slot_retry_count_ += + tls_logger->ReportSwapBuffersSlotRetryCount(); + + size_t max_entry_vector_size = tls_logger->MaxEntryVectorSize(); + if (max_entry_vector_size > kTlsLogReservedEntryCount) { +#if USE_NEW_LOGGING_FORMAT + std::stringstream msg; + msg << "Logging allocation detected:" + << " tid: " << tls_logger->Tid() + << " reserved_entries: " << kTlsLogReservedEntryCount + << " max_entries: " << max_entry_vector_size; + MLPERF_LOG_WARNING((*this), "warning_generic_message", msg.str()); +#else + async_logger_.FlagWarning(); + async_logger_.LogDetail("Logging allocation detected: ", "tid", + tls_logger->Tid(), "reserved_entries", + kTlsLogReservedEntryCount, "max_entries", + max_entry_vector_size); +#endif + } +} + +void Logger::StartIOThread() { + { + std::unique_lock lock(io_thread_mutex_); + keep_io_thread_alive_ = true; + } + io_thread_ = std::thread(&Logger::IOThread, this); +} + +void Logger::StopIOThread() { + { + std::unique_lock lock(io_thread_mutex_); + keep_io_thread_alive_ = false; + io_thread_cv_.notify_all(); + } + io_thread_.join(); +} + +void Logger::StartLogging(std::ostream* summary, std::ostream* detail, + std::ostream* accuracy, bool copy_detail_to_stdout, + bool copy_summary_to_stdout) { + async_logger_.SetLogFiles(summary, detail, accuracy, copy_detail_to_stdout, + copy_summary_to_stdout, PerfClock::now()); +} + +void Logger::StopLogging() { + if (std::this_thread::get_id() == io_thread_.get_id()) { +#if USE_NEW_LOGGING_FORMAT + MLPERF_LOG_ERROR_SYNC((*this), "error_runtime", + "StopLogging() not supported from IO thread."); +#else + LogErrorSync("StopLogging() not supported from IO thread."); +#endif + return; + } + + // Flush logs from this 
thread. + std::promise io_thread_flushed_this_thread; + Log([&](AsyncLog&) { io_thread_flushed_this_thread.set_value(); }); + io_thread_flushed_this_thread.get_future().wait(); + async_logger_.SetLogFiles(&std::cerr, &std::cerr, &std::cerr, false, false, + PerfClock::now()); +} + +void Logger::StartNewTrace(std::ostream* trace_out, + PerfClock::time_point origin) { + async_logger_.StartNewTrace(trace_out, origin); +} + +void Logger::StopTracing() { + // Flush traces from this thread. + std::promise io_thread_flushed_this_thread; + Log([&](AsyncLog&) { io_thread_flushed_this_thread.set_value(); }); + io_thread_flushed_this_thread.get_future().wait(); + async_logger_.StopTrace(); +} + +void Logger::LogContentionAndAllocations() { + LogDetail([&](AsyncDetail& detail) { + { + std::unique_lock lock(tls_loggers_registerd_mutex_); + for (auto tls_logger : tls_loggers_registerd_) { + CollectTlsLoggerStats(tls_logger); + } + } + + { + std::unique_lock lock(tls_logger_orphans_mutex_); + for (auto& orphan : tls_logger_orphans_) { + CollectTlsLoggerStats(orphan.get()); + } + } + +#if USE_NEW_LOGGING_FORMAT + MLPERF_LOG(detail, "logger_swap_request_slots_retry_count", + swap_request_slots_retry_count_); + MLPERF_LOG(detail, "logger_swap_request_slots_retry_retry_count", + swap_request_slots_retry_retry_count_); + MLPERF_LOG(detail, "logger_swap_request_slots_retry_reencounter_count", + swap_request_slots_retry_reencounter_count_); + MLPERF_LOG(detail, "logger_start_reading_entries_retry_count", + start_reading_entries_retry_count_); + MLPERF_LOG(detail, "logger_tls_total_log_cas_fail_count", + tls_total_log_cas_fail_count_); + MLPERF_LOG(detail, "logger_tls_total_swap_buffers_slot_retry_count", + tls_total_swap_buffers_slot_retry_count_); +#else + detail("Log Contention Counters:"); + detail(std::to_string(swap_request_slots_retry_count_) + + " : swap_request_slots_retry_count"); + detail(std::to_string(swap_request_slots_retry_retry_count_) + + " : 
swap_request_slots_retry_retry_count"); + detail(std::to_string(swap_request_slots_retry_reencounter_count_) + + " : swap_request_slots_retry_reencounter_count"); + detail(std::to_string(start_reading_entries_retry_count_) + + " : start_reading_entries_retry_count"); + detail(std::to_string(tls_total_log_cas_fail_count_) + + " : tls_total_log_cas_fail_count"); + detail(std::to_string(tls_total_swap_buffers_slot_retry_count_) + + " : tls_total_swap_buffers_slot_retry_count"); +#endif + + swap_request_slots_retry_count_ = 0; + swap_request_slots_retry_retry_count_ = 0; + swap_request_slots_retry_reencounter_count_ = 0; + start_reading_entries_retry_count_ = 0; + tls_total_log_cas_fail_count_ = 0; + tls_total_swap_buffers_slot_retry_count_ = 0; + }); +} + +void Logger::RestartLatencyRecording(uint64_t first_sample_sequence_id, + size_t latencies_to_reserve) { + async_logger_.RestartLatencyRecording(first_sample_sequence_id, + latencies_to_reserve); +} + +std::vector Logger::GetLatenciesBlocking( + size_t expected_count) { + return async_logger_.GetLatenciesBlocking(expected_count); +} + +PerfClock::time_point Logger::GetMaxCompletionTime() { + return async_logger_.GetMaxCompletionTime(); +} + +QuerySampleLatency Logger::GetMaxLatencySoFar() { + return async_logger_.GetMaxLatencySoFar(); +} + +TlsLogger* Logger::GetTlsLoggerThatRequestedSwap(size_t slot, size_t next_id) { + uintptr_t slot_value = thread_swap_request_slots_[slot].load(); + if (SwapRequestSlotIsReadable(slot_value)) { + // TODO: Convert this block to a simple write once we are confidient + // that we don't need to check for success. 
+ bool success = thread_swap_request_slots_[slot].compare_exchange_strong( + slot_value, SwapRequestSlotIsWritableValue(next_id)); + if (!success) { +#if USE_NEW_LOGGING_FORMAT + MLPERF_LOG_WARNING((*this), "warning_generic_message", "CAS failed."); +#else + LogErrorSync("CAS failed.", "line", __LINE__); +#endif + assert(success); + } + return reinterpret_cast(slot_value); + } + return nullptr; +} + +void Logger::GatherRetrySwapRequests(std::vector* threads_to_swap) { + if (swap_request_slots_to_retry_.empty()) { + return; + } + + std::vector retry_slots; + retry_slots.swap(swap_request_slots_to_retry_); + for (auto& slot_retry : retry_slots) { + TlsLogger* tls_logger = + GetTlsLoggerThatRequestedSwap(slot_retry.slot, slot_retry.next_id); + if (tls_logger) { + threads_to_swap->push_back(tls_logger); + } else { + swap_request_slots_to_retry_.push_back(slot_retry); + swap_request_slots_retry_retry_count_++; + } + } +} + +void Logger::GatherNewSwapRequests(std::vector* threads_to_swap) { + auto swap_request_end = swap_request_id_.load(std::memory_order_acquire); + for (; swap_request_id_read_ < swap_request_end; swap_request_id_read_++) { + size_t slot = swap_request_id_read_ % thread_swap_request_slots_.size(); + size_t next_id = swap_request_id_read_ + thread_swap_request_slots_.size(); + TlsLogger* tls_logger = GetTlsLoggerThatRequestedSwap(slot, next_id); + if (tls_logger) { + threads_to_swap->push_back(tls_logger); + } else { + swap_request_slots_retry_count_++; + // A thread is in the middle of its call to RequestSwapBuffers. + // Retry later once it's done. + auto it = std::find_if(swap_request_slots_to_retry_.begin(), + swap_request_slots_to_retry_.end(), + [=](SlotRetry& s) { return s.slot == slot; }); + if (it == swap_request_slots_to_retry_.end()) { + // This is the first time we are retrying the slot. + swap_request_slots_to_retry_.push_back({slot, next_id}); + } else { + // Whoa. We've been retrying this slot since the last time it was + // encountered. 
Just update the next_id. + it->next_id = next_id; + swap_request_slots_retry_reencounter_count_++; + } + } + } +} + +void Logger::IOThread() { + while (keep_io_thread_alive_) { + auto tracer1 = + MakeScopedTracer([](AsyncTrace& trace) { trace("IOThreadLoop"); }); + { + auto tracer2 = MakeScopedTracer([](AsyncTrace& trace) { trace("Wait"); }); + std::unique_lock lock(io_thread_mutex_); + io_thread_cv_.wait_for(lock, poll_period_, + [&] { return !keep_io_thread_alive_; }); + } + + { + auto tracer3 = + MakeScopedTracer([](AsyncTrace& trace) { trace("Gather"); }); + std::vector threads_to_swap; + threads_to_swap.swap(threads_to_swap_deferred_); + GatherRetrySwapRequests(&threads_to_swap); + GatherNewSwapRequests(&threads_to_swap); + for (TlsLogger* thread : threads_to_swap) { + if (thread->ReadBufferHasBeenConsumed()) { + thread->SwapBuffers(); + // After swapping a thread, it's ready to be read. + threads_to_read_.push_back(thread); + } else { + // Don't swap buffers again until we've finish reading the + // previous swap. + threads_to_swap_deferred_.push_back(thread); + } + } + } + + { + auto tracer4 = + MakeScopedTracer([](AsyncTrace& trace) { trace("Process"); }); + // Read from the threads we are confident have activity. + for (std::vector::iterator thread = threads_to_read_.begin(); + thread != threads_to_read_.end(); thread++) { + auto tracer5 = + MakeScopedTracer([tid = (*thread)->Tid()](AsyncTrace& trace) { + trace("Thread", "tid", tid); + }); + std::vector* entries = (*thread)->StartReadingEntries(); + if (!entries) { + start_reading_entries_retry_count_++; + continue; + } + + async_logger_.SetCurrentPidTid((*thread)->Pid(), (*thread)->Tid()); + for (auto& entry : *entries) { + // Execute the entry to perform the serialization and I/O. + entry(async_logger_); + } + (*thread)->FinishReadingEntries(); + // Mark for removal by the call to RemoveValue below. 
+ *thread = nullptr; + } + + // Only remove threads where reading succeeded so we retry the failed + // threads the next time around. + RemoveValue(&threads_to_read_, nullptr); + } + + // Explicitly flush every time we wake up. The goal being minimization + // of large implicit flushes which could affect tail latency measurements, + // especially at percentiles closer to 100%. + /// \todo Determine if explicitly flushing logs every wake up is better + /// than relying on implicit flushing. + { + auto tracer6 = + MakeScopedTracer([](AsyncTrace& trace) { trace("FlushAll"); }); + async_logger_.Flush(); + } + + if (!orphans_to_destroy_.empty()) { + auto tracer7 = MakeScopedTracer( + [](AsyncTrace& trace) { trace("Abandoning Orphans"); }); + std::unique_lock lock(tls_logger_orphans_mutex_); + for (auto orphan : orphans_to_destroy_) { + tls_logger_orphans_.erase(orphan); + } + orphans_to_destroy_.clear(); + } + } +} + +TlsLogger::TlsLogger(std::function forced_detatch) + : pid_(MLPERF_GET_PID()), + tid_(MLPERF_GET_TID()), + forced_detatch_(std::move(forced_detatch)) { + for (auto& entry : entries_) { + entry.reserve(kTlsLogReservedEntryCount); + } +} + +TlsLogger::~TlsLogger() {} + +// Log always makes forward progress since it can unconditionally obtain a +// "lock" on at least one of the buffers for writing. +// Notificiation is also lock free. +void TlsLogger::Log(AsyncLogEntry&& entry) { + size_t cas_fail_count = 0; + auto unlocked = EntryState::Unlocked; + size_t i_write = i_write_.load(std::memory_order_relaxed); + while (!entry_states_[i_write].compare_exchange_strong( + unlocked, EntryState::WriteLock, std::memory_order_acquire, + std::memory_order_relaxed)) { + unlocked = EntryState::Unlocked; + i_write ^= 1; + // We may need to try 3 times, since there could be a race with a + // previous SwapBuffers request and we use memory_order_relaxed when + // loading i_write_ above. 
+ cas_fail_count++; + if (cas_fail_count >= 3) { +#if USE_NEW_LOGGING_FORMAT + MLPERF_LOG_WARNING(GlobalLogger(), "warning_generic_message", + "CAS failed."); +#else + GlobalLogger().LogErrorSync("CAS failed.", "times", cas_fail_count, + "line", __LINE__); +#endif + } + log_cas_fail_count_.fetch_add(1, std::memory_order_relaxed); + } + entries_[i_write].emplace_back(std::forward(entry)); + + // TODO: Convert this block to a simple write once we are confidient + // that we don't need to check for success. + auto write_lock = EntryState::WriteLock; + bool success = entry_states_[i_write].compare_exchange_strong( + write_lock, EntryState::Unlocked, std::memory_order_release); + if (!success) { +#if USE_NEW_LOGGING_FORMAT + MLPERF_LOG_WARNING(GlobalLogger(), "warning_generic_message", + "CAS failed."); +#else + GlobalLogger().LogErrorSync("CAS failed.", "line", __LINE__); +#endif + assert(success); + } + + bool write_buffer_swapped = i_write_prev_ != i_write; + if (write_buffer_swapped) { + GlobalLogger().RequestSwapBuffers(this); + i_write_prev_ = i_write; + } +} + +void TlsLogger::SwapBuffers() { + // TODO: Convert this block to a simple write once we are confidient + // that we don't need to check for success. + auto read_lock = EntryState::ReadLock; + bool success = entry_states_[i_read_].compare_exchange_strong( + read_lock, EntryState::Unlocked, std::memory_order_release); + if (!success) { +#if USE_NEW_LOGGING_FORMAT + MLPERF_LOG_WARNING(GlobalLogger(), "warning_generic_message", + "CAS failed."); +#else + GlobalLogger().LogErrorSync("CAS failed.", "line", __LINE__); +#endif + assert(success); + } + + i_write_.store(i_read_, std::memory_order_relaxed); + i_read_ ^= 1; + unread_swaps_++; +} + +// Returns nullptr if read lock fails. 
+std::vector* TlsLogger::StartReadingEntries() { + auto unlocked = EntryState::Unlocked; + if (entry_states_[i_read_].compare_exchange_strong( + unlocked, EntryState::ReadLock, std::memory_order_acquire, + std::memory_order_relaxed)) { + return &entries_[i_read_]; + } + return nullptr; +} + +void TlsLogger::FinishReadingEntries() { + // Detect first logging allocation and track max allocated size. + size_t new_size = entries_[i_read_].size(); + if (new_size > max_entry_size_) { + if (max_entry_size_ == kTlsLogReservedEntryCount) { + Log([ts = PerfClock::now()](AsyncLog& log) { + log.TraceAsyncInstant("FirstAllocation", 0, ts); + }); + } + max_entry_size_ = new_size; + } + + entries_[i_read_].clear(); + unread_swaps_--; +} + +bool TlsLogger::ReadBufferHasBeenConsumed() { return unread_swaps_ == 0; } + +void TlsLogger::TraceCounters() { + auto tracer = MakeScopedTracer( + [lcfc = log_cas_fail_count_.load(std::memory_order_relaxed), + sbsrc = swap_buffers_slot_retry_count_.load(std::memory_order_relaxed)]( + AsyncTrace& trace) { + trace("TlsLogger:ContentionCounters", "log_cas_fail_count", lcfc, + "swap_buffers_slot_retry_count", sbsrc); + }); +} + +Logger& GlobalLogger() { + static Logger g_logger(kLogPollPeriod, kMaxThreadsToLog); + return g_logger; +} + +/// \brief Moves ownership of the TlsLogger to Logger on thread exit +/// so no round-trip synchronization with the IO thread is required. 
+struct TlsLoggerWrapper { + TlsLoggerWrapper(std::function forced_detatch) + : tls_logger(std::make_unique(std::move(forced_detatch))) { + GlobalLogger().RegisterTlsLogger(tls_logger.get()); + } + ~TlsLoggerWrapper() { + tls_logger->TraceCounters(); + GlobalLogger().UnRegisterTlsLogger(std::move(tls_logger)); + } + std::unique_ptr tls_logger; +}; + +TlsLoggerWrapper* InitializeMyTlsLoggerWrapper() { + thread_local std::unique_ptr tls_logger_wrapper; + // forced_detatch lets the global Logger forcefully detatch TlsLoggers + // from the thread in the Logger's destructor, which may run before + // thread-local variables are destroyed when the loadgen is used as a python + // module and dynamically unloaded. + // Note: We capture a pointer to the tls_logger_wrapper since variables of + // the thread-local storage class aren't actually captured. C++ spec says + // only variables of the automatic storage class are captured. + /// \todo There is a race where the same TlsLoggerWrapper might be + /// destroyed both naturally and via forced_detatch. Destruction of + /// the TlsLoggerWrapper should be locked. + auto forced_detatch = [tls_logger_wrapper = &tls_logger_wrapper]() { + tls_logger_wrapper->reset(); + }; + tls_logger_wrapper = std::make_unique(forced_detatch); + return tls_logger_wrapper.get(); +} + +TlsLogger* InitializeMyTlsLogger() { + thread_local TlsLoggerWrapper* wrapper = InitializeMyTlsLoggerWrapper(); + return wrapper->tls_logger.get(); +} + +void Log(AsyncLogEntry&& entry) { + thread_local TlsLogger* const tls_logger = InitializeMyTlsLogger(); + tls_logger->Log(std::forward(entry)); +} + +} // namespace logging +} // namespace mlperf diff --git a/benchmarks/rnnt/ootb/inference/loadgen/logging.h b/benchmarks/rnnt/ootb/inference/loadgen/logging.h new file mode 100644 index 0000000..23b70f1 --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/loadgen/logging.h @@ -0,0 +1,808 @@ +/* Copyright 2019 The MLPerf Authors. All Rights Reserved. 
+Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +/// \file +/// \brief Internal logging implementation details. + +#ifndef MLPERF_LOADGEN_LOGGING_H_ +#define MLPERF_LOADGEN_LOGGING_H_ + +#define USE_NEW_LOGGING_FORMAT 1 +#define MLPERF_LOG(logger, key, value) \ + logger.Log((key), (value), __FILE__, __LINE__) +#define MLPERF_LOG_ERROR(logger, key, value) \ + logger.LogError((key), (value), __FILE__, __LINE__) +#define MLPERF_LOG_ERROR_SYNC(logger, key, value) \ + logger.LogErrorSync((key), (value), __FILE__, __LINE__) +#define MLPERF_LOG_WARNING(logger, key, value) \ + logger.LogWarning((key), (value), __FILE__, __LINE__) +#define MLPERF_LOG_INTERVAL_START(logger, key, value) \ + logger.LogIntervalStart((key), (value), __FILE__, __LINE__) +#define MLPERF_LOG_INTERVAL_END(logger, key, value) \ + logger.LogIntervalEnd((key), (value), __FILE__, __LINE__) + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "query_sample.h" + +namespace mlperf { + +/// \brief Wait-free logging utilities that defer stringification +/// and syscalls to a worker thread. +namespace logging { + +class AsyncLog; +class Logger; +class TlsLogger; +struct TlsLoggerWrapper; + +/// \todo Verify lambas are not allocating when bounded to a std::function. 
+using AsyncLogEntry = std::function; +using PerfClock = std::chrono::high_resolution_clock; + +/// \brief Logs the raw bytes as a hexadecimal ascii string. +struct LogBinaryAsHexString { + std::vector* data; +}; + +/// \brief By default, print out the value directly. +template +const T& ArgValueTransform(const T& value) { + return value; +} + +/// \brief Print out True/False. +const std::string& ArgValueTransform(const bool& value); +/// \brief Print out binary day as hex string. +const std::string ArgValueTransform(const LogBinaryAsHexString& value); +#if USE_NEW_LOGGING_FORMAT +/// \brief Print out a string in JSON format (with quotes). +const std::string ArgValueTransform(const std::string& value); +const std::string ArgValueTransform(const char* value); +/// \brief Prints a list of int in JSON format. +const std::string ArgValueTransform(const std::vector& value); +/// \brief Prints a dict in JSON format. +const std::string ArgValueTransform( + const std::map& value); +#endif + +/// \brief Helper to print out values without quotes when value is a string. +template +const T& ArgValueTransformWithoutQuote(const T& value) { + return ArgValueTransform(value); +} +inline const std::string ArgValueTransformWithoutQuote( + const LogBinaryAsHexString& value) { + return ArgValueTransform(value); +} +/// \brief Helper to print out a string without the quotes. +inline const std::string ArgValueTransformWithoutQuote( + const std::string& value) { + return value; +} + +/// \brief Outputs a trace that can be uploaded to chrome://tracing for +/// visualization. 
+/// \details Trace event format definition: +/// https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU/edit?usp=sharing +class ChromeTracer { + public: + ChromeTracer(std::ostream* trace_out, PerfClock::time_point origin); + ~ChromeTracer(); + + template + void AddCompleteEvent(const std::string& name, uint64_t pid, uint64_t tid, + PerfClock::time_point start, PerfClock::time_point end, + const Args... args) { + *out_ << "{\"name\":\"" << name << "\"," + << "\"ph\":\"X\"," + << "\"pid\":" << pid << "," + << "\"tid\":" << tid << "," + << "\"ts\":" << Micros(start - origin_).count() << "," + << "\"dur\":" << Micros(end - start).count() << "," + << "\"args\":{"; + AddArgs(args...); + *out_ << "}},\n"; + } + + template + void AddAsyncBeginEvent(const std::string& name, uint64_t pid, uint64_t id, + PerfClock::time_point time, const Args... args) { + *out_ << "{\"name\":\"" << name << "\"," + << "\"cat\":\"default\"," + << "\"ph\":\"b\"," + << "\"pid\":" << pid << "," + << "\"id\":" << id << "," + << "\"ts\":" << Micros(time - origin_).count() << "," + << "\"args\":{"; + AddArgs(args...); + *out_ << "}},\n"; + } + + template + void AddAsyncInstantEvent(const std::string& name, uint64_t pid, uint64_t id, + PerfClock::time_point time, const Args... 
args) { + *out_ << "{\"name\":\"" << name << "\"," + << "\"cat\":\"default\"," + << "\"ph\":\"n\"," + << "\"pid\":" << pid << "," + << "\"id\":" << id << "," + << "\"ts\":" << Micros(time - origin_).count() << "," + << "\"args\":{"; + AddArgs(args...); + *out_ << "}},\n"; + } + + template + void AddAsyncEndEvent(const std::string& name, uint64_t pid, uint64_t id, + PerfClock::time_point time) { + *out_ << "{\"name\":\"" << name << "\"," + << "\"cat\":\"default\"," + << "\"ph\":\"e\", " + << "\"pid\":" << pid << "," + << "\"id\":" << id << "," + << "\"ts\":" << Micros(time - origin_).count() << "},\n"; + } + + template + void AddCounterEvent(const std::string& name, uint64_t pid, + PerfClock::time_point time, const Args... args) { + *out_ << "{\"name\":\"" << name << "\"," + << "\"ph\": \"C\"," + << "\"pid\":" << pid << "," + << "\"ts\":" << Micros(time - origin_).count() << "," + << "\"args\":{ "; + AddArgs(args...); + *out_ << "}},\n"; + } + + void Flush() { out_->flush(); } + + private: + using Micros = std::chrono::duration; + + void WriteTraceEventHeader(); + void WriteTraceEventFooter(); + + void AddArgs() {} + + template + void AddArgs(const std::string& arg_name, const T& arg_value) { + *out_ << "\"" << arg_name << "\":" << ArgValueTransform(arg_value); + } + + template + void AddArgs(const std::string& arg_name, const T& arg_value, + const Args... args) { + *out_ << "\"" << arg_name << "\":" << ArgValueTransform(arg_value) << ","; + AddArgs(args...); + } + + std::ostream* out_; + PerfClock::time_point origin_; +}; + +/// \brief The proxy all logging lambdas ultimately use to write any log type. +/// \details Passed as an argument to the log lambda on the +/// recording thread to serialize the data captured by the lambda and +/// forward it to the output stream. +/// \todo Make summary_out_, detail_out_, accuracy_out_, and trace_out_ +/// instances of a new LogOutput interface that the client may override. 
+class AsyncLog { + public: + void SetLogFiles(std::ostream* summary, std::ostream* detail, + std::ostream* accuracy, bool copy_detail_to_stdout, + bool copy_summary_to_stdout, + PerfClock::time_point log_origin); + void StartNewTrace(std::ostream* trace_out, PerfClock::time_point origin); + void StopTrace(); + void Flush(); + + void SetCurrentPidTid(uint64_t pid, uint64_t tid); + + void LogAccuracy(uint64_t seq_id, const QuerySampleIndex qsl_idx, + const LogBinaryAsHexString& response); + + template + void LogSummary(const std::string& message, const Args... args); + + void SetLogDetailTime(PerfClock::time_point time) { log_detail_time_ = time; } + + void FlagError() { + std::unique_lock lock(log_mutex_); + log_error_count_++; + error_flagged_ = true; + } + + void FlagWarning() { + std::unique_lock lock(log_mutex_); + log_warning_count_++; + warning_flagged_ = true; + } + +#if USE_NEW_LOGGING_FORMAT + template + void LogDetail(const std::string& key, const T& value, + const std::string file_name, const unsigned int line_no); +#else + template + void LogDetail(const std::string& message, const Args... args); +#endif + + template + void Trace(const std::string& trace_name, PerfClock::time_point start, + PerfClock::time_point end, const Args... args) { + std::unique_lock lock(trace_mutex_); + if (tracer_) { + tracer_->AddCompleteEvent(trace_name, current_pid_, current_tid_, start, + end, args...); + } + } + + template + void TraceAsyncInstant(const std::string& trace_name, uint64_t id, + PerfClock::time_point instant_time, + const Args... args) { + std::unique_lock lock(trace_mutex_); + if (tracer_) { + tracer_->AddAsyncInstantEvent(trace_name, current_pid_, id, instant_time, + args...); + } + } + + void SetScopedTraceTimes(PerfClock::time_point start, + PerfClock::time_point end) { + scoped_start_ = start; + scoped_end_ = end; + } + + template + void ScopedTrace(const std::string& trace_name, const Args... 
args) { + std::unique_lock lock(trace_mutex_); + if (tracer_) { + tracer_->AddCompleteEvent(trace_name, current_pid_, current_tid_, + scoped_start_, scoped_end_, args...); + } + } + + template + void TraceSample(const std::string& trace_name, uint64_t id, + PerfClock::time_point start, PerfClock::time_point end, + const Args... args) { + std::unique_lock lock(trace_mutex_); + if (tracer_) { + tracer_->AddAsyncBeginEvent(trace_name, current_pid_, id, start, args...); + tracer_->AddAsyncEndEvent(trace_name, current_pid_, id, end); + } + } + + template + void TraceCounterEvent(const std::string& trace_name, + PerfClock::time_point time, const Args... args) { + std::unique_lock lock(trace_mutex_); + if (tracer_) { + tracer_->AddCounterEvent(trace_name, current_pid_, time, args...); + } + } + + void RestartLatencyRecording(uint64_t first_sample_sequence_id, + size_t latencies_to_reserve); + void RecordSampleCompletion(uint64_t sample_sequence_id, + PerfClock::time_point completion_time, + QuerySampleLatency latency); + std::vector GetLatenciesBlocking(size_t expected_count); + PerfClock::time_point GetMaxCompletionTime(); + QuerySampleLatency GetMaxLatencySoFar(); + + private: + void WriteAccuracyHeaderLocked(); + void WriteAccuracyFooterLocked(); + + void LogArgs(std::ostream*) {} + + template + void LogArgs(std::ostream* out, const T& value_only) { + *out << ArgValueTransformWithoutQuote(value_only); + } + + template + void LogArgs(std::ostream* out, const std::string& arg_name, + const T& arg_value) { + *out << "\"" << arg_name + << "\" : " << ArgValueTransformWithoutQuote(arg_value); + } + + template + void LogArgs(std::ostream* out, const std::string& arg_name, + const T& arg_value, const Args... 
args) { + *out << "\"" << arg_name + << "\" : " << ArgValueTransformWithoutQuote(arg_value) << ", "; + LogArgs(out, args...); + } + + std::mutex log_mutex_; + std::ostream* summary_out_ = &std::cerr; + std::ostream* detail_out_ = &std::cerr; + std::ostream* accuracy_out_ = &std::cerr; + // TODO: Instead of these bools, use a class that forwards to two streams. + bool copy_detail_to_stdout_ = false; + bool copy_summary_to_stdout_ = false; + bool accuracy_needs_comma_ = false; + PerfClock::time_point log_origin_; + size_t log_error_count_ = 0; + bool error_flagged_ = false; + size_t log_warning_count_ = 0; + bool warning_flagged_ = false; + + std::mutex trace_mutex_; + std::unique_ptr tracer_; + + uint64_t current_pid_; + uint64_t current_tid_; + PerfClock::time_point log_detail_time_; + PerfClock::time_point scoped_start_; + PerfClock::time_point scoped_end_; + + std::mutex latencies_mutex_; + std::condition_variable all_latencies_recorded_; + uint64_t latencies_first_sample_sequence_id_ = 0; + std::vector latencies_; + QuerySampleLatency max_latency_ = 0; + PerfClock::time_point max_completion_timstamp_; + size_t latencies_recorded_ = 0; + size_t latencies_expected_ = 0; + // Must be called with latencies_mutex_ held. + bool AllLatenciesRecorded() { + return latencies_recorded_ == latencies_expected_; + } +}; + +/// \brief The central logger that logs all threads belonging to a run. 
+class Logger { + public: + Logger(std::chrono::duration poll_period, size_t max_threads_to_log); + ~Logger(); + + void StartIOThread(); + void StopIOThread(); + + void StartLogging(std::ostream* summary, std::ostream* detail, + std::ostream* accuracy, bool copy_detail_to_stdout, + bool copy_summary_to_stdout); + void StopLogging(); + + void StartNewTrace(std::ostream* trace_out, PerfClock::time_point origin); + void StopTracing(); + + void LogContentionAndAllocations(); + + void RestartLatencyRecording(uint64_t first_sample_sequence_id, + size_t latencies_to_reserve); + std::vector GetLatenciesBlocking(size_t expected_count); + PerfClock::time_point GetMaxCompletionTime(); + QuerySampleLatency GetMaxLatencySoFar(); + + private: + friend AsyncLog; + friend TlsLogger; + friend TlsLoggerWrapper; + + void RegisterTlsLogger(TlsLogger* tls_logger); + void UnRegisterTlsLogger(std::unique_ptr tls_logger); + void RequestSwapBuffers(TlsLogger* tls_logger); + void CollectTlsLoggerStats(TlsLogger* tls_logger); + + TlsLogger* GetTlsLoggerThatRequestedSwap(size_t slot, size_t next_id); + void GatherRetrySwapRequests(std::vector* threads_to_swap); + void GatherNewSwapRequests(std::vector* threads_to_swap); + + /// \brief The main logging thread function that handles the serialization + /// and I/O to the stream or file. + /// + /// \todo Provide client hook to set logging thread affinity and priority. + void IOThread(); + +// Slow synchronous error logging for internals that may prevent +// async logging from working. +#if USE_NEW_LOGGING_FORMAT + template + void LogErrorSync(const std::string& key, const T& value, + const std::string file_name, const unsigned int line_no) { + /// \todo Acquire mutex once for FlagError + LogDetail to avoid + /// races. Better yet, switch to a non-stateful error API. + // This is better than nothing though.
+ async_logger_.FlagError(); + async_logger_.LogDetail(key, value, file_name, line_no); + } + template + void LogWarning(const std::string& key, const T& value, + const std::string file_name, const unsigned int line_no) { + async_logger_.FlagWarning(); + async_logger_.LogDetail(key, value, file_name, line_no); + } +#else + template + void LogErrorSync(const std::string& message, Args&&... args) { + /// \todo Acquire mutex once for FlagError + LogDetail to avoid + /// races. Better yet, switch to a non-stateful error API. + // This is better than nothing though. + async_logger_.FlagError(); + async_logger_.LogDetail(message, std::forward(args)...); + } +#endif + + // Accessed by IOThread only. + const std::chrono::duration poll_period_; + AsyncLog async_logger_; + + const size_t max_threads_to_log_; + std::thread io_thread_; + + // Accessed by producers and IOThread during thread registration and + // destruction. Protected by io_thread_mutex_. + std::mutex io_thread_mutex_; + std::condition_variable io_thread_cv_; + bool keep_io_thread_alive_ = false; + + std::mutex tls_loggers_registerd_mutex_; + std::unordered_set tls_loggers_registerd_; + + // Temporarily stores TlsLogger data for threads that have exited until + // all their log entries have been processed. + // Accessed by IOThread and producers as their threads exit. + std::mutex tls_logger_orphans_mutex_; + using OrphanContainer = std::list>; + OrphanContainer tls_logger_orphans_; + + // Accessed by producers and IOThread atomically. + std::atomic swap_request_id_{0}; + std::vector> thread_swap_request_slots_; + + // Accessed by IOThread only. + size_t swap_request_id_read_{0}; + struct SlotRetry { + size_t slot; + uintptr_t next_id; + }; + std::vector swap_request_slots_to_retry_; + std::vector threads_to_swap_deferred_; + std::vector threads_to_read_; + std::vector orphans_to_destroy_; + + // Counts for retries related to the lock-free scheme. + // Abnormally high counts could be an indicator of contention.
+ // Access on IOThread only. + size_t swap_request_slots_retry_count_ = 0; + size_t swap_request_slots_retry_retry_count_ = 0; + size_t swap_request_slots_retry_reencounter_count_ = 0; + size_t start_reading_entries_retry_count_ = 0; + size_t tls_total_log_cas_fail_count_ = 0; + size_t tls_total_swap_buffers_slot_retry_count_ = 0; +}; + +Logger& GlobalLogger(); + +/// \brief The generic way to add a log entry. +/// \details Supports all types of logs, which is useful for complex +/// lambdas that may wish to log in multiple places or log something other +/// than a simple summary, detail, or trace entry. +void Log(AsyncLogEntry&& entry); + +/// \brief The convenience proxy a LogSummary lambda uses to write to the +/// summary log. +class AsyncSummary { + public: + explicit AsyncSummary(AsyncLog& async_log) : async_log_(async_log) {} + AsyncLog& async_log() { return async_log_; } + + template + AsyncLog& operator()(Args&&... args) { + async_log_.LogSummary(std::forward(args)...); + return async_log_; + } + + private: + AsyncLog& async_log_; +}; + +/// \brief A helper to simplify adding a summary log entry. +template +void LogSummary(LambdaT&& lambda) { + Log([lambda = std::forward(lambda)](AsyncLog& log) mutable { + AsyncSummary async_summary(log); + lambda(async_summary); + }); +} + +/// \brief The convenience proxy a LogDetail lambda uses to write to the detail +/// log. 
+class AsyncDetail { + public: + explicit AsyncDetail(AsyncLog& async_log) : async_log_(async_log) {} + AsyncLog& async_log() { return async_log_; } + +#if USE_NEW_LOGGING_FORMAT + template + AsyncLog& Log(const std::string& key, const T& value, + const std::string file_name, const unsigned int line_no) { + async_log_.LogDetail(key, value, file_name, line_no); + return async_log_; + } + + template + AsyncLog& LogError(const std::string& key, const T& value, + const std::string file_name, const unsigned int line_no) { + async_log_.FlagError(); + async_log_.LogDetail(key, value, file_name, line_no); + return async_log_; + } + + template + AsyncLog& LogWarning(const std::string& key, const T& value, + const std::string file_name, + const unsigned int line_no) { + async_log_.FlagWarning(); + async_log_.LogDetail(key, value, file_name, line_no); + return async_log_; + } + + template + AsyncLog& LogIntervalStart(const std::string& key, const T& value, + const std::string file_name, + const unsigned int line_no) { + async_log_.LogDetail(key, value, file_name, line_no); + return async_log_; + } + + template + AsyncLog& LogIntervalEnd(const std::string& key, const T& value, + const std::string file_name, + const unsigned int line_no) { + async_log_.LogDetail(key, value, file_name, line_no); + return async_log_; + } +#else + template + AsyncLog& operator()(Args&&... args) { + async_log_.LogDetail(std::forward(args)...); + return async_log_; + } + + template + AsyncLog& Error(Args&&... args) { + async_log_.FlagError(); + async_log_.LogDetail(std::forward(args)...); + return async_log_; + } + + template + AsyncLog& Warning(Args&&... args) { + async_log_.FlagWarning(); + async_log_.LogDetail(std::forward(args)...); + return async_log_; + } +#endif + + private: + AsyncLog& async_log_; +}; + +/// \brief A helper to simplify adding a detail log entry. 
+template +void LogDetail(LambdaT&& lambda) { + Log([lambda = std::forward(lambda), + timestamp = PerfClock::now()](AsyncLog& log) mutable { + log.SetLogDetailTime(timestamp); + AsyncDetail async_detail(log); + lambda(async_detail); + }); +} + +/// \brief The convenience proxy a ScopedTracer lambda uses to write to the +/// detail log. +class AsyncTrace { + public: + explicit AsyncTrace(AsyncLog& async_log) : async_log_(async_log) {} + AsyncLog& async_log() { return async_log_; } + + template + AsyncLog& operator()(Args&&... args) { + async_log_.ScopedTrace(std::forward(args)...); + return async_log_; + } + + private: + AsyncLog& async_log_; +}; + +/// \brief ScopedTracer is an RAII object that traces the start and end +/// of its lifetime. +template +class ScopedTracer { + public: + ScopedTracer(LambdaT&& lambda) + : start_(PerfClock::now()), lambda_(std::forward(lambda)) {} + + ~ScopedTracer() { + Log([start = start_, lambda = std::move(lambda_), + end = PerfClock::now()](AsyncLog& log) { + log.SetScopedTraceTimes(start, end); + AsyncTrace async_trace(log); + lambda(async_trace); + }); + } + + private: + PerfClock::time_point start_; + LambdaT lambda_; +}; + +/// \brief Helper that creates a ScopeTracer with automatic type deduction. +/// \details Helps with automatic template type deduction, which has been +/// supported for functions for a long time. +/// C++17 will support deduction for classes, which will neutralize the utility +/// of a helper function like this. +/// \todo Determine which traces to keep for submission purposes. +template +auto MakeScopedTracer(LambdaT&& lambda) -> ScopedTracer { + return ScopedTracer(std::forward(lambda)); +} + +template +void AsyncLog::LogSummary(const std::string& message, const Args... 
args) { + auto tracer = MakeScopedTracer([message](AsyncTrace& trace) { + std::string sanitized_message = message; + std::replace(sanitized_message.begin(), sanitized_message.end(), '"', '\''); + std::replace(sanitized_message.begin(), sanitized_message.end(), '\n', ';'); + trace("LogSummary", "message", "\"" + sanitized_message + "\""); + }); + std::unique_lock lock(log_mutex_); + *summary_out_ << message; + LogArgs(summary_out_, args...); + *summary_out_ << "\n"; + + if (copy_summary_to_stdout_) { + std::cout << message; + LogArgs(&std::cout, args...); + std::cout << "\n"; + } +} + +#if USE_NEW_LOGGING_FORMAT +template +void AsyncLog::LogDetail(const std::string& key, const T& value, + const std::string file_name, + const unsigned int line_no) { + auto tracer = MakeScopedTracer([key](AsyncTrace& trace) { + std::string sanitized_key = key; + std::replace(sanitized_key.begin(), sanitized_key.end(), '"', '\''); + std::replace(sanitized_key.begin(), sanitized_key.end(), '\n', ';'); + trace("LogDetail", "key", "\"" + sanitized_key + "\""); + }); + std::unique_lock lock(log_mutex_); + std::vector detail_streams{detail_out_, &std::cout}; + if (!copy_detail_to_stdout_) { + detail_streams.pop_back(); + } + auto time_ns = (log_detail_time_ - log_origin_).count(); + for (auto os : detail_streams) { + *os << ":::MLLOG {" + << "\"key\": " << ArgValueTransform(key) << ", " + << "\"value\": " << ArgValueTransform(value) << ", " + << "\"time_ms\": " << ArgValueTransform(time_ns / 1000000ULL) << "." 
+ << std::setfill('0') << std::setw(6) + << ArgValueTransform(time_ns % 1000000ULL) << ", " + << "\"namespace\": \"mlperf::logging\", " + << "\"event_type\": \"POINT_IN_TIME\", " + << "\"metadata\": {" + << "\"is_error\": " << ArgValueTransform(error_flagged_) << ", " + << "\"is_warning\": " << ArgValueTransform(warning_flagged_) << ", " + << "\"file\": \"" << file_name << "\", " + << "\"line_no\": " << ArgValueTransform(line_no) << ", " + << "\"pid\": " << ArgValueTransform(current_pid_) << ", " + << "\"tid\": " << ArgValueTransform(current_tid_) << "}}\n"; + if (error_flagged_) { + os->flush(); + } + } + error_flagged_ = false; + warning_flagged_ = false; +} +#else +template +void AsyncLog::LogDetail(const std::string& message, const Args... args) { + auto tracer = MakeScopedTracer([message](AsyncTrace& trace) { + std::string sanitized_message = message; + std::replace(sanitized_message.begin(), sanitized_message.end(), '"', '\''); + std::replace(sanitized_message.begin(), sanitized_message.end(), '\n', ';'); + trace("LogDetail", "message", "\"" + sanitized_message + "\""); + }); + std::unique_lock lock(log_mutex_); + std::vector detail_streams{detail_out_, &std::cout}; + if (!copy_detail_to_stdout_) { + detail_streams.pop_back(); + } + for (auto os : detail_streams) { + *os << "\"pid\": " << current_pid_ << ", " + << "\"tid\": " << current_tid_ << ", " + << "\"ts\": " << (log_detail_time_ - log_origin_).count() << "ns : "; + if (error_flagged_) { + *os << "ERROR : "; + } else if (warning_flagged_) { + *os << "WARNING : "; + } + *os << message; + LogArgs(os, args...); + *os << "\n"; + if (error_flagged_) { + os->flush(); + } + } + error_flagged_ = false; + warning_flagged_ = false; +} +#endif + +} // namespace logging + +// Export some things out of the logging namespace to simplify call sites. 
+ +const auto GlobalLogger = logging::GlobalLogger; +const auto Log = logging::Log; + +using PerfClock = logging::PerfClock; + +using LogBinaryAsHexString = logging::LogBinaryAsHexString; + +using AsyncLog = logging::AsyncLog; + +using AsyncSummary = logging::AsyncSummary; +template +void LogSummary(LambdaT&& lambda) { + logging::LogSummary(std::forward(lambda)); +} + +using AsyncDetail = logging::AsyncDetail; +template +void LogDetail(LambdaT&& lambda) { + logging::LogDetail(std::forward(lambda)); +} + +using AsyncTrace = logging::AsyncTrace; + +template +using ScopedTracer = logging::ScopedTracer; + +template +auto MakeScopedTracer(LambdaT&& lambda) -> ScopedTracer { + return ScopedTracer(std::forward(lambda)); +} + +} // namespace mlperf + +#endif // MLPERF_LOADGEN_LOGGING_H_ diff --git a/benchmarks/rnnt/ootb/inference/loadgen/query_sample.h b/benchmarks/rnnt/ootb/inference/loadgen/query_sample.h new file mode 100644 index 0000000..9a0bb37 --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/loadgen/query_sample.h @@ -0,0 +1,61 @@ +/* Copyright 2019 The MLPerf Authors. All Rights Reserved. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +/// \file +/// \brief Defines the structs involved in issuing a query and responding to +/// a query. +/// \details These are broken out into their own files since they are exposed +/// as part of the C API and we want to avoid C clients including C++ code. 
+ +#ifndef MLPERF_LOADGEN_QUERY_SAMPLE_H_ +#define MLPERF_LOADGEN_QUERY_SAMPLE_H_ + +#include +#include + +namespace mlperf { + +/// \addtogroup LoadgenAPI +/// @{ + +/// \brief Represents a unique identifier for a sample of an issued query. +/// \details As currently implemented, the id is a pointer to an internal +/// loadgen struct whose value will never be zero/null. +typedef uintptr_t ResponseId; +constexpr ResponseId kResponseIdReserved = 0; + +/// \brief An index into the QuerySampleLibrary corresponding to a +/// single sample. +typedef size_t QuerySampleIndex; + +/// \brief Represents the smallest unit of input inference can run on. +/// A query consists of one or more samples. +struct QuerySample { + ResponseId id; + QuerySampleIndex index; +}; + +/// \brief Represents a single response to QuerySample +struct QuerySampleResponse { + ResponseId id; + uintptr_t data; + size_t size; ///< Size in bytes. +}; + +/// \brief A latency in nanoseconds, as recorded by the loadgen. +typedef int64_t QuerySampleLatency; + +/// @} + +} // namespace mlperf + +#endif // MLPERF_LOADGEN_QUERY_SAMPLE_H_ diff --git a/benchmarks/rnnt/ootb/inference/loadgen/query_sample_library.h b/benchmarks/rnnt/ootb/inference/loadgen/query_sample_library.h new file mode 100644 index 0000000..85df96f --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/loadgen/query_sample_library.h @@ -0,0 +1,75 @@ +/* Copyright 2019 The MLPerf Authors. All Rights Reserved. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +/// \file +/// \brief Defines the QuerySampleLibrary interface. + +#ifndef MLPERF_LOADGEN_QUERY_SAMPLE_LIBRARY_H +#define MLPERF_LOADGEN_QUERY_SAMPLE_LIBRARY_H + +#include +#include +#include + +#include "query_sample.h" + +namespace mlperf { + +/// \addtogroup LoadgenAPI +/// @{ + +/// \brief The interface a client implements to coordinate with the loadgen +/// which samples should be loaded. +class QuerySampleLibrary { + public: + virtual ~QuerySampleLibrary() {} + + /// \brief A human readable name for the model. + virtual const std::string& Name() const = 0; + + /// \brief Total number of samples in library. + virtual size_t TotalSampleCount() = 0; + + /// \brief The number of samples that are guaranteed to fit in RAM. + virtual size_t PerformanceSampleCount() = 0; + + /// \brief Loads the requested query samples into memory. + /// \details Paired with calls to UnloadSamplesFromRam. + /// In the MultiStream scenarios: + /// * Samples will appear more than once. + /// * SystemUnderTest::IssueQuery will only be called with a set of samples + /// that are neighbors in the vector of samples here, which helps + /// SUTs that need the queries to be contiguous. + /// In all other scenarios: + /// * A previously loaded sample will not be loaded again. + virtual void LoadSamplesToRam( + const std::vector& samples) = 0; + + /// \brief Unloads the requested query samples from memory. + /// \details In the MultiStream scenarios: + /// * Samples may be unloaded the same number of times they were loaded; + /// however, if the implementation de-dups loaded samples rather than + /// loading samples into contiguous memory, it may unload a sample the + /// first time they see it unloaded without a refcounting scheme, ignoring + /// subsequent unloads. A refcounting scheme would also work, but is not + /// a requirement. 
+ /// In all other scenarios: + /// * A previously unloaded sample will not be unloaded again. + virtual void UnloadSamplesFromRam( + const std::vector& samples) = 0; +}; + +/// @} + +} // namespace mlperf + +#endif // MLPERF_LOADGEN_QUERY_SAMPLE_LIBRARY_H diff --git a/benchmarks/rnnt/ootb/inference/loadgen/setup.py b/benchmarks/rnnt/ootb/inference/loadgen/setup.py new file mode 100644 index 0000000..a4a88d9 --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/loadgen/setup.py @@ -0,0 +1,79 @@ +# Copyright 2019 The MLPerf Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================= + +## \file +# \brief MLPerf Inference LoadGen python module setup. +# \details Creates a module that python can import. +# All source files are compiled by python's C++ toolchain without depending +# on a loadgen lib. +# +# This setup.py can be used stand-alone, without the use of an external +# build system. This will pollute your source tree with output files +# and binaries. Use one of the gn build targets instead if you want +# to avoid polluting the source tree.
+ +from setuptools import Extension +from setuptools import setup +from version_generator import generate_loadgen_version_definitions + +generated_version_source_filename = "generated/version_generated.cc" +generate_loadgen_version_definitions(generated_version_source_filename, ".") + +public_headers = [ + "loadgen.h", + "query_sample.h", + "query_sample_library.h", + "system_under_test.h", + "test_settings.h", +] + +lib_headers = [ + "logging.h", + "test_settings_internal.h", + "trace_generator.h", + "utils.h", + "version.h", +] + +lib_sources = [ + "issue_query_controller.cc", + "loadgen.cc", + "logging.cc", + "test_settings_internal.cc", + "utils.cc", + "version.cc", +] + +lib_bindings = [ + "bindings/python_api.cc", +] + +mlperf_loadgen_headers = public_headers + lib_headers +mlperf_loadgen_sources_no_gen = lib_sources + lib_bindings +mlperf_loadgen_sources = (mlperf_loadgen_sources_no_gen + + [generated_version_source_filename]) + +mlperf_loadgen_module = Extension( + "mlperf_loadgen", + define_macros=[("MAJOR_VERSION", "1"), ("MINOR_VERSION", "1")], + include_dirs=[".", "../third_party/pybind/include"], + sources=mlperf_loadgen_sources, + depends=mlperf_loadgen_headers) + +setup(name="mlperf_loadgen", + version="1.1", + description="MLPerf Inference LoadGen python bindings", + url="https://mlperf.org", + ext_modules=[mlperf_loadgen_module]) diff --git a/benchmarks/rnnt/ootb/inference/loadgen/system_under_test.h b/benchmarks/rnnt/ootb/inference/loadgen/system_under_test.h new file mode 100644 index 0000000..4b98e06 --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/loadgen/system_under_test.h @@ -0,0 +1,72 @@ +/* Copyright 2019 The MLPerf Authors. All Rights Reserved. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +/// \file +/// \brief Defines the SystemUnderTest interface. + +#ifndef MLPERF_LOADGEN_SYSTEM_UNDER_TEST_H +#define MLPERF_LOADGEN_SYSTEM_UNDER_TEST_H + +#include +#include + +#include "query_sample.h" + +namespace mlperf { + +/// \addtogroup LoadgenAPI +/// @{ + +/// \brief The interface a client implements for the loadgen to test. +/// \todo Add hook for an untimed warm up period for the SUT. +/// \todo Add hook for an untimed warm up period for the loadgen logic. +/// \todo Support power hooks for cool-down period before running performance +/// traffic. +/// \todo Support power hooks for correlating test timeline with power +/// measurement timeline. +class SystemUnderTest { + public: + virtual ~SystemUnderTest() {} + + /// \brief A human-readable string for logging purposes. + virtual const std::string& Name() const = 0; + + /// \brief Lets the loadgen issue N samples to the SUT. + /// \details The SUT may either a) return immediately and signal completion + /// at a later time on another thread or b) it may block and signal + /// completion on the current stack. The load generator will handle both + /// cases properly. + /// Note: The data for neighboring samples may or may not be contiguous + /// depending on the scenario. + virtual void IssueQuery(const std::vector& samples) = 0; + + /// \brief Called immediately after the last call to IssueQuery + /// in a series is made.
+ /// \details This doesn't necessarily signify the end of the + /// test since there may be multiple series involved during a test; for + /// example in accuracy mode. + /// Clients can use this to flush any deferred queries immediately, rather + /// than waiting for some timeout. + /// This is especially useful in the server scenario. + virtual void FlushQueries() = 0; + + /// \brief Reports the raw latency results to the SUT of each sample issued as + /// recorded by the load generator. Units are nanoseconds. + virtual void ReportLatencyResults( + const std::vector& latencies_ns) = 0; +}; + +/// @} + +} // namespace mlperf + +#endif // MLPERF_LOADGEN_SYSTEM_UNDER_TEST_H diff --git a/benchmarks/rnnt/ootb/inference/loadgen/test_settings.h b/benchmarks/rnnt/ootb/inference/loadgen/test_settings.h new file mode 100644 index 0000000..d656d1a --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/loadgen/test_settings.h @@ -0,0 +1,352 @@ +/* Copyright 2019 The MLPerf Authors. All Rights Reserved. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +/// \file +/// \brief Provides ways for a client to change the behavior and +/// constraints of the load generator. +/// \details Note: The MLPerf specification takes precedent over any of the +/// comments in this file if there are inconsistencies in regards to how the +/// loadgen *should* work. +/// The comments in this file are indicative of the loadgen implementation. 
+ +#ifndef MLPERF_LOADGEN_TEST_SETTINGS_H +#define MLPERF_LOADGEN_TEST_SETTINGS_H + +#include +#include + +namespace mlperf { + +/// \addtogroup LoadgenAPI +/// @{ + +/// \addtogroup LoadgenAPITestSettings Test Settings +/// \brief This page contains a description of all the scenarios, modes, +/// and log settings as implemented by the LoadGen. +/// @{ + +/// +/// \enum TestScenario +/// * **SingleStream** +/// + Issues queries containing a single sample. +/// + The next query is only issued once the previous one has completed. +/// + Internal LoadGen latency between queries is not included in the +/// latency results. +/// + **Final performance result is:** a percentile of the latency. +/// * **MultiStream** +/// + Attempts to issue queries containing N samples each at a uniform rate. +/// - N is specified by \link +/// mlperf::TestSettings::multi_stream_samples_per_query +/// multi_stream_samples_per_query \endlink. +/// - The rate is specified by \link +/// mlperf::TestSettings::multi_stream_target_qps multi_stream_target_qps +/// \endlink. +/// + The loadgen will skip sending for one interval if the SUT falls behind +/// too much. +/// + By default, only a single query may be outstanding at a time. +/// + The samples of each query are guaranteed to be contiguous with respect +/// to the order they were loaded in the QuerySampleLibrary. +/// + Latency is tracked and reported on a per-query and per-sample basis. +/// + The latency of a query is the maximum latency of its samples, including +/// any cross-thread communication within the loadgen. +/// - If the loadgen has to skip producing for an interval because it +/// couldn't detect that all samples were completed in time, then the +/// query will not be considered meeting the latency constraint. +/// - This is fair since the loadgen skipping production will reduce +/// pressure on the SUT and should be reflected negatively in the +/// latency percentiles. 
+/// - The last query is special cased since there isn't a subsequent query +/// to delay. For the last query, the query latency without cross-thread +/// communication is used. +/// + **Final performance result is:** PASS if a percentile of the per-query +/// latencies is under a given threshold. FAIL otherwise. +/// - The latency constraint is specified by the function ( +/// \link mlperf::TestSettings::multi_stream_max_async_queries +/// multi_stream_max_async_queries \endlink / +/// \link mlperf::TestSettings::multi_stream_target_qps +/// multi_stream_target_qps \endlink). +/// * **MultiStreamFree** +/// + Behaves similar to MultiStream, with the exceptions that it: +/// - Allows up to N async queries where N is limited only by the latency +/// target. +/// - Issues queries at a variable rate corresponding to when the N'th +/// oldest query completes. +/// + Not an official MLPerf scenario, but is maintained for evaluation +/// and testing purposes. +/// + Compared to MultiStream, there is no frequency quantization, which +/// allows the results to reflect small performance improvements. +/// + **Final performance result is:** PASS if a percentile of the per-query +/// latencies is under a given threshold. FAIL otherwise. +/// - The latency constraint is specified by +/// \link mlperf::TestSettings::multi_stream_target_latency_ns +/// multi_stream_target_latency_ns \endlink. +/// * **Server** +/// + Sends queries with a single sample. +/// + Queries have a random poisson (non-uniform) arrival rate that, when +/// averaged, hits the target QPS. +/// + There is no limit on the number of outstanding queries, as long as +/// the latency constraints are met. +/// + **Final performance result is:** PASS if a percentile of the latency +/// is under a given threshold. FAIL otherwise. +/// - Threshold is specified by \link +/// mlperf::TestSettings::server_target_latency_ns server_target_latency_ns +/// \endlink.
+/// * **Offline** +/// + Sends all N samples to the SUT inside of a single query. +/// + The samples of the query are guaranteed to be contiguous with respect +/// to the order they were loaded in the QuerySampleLibrary. +/// + **Final performance result is:** samples per second. +/// +enum class TestScenario { + SingleStream, + MultiStream, + MultiStreamFree, + Server, + Offline, +}; + +/// +/// \enum TestMode +/// * **SubmissionRun** +/// + Runs accuracy mode followed by performance mode. +/// + TODO: Implement further requirements as decided by MLPerf. +/// * **AccuracyOnly** +/// + Runs each sample from the QSL through the SUT at least once. +/// + Outputs responses to an accuracy json that can be parsed by a model + +/// sample library specific script. +/// * **PerformanceOnly** +/// + Runs the performance traffic for the given scenario, as described in +/// the comments for TestScenario. +/// * **FindPeakPerformance** +/// + Determines the maximum QPS for the Server scenario. +/// + Determines the maximum samples per query for the MultiStream and +/// MultiStreamFree scenarios. +/// + Not applicable for SingleStream or Offline scenarios. +/// +enum class TestMode { + SubmissionRun, + AccuracyOnly, + PerformanceOnly, + FindPeakPerformance, +}; + +/// +/// \brief Top-level struct specifying the modes and parameters of the test. +/// +struct TestSettings { + TestScenario scenario = TestScenario::SingleStream; + TestMode mode = TestMode::PerformanceOnly; + + // ================================== + /// \name SingleStream-specific + /**@{*/ + /// \brief A hint used by the loadgen to pre-generate enough samples to + /// meet the minimum test duration. + uint64_t single_stream_expected_latency_ns = 1000000; + /// \brief The latency percentile reported as the final result.
+ double single_stream_target_latency_percentile = 0.90; + /**@}*/ + + // ================================== + /// \name MultiStream-specific + /**@{*/ + /// \brief The uniform rate at which queries are produced. + /// The latency constraint for the MultiStream scenario is equal to + /// (multi_stream_max_async_queries / multi_stream_target_qps). + /// This does not apply to the MultiStreamFree scenario, + /// except as a hint for how many queries to pre-generate. + double multi_stream_target_qps = 10.0; + /// \brief The latency constraint for the MultiStreamFree scenario. + /// Does not apply to the MultiStream scenario, whose target latency + /// is a function of the QPS and max_async_queries. + uint64_t multi_stream_target_latency_ns = 100000000; + /// \brief The latency percentile for multistream mode. + double multi_stream_target_latency_percentile = 0.9; + /// \brief The number of samples in each query. + /// \details note: This field is used as a FindPeakPerformance's lower bound. + /// When you run FindPeakPerformanceMode, you should make sure that this value + /// satisfies performance constraints. + int multi_stream_samples_per_query = 4; + /// \brief The maximum number of queries, to which a SUT has not responded, + /// before the loadgen will throttle issuance of new queries. + int multi_stream_max_async_queries = 1; + /**@}*/ + + // ================================== + /// \name Server-specific + /**@{*/ + /// \brief The average QPS of the poisson distribution. + /// \details note: This field is used as a FindPeakPerformance's lower bound. + /// When you run FindPeakPerformanceMode, you should make sure that this value + /// satisfies performance constraints. + double server_target_qps = 1; + /// \brief The latency constraint for the Server scenario. + uint64_t server_target_latency_ns = 100000000; + /// \brief The latency percentile for server mode. This value is combined with + /// server_target_latency_ns to determine if a run is valid. 
+ /// \details 99% is the default value, which is correct for image models. GNMT + /// should be set to 0.97 (97%) in v0.5.(As always, check the policy page for + /// updated values for the benchmark you are running.) + double server_target_latency_percentile = 0.99; + /// \brief If this flag is set to true, LoadGen will combine samples from + /// multiple queries into a single query if their scheduled issue times have + /// passed. + bool server_coalesce_queries = false; + /// \brief The decimal places of QPS precision used to terminate + /// FindPeakPerformance mode. + int server_find_peak_qps_decimals_of_precision = 1; + /// \brief A step size (as a fraction of the QPS) used to widen the lower and + /// upper bounds to find the initial boundaries of binary search. + double server_find_peak_qps_boundary_step_size = 1; + /// \brief The maximum number of outstanding queries to allow before earlying + /// out from a performance run. Useful for performance tuning and speeding up + /// the FindPeakPerformance mode. + uint64_t server_max_async_queries = 0; ///< 0: Infinity. + /// \brief The number of issue query threads that will be registered and used + /// to call SUT's IssueQuery(). If this is 0, the same thread calling + /// StartTest() will be used to call IssueQuery(). See also + /// mlperf::RegisterIssueQueryThread(). + uint64_t server_num_issue_query_threads = 0; + /**@}*/ + + // ================================== + /// \name Offline-specific + /**@{*/ + /// \brief Specifies the QPS the SUT expects to hit for the offline load. + /// The loadgen generates 10% more queries than it thinks it needs to meet + /// the minimum test duration. + double offline_expected_qps = 1; + /**@}*/ + + // ================================== + /// \name Test duration + /// The test runs until **both** min duration and min query count have been + /// met. However, it will exit before that point if **either** max duration or + /// max query count have been reached. 
+ /**@{*/ + uint64_t min_duration_ms = 10000; + uint64_t max_duration_ms = 0; ///< 0: Infinity. + uint64_t min_query_count = 100; + uint64_t max_query_count = 0; ///< 0: Infinity. + /**@}*/ + + // ================================== + /// \name Random number generation + /// There are 4 separate seeds, so each dimension can be changed + /// independently. + /**@{*/ + /// \brief Affects which subset of samples from the QSL are chosen for + /// the performance sample set and accuracy sample sets. + uint64_t qsl_rng_seed = 0; + /// \brief Affects the order in which samples from the performance set will + /// be included in queries. + uint64_t sample_index_rng_seed = 0; + /// \brief Affects the poisson arrival process of the Server scenario. + /// \details Different seeds will appear to "jitter" the queries + /// differently in time, but should not affect the average issued QPS. + uint64_t schedule_rng_seed = 0; + /// \brief Affects which samples have their query returns logged to the + /// accuracy log in performance mode. + uint64_t accuracy_log_rng_seed = 0; + + /// \brief Probability of the query response of a sample being logged to the + /// accuracy log in performance mode + double accuracy_log_probability = 0.0; + + /// \brief Target number of samples that will have their results printed to + /// accuracy log in performance mode for compliance testing + uint64_t accuracy_log_sampling_target = 0; + + /// \brief Load mlperf parameter config from file. + int FromConfig(const std::string &path, const std::string &model, + const std::string &scenario); + /**@}*/ + + // ================================== + /// \name Performance Sample modifiers + /// \details These settings can be used to Audit Performance mode runs. + /// In order to detect sample caching by SUT, performance of runs when only + /// unique queries (with non-repeated samples) are issued can be compared with + /// that when the same query is repeatedly issued. 
+ /**@{*/ + /// \brief Prints measurement interval start and stop timestamps to std::cout + /// for the purpose of comparison against an external timer + bool print_timestamps = false; + /// \brief Allows issuing only unique queries in Performance mode of any + /// scenario \details This can be used to send non-repeat & hence unique + /// samples to SUT + bool performance_issue_unique = false; + /// \brief If true, the same query is chosen repeatedly for Inference. + /// In offline scenario, the query is filled with the same sample. + bool performance_issue_same = false; + /// \brief Offset to control which sample is repeated in + /// performance_issue_same mode. + /// Value should be within [0, performance_sample_count) + uint64_t performance_issue_same_index = 0; + /// \brief Overrides QSL->PerformanceSampleCount() when non-zero + uint64_t performance_sample_count_override = 0; + /**@}*/ +}; + +/// +/// \enum LoggingMode +/// Specifies how and when logging should be sampled and stringified at +/// runtime. +/// * **AsyncPoll** +/// + Logs are serialized and output on an IOThread that polls for new logs at +/// a fixed interval. This is the only mode currently implemented. +/// * **EndOfTestOnly** +/// + TODO: Logs are serialized and output only at the end of the test. +/// * **Synchronous** +/// + TODO: Logs are serialized and output inline. +enum class LoggingMode { + AsyncPoll, + EndOfTestOnly, + Synchronous, +}; + +/// +/// \brief Specifies where log outputs should go. +/// +/// By default, the loadgen outputs its log files to outdir and +/// modifies the filenames of its logs with a prefix and suffix. +/// Filenames will take the form: +/// "<outdir>/<datetime><prefix>summary<suffix>.txt" +/// +/// Affordances for outputting logs to stdout are also provided.
+/// +struct LogOutputSettings { + std::string outdir = "."; + std::string prefix = "mlperf_log_"; + std::string suffix = ""; + bool prefix_with_datetime = false; + bool copy_detail_to_stdout = false; + bool copy_summary_to_stdout = false; +}; + +/// +/// \brief Top-level log settings. +/// +struct LogSettings { + LogOutputSettings log_output; + LoggingMode log_mode = LoggingMode::AsyncPoll; + uint64_t log_mode_async_poll_interval_ms = 1000; ///< TODO: Implement this. + bool enable_trace = true; +}; + +/// @} + +/// @} + +} // namespace mlperf + +#endif // MLPERF_LOADGEN_TEST_SETTINGS_H diff --git a/benchmarks/rnnt/ootb/inference/loadgen/test_settings_internal.cc b/benchmarks/rnnt/ootb/inference/loadgen/test_settings_internal.cc new file mode 100644 index 0000000..89c68b4 --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/loadgen/test_settings_internal.cc @@ -0,0 +1,679 @@ +/* Copyright 2019 The MLPerf Authors. All Rights Reserved. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "test_settings_internal.h" + +#include +#include +#include +#include + +#include "logging.h" +#include "utils.h" + +namespace mlperf { +namespace loadgen { + +TestSettingsInternal::TestSettingsInternal( + const TestSettings &requested_settings, size_t qsl_performance_sample_count) + : requested(requested_settings), + scenario(requested.scenario), + mode(requested.mode), + samples_per_query(1), + target_qps(1), + max_async_queries(0), + target_duration(std::chrono::milliseconds(requested.min_duration_ms)), + min_duration(std::chrono::milliseconds(requested.min_duration_ms)), + max_duration(std::chrono::milliseconds(requested.max_duration_ms)), + min_query_count(requested.min_query_count), + max_query_count(requested.max_query_count), + min_sample_count(0), + qsl_rng_seed(requested.qsl_rng_seed), + sample_index_rng_seed(requested.sample_index_rng_seed), + schedule_rng_seed(requested.schedule_rng_seed), + accuracy_log_rng_seed(requested.accuracy_log_rng_seed), + accuracy_log_probability(requested.accuracy_log_probability), + accuracy_log_sampling_target(requested.accuracy_log_sampling_target), + print_timestamps(requested.print_timestamps), + performance_issue_unique(requested.performance_issue_unique), + performance_issue_same(requested.performance_issue_same), + performance_issue_same_index(requested.performance_issue_same_index), + performance_sample_count(0) { + // Target QPS, target latency, and max_async_queries. 
+ switch (requested.scenario) { + case TestScenario::SingleStream: + target_qps = static_cast(std::nano::den) / + requested.single_stream_expected_latency_ns; + max_async_queries = 1; + target_latency_percentile = + requested.single_stream_target_latency_percentile; + break; + case TestScenario::MultiStream: { + max_async_queries = requested.multi_stream_max_async_queries; + target_qps = requested.multi_stream_target_qps; + double target_latency_seconds = + max_async_queries / requested.multi_stream_target_qps; + target_latency = + SecondsToDuration(target_latency_seconds); + target_latency_percentile = + requested.multi_stream_target_latency_percentile; + break; + } + case TestScenario::MultiStreamFree: + max_async_queries = requested.multi_stream_max_async_queries; + target_qps = requested.multi_stream_target_qps; + target_latency = + std::chrono::nanoseconds(requested.multi_stream_target_latency_ns); + target_latency_percentile = + requested.multi_stream_target_latency_percentile; + break; + case TestScenario::Server: + if (requested.server_target_qps >= 0.0) { + target_qps = requested.server_target_qps; + } else { + LogDetail([ + server_target_qps = requested.server_target_qps, + target_qps = target_qps + ](AsyncDetail & detail) { +#if USE_NEW_LOGGING_FORMAT + std::stringstream ss; + ss << "Invalid value for server_target_qps requested." 
+ << " requested: " << server_target_qps + << " using: " << target_qps; + MLPERF_LOG_ERROR(detail, "error_invalid_test_settings", ss.str()); +#else + detail.Error("Invalid value for server_target_qps requested.", + "requested", server_target_qps, "using", target_qps); +#endif + }); + } + target_latency = + std::chrono::nanoseconds(requested.server_target_latency_ns); + target_latency_percentile = requested.server_target_latency_percentile; + max_async_queries = requested.server_max_async_queries; + break; + case TestScenario::Offline: + // target_latency_percentile is not used in Offline, but set it to + // 0.99 anyway to avoid garbage value. + target_latency_percentile = 0.99; + if (requested.offline_expected_qps >= 0.0) { + target_qps = requested.offline_expected_qps; + } else { + LogDetail([ + offline_expected_qps = requested.offline_expected_qps, + target_qps = target_qps + ](AsyncDetail & detail) { +#if USE_NEW_LOGGING_FORMAT + std::stringstream ss; + ss << "Invalid value for offline_expected_qps requested." + << " requested: " << offline_expected_qps + << " using: " << target_qps; + MLPERF_LOG_ERROR(detail, "error_invalid_test_settings", ss.str()); +#else + detail.Error("Invalid value for offline_expected_qps requested.", + "requested", offline_expected_qps, "using", target_qps); +#endif + }); + } + max_async_queries = 1; + break; + } + + // Performance Sample Count: TestSettings override QSL -> + // PerformanceSampleCount + performance_sample_count = (requested.performance_sample_count_override == 0) + ? qsl_performance_sample_count + : requested.performance_sample_count_override; + + // Samples per query. + if (requested.scenario == TestScenario::MultiStream || + requested.scenario == TestScenario::MultiStreamFree) { + samples_per_query = requested.multi_stream_samples_per_query; + } + + // In the offline scenario, coalesce all queries into a single query. 
+ if (requested.scenario == TestScenario::Offline) { + // TODO: Should the spec require a max duration for large query counts? + // kSlack is used to make sure we generate enough samples for the SUT + // to take longer than than the minimum test duration required by the + // MLPerf spec. + constexpr double kSlack = 1.1; + uint64_t target_sample_count = + kSlack * DurationToSeconds(target_duration) * target_qps; + samples_per_query = + (requested.performance_issue_unique || requested.performance_issue_same) + ? performance_sample_count + : std::max(min_query_count, target_sample_count); + min_query_count = 1; + target_duration = std::chrono::milliseconds(0); + } + + min_sample_count = min_query_count * samples_per_query; + + // Validate TestSettings + if (requested.performance_issue_same && + (requested.performance_issue_same_index >= performance_sample_count)) { + LogDetail([ + performance_issue_same_index = requested.performance_issue_same_index, + performance_sample_count = performance_sample_count + ](AsyncDetail & detail) { +#if USE_NEW_LOGGING_FORMAT + std::stringstream ss; + ss << "Sample Idx to be repeated in performance_issue_same mode" + << " cannot be greater than loaded performance_sample_count." 
+ << " performance_issue_same_index: " + << performance_issue_same_index + << " performance_sample_count: " + << performance_sample_count; + MLPERF_LOG_ERROR(detail, "error_invalid_test_settings", ss.str()); +#else + detail.Error( + "Sample Idx to be repeated in performance_issue_same mode" + " cannot be greater than loaded performance_sample_count.", + "performance_issue_same_index", performance_issue_same_index, + "performance_sample_count", performance_sample_count); +#endif + }); + } + + if (requested.performance_issue_unique && requested.performance_issue_same) { + LogDetail([ + performance_issue_unique = requested.performance_issue_unique, + performance_issue_same = requested.performance_issue_same + ](AsyncDetail & detail) { +#if USE_NEW_LOGGING_FORMAT + std::stringstream ss; + ss << "Performance_issue_unique and performance_issue_same, both" + << " cannot be true at the same time." + << " performance_issue_unique: " + << performance_issue_unique + << " performance_issue_same: " + << performance_issue_same; + MLPERF_LOG_ERROR(detail, "error_invalid_test_settings", ss.str()); +#else + detail.Error( + "Performance_issue_unique and performance_issue_same, both" + " cannot be true at the same time.", + "performance_issue_unique", performance_issue_unique, + "performance_issue_same", performance_issue_same); +#endif + }); + } +} + +std::string ToString(TestScenario scenario) { + switch (scenario) { +#if USE_NEW_LOGGING_FORMAT + case TestScenario::SingleStream: + return "SingleStream"; + case TestScenario::MultiStream: + return "MultiStream"; + case TestScenario::MultiStreamFree: + return "MultiStreamFree"; +#else + case TestScenario::SingleStream: + return "Single Stream"; + case TestScenario::MultiStream: + return "Multi Stream"; + case TestScenario::MultiStreamFree: + return "Multi Stream Free"; +#endif + case TestScenario::Server: + return "Server"; + case TestScenario::Offline: + return "Offline"; + } + assert(false); + return "InvalidScenario"; +} + 
+std::string ToString(TestMode mode) { + switch (mode) { +#if USE_NEW_LOGGING_FORMAT + case TestMode::SubmissionRun: + return "SubmissionRun"; + case TestMode::AccuracyOnly: + return "AccuracyOnly"; + case TestMode::PerformanceOnly: + return "PerformanceOnly"; + case TestMode::FindPeakPerformance: + return "FindPeakPerformance"; +#else + case TestMode::SubmissionRun: + return "Submission"; + case TestMode::AccuracyOnly: + return "Accuracy"; + case TestMode::PerformanceOnly: + return "Performance"; + case TestMode::FindPeakPerformance: + return "Find Peak Performance"; +#endif + } + assert(false); + return "InvalidMode"; +} + +void LogRequestedTestSettings(const TestSettings &s) { + LogDetail([s](AsyncDetail &detail) { +#if USE_NEW_LOGGING_FORMAT + MLPERF_LOG(detail, "requested_scenario", ToString(s.scenario)); + MLPERF_LOG(detail, "requested_test_mode", ToString(s.mode)); + + // Scenario-specific + switch (s.scenario) { + case TestScenario::SingleStream: + MLPERF_LOG(detail, "requested_single_stream_expected_latency_ns", s.single_stream_expected_latency_ns); + MLPERF_LOG(detail, "requested_single_stream_target_latency_percentile", s.single_stream_target_latency_percentile); + break; + case TestScenario::MultiStream: + case TestScenario::MultiStreamFree: + MLPERF_LOG(detail, "requested_multi_stream_target_qps", s.multi_stream_target_qps); + MLPERF_LOG(detail, "requested_multi_stream_target_latency_ns", s.multi_stream_target_latency_ns); + MLPERF_LOG(detail, "requested_multi_stream_target_latency_percentile", s.multi_stream_target_latency_percentile); + MLPERF_LOG(detail, "requested_multi_stream_samples_per_query", s.multi_stream_samples_per_query); + MLPERF_LOG(detail, "requested_multi_stream_max_async_queries", s.multi_stream_max_async_queries); + break; + case TestScenario::Server: + MLPERF_LOG(detail, "requested_server_target_qps", s.server_target_qps); + MLPERF_LOG(detail, "requested_server_target_latency_ns", s.server_target_latency_ns); + MLPERF_LOG(detail, 
"requested_server_target_latency_percentile", s.server_target_latency_percentile); + MLPERF_LOG(detail, "requested_server_coalesce_queries", s.server_coalesce_queries); + MLPERF_LOG(detail, "requested_server_find_peak_qps_decimals_of_precision", s.server_find_peak_qps_decimals_of_precision); + MLPERF_LOG(detail, "requested_server_find_peak_qps_boundary_step_size", s.server_find_peak_qps_boundary_step_size); + MLPERF_LOG(detail, "requested_server_max_async_queries", s.server_max_async_queries); + MLPERF_LOG(detail, "requested_server_num_issue_query_threads", s.server_num_issue_query_threads); + break; + case TestScenario::Offline: + MLPERF_LOG(detail, "requested_offline_expected_qps", s.offline_expected_qps); + break; + } + + // Overrides + MLPERF_LOG(detail, "requested_min_duration_ms", s.min_duration_ms); + MLPERF_LOG(detail, "requested_max_duration_ms", s.max_duration_ms); + MLPERF_LOG(detail, "requested_min_query_count", s.min_query_count); + MLPERF_LOG(detail, "requested_max_query_count", s.max_query_count); + MLPERF_LOG(detail, "requested_qsl_rng_seed", s.qsl_rng_seed); + MLPERF_LOG(detail, "requested_sample_index_rng_seed", s.sample_index_rng_seed); + MLPERF_LOG(detail, "requested_schedule_rng_seed", s.schedule_rng_seed); + MLPERF_LOG(detail, "requested_accuracy_log_rng_seed", s.accuracy_log_rng_seed); + MLPERF_LOG(detail, "requested_accuracy_log_probability", s.accuracy_log_probability); + MLPERF_LOG(detail, "requested_accuracy_log_sampling_target", s.accuracy_log_sampling_target); + MLPERF_LOG(detail, "requested_print_timestamps", s.print_timestamps); + MLPERF_LOG(detail, "requested_performance_issue_unique", s.performance_issue_unique); + MLPERF_LOG(detail, "requested_performance_issue_same", s.performance_issue_same); + MLPERF_LOG(detail, "requested_performance_issue_same_index", s.performance_issue_same_index); + MLPERF_LOG(detail, "requested_performance_sample_count_override", s.performance_sample_count_override); +#else + detail(""); + 
detail("Requested Settings:"); + detail("Scenario : " + ToString(s.scenario)); + detail("Test mode : " + ToString(s.mode)); + + // Scenario-specific + switch (s.scenario) { + case TestScenario::SingleStream: + detail("single_stream_expected_latency_ns : ", + s.single_stream_expected_latency_ns); + detail("single_stream_target_latency_percentile : ", + s.single_stream_target_latency_percentile); + break; + case TestScenario::MultiStream: + case TestScenario::MultiStreamFree: + detail("multi_stream_target_qps : ", s.multi_stream_target_qps); + detail("multi_stream_target_latency_ns : ", + s.multi_stream_target_latency_ns); + detail("multi_stream_target_latency_percentile : ", + s.multi_stream_target_latency_percentile); + detail("multi_stream_samples_per_query : ", + s.multi_stream_samples_per_query); + detail("multi_stream_max_async_queries : ", + s.multi_stream_max_async_queries); + break; + case TestScenario::Server: + detail("server_target_qps : ", s.server_target_qps); + detail("server_target_latency_ns : ", s.server_target_latency_ns); + detail("server_target_latency_percentile : ", + s.server_target_latency_percentile); + detail("server_coalesce_queries : ", s.server_coalesce_queries); + detail("server_find_peak_qps_decimals_of_precision : ", + s.server_find_peak_qps_decimals_of_precision); + detail("server_find_peak_qps_boundary_step_size : ", + s.server_find_peak_qps_boundary_step_size); + detail("server_max_async_queries : ", s.server_max_async_queries); + detail("server_num_issue_query_threads : ", + s.server_num_issue_query_threads); + break; + case TestScenario::Offline: + detail("offline_expected_qps : ", s.offline_expected_qps); + break; + } + + // Overrides + detail("min_duration_ms : ", s.min_duration_ms); + detail("max_duration_ms : ", s.max_duration_ms); + detail("min_query_count : ", s.min_query_count); + detail("max_query_count : ", s.max_query_count); + detail("qsl_rng_seed : ", s.qsl_rng_seed); + detail("sample_index_rng_seed : ", 
s.sample_index_rng_seed); + detail("schedule_rng_seed : ", s.schedule_rng_seed); + detail("accuracy_log_rng_seed : ", s.accuracy_log_rng_seed); + detail("accuracy_log_probability : ", s.accuracy_log_probability); + detail("accuracy_log_sampling_target : ", s.accuracy_log_sampling_target); + detail("print_timestamps : ", s.print_timestamps); + detail("performance_issue_unique : ", s.performance_issue_unique); + detail("performance_issue_same : ", s.performance_issue_same); + detail("performance_issue_same_index : ", s.performance_issue_same_index); + detail("performance_sample_count_override : ", + s.performance_sample_count_override); + detail(""); +#endif + }); +} + +void TestSettingsInternal::LogEffectiveSettings() const { + LogDetail([s = *this](AsyncDetail & detail) { +#if USE_NEW_LOGGING_FORMAT + MLPERF_LOG(detail, "effective_scenario", ToString(s.scenario)); + MLPERF_LOG(detail, "effective_test_mode", ToString(s.mode)); + + MLPERF_LOG(detail, "effective_samples_per_query", s.samples_per_query); + MLPERF_LOG(detail, "effective_target_qps", s.target_qps); + MLPERF_LOG(detail, "effective_target_latency_ns", s.target_latency.count()); + MLPERF_LOG(detail, "effective_target_latency_percentile", s.target_latency_percentile); + MLPERF_LOG(detail, "effective_max_async_queries", s.max_async_queries); + MLPERF_LOG(detail, "effective_target_duration_ms", s.target_duration.count()); + MLPERF_LOG(detail, "effective_min_duration_ms", s.min_duration.count()); + MLPERF_LOG(detail, "effective_max_duration_ms", s.max_duration.count()); + MLPERF_LOG(detail, "effective_min_query_count", s.min_query_count); + MLPERF_LOG(detail, "effective_max_query_count", s.max_query_count); + MLPERF_LOG(detail, "effective_min_sample_count", s.min_sample_count); + MLPERF_LOG(detail, "effective_qsl_rng_seed", s.qsl_rng_seed); + MLPERF_LOG(detail, "effective_sample_index_rng_seed", s.sample_index_rng_seed); + MLPERF_LOG(detail, "effective_schedule_rng_seed", s.schedule_rng_seed); + 
MLPERF_LOG(detail, "effective_accuracy_log_rng_seed", s.accuracy_log_rng_seed); + MLPERF_LOG(detail, "effective_accuracy_log_probability", s.accuracy_log_probability); + MLPERF_LOG(detail, "effective_accuracy_log_sampling_target", s.accuracy_log_sampling_target); + MLPERF_LOG(detail, "effective_print_timestamps", s.print_timestamps); + MLPERF_LOG(detail, "effective_performance_issue_unique", s.performance_issue_unique); + MLPERF_LOG(detail, "effective_performance_issue_same", s.performance_issue_same); + MLPERF_LOG(detail, "effective_performance_issue_same_index", s.performance_issue_same_index); + MLPERF_LOG(detail, "effective_performance_sample_count", s.performance_sample_count); +#else + detail(""); + detail("Effective Settings:"); + + detail("Scenario : " + ToString(s.scenario)); + detail("Test mode : " + ToString(s.mode)); + + detail("samples_per_query : ", s.samples_per_query); + detail("target_qps : ", s.target_qps); + detail("target_latency (ns): ", s.target_latency.count()); + detail("target_latency_percentile : ", s.target_latency_percentile); + detail("max_async_queries : ", s.max_async_queries); + detail("target_duration (ms): ", s.target_duration.count()); + detail("min_duration (ms): ", s.min_duration.count()); + detail("max_duration (ms): ", s.max_duration.count()); + detail("min_query_count : ", s.min_query_count); + detail("max_query_count : ", s.max_query_count); + detail("min_sample_count : ", s.min_sample_count); + detail("qsl_rng_seed : ", s.qsl_rng_seed); + detail("sample_index_rng_seed : ", s.sample_index_rng_seed); + detail("schedule_rng_seed : ", s.schedule_rng_seed); + detail("accuracy_log_rng_seed : ", s.accuracy_log_rng_seed); + detail("accuracy_log_probability : ", s.accuracy_log_probability); + detail("accuracy_log_sampling_target : ", s.accuracy_log_sampling_target); + detail("print_timestamps : ", s.print_timestamps); + detail("performance_issue_unique : ", s.performance_issue_unique); + detail("performance_issue_same : ", 
s.performance_issue_same); + detail("performance_issue_same_index : ", s.performance_issue_same_index); + detail("performance_sample_count : ", s.performance_sample_count); +#endif + }); +} + +void TestSettingsInternal::LogAllSettings() const { + LogRequestedTestSettings(requested); + LogEffectiveSettings(); +} + +void TestSettingsInternal::LogSummary(AsyncSummary &summary) const { + summary("samples_per_query : ", samples_per_query); + summary("target_qps : ", target_qps); + summary("target_latency (ns): ", target_latency.count()); + summary("max_async_queries : ", max_async_queries); + summary("min_duration (ms): ", min_duration.count()); + summary("max_duration (ms): ", max_duration.count()); + summary("min_query_count : ", min_query_count); + summary("max_query_count : ", max_query_count); + summary("qsl_rng_seed : ", qsl_rng_seed); + summary("sample_index_rng_seed : ", sample_index_rng_seed); + summary("schedule_rng_seed : ", schedule_rng_seed); + summary("accuracy_log_rng_seed : ", accuracy_log_rng_seed); + summary("accuracy_log_probability : ", accuracy_log_probability); + summary("accuracy_log_sampling_target : ", accuracy_log_sampling_target); + summary("print_timestamps : ", print_timestamps); + summary("performance_issue_unique : ", performance_issue_unique); + summary("performance_issue_same : ", performance_issue_same); + summary("performance_issue_same_index : ", performance_issue_same_index); + summary("performance_sample_count : ", performance_sample_count); +} + +} // namespace loadgen + +/// \todo The TestSettings::FromConfig definition belongs in a test_settings.cc +/// file which doesn't yet exist. To avoid churn so close to the submission +/// deadline, adding a test_settings.cc file has been deferred to v0.6. 
+int TestSettings::FromConfig(const std::string &path, const std::string &model, + const std::string &scenario) { + // TODO: move this method to a new file test_settings.cc + std::map kv; + + // lookup key/value pairs from config + auto lookupkv = [&](const std::string &model, const std::string &scenario, + const std::string &key, uint64_t *val_l, double *val_d, + double multiplier = 1.0) { + std::map::iterator it; + std::string found; + // lookup exact key first + it = kv.find(model + "." + scenario + "." + key); + if (it != kv.end()) { + found = it->second; + } else { + // lookup key with model wildcard + it = kv.find("*." + scenario + "." + key); + if (it != kv.end()) { + found = it->second; + } else { + it = kv.find(model + ".*." + key); + if (it != kv.end()) { + found = it->second; + } else { + it = kv.find("*.*." + key); + if (it != kv.end()) { + found = it->second; + } else { + return false; + } + } + } + } + // if we get here, found will be set + if (val_l) { + *val_l = strtoull(found.c_str(), nullptr, 0) * + static_cast(multiplier); + } + if (val_d) *val_d = strtod(found.c_str(), nullptr) * multiplier; + return true; + }; + + // dirt simple config parser + std::ifstream fss(path); + std::string line; + int line_nr = 0; + int errors = 0; + if (!fss.is_open()) { + LogDetail([p = path](AsyncDetail & detail) { +#if USE_NEW_LOGGING_FORMAT + std::stringstream ss; + ss << "can't open file " << p; + MLPERF_LOG_ERROR(detail, "error_invalid_config", ss.str()); +#else + detail.Error("can't open file ", p); +#endif + }); + return -ENOENT; + } + while (std::getline(fss, line)) { + line_nr++; + std::istringstream iss(line); + std::string s, k; + int looking_for = 0; // 0=key, 1=equal, 2=value + while (iss >> s) { + if (s == "#" && looking_for != 2) { + // done with this line + break; + } + if (looking_for == 2) { + // got key and value + const char *start = s.c_str(); + char *stop; + (void)strtoul(start, &stop, 0); + if (start + s.size() == stop) { + kv[k] = s; + 
continue; + } + (void)strtod(start, &stop); + if (start + s.size() == stop) { + kv[k] = s; + continue; + } + errors++; + LogDetail([l = line_nr](AsyncDetail & detail) { +#if USE_NEW_LOGGING_FORMAT + std::stringstream ss; + ss << "value needs to be integer or double, line=" << l; + MLPERF_LOG_ERROR(detail, "error_invalid_config", ss.str()); +#else + detail.Error("value needs to be integer or double, line=", l); +#endif + }); + break; + } + if (looking_for == 1 && s != "=") { + errors++; + LogDetail([l = line_nr](AsyncDetail & detail) { +#if USE_NEW_LOGGING_FORMAT + std::stringstream ss; + ss << "expected 'key=value', line=" << l; + MLPERF_LOG_ERROR(detail, "error_invalid_config", ss.str()); +#else + detail.Error("expected 'key=value', line=", l); +#endif + }); + break; + } + if (looking_for == 0) k = s; + looking_for++; + } + } + if (errors != 0) return -EINVAL; + + uint64_t val; + + // keys that apply to all scenarios + if (lookupkv(model, scenario, "mode", &val, nullptr)) { + switch (val) { + case 0: + mode = TestMode::SubmissionRun; + break; + case 1: + mode = TestMode::AccuracyOnly; + break; + case 2: + mode = TestMode::PerformanceOnly; + break; + case 3: + mode = TestMode::FindPeakPerformance; + break; + default: + LogDetail([](AsyncDetail &detail) { +#if USE_NEW_LOGGING_FORMAT + std::stringstream ss; + ss << "Invalid value passed to Mode key in config."; + MLPERF_LOG_ERROR(detail, "error_invalid_config", ss.str()); +#else + detail.Error("Invalid value passed to Mode key in config."); +#endif + }); + break; + } + } + lookupkv(model, scenario, "min_duration", &min_duration_ms, nullptr); + lookupkv(model, scenario, "max_duration", &max_duration_ms, nullptr); + lookupkv(model, scenario, "min_query_count", &min_query_count, nullptr); + lookupkv(model, scenario, "max_query_count", &max_query_count, nullptr); + lookupkv(model, scenario, "qsl_rng_seed", &qsl_rng_seed, nullptr); + lookupkv(model, scenario, "sample_index_rng_seed", &sample_index_rng_seed, + nullptr); + 
lookupkv(model, scenario, "schedule_rng_seed", &schedule_rng_seed, nullptr); + lookupkv(model, scenario, "accuracy_log_rng_seed", &accuracy_log_rng_seed, + nullptr); + lookupkv(model, scenario, "accuracy_log_probability", nullptr, + &accuracy_log_probability, 0.01); + lookupkv(model, scenario, "accuracy_log_sampling_target", + &accuracy_log_sampling_target, nullptr); + if (lookupkv(model, scenario, "print_timestamps", &val, nullptr)) + print_timestamps = (val == 0) ? false : true; + if (lookupkv(model, scenario, "performance_issue_unique", &val, nullptr)) + performance_issue_unique = (val == 0) ? false : true; + if (lookupkv(model, scenario, "performance_issue_same", &val, nullptr)) + performance_issue_same = (val == 0) ? false : true; + lookupkv(model, scenario, "performance_issue_same_index", + &performance_issue_same_index, nullptr); + lookupkv(model, scenario, "performance_sample_count_override", + &performance_sample_count_override, nullptr); + + // keys that apply to SingleStream + lookupkv(model, "SingleStream", "target_latency_percentile", nullptr, + &single_stream_target_latency_percentile, 0.01); + lookupkv(model, "SingleStream", "target_latency", + &single_stream_expected_latency_ns, nullptr, 1000 * 1000); + + // keys that apply to MultiStream + lookupkv(model, "MultiStream", "target_latency_percentile", nullptr, + &multi_stream_target_latency_percentile, 0.01); + lookupkv(model, "MultiStream", "target_qps", nullptr, + &multi_stream_target_qps); + if (lookupkv(model, "MultiStream", "samples_per_query", &val, nullptr)) + multi_stream_samples_per_query = static_cast(val); + if (lookupkv(model, "MultiStream", "max_async_queries", &val, nullptr)) + multi_stream_max_async_queries = static_cast(val); + + // keys that apply to Server + lookupkv(model, "Server", "target_latency_percentile", nullptr, + &server_target_latency_percentile, 0.01); + lookupkv(model, "Server", "target_latency", &server_target_latency_ns, + nullptr, 1000 * 1000); + lookupkv(model, 
"Server", "target_qps", nullptr, &server_target_qps); + if (lookupkv(model, "Server", "coalesce_queries", &val, nullptr)) + server_coalesce_queries = (val == 0) ? false : true; + if (lookupkv(model, "Server", "max_async_queries", &val, nullptr)) + server_max_async_queries = int(val); + + // keys that apply to Offline + lookupkv(model, "Offline", "target_qps", 0, &offline_expected_qps); + + return 0; +} + +} // namespace mlperf diff --git a/benchmarks/rnnt/ootb/inference/loadgen/test_settings_internal.h b/benchmarks/rnnt/ootb/inference/loadgen/test_settings_internal.h new file mode 100644 index 0000000..809e627 --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/loadgen/test_settings_internal.h @@ -0,0 +1,193 @@ +/* Copyright 2019 The MLPerf Authors. All Rights Reserved. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +/// \file +/// \brief The internal representation of user-provided settings. + +#ifndef MLPERF_LOADGEN_TEST_SETTINGS_INTERNAL_H +#define MLPERF_LOADGEN_TEST_SETTINGS_INTERNAL_H + +#include +#include +#include + +#include "logging.h" +#include "test_settings.h" + +namespace mlperf { + +namespace logging { +class AsyncSummary; +} + +namespace loadgen { + +using AsyncSummary = logging::AsyncSummary; + +std::string ToString(TestScenario scenario); +std::string ToString(TestMode mode); + +/// \brief takes the user-friendly TestSettings and normalizes it +/// for consumption by the loadgen. 
+/// \details It does things like remove scenario-specific naming and introduce +/// the concept of target_duration used to pre-generate queries. +struct TestSettingsInternal { + explicit TestSettingsInternal(const TestSettings &requested_settings, + size_t qsl_performance_sample_count); + void LogEffectiveSettings() const; + void LogAllSettings() const; + void LogSummary(AsyncSummary &summary) const; + + const TestSettings requested; + const TestScenario scenario; // Copied here for convenience. + const TestMode mode; // Copied here for convenience. + + uint64_t samples_per_query; + double target_qps; + std::chrono::nanoseconds target_latency{0}; + double target_latency_percentile; // Single, multistream, and server modes. + uint64_t max_async_queries; + + // Target duration is used to generate queries of a minimum duration before + // the test run. + std::chrono::milliseconds target_duration{0}; + + // Min duration/query_count/sample_count are used to validate the test + // duration at the end of the run. + std::chrono::milliseconds min_duration{0}; + std::chrono::milliseconds max_duration{0}; + uint64_t min_query_count; + uint64_t max_query_count; + uint64_t min_sample_count; // Offline only. + + uint64_t qsl_rng_seed; + uint64_t sample_index_rng_seed; + uint64_t schedule_rng_seed; + uint64_t accuracy_log_rng_seed; + double accuracy_log_probability; + uint64_t accuracy_log_sampling_target; + bool print_timestamps; + bool performance_issue_unique; + bool performance_issue_same; + uint64_t performance_issue_same_index; + uint64_t performance_sample_count; +}; + +/// \brief A namespace of collections of FindPeakPerformance helper functions, +/// mainly about binary search. 
+namespace find_peak_performance { + +constexpr char const *kNotSupportedMsg = + "Finding peak performance is only supported in MultiStream, " + "MultiStreamFree, and Server scenarios."; + +template +TestSettingsInternal MidOfBoundaries( + const TestSettingsInternal &lower_bound_settings, + const TestSettingsInternal &upper_bound_settings) { + TestSettingsInternal mid_settings = lower_bound_settings; + if (scenario == TestScenario::MultiStream || + scenario == TestScenario::MultiStreamFree) { + assert(lower_bound_settings.samples_per_query < + upper_bound_settings.samples_per_query); + mid_settings.samples_per_query = lower_bound_settings.samples_per_query + + (upper_bound_settings.samples_per_query - + lower_bound_settings.samples_per_query) / + 2; + } else if (scenario == TestScenario::Server) { + assert(lower_bound_settings.target_qps < upper_bound_settings.target_qps); + mid_settings.target_qps = + lower_bound_settings.target_qps + + (upper_bound_settings.target_qps - lower_bound_settings.target_qps) / 2; + } else { + LogDetail([](AsyncDetail &detail) { +#if USE_NEW_LOGGING_FORMAT + MLPERF_LOG_ERROR(detail, "error_invalid_test_settings", kNotSupportedMsg); +#else + detail(kNotSupportedMsg); +#endif + }); + } + return mid_settings; +} + +template +bool IsFinished(const TestSettingsInternal &lower_bound_settings, + const TestSettingsInternal &upper_bound_settings) { + if (scenario == TestScenario::MultiStream || + scenario == TestScenario::MultiStreamFree) { + return lower_bound_settings.samples_per_query + 1 >= + upper_bound_settings.samples_per_query; + } else if (scenario == TestScenario::Server) { + uint8_t precision = lower_bound_settings.requested + .server_find_peak_qps_decimals_of_precision; + double l = + std::floor(lower_bound_settings.target_qps * std::pow(10, precision)); + double u = + std::floor(upper_bound_settings.target_qps * std::pow(10, precision)); + return l + 1 >= u; + } else { + LogDetail([](AsyncDetail &detail) { +#if 
USE_NEW_LOGGING_FORMAT + MLPERF_LOG_ERROR(detail, "error_invalid_test_settings", kNotSupportedMsg); +#else + detail(kNotSupportedMsg); +#endif + }); + return true; + } +} + +template +std::string ToStringPerformanceField(const TestSettingsInternal &settings) { + if (scenario == TestScenario::MultiStream || + scenario == TestScenario::MultiStreamFree) { + return std::to_string(settings.samples_per_query); + } else if (scenario == TestScenario::Server) { + return std::to_string(settings.target_qps); + } else { + LogDetail([](AsyncDetail &detail) { +#if USE_NEW_LOGGING_FORMAT + MLPERF_LOG_ERROR(detail, "error_invalid_test_settings", kNotSupportedMsg); +#else + detail(kNotSupportedMsg); +#endif + }); + return ToString(settings.scenario); + } +} + +template +void WidenPerformanceField(TestSettingsInternal *settings) { + if (scenario == TestScenario::MultiStream || + scenario == TestScenario::MultiStreamFree) { + settings->samples_per_query = settings->samples_per_query * 2; + } else if (scenario == TestScenario::Server) { + settings->target_qps = + settings->target_qps * + (1 + settings->requested.server_find_peak_qps_boundary_step_size); + } else { + LogDetail([](AsyncDetail &detail) { +#if USE_NEW_LOGGING_FORMAT + MLPERF_LOG_ERROR(detail, "error_invalid_test_settings", kNotSupportedMsg); +#else + detail(kNotSupportedMsg); +#endif + }); + } +} + +} // namespace find_peak_performance +} // namespace loadgen +} // namespace mlperf + +#endif // MLPERF_LOADGEN_TEST_SETTINGS_INTERNAL_H diff --git a/benchmarks/rnnt/ootb/inference/loadgen/tests/BUILD.gn b/benchmarks/rnnt/ootb/inference/loadgen/tests/BUILD.gn new file mode 100644 index 0000000..d73bf83 --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/loadgen/tests/BUILD.gn @@ -0,0 +1,25 @@ +static_library("mlperf_loadgen_tests_loadgen_test_main") { + sources = [ "loadgen_test.h", "loadgen_test_main.cc" ] + configs += [ "//build/config/compiler:exceptions" ] +} + +executable("mlperf_loadgen_perftests") { + sources = [ 
"perftests_null_sut.cc" ] + deps = [ "..:mlperf_loadgen" ] +} + +executable("mlperf_loadgen_tests_basic") { + sources = [ "basic.cc" ] + deps = [ "..:mlperf_loadgen", + ":mlperf_loadgen_tests_loadgen_test_main" ] + configs += [ "//build/config/compiler:exceptions" ] +} + +source_set("mlperf_loadgen_perftests_py") { + sources = [ "perftests_null_sut.py" ] + deps = [ "../..:loadgen_pymodule_wheel_lib" ] +} + +source_set("docs") { + sources = [ "README.md" ] +} diff --git a/benchmarks/rnnt/ootb/inference/loadgen/tests/README.md b/benchmarks/rnnt/ootb/inference/loadgen/tests/README.md new file mode 100644 index 0000000..41056b4 --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/loadgen/tests/README.md @@ -0,0 +1,42 @@ +# Building and Running the Tests {#ReadmeTests} + +The unit and performance tests are only supported via gn/ninja at the moment. + +See the [top-level build readme](@ref ReadmeBuild) for details but, from a clean checkout, you must first run: + + make bootstrap_gn_ninja + third_party/gn/gn gen out/Release --args="is_debug=false" + +This will build the gn and ninja build tools and create a release project. + +## Unit Tests + +To build: + + third_party/ninja/ninja -C out/Release mlperf_loadgen_tests_basic + +To run all tests: + + out/Release/mlperf_loadgen_tests_basic . + +To run specific tests: + + out/Release/mlperf_loadgen_tests_basic + e.g.: + out/Release/mlperf_loadgen_tests_basic SingleStream + +## Performance Tests + +To build: + + third_party/ninja/ninja -C out/Release mlperf_loadgen_perftests + +To run all tests: + + out/Release/mlperf_loadgen_perftests . 
+ +To run specific tests: + + out/Release/mlperf_loadgen_perftests + e.g.: + out/Release/mlperf_loadgen_perftests ServerPool diff --git a/benchmarks/rnnt/ootb/inference/loadgen/tests/basic.cc b/benchmarks/rnnt/ootb/inference/loadgen/tests/basic.cc new file mode 100644 index 0000000..bfe57fe --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/loadgen/tests/basic.cc @@ -0,0 +1,335 @@ +/* Copyright 2019 The MLPerf Authors. All Rights Reserved. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +/// \file
/// \brief Basic functionality unit tests. + +#include +#include +#include +#include +#include +#include +#include + +#include "../loadgen.h" +#include "../query_sample_library.h" +#include "../system_under_test.h" +#include "../test_settings.h" +#include "loadgen_test.h" + +/// \brief Correctness unit tests. +namespace unit_tests { + +/// \defgroup LoadgenTestsBasic Test Coverage: Basic + +/// \brief Implements the client interfaces of the loadgen and +/// has some basic sanity checks that are enabled for all tests. +/// \details It also forwards calls to overrideable *Ext methods and implements +/// the TestProxy concept. 
+struct SystemUnderTestBasic : public mlperf::QuerySampleLibrary, + public mlperf::SystemUnderTest { + const std::string& Name() const override { return name_; } + + size_t TotalSampleCount() override { return total_sample_count_; } + size_t PerformanceSampleCount() override { return performance_sample_count_; } + + void LoadSamplesToRam( + const std::vector& samples) override { + for (auto s : samples) { + samples_load_count_.at(s)++; + loaded_samples_.push_back(s); + } + LoadSamplesToRamExt(samples); + } + virtual void LoadSamplesToRamExt( + const std::vector& samples) {} + + void UnloadSamplesFromRam( + const std::vector& samples) override { + for (auto s : samples) { + FAIL_IF(loaded_samples_.front() != s) && + FAIL_EXP(loaded_samples_.front()) && FAIL_EXP(s); + loaded_samples_.pop_front(); + size_t prev_load_count = samples_load_count_.at(s)--; + FAIL_IF(prev_load_count == 0) && FAIL_EXP(prev_load_count); + } + UnloadSamplesFromRamExt(samples); + } + virtual void UnloadSamplesFromRamExt( + const std::vector& samples) {} + + void IssueQuery(const std::vector& samples) override { + std::vector responses; + query_sizes_.push_back(samples.size()); + samples_between_flushes_.back() += samples.size(); + responses.reserve(samples.size()); + for (auto s : samples) { + FAIL_IF(samples_load_count_.at(s.index) == 0) && + FAIL_MSG("Issued unloaded sample:") && FAIL_EXP(s.index); + samples_issue_count_.at(s.index)++; + issued_samples_.push_back(s.index); + responses.push_back({s.id, 0, 0}); + } + mlperf::QuerySamplesComplete(responses.data(), responses.size()); + IssueQueryExt(samples); + } + virtual void IssueQueryExt(const std::vector& samples) {} + + void FlushQueries() override { + samples_between_flushes_.push_back(0); + FlushQueriesExt(); + } + virtual void FlushQueriesExt() {} + + void ReportLatencyResults( + const std::vector& latencies_ns) override {} + + virtual void RunTest() { + samples_load_count_.resize(total_sample_count_, 0); + 
samples_issue_count_.resize(total_sample_count_, 0); + samples_between_flushes_.resize(1, 0); + mlperf::StartTest(this, this, test_settings_, log_settings_); + } + + virtual void EndTest() {} + + protected: + mlperf::TestSettings test_settings_; + mlperf::LogSettings log_settings_; + + std::string name_{"BasicSUT"}; + size_t total_sample_count_; + size_t performance_sample_count_; + std::vector issued_samples_; + std::deque loaded_samples_; + std::vector samples_load_count_; + std::vector samples_issue_count_; + + std::vector query_sizes_; + std::vector samples_between_flushes_; +}; + +/// \brief Provides common test set up logic. +struct SystemUnderTestAccuracy : public SystemUnderTestBasic { + virtual void SetUpTest(size_t samples_per_query, + size_t samples_per_query_remainder, + size_t accuracy_remainder, + mlperf::TestScenario scenario) { + performance_sample_count_ = + samples_per_query * 16 + samples_per_query_remainder; + total_sample_count_ = performance_sample_count_ * 32 + accuracy_remainder; + + log_settings_.log_output.prefix_with_datetime = false; + + test_settings_.scenario = scenario; + test_settings_.mode = mlperf::TestMode::AccuracyOnly; + test_settings_.multi_stream_samples_per_query = samples_per_query; + + double qps = 1e3; + test_settings_.server_target_qps = qps; + test_settings_.multi_stream_target_qps = qps; + } +}; + +/// \brief Verifies all samples from the QSL are included at least once +/// in accuracy mode. 
+/// \ingroup LoadgenTestsBasic +struct TestAccuracyIncludesAllSamples : public SystemUnderTestAccuracy { + void EndTest() override { + std::sort(issued_samples_.begin(), issued_samples_.end()); + + FAIL_IF(issued_samples_.size() < total_sample_count_) && + FAIL_EXP(issued_samples_.size()) && FAIL_EXP(total_sample_count_); + FAIL_IF(issued_samples_.front() != 0) && FAIL_EXP(issued_samples_.front()); + FAIL_IF(issued_samples_.back() != total_sample_count_ - 1) && + FAIL_EXP(issued_samples_.back()) && FAIL_EXP(total_sample_count_); + + mlperf::QuerySampleIndex prev = -1; + size_t discontinuities = 0; + size_t dupes = 0; + for (auto s : issued_samples_) { + if (s == prev) { + dupes++; + } else if (s - prev > 1) { + discontinuities++; + } + prev = s; + } + + FAIL_IF(discontinuities != 0) && FAIL_EXP(discontinuities); + if (test_settings_.scenario == mlperf::TestScenario::MultiStream || + test_settings_.scenario == mlperf::TestScenario::MultiStreamFree) { + const size_t expected_sets = + total_sample_count_ / performance_sample_count_; + FAIL_IF(dupes >= + test_settings_.multi_stream_samples_per_query * expected_sets) && + FAIL_EXP(dupes); + } else { + FAIL_IF(dupes != 0) && FAIL_EXP(dupes); + } + } +}; + +REGISTER_TEST_ALL_SCENARIOS(AccuracyIncludesAllSamples, + TestProxy(), 4, 0, + 0); + +/// \brief Verifies samples from the QSL aren't included too many times. +/// \details This is a regression test for: +/// https://github.com/mlperf/inference/pull/386 +/// The root cause was using different values for samples_per_query while +/// generating queries for the GNMT dataset. 
+/// \ingroup LoadgenTestsBasic +struct TestAccuracyDupesAreLimitted : public SystemUnderTestAccuracy { + void SetUpTest(bool, mlperf::TestScenario scenario) { + SystemUnderTestAccuracy::SetUpTest(4, 0, 0, scenario); + total_sample_count_ = 3003; + performance_sample_count_ = 1001; + } + + void EndTest() override { + std::sort(issued_samples_.begin(), issued_samples_.end()); + + FAIL_IF(issued_samples_.size() < total_sample_count_) && + FAIL_EXP(issued_samples_.size()) && FAIL_EXP(total_sample_count_); + FAIL_IF(issued_samples_.front() != 0) && FAIL_EXP(issued_samples_.front()); + FAIL_IF(issued_samples_.back() != total_sample_count_ - 1) && + FAIL_EXP(issued_samples_.back()) && FAIL_EXP(total_sample_count_); + + std::vector issue_counts(total_sample_count_, 0); + for (auto s : issued_samples_) { + issue_counts.at(s)++; + } + + const bool multistream = + test_settings_.scenario == mlperf::TestScenario::MultiStream || + test_settings_.scenario == mlperf::TestScenario::MultiStreamFree; + const size_t max_count = multistream ? 2 : 1; + + for (size_t i = 0; i < issue_counts.size(); i++) { + FAIL_IF(issue_counts[i] > max_count) && FAIL_EXP(i) && + FAIL_EXP(max_count) && FAIL_EXP(issue_counts[i]); + } + } +}; + +REGISTER_TEST_ALL_SCENARIOS(TestAccuracyDupesAreLimitted, + TestProxy(), true); + +/// \brief Verifies offline + accuracy doesn't hang if the last set +/// in the accuracy series is smaller than others. +/// \ingroup LoadgenTestsBasic +struct TestOfflineRemainderAccuracySet : public SystemUnderTestAccuracy { + void SetUpTest() { + SystemUnderTestAccuracy::SetUpTest(4, 0, 7, mlperf::TestScenario::Offline); + } + + void EndTest() override { + auto& flush_samples = samples_between_flushes_; + + FAIL_IF(flush_samples.size() < 3) && FAIL_EXP(flush_samples.size()) && + BAD_TEST_MSG("Test should generate multiple query sets.") && ABORT_TEST; + + // The last counter will be 0, since a test ends with a call to + // FlushQuery. 
+ FAIL_IF(flush_samples.back() != 0) && FAIL_EXP(flush_samples.back()) && + FAIL_MSG( + "Detected stray calls to IssueQuery after the last call to " + "FlushQuery."); + flush_samples.pop_back(); + + // Verify the test ran with a smaller last accuracy set. + size_t first_size = flush_samples.front(); + size_t last_size = flush_samples.back(); + FAIL_IF(first_size <= last_size) && FAIL_EXP(first_size) && + FAIL_EXP(last_size) && BAD_TEST_MSG(); + + flush_samples.pop_back(); // Don't check the last set for equality. + for (size_t query_size : flush_samples) { + FAIL_IF(query_size != first_size) && FAIL_EXP(query_size) && + FAIL_EXP(first_size); + } + } +}; + +REGISTER_TEST(Offline_RemainderAccuracySets, + TestProxy()); + +/// \brief Verifies all queries only contain samples that are contiguous, +/// even if the set size is not a multiple of samples_per_query. +/// \ingroup LoadgenTestsBasic +struct TestMultiStreamContiguousRemainderQuery + : public SystemUnderTestAccuracy { + void SetUpTest(mlperf::TestScenario scenario) { + SystemUnderTestAccuracy::SetUpTest(4, 1, 0, scenario); + first_qsl_offsets_.resize(total_sample_count_, kBadQslOffset); + + auto spq = test_settings_.multi_stream_samples_per_query; + FAIL_IF(performance_sample_count_ % spq == 0) && + FAIL_EXP(performance_sample_count_) && FAIL_EXP(spq) && + BAD_TEST_MSG("There is no remainder."); + } + + void LoadSamplesToRamExt( + const std::vector& samples) override { + FAIL_IF(loaded_samples_.size() != samples.size()) && + FAIL_MSG("Contiguous sample order is likely ambiguous."); + for (size_t i = 0; i < samples.size(); i++) { + auto& offset = first_qsl_offsets_.at(samples.at(i)); + // Samples may be loaded into multiple slots for padding purposes, + // so make sure to only index the first time a sample appears in a + // loaded set. 
+ if (offset == kBadQslOffset) { + offset = i; + } + } + } + + void UnloadSamplesFromRamExt( + const std::vector& samples) override { + FAIL_IF(!loaded_samples_.empty()) && + FAIL_MSG("Contiguous sample order is likely ambiguous."); + for (size_t i = 0; i < samples.size(); i++) { + first_qsl_offsets_.at(samples.at(i)) = kBadQslOffset; + } + } + + void IssueQueryExt(const std::vector& samples) override { + size_t expected_offset = first_qsl_offsets_[samples[0].index]; + for (auto s : samples) { + FAIL_IF(loaded_samples_[expected_offset] != s.index) && + FAIL_MSG("Samples are not contiguous."); + expected_offset++; + } + } + + void FlushQueriesExt() override {} + + void EndTest() override {} + + private: + static const size_t kBadQslOffset; + std::vector first_qsl_offsets_; +}; + +constexpr size_t TestMultiStreamContiguousRemainderQuery::kBadQslOffset = + std::numeric_limits::max(); + +REGISTER_TEST(MultiStream_RemainderQueryContiguous, + TestProxy(), + mlperf::TestScenario::MultiStream); +REGISTER_TEST(MultiStreamFree_RemainderQueryContiguous, + TestProxy(), + mlperf::TestScenario::MultiStreamFree); + +} // namespace unit_tests diff --git a/benchmarks/rnnt/ootb/inference/loadgen/tests/loadgen_test.h b/benchmarks/rnnt/ootb/inference/loadgen/tests/loadgen_test.h new file mode 100644 index 0000000..433ff35 --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/loadgen/tests/loadgen_test.h @@ -0,0 +1,199 @@ +/* Copyright 2019 The MLPerf Authors. All Rights Reserved. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +/// \file +/// \brief A minimal test framework. + +#ifndef MLPERF_LOADGEN_TESTS_LOADGEN_TEST_H_ +#define MLPERF_LOADGEN_TESTS_LOADGEN_TEST_H_ + +#include +#include +#include +#include +#include +#include + +#define REGISTER_TEST(name, ...) \ + static Test::StaticRegistrant test##name(#name, __VA_ARGS__); + +#define REGISTER_TEST_SCENARIO(name, scenario, test, ...) \ + static Test::StaticRegistrant t##name##scenario( \ + #name "_" #scenario, test, __VA_ARGS__, mlperf::TestScenario::scenario) + +#define REGISTER_TEST_ALL_SCENARIOS(name, test, ...) \ + REGISTER_TEST_SCENARIO(name, SingleStream, test, __VA_ARGS__); \ + REGISTER_TEST_SCENARIO(name, MultiStream, test, __VA_ARGS__); \ + REGISTER_TEST_SCENARIO(name, MultiStreamFree, test, __VA_ARGS__); \ + REGISTER_TEST_SCENARIO(name, Server, test, __VA_ARGS__); \ + REGISTER_TEST_SCENARIO(name, Offline, test, __VA_ARGS__); + +#define FAIL_IF(exp) \ + [&]() { \ + const bool v = exp; \ + if (v) { \ + std::cerr << "\n ERROR: (" << __FILE__ << "@" << __LINE__ \ + << ") : " #exp; \ + Test::AddFailure(); \ + } \ + return v; \ + }() + +#define FAIL_MSG(...) \ + [&]() { \ + std::cerr << "\n Info: (" << __FILE__ << "@" << __LINE__ << ") : "; \ + Test::Log(__VA_ARGS__); \ + return true; \ + }() + +#define FAIL_EXP(exp) \ + [&]() { \ + std::cerr << "\n Info: (" << __FILE__ << "@" << __LINE__ << ") : "; \ + std::cerr << #exp << " is " << (exp); \ + return true; \ + }() + +#define BAD_TEST_MSG(...) \ + [&]() { \ + FAIL_MSG("The test isn't testing what it claims to test. "); \ + Test::Log(__VA_ARGS__); \ + return true; \ + }() + +#define ABORT_TEST \ + [&]() { \ + FAIL_MSG("ABORTING"); \ + throw std::logic_error("ABORT_TEST encountered."); \ + return false; \ + }(); + +/// \brief Testing utilities. 
+namespace testing { + +/// \brief Wraps a test class as a functor for easy registration. +/// Forwards registration args to a SetUpTest method. +/// \details Calls SetUpTest, RunTest, and EndTest. +template +struct TestProxy { + template + void operator()(Args&&... args) { + TestT test; + test.SetUpTest(std::forward(args)...); + test.RunTest(); + test.EndTest(); + } +}; + +/// \brief A collection of methods for registering and running tests. +class Test { + /// \brief Maps registered test names to a callback. + using TestMap = std::multimap>; + + /// \brief The registered tests. + /// \details Wraps a static local to avoid undefined initialization order + /// and guarantee it is initialized before the first test registers itself. + static TestMap& tests() { + static TestMap tests_; + return tests_; + } + + /// \brief The number of errors the current test has encountered. + static size_t& test_fails() { + static size_t test_fails_ = 0; + return test_fails_; + } + + public: + /// \brief Registers a test before main() starts during static initialization. + struct StaticRegistrant { + template + StaticRegistrant(Args&&... args) { + Test::Register(std::forward(args)...); + } + }; + + /// \brief Registers a test at runtime. + template + static void Register(const char* name, TestF test, Args&&... args) { + std::function test_closure = + std::bind(test, std::forward(args)...); + tests().insert({std::move(name), std::move(test_closure)}); + } + + /// \brief Runs all currently registered tests that match the given filter. + static int Run(std::function filter) { + // Determine which tests are enabled. + std::vector enabled_tests; + for (auto& test : tests()) { + if (filter(test.first)) { + enabled_tests.push_back(&test); + } + } + const size_t enabled = enabled_tests.size(); + std::cout << enabled << " of " << tests().size() << " tests enabled.\n"; + + // Run the tests. 
+ std::vector failures; + for (size_t i = 0; i < enabled; i++) { + const char* name = enabled_tests[i]->first; + std::cout << "[" << (i + 1) << "/" << enabled << "] : " << name << " : "; + std::cout.flush(); + test_fails() = 0; + try { + enabled_tests[i]->second(); // Run the test. + } catch (std::exception& e) { + constexpr bool TestThrewException = true; + FAIL_IF(TestThrewException) && FAIL_EXP(e.what()); + } + if (test_fails() > 0) { + failures.push_back(name); + std::cerr << "\n FAILED: " << name << "\n"; + } else { + std::cout << "SUCCESS\n"; + } + } + + // Summarize. + if (enabled_tests.empty()) { + std::cerr << "Check your test filter.\n"; + } else if (failures.empty()) { + std::cout << "All " << enabled << " tests passed! \\o/\n"; + } else { + std::cout << failures.size() << " of " << enabled << " tests failed:\n"; + for (auto failed_test_name : failures) { + std::cout << " " << failed_test_name << "\n"; + } + } + return failures.size(); + } + + /// \brief Used by test macros to flag test failure. + static void AddFailure() { test_fails()++; } + + /// \brief Base case for the variadic version of Log. + static void Log() {} + + /// \brief Used by test macros to log an arbitrary list of args. + template + static void Log(T&& v, Args&&... args) { + std::cerr << v; + Log(std::forward(args)...); + } +}; + +} // namespace testing + +// The testing namespace exists for documentation purposes. +// Export the testing namespace for all files that define tests. +using namespace testing; + +#endif // MLPERF_LOADGEN_TESTS_LOADGEN_TEST_H_ diff --git a/benchmarks/rnnt/ootb/inference/loadgen/tests/loadgen_test_main.cc b/benchmarks/rnnt/ootb/inference/loadgen/tests/loadgen_test_main.cc new file mode 100644 index 0000000..3dc5afa --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/loadgen/tests/loadgen_test_main.cc @@ -0,0 +1,33 @@ +/* Copyright 2019 The MLPerf Authors. All Rights Reserved. 
+Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +/// \file +/// \brief A main entry point a test binary can use if it just wants to execute +/// Test::Run on all statically registered tests. + +#include + +#include "loadgen_test.h" + +int main(int argc, char* argv[]) { + if (argc <= 1) { + std::cerr << "Usage: " << argv[0] << " \n"; + return -1; + } + std::regex include_regex(argc >= 2 ? argv[1] : ".*"); + std::regex exclude_regex(argc >= 3 ? std::regex(argv[2]) : std::regex()); + auto test_filter = [&](const char* test_name) { + return (std::regex_search(test_name, include_regex) && + !std::regex_search(test_name, exclude_regex)); + }; + return Test::Run(test_filter); +} diff --git a/benchmarks/rnnt/ootb/inference/loadgen/tests/perftests_null_sut.cc b/benchmarks/rnnt/ootb/inference/loadgen/tests/perftests_null_sut.cc new file mode 100644 index 0000000..3007226 --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/loadgen/tests/perftests_null_sut.cc @@ -0,0 +1,236 @@ +/* Copyright 2019 The MLPerf Authors. All Rights Reserved. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +/// \file +/// \brief Performance tests using a null backend. + +#include + +#include "../loadgen.h" +#include "../query_sample_library.h" +#include "../system_under_test.h" +#include "../test_settings.h" + +/// \brief Performance unit tests. +namespace perf_tests { + +/// \defgroup LoadgenTestsPerformance Test Coverage: Performance + +/// \brief A simple SUT implemenatation that immediately completes +/// issued queries sychronously ASAP. +class SystemUnderTestNull : public mlperf::SystemUnderTest { + public: + SystemUnderTestNull() = default; + ~SystemUnderTestNull() override = default; + const std::string& Name() const override { return name_; } + void IssueQuery(const std::vector& samples) override { + std::vector responses; + responses.reserve(samples.size()); + for (auto s : samples) { + responses.push_back({s.id, 0, 0}); + } + mlperf::QuerySamplesComplete(responses.data(), responses.size()); + } + + void FlushQueries() override {} + void ReportLatencyResults( + const std::vector& latencies_ns) override {} + + private: + std::string name_{"NullSUT"}; +}; + +/// \brief A stub implementation of QuerySampleLibrary. 
+class QuerySampleLibraryNull : public mlperf::QuerySampleLibrary { + public: + QuerySampleLibraryNull() = default; + ~QuerySampleLibraryNull() = default; + const std::string& Name() const override { return name_; } + + size_t TotalSampleCount() override { return 1024 * 1024; } + + size_t PerformanceSampleCount() override { return 1024; } + + void LoadSamplesToRam( + const std::vector& samples) override { + return; + } + + void UnloadSamplesFromRam( + const std::vector& samples) override { + return; + } + + private: + std::string name_{"NullQSL"}; +}; + +/// \brief Runs single stream traffic. +/// \ingroup LoadgenTestsPerformance +void TestSingleStream() { + SystemUnderTestNull null_sut; + QuerySampleLibraryNull null_qsl; + + mlperf::LogSettings log_settings; + log_settings.log_output.prefix_with_datetime = true; + + mlperf::TestSettings ts; + + mlperf::StartTest(&null_sut, &null_qsl, ts, log_settings); +} + +/// \brief A SUT implementation that completes queries asynchronously using +/// std::async. +class SystemUnderTestNullStdAsync : public mlperf::SystemUnderTest { + public: + SystemUnderTestNullStdAsync() { futures_.reserve(1000000); } + ~SystemUnderTestNullStdAsync() override = default; + const std::string& Name() const override { return name_; } + void IssueQuery(const std::vector& samples) override { + futures_.emplace_back(std::async(std::launch::async, [samples] { + std::vector responses; + responses.reserve(samples.size()); + for (auto s : samples) { + responses.push_back({s.id, 0, 0}); + } + mlperf::QuerySamplesComplete(responses.data(), responses.size()); + })); + } + + void FlushQueries() override {} + void ReportLatencyResults( + const std::vector& latencies_ns) override {} + + private: + std::string name_{"NullStdAsync"}; + std::vector> futures_; +}; + +/// \brief Tests server traffic using SystemUnderTestNullStdAsync. 
+/// \ingroup LoadgenTestsPerformance +void TestServerStdAsync() { + SystemUnderTestNullStdAsync null_std_async_sut; + QuerySampleLibraryNull null_qsl; + + mlperf::LogSettings log_settings; + log_settings.log_output.prefix_with_datetime = true; + log_settings.log_output.copy_summary_to_stdout = true; + + mlperf::TestSettings ts; + ts.scenario = mlperf::TestScenario::Server; + ts.server_target_qps = 2000000; + ts.min_duration_ms = 100; + + mlperf::StartTest(&null_std_async_sut, &null_qsl, ts, log_settings); +} + +/// \brief A SUT implementation that completes queries asynchronously using +/// an explicitly managed thread pool. +class SystemUnderTestNullPool : public mlperf::SystemUnderTest { + public: + SystemUnderTestNullPool() { + samples_.reserve(kReserveSampleSize); + next_poll_time_ = std::chrono::high_resolution_clock::now() + poll_period_; + for (size_t i = 0; i < thread_count_; i++) { + threads_.emplace_back(&SystemUnderTestNullPool::WorkerThread, this); + } + } + + ~SystemUnderTestNullPool() override { + { + std::unique_lock lock(mutex_); + keep_workers_alive_ = false; + } + cv_.notify_all(); + for (auto& thread : threads_) { + thread.join(); + } + } + + const std::string& Name() const override { return name_; } + + void IssueQuery(const std::vector& samples) override { + std::unique_lock lock(mutex_); + samples_.insert(samples_.end(), samples.begin(), samples.end()); + } + + void FlushQueries() override {} + void ReportLatencyResults( + const std::vector& latencies_ns) override {} + + private: + void WorkerThread() { + std::vector my_samples; + my_samples.reserve(kReserveSampleSize); + std::unique_lock lock(mutex_); + while (keep_workers_alive_) { + next_poll_time_ += poll_period_; + auto my_wakeup_time = next_poll_time_; + cv_.wait_until(lock, my_wakeup_time, + [&]() { return !keep_workers_alive_; }); + my_samples.swap(samples_); + lock.unlock(); + + std::vector responses; + responses.reserve(my_samples.size()); + for (auto s : my_samples) { + 
responses.push_back({s.id, 0, 0}); + } + mlperf::QuerySamplesComplete(responses.data(), responses.size()); + + lock.lock(); + my_samples.clear(); + } + } + + static constexpr size_t kReserveSampleSize = 1024 * 1024; + const std::string name_{"NullPool"}; + const size_t thread_count_ = 4; + const std::chrono::milliseconds poll_period_{1}; + std::chrono::high_resolution_clock::time_point next_poll_time_; + + std::mutex mutex_; + std::condition_variable cv_; + bool keep_workers_alive_ = true; + std::vector threads_; + + std::vector samples_; +}; + +/// \brief Tests server traffic using SystemUnderTestNullPool. +/// \ingroup LoadgenTestsPerformance +void TestServerPool() { + SystemUnderTestNullPool null_pool; + QuerySampleLibraryNull null_qsl; + + mlperf::LogSettings log_settings; + log_settings.log_output.prefix_with_datetime = true; + log_settings.log_output.copy_summary_to_stdout = true; + + mlperf::TestSettings ts; + ts.scenario = mlperf::TestScenario::Server; + ts.server_target_qps = 2000000; + ts.min_duration_ms = 100; + + mlperf::StartTest(&null_pool, &null_qsl, ts, log_settings); +} + +/// @} + +} // namespace perf_tests + +int main(int argc, char* argv[]) { + perf_tests::TestSingleStream(); + perf_tests::TestServerStdAsync(); + perf_tests::TestServerPool(); + return 0; +} diff --git a/benchmarks/rnnt/ootb/inference/loadgen/tests/perftests_null_sut.py b/benchmarks/rnnt/ootb/inference/loadgen/tests/perftests_null_sut.py new file mode 100644 index 0000000..457a2d9 --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/loadgen/tests/perftests_null_sut.py @@ -0,0 +1,71 @@ +# Copyright 2019 The MLPerf Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================= + +"""Python version of perftests_null_sut.cc. +""" + +from __future__ import print_function +from absl import app +import mlperf_loadgen +import numpy + + +def load_samples_to_ram(query_samples): + del query_samples + return + + +def unload_samples_from_ram(query_samples): + del query_samples + return + + +def issue_query(query_samples): + responses = [] + for s in query_samples: + responses.append(mlperf_loadgen.QuerySampleResponse(s.id, 0, 0)) + mlperf_loadgen.QuerySamplesComplete(responses) + + +def flush_queries(): + pass + + +def process_latencies(latencies_ns): + print("Average latency: ") + print(numpy.mean(latencies_ns)) + print("Median latency: ") + print(numpy.percentile(latencies_ns, 50)) + print("90 percentile latency: ") + print(numpy.percentile(latencies_ns, 90)) + + +def main(argv): + del argv + settings = mlperf_loadgen.TestSettings() + settings.scenario = mlperf_loadgen.TestScenario.SingleStream + settings.mode = mlperf_loadgen.TestMode.PerformanceOnly + + sut = mlperf_loadgen.ConstructSUT( + issue_query, flush_queries, process_latencies) + qsl = mlperf_loadgen.ConstructQSL( + 1024 * 1024, 1024, load_samples_to_ram, unload_samples_from_ram) + mlperf_loadgen.StartTest(sut, qsl, settings) + mlperf_loadgen.DestroyQSL(qsl) + mlperf_loadgen.DestroySUT(sut) + + +if __name__ == "__main__": + app.run(main) diff --git a/benchmarks/rnnt/ootb/inference/loadgen/tools/mlperf-trace.ipynb b/benchmarks/rnnt/ootb/inference/loadgen/tools/mlperf-trace.ipynb new file mode 100644 
index 0000000..ab834d1
--- /dev/null
+++ b/benchmarks/rnnt/ootb/inference/loadgen/tools/mlperf-trace.ipynb
@@ -0,0 +1,441 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Tool to extract useful information from mlperf trace"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "%matplotlib inline\n", + "# Ignore warnings\n", + "import warnings\n", + "warnings.filterwarnings('ignore')\n", + "\n", + "import json\n", + "import os\n", + "import seaborn as sns\n", + "from operator import itemgetter\n", + "import pandas as pd\n", + "import numpy as np\n", + "import matplotlib.pyplot as plt\n", + "\n", + "figsize=(10, 5)\n", + "font=10\n", + "\n", + "plt.figure(dpi=600)\n", + "plt.rc('xtick', labelsize=font) \n", + "plt.rc('font', size=font)\n", + "sns.set(font_scale=1.4, style=\"whitegrid\");" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "def trace_to_df(fname):\n", + " with open(fname, \"r\") as f:\n", + " j = json.load(f)\n", + " if type(j) == dict:\n", + " j = j['traceEvents']\n", + " \n", + " result = []\n", + " for item in j:\n", + " name = item['name']\n", + " if name not in [\"Latency\", \"Sample\", \"QuerySamplesComplete\", \"IssueQuery\"]:\n", + " continue\n", + "\n", + " args = item.get('args')\n", + " d = {\"ts\": item['ts'], \"name\": name, \"dur\": item.get(\"dur\")}\n", + "\n", + " if name == \"Latency\":\n", + " d[\"issue_delay\"] = args[\"issue_delay\"]\n", + " d[\"issue_to_done\"] = args[\"issue_to_done\"] / 1e3\n", + " result.append(d)\n", + " elif name == \"Sample\":\n", + " if args:\n", + " d[\"issue_start_ns\"] = args[\"issue_start_ns\"]\n", + " d[\"complete_ns\"] = args[\"complete_ns\"]\n", + " d[\"issue_to_done\"] = (args[\"complete_ns\"] - args[\"issue_start_ns\"]) / 1e3\n", + " result.append(d)\n", + " elif name == \"QuerySamplesComplete\":\n", + " result.append(d)\n", + " elif name == \"IssueQuery\":\n", + " result.append(d)\n", + "\n", + " df = pd.DataFrame(result)\n", + " df = df.sort_values(by=[\"ts\"])\n", + " return df\n", + "\n", + "BINS = 10" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "data": { + 
"text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
tsdurissue_delayissue_to_doneissue_start_nscomplete_ns
count2.000000e+0410000.0000005.000000e+0310000.0000005.000000e+035.000000e+03
mean4.894584e+0717.7316827.001508e+046112.5544917.001508e+046.182570e+06
std2.839099e+0725.5786399.666462e+042254.0772359.666462e+042.263719e+06
min4.102560e+031.1520008.810000e+022754.9670008.810000e+022.780383e+06
25%2.463025e+073.9747505.806250e+044100.4730005.806250e+044.166623e+06
50%4.881766e+077.3640006.159800e+046089.8800006.159800e+046.155939e+06
75%7.373552e+0727.4410006.835175e+047337.2570006.835175e+047.408272e+06
max9.832065e+07508.5520006.522433e+0622234.1010006.522433e+062.414005e+07
\n", + "
" + ], + "text/plain": [ + " ts dur issue_delay issue_to_done \\\n", + "count 2.000000e+04 10000.000000 5.000000e+03 10000.000000 \n", + "mean 4.894584e+07 17.731682 7.001508e+04 6112.554491 \n", + "std 2.839099e+07 25.578639 9.666462e+04 2254.077235 \n", + "min 4.102560e+03 1.152000 8.810000e+02 2754.967000 \n", + "25% 2.463025e+07 3.974750 5.806250e+04 4100.473000 \n", + "50% 4.881766e+07 7.364000 6.159800e+04 6089.880000 \n", + "75% 7.373552e+07 27.441000 6.835175e+04 7337.257000 \n", + "max 9.832065e+07 508.552000 6.522433e+06 22234.101000 \n", + "\n", + " issue_start_ns complete_ns \n", + "count 5.000000e+03 5.000000e+03 \n", + "mean 7.001508e+04 6.182570e+06 \n", + "std 9.666462e+04 2.263719e+06 \n", + "min 8.810000e+02 2.780383e+06 \n", + "25% 5.806250e+04 4.166623e+06 \n", + "50% 6.159800e+04 6.155939e+06 \n", + "75% 6.835175e+04 7.408272e+06 \n", + "max 6.522433e+06 2.414005e+07 " + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "df = trace_to_df('/tmp/mlperf_log_trace.json')\n", + "df.describe()" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAoIAAAFKCAYAAACJoz5RAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjQuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/Z1A+gAAAACXBIWXMAAAsTAAALEwEAmpwYAAAstklEQVR4nO3deZxcVZ338Q8ECIQASVgNyjIIP8AoPkRGUfRBkSXgOiIMjAsoKIroIAgzKItsowODKCIoI4IzqICODgjigiwqKtCAEiA/MLI8EEFIAoJgB0ieP84tUhSVdHelu6vS9/N+vfp1U/eeunWqTi/fnHvOuSssWrQISZIk1c+K3a6AJEmSusMgKEmSVFMGQUmSpJoyCEqSJNWUQVCSJKmmDIKSJEk1tVK3KyBJdRIRmwB3A/tn5nndrc3z9ULdIuJdwDeAjTLz0W7UYbAiYhpwC/DKzJzZ5epIHTEISmNIROxH+SO6fWb+psvVGZSIWAc4Eng7sBHwJHAD8MXMvLybdRurIuKjwJM9GETHAScAZ/V6CATIzJkRcTlwPPAP3a6P1AkvDUvqmogISo/KIcCVwMeAfwPWAy6LiM91r3Zj2keB/drsvxdYDfivUa3NYm8BtgK+2qXX78TZwDsj4qXdrojUCXsEJXVFRKwMfBeYDLwhM69vOnYacAFwZET0ZebFo1y31TPzr6P5mp2qetFWysz+ZT1XZi4C/rbsterYB4AbM/OPXazDUP0UmE8J1p/pblWkoTMISmNcRKwPnATsSulpexS4CTg8M2+rymwLnAhsB6wBPARcC3woM5+KiB2Bq4A3ZubVTefehDZjyiJii+p8OwGrA3cAJ2Xmd5uq9i5gGnBMcwgEyMxnI+LDVZ0/C1xcnXfY69F0OX0n4B3A3sB61XPvBA7LzNNaPtNXAL8DPpqZZ7EEETEJOB14J7AI+F/gC23KXV297x1b9p8H7JiZm7S8z38FngA+AWwKvBm4OiIOq15rS2AicBflEvt/Np3zHmDj6t+Ne4zem5mbLOVz3IbSU7sDMI5y6f7ozPxFU5n9KJ/jjpTL/O8FJgA/oXwfPbykz6l6/qrAbsB/tDm2CPhsZh7Xsv8e4OrM3K96vBLwL8D7gJcAT1Wfwecz83+anjeY708iYi1KuHsXsCHwCHAN8KnMfAAgM5+u2u+dGAS1HPLSsDT2fRfYEzifckmwEUS2AIiIdSm9GpsB/065PHseJaStPtQXi4itgN8CL6/OdxgwF7g4It7TVPSt1fab7c6TmY9RgtNWEbHZCNaj4QxgW0poPiYz7wJ+DbQr+x5gAXDhUl5/har+76X0bn4GmEpph2X1XuBwSvA6FPhTtf9QYCZlzNqnKIH+nIg4qOm5/wzcD8yqzvPeat+S3sdWwC+A/wOcAhxXvY+fRcQb2jzldGAbSoA/i9LOXx7Ee5oOrALcOIiyS3Is5b1fA3y8+vcs4O8bBQb7fRERq1fnORT4OSV0f4USolsvA/dRvk8nL0Pdpa6wR1Aaw6oeqR0oPRinNh1qHnv3WmAKsGtmNv8RPrbDl/0iMAd4VWY+Ve07MyJ+AnwuIi6oLkFuDTyWmfcu5Vy/q7ZbA7NHqB4NT1B6355p2vdN4KyI2DozbweIiBWBfYDLMnPeUl7/bcAbgCMz89+r554F/GyI76OdjYHNM/NPLfu3yMwnmx6fUb3fwylj2cjMH0TEicAjmfnfg3itk4BVgelVOCYivkEJWKcBr2opPxfYufHZVp/XxyNirSrcL8mW1XZZLgu/Bbg8Mw9cSpnBfl98ihJo92oZmnBSFfKb/RFYgTK+8bplqL806uwRlMa2pyg9VztGxJQllGn8cX5LNW6vY9VrvBm4CFg9ItZpfAFXUC6vbVEVXwN4fIBTNo6vMYL1aDinJQRC6fHrp/SaNewIvJiBJ1TsDiyk9IoB5ZI3cOZQ3ssS/KBNCKQRAiNi5YiYUr3fq4DNqsucQ1KNP9wVuLQRAqvXeYTSazy
9GnrQ7OstAfsXlMvJGw/wcmtX2/lDrWeTx4CXVZd+X2CI3xd7Are1G5/a8v6a67zOMtRd6gqDoDSGVRMIjqSMvXooIn4ZEUdFxEuail1DuXx8LDA3Ii6NiAOrS2ND9VJKz8hxwMMtX42xX+tV28cZOOA1jv95BOvR8IIex8ycD1wC7NvUC/QeYB5w2QB12Bh4MDNbw+6dg3sLS9W2dzQi3h4RN1L+AzCX8n5Prg4POQgC61LG+WWbY3dU201a9t/X8rgRkgZ72bS1t20ojqG8z4yI2yLitIho7rEcyvfFZpTL7EOpc2tAlHqeQVAa4zLzdGBzyqWux4CjgTuqiRdk5qLMfDfwasr4rnWArwG3RkTjj+KS/sCNa3nc+J3yBWDnJXw1/rjeDqwVERstpfqvqLaNy4UjUY+Gp2jvm5T1Dd9QTWh4F3BRZi5YSr2HarDvq+EFdY2IHYDvU9ZhPAjYg/I+G2NCR+v3/bNL2D9QwHuk2g5lnN3zPp/MvJYS4N4P3EyZNHJ9RBxRFenk+2IwGnV+ZKmlpB7kGEGpBjLzbkrIOz0iXkxZu+/TwNVNZa4HrgeOiYgZwOXAgZQxYo1enUktp2693NcIbM9k5kBj4S4F9qX8sT6x9WBErEmZfXpT03IiI1GPgVxB6ZF8L7A+sCaDW2fvXmDniFijpVew3WXL+cDftdk/0OXUZntSln7ZJTOfWwImIt7Ypuxge64epgTLaHOsMabvniHUcWkaPYybUkJcs/m0tHlErAK8qPUkVS/uN4FvRsRqlO/jz0bEfzC074vZlAlTg7Ep5TOdNcjyUs+wR1AawyJiQvXH8DmZeT8l2EyqykxuM/j9pmo7qdreS+npaZ0l+tGWc/+ZMibtwIjYsE191m16+D3gNuBfWi7fNcamnUXpaTmp6dBI1GOpqnGDF1CC1geBP2TmYCYEXE75HfuRptddETi4TdnZwJbN9aqWbHndYOtJ+VwW0fR7vZrF+oE2Zf/KIHreqjGNVwBvbZ65XY21ez9lzb+HhlDHpemjBNnWySdQPp/WNv8QLT2CEbF28+NqMsgsymSX1Yb4ffFdynjDd7cp1/rzMh2YVYVQablij6A0tm0B/DwiLqaErn7KJIatKDNJofxBPzgivk/5g7sasD8lWHwXylIu1TkOqdZ0m02Zodk6zg5K8PkV8PuIOKcqux7l0vPWVEtvVOuvvYuyNMcvI+JcShiYTOkp/D/Aic3rv41EPQbpm5RlRHahjC8bjEur1/+3an2+2yjrFLabtHMu8EngxxHx9aqeB1XPWXMIr/dJ4KcR8V/V6xwIPAhs0FL2RuCjEXEsZcziE5l56RLO+xnK+/5lRJxJCWsHUv6TsOcg6zagzFwQEVdQLs8e1XL4P4GzI+J7lKWOtqFMYmm9FHtHRFxLWefwkarcAcAPM/OJqsxgvy9OoQwD+HZE7EL53pwEzKCMRbwGnlsY/f+yfN0NRXqOPYLS2Pb/KL1Zr6f0rJ1CmRn5wcxsDI6/hnJJeC/K0hpHUcLDmzLzt03nOoSyLt5BlEu591FC5PNkZlJ6dS6hXPY9k9JjtxJlfGJr2W0o68ztTFmn7RRKCHx/Zj6v/EjVYyCZeQvw++rhYJZcITMXUpaQuQD4J8rn/6cl1PWOqo5rUZZkeRvlUvRNrWWX8npXV+eeQhkG8AHK2ohfalP8eBYHx29V5ZZ03jsoSxDdTJl49FnK98ebqzF5w+lc4FURsWnL/nOAz1N6Bf+Dcil2Z0rPZrPTKTO6j6R8T+1GWSppn0aBwX5fVHeWeUN1fDfK5/gxyhqMz82gpsxCnkKZRS0td1ZYtMhJTpJ6R0S8nLLkyL2UW88tbe25URMRNwALMnMol2s1BNWl85mU5WqO7HZ9BiMiLgEWZuY7ul0XqRP2CErqKZl5K2WSSADfryYFdFVEvJLSizQcdwXRElS9qEcDH6kWQ+9pETGNMtTCW8tpuWWPoCQtQfWHfjplfOCLgE1b7twhScs
1ewQlacn2pNzPdzXgHw2BksYaewQlSZJqyuVjOtDX1zce2I4yA3BJq+hLkiT1gnGU4S03TJ8+vb/5gEGwM9tRZjVKkiQtL14P/LJ5h0GwM38C2GKLLVhlleGd0Dhz5kymTRvsXY00GmyT3mJ79Bbbo/fYJr2lF9pjwYIF3HnnnVDll2YGwc48C7DKKqswfvz4YT/5SJxTy8Y26S22R2+xPXqPbdJbeqg9XjCczVnDkiRJNWUQlCRJqimDoCRJUk0ZBCVJkmrKIChJklRTBkFJkqSaMghKkiTVlEFQkiSppgyCkiRJNeWdRXrY439dwJP9z3S7GsNiwviVWGP14b0dnyRJWjYGwR72ZP8zXHnDfd2uxrDYabuNDIKSJPUYLw1LkiTVlEFQkiSppgyCkiRJNWUQlCRJqimDoCRJUk0ZBCVJkmrKIChJklRTBkFJkqSaMghKkiTVlEFQkiSppgyCkiRJNWUQlCRJqimDoCRJUk0ZBCVJkmrKIChJklRTBkFJkqSaMghKkiTVlEFQkiSppgyCkiRJNWUQlCRJqimDoCRJUk0ZBCVJkmrKIChJklRTBkFJkqSaMghKkiTVlEFQkiSppgyCkiRJNWUQlCRJqimDoCRJUk0ZBCVJkmrKIChJklRTBkFJkqSaMghKkiTVlEFQkiSpplbqdgUaImIiMAvYENguM29sOvY+4ChgE2A2cHxmXtjy/JWB44H3A5OAG4BPZOYtLeU2AL4I7AYsAn4I/HNmPjIS70uSJKlX9VKP4HG0CaYRsSdwPvB9YAbwM+DbETGjpegXgIOBY4G3AwuAKyNiatO5VgKuAF4OvA84AHgtcElErDDM70eSJKmn9USPYERMAw4CPgl8teXwCcDFmfmv1eOrImIr4LPAj6rnb1g9/+OZeU617zfA3cA/A0dUz30XsA0wLTNvq8rNAX5FCZmXj8T7kyRJ6kW90iN4JvBl4M7mnRGxKbAl8J2W8t8CtouIdavHuwDjgOcuF2fm45TLvrs3PW934NZGCKzKXQfc21JOkiRpzOt6EIyI9wIvBU5sc3irant7y/5GkIumcg9l5tw25baIiBWbyrWeq1Fuy6HUW5IkaXnX1SAYEWsBpwBHZOYTbYpMrraPtuyfX22nNJVrLdMotzIwcRDlprTZL0mSNGZ1e4zgicBdmXlBl+vRkZkzZ47Iefv6+gBYbc11mTNnzoi8xmibO3cC99/9cLer0bFGm6g32B69xfboPbZJb+nl9uhaEIyIl1EmeOwcEZOq3Y2eu4kRsQaLe/4mAQ82Pb3RUziv2s6vyrSaDDwNPDGIcvPa7F+qadOmMX78+KE+ban6+vqYPn06AA/Ne5KpU58c1vN3y9prr8P6m2/U7Wp0pLlN1H22R2+xPXqPbdJbeqE9+vv7l9h51c1Lw5tTguhVlIA2H7i0OnYV8AvgjurxVi3P3braZrW9A1gvIlov724N3JmZC5vKtZ6rUW5WB+9BkiRpudXNIPhL4I0tX4dWxw4CDsjMuykBbe+W5+4D3JCZjWuNPwEWAns1ClQLVL+V5y8Jcznw8mr5mUa511AWqnbpGEmSVCtduzRc3cnj6uZ9EY1JwPQ13VnkGODCiJgN/JSyWPQuwB5N53ogIs4GPh8Rz1CWgzkcWAE4veklvgf8HvhuRPwr5f2fAvyaak1CSZKkuuj68jEDycyLgf2BPYEfA7sC+2Zma3A7FDiLMgHlEmA14M2ZOafpXM9Qbi03E/hv4BvAb4C3ZeaiEX4rkiRJPaXbs4afJzOvpvTite4/n3KbuaU992ngX6qvpZV7kBdeapYkSaqdnu8RlCRJ0sgwCEqSJNWUQVCSJKmmDIKSJEk1ZRCUJEmqKYOgJElSTRkEJUmSasogKEmSVFMGQUmSpJoyCEqSJNWUQVCSJKmmDIKSJEk1ZRCUJEmqKYOgJElSTRkEJUmSasogKEmSVFMGQUmSpJoyCEqSJNWUQVCSJKmmDIKSJEk1ZRCUJEmqKYO
gJElSTRkEJUmSasogKEmSVFMGQUmSpJoyCEqSJNWUQVCSJKmmDIKSJEk1ZRCUJEmqKYOgJElSTRkEJUmSasogKEmSVFMGQUmSpJoyCEqSJNWUQVCSJKmmhhwEI2LXiFhhJCojSZKk0dNJj+CPgPsj4pSI2Ga4KyRJkqTR0UkQfAfwK+Bg4KaI+H1EHB4RU4e1ZpIkSRpRQw6CmXlJZu4FrA8cCDwMfA64NyJ+EhHviYgJw1xPSZIkDbOOJ4tk5uOZeW5m7gRsDBwFrAecDzwUEd+MiJ2GqZ6SJEkaZsM1a3gcsDIwHlgBeAp4M/DTiLg5IqYN0+tIkiRpmKzU6RMjYi1gL+A9wOuAZ4DLgH+ptguBtwFfAL4BbLeslZUkSdLwGXIQjIh3UMLf7sCqwA3AJ4BvZ+a8luI/iIh1gK+0Oc8/AJ8EtgQmAg8A3wdOyMzHmsrNAE4Ctq7KnJ6ZZ7Q53+GUCSwbALcBR2bmlS1l1gBOAfas6n4VcEhm3jOkD0GSJGkM6OTS8P8Arwa+CGydma/OzDPbhMCG3wMXtNk/BbgW+BCwW3W+DwAXNwpExPbAJcDNwAxKz+LpEXFQ84mqEHgycCawB3AXcFmb5W2+TemlPATYG5gKXOnkFkmSVEedXBreBbgyMxcNpnBmXg9c32b/f7bsujoi/gZ8NSKmZuYc4Bjgpsz8YFXmqojYCDg2Ir6WmQsjYjzwGUpP4akAEXENcCvwacrlayLi1ZSQuEdmXl7tuxWYDexHm15LSZKksayT5WN+NtgQ2IFHqu0qVcB7E3BhS5lvUS7/bls9fi2wFvCdpjo+C1wEzGi6C8ruwGPAFU3l7qOsibj78L4NSZKk3tfJLea+EBF3LeX4nRFxyhDONy4iVo2I6ZQewEuqMXubAasAt7c85bZqu2W13ara3tGm3ERgw6ZyszJzYZtyWyJJklQznVwa3oMX9tI1uxB4N/CpQZ5vLqVHD0pv3b7VvydX20dbys+vtlOayvVn5lNLKXd/Va71XI1yU9rsH9DMmTM7edqA+vr6AFhtzXWZM2fOiLzGaJs7dwL33/1wt6vRsUabqDfYHr3F9ug9tklv6eX26CQIvgS4ZynH763KDNaOwARgGmWs36URsXMH9Rp106ZNY/z48cN6zr6+PqZPnw7AQ/OeZOrUJ4f1/N2y9trrsP7mG3W7Gh1pbhN1n+3RW2yP3mOb9JZeaI/+/v4ldl51EgT/Amy6lON/R1lQelAy85bqn9dFRB9wI/BOFl8SntTylEZPYWOW8nxgfESsmpl/G6BcuyQyuamMJElSbXSyfMzPgQ9Xs3efJyI2AT5clenELZSFqF9Kmc27gMVjABu2rrazqm1jbGC7co9T1h5slIumySPN5WYhSZJUM50EwWMoPYkzI+KLEfGh6utLlDUDVwSO7rA+21fP/2Nm9lMC5V4tZfYBHgRuqh5fR5kNvHejQESMq553RdMM58spvYu7NpV7CbBDdUySJKlWhnxpODPviojXURZvPqTl8DWUO3XkQOeJiB8DV1Jm7f4NeCVlgsnvgR9UxY4Hro2IcyiLUr8OOBA4uDH7NzP7I+JE4OSIeJgSEA+gzDpuTDwhM38bEZcBX4+IwyiXuI8H7gPOG9qnIEmStPzr6F7DmXkbsGN1+7i/q3bPzsy5QzjN9ZRb1TXGG94DnA2clpkLqtf5dUS8nXLXkPcBc4BDM/PslvqcGhEAHwfWp4TLPTLzdy2vuQ9wKmXx6PGUW8y9OzPHxowMSZKkIegoCDZk5iMsXgR6qM89mkFcQq7uAjLgpdvqriKnDlDmccoYxg8PspqSJEljVkdBsBqDtyulN3Ay0DoBY1FmnrCMdZMkSdIIGnIQjIhXAd8DXswLA2DDIsAgKEmS1MM66RH8CrAa8A7gF5n56HBWSJIkSaOjkyD4CuDTmXnpcFdGkiRJo6eTdQTvZ8mXhCVJkrSc6CQIfg44MCLWHO7KSJIkafR0cml4CvBX4A8R8V3
g/wHPtpRZlJmnLGvlJEmSNHI6CYKfa/r3QUsoswgwCEqSJPWwToLgpgMXkSRJUq/r5F7D945ERSRJkjS6Or7FXERsDuwIrAdckJn3RMQqwAbAg437BUuSJKk3dXJnkRWBs4EPUpaRWQT8GrgHWAW4FTge+I9hq6UkSZKGXSfLxxwFfAA4GtiepjUFM/MJyu3n/mFYaidJkqQR00kQ3B84NzNPBv7Q5vitwObLVCtJkiSNuE6C4IuB65dy/Clgjc6qI0mSpNHSSRB8ENh4KcenA84sliRJ6nGdBMHvAR+pZg03LAKIiBnA+4CLhqFukiRJGkGdBMHjgPuAm4ELKCHwqIj4DfBD4HfAvw1XBSVJkjQyhhwEM/MvwGuBk4H1gb8BOwATKSHxDZn51DDWUZIkSSOgowWlM/NvlCB48vBWR5IkSaOlk0vDkiRJGgM6ubPIuYMotigzP9hBfSRJkjRKOrk0/CaqWcJNxgEvqrYPA39dxnpJkiRphA05CGbmJu32R8TKwIeBfwZ2XqZaSZIkacQN2xjBzHw6M78M/AT48nCdV5IkSSNjJCaL/A54wwicV5IkScNoJILgzsCTI3BeSZIkDaNOZg0fs4RDkyg9gdsCn1uGOkmSJGkUdDJr+Lgl7J8PzAYOAs7ptEKSJEkaHZ3MGnYRakmSpDHAUCdJklRTnYwR3KiTF8rM+zp5niRJkkZGJ2ME7+GFdxYZjHEdPEeSJEkjpJMgeADwceAlwLeAO6v9AewD3Ad8CVg4HBWUJEnSyOgkCL4IGA+8NDPnNx+IiGOBXwEbZOa/DUP9JEmSNEI6mSxyEPC11hAIkJlzKUvHfGRZKyZJkqSR1UkQXBuYuJTjq1dlJEmS1MM6CYK/AT4REdNbD0TEq4BPAL9d1opJkiRpZHUyRvBjwNXA9RFxA3BXtX9zYDtgHnDIsNROkiRJI2bIPYKZeTvwcsrM4EnAntXXJOCLwMsz87bhq6IkSZJGQic9gmTmQ8Ch1ZckSZKWQx0FwYaI2BxYD5iZmY8N8bnvBv4JmA5MAWYDZwFfzcyFTeVmACcBWwMPAKdn5hltznc4cDCwAXAbcGRmXtlSZg3gFEoP5qrAVcAhmXnPUOouSZI0FnR0r+GI2Dci7gNmAddSwhwRsU5E3BkRew3iNIcB/cCngLcAP6Bcbv580+tsD1wC3AzMAL4BnB4RB7XU53DgZOBMYA/KuMXLImKbltf8NvA2yhjGvYGpwJURMWHQb16SJGmM6ORew+8C/hv4KXA6cGrjWGY+EhF3AO8DLhrgVG/NzIebHl8VEROBj0XEZzKzHzgGuCkzP9hUZiPg2Ij4WmYujIjxwGcoPYWnVnW8BrgV+DSwV7Xv1ZSQuEdmXl7tu5XSE7kf8JWhfhaSJEnLs056BD8N/CwzdwXOb3P8t0BrT9wLtITAhpspl2ynVAHvTcCFLWW+Rbn8u231+LXAWsB3ms79LCWIzoiIFarduwOPAVc0lbuPcieU3QeqryRJ0ljTSRDcCvj+Uo7/GVi3s+rwesryM38GNgNWAW5vKdOYkbxlU30A7mhTbiKwYVO5Wc3jD5vKbYkkSVLNdDJZ5K8s/c4imwGPDPWk1WLU+wOfzcxnI2JydejRlqKNW9tNqbaTgf7MfGop5e6vyrWeq1FuSpv9A5o5c2YnTxtQX18fAKutuS5z5swZkdcYbXPnTuD+u9t1Ai8fGm2i3mB79Bbbo/fYJr2ll9ujkyD4c2C/iPhi64GImAocCPzvUE4YERsA3wOup2mySK+bNm0a48ePH9Zz9vX1MX16uWnLQ/OeZOrUJ4f1/N2y9trrsP7mG3W7Gh1pbhN1n+3RW2yP3mOb9JZeaI/+/v4ldl51OkbwRcCNwEeBRcDuEfE5ygSNhcBnB3uyiFgL+BHwJPC2zHy6OtTo0ZvU8pRGT+G8pnLjI2LVQZRrPVej3Lw2+yVJksa0Tu4schfwOuBB4DhgBeCTwBHALcAO1SSMAVXh7RLKWoS
7ZebcpsOzgQUsHgPYsHW1nVVtG2MD25V7nLL2YKNcNE0eaS43C0mSpJoZUhCMiHHV8i0PZeYuwDrAq4HtgfUzc6fMvHOQ51qJMrP3FcCMzLy3+Xi1fMzPqZZ/abIPJYTeVD2+jjIbeO/melbPuyIzF1W7L6f0CO7aVO4lwA7VMUmSpFoZ6hjBFSk9dUcCp2XmfOCGDl/7TOCtlJ7ECRHxmqZjt2fmX4DjgWsj4hzgAkpP5IHAwY3Zv5nZHxEnAidHxMOUgHgAZdLKvo0TZuZvI+Iy4OsRcRjQOP99wHkdvgdJkqTl1pB6BKvxe3Mo4wKXVaNn7t+BX7d8bVu93q+BtwPbAT+mBLxDM/PslnqdChwFfJwy3nBLysLRv2t5zX2AH1IWj76Y0rP45swcGzMyJEmShqCTWcPfoMwaPisz/9bpC2fmJoMsdzmDuHRbhcFTByjzOPDh6kuSJKnWOgmCdwLjgFkRcT7wR6B1DT8yc6BbzEmSJKmLOgmC/93076OXUGYRA99rWJIkSV00qCAYEV8Czs/MPuCN1e6JlJ7AZ0eobpIkSRpBg+0R/BjwG6AvM6+JiLUp9wPeOTOvGbHaSZIkacR0cmeRhtaFmSVJkrQcWZYgKEmSpOWYQVCSJKmmhjJr+O8i4u+rf69VbbeMiCfaFc7M65epZpIkSRpRQwmCn62+mp3RptwKlOVjxnVaKUmSJI28wQbB/Ue0FpIkSRp1gwqCmXn+SFdEkiRJo8vJIpIkSTVlEJQkSaopg6AkSVJNGQQlSZJqyiAoSZJUUwZBSZKkmjIISpIk1ZRBUJIkqaYMgpIkSTVlEJQkSaopg6AkSVJNGQQlSZJqyiAoSZJUUwZBSZKkmjIISpIk1ZRBUJIkqaYMgpIkSTVlEJQkSaopg6AkSVJNGQQlSZJqyiAoSZJUUwZBSZKkmjIISpIk1ZRBUJIkqaYMgpIkSTVlEJQkSaopg6AkSVJNGQQlSZJqyiAoSZJUUwZBSZKkmjIISpIk1dRK3XzxiHgpcDjwGmAaMCszp7UpNwM4CdgaeAA4PTPPaFPucOBgYAPgNuDIzLyypcwawCnAnsCqwFXAIZl5z/C9M0mSpN7X7R7BlwF7AH8Abm9XICK2By4BbgZmAN8ATo+Ig1rKHQ6cDJxZnfMu4LKI2KbllN8G3gYcAuwNTAWujIgJw/SeJEmSlgtd7REELs3M/wWIiPOAV7UpcwxwU2Z+sHp8VURsBBwbEV/LzIURMR74DKWn8NTqfNcAtwKfBvaq9r2aEhL3yMzLq323ArOB/YCvjMi7lCRJ6kFd7RHMzIVLO14FvDcBF7Yc+hbl8u+21ePXAmsB32k697PARcCMiFih2r078BhwRVO5+4BfVcckSZJqo9uXhgeyGbAKL7xsfFu13bLablVt72hTbiKwYVO5WW0C6G1N55IkSaqFbl8aHsjkavtoy/751XZKU7n+zHxqKeXur8q1nqtRbkqb/Us1c+bMoT5lUPr6+gBYbc11mTNnzoi8xmibO3cC99/9cLer0bFGm6g32B69xfboPbZJb+nl9uj1INjTpk2bxvjx44f1nH19fUyfPh2Ah+Y9ydSpTw7r+btl7bXXYf3NN+p2NTrS3CbqPtujt9gevcc26S290B79/f1L7Lzq9UvDjR69SS37Gz2F85rKjY+IVQdRrvVcjXLz2uyXJEkas3o9CM4GFrB4DGDD1tV2VrVtjA1sV+5xytqDjXLRNHmkudwsJEmSaqSng2Bm9gM/p1r+pck+wIPATdXj6yizgfduFIiIcdXzrsjMRdXuyyk9grs2lXsJsEN1TJIkqTa6fWeRCSxetmVjYM2I2LN6fENm3gscD1wbEecAFwCvAw4EDm7M/s3M/og4ETg5Ih6mBMQDKLOO9228Xmb+NiIuA74eEYcBf6nOfx9w3oi+WUmSpB7T7cki6wEXt+xrPN4fOC8zfx0Rb6fcNeR9wBzg0Mw8u/lJmXlqRAB8HFifsiTMHpn5u5b
z7wOcSlk8ejzlFnPvzsyxMStDkiRpkLoaBKv7+7aO12tX7nIGcem2uqvIqQOUeRz4cPUlSZJUWz09RlCSJEkjxyAoSZJUUwZBSZKkmjIISpIk1ZRBUJIkqaYMgpIkSTVlEJQkSaopg6AkSVJNGQQlSZJqyiAoSZJUUwZBSZKkmjIISpIk1ZRBUJIkqaYMgpIkSTVlEJQkSaopg6AkSVJNGQQlSZJqyiAoSZJUUwZBSZKkmjIISpIk1ZRBUJIkqaYMgpIkSTVlEJQkSaopg6AkSVJNGQQlSZJqyiAoSZJUUwZBSZKkmjIISpIk1ZRBUJIkqaYMgpIkSTVlEJQkSaopg6AkSVJNGQQlSZJqyiAoSZJUUwZBSZKkmjIISpIk1ZRBUJIkqaZW6nYFVA8LFy7ioXlPdrsaHVltzXWfV/cJ41dijdVX6WKNJEkaHgZBjYr+p5/lut/P6XY1OjJnzhymTl0cBHfabiODoCRpTPDSsCRJUk0ZBCVJkmqqdpeGI2Jz4AxgB+Ap4DvAkZm5fA5gkyRJ6lCtgmBETAKuAu4F9gTWA04D1gX+sXs1kyRJGn21CoLAh4HJwCsz8xGAiHgGuCAiTsjM27paO0mSpFFUtzGCuwNXNkJg5XtAPzCjO1WSJEnqjrr1CG4FnNu8IzP7I2I2sOUQzjMOYMGCBcNYtcX6+/sBeObpBay04sIReY3R9uwzTy+372XVlVd4Xt0XLOjngYdGpu1H07gVV+DZhYu6XY0hGz9xCg889Ojz9q22ykqsPmHl7lRIz/3OUu+wTXpLt9ujKa+Maz1WtyA4GXi0zf75wJQhnOdFAHfeeecwVOmFZs6c+dy/NxtKrXrYo3++Z7l9L5tNWZMyr6j48wN/7F5lBMCDT8zrdhXUpPl3lnqDbdJbeqg9XgTMbt5RtyA4XG4AXg/8CXi2y3WRJElamnGUEHhD64G6BcH5wKQ2+ycDswZ7kunTp/cDvxymOkmSJI202e121m2yyB2UcYLPiYjxwGYMIQhKkiSNBXULgpcDO0XE2k373gmMr45JkiTVxgqLFi1/swY7VS0oPRO4BziBxQtKX5mZLigtSZJqpVY9gpn5KPAm4Angf4AvABcCH+hitSRJkrqiVj2CkiRJWqxWPYKSJElazCAoSZJUUwZBSZKkmqrbgtI9KyI2B84AdqDcz+w7wJGZ+WRXKzbGRMRLgcOB1wDTgFmZOa1NuRnAScDWwAPA6Zl5RptyhwMHAxsAt1Ha7MqRewdjR0S8G/gnYDrlFo+zgbOAr2bmwqZytsUoiYh/AD5Juff6RMrn/X3ghMx8rKmcbdIFETGRsubthsB2mXlj07H3AUcBm1B+lo7PzAtbnr8ycDzwfsrNFW4APpGZt4xC9Zd7EbEf8I02h87MzI81lVuufj7sEewB1bI2VwFrAHsChwH7AOd2sVpj1cuAPYA/ALe3KxAR2wOXADcDMyg/+KdHxEEt5Q4HTgbOrM55F3BZRGwzYrUfWw4D+oFPAW8BfgB8Cfh8o4BtMeqmANcCHwJ2A75IWVXh4kYB26SrjqNNB05E7AmcTwntM4CfAd+uAkmzL1CCx7HA24EFwJURMXUE6zwW7QZs3/R1auPA8vjz4azhHhARRwLHABtn5iPVvn2BC4BpmXlbN+s3lkTEio3epog4D3hVa49gRPwImJKZr27a9zXgrcCGmbmwuiPNQ8DXMvOIqsw44FZgZmbuNSpvaDkWEetm5sMt+04DPgJMysx+26L7IuJDwFcpn/cc26Q7ImIa8BtKj+1XaeoRjIg7gFubP9eI+Anl5+jvq8cbAvcCH8/Mr1T71gDuBs5ttJOWrKlHcN3G3+o2ZZa7nw97BHvD7pRFrZu/sb5H6S1p/R+dlkHzJcd2qh/QN1HWl2z2LUr3/bbV49cCa1Eu4TfO/SxwETAjIlYYrjqPVa0hsHIzsCowxbboGY3fS6vYJl11JvBl4M7mnRGxKeVS/ndayn8L2C4i1q0e7wKMo6n
tMvNx4IeUv0FaRsvrz4dBsDdsRctlyszsp4zz2LIrNaqvzYBVeOFl40avbKM9GvesvqNNuYmUMTwautcD84A/Y1t0TUSMi4hVI2I65WrFJZl5D7ZJV0TEe4GXAie2Odz4rJfUJtFU7qHMnNum3BYRYR4YvJkR8WxE3B0Rx0ZE43L9cvnzYcP3hsnAo232z6eM2dHomVxtH23ZP7/aTmkq15+ZTw1QToMUEa8C9ge+UP3v2LbonrmUSWs3An8C9q322yajLCLWAk4BjsjMJ9oUGUqbtJZplFuZEkC0dH+ijK/cjzJO8PvA0cB/VseXy58PZw1L6rqI2IAyHOJ6miaLqGt2BCZQZtZ/Brg0Inbuao3q60Tgrsy8oNsVqbvM/DHw46ZdP42Ix4DjIuKELlVrmdkj2BvmU6byt5pMuUym0dP4H9mklv2N/+nNayo3PiJWHaCcBlD1ePwIeBJ4W2Y+XR2yLbokM2/JzOsy82vAO4E3VlvbZBRFxMuAg4CjI2JStcJEo+duYjXZYyht0lqmUe5poF1vowZ2UbXdluX058Mg2BvuYPGYAeC5QaebUdaM0uiZTVlSYauW/VtX20Z7NMZ2tCv3OGXtKA2g+kV4CbAesFvL+CXbojfcAiykjFGzTUbX5pQrd1dRwsN84NLq2FXAL1j6Zw2Q1fYOYL2IaL3suDVw50AT6TQoy+XPh0GwN1wO7BQRazfteycwvjqmUVJN0vk50Dp9fx/gQeCm6vF1wGPA3o0C1fT/vYArMtN1mQZQDbC+CHgFMCMz720+blv0jO0pfyv+aJuMul9SemObvw6tjh0EHJCZd1MCxt4tz90HuKFpdv5PKIG+eYmZiZRlTfw707l/BBYBfcvrz4djBHvDV4FDgP+txhmsB5wGXJiZbRc9VmciYgKLl0rYGFizWowVyi/Neykr718bEedQ1nJ8HXAgcHDjf83VGncnAidHxMOUH/ADKL24+6LBOJPyR+gIYEJEvKbp2O2Z+Rdsi1EVET8GrqTMXvwb8ErKgt+/pyz4DbbJqKmWFLu6eV9EYxIwfU13FjkGuDAiZgM/pSwWvQtloeLGuR6IiLOBz0fEM5Q1BQ8HVgBOH7l3MXZUPx8/B2ZSQvUM4KPA1zPzj1Wx5e7nwwWle0REbEG5q8LrWXyLuSO8xdzwiohNKAuotrN/Zp5Xldudsur7VsAcykzWL7U53+GUEL8+5Y/nEd5Ca3Ai4h5KGG/njZl5dVXOthgl1X9E3w5sWu26hzKJ57QqmDfK2SZdEhE7Ui4Lt95i7v288BZz32l57srACZRZr2ux+BZzN49G3Zd3EXE6Jfy9mNKRdhfVnUOqlQ4a5Zarnw+DoCRJUk05RlCSJKmmDIKSJEk1ZRCUJEmqKYOgJElSTRkEJUmSasogKEmSVFMGQUmSpJoyCEqSJNXU/we2dyrPLzUr8gAAAABJRU5ErkJggg==\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAApsAAAFKCAYAAABSGJRzAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjQuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/Z1A+gAAAACXBIWXMAAAsTAAALEwEAmpwYAAA3OklEQVR4nO3debgcVZn48W8EuRDZQtiMIy4sr2DUGQIjICiKigFFHFnEUQSF0ZHFBRQXREDEhYyiAu4KjiCL/lQQxAUQHRDBwAAB8oIIZDAjsoRFYS6Y5PfHqSZFc29yb9+u23f5fp4nT6er3q46dbq679unzjk1ZenSpUiSJElNeEqvCyBJkqSJy2RTkiRJjTHZlCRJUmNMNiVJktQYk01JkiQ1xmRTkiRJjTHZlMagiDg6IsbUvGQRsTQivtLrckgjERFTIuLaiPhEr8syFBFxTkSc3etySCOxcq8LIE0Ww0ge92+0IGNEREwFPgj8KjN/1ePiNCIidgQuAfbJzDN7XJwhiYinAe8H9gI2Bh4DrgW+BpyemWPqR1AH9gE2Ab7Q64IM0aeA30fEizLz2l4XRuqEyaY0et7a9vzfgG2At7ctvxz4LvDp0ShUD00FPl79/1c9LIcqEbEBcBGwOfA94EvAasC/AP8JzI6It2bmkt6VcsQ+AHw/M+/pdUGGIjOvjojfA4fz5O8QaVww2ZRGSWZ+t/48Il4J/HP78pq/N18q6QlOoySab8jMc2vLvxARJ1ASnv8GThjNQkXEKsCSzBzRZyIi/gn4R+DIbpRrFJ0FHBsRB2Xmg70ujDRcJpvSGBQRRwMfz8wptWW3A/MpLZ5zgOcDtwKHZubFEbE7cCywGXAjcGBmzm3b7mbAccBOwNOAm4BPZub3h1G2vSktks8FEjgiMy9si1mritkD2BC4E/g2cHxmLo6IZwO3VeEfj4hWC+dpwH8A1wF7ZOYPqu1Fdex/yMxNa/v5T+Clmfms2rKtgWOAlwCrAHOBj2XmJW1lfDrwCeC1wDTgj8AXM/PLtZgdKZfB3ww8B3g3sC5wGfDOzPzDUOutts3VgaOBNwIzgAeBG4CjMvPXVcwmwPHADsA6wL3Ab4GDM/N/a/W3f2ae2rb9pcAxmXn0MI91G2Bn4FttiWbLh4HXAx+KiJMy85GGyrEjpc7fAmxKafl/BvBPEXEF8I3MPLRtX9OB/wU+n5lHDFD2lt2BxcDFba8/mrbPW7V8P8p5+5zMvL1atiXlM7Q1sAZwF/Br4N8y85EqZgpwMOXqxaaU9/g8ymflnrZ9vIpSt1sBU4CbgS9n5jdqYb+gfOZ3Bs5ZzvFJY5IDhKTx5bmUy5vnAx8C1gbOjYg3A18EzgCOquLOiYiVWi+MiM2B3wEvAD4LHEZJYs6JiLcMcf8vAb4MnA18FFgVOC8itq/tZzVKsrAfpTvAwZQ/7kcDX63C7gb+vfr/DymXB99arZ8HLAJeWtvvS4ElwCZVwtKyA+UPfWvfLwN+Q0nQjgWOAPqAn1dJTCtufeAK4DXAKcB7qv2eEhEDtXp9kHIpeQ6lD902wOmD1tLyfRk4hHLc7wY+Q6mPF1VleyrwM2B74OQq5hRgA0pyOizDONbXVY/fGWg7VaviGZS63a7BcrR8hJKQf4HyPi4EfgTsHRHtDSV7A08drOw12wE3tpLCDo5hPUritzHlM3QwcCowk/LjreXLwOcon7f3UPq77gFcEhGr1rb3Vsp7vUG1vQ8CVwK7tu36RuARyudPGnds2ZTGl00pLXm/AYiImyh/rL4FbJ6Zt1XL76ckbi8Hflm99guUP9hb1f7YnhwRPwc+HRFDGfwxE9guM39b7edU4BZKa2sr4Xwf8Dxgy8ycXy37WkTcB
hwXESdkZkbE9yl/lK8boIvBZTwx2dwB+CmwY7X8rIh4JvAs4JPVa6ZUx/xfwKtax1KNoL+G0lLYSpKOoyShL8jMu6tlX4mIrwMfqVru7q/tf1XgRZn5aLXNRZRLyzMzc94K6qzda4GvZ+b7B1m/BeXHwp5tLc7HDXM/9dcN5Vi3qNYtbxBKa90WlL6dTZSjZQ3KOf231oKI+A5lgM+rgQtqsW8BrsnMG1ZQhudRWro7tR0l2d45M39fW95qmScitgPeCbwtM79TW34h5YfQvpTPw5rAScDVwA71BLg6lx+XmX+PiP9h2XskjSu2bErjy82tRLPyu+rxV61Es235cwEiYh3glZQWyadFxLqtf8CFlMuUmw1h/79vJZoAmXkvpbXrJRExrVq8FyXhu6dtP62kd8ch7Oc3wAury/FQEsyLKS1jrSR0h1oslJbBqMozvbbfNSmtUS+OiKnVH/I9KK3DS9vK+HPKgJgXt5XnO61Es22fzx3CsbR7oCrLMwZZ3+qTt3M1MrxjwzzWNarHh5azyda6NZYTM9JytHynnmhWfkG5XP74QJmIeC6wLWUA04pMp7Sad+qB6vG1VQv0QPYC/gpc2Hac8ymX3F9exb2acm5+ur2ldZAffYsoXTikcceWTWl8WVB/kpkPlO6M/E9bXOuPYisB3ITSH+zo6t9A1qf0wVyeWwZYdnP1+CzKH8TNKInf3QPEtvazIr+h/BjePiKuq7b9a2B1YM8qZgfgL7XW01ay/M3lbHc60E+pl7fz5JkABivjgrbnrYRlGsP3AUrf1AURcQ0l2f/PzEyAzLwtIj5HmX7oLVUr73nAd6vkfjjWY+jHWk8k7x8ktpVk/qXBcrTc2h5Q9ff9LnBQRKyRmQ9RWjUXU7qXDMWUFYcM6lLg+5SWzPdHxKXAucAZtcR4M8p5etcg22gd58bV41BbxqcA433aKU1SJpvS+LJ4mMtbf1hbVzE+zxMvP9YN93LwYJ5CaYX81CDr/ziEbfye0kftpZR+qQ9RLoWvARxdtdTuQGlBre8XSl/WwS6V3l1tD0py8q1B4tovx66ofocsM8+JiN9QBtu8GjgU+GBE7JeZZ1Qxh0XEt4Ddqpj/AI6MiJdl5o0MknTU++hWWnUylGO9kTKA5oXU+sG2eWH12HoPmyhHy2D9Kr9DSdj/hZK0/yvwi8z88yDxdfcw8A+EwZK4JxxH1eK4Z0T8M6U7xKso/TE/HBHbZOZfKMd6L/CmQbbZacvqNJYNqpPGFZNNaXJoJQd/z8xfLjdy+TYdYFmrRfGO6vFWYI0h7GfQVprMfKwaefxSYC3g8qpV6wrKlFCvp/Rf+3rtZa2WsIeWt++IuJuSvK48wrroWJUYfRX4akSsTekecAylC0Ar5gZKAvapiHghJYF+H3AgyxKWtds2/ay258M51vMog3L2ZYBks0og38yy0dc0VI7lysx5EXE18Naqz/JmlLobipsoswq0WwQQEWu39RttP45WGa6kDOQ5KiJmU37AHUjpP3wrJQm9IjP/upyytM7XmZRL7IOqBkQ9k8F/KEpjmn02pUmganG5BDhwoL6C1SjbodgqIratvW46JQG5PDNbicdZwNYRscsA+1kjIvqqpw9Xj4Ndiv4NMIvyh/vX1XE8Qmn1PILSqlhPiuYCf6Bc3nxSn8LWMWbmYsql0N0j4kWDxTUhIlaq9UOlKs/9lBartauYNQcYbX0TpaVv7eo1D1Ja6V7aFvfutm0P+Vgz8wpK/8n9I6J9NDSURGoz4DOt+S6bKMcQnUbp+/hBShL7wyG+7jJgi2rGhLpW4vf4cVT9Zd/WVs5p7YN3KAN8YFnCfRblb+tR7Tuv3v/W+f5zSv/cD7WXZ4B9bEEZpHb5wIcljW22bEqTx79T/theV40AvpXSf+zFlD9mmwxhG/OAn0TElyh/5P+Ncmn7w7WYEyjT6Pw4Ik6jJIGrUVpw9qRMvXR7NU/jDcCbIuJmyqXH2zKzNbjpNyybxqmeV
P6akmw+SG3kdGYuiYh3UPpA3lhdhr6TMl3QyyjJaWtwxocoA5V+W9XFDZSk9x+BN1D+sDdhDeBPEfGDquwPUqazeQ1lZDLAKyizBHyf0od2CmVqnzUoiUzLNyiJyjcoCfhLGXiQ13COdV9KF4hzI+IMynuwKuWS9csog3Y+37b9JsqxIt+jTEP1RuDUYUxl9GNKK+grKIOVWn5O6Zf7zSiT1y+m9C29G9ioFvc2Sn/RH1I+P6tRbi/bSqbJzF9HxMnAB6oW6Z9R+glvQhkkdVRV5gcj4j2UbgW/r+r7Xsr8uc+g1HnLqyg/Nn42xOOUxhRbNqVJohqAshVlQMO+LJvDcWXgY0PczGXVa/amTCXUD+zemoy82s8jlKTiM5TE40TK5dnNKRN61/vWvQO4ndIn8Xssm3sTyiTmfwf+j3LJsqU1Evyy9tsmVuXYhnJZ+t2UBO7twH1VeVpxf6Ek2d+g9FM8iXKJekPK/KNNeZhS7y+g1PmJlPfk8Gr/UJLQnwK7UBKqT1ASzt3bpkI6ljIYag/KHI0rAbPbdzicY83Mu6rYY4B/okyX9QVKovnxzHxCS19T5ViRauqkn1ZPhzIKvfW6ayktkXu1LX+MkvDeSqnvQ6tyntS2iUsp5+JelHr5COV8fkXtRxKZeTDl3F6H0iL8aUrf27OpTShfTYT/Wsr5+RFK/W1L6dJQtxfww8x8AGkcmrJ0qYPbJEkDq7pdXE5pnNg2M+/scZEAiIhzKD8sntX+o2MFr9uH0tf3WR2M7h91Ue5Y9HtgVmZe0+vySJ2wZVOSNKjM/BPlMv9UytyRa/e2RI/fjWg3ypRRQ040K2dSWjDf2+1yNeTDwPdNNDWe2bIpSRoXIuI5lD6ub6dcbt50rLS0ShqcA4QkSePFy4BvU25isJ+JpjQ+2LIpSZKkxtiy2YG5c+f2AVtT7tE72J1FJEmSxoKVgKcDV82aNat/tHdustmZrVk2/YokSdJ40H6b31FhstmZ/wXYbLPNWGWVVRrbybx585g5c2Zj2x8vrIfCeiish8J6KKyHwnoorIeivR4effRRbr75Zqjyl9FmstmZxQCrrLIKfX19K4odkaa3P15YD4X1UFgPhfVQWA+F9VBYD8Ug9dCTrn/OsylJkqTGmGxKkiSpMSabkiRJaozJpiRJkhpjsilJkqTGmGxKkiSpMSabkiRJaozJpiRJkhpjsilJkqTGeAehMWz1tdblrvse7nUxumJq38qs8bTmbu0pSZLGJpPNMWzx0ilcdNWCXhejK3baeiOTTUmSJiEvo0uSJKkxJpuSJElqjMmmJEmSGmOyKUmSpMaYbEqSJKkxJpuSJElqjMmmJEmSGmOyKUmSpMaYbEqSJKkxJpuSJElqjMmmJEmSGmOyKUmSpMaYbEqSJKkxJpuSJElqjMmmJEmSGrNyr3YcEXsC/wrMAtYBbgW+DHw1M5dUMacCbxvg5Xtm5vfbtnc4cBCwIXADcERmXtQWswZwArAHsCpwCXBIZt7etQOTJEnS43rZsnkY0A98AHgt8CPgi8Bn2uL+CGzb9u/iekCVaB4PnAzsCtwCnB8RL2rb1veA3YBDgL2BGcBFETG1WwclSZKkZXrWsgm8LjPvrj2/JCJWBw6OiCMzs79a/khmXjHYRiKiDzgSODEz51TLLgWuBz4K7FUtezElEd01My+oll1PaVHdDzilmwcnSZKkHrZstiWaLddQLm+vM4xNbQesBZxZ2/Zi4GxgdkRMqRbvAjwAXFiLWwBcVq2TJElSl/WyZXMgOwD3AX+pLds4Iu4HngbMAz6dmWfV1m9ePd7Utq0bgNWBZwB3VnHzW/1B2+J27krpJUmS9ARjJtmMiK2A/YFjqpZJKC2dV1ESwrWAA4AzI2K1zDy1ipkG9GfmI22bXFQ9rkNJNqcB9w+w60UMryX1cfPmzevkZUO22prrsXDhwkb3MVruvXcqd942UGP20MydO7eLpRm/rIfCeiish8J6KKyHwnoox
lI9jIlkMyI2BH4AXEltgFBmfqEt9McRcTFwDHDqqBVwEDNnzqSvr6+x7d94ywJmzJjR2PZH0/Tp67LBpht19Nq5c+cya9asLpdo/LEeCuuhsB4K66GwHgrroWivh/7+/sYbyJan5/NsRsRawE+Bh4HdMvOxFbzkHGCjiFiver4I6IuIVdviplWP99Xi1h5ge9NqMZIkSeqiniabVYJ4LrA+8JrMvLeDzbT6am7etnwL4CHgT7W4qA0YqsfN72C/kiRJWoGeJZsRsTJlxPgLgdmZeccQXjOFMpXRHbXR7JdTRpnvXYtbqYq7MDOXVosvoLRs7lyLeyawfbVOkiRJXdbLPpsnA68DPghMjYhtautupFzePo0yEfsfKIniAcCOwFtbgZnZHxHHAcdHxN3A1VXcxsCba3G/i4jzgW9GxGHAg8CxwALGQP9PSZKkiaiXyWarhfGzA6x7OXAdpcXySMpl9scoieRumXlePTgz50QEwKHABpTR67tm5rVt290HmEOZwL2PcrvKPTPz4W4ckCRJkp6oZ8lmZj57CGGvH8b25lASyeXFPAS8s/onSZKkhvV8NLokSZImLpNNSZIkNcZkU5IkSY0x2ZQkSVJjTDYlSZLUGJNNSZIkNcZkU5IkSY0x2ZQkSVJjTDYlSZLUGJNNSZIkNcZkU5IkSY0x2ZQkSVJjTDYlSZLUGJNNSZIkNcZkU5IkSY0x2ZQkSVJjTDYlSZLUGJNNSZIkNcZkU5IkSY0x2ZQkSVJjTDYlSZLUGJNNSZIkNcZkU5IkSY0x2ZQkSVJjTDYlSZLUGJNNSZIkNcZkU5IkSY0x2ZQkSVJjTDYlSZLUGJNNSZIkNcZkU5IkSY0x2ZQkSVJjTDYlSZLUGJNNSZIkNWblXu04IvYE/hWYBawD3Ap8GfhqZi6pxc0GPglsAfwJODEzvzTA9g4HDgI2BG4AjsjMi9pi1gBOAPYAVgUuAQ7JzNu7fXySJEnqbcvmYUA/8AHgtcCPgC8Cn2kFRMS2wLnANcBs4NvAiRHxrvqGqkTzeOBkYFfgFuD8iHhR2z6/B+wGHALsDcwALoqIqV0+NkmSJNHDlk3gdZl5d+35JRGxOnBwRByZmf3AUcDVmfmOWsxGwMcj4muZuSQi+oAjKS2ecwAi4lLgeuCjwF7VshdTEtFdM/OCatn1lBbV/YBTGj5eSZKkSadnLZttiWbLNZTL2+tUSeQrgLPaYs6gXCrfsnq+HbAWcGZt24uBs4HZETGlWrwL8ABwYS1uAXBZtU6SJEldNtYGCO0A3Af8BdgYWAW4sS3mhurxedXj5tXjTQPErQ48oxY3v94ftBb3PCRJktR1vbyM/gQRsRWwP3BMZi6OiGnVqvvbQhdVj+tUj9OA/sx8ZDlxd1Zx7dtqxa0zwPIVmjdvXicvG7LV1lyPhQsXNrqP0XLvvVO587aBGrOHZu7cuV0szfhlPRTWQ2E9FNZDYT0U1kMxluphTCSbEbEh8APgSmoDhMa6mTNn0tfX19j2b7xlATNmzGhs+6Np+vR12WDTjTp67dy5c5k1a1aXSzT+WA+F9VBYD4X1UFgPhfVQtNdDf39/4w1ky9Pzy+gRsRbwU+BhYLfMfKxa1WqZXLvtJa0Wz/tqcX0RseoQ4tq31Yq7b4DlkiRJGqGeJptVgngusD7wmsy8t7b6VuBRlvXJbNmiepxfPbb6ag4U9xBlbs5WXNQGDNXj5iNJkqSu61myGRErU0aMvxCYnZl31NdXUx9dTDV1Uc0+wJ+Bq6vnl1NGme9d2/ZK1esuzMyl1eILKC2bO9finglsX62TJElSl/Wyz+bJwOuADwJTI2Kb2robM/NB4Fjg1xHxdeB04CXAgcBBrVHlmdkfEccBx0fE3ZQk9ADKaPY3tzaYmb+LiPOBb0bEYUBr+wuAUxs9UkmSpEmql8lmq4XxswOseznwq8z8bUS8nnJ3oH2BhcD7MvMr9eDMnBMRAIcCG1CmM9o1M69t2+4+wBzKBO59l
NtV7pmZD3fnkCRJklTXs2QzM589xLgLGMJl7uruQXNWEPMQ8M7qnyRJkhrW89HokiRJmrhMNiVJktQYk01JkiQ1xmRTkiRJjTHZlCRJUmNMNiVJktQYk01JkiQ1ZtjJZkTsPMD9xSVJkqQn6aRl86fAnRFxQkS8qNsFkiRJ0sTRSbK5O3AZcBBwdURcFxGHR8SMrpZMkiRJ496wk83MPDcz96Lcg/xA4G7g08AdEfHziHhLREztcjklSZI0DnU8QCgzH8rMb2XmTsCzgI8A6wOnAXdFxHciYqculVOSJEnjULdGo68EPBXoA6YAjwCvBH4REddExMwu7UeSJEnjyMqdvjAi1gL2At4CvAT4O3A+8KHqcQmwG/B54NvA1iMtrCRJksaXYSebEbE7JcHcBVgVuAp4D/C9zLyvLfxHEbEucMoIyylJkqRxqJOWzf8H/An4AnBaZs5fQfx1wOkd7EeSJEnjXCfJ5quBizJz6VCCM/NK4MoO9iNJkqRxbtjJZmb+somCSJIkaeLp5HaVn4+IW5az/uaIOGFkxZIkSdJE0MnUR7sCZy1n/VnA6zorjiRJkiaSTpLNZwK3L2f9HVWMJEmSJrlOks0HgecsZ/1zKZO6S5IkaZLrJNm8GHhnRGzUviIing28s4qRJEnSJNfJ1EdHAbOBeRHxbeCGavlMYD9gMfCxrpROkiRJ41onUx/dEhEvAU4GDmlbfSlwSGZmNwonSZKk8a2je6Nn5g3AjtWtKJ9bLb41M+/tWskkSZI07nWUbLZk5j3APV0qiyRJkiaYjpLNiFgJ2JnSqjkNmNIWsjQzPzHCskmSJGmcG3ayGRFbAT8A/oEnJ5ktSwGTTUmSpEmuk5bNU4DVgN2B32Tm/d0skCRJkiaOTpLNFwIfzczzul0YSZIkTSydTOp+J4NfPpckSZIe10my+WngwIhYs9uFkSRJ0sTSyWX0dYC/AX+IiO8D/0O5a1Dd0sw8YaSFkyRJ0vjWSbL56dr/3zVIzFLAZFOSJGmS6yTZfE63dh4RmwCHA9tQ7q0+PzNntsWcCrxtgJfvmZnfb4s9HDgI2JByz/YjMvOitpg1KInwHsCqwCWUW2ze3oVDkiRJUk0n90a/o4v7fz6wK/A7Sv/RwfqQ/hH417ZlN9efVInm8cBHgKuBA4HzI+LFmXltLfR7wJaU+7o/CBwLXBQRL8jMh0d2OJIkSarr+HaVEbEpsCOwPnB6Zt4eEatQWhX/nJmPDmEz52Xmj6vtnQpsNUjcI5l5xXLK0gccCZyYmXOqZZcC1wMfBfaqlr2YktzumpkXVMuuB24F9qPMISpJkqQuGfZo9Ih4SkR8DZgPfJXSMvjcavUqlATvkKFsKzOXDHf/g9gOWAs4s7btxcDZwOyIaE3VtAvwAHBhLW4BcFm1TpIkSV3UydRHHwHeDnwM2JbanJuZ+VfKrSz/pSulW2bjiLg/Ih6LiGsiYu+29ZtXjze1Lb8BWB14Ri1u/gBJ7g3A87paYkmSJHV0GX1/4FuZeXxETB9g/fXAa0dWrCe4BriKkhCuBRwAnBkRq2XmqVXMNKA/Mx9pe+2i6nEdymT004D7B9jHoipmWObNmzfclwzLamuux8KFCxvdx2i5996p3Hnb3R2/fu7cuV0szfhlPRTWQ2E9FNZDYT0U1kMxluqhk2TzH4Arl7P+EWCNzorzZJn5hbZFP46Ii4FjgFO7tZ9OzJw5k76+vsa2f+MtC5gxY0Zj2x9N06evywabbtTRa+fOncusWbO6XKLxx3oorIfCeiish8J6KKyHor0e+vv7G28gW55OLqP/GXjWctbPAro5Yn0g5wAbRcR61fNFQF9ErNoWN616vK8Wt/YA25tWi5EkSVKXdJJs/gD492o0estSgIiYDexLGZgzmlp9NTdvW74F8BDwp1pc1AYM1ePmN1c8SZKkyamTZPNoYAGlL+XplETzIxFxBfAT4FrgU90qYLsqUdwLuCMzW50AL6eMMt+7FrdSF
XdhZi6tFl9AadncuRb3TGD7ap0kSZK6qJNJ3R+MiO2A9wN7Av9HSdZupSSiJ2Tm/w1lWxExlWVTDj0LWDMi9qieX1U9nkaZiP0PlETxAMr8nm+tlak/Io4Djo+IuymTuh8AbAy8uRb3u4g4H/hmRBzGskndF9Dj/p+SJEkTUUeTulfJ5PHVv5FYn9L/sq71fH/gXEqL5ZFV7GOURHK3zDyvrUxzIgLgUGADyuj1XdvuHgSwDzCHMoF7H+V2lXt69yBJkqTu6/gOQt1Q3Y+8vf9ku9cPY3tzKInk8mIeAt5Z/ZMkSVKDhp1sRsS3hhC2NDPf0UF5JEmSNIF00rL5CqrR5zUrAU+vHu8G/jbCckmSJGkC6GSA0LMHWh4RT6Vcmn4v8KoRlUqSJEkTQidTHw0oMx/LzJOAnwMndWu7kiRJGr+6lmzWXAu8tIHtSpIkaZxpItl8FeA0QpIkSepoNPpRg6xam9KiuSXw6RGUSZIkSRNEJ6PRjx5k+SLKXYTeBXy90wJJkiRp4uhkNHoTl94lSZI0AZk4SpIkqTGd9NncqJMdZeaCTl4nSZKk8auTPpu38+Q7CA3FSh28RpIkSeNYJ8nmAcChwDOBM4Cbq+UB7AMsAL4ILOlGASVJkjR+dZJsPh3oAzbJzEX1FRHxceAyYMPM/FQXyidJkqRxrJMBQu8CvtaeaAJk5r2UaY/+faQFkyRJ0vjXSbI5HVh9OeufVsVIkiRpkusk2bwCeE9EzGpfERFbAe8BfjfSgkmSJGn866TP5sHAr4ArI+Iq4JZq+abA1sB9wCFdKZ0mjCVLlnLXfQ939NrV1lyv49c2YWrfyqzxtFV6XQxJksaFTu4gdGNEvAD4EDAb2KNadQfwBeCzmfnn7hVRE0H/Y4u5/LqFHb124cKFzJgxdpLNnbbeyGRTkqQh6qRlk8y8C3hf9U+SJEkaUEfJZktEbAqsD8zLzAe6UyRJkiRNFB3dGz0i3hwRC4D5wK+BWdXydSPi5ojYq4tllCRJ0jg17GQzIt4IfBe4CfgAMKW1LjPvqZbv260CSpIkafzqpGXzo8AvM3Nn4LQB1v8OeNGISiVJkqQJoZNkc3Pgh8tZ/xdgvc6KI0mSpImkk2Tzbyz/DkIbA/d0VhxJkiRNJJ0kmxcD+0XEkyYajIgZwIHAz0ZaMEmSJI1/nfbZfDrwe+DdwFJgl4j4NHA9sAQ4pmsllCRJ0rg17GQzM28BXgL8GTiaMhr9/cAHgf8Gts/MBd0roiRJksarYU3qHhErAc8A7srMV0fENGATStL6x8y8u4EySpIkaZwa7h2EngLcChwBfC4zFwFXdb1UkiRJmhCGdRk9Mx8DFlL6aUqSJEnL1ckAoW9TRqOv2u3CSJIkaWIZ7mV0gJuBlYD5EXEa8EfgkfagzDx7hGWTJEnSONdJsvnd2v8/NkjMUmCFyWZEbAIcDmwDzATmZ+bMAeJmA58EtgD+BJyYmV8aIO5w4CBgQ+AG4IjMvKgtZg3gBGAPYFXgEuCQzLx9ReWVJEnS8AzpMnpEfDEiZlVPX179ex3wytrz+r9XDHH/zwd2Bf4A3DjIvrcFzgWuAWZTLuOfGBHvaos7HDgeOLna5i3A+RHRfp/27wG7AYcAewMzgIsiYuoQyyxJkqQhGmrL5sHAFcDczLw0IqZT7oH+qsy8dAT7Py8zfwwQEacCWw0QcxRwdWa+o3p+SURsBHw8Ir6WmUsiog84ktLiOafa3qWUSeY/CuxVLXsxJRHdNTMvqJZdTxlhvx9wygiORZIkSW06GSDUMmWkO8/MJctbXyWRrwDOalt1BuVS+ZbV8+2AtYAza9teTLmUPzsiWmXdBXgAuLAWtwC4rFonSZKkLhpJsjkaNgZW4cmX2G+oHp9XPW5ePd40QNzqlInoW3HzB0hyb6htS5IkSV3SyQCh0TStery/bfmi6nGdWlx/ZraPiq/H3VnFtW+rFbfOAMuXa968ecN9ybCstuZ6LFy4sNF9jJb+W
GdExzKW6uHee6dy5229uVnW3Llze7LfscZ6KKyHwnoorIfCeijGUj0MJ9l8bkT8c/X/tarH50XEXwcKzswrR1SycWDmzJn09fU1tv0bb1nAjBkzGtv+aOrrW7XjY1m4cOGYqofp09dlg003GvX9zp07l1mzZq04cIKzHgrrobAeCuuhsB6K9nro7+9vvIFseYaTbB5T/at70vRDlL6cSylzcY5Uq2Vy7bblrRbP+2pxfRGxamb+3wriBsoSptViJEmS1CVDTTb3b7QUg7sVeJTS1/LC2vItqsf51WOrr+bmlCmS6nEPUebmbMW9KiKmZObStrj5SJIkqauGlGxm5mlNF2SQ/fZHxMWUqYs+X1u1D/Bn4Orq+eWUUeZ7UyWbEbFS9boLa4nlBZSplHamSl4j4pnA9sB7Gj0YSZKkSainA4SqidRbUw49C1gzIvaonl+VmXcAxwK/joivA6cDLwEOBA5qjSqvktLjgOMj4m5KEnoAZTT7m1v7y8zfRcT5wDcj4jDgwWr7C4BTGz1YSZKkSajXo9HXB85pW9Z6vj9wamb+NiJeT7k70L7AQuB9mfmV+osyc05EABwKbECZzmjXzLy2bfv7AHMoE7j3UW5XuWdmPty1o5IkSRLQ42Szuh/5CieHr+72c8EQ4uZQEsnlxTwEvLP6J0mSpAaN9UndJUmSNI6ZbEqSJKkxJpuSJElqjMmmJEmSGmOyKUmSpMaYbEqSJKkxJpuSJElqjMmmJEmSGmOyKUmSpMaYbEqSJKkxJpuSJElqjMmmJEmSGmOyKUmSpMaYbEqSJKkxJpuSJElqjMmmJEmSGmOyKUmSpMaYbEqSJKkxJpuSJElqjMmmJEmSGmOyKUmSpMaYbEqSJKkxJpuSJElqjMmmJEmSGmOyKUmSpMaYbEqSJKkxJpuSJElqjMmmJEmSGmOyKUmSpMaYbEqSJKkxJpuSJElqjMmmJEmSGmOyKUmSpMaYbEqSJKkxJpuSJElqzMq9LsCKRMR+wLcHWHVyZh5ci5sNfBLYAvgTcGJmfmmA7R0OHARsCNwAHJGZFzVQdE1QS5Ys5a77Hh71/a625npd3e/UvpVZ42mrdG17kiQNZMwnmzWvAR6oPf9z6z8RsS1wLvAd4DDgJcCJEfFYZn6lFnc4cDzwEeBq4EDg/Ih4cWZe2/whaCLof2wxl1+3cNT3u3DhQmbM6F6yudPWG5lsSpIaN56SzbmZec8g644Crs7Md1TPL4mIjYCPR8TXMnNJRPQBR1JaPOcARMSlwPXAR4G9Gi6/JEnSpDPu+2xWSeQrgLPaVp1BuVS+ZfV8O2At4MxWQGYuBs4GZkfElOZLK0mSNLmMp5bNeRGxHrAAOBX4ZGb+HdgYWAW4sS3+hurxecDvgc2r5zcNELc68Azgzu4XW5IkafIaD8nm/wIfB64EFgOzgY8BzwH2A6ZVcfe3vW5R9bhO9TgN6M/MR5YTN6xkc968ecMJH7bV1lyPhQtHv29gE/pjnREdy1iqh5Eey0h0c7/33juVO2+7u2vbG01z587tdRHGBOuhsB4K66GwHoqxVA9jPtnMzJ8BP6st+kVEPAAcHRGf6FGxAJg5cyZ9fX2Nbf/GWxYwY8aMxrY/mvr6Vu34WMrAmLFTDyM5lpHodj1Mn74uG2y6Ude2N1rmzp3LrFmzel2MnrMeCuuhsB4K66For4f+/v7GG8iWZ7z22Ty7etySZS2Ta7fFtFo876seFwF9EbHqCuIkSZLUJeM12ay7FXiUZX0yW7aoHudXj62+mgPFPUSZm1OSJEldNF6TzTcBSynTIfUDF/PkqYv2oczFeXX1/HLKPJ17twIiYqXqdRdm5tKmCy1JkjTZjPk+mxHxM0oyOQ9YQhkg9G7gm5n5xyrsWODXEfF14HTKpO4HAgdl5hKAzOyPiOOA4yPibkoSegBlNPubR/GQJEmSJo0xn2xSLn+/HfgHSnlvAY4ATmwFZOZvI+L1lLsD7QssBN5Xv3tQFTcnI
gAOBTagTHu0q3cPkiRJasaYTzYz873Ae4cQdwFwwRDi5gBzRlwwSZIkrdB47bMpSZKkccBkU5IkSY0x2ZQkSVJjTDYlSZLUGJNNSZIkNcZkU5IkSY0x2ZQkSVJjTDYlSZLUGJNNSZIkNcZkU5IkSY0x2ZQkSVJjTDYlSZLUGJNNSZIkNcZkU5IkSY0x2ZQkSVJjTDYlSZLUmJV7XQBJvbFkyVLuuu/hXhdj2FZbc70nlXtq38qs8bRVelQiSdLymGxKk1T/Y4u5/LqFvS7GsC1cuJAZM56YbO609UYmm5I0RnkZXZIkSY0x2ZQkSVJjTDYlSZLUGJNNSZIkNcZkU5IkSY0x2ZQkSVJjTDYlSZLUGJNNSZIkNcZkU5IkSY0x2ZQkSVJjTDYlSZLUGO+NLmncW7JkKXfd9/CKA8eBqX0re593SROKyaakca//scVcft3CXhejK3baeiOTTUkTipfRJUmS1BhbNiVpDBlql4DV1lxvTHcdsDuApBaTTUkaQ4baJWDhwoXMmDF2k027A0hqmXTJZkRsCnwJ2B54BDgTOCIzx+63tiRJ0jg1qZLNiFgbuAS4A9gDWB/4HLAe8KbelUySJGlimlTJJvBOYBrwj5l5D0BE/B04PSI+kZk39LR0kjRBjNZ0VKPRd9X+p9LITLZkcxfgolaiWfkB8C1gNmCyKUldMFrTUY1G31X7n0ojM9mSzc0pieXjMrM/Im4FnjeM7awE8Oijj3axaE+2ZMliVn7Kkkb3MVoW//2xjo9l1adOGVP1MJJjGYlu10OvjmOkBqqH8XosAxnqsYy1z0W70XpPRqMe/v7Yo/T3r9ToPrqhv7+/10UYE6yHol4PtXylJyfylKVLl/Zivz0REY8BH8vMT7ct/y/gL5n5L0PZzty5c7cHftNAESVJkpqyw6xZs/5rtHc62Vo2u+UqYAfgf4HFPS6LJEnS8qwEPJ2Sv4y6yZZsLgLWHmD5NGD+UDcya9asfmDUfxlIkiR16NZe7Xiy3a7yJkq/zcdFRB+wMcNINiVJkjQ0ky3ZvADYKSKm15a9Aeir1kmSJKmLJtsAobWBecDtwCdYNqn7RZnppO6SJEldNqlaNjPzfuAVwF+B/wd8HjgLeHsPiyVJkjRhTaqWTUmSJI2uSdWyKUmSpNFlsilJkqTGmGxKkiSpMZNtUvcxLyI2Bb4EbA88ApwJHJGZD/e0YCsQEXsC/wrMAtahTB77ZeCrmbmkFjcb+CSwBfAn4MTM/NIA2zscOAjYELiBUgcXtcWsAZwA7AGsClwCHJKZt7fF9aROI2J1yvytzwC2zszf19btC3wEeDalro7NzLPaXv9U4FjgbZSbEVwFvCcz/7stbkPgC8BrgKXAT4D3ZuY9bXH/TJl9YRZwH/CNar+N3QUrIt4KvJfyfj8MXA3s0yrbZDgfImJ3ynu9OfA34DLgQ5l5S1vchDknImIT4HBgG2AmMD8zZw4QN2bf/6GWbST1EBErAYcBu1b7WRm4Hjim/fgmcj0MED8LuBJ4JDNXb1vXk8/AUD6fKzKMz8WqwIeAtwL/ANwDXJCZB7bFjZvzwZbNMaSamukSYA3KiXEYsA/wrR4Wa6gOA/qBDwCvBX4EfBH4TCsgIrYFzgWuAWYD3wZOjIh31TdUfYCOB06mfAnfApwfES9q2+f3gN2AQ4C9gRnARRExtbatteldnR7NAD/oImIP4DTgh5R6+CXwvepDXPd5yhfJx4HXA49Sjm9GbVsrAxcCLwD2BQ4AtgPOjYgptbjnVvu5j/L+HE95rz7ZheMcUER8lPKD4/9RjvMdlC/Evmr9hD8fImInyvHPB/6lKtvzgF9GxJq1uIl2Tjyf8l79AbhxoICx/P4PtWxDsKJ6WI2SwPw3sD/wJsof8F9ExGvbyjSR66G+z6dQvjfuHiRk1D8Dw/h8rshQPhdPofz93Lcqz6uBD1Jm0anHjavzwZbNseWdlFtn/mOt5efvwOkR8YnMvKGnpVu+12Vm/
cvhkqpl7+CIODIz+4GjgKsz8x21mI2Aj0fE1zJzSXVHpyMpv5bmAETEpZRf+x8F9qqWvZjyAds1My+oll1P+cW5H3BKtY+e1GlEzATeBbwf+Grb6k8A52Tmh6vnl0TE5sAxwE+r1z+jev2hmfn1atkVwG2UlsIPVq99I/AiYGbrWCJiIaX1bDbLblbwAeB+YM/qvbgoItYCjoqIz2bmfd07eoiIoCTbb8jMn9RW/aj2/8lwPuwD3AG8LTOXVvu7A/gd8BKq95uJd06cl5k/rvZ9KrDVADFj+f1fYdm6VA+PAM/JzEWtBRHxc2Azyh/8n1TLJno91B0IrEVJdA6tr+jhZ2CFn88u1sP+wLbAFpn5p9ry02v1MO7OB1s2x5ZdKBPM15v5f0BpMRzuL6hR1ZZotlxDabZfp/pwvIIyr2ndGZRLAFtWz7ejfNGcWdv2YuBsYHbtV+kuwAOUX6+tuAWUL5NdatvvVZ2eDJwE3FxfGBHPobRsndkWfwawdUSsVz1/NbAStfrKzIcof3zaj+/6epKUmZdTEpz2uB9VX6j1fbbel27bH7ijLdF83CQ6H54KPNRKNCv3V49TYGKeEytKQsby+z+Msq3QiuohMxfXE81q2VJKS+eM2uIJXQ8tEbEupbXuPZQWy3aj/hkYxudzhYZYDwdSEts/LSdm3J0PJptjy+a0Na1XH4JbKSf7eLMD5fLEXyj3n1+FJ186aH0ZtI6vde/6mwaIW53S/7EVN3+AD+8NPLGuRr1Oo/RT3AQ4boDVreMbrB6iFndXZt47QNxm1aWWVtxAl2Mer4eIeBqwUXtc1WfnYZqph22A6yLiyIj4c0Q8FhFXRsTLqvWT5Xw4Fdg8Ig6JiLUj4tnAHMrxtPpWTZZzom4sv/9DLVsjqvdxO554zJOlHj4D/FdmXjjI+l58Bob6+RyxKP1RtwRuj4jTIuKvEfG3iPhR1YLYMu7OB5PNsWUay1o96hZRBt2MGxGxFaV16/PVL65p1ar720Jbv+pbxzcN6M/MR4YQ176tVly9rka1TqtLMCcAH8zMvw4QMpx6aI9pxT2V8oWyorjWttYeZJ/tcd20IfAqyjlwKPA64EHgwirhmhTnQ2ZeQumr+clqH7cBzwFeVWtNmSznRN1Yfv+HWramHEJJYP6jtmzC10PVH3Af4H3LCevFZ2A062E65TiOoHyHvpHS1/1FwAVR+qK2yjSuzgeTTXVdlFGAP6CMJvzMCsInmuOAWzLz9BVGTmxPoXzxvzEzz65aKnajJJwf6GnJRlFEbAd8B/gm5RLUnsASykCF1XpZNo09Vcv/Z4E5mfmbXpdntEQZlX8K8LnM/GOvy9NDrZzsr8DumfmzzDyT8r3xfOANPSvZCJlsji2LWPZrq24a5XL0mFe17P2Uchlit8x8rFrV+vWzdttLWr+W7qvF9UWZ+mFFce3basXV62rU6jQink/pvP6x6pLp2iz7pb16lCkohlMP7TGtuMdYNjJxKMd3/yD7bI/rpkXAvVmbjiTLFBpXUKb8mPDnQ+WLwCWZ+b7MvCQzv0/psP9PlGlNWmVigHJNtHOibiy//0MtW1dFxAuBH1MG0R3Rtnqi18OBwNOBU2rfnatCGSld+2HWi8/AaNbD/ZRpmi6rt1pmmTbvQcp3Z6tM4+p8MNkcW25iWV8M4PFOuRtTpk4Z06oT/1xgfeA1bf1qbqV0+N687WVbVI+t42v1QRko7iHKtCCtuKh1hK7H1etqNOt0U8oMD5dQPpiLgPOqdZcAv2H5xweQ1eNNwPoR0X5pYgvg5lofnCcdXy1uPkBm/g1Y0B4XEc8CptLMubW8Ud2rMjnOh9b+/7u+IDPvpMybt3GtTLSXi4l3TtSN5fd/qGXrmojYGPgZZR7at7YNKIOJXw/PAzagHEfru/MI4GnV/z9VK/dofwaG+vkcseoH+e2DrF5KlYCvoExj8nww2RxbLgB2iojptWVvoIyMu2Dgl4wNVV+Ss4EXArMz8476+
qp/2sVUUzLU7AP8mfIlC3A5ZfTc3rVtr1S97sLal/AFlF9YO9finkmZkLZeV6NZp/8FvLztX6v/0buAAzLzNsqHcu+21+4DXFUb1f9zyuXWx+srylRSr+PJx/eCahqOVtw2lImH2+N2j4hV2vbZz7KBKt30E2B6RDw+QrHqkL8tMHeSnA9QRr/Oqi+o/pitS/VHZRKdE48by+//MMrWFVW3o59X2949MwcahT3R6+EknvzdeRrwf9X/T6riRv0zMIzPZ7f8BNi+3s0mysTzawFzq0Xj7nyYsnRp+w8o9Up16WAe5Y/QJygthJ+jTEnwpt6VbMUi4qvAv1HmOWvva3RjZj5YdQD/NWWE7umUeQaPBQ7KzK/UttWarPbDlBP4AEpH6Rdn5rW1uJ9QLkceRrnEcCylSf8F1S/EntdpROxIadV8/A5CUe62dBbl1/ovKBMTv4cyF9pPa689iXKp9TBK0nI4ZV62F2TmwipmZeD3lE7lH6a0rJ4A3AW8JJfN7fhcSgvbxZS7REQV96XM/FADx/0U4LfAepR53x6qjmNrylxuf5gM50NEHEyp75Mol0inU+bHWw94fqv1f6KdE1EmjG5NrXIQpYXk/dXzqzLzjrH8/g+1bCOtB8pMHb+tlr+F8h49LjOvmAz10N44Ub3maODwfPIdhEb9MzDUz2c36qFKBq+lvMefpySLx1Peyy1bXdPG2/lgy+YYkpn3UwYR/JVy15HPU07wt/ewWEPV+uX0WcqXZ/3flgCZ+VvKh3RryiWjA4D3tZ+kWSap/QhlFPNPKZdYdq1/gCr7UH4FngKcQ/l19cqs3V5rLNZpZp5DGaW9B6UedgbePMCX1vsod9I4jtI9YTXK8S2sbevvlNuxzQO+S7mjwxWU/rJLa3F/BF5JSXLOpyQ8/0FJBLuuupy1K+WLqfX+AOyYmX+oYibD+XAyZcLkHSh98U6k3D3k5fVuJhPwnFif8h6cA+wIPLP2/OXV/sfs+z/UsnWhHjagjDRenXJ+tH93TpZ6GI5R/wwM4/O5IkP5XPxP9f8p1fKTKA04r8xlYyDG3flgy6YkSZIaY8umJEmSGmOyKUmSpMaYbEqSJKkxJpuSJElqjMmmJEmSGmOyKUmSpMaYbEqSJKkxJpuSJElqzP8HVvNfDeBqWn8AAAAASUVORK5CYII=\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "df1 = df[df[\"name\"].isin([\"IssueQuery\"])]\n", + "df1['delta'] = df1['ts'].diff()\n", + "ax = df1['dur'].plot.hist(bins=BINS, alpha=0.5, figsize=figsize)\n", + "ax.set_title('IssueQuery duration (usec)');\n", + "plt.show()\n", + "ax = df1['delta'].plot.hist(bins=BINS, alpha=0.5, figsize=figsize)\n", + "ax.set_title('Time between IssueQuery (usec)');\n", + "\n", + "# df1['delta'].describe()" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "# for SingleStream\n", + "if False:\n", + " df1 = df[df[\"name\"].isin([\"QuerySamplesComplete\"])]\n", + " ax = df1['dur'].plot.hist(bins=BINS, alpha=0.5, figsize=figsize)\n", + " ax.set_title('Inference time (usec)');\n", + " plt.show()\n", + " ax = df1['dur'].plot(figsize=figsize)\n", + " ax.set(ylim=(0, 100))\n", + " ax.set_title('Individual inference time (usec)');" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": { + "scrolled": false + }, + "outputs": [ + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAoMAAAFtCAYAAAB8yGDhAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjQuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/Z1A+gAAAACXBIWXMAAAsTAAALEwEAmpwYAABA20lEQVR4nO3deZwcZbX/8U8WMkmAkIWwBAkgwgGM4DXyExQF5SKyCFxlEQQFRUAQRETZd81FiQsoKKAsV5TVK8tlEWVfJQ4IJJAjW4hhIIQkLJIwZJnfH+fppNL0bD29Tdf3/Xrl1emq6uqnz9TUnH7qeU4N6OjoQERERETyaWC9GyAiIiIi9aNkUERERCTHlAyKiIiI5JiSQREREZEcUzIoIiIikmNKBkVERERybHC9GyAi+WFmw4GfArsDawHnuvvRdW1UAzKz9YEXgIPc/bL6tmZFZvZd4AhgY3dfXO/2dMXMdgGuBjZw9zn1bo9Io1LPoIj0mJkdaGYdZrZVmbs4BjgEuBg4APhdxRrXD5nZ4WZ2YL3b0VNmtipwAnBOoyeCAO5+M/Ac0WYR6YR6BkWklrYDHnf3U+vdkAZxOPAacFnR8heBYcCiWjeoG18j2vU/9W5IL1wInGNmp7v7m/VujEgjUjIoIrW0BjCvUjszswHAUHdfWKl9NgJ37wDeqXc7SvgacIu7v13vhvTCdcB5wN7Ab+rcFpGGpGRQRPrEzC4DvgRsCJwP/CewELgcOM7dl5jZdsBdmdcU7oO5gbvPMLMW4Hhgf2A80Vt2DXCSuy8oet2FaV8nA0Zcdr7MzFYDTgP2JMYjzgIuBSa5+5L0+vWJsXgnAHPTe74PeAI43N2nFH22jYEzgO2BEcC/gJuz4xzNbG3gLGBXYBTwPHCeu/+qm7jNANYriseL7r5+qTGDZnZ6+nybAScBnyd6Di9Kz8cR8f8MEf/J7n5O0Xv2KM6dtHcDYPP0Htnl72lrZl0HcIa7n56erwKcDnwxtfdNYBpwqrvfm3ndlkTcPwEMAVqBU9z9rqL9r532twswFngZ+AtwjLu/BeDur5rZE8B/oWRQpCSNGRSRShgI3EYkWMcC9wDfJRI1gKeJMYKzgOnp/wcAc1Lv3p+A7wM3A0cSCcrhwPVpfdaniITkj8BRwHQzG0YkiAcCVwDfAu4kEoULS7R3n/R+FxJJ5frA/5rZSoUNzOyDwCPAjsAl6b2uJZKwwjZrAA8DnwMuAL4NTAUuMLOTu4nZ0SXicXQ3rwG4kvgifzzwEJHYfpdIgl4BjgOeAX5sZp/JtLW3cS728fT49x60sTO/Su/7p/S+PwLmAFtk2rktcB8wGjgzfZ4W4Pb0paKw3VrEz+crxLFwJHG5/f8BY4retxXYugefUSSX1DMoIpWwEnCtu5+Znv/azB4Fvg78yt1nA1eY2fHAa+5+ReGFZrYfkUx92t3vySz/O5HY7QDcnnmvTYCPuPs/MtuemFk+PS2+yMxeAH5gZue4u2f2sS6wkbvPT6934AYi8fu/tM35xDnyQ+7+Qua9Tsrs5wdEovKhzGzVX5vZxcCJZvZLd3+9VMDc/Xoz+0FxPHqg1d2/ntpyETAD+DHRc/bDtPxKoI24rHtnet2+9C7OxTZJj8/3oq3FdgUudvdjSq1MydqFwP3ADulyOWb2a+AxYBLLk9Kzid7Fj7v73zK7Ob1E0vc80Wu7NhEXEclQMigilXJx0fP7iN6u7uwN/BOYZmarZ5bfA3QAn2bFJOXBbCKY2cf9wGtF+/grkbBtB2STwT8WEsFMWwHeD2BmY4FtgfOziSAsG89XSFz2JHq5Oore93bgYOBjwJ87++BlWnapM12C/ztxqfu3meWvpwT3/ZnX9TbOxcYAS4E3+tD2N4CPmdk67v5SifVbEJf+fwyMMbPsur8AR6byRO8Ql31vLUoEgeU/o4zCz3p1lAyKvIeSQRGphEXu/nLRsvlEb0x3NiYSgM7qwK1R9Py5TvaxRS/2MTP7xN3
np8Sj0N5CEjW1k/1BjFEbRfS+fa2H71sJM4uev0HE/5USy9fMPO9tnEsZkP4VJ1s99T1iLOlMM3uMGFrwu0yv7cbp8belXpyMAd4lxnB29fPJKvQUlttukaamZFBEKmFpH147EHiKGG9XSnFPTqmZwwOJy6H/3ck+ii9tLulku96MKSuMub6SGFNYyrRe7K+nSrW9s/hnP09v41zstbS/1Vje0wadJFhmNqh4mbtfa2b3EUXHP0uMw/y+mR3o7n9geUyPJ8b5lTIntaE3Ckn+a718nUguKBkUkXp7DpgI3FHi8l5v9rGqu/+1gm0CmNDFNnOAt4DBfXjfWvZU9TXOT6fHDVgxGSz8f2TR9uuV2knqwbwQuNDMRhITcM4A/sDyuL/VVUzN7F1iJnJXP5+sQpuLe09FBM0mFpH6u5q4nPnN4hVm1pLuetGTfWxpZjuX2MeqqaRKj7n7a8RYugNTSZXs/gakbZYQNez2MLMtiveRxh125216dim9Evoa5wfS40ezC1Mh59eIWd5Zhxe9x6BU/if72teJsjQj06JW4FngmFLtKcTU3ZcSYzV3MrOPldiuuId3IvBwH75siDQ19QyKSL1dQUzEOD+VFbmfuBxpxKSHvYC7u9nHOUTJlxvM7HIiqRhG9BztBXyImHXbG0emtrSa2YXEpebxRE3FjdI2xxOTUx5KM4inEcndh4kJDkO7eY+/A4eb2WnE5I5/u/tNvWxnT/Upzu4+08z+Qcw6vqho9W+A483sN8Rn+hTLx/8VrAq8ZGZ/BB4nevY+Qcxw/mV6j6Vm9nViLOFTZnYJUX5nHDGhZwAx0QWipM4OwN3p5/MUkex+gYj9DFhW/mdz4Nfdh0gkn5QMikhdpQTgC0SNva8S48kWEsnXBURB6O72sTDVoDuBSGwOIC7hPkMUhO715UF3fzLdg/ks4FAiufwXcFNmm1dTz9QpwB5Er9s84pLqd3vwNmcSZW6OISZEvJjdfyVVIs7E2MizzWzloruQnElMqNmTiP+twE7Aq5ltFhDlenYAdiPKEb1A1KU8N9POe1PcTyF6F0cQP78prDiT+uUU+7OIsjkjiXGPt7Pi2MAvEhNOru7B5xPJpQEdHeo1FxGR7qVLt88Tdwzp8g4rjSL1Zt6dvWuMiKxIYwZFRKRH0i3ezga+Z2YNf2XJzHYBPkAUqxaRTqhnUERERCTH1DMoIiIikmNKBkVERERyrOHHfDSi1tbWFmBL4GU6v5OBiIiISCMYBKwNTJk4cWJ78Uolg+XZkuU3thcRERHpDz5J1BhdgZLB8rwMsPHGGzNkyJA+7Wjq1KlMmNDTOyo1L8VBMQDFABQDUAxAMQDFACoXg3fffZd//vOfkPKXYkoGy7MEYMiQIbS09OouVyVVYh/NQHFQDEAxAMUAFANQDEAxgIrHoOTQNk0gEREREckxJYMiIiIiOaZkUERERCTHlAyKiIiI5JiSQREREZEcUzIoIiIikmNKBkVERERyTMmgiIiISI4pGRQRERHJMSWDIiIiIjmm29E1sLfefpcF7Yvr3YyKGN4ymFVX7tt9nEVERKTylAw2sAXti7ljysx6N6Mitt9yvJJBERGRBqTLxCIiIiI5pmRQREREJMeUDIqIiIjkmJJBERERkRxTMigiIiKSY0oGRURERHJMyaCIiIhIjikZFBEREckxJYMiIiIiOaZkUERERCTHlAyKiIiI5JiSQREREZEcUzIoIiIikmNKBkVERERyTMmgiIiISI4Nruebm9kHgGOBrYAJwHR3n1C0zWXAV0u8fC93v65o22OBI4C1gGnAce5+R9E2qwLnAHsCQ4G7gCPdfUYFPpKIiIhIv1LvnsEPArsAzwJPdbHd88DWRf/uzG6QEsFJwPlpn88AN5vZFkX7uhLYDTgS2AcYB9xhZsP7+mFERERE+pu69gwCN7n7DbCsB/CjnWy30N0f7mwnZtYCnAz83N0np2X3AE8CJwF7p2UfIxLFXdz9lrTsSeA
54EDggr5/JBEREZH+o649g+6+tEK7+jiwGnBVZt9LgGuAncxsQFq8M/AGcFtmu5nAA2mdiIiISK7Uu2ewpzY0s9eBlYGpwNnufnVm/abp8emi100DVgHWAWal7aaXSEKnATtWutEiIiIija7eYwZ74jFikskexKSPWcBVZnZgZptRQLu7Lyx67fz0ODqz3esl3mN+ZhsRERGR3Gj4nkF3P7do0Q1mdidwBnBZ7Vu03NSpUyuyn9bW1pLLh40YS1tbW0Xeo97mzh3OrBfmdLlNZ3HIE8VAMQDFABQDUAxAMYDaxKDhk8FOXAtcYGZj3X0O0bPXYmZD3f2dzHaj0uO89DgfGF9if6My2/TYhAkTaGlp6e3LVtDa2srEiRNLrps9bwHjxi3o0/4bxZgxq7PmRqVCH7qKQ14oBooBKAagGIBiAIoBVC4G7e3tXXZg9YfLxD1RGCu4adHyzYC3gJcy21lmQkl2u+nVa56IiIhIY+p3yWBK5PYGXky9ggAPErOE98lsNyhtd5u7d6TFtwAjyUwWMbN1gW3SOhEREZFcqfcdSIazvKTLesAIM9szPZ+SHi8nCkU/SyRyBwPbAQcU9uPu7Wb2A2CSmc0BHk3bbQjsl9nub2Z2M/BbM/su8CZwJjCTOo8/FBEREamHeo8ZXIMY/5dVeH4QcCPR43dy2nYRkejt5u43ZV/k7pPNDOAoYE2iXMwu7v540f73BSYTBaZbiNvR7eXuzTE4T0RERKQX6poMpvsBF4/fK7Z7L/Y3mUj0utrmLeDQ9E9EREQk1/rdmEERERERqRwlgyIiIiI5pmRQREREJMeUDIqIiIjkmJJBERERkRxTMigiIiKSY0oGRURERHJMyaCIiIhIjikZFBEREckxJYMiIiIiOaZkUERERCTH6npvYpH+6K2332VB++KK73fYiLHMnreg4vvtzPCWway68pCavZ+IiDQmJYMivbSgfTF3TJlZ8f22tbUxblztksHttxyvZFBERHSZWERERCTPlAyKiIiI5JiSQREREZEcUzIoIiIikmNKBkVERERyTLOJpSaWLu3osmxKrcuq9MWixUvq3QQREZGKUTIoNdG+aAkPPtHW6fpal1Xpi49vPq7eTRAREakYXSYWERERyTElgyIiIiI5pmRQREREJMeUDIqIiIjkmJJBERERkRxTMigiIiKSY0oGRURERHKsrnUGzewDwLHAVsAEYLq7T8isHwR8F9gF2Ixo75PAGe5+R9G+ZgDrlXibse7+Wma7VYFzgD2BocBdwJHuPqNiH0xERESkn6h3z+AHiUTvWeCpEuuHAScC/wAOAr4EvAT8xcx2LbH9dcDWRf9eL9rmSmA34EhgH2AccIeZDe/bRxERERHpf+p9B5Kb3P0GADO7DPho0fqFwAbuPr+wwMxuBzYmegz/r2j72e7+cGdvZmYfI5LPXdz9lrTsSeA54EDggr58GBEREZH+pq49g+6+tJv1S7KJYFrWQfQUlnNPsJ2BN4DbMvubCTyQ1omIiIjkSr17BnvNzAYCHweeLrH6y2Z2MLAEuB84wd0fzazflBiXWJyETgN2rEZ7RURERBpZv0sGibF+BhxStPxG4G/ATGIiyQnAfWa2pbsXxiOO4r1jCAHmA6N725CpU6f29iUltba2llw+bMRY2traKvIe9dZuo7v9LP3ls/bks5SrljGYO3c4s16YU7P366nOfh/yRDFQDEAxAMUAahODfpUMmtm2wI+Bye5+X3adux+VeXqfmd0KTAeOB75SjfZMmDCBlpaWPu2jtbWViRMnllw3e94Cxo1b0Kf9N4qWlqGMG9f5lf22trYu1zeS7j5LuWodgzFjVmfNjcbX7P16oqvfh7xQDBQDUAxAMYDKxaC9vb3LDqx6zybuMTPbHLgBuB44rrvt3X0ucCeQjeJ8YGSJzUcB8/rcSBEREZF+pl8kg2a2IfBn4FHggDSJpBxPx+5sQNHyzYheRBEREZFcafhk0MzWAm4HXgH
2cPd3e/i61YHtgSmZxbcQPYM7ZrZbF9gmrRMRERHJlXrfgWQ4y0u6rAeMMLM90/MpwKtEGZg1gGOAzcxs2esLNQXNbF9gV+BWoij1+sSl5Bbg7Mz2fzOzm4Hfmtl3gTeBM4lJJ5dV4zOKiIiINLJ6TyBZA7i2aFnh+UHA3cAW6fn1JV5fuNz7AlF38KfE+L83gHuAPd29+PLvvsBkosB0C3E7ur3cvTlmaoiIiIj0Ql2TwXQ/4OLxe8W6W1/oIfx0D9/zLeDQ9E9EREQk1xp+zKCIiIiIVI+SQREREZEcUzIoIiIikmNKBkVERERyTMmgiIiISI4pGRQRERHJMSWDIiIiIjmmZFBEREQkx5QMioiIiOSYkkERERGRHFMyKCIiIpJjSgZFREREckzJoIiIiEiO9ToZNLMdzWxANRojIiIiIrVVTs/grcAsMzvHzLaodINEREREpHbKSQb3AB4AjgAeNbMnzOxYMxtX0ZaJiIiISNX1Ohl09xvdfW9gTeAbwBzgbOBFM7vdzPY3s+EVbqeIiIiIVEHZE0jc/S13v8TdtwfWA04E1gAuB2ab2f+Y2fYVaqeIiIiIVEGlZhMPAlYCWoABwELgP4G/mNljZjahQu8jIiIiIhU0uNwXmtlqwN7A/sAngMXAzcDx6XEpsBvwM+BSYMu+NlZEREREKqvXyaCZ7UEkgDsDQ4EpwLeBK919XtHm15vZ6sAFfWyniIiIiFRBOT2D/wu8BJwLXO7u07vZ/gng92W8j4iIiIhUWTnJ4GeBO9y9oycbu/sjwCNlvI+IiIiIVFmvk0F3/2s1GiIiIiIitVfO7eh+ZmbPdLH+n2Z2Tt+aJSIiIiK1UE5pmV2Aq7tYfzXw+fKaIyIiIiK1VM6YwXWBGV2sfzFt0y0z+wBwLLAVMAGY7u7vqUloZjsBPwQ2Iyav/Nzdf1Fiu2OJ2+StBUwDjnP3O4q2WRU4B9iTmA19F3Cku3f1mURERESaUjk9g28CG3Sx/v1E0eme+CDR0/gs8FSpDcxsa+BG4DFgJ6Jm4c/N7LCi7Y4FJgHnp30+A9xsZlsU7fJKov7hkcA+wDjgDt1CT0RERPKonGTwTuBQMxtfvMLM1gcOTdv0xE3uvq677wk82sk2pwKPuvvX3f0ud/8B8FvgNDMbmN63BTiZ6DGc7O53ErUQnwdOyrTvY0SieLC7X+nuNwP/BYwHDuxhm0VERESaRjnJ4KnE5eWpZnaumR2S/p1H1BQcCJzSkx25+9Ku1qck7zO8d4ziH4hLwR9Jzz8OrAZcldn3EuAaYCczG5AW7wy8AdyW2W4m8EBaJyIiIpIrvU4G3f0Z4vZzjxKXWn+d/n0LaAU+6e5eofZtCAzhvZeQp6XHTdLjpunx6RLbrQKsk9lueokkdFpmXyIiIiK5Uda9id19GrBdutXc+9Pi59x9bsVaFkalx9eLls9Pj6Mz27W7e/FYxex2s9J2xfsqbDe6xPIuTZ06tbcvKam1tbXk8mEjxtLW1laR96i3dhvd7WfpL5+1J5+lXLWMwdy5w5n1wpyavV9Pdfb7kCeKgWIAigEoBlCbGJSVDBa4+2vAaxVqS78zYcIEWlpa+rSP1tZWJk6cWHLd7HkLGDduQZ/23yhaWoYybty4Tte3tbV1ub6RdPdZylXrGIwZszprbvSeob911dXvQ14oBooBKAagGEDlYtDe3t5lB1ZZyaCZDQJ2JHoFRwEDijbpcPezytl3kULP3sii5YUew3mZ7VrMbKi7v9PNdqX++o3KbCMiIiKSG71OBs3so8Afgffx3iSwoAOoRDL4HPAuMdbvtszyzdLj9PRYGCu4KVGCJrvdW0RtwsJ2O5jZgKJ7K2+W2ZeIiIhIbpTTM3gBMAzYA7jP3V+vZIOy3L3dzO4E9gZ+llm1L/AKy8vRPEjMEt6HlAym3su9gdsyid8txGzoHUnJpZmtC2wDfLtan0NERESkUZWTDG4OnOTuN/X1zVOh50JJl/WAEWa2Z3o
+xd1fBM4E7jWzi4HfEzOZvwEcUZgVnJLGHwCTzGwOkSQeTMxG3q/wfu7+NzO7GfitmX2XKKB9JjATuKyvn0dERESkvyknGZxF55eHe2sN4NqiZYXnBwGXuftDZrY7cXeRrwBtwHfc/dfZF7n7ZDMDOApYkygXs4u7P160/32ByUQPZwtxO7q93L05ZmqIiIiI9EI5yeDZwPfM7CJ3f7Mvb57uB9xtYunutxCXeLvbbjKR6HW1zVvEXVIO7VkrRURERJpXOcngaOBt4Fkzuw74F7CkaJsOdz+nr40TERERkeoqt2ew4LBOtukAlAyKiIiINLhyksENKt4KEREREamLXieDaYaviIiIiDSBsm9HZ2YbAdsRM4J/7+4zzGwIsBbwiru/W5kmioiIiEi1lHMHkoHAr4GvEzOBO4CHgBnAEOBJonbfTyrWShERERGpioFlvOZE4GvAKcDWZErDuPu/iVvVfaEirRMRERGRqionGTwIuMTdJwHPllj/JLBRn1olIiIiIjVRTjL4PuCRLtYvBFYtrzkiIiIiUkvlJIOvEPcR7sxEQDOORURERPqBcpLBPwLfTLOJCzoAzGwn4v7B11SgbSIiIiJSZeUkg6cDM4HHgN8TieCJZvYw8H/A48B/V6qBIiIiIlI9vU4G3f1N4OPAJGBN4B1gG2AVIlH8lLsvrGAbRURERKRKyio67e7vEMngpMo2R0RERERqqZzLxCIiIiLSJMq5A8klPdisw92/XkZ7RERERKSGyrlM/BnS7OGMQcDa6XEO8HYf2yUiIiIiNdDrZNDd1y+13MxWAg4FjgZ26FOrRERERKQmKjZm0N0XufsvgduBX1ZqvyIiIiJSPdWYQPI48Kkq7FdEREREKqwayeAOwIIq7FdEREREKqyc2cSndrJqJNEj+BHg7D60SURERERqpJzZxKd3snw+8BxwGHBxuQ0SERERkdopZzaxClWLiIiINAkldiIiIiI5Vs6YwfHlvJG7zyzndSIiIiJSPeWMGZzBe+9A0hODyngNZnY3sG0nq09w97PN7HTgtBLrv+fuk4v29xXgRGB9Yozjme5+dTltExEREenvykkGDwaOAtYF/gD8My03YF9gJnAesLQSDQQOB0YULTsgLb8ls2whcau8rBezT8xsT+ByYrbz7cAewJVm9qa731qh9oqIiIj0G+Ukg2sDLcAH3H1+doWZnQY8AKzl7v9dgfbh7k8VLzOz84An3f2JzOKl7v5wN7s7C7jW3U9Iz+8ys02BMwAlgyIiIpI75UwgOQy4qDgRBHD3uURZmW/2tWGdMbONgC2BK3r5ug2ATYCrilb9AdjSzMZWpoUiIiIi/Uc5PYNjgFW6WL9y2qZa9icuQf+haPkwM3sVGA08C/zC3c/PrN80PRb3NE5LjwbMqXBbRURERBpaOT2DDwPfNrOJxSvM7KPAt4G/9bVhXfgycI+7z8osexY4jhizuBvwEPDLNLGkYFR6fL1of4UeztEVb6mIiIhIgyunZ/BbwN3AI2Y2BXgmLS9cvp0HHFmR1hUxs62ADYFJ2eXuXnzJ+BYzAzjOzM5x97er0Z6pU6dWZD+tra0llw8bMZa2traKvEe9tdvobj9Lf/msPfks5aplDObOHc6sFxqvM7yz34c8UQwUA1AMQDGA2sSgnDuQPGVmHwKOB3YC9kyrXgTOBX7s7q9Urokr2B94B7iuB9teAxwIbAZMYXkP4Egg275Cj+G83jZmwoQJtLS09PZlK2htbWXixPd0sgIwe94Cxo1b0Kf9N4qWlqGMGzeu0/VtbW1drm8k3X2WctU6BmPGrM6aG5VVNrRquvp9yAvFQDEAxQAUA6hcDNrb27vswCqnZxB3nw18J/2rCTMbDOwD3OTub5axi6fT46bA9MzyzdKj96F5IiIiIv1Sn25HZ2YbmdknzGy1SjWoCzsCq9PzWcRfImoPTgNw9xeIJHCfou32Baa4e+NdLxMRERGpsrJ6Bs1sP6Jw8zpp0Q7
AnWa2OvAgcLK7X1OZJi6zPzCXEvUAzayVKCbtwBAi4ftyakf2OuupwNVm9hzwF2B34LPALhVuq4iIiEi/0OueQTP7ItE79zTwPWBAYZ27v5aWf6VSDUzvuQoxS/gad19UYpNngaOB64mxgpsAX3P3H2Y3cvdrgYOIcY5/Jnob99PdR0RERCSvyukZPAn4q7vvaGZjgMlF6/9GhYtOu/u/ifqFna0vvvTb1b4uJ3oRRURERHKvnDGDmwJ/6mL9q4Du5iEiIiLSD5STDL5N13cg2RB4rbzmiIiIiEgtlZMM3gkcaGZDileY2TjgG8R4PBERERFpcOUkgycBawN/Bw4HOoCdzexs4EnivsFnVKyFIiIiIlI1vU4G3f0Z4BPEXTxOJ2YTHwN8H/gHsI27z6xcE0VERESkWno1m9jMBhG1BWe7+2fNbBTwASKpfF6Fm0VERET6l96WlhkIPAccB/zU3ecT9/0VERERkX6oV5eJU8HnNmKcoIiIiIj0c+VMILmUmE08tNKNEREREZHaKucOJP8EBgHTzexy4HlgYfFGVbg3sYiIiIhUWDnJ4BWZ/5/SyTYdxD2CRURERKSB9SgZNLPzgMvdvRX4dFq8CtEjuKRKbRMRERGRKutpz+C3gIeBVne/x8zGEPcg3sHd76la60RERESkqsqZQFIwoGKtEBEREZG66EsyKCIiIiL9nJJBERERkRzrzWzi95vZ/0v/Xy09bmJm/y61sbs/0qeWiYiIiEjV9SYZPCP9y/pFie0GEKVlBpXbKBERERGpjZ4mgwdVtRUiIiIiUhc9Sgbd/fJqN0REREREak8TSERERERyTMmgiIiISI4pGRQRERHJMSWDIiIiIjmmZFBEREQkx5QMioiIiORYb4pO14WZHQhcWmLV+e7+rcx2OwE/BDYDXgJ+7u7vKYptZscCRwBrAdOA49z9jio0XURERKTh9aeewc8BW2f+TS6sMLOtgRuBx4CdiOTx52Z2WHYHKRGcBJwP7AI8A9xsZlvU4gOIiIiINJqG7xnMaHX31zpZdyrwqLt/PT2/y8zGA6eZ2UXuvtTMWoCTiR7DyQBmdg/wJHASsHeV2y8iIiLScPpTz2BJKcn7DHB10ao/EJeCP5KefxxYDbiqsIG7LwGuAXYyswHVb62IiIhIY+lPPYNTzWwsMBO4DPihuy8GNgSGAE8VbT8tPW4C/B3YND1/usR2qwDrALMq32wRERGRxtUfksGXgdOAR4AlxJjAU4ANgAOBUWm714teNz89jk6Po4B2d1/YxXZKBkVERCRXGj4ZdPc/A3/OLPqLmb0BnG5mZ9WpWQBMnTq1IvtpbW0tuXzYiLG0tbVV5D3qrd1Gd/tZ+stn7clnKVctYzB37nBmvTCnZu/XU539PuSJYqAYgGIAigHUJgYNnwx24hrgdGI8YOFy8MiibQo9hvPS43ygxcyGuvs7XWzXYxMmTKClpaW3L1tBa2srEydOLLlu9rwFjBu3oE/7bxQtLUMZN25cp+vb2tq6XN9Iuvss5ap1DMaMWZ01Nxpfs/fria5+H/JCMVAMQDEAxQAqF4P29vYuO7D6/QQS4DngXZaPCSzYLD1OT4+FsYKltnuLqE0oIiIikiv9NRn8EtBBlJtpB+7kvaVh9gVeAR5Nzx8E3gD2KWxgZoPS625z945qN1pERESk0TT8ZWIz+zOR7E0FlhITSA4Hfuvuz6fNzgTuNbOLgd8DnwC+ARzh7ksB3L3dzH4ATDKzOUSSeDAxG3m/Gn4kERERkYbR8MkgcXn3a8D7iPY+AxwH/Lywgbs/ZGa7E3cX+QrQBnzH3X+d3ZG7TzYzgKOANYnxhru4++PV/xgiIiIijafhk0F3Pxo4ugfb3QLc0oPtJpO5lZ2IiIhInvXXMYMiIiIiUgEN3zMoItWxdGkHs+c1VumiYSPGltWm4S2DWXXlIVVokYhI81MyKJJT7YuW8OATjVXoO2ot9j4Z3H7L8UoGRUTKpMvEIiIiIjm
mZFBEREQkx5QMioiIiOSYkkERERGRHFMyKCIiIpJjSgZFREREckzJoIiIiEiOKRkUERERyTElgyIiIiI5pmRQREREJMeUDIqIiIjkmJJBERERkRxTMigiIiKSY0oGRURERHJMyaCIiIhIjikZFBEREckxJYMiIiIiOaZkUERERCTHlAyKiIiI5JiSQREREZEcUzIoIiIikmNKBkVERERyTMmgiIiISI4pGRQRERHJscH1bkB3zGwv4MvARGA08BzwK+BCd1+atrkM+GqJl+/l7tcV7e9Y4AhgLWAacJy731G1DyAiIiLSwPpDz+B3gXbge8CuwPXAecCPirZ7Hti66N+d2Q1SIjgJOB/YBXgGuNnMtqhe80VEREQaV8P3DAKfd/c5med3mdkqwLfM7GR3b0/LF7r7w53txMxagJOBn7v75LTsHuBJ4CRg7+o0X0RERKRxNXzPYFEiWPAYMJS4bNxTHwdWA67K7HsJcA2wk5kN6Es7RURERPqj/tAzWMongXnAq5llG5rZ68DKwFTgbHe/OrN+0/T4dNG+pgGrAOsAs6rSWhEREZEG1e+SQTP7KHAQcEbq2YPoKZxCJHarAQcDV5nZMHe/LG0zCmh394VFu5yfHkfTy2Rw6tSpvf8AJbS2tpZcPmzEWNra2iryHvXWbqO7/Sz95bP25LOUq5YxqObn6Ity2jR37nBmvVDqIkL/1Nk5IU8UA8UAFAOoTQz6VTJoZmsBfwQeITOBxN3PLdr0BjO7EzgDuKxa7ZkwYQItLS192kdraysTJ04suW72vAWMG7egT/tvFC0tQxk3blyn69va2rpc30i6+yzlqnUMqvU5+qLcGIwZszprbjS+Ci2qva7OCXmhGCgGoBhA5WLQ3t7eZQdWw48ZLDCz1YBbgQXAbu6+qJuXXAuMN7Ox6fl8oMXMhhZtNyo9zqtYY0VERET6iX6RDKYE7kZgDeBz7j63jN0UxgpuWrR8M+At4KXyWygiIiLSPzV8Mmhmg4kZv5sDO7n7iz14zQCiVMyLmdnIDwJvAPtkthuUtrvN3Tsq3XYRERGRRtcfxgyeD3we+D4w3My2yqx7irjMezlwJfAsMJKYQLIdcEBhQ3dvN7MfAJPMbA7waNpuQ2C/qn8KERERkQbUH5LBHdPjj0us+zTwBNHjdzJxGXkRkejt5u43ZTd298lmBnAUsCYx+3gXd3+8Ok0XERERaWwNnwy6+/o92Gz3XuxvMjC57AaJiIiINJGGHzMoIiIiItWjZFBEREQkx5QMioiIiOSYkkERERGRHFMyKCIiIpJjSgZFREREckzJoIiIiEiOKRkUERERyTElgyIiIiI5pmRQREREJMeUDIqIiIjkWMPfm1hEpDtLl3Ywe96CejejIlZZbfV6N0FEckbJoIj0e+2LlvDgE231bkZFfPj9w+vdBBHJGV0mFhEREckxJYMiIiIiOaZkUERERCTHlAyKiIiI5JiSQREREZEcUzIoIiIikmNKBkVERERyTMmgiIiISI4pGRQRERHJMSWDIiIiIjmmZFBEREQkx5QMioiIiOSYkkERERGRHBtc7wbUmpltBPwC2AZYCFwFHOfuC+raMBEREZE6yFUyaGYjgbuAF4E9gTWAnwJjgS/Vr2UiImHo0GHMntf/v5sObxnMqisPqXczRKQHcpUMAocCo4APu/trAGa2GPi9mZ3l7tPq2joRyb1FSzq4Y8rMejejz7bfcrySQZF+Im9jBncG7igkgskfgXZgp/o0SURERKR+8tYzuClwSXaBu7eb2XPAJr3YzyCAd999tyKNam9vL7l88aJ3GTxwaUXeo96WLF7U5WcZutKAfvNZu/ss5ap1DKr1Ofqi3Bg04mcp19Ili5vis7z7bjsvzS7vHNmyymhemv16ZRvUB8OGDGbl4SvV/H07+9uQJ4pBZWKQyVcGlVo/oKOjo89v0l+Y2SLgFHc/u2j5/cCr7v6FnuyntbV1G+C+KjRRREREpFo+OXHixPu
LF+atZ7BSpgCfBF4GltS5LSIiIiJdGQSsTeQv75G3ZHA+MLLE8lHA9J7uZOLEie3AezJrERERkQb1XGcr8jaB5Gli3OAyZtYCbEgvkkERERGRZpG3ZPAWYHszG5NZ9l9AS1onIiIikit5m0AyEpgKzADOYnnR6TvcXUWnRUREJHdy1TPo7q8DnwH+Dfwv8DPgauBrdWyWiIiISN3kqmdQRERERFaUq55BEREREVmRkkERERGRHFMyKCIiIpJjSgZFREREckzJoIiIiEiOKRlsEGY2oOgxdz8bxeC9FIOgOCgGoBiAYgCKAVQ+BrkPaKNw945UFHukma3s7ksBzGxQfVtWO4oBmNkQM1vXzDY2s5GZGAyod9tqLfuZC3EoXt7sFAPFoFheY6DjoLoxUJ3BBmBmewN7AbsALwLPAH919/My2wzM/vCbjWIAZnYgsC+wA/Av4A3g/4AfuPuCtE1TxyDLzFYHNgG2BZyIyTPuPi+tH+DuTX0CUwwUAwAzGwdMBHYG/gnMAh5095fS+qY/L+g4qG4MlAzWmZltADwG3AHcBGwBbAZsTtwp5SR3v6Z+Law+xWBZDJ4AriHujmPAB4E9gAHAKe5+ftq26U96AGZ2K/EHcAEwHngZeJBIkK9y9/Y6Nq8mFAPFAMDM7gPeD8wF3kecF9uAG4BzM18Wm/bcoOOgujFQMlhnZnYpsDqwT+YXeh1gO2A/YGvgT0Qy0FavdlaTYgBmdgGwAfAFd1+Ylo0EPgR8FTgQuAc41N2frVMza8bMziJ6io8A/pEWHwHsDqwK3A9c4O5/r0sDa0AxUAwAzOwMYB/gQHd/2MyGAfsDOwH/AcwAfuTut9WvldWl46D6MVAyWEdpLNylwEjgi8BSoCMzTmwjIhHYB7jJ3Y+pU1OrRjFYNt7jXOIb3w7uviB72cfMxgK7AUcTl4gOKCTNzcjMhhKJ71/c/eSidesC3wb2JGJxuLs/22w9IoqBYgAxhhi4GZjq7t8pWjcW+BKRGA4krqDc3oQx0HFQgxgoGawzMzsJOAyY6O6vpmUrufui9P9BwPeAScBe7v7HujW2ShQDMLOvAj8Gdnb31rSsOAZfBi4DvunuF9arrbVgZtcBS9197/R8JWBJJkHeAbgCeA7YtTBmppkoBooBgJn9BtjQ3T+dnhfH4GPARcBQYDt3f7luja0SHQfVj4FmE9ff5cT4j/vN7FMA7r7IzAaa2RB3X+LuZwN/Bz5az4ZWkWIQYz6eA243s8/DshgMSknhEnf/H+DPwEfq2dAauRf4opkdYmaD3H2Ruy9NJ0Dc/S/A54jB1J+pZ0OrSDFQDADuBLY1s9PTFYPiGPwN+E9gFHF1pRnpOKhyDJQM1lH6xZ4FHAq8Bvwq/cKv4+5L3f3dwnbAC8AH6tjcqlAMgrvPJWYSPwhcZmaXmNmGKQks9A4OAF4F1qtjU2vlUuBq4vLH0WY2HpZ/SUjbzCAS6C3q0sLqUwwUA9z9D8CPiOEy55nZh9PywpfFgcSEgieATaw5S63k/jigyjFQMlhHhe5dd78XOAmYDnwFuMrMTjCzUWkixReJwcJX1a2xVaIYLOfuLwLfJ37pPwHcbWbnmtkmZvYfwEHE7OLf1K+V1WOZYuPu/hZwKnFimwSca2ZfMLO1fXkJjSHEwOnX69HealAMFIOsTGL3S+LqwR5EDI4xsw+kL4tLgRHAOkBbs4yV03FQ2xhozGCNmdmqwJZEnaABwAvufmla10LMGv08MUtsTaInaBFwu7t/ox5trjTFYNlM4c8S3fmrAE8CV7j7S2l84GeJGOxE9ATOBd4CbnT3o+vR5lpIcRkILEonP8zsIOC/gcHAQ0R9rZeBXYH13X2D+rS2OhQDxcBi4siawHBgji+vI7cH8YVxHWAe8eV5FnEuHenuG9elwVWS9+MAahcDJYM1ZmbXELNGVyH+wI8n/sj/DPiFuy80szWJy6FrpvW3AS96KjnS3ykGYGY3ABsTyXAb8GFgEPA
/wPnuPt3MVgFGEyf+dYCHiT8MTVdPy0oXHb/H3X+S2eY44FPE8TAO+D3wB3d/uPYtrjzFQDGATovP3wqc4ctLbx1EnEM3B9YCrgSud/fH6tHmStNxUPsYKBmsITPbnZgs8WV3v9nM3kckBHsRl0afB45Il0yLX9sUU+UVAzCzXYlf2t3c/R4zWw0YC+wNHAu8DZzo7r8r8dqmiEGWdV10/G3gdHe/Im27CpFAd7j7v+vT4spTDBQD6Lb4/EAiIfx52naou79jmaoDzUDHQX1ioGSwhszsZ8Qlvz19xfsKDiUum55GFFo+AZgMDHT3JXVoatUoBmBmpxDf5nYpTJDJrFuX6P7fD/ipux9bhybWlPWs6PiNRNHxf1nMpGu2Y0IxUAywnhWfvw84zN29SWOg46AOMdAEkhrIDAKeSSQB66XlgwHc/R13vw84BLiEqLk3oZkOcMVgBf8CPk1841+Bu/+LmFl9KvBlM9upxm2rqTQ+chDQASybHenuL7n774lC2xcQE2q+A9Bsx4RioBjAsnPkYmIySHbiwOvp3HgCcW5YHZhkZsObMAY6DuoUAyWDNZC5rHcv8ct+RFq+2MwGpx8+7v48cDwxI+g7pfbVXykGK/gr8BRwnJmtBSsky7j728BPgFeAb1pzlooAlp3EnJgsNKowO9KW1856hugt/i1RTqHp6qgpBooBLDtHthJjpTdNy7IxmEMUnT8H+C/ggPq0tHp0HNQvBkoGa8jjzhJnAUeZ2S1pSvhid1+S+UHPA64FVre4B2VTyXsM0pi/WcAPiZlf15vZR4vHAaZLRFcTZQJG1L6lNaWi44oBKAag4vOg4wDqEAONGawyMxvs7ouLlu0PnA6sDfwUOMuXF1dehRg8vMDd96xxc2smxeBMYrZw08egMNi7aNnWxGf/f0TtwJ8Ar7j7m2a2OjGzeJG7717zBtdIuvyxNJ3wzgZWI74IXOzuL2W3I2ZMDnT3verT2spLvb4DUgy2IW5JOAK4jpzEAHQcZFkUE/4lcRnwBuCH7v5cZv0AoodwTXf/XF0aWSV5Pw7qeT5QMlhlFvfdvdbd/5lZthIxAHTf9K8D+BPwDjF77EPAVumSab9nUTtwbWCEuz+Rlg0gkqCvETdbX0pzx+BnxHHwYGbZAGAjoqD2YcAawN3EZfS1iJh90t1fqHmD68DMPg18i7g88hJwC/Brotbax4nLIgd5k9yb2sxGe9H9Q81sW+AootRQG00eg1JyeBwY8LxnZgSb2QeJIvO7ECW4/hf4FTCMiMvPgK+7+3W1b3Ft5PA4qOv5QMlgFZnZN4ALiZuMv2BFZUHMbCwxXXxbYE+i1t6TRNJwRz3aXGlmth1x+5ztiGEJc0k1sYjxMS1EvaztiaSoGWNwMPELbO7+XInjYDjRQ7odkRi/QVwq+l93n1KHJleVqeg4FneUuRfYvPjcYDGz/mvAzsQfgbVozhjoOIhbyz0ATPSoLZo9DoYQyeAONHHxeR0HjXE+UDJYRWY2B/g5cHYaEzeA6PIdD8zwVE08s/1q7v5G7VtaPWb2EnGj9b8RB/A2xIG9gPim+xuPGbSF7ZsxBnOAc4H/TsfBYOKm8psD09z9laLtm6puWDFT0XHM7AHiXtz7FA8fyGyzFrAhzRsDHQdxHMwBvpQ9DrLDiyxKy6xGFBVuuuLzOg4a43wwuBI7kfcys58Ss0Ev8eXTvo8Gvk5k9kMsagkdX/hhFpKg4p6j/srMDgPaiQLKhYTvGjM7mZgRdwrwSTM7uHA5uAlj8FPiNkG/zRwHpwH7E7/UQ83sWuBUd/e0fvF799QcLIqOf5bSRcdPAw4ws0LR8dlFr22KWdUWdxb4MHGZpz0tW5vo/RhJ9I5c6e4ziXNI9rXNEgMdB8uPg0+w/DjYmJgpvE4aE/Zzd3+WuNfsi5nXNksMdBw0yPlAPYNVkC7/zgYmufvJadmpRDJY6CV7H/Bl4lvhbh7TxZuKmR0AnAx8xN3fTgfusiLSaQL
FJcT4h8+5+9P1a23lpUkgrwLnuvt30rIziLI61wH3EOMCv0l889/Z3f9ep+bWhKnoOGY2E/i9u5+Qnm9L9BxvDrwJLCTuMvAjd7/YmrOoro4Ds1nA7zLHwWeIqyXrE+PDWog7E51BTCJpuj/WOg4a53yg0jLVsQ3xLW4/MzsvjQv5NlFIeF93P4c4yA8nuv73rldDq+xZ4hf9MDMb5u4d6TJpoYjmQ0Q19QVEdf1mY8AjwDfM7EYz+xjwDaJH9Ah3vxL4BdFL2A4cXLeWVpmp6DgAZvZN4ovgSma2WVp8EfACMWZqDaK+5kzgRDPbuJlioOMgmNmXiXP/+y1mjQKcDzwO/D933wDYB7gKOIY4NpqGjoPQSOcDJYNV4O5/Iga93kp0gT9AFBm+pjAWzKOq/DXEfSi3slR0uclMAX4HfBfYM02UwFMRzfT/x4iBs582s6app2dmK7v7A8RxcDox1uMh4u4jNxR+od19kbv/jYjVZmY2rFkuf2S5io4X3E3MCNwfmJyGCKxE/I7c7+7vuvtVxHCStYk7TjQNHQfLPETcReKDwM/M7G6iJ/B4YgIdKRk6iojTV+rTzOrQcbDM3TTI+UDJYJW4+z3EN7pTiUKidxBdvsu+FaXZYs8TYzebJhksfMNJA6CPJu6leSnwSzPbxFJx6YwnieLKzfSt7ydm9ll3n050+R9CXOa4iRj/kz0OBgOzCi9sxstBBZ7zouPu/rS770oMDVgD+AIxZOAld+/I9Iy8QMy2X7lJvxzk9jhI46Gfd/dvAUcCM4irCNcTdUaXZpKh+cTVhSElzpv9Xp6PA2is84HGDFaQxRT5CcAniVmiN6fl44CR7v5Uujy6NC1fj0gSL3f3s+rV7kpKk0Z2JC77/jvzWQ8jLo8OIwqm3k3ccmdz4EdEr+nxdWhyxZnZocTYn+eJYQFT0vKRxO2FXig6DjYgblH3O3c/vT6tri3LWdHxzqTfi8Xu/pui5WOI5OAxdz+qHm2rNFMB/kJB6TW9qGSUme1DjIe7smj5KOI4cHc/pGYNrTGdD0I9zwdKBiskfZO7lKgF1AGMIS4Pf8lXrBo+KH3r2ZAYB7Gvu7+vHm2utPQt5m3gJHefXGL9eOKy6beIntAWooTAXe6+Xw2bWjUpBnOJX9ytidlfX/WiwtGZ42ATotfwi+6+Xq3bWwumouPAshP6Ku7+YtHyAakXYCWPW04NIeqOXkbUpmyKouOmAvyY2WPEbNnt3f3h7GSAEsdBC3EcXEpzHQc6H9B45wMlgxViZucRs57OIsYB/gfR43Wfu+9ftO0goqL8x4DD3P36mja2SszsQmAr4kT3Wlq2CZEgrwrcBTzh7q+b2W7AfKLA8j+9k9pK/U0mBtsSJSIuJi5xHFCiV2Qg0SO4CXCou99U4+ZWnanoOGa2BTFR6gBiqMhjwMleogB52v7bxBem6939ezVvcBWYCvBjZl8lErvZxFWRA9z9X6V6TNP2pxBfnq9z9+Nq2tgq0fmgcc8HSgYrwKI20hTgSE+3B0o9RMcAk4BPufuDtmJV8S2A8c2SAJjZB4DpxCXie9JA4G8RA6DXJWYMjyQGyx6W7S1tFma2EXGS/0IhwU9/BM8l/gic6O5vFF0iHgP8h7v/tU7NripT0XHM7ElgHvAoMUxie+AfwFe8qGCsma0LfI8oSv6VZhk/airAj5nNI+41+wJwBXAjUV/vPV+EzWw0kTSNAw5pouNA54MGPR9oAkllfJYoJfMMLOvmXUyMfZhB3FKITCL4PqKCfFMkgsmxxPE0OiWCo4kK8jcR3/Y3I7r/twJusKim3mwuAm4mZpEXXAdcTcwG+yJAJhEc5O5zmzgRzBYd/6W7X5PGu4wjLgGdAlxuZu8vvMYzRcfr0eZKs6gruRT4hketycOBnxDHwr5pm2WTx9IfwnOAo5ooAeisAP8DxNWCl8zs3OzkgCY8Dn5K9IL9xt2vJnp6diQmmg1L2yz7e5wmTZwNfLuJjgOdDxr
4fKBksDL+TdxKZwZE0pd6fxYTycCyQa8W5VNuIsaJNYV0EruR+FxXm9mfiG++twI/cPdHgFfd/Vric38I+Ei92lsNaRzM08SJbtltotx9vrsfRCSEF5rZ18xsYPrC0Eyzp0t5m7h/5jyIE3pKgN9090OJOy+sDdxlZptmX9gMfwDT7/qOwP8Qk4lw96Xu/mvij9+X07JlY8bS8395zCLt99Ll36OJckovp2WnEn/4pxNDaS4lYvFo6l1fpkmOg3WJ0iknFYbPEJdGryBmkR4Iy78kFrj7QndfUMOmVpvOBw18PlAyWAEe9QJ3TZcACz/Awi/2bcAHzGyr9HwvorbUebVvaXWkA/oWYgzEgcAGRG/gLaQyKhlPEL0E69awiVXn7u3ufri7P5ldnvmWdw4x9uVE4IPNcHLrgbwXHV83/Vvsy+8zWzjn/gH4qJl9MLP9p8zsZmuu8hkqwB+fdRpRYqxw5ehNdz+MSIQnm9kh6UtiM/9N1vmggc8HzXzg1ZS7z0iPxX/kHwJeArYzszVYfmuhebVtYfW5+5tEkendiMvGj6Re0sLsqAHERJJBvDdJbEq+vLj0VOIP3WLgT2b2H3VtWG3ktug4gLtPI7703QHLkoDCl8T7ibqan0vrRhC9ZaOKxw31Zx4F+A8gxwX43f0bxK0mF6TnHZkk4GdED+nxwCbFvYNNppXoDc3z+eB8GvR8oGSwitIPu52YMfp54DhgkLufUd+WVU/6tjcTuMjdH02LC8fZCKLncEkaN5Mb6XLIs8SkonWJnpCmlOkdLxQdv5+cFR235UWDf5S+CKzwRdHdZwN/BnZNi3YnZlh+sbYtrR5bXjD3fuLuESeTvwL8QwDc/ZXs8kzyM5X4mS8E/s/ilpVNyaNu4NHAg+TsfFDg7pPcfWrqCW2o84FmE1dRpkfs88TU+QFEPbk/1bdltZe+BR5J/FE4xN1vrHOT6sbMjgGedPe/1Lst1ZKSoYGF3h8zO4T4pjucqJd1D01adLwgJUNLOhsSYGb/RfSUfAr4PXCTN0kpmYJMQli4LLYGMMbdn7YmL8BfkH4XlpY6DgoxMLPdiQlojwL7NdGY0c5uxPBN4CSW34Sgac8HRTF43N3/nJYvO/7T87qeD5QM1oDFnSceB55298/VuTl1YWYbE5cM72qWX/LeshI1pJpJ+qN3EnBxZrLASpmE8P1Ez/A3iS9GQ2m+ouOlYrCssHBmu4HEDeqnECU21nL3sbVubzV0dhwQiXH2j18zF+Dv0XFQ9JpvAke7u9WomVVlnd+IYW93fzn93L/K8qskzXo+KI7BvcSx/nJmu7qfD5QM1kg6Ga7s7q/Xuy31kn4xBntmtq00DzM7G/g+cZnnUuBcX15OqbjI8OeJcaNvAdO9eYqOdxWDUknhX4HPAHu5+x9r3d5q6CYGKxRYtuUF+LciCq9fX/sWV15vjoPMFaSViF7TV0rutJ+x0jdi+DFwr7/3RgyF88GbxK33muV80OObUaTt63Y+UDIoIn2WSojcS5SNmE9c7pkBTC4MCUjjw4Y065eBXsRgoC8vH7EJccvK0+vR5krrbQzS/ycQBfhvrk+rK6uc46DZWPc3YtjW3R8o/nLQTHoQg1I3o9iU6DU8tdbtHVzrNxSRpvQRYC1i9uyVwMHAHsAFZrYvMMmj7E67mQ0lak22AA800aXznsZgiUVdyg8Tl46aaYxcb2IwhEiUBhFlqJpFOTEYSnP9LpS8EYNF8e1DiMumD2TGkq5LJMcvdrbDfqi7GOwCPJhJBNcjekdPq0djNZtYRCphOjEQ/JpUKmQyMWHoSqLn5wYzOzuVTBhO1NXauYn++EHvYrAyMVh8jybrHepNDFZJy3fP8XFQiEGz/S709kYMN5KKbzeR3sbgemKoRF2OA10mFpGKMLMh7v5udtJIWv45opjsNsAbRN3NHYhB0k0xa7JAMVAMQDEAMLP13X1GifHC2xEzxz/h7g+b2deJ+xKv5U1Wf7c/xUDJoIh
URVHpkBHAF4ib0m8DHOvuP61n+2pBMVAMQDHISkMkngEuAC4hyulc7E1cf7dYI8ZAyaCIVFWmhMhg4CpgC3ffqLvXNRPFQDEAxSAzc/oSwICHibqKa9e5aTXTqDHQBBIRqar0x28g8J9Ej8ie3byk6SgGigEoBhl/IsbIbU0T3XWnlxoqBuoZFJGaMLPxwP7uPqnebakXxUAxAMXAdCOGhouBkkERERGpKd2IobFioGRQREREJMdUZ1BEREQkx5QMioiIiOSYkkERERGRHFMyKCIiIpJjSgZFREREckzJoIiIiEiO/X/icxdMgxke1QAAAABJRU5ErkJggg==\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAnYAAAFKCAYAAACQBBKyAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjQuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/Z1A+gAAAACXBIWXMAAAsTAAALEwEAmpwYAACHR0lEQVR4nO2dd5wURdrHf7MLLDkHEUkCFiKY1hzOHFAx5/CennreneE8MZzhxHzmnD3jmdBTCZJEohIUlpyKuOSwwO4Cm0O/f1T3THdPdZye6ZnZ5/v56DLd1RW6u6qefup5noooigKCIAiCIAgi88kJuwIEQRAEQRBEMJBgRxAEQRAEkSWQYEcQBEEQBJElkGBHEARBEASRJZBgRxAEQRAEkSWQYEcQBEEQBJElkGBHEBkAY6wXY0xhjN2oO/YYY8xXvCLGWCFj7BMX6U5Vyz3VTzlJqM9UxtjUBMo5gzE2jzFWobarrd+8shnG2CeMscKw62GGMdaCMbaNMXZz2HVxA2NsDmPs+bDrQTQsSLAjiIBhjN2oCg3HhV0XIgZjrBWAbwHUA7gDwA0AykKtVIgwxgaoHwe9wq6LB/4OoAbAf8OuiEv+DeB2xth+YVeEaDg0CrsCBEH45ikAz/q8lkEIOJnG2QlcexiAdgCe5JyPDKg+mcwAAMMATAVQaDp3K9Lsw58x1hjA3QDe4ZxXh1wdt4wAsAfA7QD+FW5ViIYCCXYEkaFwzmsB1Pq8tirg6qSEBCf0zurf0iDqAoilQc551mn9OOc1YddBwgUAOgH4JuyKuIVzXs8Y+x+APzLGhnHOM/FjisgwSLAjiBSg2o9dDaAPgLcAnAmgAsCnAB7gnNfp0rYF8CqASwAoAEYCeEWS52MAhnHOI+rvHwEcCqAn51wxpf0ZQB/OeW/1dyGAqZzzG3VpDgDwJoCzIJYovwAwXlJu3LXq8akAwDk/Vf3dBMDDAM4D0BdAUwCLATzDOR8hv1P2SMroBWAdgAcB7ALwTwAHAFgE4G+c8zm6605Rs5nCGAOAT7U2MMaOBvA4gBMBNAFQAOBfnPMpurIfg9BwDQLwAIDzIYRE7Z6erbY3X73kVwD/5Jwv0OXxCdy/BxEAf4PQnjGIZzIfwBOc81906a4F8A8AAwFUAvgZwP2c83U29/FGAB+b7gcA3MQ5/0St56mc8166axQA76n5Pw7gQIj7fBvnfAFj7FYA9wPoDuA3Na+1pnId77MNFwPYyjlfasozrq7q8ceg6x/qsTMQe4Z5ALYCGMc5v0OXJg/iPboeQA8AOyGEyYc55+WmMq6G0CIOglgiXgLgBZNGeCLE0n8+gDku2kkQCZFWqnaCyHJyIASlXQDuBTANwFAAf9YSqJP5SAj7ry8APAJgf4iJ34mvISbV4/UHGWOdAZwKYLjVhYyxZgAmATgHQrh7GmLyTcTwuzWA2wDMgBB4Hoa4Bz8wxgYnkK+MqyCEivcg7lkvAN+ry3eAaM/r6r+fgbi/7wEAY+wUAL8AaA/gCQihLQ/ATxZOI8MhlnQfAfCamse1EM+2EkLIfAxC8PmFMdbfdL3je6DyPsSz2Kbm+TSAEgB/0BIwxv4J4HMI4XYogBcBnARgBmOsk/xWAQCmS+7HDepxO06A+Mj4r9pGBuBHxthfIITLdyDemeMAfKK/0Md9lpU910U6KYyxAQDGAGim1v3vEDaXJ+rSRAD8APEujQFwJ4RQ9zcAI9TzWtpHAHwF8fH1OMRS62qIPqSnQP17IggiBZDGjiBSR2MA33LOn1B/v8sYmwfgZogJEQAuhJi4H+CcPw8AjLF3ILQkToyE0P5cBWCm7vjlAHIhBD8r/gzgIABXcc6/Uct9H0JD5JdiCO1hdNmXM
fYmgHkQQsi4BPI20x1AP855sVoOh7gf5wD4kXM+kTHWBsBdACZyzqeq6SIQAt6vAM7SNJ2MsXch2v4MhEChZwXn/DJdm1pACGCfcM7/pDv+IQAO4FEA1+qud3wPVEHnFgBvc85v1137iiZcMMZ6AHgSwGO6vMAY+xrAUghB6yHZzeKcr2WM/WK+Hy7oD+BgzvkataxiiPv3BMT9L1WPNwLwIGOsL+d8tc/7HEXNrw+EsOWXsyAEycGc85264//U/fsaAOcCOI1zPk1X/lwIAfosCEG0D4QwNwrApRJNaxTO+WbGWDWETSNBJB3S2BFEavnA9PsXCM2OxnkQTg2aoAd10njLKWPO+V4AYwFcwRjT9+2rIISRBTaXnwdgO4D/6fKrAPAfp3Jt6lOnCXWMsSaMsfYQWrzpiC1XBsV3mlCnoi1VHihLrOMwCK3TlwA6MMY6MsY6qvWcCOBYxlhz0zXvmH6fBaHB+1K7Xs0jV63HaZJynd6Dy9W/w8wX6pbZL4X4OB9uKrcUYslbVm6iTNGEOpXf1L/fa0Kd6bjWJj/3WU97ABGIjwW/aPW72NQ/9FwJYCWApaZ7Og1CM6fd00sg5s8n9UIdYHg+eooBdEyg7gThGtLYEUTqqOGcbzUdK4YQCjR6AtimCml6Vros42sAl0Fo/aYyxvaHWJp70uG6ngDWSIy73ZYrhTF2C4Tm6GCIiVnDV/w9Gzbof3DOi1W7sXby5FEOUv9+aJOmAwC9bdUa03ktj4kW15vvqZv3oA+A7SbNkhmt3BUW59daHE+EDabfmrC00eK41iY/91lGxOG8HcMhtKIfAHiWMTYZwmv1G9URSasnA1BkkYfmgNNH/bvUIp2ZCIJ/5wlCCgl2BJE6UuERNwbAXggt3VQIDUQO7JdhvWI1QeUC0C9JXQcxiY4G8ByAHRBevDfBuDQZBHUWx50EAU1z80/EbKHMmCf5Cos8bgSw2aE8ILj3QCt3MOTe0eZ6BoHVfXa6/37us55dEO+dTFC3ex+jcM4rVDu/P0BoqM+BsGO9hzF2sqqhzgGwDML+TsYWmzra0RbCCYMgkg4JdgSRXqwHcBZjrJVJa3eQ1QV61MlrFIDLGGN3QAh4CznnVlodfbmHMcZyTFo7WbnFEBOVmZ4waomuUH9fpF+eYozd5NySlKFp3/Zyzt3YMdrlUZRAHrI8z2WMdeKcWwk8WrkbOOfLfJSRSg1SQveZc17HGFsF1QPZhN37aM6nHuKDZyqA+xljfwXwNsSy9hdqPfMBTLJYUtXQ2nMIHBw6GGPdIDyAl9ulI4igIBs7gkgvxkL0y79qB1R7oNstr4jna4h4XzdBeCe60daNBdAFMdsuzVP2FknaNQCOU8OZaGkvgHBg0KNpcfSehAdC2CelCwUQnoz3qDtTGHDwLNWYAOGt+pD+nnjMw4xm6/iYJD/tfn4HcY8fNRvsq+mcbLq0+HtOy9VBEMR9ngHgKMnxNQDaMMYO1eXXFab3jDHWQXLtPPVvW/XvcIh+8FdzQsZYnq7uP0BoXh9ljOWa0pmfhWZPOhMEkQJIY0cQ6cVoiAns32qMtqUQ8bvae8hjAoQW42X1txvB7gOIWFufMsbyIZYUrwcgC2T8HwgBcDxj7BsIe6PrEW97NgpCEzJK1SJ2gwgbwQEc7qE9SUMNIHszRPiRZYyxjwBsgggxcwqEUGrrhMA536OG+/gCwHzG2FcQjig9IDwsl0Is03qp11Q1PtvfVA9MzYP4eIjYcc+onq3/BPACgJ6MsREQAmZvABdBCCmP2RQzH0IwfJCJ2IkVAH6zi3/nlyDuM4SX802MsUNMsey+hljq/4Ex9jqA5hCC2UoAR+rS/Uv1Nh4DsdNGOwB/gRBwf1TTfA7xbr+lLtv+qtaNQZg1XAERw3ENY+wJiPv7K2Psewj7wCMhQt7oP8TOUtvqO1QLQXiBNHYEkUaoS0UXQggJ10HELtsK4I8e8qgB8D2AVhATdaGLa8oBnAHgJwgB7
18QGob7JWknQIQrOQgikPLxELsCbDKl+1S9fgBEzLTLIBwp0mo7L875dAjN5mwIwfNNAH8CsBtCYHCTx3AIwWQDxL15HcKOcDnUeHk+uBnAPRCa0Ochnkl7CA9NrdwXIQT/aoi4ei9DaKqmQsRos6vzdojgx+0gBPuvEAviHDgB3OcxEHaaV5ry3QXR5nKI+/RHiLh/o03Xj4QwDfijWvbdEMLtiZzz9Wpe9RAfI/dBvLcvQIQ1OQ5iyXaRrtzH1bwaQ4R7eQpCAJygpVG17ZcD+Ix2nSBSRURRyFGHIAiCSH8YYw9CaOP6pOm2ZwYYY5dCaAH7SDyhCSIpkMaOIAiCyBReg3BE+L+wK+KSBwG8SUIdkUpIY0cQBEEQBJElkMaOIAiCIAgiS2jwXrEFBQWNABwAYFN+fr4syCdBEARBEERa4CS3NHjBDiKI5WoAJxcUFGxySkwQBEEQBBEiB0DsL90X8WGmSLAD0FX9+4ttKoIgCIIgiPShK0iwk7IVAA466CA0aRIXND5QlixZgoEDBya1jHSlIbcdaNjtp7Y3zLYDDbv9DbntQMNuf7LbXl1djZUrVwKq/GKGBDt126MmTZogLy8v6YWloox0pSG3HWjY7ae2N1wacvsbctuBht3+FLW9TnaQvGIJgiAIgiCyBBLsCIIgCIIgsgQS7AiCIAiCILIEEuwIgiAIgiCyBBLsCIIgCIIgsgQS7AiCIAiCILIEEuwIgiAIgiCyBBLsCIIgCIIgsgQS7AiCIAgiC9hStA/XDxuHouKKsKtChAgJdgRBEASRBYyfvR6l+6rxy4JNYVeFCBES7AiCIAiCILIEEuwIgiAIgiCyBBLsCIIgCCILUBQl7CoQaQAJdgRBEASRVUTCrgARIiTYEQRBEARBZAkk2BEEQRAEQWQJJNgRBEEQBEFkCSTYEQRBEARBZAkk2BEEQRAEQWQJJNgRBEEQBEFkCSTYEQRBEEQWEaFoJw0aEuwIgiAIgiCyBBLsCIIgCIIgsgQS7AiCIAiCILIEEuwIgiAIgiCyBBLsCIIgCIIgsgQS7AiCIAgiC1CUsGtApAMk2BEEQRBEFkHhTho2JNgRBEEQBEFkCSTYEQRBEARBZAkk2BEEQRAEQWQJJNgRBEEQBEFkCY2cEjDGrgBwHYB8AO0BrAHwDoD3OOf1unSDATwNYACAzQBe5Zy/IcnvXgC3A9gPwFIAD3DOJ5nStALwAoDLATQFMAXAnZzzQlO6fgDeAHASgAoAX6v5lbtoO0EQBEEQRFbhRmM3FEAVgPsAXABgBIDXATynJWCMHQ9gFID5AAYD+BjAq4yxv+gzUoW6ZwC8BeB8AKsAjGGMHWYq8ysAFwK4E8BVAPYHMIkx1lyXV1sIga8VhAA4FMA1AD5y0SaCIAiCyCoUULwTwoXGDsAQznmR7vcUxlhLAHcwxh7hnFcBeBTAPM75zbo0PQAMY4y9zzmvZ4zlAXgEQpP3IgAwxqYBWAzgYQBXqseOhRD6zuecj1WPLYbQFN4I4G21jNsAtANwOOd8p5quFsAXjLEnOedL/dwQgiAIgshsKN5JQ8ZRY2cS6jTmQyyRtlcFttMBDDel+RJiufVI9fcJANpALJdqedcB+AbAYMaY9iaeB6AUwHhdug0AZqjnoEs3SRPqVL6D0C4OdmoXQRAEQRBEtuHXeeJkALsB7ADQB0ATAMtMaTSNWX/178Hq3+WSdC0BdNOlW6G339Ol66/7fbC5TFV7uMaUjiAIgiAIokHgZinWAGPsKAA3AXicc17HGGunnioxJS1W/7ZX/7YDUMU5r7BJt0lNZ85LS9de99ttOlcsWbLE6yW+KCgoSEk56UhDbjvQsNtPbW+4NOT2p7rtO7aXAAA2bdqIgoKSlJYtg559OHgS7Bhj+0Esd/4OnfNENjBw4EDk5eUltYyCggLk5+cntYx0pSG3HWjY7ae2N8y2Aw27/WG0fd6mxQDfhwMO6I78/D4pLdsMPfvktb2qqspWG
eV6KZYx1gbAOADlAC7knNeopzSNW1vTJZomb7cuXR5jrKmLdOa8tHS7db/dpiMIgiCI7IecYgm4FOxUYWwUgM4AzuWc79KdXgOgGjEbOo0B6t8V6l/Ntk6Wbi9E7DstHdM5U+jTrdD9Xm7OS3Xk6GNKRxAEQRANhgg5xTZoHAU7xlgjCM/VQwEM5pyv159XHRYmQw1XouMaANsAzFN/z4Twdr1Kl3euet14zrn2rTEWQhN3ji5dd4ggxGN1+Y8FcAZjrIPu2CUA8kzpCIIgCIIgGgRubOzeAjAEwP0AmjPGjtOdW8Y53wPgCQDTGWMfAPgCwIkAbgVwu+bdyjmvYow9BeAZxlgRhMB3C4SG7VotQ875b4yxMQA+ZIwNBaDlvwHAJ7qy34MIYDySMfYkhDbxZQDDOedmD12CIAiCIIisx81SrKY5ex7ALNN/RwIA53wWgIsAHA1gAoTA9g/O+bv6jNTAxA8BuAvCXq8/RCDihaYyrwHwI0Qw4m8hNH9n6rcK45yXQMTP2wfgewCvQMTS+5OLNhEEQRAEQWQdjho7znkvNxmpu0Q4LoGqwt2LDmn2QuwscZtDupUAznVTP4IgCIIgiGzHb4BigiAIgiAIIs0gwY4gCIIgsgCKdkIAJNgRBEEQRFZB0U4aNiTYEQRBEARBZAkk2BEEQRAEQWQJJNgRBEEQBEFkCSTYEQRBEARBZAkk2BEEQRAEQWQJJNgRBEEQRBagKBTwhCDBjiAIgiCyC4p30qAhwY4gCIIgCCJLIMGOIAiCyDh2FJfT0iNBSCDBjiAIgsgo+PrduPmpifjpt/VhV4Ug0g4S7AiCIIiMYuP2fQCAZet2h1wTgkg/SLAjCIIgMgxagiUIK0iwIwiCIDKSCHl/GiF5lwAJdgRBEESGQT4T9kQo3kmDhgQ7giAMlFXU4LclW8OuBkE4QgIMQcRDgh1BEAZe/KIAT338O7bvLg+7KkSCjPplDdZv3RN2NQiCSCGNwq4AQRDpxdadZQCA6pq6kGtCJMoHI5YgJwKMfPGisKsSKLQSSxDWkMaOIAgTYtokw/TsoD4LpSDNxo7eUYKIhzR2BEEYiE2aNGv6pbK6Ftt30VJ28shCaZUgAoI0dgRBGNCmTBLr/PP8f+fijhenoLq2PuyqZDX08WGExF0CIMGOIAgT2v6bNGn6Z8maXQCycxk0HaBwJ/ZQ123YkGBHEIQBsl8KApI8UgG9owQRDwl2BEEYIJEkcaLCcbjVyFroHbWHNJoNGxLsCIIwQkuxBEEQGQsJdgRBGCDnicRJB4WJks1qG/r4sIVuS8OGBDtCypChI/HcZ3PCrgYRAgpJdglDdorJhV5RgrCGBDvCkl8Xbgm7CkQYaNoQmjb9kwbasjSoApFislpLS7iGBDuCIAxEtSEk1/kmplGim5gMSH6xh966hg0JdgRBGKBlxAAJ8R42CNmH3lGCiIMEO4IgTDQIkSCpkEYpydANJghLSLAjCMIA7RUbBJqdYphVyH7hh95QgoiHBDuCIAyQx2HipINMlQZVSBrZ3DaCSBQS7AiCMEKSXcKQA0pyIa0yQVhDgh1BEAbqKdxJVpAOWsNkoaTDUncaksWPnPAACXZE4IybVYiSvVVhV4PwCXnFBkA2S1XpBL2jcqjzNmhIsCMCZUvRPrz9v4V4lnatyGBIKAGAD0YuxvT5mxLO59IHRuPOF6cEUCOv0HMkiIZIo7ArQGQXNbX1AIA9ZdUh14TwC9kvCUZNXwsA+MMRB3i+Vi9S1dTWo3DrnoBqRQAgmZUgbCCNHREoZDSe+dAzTJx0WIlNhzokm4b+8UEQMkiwIwjCiEKG6dlANst12dw2gkgUEuwIgjAQ09iRaEekJ1FzgXCrQRBpCQl2DZAxM9bhq594UvJWSNuT8ZBXbHagZPVaLEl2UrL5kROuIeeJBsi73y8CAFxzNktaGaTtyWRodiAyA4q1KIeG34YNaewIgjBAXrFZQhbL51mtjCSIBCHBjiAIA
7SjWHAk6x5WVtVixLQ1qK9PLwnn2c/m4JWv5qWsPPr2IIh4SLAjAoW+pDMfhSS7tOfTscvw4aglmLVkq2WaMLrijIVbMHnuxqSXQ+MMQVhDgh2RFOhLOoPRHGDoIaYt+8prAABV1XWWabLbeYIgCCtIsCMIwgAp7DIHkr0JgjDjyiuWMdYXwL0AjgMwEMAKzvlAU5pPAPxRcvkVnPP/mdLeC+B2APsBWArgAc75JFOaVgBeAHA5gKYApgC4k3NeaErXD8AbAE4CUAHgazW/cjdtI4KFtASZD4U7IdKf7NAq7yguBxSgc/vmgeRHoy8BuNfYHQLgfACrASyzSbcWwPGm/ybrE6hC3TMA3lLzXAVgDGPsMFNeXwG4EMCdAK4CsD+ASYyx5rq82kIIfK0gBMChAK4B8JHLdhFJgsIQZC4KGdkFB93CpJAtAYpvfmoibn56YuD5Zvp9IRLDbRy70ZzzkUBUM3eURboKzvlsq0wYY3kAHgHwKuf8RfXYNACLATwM4Er12LEQQt/5nPOx6rHFANYAuBHA22qWtwFoB+BwzvlONV0tgC8YY09yzpe6bB8REPTFmPnQXrHpjxvFeDYrz7O4aQSRMK40dpzz+oDKOwFAG4jlUi3vOgDfABjMGNOmkvMAlAIYr0u3AcAM9Rx06SZpQp3KdwCqAAwOqM4E0bCg3UOyimwW0LO5bQThl6CdJ/owxkoYYzWMsfmMsatM5w9W/y43HV8KoCWAbrp0KyQC5VIA/U35GZaGOedVEJo9fToi1dCAm7FEtSE0a6aMEdPWYN2W0kDzzGatFtnyEoQ1QQp28yEcLC6GsHfbBOBrxtiNujTtAFRxzitM1xarf9vr0pVIyijWpfGSjkgVNN5mPNliv5RJfDhqCe56aarn62yfEWleCaJBEthesZzz10yHRjLGJgN4HMAnQZWTLJYsWZKScgoKClJSjhvc1MVrfbfsrgYAVJSXx12bTm0Pg0xr/7x5BYF5HWZa2/UEVfcg+9uu3bsAAOvWFaIVdkjTVFSLBQ/FQ75BoS8vGWVv3rwHALBt+3YUFFQGnn9QuG17UPeoqEjoSNZv2ICCvN2B5JkImdzvEyXMtgcm2FnwLYC3GWOdOOdFEJq0PMZYU865vje2U/9qb2IxgB6S/Nrp0mjp2lqkW+GlogMHDkReXp6XSzxTUFCA/Pz8pJbhii83AYB9XdykkdBmYwkwfgeat2huuDZt2h4SGdV+9dkfdZSVj5Q3Mqrtenz2Af21erE4yP42eflcYP1m9O7dC/n53aVp9pZXA//bgoiHfBPG1I5kPfs1xSuBhXvQdb/9kJ8/IPD8g8BV2xN5xyTMWrsAWF2Gnj16ID+/dyB5+iVj+30AJLvtVVVVtsqoVAco1mzrDjYdHwBgL4DNunRM50yhT6cX2Jab81I9b/vAo2BHBIOShLXY14fPx0+/rQ88X4JoEGShraQSjWMXckXSFRc3ZmdJBd75biHq6oLyjSTShaQJdqpQdiWA9aq2DgBmQni7XqVLl6umG88516SCsRCauHN06bpDBCEeqytmLIAzGGMddMcuAZBnSkekiGTYZ038fQPe+GZBgDkSRPqQkCOAzQRO/gWEHW9+uwBjZxZi4aqdzomJjMLtzhPNEQsz0hNAa8bY5ervOerfTyGCCq+GEMpuAXAqgBu0fDjnVYyxpwA8wxgrAjBPTdcHwLW6dL8xxsYA+JAxNhTAHgBPANgAo73eexABjEcyxp4E0BnAywCGc87tAikTyYY+pQnCFb4EMBLaiATR3rt6+gLIOtza2HWGsJfTo/2+CcAoCE3cI2raGgih7ULO+Wj9RZzzFxljAHAXgC4QIUzO55wvNOV/DYAXIYIR50HsMHGFfqswznkJY+x0AK8D+B6xLcXud9kugiCIUElW6I6sDgmSxU1LGfTtnbW4EuzU/VmdXoOL3Baq7jrxokOavRA7S9zmkG4lgHPdlk0QBJFOJFtGyeb5O9P3ig0T7c5l9
QdAAyXVzhNElqNQ7CyCiOJG8Ij2GR+dpqH2MxJFEkd7N+leZh8k2BEEQYSIH4cjN5NxQ1DENFTBNlAawHvS0CDBjkg7vp20EisKww+uaWbx6p0YMnQkdu+pRHllDW4YNh6LVhc5X0gQNmjzarKWFbNxtTIqtGZh24LAzW3J0TR2DeELoIFBgh0RKLFJyn8en41djvve+CWQ+gTJ6F/XAgCWF+7G+q17UbKvCv8da972mCC8odRrS7HBSinJiCmZNkRNPkiy84v2umXxW9JgIcGOSAo04DYctu0qo6/+BNDuXI6HLuPKLi/6SLKvL5rftlUbi1FUbN6CnHADdd3sgwQ7gnCJzDGkoY+JfP1u3PrMzxg3qzDsqmQsUaE4G9dMk4x2y+55dTr+9NRP4VYmY2noo1j2QYIdESxZPEbo59/oHJzF7XXD5qIyAGJ5mvBHsuS6bH4100nLdNXDYzD6l7VhV8Mz0aXYNLqXRDCQYEcESsMwao5E25fVdkyuaOjtTxw/S7EabkweslkRGHbTFEVBeWUt3h+xOOSaCLwIaRTuJHshwY5ICmEPuMkm29vnFbof/lF8fA25C3dCU3ayqa9Pz3vsSZhPzyYQCUCCHUF4RD9o0tzpTF29gro0nQDTAe0d8qOxSwd2labeaSGqKQ9ZHants5qTgQ8vQqsOWQsJdkSgNJRBgpYx3PPk15vxwJvpF74mXYh5uGaecLBi/W7c+MRPmDx3Q2oLTg+5DnV1qmAXdkV8EB3DaBDLOkiwI5JCKiepyupaLFu3K+nl0ADoH76+OOwqpC0JOU/YXONnRwuvrN+6BwCwdG1qnWfSpStmtMZO/UtL9tkHCXZEoIQxRrz5zUI88Oav2FFcntRyNG2kYQhv4INitjY/lZNd9L3yItml2X0PSzgIW5zSbOwyUK7LSA0x4Q4S7IiMZ+2WUgBARWVtUsuJaVYiFLXdRLZNEqmUU5IW7iQlbQjnuaeLlimInXbCJk1uJREgJNgRhFcitLOGGW2iLa+swazFW0KuTeKkcq5Luo1dJksdToTcNP3HXjrgReClUJzZCwl2hC3fTV6F8sqasKvhDoex9bOxyzBu5rpAi8zmr10/WpHXhs/HM5/Mwcbte5NQo+wkEa9YO3mioTgyeWXj9r0YPpEHkpdsNxonxs8qxCX3j0qyp7iLGsWM7JJYDyIMSLAjbPlkzDJ8NHqp6/Su9rB0cb2/i+1PfztpFd7+bpH//PXQ564BTWOxY7ewc6yoSu6yeNJJpY2djz1d00VoC3v3Aj+a84fenoHPx69AWUXiH6yKj7XYD0YuQW2dgpqauoTLNzPxd8072fmBaPcuPd4kIkhIsEszdu+pxJChI7Fg5Y7Q6mAWrvzYrvldmsiEj8cI9HJdBlTYJ26eRVyaNFmSSpSULsUiSQb4KWhEWE87EbvE6trgBCo//V97zvVJHOw87UCRtFoQYUGCXZrB14uwAWNmBLtkmCmks5ikF3jTxaYmmfh5FtkSQiEM54mgheJsMOy3IpH3K9Bn6yOkjBYaJZnvmKudSdJ6tCUSgQS7NCXMeTGJq6GW/Hfccnw1YYWvwlM1ccUmykjoS1DpSjYKEclGE1KStles92w901CFBD/CcywwcPw9W72pJJCPIld5pJnjhx8mz92A1ZtKwq5G2kGCHeFMCvYd/ObnlfjyJ57e00NaVy4J+BGys8ZuJ/U2dnYT7NadZfhs7DJPk75fAWHx6p2uPZszUSYIss5+9vnVBHiz88TiNTvxj1emYeT0tQHUyzmNthScLs+wdF8VKqvdm/3U1Nbjla/m44E3aFcbMyTYEXGEOSmTBizD0TSZ9eFWI1FSuhQL5wn2yY9+w7eTVmHrzrKk1+ehd2bgmU/mJL2cIPCjbUr2s527fDse+2CWpWBttRSrOR6t3VySzOpF0a9ApAPXDxuP+153L6Rp0RqqazN8sEkCJNilHenRycIjsyS7TLcls8Ndy4ypssWpJKW1d6Gxq60zT
l7p9tqluj5BlDfs/Vn4+ff1CeVRrz6WHN1M+sSHs1GwYoflOxTVapsaEajtnYtMEo1gkAwK1S3q3FCjCnRNGucmqzoZS6OwK0CkIYkYJrvQPrgpOp0GGw39QBi1kwmzQmkIbSzuneiSmIu0cbc19H4SbgUSKZ1vKAbfUIwzj+npO4/YNoOSmlj0AW1sM3vFan0nCG9ZNyHyYnsJh/4S+ULzbm7ahAQ7M6SxIxxJZcdPt+UBGRHdHclmASab2+ZESveKdaGx89Mbsvn5pU3TJCZ2Th99Vpq5aBiUAAIXe9KYBzTUVlTVYsjQkZixKDU7z9TUCI1dHgl2cZBgR8QRro2d90ju0WuDrYp9/ukrd4aCJpPEvIXTZur1R0rDnThrub1UR1EUzFm2Le2M44MkXZYRZa4TTlWKaubq5Ro7P11nReFuY5+T5PH9lFVYu7k0liSBsVbGtl3C/vOrCSsCytEeTWOXR0uxcZBgRwRKwur9dHbBlzrAZbgAY4uf0DPZsUSd2gDFgqDe+ZmLt+KJD3/DiGlrtJwDyTc9Cbdtsg8Yp1BImsbOvORqddyJ35dtw31v/IKxutinMqXfxz8uw99fnhr9HfQ+tzkp7vuaYNwol8QYM3RH0ox0kGeCULb4trFL4Ppk37qYPU2srCCWTTIZy3elYd8WT7jZK9bqlOz4rtIKAMCO4vKE6uWGsDW06TBeAuZ62As4VmYcfpdiNU3Zph37dEfdOE+o9QnqHqb4XdBKyQl8y5bMhwQ7Iq2ILbH4CGMQdGVsmMeLAACbi5IffiIs/IzP0Yk+wyU7N5NTRVVtdFINoiw373yllz14XTyCRAPihiVYpctKv2yfX6d7EtVsxe/H57MS6tUeBRz9h2oQ5ESXmAPK0CXpItwDQE1tHYqKK8KuBgl2hIwERs2ABtx06qwydu+p9H3tnGXbUhKPLAxioRwSy+fbSSt1S4npycPvzMCtz/yccD5uNCfaubtfmaZek3hH+3XhZvzjlWmYNn9zwnmFhb9xIjipULbPbyR2UooWGiWovWJlFiJe9nn2KhBakXLtbZoI93pe/nIe/vTUT6itC7dyFO6EcMTL4JmopiZmo5d+6CfgROr3xIe/IRIBRr14USD1ShZenqQm0EXflQTHtc/GLgcAXHxKn8Qy8okbYWvVxpJAyoqFO7EuzGqulNZPcZEGsaW7Tdv3OlXRkTScY1OCdJ/fqK2Z/K4EHRJIlo8b4Spo54lU29hphKW9raiqxZ6yanRp3zx67Lel20SdwqlSFNLYEXGkh42d+wxSLQRGEElYoxj0YLRwZRFqVC+xdCDjl2LVvyl5t3wUlsrJrK6u3marp2Du0LotpRijM/53Il3eLz/OE1b4H1Piv4Y9bBUbnKNaqm3sQn4FHnz7V9zy9ETDMc0+MmzFBAl2aUrYL21opEkYA1siAQ6GAbBmUwkeeW8mPhy1NNB8/QzQfie1sCneU4lp8zbFn0jBc9Y0drbOEz6qEdQE+8RHv+GKB8c4FJZYGXe9NBXvfr/I/QUS27YwkGl2E62R38em1/h21mmRRJ4ytZ6/cqzICVgTme6s2VQad0zb/zfs6YEEOyKORPplop068K/IANG3LZ2qV1pWDQDYXLTPIaVH/DhPOHgEmqmuqcO6LfEDZKp57D+z8eIXBdhbLu4lAl6msmPDNrGNUrLe+URznbdih3XeIfWD6apdYIUXZ5IkYohjF3UgkhO04KPP77B+HQEATRrlWKaJHUuOEJI6r9j0lSDDnh5IsCOSgt84dmHb2G3cvhfPfTYnbm9OPZE009hp6v+cEOqkDa0/z9kg/uHRxu6t/y3EXS9NRXECzihO1NbVY/Qva1Fn80x3lghPtjrV6DmRsDteeW34AgBOz89LwBPB/JVFvuuU7pTsqwIAbN0ZzMfM356fhP+MXOL5OkWmsoODZBcwbuxBZVUJ+iM66hWbciO7FJeXAZBgl2akg7iQyBdXon0skXAnQfDa8Pn4deEWr
HYwjE+H56SRtC9vH9fE5Dp3V/P1xQCAfRU1Pkpzx8hpa/D+iMUYP6vQMo05TIssjEXSsS0qvWevsJbfWjVvEkg+G7fvw8jp9l7YKwp346mPfosutwHyp+KksbPCa9+J1kE3ZlrHlYw/YXae2FFcjh9/XeupbD2xrdIaho2dLbQUS6QdCYRX0jq1X1f+wINmeq6A3Sn9Wmzyq+KWoCPI+ylbI2KxD6YV0Wj7SfzML6usUf9aL9tZaZhTeUuDKipZd1LqKJCkspw48dD9AQAH9WiXsjL//ekc/LZ0m1G7rN4SQ7iTkAYvu1KlGjv14MzFWwEAw96fhfd+WIxSVRvqufxUhzuJFhx/qHRfVUIhqTIdEuzSlHS2H7Bjwuz1AIBFq3f6uj4aNDMkY3G+QWiQ6iSChn67NN9bpiUBTYjODTgCuy/nCY/pc31uo+SWhSuL8OtCsSm5l/YEHQrCDfYCQQI1CUjQCDs2lx7tgyC1MlT82CQLVRPVvKVqKdZNGqmNnfg7flYh1m/bg33l4gPIb18MOoyLW2rr6qO7rWhcP2w8/vj4hJTVYduuMkNg4rBnBxLsiDgS6ZfGbW38Fx62DVtVtU3okEh6OU/Uh+iJZVWmWyFKuz5ZGrtH3psZDQbtqoQ4bXXqbmpOTgQjp6/BkKEjsU9z4nAgmdUbY1qWs7M7TfWHqNX7Vbh1TxLLtDkpfQ6W66LyLHw+TJkphuSz1DYP2/HOJSkXaNWCNmzbixuf+ClUR5pbn/kZf3rqp+jvsOcvEuyynJraOluj8aBxOyFZUR+CpkSG7S4ACL/j6kmrpViPQUqTrbHT42Zi1pK42b81aCIRYMLsQgDArgSWkeKeic983v1hseG3dBzxa1CWIFHDf1PrErERc1um3slFu9dbd5bpQl2401zpu+u+8mp8MHKxdeIEkUc7UWx/e0Vrd6J9ma/f7es6T9vtZTkk2KUZQU/Olz7wIx58e4anaxJZ0pQtYfqheG8VZqhLaE4kQ6Bx2lg6jeQ6XRy0NKqUy9cgOhmkwpXO5r229DlNqY1d/HJeWpGO75epSl620vJcpMRlXz9WLlgpwsL4uU2fjl2ObbvKfdVP/2Fnda2djZ0Zv2YmQdnY3fv6L67SmUtJxcdhpkCCXQNgeaG/L6Aw0G8g/exnc0Krh0xI0g9Y6TTHRcOdBG5j5/0a6w3O7UmFttGdLZIW7iT1k8TiNTtdC7h2t3fR6uSEOQnahjMRErHF9V2mzsY2Vg/JeR9KzERWVfT3Qvv3JC38ULRu1l6xcf/OUPvuoJQK2QAJdoQjqRzOUzGoTJ8v2WHAjN1SbCS9nCeUNNLY+V2ZS4UnnV0Rcbcumja193RzkbAHNAu6+p/3v/ELqtXt42S1m7Nsu/Tawq17ErJDapQbX1pozusSIUscT957JPXYlxQXDdLtoSpePmw2bt8r/QD4fsrqqNZv9pJtsWDbFvWMX7JP7Glq5bUMKASNI6b6p0TrnyGQYJemNFStsrndyRioX/i8wDGNbIhL150ntPEsEnBvTuTOu31sKdW4eGhRzJ4qOXVJhOWFuz05CeyrqMGu0grc+eKUhLTgOTlpOF2kVGNntKEDjO9U9HAS67R6Uwn+9vxkfD91ta5i4k9dvRINtG1GvhQbrI2dFouyZbPG0WOP/2c2PhzlPfCzHxrqnCkjDXsqEQZlFTWoqhFagDA7SNxgE1JdFABTCzZG70k6E67GzviAYlVIfEkxcGzLstD8pJME7xNFAW58Qnjs8QTMMuyE3OkLNvvON12prK5FTW2s/0fN+gw2drF/axovpzdG9s67fc22qxq5lRuKUbBiO1ZuKLZ8rY1OHrKlWN2/9XXxK5lGHY5i189dvh0jptkHft5TVo3rh43zUZyxTWRjF4MEOwIAcPUjY/H3l6aEXY20+epatGonXvpyHj4ZvTR6TB9WINHQBEESRLiTrTvL4u18/MSxS+ONwN3Z2Bl/p6tY5/c9atI4N+CahINVnMFE3
7tq3YfcFQ+Owd+enxyXecRKYFIPx5wI7MvyM4ZEHaVyInjsg9kY+tp0y3KCtrl1jcdil67dhdJ9iUVTAGgpVg8JdkQUzb4nFUtwltfHueCHgxa2pXhvfBT2CCK+l+iSIfDUS76UvVBUXIE///tnfDJmWWB1ctvM1Bq/23jFupyMg6S6pg41tXKjeaf74ree2SLYaZjvU6LLieZYfZrNGqAzeTCUp6uL6V/JsBfWhJdcK7WhDr1g5yrcSVDV9ZhPUGOpX+eJiqpaTJhdmPodM5IICXYhs2nHXrzw+Vzb4J9mvKTNONKkb9U57ubgU2Pnsz62eeq+4v2gbSG0eI1xtxA/dY3ONy4vTpex1Erzk0zB87J//oi/PjdJXh9dwdt3l8cF/vYbL1Im2A3/eaWra1P5rGYu2hK3m4C8Lql17QKsnSe0Dyu3fcDoZW+/bKqxUt0ZxyC0WaQ1LMVKzhvkoDTph4ngV2P37veL8Oa3C7FsXeZEj3CCBLuQefXr+Zg+fzNWbSgRBxzGqR27y3HJ/aPx02/rk1cp08CSysC3cXYSIc380RAiufLB0fctScZSrGSJyA9ubn3pvipTmASLvNJwpvBy64MSHJy0ANt3l0uP19crUW3eUx/9Fn/e5+3Naxwb8s1V27G7PC44bBjLW+WVNfj3p3Pw1Ee/YUvRPmwpit/NxkrwTuZwYbctl6iM+schSLesm7p9y0b9sjYuD8t4dAYB1NktNrBhPuDporqmTq6NM3vFenj4C1buwIX3jsTe8upoH5Rd/9VPHA+/4y0ObDpAgl3INMoVj2BywUZX6bUv918CMFYeN3MdPhq9NNStWMzECRfhVCMm2ElGO2Fj5y9fd3Zeird9TaN19Vcnt2zbVYbrh43HD1OtjaG92tily1JsNE3Ab5zf3F77ej4ufWA0AATaPxs3sl6KvfnpiXHBYYdP5NF/p6ovllWI9hbvrcJtz07Cbc/KtZpAMDKEWwcprf1GTVj8XfG1jO6xIW608w5RWQwfB0EIxF7z2FK0D9U1dY4fpJf980e8/EV8JINE+ur/Jq+CogBrNpVEn3+eRJv95YQVvvc9DxMS7EJGW+obP6sQG7fvdb4gwInw7e8W4Yepqw2DN5DoAJ7g1QmMMEF+rWtfb4alWL2dtG/nCec0F947ylNYCm3ZOFGv2Hjth7Gy2ibXc5Zvs84jenFCVUk9pgB8YWscubrk9sPU1ZZaPSD+Njttx6QXCNy8Lobg5gF0sJra+ujSv4w9ZdW6gLvWFbR6PvX1CuYu3+5pHNm+q8xVOu0D6relsfffLgSSF+cJJyHMjJNwaa6Qmxh8svqWVdRgyNCR+H7K6viTCVBRVYvbnp2E14cvcPWs3Hhda0oSN8S2P4vtk5vXJHvsTxu5ScQY6wvgXgDHARgIYAXnfKAk3WAATwMYAGAzgFc5529I0t0L4HYA+wFYCuABzvkkU5pWAF4AcDmApgCmALiTc15oStcPwBsATgJQAeBrNT/r0TCN0A+0ldW1+Fj1wmyortvmVptvw7R5m9Bjv1bovX8bd/kpCr4YvwInH9HNUz3qJLs5RAfQiH/52u2EM3PRVg95ir+RJKvstHtRV2fnhJA8w/FE8RegONFCE7v8I51XthsKt7r4OPRA0MPQc5/NwW9Lt2H0SxdJz1/36Di88o9TXNfLLPxNnbcJU+dtwiM3HYNjB3aVX2v67dY2VbvuxS8KcMqRB8Sdj777AXipO2Fot5Vcp/u3NNyJxYX647vVPYt/+m09Lj2tr+d6WlGiOqWtWL8bJx++v688zE2SBdC2Qr9DjhbSprEHwTDdcduSQwCcD2A1AKnrHGPseACjAMwHMBjAxwBeZYz9xZTuXgDPAHhLzXMVgDGMscNMWX4F4EIAdwK4CsD+ACYxxprr8moLIfC1ghAAhwK4BsBHLtsVOmYziKiRtNOAGuCA20IXUFKrh18S9op1uP7FLwpw10tTMWp6/HKgbPAqq6zF8J9X4sG3P
NpJqFnJNvyORP9nzZaifRgydGRcINlkiDv6MCx6Zi7agiFDR0YDhzrnY3/eabuw9Vv3RG9LOn6XuFoGN/1NdHJO9m0w3+fGjYKVJvSTfBBt0Wu7rNijhr5wde8t0ui92Z3sBN1qumXvtP4D3Owz4eXd8boC4MZ5wsmWz7KPGpZovQupbpJqu1S0at44MBtuJ43dtl1lKNlbhbf/txDzuNjXV1F0S9LpGtvIB24Fu9Gc8+6c88sBzLNI8yiAeZzzmznnUzjnTwH4EMAwxlgOADDG8gA8AqHJe5FzPhnA9QDWAnhYy4gxdiyE0HcL5/wrzvkYAJcA6AHgRl2ZtwFoB+Aizvl4zvlnAO4CcBVj7BCXbQsV/UvtRkuXjHcvyMC2xw8SX8n7dWjukFJOvNAgvycfjIyPZm5nuuLbk9ji1jgF8dSWDsy2kMmJYyf+mp+jtnyyyWmJX3eZnb2RtvGA/j3Vt2brrrJoXmHLdcV7KqPaBjdky5ie67A7hNeubghi6+Gh7iguxw6bJWTbMrWlWF9XC8ora7CvvBrT5m3CRfeNwtad1sutrjV2uhsQFRalPgla/d23QJ9y3oodlmFwNAwWIm5sRx2kTQWKdDtAL0KqFy19eaX42GzetLFDSrvyjNi9+/P4Dtz6zM+44bHxGDerMJaHR3vmTMGVYMc5t33LVIHtdADDTae+hFhuPVL9fQKANhDLpVredQC+ATCYMaa9PucBKAUwXpduA4AZ6jno0k3inOutG78DUAWhNUx/9B00pCgm8cuf/l/0owd0AQD06NI6gRrp65LY9f9861ctJ28XOgxkTgOdFui0SWNTF0vCGBINWmqqVKNGomynSULPJz/qAzIbz0WXYi2srhVFv09muIPl/z0+AX98fILn67R6h11/vzi9l153FfBrEnLzUxNx89MTLc8nen+tAhRrfPzjMlzzr3HRDyu7LdicNEa/q1pGfY21PmDcUszoOJTI9/KE2YW25105T6gVGPPrWtzw2Pi48/Wmvit9JBZL3l6pqa3D4/+ZHX0OicbelGGXVeGWUulxRdEtnWfN511wzhN9ADRB/DKtNkv0V/8erP5dLknXEkA3XboVEoFyqS4vLZ2hTM55FYA1pnQZgWEQdfKs8ighvPPdQgwZOlJ6LkjTLD/j9QqdgXZC4RUkl2oDSSBeX+rfSCSCH39da5/WYuBK5lKsebBvogp21bUOXn+6ShXvsTZsd/J4/XTM0szdgStJFU++fGgswG95Uyy88o35xX7sKq3Au98vQl29v69Ru27upF3S1ySIZTwn4eJJLdyM7mZou7TInCcMJhsSpE03Ja528NT1IhB9OjY23bpZijVoJh0EaLes2liCucu3461vF6iFSCrkFVP9/bz79Uri1sDzVuxIMIfgceU84YJ26t8S0/Fi9W97Xboqzrk58qQ+3SY1nTkvLV173W+36RxZsiQ1GxUXFBjdtveUxr4kVvCYd2ppaWlcWgBYs00sL+3Zs1d63oqxMwul5QPApk2x5cKCggKUVRoHlV27drkua80GsfxSWlpiuKa8qh4zZs9BU7MGC8BjX26K/nvZcqPMP2/efEPd9Gi/KyoqoteW7mgirZd5uyyn9uzeJYTNoh1F0bTlZWI5Z8WKFdFdOqzy2rpNPNfNmzejoGBvNF1VTb3tdV7qqLFpkxBet2/fhoKCWNcqKxPlruCrgLJN0msBYMtuYe9SXl6O4pxY4NuFCxeiZbNcaTqtbus3xGKMbS4qQ4cWYphct24dWsM44MnaU6a7p/t25sWd9/KOO7Ft2zYUFMiXZ6ur1CDNS5Zgc8tG2L1XhNyora0N7DkF2RaNNWvWokl1zNFmXaH98ufefbFxY8vmmBbr5S/lFjb79sWW8QvmzUdj1UD9q2k7wTdXYsOmZrHzFu2THZ87t8DS2H3VqlUAYs9ElkepOm6uXr0KkXLrd7u4pAQAsHbtGjSp3gIAqDeNBYsXL4r+e/6CBdIxqqCgwCCMz
ps/H3mNc7B2W+x94pyjYnceqmtUm9aIvO1VlaJd+jmnaEeRIc3GTZtRUGCtZSwqivWtrVvldota2fX1uv1uFSVu3NTqXlsr3vlFixajdXPR77cWiz5fWVnp+P5uKBLtKisri0vL1bmtZM8+fPbDL9GPzr179mD16niPW+16vZBpznP1FmNfXrR4EVo3z8WCteVx16xcUyKt8+rVa1BVLdq4ZMkSbGklF4ns2v7GcHkEg2T0d7cEJdhlPAMHDkReXvzEEiQFBQXIz883HBs1bxawVXTSfv36AT+LDt6mdZu4tACQu3IHMHknWrduFXe+oqoWNbX1aN1CJ9x8aRz0Duh9MITsrDt2wAHAAjFQ5ufni3AE38cmiw4dOiA//0i4obLRFuDX3Wjbtm20fuWVNbjq4bEAIPeG09Wxf//+wPjYoHXEkUcA32yO1k2fVsu/2ZQpQEkNDj64P/p1bwcDanrxZa/EXWtVl44dOwCF5ejUqRPy84VfT/PpU4HdpaKOE2J1lOW1eOtSYNledOvWDfn5B0WffXllDfDtFld1kJ2fPHcjunVqAdazPX78dS2OOWQ/7LdrI7BoD/bv2hX5+QdjyZqdqKtX0KlDPVZu3oIePXsj/3Brr+C534uJbVtxDfr06ARsFAP+oYcdinatmkbTtdlYAozfgebNm0frtq1yHTCnJJqmQ4f2QGE5evXqhfz8Ho7t+fLXacCuEvTv3x+V1XWYUrARd199pO01GhNmF6J1i7yoXacx4/jJvkuXLsjPjznzb99djsqqWvTs2hp54ycCZeUYeMhAdO3YAlt27gNGb0Pjxo18PSdZ+XHpJGm8cuCBByL/0JhX4b7IJmCmdciT1q1i48aq3RxYbC08AECrVq2AHbsAAEcecUR054qxC34DNm9D23btgfXy/qlhaLd6/vAjjjDGDdNd17dvX2DaLjRtmgeUlcfnAWDk3JnA1iL069cP+f27WN7LNm3aAJsr0a9vX+Qfsh8AIOd/W4G6mLAzcOAgYKQQjo44/PCY3Zd5rNH9PuKII9Asr1F0PAaAg/v3R/9e7ZE7YgeAakQk9QaAvJ9+BvbV4pBDDgF+3A4A6NKlM7Ay9pEkxo1+8Q1S69ClSxdgxb7Yv5fF29FqZTf6YTuqVGEzJycSPZ43cRIAkQdjDI1m/Q5UVWPQoEHo2FYI7Ks3lQDjdqCFrs9b0XTtLmBiEVq2bIn8/HyDYMMYAyYWYVtxDb79dTeuOZsB2Ik2bdqgb9/ewPRd0rrX1yvAV5sNx6K02A5MjVlhDRo4CEvW7sKo3+bF5fPYl/LVqt4HHohGCxYBqMKgQQOxX4cWxgQu+njTpk2BPfFBtJ3uVyJUVVXZKqOCWorVNG5tTce1WXa3Ll0eY6ypi3TmvLR0+lHLbbq0xVIT7bQUK9Ef3/7CZFz36Djb62SBjZ3iL3lZ7YjFoIod21Pmfvuj+N0PnBXlm9XI9HZJfavbJVEFEvF2S3Rp7pWv5uHe139B6b4qvPfDYjz63kyd55oo68G3Z+CRd2eisUsbu+3FFloeta7bd5dbb2FlsUuJUzt37C7Hq1/PQ3llbbSoR96diUlz3AXqBoA3v12IZz753XV6M7c8PRF3vDgFQPJCU2SmpV4Mvd2Rvi2xZUd/LVRs1mK95OhkF+WmegZnAZeF3/3yVDzzye/SnSfsPEm37y6POnIYHFPMdXKoiBdnMGtTGw83J8H+YX5OZRUxraZd3lb3YcvOfVHbx2haeJtrohdleieVEJRgtwZANWI2dBoD1L8r1L/aOpss3V6I2HdaOqZzptCnW6H7vdycl+rI0ceULm0xBql0YRBrk0YLIGuHrJMHOalpDiD6dskielte72PrCTfOAUHaOrl5Tpb2KwHVQQucvHtPVTSERPFe49JETLBzF1kfEN6kZm55eiJuf2GKp8HdaWL6fupqTJqzMW4P1LQhQwb7OIHA5XVT523CF+Odh0jj1lWx3DV7Tr/96t7Xp1ufjNqoy
V+48soaVFa7f6cBYzvMdrx+hNMtO8swa/FWSOQ6W/u/WYu3eC5Lhj6WpJdAyPpObCVbGwVOzTHLaw3tcWu7J6tiXV09bvv3pKh5UTStoqDeo82nAiUrY8YGItipDguTAVxpOnUNgG2IhUiZCeHtepWWgDGWq143nnOu3eGxEJq4c3TpukMEIR6ry38sgDMYYx10xy4BkGdKR6jIjY3NBv7+X3Sna52MgpPXxVLbebXSauvqsWh1zH4mKG9LbWKtqKrF2s1iGX3C7PWGNFpcJ+k+izr0E6h+I2z9VVahQ8w5u/1IKHMZWy/VaO9vTFGR4IyW6jnD4f3Sns9Lki2avKAZ7/udFNdvsw7BE+0jFrf+qofHxnbEcFzZMGqzAW9e4k7o+3NMW219T+yCexvztT+v19g55WjlaKGv56qNJVFtlyF2oWmZoqKqVpgpmFi5oTjuw9IOqwDTVnXUJ7Mazwwx6TyQqR7wdrjdeaI5YmFGegJozRi7XP09h3O+HsATAKYzxj4A8AWAEwHcCuB2zbuVc17FGHsKwDOMsSIIge8WCA3btVp5nPPfGGNjAHzIGBsKYI+a/wYAn+iq9h5EAOORjLEnAXQG8DKA4ZxzaSDlho5TRyreW5nQZCTrI/pDl/3zR3z37AVRWx3bxPE/Hcq2Th3EZuZW2a/bUop1W/bg9KO6x53TtCJDL+lqm0cyiC6X2bT9X+/OxIJVRZbnnTC3p0Z1DvGyvY8XXvqiAHx9Md5/6EzP19ruPGEKwBfUYJ/0HThM2e+xWjJX8SqoWm1BpR0Pol+Z0bKU1VSLfxath0Nebmone9SV1e7255WOd3aCXb3iKp3Te+P0sablH4lEDM/QqIGN/fvDUXJ7rajHvXrdYx/MwrJ1u+NspYe+ZqOBRfwHn9MHgVZ3swC4cFURtu2Sm44oUHy9j4m+wukYCcCt80RnAN+ajmm/bwLwCed8FmPsIohdJf4PwBYA/+Ccv6u/iHP+ImMMEIGEu0CEMDmfc77QlP81AF4E8DaEBm4KgCv0W4VxzksYY6cDeB3A94htKXa/y3aFRl1dPdZsNsbWWVa4yyK1Po34UvW7MbEshqNezb5qYwn6HdDWV96AOzu06po6S8HO3OG9TLB2KQOZfiwyueulqQDEzgs3DRFxsc31rlW/1AMTGFxko32p19kkthPqXAU+Nd2U0jLhGde6pX9HJKtyd5ZUYOo8/w4HtpOlUa5LCgtW7sDhB3VOYgnAByMcvPs9TkJmQbCouAJzV2wPNP6YZdmSIsxbrDlWIyoY2CXRCVvq33Wb7Z1KbIu0eYkMgp1tJvZlGDR2LrW0gPM2Y+ayY8pTcaVeow+IUFV2MQItcXgudfUKGuVGdDE6xfFH3p1pm6cbgddwidKANXbq/qyOPZlzPhYulkA55y9CCG12afZC7Cxxm0O6lQDOdSoz3fh64kp8PZGjTcuYB+vn4+JtXvaUVRu8XFeqm4P7JddhJDT6jvpAEnXd3HFsN/cOsI/py/War1bFcTMLcfOFA5HXONfxK/r7qaujgp1Vfma27NyHXSWVGNS3Y1ydE8Zs9JMElq41foxUqxo7za7Sysjb1tHF4ty/3rMZ1AMmGWP9v96bhe+fuwCNGwW32Xgq9+RVAAz7YCY2bt+HI5kQUL1OpO5LAmRTTkWlUZPm7Dwh8pq3YgeaNmmEQw7sIEnkq5KG/PX/jm5KIcnXEPfPptzPx6/A2cf2RLvWZj9DgVGws6qbNu5YLMValK0/rlg/CgDAfW/8YpGLPdruD3OWbceh6thnqINWsMslWy2pZ42dTrALamuzdCB7dr3NMAq3Cm1d6T7rpZPl63bjukfHYcbCmMFtohO/dKN4g6o+/ryX5Zto7Wwusf16TqR9pkuDmnNGTI2Ps+QXc/Nu+/ckPPRObB9bfZ2HDB1psM8z5OOiLG3SzQ3Y8nnt5lK8/d1CrN5YgpmLttqmfVMLSGpi5UbrDxSrtun3/0wZiZrYmRrj1sbKL
c99NtdTes/NMTlP7C0TS6H1USEmeMHObueGOO1zxGlJUzDql7W6XWjkafSFu53jFdm/XZqEON25d39YZHnO4DxhkZP0qGTv67jrlPi8E9bQ2lz+4aillueiGjsXY5ii+HOESPwVTj+BkAS7kHDzdbBqk5j8lqyJLbt6+SKRaUucBLecSCQh4Upm92zOzm6QSKSP+QmVomfbrljQYf09qZFEmfdal5jyzNm2RM+YGevcF2pCe1dydPZupfuqXGtZ7No7bmahq9ACMxfFewHW1yu2e3daEbRnnp7o81GCXTI3E7YHnletxHrzMlvE8Md1hx0ydCS+nGBckVhRuBtDho7Ehm3GMuzMOfxoZByTSJ6J68ckkez0VTSPwUZvVqexwN25UdMtdsFxaoSLRu4scY60YCYIxVf8nAHsdbAfVRR/Np9JUTqHDAl2IbGv3L1XYPHeKjz2wSzsq6jxJFy8+338F59juJOENRTOam2p1tB0fex34nVxi9Wku7u0Eu/9sEi6P6QVI6evsaiU/LBsiyLZb+cTMbRJpZF6v8sqanD9sPH4yMJQ2isO+80DkA+ac1dst7/Ism2Je6iWV9Zg8Zp4+9RUrcKEPYlUVLlzCtAwa0m1rqv1by8T6Vc/ccNvLabmPO7svDNj4RaU7quSmHU4CEAu+qoxvIc35EKhOLZrTw0uuX+0wS7Uy9L1rMX22nDHukmOeX3PX/hceE+70Zglg6gjTSTiausu2f3dUmQdUkmBEpuz/FVRBPRPM2jniZCoqHIh2Knv6AxV6zF57gZPX/wFko4g05ZFDOddZy/FjcbOrog4wcZL2abUTmPoisLdKNlXheMGCo9VK03ixN83eKiFVd2Mf83U1NYjNzdHItj6lwSiS7Hq1k1lqkfhzEVbcOvFgxLSGAAul0ckD6HWIdzEtPnxQbSBxIUvBcBz/52LeSt24PPH5Wa5Wps320wGXsvUkwwvUi8sL9ztnMgCYbNlfAiJeFRrxD1XiT3Hs5/NwSEHdkALbVcIl/jtPm5tF/WPM2oWpv5jW4nob7MWb8GpRx7gKd8gcFTY+bzOD167bnS81CkKnMcrRZpm2AezHK9LBM9BkVMAaexSyIjZu1Ggaiscl0Qs3jUvgp1sEnGajO3s6eYu347Pxxn3ct22q8zUMSRfsKZjtk1PSENn+u0wid73xi94+uPYzgX6e+P0eJo28WcAbzmIaOEjXGos3dym+jrNPkV0c01wjQ2aLjKxIddGZRcNtSK9zv7mvvKVfN/SILRqhVuEbWu8mUIs8+XrduOZT+aYjvrEdAPqPAZQTTeC1NtEl1xNx7UlN/PzLiqpkI5/Mg2sp3q4cLKynvx116r/NodrsRoeEzMndnNxfBo3z0+Wd1h+BVFzkoiLECmQz3nVNfZ9LmwtejIgwS6FLFhbjsc+mA3AnTFq3PumuBsMFq/ZiXkrdkgHI6lAqd/9Ise6jMf/MxvDf14Z/b1uSylufeZnjNTZeEi9qDx0nLjO6+IrzXVeDrg1EJ7Pi3zLn1ZV0oKmBvm1bHae8Bp7zGnycOOUIXs+fpd1UuW1tlVna1m8twoLA9BKadTXK/j59w3YtMM6QG86MaB3++i/FejMKAKxo5Ko9wG89T9z5CtBbk5E+u7ahcBw059ciUiWAp+Liy3zjF08+hcLOzkL3PThAEzsosjGRqtdNKR23D77bsyRJgLHbyLFaqnbuqGGcCcRYaqxw2qLxQyCBLuQcPOeyzqeVYf+8dfYwPDQ2zMw7INZUsFm9C/xtl/GOEeyTimvn7ZctVwXfy8m1/nsyB7T241vdgPXlIL4PUndChz/HbccVR63NIot08jPX/uvcep583JyIkuxYiTUBLCoxk6ri++cBW7ul+z5+BbsfF0Vw+4jQP+Om6v3no13omOZpt/19cBrw+dHYx+GwYr17pdju3bUbYquKNFnEIiI7fACmsvIicgFOztc9R+DFk1R/xqTrN5U4nSpZyEvkf4nM7Oxyt8qQ
LFVDYZPXGm504yeNZtKHdP4xezEtKes2nkpFvL50YuAe8+r03HzUxNd1y9dIcEuJJwmt+27y7F6Y4nhmAL5QFVbV4/3flgcd1z2kq+WdEaD74SDMbIsf/3XnOxaLwsCcTZmTpWwC3Vgc+7lL+OX+1KhD7LTgr357QLXzhNunpH29bpx+148/p/Z8c4ffo1wVGy9m6NFyJ5+/HWlLkKZJMPbTkN7l/eV10j6pvj90eilGDJ0ZELla8J2kNtaeeW+1/3FHgOAHep+1EFoT50+As1l5OoC1kbTBNBr3Qx3ljsr6C6urK7F356fHDugmSOEJATIy41gzaYSWyeayXM3xm05J+vrqWiV/nk7yfSKooCvLzYcq6mt91RPt7a1aS7XkWCXKtbqdplYt6U0LoK3mc1F+zBtvjHKvkFtrCMuJIGK649b/VKshwE7KtgZJkJxLKJ7szztHuFtJda206aj7QQvLLY8N2H2+kAnAS2rz8evwNzl27FWtS/TljTMJR3er5On/GUfJ369mp/S2TomDV1dzO/5FjX8ysTf18ed037+EEA8w7DDnSSCf/MD+ZWyfUDtyInEC3ZBoK+fV0N4vSZv1cYSbNzuYYk92a+CJP+a2jrc/co0/PuT3237ZpVpT+/q2jrsM4UbSarAKvn2dN6GDNExTmNL0T7beurP2H0kmDcGII0dAQCGWE1O++rZIRNWrF54t8sWcRo71x5hIp3e1spun0c3eO0wxk24E8vLD7Ko6XY8/7l9QFnzI3OaFO0wp3n1q/mG4+YsWjY3ehwqANbr3luzHZDX+7t1Zxk+HRPuFs5WNT7q4C4AgEP7dYoTWBMJzmq+RWF7xYaBkwOQ27urKArWb/Vmm+j1HTVo3Fzwv8mrdGVZ1UH3b/1xj5Kd112HZLlrWntH72jTxcvW7cY1qrkIIByMUiHbGDR2Dn1H+qw9rEDZMfS16SjRrSqkey8mwS4E/C/DyDc5tnrh3Q5qTjZ2VtRFvS511+jc092UZ8Zc40RCcnj9uvfTWc3CEAAMn8jjjrnNO8g4fmbMAT65ydZK9szueGFK9N+T5xrtEu2qJntuT340G/+bvMrgnJAuaF7OOZEAouwbMN6H5GzBlRr0j9TLLXJssUVm5sPrt+11DFIbV3Yqb3fcMrGn5FIWrirC3OUikoJXhUAyP2zvf/OXlHw4G4RiFzZ2ZiJwuM8e2lBZHVu+dhN6JUxIsEsRQYzniiIX4qwmCz/agR9/XetaColp7GKvkexSu3d84m/rjWm9buKs+7dZKE1F35JtqfX5+Pg9f93eU3Od5/EdWGdaXkgUrYgH355hn87hBsrerxUmGxc9idqVWVVnV2kFvp200tPAv2N3zPPt20krDfZe5hXmSASGJbZEBm3zXqeZhO/JzEllZ0EQ9nNu4vYFNU7EZ2N0VhL/9lbYI+/OxOP/mZ1QveJrJOpqK+8koJOSjQtev5W0HPR5Oc1n1TXxDm3iY9W7gkN+3t6WXE/Yn28k2GUQVjZ2Vi+8exkp9sJOXyAPDivNX2JjJ3eesK7IhNlGwc6rLGq2+9DjeTAxLOsG70pR6SLqv+z56pd7AiGgmcyt7Yq52KCD2L/weQE+G7schRa2psZKiD/6zcs/G7vcaH9nqmAEEdslOr39bFxxphvxwJv+HRfSHa/ym3b8s7EWy/Mpip0WlHZlwUqrfZ1F/jW1ddiwLZgPBN/ox7UEi7d674O0g9TfI6e5QbYvdSQSnK21/nV0bGPIkh0Jdikj8Se9bVeZ9IWy1Nj5WIoF3NdUE+zGzyrEtf8aq16sLcVaZ6g/xePsRrwtRf770zmxfE3tSGRJLWi5ToG75+H2mXmM4mCgzEJr5LXJTnGlVm00PttYvKhgb265uqOGc1gDFzdNshQbMY2S5mze/k4ed02WNoNXYg3ItGmypq3cXGH5YaI9j/IUajFlzhFBPRIn7eCb3y7EotWJBVP2gqKIuWFXaSx0SUR3LtFdMOZbCbIBCHZaH
vrx0CnfjdvjPVpzHMI8eKqph9isYXdzEuxSRBAB58fNKpTmY6mxczmLFBV72+hZ29dUL1DuVfe+1S9p+cHrxKd3b99ZWoE/PfUTtqk2XInsbxj4x3QiApvfuiTYBqcq2wqhiogJJatOsDZs3rCazPRtiRPsHOpr18/CHuCDxOgEIFPNxx/7ctoufCEzTXCBq9fE46t03aPj4g8m+SFpt2W5KRKCl2LNoa9clQsbT24fzldukXUH33FNdXn5cjyKONkC65IGaTcaMiTYZRiyzpaoQfbXJmN/pw7960IRcVw2sWvHbDX+kQjKK2uwq1QiUCrmn+7bNnnuRhQVV+An1W7PbT9Nxq4PfnFbh1TsOblT9nz0dbD9Eo4/p9lPBr0U6/aejZ1ZaHlOmzQikfgB3qm6dgJuOrxTiaC3i3TaeivopoYn/idnmTRuBz4PRfzj1WneC1SUuPArsViWri73RZD3zhjHznu+W3eWBRYzUi+cJrrPdrJpFG7xDYlgnrRVgOJkYtZYaDGONK9YDUXo9+OQdYLbX5iCnSXxgkP8lmLu6xlb6Yt4urRkbxU6tWtmOJaMpVhX6ZJkn+IKU5udYnr59Tr2vb2Qw3k32Vo5L+g/jsyaXidTBVuNXdgjfIJMnx+zuXWOI+bxfQjigQZAKj6SgNiezdFyk60phP+PDouh3F25sunI56M0Op54v17bwjMIDMqKNO/WJNiliKBsa2T5pDqEgjaRyTasl64kSg7KhDqR2Pjz9W8WWNbDvMSjD3g6duY6adgRGbm58d5rqUC2J6ZsIA5rDHGMG2XzPSHV6MjsLwMkEbtD7eNIeMXGO0/4LTfNx39PGJbFZO+px8ami9CbdAErGj3A5LWf5LdDUaz7qHDEc5GBD6TCpI+s1m/bg607Y3Zzyblf3vOsrqlDmWrXG1yuwUKCXYoIavCQhjtJssYurg6aYaupLgqMWjNFUfDpmGXo172dIZ3dNGnuvFoMJxnmJeQN6rJDTiSCd75zv79nWDZf42cVxh1z/Z4kIMS4vcBp4q3zKNEkyXfCm22MRZXrPWjsRCaxg3YfVmu3OW+VlikYDNlle3IGXF6oS7EB5KG9N1peidj8+kFRFEthyM0qj2+NnaLgv+OWo7kSe/e368ILucvDGEMzWejHg6kFm6wTIvY8//HqNIN3szzjBCuWICTYpYxgnrR+sl29sQR9u7dFbV2wb5Fs8tMHs9UG9TghU1GgtTMSEQ4V302JN961+9pJxB6iyOc+llGNXRJ7YyK2c9ozX7p2l8dCE0vuZm9GK2RCX+xIsBNcEB9N+onO3C7n98m6AttL7L/sMwmj84TsfLD9J1XfW+Z6+3FUcINZsKupSe4H+YZtex0c45JjJ1avAN/8vNJw7NnP5liklrNph0RwSrKw9IluV5y5y7dbxg90FOqQuuV9K8h5IkUkQ2O3bXeZeiz5Grt7dRuHaxoKs6ZCr7EDrAd6u44RhKGr1wkhSI2dfgsuP1i9J1U1dfjnW7/G0iVUijuclmLtzg97f1b8wQQ0dmN+XWvY0kdGIo8x+i5H4peSnGzs7Pp28b7MDUhsxhBTTKax8/hSWoXLSDU7SyoNv305Ktih3hfzUuxz/7XfXjBRHnpnhqsAzTJK91V53uVDIwgBXz/fhMFHo5fEHUtGbNNkQYJdighqItZPOtqLFrSNnflrwxxYVyvOzs4mWQbybvAaXiH6JR1A4XbLB27Gu0S3hzNck2CDXvlqnn3+njWCmjbX+7vx7g+LPV9jVQsZeo2d2SnIqb6KAqzZVILvJdrplZsrJVdkJkE7T1ja2aoEsfOEG8wmHYDYPD5RamrF/dDui/kD0q/gFBR2j2tHcQX++PgEX/kmYz/kJo1zUZMEk6PPxy2XHs+Nc2H2RtjmoyTYpYqAnrR+0smJAKs3ldhGvg8C824U9RZLscIgVxwbP6vQ4FHnljA2SU/F9OG2VTLbl1Qse/mhzqOmWEueYlMjA1a3LfpBAucPp
TjHbUXB3a9Mw8c/LgUgdhhIJTMWbUlJOfp2m7VPycCN/F9Vnfi9lu1YsqKwOOF8xxWUJJxHJpKMoam6pg7DJ650TuiRnaXyDy+ZPWTm6OtIsEsZydDY1dYp+Mcr02zjc/nCobJaHeInQKOO6P0Rci2L3YBtJyzMWhy/L2sQvPntQtTXh20VIbjrpalxx+rrFXwwIn5pwIlky37vfu/eQQWIaSh27wnWocDVVmIqbm6J+ePCrGkxC9XmPC994EfX9QmCZz+dgwoX29Uliv6+VEtMJoJ+3xo1cp6eRk1fE2yhKhXVwd1PBcLBze+yaLJI1vCQHiNpYsg+XGYvcT//hH0HyHkiRSTDxu5Vh6UyvzhVdcS01dhZUhF1OtAoKq5IWJNkpwR65pPfE8rbimnzN+Gc43vGBfMMlARuy4rC3Sh2sC8LAq/Pzq+A9uEo90JqbV09GuXGT/DJCvETicTnbf4QeX34Agzq2zF2QJd8nMTTORVc+dAYjH7poqSWoX8/ZI48QT8RN7avyfp4mbEwOC3ogpVFqE3HveSSdPPCWHUJGplg9/Z3i/DLApfvBS3FNhAC6kR6D1jZV3MqKN1XjTEz1mGvKYDt8J/dqcr//Ym1h9SYGWsTqptfHnp7hqcQKV6prKnHGzYx+dKBdDQOvuT+0dLjK/xqPyzXYmP/NE9M5tsybf4mw4bjeg3F25LYhKliyNCRSc3faQgL2jygsQuNXU5u+r2zMtLBdCJVZIFch1zJxyQALF7jbq/fsG8BaexSRFAPOtm7TADuB6Epkrg/bi4t2Wet6dm2y1u8o2QRtJDz0/xSbNrpz1haZu9hdZ9vfCJm8OxVq5WK5Tw/yN5H2Rd1MM8sgrr6OtMR/wGKswkn54myimBDu7Rv3dQxTSps/QKhgbwjQHYIsam2kw0a0tiliCx4113xXwsvo0wj6MHJr1AHeBNYdumMgb22wS4YdJisksQVK97rz9vUjaw7ae5G4wGH299AurbjGHbrMz+npiI6UuU5myhrkuzg5oek2dhlQYdYuaEkoevDvgck2KWIVi2ahF0F1/h9KSebJ8QMZsLs9WFXIYpMKeFq+6wsGGABYOhr0+OOPWOznO+HtVvExBuJAPNW7DCccwxPnC032oHXhs9PaXnutojLjHuvj0GZ7TSU/mDHO2PD/UimpdgU0altM+dEBCEjDW3f0pXaunpUJhAC4+mP4x10Ig7LffbR/bOHZIdV8kO6mg9kAuWVybl3Tkv2DYG9FeEu5ZJgRxBpzg6P+yxqaFooGXrj/2whAmtni0TzJVKPG23ckjUet9kjkk7pvnADLxO0FJsyzHvnEUQiuJn07IK3ptNSc1AkS0+wbVdZknImCCIb6dEpXNMrEuxSxDy+wzlRmkA2EkQmkqz3toQ0EKFAwxCRqcgiGaS0/FBLJwjCHzTppQzzXskEQRB2hB2FhwQ7Ig6SGYhMJFkanmTtckEQRHYStl0uCXZEHJPmZE/YkmyFRI14GooJwdnH9gy7CgRBpDEk2BFxLF3rbtsUgkgnGoZY13Ci3zQUQZ0ggoYEOyIOWnlKf2jSk9BAbkk67ulLEIQOsrEj0o3Vki2cCCLdyZRdCBKloch1DeNpEkTwkGBHEERW0FCUmDkNRbIjiAwl7B5Kgh1BZCD1tF7eYAl70kgVm3fsC7sKBOETimNHEIRHvvqJh12FtGPrzoaxQ4TT3rXZwioyCSEIX5BgRxAZyKLVRWFXIe148YuCsKuQEpK9EjvsluOSWwBBZDlhW0uQYEcQGUhDsScj4km2jV2fA9okNX+CIJILCXYEkYGQXEckDXq5CCKjIcGOIHzQvUurUMsv2VsVavlEeJBXLEEQdpBgRxA+oLmVCIuDe7dPav6ksCOIxAh7eiDBjiB8EHbHJRouzfIaJTV/2tWEIBKEnCcIgiAIt5C2mCAIO0iwIzKesO3dCCKVkEKNINKbsL+9AtPpM8ZuBPCx5NRbnPM7dOkGA3gawAAAmwG8y
jl/Q5LfvQBuB7AfgKUAHuCcTzKlaQXgBQCXA2gKYAqAOznnhQE0icgQBh/fC++PWJzSMmkjdiJbIcGRIDKbZGjszgVwvO6/F7UTjLHjAYwCMB/AYAhB8FXG2F/0GahC3TMA3gJwPoBVAMYwxg4zlfUVgAsB3AngKgD7A5jEGGsefLOIdCW/f2fb8+ed0Cs1FSEIgiCIkL/7k2GFW8A532lx7lEA8zjnN6u/pzDGegAYxhh7n3NezxjLA/AIhCbvRQBgjE0DsBjAwwCuVI8dCyH0nc85H6seWwxgDYAbAbydhLYRaYiT9oz1bIexMwtTUxmCyHBIY0f4pUv75miUG8HmooaxvV+6kjIbO1VgOx3AcNOpLyGWW49Uf58AoA2Ar7UEnPM6AN8AGMwY02bx8wCUAhivS7cBwAz1HBECRw/okvIynVdFadmUINyiUMATwidiLKbxNuw7kAyN3RLGWCcAGwB8AuBpznktgD4AmgBYZkq/VP3bH8BcAAerv5dL0rUE0A3AJjXdCs55vSTdOYk3g/BDJIRX2ilgazL2TCcTO4IgCCMRRJBDLpmhE6RgtxXAMAC/A6iDsKH7F4DeEEuj7dR0JabritW/WtTNdgCqOOcVNuk2qenMeWnpPEfwXLJkiddLCAklpSUpL3PxEnvHicLCwsDLLC83v54EkRpWrlyZ1PwXL06tIxKRPVRVVaG2NuxapAcFBQWhlR2YYMc5nwBggu7QRMZYKYDHGGNPBlVOshg4cCDy8vKSV8CXm5KXdxrRrm1bYPO2lJZ52KGHAiOtyzzwwN7AzN2BltmieXOgpDTQPAnCiUgEOOigg4DJVmbMiTNw4CDb/kQQVjRtmofGjXKA0r1hVyVUIpEI8vPzk5Z/VVWVrTIq2UrTb9S/RyKmcWtrSqNp8rSZtxhAHmOsqYt05ry0dMHO4kRa47QsSqFJiGwhAiR9zy/aeYLwiwIab9OBVK6GrwFQjZgNncYA9e8K9a9mWydLtxci9p2WjumcKfTpVoBoMDjZ2CVlnKGxiwiDBF/mRrlkAEUkF5LrwifZvfxqCCG+gHNeBWAy1HAlOq4BsA3APPX3TAhv16u0BIyxXPW68Zxz7XNyLITG7hxduu4ATlLPESEQRqd2+kJMhkNHTjI8MggiydCkSySbMBzoCCNB7jwxAUJwWwKgHsJ54m8APuScr1WTPQFgOmPsAwBfADgRwK0Abte8WznnVYyxpwA8wxgrghD4boHwqr1WK49z/htjbAyADxljQwHsUfPXvHGJBoLjZJWEcSaXZkgiBBJ969xc77QSmxMB6jNgtbZ1iybYU1YddjUaHBFSCodOkI9gOYA/QdjVjYCIWfcAgOiuEpzzWQAuAnA0hKPFLQD+wTl/V5+RGpj4IQB3ARgHEQrlfM75QlOZ1wD4ESIY8bcQmr8zOeflAbaL8ICd9uy7Zy9IeZkAhTshsodE37tIEJ0hQ17+p/5yQthVaJBkxtuRXHbtDdc1OEiv2LsB3O0i3Vi4WCpVhbsXHdLsBXCb+h+R5jRpnJuUfJ3nKlqKJRoOZx7dAz/P2SA950pj5+CdkSlvPhnxhwPdd2BHSU2o5ZPSNA0ZfHwvtGjWOOxqZAxhaOxIsCPSFTvBzNWc67DMmm7zdqPcNKtQAyfd3o+GCAl2aQjr2Q4XnXxg2NXwRTjOE07nk6Cxo9GLCIVIgh99Qby39O4T1pDzRPiQYJeG1NaZd0nLHFo0Tb2m0VFwo3GGyCL6dm+Lf/3pWMvzds4PkQhwzID9bPPPAL8IV2Rqt7//+qPCroJ/FKC2PnPnr2yBBLs0RFGAfZXhrtH75eqzWMrLJLmOaCho7/oxh9gLZ1YoCvDIn45xSGMv2qWbFUK2xVPusV8r2/OXnto3RTXxx5pN2bsjT9Mm7uzEe3dJ4i5WLiDBLk058+gentIf3Eu+Pe5RB3cJojquadw49a+Uc4Bi+/MtyZ4xaeQ1ycURBzYPuxopp1Xz5LxTbmSqehtJp
6a23rE/OApKSTZDGP3SRUnN3yu3XjTQdVrWs51zIiciwBv3nmZ5Ol3se3NyIvjb5YcZBc0UVu2vlx2KT4ed45wwQE4+vJvhd67Fs7jipA6pqI4lJNilKb33bxNIPq1bNAkkH7eEISQ5Bih2GGwOOTDcTpjNRADkSYT9Vs3Fe3nWMd4+YIJk6HXJ28vxlCMPsDyXbMeo+jprySwIMw9zf/IqaNh9tJ53Qi/P9bESZP0o8i6RaMMuOOlAPHv7SfjqqfPw5n3WAhcAdOvU0kepwEmH7R/9dwTBfxhce07/QPMDgMP6dsTg43vhpiGH4NGbVdMABdivQ2o+5Nq0yEP71uadR/3hVng/oLPx+V5xxkEAgL9ceqjhePO8cEUrEuyyHNkXResWTZL2Vdy4UXJCmtjh1nmiRbPGGPXihbjstL6m8+7KCUoA7Nw+uQNf144tkpq/mTuuOMzyXCQSwWmHto473rpFE/zw/BDcccXhSayZNa1bNMGpRx6Ac47rKT3vdsnFEhupwosc9Pb9p3suWm/j1Lypc0SrCxN01BpyUnCOXieZNCJu0Mt1+mVMP3vetpF8COfkRHDIgR3Qsllj9Nwv/l3W41eZuW7LHtdp3bSrp2k5d0Bv+YpOIug/qLvpBJ5zj+sVeFnyCgSXlVkT55brzu2PkS9ciIN6tA2uMgFAgl2Wk8q9If9whL/OkSjOW4qpKAoikUhc1Hyn6x+5SdgklVXE7B4Tses5bqA/+yi3NG6U2m7dsW0zw2/9pBKJyDV27VrnoVFujqW257LT+qZEk9rfYunsDI+mEGYmWsSREyQwI+neVav7U1RcEf13n25tHbO88syDDL+dBAdz7d0IM6ccEdNg2i0VD0zwmV+XgGaqzwFtEn7ufj1CNxftw/+dJ7ZHb9e6KTq0aeZwhT3P/O0kw++ke/HrHmluBu5H3LpFE5x+VHfpuUF9Otpem5MTCUWhYUfmPYEsxEpr4IVO7eQDQa4kxpPbPj7sluM81eE+G2+uls3Eq3bCoV095ekGxx3F1ASN1QDJ5onLadBrp6r795RV4e6rj/BVRz11NktlAHD94P5o18q/8W2TFAt25nn6jisPtzzndLxX19b4+F9n44bzBqBZXmDx0y2pqpEvTd584SGJ5VtdZ3kuJ4HHo39VrQS7VRtLov92CjYsw6uJ3fkn9o5LY/640NfDTnDUPrL8yiF6odHp48u8JH7PNUeiaV5iE3Qi8tPlp/fDd89eEDVTkHH4QZ1c5WX+Xkq2XV5NrehHrVo0lq4S+f1Is4tRaNeiPw1x339vvXigrTAaXWa2oVdXe01uqiHBLg2444rDceP5AxLK44bBB0uPd2lvvSz3wUNn2uZ5aF/7LxUv/HWwcOK4/4aj0UKyPGSeSJ+87XjXeTsNpi3VgVLTKNabVHZO17dtKYSsft3bRQVoBYrtFmmP/9m6/v17tTdoMMyclt8dHdp4sx3RL7/Kvh69LMV4tQFrqbMHatsqD/17xsqqqva2tc4RrDM6tm2G3JyIr6U0r5xpYeOX6Be4+X0+UWdDVesg2Gv03j9+sjj72NhH4CkmDfkFJwkBy24s6XuAs+1uba1R2L3+XKMWTK/hHv3SRehk0tge1q8j/nHNkZb561tvXjKUlaHxxRODLfPUaK4Lt+T0/uT37xxXrya65965fXN0tOmH+mdhxklr/rfLDo07FolEHHfnefgma49mg1mD6f7ZjXFu7qsezVGvdcuYANpjv1a46syD8OAfj5EKY3q7Oy9C0N1Xx96jl/7+B9fXebLzc/yScZ9VukCCXQg8+Mejo//+6knRqS47vV/0mJ/pzGoguegPMfsXs+Hnfh1aYPRLF1l6zgYZ2LdFUzFg5eZEcLtuALrijH4Y/dJFuPgUo93b4QcZB10Zrw89FW/ee5pjPVs0bYzPhp2DP10gJluzYOeksWvcKAdv3Xca7jUZ2+sH4SNZZ3z/XEzQO8Lmy/rUIw/AvdcHa7j/xr2n4cBuY
tKWaUW9LMmfeXQP9OraGh89crb0/BCTTVb/nu2jE5V2J6PLswm8Q7J+cCRzfi/MPP3XE3DxKX2k5/Ia5+ICicYJAIY/fR7+drm1/aAd5vdZb8Mj0+Y9d8dJUcFM4/iBxuf4+ePnGoy8e+zX2jCWdGrbHKNfusgwlui1wx8+fBae/uuJjnWvNmkxI5EIjuxvfd/N/e/YQ7rGPzzdb0WX/aH9OuGea+OFQJlQZuUINuyW4/DS3/+A+284Ckcc1AlfPDEYnw47x3Ycvf+Go+K174rQbGmCyYcPn4WPH7X2upRpzzTNmJOzwgCfGqymTeRa7Iv+0MdoAmFRLzOd2zf37GD31F9OwJCTD8SfLx4UKy8SwfWDD0bHts2QI1FJ33JRLK2dTa6ZZnmNog4pzfIaGYTQPNUO9o+SDxmZTD/yhQt1dTjcdR3080OmhNYhwS4E9J0sL1EjbQcikQi6dhDaHNmSif21yaiRe0NVq2XPvt3bYvRLF6H3/m3Q0+XXX7vWTaPqdvOA7GaZosd+rdFUtzRotqVp3CjHoOVJSCi2GDxusfHcymucG71uoMQmxMotX0b71k3xxr2nWS7v5/fvjC4mBxAtrprW7DvVgVOmdQKME/eb950WtW8xTOiy++DjtuZEInHPQ++1etul8doTQGh/nDSnfsItyDxTB/TugM7t7LUMbVrmeX6v9GV1bt/coNGyorrWKHial3PNH5E5ORHDx6ECJc6OTjH8O/YrAqC5ZMndbgI1a/laNmuMg3q0w8mHd0MkEkHrFk2Et6RNHicf3s1SK/vds0MwQicEWGH3KGSrEgYCFhBYj3aGvmHO3urj1esKbf+e7dCkcS7+fPEgyyVjmcauWZNc9OveFoC3sdEs4Ldu0QTfPHM+brtkUPQj73Ldh0z0Okle+nHeSzdKl/AyXiDBLgSSYchq11natRZLiVYvqNWSRdivs6Uhs8fPJvOtOdakCXF6HLLSzJOd287vOuyApFJul2dlNfFm0Ox8f51sfZxslfSPsOd+raXehjIjez/vZCRiXNZ9959n4OYLXcYmc7gVXgRmDbNXts+i49IcNyjeKafO7CkkwTx21JiWYs2PQeYgoF/6UpT4utfbCOzNPSz9f/jIWXj+zpNt6xcrxp/0lJMTcfVcZfdBG9sVAJedELwnql+sBTtv768bMw2re+dXltWqqPXhZnmNcMFJB9oLiA6Feem2+nu0dN0u9xeGCAl2KeLlu2P2Afr3MTcRS2odbt5Tq45g2QdSsB/qhm17PV8TtDZcJpRpHmpmrLze3C51XnO2y505ZEKNU2BZmzuT6o3SA9kvUtIcP5pQ80TTspncwFuGnQenLG83yJaOgPjupijOXVCzAbv2bIb9O8bHUDObHcho1byxQZtfXSNx/FCzufAPB8afg/F17dCmafzHou6n+Z568YTt3C5e62gZxy6MZTNNCKlXMKiXtQY2kapJ2xUxLnGbE1l9eHrtT27SS+e0SCRaJy9FKroyvTxPZ6Fet7zqkFJ/7+Ys2+6+EiFCgl2K6Nc9FlZB3zlSqeb1WpIsvdfwKUf274y7dF6SGprn5m9Lt3msVfCYv1pbNGuMP9g4NwDxg4yfCd4re8qqXaWTDb5BhyAwt79tyzwc3Kt91Ng5kiNPZ4XmZJGns1v0q3Exk5NjXIpNZClIlneiHJhAMHLWsz1e+vsfcKXFVn49usidE/ReipFIxBBgNW4ZVffzSNZZOjBo1xx7yH448dD94+6b0Ss2drxZXiPp8zDfk2NttlCzekZ2nsmJcPygeBvW5+44CQ/deLRBY2eHH693W6cDxeQRbDptZYMdkF7BgCwSAxCrk/6jz2kVomPbZjGNnYc6OI07siHAzbBgDkScrpBgFwJBLcUathFzkaVlsWonMAdZlKV32sfQzOO3Ho+zJN5jiUyIijxChW+0ieWmCw7Ba/eciq+fOg9d2jePDr6GicOi2vrBTLMB8rIVkRsqKr15mOppFOAIrijxk
2lubg6ev/NkHKHavTg9XfP1eapRePcu+gCz8df56Trma7zk4aTwct2X1Xw6t2sWJ8g8pHo6mrVr/Xu1QycHuzsAOKhHO8sPi7uvOQK9urbGq/84xXDcSuADgOMGdsWlp/aNOXPEhQeKv+bKMw7CoX074u6rj1CXvo3njSuxsR9XmGLoaTz11xMMv+3scq0eUXebNnrJxxL1PuREhI3k8YP2j773ToJFm5Z5+N+zF3iysTZvM2YW1vTvj7l8q3vhdcXITRgR2cd/BLoxISKCYv/9qsOjbXj/QXmEhr4HtNXdU+ub+sD/GUNt1TnssCL7mHDySAaAwcf3wu2qQ1UqwjH5hQS7EAhqhbONzt1c/xVk9orU+kOOhUpbC/kwoLdxSUSq2ZD0rYF9OuCMo+XBHa3QjLplAp75i9gcly0S8Fur1aFZXm7UsxSQPyfN/qixafDSBsjXh56KZ28XwUEv/EMfTx5gg4/vBUDsFnDpqRKDYIfZwu50x7ZN8dCN9pu/a+zvYlskrSwr7zKvSzxDTu6N+284yhDkOqiltJyI/9Ap+usO72ftBelIREw+z90h7MNOPfIAHHFQJ1x6al90Vp1UzLZt+f27RDWYXiLb6+vcuFEu3rj3NPQ5wHi9zBGne5eWuOy0vmiUm4ObhhyC6889GGcc3R0XmTyKZRNgx7bN8PRfT4yGFiq3+QjR97E8i8m0VfMmeEUnjPoZM9u2yjPssHPducFsq6V5abZtmYerzjoIL98dq6dmL+hG25zXOBeKzZeDlXOCdoXZrEO/y4hM6JCZY1hp1/QxTD9THYT269DclbDc0mI7tPtvOBoXnNQbB+7fBrdePAhnHhP/wS/DzVhi9haukpkT6Ni/Uyw8lNZdrDyO9eTkRHDu8b3w2K3H4am/nOCYPizSV+TMYpzsdtyiKMDVZzGUV9YYjj/4x2MwZOjIuPTN1I5vnkBOPrwbTj68G0ZMW+1Y5p8vGYSnP/4de8vFsqDZ7d0tfQ5oC76+2BBC5Lk7TkIL07Y9r9x9Ctq1zsONT/wEQNgnnXjo/nH5aYx4fgguvn+04ZjT5KuNG27skbTlHfPkpmlMzHv86uPLmXdoMPPnSwbhijMOQsvmTXDyEd3w/OdzDeed3ptjB+6Hwq174pZ5br7wEJx/Ym80bpSL9q2bYveeyrhr+x7QBqs3lQIQGhuNa85m+OonHpdem7isbm1UuLA0bDfSuFFunFbmxEO7YvGanYZjfmz3zM/fS/fTP897rhPLzDW19aisqsVvS7cZ3oPjB3XFrMVbo7+bNMpBta6vnXRYrH2yfWpr1I+dS07ta3Cw+Pqp89BEsnuHFW7aJxPO3r7/DMPvFs0aG+KIaWzbVe6Y/wmHdsX7Ixajc9vG2FFSA0VRMPzp8zB/ZRGOH9gVn49bEXeNef7ue0Bb5PfvjIIVO+LSfjrsHPzx8QkAgEN6u7PR269DMFvtXXdufxzatyMOObBDXODd0/IPAF+/Gycd1g3rVpWgSeNcg83iFWf0M3jj2g05Hzx0Jq5+ZGzcce1DwKzN1QvvjRvlYOQLF+Ki+0ZFj33++OC4/Mwa57OO6YH8g7sYvJzbtW6KP188KOr57kT/nu3xt8sPQ5tIEf797RYA4tl27dgCt13ifynT7r3O798Z3Tq1xOaifQBEyJ03v11oW0cz2kfGgd3aYO3mUtu65PePDxF24P5tsHZLKbp1Su2WjjJIYxcC7VoJuwLN/duMl6Xa687tj1svHhQdFO1svbR4RVZBWbUtJo8ZsF80eLGmPTvq4C74/PFzcciBHQyBIv0IdUBMXd9VN9gO6N0hzjuyb/e26NCmGW66YADOOa4nLj+9n+VeqAN6t5faksnCSHymC1Oh3e9mLkJBaLEATzzMqFU8V9W2mTm0b0zL06dbG2kajUa5OYYQI6NfusjTnr7Xnt0fnz9+Ltq1bmrYTP3iU/pGJxOzkKPFaTvCIj6clW2TNshavaqa8NhetaH5dNg5+ORRX
Vw8F8LH+ScdiM8fP9dwTF/e3686wjkTyMOduKVbp5bRD4m8xrlo16opOrdrjh77tY5uAP72/afjnmuPjAvo7dWuUQsM3LpFE7RpGRPOWzRr7ClgctSWyaHJ157TH6fl29uSRlHzcuNlCwAd2jTD6JcuwmmDRH9WFBE+5sRD95d+aN115eF46774fXG1pTrzvdRv/u7FrMPOVk+2T6yMRrk5lv3lgM6t8NRfTozWz7yd3v+dN8CgadPGk1YSLVeLZo2lQeLPO6E3ht1yXFyAajPm+yLzaNXmi/Zq5ISuHVtIP5yHnHxgXIgju3IHH99LupWgH2IrTdbvXiQSMezg1LaVWOo2k5sTwUV/kMe01PaHlsUgffimY3C/zc5KAPDEbcfjyduOx7v/tA/8nwpIsEshD1/VDV89ORi992+N68/tj3/qgosCwH3X56Nj22bRmF53Xnk4/mWznYl56RSwF+xyc3Iw/OnzLEM9aB2nW+eW0a/bv191BG67ZBAevfnY6GQThG3BXy87FENOPhC9HYQdjUtP62cbVPLjf50d3e3BTYTydq2b4tC+HTH0unxce05/XH56v7i9bo8fJAY4fXu7d2mFb545H6cfZRSOD3TRjjtNTiSP3XpcNHq9m51HmMW+pgP7iPcgJycSfUZ/vUy+BHymLoRMs7xcnH1MD/xpyCG4+iyG14eeahS+ABzQpRW6d2mJq3XG+U0a50S1glbhD/Iai3umCbPt1f0vtWUst4o3vYCjp1O7ZoYPFP1+p41ycww7HzRpnIuObWOCQFMHuybz7gz3XHsk3nngdMsYcN27tMJp+d3jtDf6pa+uLrRFJ6gT6jED5AHD3ZITEfa3D5rGFzPXnM1wz7XuAmVfqgZc7tOtDf6qBqN20880ecy85PfYrccZTAPOOrandJlP26VDFuvODw/deIwhRp32AXTHFYdFtw4MEm3pHRABr81oO0nceaX8I0Ubq1vq+pkWN9DNx8pNFxyCoyXvk7abiGZ28e/bT0LblnmG+I5B8PwdJxs+Ms1o5bXSCdUnH94Npx55QOzjOxruxL4sTbB77FaxjCxb5h/xwoVxZghHMCHInXdib5xxdHdcdno/dO9iNEc5bmBXnOwgSLdpmecqsH5KUBSlQf83d+7cXnPnzlUqKyuVZDN37lxf123cvkeZt2K7UlZRrfy2ZKuyt7xaWbu5RKmvr4+mqa6pVW56coLy/ZRViqIoSsneSqWqulZRFEV545v5ygX3jFDWbSm1Laesolp5+csCZW9ZlWOdtu0qU2pr66TnVm0sVh59b6ZywT0josf8tt0vF9wzQrnv9enK+z8s8p1HbW2dsntPhW2aucu3KRu27bFNc9u/fzbcCz319fWO1/++dKvyn5GLFUVRlMWri5QL7hkR/a++vt7wHuhZs6lEWbu5xHCstq5e2bRjr3LL0z8pMxZuti3XjL7M6ppaZeJvhZZl19fXKzMXbVFqa+sMz762rl559/uFytad+1yXO3nuhmjZS9fuVPaWVysVVTWKoijKzEWblS/GL1dqa+uiaTRWFO5Svpu8UlEURamrq1cmzC5UyitrpGUMfXWa8ta3C5QL7hmhfDlhheu6mVm3pVTZXLRXURRF+e/YZXF1amjMnP278spXBcrOknJf1+/YXa68/8Mi6VizZM1OZcmanY55LFmzU7nt3z8re8ur485VVNYoo39ZY/keJ4L23tfV1Su1df7znzJ3g+U49Mg7M5QL7hmhlOwV89cF94xQbn16omVeKzfsVkr3ifF99uIt0X4k4+ff1ysvf1ngu95uxvy6unqlQu2T2jxlfk7T5m2UHnfDlqJ9yoVD5X2wYMV227HXbmx1ItnzXWVlpTJ37lxl7ty5vRSJXBNRQgn2kz4UFBT0ArBu4MCByMvzv/G6y7KQnx/sVlJuqK6pw9K1uyyXD1JBWG1PB8orazDr93k44w/Om0m7oaKqFne/PBWXntYX5xzXK5A83TBq+hoM7NPRl
XZSTxDPvrKqFjuKy9FDEshY45f5m3FQz3aul4xkbC7ah64dWgQSxkRRFMz+fS76DxgUNb9oaDTkfh9G20v3VaFJ49y08NhMl2dfXVOH+nrFsHNQskl226uqqrBkyRIA6J2fn19oPh/+0yeSTpPGuaEKdQ2d5k0bo22L4Lpas7xGeM8iPEAyudDCNiUVNM1rZCvUAXBcKnFDNxcewW6JRCJo0iinwQp1ROqxMl1oyLgJY5JtkI0dQRAEQRBElkCCHUEQBEEQRJZAgh1BEARBEESWQIIdQRAEQRBElkCCHUEQBEEQRJZAgh1BEARBEESWQIIdQRAEQRBElkCCHUEQBEEQRJZAgh1BEARBEESWQDtPALkAUF1dnZLCqqqqUlJOOtKQ2w407PZT2xsuDbn9DbntQMNufzLbrpNXpNtq0F6xBQUnAfgl7HoQBEEQBEF44OT8/PxfzQdJYwfMAXAygK0A6kKuC0EQBEEQhB25ALpCyC9xNHiNHUEQBEEQRLZAzhMEQRAEQRBZAgl2BEEQBEEQWQIJdgRBEARBEFkCCXYEQRAEQRBZAgl2BEEQBEEQWQIJdgRBEARBEFkCCXYEQRAEQRBZAgl2BEEQBEEQWQLtPJFkGGP9ALwB4CQAFQC+BvAA57w81Iq5hDF2BYDrAOQDaA9gDYB3ALzHOa9X03wC4I+Sy6/gnP/PlN+9AG4HsB+ApRD3YpIpTSsALwC4HEBTAFMA3Mk5LwysYS5gjN0I4GPJqbc453fo0g0G8DSAAQA2A3iVc/6GJL+Mabtal6kATrE4/SDn/FnG2GMAhknO38c5f9GU3/8BeAhAL4j36AnO+XBTmsYAnoB4n9pCRFb/O+d8gd92uIEx1hfAvQCOAzAQwArO+UBJupQ/62SPIU5tZ4zlAhgK4HyIdjcCsBjA45I2FQLoKSmmE+d8py5dWrRdLcPx2Yc1xoX97NU0drsYHM85n62mmwr5eHE053yuLj9XfZwxth+A1wCcC0AB8COAu/XvUSK4mdvUdBnX50ljl0QYY20hHlwriAc5FMA1AD4KsVpeGQqgCsB9AC4AMALA6wCeM6VbC+B403+T9QnUF/8ZAG9BTBKrAIxhjB1myusrABcCuBPAVQD2BzCJMdY8qEZ55FwY2xUVWBhjxwMYBWA+gMEQguCrjLG/6DPI0Lb/DfHP9G313FhdugpJui/0GTHGLgfwKYAfIO7TzwC+UgdNPa9ADI7DAFwEoBqi/fsH1io5h0A8l9UAlskShPGsUzSGOLW9GYRAvgDATQCuhpjgJjLGLpCk/x/i34cSU5p0aTvg4tmrpHSMS5NnD8S3+XgAswHsADDXlHaGJO1yUxrHPs4YawRgPIBBAP4PwC0ATgAwijEW8dFOGY5zW6b2edLYJZfbALQDcLj2lcEYqwXwBWPsSc750lBr544hnPMi3e8pjLGWAO5gjD3COa9Sj1doX24yGGN5AB6B+Np5UT02DeLL/2EAV6rHjoXoGOdzzseqxxZDfE3diJhgkUoKbL4SHwUwj3N+s/p7CmOsB4BhjLH3Oef1mdp2znncQM8Yex3AYs75It3hertnr/IkgG855w+qv6cwxg4G8DiAcWre3QD8BcBdnPMP1GOzAawDcDeA+xNojhOjOecj1TI/AXCUJE0YzzoVY4hT2ysA9OacF2sHGGM/ATgIYtL50ZR+u8NYkE5tB9w9eyD1Y1w6PHuY26wKHkcAeJ9zXmtKXuJwj9z28csAHAZgoNZOxtgWCMFxMIwfln5xM7dlZJ8njV1yOQ/AJJNQ8B3EV4JZU5GWmF58jfkQauT2HrI6AUAbCJWylncdgG8ADNZ9hZ0HoBTia01LtwGiQ5/nqfJJRu3QpwMYbjr1JYQ6/kj1d1a0XV0eOBrA5x6v6w2gP3TtV/kSwNGMsU7q77MhNreO3k/O+V4IwSGp7dcvvcgI8VknfQxxajvnvE4v1KnHFAgNnh9Natq0XS3btv0eyLpnb8EVAPLgcRxQc
dvHz4P4gFyqSzcTwHoENBY4zW2Z3OdJsEsuB8Ok3la/AtZATHSZyskAdkOo4jX6MMZKGGM1jLH5jLGrTNccrP41q+WXAmgJoJsu3QrJgLMU4d2zJYyxOsbYOsbYMHWZAAD6AGiC+CUMbTDS6pvJbddzPYB6iIFNTzPG2A7GWC1jbAVj7HbTea39VveJ6dJt55zvkqQ7iDEW5ngV1rNOyzFEfRYnIL6dAHAdY6ySMVbGGJvAGDvSdD5T257qMS7d2q9xPYCVnPPfJedOYYztU5//r4yxM0zn3fbxuLbr0iWz7fq5LWP7PAl2yaUd4m1LAKAY3rRdaQNj7CgIO5tX1K8SQHzl3AvgYgibgE0AvmbC+UCjHYAqznmFKUtNE9Bel65EUnQY92wrhB3IjRB2dj8A+BeA/6jn26l/S0zXydqUaW2XcR2AaZzzTbpjqwE8AGEDciGAWQDeZMKpQsPLfTKn0dI1hhgkwyKsZ52u78SdEAL5S6bjowDcAeAsiCWlAwD8whgboEuTiW0PY4xLp/YDANRlyJNhsqFVmQaxnHoegBsARAD8xBg7XZfGbR9Pedslc1vG9nmysSNcw4SX0ncAfofOwJRz/pop6UjG2GQI+6lPUlbBgOGcTwAwQXdoImOsFMBjjLEnQ6pWKDDGjoP4gn1Gf5xzbl6OGcsYA4AHGGMvcM7LUlRFIkUwxk4B8DyAFznnv+jPcc7v0v38hTE2DsAKAP+EMILPSLJ1jPPBtRACW9wyLOfc4B3PGBsFYCGAx2ByMkk3rOa2TIU0dsmlGMKd20w7CHVvxsAYawNh5F4O4ELOeY3DJd8C6KGznyoGkMcYa2pKp30V7dalayvJL13u2Tfq3yMR+yJra0oja1Omt/16AJUQHo9OfANhp6JpabzcJ3MaLV0NgH3uqpoUwnrWafVOMMYOBTASwoPwAaf06pLbZIiQEhoZ2XYJyR7j0rH91wGYxTlf65RQXT4cCffPXt/HU9Z2m7ktY/s8CXbJZTli6+8AokbYfSC+YjMC9YUdBaAzgHMl9hFu0OwPDjYdHwBgL0T4BC0dY/Eu7QOQfvdsDYSrvqxNQKy+Gd121abwKggPuj0+srBrPwBwXbrOjDHzcsMACJueoIzc/RDWs06bMYQx1gdCgz0PwA2qA4UfMq7tLsnaZ6+WfThErDs/ThMabvt4XNt16QJru8PclrF9ngS75DIWwBmMsQ66Y5dAeBQF4a6ddNRJ/RsAhwIYzDlf7+KaCISL93qd59FMCI+gq3TpctV043WTxFiIr5VzdOm6QwRqTId7djVEsMwC9Yt0MlR3dh3XANgGMQECmd/2cwB0hPsB/WqIEBlLAYBzvg5iQDIbm18DYI7uHfkJwjkjej/V8ANDEPKzD/FZp8UYoi5V/QTR1os559Uur+sI4AyIILQaGdV2GSka49Kt/ddBaNXMXqJSVGHkYhifvds+PhbAICZCImnpjoMIbh5I253mtkzu8xFF8fvRRTjBRLyfJQAKIeJ4dQbwMoQr89Xh1cw9jLH3APwZIr7QL6bTyyDUw59CBF5cDfHi3gLhbHCD3gaLxYI4PgjRKW6BiFd0LOd8oS7djxBxkoYC2AMRpbwdgEE8hTt2MMYmQHTsJRCD0WCIoL0fc85vVdMcD2A6hJ3NFwBOVOt7O+f8XV1eGdV2PYyxryCM4bual+AZYwUQz59DeJBdBTEBPMI5f1qX7gqICeHfACZCBCb9O0RMp3G6dG9CGF4PhQhtcC9EbK1BnPMtSWxjc8RCDtwO8XV8j/p7Dud8fRjPOhVjiFPbITwEZ6nHrwewXX89j+08cA1EoNdxEFqKXhDLtd0gdh+IahvSpe1qOU7tB0IY49Lh2WvCjuqtugEiptuFknxOhgj0+4Na3/0g+nc+gLM451N1aR37uCp0zYVwqHgQwh/gBYh378QEtMX6OtvObZzzPZna58l5IolwzktUj6DXAXyP2NYgyQy0GjTa18XzknOnAVgE8bXyC
MQLWAPxYl/IOR+tT8w5f1E1rL8LQBcIjc75+hdf5RqI3R3ehvhKmQKxdU+qBZvlAP4E4dnXCCKa+AMAXtUScM5nMcYugujU/wdgC4B/6Du9mi7T2g4g+jV9IYBPLewqV0N4wnVVfy8F8CfOuWErNs75t+ok8hDEQL4GwLV6oU7lHxB2Nk9BxIaaA+DMZAp1Kp0hbKb0aL9vAvBJGM86RWOIU9unQgSLBYRtnRltaWkdRFy7lyEmq1IIT8nL9UKdSrq0HXBu/yiEMMalybP/RP33qRAC+j2QsxXiw+4ZAB0g7NVmAziVcz7DlNaxj3POaxlj50JsKfY5YluK/T0IoU7FaW6bmql9njR2BEEQBEEQWQLZ2BEEQRAEQWQJJNgRBEEQBEFkCSTYEQRBEARBZAkk2BEEQRAEQWQJJNgRBEEQBEFkCSTYEQRBEARBZAkk2BEEQRAEQWQJJNgRBEEQBEFkCf8Pn/9H/koRoC8AAAAASUVORK5CYII=\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "df1 = df[df[\"name\"].isin([\"Latency\"])]\n", + "ax = df1['issue_to_done'].plot.hist(bins=BINS, alpha=0.5, figsize=figsize)\n", + "ax.set_title('Inference time (usec)');\n", + "#ax.set(xlim=(0, 25000))\n", + "plt.xticks(rotation=60)\n", + "plt.show()\n", + "\n", + "ax = df1['issue_to_done'].plot(figsize=figsize)\n", + "ax.set_title('Individual inference time (usec)');\n", + "#ax.set(ylim=(0, 200))\n", + "plt.show()\n", + "\n", + "\n", + "# df1['issue_to_done'].describe()" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAApwAAAFKCAYAAACwxI8KAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjQuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/Z1A+gAAAACXBIWXMAAAsTAAALEwEAmpwYAAA8y0lEQVR4nO3deZgkRZn48e8A0oBcw+24ooL4Ao6iDKyKuIsiIqDIKoegIigs7iIeC4oiIiIiyqh4gNeq4G9REF1PEA9AVJDDwUWG4wURmMVZEbkVbJCZ3x+RNZMkNdPdNZVd3T3fz/P0U12ZUZmRkUe9FRkROW3hwoVIkiRJbVlh0BmQJEnS1GbAKUmSpFYZcEqSJKlVBpySJElqlQGnJEmSWmXAKUmSpFYZcGpSi4hjI2JCje0VEQsj4nODzofUi4g4LSJuGXQ+JqqIeEp1jh8w6Lw0RcS0iLgqIj446LyMRkScHRHfGHQ+ND5WGnQGpKYxBJAHtpqRCSIiVgPeBfwsM3824Oy0KiLWA44EXglsDDwAXAF8MjPPHWTexiIitgSOAZ4LPAG4C7gRuDAzjx1g1iaMiHg58GbgH4G1gbuBy4CvZOa3B5i11kTEDOBfge9k5v+0sIp9gacBn2xh2W34MPDriNgqM68adGbULgNOTUSvb7z/V+B5wBsb0y8B/gs4cTwyNUCrAe+v/v/ZAPPRqogI4HxgPeArwBxgOrAfcE5EfCQz3z3ALI5KRDwfuBCYD5wG/AGYAcwC3g0cO6i8TQQRMQ34PHAw8Fvg05QyWh/YFfjviHhtZn5tcLlszQzKuXwL8D8tLP+dwDcz888tLLvvMvPKiPg1cASPve5rijHg1ISTmf9Vfx8RLwH+sTm95u/t50ptiojHAd+kBJj/lJmX1+Z9HDgDODIi5mTm2eOct8dn5l/H8JGjgb8C22bmnY1lbdjXzE1O76AEm58B3paZC2rzPhIRLwMeN5CcTWIR8Rzg2ZTjbzI5CzguIg7NzPsGnRm1x4BTk1pEHAu8PzOn1abdAlxPqfmcDTwDuAl4a2ZeEBF7AMcBTweuBQ7OzDmN5T4dOB7YEXg8cB3wocz85hjytg+lNmMTIIEjM/O8Rpq1qjR7AhsBt1Fq907IzEci4inAzVXy90dEp6bzdOBjlBqiPTPzW9Xyotr232XmZrX1/D9KIPfk2rRtgQ8ALwBWptQovi8zL2zk8QnAB4GXUwLC3wOfyszP1tLsQKnV2w94KvDvlJrKi4FDMvN3IxTXq4GZwDH1YBOgK
odDgJ2r/J7dWOeL6k0NamV2YGaeVps+4j6t2uV9pUqzB7APsEH12RuAwzPz443yeRZwFfDvVZlsClzbDDarbbm98dndKcHXcyg1fH8EvkHZD3+rpTsNeA3lmD0V2AG4DzgxMz8VEc8APkW5E3An8N7M/H9dtuvFlLJ+DTAE/BA4rJmvbiJiP0qwOBP4G/BT4F2ZeXMtzdOAE4AXAutUefkV8JbM/L+IWBU4inI+vKMRbHbKqHmOrFctc3fKrfffUZpYfLGW5imUff6eqlyOoJxPlwBvAuZV8/6Nclz+BHhjvSawdt34KHAS5boxDzg+M786ivJZ6nlSO14BvhIRX6n+/0CnmcUyXnf2AB4BLmjk61ga18hq+gGUY+KpmXlLNW3rav3bAmsAtwM/B/41Mx+s0kwD3kK587QZpby/T7m+/bmxjp0o5b4NMI1yDn02M/+zluwnlOv0zlTntqYmOw1pqtoE+DpwDuU25trA96ovzU8BX6O0sdsEODsiVux8MCK2oLQleybly+dwyhfn2RHxulGu/wXAZynBw3uBVYDvR8T2tfWsSvkCOoDSNOAtlC+LYym3HAHuoHxJAnybctvp9dX8uZR2b/9UW+8/AQuAp1VfgB0vpHxxdNb9z8AvKEHBcZR2k0PAj6svxk66DYBLgZdRAp23Ves9NSK61aS8C3gV5Qvkw5QA6IwlltJir6heu36xZ+a9wHeBLSJi01Es71F62KefBrYGPkQJgm+kBE7d0r4OeIhSUwPldulzImKrUWTtQGCYcky+lbL/30G5Fd+0AnAu5fbzOykBzScj4kDgR8CVlP14H3BaFfw1fZJya/844AuUIOXHEbHy0jIZEe+mHKM3U8puNrA9cHFErF+leVyVj+2BUyg/Ok4FNqTcSoZyXqwLfC0zR7wzERGrUM6RA4Ezq+2+HfhClaem1wCHUWpPP0Y57s+m/FB5BWXff54SFH68y+c3Ab5FadpxJHAPcHr143Fp+RzNeXId5ZoDpew75/J/V8tY1uvOdpQfOg+OIm23bVifEvxtWq3/LZTjcCYl+O34LKXsLqu28wuUH8wXVvurs7zXU46HDavlvQu4HNitseprgQcpx4amMGs4NVVtRqnR+wVARFxHufh9GdiiUysTEfdQvoBeRKmxgfKlPB/YpnbxPiUifgycGBFnZOZIHZtmAttl5q+q9ZxG6TRyIuULGUpgsTmwdWZeX037QkTcDBwfESdlZkbENykX+d92aW5wMY8OOF9IqbXaoZp+VkQ8CXgyJXiqt6H7JbBTZ1uqnvW/odQmbVct73hKIPrMzLyjmva5iPgicFREfCYz76mtfxVgq8x8qFrm3ZSgaGZmzl1KeW0J3JuZty4lzVW1tDctJV03Y92nfwF2aARFXwU+GxFbZua1ABGxAqWjxjmZeVeV7qPATkCnfdovKEHT+fVay8prM/OB2vvPR8SNlP3/zsz839q8xwFnZeYHq3V/vdqmLwGvz8wzquk/odTUHUD326s7ZOZwlfaa6vP7A//ZJS0RsTGl5u7YzDyuNv1M4BrKcXwUZb9sAuzVqJE7vvb/ltXrb7utq4t/pZxLB2Tm6dV6T6Wcy8dGxBcbNcn/ADytc0xWPyTfQ2kH/ZzMfLiavgHwmog4pBGgbQbsl5lfr9J9gXJOnBQRZ3erka1t40jnye0R8UNKsP+rLk2ElvW6sznlLkWvtqP8AN05M39dm965q0JEbAccAryhXusbEedRjvP9KdewNSlB/5XAC+tlXF1/FsnMv0fE/7L42NAUZQ2npqobOsFm5bLq9Wf1W4C16ZsARMQ6wEsoNZOPj4j1On/AecATKbc1R/LrTrAJUH0pfg14QURMrybvTQn6/txYTyfw3WEU6/kF8Kzq1jyUIPMCSm1LJxB9YS0twFZAVPlZt7beNSk1HM+NiNWqL4Y9KbXECxt5/DGwKqUXdt1XO8FmY52bjLAdawD3j5CmM3+NEdI9So/79ItdauDOotRG1
js37EAJchbdvs7MCyhl/gNKsHR49f/tVW0ktbQPVHlcISLWqvL0S8rtx627bM5/1j57D+XW9N8otfmd6UmpmetW5p/vBJuVr1ZpX94lbcerKJUTZzXK7l7gasqPNSg1qwA7R8TjuywHyjEGI+/rjt0otfyLgrPMfAQ4mRLgvaSR/luNH0Cd8/u/OsFmbfrjgCc1Pv8nFtdUUwVK/1mle1a3DPZ4njSX0Y/rzrqUOx69urd6fXlVW93N3pQfY+c18ng9pea5cyy8lLKvT2zWuC4haL6b0tRBU5gBp6aqefU31S1ZgP9tpOtM7wSBT6N82R9L+aKr/32sSrPBKNZ/Y5dpN1SvnXaUT6fUhDXXc+kY1vMLynm8fa0m8+fVXz3g/FOtFrXzxfWlLut+W7W8dSltCqdTRgdopuuMndfM47zG+84X4HSW7n5GDiQ78/80QrqmXvbpY2pQM/Nu4HvAfrVamtdRhjw6p5H2ksx8JaUpx7MpNY0LgS9HxIs76SJiZkScS/kSv6fK00XV7LV4tIcz8/8a0+4F/tCl5u1eupf5o47LKqi+GXhKl7QdnePleh5bfttQlV31Q+7jwEGUH1E/jYi3RcS6tWV1gtLR/mh4MqU98iON6ddVr818N4+/0Z73HTd1KcvOedtcV0cv50lTv64700ZOskQXUTruvR+4MyK+HxEHN348PB1YnRJcNvO5YS2PnWYvS7ur0cz3hBpPWf3nLXVNVc0vqJGmdy7UnR9hn6C0l+tmtBfRkaxAqY388BLm/34Uy/g1pf3TP1GCm/sptwDXoNxyXIcScP6ysV4obVuXdAvujmp5UGrPvryEdNc03o9UvktyLfDsiNg4M5tBQ0enhqlTLkv6glqx8b6XfbqkdnBfBfYC/ikiLqN0wPlao1Z3kapW7Srgqoj4FaVt4OuAC6pa6QspPdrfS+kM8yClNus0HlshsKTbub2W+Wh18rEL3UeEWFRWmXl4RHyZ0sHnpZRg6eiI+OeqGUInUHwm8J0+5a+u1/N+WXTKZyznyZKWsSzXnT/T/UfGqM6TquZxr4j4R0qN906U9pnviYjnZeafqnzeSWkr202vNazTWdw5UlOUAaf0aJ1g5u+Z+dOlply6zbpM69QUddop3gSsMYr1LPGXf2Y+HBGd2+drAZdUvbovpQQHr6S0jfpi7WOd2rv7l7buiLiDEsCutIxlMRrfp/Rw359Ht/nr5GVNyrZcmZmdfdT5clu7kfzJjff92qdQbm/+iXJbfUPKbcP/t9RPLNbpfd/pQPMiym3EPTOzU6vZ6dnbls0ot3k761qJMqrARUv8xOLjZV6n7erSZOY1lADrw1F68M9h8VBIv6Tst/0i4oQuNZdNt1I6YK3YSLt59XrLSPkZo00jYoVGLWfnvF3SusZynizpXO7HMXodZV823Q0QEWs3mhs0zxMAsowScTlwTETsQgmAD6a0Ab+JEohempl/WUpeOsfMTErN+BJVx+CTWHKgrSnCW+pSTfUr/kLg4Ih4YnN+1ZNzNLaJMgB453PrUgKqS6pbs1Daim0bEbt2Wc8aETFUve10KlnSbelfUHoe70TVE71qN/VrSk/badR6qFMCgN8B/xERj7m12dnG6gv+m8Ae0aXH9RjKYjS+RQlS3h0R2zTWsyKl09R0qo5PlVspNVf1TlNQekcv0sd92rkFfQalzd6bKLd7L2ks78VVZ6Kmzn7ufAF3Aqj6kF4rAP8x2vz04JDacQUlwF+bRpOAhm9R8npMs8MHLBq2iIhYswoe6q6j1ICuDYuOyw9TAsaPLWF5L43yFCIobV/Xp5w7nfkrUJp+DLO4vXO/bEAZCquzrlUpTQRuYwkdncZ4nnTGc33UudynY/RiYMsqz3Wd4G/ReVLdJn9DYx3Tu+yPK6vXtavXsyhxwzGNdETEirX26T+mNJ94dzM/XdaxJaWz4SVoSrOGU3qsf6NcvH9b9TK9ifJF9FzKxbHbcDNNc4EfRMSnKbUf/0q5z
f2eWpqTKEO1fDciTqcEgqtSagX2otx2vCUzH4zSm/g1EXED5ZbWzZnZ6RDxCxYP8VQPLH/O4mFyFj02LjMXRMSbKLV111a3QG+j1Lz9MyUA6jT+fzelY8yvqrK4hvJl+WzgXyhfFMusqql9NaWJwS+rPNWfNPQcyniI/137zL0RcTZwWJTHod5EuRXYra1bP/Zpx1cpNXYvpftTgz4FrB4R36YEXCtQOgC9nrLvTq7SXVy9P706Th6mBLKrjyEvvbgwSg/3p1CGEJpLGde1q8z8fZQhiE4CnhwR36G0N30qpdb5LEo5vJjSq/qblM5M0yjB2xrUOuJQhlTanBI07lDtw/mUwHLnajmdAPOLlHPnS1EGNv89ZSinHYH3ZJexTpfRjZShjJ5DOSdeR+lg99ql9FCH0Z8nN1FqHP8tIv5CuTbMrUZwWNZj9LuU4Z9ezKN/QPyY0rb1SxFxEuXHQ6e96ca1dG8ADq2O25so16IDq/TfBMjMn0fEKcA7q9rrH1EC/6dRjt1jgNMy876IeBulicGvI+JrlGP9GZQmI6+qrXcnyo+SH42wfZrkrOGUGqpevttQOojsz+IxBVcC3jfKxVxcfWYfyjBDw8AembkoIKxqe3YAPkKpfTiZMrzMFpRhaP5YW96bKLf0PkZpK/ZvtXm/otw+/xuLb9vC4h7iFze/LKt8PI/SQenfKUOYvJHSAeYjtXR/onzh/Sfli/4zlGBrI0rv676pyn2rah07UcYzPIkSbL4hM7uV/WGUL9o3U27Fz6NRc1Nb9rLu086y/ofFtV3dnn51BKW3/86U4OqT1facQXn60C3Vcu6i9ML+X0qgcBSl1/f+Y8nPGL2N0sb3/ZThbb4HvHRJbVA7MnM2Zf8/ROkA9XFKIPUzFg/WfRVlSK5dKdv9QUrQuUd9mKTMXJiZb6IEq/OBt1PaCh5O6Ty1e2dYomoYqRdRAuLXUo7/J1AGIm/jkba/p7TL3ZEyvNV0ygMElvqYzdGeJ1Wb3tdTztVTKOfyntW8ZTpGszyL/EpKT/L69Icp++omyj55a5XPzzQWcRHl+rE35Zg9inINenHtxy2Z+RbK9Wgdyh2HEyk/vr5BbdD5LA9deDnlmnIUpTyfT2k+U7c38O1ax05NUdMWLrRjmKSJKSKeSQmcb6WMqzohvpQi4grgocycFINVx+Knyjw/My8dIflyKaonDWXmywadl15FxL6UWuEnt1D723dRnmz0a2BWZv5m0PlRu6zhlDRhZebVlJqwAL4dIzwRZzxExLMpNVFLvA0tDciZlJrMtw84H6P1HuCbBpvLB9twSprQqh7cfWkruiwiYialc9Y7KD3Vu91OlwamGtpoNI9UnRAyc69B50HjxxpOSRqdPSm3pVcFXpOPfiSlJGkpbMMpSZKkVnlLvQdz5swZArYF/o8lP8FCkiRpIliRMsLDFbNmzRoeRAYMOHuzLYuHnJEkSZoMmo86HjcGnL35P4CnP/3prLxye51m586dy8yZM1tb/mRhORSWQ2E5FJZDYTkUlsNilkVRL4eHHnqIG264Aar4ZRAMOHvzCMDKK6/M0NDQSGmXSdvLnywsh8JyKCyHwnIoLIfCcljMsii6lMPAmgHaS12SJEmtMuCUJElSqww4JUmS1CoDTkmSJLXKgFOSJEmtMuCUJElSqww4JUmS1CoDTkmSJLXKgFOSJEmt8klDE9jqa63H7Xc9MOhs9MVqQyuxxuPbewyoJEmauAw4J7BHFk7j/CvmDTobfbHjthsbcEqStJzylrokSZJaZcApSZKkVhlwSpIkqVUGnJIkSWqVAackSZJaZcApSZKkVhlwSpIkqVUGnJIkSWqVAackSZJaZcApSZKkVhlwSpIkqVUGnJIkSWqVAackSZJaZcApSZKkVhlwSpIkqVUGnJIkSWrVSoNacUTsBbwWmAWsA9wEfBb4fGYuqNKcBryhy8f3ysxvNpZ3BHAosBFwDXBkZp7fSLMGcBKwJ7AKcCFwW
Gbe0rcNkyRJ0qMMsobzcGAYeCfwcuA7wKeAjzTS/R54fuPvgnqCKtg8ATgF2A24ETgnIrZqLOvrwO7AYcA+wAzg/IhYrV8bJUmSpEcbWA0n8IrMvKP2/sKIWB14S0QcnZnD1fQHM/PSJS0kIoaAo4GTM3N2Ne0i4GrgvcDe1bTnUoLR3TLz3Gra1ZSa1QOAU/u5cZIkSSoGVsPZCDY7fkO51b3OGBa1HbAWcGZt2Y8A3wB2iYhp1eRdgXuB82rp5gEXV/MkSZLUgkHWcHbzQuAu4E+1aZtGxD3A44G5wImZeVZt/hbV63WNZV0DrA48EbitSnd9p31oI93Ofcm9JEmSHmPCBJwRsQ1wIPCBqoYSSo3nFZSgcC3gIODMiFg1M0+r0kwHhjPzwcYi765e16EEnNOBe7qs+m7GVqO6yNy5c3v52Kituub6zJ8/v9V1jJc771yN227uVqk9OnPmzOljbiYvy6GwHArLobAcCsthMcuimEjlMCECzojYCPgWcDm1TkOZ+clG0u9GxAXAB4DTxi2DSzBz5kyGhoZaW/61N85jxowZrS1/PK277npsuNnGPX12zpw5zJo1q885mnwsh8JyKCyHwnIoLIfFLIuiXg7Dw8OtV5KNZODjcEbEWsAPgQeA3TPz4RE+cjawcUSsX72/GxiKiFUa6aZXr3fV0q3dZXnTa2kkSZLUZwMNOKsg8XvABsDLMvPOHhbTabu5RWP6lsD9wB9q6aLWiaie7voe1itJkqRRGFjAGRErUXqSPwvYJTNvHcVnplGGObq11sv9Ekrv831q6Vas0p2XmQuryedSajh3rqV7ErB9NU+SJEktGGQbzlOAVwDvAlaLiOfV5l1LudV9OmWw9t9RgsWDgB2A13cSZuZwRBwPnBARdwBXVuk2BfarpbssIs4BvhQRhwP3AccB85gA7UElSZKmqkEGnJ2axo92mfci4LeUmsujKbfcH6YEk7tn5vfriTNzdkQAvBXYkNKrfbfMvKqx3H2B2ZRB3ocoj7bcKzMf6McGSZIk6bEGFnBm5lNGkeyVY1jebEowubQ09wOHVH+SJEkaBwPvpS5JkqSpzYBTkiRJrTLglCRJUqsMOCVJktQqA05JkiS1yoBTkiRJrTLglCRJUqsMOCVJktQqA05JkiS1yoBTkiRJrTLglCRJUqsMOCVJktQqA05JkiS1yoBTkiRJrTLglCRJUqsMOCVJktQqA05JkiS1yoBTkiRJrTLglCRJUqsMOCVJktQqA05JkiS1yoBTkiRJrTLglCRJUqsMOCVJktQqA05JkiS1yoBTkiRJrTLglCRJUqsMOCVJktQqA05JkiS1yoBTkiRJrTLglCRJUqsMOCVJktQqA05JkiS1yoBTkiRJrVppUCuOiL2A1wKzgHWAm4DPAp/PzAW1dLsAHwK2BP4AnJyZn+6yvCOAQ4GNgGuAIzPz/EaaNYCTgD2BVYALgcMy85Z+b58kSZKKQdZwHg4MA+8EXg58B/gU8JFOgoh4PvA94DfALsBXgJMj4s31BVXB5gnAKcBuwI3AORGxVWOdXwd2Bw4D9gFmAOdHxGp93jZJkiRVBlbDCbwiM++ovb8wIlYH3hIRR2fmMHAMcGVmvqmWZmPg/RHxhcxcEBFDwNGUms/ZABFxEXA18F5g72racynB6G6ZeW417WpKzeoBwKktb68kSdJyaWA1nI1gs+M3lFvd61SB5IuBsxppvka5bb519X47YC3gzNqyHwG+AewSEdOqybsC9wLn1dLNAy6u5kmSJKkFE63T0AuBu4A/AZsCKwPXNtJcU71uXr1uUb1e1yXd6sATa+mur7cPraXbHEmSJLViwgScEbENcCDwiaqGcno1655G0rur13Wq1+nAcGY+OIp0zWV10q3TZbokSZL6YJBtOBeJiI2AbwGXU+s0NNHNnTu31eWvuub6zJ8/v9V1jJc771yN227u1opidObMmdPH3ExelkNhORSWQ2E5FJbDYpZFMZHKY
eABZ0SsBfwQeADYPTMfrmZ1aijXbnykU/N5Vy3dUESskpl/GyHdxl2yML2WZkxmzpzJ0NBQLx8dlWtvnMeMGTNaW/54Wnfd9dhws27FP7I5c+Ywa9asPudo8rEcCsuhsBwKy6GwHBazLIp6OQwPD7deSTaSgd5Sj4hVKMMebQC8LDPvrM2+CXiIxW00O7asXq+vXjttN7ulu58ydmcnXdQ6EdXTXY8kSZJaMbCAMyJWovQkfxawS2beWp9fDYt0AdWwRjX7An8ErqzeX0Lpfb5PbdkrVp87LzMXVpPPpdSW7lxL9yRg+2qeJEmSWjDIW+qnAK8A3gWsFhHPq827NjPvA44Dfh4RXwTOAF4AHAwc2ultnpnDEXE8cEJE3EEJRA+i9HLfr7PAzLwsIs4BvhQRhwOd5c8DTmt1SyVJkpZjgww4OzWNH+0y70XAzzLzVxHxSspThPYH5gPvyMzP1RNn5uyIAHgrsCFlqKPdMvOqxnL3BWZTBnkfojzacq/MfKA/myRJkqSmgQWcmfmUUaY7l1Hc8q6eMjR7hDT3A4dUf5IkSRoHE2YcTkmSJE1NBpySJElqlQGnJEmSWmXAKUmSpFYZcEqSJKlVBpySJElq1ZgDzojYucvjISVJkqSueqnh/CFwW0ScFBFb9TtDkiRJmlp6CTj3AC4GDgWujIjfRsQRETGjrzmTJEnSlDDmgDMzv5eZe1MeIXkwcAdwInBrRPw4Il4XEav1OZ+SJEmapHruNJSZ92fmlzNzR+DJwFHABsDpwO0R8dWI2LFP+ZQkSdIk1a9e6isCjwOGgGnAg8BLgJ9ExG8iYmaf1iNJkqRJZqVePxgRawF7A68DXgD8HTgHeHf1ugDYHfgE8BVg22XNrCRJkiafMQecEbEHJcjcFVgFuAJ4G/D1zLyrkfw7EbEecOoy5lOSJEmTVC81nP8N/AH4JHB6Zl4/QvrfAmf0sB5JkiRNAb0EnC8Fzs/MhaNJnJmXA5f3sB5JkiRNAWMOODPzp21kRJIkSVNTL4+2/ERE3LiU+TdExEnLli1JkiRNFb0Mi7QbcNZS5p8FvKK37EiSJGmq6SXgfBJwy1Lm31qlkSRJknoKOO8DnrqU+ZtQBn6XJEmSego4LwAOiYiNmzMi4inAIVUaSZIkqadhkY4BdgHmRsRXgGuq6TOBA4BHgPf1JXeSJEma9HoZFunGiHgBcApwWGP2RcBhmZn9yJwkSZImv56epZ6Z1wA7VI+t3KSafFNm3tm3nEmSJGlK6Cng7MjMPwN/7lNeJEmSNAX1FHBGxIrAzpTazenAtEaShZn5wWXMmyRJkqaAMQecEbEN8C3gH3hsoNmxEDDglCRJUk81nKcCqwJ7AL/IzHv6mSFJkiRNLb0EnM8C3puZ3+93ZiRJkjT19DLw+20s+Va6JEmS9Ci9BJwnAgdHxJr9zowkSZKmnl5uqa8D/BX4XUR8E/hfytOF6hZm5knLmjlJkiRNfr0EnCfW/n/zEtIsBAw4JUmS1FPA+dR+rTwingYcATyP8iz26zNzZiPNacAbunx8r8z8ZiPtEcChwEaUZ7wfmZnnN9KsQQmG9wRWAS6kPI7zlj5skiRJkhp6eZb6rX1c/zOA3YDLKO1Jl9Sm9PfAaxvTbqi/qYLNE4CjgCuBg4FzIuK5mXlVLenXga0pz4G/DzgOOD8inpmZDyzb5kiSJKmp50dbRsRmwA7ABsAZmXlLRKxMqV38Y2Y+NIrFfD8zv1st7zRgmyWkezAzL11KXoaAo4GTM3N2Ne0i4GrgvcDe1bTnUgLc3TLz3Gra1cBNwAGUMUYlSZLUR2PupR4RK0TEF4Drgc9Tagg3qWavTAnyDhvNsjJzwVjXvwTbAWsBZ9aW/QjwDWCXiOgM47QrcC9wXi3dPODiap4kSZL6rJdhkY4C3gi8D3g+tTE5M/MvlMdevqovuVts04i4JyIejojfRMQ+jflbVK/XNaZfA6wOPLGW7vouge41wOZ9zbEkSZKA3
gLOA4EvZ+YJwO+6zL8a2GyZcvVov6F0LNqD0tHnNuDMiDiglmY6MJyZDzY+e3f1uk4t3T1d1nF3LY0kSZL6qJc2nP8AXL6U+Q8Ca/SWncfKzE82Jn03Ii4APgCc1q/19GLu3LmtLn/VNddn/vz5ra5jvNx552rcdvMdPX9+zpw5fczN5GU5FJZDYTkUlkNhOSxmWRQTqRx6CTj/CDx5KfNnAf3syd7N2cCpEbF+Zt5BqaEciohVMvNvtXTTq9e7qte7gY27LG96Lc2ozZw5k6GhobF+bNSuvXEeM2bMaG3542nddddjw826Ff3I5syZw6xZs/qco8nHcigsh8JyKCyHwnJYzLIo6uUwPDzceiXZSHq5pf4t4N+qXuodCwEiYhdgf0pnnfHUabu5RWP6lsD9wB9q6aLWiaie7vr2sidJkrT86iXgPBaYR2lbeQYl2DwqIi4FfgBcBXy4XxlsqoLFvYFbq9pNgEsovc/3qaVbsUp3XmYurCafC6wN7FxL9yRg+2qeJEmS+qyXgd/vi4jtgP8A9gL+RgnYbqIEoyc1bmsvUUSsxuLhiJ4MrBkRe1bvr6heT6cM1v47SrB4EGX8z9fX8jQcEccDJ0TEHZSB3w8CNgX2q6W7LCLOAb4UEYezeOD3eQy4PagkSdJU1dPA71VAeUL1tyw2oLTHrOu8PxD4HqXm8ugq7cOUYHL3zPx+I0+zIwLgrcCGlKGOdms8ZQhgX2A2ZZD3IcqjLffyKUOSJEnt6PlJQ/1QPb+82Z6y6ZVjWN5sSjC5tDT3A4dUf5IkSWrZmAPOiPjyKJItzMw39ZAfSZIkTTG91HC+mKpXes2KwBOq1zuAvy5jviRJkjRF9NJp6CndpkfE4yi3qd8O7LRMuZIkSdKU0cuwSF1l5sOZ+Rngx8Bn+rVcSZIkTW59CzhrrgL+qYXlSpIkaRJqI+DcCXCIIUmSJAG99VI/Zgmz1qbUbG4NnLgMeZIkSdIU0ksv9WOXMP1uytOG3gx8sdcMSZIkaWrppZd6G7fhJUmSNEUZPEqSJKlVvbTh3LiXFWXmvF4+J0mSpMmtlzact/DYJw2Nxoo9fEaSJEmTXC8B50HAW4EnAV8DbqimB7AvMA/4FLCgHxmUJEnS5NZLwPkEYAh4WmbeXZ8REe8HLgY2yswP9yF/kiRJmuR66TT0ZuALzWATIDPvpAyJ9G/LmjFJkiRNDb0EnOsCqy9l/uOrNJIkSVJPAeelwNsiYlZzRkRsA7wNuGxZMyZJkqSpoZc2nG8BfgZcHhFXADdW0zcDtgXuAg7rS+4kSZI06Y25hjMzrwWeSemJvjawZ/W3NvBJ4JmZeU3/sihJkqTJrJcaTjLzduAd1Z8kSZK0RD0FnB0RsRmwATA3M+/tT5YkSZI0lfQUcEbEfsCJwBOrSTsBF0TEesAlwNGZ+Y3+ZFFTwYIFC7n9rgd6+uyqa67f82fbsNrQSqzx+JUHnQ1JkiaNXp6l/mrgv4CfACcDszvzMvPPEXEdsD9gwKlFhh9+hEt+O7+nz86fP58ZMyZOwLnjthsbcEqSNAa9DIv0XuCnmbkzcHqX+ZcBWy1TriRJkjRl9BJwbgF8eynz/wSs31t2JEmSNNX0EnD+laU/aWhT4M+9ZUeSJElTTS8B5wXAARHxmEZsETEDOBj40bJmTJIkSVNDr204nwD8Gvh3YCGwa0ScCFwNLAA+0LccSpIkaVLr5UlDNwIvAP4IHAtMA/4DeBfwP8D2mTmvf1mUJEnSZDamYZEiYkXK2Ju3Z+ZLI2I68DRK4Pr7zLyjhTxKkiRpEhvrOJwrADcBRwIfz8y7gSv6nitJkiRNGWO6pZ6ZDwPzKe02JUmSpBH10mnoK5Re6qv0OzOSJEmaenp5lvoNwIrA9RFxOvB74MFmIp+lLkmSJOgt4Pyv2v/vW0KahfgsdUmSJDHKgDMiPgWcnplzgBdVk1en1Gw+0uvKI+JpwBHA84CZwPWZObNLul2ADwFbAn8ATs7MT
3dJdwRwKLARcA1wZGae30izBnASsCewCnAhcFhm3tLrdkiSJGnJRlvD+RbgUmBOZl4UEetSnpm+U2ZetAzrfwawG3AZpT3pY9qURsTzge8BXwUOp4wBenJEPJyZn6ulOwI4ATgKuJLyxKNzIuK5mXlVbZFfB7YGDgPuA44Dzo+IZ2bmA8uwLZIkSeqil1vqHdP6sP7vZ+Z3ASLiNGCbLmmOAa7MzDdV7y+MiI2B90fEFzJzQUQMAUdTaj5nV8u7iPLko/cCe1fTnksJcHfLzHOraVdThno6ADi1D9skSZKkml56qfdNZi5Y2vwqkHwxcFZj1tcot823rt5vB6wFnFlb9iOUdqS7REQnON4VuBc4r5ZuHnBxNU+SJEl9NtCAcxQ2BVYGrm1Mv6Z63bx63aJ6va5LutUpT0fqpLu+S6B7TW1ZkiRJ6qOx3FLfJCL+sfp/rep184j4S7fEmXn5MuWsmF693tOYfnf1uk4t3XBmNodnqqe7rUrXXFYn3Tpdpi/V3Llzx/qRMVl1zfWZP39+q+sYL8OxzjJty0QqhzvvXI3bbh7MU1znzJkzkPVONJZDYTkUlkNhOSxmWRQTqRzGEnB+oPqre0xPcUrbzoWUsTqntJkzZzI0NNTa8q+9cR4zZsxobfnjaWholZ63Zf78+ROqHNZddz023GzjcV/vnDlzmDVr1rivd6KxHArLobAcCsthMcuiqJfD8PBw65VkIxltwHlgq7lYsk4N5dqN6Z2az7tq6YYiYpXM/NsI6bpFCtNraSRJktRHowo4M/P0tjOyBDcBD1HaXp5Xm75l9Xp99dppu7kF8JtGuvspY3d20u0UEdMyc2Ej3fVIkiSp7yZ0p6HMHAYuoBrWqGZf4I+U8TYBLqH0Pt+nkyAiVqw+d14tuDyXUlu6cy3dk4Dtq3mSJEnqs2UZh3OZRcRqLB6O6MnAmhGxZ/X+isy8lTIw+88j4ovAGZSB3w8GDu30Ns/M4Yg4HjghIu6gBKIHUXq579dZX2ZeFhHnAF+KiMNZPPD7POC0VjdWkiRpOTXQgBPYADi7Ma3z/kDgtMz8VUS8kvIUof2B+cA76k8ZAsjM2REB8FZgQ8pQR7s1njIEpXZ0NmWQ9yHKoy338ilDkiRJ7RhowFk9v3zEJxZVTwUa8ZZ39ZSh2SOkuR84pPqTJElSyyZ0G05JkiRNfgackiRJapUBpyRJklplwClJkqRWGXBKkiSpVQackiRJapUBpyRJklplwClJkqRWGXBKkiSpVQackiRJapUBpyRJklplwClJkqRWGXBKkiSpVQackiRJapUBpyRJklplwClJkqRWGXBKkiSpVQackiRJapUBpyRJklplwClJkqRWGXBKkiSpVQackiRJapUBpyRJklplwClJkqRWGXBKkiSpVQackiRJapUBpyRJklplwClJkqRWGXBKkiSpVQackiRJapUBpyRJklplwClJkqRWGXBKkiSpVSsNOgPSZLNgwUJuv+uBcV/vqmuu39f1rja0Ems8fuW+LU+SpCWZ8AFnRBwAfKXLrFMy8y21dLsAHwK2BP4AnJyZn+6yvCOAQ4GNgGuAIzPz/Bayrilq+OFHuOS388d9vfPnz2fGjP4FnDtuu7EBpyRpXEymW+ovA55f+5vdmRERzwe+B/wG2IUSoJ4cEW+uL6AKNk8ATgF2A24EzomIrcZjAyRJkpZHE76Gs2ZOZv55CfOOAa7MzDdV7y+MiI2B90fEFzJzQUQMAUdTaj5nA0TERcDVwHuBvVvOvyRJ0nJpMtVwdlUFki8GzmrM+hrltvnW1fvtgLWAMzsJMvMR4BvALhExrf3cSpIkLX8mUw3n3IhYH5gHnAZ8KDP/DmwKrAxc20h/TfW6OfBrYIvq/XVd0q0OPBG4rf/ZliRJWr5NhoDz/4D3A5cDj1DaaL4PeCpwADC9SndP43N3V6/rVK/TgeHMfHAp6cYUcM6dO3csycds1TXXZ/788e+c0obhWGeZtmUil
cOybsuy6Od677xzNW67+Y6+LW88zZkzZ9BZmBAsh8JyKCyHxSyLYiKVw4QPODPzR8CPapN+EhH3AsdGxAcHlC0AZs6cydDQUGvLv/bGecyYMaO15Y+noaFVet6W0jt74pTDsmzLsuh3Oay77npsuNnGfVveeJkzZw6zZs0adDYGznIoLIfCcljMsijq5TA8PNx6JdlIJmsbzm9Ur1uzuIZy7UaaTs3nXdXr3cBQRKwyQjpJkiT10WQNOOtuAh5icRvNji2r1+ur107bzW7p7qeM3SlJkqQ+m6wB52uAhZShkoaBC3jssEb7An8ErqzeXwLcC+zTSRARK1afOy8zF7adaUmSpOXRhG/DGRE/ogSUc4EFlE5D/w58KTN/XyU7Dvh5RHwROAN4AXAwcGhmLgDIzOGIOB44ISLuoASiB1F6ue83jpskSZK0XJnwASflVvgbgX+g5PdG4Ejg5E6CzPxVRLyS8hSh/YH5wDsy83P1BWXm7IgAeCuwIWVIpN0y86r2N0OSJGn5NOEDzsx8O/D2UaQ7Fzh3FOlmU3sspiRJkto1WdtwSpIkaZIw4JQkSVKrDDglSZLUKgNOSZIktcqAU5IkSa0y4JQkSVKrDDglSZLUKgNOSZIktcqAU5IkSa0y4JQkSVKrDDglSZLUKgNOSZIktcqAU5IkSa0y4JQkSVKrDDglSZLUKgNOSZIktWqlQWdA0mAsWLCQ2+96YNDZGLNV11z/MflebWgl1nj8ygPKkSRpJAac0nJq+OFHuOS38wedjTGbP38+M2Y8OuDccduNDTglaQLzlrokSZJaZcApSZKkVhlwSpIkqVUGnJIkSWqVAackSZJaZcApSZKkVhlwSpIkqVUGnJIkSWqVAackSZJaZcApSZKkVhlwSpIkqVU+S13SpLdgwUJuv+uBkRNOAqsNreRz4SVNOQackia94Ycf4ZLfzh90Nvpix203NuCUNOV4S12SJEmtMuCUJElSq7ylLkkTyGjbo6665voTut2qbVEl1S13AWdEbAZ8GtgeeBA4EzgyMyfulVvScmO07VHnz5/PjBkT97JlW1RJdctVwBkRawMXArcCewIbAB8H1gdeM7icSZIkTV3LVcAJHAJMB56dmX8GiIi/A2dExAcz85qB5k6SpojxGqpqPJoW2DxAWnbLW8C5K3B+J9isfAv4MrALYMApSX0wXkNVjUfTApsHSMtueQs4t6AEl4tk5nBE3ARsPoblrAjw0EMP9TFrj7VgwSOstMKCVtcxXh75+8M9b8sqj5s2ocphWbZlWfS7HAa1HcuqWzlM1m3pZrTbMtHOi6bx2ifjUQ5/f/ghhodXbHUd/TA8PDzoLEwYlkXRKYdavDKwA3nawoULB7XucRcRDwPvy8wTG9N/CfwpM181muXMmTNne+AXLWRRkiSpLS+cNWvWLwex4uWthrNfrgBeCPwf8MiA8yJJkrQ0KwJPoMQvA7G8BZx3A2t3mT4duH60C5k1a9YwMJBfCJIkST24aZArX96eNHQdpR3nIhExBGzKGAJOSZIkjd7yFnCeC+wYEevWpv0LMFTNkyRJUp8tb52G1gbmArcAH2TxwO/nZ6YDv0uSJLVguarhzMx7gBcDfwH+G/gEcBbwxgFmS5IkaUpbrmo4JUmSNP6WqxpOSZIkjT8DTkmSJLXKgFOSJEmtWt4Gfp/wImIz4NPA9sCDwJnAkZn5wEAzNoKI2At4LTALWIcywOxngc9n5oJaul2ADwFbAn8ATs7MT3dZ3hHAocBGwDWUMji/kWYN4CRgT2AV4ELgsMy8pZFuYGUaEatTxnh9IrBtZv66Nm9/4CjgKZTyOi4zz2p8/nHAccAbKA8tuAJ4W2b+TyPdRsAngZcBC4EfAG/PzD830v0jZWSGWcBdwH9W623liVkR8Xrg7ZT9/QBwJbBvJ1/Lw/EQEXtQ9vMWwF+Bi4F3Z+aNjXRT5niIiKcBRwDPA2YC12fmzC7pJuz+H23elqUcImJF4HBgt
2o9KwFXAx9obt9ULocu6WcBlwMPZubqjXkDOQdGc36OZAznxSrAu4HXA/8A/Bk4NzMPbqSbVMeDNZwTSDVs04XAGpSD43BgX+DLA8zWaB0ODAPvBF4OfAf4FPCRToKIeD7wPeA3wC7AV4CTI+LN9QVVJ9EJwCmUC/GNwDkRsVVjnV8HdgcOA/YBZgDnR8RqtWWtzWDL9Fi6/LCLiD2B04FvU8rip8DXq5O57hOUC8r7gVcCD1G2cUZtWSsB5wHPBPYHDgK2A74XEdNq6Tap1nMXZR+dQNlfH+rDdj5GRLyX8qPjvynb+CbKRXGomj/lj4eI2JGy/dcDr6rytjnw04hYs5Zuqh0Pz6Dsq98B13ZLMJH3/2jzNgojlcOqlCDmf4ADgddQvsR/EhEvb+RpKpdDfZ0rUK4bdywhybifA2M4P0cymvNiBcr35/5Vfl4KvIsyuk493aQ7HqzhnFgOoTxm89m1GqC/A2dExAcz85qB5m7pXpGZ9QvEhVXt3lsi4ujMHAaOAa7MzDfV0mwMvD8ivpCZC6onPx1N+dU0GyAiLqL86n8vsHc17bmUk2y3zDy3mnY15ZfnAcCp1ToGVqYRMRN4M/AfwOcbsz8InJ2Z76neXxgRWwAfAH5Yff6J1effmplfrKZdCtxMqTV8V/XZVwNbATM72xMR8yk1abuw+KEG7wTuAfaq9sf5EbEWcExEfDQz7+rjtgcl2P6XzPxBbdZ3av8vD8fDvsCtwBsyc2G1vluBy4AXUO1rpt7x8P3M/G617tOAbbqkmcj7f8S89akcHgSempl3dyZExI+Bp1O+9H9QTZvq5VB3MLAWJdh5a33GAM+BEc/PPpbDgcDzgS0z8w+16WfUymFSHg/WcE4su1IGoa9X+X+LUnM41l9S46oRbHb8hlKFv051gryYMu5p3dcotwO2rt5vR7nYnFlb9iPAN4Bdar9OdwXupfyK7aSbR7mg7Fpb/iDL9BTgM8AN9YkR8VRKLdeZjfRfA7aNiPWr9y8FVqRWZpl5P+VLqLmNV9eDpcy8hBLoNNN9p7qw1tfZ2Tf9dCBwayPYXGQ5Oh4eB9zfCTYr91Sv02BqHg8jffFM5P0/hryNaKRyyMxH6sFmNW0hpcZzRm3ylC6HjohYj1Jr9zZKzWXTuJ8DYzg/RzTKcjiYEtz+YSlpJuXxYMA5sWxBo5q9OhFuohzwk80LKbcq/kR5Xv3KPPY2QueC0Nm+zrPur+uSbnVKW8hOuuu7nMDX8OiyGkiZRmm7+DTg+C6zO9u4pLKIWrrbM/POLumeXt166aTrdntmUVlExOOBjZvpqnY8D9D/snge8NuIODoi/hgRD0fE5RHxz9X85eV4OA3YIiIOi4i1I+IpwGzK9nTaWi0Px0PTRN7/o81bK6r9uB2P3ublpRw+AvwyM89bwvxBnAOjPT+XWZT2qVsDt0TE6RHxl4j4a0R8p6pJ7JiUx4MB58QyncW1H3V3UzriTBoRsQ2llusT1S+v6dWsexpJO7/uO9s3HRjOzAdHka65rE66elmNe5lWt2ROAt6VmX/pkmQsZdFM00n3OMqFZaR0nWWtvYR1NtP1y0bATpRj4K3AK4D7gPOqoGu5OB4y80JK280PVeu4GXgqsFOtVmV5OB6aJvL+H23e2nIYJYj5WG3alC+Hqn3gvsA7lpJsEOfAeJbDupTtOJJyDX01pe37VsC5UdqmdvI06Y4HA071XZTegd+i9DL8yAjJp6LjgRsz84wRU05dK1Au/q/OzG9UNRa7U4LOdw40Z+MoIrYDvgp8iXI7ai9gAaXzwqqDzJsmnuoOwEeB2Zn5i0HnZ7xE6a1/KvDxzPz9oPMzQJ2Y7C/AHpn5o8w8k3LdeAbwLwPLWR8YcE4sd7P4V1fddMqt6Qmvqt37IeWWxO6Z+XA1q/MraO3GRzq/mu6qpRuKMizESOmay+qkq5fVuJZpRDyD0qj9fdUt1LVZ/Kt79ShDVIylLJppOukeZnGvxdFs4z1LW
GczXb/cDdyZtaFKsgyvcSllOJDl4nigjNRwYWa+IzMvzMxvUhrxP4cy5EknT3TJ11Q6Hpom8v4fbd76KiKeBXyX0rHuyMbsqV4OBwNPAE6tXTdXgdKDuvbjbBDnwHiWwz2UIZwurtdeZhlO7z7KtbOTp0l3PBhwTizXsbhtBrCooe6mlGFVJrTq4P8esAHwskY7m5sojcC3aHxsy+q1s32dNind0t1PGTKkky5qjaPr6eplNd5luhll9IcLKSfo3cD3q3kXAr9g6dsIkNXrdcAGEdG8VbElcEOtXc5jtrGW7nqAzPwrMK+ZLiKeDKxG/8tiab29V2H5OR62pHQAWSQzb6OMq7dpLU8088XUOh6aJvL+H23e+iYiNgV+RBmn9vWNTmYw9cthc2BDynZ0rptHAo+v/v9wLd/jfQ6M9vxcZtWP8luWMHshVRA+Qp4m7PFgwDmxnAvsGBHr1qb9C6XH3LndPzIxVG1LvgE8C9glM2+tz6/aq11ANVxDzb7AHykXWoBLKL3q9qkte8Xqc+fVLsTnUn5p7VxL9yTKoLX1shrvMv0l8KLGX6dN0puBgzLzZsrJuU/js/sCV9R6/P+Ycvt1UZlFGWrqFTx2G59ZDdPRSfc8ygDFzXR7RMTKjXUOs7gDS7/8AFg3Ihb1XKwa6T8fmLMcHQ+3UgaUXqT6QluP6otlOTkeHmUi7/8x5K0vqiZIP66WvUdmduudPdXL4TM89rp5OvC36v/PVOnG/RwYw/nZLz8Atq83uYkyOP1awJxq0qQ8HqYtXNj8IaVBqW4jzKV8EX2QUlP4ccpwBa8ZXM5GFhGfB/6VMg5as+3RtZl5X9Uo/OeUnrtnUMYhPA44NDM/V1tWZ0Db91AO4oMojaefm5lX1dL9gHJr8nDK7YbjKNX7z6x+KU6IMo2IHSi1m4ueNBTlyUxnUX65/4QygPHbKOOl/bD22c9Qbr0eTglejqCM3fbMzJxfpVkJ+DWlsfl7KDWsJwG3Ay/IxeM/bkKpbbuA8kSJqNJ9OjPf3edtXgH4FbA+ZVy4+6tt2JYy1tvvlofjISLeQinrz1Bul65LGT9vfeAZnbsAU+14iDKodGfYlUMpNSX/Ub2/IjNvncj7f7R5W9ZyoIzg8atq+uso+2iRzLx0eSiHZgVF9ZljgSPysU8aGvdzYLTnZz/KoQoIr6Ls409QAsYTKPty604ztcl4PFjDOYFk5j2UjgV/oTyd5BOUg/yNA8zWaHV+QX2UcgGt/20NkJm/opyo21JuHx0EvKN5oGYZyPYoSu/mH1Jut+xWP4kq+1J+DZ4KnE35lfWSrD2Ka6KWaWaeTenBvSelLHYG9uty8XoH5akbx1OaK6xK2cb5tWX9nfL4trnAf1Ge/nAppQ3twlq63wMvoQQ751ACn49RAsJ+b98CSlvFn7N4/wDskJm/q9IsD8fDKZRBlV9IaZt3MuUpIy+qNzmZgsfDBpR9cDawA/Ck2vsXVeufsPt/tHnrQzlsSOmBvDrl+GheO5eXchiLcT8HxnB+jmQ058X/Vv9Pq6Z/hlKJ85Jc3CdiUh4P1nBKkiSpVdZwSpIkqVUGnJIkSWqVAackSZJaZcApSZKkVhlwSpIkqVUGnJIkSWqVAackSZJaZcApSZKkVv1/xVCuu0pwNnUAAAAASUVORK5CYII=\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAoIAAAFKCAYAAACJoz5RAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjQuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/Z1A+gAAAACXBIWXMAAAsTAAALEwEAmpwYAAAw2ElEQVR4nO3debxtc/348deNXMksEpUy9Ea3+HWpDPVrki6l4WuIypeifJMkU5Mh5Kf4lpKiCRWRVF8iDcZKGU4D1/CmK3y5kXmI7sW9vz8+a3eWbZ9pn2Gfe9br+Xicx7p7rc9a+7M++3PPfp/PtKYtXLgQSZIkNc8zep0BSZIk9YaBoCRJUkMZCEqSJDWUgaAkSVJDGQhKkiQ1lIGgJElSQxkISuMgIg6NCNdm0tNExOsiYmFEvK7XeZmsIuLiiLi41/noJCL2jYibI2LxXudlKBGxdUQ8EhEr9zovmrwmfUWWJosRBHa7jmtGRiEipgHvA3YDNgCeCcwBzgC+mJmP9jB7wxYRzwb2AbYH1gQeB+4Afgd8KTNv6GH2JoWIWAPYH9gSeD7wJHA98FPg+Mx8oGeZG0cR8WHg0cw8eRyuvQzwSeCgzHxirK8/1jLz3IiYQ8nzx3udH01OBoLS8L2v7fUHgVcD72/bfxnwfeCoicjUcEXEYsBplODpN8DBwGPAa4HPAttHxJsy8x+9y+XQIuKZwCXADOB7wNeAZwHrAlsDvwcaHQhGxJbAWZTg73vA1ZTf9xtRgoL/C7y5ZxkcXx8G7gFOHodrv59S1747DtceLycCR0fEoZn5UK8zo8nHQFAapsz8fv11RLwJeGX7/prJ1mJwACUIPCYz96/t/0ZE/BD4CXASJZiaMFUr5ZKZ+dgwT3kHMBPYtb3Vp+quW25MM7iIiYgXAWdSWkjfkJl3tB3/FKVFWCP3fuC8zPxnrzMyAj8CvkL5v/+tHudFk5CBoDQOIuJQ4JDMnFbbdwulpeoo4BjgpZRu2Y9m5oUR8Q7gMOAlwHXA7pnZ13bdlwBHAG8Enk3p6vtcZv5oiPw8i9JNeCOlRegpMvPsiDgF2DUiXpmZV9TyfHFm7tJ2vYur815X2zcd+ATwXuCFlFaZHwKfrnc5V13sJwIXAZ8BAvhgROwOLJOZL++Q/z8Cj2fmq4C1qt2/6XAfTwD31s5bgxIAvwFYA5gP/Bb4ZGZeU0v3uio/7wHWobT2Lgf8EvgA8E/K5/YeSrmfBexRD17b7usQSpd1Agdm5vntee1wj0N+tlWg+wlgZ+AFlBbdm4DPZ+aPq2QHAMsAW7UHgVUZ3Vm9T/299wA+AqwNPAD8T1VG99XSXAysCmwHfBXYGLizSndGRGxOqdcbALdR6vUvaucfWpXLSymf+9bAAkod2TczHxmifKZVefwg5TN6CDiHUr73VGluoXzO9aEct2bmi6p9w6qjA7z/i4GXA8e37X8R8Dc6/2GyEPhsZh5avV4aOBT4D2C16h6uBQ7OzEtr521MaaXfDFgC6KN0R1/Udv3nVdfbGlgZ+DvwK+DjmfkwQGb+IyKuBt6JgaA6cLKINLHWBH4AnEv5QloeODsidqL81X4apct2TeDMqjsXgIhYD7gceBnwBWBfStBzZkS8d4j33RxYAThtkLFNre6ut430pqov6Z9QgpBzgb0oX7AfBn5aHa97LeUL9Szgo5QA+RTgZRHxlECwuu//U8vfLdV25w7Xbbdx9V4/AvYGvgS8Arik+hJtdwAwixL0fZvS+vgN4JvA+pQv57MogdjTAmrKF/fXq3v/NLAkcE4VJA1oBJ/tIZQ/Fi6hlNthlLJ7ZS3NNsDfMvO3g71n7b0/U+X5LsofC6dTWr4urAKnuuUon++VlLJ6F
Dg1It5NKePzKfV6qSrvnVpnT6fUxU9V53yQUl5D+TrwRUo57U35XLYFLoqIJas0HwNup5TJ+6qfj1X3OdI62m7TanvVMPI62D3sVeXjw8DngbspwTNVPv8v5Y+cFSmf74HAdOCX9QlGEbEqcAWlLp5VXfdkSl1Yqe19+4BNhnGPaiBbBKWJtQ7w2sz8DUBEXA/8AvgOsF5m/q3a/wClden1wK+rc78MzAU2qrVEHR8RvwSOiohTM3OgCS3rV9u/DJK31rH1B0kzkB2BtwCvz8xLWjsj4irKeMktKK1rLesCr8jMP9fS3kC5x/dSvqxb3keZDHJG9fqnlC/6gyktmBdTWvnO7dACdm57a2lEfI/S4voB2lrGKK0vr8zM+VXalYF3U1pZ3lKV79ciYh1KsHRw2/kzgE0z8/fV+SdTWuyOogTjAxnuZ/tWStfk7p0uEhHLAqtTWvSGVN3fQcAFwJaZ+WS1/8+UYQK7U1r/WlYFds7M71XpfkX5LE4DXpOZv6v2t+r1djy9FeoOSmvlwirt34GDqvGpv6aDiNgU+BDwn5n53dr+8ylB087ANzLzpxFxBHBPhyEbI62j7dattjcPkmYobwW+mZkdJ25UgdqJlPq8Ra2MTgD+BBxJf0B6FKVVcdPMvLx2mUM7BHw3U4Lv51HqmfRvtghKE+vGVhBYaf0Cv7gVBLbtXxMgIlYE3kRpwXh2RDyn9UNphVmd0qU8kGWq7cODpGkdW2aQNAPZntLtfG1b3i4BFlIC2rrL6kEgQDWL9Wxgp4h4Bvz7i3En4Oet7r/M/BclqDoGaM2CPhH434j4fhUMta5Z77pdKiJWonTHJWWcYbvvtYLAyuXVe5zUFmRfDqzWocXsqlYQWL3/vZQgabOIWKHD+430s30QeGnVjdxJ694H+5zr3kQJfr/cCgIr36O0ELaPF30MOLV2f0npSr6xFQRWnlJ/23y1rSy/Um3fOkg+twceAc5vK58bqny216+BrjGSOtpuJUpX9oPDeK+BPAi8KiJWH+D4BpShEqcBK9XyuCzlj5FXVfX4GZSu3p+3BYEAdPiD8P5q+5xR5F1TlC2C0sS6rf4iMx+MCID/bUvX+rJpBQ9rUwKSQ6ufTlahBDidDCfIax3rZtbwSyhfYHcPcHyVttdzBkh3CuUL+/WUVqrXUMZ87VdPVAVY+wP7R8TzKd2/e1PG8C2gtBBRdRkeRmllbO8Kvpenu63tdetz6PT5TKN07d9V239Th2veWG3XoP8LuW4kn+3BlBbRjIjrKK1up2Vmq7uyNSt0uMH8GtX2KfUmM5+MiJuAF7WlvyMzF7Tte5C28qnV607B701tae+JiPs7vFfdS4CleWpZ17XXr4GuMZI62sm06qfbNUL3p9Tx2yLiT5RA/3tVQN3KI5RhCQNZiTLWdVlg9jDft9VC6NqmehoDQWliPTnC/a1f4K3W+y8B5w2QdrAvheuq7cspgUQnrbF59a6vgb44FuOpeX5G9R57D5C+vTtqoBnCv6B82b+XEgi+l9LidM4A6cnM24HTIuJHlIH3746I91djIY+jdOEeR1nW5wFKoHgsnXtEuv18RmPYn21mXhoRa1HGcb6ZEvB+LCI+kZlfyMyHImIuZazheOhF+UApo3sp3fSddAqwO11jJHW03T2U+1mu7f06/h+pj+9tycwzI+I3wNspn99HgQMiYpfMPI3+uvAJyri+Tu5m5DPjWwH5PSM8Tw1gICgtGlrB2RMDjaMawu8oQdBOEfG5tm7Alp2r7Zm1ffdTWr3arcFTA8Y5lK7WCwYZpzikqiXqVGD3iNiHMhngzMycN4xz50fEXygtbM+hzGjdDvhuZn6snrbqph2PL8V1OuxrtfLcOsA5I/psM/N+ysSZ70aZDX4e8NmI+O/qcz0b2CMiNmvrru2klaegv+WSqutxHcq4tLG2Ttt7PYcSqNwyyDlzKGP4/jDU7GIG/uNltHX0+mr7Yp4aCLb+vXxb+jXooJq1fSJwY
kQsD/yBMgnpNPpbyh8erC5ExHxK6++MYea9lec7h5leDeIYQWkRUC3yfBElQHra+KIY4hFS1dIYX6B84X+uw/lbA7sAZ9eXVaF8Mb06IpaopX0rZemSujOA5wL/1eHa06M8kWG4TqF0bZ5ICRCesnhvRGzQ6X6rL9VNgPvo7/57krZWqYjYkTLIfjxsFBGb1N5rJcoYx8uqAO5pRvLZVtern/sYZZzckpSFjgGOpoyn+3ZEPO0+I+K51UxhKOPO5gMfbY3LrLyH8nn+bPDb7cpH2iYzfLTanjvIOWdQvq/aJ+cQEYu1jb/8J527pEdbR1tB9Ub1ndUizfdQhifUfbhDPp/SkleNi/0b/UFkH/BX4OOd8tOqC1X3/E+AWRHxqg7p2ltiZ1KCaLuG9TS2CEqLjv+ifBldHRHfpARpqwCvosz0XXuI878AbAgcGBGvBn4MtCZevAe4hhIM1n2L0ip3fpRFp9eidNe2j/H7fpXu+CjLX/yWEoAFZczfdsDFw7nJzLy6atnbnvIl2d6qtQVweEScQ3mKyIOUwHRnSoC3V63F82zKMjMPUbpXNwR2YHQzPwczG/hZRBxHGZf5QUpQ22mpmbrhfrbXR8SllOVb7qFMLtgN+FmrpSwzb46IHSgtu9dVs6RbTxZ5BaV79bIq7T0RcThwOGV5kp9SJnh8hDKLfDzWnVsdOC8iflblf3fgl5n5q4FOqLrEj6eMCX05ZQjBPEq5bEsJEE+ukl8FfDgiDqG0PD6SmecwyjqambdVs6m3oCxdU/ct4BMR8a3q/V/L0ydvLQPcERFnUcr2IcpyQ2+hmpmdmQsi4gOUsYPXRcR3KMvhrEZ5Gsw0+ie1fLLKy8URcSKl2/u5wLsoE0luAYiIVSjDPk4Y6N7UbLYISouIakD5RlTBDWUdvg9TvuAPGsb5T1KCgF2qc46gtLq9j9Iy9Mr2VqtqQeB9KV9qx1Ja3N5K+XKqp1tA+QLanxK4HE3p7no15RFwV4/wdk+ptt/v0IpxFmXpjNUpa6ydSGlVuhl4Z2bWlzvZmzLwfgfKGLyXUr542yd/jJXfUT6THShLfcwD3lFfLLiTEXy2x1KeG3wgJXh4C6Usdmy73nmUbsPTgK0oYySPpgTCn6MEPa20R1AC0ecB/035o+Bk4I3D6ZLvwo6Ubsoj6X/axXaDnlHy+RHKkj8rUu7hKMo4ux8CF9aSHkYZU/pxyv0fV50/FnX0O8BWUZ51XXcYpZ5tS/mDazHKepR1j1I+15dRPtNjKZ/5fpTnZrfu89IqT3+g1IGvUsa53kdZd7CV7u+UPxROp5RpazzsFTx12MN/UFp9z0DqYNrChbYUS00V5bm951CevLH1YK0yEyki9qR8AUZm3jhU+skgqieLZOYevc7LZBT9TxZ5XjVObpFTddfeTHkSyNd7nZ/hqFoxL24fJyu12CIoNVhmPk5pMfgLcFZEvKLHWWrZDfj9ohIEqhmqx7YdReminvRDq6qxv2tTWl+ljiZ9RZY0vjLzn5RHsfVU1d22DWUs1IaUbjZpUsnM/6Z0oU96mXkuZf1FaUAGgpImi5UpY7oeAL6QmWf1NjuSNPU5RlCSJKmhbBHsQl9f33RKV9rfGXhFfUmSpMlgMcrKAFfOnDnzKasBGAh2Z2PgN73OhCRJ0gi8hrKG5r8ZCHbn7wAveclLWGKJJQZNOHv2bGbMGO5TgKYuy6GwHArLoZ9lUVgOheVQWA79xqIs5s+fz4033ghV/FJnINidJwGWWGIJpk+fPmTi4aRpAsuhsBwKy6GfZVFYDoXlUFgO/cawLJ42nM11BCVJkhrKQFCSJKmhDAQlSZIaykBQkiSpoQwEJUmSGspAUJIkqaEMBCVJkhrKQFCSJKmhDAQlSZIayieLTGIP/3M+j857otfZGBNLL/ecXmdBkiS1mTSBYEQsDdwArA5snJlX1Y7tDHwKeBEwBzgsM89oO/+ZwGHAfwLLA1cCe2fmn9vSrQp8GXgLsBD4G
fCxzLxnPO5rNB6d9wQXXHlbr7MxJjZcc6leZ0GSJLWZTF3Dh9IhMI2IbYFTgJ8As4BfAz+IiFltSb8E7AkcArwdmA9cEBGr1a61OHA+8DJgZ2A3YFPg7IiYNsb3I0mSNKlNihbBiJgB7AF8HDix7fDhwJmZ+cnq9UURsR7wWeDn1fmrV+d/NDO/We37A/A34GPAAdW5/wFsAMzIzGurdHOB31GCzPPG4/4kSZImo8nSIng88FXgxvrOiHgxsC5welv604CNI2Ll6vWbgcWAf3cXZ+bDlG7frWrnbQVc0woCq3SXAbe2pZMkSZryeh4IRsT7gLWBIzocXq/aXte2vxXIRS3dXZl5b4d0L4mIZ9TStV+rlW7dkeRbkiRpUdfTruGIWA44Gtg3Mx+JiPYkK1TbB9r2319tV6yla0/TSvdMYGngoSHSrT/8nBezZ88eVrq+vr6RXhqAZy27MnPnzu3q3MlmwzXX7rocphrLobAc+lkWheVQWA6F5dBvPMui12MEjwBuysxTe5yPrsyYMYPp06cPmqavr4+ZM2d2df277nuU1VZ7tKtzJ6Nuy2EqGU19mEosh36WRWE5FJZDYTn0G4uymDdv3oCNVz0LBCPipZQJHltExPLV7qVb24hYhv6Wv+WBO2unt1oK76u291dp2q0APA48Mox093XYL0mSNGX1cozgOpRA9CJKgHY/cE517CLgN8D11ev12s5tdeNmtb0eWCUiVuyQ7sbMXFBL136tVroburgHSZKkRVYvA8HfAq9v+9mnOrYHsFtm/o0SoO3Qdu6OwJWZeXf1+pfAAmD7VoJqgeq38dQlYc4DXlYtP9NK92rKQtUuHSNJkhqlZ13D1ZM8Lq7vq00W6as9WeRg4IyImAP8irJY9JuBrWvXuiMiTgA+HxFPUJaD2Q+YBhxbe4uzgKuBH0XEJyn3fzTwe6o1CSVJkpqi58vHDCUzzwR2BbYFfgFsCeyUme2B2z7A1ykTUM4GngW8KTPn1q71BOXRcrOB7wMnAX8AtsnMheN8K5IkSZNKr2cNP0VmXkxpxWvffwrlMXODnfs48InqZ7B0d/L0rmZJkqTGmfQtgpIkSRofBoKSJEkNZSAoSZLUUAaCkiRJDWUgKEmS1FAGgpIkSQ1lIChJktRQBoKSJEkNZSAoSZLUUAaCkiRJDWUgKEmS1FAGgpIkSQ1lIChJktRQBoKSJEkNZSAoSZLUUAaCkiRJDWUgKEmS1FAGgpIkSQ1lIChJktRQBoKSJEkNZSAoSZLUUAaCkiRJDWUgKEmS1FAGgpIkSQ1lIChJktRQBoKSJEkNZSAoSZLUUAaCkiRJDWUgKEmS1FAGgpIkSQ1lIChJktRQBoKSJEkNZSAoSZLUUAaCkiRJDWUgKEmS1FAGgpIkSQ1lIChJktRQBoKSJEkNZSAoSZLUUAaCkiRJDWUgKEmS1FAGgpIkSQ1lIChJktRQBoKSJEkNZSAoSZLUUAaCkiRJDWUgKEmS1FAGgpIkSQ1lIChJktRQBoKSJEkNtXiv3jgi3gV8HFgXWBq4A/gJcHhmPlhLNwv4HLB+lebYzDyuw/X2A/YEVgWuBQ7MzAva0iwDHA1sCywJXATslZm3jPX9SZIkTXa9bBFcEbgU+CDwFuDLwPuBM1sJImIT4GzgT8As4CTg2IjYo36hKgg8Ejge2Bq4CTg3IjZoe88fANsAewE7AKsBF0TEUmN9c5IkSZNdz1oEM/Nbbbsujoh/ASdGxGqZORc4GPhjZn6gSnNRRLwQOCQivpGZCyJiOvAZSkvhMQARcQlwDfBpYPtq36soQeLWmXlete8aYA6wC/C1cbxdSZKkSWeyjRG8p9ouUQV4bwDOaEtzGqX79xXV602B5YDTWwky80ngh8CsiJhW7d4KeBA4v5buNuB31TFJkqRG6VmLYEtELAY8E3gppQXw7My8JSLWB5YArms75dpquy5wFbBe9fr6DumWBlYHbq/S3ZCZCzqk23IMbkWSJGmR0
vNAELiX0qIHpbVup+rfK1TbB9rS319tV6ylm5eZjw2S7vYqXfu1WulW7LB/SLNnzx5Wur6+vm4uz7OWXZm5c+d2de5ks+Gaa3ddDlON5VBYDv0si8JyKCyHwnLoN55lMRkCwdcBSwEzKGP9zomILXqao2GaMWMG06dPHzRNX18fM2fO7Or6d933KKut9mhX505G3ZbDVDKa+jCVWA79LIvCcigsh8Jy6DcWZTFv3rwBG696Hghm5p+rf14WEX2U7t530t8lvHzbKa2Wwvuq7f3A9IhYMjP/NUS6F3bIwgq1NJIkSY0x2SaL/BlYAKxNmc07n/4xgC3rV9sbqm1rbGCndA9T1h5spYva5JF6uhuQJElqmMkWCG5CydPNmTkPuJBq+ZeaHYE7gT9Wry+jzAbeoZWgmoCyPXB+Zi6sdp9HaV3cspbuBcDm1TFJkqRG6eWTRX4BXECZtfsvYENgf+Bq4KdVssOASyPim8CpwGbA7sCerdm/mTkvIo4AjoyIuykB4m7AWvRPPCEzL4+Ic4FvR8S+wEPV9W8DTh7Pe5UkSZqMejlG8ArgvcCLq9e3ACcAX8zM+QCZ+fuIeDvlqSE7A3OBfTLzhPqFMvOYiAD4KPBcSnC5dWb+pe09dwSOoSwePZ3yiLntMnPqzMiQJEkapl4+WeQg4KBhpDuPYXTdVk8VOWaINA8DH6p+JEmSGm2yjRGUJEnSBDEQlCRJaigDQUmSpIYyEJQkSWooA0FJkqSGMhCUJElqKANBSZKkhhpxIBgRW3Z4Xq8kSZIWMd20CP4cuD0ijo6IDcY6Q5IkSZoY3QSC7wB+B+wJ/DEiro6I/SJitTHNmSRJksbViAPBzDw7M7enPNN3d+Bu4Cjg1oj4ZUS8NyKWGuN8SpIkaYx1PVkkMx/OzO9k5huBNYBPAasApwB3RcR3I+KNY5RPSZIkjbGxmjW8GPBMYDowDXgMeBPwq4j4U0TMGKP3kSRJ0hhZvNsTI2I5YHvgvcBmwBPAucAnqu0CYBvgS8BJwMajzawkSZLGzogDwYh4ByX42wpYErgS2Bv4QWbe15b8pxHxHOBro8ynJEmSxlg3LYI/Bu4Avgyckpk3DJH+auDULt5HkiRJ46ibQPDNwAWZuXA4iTPzCuCKLt5HkiRJ42jEgWBm/no8MiJJkqSJ1c0j5r4UETcNcvzGiDh6dNmSJEnSeOtm+ZitgTMGOX4G8LbusiNJkqSJ0k0g+ALglkGO31qlkSRJ0iTWTSD4EPDiQY6vSVlQWpIkSZNYN4HghcCHIuKF7Qci4kXAh6o0kiRJmsS6WT7mYGAWMDsiTgKurfbPAHYBngQOGpPcSZIkadx0s3zMTRGxGXA8sFfb4UuAvTIzxyJzkiRJGj9dPWs4M68FXlc9Pm7NaveczLx3zHImSZKkcdVVINiSmfcA94xRXiRJkjSBugoEI2IxYEtKa+AKwLS2JAsz8/BR5k2SJEnjaMSBYERsBJwFPJ+nB4AtCwEDQUmSpEmsmxbBrwHPAt4B/CYzHxjLDEmSJGlidBMIvhz4dGaeM9aZkSRJ0sTpZkHp2xm4S1iSJEmLiG4CwaOA3SNi2bHOjCRJkiZON13DKwL/BP4aET8C/pfyNJG6hZl59GgzJ0mSpPHTTSB4VO3fewyQZiFgIChJkjSJdRMIvnjMcyFJkqQJ182zhm8dj4xIkiRpYnX9iLmIWAd4HbAKcGpm3hIRSwCrAndm5vyxyaIkSZLGQzdPFnkGcALwAcoyMguB3wO3AEsA1wCHAf89ZrmUJEnSmOtm+ZhPAe8HDgI2obamYGY+Qnn83LvGJHeSJEkaN90EgrsC38nMI4G/djh+DbDOqHIlSZKkcddNIPh84IpBjj8GLNNddiRJkjRRugkE7wTWGOT4TMCZxZIkSZNcN4HgWcB/VbOGWxYCRMQsYGfgh2OQN0mSJI2jbgLBQ4HbgD8Bp1KCwE9FxB+AnwF/Af7fWGVQkiRJ42PEgWBmPgRsChwJP
Bf4F7A5sDQlSHxtZj42hnmUJEnSOOhqQenM/BclEDxybLMjSZKkidJN17AkSZKmgG6eLPKdYSRbmJkf6CI/kiRJmiDddA2/gWqWcM1iwPOq7d3AP0eZL0mSJI2zEQeCmfmiTvsj4pnAh4CPAVsMdZ2I2A54D2XdwRWBOcDXgRMzc0Et3Szgc8D6wB3AsZl5XIfr7QfsCawKXAscmJkXtKVZBjga2BZYErgI2Cszbxkqv5IkSVPNmI0RzMzHM/OrwC+Brw7jlH2BecD+wFuBnwJfAT7fShARmwBnU5aqmQWcBBwbEXvUL1QFgUcCxwNbAzcB50bEBm3v+QNgG2AvYAdgNeCCiFhqJPcqSZI0FXQ1a3gIfwHeN4x0b8vMu2uvL4qIpYGPRMRnMnMecDDwx9p4w4si4oXAIRHxjcxcEBHTgc9QWgqPAYiISyjPPP40sH2171WUIHHrzDyv2ncNpSVyF+Bro7prSZKkRcx4zBreAnh0qERtQWDLnyhdtitWAd4bgDPa0pxG6f59RfV6U2A54PTatZ+kPN1kVkRMq3ZvBTwInF9Ldxvwu+qYJElSo3Qza/jgAQ4tD7yWEqAd1WV+XgPcB/wDCGAJ4Lq2NNdW23WBq4D1qtfXd0i3NLA6cHuV7ob6+MNaui27zK8kSdIiq5uu4UMH2H8/pZt1D+CbI71oRGwE7Ap8NjOfjIgVqkMPdHgfKBNMAFYA5nV4mkk93e1VuvZrtdKt2GG/JEnSlNbNrOEx706OiFWBs4ArqE0Wmexmz549rHR9fX1dXf9Zy67M3Llzuzp3stlwzbW7LoepxnIoLId+lkVhORSWQ2E59BvPshiPySIjEhHLAT+njCvcJjMfrw61WvSWbzul1VJ4Xy3d9IhYsnr03WDpXtghCyvU0ozIjBkzmD59+qBp+vr6mDlzZjeX5677HmW11YYcbrnI6LYcppLR1IepxHLoZ1kUlkNhORSWQ7+xKIt58+YN2HjVzRjBTsHUkKqJGe3XWpKyPMwqwKaZeW/t8BxgPmVs3/m1/etX2xuqbWts4HqUySb1dA9T1h5spdsiIqZl5sK2dDcgSZLUMN10894C/K2Ln6eIiMUpM3tfDszKzFvrx6vlYy6kWv6lZkfgTuCP1evLKLOBd6hde7HqvPNrQd95lNbFLWvpXgBsXh2TJElqlG66hncDPgq8gLKUy43V/qAEabdRFoZun53b7njgbcABwFIR8erasesy8yHgMODSiPgmcCqwGbA7sGdr9m9mzouII4AjI+JuSoC4G7AWsFPrgpl5eUScC3w7IvYFWte/DTi5i3KQJElapHUTCD4PmA6snZn31w9ExCGUdflWzcz/N8R1Wi1zX+hw7PXAxZn5+4h4O+WpITsDc4F9MvOEeuLMPCYioASoz6UsCbN1Zv6l7bo7AsdQFo+eTnnE3HaZOXUG4kmSJA1TN4HgHsAX24NAgMy8t2q92xsYNBAc6JnFHdKdxzC6bqunihwzRJqHKc9D/tBw3luSJGkq62aM4EqUhZoH8uwqjSRJkiaxbgLBPwB7R8TT5jJXi0LvDVw+2oxJkiRpfHXTNfwR4GLgioi4Erip2r8OsDFlTb69xiR3kiRJGjcjbhHMzOuAl1FmBi8PbFv9LA98GXhZZl470PmSJEmaHLp6skhm3gXsU/1IkiRpETSqR8xFxDqUp4LMzswHxyZLkiRJmgjdTBYhInaKiNsoj2a7FJhZ7X9ORNwYEe1PA5EkSdIkM+JAMCL+A/g+5dm9+wPTWscy855q/85jlUFJkiSNj25aBD8N/DoztwRO6XD8cmCDUeVKkiRJ466bQHA94CeDHP8HsHJ32ZEkSdJE6SYQ/CeDP1lkLeCe7rIjSZKkidJNIHghsEtELNF+ICJWA3YHfjHajEmSJGl8dTtG8HnAVcCHgYXAVhFxFHANsAD47JjlUJIkSeOimyeL3ARsBtwJHEqZNfxx4ADgz8DmmXnb2GVRkiRJ42FEC0pHxGLA6
sBdmfnmiFgBWJsSUN6cmXePQx4lSZI0Dkb6ZJFnAHOAA4EvZub9wJVjnitJkiSNuxF1DWfm48BcyrhASZIkLcK6mSxyEmXW8JJjnRlJkiRNnJF2DQPcCCwG3BARpwA3A4+1J8rMH44yb5IkSRpH3QSC36/9+6AB0iwEDAQlSZImsWEFghHxFeCUzOwDXl/tXprSEvjkOOVNkiRJ42i4LYIfAf4A9GXmJRGxEuWZwltk5iXjljtJkiSNm24mi7RMG7NcSJIkacKNJhCUJEnSIsxAUJIkqaFGMmt4zYh4ZfXv5artuhHxSKfEmXnFqHImSZKkcTWSQPCz1U/dcR3STaMsH7NYt5mSJEnS+BtuILjruOZCkiRJE25YgWBmnjLeGZEkSdLEcrKIJElSQxkISpIkNZSBoCRJUkMZCEqSJDWUgaAkSVJDGQhKkiQ1lIGgJElSQxkISpIkNZSBoCRJUkMZCEqSJDWUgaAkSVJDGQhKkiQ1lIGgJElSQxkISpIkNZSBoCRJUkMZCEqSJDWUgaAkSVJDGQhKkiQ1lIGgJElSQxkISpIkNZSBoCRJUkMZCEqSJDXU4r1884hYG9gPeDUwA7ghM2d0SDcL+BywPnAHcGxmHtch3X7AnsCqwLXAgZl5QVuaZYCjgW2BJYGLgL0y85axuzNJkqTJr9ctgi8Ftgb+ClzXKUFEbAKcDfwJmAWcBBwbEXu0pdsPOBI4vrrmTcC5EbFB2yV/AGwD7AXsAKwGXBARS43RPUmSJC0SetoiCJyTmf8DEBEnAxt1SHMw8MfM/ED1+qKIeCFwSER8IzMXRMR04DOUlsJjqutdAlwDfBrYvtr3KkqQuHVmnlftuwaYA+wCfG1c7lKSJGkS6mmLYGYuGOx4FeC9ATij7dBplO7fV1SvNwWWA06vXftJ4IfArIiYVu3eCngQOL+W7jbgd9UxSZKkxuh11/BQ1gKW4OndxtdW23Wr7XrV9voO6ZYGVq+lu6FDAHpt7VqSJEmN0Ouu4aGsUG0faNt/f7VdsZZuXmY+Nki626t07ddqpVuxw/5BzZ49e1jp+vr6RnppAJ617MrMnTu3q3Mnmw3XXLvrcphqLIfCcuhnWRSWQ2E5FJZDv/Esi8keCE5qM2bMYPr06YOm6evrY+bMmV1d/677HmW11R7t6tzJqNtymEpGUx+mEsuhn2VRWA6F5VBYDv3GoizmzZs3YOPVZO8abrXoLd+2v9VSeF8t3fSIWHIY6dqv1Up3X4f9kiRJU9ZkDwTnAPPpHwPYsn61vaHatsYGdkr3MGXtwVa6qE0eqae7AUmSpAaZ1IFgZs4DLqRa/qVmR+BO4I/V68sos4F3aCWIiMWq887PzIXV7vMoLYJb1tK9ANi8OiZJktQYvX6yyFL0L9uyBrBsRGxbvb4yM28FDgMujYhvAqcCmwG7A3u2Zv9m5ryIOAI4MiLupgSIu1FmHe/Uer/MvDwizgW+HRH7Ag9V178NOHlcb1aSJGmS6fVkkVWAM9v2tV7vCpycmb+PiLdTnhqyMzAX2CczT6iflJnHRATAR4HnUpaE2Toz/9J2/R2BYyiLR0+nPGJuu8ycOrMyJqEll3wWd903NYp4qemLs8yzl+h1NiRJGrWeBoLV833bx+t1Sncew+i6rZ4qcswQaR4GPlT9aII8/uRCLrjytl5nY0y8ceMXGghKkqaEST1GUJIkSePHQFCSJKmhDAQlSZIaykBQkiSpoQwEJUmSGspAUJIkqaEMBCVJkhrKQFCSJKmhDAQlSZIaykBQkiSpoQwEJUmSGspAUJIkqaEMBCVJkhrKQFCSJKmhDAQlSZIaykBQkiSpoQwEJUmSGspAUJIkqaEMBCVJkhrKQFCSJKmhDAQlSZIaykBQkiSpoQwEJUmSGspAUJIkqaEMBCVJkhrKQFCSJKmhDAQlSZIaykBQkiSpoQwEJUmSGspAUJIkqaEMBCVJkhrKQFCSJKmhDAQlSZIaykBQkiSpoQwEJUmSG
spAUJIkqaEMBCVJkhrKQFCSJKmhDAQlSZIaykBQkiSpoQwEJUmSGspAUJIkqaEMBCVJkhrKQFCSJKmhDAQlSZIaykBQkiSpoQwEJUmSGspAUJIkqaEMBCVJkhrKQFCSJKmhFu91BiZaRKwDHAdsDjwGnA4cmJmP9jRjkiRJE6xRgWBELA9cBNwKbAusAnwRWBl4d+9yJkmSNPEaFQgCHwJWADbMzHsAIuIJ4NSIODwzr+1p7iRJkiZQ08YIbgVc0AoCK2cB84BZvcmSJElSbzStRXA94Dv1HZk5LyLmAOuO4DqLAcyfP39YiefNmzeCS/d74vH5LP6MBV2dO9ksePKJKXMvTzw+n3nzFuv6/G7rw1RjOfSzLArLobAcCsuh32jLohavPO3La9rChQtHdfFFSUQ8DhyUmUe17f8t8I/MfNdwrtPX17c58JtxyKIkSdJ4ec3MmTN/W9/RtBbBsXIl8Brg78CTPc6LJEnSYBYDnkeJX56iaYHg/cDyHfavANww3IvMnDlzHvDbIRNKkiRNDnM67WzaZJHrKeME/y0ipgNrMYJAUJIkaSpoWiB4HvDGiFiptu+dwPTqmCRJUmM0bbLI8sBs4BbgcPoXlL4gM11QWpIkNUqjWgQz8wHgDcAjwI+BLwFnAO/vYbYkSZJ6olEtgpIkSerXqBZBSZIk9TMQlCRJaigDQUmSpIZq2oLSEyIi1gGOAzYHHgNOBw7MzEd7mrEJFBG7ACd1OHR8Zn5kgrMzYSJibWA/4NXADOCGzJzRId0s4HPA+sAdwLGZedxE5nU8DaccIuJk4D87nL5dZv5o3DM5ASJiO+A9wExgRcqCrl8HTszMBbV0U70+DFkOTagPABHxLuDjlOfbL035vH8CHJ6ZD9bSTfU6MWQ5NKVO1EXE0pR1jVcHNs7Mq2rHdgY+BbyI8n/osMw8Y7TvaSA4xqolai4CbgW2pX+JmpWBJi5R8xbgwdrrO3uVkQnyUmBr4HJKi/vTWt0jYhPgbOC7wL7AZsCxEfF4Zp4wgXkdT0OWQ+VmSoBQd+M45mui7Uv5XbA/cBfweuArwJrVvqbUhyHLoTLV6wOUQPhSyvfCfcDLgUOr7ZuhMXViyHKoNKFO1B1Kh9gsIrYFTgGOAn4JvAP4QUQ8lJk/H80bOmt4jEXEgcDBwBqZeU+1byfgVGBGZl7by/xNlFqL4MqtcmiCiHhGWwvHRh1awn4OrJiZr6rt+wbwNmD1ekvRomqY5dBx/1QSEStn5t1t+74I/BewfGbOa0h9GE45nMwUrw8DiYgPAidSPu+5TagTnXQoh5NpUJ2IiBnAHygtpSdSaxGMiOuBazJz+1r6X1L+/7xyNO/rGMGxtxVlgep68HMWMA+Y1ZssaaIM9Qu6eqThGyjrV9adBqwKvGKcsjahpuoX1Ui1Bz+VPwFLAis2qD4MWg4TnJ3JqPV9sURT6sQA/l0OPc1F7xwPfJW2Fs+IeDGlC/30tvSnARtHxMqjeVO7hsfeesB36juqv3bnUD7IppldVdLbgJOBz2XmE73NUk+tRfkld13b/lZL8brAVTTHWhHxAPBsylN/jhqLMS+T3GsoXWH/AILm1od6ObQ0pj5ExGLAMynDKA4Gzs7MWyJifRpUJwYqh1qSRtSJiHgfsDZlSM1GbYfXq7YD1YkAOv2xNSy2CI69FYAHOuy/n2b95ft34BBgF8o4wZ8ABwHf6mGeJoMVqu0Dbfvvr7ZNqiN/okwoeQdlPO3twOnVsIIpKSI2AnYFvpSZT9LQ+tChHKB59eFeymTCqyi/L3eq9jetTgxUDtCQOhERywFHAwdk5iMdkoxrnbBFUOMiM38B/KK261cR8SBwaEQcnplzepQ1TRKZ+eW2Xf8TERcCn6W0Hk8pEbEqZZjIFcDne5ydnhmoHJpWH4DXAUtRZtV/BjgnIrboaY5643V0KIfMfLJBdeII4KbMPLUXb26L4Ni7H1i+w/4VKN0gTfbDajuVx7gMpfUX3PJt+
1t/8TW9jpwJvHC0Y14mm+ov/p8DjwLbZObj1aFG1YdBymEgU7I+AGTmnzPzssz8BvBOykzqd9KwOjFIOQxkStWJiHgpsAdwUEQsX608snR1eOmIWIZxrhMGgmPvevr784F/TxBYi7I2kJptDjCftjpCWSsMrCNTTkQsSVkKZBXgLZl5b+1wY+rDEOXQdH8GFlDGiDWmTnTwZ/rLoSnWofTOXkQJ+O4HzqmOXQT8hhJXwMB1IkeTAQPBsXce8MaIWKm2753A9OpYk70bWAj09TojvZKZ84ALge3bDu1IWWPxjxOeqUkiIqZRyuXWAWaZLnIiYnFKS/jLgVmZeWv9eFPqw1DlMMA5U64+DGITyvfxzU2pEwP4dzl0OjhF68RvKa2g9Z99qmN7ALtl5t8ofwDs0HbujsCVoy0LxwiOvROBvShjGQ6nf0HpMzKzfcbPlBURv6D8MptN+QtvFvBh4NuZ2fE/+VQQEUtRlhACWANYtloIFMp/2FuBw4BLI+KblPUlNwN2B/acKsuuDFUO1fYU4AfAXyldHrtRxgu9b8IyOv6Op6z9dgCwVES8unbsusx8iAbUB4YoB0oXVxPqQ+t34wWUGZ//AjakLKp9NfDTKtmUrxNDlUNErEED6kS11NzF9X0R0fpnX+3JIgcDZ1QrkPwKeDtl4e2tR5sHA8ExlpkPRMQbKKvm/5j+R8wd0NOMTbzrgfcDz6fUs5uAA4Fje5inibAKZQxLXev1rsDJmfn7iHg7cCSwMzAX2GcKPTEAhi6HsylPnPlMlfZxSkvHNpl5DlPHltX2Cx2OvR64uCH1YahyuJpm1Acok2TeC7y4en0LcALwxcycD9CQOjFoOUTEwzSnTgwpM8+s/sD+FGUm9Rxgp9E+VQR8sogkSVJjOUZQkiSpoQwEJUmSGspAUJIkqaEMBCVJkhrKQFCSJKmhDAQlSZIaykBQkiSpoQwEJUmSGur/A0w6fGYym9kTAAAAAElFTkSuQmCC\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "df1 = df[df[\"name\"].isin([\"QuerySamplesComplete\"])]\n", + "df1['delta'] = df1['ts'].diff()\n", + "ax = df1['delta'].plot.hist(bins=BINS, alpha=0.5, figsize=figsize)\n", + "ax.set_title('Time between QuerySamplesComplete (usec)');\n", + "plt.show()\n", + "\n", + "ax = df1['dur'].plot.hist(bins=BINS, alpha=0.5, figsize=figsize)\n", + "ax.set_title('Time QuerySamplesComplete (usec)');" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.3" + }, + "varInspector": { + "cols": { + "lenName": 16, + "lenType": 16, + "lenVar": 40 + }, + "kernels_config": { + "python": { + "delete_cmd_postfix": "", + "delete_cmd_prefix": "del ", + "library": "var_list.py", + "varRefreshCmd": "print(var_dic_list())" + }, + "r": { + "delete_cmd_postfix": ") ", + "delete_cmd_prefix": "rm(", + "library": "var_list.r", + "varRefreshCmd": "cat(var_dic_list()) " + } + }, + "types_to_exclude": [ + "module", + "function", + "builtin_function_or_method", + "instance", + "_Feature" + ], + "window_display": false + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/benchmarks/rnnt/ootb/inference/loadgen/utils.cc b/benchmarks/rnnt/ootb/inference/loadgen/utils.cc new file mode 100644 index 0000000..7712491 --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/loadgen/utils.cc @@ -0,0 +1,126 @@ +/* Copyright 2019 The MLPerf Authors. All Rights Reserved. 
+Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "utils.h" + +#include +#include +#include +#include +#include + +#include "logging.h" + +namespace mlperf { + +std::string DoubleToString(double value, int precision) { + std::stringstream ss; + ss.precision(precision); + ss << std::fixed << value; + return ss.str(); +} + +bool FileExists(const std::string filename) { + std::ifstream file_object(filename); + return file_object.good(); +} + +namespace { + +std::string DateTimeString(const char* format, + std::chrono::system_clock::time_point tp, + bool append_ms, + bool utc) { + std::time_t tp_time_t = std::chrono::system_clock::to_time_t(tp); + std::tm date_time = utc + ? *std::gmtime(&tp_time_t) + : *std::localtime(&tp_time_t); + constexpr size_t kDateTimeMaxSize = 256; + char date_time_cstring[kDateTimeMaxSize]; + std::strftime(date_time_cstring, kDateTimeMaxSize, format, &date_time); + std::string date_time_string(date_time_cstring); + if (!append_ms) { + return date_time_string; + } + + auto tp_time_t_part = std::chrono::system_clock::from_time_t(tp_time_t); + auto tp_remainder = tp - tp_time_t_part; + auto ms = std::chrono::duration_cast(tp_remainder) + .count(); + if (ms < 0 || ms >= 1000) { + LogDetail([ms](AsyncDetail& detail) { +#if USE_NEW_LOGGING_FORMAT + std::stringstream ss; + ss << "WARNING: Unexpected milliseconds getting date and time." 
+ << " ms: " << ms; + MLPERF_LOG_WARNING(detail, "warning_generic_message", ss.str()); +#else + detail("WARNING: Unexpected milliseconds getting date and time.", "ms", + ms); +#endif + }); + } + std::string ms_string = std::to_string(ms); + // Prefix with zeros so length is always 3. + ms_string.insert(0, std::min(2, 3 - ms_string.length()), '0'); + return date_time_string + "." + ms_string; +} + +} // namespace + +std::string CurrentDateTimeISO8601() { + return DateTimeString("%FT%TZ", std::chrono::system_clock::now(), false, + false); +} + +std::string DateTimeStringForPower(std::chrono::system_clock::time_point tp) { + return DateTimeString("%m-%d-%Y %T", tp, true, true); +} + +std::string EscapeStringJson(const std::string& in) { + std::stringstream ss; + for (auto c = in.cbegin(); c != in.cend(); c++) { + int c_val = static_cast(*c); + switch (*c) { + case '"': + ss << "\\\""; + break; + case '\\': + ss << "\\\\"; + break; + case '\b': + ss << "\\b"; + break; + case '\f': + ss << "\\f"; + break; + case '\n': + ss << "\\n"; + break; + case '\r': + ss << "\\r"; + break; + case '\t': + ss << "\\t"; + break; + default: + if (c_val >= 0x00 && c_val < 0x20) { + ss << "\\u" << std::hex << std::setw(4) << std::setfill('0') << c_val; + } else { + ss << *c; + } + } + } + return ss.str(); +} + +} // namespace mlperf diff --git a/benchmarks/rnnt/ootb/inference/loadgen/utils.h b/benchmarks/rnnt/ootb/inference/loadgen/utils.h new file mode 100644 index 0000000..c587e0c --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/loadgen/utils.h @@ -0,0 +1,70 @@ +/* Copyright 2019 The MLPerf Authors. All Rights Reserved. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +/// \file +/// \brief Various shared utility functions. + +#ifndef MLPERF_LOADGEN_UTILS_H +#define MLPERF_LOADGEN_UTILS_H + +#include +#include +#include + +#include "query_sample.h" + +namespace mlperf { + +template +void RemoveValue(T* container, const typename T::value_type& value_to_remove) { + container->erase(std::remove_if(container->begin(), container->end(), + [&](typename T::value_type v) { + return v == value_to_remove; + }), + container->end()); +} + +template +double DurationToSeconds( + const std::chrono::duration& chrono_duration) { + return std::chrono::duration_cast>( + chrono_duration) + .count(); +} + +inline double QuerySampleLatencyToSeconds(QuerySampleLatency qsl) { + return static_cast(qsl) / std::nano::den; +} + +template +inline DurationT SecondsToDuration(double seconds) { + return std::chrono::duration_cast( + std::chrono::duration(seconds)); +} + +std::string CurrentDateTimeISO8601(); + +/// \brief Uses a format that matches the one used by SPEC power +/// measurement logging. +std::string DateTimeStringForPower(std::chrono::system_clock::time_point tp); + +std::string DoubleToString(double value, int precision = 2); + +bool FileExists(const std::string filename); + +// \brief Escape special characters in a string for JSON. +// Don't use this in performance critical path. 
+std::string EscapeStringJson(const std::string& in); + +} // namespace mlperf + +#endif // MLPERF_LOADGEN_UTILS_H diff --git a/benchmarks/rnnt/ootb/inference/loadgen/version.cc b/benchmarks/rnnt/ootb/inference/loadgen/version.cc new file mode 100644 index 0000000..3216c9d --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/loadgen/version.cc @@ -0,0 +1,85 @@ +/* Copyright 2019 The MLPerf Authors. All Rights Reserved. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +/// \file +/// \brief Non-generated version logic. + +#include "version.h" + +#include "logging.h" +#include "utils.h" + +namespace mlperf { + +/// Helper function to split a string based on a delimiting character. +std::vector splitString(const std::string& input, + const std::string& delimiter) { + std::vector result; + size_t start = 0; + size_t next = 0; + while (next != std::string::npos) { + next = input.find(delimiter, start); + result.emplace_back(input, start, next - start); + start = next + 1; + } + return result; +} + +/// Converts the hash-filename pairs to a dict. 
+std::map<std::string, std::string> LoadgenSha1OfFilesToDict(
+    const std::string& in) {
+  std::map<std::string, std::string> result;
b/benchmarks/rnnt/ootb/inference/loadgen/version.h @@ -0,0 +1,39 @@ +/* Copyright 2019 The MLPerf Authors. All Rights Reserved. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +/// \file +/// \brief Declares the version-related strings that will be defined in +/// a version_generated.cc as created by version_generator.py. + +#ifndef MLPERF_LOADGEN_VERSION_H +#define MLPERF_LOADGEN_VERSION_H + +#include + +namespace mlperf { + +// Non-generated. +void LogLoadgenVersion(); + +// Definitions generated at compile time. +const std::string& LoadgenVersion(); +const std::string& LoadgenGitRevision(); +const std::string& LoadgenBuildDateLocal(); +const std::string& LoadgenBuildDateUtc(); +const std::string& LoadgenGitCommitDate(); +const std::string& LoadgenGitStatus(); +const std::string& LoadgenGitLog(); +const std::string& LoadgenSha1OfFiles(); + +} // namespace mlperf + +#endif // MLPERF_LOADGEN_VERSION_H diff --git a/benchmarks/rnnt/ootb/inference/loadgen/version_generator.py b/benchmarks/rnnt/ootb/inference/loadgen/version_generator.py new file mode 100644 index 0000000..c37c4c4 --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/loadgen/version_generator.py @@ -0,0 +1,126 @@ +# Copyright 2019 The MLPerf Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================= + +## \file +# \brief A script run by the build to generate the version definitions +# expected at link time. + +import datetime +import errno +import hashlib +import os +import sys + + +# Creates a C++ raw string literal using a delimiter that is very +# unlikely to show up in a git stats. +def make_raw_string(str) : + delimeter = "LGVG_RSLD" + return "R\"" + delimeter + "(" + str + ")" + delimeter + "\"" + +def func_def(name, string): + return ("const std::string& Loadgen" + name + "() {\n" + + " static const std::string str = " + string + ";\n" + + " return str;\n" + + "}\n\n") + + +# For clients that build the loadgen from the git respository without +# any modifications. +def generate_loadgen_version_definitions_git(ofile, git_command): + git_rev = os.popen(git_command + "rev-parse --short=10 HEAD").read() + git_commit_date = os.popen(git_command + "log --format=\"%cI\" -n 1").read() + git_status = os.popen(git_command + "status -s -uno").read() + git_log = os.popen( + git_command + "log --pretty=oneline -n 16 --no-decorate").read() + ofile.write(func_def("GitRevision", "\"" + git_rev[0:-1] + "\"")) + ofile.write(func_def("GitCommitDate", "\"" + git_commit_date[0:-1] + "\"")) + ofile.write(func_def("GitStatus", make_raw_string(git_status[0:-1]))) + ofile.write(func_def("GitLog", make_raw_string(git_log[0:-1]))) + + +# For clients that might not import the loadgen code as the original git +# repository. 
+def generate_loadgen_verstion_definitions_git_stubs(ofile): + na = "\"NA\"" + ofile.write(func_def("GitRevision", na)) + ofile.write(func_def("GitCommitDate", na)) + ofile.write(func_def("GitStatus", na)) + ofile.write(func_def("GitLog", na)) + + +# Always log the sha1 of the loadgen files, regardless of whether we are +# in the original git repository or not. +def generate_loadgen_version_definitions_sha1(ofile, loadgen_root): + """Writes definition for Sha1OfFiles.""" + sha1s = "" + loadgen_files = ( + ["/bindings/" + s for s in os.listdir(loadgen_root + "/bindings")] + + ["/demos/" + s for s in os.listdir(loadgen_root + "/demos")] + + ["/" + s for s in os.listdir(loadgen_root)]) + for fn in sorted(loadgen_files): + full_fn = loadgen_root + fn + if not os.path.isfile(full_fn): + continue + file_data = open(full_fn, "rb").read() + sha1s += hashlib.sha1(file_data).hexdigest() + " " + fn + "\n" + + ofile.write(func_def("Sha1OfFiles", make_raw_string(sha1s[0:-1]))) + + +# Outputs version function definitions to cc_filename. +# Includes SHA1's of the relevant dirs in the loadgen_root directory. 
+def generate_loadgen_version_definitions(cc_filename, loadgen_root): + """Generates the C++ source file with the loadgen version info.""" + try: + os.makedirs(os.path.dirname(cc_filename)) + except OSError as exc: + if exc.errno != errno.EEXIST: + raise + ofile = open(cc_filename, "w") + ofile.write("// DO NOT EDIT: Autogenerated by version_generator.py.\n\n") + ofile.write("#include \n\n") + ofile.write("namespace mlperf {\n\n") + ofile.write(func_def("Version", "\"1.1\"")) + + date_time_now_local = datetime.datetime.now().isoformat() + date_time_now_utc = datetime.datetime.utcnow().isoformat() + ofile.write(func_def("BuildDateLocal", "\"" + date_time_now_local + "\"")) + ofile.write(func_def("BuildDateUtc", "\"" + date_time_now_utc + "\"")) + + git_dir = "--git-dir=\"" + loadgen_root + "/../.git\" " + git_work_tree = "--work-tree=\"" + loadgen_root + "/..\" " + git_command = "git " + git_dir + git_work_tree + git_status = os.popen(git_command + "status") + git_status.read() + is_git_repo = git_status.close() is None + if is_git_repo: + generate_loadgen_version_definitions_git(ofile, git_command) + else: + generate_loadgen_verstion_definitions_git_stubs(ofile) + generate_loadgen_version_definitions_sha1(ofile, loadgen_root) + + ofile.write("} // namespace mlperf\n") + ofile.close() + + +def main(): + if len(sys.argv) != 3: + raise ValueError("Incorrect command-line arguments.") + generate_loadgen_version_definitions(sys.argv[1], sys.argv[2]) + + +if __name__ == "__main__": + main() diff --git a/benchmarks/rnnt/ootb/inference/optional_harness_ck/README.md b/benchmarks/rnnt/ootb/inference/optional_harness_ck/README.md new file mode 100644 index 0000000..896cdf7 --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/optional_harness_ck/README.md @@ -0,0 +1,303 @@ +# MLPerf Inference - Speech Recognition - RNN-T + +We describe an automated and reproducible workflow for the [RNN-T +workload](https://github.com/mlperf/inference/tree/master/v0.7/speech_recognition/rnnt) 
+implemented using the [Collective Knowledge](http://cknowledge.org) technology. It automatically +downloads the model and the dataset, preprocesses the dataset, builds the LoadGen API, etc. +For any questions or questions, please email info@dividiti.com or simply [open an issue](https://github.com/mlperf/inference/issues) on GitHub. + +**NB:** Below we give an _essential_ sequence of steps that should result in a successful setup +of the RNN-T workflow on a minimally configured Linux system. + +The steps are extracted from a [minimalistic Amazon Linux +2](https://github.com/ctuning/ck-mlperf/blob/master/docker/speech-recognition.rnnt/Dockerfile.amazonlinux.min) +Docker image, which is derived from a more verbose [Amazon Linux +2](https://github.com/ctuning/ck-mlperf/blob/master/docker/speech-recognition.rnnt/Dockerfile.amazonlinux) +Docker image by omitting steps that the [Collective Knowledge +framework](https://github.com/ctuning/ck) performs automatically. + +For example, installing the preprocessed dataset is explicit in the verbose image: +``` +#-----------------------------------------------------------------------------# +# Step 3. Download the official MLPerf Inference RNNT dataset (LibriSpeech +# dev-clean) and preprocess it to wav. +#-----------------------------------------------------------------------------# +RUN ck install package --tags=dataset,speech-recognition,dev-clean,original +# NB: Can ignore the lzma related warning. +RUN ck install package --tags=dataset,speech-recognition,dev-clean,preprocessed +#-----------------------------------------------------------------------------# +``` +but is implicit in the minimalistic image: +``` +#- #-----------------------------------------------------------------------------# +#- # Step 3. Download the official MLPerf Inference RNNT dataset (LibriSpeech +#- # dev-clean) and preprocess it to wav. 
+#- #-----------------------------------------------------------------------------# +#- RUN ck install package --tags=dataset,speech-recognition,dev-clean,original +#- # NB: Can ignore the lzma related warning. +#- RUN ck install package --tags=dataset,speech-recognition,dev-clean,preprocessed +#- #-----------------------------------------------------------------------------# +``` +because it's going to be triggered by a test performance run: +``` +#+ #-----------------------------------------------------------------------------# +#+ # Step 6. Pull all the implicit dependencies commented out in Steps 1-5. +#+ #-----------------------------------------------------------------------------# +RUN ck run program:speech-recognition-pytorch-loadgen --cmd_key=performance --skip_print_timers +#+ #-----------------------------------------------------------------------------# +``` +(Omitted steps are commented out with `#- `. Added steps are commented with `#+ `.) + +For other possible variations and workarounds see the [complete +collection](https://github.com/ctuning/ck-mlperf/blob/master/docker/speech-recognition.rnnt/README.md) +of Docker images for this workflow including Ubuntu, Debian and CentOS. + +# Table of Contents + +1. [Installation](#install) + 1. Install [system-wide prerequisites](#install_system) + 1. [Ubuntu 20.04 or similar](#install_system_ubuntu) + 1. [CentOS 7 or similar](#install_system_centos_7) + 1. [CentOS 8 or similar](#install_system_centos_8) + 1. Install [Collective Knowledge](#install_ck) (CK) and its repositories + 1. Detect [GCC](#detect_gcc) + 1. Detect [Python](#detect_python) + 1. Install [Python dependencies](#install_python_deps) + 1. Install a branch of the [MLPerf Inference](#install_inference_repo) repo +1. [Usage](#usage) + 1. [Performance](#usage_performance) + 1. 
[Accuracy](#usage_accuracy)
+ + +### Detect (system) Python +``` +$ export CK_PYTHON=/usr/bin/python3 +$ ck detect soft:compiler.python --full_path=$CK_PYTHON +$ ck show env --tags=compiler,python +Env UID: Target OS: Bits: Name: Version: Tags: + +633a6b22205eb07f linux-64 64 python 3.7.6 64bits,compiler,host-os-linux-64,lang-python,python,target-os-linux-64,v3,v3.7,v3.7.6 +``` +**NB:** CK can normally detect available Python interpreters automatically, but we are playing safe here. + + +### Install Python dependencies (in userspace) + +#### Install implicit dependencies via pip +```bash +$ export CK_PYTHON=/usr/bin/python3 +$ $CK_PYTHON -m pip install --user --upgrade \ + tqdm wheel toml unidecode inflect sndfile librosa numba==0.48 +... +Successfully installed inflect-4.1.0 librosa-0.7.2 llvmlite-0.31.0 numba-0.48.0 sndfile-0.2.0 unidecode-1.1.1 wheel-0.34.2 +``` +**NB:** These dependencies are _implicit_, i.e. CK will not try to satisfy them. If they are not installed, however, the workflow will fail. + + +#### Install explicit dependencies via CK (also via `pip`, but register with CK at the same time) +```bash +$ ck install package --tags=python-package,torch +$ ck install package --tags=python-package,pandas +$ ck install package --tags=python-package,sox +$ ck install package --tags=python-package,absl +``` +**NB:** These dependencies are _explicit_, i.e. CK will try to satisfy them automatically. On a machine with multiple versions of Python, things can get messy, so we are playing safe here. + + +### Install an MLPerf Inference [branch](https://github.com/dividiti/inference/tree/dvdt-rnnt) with [dividiti](http://dividiti.com)'s tweaks for RNN-T +```bash +$ ck install package --tags=mlperf,inference,source,dividiti.rnnt +``` +**NB:** This source will be used for building LoadGen as well. 
+ + + +## Usage + + +### Running a performance test + +The first run will end up resolving all the remaining explicit dependencies: +- preprocessing the LibriSpeech Dev-Clean dataset to wav; +- building the LoadGen API; +- downloading the PyTorch model. + +It's a performance run which should print something like: +``` +$ ck run program:speech-recognition-pytorch-loadgen --cmd_key=performance --skip_print_timers +... +Dataset loaded with 4.36 hours. Filtered 1.02 hours. Number of samples: 2513 +Running Loadgen test... +Average latency (ms) per query: +7335.167247106061 +Median latency (ms): +7391.662108 +90 percentile latency (ms): +13347.925176 +================================================ +MLPerf Results Summary +================================================ +SUT name : PySUT +Scenario : Offline +Mode : Performance +Samples per second: 4.63626 +Result is : INVALID + Min duration satisfied : NO + Min queries satisfied : Yes +Recommendations: + * Increase expected QPS so the loadgen pre-generates a larger (coalesced) query. 
+ +================================================ +Additional Stats +================================================ +Min latency (ns) : 278432559 +Max latency (ns) : 14235613054 +Mean latency (ns) : 7335167247 +50.00 percentile latency (ns) : 7521181269 +90.00 percentile latency (ns) : 13402430910 +95.00 percentile latency (ns) : 13723706550 +97.00 percentile latency (ns) : 14054764438 +99.00 percentile latency (ns) : 14235613054 +99.90 percentile latency (ns) : 14235613054 + +================================================ +Test Parameters Used +================================================ +samples_per_query : 66 +target_qps : 1 +target_latency (ns): 0 +max_async_queries : 1 +min_duration (ms): 60000 +max_duration (ms): 0 +min_query_count : 1 +max_query_count : 0 +qsl_rng_seed : 3133965575612453542 +sample_index_rng_seed : 665484352860916858 +schedule_rng_seed : 3622009729038561421 +accuracy_log_rng_seed : 0 +accuracy_log_probability : 0 +print_timestamps : false +performance_issue_unique : false +performance_issue_same : false +performance_issue_same_index : 0 +performance_sample_count : 2513 + +No warnings encountered during test. + +No errors encountered during test. +Done! + +Execution time: 38.735 sec. +``` + +The above output is the contents of `mlperf_log_summary.txt`, one of the log files generated by LoadGen. All LoadGen log files can be located in the program's temporary directory: +```bash +$ cd `ck find program:speech-recognition-pytorch-loadgen`/tmp && ls -la mlperf_log_* +-rw-r--r-- 1 anton eng 4 Jul 3 18:06 mlperf_log_accuracy.json +-rw-r--r-- 1 anton eng 20289 Jul 3 18:06 mlperf_log_detail.txt +-rw-r--r-- 1 anton eng 1603 Jul 3 18:06 mlperf_log_summary.txt +-rw-r--r-- 1 anton eng 860442 Jul 3 18:06 mlperf_log_trace.json +``` + + +### Running an accuracy test + +``` +$ ck run program:speech-recognition-pytorch-loadgen --cmd_key=accuracy --skip_print_timers +... +Dataset loaded with 4.36 hours. Filtered 1.02 hours. 
Number of samples: 2513 +Running Loadgen test... + +No warnings encountered during test. + +No errors encountered during test. +Running accuracy script: /usr/bin/python3 /disk1/homes/anton/CK-TOOLS/mlperf-inference-dividiti.rnnt/inference/v0.7/speech_recognition/rnnt/accuracy_eval.py --log_dir /disk1/homes/anton/CK/ck-mlperf/program/speech-recognition-pytorch-loadgen/tmp --dataset_dir /homes/anton/CK-TOOLS/dataset-librispeech-preprocessed-to-wav-dev-clean/../ --manifest /homes/anton/CK-TOOLS/dataset-librispeech-preprocessed-to-wav-dev-clean/wav-list.json +Dataset loaded with 4.36 hours. Filtered 1.02 hours. Number of samples: 2513 +Word Error Rate: 0.07452253714852645 +Done! + +Execution time: 502.197 sec. + +$ cd `ck find program:speech-recognition-pytorch-loadgen`/tmp && ls -la mlperf_log_* +-rw-r--r-- 1 anton eng 3862427 Jul 3 18:00 mlperf_log_accuracy.json +-rw-r--r-- 1 anton eng 20126 Jul 3 18:00 mlperf_log_detail.txt +-rw-r--r-- 1 anton eng 74 Jul 3 18:00 mlperf_log_summary.txt +-rw-r--r-- 1 anton eng 29738248 Jul 3 18:00 mlperf_log_trace.json +``` diff --git a/benchmarks/rnnt/ootb/inference/pytorch/Dockerfile b/benchmarks/rnnt/ootb/inference/pytorch/Dockerfile new file mode 100755 index 0000000..1cb52bf --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/pytorch/Dockerfile @@ -0,0 +1,46 @@ +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +ARG FROM_IMAGE_NAME=nvcr.io/nvidia/pytorch:19.09-py3 +FROM ${FROM_IMAGE_NAME} + + +RUN apt-get update && apt-get install -y libsndfile1 && apt-get install -y sox && rm -rf /var/lib/apt/lists/* + +RUN COMMIT_SHA=c6d12f9e1562833c2b4e7ad84cb22aa4ba31d18c && \ + git clone https://github.com/HawkAaron/warp-transducer deps/warp-transducer && \ + cd deps/warp-transducer && \ + git checkout $COMMIT_SHA && \ + mkdir build && \ + cd build && \ + cmake .. && \ + make VERBOSE=1 && \ + export CUDA_HOME="/usr/local/cuda" && \ + export WARP_RNNT_PATH=`pwd` && \ + export CUDA_TOOLKIT_ROOT_DIR=$CUDA_HOME && \ + export LD_LIBRARY_PATH="$CUDA_HOME/extras/CUPTI/lib64:$LD_LIBRARY_PATH" && \ + export LIBRARY_PATH=$CUDA_HOME/lib64:$LIBRARY_PATH && \ + export LD_LIBRARY_PATH=$CUDA_HOME/lib64:$LD_LIBRARY_PATH && \ + export CFLAGS="-I$CUDA_HOME/include $CFLAGS" && \ + cd ../pytorch_binding && \ + python3 setup.py install --user && \ + rm -rf ../tests test ../tensorflow_binding && \ + cd ../../.. + +WORKDIR /workspace/jasper + +COPY requirements.txt . +RUN pip install --disable-pip-version-check -U -r requirements.txt + +COPY . . diff --git a/benchmarks/rnnt/ootb/inference/pytorch/LICENSE b/benchmarks/rnnt/ootb/inference/pytorch/LICENSE new file mode 100644 index 0000000..75ee157 --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/pytorch/LICENSE @@ -0,0 +1,204 @@ + Except where otherwise noted, the following license applies to all files in this repo. + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2019 NVIDIA Corporation + Copyright 2019 Myrtle Software Limited, www.myrtle.ai + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/benchmarks/rnnt/ootb/inference/pytorch/NOTICE b/benchmarks/rnnt/ootb/inference/pytorch/NOTICE new file mode 100644 index 0000000..7916839 --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/pytorch/NOTICE @@ -0,0 +1,5 @@ +Jasper in PyTorch + +This repository includes source code (in "parts/") from: +* https://github.com/keithito/tacotron and https://github.com/ryanleary/patter licensed under MIT license. + diff --git a/benchmarks/rnnt/ootb/inference/pytorch/configs/rnnt.toml b/benchmarks/rnnt/ootb/inference/pytorch/configs/rnnt.toml new file mode 100644 index 0000000..a4cd1df --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/pytorch/configs/rnnt.toml @@ -0,0 +1,77 @@ +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. 
+# Copyright (c) 2019, Myrtle Software Limited. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +model = "RNNT" + +[input] +normalize = "per_feature" +sample_rate = 16000 +window_size = 0.02 +window_stride = 0.01 +window = "hann" +features = 80 +n_fft = 512 +frame_splicing = 3 +dither = 0.00001 +feat_type = "logfbank" +normalize_transcripts = true +trim_silence = true +pad_to = 0 # TODO +max_duration = 16.7 +speed_perturbation = true + + +cutout_rect_regions = 0 +cutout_rect_time = 60 +cutout_rect_freq = 25 + + +cutout_x_regions = 2 +cutout_y_regions = 2 +cutout_x_width = 6 +cutout_y_width = 6 + + +[input_eval] +normalize = "per_feature" +sample_rate = 16000 +window_size = 0.02 +window_stride = 0.01 +window = "hann" +features = 80 +n_fft = 512 +frame_splicing = 3 +dither = 0.00001 +feat_type = "logfbank" +normalize_transcripts = true +trim_silence = true +pad_to = 0 + + +[rnnt] +rnn_type = "lstm" +encoder_n_hidden = 1024 +encoder_pre_rnn_layers = 2 +encoder_stack_time_factor = 2 +encoder_post_rnn_layers = 3 +pred_n_hidden = 320 +pred_rnn_layers = 2 +forget_gate_bias = 1.0 +joint_n_hidden = 512 +dropout=0.32 + + +[labels] +labels = [" ", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", "'"] diff --git a/benchmarks/rnnt/ootb/inference/pytorch/dataset.py b/benchmarks/rnnt/ootb/inference/pytorch/dataset.py new file mode 100644 index 0000000..7b9036f --- /dev/null +++ 
def seq_collate_fn(batch):
    """Collate a list of audio samples into batched tensors.

    Samples are reordered by decreasing waveform length before padding, so
    downstream consumers of the packed sequence see a sorted batch.

    Args:
        batch: list of samples, each exposing ``waveform`` and ``transcript``
            tensor attributes.

    Returns:
        Tuple of (padded audio tensor, audio lengths, list of transcript
        tensors, packed transcripts, transcript lengths), all reordered by
        descending audio length.
    """
    wave_lens = torch.LongTensor([s.waveform.size(0) for s in batch])
    text_lens = torch.LongTensor([s.transcript.size(0) for s in batch])
    order = torch.argsort(wave_lens, descending=True)

    wave_lens = wave_lens[order]
    text_lens = text_lens[order]

    waveforms = [batch[i].waveform for i in order]
    transcripts = [batch[i].transcript for i in order]

    padded_audio = torch.nn.utils.rnn.pad_sequence(waveforms,
                                                   batch_first=True)
    packed_text = torch.nn.utils.rnn.pack_sequence(transcripts,
                                                   enforce_sorted=False)

    # TODO: Don't I need to stop grad at some point now?
    return (padded_audio, wave_lens, transcripts,
            packed_text, text_lens)
class AudioToTextDataLayer:
    """Data layer wrapping an AudioDataset in a torch DataLoader."""

    def __init__(self, **kwargs):
        feat_cfg = kwargs['featurizer_config']
        # Bucketing samplers need the dataset pre-sorted by duration.
        sort_by_duration = kwargs.get('sampler', 'default') == 'bucket'

        self._featurizer = WaveformFeaturizer.from_config(
            feat_cfg,
            perturbation_configs=kwargs.get('perturb_config', None))

        self._dataset = AudioDataset(
            dataset_dir=kwargs['dataset_dir'],
            manifest_filepath=kwargs['manifest_filepath'],
            labels=kwargs['labels'],
            blank_index=len(kwargs['labels']),
            sort_by_duration=sort_by_duration,
            pad_to_max=kwargs.get('pad_to_max', False),
            featurizer=self._featurizer,
            max_duration=feat_cfg.get('max_duration', None),
            min_duration=feat_cfg.get('min_duration', 0.1),
            normalize=kwargs.get('normalize_transcripts', True),
            trim=kwargs.get('trim_silence', False),
            speed_perturbation=feat_cfg.get('speed_perturbation', False))

        print('sort_by_duration', sort_by_duration)

        self._dataloader = torch.utils.data.DataLoader(
            dataset=self._dataset,
            batch_size=kwargs['batch_size'],
            collate_fn=lambda b: seq_collate_fn(b),
            drop_last=kwargs.get('drop_last', False),
            shuffle=kwargs.get('shuffle', True),
            num_workers=0,
            pin_memory=True,
            sampler=None
        )

    def __len__(self):
        return len(self._dataset)

    @property
    def data_iterator(self):
        return self._dataloader
# Result record for AudioDataset.__getitem__; hoisted to module level so the
# namedtuple class is not rebuilt on every item access.
AudioSample = namedtuple('AudioSample', ['waveform', 'transcript'])


class AudioDataset(Dataset):
    def __init__(self, dataset_dir, manifest_filepath, labels, featurizer,
                 max_duration=None, pad_to_max=False, min_duration=None,
                 blank_index=0, max_utts=0, normalize=True,
                 sort_by_duration=False, trim=False, speed_perturbation=False):
        """Dataset that loads tensors via a json file containing paths to audio files, transcripts, and durations (in seconds). Each entry is a different audio sample.

        Args:
            dataset_dir: absolute path to dataset folder
            manifest_filepath: relative path from dataset folder to manifest json as described above.
            labels: String containing all the possible characters to map to
            featurizer: Initialized featurizer class that converts paths of audio to feature tensors
            max_duration: If audio exceeds this length, do not include in dataset
            min_duration: If audio is less than this length, do not include in dataset
            pad_to_max: if specified input sequences into dnn model will be padded to max_duration
            blank_index: blank index for ctc loss / decoder
            max_utts: Limit number of utterances
            normalize: whether to normalize transcript text
            sort_by_duration: whether or not to sort sequences by increasing duration
            trim: if specified trims leading and trailing silence from an audio signal.
            speed_perturbation: specify if the data contains speed perturbation
        """
        m_paths = [manifest_filepath]
        self.manifest = Manifest(dataset_dir, m_paths, labels, blank_index,
                                 pad_to_max=pad_to_max,
                                 max_duration=max_duration,
                                 sort_by_duration=sort_by_duration,
                                 min_duration=min_duration, max_utts=max_utts,
                                 normalize=normalize,
                                 speed_perturbation=speed_perturbation)
        self.featurizer = featurizer
        self.blank_index = blank_index
        self.trim = trim
        print(
            "Dataset loaded with {0:.2f} hours. Filtered {1:.2f} hours.".format(
                self.manifest.duration / 3600,
                self.manifest.filtered_duration / 3600))

    def __getitem__(self, index):
        sample = self.manifest[index]
        # A manifest entry may list several alternative audio files
        # (e.g. speed-perturbed copies); pick one at random.
        rn_indx = np.random.randint(len(sample['audio_filepath']))
        duration = sample['audio_duration'][rn_indx] if 'audio_duration' in sample else 0
        offset = sample['offset'] if 'offset' in sample else 0
        features = self.featurizer.process(sample['audio_filepath'][rn_indx],
                                           offset=offset, duration=duration,
                                           trim=self.trim)
        return AudioSample(features,
                           torch.LongTensor(sample["transcript"]))

    def __len__(self):
        return len(self.manifest)
class ScriptGreedyDecoder(torch.nn.Module):
    """A greedy transducer decoder.

    Args:
        blank_index: index of the blank symbol in the model's output
            distribution.
        model: TorchScript model exposing ``encoder``, ``prediction`` and
            ``joint`` submodules.
        max_symbols_per_step: The maximum number of symbols that can be added
            to a sequence in a single time step; must be positive.
    """

    def __init__(self, blank_index, model, max_symbols_per_step=30):
        super().__init__()
        assert isinstance(model, torch.jit.ScriptModule)
        # assert not model.training
        self.eval()
        self._model = model
        self._blank_id = blank_index
        # Start-of-sequence sentinel; never produced by the model itself.
        self._SOS = -1
        assert max_symbols_per_step > 0
        self._max_symbols_per_step = max_symbols_per_step

    @torch.jit.export
    def forward(self, x: torch.Tensor, out_lens: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, List[List[int]]]:
        """Greedily decode a batch of encoder inputs.

        Args:
            x: A tensor of size (batch, channels, features, seq_len)
                TODO was (seq_len, batch, in_features).
            out_lens: list of int representing the length of each sequence
                output sequence.

        Returns:
            Tuple of (encoder logits, encoder logit lengths, decoded label
            index sequences — one ``List[int]`` per batch element).
        """
        # Apply optional preprocessing

        logits, logits_lens = self._model.encoder(x, out_lens)

        output: List[List[int]] = []
        for batch_idx in range(logits.size(0)):
            inseq = logits[batch_idx, :, :].unsqueeze(1)
            # inseq: TxBxF
            logitlen = logits_lens[batch_idx]
            sentence = self._greedy_decode(inseq, logitlen)
            output.append(sentence)

        return logits, logits_lens, output

    def _greedy_decode(self, x: torch.Tensor, out_len: torch.Tensor) -> List[int]:
        """Decode one utterance; returns the emitted (blank-free) label ids."""
        hidden: Optional[Tuple[torch.Tensor, torch.Tensor]] = None
        label: List[int] = []
        for time_idx in range(int(out_len.item())):
            f = x[time_idx, :, :].unsqueeze(0)

            not_blank = True
            symbols_added = 0

            # Emit symbols until blank (or the per-step cap) is produced.
            while not_blank and symbols_added < self._max_symbols_per_step:
                g, hidden_prime = self._pred_step(
                    self._get_last_symb(label),
                    hidden
                )
                logp = self._joint_step(f, g, log_normalize=False)[0, :]

                # get index k, of max prob
                v, k = logp.max(0)
                k = k.item()

                if k == self._blank_id:
                    not_blank = False
                else:
                    label.append(k)
                    # Prediction-network state advances only on non-blank.
                    hidden = hidden_prime
                symbols_added += 1

        return label

    def _pred_step(self, label: int, hidden: Optional[Tuple[torch.Tensor, torch.Tensor]]) -> Tuple[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
        if label == self._SOS:
            return self._model.prediction(None, hidden)
        # Indices above blank are shifted down — presumably because blank has
        # no embedding row; confirm against the prediction network.
        if label > self._blank_id:
            label -= 1
        label = torch.tensor([[label]], dtype=torch.int64)
        return self._model.prediction(label, hidden)

    def _joint_step(self, enc: torch.Tensor, pred: torch.Tensor, log_normalize: bool = False) -> torch.Tensor:
        logits = self._model.joint(enc, pred)[:, 0, 0, :]
        if not log_normalize:
            return logits

        probs = F.log_softmax(logits, dim=len(logits.shape) - 1)

        return probs

    def _get_last_symb(self, labels: List[int]) -> int:
        return self._SOS if len(labels) == 0 else labels[-1]
from enum import Enum


class Optimization(Enum):
    """Various levels of Optimization.
    WARNING: This might have effect on model accuracy."""
    nothing = 0
    mxprO0 = 1
    mxprO1 = 2
    mxprO2 = 3
    mxprO3 = 4


# Maps mixed-precision optimization levels to amp "opt_level" strings.
AmpOptimizations = {Optimization.mxprO0: "O0",
                    Optimization.mxprO1: "O1",
                    Optimization.mxprO2: "O2",
                    Optimization.mxprO3: "O3"}


def add_blank_label(labels):
    """Append the blank symbol ("") to a list of labels, in place.

    Args:
        labels: list of label symbols

    Returns:
        The same list, with "" appended as the blank label.

    Raises:
        ValueError: if labels is not a list.
    """
    if not isinstance(labels, list):
        raise ValueError("labels must be a list of symbols")
    labels.append("")
    return labels


def __rnnt_decoder_predictions_tensor(tensor, labels):
    """
    Takes output of greedy rnnt decoder and converts to strings.
    Args:
        tensor: model output tensor
        labels: A list of labels
    Returns:
        list of hypothesis strings, one per batch element
    """
    hypotheses = []
    labels_map = dict([(i, labels[i]) for i in range(len(labels))])
    # iterate over batch
    for ind in range(len(tensor)):
        hypothesis = ''.join([labels_map[c] for c in tensor[ind]])
        hypotheses.append(hypothesis)
    return hypotheses


def __gather_predictions(predictions_list: list, labels: list) -> list:
    results = []
    for prediction in predictions_list:
        results += __rnnt_decoder_predictions_tensor(prediction, labels=labels)
    return results


def __gather_transcripts(transcript_list: list, transcript_len_list: list,
                         labels: list) -> list:
    # NOTE(review): transcript_len_list is unused; each transcript tensor is
    # presumably already trimmed to its true length — confirm with callers.
    results = []
    labels_map = dict([(i, labels[i]) for i in range(len(labels))])
    for i, t in enumerate(transcript_list):
        target = t.numpy().tolist()
        reference = ''.join([labels_map[c] for c in target])
        results.append(reference)
    return results


def process_evaluation_batch(tensors: dict, global_vars: dict, labels: list):
    """
    Processes results of an iteration and saves it in global_vars
    Args:
        tensors: dictionary with results of an evaluation iteration, e.g.
            loss, predictions, transcript, and output
        global_vars: dictionary where processed results of iteration are saved
        labels: A list of labels
    """
    # Initialize so missing keys cannot trigger an UnboundLocalError below.
    transcript_list = []
    transcript_len_list = []
    for kv, v in tensors.items():
        if kv.startswith('predictions'):
            global_vars['predictions'] += __gather_predictions(
                v, labels=labels)
        elif kv.startswith('transcript_length'):
            transcript_len_list = v
        elif kv.startswith('transcript'):
            transcript_list = v

    global_vars['transcripts'] += __gather_transcripts(transcript_list,
                                                       transcript_len_list,
                                                       labels=labels)


def process_evaluation_epoch(global_vars: dict, tag=None):
    """
    Processes results from each worker at the end of evaluation and combine to final result
    Args:
        global_vars: dictionary containing information of entire evaluation
        tag: unused; kept for interface compatibility
    Return:
        wer: final word error rate (edit-distance and word totals from
            word_error_rate are discarded)
    """
    # Local import keeps this module importable without metrics present.
    from metrics import word_error_rate

    hypotheses = global_vars['predictions']
    references = global_vars['transcripts']

    wer, scores, num_words = word_error_rate(
        hypotheses=hypotheses, references=references)
    return wer


def print_dict(d):
    """Pretty-print a dict of arguments, right-aligned on the longest key."""
    max_len = max([len(k) for k in d.keys()])
    fmt_string = '\t%' + str(max_len) + 's : %s'
    print('Arguments:')
    for key_pair in sorted(d.items()):
        print(fmt_string % key_pair)
from typing import List, Tuple


def __levenshtein(a: List, b: List) -> int:
    """Calculates the Levenshtein distance between a and b.
    """
    n, m = len(a), len(b)
    if n > m:
        # Make sure n <= m, to use O(min(n,m)) space
        a, b = b, a
        n, m = m, n

    current = list(range(n + 1))
    for i in range(1, m + 1):
        previous, current = current, [i] + [0] * n
        for j in range(1, n + 1):
            add, delete = previous[j] + 1, current[j - 1] + 1
            change = previous[j - 1]
            if a[j - 1] != b[i - 1]:
                change = change + 1
            current[j] = min(add, delete, change)

    return current[n]


def word_error_rate(hypotheses: List[str],
                    references: List[str]) -> Tuple[float, int, int]:
    """
    Computes Average Word Error rate between two texts represented as
    corresponding lists of string. Hypotheses and references must have same length.

    Args:
        hypotheses: list of hypotheses
        references: list of references

    Returns:
        Tuple of (average word error rate, total word-level edit distance,
        total number of reference words). WER is ``inf`` when the references
        contain no words.

    Raises:
        ValueError: if the two lists differ in length.
    """
    scores = 0
    words = 0
    if len(hypotheses) != len(references):
        raise ValueError("In word error rate calculation, hypotheses and reference"
                         " lists must have the same number of elements. But I got:"
                         "{0} and {1} correspondingly".format(len(hypotheses),
                                                              len(references)))
    for h, r in zip(hypotheses, references):
        h_list = h.split()
        r_list = r.split()
        words += len(r_list)
        scores += __levenshtein(h_list, r_list)
    if words != 0:
        wer = (1.0 * scores) / words
    else:
        wer = float('inf')
    return wer, scores, words
class RNNT(torch.nn.Module):
    """RNN-Transducer model: encoder + prediction network + joint network."""

    def __init__(self, rnnt=None, num_classes=1, **kwargs):
        super().__init__()
        if kwargs.get("no_featurizer", False):
            in_features = kwargs.get("in_features")
        else:
            feat_config = kwargs.get("feature_config")
            # This may be useful in the future, for MLPerf
            # configuration.
            in_features = feat_config['features'] * \
                feat_config.get("frame_splicing", 1)

        # Hoisted: the same optional-norm lookup feeds both sub-networks.
        norm = None if "norm" not in rnnt else rnnt["norm"]

        self.encoder = Encoder(in_features,
                               rnnt["encoder_n_hidden"],
                               rnnt["encoder_pre_rnn_layers"],
                               rnnt["encoder_post_rnn_layers"],
                               rnnt["forget_gate_bias"],
                               norm,
                               rnnt["rnn_type"],
                               rnnt["encoder_stack_time_factor"],
                               rnnt["dropout"])

        self.prediction = Prediction(num_classes,
                                     rnnt["pred_n_hidden"],
                                     rnnt["pred_rnn_layers"],
                                     rnnt["forget_gate_bias"],
                                     norm,
                                     rnnt["rnn_type"],
                                     rnnt["dropout"])

        self.joint = Joint(num_classes,
                           rnnt["pred_n_hidden"],
                           rnnt["encoder_n_hidden"],
                           rnnt["joint_n_hidden"],
                           rnnt["dropout"])

    def forward(self, x_padded: torch.Tensor, x_lens: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        # Only the encoder runs in forward; decoding drives the prediction
        # and joint networks separately.
        return self.encoder(x_padded, x_lens)


class Encoder(torch.nn.Module):
    """Transducer encoder: pre-RNN, time stacking, post-RNN."""

    def __init__(self, in_features, encoder_n_hidden,
                 encoder_pre_rnn_layers, encoder_post_rnn_layers,
                 forget_gate_bias, norm, rnn_type, encoder_stack_time_factor,
                 dropout):
        super().__init__()
        self.pre_rnn = rnn(
            rnn=rnn_type,
            input_size=in_features,
            hidden_size=encoder_n_hidden,
            num_layers=encoder_pre_rnn_layers,
            norm=norm,
            forget_gate_bias=forget_gate_bias,
            dropout=dropout,
        )
        # Stacks adjacent frames, trading time resolution for feature width.
        self.stack_time = StackTime(factor=encoder_stack_time_factor)
        self.post_rnn = rnn(
            rnn=rnn_type,
            input_size=encoder_stack_time_factor * encoder_n_hidden,
            hidden_size=encoder_n_hidden,
            num_layers=encoder_post_rnn_layers,
            norm=norm,
            forget_gate_bias=forget_gate_bias,
            norm_first_rnn=True,
            dropout=dropout,
        )

    def forward(self, x_padded: torch.Tensor, x_lens: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        out, _ = self.pre_rnn(x_padded, None)
        out, out_lens = self.stack_time(out, x_lens)
        # (T, B, H) through the post RNN, then return batch-first (B, T, H).
        out, _ = self.post_rnn(out, None)
        return out.transpose(0, 1), out_lens
class Prediction(torch.nn.Module):
    """Transducer prediction (decoder) network: embedding + RNN stack."""

    def __init__(self, vocab_size, n_hidden, pred_rnn_layers,
                 forget_gate_bias, norm, rnn_type, dropout):
        super().__init__()
        # vocab_size - 1 embedding rows — presumably the blank symbol is
        # never fed back into the prediction network; confirm with decoder.
        self.embed = torch.nn.Embedding(vocab_size - 1, n_hidden)
        self.n_hidden = n_hidden
        self.dec_rnn = rnn(
            rnn=rnn_type,
            input_size=n_hidden,
            hidden_size=n_hidden,
            num_layers=pred_rnn_layers,
            norm=norm,
            forget_gate_bias=forget_gate_bias,
            dropout=dropout,
        )

    def forward(self, y: Optional[torch.Tensor],
                state: Optional[Tuple[torch.Tensor, torch.Tensor]] = None) -> Tuple[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
        """
        B - batch size
        U - label length
        H - Hidden dimension size
        L - Number of decoder layers = 2

        Args:
            y: (B, U) label tensor, or None for the start-of-sequence step.

        Returns:
            Tuple (g, hid) where:
                g: (B, U + 1, H)
                hid: (h, c) where h is the final sequence hidden state and c is
                    the final cell state:
                    h (tensor), shape (L, B, H)
                    c (tensor), shape (L, B, H)
        """
        if y is None:
            # Start-of-sequence step: there is no SOS token, so a zero
            # vector stands in for the embedded input.
            assert state is None
            # NOTE(review): batch size is hard-coded to 1 here — cannot be
            # inferred at this point; confirm before batched decoding.
            batch = 1
            y = torch.zeros((batch, 1, self.n_hidden), dtype=torch.float32)
        else:
            y = self.embed(y)

        y = y.transpose(0, 1)            # (U + 1, B, H)
        g, hid = self.dec_rnn(y, state)
        g = g.transpose(0, 1)            # (B, U + 1, H)
        return g, hid


class Joint(torch.nn.Module):
    """Transducer joint network combining encoder and prediction outputs."""

    def __init__(self, vocab_size, pred_n_hidden, enc_n_hidden,
                 joint_n_hidden, dropout):
        super().__init__()
        modules = [
            torch.nn.Linear(pred_n_hidden + enc_n_hidden, joint_n_hidden),
            torch.nn.ReLU(),
        ]
        if dropout:
            modules.append(torch.nn.Dropout(p=dropout))
        modules.append(torch.nn.Linear(joint_n_hidden, vocab_size))
        self.net = torch.nn.Sequential(*modules)

    def forward(self, f: torch.Tensor, g: torch.Tensor):
        """
        f should be shape (B, T, H)
        g should be shape (B, U + 1, H)

        returns:
            logits of shape (B, T, U, K + 1)
        """
        B, T, H_enc = f.shape
        _, U_, H_pred = g.shape

        # Broadcast every encoder frame against every prediction step.
        f = f.unsqueeze(dim=2).expand((B, T, U_, H_enc))    # (B, T, U, H)
        g = g.unsqueeze(dim=1).expand((B, T, U_, H_pred))   # (B, T, U, H)

        return self.net(torch.cat([f, g], dim=3))
+ """ + + if isinstance(labels, torch.Tensor): + return labels.type(torch.int64) + if not isinstance(labels, (list, tuple)): + raise ValueError( + f"`labels` should be a list or tensor not {type(labels)}" + ) + + batch_size = len(labels) + max_len = max(len(l) for l in labels) + + cat_labels = np.full((batch_size, max_len), fill_value=0.0, dtype=np.int32) + for e, l in enumerate(labels): + cat_labels[e, :len(l)] = l + labels = torch.LongTensor(cat_labels) + + return labels diff --git a/benchmarks/rnnt/ootb/inference/pytorch/preprocessing.py b/benchmarks/rnnt/ootb/inference/pytorch/preprocessing.py new file mode 100644 index 0000000..5818854 --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/pytorch/preprocessing.py @@ -0,0 +1,39 @@ +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from typing import Tuple + +import torch +import torch.nn as nn + +from helpers import Optimization +from parts.features import FeatureFactory + + +class AudioPreprocessing(nn.Module): + """GPU accelerated audio preprocessing + """ + + def __init__(self, **kwargs): + nn.Module.__init__(self) # For PyTorch API + self.optim_level = kwargs.get( + 'optimization_level', Optimization.nothing) + self.featurizer = FeatureFactory.from_config(kwargs) + + def forward(self, x: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]: + input_signal, length = x + length.requires_grad_(False) + processed_signal = self.featurizer(x) + processed_length = self.featurizer.get_seq_len(length) + return processed_signal, processed_length diff --git a/benchmarks/rnnt/ootb/inference/pytorch/rnn.py b/benchmarks/rnnt/ootb/inference/pytorch/rnn.py new file mode 100644 index 0000000..39e2121 --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/pytorch/rnn.py @@ -0,0 +1,109 @@ +# Copyright (c) 2019, Myrtle Software Limited. All rights reserved. +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import torch + +from typing import Optional, Tuple + + +def rnn(rnn, input_size, hidden_size, num_layers, norm=None, + forget_gate_bias=1.0, dropout=0.0, **kwargs): + """TODO""" + if rnn != "lstm": + raise ValueError(f"Unknown rnn={rnn}") + if norm not in [None]: + raise ValueError(f"unknown norm={norm}") + + if rnn == "lstm": + return LstmDrop( + input_size=input_size, + hidden_size=hidden_size, + num_layers=num_layers, + dropout=dropout, + forget_gate_bias=forget_gate_bias, + **kwargs + ) + + +class LstmDrop(torch.nn.Module): + + def __init__(self, input_size, hidden_size, num_layers, dropout, forget_gate_bias, + **kwargs): + """Returns an LSTM with forget gate bias init to `forget_gate_bias`. + + Args: + input_size: See `torch.nn.LSTM`. + hidden_size: See `torch.nn.LSTM`. + num_layers: See `torch.nn.LSTM`. + dropout: See `torch.nn.LSTM`. + forget_gate_bias: For each layer and each direction, the total value of + to initialise the forget gate bias to. + + Returns: + A `torch.nn.LSTM`. 
+        """
+        super(LstmDrop, self).__init__()
+
+        self.lstm = torch.nn.LSTM(
+            input_size=input_size,
+            hidden_size=hidden_size,
+            num_layers=num_layers,
+            dropout=dropout,
+        )
+        if forget_gate_bias is not None:
+            for name, v in self.lstm.named_parameters():
+                if "bias_ih" in name:
+                    bias = getattr(self.lstm, name)
+                    bias.data[hidden_size:2 * hidden_size].fill_(forget_gate_bias)
+                if "bias_hh" in name:
+                    bias = getattr(self.lstm, name)
+                    bias.data[hidden_size:2 * hidden_size].fill_(0)
+
+        if dropout:
+            self.inplace_dropout = torch.nn.Dropout(dropout, inplace=True)
+        else:
+            self.inplace_dropout = None
+
+    def forward(self, x: torch.Tensor,
+                h: Optional[Tuple[torch.Tensor, torch.Tensor]] = None):
+        x, h = self.lstm(x, h)
+
+        if self.inplace_dropout is not None:
+            self.inplace_dropout(x.data)
+
+        return x, h
+
+
+class StackTime(torch.nn.Module):
+
+    __constants__ = ["factor"]
+
+    def __init__(self, factor):
+        super().__init__()
+        self.factor = int(factor)
+
+    def forward(self, x, x_lens):
+        # T, B, U
+        r = torch.transpose(x, 0, 1)
+        s = r.shape
+        zeros = torch.zeros(
+            s[0], (-s[1]) % self.factor, s[2], dtype=r.dtype, device=r.device)
+        r = torch.cat([r, zeros], 1)
+        s = r.shape
+        rs = [s[0], s[1] // self.factor, s[2] * self.factor]
+        r = torch.reshape(r, rs)
+        rt = torch.transpose(r, 0, 1)
+        x_lens = torch.ceil(x_lens.float() / self.factor).int()
+        return rt, x_lens
diff --git a/benchmarks/rnnt/ootb/inference/pytorch/scripts/docker/build.sh b/benchmarks/rnnt/ootb/inference/pytorch/scripts/docker/build.sh
new file mode 100755
index 0000000..cfdc97c
--- /dev/null
+++ b/benchmarks/rnnt/ootb/inference/pytorch/scripts/docker/build.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+docker build . 
--rm -t jasper \ No newline at end of file diff --git a/benchmarks/rnnt/ootb/inference/pytorch/scripts/docker/launch.sh b/benchmarks/rnnt/ootb/inference/pytorch/scripts/docker/launch.sh new file mode 100755 index 0000000..5c9c6a3 --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/pytorch/scripts/docker/launch.sh @@ -0,0 +1,32 @@ +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +#!/bin/bash + +DATA_DIR=$1 +CHECKPOINT_DIR=$2 +RESULT_DIR=$3 + +docker run -it --rm \ + --gpus='"device=1"' \ + --shm-size=4g \ + --ulimit memlock=-1 \ + --ulimit stack=67108864 \ + -v "$DATA_DIR":/datasets \ + -v "$CHECKPOINT_DIR":/checkpoints/ \ + -v "$RESULT_DIR":/results/ \ + -v $PWD:/code \ + -v $PWD:/workspace/jasper \ + mlperf-rnnt-ref bash diff --git a/benchmarks/rnnt/ootb/inference/pytorch/scripts/download_librispeech.sh b/benchmarks/rnnt/ootb/inference/pytorch/scripts/download_librispeech.sh new file mode 100755 index 0000000..ee322fe --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/pytorch/scripts/download_librispeech.sh @@ -0,0 +1,28 @@ +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +#!/usr/bin/env bash + +DATA_SET="LibriSpeech" +DATA_ROOT_DIR="/datasets" +DATA_DIR="${DATA_ROOT_DIR}/${DATA_SET}" +if [ ! -d "$DATA_DIR" ] +then + mkdir $DATA_DIR + chmod go+rx $DATA_DIR + python utils/download_librispeech.py utils/librispeech.csv $DATA_DIR -e ${DATA_ROOT_DIR}/ +else + echo "Directory $DATA_DIR already exists." +fi diff --git a/benchmarks/rnnt/ootb/inference/pytorch/scripts/evaluation.sh b/benchmarks/rnnt/ootb/inference/pytorch/scripts/evaluation.sh new file mode 100755 index 0000000..fcd472f --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/pytorch/scripts/evaluation.sh @@ -0,0 +1,92 @@ +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +#!/bin/bash +echo "Container nvidia build = " $NVIDIA_BUILD_ID + +DATA_DIR=${1:-"/datasets/LibriSpeech"} +DATASET=${2:-"dev-clean"} +MODEL_CONFIG=${3:-"configs/jasper10x5dr_sp_offline_specaugment.toml"} +RESULT_DIR=${4:-"/results"} +CHECKPOINT=$5 +CREATE_LOGFILE=${6:-"true"} +CUDNN_BENCHMARK=${7:-"false"} +NUM_GPUS=${8:-1} +PRECISION=${9:-"fp32"} +NUM_STEPS=${10:-"-1"} +SEED=${11:-0} +BATCH_SIZE=${12:-64} + + +if [ "$CREATE_LOGFILE" = "true" ] ; then + export GBS=$(expr $BATCH_SIZE \* $NUM_GPUS) + printf -v TAG "jasper_evaluation_${DATASET}_%s_gbs%d" "$PRECISION" $GBS + DATESTAMP=`date +'%y%m%d%H%M%S'` + LOGFILE="${RESULT_DIR}/${TAG}.${DATESTAMP}.log" + printf "Logs written to %s\n" "$LOGFILE" +fi + + + +PREC="" +if [ "$PRECISION" = "fp16" ] ; then + PREC="--fp16" +elif [ "$PRECISION" = "fp32" ] ; then + PREC="" +else + echo "Unknown argument" + exit -2 +fi + +STEPS="" +if [ "$NUM_STEPS" -gt 0 ] ; then + STEPS=" --steps $NUM_STEPS" +fi + +if [ "$CUDNN_BENCHMARK" = "true" ] ; then + CUDNN_BENCHMARK=" --cudnn_benchmark" +else + CUDNN_BENCHMARK="" +fi + + +CMD=" inference.py " +CMD+=" --batch_size $BATCH_SIZE " +CMD+=" --dataset_dir $DATA_DIR " +CMD+=" --val_manifest $DATA_DIR/librispeech-${DATASET}-wav.json " +CMD+=" --model_toml $MODEL_CONFIG " +CMD+=" --seed $SEED " +CMD+=" --ckpt $CHECKPOINT " +CMD+=" $CUDNN_BENCHMARK" +CMD+=" $PREC " +CMD+=" $STEPS " + + +if [ "$NUM_GPUS" -gt 1 ] ; then + CMD="python3 -m torch.distributed.launch --nproc_per_node=$NUM_GPUS $CMD" +else + CMD="python3 $CMD" +fi + + +set -x +if [ -z "$LOGFILE" ] ; then + $CMD +else + ( + $CMD + ) |& tee "$LOGFILE" +fi +set +x diff --git a/benchmarks/rnnt/ootb/inference/pytorch/scripts/inference.sh b/benchmarks/rnnt/ootb/inference/pytorch/scripts/inference.sh new file mode 100755 index 0000000..2d4474c --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/pytorch/scripts/inference.sh @@ -0,0 +1,104 @@ +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +#!/bin/bash +echo "Container nvidia build = " $NVIDIA_BUILD_ID + + +DATA_DIR=${1-"/datasets/LibriSpeech"} +DATASET=${2:-"dev-clean"} +MODEL_CONFIG=${3:-"configs/jasper10x5dr_sp_offline_specaugment.toml"} +RESULT_DIR=${4:-"/results"} +CHECKPOINT=$5 +CREATE_LOGFILE=${6:-"true"} +CUDNN_BENCHMARK=${7:-"false"} +PRECISION=${8:-"fp32"} +NUM_STEPS=${9:-"-1"} +SEED=${10:-0} +BATCH_SIZE=${11:-64} +MODELOUTPUT_FILE=${12:-"none"} +PREDICTION_FILE=${13:-"$RESULT_DIR/${DATASET}.predictions"} + +if [ "$CREATE_LOGFILE" = "true" ] ; then + export GBS=$(expr $BATCH_SIZE) + printf -v TAG "jasper_inference_${DATASET}_%s_gbs%d" "$PRECISION" $GBS + DATESTAMP=`date +'%y%m%d%H%M%S'` + LOGFILE="${RESULT_DIR}/${TAG}.${DATESTAMP}.log" + printf "Logs written to %s\n" "$LOGFILE" +fi + + + +PREC="" +if [ "$PRECISION" = "fp16" ] ; then + PREC="--fp16" +elif [ "$PRECISION" = "fp32" ] ; then + PREC="" +else + echo "Unknown argument" + exit -2 +fi + +PRED="" +if [ "$PREDICTION_FILE" = "none" ] ; then + PRED="" +else + PRED=" --save_prediction $PREDICTION_FILE" +fi + +OUTPUT="" +if [ "$MODELOUTPUT_FILE" = "none" ] ; then + OUTPUT=" " +else + OUTPUT=" --logits_save_to $MODELOUTPUT_FILE" +fi + + +if [ "$CUDNN_BENCHMARK" = "true" ]; then + CUDNN_BENCHMARK=" --cudnn_benchmark" +else + CUDNN_BENCHMARK="" +fi + +STEPS="" +if [ "$NUM_STEPS" -gt 0 ] ; then + STEPS=" --steps $NUM_STEPS" +fi + +CMD=" python inference.py " +CMD+=" --batch_size $BATCH_SIZE " 
+CMD+=" --dataset_dir $DATA_DIR " +CMD+=" --val_manifest $DATA_DIR/librispeech-${DATASET}-wav.json " +CMD+=" --model_toml $MODEL_CONFIG " +CMD+=" --seed $SEED " +CMD+=" --ckpt $CHECKPOINT " +CMD+=" $CUDNN_BENCHMARK" +CMD+=" $PRED " +CMD+=" $OUTPUT " +CMD+=" $PREC " +CMD+=" $STEPS " + + +set -x +if [ -z "$LOGFILE" ] ; then + $CMD +else + ( + $CMD + ) |& tee "$LOGFILE" +fi +set +x +echo "MODELOUTPUT_FILE: ${MODELOUTPUT_FILE}" +echo "PREDICTION_FILE: ${PREDICTION_FILE}" diff --git a/benchmarks/rnnt/ootb/inference/pytorch/scripts/inference_benchmark.sh b/benchmarks/rnnt/ootb/inference/pytorch/scripts/inference_benchmark.sh new file mode 100755 index 0000000..7aeea84 --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/pytorch/scripts/inference_benchmark.sh @@ -0,0 +1,84 @@ +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +#!/bin/bash + +echo "Container nvidia build = " $NVIDIA_BUILD_ID + + +DATA_DIR=${1:-"/datasets/LibriSpeech"} +DATASET=${2:-"dev-clean"} +MODEL_CONFIG=${3:-"configs/jasper10x5dr_sp_offline_specaugment.toml"} +RESULT_DIR=${4:-"/results"} +CHECKPOINT=$5 +CREATE_LOGFILE=${6:-"true"} +CUDNN_BENCHMARK=${7:-"true"} +PRECISION=${8:-"fp32"} +NUM_STEPS=${9:-"-1"} +MAX_DURATION=${10:-"36"} +SEED=${11:-0} +BATCH_SIZE=${12:-64} + +PREC="" +if [ "$PRECISION" = "fp16" ] ; then + PREC="--fp16" +elif [ "$PRECISION" = "fp32" ] ; then + PREC="" +else + echo "Unknown argument" + exit -2 +fi +STEPS="" +if [ "$NUM_STEPS" -gt 0 ] ; then + STEPS=" --steps $NUM_STEPS" +fi +if [ "$CUDNN_BENCHMARK" = "true" ] ; then + CUDNN_BENCHMARK=" --cudnn_benchmark" +else + CUDNN_BENCHMARK="" +fi + +CMD=" python inference_benchmark.py" +CMD+=" --batch_size=$BATCH_SIZE" +CMD+=" --model_toml=$MODEL_CONFIG" +CMD+=" --seed=$SEED" +CMD+=" --dataset_dir=$DATA_DIR" +CMD+=" --val_manifest $DATA_DIR/librispeech-${DATASET}-wav.json " +CMD+=" --ckpt=$CHECKPOINT" +CMD+=" --max_duration=$MAX_DURATION" +CMD+=" --pad_to=-1" +CMD+=" $CUDNN_BENCHMARK" +CMD+=" $PREC" +CMD+=" $STEPS" + + +if [ "$CREATE_LOGFILE" = "true" ] ; then + export GBS=$(expr $BATCH_SIZE ) + printf -v TAG "jasper_inference_benchmark_%s_gbs%d" "$PRECISION" $GBS + DATESTAMP=`date +'%y%m%d%H%M%S'` + LOGFILE="${RESULT_DIR}/${TAG}.${DATESTAMP}.log" + printf "Logs written to %s\n" "$LOGFILE" +fi + +set -x +if [ -z "$LOGFILE" ] ; then + $CMD +else + ( + $CMD + ) |& tee "$LOGFILE" + grep 'latency' "$LOGFILE" +fi +set +x diff --git a/benchmarks/rnnt/ootb/inference/pytorch/scripts/preprocess_librispeech.sh b/benchmarks/rnnt/ootb/inference/pytorch/scripts/preprocess_librispeech.sh new file mode 100755 index 0000000..7cfe5cc --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/pytorch/scripts/preprocess_librispeech.sh @@ -0,0 +1,51 @@ +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +#!/usr/bin/env bash + +python ./utils/convert_librispeech.py \ + --input_dir /datasets/LibriSpeech/train-clean-100 \ + --dest_dir /datasets/LibriSpeech/train-clean-100-wav \ + --output_json /datasets/LibriSpeech/librispeech-train-clean-100-wav.json \ + --speed 0.9 1.1 +python ./utils/convert_librispeech.py \ + --input_dir /datasets/LibriSpeech/train-clean-360 \ + --dest_dir /datasets/LibriSpeech/train-clean-360-wav \ + --output_json /datasets/LibriSpeech/librispeech-train-clean-360-wav.json \ + --speed 0.9 1.1 +python ./utils/convert_librispeech.py \ + --input_dir /datasets/LibriSpeech/train-other-500 \ + --dest_dir /datasets/LibriSpeech/train-other-500-wav \ + --output_json /datasets/LibriSpeech/librispeech-train-other-500-wav.json \ + --speed 0.9 1.1 + + +python ./utils/convert_librispeech.py \ + --input_dir /datasets/LibriSpeech/dev-clean \ + --dest_dir /datasets/LibriSpeech/dev-clean-wav \ + --output_json /datasets/LibriSpeech/librispeech-dev-clean-wav.json +python ./utils/convert_librispeech.py \ + --input_dir /datasets/LibriSpeech/dev-other \ + --dest_dir /datasets/LibriSpeech/dev-other-wav \ + --output_json /datasets/LibriSpeech/librispeech-dev-other-wav.json + + +python ./utils/convert_librispeech.py \ + --input_dir /datasets/LibriSpeech/test-clean \ + --dest_dir /datasets/LibriSpeech/test-clean-wav \ + --output_json /datasets/LibriSpeech/librispeech-test-clean-wav.json +python ./utils/convert_librispeech.py \ 
+ --input_dir /datasets/LibriSpeech/test-other \ + --dest_dir /datasets/LibriSpeech/test-other-wav \ + --output_json /datasets/LibriSpeech/librispeech-test-other-wav.json diff --git a/benchmarks/rnnt/ootb/inference/pytorch/scripts/train.sh b/benchmarks/rnnt/ootb/inference/pytorch/scripts/train.sh new file mode 100755 index 0000000..d59ce8e --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/pytorch/scripts/train.sh @@ -0,0 +1,113 @@ +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# Copyright (c) 2019, Myrtle Software Limited. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+
+#!/bin/bash
+echo "Container nvidia build = " $NVIDIA_BUILD_ID
+
+DATA_DIR=${1:-"/datasets/LibriSpeech"}
+MODEL_CONFIG=${2:-"configs/rnnt.toml"}
+RESULT_DIR=${3:-"/results"}
+CHECKPOINT=${4:-"none"}
+CREATE_LOGFILE=${5:-"true"}
+CUDNN_BENCHMARK=${6:-"true"}
+NUM_GPUS=${7:-8}
+PRECISION=${8:-"fp16"}
+EPOCHS=${9:-100}
+SEED=${10:-6}
+BATCH_SIZE=${11:-8}
+EVAL_BATCH_SIZE=${12:-2}
+LEARNING_RATE=${13:-"0.001"}
+LEARNING_RATE_WARMUP=${14:-"8000"}
+GRADIENT_ACCUMULATION_STEPS=${15:-1}
+LAUNCH_OPT=${LAUNCH_OPT:-"none"}
+
+
+PREC=""
+if [ "$PRECISION" = "fp16" ] ; then
+   PREC="--fp16"
+elif [ "$PRECISION" = "fp32" ] ; then
+   PREC=""
+else
+   echo "Unknown argument"
+   exit -2
+fi
+
+CUDNN=""
+if [ "$CUDNN_BENCHMARK" = "true" ] && [ "$PRECISION" = "fp16" ]; then
+   CUDNN=" --cudnn"
+else
+   CUDNN=""
+fi
+
+
+
+if [ "$CHECKPOINT" = "none" ] ; then
+   CHECKPOINT=""
+else
+   CHECKPOINT=" --ckpt=${CHECKPOINT}"
+fi
+
+
+CMD=" train.py"
+CMD+=" --batch_size=$BATCH_SIZE"
+CMD+=" --eval_batch_size=$EVAL_BATCH_SIZE"
+CMD+=" --num_epochs=$EPOCHS"
+CMD+=" --output_dir=$RESULT_DIR"
+CMD+=" --model_toml=$MODEL_CONFIG"
+CMD+=" --lr=$LEARNING_RATE"
+CMD+=" --lr_warmup=$LEARNING_RATE_WARMUP"
+CMD+=" --seed=$SEED"
+CMD+=" --optimizer=adam"
+CMD+=" --dataset_dir=$DATA_DIR"
+CMD+=" --val_manifest=$DATA_DIR/librispeech-dev-clean-wav.json"
+CMD+=" --train_manifest=$DATA_DIR/librispeech-train-clean-100-wav.json,$DATA_DIR/librispeech-train-clean-360-wav.json,$DATA_DIR/librispeech-train-other-500-wav.json"
+CMD+=" --weight_decay=1e-3"
+CMD+=" --save_freq=100"
+CMD+=" --eval_freq=1"
+CMD+=" --train_freq=250"
+CMD+=" --lr_decay"
+CMD+=" --gradient_accumulation_steps=$GRADIENT_ACCUMULATION_STEPS "
+CMD+=" $CHECKPOINT"
+CMD+=" $PREC"
+CMD+=" $CUDNN"
+
+
+if [ "${LAUNCH_OPT}" != "none" ]; then
+   CMD="python -m $LAUNCH_OPT $CMD"
+elif [ "$NUM_GPUS" -gt 1 ] ; then
+   CMD="python3 -m multiproc --nproc_per_node=$NUM_GPUS $CMD"
+else
+   CMD="python3 $CMD"
+fi
+
+
+if [ "$CREATE_LOGFILE" = "true" ] ; then
+   
export GBS=$(expr $BATCH_SIZE \* $NUM_GPUS) + printf -v TAG "rnnt_train_%s_gbs%d" "$PRECISION" $GBS + DATESTAMP=`date +'%y%m%d%H%M%S'` + LOGFILE=$RESULT_DIR/$TAG.$DATESTAMP.log + printf "Logs written to %s\n" "$LOGFILE" +fi + +set -x +if [ -z "$LOGFILE" ] ; then + $CMD +else + ( + $CMD + ) |& tee $LOGFILE +fi +set +x diff --git a/benchmarks/rnnt/ootb/inference/pytorch/scripts/train_benchmark.sh b/benchmarks/rnnt/ootb/inference/pytorch/scripts/train_benchmark.sh new file mode 100755 index 0000000..7b5a337 --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/pytorch/scripts/train_benchmark.sh @@ -0,0 +1,130 @@ +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +#!/bin/bash + +echo "Container nvidia build = " $NVIDIA_BUILD_ID + +DATA_DIR=${1:-"/datasets/LibriSpeech"} +MODEL_CONFIG=${2:-"configs/jasper10x5dr_sp_offline_specaugment.toml"} +RESULT_DIR=${3:-"/results"} +CREATE_LOGFILE=${4:-"true"} +CUDNN_BENCHMARK=${5:-"true"} +NUM_GPUS=${6:-8} +PRECISION=${7:-"fp16"} +NUM_STEPS=${8:-"-1"} +MAX_DURATION=${9:-16.7} +SEED=${10:-0} +BATCH_SIZE=${11:-64} +LEARNING_RATE=${12:-"0.015"} +GRADIENT_ACCUMULATION_STEPS=${13:-1} +PRINT_FREQUENCY=${14:-1} + + +PREC="" +if [ "$PRECISION" = "fp16" ] ; then + PREC=" --fp16" +elif [ "$PRECISION" = "fp32" ] ; then + PREC="" +else + echo "Unknown argument" + exit -2 +fi + +STEPS="" +if [ "$NUM_STEPS" -ne "-1" ] ; then + STEPS=" --num_steps=$NUM_STEPS" +elif [ "$NUM_STEPS" = "-1" ] ; then + STEPS="" +else + echo "Unknown argument" + exit -2 +fi + +CUDNN="" +if [ "$CUDNN_BENCHMARK" = "true" ] ; then + CUDNN=" --cudnn" +else + CUDNN="" +fi + + +CMD=" train.py" +CMD+=" --batch_size=$BATCH_SIZE" +CMD+=" --num_epochs=400" +CMD+=" --output_dir=$RESULT_DIR" +CMD+=" --model_toml=$MODEL_CONFIG" +CMD+=" --lr=$LEARNING_RATE" +CMD+=" --seed=$SEED" +CMD+=" --optimizer=novograd" +CMD+=" --gradient_accumulation_steps=$GRADIENT_ACCUMULATION_STEPS" +CMD+=" --dataset_dir=$DATA_DIR" +CMD+=" --val_manifest=$DATA_DIR/librispeech-dev-clean-wav.json" +CMD+=" --train_manifest=$DATA_DIR/librispeech-train-clean-100-wav.json,$DATA_DIR/librispeech-train-clean-360-wav.json,$DATA_DIR/librispeech-train-other-500-wav.json" +CMD+=" --weight_decay=1e-3" +CMD+=" --save_freq=100000" +CMD+=" --eval_freq=100000" +CMD+=" --max_duration=$MAX_DURATION" +CMD+=" --pad_to_max" +CMD+=" --train_freq=$PRINT_FREQUENCY" +CMD+=" --lr_decay" +CMD+=" $CUDNN" +CMD+=" $PREC" +CMD+=" $STEPS" + +if [ "$NUM_GPUS" -gt 1 ] ; then + CMD="python3 -m torch.distributed.launch --nproc_per_node=$NUM_GPUS $CMD" +else + CMD="python3 $CMD" +fi + + +if [ "$CREATE_LOGFILE" = "true" ] ; then + export GBS=$(expr $BATCH_SIZE \* $NUM_GPUS) + printf -v TAG 
"jasper_train_benchmark_%s_gbs%d" "$PRECISION" $GBS + DATESTAMP=`date +'%y%m%d%H%M%S'` + LOGFILE="${RESULT_DIR}/${TAG}.${DATESTAMP}.log" + printf "Logs written to %s\n" "$LOGFILE" + +fi + +if [ -z "$LOGFILE" ] ; then + + set -x + $CMD + set +x +else + + set -x + ( + $CMD + ) |& tee "$LOGFILE" + + set +x + + mean_latency=`cat "$LOGFILE" | grep 'Step time' | awk '{print $3}' | tail -n +2 | egrep -o '[0-9.]+'| awk 'BEGIN {total=0} {total+=$1} END {printf("%.2f\n",total/NR)}'` + mean_throughput=`python -c "print($BATCH_SIZE*$NUM_GPUS/${mean_latency})"` + training_wer_per_pgu=`cat "$LOGFILE" | grep 'training_batch_WER'| awk '{print $2}' | tail -n 1 | egrep -o '[0-9.]+'` + training_loss_per_pgu=`cat "$LOGFILE" | grep 'Loss@Step'| awk '{print $4}' | tail -n 1 | egrep -o '[0-9.]+'` + final_eval_wer=`cat "$LOGFILE" | grep 'Evaluation WER'| tail -n 1 | egrep -o '[0-9.]+'` + final_eval_loss=`cat "$LOGFILE" | grep 'Evaluation Loss'| tail -n 1 | egrep -o '[0-9.]+'` + + echo "max duration: $MAX_DURATION s" | tee -a "$LOGFILE" + echo "mean_latency: $mean_latency s" | tee -a "$LOGFILE" + echo "mean_throughput: $mean_throughput sequences/s" | tee -a "$LOGFILE" + echo "training_wer_per_pgu: $training_wer_per_pgu" | tee -a "$LOGFILE" + echo "training_loss_per_pgu: $training_loss_per_pgu" | tee -a "$LOGFILE" + echo "final_eval_loss: $final_eval_loss" | tee -a "$LOGFILE" + echo "final_eval_wer: $final_eval_wer" | tee -a "$LOGFILE" +fi diff --git a/benchmarks/rnnt/ootb/inference/pytorch/utils/__init__.py b/benchmarks/rnnt/ootb/inference/pytorch/utils/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/benchmarks/rnnt/ootb/inference/pytorch/utils/convert_librispeech.py b/benchmarks/rnnt/ootb/inference/pytorch/utils/convert_librispeech.py new file mode 100644 index 0000000..e90076c --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/pytorch/utils/convert_librispeech.py @@ -0,0 +1,82 @@ +#!/usr/bin/env python +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import argparse +import os +import glob +import multiprocessing +import json + +import pandas as pd + +from preprocessing_utils import parallel_preprocess + +parser = argparse.ArgumentParser(description='Preprocess LibriSpeech.') +parser.add_argument('--input_dir', type=str, required=True, + help='LibriSpeech collection input dir') +parser.add_argument('--dest_dir', type=str, required=True, + help='Output dir') +parser.add_argument('--output_json', type=str, default='./', + help='name of the output json file.') +parser.add_argument('-s', '--speed', type=float, nargs='*', + help='Speed perturbation ratio') +parser.add_argument('--target_sr', type=int, default=None, + help='Target sample rate. 
' + 'defaults to the input sample rate') +parser.add_argument('--overwrite', action='store_true', + help='Overwrite file if exists') +parser.add_argument('--parallel', type=int, default=multiprocessing.cpu_count(), + help='Number of threads to use when processing audio files') +args = parser.parse_args() + +args.input_dir = args.input_dir.rstrip('/') +args.dest_dir = args.dest_dir.rstrip('/') + + +def build_input_arr(input_dir): + txt_files = glob.glob(os.path.join(input_dir, '**', '*.trans.txt'), + recursive=True) + input_data = [] + for txt_file in txt_files: + rel_path = os.path.relpath(txt_file, input_dir) + with open(txt_file) as fp: + for line in fp: + fname, _, transcript = line.partition(' ') + input_data.append(dict(input_relpath=os.path.dirname(rel_path), + input_fname=fname + '.flac', + transcript=transcript)) + return input_data + + +print("[%s] Scaning input dir..." % args.output_json) +dataset = build_input_arr(input_dir=args.input_dir) + +print("[%s] Converting audio files..." % args.output_json) +dataset = parallel_preprocess(dataset=dataset, + input_dir=args.input_dir, + dest_dir=args.dest_dir, + target_sr=args.target_sr, + speed=args.speed, + overwrite=args.overwrite, + parallel=args.parallel) + +print("[%s] Generating json..." % args.output_json) +df = pd.DataFrame(dataset, dtype=object) + +# Save json with python. df.to_json() produces back slashed in file paths +dataset = df.to_dict(orient='records') +with open(args.output_json, 'w') as fp: + json.dump(dataset, fp, indent=2) diff --git a/benchmarks/rnnt/ootb/inference/pytorch/utils/download_librispeech.py b/benchmarks/rnnt/ootb/inference/pytorch/utils/download_librispeech.py new file mode 100644 index 0000000..f7e5eda --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/pytorch/utils/download_librispeech.py @@ -0,0 +1,76 @@ +#!/usr/bin/env python +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import os +import argparse +import pandas as pd + +from download_utils import download_file, md5_checksum, extract + +parser = argparse.ArgumentParser( + description='Download, verify and extract dataset files') +parser.add_argument('csv', type=str, + help='CSV file with urls and checksums to download.') +parser.add_argument('dest', type=str, + help='Download destnation folder.') +parser.add_argument('-e', type=str, default=None, + help='Extraction destnation folder. 
Defaults to download folder if not provided') +parser.add_argument('--skip_download', action='store_true', + help='Skip downloading the files') +parser.add_argument('--skip_checksum', action='store_true', + help='Skip checksum') +parser.add_argument('--skip_extract', action='store_true', + help='Skip extracting files') +args = parser.parse_args() +args.e = args.e or args.dest + + +df = pd.read_csv(args.csv, delimiter=',') + + +if not args.skip_download: + for url in df.url: + fname = url.split('/')[-1] + print("Downloading %s:" % fname) + download_file(url=url, dest_folder=args.dest, fname=fname) +else: + print("Skipping file download") + + +if not args.skip_checksum: + for index, row in df.iterrows(): + url = row['url'] + md5 = row['md5'] + fname = url.split('/')[-1] + fpath = os.path.join(args.dest, fname) + print("Verifing %s: " % fname, end='') + ret = md5_checksum(fpath=fpath, target_hash=md5) + if not ret: + raise ValueError(f"Checksum for {fname} failed!") + else: + print(f"Checksum correct for {fname}") +else: + print("Skipping checksum") + + +if not args.skip_extract: + for url in df.url: + fname = url.split('/')[-1] + fpath = os.path.join(args.dest, fname) + print("Decompressing %s:" % fpath) + extract(fpath=fpath, dest_folder=args.e) +else: + print("Skipping file extraction") diff --git a/benchmarks/rnnt/ootb/inference/pytorch/utils/download_utils.py b/benchmarks/rnnt/ootb/inference/pytorch/utils/download_utils.py new file mode 100644 index 0000000..bda4193 --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/pytorch/utils/download_utils.py @@ -0,0 +1,69 @@ +#!/usr/bin/env python +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import hashlib +import requests +import os +import tarfile +import tqdm + + +def download_file(url, dest_folder, fname, overwrite=False): + fpath = os.path.join(dest_folder, fname) + if os.path.isfile(fpath): + if overwrite: + print("Overwriting existing file") + else: + print("File exists, skipping download.") + return + + tmp_fpath = fpath + '.tmp' + + r = requests.get(url, stream=True) + file_size = int(r.headers['Content-Length']) + chunk_size = 1024 * 1024 # 1MB + total_chunks = int(file_size / chunk_size) + + with open(tmp_fpath, 'wb') as fp: + content_iterator = r.iter_content(chunk_size=chunk_size) + chunks = tqdm.tqdm(content_iterator, total=total_chunks, + unit='MB', desc=fpath, leave=True) + for chunk in chunks: + fp.write(chunk) + + os.rename(tmp_fpath, fpath) + + +def md5_checksum(fpath, target_hash): + file_hash = hashlib.md5() + with open(fpath, "rb") as fp: + for chunk in iter(lambda: fp.read(1024 * 1024), b""): + file_hash.update(chunk) + return file_hash.hexdigest() == target_hash + + +def extract(fpath, dest_folder): + if fpath.endswith('.tar.gz'): + mode = 'r:gz' + elif fpath.endswith('.tar'): + mode = 'r:' + else: + raise IOError('fpath has unknown extention: %s' % fpath) + + with tarfile.open(fpath, mode) as tar: + members = tar.getmembers() + for member in tqdm.tqdm(iterable=members, total=len(members), leave=True): + tar.extract(path=dest_folder, member=member) diff --git a/benchmarks/rnnt/ootb/inference/pytorch/utils/inference_librispeech.csv b/benchmarks/rnnt/ootb/inference/pytorch/utils/inference_librispeech.csv new file mode 
100644 index 0000000..40dac4e --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/pytorch/utils/inference_librispeech.csv @@ -0,0 +1,5 @@ +url,md5 +http://www.openslr.org/resources/12/dev-clean.tar.gz,42e2234ba48799c1f50f24a7926300a1 +http://www.openslr.org/resources/12/dev-other.tar.gz,c8d0bcc9cca99d4f8b62fcc847357931 +http://www.openslr.org/resources/12/test-clean.tar.gz,32fa31d27d2e1cad72775fee3f4849a9 +http://www.openslr.org/resources/12/test-other.tar.gz,fb5a50374b501bb3bac4815ee91d3135 diff --git a/benchmarks/rnnt/ootb/inference/pytorch/utils/librispeech-inference.csv b/benchmarks/rnnt/ootb/inference/pytorch/utils/librispeech-inference.csv new file mode 100644 index 0000000..b5e43b2 --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/pytorch/utils/librispeech-inference.csv @@ -0,0 +1,2 @@ +url,md5 +http://www.openslr.org/resources/12/dev-clean.tar.gz,42e2234ba48799c1f50f24a7926300a1 \ No newline at end of file diff --git a/benchmarks/rnnt/ootb/inference/pytorch/utils/librispeech.csv b/benchmarks/rnnt/ootb/inference/pytorch/utils/librispeech.csv new file mode 100644 index 0000000..d48a9f8 --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/pytorch/utils/librispeech.csv @@ -0,0 +1,8 @@ +url,md5 +http://www.openslr.org/resources/12/dev-clean.tar.gz,42e2234ba48799c1f50f24a7926300a1 +http://www.openslr.org/resources/12/dev-other.tar.gz,c8d0bcc9cca99d4f8b62fcc847357931 +http://www.openslr.org/resources/12/test-clean.tar.gz,32fa31d27d2e1cad72775fee3f4849a9 +http://www.openslr.org/resources/12/test-other.tar.gz,fb5a50374b501bb3bac4815ee91d3135 +http://www.openslr.org/resources/12/train-clean-100.tar.gz,2a93770f6d5c6c964bc36631d331a522 +http://www.openslr.org/resources/12/train-clean-360.tar.gz,c0e676e450a7ff2f54aeade5171606fa +http://www.openslr.org/resources/12/train-other-500.tar.gz,d1a0fd59409feb2c614ce4d30c387708 diff --git a/benchmarks/rnnt/ootb/inference/pytorch/utils/preprocessing_utils.py b/benchmarks/rnnt/ootb/inference/pytorch/utils/preprocessing_utils.py new 
file mode 100644 index 0000000..260e860 --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/pytorch/utils/preprocessing_utils.py @@ -0,0 +1,77 @@ +#!/usr/bin/env python +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import multiprocessing +import functools + +import sox + + +from tqdm import tqdm + + +def preprocess(data, input_dir, dest_dir, target_sr=None, speed=None, + overwrite=True): + speed = speed or [] + speed.append(1) + speed = list(set(speed)) # Make uniqe + + input_fname = os.path.join(input_dir, + data['input_relpath'], + data['input_fname']) + input_sr = sox.file_info.sample_rate(input_fname) + target_sr = target_sr or input_sr + + os.makedirs(os.path.join(dest_dir, data['input_relpath']), exist_ok=True) + + output_dict = {} + output_dict['transcript'] = data['transcript'].lower().strip() + output_dict['files'] = [] + + fname = os.path.splitext(data['input_fname'])[0] + for s in speed: + output_fname = fname + \ + '{}.wav'.format('' if s == 1 else '-{}'.format(s)) + output_fpath = os.path.join(dest_dir, + data['input_relpath'], + output_fname) + + if not os.path.exists(output_fpath) or overwrite: + cbn = sox.Transformer().speed(factor=s).convert(target_sr) + cbn.build(input_fname, output_fpath) + + file_info = sox.file_info.info(output_fpath) + file_info['fname'] = os.path.join(os.path.basename(dest_dir), + data['input_relpath'], + output_fname) + file_info['speed'] = s + 
output_dict['files'].append(file_info) + + if s == 1: + file_info = sox.file_info.info(output_fpath) + output_dict['original_duration'] = file_info['duration'] + output_dict['original_num_samples'] = file_info['num_samples'] + + return output_dict + + +def parallel_preprocess(dataset, input_dir, dest_dir, target_sr, speed, overwrite, parallel): + with multiprocessing.Pool(parallel) as p: + func = functools.partial(preprocess, + input_dir=input_dir, dest_dir=dest_dir, + target_sr=target_sr, speed=speed, overwrite=overwrite) + dataset = list(tqdm(p.imap(func, dataset), total=len(dataset))) + return dataset diff --git a/benchmarks/rnnt/ootb/inference/pytorch_SUT.py b/benchmarks/rnnt/ootb/inference/pytorch_SUT.py new file mode 100644 index 0000000..5695479 --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/pytorch_SUT.py @@ -0,0 +1,124 @@ +# Copyright (c) 2020, Cerebras Systems, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import sys +import os +sys.path.insert(0, os.path.join(os.getcwd(), "pytorch")) + +import array +import torch +import numpy as np +import toml +import mlperf_loadgen as lg +from tqdm import tqdm + +from QSL import AudioQSL, AudioQSLInMemory +from decoders import ScriptGreedyDecoder +from helpers import add_blank_label +from preprocessing import AudioPreprocessing +from model_separable_rnnt import RNNT + + +def load_and_migrate_checkpoint(ckpt_path): + checkpoint = torch.load(ckpt_path, map_location="cpu") + migrated_state_dict = {} + for key, value in checkpoint['state_dict'].items(): + key = key.replace("joint_net", "joint.net") + migrated_state_dict[key] = value + del migrated_state_dict["audio_preprocessor.featurizer.fb"] + del migrated_state_dict["audio_preprocessor.featurizer.window"] + return migrated_state_dict + + +class PytorchSUT: + def __init__(self, config_toml, checkpoint_path, dataset_dir, + manifest_filepath, perf_count): + config = toml.load(config_toml) + + dataset_vocab = config['labels']['labels'] + rnnt_vocab = add_blank_label(dataset_vocab) + featurizer_config = config['input_eval'] + + self.sut = lg.ConstructSUT(self.issue_queries, self.flush_queries, + self.process_latencies) + self.qsl = AudioQSLInMemory(dataset_dir, + manifest_filepath, + dataset_vocab, + featurizer_config["sample_rate"], + perf_count) + self.audio_preprocessor = AudioPreprocessing(**featurizer_config) + self.audio_preprocessor.eval() + self.audio_preprocessor = torch.jit.script(self.audio_preprocessor) + self.audio_preprocessor = torch.jit._recursive.wrap_cpp_module( + torch._C._freeze_module(self.audio_preprocessor._c)) + + model = RNNT( + feature_config=featurizer_config, + rnnt=config['rnnt'], + num_classes=len(rnnt_vocab) + ) + model.load_state_dict(load_and_migrate_checkpoint(checkpoint_path), + strict=True) + model.eval() + model.encoder = torch.jit.script(model.encoder) + model.encoder = torch.jit._recursive.wrap_cpp_module( + 
torch._C._freeze_module(model.encoder._c)) + model.prediction = torch.jit.script(model.prediction) + model.prediction = torch.jit._recursive.wrap_cpp_module( + torch._C._freeze_module(model.prediction._c)) + model.joint = torch.jit.script(model.joint) + model.joint = torch.jit._recursive.wrap_cpp_module( + torch._C._freeze_module(model.joint._c)) + model = torch.jit.script(model) + + self.greedy_decoder = ScriptGreedyDecoder(len(rnnt_vocab) - 1, model) + + def issue_queries(self, query_samples): + for query_sample in query_samples: + waveform = self.qsl[query_sample.index] + assert waveform.ndim == 1 + waveform_length = np.array(waveform.shape[0], dtype=np.int64) + waveform = np.expand_dims(waveform, 0) + waveform_length = np.expand_dims(waveform_length, 0) + with torch.no_grad(): + waveform = torch.from_numpy(waveform) + waveform_length = torch.from_numpy(waveform_length) + feature, feature_length = self.audio_preprocessor.forward((waveform, waveform_length)) + assert feature.ndim == 3 + assert feature_length.ndim == 1 + feature = feature.permute(2, 0, 1) + + _, _, transcript = self.greedy_decoder.forward(feature, feature_length) + + assert len(transcript) == 1 + response_array = array.array('q', transcript[0]) + bi = response_array.buffer_info() + response = lg.QuerySampleResponse(query_sample.id, bi[0], + bi[1] * response_array.itemsize) + lg.QuerySamplesComplete([response]) + + def flush_queries(self): + pass + + def process_latencies(self, latencies_ns): + print("Average latency (ms) per query:") + print(np.mean(latencies_ns)/1000000.0) + print("Median latency (ms): ") + print(np.percentile(latencies_ns, 50)/1000000.0) + print("90 percentile latency (ms): ") + print(np.percentile(latencies_ns, 90)/1000000.0) + + def __del__(self): + lg.DestroySUT(self.sut) + print("Finished destroying SUT.") diff --git a/benchmarks/rnnt/ootb/inference/run.py b/benchmarks/rnnt/ootb/inference/run.py new file mode 100644 index 0000000..4f688fd --- /dev/null +++ 
b/benchmarks/rnnt/ootb/inference/run.py @@ -0,0 +1,110 @@ +# Copyright 2020 The MLPerf Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================= + +import argparse +import mlperf_loadgen as lg +import subprocess + +import os +from pathlib import Path +import sys + +MLPERF_CONF = Path(os.path.dirname(os.path.realpath(__file__))) / "../../mlperf.conf" +MLPERF_CONF = MLPERF_CONF.resolve() + +# FB5 Logger +p = Path(__file__).parent.resolve() / "../../../../fb5logging" +sys.path.append(os.fspath(p)) +from fb5logger import FB5Logger +import loggerconstants + + +def get_args(): + parser = argparse.ArgumentParser() + parser.add_argument("--backend", choices=["pytorch"], default="pytorch", help="Backend") + parser.add_argument("--scenario", choices=["SingleStream", "Offline", "Server"], default="Offline", help="Scenario") + parser.add_argument("--accuracy", action="store_true", help="enable accuracy pass") + parser.add_argument("--mlperf_conf", default=str(MLPERF_CONF), help="mlperf rules config") + parser.add_argument("--user_conf", default="user.conf", help="user config for user LoadGen settings such as target QPS") + parser.add_argument("--pytorch_config_toml", default="pytorch/configs/rnnt.toml") + parser.add_argument("--pytorch_checkpoint", default="pytorch/work_dir/rnnt.pt") + parser.add_argument("--dataset_dir", required=True) + parser.add_argument("--manifest", 
required=True) + parser.add_argument("--perf_count", type=int, default=None) + parser.add_argument("--log_dir", required=True) + # FB5 Logging + parser.add_argument("--fb5logger", type=str, default=None) + parser.add_argument("--fb5config", type=str, default="small") + args = parser.parse_args() + return args + + +scenario_map = { + "SingleStream": lg.TestScenario.SingleStream, + "Offline": lg.TestScenario.Offline, + "Server": lg.TestScenario.Server, +} + + +def main(): + args = get_args() + + if args.fb5logger is not None: + fb5logger = FB5Logger(args.fb5logger) + fb5logger.header("RNN-T", "OOTB", "infer", args.fb5config, score_metric=loggerconstants.EXPS) + + if args.backend == "pytorch": + from pytorch_SUT import PytorchSUT + sut = PytorchSUT(args.pytorch_config_toml, args.pytorch_checkpoint, + args.dataset_dir, args.manifest, args.perf_count) + else: + raise ValueError("Unknown backend: {:}".format(args.backend)) + + settings = lg.TestSettings() + settings.scenario = scenario_map[args.scenario] + settings.FromConfig(args.mlperf_conf, "rnnt", args.scenario) + settings.FromConfig(args.user_conf, "rnnt", args.scenario) + + if args.accuracy: + settings.mode = lg.TestMode.AccuracyOnly + else: + settings.mode = lg.TestMode.PerformanceOnly + + log_path = args.log_dir + os.makedirs(log_path, exist_ok=True) + log_output_settings = lg.LogOutputSettings() + log_output_settings.outdir = log_path + log_output_settings.copy_summary_to_stdout = True + log_settings = lg.LogSettings() + log_settings.log_output = log_output_settings + + print("Running Loadgen test...") + if args.fb5logger is not None: + fb5logger.run_start() + lg.StartTestWithLogSettings(sut.sut, sut.qsl.qsl, settings, log_settings) + if args.fb5logger is not None: + nbatches = sut.qsl.count + fb5logger.run_stop(nbatches, 1) + + if args.accuracy: + cmd = f"python3 accuracy_eval.py --log_dir {log_path} --dataset_dir {args.dataset_dir} --manifest {args.manifest}" + print(f"Running accuracy script: {cmd}") + 
subprocess.check_call(cmd, shell=True) + + print("Done!") + + +if __name__ == "__main__": + main() diff --git a/benchmarks/rnnt/ootb/inference/run.sh b/benchmarks/rnnt/ootb/inference/run.sh new file mode 100755 index 0000000..7538df9 --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/run.sh @@ -0,0 +1,90 @@ +#!/bin/bash + +set -euo pipefail + +work_dir=/export/b07/ws15dgalvez/mlperf-rnnt-librispeech +local_data_dir=$work_dir/local_data +librispeech_download_dir=$local_data_dir/LibriSpeech +stage=3 + +mkdir -p $work_dir $local_data_dir $librispeech_download_dir + +install_dir=third_party/install +mkdir -p $install_dir +install_dir=$(readlink -f $install_dir) + +set +u +source "$($CONDA_EXE info --base)/etc/profile.d/conda.sh" +set -u + +# stage -1: install dependencies +if [[ $stage -le -1 ]]; then + conda env create --force -v --file environment.yml + + set +u + source "$(conda info --base)/etc/profile.d/conda.sh" + conda activate mlperf-rnnt + set -u + + # We need to convert .flac files to .wav files via sox. Not all sox installs have flac support, so we install from source. + wget https://ftp.osuosl.org/pub/xiph/releases/flac/flac-1.3.2.tar.xz -O third_party/flac-1.3.2.tar.xz + (cd third_party; tar xf flac-1.3.2.tar.xz; cd flac-1.3.2; ./configure --prefix=$install_dir && make && make install) + + wget https://sourceforge.net/projects/sox/files/sox/14.4.2/sox-14.4.2.tar.gz -O third_party/sox-14.4.2.tar.gz + (cd third_party; tar zxf sox-14.4.2.tar.gz; cd sox-14.4.2; LDFLAGS="-L${install_dir}/lib" CFLAGS="-I${install_dir}/include" ./configure --prefix=$install_dir --with-flac && make && make install) + + (cd $(git rev-parse --show-toplevel)/loadgen; python setup.py install) +fi + +export PATH="$install_dir/bin/:$PATH" + +set +u +conda activate mlperf-rnnt +set -u + +# stage 0: download model. Check checksum to skip? 
+if [[ $stage -le 0 ]]; then + wget https://zenodo.org/record/3662521/files/DistributedDataParallel_1576581068.9962234-epoch-100.pt?download=1 -O $work_dir/rnnt.pt +fi + +# stage 1: download data. This will hae a non-zero exit code if the +# checksum is incorrect. +if [[ $stage -le 1 ]]; then + python pytorch/utils/download_librispeech.py \ + pytorch/utils/librispeech-inference.csv \ + $librispeech_download_dir \ + -e $local_data_dir +fi + +if [[ $stage -le 2 ]]; then + python pytorch/utils/convert_librispeech.py \ + --input_dir $librispeech_download_dir/dev-clean \ + --dest_dir $local_data_dir/dev-clean-wav \ + --output_json $local_data_dir/dev-clean-wav.json +fi + +if [[ $stage -le 3 ]]; then + for backend in pytorch; do + for accuracy in "--accuracy" ""; do + for scenario in SingleStream Offline Server; do + log_dir=${work_dir}/${scenario}_${backend} + if [ ! -z ${accuracy} ]; then + log_dir+=_accuracy + fi + log_dir+=rerun + + python run.py --backend pytorch \ + --dataset_dir $local_data_dir \ + --manifest $local_data_dir/dev-clean-wav.json \ + --pytorch_config_toml pytorch/configs/rnnt.toml \ + --pytorch_checkpoint $work_dir/rnnt.pt \ + --scenario ${scenario} \ + --backend ${backend} \ + --log_dir ${log_dir} \ + ${accuracy} & + + done + done + done + wait +fi diff --git a/benchmarks/rnnt/ootb/inference/third_party/pybind b/benchmarks/rnnt/ootb/inference/third_party/pybind new file mode 160000 index 0000000..b11ff91 --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/third_party/pybind @@ -0,0 +1 @@ +Subproject commit b11ff912a6b68edcd67770308ba4703e3a740e7b diff --git a/benchmarks/rnnt/ootb/inference/user.conf b/benchmarks/rnnt/ootb/inference/user.conf new file mode 100644 index 0000000..545569c --- /dev/null +++ b/benchmarks/rnnt/ootb/inference/user.conf @@ -0,0 +1,6 @@ +# Please set these fields depending on the performance of your system to +# override default LoadGen settings. 
+*.SingleStream.target_latency = 10 +*.Server.target_qps = 1.0 +*.Offline.target_qps = 1.0 +*.MultiStream.samples_per_query = 4 \ No newline at end of file diff --git a/benchmarks/rnnt/ootb/train/Dockerfile b/benchmarks/rnnt/ootb/train/Dockerfile new file mode 100755 index 0000000..cb13d98 --- /dev/null +++ b/benchmarks/rnnt/ootb/train/Dockerfile @@ -0,0 +1,56 @@ +# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +ARG FROM_IMAGE_NAME=pytorch/pytorch:1.7.0-cuda11.0-cudnn8-devel +FROM ${FROM_IMAGE_NAME} + +ENV PYTORCH_VERSION=1.7.0a0+7036e91 + +RUN apt-get update && \ + apt-get install -y libsndfile1 sox git cmake jq && \ + apt-get install -y --no-install-recommends numactl && \ + rm -rf /var/lib/apt/lists/* + +RUN COMMIT_SHA=f546575109111c455354861a0567c8aa794208a2 && \ + git clone https://github.com/HawkAaron/warp-transducer deps/warp-transducer && \ + cd deps/warp-transducer && \ + git checkout $COMMIT_SHA && \ + sed -i 's/set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -gencode arch=compute_30,code=sm_30 -O2")/#set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -gencode arch=compute_30,code=sm_30 -O2")/g' CMakeLists.txt && \ + sed -i 's/set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -gencode arch=compute_75,code=sm_75")/set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -gencode arch=compute_80,code=sm_80")/g' CMakeLists.txt && \ + mkdir build && \ + cd build && \ + cmake .. 
&& \ + make VERBOSE=1 && \ + export CUDA_HOME="/usr/local/cuda" && \ + export WARP_RNNT_PATH=`pwd` && \ + export CUDA_TOOLKIT_ROOT_DIR=$CUDA_HOME && \ + export LD_LIBRARY_PATH="$CUDA_HOME/extras/CUPTI/lib64:$LD_LIBRARY_PATH" && \ + export LIBRARY_PATH=$CUDA_HOME/lib64:$LIBRARY_PATH && \ + export LD_LIBRARY_PATH=$CUDA_HOME/lib64:$LD_LIBRARY_PATH && \ + export CFLAGS="-I$CUDA_HOME/include $CFLAGS" && \ + cd ../pytorch_binding && \ + python3 setup.py install && \ + rm -rf ../tests test ../tensorflow_binding && \ + cd ../../.. + +WORKDIR /workspace/rnnt + +RUN pip install --no-cache --extra-index-url https://developer.download.nvidia.com/compute/redist nvidia-dali-cuda110==0.28.0 + +RUN pip install --global-option="--cpp_ext" --global-option="--cuda_ext" https://github.com/NVIDIA/apex/archive/8a1ed9e8d35dfad26fb973996319965e4224dcdd.zip + +COPY requirements.txt . +RUN pip install --no-cache --disable-pip-version-check -U -r requirements.txt + +COPY . . diff --git a/benchmarks/rnnt/ootb/train/LICENSE b/benchmarks/rnnt/ootb/train/LICENSE new file mode 100644 index 0000000..f2f7693 --- /dev/null +++ b/benchmarks/rnnt/ootb/train/LICENSE @@ -0,0 +1,204 @@ + Except where otherwise noted, the following license applies to all files in this repo. + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright 2019-2020 NVIDIA Corporation + Copyright 2019 Myrtle Software Limited, www.myrtle.ai + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/benchmarks/rnnt/ootb/train/NOTICE b/benchmarks/rnnt/ootb/train/NOTICE new file mode 100644 index 0000000..9d3b1ff --- /dev/null +++ b/benchmarks/rnnt/ootb/train/NOTICE @@ -0,0 +1,5 @@ +RNN-T in PyTorch + +This repository includes source code (in "rnnt/") from: +* https://github.com/keithito/tacotron and https://github.com/ryanleary/patter licensed under MIT license. + diff --git a/benchmarks/rnnt/ootb/train/README.md b/benchmarks/rnnt/ootb/train/README.md new file mode 100644 index 0000000..1793e83 --- /dev/null +++ b/benchmarks/rnnt/ootb/train/README.md @@ -0,0 +1,192 @@ +# 1. Problem +Speech recognition accepts raw audio samples and produces a corresponding text transcription. + +# 2. Directions + +## Steps to configure machine +### From Docker +1. Clone the repository +``` +git clone https://github.com/mlcommons/training.git +``` +2. Install CUDA and Docker +``` +source training/install_cuda_docker.sh +``` +3. Build the docker image for the single stage detection task +``` +# Build from Dockerfile +cd training/rnn_speech_recognition/pytorch/ +bash scripts/docker/build.sh +``` + +#### Requirements +Currently, the reference uses CUDA-11.0 (see [Dockerfile](Dockerfile#L15)). 
+Here you can find a table listing compatible drivers: https://docs.nvidia.com/deploy/cuda-compatibility/index.html#binary-compatibility__table-toolkit-driver + +## Steps to download data +1. Start an interactive session in the NGC container to run data download/training/inference +``` +bash scripts/docker/launch.sh +``` + +Within the container, the contents of this repository will be copied to the `/workspace/rnnt` directory. The `/datasets`, `/checkpoints`, `/results` directories are mounted as volumes +and mapped to the corresponding directories ``, ``, `` on the host. + +2. Download and preprocess the dataset. + +No GPU is required for data download and preprocessing. Therefore, if GPU usage is a limited resource, launch the container for this section on a CPU machine by following the previous steps. + +Note: Downloading and preprocessing the dataset requires 500GB of free disk space and can take several hours to complete. + +This repository provides scripts to download and extract the following datasets: + +* LibriSpeech [http://www.openslr.org/12](http://www.openslr.org/12) + +LibriSpeech contains 1000 hours of 16kHz read English speech derived from public domain audiobooks from LibriVox project and has been carefully segmented and aligned. For more information, see the [LIBRISPEECH: AN ASR CORPUS BASED ON PUBLIC DOMAIN AUDIO BOOKS](http://www.danielpovey.com/files/2015_icassp_librispeech.pdf) paper. 
+ +Inside the container, download and extract the datasets into the required format for later training and inference: +```bash +bash scripts/download_librispeech.sh +``` +Once the data download is complete, the following folders should exist: + +* `/datasets/LibriSpeech/` + * `train-clean-100/` + * `train-clean-360/` + * `train-other-500/` + * `dev-clean/` + * `dev-other/` + * `test-clean/` + * `test-other/` + +Since `/datasets/` is mounted to `` on the host (see Step 3), once the dataset is downloaded it will be accessible from outside of the container at `/LibriSpeech`. + +Next, convert the data into WAV files: +```bash +bash scripts/preprocess_librispeech.sh +``` +Once the data is converted, the following additional files and folders should exist: +* `datasets/LibriSpeech/` + * `librispeech-train-clean-100-wav.json` + * `librispeech-train-clean-360-wav.json` + * `librispeech-train-other-500-wav.json` + * `librispeech-dev-clean-wav.json` + * `librispeech-dev-other-wav.json` + * `librispeech-test-clean-wav.json` + * `librispeech-test-other-wav.json` + * `train-clean-100-wav/` + * `train-clean-360-wav/` + * `train-other-500-wav/` + * `dev-clean-wav/` + * `dev-other-wav/` + * `test-clean-wav/` + * `test-other-wav/` + +For training, the following manifest files are used: + * `librispeech-train-clean-100-wav.json` + * `librispeech-train-clean-360-wav.json` + * `librispeech-train-other-500-wav.json` + +For evaluation, the `librispeech-dev-clean-wav.json` is used. + +## Steps to run benchmark. + +### Steps to launch training + +Inside the container, use the following script to start training. +Make sure the downloaded and preprocessed dataset is located at `/LibriSpeech` on the host (see Step 3), which corresponds to `/datasets/LibriSpeech` inside the container. + +```bash +bash scripts/train.sh +``` + +This script tries to use 8 GPUs by default. 
+To run 1-gpu training, use the following command: + +```bash +NUM_GPUS=1 GRAD_ACCUMULATION_STEPS=64 scripts/train.sh +``` + +# 3. Dataset/Environment +### Publication/Attribution +["OpenSLR LibriSpeech Corpus"](http://www.openslr.org/12/) provides over 1000 hours of speech data in the form of raw audio. + +### Data preprocessing +Data preprocessing is described by scripts mentioned in the [Steps to download data](#steps-to-download-data). + +### Data pipeline +Transcripts are encoded to sentencepieces using the model produced in [Steps to download data](#steps-to-download-data). +Audio processing consists of the following steps: +1. audio is decoded with sample rate chosen uniformly between 13800 and 18400 ([code](./common/data/dali/pipeline.py#L91-L97)); +2. silence is trimmed with -60 dB threshold (details in the [DALI documentation](https://docs.nvidia.com/deeplearning/dali/archives/dali_0280/user-guide/docs/supported_ops.html?highlight=nonsilentregion#nvidia.dali.ops.NonsilentRegion)) ([code](./common/data/dali/pipeline.py#L120-L121)); +3. random noise with normal distribution and 0.00001 amplitude is applied to reduce quantization effect (dither) ([code](/common/data/dali/pipeline.py#L197)); +4. Pre-emphasis filter is applied (details in the [DALI documentation](https://docs.nvidia.com/deeplearning/dali/archives/dali_0280/user-guide/docs/supported_ops.html?highlight=nonsilentregion#nvidia.dali.ops.PreemphasisFilter) ([code](./common/data/dali/pipeline.py#L101)); +1. spectrograms are calculated with 512 ffts, 20ms window and 10ms stride ([code](./common/data/dali/pipeline.py#L103-L105)); +1. MelFilterBanks are calculated with 80 features and normalization ([code](./common/data/dali/pipeline.py#L107-L108)); +1. 
features are translated to decibels with log(10) multiplier reference magnitude 1 and 1e-20 cutoff (details in the [DALI documentation](https://docs.nvidia.com/deeplearning/dali/archives/dali_0280/user-guide/docs/supported_ops.html?highlight=nonsilentregion#nvidia.dali.ops.ToDecibels)) ([code](./common/data/dali/pipeline.py#L110-L111)); +1. features are normalized along time dimension using the algorithm described in the [normalize operator documentation](https://docs.nvidia.com/deeplearning/dali/user-guide/docs/examples/general/normalize.html) ([code](common/data/dali/pipeline.py#L115)); +1. In the train pipeline, an adaptive specaugment augmentation is applied ([arxiv](https://arxiv.org/abs/1912.05533), [code](https://github.com/mwawrzos/training/blob/rnnt/rnn_speech_recognition/pytorch/common/data/features.py#L44-L117)). In the evaluation pipeline, this step is omitted; +1. to reduce accelerator memory usage, frames are spliced (stacked three times, and subsampled three times) ([code](https://github.com/mwawrzos/training/blob/rnnt/rnn_speech_recognition/pytorch/common/data/features.py#L144-L165)); + +### Training and test data separation +Dataset authors separated it into test and training subsets. For this benchmark, training is done on train-clean-100, train-clean-360 and train-other-500 subsets. Evaluation is done on dev-clean subset. + +### Training data order +To reduce data padding in minibatches, data bucketing is applied. +The algorithm is implemented here: +[link](https://github.com/mlcommons/training/blob/2126999a1ffff542064bb3208650a1e673920dcf/rnn_speech_recognition/pytorch/common/data/dali/sampler.py#L65-L105) +and can be described as follows: +1. drop samples longer than a given threshold ([code](./common/data/dali/data_loader.py#L97-L98)); +1. sort data by audio length ([code](./common/data/dali/sampler.py#L69)); +2. split data into 6 equally sized buckets ([code](./common/data/dali/sampler.py#L70)); +3. for every epoch: + 1. 
shuffle data in each bucket ([code](common/data/dali/sampler.py#L73-L78)); + 2. as long as all samples are not divisible by global batch size, remove random element from random bucket ([code](./common/data/dali/sampler.py#L82-L86)); + 3. concatenate all buckets; + 4. split samples into minibatches ([code](./common/data/dali/sampler.py#L90)); + 5. shuffle minibatches in the epoch ([code](./common/data/dali/sampler.py#L93-L94)). + +### Test data order +Test data order is the same as in the dataset. + +# 4. Model +### Publication/Attribution +To the best of our knowledge, there is no single publication describing RNN-T training on LibriSpeech, +or another publicly available dataset of reasonable size. For that reason, the reference will be a +collection of solutions from several works. It is based on the following articles: +* Graves 2012 - an invention of RNN-Transducer: https://arxiv.org/abs/1211.3711 +* Rao 2018 - time reduction in the acoustic model, internal dataset: https://arxiv.org/abs/1801.00841 +* Zhang 2020 - Transformer-transducer publication. It includes bi-directional LSTM RNN-T result on LibriSpeech: https://arxiv.org/abs/2002.02562 +* Park 2019 - adaptive spec augment, internal dataset: https://arxiv.org/abs/1912.05533 +* Guo 2020 - RNN-T trained with vanilla LSTM, internal dataset: https://arxiv.org/abs/2007.13802 + +### List of layers +Model structure is described in the following picture: +![model layers structure](./rnnt_layers.svg "RNN-T model structure") + +### Weight and bias initialization +* In all fully connected layers, weights and biases are initialized as defined in the [Pytorch 1.7.0 torch.nn.Linear documentation](https://pytorch.org/docs/1.7.0/generated/torch.nn.Linear.html#torch.nn.Linear) ([code](./rnnt/model.py#L123-L137)). 
+* In the embedding layer, weights are initialized as defined in the [Pytorch 1.7.0 torch.nn.Embedding documentation](https://pytorch.org/docs/1.7.0/generated/torch.nn.Embedding.html#torch.nn.Embedding) ([code](./rnnt/model.py#L105)). +* In all LSTM layers: + * weights and biases are initialized as defined in the [Pytorch 1.7.0 torch.nn.LSTM documentation](https://pytorch.org/docs/1.7.0/generated/torch.nn.LSTM.html#torch.nn.LSTM) ([code](./common/rnn.py#L56-L61)), + * forget gate biases are set to 1 ([code](./common/rnn.py#L67-L69)), + * then the weights and bias values are divided by two (as a result, the forget gate biases are set to 0.5) ([code](./common/rnn.py#L74-L76)). + +### Loss function +Transducer Loss +### Optimizer +RNN-T benchmark uses LAMB optimizer. More details are in [training policies](https://github.com/mlcommons/training_policies/blob/master/training_rules.adoc#appendix-allowed-optimizers). + +To decrease the number of epochs needed to reach the target accuracy, +evaluation is done with an exponential moving average of the trained model weights with a smoothing factor set to 0.999. + +# 5. Quality +### Quality metric +Word Error Rate (WER) across all words in the output text of all samples in the validation set. +### Quality target +Target quality is 0.058 Word Error Rate or lower. +### Evaluation frequency +Evaluation is done after each training epoch. +### Evaluation thoroughness +Evaluation is done on each sample from the evaluation set. diff --git a/benchmarks/rnnt/ootb/train/common/__init__.py b/benchmarks/rnnt/ootb/train/common/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/benchmarks/rnnt/ootb/train/common/audio.py b/benchmarks/rnnt/ootb/train/common/audio.py new file mode 100644 index 0000000..d515832 --- /dev/null +++ b/benchmarks/rnnt/ootb/train/common/audio.py @@ -0,0 +1,214 @@ +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import random +import soundfile as sf + +import librosa +import torch +import numpy as np + +import sox + + +def audio_from_file(file_path, offset=0, duration=0, trim=False, target_sr=16000): + audio = AudioSegment(file_path, target_sr=target_sr, int_values=False, + offset=offset, duration=duration, trim=trim) + + samples = torch.tensor(audio.samples, dtype=torch.float).cuda() + num_samples = torch.tensor(samples.shape[0]).int().cuda() + return (samples.unsqueeze(0), num_samples.unsqueeze(0)) + + +class AudioSegment(object): + """Monaural audio segment abstraction. + + :param samples: Audio samples [num_samples x num_channels]. + :type samples: ndarray.float32 + :param sample_rate: Audio sample rate. + :type sample_rate: int + :raises TypeError: If the sample data type is not float or int. + """ + + def __init__(self, filename, target_sr=None, int_values=False, offset=0, + duration=0, trim=False, trim_db=60): + """Create audio segment from samples. + + Samples are converted to float32 internally, with int scaled to [-1, 1]. + Load a file supported by librosa and return as an AudioSegment. 
+ :param filename: path of file to load + :param target_sr: the desired sample rate + :param int_values: if true, load samples as 32-bit integers + :param offset: offset in seconds when loading audio + :param duration: duration in seconds when loading audio + :return: numpy array of samples + """ + with sf.SoundFile(filename, 'r') as f: + dtype = 'int32' if int_values else 'float32' + sample_rate = f.samplerate + if offset > 0: + f.seek(int(offset * sample_rate)) + if duration > 0: + samples = f.read(int(duration * sample_rate), dtype=dtype) + else: + samples = f.read(dtype=dtype) + samples = samples.transpose() + + samples = self._convert_samples_to_float32(samples) + if target_sr is not None and target_sr != sample_rate: + samples = librosa.core.resample(samples, sample_rate, target_sr) + sample_rate = target_sr + if trim: + samples, _ = librosa.effects.trim(samples, trim_db) + self._samples = samples + self._sample_rate = sample_rate + if self._samples.ndim >= 2: + self._samples = np.mean(self._samples, 1) + + def __eq__(self, other): + """Return whether two objects are equal.""" + if type(other) is not type(self): + return False + if self._sample_rate != other._sample_rate: + return False + if self._samples.shape != other._samples.shape: + return False + if np.any(self.samples != other._samples): + return False + return True + + def __ne__(self, other): + """Return whether two objects are unequal.""" + return not self.__eq__(other) + + def __str__(self): + """Return human-readable representation of segment.""" + return ("%s: num_samples=%d, sample_rate=%d, duration=%.2fsec, " + "rms=%.2fdB" % (type(self), self.num_samples, self.sample_rate, + self.duration, self.rms_db)) + + @staticmethod + def _convert_samples_to_float32(samples): + """Convert sample type to float32. + + Audio sample type is usually integer or float-point. + Integers will be scaled to [-1, 1] in float32. 
+ """ + float32_samples = samples.astype('float32') + if samples.dtype in np.sctypes['int']: + bits = np.iinfo(samples.dtype).bits + float32_samples *= (1. / 2 ** (bits - 1)) + elif samples.dtype in np.sctypes['float']: + pass + else: + raise TypeError("Unsupported sample type: %s." % samples.dtype) + return float32_samples + + @property + def samples(self): + return self._samples.copy() + + @property + def sample_rate(self): + return self._sample_rate + + @property + def num_samples(self): + return self._samples.shape[0] + + @property + def duration(self): + return self._samples.shape[0] / float(self._sample_rate) + + @property + def rms_db(self): + mean_square = np.mean(self._samples ** 2) + return 10 * np.log10(mean_square) + + def gain_db(self, gain): + self._samples *= 10. ** (gain / 20.) + + def pad(self, pad_size, symmetric=False): + """Add zero padding to the sample. + + The pad size is given in number of samples. If symmetric=True, + `pad_size` will be added to both sides. If false, `pad_size` zeros + will be added only to the end. + """ + self._samples = np.pad(self._samples, + (pad_size if symmetric else 0, pad_size), + mode='constant') + + def subsegment(self, start_time=None, end_time=None): + """Cut the AudioSegment between given boundaries. + + Note that this is an in-place transformation. + :param start_time: Beginning of subsegment in seconds. + :type start_time: float + :param end_time: End of subsegment in seconds. + :type end_time: float + :raise ValueError: If start_time or end_time is incorrectly set, e.g. out + of bounds in time. + """ + start_time = 0.0 if start_time is None else start_time + end_time = self.duration if end_time is None else end_time + if start_time < 0.0: + start_time = self.duration + start_time + if end_time < 0.0: + end_time = self.duration + end_time + if start_time < 0.0: + raise ValueError("The slice start position (%f s) is out of " + "bounds." 
% start_time) + if end_time < 0.0: + raise ValueError("The slice end position (%f s) is out of bounds." % + end_time) + if start_time > end_time: + raise ValueError("The slice start position (%f s) is later than " + "the end position (%f s)." % (start_time, end_time)) + if end_time > self.duration: + raise ValueError("The slice end position (%f s) is out of bounds " + "(> %f s)" % (end_time, self.duration)) + start_sample = int(round(start_time * self._sample_rate)) + end_sample = int(round(end_time * self._sample_rate)) + self._samples = self._samples[start_sample:end_sample] + + +class Perturbation: + def __init__(self, p=0.1, rng=None): + self.p = p + self._rng = random.Random() if rng is None else rng + + def maybe_apply(self, segment, sample_rate=None): + if self._rng.random() < self.p: + self(segment, sample_rate) + + +class SpeedPerturbation(Perturbation): + def __init__(self, min_rate=0.85, max_rate=1.15, discrete=False, p=0.1, rng=None): + super(SpeedPerturbation, self).__init__(p, rng) + assert 0 < min_rate < max_rate + self.min_rate = min_rate + self.max_rate = max_rate + self.discrete = discrete + + def __call__(self, data, sample_rate): + if self.discrete: + rate = np.random.choice([self.min_rate, None, self.max_rate]) + else: + rate = self._rng.uniform(self.min_rate, self.max_rate) + + if rate is not None: + data._samples = sox.Transformer().speed(factor=rate).build_array( + input_array=data._samples, sample_rate_in=sample_rate) + diff --git a/benchmarks/rnnt/ootb/train/common/data/__init__.py b/benchmarks/rnnt/ootb/train/common/data/__init__.py new file mode 100644 index 0000000..b9211a7 --- /dev/null +++ b/benchmarks/rnnt/ootb/train/common/data/__init__.py @@ -0,0 +1 @@ +from .helpers import * diff --git a/benchmarks/rnnt/ootb/train/common/data/dali/__init__.py b/benchmarks/rnnt/ootb/train/common/data/dali/__init__.py new file mode 100644 index 0000000..ff80003 --- /dev/null +++ b/benchmarks/rnnt/ootb/train/common/data/dali/__init__.py @@ -0,0 +1,13 
@@ +# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/benchmarks/rnnt/ootb/train/common/data/dali/data_loader.py b/benchmarks/rnnt/ootb/train/common/data/dali/data_loader.py new file mode 100644 index 0000000..733bade --- /dev/null +++ b/benchmarks/rnnt/ootb/train/common/data/dali/data_loader.py @@ -0,0 +1,143 @@ +# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os +import math +import numpy as np +import torch.distributed as dist +from .iterator import DaliRnntIterator +from .pipeline import DaliPipeline +from common.helpers import print_once + + +def _parse_json(json_path: str, start_label=0, predicate=lambda json: True): + """ + Parses json file to the format required by DALI + Args: + json_path: path to json file + start_label: the label, starting from which DALI will assign consecutive int numbers to every transcript + predicate: function, that accepts a sample descriptor (i.e. json dictionary) as an argument. + If the predicate for a given sample returns True, it will be included in the dataset. + + Returns: + output_files: dictionary, that maps file name to label assigned by DALI + transcripts: dictionary, that maps label assigned by DALI to the transcript + """ + import json + # NOTE(review): removed stray 'global cnt' -- 'cnt' is never defined or used + with open(json_path) as f: + librispeech_json = json.load(f) + output_files = {} + transcripts = {} + curr_label = start_label + for original_sample in librispeech_json: + if not predicate(original_sample): + continue + transcripts[curr_label] = original_sample['transcript'] + output_files[original_sample['files'][-1]['fname']] = dict( + label=curr_label, + duration=original_sample['original_duration'], + ) + curr_label += 1 + return output_files, transcripts + + +class DaliDataLoader: + """ + DataLoader is the main entry point to the data preprocessing pipeline. + To use, create an object and then just iterate over `data_iterator`. + DataLoader will do the rest for you. + Example: + data_layer = DataLoader(DaliTrainPipeline, path, json, bs, ngpu) + data_it = data_layer.data_iterator + for data in data_it: + print(data) # Here's your preprocessed data + + Args: + device_type: Which device to use for preprocessing. 
Choose: "cpu", "gpu" + pipeline_type: Choose: "train", "val" + """ + + def __init__(self, gpu_id, dataset_path: str, config_data: dict, config_features: dict, json_names: list, + tokenizer, batch_size: int, sampler, pipeline_type: str, grad_accumulation_steps: int = 1, + device_type: str = "gpu"): + import torch + self.batch_size = batch_size + self.grad_accumulation_steps = grad_accumulation_steps + self.drop_last = (pipeline_type == 'train') + self.device_type = device_type + self.pipeline_type = self._parse_pipeline_type(pipeline_type) + self.sampler = sampler + self._dali_data_iterator = self._init_iterator(gpu_id=gpu_id, dataset_path=dataset_path, + config_data=config_data, + config_features=config_features, + json_names=json_names, tokenizer=tokenizer, + pipeline_type=pipeline_type) + + def _init_iterator(self, gpu_id, dataset_path, config_data, config_features, json_names: list, tokenizer: list, + pipeline_type): + """ + Returns data iterator. Data underneath this operator is preprocessed within Dali + """ + + output_files, transcripts = {}, {} + max_duration = config_data['max_duration'] + for jname in json_names: + of, tr = _parse_json(jname if jname[0] == '/' else os.path.join(dataset_path, jname), len(output_files), + predicate=lambda json: json['original_duration'] <= max_duration) + output_files.update(of) + transcripts.update(tr) + self.sampler.make_file_list(output_files, json_names) + self.dataset_size = self.sampler.get_dataset_size() + print_once(f"Dataset read by DALI. 
Number of samples: {self.dataset_size}") + + pipeline = DaliPipeline.from_config(config_data=config_data, config_features=config_features, device_id=gpu_id, + file_root=dataset_path, sampler=self.sampler, + device_type=self.device_type, batch_size=self.batch_size, + pipeline_type=pipeline_type) + + return DaliRnntIterator([pipeline], transcripts=transcripts, tokenizer=tokenizer, batch_size=self.batch_size, + shard_size=self._shard_size(), pipeline_type=pipeline_type) + + @staticmethod + def _parse_pipeline_type(pipeline_type): + pipe = pipeline_type.lower() + assert pipe in ("train", "val"), 'Invalid pipeline type ("train", "val").' + return pipe + + def _shard_size(self): + """ + Total number of samples handled by a single GPU in a single epoch. + """ + world_size = dist.get_world_size() if dist.is_initialized() else 1 + if self.drop_last: + divisor = world_size * self.batch_size * self.grad_accumulation_steps + return self.dataset_size // divisor * divisor // world_size + else: + return int(math.ceil(self.dataset_size / world_size)) + + def __len__(self): + """ + Number of batches handled by each GPU. + """ + if self.drop_last: + assert self._shard_size() % self.batch_size == 0, f'{self._shard_size()} {self.batch_size}' + + return int(math.ceil(self._shard_size() / self.batch_size)) + + def data_iterator(self): + return self._dali_data_iterator + + def __iter__(self): + return self._dali_data_iterator diff --git a/benchmarks/rnnt/ootb/train/common/data/dali/iterator.py b/benchmarks/rnnt/ootb/train/common/data/dali/iterator.py new file mode 100644 index 0000000..21df27f --- /dev/null +++ b/benchmarks/rnnt/ootb/train/common/data/dali/iterator.py @@ -0,0 +1,112 @@ +# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import torch +import torch.distributed as dist +import numpy as np +from common.helpers import print_once +from common.text import _clean_text, punctuation_map + + +def normalize_string(s, charset, punct_map): + """Normalizes string. + + Example: + 'call me at 8:00 pm!' -> 'call me at eight zero pm' + """ + charset = set(charset) + try: + text = _clean_text(s, ["english_cleaners"], punct_map).strip() + return ''.join([tok for tok in text if all(t in charset for t in tok)]) + except Exception: # was a bare 'except:': would also swallow KeyboardInterrupt/SystemExit + print(f"WARNING: Normalizing failed: {s}") + return None + + +class DaliRnntIterator(object): + """ + Returns batches of data for RNN-T training: + preprocessed_signal, preprocessed_signal_length, transcript, transcript_length + + This iterator is not meant to be the entry point to Dali processing pipeline. + Use DataLoader instead. 
+ """ + + def __init__(self, dali_pipelines, transcripts, tokenizer, batch_size, shard_size, pipeline_type, normalize_transcripts=False): + self.normalize_transcripts = normalize_transcripts + self.tokenizer = tokenizer + self.batch_size = batch_size + from nvidia.dali.plugin.pytorch import DALIGenericIterator + from nvidia.dali.plugin.base_iterator import LastBatchPolicy + + # in the train pipeline shard_size is set to be divisible by batch_size, so the PARTIAL policy is safe + if pipeline_type == 'val': + self.dali_it = DALIGenericIterator( + dali_pipelines, ["audio", "label", "audio_shape"], reader_name="Reader", + dynamic_shape=True, auto_reset=True, + last_batch_policy=LastBatchPolicy.PARTIAL) + else: + self.dali_it = DALIGenericIterator( + dali_pipelines, ["audio", "label", "audio_shape"], size=shard_size, + dynamic_shape=True, auto_reset=True, last_batch_padded=True, + last_batch_policy=LastBatchPolicy.PARTIAL) + + self.tokenize(transcripts) + + def tokenize(self, transcripts): + # transcripts maps consecutive int labels to text; flatten to a list ordered by label + transcripts = [transcripts[i] for i in range(len(transcripts))] + if self.normalize_transcripts: + transcripts = [ + normalize_string( + t, + self.tokenizer.charset, + punctuation_map(self.tokenizer.charset) + ) for t in transcripts + ] + transcripts = [self.tokenizer.tokenize(t) for t in transcripts] + transcripts = [torch.tensor(t) for t in transcripts] + self.tr = np.array(transcripts, dtype=object) + self.t_sizes = torch.tensor([len(t) for t in transcripts], dtype=torch.int32) + + def _gen_transcripts(self, labels, normalize_transcripts: bool = True): + """ + Generate transcripts in format expected by NN + """ + ids = labels.flatten().numpy() + transcripts = self.tr[ids] + # Tensors are padded with 0. In `sentencepiece` we set it to , + # because it cannot be disabled, and is absent in the data. + # Note this is different from the RNN-T blank token (index 1023). 
+ transcripts = torch.nn.utils.rnn.pad_sequence(transcripts, batch_first=True) + + return transcripts.cuda(), self.t_sizes[ids].cuda() + + def __next__(self): + data = self.dali_it.__next__() + audio, audio_shape = data[0]["audio"], data[0]["audio_shape"][:, 1] + if audio.shape[0] == 0: + # empty tensor means, other GPUs got last samples from dataset + # and this GPU has nothing to do; calling `__next__` raises StopIteration + return self.dali_it.__next__() + audio = audio[:, :, :audio_shape.max()] # the last batch + transcripts, transcripts_lengths = self._gen_transcripts(data[0]["label"]) + return audio, audio_shape, transcripts, transcripts_lengths + + def next(self): + return self.__next__() + + def __iter__(self): + return self + + diff --git a/benchmarks/rnnt/ootb/train/common/data/dali/pipeline.py b/benchmarks/rnnt/ootb/train/common/data/dali/pipeline.py new file mode 100644 index 0000000..0ca66da --- /dev/null +++ b/benchmarks/rnnt/ootb/train/common/data/dali/pipeline.py @@ -0,0 +1,212 @@ +# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
import multiprocessing

import numpy as np
import torch

import nvidia.dali
import nvidia.dali.ops as ops
import nvidia.dali.types as types


class PipelineParams:
    """Argument container describing dataset-level pipeline settings."""

    def __init__(self,
                 sample_rate=16000,
                 max_duration=float("inf"),
                 normalize_transcripts=True,
                 trim_silence=False,
                 speed_perturbation=None):
        pass


class SpeedPerturbationParams:
    """Argument container describing speed-perturbation settings."""

    def __init__(self,
                 min_rate=0.85,
                 max_rate=1.15,
                 p=1.0):
        pass


class DaliPipeline(nvidia.dali.pipeline.Pipeline):
    """DALI pipeline that reads audio files and emits padded log-mel features.

    The graph is: read -> decode (optionally speed-perturbed) -> optional
    silence trimming -> optional dither -> preemphasis -> spectrogram ->
    mel filterbank -> log -> per-feature normalize -> pad.
    """

    def __init__(self, *,
                 pipeline_type,
                 device_id,
                 num_threads,
                 batch_size,
                 file_root: str,
                 sampler,
                 sample_rate,
                 resample_range: list,
                 window_size,
                 window_stride,
                 nfeatures,
                 nfft,
                 dither_coeff,
                 silence_threshold,
                 preemph_coeff,
                 max_duration,
                 preprocessing_device="gpu"):
        super().__init__(batch_size, num_threads, device_id)

        # Log constructor arguments (rank 0 only) before any locals are
        # introduced, so `locals()` contains exactly the parameters.
        self._dali_init_log(locals())

        if torch.distributed.is_initialized():
            shard_id = torch.distributed.get_rank()
            n_shards = torch.distributed.get_world_size()
        else:
            shard_id = 0
            n_shards = 1

        self.preprocessing_device = preprocessing_device.lower()
        assert self.preprocessing_device in ("cpu", "gpu"), \
            "Incorrect preprocessing device. Please choose either 'cpu' or 'gpu'"

        self.resample_range = resample_range

        is_train = pipeline_type == 'train'
        self.train = is_train
        self.sample_rate = sample_rate
        self.dither_coeff = dither_coeff
        self.nfeatures = nfeatures
        self.max_duration = max_duration
        self.do_remove_silence = silence_threshold is not None

        # A random sampler pre-shuffles the file list itself, so the reader
        # only reshuffles between epochs when the sampler is deterministic.
        shuffle = is_train and not sampler.is_sampler_random()
        self.read = ops.FileReader(
            name="Reader",
            pad_last_batch=(pipeline_type == 'val'),
            device="cpu",
            file_root=file_root,
            file_list=sampler.get_file_list_path(),
            shard_id=shard_id,
            num_shards=n_shards,
            shuffle_after_epoch=shuffle)

        if resample_range is not None:
            self.speed_perturbation_coeffs = ops.Uniform(device="cpu",
                                                         range=resample_range)
        else:
            self.speed_perturbation_coeffs = None

        # With speed perturbation enabled, the target rate is supplied per
        # sample at decode time, so the fixed rate is left unset here.
        self.decode = ops.AudioDecoder(
            device="cpu",
            sample_rate=self.sample_rate if resample_range is None else None,
            dtype=types.FLOAT,
            downmix=True)

        self.normal_distribution = ops.NormalDistribution(
            device=preprocessing_device)

        self.preemph = ops.PreemphasisFilter(device=preprocessing_device,
                                             preemph_coeff=preemph_coeff)

        self.spectrogram = ops.Spectrogram(
            device=preprocessing_device,
            nfft=nfft,
            window_length=window_size * sample_rate,
            window_step=window_stride * sample_rate)

        self.mel_fbank = ops.MelFilterBank(device=preprocessing_device,
                                           sample_rate=sample_rate,
                                           nfilter=self.nfeatures,
                                           normalize=True)

        self.log_features = ops.ToDecibels(device=preprocessing_device,
                                           multiplier=np.log(10),
                                           reference=1.0,
                                           cutoff_db=math.log(1e-20))

        self.get_shape = ops.Shapes(device=preprocessing_device)

        self.normalize = ops.Normalize(device=preprocessing_device, axes=[1])

        self.pad = ops.Pad(device=preprocessing_device, fill_value=0)

        # Silence trimming operators (built unconditionally; applied only
        # when do_remove_silence is set).
        self.get_nonsilent_region = ops.NonsilentRegion(
            device="cpu", cutoff_db=silence_threshold)
        self.trim_silence = ops.Slice(device="cpu",
                                      normalized_anchor=False,
                                      normalized_shape=False,
                                      axes=[0])
        self.to_float = ops.Cast(device="cpu", dtype=types.FLOAT)

    @classmethod
    def from_config(cls, pipeline_type, device_id, batch_size, file_root: str,
                    sampler, config_data: dict, config_features: dict,
                    device_type: str = "gpu", do_resampling: bool = True,
                    num_cpu_threads=multiprocessing.cpu_count()):
        """Build a pipeline from parsed data/feature config dictionaries."""
        silence_threshold = -60 if config_data['trim_silence'] else None

        if do_resampling and config_data['speed_perturbation'] is not None:
            resample_range = [config_data['speed_perturbation']['min_rate'],
                              config_data['speed_perturbation']['max_rate']]
        else:
            resample_range = None

        return cls(pipeline_type=pipeline_type,
                   device_id=device_id,
                   preprocessing_device=device_type,
                   num_threads=num_cpu_threads,
                   batch_size=batch_size,
                   file_root=file_root,
                   sampler=sampler,
                   sample_rate=config_data['sample_rate'],
                   resample_range=resample_range,
                   window_size=config_features['window_size'],
                   window_stride=config_features['window_stride'],
                   nfeatures=config_features['n_filt'],
                   nfft=config_features['n_fft'],
                   dither_coeff=config_features['dither'],
                   silence_threshold=silence_threshold,
                   preemph_coeff=.97,
                   max_duration=config_data['max_duration'],
                   )

    @staticmethod
    def _dali_init_log(args: dict):
        """Pretty-print constructor arguments, on rank 0 only."""
        if (not torch.distributed.is_initialized()
                or torch.distributed.get_rank() == 0):
            max_len = max(len(name) for name in args.keys())
            fmt_string = '\t%' + str(max_len) + 's : %s'
            print('Initializing DALI with parameters:')
            for key_pair in sorted(args.items()):
                print(fmt_string % key_pair)

    def _remove_silence(self, inp):
        """Slice away leading/trailing silence from a decoded waveform."""
        begin, length = self.get_nonsilent_region(inp)
        return self.trim_silence(inp, self.to_float(begin),
                                 self.to_float(length))

    def define_graph(self):
        audio, label = self.read()
        if not self.train or self.speed_perturbation_coeffs is None:
            audio, sr = self.decode(audio)
        else:
            # Resample-based speed perturbation: scale the decode rate.
            speed_coeffs = self.speed_perturbation_coeffs() * self.sample_rate
            audio, sr = self.decode(audio, sample_rate=speed_coeffs)

        if self.do_remove_silence:
            audio = self._remove_silence(audio)

        # Max duration drop is performed at DataLayer stage

        if self.preprocessing_device == "gpu":
            audio = audio.gpu()

        if self.dither_coeff != 0.:
            audio = audio + self.normal_distribution(audio) * self.dither_coeff

        audio = self.preemph(audio)

        audio = self.spectrogram(audio)
        audio = self.mel_fbank(audio)
        audio = self.log_features(audio)

        audio_len = self.get_shape(audio)

        audio = self.normalize(audio)
        audio = self.pad(audio)

        # When modifying DALI pipeline returns, make sure you update
        # `output_map` in DALIGenericIterator invocation
        return audio.gpu(), label, audio_len.gpu()
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import hashlib
import os

import numpy as np


def hash_list_of_strings(li):
    """Return a deterministic hex digest of the concatenated strings.

    The digest names an on-disk file-list cache shared by every worker, so
    it must be identical across processes and runs. The builtin ``hash``
    (used previously) is randomized per process via PYTHONHASHSEED, which
    could make distributed workers derive *different* /tmp paths; use a
    real digest instead.
    """
    return hashlib.md5(''.join(li).encode('utf-8')).hexdigest()


class SimpleSampler:
    """Sequential (non-random) sampler that materializes a DALI file list."""

    def __init__(self):
        self.file_list_path = None  # set by make_file_list
        self.dataset_size = None    # set by process_output_files

    def write_file_list(self, files):
        """Write '<name> <label>' lines in the format DALI's FileReader expects."""
        with open(self.file_list_path, 'w') as f:
            f.writelines(f'{name} {label}\n' for name, label in files)

    def get_file_list_path(self):
        # Compare against None so a legitimately empty file list still passes.
        assert self.file_list_path is not None, \
            'File list not initialized. Run make_file_list first'
        return self.file_list_path

    def get_dataset_size(self):
        # Compare against None so a legitimate size of 0 still passes.
        assert self.dataset_size is not None, \
            'Dataset size not known. Run make_file_list first'
        return self.dataset_size

    def is_sampler_random(self):
        return False

    def process_output_files(self, output_files):
        """Flatten {path: {'label': ...}} into [(path, label)] in dict order."""
        self.dataset_size = len(output_files)
        return [(path, entry['label']) for path, entry in output_files.items()]

    def make_file_list(self, output_files, json_names):
        """Write the file list to a /tmp path keyed by the manifest names."""
        self.file_list_path = os.path.join(
            "/tmp",
            "rnnt_dali.file_list." + hash_list_of_strings(json_names)
        )
        self.write_file_list(self.process_output_files(output_files))


class BucketingSampler(SimpleSampler):
    """Sampler that groups samples of similar duration into buckets.

    For each epoch, samples are permuted inside their duration bucket, a
    random remainder is dropped so each epoch divides into whole global
    batches, and whole batches are then shuffled within the epoch.
    """

    def __init__(self, num_buckets, batch_size, num_workers, num_epochs, rng):
        super(BucketingSampler, self).__init__()
        self.rng = rng                  # numpy Generator used for all shuffling
        self.num_buckets = num_buckets
        self.num_epochs = num_epochs
        self.batch_size = batch_size    # per-worker batch size
        self.num_workers = num_workers

    def process_output_files(self, output_files):
        """Produce the (name, label) sequence for all epochs, pre-shuffled."""
        names = list(output_files)
        lengths = [output_files[name]['duration'] for name in names]
        labels = np.array([output_files[name]['label'] for name in names])
        len_ids = np.argsort(lengths)
        buckets = np.array_split(len_ids, self.num_buckets)

        gbs = self.batch_size * self.num_workers  # global batch size

        shuffled_buckets = np.array([
            perm
            for _ in range(self.num_epochs)          # for every epoch
            for bucket in buckets                    # from every bucket
            for perm in self.rng.permutation(bucket) # pick samples in random order
        ])

        # Drop a random remainder from each epoch so it divides into whole
        # global batches (equivalent to drop_last). `choice` never returns
        # None, so no None-check is needed; with to_drop == 0 the
        # assignment is a no-op.
        epochs = np.reshape(shuffled_buckets, [self.num_epochs, -1])
        to_drop = epochs.shape[1] - (epochs.shape[1] // gbs * gbs)
        for epoch in epochs:
            dropped_idxs = self.rng.choice(epochs.shape[1], to_drop,
                                           replace=False)
            epoch[dropped_idxs] = -1
        epochs = epochs[epochs != -1].reshape(self.num_epochs, -1)
        self.dataset_size = epochs.shape[1]

        epochs_iters_batch = np.reshape(epochs, [self.num_epochs, -1, gbs])

        # Shuffle iterations within each epoch, preserving batches.
        for epoch in epochs_iters_batch:
            self.rng.shuffle(epoch, axis=0)

        # Split each global batch across workers and make worker the
        # leading axis so each worker reads a contiguous slice.
        epochs_iters_batch_worker = np.reshape(
            epochs_iters_batch,
            [self.num_epochs, -1, self.batch_size, self.num_workers]
        )
        workers_epochs_iters_batch = np.moveaxis(
            epochs_iters_batch_worker, -1, 0)

        return [
            (names[i], labels[i])
            for i in workers_epochs_iters_batch.flatten()
        ]

    def is_sampler_random(self):
        return True
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
from pathlib import Path

import numpy as np

import torch
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.distributed import DistributedSampler

from common.audio import (audio_from_file, AudioSegment, SpeedPerturbation)
from common.text import _clean_text, punctuation_map

from common.helpers import print_once
from common.sampler import BucketingSampler


def normalize_string(s, charset, punct_map):
    """Normalizes string.

    Example:
        'call me at 8:00 pm!' -> 'call me at eight zero pm'

    Returns None (after printing a warning) when cleaning fails.
    """
    charset = set(charset)
    try:
        text = _clean_text(s, ["english_cleaners"], punct_map).strip()
        return ''.join([tok for tok in text if all(t in charset for t in tok)])
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt / SystemExit
        # are no longer swallowed; the best-effort fallback is preserved.
        print(f"WARNING: Normalizing failed: {s}")
        return None


class FilelistDataset(Dataset):
    """Dataset over a plain text file listing one audio path per line."""

    def __init__(self, filelist_fpath):
        # Context manager closes the handle promptly (previously it was
        # left open until garbage collection).
        with open(filelist_fpath, 'r') as f:
            self.samples = [line.strip() for line in f]

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, index):
        audio, audio_len = audio_from_file(self.samples[index])
        # Dummy transcript tensors: this dataset serves transcription-free
        # inference.
        return (audio.squeeze(0), audio_len, torch.LongTensor([0]),
                torch.LongTensor([0]))


class SingleAudioDataset(FilelistDataset):
    """FilelistDataset over exactly one audio file."""

    def __init__(self, audio_fpath):
        self.samples = [audio_fpath]


class AudioDataset(Dataset):
    def __init__(self, data_dir, manifest_fpaths,
                 tokenizer,
                 sample_rate=16000, min_duration=0.1, max_duration=float("inf"),
                 max_utts=0, normalize_transcripts=True,
                 trim_silence=False,
                 speed_perturbation=None,
                 ignore_offline_speed_perturbation=False):
        """Loads audio, transcript and durations listed in a .json file.

        Args:
            data_dir: absolute path to dataset folder
            manifest_fpaths: relative path from dataset folder
                to manifest json as described above. Can be coma-separated paths.
            tokenizer: class converting transcript to tokens
            sample_rate (int): sample rate audio is resampled to
            min_duration (int): skip audio shorter than threshold
            max_duration (int): skip audio longer than threshold
            max_utts (int): limit number of utterances
            normalize_transcripts (bool): normalize transcript text
            trim_silence (bool): trim leading and trailing silence from audio
            speed_perturbation (dict or None): kwargs for SpeedPerturbation
            ignore_offline_speed_perturbation (bool): use precomputed speed perturbation

        Returns:
            tuple of Tensors
        """
        self.data_dir = data_dir

        self.tokenizer = tokenizer
        self.punctuation_map = punctuation_map(self.tokenizer.charset)

        self.max_utts = max_utts
        self.normalize_transcripts = normalize_transcripts
        self.ignore_offline_speed_perturbation = ignore_offline_speed_perturbation

        self.min_duration = min_duration
        # (the original assigned max_duration twice; once is enough)
        self.max_duration = max_duration
        self.trim_silence = trim_silence
        self.sample_rate = sample_rate

        perturbations = []
        if speed_perturbation is not None:
            perturbations.append(SpeedPerturbation(**speed_perturbation))
        self.perturbations = perturbations

        self.samples = []
        self.duration = 0.0
        self.duration_filtered = 0.0

        for fpath in manifest_fpaths:
            self._load_json_manifest(fpath)

    def __getitem__(self, index):
        s = self.samples[index]
        # Each sample may carry several precomputed (speed-perturbed)
        # audio files; pick one at random.
        rn_indx = np.random.randint(len(s['audio_filepath']))
        duration = s['audio_duration'][rn_indx] if 'audio_duration' in s else 0
        offset = s.get('offset', 0)

        segment = AudioSegment(
            s['audio_filepath'][rn_indx], target_sr=self.sample_rate,
            offset=offset, duration=duration, trim=self.trim_silence)

        for p in self.perturbations:
            p.maybe_apply(segment, self.sample_rate)

        segment = torch.FloatTensor(segment.samples)

        return (segment,
                torch.tensor(segment.shape[0]).int(),
                torch.tensor(s["transcript"]),
                torch.tensor(len(s["transcript"])).int())

    def __len__(self):
        return len(self.samples)

    def _load_json_manifest(self, fpath):
        """Parse one manifest json, filtering and tokenizing its samples."""
        with open(fpath, "r", encoding="utf-8") as f:
            j = json.load(f)
        for i, s in enumerate(j):
            if i % 1000 == 0:
                # lightweight progress indicator
                print(f'{i:>10}/{len(j):<10}', end='\r')

            duration = s.pop('original_duration')
            s['duration'] = duration
            if not (self.min_duration <= duration <= self.max_duration):
                self.duration_filtered += duration
                continue

            # Prune and normalize according to transcript
            tr = (s.get('transcript', None) or
                  self.load_transcript(s['text_filepath']))

            if not isinstance(tr, str):
                print(f'WARNING: Skipped sample (transcript not a str): {tr}.')
                self.duration_filtered += s['duration']
                continue

            if self.normalize_transcripts:
                tr = normalize_string(tr, self.tokenizer.charset,
                                      self.punctuation_map)

            s["transcript"] = self.tokenizer.tokenize(tr)

            files = s.pop('files')
            if self.ignore_offline_speed_perturbation:
                files = [f for f in files if f['speed'] == 1.0]

            s['audio_duration'] = [f['duration'] for f in files]
            s['audio_filepath'] = [str(Path(self.data_dir, f['fname']))
                                   for f in files]
            self.samples.append(s)
            self.duration += s['duration']

            if self.max_utts > 0 and len(self.samples) >= self.max_utts:
                print(f'Reached max_utts={self.max_utts}. Finished parsing {fpath}.')
                break

    def load_transcript(self, transcript_path):
        with open(transcript_path, 'r', encoding="utf-8") as transcript_file:
            transcript = transcript_file.read().replace('\n', '')
        return transcript


def collate_fn(batch):
    """Pad (audio, audio_len, transcript, transcript_len) samples into a batch."""
    bs = len(batch)

    def max_len(l, idx):
        return max(el[idx].size(0) for el in l)

    audio = torch.zeros(bs, max_len(batch, 0))
    audio_lens = torch.zeros(bs, dtype=torch.int32)
    # NOTE(review): transcripts land in a float tensor here — presumably
    # cast to long downstream; confirm against the consumer.
    transcript = torch.zeros(bs, max_len(batch, 2))
    transcript_lens = torch.zeros(bs, dtype=torch.int32)

    for i, sample in enumerate(batch):
        audio[i].narrow(0, 0, sample[0].size(0)).copy_(sample[0])
        audio_lens[i] = sample[1]
        transcript[i].narrow(0, 0, sample[2].size(0)).copy_(sample[2])
        transcript_lens[i] = sample[3]
    return audio, audio_lens, transcript, transcript_lens


def get_data_loader(dataset, batch_size, world_size, rank, shuffle=True,
                    drop_last=True, num_workers=4, num_buckets=None):
    """Build a DataLoader, choosing a sampler based on world size / buckets."""
    if world_size != 1:
        # A sampler owns the shuffling in the distributed case.
        loader_shuffle = False
        if num_buckets:
            assert shuffle, 'only random buckets are supported'
            sampler = BucketingSampler(
                dataset,
                batch_size,
                num_buckets,
                world_size,
                rank,
            )
            print('Using BucketingSampler')
        else:
            sampler = DistributedSampler(dataset, shuffle=shuffle)
            print('Using DistributedSampler')
    else:
        loader_shuffle = shuffle
        sampler = None
        print('Using no sampler')

    return DataLoader(
        batch_size=batch_size,
        drop_last=drop_last,
        sampler=sampler,
        shuffle=loader_shuffle,
        dataset=dataset,
        collate_fn=collate_fn,
        num_workers=num_workers,
        pin_memory=True
    )
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
import random

import librosa
import torch
import torch.nn as nn

from apex import amp


class BaseFeatures(nn.Module):
    """Base class for GPU accelerated audio preprocessing."""

    def __init__(self, optim_level):
        super(BaseFeatures, self).__init__()
        # apex AMP opt level; at O1 feature extraction runs with
        # autocasting disabled (see __call__).
        self.optim_level = optim_level

    @torch.no_grad()
    def calculate_features(self, audio, audio_lens):
        # Identity by default; subclasses override.
        return audio, audio_lens

    def __call__(self, x):
        audio, audio_lens = x
        if self.optim_level == 1:
            with amp.disable_casts():
                return self.calculate_features(audio, audio_lens)
        else:
            return self.calculate_features(audio, audio_lens)


class SpecAugment(BaseFeatures):
    """Regularize by masking entire time steps/frequency bands.

    Implementes SpecAugment (https://arxiv.org/abs/1904.08779)
    with adaptive masking (https://arxiv.org/abs/1912.05533), without time
    warping.

    Args:
        freq_masks (int): number of masks for frequency bands
        min_freq (int): minimum number of frequencies in a single mask
        max_freq (int or float): maximum number of frequencies in a single mask
        time_masks (int or float): number of masks or adaptive percentage
        min_time (int): minimum number of masked time steps per mask; applies
            only if max is non-adaptive
        max_time (int or float): maximum number of masked time steps per mask,
            value 0 < 1 then denotes adaptive percentage
        noise_magnitude (float): mask with N(0, noise_magnitude * std(sample))
            noise instead of zeros to stabilize training
    """

    def __init__(self, optim_level, freq_masks=0, min_freq=0, max_freq=10,
                 time_masks=0, min_time=0, max_time=10, noise_magnitude=0):
        super(SpecAugment, self).__init__(optim_level)
        assert 0 <= min_freq <= max_freq
        assert 0 <= min_time <= max_time

        self.freq_masks = freq_masks
        self.min_freq = min_freq
        self.max_freq = max_freq

        self.time_masks = time_masks
        self.min_time = min_time
        self.max_time = max_time

        self.noise_magnitude = noise_magnitude

    @torch.no_grad()
    def calculate_features(self, x, x_lens):
        """Mask x (batch, features, frames); x_lens holds unpadded frame counts."""
        sh = x.shape
        mask = torch.zeros(x.shape, dtype=torch.bool, device=x.device)

        for idx in range(sh[0]):
            # Frequency masks
            for _ in range(self.freq_masks):
                w = torch.randint(self.min_freq, self.max_freq + 1,
                                  size=(1,)).item()
                f0 = torch.randint(0, max(1, sh[1] - w + 1), size=(1,))
                mask[idx, f0:f0+w] = 1

            # Adaptive time masking: fractional settings scale with the
            # unpadded length of each sample.
            time_masks = self.time_masks
            if 0 < time_masks < 1.0:
                time_masks = int(round(x_lens[idx].item() * time_masks))

            max_time = self.max_time
            if 0 < max_time < 1.0:
                max_time = int(round(x_lens[idx].item() * max_time))

            for _ in range(time_masks):
                w = torch.randint(self.min_time, max_time + 1,
                                  size=(1,)).item()
                t0 = torch.randint(0, max(1, sh[2] - w + 1), size=(1,))
                mask[idx, :, t0:t0+w] = 1

        if self.noise_magnitude > 0:
            # Fill masked cells with N(mean, noise_magnitude * std) noise,
            # with per-(sample, feature) statistics over unpadded frames.
            mean = torch.zeros(x.size(0), x.size(1), 1, device=x.device)
            std = torch.zeros(x.size(0), x.size(1), 1, device=x.device)
            for idx in range(sh[0]):
                mean[idx, :, 0] = x[idx, :, :x_lens[idx]].mean(dim=1)
                # BUGFIX: this previously computed .mean(dim=1) into the std
                # buffer, so the noise scale tracked the mean instead of the
                # standard deviation promised by the docstring.
                std[idx, :, 0] = x[idx, :, :x_lens[idx]].std(dim=1)

            std *= self.noise_magnitude
            noise = (mean + torch.randn_like(x) * std).masked_fill(~mask, 0)
        else:
            noise = 0

        return x.masked_fill(mask, 0) + noise, x_lens


@torch.jit.script
def normalize_batch(x, x_lens, normalize_type: str):
    """Normalize a (batch, features, frames) batch over its unpadded frames."""
    if normalize_type == "per_feature":
        mean = x.new_zeros(x.size(0), x.size(1))
        std = x.new_zeros(x.size(0), x.size(1))

        for i in range(x.size(0)):
            mean[i, :] = x[i, :, :x_lens[i]].mean(dim=1)
            std[i, :] = x[i, :, :x_lens[i]].std(dim=1)
        # make sure std is not zero
        return (x - mean.unsqueeze(2)) / (std.unsqueeze(2) + 1e-5)

    elif normalize_type == "all_features":
        mean = x.new_zeros(x.size(0))
        std = x.new_zeros(x.size(0))
        for i in range(x.size(0)):
            mean[i] = x[i, :, :x_lens[i]].mean()
            std[i] = x[i, :, :x_lens[i]].std()
        # make sure x_std is not zero
        return (x - mean.view(-1, 1, 1)) / (std.view(-1, 1, 1) + 1e-5)
    else:
        return x


def stack_subsample_frames(x, x_lens, stacking=1, subsampling=1):
    """ Stacks frames together across feature dim, and then subsamples

    input is batch_size, feature_dim, num_frames
    output is batch_size, feature_dim * stacking, num_frames / subsampling

    """
    seq = [x]
    for n in range(1, stacking):
        tmp = torch.zeros_like(x)
        tmp[:, :, :-n] = x[:, :, n:]
        seq.append(tmp)
    x = torch.cat(seq, dim=1)[:, :, ::subsampling]

    if subsampling > 1:
        x_lens = torch.ceil(x_lens.float() / subsampling).int()

        if x.size(2) > x_lens.max().item():
            assert abs(x.size(2) - x_lens.max().item()) <= 1
            x = x[:, :, :x_lens.max().item()]

    return x, x_lens


class FilterbankFeatures(BaseFeatures):
    """STFT -> power spectrum -> mel filterbank -> log -> normalize."""
    # For JIT, https://pytorch.org/docs/stable/jit.html#python-defined-constants
    __constants__ = ["dither", "preemph", "n_fft", "hop_length", "win_length",
                     "log", "normalize"]
    # torchscript: "center" removed due to a bug

    def __init__(self,
                 optim_level, sample_rate=8000, window_size=0.02,
                 window_stride=0.01, window="hamming",
                 normalize="per_feature", n_fft=None,
                 preemph=0.97, n_filt=64, lowfreq=0, highfreq=None, log=True,
                 dither=1e-5):
        super(FilterbankFeatures, self).__init__(optim_level)
        torch_windows = {
            'hann': torch.hann_window,
            'hamming': torch.hamming_window,
            'blackman': torch.blackman_window,
            'bartlett': torch.bartlett_window,
            'none': None,
        }

        self.win_length = int(sample_rate * window_size)  # frame size
        self.hop_length = int(sample_rate * window_stride)
        # Default FFT size: next power of two above the window length.
        self.n_fft = n_fft or 2 ** math.ceil(math.log2(self.win_length))

        self.normalize = normalize
        self.log = log
        # TORCHSCRIPT: Check whether or not we need this
        self.dither = dither
        self.n_filt = n_filt
        self.preemph = preemph
        highfreq = highfreq or sample_rate / 2
        window_fn = torch_windows.get(window, None)
        window_tensor = window_fn(self.win_length,
                                  periodic=False) if window_fn else None
        filterbanks = torch.tensor(
            librosa.filters.mel(sample_rate, self.n_fft, n_mels=n_filt,
                                fmin=lowfreq, fmax=highfreq),
            dtype=torch.float).unsqueeze(0)
        # torchscript
        self.register_buffer("fb", filterbanks)
        self.register_buffer("window", window_tensor)

    # do stft
    # TORCHSCRIPT: center removed due to bug
    def stft(self, x):
        return torch.stft(x, n_fft=self.n_fft, hop_length=self.hop_length,
                          win_length=self.win_length,
                          window=self.window.to(dtype=torch.float))
        # return_complex=False)

    @torch.no_grad()
    def calculate_features(self, x, x_lens):
        if self.dither > 0:
            # NOTE(review): in-place, so the caller's waveform tensor is
            # mutated — presumably intentional; confirm before changing.
            x += self.dither * torch.randn_like(x)

        if self.preemph is not None:
            x = torch.cat(
                (x[:, 0].unsqueeze(1), x[:, 1:] - self.preemph * x[:, :-1]),
                dim=1)
        x = self.stft(x).to(x.dtype)

        x_lens = torch.ceil(x_lens.float() / self.hop_length).int()

        # get power spectrum
        x = x.pow(2).sum(-1)

        # dot with filterbank energies
        x = torch.matmul(self.fb.to(x.dtype), x)

        if self.log:
            x = torch.log(x + 1e-20)

        # normalize if required
        x = normalize_batch(x, x_lens, normalize_type=self.normalize)

        return x, x_lens


class FrameSplicing(BaseFeatures):
    __constants__ = ['frame_subsampling', 'frame_stacking']

    def __init__(self, optim_level, frame_stacking=1, frame_subsampling=1):
        super(FrameSplicing, self).__init__(optim_level)
        self.frame_stacking = frame_stacking
        self.frame_subsampling = frame_subsampling

    def calculate_features(self, x, x_lens):
        # frame splicing if required
        if self.frame_stacking > 1 or self.frame_subsampling > 1:
            x, x_lens = stack_subsample_frames(x, x_lens, self.frame_stacking,
                                               self.frame_subsampling)

        return x, x_lens


class FillPadding(BaseFeatures):
    __constants__ = ['fill_value']

    def __init__(self, optim_level, fill_value=0):
        super(FillPadding, self).__init__(optim_level)
        self.fill_value = fill_value

    def calculate_features(self, x, x_lens):
        # mask to zero any values beyond x_lens in batch
        max_len = x.size(-1)
        mask = torch.arange(max_len, dtype=x_lens.dtype, device=x.device)
        mask = mask.expand(x.size(0), max_len) >= x_lens.unsqueeze(1)
        x = x.masked_fill(mask.unsqueeze(1), self.fill_value)

        return x, x_lens
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# --- common/data/helpers.py ---------------------------------------------

from common.data.dali.data_loader import DaliDataLoader


def dataset_size(dataset):
    """Return the number of samples served by either loader flavor."""
    # DALI loaders track their size explicitly; PyTorch DataLoaders
    # delegate to their sampler.
    if isinstance(dataset, DaliDataLoader):  # DALI
        return dataset.dataset_size
    return dataset.sampler.num_samples  # PyTorch


# --- common/data/text.py ------------------------------------------------

import sentencepiece as spm


class Tokenizer:
    def __init__(self, labels, sentpiece_model=None):
        """Converts transcript to a sequence of tokens.

        Args:
            labels (str): all possible output symbols
        """
        # The character set is always recorded; sentencepiece is used only
        # when a model file is supplied, otherwise a plain char->index map.
        self.charset = labels
        self.use_sentpiece = sentpiece_model is not None
        if self.use_sentpiece:
            self.sentpiece = spm.SentencePieceProcessor(
                model_file=sentpiece_model)
            self.num_labels = len(self.sentpiece)
        else:
            self.num_labels = len(self.charset)
            self.label2ind = {lab: i for i, lab in enumerate(self.charset)}

    def tokenize(self, transcript):
        """Map a transcript string to a list of integer token ids."""
        if self.use_sentpiece:
            token_ids = self.sentpiece.encode(transcript, out_type=int)
            assert 0 not in token_ids, ' found during tokenization (OOV?)'
            return token_ids
        # Character path: silently drop symbols outside the charset.
        return [self.label2ind[ch]
                for ch in transcript if ch in self.label2ind]

    def detokenize(self, inds):
        """Map integer token ids back to a string."""
        if self.use_sentpiece:
            return self.sentpiece.decode(inds)
        return ''.join(self.charset[i] for i in inds)
import glob
import os
import re
from collections import OrderedDict

from apex import amp

import torch
import torch.distributed as dist

from .metrics import word_error_rate


def __rnnt_decoder_predictions_tensor(tensor, detokenize):
    """Convert greedy RNN-T decoder output to a list of strings.

    Args:
        tensor: iterable of per-utterance token id sequences
        detokenize: callable mapping a token id sequence to a string
    Returns:
        list of decoded strings, one per utterance
    """
    return [detokenize(pred) for pred in tensor]


def print_once(msg):
    """Print only from rank 0 (or always, if torch.distributed is not up)."""
    if not dist.is_initialized() or dist.get_rank() == 0:
        print(msg)


def greedy_wer(preds, tgt, tgt_lens, detokenize):
    """Compute WER between greedy predictions and reference transcripts.

    Args:
        preds: greedy decoder output (token id sequences)
        tgt: target transcript tensor
        tgt_lens: lengths of the target transcripts
        detokenize: callable mapping a token id sequence to a string
    Returns:
        (wer, first hypothesis, first reference) — the example pair is
        returned so callers can print it for a quick sanity check.
    """
    with torch.no_grad():
        references = gather_transcripts([tgt], [tgt_lens], detokenize)
        hypotheses = __rnnt_decoder_predictions_tensor(preds, detokenize)

    wer, _, _ = word_error_rate(hypotheses, references)
    return wer, hypotheses[0], references[0]


def gather_losses(losses_list):
    """Reduce a list of loss tensors to a single-element list (their mean)."""
    return [torch.mean(torch.stack(losses_list))]


def gather_predictions(predictions_list, detokenize):
    """Decode and flatten a list of per-batch prediction tensors."""
    rnnt_predictions = (
        __rnnt_decoder_predictions_tensor(prediction, detokenize)
        for prediction in predictions_list
    )
    return [
        prediction
        for batch in rnnt_predictions
        for prediction in batch
    ]


def gather_transcripts(transcript_list, transcript_len_list, detokenize):
    """Detokenize padded transcripts, trimming each to its true length."""
    return [
        detokenize(t[:l].long().cpu().numpy().tolist())
        for txt, lens in zip(transcript_list, transcript_len_list)
        for t, l in zip(txt, lens)
    ]


def process_evaluation_epoch(aggregates):
    """
    Processes results from each worker at the end of evaluation and combine
    to final result.

    Args:
        aggregates: dictionary containing information of entire evaluation
    Return:
        wer: final word error rate
        loss: final loss (None if no losses were aggregated)
    """
    if 'losses' in aggregates:
        eloss = torch.mean(torch.stack(aggregates['losses'])).item()
    else:
        eloss = None

    hypotheses = aggregates['preds']
    references = aggregates['txts']

    wer, scores, num_words = word_error_rate(hypotheses, references)
    multi_gpu = dist.is_initialized()
    if multi_gpu:
        # All-reduce sums across ranks; loss is pre-divided by world size so
        # the reduced value is the mean, WER is recomputed from global counts.
        if eloss is not None:
            eloss /= dist.get_world_size()
            eloss_tensor = torch.tensor(eloss).cuda()
            dist.all_reduce(eloss_tensor)
            eloss = eloss_tensor.item()

        scores_tensor = torch.tensor(scores).cuda()
        dist.all_reduce(scores_tensor)
        scores = scores_tensor.item()
        num_words_tensor = torch.tensor(num_words).cuda()
        dist.all_reduce(num_words_tensor)
        num_words = num_words_tensor.item()
        wer = scores * 1.0 / num_words
    return wer, eloss


def num_weights(module):
    """Return the number of trainable parameters of ``module``."""
    return sum(p.numel() for p in module.parameters() if p.requires_grad)


class Checkpointer(object):
    """Saves/loads checkpoints and prunes old ones, keeping milestones."""

    def __init__(self, save_dir, model_name, keep_milestones=None,
                 use_amp=False):
        self.save_dir = save_dir
        # Avoid a shared mutable default argument; default preserved.
        self.keep_milestones = ([100, 200, 300] if keep_milestones is None
                                else keep_milestones)
        self.use_amp = use_amp
        self.model_name = model_name

        # Discover checkpoints already on disk, ordered by epoch.
        tracked = [
            (int(re.search(r'epoch(\d+)_', f).group(1)), f)
            for f in glob.glob(f'{save_dir}/{self.model_name}_epoch*_checkpoint.pt')]
        tracked = sorted(tracked, key=lambda t: t[0])
        self.tracked = OrderedDict(tracked)

    def save(self, model, ema_model, optimizer, epoch, step, best_wer,
             is_best=False):
        """Saves model checkpoint for inference/resuming training.

        Args:
            model: the model, optionally wrapped by DistributedDataParallel
            ema_model: model with averaged weights, can be None
            optimizer: optimizer
            epoch (int): epoch during which the model is saved
            step (int): number of steps since beginning of training
            best_wer (float): lowest recorded WER on the dev set
            is_best (bool, optional): set name of checkpoint to 'best'
                and overwrite the previous one
        """
        rank = 0
        if dist.is_initialized():
            dist.barrier()
            rank = dist.get_rank()

        if rank != 0:
            return

        # Checkpoint already saved
        if not is_best and epoch in self.tracked:
            return

        unwrap_ddp = lambda model: getattr(model, 'module', model)
        state = {
            'epoch': epoch,
            'step': step,
            'best_wer': best_wer,
            'state_dict': unwrap_ddp(model).state_dict(),
            'ema_state_dict': unwrap_ddp(ema_model).state_dict() if ema_model is not None else None,
            'optimizer': optimizer.state_dict(),
            'amp': amp.state_dict() if self.use_amp else None,
        }

        if is_best:
            fpath = os.path.join(
                self.save_dir, f"{self.model_name}_best_checkpoint.pt")
        else:
            fpath = os.path.join(
                self.save_dir, f"{self.model_name}_epoch{epoch}_checkpoint.pt")

        print_once(f"Saving {fpath}...")
        torch.save(state, fpath)

        if not is_best:
            # Remove old checkpoints; keep milestones and the last two.
            # NOTE: loop variable renamed so it no longer clobbers `epoch`.
            self.tracked[epoch] = fpath
            for old_epoch in set(list(self.tracked)[:-2]) - set(self.keep_milestones):
                try:
                    os.remove(self.tracked[old_epoch])
                except OSError:
                    # Best-effort cleanup; a missing file is not fatal.
                    pass
                del self.tracked[old_epoch]

    def last_checkpoint(self):
        """Return the newest loadable checkpoint path, or None.

        Tries checkpoints from newest to oldest, skipping any that fail to
        load (the original code's fallback to the second-newest checkpoint
        was unreachable dead code).
        """
        for fpath in reversed(list(self.tracked.values())):
            try:
                torch.load(fpath, map_location='cpu')
                return fpath
            except Exception:
                print_once(f'Checkpoint {fpath} appears corrupted.')
        return None

    def load(self, fpath, model, ema_model, optimizer, meta):
        """Load a checkpoint in place into model/ema_model/optimizer/meta."""
        print_once(f'Loading model from {fpath}')
        checkpoint = torch.load(fpath, map_location="cpu")

        unwrap_ddp = lambda model: getattr(model, 'module', model)
        state_dict = checkpoint['state_dict']
        unwrap_ddp(model).load_state_dict(state_dict, strict=False)

        if ema_model is not None:
            if checkpoint.get('ema_state_dict') is not None:
                key = 'ema_state_dict'
            else:
                key = 'state_dict'
                print_once('WARNING: EMA weights not found in the checkpoint.')
                print_once('WARNING: Initializing EMA model with regular params.')
            state_dict = checkpoint[key]
            unwrap_ddp(ema_model).load_state_dict(state_dict, strict=False)

        optimizer.load_state_dict(checkpoint['optimizer'])

        if self.use_amp:
            amp.load_state_dict(checkpoint['amp'])

        meta['start_epoch'] = checkpoint.get('epoch')
        meta['best_wer'] = checkpoint.get('best_wer', meta['best_wer'])
def __levenshtein(a, b):
    """Calculates the Levenshtein distance between two sequences.

    Classic two-row dynamic programming; the shorter sequence is kept as
    ``a`` so only O(min(len(a), len(b))) extra space is used.
    """
    n, m = len(a), len(b)
    if n > m:
        # Make sure n <= m, to use O(min(n, m)) space
        a, b = b, a
        n, m = m, n

    current = list(range(n + 1))
    for i in range(1, m + 1):
        previous, current = current, [i] + [0] * n
        for j in range(1, n + 1):
            add, delete = previous[j] + 1, current[j - 1] + 1
            change = previous[j - 1]
            if a[j - 1] != b[i - 1]:
                change = change + 1
            current[j] = min(add, delete, change)

    return current[n]


def word_error_rate(hypotheses, references):
    """Computes average Word Error Rate (WER) between two text lists.

    Args:
        hypotheses: list of hypothesis strings
        references: list of reference strings
    Returns:
        (wer, scores, words): WER (inf if references are empty), total edit
        distance, and total number of reference words.
    Raises:
        ValueError: if there are fewer hypotheses than references.
    """
    scores = 0
    words = 0
    len_diff = len(references) - len(hypotheses)
    if len_diff > 0:
        raise ValueError("Unequal number of hypotheses and references: "
                         "{0} and {1}".format(len(hypotheses), len(references)))
    elif len_diff < 0:
        # Extra hypotheses are ignored (negative slice drops the tail).
        hypotheses = hypotheses[:len_diff]

    for h, r in zip(hypotheses, references):
        h_list = h.split()
        r_list = r.split()
        words += len(r_list)
        scores += __levenshtein(h_list, r_list)
    if words != 0:
        wer = 1.0 * scores / words
    else:
        wer = float('inf')
    return wer, scores, words
# See the License for the specific language governing permissions and
# limitations under the License.

import torch
from torch.optim import Optimizer
import math


def lr_policy(step, epoch, initial_lr, optimizer, steps_per_epoch, warmup_epochs,
              hold_epochs, min_lr=1e-5,
              exp_gamma=None):
    """Learning-rate schedule: linear warmup, hold, then exponential decay.

    Args:
        step: current iteration number
        epoch: current epoch number
        initial_lr: base learning rate (float, or one per param group)
        optimizer: optimizer whose param groups get the new rate
        steps_per_epoch: iterations per epoch
        warmup_epochs: epochs of linear warmup
        hold_epochs: epochs to hold the base rate after warmup
        min_lr: lower bound on the resulting learning rate
        exp_gamma: per-epoch decay factor (required)
    """
    warmup_steps = warmup_epochs * steps_per_epoch
    hold_steps = hold_epochs * steps_per_epoch

    assert exp_gamma is not None

    if step < warmup_steps:
        a = (step + 1) / (warmup_steps + 1)
    elif step < warmup_steps + hold_steps:
        a = 1.0
    else:
        a = exp_gamma ** (epoch - warmup_epochs - hold_epochs)

    if type(initial_lr) is float:
        initial_lr = [initial_lr]

    assert len(initial_lr) == len(optimizer.param_groups)

    for lr, param_group in zip(initial_lr, optimizer.param_groups):
        param_group['lr'] = max(a * lr, min_lr)


class AdamW(Optimizer):
    """Implements AdamW algorithm.

    It has been proposed in `Adam: A Method for Stochastic Optimization`_.

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        amsgrad (boolean, optional): whether to use the AMSGrad variant of this
            algorithm from the paper `On the Convergence of Adam and Beyond`_

    Adam: A Method for Stochastic Optimization:
    https://arxiv.org/abs/1412.6980
    On the Convergence of Adam and Beyond:
    https://openreview.net/forum?id=ryQu7f-RZ
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
                 weight_decay=0, amsgrad=False):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        defaults = dict(lr=lr, betas=betas, eps=eps,
                        weight_decay=weight_decay, amsgrad=amsgrad)
        super(AdamW, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(AdamW, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('amsgrad', False)

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                amsgrad = group['amsgrad']

                state = self.state[p]

                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p.data)
                    if amsgrad:
                        # Maintains max of all exp. moving avg. of sq. grad. values
                        state['max_exp_avg_sq'] = torch.zeros_like(p.data)

                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                if amsgrad:
                    max_exp_avg_sq = state['max_exp_avg_sq']
                beta1, beta2 = group['betas']

                state['step'] += 1
                # Decay the first and second moment running average coefficient.
                # Modern keyword form of addcmul_; the positional
                # (value, t1, t2) overload is deprecated/removed in PyTorch.
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                if amsgrad:
                    # Maintains the maximum of all 2nd moment running avg. till now
                    torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
                    # Use the max. for normalizing running avg. of gradient
                    denom = max_exp_avg_sq.sqrt().add_(group['eps'])
                else:
                    denom = exp_avg_sq.sqrt().add_(group['eps'])

                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']
                step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1
                # Decoupled weight decay plus the Adam update, fused into one
                # in-place op (keyword form of addcdiv_).
                p.data.add_(
                    torch.mul(p.data, group['weight_decay'])
                         .addcdiv_(exp_avg, denom, value=1),
                    alpha=-step_size)

        return loss


class Novograd(Optimizer):
    """
    Implements Novograd algorithm.

    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.95, 0))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        grad_averaging: gradient averaging
        amsgrad (boolean, optional): whether to use the AMSGrad variant of this
            algorithm from the paper `On the Convergence of Adam and Beyond`_
            (default: False)
    """

    def __init__(self, params, lr=1e-3, betas=(0.95, 0), eps=1e-8,
                 weight_decay=0, grad_averaging=False, amsgrad=False):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        defaults = dict(lr=lr, betas=betas, eps=eps,
                        weight_decay=weight_decay,
                        grad_averaging=grad_averaging,
                        amsgrad=amsgrad)

        super(Novograd, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(Novograd, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('amsgrad', False)

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Sparse gradients are not supported.')
                amsgrad = group['amsgrad']

                state = self.state[p]

                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p.data)
                    # Second moment is a scalar (per-layer) in Novograd
                    state['exp_avg_sq'] = torch.zeros([]).to(state['exp_avg'].device)
                    if amsgrad:
                        # Maintains max of all exp. moving avg. of sq. grad. values
                        state['max_exp_avg_sq'] = torch.zeros([]).to(state['exp_avg'].device)

                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                if amsgrad:
                    max_exp_avg_sq = state['max_exp_avg_sq']
                beta1, beta2 = group['betas']

                state['step'] += 1

                norm = torch.sum(torch.pow(grad, 2))

                if exp_avg_sq == 0:
                    # First step: initialize the running squared norm.
                    exp_avg_sq.copy_(norm)
                else:
                    exp_avg_sq.mul_(beta2).add_(norm, alpha=1 - beta2)

                if amsgrad:
                    # Maintains the maximum of all 2nd moment running avg. till now
                    torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
                    # Use the max. for normalizing running avg. of gradient
                    denom = max_exp_avg_sq.sqrt().add_(group['eps'])
                else:
                    denom = exp_avg_sq.sqrt().add_(group['eps'])

                grad.div_(denom)
                if group['weight_decay'] != 0:
                    grad.add_(p.data, alpha=group['weight_decay'])
                if group['grad_averaging']:
                    grad.mul_(1 - beta1)
                exp_avg.mul_(beta1).add_(grad)

                p.data.add_(exp_avg, alpha=-group['lr'])

        return loss
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math

import torch
from torch.nn import Parameter
from mlperf import logging


def rnn(input_size, hidden_size, num_layers,
        forget_gate_bias=1.0, dropout=0.0, mlperf=False,
        **kwargs):
    """Factory for the LSTM wrapper below; see LSTM for argument docs."""
    return LSTM(
        input_size=input_size,
        hidden_size=hidden_size,
        num_layers=num_layers,
        dropout=dropout,
        forget_gate_bias=forget_gate_bias,
        mlperf=mlperf,
        **kwargs,
    )


class LSTM(torch.nn.Module):

    def __init__(self, input_size, hidden_size, num_layers, dropout,
                 forget_gate_bias, mlperf, weights_init_scale=1.0,
                 hidden_hidden_bias_scale=0.0, **kwargs):
        """Returns an LSTM with forget gate bias init to `forget_gate_bias`.

        Args:
            input_size: See `torch.nn.LSTM`.
            hidden_size: See `torch.nn.LSTM`.
            num_layers: See `torch.nn.LSTM`.
            dropout: See `torch.nn.LSTM`; also applied after the last layer.
            forget_gate_bias: For each layer and each direction, the total
                value to initialise the forget gate bias to (None = skip).
            mlperf: if True, emit MLPerf WEIGHTS_INITIALIZATION log events;
                requires `tensor_name` in kwargs.

        Returns:
            A `torch.nn.LSTM`.
        """
        super(LSTM, self).__init__()

        self.lstm = torch.nn.LSTM(
            input_size=input_size,
            hidden_size=hidden_size,
            num_layers=num_layers,
            dropout=dropout,
        )

        self.dropout = torch.nn.Dropout(dropout) if dropout else None

        if forget_gate_bias is not None:
            # Gate order in PyTorch LSTM biases is (i, f, g, o); the slice
            # [hidden_size:2*hidden_size] addresses the forget gate.
            for name, v in self.lstm.named_parameters():
                if "bias_ih" in name:
                    bias = getattr(self.lstm, name)
                    bias.data[hidden_size:2 * hidden_size].fill_(forget_gate_bias)
                if "bias_hh" in name:
                    bias = getattr(self.lstm, name)
                    bias.data[hidden_size:2 * hidden_size] *= float(hidden_hidden_bias_scale)

        for name, v in self.named_parameters():
            if 'weight' in name or 'bias' in name:
                v.data *= float(weights_init_scale)
            # Fix: only look up 'tensor_name' when mlperf logging is on.
            # The original read kwargs['tensor_name'] unconditionally and
            # raised KeyError even with mlperf=False.
            if mlperf:
                logging.log_event(logging.constants.WEIGHTS_INITIALIZATION,
                                  metadata=dict(tensor=kwargs['tensor_name']))

    def forward(self, x, h=None):
        x, h = self.lstm(x, h)
        if self.dropout:
            x = self.dropout(x)
        return x, h
import torch
import numpy as np

from torch.utils.data.sampler import Sampler


class DistributedSampler(Sampler):
    def __init__(self, dataset, batch_size, world_size, rank):
        """
        Constructor for the DistributedSampler.
        :param dataset: dataset
        :param batch_size: local batch size
        :param world_size: number of distributed workers
        :param rank: rank of the current process
        """
        self.dataset = dataset
        self.world_size = world_size
        self.rank = rank
        self.epoch = 0

        self.batch_size = batch_size
        self.global_batch_size = batch_size * world_size

        self.data_len = len(self.dataset)

        # Largest sample count evenly divisible by the global batch size.
        self.num_samples = self.data_len // self.global_batch_size \
            * self.global_batch_size

    def distribute_batches(self, indices):
        """
        Assigns batches to workers.
        Consecutive ranks are getting consecutive batches.
        :param indices: torch.tensor with batch indices
        """
        assert len(indices) == self.num_samples

        indices = indices.view(-1, self.batch_size)
        indices = indices[self.rank::self.world_size].contiguous()
        indices = indices.view(-1)
        indices = indices.tolist()

        assert len(indices) == self.num_samples // self.world_size
        return indices

    def reshuffle_batches(self, indices, rng):
        """
        Permutes global batches
        :param indices: torch.tensor with batch indices
        :param rng: instance of torch.Generator
        """
        indices = indices.view(-1, self.global_batch_size)
        num_batches = indices.shape[0]
        order = torch.randperm(num_batches, generator=rng)
        indices = indices[order, :]
        indices = indices.view(-1)
        return indices

    def __iter__(self):
        g = torch.Generator()
        g.manual_seed(self.epoch)
        # generate permutation
        # Fix: the original passed `generator=rng`, an undefined name,
        # which raised NameError; the local generator is `g`.
        indices = torch.randperm(self.data_len, generator=g)

        # make indices evenly divisible by (batch_size * world_size)
        indices = indices[:self.num_samples]

        # assign batches to workers
        indices = self.distribute_batches(indices)
        return iter(indices)

    def set_epoch(self, epoch):
        """
        Sets current epoch index.
        Epoch index is used to seed RNG in __iter__() function.
        :param epoch: index of current epoch
        """
        self.epoch = epoch

    def __len__(self):
        return self.num_samples // self.world_size


class BucketingSampler(DistributedSampler):
    def __init__(self, dataset, batch_size, num_buckets, world_size, rank):
        """
        Bucketing sampler with approx. equally-sized buckets.
        :param dataset: dataset
        :param batch_size: local batch size
        :param num_buckets: number of buckets
        :param world_size: number of distributed workers
        :param rank: rank of the current process
        """
        super().__init__(dataset, batch_size, world_size, rank)

        self.num_buckets = num_buckets
        # Sort sample indices by duration, then split into ~equal buckets.
        len_ids = np.argsort([sample['duration'] for sample in dataset.samples])
        self.buckets = [torch.from_numpy(t)
                        for t in np.array_split(len_ids, num_buckets)]

    def __iter__(self):
        g = torch.Generator()
        g.manual_seed(self.epoch)
        global_bsz = self.global_batch_size

        indices = []
        for bid in range(self.num_buckets):
            # random shuffle within current bucket
            perm = torch.randperm(len(self.buckets[bid]), generator=g)
            bucket_indices = self.buckets[bid][perm]

            # add samples from current bucket to indices for current epoch
            indices.append(bucket_indices)

        indices = torch.cat(indices)

        # make indices evenly divisible by global batch size
        length = len(indices) // global_bsz * global_bsz
        indices = indices[:length]

        assert len(indices) % self.global_batch_size == 0

        # perform global reshuffle of all global batches
        indices = self.reshuffle_batches(indices, g)
        # distribute batches to individual workers
        indices = self.distribute_batches(indices)
        return iter(indices)
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import atexit
import glob
import os
import re
import numpy as np

import dllogger
import torch
import torch.distributed as dist
from dllogger import StdOutBackend, JSONStreamBackend, Verbosity
from torch.utils.tensorboard import SummaryWriter

from . import helpers


tb_loggers = {}


class TBLogger:
    """
    Buffered TensorBoard logger.

    xyz_dummies: stretch the screen with empty plots so the legend would
    always fit for other plots
    """
    def __init__(self, enabled, log_dir, name, interval=1, dummies=True):
        self.enabled = enabled
        self.interval = interval
        self.cache = {}
        if self.enabled:
            self.summary_writer = SummaryWriter(
                log_dir=os.path.join(log_dir, name),
                flush_secs=120, max_queue=200)
            atexit.register(self.summary_writer.close)
            if dummies:
                for key in ('aaa', 'zzz'):
                    self.summary_writer.add_scalar(key, 0.0, 1)

    def log(self, step, data):
        for k, v in data.items():
            self.log_value(step, k, v.item() if type(v) is torch.Tensor else v)

    def log_value(self, step, key, val, stat='mean'):
        """Accumulate values and emit the `stat` aggregate every `interval`."""
        if self.enabled:
            if key not in self.cache:
                self.cache[key] = []
            self.cache[key].append(val)
            if len(self.cache[key]) == self.interval:
                agg_val = getattr(np, stat)(self.cache[key])
                self.summary_writer.add_scalar(key, agg_val, step)
                del self.cache[key]

    def log_grads(self, step, model):
        """Log max/min/mean of gradient norms of `model`'s parameters."""
        if self.enabled:
            norms = [p.grad.norm().item() for p in model.parameters()
                     if p.grad is not None]
            for stat in ('max', 'min', 'mean'):
                self.log_value(step, f'grad_{stat}', getattr(np, stat)(norms),
                               stat=stat)


def unique_log_fpath(log_fpath):
    """Return `log_fpath`, or a numbered variant if it already exists."""
    if not os.path.isfile(log_fpath):
        return log_fpath

    # Avoid overwriting old logs (raw string — `\.` and `\d` are regex).
    saved = sorted([int(re.search(r'\.(\d+)', f).group(1))
                    for f in glob.glob(f'{log_fpath}.*')])

    log_num = (saved[-1] if saved else 0) + 1
    return f'{log_fpath}.{log_num}'


def stdout_step_format(step):
    """Render a (epoch, iter[, total]) step tuple for stdout logging."""
    if isinstance(step, str):
        return step
    fields = []
    if len(step) > 0:
        fields.append("epoch {:>4}".format(step[0]))
    if len(step) > 1:
        fields.append("iter {:>4}".format(step[1]))
    if len(step) > 2:
        fields[-1] += "/{}".format(step[2])
    return " | ".join(fields)


def stdout_metric_format(metric, metadata, value):
    """Render one metric for stdout logging using its dllogger metadata."""
    name = metadata.get("name", metric + " : ")
    unit = metadata.get("unit", None)
    # Renamed from `format` to avoid shadowing the builtin.
    fmt = f'{{{metadata.get("format", "")}}}'
    fields = [name, fmt.format(value) if value is not None else value, unit]
    fields = [f for f in fields if f is not None]
    return "| " + " ".join(fields)


def init_log(args):
    """Initialize dllogger backends and TensorBoard loggers (rank 0 only)."""
    enabled = not dist.is_initialized() or dist.get_rank() == 0
    if enabled:
        fpath = args.log_file or os.path.join(args.output_dir, 'nvlog.json')
        backends = [JSONStreamBackend(Verbosity.DEFAULT,
                                      unique_log_fpath(fpath)),
                    StdOutBackend(Verbosity.VERBOSE,
                                  step_format=stdout_step_format,
                                  metric_format=stdout_metric_format)]
    else:
        backends = []

    dllogger.init(backends=backends)
    dllogger.metadata("train_lrate", {"name": "lrate", "format": ":>3.2e"})

    for id_, pref in [('train', ''), ('train_avg', 'avg train '),
                      ('dev_ema', '  dev ema ')]:

        dllogger.metadata(f"{id_}_loss",
                          {"name": f"{pref}loss", "format": ":>7.2f"})

        dllogger.metadata(f"{id_}_wer",
                          {"name": f"{pref}wer", "format": ":>6.2f"})

        dllogger.metadata(f"{id_}_pplx",
                          {"name": f"{pref}pplx", "format": ":>6.2f"})

        dllogger.metadata(f"{id_}_throughput",
                          {"name": f"{pref}utts/s", "format": ":>5.0f"})

        dllogger.metadata(f"{id_}_took",
                          {"name": "took", "unit": "s", "format": ":>5.2f"})

    tb_subsets = ['train', 'dev_ema']
    global tb_loggers
    tb_loggers = {s: TBLogger(enabled, args.output_dir, name=s)
                  for s in tb_subsets}

    log_parameters(vars(args), tb_subset='train')


def log(step, tb_total_steps=None, subset='train', data=None):
    """Log `data` to TensorBoard (if requested) and dllogger.

    `data` defaults to None rather than a shared mutable `{}`.
    """
    data = {} if data is None else data

    if tb_total_steps is not None:
        tb_loggers[subset].log(tb_total_steps, data)

    if subset != '':
        data = {f'{subset}_{key}': v for key, v in data.items()}
    dllogger.log(step, data=data)


def log_grads_tb(tb_total_steps, grads, tb_subset='train'):
    tb_loggers[tb_subset].log_grads(tb_total_steps, grads)


def log_parameters(data, verbosity=0, tb_subset=None):
    """Log parameter key/value pairs; scalars also go to TB hparams."""
    for k, v in data.items():
        dllogger.log(step="PARAMETER", data={k: v}, verbosity=verbosity)

    if tb_subset is not None and tb_loggers[tb_subset].enabled:
        tb_data = {k: v for k, v in data.items()
                   if type(v) in (str, bool, int, float)}
        tb_loggers[tb_subset].summary_writer.add_hparams(tb_data, {})


def flush_log():
    dllogger.flush()
    for tbl in tb_loggers.values():
        if tbl.enabled:
            tbl.summary_writer.flush()
to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/benchmarks/rnnt/ootb/train/common/text/__init__.py b/benchmarks/rnnt/ootb/train/common/text/__init__.py new file mode 100644 index 0000000..4901823 --- /dev/null +++ b/benchmarks/rnnt/ootb/train/common/text/__init__.py @@ -0,0 +1,32 @@ +# Copyright (c) 2017 Keith Ito +""" from https://github.com/keithito/tacotron """ +import re +import string +from . import cleaners + +def _clean_text(text, cleaner_names, *args): + for name in cleaner_names: + cleaner = getattr(cleaners, name) + if not cleaner: + raise Exception('Unknown cleaner: %s' % name) + text = cleaner(text, *args) + return text + + +def punctuation_map(labels): + # Punctuation to remove + punctuation = string.punctuation + punctuation = punctuation.replace("+", "") + punctuation = punctuation.replace("&", "") + # TODO We might also want to consider: + # @ -> at + # # -> number, pound, hashtag + # ~ -> tilde + # _ -> underscore + # % -> percent + # If a punctuation symbol is inside our vocab, we do not remove from text + for l in labels: + punctuation = punctuation.replace(l, "") + # Turn all punctuation to whitespace + table = str.maketrans(punctuation, " " * len(punctuation)) + return table diff --git a/benchmarks/rnnt/ootb/train/common/text/cleaners.py b/benchmarks/rnnt/ootb/train/common/text/cleaners.py new file mode 100644 index 0000000..08ef5d8 --- /dev/null +++ 
# Copyright (c) 2017 Keith Ito
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

""" from https://github.com/keithito/tacotron
Modified to add punctuation removal
"""

'''
Cleaners are transformations that run over the input text at both training and eval time.

Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners"
hyperparameter. Some cleaners are English-specific. You'll typically want to use:
  1. "english_cleaners" for English text
  2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using
     the Unidecode library (https://pypi.python.org/pypi/Unidecode)
  3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update
     the symbols in symbols.py to match your data).

'''

import re
from unidecode import unidecode
from .numbers import normalize_numbers

# Regular expression matching whitespace:
_whitespace_re = re.compile(r'\s+')

# List of (regular expression, replacement) pairs for abbreviations
# (raw string: \b is a word boundary, \. a literal dot):
_abbreviations = [(re.compile(r'\b%s\.' % x[0], re.IGNORECASE), x[1]) for x in [
    ('mrs', 'misess'),
    ('mr', 'mister'),
    ('dr', 'doctor'),
    ('st', 'saint'),
    ('co', 'company'),
    ('jr', 'junior'),
    ('maj', 'major'),
    ('gen', 'general'),
    ('drs', 'doctors'),
    ('rev', 'reverend'),
    ('lt', 'lieutenant'),
    ('hon', 'honorable'),
    ('sgt', 'sergeant'),
    ('capt', 'captain'),
    ('esq', 'esquire'),
    ('ltd', 'limited'),
    ('col', 'colonel'),
    ('ft', 'fort'),
]]


def expand_abbreviations(text):
    """Expand common English abbreviations ('dr.' -> 'doctor', ...)."""
    for regex, replacement in _abbreviations:
        text = re.sub(regex, replacement, text)
    return text


def expand_numbers(text):
    return normalize_numbers(text)


def lowercase(text):
    return text.lower()


def collapse_whitespace(text):
    return re.sub(_whitespace_re, ' ', text)


def convert_to_ascii(text):
    return unidecode(text)


def remove_punctuation(text, table):
    """Strip punctuation via `table`, keeping '&' and '+' as spoken words."""
    text = text.translate(table)
    text = re.sub(r'&', " and ", text)
    text = re.sub(r'\+', " plus ", text)
    return text


def english_cleaners(text, table=None):
    '''Pipeline for English text, including number and abbreviation expansion.'''
    text = convert_to_ascii(text)
    text = lowercase(text)
    text = expand_numbers(text)
    text = expand_abbreviations(text)
    if table is not None:
        text = remove_punctuation(text, table)
    text = collapse_whitespace(text)
    return text
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" from https://github.com/keithito/tacotron +Modified to add support for time and slight tweaks to _expand_number +""" + +import inflect +import re + + +_inflect = inflect.engine() +_comma_number_re = re.compile(r'([0-9][0-9\,]+[0-9])') +_decimal_number_re = re.compile(r'([0-9]+\.[0-9]+)') +_pounds_re = re.compile(r'£([0-9\,]*[0-9]+)') +_dollars_re = re.compile(r'\$([0-9\.\,]*[0-9]+)') +_ordinal_re = re.compile(r'[0-9]+(st|nd|rd|th)') +_number_re = re.compile(r'[0-9]+') +_time_re = re.compile(r'([0-9]{1,2}):([0-9]{2})') + + +def _remove_commas(m): + return m.group(1).replace(',', '') + + +def _expand_decimal_point(m): + return m.group(1).replace('.', ' point ') + + +def _expand_dollars(m): + match = m.group(1) + parts = match.split('.') + if len(parts) > 2: + return match + ' dollars' # Unexpected format + dollars = int(parts[0]) if parts[0] else 0 + cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0 + if dollars and cents: + dollar_unit = 'dollar' if dollars == 1 else 'dollars' + cent_unit = 'cent' if cents == 1 else 'cents' + return '%s %s, %s %s' % (dollars, dollar_unit, cents, cent_unit) + elif dollars: + dollar_unit = 'dollar' if dollars == 1 else 'dollars' + return '%s %s' % (dollars, dollar_unit) + elif cents: + cent_unit = 'cent' if cents == 1 else 'cents' + return '%s %s' % (cents, cent_unit) + else: + return 'zero dollars' + + +def _expand_ordinal(m): + return _inflect.number_to_words(m.group(0)) + + +def _expand_number(m): + if int(m.group(0)[0]) == 0: + return _inflect.number_to_words(m.group(0), andword='', group=1) + num = 
int(m.group(0)) + if num > 1000 and num < 3000: + if num == 2000: + return 'two thousand' + elif num > 2000 and num < 2010: + return 'two thousand ' + _inflect.number_to_words(num % 100) + elif num % 100 == 0: + return _inflect.number_to_words(num // 100) + ' hundred' + else: + return _inflect.number_to_words(num, andword='', zero='oh', group=2).replace(', ', ' ') + # Add check for number phones and other large numbers + elif num > 1000000000 and num % 10000 != 0: + return _inflect.number_to_words(num, andword='', group=1) + else: + return _inflect.number_to_words(num, andword='') + +def _expand_time(m): + mins = int(m.group(2)) + if mins == 0: + return _inflect.number_to_words(m.group(1)) + return " ".join([_inflect.number_to_words(m.group(1)), _inflect.number_to_words(m.group(2))]) + +def normalize_numbers(text): + text = re.sub(_comma_number_re, _remove_commas, text) + text = re.sub(_pounds_re, r'\1 pounds', text) + text = re.sub(_dollars_re, _expand_dollars, text) + text = re.sub(_decimal_number_re, _expand_decimal_point, text) + text = re.sub(_ordinal_re, _expand_ordinal, text) + text = re.sub(_number_re, _expand_number, text) + text = re.sub(_time_re, _expand_time, text) + return text diff --git a/benchmarks/rnnt/ootb/train/common/text/symbols.py b/benchmarks/rnnt/ootb/train/common/text/symbols.py new file mode 100644 index 0000000..24efedf --- /dev/null +++ b/benchmarks/rnnt/ootb/train/common/text/symbols.py @@ -0,0 +1,19 @@ +# Copyright (c) 2017 Keith Ito +""" from https://github.com/keithito/tacotron """ + +''' +Defines the set of symbols used in text input to the model. + +The default is a set of ASCII characters that works well for English or text that has been run through Unidecode. For other data, you can modify _characters. See TRAINING_DATA.md for details. ''' +from . import cmudict + +_pad = '_' +_punctuation = '!\'(),.:;? 
' +_special = '-' +_letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz' + +# Prepend "@" to ARPAbet symbols to ensure uniqueness (some are the same as uppercase letters): +_arpabet = ['@' + s for s in cmudict.valid_symbols] + +# Export all symbols: +symbols = [_pad] + list(_special) + list(_punctuation) + list(_letters) + _arpabet diff --git a/benchmarks/rnnt/ootb/train/configs/baseline_v3-1023sp.yaml b/benchmarks/rnnt/ootb/train/configs/baseline_v3-1023sp.yaml new file mode 100644 index 0000000..fb05553 --- /dev/null +++ b/benchmarks/rnnt/ootb/train/configs/baseline_v3-1023sp.yaml @@ -0,0 +1,80 @@ +# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# +# ~5.8% WER on dev-clean after 100epochs with LR 1e-3 +# + +tokenizer: + sentpiece_model: /sentencepieces/librispeech1023.model + labels: [" ", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", + "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", "'"] + +input_val: + audio_dataset: &val_dataset + sample_rate: &sample_rate 16000 + trim_silence: true + normalize_transcripts: true + + filterbank_features: &val_features + normalize: per_feature + sample_rate: *sample_rate + window_size: 0.02 + window_stride: 0.01 + window: hann + n_fft: 512 + n_filt: &n_filt 80 + dither: 0.00001 + frame_splicing: &val_splicing + frame_stacking: 3 + frame_subsampling: 3 + +# For training we keep samples < 16.7s and apply augmentation +input_train: + audio_dataset: + <<: *val_dataset + max_duration: 16.7 + speed_perturbation: + min_rate: 0.85 + max_rate: 1.15 + p: 1.0 + + filterbank_features: *val_features + frame_splicing: *val_splicing + + spec_augment: + freq_masks: 2 + min_freq: 0 + max_freq: 20 + time_masks: 10 + min_time: 0 + max_time: 0.03 + +rnnt: + in_feats: 240 # n_filt x frame_stacking + + enc_n_hid: 1024 + enc_pre_rnn_layers: 2 + enc_post_rnn_layers: 3 + enc_stack_time_factor: 2 + enc_dropout: 0.1 + + pred_n_hid: 512 + pred_rnn_layers: 2 + pred_dropout: 0.3 + + joint_n_hid: 512 + joint_dropout: 0.3 + + forget_gate_bias: 1.0 diff --git a/benchmarks/rnnt/ootb/train/docker-compose.yaml b/benchmarks/rnnt/ootb/train/docker-compose.yaml new file mode 100644 index 0000000..21b2eeb --- /dev/null +++ b/benchmarks/rnnt/ootb/train/docker-compose.yaml @@ -0,0 +1,17 @@ +version: '3.3' +services: + test: + deploy: + resources: + reservations: + devices: + - capabilities: + - gpu + build: + context: . 
+ dockerfile: tests/Dockerfile + volumes: + - .:/code + - /mnt/mwawrzos/storage/datasets/LibriSpeech/LibriSpeech:/datasets/LibriSpeech + stdin_open: true + tty: true diff --git a/benchmarks/rnnt/ootb/train/eval_model.py b/benchmarks/rnnt/ootb/train/eval_model.py new file mode 100644 index 0000000..44d4da0 --- /dev/null +++ b/benchmarks/rnnt/ootb/train/eval_model.py @@ -0,0 +1,74 @@ +# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import json + +import torch +from torch.autograd import Variable +from warpctc_pytorch import CTCLoss + +import torch.nn.functional as F + +import sys +### Import Data Utils ### +sys.path.append('../') + +from data.bucketing_sampler import BucketingSampler, SpectrogramDatasetWithLength +from data.data_loader import AudioDataLoader, SpectrogramDataset +from decoder import GreedyDecoder +from model import DeepSpeech, supported_rnns +from params import cuda + +def eval_model(model, test_loader, decoder): + start_iter = 0 # Reset start iteration for next epoch + total_cer, total_wer = 0, 0 + model.eval() + for i, (data) in enumerate(test_loader): # test + inputs, targets, input_percentages, target_sizes = data + + inputs = Variable(inputs, volatile=True) + + # unflatten targets + split_targets = [] + offset = 0 + for size in target_sizes: + split_targets.append(targets[offset:offset + size]) + offset += size + + if cuda: + inputs = inputs.cuda() + + out = model(inputs) + out = out.transpose(0, 1) # TxNxH + seq_length = out.size(0) + sizes = input_percentages.mul_(int(seq_length)).int() + + decoded_output = decoder.decode(out.data, sizes) + target_strings = decoder.process_strings(decoder.convert_to_strings(split_targets)) + wer, cer = 0, 0 + for x in range(len(target_strings)): + wer += decoder.wer(decoded_output[x], target_strings[x]) / float(len(target_strings[x].split())) + cer += decoder.cer(decoded_output[x], target_strings[x]) / float(len(target_strings[x])) + total_cer += cer + total_wer += wer + + if cuda: + torch.cuda.synchronize() + del out + wer = total_wer / len(test_loader.dataset) + cer = total_cer / len(test_loader.dataset) + wer *= 100 + cer *= 100 + + return wer, cer diff --git a/benchmarks/rnnt/ootb/train/inference.py b/benchmarks/rnnt/ootb/train/inference.py new file mode 100644 index 0000000..f6e3c95 --- /dev/null +++ b/benchmarks/rnnt/ootb/train/inference.py @@ -0,0 +1,314 @@ +# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +import math +import os +import random +import time +import tqdm +from heapq import nlargest +from itertools import chain, repeat +from pathlib import Path + +import dllogger +import torch +import numpy as np +import torch.distributed as distrib +from apex import amp +from apex.parallel import DistributedDataParallel +from dllogger import JSONStreamBackend, StdOutBackend, Verbosity + +from common import helpers +from common.data import features +from common.data.dali import sampler as dali_sampler +from common.data.dali.data_loader import DaliDataLoader +from common.data.features import BaseFeatures, FilterbankFeatures +from common.data.text import Tokenizer +from common.helpers import print_once, process_evaluation_epoch +from common.tb_dllogger import stdout_metric_format, unique_log_fpath +from rnnt import config +from rnnt.decoder import RNNTGreedyDecoder +from rnnt.model import RNNT + + +def get_parser(): + parser = argparse.ArgumentParser(description='RNN-T') + parser.add_argument('--batch_size', default=16, type=int, + help='Data batch size') + parser.add_argument('--steps', default=0, type=int, + help='Eval this many steps for every worker') + parser.add_argument('--model_config', type=str, + help='Relative model config path given dataset folder') + parser.add_argument('--dataset_dir', type=str, + help='Absolute path to dataset folder') + parser.add_argument('--val_manifest', type=str, + 
help='Relative path to evaluation dataset manifest file') + parser.add_argument('--ckpt', default=None, type=str, + help='Path to model checkpoint') + parser.add_argument('--max_duration', default=None, type=float, + help='Filter out longer inputs (in seconds)') + parser.add_argument('--pad_to_max_duration', action='store_true', + help='Pads every batch to max_duration') + parser.add_argument('--amp', '--fp16', action='store_true', + help='Use FP16 precision') + parser.add_argument('--cudnn_benchmark', action='store_true', + help='Enable cudnn benchmark') + parser.add_argument('--save_predictions', type=str, default=None, + help='Save predictions in text form at this location') + parser.add_argument('--transcribe_wav', type=str, + help='Path to a single .wav file (16KHz)') + parser.add_argument('--transcribe_filelist', type=str, + help='Path to a filelist with one .wav path per line') + parser.add_argument('--dali_device', type=str, choices=['none', 'cpu', 'gpu'], + default='gpu', help='') # XXX + parser.add_argument('--repeats', default=1, type=int, + help='Repeat the inference for benchmarking') + + parser.add_argument('-o', '--output_dir', default='results/', + help='Output folder to save audio (file per phrase)') + parser.add_argument('--log_file', type=str, default=None, + help='Path to a DLLogger log file') + parser.add_argument('--local_rank', default=os.getenv('LOCAL_RANK', 0), + type=int, help='GPU id used for distributed training') + + parser.add_argument('--cpu', action='store_true', + help='Run inference on CPU') + parser.add_argument('--ema', action='store_true', + help='Load EMA model weights') + + parser.add_argument("--seed", default=None, type=int, help='seed') + + return parser + + +def durs_to_percentiles(durations, ratios): + durations = np.asarray(durations) * 1000 # in ms + latency = durations + + latency = latency[5:] + mean_latency = np.mean(latency) + + latency_worst = nlargest(math.ceil((1 - min(ratios))* len(latency)), latency) + 
latency_ranges = get_percentile(ratios, latency_worst, len(latency)) + latency_ranges[0.5] = mean_latency + return latency_ranges + + +def get_percentile(ratios, arr, nsamples): + res = {} + for a in ratios: + idx = max(int(nsamples * (1 - a)), 0) + res[a] = arr[idx] + return res + + +def main(): + parser = get_parser() + args = parser.parse_args() + + log_fpath = args.log_file or str(Path(args.output_dir, 'nvlog_infer.json')) + log_fpath = unique_log_fpath(log_fpath) + dllogger.init(backends=[JSONStreamBackend(Verbosity.DEFAULT, log_fpath), + StdOutBackend(Verbosity.VERBOSE, + metric_format=stdout_metric_format)]) + + [dllogger.log("PARAMETER", {k:v}) for k,v in vars(args).items()] + + for step in ['DNN', 'data+DNN', 'data']: + for c in [0.99, 0.95, 0.9, 0.5]: + cs = 'avg' if c == 0.5 else f'{int(100*c)}%' + dllogger.metadata(f'{step.lower()}_latency_{c}', + {'name': f'{step} latency {cs}', + 'format': ':>7.2f', 'unit': 'ms'}) + dllogger.metadata( + 'eval_wer', {'name': 'WER', 'format': ':>3.3f', 'unit': '%'}) + + if args.cpu: + device = torch.device('cpu') + else: + assert torch.cuda.is_available() + device = torch.device('cuda') + torch.backends.cudnn.benchmark = args.cudnn_benchmark + + if args.seed is not None: + torch.manual_seed(args.seed + args.local_rank) + np.random.seed(args.seed + args.local_rank) + random.seed(args.seed + args.local_rank) + + # set up distributed training + multi_gpu = not args.cpu and int(os.environ.get('WORLD_SIZE', 1)) > 1 + if multi_gpu: + torch.cuda.set_device(args.local_rank) + distrib.init_process_group(backend='nccl', init_method='env://') + print_once(f'Inference with {distrib.get_world_size()} GPUs') + + cfg = config.load(args.model_config) + + if args.max_duration is not None: + cfg['input_val']['audio_dataset']['max_duration'] = args.max_duration + cfg['input_val']['filterbank_features']['max_duration'] = args.max_duration + + if args.pad_to_max_duration: + assert cfg['input_val']['audio_dataset']['max_duration'] > 0 + 
cfg['input_val']['audio_dataset']['pad_to_max_duration'] = True + cfg['input_val']['filterbank_features']['pad_to_max_duration'] = True + + use_dali = args.dali_device in ('cpu', 'gpu') + + ( + dataset_kw, + features_kw, + splicing_kw, + _, _ + ) = config.input(cfg, 'val') + + tokenizer_kw = config.tokenizer(cfg) + tokenizer = Tokenizer(**tokenizer_kw) + + optim_level = 3 if args.amp else 0 + + feature_proc = torch.nn.Sequential( + torch.nn.Identity(), + torch.nn.Identity(), + features.FrameSplicing(optim_level=optim_level, **splicing_kw), + features.FillPadding(optim_level=optim_level, ), + ) + + # dataset + + data_loader = DaliDataLoader( + gpu_id=args.local_rank or 0, + dataset_path=args.dataset_dir, + config_data=dataset_kw, + config_features=features_kw, + json_names=[args.val_manifest], + batch_size=args.batch_size, + sampler=dali_sampler.SimpleSampler(), + pipeline_type="val", + device_type=args.dali_device, + tokenizer=tokenizer) + + model = RNNT(n_classes=tokenizer.num_labels + 1, **config.rnnt(cfg)) + + if args.ckpt is not None: + print(f'Loading the model from {args.ckpt} ...') + checkpoint = torch.load(args.ckpt, map_location="cpu") + key = 'ema_state_dict' if args.ema else 'state_dict' + state_dict = checkpoint[key] + model.load_state_dict(state_dict, strict=True) + + model.to(device) + model.eval() + + if feature_proc is not None: + feature_proc.to(device) + feature_proc.eval() + + if args.amp: + model = amp.initialize(model, opt_level='O3') + + if multi_gpu: + model = DistributedDataParallel(model) + + agg = {'txts': [], 'preds': [], 'logits': []} + dur = {'data': [], 'dnn': [], 'data+dnn': []} + + rep_loader = chain(*repeat(data_loader, args.repeats)) + rep_len = args.repeats * len(data_loader) + + blank_idx = tokenizer.num_labels + greedy_decoder = RNNTGreedyDecoder(blank_idx=blank_idx) + + def sync_time(): + torch.cuda.synchronize() if device.type == 'cuda' else None + return time.perf_counter() + + sz = [] + with torch.no_grad(): + + for it, 
batch in enumerate(tqdm.tqdm(rep_loader, total=rep_len)): + + if use_dali: + feats, feat_lens, txt, txt_lens = batch + if feature_proc is not None: + feats, feat_lens = feature_proc([feats, feat_lens]) + else: + batch = [t.cuda(non_blocking=True) for t in batch] + audio, audio_lens, txt, txt_lens = batch + feats, feat_lens = feature_proc([audio, audio_lens]) + feats = feats.permute(2, 0, 1) + if args.amp: + feats = feats.half() + + sz.append(feats.size(0)) + + t1 = sync_time() + log_probs, log_prob_lens = model(feats, feat_lens, txt, txt_lens) + t2 = sync_time() + + # burn-in period; wait for a new loader due to num_workers + if it >= 1 and (args.repeats == 1 or it >= len(data_loader)): + dur['data'].append(t1 - t0) + dur['dnn'].append(t2 - t1) + dur['data+dnn'].append(t2 - t0) + + if txt is not None: + agg['txts'] += helpers.gather_transcripts([txt], [txt_lens], + tokenizer.detokenize) + + preds = greedy_decoder.decode(model, feats, feat_lens) + + agg['preds'] += helpers.gather_predictions([preds], tokenizer.detokenize) + + if 0 < args.steps < it: + break + + t0 = sync_time() + + # communicate the results + if args.transcribe_wav: + for idx,p in enumerate(agg['preds']): + print_once(f'Prediction {idx+1: >3}: {p}') + + elif args.transcribe_filelist: + pass + + else: + wer, loss = process_evaluation_epoch(agg) + + if not multi_gpu or distrib.get_rank() == 0: + dllogger.log(step=(), data={'eval_wer': 100 * wer}) + + if args.save_predictions: + with open(args.save_predictions, 'w') as f: + f.write('\n'.join(agg['preds'])) + + # report timings + if len(dur['data']) >= 20: + ratios = [0.9, 0.95, 0.99] + + for stage in dur: + lat = durs_to_percentiles(dur[stage], ratios) + for k in [0.99, 0.95, 0.9, 0.5]: + kk = str(k).replace('.', '_') + dllogger.log(step=(), data={f'{stage.lower()}_latency_{kk}': lat[k]}) + + else: + # TODO measure at least avg latency + print_once('Not enough samples to measure latencies.') + + +if __name__=="__main__": + main() diff --git 
a/benchmarks/rnnt/ootb/train/mlperf/__init__.py b/benchmarks/rnnt/ootb/train/mlperf/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/benchmarks/rnnt/ootb/train/mlperf/logging.py b/benchmarks/rnnt/ootb/train/mlperf/logging.py new file mode 100644 index 0000000..74e803f --- /dev/null +++ b/benchmarks/rnnt/ootb/train/mlperf/logging.py @@ -0,0 +1,74 @@ +# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os + +import torch + +from mlperf_logging import mllog +from mlperf_logging.mllog import constants + + +mllogger = mllog.get_mllogger() + + +def configure_logger(benchmark): + mllog.config(filename=os.path.join(os.path.dirname(os.path.abspath(__file__)), f'{benchmark}.log')) + mllogger = mllog.get_mllogger() + mllogger.logger.propagate = False + + +def log_start(*args, **kwargs): + _log(mllogger.start, *args, **kwargs) +def log_end(*args, **kwargs): + _log(mllogger.end, *args, **kwargs) +def log_event(*args, **kwargs): + _log(mllogger.event, *args, **kwargs) + +def _log(logger, *args, **kwargs): + """ + Wrapper for MLPerf compliance logging calls. + All arguments but 'sync' and 'log_all_ranks' are passed to + mlperf_logging.mllog. + If 'sync' is set to True then the wrapper will synchronize all distributed + workers. 'sync' should be set to True for all compliance tags that require + accurate timing (RUN_START, RUN_STOP etc.) 
+ If 'log_all_ranks' is set to True then all distributed workers will print + logging message, if set to False then only worker with rank=0 will print + the message. + """ + if 'stack_offset' not in kwargs: + kwargs['stack_offset'] = 3 + if 'value' not in kwargs: + kwargs['value'] = None + + if kwargs.pop('log_all_ranks', False): + log = True + else: + log = (get_rank() == 0) + + if log: + logger(*args, **kwargs) + + +def get_rank(): + """ + Gets distributed rank or returns zero if distributed is not initialized. + """ + if torch.distributed.is_available() and torch.distributed.is_initialized(): + rank = torch.distributed.get_rank() + else: + rank = 0 + return rank + diff --git a/benchmarks/rnnt/ootb/train/requirements.txt b/benchmarks/rnnt/ootb/train/requirements.txt new file mode 100755 index 0000000..7318388 --- /dev/null +++ b/benchmarks/rnnt/ootb/train/requirements.txt @@ -0,0 +1,10 @@ +https://github.com/NVIDIA/dllogger/archive/26a0f8f1958de2c0c460925ff6102a4d2486d6cc.zip +https://github.com/mlcommons/logging/archive/d08740cadb4188a5ebeb84ad6c68f98c1e129805.zip +tensorboard==2.3.0 +unidecode==1.1.1 +inflect==4.1.0 +soundfile==0.10.3.post1 +librosa==0.8.0 +sox==1.4.1 +sentencepiece==0.1.94 +pandas==1.1.5 diff --git a/benchmarks/rnnt/ootb/train/rnnt/config.py b/benchmarks/rnnt/ootb/train/rnnt/config.py new file mode 100644 index 0000000..35d0a65 --- /dev/null +++ b/benchmarks/rnnt/ootb/train/rnnt/config.py @@ -0,0 +1,117 @@ +# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import copy +import inspect +import yaml + +from common.data.dali.pipeline import PipelineParams, SpeedPerturbationParams +from common.data.text import Tokenizer +from common.data import features +from common.helpers import print_once +from .model import RNNT + + +def default_args(klass): + sig = inspect.signature(klass.__init__) + return {k: v.default for k,v in sig.parameters.items() if k != 'self'} + + +def load(fpath, max_duration=None): + + if fpath.endswith('.toml'): + raise ValueError('.toml config format has been changed to .yaml') + + cfg = yaml.safe_load(open(fpath, 'r')) + + # Reload to deep copy shallow copies, which were made with yaml anchors + yaml.Dumper.ignore_aliases = lambda *args: True + cfg = yaml.safe_load(yaml.dump(cfg)) + + # Modify the config with supported cmdline flags + if max_duration is not None: + cfg['input_train']['audio_dataset']['max_duration'] = max_duration + cfg['input_train']['filterbank_features']['max_duration'] = max_duration + + return cfg + + +def validate_and_fill(klass, user_conf, ignore=[], optional=[]): + conf = default_args(klass) + + for k,v in user_conf.items(): + assert k in conf or k in ignore, f'Unknown parameter {k} for {klass}' + conf[k] = v + + # Keep only mandatory or optional-nonempty + conf = {k:v for k,v in conf.items() + if k not in optional or v is not inspect.Parameter.empty} + + # Validate + for k,v in conf.items(): + assert v is not inspect.Parameter.empty, \ + f'Value for {k} not specified for {klass}' + return conf + + +def input(conf_yaml, split='train'): + + conf = copy.deepcopy(conf_yaml[f'input_{split}']) + conf_dataset = conf.pop('audio_dataset') + conf_features = conf.pop('filterbank_features') + conf_splicing = conf.pop('frame_splicing', {}) + conf_specaugm = conf.pop('spec_augment', None) + conf_cutoutau = conf.pop('cutout_augment', None) + + # Validate known inner classes + 
inner_classes = [ + (conf_dataset, 'speed_perturbation', SpeedPerturbationParams), + ] + amp=['optim_level'] + for conf_tgt, key, klass in inner_classes: + if key in conf_tgt: + conf_tgt[key] = validate_and_fill(klass, conf_tgt[key], optional=amp) + + for k in conf: + raise ValueError(f'Unknown key {k}') + + # Validate outer classes + conf_dataset = validate_and_fill(PipelineParams, conf_dataset) + + conf_features = validate_and_fill(features.FilterbankFeatures, conf_features, optional=amp) + conf_splicing = validate_and_fill(features.FrameSplicing, conf_splicing, optional=amp) + conf_specaugm = conf_specaugm and validate_and_fill(features.SpecAugment, conf_specaugm, optional=amp) + + # Check params shared between classes + for shared in ['sample_rate']: + assert conf_dataset[shared] == conf_features[shared], ( + f'{shared} should match in Dataset and FeatureProcessor: ' + f'{conf_dataset[shared]}, {conf_features[shared]}') + + return conf_dataset, conf_features, conf_splicing, conf_specaugm + + +def rnnt(conf): + return validate_and_fill(RNNT, conf['rnnt'], optional=['n_classes']) + + +def tokenizer(conf): + return validate_and_fill(Tokenizer, conf['tokenizer'], optional=['sentpiece_model']) + + +def apply_duration_flags(cfg, max_duration): + if max_duration is not None: + cfg['input_train']['audio_dataset']['max_duration'] = max_duration + cfg['input_train']['filterbank_features']['max_duration'] = max_duration + diff --git a/benchmarks/rnnt/ootb/train/rnnt/decoder.py b/benchmarks/rnnt/ootb/train/rnnt/decoder.py new file mode 100644 index 0000000..8deb6fa --- /dev/null +++ b/benchmarks/rnnt/ootb/train/rnnt/decoder.py @@ -0,0 +1,126 @@ +# Copyright (c) 2019, Myrtle Software Limited. All rights reserved. +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .model import label_collate + + +class RNNTGreedyDecoder: + """A greedy transducer decoder. + + Args: + blank_symbol: See `Decoder`. + model: Model to use for prediction. + max_symbols_per_step: The maximum number of symbols that can be added + to a sequence in a single time step; if set to None then there is + no limit. + cutoff_prob: Skip to next step in search if current highest character + probability is less than this. + """ + def __init__(self, blank_idx, max_symbols_per_step=30, max_symbol_per_sample=None): + self.blank_idx = blank_idx + assert max_symbols_per_step is None or max_symbols_per_step > 0 + self.max_symbols = max_symbols_per_step + assert max_symbol_per_sample is None or max_symbol_per_sample > 0 + self.max_symbol_per_sample = max_symbol_per_sample + self._SOS = -1 # start of sequence + + def _pred_step(self, model, label, hidden, device): + if label == self._SOS: + return model.predict(None, hidden, add_sos=False) + + label = label_collate([[label]]).to(device) + return model.predict(label, hidden, add_sos=False) + + def _joint_step(self, model, enc, pred, log_normalize=False): + logits = model.joint(enc, pred)[:, 0, 0, :] + + if log_normalize: + probs = F.log_softmax(logits, dim=len(logits.shape) - 1) + return probs + else: + return logits + + def decode(self, model, x, out_lens): + """Returns a list of sentences given an input batch. 
+ + Args: + x: A tensor of size (batch, channels, features, seq_len) + out_lens: list of int representing the length of each sequence + output sequence. + + Returns: + list containing batch number of sentences (strings). + """ + model = getattr(model, 'module', model) + with torch.no_grad(): + # Apply optional preprocessing + + logits, out_lens = model.encode(x, out_lens) + + output = [] + for batch_idx in range(logits.size(0)): + inseq = logits[batch_idx, :, :].unsqueeze(1) + logitlen = out_lens[batch_idx] + sentence = self._greedy_decode(model, inseq, logitlen) + output.append(sentence) + + return output + + def _greedy_decode(self, model, x, out_len): + training_state = model.training + model.eval() + + device = x.device + + hidden = None + label = [] + for time_idx in range(out_len): + if self.max_symbol_per_sample is not None \ + and len(label) > self.max_symbol_per_sample: + break + f = x[time_idx, :, :].unsqueeze(0) + + not_blank = True + symbols_added = 0 + + while not_blank and ( + self.max_symbols is None or + symbols_added < self.max_symbols): + g, hidden_prime = self._pred_step( + model, + self._SOS if label == [] else label[-1], + hidden, + device + ) + logp = self._joint_step(model, f, g, log_normalize=False)[0, :] + + # get index k, of max prob + v, k = logp.max(0) + k = k.item() + + if k == self.blank_idx: + not_blank = False + else: + label.append(k) + hidden = hidden_prime + symbols_added += 1 + + model.train(training_state) + return label + diff --git a/benchmarks/rnnt/ootb/train/rnnt/loss.py b/benchmarks/rnnt/ootb/train/rnnt/loss.py new file mode 100644 index 0000000..90e878c --- /dev/null +++ b/benchmarks/rnnt/ootb/train/rnnt/loss.py @@ -0,0 +1,88 @@ +# Copyright (c) 2019, Myrtle Software Limited. All rights reserved. +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import torch +import torch.nn as nn +import torch.nn.functional as F +from warprnnt_pytorch import RNNTLoss as WarpRNNTLoss + + +class RNNTLoss(torch.nn.Module): + """Wrapped :py:class:`warprnnt_pytorch.RNNTLoss`. + Args: + blank_idx: Index of the blank label. + Attributes: + rnnt_loss: A :py:class:`warprnnt_pytorch.RNNTLoss` instance. + """ + + def __init__(self, blank_idx): + super().__init__() + self.rnnt_loss = WarpRNNTLoss(blank=blank_idx) + self.use_cuda = torch.cuda.is_available() + + def forward(self, logits, logit_lens, y, y_lens): + """Computes RNNT loss. + All inputs are moved to the GPU with :py:meth:`torch.nn.Module.cuda` if + :py:func:`torch.cuda.is_available` was :py:data:`True` on + initialisation. + Args: + inputs: A tuple where the first element is the unnormalized network + :py:class:`torch.Tensor` outputs of size ``[batch, max_seq_len, + max_output_seq_len + 1, vocab_size + 1)``. The second element + is a Tuple of two :py:class:`torch.Tensor`s both of + size ``[batch]`` that contain the lengths of a) the audio features + logits and b) the target sequence logits. + targets: A tuple where the first element is a + :py:class:`torch.Tensor` such that each entry in the target + sequence is a class index. Target indices cannot be the blank + index. It must have size ``[batch, max_seq_len]``. In the former + form each target sequence is padded to the length of the longest + sequence and stacked. + The second element is a :py:class:`torch.Tensor` that gives + the lengths of the targets. 
Lengths are specified for each + sequence to achieve masking under the assumption that sequences + are padded to equal lengths. + """ + + + # cast to required types + if logits.dtype != torch.float: + logits = logits.float() + + if y.dtype != torch.int32: + y = y.int() + + if logit_lens.dtype != torch.int32: + logit_lens = logit_lens.int() + + if y_lens.dtype != torch.int32: + y_lens = y_lens.int() + + # send to gpu + if self.use_cuda: + logits = logits.cuda() + logit_lens = logit_lens.cuda() + y = y.cuda() + y_lens = y_lens.cuda() + + loss = self.rnnt_loss( + acts=logits, labels=y, act_lens=logit_lens, label_lens=y_lens + ) + + # del new variables that may have been created due to float/int/cuda() + del logits, y, logit_lens, y_lens + + return loss + diff --git a/benchmarks/rnnt/ootb/train/rnnt/model.py b/benchmarks/rnnt/ootb/train/rnnt/model.py new file mode 100644 index 0000000..6306866 --- /dev/null +++ b/benchmarks/rnnt/ootb/train/rnnt/model.py @@ -0,0 +1,275 @@ +# Copyright (c) 2019, Myrtle Software Limited. All rights reserved. +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from itertools import chain + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from mlperf import logging + +from common.rnn import rnn + + +class StackTime(nn.Module): + def __init__(self, factor): + super().__init__() + self.factor = int(factor) + + def forward(self, x, x_lens): + # T, B, U + seq = [x] + for i in range(1, self.factor): + tmp = torch.zeros_like(x) + tmp[:-i, :, :] = x[i:, :, :] + seq.append(tmp) + # x_lens = torch.ceil(x_lens.float() / self.factor).int() + x_lens = (x_lens.int() + self.factor - 1) // self.factor + return torch.cat(seq, dim=2)[::self.factor, :, :], x_lens + + +class RNNT(nn.Module): + """A Recurrent Neural Network Transducer (RNN-T). + + Args: + in_features: Number of input features per step per batch. + vocab_size: Number of output symbols (inc blank). + forget_gate_bias: Total initialized value of the bias used in the + forget gate. Set to None to use PyTorch's default initialisation. + (See: http://proceedings.mlr.press/v37/jozefowicz15.pdf) + batch_norm: Use batch normalization in encoder and prediction network + if true. + encoder_n_hidden: Internal hidden unit size of the encoder. + encoder_rnn_layers: Encoder number of layers. + pred_n_hidden: Internal hidden unit size of the prediction network. + pred_rnn_layers: Prediction network number of layers. + joint_n_hidden: Internal hidden unit size of the joint network. 
+ """ + def __init__(self, n_classes, in_feats, enc_n_hid, + enc_pre_rnn_layers, enc_post_rnn_layers, enc_stack_time_factor, + enc_dropout, pred_dropout, joint_dropout, + pred_n_hid, pred_rnn_layers, joint_n_hid, + forget_gate_bias, + hidden_hidden_bias_scale=0.0, weights_init_scale=1.0, + enc_lr_factor=1.0, pred_lr_factor=1.0, joint_lr_factor=1.0, mlperf=False): + super(RNNT, self).__init__() + + self.enc_lr_factor = enc_lr_factor + self.pred_lr_factor = pred_lr_factor + self.joint_lr_factor = joint_lr_factor + + self.pred_n_hid = pred_n_hid + + pre_rnn_input_size = in_feats + + post_rnn_input_size = enc_stack_time_factor * enc_n_hid + + enc_mod = {} + enc_mod["pre_rnn"] = rnn(input_size=pre_rnn_input_size, + hidden_size=enc_n_hid, + num_layers=enc_pre_rnn_layers, + forget_gate_bias=forget_gate_bias, + hidden_hidden_bias_scale=hidden_hidden_bias_scale, + weights_init_scale=weights_init_scale, + dropout=enc_dropout, + tensor_name='pre_rnn', + mlperf=mlperf, + ) + + enc_mod["stack_time"] = StackTime(enc_stack_time_factor) + + enc_mod["post_rnn"] = rnn(input_size=post_rnn_input_size, + hidden_size=enc_n_hid, + num_layers=enc_post_rnn_layers, + forget_gate_bias=forget_gate_bias, + hidden_hidden_bias_scale=hidden_hidden_bias_scale, + weights_init_scale=weights_init_scale, + dropout=enc_dropout, + tensor_name='post_rnn', + mlperf=mlperf, + ) + + self.encoder = torch.nn.ModuleDict(enc_mod) + + pred_embed = torch.nn.Embedding(n_classes - 1, pred_n_hid) + if mlperf: + logging.log_event(logging.constants.WEIGHTS_INITIALIZATION, + metadata=dict(tensor='pred_embed')) + + self.prediction = torch.nn.ModuleDict({ + "embed": pred_embed, + "dec_rnn": rnn( + input_size=pred_n_hid, + hidden_size=pred_n_hid, + num_layers=pred_rnn_layers, + forget_gate_bias=forget_gate_bias, + hidden_hidden_bias_scale=hidden_hidden_bias_scale, + weights_init_scale=weights_init_scale, + dropout=pred_dropout, + tensor_name='dec_rnn', + mlperf=mlperf, + ), + }) + + self.joint_pred = torch.nn.Linear( + 
pred_n_hid, + joint_n_hid) + if mlperf: + logging.log_event(logging.constants.WEIGHTS_INITIALIZATION, + metadata=dict(tensor='joint_pred')) + self.joint_enc = torch.nn.Linear( + enc_n_hid, + joint_n_hid) + if mlperf: + logging.log_event(logging.constants.WEIGHTS_INITIALIZATION, + metadata=dict(tensor='joint_enc')) + + self.joint_net = nn.Sequential( + torch.nn.ReLU(inplace=True), + torch.nn.Dropout(p=joint_dropout), + torch.nn.Linear(joint_n_hid, n_classes)) + if mlperf: + logging.log_event(logging.constants.WEIGHTS_INITIALIZATION, + metadata=dict(tensor='joint_net')) + + def forward(self, x, x_lens, y, y_lens, state=None): + # x: (B, channels, features, seq_len) + y = label_collate(y) + + f, x_lens = self.encode(x, x_lens) + + g, _ = self.predict(y, state) + out = self.joint(f, g) + + return out, x_lens + + def encode(self, x, x_lens): + """ + Args: + x: tuple of ``(input, input_lens)``. ``input`` has shape (T, B, I), + ``input_lens`` has shape ``(B,)``. + + Returns: + f: tuple of ``(output, output_lens)``. 
``output`` has shape
+ (B, T, H), ``output_lens``
+ """
+ x, _ = self.encoder["pre_rnn"](x, None)
+ x, x_lens = self.encoder["stack_time"](x, x_lens)
+ x, _ = self.encoder["post_rnn"](x, None)
+
+ return x.transpose(0, 1), x_lens
+
+ def predict(self, y, state=None, add_sos=True):
+ """
+ B - batch size
+ U - label length
+ H - Hidden dimension size
+ L - Number of decoder layers = 2
+
+ Args:
+ y: (B, U)
+
+ Returns:
+ Tuple (g, hid) where:
+ g: (B, U + 1, H)
+ hid: (h, c) where h is the final sequence hidden state and c is
+ the final cell state:
+ h (tensor), shape (L, B, H)
+ c (tensor), shape (L, B, H)
+ """
+ if y is not None:
+ # (B, U) -> (B, U, H)
+ y = self.prediction["embed"](y)
+ else:
+ B = 1 if state is None else state[0].size(1)
+ y = torch.zeros((B, 1, self.pred_n_hid)).to(
+ device=self.joint_enc.weight.device,
+ dtype=self.joint_enc.weight.dtype
+ )
+
+ # prepend blank "start of sequence" symbol
+ if add_sos:
+ B, U, H = y.shape
+ start = torch.zeros((B, 1, H)).to(device=y.device, dtype=y.dtype)
+ y = torch.cat([start, y], dim=1).contiguous() # (B, U + 1, H)
+ else:
+ start = None # makes del call later easier
+
+ y = y.transpose(0, 1) # .contiguous() # (U + 1, B, H)
+ g, hid = self.prediction["dec_rnn"](y, state)
+ g = g.transpose(0, 1) # .contiguous() # (B, U + 1, H)
+ del y, start, state
+ return g, hid
+
+ def joint(self, f, g):
+ """
+ f should be shape (B, T, H)
+ g should be shape (B, U + 1, H)
+
+ returns:
+ logits of shape (B, T, U, K + 1)
+ """
+ # Combine the input states and the output states
+ f = self.joint_enc(f)
+ g = self.joint_pred(g)
+
+ f = f.unsqueeze(dim=2) # (B, T, 1, H)
+ g = g.unsqueeze(dim=1) # (B, 1, U + 1, H)
+
+ res = self.joint_net(f + g)
+
+ del f, g
+ return res
+
+ def param_groups(self, lr):
+ chain_params = lambda *layers: chain(*[l.parameters() for l in layers])
+ return [{'params': chain_params(self.encoder),
+ 'lr': lr * self.enc_lr_factor},
+ {'params': chain_params(self.prediction),
+ 'lr': lr *
self.pred_lr_factor}, + {'params': chain_params(self.joint_enc, self.joint_pred, self.joint_net), + 'lr': lr * self.joint_lr_factor}, + ] + + +def label_collate(labels): + """Collates the label inputs for the rnn-t prediction network. + + If `labels` is already in torch.Tensor form this is a no-op. + + Args: + labels: A torch.Tensor List of label indexes or a torch.Tensor. + + Returns: + A padded torch.Tensor of shape (batch, max_seq_len). + """ + + if isinstance(labels, torch.Tensor): + return labels.type(torch.int64) + if not isinstance(labels, (list, tuple)): + raise ValueError( + f"`labels` should be a list or tensor not {type(labels)}" + ) + + batch_size = len(labels) + max_len = max(len(l) for l in labels) + + cat_labels = np.full((batch_size, max_len), fill_value=0.0, dtype=np.int32) + for e, l in enumerate(labels): + cat_labels[e, :len(l)] = l + labels = torch.LongTensor(cat_labels) + + return labels diff --git a/benchmarks/rnnt/ootb/train/rnnt_layers.svg b/benchmarks/rnnt/ootb/train/rnnt_layers.svg new file mode 100755 index 0000000..8f98be9 --- /dev/null +++ b/benchmarks/rnnt/ootb/train/rnnt_layers.svg @@ -0,0 +1 @@ +Joint netTranscript prediction netAudio encoding netReLU + FC 512->1024FC 512->5122x LSTM 512Embeding 1023-> 512FC 1024->5123x LSTM 10242x stack,2x subsample2x LSTM 1024 \ No newline at end of file diff --git a/benchmarks/rnnt/ootb/train/run_and_time.sh b/benchmarks/rnnt/ootb/train/run_and_time.sh new file mode 100755 index 0000000..cb87844 --- /dev/null +++ b/benchmarks/rnnt/ootb/train/run_and_time.sh @@ -0,0 +1,52 @@ +#!/bin/bash + +# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# runs benchmark and reports time to convergence +# to use the script: +# run_and_time.sh + +set -e + +# start timing +start=$(date +%s) +start_fmt=$(date +%Y-%m-%d\ %r) +echo "STARTING TIMING RUN AT $start_fmt" + +# run benchmark +#set -x + +echo "running benchmark" + +bash scripts/train.sh + +ret_code=$? + +set +x + +sleep 3 +if [[ $ret_code != 0 ]]; then exit $ret_code; fi + +# end timing +end=$(date +%s) +end_fmt=$(date +%Y-%m-%d\ %r) +echo "ENDING TIMING RUN AT $end_fmt" + +# report result +result=$(( $end - $start )) +result_name="RNN_SPEECH_RECOGNITION" + +echo "RESULT,$result_name,,$result,nvidia,$start_fmt" + diff --git a/benchmarks/rnnt/ootb/train/scripts/create_sentencepieces.sh b/benchmarks/rnnt/ootb/train/scripts/create_sentencepieces.sh new file mode 100644 index 0000000..371a1e8 --- /dev/null +++ b/benchmarks/rnnt/ootb/train/scripts/create_sentencepieces.sh @@ -0,0 +1,19 @@ +# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +mkdir -p $DATASET_DIR/sentencepieces +jq -r '.[]["transcript"]' $DATASET_DIR/LibriSpeech/librispeech-train-*-wav.json > tmptxt.txt +python -c "import sentencepiece as spm; spm.SentencePieceTrainer.train(input='tmptxt.txt', model_prefix='librispeech1023', vocab_size=1023, character_coverage=1.0, bos_id=-1, eos_id=-1, model_type='unigram')" +cp librispeech1023.* $DATASET_DIR/sentencepieces/ +rm tmptxt.txt diff --git a/benchmarks/rnnt/ootb/train/scripts/docker/build.sh b/benchmarks/rnnt/ootb/train/scripts/docker/build.sh new file mode 100755 index 0000000..23b17c3 --- /dev/null +++ b/benchmarks/rnnt/ootb/train/scripts/docker/build.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +docker build . --rm -t mlperf/rnn_speech_recognition diff --git a/benchmarks/rnnt/ootb/train/scripts/docker/launch.sh b/benchmarks/rnnt/ootb/train/scripts/docker/launch.sh new file mode 100755 index 0000000..8138272 --- /dev/null +++ b/benchmarks/rnnt/ootb/train/scripts/docker/launch.sh @@ -0,0 +1,32 @@ +# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +#!/bin/bash + +DATA_DIR=$1 +CHECKPOINT_DIR=$2 +RESULT_DIR=$3 + +docker run -it --rm \ + --gpus='all' \ + --shm-size=4g \ + --ulimit memlock=-1 \ + --ulimit stack=67108864 \ + -v "$DATA_DIR":/datasets \ + -v "$CHECKPOINT_DIR":/checkpoints/ \ + -v "$RESULT_DIR":/results/ \ + -v $PWD:/code \ + -v $PWD:/workspace/rnnt \ + mlperf/rnn_speech_recognition bash diff --git a/benchmarks/rnnt/ootb/train/scripts/download_librispeech.sh b/benchmarks/rnnt/ootb/train/scripts/download_librispeech.sh new file mode 100755 index 0000000..2e353bb --- /dev/null +++ b/benchmarks/rnnt/ootb/train/scripts/download_librispeech.sh @@ -0,0 +1,31 @@ +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +#!/usr/bin/env bash + +script_dir=`dirname "${BASH_SOURCE[0]}"` +set -x +UTILS_DIR="$script_dir/../utils" + +DATASET="LibriSpeech" +DATA_DIR="$DATASET_DIR/$DATASET" +if [ ! -d "$DATA_DIR" ] +then + mkdir -p $DATA_DIR + chmod go+rx $DATASET_DIR + python $UTILS_DIR/download_librispeech.py $UTILS_DIR/librispeech.csv $DATA_DIR -e ${DATASET_DIR}/ +else + echo "Directory $DATA_DIR already exists." +fi diff --git a/benchmarks/rnnt/ootb/train/scripts/inference.sh b/benchmarks/rnnt/ootb/train/scripts/inference.sh new file mode 100755 index 0000000..c763063 --- /dev/null +++ b/benchmarks/rnnt/ootb/train/scripts/inference.sh @@ -0,0 +1,62 @@ +#!/bin/bash + +# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# TODO Check if NUM_STEPS is still supported and how is set in infer/bench +# TODO Check if multi-gpu works ok + +: ${DATA_DIR:=${1:-"/datasets/LibriSpeech"}} +: ${MODEL_CONFIG:=${2:-"configs/rnnt.yaml"}} +: ${OUTPUT_DIR:=${3:-"/results"}} +: ${CHECKPOINT:=${4:-"/checkpoints/rnnt_fp16.pt"}} +: ${DATASET:="dev-clean"} +: ${CUDNN_BENCHMARK:=false} +: ${MAX_DURATION:=""} +: ${PAD_TO_MAX_DURATION:=false} +: ${NUM_GPUS:=1} +: ${NUM_STEPS:="-1"} +: ${AMP:=true} +: ${BATCH_SIZE:=8} +: ${EMA:=false} +: ${SEED:=0} +: ${DALI_DEVICE:="none"} +: ${CPU:=false} +: ${LOGITS_FILE:=} +: ${PREDICTION_FILE="${OUTPUT_DIR}/${DATASET}.predictions"} +: ${REPEATS:=1} + +mkdir -p "$OUTPUT_DIR" + +ARGS="--dataset_dir=$DATA_DIR" +ARGS+=" --val_manifest=$DATA_DIR/librispeech-${DATASET}-wav.json" +ARGS+=" --model_config=$MODEL_CONFIG" +ARGS+=" --output_dir=$OUTPUT_DIR" +ARGS+=" --batch_size=$BATCH_SIZE" +ARGS+=" --seed=$SEED" +ARGS+=" --dali_device=$DALI_DEVICE" +ARGS+=" --repeats=$REPEATS" + +[ "$AMP" = true ] && ARGS+=" --amp" +[ "$EMA" = true ] && ARGS+=" --ema" +[ "$CUDNN_BENCHMARK" = true ] && ARGS+=" --cudnn_benchmark" +[ -n "$CHECKPOINT" ] && ARGS+=" --ckpt=$CHECKPOINT" +[ "$NUM_STEPS" -gt 0 ] && ARGS+=" --steps $NUM_STEPS" +[ -n "$PREDICTION_FILE" ] && ARGS+=" --save_prediction $PREDICTION_FILE" +[ -n "$LOGITS_FILE" ] && ARGS+=" --logits_save_to $LOGITS_FILE" +[ "$CPU" = true ] && ARGS+=" --cpu" +[ -n "$MAX_DURATION" ] && ARGS+=" 
--max_duration $MAX_DURATION" +[ "$PAD_TO_MAX_DURATION" = true ] && ARGS+=" --pad_to_max_duration" + +python -m torch.distributed.launch --nproc_per_node=$NUM_GPUS inference.py $ARGS diff --git a/benchmarks/rnnt/ootb/train/scripts/inference_benchmark.sh b/benchmarks/rnnt/ootb/train/scripts/inference_benchmark.sh new file mode 100755 index 0000000..cea4afc --- /dev/null +++ b/benchmarks/rnnt/ootb/train/scripts/inference_benchmark.sh @@ -0,0 +1,23 @@ +#!/bin/bash + +# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -a + +: ${CUDNN_BENCHMARK:=true} +: ${MAX_DURATION:=36} +: ${PAD_TO_MAX_DURATION:=true} + +bash ./scripts/inference.sh "$@" diff --git a/benchmarks/rnnt/ootb/train/scripts/preprocess_librispeech.sh b/benchmarks/rnnt/ootb/train/scripts/preprocess_librispeech.sh new file mode 100755 index 0000000..02f4c93 --- /dev/null +++ b/benchmarks/rnnt/ootb/train/scripts/preprocess_librispeech.sh @@ -0,0 +1,54 @@ +# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +#!/usr/bin/env bash + +script_dir=`dirname "${BASH_SOURCE[0]}"` +set -x +UTILS_DIR="$script_dir/../utils" + +python $UTILS_DIR/convert_librispeech.py \ + --input_dir $DATASET_DIR/LibriSpeech/train-clean-100 \ + --dest_dir $DATASET_DIR/LibriSpeech/train-clean-100-wav \ + --output_json $DATASET_DIR/LibriSpeech/librispeech-train-clean-100-wav.json +python $UTILS_DIR/convert_librispeech.py \ + --input_dir $DATASET_DIR/LibriSpeech/train-clean-360 \ + --dest_dir $DATASET_DIR/LibriSpeech/train-clean-360-wav \ + --output_json $DATASET_DIR/LibriSpeech/librispeech-train-clean-360-wav.json +python $UTILS_DIR/convert_librispeech.py \ + --input_dir $DATASET_DIR/LibriSpeech/train-other-500 \ + --dest_dir $DATASET_DIR/LibriSpeech/train-other-500-wav \ + --output_json $DATASET_DIR/LibriSpeech/librispeech-train-other-500-wav.json + + +python $UTILS_DIR/convert_librispeech.py \ + --input_dir $DATASET_DIR/LibriSpeech/dev-clean \ + --dest_dir $DATASET_DIR/LibriSpeech/dev-clean-wav \ + --output_json $DATASET_DIR/LibriSpeech/librispeech-dev-clean-wav.json +python $UTILS_DIR/convert_librispeech.py \ + --input_dir $DATASET_DIR/LibriSpeech/dev-other \ + --dest_dir $DATASET_DIR/LibriSpeech/dev-other-wav \ + --output_json $DATASET_DIR/LibriSpeech/librispeech-dev-other-wav.json + + +python $UTILS_DIR/convert_librispeech.py \ + --input_dir $DATASET_DIR/LibriSpeech/test-clean \ + --dest_dir $DATASET_DIR/LibriSpeech/test-clean-wav \ + --output_json $DATASET_DIR/LibriSpeech/librispeech-test-clean-wav.json +python $UTILS_DIR/convert_librispeech.py \ + --input_dir 
$DATASET_DIR/LibriSpeech/test-other \ + --dest_dir $DATASET_DIR/LibriSpeech/test-other-wav \ + --output_json $DATASET_DIR/LibriSpeech/librispeech-test-other-wav.json + +bash $script_dir/create_sentencepieces.sh diff --git a/benchmarks/rnnt/ootb/train/scripts/train.sh b/benchmarks/rnnt/ootb/train/scripts/train.sh new file mode 100755 index 0000000..8796009 --- /dev/null +++ b/benchmarks/rnnt/ootb/train/scripts/train.sh @@ -0,0 +1,110 @@ +#!/bin/bash + +# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +export OMP_NUM_THREADS=1 + +: ${DATA_DIR:=${1:-"$DATASET_DIR/LibriSpeech"}} +: ${MODEL_CONFIG:=${2:-"configs/baseline_v3-1023sp.yaml"}} +: ${OUTPUT_DIR:=${3:-"$RESULT_DIR"}} +: ${FB5LOGGER:=${4}} +: ${FB5CONFIG:=${5}} +: ${CHECKPOINT:-} +: ${CUDNN_BENCHMARK:=true} +: ${NUM_GPUS:=1} +: ${AMP:=false} +: ${GLOBAL_BATCH_SIZE:=1024} +: ${VAL_BATCH_SIZE:=2} +: ${GRAD_ACCUMULATION_STEPS:=64} # 8 +: ${LEARNING_RATE:=0.004} +: ${LR_EXP_GAMMA:=0.935} # ~0.005 in 80 epochs +: ${NUM_BUCKETS=6} # empty means to use torch.utils.data.distributed.DistributedSampler +: ${EMA:=0.999} +: ${SEED=1} +: ${EPOCHS:=100} +: ${WARMUP_EPOCHS:=6} # 8000 steps with 1x8x24 should be ~5.6 epochs +: ${HOLD_EPOCHS:=40} +: ${SAVE_AT_THE_END:=false} +: ${EPOCHS_THIS_JOB:=0} +: ${RESUME:=true} +: ${DALI_DEVICE:="cpu"} +: ${VAL_FREQUENCY:=1} +: ${PREDICTION_FREQUENCY:=1000} +: ${BETA1:=0.9} +: ${BETA2:=0.999} +: ${LOG_FREQUENCY:=1} +: ${TRAIN_MANIFESTS:="$DATA_DIR/librispeech-train-clean-100-wav.json \ + $DATA_DIR/librispeech-train-clean-360-wav.json \ + $DATA_DIR/librispeech-train-other-500-wav.json"} +: ${VAL_MANIFESTS:="$DATA_DIR/librispeech-dev-clean-wav.json"} +: ${LOG_NORM:=false} +: ${USE_OLD_VAL:=true} +: ${USE_NEW_VAL:=false} +: ${MAX_SYMBOL_PER_SAMPLE=300} +: ${WEIGHTS_INIT_SCALE=0.5} +: ${CLIP_NORM:=1} + +BATCH_SIZE=$(( $GLOBAL_BATCH_SIZE / $NUM_GPUS )) + +mkdir -p "$OUTPUT_DIR" + +ARGS="--dataset_dir=$DATA_DIR" +ARGS+=" --val_manifests $VAL_MANIFESTS" +ARGS+=" --train_manifests $TRAIN_MANIFESTS" +ARGS+=" --model_config=$MODEL_CONFIG" +ARGS+=" --output_dir=$OUTPUT_DIR" +ARGS+=" --lr=$LEARNING_RATE" +ARGS+=" --batch_size=$BATCH_SIZE" +ARGS+=" --val_batch_size=$VAL_BATCH_SIZE" +ARGS+=" --min_lr=1e-5" +ARGS+=" --lr_exp_gamma=$LR_EXP_GAMMA" +ARGS+=" --epochs=$EPOCHS" +ARGS+=" --warmup_epochs=$WARMUP_EPOCHS" +ARGS+=" --hold_epochs=$HOLD_EPOCHS" +ARGS+=" --epochs_this_job=$EPOCHS_THIS_JOB" +ARGS+=" --ema=$EMA" +ARGS+=" --seed=$SEED" +ARGS+=" --weight_decay=1e-3" +ARGS+=" 
--log_frequency=$LOG_FREQUENCY" +ARGS+=" --val_frequency=$VAL_FREQUENCY" +ARGS+=" --grad_accumulation_steps=$GRAD_ACCUMULATION_STEPS " +ARGS+=" --dali_device=$DALI_DEVICE" +ARGS+=" --beta1=$BETA1" +ARGS+=" --beta2=$BETA2" + +[ -n "$FB5LOGGER" ] && ARGS+=" --fb5logger=$FB5LOGGER" +[ -n "$FB5CONFIG" ] && ARGS+=" --fb5config=$FB5CONFIG" +[ "$AMP" = true ] && ARGS+=" --amp" +[ "$RESUME" = true ] && ARGS+=" --resume" +[ "$CUDNN_BENCHMARK" = true ] && ARGS+=" --cudnn_benchmark" +[ "$LOG_NORM" = true ] && ARGS+=" --log_norm" +[ "$SAVE_AT_THE_END" = true ] && ARGS+=" --save_at_the_end" +[ -n "$CHECKPOINT" ] && ARGS+=" --ckpt=$CHECKPOINT" +[ -n "$NUM_BUCKETS" ] && ARGS+=" --num_buckets=$NUM_BUCKETS" +[ -n "$TARGET" ] && ARGS+=" --target=$TARGET" +[ -n "$CLIP_NORM" ] && ARGS+=" --clip_norm=$CLIP_NORM" +[ -n "$PREDICTION_FREQUENCY" ] && ARGS+=" --prediction_frequency=$PREDICTION_FREQUENCY" +[ -n "$SAVE_MILESTONES" ] && ARGS+=" --keep_milestones $SAVE_MILESTONES" +[ -n "$SAVE_BEST" ] && ARGS+=" --save_best_from=$SAVE_BEST" +[ -n "$SAVE_FREQUENCY" ] && ARGS+=" --save_frequency=$SAVE_FREQUENCY" +[ -n "$START_CLIP" ] && ARGS+=" --start_clip=$START_CLIP" +[ -n "$HIDDEN_HIDDEN_BIAS_SCALED" ] && ARGS+=" --hidden_hidden_bias_scale=$HIDDEN_HIDDEN_BIAS_SCALED" +[ -n "$WEIGHTS_INIT_SCALE" ] && ARGS+=" --weights_init_scale=$WEIGHTS_INIT_SCALE" +[ -n "$MAX_SYMBOL_PER_SAMPLE" ] && ARGS+=" --max_symbol_per_sample=$MAX_SYMBOL_PER_SAMPLE" + +DISTRIBUTED=${DISTRIBUTED:-"-m torch.distributed.launch --nproc_per_node=$NUM_GPUS"} +script_dir=`dirname "${BASH_SOURCE[0]}"` +set -x +python ${DISTRIBUTED} "$script_dir/../train.py" ${ARGS} diff --git a/benchmarks/rnnt/ootb/train/scripts/train_bench.sh b/benchmarks/rnnt/ootb/train/scripts/train_bench.sh new file mode 100755 index 0000000..420c8fe --- /dev/null +++ b/benchmarks/rnnt/ootb/train/scripts/train_bench.sh @@ -0,0 +1,84 @@ +#!/bin/bash + +# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +echo -e "\nNVIDIA container build: ${NVIDIA_BUILD_ID}\n" + +export OMP_NUM_THREADS=1 + +DATA_DIR=${DATA_DIR:-"/datasets/LibriSpeech"} +MODEL_CONFIG=${MODEL_CONFIG:-"configs/rnnt.yaml"} +OUTPUT_DIR=${OUTPUT_DIR:-"/results"} +CHECKPOINT=${CHECKPOINT:-""} +CREATE_LOGFILE=${CREATE_LOGFILE:-"true"} +CUDNN_BENCHMARK=${CUDNN_BENCHMARK:-"true"} +NUM_GPUS=${NUM_GPUS:-8} +AMP=${AMP:-"true"} +EPOCHS=${EPOCHS:-100} +WARMUP_EPOCHS=${WARMUP_EPOCHS:-6} # 8000 steps with 1x8x24 should be ~5.6 epochs +HOLD_EPOCHS=${HOLD_EPOCHS:-0} +SEED=${SEED:-1} +BATCH_SIZE=${BATCH_SIZE:-8} +VAL_BATCH_SIZE=${VAL_BATCH_SIZE:-2} +OPTIMIZER=${OPTIMIZER:-"adamw"} +LEARNING_RATE=${LEARNING_RATE:-"0.001"} +LR_POLICY=${LR_POLICY:-"legacy"} +# LR_EXP_GAMMA=${LR_EXP_GAMMA:-0.981} +GRADIENT_ACCUMULATION_STEPS=${GRADIENT_ACCUMULATION_STEPS:-1} +EMA=${EMA:-0.0} # XXX +SAVE_FREQUENCY=${SAVE_FREQUENCY:-10} +EPOCHS_THIS_JOB=${EPOCHS_THIS_JOB:-0} +RESUME=${RESUME:-"true"} +DALI_DEVICE=${DALI_DEVICE:-"none"} + +mkdir -p "$OUTPUT_DIR" + +ARGS=" --batch_size=$BATCH_SIZE" +ARGS+=" --val_batch_size=$VAL_BATCH_SIZE" +ARGS+=" --output_dir=$OUTPUT_DIR" +ARGS+=" --model_config=$MODEL_CONFIG" +ARGS+=" --lr=$LEARNING_RATE" +ARGS+=" --min_lr=1e-5" +ARGS+=" --lr_policy=$LR_POLICY" +# ARGS+=" --lr_exp_gamma=$LR_EXP_GAMMA" +ARGS+=" --epochs=$EPOCHS" +ARGS+=" --warmup_epochs=$WARMUP_EPOCHS" +ARGS+=" --hold_epochs=$HOLD_EPOCHS" +ARGS+=" --epochs_this_job=$EPOCHS_THIS_JOB" +ARGS+=" 
--ema=$EMA" +ARGS+=" --seed=$SEED" +ARGS+=" --optimizer=$OPTIMIZER" +ARGS+=" --dataset_dir=$DATA_DIR" +ARGS+=" --val_manifest=$DATA_DIR/librispeech-dev-clean-wav.json" +ARGS+=" --train_manifest=$DATA_DIR/librispeech-bench-clean-wav.json" +# ARGS+=",$DATA_DIR/librispeech-train-clean-360-wav.json" +# ARGS+=",$DATA_DIR/librispeech-train-other-500-wav.json" +ARGS+=" --weight_decay=1e-3" +ARGS+=" --save_frequency=$SAVE_FREQUENCY" +ARGS+=" --eval_frequency=1000" # XXX =100 +ARGS+=" --train_frequency=1" +ARGS+=" --print_prediction_frequency=100" +ARGS+=" --gradient_accumulation_steps=$GRADIENT_ACCUMULATION_STEPS " +ARGS+=" --dali_device=$DALI_DEVICE" +[ "$AMP" == "true" ] && \ +ARGS+=" --amp" +[ "$RESUME" == "true" ] && \ +ARGS+=" --resume" +[ "$CUDNN_BENCHMARK" = "true" ] && \ +ARGS+=" --cudnn_benchmark" +[ -n "$CHECKPOINT" ] && \ +ARGS+=" --ckpt=${CHECKPOINT}" + +python -m torch.distributed.launch --nproc_per_node=$NUM_GPUS train.py $ARGS diff --git a/benchmarks/rnnt/ootb/train/scripts/train_debug.sh b/benchmarks/rnnt/ootb/train/scripts/train_debug.sh new file mode 100755 index 0000000..224f525 --- /dev/null +++ b/benchmarks/rnnt/ootb/train/scripts/train_debug.sh @@ -0,0 +1,96 @@ +#!/bin/bash + +# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +export OMP_NUM_THREADS=1 + +DATA_DIR="/datasets/LibriSpeech" +TRAIN_MANIFESTS="$DATA_DIR/librispeech-train-clean-100-wav.json" +NUM_GPUS=1 + +: ${DATA_DIR:=${1:-"/datasets/LibriSpeech"}} +: ${MODEL_CONFIG:=${2:-"configs/rnnt.yaml"}} +: ${OUTPUT_DIR:=${3:-"/results"}} +: ${CHECKPOINT:=${4:-}} +: ${CUDNN_BENCHMARK:=true} +: ${NUM_GPUS:=8} +: ${AMP:=true} +: ${BATCH_SIZE:=8} +: ${VAL_BATCH_SIZE:=8} +: ${OPTIMIZER:=adamw} +: ${GRAD_ACCUMULATION_STEPS:=1} +: ${LEARNING_RATE:=0.001} +# : ${MIN_LEARNING_RATE:=0.00001} +: ${LR_POLICY:=legacy} +: ${LR_EXP_GAMMA:=0.935} # ~0.005 in 80 epochs +: ${EMA:=0.999} +: ${SEED:=1} +: ${EPOCHS:=100} +: ${WARMUP_EPOCHS:=6} # 8000 steps with 1x8x24 should be ~5.6 epochs +: ${HOLD_EPOCHS:=0} +: ${SAVE_FREQUENCY:=10} +: ${EPOCHS_THIS_JOB:=0} +: ${RESUME:=true} +: ${DALI_DEVICE:="none"} +: ${PAD_TO_MAX_DURATION:=false} +: ${VAL_FREQUENCY:=10000} +: ${PREDICTION_FREQUENCY:=1000} +: ${TRAIN_MANIFESTS:="$DATA_DIR/librispeech-train-clean-100-wav.json \ + $DATA_DIR/librispeech-train-clean-360-wav.json \ + $DATA_DIR/librispeech-train-other-500-wav.json"} +: ${VAL_MANIFESTS:="$DATA_DIR/librispeech-dev-clean-wav.json"} + +: ${PDB:=false} + +mkdir -p "$OUTPUT_DIR" + +ARGS="--dataset_dir=$DATA_DIR" +ARGS+=" --val_manifests $VAL_MANIFESTS" +ARGS+=" --train_manifests $TRAIN_MANIFESTS" +ARGS+=" --model_config=$MODEL_CONFIG" +ARGS+=" --output_dir=$OUTPUT_DIR" +ARGS+=" --lr=$LEARNING_RATE" +ARGS+=" --batch_size=$BATCH_SIZE" +ARGS+=" --val_batch_size=$VAL_BATCH_SIZE" +ARGS+=" --min_lr=1e-5" +ARGS+=" --lr_policy=$LR_POLICY" +ARGS+=" --lr_exp_gamma=$LR_EXP_GAMMA" +ARGS+=" --epochs=$EPOCHS" +ARGS+=" --warmup_epochs=$WARMUP_EPOCHS" +ARGS+=" --hold_epochs=$HOLD_EPOCHS" +ARGS+=" --epochs_this_job=$EPOCHS_THIS_JOB" +ARGS+=" --ema=$EMA" +ARGS+=" --seed=$SEED" +ARGS+=" --optimizer=$OPTIMIZER" +ARGS+=" --weight_decay=1e-3" +ARGS+=" --save_frequency=$SAVE_FREQUENCY" +ARGS+=" --keep_milestones 50 100 150 200" +ARGS+=" --save_best_from=80" +ARGS+=" 
--log_frequency=1" +ARGS+=" --val_frequency=$VAL_FREQUENCY" +ARGS+=" --prediction_frequency=$PREDICTION_FREQUENCY" +ARGS+=" --grad_accumulation_steps=$GRAD_ACCUMULATION_STEPS " +ARGS+=" --dali_device=$DALI_DEVICE" + +[ "$AMP" = true ] && ARGS+=" --amp" +[ "$RESUME" = true ] && ARGS+=" --resume" +[ "$CUDNN_BENCHMARK" = true ] && ARGS+=" --cudnn_benchmark" +[ -n "$CHECKPOINT" ] && ARGS+=" --ckpt=$CHECKPOINT" + +DISTRIBUTED=${DISTRIBUTED:-"-m torch.distributed.launch --nproc_per_node=$NUM_GPUS"} + +[ "$PDB" = true ] && DISTRIBUTED="-m ipdb" + +python ${DISTRIBUTED} train.py ${ARGS} diff --git a/benchmarks/rnnt/ootb/train/scripts/train_refactor.sh b/benchmarks/rnnt/ootb/train/scripts/train_refactor.sh new file mode 100644 index 0000000..58b5a4f --- /dev/null +++ b/benchmarks/rnnt/ootb/train/scripts/train_refactor.sh @@ -0,0 +1,85 @@ +#!/bin/bash + +# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
echo -e "\nNVIDIA container build: ${NVIDIA_BUILD_ID}\n"

# Single OpenMP thread per worker; parallelism comes from NUM_GPUS processes.
export OMP_NUM_THREADS=1

# All knobs are environment-overridable; the value after ':-' is the default.
DATA_DIR=${DATA_DIR:-"/datasets/LibriSpeech"}
MODEL_CONFIG=${MODEL_CONFIG:-"configs/rnnt.yaml"}
OUTPUT_DIR=${OUTPUT_DIR:-"/results"}
CHECKPOINT=${CHECKPOINT:-""}
# NOTE(review): CREATE_LOGFILE is set but not used in the visible part of
# this script.
CREATE_LOGFILE=${CREATE_LOGFILE:-"true"}
CUDNN_BENCHMARK=${CUDNN_BENCHMARK:-"true"}
NUM_GPUS=${NUM_GPUS:-8}
AMP=${AMP:-"true"}
EPOCHS=${EPOCHS:-100}
WARMUP_EPOCHS=${WARMUP_EPOCHS:-6}  # 8000 steps with 1x8x24 should be ~5.6 epochs
HOLD_EPOCHS=${HOLD_EPOCHS:-0}
SEED=${SEED:-1}
BATCH_SIZE=${BATCH_SIZE:-24}
VAL_BATCH_SIZE=${VAL_BATCH_SIZE:-2}
OPTIMIZER=${OPTIMIZER:-"adamw"}
LEARNING_RATE=${LEARNING_RATE:-"0.001"}
LR_POLICY=${LR_POLICY:-"legacy"}
# LR_EXP_GAMMA=${LR_EXP_GAMMA:-0.981}
GRADIENT_ACCUMULATION_STEPS=${GRADIENT_ACCUMULATION_STEPS:-2}
# EMA=0.0 disables exponential weight averaging (XXX marker kept from author).
EMA=${EMA:-0.0} # XXX
SAVE_FREQUENCY=${SAVE_FREQUENCY:-10}
EPOCHS_THIS_JOB=${EPOCHS_THIS_JOB:-0}
RESUME=${RESUME:-"true"}
DALI_DEVICE=${DALI_DEVICE:-"none"}

mkdir -p "$OUTPUT_DIR"

# Assemble the train.py command line.
ARGS=" --batch_size=$BATCH_SIZE"
ARGS+=" --val_batch_size=$VAL_BATCH_SIZE"
ARGS+=" --output_dir=$OUTPUT_DIR"
ARGS+=" --model_config=$MODEL_CONFIG"
ARGS+=" --lr=$LEARNING_RATE"
ARGS+=" --min_lr=1e-5"
ARGS+=" --lr_policy=$LR_POLICY"
# ARGS+=" --lr_exp_gamma=$LR_EXP_GAMMA"
ARGS+=" --epochs=$EPOCHS"
ARGS+=" --warmup_epochs=$WARMUP_EPOCHS"
ARGS+=" --hold_epochs=$HOLD_EPOCHS"
ARGS+=" --epochs_this_job=$EPOCHS_THIS_JOB"
ARGS+=" --ema=$EMA"
ARGS+=" --seed=$SEED"
ARGS+=" --optimizer=$OPTIMIZER"
ARGS+=" --dataset_dir=$DATA_DIR"
ARGS+=" --val_manifest=$DATA_DIR/librispeech-dev-clean-wav.json"
# ARGS+=" --train_manifest=$DATA_DIR/librispeech-dev-clean-wav.json" # XXX
# Only the 100h clean split is enabled; the 360h/500h splits are commented out.
ARGS+=" --train_manifest=$DATA_DIR/librispeech-train-clean-100-wav.json"
# ARGS+=",$DATA_DIR/librispeech-train-clean-360-wav.json"
# ARGS+=",$DATA_DIR/librispeech-train-other-500-wav.json"
ARGS+=" --weight_decay=1e-3"
ARGS+=" --save_frequency=$SAVE_FREQUENCY"
ARGS+=" --eval_frequency=1000" # XXX =100
+ARGS+=" --train_frequency=1" +ARGS+=" --print_prediction_frequency=100" +ARGS+=" --gradient_accumulation_steps=$GRADIENT_ACCUMULATION_STEPS " +ARGS+=" --dali_device=$DALI_DEVICE" +[ "$AMP" == "true" ] && \ +ARGS+=" --amp" +[ "$RESUME" == "true" ] && \ +ARGS+=" --resume" +[ "$CUDNN_BENCHMARK" = "true" ] && \ +ARGS+=" --cudnn_benchmark" +[ -n "$CHECKPOINT" ] && \ +ARGS+=" --ckpt=${CHECKPOINT}" + +python -m torch.distributed.launch --nproc_per_node=$NUM_GPUS train.py $ARGS diff --git a/benchmarks/rnnt/ootb/train/tests/Dockerfile b/benchmarks/rnnt/ootb/train/tests/Dockerfile new file mode 100644 index 0000000..926ca01 --- /dev/null +++ b/benchmarks/rnnt/ootb/train/tests/Dockerfile @@ -0,0 +1,14 @@ +FROM nvcr.io/nvidia/pytorch:20.10-py3 + +COPY tests/requirements.txt . +RUN pip install --upgrade pip && \ + pip install -r requirements.txt + +COPY requirements.txt . +RUN pip install -r requirements.txt + + + +WORKDIR /code + +CMD bash diff --git a/benchmarks/rnnt/ootb/train/tests/requirements.txt b/benchmarks/rnnt/ootb/train/tests/requirements.txt new file mode 100644 index 0000000..b54b1d6 --- /dev/null +++ b/benchmarks/rnnt/ootb/train/tests/requirements.txt @@ -0,0 +1 @@ +pytest==6.1.2 diff --git a/benchmarks/rnnt/ootb/train/tests/rnnt/dataset/test_rnnt_wordpiece_tokenizer.py b/benchmarks/rnnt/ootb/train/tests/rnnt/dataset/test_rnnt_wordpiece_tokenizer.py new file mode 100644 index 0000000..eda794f --- /dev/null +++ b/benchmarks/rnnt/ootb/train/tests/rnnt/dataset/test_rnnt_wordpiece_tokenizer.py @@ -0,0 +1,52 @@ +import pytest + +from rnnt.dataset import RNNTWordpieceTokenizer + + +@pytest.fixture +def vocab_file(): + return 'wordpieces/bert-base-1000.txt' + + +@pytest.fixture +def tokenizer(vocab_file): + charset = "abcdefghijklmnopqrstuvwxyz '" + return RNNTWordpieceTokenizer(vocab_file, charset) + + +def test_labels(vocab_file, tokenizer): + with open(vocab_file) as f: + labels = f.read().splitlines() + + assert tokenizer.labels == labels + [''] + + +def 
test_blank_idex(vocab_file, tokenizer): + with open(vocab_file) as f: + lines = sum(1 for line in f) + + assert tokenizer.blank_index == lines + + +def test_empty_input(tokenizer): + assert tokenizer('') == [] + + +def detokenize(tokens): + output = ' '.join(tokens) + output = output.replace(' ##', '') + output = output.replace('##', '') + return output.strip() + + +def test_expected_input(tokenizer): + expected_input = 'expected input' + tokens = tokenizer(expected_input) + assert detokenize(tokens) == expected_input + + +def test_unexpected_input(tokenizer): + unexpected_input = 'unexpected input!!!' + tokens = tokenizer(unexpected_input) + assert detokenize(tokens) == 'unexpected ' + diff --git a/benchmarks/rnnt/ootb/train/train.py b/benchmarks/rnnt/ootb/train/train.py new file mode 100644 index 0000000..b866b3b --- /dev/null +++ b/benchmarks/rnnt/ootb/train/train.py @@ -0,0 +1,617 @@ +# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""RNN-T training driver (FB5/OOTB variant of the NVIDIA MLPerf reference).

Adds FB5 benchmark logging and a wall-clock cutoff (MAX_TIME) on top of the
standard DALI + apex(FusedLAMB/AMP) training loop.
"""

import argparse
import copy
import os
import random
import time
import sys

import torch
import numpy as np
import torch.distributed as dist
from apex import amp
from apex.optimizers import FusedLAMB
from apex.parallel import DistributedDataParallel

from common import helpers
from common.data.dali import sampler as dali_sampler
from common.data.dali.data_loader import DaliDataLoader
from common.data.text import Tokenizer
from common.data import features
from common.helpers import (Checkpointer, greedy_wer, num_weights, print_once,
                            process_evaluation_epoch)
from common.optimizers import lr_policy
from common.tb_dllogger import flush_log, init_log, log
from rnnt import config
from rnnt.decoder import RNNTGreedyDecoder
from rnnt.loss import RNNTLoss
from rnnt.model import RNNT

from mlperf import logging

# FB5 Logger: make the repo-level fb5logging package importable relative to
# this file's location.
import pathlib
p = pathlib.Path(__file__).parent.resolve() / "../../../../fb5logging"
sys.path.append(os.fspath(p))
from fb5logger import FB5Logger
import loggerconstants


def parse_args():
    """Build and parse the full command-line interface for training."""
    parser = argparse.ArgumentParser(description='RNN-T Training Reference')

    training = parser.add_argument_group('training setup')
    training.add_argument('--epochs', default=100, type=int,
                          help='Number of epochs for the entire training')
    training.add_argument("--warmup_epochs", default=6, type=int,
                          help='Initial epochs of increasing learning rate')
    training.add_argument("--hold_epochs", default=40, type=int,
                          help='Constant max learning rate epochs after warmup')
    training.add_argument('--epochs_this_job', default=0, type=int,
                          help=('Run for a number of epochs with no effect on the lr schedule.'
                                'Useful for re-starting the training.'))
    training.add_argument('--cudnn_benchmark', action='store_true', default=True,
                          help='Enable cudnn benchmark')
    training.add_argument('--amp', '--fp16', action='store_true', default=False,
                          help='Use mixed precision training')
    training.add_argument('--seed', default=None, type=int, help='Random seed')
    # Default comes from the launcher's LOCAL_RANK env var when present.
    training.add_argument('--local_rank', default=os.getenv('LOCAL_RANK', 0), type=int,
                          help='GPU id used for distributed training')
    training.add_argument('--target', default=0.058, type=float, help='Target WER accuracy')
    training.add_argument('--weights_init_scale', default=0.5, type=float, help='If set, overwrites value in config.')
    training.add_argument('--hidden_hidden_bias_scale', type=float, help='If set, overwrites value in config.')

    optim = parser.add_argument_group('optimization setup')
    optim.add_argument('--batch_size', default=128, type=int,
                       help='Effective batch size per GPU (might require grad accumulation')
    optim.add_argument('--val_batch_size', default=2, type=int,
                       help='Evalution time batch size')
    optim.add_argument('--lr', default=4e-3, type=float,
                       help='Peak learning rate')
    optim.add_argument("--min_lr", default=1e-5, type=float,
                       help='minimum learning rate')
    optim.add_argument("--lr_exp_gamma", default=0.935, type=float,
                       help='gamma factor for exponential lr scheduler')
    optim.add_argument('--weight_decay', default=1e-3, type=float,
                       help='Weight decay for the optimizer')
    optim.add_argument('--grad_accumulation_steps', default=8, type=int,
                       help='Number of accumulation steps')
    optim.add_argument('--log_norm', action='store_true',
                       help='If enabled, gradient norms will be logged')
    optim.add_argument('--clip_norm', default=1, type=float,
                       help='If provided, gradients will be clipped above this norm')
    optim.add_argument('--beta1', default=0.9, type=float, help='Beta 1 for optimizer')
    optim.add_argument('--beta2', default=0.999, type=float, help='Beta 2 for optimizer')
    optim.add_argument('--ema', type=float, default=0.999,
                       help='Discount factor for exp averaging of model weights')

    io = parser.add_argument_group('feature and checkpointing setup')
    io.add_argument('--dali_device', type=str, choices=['cpu', 'gpu'],
                    default='cpu', help='Use DALI pipeline for fast data processing')
    io.add_argument('--resume', action='store_true',
                    help='Try to resume from last saved checkpoint.')
    io.add_argument('--ckpt', default=None, type=str,
                    help='Path to a checkpoint for resuming training')
    io.add_argument('--save_at_the_end', action='store_true',
                    help='Saves model checkpoint at the end of training')
    io.add_argument('--save_frequency', default=None, type=int,
                    help='Checkpoint saving frequency in epochs')
    io.add_argument('--keep_milestones', default=[], type=int, nargs='+',
                    help='Milestone checkpoints to keep from removing')
    io.add_argument('--save_best_from', default=200, type=int,
                    help='Epoch on which to begin tracking best checkpoint (dev WER)')
    io.add_argument('--val_frequency', default=1, type=int,
                    help='Number of epochs between evaluations on dev set')
    io.add_argument('--log_frequency', default=25, type=int,
                    help='Number of steps between printing training stats')
    io.add_argument('--prediction_frequency', default=None, type=int,
                    help='Number of steps between printing sample decodings')
    io.add_argument('--model_config', default='configs/baseline_v3-1023sp.yaml',
                    type=str, required=True,
                    help='Path of the model configuration file')
    io.add_argument('--num_buckets', type=int, default=6,
                    help='If provided, samples will be grouped by audio duration, '
                         'to this number of backets, for each bucket, '
                         'random samples are batched, and finally '
                         'all batches are randomly shuffled')
    io.add_argument('--train_manifests', type=str, required=True, nargs='+',
                    help='Paths of the training dataset manifest file')
    io.add_argument('--val_manifests', type=str, required=True, nargs='+',
                    help='Paths of the evaluation datasets manifest files')
    io.add_argument('--max_duration', type=float,
                    help='Discard samples longer than max_duration')
    io.add_argument('--dataset_dir', required=True, type=str,
                    help='Root dir of dataset')
    io.add_argument('--output_dir', type=str, required=True,
                    help='Directory for logs and checkpoints')
    io.add_argument('--log_file', type=str, default=None,
                    help='Path to save the training logfile.')
    io.add_argument('--max_symbol_per_sample', type=int, default=None,
                    help='maximum number of symbols per sample can have during eval')
    io.add_argument('--mlperf', action='store_true', help='Enable MLPerf Logging.')

    # FB5 Logging
    io.add_argument("--fb5logger", type=str, default=None)
    io.add_argument("--fb5config", type=str, default="small")
    return parser.parse_args()


def apply_ema(model, ema_model, decay):
    """In-place EMA update: ema = decay * ema + (1 - decay) * model.

    A falsy decay (0 / None) disables the update entirely.
    """
    if not decay:
        return

    # Unwrap DistributedDataParallel if present.
    sd = getattr(model, 'module', model).state_dict()
    for k, v in ema_model.state_dict().items():
        v.copy_(decay * v + (1 - decay) * sd[k])


@torch.no_grad()
def evaluate(epoch, step, val_loader, val_feat_proc, detokenize,
             ema_model, loss_fn, greedy_decoder, use_amp, args):
    """Run one pass over the dev set with the EMA model; return WER.

    Also logs loss/WER/wall time and (optionally) MLPerf eval events.
    NOTE(review): the `use_amp` parameter is accepted but never used in
    this body.
    """

    ema_model.eval()

    start_time = time.time()
    agg = {'losses': [], 'preds': [], 'txts': [], 'idx': []}
    if args.mlperf:
        logging.log_start(logging.constants.EVAL_START, metadata=dict(epoch_num=epoch))
    for i, batch in enumerate(val_loader):
        print(f'{val_loader.pipeline_type} evaluation: {i:>10}/{len(val_loader):<10}', end='\r')

        audio, audio_lens, txt, txt_lens = batch

        feats, feat_lens = val_feat_proc([audio, audio_lens])

        log_probs, log_prob_lens = ema_model(feats, feat_lens, txt, txt_lens)
        loss = loss_fn(log_probs[:, :log_prob_lens.max().item()],
                       log_prob_lens, txt, txt_lens)

        pred = greedy_decoder.decode(ema_model, feats, feat_lens)

        agg['losses'] += helpers.gather_losses([loss.cpu()])
        agg['preds'] += helpers.gather_predictions([pred], detokenize)
        agg['txts'] += helpers.gather_transcripts([txt.cpu()], [txt_lens.cpu()], detokenize)

    wer, loss = process_evaluation_epoch(agg)
    if args.mlperf:
        logging.log_event(logging.constants.EVAL_ACCURACY, value=wer, metadata=dict(epoch_num=epoch))
        logging.log_end(logging.constants.EVAL_STOP, metadata=dict(epoch_num=epoch))

    log((epoch,), step, 'dev_ema', {'loss': loss, 'wer': 100.0 * wer, 'took': time.time() - start_time})
    ema_model.train()
    return wer


def main():
    """End-to-end training: setup, train/eval loop, checkpointing, FB5 scoring."""

    args = parse_args()

    if args.mlperf:
        logging.configure_logger('RNNT')
        logging.log_start(logging.constants.INIT_START)

    if args.fb5logger is not None:
        fb5logger = FB5Logger(args.fb5logger)
        fb5logger.header("RNN-T", "OOTB", "train", args.fb5config, score_metric=loggerconstants.EXPS)

    assert(torch.cuda.is_available())
    # Prediction printing piggybacks on the stats-logging steps below.
    assert args.prediction_frequency is None or args.prediction_frequency % args.log_frequency == 0

    torch.backends.cudnn.benchmark = args.cudnn_benchmark

    # set up distributed training
    multi_gpu = int(os.environ.get('WORLD_SIZE', 1)) > 1
    if multi_gpu:
        torch.cuda.set_device(args.local_rank)
        dist.init_process_group(backend='nccl', init_method='env://')
        world_size = dist.get_world_size()
        print_once(f'Distributed training with {world_size} GPUs\n')
    else:
        world_size = 1

    if args.seed is not None:
        if args.mlperf:
            logging.log_event(logging.constants.SEED, value=args.seed)
        torch.manual_seed(args.seed + args.local_rank)
        np.random.seed(args.seed + args.local_rank)
        random.seed(args.seed + args.local_rank)
        # np_rng is used for buckets generation, and needs the same seed on every worker
        np_rng = np.random.default_rng(seed=args.seed)
    # NOTE(review): np_rng is only bound when --seed is given, but it is
    # passed to BucketingSampler below whenever --num_buckets is set —
    # running without --seed would raise NameError there. TODO confirm.

    init_log(args)

    cfg = config.load(args.model_config)
    config.apply_duration_flags(cfg, args.max_duration)

    assert args.grad_accumulation_steps >= 1
    assert args.batch_size % args.grad_accumulation_steps == 0, \
        f'{args.batch_size} % {args.grad_accumulation_steps} != 0'
    # Per-step micro-batch; the effective batch is restored by accumulation.
    batch_size = args.batch_size // args.grad_accumulation_steps
    if args.mlperf:
        logging.log_event(logging.constants.GRADIENT_ACCUMULATION_STEPS, value=args.grad_accumulation_steps)
        logging.log_event(logging.constants.SUBMISSION_BENCHMARK, value=logging.constants.RNNT)
        logging.log_event(logging.constants.SUBMISSION_ORG, value='my-organization')
        logging.log_event(logging.constants.SUBMISSION_DIVISION, value=logging.constants.CLOSED)  # closed or open
        logging.log_event(logging.constants.SUBMISSION_STATUS, value=logging.constants.ONPREM)  # on-prem/cloud/research
        logging.log_event(logging.constants.SUBMISSION_PLATFORM, value='my platform')
        logging.log_end(logging.constants.INIT_STOP)

    # Synchronize all ranks around the RUN_START timestamp.
    if multi_gpu:
        torch.distributed.barrier()
    if args.mlperf:
        logging.log_start(logging.constants.RUN_START)
    if multi_gpu:
        torch.distributed.barrier()

    print_once('Setting up datasets...')
    (
        train_dataset_kw,
        train_features_kw,
        train_splicing_kw,
        train_specaugm_kw,
    ) = config.input(cfg, 'train')
    (
        val_dataset_kw,
        val_features_kw,
        val_splicing_kw,
        val_specaugm_kw,
    ) = config.input(cfg, 'val')
    if args.mlperf:
        logging.log_event(logging.constants.DATA_TRAIN_MAX_DURATION,
                          value=train_dataset_kw['max_duration'])
        logging.log_event(logging.constants.DATA_SPEED_PERTURBATON_MAX,
                          value=train_dataset_kw['speed_perturbation']['max_rate'])
        logging.log_event(logging.constants.DATA_SPEED_PERTURBATON_MIN,
                          value=train_dataset_kw['speed_perturbation']['min_rate'])
        logging.log_event(logging.constants.DATA_SPEC_AUGMENT_FREQ_N,
                          value=train_specaugm_kw['freq_masks'])
        logging.log_event(logging.constants.DATA_SPEC_AUGMENT_FREQ_MIN,
                          value=train_specaugm_kw['min_freq'])
        logging.log_event(logging.constants.DATA_SPEC_AUGMENT_FREQ_MAX,
                          value=train_specaugm_kw['max_freq'])
        logging.log_event(logging.constants.DATA_SPEC_AUGMENT_TIME_N,
                          value=train_specaugm_kw['time_masks'])
        logging.log_event(logging.constants.DATA_SPEC_AUGMENT_TIME_MIN,
                          value=train_specaugm_kw['min_time'])
        logging.log_event(logging.constants.DATA_SPEC_AUGMENT_TIME_MAX,
                          value=train_specaugm_kw['max_time'])
        logging.log_event(logging.constants.GLOBAL_BATCH_SIZE,
                          value=batch_size * world_size * args.grad_accumulation_steps)

    tokenizer_kw = config.tokenizer(cfg)
    tokenizer = Tokenizer(**tokenizer_kw)

    class PermuteAudio(torch.nn.Module):
        # Reorders the audio feature tensor axes for the model.
        def forward(self, x):
            return (x[0].permute(2, 0, 1), *x[1:])

    # SpecAugment is optional: an empty/None kw dict falls back to Identity.
    train_augmentations = torch.nn.Sequential(
        train_specaugm_kw and features.SpecAugment(optim_level=args.amp, **train_specaugm_kw) or torch.nn.Identity(),
        features.FrameSplicing(optim_level=args.amp, **train_splicing_kw),
        PermuteAudio(),
    )
    val_augmentations = torch.nn.Sequential(
        val_specaugm_kw and features.SpecAugment(optim_level=args.amp, **val_specaugm_kw) or torch.nn.Identity(),
        features.FrameSplicing(optim_level=args.amp, **val_splicing_kw),
        PermuteAudio(),
    )

    if args.mlperf:
        logging.log_event(logging.constants.DATA_TRAIN_NUM_BUCKETS, value=args.num_buckets)

    if args.num_buckets is not None:
        sampler = dali_sampler.BucketingSampler(
            args.num_buckets,
            batch_size,
            world_size,
            args.epochs,
            np_rng
        )
    else:
        sampler = dali_sampler.SimpleSampler()

    train_loader = DaliDataLoader(gpu_id=args.local_rank,
                                  dataset_path=args.dataset_dir,
                                  config_data=train_dataset_kw,
                                  config_features=train_features_kw,
                                  json_names=args.train_manifests,
                                  batch_size=batch_size,
                                  sampler=sampler,
                                  grad_accumulation_steps=args.grad_accumulation_steps,
                                  pipeline_type="train",
                                  device_type=args.dali_device,
                                  tokenizer=tokenizer)

    val_loader = DaliDataLoader(gpu_id=args.local_rank,
                                dataset_path=args.dataset_dir,
                                config_data=val_dataset_kw,
                                config_features=val_features_kw,
                                json_names=args.val_manifests,
                                batch_size=args.val_batch_size,
                                sampler=dali_sampler.SimpleSampler(),
                                pipeline_type="val",
                                device_type=args.dali_device,
                                tokenizer=tokenizer)

    train_feat_proc = train_augmentations
    val_feat_proc = val_augmentations

    train_feat_proc.cuda()
    val_feat_proc.cuda()

    steps_per_epoch = len(train_loader) // args.grad_accumulation_steps

    if args.mlperf:
        logging.log_event(logging.constants.TRAIN_SAMPLES, value=train_loader.dataset_size)
        logging.log_event(logging.constants.EVAL_SAMPLES, value=val_loader.dataset_size)

    # set up the model
    rnnt_config = config.rnnt(cfg)
    rnnt_config['mlperf'] = args.mlperf
    if args.mlperf:
        logging.log_event(logging.constants.MODEL_WEIGHTS_INITIALIZATION_SCALE, value=args.weights_init_scale)
    if args.weights_init_scale is not None:
        rnnt_config['weights_init_scale'] = args.weights_init_scale
    if args.hidden_hidden_bias_scale is not None:
        rnnt_config['hidden_hidden_bias_scale'] = args.hidden_hidden_bias_scale
    # The extra class is the blank symbol, indexed right after the labels.
    model = RNNT(n_classes=tokenizer.num_labels + 1, **rnnt_config)
    model.cuda()
    blank_idx = tokenizer.num_labels
    loss_fn = RNNTLoss(blank_idx=blank_idx)
    if args.mlperf:
        logging.log_event(logging.constants.EVAL_MAX_PREDICTION_SYMBOLS, value=args.max_symbol_per_sample)
    greedy_decoder = RNNTGreedyDecoder(blank_idx=blank_idx,
                                       max_symbol_per_sample=args.max_symbol_per_sample)

    print_once(f'Model size: {num_weights(model) / 10**6:.1f}M params\n')

    opt_eps = 1e-9
    if args.mlperf:
        logging.log_event(logging.constants.OPT_NAME, value='lamb')
        logging.log_event(logging.constants.OPT_BASE_LR, value=args.lr)
        logging.log_event(logging.constants.OPT_LAMB_EPSILON, value=opt_eps)
        logging.log_event(logging.constants.OPT_LAMB_LR_DECAY_POLY_POWER, value=args.lr_exp_gamma)
        logging.log_event(logging.constants.OPT_LR_WARMUP_EPOCHS, value=args.warmup_epochs)
        logging.log_event(logging.constants.OPT_LAMB_LR_HOLD_EPOCHS, value=args.hold_epochs)
        logging.log_event(logging.constants.OPT_LAMB_BETA_1, value=args.beta1)
        logging.log_event(logging.constants.OPT_LAMB_BETA_2, value=args.beta2)
        logging.log_event(logging.constants.OPT_GRADIENT_CLIP_NORM, value=args.clip_norm)
        logging.log_event(logging.constants.OPT_LR_ALT_DECAY_FUNC, value=True)
        logging.log_event(logging.constants.OPT_LR_ALT_WARMUP_FUNC, value=True)
        logging.log_event(logging.constants.OPT_LAMB_LR_MIN, value=args.min_lr)
        logging.log_event(logging.constants.OPT_WEIGHT_DECAY, value=args.weight_decay)

    # optimization
    kw = {'params': model.param_groups(args.lr), 'lr': args.lr,
          'weight_decay': args.weight_decay}

    # Per-group starting LRs, captured before the scheduler mutates them.
    initial_lrs = [group['lr'] for group in kw['params']]

    print_once(f'Starting with LRs: {initial_lrs}')
    optimizer = FusedLAMB(betas=(args.beta1, args.beta2), eps=opt_eps, max_grad_norm=args.clip_norm, **kw)

    adjust_lr = lambda step, epoch: lr_policy(
        step, epoch, initial_lrs, optimizer, steps_per_epoch=steps_per_epoch,
        warmup_epochs=args.warmup_epochs, hold_epochs=args.hold_epochs,
        min_lr=args.min_lr, exp_gamma=args.lr_exp_gamma)

    if args.amp:
        model, optimizer = amp.initialize(
            models=model,
            optimizers=optimizer,
            opt_level='O1',
            max_loss_scale=512.0)

    # EMA copy tracks a smoothed version of the weights for evaluation.
    if args.ema > 0:
        ema_model = copy.deepcopy(model).cuda()
    else:
        ema_model = None
    if args.mlperf:
        logging.log_event(logging.constants.MODEL_EVAL_EMA_FACTOR, value=args.ema)

    if multi_gpu:
        model = DistributedDataParallel(model)

    # load checkpoint
    meta = {'best_wer': 10**6, 'start_epoch': 0}
    checkpointer = Checkpointer(args.output_dir, 'RNN-T',
                                args.keep_milestones, args.amp)
    if args.resume:
        args.ckpt = checkpointer.last_checkpoint() or args.ckpt

    if args.ckpt is not None:
        checkpointer.load(args.ckpt, model, ema_model, optimizer, meta)

    start_epoch = meta['start_epoch']
    best_wer = meta['best_wer']
    last_wer = meta['best_wer']
    epoch = 1
    step = start_epoch * steps_per_epoch + 1

    # FB5 Log for a certain amount of time.
    if args.fb5logger is not None:
        fb5logger.run_start()
    total_batches = 0
    start_time = time.time()
    MAX_TIME = 120.0  # wall-clock benchmark budget in seconds
    # Start Batch Loop

    # training loop
    model.train()
    for epoch in range(start_epoch + 1, args.epochs + 1):
        if args.mlperf:
            logging.log_start(logging.constants.BLOCK_START,
                              metadata=dict(first_epoch_num=epoch,
                                            epoch_count=1))
            logging.log_start(logging.constants.EPOCH_START,
                              metadata=dict(epoch_num=epoch))

        epoch_utts = 0
        accumulated_batches = 0
        epoch_start_time = time.time()

        for batch in train_loader:

            # First micro-batch of an optimizer step: refresh LR, clear grads.
            if accumulated_batches == 0:
                adjust_lr(step, epoch)
                optimizer.zero_grad()
                step_utts = 0
                step_start_time = time.time()
                all_feat_lens = []

            audio, audio_lens, txt, txt_lens = batch

            feats, feat_lens = train_feat_proc([audio, audio_lens])
            all_feat_lens += feat_lens

            log_probs, log_prob_lens = model(feats, feat_lens, txt, txt_lens)
            loss = loss_fn(log_probs[:, :log_prob_lens.max().item()],
                           log_prob_lens, txt, txt_lens)

            loss /= args.grad_accumulation_steps

            del log_probs, log_prob_lens

            if torch.isnan(loss).any():
                print_once('WARNING: loss is NaN; skipping update')
            else:
                if args.amp:
                    with amp.scale_loss(loss, optimizer) as scaled_loss:
                        scaled_loss.backward()
                else:
                    loss.backward()
            loss_item = loss.item()
            del loss
            step_utts += batch[0].size(0) * world_size
            epoch_utts += batch[0].size(0) * world_size
            accumulated_batches += 1
            total_batches += 1

            # Optimizer step once the micro-batches add up to a full batch.
            if accumulated_batches % args.grad_accumulation_steps == 0:

                total_norm = 0.0

                try:
                    if args.log_norm:
                        for p in getattr(model, 'module', model).parameters():
                            param_norm = p.grad.data.norm(2)
                            total_norm += param_norm.item() ** 2
                        total_norm = total_norm ** (1. / 2)
                except AttributeError as e:
                    # Parameters without grads (e.g. frozen) lack .grad.data.
                    print_once(f'Exception happened: {e}')
                    total_norm = 0.0

                optimizer.step()
                apply_ema(model, ema_model, args.ema)

                if step % args.log_frequency == 0 or (time.time() - start_time) > MAX_TIME:

                    if args.prediction_frequency is None or step % args.prediction_frequency == 0:
                        preds = greedy_decoder.decode(model, feats, feat_lens)
                        wer, pred_utt, ref = greedy_wer(preds,
                                                        txt,
                                                        txt_lens,
                                                        tokenizer.detokenize)
                        print_once(f' Decoded: {pred_utt[:90]}')
                        print_once(f' Reference: {ref[:90]}')
                        wer = {'wer': 100 * wer}
                    else:
                        wer = {}

                    step_time = time.time() - step_start_time

                    log((epoch, step % steps_per_epoch or steps_per_epoch, steps_per_epoch),
                        step, 'train',
                        {'loss': loss_item,
                         **wer,  # optional entry
                         'throughput': step_utts / step_time,
                         'took': step_time,
                         'grad-norm': total_norm,
                         'seq-len-min': min(all_feat_lens).item(),
                         'seq-len-max': max(all_feat_lens).item(),
                         'lrate': optimizer.param_groups[0]['lr']})

                    # FB5 Logger: stop this epoch once the time budget is spent.
                    if (time.time() - start_time) > MAX_TIME:
                        break

                step_start_time = time.time()

                step += 1
                accumulated_batches = 0
                # end of step
        if args.mlperf:
            logging.log_end(logging.constants.EPOCH_STOP,
                            metadata=dict(epoch_num=epoch))

        epoch_time = time.time() - epoch_start_time
        log((epoch,), None, 'train_avg', {'throughput': epoch_utts / epoch_time,
                                          'took': epoch_time})

        # FB5 Logger: leave the epoch loop as well when out of time budget.
        if (time.time() - start_time) > MAX_TIME:
            break

        if epoch % args.val_frequency == 0:
            wer = evaluate(epoch, step, val_loader, val_feat_proc,
                           tokenizer.detokenize, ema_model, loss_fn,
                           greedy_decoder, args.amp, args)

            last_wer = wer
            if wer < best_wer and epoch >= args.save_best_from:
                checkpointer.save(model, ema_model, optimizer, epoch,
                                  step, best_wer, is_best=True)
                best_wer = wer

        save_this_epoch = (args.save_frequency is not None and epoch % args.save_frequency == 0) \
            or (epoch in args.keep_milestones)
        if save_this_epoch:
            checkpointer.save(model, ema_model, optimizer, epoch, step, best_wer)
        if args.mlperf:
            logging.log_end(logging.constants.BLOCK_STOP, metadata=dict(first_epoch_num=epoch))

        # Target WER reached: record success and stop.
        if last_wer <= args.target:
            if args.mlperf:
                logging.log_end(logging.constants.RUN_STOP, metadata={'status': 'success'})
            if args.fb5logger is not None:
                fb5logger.run_stop(total_batches, args.batch_size)
            print_once(f'Finished after {args.epochs_this_job} epochs.')
            break
        if 0 < args.epochs_this_job <= epoch - start_epoch:
            print_once(f'Finished after {args.epochs_this_job} epochs.')
            break
        # end of epoch

    log((), None, 'train_avg', {'throughput': epoch_utts / epoch_time})

    # Run ended without hitting the WER target (time budget or epoch cap).
    if last_wer > args.target:
        if args.mlperf:
            logging.log_end(logging.constants.RUN_STOP, metadata={'status': 'aborted'})
        if args.fb5logger is not None:
            fb5logger.run_stop(total_batches, args.batch_size)

    if epoch == args.epochs:
        evaluate(epoch, step, val_loader, val_feat_proc, tokenizer.detokenize,
                 ema_model, loss_fn, greedy_decoder, args.amp, args)

    flush_log()
    if args.save_at_the_end:
        checkpointer.save(model, ema_model, optimizer, epoch, step, best_wer)


if __name__ == "__main__":
    main()
#!/usr/bin/env python
"""Convert a LibriSpeech download (flac + transcripts) into wav + a JSON manifest."""
import argparse
import os
import glob
import multiprocessing
import json

import pandas as pd

from preprocessing_utils import parallel_preprocess

parser = argparse.ArgumentParser(description='Preprocess LibriSpeech.')
parser.add_argument('--input_dir', type=str, required=True,
                    help='LibriSpeech collection input dir')
parser.add_argument('--dest_dir', type=str, required=True,
                    help='Output dir')
# NOTE(review): the default './' is a directory, but the value is opened
# with open(..., 'w') below — the script appears to require an explicit
# --output_json file path in practice. TODO confirm.
parser.add_argument('--output_json', type=str, default='./',
                    help='name of the output json file.')
parser.add_argument('-s', '--speed', type=float, nargs='*',
                    help='Speed perturbation ratio')
parser.add_argument('--target_sr', type=int, default=None,
                    help='Target sample rate. '
                         'defaults to the input sample rate')
parser.add_argument('--overwrite', action='store_true',
                    help='Overwrite file if exists')
parser.add_argument('--parallel', type=int, default=multiprocessing.cpu_count(),
                    help='Number of threads to use when processing audio files')
args = parser.parse_args()

# Normalize so path joins below do not produce double slashes.
args.input_dir = args.input_dir.rstrip('/')
args.dest_dir = args.dest_dir.rstrip('/')


def build_input_arr(input_dir):
    """Scan input_dir for *.trans.txt files and return one record per utterance.

    Each record holds the relative directory, the expected .flac filename and
    the transcript text taken from the corresponding transcript line.
    """
    txt_files = glob.glob(os.path.join(input_dir, '**', '*.trans.txt'),
                          recursive=True)
    input_data = []
    for txt_file in txt_files:
        rel_path = os.path.relpath(txt_file, input_dir)
        with open(txt_file) as fp:
            for line in fp:
                # Transcript lines are '<utterance-id> <text...>'.
                fname, _, transcript = line.partition(' ')
                input_data.append(dict(input_relpath=os.path.dirname(rel_path),
                                       input_fname=fname+'.flac',
                                       transcript=transcript))
    return input_data


print("[%s] Scaning input dir..." % args.output_json)
dataset = build_input_arr(input_dir=args.input_dir)

print("[%s] Converting audio files..." % args.output_json)
dataset = parallel_preprocess(dataset=dataset,
                              input_dir=args.input_dir,
                              dest_dir=args.dest_dir,
                              target_sr=args.target_sr,
                              speed=args.speed,
                              overwrite=args.overwrite,
                              parallel=args.parallel)

print("[%s] Generating json..." % args.output_json)
df = pd.DataFrame(dataset, dtype=object)

# Save json with python. df.to_json() produces back slashed in file paths
dataset = df.to_dict(orient='records')
with open(args.output_json, 'w') as fp:
    json.dump(dataset, fp, indent=2)
#!/usr/bin/env python
"""Download, checksum-verify and extract dataset archives listed in a CSV.

The CSV must have 'url' and 'md5' columns; each stage can be skipped
independently via --skip_* flags.
NOTE(review): several user-facing strings contain typos ('destnation',
'Verifing') — left untouched here since they are runtime output.
"""

import os
import argparse
import pandas as pd

from download_utils import download_file, md5_checksum, extract

parser = argparse.ArgumentParser(description='Download, verify and extract dataset files')
parser.add_argument('csv', type=str,
                    help='CSV file with urls and checksums to download.')
parser.add_argument('dest', type=str,
                    help='Download destnation folder.')
parser.add_argument('-e', type=str, default=None,
                    help='Extraction destnation folder. Defaults to download folder if not provided')
parser.add_argument('--skip_download', action='store_true',
                    help='Skip downloading the files')
parser.add_argument('--skip_checksum', action='store_true',
                    help='Skip checksum')
parser.add_argument('--skip_extract', action='store_true',
                    help='Skip extracting files')
args = parser.parse_args()
# Extraction folder defaults to the download folder.
args.e = args.e or args.dest


df = pd.read_csv(args.csv, delimiter=',')


# Stage 1: download each archive into args.dest (filename from the URL tail).
if not args.skip_download:
    for url in df.url:
        fname = url.split('/')[-1]
        print("Downloading %s:" % fname)
        download_file(url=url, dest_folder=args.dest, fname=fname)
else:
    print("Skipping file download")


# Stage 2: verify MD5 of every downloaded file against the CSV.
# Failures are only printed, not fatal.
if not args.skip_checksum:
    for index, row in df.iterrows():
        url = row['url']
        md5 = row['md5']
        fname = url.split('/')[-1]
        fpath = os.path.join(args.dest, fname)
        print("Verifing %s: " % fname, end='')
        ret = md5_checksum(fpath=fpath, target_hash=md5)
        print("Passed" if ret else "Failed")
else:
    print("Skipping checksum")


# Stage 3: extract every archive into args.e.
if not args.skip_extract:
    for url in df.url:
        fname = url.split('/')[-1]
        fpath = os.path.join(args.dest, fname)
        print("Decompressing %s:" % fpath)
        extract(fpath=fpath, dest_folder=args.e)
else:
    print("Skipping file extraction")
+ +#!/usr/bin/env python + +import hashlib +import requests +import os +import tarfile +import tqdm + +def download_file(url, dest_folder, fname, overwrite=False): + fpath = os.path.join(dest_folder, fname) + if os.path.isfile(fpath): + if overwrite: + print("Overwriting existing file") + else: + print("File exists, skipping download.") + return + + tmp_fpath = fpath + '.tmp' + + r = requests.get(url, stream=True) + file_size = int(r.headers['Content-Length']) + chunk_size = 1024 * 1024 # 1MB + total_chunks = int(file_size / chunk_size) + + with open(tmp_fpath, 'wb') as fp: + content_iterator = r.iter_content(chunk_size=chunk_size) + chunks = tqdm.tqdm(content_iterator, total=total_chunks, + unit='MB', desc=fpath, leave=True) + for chunk in chunks: + fp.write(chunk) + + os.rename(tmp_fpath, fpath) + + +def md5_checksum(fpath, target_hash): + file_hash = hashlib.md5() + with open(fpath, "rb") as fp: + for chunk in iter(lambda: fp.read(1024*1024), b""): + file_hash.update(chunk) + return file_hash.hexdigest() == target_hash + + +def extract(fpath, dest_folder): + if fpath.endswith('.tar.gz'): + mode = 'r:gz' + elif fpath.endswith('.tar'): + mode = 'r:' + else: + raise IOError('fpath has unknown extension: %s' % fpath) + + with tarfile.open(fpath, mode) as tar: + members = tar.getmembers() + for member in tqdm.tqdm(iterable=members, total=len(members), leave=True): + tar.extract(path=dest_folder, member=member) diff --git a/benchmarks/rnnt/ootb/train/utils/inference_librispeech.csv b/benchmarks/rnnt/ootb/train/utils/inference_librispeech.csv new file mode 100644 index 0000000..40dac4e --- /dev/null +++ b/benchmarks/rnnt/ootb/train/utils/inference_librispeech.csv @@ -0,0 +1,5 @@ +url,md5 +http://www.openslr.org/resources/12/dev-clean.tar.gz,42e2234ba48799c1f50f24a7926300a1 +http://www.openslr.org/resources/12/dev-other.tar.gz,c8d0bcc9cca99d4f8b62fcc847357931 +http://www.openslr.org/resources/12/test-clean.tar.gz,32fa31d27d2e1cad72775fee3f4849a9
+http://www.openslr.org/resources/12/test-other.tar.gz,fb5a50374b501bb3bac4815ee91d3135 diff --git a/benchmarks/rnnt/ootb/train/utils/librispeech.csv b/benchmarks/rnnt/ootb/train/utils/librispeech.csv new file mode 100644 index 0000000..d48a9f8 --- /dev/null +++ b/benchmarks/rnnt/ootb/train/utils/librispeech.csv @@ -0,0 +1,8 @@ +url,md5 +http://www.openslr.org/resources/12/dev-clean.tar.gz,42e2234ba48799c1f50f24a7926300a1 +http://www.openslr.org/resources/12/dev-other.tar.gz,c8d0bcc9cca99d4f8b62fcc847357931 +http://www.openslr.org/resources/12/test-clean.tar.gz,32fa31d27d2e1cad72775fee3f4849a9 +http://www.openslr.org/resources/12/test-other.tar.gz,fb5a50374b501bb3bac4815ee91d3135 +http://www.openslr.org/resources/12/train-clean-100.tar.gz,2a93770f6d5c6c964bc36631d331a522 +http://www.openslr.org/resources/12/train-clean-360.tar.gz,c0e676e450a7ff2f54aeade5171606fa +http://www.openslr.org/resources/12/train-other-500.tar.gz,d1a0fd59409feb2c614ce4d30c387708 diff --git a/benchmarks/rnnt/ootb/train/utils/preprocessing_utils.py b/benchmarks/rnnt/ootb/train/utils/preprocessing_utils.py new file mode 100644 index 0000000..15605ce --- /dev/null +++ b/benchmarks/rnnt/ootb/train/utils/preprocessing_utils.py @@ -0,0 +1,76 @@ +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +#!/usr/bin/env python +import os +import multiprocessing +import librosa +import functools + +import sox + + +from tqdm import tqdm + +def preprocess(data, input_dir, dest_dir, target_sr=None, speed=None, + overwrite=True): + speed = speed or [] + speed.append(1) + speed = list(set(speed)) # Make unique + + input_fname = os.path.join(input_dir, + data['input_relpath'], + data['input_fname']) + input_sr = sox.file_info.sample_rate(input_fname) + target_sr = target_sr or input_sr + + os.makedirs(os.path.join(dest_dir, data['input_relpath']), exist_ok=True) + + output_dict = {} + output_dict['transcript'] = data['transcript'].lower().strip() + output_dict['files'] = [] + + fname = os.path.splitext(data['input_fname'])[0] + for s in speed: + output_fname = fname + '{}.wav'.format('' if s==1 else '-{}'.format(s)) + output_fpath = os.path.join(dest_dir, + data['input_relpath'], + output_fname) + + if not os.path.exists(output_fpath) or overwrite: + cbn = sox.Transformer().speed(factor=s).convert(target_sr) + cbn.build(input_fname, output_fpath) + + file_info = sox.file_info.info(output_fpath) + file_info['fname'] = os.path.join(os.path.basename(dest_dir), + data['input_relpath'], + output_fname) + file_info['speed'] = s + output_dict['files'].append(file_info) + + if s == 1: + file_info = sox.file_info.info(output_fpath) + output_dict['original_duration'] = file_info['duration'] + output_dict['original_num_samples'] = file_info['num_samples'] + + return output_dict + + +def parallel_preprocess(dataset, input_dir, dest_dir, target_sr, speed, overwrite, parallel): + with multiprocessing.Pool(parallel) as p: + func = functools.partial(preprocess, + input_dir=input_dir, dest_dir=dest_dir, + target_sr=target_sr, speed=speed, overwrite=overwrite) + dataset = list(tqdm(p.imap(func, dataset), total=len(dataset))) + return dataset diff --git a/benchmarks/run_all.sh b/benchmarks/run_all.sh new file mode 100755 index 0000000..cfb716f --- /dev/null +++ b/benchmarks/run_all.sh @@
-0,0 +1,19 @@ +#!/bin/bash + +# Run all major benchmarks with tiny configs as an example +# Also used as a test script to make sure benchmarks run correctly. + +# DLRM OOTB +./run_dlrm_ootb_infer.sh -l results +./run_dlrm_ootb_train.sh -l results # ootb configs use config files. See docs/DLRM.md + +# DLRM UBench +./run_dlrm_ubench_train_linear.sh -c "[(2,2,2,2,2)]" -l results # Config not real +./run_dlrm_ubench_train_embeddingbag.sh -l results -c "[(2,2,2,2),(2,2,2,2),(2,2,2,2),(2,2,2,2),(2,2,2,2)]" # Config not real + +# XLMR OOTB +./run_xlmr_ootb.sh + +# view options: [raw_view -> pure json, intermediate_view -> nice table] +# intermediate view recommended for filling out table +python ../fb5logging/result_summarizer.py -f results -v intermediate_view \ No newline at end of file diff --git a/benchmarks/run_dlrm_ootb_infer.sh b/benchmarks/run_dlrm_ootb_infer.sh new file mode 100755 index 0000000..84d4841 --- /dev/null +++ b/benchmarks/run_dlrm_ootb_infer.sh @@ -0,0 +1,57 @@ +#!/bin/bash + +printUsage() { + echo + echo "Usage: $(basename "$0") " + echo + echo "Options:" + echo " -h Prints this help." + echo " -l Saves FB5 Log to specified directory in first argument." + echo " -c Runs the command in the config file instead of the default config." 
+ echo + return 0 +} + +if [ "$1" == "" ]; then + printUsage + exit 0 +fi + +# Default values +benchmark=dlrm +implementation=ootb +mode=eval +config=tiny # default is tiny, proof of concept +is_config=false + +while getopts "hl:c:" flag ; +do + case "${flag}" in + h) + printUsage ; exit 0 ;; + l) + LOG_DIR=${OPTARG} ;; + c) + config=${OPTARG} ; is_config=true ;; + esac +done + +LOGGER_FILE="${LOG_DIR}/${benchmark}_${implementation}_${mode}_${config}.log" + +echo "=== Launching FB5 ===" +echo "Benchmark: ${benchmark}" +echo "Implementation: ${implementation}" +echo "Mode: ${mode}" +echo "Config: ${config}" +echo "Saving FB5 Logger File: ${LOGGER_FILE}" +echo +echo "Running Command:" + +if [ "$is_config" = true ]; then + config_flags=$(head -n 1 "${config}") + (set -x; python "${benchmark}/${implementation}/dlrm_s_pytorch.py" --inference-only ${config_flags} --fb5logger=${LOGGER_FILE} --fb5config=${config} 2>&1) +else + (set -x; python "${benchmark}/${implementation}/dlrm_s_pytorch.py" --inference-only --mini-batch-size=64 --test-mini-batch-size=64 --test-num-workers=0 --data-generation=random --arch-mlp-bot=512-512-64 --arch-mlp-top=1024-1024-1024-1 --arch-sparse-feature-size=64 --arch-embedding-size=1000000-1000000-1000000-1000000-1000000-1000000-1000000-1000000 --num-indices-per-lookup=100 --numpy-rand-seed=727 --num-batches=200 --print-freq=20 --print-time --fb5logger=${LOGGER_FILE} --fb5config=${config} 2>&1) +fi + +echo "=== Completed Run ===" diff --git a/benchmarks/run_dlrm_ootb_train.sh b/benchmarks/run_dlrm_ootb_train.sh new file mode 100755 index 0000000..fdd6d50 --- /dev/null +++ b/benchmarks/run_dlrm_ootb_train.sh @@ -0,0 +1,57 @@ +#!/bin/bash + +printUsage() { + echo + echo "Usage: $(basename "$0") " + echo + echo "Options:" + echo " -h Prints this help." + echo " -l Saves FB5 Log to specified directory in first argument." + echo " -c Runs the command in the config file instead of the default config." 
+ echo + return 0 +} + +if [ "$1" == "" ]; then + printUsage + exit 0 +fi + +# Default values +benchmark=dlrm +implementation=ootb +mode=train +config=tiny # default is tiny, proof of concept +is_config=false + +while getopts "hl:c:" flag ; +do + case "${flag}" in + h) + printUsage ; exit 0 ;; + l) + LOG_DIR=${OPTARG} ;; + c) + config=${OPTARG} ; is_config=true ;; + esac +done + +LOGGER_FILE="${LOG_DIR}/${benchmark}_${implementation}_${mode}_${config}.log" + +echo "=== Launching FB5 ===" +echo "Benchmark: ${benchmark}" +echo "Implementation: ${implementation}" +echo "Mode: ${mode}" +echo "Config: ${config}" +echo "Saving FB5 Logger File: ${LOGGER_FILE}" +echo +echo "Running Command:" + +if [ "$is_config" = true ]; then + config_flags=$(head -n 1 "${config}") + (set -x; python "${benchmark}/${implementation}/dlrm_s_pytorch.py" ${config_flags} --fb5logger=${LOGGER_FILE} --fb5config=${config} 2>&1) +else + (set -x; python "${benchmark}/${implementation}/dlrm_s_pytorch.py" --mini-batch-size=64 --test-mini-batch-size=64 --test-num-workers=0 --data-generation=random --arch-mlp-bot=512-512-64 --arch-mlp-top=1024-1024-1024-1 --arch-sparse-feature-size=64 --arch-embedding-size=1000000-1000000-1000000-1000000-1000000-1000000-1000000-1000000 --num-indices-per-lookup=100 --numpy-rand-seed=727 --num-batches=200 --print-freq=20 --print-time --fb5logger=${LOGGER_FILE} --fb5config=${config} 2>&1) +fi + +echo "=== Completed Run ===" diff --git a/benchmarks/run_dlrm_ubench_train_allreduce.sh b/benchmarks/run_dlrm_ubench_train_allreduce.sh new file mode 100644 index 0000000..672cee1 --- /dev/null +++ b/benchmarks/run_dlrm_ubench_train_allreduce.sh @@ -0,0 +1,60 @@ +#!/bin/bash +printUsage() { + echo + echo "Usage: $(basename "$0") " + echo + echo "Options:" + echo " -h Prints this help." + echo " -l Saves FB5 Log to specified directory in first argument." 
+ echo " -c Size in bytes" + echo + return 0 +} + +if [ "$1" == "" ]; then + printUsage + exit 0 +fi + +# Default values +benchmark=dlrm +implementation=ubench +mode=train +collective=allreduce +size=small + +while getopts "hl:c:" flag ; +do + case "${flag}" in + h) + printUsage ; exit 0 ;; + l) + LOG_DIR=${OPTARG} ;; + c) + size=${OPTARG} ; size_specified=true ;; + esac +done + +size_name=${size} +if [ $size -eq 2200 ]; then + size_name=small +elif [ $size -eq 9944 ]; then + size_name=medium +elif [ $size -eq 22372 ]; then + size_name=large +fi + +LOGGER_FILE="${LOG_DIR}/${benchmark}_${implementation}_${mode}_${collective}_${size_name}.log" + +echo "=== Launching FB5 ===" +echo "Benchmark: ${benchmark}" +echo "Implementation: ${implementation}" +echo "Mode: ${mode}" +echo "Collective: ${collective}" +echo "Size: ${size}" +echo "Saving FB5 Logger File: ${LOGGER_FILE}" +echo "Running Command:" + +(set -x; python3 "${benchmark}/${implementation}/dlrm_ubench_comms_driver.py" --fb5logger=${LOGGER_FILE} --collective=all_reduce --size=${size} 2>&1) + +echo "=== Completed Run ===" diff --git a/benchmarks/run_dlrm_ubench_train_alltoall.sh b/benchmarks/run_dlrm_ubench_train_alltoall.sh new file mode 100755 index 0000000..53e9f48 --- /dev/null +++ b/benchmarks/run_dlrm_ubench_train_alltoall.sh @@ -0,0 +1,60 @@ +#!/bin/bash +printUsage() { + echo + echo "Usage: $(basename "$0") " + echo + echo "Options:" + echo " -h Prints this help." + echo " -l Saves FB5 Log to specified directory in first argument." 
+ echo " -c Size in bytes" + echo + return 0 +} + +if [ "$1" == "" ]; then + printUsage + exit 0 +fi + +# Default values +benchmark=dlrm +implementation=ubench +mode=train +collective=alltoall +size=small + +while getopts "hl:c:" flag ; +do + case "${flag}" in + h) + printUsage ; exit 0 ;; + l) + LOG_DIR=${OPTARG} ;; + c) + size=${OPTARG} ; size_specified=true ;; + esac +done + +size_name=${size} +if [ $size -eq 134000000 ]; then + size_name=small +elif [ $size -eq 244000000 ]; then + size_name=medium +elif [ $size -eq 544000000 ]; then + size_name=large +fi + +LOGGER_FILE="${LOG_DIR}/${benchmark}_${implementation}_${mode}_${collective}_${size_name}.log" + +echo "=== Launching FB5 ===" +echo "Benchmark: ${benchmark}" +echo "Implementation: ${implementation}" +echo "Mode: ${mode}" +echo "Collective: ${collective}" +echo "Size: ${size}" +echo "Saving FB5 Logger File: ${LOGGER_FILE}" +echo "Running Command:" + +(set -x; python3 "${benchmark}/${implementation}/dlrm_ubench_comms_driver.py" --fb5logger=${LOGGER_FILE} --collective=all_to_all --size=${size} 2>&1) + +echo "=== Completed Run ===" diff --git a/benchmarks/run_dlrm_ubench_train_embeddingbag.sh b/benchmarks/run_dlrm_ubench_train_embeddingbag.sh new file mode 100755 index 0000000..de37695 --- /dev/null +++ b/benchmarks/run_dlrm_ubench_train_embeddingbag.sh @@ -0,0 +1,36 @@ +steps=100 +device='cpu' +dataset='A' + +usage() { echo "Usage: $0 [-s ] [-d <'cpu'|'gpu'>] [-l ]"; exit 1; } + +while getopts "s:d:l:c:h" flag +do + case "${flag}" in + s) steps=${OPTARG};; + d) device=${OPTARG};; + l) LOG_DIR=${OPTARG} ;; + c) dataset=${OPTARG} ;; + h) usage + esac +done +shift $((OPTIND-1)) + +benchmark=dlrm +implementation=ubench +mode=train +config=embeddingbag_${dataset} +LOGGER_FILE="${LOG_DIR}/${benchmark}_${implementation}_${mode}_${config}.log" + +echo "=== Launching FB5 ===" +echo "Benchmark: ${benchmark}" +echo "Implementation: ${implementation}" +echo "Mode: ${mode}" +echo "Config: ${config}" +echo "Saving FB5 
Logger File: ${LOGGER_FILE}" +echo +echo "Running Command:" + +(set -x; python dlrm/ubench/dlrm_ubench_train_driver.py --steps=$steps --device=$device --fb5logger=${LOGGER_FILE} emb --dataset="${dataset}" 2>&1) + +echo "=== Completed Run ===" diff --git a/benchmarks/run_dlrm_ubench_train_linear.sh b/benchmarks/run_dlrm_ubench_train_linear.sh new file mode 100755 index 0000000..9f230bb --- /dev/null +++ b/benchmarks/run_dlrm_ubench_train_linear.sh @@ -0,0 +1,36 @@ +steps=100 +device='cpu' +dataset='A' + +usage() { echo "Usage: $0 [-s ] [-d <'cpu'|'gpu'>] [-l ]"; exit 1; } + +while getopts "s:d:l:c:h" flag +do + case "${flag}" in + s) steps=${OPTARG};; + d) device=${OPTARG};; + l) LOG_DIR=${OPTARG} ;; + c) dataset=${OPTARG} ;; + h) usage + esac +done +shift $((OPTIND-1)) + +benchmark=dlrm +implementation=ubench +mode=train +config=linear_${dataset} +LOGGER_FILE="${LOG_DIR}/${benchmark}_${implementation}_${mode}_${config}.log" + +echo "=== Launching FB5 ===" +echo "Benchmark: ${benchmark}" +echo "Implementation: ${implementation}" +echo "Mode: ${mode}" +echo "Config: ${config}" +echo "Saving FB5 Logger File: ${LOGGER_FILE}" +echo +echo "Running Command:" + +(set -x; python dlrm/ubench/dlrm_ubench_train_driver.py --steps=$steps --device=$device --fb5logger=${LOGGER_FILE} linear --dataset="${dataset}" 2>&1) + +echo "=== Completed Run ===" diff --git a/benchmarks/run_rnnt_ootb_infer.sh b/benchmarks/run_rnnt_ootb_infer.sh new file mode 100755 index 0000000..66ed79e --- /dev/null +++ b/benchmarks/run_rnnt_ootb_infer.sh @@ -0,0 +1,69 @@ +#!/bin/bash + +printUsage() { + echo + echo "Usage: $(basename "$0") " + echo + echo "Options:" + echo " -h Prints this help." + echo " -l Saves FB5 Log to specified directory in first argument." + echo " -c Runs the command in the config file instead of the default config." 
+ echo + return 0 +} + +if [ "$1" == "" ]; then + printUsage + exit 0 +fi + +# Default values +benchmark=rnnt +implementation=ootb +mode=eval +config=tiny # default is tiny, proof of concept +is_config=false + +while getopts "hl:c:" flag ; +do + case "${flag}" in + h) + printUsage ; exit 0 ;; + l) + LOG_DIR=${OPTARG} ;; + c) + config=${OPTARG} ; is_config=true ;; + esac +done + +# Resolve to absolute directory to support both relative and absolute dirs. +ABSOLUTE_LOG_DIR=`readlink -f ${LOG_DIR}` +LOGGER_FILE="${ABSOLUTE_LOG_DIR}/${benchmark}_${implementation}_${mode}_${config}.log" + +echo "=== Launching FB5 ===" +echo "Benchmark: ${benchmark}" +echo "Implementation: ${implementation}" +echo "Mode: ${mode}" +echo "Config: ${config}" +echo "Saving FB5 Logger File: ${LOGGER_FILE}" +echo +echo "Running Command:" + +if [[ -z "$DATASET_DIR" ]]; then + echo "ERROR: DATASET_DIR not set!" + exit 1 +fi +if [[ -z "$RESULT_DIR" ]]; then + echo "ERROR: RESULT_DIR not set!" + exit 1 +fi + +benchmark_root=${benchmark}/${implementation}/inference +if [ "$is_config" = true ]; then + config_flags=$(head -n 1 "${config}") + (set -x; python ${benchmark_root}/run.py --backend pytorch --dataset_dir ${DATASET_DIR}/LibriSpeech --manifest ${DATASET_DIR}/LibriSpeech/librispeech-dev-clean-wav.json --pytorch_config_toml ${benchmark_root}/pytorch/configs/rnnt.toml --pytorch_checkpoint ${RESULT_DIR}/rnnt.pt --scenario Offline --log_dir ${RESULT_DIR}/Offline_pytorch_rerun --fb5logger ${LOGGER_FILE} --fb5config ${config} ${config_flags} 2>&1) +else + (set -x; python ${benchmark_root}/run.py --backend pytorch --dataset_dir ${DATASET_DIR}/LibriSpeech --manifest ${DATASET_DIR}/LibriSpeech/librispeech-dev-clean-wav.json --pytorch_config_toml ${benchmark_root}/pytorch/configs/rnnt.toml --pytorch_checkpoint ${RESULT_DIR}/rnnt.pt --scenario Offline --log_dir ${RESULT_DIR}/Offline_pytorch_rerun --fb5logger ${LOGGER_FILE} --fb5config ${config} 2>&1) +fi + +echo "=== Completed Run ===" diff --git 
a/benchmarks/run_rnnt_ootb_train.sh b/benchmarks/run_rnnt_ootb_train.sh new file mode 100755 index 0000000..02f3400 --- /dev/null +++ b/benchmarks/run_rnnt_ootb_train.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +printUsage() { + echo + echo "Usage: $(basename "$0") " + echo + echo "Options:" + echo " -h Prints this help." + echo " -l Saves FB5 Log to specified directory in first argument." + echo " -c Runs the command in the config file instead of the default config." + echo + return 0 +} + +if [ "$1" == "" ]; then + printUsage + exit 0 +fi + +# Default values +benchmark=rnnt +implementation=ootb +mode=train +config=tiny # default is tiny, proof of concept +is_config=false + +while getopts "hl:c:" flag ; +do + case "${flag}" in + h) + printUsage ; exit 0 ;; + l) + LOG_DIR=${OPTARG} ;; + c) + config=${OPTARG} ; is_config=true ;; + esac +done + +# Resolve to absolute directory to support both relative and absolute dirs. +ABSOLUTE_LOG_DIR=`readlink -f ${LOG_DIR}` +LOGGER_FILE="${ABSOLUTE_LOG_DIR}/${benchmark}_${implementation}_${mode}_${config}.log" + +echo "=== Launching FB5 ===" +echo "Benchmark: ${benchmark}" +echo "Implementation: ${implementation}" +echo "Mode: ${mode}" +echo "Config: ${config}" +echo "Saving FB5 Logger File: ${LOGGER_FILE}" +echo +echo "Running Command:" + +if [[ -z "$DATASET_DIR" ]]; then + echo "ERROR: DATASET_DIR not set!" + exit 1 +fi +if [[ -z "$RESULT_DIR" ]]; then + echo "ERROR: RESULT_DIR not set!" 
+ exit 1 +fi + +if [ "$is_config" = true ]; then + config_flags=$(head -n 1 "${config}") + (set -x; bash ${benchmark}/${implementation}/${mode}/scripts/train.sh ${DATASET_DIR}/LibriSpeech ${benchmark}/${implementation}/${mode}/configs/baseline_v3-1023sp.yaml ${RESULT_DIR} ${LOGGER_FILE} ${config} ${config_flags} 2>&1) +else + (set -x; bash ${benchmark}/${implementation}/${mode}/scripts/train.sh ${DATASET_DIR}/LibriSpeech ${benchmark}/${implementation}/${mode}/configs/baseline_v3-1023sp.yaml ${RESULT_DIR} ${LOGGER_FILE} ${config} 2>&1) +fi + +echo "=== Completed Run ===" diff --git a/benchmarks/run_xlmr_ootb.sh b/benchmarks/run_xlmr_ootb.sh new file mode 100755 index 0000000..09c41a6 --- /dev/null +++ b/benchmarks/run_xlmr_ootb.sh @@ -0,0 +1,28 @@ +#!/bin/bash + +# fixed values +benchmark=xlmr +implementation=ootb + +# default values +config_name=default-config +nbatches=10 +batchsize=16 +seqlength=16 +vocabsize=250000 +LOG_DIR=results +config_flags="--inference-only --num-batches=${nbatches} --batch-size=${batchsize} --sequence-length=${seqlength} --vocab-size=${vocabsize} --famconfig=${config_name} --half-model" + +while getopts "hl:c:" flag ; +do + case "${flag}" in + h) + printUsage ; exit 0 ;; + l) + LOG_DIR=${OPTARG} ;; + c) + config_flags=${OPTARG} ;; + esac +done + +(set -x; python "${benchmark}/${implementation}/xlmr.py" ${config_flags} --logdir=${LOG_DIR}) diff --git a/benchmarks/setup_rnnt.sh b/benchmarks/setup_rnnt.sh new file mode 100644 index 0000000..c4e16fb --- /dev/null +++ b/benchmarks/setup_rnnt.sh @@ -0,0 +1,93 @@ +#!/bin/bash + +set -euox pipefail + +# Check for two main directory exports required for RNN-T. +if [[ -z "$DATASET_DIR" ]]; then + echo "ERROR: DATASET_DIR not set! Please set using export DATASET_DIR=\"\"!" + exit 1 +fi +if [[ -z "$RESULT_DIR" ]]; then + echo "ERROR: RESULT_DIR not set! Please set using export RESULT_DIR=\"\"!"
+ exit 1 +fi + +# Setting up the conda environment +set +u +source "$($CONDA_EXE info --base)/etc/profile.d/conda.sh" +conda create -n proxy-rnnt python=3.8.3 +conda activate proxy-rnnt + +# Install PyTorch dependencies +pip install requests bs4 argparse +conda install pytorch==1.7.0 torchvision==0.8.0 torchaudio==0.7.0 cudatoolkit=11.0 -c pytorch + +# Switch to CUDA 11.0 +if [ ! -d "deps/switch-cuda" ]; then + git clone https://github.com/phohenecker/switch-cuda.git deps/switch-cuda +fi +source deps/switch-cuda/switch-cuda.sh 11.0 +export TORCH_CUDA_ARCH_LIST=8.0 + +# Install required packages +sudo apt-get install sox libsndfile1 jq numactl cmake +pip install unidecode==1.1.1 inflect==4.1.0 pandas==1.1.5 sentencepiece==0.1.94 librosa==0.8.0 soundfile==0.10.3.post1 tensorboard==2.3.0 numba==0.48.0 + +# Install dllogger and mlcommons logger +pip install https://github.com/NVIDIA/dllogger/archive/26a0f8f1958de2c0c460925ff6102a4d2486d6cc.zip +pip install https://github.com/mlcommons/logging/archive/d08740cadb4188a5ebeb84ad6c68f98c1e129805.zip + +# Install Nvidia Dali +pip install --no-cache --extra-index-url https://developer.download.nvidia.com/compute/redist nvidia-dali-cuda110==0.28.0 + +# Install Warp-Transducer library +git clone https://github.com/HawkAaron/warp-transducer deps/warp-transducer +cd deps/warp-transducer/ +git checkout f546575109111c455354861a0567c8aa794208a2 +sed -i 's/set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -gencode arch=compute_30,code=sm_30 -O2")/#set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -gencode arch=compute_30,code=sm_30 -O2")/g' CMakeLists.txt +sed -i 's/set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -gencode arch=compute_75,code=sm_75")/set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -gencode arch=compute_80,code=sm_80")/g' CMakeLists.txt +mkdir build +cd build/ +cmake .. 
+make -j32 +export CUDA_HOME="/usr/local/cuda" +export WARP_RNNT_PATH=`pwd` +export CUDA_TOOLKIT_ROOT_DIR="$CUDA_HOME" +export LD_LIBRARY_PATH="$CUDA_HOME/extras/CUPTI/lib64:$LD_LIBRARY_PATH" +export LIBRARY_PATH="$CUDA_HOME/lib64:$LIBRARY_PATH" +export LD_LIBRARY_PATH="$CUDA_HOME/lib64:$LD_LIBRARY_PATH" +export CFLAGS="-I$CUDA_HOME/include $CFLAGS" +cd ../pytorch_binding +python3 setup.py install +cd ../../.. + +# Install Nvidia CuDNN +conda install -c nvidia cudnn==8.0.4 + +# Install apex +pip install --global-option="--cpp_ext" --global-option="--cuda_ext" https://github.com/NVIDIA/apex/archive/8a1ed9e8d35dfad26fb973996319965e4224dcdd.zip + +# Other train deps +pip install pyyaml + +# Changes to source code, add DATASET_DIR to rnnt/ootb/train/configs/baseline_v3-1024sp.yaml +sed -i 's@sentpiece_model: /sentencepieces/librispeech1023.model@sentpiece_model: '"$DATASET_DIR"'/sentencepieces/librispeech1023.model@' rnnt/ootb/train/configs/baseline_v3-1023sp.yaml + +# Install MLPerf loadgen +pushd rnnt/ootb/inference/loadgen +python setup.py install +popd + +# Install other inference dependencies +pip install toml==0.10.0 +pip install tqdm==4.31.1 + +# Download the pre-trained model +mkdir -p $RESULT_DIR +wget https://zenodo.org/record/3662521/files/DistributedDataParallel_1576581068.9962234-epoch-100.pt?download=1 -O $RESULT_DIR/rnnt.pt + +# Download and Extract LibriSpeech Dataset +bash rnnt/ootb/train/scripts/download_librispeech.sh + +# Process the .flac files into .wav and .json +bash rnnt/ootb/train/scripts/preprocess_librispeech.sh diff --git a/benchmarks/xlmr/ootb/xlmr.py b/benchmarks/xlmr/ootb/xlmr.py new file mode 100644 index 0000000..8f85e8d --- /dev/null +++ b/benchmarks/xlmr/ootb/xlmr.py @@ -0,0 +1,137 @@ +import torch +import argparse +import sys +import time +import torch.nn.functional as F + +# FB5 Logger +import pathlib +from os import fspath +p = pathlib.Path(__file__).parent.resolve() / "../../../fb5logging" +sys.path.append(fspath(p)) +from 
fb5logger import FB5Logger + +# from fairseq.models.roberta import XLMRModel + +def time_ms(use_gpu): + """ + Return time. If gpu is available, synchronize. + """ + if use_gpu: + torch.cuda.synchronize() + return time.time_ns() * 1e-6 + +def get_model(): + # download from Internet + fairseq_xlmr_large = torch.hub.load('pytorch/fairseq:main', 'xlmr.large') + + # load model weights file locally + # f = '/path/xlmr.large' + # fairseq_xlmr_large = XLMRModel.from_pretrained(f, checkpoint_file='model.pt') + + # TODO use torchscript? jit/script this model? + return fairseq_xlmr_large.model + +def generate_ml_sample(batchsize=64, seq_length=64, vocab_size=250000, get_y_true=True): + shape = (batchsize, seq_length) + x = torch.rand(shape) * vocab_size + x = x.int() + if get_y_true: + y_true = torch.rand((batchsize, seq_length, 250002)) + return [x, y_true] + else: + return x + +def evaluate_simple(model, x_l, use_gpu=False, famlogger=None): + """ + Run data through the model + """ + for x in x_l: + famlogger.batch_start() + if use_gpu: + x = x.cuda() + y_pred = model(x) + famlogger.batch_stop(time_ms=time_ms(use_gpu)) + +def init_argparse() -> argparse.ArgumentParser: + parser = argparse.ArgumentParser( + description="Benchmark XLM-R model" + ) + parser.add_argument("--logdir", type=str, default=None) + parser.add_argument("--inference-only", action="store_true", default=False) + parser.add_argument("--famconfig", type=str, default="tiny") + parser.add_argument("--use-gpu", action="store_true", default=False) + parser.add_argument("--num-batches", type=int, default=10) + parser.add_argument("--batch-size", type=int, default=64) + parser.add_argument("--sequence-length", type=int, default=64) + parser.add_argument("--vocab-size", type=int, default=250000) + parser.add_argument("--half-model", action="store_true", default=False) + return parser + +def run(): + parser = init_argparse() + args = parser.parse_args() + + # check for device + if(args.use_gpu): + assert 
torch.cuda.is_available(), "No cuda device is available." + device = torch.device("cuda", 0) + + # prep logger + famlogger = None + if args.logdir is not None: + mode = "train" + if(args.inference_only): + mode = "eval" + + logpath = "{}/XLMR_OOTB_{}_{}.log".format(args.logdir, mode, args.famconfig) + famlogger = FB5Logger(logpath) + famlogger.header("XLMR", "OOTB", mode, args.famconfig) + + # prep model and data + xlmr = get_model() + if args.inference_only: + xlmr.eval() + if args.half_model: + xlmr.half() + + # use gpu + if args.use_gpu: + xlmr = xlmr.to(device) + + print("generating data") + if args.inference_only: + x_l = [generate_ml_sample(batchsize=args.batch_size, seq_length=args.sequence_length, vocab_size=args.vocab_size, get_y_true=False) for _ in range(args.num_batches)] + else: + x_l, y_true_l = zip(*[generate_ml_sample(batchsize=args.batch_size, seq_length=args.sequence_length, vocab_size=args.vocab_size) for _ in range(args.num_batches)]) + print("data generated") + + # benchmark! 
+ if famlogger is not None: + famlogger.run_start(time_ms=time_ms(args.use_gpu)) + + if args.inference_only: + evaluate_simple(xlmr, x_l, use_gpu=args.use_gpu, famlogger=famlogger) + else: + #training loop + learning_rate = 0.01 + optimizer = torch.optim.SGD(xlmr.parameters(), lr=learning_rate) + for x, y_true in zip(x_l, y_true_l): + famlogger.batch_start() + if args.use_gpu: + x = x.to(device) + y_true = y_true.to(device) + y_pred = xlmr(x) + y_true = y_true.long() + loss = F.cross_entropy(y_pred[0], y_true[:,0,:]) # TODO: fix y_true data input hack + loss.backward() + optimizer.step() + optimizer.zero_grad() + famlogger.batch_stop(time_ms=time_ms(args.use_gpu)) + + if famlogger is not None: + famlogger.run_stop(0, 0, time_ms=time_ms(args.use_gpu)) + famlogger.record_batch_info(num_batches=len(x_l), batch_size=len(x_l[0])) + +if __name__ == "__main__": + run() diff --git a/benchmarks/xlmr/ootb/xlmr_extra.py b/benchmarks/xlmr/ootb/xlmr_extra.py new file mode 100644 index 0000000..09bfff8 --- /dev/null +++ b/benchmarks/xlmr/ootb/xlmr_extra.py @@ -0,0 +1,31 @@ +""" +Hold functions for xlmr todos +""" +from torchtext.datasets import PennTreebank + +def get_inference_data(): + test_dp = PennTreebank(split='test') + # TODO prepare this data properly + + return test_dp + +def evaluate(test_dp, model): + """ + evaluation loop for xlmr + """ + model.eval() + total_correct, total_count = 0.0, 0.0 + with torch.no_grad(): + for batch in test_dp: + print(batch) + model_input = batch["pad_token_ids"] # TODO .to(device). 
same for next line + target = torch.tensor(batch["labels"]) + logits = model(model_input) + correct = (logits.argmax(1) == target).sum() + total_correct+=float(correct) + total_count+=float(target.size(0)) + return total_correct/total_count + +def train(self, niter=1): + # TODO need the right loss, correct optimizer/learning rate, etc + pass \ No newline at end of file diff --git a/docs/DLRM.md b/docs/DLRM.md new file mode 100644 index 0000000..d95acf4 --- /dev/null +++ b/docs/DLRM.md @@ -0,0 +1,109 @@ +# Deep Learning Recommendation Model for Personalization and Recommendation Systems: + +Copyright (c) Facebook, Inc. and its affiliates. + +## Summary +Deep Learning Recommendation Model (DLRM) supports various flags to control the model characteristics and execution sizes. This document introduces a bash script to toggle between the configurations used for benchmarking. + +## Getting Started with DLRM +Here is an example initial run. Run the following commands in terminal. + +Starting from the top level of the repo, +``` +cd benchmarks +``` +Now we are at proxyworkloads/benchmarks + +Run one of the DLRM benchmarks. This script will log to the +directory using the -l flag. Here, log to results/. +``` +./run_dlrm_ootb_train.sh -l results +``` + +Create summary table and save to results/summary.txt +``` +python ../fb5logging/result_summarizer.py -f results +``` + +See and/or run proxyworkloads/benchmarks/run_all.sh for a runnable example. Please note that to run it, your current dir must be at proxyworkloads/benchmarks. + +### Additional DLRM Configurations +You may choose to run your own model configuration. To do so, create a config file containing all flags for `dlrm_s_pytorch.py` on a single line. 
For example, create a file called `dlrm_tutorial` with contents: + +``` +--mini-batch-size=64 --test-mini-batch-size=64 --test-num-workers=0 --num-batches=1000 --data-generation=random --arch-mlp-bot=512-512-64 --arch-mlp-top=1024-1024-1024-1 --arch-sparse-feature-size=64 --arch-embedding-size=1000000-1000000-1000000-1000000-1000000-1000000-1000000-1000000 --num-indices-per-lookup=100 --arch-interaction-op=dot --numpy-rand-seed=727 --print-freq=100 --print-time +``` + +Run the `run_dlrm_ootb_train.sh` script with the `-c` flag to specify which config file to use: + +``` +./run_dlrm_ootb_train.sh -l results -c dlrm_tutorial +``` + +In this example, you should see an output similar to this: + +``` +$ ./run_dlrm_ootb_train.sh -l results -c dlrm_tutorial +=== Launching FB5 === +Benchmark: dlrm +Implementation: ootb +Mode: train +Config: dlrm_tutorial +Saving FB5 Logger File: results/dlrm_ootb_train_dlrm_tutorial.log + +Running Command: ++ python dlrm/ootb/dlrm_s_pytorch.py --mini-batch-size=64 --test-mini-batch-size=64 --test-num-workers=0 --num-batches=1000 --data-generation=random --arch-mlp-bot=512-512-64 --arch-mlp-top=1024-1024-1024-1 --arch-sparse-feature-size=64 --arch-embedding-size=1000000-1000000-1000000-1000000-1000000-1000000-1000000-1000000 --num-indices-per-lookup=100 --arch-interaction-op=dot --numpy-rand-seed=727 --print-freq=100 --print-time +world size: 1, current rank: 0, local rank: 0 +Using CPU... 
+time/loss/accuracy (if enabled): +Finished training it 100/1000 of epoch 0, 56.60 ms/it, loss 0.084849 +Finished training it 200/1000 of epoch 0, 44.95 ms/it, loss 0.082306 +Finished training it 300/1000 of epoch 0, 45.26 ms/it, loss 0.083103 +Finished training it 400/1000 of epoch 0, 47.32 ms/it, loss 0.080760 +Finished training it 500/1000 of epoch 0, 46.90 ms/it, loss 0.084727 +Finished training it 600/1000 of epoch 0, 45.55 ms/it, loss 0.083395 +Finished training it 700/1000 of epoch 0, 47.67 ms/it, loss 0.084470 +Finished training it 800/1000 of epoch 0, 44.90 ms/it, loss 0.083775 +Finished training it 900/1000 of epoch 0, 46.24 ms/it, loss 0.082480 +Finished training it 1000/1000 of epoch 0, 46.44 ms/it, loss 0.082861 +=== Completed Run === +``` + +### Inference +You may also choose to run DLRM in Inference mode. To do so, follow the same steps as above using the `run_dlrm_ootb_infer.sh` script instead. + +## Requirements +pytorch-nightly + +scikit-learn + +numpy + +## Optional +### fbgemm_gpu +Install additional requirements: +``` +conda install jinja2 +conda install nvidiacub +``` +Set export paths: +``` +export CUDACXX=/usr/local/cuda/bin/nvcc +export CUB_DIR=${CUB_DIR} +``` +Clone repo: +``` +git clone https://github.com/pytorch/FBGEMM.git +cd FBGEMM/fbgemm_gpu +git submodule sync +git submodule update --init --recursive +``` +Run installer: +``` +python setup.py build develop +``` +Copy shared object file +``` +cp fbgemm_gpu_py.so //benchmarks +``` +Enable fbgemm_gpu by adding command line argument: --use-fbgemm-gpu diff --git a/docs/RNNT.md b/docs/RNNT.md new file mode 100644 index 0000000..a4ee672 --- /dev/null +++ b/docs/RNNT.md @@ -0,0 +1,139 @@ +# RNN-T from MLCommons w/ LibriSpeech Dataset + +## Requirements + + - CUDA 11.0 + - Miniconda + - GPU device compatible with CUDA 11.0 + +## Set-up + +There are two options for set-up, either using the setup_rnnt.sh script or manually installing packages below.
+ +## Automatic Script +``` +cd benchmarks +bash setup_rnnt.sh +``` +## Manual Set-up + +## Training + +This document provides the detailed instructions to start training RNN-T models with Open-Source LibriSpeech Dataset. The repository can be found here: https://github.com/mlcommons/training/tree/master/rnn_speech_recognition/pytorch + +### Setting up the conda environment +``` +conda create -n proxy-rnnt python=3.8.3 +conda activate proxy-rnnt +pip install requests bs4 argparse +conda install pytorch==1.7.0 torchvision==0.8.0 torchaudio==0.7.0 cudatoolkit=11.0 -c pytorch +``` + +### Switch to CUDA 11.0 +``` +git clone https://github.com/phohenecker/switch-cuda.git +source ~/cluster/work/switch-cuda/switch-cuda.sh 11.0 +export TORCH_CUDA_ARCH_LIST=8.0 +``` + +### Getting LibriSpeech ready +``` +# Install required packages +sudo apt-get install sox libsndfile1 jq numactl git cmake +pip install unidecode==1.1.1 inflect==4.1.0 pandas==1.1.5 sentencepiece==0.1.94 librosa==0.8.0 soundfile==0.10.3.post1 tensorboard==2.3.0 numba==0.48.0 + +# Set-up directories and exports +# Pick a mounted location that can hold up to 500GB of dataset data +export DATASET_DIR=/rnnt/datasets +export RESULT_DIR=/rnnt/results + +# Download and Extract LibriSpeech Dataset +bash rnnt/ootb/train/scripts/download_librispeech.sh + +# Process the .flac files into .wav and .json +bash rnnt/ootb/train/scripts/preprocess_librispeech.sh +``` + +### Getting Training running: +``` +# Install dllogger and mlcommons logger +pip install https://github.com/NVIDIA/dllogger/archive/26a0f8f1958de2c0c460925ff6102a4d2486d6cc.zip +pip install https://github.com/mlcommons/logging/archive/d08740cadb4188a5ebeb84ad6c68f98c1e129805.zip + +# Install Nvidia Dali +pip install --no-cache --extra-index-url https://developer.download.nvidia.com/compute/redist nvidia-dali-cuda110==0.28.0 + +# Install Warp-Transducer library +git clone https://github.com/HawkAaron/warp-transducer deps/warp-transducer +cd 
deps/warp-transducer/ +git checkout f546575109111c455354861a0567c8aa794208a2 +sed -i 's/set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -gencode arch=compute_30,code=sm_30 -O2")/#set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -gencode arch=compute_30,code=sm_30 -O2")/g' CMakeLists.txt +sed -i 's/set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -gencode arch=compute_75,code=sm_75")/set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -gencode arch=compute_80,code=sm_80")/g' CMakeLists.txt +mkdir build +cd build/ +cmake .. +make -j32 +export WARP_RNNT_PATH=`pwd` +export CUDA_TOOLKIT_ROOT_DIR=$CUDA_HOME +export LD_LIBRARY_PATH="$CUDA_HOME/extras/CUPTI/lib64:$LD_LIBRARY_PATH" +export LIBRARY_PATH=$CUDA_HOME/lib64:$LIBRARY_PATH +export LD_LIBRARY_PATH=$CUDA_HOME/lib64:$LD_LIBRARY_PATH +export CFLAGS="-I$CUDA_HOME/include $CFLAGS" +cd ../pytorch_binding +python3 setup.py install +rm -rf ../tests test ../tensorflow_binding +cd ../../.. + +# Install Nvidia CuDNN +conda install -c nvidia cudnn==8.0.4 + +# Install apex +pip install --global-option="--cpp_ext" --global-option="--cuda_ext" https://github.com/NVIDIA/apex/archive/8a1ed9e8d35dfad26fb973996319965e4224dcdd.zip + +# Other deps +pip install pyyaml +``` + +### Changes to Source Code: + +* Change configs/baseline_v3-1023sp.yaml’s tokenizer: sentpiece_model: to your $DATASET_DIR/sentencepieces/librispeech1023.model + +### Finally train with command: +``` +bash rnnt/ootb/train/scripts/train.sh $DATASET_DIR/LibriSpeech rnnt/ootb/train/configs/baseline_v3-1023sp.yaml $RESULT_DIR +``` +At this point, you should be able to see training epochs. + +## Inference + +This document provides the detailed instructions to run inference on a pre-trained model from MLCommons against the Open-Source LibriSpeech dataset.
The repository can be found here: https://github.com/mlcommons/inference/tree/master/speech_recognition/rnnt + +### Installing dependencies + +Using the same conda environment as training: +``` +conda activate proxy-rnnt +``` + +Install MLPerf loadgen and additional packages: +``` +# Install MLPerf loadgen +pushd inference/loadgen +python setup.py install +popd + +# Install dependencies +pip install toml==0.10.0 +pip install tqdm==4.31.1 +``` + +Download the pre-trained model: +``` +wget https://zenodo.org/record/3662521/files/DistributedDataParallel_1576581068.9962234-epoch-100.pt?download=1 -O $RESULT_DIR/rnnt.pt +``` + +### Finally run inference with the command: +``` +python rnnt/ootb/inference/run.py --backend pytorch --dataset_dir $DATASET_DIR/LibriSpeech --manifest $DATASET_DIR/LibriSpeech/librispeech-dev-clean-wav.json --pytorch_config_toml rnnt/ootb/inference/pytorch/configs/rnnt.toml --pytorch_checkpoint $RESULT_DIR/rnnt.pt --scenario Offline --log_dir $RESULT_DIR/Offline_pytorch_rerun +``` +At this point, wait for inference to finish and produce results. diff --git a/docs/adding_benchmarks.md b/docs/adding_benchmarks.md new file mode 100644 index 0000000..27004e3 --- /dev/null +++ b/docs/adding_benchmarks.md @@ -0,0 +1,6 @@ +# Adding Benchmarks + +The key principles for adding benchmarks: +1. Each benchmark can be run independently +2. Do not assume that because two benchmarks share a dependency, they will continue to share it in the future +3. Each benchmark is a separate directory \ No newline at end of file diff --git a/docs/getting_started.md b/docs/getting_started.md new file mode 100644 index 0000000..0823e72 --- /dev/null +++ b/docs/getting_started.md @@ -0,0 +1,21 @@ +## Getting Started: DLRM Example +Here is an example initial run. Run the following commands in terminal. + +Starting from the top level of the repo, +``` +cd benchmarks +``` +Now we are at proxyworkloads/benchmarks + +Run one of the DLRM benchmarks.
This script will log to the +directory using the -l flag. Here, log to results/. +``` +./run_dlrm_ootb_train.sh -l results +``` + +Create summary table and save to results/summary.txt +``` +python ../fb5logging/result_summarizer.py -f results +``` + +See and/or run proxyworkloads/benchmarks/run_all.sh for a runnable example. Please note that to run it, your current dir must be at proxyworkloads/benchmarks. \ No newline at end of file diff --git a/fb5logging/fb5logger.py b/fb5logging/fb5logger.py new file mode 100644 index 0000000..c2a8aec --- /dev/null +++ b/fb5logging/fb5logger.py @@ -0,0 +1,93 @@ +import logging +import os +import sys +import json +import time +import loggerconstants as constants + +# TODO: change name to FAMLogger +class FB5Logger(): + + def __init__(self, log_file_path): + # Create the directory if it doesn't exist. + log_file_dir = os.path.dirname(log_file_path) + if not os.path.exists(log_file_dir): + os.makedirs(log_file_dir) + + open(log_file_path, 'w') # create or overwrite file + self.log_file_path = log_file_path + + def _dump_json(self, d: dict): + with open(self.log_file_path, 'a') as f: + json.dump(d, f) + f.write('\n') + + def _time_ms(self): + """ + Naive implementation of current time. + """ + return time.time_ns() * 1e-6 + + def log_line(self, log_info : dict, key : str): + """ + Log a line with a dict of arbitrary form for the data and a string key. + """ + log_info['key'] = key + self._dump_json(log_info) + + def header(self, benchmark_name, implementation_name, mode, config_name, score_metric=constants.EXPS): + """ + Required for every log. Describes what the benchmark is. + """ + header_dict = { + "benchmark": benchmark_name, + "implementation": implementation_name, + "mode": mode, + "config": config_name, + "score_metric": score_metric} + self.log_line(header_dict, constants.HEADER) + + def run_start(self, time_ms = None): + """ + Records start of logging. 
+ """ + if(time_ms is None): + time_ms = self._time_ms() + start_dict = {"time_ms": time_ms} + self.log_line(start_dict, constants.RUN_START) + + # TODO: remove batch info args and migrate to record_batch_info + def run_stop(self, num_batches, batch_size, extra_metadata = None, time_ms = None): + """ + Records end of logging and any required data. + """ + if(time_ms is None): + time_ms = self._time_ms() + stop_dict = {"time_ms": time_ms, "num_batches": num_batches, "batch_size": batch_size} + if extra_metadata is not None: + stop_dict["extra_metadata"] = extra_metadata + self.log_line(stop_dict, constants.RUN_STOP) + + def record_batch_info(self, num_batches = None, batch_size = None): + batch_size_dict = {"batch_size": batch_size} + self.log_line(batch_size_dict, constants.BATCH_SIZE) + nbatches_dict = {"num_batches": num_batches} + self.log_line(nbatches_dict, constants.NUM_BATCHES) + + def batch_start(self, time_ms = None): + """ + Marks beginning of the model processing a batch + """ + if(time_ms is None): + time_ms = self._time_ms() + batch_start_dict = {"time_ms": time_ms} + self.log_line(batch_start_dict, constants.BATCH_START) + + def batch_stop(self, time_ms = None, batch_size = None): + """ + Marks end of the model processing a batch + """ + if(time_ms is None): + time_ms = self._time_ms() + batch_stop_dict = {"time_ms": time_ms, "batch_size": batch_size} + self.log_line(batch_stop_dict, constants.BATCH_STOP) diff --git a/fb5logging/loggerconstants.py b/fb5logging/loggerconstants.py new file mode 100644 index 0000000..d39ccad --- /dev/null +++ b/fb5logging/loggerconstants.py @@ -0,0 +1,32 @@ +""" +Master list of constants for logger +Mostly logger keys, but some other constants as well. 
+""" + +# loggerkey - header +HEADER = "header" + +# loggerkey - timing info +EPOCH_START = "epoch_start" +EPOCH_STOP = "epoch_stop" +RUN_START = "run_start" +RUN_STOP = "run_stop" +BATCH_START = "batch_start" +BATCH_STOP = "batch_stop" + +# loggerkey - run information +NUM_BATCHES = "num_batches" +BATCH_SIZE = "batch_size" +FLOPS = "flops" + +# loggerkey - model hyperparameters +LEARNING_RATE = "learning_rate" + +# type of summary view saved to file +INTERMEDIATE_VIEW = "intermediate_view" # table view +RAW_VIEW = "raw_view" # json view + +# available types of score metrics +EXPS = "exps" # examples/sec (throughput) +TFPS = "tfps" # teraflops/sec (floating point ops rate) +GBPS = "gbps" # gb/sec diff --git a/fb5logging/result_summarizer.py b/fb5logging/result_summarizer.py new file mode 100644 index 0000000..5897202 --- /dev/null +++ b/fb5logging/result_summarizer.py @@ -0,0 +1,249 @@ +''' +Given a folder of .log files, outputs a summarization file of those logs in table form. +''' + +import argparse +import json +import os +import re +import sys +import glob +import math +import loggerconstants as constants + +## Utility +def _flatten_dict(d: dict): + """ + Flattens a nested dictionary, not in-place + """ + res = {} + for key, val in d.items(): + if isinstance(val, dict): + res.update(_flatten_dict(val)) + else: + res[key] = val + return res + +def _dump_json(d: dict, file_path: str): + with open(file_path, 'a') as f: + json.dump(d, f) + f.write('\n') + +def _lst_to_file(lst: list, file_path: str): + for i in range(len(lst)): + lst[i] = str(lst[i]) + delimiter = ' ' #space delimiter + with open(file_path, 'a') as f: + f.write(delimiter.join(lst) + '\n') + +def _find_and_read_row_multiple(log_str : str, key : str): + """ + Finds multiple rows in a log file string and converts it into list of dicts. + Gives in order of how it appears in the document. 
+ """ + regex = r'.*"key": "{}".*'.format(key) + row_lst = re.findall(regex, log_str) + for i, row in enumerate(row_lst): + row_lst[i] = json.loads(row) + row_lst[i].pop('key') + return row_lst + +def _find_and_read_row(log_str : str, key : str, row_must_exist=True): + """ + Finds first matching row in a log file string and converts it into a dict. + """ + regex = r'.*"key": "{}".*'.format(key) + row = re.search(regex, log_str) + if row is None: + if(row_must_exist): + raise Exception('Failed to match regex: '.format(regex)) + else: + return None + row = json.loads(row.group(0)) + row.pop('key') + return row + +## Metrics + +def get_exps_metric(log_str : str): + """ + Given log file in form of loaded in-memory string, calculate + queries/second + """ + run_start_row = _find_and_read_row(log_str, constants.RUN_START) + run_stop_row = _find_and_read_row(log_str, constants.RUN_STOP) + + # calculate runtime + run_start_time = float(run_start_row['time_ms']) + run_stop_time = float(run_stop_row['time_ms']) + seconds_runtime = (run_stop_time - run_start_time) / 1000 + + # get num batches and batch size based on available log info + # batch info in run_stop row to be deprecated + batch_size_row = _find_and_read_row(log_str, constants.BATCH_SIZE, row_must_exist=False) + num_batches_row = _find_and_read_row(log_str, constants.NUM_BATCHES, row_must_exist=False) + if(num_batches_row is not None and batch_size_row is not None): + num_batches, batch_size = num_batches_row['num_batches'], batch_size_row['batch_size'] + else: + num_batches, batch_size = run_stop_row['num_batches'], run_stop_row['batch_size'] + + # calculate throughput + if(seconds_runtime == 0): + throughput = 'error: runtime is zero' + else: + throughput = num_batches * batch_size / seconds_runtime + average_batch_time = seconds_runtime / num_batches + + metrics_dict = {'score': throughput, 'units': "ex/s"} + return metrics_dict + +def get_tfps_metric(log_str): + """ + Given log file in form of loaded in-memory 
string, calculate + teraflops/second + """ + run_stop_row = _find_and_read_row(log_str, constants.RUN_STOP) + tfps = run_stop_row['extra_metadata']['TF/s'] + metrics_dict = {'score': tfps, 'units': "TF/s"} + return metrics_dict + +def get_gbps_metric(log_str): + """ + Given log file in form of loaded in-memory string, calculate + teraflops/second + """ + run_stop_row = _find_and_read_row(log_str, constants.RUN_STOP) + gbps = run_stop_row['extra_metadata']['GB/s'] + metrics_dict = {'score': gbps, 'units': "GB/s"} + return metrics_dict + +def _calculate_metrics(log_str : str, score_metric : str): + """ + Calculates metrics. Routes to different metrics functions based on the score_metric type. + Allowed score metrics live in loggerconstants.py + """ + + # route to correct score_metric, which gets score and units + if(score_metric == constants.EXPS): + metrics_dict = get_exps_metric(log_str) + elif(score_metric == constants.TFPS): + metrics_dict = get_tfps_metric(log_str) + elif(score_metric == constants.GBPS): + metrics_dict = get_gbps_metric(log_str) + else: + raise Exception("Score metric not available - should never get here") + return metrics_dict + +## Handle batches and batch latency + +def _calculate_batch_latency(log_str : str, percentile : float): + """ + Calculates batch latency at a given percentile in range [0, 1]. 
+ """ + assert 0 <= percentile <= 1 + + batch_start_lst = _find_and_read_row_multiple(log_str, constants.BATCH_START) + batch_stop_lst = _find_and_read_row_multiple(log_str, constants.BATCH_STOP) + if(len(batch_start_lst) != len(batch_stop_lst)): + raise Exception('Number of batch starts does not match number of batch stops') + nbatches = len(batch_start_lst) + if(nbatches == 0): + return None + + batch_times = [] + for i in range(nbatches): + # calculate runtime + batch_start_time = float(batch_start_lst[i]['time_ms']) + batch_stop_time = float(batch_stop_lst[i]['time_ms']) + batch_runtime = (batch_stop_time - batch_start_time) / 1000 # seconds + batch_times.append(batch_runtime) + + batch_times.sort() + # default to slower latency if percentile doesn't exactly match a batch time + batch_idx = math.ceil(percentile * (nbatches - 1)) + batch_time_at_percentile = batch_times[batch_idx] + + return batch_time_at_percentile + +## Read and process log files + +def _create_summary_row(file_path : str): + """ + Takes a single file path. + Return JSON row. + """ + with open(file_path, 'r') as f: + log_file_str = f.read() + header = _find_and_read_row(log_file_str, constants.HEADER) + metrics = _calculate_metrics(log_file_str, header['score_metric']) + row = header + row['metrics'] = metrics + + # TODO: fix units and percentile for latency + batch_latency = _calculate_batch_latency(log_file_str, 0.95) + row['batch_latency_95_sec'] = batch_latency + + # TODO: allow encoding of extra metadata + + return row + +def _rows_to_file(rows: list, folder_path: str, summary_view=constants.INTERMEDIATE_VIEW): + """ + Save list of summary rows into a human-readable table in a file. 
+ rows: list[dict] + """ + file_path = folder_path + '/summary.txt' + if(len(rows) == 0): + print('Nothing to summarize, no changes to summary file.') + return + open(file_path, 'w') # create or overwrite file + + if(summary_view == constants.INTERMEDIATE_VIEW): + top_level_keys = [ + "benchmark", + "implementation", + "mode", + "config", + "score", + "units", + "batch_latency_95_sec"] + _lst_to_file(top_level_keys, file_path) + for row in rows: + flattened_row = _flatten_dict(row) + top_val_lst = [flattened_row[k] for k in top_level_keys] + _lst_to_file(top_val_lst, file_path) + elif(summary_view == constants.RAW_VIEW): + for row in rows: + _dump_json(row, file_path) + else: + print('Summary view of wrong type - should never get here.') + +def summarize_results(benchmark_folder) -> list: + """ + Summarizes a set of results. + returns: list[dict] + """ + rows = [] + pattern = '{folder}/*.log'.format(folder=benchmark_folder) # TODO allow other kinds of files + result_files = glob.glob(pattern, recursive=True) + print('Summarizing files: {}'.format(result_files)) + for file_path in result_files: + row = _create_summary_row(file_path) + rows.append(row) + return rows + +## Parse and main + +def init_argparse() -> argparse.ArgumentParser: + parser = argparse.ArgumentParser( + description="Summarize a folder of logged benchmark result files." 
+ ) + parser.add_argument('-f', '--benchmark-folder', type=str, default='.') + parser.add_argument('-v', '--summary-view', type=str, default=constants.INTERMEDIATE_VIEW) + return parser + +if __name__ == '__main__': + parser = init_argparse() + args = parser.parse_args() + rows = summarize_results(args.benchmark_folder) + _rows_to_file(rows, args.benchmark_folder, summary_view=args.summary_view) diff --git a/fb5logging/test_mllog.py b/fb5logging/test_mllog.py new file mode 100644 index 0000000..3eff2f4 --- /dev/null +++ b/fb5logging/test_mllog.py @@ -0,0 +1,16 @@ +from fb5logger import FB5Logger +import time + + +def dummy_example(): + """Example usage of fb5logger""" + + logger = FB5Logger("results/example_simple.log") # file to write to. works only with .log + logger.header("DLRM", "OOTB", "train", "small") # benchmark, implementation, mode, config + + logger.run_start() + time.sleep(1) # whatever benchmark here. + logger.run_stop(100, 32) # num_batches, batch_size + +if __name__ == "__main__": + dummy_example() \ No newline at end of file diff --git a/param b/param new file mode 160000 index 0000000..90bf767 --- /dev/null +++ b/param @@ -0,0 +1 @@ +Subproject commit 90bf7676a1a6e9f19d93bf2c123fb8447bf25222