From 8c679ebf92f548e53cbe2c9dd182c1ecbace31ca Mon Sep 17 00:00:00 2001 From: Roope Niemi Date: Thu, 25 Sep 2025 09:39:16 +0200 Subject: [PATCH 01/37] use relu multiplier by default, disable fitcompress by default --- src/pquant/configs/config_ap.yaml | 8 ++++---- src/pquant/configs/config_autosparse.yaml | 8 ++++---- src/pquant/configs/config_cs.yaml | 8 ++++---- src/pquant/configs/config_dst.yaml | 9 ++++----- src/pquant/configs/config_mdmm.yaml | 12 ++++++------ src/pquant/configs/config_pdp.yaml | 6 +++--- src/pquant/configs/config_wanda.yaml | 6 +++--- 7 files changed, 28 insertions(+), 29 deletions(-) diff --git a/src/pquant/configs/config_ap.yaml b/src/pquant/configs/config_ap.yaml index 015018e..17c9c02 100644 --- a/src/pquant/configs/config_ap.yaml +++ b/src/pquant/configs/config_ap.yaml @@ -16,17 +16,17 @@ quantization_parameters: layer_specific: [] use_high_granularity_quantization: false use_real_tanh: false - use_relu_multiplier: false + use_relu_multiplier: true use_symmetric_quantization: false fitcompress_parameters: - enable_fitcompress : true + enable_fitcompress : false optimize_quantization : true quantization_schedule : [7.,4.,3.,2.] pruning_schedule : {start : 0, end : -3, steps : 40} compression_goal : 0.10 optimize_pruning : false - greedy_astar : true - approximate : true + greedy_astar : true + approximate : true lambda : 1 training_parameters: epochs: 100 diff --git a/src/pquant/configs/config_autosparse.yaml b/src/pquant/configs/config_autosparse.yaml index 4d34e64..2e7854b 100644 --- a/src/pquant/configs/config_autosparse.yaml +++ b/src/pquant/configs/config_autosparse.yaml @@ -19,17 +19,17 @@ quantization_parameters: layer_specific: [] use_high_granularity_quantization: false use_real_tanh: false - use_relu_multiplier: false + use_relu_multiplier: true use_symmetric_quantization: false fitcompress_parameters: - enable_fitcompress : true + enable_fitcompress : false optimize_quantization : true quantization_schedule : [7.,4.,3.,2.] pruning_schedule : {start : 0, end : -3, steps : 40} compression_goal : 0.10 optimize_pruning : false - greedy_astar : true - approximate : true + greedy_astar : true + approximate : true lambda : 1 training_parameters: epochs: 100 diff --git a/src/pquant/configs/config_cs.yaml b/src/pquant/configs/config_cs.yaml index cf05cef..ae26211 100644 --- a/src/pquant/configs/config_cs.yaml +++ b/src/pquant/configs/config_cs.yaml @@ -15,17 +15,17 @@ quantization_parameters: layer_specific: [] use_high_granularity_quantization: false use_real_tanh: false - use_relu_multiplier: false + use_relu_multiplier: true use_symmetric_quantization: false fitcompress_parameters: - enable_fitcompress : true + enable_fitcompress : false optimize_quantization : true quantization_schedule : [7.,4.,3.,2.,1.] 
pruning_schedule : {start : 0, end : -3, steps : 40} compression_goal : 0.10 optimize_pruning : false - greedy_astar : true - approximate : true + greedy_astar : true + approximate : true lambda : 1 training_parameters: epochs: 85 diff --git a/src/pquant/configs/config_dst.yaml b/src/pquant/configs/config_dst.yaml index 79b9d65..f9be222 100644 --- a/src/pquant/configs/config_dst.yaml +++ b/src/pquant/configs/config_dst.yaml @@ -17,18 +17,17 @@ quantization_parameters: layer_specific: [] use_high_granularity_quantization: false use_real_tanh: false - use_relu_multiplier: false + use_relu_multiplier: true use_symmetric_quantization: false - fitcompress_parameters: - enable_fitcompress : true + enable_fitcompress : false optimize_quantization : true quantization_schedule : [7.,4.,3.,2.,1.] pruning_schedule : {start : 0, end : -3, steps : 40} compression_goal : 0.10 optimize_pruning : false - greedy_astar : true - approximate : true + greedy_astar : true + approximate : true lambda : 1 training_parameters: diff --git a/src/pquant/configs/config_mdmm.yaml b/src/pquant/configs/config_mdmm.yaml index c7341b7..70bbc83 100644 --- a/src/pquant/configs/config_mdmm.yaml +++ b/src/pquant/configs/config_mdmm.yaml @@ -4,7 +4,7 @@ pruning_parameters: pruning_method: mdmm enable_pruning: true disable_pruning_for_layers: - - + - constraint_type: "Equality" target_value: 0.0 metric_type: "UnstructuredSparsity" @@ -13,7 +13,7 @@ pruning_parameters: epsilon: 1.0e-03 scale: 10.0 damping: 1.0 - use_grad: false + use_grad: false l0_mode: "coarse" # 'coarse' or 'smooth' quantization_parameters: @@ -25,7 +25,7 @@ quantization_parameters: layer_specific: [] use_high_granularity_quantization: false use_real_tanh: false - use_relu_multiplier: false + use_relu_multiplier: true use_symmetric_quantization: false training_parameters: epochs: 200 @@ -36,14 +36,14 @@ training_parameters: rounds: 1 save_weights_epoch: -1 fitcompress_parameters: - enable_fitcompress : true + enable_fitcompress : false optimize_quantization : true quantization_schedule : [7.,4.,3.,2] pruning_schedule : {start : 0, end : -3, steps : 40} compression_goal : 0.10 optimize_pruning : false greedy_astar : true - approximate : true + approximate : true lambda : 1 batch_size: 64 cosine_tmax: 200 @@ -59,6 +59,6 @@ momentum: 0.9 optimizer: sgd plot_frequency: 100 -# Note: +# Note: # use_grad: true is having some bug... flip gradient impl not working as intended # Confirmed: grad for both 'coarse' or 'smooth' are non None diff --git a/src/pquant/configs/config_pdp.yaml b/src/pquant/configs/config_pdp.yaml index e85018a..b9a8290 100644 --- a/src/pquant/configs/config_pdp.yaml +++ b/src/pquant/configs/config_pdp.yaml @@ -17,17 +17,17 @@ quantization_parameters: layer_specific: [] use_high_granularity_quantization: false use_real_tanh: false - use_relu_multiplier: false + use_relu_multiplier: true use_symmetric_quantization: false fitcompress_parameters: - enable_fitcompress : true + enable_fitcompress : false optimize_quantization : true quantization_schedule : [7.,4.,3.,2.,1.] 
pruning_schedule : {start : 0, end : -3, steps : 40} compression_goal : 0.10 optimize_pruning : true greedy_astar : true - approximate : true + approximate : true lambda : 1 training_parameters: epochs: 100 diff --git a/src/pquant/configs/config_wanda.yaml b/src/pquant/configs/config_wanda.yaml index 4ee2097..3707b03 100644 --- a/src/pquant/configs/config_wanda.yaml +++ b/src/pquant/configs/config_wanda.yaml @@ -19,17 +19,17 @@ quantization_parameters: layer_specific: [] use_high_granularity_quantization: false use_real_tanh: false - use_relu_multiplier: false + use_relu_multiplier: true use_symmetric_quantization: false fitcompress_parameters: - enable_fitcompress : true + enable_fitcompress : false optimize_quantization : true quantization_schedule : [7.,4.,3.,2.,1.] pruning_schedule : {start : 0, end : -3, steps : 40} compression_goal : 0.04 optimize_pruning : true greedy_astar : true - approximate : true + approximate : true lambda : 0.5 training_parameters: epochs: 200 From 38af523d9911be05c7747df57cf9838d48e46896 Mon Sep 17 00:00:00 2001 From: Anastasiia Date: Tue, 28 Oct 2025 17:24:41 +0200 Subject: [PATCH 02/37] Add hyperparameter optimisation - Add hyperparameter optimisation for the fine-tuning interface - Modified config access from dictionary to object with the help of Pydantic --- examples/example_finetuning.ipynb | 920 +++++ pyproject.toml | 5 +- src/pquant/configs/config_ap.yaml | 2 +- src/pquant/configs/config_autosparse.yaml | 2 +- src/pquant/configs/config_cs.yaml | 2 +- src/pquant/configs/config_dst.yaml | 3 +- src/pquant/configs/config_mdmm.yaml | 2 +- src/pquant/configs/config_pdp.yaml | 2 +- src/pquant/configs/config_wanda.yaml | 2 +- src/pquant/configs/finetuning.yaml | 59 + src/pquant/core/activations_quantizer.py | 59 +- src/pquant/core/compressed_layers.py | 8 +- src/pquant/core/constants.py | 77 + src/pquant/core/finetuning.py | 338 ++ .../core/tf_impl/compressed_layers_tf.py | 79 +- src/pquant/core/tf_impl/train_tf.py | 30 +- .../torch_impl/compressed_layers_torch.py | 149 +- src/pquant/core/torch_impl/fit_compress.py | 3396 +++++++++-------- src/pquant/core/torch_impl/train_torch.py | 30 +- src/pquant/core/utils.py | 12 +- src/pquant/data_models/finetuning_model.py | 18 + src/pquant/data_models/fitcompress_model.py | 21 + src/pquant/data_models/pruning_model.py | 84 + src/pquant/data_models/quantization_model.py | 16 + src/pquant/data_models/training_model.py | 27 + .../pruning_methods/activation_pruning.py | 20 +- src/pquant/pruning_methods/autosparse.py | 33 +- .../pruning_methods/constraint_functions.py | 132 + src/pquant/pruning_methods/cs.py | 27 +- src/pquant/pruning_methods/dst.py | 29 +- src/pquant/pruning_methods/mdmm.py | 297 +- .../pruning_methods/metric_functions.py | 85 + src/pquant/pruning_methods/pdp.py | 31 +- src/pquant/pruning_methods/wanda.py | 27 +- tests/test_ap.py | 1 + tests/test_keras_compression_layers.py | 83 +- tests/test_pdp.py | 1 + tests/test_torch_compression_layers.py | 73 +- tests/test_wanda.py | 1 + 39 files changed, 3993 insertions(+), 2190 deletions(-) create mode 100644 examples/example_finetuning.ipynb create mode 100644 src/pquant/configs/finetuning.yaml create mode 100644 src/pquant/core/constants.py create mode 100644 src/pquant/core/finetuning.py create mode 100644 src/pquant/data_models/finetuning_model.py create mode 100644 src/pquant/data_models/fitcompress_model.py create mode 100644 src/pquant/data_models/pruning_model.py create mode 100644 src/pquant/data_models/quantization_model.py create mode 100644 
src/pquant/data_models/training_model.py create mode 100644 src/pquant/pruning_methods/constraint_functions.py create mode 100644 src/pquant/pruning_methods/metric_functions.py diff --git a/examples/example_finetuning.ipynb b/examples/example_finetuning.ipynb new file mode 100644 index 0000000..e713e80 --- /dev/null +++ b/examples/example_finetuning.ipynb @@ -0,0 +1,920 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "1ca31c67", + "metadata": {}, + "source": [ + "# Fine‑Tuning guide (Optuna + pruning/quantization)\n", + "\n", + "This notebook is a **hands‑on guide** for running hyperparameter fine‑tuning with Optuna, integrating pruning and quantization.\n", + "\n", + "It covers:\n", + "1. Experiment setup & reproducibility\n", + "2. Search space definitions\n", + "3. Optuna sampler selection\n", + "4. Objective functions\n", + "5. Running studies\n", + "6. Visualising results\n" + ] + }, + { + "cell_type": "markdown", + "id": "4acd3cb4", + "metadata": {}, + "source": [ + "## Environment & installs\n", + "\n", + "Run the cell below if you need to install the dependencies in this environment." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "23eab5d1", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Requirement already satisfied: optuna in /home/anastasiia/Desktop/cern/pquant/lib/python3.10/site-packages (4.3.0)\n",
+ " [... remaining 'Requirement already satisfied' lines for the optuna and mlflow installs trimmed ...]\n"
+ ] + } + ], + "source": [ + "!pip install optuna\n", + "!pip install mlflow==2.9.0" + ] + }, + { + "cell_type": "markdown", + "id": "62104e7d", + "metadata": {}, + "source": [ + "## 1. Finetuning configuration" + ] + }, + { + "cell_type": "markdown", + "id": "ec707227", + "metadata": {}, + "source": [ + "The most important part of the module is the `finetuning.yaml` config file, where you specify all parameters used during the trials.\n", + "\n", + "Let's break down every part of the config file together:" + ] + }, + { + "cell_type": "markdown", + "id": "0208a460", + "metadata": {}, + "source": [ + "# finetuning.yaml example\n", + "\n", + "```yaml\n", + "pruning_parameters:\n", + " disable_pruning_for_layers: []\n", + " pruning_method: pdp\n", + "\n", + "quantization_parameters:\n", + " default_integer_bits: 0.0\n", + " default_fractional_bits: 7.0\n", + " enable_quantization: true\n", + " hgq_gamma: 0.0003\n", + " hgq_heterogeneous: true\n", + " layer_specific: []\n", + " use_high_granularity_quantization: false\n", + " use_real_tanh: false\n", + " use_symmetric_quantization: false\n", + "\n", + "training_parameters:\n", + " batch_size: 128\n", + " optimizer: sgd\n", + " plot_frequency: 100\n", + " label_smoothing: 0\n", + " model: \"resnet18\"\n", + " dataset: \"cifar10\"\n", + " l2_decay: 0.001\n", + " momentum: 0.9\n", + " lr_schedule: \"cosine\"\n", + " milestones: [30, 80]\n", + " gamma: 0.1\n", + " cosine_tmax: 200\n", + " lr: 0.001\n", + " prune_ratio: 10\n", + " default_integer_bits: 0\n", + " epochs: 2\n", + " fine_tuning_epochs: 2\n", + " pretraining_epochs: 0\n", + " pruning_first: false\n", + " rewind: \"post-ticket-search\"\n", + " rounds: 2\n", + " save_weights_epoch: 2\n", + "\n", + "finetuning_parameters:\n", + " experiment_name: \"resnet_18_experiment\"\n", + " num_trials: 10\n", + " sampler:\n", + " type: TPESampler\n", + " params: {}\n", + " hyperparameter_search:\n", + " numerical:\n", + " learning_rate: [1e-5, 1e-3, 0.2] # [low, high, step]\n", + " batch_size: [16, 128, 32]\n", + " default_integer_bits: [0, 8, 1]\n", + " categorical:\n", + " lr_schedule: [\"cosine\", \"multistep\"]\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "b79025db", + "metadata": {}, + "source": [ + "## It has four separate sections: pruning, quantization, training and finetuning parameters.\n", + "\n", + "## 1) Pruning parameters:\n", + "\n", + "Provide **one** of the schemas below under `pruning_parameters`. The `pruning_method` value selects the matching model, and the remaining values are validated by the corresponding Pydantic model. If no values are specified, the defaults are used.\n", + "\n", + "## Common Fields (used by all methods)\n", + "\n", + "| Field | Type | Default | Description |\n", + "|---|---|---:|---|\n", + "| `disable_pruning_for_layers` | `List[str]` | `[]` | Layer names to exclude from pruning. |\n", + "| `enable_pruning` | `bool` | `true` | Master on/off switch. |\n", + "| `threshold_decay` | `float` | `0.0` | Optional decay term. |\n", + "\n", + "**Note:** layer names should match your framework’s naming (e.g., Keras `layer.name`).\n", + "\n", + "## Method Overview\n", + "\n", + "| Method | Model |\n", + "|---|---|\n", + "| `cs` | **CSPruningModel** |\n", + "| `dst` | **DSTPruningModel** |\n", + "| `pdp` | **PDPPruningModel** |\n", + "| `wanda` | **WandaPruningModel** |\n", + "| `autosparse` | **AutoSparsePruningModel** |\n", + "| `activation_pruning` | **ActivationPruningModel** |\n", + "| `mdmm` | **MDMMPruningModel** |\n" + ] + }, + { + "cell_type": "markdown", + "id": "3d301de9", + "metadata": {}, + "source": [ + "## CSPruningModel \n", + " `pruning_method: \"cs\"`\n", + "\n", + "| Field | Type | Default | Description |\n", + "|---|---|---:|---|\n", + "| `pruning_method` | `str` | — | Selects this schema. |\n", + "| `final_temp` | `int` | `200` | Target temperature at end of schedule. |\n", + "| `threshold_init` | `int` | `0` | Initial sparsification threshold. |\n", + "\n", + "**YAML example:**\n", + "```yaml\n", + "pruning_parameters:\n", + " pruning_method: cs\n", + " final_temp: 200\n", + " threshold_init: 0\n", + " disable_pruning_for_layers: []\n", + " enable_pruning: true\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "a3ea4a3d", + "metadata": {}, + "source": [ + "## DSTPruningModel\n", + "`pruning_method: \"dst\"`\n", + "\n", + "| Field | Type | Default | Description |\n", + "|---|---|---:|---|\n", + "| `pruning_method` | `str` | — | Selects this schema. |\n", + "| `alpha` | `float` | `5.0e-06` | Mask dynamics update coefficient. |\n", + "| `max_pruning_pct` | `float` | `0.99` | Upper bound on total pruning ratio. |\n", + "| `threshold_init` | `float` | `0.0` | Initial threshold value. |\n", + "| `threshold_type` | `str` | `\"channelwise\"` | Thresholding granularity. |\n", + "\n", + "**YAML example:**\n", + "```yaml\n", + "pruning_parameters:\n", + " pruning_method: dst\n", + " alpha: 5.0e-06\n", + " max_pruning_pct: 0.99\n", + " threshold_init: 0.0\n", + " threshold_type: channelwise\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "7c6e5cd7", + "metadata": {}, + "source": [ + "## PDPPruningModel\n", + "`pruning_method: \"pdp\"`\n", + "\n", + "| Field | Type | Default | Description |\n", + "|---|---|---:|---|\n", + "| `pruning_method` | `str` | — | Selects this schema. |\n", + "| `epsilon` | `float` | `0.015` | Smoothing/regularization factor for gating. |\n", + "| `sparsity` | `float` | `0.8` | Target sparsity level (0–1). |\n", + "| `temperature` | `float` | `1.0e-05` | Annealing temperature. |\n", + "| `structured_pruning` | `bool` | `false` | Enable structured pruning. 
|\n", + "\n", + "**YAML example:**\n", + "```yaml\n", + "pruning_parameters:\n", + " pruning_method: pdp\n", + " epsilon: 0.015\n", + " sparsity: 0.8\n", + " temperature: 1.0e-05\n", + " structured_pruning: false\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "be455bdd", + "metadata": {}, + "source": [ + "## WandaPruningModel\n", + " `pruning_method: \"wanda\"`\n", + "\n", + "| Field | Type | Default | Description |\n", + "|---|---|---:|---|\n", + "| `pruning_method` | `str` | — | Selects this schema. |\n", + "| `M` | `Optional[int]` | `null` | Optional grouping constant. |\n", + "| `N` | `Optional[int]` | `null` | Optional grouping constant. |\n", + "| `sparsity` | `float` | `0.9` | Target sparsity level (0–1). |\n", + "| `t_delta` | `int` | `100` | Window/steps for stats collection. |\n", + "| `t_start_collecting_batch` | `int` | `100` | Warm-up steps before collecting stats. |\n", + "| `calculate_pruning_budget` | `bool` | `true` | Auto-compute budget from data. |\n", + "\n", + "**YAML example:**\n", + "```yaml\n", + "pruning_parameters:\n", + " pruning_method: wanda\n", + " sparsity: 0.9\n", + " M: null\n", + " N: null\n", + " t_delta: 100\n", + " t_start_collecting_batch: 100\n", + " calculate_pruning_budget: true\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "fe226cde", + "metadata": {}, + "source": [ + "## AutoSparsePruningModel\n", + "`pruning_method: \"autosparse\"`\n", + "\n", + "| Field | Type | Default | Description |\n", + "|---|---|---:|---|\n", + "| `pruning_method` | `str` | — | Selects this schema. |\n", + "| `alpha` | `float` | `0.5` | Weight/penalty coefficient. |\n", + "| `alpha_reset_epoch` | `int` | `90` | Epoch to reset/tune `alpha`. |\n", + "| `autotune_epochs` | `int` | `10` | Tuning window length. |\n", + "| `backward_sparsity` | `bool` | `false` | Apply sparsity in backward path if supported. |\n", + "| `threshold_init` | `float` | `-5.0` | Initial threshold (often logit-space). |\n", + "| `threshold_type` | `str` | `\"channelwise\"` | Thresholding granularity. |\n", + "\n", + "**YAML example:**\n", + "```yaml\n", + "pruning_parameters:\n", + " pruning_method: autosparse\n", + " alpha: 0.5\n", + " alpha_reset_epoch: 90\n", + " autotune_epochs: 10\n", + " backward_sparsity: false\n", + " threshold_init: -5.0\n", + " threshold_type: channelwise\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "db8a4334", + "metadata": {}, + "source": [ + "## ActivationPruningModel\n", + "`pruning_method: \"activation_pruning\"`\n", + "\n", + "| Field | Type | Default | Description |\n", + "|---|---|---:|---|\n", + "| `pruning_method` | `str` | — | Selects this schema. |\n", + "| `threshold` | `float` | `0.3` | Activation magnitude cutoff. |\n", + "| `t_delta` | `int` | `50` | Steps used to aggregate stats. |\n", + "| `t_start_collecting_batch` | `int` | `50` | Steps to skip before collecting. |\n", + "\n", + "**YAML example:**\n", + "```yaml\n", + "pruning_parameters:\n", + " pruning_method: activation_pruning\n", + " threshold: 0.3\n", + " t_delta: 50\n", + " t_start_collecting_batch: 50\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "518913da", + "metadata": {}, + "source": [ + "## MDMMPruningModel\n", + "`pruning_method: \"mdmm\"`\n", + "\n", + "| Field | Type | Default | Description |\n", + "|---|---|---:|---|\n", + "| `pruning_method` | `str` | — | Selects this schema. |\n", + "| `constraint_type` | `ConstraintType` | `\"Equality\"` | Constraint form: equality / ≤ / ≥. 
|\n", + "| `target_value` | `float` | `0.0` | Target for the chosen metric. |\n", + "| `metric_type` | `MetricType` | `\"UnstructuredSparsity\"` | Which metric is constrained. |\n", + "| `target_sparsity` | `float` | `0.9` | Target sparsity when constraining sparsity. |\n", + "| `rf` | `int` | `1` | Regularization factor / frequency parameter. |\n", + "| `epsilon` | `float` | `1.0e-03` | Feasibility tolerance. |\n", + "| `scale` | `float` | `10.0` | Penalty scaling for constraint violation. |\n", + "| `damping` | `float` | `1.0` | Damping term for stability. |\n", + "| `use_grad` | `bool` | `false` | Use gradient information in updates. |\n", + "| `l0_mode` | `\"coarse\" \\| \"smooth\"` | `\"coarse\"` | L0 approximation mode. |\n", + "| `scale_mode` | `\"mean\" \\| \"sum\"` | `\"mean\"` | Aggregation mode for penalties. |\n", + "\n", + "**YAML example:**\n", + "```yaml\n", + "pruning_parameters:\n", + " pruning_method: mdmm\n", + " constraint_type: Equality # or LessThanOrEqual / GreaterThanOrEqual\n", + " metric_type: UnstructuredSparsity # or StructuredSparsity\n", + " target_sparsity: 0.9\n", + " epsilon: 0.001\n", + " scale: 10.0\n", + " damping: 1.0\n", + " use_grad: false\n", + " l0_mode: coarse\n", + " scale_mode: mean\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "925e3253", + "metadata": {}, + "source": [ + "## 2) Quantization parameters:\n", + "\n", + "| Field | Type | Default | Description |\n", + "|---|---|---:|---|\n", + "| `default_integer_bits` | `float` | `0.0` | Global integer bits for fixed‑point quantization. |\n", + "| `default_fractional_bits` | `float` | `7.0` | Global fractional bits for fixed‑point quantization. |\n", + "| `enable_quantization` | `bool` | `true` | Enable/disable quantization. |\n", + "| `hgq_gamma` | `float` | `0.0003` | HGQ regularization coefficient. |\n", + "| `hgq_heterogeneous` | `bool` | `true` | Allow heterogeneous bit‑widths. |\n", + "| `layer_specific` | `List` | `[]` | Optional per‑layer overrides. |\n", + "| `use_high_granularity_quantization` | `bool` | `false` | Enable high‑granularity quantization policy. |\n", + "| `use_real_tanh` | `bool` | `false` | Use a non‑approximate `tanh` operator. |\n", + "| `use_symmetric_quantization` | `bool` | `false` | Use symmetric quantization (zero‑point = 0). |\n", + "| `use_relu_multiplier` | `bool` | `true` | Apply ReLU multiplier optimization. |\n", + "\n", + "**YAML example:**\n", + "```yaml\n", + "quantization_parameters:\n", + " default_integer_bits: 0.0\n", + " default_fractional_bits: 7.0\n", + " enable_quantization: true\n", + " hgq_gamma: 0.0003\n", + " hgq_heterogeneous: true\n", + " use_high_granularity_quantization: false\n", + " use_real_tanh: false\n", + " use_symmetric_quantization: false\n", + " use_relu_multiplier: true\n", + " layer_specific:\n", + " - layer: \"conv1\"\n", + " weight: { integer_bits: 0.0, fractional_bits: 7.0 }\n", + " bias: { integer_bits: 0.0, fractional_bits: 7.0 }\n", + " relu: { integer_bits: 0.0, fractional_bits: 7.0 }\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "764e670e", + "metadata": {}, + "source": [ + "## 3) Training parameters:\n", + "\n", + "| Field | Type | Default | Description |\n", + "|---|---|---:|---|\n", + "| `epochs` | `int` | `200` | Total number of training epochs. |\n", + "| `fine_tuning_epochs` | `int` | `0` | Additional epochs for fine‑tuning. |\n", + "| `pretraining_epochs` | `int` | `50` | Pretraining/warm‑up epochs. |\n", + "| `pruning_first` | `bool` | `false` | Apply pruning before main training loop. |\n", + "| `rewind` | `str` | `\"never\"` | Weight rewinding policy. |\n", + "| `rounds` | `int` | `1` | Number of training rounds/cycles. |\n", + "| `save_weights_epoch` | `int` | `-1` | Save checkpoint at this epoch (`-1` disables). |\n", + "| `batch_size` | `int` | `128` | Training batch size. |\n", + "| `optimizer` | `str` | `\"sgd\"` | Optimizer selection. |\n", + "| `plot_frequency` | `int` | `100` | Steps between progress plots. |\n", + "| `label_smoothing` | `float` | `0.0` | Label smoothing factor. |\n", + "| `model` | `str` | `\"resnet18\"` | Model architecture name. |\n", + "| `dataset` | `str` | `\"cifar10\"` | Dataset identifier. |\n", + "| `l2_decay` | `float` | `0.001` | L2 weight decay. |\n", + "| `momentum` | `float` | `0.9` | Momentum for SGD‑like optimizers. |\n", + "| `lr_schedule` | `Literal[\"cosine\",\"step\",\"none\"]` | `\"cosine\"` | LR schedule policy. |\n", + "| `cosine_tmax` | `int` | `200` | Period (`T_max`) for cosine schedule. |\n", + "| `lr` | `float` | `0.001` | Learning rate. |\n", + "| `prune_ratio` | `float` | `10.0` | Pruning ratio; its exact meaning is method‑specific. |\n", + "| `default_integer_bits` | `int` | `0` | Integer bits forwarded to the training loop. |\n", + "\n", + "**YAML example:**\n", + "```yaml\n", + "training_parameters:\n", + " epochs: 200\n", + " fine_tuning_epochs: 0\n", + " pretraining_epochs: 50\n", + " pruning_first: false\n", + " rewind: \"never\"\n", + " rounds: 1\n", + " save_weights_epoch: -1\n", + " batch_size: 128\n", + " optimizer: \"sgd\"\n", + " plot_frequency: 100\n", + " label_smoothing: 0.0\n", + " model: \"resnet18\"\n", + " dataset: \"cifar10\"\n", + " l2_decay: 0.001\n", + " momentum: 0.9\n", + " lr_schedule: \"cosine\"\n", + " cosine_tmax: 200\n", + " lr: 0.001\n", + " prune_ratio: 10.0\n", + " default_integer_bits: 0\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "8706bd07", + "metadata": {}, + "source": [ + "## 4) Finetuning parameters:\n", + "\n", + "| Field | Type | Default | Description |\n", + "|---|---|---:|---|\n", + "| `experiment_name` | `str` | `\"experiment_1\"` | Name of the study. |\n", + "| `sampler` | `str` | `GridSampler` | Sampler selection for the search space. |\n", + "| `num_trials` | `int` | `0` | Number of trials. |\n", + "| `hyperparameter_search` | `HyperparameterSearch` | `{}` | Ranges for non‑grid samplers. |\n", + "\n", + "**YAML example: Grid sampler**\n", + "```yaml\n", + "finetuning_parameters:\n", + " experiment_name: \"experiment_1\"\n", + " num_trials: 0\n", + " sampler:\n", + " type: \"GridSampler\"\n", + " params:\n", + " search_space:\n", + " lr: [1e-5, 1e-4, 1e-3]\n", + " batch_size: [32, 64, 128]\n", + " lr_schedule: [\"cosine\", \"step\"]\n", + "```\n", + "\n", + "### Samplers\n", + "\n", + "| Field | Type | Default | Description |\n", + "|---|---|---:|---|\n", + "| `type` | `str` | `\"TPESampler\"` | Sampler class name (e.g., `TPESampler`, `GridSampler`). |\n", + "| `params` | `Dict[str, Any]` | `{}` | Sampler‑specific kwargs (e.g., `seed`, `search_space` for GridSampler). |\n", + "\n", + "### HyperparameterSearch\n", + "\n", + "| Field | Type | Default | Description |\n", + "|---|---|---:|---|\n", + "| `numerical` | `Dict[str, List[Union[int,float]]]` | `{}` | Numeric ranges, `[low, high, step]`. |\n", + "| `categorical` | `Optional[Dict[str, List[str]]]` | `{}` | Categorical choices. |\n", + "\n", + "**YAML example: TPE with search ranges**\n", + "```yaml\n", + "finetuning_parameters:\n", + " experiment_name: \"experiment_1\"\n", + " num_trials: 30\n", + " sampler:\n", + " type: \"TPESampler\"\n", + " params:\n", + " seed: 42\n", + " hyperparameter_search:\n", + " numerical:\n", + " lr: [1e-5, 1e-3, 0.2] # [low, high, step]\n", + " batch_size: [16, 128, 16]\n", + " categorical:\n", + " lr_schedule: [\"cosine\", \"multistep\"]\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "01c10957", + "metadata": {}, + "source": [ + "### After the config file is updated, we can run studies." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7676c175", + "metadata": {}, + "outputs": [], + "source": [ + "# Start a server if you want to use it locally\n", + "!mlflow server --host 0.0.0.0 --port 5000" + ] + },
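+ { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "As a minimal, self-contained sketch, here is how the `finetuning_parameters` block maps onto plain Optuna. This is illustrative only: the dummy objective below stands in for pquant's actual train/evaluate loop (not shown in this diff), and the `[low, high, step]` unpacking follows the convention documented above. Only standard `yaml` and `optuna` APIs are used.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import optuna\n", + "import yaml\n", + "\n", + "with open(\"finetuning.yaml\") as f:\n", + "    cfg = yaml.safe_load(f)[\"finetuning_parameters\"]\n", + "\n", + "# Build the sampler from its class name, e.g. TPESampler or GridSampler.\n", + "sampler = getattr(optuna.samplers, cfg[\"sampler\"][\"type\"])(**cfg[\"sampler\"].get(\"params\", {}))\n", + "\n", + "search = cfg.get(\"hyperparameter_search\", {})\n", + "\n", + "def objective(trial):\n", + "    params = {}\n", + "    for name, (low, high, step) in search.get(\"numerical\", {}).items():\n", + "        if all(isinstance(v, int) for v in (low, high, step)):\n", + "            params[name] = trial.suggest_int(name, low, high, step=step)\n", + "        else:\n", + "            # float() also guards against PyYAML reading '1e-5' as a string\n", + "            params[name] = trial.suggest_float(name, float(low), float(high), step=float(step))\n", + "    for name, choices in (search.get(\"categorical\") or {}).items():\n", + "        params[name] = trial.suggest_categorical(name, choices)\n", + "    # ... train and evaluate the pruned/quantized model with `params` here ...\n", + "    return 0.0  # replace with the validation metric to maximize\n", + "\n", + "study = optuna.create_study(study_name=cfg[\"experiment_name\"],\n", + "                            direction=\"maximize\", sampler=sampler)\n", + "study.optimize(objective, n_trials=cfg[\"num_trials\"])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Once a study has finished, Optuna's built-in Plotly helpers give a quick view of the results; the MLflow UI started above is also available at http://localhost:5000. This reuses the in-memory `study` object from the previous cell.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from optuna.visualization import plot_optimization_history, plot_param_importances\n", + "\n", + "# Inspect the study created in the previous cell.\n", + "plot_optimization_history(study).show()\n", + "plot_param_importances(study).show()" + ] + },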
+ { + "attachments": { + "image-2.png": { + "image/png": "[... base64-encoded screenshot data omitted; the patch text is truncated at this point ...]
F52/ILZW7C+fGCsLl64TfY07W6WKZWTEl3k3Mnzz4zR5uYBK8s45LCOAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgggUJDA4/dcLN0vbm1t1jDzLQ9/bPJQOT7DLzyvmTz36BVW8VPvwpuFBbE15N2i0xM++7RzYEXJttkZqNQDGdLaFFW12+hBvUQDv3tMgdF2lzxrd1uPWgxWc2tBJodZUBBbC4v6y34NHfCg1KyWZO2nc4+XZOOWXa59Hy8rRQliP3BrF7nhynaeQ77+vvdl/t9rrPVAc1bXdmsrD9/R1drHsNGz5ZnXfvDsXxe6dGgqfXvlZXXtquMnUi6KILbrcrpXyjRsKolNWrk7zVpW6n5Z+eOXPv2H6oir3VASm7WR4NBQ17BVw7+VjD0F/9El1qkqza7qJDHxeZWlD6ZnyFITwl45ZY4czMzy7CvI7De2Rn3Z+88SyTmY36/h7Ljajcx5nC7BYWGe8albN8r6iSNMFWuvMLeOCAqWSmd3tkLXnglmITszQ0LCI5xdsm/NStk0fYJolXDvVq5FW4mve4qrO8cEwIPDwh19uZKycZ21j+yMdEf/kS8Gm/D6hMFPSpm4kvLOp2NkwDeTDrlTu/y9hoY1PGy3dq3qy9sv3GCtPvfGUBk68nd7k+vxusvPlIduv0BmzFkudzw2wLVNQ7YfvHSz35C2a+AxXrE/rH6fu1L05yMKantMyHWRudNFm34wjfzqcQkODpI167fLRTe8avUP/vgBqVMjL8x/80Mfyez5/1j9ziC2dsxftFbeMj89sXDpejn/7Mby2F0XWT9poNve+mSUfP7dZF2UuNgYGTPoCYmMDLOqEn/27a/y/S8zZfuOfdKqeW3p+0R3iS0VbY294ra3PEFn+5yOdRC7TYs68l6/ntYHdYb5svDyuz/L6EnzzF0+OXLlRa3k/ls6W1XVddvppiq7VhF3Bos79egnm7bsts7P+Y99ft4/92HfvaXV4Pv2/0mGj/9TdN/6Mw36hUGbVizXa7N81WZrXb8A9XnkCutaHk7lbmvy//GfgoLYegi/j3zReo3Yh+N8DWmfvyC29g/55EGpVT3vy5D+DetPiCw1Yflzz2wkj9zZ1Xof0XH9B4wWfe1p0y8WY77pZQW0dV2vwSATeNYQ+5UXtbZC2vrFTJsziK3rP332sLkWiboov81cIh9+Od76m9IvdP3Ma1l/4kTbp+Z9623z/qXNXxB70Hv3SMO6eTe8fGb+Vj4y+9E7wvRnQfQmFL3bTlsXc8PDBnPjAw0BBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgUAFNJM3c/jznnyOFkN86Z2fZdofS0UD1M1MkUjN9ugvz9vtAVO4dJIpYGq3goLY335wr9SvnWwN06KImpvZtz/NylFpp52TOpIgdv/nb5CzWte3nmPE+LlWIFizPprve7X3NVIiJtLaVlAQWzdqkdLXPhghI0weq2bVJLnn5k7Sqlkta966jTuk63WvWMvH4z92EHvn7v3S++XBnkNU9zNOq2PyU+UlJjo/I/rD8Fnywls/esbpQiA5K3XVwpX2vnW/mgvcn5omXc9tZhW5DQ0NsYqLtrrgKSvndiLloghiu14ijhUTnqvc4SKJScoLwzm2yO7li2TL75OdXYUuV2x7npSqWtOMywvl2ROW//C5ZKcfsFd9HoNDQqTlDRdKxUZ1JH1/iiw21bA3zDNvWuYFaLfQqBgp06iZlK5WW/756WsTmM4LNAeHhpn+5iaI3cAdoDYBzzVjhppq2FvtXfg8WsdbxRzvvyFCnwGmQ4PZG6aMkQN+qmHr+NI160mFVuf4m+rp04raG6eNl/3r8kK6ng1HYUFDiFr9d5MpVd/p6n6F7lFD1Bqm1rtWPvpqgkyesVjataont193rhVM1R14hzqdO33q/kvl8q6ny89j58jTr+S/SekYuzKts5quc+7xsmx/WBV2PPtT0uSMi56xho00lYKTy8db4egLrn3ZE/bUqr9jvnnCstM7i840dxDph5YdxNZlDSFrsNpf03DqPU997tqkodLvPrzPqpbt2uBY+WnUH/Ls60M8PfY5Hesgth7QVeZusF7mrrCCmla8f+yFb2Ts5AXWkCMJYtsVuJ3PZX+p+frdu6VRvcqeTXotNDAcYqqJa9Pw9u3mZoI/5q30jDmeFg4VxH72oW5ySefTrMPV82rRyYTaHXfdFRTEDg8LlVGDHrd+WqSgc/V3N9ZpTWrKx6/eYvn5m7dl2x5JMhWrvYPYEeGh1t9HfFwJzzS9/nZwWzvXbtguF9/4uuf4/QWxmzaqJp+9ebtrngbudf920zvS9M40GgIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACRypQp2YFGfDabZ5im/b+vLMv2j9sjKl+/Kq7+nFBQexberSXu3t2tHdnPY79dYE8+sIga9nOgR1JEFurVmu1bA392s153GnpmRIVGV5gRWwtpKmFS/01nXveVS9a4XF/24+HPjuIXdixqIlWpn7/i3E+QwPNWWmG8qdPH/YUvfTZsenQ59PsprYTKRdFENu6ZL7/aIXpqh0vlcj4sj4bV434TjJ27/TpL7gjSJLP6iQlK1VzDTmYniarfh5kBZpdG7xWQsLD5LxeN8nk/oMkbc9+19bwUnFS4YwOElUmr7Lqsu8/FVPmWqLLVZSyp54ukbHxPmHqbfNmyc6//3Ttx3slqeVZElurvivc5z1m97K/Zfv8meb4M703WeuRZcpJ1U6XHXIf6bt3yOqRJrRs/nCPZtOy9N0uaGndfaIhbP0pgaK0d168Uc48vZ5rqN6ps37zTqlhKgk7Kzu7BpmVHpeeIY/edaFoNelbH/nYtblty3rybt8bZcWqLdLtljdc246nFfvDqrBj0juN2l78jNx41VlWJWcd/+1P0+UlU+XZ2e4zd/v07H621TViwlx5st93niC2/uRE9zvflreeu06qJOf/nemb+Mw5K+SuXp967mZy7lODr/ozBdVN5eCwsA9wsO0AAEAASURBVPwPxJTUdOuNWCsSO5t9TlrhWD8cvJt9bbTfWUnb/tDxVxlaj+GT1261dnXdPe/JgsVrXbu1g87+gvca9r/KVEsuXy7OE/DXyVqt+Fnzkwt25XDtcwaO9UN66/a92u1q8ye8bP2NaTXk/qaCuN20grjepdXslOqe53nkua9l3JS/rCFqqMfhNNSbEDaY1/ptj37it/q2ve9j/di5fROrarQeR7OOvayAv31Met567TTQPGnaInngmYH2JuvRDmJv27FXzr3yRdc2raj+2jPXWnfW2Xdf6etxx679MuG3hT6vb3vyZV1ayqN3Xui6qUDD0K99MFyaNKwqerzeQWydW8aEsF9+qofr+bRfA+R6x93z5m4yXbabfTfZ5q175Pyr+9rdVpVzrbZe3gS+nUFu/Rv7auhv8u5nYz1jWUAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAA
BBBBA4EgFNMisWbvTm9byZJOc+9Tc1CPPfy1zFqxydlvLWtyzfLlYv1m6V5++Rtqf0dBTUFKzTpp50mbnwPxVabYGOP6xq2trAdHWXXs7tohV3PXNPte7guRauHKAqcDdqlltOaV+ZXHmvjR4Pvij+6193PvUF9KhbSPpel5TV05Hs0j3PPmFLF250fVcx9vKpCG9TWappN/D0pySek011c3f/3ycbNyyy+847Qw0Z9W4fhV5pXcPq7Clc+eaR3zm1cGuyum6vbrJbJ4IuSiC2M6r6VgOjY6Ryu0vlAgNMjtabk62LB30oaOn8MWQyChJbttRor2qa+9askC2zZtpctP5QbvC95Y3IjgsXKITk0wI+zxXteuD6ekSGmFKw/upZK3HnrJhrWye9atkZ+RVzS7o+RKbtpL4eqdKkPkpAX8tKzXFVAWfIikb1/jbbPVFxJYxVcUvlNCoaL9jck2V2nUThxdYUdvvpCJ0Xn9FO6tUvb4xXH7rW7JqbcGVv/3trsWpNeTC85pLpQrx8veyDaKVlIcMeEAqmODsaZ2esMre+5vXsG4l0XL4a9Zvl4tueNU1RMPIGkrWMOdDfb5ybStuK3ZFbA2JtuzypHX6+tMDTRpWM2/k6bJ4+QZJz8gqEotWyI4tHWN9gOmb8YnUgs3faOMGVURvQVhsXmeZWQf/k8PXnwOJj82rurxj1z6f5yhVMkoa1Kkk+jMhK1dv8dleXDs0JF3D/HSIVpMu6rXRD/6KSfGiPzGi1awPpyUmlJZa5o47fb/abKpoB9rq1qxofdH5a8lacyOK/5tkAt038xBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQ8BbQzEzblnVFM2BLTPZrzl+rjrgqtO4rMiLcBKL3+y3m6X0MgaxHR4XLqabI4k4TGl/xz+bDeh7NZDWokywlS0TK8n+2mEKPvrmsQI7pRJwTSM5KM391a1QwWcwsWbh0vatYZUEGx3MuiiB2AVetZKXqktSynU+IWAPMywd/WsAs/90lq9SQpBZtzb5iXAM2TB4t+9f73vHhGuRnJSIuQWJr1JX4uqf4DVz7mSI5WVnWc+1YOEcy9xUe8osz+y7XtLUEheRXHLb3qwHqvauWydY508x+Cw76hZcsLRXbnS+R5nj9tZSNa2X9pJFm09Grhq13m7z+7LWilX1vevBDmbtwtb+n9tunb6olzRv41N+XurZrQHLcd0+YP/YcaX5+L9c254q+uc4b/5L1pqB30WhFXLt9+fZdVuhWK9N+Mmii3V0sH/0FsYslBCeNAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIntABB7AIuX2ITUxG6vm9F6JRN62S9qeJ8OK1Mw2aSeGpLn9D0+kkjTEXptYezK2sfWqm67CktJDgsrEhzszMzTOXtWaYa9ho5eCClSHNiKlSWZBOiDg71fY6DaQdk49SxcmDrpkPuS6uKl295lpRIruozLudglqz/daSphn30SvFrReqv3rnb+qmDB54e6FOm3nkQbVvWk/q1K5pQ9CQT2s6xNg37/GGpVjlRvvh+irz5sQbE85odop4yc7HoTwvY7brLzzR3suyXURPn2V0yfOCjUjk5QYaNmW1K5f9g9eudGN9/dJ8VDu/co98RVbv1PNEJvEAQ+wS+eBw6AggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgh4BAhieyjyF4KCQ6xq2LE16+V3/ru0acZE2fuPu2KyzyCvjgQTmi7b+DRXb1bqftn421hJ27HV1V+UlZDwCNFq04lN20hUUnkJMv/z17R6986//5R961ZZAWytZF3UFlailFTrcoXoc3m3PSuXyJbfJ0th+wsOCzfn3VLi65nK3V5t7+oVsvWPKaIh8aPRYqIjZfLQpyU8PNTa3doN2312e/tjA2TTlt2iPykwY/jzEhQUJO98OkYGfDPJGtu6RW354KWbreWFS9bJxi27pE2LOubnA6IkJTVd2l3ax1MCv+NZjeWV3j2ssZ17vGSN1ZXk8vEy/MvHrDD4ytVbZNvOfdKySU0JCQmWDwaOlw+/HG/NKc7/EMQuzlefc0cAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEDg5BEIOIg948cnfRRGTFwgfd8b4dPv3eFv7tDRc+T1T8Z6D3Wtx5aKllFfPODqO3gwW778cYYM+O43V7+ulCwRKS8/frmcWr+ya9vKNVvl1l4DJT0jy9Vvr2iAuHzLdlKqWm27y3rMzc2R1SMGS8aena7+Q60EhYRIyUrVJCapomtY2q4dsm/NSsk5wiByUGioRJSKk6jEChJeqpTkZmdL+o5tVsA7Ky1VTBlm1/MWfSVIqnbuJqERkT5T1o4bJhokL7SZoHNs9bqScEpz19Acc4zb5s6wKnS7NhzBigagR379+CH30P2Ot2Xx8g3WmGk/95ESMZFyz5Ofy9Tf84P1F57XTJ59+HIrOG3vLPVAhtxhQtwLFudXL69ZLUmGfPKAZGVlS9uLn3G9lq65rK08dPsFVhjb3sfs+f/IzQ99ZK8W68fqVcpJLeO3LyVNZs5ZXqwtOHkEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEDhxBQIOYgcH+1Zhzs0VydV/Cmn/z7la9dj839UKO86IuAQpf/pZEpVQzjUvJytT1oweKhl7d7n6T9oVbzj7RItwje2h1qO//RzuPlw7PPKVYHNMYWEhkpF50O/OGtWrLA3qJItWtZ6zYJXfMaGhISbnnmv+8w27a8hbq2lHmerbf/61StZvLHp43++T0YkAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgggcFwJBBzEPq7O4igfTMnKNaV8q7MlJDzctef03Ttlw+RRkpWyz9XPCgIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggULwGC2F7XWytox9ZpKEktzvTaIrJryQLZvuB3ycnK8tlGBwIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggUHwGC2F7XOiQiUsqd1lZKV63t2pKbmyubZ/4qe/9Z4upnBQEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQSKnwBBbK9rXqJiFUk+u7MEBQW7tuQczDJB7Emyb81KVz8rCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIFD8BAhiO655eKlYqdDmXIlKSHT05i2m7dgqW2ZNlvTdO3y20YEAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggA
ACCCCAAAIIIIAAAggggAACxUuAILa53lr9Orp8spRp0ERikpJ9XgG5ubmye9lC2fbndMnNyfHZTgcCCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIFC+BYhfEDomIlIjSZSQ7M02CQsIkMj7BhK8rSlRieQmLLuH36mfu3yebpo6TtJ1b/W6nEwEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQSKl0CxC2KXrlFXyjVrIxrILmrbtWSBbJ0zrajDGYcAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACJ7lAsQpiBwWHSGKz1hJf95QiXdbcnBxJ2bRWNk4ZI7pMQwABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEVKBYBbFDo2Mk6bQzpWSl6oVe/eyMdNm3dqVsmztDcrKyCh3PAAQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBAoPgLFKogdU76SVGjTQUKjogu8wlr5Om3nNtmzYpGkrF8j2ZnpBY5lAwIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggUT4FiFcSOKpskZRu3lPDYOAkJj5SgkBAJklzJOZgt2RlpkrZ9i1UFO23bZhPAzhANZdMQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAFvgWIVxPY+edYRQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAIFABAhiB6LGHAQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBAo1gIEsYv15efkEUAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQCAQAYLYgagxBwEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQSKtQBB7GJ9+Tl5BBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEAhEgCB2IGrMQQABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAIFiLUAQu1hffk4eAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBAIRIIgdiBpzEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQKBYCxDELtaXn5NHAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgUAECGIHosYcBE4ygW5v9zrJzojTOVoCQ+7td7R2xX4QQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQOKkECGKfVJeTk0EgMAGC2IG5FYdZBLGLw1XmHBFAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBAIRIIgdiBpzEDjJBAhin2QX9CieDkHso4jJrhBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBE4qAYLYJ9Xl5GQQCEyAIHZgbsVhFkHs4nCVOUcEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAIFABAhiB6LGHAROMgGC2CfZBT2Kp0MQ+yhisisEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAIGTSoAg9kl1OTkZBAITIIgdmFtxmEUQuzhcZc4RAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAgEIGgXNMCmcgcBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEECguAoQxC6uV57zRgABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAIGABQhiB0zHRAQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBAorgIEsYvrlee8EUAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQCBgAYLYAdMxEQEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQSKqwBB7OJ65TlvBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEAhYgCB2wHRMRAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAIHiKkAQu7heec4bAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBAIWIIgdMB0TEUAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQKC4ChDELq5XnvNGAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgYAFCGIHTMdEBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEECiuAgSxi+uV57wRQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAIGABgtgB0zERAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBIqrAEHs4nrlOW8EEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQCFiAIHbAdExEAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgeIqQBC7uF55zhsBBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEAhYgiB0wHRMRQAABBBBAAAEEE
EAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAoLgKEMQurlee80YAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQACBgAUIYgdMx0QEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQKK4CBLGL65XnvBFAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAgYAGC2AHTMREBBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEiqsAQezieuU5bwQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBAIWIAgdsB0TEQAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQACB4ipAELu4XnnOGwEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQCFiCIHTAdExFAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEECguAoQxC6uV57zRgABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAIGABQhiB0zHRAQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBAorgIEsYvrlee8EUAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQCBgAYLYAdMxEQEEEEAAgf+xdx/wVZNrHMcfoGWUXfYSkCnIliEbUUCZ4kBFRUFQFBQFAbdXxAUiCiiouEBABUWGIIoMGQKyZW+QvaGssu77pCQkp6fltD3Flv5yP/Ykb968Sb5pb085/zxBAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAIKUKEMROqVee80YAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQACBeAsQxI43HRsigAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAQEoVIIidUq88540AAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAAC8RYgiB1vOjZEAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgZQqQBA7pV55zhsBBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEE4i1AEDvedGyIAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAilVgCB2Sr3ynDcCCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIxFuAIHa86dgQAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQACB4AqMmrFBpi/dKZt2H5Nz5y4Ed/AUMlpISGopli+LNKxUQB5oUCLRzpogdqLRMjACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAKBCew8eEJe/XqRrNtxJLAN6BWQQKlC2eSNdlWlQI6MAfWPSyeC2HHRoi8CCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAKJINBhwExC2IngqkNqGHv4c/WDPjpB7KCTMiACCCCAAALJW2DhwoUyaNAg5yR0Wadq1apZr9WrV7deu3TpYr3yBQEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBImMGrGBvl4wqqEDcLWsQo82aKsPNCgRKx94rqSIHZcxeiPAAIIIIDANShgh6/t0HWgp9i1a1erK6HsQMXohwACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIBAdAGqYUc3CXZLYlTFJogd7KvEeAgggAACCCQjgfgGsH1PMT6B7Iceesh3mERf1mrehMYTnZkdIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCMRRoP7zE+TcuQtx3IrucREICUktM/u1iMsmV+xLEPuKRHRAAAEEEEDg2hQYPHiwDBo0yO/JVatWzWrX4LLvFNM22k8D2YEEnWPbt+/+gr2s5zZixIhgD8t4CCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgjEW6D2s+PjvS0bBi4w54NWgXcOoCdB7ACQ6IIAAggggMC1JqDVqLUatu+kQWoNKttBbN/17uWYwtSBhLFj2r97/MSaJ4idWLKMiwACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIBBfAYLY8ZWL23YEsePmlWx7r1m7TgZ9PEz+nDtP7mrVUrp07iQ5c+ZItufDgSOAAAIIJB0BfyHoQMLTMZ2Bv0D2lcLO7mPQvv4qb8e0v/i0L1iwwAmeX+nY4jM+2yCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgggkBABgtgJ0Qt82yQXxP5m5GhZt35D4Gfg6lmh/I1y792tXS3M2gKt7rlfVqz8x16Uxzt2kF7duznLzCCAAAIIIBAfAd/QtIaS7SrY8RnP3kara2u42j3FFnh2B7ETEgJ37y+2eff+Yjuu2MZgHQIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACiSVAEDuxZL3jJrkg9sPtO8mcefO9RxngUtPbG8ugD/oH2DvldDtw4KBUq13fc8KFChWUWb9N8bSxIHLx4kU5feaMhyJ9unSSKlUqTxsLCCCAAAIi/kLYI0aMCCqNO/CsA8cUsnb3i6lPMA/MvT+C2MGUZSwEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQACBYAgQxA6G4pXHIIh9ZaNrokfrNg/KsuXLnXOhIrZD4ZlZvGSp3PPAw562H78bJRUrlPO0sYAAAgikdAHfELZ6rFu3LlFY3KFn3YGGvTX87J7cfQhiu2WYRwABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEE
EEiJAgSxr85VJ4h9dZz/872sXbdePhoyVBb9vUS0cniXzp0kZ84c//lxJbUDUJ82D7bzHNYPo0dIlUoVPW0sIIAAAildwDeI7S8cHSyjhQsXigat7clfBWqC2LYOrwgggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIiBDEvjrfBUk+iJ0jPIcM7P9OQBq5c+eSEsWLBdSXTgj4EyCI7U+FNgQQQMAr4BvCjksFandg2l+g2runy0u++/QNfrvHjcvxXN5D3Obc+4vLecRtL/ROTIFz587Jlq3bZNPmzXLgwEFJnyGDlL2htNxQulRi7jbeY2/YuEkuXLjgbF8gfz7JlCmTs/xfzSzdeECWbjoQ5923b1w6ztuwAQIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggELkAQO3CrhPRM8kHs0qVKyi8/j0vIOSb6tufPn5dUqVJJ6tSp472vs2fPyjkzTob06eM9hnvDM2fOSNq0aa3jcrfHdz4yMtLaVMeMbbp48aKcOn1awkyYKb6Tbh+SJo2EhobGdwjPdnGxDUYQ+/TpM+bYQySNOQcmBBBA4FoU8A1Fr1u3LuDTTEiAuVSpywFZ3/Cze9yEBLG1+rb+p+PrfzFN7v35HktM21zL7d+MHC3r1m+I9RTTpg2VPHnySB5z49x11xWSiuXLm9+V8X/vFOvOYll5/vwF+eCjwfLxsM+i9Wp7fxvp89rL0dr/6wZ9b1Gm4k2ew3j3rT5yT+tWnrarvfDFr2vli6lr47XbQU/VlkrFc8ZrWzZCAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgSsLEMS+slEwelyTQeyly1fIx0O94ZriplJ2r+7doplpwLh7rxdFAy72lMGEiD98/10rxPz8C6/IkSNH7FXSulULub3xbbJ9x78yZOin8vfipaaa4lZrffFixaThLfWl3YMPSN48uZ1t/M1oCGj2n3Nk1Hc/yLLlK+XgoYNWt4wZM5qq3sXlrjtbSIumd0jmzNErHe7dt19efu0Nz7C9ejxrBa8HfDhYps+YKSdOnLDW/z1vtoSHZ5c+b78n27fvcLa5rWEDuffu1tZyTOPlz5dXhn81QkZ/P1b27Nlj9bXP8emnnnBC41qdcdxPP8uPP0+Uf1attvZdtEgRqVOrpvGoZ706O/Yzc/LkSZk4eYqMNWOsXbfeOXathl6+XFm579675Zb6df0Gm+fO+0u+GvGtM6qGn4d8OEAOHz4sw78eIWPHjXds8+bNKzeWLSOPP/aoVKlU0dlGZ958p59s27Zd9uzdJ6tWr/asK1/uRsmVMyoodHONatK+3UOe9YcOHZZx43+WNWvXyfIV/zjfD7qd7q+c+U+/b4IVLPfsnAUEEEDgPxBwB6LjGnpOSIA5tgC4e9y4HpNN6B7/SuFq9/6u1Nce/1p+fbh9J5kzb36cTlF/L993z11yf5u7nd+zcRognp179H5ZfjS/t/1NyT2Ire9nL5r/2VNac1NbYt4Y5g5iBxqq1graOrVvUlqoim1fKV4RQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBAIvgBB7OCb+hvxmgxiawilaau7nUCsfeJjRnwl1apWsRet1+FffiN93+3naXv+uW7SuVMHq61clRpOMFgbnn26i+QzAeWeL8ReLfGzTwZJwwb1rTF8v2jw+fGnnpYVK//xXeVZ1lD2F8M+lqo3Vfa0b9y0WRo1belp+3r4MOnz1nuycdMmT/uCOTOscFOjpq086zQs/trLL1h9/Y337dfD5esRo2Ta79M949kLGrT+/tuvJV26tNLh8S6yaPFie1W01wHvvS2tWjSL1q4NGprv1PlpJyztt5NpLFumjHz28UeSN28eT5cx34+TF1993dO2+K8/pW27Dlao27PCtdDVBMmf7fqU03JHy7ti7W931PPQ87GnhYsWy1PPdL/i8VesUEE+/vD9aMdvj8MrAgggkFwE3GFlPeZ16wKvhq39ExJg1krVur09jRgxwqla7R43PkFs9/b2+LGdm7s/QWyR+ASxbWe98WrQwH5So1pVuynRXvWpG2Ur+t+PBsPvvetO6db1yUTbf3wH1ve2V6qIrU9oKVHWe6PZCz27S8f2j8R3t1fczg5iawhbK1wHMnUdMkc0jE0QOxAt+iCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIBB/gcQMYmdIFyI1bsgjqVOJ/LVmr5w4fS7agd5Zq6g0qVpIxs7eLL8t+Tfa+mul4ZoMYuvF+XvxErm3bTvPddIw7/gfRpvKgKmtdq1kXO+22z1B69KlSsqEcd9JSEiI1cc3iF23di2ZPWeuZ9yYFkZ88ZnUqlnDs1orLje/894rBnfdG2nIWqtL25O/4LRWX/YX7I5vELtK5UqyeMlSe5d+X7Vq5Plz52XMD2P9rnc3Dh86RBqYqtbuSatZP9S+o7sp1nkNak386XtPtXF/QezqJsi1YOGiWMfSlSO/+lxq1qhu9YtPEHvSL1Pl6eeev+J+7A4arJ88fqxcV6ig3cQrAgggkOwE/ssgtmLFFIB2t8cliK3h7kGDBom++k4EsX1FYl5OSBDbHjW2G7fsPgl91RvA7mrT1jPMw20fkM6PPyZ5cufytCelhfgGsXt2f1ae6Ng+0U6FIHai0TIwAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCRZIjCD2jUXCpV/HmyVzWKjn+I6djJQen86X1dsOO+1vt68udcrlk4l/bZN3v4s9j+pslAxnrtkgtl6LV9/oKyNHjfFclvfeflPuvjOqmvTrfd6Wb74d5Vk/8ccfTPXl0k6bbxDbWXFpRsPBBw8d9G22lguZwO20SeNN1eh01vLFixflsSe6yIxZs6P1v7XhLZIrRw6ZMftP2bNnj2e97uO3KT9LtqxZrXZ/QWzPBq6F+AaxXUOIVr/esnWru8nvvJ6vhttPnDgRbX3T2xvLoA/6O+1Hjx2TW5u0iGan51q/Xh05c+aM5eQ7Vr26dUyV8CGSKpW5jcJM/oLYzk7MTPFixeTw4SPR9qN9NHT/y8/jrO69X35NNm3eYh2/77nq+YeHZ7f61a9bW556opNo1ck6DZtEu1b33t1abqpSWXbt2i1/zJwVLRz/TJcn5Zkuna2x+IIAAggkR4H4Bp7tc3VvH59K0jFt724PNIjtGyq3j9F+JYhtS1z51TeIrb/PB/Z/x9nw7Llzsn//ftm8dZuMHTfe7+9l7fz7lIlyfdEiOpso0+jvx8pLr/7PM/bcGb9ZTzvxNCaxBYLYSeyCcDgIXEMCekNFTFOlYjlFK94zIYAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIBA8hQIdhC7zo355C0Trtb45tnzF2THvghTETuVFMiVUUJNgWQTkZVen/8l81ZHZWAJYsfv+yaVCRsbyvhPvkEeHUkDvleaQtKEyE/fj5IsWTI7XY8
dOy4NmzT3hH00GDTzt8mya/ceadysldNXZ7p0flyee6aLpy2mIPbjHTtI2/vulYIF8lvh3SnTfpNXXu/j2VYX3NUd/5w7T9p1eNzTRytZj/rmCwnLkMFpHzL0U3l/4CBnWWeee6arOb5OVltMQWw9tzf/94pUqlhecufKZQLIhyV79qgAcaOmrWTjpk3OmO0efEBee/mFWMd7wJzfC88/J1rJ+dTp0/Jsj94y7ffpzhj2jF6fbz4fJoULX2c1TZ8xUzp27mqvtl712BbNm+m0DfhwsAz+ZJizrDN6TK++1NsJWZ8zoa2OTz4ts0w43T2NGfGVVKtaxWqKKYjd/pGHpeuTj0vWLFmsflN+/U2eeuY59zDW/LKF8zzfM4v+XiJtHvRWUv9h9AipUqmiZ9ut27bJLY2bedpeebGXPPrwg562Tk89I79P/8Npy5s3r8z541dJnTqqKruzghkEEEAgmQgEGnjWfv4m38rTGsb2napXry5dunh/H9t93OFpd5A70OPyN47d5vtKENtXJOZl3/dv7pudfLfSm5m+H/dTtEC09tP3Rfp+zr7hyndb3+WIiAjJkCHMedqJ73rf5S+/GSl93nrXafZ9f+KsiGVGbxLT90ZXc0qOQeylGw/I0k0HojF9MTUq9KnhTn8Bz/aNL98QGW1jGhBAIOgCsf3DS/smpYWfyaCTMyACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACV00gts8D43MQX3SvLyULZpPNu4/JYwNmSeS589Yw6dOmkeFmXeHcmWWTWdfuvajMJEHs+CiLJEoQO9BDWTR3luTIEe7p/vsfM6XTk95QcNcnn5AV/6zyBHy16vHk8WMlffqo6tX2IP6C2M3uuF0+GvCe3cV5HfzJpzLgQ2+A+o4mjWXwwP5Wn14vvSo/mOCRPWn455cJYyVXTm+VMc2yP9O9l0z6ZYrdVfT4pk+daC37C2LrWGNHf+OEoZ0NL83ENYhdtUoVExAfboJNaZyh/IWAdOWMXydH22/fd/rL8K++drbVGbvi5IULF6T2LY091aTr1q4lw02la/f+dJvjxyOkWet7ZMeOf3XRmh584D5549WXrHl/QWw11+vjG3Z+9/2BMuyz4ZdGiXrxrYAeaBB78dJlcs/93pDhyK8+l5o1qnvG37Nnr3z97WinTe8EebbrUxIa6i3L73RgBgEEEEjiAqVKlXKOMLbK0+5+zgYBzrgD1r6buIPYus4OS8cliO07hu8+7GV7bHvZ/ereX2zH697mWp6PSxDbdpi/YKG0bdfBXnReNYhdoXw5Z9k9s2fvPvnuh3EyecqvnhvM9KawGtWqygNt7om2rT5lo+tzPeWief+xfuNGz3sKHbthg/rOLu5s2VzuaNLIWdaZmbPnyF/mWFevWStLl6+wnvyhQWx9Aka5smXk5hrV5Obq0W8oOHv2rDzVrYe1X3vAx9q3k+pVb7IXPa8fD/tMli5b4bRVqlhBnnz8MWvZ33uwd9/qI/e0biVjf/pZfp32u+j7K9+nrqhLyeLFrTGyZcsm/d7u44wfjBmtpKvBag1UD3qqtjOk3e40xGFGx/EX0I7DEHRFIEUKxOXnzv1YrK5D5jheegOFeyKI7dZgHgEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAIHkJxDsIPbM/i0kxFS+7j5svixYu9cDoiHswV1ry4nT5+S+vr9Z69xB7JnLd0nLmkWkcJ5MsmnXMZn01zZZuG6fZwxdaFajsNQvn18K5Mwo+46ckuWbDspXv60zuYjLNaLvqVtMqpbMJSP/2CDF8mWR2yoXlPDM6eTh92Y44fBi+bNIq5pFpbLJNGzZc1x+XbxD5v6zRy4krNZ0tOPVBvdnsH47xLExyQWx9fi7mBDML1N/jfVU/FU91g38BbH/WbJAwsLCoo133pRav6VJU0/AR4M6Kxf/ZfX1HcsdKPYd7I+Zs+SxJ7zVQOfN/F3y5s1jgkebpVHTlp5NHnmorVVN2tPoWohrEPulXs9Lh0cfdo0QNdum7SOyaPFip90dEHcazczEyVNMmLynu0mmTvrJCgOtXbde7mh5l2fd4IHvRws+2R3e6TdAPh3+pb1oVUif9VtUSN1fEHv40CHSoH5dp7894y88/fnQwXJL/Xp2Fwk0iK3V1itWq+lspzNa7fqtN16TWjdXJ2jtkWEBAQSuJQF3wDq5BrG1KndMFbvd14ogtlsj9vn4BLF1RN/f8drWqcOj0ts8kcN3Gv39WL9VtH373XVnK3nz9ZclXbqom+u0anb5m2727eZ3+dmnu1hP1NCVJ0+elDdM9ezvx/7ot6+7sZu5yUqfXOK+CUyfJlK2YlV3N+n/Tl9p3aqFp81e0KeJ6FNF7EkD4p99EnWDX2xB7Lfe7S+ff+m9+c0ew/3qfk/qbk/IvB38jC2IHWio2g6AEvxMyBVh25QsYP88BmLg7x8B9GfQHcrWcfh5DESTPggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggkXYFgB7Gn9G0qmcNCZcL8rfLe98uueOJ2EHvb3uMmgJ05Wv8+3y6WX//e4bR/9XwDKZ4/q7Nsz+w9fMqErKdbIW9t+7RbPSlTOLv8e+CEFDSBbXtq8sJkiTh9VjrecYO0u+1ysUl7/fqdR6V9/xn2YtBe/X0Gm5DBEyWIrQHXK02hoSEycdz3kiVL9Iu1b/9+adikuVXB0N84j7Z7SF55wRsatvv5hqfLl7tRxv9wucKx3c9+7dajt0yYNNletF5XLVsk586ekwpVo4eAateM3qYbrVm7Xg4eOugZR/er+/cXxP7w/fekedPbPf3dC3ENYo/44jOpVbOGewhr3jfU7q747e7sL9BsB7G1smT7Tp3d3UXDQZUqlPe02Qtz5s23Z53XTWtWSCpTXtpfEHv+7D8kT+5cTl97ZueuXVLHVOJ2T8OGfCS3NWzgNPk77phC+s1bt5FVq1c729ozei5albNypYrmvwpyU+VK0Sp92315RQABBJKbgLsSdGxB7JiCzhqCdk9aTdp3ql69unTp4r0Zye7jrmbtrkQd6HH5G8du830liO0rEvNyfIPY+w8ckOq1L/8e1j3o+755M6PujLT3OOjjYfLBR4PtxSu+Vje/h0d++Zn1+zc+QWwNPjdtdbds2br1ivuyOzS9XZ/I0c96f6JtBLGjV8q2rfy9agBUg6AEP/3p0IbAlQXsILbe/NC+celYN/C9QcI3hK3r+XmMlZCVCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCQLgWAHsfs8Uk0aVMhvnfum3cdk/NwtMs1UmtYq2P4mO4it6zQEPXTiKsmfI6O0a1RKcmVNL6cjz8utvSZam95Zq6h0v7uCVbH64wmrZPGG/VLnxnzy0G0lJdRU4bY/E9XOdhBb57W69rg5m2W/qZ69YO0+uaVSAXn9oainlY+ft0V+WbhdKlyfQx5vWsaq5q3Bbw2AB3NK8kHs0qVKyi8/j0vwOf/w43jp9eIr0cbRsM/vv/zst8K1dvYNYt
9/7z3S941Xo41jN3zy6XDpN2CgvWi9zp3xm5yJPCO3NG7maY/rgl3p2V8Qe/SIL2N81L3uJ65B7JjGC0YQO6ZrERePZQvnWaF7f0Hs9f8slZCQkGjDaSC/Rp1bPO0JCWJv2LhJHni4Q7TAvGcHZkGD2fffe7e0ve9eKVz4Ot/VLCOAAALJSsAdeHYHoQM9iYRuH6wgth6veyx/x08Q25+K/7b4BrF1NN+nbWib+3f50uUr5K42bbXZM+lTOfSmse3bd8jsOXM963RBq2prdW0NVbfr8Lh5o35Btm7dHu33dhVzw5Q96e/qVi2ayc8TJ8uzz/e2m51XDXjnyZ3bqlx94sQJp92ecd/IdrWC2N+O+V7GT5gk+mSWZcuX24diveYIzyFFikS998gRHi5DB3vfo3o6x2PB/iNHQ5uDnqrtjBBTu9PBzwxBbD8ocWyKPHdePhr/j/W4pSsFcXXoIxFnZMzMTVI0X2ZpXKVQHPeWfLvrI6sumkdNPdroclh5+NQ1ki40jTzYsOR/dmJTF22XrXsj5IFbikuWsLRxPo74/NzpTnxD2PqzrGMRxI7zJWADBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBJKcQLCD2KlN8dyhz9S1qlG7T/boiUj5Y9lO+cZ8Hrv/6GlnlR3EPnYyUu546RenvUjezDKyV0NrufELk6wgd3kTli5fNIes2HJQVmy+XMT4lbZVpPFNhWT1tsPSaeAsaxs7iK0Vse/r6y32N/rFW6VQrkzyw+xN8uFPK5193lQylwzsXEv0s/Vbno8KfzsrEziTYoLYq1avleat74nGVahQQflt8s+SNq3/D7t9g9jtH3lYXu79fLRx7IavR46S/735tr1ovc78bYoJAZ2WJs3v9LTHdcEODSf3ILYGhl55vU9cT9/Tf/H82ZI9e3a/FbHd4S33RsEOYuvYmzZvkXf6DbACWe59+ZvXQPa3X31uVTX3t542BBBAIDkIJDRIndDtS5Uq5TC5K3K7x3W3O51jmNEK3bqtv4kgtj8V/20JCWK/+U4/+eKrbzwDu59u0brNg9ECxoMHvi93NGnkbLN3335p1uqeaCHrf5Ys8Nxs9+U3I6XPW+862+nM5rWX33TbK3z3qe8XJ4z7TrJmyWJ1OX/+vPwydZo80937RJV2Dz4gr738gtXnagWx7WPWYypRtqK9aL327P6sPNGxvactmAsxBT9jao9t38khiD14wj8yZsbGaKdRrmi4VC6RS+6sWVRymjt2/6tp6aYD0nXwHGv3P/+vieTIEvuxbNt3XNq+PV3qmbuV+5q7loM1tX9/pqz/90iMwzUyfyC/av5Q/q+mRuaP+JPmbmz3H8L6jw/ZMqeTSW/E/IShxD7eXp//JXNX7ZExL93meXRWoPuNz8+dvxC23liRHH4eA3WhHwIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIpWSDYQWzb8sYi4fKoeVJvefN5eYZ0l4vmXjBFsZ79ZJ5VzVr72kHsv9bslR6fzrc3t15/f7e5pE+bRp74cLb8s/WQZ50GtfNlD5Ps5nPcZtULi4a0t+8zha3e/t3qZwexx8zcKIN//sez7az3W0qa1Kmk/w/L5ciJM551b176bPz2lybL8ZNnPesSsuD+/Dkh49jbpjLVxS7aC/F5TUiQJ6b9aTCl1T0PyKrVq/12efbpLtL1ycf9rvMNYt9co7oVpvXb2TS+bALGo0zQ2D0tXzRfzpw5I9Vq13c3mwBRY7mzZcxVss+ciZR06S4HxCtVqCDh4dkluQexp077XZ58+lmPxRuvvSz58ubxtNkL+h119uxZE5YPtZpSmbsq6tWpLWnSpPnPg9j2Me4/cECm/vq7/Dlvvsz/a4H4q5Jp9/1n6UIJy5DBXuQVAQQQSFYCvsHlESNGiFbGDnRyB6bjU1E72EFs+7j9VccmiG3rXPk1Ie/fhn72hbz3/geenUz88QcpW6a0bDPVrhs0usOzrnu3rvLUE508bbqwZu06adrqbk+7b2A7kCD2hQsX5K333pfIyEhnrAfa3CP6lBbfqdU998uKlZff0DdsUF8++2SQ1Y0g9lrxrZTt6+deTg7Bzw9+XCHj/txsHbY+tkinbfsj5MjxqD/c9PFJg7vUltzZ/pv3eXrXrj6eKWumtJ5qz9aB+vmSWEHs+80fvjvMH8AlC2aT0JBU0fZcuXgu65FP0VZcpQaC2FHQMYWwdW1y+Hm8St8u7AYBBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQACBZC2QWEFsN0ouU7CsS8tyUq98PglJk1rOnrsgDZ6fYHWxg9gT/9om73631L2ZTH7zDsmaMa0TxNZq228+Wk1qlsljjePpbBb8BbE/nbxavvl9vdNVx9NxrzS99OVCmbVi15W6Bbw+RQSxP//iaxOo6R8ryjRTFbt4seuj9fENYmtV4xV/zxcNA/ubfAM52mfTmhVy4cJFU6WwgmeTzp0ek+efe8bTFshCcg9iL166TO6531t99MvPhppwda1ATt/TZ8z34+TFV1/3tF3NitieHV9a0HsRtm3fLn/MnC0ffDQkWih71DdfSI1qVf1tShsCCCCQLARiCkMHcvAJCWL7hqXdQWn3uHGpiO0+5tjGd/fTeff+4hMo9x0vuS8nJIg9/MtvpO+7/TwEP48dI+VuLCsjR42RV9/o61nnrpbtWWEW7mh5l6xdd/kN9gP33Stvvv6K0y2QILbTOYCZbj16y4RJk52e7hv2UlIQWwE0dO2eNOR5rQax332shtQqm9c6Xb2bd+32IzLyj/Uye8VuqV0un7zTvrqbIsnOJ3YQe0rfppI5LOpGyqSEQBBbJLYQdlK6VhwLAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggkTOBqBLHtI6xWKrcMeKKmtahPZ9bPpOMSxH7/8ZpSvXRu0c/ht+45Liu3HJKDx09LOVN9u6oZO5AgdmpTCXu2qYit0whXQNtqMF+yhKWVYycjZfy8LbL38Cm7OcGv13wQe9u27dKgcVMP1JOPd5SvR47yBGSrVK4k3438SlKnTu3p6xvE1pUv9uwhj7Vv5+mnCzNM8LbDE0952qtWqSLfffuV1Xb/w+1lwcJFzvqyZcrIT99/KyEhl0uz2ys1vKPj6aSVGUNDQ+XWWxqYKtCpk31F7JOnTsmNlbzVU9s/8rC83Pt5+/Q9r//u3OVUm1SL3LlySbWqUY8zvxpB7OFDh0iD+nU9x6Qhr9279zht2bJnk0oVyjvL9szceX/JQ+072ovW60u9e0qHR7xBdE8HFhBAAIEkLuAOIeuhugPRVzp097ZxDTDHFgB3jxvfILYeu4axddJj0/9imtz7i+t5xDRmcm5PSBD7vQEDZeinwz2nP2fGNMmfL5+8+/5AGfaZd13tmjd7+roX5pgnU7inBvXqyvBhQ5ym+ASxjx07LkuWLZcNGzfKkSNH5djx43L8eIQcPXZMZs3+0xlbZ1JaENs3zOnBMAspIYhtn/PJM+ekUe9J1qL9mKOlm
w5YIe36FfLL/iOnZOG6fbL/6GnpeW9F6zFI2nmfaV+x5aDt2p0OAABAAElEQVT1B2SOLOmlUrEcUrpQdlNJOupvkk27jlnblSiYVW4qkcvenfV63tzo+f3sTWL+jpQ29YpbN35+Z5azmj8c76h2nafvIVO1e9H6fdZ+ws2jmxqYY9I/QPWP33pmvu+lxy/ZG50y57N880Frm0wZQqWKqWJ9Q2FzXOZvkStNdkXsQIPYeh5rdxy2zjPi1FnRitlacVz36zvFpe/pyPOyzFwDPe805m+8m0qavyHMH+dXCmLrI7GWmBsJzpu/O6qWzC0Vi+W0Hofleyyrth0y4x+UHaYqel7zSKyS5hrVuCGPuR7Rb9g9YK77ss0HLNPsmdJJKVMtXI/Fvs46dq/P/5K5q/bImJduk4I5Mzq707uwdx08KYXzZDJ3fkfdAOCsdM188eta+WLqlSvR+/7cDnqqdrQbKVzDMosAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAslYIJhB7MqmQNt7HW+WIyfOyN1vTIumop9Bz+rf0hQ5Fuuz6LgGsX9/t7n12ex73y+TCfO3OuPrZ+wtbi4SUBBbN7I/q37iw9nyz9ZDzjiJOXNNB7E1tNv2kcc84eeiRYrIr5N+kjHfj41WYbHvG6/J/ffe7fH2F8TWDl8PHyZ1akWl93V50+YtotWwT5w4oYvO9FKv56XDow9by9/9ME5eeOV1Z53O3Ht3a3m7z+ueCtvnz5+X1/q8JaPGfO/01UrcC+bMkLAMGZJ9EFtPyreCpLa9/+5bcmfL5jrrTBp+0mu4avVqp61u7Vry1edDreVgB7GXr1gpd977gLMvnXm47QPy+isveNr8BcPs6p3ujmvWrpOmrbzfU/3f6SutW7Vwd2MeAQQQSFYCCxcutCpC2wcdl+CzBp0XLFhgbVq9enXp0qWLPUysr77Vqn336Q5G+66LdeB4rnTvjyC2+V3ZvpO4Q9ClS5WUX34eF5Bul2495Jepv3r6rl72t6RPn06693pRfvp5omddXBb0preJP37nbBKXILb+Dtdq3IuXeB+N4wzmZyalBbGVQMOfvpMGPfW/lBTEVoN2/f4QDU6PeuFWuS53Jvlk0ir5dvoGud2Eoqcs3O4w/dGvuaQNSWM95kgfd+Q7lb4uuwx6spZkSBciW/celwffmS5F8maWkb0aerpq0Lvr4DlSzdwRPMDcGWw/3sm37+pth6XTwFmebXXhwYYlZeT09dGC2DsPnJBH358hJ0+f82xTtkh2+ejJ2pIuNI2n3XchLkHsM2fPS7dP5loBcfc4YelD5IvuDTyB5Lj0PXoiUh7pP8MKwLvHbVWrqExbvMM6N/cfwvqPD9lMQL22qXQ+yTwSyz3lz5FRhj9X31Pde+BPK2Ts7M3ubtZ8w8oF5eX7K3sC1rNX7pYXv4j6vefeoFzRcOlrHqsVnjm91ewviP3jnM0yYNwKKWS+nwabwLSG9WOaAg1i2/10HELYMWnSjgACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggMC1IRDMILYWpdLPu0NMAa+f5m6R98cu9yC9+mAVaVSlkGjRrFt7RWU94lIR2w5ifzxxlYz6Y4M1dqFcmeTLHg2sgHYgFbF1o4/MZ6saGtfPje9641freLS9XNEc5jPvWjort7802Wm3GhL4xf35cwKHsjZPddFMCRnIN8ijY2kIOZCpZfOmnsfP+wvp2gFqDTvf3uIuE2re5Ayt+/l9ykTJk/tytbmYgti6UaFCBaVKpYqmQuJmT1DYGdDMzJ/9hzNeRESENGp2p+zZc7mSsvbV0E6DunWkVKkSsnPnbhnzw1inArQ9VqcOj0rv55+zFjdu2iyNmkaVT7fXjx7xpVSvepO9GO21UdNWnnNt9+AD8trLUeHiuIznG5a6o0ljGTywf7T9Lfp7ibR50Fs1fKoJwJcsXtzq+8+q1dLirjbRtmvRrKnUrFFNcufOLRs3bpJhn38pBw8d9PT7fOhguaV+PavN3zVe/89Sv1XG9+3fLzXq3OIZa9iQj+S2hg2ctkOHDstNNb3Vr3Vl61YtreMqWrSIVfl685atcuvt3tB43rx5pXPHDnJTlUqSOXNm8z2xRv7X951o1/vXSeOlRPFiOiwTAgggkGwF3EFkPYkRI0bEWkE6IScaSPDbfTwEsROiHb9tfd+/BRrEjoyMlNLlo55yYe9Z34+tXPyXtdj12R4yeYo3pG33C+S1eLFiMm3yeKdroEHs0eaGvZde/Z+zXaAzKTGI7c/GDnqmpCD22fMXpOnLv1gB3xn9W1iVo+0gthrdXed60ZBuZlPluXDuzPKPqabc2dx9q1Pv+ypJFVPt+sCx0zLSPBpJqyJrZeX3Otawqis/9sEsU1n7sBPwtjYyXwb+aMLAf26W1x++SW6tVNBvEPugGbOdCSQfMRWx72tQXJqaUPjZ8xetu4fHmz+MdXJXxD5+8qx0+nCW7NgXIY82LiW3VS4kWu37q9/WyRwTKL6tSkF57cGY/+bQ8QINYutfja+P/FumL/lX6pbPJw+ZYHjG9KFWUPqraeus8PHnz9Y3bSESl74XTIXt7p/Ol0WmArk6trutpBV2nrt6j3z000o9RGty/yHs/seH5+4qb22nVaz1vBeujRrHvh4rTKXwJwf9aQW3X3/oJiliruf6nUdk0M//WG7vPlZDaplAt05aNfvxgVHXucc9FcxjtPKY6tYnZMzMjTJ/9V6Pp28Q+0dzfQaYf7golj+LfNi5lmQzlbRjm+Lyc6d9K5lK3/oz6p7c1bLdPu4+zCOAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAQPIRcH8WGoyj7ta6vPX5t46lT1reYQp9nTeflxfOk1nCTLExnUbP2ChDJvxjzccliK2fi1YxTzrWz4d3HogwTzG+aH1unErM/1JJwBWxM5nPnce+2sh6CvM5c2xajCyNCY/bTybWpyT3MJ8pB3MK9ueriRLEDvSEm97eWAZ9EBUK3rNnr9Ssf6tn04YN6stnnwxy2ubNXyAPPvqYs6wzTRrdJh9/NMBp8w1i5wjPES0Y7HT2mXmxZw95rH07T+u8v8w+TYXnuEwaIvph9DeSNUsWa7O4BKft/SS1ILYe10dDhsrAQUPsQwzoVStm93v7TfMo9dRW/2AHsXXQqjXrx3iNW7VoJgPee9vat2/ozGq8whcNdPd/580r9GI1AgggkPQFfMPRiVkVulSpUh4Qf0Frgtgeoqu+4Ps7MdAg9i9Tp0mXbt09x+v+XfmGuaHpqxHfeta738t5VpiFM2ciJV26tE6zvne6qUplZzmQIPaevfukZj1v5WEdQN8D6nvNwoULWTdc6djP9XzB8zSUKwWx332rj9zTupVzPO4ZfQLIfPM+0Z7c71tPnz4jZSp6w6++Y+lNhiXKVrQ3t157dn9WnujY3tN2NRbiEgi1j6frkDlWFe32TUpL+8al7eYk9fqBCT6PM8Fnd9BWD1ADu1//vk5+mrPFqU6t7XYQWytiv2SqJLunt8cs
lckLtknPNuYRSjWKOKv0zmCtRq1BaLuy9vh5W6T/D8vliWZlrCrW2ln/4Gz5+lQrYG3fFeyvIvZEU9353e+Wyi0VC8gb7ao6+9Gwcs/P/xL9A9MdxP7VVIvuM3KxaOXoHndXcPq7j8t+jJOz0mfGDmJrGDmtn+rZz7YuZwWjdx86Kff0mWYFjT/tVs9Tabvv6CVWFXHbOi599fFWbd+eLlrJWu+M1iC3PY0wQfdhk6OetOP+Q9j+x4enWt4o99cvbneXiFNn5aH3/rAqa4956TbrD3M9lh37I0TvvM4XHub0/Xn+VulnHpHVtmEJ6dysrNVuX2cNd7eufb3TN+L0WXl6yFw5FXlOvjSVv9OnTSPuIPbi9ful3w/LRKujf2CqnWcOC3W2jWkmPj93vmMRxPYVYRkBBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQACB5C1gfxYazLNoVbOoPGM+9w014Wb3pAXMPjBP/J0wf6vT/Fb76lK3XD75eZ75PNV8BuqeJva5XbKbglT6hGd90nNoSGoZ0qWOlCmc3emmxcf0s1jNEWwzT5Rua54ordOwbnWlbOFwGTpptfUkaKvR9SVfjjDp3/FmKWg+102T2qS4zaSfs0/9e4e8bT6PDvbk/vw5GGMnmSB2x85dZfqMmZ5z+uPXSVKkcGFP2xNdusm036Mujr3CXSXZN4j97NNdZPuOf2XcT5erK9rbuV8feaitvPxCTycw7F43Y+Zsebp7T09wx73ePa8h7G+GD5O8efM4zddKEPvChQvy8bDPZcCHl8Pxzkn6mdHq2x/0e1tCQy8HERIjiK3Xp8MTT/k5AhF3EPv48Qh5rU9fGT9hkt++vo3PdHlSunTuZO6uiP1x7r7bsYwAAggkVYHBgwfLoEGX/z88McLY7oC1OvgLYWu7u19MfbRfsCb3/hLjvIN1nFdrnPgEsY8eOya3NmkR7ean4UOHSIP6UU+n+OTT4dJvwEDPaaz4e75kypTJ0xboQiBB7LE//Sw9X3jZM+QH/d6RFs3uMHdYRr05t1f6nveVgtg9nn1anny8o72559X3RjCC2B6eJLFgB7H1YDRcrdMuc+fsclMhWacwE/jVQHERc6evTnYQu48JQDcwQWj3ZIeVp77V1LoL173u8ylrRCtC25Wu9XFJWm1bKyN//fwtVtdlmw5Kl8F/SrMahaV3m0pWm78g9vvjllsBca3mXLNMVJVme1+/L/1XXv/mb08Qe9DPK+W7mZtM6Lus3GyqSbsnDUev/9dUfjaPcfKtpOzuZ5+bu809bweaZ5sK2y9+scAKrz/V/EZ3F5m2ZId8O32DdLi9tDzaqLTEpe+MZTvlla8XeQLR9uD7jpyS1v+LqrLv/kPY/seHCW80sULidn99tU3sfyBwrzscccZ6nNXJ0+dknqm4rdet+c1FpNe9UTdF2BZXCq/rmHYQu40Jgn9nKmZrkPyLHvVF79YOZCKIHYgSfRBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBIWQL2Z6GJcda5sqY3n/dGfa68cO1e2W+KmAVjCjEB75IFs1oFs4IxZskCWa0nQf9rPt9PrMn9+XMw9pHgIPajnZ6UWbP/jNextGjWVAb2f0fmzvtLHmrvDbk83rGD9OreLdq4Gqquf9vtnva8efPK3BnTrLCNvyC2hmkn/jJF+vR9L1p4SIPTXZ98XJo39Y7p2YFZ0P1+8+1o+e6HcX4D2YUKFZRnnuosLZs3jRbc3bZtuzRo3NQz5A+jR0iVSlEf+HtWXFq4o+VdsnbdemdVh0fayUu9e1jLcRmvW4/eMmHSZGcc29xpuDSzdPkKuatNW0/z71MmyvVFi3jadGHxkqVWtcvJU6JCEb4dqlapIt27dZVqVav4rhJ/YakNq5ZFM9MNDx06LDfVjAp22QO5w152m77+OXeeVbFbj809uYPYdrsGsb8d8711Hnab/ZoxY0arEmdn8/3n7/jtfrwigAACyVXAHUi2z2HEiBGi4eSETFpxW0Pe+mpPsQWs3ccRWz97rIS+uvdHEFvEN5B8pYrYWnX6iS7PyIqV/3guhf7e/HveLFPVOp3VvnDRYrnvoUc8fYYO/lAa3RoVRvWsMAvLV6yUnbt2W816w9cNpUtJseuLOt0CCWI//VxPmWTe59mTvieb8evkaDfXnThxQvR9ontyB7G13fd9pDtc7d5Oj/vOex9wN4m7b3wrYsf0/tezo0RYsAOhOnRsgV33rrUSr07JoSK2+7h1PlvmdFLVPB6p3W2lnBC2tttB7Pcfv1mqX/rjU9u1uvStvSZa2016I/rfDNNMVeo3TFXqBxuWtKpg6zYaLNaAsV0le+BPK2Ts7M0y2NwNXLFYDu0i/oLYnT+aLSu3HBI7/Gx1vPRl/c6j0r7/DE8Q2+7v7uc7/5yplN3aVMyOabLDxz++1liyhF2uUG/31+rPOrm/T+x1vq8aYNcge1z6Dp+6Rr78dZ28aKqQ33EpMG+Pq4+wqvPceGvR/Yew/uODBumnvd3M7uq8Tvhrq7z33TJ57PYb5JFGpazHYP3w5ybLf9fB6H+o20HsK11nZweXZuwgtt1eJG9mq1q23vUdyGQb6c+chuXjM1EROz5qbIMAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIBA0hVIzCB20j3rq39k7s+fg7H3BAexg3EQwRzDN0CjFbE1aG1Pe/ftly1bt5qy6CFSIH9+yZMnd7RqiXZff6+RkZGya/du0UDSBVOaPTw8u1xnAj9hYZcfc+1vu2uxLSIiwljskf37D0iakDSS11iqqbsC9tU+77Nnz8q+/ftFA1A65cmdK8YqnBdNsmPPnr2y49+dVl8NnefMGRWMsRr4ggACCFyjAu5Qsn2KCQlD+1ba1jGvFHb2PYaEBsHt84jp1R0Qv9KxxTTGtdQeSBD75MmT1o1ok36ZKl+PHOX3RjT3U0nU57x5b1Sjzi2eG980rD3W3IBWqmQJD+H6jRulSbM7PW09uz8rT3Rs77QFEsR++fU+MsrcYGVPOcJzyIxpkzy///X9W88XX/XcnKb9fYPY/p7Q8sWnn0j9updDkrvNex99Eof7hjkdK65BbA2eFy9TQTd1pqJFisj0qROd5as14w5zxnWfySGI3fu+SlK7bFR1aa2SrmFjn2Lp1mnHFMS+YN4zNnlxsmgV5dnvtzQhf2+l9XFzNluPa+ra6kZpU6+4NdbcVXusislaqfqBBsWlxetTrcc8jXu1kaS+tHN/QezepuL0HFN5+rNn68kN111+fJMO+veG/dLt47meIPaLXy6Q2St2W5Woi+XLau3b/nLyzDkJSxci1+fLIgVzZrSbo73aQewrVYG2z7PC9TlEq0C7J/3ZN0+FkrzhGazHScWl79g/N8vAH1dIl5Y3yn0+49rhaN2X+w9h+x8fZvRrYT3uyn0sI6evtx5n1eOeCqKP2LKXNYDfoEJ+6/gyZwiVTbuPyaeTVzsVsTX03fjFSdZ1nmWus/24K/fY7nk7iK2B8BxZ0suOfRHSygTee5jgeyA
TQexAlOiDAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAQMoSsD8LTVlnffXP1v35czD2nuKC2MFAYwwEEEAAAQSSu4C/8LSeU6CBbA0221WwfS0CCTr7BrF9x0jM5UCOLzH3nxTG9g1i6zFpYNo9aQXp2Ka77mwl/d7uE63LNyNHy+tvvuVp17HvatXSetpEiLkZTp/EMeyz4Z4+urBo7izJkSPcaQ8kiP3L1GnSpVt3Zxud0SeetGx+h5QtU0Y2b9liAti/RKvmrf18g9gfDRkqAwcN0VWe6Y4mjaVYsaKya9cemTrtN7+h9LgGsXUHjZq2ko2bNnn2Va9uHRPqrmducMsjt95S37MuMRc0FBrXqVKxnAFX0I7r2MHo/4EJ944zId93H6shtS4FsWMbN6Ygtm7z7NB5smjdPhnZu6Gnirau6zt6iUxZuF0GdaktaqLTWRNMbvryL5IvPEy631VBnhz0pxWWfrRRaWu91efcBWnw/ATRSsojezW02r+atk4+n7JGnr+3orS8uYjTV2e+/WODfDJxlSeI/c3v660w8asPVpFGVQp5+ge6EGgQe8Xmg9Z52BWkYxs/Ln2XbjogXQfPkYaVC8r/HrrJM+yqbYfk8YGzrTb3H8L2Pz4Mf66+lCqUzbPNS18tlFnLd8knz9SVckXCpf37M2X9v0ecZbvz1EXb5c1RS5wgtrb3+HS+/LVmr3zRo4Ho467ck55TxOmzVrV0DWnbQexvet4imUyw+8F3p1sh7l5tKknzGoXdm/qdJ4jtl4VGBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBFK0gP1ZaIpGuAon7/78ORi7I4gdDEXGQAABBBBAIBkKxBTGtk9FQ9m+04IFC6wmd4Vpd59Ag9z/ZRA70GN0n9e1Nu8viB2Xc3zkobbyQs/ufp+CoZWe2z7ymCxYuCguQ8rbfV6XNvfc5dkmkCD2wYOHpGqtep7tAl3wDWKvWr1Wmre+J9DNPf3iE8R+/oVXZNxP4z3j2AsaXl+5+C97kdd4CAQziP3VbyYg/csa0WrQHz5ZS0LSpLaO6O/1pkr1J3Ot+alvNbUCufahDvp5pXw3c5NULZXbCnGPeek2T2VqfxWxF5qw93Mm9K1VljWcnTtbBmu4bfuOS9u3p1vz9UxV576PVLPm7WrmuUy/z7rVk5xZ01vtZ86el1e/XiTrdx6Vfh1rSPH83lCx1enSl0CD2CdMRfDGL0yytvroqdpSuXhU6FwrSWuoeNKCbdK+cWkrhByXvsdPnpXbX5psjdu/081S44Y81rxWw378w1myadcxa9n9h7D9jw8lC2aTT56uI+lC01h97ErkuvDr280ko3G0g9gDO9eSm0rmsvodPRFphes1oO0Olo+cvsFU014lOu7QZ+pIWvPUH53s65w/R0YZ/eKtVrVsO4htX9flJqj9lAnc6zSsW12r8ra1EMMXgtgxwNCMAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAQAoWsD8LTcEEV+XU3Z8/B2OHBLGDocgYCCCAAAIIJFMBDWPrNGjQoASdgVaZ1oCzvgY66b7tYHeg2wSj34gRI4IxTLIeI75B7EKFCsrzzz4jze5oEuv5R0RESI/eL8u036OCo7F2Nis11N2x/SPRugUSxNaN5v21QB404e/YphbNmlqrJ0yKCnzqgm8QW9t+/2OmdHoy+k0Ius6e2t7fRvbs2SvTZ8y0m0wV6/ry2SdRP0enT5+RMhW9lX3ffauP3NO6ldNfZ3SM25q29FthmyC2hypeC8EMYmtouruplrxkw37R0HP10rll35FTsnDtPuvY/FXdXmdCvh1MNWadyhUNN4Hhuta8/cVfEFvX9R+7XMbP3WKFsWuUziPnTdJZKzzrfvebfbqD2Np/2OTVMsJUxtbw9s1l8kqWsFCZ888eq6+Gmvt1vFlSpdKe/qdAg9i69bzVe6TnZ1E3CFQzBkVNNe/lmw/J2u2HJVvmdDLCVIfOnimdtaO49P196b/y+jd/W9vpuDmypJf5a/Zay0eOn7Fe3X8I2//4oOec1oSwbzbneeDoaSvwrp37mKB6AxNY18kO0WvfBhUKSGhIapm5YpdEmrD6SRMudwexz5lK5r2HL7CqYuv51Lkxn+WoVbJ1esVUHm98qfK4bxBb1/84Z7MMGLfCsviqewMnGK/rfCeC2L4iLCOAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAgP1ZKBKJK+D+/DkYeyKIHQxFxkAAAQQQQCCZC8Q3kB2fAHYyp7omDv/RTk/KrNlRlVtjOyENXhcuVEiKFCksze+4XW6qUskEOmNJdLoG08rYk6ZMlRHfjpHFS5a61lyevbNlc3n6qc5S+LpClxtdcyNHjZFX3+jrahHZvHalZ9le0GrW/Qd+JH8vXuIJNucIzyG9nn9W7mzRTF589X/yw7if7E2kXt068uWnHzvL9swvU6fJ1yNGyaLFi+0m61XD0a+80FPuurOVPPXMc56geZNGt8nHHw2w+kVGRkrp8lU82w54721pZY7Bd9q0eYsM+niYuAPi2ocgtq9U3JcH/rRCxs7eLO+ZitA1TUD5StPQSatl5PT1MuCJmlLNVLH2nSJOnzXVkleLVl3WQLROlUvkkjtrFXVCv77bPPjudNm657j0vq+SNKte2LP6rAn9NugxQYqYMLNWv7YnbdeqzH8s2+Xsp2HlgnJ3neul84ezpUHFAtKnXVW7u1wwQW0NYv+xbKdTPVpXPtiwpDzSqJSkT5vG6etvxg5i+1b09tdX22aYUPhPJiiuoXR7usUcU+fmZSVfeJjdZL3Gpe/4eVtkwvxtolWqdSp9XXZ5oU0l6TxotrU8zVS4tif9xwcNSg94vKa8M2aps41Wsm5Vq4i0qFHE7irnL1yUT39ZLd+aatf2VLd8PmlarbBomLqVuX497q5gr5KTZ85Z4fbZK3c7/oVyZ5LOzcpK3XL5nH69v1ggc0yf71++TbRStj31Hb1EpizcboXvP+5aN8YQvB3E1u0qXaoubo8Rl1etiq5TsP+hIC7HQF8EEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEAiOAEHs4DheaZRgf756zQWxu/d6UQ4djvrwXjG18uAdTRpdyZX1CCCAAAIIIHBJwA5l29WqFy5caK1xV7uOa/VrcFO2wIEDB2XPvn1y7OgxSROSxgp3586dS1KnTh10GA2Ab9m6TU6dOiW5cuWS3LlyBhwe9z2YM2fOyOq160SD1Xnz5JGCBfJLmjSxh1p9xwh0WY/74KFDcvx4hJw7d07Cs2eXnDlzBLo5/a6ywMFjpyVj+tArhpwTelhadTtLWNqA93P85Fk5Yyo9h2dJJ6kDvGkivsd4OvK8HD0RaapXp5OQNLH/LMel75GIM9bPbNaMaQM+ND0OnWLb5oIJZO83VbMzZQg11y4koLG1yrZW0g5LF1j/gAa91EkD1F2HzInLJrH2DfY/FMS6M1YigAACCCCAAAIIIIAAAggggAACCCCAAA
IIIIAAAggggECiCBDEThTWaIMG+/PVay6IHU2MBgQQQAABBBBAAAEEEEAAAQSSmIBWxQ7GVKlYzgRV1Q7GMTAGAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggkXIAgdsINAxmBIHYgSvRBAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAgmQgQxL46F4og9tVxZi8IIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAghcFQGC2FeFWQhiXx1n9oIAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggMBVEaj//AQ5d+7CVdlXSt1JSEhqmdmvRVBPP9VFMwV1RAZDAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgYAFOgyYKet2HAm4Px3jLlCqUDYZ/lz9uG8YyxYEsWPBYRUCCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAKJLTBqxgb5eMKqxN5Nih7/yRZl5YEGJYJqQBA7qJwMhgACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAQNwFqIodd7NAt0iMati6b4LYgV4B+iGAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIJBIAjsPnpBXv14k63YcSaQ9pMxhNYT9RruqUiBHxqADEMQOOikDIoAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgggED+BUTM2yPSlO2XT7mNy7tyF+A2SwrcKCUktxfJlkYaVCsgDDUokmgZB7ESjZWAEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQuFYFCGJfq1eW80IAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQACBRBMgiJ1otAyMAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAteqAEHsa/XKcl4IIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgggkGgCBLETjZaBEUAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQOBaFSCIfa1eWc4LAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBJNIChB7EPHTsqBIxFy6sxZuZhoh8rACCCAAAIIIIBA/ARSmc0ypAuVnNkySXiWsPgNwlYIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAgEsgQUHsyLPn5N99RyVNmlSSM2smyZghrWtoZhFAAAEEEEAAgaQjcOJUpBw4GiHnz1+UgrmzStrQkKRzcBwJAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIJDuBBAWx9x2OEK0wmSt7pmR34hwwAggggAACCKRMgf3m/cvxk2fk+gI5UiYAZ40AAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAJBEYh3EPuMqYa95+AxKZw3PCgHwiAIIIAAAggggMDVEti255BkDksv4VnCrtYu2Q8CCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAghcYwLxDmKv375PCuTKJhkzpL3GSDgdBBBAAAEEELjWBU6cipSd+49IyetyX+unyvkhgAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIBAIgnEO4i9fMNOqVCiQCIdFsMigAACCCCAAAKJK8B7mcT1ZXQEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBK51gXgHsZeZIHZFgtjX+vcH54cAAggggMA1K8B7mWv20nJiCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAghcFQGC2FeFmZ0ggAACCCCAQFITIIid1K4Ix4MAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAQPISIIidvK4XR4sAAggggAACQRIgiB0kSIZBAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQCCFChDETqEXntNGAAEEEEAgpQsQxE7p3wGcPwIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCRMgiJ0wP7ZGAAEEEEAAgWQqQBA7mV44DhsBBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgSQiQBA7iVwIDgMBBBBAAAEErq4AQeyr683eEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBC41gQIYl9rV5TzQQABBBBAAIGABAhiB8REJwQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEIhBgCB2DDA0I4AAAggggMC1LUAQ+9q+vpwdAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAKJLUAQO7GFGR8BBBBAAAEEkqQAQewkeVk4KAQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEko0AQexkc6k4UAQQQAABBBAIpgBB7GBqMhYCCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAilPIEkGsS9eFDkacVJOnzkr2bNklHRpQ5LdlVm/fZ+cOHVGbiiSV9KnC012xx/sAz5y/KSs27ZXcmXPLNcXyBns4RkPAQQQQACBOAsQxI4zGRsggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIICASyBJBbHPnjsvy9btkJMmgC0mjG1PqVKLFCuYSwrkymY3JfnX+Ss2S+TZ81KhZAHJljksXsf759KNcuGCC8JnlNSpU0mdSsV9WpPm4vIN/8qRY6dEr2XdSiWS5kH6HNXxE6flgrkrIGumDD5rWEQAAQQQuBYECGJfC1eRc0AAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBA4L8TSDJBbA1h/7VyS1TwOJVI+rShEpImtZwyoezz5
y9YQgVyZ5PihXL9d1px2HMwgtizl26Qi+bUNXCdypj4TupTo9z1vs1JcvlIxClZt3VPsqqIPWvJBuuGgDqVi0tqfxcgSUpzUAgggAACgQoQxA5Uin4IIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCDgTyDJBLG37joo23YfkjRpUslNNxSW9OlCneNdv22v7D5wLFlVUw5mELv6jUU8Hg4MM4kqQBA7UXkZHAEEEPjPBQhi/+eXgANAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQCBZCySZIPbfq7fJiVORkidHZildJG801IWrtso5Uxm7YsmCEpY+rbM+8ux5E9I+KgeORFgVtHNlz2yNkSZ1aqePPbPH9Ntv+mmV7XRpQyRbpgxyXd4cnmrTO/cdkcPHT0qhPNmt49l36JhEmmrdGg7XytQ6RZw8I3sOHpMjpl9oSBrJasbR/mlMhWp7cgexT5yOlAOHI0SrfmfRvqaydwbXOdjb+L7aFbGvFMQ+dOyk7Np/RPScbyjqtdu+55AcO3FawrNklPy5sspRU5l6x97Dkj1LmOQ2Vht37JMI457eeOTNkdVUrM7kexjW8olTZ8w+jopWts5ojj13eGbJmc3bNzY73X6bOZZsmTNIwdzZrTHd/U+ba6KmapQtc5hT+Vyvrbbrtc+SMb0UzhvuN5R+6OgJq59a6/XIlyOLZDb93dOG7fvkzNlzlpHu+6DZRqutZ8yQ1uwvt3Uttb+e56FjJ+TgkRPW5uFZw8z3SCopnC+HZA5LZ7Xpdlt3H5RjEafl3IWoMQqZ8/Ldp9WZLwgggAACSVKAIHaSvCwcFAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACyUYgyQSxV2zYKYdNoDhD+lCpVrZIQIAaCl6+/l+Ri97uWlW7RrnrrWC2vWbxmm0mQB1pLzqvGsiuWqawE6JeunaHFVzW4zh1+qzTr1bFYtZ4W3YekO17Djvt9ozus1Kp66xQr7bZQexMJrirwW33pIHum2647oph7ECD2Bpenr9ys1y8IFIgd1YrVKz70wC2no+Y/HjlUoWskPA2Ex7euuuQ5azh54s+dnq8Vcyxuactplr5dlOt3Hfy7RubnYapN+3YL+5t7P4ahNYQvnsKM20Z0oZaYWl3u9rp94deN3tavGZ7NGNdd13e7FK0QE67m/y5dKNcuHDROgbfa2Jy1uZ7pqikDQ2xvqeOHD/lbGfPlLgutxVm12Ndum67CXH74JkxiubPYfYbbm/CKwIIIIBAEhYgiJ2ELw6HhgACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIBAMhBIMkHsfYeOy5oteyyy0NA0ki9nVslv/nMHbt2epyPPilbJ1vBx1sxaKTmHqVx9Tjb/e0C0Snb6dCFS/cai1iZa4VirIWsguZgJ5mrF5YNHI6wKzbp94fzhUsRUO9bJDgfrfMawtFIgVzZJZ8K54VkzWhWX123dq6usitBaYfr4ydNWMPuszz7tILb2zZszi3U+WrVbKzFrGDhLpvQmuF1IV8c42UHsqmULS4Z0odH6aZVme3L8TFM1EyzXitvzVmwWPS49Tg0R62QHsXU+lSngrRWmtVr2vsPH5V9zbBpqz2f6l7zU3xnX9Ndx8phK01pVWwPpGuJ2VzCPzU7HjimIrceSKzyTqSoeLlrZWitN2+F6rUZdxISbNRS/dusea5/usPka07bv4HHrXIrmz2lVw95rQt96zXUqc31eU+U7szVvB7F1oWCebFa79t198Kj1faT7Kle8gFWVW7+H/jbhfT2OyiaYntpY6zXQILiG/zWorWH98iUKmG+rVLJl1wHZe+k46lQs4amybu2cLwgggAACSU6AIHaSuyQcEAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACyUogyQSxVW2zCffu8Kk2rcHX8CxhVpXhzBnTO7garNawrVZO1orW9uSuDl39xiImkB1qBYc1PJw1UwbrP7uvBns1PJvFjFupdFQo2g4T+6vMrcFvDQRraLhM0Xz2MHIm8lxUKNykdm82lbhDQ9I4FbGzm2PXsK492RW1Q0JSS60Kxexmv692ENvvStOYPUsGM3ZBZ7W7qngOExz/d+8RE2RPY1UHtzu5g9jlzHGprT1t33PIBKwPmurgqaR2xeJWs33OBXJnM5W2c9ld5fDxk7Ji/U4rmFynUlTf2OxiC2JrReybXNdw2bod5pqdjnbs9jXPnDGdVC4dVbV79pINVji7YqmCnmu7ftte2X3gmKkCfrmvHcTWYHypwnmcc9EbADRwntZY6fWzp1lmbA1i16lc3Api2+1zl2+Sc+cumDFym5B9VrvZ7O+oFeIumCe7p7/TgRkEEEAAgSQlQBA7SV0ODgYBBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgWQnkKSC2KqnoWatiHzQVI8+a8Ku7qlwPlO52lRH1mnxmu0ScfKMqVQd5gnD6rq1JlirVac1OKwBYvd08nSknD5z1lTPPi97THBWA79hprJx1bJFrG52mFgrJhcreDl4rCvtYHT1cibgnTZ6hWprgEtf7IrYvmFdreS9YOVWq2Jy3col3JtEm7f3p6Ftd/Vru2NuU+nZHY7Wc563YpOcP2/SwzqZ6tgaUg8z1bHtyQ5ixxQEd8LHJlytIXg76KwVtbVSuXtavWm3tVirYjEJSZPaqSbuzy62IHYhE1y+vmBOZ+h1JkS9x4Soc2TLKDcWy++0a9B5/bZ9zvXSa7lolalabaYyxS4H43X55KlI2brLGyq3g9gVTGg7mwnl29PhYyZUvmGnJ4Cu6xwLnyC2/T2iBclzh2e2vv/c49nj8ooAAgggkLQFCGIn7evD0SGAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgggkNQFklwQ2w124eJF2bb7kOzcd9gJF2vlZK2gPGfZRqfNvY173l25etXmXSbcfcKqnuzuo/P+gthFC+SwqnDbfbXS9rzlm61wc70rBKh1GzuIXaFkAcmW+XLVaV03a/GGOAWx7creuu2VpkPHTsjKDbusbvlyZZWSJkDtnuwgdqawdFLlhqiq0u71c5ZpkPuClCuRXzKHpY86Z3cHP/NlTQg6Z7ZMThDb1043iS2I7dvfDmLnym4qj19/OWCtwfl1riD2v+b7YtOOA36OyNtUr0pU4N0OYt9cvqikDQ1xOp0ywfyF/2wNOIit3wt/r94mkWfPO2NoKFsrrpcuktdU8r48ttOBGQQQQACBJCdAEDvJXZJ4HdD4qTNl27+7pcP9LSVTRu97rngNyEbRBC5cuCC79h6Qo8eOS66c4ZI7R/ZofVJ6Q0zfhydOnpJZ85fI5u07zZN80kv7+1qkdKqgn3+kudH181HjJWuWzNK2dZOgj/9fDzj9z4WyfddeuavpLZIl
U0bncGL63ho5bop50k+I3Nv8NqdvSprZtXe/jPppqtSoXE5qV6voOfVV6zbJ0lXr5eChI9KoXg25oURRz/rktHD6TKSMHv+r5M+TUxrXv9lz6MciTsiuPfvNk7pCpGC+3JIu3eUbsz0dWUAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgaAIJOkgtn2GWulZg9cml23C0dmlaIGcTtA5W+YMkiVjerur9XrWBIlDTYXm7FnCrBD0yo075dDRk1aIOqOpDp3FBGbTmurOx0w1bK2GHEgQWwd2KiRfqhbt2anPwn8VxF69ebfsPxxhHU2oqaRds0Ixz5HZQez06UKk+o3RP3i2w8pVyxaWDOYDW62IrZO6+062c/5c2azwsV0p2jdYrdslRhB7/+HjsnrzHuuwYjo+vc5F8kVVUbfPLaFBbNvhaMQp2bX/iBwxr5GRUaHsNGlSSS1j7q+Cub0drwgggAACSUMgpQax5y9eKZ99+1OsF0HDor4Btlg3+I9WagDzid5vW3t/1BxzHZ/Q3X90WLHu9tTpMybQHCGZM4VJxrDLT+iIaSP7etWvWUUevrup1c1u891Gg+i5c2aXutUrS62qFcxNZql9u8R5ef3m7TL4y+8l4oR5L31puq5gXnmmw/2SPWtmuynJvy5esVaGfPW9dZyf9nvJPM3F+6SXQE5AA+n7Dhw273tDJXu2LM4mMX0fnjM3773w9mA5ePio1TeDCWIP6dvT2S7YMzEdX0z7OWKC9c+9/oH5ngmXd17sElO3JNMe08/OijUbZeBno6zj7P9qNwl3XZskc/AJOJA+Az+XLdt3yZu9OpvQbdQTm2L73mr/3BvmSUYhMuzdF//P3n3AR1F1fRw/9BJ6Cb333osC0lRQUAQRBQsoiih2ERAEUQSkKL2K2MGKCPgAKtKRJkhXeu+9hQAJzz0X77q72TQgEMhv3g/Z2Tt37s79zuwmvs9/zl7Fq8b/XU+cOi0hIaGSOVN6Gzh2Rzzhx5ny2/wl9sacob06umbx/9x8tGlDqV+rqmf7tV6J7Piu1eusWr9JhoybKLdXLitPt3rADnvJ/D9MPp74kyxavtrzMvp7QEP5d91RzdPGCgIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIXFuBeBPEXrhqiwlaX5IqJfMHrCi8eM02CT1/UfLmyCQFcmaW1Zv22BB1TlP1uYhf1Wd/IhfALZovWHJkSe/ZvHHnQdl36ESMg9iLzDFeuBhuqjRnl6wZfYMne804Fy5elDzZMknixIk8QfHrWRHbVcNOZDI3yUy4RCs2Z8uc1lZpdpN2QWyt4FyrQhFbmdttC71wURav3uZT9VvPy0Uz5wrF80QIvLv93OP1DmJr5W6t4K1zuSMGVcrddRDbIHbNCoUkSeKog0ynzobKir93ipibBUqaCuFZTYVwFgQQQACB+C2QUIPY85f+JZ98PcWGdDNl+O/vIu+z9UiTu6VC6WLeTfF2/aeZc2WXqRbbpkXjm6Ii9sw5f8g3U3611VjVObrFnS8NVmvVb11cmwbs3Dk8bSovh4Sc8wxXqlghefWZVvbvUk9jLFfOmvE6vjtYtPJqurRBUrJIQflzzQa5YP5mLJgvl7z1cttYjnjjug8c/YWs32j+zjVL+ycelKrlS8X6YHab66zHwDGSK3uw9OrU3mf/QNfhlu27pffQ8fa67PJCa0+I1mfHa/gkquML9DJHj5+05zeDqST9Yc9XA3WJV22RvXc0CD/+mym2WnQrE6691ZZAQeyorq2EEsQeOPpL857eKq+2e1TKFP/v5uN9pnr/BFMpulqFUj43FOl7Ud0a31VLGtevaaqGJ4vTSyWy47tWL6pVv38z1dI1hK1hbF2m/bZAJv3vd7tepkRh0TD4zt2Xb1zu8kIbKVow4jdi2c78QAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQACBqxKIN0HsZet3yNmQ8xKUKrkN/XoHXw8cPSl/bztgJ1q9TAEb1N5z8Lhs3nXIhnArFs8raVKnsNsvmnDusnXbTSg6TEoUMIHYjGnEBXAL5s5igtKXKzuHnDsvyzfsFK22HdOK2K6ytgatbytb0FTSuxzO3XXgmGzdfdgeS02tlm2Swde7IrbOY9HqLRIWdskE07NK2qBUssLMT5fyxXJLelMFXBcXxNZ1rRhetkguXbXLkrXbTNDmovVVZ11Wbdwtx0+FmCpjiaV6mYKeMI9Wg9YwvC5aAVpNrncQW19bg9gayPafi85zx76jkjJFMqlaKr929VwHMQ1iazVwrcJevEA2yZbpctVDvVng8k0DImUK57QV13XscG03x6Lnwdtbt7EggAACCMRPgYQexPYO9sbPM3RrHlVkYdLIZutC197nK1Cb7q/B2nUmmPj5d9Ps30cVyxSXDm1a+Nx4F9nrBGof9fkPsuyvdaLjvPBkC9tFK/F27jNMjpnXeuzBe6VejcqBdo1XbVrN+6XuAz3HVKxQPuncobXneUxXYht0nrt4hXz27TSpXb2itDY3CsT1Etvju1WC2HHteqPHDxTEjuraSuhB7MjOV4du/e3NKsNNVfrUpjp9XC9xHcTu1m+kaOhcb6LQmyn2HTws3d4faafVu/PzkiNbFru+wNx8Nd7cfKXfmDCw+8txHkCPa1fGRwABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQCA+CsSbIPaRE2dk7Za9tqKwVjhOZQK0yZImkZDQC7ays+Jp2LpSif+qOK38Z5ecPG0q/5n+KczXL2sw+qwJWGt4VoPDt5uAsC6rN+021bNDbD8dN5F5Ae1nF9M3pkFsDdkuXrPVVsXW10xtxtLAt1bJ1sW7Ove1DGInSZLIHrN9Ea8fOt9qpS8Hpl2FcO+5rN+2Tw4dPS1J1cIEx3XeLoitxuqkY6tdiKkkd0mnYdorG+OgVP8F2zWgrVWx7XlJmdxWLg85d8EeSab0qU0g+XKY+0YEsTUQ/pcJi2slap1LqhTJzfVy0XPNaCg9Z9YM9lhdID+mQWx3DtUkZfKkUixfNhu8Xm5uGjhjbhrQdr1xIHnSpHLyTIgNwau1BtNZEEAAAQTivwBB7P8qLEd2tmbOXSwaYq1oqmMXyPvfzVtnTKXkGbMX6a9CeaBhHXsj0k+/zJWM6dNK3dsry8Jlq2z14WTmbwwN8ZYsWsD8bZY0wsvozVQbt+6QlWv/Ea3oXLZEESltKjmnMTeUuUWDv27symVLyh8r1sg/W3ZIlXIl5bZKZWTuHyvk8LHjtsJpCvN3gHf/ahVKy+I/18iGzdtN5eh0cmetqhKcJZOEnAu1lUR37N5n28uVLCJaQdp/0X5r/94iK9f9Y/4OTSXar1ih/OZvqySerhp+W7R8tRQtkNcG35aa4PLWHXtsMK5yuRJSvHB+2zc8PFwmz5hjj0WrsubOESzlShWVrJkyyh3VK3jG818JFLoO1Oa936ZtO6XvsE9t0zOPNrVO3ttjsq7n+EUTXNTlgx6vSEbj5xY9X8PGf2PneFNUUjbX8Tc
//SL31L3dVhPXa3rIux0lbZrUbkr20ftc6nz1XOo10rxRfdmyY7dsNudtkbm2U5rrrL65lvT6b3pPPfs3svd1qH9zT/1tvmzetsteq3lyZpOy5trJkDaN3c+96IZN22TtP1tkrwlT6nVQOH9uqVK+ZIS/+/Vv9l1798uavzeb49gjubNnNddhPp9rds4ff0Z5fO41vR9jG8TW49i5Z5895m0790r+PDns+zVf7hwRjllf58ixE/aYtRJ5ejP3QmZ+lcoWj/BZoNeauu7Ys99WWy+QN6eUL1lUsgdntocb3XtH/xtt8ozZ5nwGyV13VPOeovn8ChHnrDdUljaVk0sWKeBTPd/7M+P2yuVkycq1dp+k5jOruHGuUaW8Pcc+A8fwyeoNm0XfjzXMuFqdePmqDfaxVNGC5n1f0Y6rVZ2XrFwnZ8xnYJECeUx143I+16Z3EDtLxgzRXluRBbGjszh5+oz8Om+JZDOfkTWrlvfM8NTps/LLvMWSOWN6qXNbJU+7fj7+7/eFosdU+7aKnvaYrLhzEtW1P2vBMjl+8pT9bF+2ar2s+2er+RxKa49BPwPnFnJpdQAAQABJREFUL1kpJ0+dsb9jNHSs57WE+XfwyDG7rVC+3FLefMauWr/JvDd22d9Z+jun0Z017SHeXqmsJ6wc3TXoPSftq+dMfzeEhYWZ920eKV+6qP081H4HDx+1nzORHZ/20W/S0t9N+pmi32SQN3d2qWp+XwVnvnzTuPaJagk1/+3+XJe+9joe2quj7ao3fWhAv1a1CvLkw/d5dtf37Zt9h9vjamd+H1Q3vzdZEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEDg2grEmyC2TuvU2XOyxlRZdsFmz1RNyiE4Y1pT4Tq7p0lX9H9U1CrVGsbV/wHeLenSpJTShXLaILe2abXiVf/sNmHZ/76uPXmyJJIja3rZsfeoqYiVXKqUzGd3d+HuArkyS97smdyQnkcN+Wro2QW+dUMiUxg7T3BGKZDrctUpbXMh3kDVkef+ucn+j+53VCyiXSNd5q00FZkvZ7wD9tFgtI5hQ+ybTYjdPK9mqj9rFWhddN6LVl2ukh2c2fjlz+4JYqtRChMuPnTstA0xa381KZwnq6kinlafepZzJgy/xoTktYq4muuir50tczobTr7cIhKVnatg7h2mj6z/PzsOyP7DJ20185IFc7jhZf+Rk/LP9gM+50s36hy27D4ooRfCPHPRUHZBcz5cCFv7Lfhrsw1LazVznatbdH5L1m63Qe6a5Qu7Zjl26qysM/PWKuO6FMkbbMP2eq3pdXfcXHcaALeL8UhjrqOyRXJ7rrt/t/CAAAIIIBBPBQhiRx/EnjV/qXz14wxJlzZIBphKmi5MPeaLSTas6Ko0a8Dvpe4DbFhXA88aNPZeNMD3nqnQmSL55b9RdJsGyd4eOMaGw7z7JjE3mr3T8VnJmS2rbXZj6zGkSJ5cDpmQnS71a1aVR5s1lK59R8j+Q0c8VUFdf60QGmTC03v2H7T93Y9Xnmkloz//3nwLyL835f274dGmDX1CslpptMfA0ebvAN8/xrKaoNy7b7T3zEXDuqNN5WgNXGtY94L5W9F7eajxnXJPvdvtfDU457/kyh4svTq192/2PA8Uug7U5tnh3xUNfU/5ZZ5PKE8Din2Gjpd0JrD69mvtzN8+l7/dxX9ffa7nsLfpW7RgXunyQpsIXZ7t3MfOddzAt8w3o0Q+ToQdb0BDlz6XQ4i9uzwv8xavFK1K3uL+u6Rhndt8jsadSw0Mb9+lfwNePvcvtX1EvjZBbg1Y+i/jBna33wzjfR0mNv9x8MrbH/h39Qmuu/eQfye9jl5t18rzXtPt4yZMtmF//76Vy5aQdo83MzejJhE3R/8+7vj82/V5bIPYY7/8URabGyH8l8rmpojnWzf3adabE/S4/Re9EaLz8094gv36nvlg9JcR3o96bb7UtqWUMcFpFzr1H8u9dzTU+mynPvZzavA7r3u66Xu/5wdjPefRbdCxu7/ytOTNdfm/Lb0/MzScv2vv5W9icv31RpKO7R93T22Y+qMJP0pNE9B+vPm9nvZAK598M9WGgquUL2Wry3v30ZtUsmfNbMPM3u16E8A7rz/ruSHFO4idJnXqaK+tQEHsmFicN5/J7c1nlN5AM6ZfV88h6Xvm02+n2udjB3Sz15s+WWFuyBhubsjQG16eNddhTJeYXvuvvzvYVt7XG2A0TK2LXj8Pm/eu3gjivzSoU91su9v2HTJuolSvWEbaPdY00vePHrMee0yuQfdae/YfMtfUmAjXlJq91u5Re4OEu1HF7eMe3fHptwn0GfaJvVHBbdNHvS7bP97c3qygz/W/Od8bMk4OHz1hK/jnMjdguGWduYHjgzFfmRs3SslzTzxom/XzWj+3e7z6tLlJIqfrah9/MTej6GfYvfVqSPPG9X228QQBBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBK5eIF4Fsd10NPRw1IRgNfScwfyP4Vp1OLpFw7QXzX5ayVlDwoEWrYJ26myoDSBrFeirXU6bsTTQm/wajHW1xxLT/V1FbA1iVyiWx1a3VpPUptK1VtiObtE56/9IrJXF49vizq+rpn6tjk8D7XpNaoV2/8UF8mNyjfrvy3MEEEAAgRsrkNCD2LdXLitPPdIk4ElInPi/P6b6msDYJlPZ967a1aVlk7vlb1Nduv/Iz23osV+3l2wg2QUZ3WAtH2hgq0draG2CCXJrZVwN0738dEvbRW/sGjDqczuWhl4fMsGw1KlSyeyFy2X2ouW2ymffri+Yv+tS2oq2GvLWRSsRP9KkganOndP20Qrc3gFYDV97H4sGwNs91kzSBqWWiZNn2uq8Oo6Guts//qBoqForwGpITcce2beLbrZVwLu+P9I+1qtRRercXknOmqql30791Va79g5muvCu7qehZQ0J6t8OOu5SU+VW/24a9l4nO/6xE6fk94XL5OffFtiKs00b1rGhxzTm+CJbAoWuA7X57+/Ok1aK7W1C8Lr8bny//OF/dl3D7lqpObJFq7lqiNS/uqrr74K/fbp08FQudtvi0+PeA4fkrX6j7Lnu1+1FU3368nMNdL5vrjHvxftcasXyJg3qmOrAGSWLuU40YL/dVMceNPYrO1aXDq3tnYl6Derifx1q2HLBsr/kx+lzTLXgSnL/XbUkiflbUkPwLkSp4U29DjUQrAHKLydNt9ecC4jquD/NnGv/6fX5ePNGtqq0VoT/bupvtq8LlGvV4qiOT8fyX2ITxJ40fbZM+3W+pDLvySdM+FirDW/ducdcT5ePWauNP3TfnfYl3LWnTx5sVE8qlSlhb5aYYqrmb9+1z1bId8Hm7v1H25sl9POooRlDq19rJWS9/vT97CquR/XeCRTEPnHytK0CrDdcaHXnejUqm/9GTGTfAzq2eur7QkPP3p8Z+l7UKvJZMqW3FYunmjnr8sZzj9tqy7o+1ISA/zIhZH1vfzTgLW2KdHFBbO3Q9J66tjq9VhMf+9UkT5hX7eqa4zt24qQMHv
FXLgAfubRVpFCxjD2hQmxFYx7YXnd7ILIO68a5iUJA9vgwqp34dZRO5xeTfpcfmlXixytrGgDoGg+gcJsREH4cUJCWGNHYt6x4+5P3AhQffeV8uUF6fKqHvvkbatW8l994+RwUPuklsG9pczzzgtqV5PPvOsXH3tddKr++VyRbeLIwmx/X0sKeNiDvA7Ye/9D7ax7h8xTNocfdQWKb4zc+omzY+2/u+++ZpUqVw5VIiNSOmO+XXmdzjGKMTdC+bOTPpNf9W1/5KnnpmcdJ/5YumX0ua4E+0CqMceeQhFWRflfvzGwrfkrHPPlxPaHyf3DLlds7Dbd997X077+zl2P1tCbGReSGMz7FldmD/46FzYP38N80eaINf5wkusmL9L507S75o+W0TBs7KTTz/LfvHh4bGj5cgjmkQao8jQL8RWS9zVq1WXhfPnIErCDbpjqIx6YGxOCLETlTI7+mUVjMP3Fs5Pei7pjZcP+3HPqfKhzawjCZAACZBA4RDA/AkOzwsb7LVb3jY81TwwaqOizg+j5h9H/KD26vPaqM+n8qG9cTDTPJRTSS08KAQhdr2aFeWRa1tLWc/7RLWIDavVsF792+9/SMsrn9XTkHILwfN225aVD40V64P2rG7F1hBd39j5MGn119oy/a1lckyjevLim1/LLZPeSZlXKiH2frvuJPf3aim/m+fXLXon12208d/XhFOInRIvA0mABEiABEiABEiABHKVQDYeamf6Esqb3v+iJirHOETE8+fPtxZ1dt55Z9l3330TVYBV1s8//8y8XKwoEKyqmzv3FWMZ7Q/xWyxWEWo6QmzkpaLkqlWrysEHF71IhT9EiBAYqhARYmO0E1aiGzVqlLD8jBfIEF96hZoQWkIACuFjzZo1kZ1tG9qYLSF2lPqpyBjWiyB4huATbsOGDfLuu0WfgQY/9beBKf5B1I32QnwLQRIcXkZBHA2xchSn5y+qELthw4ZW1IKyNhmrY2+99Za1pK7nD/5h/RRhWAiAl+kQyeMcrTRWAj/55BMr0m/kEayvW7cuwSiKEBtW12GBWYW/n5tPrS9btszmrwJ+bXumbYkjHxXvFmfNPkq/izou0hViwwo3+jQk1d5zhfIWLlxo+6SXqQrgYekc1w9177zzjrWkjmMVYmM/yrUpHSE2+ufbb7+NrFM69ENY8c7Uffnll4I/vxXwVEJsWPoHJzhcj3FdVodr46pVPxhLsHVlL2OBL0pczYNbEsgGAe+8JhMxtj6cRx3jfmDtMh90SROF79YQYmO+AYvWcI+MGyNNDz/M7mMOAVHg4089Iz2MePnvHZPFdjbS//+NHD1Wbr9zqOy/337y5KTxgvss3GvzF1gL1fWMde9Z06akFGIj/t/P6SwLzTW5T6/uRjB7Ebysmz1nrvTrf6M0P7KptciL4y6XXLaF5dMnTF379rvepvGKZSc/N8VaOW7VsoWMGXXv/3MVWWCsHUKoCIvIzzw+UXauVTRP3LTpF7miVx/58OPFMmbkcNm3YQOZ99p86dTlYjunemnKZNlll7/YfD7/Yqm0bX+S3c+2EBuFYGzBZfOFpUs/d0ljGxLy7/kXpkqPK68WWIEeOewu80WUaomYEFNCmLb4k0/lwdEjpaUR+cOFnWcVjV3R7RIjDr3MxsU8859GyI0+47eIfc11Nwisa/stzapouaWxwD56xD12jorMlq/4xiwM6GbzffHZJ60/5uMTJj0uNUy9Tz7xeBsW9g9jrc1xJ8lSMz+Y+tzT0mCfvbeIigUDWDhwVe+ecmnXLvL2u/+Wjmeda/vjlKcft1bbkeg/331vBZ6w6O61rF9cm+4zjI9p2zqp3HbHdzBfi1lrhbk//rheFsybZcZw0e8URIzCIyljh4NC6ffpCrG98VSk7Rdi49qGaxzcJLOoIJUY+zpzfZ346OOChQAQeuILM+re/2CRdOhYJPb/95vzbX94c+Hbcua5nbe4HqL/ndCho10MM+HBB4z1+cPTEmKH9c8o4wj1nTDxUbl+4M22Xk8Zq9j7eL7utXr1Grnymn7WavX5nc6Vf117tW2iLtTwXku07en28Q1mvP3t8KLF3W8vmCc7mWcZcIs++tiKpTEehw6+LXEtwDWjRetjbJxPF72buJZEuR+r+BuZjL3/PjmqRVH5sIYNETauj3DZFGIj/0IZm0HCE7Q/zB9hOof1P98L80eaIOcdg3fdOUhONOJ7fY6BL5cMvedeI7x+wFpjnz7lGRsWZYyiTL8QG/cl/dqJd5Gd9x6ztSxif7rkM8FXIXbbtZ4Mv+uOxJzz+x9+kMObtbL39Ndmv5RgFMQ01/3inlPlentZPxIgARIgARLIRwKp5oFR2xN1fhg1/zjiB7VXBcalUYit7fWz88/t/eH+Y28+cT/X9pelx4UgxEZbz2uzj3Q9fj9ttqgQGx4QYkOQvfCT76TXyPmJOBcc01A6tWsgK1f9JGfeMiPhP/zyZnLwXjWsQPqnnzfLcf1esGGHN6wld158hPWH6PvSe+bJB0tXJdIF7aQSYiOPV+482fxWEblh/EKZ+e4Km8U+darImCtbWX8KsYOo0o8ESIAESIAESIAESCDnCWTrobb+YHYRHGWS1g88TOCarpgT+eElIMSJeAEDASI+qwvBIoTOcPsZoQ2EzepU8Ajx5Y4VK0qDBg1sGhWhpivE9oo1YTWxoskLLy7xggnukEMOMVbZqth6IW+IKBBvp512suGrVq2yYV5rsxAzQ7CLl1UtjZVsuFSiR4RnyhDc0q0f2gB+qBPE1mANq0br169HVeyLJAhXozrkAXEx/sAP52SXXXaJlI2ev3SF2CoqRz+oaNqB9sBStzkpAkm4qxAblU6cR7O/gxGWgzH6C0T3WASQrhAbdUNdkK5ixaJ+DfZwXpFrXG2JI590x26Ufpfgmea4SFeIDY6LFi2SH8yLULDGuYLbaMYxuPsF1xib/zZjFGG41uA8YsxjXOi59Qqxo1yb9LqkVs5Rj63tVq9eLe+//76tBkQuNWrUsGOzuGvSN998I59+WiSogMARnHCNwNjGtQ0icYi74aLEtQn4jwSyRCBTMbbOjVC9qA+602mSy3zQJU06ddE4W0OIjbJV2Iz9Fs2OlPr197KWrCH+gTXEac8/nSSERTyvw7Xoom7drcAN8WExdaVZEPbK3Hk22pDbb5UOJ51g98MsYiMQ4rRTOp5tRXwHHXiAFeF+bawWz5w9x6Z9bMI4adzob3aequLxg//6VznM+EF0NvfV16wAD/cRrxAbYriTTj3D5gFhLwTTXTqfa48HD7nbCpiwaA1C7apVq8iMWXPs1zYguB1rhNsqeFKxIuJC9Pbbb7/LtJdetnM1fLmlJITYttJZ/ufSz13SpGoGxIRnnN3JWvgEb/SpenXrmj6yQl6aMcvOFXDuHxn3gFT4//0v7DyrIA3ltWl9tOxqFgbMf32BLDNWo9FX/ELsB8dPkBtvGWSrh/hnmUUIrY5qYb9g0/miS+UNI27F4oLmRx5h/P5rrd0ict8+veXiC8+36V6c9pIRil1p92dPfyEhlLYevn/
vvf+BnHLG2VZE99ILk32hRYfo3+1PPs2WO+elF22fHH7f/TLk7mE2wvHHHWN+E21rxwradMbpp8ptNw1I5BXWJhWaBgmxvWm6X3ap9LyiSGyumUIkmy4PTZPLW5c+7JKmOAboX0HOL6T2Cq0hxr57+AibDKJrCLVVhK1C7aA81e/Tzz4z4t1/2PGAvt2saVN7zYfg8aUZM200r/V5eED8OWzESHvNVREwvlaA/nfxRV2k75U9bTqI+A8+7AjBNX3yE5Osn/+ft695x1yUcaR5DrjpVhk3YaI9xOKgZsY68H/MQtGXZ862datvFi7iSwp/2bnoWUYqIXaUPg6r4nptaHN0K8Hi1BemTk/ck7xCbPzOOdhY0AYr5X3TgH/ZcR3lfqwLVtBY3Lux6HzOK0X3XXwZAi7bQmxbSJb/uYwzlzSpmqECCv+zvjB/5KXzWP8cNsw/VflTp78sl/XobaPgntXO3Js2/7bZLhJbtmy57WcPmIVjhzdulMgm3TGKBH4hNvx0gQT2MS5r1awhzz5fJEhA391aQmw852rb/mS7eKlhg33M/KDIyv3ER5+w88fePa6Qyy/timrnrYu7/+YtCFacBEiABEiABHKYQKp5YNRqu8wPo5aRafyg9lKInZqq97m4/zdB6pSZhRaKEBuUxl11tOxVu7IF5hViH7hHdbn3imbWYvam//4my77fINUrl5dqlYoMhwx56n15+tUvEqA7HLGH9On4V3s874OVcu3YNxJhEE5vY94l/2qeAbfq81zCP2wnlRAbaSb9s43Aojfc6vWbZP3GX2XXWpXssXnNR4vYlgT/kQAJkAAJkAAJkAAJ5B2BbD3U9v6wwgsa/Lgqzlod0uBH7OLPvrQc4/hBFiYihpXkr776ylhOrSN77/2npTUIJiGchMAZQmd1a9askSVLlsjPRuwKkSQchINI6xVhwx9iW5QLa6xwsL6MF4Eq5E1XiI20EGZCcKjCTfih3AMOOMDWEcdwEIajTLwEUgfhZu3adYyAqL562Taj7RA4qxVvtRoL4WezZkXWoxIJzE4cDNOtH8qFCBOWbdev/9GKiuGHttStW0/23HNPHGbkYE28QoUKVtgeJSM9f+kKsSEERl+CIF4dyq1rhDPoS94+FsYY6dQidvPmza3QHn544YY069atTTCCMGf33Xe37NIVYuOcow+jj+ElOBz6V7169WTXXXe1x/gXV1viyCfK2E233+FaEGVchAmxFyxYYMeiv498/PHHth/oIgosmIAIG2J3FbQpbFhvh9gcYgc4hKPfwx/Ca1jHh5V8delem1SIDav5WNSRKw59D1be0Tcgqm5qxC7FXZNQdyyqWL58eYIT/DAGwNTfvihxkQ8dCWSLgD6o1/zTmedkY26k5Xu3LvNBlzTeMovbV4vQc2dOl7p1am8RfdiIUUb8NlxGDr9b2rUpEp7AUikE04veeSPpyxd9rrlOnp78rLww+Ulr0Vkzu3/Mg3Lb4CFy9523y4nHH6feApHPhEmPyesL/nzYDHEnxKVBdUkk/P8O5mODh9wjL82cZUXM8N7D3KP79umVqCv8wuqLMLiPF38i944cLa/MKxL0wQ+C22uv6m1F2DiGgzC1e++r5TPzVQs4XA9vGXiDPP3s81YA/trslxNWqxE+dtzDct+oMVak07TJ4fLIQw/A284vYEkSgjm1IAr/S4zVYVhQVpEv/HCthmgP4kAIr+FOaH+cdDr3bGuduP2xx1jrjDYgj/+59HOXNMUhgmVy9NXxjxSJKr3x/3H2mcZidZ+EJUwNCzvPECwOvHmQPf+IC/HW4FtvlhNP7WiFkK+8PFWzEIjAb739TmtdF55eC+2wQgvx/ixjlV37AERxvYxIueNppyTygKV09PVa5vcQxmD58kUvexIRPDu3DLpDHnhwnFxj+njXLud7QpJ3Wx97ohWePffUY3LA/kVWf0aPfUiemzLVCtYRGwshzj3nTLnskq5J1qvD2hR0TdFSYXm8SfOi68yMqc/LnnvsrkGJbbo8EglyeMelD7ukiROBV4yt+TY5rHEkEbam+/bb/1jhJRa0eB36VM/u3eScM4sWs2gY5rH4GgKuhx+ar+HAYVzhOtjt4gvNb9my1k+tPKcSYof1zyjjyBZm/uH33R13DZNRo8eol93iHoEFQf801w3vdf32IXfJyPvHyLgxo+ziiqRE5iDdPr7WPMO4ytx3deEQ8sGXHTBHxxcj/PdciNZvuPFmM79fbotc8uF7iTEb5X78iLFk/ujjTyXOAThjEQYWdsF98PYCu83nfy7jzCVNKkY6n91aQmzU7enJz9m+rfcerW+zI5rKrTf1lzq1k+eN6Y5R5NP1sh4yw8zfvF9SwG/zvv1uMPeYIvE14p3a4WTz9ZOGdrHSjf3/lbguhM3t0p2LprpOYExBKO5dVILnAQPMPd1bN9yLIcA+++8dUdW8dnH337yGwcqTAAmQAAmUKgL6vE/fmeIZYb66sPmhS3soxHahlt00en61FH3fj2PtvxoWtlWhOsJLyho2yiqNQuyTrp9qRMtF7w7RRnVVdiwnkwccK9uZr7eNnbZYxk5frEHS6uA60r3DgVLDCLAhcIb7+ZfNct+Uj5JE2PAvX24bmTHoROzK1aMXyPyPip754nhsn1YCi9Uff71GLhr6CrxSuk5tG8hF7fc1lrNXGwvac7eIW7H8dvLQ1eZLPjvtkAj7xljo/uybH6XFgbvIy+8slwEPv2XDUCfUzd/+2tV3lMevaysbTXvaXTMlkU8cO68O7RBHNok8ypgfpqovSXhyhwRIgARIgARIgARIoPQRyOZDbf8PNDxMwI80/48zPHSA+Brx/S7qp5z86eM+xjQZlochGISINZVDXLz8LC5eqjw0TMuFkFdf5GqYd4t4sNSNMsuVK+cNSuxDxJsqj0TELOykUz9vsWAN0Sp456vTc4fzAZFz3A6MkHeUfhZkcRj9Bv0iFeu42hJXPumyTKfflcS4UGvjYWPT2x4I3CDcxphPxynTdK5N6eRXknFQd1wr0f+iXpvACC/Gi7s2oj1R4pZk+1lWYRHwz43Qen3ZgjmSOp0T6eI0+GdzTuQyH3RJo+3Lly0EcRC4QEAa5T7rbR8+XV9xxx2sONrrH2Uf1qa//c+39qsnO6S4L0Ak9/PPm6Sm+cLANubBeyqH+94mc/0sZ+YmQW1DXhD/1qxRvdhrM0SLlatUllR1S1WXXA5z6ecuadJlgPO2zIgVvzHi99rm6y716tZJeX5SnefvzQLUsmXKprTwrvXCfRr3W3xxIuhejX5ertx2spNnsZimxRbpsbgsKK03Xhz76Lubf91cbLuKa1MmdSmORyZ5l0Ralz7skibutgSJsVGGV7QYpUzM3b9Y+qVZaL1e9thjN3ttLS49+h9clcpFlqiKix8WHtQ/XcfRRvM776uvvpY1a9dKHVw3jKXvTMdiOn0c14wfzKJk3JPS/f2D+13QYo0o9+PVq9dIGbOQO+x6FMY8H/xdxplLmlQsdC67NYXYqB/Gw5emX2OhDM71XnvukdbzlkzGKMr81iyQrl
atWtIihlS8SioM9/vvzL24wg4VMr7+lFSd0ykn7v6bTpmMQwIkQAIkQALZJoD3oYPuHZdUTDaf+SUVlIWDsPmhS1EUYrtQy24aPb/+UtLps7rgQJ9vp5PGX04mx6VJiJ0JB027rXlW3LBeVVn+w0+ydsOWYm6NV9LbcttuI/vUrWKtda/7qeiLzSVdh6DyKMQOokI/EiABEiABEiABEiCBYglk+6F20EMFVErFRvoDTCuqQm0VIMG/pH+caV24JYHSSiBIiF1a28p2kQAJkEAuEgh7iB1UV8yNINb2L2QLiuvq5zIfdEnjWj+mI4GtRcCln7uk2VrtY7kkEETApQ+7pAkqO1M/vxjbVYSdaT2YngSyQcBlnLmkSVV37xxWn+tpfH2+F+aPeN4wjY95ri5M1Ly4JQEQiLv/kioJkAAJkAAJ5AIB73xK65PP8yFve7xzPW3b1d2KvlCjx7qF6Nrv8mF+GNRerXdQ+/1t9B5rulw+/9petA3PpnGsDvVWf/XD1i/Aht/WeM9PITbI07kSoBDblRzTkQAJkAAJkAAJkECBEyiph9r6Yy0MN36s4Uebioz8Au6t8SMtrK70J4F8J0Ahdr6fQdafBEigtBDQh9eY9+jDd31oj3kRnM6Nstlml/mgS5pstoF5k0A2CLj0c5c02ag78yQBVwIufdgljWv9ikunYmyKsIsjxfB8I+AyzlzSpOLif1aXKm66YbksPEm3DYyXHQJx99/s1JK5kgAJkAAJkEA0AkHvSvN5PlTc/HDskOsDAV3Qe2CgPzxzmUdx7Q1tVIqAXG6v9lc8r4aoHu2Hnz7H1mbp8+wgf7SvJJ5va110SyG2kuDWhQCF2C7UmIYESIAESIAESIAESKDErYvgRxqc/hjTH2dBP8K8P2j1Rx5PGQmQQOYE8NnapUuXyvbbby9169bNPEPmQAIkQAIkkNcEXEQOLmnyGhIrX5AEXPq5S5qChMtG5ywBlz7skiabAN54c6EcfljjbBbBvEmgxAm4jDOXNMU1TBcSFhcvnXA86wt6HphOWsYp/QSy0X9LPzW2kARIgARIIB8I+EXIYWLlfGgL6phqfggRbpALS5MP88Owuge1szi/fGhvUBvAAO/w9V2/P462K+z8++Nn43hrCbEPqV/DNmfstMXZaBbzLCECFGKXEGgWQwIkQAIkQAIkQAKljUCuP9TW1bVhn68qbeeD7SEBEiABEiABEiCBkibgMh90SVPS7WJ5JJApAZd+7pIm03oyPQnEScClD7ukibPOzIsECoGAyzhzSVMILNnG/CDA/psf54m1JAESIAEScCMAIauKVd1yYCoSyA0CXkF2LvXprSHEzo0zwlrEQYBC7DgoMg8SIAESIAESIAESKEACfKhdgCedTSYBEiABEiABEiABDwGX+aBLGk+R3CWBvCDg0s9d0uQFDFayYAi49GGXNAUDlA0lgZgIuIwzlzQxVZfZkEDGBNh/M0bIDEiABEiABEiABEigYAlQiF2wpz6WhlOIHQtGZkICJEACJEACJEAChUeAD7UL75yzxSRAAiRAAiRAAiTgJeAyH3RJ4y2T+ySQDwRc+rlLmnxgwToWDgGXPuySpnCIsqUkEA8Bl3Hmkiae2jIXEsicAPtv5gyZAwmQAAmQAAmQAAkUKgEKsQv1zMfTbgqx4+HIXEiABEiABEiABEig4AjwoXbBnXI2mARIgARIgARIgASSCLjMB13SJBXKAxLIAwIu/dwlTR6gYBULiIBLH3ZJU0BI2VQSiIWAyzhzSRNLZZkJCcRAgP03BojMggRIgARIgARIgAQKlACF2AV64mNqNoXYMYFkNiRAAiRAAiRAAiRQaAT4ULvQzjjbSwIkQAIkQAIkQALJBFzmgy5pkkvlEQnkPgGXfu6SJvdJsIaFRMClD7ukKSSmbCsJxEHAZZy5pImjrsyDBOIgwP4bB0XmQQIkQAIkQAIkQAKFSYBC7MI873G1mkLsuEgyHxIgARIgARIgARIoMAJ8qF1gJ5zNJQESIAESIAESIAEfAZf5oEsaX7E8JIGcJ+DSz13S5DwIVrCgCLj0YZc0BQWVjSWBGAi4jDOXNDFUlVmQQCwE2H9jwchMSIAESIAESIAESKAgCVCIXZCnPbZGU4gdG0pmRAIkQAIkQAIkQAKFRYAPtQvrfLO1JEACJEACJEACJOAn4DIfdEnjL5fHJJDrBFz6uUuaXOfA+hUWAZc+7JKmsKiytSSQOQGXceaSJvOaMgcSiIcA+288HJkLCZAACZAACZAACRQiAQqxC/Gsx9dmCrHjY8mcSIAESIAESIAESKCgCLy3ZIX8de86BdVmNpYESIAESIAESIAESOBPAi7zQZc0f5bIPRLIDwIu/dwlTX7QYC0LhYBLH3ZJUyg82U4SiIuAyzhzSRNXfZkPCWRKgP03U4JMTwIkQAIkQAIkQAKFS+Coq56TzZt/L1wAbLkzgW23LStzBp/knD4oYZk/jAsKoB8JkAAJkAAJkAAJkEDpIvDp199JnZpVZccK5UpXw9gaEiABEiABEiABEiCBYgn89PN/ZcX3a2WfXWsVG9cbgXNILw3ul0YCHBul8ayyTcURYL8vjhDDSWDrEODY3DrcWerWI+Da57dejVkyCZAACZAACZAACZBALhHoMmSOfLJsbS5ViXXJEwIN6lWVMb2PirW2FGLHipOZkQAJkAAJkAAJkEDuElj940ZZv3GT7PaXarlbSdaMBEiABEiABEiABEggKwS++na1VNqhvFSrvEOk/DmHjISLkfOQAMZGhe3LSa2dKkaqPcdGJFyMnGME2O9z7ISwOiTwfwKcr7ErFBoB1/tRoXFie0mABEiABEiABEiABIIJTJy9REY892FwIH1JIAWBbiftL2e32jtFjOhBFGJHZ8YUJEACJEACJEACJJC3BL5YscoIcLaXmhFFBnnbYFacBEiABEiABEiABEhAvl+zQdas3xjZGrai4xxSSXBb2ghwbJS2M8r2pEOA/T4dSoxDAiVPAGNz/cZfZM861Z0K53zNCRsTbUUCmd6PtmLVWTQJkAAJkAAJkAAJkEAOEaBV7Bw6GXlSlWxYw0bTKcTOkw7AapIACZAACZAACZBAHAT+++tmWf7dOtlmmzJSo0pF2bFCuTiyZR4kQAIkQAIkQAIkQAI5SACf+v5h3Qb57bc/pG6tKlJuu22dask5pBM2JsphAhwbOXxyWLWsEWC/zxpaZkwCGRHg2MwIHxPnIYG4+nweNp1VJgESIAESIAESIAESyAKBFat+kuvHLZRPlq3NQu7MsrQRgAh7YKfGUqf6jrE3jULs2JEyQxIgARIgARIgARLIfQL4jPYPazfIz7/8Kn/kfnVZQxIgARIgARIgARIggYgEypj4FbbfTqpW2kFqxfQ1FM4hI54ERs9JAjo2alStKNUq7xBLHTk2YsHITLJIgP0+i3CZNQlkQIBjMwN4TJqXBLLR5/MSBCtNAiRAAiRAAiRAAiQQO4GJs5fIzHdXyOcrf5TNm3+PPX9mmL8Ett22rOy1S2VpfUgdObvV3llrCIXYWUPLj
EmABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEorAQqxS+uZZbtIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgASyRoBC7KyhZcYkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAKllQCF2KX1zLJdJEACJEACJEACJEACJEACJEACJEACJEACJEACJEACJEACJEACJEACJEACJEACJEACJEACJEACJEACJEACJEACWSNAIXbW0DJjEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiCB0kqAQuzSembZLhIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIggawRoBA7a2iZMQmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQQGklQCF2aT2zbBcJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkEDWCFCInTW0zJgESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESKC0EqAQu7SeWbaLBEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEggawQoxM4aWmZMAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRQWglQiF1azyzbRQIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkkDUCFGJnDS0zJgESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESKK0EKMQurWeW7SIBEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEsgaAQqxs4aWGZMACZAACZAACZAACZAACZAACZAACZAACZAACZAACZAACZAACZAACZAACZAACZAACZAACZAACZAACZAACZAACZRWAhRil9Yzy3aRAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAlkjQCF2FlDy4xJgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARKK4H/AQAA//8+8e/SAABAAElEQVTs3QmcTfUbx/HHMmMY+9j3fV9KJCRbaVMpqbRRSSJtWkj1r7RRSSIkJBUqRZIt+76ErNn3sQ5jmLGM5f97frdz3Rl3uDNmzJ07n9Ore89+fud9Lvc493uek+G86YQOAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAwGeBDASxfbZiRgQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAwAoQxOaDgAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIJFKAIHYiwZgdAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBAgiM1nAAEEEEAAAQQQQAABBBBAINUEJi7fI78s3CEbw6NSrQ1sOOUFKhTJKffXKyl31iqa8htjCwgggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggMBVEiCIfZWg2QwCCCCAAAIIIIAAAggggMAFgT2HY2T22v0yYPKGCyPpC3iBuhXySde7qkjRvNkCfl/ZQQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQCHwBgtiBf4zZQwQQQAABBBBAAAEEEEDA7wReHrFMFm885HftokEpL6Bh7D5ta6f8htgCAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgggkMICBLFTGJjVI4AAAggggAACCCCAAAIIxBXYFREtD/WZG3ckQ+lK4I1W1eXOWkXT1T6zswgggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgggEHgCBLED75iyRwgggAACCCCAAAIIIICAXws8MWCBbAyP8us20riUFahQJKcM71w/ZTfC2hFAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAIEUFiCIncLArB4BBBBAAAEEEEAAAQQQQCCuQIMek+OOYChdCsz/4LZ0ud/sNAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIBI4AQezAOZbsCQIIIIAAAggggAACCCDgFjh/XiTiSKTky5vbPc5feghi+8uRSN12EMROXX+2jgACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACVy5AEPvKDVkDAggggAACCCCQ7gU+/3KA7N69R0qWKCHPd+54WY+16/6VYSO+s/N1eOoJqVih/GWXScszHI8+IfsPRkh0zAkpUayQ5M6Zw+vubNm+W8ZNmW2ntbqjiZQqXsTrfIz0T4Hvx06S/YcOS+ECYfLwvalf6fftTwbJ7r0HpM41VeXZx1v5FVp6CWLXKpNXerSqLjNW75MBkzf41TFIyca81rKqlCuUQ975aZWEH45JcFMEsROkYQICCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCKQRAYLYaeRA0UwEEEAAAQQQQOBKBTT8fNd9rX1aTaFChWTBrGk+zasz3XXfg7J23TqpUb2ajPt51GWXmzlrjjzVsbOdb8TQwdKwQf3LLpMWZzgSGSXDxkyQtRu2xGl+1qwhUu+66vJwy1slY8aM7mlLVqyVQSPH2uHO7R6Q62pUck+7Gj3HjsfYCsq6rSIF80twcNDV2OwVb2P6vKXyw6+TLrmeKhVKyysdH7vkPFc6set7fUWPeVieXPLJWy9c6ep8Wn53+H45c/asZMuWVQqE5XEvc+LkKen8Ri87HJIlWL76qJt7mj/0XCqIPf2dWyQkKFOCzYw9c04a/29qgtP9aUK/p+rIdWXCJPasafPb
aaPNxfOFSp7QYNmy75hEnzqTJM4J3ZtI3uxZpMvQJbJ86+EE10EQO0EaJiCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAQBoRIIidRg4UzUQAAQQQQAABBK5UYNXqNdKydRufVhMaGiqr/17k07w6E0Hsi6k0CPtm74E2mHvxVNeYUsULS9dnHpVQE6LVLrWD2H/8NU9+/XOGbUv3Lu2kfOkStt/fX6bMXiRjxl865Fq2VDHp8fyTKborqRHEfvrV9+WsCfmWLlFE3nqxfZz90+O5YOk/0uKWhlK/do0401J74FJB7FnvNZegTBnlZOxZOX/+4pYePxkrLXvNuniCH465trSrIvbMNWmnIvYvrzSSwnmySp8J62Tsop1JUiWInSQ2FkIAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgTQoQBA7DR40mowAAggggAACCCRF4GhUlCxctMS96HmTcOz8wst2uGqVKtK549PuaVmCg6VJ45vcw5frIYgdV0iDsR9+OUy27Qy3EwrlD5Obb6orFcuWkN17D8iocVMk6li0nXb9tVWl42OtbD9B7LiOvg55BrEb179OypYsdtGieXPnlMrlS180PjlH+FsQOzn3LbnX5UsQ+/5PZ8veIyeSe9Os7zICBLEvA8RkBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAwEOAILYHBr0IIIAAAggggEB6EtAgdtnKriq5zW9uJoP6971o9/cfOCijxvws/27YKHvC90qF8mWlapXK0ubB1pI1JMQ9v2cQ+8cRQ2XE96NkybK/TaXnbNLophulaaObJCwsr3v+mbPmyFMdO9vhEUMHS8MG9d3TIiIOy+y582T+wkWybftOqXt9balbp7adJ5OpkpsWOs9AdfbQbPLxG89JtqwXvLRa9is9v5ATJ06K7lP/D16XLMFBF1XEjo2NlYXLV0v4voOiFZ0bXn+NVK1YNg7BqdOxMnnmAtm8fbfsO3BIcufKIaWLF5GbG14vBfJdMF+7YYvMXPC3Xfb+Fs3k71XrZcWaDaJtefulp2XID7/Jjt17JeLIUTuPVljOkyunNDHB5vjbjNMAPxjwDGJ3efJBubZaxQRbtWTlWuusM9zZrIGpJF3UPe/hyCj58bfJdriacdZQt3b/bt4u85aslF3h++XkqdNSsmgha9KoXi073XnxFsT+ddJMe/xy58whj7a63ZnVVnoe8O1PdrhKhTLStEFt9zRfjul8U+laj9/y1f/a5YKCMkv1SuXs5+zJh+6244aPmSDRMSekhGnv3c3j3lix+t8tssZ8JtZv3CoZzWewqmlDzSoVpEKZuFXQh44abz8jpcxnqnaNyjJ1ziJZt3GbhIQES7UKZaXlbY0lc+ZM7rb72pMcQey65fNJqxtKSvSpM/LuT//E2fTjjctIteJ5ZMGGAzJuyS6pWSqPPNKwjCzedFCmr94nL7WoLOUK55B9R07KxL93y6y1++Wcl/Lb5QrlkHuuLy61yuSV7QeiZcrKcJm3/kCceVvXKyl1yuWT7+dslbJm/uY1C0ue7Fnk8X7zRJdv16Sc/L01QsbM327b6Dl/0bBscmetopIrW7Cdp9/Ef+2676lTXO68rqiEhmSWdbuOyrAZm72G0hPbvnw5ssjtZnvFzXb3HD4hQ6dvknW7XX/mq5XILY83KivXlwuToMwZZcfBaNkdESNz1u+XP5btdvu2qF1MmlQtKEXzZpMDUSdl5bYj8u2sLXLu3IXy5VTEdnPRgwACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACAS5AEDvADzC7hwACCCCAAAIIJCRwuSD2gkWL5ZnOL0h0tKtys+d6alSvJsOHDJQ8uXPb0U4Qu1zZspIlSxZZu26d5+xSulQpGffzKMmRI7sdn1AQ+8DBg9Ky9cOyb9++OMvrQLvHHpG3e3S7aLw/jvjxtyny19zFtmkvdXjEBGTjhqd1wqyFf8vEv+bZeTQ4q9WaPQPcGs5dZ0Ky8buXnn5YqlcuZ0cfOhwp733+jRyPjok/mw14v9aprZQvXdxOmzJroYz5fZrt15C1U61bR/Tr+Yo8/9andlr8l4fuaS7NG90Qf7RfDScmiL1+0zb5ZOBI2/5GN9SStg+0cO/L1NmLZPT4qXb4qTb3SIM6NeXnP/6SSTMWuOfx7LnGBL67PPGgZMjgGustiP1W70GyZ98B0UC+Ojudhlbbv9LTDtauWUU6tb3f9vt6TEf89IfMXrTcWZ37XQPZg3u9YYc7df/YBsdLFS9sw/bOTL/+OUP++O+z54xz3jUs3rRBHWdQnn71fdEK7/nD8shxE+rWmwc8uzIli8qbLzzlOcqn/uQIYufKFiTjuzWRIBMkH7toh/SZsN5uWwPFg5+5wYbdOwxaaIPGTzQtJ+2blbPB4kK5s0rmTP8dtP9auzE8Sp4YEPc4d7ilvLRtfPGf3fjzDul4g1Qpntuuu5gJODvdrT3/MiHrYvL8nZVk094oadfftX5n/i37j0nZgjmc2e379oPHJdwEpOtXzB9n/MnYs/Jw37myP/KCf2Lbt3nvMRs+j7NiM/DOmH9k2qq90ubGUvLc7ZXiT5almyPkxeFL7fgRzzXwug5t12MmeK6heO0IYlsGXhBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAIB0IEMROBweZXUQAAQQQQAABBLwJXC6Ifcc9rWwl7NDQUHmh87O2ovUff06WmbPn2NV9+N478tADrWy/E8R2tnPHbbdK8eLF5M/JU2TXLlclVR3Xv68r7OstiH3q1Cm5v83j7hD3i106S7myZUS3OXmqK0D8vze7S9tHH3Y247fvPft+4w46D/yomwmnB/vUVs8gti6gAdpypYqLVnGOOuYKxBcrXEDee7WjXZ9WK9bKyNrVr11DypnQtVZv1vVoV6VCaXml42O23zOIrSM0GFypbEnRFHGHR+6VKbMXyur1m2Xj1p12/htN9e2C+fNKjcrlpXiRgnacv754BrG12rdWd47faZBYq5Jr0ePOb7gCyvHD0R/0GyZbTGVx7QZ+3F1iTOi467uf2+GcOULltib15cyZMyZkv8R9PN7p2sFWnNaZkiOI7esx1eO8ZcduGTtxhm2f7sutjW+QkOBgaWaqoWvnLYg9y1RF/+6XiXZ6VuPRpN51Env2rMxasExiY10hWs+q4k4QWxcIMZ/j+iacvmfvAdmwZYddh7688fwT9nPqHuFDjy9BbA0e7zl88U0G586JuyL1LTUKyzsP1rTHtY2Zf9ehaPnjjaaSJzRYflu8Uz793XVTiBPE1qbFmmD5iFlbZfHGg3KzWf6B+qVsmF4rZ38y3vVnx1mvzq/jJy7fI9eUzCPPNK9gQ9yTV4RLz19W6WRxgtXav3nfMRMK3ykHTaXoxRsPiVa/TiiIrfPPMNW5v5+7VepVyG+C4uXdof5FZtkhf22SEvmySY9WNew2PcPmSW2fVuYePmOLVC6aSx5tVMZU4g6SozGxcscH0yUkKJMUyZt
V+j5RR8JM5Wyt8K0VwCOOnTbznJZ765aQV+6uYu2/mrxBlm05LA0rFxCtPq5heK3aPXT6Zt0tgthWgRcEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEgPAgSx08NRZh8RQAABBBBAAAEvApcKYmvYVAPQ2pUvV06qVnFVSY2KOibXXF/fjm95dwvp0/sj2+8ZxO7R7TV5qp0r/Bt59Kjccc/97grX6/9ZZitmewti/71ipbRu41rug/f+J20ecFUIPmtCoo8+8bQsXrJUtBK3Vtb2984Jr2rQdcAHr/ncXM8gdtlSxeSNLk/aYKZWT37m9Q9sZeJMJvD4de837filK9fJMVOxPGf27FK7ZmX3drp92F8OHDpsq2IP+eRNO94ziF2iWCHp8fyTEpQ5s3sZ7dEqyVotWbvuXdqZatolbL+/v3gGsRNq6xOm6nhDEy7X7vtfJ8mMea4Kvx906ySFC+STU6dj5dlurs9zzaoV5IWnHhKtTr1q/Sa7TM0qFSQsTy7bv2LNBvly2Bjb/+Ddt5gAdD3bnxxB7MQcU92o81nTKudvvdjetsN58RbEfvuTQbLbBKm1e//1Z6VIwfy2f+PWHfJx/xG2v2aV8vJC+za231m/DjhW2v/Dr5Nl+rwl2iuPtrrDVNGubft9ffEliJ3QupZtiZAXhrmOn86jweE65cJsReq56w/Yys4ahG7Za5Z7FZ5B7Je/XSaLNx1yT3vMBJI7moD1ydNnpdm70+z40S81lOL5QuXnhTuk7x+uSts6oU7ZMOn7ZB05feacNPmfq3q6E8TeHREjD/Zx3ajirPxBE/JOKIitFbEf7zffmVUGdqgrNUzYO37bX72nqrS8vrhs2BMlT37lqqqdlPaFm1B7688utE9D1B8/WsuG2G980/X3vTbml1caSeE8WU2F8XU2VO40UNtWs1Qe+Wf7EVm144gzWt5uXUNuvaaIrNsVKU8PWmTHUxHbzUMPAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggEuABB7AA/wOweAggggAACCCCQkMClgtieyxw5ckQ2bNoskZFH5bgJ/b7W3RXsbdLoJhk6eICd1TOIvWntShMAzuRexeBvhkuvT/vY4YnjfpHKlSqKtyD2d9+Pknfe/9DO98JznaRAAVdAVEf06dtfIg5H2GnrVi6TkJAstt9fX5zw6pUEsR+573ZpdmMd9y5+NvgHWbthix3ub8LdWt3Z6U6dOi3bdoVL5NFjEh1zQn747UKoclift+1snkHsZx67T+peW81Z3P0eyEHsJ00QW6t8a7dzzz5557OvbX+rO5vKnc1ulOUmXN3/v3C1Z0VonUn/rOgyByMibZVstZ698G+7fItbGsp9tzex/ckRxLYrMi++HFOd1/ms+RLE1kB/+1d62k1o0F/D+J7dqz2/kIgjR2219H49X7GTnPVrRfFePbq4Z9fPon4mtWt5W2O5u/lN7mm+9PgSxI46EStnTZvjd9NX7ZXPPcLRwZkzyp89mknWYNffO1r1/JEv5sqOg9HuRZ0g9jGzztven+4e7/TMe/82e3NDs3emycnYszKn562SKWMGWyE7Mvq0M5t9/+Dha+27rkfX5wSxR83bLv0n/Rtn3ksFsX+cu00GmMrSTvfGfdXlzuuKyjwTJn/9++XOaLmnTnF5rWVVuz9aJVy7pLRv/NJd0nvcWvd6M5pq+LN7Nhd914rYWhlbu4SC2O4FTU+pAtltWFsrj99Vu5gNkO801cjbfO5qH0FsTy36EUAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAgkAUIYgfy0WXfEEAAAQQQQACBSwhcLoi9es1aefGVbrJt+3ava/EWxC5XtqxMnTguzvxT/5ohHZ97wY7r3/czueO25l6D2K/3eFt+HvtbnGW9DUz6/VepWKG8t0l+M+69z4fI9l17bXsGftTNVAEP9qltnhWxO7d7QK6r4apErgsPHzNB5i5eYdfjBLGPmOD1l8NGu7flbSPegtg9XnhSypYsdtHsgRDE7tS2tdSqXvGifcuQIaMN2joTXn7nc4k0Fd61Ovg7L3eQr3/4TRb9vVqCgjKLHrOMGTPaWSfNWCC/Tpphq5E7y3q+J3cQOzHHVNvhBKV9CWLv3X9IevT6yja/Yd1r5YkH7/LcFRusdsL+XxmDEPO5ddYfP7itVbW1urZ2KRXEvv/T2bL3yIk4bUxo4IYK+eSztq6q3PEDx7qME8TetDdK2vV3VZX2XNfUt2+W0CyZpeuIZbJ+91Eb7Pac7q3/jR9XyOy1+91B7MHTNsp3s7bGmfVSQez48ztB7Jlr9smbo1a619PCBJ2731vNHcTOlS0o2do3893mokF2X4LYGth+/+FrpEHFApI5UwZ3+5wegtiOBO8IIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIJCeBAhip6ejzb4igAACCCCAAAIeApcKYu/bf0DqN2pm5w4NDbXh6UoVK0iunDnllW497HhvQWydd/Xfizy2IjLqp1+kx9vv2nGjR34r19e5zmsQ+/N+A+TLr1zBzg/feydO1esjkZF22xqObdL4JtsfZyN+NvCjqUj919wltlVdn3lEqlYse1EL/928XRaa4K92Wvm6RNFCktgg9ov/+0yijrmq/pYpWVQqlS0lOXKEypjxU93bS29B7PjVrN0Q8XrGTZ4lv0+dY8cO+PB16fru53LSVBavX6emtG9zjx0/d8lKGT76d9uv1c2vrVZRCubLa6uOT53t+pz7GsTWULOGm53u9OlY6djtIztYu2YV6dT2ftufmGOqCzhBaV+C2Lp/nbp/bLdTs0p5eaF9G9vvvPzv08GyK3y/qWifUb7u/aYNrjvr9/cg9vttrpEm1QrZXTkac1pafDhTzmlp7P86J4itwW4NeMfvZplAcpAJJD/yxTzRQPFcUxFbu5Gz4wardVyOrEG2EvZvS3bK/siTVz2IndFU6k6u9iUmiN2nXW2pWz6fdd1+IFpW7Tgih4+fkuol8kidcmHWjYrY+gmhQwABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQACB9CRAEDs9HW32FQEEEEAAAQQQ8BC4VBD713G/uwPXw74eKI1vutEuGRMTI9Vq1bX93oLYOmHcz6OkRvVqdh59efrZLjJ95iw7/PeiuZInd26vQey/ZsySDp262PnGjvlBrq1Zw/bry+nTpyXmxAkTDM3g9yFsbe/iFWtk8MhftVdymmB0rze6xKmKfe7cOen+0QA5GHHEztP33a52vsQEsY9GHU+wunG3D/vLgUOH7bqTGsR+6emHpXrlcnYd/v4yxYSinfC5r0HsQ4cj5bX3+9ldu7VxPZkya6Ht7/ZcO6lQpoTt/3zIj7J6/Wbb3/c9c4yyh9r+lWs3Sr+ho23/5YLYPft+I9t2htt5P3nrBQnLk8v2b9q2Uz768lvb7wSxL1Wx2tsx1YWdoHThgvnkg9c72fU5Lxq61vB1qeKF5e2XnrajnUrgGiz/wnzuMmfOZMdHx5
wQDYGfPXtOPEPdzvr9OYjtVMOONW0/diJW8mbPIpOW75H3x7pudNAddILYZ86elybvTJVz5y6EtPPnDJFxrze2AeOGb06xHpPfbGYD188MXiRrdkbacQm9DOl4g1QpnlviV7jW+VOiIrauN7nad6kg9hcT18tPC3bo5mw3/Z1bJCQok/Qet1a06rjTvdayqtxTpzhBbAeEdwQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQSFcCBLHT1eFmZxFAAAEEEEAAgQsClwpiT5g4SV7o+pqduUunjtLpmfZy4uRJ6flhL/lt/AQ7PqEgdqFChaTn/3pIoYKFZPyEP+Sb4SPs/MWLF5PZ0ybZ/pmz5shTHTvb/hFDB0vDBvVl/4GDUu+mpnZc6VKlZFD/vlK+XFnZsXOXbcuq1WtEK26vWDzPhEcz2/n89eXM2bPS8/NvbHVhbWOxwgWkeaMbpFK5UrL3QISMHj9FNHSrnVbL1qrZ2iUmiH3cBGe7ffClXa54kYLSteOjkiU4WCbPXCDjp1yo+JuYIPZ0U8X7B1PNW7sqFcpIm5a3Sr68uc16g+w4f33xDGLf3PB6KVe6+EVNzZMrh5Qv7QpYOxPf6j1I9uw74AyKhpMHfOD63OvI/sN/kuWr/7XTOz7WSupcU8XMf1D6DP5BIqOO2fGXC2J/P3aSzJi/1M6rx/q+2xvLGRMYHjRyrByJjLLjnSD2ARPMT8wx1YWfef1DiY09Y9fz3JMPStkSRc3NCtntsLcg9tc//CaL/qvEXqt6JXnonuZyOjZWho+ZIFu277bL3XnzjdLqDtefRX8PYgebKtaTejSTkOBM0mfCOlmzK1KGdapv9+PZrxfbqs064ASxtX/plgh5cZjrmOjwL680ksJ5strq1vd9MktHyZdPXS+1yuQVra59X+/ZcjL2rB1fo2Qe6fdkHdt/2/vT7fjUCGInV/u8BbFHv9RQiucLlXnrD8jr3y+3+6ovThB7wOQN8uPcbXa8zvftc/VtQFsriVMR281FDwIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIpBMBgtjp5ECzmwgggAACCCCAQHyBSwWxDx46JHVvbBJ/kTjD3oLYGpSOjo6OM58zMP6X0VK9WlU76C2IrROm/jVDOj73grPIRe+f9fpQ7r3nrovG++OI6BMn5a1eA92BXW9tzJ0zh3Tr0k4KhOWxkxMTxM5mQsNd3+vrDvN6W7+OS0wQe/uucHnPBMg9uwdNUPdWEyL3584ziJ1QO+NXdNb5ps1ZLKPGuSog6/AtZj/bmP11Os/j4YyL/365IPb6Tdvkk4Ej4y8mQUGZ3QFqJ4itMyXmmOr8vQaMkA1bdmiv7XS9g3u9Yfu9BbFPmQrZ//vsa3fF9P8Wc7+pU7fO7SRTpox2XGoGsWNOnZGzHpWrnUYeP3lG7v/UdbNBXxOKrlM2LE415p4PXSNNqxeS4ydj5c4PZ5jg+3l3EFvXlyljBjl5+qzsP3pSiuTNKkFmX8+dN/P0XyCb97kC9tlDMsvYVxtJ9pAgu/zuw9GSOWNGKRaWzTZj0caD0nXE37Y/NYLYydU+b0HsDx+5VhpVKWj3LeLYKfltyS4ZPmOzfGGsaxtrQyW7D8eYY3NOSpggdgb9L4PEOQYTujexlcm7DF0iy7e6qvM7x8/zff4Ht3kO0o8AAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgikOQGC2GnukNFgBBBAAAEEEEAgeQQ8g9h33Har9O/7aZwV/7Nqtbz82huybft2Oz4sb5j0+vBdefeDj2XXrt3SrEljGTLQVZH5rvselLXr1sk1NWvKKy92ke5vv2Pn0QXrXl9H2j/xuJ3frsi8eAaxRw4bIg3qXwj6TpoyTb4fNUYWLlrszC7lypaV17q+KDc3bewelxZ6Io4clSGmAvHGrTvjNFdDrjWrVJCnH24pWbIEu6ctWblWBn031g5rdeNa1Sq6p2nF4rmLV9jhrz7qJiFmOa3K/PnXP7orb+vEWxvfIBGHj8qyVevtvE4Qe+rsRaYS91Q77q0Xn5LSpnKyt06rN/8+dY5EHXMF6h+59zZpZqpM+3MXP1Dtra3egtjHo2Pk+bcufO7fe7WjrV7uubyGvH/9c4Y7NJ3fhOY1nN5/2Bg7293Nb5KWtzW2/U6IWufp1aOLezXT5y2VnyZMc69DK28/1661O6Bd55qq8uzjrez8iTmmuoDzGduyY7ecNZW2vQWxS5coIm+92N7dnuPRJ+TbnybIuo1b5aQJZmuXPTSbVKtUVtq2bhGnArpTcbu8qTLevcsT7nVoZfC3eg+0w/eaKt933XKTe5ovPQ16uCqve5t31nvNbTja2zQdp8HqRm9PkfoV88snj19ng8EP9Jkj4SYcrF2QqZI9+b8q2VP/CZd3f1rlDmKvNRWz90WekKbVCtvwsM6vYeMvJv4r01fv1UF3p1WyP217nRQPC7XhbZ2gQe7JK8Llw19Xu+f7uuMNUrV4bhk0daOMnL3VPV57WtcrKS+2qCwbw6PkiQEL7LSE5u9+bzVpUbuYzFizT94atdK9nhbXFZPu91WT7QePyyN957nHJ0f7nCC2Vvc+diLWrjtXtiDp376ulCnoqqy+dLOpID58qXX9qv31UsXsq9Op3filu+TJpuXitO/3bk0kLEcW6TRksfyz/Ygz+0XvBLEvImEEAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgikMQGC2GnsgNFcBBBAAAEEEEDgagscjYqS06dPS76wMBNcNGVPfewiIg7bkHH27K4wn4+LuWc7deqU7AnfK/nz5ZMcOZK2DvfKUrnnaNRx2XvgkETHnJSihfJLwfxqmXyNOnU6Vo4dj5Y8uXK6Kxlf6do11Hve/Jc5U6YrXVVALH8kMkoyZ84sObK7KiInZacOHY60lZfz58192T9LSTmmZ86ctcc/UX9Ozc0CGU2l5zy5ciRll5K8zKWC2Ele6SUWfMIEhds3KydrdkbKM4MXmc91BqlUNJfsOBjtDiBfYnGpUCSnaIXu3RGusPel5k2NaSnRvmATaNfK25ExsXLOozK52lUokksOmoriB6NOXtHuEsS+Ij4WRgABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABPxAgiO0HB4EmIIAAAggggAACCCCAAALpSSC1g9jpydqf95Ugtj8fHdqGAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAK+CBDE9kWJeRBAAAEEEEAAAQQQQAABBJJNgCB2slGm6RURxE7Th4/GI4AAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIICAESCIzccAAQQQQAABBBBAAAEEEEDgqgo8MWCBbAyPumrbrFIsl9xXt4Qs2xohk1eEX7XtsqGEBSoUySnDO9dPeAamIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAmlAgCB2GjhINBEBBBBAAAEEEEAAAQQQCCSBicv3yIdjVwfSL
rEviRR4o1V1ubNW0UQuxewIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIICAfwkQxPav40FrEEAAAQQQQAABBBBAAIF0IfDyiGWyeOOhdLGv7GRcgboV8kmftrXjjmQIAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQSIMCBLHT4EGjyQgggAACCCCAAAIIIIBAWhfYczhGPpuwjjB2Wj+QiWy/hrC73lVFiubNlsglmR0BBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBDwPwGC2P53TGgRAggggAACCCCAAAIIIJBuBCYu3yO/LNwhG8Oj0s0+p8cdrVAkp9xfr6TcWatoetx99hkBBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBAJUgCB2gB5YdgsBBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEUk6AIHbK2bJmBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEAhQAYLYAXpg2S0EEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQSDkBgtgpZ8uaEUAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQCBABQhiB+iBZbcQQAABBBBAAIH4AsOm/Bt/VJodfvLWSmm27TQcAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAIDAECGIHxnFkLxBAAAEEEEAAgcsK3PjSuMvOk1ZmmPd5y7TSVNqJAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgggEKACBLED9MCyWwgggAACCCCAQHwBgtjxRRhGAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQCDpAgSxk27HkggggAACCCCAQJoSIIidpg4XjUUAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBDwcwGC2H5+gGgeAggggAACCCCQXAIEsZNLkvUggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIICBCEJtPAQIIIIAAAgggkE4EVm7ak072lN1EAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQOBigWvKF7145BWMIYh9BXgsigACCCCAAAIIpCWB4zEn01JzaSsCCCCAAAIIIJAuBaJj0+Vus9MI+I1AaJDfNIWGIIAAAggggAACCCCAAAIIIIAAAggggAACCKSAQPZsIcm6VoLYycrJyhBAAAEEEEAAAf8VIIjtv8eGliGAAAIIIIAAAo4AQWxHgncEUkeAIHbquLNVBBBAAAEEEEAAAQQQQAABBBBAAAEEEEDgagkQxL5a0mwHAQQQQAABBBAIMAGC2AF2QNkdBBBAAAEEEAhIAYLYAXlY2ak0JEAQOw0dLJqKAAIIIIAAAggggAACCCCAAAIIIIAAAggkQYAgdhLQWAQBBBBAAAEEEEBAhCA2nwIEEEAAAQQQQMD/BQhi+/8xooWBLUAQO7CPL3uHAAIIIIAAAggggAACCCCAAAIIIIAAAggQxOYzgAACCCCAAAIIIJAkAYLYSWJjIQQQQAABBBBA4KoKEMS+qtxsDIGLBAhiX0TCCAQQQAABBBBAAAEEEEAAAQQQQAABBBBAIKAECGIH1OFkZxBAAAEEEEAAgasnQBD76lmzJQQQQAABBBBAIKkCBLGTKsdyCCSPAEHs5HFkLQgggAACCCCAAAIIIIAAAggggAACCCCAgL8KEMT21yNDuxBAAAEEEEAAAT8XIIjt5weI5iGAAAIIIIAAAkaAIDYfAwRSV4Agdur6s3UEEEAAAQQQQAABBBBAAAEEEEAAAQQQQCClBQhip7Qw60cAAQQQQAABBAJUgCB2gB5YdgsBBBBAAAEEAkqAIHZAHU52Jg0KEMROgweNJiOAAAIIIIAAAggggAACCCCAAAIIIIAAAokQIIidCCxmRQABBBBAAAEEELggQBD7ggV9CCCAAAIIIICAvwoQxPbXI0O70osAQez0cqTZTwQQQAABBBBAAAEEEEAAAQQQQAABBBBIrwIEsdPrkWe/EUAAAQQQQACBKxQgiH2FgCyOAAIIIIAAAghcBQGC2FcBmU0gcAkBgtiXwGESAggggAACCCCAAAIIIIAAAggggAACCCAQAAIEsQPgILILCCCAAAIIIIBAaggQxE4NdbaJAAIIIIAAAggkToAgduK8mBuB5BYgiJ3coqwPAQQQQAABBBBAAAEEEEAAAQQQQAABBBDwLwGC2P51PGgNAggggAACCCCQZgQIYqeZQ0VDEUAAAQQQQCAdCxDETscHn133CwGC2H5xGGgEAggggAACCCCAAAIIIIAAAggggAACCCCQYgIEsVOMlhUjgAACCCCAAAKBLZCUIPaJU7ESFX1KYk6dlrPnzicZKFPGDJItS7DkDM0iWbMEJXk9LIgAAggggAACCAS6AEHsQD/C7J+/CxDE9vcjRPsQQAABBBBAAAEEEEAAAQQQQAABBBBAAIErEyCIfWV+LI0AAggggAACCKRbgcQGsaNPnpZ9EcesV0hwZsmUMWOS7c6eOycnT5+xyxcKyyGhIcFJXhcLIoAAAggggAACgSxAEDuQjy77lhYECGKnhaNEGxFAAAEEEEAAAQQQQAABBBBAAAEEEEAAgaQLEMROuh1LIoAAAggggAAC6VogMUHs8+fPy7bww5IxU0Ypmi+nBGXOdMV2Z86ekz0Hj8pZ8166SF7JkCHDFa+TFSCAAAIIIIAAAoEmQBA70I4o+5PWBAhip7UjRnsRQAABBBBAAAEEEEAAAQQQQAABBBBAAIHECRDETpwXcyOAAAIIIIAAAgj8J5CYIHbMyVgJj4iS/LlDJVdoSLIZRkWfkgORx6VIWE7JFhKUbOtNzhVt3rbrotWVK138onGMQAABBBBAAAEEUkKAIHZKqLJOfxGYPnu+/DVrfpKac3PjBtKsUYMkLZuYhQhiJ0aLeRFAAAEEEEAAAQQQQAABBBBAAAEEEEAAgbQnQBA77R0zWowAAggggAACCPiFQGKC2EeOnZCIqBgplj+XhARnTrb2n4o9I7sOHJWwnNkkT46sybbepKxo0oz5ov8//1Qb0aC1BrD7DR2V4Kpub9pA9H86BBBAAAEEEEAgJQUIYqekLutObQGC2Kl9BNg+AggggAACCCRWwDw40DzZL7FLMT8CCCCAAAIIIIAAAggggIA/CxDE9uejQ9sQQAABBBBAAAE/FkhMEPuwCWEfNmHs4iaInSUZg9inY8/KzgORkteEsPOaMHZqdk4QW9vgGcbW8Z7dpm07PQdtGJtAdhwSBhBAAAEEEEAgGQVSMoh9zqRI9u7bL8FB
wZIvX15JL3mSyX/NlhMnT8r119WUooULJePRYlVJEdi6/eIn0Pi6njKlUv5JNVTE9vVoMB8CCCCAAAKBL7Bxyw4ZNnq8VKlQRh5v3SLwdziJexh75ozs3X/IBNYzSOEC+SRz5kxJXBOLIZD6AsejY+SPv+bahtx7WxPJkiU4UY3aFb5P5i/9xzwRNETuvrVRopZlZgQQQAABBBBAAIGrJ0AQ++pZsyUEEEAAAQQQQCCgBAhiX3w4n3+zt3ukE8Z2j4jX4xncLl+6hHR56qF4czCYUgKzF/4tYydOj7N6/WGnYP4wKVG0kNzcsK4UKhAWZ/rVGnDalj8sj7zeuZ0EBwfF2fTRqOPyVu+v7Lier3eSXDmyx5nOAAIIIIAAAvEFUiKIvW3HLpmzYIls37lbTp48ZTcZkiWLVKpQVu5t0dx8fyXuh+X4bb7S4YjDRyTicKRkzRoixYsWvtLVXbT8x58PlKNRx+Th1vdI9SoVL5rOCAQ8BZIziO2cK3quX4McpYoVkTIli0rVimXt+azn9MT0x5w4KTt277WLVC5fOjGLpui8/tquFN1pVo4AAgggcJFASn8PXrTBKxih36f6/ZUvb27RazxON3P+Uvlt0kzJkyunvPtqR2c07/8JRB2Plu9++kM2mxvtzp07Z8dmzJhRKpYtaYProdlS94mIHKi0J+D590ablrdJvdo1vO7EBnOTxIDhY+y0Jg1qy723N/U6X1JGHjL/Nn2vz9d20Q+7PyfZQxNXUGbFmg0y3NzAkTtXDnnv1WeT0gSWQQABBBBAAAEEELgKAgSxrwIym0AAAQQQQAABBAJRgCB23KPqBKs1VO1UvfYMY+t0nVau9IWqe5u37RIdr/MTxo7rmZJDzgV4DV87P+Ccjo2V06dj7Wb1Ynjndg+YCpcFUrIZXtfttE0nNrj+Gnnw7uZx5iOIHYeDAQQQQAABHwSSO4h98NBh+WroSHcAO7+phH3mzFk5EnnUtqZI4YLS7uFWkiN76t0sNH32fPlr1nwpXbK4dGjXxgelxM1CEDtxXul97pQOYnv6Zs6USTo+fr9UMGGlpHT675N+Q0fZRb/o+ZqpQpmUtST/Mv7aruTfU9aIAAIIIHApAc9rJt7mu9LvQW/rTOq4L4eOttf7bmtSX+5odqN7NbGxZ2TZqnVSqngRW+nZPYEeiTp2XD4dNFIijx6zFYO1WML5c+dlx569om55c+eS159rJ1lDsqCFgM8Cnn9vlC5RVF7q8IjXZb/7+Q9Z9s86O40gtlciRiKAAAIIIIAAAghcRoAg9mWAmIwAAggggAACCCDgXYAg9gUXzxC2VrZ2hnUOJ4zthAdub9rgokC28+OMTtP/6VJWwLkAr1WJ3n65g93Y2bNnZe3GrTJm/FQ5ZqrvVK9cXp5+5N6UbYiXtTttcyY981grW9nQGSaI7UjwjgACCCDgq0ByBrFPnDwpA4aMNNWmj0gB8ySJR1q3tO/alq3bd8oPP403lf9OyA11rpV77rjF1yYm+3wEsZOdlBVegUBKBLH1PPatl1znsXoTxMatO+WveYvlwMHDEmIqZD/fvo0UMzdFJLZz/s2iyxHETqwe8yOAAAIIpLSAc80kpb4Hk7P9zrW++EHs5NxGoK1r3pKV8tPvUyVXzuzy8jOP2qrhuo9aTbjvkB9tUPvhe2+XG66rHmi7zv6koIDz94aziTdfbC8FzM3Ent0J85SnHh8PMDcYn7GjCWJ76tCPAAIIIIAAAggg4KsAQWxfpZgPAQQQQAABBBBAII4AQewLHM+/2dsOOKFrHbhUGDt+9WvPwIPnOi5sgb7kFHAuwHsGsZ31T5g6R6bNWWQrZX/YvYutAjh/6UpZuGyVaNWUVnc2c2a1j23/ecI0CQ4KsmEXnaCPipw+d7EUNOG0O03Fo3GTZ9nHqWogpnbNKnJr4/qSKVNG9zri9zhtc8bnzBEq3bs86a7cfakgtm57vvnRau+Bg6KPbS1aqIA0bVAnTkVEz/bpj4HjJs+ULdt3S57cOaXlrY2lYrlSMnfxCpm96G85fjxGSpoKTfeZR3EWzB/3B4r1m7bZKjGbTOgna9YQqVCmhDSse+1FP2Q4+8E7AggggEDqCSRnEHvZitUy9vdJkjlzZunU/lEpXDDu0yOc6dmyZpU3unYy33mZZPW6DTJn/mLJny9MGtavI1NnzJWdu8PlwftamO+o0hZm4+ZtsnLNOtlinhKSNSREypqniGiYO39Y3O+fvfsP2ErX+/YflBPmUe+FCuaX6lUqSr3ra7mBB3wzUqKijtmgRnBwsP1uKlggv9x/z+3ueXzdngbP5y9aJv9u2irR0TFSoVxpubXZTdJv0Ldy1Gzj4db32O27V0zPVRdwQve+bPij/70WZ7bu77rO4Z9u20bKlLrw1Jo4MyXDQEoFsZ0bCp0mHo6Mkj6Dzef/WLRUq1ROOjx6nzNJomNOyIRpc2TbznBbvT5/WB4pa/b5rltukqCgzHY+DT5tNH8GNcytXXFThVILYmvVQP2zrJ0v55s6X8SRo3Z7u/bsk+Nm24UL5JNa1SvJTTdc+LOq88WYP8d6/quPgt9/MMKeb1etWEYa1LlGJ9vOl3Y58/KOAAIIIBDYAs41E2/XcxL6HkzqNZ2mN9aRP6bNle27wuXx1i2kcnnXeevlvgt3790vo8dNkQPmKTInT52WnDmyS24TLL7+2mr2e3DV+k0yddZC0X1o9+DdcQ6YtlWvt2zdsUdy58phCjkUt9+JnqFRz+s6SbnuFGeDfjYwbPR482+CDfb6mZp7dmv+3WyOxV779Lprq1V0T1LjmfOXil5b3bPvgJ1ezpzj6PWwLOZanNONHj9FdofvN8fgOnMsqjqj7TWwxctXm/OiYnKvuf6l3e9TZ8tGc26i1/E0FD5rwd+y3xzPd7o+Y294O3/+vMxZtNyev2zbucc8CShUShUrLHfe0lBymePt2XH9zFMjdfqdvzecrd9y0w1yV/ObnEH7rn/2tDiH08UPYvv6OdPl9x2IkLlLVsh6U/Ajc+ZM9s9+DVPw4/2+39jVf9j9OdGnMTqdL58R/XM/3Pz50L8X3nv1WWdRn8+53QvQgwACCCCAAAIIIJCiAgSxU5SXlSOAAAIIIIAAAoErQBDbdWydwHX8atae4Wqd0wlYO+OdYecT4qwnfkjbmc578gk4F+C9/XCnIWQNV2tw+uMez9tA85/T58nkmQukSoUy9lHvTkv+3bxdvvr2Jztv77detKOd5XXdQSagrT+8abVtp7vz5oYmjF3PGbzo3WlbIRNW0crcGpqpUaWCtH+4pZ03oSD2tDmLZYL5oUg7vSivoZbTp2Nt+zWEo23Xzmmfhm80RHcw4oi72ktwcJA0MQG5KbMW2GlOFZjcOXOIVovR6dqt3bBFhvzwm5w7d85u65TZjobhdLtasUjnp0MAAQQQ8B+B5Axi/z7pL1m4ZLlULF9G2j18/0U7eUafMLF+ox2voWUNVS9cukJ+/3OahJnvxgwZMsqhCFfI89EH75WqlcqbkPMWGTna9b2Sy3yH6PeXBqC1/9mnHrXvusJ
tO3bJ0JE/2e9VvQkok7np6LgJR2vXpGE9ad60oe1/t9cX9tHl+v2rNybp950GSts//pCd7uv2zpu5R/z4i2wwIWztMmTIIBq6KG2CHQdNYFS3TRDb0qTqC0HsuPzOuaSGvt5/vZOdqOeFH/YbagPa+uchpwkLHTZVtLUrU7KYvPj0w7Z/8MixprL2DvvnR0c44aWPzM2JGiLx9XxTq1Z+OvA7ez5auGA+CTU3ZmiITf9+aFy/ttx3hyvkdObMWflqxE82OJXZBL3z5sllz031z5nnv60u1y7beF4QQAABBNKFgPM95+16jgI40z2/B5N6TUfPI/WajnbtH77XXJsp79N3oQZzvxrxs/0+1esm+h2XyXyPNjHfgXeYG/YXLPvHBrULmxsKu3d5wq5fXzT8+5e5tqNdaLas9ntUvxM1CPxSh0clr7mBXjvnuk5SrzvZlfjpy/gps0xxgyU2pPps29ZSvMiln+4Ra6oXDzTWeq1VuxzZs5lraa5/H1QoU9Jew9NzGO2++OZHW4jgHlOEoFnD6+04fdEb1abNXhTnJjYnEF7eFB3Qc5jYWFeVZL1OmM38O+SXP/6yQWxd3vNY6fUwfVKiXnPTjutnliHVX5y/F2rVqGwD9log491XnjX/Vszgblufwd/LLhPU12rrWuTCM4idmM+ZXsftPWCEHDkaZdft/Bvy2mqVzA2N/9pxnkFsXz8j3oLYvp5zu3eSHgQQQAABBBBAAIEUFyCIneLEbAABBBBAAAEEEAhMAYLYruPqVMPu937cCntOsNrz6Dvha+cHgnKmso1nl9C6POeh/8oFnAvw8X+40+BXv2GjTZXOvVLdVCp5+pF77caS8qOdLtj0xuvNj2wNJPLoMRk1brL9wUcDJlpBJ6HOaVspU4laH7f6ycAR9gefR+67Q+rWqiaRpvrm270H2sV7vtbJ/iB37tx56Td0lKk2GGOrWmsFRA22DB8zXlav3yz6Q0O7B+6yyzg/2OnA3bc2shWC9puqh1+Y5WPMjwVaEbHDo61MheuS8q+pTjrou19s6Ex/SNKbBDQc/m6fr21I7pH7bjdtqm4C2edlzO9TbNVw/ZGqy5OuoJvdIC8IIIAAAqkukJxB7EHDfpAdu/ZIw3p15I7mTXzaNyeIrTNrOLt5k4aSJ09uG+w8ffq0fNLP9b2iFauvu8Z8r5jAybg/psjS5atsZWwnQD1h8nTZtGWbqUBdSW5ucqPdtlbanvzXbMlhQqdvvOwKneoEJ5xbumRx6dCujZ1XX45HR/u8vWUrVpnq35Ptsi1uaya1r60ukabi8Jjf/pC9ptqedgSxLUOqv2zd7grfXK4h8ateB1pFbN3/rTt2S98hP1qKniaIrVUZl/6zzt5oV9A8gl0rbwaZMPY2Eyr63AROtPN8PLtz06iO/6Lna/bpMNqfmPPNqSbM9IcJNemNgM88dr9dx05TGXu0OR/WKvV6rqgBGH0yy4x5S6WIeYrLC+3bmBs3ski4qXav7T9pHg+v47Rqt3YJtctO5AUBBBBAIN0IONdM4l/PcQC8fQ8m9ZqOVsDWm+l1Wxqm1puZfL32ou35cuho2WSeNKFPI9MAttN5C2LrtZshP/xqb/x74qG7paZ54kvMiRP2JnjdpxKm2vIrHR+zq/C8rpOU605OO/zxXauJ63mAXp/TToPYep2pYtlS5v+S9iZLz3b/+ucMU616mQ1u63mDPp1u7/5D9jhpIFYD1xq81i4pQWxdTtdR77oa9jxFqxivXKuViX+3x0oD+noN7pj5N4ZWK9YnzjWqd519mh7Xz1TPPzrn740611S1wXn9zHR8/H530Qp9KssHXwy1N1sUMTdIaDEOzyB2Yj5nI3+ZKEtXrrVFO55s09JWWtfPxbBR42yFfBVxgtiJ+Yx4C2L7es7tH0eBViCAAAIIIIAAAulDgCB2+jjO7CUCCCCAAAIIIJDsAgSxXaTODyueVdt0imdYwBPfCWN7jtN+J7gdfz3x52P4ygWcC/Ba5a+6+cFEu1MmCKaPRNcfe7Ty89Pmx5SK5UrZaUn50U7X3fvNF+wPM7qSJSvWyPdj/7TDn7/b9aIfj+yGzIvTNg1ia3VpZzjEBFO6dW4nGU1oJX4Q21k2/vsyE7r57uc/pED+vPLmC+3tZOcHO/3xSC/8O53+iKSVWapXLmcCnF/NHQAAQABJREFU6BceY/+eCV1rhZUH724uDa6/RvQRut+YatglTGXRV5593FncVmrq9kE/G+Tu/eaLNlzjnkgPAggggECqCiRnELtn7y9tKOSeO26RG+pc69N+OUFsDa+8/VoX+8QIZ8F1/26SkWN+k2JFCknnpy98r2hF7Pd69bOBl3e6m+8VU5HQWxd17Lh81OcrO+mNrp3tY8F1IKEgdmK2pyFsDWNXMVW7HzPVu51u7/4D0m/Qt3aQILajkjbfAzGIfcxUau/xUX97QJwb6RI6Oj0/H2IrULd78C6pVb2ync3z3zCeQeyE1uHtfNOpLKmVPjuYGxu1Gr63rteAb2XP3gMmrN1KqlYs657FCbvcaUJrt5rwmnaJbZd7ZfQggAACCASUgHONJKEgtrfvwaRc09Hz1o97dJFg86Szy3Xevgt1Ged6oS9B7LETp9vrP1o1V4PYTrfH3PzXq/+39lpSL3ONSZ/e5lzXSep1J2fd/vq+xdxg94vx0HMEz06vY93S6AZbWdwZ/7GxCTdGre+6RRrWvfBvE+dzok/FefW/a1dJCWJ7PjnE2ebPphr23EXLpXbNKvJ46xbOaNEiBxrSzm6qmXP9zM3iFz3O50GD2M1M0YyP+w8Xzz9rTiX2p80TBXeZmwfjB7ET8zlzrqPqE2D0STBO57RBh50gdmKusXoLYvt6zu20gXcEEEAAAQQQQACBlBcgiJ3yxmwBAQQQQAABBBAISAGC2K7D6hkKiB+ydn50if8BiD+fE8LW+eJPi78sw1cu4HnxO/7a9DGmndo9IEVNZT6nS8qPdsUKF5DXTHDa6fQHJA2baPfZ/162gWU7EO/FaZsTxDZFQWXgdz/Lv5u2STlTEfCx1nfK/z4ZZJdyKmLrgM63av1GUwVxj+wzlVxOmEfQ6+NTtdNHor710tO23/nBTqsKvdqprR2nLz/+OkkWLV8t9evUlIfuudU9vvdXI2S3eTTnAyaIfaMJYv8xba5Mnb3QTi9UIMw9n/bsOxBhh3W9l3t8bJwFGUAAAQQQSFGB5Axif/XNSPPj9F5p1KCu3HZzI5/a7QSxC5vv1uefaRdnmakz5srMua7vlQKmgp1nd8B8n2nXpUNbKVLY9VjynbvDbVVsDVxolbtDEYcl2gRPtetuKmLrY+i1SyiInZjtDTD7utvsq+6n7q/Tma9ceeejz+3NWwSxHZW0+R6IQWzPStfvm4rYzp+JCHNj3ap/N9snvxw1NzBoxend/wWcNESkYSLtPP9tEz+I7ev5pp4T9h3yg71RT9ep1SnLlipmw976hJUM5inwsbFnpOu7fXSyaJAsT64ctl9fnHNKrTDZwQRitLtUu+wMvCCAAAIIpAsB55pJQkFsb9+DSb
mmU9Rc03nd45qOg+vrd6HO71wT9CWI/fnXP8i2nXvkruaN5JabLpx3njt3Tl5593M5c/asfaKEVod2rusk9bqTsy/+/q4h9HUbt5qq4rtkw+bt9mlt2ub7W9wsN91QS07HxsprPb8wT+04ZwsZ6HU0p9tiqoh/YSprZzKVzD9560X7JJ6kBLE1SKuBWs+uj3miiF5va3lbY/skPM9pnv1cP/PUSN1+5+8NDWI/dv+d8om51rnXnK/qubLe3PD2JwPtddWerz1rQ9ieQezEfM7OnjtrP5PnzV8UXU0F+5Kmkr3T7TBPX/xs0Eg76ASxE/MZ8RbE9uWc29k+7wgggAACCCCAAAJXR4Ag9tVxZisIIIAAAggggEDACRDEvnBInSC1Bgu08pzTaWBAp+mjSON3noHr59/sbSdTDTu+UsoMOxfg8+TOKZ1N6Fo7fWzklFkLbUD67Zc72Me4O1tPyo928YPOThUjXWdigtg6v4ZlPvpyuMSYwJn+aKBt1c4JYp89e06GmkdcrjHhmgwm2aI/xuXIEWrC29vtD1LJEcR2qgs5VZp0+9dUrahv7i4i8qiE5c4lzRvfYNrgCsy5J9KDAAIIIJBqAskZxB43caosXrZSKlUoK23btPK6T/oYde1CsmSxT4BwgthFzXfDcyZU7dlNmDxdFiz+246qZh7B7tkdMd8recz3SpOGN0iRQgVl1rxFokFq/WE7LG8eyZsnt5w1oZSt213nWb4EsROzvb4Dh8v+AwflrtuaSf2613k2TXp+YiqDm+9lgthxWNLcQCAGsectWSk//T7VBLBDTbiksz0m/5rwkvM4dK0mqY9cz5o1i/yzdqOd7ksQOzHnm7pSfaLKvCUrZNW6Tbbfbsi81KpRWdq2vku06r0+TUU7DWnnCA21/fpy2P7ZzykFwvKaQNpNdjxBbDcPPQgggEC6FnCu5yQUxPb2PZgc13QUPbHfhYkJYjsVd+NX0dXg96s9XTcAOk+QcILYSb3ulBY/QIePHJVBI8eam7UOSekSReWlDo+4n8ym+6OheQ3PO91OU9X404Hf2Wtk+rQ6vekrKUHsJg3qyL23N3FWa9+dJ3q0urOZNKoX998InjNy/cxTI3X7nb83nCD2HFPR/BdT2VxD/WF5cslg89lqaipla7je+fuiSYPa5tg3TdTnTG+Y6P7hl3Znuz33hPk3bH73ju/df8hc2x1mh50gdmI+I96C2Lqyy51z6w2QdAgggAACCCCAAAJXT4Ag9tWzZksIIIAAAggggEBACRDEjns4nR9Y4oexdS4NY2un0zSU7Qw7YWwnsO0Z4rYL8JIiAs4FeM8f7k6fjpV3Phssx01VTX2E6IOmArTTTZm5QCZOnyeFCuSTN55/0hktzuNntXpKb1NhR7sr/UHMaZtTEdvZmD7edNio8c6gfXeC2E4wRX9YevOF9pIrp6sa6MK/V8mo3yYnS0VsJ4itIfCRv0wUrcLU5ckLNx3EaRgDCCCAAAJ+JZCcQeyly/+RXydMkeDgIFupOp8JSnp2G7dsk+Hf/2yqzmW2FaqzZQ2RSwWxV6xaKz/9NlHKmnOk9o8n/L1y5swZea93P1tF9/GH7pPKFcvZzUYciZRP+31t+70FsUuVLCbPtHvY3URft6cLjP51gvyzer3UqVVD7rvrNvc6ovQGqT5f2WGC2G6WVOtxqp/Hb8DNjRtIs0YN4oweMmK0lDFPGHHGewti6zw6XedLri40KLnWJOKcK3qexzprP2bOY/sM+l70z0XVimVFA1vaOf9OaXpjHbnn1sY2lKTjtfpf5NFjknAQ+1X3vIk539R1e3b6Z2b2wuUybc4iO/r150xYylTIdx7d/kL7NiaMfWlvZ/u6gi96XmiX53boRwABBBAIfIGkfA8mxzUdlXW+i3y59qLzO9+/vlTE/tFcu1lkruHEf0rZkaNR7qeivd+ts+TMHnrF1520bf7aaYEEvdFSn9RRIF/cf2f8NXex/D5ldpxrXM65RNsH7pLrzM1eTrdkxVr5fuxEKZA/r71OpuP7DxsjG7fukIZ1rxW9xuV0o8ZNloXLVonnkziGjR4vK9dsEG9BbOdY6VPj9OlxTnfq1Gn7hLrMpgq3hsK5fubIpP678/eGE8SOMU8RfLPXV1LYPOkvrwli682Jbzz/lLnuG3ZREFtbn5jPma5Xz30fM0+cqfPfE2d0Hc41ZO13gtiJ+YwkFMTW9TldQufcznTeEUAAAQQQQAABBFJegCB2yhuzBQQQQAABBBBAICAFCGJffFidH1l0yqWqW3vO54SxL14bY1JKwLkAHz/AMmP+Uhk3aaat3tnjhafsjzvaBudit/Z37/KkFC6YT2JNIGzgtz/L5u277GMsUzqIrdv+fuyfsmTFGu21nRPEdn5g0gD2O12fsY9e1R+uvhrxs2zaujPOj1QJBcV//HWSLFq++qIf/Xqbx3XuDt9vf6TSH6sOHDos7/f9xgbsnjfV351Hvx6OjJJxk2fadj1y3+2SJTj4v1byhgACCCCQ2gLJGcTWG5YGDPnOhDejTIWvAvKYCUXnzpXT7uKBgxEycsyvcijiiNSsVlkeanWXHX+pIPbBiMPSp7/re+Xptg9JiWKuR4rr+v+c6vpeuf+e220lsl59B9n1vdjpSSmYP5/t1wrZM+cutP2eQez5psr2H6badrasWaXHK53td7vO5Ov2gs332JwFS2TStFn2e7VDuza2bfro81G//C5r1rsqCRPEtvSp+uJrENtzPiekHT+IrSFsp8L6023bJFsYO6WD2NGmOruek06dvUh2mQqQ9kYJc8Oc8zj0dz/72oaznRvr9IBtMFWyB3z7kz12nkFsz6e4vNqprWi1Te0Sc745dfZCc854RKpUKC21qrtCUbGxZ+S197+w4SqnauWInybI36vWS40q5eWpNi3doW89J9+2c49UM2HyurWq2+0n1C47kRcEEEAAgXQj4O16zuW+B5Pjmo4CJ+a7UOf/5sff7JMhPAO+On7Bsn9k9Lgp5tpSfnON6QkdJQuWmnHjXTc7vma+f50Q8g/mWs1ic61Gn+j27isd7bwJXdfx/K681JPY7Er89GXID7/K6vWbzTlEGXniobvd15ZOnDwlX5nzlh2794pnANo5l9DKw107PiZB5mZQPef4ZOAIUz07Qq6/tpo82uoOu7c/T5hmQ+z61LhXnn1csoZkkcioY9Kr/7einyHP43SpIPZ88/SRMebpI3q+pec0uj7tnOtqGiLXcyuun1kWv3hx/t5wgtjaqOGjfzfXe/+17dNzZv38aBe/IraOS8znbNDIX2Tdhq2i15x1naHZstrP12eDRrqfEuMEsRPzGXH+HsudK4e89+qz2ixz7u/bObedmRcEEEAAAQQQQACBqyJAEPuqMLMRBBBAAAEEEEAg8AQIYns/plrt2ql4rXNoFWztypUubqvnaL9WxfbsCGN7aqR8v3MBPn4QW3+sebfPYFO5JNqERipJuwfvto3RH2Y0fKxVszOYZzrqBXqtSqRhY71ofjUqYmtDTprqOh/3Hy76SFbtnCC2tkUDNhoO0
x/yShYtJFt37ZGT5ocq3Rf9Ueitl562yyT0g53zg1H86kvxg9i6EqealP7oVLFsKeMQJGs3bpUTpqJMvdo1pE3LC1VD7UZ5QQABBBBIVYHkDGLrjuw3jwQfNOwH8710yu5XkcIFzXfQeTlw8JD9LsqTO5c88Uhryf9fFbtLBbF1BTPmLJBpM+fZMEO5MqXs+wb9Xjl5Mk416s/6D7Eh7xw5skt58/1z+HCkRBw+IseOR9t2eAaxj2qowgS3z5vnuWcPzSZVK1eQlne6Ktb5ur3TsbHy1Tffm/09aNdfsEB+ORoVZUIbIbZt+j1LENvSpPrLVhNC9tbFr2rtGbTWMPZfs+bbxTR0rUFtJ4TtBLW9rTMp41IiiK3npEFBmW1z9BxWP+vaZTJVGLUSdqVypeywvmiwSwNeeu6mwSa9YW/Ljj1yzrzr+aVnEFvn7/n5EHPTwhG7/uJFCpmnoDwoUebPma/nm87j3rUi5DXVKtogip4rakhcb2jUGxu1Oxp1XPp8/b0cMTf06ZNnShcvInvN3y/bd4Xb82sNrxTMH2bn1Rdv7dL9pUMAAQQQSD8CzvWcxHwPJsc1HRVOzLUXnf/vVetMgPMP7bXB6iYNakuDOtd4DWKfPXtOvjYh5PXm+1K/r8uZJ0UcMue6es1JnzTzbNv73dcXE7quEwhB7A1bdsjg736RM+YcRc/hS5jrW5JBZOfuffYJdiEmPP3cEw+6xhtXz3OJHNmz2WIB23aG23m10vHLHR6RnObfDtr9s26jDP1xnO3Xcyhd9569B+x0dfY1iK3nUYO+Gysbtmx3HStzvdceq4PmWJnzki7mSR96TqMd188sQ6q/OH9veAax15k/a4PMZ007fSqiPh1RO29B7MR8zvQGgM8GjxStkK6fh0Lm3Hff/kOmOnuYhO87YLfhBLF1wNfPiLcgtq/n3HajvCCAAAIIIIAAAghcFQGC2FeFmY0ggAACCCCAAAKBJ0AQ+9LHNH4gO/7cGtDWcLYT2iaMHV8o5YadC/Dxg9i6Recitv6o92qnx6WYCZdpt3bDFhllKhbpYx61q1aprNxQq4atcHS1gti63a07dssX34yyYRsniK3jtX1jxk+11Xy07fpI1krlSttHsSZ3EFu3p4+L1QqG+0xYRjv9Maxxvevk1sb1TQAoox3HCwIIIICAfwgkdxBb90oDqxNMxel9+10hZR2nIRENSLe6+3Zb+UvHaXe5ILbOM2POQvlnzXob5tbhkCxZpH7d66RpI/O9ktH1vaKh6x9NNeq95gdsDZ0WN9WzW9zaVAYO/V4XEc8gtg5ruHvxspWmAlmMlCxeVDo++YiOtp0v29MZD5ltamVt3V8NuxYqWEAeuPcOGfHjWBP8OEYQ28WZpl49w9hOw8uUKpFiIWzdRkoEsZ2263uWLME28FO6RFGpaqpI29CSxwx6M6E+WWXNv5ttsEkr6emNc79NmmGrRcYPYmtQafzkWaaKtuvmP62qmTlzJp/PN3XTGhKbMmuBvSlQhzVUpm3ToEu2rCE6ynb7TSX938wTabaaCth6c4N2ZUoWk/vuaHrRfiTULrsQLwgggAAC6ULAuZ7jubOX+x7Uea/0mo6zPV+vvej8elPfdz9PtIFdDWU2b1RPWtzS0GsQ285vvq9//uMv+9QKDY9riLOEKQRwy0117Xeo04ZADmLrPu4K32fOWybJXo9/Z+h1rmLmKR2P39/C3KSV16Gw7xqi/vXPGe5zCa10XbpkUWl1RzN3tWpnAb0GO9M8eUNvRMto/o1xy003yLnz52SaeaqIr0FsXZcez59MhW0Njut1Qm2f3jz20D3N7XmMsz195/qZp0bq9Dt/b3gGsfVG4nc+HSTRJ07I+693thXStXXegtg6PjGfM/17Qo+7VnDXf7dWr1xebmtSTz756jtdlXgGsXXYl8+ItyC2LuvrObfOS4cAAggggAACCCCQ8gIEsVPemC0ggAACCCCAAAIBKZCYIPaRYyckIipGiuXPJSHBrsptyYFyygRidh04KmE5s0meHFmTY5XJvo7N21wV+rQKtmd1bGdDThD79qYNnFG8+7GAVkAKDgqKEzDzp+bqD0AaXtOwy9XojkfHSOyZs5I7Zw7zw9PV2CLbQAABBBBIrEBKBLGdNmhQOTLyqISainUF8+ezgQZnWlLeNTR9xnyv5NTvlQRWoGFNDUxky+rbuZ9Wrctogize1ufL9rQZsWfOmCdjnDbf/9kSaBWj05KAtzC2tj+5K2E7JskZxHbWmZR3/bMVHXPCVn705bxNn7aiZSgzZoz7pycx55sx5okp+tQUrUqpIaWEOg3DRByJlFymauXlzmMTaldC62Y8AggggAACjkByXdNJzHehblu/g/WmJl87bWd2c97pPP3C1+UCaT4NS4ebMLbelKlP1NBrcZfqNPB62DxlI2/unL6dc+Q05xyXWeeltudM022GZguxT81zxnl75/qZN5W0N87Xz5numZ4Ha7EKfaKiL92VfEZ8Pef2pR3MgwACCCCAAAIIIJB0AYLYSbdjSQQQQAABBBBAIF0LJCaIHXMyVsIjoiR/rlDJlf1CBbQrBYyKPikHIqOlSFhOyRZy6QvyV7otlkcAAQQQQAABBNKiQEoGsdOiB21GQAXih7FTKoSt2/KXILa2hQ4BBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAg+QUIYie/KWtEAAEEEEAAAQTShUBigthaLWJb+GFbJbFIvpym4ojvVWASwjxtqsmEH4wSrYhWukjeS1Y7SWgdjEcAAQQQQAABBAJdgCB2oB9h9i+pAk4YOyVD2No2gthJPUIshwACCCCAAAIIIIAAAggggAACCCCAAAIIpA0Bgthp4zjRSgQQQAABBBBAwO8EEhPE1sZHnzwt+yKO2f3QILY+WjKp3VkTvj4de9YuXigsh4SG+PaIv6Ruj+UQQAABBBBAAIG0KkAQO60eOdp9NQS2bt8lZUoVT9FNEcROUV5WjgACCCCAAAIIIIAAAggggAACCCCAAAIIpLoAQexUPwQ0AAEEEEAAAQQQSJsCiQ1i616eOBUrUdGnJObUaTl77nySdzxTxgySLUuw5AzNIlmzBCV5PSyIAAIIIIAAAggEugBB7EA/wuyfvwsQxPb3I0T7EEAAAQQQQAABBBBAAAEEEEAAAQQQQACBKxMgiH1lfiyNAAIIIIAAAgikW4GkBLHTLRY7jgACCCCAAAIIpJIAQexUgmezCPwnQBCbjwICCCCAAAIIIIAAAggggAACCCCAAAIIIBDYAgSxA/v4sncIIIAAAggggECKCRDETjFaVowAAggggAACCCSbAEHsZKNkRQgkSYAgdpLYWAgBBBBAAAEEEEAAAQQQQAABBBBAAAEEEEgzAgSx08yhoqEIIIAAAggggIB/CRDE9q/jQWsQQAABBBBAAAFvAgSxvakwDoGrJ0AQ++pZsyUEEEAAAQQQQAABBBBAAAEEEEAAAQQQQCA1BAhip4Y620QAAQQQQAABBAJAgCB2ABxEdgEBBBBAAAEEAl6AIHbAH2J20M8FCGL7+QGi
eQgggAACCCCAAAIIIIAAAggggAACCCCAwBUKEMS+QkAWRwABBBBAAAEE0qsAQez0euTZbwQQQAABBBBISwIEsdPS0aKtgShAEDsQjyr7hAACCCCAAAIIIIAAAggggAACCCCAAAIIXBAgiH3Bgj4EEEAAAQQQQACBRAgQxE4EFrMigAACCCCAAAKpJEAQO5Xg2SwC/wkQxOajgAACCCCAAAIIIIAAAggggAACCCCAAAIIBLYAQezAPr7sHQIIIIAAAgggkGICBLFTjJYVI4AAAggggAACySZAEDvZKFkRAkkSIIidJDYWQgABBBBAAAEEEEAAAQQQQAABBBBAAAEE0owAQew0c6hoKAIIIIAAAggg4F8CBLH963jQGgQQQAABBBBAwJsAQWxvKoxD4OoJEMS+etZsCQEEEEAAAQQQQAABBBBAAAEEEEAAAQQQSA0Bgtipoc42EUAAAQQQQACBABAgiB0AB5FdQAABBBBAAIGAFyCIHfCHmB30cwGC2H5+gGgeAggggAACCCCAAAIIIIAAAggggAACCCBwhQIEsa8QkMURQAABBBBAAIH0KkAQO70eefYbAQQQQAABBNKSAEHstHS0aGsgChDEDsSjyj4hgAACCCCAAAIIIIAAAggggAACCCCAAAIXBAhiX7CgDwEEEEAAAQQQQCARAgSxE4HFrAgggAACCCCAQCoJEMROJXg2i8B/AgSx+SgggAACCCCAAAIIIIAAAggggAACCCCAAAKBLUAQO7CPL3uHAAIIIIAAAgikmABB7BSjZcUIIIAAAggggECyCRDETjZKVoRAkgQIYieJjYUQQAABBBBAAAEEEEAAAQQQQAABBBBAAIE0I0AQO80cKhqKAAIIIIAAAgj4l0B4xFH/ahCtQQABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEDgKgoUCcuVrFvLcN50ybpGVoYAAggggAACCCDglwIEsf3ysNAoBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgaskQBD7KkGzGQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAICEBKmInJMN4BBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAgAQGC2AnAMBoBBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEhIgCB2QjKMRwABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEhAgiJ0ADKMRQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAIGEBAhiJyTDeAQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAIAEBgtgJwDAaAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBISIAgdkIyjEcAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBIQIIidAAyjEUAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQACBhAQIYickw3gEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEDg/+3aIQEAAACAoP+vXeADqpEsAQIECBAgQIAAAQIECBAgQIDACBixB0YmQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACRixT0YnQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACBixB0YmQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACRixT0YnQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACBixB0YmQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACRixT0YnQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACBixB0YmQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACRixT0YnQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACBixB0YmQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACRixT0YnQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACBixB0YmQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACRixT0YnQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACBixB0YmQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACRixT0YnQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACBixB0YmQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACRixT0YnQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACBixB0YmQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACRixT0YnQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACBixB0YmQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACRixT0YnQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACBixB0YmQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACRixT0YnQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACBixB0YmQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACRi
xT0YnQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACBixB0YmQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACRixT0YnQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACBixB0YmQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACRixT0YnQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACBixB0YmQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACRixT0YnQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACBixB0YmQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACRixT0YnQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACBixB0YmQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACRixT0YnQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACBixB0YmQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACRixT0YnQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACBixB0YmQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACRixT0YnQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACBixB0YmQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACRixT0YnQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACBixB0YmQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACRixT0YnQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACBixB0YmQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACRixT0YnQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACBixB0YmQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACRixT0YnQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACBixB0YmQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACRixT0YnQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACBixB0YmQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACRixT0YnQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACBixB0YmQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACRixT0YnQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACBixB0YmQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACRixT0YnQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACBixB0YmQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACRixT0YnQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACBixB0YmQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACRixT0YnQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACBixB0YmQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACRixT0YnQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACBixB0YmQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACRixT0YnQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACBixB0YmQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACRixT0YnQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACBixB0YmQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACRixT0YnQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACBixB0YmQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACRixT0YnQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACBixB0YmQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACRixT0YnQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACBixB0YmQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACRixT0YnQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDACBixB0YmQIAAAQIECBAgQIAAAQIECB
[... remainder of base64 PNG data for attachment image-2.png elided ...]"
+    }
+   },
+   "cell_type": "markdown",
+   "id": "a6c8b0da",
+   "metadata": {},
+   "source": [
+    "![image-2.png](attachment:image-2.png)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "5a153178",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Define functions for model training, validation and accuracy calculation\n",
+    "import tqdm\n",
+    "import torch\n",
+    "\n",
+    "# get_model_losses and get_layer_keep_ratio are assumed to be imported from pquant in an earlier cell\n",
+    "def train_resnet(model, trainloader, device, loss_func, writer, epoch, optimizer, scheduler, *args, **kwargs):\n",
+    "    \"\"\"Train ResNets for 1 epoch\"\"\"\n",
+    "    for data in tqdm.tqdm(trainloader):\n",
+    "        inputs, labels = data\n",
+    "        inputs, labels = inputs.to(device), labels.to(device)\n",
+    "        optimizer.zero_grad()\n",
+    "        outputs = model(inputs)\n",
+    "        loss = loss_func(outputs, labels)\n",
+    "        losses = get_model_losses(model, torch.tensor(0.0).to(device))\n",
+    "        loss += losses\n",
+    "        loss.backward()\n",
+    "        optimizer.step()\n",
+    "    epoch += 1\n",
+    "    if scheduler is not None:\n",
+    "        scheduler.step()\n",
+    "    if writer is not None:\n",
+    "        writer.add_scalar(\"train_output_loss\", loss.item(), epoch)\n",
+    "        writer.add_scalar(\"train_sparse_loss\", losses, epoch)\n",
+    "\n",
+    "def validate_resnet(model, testloader, device, loss_func, epoch, writer, *args, **kwargs):\n",
+    "    \"\"\"Validation loop for ResNets\"\"\"\n",
+    "    correct = 0\n",
+    "    total = 0\n",
+    "    model.eval()\n",
+    "\n",
+    "    with torch.no_grad():\n",
+    "        for data in testloader:\n",
+    "            images, labels = data\n",
+    "            images, labels = images.to(device), labels.to(device)\n",
+    "            outputs = model(images)\n",
+    "            if loss_func is not None:\n",
+    "                loss = loss_func(outputs, labels)\n",
+    "                losses = get_model_losses(model, torch.tensor(0.0).to(device))\n",
+    "            _, predicted = torch.max(outputs.data, 1)\n",
+    "            total += labels.size(0)\n",
+    "            correct += (predicted == labels).sum().item()\n",
+    "        ratio = get_layer_keep_ratio(model)\n",
+    "        if writer is not None:\n",
+    "            writer.add_scalar(\"validation_output_loss\", loss.item(), epoch)\n",
+    "            writer.add_scalar(\"validation_sparse_loss\", losses, epoch)\n",
+    "            writer.add_scalar(\"validation_acc\", correct / total, epoch)\n",
+    "            writer.add_scalar(\"validation_remaining_weights\", ratio, epoch)\n",
+    "\n",
+    "def calculate_accuracy(model, testloader, device, loss_func, *args, **kwargs):\n",
+    "    \"\"\"Validation loop for ResNets with accuracy return\"\"\"\n",
+    "    correct = 0\n",
+    "    total = 0\n",
+    "    loss = None\n",
+    "    losses = None\n",
+    "    with torch.no_grad():\n",
+    "        for data in testloader:\n",
+    "            images, labels = data\n",
+    "            images, labels = images.to(device), labels.to(device)\n",
+    "            outputs = model(images)\n",
+    "\n",
+    "            if loss_func is not None:\n",
+    "                loss = loss_func(outputs, labels)\n",
+    "                losses = get_model_losses(model, torch.tensor(0.0).to(device))\n",
+    "\n",
+    "            _, predicted = torch.max(outputs.data, 1)\n",
+    "            total += labels.size(0)\n",
+    "            correct += (predicted == labels).sum().item()\n",
+    "\n",
+    "    accuracy = correct / total if total > 0 else 0.0\n",
+    "    ratio = get_layer_keep_ratio(model)\n",
+    "\n",
+    "    return accuracy"
+   ]
+  },
writer.add_scalar(\"validation_remaining_weights\", ratio, epoch)\n", + "\n", + "def calculate_accuracy(model, testloader, device, loss_func, *args, **kwargs):\n", + " \"\"\"Validation loop for ResNets with accuracy return\"\"\"\n", + " correct = 0\n", + " total = 0\n", + " loss = None\n", + " losses = None\n", + " with torch.no_grad():\n", + " for data in testloader:\n", + " images, labels = data\n", + " images, labels = images.to(device), labels.to(device)\n", + " outputs = model(images)\n", + "\n", + " if loss_func is not None:\n", + " loss = loss_func(outputs, labels)\n", + " losses = get_model_losses(model, torch.tensor(0.0).to(device))\n", + "\n", + " _, predicted = torch.max(outputs.data, 1)\n", + " total += labels.size(0)\n", + " correct += (predicted == labels).sum().item()\n", + "\n", + " accuracy = correct / total if total > 0 else 0.0\n", + " ratio = get_layer_keep_ratio(model)\n", + "\n", + " return accuracy" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "39586a5f", + "metadata": {}, + "outputs": [], + "source": [ + "# Define scheduler and optimizer functions\n", + "def get_scheduler(optimizer, config):\n", + " if config.training_parameters.lr_schedule is None:\n", + " return None\n", + " elif config.training_parameters.lr_schedule == \"cosine\":\n", + " return CosineAnnealingLR(optimizer, config.training_parameters.cosine_tmax)\n", + " elif config.training_parameters.lr_schedule == \"multistep\":\n", + " return MultiStepLR(optimizer, config.training_parameters.milestones, gamma=config.training_parameters.gamma)\n", + " return None\n", + "\n", + "def get_optimizer(config, model):\n", + " if config.training_parameters.optimizer == \"sgd\":\n", + " # CS already has L1-regularization for threshold parameters\n", + " threshold_decay = (\n", + " 0 if (config.pruning_parameters.pruning_method == \"cs\" or config.pruning_parameters.pruning_method == \"mdmm\") else config.pruning_parameters.threshold_decay\n", + " )\n", + "\n", + " parameters = list(model.named_parameters())\n", + " threshold_params = [v for n, v in parameters if \"threshold\" in n and v.requires_grad]\n", + " rest_params = [v for n, v in parameters if \"threshold\" not in n and v.requires_grad]\n", + " optimizer = torch.optim.SGD(\n", + " [\n", + " {\n", + " \"params\": threshold_params,\n", + " \"weight_decay\": threshold_decay if threshold_decay is not None else config.training_parameters.l2_decay,\n", + " },\n", + " {\"params\": rest_params, \"weight_decay\": config.training_parameters.l2_decay},\n", + " ],\n", + " config.training_parameters.lr,\n", + " momentum=config.training_parameters.momentum,\n", + " )\n", + " elif config.training_parameters.optimizer == \"adam\":\n", + " optimizer = torch.optim.Adam(model.parameters(), lr=config.training_parameters.lr)\n", + " elif config.training_parameters.optimizer == \"psgd\":\n", + " # CS already has L1-regularization for threshold parameters\n", + " threshold_decay = (\n", + " 0 if config.pruning_parameters.pruning_method == \"cs\" else config.pruning_parameters.threshold_decay\n", + " )\n", + "\n", + " parameters = list(model.named_parameters())\n", + " threshold_params = [v for n, v in parameters if \"torch_params\" in n and v.requires_grad]\n", + " rest_params = [v for n, v in parameters if \"torch_params\" not in n and v.requires_grad]\n", + " optimizer = pSGD(\n", + " [\n", + " {\n", + " \"params\": threshold_params,\n", + " },\n", + " {\n", + " \"params\": rest_params,\n", + " },\n", + " ],\n", + " config.training_parameters.lr,\n", + " 
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "abb1c8e6",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Define data loader function\n",
+    "import torchvision\n",
+    "import torchvision.transforms as transforms\n",
+    "\n",
+    "def get_cifar10_data(batch_size):\n",
+    "    normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])\n",
+    "    train_transform = transforms.Compose([transforms.RandomHorizontalFlip(), transforms.RandomCrop(32, padding=4),\n",
+    "                                          transforms.ToTensor(), normalize])\n",
+    "    test_transform = transforms.Compose([transforms.ToTensor(), normalize])\n",
+    "    # Note: the validation set is also built from the training split (train=True), just without augmentation\n",
+    "    trainset = torchvision.datasets.CIFAR10(root='./data', train=True,\n",
+    "                                            download=True, transform=train_transform)\n",
+    "    valset = torchvision.datasets.CIFAR10(root='./data', train=True,\n",
+    "                                          download=True, transform=test_transform)\n",
+    "    train_loader = torch.utils.data.DataLoader(trainset, batch_size=batch_size,\n",
+    "                                               shuffle=True, num_workers=4, pin_memory=True)\n",
+    "\n",
+    "    val_loader = torch.utils.data.DataLoader(valset, batch_size=batch_size,\n",
+    "                                             shuffle=False, num_workers=4, pin_memory=True)\n",
+    "\n",
+    "    return train_loader, val_loader\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "6b4eb78b",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import torch\n",
+    "import torch.nn as nn\n",
+    "# Import classes from the fine-tuning module\n",
+    "from pquant.core.finetuning import TuningTask, TuningConfig\n",
+    "# Import model\n",
+    "from torchvision.models import resnet18\n",
+    "import torchvision.transforms as transforms\n",
+    "\n",
+    "# Specify your finetuning config path\n",
+    "CONFIG_PATH = 'PQuant/src/pquant/configs/finetuning.yaml'\n",
+    "\n",
+    "# Convert the yaml file into a config object\n",
+    "config = TuningConfig.load_from_file(CONFIG_PATH)\n",
+    "\n",
+    "# Create the finetuning task\n",
+    "tuner = TuningTask(config)\n",
+    "# Enable mlflow if needed\n",
+    "tuner.set_enable_mlflow()\n",
+    "# Set tracking uri of MLflow, NGT MLflow in my case, can be a local one too\n",
+    "tuner.set_tracking_uri(\"https://ngt.cern.ch/models\")\n",
+    "# Set user details if you use NGT MLflow\n",
+    "tuner.set_user(\"your_email@cern.ch\", \"your_access_token\")\n",
+    "# Set storage db uri if needed\n",
+    "tuner.set_storage_db(\"sqlite:///optuna_study.db\")\n",
+    "# Set training function for your model, must be callable\n",
+    "tuner.set_training_function(train_resnet)\n",
+    "# Set validation function for your model, must be callable\n",
+    "tuner.set_validation_function(validate_resnet)\n",
+    "# Set objective function for the finetuning, must be callable\n",
+    "tuner.set_objective_function(name=\"accuracy\", fn=calculate_accuracy, direction=\"maximize\")\n",
+    "# Set hyperparameters\n",
+    "tuner.set_hyperparameters()\n",
+    "# Set optimizer function, must be callable\n",
+    "tuner.set_optimizer_function(get_optimizer)\n",
+    "# Set scheduler function, must be callable\n",
+    "tuner.set_scheduler_function(get_scheduler)\n",
+    "\n",
+    "# Run your code to prepare the model\n",
+    "device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n",
+    "\n",
+    "# Initialise the model\n",
+    "model = resnet18()\n",
+    "model = model.to(device)\n",
+    "\n",
+    "# Create train and validation data loaders with a specific batch size\n",
+    "BATCH_SIZE = 256\n",
+    "train_loader, val_loader = get_cifar10_data(BATCH_SIZE)\n",
+    "\n",
+    "# Define a loss function\n",
+    "loss_func = nn.CrossEntropyLoss()\n",
+    "\n",
+    "# Run optimization loop\n",
+    "best_params = tuner.run_optimization(model,\n",
+    "                                     trainloader=train_loader,\n",
+    "                                     testloader=val_loader,\n",
+    "                                     loss_func=loss_func)"
+   ]
+  },
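+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "best-params-sketch",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# A minimal sketch of consuming the result, assuming run_optimization returns\n",
+    "# the best hyperparameter values found (e.g. an Optuna-style dict; the exact\n",
+    "# return type is not documented in this notebook).\n",
+    "print(\"Best hyperparameters found:\")\n",
+    "for name, value in best_params.items():\n",
+    "    print(f\"  {name}: {value}\")"
+   ]
+  },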
"\n", + "# Create train and validation data loaders with a specific batch size\n", + "BATCH_SIZE = 256\n", + "train_loader, val_loader = get_cifar10_data(BATCH_SIZE)\n", + "\n", + "# Define a loss function\n", + "loss_func = nn.CrossEntropyLoss()\n", + "\n", + "# Run optimization loop\n", + "best_params = tuner.run_optimization(model,\n", + " trainloader=train_loader,\n", + " testloader=val_loader,\n", + " loss_func=loss_func)" + ] + }, + { + "cell_type": "markdown", + "id": "b4059eee", + "metadata": {}, + "source": [ + "## For the demonstrational purposes, a toy example:" + ] + }, + { + "attachments": { + "image.png": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAB14AAARYCAYAAACh/3HjAAAMP2lDQ1BJQ0MgUHJvZmlsZQAASImVVwdYU8kWnluSkEBooUsJvQkiUgJICaEFkN5thCRAKCEGgoq9LCq4FlREwYauiihYAbEjioVFsfcFFRVlXSzYlTcpoOu+8r3zfXPvf/85858z584tA4DaSY5IlIOqA5ArLBDHBPvTk5JT6KRegAAcaINRQIPDzRcxo6LCAbSh89/t3Q3oDe2qg1Trn/3/1TR4/HwuAEgUxGm8fG4uxAcBwKu4InEBAEQpbz61QCTFsAEtMUwQ4sVSnCHHVVKcJsd7ZT5xMSyIWwFQUuFwxBkAqF6GPL2QmwE1VPshdhLyBEIA1OgQ++Tm5vEgToXYBvqIIJbqM9J+0Mn4m2basCaHkzGM5XORmVKAIF+Uw5n+f5bjf1tujmQohhVsKpnikBjpnGHdbmXnhUmxCsR9wrSISIg1If4g4Mn8IUYpmZKQeLk/asjNZ8GaAR2InXicgDCIDSEOEuZEhCv4tHRBEBtiuELQaYICdhzEehAv5ucHxip8NovzYhSx0IZ0MYup4M9xxLK40lgPJNnxTIX+60w+W6GPqRZlxiVCTIHYolCQEAGxKsSO+dmxYQqfsUWZrIghH7EkRpq/BcQxfGGwv1wfK0wXB8Uo/Ety84fmi23OFLAjFHh/QWZciLw+WCuXI8sfzgW7zBcy44d0+PlJ4UNz4fEDAuVzx57xhfGxCp0PogL/GPlYnCLKiVL442b8nGApbwaxS35hrGIsnlAAF6RcH08XFUTFyfPEi7I4oVHyfPAVIBywQACgAwlsaSAPZAFBR19jH7yS9wQBDhCDDMAHDgpmaESirEcIj7GgCPwJER/kD4/zl/XyQSHkvw6z8qMDSJf1FspGZIMnEOeCMJADryWyUcLhaAngMWQE/4jOgY0L882BTdr/7/kh9jvDhEy4gpEMRaSrDXkSA4kBxBBiENEWN8B9cC88HB79YHPGGbjH0Dy++xOeEDoJDwnXCV2E25MF88U/ZTkOdEH9IEUt0n6sBW4FNV1xf9wbqkNlXAc3AA64C4zDxH1hZFfIshR5S6tC/0n7bzP44W4o/MhOZJSsS/Yj2/w8UtVO1XVYRVrrH+sjzzVtuN6s4Z6f47N+qD4PnsN+9sQWYwewNuwUdh47ijUCOnYCa8LasWNSPLy6HstW11C0GFk+2VBH8I94Q3dWWsl8p1qnXqcv8r4C/jTpOxqw8kTTxYKMzAI6E34R+HS2kOs4ku7s5OwKgPT7In99vYmWfTcQnfbv3II/APA+MTg4eOQ7F3oCgH3u8PE//J2zYcBPhzIA5w5zJeJCOYdLDwT4llCDT5o+MAbmwAbOxxm4AS/gBwJBKIgEcSAZTILZZ8J1LgZTwUwwDxSDUrACrAHrwSawFewEe8B+0AiOglPgLLgILoPr4C5cPT3gBegH78BnBEFICBWhIfqICWKJ2CPOCAPxQQKRcCQGSUZSkQxEiEiQmcgCpBQpQ9YjW5AaZB9yGDmFnEc6kdtIN9KLvEY+oRiqgmqhRqgVOgploEw0DI1DJ6IZ6BS0CF2ILkMr0Gp0N9qAnkIvotfRLvQFOoABTBnTwUwxB4yBsbBILAVLx8TYbKwEK8eqsTqsGd7nq1gX1od9xIk4DafjDnAFh+DxOBefgs/Gl+Lr8Z14A96KX8W78X78G4FKMCTYEzwJbEISIYMwlVBMKCdsJxwinIHPUg/hHZFI1CFaE93hs5hMzCLOIC4lbiDWE08SO4mPiAMkEkmfZE/yJkWSOKQCUjFpHWk36QTpCqmH9EFJWclEyVkpSClFSag0X6lcaZfScaUrSk+VPpPVyZZkT3IkmUeeTl5O3kZuJl8i95A/UzQo1hRvShwlizKPUkGpo5yh3KO8UVZWNlP2UI5WFijPVa5Q3qt8Trlb+aOKpoqdCktlgopEZZnKDpWTKrdV3lCpVCuqHzWFWkBdRq2hnqY+oH5Qpak6qrJVeapzVCtVG1SvqL5UI6tZqjHVJqkVqZWrHVC7pNanTla3Umepc9Rnq1eqH1a/qT6gQdMYrRGpkauxVGOXxnmNZ5okTSvNQE2e5kLNrZqnNR/RMJo5jUXj0hbQttHO0Hq0iFrWWmytLK1SrT1aHVr92praLtoJ2tO0K7WPaXfpYDpWOmydHJ3lOvt1buh80jXSZerydZfo1ule0X2vN0LPT4+vV6JXr3dd75M+XT9QP1t/pX6j/n0D3MDOINpgqsFGgzMGfSO0RniN4I4oGbF/xB1D1NDOMMZwhuFWw3bDASNjo2AjkdE6o9NGfcY6xn7GWcarjY8b95rQTHxMBCarTU6YPKdr05n0HHoFvZXeb2poGmIqMd1i2mH62czaLN5svlm92X1zijnDPN18tXmLeb+FicU4i5kWtRZ3LMmWDMtMy7WWbZbvraytEq0WWTVaPbPWs2ZbF1nXWt+zodr42kyxqba5Zku0Zdhm226wvWyH2rnaZdpV2l2yR+3d7AX2G+w7RxJGeowUjqweedNBxYHpUOhQ69DtqOMY7jjfsdHx5SiLUSmjVo5qG/XNydUpx2mb093RmqNDR88f3Tz6tbOdM9e50vnaGOqYoDFzxjSNeeVi78J32ehyy5XmOs51kWuL61c3dzexW51br7uFe6p7lftNhhYjirGUcc6D4OHvMcfjqMdHTzfPAs/9nn95OXhle+3yejbWeix/7Laxj7zNvDneW7y7fOg+qT6bfbp8TX05vtW+D/3M/Xh+2/2eMm2ZWczdzJf+Tv5i/0P+71merFmskwFYQHBASUBHoGZgfOD6wAdBZkEZQbVB/cGuwTOCT4YQQsJCVobcZBuxuewadn+oe+is0NYwlbDYsPVhD8PtwsXhzePQcaHjVo27F2
EZIYxojASR7MhVkfejrKOmRB2JJkZHRVdGP4kZHTMzpi2WFjs5dlfsuzj/uOVxd+Nt4iXxLQlqCRMSahLeJwYkliV2JY1KmpV0MdkgWZDclEJKSUjZnjIwPnD8mvE9E1wnFE+4MdF64rSJ5ycZTMqZdGyy2mTO5AOphNTE1F2pXziRnGrOQBo7rSqtn8viruW+4PnxVvN6+d78Mv7TdO/0svRnGd4ZqzJ6M30zyzP7BCzBesGrrJCsTVnvsyOzd2QP5iTm1Ocq5abmHhZqCrOFrXnGedPyOkX2omJR1xTPKWum9IvDxNvzkfyJ+U0FWvBHvl1iI/lF0l3oU1hZ+GFqwtQD0zSmCae1T7ebvmT606Kgot9m4DO4M1pmms6cN7N7FnPWltnI7LTZLXPM5yyc0zM3eO7OeZR52fN+n+80v2z+2wWJC5oXGi2cu/DRL8G/1BarFouLby7yWrRpMb5YsLhjyZgl65Z8K+GVXCh1Ki0v/bKUu/TCr6N/rfh1cFn6so7lbss3riCuEK64sdJ35c4yjbKiskerxq1qWE1fXbL67ZrJa86Xu5RvWktZK1nbVRFe0bTOYt2KdV/WZ66/XulfWV9lWLWk6v0G3oYrG/021m0y2lS66dNmweZbW4K3NFRbVZdvJW4t3PpkW8K2tt8Yv9VsN9heuv3rDuGOrp0xO1tr3GtqdhnuWl6L1kpqe3dP2H15T8CepjqHui31OvWle8Feyd7n+1L33dgftr/lAONA3UHLg1WHaIdKGpCG6Q39jZmNXU3JTZ2HQw+3NHs1HzrieGTHUdOjlce0jy0/Tjm+8PjgiaITAydFJ/tOZZx61DK55e7ppNPXWqNbO86EnTl3Nujs6TZm24lz3ueOnvc8f/gC40LjRbeLDe2u7Yd+d/39UIdbR8Ml90tNlz0uN3eO7Tx+xffKqasBV89eY1+7eD3ieueN+Bu3bk642XWLd+vZ7Zzbr+4U3vl8d+49wr2S++r3yx8YPqj+w/aP+i63rmPdAd3tD2Mf3n3EffTicf7jLz0Ln1CflD81eVrzzPnZ0d6g3svPxz/veSF68bmv+E+NP6te2rw8+JffX+39Sf09r8SvBl8vfaP/Zsdbl7ctA1EDD97lvvv8vuSD/oedHxkf2z4lfnr6eeoX0peKr7Zfm7+Ffbs3mDs4KOKIObJfAQw2ND0dgNc7AKAmA0CD+zPKePn+T2aIfM8qQ+A/YfkeUWZuANTB//foPvh3cxOAvdvg9gvqq00AIIoKQJwHQMeMGW5DezXZvlJqRLgP2Bz7NS03Dfwbk+85f8j75zOQqrqAn8//AiTrfGu232UwAAAAimVYSWZNTQAqAAAACAAEARoABQAAAAEAAAA+ARsABQAAAAEAAABGASgAAwAAAAEAAgAAh2kABAAAAAEAAABOAAAAAAAAAJAAAAABAAAAkAAAAAEAA5KGAAcAAAASAAAAeKACAAQAAAABAAAHXqADAAQAAAABAAAEWAAAAABBU0NJSQAAAFNjcmVlbnNob3RBKTGOAAAACXBIWXMAABYlAAAWJQFJUiTwAAAB2GlUWHRYTUw6Y29tLmFkb2JlLnhtcAAAAAAAPHg6eG1wbWV0YSB4bWxuczp4PSJhZG9iZTpuczptZXRhLyIgeDp4bXB0az0iWE1QIENvcmUgNi4wLjAiPgogICA8cmRmOlJERiB4bWxuczpyZGY9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkvMDIvMjItcmRmLXN5bnRheC1ucyMiPgogICAgICA8cmRmOkRlc2NyaXB0aW9uIHJkZjphYm91dD0iIgogICAgICAgICAgICB4bWxuczpleGlmPSJodHRwOi8vbnMuYWRvYmUuY29tL2V4aWYvMS4wLyI+CiAgICAgICAgIDxleGlmOlBpeGVsWURpbWVuc2lvbj4xMTEyPC9leGlmOlBpeGVsWURpbWVuc2lvbj4KICAgICAgICAgPGV4aWY6UGl4ZWxYRGltZW5zaW9uPjE4ODY8L2V4aWY6UGl4ZWxYRGltZW5zaW9uPgogICAgICAgICA8ZXhpZjpVc2VyQ29tbWVudD5TY3JlZW5zaG90PC9leGlmOlVzZXJDb21tZW50PgogICAgICA8L3JkZjpEZXNjcmlwdGlvbj4KICAgPC9yZGY6UkRGPgo8L3g6eG1wbWV0YT4KidqOegAAABxpRE9UAAAAAgAAAAAAAAIsAAAAKAAAAiwAAAIsAAHPg5W6N5UAAEAASURBVHgB7J0JvE3VF8dXIZIhY9IoKpGQSFSGEiINmjUhc2QKJUPIlHkKITJlKBVNZhIaRKRBhH+SjJUylfrv336ta5/zzr3v3vfuM73f7tO75+yzzz77fM8+g/3ba60z/jVJmEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABJJN4AwKr8lmxx1JgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIwBKg8MqOQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIpJEDhNYUAuTsJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJUHhlHyABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiCBFBKg8JpCgNydBEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABCi8sg+QAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQQAoJUHhNIUDuTgIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIUXtkHSIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESCCFBCi8phAgdycBEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABCq/sAyRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiSQQgIUXlMIkLuTAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAIVX9gESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESSCEBCq8pBMjdSYAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIDCK/sACZAACZAACZAACZAACZAACZAACZAACZAACZAACZAACZAACZAACZAACaSQAIXXFALk7iRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRA4ZV9gARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARSSIDCawoBcncSIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESoPDKPkACJEACJEACJEACJEACJEACJEACJ
EACJEACJEACJEACJEACJEACJEACKSRA4TWFALk7CZAACZAACZAACZAACZAACZAACZAACZAACZAACZAACZAACZAACZAACVB4ZR8gARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIggRQSoPCaQoDcnQRIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgAQovLIPkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkEAKCVB4TSFA7k4CJEACJEACJEACJEACJEACJEACJEACJEACJEACJEACJEACJEACJEACFF7ZB0iABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEgghQQovKYQIHcnARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgAQqv7AMkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkkEICFF5TCJC7kwAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkACFV/YBEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEkghAQqvKQTI3UmABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiAwiv7AAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmkkACF1xQC5O4kQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQOGVfYAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAEUkiAwmsKAXL3lBM4eOiQpE+XTjJkyJDyyljDCScwd/5CGTHqFdm5a7c88tD90ujJ+pIu3ZknvF1swMlPYPXG3bJ60+6YG1qvauGY9+EOJEACJEACJEACJEACJEACJEACJEACJEACJEACJEACJBBvAhRe4030NK/vg7nz5KNlKzxnWbNGNbnh+jKevEgrq79cKx/OnS8LFy+VjZs2hYqec845cnmhQnJr5YpSq+btcuEF+UPbuHBqEPjt99+lZJnynsaOHTlcKlW82ZPHlZOTwJ49e+WVceNl//4/Qg3MdHYm6fRsu9B6ai2M+/BbGffBt8mqfmizG6VkodzJ2pc7kQAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkEC8CFB4jRfJONfz77//yqHDhz21ZsqYUc444wxP3vFagSDTtUdPeff9DxMd8nkjytR7/NFE+f6MX3buki7dXpS58xf4NwWujxgyQKrdViVwGzNPTgIfzlsgTZq39DTukYcflG6dO3ryuCJy9OhROfLXXyEUZ8gZkilTxtD68V54/8N50u65TvLnn38mOvQP365LlBfvDFd4jVZEhYUsUr1qhYVWr/G+IqyPBEiABEiABEiABEiABEiABEiABEiABEiABEiABEggVgIUXmMldpzKr/pitdz38GOeo705bYqUKF7Mk3c8ViIJMjh+NMLrjl92ygOPPC4//rgtpibXe+Ixad+mJd0Qx0TtxBWmxWv07EeMGiP9Bg727HA8BE7PAc3K3r37pEv3FwMnVWjZ49EuFV4husKCNZrUfPgygfhK4TUaWixDAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiSQ2gQovKY24WTW/9nnX1ih0t19xtSJUqpkCTcrVZejEWTQgGiE1wZNmsuCRYsTtbdokSJSssQ1cvDgIfn0888DhdkOz7SWhvXrJtqXGScnAY3xCne1te++08R4rccYrwGXavjI0dJ/0FDPluMhcLoHhOvwZ54NtnJ1yx2PdlF4dYlzmQRIgARIgARIgARIgARIgARIgARIgARIgARIgARI4FQkQOH1JL1qJ1p4XbV6jTzxZONAt6N+ZEkJrxu+3yjV7rjbv5uMHDZYbru1cij/n3/+kcHDX5ahw0eG8rCA2K9rPv3YiHfpPPlcIYFTmcCJFl5btG4nc957PyqEFF6jwsRCJJDmCWACRbhUsmBuxmIOB4f5JEACJEACJEACJEACJEACJEACJEACJEACpw2B4ya8HjbxSs8666wkY5QePHRI0huBLUOGDMmGjPioBw8elMyZMye7DnfHv0wcxr9NPMazM2Vys2NajvW8TrTwOvbV1+TFPi95zrFQwYLyXPu2Uq9hE09+UsLrO+++Jy3btPfs07FDO6n/RHBc2IbNnpb5CxZ6yi+e975cfNGFnrzUXDl69B/Thw5IlixZUvMwMdd95MgRex/FvKOzQzz6s1OdRHtvu/tEWsY5IuF5ESnZ+9w8LzKffXakYhG3xXpfRqzMbIyF7YkWXi8rnNhtefOmjWXvvn0yeeo0z6meaOEV7oRXb0qI5+o2bNwHCSIP3BMHxYVl3FeXFpdJIPUJ3NjqrbAHoUvwsGi4gQRIgARIgARIgARIgARIgARIgARIgARI4DQiEBfh9Zedu+T5Lt08WNq3bWWFkwGDh1kXs3/++afd/vnypZIzZ45Q2QMHDsjsd9+XmbPelm+/2xCysMyVM5dcU6yoPHj/vVK54s1JWjsuXrpMlq9YKeu/+VbWrvvK1oM6SpYsLkWvKiy3Vq4kRYsUDh1XFz5evlLGT5ysq/Y4wwcPkH1GfBg7YaLMfOMt2bN3j92eL18+ubpoEeM6tW6SLn+Te149er8kW7f+TxATdf3XX4fahYVril0teXLntnk3lC0j9R4PFi49OyVzxS+8Pv1UU2nSsL4cMIL2tdd74y8mJby+NmmqdO3R09OS2W/OCLweKDRx8us25qS7w9szX5diVxd1s+K+vP7rb63g9PHKlR6XxxCcq1etIvfVvlsuvCC/57irv1wrI0a+4skrVKigjUvryTQrEBTbtH9ODh06HNp0thENB/fvIzt37Q68hwoVvExWfvqZjJswSb5a/7Xs2LHDWgAXvKyA1L7rTql9z51JCo8QkZd+tEymTJsha75cF+rPsCS+vFAh4w64ltSqcbtkzZpYZI713s6UKaM87RPZmzVuGIpNHK6+/Ofnk7HjJ8rU6TPtOQIQuN9SuaK0aNY4NOkBVtFvmGfFm2/PtjzwXClw6aVyU/lypmwF+xuCG7CQ3PsSVcXjWQGhF3z+NeeBZxWup5tuqVQxtFr3sUek3A3Xh9bjveAKr4WvvEL69uohVxe5Snr26SdjXp3gOdyJFF7VBbGnQVGuIFZskCAb5e4sRgJplkAs992ygXeFOCHusiZMmHAThVeXBpdJgARIgARIgARIgARIgARIgARIgARIgAROVwJxEV43bvpBbqtxp4fRhLGjpHvPvrJx0yZP/ifLFoXEQ4hWDZu0CAlBnoLOCuKAvjJiiOTLd56Tm7AIa7tefQfIa5OnJNrmz+jxQmd5+IH7PNmvT39Dnuvc1ZO3auVHUufx+lYI9mxwVpobMahV82ZOzrHFlJzX7XfWjnhcPcpdtWrK
gL69dDXuvyq8QpDp36enXFX4SnuMX3/7LWbhFaK430p21PAhUuWWSoHtfrF3PyPCeYWf1cbVcPZs2QLLpzQTVopduvWU12fMTLKqFzp1lEfrPBgqBxG1xl33yuYtW0J5WHh94ngpU7qUJ0+ZupnPtG5pBe2ge2jKa+OMK9gPZMrr091dPMuYDDBlwhi59JJLPPm6AqGzUbMWdjKC5gX9QoQdN2qElL7uWs/moHZFurfPPONMKV2+gqeOEUMGSLXbqti8oPomTxgrEyZOkbnzF3j20xUIq9MnT5CMGc+S+o2eks9WrdJNiX5xT+DeCEopuS9RXzyeFUH3T1Bbkde7xwty/733hNuc4nwVXlu1eEoaN6gX8jJwMguv0YqoKvhQ6ElxN2EFaZRAcoVXxYV70BVhkc/7UenwlwRIgARIgARIgARIgARIgARIgARIgARI4HQmkGrCK6wzYXnqTyq8wnrs0XoN/JvDrsN6dfas6ZLvvLyhMhDM7nv4scDjhAr5Fho9WU9gjaspSEy5vkxp+cRYGSaVJo0fI+XKei3SUnpeJ4vwOstYFP5srPEa1q8r6dOnD6EIEo6SsnjFPjdVrhqyZkZlYDzm5aHWejNUuVmAte+9Dz3mEeNLXVtSZkx5zS0Wt+WjxoV03QZNZNnyFVHX2aJZE2nZvGmo/OervpD76zweWscCJgu8NWOqsaA+0+bv3btPKlSp7mEAUfudN6ZZvkGCZKUKN8uiJUs99QatQDRd8MFsyZsnj2czrKbvuPt+D0tPgYAViKqwHtUU1K5I93ZyhFdc31VfrNZDBv7WeegBOfr30ajE8bEjh0slYyXvppTel6grHs+KoPvHbae7nNrCa92GTa1lNvqhm05W4RWiKyxYo0kQfCD8UOiJhhbLkEBiAiq84r5LymW3f0KEX3TFdt6PiRkzhwRIgARIgARIgARIgARIgARIgARIgARI4PQkkGrCazhcEF4Ru/HWarUSCUIQVytWuMnGjITgpO6Jta4KN99krPKGh+LEznhjlrTv2Fk329+LTBzQxx5+SLJly2pdtM6dvzBRPWs/XxGK3RkkprgVwt3pvn2/JmorykCweO/tN0LFf/v99xSfV4fnu8imHzYLhDq/FSUs/9RNc8WbbxS4cD3eKUg4Skp4RRunz3xTcG5uAr+nmjSSKy4vZK75EVn71VfWzan/ukdyS+zWl5zlV8aNNxbT/RPtCjGw6FVXyarVaxK5fEbhGVMnetxNd+72okya8rqnHrhuvffuBEvwrt17JbLKds8rSOD0VGZWcH+o22v/ttp33yUv9eoeykb80ycbPxUo3N56S2XJkyuXLFr6USJXtzjGvPfflnOzZ7d1RdMuPSju7eQIr7o/ftHH/f3e3a7LuM9xj/j7CrbXqF5Vhg7sp0UlHvclKovHswKujh9/srFt25Yt/0t0PdHvNOH+xn1+vBOF1+NN/OQ93vSlm2TT9t/lqVpXS9bMScddHz/vO8Gzp+5tid36n7xnmbKWzV+9TTb+9LvcX+EyyZk1k63sg8/+J1t++UMerlxIsmU+K2UHSObea3/YI8u//kUqFc8vV150bsy1uMJrtBMecBC/6Ip9UReF15gvAXcgARIgARIgARIgARIgARIgARIgARIgARI4RQmkqvAKEafHC52kZIlrrDUe4qbmyJFDEPd12MujPMgef+Rh6dyxQ0hU/fvvv6VB0xayxIhDbnJduCJeJiwzNeF4i+e967GiRFzMWrUf0CL213VHGk5MqffEY9K8aaOQe9v3P5wnzZ5u7akHK2s+XW5FXizH67xQ12effyEPPOK1ovSLfSh3vFNyhVe0c8abb0n75zpF3WSIcN27Pp/IqjjqCpIoCEGuZJnynlKwHkU82csKXBrKX/HJp9b1dCjDLMBid6pxBazp99/3yy3V7vAIadoft/+8Q6rWPBYDD/tAcG799FO6u3HJndhdt27s1/tFE7+0ou2LsAhG7GG/yIuyS+Z/IBddeIHd7aOPl8vj9RvZZf0DS1W4L85s4spqGj5ytPQfNFRX7W/rp5ub9iWI+uHaFe7e3rNnb8yuhnHQhx+8X559prW9dxEHtVXbDoGuhyG4vjZmlFxyycW2rQsWLZYGTZrbZf2Dtn22fLGuxu2+jOezAo0LYn88YqmGwIRZoPAaBkycsz/66md5duwniWq9KG8WKXNlXql63UVS5OJj8dATFUzljCPGwrzyMwnv1+5PlLECXlKHvO3ZOXLg0N/ixvxMap+ktg975yt5fdHGsMXOzZpR5nSrHnZ7am94YdLnMm/VNpnwTGUpmD/BHX77MSvl4/U75PWOVeTC3OekdhMC65++ZJMMeWudPPfQtXJ7mYTnZWDBMJnJEV6DRFdYu9ICPQxkZpMACZAACZAACZAACZAACZAACZAACZAACZyWBFJNeIX4MXPqayGBROn9888/cqNxO7vDuLHVdPON5WWssWRNly6dZtnf/fv/kJr33Cc//rgtlP/Iww9Kt84d7fpj9Rp6XMSGc0k7c9bb1opUKylrRLMKNyUIbkFiyu3VqsqQAX3lzDMT3MTqfn36D5JRr4zVVfurVovxPC9UfDoKrziv7zZ8b1wJPxpoqYjtmm6pVNFaLWbKlFGz4v4L0R7ivZtcYd/Nn2zirHbq2t3NkuVLFnhcX89fuFgaNvWKgM2bNjaWvOs9EwggKL/71kxxzy2cwBkUlzice+RuXZ6XR4xLXiRYgsMiXBPux/femRmKr6z5sE57uk17E0v2fc0StA+ui5GC2hXu3kb55AivpUuVMoLwWM/9j9i5RUpchyo9adGH7yZ6pgTFBP540Tw5//x8Es/7Ml7PCj0hCq9KIuE3nNATLt+7t3ftVBB6Fn25XTqN/9Q2vPhluezvbweOyJYd+0MnM8RYC15rhKsTlWZ+9IP88PPv0qRm0agsXlNDeB345lp5w7Qjf65zJEfWxNajmTNmkIGNj7lHP96sKLwmEA8numLrqXA/Hu9+w+ORAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmcvgRSTXh94tE61oLVj+7b7zYI4pi6adig/nJ7tdvcrNBy75cGyOixr4bWYfG2ZF6CSNR3wCAZOdorhMJSte5jdeSC/PlD+0RaCBJTgmJEog64nb3PiIZuGjNymFSuWEHieV6o/3QTXhGPF+52p06f4eKLuAyBr2WLplLHWEOmRnqqZVt574MPQ1W7fSuU+d9CkHXs4P595Y4aXksrf53+erAeZLkcJHBCkJw2eTx2SZS2//yz3FjJe89gwsCwQQkudouVKusRt90JC/7KFi5eYt0Su/nLF8+XfPnOCxRew93b2D85wmvH9s9I/bqPuYe3yw/UeUI+W7UqlO8KwqFMszD73feNeNzOzZIP5sySKwoViut9Ga9nhTaUwquSSPhVgRVrQTEjkRety9NTQehR4fXhypdL0zuKhmBs2/2nvPfpVnlt3gbJnCm9vP9iDUl35hmh7SfzQmoKr32eLCvli+Y76U6fwmuwe2H/PXzSXTg2iARIgARIgARIgARIgARIgARIgAR
IgARIgARSiUCqCa9BohTOYfHSZVKvYRPP6cC9a8ni13jydGXZ8hW6GPrd9M1a65I4qC4thNisZa8vLdeWKC43lL1ezsubRzd5foPElBVLFwaW/2n7drnJWOu6adTwIVLllkpxPS/UfzoJr7A6bNuho7z1zhwXnV2GmFaieDEjEh6QVV+s8bjq1cKwcIZwGO90xz0PJIrfemO5G8Iext8XgwTDnbt2WZfDQbFHUXHdxx+VTs96RULkBwmvjRvWl3atW2JzYPKLq2rxDUvx4qUTn0e4c/vm2w2JuL81Y6rANXFQu8Ld22hkcoTXieNekfLlyiY6R7+I7QrLbuGge0WF16BnRHKfN/F6VmjbKbwqiYRfv8Wcd2uCGJsWhFc9bxWPx7auaGN0bt/zpyxZ+7NcZdwP5zTudZd/vUM2GWvUR265XC7Jm9Xu9sfBv+SrLXtl9abdJtaqSImCuaTYpblC1qr7D/wlcz7ZKtnOOUtqBLifRbzSXb8ekirXXii5s2cSXb+7fAHJdNYxjxSHjhyVNeYYn23YaUThM+W6K/JY98jhhNeDh/+WL03MUZTPcnYGKVUoj1x1SQ7JkM7rVULP3f1Vi9dYhNctv+yXL77fJT8Y6+GrL81prYbznnu2W21oOdqyR//5V779cZ988u1O+e3PI3LlhedKlVIXSs+pX0R0NXz4r6Oy2Fg37/vjsMCyGaxyZEnsyQHXd+U3v9g2n2ME98svyC5lrzpPsmRKHFv3T+POed3mPfY6/330XylsYrj66w3navib/+0z126PnJ0xndxVrkCIg39BJ0IkNeHBf9/iHqXo6qfJdRIgARIgARIgARIgARIgARIgARIgARIggbREINWE16kTX5XrSyd2FRprnM+gi6FxVeEmtUfvl+RVE/MyqQTR6Z67akmtmrd7XAgHiSkbvlot6dOnT1QlRLWyN1X25KvwGs/zwgGCxKQgS0lPY47DSnJivA4dMUoGDhnmaV2+fPlkrLEWvqrwlZ789z6YK0+1bOPJw0o4cS5RwRgySpermEhwjGF3adSgvrRvk1gYDdcXcM7z33tbMmfOnOgwQQLnoP59pFaN2xOV1Qy/q2212N2ydatUrlpTiyXrV62+g9oV7t7GgZIjvIarLx7Ca7hrEQsUfd7E61mhx6bwqiSO/ULs8ScIO/g/KQHI3U9Fy3rVCku9qoXdTSfNcjiLV23g0Le/kmmLN0qHB0tKzesvsUJru1dWys3XnC+fb9hlY6mi7OCm5aXU5Xnkx11/SP0Bi0P5Wg+sZke3rCCXnpdVIB7e2fUD+XX/YZndvbpHAISYemv72Xa3+X3usEJrs6EfWcHULQvR8Yl+i4xAe1APYX/vMuLs3FU/Jorx+pOx4K3bf1GidhW9NIcMaXqjZMxwTND1VPjfSqzCq3Lz19Wz3vVyc7HzPdnRlv3HfGs8b9xCLzXCt5uuMOJrrmwZZcXXvwTGeH301itk4vwN7i52WcV03TDvi23ywsTPdTX0e2m+rMaNcnnJY0RwTbCIrhfAE7Fu+xqrYI0LHCS8rjOifJPBS21VL7e4SYoVyKXVJvqNVnjVcqiAomsijMwgARIgARIgARIgARIgARIgARIgARIgARJIgwSOu/AaFCszVu6rViyVHDly2N0gvo599TUZNebVqES02nffJb17dA3Fk4yXmBLv8zqdhNcKVap74vTiwn26bLHkzh086Pvx8pXyaL0G9vrqn/vvvcdctxd0NS6/fovRWCuFW+vnOzyTaLf1X38rd5jYxP4EYXTeu2/LWWcljlMYJHCOHDZYbrvVK/S7dTZs9rTMX7AwlAXXzJ8tXywbvt8o1e64O5SfnAWdUBDUrnBCKY5zsgmv8bwv4/Ws0OtB4VVJRP5VYSetCa/tx6yUj9fvkFEtb5ail+QMCa+gVfrKvPJQpUKSL0dmgSXnoSN/G3FzsRVD4bq42nUXGa8QIh9+vk0mLdggeUyZsa0rGEvZTDJyztc2TwVdpb947XZ5/tVPBQJq23uL22y/8PqPEW7bjF4hn32301pjPl7lClvnx8b6dsisdVqVLBt4l12GhW3DwUvkx51/SN2qVxpL2ovkgLF+HT/vO1m27mdrMdrlkcQTtEIVmYVYhNe3l2+Rl2askYvyZpE2tYvLRXmyyJofdkv3SQkuy8e1rSRXGEtSpFjKor1j3vvGxpl9+u5i1hr1a2M52m/ml1bERn0TnqksBfNnw6LotcPy3TcWkLtuMJal5nrAhfS0xZsEIunEdpWt8A3r1btf+MAK0889dK21XP3FiNqTjGCL6++6oobo3WDgEoF17P0VCkoNI8hDTJ9vhNspC7+39b7dtZp1Te0XXtdv3SuNBi217qsHNylvLafRvnAplvsOZUsWzJ3I0tW1htU+Ee54zCcBEiABEiABEiABEiABEiABEiABEiABEiCB04XAcRdeP5g7X5q2aOXh163L83K+iScZlIyuKogPetZZCe72zjCjyRVuujEknOo+R4/+I59+/rnMm7/QxIVcnciFrJbDb9tWT0vTRk/arHiJKfE+r9NFeN3xy04pV+EWF7/UqF5Vhg5MiEXq2fDfClwTX39jZY+QDmvR5YvnBRVPdt4t1e6QzVu2hPaHC9pB/XqH1v0Lhw8fkYwZj4mml1x8sRQqeJmn2NGjR+Wu+x4O2/9atXhKmjdt5NkHK0ECZ5uWzaVZ44aJymqGX9AuWqSIzH5zmuzevUfK3FhRi9lfuOm9+87wVrD+cytZvLjkzJkjsF2nkvAaz/syXs8KvTAUXpVE5N9YBCCt6VS2eIVb3vmrf5I+01bb01n0Ui3JkP7MkPCaP9c5MrnDLTZPz/eDz3+UHpNXBQqZLxpXuO9/+r+Q5ewPxj3xY30XWvF2YONyWoV0nvCZLFzzk7iWkH7hdevO/VKn1wIrQL5qREy4xNUEy85R735tV1Vk+9BYwEL0dMVcFIB1LaxgIcgihm3WzInd6Wq9KrwWNi6Wz8+Z2FsA3CBfa+L/It39wodWfJ7+fBXbRq3jPXP+cAnsipixlFUXyuOfqSSF8icIt6gb7n6bDPnIHiZIeEVM2l71r5czoYKbhO+Z5yd8KkuM62GIrLcbd89/HPpLvt66T7JnPsu6lLYFzR+4QH6k9wKBVe24NhVttl7nSiUukG6Plbbiupbvbq7/us17pdMjpYx76ZziCq+wnG358se26IjmN3nOQff3/ybnvvPXQeHVT4TrJEACJEACJEACJEACJEACJEACJEACJEACaYHAcRdeV61eI/c99KiH7auvjDRianlPXkpXDhwwMUPNsaZOmykfzPUKdqVLlZJpk8fbQ8RLTIn3eZ0uwmuQ9Wo4S1H3mgfFX137+QrJkiWLWyxFy35XvYhpitimKUljxk2Qnn3Di8qoe66xevULtkHCa7iYpqgDMWRhseumShVulrGjhgsmIVxeNMFiTbc3afikPNP6aV2N+jeoXaeS8BrP+zJezwqFT+FVSUT+TY4AdCoJrzj76v/FW91vYrTCElRT98dLC0Q2JMR0havh2jddJq3u8cZEHzRrrc
xc+oP0a3iDtUTV/fELt8QQ3Vzxs56xjt2w7VeZY9wNn2vijaqbYVjGvtH5tpBQ6BdeFxlhtpMRaOuYuLJNahZ1DyM7jZXmPUb4RFLhdejb66yFZ2NT9gYTr9RNEITRhqTc06rw6u7rLquAiRiqd3R6326CCOqmXb8dlLbGUrdYgZxGWL7ZxluNtuzu3w7JXcY9syuAunXf32OetUANEl571C0jFa/J7xaXpeb6PjfuE3mgYkFpfmcxzzZYv+7df8hav0KQfXrEx9aKdU636rbcoDfNdf7oB+nboKyUK5LPs69/RYXXO264VGav2GI3TzKCPVxOR5OSc9/566Xw6ifCdRIgARIgARIgARIgARIgARIgARIgARIggbRA4LgLrwcOHpSrS5bxsI0kxG37abusXfeVLQ9LyLx58kiZ0qXk8OHDsnzFJ556SpYsLudmP2aNohvveeARWfPll7pqf3/4NsEtYrzElHidlzYySHjVuJta5kT8xhrjdcNG4/a2ptftrcYiDdf+oFi6sEZdt2pluF2SlT/s5dEyYPBQz74rli6U8/Lm8eTpyqLFS+XgoUN2FX3xxvI3ePrb1q3/k0pVa2hx+9u0UQOZMGmKFUp1Q6lrS8q0SeM9sYaDBE6Uf2PaZClZ3CuyIL9P/0Ey6pWxWAylls2bSYtmje36Q4/Vk08+/Sy0Ddaws6ZPDoxdjHPCuSHhvDJkyCC3Vq5krMrPPOUtXuN5X8brWaEXJUh41XiyWuZE/Pbs00/GvDrBc2h9Xnoyj9OKCkA4HNwNR5Mg+CCdCjFeg86nrBEpYckJi0lNKrwibmijGkU02/42GbLUWju+3rGKXJj7HM+2HfsOyL3d5gosRse0qmC3vfnxZhlg3OSqu+Elxs1wR+Nm+Mnbr5InqlwZ2t8vvI794Bt59cPvQtaaoYJmAdacN7V+y2ap8Krtcsv5l1sbt8b3mHMNl1R4hQh9Q4DYCGvgdGeeIau+32WFynD1aD7aFktZFa4hjnc0Vqr+1MGIqBDLg4TXCcadcMHzE9wP635qyeq6zsYxRsxeb4VoLae/iNE7t1dNu6o8g66zltdfFV51Hb8zOt0WaDXsltFlve/cduq2aH8pvEZLiuVIgARIgARIgARIgARIgARIgARIgARIgAROJwLHXXgFvJZtO8g7c971cOzfp6dxhXqHJ+/3341bwyee9LhtvfnG8jJ+zEjZt2+flLrhZk/52269RUYOG+TJw0rjp1rK3PkLQvkaCxMZ8RRT4nFe2sgv166Tu+9/WFft72N1HpaunZ715B3vlViF10OHDkuREolj+D3xaB3paOKjpkuXznMK+3791biibu0RDVHghrLXy+TxYzxlU7qy6YfNUuX2Wp5qCl95hUyfPCGRZe1b78yR1u287GdMnSilSpaw+0OwRF91xc4Cl14qH86ZZfrYTOnc7UXPcV7s1kUeuv/eUF444RV99a2ZU+SC/Mespt77YK481bJNaF9dgLUurHaRps14Q57t1NUu6x/Eye3VvatxT3mGZhnr2KPSpXtPmfL69FAeRO5Pli2SzGeffcoLrzipeN2X8XxWoF1B1tHDBw+Q6lWrYPMJSyeb8OqKN7FCORWE19tMPNYWdybctzi/LGdnkPRm0oM/RRJen3v1E1m69md5xQirVxmB1U0bfvpN6vVbZEXcPk+WtZt+NdahNY11aJnCeWVAo3LSZeLnssDECZ1mhNsLHOHWL7zC2hJWl0+Z9j5YsZB7mJDVLDJVeNV21a9e2AiQ3klRiPWaOWN6ucwIk36x2K1YhVe03RWi3TJY1vPEcs961+PHk3C8rIYt6khO2RuKnCcvNbjBUydWVAwNEl6HPXWTlCiYy7PPui17pcngpVIZ7oKNmOy6K0ZeqSvy2Niv6Acthi+z8Xlndalq64C1MayONe6vp2Lfiiu8Qshf+c0vNgbtqKcrSKazvO9d3652lcJrEBXmkQAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkEDSBE6I8PrV+q+lVu0HErWuVs0aUq5sGcmbN69s3LhJRo151RPnEzuMGTlMKldMsNpp0/45mfX2bE89EJduu7WyFClc2Oy7T2a8MUtemzzFUwbH0Vie8RRT4nVeaOxe0/brynmFZeTfc9edllGBApcGWkKiTGqmWIVXtGXI8JEyaOjwRM2C5eft1W6TywoUMNZS/wr4TZk2Q3bs2JGo7MRxr0j5cgmiQaKNKcjwi/KoCoIp4tDC0vT3/ftlwaIlMue9BBeWeigItO+9/YauBgr4E8aOkpvKl7PiZvVatY2IuSlUHuLm/Pdnh6xrwwmvukMJE3P14osuNHGMVwXygeD6prGOPfPMBMHmjz/+kNuMpbGfJQTsSjffJFdeebn89NPP8vqMmSGLcj1Ww/p1pcMzre1qULtOJVfDOIl43ZfxfFagXXPnLzSTQrzunyG031Wrplxd9CrB/XHhBccEd+xzPNLJJrzinCECxZpKFswdtYVsrHXHo/wiE+ez0/hPPXFHI9UbSXidtGCDjJzztbR7oITUKnuppxqNb/pkdWPNetsxa1YVRWEFeV/3uVL8slwy3MT/dJNfeF29abc0H7ZMbrn2QnnhUe+EmvVb90qjQQmW8yq8vmbivo42cV87m7ijt5W6yK066uVohde//v5HKj3zjkeoDHeQWMoe+fuoVH5mtnX5+3bXata6VuvVbVgPEl5b3FVM7q9QUIvb3zeX/SAD3lgrcL/8iHHZ/PKc9TJ5wffWihnWzJp+2v2nPPDiPM/5TFn0vYx4Z720qn2N1L7xMi1qf2FJu33Pn1L4ohySM2vGUIzXBsaKuU7ly6XVyOWCSQwQ+jvXKeXZN2iFwmsQFeaRAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQQNIETojwimaFE+MiNRkWsS/16hESl1Z9sVrue/ixSLsEbvtwzltyeaGEwdB4iynxOC9tdOlyFRMJz7oN4syAvr109bj9Jkd4Rbzdx59sLLheyUkPP3i/9OjaKTm7JrnP7t17rHtgxEyNJc1+c4YULVLY7rJjxy9SruKtnt1vqVRRXnl5aCgPbrEfqftkaB0L1W6rIiOGDLB5QQInRLg9e/d49gm38ua0KVKieDHP5uUrzTGNFW4sqVDBgjJj6muSPVuCe8ygdp1qwivOPx73ZbyfFUGuqd1r1bvHC4KJJMc7nYzC6/FmcDyOF0/hdc2mPfLUsI8Ebmkntb9F8ppYrUiIT/pE/0Xy6/7DMqhpebnu8mNu1D/66md5duwnUvrKvPLZdzsD3Qf7hdf9B/6S6h0TvFW48WQRI7bR4CWyafvv9rgqvKq1MmLHvtKyguTOnsluP/zXUelsrDdhefqSiVdaKL/XGtYW+u9PtMIrimt7GxpXzI85IiZE65dmfCllzLk++2BJW3MsZXFu67fsM0LpFUYwPebmue/0NfLOii22viDhFddjXJtKIYven/ead+FLC20MV41tCxfDUxZ+b0VxiONIR//5V1D3u59s9Qivai3rrxfX+eHe822905+vIvlznRMSXjUG7l7TB
x4zx0ZfaH7X1fJAhUL2WOH+UHgNR4b5JEACJEACJEACJEACJEACJEACJEACJEACJBCZwAkTXuGadcSoMYlibIZr7u3VqsrAl3rZ+JNumcVLl0nzVm09MTTd7e4yRCUIXYUKHrMUibeYEq/zQrsRd7N+42buKYSWTyXhFY2GBWbbDs97XD6HTibCwlNNGkmrFs087nEjFE/Wph82b5EGTZrL5i1bktwflqqjhg82VsfHXFli3wWLFnv2XfjhHLn0kks8eUHWtaOGD5Eqt1QKdOkL6+7eLw30WMp6KvxvZXD/vnJHjepBm2wfatGmXdT3x2vGSjdfvvNCdZ0uwms87st4PysAuU+/gcayf1yIt7tA4dWlcfotx1N4BZ0x738j4+d+Z8XXsoUT7uGV3/5ixbg6xrKyibGwdBOsPmt0es9uR/4HvWpIlkwZ3CIhIXN29+rW/S02zl+9Tbq+9rktB1fFubJlkhXGjS0SRD0kFV6xPMpYvE40lq8QCxGjNVvmDLLsqx2y69eDAhe4cN/reD/HLp4Ui/AKYbPBoCW2HYhpe02BnALL0Y/XJ3hRgEUvLHuRYim7ded+aTBwiWV1xYXnGqvSc2Xt5j2yZcd+K4ziXIKEVwjO2FbJuBCGg3e9HvfedJm0vOca2w4VzbFyY7Hz5YJcmQUxX9G+A4f+9givKDN+3ncy5r1vsGjdFadLd4Y9P5StWfYS6fBAgrCsroZVeEX5b3/8VZ4csBiLMtgI8aUcId5mOn8ovDowuEgCJEACJEACJEACJEACJEACJEACJEACJEACMRCIi/AaZLnlxr+M1B5YQY6fOFneff/DwGKlS5WSNi2bS5nS4V3j7dq92wgYg+SzVavkxx+3JaoHbmErGPeqLZo1lrMzJVjcaKGZs96Wds8+r6v29/v1axLFHsWGIPe/Y0cOl0oVE7sEjsd54ZgffbzcWuv5rUVPlPCKuLslypRD00LphU4d5dE6D4bWIy3ACnP8xCkyf8HCsMUgbsLVL1zeXlbg0rDl4rkBwvBUE4t10tRpgX0IbXqy7uNS7/FHJWvWLKFDf7x8pTxar0FoHQuNGtSX9m1aevKw8j/TNytW8Qqk+fLlk48XzRXEm72txp2efWBZChfCo43L7cHDRni2YaVShZvlqaaNknQ5jeO+NnmqjfsaZNl7kXFh/HSzJnLnHTUS9ftY7+0gi+jRI4bKrZUroskSS33+2Kyui3Bb2X9/Vn+5Vmo/UMfNsm6cg/pOSu7L1HhW/PXXX/LapKkydsLERG6hT5Tw2qf/IBn1ylgPzx++XedZ50rKCSxZu106vvqpBImiQbUjRmfb0SvksSpXSMPbj1ldalnjrV0mL9wgC9dslw3bfrXZBfNns6IfrD/PDFA3B81aKzOX/iBVjfvZTgHuZ5ubGKOwWp1jhNdzs2TUQ8lbyzcbS8+toeNA5HzWCH5Nhia4Gp7bq2ao7D+mYRBeF5rYpGoRi42wHoXr46TijWob+xrL2HJGuE0qbdz+m0yYt0E++U90Rvmil+aQpndcHRJdtY5YyoIDXCfDOhjpXOPOt+29xWXpup9l7uc/yoR2lU0c2wRPAR3GfSLLTD5isb6+eJONy4p9IMTeUvICaWwsct04vhCzB81aFxKucd06PlzKxua11qvGitVNUxdtlHkmJq9eZ7Sl9o0F5PFbrzQeQRJieE9fukmGmDqfN9e1mrm+mtT1NITwqc/eaoVz3eb+qvCKvJKFcrubYloGNyRXjI+pAhYmARIgARIgARIgARIgARIgARIgARIgARIggVOMQFyE13icM4Sv7T8bK5hduyVd+nSS77y8ckH+/IksXJM61oGDB2Xz5q3y66+/Su7cuaxwlyGD14onqTriuT1e5wWBZueuXXLoUIJF0Xl580iWLMcEwHi2+XjUdfDQIfnuu+9l7769xhrzgI2DmiNHDsmbJ7dccfnlRgA883g0I9ExYBm545df5Jedu+SAaRcE10suvlDQttRMSVmWHj16VLZt+0m2/bRdsmfPJheY2J85zj03piYdOXLE3GM/m/PbKf8c/Udy5sxh48Zmzpw5pnpOh8Lxui/jyQITSPbt+9WK3+nTpZPzz88nZ511VjwPwbrSCAG4BEbKaqxLUzP9+sdh640g+znR9VO0C26Gc2bLGCgEx7Ot/xh3vTt/OyjnnpMxSXE3lrIHDv9tLVFh6RugZQeeAlwx/37gSMgFdGAhk7l3/yHLM4cjcocri3zw/Nu8s6ItH6ku/zZ1E+3PT+46hdfkkuN+JEACJEACJEACJEACJEACJEACJEACJEACpxqBk0Z4PdXAnQztHT5ytCwzFpfxSJmMJfCroxNbVcaj7njWAXe9v/2eEEcwpfWWua6UdWOc0nrisX9Swms8jsE6Tj0Cp2t/P/WuBFtMAmmPAKxe45FKFsydIqvZeLSBdZAACZAACZAACZAACZAACZAACZAACZAACZDA8SJA4fV4kU6F4yC2bTgXzck53KngTrRYqbJRxSuN5vzhqnfsqOHRFE31MhReUx3xKXmA07W/n5IXg40mARIgARIgARIgARIgARIgARIgARIgARIgARIgARIggSQIUHhNAtDJvJnCa8quDoXXlPHj3qlPgMJr6jPmEUiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEggXgQovMaL5Amoh8JryqBTeE0ZP+6d+gQovKY+Yx6BBEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABOJFgMJrvEiegHq2bv2f7N6zJy5HTpcuvZQoXiwudaVmJeu+Wi9HjhyJyyFy5MghlxW4NC51pbSSX3bukg7Pd/FU81y7NnJ5oYKePK6kLQKna39PW1eRZ0sCJEACJEACJEACJEACJEACJEACJEACJEACJEACJJBWCFB4TStXmudJAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiSQagQovKYaWlZMAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiSQVghQeE0rV5rnSQIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkkGoEKLymGlpWTAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkkFYIUHhNK1ea50kCJEACJEACJEACJEACJEACJEACJEACJEACJEACJEACJEACJEACJJBqBCi8phpaVkwCJEACJEACJEACJEACJEACJEACJEACJEACJEACJEACJEACJEACJJBWCFB4TStXmudJAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiSQagQovKYaWlZMAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiSQVghQeE0rV5rnSQIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkkGoEKLymGlpWTAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIk
QAIkkFYIUHhNK1ea50kCJEACJEACJEACJEACJEACJEACJEACJEACJEACJEACJEACJEACJJBqBCi8phpaVkwCJEACJEACJEACJEACJEACJEACJEACJEACJEACJEACJEACJEACJJBWCFB4TStXmudJAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiSQagQovKYaWlZMAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiSQVghQeE0rV5rnSQIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkkGoEKLymGlpWTAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkkFYIUHhNK1ea50kCJEACJEACJEACJEACJEACJEACJEACJEACJEACJEACJEACJEACJJBqBCi8phpaVkwCJEACJEACJEACJEACJEACJEACJEACJEACJEACJEACJEACJEACJJBWCFB4TStXmudJAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiSQagQovKYaWlZMAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiSQVghQeE0rV5rnSQIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkkGoEKLymGlpWTAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkkFYIUHhNK1ea50kCJEACJEACJEACJEACJEACJEACJEACJEACJEACJEACJEACJEACJJBqBCi8phpaVkwCJEACJEACJEACJEACJEACJEACJEACJEACJEACJEACJEACJEACJJBWCFB4TStXmudJAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiSQagQovKYaWlZMAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiSQVghQeE0rV5rnSQIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkkGoEKLymGlpWTAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkkFYIUHhNK1ea50kCJEACJEACJEACJEACJEACJEACJEACJEACJEACJEACJEACJEACJJBqBCi8phpaVkwCJEACJEACJEACJEACJEACJEACJEACJEACJEACJEACJEACJEACJJBWCFB4TStXmudJAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiSQagQovKYaWlZMAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiSQVghQeE0rV5rnSQIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkkGoEKLymGlpWTAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkkFYIUHhNK1ea50kCJEACJEACJEACJEACJEACJEACJEACJEACJEACJEACJEACJEACJJBqBCi8BqB964PFsnXbz1L/oTslyzmZA0ow61Qg8Psff8r2HbvkvNw5Jce52aJq8s49+yTjWRkke9YsUZWPpdDRo//Izzt3y2+/75cLzz9PsmeL7zH++ecf2f5LQv15zDnnzZUjyeb9+++/smPnHslm2nLO2ZmSLH88Cvzx50H5488Dcl6enHLGGWckecjf9/8p237+RS7Kn0+yZkn6fsU13rV7r+TKca45Ri5zjCQPEVOBQ4ePyE87dspff/0tl16UXzJlPCvi/tpPM6RPb/pFXsmYRHlUtmPXHtOP/pACpv6zTH9l8hII9wz/88BBWbLiC/nhfz9JZtPf6z1Yy7sj11JMYPsvu2TKrA+k7LXF5MYyJVJc38lWwaQ33jf3XHq5/44qnqYF9a3de3+V2fM+kkKXXig3XV/SUz6trETqD+u/2ySr12+QPYbTbRXKylWXFzhlseC5P/WtDyX/ebmlasUbjut5/PPPv/Lj9h2CbwC8B9OnT5fk8fHePHT4sOTJhfdsksVjLoD6ce3x/kOb0qU7M+Y6Iu2QnPdmrN8WkY4fj22xfn8dNn1s84/bJWeO7FF938X6LRLrOcX6TZucb9RYzznWc2B5EiABEiABEiABEiABEiABEiABEkgtAnERXlesWievTJ4VsY0Y4D4VBmGPHPlLGnfoZc+lrmnzTafAwPHBQ4etCAPR6ZzMZ0e8Dtio16tiuVLy2L01bHnN8+8M4Tlv7hxy8/XXSvnSxVM0eDbr/UV2ENp/DP/6y72fteIn8ld+8ZWMnvSmv0hovVqlGxINgK9a+61MfvN9+dUInJogvD5d/0G5+IJ8mhX6/fPgIXuMrzf8IBhIQsJgIfrrg3dWlTPPDB6VxCDSLCPSf7Boud3vpU5PGzEve6hed+HT1etl/PTZZqDzSCg7W9ZzpHm9B6TgJReG8pLDCDtv+OF/MuzV6Vaw1MouvjCfOeeHJEf2rJoV+oVoN2LCDCtA6TnjWte+vbJUuOHaUDn/QiznrPui/g49h8qefb/ZrBfbN5XzzeC0Py1Y9pm8/eGS0DlgoBZsmj5xn2TLco6/uDnnreYcZgoGeDWBf5vGj0g+I6j604/bf5Hh42fITiO6asIxHql9u1QoG/6cURZibcfew+11PtsIdsNfbKdVeH6nvTPPitooAABAAElEQVRPPly8wpN32SUX2OvsF/Mx6Dp26tuy/PO1ofJoDwSdKjdfH8pzFz5cslLefG+hFXU1v8gVBaR53QeiEmx1nxP9i3t0+PjpthmjX+oo6dMlLRT424y+uHN3wkQJd2JFuGf4338flWd7DQv1w0jX0X+s5KyHa1+4uvQZ7D6Xw5U9GfJ/2/+HHDx4WHLlzC6YNKBpyqwPZf5Hn9hJS0O6t9Xs0+a3XutukiFDehnV57nQOYXrW99v/p/0Gjpeil1VSFo1eDhU/nRciLU/aH9XFnXuria33FRGV+P+G6598TrQl19/L4PHTJVy111jRfY+wyckWXXLJx+Sa4pcHirXptsg2ffr76F1/8LovuZZ6Yiqh8336siJb8hX324Mfbtgn6oVy8p9NW813y5esRPvnOmz58vST1abe/eQrR7vnMKFCkijR+6OONFwm3l/DjXfGLvMuxCTKhqa8kHpgKn35dfeEIjqbsLEA3xv4nhI323aKslhlJz3ZqzfFtruz9Z8La9Oe8d+t0X7bxi8z8dMectWUeLqK6WF+cbzp1i/v1B+wOjJRlz/JVQVvlHrP3SXlLqmcCjPXYjlW8TdD8tov36X1LmnutxyY2l/EYn2m1Z3TM43aqznrMfiLwmQAAmQAAmQAAmQAAmQAAmQAAmcDATiIrx+9OkaefX1d+yASs5zg4WnB++8TUqaQYhTIUH8wQDHE/fXjDgQdbKcC4QeDLLAYgSck0p6vSCkwqoXSfMwKKbX8A9jHaa
DcyhT9MqCdvA4nBCJMpHSwo8/k7lLPgksgsE0DOghucLrews/lplzFghEykwZMybat3L56+x564bPvvxaXjZiHNJF+c+Ty4xwB4HuZ2MJitSxRT0paKyPNMEC4jkjqMHCEkLM1eYcYQWiIizKYh9/giXHkLHTPCJe3+dbSO6c5/qLCgYsO/cbZfNxHsWLXCFbjNWCDqI916KusYi6yG5PDiMMdLY1A7YQdVF/kcsvk1XrvrHiHES/55+u72kTBnY79hlhy0OwuvKyS2Tfb7/bgVAUxIBt9crlPPtgJZZzdnd+wwiF785fFsrq3q6xXJAvb2gdC7jGuNZIEBIhAn+7cYsVVXFdXjJsYaGoCdv6jnjNrha54jJrZbT2m432ekAU6dmhmUcExwB1+xeH2PqwvVSxq+TPgwdlndkH6d4at8jtt5S3y0F/ug18xVyzn+0mv+ii5XHtYA2HBMvVAhdfIF9+vcEeE4OkA7u29oijcwwTiKhIEGYgDPxv2w673uGpJ+SKyy62y/pHy+MeLXXNVXK2uR8+Nf0d9yiE7B7tmqaK5ZIeP56//UZONPfYZltl48dqS5kSRWOuXu8r9CX0KTcFPcM3bdkmLw4ZZ/tWh6ceN30mj7tL3JcjtS/oYPoMdp/LQeVOlrx+IyfZ52SrhnWkWOGCoWbhWTvFWP5dX7LoKTHZKtTwKBeChNdwfSstCa+x9gfci+BWs8pNUvOWG1Pdcj9c+6K87EkWg5X3/I8+lScfvsu+j0ZODD9hTCf/YOKV+13c4Jke9jjwxhCUMGlJhUtYAvYc+qr9jsA7qUTRK434elTWGREW3hYKF7pU2jV9zFONPndRB74TMmXKKN98vzn0/dOzQ9NE3j7wbfbO3KV2UpRWdm2xwvJU3ft11fM7eOzr8qWxYkbCN2NW8y7/7Mv1Vhi+suAl0r7Z43YbvoGSw0jfg6gkmvdmrN8WqBeT8fBvmi/WfYtVmx6/r2bESWkohO9IiOfgj4Tzb9Oojl3WP7F+f+G7oFPfkbZuvOtgFQ7rZgjXSP4+hLxYv0Wwjyb0h5denqir9t8U+LeFm/TdhrykvmlRJtZv1OScM47DRAIkQAIkQAIkQAIkQAIkQAIkQAInE4G4Cq+nyoDxyXQB4tGWeAqv/mu414h0640l6Gsz5tiBMwy4NXvi/rgLPGol4Bc61XqqU8v6VsiKxMuMDxpr5Z520OseY7lZ89YbQ8Wnz55nLFNXWMHPFWkWL18lr8181wplEFjVzStcRHZ+aaQVJ/1C4UfGWuTVabNt3RAJNxtBDuJXkPAKS6j2xtoTg22P1K4ulcsfsxxQi4TLC1wkzzavG2pruIVwjGBd8tma9eIOhnqPe7s57nWhat80lsdzjPvL0kbsavTIPSGLXrVCPDdbVhnQtVWoPBZiOWd3R4gwEHkxMJwjezYrjPp5wkKxWcc+drfOrRpYwRwrf/39t3TtP9qK5g1NO8tee3Woah1Ed62kYGGIwfzN/9ueSEgdbSzyVxrLfDBq+vi9IUsgWOX0HzXZtm9I92dCltahA5kFuKadYPo/ROwftv6UyNoNZeH697lew+1uL7RtFDoHXIdew161bbq7eiW5w4gMSHA53bH3CLvsWgAvM5NYxpkBXwjP/YwFtboRdvt251ZPWhfG2BlW3TguRPcurRvIJReeb+s8mf9gcLpFp36hJrqD8aHMKBZ08DdIeA3afclKcx2nz7HWzY+bSTWpnWJt3+kivKY21xNdf5DwGq5vUXgNf7Wadexr35vDjPcAd1JN+D1StkXfGf6JAimr9djeeM/hfYd3J96h4RK+Ldr1GJLwzunWNvTNgXdFw3YvSrTfA/B+MO3tuVb4eqFNo1DoAniW6NT3ZftOgOgH8Q8JXhs6vDjUHhfvKPUKgfdv7+Hj7cQiTNxzRTYIZnin4rwg1mLSGMRI91vDPU/9RsH592jfJHRd9/2233q9gCDZ8Wkz+c3x8uHur8vhGMX63kzOtwXCjMASF+9UTEw7N1sW+/6ORniF1xHwwaQpWHgGCa+xfn/p931xYxndwngwUbfQM+bMl/cXLhf/+zPWbxFljl/0wbbdB9nJYuiH32/+MZHw6v22jO6bNtZv1FjP2T0HLpMACZAACZAACZAACZAACZAACZDAyULguAuvGCzCwPu1xvoVFmGaMMMcLlvh1PWuahVNrKp/5e25S6yb1ErlrpOPP/vSWkhBwMGgDwQv172h1gO3prBwXP3VdwKLzWuuutxaMWY555gLXgwcaN3XXVNEVnyxzs4eL128iNxQqpgVWnbv+9VaYUCIc8tfX/JqK+B8YyzucppBmVuNa7y8Jp4m3P3C2gGDNsjHIIkOeGnb8ItyX32LuGbfSRbjFhjlrix4qcd9HAaXMIB1RYGLrSXbp0ZUg+CDwazril9lLRlQF4QmxDJEW2A5Aiu74kWvkDw5jWvgsuHjyQUN8Afl4RiadAAZ6w3q3G056baU/uI8WnYZYPuFKyqhXh2w6d+5ZZJxWlXowGAZyvuTDvTCCgTWIEgjjOvZz9d+I4/dV0Mq3lDK5ukfFetccQ/b4E7wKyPYqYvaZ7oPtu5Lg4RXHYjEten2jNcq729jnTLX3A9wB1gtiZhw4RjhvmluBrCR/IxwDwwdN832G1dIhdCIfgrWroUuxL36bbrZulyrY2TEcs62gv/+dDGWvrDsbfToPfZcIYr6hVdYr8JlMKw4HzfXwU2wtEEfr2xc3T1iXN4hwQIHA9S411956fmQBRC2aT92rX0gTrbuOtCWG9CldaI4sHi2wJVfudLXJBow3//HAWn9wgB7LFxfHTB33YziuNpXENvvgVpVkBVKGPD+3LgszGNi7pYuUcTmQwCEWAP3i3UfuCNUFtcA7nBhEdXQ3GdlzfMI6X8/7bAi9KUXnW+uW4NQeSzoPXJvTWO1W7m8Z9vJuKKCQfVK5ez1wvtgsBEg/PF53ecg7mk8B9FvYZ28aes22WieecvNtYM1MVyU4t1xd/XKdmAaYrk+wxEnePb8j2SjGUSGlRAs4eHe81wTx1ldm+J9s9K8BxD7FZNNLjZlYFmEQW1/wjWCxREsyzaZ5/KF+fLYcu7zfvGKVRHb568T69p3/ZNfgsoiD+8lvOvwLNq99zf7TLvaWJ6Gi+0MQQZthqUx3F5jkgvcVPrfo3imgOtW0+cglBS4OL+UMIJLvrwJ7rvRN9FWTMaAm2+8j2FxXcTwAjP0d2yDuFLCvI/cBEEF7f3me7w/s9p38xWGsdsG931b7rri8snqr6xVXnrjzriwKVu+dImQ+ODWHc0yXI7ieXDHrTfJF+b5uPab762ba0zqgOU8ni0ffbLGHg9C05XmPVHetMH18uAKrxB2IvUtfW8GuRqGtwVYl4EHjotrB4aYdIGEGJbzln5iY5S7YRrwTJq7dKW16HffWfi2gNeA3MZaMpK7eD+naPu+soN1KjxLrP/uB/NOzmrfm7H0B7jk3bjlx5CL/hr/TZAqV+qakAt63Puff/mN/Gj6IDweoA/i+ywolAL6KzxU4LsKFp/wHlHi6itCz/Kk+i
t4YJIPJubgmYJJVHDTX8Z864W7l/wM4VGhiQlRgWuXlHtt/ebwe1nA/YlviTLGUrzxo7X9h0i0rkJykMcAnSyE8+jauqHdVycHBImm+p3i76e4DpgghGdFM+Pyf+PmbTZEQVAdOEjLLv3tM8EVfLXheM/jO/bKQpckKbyGYxTrezPWbwu0dYH5jp9srJcxSQ5CNMJWgF1Swiv6IK4Jwh3Akww8cgQJr7F+f6EenAe8NFxhvJNowmQ+WNfiOYXvIE2xfovofvidYdxQv2/+HYZ3EP4dg/jUfjFe+0q037TJ+UaN9Zzdc+AyCZAACZAACZAACZAACZAACZAACZwsBI678KqDGnBPhZiYOuA6yrhlwwCrDjpjULJFp5es2IYBAAiLbsLgRg/jdi3jWRlC2Rj8gtCjbtx0AwYmMMNfXUtq3WhDxrPOCrm4veXGMlLnnmrWggyzxtVyQctD+MTA3087dmrV9relid028rWZdoa8u8Ev2MFqoHO/kVbEcctBkIEop+cCgWGksWKEeASBQd2W6T7qClYH+zRff5OyAAsa4A/K0/r0FwIYhDBXLMIge09jEYEYnF3MAB9Yx5owoIvBraBBql7GlR5m3Y/p97zl8IsZ9M9l3Pme47id1ePpYGO4QUEMmCO510X3QYxP1woO4srzfRMsWGC5oX0H+0NUKVa4UMiVbSThVS1t1WITfQmDmfmNWBN0Dqg/KIVjpC4uYWEB97T+1Kh9ggUw+PnjvfnL4r7p0HNY4OBxLOes9epAr7at+6Ax1nLEL7xq+aDfIUY4XmMEEje+Gq7Nrj177fm4wjH2Rztfm/GuZ/BaB0TLmcHEJ82AKMQS3MPZjPgULiavtkUHgO+5vZKxoL5JXNFFy+AX1tHbft4pg7q1sfcCBtF/NVY+lxihNCh+qbrZ9E80QF0Q4183lkwQUSGmIuFe/80IRnC3jeeWm9TKxmXkbj/ZltHH0NdeNG4tl65cbWPi3m/Eav/kA30OYtAfbik1FnELE6sZfPzPeZznmH6drEgGK2B9hp95xplWEPBzwPMcz3gIv72GjQ+5I3fL1brtZjsRyM1z49+5+deZiQMNzQQDXG89R3c7lrV9/nysR/MM1v3gZhTuy9U9u+bjN0gg0MFytxyWMWmovZmIAmEbCe+b/kY8cGNRIx/PdVhbwaWwTuhAvpsQV/KBWrcZ99oJsS79cSAxwQExjf0J9yDef2cbt6dI7vsWYry6ZNf9MOmqbeNHddUKdK9MmSU3GkH20XtvD+UHLWgMTYhbiFHoJrwXlq9aa59Rbj4mMyE+uCb3GQDhGWKTP2nfCie84vkDa37t07o/OHdq+aSNRY7nFOLN+12b455BvHAkNz4yhORh5nmJyWGY6BJNiqXvKztMFsM1RkL/wUQTTPDxp3D9Idz9gzaj7eqFwl8fnntwm+8+83/asctwHJWII5i1Ni6wMXEiqf4KAQsue/HMdhOuReNH7w3F0Iz0raOeE+BFoolxnR4uob2wRsVkkcEm/rF+/6L8ZjPpo/ugsSY+a8LkHbQH9yG+PdTK0a1XJ5IFTfjSuty+g/bD4hXfm72fe8rUiakqCUndJD90V1VPfHG4fMWkIZ2gAjEcseGDvrHQl+EqWfs+BH1862DS2IXnnxd4Dnp89zcSo1jfm269/uWgbwuUwfcCvpV0cp6KvUHPVa0Tk+gQ7gHPA3hOOWrOubd5pwR90+o+/t9w31+YCHTkr7/MJICc9t2m++n19Iv9sX6LaH0qsqNv9u/Syk6MCBJeY/2mTc43aqznrOfAXxIgARIgARIgARIgARIgARIgARI4mQjEVXgtd901RiBJiBnqP0nXYkTFtCombtBDZkY5ZnNjhjMG1vp0bGEFSB181XowIIQBPwzKYJAIg1JYf/rJh2wRiDEvvZwwMxwD9fcZwSLz2WfLoo8/l0XLP7diUi8z2ASxy60bgwwP3lnVWlRgACNH9qxhhVccCIPEENEQt2qqiWMHCyIktB1WChjUgpUKxBPUPaJXB7sdA5zPGcsB/GImfcVypWzcIwxiwArAHUxWwQE7QrTCwOY/5gRRLwaLMSA4tEc7Wz9cuCGeE2Jowirm7moV7UCtWs3Yg/v+BA3wB+X5dgtdJ1g3wT0q0kLDd9Ib79ll18WqzYjiDyxNnjZuRzHA2L1dE+MK2Bt3EQIG4o/eakRxzMTXpAO+bnw0HeDBNgws+pMKr671JFzp4Ri4LhBfrzXiyd+mTbh+sI5T0dBfl7seSXgdOHqK7SMtTT+FZZQ7gQDtrP9QLeNa8GK3ukTLkRipC2BXDHcrUAEIMU/VYs3dDgEH9cNyD6IIOMBaAxMgIqVI54z9/jTW5q1fGGjFch0YjkZ4xQDmUWPJB2tFiOKwIrcTE8zECXUDHa5dGOhFHFc8G1ToRlncN7hX4X4a25YYcVYT7pM7jbimA8uaj199LkGUQoxZDMa6ootbFgI3Eix9YL2LAVhNeB41r/uARzCFq12whntj1xof+6hQ4Bd8tD73F+eDa4GEiSxJCcnuvidiGXGCn+/zsr2mfTo2t3GDsR50z7rPQVjX3Fm1orH+yyG5zTMWk1G2GOvXgaMn27o6IG6gERLw/EZyhVcIARBXln22Rma9v9hYA5aSWsblc7r06axIPu2duUb8XWndkMPyPbtxLbnOCEuwekJSMR3LiB2L//Fsf/TeGtYSDM8JWArheqqADKvESO1DXf4UzTMY++Bd12NwwiQGWEDXNv0a9wjEMPRzpFZmMhCs15C0H2O5do3KNr4xROl3jEcJxC123z2IJQhREO/yasYiGaIJJn3gOaOCCu7R/aZ/jzCTjfA8w/MClppwF4t7NEh4deMG4n2Gvr3X9F3Ef4YVPCzzuhhLbohB7vsZ9yc8LOTOmd1aJEIIQHqmyaPWuhbLKqDgvehafmGbP6l4iLKNHqlt3/s4N0wo0oQYnVeZSU/wJAGREKm36atq/eh/BkTqW0HCK6zrYdWOdx7e2XADj/PGuxRtQd/C+xXPnR6Dx9rvA3fyD7wPqPDpWhYivjS+BYIsIPXc/L+x9H1lhzrurFrBeBMpZMTyTOZ+PDem/gDL3EPm/w6GAe5j9U6R1UzeOmwEppad+1khFd98eL9jcsHMdxfYfqIT89AGPGOfNfHZYaEKkf+2Ctfb/TD5BiI/RMcRPTvIv+a/SP1VRUd8T6Iv41pA3IYrV/S/wWYyjV6fcN86KkY9cf8dEb2NqJWqO/kL54Kkz328yyH+4dmOhPOAi19MGlLX88hXETIoxucKY737inGvj/Sy+QbVd6d+j+CdVK1iOdPXMsia9d+be/xTa1nc+9mnxO95wFby359IwqsKphAbcd2mvvVBSBDHOUBUBx/33wJu3bociVFK3pvJ/baIRnhFrHbEntXJXXAzHI3wmtzvL7BCuAs8L/SYyi/WbxHdT+PY67fTLBOSIkh41T4U7Tct2oi2JvcbVduH33Dn7JbhMgmQAAmQAAmQAAmQAAmQAAmQAAmcLATiKrxGOinXJSgGHtsZgQSDbojdOXTc9IRYhS3qWjdxqMcdf
PXHxsSAVAcTNxMz7Iea+GAQU3fsNDEWzSAcBochsKr1KOoa+IoRv77ZaK1AMPjj1u0OaKIskn/Q3i3vihsQ7Z76z82rPwZp02d724FVHbRXi0W/pQAGXp4x8b4waK/uNlVwwAAshJn0RiBAgrDUqH2Ci1X3eBoPCbG54BYsqRQ0wB+U568HA8U4LyS1oASDKWawP5uxTLr/jir+XZJcn2PEyDffWxRoQYGdlSOWEXPqvDy5bOwstXZz47VBJMCgE/oFrFchpGpS96ZY91sh4LwQnw0D6G7CQKwbU8vd5i5HEiF1GwbRUT8GPGHBogOrEACeM/FdXbfbbt1YjsQIln8Qif0xbbUOuLWDNc5T9R6w7r01H796z2geBkcRvxcD6kklPS8VVf3l1d2dazEYjfA6eOzr8uX6DaHqMNCOPh3kYjJU6L+FiTPfs5Ms/BbfOliHQXTcZ5g4AHeUcBcL4QnJPxCOvoT7Etfs2eZPhMRxv+iCfdWVHq6lpquNRfRZGTKY58739jmA6/9Cm4Z2IB+uVOEqOZxQBOELAhjENIiT4RLqeWHAK1Yo00ks4cqeLPkas9ntr9qX/JbQ+hyEq9FBL7T2WIfhfNS1uP96Y5v/GY48taSrXrmcmZhzK7Jsgjj2+/4D1jrOFcHV3WGTx+4NuYhWC3LEZMbzSBMG2tF3LzICcYf/LM8jtU/3c3+jeQajvFomgQsmBLjxMXWSAdzmtm38iK1ehTo3xjA2YHIErGbxHsZ9jPcNrCZhB1ei6JVGbLK72+dp0+d623LuRAEVSNxnMPYIEl61DeqtIaFmsfcG3HfjvuxmJldcaFw8u+9bf59A7EUI3e65gPMbRiS43lgbqmturd//q+Ih3HtDCNDU3lgCQuBTy3bN1+eRihHID3oGROpbvYaOtyI4xHAk9frhCt52g/mj3yoQp2H5+N6Cj63oqM8nuCSGFSw8TOA7yJ1EpJNsYomZGkvfV3Z+q0hteyz9Afvou33cgM5ahe2T327caiapZA49c7FRXa3jOapCrXLE5Kj2zZ4I9VeUx3sPbsObmXjeuBeQwrVPLQRdMR/l8Y4+eOiIERGvsJNuIn3rqEv9cO9D1KdWqHgPDezaOpF3EO1DKIt7u7gJk4F79Ovvf7D3IL4d2psJJupBQSeB4D2B+N76HIAgDWtquNRG8t9D6prebvzvDyYOvmgmZyXlhSOS8Ar30y+bSUdoO8RwnCfcZ2MSyrcbN9tzKGEE2eZ177dCtnt8XY7EKKXvTb2X9VjRflskJby68WhxXXEdohFeU/L9tWrttzJ8/HTbh2CdiucBUqzfIspChXrEsYdVOVI44VXf19F+06bkG1Xbh99w5+yW4TIJkAAJkAAJkAAJkAAJkAAJkAAJnEwE4iq8YuA2yKoOJ4x/zLuuTnVQXWH4RUN38HV0344h8VHL6wCsDpapK0XE73vCiG5uWmws5zDrGvFYHzbuBLVuDAwFxePyD9pHKq8D8WP7d/YM/OlgnlrKqBCFuIaI0+qmYSbOKAQezCBH7EFl44+3hX1USHOtHI6X8Oq6Nlbh1T2PWJdh+QJ30hBKlZO/DgxyI+l11u1qYYABw0EvtNFsa50Ft8VIGKgscFF+G08OApsOCLquGDEJoItxVYjBSvTfSy4837p1w0Av2oU6njVCitt3Qwf7b0EHooIGXXVwGUVdKzRYrY2a9Ia1YHbjsPnrTorRVCO8zjPCKyzZapjYe/6k/QWCKuI5ugnn/NrMd60bW8SKhACDiQtwbwuLt0gp0jnr4CmuTT8Ta1cHiqMRXjGYDkuz7cayHVZ5SJis8JixLkR94ZIOwkM8hnWva/mprnixb00TTxCin6ZwbjtV8LjOxBVsagbvNQWJLuryD2XwTMEguB4fg/UQRyHqIJYhrBOTHkBOcEcZSXhF/xkyNsHyDc+8TiZer3LWtp5svxCNmj3Xx4ptbjzi2fOWWktUfT5ru/U5GM59ZyRh0/8MR50qbPiFVz0efuHi8HfjXhPXDYI9xC3EcoZVIp7REJ9c8cfd178cqX3+sliPVnhVl71BFkQ6OQbPuuFmUhLSMSuxtrZ/2swk/mCSz07jzvtP4xodLCAIIsGTAKyTkcIJWUHC67E2JLbw1gkT6spT37e4l/2xlHUCDbxGYEJWrEnFQzyXEMJAkz4n/XEU1ZIRzx94qUAKegaE61tBFq8aVsEV9LUd6i5YJ2ipFaF694Ar6BcHjxOIn+8vXC5nnHmGFSL1uruTEODq/Sfj/tyfIAzdZTxj+FOkvo+yys6dfObWEUt/wH76bnSFV7c+iEi/Gm8X6H9/Hjhknneve9wuj5yY8P6EpxG4jk4qhWufTszB+7+msYQvaSYdYHJOtEmf5+49F7Svvv/8or+W1VAOEOSb13swNHkQzyBMkICYCWtsfTfjPsG3C55LODasTOEt4suvN9hnLCb24BsGwhw8AeCeHjB6khGUN1vBDu8NuK6HG3f0H7y7uprJQe59oW3T30jCq+v9xIrETY1I/N+kQXxP4fzRnnbGtbm68dV69TcSI+UcfsJS5Pdmcr8tkhJe9d8irrVzNMJrcr+/cL26DRxjkcHtvhtHO9ZvEVSCyZ+tug6wfcD1jBJOeNX7FvtG802bkm9UHAMp0jknlOBfEiABEiABEiABEiABEiABEiABEjj5CMRVeHXdwEVzqv1GTrSDQBg0GtKtrccCQAdf/cKa1qvu4dS14+Q3P7Du0nR70K+6jdW6IY5gENGf/IP2kcqr8OofPFQLCBUU1RrFfyx3Xa1tVHDwxx1FWY2PdiKEV4hzvYdNkHzG6rTns83cpidrWQe1YaXU0LiTDEqwCsAAvApZbhmNcQbh1RXlMMD2polHi4FKTRBvLrs4v0x7Z57HOlQH+rC9QZ27QuLV73/8KRhQ+9nE5dWYa1qX/zeSCKmuO9VNp7uvK2S7sfrcMkkxUuEwSIRBPdrvEE/z/LzhB5MxKPuVcZs9yLixRPJbydhM50+4c4YgqLFxEfvYtZ5V1knVrYeBKD5u2jvWWl2FB93m/qolBPJcS3Ato+40se6/T5EHa2dcZ1gTISYgBkTbdBuITUbUMNYkjuAbJLrAhWHDZ1605V2BxmaYP7C2h3AFt7CdjTtVJO27Q3s8k8iad42x+IXIEOmcJ5nJBQuNC1i0DW5Jo7EItgc+gX80DiKagPtN0+69+6wbUayP7d8pZA0V6TmIspGETf8zHOXDiWMYeB5nRNY167+zkw9Q1k0qvKorUMR3btUwwXrRLedfjtQ+f1msRyu8qkvZcJaHOjAObxDpjXvsJsZLQZCIGdQGeBIYNelNa2EIkcSfkiO8gm+kNqhgo8+wSO/blcaFKiYxpVR49VuFqvCK+IwQjTTB3SwETve+DnoGhOtbQcKrPpODXOtrnEf32wTPin+NBT7CFsA1M8IKYJLPB4tX2GcAhDUIW7AqhnB4T/VKtvnqilbPRX/dvhBt38e+Krz62Wm94YTNICEe+2g/9T+TcZ/h+QZB0Z9c0S0SR/9+WA/XPoiO/UdN8oQBgAgLN9BwqZzUsxUW
2PhWULE86NiwnsXx8R3Qv0vL0DPOLQvrVrx7cO1dl8Ioo7HL/W5lUX6wmYADd92a0PbGxkof9/GRI0dC7rdVSMPECUwwQFuQ4N1h3OvvCCYvwqU74i2HS5GEV/f5rt+9bj0qwPknP2mZaBil9L2px4r22wLlIwmva417d3wzueE3sE80wivKaYr2+wvCKmIEY5Jc0PM/Od8i+m+pW2+63kxMrapNCmvxGus3bUq/UZM651CDuUACJEACJEACJEACJEACJEACJEACJxmBEya8YpAJ4g0GvZDgFlFdwmFdB1/dgTbkaxo9aZas/GJdKB4lYrTBYgCiIAYf3YSZ8ojzdZ4ZcMJMe63bHdx0y/sH7SOVj1Z41UFQWHVdaqww3QSBEOIzhBm4oY0kOJxI4RUxrGBpqgPk7jnEugxhs42JAYrBfb/1UbR1IYYWBrjcOHfuvhBt4W4Plh0YPFWxSkVrFQSwjxsHTetQoTlcP9Fy4URIbFdry3DxOlt26W8HW4MG4aNhtHHLj9JzyKtWMIRw6E/aP6O1UNb7yu8S1F9vuHN2Yzn63X6uMQPquN9h0QPrmnoP1Eo0wOw/Dq5f664D7aSMoPiNKmxgP8R7hljpT4gVO2HGHJvtH+RHplqj60Cmii1oI1wlugnCDxIsrGCRCgtWJB0Q9ltmYxsGeWFR4goeGrsR1rGwsnaTuosNZ5n53kLjfnTOAmuhjUHy3DnPdXc/aZf1XojUQNeCJtJzEHVEEjb9z3CUDyeOdR0wWv63bYcVsUsVu0ouviCfZM6cSeaYeKI/Gje2KryqJTeeJ4hnnVSK1L6gfaMVXtVC2BXZtD5MfKjfJsFLAO75M84407i17mGfs667fy3v/sKy8On/4mvCchIuVvOYdyaencPGTbNFkyO8wtIZrrXxrB/Tr1OiGI/67r7DvLfhQjjS+/Z0EF71W8BvXQvAGqe8wMX5zSSSJy1zfSaDPcIy4NtpQNdWos9aWArDPTniYbsTT5AHN6/+lCF9+pC4HG3fRx3HQ3h1xTtMgoHXDwiE6IMQliEqQoBG0vd/0DPXFvD9CSe8ajF4WICwCPfw32/+0Wbj3d+nI+J7/+d3Wws7v28aN9d4VvhDYjhFQq7Po7XOdffFslr8hRNGMYkLz6fsWbPYcAx/mVi5mOzgxs7WZ6LfNTjqV2tSLAdNBkI+UiThVV3uolzQe1a90oSbuKLti8QoJe9NtMtNSX1baNlIwqtOsMR1gZt0TZg0AEEe3/XFzDdECROj1/89pGXdX73Xg76/cC9jkhjcskeaDBjLt4gr1OIb9exMGUPN+WHrT4KJIBCV8c6Dtx58q+h7PNpv2pR8o0Z7zqFGc4EESIAESIAESIAESIAESIAESIAETiICJ0x41ZhvGIzAYCoGKOB+EgNrSDr4iuWg2fMak01jtOqAHWJItTDxLCMlrTucoKYDQBjcxKBfpPIqbPkHmnRARtuuVnc6iB+pfZEEh0jCq99VZ7hjBA3wB+W5+2PgDwPGGDyv/9CdAuvmlKQJM96VJStW2TisiMcalLb/ssta2BY0cacgqvmTuq/U2LgYkEKsqqwm3lXZa6/2FIfgB1EBlgIqHkDYbNm5vy0X5M5ahRYIcEEuqfUA4URIbNc4bEHWiyr8YnIBXGr63RlHwwiD8M2ff8k2BfHFsmfLos2StcbScpCxtHStxnFMxC7FAC1EO43jqDtp/woa+NMy+A13zmqt6ZYNt6wDvJgwAas3uAH3XzdY9ECcBqPRfSEkHasNVqqd+420fdKNwXisRMKSDvy5g/ZuGb3fn/svxnQ01vPY33U5qwPCGovRrV8tvtz4aereslK56+TRe293i4cscF23klpAhSeIuBBtMVHjVEja19FW3H9+KzKIzRDg3Od3pOcg6lFhM8gCX6+pPsNRPkh4xcAungvoX3iOaJxElNd4m/rMxrOvwTM9bNmXez0bcqOJshjA/mLdt2bgOmGgHXmR2oft/pTUM1jLq2VYUIxQPDOf7/OyxwJLxTW/NSfqw3sTLl0hOKP9L782M5HVG8Tcxh16ep6d2FeFLHWRjzwk7e9lrzWeDB5J8GSgrvfhyhTCtpuUs1rIR3rfav8/lS1e1YtB0DNWJ1241md6H8BFOiY+6TMD/RHXBRPWIHrh/5G9n/M8I13O/uVY+j72jVZ4jaY/oL4gi1d9LvqFJTzrITq5z3BMPsEkFLhNRixxN8ECGBOvLjfxX+FmFylcf3X302WwxPHwveCK2brd/VVPDq6bVne7emTARJ3ezzUPe33GTH1bYEHZvN79nvi2qEufDe6kN9xnEMYggOU1dbtpthGCYeHqWuHq9VPPDm55WL3iWxZ9KtJEuEjCK6w2n2zb3VYb5GFDLSthRYz/3RQtI+0feg+4dajnCve9mdxvC7feSMKrWl275YOW9bol9/sLwjr+TYHrrXUFHQd5sXyLHDH1ImZ0NEldo8f6TRvrN6q2JZZz1n34SwIkQAIkQAIkQAIkQAIkQAIkQAInE4ETIryu/OIrGW3coGGW+AttGxs3Z28L4tbBigyz3ZF08BXLmG3dpXXDkNUBXOxNNy5jE8SYjmYg6ww7eNzcuORDci3fMGg8edb7stzUX8sM9lSreEOo7uMpvKpLMgwcwroRx0bC4ELfEQkubdsYq9+Cl1wYs8Xrwo8/E7ifdF2Z2srD/NFBPNc1dFAedoeY+fX3m2WsGRTEICQsUDCwCuZIGLh5d8EyI/hltXEQbWYSfxCzDMIdkt9NsLsrrt3/2bsLeCmq94/jDyYqioIiNnYHIoqiYHcAKgYWoojdXQgWNlIGdqBiYLeAdAhiggn2D+xA/SPyP98DZ52du7t3Z+4uXLif4wt3duLMmffEzp1nzjmnXHKdbzI4PJQP00Nff9Hg14x//rFT3DGgckZrzimfex971vfzG+83NwRoFLQ+rNWemYeiekB2w5zmB3M1+RzKoc98QUhN00MnBQ71QDP6QFpl6nNffxv7zoe+9pGCItFUrJGWCTUQooEOPUjVywnKJ9TkDPmHIEh8vII26o9UfmpKWgGtfCnfNut40DpzpR6u1pxqFZ3iXoxYqUF9l//y3jsEkrQvL3N9lSoorCSj+11NVfVVGK9dodoqF7sAk2qLh8BYrnWGceFFiD132tba7rdbGJ0Jxs0Ofl/srzE65hWUyJX0cFfzdnHXLTUHGc7jUPssfn5r/3e+8Q5vEg0QqDaamuxTCi9naHj0+PdNfRcq2H+9a0508TkvomhaWIeG5RSvOa/x1TWF5gZDc+/xcspcL1IohdrnIeCU7/zTsaTzVyneVHc4rysLvEZfvohei1TjTddlnbfR4yscR9HfKq0/NP8bPU4LlU/LxFO+a3B8PvX7rL5ylU50/Q83df0QK6msOtZ0bO28vesDtc3sPlD7P+uayx043L8ocI3r/zg0Yxr6E9VLTz26nuNrs+nYi/ctHK61Wkd4aUXD3V1z2BNcs9jxmre5Aq+hmVFdU7qc1ynTpHtovln53dp1dv+v4bc/1+9zrsCrrjcvDRzhgkzr24brrqms8qY
QfIo3lzs3mxoOTY/rOnKV2x8haKbtuOjaXv76G1pl0IaoJrJ+1/Syha7N0RYeFEyc9Olkv++jx15egMiEpMd+PruQZZLjQcvkCryqyduho9/OBJc1n37L9Nuh4yoaeA3XQzmqBnp4CUWOF1zdw5tEA4C5yqdg4flX3Wr/uh8buYY8tM5zunT3LWaEPHLd64SApV6aivdHrLKrtvc57l5HNSCj+1TT4ik0ua37uAtOPiZznmo/db7hDl+WaG3Q0A+5+oi/5LQOmRdBFDS+8Jqes68HkRcdQuBT+Z974lGZ2o36ne3/3Kv+HMp1zkXLWSjwqvnCtUY1ti88pX2mTNEX9+IvgCQxSvq7mebeIrq9Gi4UeNWxpuMinj5xfTHf445lORx3WCv/e750nSX9bEnvv3SMqSlr/SYpkH7yMW0z96nx9ep7OC+KvRf5dur37kCtmJP+zhoyaryvXdtim8ZWz/3Nsri750lzT5v0HjXpNlcsPWMQQAABBBBAAAEEEEAAAQQQmPcCJQ286sFgg/r1cm7VQfvu4psBVbOb53S9xT8UCs2r6kGymp1VrcTwcCo8fNVDNT1Q1kOE9V0zwf9ztRj0QFspGsjS9/CgXsMK1q7iArsfu6Zo9XBEZet28amV1mDVsvGH9qEsuR5KFVvjVfmGBzjapnXXXN0/jFG/mtpuPTy7/MyO/oFK2I5cAYdQIzE4KV89kLmmxz0a9E3LbeMC2GqyMV/K9YA/jIvuQz1gUYArJAVNzjvpqKyamSGgonlC7eMwf77PPvc/YWPeft92a9nMDjtg93yz+fGhBpC+KGi6SsMV/PaqSUaleEBW/fGpqVglPaRa2gWwRk/4wAfodAxd72pVL+UCDSFFH/wr8LdOo9Xc8TbT9T38md8vWkZNPqq5tXwpXxAyzB9eFNB31U5a2eWl2qiqvSBvPYiM1wJLYqT9dJY7f/RQXseo1iFfHVd6yHrpGcdnPagLgRGVRy8/rLn6KvbzL7+5APtn/lyL1pLRPLlSZduca5kuN9/pmkz8tkL/sXrweoWbpuZe5aHyL+NqLU/8ZLJNc32a6Xy56NT2vpwh32hNEzWLGk+LLbaI329hfKi9rO+6NmzgAjSTXdOMoUnJ9ofs52uShPnzfebq3zHMG/qs1jZsuckGflvecrUIFRxWk4+XumBp9NgLNUe0vMwVfFTT2Urx5jN/cvvnvCu7+/2j6bm2ecN1G/law5pe3VLY99HaSPEy6hqm/aGmU1tuu2Xmep7rOqhl9bugGn/6VPBwQ/f7EB5Kx6/hmj9XjVeNDzWllMfmG65rP/0yu5nI8NsTDbxGg2NqDWGdNVfzTXxqvFK0Rmeh8vmZY//LdQ2OzWInHXOQ76s5BEA0Xedwfdfc9CR3vuicV3Dzchdw0UNyJdXGVS2ocH5tvP7a9uPPv/jvmh6aSFXN1/DykoKvut6r5qCaWw4W0cCrapOplrKSanPv7q7nW7u+e8P1JfoiiF6K6XJTXx8UDr/lPzoz5a10wpFtbJvGm/jhQr+3uQKvve973AWNP/DnW67gl890zv/yBQ/nZuBVRXng8Rds4PCx3lW/OUqqma9jJleNthCs0X7oc62rbb3wwn6ZcEzrS/tD97cdXL+kSVKSYz+fXVhfkuNBy+QKvIagtKbrmNJ1c+LHk+23P/7wNjp2QlPDmicE9DWsponloyZe5RgPROcrX/it1XW7sWsxRTVkdX7pnI723ZnrXic0DR1/oUvlUQrndDSf2VMq/l/3wQqYqpUHtVKx1WYb2V+uj9Zx7uUsndfqe/j8k47OBDP1cpiasNc0XYuabLahTf7qm0xftW323sn23fW/bjfUV6ZerNLvUTgHdY3QNoTrV6ejDvTncMXSzR4Trjv57hFUprO63OLXoTLJ8yd3D6nmm7VPdE04vl3rrPuRJEYqRZLfzTT3FvFtD/ft4XcpPj3X99Dvr661CuhHU7g+alwx91+hiwzNr2v7wnPOfX0PKfwuhO9J70XCctFP1a5X9yKHunt0XdujKek9bdJ71DTbHC0fwwgggAACCCCAAAIIIIAAAghUB4GSBF5VW1U1IgulY9ruZy2aNc409xZvSm7U+Pfs9gee9A+Ebr7ibB9EOu3S630g6cgD9zY1w6bAhJIexu2zS/OcgRI9xHll0Ej/gDeURw/k1JxtqMEXHjDnCqRqmfAwMjTdWmj+fIHX0Lzjda7WWuiDUQ+BHnU1dVWrLRrQ1IPWw1vtkandNsY9RO7jHia33LaJC0LsEzbDf8pBtXdP63CobeGamAtJDxVVq0nBNwVlurpaRflS2F/RB7xhXHwZPYxU37gKfqg8iywy+4FvmE8PzhVUUROb1196euZhf5ge/1RtjPNcU7d6QHqLa9ozGoiKzxu+q2z3P/6837YwTg8mO7iaBOo/K54GuSaMH3ryRf+gL0zTQ0A9nAoPucN4fSq4cOdDA7KOGZVPx03Hdm38Q9Do/PHhYoKQ2gbVutXDx5AU3DilfVsfCAzj9JnGSLUcet7b3z/wDHnpYe9ZHdtlamWG8fpUIPJ210+ygr8hKfC0q6spVyhoH+YtZpvDvOEzNMmYK0CvGg5q4nfomLez9rNeSFAtn3DuhrzU5GvUMowPn9p/8T5hVevm1rsezTr3tM0694ptOrtQ4FXl0fn92pBRoRj+U8eRXpSINmOrCaptpRqGChpF00H77GJ7u+tbNKnsXW7uGx1VYVjrydXPb4UZ5/KIX35zfdxefpNfq4JGISAYL4ZquqnGW+jfstB1MCw75u0P7F5XK1rBBKXQh2j8Gq5pui7c75o432fX7TN982q8apSp6fvPv/hGX33StW5J19+drqlHuWvwju57SLpePPL0Ky5I/EXmGNRv0kmu9mn8BYp85Qt5RT/zXYOj80SbPVWfiS+58n317VQ/i4IpemlBAYJQuyosq1qTarpcARAFapRUq1oP1bfbarMwm78uqGaggj9KOo+OaLO3DRwx1gdqo7WzFWS5y+2vcPzusWMzO2T/3TNNnKsbgY4uyBKSHr6rqdDwUovG67dqd7dcNGBY6Pc2tJaxy/ZbW7s2e/qsQ/AxX/ArrF+f4brV6+rzMzX+NP7mOx62d91LUBeffqxvdULjlEJzttGgS65rQL5jK7wUFW9qPtQyHOXuBVQbUkm/UU3dS1OH7r+bC0xF2lR300I/ouqjXi8/hRT6qdT3aI3tML2yzyTHfj67sI6kx0PoizLeTYMs+w14OfM7EH671Ze5rte9rjovrNJ/PuECRDoXgqPuWXS/cugBe2RaSdGM+cqnFxPUgooC4eE3Rce9Ap/HuPvG0OpArnud0N9yruCU1hm6Q4i/SKNpuZJeOuh+V79M8FTzqCybuz5C9TsYv//StVXXLr3QFE26t9M9d+ww8vfQutcJL1mFZXSvcJzrQkIvYRVKoUngrVyQ96RjDs45q46p3u5eJLxEpJm0Dbqmtmu9V4UyJTVK8rupdSe9t9Ay0RS6fNDfEDquiknadvVBnO+alOT+S7VdFcgtlOItYCS9F8mVt5qqVpPV8ZZRwrxJ7m
m1TJJ71DTbHMrFJwIIIIAAAggggAACCCCAAALVRaAkgddybEyuWi96sKa3vRV0qyzpIdsv7gFQPVeLMdQOqWyZuTldTZn+34wZvnzxh6xpy6HArpou1sO5ubnNerClbSjVduTbftV0nvr9T7aGC8aF5mjzzauHc9+4JjenukBvA1cbTLWfKyuflvmfW8fCzi80/5gv/zTj5aQgyS8uqKHaPMUEnZOsRw+Qv3RBIeW/Qn23zTlqgsbz0zLfuf7zlnHN+xZzXsWXL8d31e5UkEY1g+P93pZifar5o23WMaGXL0qddO2Z8rUehNeytdyD7PjD8vj6fnTXtW9dM8+6tq3m+r8s9XERX9+C+P2ff2baTHd+5QvqFrPNajJSgSzVfF90kUUqXUTXi+9/+tnq1XW/MbEXUuILl6J88TzDd13z9VJSsceyarct4YLK8RcBQn76VH4K0NZbtm5W8Co6TxjWQ379linwW9k1NiyjY17HeQhqhfFpP+Vb2T5Im3e5l5O1ArHxYHm51xvNP+mxH102PpzmeIjnoe+636u1UC0fkM41PT5Ojvo9U6C2UCpUPv32aGfoGpArxe91FFxTkE39pav2YqmSfkMmf/Wtv4/TiyiV/Q7qfvlz93LOIi7AuepKKxZ1LOkFL1071Ox/Zfmn2S69vPHlN9+5sizlXi5r6K4NaXLJv0ya381y31vkL23uKeW+/0p6L5K7lPnHJr2nTXOPmn/tTEEAAQQQQAABBBBAAAEEEECgegvMV4HX6k1J6RBAAAEEEEAAAQQWdAG96NbxvKt80PK2ay8qeWBxQfdj+xBAAAEEEEAAAQQQQAABBBBAAIEFWYDA64K8d9k2BBBAAAEEEEAAgZIKTHE1UtVn6kbrrWnndDqypHmTGQIIIIAAAggggAACCCCAAAIIIIDA/C1QbQOvapLqtSGjfZOy27q+4kgIIIAAAggggAACCMxrAfWPPu69Sa7f+FVz9h0/r8vH+hFAAAEEEEAAAQQQQAABBBBAAAEE5p1AtQ28zjsS1owAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggkEyDwmsyLuRFAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAIEKAgReK5AwAgEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEgmQOA1mRdzI4AAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAhUECLxWIGEEAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgggkEyAwGsyL+ZGAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEKggQeK1AwggEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAgmQCB12RezI0AAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAghUECDwWoGEEQgggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggEAyAQKvybyYGwEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEKggQOC1AgkjEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgWQCBF6TeTE3AggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgggUEGAwGsFEkYggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACyQQIvCbzYm4EEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEECgggCB1wokjEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQSSCRB4TebF3AgggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggEAFAQKvFUgYgQACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCQTIPCazIu5EUAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgQoCBF4rkDACAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQSCZA4DWZF3MjgAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACFQQIvFYgYQQCCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCQTIDAazIv5kYAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQqCBB4rUDCCAQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQCCZAIHXZF7MjQACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCFQQIPBagYQRCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAQDIBAq/JvJgbAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQqCBA4LUCCSMQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQACBZAIEXpN5MTcCCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCBQQYDAawUSRiCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAALJBAi8JvNibgQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQKCCAIHXCiSMQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBJIJEHhN5sXcCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAQAUBAq8VSBiBAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIJBMg8JrMi7kRQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQACBCgIEXiuQMAIBBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBIJkDgNZkXcyOAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIVBAi8ViCpmSOmT59utRZayJaoXXuuAPz4409Wt25dW3jhhebK+lgJAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAuUUIPA6R/fTzz63mf/OrGC9RO0lrMEKy9viiy9eYdqCMmLCO+9a67aH21JLLWX9H77fNlh/vbJu2gMPPWKXd73KVlttVXvluQELtG1ZIckcAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEECg2gjMk8DrqNFjbJutm1YbBBVkrQ02LViehg0b2mFtD7KDD2xtDVdsUHDe6aWz+gAAQABJREFU+W1i79v72g03d/fF7nzJRXbUEYdVeROmTPnCZvzzjwtar2DLLLN0Vn5HHdvRhg4f4cc9+2R/23ijDbKm8wUBBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQACB+U1grgdeDz/qWBvpAq/97r+7WgVfKwu8hh2rWqHdb+xmO+/YMoya7z+//uYbO+eCS2zRRRax66+9ylZssEKVt2nTJs3sjz/+sK6dL
7V2h7bNym/Y8JF2VbfrbbNNN7Fruna2WrVqZU3nCwIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAALzm8BcDbyqputhLvAaUnUKvobA6w0u8Nim1f6+iP/++6998tlnNm7cBB8sfua550PR7YG777Tm2zXLfGcgW6BQ4DV7zgXz28+//22PDPrU1lxpadujyWp+I7/6/g+766UPreVmK9uO7h8JAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEBgwRGYq4FXsXXv2cf9650RrC7B11yB10wh5ww82O9Ru+yKK/23+vXq28BXnrM6derEZ/PfZ82aZT///LMtscSSVrt2cf3Dapnvf/jB6i23nC288MI5842PnP7nn/bn9D+tfv168UkFv//kyqa03LLLFpwvPnHmzH/thx9/sGXr1rXFFlssPjnzvRSBV63rl19+sXr1lsvkW+zAL7/+6su3RO3axS7i5+v5zHv2yMBPCi4zoPOetnzdwvlOmfqbtbvmdWu5+cp21TFb+/zufWWS9X3xQ1utQR3rd+GumXVM/flP17/wLFup3pKZcQwggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgjMXwJzPfAqnuoYfC0m8Bov+43drrbWB+yXtcenTptmPfvcYQ8+/EhmfIvtm9sO7t9hbQ+0JZesGFx7a/zb1qPXbaZPNc+rtPFGG9mxRx9RIX9Nmzlzpt159312/0P97LvvvtMoUyC4cePNrcMxR9o2Tbfy48L/tttxN5vxfzOs+03d7MMPJ9mjjz9pn3z6qV9mzPBBNnnKFDv4sKP97C8+84Qtv3x9P3z2+RfZm0OG2zFHHW4H7LevXXfjLfbcCy+GbK1pkyZ23tmnW5MtG2fG7b5PK/vpp599cDaMVNmU+j1wt62z9lo24Jnn7Kprb7BGjVa3/g/fH2bzn/+4fmEf6f+EDR8xyoYMG+49VlttVZPh7rvubDs03y5r/g8nTrKjjj3Bj3v9pWftrnvvtxdfftVvn0Zq2TNPPdla7b9v1nL5vtz85Dv2xJDPbOX6S9lyS+cOLHfr0MyWrVM4mJ4r8Pq1q/F698sTbYdNV8qq8br7hc/Z9L/+scE3HmALL0Szy/n2DeMRQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgeosME8CrwIJfb0GnHld87XYwKtqYa678ea+2Du1bGF33d4rbIJNnz7dDjrsSJs46aPMuOjALjvtaLf1vCWrNuvjTz1t5114SWY29SEbgq8a2e6wQ6zr5f9NV63YCy/tbI+54Gm+FO9XNWyb1v/6wEGZxRQQVeD1o48/sT33a+3HDx/8ujVcsYEfbt/xJBv85hDbZ689bOSosVnB1EwmbuDFZ5609ddb148KNV2j08PwC08/YRusv5499Mhjdmnnrj4oOvjV/wK52rbzLrrMnnhqQFikwmefHrfYHrvtkhn/9oR3rc0hh/vvB7ZulXfZzpdcZEcdcVhmuXwDIfDa7bhm1nzjhvlmq3R8rsBrvoUIvOaTYTwCCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgggMP8IzLPAq4iqU/A1BCejfbzm243qp1b91YbApeZTLdROp5yRCWxed82Vvnbm33//bS+98qpde/1NPrtjjznKLrngXD+sIG6zHXb2AU0FRbtd1cU3q/u/qao1e7s95Jo2Vhr48vO2xhqr++FbXc3YW3rMDvYecfih1vbA1j6AOXLUGOt7z3321rjxfr5BLqC5uqvtqRS2TcObbbqJndjxONtoww1sIVe7cpWVV6408KrllC4+/1zbdecdbcmllrRhw0fapa7ZZQWJGzZsaM8P6O+bLf7zr79slusbd5Mtt8ksc9ghB/nhJZZYwmrVqpU38HpT955+uzWznFrvv5+tsfqqNn7CO9az9x025q23fD79+z1gTRpv4YejgVeNOOH4DnbIQa39dk145107+4KL7csvvzIFtEcNHWhLujIUSmkCrz/+9reN+Wiqvfv5j1Zv6cVtJ9e8sGzjTQ2rSeHXx39t661a15qsu4Kb/wd7b/JP1ss1b6zUad+NfY3XXbdc1VaopCnjQtvANAQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgbkvME8Dr9rc6hJ8DcHJYgKv3VyTu7ffeZffWx+/P8HVYF3IRo0Za4cd2d6Pu6N3Dx+g9F/m/K/37XfaDTff6r+9N360DwC+98GHtn+btn7cM088aptsvFFmkRkzZtjd9z1oCtzutstOtuEG65uCmhtv0dTPs/eee1iPm6/3gcyw0LTvv7ed99jXB0M7dexg5511hp8Utk21TdW0r4KQ0VRZjVfNe9F559hxx85ujjgsq6aAj+4wu5nfLq5W7hGudm5IoeZrvPatpueq8RrdNjXfrGaco+mnn36yfVq39U0r77rLznZHr+5+cjTweujBB9nVXS+PLmaDhwyz9sd38uOefbK/a8J5g6zp8S9JA68fTPnJOt4yOJ6NHbHLevbg6x9l9fE67pPv7bReQ61ty7XttFab2l0vTbR7XNPD8dT71B1ss7VmN88cn8Z3BBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQACB6ilA4HXOfgnByWICrw8/2t8uubyLX/KdsSOsTp06rm/RB1y/pde5PkzXtleer9hU7i+//mqNt27ul3ni0Yes8eabmWq2bttiZz+u7UFt7LKLzs/ZB+ycIlo0yDh04Cu28korhUmZzy9c7U4FKZdddllXW3Q1Pz5s27kuEHuiC8jGU2WBV9XsHT1sYFaQN+RxzHGd7M2hw1z/tQfbVV0uC6MtaeA1um0j3nzDVmywQiavMBDcozWNo8s9cPed1ny7ZmF2/zn9zz9tk8Zb++Hbe93qg9hZM8S+hMDrNR22seYb5Whq2HXBupCrtav0w69/2dE3DLSfXY3XQ3dax/bZenWbMXOWPTNisg0Y9rmfp6Wr/XrVMbPXHw+8/vV/M2363zPs0Ktf8328PtV5T1fj1azuUovT16vX438IIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAwPwjME8Dr9Wltqt2VwhOFhN47XrNdXbPfQ/4vfzph+/4gOQZ51xgzzz3vB+nmpe50iP9H/ejo7VDQz+qYX7VZN2m6VY+gLjWmo3CaP/5oGt6+DLXvK9qrL771sisaYW+hG3re1tP23nHlhVmrSzwGu/LNppBqP278UYb2bNPzm4aWdOTBl5DLdhC2zb2rXHWtt3sWrehL9po4FVNCa+w/PLR4vnhsP29ut9ke+2xW4Xp0REh8BodFx1uvM7y1uPk7f2oZ0dOsW6Pjredt1jFuhw9uyayJvz7r+urtu9IG/nh/wrWeA350sdrkOATAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEJh/BeZZ4LU6BV21+0JwrpjAa5tDjnC1Tyf4vk2HD3rV7/3d92lln3z6aVFHQscO7e2Cc8/y86qJ3etuuNkef+pp30RwNIMmWza2i84/x9eO1fgQ8FU/
rQP694vOWnA4bNtdt/WynXZsUWHeygKv7Y8+0i698LwKy2lEv8cet4svu8JP+2ziu5l5kgZei9m2b7/9zprvNDtw+vD9d1uzrZtm1QIeP3qY1V1mmUwZwkDY/iSB17VXXsYaLFuxP9i1V6rr+mKd3ST0jU9MsKeGfm7XHd/MtovVjn1t/FfW+f6xBF7DTuATAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEFjABeZJ4LV7zz7WvWfvDG0/F0TbxgXR5mUKwbnKAq/qc3XDzbfyRW21/75203XX+OGOJ59ur73+hg/Gdr+hW85NUb+tiy66qJtnRVtt1VWy5lEAduTI0Tba1ep84aWX7UvXZHBILzz9hKl/1qrWeE0beI32qRrKFD5v6t7Teva5vUITy0kDrw889Ihd3vWqgrV5x094xw48pJ1f9ZA3XrZVVl65bIHXbsc1s+Yb52hqOGy4+zzx1jft3c9/tEcu3s1WXT7Wb+7Xv9ixrhniQk0Nh6yo8Rok+EQAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEE5l+BuR54rY5BV+2+YgKv//77r5113kWZJoXv7NPDdtlpR7/3e99+p91w860Wb3LXT0z4P63ntTcGWadTTvdLnnHqyXbayZ0sGngcNvBVW2mlioFB9Wn6twvi1l5iCVuidm2/fNi2tIHXaJ+q8U05/sRT7fWBg+zA1q3s+mu6ZiYnDby+Nf5tO/iwI/3yY4YNtvr162XyCgP9n3jKzr/4sqzgbLSp4VLWeC0m8HrB3aNs6Lvf2p1ntrQNV18uFNN/jv14mp3RexiB1ywVviCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACC67AXA28Vtegq3ZvCE4WqvEamsPV/AqwPvXYQ7bIIovoqw16c6gd2/FEP9zvgXt8P63+y5z/qUbrHX3vcf1//uv7GVUN1pGjx/gam3XqLGWHH3KwLbTQQtFFLDRffPihbe3Kzpea8th4i9k1g1sfsJ/d2O3qrPl//fU323P/Nvbdd99Zh2OOtosvOMdPD9uWNvCqTLpd1cUOPrB11vqiwdLOl1xkRx1xWGZ6CLwee8xRdskF52bGayD057raaqva4Fdf9NP++OMP3y+svhxzZDu77OIL/Pjwv99//90OOOgw+3zyZIv2OTsvA6/3vjLJ+r74oZ3bdgs7YNtGoaj+86E3PrY+z76fKPA68Ib9bdGFs4+BrEz5ggACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgggUG0F5mrgVQqhb9fq0LxwdK+E4ORZp5/qA6OaNvPfmfbRR5/Y2LfG25Bhw33QLyzz8nMDbN111g5fTTVND2l3jL3/wQe+RqaCottus7UpqPrZ55Ot69Xd7M2hw/z8I958w1ZssIK9MWiwHdfpFD/upBOOtxM7dvDLasRTTz9rZ59/kZ8WDZh2c/3B3t737swyh7Y9yFZq2NAmTvrIuvfq45s71sTXXnzW1lqzkQYzQeVoPn7CnP9V1sdrmFfNKquP2MUWW8yZjLOzzr3IfvjxB1/mV194xhqu2CDMansfcKAvU0NXth43X29bbLapLbzwwn56rsCrJnTueo3d/9DDfp5zzjzdWrumnBs0aGAfTppk3Xv09jVrNfG+u263HZpv5+ebl4HX0ZOm2lm3Dbclay9iD56/S6ZP2ClTf7N217zuy1dMU8Otr3jZpv38Z86asz4T/ocAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIFDtBeZ64FUio1xNz3ndp2t8z4TAa3x8/PuajRrZHb1vtbXXWjM+yaZOm2atDj7c1zitMHHOiOuuudIOan2A/zZz5kw78NAj7J1338vMvs7aa9u3rsaqaoAqbdtsG7vnjt4+2KnvqjF7+tnn2fMvvqyvOdPpp5xkp58yu/atZgjbljbw2rLFDj7QGsoUX2n/fg9Yk8ZbZI2+7qZb7LY77soa9+IzT9r6662bs8arZpTHyaefba+8NjtombXwnC/dru5qB7dplZlUrsBro4ZLW72lZzfVnFnZnIGuRze1ukst5r/d8PgEGzDscx98bbbBijZz1iwbPOEbW2HZJXwwtZjAa2iyWAHcHTZZydrvsUGFPmPjZeA7AggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIBA9RKYJ4HX6kUwuzQhOJmrbAqGrrfuOnbAfvvYji22t0UXXTTXbH7clClf2H0PPmyDhwzLqiHbtEkTO/P0k63Z1rObCg4ZzJgxw3rddqc91O8xX3s0jFczvIcc1MZOOO7YTE3RMO3vv/+2m2/t5QOY0WComj9Wrdm999w9zOo/w7bdfUcfX/6sie7Lx598anvsOzuYGWrjap72HU+ywW8O8WXYf9+9rYurtaugeUgq42UXnZ/p5zaM16dqAPe9+z574KFHMtv1/IDHbcMN1rd+jz1uF192hSmI/fpLz0YX880p33nXvb6G8VvjxvtpSy21lDXftpnts9cett8+e2XNr6B1q4NnN3E8YcwIW3rpOlnT9SU0e9z71pttz913rTA9OuKWp96xx9/8LDqqwvCAznva8nVnB2VnzPzXbnvufXvj7W98oFUz77LlqnbQDmvZid3ftJ22WMUUqFUa/+n3dmrPoXbIjmvbqQds6sfpf19O+92u7jfO3v38Rz+u96k72GZr1c9MZwABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQKD6CxB4LeM+Ug3YP6f/aSuuuKLVrr14pWv65ddfbdq0732TvXXqVAwgxjP4559/7H9Tp9rff/+frbLySrb44pWvI55Hoe/RwOv555zpZ/3tt9/t62++seWWW84arLC81apVq1AWvobuny4Iq6QAapI0ffp077HqqqtUCD4nyWduzTvVNRe8zJKLWe3FZjepnHS9M/+dZTP++Tf18knXx/wIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAKlEyDwWjrLBS6nXIHXBW4j2SAEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEESiBA4LUEiAtqFgReF9Q9y3YhgAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgiUWoDAa6lFF6D8CLwuQDuTTUEAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEECirAIHXsvLO35l/8eVXpj5dl1++vq3YYIX5e2MoPQIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAJlFCDwWkZcskYAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgZohQOC1ZuxnthIBBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBMooQOC1jLhkjQACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACNUOAwGvN2M9sJQIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIlFGAwGsZcckaAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQRqhgCB15q
xn9lKBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBAoowCB1zLikjUCCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCNQMAQKvNWM/s5UIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIFBGAQKvZcQlawQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQqBkCBF5rxn5mKxFAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAoIwCBF7LiEvWCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCBQMwQIvNaM/cxWIoAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIBAGQUIvJYRl6wRQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQKBmCBB4rRn7ma1EAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAIEyChB4LSMuWSOAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAQM0QIPBaM/YzW4kAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAmUUIPBaRlyyRgABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQACBmiFA4LVm7Ge2EgEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEyihA4LWMuGSNAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAI1Q4DAa83Yz2wlAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgiUUYDAaxlxyRoBBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBGqGAIHXmrGf2UoEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEECijAIHXMuKSNQIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAII1AwBAq81Yz+zlQgggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgggUEYBAq9lxCVrBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBCoGQIEXmvGfmYrEUAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEECgjAIEXsuIS9YIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIFAzBAi81oz9zFYigAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggEAZBQi8lhGXrBFAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAoGYIEHitGfuZrUQAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgTIKEHgtIy5ZI4AAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIBAzRAg8Foz9jNbiQACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACZRQg8FpGXLJGAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAIGaIUDgtWbsZ7YSAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQTKKEDgtYy4ZI0AAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAjVDgMBrzdjPbCUCCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCJRRgMBrGXHJGgEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEaoYAgdeasZ/ZSgQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQKKMAgdcy4pI1AggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgjUDAECrzVjP7OVCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCBQRgECr2XEJWsEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEKgZAgRea8Z+ZisRQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQKCMAgRey4hL1ggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgggUDMECLzWjP3MViKAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAQBkFCLyWEZesEUAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEECgZggQeK0Z+5mtRAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQACBMgoQeC0jLlkjgAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggEDNECDwWjP2M1uJAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAJlFCDwWkZcskYAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgZohQOC1ZuxnthIBBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBMooQOC1jLhkjQACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACNUOAwGvN2M9sJQIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIlFGgpIHXH3+dbt///Lv9+fcMm1XGQpM1AggggAACCCCAwLwRqOVWu8Tii9ryy9axesssWZJC/PrHX/bdD79yD1kSTTKZVwLlODf4+2pe7U3WW6wAx32xUsyHwNwVKMe5yf3a3N2HrC2ZQDmO+WQlYG4EEEAAAQT+EyhJ4PX/ZvxjX039xRZeuJYtX7eOLbXEYv+tgSEEEEAAAQQQQACBBUrgjz//z77/5XebOXOWrdqgri226CKpto97yFRsLFSNBTg3qvHOoWhlE+C4LxstGSNQJQHOzSrxsfB8KFCqY34+3HSKjAACCCBQzQRKEnid+tPvpjeLVliuTjXbPIqDAAIIIIAAAgggUC6Bae4e8Lfpf9taq9RPtQruIVOxsdB8IMC5MR/sJIpYcgGO+5KTkiECJRHg3CwJI5nMRwJVPebno02lqAgggAAC1VSgyoHXv11tVzUNt0bDetV0EykWAggggAACCCCAQLkEpnz3oy29ZO3EzQ6rCdXfpv/FPWS5dgz5znOBtOcGf1/N811HAaogwHFfBTwWRaCMAmnPTe7XyrhTyLqsAmmP+bIWiswRQAABBGqMQJUDrx99MdVWWWFZmheuMYcMG4oAAggggAACCPwnoCa9vp72s623eoP/RhYxxD1kEUjMMl8LcG7M17uPwqcU4LhPCcdiCJRZgHOzzMBkX+0E0h7z1W5DKBACCCCAwHwpUOXA64SPv7bN111lvtx4Co0AAggggAACCCBQdYE094Nplql6SckBgbkrkOY4T7PM3N0q1oZAYYE0x3CaZQqXgqkIIBAXSHOepVkmvl6+IzCvBDh+55U860UAAQQQqHLg9W0XeN2CwCtHEgIIIIAAAgggUGMF0twPplmmxgKz4fOtQJrjPM0y8y0QBV8gBdIcw2mWWSDx2CgEyiiQ5jxLs0wZN4GsEUgkwPGbiIuZEUAAAQRKKEDgtY
SYZIUAAggggAACCNREgTQPNdIsUxNt2eb5WyDNcZ5mmflbidIvaAJpjuE0yyxobmwPAuUWSHOepVmm3NtB/ggUK8DxW6wU8yGAAAIIlFqAwGupRckPAQQQQAABBBCoYQJpHmqkWaaGsbK5C4BAmuM8zTILABWbsAAJpDmG0yyzAJGxKQjMFYE051maZebKxrASBIoQ4PgtAolZEEAAAQTKIkDgtSysZIoAAggggAACCNQcgTQPNdIsU3NE2dIFRSDNcZ5mmQXFi+1YMATSHMNpllkwtNgKBOaeQJrzLM0yc2+LWBMChQU4fgv7MBUBBBBAoHwCBF7LZ0vOCCCAAAIIIIBA2QQmfTrF5/30y4P958RPJvvPDdZpZOuvvYYfPmCPlv6z3P9L81AjzTLl3g7yR6DUAmmO8zTLlLrc5IdAVQTSHMNplqlKGVkWgZookOY8S7NMTbRlm6unAMdv9dwvlAoBBBCoCQIEXmvCXmYbEUAAAQQQQGCBEVDAVcHWEGitbMMUfC13ADbNQ400y1S2rUxHoLoJpDnO0yxT3bab8tRsgTTHcJplarYyW49AcoE051maZZKXjCUQKI8Ax295XMkVAQQQQKByAQKvlRsxBwIIIIAAAgggUC0EFHTt1uu+TFlUu1UpGlgNAdlQE9bPMGee6HxhfCk+0zzUSLNMKcpKHgjMTYE0x3maZebmNrEuBCoTSHMMp1mmsnIwHQEEsgXSnGdplsleK98QmHcCHL/zzp41I4AAAjVdYL4LvIZm9cJDxfDAMTSpV9N3KNuPAAIIIIAAAgumgAKpIZiq+x8FUSu7/4kuIxUtU47ga5qHGmmWWTD3LFu1IAukOc7TLLMgG7Jt859AmmM4zTLznwwlRmDeCqQ5z9IsU9lW6rleeKZX2bzFTC/HvW0x62We6i9QjuO3+m81JUQAAQQQqA4C803gtbJm9UJ/ZtxwVYfDijIggAACCCCAQCkFogFU3fOcd9JRibK/rvf9mQdc5598dKUB20SZu5nTPNRIs0zScjE/AvNaIM1xnmaZeb2drB+BqECaYzjNMtF1MowAApULpDnP0ixTqCTRe9pC8yWZVo572yTrZ97qK1Dq47f6biklQwABBBCobgLVPvCaL+AaarrmekuOm67qdphRHgQQQAABBBCoisCxZ3Xxi6cJuob1ljP4muahRpplwrbwicD8IpDmOE+zzPziQTlrhkCaYzjNMoU0u/fsY9179i40S95pp59ykp1+yol5pzMBgflVIM15lmaZQj7RwGt4rqf5o8/2ouOj0/KNL1eLLoW2g2nzh0Cpj9/5Y6spJQIIIIBAdRCo1oHX6A2ZsHQzpRuteLN6oZkSzR9STbvx+u2332zixInWsGFDW2211QIDnwgkEvjrr7/s448/tqWXXtoaNWqUaFlmrt4CU6ZMsV9//dXWWWcdW2KJJRIX9vPPP7fff//d1ltvPVt88cUTL88CCCCQXiB6P3T3TZelz8gtGYKvup9KWmu20IrTPNRIs0yhMiSdNmz4SHuk/+N2cqeOtsH66yVdnPnLJPDjjz9Z33vvs/Xc71Wr/fct01rmXrZpjvM0yyTZoqnTptmXX35lq666qq3YYIUkizJvAQGuKf/hpDmG0yzz3xorDlWHwOu///7rz7Uff/7Z1llrLfc3Vp2KBZ0Pxvz999/29Tff2o8//miN1ljDll++/nxQ6tIVsWefO0z78rSTO5Uu03mUU5rzLM0yhTYv3NfG70XzjVde4f41/pwv3/hC6w/TZs7817797jv73//+Z8vXr2+rr76a1apVK0xO/fnkgGfsk88+s44d2tuydeumzocFSyNQ6uO3NKUiFwQQQACBmiBQbQOvCqZ263Wf3we6IdMNVjzgGt9B8dqx82vN1z/++MNmzZpldeoU/4fZBx98YFOnTrWFF17YdthhhzgN3+ehgPblRx99ZN99+63NcuXYeuutbckll5yHJcq/6l9++cXGjx/vy6dykhYcgTFjxpiuLZtvvrktt9xyiTds1KhR9ueff1qTJk18YD5xBgkXePfdd+2HH36wtdde279QMmzYsArXtxkzZpgeBC222GL+X8JVlHz2b775xp/r8pXz8OHD7f/+7/+sadOmttRSS5V8fWRYcwRCbdf4w6a4QJivUHA2en9VyvukNA810iwT3+bwfeKkj2zvAw4MXwt+nnLiCXbW6afYYUcda6NGj7Gj2h1unS+9sOAyTPxPYPr06faDC47qYWI5ggiffva57bb3/rbn7rtZ71tv+m/F8+lQmuM8zTLF8Nz34MPWs/cdbv/9kJm9fr36PphwZLtDM+M0UKr9/NNPP9nvf0z3AV79Xs+LdPDhR9lb48Zb186XWrtD21a5CPm2iWvKf7RpjuE0y/y3xtxDusanTdts3TTtovane5n1yquvs2eef8Hff4eM1nQvtl7TtbNt3bRJGFXyz3zHZ5oVKTDVs8/t1vee+7K2Qy97tzv0YDux43G20EILpcm6ZMvMnDnTvvn2O6tde3FbYfnlS5ZvNKNNmzTz2//ZxHejo+fL4TTnWZplCuHkC7DmG6+88gVY840vtH5Ne+6Fl+z6m7v7FyPCvPp7Tfcel154ni2zzNJhdOLP40881V4fOMgGvvy8rbHG6omXZ4HSCpT6+C1t6cgNAQQQQGBBFqi2gdfw8DD+FlwxOyPcfKVZtpj8yz3P4EGDfICuZcuWRb9xpxqvkyZNshVXXJEar+XeQQny13555513TAGikAi8Bgk+56bA/BZ4fe+99+z777/3NXRXWmklGzJkiH+w06JFiwzbZ+5N4i+++MIaNGhgG220UWb8vBr41r1coetwCLwqWKxzvzqf8/PKivUWLxAeQmmJQgFVTde8CqxWVpM13CctSIHXzz6fbOdccIkYMuntCRP8sGqz1q79X03/1q4WpYJMqp3W/8kB1un4Y6nxmlGrfOCxx5+0Cy653M4+41RfW7jyJZLNQeA1XZ/JlSlfesWV9lC/R/1serC8hqvZ8/nkKfbKa6/7cUccfqh1ueziTDal2s9nnXehDXjmOXv0wfus6VZbZvKfWwOffPqZ7b7PAX51G7t7hWefnG1QlfXn2yauKf+ppnnQnWaZ/9ZYfYamuxcVD2l3jL3vXozWiw0tW2xvyy27rL3z7ns25q23fEG733id7bfPXmUpdL7jM83Kru52gw+6alkFjXdovp1N+fJLG/zmEJ/dLjvtaLfccO08fcFQv/+77rWfbdtsG3vo3r5pNrPSZQi8fm1brLtKpU7FzhDubePP6/KNV77h3jX+EmK+8YXK8tobg6zjSaf6WRRs3WevPXxgfdCbQ/2njvU7+/SwtdZsVCibvNMIvOalmScTFpTflnmCx0oRQAABBKokUC0Dr+GGS1uW5qFguWpzVEk6wcJpAq8JsmfWuSSggJACQ0q6oVdtQTVRVJ2DMNR4nUsHxzxYzfwWeA21+Ndff31T4HWQeyElXqO/ugVe1eqAyl3fNVW16aab2ogRI3yN3G233ZbmmefBMb+grDLcE8UfNFVl+8JDqvgDr6rkm
eahRpplkpQx1HIb9OqLtvpqqyZZlHkLCJQqIJdvFQReSx94VfPNW23Xwt+PPvHIg7beuutk+KO1xUe8+Uam6eFS7edSBoEyhU4wcPOtvaxH79syS7z03FO+GevMiBQD83qbUhR5ri+S5vqeZpm5vmFFrDAEdTbbdBO7t+9tWU2NPv7U03behZf4IObrLz1bRG7JZynV8RmuASrBM088apts/N9LjrpOdzzpNPfyxmQ77+wz/QtMyUtamiUIvCZzTHOepVmmUKnCvW38PjTfeOUV7l3j98P5xudb//sfTLT92hzsJ3e7qou1aXWA+xtzdq3tX3/9zc676FL/QlKL7Zv78zdfPoXGE3g1O9y1KqN+svO1HKDWCNQc/MP3312IsiTTSn38lqRQZIIAAgggUCMEqmXgNdR2jd9UJdkjhW7akuSj/h5+dn2yrLLKKv4B+neuDwg1HVmvXj3XbMgaPqtprp8kjf/nn3+srmt2Tf0l5WpKS0EtPZhXP4mqEbXCCitkvR2qfNRvimpNKakJH/UxoT5b1TTtZPeHjZrVVNObmkfNcKqvxQ033NC/mffVV1/Zsu5tWtV6jSatV3mrf0cFAFV2rTueFBhUvppPTQapn0+te5FFFonPmvVdwUT1C6qyKEDy9ddfezPVQpNFqQyTlE/zar1qaknbrLLIpqpJ+SrYo32j4Eqh9JZ7o/l3V+N1Tdefz+qrr25Dhw71x0jSwKtq/emfmnyrXbu2P8ZWXnnlrNrQUWPVsNN3za/jRvtQBvGk/lx13MpIx6uOcR1vxTQ1HN3nOg907OnY1XEj6+VdM08KksVT0m0J+1HHfePGjTNNWSXNR8unPXeTHHfR7Y0aaX+F80Lnicy0T7SvJrvzWjWjtW91XuY6N3XN0TbrnFfT1fLVv1zXGU3XfDoGlL+uSVqfjsd8TQ1Xdm3Sds3tpoZVc1TXI13fdE1T4HXRRRe15s2bewM1361rs/aPTHVdU9+1Otei9rmuSdoezaNjVtdkOclT9rlMi51X7moiOdTADWbbb799pddRlYmEQC6BJA+UdO+jpPunQim8oBZ/4FVomcqmpXmokWaZysoRnV5Z4PWt8W/bOPdvt1128n3WaVnVzJvmrrdHtTvMhg4fYSNGjbFF3X3Qrm6eJo238NeOl155zca+Nc79zi1iWzbe3HbfdZfMQ7uwfjV9+M5779uQocPtV3eN33abptbUNdWepOm6/02dZqPHjHU1pMa5WlP13EPvDX1to+h1KpRXtRVVA2n0mLf8tezEjh1CUUw1/kaMHG2T3L2atqGZaz5zpZUaZqaHAd2fvvTq667J9I/dve4M23CD9Wy7bZvZaqvOruWibbrnvgdtrGuyVbUkt99uW9th++1sFfcbt/eeu4ds/P3H6LHjvN8y7l5yu2Zb2+abbeqv4ZmZ5gx8//0Pfj7lqSYi99pzN1t4oYVpavjj0tYueuGlV+yUM862/ffdx9dOi++H+x/sZ4OHDLUO7Y+ybZpuVdR+/uLLr9wxN9Q++uQT3z3KRhusbzu22CHT/PTXrvl9rbf/EwPcMfipHdb2YGvUaHXbqsmW1njzzTJFUH+zOj51nK+7ztr++FRguBT97KmZ1GY77OybVr6x29V29vkX2UknHG/nnHlaZv3RAZ0DY8e9baPceffPjH/cS1QbWfNmzdw9fz0/W2XbFL+mDBz0pu/nT7UC4zWnvnH3OM+/+LI/v1QDOaRyeoR1zI3PNNf3NMvMjW1Juo7LulxlDz78iG9S+JCDD6ywuFoMmDbte7vu6q6ZY0szKegz7u0J/vhzN7u29VZNbMstt7C6yyyTyUP3rPc+8JC/Xqom7WuvD7Txb79jO+3YwjbacP2izrlMZgUGdL1fd+Mt/By9ut9ke+3x3zEaFnvvgw9t/zZtfa3ekUNe93/76dxRzV4d0/pbZMiwEfad+7y6y+WZvw2LPcZ1/qq51g8+nOju16e5VnDW8r9h+j0J6dnnX7R33W+tmkLW3+ftj2pntdx/upaFa0iS32PVVtbvqH7/F3F/yzbfrpn/3aXGa2l/k/I9q8s3Xvs73/1wvvHhGIl/djjhZBs4+M283UyomfAWO+/pfzdee/HZrGt3Meeo1hcPvP7inq/pRYY13N+pu++6c1aR9PLTkGHD3b1SM9t4ow38tHBvl/ZeNHqd2H23XeyNgYN9c/v6W1r3rXvstmvm/AiF0b2vavy+//6H7u/hRX1LMHu5e7sl3DOKpElB15GjZzfz3s8FVuPBVwVd1TS/ku5Jyx18XVB+W5LuB+ZHAAEEEJj3AtU68JqmtmsgDQ8V9b0q+eghuh6mK2inh/zRpICGHvQroBNNClQquBZ9MDbBNXenAFc8rbPOOj44qfHvv/++D5DG59l44439Q7QQuFNAVIEapRCI0B82H374YaaJy5BHvvWq7OqDMPTJoqCO1q+gTTQpeKaAV6H+ZhUUUl+Gmlf56UZPSUEMlb0UhknKpyDgJ+4hUDyF5j/j44v5rgDN559/nnEPtfAKLTtlyhRvEPpzDfsvSeD17bffrnDcaZ0K2m255ZaZP2ALGWv+zTbbzAemNKwkTzXlGk8KJut4V5lVznwp7HMd6zoGFfyKJo1XLb9o8DXptkSPc+WtvouVX9J8qnLuJjnuotuv4WCkMstDwd9oUkBx0qSJ7iF+9jnXyDVtpH8h6UWI8ePG+ebHwzh91nL/tnDnps7lkHT+jh49Ouf+UBkUbI/38ZrvGhG9Nin/EESsrI9XraOypAchCpYWSjqHdS6r5qiOS7UEsJhbRseVHqCo6eF4Up6aHrXPdU3S8iNHjsxcq0I+Mt3SBUZ07IWUZF79Ruj4VLBX14ixY8f6F22SNBsf1ssnAkEgvIxW2b1M9L6nsiaJk8wbylHZZ5qHGmmWqawc0emVBV5v7XWb3dKjl/XpcYt7CLWLX3S/Nof45iGPPuJwU3+Y0aTAjQJJehAcTW0PamPXXnlFZtRff/1tRx57vH/IlRnpBvTb/dyTjxXV39crr71hnU45Pbq4H26yZWPf72noxy6U9+ADW7sA11OZefo/fL8fvuraG+yue++rkM9tPbtnPfz7cOIk69DplAr3tCrz3bf39k3Ears22mKrCnlFm3ec8sWXtm/rg/2LPtEZt3D3nA/fd5fvgy+Mf3vCu9bmkMPD18xnJxc0vu2Ou8rSx6se0OrFhPXXnv3yZGalcwZ0bujBb2XNdceXK/Q9zXGeZplCZRjjAuGHHHG0reNe3hzweD9b0v39ki8Vs5/Vb+UZZ59fIQvlf//dd1jDFRvY8BGj7Ij2x1WYR0FPBT+VXnVBoxNOrhgEPfaYo+ySC87NLKsg5THHnejzvefOPln3l5mZcgwMH+nKcMxxpnP08ksutI23aJoVJIouMmXKF7avCyLpJbVoUlOxfW/r4V8eqGyb4teURx57wi66rLPFt0f59+h9u918a8+s2oLFekTLV8xwTT3uVZuqe8/exRBZvO/OtTaYHdzLFTSoLMNet91hN97Sw3Rd1m9DCAAWWm6y+7tR1/P48adr8FOPPezO
3bX84gr8bLH1dv5c1t/noUn90085yb3cs2XBcy7JeRSa6Fat3QH9++Ut+kuvvOr7cNYLTAoQd7vxFrv9zrvswNat7ImnBmSW+3DCWH/vX+wxrkBVp1PO8P2wZzKZM3DpRee7AOsR/lv4nY/P8/H7E9x1YiH3t0/xv8c/ufv4fVodXOF38HDXL/TTzz7v9038OImvN+n3mnpu5guw5hsv13wB1nzj8+2LEER/Z+yIvM+49BKNmuLXS0Jrr7Wmz6rYc1QzxwOvWnbnPfZ1L/HtbHf06p5VtH6PPW4XX3aFdbn8EjvisEP8tHBvl/ZeNFwn1NVGA/dM7s2hw7LWGX8Ja/yEd6zjiadl9f+uBdZs1MjX+g0v4GVlUuBLNLCq2aLX0ULTCmRZpUmlvqeqUmFYGAEEEECgRglUu8BruNnSXqjswWFle6rYh5WF8gkBLc2jGoH6p4frH7vaViFcorcrVctKAQcFs1Q7as0118zUiA0BBAVg1l13Xf9QXzVQJ7uabkohKKYaswpajh41yuetwFcIUCh4EAJ3WkY16BTYVNBLf3TlCryqRtg37m3zhRaqZWuttbYPyiqAqPUqmKCakKo9q6TAqYIVqqGqsitINGnSJFNNOAV2FHzNl0KQQ9O1rkaN1vTLKAiiWnylMCy2fNo+9amq1MjdKMpIQWpZaL9Et9nPVOB/CmLJTwFUbaOSguna1/IPQesCWWRNCvuv2MCrjhEFw3UMKIij/aDtU41b7T8dSzoelaLGqm2n7dQf7goWa1+GgJTmVZBUwTltn2oJhhqC2k7tb6ViA69+Zvc/1ajUemWt413HsoJXCtIppd0WBU21jXLX9qfNJ+25W+xx5zcy9r/oeaGgp/og1XZMnDjRB+M0u64JCsDKW+elal/qHGrRoqXPTdeU0aNH+eBs2Fea8KXrW0kBcs3btOnW/gUQjQ/N8+rY1Lktv3DMhJcqooHXYq9NyruYwKuOOTVpXFnSMa1gZKGka5qa65abHjopoKnryQYbzH4TWDY6ZlUrVjY6H+Qp46h9rmtSaHZZL84owKzrqNalQLvyaObeONY4pSTz6tzSb4Cuowq+fupq+OiFm622qhioKLTtTEMgKlDsvUySYGqSeaNlKTSc5qFGmmUKlSE+LTyQzdfUcDxIouXDwy5dd3recqN/4/+Fl162rld3y2SvoGXjLTbztYxCcHTowFdsZXfe61p7+tnn23MvvOhrwp50wnH+Pk0PbdXcqR5iPe0CX4VeaNMDsAMPaefX17Xzpabm7r50L6Lcdc/9vpZG9IFZKK9mVmBJtRVVdj0ke/jR/nbJ5V38OtV355prruFr8qgZSqVnn+yfqV3RzgWnRrggVYdjjrbDDjnI32c89cyzPgCqB3cvPP2EX0Y1VDX+mututBOOO9YFlI6c/RvtHrjrQXmbtu1805OnntzJDnC1K/W70KPPHa5W1htZtS1V42mfAw72D/mOa3+0HXRgK5vxf/9Yv8f628OPPObXpRpTvW+9yQ+X4n/h4azyyvUiQ/S8WNBqg0dr8Cg42sHtt5audqoCpLlSof2sWqHbttzV79vrrrnStne1or92v8UKluulhI4d2tsF557l7wUVyLi085W+hnTvW2+2Jq72no591aAJteW0/puuu8a2btrE3bdOsYsuv8Ld53zlash1tkPbzq4tqPPnzHMv8EV99YVnMg/Cc5U9Ou7cCy/1wZ8H7r7T11yLfw/zqpytDj7Mr7f90UdaWxcw++efmfasCzDfcdc9mWCtzu9C2xS/pvzs7qu33Gb7Ocu/4QNBYZ277LmfP1fCtSOJR8ijmM+afNzPq8Dre+9/YPsfODuAslPLFu44Pshfm/O1eKCmwBX018vcuq62PmA//4alar3pvNKzBv1u6IWbEFDRvtd4vaCw/nrr+qDnssvWLXh8JjmPnnvhJTvtrHNNQccr3e9QsSkEXjW/gkb77rOXL5uCV++7mquqIatU2Tnf+/Y77Yabb/X9tl543tlWb7llbaRrgeKcCy72y48bNdQ34azfnY8++sS/WKIXfG7vdYvVcv8tv3z9RL/Hql3b/vhOvqarro2ndDret4bzxqDBWb//pQy81uRzMzzzi//W5huvnR684q3i5RvvD5TY/77731TbruUu7lnJqjbYdUNRbEpyjirPUgVe096LRq8Tuu/UCyArut97vfhw1bXX+c3u3+8BX4NcX9Qntfqf1j3nQa0PcNeRX6xnn9v9/Vg7Fwzu6oLCSVOuAKvyCDVdNRwNyOp7uVK5/94oV7nJFwEEEEBgARBwf0BWKY3/6KsqLR9feMBLg2a1P/OKWd163ReflPi78lBeEz+ZnHjZsIAL4s0aOHDgLNf8ahjlP12gy493wYis8S7Q5ce7WmSZ8YMHD/LjXFAqM04DLoBWYV6NH+TWN9D9c4FCfc0kV8PLj9dy8eT+UPPTXHAiM2nQoNnrdQGazDgNuODCrMGDB88KZXdBPL/sQLdODYekYRfUmOWCXWFUzk8X2Mss72rnVZinqoZJyueaU/VlcUGsrHK4gKIf/+abb2aNz/XFBW1muaaTvdHAOfti3Lhxs1wAJdfsRY8L+889hCxqGR0vLvA7K37cuGCZ3xa5hhSMXZAojPKfKrO2QcdUSGF5F3wNo/ynjjfXJ6WfPxwbWTNEvkT3uY75aNL2hWNYlkpptsXVFoxmmzqftOdukuOuQkHdiKiRa/Y5M4tMtE/0zwVFM+M1EK4VLqjox4frSXxfaaJ8lIcL7vl59b/gHs/XBXT9vPF1hvXFj7Fc1yZXQ9TnkescDwVQuXUMVvYvvk/C8kk/te3aJveCQtaiUft4ebWtWkbXAveCQNZywdTVtPXjk8yblRFfECihQLH3MrrX0T2P/lWWSnmvFdaV5n4wzTJhfcV8HnTYkbPWXH+TWa4WZs7Z3UN5P901HZyZvm/rtn6cawI0M04Drdu28+MfeKhf1vjzL77Mj3/tjYF+vGt+1X/fa/82s/78c/a1PCxwzgWX+GkuOBVG5fx0/Yv5+Vzzr1nT3YO0WSqfqzUx6485vyuhvP2fHJA1r7644JjPJ779mlcuLniaWWbosBGz9C9676lr5FbbtvTz/vLLf/d3j/Z/wo9zD+Uyy2vgqaef9eMvvrxL1njXdKMvs9bpAlF+2iOPPe7ndc3fZs3rgl2zjjn+RD/txFPPzJpW1S/RcyT+90GhaVVdb5rjPM0ylZXT9cmY2Z/aF/qnY6TrNdfNck11Vlg8337Wcehqz1RY5qOPP/F56piMJhcw9eNd853R0bPCufPMcy9kjXd9Rvr5dQ6F5IK9s6669vpZvW/vG0ZV+qlltI2bbLlN5vde5dY49/JB1vJPDnjGjz/59LOzzgHNpHlb7LrnLNccdmaZfNuU65qiY1zrjC7vmv32445sf3wmzyQemYWKGCh0bBeaVkTWBWdJcwynWaZgIdzEkaNGF/Uvno/2mf5p+TTpxZdf9cuHfPSpc6PvPfe7v+++zcoyHH/upZ2s8foSfjd0PirpWhzynDx5ih8X/1++4zPJeXTtDTf79cR
/h+Lrin8Py6nc8ZTkGNc1Sedr9LdH+Z165rm+XAMHD8lkr2ubTA4/ukNmnAaS/B67Gr4+D53r8b+Let12p5+mdZQyFTr/Ck2rahnSnGdplilUznz3ofnGK69wP6x5oinf+Og8YVjHjfZj0vuLJOeo1nVcp1P8esI5Gn7Xjj/ptFCUzKd7Sc7P+8DDj2TGhXu7tPei0euE1h1N4TcpnNu61wvXlOjfxnpmoftC1/x5dPFEw7p+hrzjn2mvrYkKMGfmUh+/acrAMggggAACNVPAqrrZpf4RK3SzlXcCuf4AAEAASURBVLSsSW7C8uUdAlquNlTWLOGBv6u9ljU+BDgUAFRyNaD8Q3496FcwJPrP9fnop7makFl5hOBJ9OGXZgiBu2gQJywYD7xqHq1TyxSTQt7Dhg2bpW1VuYtNIciRL6hZVUOVo9jyhUCS9kPUWsPy0L/oDWV8G11zzZn5lJer9esDaPH50nwP21Bs4DW6Du0PV9PaB8FD0D8cY5ovGOcKyocAvKtN7bNUEFkOOv7iabK7Mde0JIHXaLA+5KdAofLRfoinYrfF1WaML5r1vdh80p67WlnYZ6U+L8I+ydog98XVsPVuejCipAClHHPtK70UoWnan0o6rvRdZc6V9LKFputcUJKfvodx0fMl17WpmMBrrvWWc1y4DucLvOa6Jrnawn6bZR3dZg2H8yjkl2Tecm4neddsgXAvo89CKfqQrtB8mhbuteIPrypbrtD0NPeDaZYpVIb4tKoEXv8X+/1S8EUPjSa8827WahQI0vinn3vej3/51df996M7nDDLNd+b9S88kFZwplBSYFV5hiBloXnDwzlXiyNrNldj0eehfOLlCA8e5RNPCnx++dXXs957/8NZo0aPzeTxVeSeIV9ATgE8ra/PHXdVWGco54g5QYxLr7jSz+v6HYsXYZYCccon6YPRChnlGBE9T0LwNde4HIumHpXmOE+zTDEF1L2g9r8CMyGoLmv9u+7GW7KyyLefozMpQKGHugqSDBsx0uejfKMpXxAovBjg+kuucLyEMsVfXojmW9lweBFAx2VI2n4FYpV/NLjSuevVflyu4zEsG/3Mt025Aq/KU+u78prrM1m4Zob9OJUxpHJ65DrGc40LZSnFZ5pjOM0ypShrrjzCMViV4IBeUtCLLuFlkpCnPsPLOlr3FVde44+HaDAxlEmBD80fXmgJARX9TuRL+Y7PfPPnGh9+r6LBoFzzxceF5eIBI82X9hjX77HrB9O/vHDAQYd6D1cbOLPqfIHXJL/HKq+cVf54UqA87Lv4tKp+z3Ue5hpX1fVEl09znqVZJrrO+HC4D43f2+Ybr+XD/XD83jXf+Pg69T1cj/WSTZKU5BxVvqUKvKa9Fw3Xifjvscrm+pr1x/NNt/bUV5/CPdqhR7afpZdGfvgh++XwMF+az1zB16pcV9OUodTHb5oysAwCCCCAQM0UqKXNrkrF3VI32xBt5qtUTQ3HmyNJsr0uyOWb9Iz36ekCXL5pSjVhqiYuQwp9Zy7jmlxTH5yhCeAwPd/njjvumJmkvgy1U+L9Aoamaps3b55pBjMsFNYT+jFVU0Vq0lTNeRXTzKX6kVRfjy6IFrL0/VKqSd211prdp0xmQmzAvQ3nmypWc6rbb799bOp/zeCmNVSGxZTPBVR9c8wVChAbEW1qNTbJ9yMZTojQt258nrTfw/4rtqlhrUdNJGtfuiB8hdWGY0wT8h2nmuYCcX6/at9oH7kgom/SOldfnS745JtqLrapYTXLqr5X4yk0eRtt2rkU26L1VDWfYs9drauY407z5UqFzgsXBNVLLxY975WHq3Hsm4Zu2rSpbyqy0L5ygVbfDG447yo7510w3TczHY7/cM3IVfbouFDGsHyu4yY6/9wcDvtSzVyrSeKQCtmr+W41WV0oheM/ybyF8mMaAlURCPdF8ebY4nmG+TS+svun0HxxVe6P4utPcz+YZpn4egt9r0pTw+++NdJfh0P+Z59/kbkgSVbzvJp2e997rNsNN9ktN3az/ffZ2/UnWHmfgnvvuYdrxviGkHXWp5qEDf1Qjhk+KGtari+hqeF4eUPflrmWiY4LzSZ+7bpW6Nz1Ghs5eoxvQjY6j4aHvPGyreLuCZXcQzu74JLL7ewzTrWTO3X04/S/4J0ZkWNATR4fcfihmXkHvvx8hT5v3/9gomvy+eCy9PGqIkXPlXgRczVBHJ8n6fc0x3maZZKWS/O/8+57vi9jHdtK0b5/8+1nzTds+EhzAQrfH7K+R5OaRtTxGJKatlaTqY8+eJ/vK1jjw3Ee5sn3qSau1dR1mqTmDNXUoZpZ3KH5dpks7n/oYT+sPpt9k67uWzh2cx2PmQUjA7m2SZPjTQ1rnP4+aeyaG669eG0bNfQN31XJ7vu0sk9cdwTvjRvlu5uYGx4c99obxaeq9PGaay36G/uNQW/6pqvfGjfezzJq6EDffHCh40/X5h123sNCX6sumOv7eA3fc60r3/GZa9584559/kXXbP55lrSZ0dDU8D133mYtd2ieyT7pMa75u11/s7mAaIV+J5Xpzddfawfst4/P/7PPJ9uue+3nmyV+6N6+mXUm+T2+2fX33sP1+67m09XUajTpb7a1N9xs9romvhudVJLhmnhu5mtSON94QedrUjjf+Fw759tvv7PmO+2WuKnhJOeo1luqpobj93bF3osWuk689obrEuCkU01dQpx56smeSX3QdjzpdP+7FNx0jWnnmho/qE2rovqpDsvl+ow2Ozy3mheOlmNu3VNF18kwAggggAACEqjWgdeqPPyI3sBW9gCy0KGQL6AVHvhXFngNgSytQ30JxpP+GFdfgsonpFIEXtWvoAKp0b49Q/6FPtVvpKul6PuFVH+zSuo/Uf3Q5kuFghxapqqG0fUWKp/+KFJASymftYJUCtKoH8hcSW4K7LnagH6y5lfwUH02KshYlZQ08Opq3/rAvfrDVJBVQXUdKwoGKmiWNvAaAmibbrqp1a9fP2uTQvAuBJ6yJka+hH1ey41rGXlpIMyiY0+W2g+yK9W2lCKfYs/dsC36LHTcReeLDgejEBiNTis28FpoX4Vri/op3Wabbfw562ps+n5Q1UdpPIUgbgi8huU1X77zJXptCmWZ3wOvrhlxczV6/TVA/RtHU3jBQcf/8q4frSTzRvNhGIFSCkTvZwrdF2k+V0vCr1oB1XwpPNTS9KrcH8XzT/NQI80y8fUW+h4elKXp4zXtwy7XdJt1vvJqa+r6OO/Q/qis4umeT9eZVVZZ2Rpvnvu+Svcym221rQ9+fvz+25Xee+QLvIbgpQqgYFo86eWdunWXsZ13bOnX1azFLv5z22bb2E6ufzuVcRnXV/uRxx7vFy0m8NrplDN8X55nuAd58WCZ1qeA3PrrruMDrR1PPt33+zqgfz8fUIiWb/iIUXZE++PKFnjVuqLnVVh3ofMrzJPmM81xnmaZNGULy4SHuccec5TvM1Lj8wVeFTTSuaW0z1572HbbNnN9mNazpZeuY+2O7uD7nRw+6FU/Xf/LFQTSebDORpv7efIdnzpemm3d1N
3vLp3Jq9iBb1yfs9vvtHvB2Zts2dj6P3y/n+eUM84x9eX8xKMP5T03o5nl2iZNzxV41firu91gfe+5z9Snns6rPfZtZQe7fmS7XdVFk/11oZwefiXufxz3QaLyz1IHXsMaFYDdYZc9/Yu14UWHcO3MdT0M1/JddtrR7uzTI9PHa7kDrx998ontuW9rU7+pTz76YCh+hU8FSGe63zadr/qbNV/gNek5377jSTb4zSGmPqn32mM397mWu8Ysbffe/6C5JoiLCrwm+T2+78GHzdVqtIvPP7fCb3c0aBxeVqoAUcURNe3cDPei8ZcK840Xb74Aa77x+XbJpk2a+fud8OJLrvn0d7z+1Xb9ketv+STnqPLLF3gN53F0nQ/2e9Quu+JK6+L6UT3C9aeqlO/eLvxWP/tkf9t4ow0y2cRfAkwaeA0ZudYrXD/HI91LIoMtvCCifqfPP+fMMEvqTwVflbZxv+tzO83te6q5vX2sDwEEEECgGgu4BzxVSuVotiFJcyH5Cl+omZJ8y+QaH5qeVF+b0RSauHRBuuho3xSsmu4MzcC6h2y+WUs1W1tsCk0Nx5twDc2eupvAClm5gJlfT+jjVcsOdOVQXvF89F1NibrgXYV8oiNCnsrD/bEUnZQ1HJoaztfEaVUNs1YW+ZKrfPGmWiOzJxpUn7ChmdfgqOZH1Zxr2hT2X7F5hPnjzfW6wKHft+EYU3nyGWuamlvVNoSmhrUd+q6+XuMpNLXsgmzxSVnfwz5XPqFZ3OgMaj5b01yQ2I8u1baUIp9iz93o9kSHcx130elhOBipzPFUbFPDruawd8y1r8JxoP2pFK41yjt+voZp2icu4Jo1f7HXpmKaGtaxrXVU9k/NHpcihX0ZDEKehex1Pql80fMnLBf/TDJvfFm+I1BKgXBfpM+qptAPbLyptqrmm+Z+MM0yScpZlaaG479tLtDim2ZTE7zRdNudd/vxoanhMWPf8t8vvKRzdLZEw6FZyvi6lInyV1N5uq4rhebh4uXVdVBNI6ppx8pSaHo42t+kltFvSWiONldTw/Emk3vddodfZ7QJyHzrdgEqP6/6NounYFqOpoaj64o256jhcqU0x3maZQqVX81n6ljR8ZMrhT7mos3yhqaG4/s5NCWqfg+jafKUL3Iec6HZ09DMdFhG/THqGI0eW2FaVT/DMXR5l6vdvehvWf/UhHdoNlTNaivd3nf2eXzfAw9XWLX6rtU5N23a95lp+bZJVso72m+0FtK5rPHqp1Z9I2t4bnpkCu4GatJxf0uP3pl9Hfa5PjU+ng5zTWxGx4f5o01iap7o93ge4bua61STuPH+ScP00E/pK6+94UeFJuv7Pfp4mCXzGfrk1jVTKTQhqvzzpXzHZ775c43Xb0wwePX1gblmmaVmUMM8oWn8cH0Y9GZ2V0rKoNhzPpyjahY8/tsW+ryN/s6EpobbHn60VpNJSX6PXWsPflu0b+JJfVyG7YxPK+X3mnRu5ntWl2+8nMO9cPz+Nd/4fPtGXUFof0abf4/Oq2dl4d5KTX0rJTlHNX+8qeHQXHWupn+vvf4mX55os95h/fHjv9h70ULXCZ3P2v5oU8MqczyF7gN0HsafJ8bnre7fS31PVd23l/IhgAACCFQfATV3WaVUjh+xqt50RpeP35gl3dh8Aa3wwL+ywKvWp/4h9aBfQZRo+uqrr2Yp6DF27NjoaD9O87tmi7PGh6BTMYFXLRiCkAogRtN7773nyxMCD65mog/OKbASHuRp/tBPbDQwo8CKlo8GAwsFOZRPVQ2TlC8EFV2TrVk3iOrHSQFI/Uty4/jXX3/NUjAyBMq0X9R/abHBU21/SGH/FbtsmF99eYakfjnD+LD/NC2fsabFA696iUDboYB69GZefchqvP4lCbzqOIsG+hQkVB7RgH0oc1W3pRT5FHvuJjnukp4X4XjS/ommcM6G/aKXI4JlGKf5tb6QR9Q0BLx1PESTjhXlo38h8KrpSa5NxQRedXyqDJX9U16lSJ9//rnfJp3v0VTomqTrpxz0T0H0aNK1Tde7EOhOMm80H4YRKLVA9L6mKsHX8HBKwddSpzT3g2mWSVLueRF41f1GeEDrmvvNFFe/kzff2ssHQh95rOLD9cyMbkB9pCoPPXjTfUhIoa+/Frvumblfy/dwTsvoIbTyUUA0mhREUkD2vIsu9aNDn2et27aLzpbpB0x5RINjoe/MYzuelDW/AkmaV3lH+5xVP516CKnxH3w40S/jaiz5efVATw8kQ/rk08/8eOVT7sCr1qlzS//KmdIc52mWKbQNod9cnRPfu/5/o0nBk732b+Pdo8GSfPs5PCS+qft//cPp7wcdT2H/R/M//+LL/Pi+d2e/OBKClHrRILwcqOUUDN1t7wP8vzBe50Hfe+6fFQ22RNcRHda5FvpJVr+QudL1N3X3ZdLDdKWx48b77zoeJ0+ekllEx7HGabumfPFlZnxl2xQPvGpBbZMeuqts+vzH9accTUk8osulGa4px32xgdfofCH4qn2ufyHQqqBrfFw+e/XHqnkV+I//3a5+wkM+ITA7ak5/2vHroY4/HSua3zXv7VdXKKASypPv+ExyHimvBx7q59etck36+OOQvf9UH5DhJaEuV3fLTCsUeC32GP/J/U0ajH50zylCevf9DzLnY/RaoGtGmD9cM7RMkt/jEOxVPtG+dqe7v23C9VHTyp1qyrmZL8Cab7zcwz1s/PlevvH59lX0HNRLc9HnGDp+1N+59rWu2WFaknNU640HXpVP+C2JvlQX/Y2ZV4HXSR997LdVZY7ec06dNs076N4tOOQzre7jS31PVd23l/IhgAACCFQfgVoqSlUq5Jar2YbQZEi8+ZFiylqVZeP5l6KZXBcwMRf4UJDbN1WiZnLcTU2mOVv1Tajmb0MKTXqqGdelXB+t6htVy7hghu8nqJg+XpWXC5z6PiC1XjWTq/5eXcDG56HpjRs3ds3M1fXlUt4uIOnnU5O2SmpeVctG+090QVxztUF9U0Lqg1bJ/UFZ1j5eVYZiy6dtkJ/KtNBCC3k3d6No7g8vX9aGDRvaBhv81yyLH1nE/5THl19+6f+5h0t+n6y00kpFLPnfLGH/FdvHa+gnVcdBHbf/tT1qZtjtFN8HcNqmhlWizH50w0u6pqFkrONloYVquebOZvn+plTOfCnsc5VNFxAtV6fO7ONa05Q23HBDW3HFFf1wqbalFPm4mqJF9c+c5LjLeLomtoo5L4ptalh4Lhho6jta1tpXStPdeSx3NRWtJqND0rn5tjtHNU3NIqnJXJ3zOi/Cvg1NDWuZJNemcF2qTk0NR5tLVrPIah5Y18twfOZq5lnb7V4+8E2Ka1jNsctJ1wid22omTU03q2kppSTz+gX4HwJlEog2Q1eVeyMVr5R9u4bNTXM/mGaZsL5iPudFU8Mq10DXj1+HTrP7zWqxfXPXnPvaNvatcb4/zfr16ttLzz7prt/18m6CrkXHn3Sab2ZR8++2y072retiQM0uKt103TXWav99/XC+5ug08auvv7HWBx/u+8dTs5RqWvWLL7601wcO8suGP
jf1W6DmjZXUrOTWW21pLmDlm3NU85H6HYk2NfzeBx/a/m3a+vmV556772YdjjnSf3cBLetzR1/f7OROLVvYssvWNfUn5l50sZauCeO7b++V6Svsks5d7eFHHvPz7thi+/9n7z7gnCjaOI4/0nvvvYOAigqiIAJirwgiIirYEEHsHRtiw4KoSBEFsWLH9tIEQZBuQwHpvffe8Z1nwsS9kDuSvdyR3P3Wj3fJZncz+93NZZn/zoz5rjosI0ePsV3V6vK63X5v9LbbTeQffs5zP+ukZKRdZZog3o7JqsdUz6nyZigIE6ib7qHH2WOsx/6joe9I7iPff8kd5+kzfpVrb+ho3+68FudKhfLlZPKUqWLCD7sdvdb2djU8xHQN2vP5XsHl27VpLc2bnWPHtDfBjR2HtbzZho7Dum/ffvny6+F22YcfuE9uv/Um+/h/I0fLnffcbx8faxxWU6kuV11zne2idPQPgW3ZFT0/9Py+5MrWdpy/8aP/Z8/Jvv3flt6vv2mX0i6Us2bNZj8rev5fc3UrefHZHsEtJLdPyXU1rCt617mr6x1yT7cuwe3pA3PTmETqkWTFOH3i5xz2s86xdt91bxm6XGh3l9eZMYF1jGud7r6zixmvu599rOMRmsAwyWt333mHfS25H9pNb+u219vPg57bZ591lv2bb1pP2+7YdT1vt9763NyYI2/2GxD8e6jzzI0Qdhu333aLPHz/PToroq6GveeafkbdZy6az5F9M/NDu9/Vbnh1qmPqLM5udKaYmzVkzNifbNm0K+D3B78tpUoG6jKS62pY14/mHHdjNKvfeec2t0Mh6Xiv7jvJO8ar/jun3hmNk3g/2+MJ+7mO5vvYjWurZdXv7uLFi8v4CYHv3U2bN+lsSauuhu3G0+mHn8+Zn3VS2p3kuhRObr5uy9XxhV7DJjc/pfcfMWqMdL37PruIfmddYD4nBw8dlIm/TDb1PSvtefbOgL7SsEH94GYi/YzqCqFdDeu8Rx5/ynbhr4/1c1mieDH55rsf9Kk9d49XV8Naz2VCZlmydKkdJuJ8UzadPh72ub1+vO/ubnLnHZ3svET9EevzN1EdKDcCCCCAQPoLxG3wGlrBqBdYNatWTFFI19GLNXOnoF0u9KIsxZWTeTG54HXJkiWyzAxCn9wYrxpoarDpJh3vUscL3GPCLQ1FdNKgoHr16klCV52v4Zq+r7njTp9KnTp17IW/C+4iDV51XQ1idMxSF9ToPH3funXr2tBVn+ukQbC+p1YuuEmDmjJlyiYZ/1H3Wfddg2ANYHTScur4kcmFHLEwjLR8Wh4NXUzLVxOkbLchos7TfSlXrrxUqVJFn6Zq2rBhg+i4mhpkRzO54xdp8KrBn4ZuGoC7Sd9Xx0zVc8l7jiVnrOuZVqI2eGvSpElwrDi9wNZ1tm3bGjTSf8hWqlTJ2kU6xqsecz2H9RzTf/TqpOeXjovrHT8zVvsSi+1E89mN9LyL9nORXPBqWoLaz2LoOaJj2+p5oJXxOumNFBq6aritQaF30vF/NVzWyg2d9HU973W+Vq7Xq1fPVIQXCq4S6d8mF7zWr18/6nM/+GZp8EDPPdPq19484Ma0PtbfJC2G3khheh0IOuk8/QyoaehnO5pldTtMCKSVgKuQctuP5DonLa6N3Pt7f/up1PCzjvc9j/W4bfuOMsPc+Pbz2FFSzoxZGjq92W+gqezua8dAveC8QEVTyzbtbEAaOvaX6d5Qvhr+jfww/As5sVbN4KbefneImBaA8vqrL8nll14cnK+Vejpu15Sp04LzNMzRMClcWYILHXmg12Mv935DRo8dZ0NLnV3ZfEfrOFuurDovufLqazrN/WeemC5hZcLEQAW+ztOA7dEH75MGJmB1kwZRd933kCxctMjO0r+Hzz/zlHz1zXc28P3lpzFSunQpt7gMHvqB9B/4rq2U03FhP3rvHfuaXl9o8KoV5LpNN3XudIt069I5GOrpfP1brZX0GgZo0KrTZZdcLB1uuE7atLtBLrnoQunb5xU7P5F/+DnP/axzLKO9e/fZc/X9jwIhinf566+71oxt+IC9Gck7P7njrAHFM8/1ssdfl9cxfV9+4TkzLl0bG2ZOGDMiuBkNfV946VX58ONhdt4D994lXW4PjB28zfybR8P6ceZmBXcOaCX4vSaU1DFQ3WS6ErXnegkThOhnMFeunO6lo3678VQfMed4p1tuOup1N6PFRZfbiuZvv/xU6tapbWcPGvyefPv9CBtQ6wy98eGG9tdK186dzLVXFreqJLdP4f6muJVM6yE5s0ng78yPI76TKpUruZeCvyP1CK4Qxw/8nMN+1oklgTd8ddvVcYa9geyxQle33tq162zQouOReic9p+65q4u0vzZw84p7Tf+NY7rItn8PZ8+ZY2fr50r/Dna5/Vbzb9nA+edulElpjNfkzs9oPkeuXPrvu1f6vCkDB73rZtnf+h2hNwA9Zv5uuJs19IWXeveRAW+/K0PfHWhvpkiyknkS6TluWqDKg+Z7190opNvRvxt6jf7Sq68d9Z2rIfVTPZ+zoZkuu2D2n8HPbDTfxx+Zm4GGffZl8G+AOutNF25cax3/PdEnP58zP+uk5OSuZ0NvJExuvm4ruYA1ufkpvb++9tXwb+257b573PJnNzpLXnj2aSlbJul1Y6SfUd2OG8d+vPku1JuTdNJ/mz/c/SnzHRMIW3Veq5ZX2rFa9eaknk8/Efy7kNy1XaTXoin9ndDPlAbDeoOJ+3um9QE9zHe6t2z6XayB63Vt22hRE3qK9fmb0BgUHgEEEEAgXQXiNnhVBXfh5US0glEvzkIDWK1U1LBVlw+dHu7a4ajlQ5dJz+d6waYtCzUg0NAqpUmX1X/sHGu5lLbhXnPvq8Gd+4ebe837W5cz3YXa98yRI4f3peBjrVRLaRvBBdPgQSTl876tWmtIpd6JOrljp8dDQ81YT2qk247mPAvXolDPGz0vUrKO1b7EajuRWkZy3qXH58K1Jk7us+ndH63Q1qBWP/ORTM40kr9NkWwvPZfRsuvfSj3/ov3bpEb6D+Fj/W3U/Ylm2fTcf94rcwmEXhvp3uv1kU56jeQmd03kbkbT+Wl5TeSnUsPPOm7/EuW3VoBrhZYGRtF8z3r3b936DZIvbx57c4h3fjSPtTXp2nVrRXs1yZPC94JWipuugaW46UHAGzSFey/93ttr/n7mMNcm4fZNt6VhX/FiRY/5t1lDigIFC6RYtnBlSIR5fs5zP+tEaqHHbcWKlbLahN1lTO8t5cuVTfH4pHScN5geObKckCXFFtyuXPo9rd+32qNEuO9qPc9z5MguhT03h7l19beurzeThVvXu1wsHuu5e/DAwWPu17H2KTVlOZZHaradHuv6OYf9rBPrfQkXvup7eEOKaN5Tr90XL1lqW6pWrlzR/m091vp6/ulUsECBYy2a4uvhzk+/nyPT5a656Xy5mG6Apaz+3TBhUmo/i5Gc4/o3Y6O5+VS/kyL9949+34W7OSOa7+PNm7fICebG7eT+HqUIH+cv+vmc+VknJQZ3LXs8g1ctn34ezPjkojfG
6LGuWqVyRPUtqfmM6nuuNTdEFylSJMlNCyl5pddr+n2/3nwX586TO9V/f9KrzJG8T6zP30jek2UQQAABBBBQgbgOXrWA3pav+txNrnLRW6Gor7lg1lU46ry0rGjU7TMhkNkEwgWvmc2A/UUAAQSOp4CrtIqkDHptpOFs6I1rkawb6TJ+KjX8rBNpeVgOgXgR8HOe+1knXvaXciCgAn7OYT/rpIV2aPjqN3RNi7KxTQRSK+Dnc+ZnnZTK6b2GdfV6bnlXv5fcfF3O+5pbXq9z3Y2Iblv8RkAFYn3+oooAAggggECkAnEfvLod8V6cuXne36GViqGBLeGrV4vHCKROgOA1dX6sjQACCMRKwN1o5nr/0O26CilXAZWWgavbDz+VGn7Wce/HbwQSRcDPee5nnUTxoJyZQ8DPOexnnbTSdOEroWtaCbPd4yXg53PmZ52U9i+0ri6lZSN9jeA1UqnMt1ysz9/MJ8geI4AAAgj4FUiY4NXtoF6k6eTubHOVi+EqFb0XdLrcQ11udJvhNwIIpEJAu6HRsVK1W1odc5YJAQQQQCBzC/ip1PCzTuZWZu8TUcDPee5nnUS0ocwZV8DPOexnnbQUnDZ9hjQ0Y7wyIZCRBPx8zvyscywzd+PgsZaL5HWt6wtXHxjJuiyT8QXS4vzN+GrsIQIIIIBALAQSLniNdqc1fNWLOkLXaOVYHgEEEEAAAQQQiEzAT6WGn3UiKw1LIRA/An7Ocz/rxM8eUxIE/HXtyHnPmYNA2gv4+Zz5WSft94R3QCAyAc7fyJxYCgEEEEAg9gIZPniNPRlbRAABBBBAAAEEEPAK+KnU8LOO9z15jEAiCPg5z/2skwgWlDHzCPg5h/2sk3lE2VMEYiPg53PmZ53YlJatIJB6Ac7f1BuyBQQQQAABfwIEr/7cWAsBBBBAAAEEEEDgiICfSg0/6wCOQKIJ+DnP/ayTaC6UN2ML+DmH/ayTsRXZOwRiL+Dnc+ZnndiXnC0i4E+A89efG2shgAACCKRegOA19YZsAQEEEEAAAQQQyNQCfio1/KyTqZHZ+YQU8HOe+1knIXEodIYV8HMO+1knwwKyYwikkYCfz5mfddKo+GwWgagFOH+jJmMFBBBAAIEYCRC8xgiSzSCAAAIIIIAAAplVwE+lhp91Mqsv+524An7Ocz/rJK4QJc+IAn7OYT/rZEQ79gmBtBTw8znzs05a7gPbRiAaAc7faLRYFgEEEEAglgIEr7HUZFsIIIAAAggggEAmFPBTqeFnnUxIyy4nuICf89zPOgnORPEzmICfc9jPOhmMjd1BIM0F/HzO/KyT5jvCGyAQoQDnb4RQLIYAAgggEHMBgteYk7JBBBBAAAEEEEAgcwn4qdTws07mUmVvM4KAn/PczzoZwYp9yDgCfs5hP+tkHDH2BIH0EfDzOfOzTvrsDe+CwLEFOH+PbcQSCCCAAAJpI5Dq4PXPBavklOpl06Z0bBUBBBBAAAEEEEAg7gX8XA/6WSfuISggAiECfs5zP+uEvC1PETiuAn7OYT/rHNed5M0RSEABP58zP+skIA1FzqACnL8Z9MCyWwgggEACCKQ6eJ2/fL2ULV5I8ubOkQC7SxERQAABBBBAAAEEYimwa89+WbVhq9SoUCKqzXINGRUXCyegAJ+NBDxoFDnVApz3qSZkAwikiQCfzTRhZaNxLOD3nI/jXaJoCCCAAAIJJJDq4HXz9t2yY/deqViqSALtNkVFAAEEEEAAAQQQiIXAsrWbJX+eXFKkQJ6oNsc1ZFRcLJyAAvrZyJ0zh5QonC+q0vPZiIqLheNMgPM+zg4IxUHgiADXa5wKmU3A7/dRZnNifxFAAAEE0kYg1cGrFmvxqk2mwi2nFI+yUiFtdomtIoAAAggggAACCKSHwIYtO2XLjt1Rt3Z1ZeMa0knwO6MJ8NnIaEeU/YlEgPM+EiWWQSD9BfSzuWP3PqlStqivN+d6zRcbKx1HgdR+Hx3HovPWCCCAAAIZRCAmwev+Awdl5fptkjXrCVKsYD66Hc4gJwe7gQACCCCAAAIIhBPQrrs2btsphw79K+VKFJQc2bOFW+yY87iGPCYRCySYAJ+NBDtgFDcmApz3MWFkIwjEXIDPZsxJ2WCcC8TqnI/z3aR4CCCAAAIJIBCT4NXtp3aLtXHrTtmz74D862byGwEEEEAAAQQQQCDDCJxg9iR3zuxSKH+eqLtQTQ6Ba8jkZJifSALus1GsUL6ou95Obj/5bCQnw/x4EeC8j5cjQTkQSCrAZzOpB88yvkBanPMZX409RAABBBBIK4GYBq9pVUi2iwACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCMSzAMFrPB8dyoYAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgkhQPCaEIeJQiKAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAQDwLELzG89GhbAgggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgggkBACBK8JcZgoJAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIxLMAwWs8Hx3KhgACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCSFA8JoQh4lCIoAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIBAPAsQvMbz0aFsCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCQEAIErwlxmCgkAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgjEswDBazwfHcqGAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIJIUDwmhCHiUIigAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggEA8CxC8xvPRoWwIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIJAQAgSvCXGYKCQCCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCMSzAMFrPB8dyoYAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgkhQPCaEIeJQiKAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAQDwLELzG89GhbAgggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgggkBACBK8JcZgoJAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIxLMAwWs8Hx3KhgACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCSFA8JoQh4lCIoAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIBAPAsQvMbz0aFsCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCQEAIErwlxmCgkAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgjEswDBazwfHcqGAAK+BX74bZV8MWWZzF+93fc2WDH+BWqUKSBXn1VRLj2tbPwXlhIigAACCCCAAAIIIIAAAggggAACCCCAAAI
IZGgBgtcMfXjZOQQyn8Cqzbtlwux18tbIeZlv5zPxHjesUUzuv7y2lC2SJxMrsOsIIIAAAggggAACCCCAAAIIIIAAAggggAACx1OA4PV46vPeCCAQc4H7hs6UafM3xny7bDD+BTR87d2hfvwXlBIigAACCCCAAAIIIIAAAggggAACCCCAAAIIZEgBgtcMeVjZKQQyp8CKTbvk2t4TM+fOs9dW4LHWJ9HtMOcCAggggAACCCCAAAIIIIAAAggggAACCCCAwHERIHg9Luy8KQIIpIXATW9NZkzXtIBNoG3qmK9DujZKoBJTVAQQQAABBBBAAAEEEEAAAQQQQAABBBBAAIGMIkDwmlGOJPuBAALSuPtIFBCQX567CAUEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBNJdgOA13cl5QwQSW+Dff0U2bdkqxYoUirsdIXiNu0NyXApE8Hpc2HlTBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAg0wvEdfD62ptvycqVq6RihQpyV9fOxzxYs+f8I4OHvm+X63TLTVKzRvVjrpPIC+zctUfWbdgku3bvkQrlSkmhAvnD7s6ipStl+KgJ9rXWlzSXSuXLhF2OmfEp8OGXI2Tdxs1SukRRue6q49+S78mXB8jKNeulQb06cseNreMKLbMEr6dVKSLdzVim4/5aK2+NnBdXxyAtC/NQyzpSrVR+efqzWbJ68+5k34rgNVkaXkAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBIQ4FUB68adl7eqk1ERSxVqpRMHj8momV1octbtZXZc+bIySfVleGff3LM9X4a/7Pc0rmrXW7ouwOlSeOMOc7flq3bZfCn38nseYuSmOTOnUvOOv0kua7lhZIlS5bga9N/ny0DPvjSPu/a8Ro5/eRawdfS48GOnbttC0l9rzIli0uOHNnT421T/R5jJ82Qj74akeJ2ateoLA90viHFZVL74v3P9BE95kULF5SXn7g7tZuLaP2Vq9fJwUOHJE+e3FKiaOHgOnv27pOuj/Wyz3PlzCH9Xngk+Fo8PEgpeB379PmSK3vWZIt54OBhafbU6GRfj6cX3rilgZxepagcOGTK/GRilLl8sbxSOG8OWbR2h+zad9AX53ePNpci+XJKt3eny2+LNye7DYLXZGl4AQEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQCANBVIdvM76629p2aZdREXMmzev/PXr1IiW1YUIXo+m0uDr8Zf62yDu6FcDcyqVLy3333695DWhmU7HO3j9/sdJ8tX/xtmyPNqto1SvXME+jvcfoyZMlU+/STnUqlqpnHS/6+Y03ZXjEbze9uCzcsiEepUrlJEn7rk1yf7p8Zw840+57Pwm0qj+yUleO95PUgpexz9zgWTPmkX2Hjgk2l1y6LRz7wFp2Wt86Oy4fH5q5UCL15/+TpwWr1880FRKF84tvb+bI19OXe7LleDVFxsrIYAAAggggAACCCCAAAIIIIAAAggggAACCKSTQKqD123bt8uUqdODxf3XJBpd777PPq9Tu7Z07Xxb8LWcOXJI82bnBJ8f6wHBa1IhDcKef3OwLFm+2r5QqnhROe+chlKzagXb9esnw0fJ9h277GtnnFpHOt8Q6AaW4DWpY6TPvMFrs0anS9WK5Y5atUihAnJi9cpHzY/ljHgLXmO5b7HeViTB69WvTJA1W/bE+q3Z3jEECF6PAcTLCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgkvkOrgNVRAg9eqJwZawV1wXgsZ0LdP6CKybv0G+eTTz+WfefNl1eo1UqN6ValT+0Rp17aN5M6VK7i8N3j9eOi7MvTDT2T6zF9NS8480vScs+XcpudI0aJFgsun1NXwpk2bZcLESfLLlKmyZOlyaXhGfWnYoL7tjjiraQWXCJM3QM2XN4+8+Nidksd0L+wmbQ37QM/XZc+evaL71Pe5hyWn6dbXu552NXzgwAGZ8ttfsnrtBtEWm03OqCd1alZ1m7G/9+0/ICN/miwLzfiwa9dvlEIF80tlMzbseU3OkBLF/jPX7o5/mvyrXefqy1rIr7Pmyu9/zxMty5P33iaDPvpalq1cY7oa3maX0RaUhQsWkOYmyAx9zyQFiIMn3uC1281t5dS6NZMt1fQ/ZltnXeDSFo1NS9GywWU3m26CP/56pH1e1zhriKvTPwuXyqTpf8gK063v3n37pWLZUtak6Vmn2dfdj3DB61cjfrLHT8f1vb71xW5R25Lzrfc+s89r16gi5zauH3wtkmP6i2nJqsfvt7/+setlz55NTqpVzZ5nN197hZ03xHRzbccVNuW94oKkN1L89c8i+ducE3PnL5Ys5hysY8pwSu0aUqNK0lbO737yjT1HdLzh+iefKKN/nipz5i+RXLlySN0aVaXlRc0kW7bkuwUO7lTIg1gErw2rF5PWZ1a03eH2+OzPJO9wY7MqUrd8YZk8b70Mn75CTqlUWNo3qSLTFmyQsWa81XsvO1Gqlc4va7fslR9+XSnjZ6+Tw2Ga1+o4pVeeUV50rNal63fJqD9Wy6S565Ms2+asitKgWjH58OfFUtUsf8EppaWw6Wb3xjcm2XFOOzavJr8u3iSf/rLUltG7fNmieeTS08pKwTw57DJv/PCP3faVDcrLpaeXlby5ssmcFdtk8LiFYUPoaMtXLH9Oudi8X3nzvqs275F3xy6QOSsDn/m6FQrJjU2ryhnVikr2bFlk2YZdsnLTbvl57jr5fubKoO9l9ctJ8zolpWyRPLJ++175Y8kWeW/8Ijl8+L/mybR4DXLxAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQCAOBdI9eJ08dZrc3vVu2bUr0DLTa6JjuQ4Z1F8KFypkZ7vgtVrVqpIzZ0473qt3+cqVKtmxX/Pnz2dnJxe8rt+wwXSHfJ2sXbvWu7p93PGG9vJk9/gap/KoQh6Z8fHXo+THidPss3s7tTeBWNKwVF8YP+VX+cF0BauTBmXaGtMbvGoYN8eEYqHTvbddJyedWM3O3rh5qzzz2juyc9fu0MVsoPtQlw6mu+Dy9rVR46fIp98Gxu3VUNW1xtUX3+j5gNz1xCtHbUNnXHvlBXJB0zPDvhYvM6MJXucuWCIv9//AFr3pmadJh2suC+7GaNNl8bAjXRbf0u5KadzgFPn8+x9lxLjJwWW8D+qZgLfbTW3lhBMCc8MFr0+8NEBWrV0vGsCrs5s0pLr1gZ72af1TakuXDlfbx5Ee06GffS8Tpv7mNhf8rQHswF6P2eddHn3RBsXapbWG627S7qS1G+Jwk4bD5zZuEHzJdWVc3Iwfu3P3HnuzQPBF86BKxbLy+N23eGdF9DgWwWvBPNnlm0ea226Jv5y6zHSNO9e+twaIA28/04bbnQZMscHiTedWk1tbVLNBYqlCuSVb1iMH7Uhp56/eLje9lfQ4dzq/unRodvRnN3TZQZ3PlNrlC9ltlzOBppsu7PmjCVXLyV2X1pIFa7ZLx76B7bvlF63bIVVL5neL299LN+yU1SYQbVSzeJL52u3ydX0myrqte4Pzoy
3fwjU7bNgc3MCRB09/+qeMmbVG2p1dSe68uFboyzJj4Sa5Z8gMO3/onY3DbkPLdYMJmt2YsASvRzEyAwEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQCCOBNI9eL3kyta2pauO93p31ztsi9Xv/zdSfprws2V5/pmn5dprAl3kuuDVeV1y0YVSvnw5+d/IUbJiRaCllM7r2ycQ7oULXvft2ydXt7sxGNre062rVKtaRfQ9R44OBIZPPf6odLj+Ovc2cfu7Z593gsFm/xceMWF0jojK6g1edQUNzKpVKi/aStN1TVyudAl55sHOdnvaGlFbPuqkY3hWMyGrts7U7ehUu0ZleaDzDfaxN3jVGRoE1qpaUTQ17NT+Khk1YYr8NXehzF8cGNPxbNO6tmTxInLyidWlfJmSdhvx+sMbvGprXm29GTppcKitjrVRY9fHAoFkaBj63BuDZZFpOaxT/xcfld2mRfL9PV6zzwvkzysXNW8kBw8eNKH69ODxePr+TlLBtCjVKRbBa6THVI/zomUr5csfxtn31n25sNmZkst0E97CtHbWKVzwOt60en7/ix/s67mNR/OzTpcDhw7J+MkzTQvrg3a+t9WwC171hVzmPG5kwuhVa9bLvEXL7LL647G7brLnaXBGBA8iCV41aFy1+eibCg4flmCL0/NPLi1Ptz3FHtd2ZvkVG3fJ94+dK4Xz5pCvpy2XV76dY0vjgld9csB0BT50/GKZNn+DnGfWv6ZRJRuea8vYl78JfHbcdnV5nf/Db6ukXsXCcvsFNWxoO/L31dLzi1n6srggVR8vXLvDjou6wbQEnTZ/o2jr1uSCV11+nGl9++HExXJWjeImGK4eDPGnmnUH/bhAKhTLI91bn2zf0xsu+y2ftrwdMm6RnFi2oFzftIppaZtdtu0+IJc8N1ZyZc8qZYrklj43NZCipmWstuDVFr6bduw3y+yXqxpWkAeuqG3t+42cJzMXbZYmJ5YQbV2sY/Jqq9x3xy7U3RKCV8vADwQQQAABBBBAAAEEEEAAAQQQQAABBBBAAIE4FUjX4FXDJQ08daperZrpXjjQCmr79h1S74xGdn7LKy6T3i+9YB97g9fujzwkt3QMhH1bt22TS668OtiCde6fM22L2HDB66+//yFt2gXWe+6Zp6TdNYEWgIdMKHT9TbfJtOkzRFvaDv/8E/ue8fzDhVUabL313EMRF9UbvGrXwo91u9kGMdo68vaHnxMdO1a7Jn77pcft/Bl/zJEdpkVygXz5pP4pJwbf55Hn+8r6jZvtsoNeftzO9wavFcqVku533Wy6E80WXEcfaCtIbQ2p06PdOprWshXs43j/4Q1ekyvrTaZVsXbVrNOHX42QcZMCLfiee6SLlC5RTLR73zseCZzPp9SpIXffcq1o69NZcxfYdbQb3qKFC9rH2sXvm4M/tY/bXnG+CTzPso9jEbxGc0z1Td25pq2Yn7jnVlsO9yNc8PrkywPsOMO6zLMP3yFlSgZaVs5fvExe7DvUrnpK7epy963t7GO3fX3irPTxR1+NlLGTAmNGX9/6kiRdJevrx5oiCV6T28bMRZvk7sGB46fLaFDYwHSPq93iTjTdAGvLTQ0+W/YaH9yEN3i9772ZpsvhjcHXbjABZGcTqO7df0ha9Bhj5w+7t4mUL5ZXPp+yTPp8H2hJqy80qFpU+tzcQPYfPCzNnxptl3XBq75/296BG1PsC+ZHWxPqJhe8aovXG9/4xS0q/Ts1lJNNuBta9gevrCMtTXfH81Ztl5v7BVrN+infahNit3n1v/JpaPri9afZ0PrsxwN/77UwyY3xqmXTLpv/XLpFZi3bEiz3k21OlgvrlTFdIm+V2wZMtfMJXoM8PEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBCIQ4F0DV69+79lyxaZt2ChbN26zXRpu0seejQQ5DU347a+O/Atu6g3eF0w+w8T+GUNbmLgO0Ok1yu97fMfhn8hJ9aqKeGC1/fNuLBPP/u8Xe7uO7tIiRKBQEhn9O7TVzZt3mRfm/PHTDO+ZE77OF5/uLAqNcFr+1YXS4uzGwR38dWBH4mO06pTXxPmeseM3WfGHV2yYrVs3bbDjun50ZFxSnXZwb2f1F/iDV5vv6GVNDy1rp3v/ZGRg1ftzllb8eq0fNVaefrVt+3j1peea8Z6PVt+M2Fq3yNhqrfFpy6k4yHrOhs2bbWtYNV6gukqWqfLzm8irS5ubh/HIni1GzI/Ijmmuqw71yIJXr3dG2uwr+G7d3rQjDusY/x6WwK77WuL4V7duwUX13NRz0mddJzX0DFkgwsm8yCS4HX7ngNyyDNuqNvUWNMt7mueMDSHGY/0f91bSO4cgb872qq5/esT7Rilbh0XvO4w27zo2bFudvD3pGcvsjcztHh6jGi3vj/3vFCyZjnBtoDdumt/cDl98Nx1p9rnuh3dngteP5m0VPqO+CfJsikFrx9PXCJvmZajbnqs1Ul2XFcdQ/bhD//rRlrHe32oZR27P9oKWCc/5ftmxgp5afhs93aSxbR2n9DzAvtbW7xqy1edkgtegyuaB5VK5JPShXPblsWXmzFfNZRdblobt3stUD6CV68WjxFAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQTiTSDdg9e//p4t9zzwiCxZujSsRbjgVcd4Hf3D8CTLj/5xnHS+8247r2+fV+WSiy4IG7w+3P1J+fzLr5OsG+7JiG+/kpo1qod7KW7mPfPaIFm6Yo0tj9+uhrt2vEZOP/m/8RaHfPqdTJz2u92mC163mKD1zcHDgu8VDiBc8Nr97pulasVyRy2eEYLXLh3ayGkn1Txq3044IUuwG1d98b6nX5OtpgW3tv59+r5O8vZHX8vUX/8SHSNVj1mWLFnsNnR8169GjLOtjY/aqJkR6+A1mmOq5XHBaCTB65p1G6V7r352N5o0PFVuanu5fex+eMP9fsZAuxZ22w8Nalea7oa19axOaRW8Xv3KBFmzZY8rXoq/z6xRTF7tUN8uExow6kwXvHrHWvVucPST50nenNnk/qEzZe7KbTbI9b4e7vFjH/8uE2avCwavA8fMl/dNF8beKaXgNXR5F7z+9PdaefyTP4KbucwEm49eVTcYvGr3wBo0H2uKpHw/9bhANLiOJHjVoPbZ6+pJ45oljhojV8tC8HqsI8LrCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAvEikK7B69p166VR00DFvo7xqmFprZo1pGCBAvLAI92tSbjgVZf969dAV5MO7pPPvpDuT/awT4d98J6c0eD0sMHra2+8JW/2CwQ5On6st1Xrlq1b7XtrGNa82Tn2sdt+PP7+2LQ41XFAdbr/9vZSp2bVo4qpY3ROMUGfTtqyVccJ9XY1HEnwes9TrwbHGq1SsawZs7WS5DdjkX76TaALVN12ZgteQ1urqkG4afjI8fLt6EC3q289/7Ady3WvaTmsY5je2u5Ku8rE6X/IkGHf2sfaevnUujWlZLEitlXx6AmB8zzS4FVDTA0z3bTfdG3c+UjXxvVPqS1dOgS61o7mmOq2XDAaS
fCq+6fdD+vk7U7YzjA/nnploKxYvS5Jd9Zu+/EevD7brp40r1vK7oqOR3rZ8z8Fx4HVmS541SBXA93QabwJILObALL965NsgDjRtHjV6YMJSYNUnZc/d3bb0vXr6ctl3da96R68ZjEtcWNVvmiC194d60vD6sWs69L1u2x3w5t37pOTKhS2XT0TvOrZwYQAAggggAACCCCAAAIIIIAAAggggAACCCCQCALpGrx+NfzbYMA6+O3+0uycs63R7t27pe5pDe3jcMGrvqBjsOpYrG667Y5uMvan8fbpr1MnSuFChcIGrz+OGy+dugS6Mv3y04/k1FNOdpuQ/fv3my5e95gWiyfEfeiqhZ72+98y8IOvbPkLmCC012PdzNi2OYL7c/jwYXn0hbdM17Vb7Lw+Pe4XXS6a4HXb9p3Jtl50Y7zqxv0Gr/fedp2cdGK1YJnj+YF3jNdIg1cdv/WhZ9+wu6VjtGpXzDo9cmdHqVGlgn382qCP5a+5C+3jPs+YY5Qvr338x+z58sa7w+zjYwWvPfu8I0uWr7bLvvzE3cFxYhcsWS4vvPmene+C15RapIY7prqyC0ZLlywmzz3cxW7P/Qg3xqtr6atB8uvmvMuWLatdfNfuPaKhr44j7A1x3fbjOXh1rV0PmLJr179F8uWUEb+tkme/DNzYoDvogteDh/6V5k+PFu122U3FC+SS4Q83s4Fik8dH2dkjH29hA9bbB06Vv5dvdYuG/e26Gg5twaoLp0WLV91urMqXUvD6+g9z5bPJy/Tt7DT26fMlV/astrtibVXsJu0GWbtDJnh1IvxGAAEEEEAAAQQ0ZFqvAABAAElEQVQQQAABBBBAAAEEEEAAAQQQiHeBdA1ev/thhNx9/0PWpFuXztLl9ltlz9690vP5XvL1N9/Z+ckFr6VKlZKeT3WXUiVLyTfffS/vDBlqly9fvpxMGDPCPg43xuu69RvkrHPOta9XrlRJBvTtI9WrVZVly1fYssz662/RFrW/T5tkwqJsdrl4/XHw0CHp+do7tvWglrFc6RJyQdMzpVa1SrJm/SYZ9s0o0ZBNJ20Nq61idYomeN1pgrJHnnvTrle+TEm5v/P1kjNHDhn502T5ZtR/LfqiCV7Hmla6bnzY2jWqSLuWF0qxIoXMdrPb94nXH97g9bwmZ0i1yuWPKmrhgvmleuVAoOpefOKlAbJq7Xr3VELH5O075DP57a9/7Oudb2gtDerVNstvkN5mbFPtplinYwWvH345Qsb9MsMuq8e61cXN5KAJCAd88KVs2brdznfB63oTxEdzTHXl2x9+Xg4cOGi3c+fNbaVqhbLm5oR89nm44NV1qawLnHZSLbn2ygtk/4EDol1ZL1q60q536XlnS+tLAp/FeA9etZvcEabb3VxmfNfe382Rv1dslcFdGtn9uOPtabZVpj5xwas+nrFok9wzOHBM9Lkb01Rbr7Z6ebzOkjdvOUNOq1LEjHu6X1q9NMGO+6rzdSzTN24OjL2sY7zqeLDHI3iNVfnCBa/D7m0i5YvlldCxZl3wquPS6vi0Ouly793ZyAayBK+WhB8IIIAAAggggAACCCCAAAIIIIAAAggggAACCSCQrsHrho0bpeHZzVNkCRe8ajC6a9eusOt988UwOaluHftauOBVX/COBxtuI6/2el6uujLpuJThlouHebv27JUnevUPBnThylSoQH55pFtHKVG0sH05muA1j2mxeP8zfYLhXbjt67xogtelK1bLMyYw9k5tTTB3oQmN43nyBq/JlTO0xaYuN+bnafLJ8EALR31+vtnPdmZ/3eQ9Hm5e6O9jBa9zFyyRl/t/ELqaHUvWBaYueNWFojmmunyvt4bKvEXL9KGddIzagb0es4/DBa/7THfDT736tqzfuPnIGkl/qdMjXTva7ob1leMZvO7ed1AOeVqmupLu3Hsw2F1wHxOCNqhaNElry57X1pNzTyolO/cekEufH2eC7n+DwatuL6vpqnfv/kOybtteKVMkt2TPmsW2dr2p72RZuDYQqOfLlU2+fLCp5MuV3a6/cvMuyWa6Oi9XNI8txtT5G8x4sL/ax8cjeI1V+cIFr8+3P1Wa1i5p923Tjn3y9fQVMmTcQnndWNc31v+axsIrN+82x+awVDDB6wn63wmM8erOT34jgAACCCCAAAIIIIAAAggggAACCCCAAAIIxL9Amgavl1x0ofTt80oShT9n/SX3PfSYLFm61M4vWqSo9Hq+h/R47kVZsWKltGjeTAb1D7S4vLxVW5k9Z47UO+UUeeCebvLok0/bZXTFhmc0kFtvutEubzdkfniD1w8GD5LGjf4L9kaMGiMffvKpTJk6zS0u1apWlYfuv0fOO7dZcF4iPNi0ZZsM+uhrmb94eZLiZjVBzym1a8ht17VM0gXx9D9my4D3v7TLauvF08yYom7SFokTp/1un+pYoTpmqLa6fO3tj4Mta/XFC5udKZs2b5OZs+baZV3wqmOSDjsy9usT99xiupMta18P/aGtM3Xs0+07AgF6+6sukhamFWk8T6EBariyhgted+7aLXc98d95/8yDnW3rZO/6Gup+9b9xwValxU1IrmF038Gf2sWuuOAcaXlRM/vYhaa6TK/u3YKbGTtphnz23ZjgNrRl7Z0d2wQD2Qb16sgdN7a2y0dzTHUFd44tWrbSdhMcLnj1dh2s6+zctUfe++w7mTN/sei4rzrly5tH6taqKh3aXJakhbNrUVvdtCJ+tNtNdln9oS1/n3ipv31+lWnFe/n55wRfi+RB4+4jk11s/DNmvFXzGUlu0iC16ZOjpFHN4vLyjafbIPCa3j/LahMG6qRjtY480gp29J+rpcdns4LB62zTInbt1j1ybt3SNizU5TVcfP2Hf2TsX2v0aXAqXTi3vNLhdClfNK8Na/UFDW5H/r5anv/qr+Byb3c+U+qULyQDRs8/akzYNmdVlHsuO1Hmr94uN7012a6T3PKPXlVXLqtfTsb9vVae+OSP4PYvO72cPNqqrizdsFPa95kUnB+L8rngVVvvajfNOhXMk1363tpQqpQMtJyesdC0EB4yw7r2u/UMqW321U1qp90O33xutSTl+/aR5lI0f07pMmia/Lk00KW6W8f7+5fnLvI+5TECCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAukiEPPgNdJSb9u+3Y6xWqxoUTvGaqTrbdq02YaK+fIFKu8jXc8tt2/fPlm1eo0UL1ZM8uf3tw23reP9W8djXbN+o+zavVfKliouJYurZexKtW//Admxc5cULlgg2FIxtVvXsT7/Nf9lyxoYAzS120v09bVbYO3iOn++QItHP/uj48oeNs0Fi5vum3W84pQmP8f04MFD9vgfa9ve99XgNotpyaldMafnlFLwmhblcF0N63itOm5rtqwnSK2yBWXZhl3BwDGl961RpoBoC9yVmwLhbkrLHo/X0qJ82o2ztqzduvtAkjFx1a5GmYKywbQY3rB9b6p2l+A1VXysjAACCCCAAAIIIIAAAggggAACCCCAAAIIIOBT4LgFrz7Ly2oIIIBAsgLHO3hNtmC8
kK4CBK/pys2bIYAAAggggAACCCCAAAIIIIAAAggggAACCBwRIHjlVEAAgQwjQPCaYQ5lqnaE4DVVfKyMAAIIIIAAAggggAACCCCAAAIIIIAAAggg4FOA4NUnHKshgED8Ceh4pzruaXpNtcsVlFYNK8jMxZvsGK3p9b68T/IC2j3ykK6Nkl+AVxBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQSCMBgtc0gmWzCCCQ/gI//LZKnv/yr/R/Y94xbgQea32SXHpa2bgpDwVBAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQyDwCBK+Z51izpwhkCoH7hs6UafM3Zop9ZSeTCjSsUUx6d6ifdCbPEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBIJwGC13SC5m0QQCB9BFZt3i2vfjeH8DV9uOPmXTR0vf/y2lK2SJ64KRMFQQABBBBAAAEEEEAAAQQQQAABBBBAAAEEEMhcAgSvmet4s7cIZBoB7Xb4iynL0nXM10yDG0c7qmO6Xn1WRboXjqNjQlEQQAABBBBAAAEEEEAAAQQQQAABBBBAAIHMKkDwmlmPPPuNAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIxEyB4jRklG0IAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgcwqQPCaWY88+40AAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAjETIHiNGSUbQgABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQACBzCqQ6uB18Kh/MozdzRfWyjD7wo4ggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggED6CaQ6eD373uHpV9o0fqdJr7VM43dg8wgggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgggkBEFCF49R5Xg1YPBQwQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQiFiA4NVDRfDqweAhAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAghELEDw6qEiePVg8BABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBCIWSHXw+seCVRG/GQsigAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACGVEg1cHrzt17M6IL+4QAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAghELEDwGjEVCyKAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAALhBQhew7swFwEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEIhYgOA1YioWRAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBMILELyGd2EuAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgggELEAwWvEVCyIAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIhBcgeA3vwlwEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAgYgGC14ipWBABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBAIL0DwGt6FuQgggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggEDEAgSvEVOxIAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIBBegOA1vAtzEUAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgYgFCF4jpmJBBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAILwAwWt4F+YigAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACEQsQvEZMxYIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIBAeAGC1/AuzEUAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQiFiB4jZiKBRFAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAIHwAgSv4V2YiwACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCEQsQPAaMRULIoAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAuEFCF7DuzAXAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQiFiA4DViKhZEAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEwgsQvIZ3YS4CCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCAQsQDBa8RULIgAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgiEFyB4De/CXAQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQCBiAYLXiKlYEAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEAgvQPAa3oW5CCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAQMQCBK8RU7EgAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgggEF6A4DW8C3MRQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQACBiAUIXiOmYkEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAgvADBa3gX5iKAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIRCxC8RkzFgggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggEB4AYLX8C7MRQABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBCIWIHiNmIoFEUAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgfACBK/hXZiLAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIRCxA8BoxFQsigAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAAC4QUIXsO7MBcBBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBCIWIDgNWIqFkQAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQTCCxC8hndhLgIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIBCxAMFrxFQsiAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCIQXIHgN78JcBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAIGIBgteIqVgQAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQCC9A8BrehbkIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIBAxAIErxFTsSACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCAQXoDgNbwLcxFAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQ
QQAABBBBAAIGIBQheI6ZiQQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQCC8AMFreBfmIoAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAhELELxGTMWCCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAQHgBgtfwLsxFAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEIhYgeI2YigURQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQACB8AKpDl5Xb9oWfsvMRQABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBDKJAMFrJjnQ7CYCCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCKSdQKqD152796Zd6dgyAggggAACCCCAQEwEdh2IyWbYCAII+BTIm93niqyGAAIIIIAAAggggAACCCCAAAIJI0DwmjCHioIigAACCCCAAAL+BQhe/duxJgKxECB4jYUi20AAAQQQQAABBBBAAAEEEEAgvgUIXuP7+FA6BBBAAAEEEEAgJgIErzFhZCMI+BYgePVNx4oIIIAAAggggAACCCCAAAIIJIwAwWvCHCoKigACCCCAAAII+BcgePVvx5oIxEKA4DUWimwDAQQQQAABBBBAAAEEEEAAgfgWIHiN7+ND6RBAAAEEEEAAgZgIELzGhJGNIOBbgODVNx0rIoAAAggggAACCCCAAAIIIJAwAgSvCXOoKCgCCCCAAAIIIOBfgODVvx1rIhALAYLXWCiyDQQQQAABBBBAAAEEEEAAAQTiW4DgNb6PD6VDAAEEEEAAAQRiIkDwGhNGNoKAbwGCV990rIgAAggggAACCCCAAAIIIIBAwggQvCbMoaKgCCCAAAIIIICAfwGCV/92rIlALAQIXmOhyDYQQAABBBBAAAEEEEAAAQQQiG8Bgtf4Pj6UDgEEEEAAAQQQiIkAwWtMGNkIAr4FCF5907EiAggggAACCCCAAAIIIIAAAgkjQPCaMIeKgiKAAAIIIIAAAv4FCF7927EmArEQIHiNhSLbQAABBBBAAAEEEEAAAQQQQCC+BQhe4/v4UDoEEEAAAQQQQCAmAgSvMWFkIwj4FiB49U3HiggggAACCCCAAAIIIIAAAggkjMBxCV737Dsg23ftk9379suhw//6xsqa5QTJkzOHFMibU3LnzO57O6yIAAIIIIAAAghkdAGC14x+hNm/eBcgeI33I0T5EEAAAQQQQAABBBBAAAEEEEi9QLoHr7v27pe1m3bYkufKkU2yZsniey8OHT4se/cftOuXKppf8ubK4XtbrIgAAggggAACCGRkAYLXjHx02bdEECB4TYSjRBkRQAABBBBAAAEEEEAAAQQQSJ1Augav//77ryxZvVmyZM0iZYsVkOzZsqau9Gbtg4cOy6oN2+SQ+V25TBE54YQTUr1NNoAAAggggAACCGQ0AYLXjHZE2Z9EEyB4TbQjRnkRQAABBBBAAAEEEEAAAQQQiF4gXYPX3XsPyOpN26V4obxSMG+u6EubzBrabfH6rTulTNECkidX/HY5vHDJiuAeVKtcPviYBwgggAACCCCAQFoLELymtTDbP54CYyf8Ij+O/8VXEc5r1lhaNG3sa91oViJ4jUaLZRFAAAEEEMj4AqZ9imlAkvH3kz1EAAEEEEAgswmka/C6Zcce2bR9t5QrXlC0m+FYTfsOHJQV67dJ0QJ5pHD+3LHabEy2M2LcL6L/JzddfG5j0f+ZEEAAAQQQQACBtBRIy+D1sKk1WrN2neTInkOKFTM9kKTljsTRtkf+OEH27N0rZ5x+ipQtXSqOSpb5ikLwmvmOOXuMAAIIIIBAIgvMX7RMBg/7RmrXqCI3trkskXclTct+4OBBWbNuo+3hsHSJYpItBr0npmmB2TgCKQjs3LVbvv9xol3iqouaS86c0Q2buGL1Wvllxp+m4VkuueLCpim8Ey8hgMDxFkjX4HWzCV03m/C1vAlec8YweN1/4JAsX79VipjQtYgJX+NhCg1cq1eukKRYC5YsT/KcADYJR7o8mTDlV/nyh7FJ3ku7qi5ZvKhUKFtKzmvSUEqVKJrk9fR64spWvGhhebhrR8mRI2lL7m3bd8oTL/Wzxen5cBcpmD9fehWN90EAAQQQSFCBtAhelyxbIT9Pni5Ll6+UvXv3WZlcOXNKrRpV5arLLjDfX9H9QzLWtJs2b5FNm7dK7ty5pHzZ0rHevLz4Wn/Ztn2HXNfmSjmpds2Yb58NRieweOl/vctEt6ZIlUpp3xtNLFu8umtF735qxU2lcmWkSsWyUqdmVXs96309mse79+yVZSvX2FVOrF45mlXTdNl4LVea7jQbRwABBBA4SiCtvwePesNUzNDvU/3+KlakkGgdj5t++mWGfD3iJylcsID0eLCzm83vIwLbd+6S9z/7Xhaa67vDhw/buVmyZJGaVSvaoDpvnvhqeMOBi38B79+Ndi0vkrPqnxy20PPMTRFvDfnUvta8cX256uJzwy7nZ+ZG82/TZ3q/bVd9/tE7JV/e6HKM3/+eJ0PMDRuFCuaXZx68w08RWAcBBNJJgOA1DaC9oasGrhqqJte1sHdZwtc0OBgpbNJ94WrY6i7Y9h84IPv3H7Br6Zdf147XmBYsJVLYStq85MqmW298Rj1pe8UFSd6I4DUJB08QQAABBCIQiHXwumHjZun37gfBwLW4ael68OAh2bJ1my1NmdIlpeN1rSV/vuN3c5BrBVm5Ynnp1LFdBErRLULwGp1XZl86rYNXr2+2rFml841XSw1TOeln0iFS3nj3E7vq6z0fiptuEOO1XH6MWQcBBBBAwL+At84k3FZS+z0Ybpt+57357jDRxhcXNW8kl7Q4O7iZA6b3vpmz5kil8mVEW3Iy/SewfcdOeWXAB7J12w7bIlAbR/x7+F9ZtmqNqFuRQgXl4Ts7Su5cOf9biUcIHEPA+3ejcoWycm+n9mHXeP/z72Xmn3PsawSvYYmYiQACEQgQvEaAFM0i3iD1rlvaJQlckxvj1VuBQPgajXbqlnVfuHrX4ZP3dbIbO3TokMyev1g+/Wa07DB31510YnW5rf1VqXsjH2u7srlVb7+htW254J4TvDoJfiOAAAIIRCoQy+BVu9d9a9AHpjXpFilheopo36al/a1lWbx0uXz02Tfmzv49cmaDU+XKS86PtIgxX47gNeakbDAVAmkRvOp17BP3Bq5j9aaH+YuXy4+Tpsn6DZsll2kBe9et7aScuQki2sn77xOC12j1WB4BBBBAIK0FXJ1JWn0PxrL8yQWvsXyPjLatSdP/kM++HS0FC+ST+26/3rYK1n3U1oJ9Bn0sGsxed9XFcubpJ2W0XWd/0lDA/d1wb/H4PbdKCXPzsHfaY3px6v7iW+aG4oN2NsGrV4fHCCAQjQDBazRax1jWha6hrVy14kJf83YvHLqMbtpdjBG+HgM6Ri+7L1xv8Oo2/d3on2XMz1NtS9jnH+1m7/L/ZcYfMmXmLNG7olpf2sItarth+/y7MWZcu+
y2cktf0K4fxk6cZrstvtTc0Th85HjbPYpWgNU/pbZc2KyRZM2aJbiN0AeubG5+gfx55dFuNwdb5qYUvOp7/2IuUtes3yDaDUvZUiXk3MYNkrR48JZP77ocPvInWbR0pRQuVEBaXthMalarJBOn/S4Tpv4qO3fulormDsxWpmuNksWTXpDMXbDE3gW2wFTyaTeONapUkCYNTz3qwsXtB78RQAABBI6fQCyD15m//yVffjvCjLGUTbrcer2ULpm0dwj3ep7cueWx+7uY77ys8tecefLzL9OkeLGi0qRRAxk9bqIsX7la2ra6zHxHBboynb9wifzx9xxZZFoF5Dbj1lQ1PYdoeFu8aNLvnzXr1suP43+Rtes2yB7TdVupksVtV79nnXFaEPitdz6Q7aYbYK2Y0S6P9R/VJUsUl6uvvDi4TKTvp0HzL1Nnyj8LFssuMy5PjWqV5cIW58gbA96jq+GgJg+OJZBWwau7gdC9/+at26X3QHP+79gldWtVk07Xt3Ivya7de+S7MT/LkuWrbet07fKwqulm+fLzz5Hs2bPZ5bSic775DGp4q1N508pEx23WVgH6WdYpkutNXW7Tlm32/VasWis7zXtri57TTqol55z532dVl9MuGPX6V7t2W7dhk73erlOzijRuUE9ftlMk5XLL8hsBBBBAIGMLuDqTcPU5yX0P+q3TOffsBvL9mImydMVq28Wt64L/WN+FK9esk2HDR8l600vM3n37pYAZIqqQCRLPOLWu/R6cNXeBjB4/xXZB3LHtFUkOmJZV61sWL1tluxStXrm8/U70hkTeeh0/9U5J3jDOnujYt3+Yui2tPwsd//bvfxaaY7HG9k53at3/hvpQY+2+WetgV61db1+vZq5xtD7MO47msG9GycrV68wxON0cizrBPdc6sGm//WWui8oFu5b9dvQE0bF4tRwaAo+f/KusM8fz6ftvtze4/fvvv/Lz1N/s9cuS5atMTz95zbAPpeXS85scNSQY9WdB6uP2wP3dcAU4/5wz5fILznFP7W/97GljHDeFBq+Rnme6/tr1m2Ti9N9lrmngo2MT62f/ZNPA59k+79jNh3Y1HMk5op/7cF0NR3rN7faL3wggkPYCBK8xNL7r8Zfs1t549qEkW3WBqs7UwNUFsPq42y3XBpf13lke2lo2uBAPYibgvnDDXajrBZeGqRqUvtj9Lhtg/m/sJBn502SpXaOK7brNFeSfhUul33uf2WVfeuIeO9utr9vObgJZvdDW1rRuuvS8JiZ8Pcs9Peq3K1spUzmlLW+1kuzk2jXk1uta2mWTC17H/DxNvjMXhjppf/9aiaVdJ2sAq5VuWnadXPm0sk0rzTds2hK8m0vHk21uKsRHjZ9sX3N3eRUqkF/0bjA33uzseYtk0Edf27E29L32mffRym99rHck6vJMCCCAAALxIxDL4PXbET/KlOm/Sc3qVUx3wlcftZMHtQeJufPtfA0pNUSdMuN3+fZ/Y6So+W484YQssnFTINS5vu1VUqdWdRNqLpIPhgW+Vwqa7xD9/tLAUx/fccv19rduUMeVffeDz+z3qt70k9V8x+00YahOzZucJRec28Q+7tHrddsVmX7/6vegft9pgHTrjYFrr0jf71+ztaEffyHzTOiqkw5RoJUslU1FzgYTEOl7M8arpTmuP1zr5kgK8cJTSa/VH+0RuIa/rUO7NB3rNb2CVzVw15Jayfvsw10si14XPv/GuzaQ1c9DAVM5uPlI1+BVKpaTe267zi438IMvTcvZZfbzozNcZeUL5mZErTSK9HpTW6W80v99ez1aumQxyWtuxNBKa/370KxRfWl1SWC8LO2ivN/Qz2xFqXYNWaRwQXttqp8z7w2pxyqXLTw/EEAAAQQyhYD7ngtXn6MA7nXv96DfOh29jtQ6HZ1uve4qUzdTPaLvQg3i+g393H6f6hil+h2X1XyPNjffgdrl8OSZf9pgtrS5gfDRbjfZ7esPDft+NHU7OumwWPr9rd+JGvzd2+l6081uAfuaq9fxW+9kNxKnP74ZNd40Zphux7+8o0MbKV8m5d47DpjWif2Ntdar6pQ/Xx5Tlxb490GNKhVtHZ5ew+j0+jsf24YHV5pGBy2anGHn6Q+9MW3MhKlJblpzAXB108hAr2G0m2OdtJ4wj/l3yBff/2iDV53nPVZaH6b1vW5MX+rPVOj4T+7vwmknn2gDdW0Q0+OBO8y/FfUWw8DUe+CHssIE89qaWhu1eIPXaM4zrcd96a2hsmXbdrth92/IU+vWMjcw/mPneYPXSM+RcMFrpNfcR3aRXwggkE4CBK8xgnatXb2VA7ppb5jqDWRdSBsasLrthIayMSomm/EIuC/c0At1reh9Y/Aw0wpnTZKuhv1cpOvbnXv2GeaiurEdm+KT4SPtBZ5WKOkdcslNrmw61od2n/Jy/6H2Aq99q0uk4Wl1ZatpwfPkS/3t6j0f6mIvwA+b8S50LK6du3fbVqvawkErsoZ8+o38NXeh6IVFx2sut+u4C3R9csWFTe0dgOtMq4bXzfq7zcWBtnjodH1r04K1ovxjWh8NeP8Le6GvF456bmoY3MMMBq9W7VtdbMp0kglg/5VPvx1lWwXrRWm3m/+7qcC+KT8QQAABBI6rQCyD1wGDP5JlK1ZJk7MayCUXNI9ov1zwqgtrGHtB8yZSuHAhG+Ts379fXn4j8L2iLVJPr2e+V0wF0/DvR8mM32bZlq8uMP1u5FhZsGiJaeFaS85rHhgnS1vSjvxxguQ3IdNj9wVCJn0fF8aFjvG6c9euiN9v5u+zTOvekbo5ueyiFlL/1JNkq2lR+OnX38sacze9TgSvluG4/nDHOpJCZIbgdfGylbYrPvXoaYLXguazMcOMVaU31pU0rb+1ZU12E74uMZWIr5kKJp283a15/w3j7Wo4muvN0aby8ntTiak3/t1+w9W2B5nlpuXrMHM9rK3Q9VpRK7y055Vxk2ZIGdNLy92ma2Qdr221ac2uXQnuNd296TxtlatTcuWyL/IDAQQQQCDTCLg6k9D6HAcQ7nvQb52OtnDVm+f1vTQ81ZuXIq170fK4xhihY7yGC1617mbQR1/ZG/1uuvYKOaV2TRO87rE3ves+VTCtKR/ofIPdTW+9jp96J2cVj7+1tbBeB2idk04avGo9U82qlcz/Fe1Nld5yf/W/caY16kwb1Op1Q0kzFMqadRvtcdIATANWDVp18hO86nq6jbNOP9lep+TLm0f+mK0tD7+1x0oDea2D22H+jaGtEbVHuaZnnW57y6P+TPXiY3J/NxrUq2ODcj1nOt94dbCRiva68tzr79qbK8qYGyK08Y03eI3mPPvgix9kxh+zbSOdm9u1tC2p9bwY/Mlw2wJeRVzwGs05Ei54jfSaOz6OAqVAIPMIELzG6Fi7INUbruqmXeVAaJDqLrxCg1ddJ7lt6WtMsRNwX7h6F/9J5gJJp32m4le7ONOLO23ZeZu5eKpput3Vyc9Fum77pcfvthdiuo3pv/8tH375P/v8tR73H3WxqMvo5Mqmwau2HnXPc5mKqEe6dpQsppIqNHgNr
Hn0Tx0QXgeGL2G6CX787lvtAu4CXS8W9YveTXrRqHdenXRiNTO27X/d0j1jQla9g6rtFRdI4zPqiXaJ845p7VrBtBx64I4b3er2TsxHnnvDBrcvPX5Pit0pB1fiAQIIIIBAugjEMnjt+dKbthJIx2/VroAjmVzwqpVVTz7UzfYI4dab888C+eDTr6VcmVLS9bb/vle0xeszvd6wFVxPP2q+V0yLg3CTdif8Qu9+9qXH7u9qu/nSJy6MCw1eo3k/DV01fK1tWuXeYFrnukm7O9auhnUieLUMx/3H4qWBVg7HKkiVIyGeWy4jtnjdYVpid3+hr91Fd+Oc29/Q3z1fG2RbmHZse7npBvhE+7L7N4w+8Qavoeu65+GuN13LEW3J06n9Vba1u1ve+7vXW+/JqjXrTTjbWurUrBp8yVVuafeJF5qhMXSKtlzBjfEAAQQQQCBDCbg6kuSC13Dfg37qdPS69cXu3ezQUscCDPddqOu4+r9Igtcvfxhr63+0VZwGr27SrnN79X3P1iX1MnVM2jubq9fxW+/kth2vvxeZ67ovjIdeI3gnrcc6v+mZtuWwm/+isVltjNpcfr4d/srNd+eJ9nrz4JG6Kz/Bq7dnELftz01r14mmm+HQ7pC1UYOGsvlMa2Xqz5xWfPx254MGry1MI5kX+w4R72fNtbS+zfQYqMNkhAav0Zxnrh5Ve3jRnl7c5Mqgz13wGk0da7jgNdJrblcGfiOAQPoIELzGyNldSMWqxWvodmJUTDbjEfB+2Xlm24faLUmXjtfY8VHda34u0suVLiEPmaDUTXrBqJVLOr361H02oLRPQn64srng1TT6kf7vf266YVwiOkbFDW0uladeHmDXci1e9YkuN8t07ajjgKw1d2pp17/aHYpO2sXJE/feZh+7C3S9a/DBLh3sPP3x8VcjZKoZ06JRg1Pk2isvDM5/qd9QOwbGNSZ4PdsErzq+yegJU+zrpUoUDS6nD3QMA510u8fqDsYuyA8EEEAAgXQRiGXw2s+Mn7pi1Rpp2rihXHRe04jK74LX0qZV2123d0yyjo73+tPEwPdKCXOHundab77PdOrWqYOUKR3oZkzHhtVWr1rBonexa7fFOvaqTo+aFq/arZxOyQWv0byfjhW70uyr7qfur5vMV648/cJr9mYtglenkpi/M2Lw6m3Jql0Nu8/EJnMj3SwzNpr27LLN3LCgLUpXHqnQ1DHUtPJQp5QCzkivN/WasM+gj+yNebpNbX2i46ZpuKs3pZpeu22PLvf36K0v2y6NC5shK9zkrim949SmVC63Hr8RQAABBDK+gKszSS54Dfc96KdOp6yp03nYU6fjZCP9LtTlXX1hJMHra29/ZMZhX2XGnWwq55/z33WndlX8QI/XbHf92mOEtv509Tp+653cvsT7bw2d55gxMheYboTnmaG+tNtlna6+7Dw7Vu7+AwfkoZ6v22GwtOGC1qO5aZFpJfy6aTmrY9S/bIYG0+6G/QSv3iES3La1S1qtb2t5UTPb052bH/qb+rNQkeP33P3d0OD1hqsvlZdNXecac72q18p6M8OTL/e39ao9H7rDhq7e4DWa8+zQ4UP2nNRz9X7TQr2iaanupmXmGvzVAR/Ypy54jeYcCRe8RnLN7d6f3wggkH4CBK8xsvZWAoS2YnUXWfpWWsmQ3Biv+jqtXVUhfSb3hVvYjI/R1YSsOmk3EKPGT7GB6JP3dbLdsrnS+LlIDw023V2Kus1oglddXivHXnhziO0KWC8StKw6ueD10KHD8q7psuJvU5mmYwfoxXf+/HlNWLvUXoDGInh1dw+6uzD1/evVqam/gtMmM1ZY0UIF5YJmZ5oyBCrIgy/yAAEEEEDguAnEMngd/sNomTbzD6lVo6p0aNc67D5pt2g65cqZ0/bw4ILXsua74U4Tonon7T548rRf7ay6pks177TFfK8UNt8rzZucaboiLSnjJ00VDU71H7JFixQ240EWsuO9Ll663K4WSfAazfv16T9E1q3fIJebboYbNTzdWzTp+bJp+WuCX4LXJCwJ9yQjBq+TzJhUn3072gSueU1lUld7TP4xlZWuezNtLaJdqOXOnVP+nB0YjzmS4DWa6019U+0xZdL032XWnAX2sTs5dAiMDm0ut+M4a28pOmkomz9vXreIHX9Wr9NLFC1iKqDPsfO9/+aKpCVucGM8QAABBBDIUAKuPie54DXc92As6nQUMdrvQlcnGEnw6lrUhbaS06zxwZ6BG/5cDxEuePVb75SIJ8TmLdtkgBmLfu36jVK5Qlkz5m37YM9ruj8akmtY7iYd4kDHm9c6Mu2NTlsH+wlemzduIFddnHR4FddjR+tLW9huhd17hv6m/ixU5Pg9d383XPD6s2mxrOP0aohf1AwJN9CcW9ptt4bp7u+F62pYx1p216zHOs8OHjokjz7/pt3RR+68yfwbtnhwp7UL7BfeHGyfu+A1mnMkXPCqGzvWNbfe8MiEAALpK0DwGkNvdzEV2q2wVhDo2K0ucNW31GW0VWu1yoHxinSeW5/WrqqR9pP7wvVeqGsXw0+/OlB2mlYz2iWIdq3rplGmb/8fxk6SUiWKyWN33exmi+tORu+OesncQadTai+AXdlci1f3ZtpdyeBPvnFP7W8XvLqKKL2Q1C6FCxYItPaZ8uss+eTrkTFp8eqCVw19dbwCxnJNcih4ggACCMS1QCyD1xm//SlffTfKdsuvLVGLmWDEO803rVGHfPi57SJYg9A8uXNJSsHr77Nmy2df/5BkLFfv9tzjgwcPyjMvvWFbyd14bSs5sWZgqIBNW7bKK2aMWJ3CBa+VKpaT2zte5zYjkb6frjDsq+/kz7/mSoPTTpZWl18U3Ia3e2OC1yBLQj7IaMGrdq/Ye8CHop8L7bpXK2h1cv/WOPfsBnacM62E1Env7t+6bYckH7w+aCssddlorjd1ee+kn5kJU36TMT9PtbMfvtNUjpoW8K4rNu9Yrt71vI/d++u813v+Vy7vMjxGAAEEEMj4Aq7OxFuf4/Y6ue/BWNTp6Hu476JI6l50eff9G0nw+rGpu5lq6nBCeyHbsm17sNezZx/pKgXy5U11vZOWLV4nbRBxyIRX2hNHCTM2vXf6ceI0+XbUhCR1XO5aosM1l8vp5uYuN03/fbYZ7uuHJENv9R38qcxfvMx2Sax1XG76xIxBP2XmLDtWayfT1axOg814rX/8Pc+M83l08OqOlfYKp73DuWnfvv22BzodD1hDYOrPnMzx/+3+brjgVcPUx3v1k9KmJ78iJnjVmxEfu+sWU+9b9KjgVUsfzXmm29Vr3xtMjzINjvQoo9twdcj62AWv0ZwjyQWvuj03JXfN7V7nNwIIpI8AwWuMnd0FlW42tOWrXpy5yRu46nwXzIaGtm55fsdewH3hhl6oj/tlhgwf8ZNtndP97lvsxZy+u/ty08ePdrtZSpcsJgdMBXD/9z6XhWbsifQIXvW9dYxYHSvWTS54dReUGrg+ff/ttisVvVDtN/RzWbB4eZKL0uSC4WN1NeyC1/UbN8uzfd6xFep33XJtsCuXzVu3y/CR
P9mitW91seTMkcMVk98IIIAAAsdZIJbBq96g9Nag901Ys93cwVtCbjAhaKGCBeweatfAH3z6len+d4ucUvdEubb15XZ+SsHrBtNVcO++ge+V2zpcKxXKBboI0+3/b3Tge+XqKy+2d7T36hPoav+eLjebrkuL2W17uw72Bq+/mFa035vWtHly55buD3QNjq0e6fvlMN9jP0+eLiPGjLffq506trNl0+7ePvniW/nbdO+vE8GrZTiuP1y30qGFOK9ZY2nRtHGS2YOGDhMd59XNDxe86jL6euh4sEk2FOWTvNmjXCGFxcNdx2q323pNOnrCVDsuVY4c2UW7I3Tdm/V49W0bxrrrOd28dtn31nuf2XfyBq/eXlq8w0dEc72pw1Ks37hFateoHBw79sCBg/LQs6/bylTXWmDoZ9/Jr7Pmysm1q8st7VoGQ169JtfuFuua8LjhaSfZMiZXLvsiPxBAAAEEMo2An+/BWNTpKHA034W6/Dsff217fvB2na/zJ8/8U4YNH2XqloqbOqabdJZMnmHmfRO4ufEhM3yTCx0/MsNCTTPDQmlPED0e6GyXTa5ex/tdmVJPa3Yjcfpj0EdfyV9zF5priCp2rFtXt7THDJHQz1y3aHet3sDTXUtoy0Lt2jW7GZtXrzle7j/UDod1xql15frWl9i9/fy7MTa01l7hHjDjvubOlVO2bt9hx9DVaynvcUopeP3F9C7yqeldRK+39JpGt6eTq1dzY79Sf2ZZ4uKH+7vhglct1JBh35r63n9s+fSaWc8fnUJbvOq8aM6zAR98IXPmLRatc9Zt5jVj/ur5pd0Ma+tUnVzwGs054v6OFTLDczzz4B12O5Fec9uF+YEAAukmQPCaBtTe8DWl1qvewFWLQeiaBgcjhU26L9zQ4FUvznr0HmjuTNplKolqSce2V9it6IWYho3aKlZbCOgXst51qBeA+iWZXsHrXnP3nA4Ar12s6OSCVy2LVqhpZbBeuFcsW0oWr1hlx+7SfYllV8P6vu5uUb3IrFm1knHILrPNuBs6ruxZ9U+Wdi3/axWkyzMhgAACCBxfgVgGr7on60wXXwMGfyR79+2zO6bjrx4+/K+s37DRfhdp98A3tW8jxY/cpZ5S8KobGPfzZBnz0yRbeVGtSiX7e55+r+zdm6S16at9B9lQN78Zx7W6+f7ZbP7humnzFtmxc5cthzd43aaVKCao1W6JtWvVOifWkJaXBu5Ij/T9dDyffu98aLsb1jcoWaK4bNu+3VTS5LJl0zEyCV4t/XH9EWnw6l3OhbKhwauGrq7r6ts6tItZ+JoWwatek2bPns3a6zWsG/dMxzLTlq61qlUKHhetyNUKXb1204pMvUFv0bJVctj81utLb/CqK/V8bZBsMDdQ6PbLlyllQty2st18ziK93nTdt2mLj3p1a9qKJ71WXGG6/dMbGPVGRp22bd8pvd/+ULaYG/i0Z5nKZmy2Nebvi46bptfXWlml48O6KVy5dH+ZEEAAAQQyj4Crz4nmezAWdToqHE3diy7/66w5JrD5Xh/aIFW7Lm3coF7Y4FW7MX7bhI5zzfelfl9XMzeKaUijdU7ZTJh4R4erbd2hbisjB6/zFi2Tge9/Yce01Wv4CqZ+S0xHHctXrrU91OUyYemdN7UNzDcW3muJ/Pny2MYBS5avtstqS8b7TJfEbrz7P+fMl3c/Hq6E9hpHt73KjHevr6tzpMGrXkcNeP9LmbdoaeBYmR4N7bHaYI6VuS7pdms7e02j70P9mSoc/8n93fAGrzp+8ABzrumkvR5q74c6hQteoznPdNzVVwd+INoCWs+HUubad63pZriEuaZdbcYt1skFr/o40nMkXPAa6TW3vg8TAgiknwDBaxpZawtW/d87abCqLV1dy1dv18MpBbTebfA4dgLuCzc0eNV3cF9aehH/YJcbg2OVzp63SD4xdyRqtw061a1VVc40XQ/qHYzpFbzq+y5ettKMS/GJrVxzwavO1/J9+s1oe7eell27WKlVrbLtWiXWwau+n3b/oi0UdHwNnfTit9lZp8uFzRqZlkFZ7Dx+IIAAAgjEh0Csg1fdKw2ndLzUtes2BHdSK4U0EG19xcX2zl73wrGCV11u3M9T5M+/59rwVp/r+LA6ruq5Tc33SpbA94qGrB+b1qZrzD9YNWQqb1rHXnbhudL/3Q91lSRdDetzDXN1PNpdu3dLxfJlpfPN7XW2nSJ5P11wo3lPbTmr+6vhVqmSJeSaqy6RoR9/aSp6dhC8BjiP+8/FprVnuCm01ao3WNXw9cfxgWt2DVk1mHWhqwtmw23Tz7y0CF695dAuDzW01DHPtIthW0npWUBvHtSeU/7+Z6GtyNQ75fVGua9HjLOtQUKDV62Y/D97ZwEfxfHF8YdL8ODupRCsSHEt7lKcQguFoqVAcS/uXqRAKe5/pEiw4FY0OASCe4BAgCRA/vMmzLJ3uTvukrvI5TefD9nd2dHvpt3J/Pa9t3G7h7CSDf7Yj61mYseOZfV6k7vmTeEdHoflB418zZvIPDbe2GIX5Co9EpbyG4THmRvCwpU/ZuCUXbgI5xh3xvMwNy7VFo4gAAIgAALOT0Dt5+hn+qX3IJcN656O6s/avRcuzx/x/bPmXynQsQhTtXxJql2lrEnhVZYX7+s1Iu4ke6VgsZhFm8ziw/8q5b6V71A1BmcWXnmOd+4/FOuWbfRA93cG73NlTJ+GfmhcW3yUZeiCmEXT9Vv3aGsJtmTNliUDNapZWbNGVex4v3av8KzBH57FFH9jVClXgj4GfaSdwmuItcIrt8XPc7WwoGWhmPcJeXz8sVizelXlOkb1x0fsn+lpRMy5+v+GXnjlD4eHTZxDfm/f0si+XaQFNI/OlPDK+bb8nvH/J/i5s4U2/92a/+tcVL1iSZow+x9uykB45WtrfkdMCa9c19o1N5dFAgEQCB8CEF4dzNmUAKvvEoKrnkbUOecvHOPGiWOwoRyZRs8LPt6s5s2t8EjscjLw/QdKliSxWGiGR4/oAwRAAARAwFYCjhBe1RhYmHzx4iW5iC/S2f0vb2CEJbFI+l68V5Lwe8VMQyzO8AYJuxG2JvFX6THFxpWp9qzpj/vgEAMBAQHi/Z/Qmi5RJhIT0IuvapjZs2Z2mOjKfdhTeFVjDs2R/9tiV2ds2WHNuo29qbCZScyYhv/12LLe5Bha7BWFrU54U9Jc4s0vjk2bVIztS+tYc+My1zbyQQAEQAAEQEARsNeeji3vQu6b38H8EZO1iceZSKw7lXcLa+s5UzkWR+8L8ZU/wmSPGbwXZymxwMVhsFIIt8xWrTlEuK4vtWmpP3WP+3RJGP+LIbewf6aIRe2jtb9nPEteB7NxinKZ/aWZh+V3xNo195fGgPsgAAJhJxCuwuvzV2/pme8bypgqKcWPG+wSK+xTEF8YCcuDO49fkmuShJQ8sXWbb/bo19Y2lKUr19PHeLW1HZQHARAAARAAARAAAVsJOFJ4tXUsKA8CkYGAKfGVx2VvS1c118givKrx4AgCIAACIAACIAACIAACIAACIAACIGB/AuEqvL55F0j3n/l
SqqQulDTRZ9dSYZ2Wr987evzCj9K7JqGE8S1/+RTWvlAfBEAABEAABEAABKIiAQivUfGpYcyOJmAsvjpKdOV5QHh19NNE+yAAAiAAAiAAAiAAAiAAAiAAAiAQ8QTCVXhlM/yb932k+7n0KZMIVw7Wu9cwhypAuOm4/8SX2NVUtvQpLLqRMNcG8kEABEAABEAABEDA2QlAeHX2J4z5hZaAEl8dKbry2CC8hvYJoR4IgAAIgAAIgAAIgAAIgAAIgAAIRB0C4Sq8Mha/dwH08NkrSYiFV/bRH9r0QYitAYEfZPW0ronJJX7c0DaFeiAAAiAAAiAAAiDg1AQgvDr148Xkwkjghvcdyp41UxhbsVwdwqtlPrgLAiAAAiAAAiAAAiAAAiAAAiAAAs5AINyFV4b21j+QfP386Y0IkP7hY1CoOcaKGYMSxotLSVziUYJ4cDEcapCoCAIgAAIgAAIg4PQEILw6/SPGBCM5AQivkfwBYXggAAIgAAIgAAIgAAIgAAIgAAIgYAcCESK82mHcaAIEQAAEQAAEQAAEQMAGAhBebYCFoiDgAAIQXh0AFU2CAAiAAAiAAAiAAAiAAAiAAAiAQCQjAOE1kj0QDAcEQAAEQAAEQAAEHEEAwqsjqKJNELCeAIRX61mhJAiAAAiAAAiAAAiAAAiAAAiAAAhEVQIQXqPqk8O4QQAEQAAEQAAEQMAGAhBebYCFoiDgAAIQXh0AFU2CAAiAAAiAAAiAAAiAAAiAAAiAQCQjAOE1kj0QDAcEQAAEQAAEQAAEHEEAwqsjqKJNELCeAIRX61mhJAiAAAiAAAiAAAiAAAiAAAiAAAhEVQIQXqPqk8O4QQAEQAAEQAAEQMAGAhBebYCFoiDgAAIQXh0AFU2CAAiAAAiAAAiAAAiAAAiAAAiAQCQjAOE1kj0QDAcEQAAEQAAEQAAEHEEAwqsjqKJNELCeAIRX61mhJAiAAAiAAAiAAAiAAAiAAAiAAAhEVQIQXqPqk8O4QQAEQAAEQAAEQMAGAhBebYCFoiDgAAIQXh0AFU2CAAiAAAiAAAiAAAiAAAiAAAiAQCQjAOE1kj0QDAcEQAAEQAAEQAAEHEEAwqsjqKJNELCeAIRX61mhJAiAAAiAAAiAAAiAAAiAAAiAAAhEVQIQXqPqk8O4QQAEQAAEQAAEQMAGAhBebYCFoiDgAAIQXh0AFU2CAAiAAAiAAAiAAAiAAAiAAAiAQCQjAOE1kj0QDAcEQAAEQAAEQAAEHEEAwqsjqKJNELCeAIRX61mhJAiAAAiAAAiAAAiAAAiAAAiAAAhEVQIQXqPqk8O4QQAEQAAEQAAEQMAGAhBebYCFoiDgAAIQXh0AFU2C+9TSKgAAQABJREFUAAiAAAiAAAiAAAiAAAiAAAiAQCQjAOE1kj0QDAcEQAAEQAAEQAAEQAAEQAAEQAAEQAAEQAAEQAAEQAAEQAAEQAAEQCDqEQiz8Hr/2cuoN2uMGARAAARAAARAAARAAARAAARAAARAAARAAARAAARAAARAAARAAARAAATsSCDMwqsdx4KmQAAEQAAEQAAEQAAEHETgdUCQg1pGsyAAAl8i4B/gT66J4n+pGO6DAAiAAAiAAAiAAAiAAAiAAAiAAAhEMQJsoJreNak2agivGgqcgAAIgAAIgAAIgIDzEoDw6rzPFjOL/AR8X/ka/BEW+UeMEYIACIAACIAACIAACIAACIAACIAACFhDAMKrNZRQBgRAAARAAARAAAScjACEVyd7oJhOlCIA4TVKPS4MFgRAAARAAARAAARAAARAAARAAASsJgDh1WpUKAgCIAACIAACIAACzkMAwqvzPEvMJOoRgPAa9Z4ZRgwCIAACIAACIAACIAACIAACIAAC1hCA8GoNJZQBARAAARAAARAAAScjAOHVyR4ophOlCEB4jVKPC4MFARAAARAAARAAARAAARAAARAAAasJQHi1GhUKggAIgAAIgAAIgIDzEIDw6jzPEjOJegQgvEa9Z4YRgwAIgAAIgAAIgAAIgAAIgAAIgIA1BCC8WkMJZUAABEAABEAABEDAyQhAeHWyB4rpRCkCEF6j1OPCYEEABEAABEAABEAABEAABEAABEDAagIQXq1GhYIgAAIgAAIgAAIg4DwEILw6z7PETKIeAQivUe+ZYcQgAAIgAAIgAAIgAAIgAAIgAAIgYA0BCK/WUEIZEAABEAABEAABEHAyAhBeneyBYjpRigCE1yj1uDBYEAABEAABEAABEAABEAABEAABELCaAIRXq1GhIAiAAAiAAAiAAAg4DwEIr87zLDGTqEcAwmvUe2YYMQiAAAiAAAiAAAiAAAiAAAiAAAhYQwDCqzWUUAYEQAAEQAAEQAAEnIwAhFcne6CYTpQiAOE1Sj0uDBYEQAAEQAAEQAAEQAAEQAAEQAAErCYQKYTX12/86ZmvH/Ex8MNHqwdvXDBOrJiUKGE8ck3iIo/G93ENAiAAAiAAAiAAAiAQTADCK34TQCDiCEB4jTj26BkEQAAEQAAEQAAEQAAEQAAEQAAEHEkgwoVXX7935H3/GQWJWbrEj0uxhXga2vT+40fyextAMUQDWdO7UhKX+KFtCvVAAARAAARAAARAwKkJQHh16seLyUVyAhBeI/kDwvBAAARAAARAAARAAARAAARAAARAIJQEIlx4PXf9vhRbc2RMSfHixA7lND5XCwj8QNfvPqH3wnLWLUc6ihmDZVgkEAABEAABEAABEAABPQFHC6/nvT3J85Ynnfc+R57iXKX8WfOTW9YClD8LH/OrbBxBIFoRgPAarR43JgsCIAACIAACIAACIAACIAACIBCNCES48Hrm2j3KlDoZuSZ1sRt2H983dPvRc8ohrF4Tw+rVblyjW0OB9+5S4J1b9P7pE/ro91pOP6ZLIoqdMhXFyZSF4mTIGN2QYL4gAAIgAAJORMBRwisLriv2LTMQW81ha16hJTUv38LcbeSDgNMSgPDqtI8WEwMBEAABEAABEAABEIgmBF76BVBSl7jRZLaYJgiAgC0EIoXwmjtTKkoo3AzbK731D6Qrtx9TOtcklCZFYns1a/d2rnjd0tr8KkcW7RwnEUvgw3Mfenv6JL1/8sjiQGKnSkMJChehWMlTWCyHmyAAAiAAAiAQGQk4Qnhl0XXA4n7adNm6tXn5lgaWrcoSdoXHMq0cBFgNBU6iCQEIr9HkQWOaIAACIAACIAACIAACTkfgf4dv04xNlygg8CPlzpiE+jXNT7nSJ3G6eWJCIAACoSfg1MJrWiG6phXia2RKG3fsI/5nLtWrVp74H1LEEGArV7+D/Hw46rA1KQa5lCnvlNavvIjwf/+BmpbLZg0Im8ucveFDp7186MXrAGpTJSclT2S/jy/UYMI6h1dvA+n2Yz9KkzwBpUwSTzUb5uPHoCDZLrtEz5w6EcWNHfrY1mEeDBoAARCItgTsLbyu2LeclJhqrZCqrzO6zVgDgTa8Hoy/vz89ePCA0qVLR/Hihf3/9atWraaAAH9q3bp1eE0B/URBAhBeo+BDw5BBAARAAARAAARAIIwEtm53p6PHT1DvHt
0pSRLHGgt537pFu3Z70N1796le3VpUuGABCs/+w4gq0la/eteX2k05JCxd41C2tInpjNjbZPF1wW+lI+2YI+PAPnwMogc+byhF4niUMJ7l8JMrPG5S4gSxqfa3mSLjVDAmEDBJAMKrSSz2zzQWXPPkzGrQyeXr3gbXEGANcITLBVu6vnLfJvqyVnRVw4pBiavWcDrL19YTDpDfu/e0fnBFNVG7HQ9dfEz9FpyU7aV3TUBTOhan9K4J7da+aii0c/B+9Jr+WH6WeDGlUr4syWhQi4KUMaXhOP/eeZ0WbL+mihkcS+ZNRePbFdXyhN5KC3ZcpTUHvOnNuw9afrd6X1PjslkQk1ojghMQAIHwIGBv4bXu8Fpy2NaKrmqOjhBfd+3aRYMGDaY3b/xoxYoVlC9fPtWddrx37x4NHDiITp0Kfh/xjUKFCtOoUSMpU6bQ/0HXuHFjevr0KXl4eGh94QQEjAlAeDUmgmsQAAEQAAEQsB+BgUNG0IrVa2SDO7b8j3LlzGFT4wEBAVS7QRO67uVFLi4u5HnyqE31URgEzBGoXL0O3fT2pgVzZlHFCuXMFQtzPoutNeo2JD8/P3JN4UrDhwygmtWrUnj1H+YJhFMDT16+owu3XlDqZAkob+akZnu98fAVnfd+QdfuvaQTV5/RvadvqFXl7NSx5lfUbfYxKb5uGVHZad0OfxQi6aU7L6XYnDGl+XCRHuce0phV5+Se5/wepShPppBMfV750/g15+nQhcca76xpE1GP+nmpSC5XLU+dPBcGO3WH7qZ6pTJT70aGf9d7PXgl92+97r+irvXyfNF4KPD9R/pJiObeD18L76uxaMeoqqobHEHA7gQgvNodacgG9aIrC64sqppzLawvC/E1JEtH5rzes/OL7oXN9c9uhxNVqmLudpTMD61oac1kR644Szv+u0/TO39LhXM4zlVzaOZwS4iuHaYflouEBqUzEwuul26/pHUHb8mX8sr+FQyscyeuu0AbhXVwwzJZQkw9W5pEVF8sDFSauuGibIcXFHVLZJJC6+Zjd4gXCG2r5qR21XKpojiCAAiAgMMJ2FN4VeKpraKrmqSqz66JRwnL19CmFy9e0NixY2nr1q1aE8uXLyc3Nzftmk+ePHlCderUlcJsq1atKEeOHHT16lUp0iZM6ELu7jvEF+ih85oC4dUANS7MEIDwagYMskEABEAABEAgjAQOHz1Grdq211rZvnkD5c6VU7u25mTazD9p2szZsiiEV2uIOb7MzZs3affu3TZ3lC1bsBe3ypUr21zXERVOnjpNZz3PU4umTSh+/LB72zE3xjXrNlDfgUOo56/dqHPH9hQzZrCntfDq39y4IlO++6n79Meys9qQmlXIRl3q5CH2Tsci6/lbz+nGg9d0/MoTeukXqJVTJ5lTuxDvG/7tfp3eBnyg3WOrqVtOd/R9E0i1Bu+icvnT0Ki234SY30txf9qGC7Tz1APt3jwhvH5tJLy+8X9PzUbvIxZTqxVNTwWzpaDHQvxevf+m3Ied+HNR+jZPKq0NPjl4/hH1X3SKhrYqSN8VTi/vsbXs8r03aN7Wq1rZrnWF8FrestfGRe7XaOGO67IOhFcNHU4cRADCq4PAqmb1QmrfLm0MBFdzMV45f9ysxbIJiK+KpGOPwS6GPcLUiUuZCk7lcjg0oqW1ADvPPEqeN5/T/ok1KEYMa2vZXi40c5j2v4u09sAt6tPEjeroXFhs/+8ejVpxTgqsvzXIqw2m/6KTYiH2ilYNqKDlmTrhL7rqDdtDLLrO7lpCuMiII4uxVXGXWUel+LpmUAVKK9waI4EACIBAeBCwl/Cqj+u6aei/oR66spgNrcvh169fU+3atcnHx4caNWpEsWPHoVWrVpIp4XXJkiU0YcIEGjduPNWoUV0b8+zZs2nOnDk0adIkqlIldB9UQXjVcOLEAgEIrxbg4BYIgAAIgAAIhJLA23fvqHqdBvTG7y1VqVyRVq5ZS7YKr1evXZdtlCtTml68fEleN27C4jWUz8Ne1VhwDY3oqu//559/JiXC6vOd9XzKjFk0Y9Yc2rx+DeXLm8dZpxmmebGQGCAsIJuUy0r/CSvWi7dfiNA3yYTg+sLAGWIi4eK2uBADs4v9PLcsyWWZ2Vsu0/qDt7X+ewlLTL3hhXbDSU4sCa+8r9l8TLCYWkcYmcSOFYM2HLpNpoRXtogdvPg0tayUnX6p9ZVGh61pO0w9TJUKpaXhrQtr+Xwya/NlWilcDa8ZKPZMUwTvmfZZ8B8dufiE8mdLTpULpSM2dPmS8MpWy20mHBTPMiXxfG4/fg2LVwPSuLA3AQiv9iaqa0+JrsZWriys8j29e2HjMtzM+Nn/yDIQX3VQHXT65ughCrh1M0ytx82SjRKWKG1TG1fv+dLoleeolXjhqK92uAH/wA/UcfoRKvl1Kum2gvPmbr1CRy49oRHiBcTuao9efkLxYseiUnlTU6faX2nuLNg/Pn8JxBaVr9++JxYM7zzxowOTahBbV7Ll5vDWhSiLiC2qErvVmLD2PLUVsVYrFEgrs41Fy55zj9M7ETR+hrBSjRXzs1qq+ishFiH6l6ZqW388fuUp8eKELTw55UgfHM9ipmgzkRAi338IorUHvYXbjqd0/PJT+QIt/lVKaiysSfm+PnFby8TXTZfvvJAc8onFUZvvchq4sTCeg76+ufMWY/dLXnvHV5eLBVWO3WrUGLxTfoG1b2J1zS3wz9MOyxits7qUUEVNHtXiwtRCYP2hWzRl/UXqIr7OavaFr7NMNo5MEAABEAgFAXsJr8pa1RZrV66TP0t+g5iuAxf3I09vT7KlHf20WXBt2LAhjRgxgsqVK0fTp8+gv/6ab1J43bNnD90SMY/Y2jVOnM/vl23btlPfvn2kC+Km4it0S+nVq1e0ZMlS2rNnt7SWLVOmDPXo0YMGDBgQwtXwrVu3acaM6XT27Fl69eq1tMBt0KA+1apVS3axd+9emjVrFrVt21aKx/p+p02fTgf276epU6dSxowZia16p02bRseOHaO7d+9K98hNmnwfop6+DT7/+PEjrVy5kjZv3kIXLpynLFmyiroFqWfPnpQ8eXKD4gcPHqSNGzfSgQMHKUWK5IJrIzHmfDRx4kTq3r275KsqeIov9rns4cOHhAXxGypbtqwQs2tQqVKlVBEcTRCA8GoCCrJAAARAAARAIIwEJk6ZTrPnzqeZUyfRxUuX5bktwiuvl5q2aktsFbhv13bq9ltvk8Lrc7EemzBpGh06epTu3LlLRb4pTC2bNaH6dWuHcQaoboqAEl5Hjx5t6vYX83h9zhavlqxe79y9Rx27dKfmYl0dEBhIy1eupsfCS07F8uVo2KD+9PTZMxozYTLt239AhCXJKNz2VpNxWmPFCrYk5UFwTNWJU2bQqTO85n9FBfK7UZNGDaheneA1P5eZLsTQ7e47aeWSv2WMV32/McRe29r1/6NzYn2dL29e6tqpA1WrYpulro/Pc2r1Y3vidtnNMI/VJWFCGjKwH5UoXszh/fMco0J64RdAdYbspkLCCx/vc24QnuwmC492nFjMY
w92uTIkId5rzJXetCekTjOOSMtYU1aaphiofd1+TfLTQmF5ycKh3j0u7xvuPftA7vdmFnu2pcSe8Pdls4bYD9179qEY7y06fd1HhG1LQAWzp5D7sRwrldM+z4e0SFjh/ipc9x688EhajPJ83bImp86181COdIaxhS8L0XPribvkIdpNEC94n7l+ycyURTDgtHq/N/1P8OG9ZU68l8uWrH3FPDix9WobEa6uf7P8Yh87Nc3bdpWW7PIyKbyq/c+RbQtT+fzB+8/cRqCwMq7UZ4dkz8Yq+tR20kEplOpD4XFe1W/SSwvXk9eeUa95JywKrx9F7Leus45JI6DVA8vTkCVnILzqIePcIQQgvDoEa3CjP/UcIU8WTh5i0IsSVDmTBVclwPJ5n84/aGX1lq/G1rJaIZzYhYCviPnx0e91mNqK6ZKIktSub1MbZ2/4yP/xd6//tXyZqspv/T9Q1QHuVLlwOhrWqpDMHrHsjHTZwC/VWMJFSDbxpdV+z0fyXgnxMp7QPjiWKLvKbTX+gHSHyy8/DvCeKml8GvtTEfpHvPjmixcgB3znfJVYxOSX1O/fu0nBlvONRcsZGy/Jly0vSHhhohK/gPneMCHm8ldGlhILvLw4OCDcRHCMU3YrwalXQzeKHzcWjVvtSf8evyvHXjhnCiGqvqT7z97SN8LHPy9k4nxa0CoRk91CFMudknxeBciXJ7e1pE9ZyvppcWA8B77/pVRtoLsUcjcND7m4LduLYwATbRhaiVImCV7QNPxjLxUQi7IfvstB+8Qiid2LFBCuMgplT26wONp09A5NEDEM+gpLWuNg8H9uuSJdZLCLkp4NDeMVfGm8uA8CIAACoSVgL+FVCabWWqoqoZbHrbeQVZazoXU3/OHDB7nBkSxZMonEkvBqihnH8ure/VcpIG7YsEG6HzZVjvN4U+7XX3vQvn0eQphMQcWKFaMTJ07Qu3f+sgq7DVMxXu/ff0DVxcYMp5IlS1LcuPFkPb4eNWq0cHlcWwq1lSpVoqKinYULFvAtmQLFpk/ZsuWk+Pnvv//S+/fvqWbNmvTo0SOqVq2a7JtFZL7mmLYswJpL7IKZrX/TpElDRYoUEeLrRSE+e0sxl4VTJUAfFRuIHTp0kM3weDkdOXJECrVcXo2Z89k9M1v4cuKxx48Xj1i05cSWwxBfJQqTPyC8msSCTBAAARAAARAINYHrXjeoaq16VL5cWVo0b7YQwIJFWFuE1zVC9Oo7YDD17d2TOrb/kep/3zyE8Mrrs/JVatLDhw+pVo1qMn6m++498vqPYYOlABvqSaCiSQLhIbyq35+0adPKvykqlCtDV65el3F+i4m188XLlymduJf36zx06PBReubzTLrxZXGU073796lspeA1f5lSJSmeWBfv3ush700aN5oa1Ksjz/sNGkqr166nk0f2y48f9f3y71TlihXo1u07sl+usG7VMipcsICsa82Pl76+NGLUWDpz1lPGkmXhOFmypPTjD63ILV9ecnT/1owxspRpMU4YXjz2o4oF08o4r49fvJNDayGMY1h4TPfJutLceBuN3Eu+wgXxzjFVzRUxyFf7uskTxSX/98F7hzWLZZT97z79gIYtPSNDnPE+5y0xLo5DytaZ49sV1QxgtgmBdPRKTym4lnVLQ7eFGMoCLre5dnBFaRiyWew/cgzVVMnii78fP8o93DNePlIg5X3U9YMrkUv82HJs+nBr3BfHQGVBl9tb2KuM3PvksbGYy4Iv53O5zKkSyX1QboTd/r4WVq9JE8aRbVoSXh89f0uNR3qIvVxXGv1jEbkXzJXYSIitVo33x1+9DaSag3ZRjWIZaECzz/8dsFdBJTSrPW1Thi5yQOIHC8tjBDc2XGpRMTuxEQ0sXhUdHB1FAMKrg8gqa1dja1W9mKoXZJVIayywqnaMRVkHDTvaNvti1VK7zD1Z01Y2tRMa4bXKN+loUIuC0uJS785BiYFKeOWB8FdC/KWWSmERXi8Jlxsdph2hxmWzyK+mVJu/CMtcFlR5ocHiqTWJ69x89MrApYNaZPDLd8LPxbRFBS8WeNHQoWZual05h2yeX8bX7vtS/6YFKGPKhDJPCZvqJcqZoRFeh/xzWi4m/vqtFH2V8XMQ+JtiwfOD+IKL05/dSsgvxcQHU1Su9za54GGBWJ94gTOnW0lKLY6cOOB724kHqYxbahrdtoiBi+Xf//qPjgprZl68TBJzRwIBEACB8CBgL+FVuQjWi6jmxq8XXU0Jtba0Za4PlW+N8MqxXteuXSc2V3yldSdfDxw4UIqhqh1Tx02bN9MgUa548W9p7tw5FCtWLCnG/vbbb8TWqyzGKuGVRc1169ZTmzY/aF/Ze3l5UYMGDYitZNm9MacuXbtKy1YWUlOmTCnzlAjKgnD79u3o+PHj4tie2rVrJ4TfX2WZ58+fU79+/Sht2nQ0fPgwmWf8g0XlXzp1ohTCsnXkyJEinlTwu6lPn760ffs2Wrp0KRUoUEDOoVSp0jL2rcrjti6LjaYmTYItgPXCK4vAbHW7QIjFLD5z4vhb9erVk+dslZsgAVzoSxhGPyC8GgHBJQiAAAiAAAiEkcAPP3Wgg4eP0N4d/4oPxjLbLLyyYFWmYlVKnSqVcE+8Xn6UZkp4VTFkf+nQjvr07CFHzVaGPXr3pfTp09HYkcPDOBNUNyYQnsIrx/TduHYlZc+WlVhkb9rqRyFinqWq31WmWdMmi3V/TLp77z6Vq1yNChUsSOs/7Seu3bCRVq1ZR+1/bKNZqV677kXVhIGG+hiA52VO+OR7m9atkuIony9Y9A+NGjeBWrVoRiOGDOQsm9KU6cLV8GxhXbtFxDjO+TnGcXj1b9NgI6jwGWEMM0LEeH3ySXBNJwxd2BDmhTBi4WRJgGVPhd/1c5fWn38LgdKapIRXFnoHtyyoGZc8ETFOG47YK/cW53YvRcmEuMmJ9z5ZkOzZKC81KJVF5qnQbVtHfqeFMGMr0l1CHOX4tPmyJJN7qLyXmimVC83pXpKSCEGUxVHe72QDnhE/FJZiLzeovP6pfU7OU8YuPE4uy8mSq2FZQPfDkvDKxXafeSCEYU9p9JJXjPehEGPZMyLvN3es+ZXB3jJ7e/x9/n9CdM0vxNeMul4+n35JeGXxtrEQyV0Tx6fFv5eR3CG8fuaHM8cRgPDqILZKSNWLq9yVEl6NhVRlBWssvHIdc23xPST7EIhKwut8EZw8jy44+dhVwVai/DLlF6wSXvUvSEUpLMIri4wtxddgr8UL63/C4jOmcIHCX4M1Ehaftb/NqLmY4IDq/IWUcUrqEkdbVJgSXqeIBcV6saBY1rcccYB6ld4JC9Iq/d1Jb9Wr7umPKoaq/isoY+GVXVe8FF+jGac4sWNqX2bxF1y8GOGvuNhNRq4MSenavZfyyyi2IOY0qUMxYhfI6ssrzuN4DmyhzIs0tupduvuGXOTwfGIIz8zsqriNcIXBX6w1FK6TG5TKzNVo45HbMqYsn/Pz4+eIBAIgAALhQcDRwiuLrOe9z9GoNmPldL4kunKh8BZeL1y4QM2bN9dwVxIuyH7p2JHy
5LEcB+kPIV6uWb1autjVx4pi4ZZdmOmFV61xo5M6depKwVcJtO7u7tS7d28aOmwYNRIukzmpfrZu3SotU48fPyEFWBZHx4wZHcJFsFEXX7zctWuXdDX8xx9/SLHU29ub6tatK61Yhwwx9NjCro4XLlyoWbyqudauXYdGjx5l0Neff86hP/+crQm6BjdxIQnYU3jlTUGf5y9CkI0bNw4l/2QBHuImMkAABEAABEDAiQiw69bO3XtSty6/0G/dusiZ2WrxOnzkGFq8dDktWTifSpcKdnVpSng9cuw4tWzTjjgG7OTxY6RnEidCGSmnEp7Ca93atWjqxOC/XxgGuwaeKuKlLpz3J7EVrEqVq9eRrog9Tx5VWSaPXM7X9xWdOOwh75sTPtnSdf6fM7Q2bgsX1hWq1JC/Z3//NUfLt/bEVuHV3v1bO87IUO7qXV9hAXlIuhZmA4pV+7xp5b6bFgXYG8K4oo0wrihfIA2NbPONVdNQwuvfvcsYuPtla9JBf5+mwcLIpmqRYM+A3CAb21QfuJPYAGdIy2BviF1mHaVzN54La9FvqHS+1FoYNP0AlMWr3o0x399x8h6NXH5Os/pU+6j6PV3VjrEwaU/hlccxa9NlaYGr+uNjk/JZqV21XJQwXrA1LucpEdd4r5jvqfQl4VUJ2FM6FqOiwpqYk/H8VFs4goA9CUB4tSdNXVtKSLWXxatxO7qucGoHAlHJ1fD2UVU0lxA89TUHvGn6/y7RzC7fSr/+SnhtLVzfdqiR24BOWIRXbmjpHi+a++9VzeJT+eaf2qk4FcnpKvvil9fl2y8N+uULJVbyuSnhVdXbN0HETxWirj6xgPr4xVvaPrKqZil68MJjOu31jK6Lr6JYDOaFEid2XzyoeUF5biy8qi+l5E3dD2PBc/neG8Tuf/Upq3DtXEjETeC4Biv6l5eWtrwIYv4shHOMW31SlrP6xcFTX3/qMvOIdJ+sL8tWutyfXjTW38c5CIAACDiCgKOFV+WCmF0Hu2UtQCs8lslpmLJ0VfMLb+E1SHxVxNagvsLC4fDhw7Ro0d9044YXjRs3XsQpra6GFeLYtGlTunTpEp05c0a8sz7HdeKCJUqUFBaln10Nc9758+eJRc7Ll6/Qy5cvxb8X0lKU7507d44Pwk3xO2FBW1y6I547dy6x6+TSpctQ7ty56Z9/Fssy/v7+1LHjL3Tq1El5/c03RUSZ0sL9cA3KkCGDzDP3g9vfunWbjDPL8W05VizPlRPHxa1fvz7t3LmTevXqJa1+eY76tH37durTp48mvB46dIg6CStaLt+mTRt9UeLNMbb+/ZL7Y4NK0ezCnsKrx/6D9FOHTiEI6q0wQtxEBgiAAAiAAAg4CYG3Yo3D4lZgQCDt272NEn7ytmGL8Hrl6jWqUbehtGqcM3OqRsaU8MrrsR9+6kgnTgavx9gNbflypalOrZqUKaPl9ZjWME5sIhCewmvnjj9T79+6a+ObM38hjZ80hTasXk4FC+TX8k39bnBs1m3uu8TfCZfphVjzcyxgjgHM6cZlT3k0J7x2/Lkd9e0VbEEtC4of2fPkp29FXNYV/yxUWVYfbRVe7d2/1QONJAVHrThH2/+7R1N/EfubItzZe2G4YUmAZcvRgX+fopbCLfEvtb6yahZKeHUfXVXGUlWV/tp+lRbvDP67LE/mz573+D7vr7JhiAqHdujiY+q3IPj/PZzPRirlC6SVxiEqPJsSXieIkG36vUreN2035RC1r5GL2nyXk5Rgyf2Y6pfz1w+pKMPX2Ut4Vd4OeY+1tzBgySni5z4VFr+r9ntLS91y+dPQqLafhWy1f6zfD+Zx6ZOahylXw0ogN24XwqueIM4dRQDCq4PIKstWbt7YilWJsnyPLV/NxXjl+7B2ZQqOT2+OHqKAWzfD1FHcLNkoYYnSNrURGlfDxi9o5Qc/PITXBz5vqMmofdSsQjbpwoK/tOJ4CMoClifPFpwsMBqnakUyaG6B1Ytzx6jPcRBMiauqDV4Y8AJBibKT11+gDYduy9vsmjhN8gTSbca2E/csCq93n/qJL7zuq2a1Y2oRA7dOiUzaNZ/cffpGCruPnr+TX6KVEV+SjVl1TsbZ9RDicCwjcdigsrjgWLVsjWz8xRpb8LJr5nM3fYS7j7jEsWxjCZNYjsvbsVZualUp2J2ycXu4BgEQAAF7E7CX8KoEVlOCqrqnxm6qjLoX1hivqh11tMbVsCqrjhyftHPnzlSoUGFN7FT39Mf69RtI0fKscDsWg90a6FKVKlWkWzJlybp58xYhZA6QJfLlcyO2kOU4S+zKl5MSXvl86NBhtGHDehEDdh9dv35duhQeNmw4NWzYgG/LxJt9fJ8tZA8ePCTdAvMNZbUaXMrw55s3b0Rb7UVc1/OUMKELlShZglxdXWm/aIfjwyrhlePUshvjLl26CIG3o0Ejq1atEqLrKE14ZZfK7O54wIAB1KxZM4Oy+/fvp67CdfLvv/9OrVu3NriHi2AC9hRevYWQvmHTlhBo06VJS82aNAqRjwwQAAEQAAEQcCYCfy1cTKPHT6Rm3zeWMVfV3FauWUv/bttBI4YOkm5jS5X4Vt0KcWzXsQvt3bdfunTNljWrdn/A0OFSOFs0fw6lFGunfHmDvaLwemz33n20dbs77TtwkPz8/GSd8WNGUuMGwSEXtEZwEmYCUUF43bBxM/XqG7zmL5DfjXJk5zV/Mlq0eImc/5eEV2PBlyuFp/Bq7/7D/NDDuYGdp+5Lt8Nsddmt7tda76YE2ApC6DwlDEE4viuHPxshLF5zpU+s1TF3Yk54nbHpEq0WVrYsfuZMZ9jOebF/WFxYaXar93lMt8U+rLsY75FLjzUjlNwZk9CUjsWlW2ElvOoNYHhMV+8J4XXyZ+H1wPlHNGDRKTlctnrVJ+7XTXjla1c9t4zzai/hVRndqHB5+j67/3lMxpddM7ACpRXxddmjYNUB7tK74LBWwRa/+vLq3JLw2mfBfzIGLrtrzpTys3fFCWvPS6OYiUKcTp44HuXOkEQ1hyMI2I0AhFe7oQzZkBJYjd0KsyjLsVuV4Mo1uQxbtX6VI9hnO+ep+rB2ZRqOTYH37pLfQY8wdeJSpgLFyWD4ovpSg+e9n1OnGUelOwX9i10JnOy6Vr1czL2gbRFelcXqGOGSoowIwq6SuxAj/1h+ln7/3o3qfhIgja1FVVklti7oWVrGIGhVObv0wa/uW3M0JbyOW+1JW47d1b6mUu1wHIIKv2+nAtmT06wuJYhdGdcevEu68V0oxqDiyrJQ2nzMPovCq2rT0vGhz1viWA1Z0iQyKMbWrQ3/2CNjAizvV07e47itW47dofL508pg9foKympWubJgwZXjFvAXaUld4uqLShcm7GbjjzaFiRdwSCAAAiAQHgTsJbwqF8Js2arcCuvHr8RXS6Irl1ftNK/QkpqXb6FvIlTnloRXdpvr4pKIfv65vUHb7LK1iLBayJgxo7AO3WpwT38xaNAg2rRpk7QQTZPm8/v09evXVKpUKQNXw40bN6arV68auCVmS9uKFSvKJpVAyxdK+GQRlS1qly9
fLsTVg5Qkiek/BNkqljei2EUxC6qHDx8KYYHL7e4TG4ndunWl+iKu7FDhQphj0nLi+LODBw/WhFe2/OW4sywQL1nyD8WOHVuW437Y0vb48WOa8Hrv3j1hFVyDvhexXwcLHvr099+LafLkSQaxX/X3cS7iJIm4wuldDb9oBxcQAAEQAAEQAAHbCQz7Ywz9s2y5xYoct9OSS1i2mL0pQi5YSjWrV6OZUyeGKMLrpB07d1PXHr3E+tKFzp44bHI9FqIiMqwmEBWE15r1GtHlK1dp59ZNUnTlyfGav3jp4DX/icMenGU2xqu9hU9bLV7t3b+cbBT6wXt21Qa6S3fDf/UIaVSjBNgVHje0EGZstcnhxDhcGnu7+1Iyt6/LlrZsccuWnmyZaUvi/ePpGy/RwfOPqVfjfFS/ZGYtxuuXhNd7z95Qs9H7qEk5ITbrhF1T/dtLeGXGnExZsKoQdNOEV8VvhFfFk9efUY8/j1NvMa96Yl7mkiXhVcWwNVeX802F6rNUHvdAwFoCEF6tJRXKcko85erGlq8swKqkF1z1wqyxaKvK42h/Aq/37KT3Tx6FquHYqdJQokpVbK6r/Onzy3rJ72W1+kogtbfwquIG6F+qH8VC8Pe//qPjl59aJbxuPXFXxjvluAMs2C4WsQmyG32RpU3EzIkp4ZUtZSeuvUB1S2ai3xu7aTWX7blBc/69ornvUK6Ui+dJSZN+LqaVm7v1ioyrasnVsFbYwgl/9bTpyJ0QIqhqv0vdPNSsfDbZAsd8rTt0t4zNOr3ztxRXxIrlxK6PO808KhdgO0ZXkfEJlJieQ3wFx4u42LFiyLLqd4AF2WVC0E2cII7Mxw8QAAEQcDQBewmvylKVx7tp6L8mh81l3IQwaykpN8NfEmgttaG/Z0l4ZaGSLUZXrFghRMZ8WjVlxdmkSVPhJneglm98woLo2LFjqUGDhjR8+DDt9oQJE4RgucRAeGXXw5z27t1DCT65vlOWtcaxYD9+/EiVKlWir776iq5cuUJFixaliRM/b/Bt27Zdiqs8vvziS3qVOF7srVvedFK4vIsTJ+R7ZN26dWKcw6lHjx70008/yWp6t8XK4pVv9BAugvcIMbdmzZrSkpVdKa8W8WxZaOY0atRoqlOnttxIKlmylLS4ZQFXxbr18fERFroNiY+WRGPZWDT+AeE1Gj98TB0EQAAEQMCuBFjsunP3Xog216z/H+3avYcG9e8jLF6zafE5fXye09HjJ+R1woQJZb3DR48Jq9U3IdoY+sdoevjwIc2dNZ3SpE5FbMm4+d9ttP/gYWrVvImB61kl3l7xPGVyPRaicWRYTSAqCK/5iwTHBT52cK/m7lqFg3BN4frFGK/2Fj4dIbzy3w979x2gQsLlctq0wQLhrVu36ZrXDapYvqz8uPODcNG7X1iBZ8qUkXLmyG71M44MBY3dDZsa056zD2joP2e0PcpBi0/RvnOPaGnfspQldSJTVbQ8c8KrcoebP1tyYbVajOLFCf5Ilr0JzhTWsDnEnmvryjmIxWH2AJg6WXz6ScRCjfnJ85La6+1cJw81r5DNauFVbAdT9UHBQuiinmXER6HB/z9kA5gZQsx9JfY2BzYrIMPBcd9V+rtLIxhljKJNzOhExWWd16MUfS1Cs+nT0CWnac+ZhyH2XHmuP006KOO+7hxTVRrZ/L3zOi3Yfu2L+86WhFcWb98KQxrjNHnDRXry4h2xYZKr8IJoPE7j8rgGgdAQgPAaGmo21tGLr5asV/WCK3cB0dVG0GEs/uG5D71y3yZaEW8em1IMSly1BsVKnsKmWqowW5byF1IsyH0nLFzZZQS7zOVkb+H1ifCb33DEXtl2ybypZMxSjnt6+rqPzLPG4pVfvDUH7ZLlecx/9yojz235YUp4DXj/kXrPPyHHwqJqAbHg8BLxW/eefShf7HO6l5QuM3hhUG/YbvkyZpE1d4akxC6bOb4Cp7AKr8y/5bj9si2O1ZBWuDHmhRUzYqvbGZ2+lYsOWUD8GL3ynHxe7BKkVrGM5P/+g7zmsRtbA+vL1i6eke488aPNwmL2zbsPcnGlgryrtnEEARAAAUcSsJfwymNUVq2htVa1t7Urj8mS8Prff/9pAmSrVq0oS5YswrWvF61atZKr0sqVKylv3rzy3NQPjpfKcU3ZKrVUqdJUuHAhEcf1grAs9ZDF9YIqW5SyMFm0WDGqUL48saUoC76c9OVkhvgxa9Ys4hivnKbPmCHryAvxg4XVH3/8UdZj977pRVxXdhfMIjJbs44Q4qqpxBsiLJayVWxj4YYvRfLktGPHDjl+Lq8XXtlVHs+NrXRV4notWrSgv/6arwmvfE8vINeqVUtuMvJY7t69K9wrD6KmTZuoJnA0IgDh1QgILkEABEAABEDAzgTGT55Kc+YtoO2bN1DuXDm11jt0+VUKsp06tKffe/6q5Zs6MRXH8/iJk9SsdVtiMa11y2aUMUN62uOxX7gd3kHfN2pA40aNMNUU8sJAICoIr7/3H0zrNvxPxmT9rmIF+TGAssR2FuF18dLlNHzkGPkBwv/WBP89U6pCFflxwrRJ40Wc4xq0V/y30O6XLtL6+8zxQ5qnnTA8/nCras7dsBoAW71O/d8l2nj4trTIrF8qMy3YcZVuPRJ7eyMqUzIj73aqnjqaE175PhucsOEJ77OyJzwWP7kfNvjQey1U7njLuKWm0nlTC3H0Pf2985rcV1zRv5xwfexitfDK/R4WMWP7ipixbAxSRRjYpEgUT+6Bcri3puWzUled2+Wec4/TiavPpFUuW+ZySDlTyZLwesbLh7rNPiar8f5tLuHi18c3QOyj3pVzbV4xG3WuHezSned65e5L2vZHFYN9WOM+LQmvxmXVNWK8KhI4OpIAhFdH0tW1za6F+Z8+sbDKlq7K8lXvetiSQKtvA+f2JRDscpifk7XiawxyKVPeZhfD+lGz+9nB/5yWAdM5n192fZrkp/4LT1KVb9LRkJbBfuzZFTBbmKovf1Qbxq6GlXDYpkoOai988RsnfiGNFW59+cseTvmEz/6aQgScsOa86NeN6nwbHOu0rfjSiF1JrB9c0bgJUl8osSsKtp61NXUW1qBeD3xJH+OV22BRl7+qOnrpiXzhJowfi779KhV1qv0VpUsR/OUVl2Or16FLz0hhlq8zpXKhng3z0m9zT1CNYhlogPgii5OlOcgCZn5cuv1CiAinNUZcjL88G9KyoBRi9dV4MTR7y2UZj0Hl87ibV8hObat8/uOO7wWKRdqcLVdo9X5vVVQeQ8vRoBFcgAAIgICNBOwpvOqtXm0VX5XoysM3ZzFr49Rk8ZkzZ9K8efNCWLWqtjw9z9PIkcEufVUex3YdPHgQ5cqVS2WZPd6//4DGTxgvrUO5EIuoLGBOnz6dXr58Kd0Qcz67Hx4kxFe2IuXEIubAgQNFjNclMr6q3tUw3+fYrmwxyokF4rhxDd3T79q1m8aNGyvrchklpnbv1i1EWb6v0p49e+T42BKVE7sUrlq1Kg0RroeN48Oy5e21a9fo9OnTlChRIlmW3SD36tWLRo8eTbVr11bNSt
F36dJldObMaZmXO3duatSoETVv3lwrg5OQBCC8hmSCHBAAARAAARCwJ4EJk6fRn/P+oh1b/ke5cubQmp4+aw5NnTGLJo8fQ/Xrfl7TaAV0J6aEV7693X0XjRg9TgpOfM0uhps3aUy9f+tucT3GZZFsJ3Dz5k2aP38+Zc8eOgvKGzduUOXKleU/c73fuOlN39WoQ107daSev3bVis39axGNmziZNq5dSfndPnvKadi0FV0T63blwvrVq9f0e/9B5C7W6pz4d2LEkIG0cPFS8XvySLN4HTB4OHH84ZNHD1ByEQPWXL/cBsd4LSliEy/7+y++tClNnTGbps/6M8Tvf1j6P3DoMLVp15FaNGtCI4cNluP5pWsPOecNq5dLC/Cr165To2atpDgbmnHbNEk7F1buhnn/kS1HC2ZPQVlFGDLlZnjlvpv0QgihxqmN2PtrX/3Lfz+a29fl9tgb4SL367T79ANppMF56V0TiP3QPAYhyTj82rQNF2jnqQdcRCaO79qrUT7KmzmZvOaQaONWnydzroZ/rpGbfvju8/8Tuc81B7zpgojryon3NFtUzE6tKuWgWDFjyDz+cfLaMxoljE94P1mFg9Nu6k7+2n6VFu/0ovnC4jWPkcUrF+OwbZPWXSDPm8+1WjzX5qJPdpXMiY1zKvfdIUVedsFsKWnCa7081LRcNktFtXsQXjUUOHEgAacWXtO5JqE0KQyDUjuQpVVNmxJg9RUhuOppRMw5W76+PX3yi26H2b1wgsJFQm3pajw7jiHKsUWTCeFVuYswLmPPa14sxBQv0CQJ49jcrPrCylQwdJsbM1GBrVqfvfIXX1oJFrqXvHFRFmrffwiSYrXxPXtcPxKiOAvjKYXbiQyfXG6Ya5dFVRaEYwmXjFnSuFh8hryYuynK8vKFY0EkjBfbXLPIBwEQAAGHEbCn8MqDDI34qhddbRVs7QWGXWbxZgi7y4oXL57NzXJ9jo2aMmVKivHJ3ZOpRrgcC7Kurq4Wv/xW1rhsidunTx9TTck8FnTZ8pb7tSU9ffpUzjNxYtNr5FWrVol3b0wp/qpYsNx+t27dpUUv3//6669DdMmWsizYmms3RIVongHhNZr/AmD6IAACIAACEUrg/fv3Wiz7sAyExbZ3/u8olY3rsbD0GV3rstUrC7ChSSy6qrAYoalvS5137/zphVjzp0ppec1vS5uBgYHEoqc1KUP69PRV7i+LgNa0ZaqMqf92jPM47rH+7whT7UTWvMajPOiRz1tteJUKpaNTwl2tElzZCrRRmax09Z4v3RAGJV8LsbNEnlRaeXuccF+xRHgyS6HIPgojEN435T1d5Zo4rH2rfenkwurVwp+1FChEUf67V4VQC22/vJ/Le65sgOQS33Bf9Lz3c+o04yh1r/81fV82a2i7QD0QiFACkUJ4zZgqGaVM5mI3EM9e+tGdxy+Eeb4rJXaJb7d27d2QsnTldvUxXu3dD9oLHQG2fg28c4veP31CH/1ey0ZiuiSi2ClTUZxMWcJk5Rq6EUVsLRZDvYVYuP/8Q/pr2zWqJ1xq9BZfVCGBAAiAAAhETQL2Fl6Zgl5I5WsWU/NnyW8Q35UF2hX7lpGnOKpkr7iuqr2oeHz+4gVdu3pNWpTeuOFF27dvp/Ri4yS804ABA2nLls303XdVqHr1asRxmg4cOCDz0qRJI10UszCLFDYCEF7Dxg+1QQAEQAAEQAAEogcBFlztmViEjWrp6dNnVLxMBauG3aZVCxo6qL9VZVHIkIByNVxWuNHl8Gfztl6lBHFjSW+ASnBNlyKBYSVcOYQAu11m98t//VaKvspoGCfWIR2iURBwAIEIF17PXb8vzdZzZEwpAifbbnlnzMQ/4D1dv/tU+EL/SG450lm0OjOui2sQAAHzBFQgdS7BbiwmtC9KKRLbbhlkvgfcAQEQAAEQCE8CjhBe1fiNBViVb3zMnzU/NS/f0kCYNS4TXa71sV05Luz3338fIVN//vy5iOM6SroQ1g+A3TBPmjSRUqWy7xfd+j6i0zmE1+j0tDFXEAABEAABEACB0BIYMGDAF6sqMdUakZbDZkS1xBalR44dt2rY6dOloxzZrXO3alWD0agQu/pduOMaDf+hEFUqmI5aTzhA3g9f0+qBFUT4Mwiu4fmr8Ptf/8kwdB4Tqhu4Ow7PMaAvEAgrgQgXXn393pH3/WcyoiYLr7Fjh/4Lev4i/61/oHTfmVVYuyaJxNauYX1wqA8C4U2A3Viwew12hZxFxDiIEyv0/62G99jRHwiAAAiAQEgCjhReuTe2bPW85SmO5wysW1lsdctaIIQlbMgRRq+c27dv0+PHjylLliyRQtx89uwZ3RJj4pQ9WzZKJmJQIdmPAIRX+7FESyAAAiAAAiAAAs5LQLkY5jitYUkcH5bdDSuRNixtoa5zEjhx9Sn1nHuCUiULDjd2xsuHqhZJT4NbFHTOCUfiWT0WcWQ57m3a5BC8I/FjwtC+QCDChVce3+s3/vTM108eOU5iaBMLQYkSxiPXJC7yGNp2UA8EQAAEQAAEQAAEnJ2Ao4VXZ+eH+YFAWAhAeA0LPdQFARAAARAAARAAARAAAfsTWLZXuLjdckU2XL5AGupS52tYu9ofM1oEgWhBIFIIr9GCNCYJAiAAAiAAAiAAApGIAITXSPQwMJRoRwDCa7R75JgwCIAACIAACIAACIBAFCHg+yaQkiQMe0jEKDJdDBMEQMABBCC8OgAqmgQBEAABEAABEACByE4Awmtkf0IYnzMTgPDqzE8XcwMBEAABEAABEAABEAABEAABEIjOBCC8Ruenj7mDAAiAAAiAAAhEWwIQXqPto8fEIwEBCK+R4CFgCCAAAiAAAiAAAiAAAiAAAiAAAiDgAAIQXh0AFU2CAAiAAAiAAAiAQGQnAOE1sj8hjM+ZCUB4deani7mBAAiAAAiAAAiAAAiAAAiAAAhEZwIQXqPz08fcQQAEQAAEQAAEoi0BCK/R9tFj4pGAAITXSPAQMAQQAAEQAAEQAAEQAAEQAAEQAAEQcAABCK8OgIomQQAEQAAEQAAEQCCyE4DwGtmfEMbnzAQgvDrz08XcQAAEQAAEQAAEQAAEQAAEQAAEojMBCK/R+elj7iAAAiAAAiAAAtGWAITXaPvoMfFIQADCayR4CBgCCIAACIAACIAACIAACIAACIAACDiAAIRXB0BFkyAAAiAAAiAAAiAQ2QlAeI3sTwjjc2YCEF6d+elibiAAAiAAAiAAAiAAAiAAAiAAAtGZAITX6Pz0MXcQAAEQAAEQAIFoSwDCa7R99Jh4JCAA4TUSPAQMAQRAAARAAARAAARAAARAAARAAAQcQADCqwOgokkQAAEQAAEQAAEQiOwEILxG9ieE8TkzAQivzvx0MTcQAAEQAAEQAAEQAAEQAAEQAIHoTADCa3R++pg7CIAACIAACIBAtCUA4TXaPnpMPBIQgPAaCR4ChgACIAACIAACIAACIAACIAACIAACDiAA4dUBUNEkCIAACIAACIAACER2AhBeI/sTwvicmYB/gD+5JorvzFPE3EAABEAABEAABEAABEAAB
EAABEAgWhKwu/DKDSKBAAiAAAiAAAiAAAiAAAiAAAiAAAiAAAiAAAiAAAiAAAiAAAiAAAiAQHQjkN41qTblGEEiaVc4AQEQAAEQAAEQAAEQcEoCsHh1yseKSUURArB4jSIPCsMEARAAARAAARAAARAAARAAARAAARsJ2N3i1cb+URwEQAAEQAAEQAAEQCACCEB4jQDo6BIEPhFAjFf8KoAACIAACIAACIAACIAACIAACICAcxKA8OqczxWzAgEQAAEQAAEQAAGLBCC8WsSDmyDgUAIQXh2KF42DAAiAAAiAAAiAAAiAAAiAAAiAQIQRgPAaYejRMQiAAAiAAAiAAAhEHAEIrxHHHj2DAIRX/A6AAAiAAAiAAAiAAAiAAAiAAAiAgHMSgPDqnM8VswIBEAABEAABEAABiwQgvFrEg5sg4FACEF4diheNgwAIgAAIgAAIgAAIgAAIgAAIgECEEYDwGmHo0TEIgAAIgAAIgAAIRBwBCK8Rxx49gwCEV/wOgAAIgAAIgAAIgAAIgAAIgAAIgIBzEoDw6pzPFbMCARAAARAAARAAAYsEILxaxIObIOBQAhBeHYoXjYMACIAACIAACIAACIAACIAACIBAhBGA8Bph6NExCIAACIAACIAACEQcAQivEccePYMAhFf8DoAACIAACIAACIAACIAACIAACICAcxKA8OqczxWzAgEQAAEQAAEQAAGLBCC8WsSDmyDgUAIQXh2KF42DAAiAAAiAAAiAAAiAAAiAAAiAQIQRgPAaYejRMQiAAAiAAAiAAAhEHAEIrxHHHj2DAIRX/A6AAAiAAAiAAAiAAAiAAAiAAAiAgHMSgPDqnM8VswIBEAABEAABEAABiwQgvFrEg5sg4FACEF4diheNgwAIgAAIgAAIgAAIgAAIgAAIgECEEYgUwuvrN/70zNeP+Bj44WOoYcSJFZMSJYxHrklc5DHUDaEiCIAACIAACIAACDg5AQivTv6AMb1ITQDCa6R+PBgcCIAACIAACIAACIAACIAACIAACISaQIQLr75+78j7/jMKElNwiR+XYgvxNLTp/ceP5Pc2gGKIBrKmd6UkLvFD2xTqgQAIgAAIgAAIgIBTE4Dw6tSPF5OL5AQgvEbyB4ThgQAIgAAIgAAIgAAIgAAIgAAIgEAoCUS48Hru+n0ptubImJLixYkdyml8rhYQ+IGu331C74XlrFuOdBQzBsuwSCAAAiAAAiAAAiAAAnoCjhZez3t7kuctTzrvfY48xblK+bPmJ7esBSh/Fj7mV9k4gkC0IgDhNVo9bkwWBEAABEAABEAABEAABEAABEAgGhGIcOH1zLV7lCl1MnJN6mI37D6+b+j2o+eUQ1i9JobVq924RreGnu7bSU92b6WXZ/6jdw/uyenHT5eBkhYqSqkq16SU5atENySYLwiAAAiAgBMRcJTwyoLrin3LDMRWc9iaV2hJzcu3MHcb+SDgtAQgvDrto8XEQAAEQAAEQAAEQAAEogmBl34BlNQlbjSZLaYJAiBgC4FIIbzmzpSKEgo3w/ZKb/0D6crtx5TONQmlSZHYXs3avZ0rXre0Nr/KkUU7x0nEEnh1+Tx5TR5JL04fsziQZIW/pRw9B1HiPG4Wy+EmCIAACIAACERGAo4QXll0HbC4nzZdtm5tXr6lgWWrsoRd4bFMKwcBVkOBk2hCAMJrNHnQmCYIgAAIgAAIgAAIgIDTEfjf4ds0Y9MlCgj8SLkzJqF+TfNTrvRJnG6emBAIgEDoCTi18JpWiK5phfgamdLGHfuI/5lL9aqVJ/6HFDEEnu7fRed7dRCdc9Rha1IMcps0j1KW+86awg4v8y7gA63wuCFf9mXc0ji8P+7g1uPXtOfMAyor+ssZjouMiOo3XKCiE2xIhMAAAEAASURBVBAAARAIBwL2Fl5X7Fsu3kHBYqq1Qqq+zug2Yw0E2nBAILvw9/enBw8eULp06ShevHhh7nbVqtUUEOBPrVu3DnNbaMB5CUB4dd5ni5mBAAiAAAiAAAiAgDkCW7e709HjJ6h3j+6UJIljjYW8b92iXbs96O69+1Svbi0qXLAAhWf/5hhE9fyrd32p3ZRDwtI1DmVLm5jOePlI8XXBb6Wj+tTCdfwfPgbRA583lCJxPEoYz3L4yRUeNylxgthU+9tMYRrjk5fvZP1USeOHqR1UBgFrCEB4tYaSHcoYC655cmY1aPXydW+DawiwBjjC5YItXU+2riv6slZ0VcOKQUWWbIoUlq/s4qL2kN3iRZSR+jYJn7h5B88/ov6LTtHgFgWpapH0CorDj47ot8uso3TuxnOTY+9SNw81K5/N5L3A9x/pJ7Ho8n74Wljvx6Ido6qaLMeZHuce0phV5+jNuw80v0cpypMpqdmyuAECIAACjiRgb+G17vBacrjWiq5qbo4QX3ft2kWDBg2mN2/8aMWKFZQvXz7VnXa8d+8eDRw4iE6dOqnlFSpUmEaNGkmZMoX+D7rGjRvT06dPycPDQ2sXJyBgTADCqzERXIMACIAACICA/QgMHDKCVqxeIxvcseV/lCtnDpsaDwgQeysNmtB1Ly9ycXEhz5NHbaqPwiBgjkDl6nXoprc3LZgziypWKGeuWJjzWWytUbch+fn5kWsKVxo+ZADVrF6Vwqv/ME8gnBpgIe7CrReUOlkCypvZ/P7cjYev6Lz3C7p27yWduPqM7j19Q60qZ6eONb+ibrOPSfF1y4jKTut2+KMQSS/deSnF5owpzYeLtGbP0+eVP41fc54OXXisPeWsaRNRj/p5qUguVy1PnTx/HUB1h+6meqUyU+9Ghn/Xez14RX8sP0te919R13p5qGk50/u2LNyu2HuDuC1OvHf7Q+Wc1KJidooRQ/WEIwjYlwCEV/vyNNmaXnRlwZVFVXOuhfVlIb6axOmwzDMdmn3RvbC5ztntcKF5K83dDrd8CK9hQ910tAcFfgiS1rvGLZXNl5qK5k5pnC2vF7lfo4U7rstzc8LryzeBNG3DBdp56oHWxjwhvH4N4VXjgRMQAIHwJWBP4VWJp7aKrmrGqj67Jh4lLF9Dm168eEFjx46lrVu3ak0sX76c3NwMwwI8efKE6tSpK4XZVq1aUY4cOejq1atSpE2Y0IXc3XeIL9BD5zUFwquGHicWCEB4tQAHt0AABEAABEAgDAQOHz1Grdq211rYvnkD5c6VU7u25mTazD9p2szZsiiEV2uIOb7MzZs3affu3TZ3lC1bsBBTuXJlm+s6osLJU6fprOd5atG0CcWPH3ZvO+bGuGbdBuo7cAj1/LUbde7YnmLGjCmLhlf/5sYVmfLdT92nP5ad1YbUrEI26lInD73/8FGKrOdvPacbD17T8StP6KVfoFZOnWRO7UINSmemv92v01vhgXD32GrqltMdfcWeZq3Bu6hc/jQ0qu03IeZn7Z7nG//31Gz0PimAViuangpmS0GPhfi9ev9NaaAy8eei9G2eVAbtK8Oboa0K0neFgw1+2Fp2uRBS5229qpXtKgxmmpowmFmw45p8RuwSWhkMbTl2VxrPdKiZm1pXtu3DHK1DnIDAFwhAeP0C
oLDe1gupfbu0MRBczcV45fxxsxbLriG+hvUJWFf/6b6ddL43uxgOfXKbKFwOl68S+gbsUBPCa9gglu21zeQXVJZa5a/e2kw4SMXzpCReiNwWrpeNLV793r2n5mOCFxZ1SmSi2LFi0IZDtwnCqyWyuAcCIOBoAvYSXvVxXTcN/TfUw1YWs6F1Ofz69WuqXbs2+fj4UKNGjSh27Di0atVKMiW8LlmyhCZMmEDjxo2nGjWqa2OePXs2zZkzhyZNmkRVqoTunQ7hVcOJEwsEILxagINbIAACIAACIBBKAm/fvaPqdRrQG7+3VKVyRVq5Zi3ZKrxevXZdtlGuTGl68fIled24CYvXUD4Pe1VjwTU0oqu+/59//pmUCKvPd9bzKTNm0YxZc2jz+jWUL28eZ51mmObFQmKA8GDXpFxW+k9YsV68/UKEvklG54UFrN4ZYiLh4ra4EAOzC6tMtyzJZZnZWy7T+oO3tf57CUvM+sIi01mTJeHVlj1PtogdvPg0tayUnX6p9ZWGi61pO0w9TJUKpaXhrQtr+Xwya/NlWiksVtcMrEBpUySQ9/os+I+OXHxC+bMlp8qF0tHUDRfJlPDKHgprD90lLZrZFXTc2MEfILx+G0g1Bu2iTKlcaHk/x1meG0wEF9GOAIRXBz5yJboaW7mysMr39O6FjcvwsMbP/keWgfjqwIf0qelLQ36jR9v+F6aO0tSoT1+PmBKqNvjFw/+OXHpMyVziyhdHu2q5KF2KhFp7/CJbKL7SOX71qfwqh7/UKZU3NbWtkpNixQz2i2BKeJ279Ypo9wkNEa6A52+/SqeuPyPXxPGpXfVcVKFAWvny2nT0Nt1/9paK5XalDsJNhi3ub9WXR3pXw+yCYoMINL/95D26fPulfJHx4qVz7TyULFFcbU6/zjlOqZLGoyrii6V/dntJN7/80uzRIC+lSZ5AfLl0hQ54PpLlC+dMQT0buVHShHHktep3YPMCUvDkwPZ3nvhRxYJpqVrRDFRasLElvfX/QFUHuEsuzNSa9DEoiLrOOkaeN5/T6oHlaciSMyaFV3Zl0WbCAerfLD+V/Do1zdt2lZbs8oLwag1klAEBEHAYAXsJr8pa1RZrV66TP0t+g5iuAxf3I09vT7KlHT0cFlwbNmxII0aMoHLlytH06TPor7/mmxRe9+zZQ7dEzCO2do0TJ/i9wm1t27ad+vbtI10QNxVfoVtKr169oiVLltKePbultWyZMmWoR48eNGDAgBCuhm/duk0zZkyns2fP0qtXr6UFboMG9alWrVqyi71799KsWbOobdu2UjzW9ztt+nQ6sH8/TZ06lTJmzEhs1Ttt2jQ6duwY3b17l9g9cpMm34eop2+Dzz9+/EgrV66kzZu30IUL5ylLlqyibkHq2bMnJU+e3KD4wYMHaePGjXTgwEFKkSK54NpIjDkfTZw4kbp37y75qgqe4ot9Lnv48CFhQfyGypYtK8TsGlSqVClVBEcTBCC8moCCLBAAARAAARAII4GJU6bT7LnzaebUSXTx0mV5bovwyuulpq3aElsF7tu1nbr91tuk8PpcrMcmTJpGh44epTt37lKRbwpTy2ZNqH7d2mGcAaqbIqCE19GjR5u6/cU8Xp+zxaslq9c7d+9Rxy7dqblYVwcEBtLylavpsfCSU7F8ORo2qD89ffaMxkyYTPv2HxBhSTIKt73VZJzWWLGChRweBMdUnThlBp06w2v+V1Qgvxs1adSA6tUJXvNzmelCDN3uvpNWLvlbxnjV9xtD7O2tXf8/OifW1/ny5qWunTpQtSq2Wer6+DynVj+2J26X3QzzWF0SJqQhA/tRieLFHN4/zzEqpBciVFsdEaqtUI4UNKPzt3IPc/K6C3LovC+ZLU0iypUhCeUTe5m50pv2hNRpxhFpGWvKStMUA7U/20+EhlsoPOexcKh3j8v7wnvPPqCjl59Q5tSJqNTXqej7slkpUYLPf69yu3vPPhTjvUWnr/tQetcEVDB7CilicqxUTvs8H9IiYYX7q3Dde/DCI+L9U56vW9bkcl82RzrD2MKXhei59cRd8hDtJogXS+411y+ZmbIIBpxW7/cmtefK1znSJ5be+1SIO1v2PNcfukVT1l+kkW0LU/n8abk5mQKFlXGlPjvkfvjsriVUtjy2nXRQ7vuuH1xRy+e8qt+klxauJ689o17zTpgUXnls28Tcvs6cjAqLZ61P7PWQuRgbz+jL4BwEwkIAwmtY6H2h7k89R8gSCycPMSipBFXOZMFVCbB83qfzD1pZveWrsbWsVggndiFwtG5Zevfgbpjaip8uI5XYdMDmNtxPCtcWwh89pzJuqUVg8bfSNz27rJ3/a2nxsnURm6VB1Ea8VDiGaB4RcyC9+MKHX8QcJ7RW8YzUr2lwPFdTwuuIZWeke9tUyeKTa5J4lE4ImvyS5sQuIvYLYZOPr4Wwe0q8rLjftYMqiqDlhi92WcHEDyWA6oVX/tJo3cFbxH0Wyp6cLt/xlaIoLwiW9i1HcT4tTKsNdNda5IXCe/ElEsdKSC7E2Yziq6O7QkgtKsTga8JXP8+dxdfpnb6VdVS//HUSC658L7ZYpHJ9TlM6FjPrGlgWMPrBAd2bjNpHvRvno7SC0WkvH4ofN5Z8MefNkkwbs74aL0zGrPSkTrW/knEBfp522KTwyi4wmK8SjSG86iniHARAIKII2Et4VYKptZaqSqjleestZJXlbGjdDX/48EFucCRLlkwitSS8mmLOsby6d/9VCogbNmyQ7odNleM83pT79dcetG+fhxAmU1CxYsXoxIkT9O6dv6zCbsNUjNf79x9QdbExw6lkyZIUN248WY+vR40aLVwe15ZCbaVKlaioaGfhggV8S6ZAselTtmw5KX7++++/4j35nmrWrEmPHj2iatWqyb5ZROZrjmnLAqy5xC6Y2fo3TZo0VKRIESG+XhTis7cUc1k4VQL0UbGB2KFDB9kMj5fTkSNHpFDL5dWYOZ/dM7OFLycee/x48YhFW05sOQzxVaIw+QPCq0ksyAQBEAABEACBUBO47nWDqtaqR+XLlaVF82YLASxYhLVFeF0jRK++AwZT3949qWP7H6n+981DCK+8PitfpSY9fPiQatWoJuNnuu/eI6//GDZYCrChngQqmiQQHsKr+v1Jmzat/JuiQrkydOXqdRnnt5hYO1+8fJnSiXt5v85Dhw4fpWc+z6QbXxZHOd27f5/KVgpe85cpVZLiiXXx7r0e8t6kcaOpQb068rzfoKG0eu16Onlkv/z4Ud8v/05VrliBbt2+I/vlCutWLaPCBQvIutb8eOnrSyNGjaUzZz1lLFkWjpMlS0o//tCK3PLlJUf3b80YI0uZFuP2053HwQYcHOf18Yt3cmgthDUmC4/pPllXmhtvo5F7yVe4IN45pqq5Igb5an+W9zz933+gAsLNbs1iGaUBye7TD2jY0jNyT7aYCHN2S4yL90HZu974dkU1gxsWEUeLfUjeXy3rloZui/1QFnC5zbVCmGSLzs1H78gYqrwny/usLC6fEXucLELynu/6wZXIJX5sObZbj15Th+mH5f4
y98UWoizocnsLe5WhlGIfmcfGYi7vJXM+l8ucKhH98F0O2YYte56Pnr+lxiM9pOHP6B+LyH1XboT3kHkvuXv9r6XYLBsWP14Jy9SawjK1RrEMNKDZ5/8OOE6sEpqPX3lqVnhV7RgfTwqjpB5/Hqcq36SjIS0LGd/GNQjYhQCEV7tgDNmIsnY1tlbVi6l6QVaJtMYCq2rHWJQN2SNywkLAo1h2UT0oLE2IujGowokbNrXxULxwvhcvHH7xLe1TTlh/xpf11YtUBQ6/ft+XJosvgr7J6Urtq+eSZd6LWKQNR+yRL86946tL97WWhFd2ecGuLzgdvviY+i44Kc/n/VpSfvnDFxPF110bheWoLaKlEkCV8Mov6V7zT0jLXX4psnjJiRcQ/LKe070k5RNCJicWXlk8HtaqEFUunE7msbuOFXtvStF2Uc/SMjA9u/5oN+WQXHS4j64qv8BS/XKlqb8U1wKwXxKuQTpMOyKZbv2jirY4kY1b+KHqKSFXX7SE+MpstIhhEOeTSwq+xy//xmKRxdbDi38vI4VZc8Krvi0+h/BqTATXIAACEUHAXsKrchGsF1HNzUcvupoSam1py1wfKt8a4ZVjva5du05srvhK606+HjhwoBRDVTumjps2b6ZBolzx4t/S3LlzKFasWFKM/e2334itV1mMVcIri5rr1q2nNm1+0L6y9/LyogYNGhBbybJ7Y05dunaVlq0spKZMGRxTXImgLAi3b9+Ojh8/Lo7tqV27dkL4/VXWe/78OfXr14/Spk1Hw4cPk3nGP1hU/qVTJ0ohLFtHjhwp4kkFrzf69OlL27dvo6VLl1KBAgXkHEqVKi1j36o8buuy2Ghq0iTYAlgvvLIIzFa3C4RYzOIzJ46/Va9ePXnOVrkJEgS7g5IZ+KERgPCqocAJCIAACIAACNiFwA8/daCDh4/Q3h3/ig/GMtssvLJgVaZiVUqdKpVwT7xefpRmSnhVMWR/6dCO+vTsIcfOVoY9evel9OnT0diRw+0yHzTymUB4Cq8c03fj2pWUPVtWYpG9aasfhYh5lqp+V5lmTZss1v0x6e69+1SucjUqVLAgrV+1VA507YaNtGrNOmr/YxvNSvXadS+qVru+9jEAFzQnfPK9TetWSXGUzxcs+odGjZtArVo0oxFDBnKWTWnKdOFqeLawrt0iYhznzKnVDa/+tQ4j8cmZGz40QsR4ffJJcE0nxEz2hvdCCJScLAmw/oEf6Lt+7tL6828hUFqTlPDKnvoGtyyoGXg8ETFOG47YK8XUud1LaZ4ClVFLz0Z5qUGpLLKLzjOPSq97W0d+pxnMsBXpLrHfyvFpeb9VCa+8v8l7sEmE50AWR4f8c1oa34z4obAUe7nBFmOF+CzE2z+7lZAWsZyn3AHzOLksJ0uuhmUB3Y8v7XnuPvNACMOeFC92LGJDF94b9xIGN43LZqGOwguj2kfmJtno6Pf5/wnRNb8QXzPqevl8aq3wyjF9ea43Rci4feceUQFhKDT2pyIax88t4gwE7EPAWHiloHBOp6/eDfJ762/XXt+8Cwjidh88fWnXdm1p7MffhgfxP+MkrFtlvojhanCLr7k83zdO5toyLofr0BPYWzRb0N6iWcP4L5vNAxAuJILK9NwatPHI7RB1hdvhoNPXn4XI12dMWX9B1hcvDpn94rW/vB676pxWbPjS0zLP++ErLS9AqLbc70+TD2p5fOJxLng8wvWwli8WAEGm/qkCBzwfyrZ2/HdPZZk8qrluPX5Hu191wI4g/icserUkXCHL9v7cclnL45OpG4Ln6v0oeB6qX7FwMSjHF5PWnZdtiJepvMfzNTUH4VJCq3v44iNZp8GIPUFHBfu3/u+DvB74BvWef0LmTxas9UmxP3HliZbdfuohOR8tw8yJcC8i2xTxI8yUQDYIgAAIOJ7AK/+PQfb4V3FAjSD+Z9zWPPelQd3n9dHy+VqVPXLlrJavr6fu6/NCez5qwrSgbF+5BR05ec5kX9zu0VOesgyX43/tOv8adOLsRbPl1Vj6DB4hy5+77GVQ9sbdRzK/SMlyBvmqnv5YoWrtIH25dZu3y7p/r1ir1VX9XPK6LfN2Hzgmy7T8sWPQ7YfPtHL6dm053/Cvu2xv6eoNsi3PKzfk9e8Dh4Vo+49xk+W95Ws3yXtqrt169Q9RdvzU2bLsoRNnQtyzZXzOXPbeU/utAYSwHvTw0eMQ/3yeP3f8/0jQAwiAAAiAAAhEAgLbdgSvaSZPn6mNZsLk4LXglavXtDxLJ8P+GC3XLwcPHdGK1WvcLMjtm2+1az4Rwqss16Zdx6Bnz3wM7uHCMQR27doV1L9//1A3znW5DUtJiKTyuf7aq69BsWkz/5T5e/cdMMivVK12iN8NgwKfLrhc0ZLltVt9Bw6R7YkwKTJP9dv+l65aGT4RVq/a75nBDSsvJk+bKetfuWb4+x9e/Vs5zEhR7Mqdl0Hlem8NajflYFCg2D9cutsrqPaQXXLfjvdOhYFI0P1nbwzG6nXfV94f+PdJg3xLF2p/VhjXGBRTe7HG+6oiDqnsg+up1HnmEZm3X+zFftBvpqoC4rhJ7DHzuFfuu6HLDQra/t9dmb9sj5fMf+b7Tl7r95BVBeP9TWHoI8sOWPTl+X5pz5PHUUfHl8fK/6ZvvBjk9y5QDUEeVVvCMtcgX39x7PITWX+lh+F89WX4vO+C/2Q51R9z4r1fJBBwFAHjv/ljcEf20XSta+XMtXuUO1MqYZ0W17oKVpR66x9IV24/FgGXE1NaV9N+2K1oJkxFlDthe1m8GrcTpsGhcggCEeVqmP37L919g/RWpyEG9ymDXV64n7pH7P7i+asA+bURf6nDaUX/cpQxpQtZsnhVlqKfmqOyvbZRybyppMsKlae+EurzvRvVKZGJOK5s9YE71W2DI7vS4K+QlOWpsnjlQu8CPoivre7LsbLbC/4yil1kcOI4p+xKgxNbvHLcgvm/fo4Dd977OXWacZS61M1Dzcpnk+X4h/piammfsjLOgOpXHw9BFd58TLjVWH2ehrUWlrQiyLr6QkrdV0f+Eoy//uJ0Q3zxxG6XOe5t1k9xDDif59JklAef0qbhleXxxoNX1GbiQemieZSwhFUJFq+KBI4gAAJRgYCjLV6VC2J2HeyWtQCt8FgmsZiydFW8wtvilZe9bA3qKywcDh8+TIsW/U03bnjRuHHjRZzS6mpYIY5NmzalS5cu0ZkzZyhmzJgG90uUKCksSj+7Guab58+fJ7HZIyxHr9DLly/FvxfSUpTvnTt3jg/CTfE7YUFbXLojnjt3LrHr5NKly1Du3Lnpn38WyzL+/v7UseMvdOpUsNeKb74pIsqUFu6Ha1CGDBlkGXM/uP2tW7fJOLMc35ZjxfJcOXFc3Pr169POnTupV69e0uqX56hP27dvpz59+miuhg8dOkSdhBUtl2/Tpo2+KLFVAlv/fsn9sUGlaHZhT4tXj/0H6acOnUIQ1FthhLiJDBAAARAAARBwEgJvxRqncvU6FBgQSPt2b6OEn7xt2OJqWIizVKNuQ2nVOGfmVI2MKYtXXo/98FNHOn
EyeD3GbmjLlytNdWrVpEwZLa/HtIZxYhOB8LR47dzx5/+zdx5wTV1fHD/ugVoFF5S6tQ5ctVr33nvVVa1aV1tH3fp31r2to1pnq3WPauteiLj3QquiKKg4AScKivK/56Y3JiGJIQSJye9+PvBe7rvze6PvcX/vnEP9+/TSjm/ewt9p8rRfaOPalVS0SGFtvrHvBsdm3b5rj/g74TI9Fs/8HAuYYwBzun7ZTx5NWZx269KJBvXTWFDLguJXrvyF6SsRl3XVn7+rLIuPsbV4tXX/Fg/UTgqOW3WedpwM1nrUixIxR9f4BtJq3xtGLWB5/3DoktP0jXBL/H29zy2ahbJ4NdyfXbTDn5bu1vxdxuHldNPlm0+ke1+1H3lIeDAc/J8HQ3b7yx76Kol9zFKfZ9Ra0CqL1yldvqTS+TNpm/O//VR6E+xcJy+1r56H1B4wFzDWL+dvGFFFeme0lcWrcqmcI2sa6i+8MuYR8XNDhMXvmv2B0lKXQ+Hp7rN+P+sI3bj/jHaMrUmJEvGIYiY1jx5iH7mlzj6yYUm2+uV1vRr8lHxEPN21Yn15HL/3Kafn4dCwHj6DgLUEDC1eIbxaS9Kgnq5LYUP3wUqU5SrsQlhYucrafK4b45UzlQtiXbfEsjB+2ZTApRF96P72v+PUZpY6janA6F9i1cavmy7JG/li4VI3nwjabioFCKFPvNUk3fKyH38WDNOmTk4bhA98TvElvHJw8z/3aG7+hmPjm3TSJIliCK/slqPXvGPEDwfsQvnLvBkpgwjwzu6N2X2HrYVXXZcbaow7TwXT2JXnaWjrIlT7y0/pdkg47RSxdA1TZuHamQXm96Wxq87RzpN3aN2wyjL+68DFJ2UMBe77MyF4qzRl/QW6E/qSpoqHG56zqTVVIvKC3mVlQHpVH0cQAAEQ+JAEbCW8KoHVmKCqrql5GSujrsU1xqtqRx0tcTWsyqojxyf98ccfqVix4lqxU13TPTZu3ESKlueE27FEBn8B1qhRQ7olU66GN2/eIoTMIbJ6oUJelDNnThlniV35clLCK5+PHPkzbdy4QcSA9aVr165Jl8I//zyKmjZtwpdl4s0+vr5r1y4RT/WQdAvMF8aMGaN18asp+e73ixcvRFudRVzXC5Q6tQuVLlOa3NzcaL9oh+PDKuGV49SyG+Pu3bsLgbfbuwbE2Zo1a4ToOk4rvLJLZXZ3PGTIEGrVqpVe2f3791MP4Tp5wIAB1K5dO71r+KAhYEvhNVAI6Rs3bYmB1j1LVmrVolmMfGSAAAiAAAiAgCMRWPT7Uho/eSq1+rq5jLmq5rZ63Xraun0njR45TLqNLVv6K3UpxrFTt+7k47tfunTNmSOH9vqQkaOkcPbHwnmUUTw7FSqYX17j5zFvH1/atmMX+R44SOHhmpfiJ08YS82baEIuaBvBSZwJfAzC68Z/NlO/QZpn/iKFvSh3Ln7mT09/LF0m5/8+4dVQ8OVKH1J4tXX/cV70D9zAbuGGlt0Ot6iUg3o2LKDt3ZgAywYbpwNCZXxXz4ypaXT7LyivR1ptHVMnpoTX2WJvmEVAFj/zuOu3c0EY35QSMV97Nno3ppsi/iu7zRWeEonFVE75PNOJsHGlpFthJbxO61pSCrJqPP5CcOw0/RAp4fXAhfs05I/T8nL9r/Td+HK/XmLvuVPtfDLOq62EVzZY4f3ijSOrynbV2PjY67djMr7suqFi71XE1+X95ZpDdsnQdByizlSyVHg1rK/Gohu+zrAMPoNAXAhAeI0LvffUVQKroaDKoizHblWCKzfDZdiq9fPcGp/tnKfqw9qVacRvCvHdTRf6d41TJ15TF1DGSjVi1QbfKMeIG7uutahqgGMKJBHCZtpUyUjFPTX0aT9+9XnafiI43oRXNRZzR2V5quag4sfWK+VJA4TlbJLEmleSVNxaWwuvKg6u7hjnbb1CK/Zep8XirSV++LAk8dtqZ8SDU5squbSxdlW9AYtO0tFLD2nn+BqUOkVSbQwEdd3YUTcWguF1CK+GRPAZBEAgIQjYSnhVcVvZsnVc+4kxpqLEV3OiK1dS7bSu/A21rtQmRjuxzTAnvM6YMYNcXNJQly6d9ZrlOE4lhNWCp6ensA7dpndN98OwYcNo06ZN0kI0S5Ys2kvPnz+nsmXL6sV4bd68Ofn7+xPHemXRlRNb2lapUkWeK4GWPyjhk0VUtqhduXKlEFcPUrp0xu9lbBXLG1H9+/eXgurhw4diWOByu75iI7Fnzx7UWMSVHTlihIxJy/k8puHDh2uFV7b85bizLBAvW/YnJU2alItJ61u2tD1+/JhWeA0ODhZWwXXoaxH7dbjgoZuWLFlK06dP04v9qnsd5yJOkogr7OGm/0Y7uIAACIAACIAACMSewM9jJtCfK1aarchxO/1OHTVZhi1mbwQGmrzOF+rWrkW/zpgaoww/j+3c7U09evcTz5cudO7EYaPPYzEqIsNiAh+D8Fq3UTO6fMWfdm/bJEVXnhw/85cqp3nmP3F4H2eZjPFqa+Ezthavtu5fTvYj+sXe7tgrX15hFLOod7kYI1cC7Kp914W3wdfyOltLsne/bJldaMWgijHqGGaYEl7Z0pYtbtnSky0+Y5Puhr2gWf9cEkYxD6hf80LUuEw2bYzX9wmvwaEvqNV4X2pRUYjNOsKusf5tJbwyY07GLFh/2fivNDCa+UMp+iKPG526Fkq9fztO/cW8Gol5mUrmhNdzIo7v3nP3qIEQltm6Vjct3XONFm2/qvWWqHsN5yBgCwKGwitivIq7oi2Tit1qLH6rEF5lTFc+6ib+rOoZxoLVLYdz2xI406Wl1TFeua41iX3Us2/5FuN8otl3v0oc25Xzhy89LbNUHAC/G+/ih9x/9FLGE+Vytx5qfN2bi/H6IkLfbz3XG7DohOpSHpVffPZzb2lSsVZVLAIVS4BjIqgkgs5HqzgEWw1ivHLcAN3Ec+SxiYcZ3exo5ddfxapV/XJZFcuVK9wVcRc4biznR7zSn7NegwYfVHvCClnvCsde4La6zTyszT95NSSayxv+cHxYLsv55uK3qrmYK6PtDCcgAAIgEE8EbBU/k+O1vi82q6mYrrpjUG1YUla3nqlzczFeu/XsK2MecYxX3fqbdnjL/EHDx+jl65bh8/l/LJfl+g4eoVduxNhJMl83dmuh4l9F88+Dx+Hastu998cox+0+eRkl47626dBFHnmcun2v/XtbdO+BQ6MPn9SPW8vxYjlGbZiI9a5bXp0vWblOXp/52yLt9ZCnL6ObtW4v85et2aDN5zi33Fb3PgOjDxw7Hc1xWn8aMETmcb6K8fo04o2cF+fpxroNuhsix875wQ8fa9tVY8FRE1vZMN5LPP0zR7MgAAIgAAIg4PAELl2+Er1rz94YP11+7CWfXxYv+TNaNz4nx2UVlrDRwkpVy+bQkaMx6nObZSpVl23w+bnzfrL8pi3bovsPHhZ99tx5bX0+4Vie/PzDsdeRbEvgQ8Z45djAuum3BYvluhqut2H8X44FzD/hL97FAuXvHX8nLInxatgvj4HrtmrXUXc4Fp/HNsarJf2L0
CXR23fujr579552HIGBQdG7vX2io6I0+29RIkbqXh/faI5d+7GlsSvPyT29k/4hJofuffaOLPObiPvKiWO88j5g4P1nJuuoC2pv13B/VsWL/WH2Eb19zIdPIqJHLjsT/eeea7IJjkc6btW56IXbr+jFd1UxYlf6aPZR1b4s7/Hqpiu3n8ixLtmtifv79m203D/lPdTgkHf/HwqROfqXDRejhVAc/Uacc+K+eZ6tJ/jqNmn03Nye54g/T8t2fM7d1avLc1VxX1Xc1T92XZVlmY+5pPayjcV4VWw5Ni3PSyXeL+Z9Xp7Tg8cvVTaOIGBTAoZ/88PVsC3kbIM2lOUqZ5uzXjW0hDW0lDVoFh9tTODZ5Qt0ql1D0WpswxwnohLLNlHa/F5WjWjJ7mu0eMdV+iyTC1Uv7k6PhKUrx3J9EfFGxh9lt8LKWpTdDNcr9Rm9inpDm47ckmW50/hyNWzJhAwtXtmtb+sJ+6Wb4Yals1F6l+Ti7aK7WvcXtrZ4ZXfGKZImoRolPCixcPfoffaudGms4tRaMgcu8yrqLbWbvF+6Cq5aLKuMg3An7CWt3X9DrsXkziWoTIHMZptDjFezeHARBEDAzgjYyuKVp6WsWq21VrW1tSuPyZzF68mTJ+m7777jYtS2bVvKnj27cO0bINzprpZ5q1evpoIFC8pzY784XirHNWWr1LJly1Hx4sVEHNeLwrJ0nyzu6upKypKVLUrZsvTLkiWpcqVKxJaiq1atilFO9TNnzhziGK+cZs2eLeuoa6dELLGOHTtKi1p27+sh4rqyu2B2O8zWrKNHjVJF9Y5BQTepQYP60iq2uXDD55ohA+3cuVOOnwsqV8N8zq7yeG5spasSuydu06YNLVq0UGvxyteUa2aeb7169ShZsmRyLLdv3xbulYdRy5YtVBM4GhCAxasBEHwEARAAARAAARsTmDx9Bs1bsJh2bN5I+fLm0bbetftPtMd7L/3QtTMN6PuTNt/YibE4nsdPnKJW7TqQm6sbtfumFXl+6kF79+0Xbod30tfNmtCkcaONNYW8OBD4GCxeB/xvOP218W8Zk7V6lcp063aw1hKbvyuOYPG6dPlKGjV2ArEr5b/Xaf6eKVu5Bt27d49mTpss4hzXIR/xb6HT992l9ffZ44e0nnbisPwfrKopd8NqAGz1OuPvS/TP4ZvSIrNx2Wy0eKc/Bd0Pp82jq8n9T1XW2NGUxSuXVZ77cguXxezKmOORcj+8Rzyh4xdU3ktjCavc8Zb3ykzlCmamZy+jSAipct9S7Q1b6mqY+1VeCzleLO+ruqZJod3DbSncLvfQcbvcd/5xOuEfKq1y2TK3VolPuYkYyZyXv7MBYdRz7jFZp9aXHtLCOOzpK7nvzXNtXSUn/Vhf49Kd5yrEYto+pobwIpAoRj8qw5zFK5fpt/AEHb8cIsP28biF/Eo+wgqW3TTX+MKdRnxj2o2x6gNHELCGgKHFK4RXayhaUIddC/OPbmJhlV0Ls+DKSVi6yiP/MifQagvhxOYEQvbvoQv92OWwpeJrIvKaJlwMV6xu9VjE//f0p/c1GUP01kNNXBAWYdlNb/HcrrJdvin8sesaLRE/KvENkG88q3xuCOG1EnFcgScvXlP94Xtk3FIWHjmNWXmOdon4prsn1KSUyZOo6lSh33YqVygzTfyuhDZP3awGtfCi+l+9P/YpVzx48QH97/dTNPybolTzCw/ZFscJmLLuglYY5mDv7Hp3wmo/0nWXzC4mcmZJKwVmNYiLIo4AB0/v0UgERa+YU2WTCjbP7jvYjYfql+cpLEdpy7HbsiyL0yz4coD72KYn4a9otODFN2SVMqVPSX2bFqLygtX7kqXCq5rLQhHjNf9ncDP4Pq64DgIgED8EbCm8qvisPNLYiq9KdOW6m0Zu5YNN0q+//koLFiyQImehQoVitOnnd4HGjtW49FUXObbr8OHDKG/evCrL5PHOnbs0ecpk2itc/XJi8ZEFzFmzZtGTJ0+kG2LOZ/fDw4T4qsqxiDl06FBavnyZjK+qBFouy4ljuzZt2lSes0CcPHlyea5+7dnjTZMmTZR1OY/bYzG1V8+eMcqqOnzcu3evHF9YWJjMZpfCNWvWpBHC9bBhfNi3b9/S1atX6cyZM5QmTRrpfpjdIPfr14/Gjx9P9evX1zbNou/y5Svo7NkzMi9fvnzUrFkzat26tbYMTmISgPAakwlyQAAEQAAEQMCWBIQFH/22YBHt3PI35c2TW9v0rDnzaMbsOTR98gRq3PDdM422gM6JMeGVL+/YtYdGj58kBSf+zC6GW7doTv379DL7PMZlkWJP4MaNG7Rw4ULKlSv2+zzc2/Xr16latWryx1Tv128EUvU6DajHD92o7089tMXmL/qDJk2dTv+sX02Fvd79TdG0ZVu6Kp7blQvrZ8+e04D/DaNd4lmdE38nRo8YSr8vXS6+J/e1wuuQ4aOI4w+fOnqAMogYsKb65TY4xmsZEZt4xZJF/DFWacbsuTRrzm8xvv9x6f/AocPUvlM3atOqBY39ebgcz/c9ess5b1y7kooWKUz+V69Rs1ZtpThrzbhjNUkbF1buht1dU4u/aXNS0VyulCNLGlJuhlf73iAOC2eY2tfIQ51rv//vR1P7s9ye2vf1PnOX1N4w72/+IERIFmJV4n3fmRsv0u7Td1WWDLHWr1khKpgtvczbcuwWTVp7gUy5Gu5SJx99W/3d/4nc57oDgcT7sZzYwIXDsLWtmlsbPo7zT10NpXEi5N3DxxFUJFcGmtO9NGfHSO/b8wy4+4ym/XWR/G480tblubYWfbKrZE5sHFNt0E4p8rILZnNJ7WUb7iOrOq+FYD5fhKTbLLiwkRMnFpobir6+q5nHrKir2sARBKwh4NDCq7tbOsriqh+U2hpItqxjTIDVbR+Cqy6NhDlny9eA6WPp8RnNGzimRpG++FeUu+8wqy1djbXLwl9yYb2ZKsU7gVS3nHADQaHPImWw9BTJjJfRLZ/Q52FirMmTJqY0qZLF+1D4ASk8Iorc0qWIc18vIqPohojTkOmTlJRZCK9IIAACIOCIBGwpvDIfa8RXXdE1toKtrdYkMjJSboZkzZqFUqSI/T2E63Ns1IwZM1Ii4XnBVOJyLMi6ubmZffNbWeOyJe7AgQNNNScFXba85X5jk0JCQuQ806Y1/oy8Zs0aGZeMxd8kSd49a/Ts2Uta9PL1AgUKxOiSLWVZsDXVbowKTp4B4dXJvwCYPgiAAAiAQIISEG5RtbHs4zIQFtsiIiMoUyyfx+LSp7PWZatXFmCtSSy65sz57sV+a9qwtE5ERCQ9Fs/8mTKaf+a3tD0u9/r1a2LR05L0qYcHfZ7v/SKgJW0ZK2Ps345hHsc91v07wlg79prXfNw+ui+836lUtZg7nRaxRpXgykYwzcrnIP/gp3T97lMqIMTO0vkzqeI2OXJfSZIkorRm9lLjY3+Y91RFqDghSqYQf9eansprIYry371JxRjjkoRXarr36KUUQV1SJtVr6kLgI/ph9lHq1bgAfV0hh961uHwQLo3l2DPaYO84LuNAXecgYBfC
q2em9JQxvYvNiIc+CadbDx5Tbg83Sutiv6KJsnTlibPlK5J9EQjx3U0PvbfRk7MnKeJusBxcSvdP6ZNiX1KmanUpY6Ua9jVgjAYEQAAEQAAEYkHA1sIrd60rpPJnFlMLZy9MXjkK80eZWKBd5buC/MRRpfHtJ+qVUfnOdHz0+DFd9b8qLUqvXw+gHTt2kIfYOPnQaciQobRly2aqXr0G1a5di0RcHzpw4IDMy5Ili3RRnDhx4g89LIfrD8Krwy0pJgQCIAACIAACIBAPBFhwtWViEfZjSyEhoVSqfGWLht2+bRsaOex/FpVFIX0CytVwBeGOtkjODLRgmz+lEp4DnworUyW4urum0q+ET/FCYMXe69L98qI+ZelzT3gKjBfIaDTeCSS48Hr+2h1ptp7bM6Nwgxp3q7jIV1F07XaI8IX+lrxyu8uYj/FOER2AAAiAAAiAAAiAwEdGID6EV4XAUIBV+YbHwkKQbV3pG6cXXZmLbmxXjgv79ddfG+L6IJ8fPXok4riOk7FadTtkN8zTpk2lTJls+0a3bh/OdA7h1ZlWG3MFARAAARAAARCwlsCQIUPeW1WJqZaItBw242NLbFF65Nhxi4bt4e5OuXN9GOteiwb0ERXiEG+/77xKo74tRlWLulO7KQcoUHjDWzu0MkFw/bALOWDRSTp66SHtm1Jbz93xhx0FegOBuBFIcOH1aXgEBd4JlRE1WXhNKtySWpv4jfyXka+JDd1zCGvXdHZs7WrtHFEPBEAABEAABEAABGxBID6FVx4fW7b6BfmJ43k961YWW71yFIlhCWuLOX3Mbdy8eZMePHhA2bNntwtxMzQ0lILEmDjlEq7Z0osYVEi2IwDh1XYs0RIIgAAIgAAIgIDjElAuhjlOa1wSx4dld8NKpI1LW6jrmARO+IdQ3/knKJMIOfapW2o6GxBGNUt40PA2RR1zwnY8qwcijizHvc2aARbGdrxMGNp7CCS48Mrje/4ikkKfhssjBzy2NiVLImJJpk4hYjy6yKO17aAeCIAACIAACIAACDg6gfgWXh2dH+YHAnEhAOE1LvRQFwRAAARAAARAAARAAARsT2CFj3Bxu+WKbLhSkSzUvUEBWLvaHjNaBAGnIGAXwqtTkMYkQQAEQAAEQAAEQMCOCEB4taPFwFCcjgCEV6dbckwYBEAABEAABEAABEDgIyHAcV3TpY57SMSPZLoYJgiAQDwQgPAaD1DRJAiAAAiAAAiAAAjYOwEIr/a+QhifIxOA8OrIq4u5gQAIgAAIgAAIgAAIgAAIgAAIODMBCK/OvPqYOwiAAAiAAAiAgNMSgPDqtEuPidsBAQivdrAIGAIIgAAIgAAIgAAIgAAIgAAIgAAIxAMBCK/xABVNggAIgAAIgAAIgIC9E4Dwau8rhPE5MgEIr468upgbCIAACIAACIAACIAACIAACICAMxOA8OrMq4+5gwAIgAAIgAAIOC0BCK9Ou/SYuB0QgPBqB4uAIYAACIAACIAACIAACIAACIAACIBAPBCA8BoPUNEkCIAACIAACIAACNg7AQiv9r5CGJ8jE4Dw6siri7mBAAiAAAiAAAiAAAiAAAiAAAg4MwEIr868+pg7CIAACIAACICA0xKA8Oq0S4+J2wEBCK92sAgYAgiAAAiAAAiAAAiAAAiAAAiAAAjEAwEIr/EAFU2CAAiAAAiAAAiAgL0TgPBq7yuE8TkyAQivjry6mBsIgAAIgAAIgAAIgAAIgAAIgIAzE4Dw6syrj7mDAAiAAAiAAAg4LQEIr0679Ji4HRCA8GoHi4AhgAAIgAAIgAAIgAAIgAAIgAAIgEA8EIDwGg9Q0SQIgAAIgAAIgAAI2DsBCK/2vkIYnyMTgPDqyKuLuYEACIAACIAACIAACIAACIAACDgzAQivzrz6mDsIgAAIgAAIgIDTEoDw6rRLj4nbAQEIr3awCBgCCIAACIAACIAACIAACIAACIAACMQDAQiv8QAVTYIACIAACIAACICAvROA8GrvK4TxOTKByFeR5JYmpSNPEXMDARAAARAAARAAARAAARAAARAAAackYHPhlRtEAgEQAAEQAAEQAAEQAAEQAAEQAAEQAAEQAAEQAAEQAAEQAAEQAAEQAAFnI+Dh9ol2yomiRdJ+wgkIgAAIgAAIgAAIgIBDEoDFq0MuKyb1kRCAxetHslAYJgiAAAiAAAiAAAiAAAiAAAiAAAjEkoDNLV5j2T+KgwAIgAAIgAAIgAAIJAABCK8JAB1dgsB/BBDjFV8FEAABEAABEAABEAABEAABEAABEHBMAhBeHXNdMSsQAAEQAAEQAAEQMEsAwqtZPLgIAvFKAMJrvOJF4yAAAiAAAiAAAiAAAiAAAiAAAiCQYAQgvCYYenQMAiAAAiAAAiAAAglHAMJrwrFHzyAA4RXfARAAARAAARAAARAAARAAARAAARBwTAIQXh1zXTErEAABEAABEAABEDBLAMKrWTy4CALxSgDCa7ziReMgAAIgAAIgAAIgAAIgAAIgAAIgkGAEILwmGHp0DAIgAAIgAAIgAAIJRwDCa8KxR88gAOEV3wEQAAEQAAEQAAEQAAEQAAEQAAEQcEwCEF4dc10xKxAAARAAARAAARAwSwDCq1k8uAgC8UoAwmu84kXjIAACIAACIAACIAACIAACIAACIJBgBCC8Jhh6dAwCIAACIAACIAACCUcAwmvCsUfPIADhFd8BEAABEAABEAABEAABEAABEAABEHBMAhBeHXNdMSsQAAEQAAEQAAEQMEsAwqtZPLgIAvFKAMJrvOJF4yAAAiAAAiAAAiAAAiAAAiAAAiCQYAQgvCYYenQMAiAAAiAAAiAAAglHAMJrwrFHzyAA4RXfARAAARAAARAAARAAARAAARAAARBwTAIQXh1zXTErEAABEAABEAABEDBLAMKrWTy4CALxSgDCa7ziReMgAAIgAAIgAAIgAAIgAAIgAAIgkGAE7EJ4ff4ikkKfhhMfX795azWMZEkSU5rUKcgtnYs8Wt0QKoIACIAACIAACICAgxOA8OrgC4zp2TUBCK92vTwYHAiAAAiAAAiAAAiAAAiAAAiAAAhYTSDBhden4REUeCeUosUUXFImp6RCPLU2Rb19S+EvX1Ei0UAODzdK55LS2qZQDwRAAARAAARAAAQcmgCEV4deXkzOzglAeLXzBcLwQAAEQAAEQAAEQAAEQAAEQAAEQMBKAgkuvJ6/dkeKrbk9M1KKZEmtnMa7aq9ev6Frtx9SlLCc9crtTokTsQyLBAIgAAIgAAIgAAIgoEsgvoXXC4F+5BfkRxcCz5OfOFepcI7C5JWjCBXOzsfCKhtHEHAqAhBenWq5MVkQAAEQAAEQAAEQAAEQAAEQAAEnIpDgwuvZq8H0Web05PaJi82whz19QTfvP6Lcwuo1LaxebcbV2Rp6HXybXt8KoqiQh/Q2/LmcfmKXNJQ0YyZK9ll2Svapp7MhwXxBAARAAAQciEB8Ca8suK7yXaEntprC1rryN9S6UhtTl5EPAg5LAMKrwy4tJgYCIAACIAACIAACIOAkBJ6Ev6JPXJI7yWw
xTRAAgdgQsAvhNd9nmSi1cDNsq/Qy8jVdufmA3N3SURbXtLZq1ubtXAkI0rb5ee7s2nOcJCyBN4/C6OWZUxT18L7ZgSTNlIVSFS9BSTK4mi2HiyAAAiAAAiBgjwTiQ3hl0XXI0sHa6bJ1a+tK3+hZtipL2FX7VmjLQYDVosCJkxCA8OokC41pggAIgAAIgAAIgAAIOByBvw/fpNmbLtGr128pn2c6GtyyMOX1SOdw88SEQAAErCfg0MJrViG6ZhXiqz2lf3b6Ev+YSo1qVSL+QUoYAmzlGn6Q14ejDluSEpFL+UpOYf0a8eoNrdp3XT5IlPfKYgkcm5Thh5nIqDfUsmJOm7T3oRuJFO7PV/pcpzziAayCFdzeRkfTzQfh0n16tsxpKHlS6+NgG85dNE0PHr+kZKJN17QpDC/jMwiAgIMTsLXwusp3pbhPaMRUS4VU3Trj20/UE2g/FP7IyEi6e/cuubu7U4oUcf+/cM2atfTqVSS1a9fuQ00B/XyEBCC8foSLhiGDAAiAAAiAAAiAQBwJbNuxi44eP0H9e/eidOni11goMCiI9njvo9vBd6hRw3pUvGgR+pD9xxGV3Vb3v/2UOv1ySFi6JqOcWdPS2YAwKb4u7lPObsdsjwN78zaa7oa9kPuRqVOYDz+5at8NSpsqKdX/6jOrp/JW9Bd4X+PRMkeWNJQ4cSKr20JFELCEAIRXSyjZoIyh4Jo/Tw69Vi9fC9T7DAFWD8cH+cCWrs92bRd9WSq6qmElorQ16zi85Su7z6g/wlvc5DxpUIsPF5Ov3ZQDFB4RRRuGV1HAP6rjkxevqf7wPVSvlKd8A87SwbMouninP607EEgvIt5oq/VsVICaV8huNn71njN3aNTyc7LOgK+9qGFp/QeT1yIG9vytV2jzsVvatjOkSU7fVMv10QrcWkA4AQEQsJiArYXXhqPqyb4tFV3VQONDfN2zZw8NGzacXrwIp1WrVlGhQoVUd9pjcHAwDR06jE6fPqXNK1asOI0bN5Y++0z//01tAQtOmjdvTiEhIbRv3z4LSqOIsxKA8OqsK495gwAIgAAIfAgCQ0eMplVr18mudm75m/LmyR2rbl+9EvsfTVrQtYAAcnFxIb9TR2NVH4VBwBSBarUb0I3AQFo8bw5VqVzRVLE457PYWqdhUwoPDyc3VzcaNWII1a1dkz5U/3GewAdq4OGTCLoY9Jgyp09FBbN9YrLX6/ee0YXAx3Q1+Amd8A+l4JAX1FbsoXWr+zn1nHtMiq9bRldzWLfDLFpeuvVEis2eGU2Hi9x3/h5NWHNe7jUu7F2W8n8Wk2nYs0iavO4CHbr4QMs7R9Y01LtxQSqR102bp04ePX9FDUd6U6Oy2ah/M/2/6wPuPqMxK89RwJ1n1KNRfpN7msv3BtAy7wDtHmjqlEmoR8MC1CAOQq4aH44gYIoAhFdTZGyYryu6suDKoqop18K6ZSG+2nARLGjq+d7d73UvbKoZdjucpmoNU5cdIh/Cq3XLaK3wOmPjv/TXwSDihw8WThMnSiSFUn6Y6FAzD3WqldfogPiBpNWEfdqHiX7NC1HjMtn0yg5dcpr2+92nwjkzULVi7vQq6i1tPnqLbj0Mp/Y1clPn2vn0yuMDCICAYxKwpfCqxNPYiq6KrKrPronHCctXa9Pjx49p4sSJtG3bNm0TK1euJC8vL+1nPnn48CE1aNBQCrNt27al3Llzk7+/vxRpU6d2oV27doo30K3zmgLhVQ81PpggAOHVBBhkgwAIgAAIgEAcCRw+eozaduisbWXH5o2UL28e7WdLTmb++hvN/HWuLArh1RJi8V/mxo0b5O3tHeuOcubUeE+rVq1arOvGR4VTp8/QOb8L1KZlC0qZMu7edkyNcd1fG2nQ0BHU96ee9GO3zsK6T+M97UP1b2pc9pS/6/QdGrNCY7TA42pVOSd1b5BfepxjkfVC0CO6fvc5Hb/ykJ6Ev44x9GyZXahJuWy0ZNc1eim8BHpPrBWjjKNkPBVGJfWEUUnFwlloXIcvYkyL9z5nbrxIu0/f1V5bIITXAgbC64vIKGo13pd477LWlx5UNKcrPRDi99r9N+Q+5tQuX9JX+TNp2+CTgxfu0//+OE0j2xal6sU95DW2lmXvggu2+WvL9mgohNdKmn/v2kxxwqLr/K3+cg+0gTAm4rXiNeMx/NyumNwX1S2PcxCwFQEIr7YiaaIdXSF1UPf2eoKrqRivnD9pzlLZIsRXE2BtnK1xMbwvTq26lK/s0C6HIbxa9/WwRnjlt78a/bxXiq5ze5QW7jSSyc7Z8rf7nKPyTa51wypT1gypYgxq9Iqz8kGH37xb7n2dDIXX0KeR1HjUXiqUPT3NEW0n+c+1BrfdeoKvdOu8bUwNbX6MDpABAiDgMARsJbzqxnXdNHKr1XyUxay1LocDVeRxAAAO+0lEQVSfP39O9evXp7CwMGrWrBklTZqM1qxZTcaE12XLltGUKVNo0qTJVKdObe2Y586dS/PmzaNp06ZRjRrWvVAF4VWLEydmCEB4NQMHl0AABEAABEDASgIvIyKodoMm9CL8JdWoVoVWr1tPsRVe/a9ek21ULF+OHj95QgHXb8Di1cr1sFU1FlytEV11++/SpQspEVY331HPf5k9h2bPmUebN6yjQgXzO+o04zQvFhLZEKFFxRx0Ulix/nvzsQh9k14Iro/1nCGmES5uSwkxMJcwjPDKnkGWmbvlMm04eFPbfz9hidlYWGQ6ajInvKr9RBYyGwjDkaRJEtHGQzfJmPDKFrHDl56hb6rmou/rfa7Fxda0XWccpqrFstKodsW1+XwyZ/NlWi1cDa8bKvZBXTX7oAMXn6Qj/z7UGpSw8Yop4bXpGB9KJsa0tH8FSpk8iWzbP1i4i55+iMoVykwTvyuh1x8+gICtCEB4tRVJI+0o0dXQypWFVb6m617YsAw3N3nun7IMxFcjcG2c9eLoIXoVdCNOrSbPnpNSl46dP3/2Zc9v7bBFI8fYXLL7mhTVSuXPSFWKZDXqu97n3D3ad/4uHb38ULrCKPV5RmooXCNkF/7pVZq/7QodufSQJnYsIdv0EeVdUiaTbTYpl508M6ZWRWnK+guyz3m9ymjz+GTDoSDaJKwgJ3T8gtxdU4u3u4y7Gr4dEi7fMOIHk/CI18KNRHrpVrfmF5q3kLgtNZ7BwkXx77uuypujORcQXEcl5WqYx7F451U6dz1MjqeiiJfKlp9sCapS1JtoWn8wULj9CKHjl0PkDZj5NC+fndL8J15yWUvnrLs+3M/WE7fp8s0nMnZD++p55Jteqm8+Bj14LrjdJJ+zmje8OBYuP8C1m3wgVq6G1YOIsYcGXpdfNvxL3cWbXK0M3uQ6fiWE+i04QU3FfMsVzCzPDYXXa3eeUsdph6S74p+EGw/d9NO843T6aijtGFdDfF/Mx1fQrYdzEACBj5OArYRXZa0aG2tXrlM4e2G9mK5Dlw4mv0A/ik07uuRZcG3atCmNHj2aKlasSLNmzaZFixYaFV737t1LQSLmEVu7JkumebmF29q+fQcNGjRQuiBuKd5CN5eePX
tGy5Ytp717vaW1bPny5al37940ZMiQGK6Gg4Ju0uzZs+jcuXP07NlzaYHbpEljqlevnuzCx8eH5syZQx06dJDisW6/M2fNogP799OMGTPI09OT2Kp35syZdOzYMbp9+zaxe+QWLb6OUU+3DT5/+/YtrV69mjZv3kIXL16g7NlziLpFqW/fvpQhQwa94gcPHqR//vmHDhw4SK6uGQTXZmLMhWjq1KnUq1cvyVdV8BNv7HPZw4cPCQviF1ShQgUhZtehsmXLqiI4GiEA4dUIFGSBAAiAAAiAQBwJTP1lFs2dv5B+nTGN/r10WZ7HRnjl56WWbTsQWwX67tlBPfv0Nyq8PhLPY1OmzaRDR4/SrVu3qcQXxembVi2occP6cZwBqhsjoITX8ePHG7v83jx+PmeLV3NWr7duB1O37r2otXiufvX6Na1cvZYeCC85VSpVpJ+H/Y9CQkNpwpTp5Lv/gAhL4inc9taScVqTJNFYkvIgOKbq1F9m0+mz/Mz/jIoU9qIWzZpQowaaZ34uM0uIoTt27abVy5bIGK+6/SYSL8av3/A3nRfP14UKFqQeP3SlWjViZ6kbFvaI2nbsTNwuuxnmsbqkTk0jhg6m0qVKxnv/PMePIT0We5wNRDi1YrldafaPX9HGwzdp+l8X5dDZO1xOscea99N0VEgIsXk9jHtC+mH2Eel+2JiVpjEG79sb5b1An3Oa/d5smdNQ2QKZ6OsKOfT2M7ld3hfeeDiIzlwLIw+3VFQ0l6sUMXlfmZOv3z36Q1h08p7fwYv3pcUoz9crRwb6sX5+yu2uH1v4shA9t4n9zn2i3VQpklBZsZ/InvPUPvPa/YH0t+DDXvI45fZIKy1ZVRg6FlzbizBx/2tVmMoUyEwLtvvTsj0BRoVXtac5tkNxqlQ4q2yPf3FItKoDd8p9XDZA0U0dph0kFn51Q9BxHu87s4XrKbGPyXuhxvZQH4uxsYHKl/kyUpsquXSbpQr9tlOm9Cn12tUrgA8gEEcCEF7jCNBc9e/6jpaXf58+Qq+YElQ5kwVXJcDy+cAfv9WW1bV8NbSW1RbCiU0IPBUxP96GawJsW9tgYpc0lK5+41hVDxJBvdsKUe6zTC7yBpbPM50QU1OKm6LGz73hG1PbxY1w/Go/2Ud5r8wUIqwXWQjk+JzsOz/LfxaQyuqR2w19FkElxQ2G3WPwTZJvyotEwHdlRanEtgPT6uiNXd0olw2sQBx03Jjweu/RS/p67D5Zr2Q+N0qeLInWR/+wNkWoVolP5TU1Hh5nZNQbKiJcSdQt6UlVir67yep1rvOBhdfAe8+J/e/zgwe3wW81cWpXPTd1raNxi8sxUSet9aOtx2/LMsXzuBI/PNwJfUlfiBgB/CCU7L8HYkvnrNaHb8QPH0fIN6GCQ1/I8XD/LFaz5Sgn5tNRvC3F5dg9cDbB/uTVEHJLm1Jyj02MVxa8p4h4B4NaeMUQ33/bckW602B3Jn2bvottEPn6DX0rWPHD1PphVeiS+F7wg4eh8Mqc2BXxy8g3cvwebhoRnt/s6zbzCPH3aoIQ7JFAAAQcn4CthFclmFpqqaqEWiasayGrLGetdTf85s0bucGRPr3m/2Vzwqux1eVYXr16/SQFxI0bN0r3w8bKcR5vyv30U2/y9d0nhElXKlmyJJ04cYIiIiJlFXYbpmK83rlzl2qLjRlOZcqUoeTJU8h6/HncuPHC5XF9KdRWrVqVvhTt/L54MV+S6bXY9KlQoaIUP7du3UpRUVFUt25dun//PtWqVUv2zSIyf+aYtizAmkrsgpmtf7NkyUIlSpQQ4uu/QnwOlGIuC6dKgD4qNhC7du0qm+Hxcjpy5IgUarm8GjPns3tmtvDlxGNPmSIFsWjLiS2HIb5KFEZ/QXg1igWZIAACIAACIGA1gWsB16lmvUZUqWIF+mPBXCGAaUTY2Aiv64ToNWjIcBrUvy9169yRGn/dOobwys9nlWrUpXv37lG9OrVk/Mxd3nvl5zE/D5cCrNWTQEWjBD6E8Kq+P1mzZpV/U1SuWJ6u+F+TcX5Limfnfy9fJndxrWCB/HTo8FEKDQuVbnxZHOUUfOcOVaiqeeYvX7YMpRDPxd4+++S1aZPGU5NGDeT54GEjae36DXTqyH758qNuv/ydqlalMgXdvCX75Qp/rVlBxYsWkXUt+fXk6VMaPW4inT3nJ2PJsnCcPv0n1PHbtuRVqCDFd/+WjNFeyrSZtJ9uPQiXe5Mc5/WB2M/j1EZYY7Lw6P6fdaWp8TYb60NPhQvi3RNqmiqil29ub9T7zF36eflZuffJe7hBYly8F8qGOZM7fan1Sqf2hXlvt4Iw9rgp9nl5j5T3StcPr0LJkyaWocQ4hirvZUZFvZXi8tmAMOlal/dWNwyvqjW24H3PrrMOSze/3NdrUZ4FXW7v937lKWM68T0WY2MxlwVfzudy2TKloW/Fniwndvv7XHjR+yS15oVmtZ9szOL1vthHbi72kXkPebzYe1QWqBxqja1WezUuIMVmBe7Zy9dUd9geqlPyUxrS6t2/A/YUqIRmZYRiTHhV7RgeeW+XXR6XKZhJ8jW8js8gYAsCEF5tQdFIG8ra1dBaVVdM1RVklUhrKLCqdgxFWSNdIisOBB6vWR6H2u+qpm/Z9t0HC86UsMdFdUVWDu7edvJ+eeNbP6yyFFTvhQmRc9w+eZNb0r+89gajYhKwuDjz+1KyV3Uz5xvxYiGyKmtPdfPTFe0sFSGNCa98w9987La0vGQ//5wCxU2bLTxLizezpnT+Um88LLQO/6aoVgCVF9/zSwmvnWrnpQ41NHFZ7ogbZEtxg9R9M0k9pPDNe0qXktqHEn7Y4PilXevmo3bVNA8Fls5Zd30W9SlLn3tqgsKv8b1Bv266LGM5KPFz/OrztP1EMOmOk90MdxRvYbEYGxvhlYPDd5h6UIqg4zuUIB2jXhqw6CQdFdbM/KAzTcxTJQ4Sz7ENhrcpSjVLeIgYFBrrV0PhlcuzSw0OPs8PccwrSjwk8YNV/myf0DDxIKPealNt4wgCIOCYBGwlvCoXwboiqiliuqKrMaE2Nm2Z6kPlWyK8cqzX9ev/EpsrT6V1J38eOnSoFENVO8aOmzZvpmGiXKlSX9H8+fMoSZIkUozt06cPsfUqi7FKeGVR86+/NlD79t9q37IPCAigJk2aEFvJsntjTt179JCWrSykZsyYUeYpEZQF4c6dO9Hx48fFsTN16tRJCL8/yTKPHj2iwYMHU9as7jRq1M8yz/AXi8rf//ADuQrL1rFjx4p4UillkYEDB9GOHdtp+fLlVKRIETmHsmXLydi3Ko8LXhYbTS1aaCyAdYVXFoHZ6naxEItZfObE8bcaNWokz9kqN1WqmG7x5UUn/wXh1cm/AJg+CIAACICAzQl8+11XOnj4CPns3CpeGMsWa+GVBavyVWpS5kyZhHviDfKlNGPCq4oh+33XTjSwb285D7Yy7N1/EHl4uNPEsaNsPjdnb/BDCq8c0/ef9aspV84cxCJ7y7YdhYh5jmpWr0ZzZk4Xz/2J6XbwHapYrRYVK1qUNvy3n7h+4z+0Z
t1f1Llje62V6tVrAVRLGGiolwF4HU0Jn3xt019rpDjK54v/+JPGTZpCbdu0otEjhnJWrNIvs4Sr4bnCunaLiHGcR7OXxg18qP5jNdgEKnxWeNQbLWK88p4dJ3exh8pGCmwlycmcAMvGD9UH75LWn0uEQGlJUnu1hnujvAfcdLSPNJSZ36sspRfiJicWIlmQ7NusIDUpm13m/fjrUfK78Yi2ja2uNahhK9I9Qhzl+LRsHMJ7oLwXysY4bDCSTgiiLI6O+PMM7fe7T6O/La41hGkzUYjPQrz9rWdpaRHLnSgvfDxOLsvJnKthWUDnl9p7Nia8cjFv4SVw8jo/SpE0CRUU42WjnoA7z6Rnvm51P9eKsVyWvT0OWHhSiK6FhfjqyVkxktr/tFR4fSssUob8cVoaDllqrRyjU2SAgAUEDIXX/wMAAP//X2qWmAAAQABJREFU7J0FfBTHF8dfixOgQIDg7k6R4kGKu0uh9A8UihanENy9aHErDi0UKYRgCe4WKBBICBI8QQOEIP95c8yyd7m7XJILRH7z+ZDdnR39btqdzG/fe199EIk+Yzp71Y9yZ0pNiRPGt1uvr4KC6crNB5Q2ZVJK65jMbu2GpaH2fUbJ4kumDTOqdsX7Bk2cs5zy5sxKA7r+qN2b9MefdPmaLw3s1o7y5Mii5fOJpbaMCuEiQgSerFsZofqqcvIWbdSpTccb919Qm0kHKEf6pLSsb3mjOhsP3aDfN/5H4/73LVUo6ETu5+/R0OVn6LcWhahOqYxGZbvOPkqe1x+T++SaFOfrr2jUqrO06/RdGt++OJUvkEYr++79B6o9dBelSZ6IVvSvIPN/nXecTl/1pwNTa2nl+GTBDi9asdubVgyoQFmdktDTwDdUd9geqvtdRhrYvJBRWdOL1hP204tXwbRlZFV5S41nWb/ylCNdUq34y6C39DLonXatThwSxKVECeLIy7aTD5DvvRfkOrYaOSSMq4rQzzMO0+WbT2n7mO8paaJ49Pum/2jjwRu0amBFypzGQSv3+s07qjbIjUrnS02TO5aQ+bbOWT2fcoLhBMFSpTv+L6nFOA8qlTcVTf25pMxuMc6dXom5/DO8Cn0tnoFK+87do2F/npHPjJ8dp6cvgyn47XtVRDt+4xCP4sX5mt6L59Ru6kE578bls1Cjspllmc1HbtJfB27I8wJZktO8nmXk+b3Hr6jZGHcqlC0FzelWmr4S3R+/8oj6LjhBfZsWoIZlDPVVRxd8H9OMzZckP5XHx/IF01C3evkoY6rE+mycgwAIxFACL97YZ8lXf2QdSWjL8H+NSK3xWE0XfM/T2HYTZD5fr3FfJc/HibyCWUO+Syy1ZdSwjRczZ86iRYsW0urVq6lgwYJma128eJFatWql3atStSr90rkz5c2bV8szdzJ6zBjasH49bd68mbJly6YVefjwIVUVbaRMmZLc3d21fHMn9erVp+fPn2nl3NzcqF+/fjR8xAhq0rixrKL62b59O2XMmJGOHz9BHTt2oLJly9H48eMoRYoU5pq2OW/37t3Up08fGj16NDVo0IB8fX2pfv361LRpUxo2bJhRO9OnT6clS5bQ2LHjqF69uqTmWrduPRo3bqxR2blz59HcuX/QypUrqXDhwkb3cGEg8Ew8+/SO39gFR3BwMAU8fhKirfjx41GK5MlD5CMDBEAABEAABGIaAVe3XdS1Zx/q0e0X6t2jm5zelN9n0h/zF5Lr1k2UO1fOUKc8csx4Wr5yNa1YspDKlS0tyzds1oq8fa6T56mjWv0jx47TD+06UMXy5WjapPFi3Rex9ZjWME4sEtizZw/xv3HjxlksY+3G4MGD5Rqd1+mW0jVvH6pepwHVr1uHpk8x/P3CZWfOmUfTZ82hJQvmUqWKn/btqtasRw/E2l//u2GubS737NlzOnHYXd7+bchwWv/XRjp1ZL9cy6t+q1auRAvnztKauHnrNlWqVkv+ni1bNE/Lt/Xk95lzaNYf88h1m/j9z/np9/9z9W/rOKNCOa/bz8Qe4yHKlSEZzetRhtZ5+NJaj+v05MUbObzWVbLLfbV0KRNpw/W5+5zaTTlIzoWdaEy7b7V8ayeW9kY9PO/RkGVnaGjrIlS9eHqticDXb6mmyy6q9m06GvZDUZnfbc5ROu/zWO4V817l17wBaJK2Hr1FkzZcoO4N8lKLip/+Vt15yo/GrD5PXermodaVs1PA8yBqMGKv2X1e3nO9+eAF7RxbXbb+TOxj1hm6myoWcqKxP1mfr9pPXtCrLOXLFPLvHR7HnC2X6fFHvmr4zZ2zUocauSix2BNWSbVluter7vNR7X92ry/m6/xpvvoy+vMlO6/SUrdrRlz193EOAvYicMf/qdHf/F9BeLUPWiWkNqjhTPxPJSW88rVelFXiqqnwunmnB/E/03ZUezjah8Czbf/Q+8AXEWrsa4cklKxuwzC1oYQ9c2LmOZ8A6j7nGHWomYt+qpaT5m+/Qiv3+NCCX8tQvszGm2izhIi2fr8vLe9fnrKnTaoJrxtcKokPED4tDHhwSnTcNb46JYwfR7sOr/B66dZT8hCi8NU7z4hfxM+EQHvH/5XkoNpUiwu3cdU1QZULqDnJwrof7arloI41c8scFl6fipexEnFVsSl/X6TNh29KodMxWQJNiPUQ4rNe+OTy3MaDJ6/IdUx1KUoqBmp8qk31Qldis3o+P4hF1i918qhi8lih7w4qljMlzezyHanFUJn8qWlSB4O4qwrffBBIP0zcbyS8KtFYlVHHqZ1KUqk8qeTlo2dB1G32EY2lKsMLpLnbrlCtkhlocEvDZvbwFWdo79l7tLhPOcotFoqc1MLDVHhV40mcMA4NaFqIvs3lKEXgAxfu03QhXqdOnpCWC4GcxWwkEACBmE0gsoVXl+W/kaevJxUSAmvBrIVDFV2Z9ucWXvl7wzdv3ojNkGd0+PBhWrp0Gfn4eNPEiZOoVq2aFn8BWrRoQZcuXaKzZ8+Kd87XRuVKly5DCRMm0ARVvnnhwgVikfPy5Sv09OlT8e8J3b59W9Y7f/68PL5+/ZpKlSpFZcqUofnz59O7d++oXLnylDt3bvrzz+WyTFBQEHXu/AudPn1KXn/7bXFRphzVrl2LMmTIIPMs/eD2t2/fQefOnaMbN27QkydP5Fy5/KhRo6hhw4a0a9cu6tu3L7m4uBDPUZ9cXV1pwIABmvB66NAh6tKliyzfrl07fVG5Mda7d28aMmQoNW/ezOgeLgwE7Cm8uu8/SO07dQmBtmiRIrTRTh8XhmgcGSAAAiAAAiAQRQi8EmscFreC3wSTx54dlDiRYQ8kLMLrFa+rVKt+Y6r+fVWaN3u6NjNzwiuvx35s35lOnDKsx0oWL07OFctRvTq1KVNG6+sxrWGchInA5xReu3b+mfr17qmNb97CJTRp6u+0af1qKlL404ej5n43znteoB1uu8XfCZfpiVjzPxbr7VtCQOXkc9lTHi0Jn51/7kAD+/aSZdSP7HkL0XelStKaP5eoLJuPYRVe7d2/zQONIgXHrjlPrif9aPovpai42Cd7++69VQF2v+d9cll2
msztF1qakqW90UWuXrR8l7esljezsVDJBicpksTX9kQP/feAflts+H8P57ORiXPhtHIvkQ05OCnhdfLPJah03tQyj3+wwNzh90PUsVYuavd9Tm3fkO+Z65fzNw6rTKm/SSj3e+0hvO45c5dGrDxLWdMmoX5NClDO9Mno0dPXtE7safO4TYXdX2Yeoev3n2v7uTwm06T2P20RXvkZ87NmYxZ+1rwvjgQCkUUAwmskkdULrKZiqhJlueu8ObNKS1d1rreC5TwlyOpFWs5Hsi+Bl0cP0Zsb1yPUaPws2Shx6XJhakMJe2zV2LtRfqO6/918Qp1nHKE2VbNT59p5aNYWIa6Kr66W9i0nX0z6wvP+vUKr9vrQQvE1UV7xNZF6mW8S1pephCipT/0XnaSjlx5qFqS2ipDmLF7V11LcPr+kswhL028Sx5ciMOcpYVONx1R45Zejp7C+NE1FhOVmidwGAZJFUxY2Nw6tbFTMVHg1FVf1hXlhwQsMJcraOmf1fNp+n4M61TIIwapdvfDKX8HVG74nxAKBy94NeEnNx3oYCa9sucrCqmmqUTyDkbUpW+tevPGEzl8PoGSCKwu9ccTXbGwl3blObmpTJQddEr8nncTvCfPvrBvjRZG/aMdValQuM5XLn4YKZ0spRW8lLo/6sRhVLpLWaAjLdl2jxa5XQ3xlZ1QIFyAAAjGGgL2EVyWwmrNiVfcUNHNl1L0LQqQdLMRaFmqVlay6F56jLRavpu0ePHiQunbtSkWLFtPETtMyfN2wYSMpWrKI+ZXJV8bVqlUjtkB0d3eXVbdu3SaEzMHyvECBgtJCNnnyb6Q1KGcq4ZXPhw8fQZs2bSQPDw+6du0adejQgUaMGEmNGzfi2zLxZh/fZwvZgwcP0cuXgTJfWa1+LGZ0ePnypWirI128eIESJ3ag0mVKk6OjI+0X7dy/f18TXk+cOCH77NatmxB4Oxu1sW7dOiG6jtWE13379tGvv/5KbEHQsmVLo7L79++n7t27U//+/alt27ZG93BhIGBP4dVXCOmbtmwLgTadU1pq2bxJiHxkgAAIgAAIgEBMIrBoyXIaN2kKtWzWlOrUqqFNbe2Gv+jfHTtp1PAhlD1bVipb+jvtnulJh87daJ/Hfho1zIWyZc2q3R48fKQUzpYunEepxNqpQH6DVxRej+3Z50HbXd3I48BBCgw0rMcmjR9DTRs10OrjxD4EooPwumnzVuo70LDmL1yoIOXIno2SC88jS5evkBBCE15NBV+u9DmFV3v3b58n//la2XX6jthLPUdsddmjfj6tY3MCbCUhdJ729heGJ8FyD2+UsHjNJTwZhpYs7Y2q/V7e18up8xLI7V0Qe4KlxP5ojwafxsQGFW5ivEcuPZB7nVwud8Zk9HvnUmLvMJ4mvOqNO7iMl58QXqd9El7ZAGPw0tN8S1q9ypOPP7jfgkKc7CCMYnhf2V4Wr8oQxdx+dc+5x+jMtQBSRkTsVbD6YDeqWiwdjWhTVD88o3NbhdeTXo+o9/wTwgIxEc0Vls0pkxrvlxs1igsQsAMBCK92gGipCSWwsriqF1RZlGUrVnYtrBKXYatWvZthVR/WropS5B2D/W5T4EH3CHXgUL4Sxctg7AI4tAaVsMcuYv/obnBlo+rsOHGbxq31lC8Xfsm4nbpDo1ef065VOT4qMXX3hOqUIF4cTXid3kV8qZXTUV+U6guBkN34rhtcSeb3mX+cTnj507ZRVekbh/ha2YnrPWnbsdtWXQ3/JNzhet95TisHVhCiaxJZl52VNxixR54rK1VLiwutMysntgqvarzqayzVJLtXrtTflQpnN7jh5Xxb56yeT2jCK7dpypXzOB29/JD6LzxpJLwa7lj+yYIruw/mr9f0z4RrsLsTdskxul0x4sWeckFtuTXDHeXmWVnHslVrdpMFnWpLWVmH1ibugwAIRG8C9hJelQthS4KpEl+tia5MUrXTqtIP1Mq5dYThWhNe2W2ug/BU8fPPHY36YcG0uLBaYLe+7N7XUhoyZAht2bJFWog6OTlpxV68eCHcAJc1cjXMbnu9vLyM3BKzpW3lyoYPipRAy40o4ZNFVLaoZTfJLAYnS2bwZqB19PGErWJ5I4pdFLOgevjwoRAWuFzUQ2wk9ujRnRo2akTDhQvhOHEMX/ayq+ShQ4dqwitb/pYvX55YIF6x4k+KG9fg5on7YUvb48ePacKrn5+fsAquRc2aN6ehgoc+LVu2nKZNm0qLFy+mkiVL6m/h/CMBewqvgAoCIAACIAACsZnAiNHj6c9Vq60icHBwsOoSli1mr/v6Wm2jds0aNHv6lBBleJ20c9ce6t6rr1hfOtC5E4fNrsdCVESGzQSig/Bau0ETunzFi3Zt3yJFV54cr/lLlTOs+U8cducssmTxam/hM6wWr/buX042Gv3gfbgaLm7S3fCiXiGNapQAu8bdR4RiC5YzY6tNDo3G4c7YFW5oydLeqLLCZBe+bPEZlsTGHjOFF8SDFx5oocaUxWtowqufCKPWUoRRa15RiM06Yddc//YSXpkxJ+WRUN+XCiE3Q+xlfyv2sk9d86dec49TPxFCrYFJCDV9PVuEV2/hGvon4Rqa91k5bFt6R4RY0zPEeeQQgPAaOVy1VpV4yhmmlq8swKqkF1z1wqypaKvK42h/Ai/27qK3D++Hq+G4qZ0oSZVqYa6rhD2uqHcBwbFPO/5+mG49DNSEz+viZf6jsP7keLAcc0C5QzgvYrt2EzFe+eumxb0NiwP1MmdBl13hxo3zlRzbv8dv04R1nkZfC8345z8ZN1T/gn8u4rM2HbOPXr5+p/VvzuJVvTA3D6+qjUcJjXpXGGo8phavtgCzVXhlK9Ipf12k+mUyUf+mn2L5sSUwWwTr3X/YOmf1fGwRXgctPSUXOvqYDBzHtauIv8CuQTgur4rxGtq8lZUsP2te8Knnp+IvMNtVv1WU7oDZcvaysG41TZdvP5WuSuqVzkRlheuRYmLRwjFy/xGcpgpOvLDqLr7iU4ZaPFaX5afpyH8PaWbX76hYjpSmTeIaBEAghhGwl/CqLFUZj2mcV4WMy5iL6aru81G5GQ5NoNXXsXZuTXhloZItRtesWSNExgJaM8qKs3nzFsJNrouWb3rCguiECROoUaPGNHLkCO325MmThWC5wkh4ZdfDnPbt20uJPrq+U5a1prFg379/T1WqVKE8efLQlStXqESJEjRlyqcNvh07XKW4yuMrJL6kV4njxd644UunhMu7ePHiqWzt+Pfff4txjqRevXpR+/btZb7ebbFyNcw3egkXwXuFmFu7dm1pycqulNeLeLYsNHNSMV55I6lMmbLS4lYf6zYgIEBY6DYmPloTjWVjsfgHhNdY/PAxdRAAARAAAbsSYLHr1m2/EG1u2PgP7d6zl4YMGiAsXrNp8TkDAh7T0eMn5HXixIbN98NHjwmr1Zch2hg+ehzdu3eP5s+ZSU5pUhNbMm79dwftP3iY2rRqbuR6Vom3VzxPm12PhWgcGTYTiA7Ca6HiBmOKYwf3ae6uVTgIx5SOocZ4tbfwGRnCK//9sM/jABU
VLpfTpjUIhDdu3KSr3j5U2bmC/LjznXDRu19YgWfKlJFy5shu8zOOCgVN3Q2bG9Pec3dp+J9ntT3GIWIfzeP8fSODFHP1OM/S3qiKF8t7uL93LikNarg87/fNFt4PcwijibZVcxCLw9M2XqQ0IkRYexELVcV3VTFiu9bLS60qZbPZ4pUNZ2oOMQihS/uU18RINmDhkHa8N+wiQpxxODfuu9ogN8qU2oFWi/1Ia0l52jMX41UZgyhjEtUOz7W9MO7huK8qNJ7yymfOcETV42NowuuDJ6/FHvsh2XZobenbxTkIRJQAhNeIErShvl58tWa9qhdcuVmIrjbAtWORd48D6LnbDtGiePOEKX1FSavXojgpwi5UKWGP422yyMmxXjleqYeIFcBfTZm6uFAxUfmrKudCaemJiKe667SfrKvcDPPQ1cuc280sLFFZeOMvmXaevEOct0S8UDN8/LrnoHAtMeijawkW6ThY/LZjt7TYoireqTnhddza87TjhJ90gcvubO8EvKKNBw0fFHxu4fWNEA77LTwh3VKUyptKuNZNIa1x9527JxcG/EUTu9zgZOuc1fOxRXhlVx8/zxAuH8Vz5PiraVMkIo69cPtRoMwLi/DKY1Rs2dVIXSHasgi/VTwXbp8XYsoVM5c1l5QAbhrjlcXbjtMP00Ox8OD4riXEP15UsbUrWy9zf3O7l9HEXnNtIw8EQCBmELCX8Mo0lFVreK1V7W3tymOyJryePHlSEyDbtGlDWbJkEa59vWndurVcldauXUv58xuHAJA3Pv7geKkc15StUsuWLUfFihUVcVwvCstSd1lCL6iyRSkLkyWE5WclZ2diS1EWfDnpy8kM8WPOnDkyxitfz5w1S9ZR91hY/d///ifrsXvf9CKuK7sLZhGZrVlHCXHVXOINkXr16kqr2KbCDV/KFClo586dcvxcXi+8sqs8nhtb6arE1rStW7emRYsWasIr39MLyHXq1JGbjDwWjl/r4jJExIltrprA0YQAhFcTILgEARAAARAAATsTmDRtOs1bsJhct26i3Llyaq136varFGS7dOpI/fv8quWbOzEXx/P4iVPUsu1PxGJa2x9aUsYM6Wmv+37hdngnNWvSiCaOHWWuKeRFgEB0EF77DxpKf2/6R8Zk/b5yJfkxgLLEjinC6/KVq2nkmPHyA4R/Nhj+nilbqZr8OGHG1EkiznEt2if+W+jwSzdp/X32+CHN004EHv9nq2rJ3bAaAFu9Tv/nEm0+fFNaZDYsm5kW7/SiG/fFfp3wIphc50VQ1dEf1V6tOaMUFUKODTDYux3v03E/LESO/9+3VL6gQehW7njLF0wjw4o9f/WWlu26KvcK1wyqKFwfO9gsvPLYDot9y4EiZizv4VYrnp5SJklALC5zuLYWzgaDDTUH5T2QrXL5H4dLM5esCa9nvQOoxx/HZLUaJdJLC+OAZ2/E3vJtOddWlbNR17oGl+481yvCqGTH6GpS/DXXF+dZE17ZyKS9EF15j50T77ubJhaskyYK+fGyaTlcg0BYCUB4DSuxcJZn18L8T59YWGVLV2X5qnc9bE2g1beBc/sSMLgc5udkq/j6FTmUdw6zi2E1aiXscRxXDoK+1O2avMUvPHYv3K1ePiMB7L34HGm5iMO56/RdKcRxYRbP2lbJbiTEqZc5uy+eLixa+YXJiWOEtq+ei4qaWDOu3OtNK/Z4yxc1l2PhMLkYw5p917Wvtp6+DKa6Q3cTi7MDmhmsbDj2KguEHFSeE4u6fRoXkDFeWdhTrobZRTK7SlZfLcnCNv5gd8bs0sI0xuvUvy/SP2IRsnlEFc0vP3+NxV9lcQxbXpzweL7Lk5q61M0jBGVjNxK2zJnF1B8m7qd21XJQRxHXQJ84xiuznyGCsat06qq/WHRdJU9hhcyJRcyewqq0q7BI5pf7wOaFVNFQj8FiQTdv2xUtXq6qwO4/2Fo1tHRMuDjuJ1wc9xfPqr54ZvrEz3La3xekG2QWcjnx71yVoumIFxzx436tL45zEACBGErAnsKr3uo1rOKrEl0ZsyWL2fA8gtmzZ9OCBQtCWLWqtjw9L9CYMQaXviqPY7sOHTqEcuXKpbIsHu/cuUuTJk+S1qFciEVUFjBnzpxJT58+lW6IOZ/dDw8R4itbkXJiEdPFxUXEeF0h46vqXQ3zfY7tyhajnFggjh//UxgAztu9ew9NnDhB1uVrbo/F1J49eoQoy/dV2rt3rxwfW6JyYpfC1atXp2HC9bBpfFi2vL169SqdOXOGkiRJIsuyG+S+ffvSuHHjqG7duqpZKfquXLmKzp49I/Ny585NTZo0oVatWmllcBKSAITXkEyQAwIgAAIgAAL2JDB52gyau2AR7dz2D+XKmUNreuaceTR91hyaNmk8Naz/aU2jFdCdmBNe+bar224aNW6iFJz4ml0Mt2relPr17ml1PcZlkcJO4Pr167Rw4ULKnj18FpQ+Pj5UtWpV+c9S7z7Xfen7WvWoe5fO1OfX7lqx+YuW0sQp02jzX2upUMFPnnIat2hDV8W63fPUUVn2+fMX1H/QEHITa3VO/DvBMYOXLF8pfk/uaxavg4eOJI4/fOroAUohYsBa6pfb4BivZURs4lXLFvFlmNL0WX/QzDlzQ/z+R6T/A4cOU7sOnal1y+Y0ZsRQOZ5fuveSc960frW0APe6eo2atGwjxdnwjDtMk7RzYeVumPcP2XK0SPaUlNUpCSk3wxz664nYazRN7arlFHuGof/9aG1vlPd7eU94z5lP+70ci7SLECFZiFWJ9/NmbLoo94VVHntA7NukAOXPnFxmsTHNxPUXyJKr4Z9r5aYfv//0/0Tuc8MBX7oo4rpy4r3U1pWzU5sqOSiOsHZVifc8x4o9YN7v1YdzU/fVcZGrl/TApzcQUvf4yG5/eU9X7Z1yHs+1leizYZnMfElsXFN14E4p8LKHRmtJE14b5KUWFbMZFVUuko0yTS5MQ9aZ3MYlCISbQIwWXtM5JiOnlKEHtw43vXBUNCfA6puB4Kqn8WXO2fL11ZlTobodZvfCiYoVD5elq5qZEl6VReXbdx/I/9lr4TYikeYCVpU1PbIFKrsb5piupkkJr+orKi7LL8skVr7gYRcTj0Tf/JWPcmNs2q6l66Dgd1Ic5cDk+peypfKRnc9z8ReWnSmFmMguMSyliMzZUpucz4L0W/F12jcfLWytlQ3tHi/8rt9/QTwLjhuROIEh3l5o9Wy5z/N/8OSV+ALxa0olLK2RQAAEYhcBewqvTC484qtedA2rYGuvp8Uus3gzhN1lJUgQ9v8Xcn2OjZoqVSrx7rb8zuFyLMg6Ojpa/fJbWeOyJe6AAQMsTpMFXba85X7Dkh49eiTnmTSp+TXyunXrZFwyFn9VLFhuv0ePntKil+/ny5cvRJdsKcuCraV2Q1SI5RkQXmP5LwCmDwIgAAIg8EUJvH37VotlH5GBsNj2Oug1pQ7jeiwifcbWumz1ygJseBKLrtmEu+nPkV6/DqInYs2fOpX1NX9YxhIcHEwsetqSMqRPT3lyhy4C2tKWuTLm/tsxzeO4x/q/I8
y1E1Xzmo51p/vCm59KbKBwWsQaVYIrW4E2KZ+VvPyekc/dZ5RPiJ2l86ZWxe1y5L7iiJBx1qww34s9R973ZM9+5vaFwzMQ3svk/d0UwurVyp+1xFak/HevCosWnr64Du+B33v8ShqCcGg0fbrg+5i6zDpKPRvmo2YVsupv4RwEog2BKCG8ZkydnFIld7AbNP+ngXTrwRMRC9ORkjoktFu79m5IWbpyu/oYr/buB+2FjwBbvwbfukFvHz2k94EGlwRfOyShuKlSU7xMWcJt5aofjanwqr8XkXNT4TUibaEuCIAACIBAzCRgb+GVKemFVL5mMbVQlkJG8V1ZoF3jsYo8xVEle8V1Ve1Fx+PjJ0/oqtdVaVHq4+NNrq6ulF5snHzuNHiwC23btpW+/74a1axZgzhO04EDB2Sek5OTdFHMcV+RIkYAwmvE+KE2CIAACIAACIBA7CDAgqs9E4uw0S09euRPpcpXsmnY7dq0puFDBtlUFoWMCShXwxWEG10OX7ZguxclEgYvbDWpBFcOz4YU+QRW7fUhdr+8qHdZypPxm8jvED2AQCQQ+OLC6/lrd6SFXI6MqYSVXbwITzHozVu6dvuR8IX+ngrmSKcFmo5ww2gABCKBAITXSICKJkEABEAABGwiEBnCq+rYVIBV+abHQlkLUSvnH4yEWdMyseVaH9uV48I2a9bsi0z98ePHIo7rWOlCWD8AdsM8deoUSp3avl906/uITecQXmPT08ZcQQAEQAAEQAAEwktg8ODBoVZVYqotIi2HzYhuiS1Kjxw7btOw06dLRzmyfx7rXpsGFI0KsavfJSJ82Mgfi1KVIumo7eQDMjboepdKInwZBNfP+Sj7Lzopw8i5T64ZJTwrfs65o6+YQ+CLC6/PAl+T7x1/GVGThde4EYgtyF/kvwoKli45swpr12RR2No15vwKYSYRIaDcKiQVriHs4ZZWjSVAuJt4GfSOMjgmtuoeQpXHEQRAAARAIPYRiEzhlWmyZavnDU9xPG9k3cpia8GshUNYwsa+J2A845s3b9KDBw8oS5YsUULc9Pf3pxtiTJyyC9dsyUUMKiT7EYDwaj+WaAkEQAAEQAAEQCDmElAuhjlOa0QSx4dld8NKpI1IW6gbMwmc8HpEfeafoNTJE8r91LPeAVS9eHoa2rpIzJxwFJ7VAxFHluPepk0BwTsKPyYMLRQCX1x45fG9eBkk4loGymOwEE/Dm+KJOIVJEicgx2QO8hjedlAPBEAABEAABEAABGI6gcgWXmM6P8wPBCJCAMJrROihLgiAAAiAAAiAAAiAAAjYn8CqfcLF7bYrsmHnwk7UrV4+WLvaHzNaBIFYQSBKCK+xgjQmCQIgAAIgAAIgAAJRiACE1yj0MDCUWEcAwmuse+SYMAiAAAiAAAiAAAiAQDQhwHFdkwnvhEggAAIgEF4CEF7DSw71QAAEQAAEQAAEQCAaE4DwGo0fHoYe7QlAeI32jxATAAEQAAEQAAEQAAEQAAEQAAEQAAGzBCC8msWCTBAAARAAARAAARCI2QQgvMbs54vZRW0CEF6j9vPB6EAABEAABEAABEAABEAABEAABEAgvAQgvIaXHOqBAAiAAAiAAAiAQDQmAOE1Gj88DD3aE4DwGu0fISYAAiAAAiAAAiAAAiAAAiAAAiAAAmYJQHg1iwWZIAACIAACIAACIBCzCUB4jdnPF7OL2gQgvEbt54PRgQAIgAAIgAAIgAAIgAAIgAAIgEB4CUB4DS851AMBEAABEAABEACBaEwAwms0fngYerQnAOE12j9CTAAEQAAEQAAEQAAEQAAEQAAEQAAEzBKA8GoWCzJBAARAAARAAARAIGYTgPAas58vZhe1CUB4jdrPB6MDARAAARAAARAAARAAARAAARAAgfASgPAaXnKoBwIgAAIgAAIgAALRmACE12j88DD0aE8Awmu0f4SYAAiAAAiAAAiAAAiAAAiAAAiAAAiYJQDh1SwWZIIACIAACIAACIBAzCYA4TVmP1/MLmoTgPAatZ8PRgcCIAACIAACIAACIAACIAACIAAC4SUA4TW85FAPBEAABEAABEAABKIxAQiv0fjhYejRngCE12j/CDEBEAABEAABEAABEAABEAABEAABEDBLAMKrWSzIBAEQAAEQAAEQAIGYTQDCa8x+vphd1CYA4TVqPx+MDgRAAARAAARAAARAAARAAARAAATCSwDCa3jJoR4IgAAIgAAIgAAIRGMCEF6j8cPD0KM9gaA3QeSYJGG0nwcmAAIgAAIgAAIgAAIgAAIgAAIgAAIgYEzA7sIrN4gEAiAAAiAAAiAAAiAAAiAAAiAAAiAAAiAAAiAAAiAAAiAAAiAAAiAAArGNQHrHb7Qpf/VBJO0KJyAAAiAAAiAAAiAAAjGSACxeY+RjxaSiCQFYvEaTB4VhggAIgAAIgAAIgAAIgAAIgAAIgEAYCdjd4jWM/aM4CIAACIAACIAACIDAFyAA4fULQEeXIPCRAGK84lcBBEAABEAABEAABEAABEAABEAABGImAQivMfO5YlYgAAIgAAIgAAIgYJUAhFereHATBCKVAITXSMWLxkEABEAABEAABEAABEAABEAABEDgixGA8PrF0KNjEAABEAABEAABEPhyBCC8fjn26BkEILzidwAEQAAEQAAEQAAEQAAEQAAEQAAEYiYBCK8x87liViAAAiAAAiAAAiBglQCEV6t4cBMEIpUAhNdIxYvGQQAEQAAEQAAEQAAEQAAEQAAEQOCLEYDw+sXQo2MQAAEQAAEQAAEQ+HIEILx+OfboGQQgvOJ3AARAAARAAARAAARAAARAAARAAARiJgEIrzHzuWJWIAACIAACIAACIGCVAIRXq3hwEwQilQCE10jFi8ZBAARAAARAAARAAARAAARAAARA4IsRgPD6xdCjYxAAARAAARAAARD4cgQgvH459ugZBCC84ncABEAABEAABEAABEAABEAABEAABGImAQivMfO5YlYgAAIgAAIgAAIgYJUAhFereHATBCKVAITXSMWLxkEABEAABEAABEAABEAABEAABEDgixGA8PrF0KNjEAABEAABEAABEPhyBCC8fjn26BkEILzidwAEQAAEQAAEQAAEQAAEQAAEQAAEYiYBCK8x87liViAAAiAAAiAAAiBglQCEV6t4cBMEIpUAhNdIxYvGQQAEQAAEQAAEQAAEQAAEQAAEQOCLEYgSwuuLl0Hk/yyQ+Bj87n24YcSL8zUlSZyAHJM5yGO4G0JFEAABEAABEAABEIjhBCC8xvAHjOlFaQIQXqP048HgQAAEQAAEQAAEQAAEQAAEQAAEQCDcBL648Pos8DX53vGnD2IKDgnjU1whnoY3vX3/ngJfvaGvRANZ0ztSMoeE4W0K9UAABEAABEAABEAgRhOA8BqjHy8mF8UJQHiN4g8IwwMBEAABEAABEAABEAABEAABEACBcBL44sLr+Wt3pNiaI2MqShAvbjin8anam+B3dO32Q3orLGcL5khHX3/FMiwSCIAACIAACIAACICAnkBkC68XfD3J84YnXfA9T57iXKVCW
QtRwayFqVAWPhZS2TiCQKwiAOE1Vj1uTBYEQAAEQAAEQAAEQAAEQAAEQCAWEfjiwuvZq36UKU1ycvzGwW7YA569pJv3H1MOYfWaFFavduMa2xq6f82f7nn5U8DtZ/Tq2Ws5/UTJElLKjMkobe5U5JQzZWxDgvmCAAiAAAjEIAKRJbyy4LrGY5WR2GoJW6tKP1Ar59aWbiMfBGIsAQivMfbRYmIgAAIgAAIgAAIgAAKxhMDTwDf0jUP8WDJbTBMEQCAsBKKE8Jo7U2pKLNwM2yu9CgqmKzcfUDrHZOSUMqm9mrV7O1e8b2ht5smRRTvHyZcl8PT+C7q077oQXJ9aHUjKjN9QvsrZ6BunJFbL4SYIgAAIgAAIREUCkSG8sug6ePlv2nTZurWV8w9Glq3KEnaN+yqtHARYDQVOYgkBCK+x5EFjmiAAAiAAAiAAAiAAAjGOwD+Hb9KsLZfoTfB7yi0MdH5rUYhypU8W4+aJCYEACISfQIwWXtMK0TWtEF+jUtq804P4n6XUoIYz8T+kL0Pg/rUAOvXPf2HqvHjD/JFi/Xrs8kP67+YTalohKyVNFC9MY7Kl8FHR/iXRfjPRfhIr7T99GUyLdnjRd3lSUfmCTrY0jTJ2JvBBBMH28w+kgOdvyCl5QnJKkchqDy9eBdPNh4HSjXvm1A6UMH4cq+VxEwRAIHYSsLfwusZjNSkx1VYhVV9nXLsJRgLt53oqQUFBdPfuXUqXLh0lSJAgwt2uW7ee3rwJorZt20a4LTQQcwlAeI25zxYzAwEQAAEQAAEQAAFLBLa7utHR4yeoX6+elCxZ5BoL+d64Qbv3uNNtvzvUoH4dKlakMH3O/i0xiO75XsIzYoffDwlL13iULW1SOusdIMXXxb3LRfepfdbxv3v/ge4GvKSUSRNQ4gTWw0+ucb8u9sbjUt3vMn3WMaIzEIgIAQivEaEXhrqmgmvenFmNal++5mt0DQHWCMdnuWBL10Mrzoarr3Jti9rd8nW2+HJqnYcvrRvsTOkdE4drXNYq8ZdZ60X7612cKV1Ky+27n79HQ5efiRKLiHuPX5H/syDKkS7pFxUTn7Ow+SBQcEskFwjWOOvvHbxwnwYtPa3PCnHeu3F+alzukwX8gyevyWX5abp885MFdvmCaWj4D0XNMli/35dmbb6ktZs4YRwa2roolS+QRsvDCQiAAAgwAXsLr/VH1pFgbRVd1VOIDPF19+7dNGTIUHr5MpDWrFlDBQoUUN1pRz8/P3JxGUKnT5/S8ooWLUZjx46hTJnC/wdd06ZN6dGjR+Tu7q61ixMQMCUA4dWUCK5BAARAAARAwH4EXIaNojXrN8gGd277h3LlzBGmxt+8eUN1GzWna97e5ODgQJ6njoapPgqDgCUCVWvWo+u+vrR43hyqXKmipWIRzmextVb9xhQYGEiOKR1p5LDBVLtmdfpc/Ud4Ap+pgYdPX9PFG08oTfJElD/zNxZ79bn3nC74PqGrfk/phAhL5/foJbWpmp06185DPf44JsXXbaOqxli3w++FSHrp1lMpNmdMZTlcJO8hj193nl6+fkcLe5WlvJlCMg14HkSTNlygQxcfaLyzpk1CvYRhU/FcjlqeOnn84g3VH76HGpTNTP2aGP9d7333OY1efY687zyn7g3yUouK2VS1EMe37z7Qyr3etNj1qrx3YGqtEGWQAQL2JADh1Z40LbSlF11ZcGVR1ZJrYX1ZiK8WgEZS9tG1nqG6F7bUNbsdLt2ykKXb4cqPKsLr6zfvaPORm8IKKQUVyJI8XHOxV6U5Wy/TWvGV09K+5SjnF3Thcei/B/Tb4lNkKpKGNs9rd57R1mO3zRZj62YWV/s3K0j1Sxs2/N8LU9d+C07IRV3lImnpW7EA2Xv2Lp0RltnNK2alHg3yGbXFX9nxgi+1sIptJBYkga/f0qbDN+SCZ4NLJUorhGIkEAABEFAE7Cm8KvE0rKKrGouqz66JxwrL1/CmJ0+e0IQJE2j79u1aE6tXr6aCBQtq13zy8OFDqlevvhRm27RpQzly5CAvLy8p0iZO7EBubjvFF+jh85oC4dUINS4sEIDwagEMskEABEAABEAgggQOHz1GbX7qqLXiunUT5c6VU7u25WTG7Lk0Y/YfsiiEV1uIRX6Z69ev0549e8LcUbZsBiGmatWqYa4bGRVOnT5D5zwvUOsWzSlhwoh727E0xg1/b6KBLsOoz689qGvnjvT111/Lop+rf0vjikr5bqfv0OhV57QhtayUjbrVy0tv372XIuuFG4/J5+4LOn7lIT0NDNbKqZPMaRyoUbnMtMztGr0Se6d7JtRQt2Lc8Znwhlhn6G6qWMiJxv70bYj5sbfEGZsu0q7Td7V7C4Twms9EeH0Z9JZajvMgFlNrlEhPRbKlpAdC/F6//7rcu5zycwn6Lm9qrQ0+UUYsw9sUoe+LpZf32Fp29T4fWrDdSyvbvb4QXp3NC68snI9dc57YWlklCK+KBI6RRQDCa2SR/diuXkgd2K2dkeBqKcYr50+cs1y2APE1kh/Qx+bD42LYdGTFG+YTLodDfpljWs7W66givNo63s9RLroLr5YY8Zdj7aYepAdPXtEGl8qULLHBtfSWo7dosvgKrO53GWlgc4Owz26HO808LEXa2d2+oyLZU8pmWSBvPXE/PRQWsmsGOVPGVAYrZnYp3X/hSSnazvillKUhIB8EQCAWErCX8KqP67pl+L/hJqksZsPrcvjFixdUt25dCggIoCZNmlDcuPFo3bq1ZE54XbFiBU2ePJkmTpxEtWrV1Mb8xx9/0Lx582jq1KlUrVo1LT8sJxBew0Ir9paF8Bp7nz1mDgIgAAIgEHkEXr1+TTXrNaKXga+oWtXKtHbDXxRW4dXr6jXZRsXy5ejJ06fk7XMdFq+R98hsapkF1/CIrvrGf/75Z1IirD4/pp7/PmsOzZozj7Zu3EAF8ueNqdOM0LxYSHzz9r00bDgprFjZIKJg1uR0QVjAkth7UymJcHFbSoiB2YVVZsEsKWSZP7Zdpo0Hb6oi1FdYYjYUBhAxNVkTXtnoo9V4g5haTxiSxI3zFW06dJPMCa/Kq+IPVbLTL3XyaLjYmrbT9MNUpWhaGtm2mJbPJ2ovWG9QMmDxSTry30MqlC0FVS2ajqZv+o8sCa8XfB9Tl1kGrwW9GuWn7SduSwEWwqsRZlxEAgEIr5EAVTWpRFdTK1cWVvme3r2waRluY9Iff8oyEF8V0cg7nhNfyPgJK8aIpAz501CR2rnD3AQLaZuP3qT9wh0Du6vgoOztvs9J568HhHA1zC+zJTuv0nGvR+R774UsW1b0+1O1nBTn66+0vvlLo/n/XqFTVx/RHf9X8kXUsExmql7c8GUQF1SuhvlF+PdBXzp++ZHYpP6aSgiryu7182ni3yPh2rffwhNUs0QGavnxy6H526/QkUsPaZR4GS7e6UUs7iWIG4d4LF3q5jFyrcFWmxx0nr9Q4vnlSJ+UOtTIJWKWviTXk3405eeSlCqZ9a/8+IuorrOPSt//7K4ivWMiSiT8//8mxEh2W6HGw9dL3K7Kl6/exQTH
st1+wk9+pfYq6B2VzpdafiVVSsStVWnrsVuCww3xgi9KWdIkUdnS5cjkvy5IxpUKp6VRq87SWZ/HUuBMkSQ+pRRjZ+vSBoJveNOeM3dpxMqz1LFWLvnsVTv9F52ko4LzxmGVKfU3CVU2nRGWrT2FZWvrytklb77BC8TOM45Q4/JZqLdYSOhTz7nHpJXs9jHfR0q8YH1fOAcBEIg+BOwlvCpr1bBYu3KdQlkKiT9aP3mLcFn+G3n6elJY2tHTZsG1cePGNGrUKKpYsSLNnDmLFi1aaFZ43bt3L90QMY/Y2jVePMPHLtzWjh2uNHDgAOmCuIX4Ct1aev78Oa1YsZL27t0jrWXLly9PvXr1osGDB4dwNXzjxk2aNWsmnTt3jp4/fyEtcBs1akh16tSRXezbt4/mzJlDP/30kxSP9f3OmDmTDuzfT9OnT6eMGTMSW/XOmDGDjh07Rrdv3yZ2j9y8ebMQ9fRt8Pn79+9p7dq1tHXrNrp48QJlyZJV1C1Cffr0oRQpUhgVP3jwIG3evJkOHDhIKVOmEFybiDEXoClTplDPnj0lX1XBU3yxz2UPHz4kLIhfUoUKFYSYXYvKli2riuBohgCEVzNQkAUCIAACIAACESQw5feZ9Mf8hTR7+lT679JleR4W4ZXXSy3a/ERsFeix25V69O5nVnh9LNZjk6fOoENHj9KtW7ep+LfF6IeWzalh/boRnAGqmyOghNdx48aZux1qHq/P2eLVmtXrrdt+1LlbT2ol1tVvgoNp9dr19EB4yansXJFGDBlEj/z9afzkaeSx/4AIS5JRuO2tIeO0xoljsCTlQXBM1Sm/z6LTZ3nN/5wKFypIzZs0ogb1DGt+LjNTiKGubrto7YplMsarvt+vxL7eXxv/ofNifV0gf37q3qUT1agWNkvdgIDH1OZ/HYnbZTfDPFaHxIlpmMtvVLpUyUjvn+cYHdKTwDdUb9geKpojJc3q+p3wFneTpv19UQ6dxbz0wmOc9PwnhNhcFjzudZl1RFrGmrPSNMcgtL1LFiX3nbsr91gzi33JsmLvslmFrJQk0ae/V7ndfefuSe927A2P90fZIINFTI6VysnD8x4tFVa4vwrXvQcv3pf7sTxfnk/Xunll+DZZ8OOPy0L0ZDHSXbSbKIFhb5f3kLM4GfZGOaQZ7+veehgoa/C+LluyKgMRtl5tN/kADRKeIMvkS0MLdnjRit3eZoXXjYdu0O8b/6MxPxUj50JptWEECyvjKgN2yv3rP7qX1vL55CdhqMLC78ahlbV8zqv+bXpp4Xrqqj/1Fd4CLQmv+z3v0/Ld12hY6yJyTj/PMBizQHjVcOIkkghAeI0ksNxs+z6jZOtLpg0z6kUJqpzJgqsSYPl8QNcftbJ6y1dTa1mtEE7sQmDfgpP06tnrCLWVKFlCqtypRJjbWOtxneZsuSzrcezOuwGvpG96fnmyaKpivCqrSBZc84q4A7wIYMGThcg6pTLSby0Mm9f8smoh3Daw5SN/KZQ8SQI6IERPvu7bVHyB9VEgVMIru6WNJ75GyiPcJR8T7jO4PXYpPLdHGfpKaLkc6Lz5WA9q7ixc2wpBlhOLj+w+gscYR7grySa++uIXGScWNSd3/MRhqRBCl+y8RhxrtESuVMRxWtm1Q6bUDvKlreYnK1v4wdacU/6+QP/deCrrcB8cxL5tlRzypanGw0Jo0Nt3VFi4qqhdMiOxe1729//TlIOy5WI5U1KCeHGkmMkZUzuVJCW+/ikWBQvF4mBx73JS0FZDOX7lkXyBKxfAi4XwffqaP50X4iuL5Dz3igXTSncbqk5Yjhxj4IeJHsSLoI1Dq5BDwrha9caj98lns25wJS2PT5hHtUFuVE7EbZ3Qvri853ZKuEgRcQ1GtClKVYulMyrPY2bXJ5biKxgVxgUIgECsIWAv4VUJprZaqiqhlkHrLWSV5Wx43Q2/e/dObnAkT25wi29NeDX3kDmWV8+ev0oBcdOmTdL9sLlynMebcr/+2os8PNyFMJmSSpYsSSdOnKDXr4NkFXYbpmK83rlzl2qKjRlOZcqUofjxE8h6fD127Djh8riuFGqrVKlCJUQ7SxYv5lsyBYtNnwoVKkrx899//6W3b99S7dq16f79+1SjRg3ZN4vIfM0xbVmAtZTYBTNb/zo5OVHx4sWF+PqfEJ99pZjLwqkSoI+KDcROnTrJZni8nI4cOSKFWi6vxsz57J6ZLXw58dgTJkhALNpyYsthiK8ShdkfEF7NYkEmCIAACIAACISbwDVvH6pepwE5V6xASxf8IQQwgwgbFuF1gxC9Bg4eSgP79aHOHf9HDZu1CiG88vrMuVptunfvHtWpVUPGz3Tbs1dejx4xVAqw4Z4EKpol8DmEV/X7kzZtWvk3RaWK5emK1zUZ57ekWDv/d/kypRP38ufLS4cOHyX/AH/pxpfFUU5+d+5QhSqGNX/5smUogVgX79nnLu9NnTiOGjWoJ89/GzKc1v+1kU4d2S8/ftT3y79TVStXohs3b8l+ucLf61ZRsSKFZV1bfjx99oxGjZ1AZ895yliyLBwnT/4N/e/HNlSwQH6K7P5tGWNUKcNe4249CJR7hxzn9YHYN+XUWlhj8t5pulDCdTUZs4+eCRfEu8ZXt2lK1vYulUEG752WzJ2Kbohx8f5vqbypaFKHEpqxzQ4hkI4TofJ4P7ZCQSe6KcRQtvzk/dC/hDAZXxjUbBXe8ziGKu/3vhUWvSwuc2gyFki5ff3e4437L6RXPd4L5r6CRXkWdLm9JX3LS0MZHhuLuSz4cj6Xy5w6Cf34fQ45b3b7+0IYCn3z0XufNeH1vtgTbjrGXczRkcb9rzgljB9HtsGGMGy12lN4k2SxWaXnr4Kp9pDdVKtkBhrc8tN/BxwnVgnNat/WkvDKbSSKH1da4nK7EF4VXRwjmwCE10girKxdTa1V9WKqXpBVIq2pwKraMRVlI2nYsbbZ7R+FuYgCqN2vfJiauC2CsbM7Bn5xLRUvNMePlp/rhG/72ZsNYqwSJjk+6DTxVdC3wp1xx5q5ZD8s2jUetVe+PPdNqilfIqeEKNhr7nEtyDsXfCJervyCd0qRSPsiSQmv/MJk8S6e+EqPBT1+AfHLfb2Ls1hkJLYqvFb7Nh0NEV8MfS0UWr1riU3Dq8iXs48QPdsJtvyy5/mpl7BaBPDY1Pz4PLSk3EuYxnhVixcWWof+UETORbXVYpy7FLBnii/YionFBqcbD15Qm4kH5DkvkPhFb6vwypXCG+NVdmjyY5uwtJ24/oK0XGULVpWUuMqLkWmdQ7oIruHiRskd4gt+lWQVtoTmL9r4yzD+Ok+fVB/mRFl9OZyDAAjELgL2El6Vi2C9iGqJpF50NSfUhqUtS32ofFuEV471+tdff4vNlWfSupOvXVxcpBiq2jF33LJ1Kw0R5UqV+o7mz59HceLEkWJs7969ia1XWYxVwiuLmn//vZHatftR+8re29ubGjVqRGwly+6NOXXr3l1atrKQmiqVwSODEkFZEO7YsQMdP35cHDtShw4dhPD7q6z3+PFj+u233yht2nQ0cuQImWf6g0X
lX7p0oZTCsnXMmDEinpTBi8KAAQPJ1XUHrVy5kgoXLiznULZsORn7VuVxW5fFRlPz5gYLYL3wyiIwW90uFmIxi8+cOP5WgwYN5Dlb5SZKhPjiEobJDwivJkBwCQIgAAIgAAIRJPBj+0508PAR2rfzX/HBWOYwC68sWJWvXJ3SpE4t3BNvlB+lmRNeVQzZXzp1oAF9eslRs5Vhr34DKX36dDRhzMgIzgTVTQl8TuGVY/pu/mstZc+WlVhkb9Hmf0LEPEfVv69Kc2ZME+v+r+m23x2qWLUGFS1ShDauWymH+9emzbRuw9/U8X/tNCvVq9e8qUbdhtrHAFzQkvDJ97b8vU6Ko3y+eOmfNHbiZGrTuiWNGubCWWFKv88Urob/ENa120SM45w5tbqfq3+twyh8ctYnQOyVnpOGKjzMdELMZC95vIfKyZoAGxT8jr7/zU169Vsm9jttSZb2Lh+KGKeNR+2TYur8nmWFAU182RwLkSxI9mmSX3jayyLz2Bug5/XHpPdox1aku4U4yvFp2ZBG7bmywcu8nmWkR0MWR4f9eUYazYz6sZgUm7nB1hOE+CzE27k9SkuLWM5T7oB5j5XLcrLmalgW0P2wJrxysT1n7wph2FN6TswvxssGOt53nlPTClmoc+08mhjLZVX4tMHCmraWMLAxl0ITXk3rQHg1JYLryCJgKrzSh8+cznjd/hD4Ksiuvb58/eYDt3v30VO7thuWxv7Xe+QH/meahHWrzBcxXI1u8TWX5/umyVJbpuVwHX4C/04+8MEe/8I6gt2n73wo32f7hy1HbxpVFdatH4QgK+/5PQo0umd68fvGi7KceFHKW8K9sLzus+D4h8fPLf+3NXPzf7LckUsPjJqc9+9lmX/4v/sy/45/oLzm8iqNXHlG5gkXvipLHsevPS/zhf98eS0EP3m967SfUTm+aD/toLyn5vdGqMhisRHin7AE1eqKuBemdkgAAEAASURBVLeyzlU/4/+21XiEOK2V5ZNHoj3mO3r1WaN8vliy00veU2NdvuuavL5yy7jtY5cfyvzNRz49I+GqQ+YJF81G7fJYzc2B52YuvX7z9kO9Ybs/VB+888PL12+NiijuPDdzqd2UA3IM6t6EdQb2vvefqyztKCyeZdm17j5aHk5AAARA4HnQ+w/2+Fd5cK0P/M+0rQVuKz/0XDBAy+drVfbIlXNavr6euq/PC+/52MkzPmTLU/DDkVPnzfbF7R497SnLcDn+16Hrrx9OnPvPYnk1lgFDR8ny5y97G5X1uX1f5hcvU9EoX9XTHytVr/tBX+7vra6y7rI1f2l1VT+XvG/KvD0HjskyP/yv84eb9/y1cvp2w3K+6V832d7K9ZtkW55XfOR1f5cRIdoePXGavLf6ry3ynpprj76DQpSdNP0PWfbQibMh7oVlfDG5rN8j4zVURP6PJIT1D/fuPwjxL+CxYT0WkbZRFwRAAARAAASiA4EdOw1rmmkzZ2vDnTzNsBa84nVVy7N2MmL0OLl+OXjoiFasQdOWHwp++512zSdCeJXl2nXo/MHfP8DoHi4ih8Du3bs/DBo0KNyNc11uw1oSIql8rr/2HWhUbMbsuTJ/n8cBo/wqNeqG+N0wKvDxgsuVKOOs3RroMky2J8KkyDzVb8dfumtl+ERYvWq/Z0Y3bLyYNmO2rH/lqvHv/+fq38ZhRolivA9Ysd/2Dx1+P/ghWOzfrdzj/aGu2Kvj/UT+J+K5frjj/9JorN5i/5HvuSw7ZZRv7cLS3qX7+buyrZ0njfdOX7wKlvn6fcGus4/IvP2e9z68481jM2mL2L/ksa31MN4DdD15W+av2usta/k/M+yZ8n6iaeo4/ZDcq1T5T8V+J7c5eGno8xUulWVZERJNVTc68jh4L5Tb0//jfefA18FGZVVbwjLXKF9/ofZtbd3z5Llxv0ggENkETP/m/4o7jCyV11y7Z6/6Ue5MqYWpu+FrDnNlwpr3KiiYrtx8QGlTJqW0jsnCWt0u5ZU7YXtZvJq2Y5dBohGNwJdyNTxPxGFdtdeHFvxahvJlNrhGVINit7HsPlZvEcpuL9xO+8m4o4+fv5FfHCkf+2sGVaSMqRxkYPje849LV7jcVuHsKai0CAL/vXA/yxasKimL17WDnSmD46d85d5idLtixDFNrbkadh1bzcg17oYDvjTzn0s0u9t3MsbA7+LrrI3i66xlwhI4R7qkqmt5nCpiJ3CMADU/9RWTUSFxwV9r8RdanEKzeHUbV13GI5CFxQ/x8hXxaU9St/p5tfi06h67RnZZdlpzv2wPi1f11ZTqQx31Lo1VHh8tudLge2+Ee4+qA3cSu0ee2eU7zjJKbPHqmDQhrf6tosxftusaLXa9qrHXF94i3IxMFm5G9F+16e/jHARAIHYSiGyLV+WCmF0HF8xamNa4r5KgzVm6qifwuS1eednL1qDPhIXD4cOHaenSZeTj400TJ04ScUprqmGFOLZo0YIuXbpEZ8+epa+Fy319Kl26jLAo/eRqmO9duHCBxGaPsBy9Qk+fPhX/nkhLUb53/vx5Pgg3xa+FBW0p6Y54/vz5xK6Ty5UrT7lz56Y//1wuywQFBVHnzr/Q6dOn5PW33xYXZcoJ98O1KEOGDDLP0g9uf/v2HTLOLMe35VixPFdOHBe3YcOGtGvXLurbt6+0+uU56pOrqysNGDBAczV86NAh6iKsaLl8u3bt9EWJrRLY+jc098dGlWLZhT0tXt33H6T2nbqEIKi3wghxExkgAAIgAAIgEEMIvBJrnKo161Hwm2Dy2LODEn/0thEWV8NCnKVa9RtLq8Z5s6drZMxZvPJ67Mf2nenEKcN6jN3QOlcsR/Xq1KZMGa2vx7SGcRImAp/T4rVr55+pX++e2vjmLVxCk6b+TpvWr6YihQtp+eZ+Nzg26w633eLvhMv0RKz5ORYwxwDm5HPZUx4tWZx2/rkDDexrsKCWBcWP7HkL0XciLuuaP5eoLJuPYbV4tXf/Ng80ihQcu+Y8uZ70o+m/lKLiuRzprQjjts7Dlzg8nDkLWLWn+INwS8zxVW1JyuLVdO9ykasXLd9l+LuMQ8vp0+WbT6WXxC0jq8ps5YGPL9h7IodicxZ7txxGjT0ZclIWr5N/LiH3g2Wm+MFh3zr8fog61spF7b7PScpSlO+b65fzNw6rTKm/SWg3i1e155xVhG3r16QA5RTxc4XRDK3b7yvHXbGQE4396VvuWqZfZh6h6/efk+uY6jIcnsrXH9U8LLka1pflc7V3ixivpmRwbW8CphavEF7tRFjvUtjUfbASZbkrdiEsrFxlr3yuj/HKmcoFsd4tsSyMH3YlcG67F/n99yBCbWbIn4aK1M4dpjaU+GnqOpcbmbjek7Ydu60JkxyrVHzZJGOwsi9/FiSTJo4vhU0ur4RXPmfR7tDFBzIou4rbyvl61wyqb+VSmO9zUi9BW4RX08WCEhKV8KrEQA5UzzEF9IldXHB8ACW83n4USDuF0Gya0ogXfL3SmWR2WIXXgxfu06Clp6l34/zUuJzBLYdqX1j60oBFp6h7g7zUomI2u7gaFlax9O
iZIb6f6oePNYpnEKL4J3Gb89h9ScNRe6RrjfUulYxcafB9TuwmOVi4k9YHkOd8rlt9sBtxTODxIiYCJ/Xc2NUyB5jXJ+XmY3EfEb82w5f5GEU/HpyDAAhEDQL2El6VwGpOUFX31IzNlVH3IhrjVbWjjra4GlZl1ZHjk3bt2pWKFi2miZ3qnv7YsGEjKVqeE27HvuKA6LpUrVo16ZZMuRreunWbEDIHyxIFChSkbNmyyThL7MqXkxJe+Xz48BG0adNGEQPWg65duyZdCo8YMZIaN27Et2XizT6+7+bmJuKpHpJugfnG6NGjNRe/hpKffr58+VK01VHEdb1AiRM7UOkypcnR0ZH2i3Y4PqwSXjlOLbsx7tatmxB4O39qQJytW7dOiK5jNeGVXSqzu+PBgwdTy5Ytjcru37+fugvXyf3796e2bdsa3cOFgYA9hVdfIaRv2rItBNp0TmmpZfMmIfKRAQIgAAIgAAIxicCiJctp3KQp1LJZUxlzVc1t7Ya/6N8dO2nU8CHSbWzZ0iE/aFZlO3TuRvs89kuXrtmyZlXZNHj4SCmcLV04j1KJtVOB/HnlPV6P7dnnQdtd3cjjwEEKDAyU+ZPGj6GmjQwhF7RGcBJhAtFBeN20eSv1HWhY8xcuVJByZOc1f3JaunyFnH9owqup4MuVPqfwau/+I/zQP3MDu07fkW6HmztnpR7182m9mxNg2UjltLe/jO/Ke32j2n1LudIbG5toDehOLAmvan+Wxc+cJkYrF0Ts2VIi5muPBp/GdFPEf3UT4+V9TRZTOeXOmIx+F2HKkok4q0p4NTUC8fITwuu0T8Kr8I5Hg8WeKae63xm78eV+C4p95w41c8tQcvZyNaxETxWiTnb+8UfPucdkfNkNYo80rYivq/Y+qwpDIg6dZilBeLVEBvlfmgCE10h8AkpgNRVUWZTl2K1KcOUhcBm2as2T45M4pOrD2jUSH9LHpu+LuKinhKVmRFLxhvnJSVgnhiWpoOjmYm/yV0j8AlXCpHBtQWv2XTcST7mvcWvP044TfkbCq34M7Mefv8RioZODqO8YU03GZFUv9sgUXg8LMXvg4lNkunDhgO4tx7tLEVnNTz9mS+dhFV6VtW6Dspnll1T6dte4X6c/tl4mFft15V5vmv+vlxAyvxWCppNWlK2O2fq4f7OCVP+jAKy+MDMn6GoVQzlR/fVtWoAalslstvSAxSfpyH8Ptd8BVUhZ8rapml3GP+D8S7eeUqfph+ViaWDzT19hsg+DTjMPE38lZ2qhrNrDEQRAIHYSsJfwquK2smXr2HYTQsBU4qs10ZUrqXZaVfqBWjm3DtFOWDOsCa/Tp08nB4ck9PPPHY2a5ThOxYXVQsaMGYV16Haje/qLIUOG0JYtW6SFqJPTp3fGixcvqGzZskYxXps2bUpeXl7EsV5ZdOXElraVK1eW50qg5QslfLKIyha1q1evFuLqQUqWzPxHM2wVyxtR/fr1k4Lq4cOHQljgcrseYiOxR4/u1FDElR0+bJiMScv5PKahQ4dqwitb/nLcWRaIV6z4k+LGjcvFpPUtW9oeP35ME179/PyEVXAtaiZivw4VPPRp2bLlNG3aVKPYr/r7OBdxkkRc4fSOxl+0gwsIgAAIgAAIgEDYCYwYPZ7+XLXaakWO2+l56qjFMmwxe93X1+J9vlG7Zg2aPX1KiDK8Htu5aw9179VXrC8d6NyJw2bXYyEqIsNmAtFBeK3doAldvuJFu7ZvkaIrT47X/KXKGdb8Jw67c5bFGK/2Fj7DavFq7/7lZKPRj9dv3hF7lssljBUW9SoXYuRKgF3j7kNPA4Plfbba9L33gjKncaBVAw3e6EJU1GVYEl7Z0pYtbtnSky0+w5J433Pm5kt08MIDzaOfrcKrn/9LajnOg5pXFGKzTtg117+9hFdmzMmcBavymjijSyn6NqcjnRJ79b3mHqd+Yt+0gYV9U24LwitTQIqKBCC8RvJTUeIpd2Nq+coCrEp6wVUvzJqKtqo8jvYncHStJwXcfhquhlNm/IZKi0DfYU3qayN+WS/8taxm9agsNbk9JUyqF7Q+4Dm7Hm47eb8UMJXFK1s+spVrIyE26t0Xq4DpeyfVkO4nPofwKuIR0I9TDspA9ezKonwBJ+lCYtHOq1II1M/PFnaLRb1lbtdIWeOqOoqNqQUui441h7hJPisHVqAsaZLIKiz8tpt8gPioAtJ7eN6jIcvOGC04RLQE6r/oJB2//MhIeD3p9Yh6zz9Bjctnod6N8qth2Hx8Lrg0HbNPuGmOR+sGOVO8uMZuKlVDO0/50ZjV56lGifQ0pFURmc1jai++UOPA8/OFi+r8H11Us5XzT4I1u57Wz/WgsHwetOQUlcmfmiZ1KKGaxhEEQAAEyF7Cq7JUZaRbhv9rliyXKSiEWWtJuRkOTaC11ob+njXhlYVKthhds2aNEBkLaNWUFWfz5i2Em1wXLd/0hAXRCRMmUKNGjWnkyBHa7cmTJwvBcoWR8Mquhznt27eXEn10facsa1OmTEl64fX9+/dUpUoVypMnD125coVKlChBU6Z82uDbscNVuEQ+RDy+QuJLepXq1atPN2740inh8i5evHgqWzv+/fffYpwjqVevXtS+fXuZr3dbrCxe+UYv4SJ4rxBza9euLS1Z2ZXy+vXrpdDM98eOHUf16tWVG0llypSVFrd6UVnEqxIWuo2Jj9ZEY24rNicIr7H56WPuIAACIAAC9iTAYtet234hmtyw8R/avWcvDRk0QFi8ZqNKFcvLMgEBj+no8RPyOnFig2cqEbdVWK2+DNHG8NHj6N69ezR/zkxySpOa2JJx6787aP/Bw9SmVXMj17NKvL3iedrseixE48iwmUB0EF4LFS8t53Ps4D7N3bUKB+GY0pFigvDKfz+IWLdUVLhcTpvWIBDeuHGTrnr7UGXnCvLjznfCRe9+YQWeKVNGypkju83POCoUNHU3bG5Me8/dpeF/niXlYnjI8tPkcf6+0T6cuXqcZ2nv0kd4OGwn9vMKZUshrFZLUoJ4cWQT7FFv9pZLMnRb26o5iMXhaRsvUprkCal9jVzSqIYLqv3MrvXyUqtK2Wy2eFV7ptzG0j7lxUehhv8fsgHPLCHm8t6lS8vC4kOSr2Tf1Qa5UabUDlrIM65nLimvewt6laV8mYw/NB2+4gztPXsvxL4uz7X91INyn3bX+Opyf1x5UVwuwtdlN7EE1vcL4VVPA+dRiQCE18/wNPTiqzXrVb3gysOC6PoZHo6ui6f3X9ChFWd1ObaflmtblL5xMoh6ttcylFSxTll8ZXcVD4WY+u/x29I69eXrd5rwqqxj2c1wnVKZhDvhd7TlyC35UuKWlPB6zieAus85Jn39szCYNkUiYstTdutbp1RG+q2FYeP7cwivPC6fe8+pyyyDi2S+5sRz5ZcvW+oqYdlwx/rPC76PRVtH5dxqi7mwpSi7n7C0eOHWVOxYjn1QrXh6KTrvEwulO/6vqK+IJ9BQCNScHoqYAo1H7ZPnLFIWzZ5S1j1zLUDm6S1eebHDCw5ObE1bTbi9KCLK25oWiVisy
0VM1kFCrK9d0tidh74NXgSx1evRSw+pXIE0VFK4F2H3JxeFy4/WlbNTl7rGcST0fPjZs/DNMSnY0nnlgIoyLoO+fZyDAAjEbgL2El6ZorJqDa+1qr2tXXlM1oTXkydPagJkmzZtKEuWLMK1r7dwp7uWq9LatWspf37LH9ZwvFSOa8pWqWXLlqNixYqKOK4XhWWpu6yvF1TZopSFyRIlS1IlZ2diS1EWfDnpy8kM8WPOnDnEMV45zZw1S9aRF+IHC6v/+9//ZD1275texHVld8EsIrM16yghrppLvCHCYim7GW4q3PClTJGCdu7cKcfP5fXCK7vK47mxla5KXK9169a0aNFCTXjle3oBuU6dOnKTkcdy+/Zt4V55CLVo0Vw1gaMJAQivJkBwCQIgAAIgAAJ2JjBp2nSat2AxuW7dRLlz5dRa79TtVynIdunUkfr3+VXLN3diLo7n8ROnqGXbn4jFtLY/tKSMGdLTXvf9wu3wTmrWpBFNHDvKXFPIiwCB6CC89h80lP7e9I+Myfp95UryYwBliR1ThNflK1fTyDHj5QcI/2ww/D1TtlI1+XHCjKmTRJzjWrRP/LfQ4Zdu0vr77PFDmqedCDz+z1bVkrthNQC2ep0uvCVuPnxTWmTyfuLinV50434gbR1VlZI7xFdFzR6t7V3O+/cKrdrrQzmEy2LeG2bxk/thgxG9Zz7ljpdDj5UTIe+ev3pLy3ZdNTLIsdXilQepPBWqPdOUSRIQi8vsgbGFc1bqrnO73Gf+cTrh5S+tctkyl8OqmUvWhNez3gHU449jshobmbCFccCzN2J/+Laca6vK2ahrXYNLd57rFWEgtWO08NwoxF9LCcKrJTLI/9IEILx+pifAroX5nz6xsMqWrsryVe962JpAq28D5/YlcF+IbKf++S9MjYbHxbC+g2BhqThbuLzdd/auJqJ2ErFi2Y3DWuEOV/m2Z0vHpcLaky0+VeKXIL982AXxmkHOWhxR/tpphlgMsIjLiYW3+qUzU6dauTXrSv5qikW5DUOE73whzqq0R4xjhBCgx/xUjJwLpaV7j19RszHuRi9cdr3LLnjVV0iqrmmMV5XP1pgsFnr5PaUMKROL4O9piIXfjQdvkKmrY1XH3PG9WHhM/uuCjH3L95X7CUvjUW2w6LzhgC95Xn8ss3ghU++7TNREiJP6xC/rCSK2ruLGcXRZ4J284QINaF5Q1lHl13lcpyVuhsVNh5q56Kdqn/6QU2XMHZV7DhbQVw10prhxLC8euD5bNbuIL+jYVbBKvMAa/kNRzUJa5fOR5zlT5zabn/3Q1kWFtXEafTGcgwAIgIDdLF4Zpd7qNaziqxJduR1LFrN8L6xp9uzZtGDBghBWraodT88LNGaMwaWvyuPYrkOHDqFcuXKpLIvHO3fu0qTJk6R1KBdiEZUFzJkzZ9LTp0+lG2LOZ/fDQ4T4ylaknFjEdHFxoZUrV8j4qnqLV77PsV3ZYpQTC8Tx4xv/Ab979x6aOHGCrMtllJjas0ePEGX5vkp79+6V42NLVE7sUrh69eo0TLgeNo0Py5a3V69epTNnzlCSJElkWXaD3LdvXxo3bhzVrVtXNStF35UrV9HZs2dkXu7cualJkybUqlUrrQxOQhKA8BqSCXJAAARAAARAwJ4EJk+bQXMXLKKd2/6hXDlzaE3PnDOPps+aQ9MmjaeG9T+tabQCuhNzwivfdnXbTaPGTZSCE1+zi+FWzZtSv949ra7HuCxS2Alcv36dFi5cSNmzh8+C0sfHh6pWrSr/Werd57ovfV+rHnXv0pn6/NpdKzZ/0VKaOGUabf5rLRUq+MlTTuMWbeiqWLcrF9bPn7+g/oOGkJtYq3Pi34lRw1xoyfKV4vfkvmbxOnjoSOL4w6eOHqAUIgaspX65DY7xWkbEJl61bBFfhilNn/UHzZwzN8Tvf0T6P3DoMLXr0Jlat2xOY0YMleP5pXsvOedN61dLC3Cvq9eoScs2UpwNz7jDNEk7F1buhtOJfUu2HGUDi6zCyEa5GV4r9gGfCCHUNLUT+4Edxb5gaMna3qXa82UvhuzJjhPvG3YRIiQLsSo9FXvFMzZdFEYZd1WWjO/KhiXKI962Y7do4voLZCnG689ib/jH7z/9P5H75L1E3rflxPuIbOzRpkoOiqMTPE9d9aexItwd75kWzp6C5nQzWHlrA/l4ssjVSxibeNNCYfGa18TilYt4CwtfNkJSe7Scx3NtJfpUodh4H7nqwJ1S5GUXzNaSJrw2yEstKmazVlTe+2XmETnXA1NrhVoWBUAgIgRitPCazjEZOaVMGhE+dq9rToDVdwLBVU/jy5yz5eslIWSG5naY3QvnE1/ihNfS1XR2/JL1F64VUoivi6yJcSw++j8PkgHTlfsJ07bUdeDrtxQU/I5SJk2gsj7rMUCMk0VPJ+EGQx83lRczDUYaFqPbxZdL+he5LQN8++6DXPgkjG9wv2FLHS7zMugtMb8kieJZrcILKRa0OSh9aInnEj/e15qLj9DKh+c+/27cfviSngS+kS5F9EK5ufbYHchtsVDjObB75bByMtcm8kAABGIeAXtavDKd8IivetE1rIKtvZ4Iu8zizRB2l5UgQdjfl1yfY6OmSpWKvvrK8sc0XI4FWUdHR6tffitrXLbEHTBggMVpsqDLlrfcb1jSo0eP5DyTJjW/Rl63bp14f3wtxd84cT69Z3v06Cktevl+vnz5QnTJlrIs2FpqN0SFWJ4B4TWW/wJg+iAAAiAAAl+UwNu3b7VY9hEZCIttr4NeU+owrsci0mdsrctWryzAhiex6JpNuJv+HOn16yB6Itb8qVNZX/OHZSzBwcHEoqctKUP69JQnd+gioC1tmStj7r8d0zyOe6z/O8JcO1E1r+lYd7of8EobXpWi6ei0iDWqBFc24GjhnE0Yljwjn7vPZIi30nlTa+XtccJ9xRFGGkmt7F2GZW/Y1jGpPWTel7byZy2xARH/3Wtt79qWPnlvl4192NrWIWFcoyrKo1/PhvmoWYWsRvdwAQLRhUCUEF4zpk5OqZI72I2Z/9NAuvXgiTDPd6SkDgnt1q69G1KWrtyuPsarvftBe+EjwNav90QszwDhXuHVM4PlaKJkCSllxmSUNrcjOYlA30jWCfBLtPk4d/k1FMc/KCK+iLr/+LV0W8EufGuVzECDRbwAJBAAARAAgc9PwN7CK89AL6TyNYuphbIUMorvygLtGo9V5CmOKtkrrqtqLzoeHz95Qle9rkqLUh8fb3J1daX0YuPkc6fBg11o27at9P331ahmzRrEcZoOHDgg85ycnKSLYhZmkSJGAMJrxPihNgiAAAiAAAiAQOwgwIKrPROLsNEtPXrkT6XKV7Jp2O3atKbhQwbZVBaFjAkoV8MVhBvdwiLe6oLtXpRIGHyw5zr2ONikfFZKJ8KdIUU+AXa7zO6XF/UuS3mE4RMSCERHAl9ceD1/7Y60dsuRMZWwygrduiw0yEFv3tK124+EL/T3VDBHuki1QgttLLgPAiAgXCbeeUZj1pwn7zvPjXDUL5OJejXMr7k+NrqJCxAAARAA
gUgnEBnCqxq0qQCr8k2PhbIWolbOPxgJs6ZlYsu1PrYrx4Vt1qzZF5n648ePRRzXsdKFsH4A7IZ56tQplDq1fb/o1vcRm84hvMamp425ggAIgAAIgAAIhJfA4MGDQ62qxFRbRFoOmxHdEluUHjl23KZhp0+XjnJk/zzWvTYNKBoV4vBuS3ZepZE/FqUqRdJR28kHyPfeC1oxoIJ0ORyNphLth9p/0Uk6eukhuU+uGWYvidF+8phAjCHwxYXXZ4GvyfeOP30QSFl4jRs3/F/Q8xf5r4KCiZ28ZRXWrsmisLVrjPkNwkRAwAYC7AKDY5X6BbykxAniUjYRIwHub20AhyIgAAIgEIkEIlN45WGzZavnDU9xPG9k3cpia8GshUNYwkbiVKNF0zdv3qQHDx5QlixZooS46e/vTzfEmDhlF67ZkosYVEj2IwDh1X4s0RIIgAAIgAAIgEDMJaBcDHOc1ogkjg/L7oaVSBuRtlA3ZhI4Ibwe9pl/glKLcGkZHBPTWe8Aql48PQ1tXSRmTjgKz4r3kDnsWmih1qLwFDA0EKAvLrzyM3jxMkjEtgyUx2AhnoY3xYvzNSVJnIAckznIY3jbQT0QAAEQAAEQAAEQiOkEIlt4jen8MD8QiAgBCK8RoYe6IAACIAACIAACIAACIGB/Aqv2CRe3267Ihp0LO1G3evngXtj+mNEiCMQKAlFCeI0VpDFJEAABEAABEAABEIhCBCC8RqGHgaHEOgIQXmPdI8eEQQAEQAAEQAAEQAAEogkBjuuaLHHEQyJGk+limCAAApFAAMJrJEBFkyAAAiAAAiAAAiAQ1QlAeI3qTwjji8kEILzG5KeLuYEACIAACIAACIAACIAACIAACMRmAhBeY/PTx9xBAARAAARAAARiLQEIr7H20WPiUYAAhNco8BAwBBAAARAAARAAARAAARAAARAAARCIBAIQXiMBKpoEARAAARAAARAAgahOAMJrVH9CGF9MJgDhNSY/XcwNBEAABEAABEAABEAABEAABEAgNhOA8Bqbnz7mDgIgAAIgAAIgEGsJQHiNtY8eE48CBCC8RoGHgCGAAAiAAAiAAAiAAAiAAAiAAAiAQCQQgPAaCVDRJAiAAAiAAAiAAAhEdQIQXqP6E8L4YjIBCK8x+elibiAAAiAAAiAAAiAAAiAAAiAAArGZAITX2Pz0MXcQAAEQAAEQAIFYSwDCa6x99Jh4FCAA4TUKPAQMAQRAAARAAARAAARAAARAAARAAAQigQCE10iAiiZBAARAAARAAARAIKoTgPAa1Z8QxheTCUB4jclPF3MDARAAARAAARAAARAAARAAARCIzQQgvMbmp4+5gwAIgAAIgAAIxFoCEF5j7aPHxKMAAQivUeAhYAggAAIgAAIgAAIgAAIgAAIgAAIgEAkEILxGAlQ0CQIgAAIgAAIgAAJRnQCE16j+hDC+mEwAwmtMfrqYGwiAAAiAAAiAAAiAAAiAAAiAQGwmAOE1Nj99zB0EQAAEQAAEQCDWEoDwGmsfPSYeBQhAeI0CDwFDAAEQAAEQAAEQAAEQAAEQAAEQAIFIIADhNRKgokkQAAEQAAEQAAEQiOoEILxG9SeE8cVkAkFvguj/7J0FeBRXF4YPRYtD8OJWJMHdpbhLsUKhxYq7uxbXwk+RAsU1BVoIgQDB3RKchAS3AAESEgLsf89d7jC72SyRDQnJd58n2Znr952Fmdxvzjl2SRPF5CVibSAAAiAAAiAAAiAAAiAAAiAAAiAQKwnYXHjlDpFAAARAAARAAARAAARAAARAAARAAARAAARAAARAAARAAARAAARAAARAILYRyGSXQltyHINI2hkOQAAEQAAEQAAEQAAEYiQBWLzGyMuKRX0lBGDx+pVcKEwTBEAABEAABEAABEAABEAABEAABMJIwOYWr2EcH9VBAARAAARAAARAAASigACE1yiAjiFB4CMBxHjFVwEEQAAEQAAEQAAEQAAEQAAEQAAEYiYBCK8x87piVSAAAiAAAiAAAiBglQCEV6t4UAgCkUoAwmuk4kXnIAACIAACIAACIAACIAACIAACIBBlBCC8Rhl6DAwCIAACIAACIAACUUcAwmvUscfIIADhFd8BEAABEAABEAABEAABEAABEAABEIiZBCC8xszrilWBAAiAAAiAAAiAgFUCEF6t4kEhCEQqAQivkYoXnYMACIAACIAACIAACIAACIAACIBAlBGA8Bpl6DEwCIAACIAACIAACEQdAQivUcceI4MAhFd8B0AABEAABEAABEAABEAABEAABEAgZhKA8BozrytWBQIgAAIgAAIgAAJWCUB4tYoHhSAQqQQgvEYqXnQOAiAAAiAAAiAAAiAAAiAAAiAAAlFGAMJrlKHHwCAAAiAAAiAAAiAQdQQgvEYde4wMAhBe8R0AARAAARAAARAAARAAARAAARAAgZhJAMJrzLyuWBUIgAAIgAAIgAAIWCUA4dUqHhSCQKQSgPAaqXjROQiAAAiAAAiAAAiAAAiAAAiAAAhEGQEIr1GGHgODAAiAAAiAAAiAQNQRgPAadewxMghAeMV3AARAAARAAARAAARAAARAAARAAARiJgEIrzHzumJVIAACIAACIAACIGCVAIRXq3hQCAKRSgDCa6TiRecgAAIgAAIgAAIgAAIgAAIgAAIgEGUEooXw+to/kHxe+hF/Br3/EG4Y8eN+Q0kTJyS75EnkZ7g7QkMQAAEQAAEQAAEQiOEEILzG8AuM5UVrAhBeo/XlweRAAARAAARAAARAAARAAARAAARAINwEolx4fekXQF73fcgglpAkUQKKJ8TT8KZ3Hz6Q35u3FEd0kD2THSVPkii8XaEdCIAACIAACIAACMRoAhBeY/TlxeKiOQEIr9H8AmF6IAACIAACIAACIAACIAACIAACIBBOAlEuvF68eV+Krbkyp6GE8eOFcxmfmr0Nek837z6hd8Jy1j5XRvomDsuwSCAAAiAAAiAAAiAAAnoCkS28unu5kZu3G7l7XSQ3caySQ3YHss9eiByy8aeDysYnCMQqAhBeY9XlxmJBAARAAARAAARAAARAAARAAARiEYEoF17P37hHWdKlJLsUSWyG/dlLf7r96DnlElavyWD1ajOusa2jp6576InLTvI9f5oCHtyTy0+U8TtKUaQEpa1el9JUrhHbkGC9IAACIAACMYhAZAmvLLiuc11jIraGhK11lZ+odeU2IRUjHwRiLAEIrzH20mJhIAACIAACIAACIAACsYSAr99bSpEkQSxZLZYJAiAQFgLRQnjNmyUtJRZuhm2V3gQG0bXbjymjXXJKnzqZrbq1eT/XPLy1Pr/PlU07xkHUEnh11Z08Zk2kF+dOWJ1IyqKlKVf/kZQsn73VeigEARAAARAAgehIIDKEVxZdh68cqi2XrVtbV/7JxLJVWcKuO7BGqwcBVkOBg1hCAMJrLLnQWCYIgAAIgAAIgAAIgECMI/DP0ds0f/sVehv0gfJmTk5DWzpQnkzJY9w6sSAQAIHwE4jRwmsGIbpmEOJrdErbdrsS/4SUGtWqTPyDFDUEnh7cS+4
DuojBOepwaFIcsp+5mNJU+iE0lYPVCXj7ntYd8JQ35wr26YOVhzfD+/Fr2nf+AVUUfeb+zI0/LHXDO5+Y0O7VmyDy9QuiTKm/pW++gQvzmHBNsQYQiO0EbC28rnNdK+5pRjE1tEKqvs3k9lNMBNovdX0CAwPpwYMHlDFjRkqYMGGEh92wYSO9fRtI7dq1i3Bf6CDmEoDwGnOvLVYGAiAAAiAAAiAAAiER2OnkTMdPnqKBfXtT8uSRayzk5e1Ne10O0N1796lRw3pUtHAh+pLjh8Tga8+/fvcldZx9RFi6xqccGZLReY9nUnxd1q/81760Lzr/9x8M9OCZP6VOlpASJ7QefnLdgVuU7Nt4VL90lnDP8bXY1739xE+GvMyaNgklShA33H2hIQiEhgCE19BQskEdc8E1X+7sJr1evellcg4B1gTHFzlhS9cz7RqKsUIruqppxaHiq7aHy/KVXVLUH+0ibhyZaUgL28W5O+z+iIYtP0uj2hSmmsUzqYla/Dx86TEN++sMjWxTiGoV/85ina8pc++5+zRu9QU55UE/2lPDMqY35RV7btIypxsWl1S2QFqa1rGESdnp609p1tbLdEfcnFWqVSITDWxmj5u0AoJPEACBr5KArYXXhuPqSQ6hFV0VtMgQX/fu3UsjR44if38/WrduHRUsWFANp33eu3ePRowYSWfPntHyihQpSpMmTaQsWUzvHVqFUBw0b96cnj59SgcOHAhFbVSJrQQgvMbWK491gwAIgAAIfAkCI0aPp3UbN8mhdv/7D+XJneuzw85bsIjmzF9gsV7VypVo2Z+Wyyw2QCYIhECgeu0GdMvLi5YtWkBVq1QKoVbEs1lsrdOwKfn5+ZFdajsaN3o41a1dk77U+BFfwZfp4YlvAF3yfkHpUn5LBbKmCHFQz4evyN3rBd2450unrvvQvaf+1LZ6Tupa93vqtfCEFF//HV89xrod/iBE0it3fKXYnDlNyOEiD1x8SL9vuEj+Ae9pSd9ylC9LcKbPXgXStE3udETsR6uUPUNS6tu4ABXPY6eytM/nr99SwzEu1KhcVrEXa/p3vceDVzRh7QXyuP+KejbKRy0r5dDa6Q82HvSi+duuaFmJE8UVe+ZFqELBdFoeDkDA1gQgvNqaqIX+9KIrC64sqobkWlhfF+KrBZiRmHW+S6vPuhcOaXh2O1xk8fqQikPMh/AaIppwFfDNuNXvB+QNnjsY0LwgNS6b1aSvGVsu0TbhEqRpheDuvXOkT0qNxY1cpZPXntKAxaeEK/S41LpKTkqZNAEdvfyYjl1+QsXEw8Dc30qpqvgEARAAga+OgC2FVyWehlV0VdBUe3ZNPElYvoY3vXjxgqZMmUI7d+7Uuli7di3Z25uGBXjy5Ak1aNBQCrNt27alXLly0fXr16VImzhxEnJ23i3eQA+f1xQIrxp6HFghAOHVChwUgQAIgAAIgEAECBw9foLaduik9eC0w5Hy5smtnYd0MHLsBFq7fiP9/FObYFVy585JbVu3DJaPjC9H4NatW+Ti4hLmAXPkMAox1atXD3PbyGhw5uw5uuDmTm1atqBEiSLubSekOW7a4khDRoym/n16UfeunYTntm9k1S81fkjzik75zmfv04Q1RsMNnlerKjmoR4N89O79Bymyuns/J88Hr+nktSfSA5753LOmS0JNymelFc436Y3waOgypZZ5lRhz/tI/iOqN2kuVHNLTpA7Fgq3LV5TPdbxEe84+0MoWC+E1v5nw6h/4jlpNdiXev2WjlsI5UtNjIX5vPHhL7uXO6FyCSudLq/XBB8q4aEzbwvRDUaNxEVvLrt3vSYt3Xtfq9mwohNfKwYVXtkhmcTxtykTUROz5+gW8I8ej3nK8TSOqUAbh2RAJBCKDAITXyKCq61MvpA7p0d5EcA0pxivnT12wUvYC8VUHMxIPn7ruIfeB7GI4/Ml+hnA5XLlGmDqA8BomXJ+tPH7NeXmT57fOVrt4WhRehy0/Ix6cXtGG4VU+299QYQnMb2Dpb8QfDAZ5w77o+Zwcx1SjNMkj70H5sxNEBRAAARCIAAFbCa/6uK7bx/wX7hkpi9nwuhx+/fo11a9fn549e0bNmjWjePHi04YN68mS8Lpq1SqaPn06TZ06jerUqa3NeeHChbRo0SKaOXMm1agRtnu66gTCqyKBT2sEILxao4MyEAABEAABEAgfgTcBAVS7QRPy93tDNapXpfWbNlNohdcuPfrQNfEinuueXeEbHK0ijQALruERXfUT6ty5MykRVp8fU49nC+vt+cKKe8fWTVSwQL6YuswIrYuFxLfvPlCLStnptLBivXz7hQh9k5LchQWs3hliUuHitpQQA3MKq0z7bKlknYX/XqWth29r4w8Qlph6Qw6tIIYcWBNeWchs/btRTG0gvA7GixuHHI/cJkvCK1vEjlp5jn6qlpN+q/e9RoetabvMOUrVimSgce2Kavl8sGDHVVovXA3r92YHLzstjWIccqSi6kUy0hzHy2RJeOUQf22mHqQnLwJo3bDKlDlNYtn38atPaNCS0zCqMSGNE1sTgPBqa6K6/pToam7lysIql+ndC5vX4W6mLfxb1oH4qoMaSYdXRvejR7v+iVDv6es0pvzjZ4epD73wWqVQBlq6+wZdve1LRXOnlvFZm5XPFiye6P4LD+nAxQfENwl2hVHq+zTUUPi4zyasNVVSbwOZuxpmC86dp+7SsSuPKWWSBMQ3RHYRwTe9sLgadnV7SMvFG13d6+cjdu17/MoTKpAtJU35tbicwt2nfvKtI35Y8QsIEq4lUlK9UpmpZjHjm0lLhavfw5ce0di2RSi7bt5C06S+f54kgziY1620Wk6oPpV1Kluyli+QTlqqWrJ47Tz3KCWI9w0t6FHms/1uOuQlYgjEp9olTF0wK6vZlQMrUM6MkRuT47OTRAUQAAEQCCcBWwmvylo1LNau3MYhm4NJTNcRK4eSm5eb8DDwE7WuHNzS4HPLZMG1adOmNH78eKpUqRLNmzefli5dYlF43bdvH3mLmEds7Ro/fnyt6127nGjIkMHSBXFL8Ra6tfTq1StatWo17dvnIq1lK1SoQH379qXhw4cHczXs7X2b5s+fRxcuXKBXr15LC9wmTRpTvXr15BD79++nBQsWUIcOHaR4rB937rx5dOjgQZozZw5lzpyZ2Kp37ty5dOLECbp79y6xe+QWLX4M1k7fBx9/+PCB1q9fTzt2/EuXLrlTtmzZRdvC1L9/f0qVKpVJ9cOHD9O2bdvo0KHDlDp1KsG1mZhzQZoxYwb17t1b8lUN3MQb+1z36NEjwoLYnypWrCjE7DpUrlw5VQWfFghAeLUABVkgAAIgAAIgEEECM2bPo4V/LqE/5syky1euyuPQCq+Nf2xNCRMkpA1rVnx2Fs/F89j0mXPpyPHjdOfOXSperCj91KoFNW5Y/7NtUSHsBJTwOnny5LA3Fi34+ZwtXq1Zvd65e4+69uhNrcVz9dugIGn9/Fh4yWE302NHDqOnPj70+/RZ5HrwkAhLklm47a0l47TGjWu0JOWJcUzVGbPn09nz/Mz/ig
o52FOLZk2oUQPjMz/XYZfWTs57aP2qFTLGq37cON/Eoc1b/6GL4vm6YIEC1LNbF6pVI2yWus+ePae2v3Qi7pfdDPNckyROTKNHDKUypUpG+vi8xq8hvRCh3xqI0G9FcqWm+d1LCwvI2zRLeMjjxGIee8TL811yKiiE2DyZLHtC6jb/mLSMtWSlaYnBnzuvif3YJzRUhJr7y/mGFA717nFZlNx/wbjfmzVdUiqXPy39WDE7JRX7kvrE+8JssXnu5jPKZPctFc6ZWoqYHCuVk9qz7SNc9/LeK+8R83rts6eS+7i5zPYxrwrRk/eKD4h+v00Yl8qJPVX2Hqj2mdlN7z+CjwrBlitTMmnJqkLmsfVq++mHaFgrByqbPx0t3nWdVu31sCi8bj3iTbNFOLeJHYpSZYcM2rKChJVxtcG7JfuFPU33azvMPEws/G4dVVWrz3m8x8wWrmdu+Mg9YEvCK4vpXecek14P+zUpoLXng97/OyEZ7pz4g9z7NSnECQjYgACEVxtADKmLX/uPl0V/zRptUkUJqpzJgqsSYPl4cPeftbp6y1dza1mtEg5sQuB4w4oU8OBuhPpKlDEzldl+KEx9KOGVb5T3fd4Q+7TnAN+nbzyVLg9+qZmbfq2VR+tzl7gRTl7vJs8r2Kejpy8DpVCbSrjAZd/56VMZ3SNYEl7dvZ5Tt/nHZVsWduOJBzqOS5BFjMc3z7AIrzuO35H++HlcvsFyf4XEg0mn2nnp4fM39OPEA3KcknntKEH8uJrffjXGQbdHNGLFWepYOw91qPHJ5c+1u77UafZR+ZYYvy0W2hQY9J5+Fjd5fpDYPLIqXRHiNbsItiS8Np2wX8715x9ykat4qGF3IIWEa4siOVMFe5ixND7Hf+g464gs+kdYvH4jOCKBAAiAwNdIwFbCqxJMQ2upqoRaZqa3kFWWs+F1N/z+/Xu5wZEyZUp5OawJr5au19u3b4Wo2EcKiI6OjtL9sKV6nMciZp8+fcnV9YAQJlNTyZIl6dSpUxQQECibsNswFeP1/v0HVFtszHAqW7YsJRAbetyO06RJk4XL4/pSqK1WrRqVEP38tWyZLONfQWLTp2LFSlL8/O+//+jdu3dUt25devToEdWqVUuOzSIyn3NMWxZgQ0rsgpmtf9OnT0/FixcX4utlIT57STGXhVMlQB8XG4hduhi9gPB8OR07dkwKtVxfzZnz2T0zW/hy4rknSpiQWLTlxJbDEF8lCou/ILxaxIJMEAABEAABEAg3gZsenlSzXiOqXKkiLV+8UAhgRhE2tMJruSo1qIQQUFns2rV7D71584ZKFC9GpUoUlwKZmhg/n1WuUZcePnxI9erUkvEznV32yfMJY0dJAVbVxadtCHwJ4VV9fzJkyCD/pqhSqYKwgL5JNz08qKR4dr589SplFGUF8uejI0ePk88zH+nGl78vnO7dv08Vqxmf+SuUK0sJxXOxy/4Dsmzm1MnUpFEDeTx05BjauHkrnTl2UL78qB+Xv1PVq1Yh79t35LjcYMuGNVS0cCHZNjS/fF++pPGTptD5C24yliwLxylTpqBffm5L9gULUGSPH5o5Rpc6bAl557EfVS2cQcZ5fSysIjm1EdaYLDxm/IwL2mYT99NLvyDa83vNUC1JeenjvdTAd8a9yLolM8vxXc49oLGrz8tQZyXzpiFvMS+vh6+FpW0amtaxBMX9uPeo9oV5H7mifXq6LfZzORwa97lZCJNsaKL2bNm17jth0cviMrvb5f1bDqW2dVQ1SpIonpyz96PX1GXeUbkHzWMFifos6HJ/fw2oIL388dxYzGXBl/O5Xta0SYn3VTmx29/Xwuo1ReL48tya8PpI7Bk3F3vGvF88+ZfilChBXNlmy2FvabXau3F+KTbLTPHr1ZsgqjtyL9Up+R0Nb/Xp3wHHiVVCszLEsSS8Op8R7qRFDFg2/KleNKPqVn4uE8ZP7CY6pFi0JpVxAgLhIADhNRzQQtNEWbuaW6vqxVS9IKtEWnOBVfVjLsqGZg6oE3oCB0rmFJWFuWWEUhyqcsozTD0o4ZUb/VorN/1S0yiy8o2l/YzD0hXCsv7lKa94y+rhMyFoTjogb3IrhKWlusGomAT6mKOWhFcORM432TndSlHx3MZg5RxA/rd5x+SclSgamgWomziLtvPEm2F6d7v8ELDjxF1qJd46Yt//nLzEjbzdtENURrytNb1TCenKo8GYvdJid9WgitqQbAm7cs9N4reb+A2z0KZVLuJNKuHXX1n4qpuuufDKFrWVBu6Sb4Sx0K1P/ECyqFdZMadE+mx5zKL1qetP6dHzANovrI2TJIovrXv5uiCBAAiAwNdKwFbCq3IRrBdRQ2KiF10tCbVh6SukMVR+aIRXjvW6efMWsbnyUlp38vmIESOkGKr6sfS5fccOGinqlSpVmv78cxHFjRtXirH9+vUjtl5lMVYJryxqbtmyldq3/1l7y95DbOA0adKE2EqW3Rtz6tGzp7RsZSE1TZo0Mk+JoCwId+rUkU6ePCk+O1HHjh2F8NtH1nn+/DkNHTqUMmTISOPGjZV55r9YVP6tWzdKLSxbJ06cKOJJGe91gwcPISenXbR69WoqVKiQXEO5cuVl7FuVx31dFRtNLVoYLYD1wiuLwGx1u0yIxSw+c+L4W40aNZLHbJX77beImSNhmP2C8GoGBKcgAAIgAAIgEEECP//ahQ4fPUb7d/8nXhjLGibhlb1u5cpfSFoHsgWrPrEQt2XdKsqY0WidpWLI/talIw3u31dWZSvDvgOHUKZMGWnKxHH65ji2AYEvKbwmSZKEtm1eTzlzZJcvQbZs+4sQMS9QzR+q04K5s8Rz/zd09959qlS9FhUpXJi2blgtV7jZcRtt2LSFOv3SXrNSvXHTg2rVb6y9DMAVQxI+uWz7lg1SHOXjZcv/pklTp1PbNq1o/OgRnBWmNHuecDW8UFjX/itiHOfOrbX9UuNrA0bjg/Oez2i8iPHKbmg5ZRRi5pvA9/RC7J1ysibAsgHID0Odia0/VwiBMjRJCa8s9I76qTDF/2gtzQYeTcfvl3uVf/YuRymFuMmJ3eeyINm/WQERmzSbzOv+x3Fyu/Wc9FaabEW6V4ijHJ+2oPBGqN+zXdS7LCUXgiiLo6P/PkdsCDP+56JS7OUO20wR4rMQb//Xq4y0iOU85Q6Y58l1OVlzNSwr6H5ZE165msv5B8KYx40SxosrvSeyAY/H/VfUvGI26lr3e02M5brKJfBwYU1bR4jUlpLaA7YkvP4lxFX22Ghpn/nfE3do6kZ3i6KspXGQBwJhJWAuvLKLzy+azl2/a/B7E2jTMf0D3hq43wdPfW3ab1g6+6XfOAP/mCdh3SrzRQxXkyI+5/pcbp5C6su8Hs7DT2B/iRyG/SWyR/AnR5gn8OJ1oKFC/52GBqP3Gt6Lu6A+7Tv/QJZtO3ZbZgt3E/Jc3Bj01eSxcG8hy0QAeHl+yO2hPN99+p48F28UyfNxq88Faztzi7ssczp9VysTN32DpR9VYbuYE89765Hg31dVx/xT+PuX61T5alyvR69UlqHFpP2GJuP3G
e4Aq50lE2BWm0+/jDqgJGCZSsITtMP0IRYscTn3wdb0QVDERmdI6p+oHnjULr9qVBkTpYQ2yqh1kun7fjphkVObJnMuPEWhup9xK2PVNeD3j+YTF9Yd0/8lb2sGWxZEjhFUCfUrerWdOp64+Vpa0pX91VTvV3rDFswivZkJyLgSCNwFnCa/aRbBZRPWOnFl0tSfUOlKXd23ofL8Ir4j1unz5Cl5cea6sO3Hdo0cPJYbqeuwd16xdSz25XPbsv9LUqVModOjQSoxt164dwXoVYqwWXiFqrlixkurWrWPssr9y5QpVqFCBYCUL98ZILVq2VJatEFKxmQtJi6AQhBs1akiHDx/mYyNq2LAhC79tVJknT55Q165dKW7ceNSvX1+VZ/sDovLvzZpRDLZsHThwIMeTiqCKdO7chTZu3EDz58+n9OnTqzG4u+dSsW91Hgqe54WmqlUtFsBm4RUiMKxuZ7JYDPEZCfG3ypUrp85hlRsxomVDlcqQHwYBEV4NFHIiBISAEBACQsApBOo0aEJ79x+gHZv+5g1jCR0WXiFY5S5QlGLHisXuiVeqTWn2hFcdQ/b3Jg2pc/u2qu+wMmzbsQvFjx+Phg7s55TxSCVfCHxL4RUxfVcvX0xJkyQmiOzVatVnEfMUFS1ciCaOHc3z/h/o1u07lLdQMcqYIQOtXDJfdXT5qtW0ZNkKalS/rmGleunyFSpWuryxGQAFvRM+cW/NiiVKHMX5zNl/0qBhI6hWzerUv3cPZDmU/hjHroYnsXXtOo5xnDy58ey3at9oMAifnOQ1sf4c4/Xfz4JrPBYzsS72lI1dkHwSYN+9/0iFu25W1p9zeD3XL0kLrxB6e/2WwTDcgMFExf472OgmIk1t7W5409OGH+0rpaEK7olUE80nHFTe8NYPLKw83iETVqRbeU0S8WmxNmpep8U6ZVQWRCGO9v7zBO32uE/962RSYi+erTmUxWcWb2GMAYtYJL2GiX6iLJJProZVAdMPn4RXFNt28i4b8Hgo4x94TIThDzwyYj22aclfrNZw4Smx0/SjvAbrxuKrq6mVL6faUAmC8Cj2GilJCAQVAiK8BtCb0EKqWVxFU1p4tRVStRWsrfCKZ7yrC/ckOYdAYAmv2I2DXTkIEF440xf3vRgVRFO4zbUVXs0fPj16fCjf8kff1u0C3FjM33aVLTHzKHe92rpxec/8hmUp6njJ4l4J3kGEnUtaeNV12x7RFsrrcvqDbp4I6GdshVf98e5azU25LNblcNSTh50jiiuB06/CK6x9i3TbrPo+5vfsXlzp6jYQ6B07t2zTT5HDqskOBOjfeLcbxqYtQ7HbrdKAHaSFSdtn9bUel3kioIVXxHnoOvOYl91ceBbWvvaSWbzWE60/mmajrLzrDck34XUJB6WfsPq8UTVit1bMlchKFMekC79jcMexuFt+xU+EVwOZnAiBEEEgoIVXiKxnPE/ToLpDFU/fRFcU+tbC69mzZ6lGjRrG+y7ILsh+57ABqVL5HAdpAIuXy5YuVS52zbGiINzChZlZeDUqtzkpU6asEny1QLt582aCB44+fftSJXaZjKTbWb9+vbJMPXz4iBJgIY4OGTLYi4tgmyZ8vdy6datyNTxgwAAllnp6elLZsmWVFWvv3r2tnoer41mzZhkWr3qspUuXocGDB1mVnTx5Ck2ePMkQdK1uyoUi4EzhFYuCj59wLCqbFC5cWIr+2QLc5pZcCgEhIASEgBAIVgTgurV56/bUqsXv1K5VCzU2Ry1e+w0cQnPnL6R5s6ZTLneLq0t7wuuBQ4fpt7oNCTFgRw8fojyTBCuYQXAw31J4LVu6FI0Zafn7BSjgGngMx0udNW0ywQpWp0LFyyhXxB7HDuosu0eUe/78BR3Zv1Pd9074hKXr9MnjjTpusAvr/EVKqN+zOTOmGPl+PXFUeHV2+37tZ1Aoh7XXxmP3KdfCU1rlVOHZFu+65qMAqw1p8qWPQwPrZvbTMLTwCkMUs7tfWJP2nHOCetXMQEWzfFkbRuzU4j22UJHM8aj3bxlVG/B2ePrqE7YWzUy50sZWxie2jet1WrMbY5RB+LOBC08b3vQQeqxc3+121zxt1x2dKbyiHxPXnFcWuOa+V82XWIXeM3sI1CLuAvYwmDB2ZHNx47zPvBO0/eQ9uzFljUJyIgQCgYAIrwEEXQupzrJ4ta0ngLodYqsNLFfDf7AbhZUcx9T2o4sXActUWKjaCq8Q0ZqUSGm8K23xiQy4GTYnuNtF0m5yIZo+evGWNg3y6gaj2uCdameXFlTxHIKb7+IYA5fuPFe7m55zTIA7j97glhFbVn/QR7FlqW08WVvhVQvB09rkpNQJLa4gVWX8Q7v6hVVn0rg/kl+FVzw/5e8LKsYrzhNzsPtfOWZsQXYLnMbUBiYNmgfK6WTu9/ztV2jq3xeNnV469oA5MDuEXsSyRaxZuPTA5APuP5AQzwBuQpAgvOp8XM/rnIcSm2LY6gkU7tkmWNfCmlZP5PK6xVEWyrqc7QRI5+sj4tB+4H+wMN5/7gHH4L2k3IYs6paPXGNGUsVWH7hBI5efpXYV0yhRFpkivGqCchQCIYNAQAuv2gUxXAenS5yeY1wvUGDtWbpq4t9aeP0f77qBNehztnDYv38/zZ49h65evULDhg3nOKXFdbe8HKtVq0bnzp2jkydPsgv3L3GdUDBHjpxsUfrF1TDyzpw5QxA5z5+/QM+ePeN/T5WlKO6dPn0aB3ZT/JYtaLMrd8SIPQ/Xybly5aaUKVPSn3/OVWXevXvH8eR/p+PHj6nrzJmzcJlc7H64BP38syUEgLph5wfqX79+g4ozi/i2iBWLsSIhLm758uVpy5Yt1KFDB2X1izGa08aNG6lz586G8Lpv3z5qxla0KF+3bl1zUcLiGKx/fXN/bPVQCLtwpvC6c/deatCkmReCZisMLzclQwgIASEgBIRAMCHwhuc4ELfe//eedm3bQJE+e9twRHi9cPESlShbUVk1TpkwxiBjT3jFfKxOg6Z05JhlPgY3tPny5qIypUpSAlef52NGxXLiEIFvKbw2b9qYOrZrbfRvyvRZNHzUH7Rq6ULKkN7NyLf3u4HYrBs2b+W/E87TU57zIxYwYgAjXT1vCbnlnfDatHFD6tKhrVE/TpKmcqNfOS7roj9nWeX75cJR4dXZ7fulj0GpzKBFp2nj0dsEg44sKVzoA8ccXbLLk7wTYGE52mPOcfqN3RIjvqpfkhZeNw8uauUJb8bGizR3i+XvMnvrumYjmX3/PFDGHWgP+fBImC99XLUeq0Of6XXaEY2zUo5UsYyuQWCGAUajEimobuHkpEO5oYC9dpG/sncBFebOWcKrdqmMtduObKiSnOPnPmSL3yW7PZWlru36J7wQXrv/gjYOLErs4NBLunibx8Th1eDZcQi7LpYkBIISARFeA+htaMtWVG9rxapFWdyD5at3MV5xX6xdQSHgExY4Eb/saxLioDVlKxlH0pwtl2nmxkte4oqiDriAgP9834RXuMAo02ebahaWmeYEV7uxfgqvXFLAp7/eGbVtWDHl91+XhbV
n8Z6brWK86p1QKIMPcCLeWfRTpHAqDiry9owqgYPhwsIsYKob/MNWeB2/huOo8sRldodc6uOqy+GoxVPEeU3FQdodEV7R/9PXHtMWdq2xnych2k1Iqeyu1LlqOrUDDEKjPQtTxKrVYqQOxl49fxLlogO8EO9BW8BCmGw95ZAScHVM3ugcvF636ZPwiji1E1vkMFxmIHA8xmgvYQIUJnQo6jzzqIrVAGviBDG/7OwasfyMEsBH8iQK7du6ELatUwve9YsmpwbFUhDE43L9tikxdnijLIZ7kzec3332cRUPoS4L/BCKdcxd2zrlWggIge+fgLOEVy2w2hNU9T1Ny14Zfe9rY7zqevTRL66GdVl9RHzS5s2bU8aMmQyxU98zH8uXr6BEy1PsdiyUzV+ARYoUUW7JtCXr2rXrWMjsrh5PmzYdwUIWcZbgyhdJC68479OnL61atZJjwO6iy5cvK5fCffv2o4oVK+C2Sljsw31YyO7du0+5BcYNbbVqKWX98/Xr11xXI47reoYiRYpMOXLmIBcXF9rN9SA+rBZeEacWboxbtGjhZU6zZMkSFl0HGcIrXCrD3XH37t2pevXqVg3u3r2bWrLr5E6dOlHt2rWt7smFhYAzhVdPFtJXrVnnBW28OHGpetVKXvIlQwgIASEgBIRAcCIwY9ZcGjx8JFWvUlnFXNVjW7xsOf29YRP179NTuY11z/GrvuXl2LBpC9qxa7dy6ZokcWLjfvc+/ZRwNnv6FIrJc6e0aSxeUTAf27ZjF63fuJl27dlLr169Us8MHzKQKlewhFwwKpGTrybwPQivq1avpQ5dLHP+9G7pKFlSzPmj0ey589T4fRNebQVfPPQthVdnt//VL/0bV7Dl+B3ldhhWl63KpjZatyfA5meh8/iVRyq+K9YT+7PFa4r4PxrPeHfinfCq10qx9po8nnU9Z9joIzt7v2tV7kufbvA6JUK2HWBDC4ipSFhz/KNpduVWWAuvtuu0WqTUwuueM/fVGiCet11PRrvp2A1ww+Iplfc8Zwmv2ihmVZ+CVl750IfWkw+p+LLLeuSnuBxfVxuHFMoUj/rWslj8opw56XVTe+vM5nJyLgQCg4AIrwFIXQusEFc7N69jtARRFrFbteCKGygDq9ZfkiUyyunnxdrVQBJgJydOnKCxY8d+Vf1YfMyUyeL73q8VQbDrwm5obT/sT1hMrT5kpxLGfBNe0VZZFl5/4p1O8zrl8bHpSevO06Id13gXUGbeDRTHKHv88iNqM/mwlavheqP2Kh/72k0xCkPgLNfXIvJqy1jvPugobyu8Io7sgIWn1AcTH05z6jTjKB089y9tHVqUwocNTdr61Lavuo5OVdJR2RwJzFUY57BGxYQG1rnagta46cuJFltnts+lYizUKpRUxRjAY/p9QdBF+4j5iqSD29sTXrezyD129TlazdbLvgWDV5WZfuhYC6YsL6fa9TRE5XnbrlAmFtgxCTQnHcu1vHtCguvjR1y2fL/t5iJ2z+25j7ZbUDKFgBD4Lgk4S3jVLoRh2ardCpuBaPHVJ9EV5XU9NfL/RjXy1TRX4a9zn4RXuM2NHDkKNW7cyKpuuGzNwlYLrq6ubB263uqe+aJnz560Zs0aZSEaJ86X7+nLly/J3d3dytVw5cqV6eLFi1ZuiWFpW6BAAVWlFmhxoYVPiKiwqF24cCGLq3spalTrGN26L7CKxUIUPHdAUN2/f58XC1yU3cULia1ataTyHFe2D7sQRkxaJMSf7dWrlyG8wvIXcWchEM+b9yeFCRNGlUM7sLQ9fPiQIbzevn2brYJLUBWO/dqLeZjTnDlzafToUVaxX8335ZzjJHFc4fgu1p5KhIsQEAJCQAgIASHgOIG+A4bQnwsW+vgg4nb65BIWFrPXOOSCT6lk8WI0YcxIL0UwT9q0ZRu1bNuB55eR6dSR/XbnY14elAw/E/gehNeS5SrR+QsXacv6NUp0xeAw58+eyzLnP7J/J7K8jfHqbOHTUYtXZ7evBvsd/YCBQrEem5W74Rltc3npuRZgF+28Ss9evVf3YbUJT3dwgQtXuL4l74RXWNrC4nZQvcwEi09HEgxIxvGa494zD6hD5bRUPmdCbw1kbIXX249eU/XBu6hqXhabTcKuvfadJbyCMZI9C1btFXJss+yUObkLHeO16ra8Vt2Rx1WOx2Wbzng+oWbjD7LHw7jUr7Zj6/G2dcm1EAgIAiK8BgRVU51aPEWWreUrBFidzIKrWZi1FW11eTk6n8CQIUPowoUL/qr4l19+oW7dujn8LOKJ1hm5V1loYsdR7rRxlIuFGZsuGW5x/SK8auvYoQ3Z5WCa2EY/4CoXoltjdk3syhaTcB3cZMx+grVmh4rp1IQC7myHL/dQIq/ZfYX+GK7uU8iw0tRBzc3lHBFer/GEpA674E3GO8EQNwHudJFOX3tCLThAPHZozWxnmeDoGAfmCcAnnrRCoD18/qESPiG8YuKwfI+ncgUC61Wdxv71D+dfp6ns1tjscljf9+64/sgtGrLYQ8VVgMg7l2MvJP2840yPFW5E4E4E6T+OG9tu6mEVY8FWeIU7YcTdRRkIunB17IiYiUnGG67DNo1mF9Ww6oUo7fJTBErNFsKI2Vqy1xaKHCEszemQW+1y089NYEtjuEjRLqcR6/YQB6i3Ta95ojlgwSlKy7vaavH48J7ixbC4JrYtK9dCQAh8/wScJbxqS1UQWdPnb7tgUCYdC7M+Je1m2DeB1qc6zPd8El4hVMJidNGiRSwypjUe01acVatWYze5PYx82xMIokOHDqUKFSpSv359jdsjRoxgwXKelfAK18NIO3Zsp4ifXd9py1rbWLCfPn2iggULEuYVmJNkzZqVRo78ssC3YcNGJa6if268k14nxIu9ft2TjrHLu7Bhw+ps47hixQruZz9q27YtNWjQQOWb3RZri1fcaMsugrezmFuyZEllyQpXyks5ni2EZqRBgwZTmTKl1UJSzpzuyuIWAq6Odfv48WO20K1IOPokGqvKQvAPEV5D8MuXoQsBISAEhIBTCUDsunnrtpc6l638i7Zu2049u3Vmi9ckRnzOx4+f0MHDR9R1pEiWv3cpPPvlAABAAElEQVT3HzzEVquvvdTRZ8BgunfvHk2dOI7ixI5FsGRc+/cG2r13P9WqUdXK9awWby94HLc7H/NSuWT4mcD3ILy6ZbHEBT60d4fh7lqHg3CJ4eJrjFdnC58BIbzi74cdu/ZQRna5HDeuRSC8fv0GXbpylQrky6M2d35k72672Qo8QQJXSp7Msm7m5xcdyAVt3Q3b6872U3epz58nDRfDPece5xBt98lstGLvOeR5J7zqMGNuSaKz1Wo2ZYyC8jCwwHoe4sHWLpRMea8bvfIsxY4WQXmz++Gz5yW9ftq8TCqqwR789NqlbxavMK6B90Ok2e1z86ZQy/8Psb6IcHAveM26R/X0vJEklGq7SLfNlCBWZFrY1WeRWcdlncYeDbFeaU46HuuAupmsjEYw1gZsAARDJB1+TXuJNK/LmuvSFrK2od3MZeRcCAQmARFevwF9s/jqk/
WqWXBFt0R0/QYvx9SEJ+9u7Nu3rynH76d4LnHixH5/wFTy6r0XvEPngBI+dTZ2TeHjtOHIbV9dDeMZ7HCCherrtx+VVSWEQg8WMxFzAGLmtDbuhnWmthjVbeFYLGt8unznBT3mD522ZB28+LRqP1PyGErMvcPxQhGPFsm/wiue1W5vMcZ8bnHpKceN3XL8tuq7djOMcv+yj/+K/XfglHKmiUUZk8YgCL8nLj9Wedri1RwrtXKeRMpN8RUWk2HZG4snI8t75FeTBPWQH35gYlGy51ZVEsIjREydbj18RTWG7FbCddkcCSla5HCESZd27eGd8IrnMZ5aw3ercU5ulYMFiOi6WoeP3sV4XbjjKk1ed0GNuzRb5f7E/Tt66aHa+YbJEWLrRonodUFed0CzlN1imogchUDwJuAs4RWUtFWrf61VnW3tij75JLwePXrUECBr1apFiRIlYte+V2jJksV4lBYvXkxp0qRR5/Z+IF4q4prCKtXdPRd7vMjIcVzPsmXpTlXcLKjCohTCZNZs2Sh/vnwES1EIvkjmciqDf0ycOJEQAgFp3Pjx6hl1wT8grNavX189B/e+8TmuK9wFQ0SGNWt/FlftJSyIQCyFVWxldsMXI3p02rRpk+o/ypuFV7jKw9hgpasTnqtZsybNmDHdEF5xzywglypVSi0yoi+3bt1i98o9qVq1qroKOdoQEOHVBohcCgEhIASEgBBwMoHho8fQlGkzaePaVZQyRXKj9iYt2ihBtlmTRtSpfRsj396JvTieh48co+q16xHEtNq/VSfXn+PT9p272e3wJqpSqQING9TfXlWS9xUEvgfhtVO3XrRi1V8qJmvhAvnVZgBtiR1chNe58xdSv4FD1AaEv5ZZ/p5xz19EbU4YO2o4xzkuQTv4v4WGv7dQ1t8nD+8zPO18xev/Zo96525YdwBWr2P+sni0g0UmvMrN3HSRrt9/RWv7F1JrhLqsvaN3wivK6vBrWIeEFzuIn/CcByHS7AlQi42IaQrDmxdvPtCcLZfUWuOibnmV0Y1fhVe0qz37YZ23SJb4FCNKeGOds1q+xNTS5Ha5PRudHLn4SFnlwjLXbPyCunTySXg9eeUxtZp0SBXFWnSKn6PyWvR/ypMgxlqjQBJqXtri0h1jvXDrGW0YUMTLuu7Riw/ZCOaIWs/uWSODblqOQiBIERDh9Ru9DrgWxj9zgrAKS1dt+Wp2PeyTQGuuQ86dS8A/Lof942LYttewiIR73Iu3n9HPbGGYI3Vsgo9/CJ1Le+RTVofw4f/bsN1Ut0gyasQ+9m0TPkYz2VL2wD9fLBnh2rYF73iCAGdOEBchYEIMxEcuPe+qglvbt+8/KgtNlIUIB/EV4i0SrGTbV0yrYrzC2lILtOsO3WSXwmfIdicVnhm+7IzaabVuQGGOD2sR/GC1Opdj2245fpdu/muJg5KZA9fXZgvLrBy3wJwQ6H3oUg8jZissMUuyoDiC60Xs1jK/WlwNK6tdzgNDnTAJ6VgpHblEDa+z/HzUO7DgagMWt+aEGAhoHxMCJASyh7tfWMl2r+7GwreryocQDlccsHjV6QRPMFrzBAMTmvnshiTqZyb6vl+P3gmveB4Wu+ALN8tIeG85UsVi1xzp6EcfRFeUff3uAxXrvoV8ip+AcpKEgBAIHgScKbyarV4dFV+16Aqq3lnM+of4hAkTaNq0aV6sWnVdHh5naOBAi0tfnYfYrr169aQUKVLoLG+Pd+7cpeEjhivrUBSCiAoBc9y4cfTs2TPlhhj5cD/ck8VXWJEiQcTs0aMHx3idp+Krml0N4z5iu8JiFAkCcbhw1t/wrVu30bBhQ9WzKKPF1NatWnkpi/s6bd++XfUPlqhIcClctGhR6s2uh23jw8Ly9tKlS4R5UZQoUVRZuEHu0KEDDR48mEqXLq2rVaLv/PkL6OTJEyovZcqUVKlSJapRo4ZRRk68EhDh1SsTyRECQkAICAEh4EwCI0aPpcnTZtCmdX9RiuTJjKrHTZxCY8ZPpNHDh1D5sl/mNEYB04k94RW3N27eSv0HD1OCE67hYrhG1crUsV1rH+djKCvJcQLXrl2j6dOnU9Kk/rOgvHr1KhUqVEj98671q9c8qXCJMtSyWVNq36alUWzqjNk0bORoWr18Mbml++Ipp2K1WnSJ5+3ahfWLFy+pU7eetJnn6kj4nejfuwfNmjuff0/uGxav3Xv1I8QfPnZwD0XnGLDetYs6EOM1J8cmXjBnBi4dSmPGT6JxEyd7+f3/mvb37NtPdRs2pZrVq9LAvr1Uf35v2VaNedXShcoC/OKly1Spei0lzvqn3w4N0smFtbtheH6D5WgGNgBJHCcKaTfDi3ddo6ef1wLNTdctkpzXaX3/+xGh12AMoy06zXVgrXT25su07cSXtdL4LhGpGYuQ5nBiz3idceyqs2pNVT8PgxuEFdPe/rxbp9WuhuEVsU7hL/9PRJvL9ngaa6pYR6xZICl7wktmGPGgrWOXHtEgXifGmnD6pNFpYguLlbfuhz7O2HiR1yWvkNm4Rt/DEcYyo1acVQZDOh9jrcFtwlUyEtbJC3XZpEReuGC2Tb+PO6D6q8Vm2/tyLQSCAoFgLbzGc4lKcWJYB6UObOj2BFhzn0RwNdMInHNYvsISxTe3w3ADiEVF/1q6YnSPX7CbjlP3KA5bZppjruJjX66fZbK2nnf26FiifiHynndgYSIQ48fwXp6DZaYH+8CHgJnK5O7hPLsgbswuiCFWDqmfxaqZdyzGQkC0V59VQX9cPGNrV7gbRkxXnxLGA9cWvgmV+DCjzq/tq95BZi/Yu+4n3l24MD/4aEGqywbGEeI63lu86BG97AwLjP5Im0JACAQ9As4UXjE6/4ivZtHVUcHWWUThMguLIXCXFT6845t18Dxio8aMGZNChQrlbbdQDoKsi4uLjzu/tTUuLHE7d+7sbX0QdGF5i3YdSQ8fPlTj/PFH+3PkJUuW8HfjByX+6liwqL9Vq9bKohf3U6dO7aVJWMpCsPWuXi8PhPAMEV5D+C+ADF8ICAEhIAQClcCHDx+MWPZf0xGIbW/fvaVYDs7HvqbNkPosrF4hwPonQXTVYTH887wjz7x9+46e8pw/Vkyf5/yO1Pn+/XuC6OmX9HP8+PRLSt9FQL/UZa+Mvf92bPMQ99j8d4S9eoJqXuVBO+k+e/zTqWDGeHScw4BpwRVWoJVyJ1Zhz67efU6pE0ZTxg66vDOOaCt06FA+Gk98YovYR7wuiXVS39ZU/donGOBgDTg6W7368GctIXwZ/u4Nw338mvTh4//o3pM3yjglcoQwVlXp+K2ty6emKnkSW92TCyHwvRAIEsKra6xoFDNaZKcxe/TsFd188JTjE7rQj5EjOK1eZ1ekLV1RrznGq7Pbkfr8RwBWHocPH1YWH1ikRMLiJqxgsmfPzm4Fvz5wNz4yVQfvVLuFEDM0A+8Yuv/krXLrAItUWKx2Z3/6zkraaha7lxoWS0mJOAD8ebaU3XT0jrI+NVtsOqvN76UexDbwvP+Sdp+5RzM2XKJy7DKkI+8YkyQEhIAQCK4EnC28g
pNZSMU1xFS3RG5W8V0h0C7atYA3AnmgiErOiuuq6/sej0+ePqVLFy8pi9KrV6/Qxo0bKT4vnHzr1L17D1q3bi0VLlyEihcvRojTtGfPHpUXJ04c5aIYwqykryMgwuvX8ZOnhYAQEAJCQAgIgZBBAIKrMxNE2O8tPXz4iLLnzu+nbtetVZP69Ozmp7JSyJqAdjWch93owjPgtPUXKSIbi8CoQQuu8WJEtH5IrgKEwILtV5X75Rnt3OkXV+s4sQHSoFQqBAKAQKALr6cv31FWeclcY7LlW9ivHuK7/z7Q5VsP2Rf6J0qXLB7pQNNfXbFUIAQCiMDlO89p4KLTdIVjrJpT2ZwJqG35NBSWrSqdmXaevkdDlpxW/v/N9XaonNZw6WDODynnsDJGoHgkuOkY0SirspwNKeOXcQoBIRDyCASE8Kop2gqwOt/26JbYjWrk+81KmLUtE1KuzbFdERe2SpUqgTL0J0+ecBzXQcqFsLkDcMM8atRIihUrljlbzv1JQIRXf4KTx4SAEBACQkAICIEQRaB79+6+jleLqX4RaRE243tLsCg9cOiwn7odP148SpY0iZ/KSiFrAnD1O4tDuPWrk5EKZohHtUfsIc97LzkEXH4OASeCqzWtgL3qNOMoHTz3L+0cUdyLN8eAbVlqFwLOIxDowuvzV2/J884jYmMzJbyG+QqRCTvy37x7TzB0T8zWrlGDsLWr816h1BQcCMBFxAP2kX/78WuKFD4MJeEYAnDBG1AJbiHuPHpN97nN2OzmOEGsyCH+Q4Z3APch0Tj+aiLmHza0cwXvgHqXUq8QEAJCwL8EAlJ4RZ9g2epx3YOPp62sWyG2pkuc3oslrH/HEVyeu3HjBj148IASJUoUJMTNR48e0XXuE1LSJEkoGsegkuQ8AiK8Oo+l1CQEhIAQEAJCQAgEXwLaxTDitH5NQnxYuBvWIu3X1CXPBk8CRy4+pPZTj1AsXif92SUSnbzymIpmiU+9amYIngMOwqPCGjni3sbl8GmShMD3SiDQhVeAe/n6HT16/kodEZ/SvwlCSZRI4cklamR19G898pwQEAJCQAgIASEgBII7gYAWXoM7PxmfEPgaAiK8fg09eVYICAEhIASEgBAQAkJACDifwIId7OJ23QVVcb70cahFmdRi7ep8zFKjEAgRBIKE8BoiSMsghYAQEAJCQAgIASEQhAiI8BqEXoZ0JcQREOE1xL1yGbAQEAJCQAgIASEgBITAd0IAcV2jRvr6kIjfyXClm0JACAQAARFeAwCqVCkEhIAQEAJCQAgIgaBOQITXoP6GpH/BmYAIr8H57crYhIAQEAJCQAgIASEgBISAEBACQiAkExDhNSS/fRm7EBACQkAICAEhEGIJiPAaYl+9DDwIEBDhNQi8BOmCEBACQkAICAEhIASEgBAQAkJACAiBACAgwmsAQJUqhYAQEAJCQAgIASEQ1AmI8BrU35D0LzgTEOE1OL9dGZsQEAJCQAgIASEgBISAEBACQkAIhGQCIryG5LcvYxcCQkAICAEhIARCLAERXkPsq5eBBwECIrwGgZcgXRACQkAICAEhIASEgBAQAkJACAgBIRAABER4DQCoUqUQEAJCQAgIASEgBII6ARFeg/obkv4FZwIivAbntytjEwJCQAgIASEgBISAEBACQkAICIGQTECE15D89mXsQkAICAEhIASEQIglIMJriH31MvAgQECE1yDwEqQLQkAICAEhIASEgBAQAkJACAgBISAEAoCACK8BAFWqFAJCQAgIASEgBIRAUCcgwmtQf0PSv+BMQITX4Px2ZWxCQAgIASEgBISAEBACQkAICAEhEJIJiPAakt++jF0ICAEhIASEgBAIsQREeA2xr14GHgQIiPAaBF6CdEEICAEhIASEgBAQAkJACAgBISAEhEAAEBDhNQCgSpVCQAgIASEgBISAEAjqBER4DepvSPoXnAmI8Bqc366MTQgIASEgBISAEBACQkAICAEhIARCMgERXkPy25exCwEhIASEgBAQAiGWgAivIfbVy8CDAAERXoPAS5AuCAEhIASEgBAQAkJACAgBISAEhIAQCAACIrwGAFSpUggIASEgBISAEBACQZ2ACK9B/Q1J/4IzgXf/vSOXKBGC8xBlbEJACAgBISAEhIAQEAJCQAgIASEgBEIkAacLr6hQkhAQAkJACAgBISAEhIAQEAJCQAgIASEgBISAEBACQkAICAEhIASEgBAQAkIgpBGI7/KTMeRQ/+NkXMmJEBACQkAICAEhIASEQLAkIBavwfK1yqC+EwJi8fqdvCjpphAQAkJACAgBISAEhIAQEAJCQAgIAQcJON3i1cH2pbgQEAJCQAgIASEgBIRAIBAQ4TUQoEuTQuAzAYnxKr8KQkAICAEhIASEgBAQAkJACAgBISAEgicBEV6D53uVUQkBISAEhIAQEAJCwEcCIrz6iEduCoEAJSDCa4DilcqFgBAQAkJACAgBISAEhIAQEAJCQAgEGgERXgMNvTQsBISAEBACQkAICIHAIyDCa+Cxl5aFgAiv8jsgBISAEBACQkAICAEhIASEgBAQAkIgeBIQ4TV4vlcZlRAQAkJACAgBISAEfCQgwquPeOSmEAhQAiK8BiheqVwICAEhIASEgBAQAkJACAgBISAEhECgERDhNdDQS8NCQAgIASEgBISAEAg8AiK8Bh57aVkIiPAqvwNCQAgIASEgBISAEBACQkAICAEhIASCJwERXoPne5VRCQEhIASEgBAQAkLARwIivPqIR24KgQAlIMJrgOKVyoWAEBACQkAICAEhIASEgBAQAkJACAQaARFeAw29NCwEhIAQEAJCQAgIgcAjIMJr4LGXloWACK/yOyAEhIAQEAJCQAgIASEgBISAEBACQiB4EhDhNXi+VxmVEBACQkAICAEhIAR8JCDCq4945KYQCFACIrwGKF6pXAgIASEgBISAEBACQkAICAEhIASEQKAREOE10NBLw0JACAgBISAEhIAQCDwCIrwGHntpWQiI8Cq/A0JACAgBISAEhIAQEAJCQAgIASEgBIInARFeg+d7lVEJASEgBISAEBACQsBHAiK8+ohHbgqBACUgwmuA4pXKhYAQEAJCQAgIASEgBISAEBACQkAIBBqBICG8vnz9jh49f0U4vv/4yd8wwob+gaJECk8uUSOro78rkgeFgBAQAkJACAgBIRDMCYjwGsxfsAwvSBMQ4TVIvx7pnBAQAkJACAgBISAEhIAQEAJCQAgIAX8TCHTh9fmrt+R55xH9j4cQOUI4CsPiqX/Th0+f6NWb/ygUV5A4vgtFjRzBv1XJc0JACAgBISAEhIAQCNYERHgN1q9XBhfECYjwGsRfkHRPCAgBISAEhIAQEAJCQAgIASEgBISAPwkEuvB6+vIdJbYmc41J4cOG8ecwvjz23/uPdPnWv/SBLWfTJYtHP4SCDCtJCAgBISAEhIAQEAJCwEwgoIXXM54e5HHdg854niYPPtfJLbEbpUucntwS4eims+UoBEIUARFeQ9TrlsEKASEgBISAEBACQkAICAEhIASEQAgi
EOjC68lLtylB7Gjk8lNkp2F//Pw13bj/hJKx1euPYvXqNK4hraJPr27Qp1ee9L+3D+h/71+q4YcKG4VCRYhNP0ROzP8ShjQkMl4hIASEgBAIRgQCSniF4Lpo1wIrsdU7bDXy/0Y18tX07rbkC4FgS0CE12D7amVgQkAICAEhIASEgBAQAiGEwLNX/9FPkcOFkNHKMIWAEHCEQJAQXlMmiEWR2M2ws9Kbd+/pwo0HFM8lKsWJ8aOzqnV6PReuXDfq/CVZIuNcTgKXwP/ePaKPjw7Tpzf3fezIDxHjUGiX7BQqvIuP5eSmEBACQkAICIGgSCAghFeIrt3ndjWGC+vWGvl+s7Js1Zawi3YuMMqJAGugkJMQQkCE1xDyomWYQkAICAEhIASEgBAQAsGOwF/7b9D4Nefov/efKKVrVOpazY1SxI8a7MYpAxICQsD/BIK18BqXRde4LL4GpbR60y7CP+9SuWL5CP8kBQ6BT69u0od72xxqPEzcQmz9msChZ3Tht/99pEU7r6qPc+50cXS23aMjZe1W4IfMg+f/pXM3nlKVPIkpSsSwPj4xe/Ml5Sa8dqFkPpb73m9ef/CStp+8S3n4/ST/BpMovOcHT9/yxpGIFNaXmNcfP/2P7j5+TTF+DE+Rwn+9q/bv/V1J/4WAEHCMgLOF10W7FvI3zSKm+lVINT8zuO5QK4HWsdH4v/S7d+/o7t27FC9ePAofPrz/K/r85JIlS+m//95R7dq1v7ouqSD4EhDhNfi+WxmZEBACQkAICAEhIAS8I7B+42Y6ePgIdWzbmqJGDVhjIc/r12nrtp106/YdKle2FGXKkJ6+ZfveMfje8y/eek4N/9jHlq5hKUncH+nklcdKfJ3ZLtf3PrRv2n9H1jQX7bxGP0YMQ6V/tV5/x/rp/SdvyCVqeIoXIxJJxMlv+gqlMV8IiPDqCyBn3bYVXFMlT2xV9fnLnlbXIsBa4fgmF7B0fX9rrb/aCutaxl+Wr3BJUbr3Nv5wuFKXqj7HuXOkrL8GwQ9ht9bSXZ60tEc+9cHyrp5Hz99R+X7b1e31Awvzx89nkda7er6H/L1n7lO32cepV80MVDRLfD91WXP0qfDsDrmshNxzN5/RyOVnCBM4nTKncKFu/HsRN0ZEnaWOj1+8o+HLztC+sw+M/MRxo1Db8mkoCz8jSQgIASHgFwLOFl7L9iulmvWr6Kr7GBDi69atW6lnz170+vUrWrRoEaVNm1Y3Zxxv375NPXr0pOPHjxl5GTNmokGDBlKCBNZ/0BkF/HBSuXJlevjwIe3cudMPpaVISCUgwmtIffMybiEgBISAEPgWBHr07k+Lli5TTW1a9xelSO7YhvH//uO1mgpV6fKVKxQ5cmTyOHbwW3Rb2ggBBAoVL0PXPD1p5pSJVCB/3gAbMcTWEmUr0qtXr8glhgv1692dShYvSt+q/QAbmJMr/vfZWzp7/SnFjhaR0iT8ydvar957QWc8n9Kl28/oyMVHdPvha6pVKCk1LfkLtZp0SImv6/oXCrZuhz+x4QfWLSE2u8b0PlzkztP3aMiS0/T67Uea3tadUiXwytTRNc0nL/+jsn22UTn3hNSxkuXv+qecN2DRKTp8/qHxzqJHCUe9f8tAWVPGNPLkRAgEJgERXr8BfbPoCsEVoqp3roXNZUV8/QYvx9TEhzsbfHUvbCpudQq3w2Hil7DK88uFI2KqI2X90ra9Mlow9E14xbObj92h0D+EokKZ4tmrKtjk+Ud4xUTjBO94s5d2sPUsJg3zOuehxHGiqCKwMm4y9oA6/61gUvo5ZiS6wALsanZdEilCaFrQJR/F5N1bSK/ffaDqg3epOopljU8ZksSgBzxRXLr7mprYjGyclX5NFUuVlR9CQAgIAZ8IOFN41eKpo6Kr7p9+Hq6JB7Hlq3/T06dPaejQobR+/XqjioULF1K6dOmMa5z8+++/VKZMWSXM1qpVi5IlS0YXL15UIm2kSJFp8+ZNvAPdf15TRHi1Qi0X3hAQ4dUbMJItBISAEBACQuArCew/eIhq1Wtk1LJx7SpKmSK5ce2Xk7ETJtPYCZNUURFe/UIs4Mtcu3aNtm3b5nBDSZIkUc8UKlTI4WcD4oFjx0/QKY8zVLNaVYoQ4eu97XjXx2UrVlGXHr2pfZtW1LxpI/rhhx9U0W/Vvnf9Ckr5m4/foQELThldqp4/CbUok4o+fPykRNYz15/Q1bsv6fCFf+nZq/dGOX2SMHZkqpArIc3ZfJnesPe6bUOL6VvB7vj89Xsq1Wsr5XWLQ4PqZfYyvmd8f+yqs7Tl+F3j3jQWXlPbCK/+WdPU67J9amWgwpni0//+R9R9zjHae+YBZUoegwpmjEcvuP3526+oddGJLXNQ+iTRjX7IiRAILAIivAYwebOQ2qVFXSvB1bsYr8gfNnGu6pmIrwH8gj5X/+nVDXYxbLHg9G+LYeIWZJfDCR163BEx1ZGyDnXCVNgR4dX0WLA+1R94RyxevQMCFxiVBuyg9Emj08QWOYxiY1b9Qyv2XqfhjbJQztSxjfwlu67RhDXnqUPltFQ+p+V3C6Jur7knCALt76V+Mcpi51mTMft5whGX+tXOZOTLiRAQAkLAOwLOEl7NcV3X9Pnbu+Z8zdcWs/51Ofzy5UsqXbo0PX78mCpVqkRhwoSlJUsWkz3hdd68eTRixAgaNmw4lShR3OjbpEmTaMqUKTRq1CgqUqSIke/IiQivjtAKuWVFeA25715GLgSEgBAQAgFH4M3bt1S8TAV6/eoNFSlUgBYvW06OCq8XL11WdeTNnYuePntGV65eE4vXgHtlfqoZgqt/RFdz5Y0bNyYtwprzg+v5H+Mn0viJU2jtymWUNk2q4DrMrxoXhMT/PnyiqnkT01G2Yv2HjSLSJY5GZ9gClljc0ykKu7jNzgYOSdnTXLpE0VWZSevO08q9N3QR6sCWmOXZIjO4Jp+E11dvP1CNIRYDkTI5EnBYulC0at8Nsie8+mdNc+La87SYXQ0v65FfeQRcc/AmjWAvgOAN7jqd53XRxrwumiN1LBrRKKvOlqMQCDQCIrwGIHotutpauUJYxT2ze2HbMujW8El/qjIivgbgS/pc9YcHu+nTi6tf1dAPPyalMLHzOlSHWUzNnz4uzdh0ic7feKZ27CCmaKVciXhXWihVp7msdks8gl3TXrnzgqa0zmnV7sp91wkfoiH1Mxsug7H7aOrfF+jYpYd059EbcuPdPxDzzO5ztfCKj+OKvZ7KZUOYMD9QVnZf27Jsaooa6YtL4U4zjlLEcKGpfx2LyDd1/QU6cO5f6s+i38xNFwnxYsOHCU3uaWJTs9K/WLnb+MTbkxCIHqImXHQki/8jNSyWgm4/ek0bj96mkY2zGRaeVgPz5uIhuz5Gf49eekQ3OCZrpmQuLGDGorI5Eir//ne43u5zjlPxrD9T9XyW3Y6oavzqc3Ts8iMa2iALxY1ucecLN8odph+hQrxjCvFrnSm8jlxxVlmxTmjxK2VIGsMYTQ/u226P+7S6b0EVr1XfOHX1MbWceIjqFU2u+CAf7/aPlf/QwHqZKJ9bXF2
U3vOOvIKdN6n3Ool3d0kSAkJACPhGwFnCq7ZWdcTaFc+4JXKziunaY25X8vD0IEfqMY8RgmvFihWpf//+lDdvXho3bjzNmDHdrvC6fft2us4xj2DtGjbsl2/bhg0bqUuXzsoFcTXehe5TevHiBc2bN5+2b9+mrGVz585Nbdu2pe7du3txNXz9+g0aP34cnTp1il68eKkscCtUKE+lSpVSTezYsYMmTpxI9erVU+Kxud2x48bRnt27acyYMeTq6kqw6h07diwdOnSIbt26RXCPXLVqFS/PmevA+adPn2jx4sW0du06Onv2DCVKlJifzUDt27en6NGtdwTv3buXVq9eTXv27KUYMaIz10rc57Q0cuRIat26teKr6/fgHfsou3//PrYgfk158uRhMbsEubu76yJytENAhFc7UCRLCAgBISAEhMBXEhj5xziaNHU6TRgziv45d16dOyK8Yr5UrVY9glXgrq0bqVW7jnaF1yc8HxsxaiztO3iQbt68RVkyZ6Lfqlel8mVLf+UI5HF7BLTwOnjwYHu3fc3D/BwWrz5Zvd68dZuatmhNNXhe/d/797Rw8VJ6wF5yCuTLS317dqOHjx7RkBGjadfuPRyWxJXd9hZTcVpDh7ZYkqITiKk68o/xdPwk5vwvKL1bOqpaqQKVK2OZ86PMOBZDN27eQovnzVExXs3thuL1v+Ur/6LTPL9OmyYNtWzWhIoVccxS9/HjJ1SrfiNCvXAzjL5GjhSJevfoSjmyZwvw9jHG7yE95dBvZTj0W8ZkMWh8819pFa9RjuY1OySslyZhD3Upfo5KaVmITRHfviekZuMPKMtYv3qe0+umXTmk2KzNl+jAP/9Sy3KpqFpeyzolRMkdp+6q9dSEsaOQO69rVsmTmKLYhHfbceoe9/c6nbj8mOK7RFTrizDMiPGjxYJ6l8c9ms1WuG04HNnes/fVuibGmy5xdGpeOhUli2cdWxiC5fojt2gn1xsxvGUdF+vFiT576Vu621Ot4d7895XigzVcWLLqtWl49as7Yg91q+6mjEmmbbhI87ZesSu8+mdNs96ovQThd2WvAqr9FhMP0uU7zzlMXgH6ybROjZtYO8dCsG2+elB+CIFvTECE1wAE3qB9f1X7rNG9rVrRgioyIbhqARbnnZvXMcqaLV9trWWNQnLiFALvbyyn/71/+VV1hQobhcImrOxQHVpMxYcSYijidCaMFZkFxIfKPUJ9FtwasCCJpMua48G2mXKYjrPYuGeUtZtj/ZHT7mwhylVj97T/ssUlLCKjRQlPe1j0xLXZmlILr7GiRaCwvEPpF9ef6BC71IBv/rSJotHkVjmNQOUV2XIzcoQwNK9THtW//gtOKpcSGEtodmGShMcCMRHJdrfRbJ5gzNp0WbnRzZoiJt3jQOiIbZqAx44P+ZLu+XjyEEk969sP7KzCRxcCNPhhcqT7rAVLxCIo0WsL3/vREKkRxL0k52FsXau5UansrqopvfuqF8cFKJo5vtOE11sc/wE7wLKldKHRTbNbDWvriTvUb/4pqlskGTUqnlLdwzsbuPAUbT95j2a2y0UpXS0TPQSNrzxwp6pncP0sFIHFbyRYzMJytnX51GpipjLlhxAQAkLABwLOEl61YOpXS1Ut1KJrZgtZbTnrX3fDHz9+VAsc0aJFU6P2SXi1hwWxvFq3bqMExFWrVin3w/bKIQ+Lcm3atKVdu3ayMBmDsmXLRkeOHKG3b9+pR+A2TMd4vXPnLhXnhRmknDlzUrhw4dVzuB40aDC7PC6thNqCBQtSVq5n1syZuKXSe170yZMnrxI///77b/rw4QOVLFmS7t+/T8WKFVNtQ0TGNWLaQoD1LsEFM6x/48SJQ1myZGHx9R8Wnz2VmAvhVAvQB3kBsUmTJqoa9BfpwIEDSqhFed1n5MM9Myx8kdD3COHDE0RbJFgOi/iqUNj9IcKrXSySKQSEgBAQAkLA3wQuX7lKRUuVo3x589DsaZNYALOIsI4Ir8tY9OrSvRd16diemjaqT+Wr1PAivGJ+lq9ISbp37x6VKlFMxc/cvG27uh7Qt5cSYP09CHnQLoFvIbzq35+4ceOqvyny581NFy5eVnF+s/Hc+Z/z5yke30uTOhXt23+QHj1+pNz4QhxFun3nDuUpaJnz53bPSeF5Xrxtx051b9SwwVShXBl13rVnH1q6fCUdO7BbbX40t4vfqUIF8tP1GzdVu3hgxZIFlClDevWsX348e/6c+g8aSidPeahYshCOo0X7ierXqUXp0qahgG7fL30MKmVqDttNNx+8ogIZ4qo4r/BSh1STPcxBeIwXw2Kg4V1/Kw3cQc/ZBfGWIUW9K2KVr9dNEYv03YeP7A43BpXM5qra33biLvWdf1Ktk2bjGKXXuV+e916ypW1MGt4wqwr1hso2sEA6eLGHElxhsHOD11Ah4KLO5SxMhmPjmbVsiDOcLUKxtvuBLXohLp/kcGgQSBHObGWvgmo9F/Vdv/+Smozbr9ZG0dZ7Lg9BF/XN6pBbGcWgbxBzIfgiH+USxopCdQonQxWE9dWXvDarxU69Jm3P4tXRNc0Xb95TyZ5bqUS2n6l7dct/B8V6bKa0CaPREDai2cLuoi/xejD6lTm5i1q7Rlg8SUIgKBAQ4TWA3oK2drW1VjWLqWZBVou0tgKrrsdWlA2gbofYav+7MscpYw+XrJ5D9WgxFQ81KJac6he1iKz4sNQduVcJozPbs+jGu6x0Wf8Ir7DqbDv5sBH4He0hEDk++nHY0lPvUtLCKz6isAINyzv33nKcgsZj96sPvjn2q3fCa5HM8ahnzQz0A+8wMrubWNWnoPpgX737Qo0NE4DZ/BHXH2Y9MUDfHBFetRVpoxIpqG5hS9wW9Pl33nkGMVb79h/AIibi0m4eXFTt4DrLrkN+H3cAzVm554UVrNrNxf114biqzrJ41e3DOhkitjlBZP2Td4MhLgTE50RxIqsYrxDGO1VJx5a7CczFaRvHiR2+zENZFKfhuiBcY6yV8ySipiV/McRYq4fkQggIASFgQ8BZwqt2EWwWUW2aMi7Noqs9odaRuoxKvTnxi/CKWK/Ll6/gxZXnyroT1z169FBiqDfVquw1a9dSTy6XPfuvNHXqFAodOrQSY9u1a0ewXoUYq4VXiJorVqykunXrGLvsr1y5QhUqVCBYycK9MVKLli2VZSuE1JgxY6o8LYJCEG7UqCEdPnyYj42oYcOGLPy2UWWePHlCXbt2pbhx41G/fn1Vnu0PiMq/N2tGMdiydeDAgRxPKoIq0rlzF9q4cQPNnz+f0qdPr8bg7p5Lxb7VeSh4nheaqla1WACbhVeIwLC6ncliMcRnJMTfKleunDqHVW7EiD4vWKiCIfCHCK8h8KXLkIWAEBACQiBACdRp0IT27j9AOzb9zRvGEjosvEKwyl2gKMWOFYvdE69Um9LsCa86huzvTRpS5/Zt1ZhgZdi2YxeKHz8eDR3YL0DHGRIr/5bCK2L6rl6+mJImSUwQ2avVqs8i5ikqWrgQTRw7muf9P9Ct23cob6FilDFDBlq5ZL56JctXraYly1ZQo/
p1DSvVS5evULHS5Y3NACjonfCJe2tWLFHiKM5nzv6TBg0bQbVqVqf+vXsgy6H0xzh2NTyJrWvXcYzj5F9iHH+r9h3qbCAVPsle5vpzjFesvSHFY0OSN+8+qvVSXPskwL57/5EKd92sPPjN4bVNvyQtvELohbEH1lyR/n32lir236HE1Kmt3dlYJpzK12HJ2ldKQxXcE6m85hMOkse1J7R+YGH68bMlLKxIt7I4ivi0WG/U66tYX8QaJLwXQhzt/ecJZSADz4XoA1LNoSw+s3g7uVUOZRGLPG2QgjLay6FProbxjDn5JLyinCNrmvCm2Gn6URZd3Vh8dVWWr3ARjbXn8zefq76b24ZAC+MarElLEgKBTUCE1wB6A1pINYuraEoLr7ZCqraCtRVe8Yx3deGeJOcQCGzhFTtz/mKhT7sVxqiwkwgfRS28fY3wepyF1zYsvEJQ7VUjg/ERt6WnhdcRjbNSDo5foBPcYczfdtUqBql3wut0dlOcyhQ8fegSD/r78C31sccEAOfI00HRdRs4Nvxjn7J81cIrBEl7AezD8g4uLdhikvCSheq/2E2v+cOqxWa4Oa5ZIKkSXSF+jmmWnbLwLqi5Wy/Twh1XqRzvYlt94AatH1BE7SCDCwskPXGyJ7zCtbG9FJOFWntJi80508RSO9Vsy2BH2VwWXuduuWx1CzvRIKRWZJfT5rTp2G2ayLFfsVvNnKrmS6xcEkcKH8acLedCQAgIAbsEAlp4hch6xvM0Dao7VLXvm+iKQt9aeD179izVqFHD4FOQXZD93rQppUrlcxykASxeLlu6VLnYNceKgnALF2Zm4dWo3OakTJmySvDVAu3mzZupY8eO1KdvX6rELpORdDvr169XlqmHDx9RAizE0SFDBntxEWzThK+XW7duVa6GBwwYoMRST09PKlu2rLJi7d3b2mMLXB3PmjXLsHjVYy1dugwNHjzIqq3Jk6fQ5MmTDEHX6qZcKALOFF6xKPj4CceisknhwoWl6J8twG1uyaUQEAJCQAgIgWBFAK5bm7duT61a/E7tWrVQY3PU4rXfwCE0d/5CmjdrOuVyz6HqsCe8Hjh0mH6r25AQA3b08CHKM0mwghkEB/MthdeypUvRmJGWv1+AAq6Bx3C81FnTJhOsYHUqVLyMckXsceygzrJ7RLnnz1/Qkf071X3vhE9Yuk6fPN6o4wa7sM5fpIT6PZszY4qR79cTR4VXZ7fv134GhXLwwNd47D7lWngKe/pbssuTFu+65qMAq9f58qWPQwPrZvbTMLTwOqdjbit3v7Am7TnnBPViIxZzODgYsxTvsUWJjL1/y6jagMe/01ef0GAOLZcrbWyrdVDdCS28mt0Y4x7WEgcuPK3CwWGd9PGLd1Su73YyG/joOmCAg1BumwZZrHmdKbw6sqapRdwFXfJSwtiR1Zox1o6RsJYOYTh1wp+UhfCwpR7qPtwu/8ZWy5KEQGATEOE1gN6AFlKdZfFqW08AdTvEVhvYrobxsYSFqTld5w9crWF7jGDhXyO8Ilh8u6mH1ccZbaRPGl0Jq4UzxTNiwCJfC6+L2dXvzyZXv9rlxYC6mQixaJG8E143DipiuKxAuWV7PGncX+dIxzX9g93hrmS3uLYTDZQdxfEUEPtVC696ZxPumRMEXOza0pMQe4ImRMmyfbYZ1qx6B1nD4imoXpHkyto1TvQIStREHFXUh91g2DlVq1BSJXiiTVvhVbdp7o8+h3sR7fpX5+HYbfYxrueBlctg831ttYvdZHW5b4g3i/cPbrDMbVshDVXKbRFf9buAW+WOHEQ+OceaeMi745bs9lS72vK6xaFB9fw26TP3Qc6FgBAIeQQCWnjVLojhOjhd4vS0aOcCBdmepaum/62F1/9xzHFYgz5nC4f9+/fT7Nlz6OrVKzRs2HCOU1pcd8vLsVq1anTu3Dk6efIkb5qy7FTWhXLkyMkWpV9cDSP/zJkzBJHz/PkL9OzZM/73VFmK4t7p06dxYDfFb9mCNrtyRzx16lSC6+RcuXJTypQp6c8/56oy7969o6ZNf6fjx4+p68yZs3CZXOx+uAT9/PPPKs+7H6h//foNKs4s4tsiVizGioS4uOXLl6ctW7ZQhw4dlNUvxmhOGzdupM6dOxvC6759+6gZW9GifN26dc1FCYtjsP71zf2x1UMh7MKZwuvO3XupQZNmXgiarTC83JQMISAEhIAQEALBhMAbnuNA3Hr/33vatW0DRfrsbcMR4fXCxUtUomxFZdU4ZcIYg4w94RXzsToNmtKRY5b5GNzQ5subi8qUKkkJXH2ejxkVy4lDBL6l8Nq8aWPq2K610b8p02fR8FF/0KqlCylDejcj397vBmKzbti8lf9OOE9Pec6PWMCIAYx09byHOnonvDZt3JC6dLBYUKuC/CNpKjf6leOyLvpzls7y89FR4dXZ7fu5o0Gk4KBFp2nj0ds05nc21kjhQh/YEMQnARah1XrMOa4EPgh9fklaeNWe+PQzMzZeZEMMy99lqVhENKfzN54pgXFNv0Iqe98/D6jrTMv/eyA8IrRbPl6nzf5LTMOCVguvtkY1EJghWmqPgYcvPKQO046oeu21ixsrexegWD9FMCxN/bLmqMVSe66GHV3ThKfCa/df0MaBRVXou9uPXlN1DqWHZFu/FpIxlult3FUZ+SEEApOACK8BRF9btqJ6WytWLcriXqrk3sd4xX2xdgWFgE8fHuymTy+uflVDP/yYlMLEzutQHVpMNbtv0BXc4Y8J4rJqP/a6rHknkl9jvKJOiK/7zj5Qgdp1DFTka3cNONfCq9mlMPL1h9EvwqvtBELHHtXC6xy26py58ZIKXo84A+YEC19Y+mrh9dbDV7wj6465iDqPzR/9Mux+VzOx9+GHFWwJjgNgFrVrc7B3TEwgTCJGAMZemOO4Fuy8SU084NK584xjNJatYhEbAMlWeNVugdVNmx9wdRyGY+Oa07kbT6nJ2ANkr48op4VciL7zu+Sx2q0GFydFu29WcRl0EHnsOsPES7tuNrfVevIhFYthWY/8FNeXWBTm5+RcCAiBkEnAWcKrFljtCar6niZsr4y+97UxXnU9+ugXV8O6rD4iPmnz5s0pY8ZMhtip75mP5ctXUKLlKXY7FsrGjVGRIkWUWzJtybp27ToWMrurx9OmTUewkEWcJbjyRdLCK8779OlLq1at5Biwu+jy5cvKpXDfvv2oYsUKuK0SFvtwHxaye/fuU26BcUNbrVpKWf98/fo119WI47qeoUiRIlOOnDnIxcWFdnM9iA+rhVfEqYUb4xYtWrDA29SqkiVLlrDoOsgQXuFSGe6Ou3fvTtWrV7cqu3v3bmrJrpM7depEtWvXtronFxYCzhRePVlIX7VmnRe08eLEpepVK3nJlwwhIASEgBAQAsGJwIxZc2nw8JFUvUplFXNVj23xsuX094ZN1L9PT+U21j3Hr/qWl2PDpi1ox67dyqVrksSJjfvd+/RTwtns6VMoJs+d0qaxeEXBfGzbjl20fuNm2rVnL7169Uo9M3zIQKpcwRJywahETr6awPcgvK5avZY6d
LHM+dO7paNkSTHnj0az585T4/dNeLUVfPHQtxRend3+V7/0b1wBYoXC7XDF3AmpXYW0Ruv2BFgYpBy/8kjFd3WNGYn6s8Vrivg/Gs94d+Kd8KrXYiEYJo9nXc8ZNsbIzjFfW5VLbVR7g+O/bub+Hjj3QFl44kZK16j0R9Psyq2wFl5HNcmmBFn94MXbLLyO/iK87jlzn7rPPq5uY63ZnNBuOjZ6aVg8pQob5yyLV0fWNPWaaCE2Gupby2LxizVZrOEi7RpZ3GoNFXnak6J3hjEoI0kIfCsCIrwGIGktsEJc7dy8jtESRFnEbj1/2dPIQxlYtf6S7ItLUf28WLsamALs5NOrG/Th3vavqj9MXHZ1GzmhQ3Vo4TA+xxFY0j2/1bP4gEIEbF0+NVXJk9gQGc3Ca3u2Yj1y8RGt61+IfopsiQGASuBeYd2hWzSvcx5KHCeKVb24gG9/7M6C0Al3thsGFlEfK/2xD0jhdT/vzurCu7PgFrdV2S8TB1ioVh+yUwV018Krl47byYDlbVgWO235/cOCZ1MWPLWFKx6d8vcFWrD9qnJz3G/+KWPnFjjAfXAGtgSGS+Vtw4qpgPR4xlZ4RZ4jSb+juexKJKnNBAr1XL3HMW9H7FXxFXTsBHP9Ot7C1qFFKXzY0IQg8kh6t5e5rLYmNgvH5vtyLgSEgBAwE3CW8KpdCMOyVbsVNrejxVefRFeU1/XUyP8b1chX01yFv859El7hNjdy5CjUuHEjq7rhsjULWy24urqydeh6q3vmi549e9KaNWuUhWicOHGMWy9fviR3d3crV8OVK1emixcvWrklhqVtgQIF1HNaoMWFFj4hosKiduHChSyu7qWoUaMabZhPYBWLhSi4KIagun//Pi8WuCi/ixcSW7VqSeU5rmwfdiGMmLRIiD/bq1cvQ3iF5S/izkIgnjfvTwoTJowqh3ZgaXv48CFDeL19+zZbBZegKhz7tRfzMKc5c+bS6NGjrGK/mu/LOZEzhVfhKQSEgBAQAkIgJBPoO2AI/blgoY8IELfTJ5ewsJi9xiEXfEolixejCWNGeimCedKmLduoZdsOPL+MTKeO7Lc7H/PyoGT4mcD3ILyWLFeJzl+4SFvWr1GiKwaHOX/2XJY5/5H9O5HlbYxXZwufjlq8Ort9Ndjv6Mfb/z6q9bYYP4anVb0Leum5FmAX7bxqhESDJzrPey+VC1y4wvUteSe8wtIWFrcwEoHRhiPp7uPXNG71OeVlr0PltFSew6n5VXjV1qNV8/L6rEnYtde+s4RXR9Y0dQi5jjwuhInTCevAiMtr63ER9+H58N2Hj3bXTPXzchQC34qACK8BTFqLp2jG1vIVAqxOZsHVLMzaira6vBydT+DDnQ306c19f1X8Q8Q4FCZ+CYef1cIrHjT78scOnlbs/hZuZrWlqC5rFl7H/vUPLd9z3erj/IItPSsP3KEETC28wmIVVq4V3BOy7/toRj+1qLd9eDHlkuJbCK+wRK0zcq/6SMK9Re60cZSr3BmbLilLTnTOEeG1z7wTtP3kPauYsZ9YWO4y6xgdPPevVVzaY5ceUdsph5VL4dAs1s7rlEexWH/kFg1Z7KEC2SdioXp4w6wGo68RXk9ceUytJx2yisdgVPz5BH0t0WuLuprZLjdht5xOp64+JrhBhmvoiS1yqGw9XrP1MW5AOG7A8WkhYMvOLk1QjkJACPhEwFnCq7ZURVtr+vxtt0mUScfCrE9Juxn2TaD1qQ7zPZ+EVwiVsBhdtGgRi4xfdjRrK86qVauxm9we5uqsziGIDh06lCpUqEj9+vU17o0YMYIFy3lWwitcDyPt2LGdIn52facta21jwX76xDt4CxakX375hS5cuEBZs2alkSO/LPBt2LBRiavonxvvpNcJ8WKvX/ekY+zyLmzYsDrbOK5YsYL72Y/atm1LDRo0UPlmt8Xa4hU32rKL4O0s5pYsWVJZssKV8lKOZwuhGWnQoMFUpkxptZCUM6e7sriFgKtj3T5+/JgtdCsSjj6JxqqyEPxDhNcQ/PJl6EJACAgBIeBUAhC7bt667aXOZSv/oq3btlPPbp3Z4jWJEZ/z8eMndPDwEXUdKZLl7+/9Bw+x1eprL3X0GTCY7t27R1MnjqM4sWMRLBnX/r2Bdu/dT7VqVLVyPavF2wsex+3Ox7xULhl+JvA9CK9uWSxrNof27jDcXetwEC4xXHyN8eps4TMghFf8/bBj1x7KyC6X48a1CITXr9+gS1euUoF8edTmzo+8nrmbrcATJHCl5Mm+rzibtu6G7f2Cbj91l/r8edJwMdxz7nHadfq+8mCXKLZXwxdzHd4JrzperFuS6Gy1mk0ZXeA5rPNNWHNOxYOtXSgZQRwevfIsxY4WgRoUS2FYe+oYsc3LpKIa+ZP4WXjlfQFUvKfFuGN2+9y8Jmr5/yGMdcazmIv15R7V0/NGklCq7SLdNqv11IVdfRaZfXI17MiapvaYaGvIsnDHVZq87oKVoQ14aW+NRTLHIx0TF/mShEBgERDh9RuQN4uvPlmvmgVXdEtE12/wckxN/O/dI3p/a60px++nYV3LUKjwFte0fn+KDCtWWJ2+fsu7q7LGV7FV97JLYPjex8eiV82Myo+9PeFVi4JoE65347F72XWHbtKdR29UN7TwqgU8uNmtyLFCEUMUlqdw61squyt1rWZZDP8Wwis6BivPZuMPqDGrjvIP7BRLneAn2nDktkPC673Hb9iV734lOIKfa8zIhDgFHteeENxR9P4tgzEZwSQFEwUkcxzX+0/esFi9U+W3q5hGxX1VF/xDMzYL4/qeb8fmEw6qfugg8N6V15MG/B6U/jUBxYlmifG65sBN9cjg+pkpTzrLpPYki7mtWMxFwnhTsHvkx8//Y263FIMaBZJQ89IW90eqkPwQAkJACHhDwFnCK6rXVq3+tVZ1trUr+uST8Hr06FFDgKxVqxYlSpSIXfteoSVLFuNRWrx4MaVJk0ad2/uBeKmIawqrVHf3XJQpU0aO43qWLUt3quJmQRUWpRAms2bLRvnz5SNYikLwRTKXUxn8Y+LEiYQYr0jjxo9Xz6gL/gFhtX79+uo5uPeNz3Fd4S4YIjKsWfuzuGovYUEEYimsYiuzG74Y0aPTpk2bVP9R3iy8wlUexgYrXZ3wXM2aNWnGjOmG8Ip7ZgG5VKlSapERfbl16xa7V+5J1apV1VXI0YaACK82QORSCAgBISAEhICTCQwfPYamTJtJG9euopQpkhu1N2nRRgmyzZo0ok7t2xj59k7sxfE8fOQYVa9djyCm1f6tOrn+HJ+279zNboc3UZVKFWjYoP72qpK8ryDwPQivnbr1ohWr/lIxWQsXyK82A2hL7OAivM6dv5D6DRyiNiD8tczy94x7/iJqc8LYUcM5znEJ2sH/LTT8vYWy/j55eJ/haecrXv83e9Q7d8O6A7B6HfPXOVq9/4YKT1bePSHN3HSRrt9/RWvZE2E0kydC/Yz56J3wijLaQ18ydlkMV8YQP9EOjCuG8Jpg7s9rgjrEWO50sSlXmtgsjn6gOVsuqfXVRd3yqjVRv1q8
ol3tlRDrxUWyxKcYUcITxGWsSVfLl5hamjwVao9+sMrFv2JZfkYVXpJPwqsja5oY64VbFZqLswAAQABJREFUz2jDAPbSyOKvTljbrTpop2JTlPuMUHGX2I0yQt1hXXV8sxzK9bIuL0chEFgERHj9RuThWhj/zAnCKixdteWr2fWwTwKtuQ45dy6BT69ussvhbQ5VGiZuIXYxnMChZ3ThZ6/fU+leW5Vomozd0I5Z9Y+6hQ9FwQzxqE35NBQhnMUdoLls5ypfrFzmb79C87ZdMURMxISNxh/MRTuuWe24wg6osTxBgDsGJLRRNkdCalIiJYUN84PKw04qBI9f1pNjhLI4q9O2k3ep77yTNLBeJsrnFldlw7VD1EhhaU6H3Op6wMJTtJnjsdpaW9rGeNV1IuYsLHov3n5GP8eIxAHhY6sYsyv5Q2nr6lg/490RcQpmscUsYtgixeLdXxAqW/Bur3Cfx6af7TTjqLKEHdf8V8pkijGL+K9wETKf3TPD6lUniODd2Hq2Fwu4RTkerF/T0YsPqd3UI0aMXt+ew86smTyGm/9aYsOgPGI04HcgPe96M6crd1/QqBVnlair8+GuukaBpMqtiM6ToxAQAkLAJwLOFF7NVq+Oiq9adEVfvbOY9Wkc3t2bMGECTZs2zYtVqy7v4XGGBg60uPTVeYjt2qtXT0qRIoXO8vZ4585dGj5iuLIORSGIqBAwx40bR8+ePVNuiJEP98M9WXyFFSkSRMwePXpwjNd5Kr6q2dUw7iO2KyxGkSAQhwv3JZQA8rZu3UbDhg1Vz+Jai6mtW7XyUhb3ddq+fbvqHyxRkeBSuGjRotSbXQ/bxoeF5e2lS5foxIkTFCVKFFUWbpA7dOhAgwcPptKlS+tqleg7f/4COnnyhMpLmTIlVapUiWrUqGGUkROvBER49cpEcoSAEBACQkAIOJPAiNFjafK0GbRp3V+UInkyo+pxE6fQmPETafTwIVS+7Jc5jVHAdGJPeMXtjZu3Uv/Bw5TghGu4GK5RtTJ1bNfax/kYykpynMC1a9do+vTplDSp/ywor169SoUKFVL/vGv96jVPKlyiDLVs1pTat2lpFJs6YzYNGzmaVi9fTG7pvnjKqVitFl3iebt2Yf3ixUvq1K0nbea5OhJ+J/r37kGz5s7n35P7hsVr9179CPGHjx3cQ9E5Bqx37aIOxHjNybGJF8yZgUuH0pjxk2jcxMlefv+/pv09+/ZT3YZNqWb1qjSwby/Vn99btlVjXrV0obIAv3jpMlWqXkuJs/7pt0ODdHJh7W44Hq9RwnI0Q9IYKnybdjO8eNc1espCqG2qWyQ5NSru+9+P3q2bor5PbH46e/NlZbWp1wWxzteMDSsgxOqEteGxq87SluN3dZZaO+xQKS2l+ezhEAY5w5aeIe9ivDbmdeA6hb/8PxHrkcv2eKo1WlSK9eKavL5Yq2AyCm0SPOFFcNDi02pd2eyZz+jI55MZGy/S3C1XaHpbd0rFBja2yS9rmlgzLtRlkxJ44YLZNsEaeMiS03T4/EPjFgx6urOFLox6JAmBoEAgWAuv8VyiUpwY1kGpAxu6PQHW3CcRXM00Aucclq8fHx321e0w3AuHdsnuL0tX70aGHU2PX7wjF44pYN7N4115nQ/3EA+fv6UfI4Y1hFp9z/b46u0Hevf+IyFuQWAkjA+WtnFYHNU7ttAPTHDK9bNMUNfzbibzx92v/fzw8X/0/PV/gTY2v/bTp3IIHv/vs7fKelkL4t6Vx3jvsbUudqZFjmCJw+ddWckXAkJACNgScKbwirr9I76aRVdHBVvb8fj3Gi6zsBgCd1nhwzv+bcTziI0aM2ZM9lDxZSeubX9QDoKsi4uLjzu/tTUuLHE7d+5sW41xDUEXlrdo15H08OH/2TsPuCiOL44/e0GNikYx9hYb1qjR2I0tdo1do/41mmjsiQ1777HEGmvsPXbF3lus2BUFVLABioIg6v3nDc55d3AnB3dw3v3m84HdnZ19M/Pd01vmt++9Z3KeqVNH/Yy8du1amZeMxV+VC5bt9+jRU3r08vmCBQtG6pI9ZVmwNWY30gUOXgHh1cE/AJg+CIAACIBAvBJ4+/atNpd9bAbCYltoWChlNPN5LDZ9Ouq17PXKAmxMCouuKi1GTK4355rQ0DB6Lp75M2Yw/cxvjs3w8HBi0TM65assWejr/J8WAaNjK6o2Uf3bMazjvMe6f0dEZcdW634UnpSPRWQ9VaoVd6Hzd/y1gmtTEUWwReVcwokkiO76Bcl0bt8WyKiaW2TL4i6nR+M1XmOF05b5i/VVdohJliTCYcdY2+jWq/XidMLr1cSftRQuRFH+uzexGGNsiqk1zStegSJS4inq2aggNauY02g3vMbs6x8iPX3ZCQkFBGyJgE0Ir1kzpqUMaZ0sxsX/RTDdf/Kc8mRxptROyS1m19KGlKcr29XN8WrpfmAvZgTeB/vQ+2Av0oQ+IU34K2kkQZJUlCD5l8LDNaf4yR4zww5+FX+xNh93SL4h1aZabvEGWTp6HBgqQ1lcuBMQbQ9RB8eI6YMACIBArAlYWnjlAekKqXzMYqprDle9/K4s0K4+vJI8xFYVS+V1VfY+x23g8+d0+9Zt6VF6964n7d69m7KIhZO4LoMHu9H27dvo++9rUO3atYjzNB09elTWZcqUSYYo5ryvKLEjAOE1dvxwNQiAAAiAAAiAgGMQYMHVkoVF2M+tPHvmT2UqVInWsNu3bU3DhwyKVls00iegQg1XFGF0OfLcgp23KIWIQhgkvEyV4Mop3lCsT2Dlgbsy/PLCPuXp66zwYLU+cfRgDQLxLrxevuMrPdvyZM0gPPWMv8kR3cmHvXlLdx48E7HQ31ORPC7a3I7RvR7tQAAErE/gjm8QjVl9mTx9X+p11qBcNuotQut+ytNT7yIcgAAIgAAIxIiANYRXNRBDAVbVG25dc7pSq8pt9IRZwzaOcqyb25XzwjZr1ixeph4YGCjyuI6VIYR1B8BhmKdOnUIZM1r2jW7dPhxpH8KrI91tzBUEQAAEQAAEQCCmBAYPHvzJS5WYGh2RltNmfG6FPUpPnj4TrWFncXGhPLlzRastGukT4FC/nMZs5E/FZfo3lZJsuUhJllMnJZn+VTiyBgGVJu7Q5NoxiohojTHBJgiYSyDehdeg4FDy8vUnESlVCq+JDfIxmjMhfiP/dVg4saN7TuHtmsaGvV3NmRfagoA9EuCwGE9EvtmHASGUMlliyiUeYlQ+W3ucL+YEAiAAArZGwJrCK8+VPVs9vD3E9rKedyuLrUVyFo3kCWtrfOJ6PD4+PvTkyRPKkSOHTYib/v7+5C3GxCV3rlyUVuSgQrEcAQivlmMJSyAAAiAAAiAAAvZLQIUY5jytsSmcH5bDDSuRNja2cK19Ejh76xn1nX+WMorUaF85p6SLngFUs1QWGtq6mH1O2IZnxevFnPc2czp4GNvwbcLQPkEg3oVXHt+rkDDyDwqW23Ahnsa0JEmUkFKlTEbOaZzkNqZ2cB0IgAAIgAAIgAAI2DsBawuv9s4P8wOB2BCA8BobergWBEA
ABEAABEAABEAABCxPYOVBEeJ2+01puHLRTNS9fkFCeGHLc4ZFEHAEAjYhvDoCaMwRBEAABEAABEAABGyJAIRXW7obGIujEYDw6mh3HPMFARAAARAAARAAARD4XAhwXtc0KWOfEvFzmS/GCQIgYHkCEF4tzxQWQQAEQAAEQAAEQMDmCUB4tflbhAHaMQEIr3Z8czE1EAABEAABEAABEAABEAABEAABhyYA4dWhbz8mDwIgAAIgAAIg4KgEILw66p3HvG2BAIRXW7gLGAMIgAAIgAAIgAAIgAAIgAAIgAAIWJ4AhFfLM4VFEAABEAABEAABELB5AhBebf4WYYB2TADCqx3fXEwNBEAABEAABEAABEAABEAABEDAoQlAeHXo24/JgwAIgAAIgAAIOCoBCK+Oeucxb1sgAOHVFu4CxgACIAACIAACIAACIAACIAACIAAClicA4dXyTGERBEAABEAABEAABGyeAIRXm79FGKAdE4Dwasc3F1MDARAAARAAARAAARAAARAAARBwaAIQXh369mPyIAACIAACIAACjkoAwquj3nnM2xYIQHi1hbuAMYAACIAACIAACIAACIAACIAACICA5QlAeLU8U1gEARAAARAAARAAAZsnAOHV5m8RBmjHBCC82vHNxdRAAARAAARAAARAAARAAARAAAQcmgCEV4e+/Zg8CIAACIAACICAoxKA8Oqodx7ztgUCEF5t4S5gDCAAAiAAAiAAAiAAAiAAAiAAAiBgeQIQXi3PFBZBAARAAARAAARAwOYJQHi1+VuEAdoxAQivdnxzMTUQAAEQAAEQAAEQAAEQAAEQAAGHJgDh1aFvPyYPAiAAAiAAAiDgqAQgvDrqnce8bYEAhFdbuAsYAwiAAAiAAAiAAAiAAAiAAAiAAAhYngCEV8szhUUQAAEQAAEQAAEQsHkCEF5t/hZhgHZMIOxNGDmnSm7HM8TUQAAEQAAEQAAEQAAEQAAEQAAEQMAxCVhceGWDKCAAAiAAAiAAAiAAAiAAAiAAAiAAAiAAAiAAAiAAAiAAAiAAAiAAAiDgaASyOH+hnXICjSjaI+yAAAiAAAiAAAiAAAjYJQF4vNrlbcWkPhMC8Hj9TG4UhgkCIAACIAACIAACIAACIAACIAACZhKwuMermf2jOQiAAAiAAAiAAAiAQDwQgPAaD9DRJQh8IIAcr/gogAAIgAAIgAAIgAAIgAAIgAAIgIB9EoDwap/3FbMCARAAARAAARAAAZMEILyaxIOTIGBVAhBerYoXxkEABEAABEAABEAABEAABEAABEAg3ghAeI039OgYBEAABEAABEAABOKPAITX+GOPnkEAwis+AyAAAiAAAiAAAiAAAiAAAiAAAiBgnwQgvNrnfcWsQAAEQAAEQAAEQMAkAQivJvHgJAhYlQCEV6vihXEQAAEQAAEQAAEQAAEQAAEQAAEQiDcCEF7jDT06BgEQAAEQAAEQAIH4IwDhNf7Yo2cQgPCKzwAIgAAIgAAIgAAIgAAIgAAIgAAI2CcBCK/2eV8xKxAAARAAARAAARAwSQDCq0k8OAkCViUA4dWqeGEcBEAABEAABEAABEAABEAABEAABOKNAITXeEOPjkEABEAABEAABEAg/ghAeI0/9ugZBCC84jMAAiAAAiAAAiAAAiAAAiAAAiAAAvZJAMKrfd5XzAoEQAAEQAAEQAAETBKA8GoSD06CgFUJQHi1Kl4YBwEQAAEQAAEQAAEQAAEQAAEQAIF4IwDhNd7Qo2MQAAEQAAEQAAEQiD8CEF7jjz16BgEIr/gMgAAIgAAIgAAIgAAIgAAIgAAIgIB9EoDwap/3FbMCARAAARAAARAAAZMEILyaxIOTIGBVAhBerYoXxkEABEAABEAABEAABEAABEAABEAg3gjYhPD6KiSM/IOCibfh797HGEaSRAkpVcpk5JzGSW5jbAgXggAIgAAIgAAIgICdE4Dwauc3GNOzaQIQXm369mBwIAACIAACIAACIAACIAACIAACIBBjAvEuvAYFh5KXrz9pxBSckielxEI8jWl5+/49Bb9+QwmEgZxZnCmNU/KYmsJ1IAACIAACIAACIGDXBCC82vXtxeRsnACEVxu/QRgeCIAACIAACIAACIAACIAACIAACMSQQLwLr5fv+EqxNU/WDJQsSeIYTuPjZW/C39GdB0/prfCcLZLHhRImYBkWBQRAAARAAARAAARAQJeAtYXXK14e5OHtQVe8LpOH2FfFNacrFclZlFxz8NZVVWMLAg5FAMKrQ91uTBYEQAAEQAAEQAAEQAAEQAAEQMCBCMS78Hrx9kPK9mVacv7CyWLYA4JCyOdxIOURXq+p4fVqMa6OZuiq+xG6vGM/3Tt7iQIf+Mnpp8vqQrlKF6OidatT4ZqVHA0J5gsCIAACIGBHBKwlvLLguvrwSj2x1Ri2VlXaUKvKrY2dRj0I2C0BCK92e2sxMRAAARAAARAAARAAAQch8CL4DX3hlNRBZotpggAImEPAJoTX/NkyUkoRZthS5XVYON30eUIuzmkoU/rUljJrcTs3Pb21Nr/Ok0O7j534JfDQ4wZtHTWd7p6+YHIgucuWoAbDetNXrgVMtsNJEAABEAABELBFAtYQXll0HbxsoHa67N3aqnIbPc9W5Qm7+tBKbTsIsFoU2HEQAhBeHeRGY5ogAAIgAAIgAAIgAAJ2R+DfEz40a+t1ehP+nvJnTUMDW7hSvixp7G6emBAIgEDMCdi18JpZiK6ZhfhqS2XLnsPEP8ZKw1qViX9Q4ofA1b1HaGmnP6LdOQeybr9oMhWuYfver0vcb8uw3u2q55Hz837yig5c9KOKRTJR3g8PB4Ztog0ijhryg03Y23fUolKuOOoxohtr9fv+vYYePAshfkMua0YnSpfK+AsoGpEI+6F/MAW8fEOZ0ianTOlSmGTwTtj2Cwih9KmTUcpksQ/jbrIznAQBEPgsCVhaeF19eBUpMTW6QqruNePaT9ATaOMKalhYGPn5+ZGLiwslS5Ys1t2uXbuO3rwJo3bt2sXaFgzYLwEIr/Z7bzEzEAABEACBz5vA69BQ8vb2odDQMMqfPy+lTGH6b+/Pe7YYfVwT2LnbnU6dOUu/9+5JadJY11nIy9ub9u0/RA8e+lLDBnWpRLGiFJf9xzXbuOrv1oMg6vTnceHpmoRyZU5NFz0DpPi6qM93cTUEu+jHnHXL1YfuUeoUiale2Wx2MXdMwjEIQHiNo/tsKLgWyJtTr+cbd7z0jiHA6uGIkwP2dJ1Rtz0Jfcvs0nvHMpv2fPUPCqNGIw/Iee0c8734skpCx648pkFLztPQ1sWoZqksFFUbs0FE4wIWG6/ffyEfULJmMC/EeLvJRyk49C1tGlo1Gj1Zrok1+mUGo1deovtPg7UDLZD9CxrRtjh95ZxSW8c7T56Hktuy83TD54W2vkKRL2l4m+KUPGkibR3vBLwMo0nrr9Dxq0+09Tkzp6LejQpRqXzO2jrsgAAIgIClhdcGI+tKqNEVXdUdsIb4um/fPhoyZCiFhATT6tWrqXDhwqo77fbhw4fk5jaEzp8/p60rXr
wEjR07hrJli/kfdD/++CM9e/aMDh06pLWLHRAwJADh1ZAIjkEABEAABEDAcgTcho2i1evWS4N7tv9L+fJGvID+qR42/buVho8eR8HBH/9Ob9ywPo0ePoRSptT/O/1TtnAeBKIiUL12fbrn5UWL5s2mqlWs58TBYmudBk3kZ9k5vTONHDaYfqhdk+Kq/6jmbot1T1+E0lXv5/Rl2hRUSKzJGSt3H72kK17P6fbDF3T2lj89FE4Ubavnpq4/fE095pyW4uv2UdXtNuxwdNdyD11+ROPXXqaQ0Hf0d+/yVCBbZKbmrlsGvnpDDYbvp4bls9PvTQvTm7fvqfqAPcZuFS3oVY4KZk9r9DxOgEBcEYDwGgekdUVXFlxZVDUWWli3LcTXOLg5Ol3MbfbLJ8ML6zTX2+Www7+un6dXZ2sH7ud8KVHCBFS9hIscmqHwypWGbawxh6CQcKo7dB9Vcs1EYzuUNKsLawig0RmApft9+Tqc2k85Rk+FoMr3o+zXGeiGeGNu0zFvEXY9EW0cUpVSCXGcy3vh6vr7grPywa5qscxUUoin7Kl84U4ANa+Uk3o0LKidQkjYW2o57jDxQ0mtb7JQsVzp6Yl4iFx35J586Jny8zdUtkBGbXvsgAAIODYBSwqvSjw1V3RVd0Bdz6GJxwrP15iW58+f04QJE2jnzp1aE6tWraIiRYpoj3nn6dOnVL9+AynMtm3blvLkyUO3bt2SIm3KlE7k7r5HvIEes6gpEF71UOPACAEIr0bAoBoEQAAEQAAEYkngxKnT1LZDZ62V3ds2U/58ebXHxnYuXvKgJi1ak5OTE7Vo1pQKfJ2fduzaQ4ePHKUK5cvRP4sXGLsU9XFA4N69e7R//36ze8qVKyJiWvXq1c2+1hoXnDt/gS55XKHWLZpT8uSxj7ZjbIzrN26mAW7DqG+vHtSta2dKmDChbBpX/Rsbly3Vu5/3lQ4Rakwtq+Si7vUL0Nt376XIesU7kO76vaIzN5+KSHXhqpl2m/1LJ2r8XXZa6n6HXr95R/sn1NKes7edT63lvhBrvTM2X6W95/20U18ghNeCBsJrTNYt1fr18LbF6PsSWYjF8iajDpJrrnSU76vIf7O3qpyLMqdHpALtjcBOvBGA8Gpl9LpC6oDu7fUEV2M5Xrl+4uxlcmQQX618gz6Yv+ouQgx3jn6I4ahG1WGhCDlc03pvq0XVZ2zq1BeX8niNjS1zrv3Ul7UpW5YWQE31pXvO0v2OWHGR9l/woxHtilP14hFCOPe37ogXzdpynX6t9zW1rppbDmHrqfs0WXiw1iublQY0d5V1HHa4y8wT0gP2r+5lqVju9LKe3ywbuuwCtamWm36p+7Ws41/sXdtl+gmqVjwzjWxXQluPHRAAAccmYCnhVTev69bhO2IMVXnMxjTk8KtXr6hevXoUEBBATZs2pcSJk9DatWsoKuF1+fLlNHnyZJo4cRLVqVNbO+Y5c+bQvHnzaOrUqVSjRg1tvTk7EF7NoeW4bSG8Ou69x8xBAARAAASsR4DDBNeu35hCgl9TjepVac36DRQd4TUkJITqNGxK9+8/oMP7dlO2rF/JQWrEH99df+stwrUeoI1rV8pQrdYbPSwbI8CCa0xEV117P//8MykRVrfeXvf/nDWbZs2eR9s2rafChQrY6zRjNS92CmHvSXZq+E94sV7zeS5S36SlK8IDVjccYioR4raMcGLILaLJFcmRTraZs/2GcJ7w0fbfT3hiNhIemfZaTK3lcmTCVuMjnEDqf5tNpLlLQJuP+1BUwmtM1i1nb7tBa0So4fVuVaSgeuuhCPU87biMGKici+yVO+b1eROA8GrF+6dEV0MvVxZW+ZxueGHDNjysSXP+kW0gvlrxJn0wvbrXcDq/eXesOirZuDa1mjHSLBucg5PD/TYQX0yvXr+l3f89lKFnj06tI+08EyGCOb/osauPZb5Ozsdas+RXVEZ4SOoWfrNo/o6bdO72M/L1fy3f+mlULrsMIaza/bHwP0ohwtKO+ilCeItKeDVsw9e+faehTce96dT1J9Lr8tuCGem7wl9Sw2+zU4IEEdZ155FQVO44+0CKgpxgvv33eaV3K7dkYZHno8Lr5smSWr79pATFCGvGf0clgD54FkwLdt6SD0bBoeEijEVaqlsmq+CURWvosMcjWiLeQOvRoCAdFOLkEfHDpYFg1LFmXjp5/ak4f5s4TwOH+21WIaceO9Xv9K5laO6OG3T+jr8MQ8L3o3nFnJTWRG5W7SA+7KiQGPxm1uzu32oZ8mnOb8D5Xjl8sMrLyvfklBjfpmFVKeMXybXmLogcEj1FOBMWaFmo5cL36c9N12hMhxJU2TWztm24eFuvWv898nMx57dvtfXYAQEQcGwClhJelbeqOd6ufI1rDle9nK5uywaSh5cHmWNH9w6y4NqkSRMaNWoUVapUiWbOnEULF/4dpfB64MABkbvLm9jbNUmSiAgDbGvXrt00YEB/GYK4hXgL3VR5+fIlLV++gg4c2C+9ZStUqEC9e/emwYMHRwo1zHnCZs2aSZcuXaKXL19JD9zGjRtR3boR4ZkPHjxIs2fPpg4dOkjxWLffGTNn0tEjR2j69OmUNWtWYq/eGTNm0OnTp+nBgwfE4ZGbN28W6TpdG7z//v17WrNmDW3btp2uXr1COXLkFNcWo759+1K6dOn0mh87doy2bNlCR48eo/Tp0wmuTcWYC9OUKVOoZ8+ekq+6wEO8sc9tT5w4LjyIQ6hixYpCzK5D5cuXV02wjYIAhNcooKAKBEAABEAABGJJYMqfM2nO/L/pr+lT6dr1G3I/OsIr59xs/dP/qGf3X6l3j256o+Bcr/yCX+rUqShZsmTyXKB4Hps8dQYdP3VKirWlSpagNi2bU6MG9fSuxYFlCCjhddy4cTEyyM/n7PFqyuv1/oOH1LV7T2olnqvfhIfTqjXr6ImIklO1ciUaMWQQPfP3p/GTp0kP6GzZsoqwvbVkntZEiSI8SXlgnFN1yp+z6PxFfuZ/SUVdi1Dzpo2pYf2IZ35uM1OIobvd99Ka5UtljlfdfhOIKHUbNv1Ll8XzdeFChei3X7tQrRrmeeoGBARS246die1yyGweq5MIkz3MbSB9W6a01fvnOX4O5blYe6s/bD8Vz5OeZnUrS5vFWuW0jVfl0Hm9LlemVNKbsrAQYvNliexVyQ1/nXVSesZGN7rc/J035frjQOFUsVisQZ689pR+a1iAWlSK8MpmUfLgJT86deMpZf8yFZUX66/NxJqjioinuB689EiM11tGwsvinEI6Y7DzRfrUEf8/qTXQXiLlGK8n8/ovz7dIznTUrV4ByuOin1v4hnDW2CnWcQ8JuymSJaLyhb4kXlPOIRhw+dRaLkfday9Sww1q6UrlCn5JC3bdouX7PKMUXmOybtlh6jFi4VelnTtz8xn1E5EBZ/xahkrmRUo19bnA1vYIQHi14j35X99R0vriacP0elGCKley4KoEWN7v3+0nbVtdz1dDb1ltI+xYhMC48o0o8IFfrGyly+pCg0/8a5YN78evqO2ko5ROCHf8RcVCJYtrE
/5XikJFmIrus09pxcAMaZKJL8uIvJ26Xy4sqrUQ4WU5bC17NKZNlYyOii9VPu73o3jjSnxZcmky+iA5JU9My/+oKI+jEl4N23DDKeLBY4t4AMmYNrl4syutzFvAY+1cJ58UVbmNmge34X5ZmH3oH0Jej17xaZrXsxwVFteylyc/APBDAs+5TIEMlD1jKvrp++jlW1ECqPqyfRT4mpqNOST7KJ3fmZImSaTNbTqkdVGqVSriLdVtwmuU857y+LgUz51Ovs3G8+CQx0c8HssQvpyo/fDlx7INP3jxAxgX7vfJ89eULHEieVwib3ohDjyXc+XQv9O6lJZhnOXJT/xSrLr8kJ/qlM5K+y74SiE6d+bU8oEhl3iDTrfwPUki3hZbO7iKbrX8fNQY5C5Z8+eFy2PB40fBg1mM61hKm/91owhhPH3zNerZqKB8aNMzhAMQAAGHJWAp4VUJptH1VFVCLYPX9ZBVnrMxDTf87t07ucCRNm1EPhdTwmtUN/3NmzdCVOwlBcTNmzfL8MNRteM6FjF79epNhw8fEsJkeipdujSdPXuWeGGOC4cNUzlefX39qLZYmOFSrlw5Spo0mbyOj8eOHSdCHteTQm21atXoG2Fn8aJFfEqWcLHoU7FiJSl+7tixg96+fUs//PADPX78mGrVqiX7ZhGZjzmnLQuwxgqHYGbv30yZMlGpUqWE+HpNiM9eUsxl4VQJ0KfEAmKXLl2kGR4vl5MnT0qhlturMXM9h2dmD18uPPbkYjGSRVsu7DkM8VWiiPIXhNcosaASBEAABEAABGJM4I7nXapZtyFVrlSRliyYIwSwCBE2OsLrmnUbafCwEbR+1T9SXD1w6IgU3b4Rgmq5b8tSpi8zasfFz2eVa/xAjx49orp1ahHnz3QXHrF8PHrEUCnAahtjxyIE4kJ4VZ+fzJkzy78pqlSqQDdv3aE7np5UWjw7X7txg1zEuUIFC9DxE6fIP8BfhvFlcZTLQ19fqlgt4pmfQ1OzSL//4CF5burEccS5grkMHDKc1m3YROdOHpEvP+r2y5+h6lWrkLfPfdkvtzfX0/pFUBCNGjuBOHQ255Jl4Tht2i+o409tqUjhQlbvn8f8uZTWE4/Q/SfBxGm9OM/rE7GeyYXXKOuVzUYunwhX23TMQQoSIYj3jq8ZrSmPWnlRhuLl9dCwt++oqEgP9oNYF+T+ZVQ8ER2P04+Vzp+BvMW4eE2V100ndfpGu+a4Swik49Z4EAuu7Azi8zRYCrhsc8PQqpQ0cULSXQN9Kzx6eW3zonDe4DVQtr9paDW5PsyD5jVKjqjHOVm5r3DRnlObsb3F/SoQr0V/ai2XnUheCa/XL1JGvNBsSng1d92SU7X9MGSfWD/9iga3LCo5c5q80asu0aI+35GnXxDd8X0pPWFZhDUUlaN1Y9AIBKxEAMKrlcAqb1dDb1VdMVVXkFUiraHAquwYirJWGrbDmv0je1mLzH2yz2mz7CgRji9iT0R+q0qVGf9eow1HvfXefmLvzk5/HpdNdo6uIb94zwnvy95zz2iTuvPJ5+LLlL/QM6VLoQ1PayiqRkd4ZZF0yNILUpwcLTxlE4q371gQ7jH3tPRoXfp7BfmlpjuPhX3K09dZI5Knrz18j/7aekPmPOjbpLAct6nwFLKBiV+Gwis/cGw7/YBaivj9LKBy8RIPDe2EmM2euZM7fyPr1EMHC9vsZcoepexN3HjkAXle9w0zFkJHrrhE7Wvkpc6188nz3C8/8OiKrBzud8iy81K07d24EDWtkEO2/dSv49ee0MBF56h55Zy0/fR9+XCjew2/IcYPXlyYNYurLKROE962hqWWmzuldUqqJ8ruF/lfJ633kCJxISF2szjtKR5CfqyYg7r+8LVWjDW0hWMQAAHHI2Ap4VWFCNYVUY3R1BVdoxJqzbFlrA9VHx3hlXO9btiwUSyuBEnvTj52c3OTYqiyE9V267ZtNES0K1OmLM2fP48SJUokxdg+ffoQe6+yGKuEVxY1N27cRO3b/6R9y95TLOA0btyY2EuWwxtz6f7bb9KzlYXUDBkiIlsoEZQF4c6dO9GZM2fEtjN16tRJCL+95HWBgYE0cOBAypzZhUaOHCHrDH+xqPzLr79SeuHZOmbMGCEMR7yI1L//ANq9exetWLGCihYtKudQvvx3MvetqmNbN8RCU/PmER7AusIri8DsdbtIiMUsPnPh/FsNGzaU++yVmyIF8utIGAa/ILwaAMEhCIAACIAACMSSwE//60LHTpykg3t2iBfGspslvI6bOIUWLllGPbr9QrPmzNMbCed8Ze9EFa5V5ZD9pUsn6t+3t2zLXoa9fx9AWbK40IQx5kVC0+sMB1ESiEvhle/3lg1rKHeunMQie4u2HYWIeYlqfl+dZs+YJp77E9KDh75UqXotKl6sGG1au0KOecPmLbR2/Ubq3LG91kv19h1PqlWvkfZlAG5oTHjlc1s3rpXiKO8vWvIPjZ04mdq2bkmjhrlxlVnlz5ki1LD4LO/eLnIc5/2Y4ziu+jdrsPHU+OLdALF2ekk6VfAQXISY+TrsnVxT5eOOtfJSnW+yRinAhoW/o+8HuhNH8lsqBMroFCW8stA6tE0x4WQR4S2tcpaymDq/Z3ltVD12oGBHir5NC1Hj8hFrjt3+OkUe9wJp55jvKXWKCKGTvUj3CUcXzk/LTi9qDTRbRifpCJNGCKIsjg7754Jcw+RIiDwGLq0nCPFZiLdze3wrPWK5ToUD5jYqaqI5a7mmhFe2b866JXv//vH3f0J0dZXOK3z9GrHWPFusNfP8VERFruei6yQUUYPfIBB/BCC8Wom9ElJ1xVXuSgmvhkKq8oI1FF75GmO2+ByKZQjEt/Cq+2WmZsRffqHii1x5d6p6Dk2xYv9dWjGgIuUQoSc47G0vIbzym0lDWxXTfkGr9mobE+FVib/r3CqLB42UypQMe8FffP2bF6H64i0wJbyyp6vyvuTGvsLrlb1xeWxTf45YkDX2Zc0J1kPEA45hcUqWWIa64HpD4dWwrTpmdq/EW1FbR1aXVeqho38zMV4R1lkV9ii+fDeQdo+toX3bK+BlGDUccYBqlHShYW2Ky6ZKeF05oJII9+GkLteKt5xTYETbiLYc9pnfEDMsXzglkQ9V64960cx/r8vTLOT2FqE/+C06fuAbvvyCFGLVAw+HcG4+9rDeWHTtcrgNFlVVaGo+t+fcQ/kAwm+y6RYWejvVyqcNYax7DvsgAAKOScDawiuLrFe8LtPY9hMk4E+JrtworoXXq1evUqtWrbQfgGoiBNkvXbtSgQIFtHVR7YwW4uX6detkiF3dXFEs3HIIM13hNarrua5+/QZS8FUCrbu7O/3+++80fMQIaipCJnNR/ezcuVN6pp45c1YKsCyOjh8/LlKIYHmRGb/27dsnQw2PHj1aiqVe4q34Bg0aSC/WYcOG6VniUMeLFy/WeryqudarV5/GjRur13bu3Hk0d+4craCrdxIHkoAlhVdeFAwIfB6JbNKkSSjdBw/wSCdRAQIgAAIgAAJ2RIBDt3br2Zd6dP+F+vToLmdmjsdr
l+69ZB5XvvB/HX6izuIneYrktGXbDho5ZjyxGHfi0D4Zbvjk6TPUpn0nqlThO5o2abyMTGJHKG1yKnEpvDaoV5emT4n4+4VhcGjg6SJf6uIFc4m9YFWpXru+9Ir2OHdKVUW55XZBQS/p7IlD8rwx4ZM9Xf+eO0trw0fkG65So478nC1dqP8ygLaRiR1zhVdL929iaDZ3itOO/TzjuAwtPK9HOVp72EuKe+zUwiUqAfau30tqP+UYVS6aica0LxmtOSnhVTmxqIuU08vQ1sX00p5x7tTabnv11gTVOua4jiVlBDxO92ZY1BqorpMJt+H1wjGrLst0ZZy2TK1/1iubVeu0o2z9POME+Tx5RXvGRnjzGlvLVe11t58SXs1Zt1S2dNdj526/SasO3iWORsjhlL9yTkk3H7ygkR9E9NHtS1CVohHCsu64sA8CcU0AwquViCsh1VIer4Z2rDRshzUb36GG24kwFl3q5NfyZxGy1uC98phzjuqWGz4v5OGIdsWpenEXmQi+z/wzUkDkE0VFGN1vRdL374UYqCuWxkR45Ws4dLDhGPjLl8NQcOJ4TiCvhNc21XIT5xXQLRX77ZJfhjN/jfAqNvZlrQRl3Wt5v32NPMLzNIJNVMLrdZGL4LDIg3DbN0jG/A8SeQs4zy0XJUiqh46pIiSwbn7cfn+fpTM3nmnb8TX8dlvNwe56DzZKeD08ubb0+uV2qhh6nfLDibpHqg1vVd+cx3fs6svyFIcjYe9bVZQXsvo8qHyw/DCh+Km2vOW+nVMnp1UDK8lqFZokpwhX/Lu4L3lFHopnL0Jp7REv+cYbewWP7RC9B0LdfrAPAiBgnwSsLbyqEMQcOrhIzqK0+tBKCTIqT1dFOK6FV40IX8DeoEEiJNeJEydoyZKldPeuJ02cOEnkKa2thhVp26JFC7p+/TpdvHhRfC9EvKmsGn37bTm9UMNcf+XKFWKR88aNm/TixQvx81x6ivK5y5cjvhNCQ0OFB20ZGY54/vz5xKGTv/uuAuXPn5/++WcZN6WwsDDq2vUXOn/+nDwuWbKUaPOdCD9ch776KiK8vjwRxS+2v3PnLplnlvPbcq5YnisXzovbqFEj2rt3L/Xr1096/fIcdcvu3bupf//+WuH1+PHj9KvwouX27du3121KvDjG3r+fCn+sd5GDHVhSeD105Bj9r8uvkQjqemFEOokKEAABEAABELATAq/FMw6LW+Fvwunw/l2U8kO0DXOE1z8GDaWNm/+V+TAP792lR2bMhMm0eOk/tGzRfKr4XXn5PPbT/7rS2XMRz2MchrZype+oft0fKFtW089jeoZxEG0CcSm8duv6M/3ep6d2bPP+XkyTpv5Jm9etomJFXbX1jZq1Is+790hXeOXcrLvc94m/E27Qc/HMz7mA7wsBlcvdGx5ya0x47fpzJxrQL8KDWjYUv3IXcKWyIi/r6n8Wq6pob80VXi3df7QHaiMNeZ2O1+um/1KGSgknibcirZspAZZTlrktPU9RrYMam5ISXt3H1dQ6mHDbhbtv0bK9EX+XGa6/8voih/1VjiUqih5fx/Uc7a+yEBl5rVN50Ko10Mk/fyPXh7ktFxaYOYqi8gpVuVL5XFT9cv2mYVVlSjxja7ncxrAosXRB7/JUMJv+mra565a/zDxJ9x6/pN1japLSmE9ef0K3HgZR84q59Dh6CjG8gxDDa32ThYYIxyQUEIhvAhBerXQHlGcrmzf0YlWiLJ9jz1djOV75PLxdmYL1y+pew+n85t2x6qhk49rUaoZ5IWWUYKmENjUAfquq/vD98pDfPNItHOc/4xfJZJgJlYOUBbrjV5/IJOynbz7Vhq/VDcUQE+G1gRgDe04ajoHzyvI46pXJSg1FDllj8+BxR1d45S98D69A3anK/WIi/PI3Ir8BF0PhVb2txef4ISGH8Eb9ImVSmfid6ywpvHKOV/WmF9tWhRlxUQ9BW076SE9YdV5tOd9s1gwp6ZLwbP1tdkRIajU+1Ua9zcbiOYdE5tJi3CEKf6eJ5PmsBOIKRb6k8SKfKxcl+m4eXk3mYZCVH371FOGhOU/DercqMveB7jnsgwAIOCYBSwmvSmCNSlBV5xThqNqoc7HN8arsqG10Qg2rtmrL+Um7detGxYuX0Iqd6pzutlGjxlK0vCTCjiVQfwF+aFCjRg0Zlkx5sm7btl0ImYPl2cKFixB7yHKeJQ7ly0UJr7w/fPgI2rx5k8gBe5ju3LkjQwqPGDGSmjRpzKdlYfGVz7OH7LFjx2VYYD6hvFYjWun/DgkJEbY6i7yuVyhlSif6tty35OzsTEeEHc4Pq4RXzlPLYYy7d+8uBN6uekbWrl0rRNexWuGVQypzuOPBgwdTy5Yt9doeOXKEfhOhk//44w9q166d3jkcRBCwpPDqJYT0zVu3R0LrkikztWzeNFI9KkAABEAABEDAnggsXLyMxk2aQi2b/Shzrqq5rVm/gXbs2kOjhg+RYWPLi1ytxspfcxfQtBmz6Pvq1WjB7Bl6zQ6KfK+dfulOvYUnbU/hUcuFn8f2HzxMO3e70+Gjxyg4OFjWTxo/hn5s3FDu45flCHwOwuvmLduo34CIZ/6irkUoT25+5k9LS5YtlyA+JbwaCr58UVwKr5bu33J3P24s7T3vK8MOc7S4Hg0KajuNSoBlb8rznv4yvyuv840SHq/5RMjhTxVjwuusrddpnfCy5XXNvC76dq6I3LNlxJpoj4Yfx+Qj1mPdxXilACnEVC6cWu1PkaKMwwor4VU5gKhxsVjZadpH4fXolcc0eMl5edpw3Zf7LSLCFncSjjCc59VSwqs565Zq3VM3yqCai7Etr9EmFnluDaNHGmuPehCwJgFD4ZXEm/9xWi7ceqAJfh1m0T5DQt9o2K7fsxcWtWuusYmzl2k69hmp4a1uEUKrrONz6ofbcL1uUdf/u/uQbjX2rUDgyp7Dmt+zlYnVD9swt3g9eqmp0HenRnh7Rrq0/rB9mraTjkSq/1SFeCjQHLjoJ+3WHLxH8+79e3lJ41EH9Owd9Xgk2+z576HWpGGbsasvyTaBL03/GzU1D55fjzmntH28CH4jbQ5eck5bF90d5sFjVKX9lKPSltfjl6pKw9NldvyjytaTPrLd6RtPVZXc9l1wRtbrVoaEvpV1I1dc0FZzvzwPIb5q63jnZUjEXAYu/k+v3tTBo4AQaavz9OORmt1/+ipS338sPCvrHj4L1mt/6voTWT9vxw1tPd9v/vlwy7X1vDNt01XZ/tztZ3r1OAABEHBcAi/D3mss8bPAfYWm6uA6mp4L+kdpj+v5/Mmbl6I8r8ag7PBW1cVmO3byDE2ur4toTp67HMne6InTNNP+WhCpPuBVmLymYvXakc7pjqXXH4Nluzs+fnrt/PyDZH2pcpW09bXqN5F1l294auuCQt9puI1uO7Z/4Nhp2XbFus0at5Hj5P7Dp8+11+mOgfefh4RrNm7bLdsVLlFW8+L12yjbbnc/JNv0GThUXqPscD/MaPnaTfI67ouP6zVpqQkU39eqHffTol0neW7Vhq2y/sbd+/K4/9BR2naq/az
5S+Q5no+qw1b/39vDZ88d9z8fzBwEQAAEQAAELEhg+KiIZyZ+hjH2U6RkWZM9btm+Q147aer0SO3WbYx4XlqzbkOkc1zx9u1bjRB45fXcj4haEmU7VMacgIgcoxk0aFCMDfC1bMNUEflY5T2cPG2GXrO5CxbJ+ouXLuvVN/yxpUb3c1WnQcQz/x3Pu9p278XizDflKssfVTnAbZi0FxAQIKuM9csn+fPcsl1HdalZ22kz/pLX37x9W++6uOpfr9PP4OB12FtNpd93ajr9eSzK0Ya/fadZsd9TU3foXrm2xmuEaq2w9YTorQfzOiNfx+uOumXX2QeyXkTz062O1r6vf7CG1yTZ7uYT3vIaY2ugIhyvbLd0b8Rn4oFYZ+TrZv577ZN9mbOWy2vcbPeaT+S/d8xZt/xPrF+ynX8/zEsNku/D3O03Iq19irRvsn3XGSdUU2xBIF4JGP7ND+HVwrdDiacssBoKq3ysfnS75Tp1HW9R4obAnB+7xlh45WtjUkwJlkOXnZdfGMeuPtYzvfGYl2bYP+c1LNJx2XfeV8MC6TXvQL12rcYflte/EQ8HXAxF1egIr5uPe0sbs7dd17Mt8srKMbANLqbmwV+SusIrP8xwHY/P3GIovKovbLapinjjS9q3hvA6Ya3+g/ZfW6/LvtRDixrDp7a//x0hpp69+VEIZrF08oYr0t7WUz5aE7v/i3gAG73qoraOxXQlOl/Vue/8uWC2By/5advyztMXoVKI5nO6rPQa4QAEQMDhCFhKCGNBlYVV/jFm81OiK1+nbESnrbF+dOtNCa9de/SVCxGnznvojXnr7v2yfsDQ0Xr1unZ5f/6SFbJd34HD9NoNGzNR1usKqiyI8s+T58Hatrv2H4nUju2ycMrXtu7ws9zyOHX7XvfvTk3v/m6aE//pi8lVataT9lg41m2v9peuWi/Pz5i7UHv+WdBrTdNW7WW9El65faduvWRd9z79NUdPn9ccP3tRo4RmXvxRwiuLxzwvrtMVlb39nsmxc70p0ViNzVG3hn+EOdx/QJgwCIAACIAACFiIwPUbNzXu+w5E+vm5W0/5nLJo6T+ag4ePanvz9w+QQqnwUtXWiRycUkRjkczP76P48erVK029xs2lHSWobd2+U/P7wCEaQyGuWq2I5zGRxkJrFzuWIfA5CK8swvJPcEiIdtL8ueNnYv5cqRJXwqc1hFeRukSza89evX8jXl7emr37D8oXEHiOb8Ua5IGDhzUsKH9uZcyqCOeT/24Zd1jYf9FXrrux8MfFbek5eazrEGJs3saEV0/fIGnj11knNaFvPq5v8lre8OUXNP/suyNN8noer//+veum1smGTxy6HOF8I/KeynbRFV55HVKtq+o6e7BDz5/CeUN46IoXSUQjUbjv6K7lmhJezVm3XOJ+W/bJfHTLzC3XZL3hevk2sZbKY2Q+KCBgCwQM/+ZPwIOypoutoe2Ltx9S/mwZKWXypIanYnz8Oiycbvo8EeE0U1Nm5zQxtmOpC3VDC5vK1crhibfsOWwy9LClxgQ7kQk89LhB0+vq5yiL3CpyDacx77VjGX3lWiDyyU/UmArR6xcQQh2mHpNhg+uU/opyi3ATHvcCifMIcAiJBb3KU6KECbShazm2f5MKOShzuhR04hqHHX5EdUUo4IEtInJQxCTUMIcwVvljyxXKSMVzp6fHIufrpmPe4t9sIlrWr6IMW2tqHoahhhlJX5GT9uwtf+Kco/zDYXijUwxDDY9bc5l2nX0oc8h+V+hL8g14LcfGtnRzIBgLs2Fujle2W6ZABnLNmU4mbj925Qlly+hEf4u8BU7JE/PpaBWVd4Abt6qaS9rge8b28ojwJLO6laXUKZJIW/w/cv9F/9Gp60/pu8JfUmkRYoRDoFwVYT9aV81Nv9b7mFP3omcA9ZgTEcaYcxrk+yoNBQS9EYweyJDR3Fe3euZ/TqM1KTQCARD47AhYKtQwT1yFFG5VpQ21qtzabBarD6+SOWBjen1UHZoKNfzff//R//73P3lZ27ZtKUeOHCK0ryetXbtG1q1Zs4YKFSoUlVlZx/lSOa8p53ktX/47KlGiuMjjelWEAD4kz6dPn55UqOGhQ4fSli1b6JvSpalK5cr08OFDWr16daR2skL8mj17NnGOVy4zZ82S18gD8eucyCXWsWNHYvsc3jeLyOvK4YI57HCjxo1p1MioUx54e/tQ/fr1ZJjhH0UYvvTp0tGePXvk+Nm2CjXM+xwqj+d269YtPpSFwxO3bt2aFi78WxtqmE+o0Mw8nrp161KSJEnkWB48eCDCKw+hFi2aRxjA70gELBlqOJJxVIAACIAACIAACNCkadNp3oJFtHvbZsqfL6+WSJfuvWjf/gP0a5fO9EffXtr6lWvW0dARoylz5szUtlULSpEiOW36dxtdvXaNmjRqSJPHj5YpJs6cPUct23Ug5/TO1K5NS8r6VRY6IMIR79y9h5o1bUwTx47S2sSOZQh8DqGGVZ5gzsn6fdUqdP/BQ/pn5SoJgD8rZ08ckvvGcrxaOtSvuTleo9P/shWraOSY8cShlP9dH/H3TPkqNejRo0c0Y+okkee4DqnQ3E5OTnTxzHFKlCiRnPfn8MtYuGE1dg47PP3f67TlhA+VzOtMjcpnp0V7bon0a8G0bVR1SutkWtswFmqY7c/bcZNWHrgr1wQ5lLHQO2U/nP5tfMeSVKFIJjkMlUaM047xOujL129JOIPItePVgyqJFGdO0Q41zAZ5LXLAonNyDbVGqSyUPlUyOnDJT+aDbSHCLv+mE3Y5umu5pnK8mrNuyXMVXrq0a3QNSijWv1W5K3K5the5XLnwumiuzKlIeNfS5uM+ch7zepajLM4pVXNsQSDeCBiGGobwaqVbwYIq/+gWzu/6dZ4cxIIrF+HpKrf8y5RAq22EHYsTuLr3CC3r9AeZ8/ZBh0WTqXCNSjEaC8flbzPxCLWvkYc6i7j5hoW/YBbtuU0nrz3VnmIRtnv9AvSFzhf6YQ/xkCO+/J8KUZQLi6INvs1OXerkpyQitj0XFl451v/SfhXk8TGRE3bQ4nM0tE0xqlkyS5RtuJLj+AvPTin88Rc+lxJ501NPkV8gb5aIFxtMzYOF15IiMf0MkaBelXO3/WmsEE15vLr5TNV5Y1sWonk8KlY/50Rl8ZXFaC48775NCsscr2xb5V3dfvo+TVx3hQzzG0QlvIa+eUc1BrlTTfHAMbR1RDJ21a9by6LacXN/LISykJld5JY1t/BDwcgVF8nX/7X2UhZ1h4k+de8tn3wi5uK27Dzd8HmhbcsPWcPbFKfkSfUfYlnUnbrxqhTpVeMszimEwJubGol8vCggAAIgoAhYUnhV+VnZtrniqRJd+dqtw3fwxiLlr7/+ogULFkiRs3DhwpFsenhcoTFjRmvFR27AuV2HDh1C+fLli9TesMLX148mTZ5EB/ZH5Ppm8ZEFzJkzZ9KLFy9o79698hLhKUFDhPiq2rGI6ebmJnK8Lpf5VZVAq+xzbtcmTZrIQxaIkybV/wN+3779NHHiBHktN2J7LKb27NEjUltlk7cHDh
yQ4xNhzWR1hQoVqGbNmjRs2LBI+WFFWDS6ffs2XbhwgVKlSkXclvO/9uvXj8aNG0f16tXTmmbRd8WKlXTx4gVZlz9/fmratCm1atVK2wY7kQlAeI3MBDUgAAIgAAIgYEkCImQszV2wkPZs/5fy5c2jNT1z9jyaPms2TZs0nho1+PhMww02bN5C/QcN0bZl8Yhztg4Z1F9PQNrtvo9GjZsoBSduzO1aNf+Rfu/T0+TzmNYwdswicO/ePfr7778pd+7cZl2nGt+9e5eqV68uf1Sd4fbuPS/6vk59+u3XrtS312/a0/MXLqGJU6bRlg1ryLXIx78pmrRoS7fFc7vHuVOy7cuXr+gP8dlxF8/qXPgzMWqYGy1etkJ8Th5rhdfBQ0cS5x8+d+oopRM5YI31yzY4x2s5kZt45dKFfGhWmT5rDs2cPTfS5z82/R89foLad+pKrVs2pzEjhsrx/PJbbznnzetWUbGirnTr9h1q2rKtFGdjMm6zJmnhxrweWMvNnVzSpxR/0+aiYsL5JGemVBRVnlfdrtvXyCvWdD/99+PoVZfI/Zwv7R1fM9JanvArpSXud2j/BT+6/zQiZzSv5f0q1mxvXwEAAEAASURBVBxZiFXlhVgTnbH5qnDI8FNV0jmnX9PCVCh7WllnbA1U5Xj9WawV//T9x/8Tuc/1R72kgwcb4LVVFjTbVssjHX5UR9Fdy124+xYt2+spnVQKZPtCXa7dRmfdkh2Bqg/YI511xnYoqb1W7fBcRiy/qGXF9Zwjd7hY42bxGQUEbIGAXQuvLsLbNZPwerWlEpUAqzs+CK66NOJnnz1ft46aTndPRywgGhtF7rIlqMGw3jHydDVm01h9uHir6rkQPdOnTqb3pWfYnoXIsPB3sp3hudges+fls6BQ6YlpKPbF1LaIvy/fGE2c6OObSzGxxXNmQfZTfGJi2/Aa5hD4KoxSJksc6UHJsO2njvnByk946fK9zSM8mk1x5bYPnobQ8+A39GXa5NKz2ZR9Ed2FHgW+lm97meONa8omzoEACNgXAUsKr0wmJuKrruhqrmBrqbsRFhYmF0MyZ85EyZIlM9ssXx8UFEQZMmSQ32nGDHA7FmSdnZ31Fu4M2ytvXPbE7d+/v+Fp7TELuux5y/2aU549eybnmTp11M/Ia9euFW8UJ5Tir+4b6j169JQevXy+YMGCkbpkT1kWbI3ZjXSBg1dAeHXwDwCmDwIgAAIgEK8ERF5WSpw4cZRjCA8PJ8+79+Q5Fmx1n4cML2CxLTQslDKa+TxmaAfHnybAXq8swMaksOiaK1eumFxq9jWhoWH0XDzzZ8xg+pnfHMP8mWTRMzrlqyxZ6Ov8nxYBo2MrqjZR/dsxrBN5jk3+u4nKrq3U/Tj2ED0W63SqVCvuQiLdmly34zr2Am1aISex8HfXL4gKCrHz2wIZVXOLbHmNMJFYJ1WR8KIyKsIEk//LMOlgkyyJvkNGVO2jU6fWlNMJr9cEJpZpLbWWa2rd8opXIP066xT1bFSQmlXMaXT4/kFhYl01hHIIgdwUL6MGcAIErEjAJoTXrBnTUoa0lnsbwf9FMN1/8ly45ztTaqfkVsQXO9PK05WtsOcrim0RuOp+hC7vEA92Zy9R4IOIN4nSZXWhXKWLUdG61alwzZh5udrWLDEaEAABEAABRyVgaeGVOeoKqXzMYqprDlcqkjMi7D3XsUC7+vBK8hBbVca1n6DXRtU70jbw+XO6feu29Ci9e9eTdu/eTVnEwklcl8GD3Wj79m30/fc1qHbtWiTy+tDRo0dlXaZMmWSIYhZmUWJHAMJr7PjhahAAARAAARAAAccgwIKrJQuLsJ9befbMn8pUqBKtYbdv25qGDxkUrbZopE9AhRquKFKiFc2VjhbsvEUpRJQ5dvRQgqtL+hT6F+HIKgQ47DKHX17Ypzx9nTWy16xVOoVRELAwgXgXXi/f8ZUefHmyZhDeXkliPb2wN2/pzoNnIhb6eyqSx4USmnpFI9a9wQAIgAAIgAAIgAAIfJ4ErCG8KhKGAqyqN9y6CkG2VeU2Di+6Mhfd3K6cF7ZZs2aGuOLkODAwUORxHStztep2yGGYp06dQhkzWvaNbt0+HGkfwqsj3W3MFQRAAARAAARAIKYEBg8e/MlLlZgaHZGW02Z8boU9Sk+ePhOtYWdxcaE8uePGuzdaA/qMGnGo38Ui3dvIn4pTtWIu1G7yUfJ69IrWuVUR4YchuMblrfxj4X8y5d2hybVNRn6MyzGhLxAwl0C8C69BwaHk5esvc2qy8Jr4Qz5KcyfC7fmN/Ndh4cTe8DmFt2saG/Z2jcn8cA0IgAAIgAAIgAAIWIqANYVXHiN7tnp4e4jtZT3vVhZbi+QsGskT1lLz+lzt+Pj40JMnTyhHjhw2IW76+/uTtxgTl9wiNFtakYMKxXIEILxajiUsgQAIgAAIgAAI2C8BFWKY87TGpnB+WA43rETa2NjCtfZJ4OytZ9R3/lnKKNJ7feWcki56BlDNUlloaOti9jlhG57Vk+ehxCnXMqeD4G3DtwlD+wSBeBdeeXyvQsLIPyhYbjmXZUxLkkQJKVXKZOScxkluY2oH14EACIAACIAACICAvROwtvBq7/wwPxCIDQEIr7Ghh2tBAARAAARAAARAAARAwPIEVh4UIW6335SGKxfNRN3rF4S3q+UxwyIIOAQBmxBeHYI0JgkCIAACIAACIAACNkQAwqsN3QwMxeEIQHh1uFuOCYMACIAACIAACIAACHwmBDiva5qUsU+J+JlMF8MEARCwAgEIr1aACpMgAAIgAAIgAAIgYOsEILza+h3C+OyZAIRXe767mBsIgAAIgAAIgAAIgAAIgAAIgIAjE4Dw6sh3H3MHARAAARAAARBwWAIQXh321mPiNkAAwqsN3AQMAQRAAARAAARAAARAAARAAARAAASsQADCqxWgwiQIgAAIgAAIgAAI2DoBCK+2focwPnsmAOHVnu8u5gYCIAACIAACIAACIAACIAACIODIBCC8OvLdx9xBAARAAARAAAQclgCEV4e99Zi4DRCA8GoDNwFDAAEQAAEQAAEQAAEQAAEQAAEQAAErEIDwagWoMAkCIAACIAACIAACtk4Awqut3yGMz54JQHi157uLuYEACIAACIAACIAACIAACIAACDgyAQivjnz3MXcQAAEQAAEQAAGHJQDh1WFvPSZuAwQgvNrATcAQQAAEQAAEQAAEQAAEQAAEQAAEQMAKBCC8WgEqTIIACIAACIAACICArROA8Grrdwjjs2cCEF7t+e5ibiAAAiAAAiAAAiAAAiAAAiAAAo5MAMKrI999zB0EQAAEQAAEQMBhCUB4ddhbj4nbAAEIrzZwEzAEEAABEAABEAABEAABEAABEAABELACAQivVoAKkyAAAiAAAiAAAiBg6wQgvNr6HcL47JkAhFd7vruYGwiAAAiAAAiAAAiAAAiAAAiAgCMTgPDqyHcfcwcBEAABEAABEHBYAhBeHfbWY+I2QADCqw3cBAwBBEAABEAABEAABEAABEAABEAABKxAAMKrFaDCJAiAAAiAAAiAA
AjYOgEIr7Z+hzA+eyYQ9iaMnFMlt+cpYm4gAAIgAAIgAAIgAAIgAAIgAAIg4JAELC68skEUEAABEAABEAABEAABEAABEAABEAABEAABEAABEAABEAABEAABEAABEHA0Almcv9BOOYFGFO0RdkAABEAABEAABEAABOySADxe7fK2YlKfCQF4vH4mNwrDBAEQAAEQAAEQAAEQAAEQAAEQAAEzCVjc49XM/tEcBEAABEAABEAABEAgHghAeI0H6OgSBD4QQI5XfBRAAARAAARAAARAAARAAARAAARAwD4JQHi1z/uKWYEACIAACIAACICASQIQXk3iwUkQsCoBCK9WxQvjIAACIAACIAACIAACIAACIAACIBBvBCC8xht6dAwCIAACIAACIAAC8UcAwmv8sUfPIADhFZ8BEAABEAABEAABEAABEAABEAABELBPAhBe7fO+YlYgAAIgAAIgAAIgYJIAhFeTeHASBKxKAMKrVfHCOAiAAAiAAAiAAAiAAAiAAAiAAAjEGwEIr/GGHh2DAAiAAAiAAAiAQPwRgPAaf+zRMwhAeMVnAARAAARAAARAAARAAARAAARAAATskwCEV/u8r5gVCIAACIAACIAACJgkAOHVJB6cBAGrEoDwalW8MA4CIAACIAACIAACIAACIAACIAAC8UYAwmu8oUfHIAACIAACIAACIBB/BCC8xh979AwCEF7xGQABEAABEAABEAABEAABEAABEAAB+yQA4dU+7ytmBQIgAAIgAAIgAAImCUB4NYkHJ0HAqgQgvFoVL4yDAAiAAAiAAAiAAAiAAAiAAAiAQLwRgPAab+jRMQiAAAiAAAiAAAjEHwEIr/HHHj2DAIRXfAZAAARAAARAAARAAARAAARAAARAwD4JQHi1z/uKWYEACIAACIAACICASQIQXk3iwUkQsCoBCK9WxQvjIAACIAACIAACIAACIAACIAACIBBvBGxCeH0VEkb+QcHE2/B372MMI0mihJQqZTJyTuMktzE2hAtBAARAAARAAARAwM4JQHi18xuM6dk0AQivNn17MDgQAAEQAAEQAAEQAAEQAAEQAAEQiDGBeBdeg4JDycvXnzRiCk7Jk1JiIZ7GtLx9/56CX7+hBMJAzizOlMYpeUxN4ToQAAEQAAEQAAEQsGsCEF7t+vZicjZOAMKrjd8gDA8EQAAEQAAEQAAEQAAEQAAEQAAEYkgg3oXXy3d8pdiaJ2sGSpYkcQyn8fGyN+Hv6M6Dp/RWeM4WyeNCCROwDIsCAiAAAiAAAiAAAiCgS8DawusVLw/y8PagK16XyUPsq+Ka05WK5CxKrjl466qqsQUBhyIA4dWhbjcmCwIgAAIgAAIgAAIgAAIgAAIg4EAE4l14vXj7IWX7Mi05f+FkMewBQSHk8ziQ8giv19TwerUYV0cz9NbvGL31PUTv/C+TJuSRnH6ClJkpkXNRSpylCiV2qeBoSDBfEAABEAABOyJgLeGVBdfVh1fqia3GsLWq0oZaVW5t7DTqQcBuCUB4tdtbi4mBAAiAAAiAAAiAAAg4CIEXwW/oC6ekDjJbTBMEQMAcAjYhvObPlpFSijDDliqvw8Lpps8TcnFOQ5nSp7aUWYvbuenprbX5dZ4c2n3sxC+B989vUZjHLCG4XjI5kETOxSiZaw9KmDa/yXY4CQIgAAIgAAK2SMAawiuLroOXDdROl71bW1Vuo+fZqjxhVx9aqW0HAVaLAjsOQgDCq4PcaEwTBEAABEAABEAABEDA7gj8e8KHZm29Tm/C31P+rGloYAtXypcljd3NExMCARCIOQG7Fl4zC9E1sxBfbals2XOY+MdYaVirMvEPSvwQYC/X0NODzeo8edlx8H41i5jtNH7z9j3de/RS5JdOTFnSp6SECU2HJn/yPJQeB74m5zTJyEW0RyRz27mXGAkIgID5BCwtvK4+vIqUmBpdIVX3mnHtJ+gJtObPKGZXhIWFkZ+fH7m4uFCyZMliZkTnqrVr19GbN2HUrl07nVrsgoA+AQiv+jxwBAIgAAIgAAIgAAKOQGDnbnc6deYs/d67J6VJY11nIS9vb9q3/xA9eOhLDRvUpRLFilJc9m+v9/PWgyDq9Odx4emahHJlTk0XPQOk+Lqoz3f2OmWrzOvdew35BYRQ+tTJKGUy0+knVx+6R6lTJKZ6ZbNZZSwwCgLWIADh1RpUo7BpKLgWyJtTr9WNO156xxBg9XDEyQF7uoYc6hyjvlJWWQjP1xiRi/lFj4QA6h8URnlcUlPypInMMhQc+pbGrblMRzwea69LlyopDWtTjL7Jn0Fbp3aev3pDo1dfojM3nqkqiqp9u8lHyevRK22bqHaOTq0TVTXqQAAEQCDOCVhaeG0wsq6cQ3RFVzVha4iv+/btoyFDhlJISDCtXr2aChcurLrTbh8+fEhubkPo/Plz2rrixUvQ2LFjKFu2mP9B9+OPP9KzZ8/o0KFDWrvYAQFDAhBeDYngGARAAARAAAQsR8Bt2ChavW69NLhn+7+UL28es4y/efOG6jVuTnc8PcnJyYk8zp0y63o0BgFjBKrXrk/3vLxo0bzZVLVKJWPNYl3PYmudBk0oODiYnNM708hhg+mH2jUprvqP9QTiyMDTF6F01fs5fZk2BRXK/oXRXu8Kp40rXs/p9sMXdPaWPz18FkJtq+emrj98TT3mnJbi6/ZR1e027PB7IZJev/9Cis1ZMxhPF3no8iMav/YyhYS+o797l6cC2SIzDXgZRpPWX6HjV59oeefMnIp6NypEpfI5a+vUTqBYk20wfD81LJ+dfm8a+e/6fRd8aeSKiKiVfzQrQg2+jfnf8qpPbEHAEgQgvFqC4ids6IquLLiyqGostLBuW4ivnwBr4dOvj/b4ZHhhY11y2OEUFWcZO416KxCYve0GrRFvPC3p9x3lNSOcx+uwd9Rn/hn5YFWuUEaq7JqZ+EFr9aG78sHgz66l9cRXjYZo8NJzdOzKEyqRNz1VK+5CL0PCacUBT9l+9m/fUtFc6eQMl+/3pGdCDDYs/IDCYUhSJk9Ee8bWNDyNYxAAARCIFwKWFF6VeGqu6Komrq7n0MRjhedrTMvz589pwoQJtHPnTq2JVatWUZEiRbTHvPP06VOqX7+BFGbbtm1LefLkoVu3bkmRNmVKJ3J33yPeQI9Z1BQIr3qocWCEAIRXI2BQDQIgAAIgAAKxJHDi1Glq2+HjS/W7t22m/PnymmV1xl9zacZfc+Q1EF7NQme1xvfu3aP9+/ebbT9XrlzymurVq5t9rTUuOHf+Al3yuEKtWzSn5MljH23H2BjXb9xMA9yGUd9ePahb184iwltC2TSu+jc2Lluqdz/vS6NXfkwz17JKLupevwC9ffdeiqxXvAPprt8rOnPzKb0IDo809OxfOlHj77LTUvc79PrNO9o/oVakNvZSESTWQesO3UeVXDPR2A4lI03rhTg/Y/NV2nveT3tugRBeCxoIryFhb6nluMPEYmqtb7JQsVzp6YlYk1135J5cY53y8zdUtkBGrQ3eOXblMQ1acp6Gty1G35fIoneO7bQcf0heyyf6/ViY
GpXLrtcGByAQXwQgvFqZvK6QOqB7ez3B1ViOV66fOHuZHBnEVyvfoA/mYxJi2HBkCDlsSMS6xzEVXg97PKIhSy/IL/jBLYtSwg/xgu/6vaT2U44Rv2W1tF8FSvQh7PDWU/dpsngTq5F4s6qfzptVN8SbXj9PP0HfFsxIkzt/Y3Ky/MbX0GUX6H+18lLHmvlMtsVJEAABEIgrApYSXnXzum4dviPGw1ceszENOfzq1SuqV68eBQQEUNOmTSlx4iS0du0aikp4Xb58OU2ePJkmTpxEderU1o55zpw5NG/ePJo6dSrVqFFDW2/ODoRXc2g5blsIr4577zFzEAABEAAB6xF4HRpKtes3ppDg11SjelVas34DmSu83rp9R9qoVOE7ev7iBXnevQePV+vdsmhZZsE1JqKrrvGff/6ZlAirW2+v+3/Omk2zZs+jbZvWU+FCBex1mrGaFwuJnIKseaWc9J/wYr3m81ykvklLV4QHLAknDFVSiRC3ZYQYmFusFxbJkU62mbP9Bm065qOayPVCXje012JKeOWogq3GR4ip9YW3aeJECWjzcR+KSnhV66NtquWmX+p+rcXF3rRdxBprteKZaWS7Etp63lHrv+vdqlDm9Cn0zo1aeVGKvex9vGL/XQivenRwEN8EILxa8Q4o0dXQy5WFVT6nG17YsA0Pa9Kcf2QbiK9WvEkfTIeeG0Nv77vHqqPE2WpS8lJDzLbBbwXN33GTzt1+Rr7+r8lVeE/y2zk1S318i2fyhivk6fuS5vUsp2d/03FvYmFwfMeSMucon+QvvMV7btOZW89k2FtO8l6+0JfUoUZeKSbym0ILxfl21fJQ9RIuevb4y+ysuG7C/0pR5nQRX2b8pXjwkh+duvGUsn+ZisoLobFZxZyUKkUSvWs/dfDgWTAt2HlLPsAEh4aLcBNpqW6ZrFSz5Md5zt95k05ef0qjxJfsoj23ZJ/JEieS4/+13tcyZAe/HdXtr1MyDwCHrsjinIJSiFwAA5u7RhnCwnBc0zdfo43HvKN8ABiy7DwdvvyYFvQqRwWzp5WXdp99iu74BtE6t6r0RUr9Ob8IfkOc6NWwXrdPzlnQbtJR8n8ZShuGVBU5CfRt6LbFPgiAAAjEJQFLCa/KW9Ucb1e+xjWHq15OV7dlA8nDy4PMsaPLiwXXJk2a0KhRo6hSpUo0c+YsWrjw7yiF1wMHDpC3yHnE3q5Jknz8f3nXrt00YEB/GYK4hXgL3VR5+fIlLV++gg4c2C+9ZStUqEC9e/emwYMHRwo17O3tQ7NmzaRLly7Ry5evpAdu48aNqG7durKLgwcP0uzZs6lDhw5SPNbtd8bMmXT0yBGaPn06Zc2aldird8aMGXT69Gl68OABcXjk5s2bRbpO1wbvv3//ntasWUPbtm2nq1evUI4cOcW1xahv376ULl1E5AZ1zbFjx2jLli109OgxSp8+neDaVIy5ME2ZMoV69uwp+aq2HuKNfW574sRx4UEcQhUrVhRidh0qX768aoJtFAQgvEYBBVUgAAIgAAIgEEsCU/6cSXPm/01/TZ9K167fkPvmCK/8vNSibQdir8DD+3ZTjz6/Rym8BornsclTZ9DxU6fo/v0HVKpkCWrTsjk1alAvljPA5VERUMLruHHjojr9yTp+PmePV1Ner/cfPKSu3XtSK/Fc/SY8nFatWUdPRJScqpUr0Yghg+iZvz+NnzyNDh85KtKSZBVhe2vJPK2JEkV4kvIgOKfqlD9n0fmL/Mz/koq6FqHmTRtTw/oRz/zcZqYQQ3e776U1y5fKHK+6/SYQDgAbNv1Ll8XzdeFChei3X7tQrRrmeeoGBARS246die1ymGEeq1PKlDTMbSB9W6a01fvnOX4O5blYz6s/bD8Vz5OeZnUrS5tFlLppG6/KofO6bK5MqSjfV2mosBBi8xmJsvfrrJPSMzYqL82oGKg1T16/XOx+m05ee0q/NSxALSpFeGVHd/314KVHYrzedOFOgFwTLZY7vRQxOVcqF3Y4WSK8cHuJ0L3Hrj6WHqM83yI501G3egVkyjbd8bFjyc6zD+iQsJsiWcQaLK9L5xAMuKw74iWj+N1/GiyP82RJLT1ZB4h5cGGv0/Yi9dqglq5UruCXtGDXLVq+zzPKdVdex/5z0zUa06GEjEAoDYhf4cLLuFr/PXJNfI6ILKhbOkw9Riz8bhpaVbdaeCI/o34LzlKTCjnoO7HuzfvweNVDhIN4JgDh1Yo34H99R0nri6cN0+tFCapcyYKrEmB5v3+3n7RtdT1fDb1ltY2wYxECwe7NSRPyKFa2EqTMTE4115llg79YWogQC0+fh8q3etKmSkZHhTDKx7pfFr3mnaHzt/3JMD+o+jJb3r8i5RRfiBzStr34QuI8owVEboIs4k0gFkxZoGSRc2ALV+JY+g1HHKDS+Z1pWtcy2vGGhb+j7we6U7aMTrRqYCVZv/+CH41YcVGGyC0tcp96PwmWtssUyECTOn2j9QrVGjGyw/lYm405JM9yv0mTJNLG8h/SuijVKvWVPKfeVGIxNZEIg5JLvE2m8rAqz9JQEb5jysYrdM37BfGXPtdzQnsWktVDgZFh6PWxckAlISTr5yWo2G9XRJufSlDVYpnlfi03dyosRNjxQozeK8KQ3BYCOOd3LZnXmQrnSPtJBnvOPaQxqy5Tlx/yU7vq5uWVMTUPnAMBEACB2BKwlPCqBNPoeqoqoZbHr+shqzxnYxpu+N27d3KBI23aiBdnTAmvUbHjXF49e/aSAuLmzZtl+OGo2nEdL8r16tWbDh8+JITJ9FS6dGk6e/YshYZGhJvnsGEqx6uvrx/VFgszXMqVK0dJkyaT1/Hx2LHjRMjjelKorVatGn0j7CxetIhPyRIuFn0qVqwkxc8dO3bQ27dv6YcffqDHjx9TrVq1ZN8sIvMx57RlAdZY4RDM7P2bKVMmKlWqlBBfrwnx2UuKuSycKgH6lFhA7NKlizTD4+Vy8uRJKdRyezVmrufwzOzhy4XHnjxZMmLRlgt7DkN8lSii/AXhNUosqAQBEAABEACBGBO443mXatZtSJUrVaQlC+YIASxChDVHeF0vRK8Bg4fSgN/7UtfOHalRs1aRhFd+Pqtc4wd69OgR1a1TS+bPdN9/QB6PHjFUCrAxngQujJJAXAiv6vOTOXNm+TdFlUoV6OatOzLPb2nx7Hztxg1yEecKFSxAx0+cIv8AfxnGl8VRLg99falitYhn/grly1Ey8Vy8/+AheW7qxHHUuGF9uT9wyHBat2ETnTt5RL78qNsvf6aqV61C3j73Zb98wca1K6lEsaLy2uj8ehEURKPGTqCLlzxkLlkWjtOm/YI6/tSWihQuRNbuPzpjtJU2rSceoftinZPX/zjP6xOxFsultfDGZOHRxcC70nDcTcccpCARgnjv+JqGp6I8VmuevKYY9vadSFuWnn4onVX2H931111CIB23xkMKrhWLZCIfsS7KAi7b3CCEyaSJE9I24aDDOVQzpk0u/n58L8Xli54BUiDlFGibhlYjp+SJ5Ri9H7+iLjNPyHVjXusNF+1Z0GV7i0U
0wAxpxOdYrA2zmMuCL9dzu+wZU9FP30escbLDySvhBKScUtRadVQer4/F+vCPYn2Y14bHdSxFyZMmkuNgBxl2lOnZqKB09lEAX74Opx+G7KM6pb8ijlqoCq9h/yTEXhaU2cnlus8LCK8KDrY2QwDCq5VuhfJ2NfRW1RVTdQVZJdIaCqzKjqEoa6VhO6zZV/9GCI2xBZCq0RGzTJy740+9557RJmTni5+LN4X4yziT8DhVbw9FV3hlz8xp4s0hFgU7184nx/L2nYaajDogv2APTqotQz70X/Sf/GLWTfx+8voT6r/wHHUT+QxaVckl8542GXVQfpnP71me0oovVy7KY7Rv00LUuHwOWfepX/xgsO30A2pZOZfMB8DtvcSXO3uCKkGV69RDSI2SLjSkdTEZBlg3ZMXm4dXklz63VaEmzM3xym+FTdt4jXo0LCjDibAtLizo1hjkLvc53AWHvVChNHg8N+4HSaFXNvjwi7/4WcxW4Yp1z/E+P7C0GH9YeCHzm1kfH2wM2+EYBEAABOKDgKWEVxUiWFdENTYfXdE1KqHWHFvG+lD10RFeOdfrhg0bxeJKkPTu5GM3Nzcphio7UW23bttGQ0S7MmXK0vz58yhRokRSjO3Tpw+x9yqLsUp4ZVFz48ZN1L79T9q37D09Palx48bEXrIc3phL999+k56tLKRmyJBB1ikRlAXhzp070ZkzZ8S2M3Xq1EkIv71km8DAQBo4cCBlzuxCI0eOkHWGv1hU/uXXXym98GwdM2aMyCeVXDbp338A7d69i1asWEFFixaVcyhf/juZ+1bVccMbYqGpefMID2Bd4ZVFYPa6XSTEYhafuXD+rYYNG8p99spNkUI/HJQ8gV8E4RUfAhAAARAAARCwLIGf/teFjp04SQf37BAvjGU3W3hlwapC1Zr0ZcaMIjzxJvlSWlTCq8oh+0uXTtS/b285CfYy7P37AMqSxYUmjBlp2YnBmgwzzOKrNT1elQDKOX23bFhDuXPlJBbZW7TtKETMS1Tz++o0e8Y08dyfkB489KVK1WtR8WLFaNPaFfIObdi8hdau30idO7bXeqnevuNJteo10r4MwA2NCZ98buvGtVIc5f1FS/6hsRMnU9vWLWnUMDeuMqv8OVOEGp4jvGu3ixzHeT/mOI6r/s0abDw1vng3QKxDXpLOLzwEF+EE8jrsnVyX5WNTAqxyXmHvT05XFp2i1jxZ6B3aphgl+eAt/VTkOI3u+itHAPS4F0g7x3yvjajHXqT7hDjK+WnZQUQJr+xYw5ET04jofSyODvvngnRuGaXjbNJ6ghCfhXg7t8e30iOW56HCAfM4uS0XtT5qLMerbPThlynhlZvsv+gnhGEP4giHhcR42VmHozz+WDEHdf3ha60Yy23ZmeiPv/8ToqurEF+zcpUsy/cLj1oRVXGoWDvmiJHK+1XXiUm1xRYE4ouAofBKmjguF2490AS/DrNoryGhbzRs1+/ZC4vaNcdYxz4jNfxjWIR3q6wXOVz1TvExt+fzhsWYLcN2OI45gZebK2os8WPuCER4YU2Fvjs1fRec0QS+NP7voOfc07KdoX0RpkLW33v00vCU3vGfm67KduLLVNYfuOgnj3eeua9tN3b1JVkn3j6SdYcuR7TZ899DbRveefU6XLYbueKCtl48JGii+tE2MLIjcgBo6g/bpz3LNpnHdZ/n2jreGb/msqy/4hWorf9r63VZd/uh/r/zqMbBdaqIkMfyOu5n83FvjV9AiIbtKsZc//eum7L5zfsvtG15nBfu+GtC37zV3HzwQvO/acfkuRX7PZXpSFsh8so2aw7djXQOFSAAAiAQ3wRehr3XWOKn6uA6Gv4xtLXAfYWm54L+2no+Vm1P3rykrde9Tp3XrYvp/tjJMzS5vi6iOXnucpR9sd1T5z1kG27HP5269dKcvXTNaHs1lv5DR8n2l2946rW9++CxrC9VrpJevbpOd1ulZj2NbruN23bLa5eu3qC9VvVz3dNH1u0/elq2adOxq8bnkb+2na5dc/Y373CX9las2yxtedy8K4//cBsRyfboidPkuVUbtspzaq49+g2K1HbS9Dmy7fGzFyOdM2d89tz24TP9Z53Y/H8ghHXNo8dPIv0EBH58boqNfVwLAiAAAiAAArZOYNeeiGeaaTP/0g518rSIZ8Gbt25r60ztjBg9Tj6/HDt+Utus4Y8tNUVKltUe844QXmW79p26avz9A/TO4cA6BPbt26cZNGhQjI3ztWzDVBEiqbyvvfoN0Gs246+5sv7g4aN69dVq1Yv02dBr8OGA231TrrL21AC3YdKeSJMi61S/nX/5TduGd4TXq/ZzpncimgfTZvwlr795W//zH1f9R3OYNtGM1/4q/b5T0+nPY5pw4b3C63z1xBogrw/yj0iFpvH1D9Ebq6dvkDzntvScXr2pA7XmKZxm9JqZs/7a7a+Tst8jHo8070TYw6jK1pM+ss2aw/prkbv/eyDrVx6IWMf0DwqVxxPWXo5kpvP045qag/do60WqNdl28JJPz1etVYucudrrdXd4HLzGqviq7cwt1zTCcUW3qUbZEp652npex+VrRKhnjUJw+sZTWcfrsCggYCsEDP/mT8ADi0sV+OLth5Q/W0YRyjTCm84Sfb8OC6ebPk9EwuXUlNk5jSVMmm1DhRO2lMeroR2zB4QLTBKIr1DDnMS9z//ZOwv4KI4vjj9cAhQIFooE1yBFiru7Q5DSlgJtKZQChUKCuxYpLS7FoUALFAkarEjRwB9IIATX4Bas/3kTZtlL7i53yV1y3P3m8+F2b2Z25DvX7mZ++96beYhOBt+X4yuSMw2VEQHba4jYqx5pk2tjttTilS9g1xh+R69JNxn3H7+UbyUpP/zL+leiLOncNOvOsgXTS5fBbJnZYPA26VJXuR+eszmQFm69IMfAbov16axw4cDuJdYNrS5jytbx2aov1s7Z3YZyG8GB0v1FvNggYZXLb0o9Eu4gOKYtJ+VCWb39tXlkTc3tBZev2hNCU/88Q790+5Q4dgEnYxavbB1ryVhOiLfavpt+ULajPtjdhneVnDR3c5D2JtW10GfURriC5hTRRYZy2cxsZn8fOY4dW9C2GrlLXrvSp4rGQWbgAwRAAAQcgIC9LV6VC2J2HVzYswgt27VEztqYpavCEdsWr/zYy9agj4SFw/79+2n+/AUUHHyBxo4dJ+KU1lHDinRs3bo1nTlzho4fP07xhWt8fSpTpqywKH3vapjLTp06RWKzR1iOnqOHDx+Kfw+kpSiXnTx5kg/CTfELYUFbWrojnjlzJrHr5PLlK1DevHnp998XyjphYWHUtevXdPToEfn9k09KiDrlhfvhuvTxx+Fu+2WBkQ9uf+PGTTLOLMe35VixPFdOHBe3SZMmtHXrVurdu7e0+uU56tPmzZupb9++mqvhffv20TfCipbrd+zYUV9VWiWw9W9U7o8NLnKxL7a0eN21ey992eWbSAT1VhiRCpEBAiAAAiAAAk5C4Ll4xqlepyG9evmK/LdvouTvvG1Y42pYiLNUt1EzadU445fJGhljFq/8PP
bZl13p8JHw5zF2Q1u5UnlqWL8eZc1i/nlMaxgnVhGITVfD33btTH1+6KGNb8bseTRu4s+0duVSKlokPKYlFxr7bXBs1k1+28TfCWfpgXjm51jAHAOYU/DZAHk0ZXHatXMn6tc73IJaVhQfOfN70aciLuuy3+epLIuP1lq82rp/iwfqIBVHLjtJm/+9RpO/Lk0l8rjTaxEaboV/CC33v2jUApbDovksOCq95bHXPEuS2vP0G1VLxlJV11i6/8r19/3vNv00N/z/Pbwvy14EKxfJRKXzpdMsaJXF6/jOJeUes+on8Ooj6vTzPvqqbh7qWCO3ZiXK5cb2fTl/zaCqlP6jpDazeFUulT1FaLk+zQtRbhE/966w+F2xO0Ra6ka0qP166j908dZj2jyiFsWLxyMiGrzoGO04fpPm9ipPeUUcXk6weJUY8OFgBCJavEJ4tdEC6V0KR3QfrERZ7opdCJ89HyJ75XN9jFfOVC6I9W6JZWV82JTAiyMj6PWVcDez0W04YVYhMpbwtfpyFl/3nb4tfOXfoIPnwuOxciN6NwqWCq8Xbjwm8faT9M3PcVLZxUTK5IlpjfCVz0kJr3w+dmUAbRDuf9k9hfKFr1w0cPm0dWdopXjI4Jtvbo+UnKWlUyL2QWkR85Xd9XKc2t9F0HRjiW/kCRPEIxXnlOtwe9lFbNWPxLg4QDuniMJrxIcQ5es/KuHVkrHIDsUHu3RmtyKB1x7Rx+7JRQD49HIdOA6CcrGhgrvzNf4T6kRyKcwPLPzgoheYVfv8cDZ93Vn6oVlBalbeMpfM6locQQAEQCA2CNhKeFUCqzFBVZWp+Riro8piGuNVtaOOlrgaVnXVkeOTfvvtt1SsWHFN7FRl+mOTJk2laHlCuB2Lp/4CfFehZs2a0i2ZcjW8fv0GIWQOkKWFChWmHDlyyDhL7MqXkxJe+Xzw4CG0du0aEQPWn86fPy9dCg8ZMpSaNWvKxTLxZh+X+/n5iXiq+6RbYC4YPny45uI3vOb7z2fPnom2vhJxXU9R8uRuVKZsGXJ3d6fdoh2OD6uEV45Ty26Mu3XrJgTeru8bEGcrVqwQoutITXhll8rs7njAgAHUpk0bg7q7d++m74Tr5B9//JE6dOhgUIYv4QRsKbyGCCF97boNkdB6ZMxEbVo1j5SPDBAAARAAARBwJgJz5i2kUeMmUJuWLWTMVTW35av+oL83baFhg32l29hyZT5VRZGOnbp2o53+u6VL1xyenlr5gMFDpXA2f/YMSieenQoVzC/L+Hls+05/2rjZj/z37KWnT5/K/HGjR1CLpuEhF7RGcBJjAh+C8Lr2r/XUu1/4M38Rr8KUKyc/86em+QsXyflHJbxGFHz5otgUXm3df4wXPZYb2Hr0unQ73KqyJ3VvVEDr3ZgAW0UInUcvhMr4rlnSJadhHT+hPMLlcFTJlPBq6f6rav+yiEvrJ8bLIeN4T5JT3iyp6OeupaVbYSW8TuxSSgqy6jre/+w06b3wuufULRow/6gsbvDpeze+nMH7voXFnnKnOnllyDdbuRruPGU/sTGPPpScGp/wRCjjy64SxiuZRHxddvtca4AfVRfGSUPaF5PVhIdE6jLlH7m33LVuXnUpnRb5czYFUdPy2ah8wQwyfm6yJOHxY7VKOAGBWCYA4dWOwJXAGlFQZVGWY7cqwZWHwHXYqjVfrvcCjboe1q52XKR3Tb++sZdeHAx/QIpub0k/HUUJPSzz62+qD/a5z29Nsd99tsDcNKKmFPt6CavYw4GhpI/Jym0o8XRR34rkmTEF/brhLC3bedFAtOV6o5afpE2HrxkIrxxYvfuvB2VcgZMiPsBf+y+T3tKU3/TiN75Gfv6JFpeV24pO+nziXumvf3G/ikJ0TSGbYNv6xkO2y3O2nOVk6iHEUuFVNhLFBwuuHHw9k4ihq6xx1SUq9u1fQ6pR2pRJZHaz4TtlvAc9G1W/0eDtFPb6jcGbV1zGlrfNhu+Q8QpW+VahJIlws1fMcAQBEHAcArYSXlXcVrZsHdlxTKQJKvHVnOjKF6l2vKu0I+/KbSO1Y22GOeF18uTJ5OaWgjp3/sqgWY7jVEJYLWTJkkVYh240KNN/8fX1pXXr1kkL0YwZM2pFT548oXLlyhnEeG3RogUFBgYSx3pl0ZUTW9pWrVpVniuBlr8o4ZNFVLaoXbp0qRBX91KqVMY9uLBVLG9E9enTRwqq+/fvi2SBy+36i43E7t2/oyYiruzgQYNkTFrO5zENHDhQE17Z8pfjzrJAvGjR75QwYUKuJq1v2dL20KGDmvB67do1YRVcl1qK2K8DBQ99WrBgIU2aNNEg9qu+HOciTpKIK5zZ3dCjCLiAAAiAAAiAAAhYT2DI8NH0+5KlZi/kuJ0BRw6YrMMWsxdDQkyWc0G9OrXpl8kTItXh57EtW7fTdz17i+dLNzpxeL/R57FIFyLDYgIfgvBar3FzOnsukLZuXCdFV54cP/OXLh/+zH94/y7OMhnj1dbCp7UWr7buX072A/pgr3W1ffwoj7CgnNOzfKSRKwF22a5gevj0lSxnq82Qm08omzAuWdKvUqRrImaY2vOMyf6rcLtLU/86Q3tP3SYV39RS4VV5+WtVSYjNwrDGXLKV8MqMOektWFW/P6/9nzQcmvJNafoktzsdOR9KPX87RH1aFKLGZbPJair+rLrG1HFBnwqUK4IRkam6yAcBexGA8Govsu/aVeIpf41o+coCrEp6wVUvzEYUbVV9HG1P4Pme7vQm9ES0Gk7gXpSSVZxm9bXsYoGtXJuWy0YFsqXWrlfBzXeMqy1dRUz583/0x55LBiLo4+evqMWIndK6VQmv6iauLDa5QXY93GH8bllPb/Eq/OBTk6E75FtC7Hq3YuGM5OtdVBtDsLCe7ThhL3nlSCPemiqliYd3H4WRiK8qb2AdqufS6ps7UTfWvwZX18ROFSBduSzm69X4LbF4nbsliBb4nafhHYsTv21madp27DoNXXxCBm3/vklB7TIRD4D6iIDtyv2yKli6M5h+23BOvOWVhz6vmVtlk3KPUfMTDxrULvzNK1W4YOt56bK4b8vC1LBMVpWNIwiAAAg4FAFbCa/KUpUnt27w30bnyHUKC2HWXFJuhqMSaM21oS8zJ7yyUMkWo8uWLRMiYyHtMmXF2apVa+Em10fLj3jCguiYMWOoadNmNHToEK14/PjxQrBcZCC8suthTjt37qBk71zfKcvatGnTkl54ffv2LVWrVo3y5ctH586do5IlS9KECe83+DZt2ixcIu8jHp+XeJNepYYNG9GlSyF0RLi8S5QokcrWjqtXrxbjHEo9e/akL7/8Uubr3RYri1cu6ClcBO8QYm69evWkJSu7Ul65cqUUmrl85MhR1LBhA7mRVLZsOWlxqxeVRbwqYaHbjPhoTjTmtlw5QXh15dXH3EEABEAABGxJgMWuK1evRWpy1Zo/adv2HeTbv6+weM1BVSqFvyh/7959OnDosPyePHl4iCcRt1VYrT6L1Mbg4aPo5s2bNHP6VMqYIT2xJeP6vzfR7r37qb13KwPXs0q8PRdw1OjzW
KTGkWExgQ9BePUqUUbO5+DenZq7axUOwj2tOzmD8Mp/P4hYt1RMuFzOlCn85dNLly5T0IVgqlq5ony5843wirdbWIFnzZqFcufKafEaO0LFiO6GjY1ph/BWOPj345qLYd+FR0VYtVukNzQxdh3nmdrztHT/lcXhSWtOU4bUSenL2nk0r3z+ATfJd8Ex+rZhfhFCLYd02cve/KKyeGWDmDq+4ULo/F4VxEuh4f8/ZKOgaULM5X1nnzZFxIsk8bSQdVnTu9HSn8yLzLM2BdIi4RkxYsg2ZqDcBEfcy+W95i+F0c59YSyjvAqqvdWFQkTN+U5E5XpnhXVrxHT26kMZLo/3YMsJj4bFhXDrljT8JeKIdfEdBGKLAITXWCCtF1/NWa/qBVceFkTXWFgcXRdvHwTSs12Gli+6YrOnyavMofip37s4MFtZV6hijbL42KxCdmmFuV/469954ibVL52Ffmodvkm9V7h/6P/O/QPfRDyEy4UNB69oMVKV8Lrp8FVh3RogbpbJxPVZ6aWwxlz3zxV54+Ju9cIrf58j4pkuFCIhp4g3ZM6b8fc5WrIjmHIJlxksbvLNly1j+UY4+otPqIIQay1JyuK2eO600uXD9XvPNffH0RVeT4Xcp2+mHZCxZusJVk3E20/siiKq9PrNf9RurL9kx+4qOHbDMfEW1dajN6SV8dJ+lck9Vbi1K7elYrXynGuVyCzfugoS7jnYCpetkqd9U0a69FD9ioDz1GDQdkovHoRWDKisxVhQ5TiCAAiAgKMQsJXwyvNRVq3RtVa1tbUrj8mc8Prvv/9qAmT79u0pe/bswrXvBeFOdzlfSsuXL6eCBd+/nCMzdR8cL5XjmrJVarly5al48WIijutpYVm6S9bSC6psUcrCZMlSpahK5crElqIs+HLS15MZ4mP69OnEMV45TZ02TV4jv4gPFla/+OILeR27980s4rqyu2AWkdmadZgQV40l3hBhsZTdDLcQbvjSpklDW7ZskePn+nrhlV3l8dzYSlclvq5t27Y0Z85sTXjlMr2AXL9+fbnJyGO5evWqcK/sS61bt1JN4BiBAITXCEDwFQRAAARAAARsTGDcpMk0Y9Zc2rx+LeXN8/4l6i7dvpeC7DddvqIfe31vtldjcTwPHT5CbTp8TiymdWjXhrJ8nJl27Not3A5voZbNm9LYkcPMtolC6wl8CMLrj/0H0uq1f8qYrDWqVpEvAyhLbGcRXhcuXkpDR4yWLyD8uSr875lyVWrKlxOmTBwn4hzXpZ3iv4VOX3eT1t/HD+3TPO1Yv+qxf4VyN9ysQjb6oen7l3PVSNjqdfKfZ+S+KFtkNimXjeZuCaRLt57S+mHVKbVbYlXV6NGU8MqVLd1/Ve54KxTOIPdXHz9/TQu2BhkY21hq8cr98h50PxEzlvdma4o9z7QpkhCLy+zCuHVlT/pO53ZZeWPkOKz8r3aJj7mJSMmc8Kq8L/JFtUtmlhbG9x69FF4ar8q9Zu+qOejbBuEu3Xmu54Sgumm48AYpxF9zSRn3KKtfc3VRBgKxRQDCayyRZtfC/E+fWFhlS1dl+ap3PWxOoNW3gXPbEoiOy+GYuhjmN5OmiBv3HWGZyonFvEZlslEX4as+UcL42gQX77hAi7ZfkDdTzqxb6mNKLW6M7FpYvVn1VryuNF9YgbIlqEp8o+QbFNdb1r8ycfwBlS7dekLtx+2RN1j2r58gwo1MtcfWnVfuhMcsYVH3G3ETtMbKlF3vsvjKbpQ58Rx7NSskY7zyvJWr4eFLT5Dfkeva201qnMZcDbPF7vg/Tsk4tVxPuaJQ15g73nscRiNEX+y+WSUWSvlNLhZiIyZ+o2r0ipN06OxdrYhdigwQ9QtkNXQTqMRsH+8iVKek8YcQrRGcgAAIgEAcErCl8Kq3erVWfFWiK6MwZTEbHUy//PILzZo1K5JVq2orIOAUjRgR7tJX5XFs14EDfSlPnjwqy+Tx+vUbNG78OGkdypVYRGUBc+rUqfTw4UPphpjz2f2wrxBf2YqUE4uYPj4+tHjxIhlfVW/xyuUc25UtRjmxQJw4seEf8Nu2baexY8fIa7mOElN7dO8eqS6Xq7Rjxw45PrZE5cQuhWvVqkWDhOvhiPFh2fI2KCiIjh07RilSpJB12Q1y7969adSoUdSgQQPVrBR9Fy9eQsePH5N5efPmpebNm5O3t7dWByeRCUB4jcwEOSAAAiAAAiBgSwLjJ02h32bNoS0b/qQ8ud9765o6fQZNnjadJo0bTU0avX+mMda3MeGV623220bDRo2VghN/ZxfD3q1aUJ8feph9HuO6SNYTuHjxIs2ePZty5oyeBWVwcDBVr15d/jPVe/DFEKpRtyF9901X6vX9d1q1mXPm09gJk+ivP5aTV+H3Ylyz1u0pSDy3KxfWjx8/oR/7+5KfeFbnxL+JYYN8aN7CxeJ3ckuzeB0wcChx/OEjB8RenIgBa6pfboNjvJYVsYmXLJjDX61Kk6f9SlOn/xbp9x+T/vfs208dO3Wltm1a0YghA+V4vv6up5zz2pVLpQV4YNB5at6mvRRnozNuqyZp48rK3bBH2uTScrRozrQyrJtyM7zc/yJx+LKIqaPwjveV8JIXVTK158nXWbr/+vDZK5qy9rQ0HlH9cXzX3s0LUcF3nhTZUGfsStMWr53FfvNnNd7/P5H3fFftCaHTIq4rJ96zbVs1J7Wvlstgn/hIUCiNFHu7vI9bJGcamt4t3MpbjUMd52wOlNans3uWo/wR9ky5zgXhYXHi6tMUIMLeqcR7zd6iTzaq4fTy9Vuq3m+LFHg5BF5USXkx/FF4HmwkDJaQQMARCDi18Orhnooypk3pCJy1MRgTYLVCcQLBVU8jbs7Z8jUsYFqUbofZvXASr+7RsnQ1NjMWJ8NevdFiixqrw24g7j56QSmTJdJc9hqrx6JkqBAXUyVPpLkINlbPmjx+uEiQIJ7s25rr9HV5fhwXgOOnRhR59fWsOWcLVn4Iihiv1ZI2eCwhQnxOkSwhZUufghKK+ZlLLNheD30mxGs3KXqbq4syEAABEHB0ArYUXnmu0RFf9aKrtYKtrfiyyyzeDGF3WUmSvPd4YGn7fD3HRk2XLh3Fi2f6PsL1WJB1d3c3++a3ssZlS9y+ffuaHAYLumx5y/1ak+7evSvnmTKl8WfkFStWyLhkLP4mSJBAa7p79x7SopfLCxSIHAOILWVZsDXVrtYQTiQBCK/4IYAACIAACIBA3BF4/fq1Fss+JqNgse1F2AtKb+XzWEz6dNVr2eqVBdjoJBZdcwh307GRXrwIowfimT99OvPP/NaM5dWrV8SipyXp48yZKV/eqEVAS9oyVsfYfzsR8zjusf7vCGPtOGpei5G76Jbw0qdStWIedFR4yVOCKxu3NK/gSYHCE17wjUcybFyZ/OlVdZscLdl/tce+r9qXTiOsXs38WUuvhCjKf/dGtYcaFQzez715/7k0BoroFlh5OezRpAC1rOgZVVMoBwGHJOAQwmuW9KkpXWo3mwEKffiUrtx+INyjulNKt6Q2a9fWDSlLV25XH+PV1v2gvegR
YOvX19d3CQH2JP337KZsJF7yTJTAvQglzFyFEnpUiF7DuAoEQAAEQAAEHICArYVXnpJeSOXvLKZ6ZfcyiO/KAu0y/yUUII4q2Squq2rvQzzef/CAggKDpEVpcPAF2rx5M2UWGyexnQYM8KENG9ZTjRo1qU6d2sRxmvbs2SPzMmbMKF0Uc9xXpJgRgPAaM364GgRAAARAAARAwDUIsOBqy8Qi7IeW7t4NpdIVqlg07I7t29Jg3/4W1UUlQwLK1XBF4Ua3SI40NGtjICVLnEAakCjBlUO/IdmfAIe9Y/fLc34oR/myGHoatH/v6AEEbEMgzoXXk+evS8u3XFnSCYu1RDGeVdjL13T+6l0Ri/ItFc7loQWajnHDaAAEQAAEQAAEQAAEnIiAPYRXhSeiAKvyIx69PL3Iu3I7A2E2Yh1X+a6P7cpxYVu2bBknU79//76I4zpSuhDWD4DdME+cOIHSp7ftG936PlzpHMKrK6025goCIAACIAACIBBdAgMGDIjyUiWmWiLSctiMDy2xRek/Bw9ZNOzMHh6UK2fsWPdaNKAPqBKHbpu3JYiGflaMqhX1oA7j91DIzSe00qcKQXCN3YX8cc6/dODMHdo1vo7NPCbG7gzQGwiQ8Jr5kDK7v39xIN5/IsUmmEdPX1DI9VDiTll4TaiLaWntOPiN/Odhr4idvHkKa9dUDmztau3cUB8EQAAEQAAEQAAEbEnAnsIrj5MtWwMuBYjjSQPrVhZbC3sWiWQJa8u5fYhtXb58mW7fvk3Zs2d3CHEzNDSULokxccopXLOlFjGokGxHAMKr7ViiJRAAARAAARAAAecloFwMc5zWmCSOD8vuhpVIG5O2cK1zEjgceJd6zTxM6VMnpY/dk9PxC/eoVonMNLBtUeecsAPP6raII8txbzOlgYWxAy8ThhYFgTgXXnl8T56FUeijp/L4Soin0U2JEsSnFMmTkHsqN3mMbju4DgRAAARAAARAAAScnYC9hVdn54f5gUBMCEB4jQk9XAsCIAACIAACIAACIAACtiewZKdwcbvhnGy4cpGM1K1hAVi72h4zWgQBlyDgEMKrS5DGJEEABEAABEAABEDAgQhAeHWgxcBQXI4AhFeXW3JMGARAAARAAATEDbiqAABAAElEQVRAAARA4AMh8OjZK0qVPOYhET+Q6WKYIAACdiAA4dUOUNEkCIAACIAACIAACDg6AQivjr5CGJ8zE4Dw6syri7mBAAiAAAiAAAiAAAiAAAiAAAi4MgEIr668+pg7CIAACIAACICAyxKA8OqyS4+JOwABCK8OsAgYAgiAAAiAAAiAAAiAAAiAAAiAAAjYgQCEVztARZMgAAIgAAIgAAIg4OgEILw6+gphfM5MAMKrM68u5gYCIAACIAACIAACIAACIAACIODKBCC8uvLqY+4gAAIgAAIgAAIuSwDCq8suPSbuAAQgvDrAImAIIAACIAACIAACIAACIAACIAACIGAHAhBe7QAVTYIACIAACIAACICAoxOA8OroK4TxOTMBCK/OvLqYGwiAAAiAAAiAAAiAAAiAAAiAgCsTgPDqyquPuYMACIAACIAACLgsAQivLrv0mLgDEIDw6gCLgCGAAAiAAAiAAAiAAAiAAAiAAAiAgB0IQHi1A1Q0CQIgAAIgAAIgAAKOTgDCq6OvEMbnzAQgvDrz6mJuIAACIAACIAACIAACIAACIAACrkwAwqsrrz7mDgIgAAIgAAIg4LIEILy67NJj4g5AAMKrAywChgACIAACIAACIAACIAACIAACIAACdiAA4dUOUNEkCIAACIAACIAACDg6AQivjr5CGJ8zE4Dw6syri7mBAAiAAAiAAAiAAAiAAAiAAAi4MgEIr668+pg7CIAACIAACICAyxKA8OqyS4+JOwABCK8OsAgYAgiAAAiAAAiAAAiAAAiAAAiAAAjYgQCEVztARZMgAAIgAAIgAAIg4OgEILw6+gphfM5MIOxlGLmnSOrMU8TcQAAEQAAEQAAEQAAEQAAEQAAEQMAlCdhceOUGkUAABEAABEAABEAABEAABEAABEAABEAABEAABEAABEAABEAABEAABEDA1Qhkdv9Im3K8/0TSvuEEBEAABEAABEAABEDAKQnA4tUplxWT+kAIwOL1A1koDBMEQAAEQAAEQAAEQAAEQAAEQAAErCRgc4tXK/tHdRAAARAAARAAARAAgTggAOE1DqCjSxB4RwAxXvFTAAEQAAEQAAEQAAEQAAEQAAEQAAHnJADh1TnXFbMCARAAARAAARAAAbMEILyaxYNCELArAQivdsWLxkEABEAABEAABEAABEAABEAABEAgzghAeI0z9OgYBEAABEAABEAABOKOAITXuGOPnkEAwit+AyAAAiAAAiAAAiAAAiAAAiAAAiDgnAQgvDrnumJWIAACIAACIAACIGCWAIRXs3hQCAJ2JQDh1a540TgIgAAIgAAIgAAIgAAIgAAIgAAIxBkBCK9xhh4dgwAIgAAIgAAIgEDcEYDwGnfs0TMIQHjFbwAEQAAEQAAEQAAEQAAEQAAEQAAEnJMAhFfnXFfMCgRAAARAAARAAATMEoDwahYPCkHArgQgvNoVLxoHARAAARAAARAAARAAARAAARAAgTgjAOE1ztCjYxAAARAAARAAARCIOwIQXuOOPXoGAQiv+A2AAAiAAAiAAAiAAAiAAAiAAAiAgHMSgPDqnOuKWYEACIAACIAACICAWQIQXs3iQSEI2JUAhFe74kXjIAACIAACIAACIAACIAACIAACIBBnBCC8xhl6dAwCIAACIAACIAACcUcAwmvcsUfPIADhFb8BEAABEAABEAABEAABEAABEAABEHBOAhBenXNdMSsQAAEQAAEQAAEQMEsAwqtZPCgEAbsSgPBqV7xoHARAAARAAARAAARAAARAAARAAATijIBDCK9PnoVR6KOnxMdXb95GG0aiBPEpRfIk5J7KTR6j3RAuBAEQAAEQAAEQAAEnJwDh1ckXGNNzaAIQXh16eTA4EAABEAABEAABEAABEAABEAABEIg2gTgXXh89fUEh10PpPzEFt6SJKaEQT6ObXr99S0+fv6R4ogHPzO6Uyi1pdJvCdSAAAiAAAiAAAiDg1AQgvDr18mJyDk4AwquDLxCGBwIgAAIgAAIgAAIgAAIgAAIgAALRJBDnwuvJ89el2JorSzpKkihhNKfx/rKXr97Q+at36LWwnC2cy4Pix2MZFgkEQAAEQAAEQAAEQEBPwN7C66mQAAq4FECnQk5SgDhXycvTiwp7FiGv7Hz0Utk4goBLEYDw6lLLjcmCAAiAAAiAAAiAAAiAAAiAAAi4EIE4F16PB12jrBlSk/tHbjbDfu/RM7p86z7lElavKWH1ajOurtbQsWPH6NChQxQUFER3796V00+XLh3lyZOHSpcuTcWLF3c1JJgvCIAACICAExGwl/DKgusy/yUGYqspbN5V2pF35bamipEPAk5LAMKr0y4tJgYCIAACIAACIAACIOAiBB4+fUkfuSV2kdlimiAAAtYQcAjhNW/W9JRcuBm2VXoe9orOXb5
NHu6pKGPalLZq1ubtnLtwSWszX67s2jlO4pZASEgILVu2jM6dO2d2IPny5SNvb2/y9PQ0Ww+FIAACIAACIOCIBOwhvLLoOmDhT9p02brVu3I7A8tWZQm7bNcSrR4EWA0FTlyEAIRXF1loTBMEQAAEQAAEQAAEQMDpCPy5/zJNW3eGXr56S3mzpKKfWntRnsypnG6emBAIgED0CTi18JpJiK6ZhPjqSOmvLf7E/0ylxrUrE/9DihsCbOU6ZcoUqzr//vvvXcb69e1//9HOEzcp8OojSpQwPn1VJ49VrA6cvUNnLj+glhU9KUWyRFZdi8rGCbx9+x9dv/dcvGGXiFKCqXFIyAUBEDBKwNbC6zL/paTEVEuFVP01ozqOMRBojQ7aDplhYWF048YN8vDwoCRJksS4hxUrVtLLl2HUoUOHGLeFBpyXAIRX511bzAwEQAAEQAAEQAAETBHYuNmPDhw6TH169qBUqexrLBRy6RJt276Lrl67To0b1afiRYtQbPZvisGHns97op1+3if34XJkSknHL9yT4uvcH8p/6FOL1fG/EfuZN+49o7Qpk1DyJObDTy7bdVHseSakBp9mNRjjy9dv6abYE82UNhklFvvUSCDgSAQgvMbSakQUXPPn9jTo+ez5EIPvEGANcMTKF7Z0HTJkSLT64uuia/l68/5zCn0URrk8UlLSxAmi1b8lFwXfeEx8Q8qf9SNLqhut8+uGs7Rs50VZ5pUjDf36XRmj9Uxl8ttgK/1DaKVPZfJIm9xUNYfKj8n6LNh6nuZuDjI6n7IF09O4TiWNlu06eZNGrzhJz168odk9yxldsxcv39CkNadp0+FrWhuZ3ZNR7+aFqXS+dFoeTkAABEDAFAFbC6+NhtaXXVkquqpx2UN83bZtG/n6DqRnz55KLxaFChVS3WnHa9eukY+PLx09ekTLK1asOI0cOYKyZjX8g06rYMFJixYtZIiCXbt2WVAbVVyVAIRXV115zBsEQAAEQCA2CPgMGkbLVq6SXW3Z8CflyZ0rym6nTp9Bk6dNN1qvauVKNHem8TKjFyATBEwQqF6nIV0U+49zZ0ynqlUqmagV82wWW+s2akZPnz4l97TuNHTQAKpXpxbFVv8xn0HstHDn4Qs6fekBZUidjApmM71fGnzzMZ0KeUBB1x7S4cBQunb3GbWvnpO61stH3X89KMXXDcOqO63bYTb6OHPloRSbs6QzHS7Skv3Me4/DaNyqU7Tv9G1tkT0zpaCeTQpSiTzuWp46uf/kJTUavJ0al8tGfZqH/13Pgu2IZSfpZPB9VY14n9rHuwh97P5h7DdrA8eJ0xKA8BoLS6sXXVlwZVHVlGthfV2Ir7GwOLouRo8eHaV7YV11g1N2O9y/f3+DPEu/TF9/lpaLN3fm9y5Pue3oluLziXvpwvXHtGdiXUuHFqke3+jCXr8RwmlV+ii59RarH6LwGpP1mbD6NP0l3I80qxDZlXiOjCmoiXho0KeHz17RlLWnaevRG1r2LCG8FjAiln/7ywEKuHifKnllpFJCaH0gHkSW7QqWYu0v3T6lojnTam3gBARAAASMEbCl8KrEU2tFVzUudT27Jh4pLF+jmx48eEBjxoyhjRs3ak0sXbqUChcurH3nkzt37lDDho2kMNu+fXvKlSsXBQYGSpE2eXI38vPbIt5Aj57XFAivBqjxxQQBCK8mwCAbBEAABEAABGJIYP+Bg9T+86+0VjavX0t58+TWvps68R0ynJYuX0mftWsbqUru3DmpvXfrSPnIiD0CFy9epO3bt1vdYY4cOeQ11atXt/pae1xw5OgxOhFwitq2bkVJk8bc246pMa5avZb6+QyiXt93p2+7fkXx44dbA8ZW/6bG5Uj5fkev0/AlJ7QhtamSg7o1zE+v37yVIuupS/cp+MYTOnTuDj18+kqrp06yZXCjpuWz0QK/8/RcGEdsH1NbFTnd8ZHYr6w/cJvcgxz5+SeR5mfpfuazsNfUZpQ/sZhau2RmKpojLd0W4vfK3RflfuaEziXp0/zpDdrfe+oW9Z9/lAa3L0o1imemu8J4qd1Yf1m/VSVP8hT7q+eFsdGavZdEKMsE9IdvVXgENCCIL3FFAMKrncnrhdR+3ToaCK6mYrxy/tjpC+XIIL7aeYHeNR8dF8MRRxZdl8MxEfYijsHc95gKr2xhWbO/n8mbrLm+VZmrCa/95x8RD2mPacWAKgqByePTF6/Je3T4w0fDMlkpYYJ4tHbfZTImvJ4Vb5l1nrzf4G0vbvjfwLv0w8zDUuj9oWlBk32hAARAAASYgK2EV31c13WD/442XGUxG12Xw0+ePKEGDRrQvXv3qHnz5pQwYSJasWI5GRNeFy1aROPHj6exY8dR3bp1tDH/+uuvNGPGDJo4cSLVrFlTy7fmBMKrNbRcty6EV9dde8wcBEAABEDAfgSev3hBdRo2pWdPn1PN6lVp+ao/yFLhtUu37+mceBHPf+sm+w0QLUeLAAuu0RFd9Z117tyZlAirz3fW85+F9fY0YcW9fs0qKlQwv7NOM0bzYiGRPQOyePevsGL9nwiNVtgzNZ0SFrD03/umUwgXt6WFGJhTWGUWzp5G1mGPgGv2XtYq9RaWmBGNK7RCJzgxJ7xas5/JFrEDFx6jdtVy0tf182lk2Jq2i9jnrFYsEw3tUFzL5xO1b77Kp4p0KbxCiLS//HWWhrQvRtWLe2h1524JkiL48I7FqUqRTFo+TkAgrghAeLUjeSW6RrRyZWGVy/TuhSPW4WGN+/V3WQfiqx0X6V3TM2fOpH/++SdGHZUtW5a6du1qVRsshrJ7BHYpy25ikwmf9j+18tJcyx46d5eW7Ayms1ceUJKECaiQeADoWCO3Vq4647ira/dfomPn78l22NqRb2DsJ/9E8D36ee3/pLUr18+VOTyGxJye5aW4x3ks5G08fJV2iXaSJUlA5QpmoCZls1F28dYQp0XbLxC/CRZy84n8zm1kS+9Gwz4rToHXHtGo5Sepvbhp8ptHKoW9ekNdp/5DZQukl643OD+6wutF0S/Pb69wQ/H0xSuqVtRD3KRzke/Co/Jm+nnN92+v8ptPq/eG0L9BoXT59hMqnstdjqFRmWwUL54aHRG7yVgrrFE3H7lGZy8/pKxiPvyA9W2D/JQ6RWLit7DYqtTc+rxvzfhZ5yn7ZYyB6d2idsnMb3t1HL+H+rfxEuPNQLM2BdKibReMCq8cT+LfoLtUWTxI6F1o3H7wgpoP30lVi2aSa2N8VMgFARAAgXACthJelbWqNdaufI1Xdi/x/10vbTl8Fv5EASEBZE072sXihAXXZs2a0bBhw6hSpUo0deo0mjNntlHhdceOHXRJxDxia9dEid57cNi0aTP169dXuiBuLd5CN5ceP35MixYtph07tktr2QoVKlDPnj1pwIABkVwNX7p0maZNm0onTpygx4+fSAvcpk2bUP369WUXO3fupOnTp9Pnn38uxWN9v1OmTqU9u3fT5MmTKUuWLMRWvRyP/uDBg3T16lVi98itWrWMdJ2+DT5/+/YtLV++nNav30CnT5+i7Nk9xbVFqV
evXpQmTRqD6nv37qW//vqL9uzZS2nTphFcm4sxF6IJEyZQjx49JF91QYB4Y5/r7t+/T1gQP6OKFSsKMbsulStXTlXB0QgBCK9GoCALBEAABEAABGJIYMLPU+nXmbPpl8kT6X9nzspzS4XXJi29KUniJLRiyYIoR3FfPI+NnziF9h04QFeuXKUSnxSndm1aUZNGDaK8FhWsJ6CE11GjRll/sbiCn8/Z4tWc1euVq9eoa7ce5C2eq1++eiWtn28LLznsZnqIb3+6GxpKo8dPIv/de0RYkizCbW9tGac1QYL3cSU5puqEn6fR0eP8zP+YingVplbNm1LjhuHP/Dx4dmm92W8rLV+0QMZ41fcbL348+mPNn3RSPF8XKliQvvumC9WuaZ2l7r1796n9F18Rt8tuhnmsbsmT0yCfn6hM6VJ27z9aCxQHFz14+pIaDtpOxXKlpWnffir3BycJr3Wc2GVtZhEztLBnGrkPm8eEd8Jvpv0jLWONWWkam9LMjefonzN35L7vPL8g+ud/d+i7xvmpdaVwq2wWJXeeuEEHzt6hbBlSUDmxn9qyoielSPb+71Vu19weMJf7B9yk+cIK93vhunfv6VvEFqM8X54P73lyqDt9impPeOXuEPpT7J9eufNUXsZ7wuyZr5/Yv+ZkzX7mmn2X6Oc1/6MRnxenyl7vxdFXwsq4Wt8tkn3EsHa8d87C75qBVWV/e8R8eCwthWCeSPff3/ZjN2jI4uMiBJtzi+ASAj4+CAIQXu24TF/2GiZbnzdpkEEvSlDlTBZclQDL532//Uyrq7d8jWgtq1XCiU0I9OnTR26SxqSxdOnSyQ1Ja9oYsewE/e/SQ3nDKCNuqB+5JaIOQlBkwVO9BcRuEkrlTUf3Hr+UrmW5/UV9K0pXCny+SQimo5YHSMG1YuGMdFncfPjmnUaIh3+Im1LIrSfSZQPfmFjgZVcOnPq3LkIJxEPdJVHeZep+WVY6fzp6Jd72YgGXr5/XuwKlS5VE9nFQiMB8E+N8rsfxD7rUzSuF3e+mH6QeTQrIBwLZuPh4HvaGag3wk28f8VtInKIjvHK8hU6T9skbOd/cs6RLLuIp3CX3lEklt/qls9BPrcNv9vyWVbfpB6TIzPEB2J3vQeEShOf9ea3c1Kl2HjkO/pgsxOjVwg1F+tRJqVjONEJ8fiTbYwF8cb9K9ObNfzRh9SmT66M1ZOakmRBBi4gHts9q5CJ/8QDFrkeKCDca3F/EBycOKv9EjF+5cDYnvJrqcr54cJu35Tz1bVWYGkYIOG/qGuSDAAi4LgFbCa9KMLXUUlUJtUxebyGrLGej6274zZs3coMjderUclHNCa/GVv3ly5dCVPxeCohr166V7oeN1eM8FjG//74n+fvvEsJkWipVqhQdPnyYXrwIk5ew2zAV4/X69RtUR2zMcOKXtBKLDT2+jtPIkaOEy+MG8hmkWrVqVFK0M2/uXFnGH6/Epk/FipWk+Pn333/T69evqV69enTr1i2qXbu27JtFZP7OMW1ZgDWV2AUzW/9mzJiRSpQoIcTX/wnxOUSKuSycKgH6gNhA7NKli2yGx8uJX05joZbrqzFzPrtnZgtfTjz2pEmSEIu2nNhyGOKrRGH0A8KrUSzIBAEQAAEQAIFoEzh/IZhq1W9MlStVpPmzfhUCWLgIa6nwWq5KTSopBFQWuzZt2UrPnz+nkiU+odIlS0iBTA2Mn88q16xHN2/epPp1a8v4mX7bd8jvw4cMlAKsqoujbQjEhvCqfj+ZMmWSf1NUqVRBWECfp/MXLlAp8ez8v7NnyUOUFSyQn/btP0Ch90KlG1/+vXC6dv06VawW/sxfoVxZSiKei7fv3CXLJo4dRU0bN5TnP/kOppV/rKEj/+yWLz/q++XfVPWqVejS5SuyX75g9YolVLxoEXmtJR8PHz2iYSPH0PETATKWLAvHqVN/RF981p4KFypI9u7fkjE6Sp22Y3fTldtPpfECx3llYwZObYVhCRujeAjx1VxqPmInPRIuiLeOrmWumlY2bMlxGVqM91U5jBvvD9YrlUX2r0RDtQd8SYyLjV94/3Vcp5Jy/5YbimoPOHHC+LT+wBUZQ5X3O1+LPV4Wl49fuCf3Vbn9NQOrkVvShHJcluwJ89hYzGXBV+0JZ0ufQu51ciPW7Gfeuv+cWozYJfa53WnUFyUoaeIEchy8P8v7tBH3lh8/f0X1fLdR3VIf04A2pv874L3sn4TXwUNn79LvP1akHGJPGAkE4poAhFc7rYCydo1oraoXU/WCrBJpIwqsqp2Ioqydhu2yzbKFiS3SggULrG5GuUyIGOOVbzhB1x9JgZTFRk7rxM1zvAhA/k2DfNS2ak6Zp2J9bhxRQ/Nhz28QbRM3Ro5NUCh7+Aa0KVfDbceIBw0h1v7WvYx8+4kbVaKv3nJSuRqO6PaBLWrtKbwO+v2YvLl3b1xAuv/g8T0RN94uU/6JJLyqmKpf1c0jLYO5Lo/7a/EWGse3nf5dGSmE8g259+zDlNotsbxxqxs9vxnFDxQzepTVuJlaH27bXPpPuCWp1GeTFMSvhz43qMoPPzO6lxXidVKDfP0XS4XXhdvOyzmeFPFeOah8cxFPlh9U4uvNe/UN4xwEQAAE3hGwlfCqXATrRVRTkPWiqzGh1pq2TPWh8i0RXjnW6x9/rBabK4+kdSd/9/HxkWKoasfYcd369eQr6pUu/SnNnDmDEiRIIMXYH374gdh6lcVYJbyyqLl69Rrq2PEz7S37C2IDp2nTpsRWsuzemFO3776Tlq0spPLLXJyUCMqC8FdfdaJDhw6J41fUqVMnIfx+L+vcv3+ffvrpJ8qUyYOGDh0i8yJ+sKj89TffUFph2TpixAgRTyr8/tO3bz/avHkTLV68mIoUKSLnUK5ceRn7VuVxW2fFRlOrVuEWwHrhlUVgtrqdK8RiFp85cfytxo0by3O2yk2WzPyGhazogh8QXl1w0TFlEAABEAABuxL47MsutHf/P7Rzy9/ihbFsVgmv/4k/4HMVKCKtA9mCVZ9YiFu9bBF5eIRbZ6kYsl936UR9e/WUVdnKsGeffpQ5sweNGTFUfznObUAgNoVXNzc3+uuP5ZQzh6d8CbJ1+y+EiHmCatWoTtOnTBLP/fHp6rXrVKl6bSpWtCitWbFYzvCPtX/RilWr6asvOmpWqkHnL1DtBk20lwG4oinhk8vWrV4hxVE+nzv/dxo5djy1b9uGhg3y4Syr0s9ThavhX4V17QYR4zh3bu3a2Opf69CBT46L/cxhIsbrnXeCq4cwxGAjkgfCIx0ncwIse/mr8ZOf9Cq4QBitWJKU8Mp7rQPbFdWsNdngpNmwnXL/cGaPctILH7enDEZ6NS9ITctll11YsgeshFf27Mf7m6mSJ5LiKO+v7g64JT3k8Rg4WbonbM7VsGxI9xHVfub24zeEMBwgPTsWFHvWN4UYy3u2LSpmlx4T1R4tN8nWvz/O/lfs3XoJ8TWLrheSsV7XH7gs9ohfS0vi0McvqFezQlS7xMcG9fAFBOKKAIRXO5FXQqpeXOWulPAaUUhVVrARhVe+xlRbX
IZkGwKOKLwam9m9x2HUeMgOgzd92MKTBbdRX3xC5QtlMCm4GRNeVXsNPs2iuYhQ/bKbXHbVu2Vk+Jtb9hBe2ZWEsQD1icQbWsrys9Hg7dL9ccQ4qermq7d45QcGFmX/HFLNgMOR86HU87dDBoK1mqf+qARn/Q3dlPDKLo2NJbYQ5qTeyuJzdnPBcQf4AY5dJi/eHixdGy8RlrWm9NGoHlS4XU4Ve7+PP8NWvt8KUf7TfOkpvrBmRgIBEAABcwTsLbyyyHoq5CSN7DhGDiMq0ZUrxbbwevr0afL29tYwVRMuyL4WYQPy5zcfB2m4EC9XrVwpXezqY0WxcMsuzPTCq9Z4hJOGDRtJwVcJtH5+fsQeOAYPGULNhctkTqqfjRs3SsvUQ4cOSwGWxdHRo0dFchEcoYsov27btk26Gh4+fLgUS0NCQqhRo0bSinXQIEOPLezqeN68eZrFq5prgwYNadSokQZ9/fbbDPrtt181QdegEF8kAVsKr2x5c+/+g0hkEydORGneWYBHKkQGCIAACIAACDgRAXbd+m2PXtS929f0Q/ducmbWWLyylWDx0uXldcMG+1LD+nVFnNhntHjZCvpt1hzK4elJ2zatE3+/x6N/Dh6idh07UaUK5WnSuNHSM4kToXTIqcSm8NqoQX2aPCH87xeGwa6BJ4t4qfNm/UZsBatS9ToNiV0RBxw5oLKMHrneo0eP6fD+XbLclPDJlq6zf5umtXFZvABQpWZd+TtbMGeGlm/pibXCq637t3ScjlCPw3l1nrKP8nycShpJrPAPoeX+F80KsME3HlPHCXtFCLCMNKLjJxZNQwmvC/pUMHD3y9akvguO0cC2RalWifch3NirXx2frVTzEw8a1K6Y7MOSPWAlvOrdGPPFW0SotRFLT2p7o9bsCdtSeOVxTF93Vlrg6sG1quwpPRUmF2H4VFJ7o7x/mi2Dm8qWR3aR3FnEhVWpkldG6ihC0eUV64gEAo5AAMKrnVZBCam2sniN2I6dhu2yzcaVq2EGbkrY4zKOaXrsQiidF2/+sKDIDwOc2F2wr3dReb7vf7fpp7lH5Dm7fGCXxRz7s3S+dNrbU1xoTHjlGLK9Zx2W1+bP9pE8qg+Oe8ppzaCqlP6jpNKqsmZ/v0iBzmNi8arEU9WnOrKVLr+VFSrEzSZDdxiNWareCFPCq3ogKVswvXTDodriI8cbYAFXb63LQvK2Y9eJ3Ymwe2Z+iFAxbDnOKrv74GRsfVRfskKED3Yxwm9ncZ1Ve0JkPN4y+dMb1FJWvMYeHFRF9XAxq2c5GTtB5Uc8svXuS/GP14EtoveJ30zjctmojxB7kUAABEDAHAF7C6/KBTG7Di7sWYSW7Voih2PM0lWNM7aFV7ZuYGvQR2Kzbf/+/TR//gIKDr5AY8eOE3FK66hhRTq2bt2azpw5Q8ePHxcvuryP68QVy5QpKyxK37sa5rxTp04Ri5xnz56jhw8fin8PpKUol508eZIPwk3xC2FBW1q6I+bY8+w6uXz5CpQ3b176/feFsk5YWJiIJ/81HT0aft//5JMSok554X64Ln38sfk3e7n9jRs3yTizHN+WY8XyXDlxXNwmTZrQ1q1bqXfv3tLql+eoT5s3b6a+fftqwuu+ffvoG2FFy/U7duyor0q8OcbWv1G5Pza4yMW+2FJ43bV7L33Z5ZtIBPVWGJEKkQECIAACIAACTkLguXjGYXHr1ctX5L99EyV/523DGuH1yZMnNP/3JeQlYtrrxTVG9F3PPrRx8xYhvK6XVpD8PPbZl13p8JHw5zF2Q1u5Unkh1tajrFnMP485CfJYn0ZsCq/fdu1MfX7ooc1xxux5NG7iz7R25VIqWsRLy+eYwBeCLxoIrxybdZPfNvF3wll6IJ75ORawsqAOPhsgrzUlvHbt3In69Q63oFad5MzvRZ+KuKzLfp+nsiw+Wiu82rp/iwfqIBVHLjtJm/+9RpO/Lk0l8rjTa2EoYk6AZctRnwVHqZ1wS/x1/XwWzUIJr36jakkDE3XRnM2BtHBr+N9lxvZmea933dDqsrole8BKeB3fuSTp9yN5T7nTz/tIeQm0Zk/YVsKrcqnMhiO8b5lbxM+9Kyx+V+wOkS6SWTwd+fl7Ifvrqf/QxVuPafOIWpEMV9jTIBv0PBb7uYdEmLmluy7KfV0Od8fGL0ggENcEILzaaQWUZSs3H9GKVYmyXMaWr6ZivHI5rF2Zgv0Tb3By/LKYJI6D1lVYyVibjAl73MakNadp7b7Lsjn2fZ8xTTLpGmLT4WsGwitXuCx8//sdvS5cK9zWxNm8WVLRz11LS5cSXMeY8MpxXwfMP8rFxFav+nRKCJKFhQDaqU5eGefVHhavV+8+FW9cXdd3K88zCKG3YZmsmthbPHdamvrNpwb1Loi3yz4Xb5cp4fWhCBTfYNB2iniT5otYtK4rYgKwRfCYL0tIy9MeMw4Si8sc36BknnSUJmUS2i9EbHYxEpXwyjf237eFPxQZDEp86VgjNyVMYN7a9O9DV2nMioBIb7Pp27JUeNVf81A8bDQYuE1mKQFYX45zEAABENATsJXwqgRWY4KqKlP9GqujymIa41W1o46WuBpWddWR45N+++23VKxYcU3sVGX6Y5MmTaVoeUK4HWPLB32qWbOmdEumLFnXr98ghMwBskqhQoWJLWQ5zhK78uWkhFc+Hzx4CK1du0bEgPWn8+fPS5fCQ4YMpWbNmnKxTLzZx+VsIbt37z7pFpgLlNVqeC3Dz2fPnom2vhJxXU9R8uRuVKZsGXJ3d6fdoh2OD6uEV45Ty26Mu3XrFumZZsWKFUJ0HakJr+xSmd0dDxgwgNq0aWPQ4e7du+k74Tr5xx9/pA4dOhiU4Us4AVsKryFCSF+7bkMktB4ZM1GbVs0j5SMDBEAABEAABJyJwJx5C2nUuAnUpmULGXNVzW35qj/o701biC1Y2W1suTKGewqqXlTHVavXUj+fQdK6tUmjBrI6P49t3+kvBFk/8t+zl54+fSrzx40eQS2ahodciKpdlFtO4EMQXtf+tZ569wt/5i/iVZhy5eRn/tQ0f+EiOdGohNeIgi9fFJvCq637t3x1HaPmVrGnym6H2eqye6MC2qCMCbBVhLHLUWEkw/FdOTTcMGHxmidzSu0aUyemhNdp687QSmFly6Jrbg/DdnhvtnTedMTh11SKag9YCa8Tu5SSRjnqusBrQnid9F54tWZP2FbCK3tX5L3YtYOryb1mNTY+9vjtIB07f49W+VShTCK+LnsNrDXAT4qoLKZGlZRxj1eONPSrCDWHBAJxTQDCqx1XQAmsEd0KsyjLsVuV4MpD4Dps1ZovV7jPds5T18PalWnYNx07doymTJkSo05487F48eJWt2FMeFUCGvvjn9ervBZs/OrdZ+Q92j+S8Krv9Ma9ZzT1rzO099Rt6t2ikAwIz+XGhNdroc+ozSh/GTtVfxPXt6fOTQmvp0Lu0zfTDkR6OOFxtBrpb3CDVA8TK30qiyD14XFrVfumjmrcqwdWNYiJumDreZq7OUgTXvn6ZsN3CivfeBTRLfH/Lj+griImbKc6
eehz4XaCBdZ+wkqYRdsfWxaOFKQ+KuHV1Fj1+SwMbzh4hSp7ZZKB7PVlS3cG028bzglhvBSVFA9QxpI54ZWFW35g+rpePoO35Lgd9aCiLJWNtY08EAABEGACthJelQthtmxVboX1hJX4ak505fqqHe8q7ci7clt9E9E6Nye8sttcN7cU1LnzVwZts8vWEsJqIUuWLMI6dKNBmf6Lr68vrVu3TlqIZsyYUStia4ly5coZuBpu0aIFBQYGGrglZkvbqlWryuuUQMtflPDJIipb1C5dulSIq3spVSrj7pLYKpY3othzBwuq+/fvi2SBy+36+++m7t2/oyYiruxg4UKYY9Jy4vizAwcO1IRXtvzluLMsEC9a9DslTBju5on7YUvbQ4cOasLrtWvXhFVwXWopYr8OFDz0acGChTRp0kSD2K/6cpwT2VJ4BU8QAAEQAAEQcGUCQ4aPpt+XLDWLgON2mnMJe/ZcIK38Yw3VrlWDPi1V0qCtWXPn05jxk2jRvNlUvlzkzXx+TtqydbuwjO0tni/d6MTh/UafxwwaxRerCHwIwmu9xs2Jf0dbN66ToitPkJ/5S5cPf+Y/vH8XZ5mM8Wpr4dNai1db9y8n+wF98J5nbR8/6W54Ts9wt+P64SsBdtmuYC1kGlttsuc8doHLHu2iSqaEV7a0ZYtbtvRkYxJrkrE9YEuFV2v2hG0lvDJjTsYsWH9e+z9as/cSTfmmNH2S251U2Lg+Ym+7cdlsGpYZf58jt6QJqUP1XFoen7CRTLW+W2Ss3Ij7wgYV8QUEYokAhFc7g1biKXcT0fKVBViV9IKrXpiNKNqq+jjansDo0aPp3Llz0Wo4X7581L9//2hdO3dLEC3wO0/DOxYnfmuK06VbT6j9uD1UOn86mti5lNbuzI3nZHxQ5WqYHwzYMjZD6qT0Ze08WlxTFR/g24b5ybtKDnm9CsD+p3iryP1dHFJ2y1DHN/ymN79XBXFzChdD37z9j6YJ8ZbjlPq0KSLjhZoSXlVMAH7gWPRjRW2si3dcoJl/B8ZYeF0tbrocUJ4teDvXzSvdHh84c4f4RstJWbzy+eBFx2jH8Zs0uH1RqlE8M2fRWzGXfvOOEF8z7qsSVLZABum+YtyqU9IdCLsF4cTuen+YeUjGy9ULr8bWR14QxYdyb8xuk6d++yklFnFrObH17Te/HJAPZ1tG1SR97AJ9k+aE1z/3X6aJq0/LN95aVfLULrslAtK3GLELDxkaEZyAAAiYI2Ar4VVZqnJf6wb/bbRLrlNYCLPmknIzHJVAa64NfZk54ZWFSrYYXbZsmRAZ37tmV1acrVq1Fm5yffTNGZyzIDpmzBhq2rQZDR06RCsbP368ECwXGQiv7HqY086dOyjZO9d3yrI2YizYt2/FH4vVqhE/V/AzScmSJWnChAla+5s2bZbiKo/PS7xJrxLHi710KYSOCJd3iRIlUtnacfXq1WKcQ6lnz5705Zdfyny922Jl8coFPYWL4B1CzK1Xr560ZGVXyitFPFsWmjmNHDmKGjZsIDeSypYtJy1uWcBVsW7v3bsnLHSbER/NicayMRf+gPDqwouPqYMACIAACNiUAItdV65ei9TmqjV/0rbtO8i3f19h8ZpDcyF87959OnDosPyePHn4Hkho6D0qVb4ysZv+Zb/PpSRJksj2ODZnC+8OdP7CBSncsrC6/u9NtHvvfmrv3crA9Sy7O74YEkLnAo4afR6LNEBkWEzgQxBevUqEi/IH9+7U3F2rcBDuad2jjPFqa+HTHsIr//2w038PFRMulzNlChcIL126TEEXgqlq5Yry5c43QvzaLazAs2bNQrlzhe+3WbzQcVwxorthY8PZceIGDf79uOZi2HfhUfI/eYsW96tI2TOkMHaJlmdKeFXxYtlSkw00kiQKf0n2rgi/9ouwhs0lrGBZZLR0D9hS4TU6e8JsILT0J/Mis7n9TLVvq98DZ0A81y8n7pWh4pQHP2Vws1DExM2pswRW4dtmi9Bs+bO+D5u3V3h17C+8OjYpl416I/ya9rvDSdwRgPAaC+z14qs561W94MrDgugaC4uj6yJEPCAPGTJEl2P5KV/n6elp+QW6mspilH321xMWmE3EWzzsVrjxkO3yhsMia96PP5IxPDmGACclvPK5snCsUDgDlS+YQYilr2nB1iB69uINLetfSbi9CA8+roRQdl3B9T4TN+348eNp1p/cf00RxD1tiiTEDxLs+7+1cLHx3TsXG6aEVx5Dh/F7pJCYS7jWqCH86LPbC3aJzIn96iuXENGxeOU2xv9xitb9c4VPtdSlXl6atTHQQHi9ee85dRFuK1j0ZEY8d45ZEHDxvhzHoHZFpTjNLo69R++WboYblclGqd0Sa3PmDvTCq7H1YZcXlqRRy09KDsy8vogZG/b6jfx+QcTsbV89J3UVFqumkrkHFZ5fR8Gcjxy3tkiOtBT6OIw2iBivnNdXWPGyq2YkEAABEDBHwFbCK/ehrFqja61qa2tXHpM54fXff//VBMj27dtT9uzZhWvfC7RixXK+lJYvX04FCxaU58Y+OF4qxzVlq9Ry5coLjxfFRBzX08KydJesrhdU2aKUhcmSpUpRlcqViS1FWfDlpK8nM8TH9OnTiUMgcJo6bZq8Rn4RHyysfvHFF/I6du+bWcR1ZXfBLCKzNeswIa4aS7whwmIpW8W2EG740qZJQ1u2bJHj5/p64ZVd5fHc2EpXJb6ubdu2NGfObE145TK9gFy/fn25ychjuXr1qnCv7EutW7dSTeAYgQCE1whA8BUEQAAEQAAEbExg3KTJNGPWXNq8fi3lzZNba71Lt++lIPtNl6/ox17fa/k/9h9Iq9f+SewmtkWzJhQW9lJ+Z2FXX/fQ4SPUpsPnxGJah3ZtKMvHmWnHrt0yDmzL5k1p7MhhWps4sQ2BD0F4Vb8fjslao2oV+TKAssR2FuF14eKlNHTEaPnfyJ+rwv+eKVelJt28eZOmTBwn4hzXpZ3iv4VOX3eT1t/HD+3TPO3Y5pdg31ZMuRtWvbLV6+Q/z9Bf+y9Li0wW+OZuCRTGM09p/bDqcm9R1TV2NCW8cl02LlmyI5h4X5WNctgghvvhPb7RX3xCFQqHC92W7AFbKrxyv8ojYFR7wly3lzBWORwYKq1y2TK3dgnjMa3N7Wcev3CPuv96kJuT+7Z5Pk5F9x69FHulV+VcvavmoG8b5JflPNdzVx/SpuE15f61zBQf+jbYGCVL+uR0UVgeq3B9c34oR/myvBdk1XU4gkBsE4DwGkvE2bUw/9MnFlbZ0lVZvupdD5sTaPVt4Ny2BKLjcji6LobVyNkik4XFDQevyizlUoGtXgcvPk4s0nHit4p6NSsorDIPU91SH9MAYYnKid0ST1l7mrYevSG/8wdbh/LbPQWzpdbybovYpWNXBdChs3dl3o6xtSnROytMDm6+ak8InRaxAzhx3NO2VXNS+2q5NDe8SnjVC6mysvi4KSwtB/5+TPrp5zy+Yfdt5UX9haVpzU88aFC7cF/8/KYWB6df5Sv89Qtx2ZrEoirHUHj95j8qljOtdCvRZOg
OGZu2n+hLJXbBO09YEe87fVtmpRfWwBXFA0o3Yf2rrE65gGMZjBdWr/wQw6lMgfRUtWgmGr08QLD1EozDY96aWh95URQf/KD064azMlaDqspsvavklC6PVZ6x45zNgbRw6wWK+AaXqsuWxmy1q+bJ+fwbYatgngcSCIAACERFwJbCq97q1VrxVYmuPF5TFrNRzcVY+S+//EKzZs2KZNWq6gYEnKIRI8Jd+qo8ju06cKAv5cmTR2WZPF6/foPGjR8nrUO5EouoLGBOnTqVHj58KN0Qcz67H/YV4itbkXJiEdPHx0fEeF0k46vqXQ1zOcd2ZYtRTiwQJ06cWJ6rj23bttPYsWPktZynxNQe3btHqquu4eOOHTvk+NgSlRO7FK5VqxYNEq6HI8aHZcvboKAg4ueiFClSyLrsBrl37940atQoatCggWyDP1hoXbx4CR0/fkzm5c2bl5o3b07e3t5aHZxEJgDhNTIT5IAACIAACICALQmMnzSFfps1h7Zs+JPy5H7vknLq9Bk0edp0g5it3C+7DB4tXArPW/C7Ngy2cO385efUo9vXWh6fbPbbRsNGjZWCE3/net6tWlCfH3qYfR7jukjWE7h48SLNnj2bcuaMngVlcHAwVa9eXf4z1XvwxRCqUbchffdNV+r1/XdatZlz5tPYCZPorz+Wk1fh955ymrVuT0HiuV25sH78+An92N+X/MSzOif+TQwb5EPzFi4Wv5NbmsXrgIFDieMPHzmwh9KIGLCm+uU2OMZrWRGbeMmCOfzVqjR52q80dfpvkX7/Mel/z7791LFTV2rbphWNGDJQjufr73rKOa9duVRagAcGnafmbdpLcTY647ZqkjauzPue7AqXQ6Ox98CiYu/RM2MKsQ/5Vu5lLve/SA/e7SHqu+5YMzd9JUKbRZWGLz1Bfkeuk7Lo1Nd/K8xP5wtviLw/e+VOeMzozO7J6BshQirviFzfkj1gDns2duUpMhXjlfcNP6vx/v+JluwJc99HgkJppDAwuSP2l4vkTEPTu4VbeXOZPkW1n8mh2diLHxvJqMRz9Rb70GyMxIm9Elbvt0WKvOyCOWI6I0LKTRBtsNGQSmwx3EfsheutY1UZjiAQFwScWnj1cE9FGdMaBqWOC8j6Po0JsPpyCK56GnFzzpavbIkSldthdgPIm4rRtXSNODsWFPlmnjRxAoMidvfLZSxmmkssELLVY6rkiTS3FMbqh716Q/HixTMQIVW9py9eE5enEVavoorVSV2fWow1fnQaMNLjXiGgsgtdftBQLpK52ordF+mXv85SjyYFqGVFz0hXMrNHz15S2pRJIpXpM1jAZEE2RbJE+uxI56bWJ1JFIxkcZ4CF9ATCXWP2jG42Y8NdsbjLwjeve8oo5mBkaMgCARBwYQK2FF4ZY3TEV73oaq1ga6ulY5dZvBnC7rKUWzlr2ubrOTZqunTp5P3V1LVcjwVZd3d3s29+K2tctsTt27evqeakoMuWt9yvNenu3btynilTGn9GXrFihYxLxuKvigXL7Xfv3kNa9HJ5gQIFInXJlrIs2JpqN9IFLp4B4dXFfwCYPgiAAAiAQJwSeP36tRbLPuJAXr16JVwLB1OChAkotxD6OOyCqcRi24uwF5TeyucxU+0h3zQBtnplATY6iUVXFRYjOtdbc82LF2H0QDzzp09n/pnfmjb5N8mipyXp48yZKV/eqEVAS9oyVsfYfzsR8/glBv3fEcbacdS8FiN30S1h+KFStWIedPR8qCa4smfA5hU8iY0+gm88ogLC4KVM/vSquk2OLO4mSBDP7B6fpXvA1gxI7elGtSf8SoiivK+cUIwxJon3WXk/k/e7OWarPinvg6b2fFVd3sO+8/CFCMGXzOg+t6qHIwjEBQGHEF6zpE9N6VKHu0O1BYTQh0/pyu0HwjzfnVK6JbVFk3ZpQ1m6cuP6GK926QyNWk2ArTwOHTokLT54k5ITb26yFUzp0qWFW8HiVreJC6wnsFG4m2ArVHa3wQIr35A5wDq/wcXulDmAPQeyRwIBEAABELCOgK2FV+5dL6TydxZTvbJ7GcR3ZYF2mf8SChBHlWwV11W19yEe7z94QEGBQdKiNDj4Am3evJkyi42T2E4DBvjQhg3rqUaNmlSnTm1h/fGW9uzZI/MyZswoXRSb24CM7fF+qP1BeP1QVw7jBgEQAAEQAAEQiE0CLLjaMrEI+6Glu3dDqXSFKhYNu2P7tjTYt79FdVHJkIByNVxRuNEtIqwnObxZMmEc80h4GlSCq4eFoccMW8Y3awmw22V2vwy3wdaSQ31HIhDnwuvJ89elK9NcWdIJSz/zFmeWgAt7+ZrOX70rrMDeUuFcHja1LLOkf9QBARCwLQF+A4rj087dHGTQMAuwo78sQYWyv3enbFABX0AABEAABMwSsIfwqjqMKMCq/IhHL08v8q7czkCYjVjHVb7rY7tyXNiWLVvGydTv378v4riOlC6E9QNgN8wTJ06g9Olt+0a3vg9XOofw6kqrjbmCAAiAAAiAAAhEl8CAAQOivFSJqZaItBw240NLbFH6z8FDFg07s4cH5cqZw6K6qGRIgF39cuiyoZ8Vo2pFPajD+D0UImKHrvSpItwPWxcuzbBlfLOWwI9z/qUDZ+7QrvF1tBB41raB+iAQ1wTiXHh99PQFhVwPpf8ECRZeE76LORkdMPxG/vOwV8SG7p7C2jWVA1u7Rmd+uAYEXJkAu7y4evcpPRZvmnEsU3YjYSNvxq6MFXMHARBwYQL2FF4ZK1u2BlwKEMeTBtatLLYW9iwSyRLWhZdCTv3y5ct0+/Ztyp49u0OIm6GhoXRJjIlTzhw5KLWIQYVkOwIQXm3HEi2BAAiAAAiAAAg4LwHlYpjjtMYkcXxYdjesRNqYtIVrnZPA4cC71GvmYUqfOil97J6cjl+4R7VKZKaBbYs654QdeFa3RRxZjnubKQ0EbwdeJgwtCgJxLrzy+J48C6PQR0/lkWMhRjclSiDiNCZPIuJAusljdNvBdSAAAiAAAiAAAiDg7ATsLbw6Oz/MDwRiQgDCa0zo4VoQAAEQAAEQAAEQAAEQsD2BJTuFi9sN52TDlYtkpG4NC8Da1faY0SIIuAQBhxBeXYI0JgkCIAACIAACIAACDkQAwqsDLQaG4nIEILy63JJjwiAAAiAAAiAAAiAAAh8IAY7rmip5zEMifiDTxTBBAATsQADCqx2gokkQAAEQAAEQAAEQcHQCEF4dfYUwPmcmAOHVmVcXcwMBEAABEAABEAABEAABEAABEHBlAhBeXXn1MXcQAAEQAAEQAAGXJQDh1WWXHhN3AAIQXh1gETAEEAABEAABEAABEAABEAABEAABELADAQivdoCKJkEABEAABEAABEDA0QlAeHX0FcL4nJkAhFdnXl3MDQRAAARAAARAAARAAARAAARAwJUJQHh15dXH3EEABEAABEAABFyWAIRXl116TNwBCEB4dYBFwBBAAARAAARAAARAAARAAARAAARAwA4EILzaASqaBAEQAAEQAAEQAAFHJwDh1dFXCONzZgIQXp15dTE3EAABEA
ABEAABEAABEAABEAABVyYA4dWVVx9zBwEQAAEQAAEQcFkCEF5ddukxcQcgAOHVARYBQwABEAABEAABEAABEAABEAABEAABOxCA8GoHqGgSBEAABEAABEAABBydAIRXR18hjM+ZCUB4debVxdxAAARAAARAAARAAARAAARAAARcmQCEV1defcwdBEAABEAABEDAZQlAeHXZpcfEHYAAhFcHWAQMAQRAAARAAARAAARAAARAAARAAATsQADCqx2gokkQAAEQAAEQAAEQcHQCEF4dfYUwPmcmAOHVmVcXcwMBEAABEAABEAABEAABEAABEHBlAhBeXXn1MXcQAAEQAAEQAAGXJQDh1WWXHhN3AAIQXh1gETAEEAABEAABEAABEAABEAABEAABELADAQivdoCKJkEABEAABEAABEDA0QlAeHX0FcL4nJlA2Mswck+R1JmniLmBAAiAAAiAAAiAAAiAAAiAAAiAgEsSsLnwyg0igQAIgAAIgAAIgAAIgAAIgAAIgAAIgAAIgAAIgAAIgAAIgAAIgAAIgICrEcjs/pE25Xj/iaR9wwkIgAAIgAAIgAAIgIBTEoDFq1MuKyb1gRCAxesHslAYJgiAAAiAAAiAAAiAAAiAAAiAAAhYScDmFq9W9o/qIAACIAACIAACIAACcUAAwmscQEeXIPCOAGK84qcAAiAAAiAAAiAAAiAAAiAAAiAAAs5JAMKrc64rZgUCIAACIAACIAACZglAeDWLB4UgYFcCEF7tiheNgwAIgAAIgAAIgAAIgAAIgAAIgECcEYDwGmfo0TEIgAAIgAAIgAAIxB0BCK9xxx49gwCEV/wGQAAEQAAEQAAEQAAEQAAEQAAEQMA5CUB4dc51xaxAAARAAARAAARAwCwBCK9m8aAQBOxKAMKrXfGicRAAARAAARAAARAAARAAARAAARCIMwIQXuMMPToGARAAARAAARAAgbgjAOE17tijZxCA8IrfAAiAAAiAAAiAAAiAAAiAAAiAAAg4JwEIr865rpgVCIAACIAACIAACJglAOHVLB4UgoBdCUB4tSteNA4CIAACIAACIAACIAACIAACIAACcUYAwmucoUfHIAACIAACIAACIBB3BCC8xh179AwCEF7xGwABEAABEAABEAABEAABEAABEAAB5yQA4dU51xWzAgEQAAEQAAEQAAGzBCC8msWDQhCwKwEIr3bFi8ZBAARAAARAAARAAARAAARAAARAIM4IQHiNM/ToGARAAARAAARAAATijgCE17hjj55BAMIrfgMgAAIgAAIgAAIgAAIgAAIgAAIg4JwEILw657piViAAAiAAAiAAAiBglgCEV7N4UAgCdiUA4dWueNE4CIAACIAACIAACIAACIAACIAACMQZAYcQXp88C6PQR0+Jj6/evI02jEQJ4lOK5EnIPZWbPEa7IVwIAiAAAiAAAiAAAk5OAMKrky8wpufQBCC8OvTyYHAgAAIgAAIgAAIgAAIgAAIgAAIgEG0CcS68Pnr6gkKuh9J/YgpuSRNTQiGeRje9fvuWnj5/SfFEA56Z3SmVW9LoNoXrQAAEQAAEQAAEQMCpCUB4derlxeQcnACEVwdfIAwPBEAABEAABEAABEAABEAABEAABKJJIM6F15Pnr0uxNVeWdJQkUcJoTuP9ZS9fbOLFRwAAQABJREFUvaHzV+/Qa2E5WziXB8WPxzIsEgiAAAiAAAiAAAiAgJ6AvYXXUyEBFHApgE6FnKQAca6Sl6cXFfYsQl7Z+eilsnEEAZciAOHVpZYbkwUBEAABEAABEAABEAABEAABEHAhAnEuvB4PukZZM6Qm94/cbIb93qNndPnWfcolrF5TwurVZlxdraE9gcdo59nDdPJKIN18eFdOP9NH6ahI1rxUNX8pqpi3uKshwXxBAARAAASciIC9hFcWXJf5LzEQW01h867SjrwrtzVVjHwQcFoCEF6ddmkxMRAAARAAARAAARAAARch8PDpS/rILbGLzBbTBAEQsIaAQwivebOmp+TCzbCt0vOwV3Tu8m3ycE9FGdOmtFWzNm/n3IVLWpv5cmXXznEStwTO3bxEU7ctpROXz5kdSNFs+ahHjbaULxPWziwoFIIACIAACDgkAXsIryy6Dlj4kzZftm71rtzOwLJVWcIu27VEqwcBVkOBExchAOHVRRYa0wQBEAABEAABEAABEHA6An/uv0zT1p2hl6/eUt4sqein1l6UJ3Mqp5snJgQCIBB9Ak4tvGYSomsmIb46Uvpriz/xP1Opce3KxP+Q4obA3qBj9NOqKVZ1Pqbl91QhT/SsXx8+e0VzNgXSp/nSUYXCGa3qN7qV5/sFSffeHarnim4TsXJdmHAbvnRnMOUWDy4VY4lNrEzMgk7evP2Pbtx7RmlTJqHkSWLugl11+Z8Ipn0t9Cnde/ySMqZOShnTJFNFOIIACLggAVsLr8v8l5ISUy0VUvXXjOo4xkCgja0lCQsLoxs3bpCHhwclSZIkxt2uWLGSXr4Mow4dOsS4LTTgvAQgvDrv2mJmIAACIAACHzaBNyJ02fkLF+QkcufKRQkSxP+wJ4TROxSBjZv96MChw9SnZw9Klcq+xkIhly7Rtu276Oq169S4UX0qXrQIxWb/DgXehoMJvPqIOv28T1i6JqIcmVLS8Qv3pPg694fyNuzF+ZuyZu9z2a6LlDJZQmrwaVYDMI/Evvrl208oW4YUlCp5IoMyfAGBuCYA4TWWViCi4Jo/t6dBz2fPhxh8hwBrgCNWvrCla6d5g6PV19wvh0bL8nXXyZs0cOGxaN+gb95/TqGPwiiXR0pKmjhBlGPnuk2G7pD1No6oIW5ajntTYlG6wcBtVL90FvnmWJSTc4IK9x6H0bhVp2jf6dvabDwzpaCeTQpSiTzuWp6xk/F/nKJ1/1yRRYv6ViTPjCkMqt1+8IJ8Fh6ls5cfavkVCmegwe2KWfTb0S7CCQiAgNMQsLXw2mhofcnGUtFVgbSH+Lpt2zby9R1Iz549pWXLllGhQoVUd9rx2rVr5OPjS0ePHtHyihUrTiNHjqCsWQ3/oNMqWHDSokULunv3Lu3atcuC2qjiqgQgvLrqymPeIAACIAAC9iDwz8FD1K5jJ5NNB58NMFmmL/ht1lz6deZsevr0qcx2c3Mj359+pNYtm+ur4RwEok2gep2GdDEkhObOmE5Vq1SKdjtRXchia91GzeRv2T2tOw0dNIDq1alFsdV/VONzlPI7D1/Q6UsPKEPqZFQw20cmhxV88zGdCnlAQdce0uHAULp29xm1r56TutbLR91/PSjF1w3Dqjut2+G3wkDkzJWHUmzOks50uEje5x694iQ9e/GGZvcsR/mzRmZq7d7n/ScvqdHg7dS4XDbq0zz87/qb957LPU4WwVUqkjMNjej4CaVJYTuvqqptHEEgOgQgvEaHmpXX6EVXFlxZVDXlWlhfF+KrlaBjWL3b4tFRuhc21QW7HZ7evr+pYpP5L16+ob/+uSwsfNJQoeypTdYzVTB9/VlaLt76md+7vLQMNVVPn+935DoliB+Pqhf30Gc73LmrCa/Pwl5Tm
1H+xA8UtUtmpqI50tJt8QC4cvdF+cAyoXNJ+jR/eqPrdOR8KPX87ZBWtvDHCpRTvHWn0lth6tpn1mH5cFi1aCb6RIi4O47foGPn71GrSp7UvXEBVRVHEAABFyJgS+FViafWiq4Kt7qeXROPFJav0U0PHjygMWPG0MaNG7Umli5dSoULF9a+88mdO3eoYcNGUpht37495RLWDIGBgVKkTZ7cjfz8tog30KPnNQXCqwFqfDFBAMKrCTDIBgEQAAEQAIFoEFj/9yb6vndfql+3NrHIpE/xxf7HIJ/3oTD0ZfpzFl3HT5pMJT4pLoXW58+e09TpMyj0XihNnTSeGtSro6+O81gmcPHiRdq+fbvVvebIkUNeU716dauvtccFR44eoxMBp6ht61aUNGnMve2YGuOq1Wupn88g6vV9d/q261cUP3645XZs9W9qXI6U73f0Og1fckIbUpsqOahbw/z0Wli9s8h66tJ9Cr7xhA6du0MPn77S6qmTbBncqGn5bLTA7zw9F/u728fUVkVOd2TL0vrCOKaSV0Ya+fknkebHe7hT1p6mrUdvaGWzhPBaIILwGp29z72nblH/+UdpcPuiVKN4Zrlv2nH8Hnn8olZuyi6sXf85e5u2/HudsqZ3o8XCGIX/v48EAnFNAMKrnVdAL6T269bRQHA1FeOV88dOXyhHBvHVzgv0rvk9gceo/x/WuRiOOLLRLb6ninmj53I4YluWfo+O8Gpp2/9n7yzAo0i2KHx2cVkWd3cLsnhwd3dbHBZncXe3RRZ3D+4W3IJrcA3uwV3eu7eGGnomk5BJJiSb3Po+0j3d1dVVf2d3OnXq3BvY9UKa8Krdz3WLJMdfZdOY8fOKsubj3FAkS1wMqO/194vF+waj9+Hdhy8qXPX6Q7dhLbyuo2OjyElbLldCdKvhpNrmsMPNJ7gpB+y/rXMhc/Lo5nvKjhAQAiGDgKOEV2Ne13X9NvoZnnbM+jXk8OvXr1GuXDl4enqiatWqCB06DJYudYEt4XXBggUYNWoURowYidKlv0+iTZ48GVOnTsWYMWNQvHhxP41FhFc/YQtxF4nwGuIeuQxYCAgBISAEApDAvIWLMWDwMBzYtY3SR8T1052cCxVHmDChsWX9akQIH161ce78RZSvUh1FCxfCjCkT/dSuXOR/Aiy4+kV0Nd65WbNm0CKs8Xhw3f9n4iRMpIUD61ctR4b0aYPrMP01LhYSP37+qgwJx8jFev7WczLGRCXB9TlAc2a6RKYQt2yY4XMZk5i2kzdcxKr9t3QVdCInZiVyZAbX4pPw+ub9Z9QeZjKSlM+diNLb/YLVB27BlvDql7lPPfe9vFchxI0eAWwoGrT4NLrWyIjyhtDDveaewF73h5j5tzPSJPTqtA2uz0bGFXQJiPAagM9Gi67WLlcWVvmcMbywdR3u1sjJ81UdEV8D8CF9a3rguulwPevmrxuVyOiMvhWa29XGEwr923nGUZTKngC1CppW4U3bdAkHLzzGQBLZZm29jEMXHyNc6FBwTh8bLculUWEreIVQq38PqTygHL4hfowIiEC5QLuTqMZhHHQb/Hk25XQ9eP4x2lRMi5oFkqHLzGOIQGGJB/75XcR79e4Tlu7xwP5zD3Ht3ivkTBsTrculxbztV/GWBL1RTbOrca0/fBsr998kATCLWlGkB8thOTjUbcPiKVEo0/c/ci6SaLjp6B3sPv2A+mcaQ6U8iZHEKgzuTYrHv4q+lHeRC5ML57tlJ2b9kfv8FGqYv8j538ELjxA1Ulg4JYuGJiVTIV70iLrL4DAZq91uYcvxu0p85FVR/BLVisYd9VtYCs6zyquqKtCLw+t3n7Hl2F3cfvwG+8aUVu20n3oEsX4PR31Nhqn00sXhRlInNOWkZdY8Zt+WVQdu4p9V5zG4YVYUdPrO8BOttCvSdasaw+Q2ub00N51yBC/Yfk09zyv3Xqp9a+GVn/kh+p1a1bcw9df0ByQ3dJLyULSjkCh1CidXv1teGpcDQkAIBGsCjhJetVvVHrcrX+OUxIn+v2taDMKge83rDncPd9jTjvEBseBapUoVDBw4EAUKFMCECRMxc+YMm8Lrzp07cZNyHrHbNUyY72H3N2/egm7duqoQxDVpFbpP5dWrV1iwYCF27tyh3LL58uVDhw4d0LNnTy+hhm/evIWJEyfg9OnTePXqtXLgVq5cCWXLmsIz79q1C5MmTULDhg2VeGy87/gJE7Bv716MGzcOCRMmBLt6x48fj8OHD+POnTvg8Mg1alT3cp2xDd7/+vUrXFxcsH79Bpw7dxZJkiSlazOjY8eOiBYtmkX1/fv3Y+3atdi3bz+iR49GXKtSnzNg9OjRaNeuneKrL3CnFftc183tADmI3yJ//vwkZpeGs7OzriJbGwREeLUBRQ4JASEgBISAEPAjgdH/TFAhgi+eOY6wYe0PM+np+QwdOndDvrx50LxJI4teJE/rhLhx48Jt9zbz8Wf0PjZqzHgcOHQIt2/fUS7ZurVqoFKFcuY6suM4Alp4HTp0qJ8a5fdzdrz65Hq9fecuWrRuh9r0Xv3x0ycsdlmGRxQlp3DBAujfuweePH2KYaPGYs/efZSWJCGF7S2p8rQacwBzTtXR/0zEiVP8zv8KmZwyokbVyqhY3vTOz51nF/UW121wWTBX5Xg13vcXcumtWLUGZ+j9OkP69GjTsjlKFrfPqcu/y/UaNQW3yyGzua+RIkZUru/cOXME+P399IAC4aLnbz6ifN8dyJIiOia2yqXmB8euPKd6wnOI8Ung02Jryvi2IyG1nHhQOWN9ilBnHJpPc7Vcj+cxd52+r+aBOWepc7pYqJ4/KSJbpYnbRXOsq91uqih2PB/MRgo2cET/LZy63R73B5hDLtz2lLaM53nZMcrj5fHwnCenqzOWH83dLtvrgTU0f8rzoVxSxP9NOVm1sYMj97EDtUctJ+RJFxt6ntKW8OqXuc+GY/aDhd9VfQqr+3N/Dpx/hL51MiNGFNOY+QQfn7j2AvrQ8RLZ4qu68kMIBCYBEV4DkH7jjgNV67PH9rW4ixZU+SALrlqA5f2urf401zU6X63dsuZKsuMQAtUmdcaDF0/81Vbc32NiRevRdrXBwl6NIXtQoyCFe61gCvc6cNEpFZqBvzxDUSiQZJTjk1fscMlNX7osgrLLcfTKszh/84X64uPjnNS9fpEUStTUbXBc+w+fvyATha0tkyMhOMxslUG7ECl8aCzokl+1+fnL/9B1pikMbayo4eFE4qM7hdR4894URoNF33UDTC9680ngm0FCHyeMZ4FRlyOXnqAThbLtUj2jEin5+M2Hr5WjkoVhFnI/0SoyDm3LfZrdKR9ifvtyfEFf/o3GHsBjykHK+UwTkwB67MoTxPgtvBqbvTle9con7gPnML1Pcf9ZTI4YPhRmtM9LCddNuQjGrT6vRGQecxbKA3Dx9kt1P+a+sFsBhAn1qxpDPRJ/uc/8IsFjZuFyeONs3DxK9nJVojgzjhQ+jGLHY+S67FDtXy8rfvlFVf3hj4eUr7fa4N3IkToGhjbKZs67ykI397VdpXTqhcvY
EDPm/unfC/1yYy288jMPQyvOlvYsZLxc/R4V7+GKvBlim8dkUUE+CAEhEKwJOEp41YKpb52qWqhluEaHrHbO+jXc8JcvX9QER9SoptD9Pgmvth7sx48fSVRsrwTE1atXq/DDturxMRYx27fvgD17dpMwGR05cuTA0aNH8f79B3UJhw3TOV7v3buPUjQxwyVPnjw0GRhOXcefhwwZSiGPyymhtkiRIshO7cyeNYtPqfKJJn3y5y+gxM+NGzfi8+fPKFOmDB4+fIiSJUuqe7OIzJ85py0LsN4VDsHM7t84ceIgW7ZsJL6eJ/HZQ4m5LJxqAfoQTSA2b95cNcP95XLw4EEl1HJ93Wc+zuGZ2eHLhfsePlw4sGjLhZ3DIr4qFDZ/iPBqE4scFAJCQAgIASHgJwLde/fDxs1bsdt1E9as24C79+4hY4b0yJMrp58dsNyRm7duo3CJMkp8mzVtkuobv58VLF4GDx48MIc2dt2xU30e1L8PWICV4lgCP0N4vXrtOkqUrahEdhZNCxXIh0uXr+LqtWvIQe/O5y9eRDwS4NOnS4sDbodUCGoO48viKBf+nctfxPTOn885D8LRe/GOXbvVuTEjhqJyxfJqn39Xl61YheMH96rFj8b78u8Uu6v5947vy2Xl0kXImjmT2vfNjxcvX2LgkOE4ddpd5ZJl4Thq1N/R6M966r+JgL6/b/oYVOrUGbEXtx+9UXOlbCh5RPOSXOpQJDo2jcQj8dWnUnXwLrykEMTbhpXwqZr5nE9ztTtO3kf/hafU3GWO1DFxk/rl8eC1mk8d2SS7ShnHDW0mc8tQF3dlwMlPppVbJIay2YbnLVeQMBk29K/gSHgjKeodz3d+prlYFpdPkfGC5yp5bnRVnyJqXpjb883cLfeNxVwWfPk+PMebOFZk/FksBTeBL2RseU2u198jhlGf9dykLeHV3rlPNgqV6b0dpXMkQM9aPv93MNTlDPG5i+nt8yBdYvvT+anOyw8h4EACIrw6EKaxKe12tXarGsVUoyCrRVprgVW3Yy3KGu8l+/4nkG9oQ/83Qi3s7znXrnZ8El6L/xEPvWmVzq+k3BnDNqzuV8QsWupwC9Y5XvWXOQutfepmViKi7pi18MqrlsbQqi4WKTlOP9+Pw9AOXnJahW/gL1W/CK91htMLDL0ATGmbW62q4vvrkBLcL+241V+MTUqlUo5ZrsdhhhvRiiYWY+0RXh+QeFmdxEt+kVjYtYDZ3alfTHQidhaBO5HTmN2w/MUdnhzAXPglh18oprbLo3LuamGTz7HblFe9GQsLrywsG0VWFrLbTz2MM9efqTHyWH1bdpDjd+RydyXmpqecvzweFo2r5U+CFmXSmPup2+s47Yhy2S7pURAJY0Y0ryozCq8s0rO4yoLu2BY59aXmLY+BOViLsuYKsiMEhECwJeAo4VWHCDaKqN5BM4qutoRae9ry7h76uG+EV871umLFShJsXyp3J3/u1auXEkN1O7a269avR2+qlzNnLkybNhWhQoVSYuzff/8Ndq+yGKuFVxY1V65chQYN/jSvsr9GEymVK1cGu2Q5vDGX1m3aKGcrC6kxY8ZUx7QIyoJw06ZNcOTIEdo2RZMmTUj4ba/qPHv2DN27d6dJongYMKC/Omb9g0Xlv1q2RHRytg4ePJjySZmiH3Tt2g1btmzGwoULkSlTJjUGZ+e8KvetPsZtXaSJpho1TJOIRuGVRWB23c4isZjFZy6cf6tixYpqn125ESL4PGGhKobAHyK8hsCHLkMWAkJACAiBACPQpEVrHDl2nBaBhVeCmPFGC2bPQF5nr9GjjHVs7fNCuxat2yvxbM6MqSiYP6+q5nboMOo1bIq/mjdB144d1DHtmI0fPx6GDx5gqzk55g8CP1N4jRQpEtaucEHyZEnBInvNeo1IxDyNEsWKYtL4sfTe/yvu3L2HAkVLIkvmzFi1dKEa2YrVa7F0+Uo0bdTA7FK9cvUaSparhIIF8mPOdNM7v3fCJzeybuVSJY7y/qw58zFkxCjUq1MLA/v24kN2lX8mUKjhyeSu3bAaqVOmNF/7s+5vvmEQ3jl13RMDKccrzz1yiUdGDE7j9ZwESi4+CbAfPn1Bse6uyv05l8wlvinezdU+fvEeVQbuUmLqtHbO5ih82jDSsWp6VHZOom7B0Q/dbzzDpsHF8FsEk9DJLtLtNJfJ+Wkz0FyiFl45sh/Pb0YhQZTF0b7zTypjD8/H6rlK387d+hRq2HrsPgmvXNeeuU+OAtllxjGau3Ui8TWh9a3Mn1k4/2vCQTX+yTQPzXPbUoRAYBMQ4TWAnoAWUo3iKt9KC6/WQqp2wVoLr3yNd23xOSmOIRAUhdcZlIScwwbrMnypOzYeuWMWBfn4j4TXuZ3zeQkhYS28ssDKCchX9C6EONG+T46+plVFpWlVkV+EV89XH1Cx/06LnKJ6HM3Gu+EWhRbeOsS0Iqzm0N3qxWYNCcrG5Oe8kopfCozCKwuyLJpaF3b7skNVC7tG562uy1/W4cOEUiu99DHrrb5ef6Fr4dUoFBuv0cLrhkHFzCu7+PxVCvnbaMwBs5OZwwW/oFVw1iUMrUTTK8L43FYKezxp3UW1Cs1Ylx3RHCo5IoWT1kX3tWGJlOocH9cvN0bhVYv7LOT3rZtFX27ecsgOFnd1+GTzCdkRAkIg2BMIaOGVRdazHmcwpMFwxfJHoitX+tnC67lz51C7dm3zsy5CIcj+atECadP6nAdpEImXy5ctUyF2jbmiWLjlEGZG4dXcuNVO+fIVlOCrBVpXV1d07twZ/fr3R1UKmcxF32fTpk3KmXrkyFElwLI4OmzYUC8hgq1u8cOP27dvV6GGBw0apMRSDw8PVKhQQblY+/bta3E9hzqePXu22fGqx1quXHkMHTrEou6UKVMxZcpks6BrcVI+KAKOFF55UtDz2XMvZMOGDYNo3xzgXk7KASEgBISAEBACwYhA+So1ce78eSVStWrRjBZ+hceu3XvRsWsPNco927cgUcIEdo143MTJFJZ1CiqUK4txo03vs9zAwcNHULdBExTIlxdjRw5TkUnsalgq203gZwqv1s+bQwOPo3yps6dPUS5Y3fmipcqrUMTuxw/pQza3XO/ly1c46rZbnfdO+GSnqzGP8C0KYV2oeGn1ezZ35lSbbft00F7h1dH396lvQe3c5Tsv0Wz8AaRKEAVT2+ZRqdhc9tzwUYC9fv8VGozej4KZ4mBwgz98NSQtvFrP1bKbtPfck15C5LIJp1SvbTDO57WedEgZPYY2+kNFr7MlMGrhVaec053jOcfBi8+oVGOccsyeuVtHCq/2zH3qec5FFJlQRzDU49FbFq6bUCRFdvTO6kgRGuk5ShECQYGACK8B9BS0kOoox6t1OwHU7RDbbFAMNbxlSHFz6Ad+MMv3eWDCmgv4t3UuFb+fj/1IeHUdWsJLnlFr4ZVXNz199d4shHK7umhR1F7Hqw49zO2kTfxdPObPF2+94I3KN8pCIr9E5EkfCxw6w1huUViNuhT2wyi8smirrzfWHdM8B3Kmialy2y7ccd1XYSXYCbr95D3
wqigOzcEvERzGgwvnJeDQzFp4rU/hM5qXTm28pdpn4ZVDDOs8A7qCzsuaiUIYT2qdW+Vn4BVa1oVXovHqMy46rAiHW+5cNQM4h8QTenlYutdDrVYr4BRHOZK5LvedQ6JwyJBlvQqZnbD6hcQovH6kOkW7bUXWlNExoWUuvtyi8Bg4rPPi7gUsjssHISAEgj+BgBZedQhiDh2cMWkmLNm9SEG15XTVtH+28Po/CvHAbtCXFJLLzc0Nc+bMxfXr1zBixEjKU1pKd8vLtmbNmrhw4QJOnTpFi4Z+tTifO3cecpR+DzXMJ8+ePQsWOS9evIQXL17Qv+fKKcrnzpw5wxsKU/yeHLQ5VTjiadOmgUMn582bD6lTp8b8+fNUnQ8fPqBFi79w4sRx9fmPP7JRnbwUfrg0EiTweTKR29+0abPKM8v5bTlXLI+VC+fFrVSpErZt24ZOnTop1y+P0Vi2bNmCrl27moXXAwcOoCW5aLl+gwYNjFXBk2Ps/v1R+GOLi0LYB0cKr7v37kfj5i29EDS6MLyclANCQAgIASEgBIIRgQWLXMD5MevVtnx/WbjYBX0HDsGIIQNRnXJt+rasWrMOnbv3Uo7GRfNmIsK3aCF8Pb+P/dm4BY4eN72PcRjaggXyonzZMnaLu77tT0iv9zOFVxbuO//dzox86ozZGDnmH6xethiZMzmZj1eqXhvXrt+AUXjl3KybXbfT3wkX8Zze+TkXMOcA5nL9orvaeie8tmjWBN06mRzUqiL94PzCuSgv65L5s/UhX2/tFV4dfX9fdzSIVByy5Ay2HLuLcX/lRLZUMfCZDBRL93jAOwGWU8L1mnsCdSksMedX9U3Rwqv1XO3MLZcxb5vp7zJbc6hGQwznNu0+y/T/Hj7OqccKZoqr5kTZkMJFC6+jmmVH7rSxzF1jgbnJPwfQtHQqNCiWEr6du+WUa44SXu2Z++SOs4v1xsNX2DK4hM1UbixOsxjNhhKO4shzp1KEQFAhIMJrAD0J7Wzl5q1drFqU5XPsfPUuxyufF7crUwj4MnDddLiedfPXjUpkdEbfCs3takO7EW3leLX+Ita5Ph0tvOrVUjtGlFS5APQAONxwqd6mHKb2Cq/7KHF7zzknVFPlclmGgjhLQmdGEhyblEqN0PSHUfl+O9QXI39BGotmYxRe1x68hScvPxirqf2S2RKoULv/rrugXox+tMKJQ4e0o3DALOJyWOLsqWIiGiWhd6MXGA4xYo/waku0/EohPAp22aJE5xntnXHnyRtys97z0u/Y9PJSPncidVyLysZQ0vqCdlMOq/y4y0lkjUs5JvjFj52xfG0RQyjj9YdvY+epB+hEwm0iymWbLWUM1QQL6J8oBLK1QMwcSvR0VWGmh1FeWSlCQAiELAKOEl61wGpLUNXnNFlbdfQ5/+Z41e3orW9CDeu6esv5SVu1aoUsWbKaxU59zritVKmyEi1PU9ixX6zCGBUvXlyFJdNO1vXrN5CQ2VNdniFDRrBDlvMscShfLlp45f1+/fpj9epVlAN2D65evapCCvfvPwBVqnyfKOTJPj7PDtn9+w+osMB8rXat8r51efv2LbXVlPK6nkXEiJGQO09uxIgRA3upHc4Pq4VXzlPLYYxbt25NAm8Li2aWLl1KousQs/DKIZU53HHPnj1Rq1Yti7p79+5FGwqd3KVLF9SvX9/inHwwEXCk8OpBQvpqymdnXeLFiYtaNapaH5bPQkAICAEhIARCDIHrNzxQrHR51KhWxdchgDl/Z/3GzZAoUUKsXLKQUkCY/q42QuP3sR279mDTFlfs2bcfb968UadHDhuMapVNKReM9WXffwT+C8Lr6rXr0amb6Z0/k1NGpEjO7/xRMWfeAjX4Hwmv1oIvX/QzhVdH399/T/znX73txD0Vdtg4P8u9sCXAFiKh88S1pyq/K6f9GkiO11Txf/thp70TXifSXOYyEnlZdE0Zz7IdnkPNSTlf21ZMZ26fjSqu1N+DFx6BxVQuqRNGwT+UXozDCmvhVZtU9IWX75LwSs5QLbz6du42ZpRwDhNe7Zn71HOWRbPGQ/96XiP4seml++zjOHLxCdpUSIuaBZPpocpWCAQJAiK8BuBj0AKrdVhhFmU5d6sWXLkLXIddrWlSmGK28zF9vbhdmUbAln2XT6LHivH+usmwau2RP3VWu9rQ4qLxi927L+KAEl4nb7iIJbtuYBiFqchHidl1OXH1KdpPOWIRanjhzmuYtvGyl7quJCoOWnwaOsTv3advUWvoHtQokNTi5UC3bdxWIOE1QrhQXnKM6jj+RuHVeJ2tfX7xGET5GfpQbtwS2eJbVOEcDaFC/aJyILDA2o1WiHHb3OdQJABz0blg7RFeOcfrtmElzK5TbufOk7eoPWwPdE5ZPvajws5TLrZWcf2z+jxW7b+J8S1z4g8SU/Vnn9pkQVmHc+466xgOnn9MjAtSzoiI5ssOU/jlzuTErVc0ucohaz4hO0JACIQIAo4SXnUIYXa26rDCRoBafPVJdOX6up3aheqidsE6xib8tO+T8MphcyNFioxmzZpatM0hW7ORayFhwoTkDt1kcc74oXfv3li3bp1yiMaJ8/278/Xr13B2drYINVytWjVcvnzZIiwxO20LFy6smtQCLX/QwieLqOyoXbx4MYmr+xEliu1wSeyK5YkoDlHMgqqb2wEvDlxud8+evWjbtg0qUV7ZfhRCmHPScuH8s3369DELr+z85byzLBAvWDAfoUOHVvX4Puy0PXLksFl4vXv3LrmCS6M65X7tQzyMZe7ceRg7doxF7lfjedkHHCm8Ck8hIASEgBAQAiGZwDuK6jFqzDikTZNaCaxGFidPn0HVmnXRttVf+Ltda+Mpm/sXL11GmYpVESN6DKyk3J2JSXz9UeH3pK3bdqBNh070fhkJp4+62Xwf+1E7ct57Av8F4ZV/b/j3Z9umdUp05dHwO3/OvKZ3/qNuu/kQvHO8Olr4tNfx6uj7q8H+h35wZDmel+NwwzM75PXScy3ALtl93ZxKjCPWceQ8DoHLoXB/VLyb72WnLTtu/eLY5HnlCWsvYP/ZR+hULQMq5Unsa+HVnrlbRzle7Zn7PE7z0h1oXrozjasijctY2Cw0bOkZmse9K3OaRjCyH6QIiPAawI9Di6d8G2vnKwuwuhgFV6Mway3a6vqydTyB1guH4fStS35qOHPiNJhUr4fd1/pXeJ219Qrmul7FoAZZwSuudPHuy5zPW4cavnD7BZqPc1POz05VMqqXDM5VMHKFO1hUNIa00HkHjILqV/q26zLzmFphpIVX/gJktyyXOR3zmcU+TuY+kV4IXlH+2F61Mqmcrj3mHFcvCEaxlPO4tqJQEexItUd41eGJ48eIgNl030jhTRPGp655ou3kwyp5PCeR16u/OBwIhwXhwiF5/552ROVKsFd4tRYudd5czUPd4Ac/+i04qdyq1s+SHb6NKQ8r5yrQAu81ej73Sdy2LhuP3lEseSUcv/jpkCI6j0PJ7PHRu3ZmdRk/t8a00o3DcUxrnwfpE0e1bk4+CwEhEMwJOEp41U5VxrWu30
ab1LhORhJmfSo6zPCPBFqf2jCe80l4ZaGSHaNLliwhkTGD+TLt4qxRoyaFye1lPm69w4Lo8OHDUblyFQwY0N98etSoUSRYLrAQXjn0MJddu3ZSvjFTLnXtrLXOBfv161cUKVIEadKkwaVLl5A9e3aMHj3a3P7mzVuUuMr9c6KV9LpwvtibNz1wnELehQkTRh82b1euXEn9HIAOHTqgcePG6rgxbLF2vPKJDhQieCeJuWXKlFFOVg6lvIzy2bLQzGXIkKEoX76cmkjKk8dZOW5ZwNW5bj09PcmhWwW89Uk0Vo2F4B8ivIbghy9DFwJCQAgIAYcT0KKX2+7tiBvXtCiO36u69OgNdiLOmzUN+fM6q/t6ej7DoSNHVb7OiBG/L0y+f/8BKlSthaeeT7F53SqkSZ3KZj/Xb9yMvfvdKKxxDYvQs5zL84aHBy65n7D5PmazMTnoKwL/BeHVKVtuNZbD+3ch4rd3fp0OgoX84CC88t8Pu/bsQxYKuaz/O7t58xauXLuOwgXzq8WdX8iFuJdc4OwYT5nCNN/mq4ccBCpZhxu21aWdp++j3/xT5hDDveedwJ4zD7GwW34kiR3Z1iXmY97N1ep8sU7JopFrNQfChTEtkuX5QI7sl4JcsPWLplBpx8auOofYUcOjcclU0Pld9Vxtq/JpUbtQMvOc548cr/bM3bIwXbyHKxLFivTDVGU6Ddr0Ds5Il8gy/Zw9c59zt13FrC1XMK9zPiS3cgLPpvnwOTQfzpEAu1TLaDMMsRm87AiBQCIgwutPAG8UX31yrxoFV+6WiK4/4eEYbnHpwU00md3PcMT3u7MaD0CauEl8f8G3mv4VXs96PEPLiYeUOFqG3Ju8solD0Xr3Zc63tRZe+Zh2rPK+LizSXSVRzpO+6HWoYU5YXmXgLlWF87JmSR5d5S89edVTHTMKjdpVysJtcXKfRo8cDvyCwmEwahZMSmEgTGEyWCzlJPYs8pbOkQBxo0UA5yzg8Lx8zB7hlTuhv5j5ZaAYhaNgwdL1xF3VFudT5byq3HbtYXuV2Fwhd2JEjRTW3Dduwx7hletzP/NljI00CX8n4dYTRy8/VSFCOL9r2NCmHAtcz6eixWGuw+x5lZ3ny4/KhctjqF04GVqVS+tTE+Yct8Ycr3wBv0yx6/XQhcfImyE2clCYEg6jwvlt6xROjpblfJePwseby0khIAT+cwQcJbzywLWr1a9uVUe7XblPPgmvx44dMwuQ9erVQ5IkSSi07zUsXerCl8LFxQXp06dX+7Z+cL5UzmvKrlRn57zImjUL5XE9R87S3aq6UVBlRykLk9lz5EChggXBTlEWfLkY66kD9GPSpEngHK9cJkycqK5RH+gHC6uNGjVS13F43/iU15XDBbOIzG7WgSSu2io8IcJiKbtiq1WvhujRomHr1q2q/1zfKLxyqDweG7t0deHr6tSpg5kzZ5iFVz5nFJDLli2rJhm5L3fu3KHwyr1Rs2YN3YRsrQiI8GoFRD4KASEgBISAEPAHgRWr16IriawscDVp9CeiUVoH1+07SSTaq3Jkzpk+BeHDh1N3aN66Pbbv2ImWzZuiS8f26tjHjx9RrnINXL1myrPIoYmtS4+unfA7RSE5cvQ4atVvqO5Vv24tJEwQHzt376Www1tVHlnOJyvFsQT+C8Jrlx59sHL1GvX7VqxwIdy+cxfzFy1WIIKL8Dpv4WIMGDwMHEp5zXLT3zPOhYrjwYMHGD9mJOU5Lo1d9N9Ck79aK/f3qSMHzJF2HPsbETCteRduWN+NXa/j1lzAWrdbKhpdJefEmLX1Mm4+fIP1A4uquUVd19bWp7naqRsvYdHO60hBIYvZWMPGFb4PzwcaIxTqVGQ8B5k3fWwytnymedAral5ySY8ClIYtkq+FV+6jb+duuW5HMqvwfCfnUeV/nPbNVvFJeLVn7pPHeunOC2weVFwZd/S9dJ5Y/szzpzq3rT6fJUV0b/um68hWCPwMAiK8/gzKdA8OLcz/jIWFVXa6auerMfSwTwKtsQ3ZdyyB/VdOovty+0IOD6/eHvlS2RdiWPf6wbN3qD54t4UQySF7WQjV7kZd11aoYc4lOmrFWWw4fEdV06FovWuDK7HwyjH/53bKp5tWW3ahsoDK4iqLfplopVWd4Xvx/tMXi9ygnHx9+DJ3lQuVL2Qhk0XfUcvPomuNjCify5SzlM/xl+HyfR5K4OPPHP6Whb56RVKYw/vy8eNXntLLyhW433jGH5Vo2Y6E2Vb/HgLniO1Ww2eXlLro2w8WGefvuIqtx+7h9mNTnhUWYVkUzkpfvrpwLgPuM7/EcOGE9IUpX+owF3f0rOVEInBCaAdtg+Ip0LRUan2pecshMhLTira6NKaxK8+Z2yqYKQ7aV0oPTkBvT2En6xhqR3Pga9m9W5vaZ1H9R2XapktYuOM6FnTNj6RxLFfaPaLctb1oJR67iHXhF7V+dbNYhEnW52QrBIRA8CfgSOHV6Hq1V3zVoisT984x65en8e+//2L69OleXK26LXf3sxg82BTSVx/j3K59+vRGqlS2HQ66Hm/v3buPkaNGKncof2YRlQXMCRMm4MWLFyoMMR/n8MO9SXxlFykXFjF79epFOV4XqPyqxlDDfJ5zu7JjlAsLxGHDhlX7+sf27TswYsRwdS0f02Jqu7ZtvdTV1/B2586dqn/sROXCIYVLlCiBvhR62Do/LDtErly5gpMnTyJy5MiqLodB7tSpE4YOHYpy5cqpNvgHC60LFy7CqVMn1bHUqVOjatWqqF27trmO7HglIMKrVyZyRAgIASEgBISAfwisoXznfQYMNudb5bbKlCqJkcMGmR2IfGzCpKkYN3ESxo4chkoVTO80z+nd7Y9clnMkXNdY3PbsQNw4sdWhLa7bMXDoCCU48QEOMVy7RjV0/rudj+9jxvZk3/cEbty4gRkzZiB5cr85KK9fv46iRYuqf97dVecDbtOyBTq2b2OuNm3mHIwYPRZrV7jAKeP3SDlVatbDFXpvdz9+SNV99eq1cli70rs6F/6dGNi3F2bPW0i/Jw/NjteefQbAZfkKHD+0jxYIRIV39+U2OMdrnty5sGjuTP5oVxk3cTL9rk/B1g1rkCplCvO1/rn/vgNuaNCkBerUqoHB/fuoNv9q04EWOezA6mWLlQP88pWrqFqrnhJn/dJvc0cDYUeHG44XPaJyjmYmswnPrekwwy57boDTmFmXBsVT0pzhj/9+9GmulqPSsYOT51H1XCbPB7YkA4YxwuGLt58wfvU5MlPcN3eD87t2qprBHMluw+HbGLHsLLxzvDYrnRp/Fvv+O2HP3O0QlzNqPjhT8mhgs4mtMnPLZczbdg0zyPGa1srxyvV9M/fJUQmLdtuqBF4OwWwsk9ZfhMvuG8ZDFvssiDMPKUIgsAkEa+E1XowoiBP9t8BmbHF/WwKssYIIrkYagbPPztcJ2xf/MOwwhxduV6yOn5yujh7Z5y//Uy8C4cOawlHY0z47UN3JOcsCqvEL8SKFIG5GIYhZnBvWKJuXJvll41fKjcoi7o/Km/ef8YEE3Gjkev3lF+9rc
[... base64-encoded PNG attachment data elided ...]"
+    }
+   },
+   "cell_type": "markdown",
+   "id": "d2781eb4",
+   "metadata": {},
+   "source": [
+    "![image.png](attachment:image.png)"
+   ]
+  },
+  {
+   "attachments": {
+    "image.png": {
+     "image/png": "[... base64-encoded PNG attachment data elided ...]
H+v9Ore1TONIyJHjx6VI3/8EUKRTtJJ5syZQuOJHnjtjSXSoUs3+e2335Kt+otPP0w2Ld4TXIEiWrEBHhcoDaoXEnpRxPuIsD0SIAESIAESIAESIAESIAESIAESIAESiB8BChTxY5mQlja8v1H+d389z7rmz54pxYsV9UxLxEgkwyXWH41Asfv7PXLPAw/JV199HdMmN3i4nnRs14bhn2KidvIq04Mievajx02QQUOGeRZIhBDgWaEZ2bdvv/To/VSg+Kh1E7FdKlBAnIBHRDSl1ajVApGCAkU0tFiHBEiABEiABEiABEiABEiABEiABEiABE4eAQoUJ499qtb83vr3rUHfXXjurGlSskRxd9IJHY7GcIkNiEagaNSslSxbsTLZ9l5ZpIiUKH61HDp0WNatXx8oYHR6vK00blg/2bKckDYJaA4KhAmqfeftJgdFA+agCDhUo8aOl2eGjvDMSYQQ4K4QIdse7xzsNeHWS8R2UaBwiXOYBEiABEiABEiABEiABEiABEiABEiABP5dBChQnGLH82QLFBs2bpKHH2kaGO7FjzIlgWLr59uk+m13+heTsSOHSbWbKoem//XXXzJs1BgZMWpsaBoGkJti07q3jZE7vWc6R0jgVCZwsgWK1m07yKJXX4sKIQWKqDCxEgn85wlAaAxXSuQ7l7liwsHhdBIgARIgARIgARIgARIgARL4DxA46QLF7yafwumnn55iDoVDhw9LBmOIzpgxY6oPC/I3HDp0SLJkyZLqNtwF/zBx4v808eLPyJzZnRzTcKz7dbIFiomTpspT/Qd69jF/vnzSpWN7adC4mWd6SgLFy6+8Km3adfQs07VTB2n4cHDeisYtHpWly5Z76q9c8ppcesnFnmkncuTo0b/MOXRQsmbNeiJXE3PbR44csddRzAs6C8TjfHaak2ivbXeZSMPYRxTcLyIVe52b+0WWM86IVC3ivFivy4iNmZmxsD3ZAsXlhZKHi2vVvKns279fZsya7dnVky1QIIzTxu1J+SbcDdOk2ggLFZS3gnkpXFocJoETT6D8Yy+GXQlDsYVFwxkkQAIkQAIkQAIkQAIkQAIk8J8gcEIFCiQ/fqJHLw/Iju0fswbGwcNG2tA+mnh1/ZpVkjNnjlDdgwcPysJXXpN5C16STz/bGuqxf07Oc+TqolfKvXXulsoVb0yx9/zKVatlzTtrZcsnn8rmDz+y7aCNEiWKyZWFC8lNlSvJlUUKhdarA2+vWSuTp83QUbueUcMGy35jpJs4ZZrMe+FF+XHfj3Z+7ty55aori5iQNfVTDLWU2v3q02+g7Nr1pSBnw5aPPw5tFwauLnqVnHfuuXba9WVKR5WY2tNADCN+geLRls2lWeOGctAIP9dc540Pn5JAMXX6LJtg2139wvlzA48H6kyb8byNie/Wf2ne81L0qivdSXEf3vLxp9Yw+/batZ5QUxBmbrm5qvyv9p1y8UUXeta78YPNMnrss55p+fPns3kzPBPNCAzv7Tp2kcOHjyU/P8MY14c901/2/LA38BrKn+9yWbvuPXluynT5aMvHsnv3butRku/yvFL7jtul9l23p2igh9iy6q3VMnP2XNn0wYeh8xmeKQXy5zdhmGpJrRq3SrZsycWYWK9tJHl+1CdGtWjaOJQ7JVx7F16QWyZOniaz5syz+wh24F6lckVp3aJpSByEl80L5l4x/6WFlgfuK3nz5JEbypU1dSvYXywbrqT2ukR78bhXQBABn7/NfuBehePpliqVKoZG69d7QMpef11oPN4DrkBRqOAVMqBvH7mqSGF5uv8gmTBpimd1J1Og0NBPng2KcgS5LIKEiygXZzUS+M8SiOW6Wz3kjhAn5IXRoknsdZwChZLgLwmQAAmQAAmQAAmQAAmQAAn8NwmcUIFi2/YvpFqN2z1kp0wcJ72fHiDbtm/3TH939YqQkR3G3cbNWocMpp6KzgjyFDw7erjkzn2+MzVpEL23+w4YLFNnzEw2zz+hz5Pd5f57/ueZ/PycF6RL956eaRvWviV1H2poBRPPDGeklTGaPtaqhTPl2ODx7Nett9eOuF5dyx21asrgAX11NO6/KlDAcPlM/6elcKGCdh0//fxzzAIFxCO/18W4UcOlapVKgdv9VL9BxljtNZBuNCGezsqePbD+8U5Er/cevZ6W5+fOS7GpJ7t1lQfr3huqB7Ghxh13y46dO0PTMPD8tMlSulRJzzRl6k58vG0bK/wEXUMzpz5nQvC8LjOfn+Mu4hmGaDZzygTJc9llnuk6AkGgSYvWVrTTaUG/ECueGzdaSl17jWd20HZFurZPS3ealCpXwdPG6OGDpXq1qnZaUHszpkyUKdNmyuKlyzzL6QgEiDkzpkimTKdLwyYt5b0NG3RWsl9cE7g2gsrxXJdoLx73iqDrJ2hbMa1fnyelzt13hZt93NNVoHisdUtp2qhByGstLQsU0YoNahilQfS4TxM28B8lkFqBQnHhGnTFCkzn9ah0+EsCJEACJEACJEACJEACJEAC/00CCRco0Nsfngz+ogIFeiM/2KCRf3bYcXhDLFwwR3KfnytUB4bl/91fL3A9oUq+ASTshXeHliCj43WlS8m7ptd6SmX65AlStoy3h/Px7ldaESgWmB7q35ne3UhOnSFDhhCKIANrSh4UWOaGyjeHvGPQGBhPGDPCegOEGjcD8B65+756HtGq5DUlZO7MqW61uA0fNaG76jdqJqvXvBN1m61bNJM2rZqH6q/f8L7UqftQaBwDENVenDsrlBwaCccrVL3FwwDiz8svzLZ8gwz3lSrcKCveXOVpN2gE4sKy1xdKrvPO88yGF85td9bxsPRUCBiB+ABvBC1B2xXp2k6NQIHju+H9jbrKwN+6990jR/88GpWINHHsKKlkvK7ccrzXJdqKx70i6Ppxt9MdPtECRf3Gza2nD85Dt6RVgQLiBDwioikwjMJASoNoNLRYhwSSE1CBAtddSqHS/MKhX5zAfF6PyRlzCgmQAAmQAAmQAAmQAAmQAAn81wgkXKAIBxgCBWLL31S9VjLDKUSIihVusDHtYZjVsFDaVoUbbzC9vEeF8ljMfWGBdOzaXWfb30tMnoJ6998n2bNns6FxFi9dnqydzevfCeUWCDI6ug0izMz+/T8l21bUgWHv1ZdeCFX/+Zdfjnu/Oj3RQ7Z/sUNg0Pb3ykdPcg2PVfHG8oLQOYkuQQbWlAQKbOOcefMF++YW8GvZrIlcUSC/OeZHZPNHH9nwMv7jHikclNteaoaffW6y8cB5JtmiMJpfWbiwIFm4P9QWKs+dNc0T5qt7r6dk+sznPe0gZM7ddyZ5FvXs3TeZl4+7X0FCgKcxM4LrQ8ON+efVvvMOGdi3d2gy8jM80rRloMBxU5XKct4558iKVW8lCzGEdSx57SU5+6yzbFvRbJeuFNd2agQKXR6/OMf95707X4dxneMa8Z8rmF/jlptlxJBBWlXicV2isXjcKxBi6iGTfB5l584vkx1PnHdacH3jOk90oUCRaOJpd31zVm2X7d/+Ii1rXSXZsqScF2ryks8E95761ZKHU0y7
e3l8W7Z049ey7ZtfpE6FyyVntsy2sdff+1J2fv+r3F85v2TPcvrxrSCVS2/+4kdZ8/H3UqnYhVLwkrNjbsUVKKIVBrESvziBZdEWBYqYDwEXIAESIAESIAESIAESIAESIIF/HYGTIlDA2NnnyW5SovjVtnc38jrkyJFDkJdi5JhxHsgPPXC/dO/aKSQ+/Pnnn9KoeWt50xhR3eKGzkE8f/T014L1rVzyiqdXPuL216p9j1axv24YmHBGxwYP15NWzZuEwgq99sYSafFoW087GNm0bo0VQzAcr/1CWyc7STa2IaikVqBAW3Pnvygdu3QLajZwGozVvXs+kcxLJbByKibCcF2idDnPkvBGQL6Ly/PmCU1/5911NuRXaIIZgAfILBOCScsvvxyQKtVv8xic9Xz89rvdcnPNYzG6sQyEmbaPttTFTSi05GHSdOagfk+Z/AoV7bkIDxPkRvGLIaj75tLX5ZKLL7KLvfX2GnmoYRM7rH/g+YCwUW5S6aBEzW0fbWW2L0n8Crdd4a7tH3/cF3OIJ2zf/ffWkc6Pt7XXLvI0PNa+U2DIJwgTUyeMk8suu9Tu1rIVK6VRs1a6i/YX2/bempWhafG6LuN5r8DGBbFPRK6HEJgwAxQowoCJ8+S3PvpOOk98N1mrl+TKKqUL5pKbr71Eilx6LF9TsooneMIR47FU+fGk52vvh0tbQ3dKq6zWeZEcPPynuDkJUlompfkjX/5Inl+xLWy1s7NlkkW9bgk7/0TPeHL6elmy4WuZ8nhlyXdhUhjCjhPWyttbdsvzXavKxeeeeaI3IbD9OW9ul+Evfihd7rtGbi2ddL8MrBhmYmoEiiBxAt4T9GgKA5mTSYAESIAESIAESIAESIAESOA/RiDhAgWMhPNmTQ0ZEpU3ktyWN+F+3OSwN5YvJxONZ0T69Om1mv09cOBXqXnX/zwJix+4/17p1b2rnV+vQWNPaJ5woYCQgBteCVrKGONyhRuSDNNBRsdbq98swwcPkNNOO00Xsb/9nxkq456d6JmmveDjuV9Ywb9RoMB+fbb1cxPC6cHAnu+YrwXJgtELHkmXT1SBuAWRyy2uAOZOn2HyQHTr2dudJGveXOYJObZ0+Upp3NxrLG/VvKnxDNniEdogvLzy4jzPvoUTAoLypoQLS9WrxxPygAmFhALPIngYacH1+OrL80L5X3Q6ejsjafOiV1/TSYLtQ8golKDtCndto35qBIpSJUsa4WSi5/pHbo8ixa9Fk56y4o1Xkt1TgnKWvL1iiVxgEm/H87qM171Cd4gChZJI+g1nEA033bu0d+xUMIiu+OBb6TZ5nd3wYpefY39/PnhEdu4+ENqZ4ab3+TXGwHuyyry3vpAvvvtFmtW8MioPihMhUAyZv1leMNtx4TlnSo5syb0RsmTKKEOaHgtLl2hWFCiSiIcTJzD3VLgeE33ecH0kQAIkQAIkQAIkQAIkQAIk8F8kkHCB4uEH61qPCD/sTz/bKsiz4JaRQ5+RW6tXcyeFhvsNHCzjJ04KjaMH9ZtLkoypAwYPlbHjvYIBPB/q16srF114YWiZSANBRsegGPZoA+F+/meM626ZMHakVK5YwSa2jtd+of1/m0CBfCEIczRrzlwXX8RhGMLbtG4udU3v+hNRWrZpL6++/kaoaffcCk38ZyDI22LYMwPkthrenrv+Nv3tYNwfHgrTgoQAGO5nz5iM2cnKt999J+Urea8ZCGsjhyaFNipasoxHBHKFPX9jy1e+acNBudPXrFxqk9IHbVe4axvLp0ag6NrxcWlYv567ejt8T92HPQmxXeHErbzwldeMyNLBnSSvL1ogV+TPH9frMl73Ct1QChRKIulXhQiMBcW0x7RoQ82cCgZRFSjur1xAmt92ZQjG13t/k1fX7ZKpS7ZKlswZ5LWnakj609KF5qflgRMpUPR/pIyUuzJ3mtt9ChTBYZ3813CaO3DcIBIgARIgARIgARIgARIgARIggYQTSLhAEWS8xV6vXLVaGjRu5gGAsDolil3tmaYjQcmLt3+y2YaCCmpLl0PuiDLXlZJriheT600i6/NzeRMIa70go+M7q5YH1v/m229tsmddFr/jRg2XqlUqxXW/0O6/SaBAL/b2nbrKiy8vwq55CozOxYsVNcb0gyZR8iZPiCStCI8ZGNjjXW67655k+SXKl70+7Gr852KQYX3PDz/YUE9BuRHQcP2HHpRunb3GdEwPEgKaNm4oHdq2wezA4hch1IMInkfFSiXfj3D79smnW5NxR4JvhIQK2q5w1zY2MjUCxbTnnpVyZcsk20e/2OMKMG7loGtFBYqge0Rq7zfxulfotlOgUBJJv/4e2N65SaLFf0Gg0P1WkWVi24o2h8C3P/4mb27+TgqbsE85TVijNR/vlu3Gu+GBKgXkslzZ7GK/HvpDPtq5TzZu32tyQYgUz3eOFM1zTsj74cDBP2TRu7sk+5mnS42AsD/Ip/DDT4el6jUXy7lnZRYdv7NcXsl8+jEPx8NHjsoms473tu4x4slpcu0V59mwVOEEikO//ykfmJwIqJ/1jIxSMv95UviyHJIxvddLUffd/VUPilgEip3fH5D3P/9BvjDeKFflyWm9UHKdfYbbbGg42rpH//pbPv1qv7z76R75+bcjUvDis6VqyYvl6VnvRwzx9PsfR2Wl8ZbZ/+vvAk8ZsMqRNblnII7v2k++t9t8phGmClx0lpQpfL5kzZw898dvJozWhzt+tMf5z6N/SyGTY8LfbrgQT598ud8cux/ljEzp5Y6yeUMc/AMqGKYkDPqvW1yjFCf8NDlOAiRAAiRAAiRAAiRAAiRAAiQAAgkXKGZNmyTXlUoeoiXWPARBh0/zPiA8TZ9+A2WSicmfUoFx9q47akmtmrd6QjcFGR23frRRMmTIkKxJGJ/L3FDZM10FinjuF1YQZHQN6nnv2ZgEjKQmB8WI0eNkyPCRnq3LnTu3TDTeJ4ULFfRMf/X1xdKyTTvPNIyEM2InqxjDhFJlKyYzzMewuDRp1FA6tksuIIQ7F7DPS199SbJkyZJsNUFCwNBn+kutGrcmq6sT/CHO1ANk565dUvnmmlotVb/qRRS0XeGubawoNQJFuPbiIVCEOxaxQNH7TbzuFbpuChRK4tgvjKL+AgMo/qdkKHWXU+N+g+qFpMHNaTNhczgPCt2PES99JLNXbpNO95aQmtddZgWJDs+ulRuvvkDWb/3B5npA3WHNy0nJAufJVz/8Kg0HrwxN13bghTG+TQXJc342gZH99p6vy08HfpeFvW/xGMohOtzUcaFdbGn/26wg0WLEW1ZYcOvCOP/woBVGyDikq7C/dxgRY/GGr5LloPjGeITUf2ZFsu26Mk8OGd68vGTKeEz48DT4z0isAoVy87f1dIPr5MaiF3gmR1v3L/Ou8YQJx7XKCERuucKIFOdkzyTvmGTUQTkoHrzpCpm2dKu7iB1W0UlnLHn/a3ly2nodDf3myZ3NhK8qJ+cZsUgLPGwaBPBELo4BxstE85YECRQfGvGq2bBVtqkxrW+QonnP0WaT/UYrUGg9NEBxIhlGTiABEiA
BEiABEiABEiABEiABEnAIpBmBIiiWv7OdUQ1ueGeVTbaNyhApJk6aKuMmTIrK2Fz7zjukX5+eoXj38TI6xnu//k0CRYWqt3jyiOC4rVu9Us49N9g48vaatfJgg0aoFip17r7LHLcnQ+PxGPB7IMTaJsKJPdHp8WSLbfn4U7nN5E7xFwgIS155SU4/PXkc9SAhYOzIYVLtJq8g5rbZuMWjsnTZ8tAkhMRCcuitn2+T6rfdGZqemgEV3oK2K5yggPWkNYEintdlvO4VejwoUCiJyL9qAP2vCRSaaHlcmxvlystyhgQK0CplkmjfVym/5M6RReAZcPjIn0YEWGlFA4SMqm4SbKczUaHeWP+1TF+2Vc4zdSa2rWA8LzLL2EUf22kqfCj9lZu/lScmrRMIDe3vLmYn+wWKv4zA0W78O/LeZ3ts7/6Hql5h23zbeHMMX/ChNhVKkg2PjcbD3pSv9vwq9W8uaDwzLpGDxpti8pLPZPWH31kPhB4PJO/IEGrIDMQiULy0ZqcMnLtJkGi8Xe1icsl5WWXTF3ul9/QNtsnn2leSK4xnAkosdbG9E179xObBePTOota74WPjiTBo3gdW7EF7QQIFpt9ZPq/ccb3xVDDHA6G7Zq/cLhATpnWobAUieEPc+eTrVsBBQmt4QnxvxJ/pRthAom03BBjEoUZD3hR4W9SpkE9qGOEKotNSI3DMXP65bfelntVtSDC/QLFl1z5pMnSVDRs2rFk564mD7QtXYrnuULdEvnOTeU643hXxTJwebps5nQRIgARIgARIgARIgARIgARIIG0TSDMCxeuLl0rz1o95aCG57wW5z/dM0xGjPwjyF5x+elKYg3TG6lLhhvIhgUHrHT36l6xbv16WLF1u4tZvTBa6R+vht/1jj0rzJo/YSfEyOsZ7v/4tAsXu7/dI2QpVXPxS45abbQJsz0RnBCGhritf2SM4wftgzcolTq3jH6xS/TbZsXNnqCGE/hk6qF9o3D/w++9HJFOmY+LCZZdeKvnzXe6phgTWd/zv/rDn32OtW0qr5k08y2AkSAho16aVtGjaOFldneAXfq4sUkQWzp8te/f+KKXLV9Rq9hfhke68PbxXhX/fShQrJjlz5gjcrlNJoIjndRmve4UeGAoUSiLybyyGUm3pVPagQDikpRu/kf6zN9rdWTGwlmTMcFpIoECy6Bmdqthpur+vr/9K+szYEGjwf8qEIHpt3ZchTwwkva43YLkVOdzk0t2nvCfLN30jbs96v0Cxa88Bqdt3mTXUTzLGfoQi0gJPgXGvfGxH1Rj9hvGogDjgih6oAG8NeFVAuECOjWxZkocx0nZVoChkQltdkDO59xnCT2ki8TuffMOKNHOeqGq3Udt41ew/QjG5xv5Y6mroqsmPV5L8FyYJHGgbYZaaDX/LriZIoEDOjL4Nr5PToBaZgveZJ6askzdNyCeIEbeaMFu/Hv5DPt61X87KcroN5WUrmj8IPfVAv2UCL43n2lW0k/U4Vyp+kfSqV8qKUFq/tzn+H+7YJ90eKGnCeuUUV6CAJ0abMW/bqqNb3eDZB13e/5ua687fBgUKPxGOkwAJkAAJkAAJkAAJkAAJkMB/m0CaESiCEk1PenasER3KxfUIHTxochqYpNazZs+T1xd7Ddtu8uF4GR3jvV//FoEiyBsinOeBewIE5YfYvP4dyZo1q1vtuIb9IZKQcwG5F46nTHhuijw9YFDEJhYbLwq/sBEkUITLuYDGkeMCHiBuqVThRpk4bpRArCtwZVIPaJ3frPEj8njbR3U06t+g7TqVBIp4XpfxulcofAoUSiLyb2oMpaeSQIG9v+WffBAHTA4JeBZo6f1QKYExGgU5JxDiqfYNl8tjd3lzNg1dsFnmrfpCBjW+3no26PL4RTgoGKddkaCB8bbY+vVPssiEeTrb5EPQ8E7wtHihe7WQQd0vUKwwAkY3I2TUNXkvmtU8ltgb69ljev3fZQQCFBUoRrz0ofUYaGrqXm/yKbgFwgm2IaWwQCpQuMu6w2roR46H27q9ZmdBLHDLDz8fkvbG86No3pxGgLnR5oOItu7enw/LHSYslisUuG3X6bPEejQECRR96peWildf6FaXVeb4dnnuXbmnYj5pdXtRzzx4U+w7cNh6U0C4eHT029YrYlGvW2y9ofPNcX7rCxnQqIyULZLbs6x/RAWK267PIwvf2WlnTzfCFkJ9RVNSc93526VA4SfCcRIgARIgARIgARIgARIgARL4bxNIMwLFwUOH5KoSpT1HI5LB+utvvpXNH35k66Nnfa7zTDLOUiXl999/lzXvvOtpp0SJYnL2Wcd6N+rMu+55QDZ98IGO2t8vPk0KRxEvo2O89ks3Mkig0LwAWudk/Maag2LrNhNuqKY33JDmSgi3/UG5PuDd8OGGteEWSdX0kWPGy+BhIzzLhkuQjkorVq6SQ4cP2/o4F8uXu95zvu3a9aVUurmGp73mTRrJlOkzraCgM5DMevb0yZ5cKEFCAOq/MHtGYAL5/s8MlXHPTtQm7W+bVi2kdYumdvi+eg3k3XXvhebDu2LBnBmBuVWwT9g3FOxXxowZ5abKlYyX0mmnvAdFPK/LeN0r9KAECRSa70LrnIzfp/sPkgmTpnhWrfdLz8QEjaihFKuLNvkuDKMop0IOCruhvj9IjgzPAPTA16ICBfIaNKlRRCfb32bDV9ne8893rSoXn3umZ97u/Qfl7l6LBR4IEx6rYOfNf3uHDDbhiTTM05smvFNXE97pkVsLy8NVC4aW9wsUE1//RCa98Vmo93+oohmAd8ANbV+0k1Sg0O1y6/mH25pwUneZfQ1XVKCAWHN9gFEe3iXpT0snG0xSbBj0UyrYtljqqsADEamr8Xrwl05GbICoFCRQTDFhnPJdkN2ziHpGuCHLsI7RC7dYwcZT2Ywgh8jivkneb8oz6Dj7l1OBwp0+t1u1QC8Ut44O63XnbqfOi/aXAkW0pFiPBEiABEiABEiABEiABEiABP4bBNKMQAHcbdp3kpcXveIh/0z/p00Imts80375xYSTePgRT7icG8uXk8kTxsr+/ful5PU3eupXu6mKjB051DMNI01btpHFS5eFpmusfkyIp9ExHvulG/nB5g/lzjr366j9rVf3funZrbNnWqJHYhUoDh/+XYoUTx5j/OEH60pXk78hfXpvgtT9P/1kQoC19RjXsY/Xl7lOZkyeENfd3f7FDql6ay1Pm4UKXiFzZkxJ5qnx4suLpG0HL3s3aTkM+zhXXVEgb5488saiBeYcmyfdez3lWc9TvXrIfXXuDk0LJ1DgXH1x3ky56MJjvXDDJRKH9we8QFBmz31BOnfraYf1D/J49O3d04QFSaeTjLfFUenR+2mZ+fyc0DSIQe+uXiFZzjjjlBcosFPxui7jea/AdgV524waNlhuubkqZp+0ktYECtfIGSuUU0GgqGbyRbS+Pem6xf5lPSOjZDDioL9EEii6THrXJnB+1ggQhY0Q4Zat3/wsDUxSa4gd/U0SZZSfjLdBTeNtULpQLhncpKz0MAmal5k8BrONwHGRI3D4BQr03k
cv/pZme++tmN9dTcgLAxNVoNDtanhLIWOo93YeQC6KLJkyyOXGgO8XVdyGVaDAtruCjVsHw7qfGEZCbH/B+rIZtmgjNXWvL3K+DGx0vb9ZE+IpSRwKEihGtrxBiufz5lrSRNWVEabJiC5umChMK2lyUOQwXi04D1qPWm3zhyzocbNdL7xX4MWieUmSbYwzwRUoIHit/eR7yXdhdhn3aAWbAN2pGjhIgSIQCyeSAAmQAAmQAAmQAAmQAAmQAAkcB4E0JVB8tOVjqVX7nmS7U6tmDSlbprTkypVLtm3bHpj4esLYkVK5YlIv0HYdu8iClxZ62oERFomFixQqZHIY7Je5LyyQqTNmeupgPZprIJ5Gx3jtFzZ2n9n2a8t6BRhMv+uO2y2jvHnzBPasR50TWWIVKLAtw0eNlaEjRiXbLHgS3Fq9mlyeN69Ndg5+M2fPld27dyerO+25Z6Vc2STjWrKZxzHBL16hKQgLyJNRotjV8suBA7JsxZuy6NWk0CG6KggZr770go4GCl1TJo6TG8qVtSLALbVqG2P/9lB9iABLX1so5+c6z04LJ1DoAsVNTohLTZLtdes3BPKBMDHfeFucdlqSYfPXX3+VasZzxc8SQk+lG2+QggULyDfffCfPz50X8lDSdTVuWF86Pd427HadSiGesBPxui7jea/Adi1eutyIp96wWxCk7qhVU666srDg+rj4omPCFJZJRElrAgX2GcbSWEtQ0t5Y2ziR9VeYPATdJq/z5EWItL5IAgUSYSP5dYd7ikutMnk8zWj+hUduMd4R1Y55R6h4gF71/+u9WIpdfo6MMvkJ3OIXKDZu3yutRq6WKtdcLE8+6BWeNQkzlleBYqrJSzHe5KXobvIiVCt5idt01MPRChR//PmXVHr8ZY9BP9xKYql75M+jUvnxhZ4E1NquzsN4kEDR+o6iNpm11sfv/NVfyOAXNgvCXj1gQmWNWbRFZiz73HrFwDtGyzd7f5N7nlri2Z+ZKz6X0S9vkcdqXy21y1+uVe0vPDOQPLvQJTlM4vJMoRwUjYxXTF2TOP2xsWsEYh8Ese51S3qWDRqhQBFEhdNIgARIgARIgARIgARIgARIgASOh0CaEiiwI+GM1pF2Eh4WA/v2CRlhN7y/Uf53f71IiwTOe2PRi1Igfz47L95Gx3jsl250qbIVPYmidTp+YcQcPKCvOykhw6kRKJAP5KFHmgqOV2rK/ffWkT49u6Vm0RSXQUJphGVCTodYysL5c+XKIoXsIrt3fy9lK97kWbxKpYry7Jhj4aMQjuyB+o946lSvVlVGDx9spwUJFDBW/7jvR88y4Ubmz54pxYsV9cxes9as03h1xFLy58snc2dNlbOyJ4UlCdquU02gwP7H47qM970iKCSYe6z69XlSILgmuqRFgSLRDBKxvngKFJu2/ygtR75lwwFN71hFcplcEijIn/CwSUb904HfZWjzcnJtgSRBFPPe+ug76TzxXZss+73P9gSGbfILFAcO/iG3dH0Fi3vyXSCHRZNhb8r2b3+x81SgUO8X5LZ4tk0FOfeszHb+738cFSTlhifDQJNPwU08bSs4f6IVKLCIbm9jEwKrnmPsh7gzcO4HUrpgLul8bwnbeix1sW9bdu43gsIVRlg4Fl5rwJxN8vI7O217QQIFwjM9165SyEPku33mWThwuc0xobk3ENpp5vLPrXgEEQnl6F9/C9p+5d1dHoFCvS/87eI4399vqW1XE4SrB4Xm6NhnzoF6Zt04F1rdcZXcUyG/XVe4PxQowpHhdBIgARIgARIgARIgARIgARIggdQSSHMCBULijB43IVkOgHA7iITBQwb2tfHx3TorV62WVo+1j8rADOMrDMJuguJ4Gx3jtV/YR+QFaNi0hbu7oeFTSaDARqNHf/tOT3hCbYV2JsJAy2ZN5LHWLTxhiSJUT9WsL3bslEbNWsmOnTtTXB6eD+NGDTNeLMdCiGDZZStWepZd/sYiyXPZZZ5pQd4a40YNl6pVKgWGUoK3UL+BQzyeF54G/xkZ9swAua1GUhJV/3ycQ63bdYj6+phqvD5y5z4/1My/RaCIx3UZ73sFIPcfNMR4ij0X4u0OUKBwafz7huMpUIDOhNc+kcmLP7MiRZlCSdfw2k+/t0broKTW8CKo0e1VOx/Lv963hmTNnBGDoaJG/IUmmTbCDqEs3fi19Jy63g4jRNQ52TPLOyZ8EAqM3ygqUGB4nPGgmGY8KWBURw6J7FkyyuqPdssPJqk2Qg8hbJITdQ6LeEosAgUEgEZD37TbgZwbV5uk2PBEeHtLklcePETgKYISS91dew5IoyFvWlZIll3okrNl844fZefuA1ZAwL4ECRQQZjAPic4RWE+Px90m0XmbfxKdq7iEbSpf9AK56JwsNrE5tu+gSZqNNjTEE+pMXvKZTHj1EwwKQkKlT5/O7h/q1ixzmXS6J0mA8QsUqP/pVz/JI4NXYlCGGcGqpCNY2YnOHwoUDgwOkgAJkAAJkAAJkAAJkAAJkAAJxIXACRUognoCu/H5I+0BetVPnjZDXnntjcBqpUqWlHZtWtnE2IEVzMQf9u41hr6h8t6GDfLVV18nq4ZwPBVMWBskED4jc1IPTq00b8FL0qHzEzpqfz/fsilZbgTMCAq7FC5xdTz2C+t86+01tve33/vgZAkUyAtSvHRZbFqoPNmtqzxY997QeKQB9OqfPG2mLF22PGw1iAAIsYRQQ5fnzRO2XjxnQECZZXJFTJ81O/AcwjY9Uv8hafDQg5ItW9bQqt9es1YebNAoNI6BJo0aSsd2bTzTMPKlOTcrVvUKCblz55a3VywW5MOoVuN2zzLwVEDopvETJsmwkaM98zBSqcKN0rJ5kxRDfWG9U2fMsnkpgjxFkLT80RbN5PbbaiQ772O9toM8bMaPHmGSblfEJkss7flzR7ih2Wxj//zZ+MFmqX1PXXeSDZ8VdO4cz3V5Iu4Vf/zxh0ydPksmTpmWLBzXyRIoghKwn8wk2Z4D+y8a0cTUQeJB0G4ih0D78e9IvapXSONbj/Xi17p/myTVM5ZvleWbvg0lW0bOARjH4U1wWoAKMHTBZpm36gu52YT96RYQ9qeVyYEAL4hFRqA4+x+BAut7cc0O4zmwK7QeiAGdjWG82YhVdnM0qTNG/jIbBoFiucmdoB4WmA5vBIScynx6eoyGLbqNA4ynRdmAJNn+Bbd9+7NMWbJV3v1HnMH8K/PkkOa3XRUSJ3SZWOqCA0JWwdsE5WwTRqm9SfC9yiTIXrz+K3ETYmvibOSKeH7ldps3AstAbKhS4iJpajw83DwjEH2GLvgwJPDguHW9v6TNHXLhOWcKvCLcMmvFNllicoZs/fonOxnbUrt8XnnopoLGwzQpx9CcVdtluGnzCXNcq5vjq0VDfkEwmtX5Jisw6Tz3VwUKTIs2Ob27vA6DG4orWuk8/pIACZAACZAACZAACZAACZAACfy3CJxQgSIeKGEg/vY706vyh72SPkN6yX1+LpsYOGNGb4/OlNZ18NAh2bFjl/xkki2fe+451sAdaxsprSOW+fHaLxgy9/zwg
yDpNApyF2TNesxQHss2pYW6hw4fls8++1z27d9nevcftHkacuTIIbnOO1euKFDAGMqTcikkelvR037399/L93t+kINmuyBMXHbpxYJtO5ElJU8FJLP++utv5OtvvpWzzsouF5ncBDnOPjumTTpy5Ii5xr4z+7dH/jr6l+TMmcPmtciSJUtM7fwbKsfruownCwit+/f/ZEWiDCZ5/AUX5JbTTz89nqtgW/8RAgjFhJLNeCucyIJk2+mM8HHWmdGdp9guhHfKmT1ToGASz239y4RJ2vPzITn7zEwpiiCx1EWybXgrwHMkQPMJ3AWEwPrl4JFQ6K3ASmbivgOHLU/1VglXT6eD55/mmRVtfV0uml8NzxVN3WjqUKCIhhLrkAAJkAAJkAAJkAAJkAAJkMC/m0CaFyj+3fhPzN6NGjteVpse/PEomY1nyaTxyXvpx6PteLaBMEk//5IU5/x42y19bUkbPup424nH8ikJFPFYB9s49Qj8W8/3U+9IcItJ4L9HAF4U8ShpPWF9PPaRbZAACZAACZAACZAACZAACZAACaRMgAJFyoxOuRrIvREuNFZqduZUCONStGSZqPIpRLP/CJE0cdyoaKqe8DoUKE444lNyBf/W8/2UPBjcaBIgARIgARIgARIgARIgARIgARIgARIggVQToECRanRpd0EKFMd3bChQHB8/Ln3iCVCgOPGMuQYSIAESIAESIAESIAESIAESIAESIAESIIETT4ACxYlnnPA1UKA4PuQUKI6PH5c+8QQoUJx4xlwDCZAACZAACZAACZAACZAACZAACZAACZDAiSdAgeLEM074Gnbt+lL2/vhjXNabPn0GKV6saFzaOpGNfPjRFkGy53gUJL6+PG+eeDR13G0gKXenJ3p42unSoZ0UyJ/PM40j/y0C/9bz/b91FLm3JEACJEACJEACJEACJEACJEACJEACJEACFCh4DpAACZAACZAACZAACZAACZAACZAACZAACZAACZAACZAACSScAAWKhCPnCkmABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABChQ8BwgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARJIOAEKFAlHzhWSAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAlQoOA5QAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkkHACFCgSjpwrJAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESoEDBc4AESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESCDhBChQJBw5V0gCJEACJEACJEACJEACJEACJEACJEACJEACJEACJEACJECBgucACZAACZAACZAACZAACZAACZAACZAACZAACZAACZAACZBAwglQoEg4cq6QBEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiAAgXPARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIggYQToECRcORcIQmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAUKngMkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIJJ0CBIuHIuUISIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAEKFDwHSIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAEEk6AAkXCkXOFJEACJEACJEACJEACJEACJEACJEACJEACJEACJEACJEACFCh4DpAACZAACZAACZAACZAACZAACZAACZAACZAACZAACZAACSScAAWKhCPnCkmABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABChQ8BwgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARJIOAEKFAlHzhWSAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAlQoOA5QAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkkHACFCgSjpwrJAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESoEDBc4AESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESCDhBChQJBw5V0gCJEACJEACJEACJEACJEACJEACJEACJEACJEACJEACJECBgucACZAACZAACZAACZAACZAACZAACZAACZAACZAACZAACZBAwglQoEg4cq6QBEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiAAgXPARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIggYQToECRcORcIQmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAUKngMkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIJJ0CBIuHIuUISIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAEKFDwHSIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAEEk6AAkXCkXOFJEACJEACJEACJEACJEACJEACJEACJEACJEACJEACJEACFCh4DpAACZAACZAACZAACZAACZAACZAACZAACZAACZAACZAACSScAAWKhCPnCkmABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABChQ8BwgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARJIOAEKFDEgf/H1lbLr6++k4X23S9Yzs8SwJKumJQK//PqbfLv7Bzn/3JyS4+zsUW3anh/3S6bTM8pZ2bJGVT+WSkeP/iXf7dkrP/9yQC6+4Hw5K3t81/HXX3/Jt98ntX+e2edc5+RIcfP+/vtv2b3nR8lutuXMMzKnWD8RFX797ZD8+ttBOf+8nJIuXboUV/nLgd/k6+++l0suzC3ZsqZ8veIY/7B3n5yT42yzjnPMOlJcRUwVDv9+RL7ZvUf++ONPyXPJhZI50+kRl9fzNGOGDOa8yCWZUqiPxnb/8KM5j36VvKb90835yuIlEO4e/tvBQ/LmO+/LF19+I1nM+d7g3lreBTl23AS+/f4HmbngdSlzTVEpX7r4cbeX1hqY/sJr5prLIHVuq+rZtKBza+++n2Thkrckf56L5YbrSnjq/1dGIp0PWz7bLhu3bJUfDadqFcpI4QJ5T1ksuO/PevENufD8c+XmitcndD/++utv+erb3YJ3ADwHM2RIn+L68dw8/Pvvct45eM6mWD3mCmgfxx7PP2xT+vSnxdxGpAVS89yM9d0i0vrjMS/W96/fzTm246tvJWeOs6J6v4v1XSTWfYr1nTY176ix7nOs+8D6JEACJEACJEACJEACiSdwQgWKdzZ8KM/OWBBxr2AIOhWMFUeO/CFNO/W1+1LfbPMNp4CB5dDh362xEsbZM7OcEfE4YKYer4plS0q9u2vY+jrNvzAEmlzn5pAbr7tGypUqdlwfmQteW2GN
Nf51+MfH9OtsRQJMX/v+RzJ++nx/ldB49UrXJzMUbdj8qcyY/5r8ZIQALRAoHm14r1x6UW6dFPr97dBhu46Pt34h+OBCwUc1ztd7b79ZTjst+OsdH1sLjJj1+oo1drmB3R41Ru+zQu26A+s2bpHJcxYag8CR0OTs2c6UVg3ukXyXXRyalhpGWHjrF1/KyElzrGFfG7v04txmn++THGdl00mhXxi3R0+Zaw21us841rVvrSwVrr8mVM8/EMs+67Jov9PTI+TH/T/bSU91bC4XGCOOvyxb/Z689MaboX2AQQNsmj/8P8me9Ux/dbPPu8w+zBMYQrSAf7umD0huIzz4y1fffi+jJs+VPUac0IJ1PFD7VqlQJvw+oy5Eja79RtnjfIYxbI96qoM24fmd/fISeWPlO55pl192kT3OftELxomJs16SNes3h+pje2D4rHrjdaFp7sAbb66V+a8ut+KHTi9yRV5pVf+eqIQNXeZk/+IaHTV5jt2M8QO7Sob0KRvU/NuMc3HP3iRB0RUgw93D//zzqHTuOzJ0HkY6jv51pWY83PaFa0vvwe59OVzdtDD95wO/yqFDv8s5Oc8SiGtaZi54Q5a+9a4V94f3bq+T/zW/Ddr2kowZM8i4/l1C+xTu3Pp8x5fSd8RkKVo4vzzW6P5Q/X/jQKzng57vyqLundWlyg2ldTTuv+G2L14r+uDjz2XYhFlS9tqrrRjVf9SUFJtu88h9cnWRAqF67XoNlf0//RIa9w+MH2DulY748Lt5Xx077QX56NNtoXcXLHNzxTLyv5o3mXcXryiAZ86chUtl1bsbzbV72DaPZ06h/HmlyQN3RuyQ87V5fo4w7xg/mGchxMfGpn5QOWjaHTP1BYH45BYIdHjfxPpQPtu+S1LDKDXPzVjfLXS739v0sUya/bJ9b4v2GwbP8wkzX7RNFL+qoLQ273j+Euv7F+oPHj/DiFDfh5rCO2rD++6QklcXCk1zB2J5F3GXwzC2X99L6t51i1QpX8pfRaJ9p9UFU/OOGus+67r4SwIkQAIkQAIkQAIkkLYJnFCB4q11m2TS8y/bD4+cZwcbaO+9vZqUMC/rp0KBkRQfAg/XqRnxgy2t7AsMovgYQQ9EcE6p6PGC4AAvERSdho9HPYa/mt7G+hGLOlcWzGeNLOEM9qgTqSx/+z1Z/Oa7gVXw0YkPXxRXoHh1+dsyb9EygTE/c6ZMyZatXO5au986470PPpYxxmiNcsmF58vlxsANQ/Z3xrMApWvrBpLP9GbVgh51XYzhGT32YbC8yuwjehWqWIG6WMZf0DNw+MTZHmP3gCday7k5z/ZXFXzYdx80zk7HfhQrcoXsNL3g9GOzS+v6poftJXZ+ahjBINDeGDYgfqD9IgUulw0ffmKN2DCOP/FoQ882wQDStf9oWx+G3YKXXyb7f/7FGgxQEYaNWyqX9SyDkVj22V34BWNQf2Xp6tCk3h2aykW5c4XGMYBjjGONAoM7xJJPt+204gOOy0DDFj3etWDegNFT7WiRKy63vVY3f7LNHg8YD5/u1MIjFsGQ0/Gp4bY9zC9ZtLD8duiQfGiWQbm7RhW5tUo5Oxz0p9eQZ80x+87O8hsntT6OHXpXo8ATIu+lF8kHH2+164QxYUjPth4RYZFhArEBBQZMGNC+/Hq3He/U8mG54vJL7bD+0fq4RkteXVjOMNfDOnO+4xqF4NOnQ/MT0hNW1x/P30Fjp5lrbIdtsmm92lK6+JUxN6/XFc4lnFNuCbqHb9/5tTw1/Dl7bnVq+ZA5Z85zF4n7cKTtC1qZ3oPd+3JQvbQybdDY6fY++VjjulK0UL7QZuFeO9P0JL+uxJWnRKeE0IZHORAkUIQ7t/5LAkWs5wOuRXCrWfUGqVml/An3BAu3fVEe9hSrwWto6Vvr5JH777DPo7HTwnesUJEcHRTc9+JGj/ex64F3X1CBuK8GfvQsf3rEJPsegWdS8SsLGpHiqHxoxAp47xXKn0c6NK/naUbvu2gD7wmZM2eSTz7fEXr/ebpT82Teo3g3e3nxKtt5QBu7pmghaVm/jo56fodNfF4+MF4xKHhnzGae5e99sMUKKAXzXSYdWzxk5+EdKDWM9DmIRqJ5bsb6boF20WkF3zTvf/gpRm156H81I3Yz0JWMAABAAElEQVTeQCW8R0JkAn8U7H+7JnXtsP6J9f0L7wXdBoy1beNZBy8jeMtA4EHxn0OYFuu7CJbRgvNh4JhpOmq/KfBt4RZ9tmFaSu+0qBPrO2pq9hnrYSEBEiABEiABEiABEjg1CCREoDhVDCunxiGLfivjKVD4j+E+Y8zeYjwLps5dZD8w8WHa4uE6cTeEaq8zvyCgvXG7tWloDb6RqJjvaOP98rT9OLzLeALUvKl8qPqchUuMp8M71jDuGjNXrtkgU+e9Yg3KECI0vA5Cc3QfONYa8f0G9bdM78NJsxfatmFM32EM1zASBwkU6Fnb0XgP4KP0gdq3SOVyx3qiaQ+3Ankvkc6t6oe2NdxAOEborfjepi3iGg28673VrPfaULPzjSfLIhN2pJQxCjd54K6Qh4j2aj87ezYZ3POxUH0MxLLP7oIwVkIMgQElx1nZrYDg54ke7y269reLdX+skRWWMPLHn39Kz2fGW3GpsdnOMtdcFWpajU1ur1v0WIfRa8eX3yYTHMYbD6+1xtMLjJo/dHeoZyl6eT4zbobdvuG9Hw957oRWZAYQEmiKOf8h9nyx65tkvadRFyGXuvQdZRd7sn2T0D7gOPQdOclu0523VJLbjDEOBaG+uvYbbYddj5LVRux9zhhGINAMMh45Gr7JPbe7P/aIDR2FheElhPVCnOrRtpFcdvEFts20/AdGnNbdBoU20TVahSZGMaBGkiCBImjxN9ea4zhnkfWWeciIzye6xLp9/xaB4kRzPdntBwkU4c4tChThj1aLrgPsc3Ok8UZzxefwSxzfHH1m+AW142v12NJ4zuF5h2cnnqHhCt4tOvQZnvTM6dU+9M6BZ0XjDk9JtO8D8Kab/dJiayB+sl2TUMhIeCp2GzDGPhNgHIeRHAVegJ2eGmHXi2eUehni+dtv1GQrwKODi2uMhmEZz1TsF0QNdK6A0d5913D3U99RsP99OjYLHdf9Px+wXpQw3Hd91HQScbxG3eV1OByjWJ+bqXm3QHhXeHbgmYoOHGdnz2qf39EIFPBiBR90LoDHQJBAEev7l77fFzOeNq2NR6yG45q7aKm8tnyN+J+fsb6LKHP84hxs33uo7VSB8/DzHV8lEyi875bRvdPG+o4a6z67+8BhEiABEiABEiABEiCBtE8gzQgU+KiCgeoa402BHsZa0GMJoXIQTOeO6hVNLN2/5aXFb9rwNJXKXitvv/eB7XELQyc+jmAYdsNKaDsIJ4Me8xs/+kzgAXB14QK2V3zWM4+FPsILtrZ97dVF5J33P7S9kUoVKyLXlyxqDZJ79/9ke/XBYO3Wv67EVdbQ+YnpwZ3TfLzcZEIS5DLx/hFmCb3n8HGD6fiY0A9D3Tb8ot5HnyLu8meS1YRjQr2C+fJ43PbxEYYPvSvyXmp7Rq8
zxmcYRvHRd22xwrZnHNqCQRax1rEt6ImIXtvFrrxCzstpQjKVCR/vOsgQFjQN69CihhaMN6p7p+Wk8473F/vRpsdge164xle0qx82z3Rvk2IeCTUI4qMS9f1FDSLoVYjehSijTcif9Zs/kXr/qyEVry9pp+kfNWq7RnDMQxiHj4xhW0MDPd57mA0bEyRQ6Ac7jk2vx729vP80vR0Xm+sBYRiqpxCzOhwjXDetjKEHxc8I18CI52bb88YVHGCQx3kK1q7HB4zgDdv1sm25XiyYEMs+2wb++dPDeI7AU6TJg3fZfYV44Bco4A2BUE3wCnjIHAe3oOcmzvHKJsTAAybUAAp6dMKQg2v92YFPhHqUYp6ex27vURjx2/YcYusN7tE2WZ4K3FsQQqFsqauTGZYO/HpQ2j452K4Lx1cNS254F6xXzxXEHr+nVlVMChUYhtabUBHnmZwgpYoXsdNhKIdRE2Ev6t9zW6gujgHCEKGHbWNznZUx9yOUL7/ZbcWaPJdcYI5bo1B9DOg1cndN4wVSuZxnXlocUcPaLZXK2uOF58EwY6jz5w9x74O4pnEfxHkLb5ftu76Wbeaet8YcO3inIDQMnh133lLZGnAgKuk9HHlMFi59S7YZYwt6ncKzCmFVzjZ5ZjSkDJ43a81zALkpIMpeauqgpyqMP/6CY4QerOipvN3cly/OfZ6t597vV76zIeL2+dvEuJ67fpE4qC6m4bmEZx3uRXv3/WzvaVcZT4ZwuWdguMQ2w3MF4cYgBiM8iP85insKuO4y5xwMinkvvVCKG8Nk7lxJYdNwbmJbIVoivBqex/DgKWJ4gRnOd8yDEbK4eR65BYZHbO8nn+P5mc0+m68wjN1tcJ+3Za8tJu9u/Mj28s5gwkgVMnXLlSoeMtK5bUczjFAvuB/cdtMN8r65P27+5HMbXgziJzyxcG95691Ndn0wyBY0z4lyZhtcr0FXoIABNNK5pc/NoBBP8N5Db2XwwHpx7MAQ4iQKYuwvWfWuzaHkhsfEPWnxqrXWQ8x9ZuHdAl5o55re95HC9Pk5RXvuKzt4O8BTcctnX5hncjb73IzlfEAopG07vwqFRqzxT0eCsiWvDoX+w7W//oNP5CtzDsKDDucg3s+CQljifIXHI96r4EEAb8TiV10RupendL6CB8RwCNi4p6CzAcIjljbveuGuJT9DeOg1M6FBcexSCmum7xx+rz1cn3iXKG08j5o+WNu/imTjKrgEeaCpqI796Nm2sV1WRbQgcUHfU/znKY4DhHTcK1qYUIvbdnxtQ0MGtYGVtOnxjL0nuMKIbjie83iPLZj/shQFinCMYn1uxvpugW1dZt7jZxhvGHQmgWCDcKFgl5JAgXMQxwRhJuGZDA/PIIEi1vcvtIP9gNffFcbbVQs6vcBbA/cpvAdpifVdRJfD71wT/us18x2GZxC+Y5A/xy9a6bkS7Tttat5RY91ndx84TAIkQAIkQAIkQAIkkPYJpBmBQl/+4RaMmP1qmBhn3OFhiFDjDD7eW3cbaI3SeFGGAd4t+AjoY9zdkdBYCz4SYRBV93mdjhd49BjTkB7aNrYh0+mnh0ILVSlfWureVd32SEYvJO0Jp/UhEOADGQlw3dLGxJYeO3We7XHlTvcbttELrfugsdbY6daD4RLGa90XGOLGml7xMLLCEKfu4rqMhuDRj2Kdrr8p9SgOMoQFTdP29BeGYhiMXaMqjFFPmx52yBHQw3wIg3WsBYYPfAQGfcz1NSEM0ItrwqAnLIfvkdzYhFEKSuisH+XhPp5hWEJxj4sugxwEbq9qGCGfGJDUIxI9AfXcwfIwPhYtlD8UQiiSQKGeG+oBgHMJH/0XGqNm0D6g/aASjpGGFkGPPYQF8pcmHZM8SsDPH4/aXxfXTaenRwYaWWLZZ21XDSK6bb2HTrA9Ef0ChdYP+h1uBJZNxpDoxn/Gsfnhx312f1yBBctjO6fOfcVj5FHDQVnz0f2IMRzAqIhrOLsx0obLGaLbooaSu26tZDxybhDXOKl18Atvm6+/2yNDe7Wz1wKMTT+ZXqOXGUEhKL+ChjfxC3JoC6LV86ZnLMQGiA4ouNaRXB1hznDfcov22nQZufPT2jDOMZxrT5lwIqvWbrQ5O+oYUccv0ul9EMYxhAPRXCmtTS4Z8PHf57GfEwZ1s8ZkeJXoPfy0dKdZw5mfA+7nuMdDIOk7cnIoDJxbr1a1G61g7k5z43O70681AltjI8TheOs+uvMxrNvnn47xaO7BuhzCuyBsnIbF0+n4DTKkqVHJrYdhiOsdjWALAQgFz5tnjJHNzZWD6bivo/cuQjmp8InpbkHc+3tqVTNhzZJi8fvj1EMIRM4Vf8E1iOffGSbcDIr7vIVopaHwdDl0Tmjf9EEdtYbsZ2cukPJGuHjw7ltD04MGNMY/jMCIoe4WPBfWbNhs71HudIj+yF+kxb0HQKCBUdZf9NwKJ1Dg/gPvMD2ndXlw7tbmEZsrCfcp5MPyh5TDNYN8Rihu/hYILiPN/RKdKCAIR1NiOfeVHTpV4Bij4PyBIAsh3F/CnQ/hrh9sM7ZdvRr97eG+h3CF7j3/m90/GI7jknEEs7Ym9BgExpTOVxh6ESoJ92y34Fg0ffDuUIz/SO866okHr8RmJmRduILthXcDRNVhJj+Lvv+i/g4jjvYeOtEm2AZTbA+uQ7x7aK95t13tcBHUMULbcs8dbD88KPC+2a9LS9MmJN2kouGp7rvjZk/+I4TagbiuQi5EI+SuCnrHwrmMEFV67kP4wrsOOldcfMH5gfug63d/IzGK9bnptusfDnq3QB28L+BdSTuxqCgSdF/VNtHZBGE2cT+AJ+5Rs8/9zDMl6J1Wl/H/hnv/gmB+5I8/jFiW0z7bdDk9nn5RLNZ3EW1PxSicm8/0eMwKiEECRazvtKl5R411n3Uf+EsCJEACJEACJEACJHBqEEiIQIHkgA3uTcpp4Mfi9kBUo3NVE9f0PtNDCb2D0GMGH6D9u7a2hno1Umg7+HDChzE+XvAxhY83jD9qkgyiwGg5cExSTyMYtP5nDHtZzjhDVry9XlasWW+Nrn3NRxmMwm7beBlHImT00MOLPhIKu8YtfGy59WFMgbEZcXVnmTjb6JGKgm1Hrzd8/KHXI4yMaHt03052PgwBXUxPNPyiZxYSocJ9Hi/76FXmGl3UMIcFYdzFx+pfZgfRLowq+HAe0aeDbR+u84g3ixj/6GV5Z/WK1qChvTDtyn1/ggxhQdN8i4WOE3rLIiwNynLDd/oLr9phN7SNnRDFH/RcfNSEe8GHeO8OzUwIJm9ceBj6kB/hJiMeoWeXFjWMuPGb9UMI8/AB7i8qULi98XEMsA4cF4gU1xgj459mm3D80Ntajev+ttzxSALFkPEz7TmCZJjoaesKbdjOhvfVMiEdLnWbSzYciZGGXnJFI7cBNZQiJ4P2gHbnw9CJ9tETHMZDcEDvPwiFkUqkfcZyvxnvpbZPDrGikhpQohEo8KF/1PQMR+93iEfwSrICnhEYNfxWuO2CQQR5JnBvUEEIdXHd4FpF2C/Me9OIGFpwndxujNBqgN
Hp+NX7Eoy3yIEBo4VrnHTrQghCQc9Rf+Ju3I+QxNoVFhDiCKwRVsr17kIbalDzG0Yxz1+wPzgWKJGStPuXO1njyGPyRP8x9pj279rK5jXBeNA1694H0Vvz9psrmt7kOeRcc4+FaLvTeFMMMYlDcX50QlxzY3DThPD+eziMkKvf2yQLXltpepeXlFom1FZ6k2wWwurslxcbkWStDf8GT6qzTEiPD40BFr1oUVR0wjByW+A/7u0PmoSvOLa4T6DnKY6nCi3o5R5p+9CWv0RzD8YyeNb1GZYk9sGjBontwQBGY5znKEjIjN7QKHoeY7h2jco2/wrEm5eNhyLyqrjPHsQ6h/Ecz/LqxsMFxkWIo7jPqOER1+gBY4gbbUR53M9wv0DPf4TpwTUaJFC4cc3xPMO5vc+cu8hPA68q9PTuYTyDYDR1n7e4PuGxd65JxI0e7jCYoTze7EHrrYFhNTTiuej2JMY8f1EjO+o2eaC2fe5j3yC8a0EOgcKmcwA8EzXZbT9zrmpvev89INK5FSRQwFsLXlJ45uGZjfB72G88S7EtOLfwfMV9p8+wifb9wBXJ4c2mAoHbUx35b/AuENSjXvfN/xvLua/s0MbtN1cw3qn5jaiU2VyPZ8d0PsDT47D538kwwHWs3o7ZzLX4uzHEtuk+yAoOeOfD8x0i3LxXltnzRDuwYBtgDO5s8kfB4wFiWLUK19nlIFJDDINxfvTTneRv8y/S+arGebxP4lzGsYAIhBA6OP+GGdFZj0+4dx012j5c57aI3qvq9eB2ksC+oOh9H89yGMlxb0fBfiC0EsR1DfmH6WqsD8pB4CYgH2PeQfXZqe8juG9Vr1jWnGsZZdOWz801vs56qvTr3FL8nmxYl5ZIAoUKCzDK47jNevH1kHCEfYD4BD7ut4C26/5GYnQ8z83UvltEI1AglxRyY2gnCIR3ikagSO37F3ghzCjuF7pOZRjru4gup3m29N1pgQkFGiRQ6DkU7TstthHbmtp3VN0+/IbbZ7cOh0mABEiABEiABEiABNI+gYQIFJEwuKFY8IHewRgS8XGK3AIjnpuTFEvdSRbsGin8sfvx4dbJxPVHj60RJn4xRIfde0wMePOxCiMKhAj1RsA2DXnWGIlNMlztoee27X746/b7jVtufdcICON2y3/C6/hzJDTv3M8aINS4pT3g/T3P8IHyuIlHDOOWhjlRwxwMFTBgZjCGNBQYYJt0TApt465P47XGI0m2awBQHvoLgwr2C0V75IMBEqJmNz1d69xWVatG/bvIGO3nv7oisEceGlGOGEZM3PPPO8fG9tXe0248aRjT8HGG8wLeEBActGhYGYz7e7VhvxA/GoYmt8Bg4cb8dee5w5GM9ToPxia0D8MAekSqAQKGsi4m/4Qb7sxtG8ORGKEnOcQUf84NbQPhBNC7s6VJxomwam7Ra0anwYiA/CIwPKVUdL9UfPDX1zADbg/0aAQKN8Em2sT5iBADQaE9/OucNu9VK0b6PYj0oxbGJlxnENgQBgRhejTxtd9ghHMJ1yWOWedWD4dEJL9xEtugIQxwLLVcZTxsTs+Y0dx3Prf3ARz/J9s1tgYvhLBBiKpwBlUYiGEohtEZRvxwBe08OfhZa1BWsTdc3bQyXXPKuOernkt+zxq9DyLEy9An23p6G2N/NKSb/3hjnv8ejmnaMxsJ4OGFpgVG5F8OHLS9rV2xSMNMNKt3dyg0l3okIWcM7kdaYJDCuXuJEVI6/ePJFGn7dDn3N1qBQnu6BiWPVzEO4YraN33ANq8GbTcHCmZARIQXBp7DuI7xvEEvfPSrRsJdY6e1BffT5l362XquoKaGRPcejAWCBArdBvX+S2pZ7LWBsGm4LnsZEfJiE1rLfd76zwnEhocg5O4LOL9gjGnXmd7rGhJN2/f/qpEdYdVgMNPS0fQshyFcPaV0ut6P1GiH6UH3gEjnVt8Rk61YBNEIRb1IXWHIzjB/9F0FIg7Cxb267G1rnNf7E0JBwasCwhreg1yxXcXoWHI6xHLuKzt/L3vd9ljOByyjz/bnBnfXJuw5+em2XUbMzRK652KmhrjDfVQFDeWITgQdWzwcOl9RH889hGtrYfIN4VpACbd92uPcFb1QH8/oQ4ePGGP7FVacjvSuo6EMwz0P0Z56NeA5NKRn22TepnoOoS6u7WImPCmu0Y8//8K+0+DdAQmm1SNPxVI8J5B/SPN4QLiBdw5CmaH4ryENCWhn/vMH4vlTphNDSl6dkQQKhP0aY0I1YtshGmE/EbYMYu2n23bYfShu3kFameTaEHyCSiRGx/vc1GtZ1xvtu0VKAoWbLwPHFcchGoHieN6/NF8Y3iHg7YD7AUqs7yLKQgUt5NmClxJKOIFCn9fRvtMezzuqbh9+w+2zW4fDJEACJEACJEACJEACpwaBhAgUMHAE9dIGIrz0uiFm1Pik+PzGdddIMX5A15CRXuuroUI/KjWEBeKLP+xLgLrS9MRGLx7ki7jfhHHQtvEBFRQv2G/cilRfDVYTn+nu+UDWj17teakGW8RdRx4Jt4w0eRBgCEWPJMRGVzb+eMBYRg3Obq+5RAkUbkgpFSjc/Yh1GD0pEcYLBjDl5G8DxiAUPc46X3us4cN66JPtdLLt7YtwUSj4oM97yYU23jUM0frh7IbAgFjWw4SIwEc9zl8kGYY7PQwi2C600dkYHN1zN7Syfwb0gy3IOKFGGFR1ezWjF/S46S9Yjxg3TrS/7ZQYzTICxRIjUKBndA0TG9xf9HyB8IB4827BPiNBOI4rYtnDUAmBD2GF0IM6Uom0z2pkwLEZZHKBqEElGoECRif0XP7WeEqhlzcKRL16pre664Hg3zY1VkFkgbeIG7pJQyBhGSROh3FcS7hwKWoYvNbEPUdSbS1BxkkNtYA6uKfAWKTrh1ELIgKMn4i1jt7uKRtaksKARBIocP4Mn5jUkxr3vG4mn4hy1m1Na78wrrbo0t8apd18KQuXrLKeDXp/1u3W+2C4sCmRBAD/PRxtqgHQL1Do+vCL0BK/mLAmOG4QtmAERq4Z9HLHPRpGWtdI6i7rH460ff66GI9WoNBQSUE9UlVExr1ulBHvUY71Om5vz087MYU/EMP3mDBqv5mQdGABwzkKPNPg7YISzuAbJFAc24bkHkMqLGoIFX3e4lr253pRoRleiOi4EGtRIzvuSwgdqUXvk/4479ozHvcfeD2iBN0Dwp1bQR4UGs7SFb50OzRMk3Zk0F7p6i2KEFxPDXtOIBIgQW6609JZg70ed1esQ4i9b0zYOX+BARV5vvwl0rmPusrO7aThthHL+YDl9NnoChRuezC2/mS8J3H+/XbwsLnfPe8JdzV2WtLzE56rCNmVUgm3fSpg4/lf03hWlTDiHETsaIvez91rLmhZff75xTGtqyE0IVy1anBvqJMN7kEQEmH0h3ePPptxneDdBfclrBteC/A+/ODjrfYeC+M13mFgwIZnGa7pweOnG+FlhxVH8NxAyECEz8P5g2dXTyOiu9eFbpv+RhIoXG9aK6Y0N2LKP51r8D6F/cf2uDnAtF39jcRIOYcX9iM/N
QACB6hQgQFGd+51cI4AAAggggAACCCCAAAIIIIAAAggggAACCCBQrwIEKOqVn5UjgAACCCCAAAIIIIAAAggggAACCCCAAAIIIFCdAgQoqnO/k2sEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBOpVgABFvfKzcgQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEqlOAAEV17ndyjQACCCCAAAIIIIAAAggggAACCCCAAAIIIIBAvQoQoKhXflaOAAIIIIAAAggggAACCCCAAAIIIIAAAggggEB1ChCgqM79Tq4RQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEKhXAQIU9crPyhFAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQqE4BAhTVud/JNQIIIIAAAggggAACCCCAAAIIIIAAAggggAAC9SpAgKJe+Vk5AggggAACCCCAAAIIIIAAAggggAACCCCAAALVKUCAojr3O7lGAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQKBeBQhQ1Cs/K0cAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAoDoFCFBU534n1wgggAACCCCAAAIIIIAAAggggAACCCCAAAII1KsAAYp65WflCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAghUpwABiurc7+QaAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAIF6FSBAUa/8rBwBBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgeoUIEBRnfudXCOAAAIIIIAAAggggAACCCCAAAIIIIAAAgggUK8CBCjqlZ+VI4AAAggggAACCCCAAAIIIIAAAggggAACCCBQnQIEKKpzv5NrBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQTqVYAARb3ys3IEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBKpTgABFde53co0AAggggAACCCCAAAIIIIAAAggggAACCCCAQL0KEKCoV35WjgACCCCAAAIIIIAAAggggAACCCCAAAIIIIBAdQoQoKjO/U6uEUAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBCoVwECFPXKz8oRQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEKhOAQIU1bnfyTUCCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAvUqQICiXvlZOQIIIIAAAggggAACCCCAAAIIIIAAAggggAAC1SlAgKI69zu5RgABBBBAAAEEEEAAAQQQQAABBBBAAAEEEECgXgUIUNQrPytHAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQKA6BQhQVOd+J9cIIIAAAggggAACCCCAAAIIIIAAAggggAACCNSrAAGKeuVn5QgggAACCCCAAAIIIIAAAggggAACCCCAAAIIVKcAAYrq3O/kGgEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQACBehUgQFGv/KwcAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAIHqFCBAUZ37nVwjgAACCCCAAAIIIIAAAggggAACCCCAAAIIIFCvAgQo6pWflSOAAAIIIIAAAggggAACCCCAAAIIIIAAAgggUJ0CBCiqc7+TawQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEE6lWAAEW98rNyBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQSqU4AARXXud3KNAAIIIIAAAggggAACCCCAAAIIIIAAAggggEC9ChCgqFd+Vo4AAggggAACCCCAAAIIIIAAAggggAACCCCAQHUKEKCozv1OrhFAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQqFcBAhT1ys/KEUAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBCoTgECFNW538k1AggggAACCCCAAAIIIIAAAggggAACCCCAAAL1KkCAol75WTkCCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAtUpQICiOvc7uUYAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAoF4FCFDUKz8rRwABBBBAAAEEEEAAAQQQQAABBBBAAAEEEECgOgUIUFTnfifXCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgjUqwABinrlZ+UIIIAAAggggAACCCCAAAIIIIAAAggggAACCFSnAAGK6tzv5BoBBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgXoVqJMAxdTpM2zKj7/Yb3/8ZbPqNbusHAEEEEAAAQQQQKAUAo3cQuebd25rsXATa7bQ/EVZxfRff7eJP0znHrIomiykvgRKcW7w+6q+9ibrzVeA4z5fKeZDoG4FSnFu1m0OWBsCCDREgZIGKP7862/7ZtJPNuecjaxF0ya2wHzzNERD8oQAAggggAACCCDgBH797U+b8tMvNnPmLFuyVVObZ+65YrlwDxmLjURlLMC5UcY7h00rmQDHfcloWTACtRIo1rlZq40gMQIIIBARKGmAYtK0X0zR2ZaLNImskrcIIIAAAggggAACDVlgsrsH/HnGH7bcEs1jZZN7yFhsJKoAAc6NCthJbGLRBTjui07KAhEoikBtz82ibAQLQQABBJxAyQIUf7jaE6qSv0zrZkAjgAACCCCAAAIIVJnA+IlTbcH5Gxfc3JOarvl5xu/cQ1bZ8VJN2Y17bvD7qpqOkoaXV477hrdPyVHDEIh7bjaM3JMLBBAoF4GSBSg++WqSLdFyYZp1Kpc9zXYggAACCCCAAAJ1KKDmAyZM/tFWWrpVQWvlHrIgLmauQAHOjQrcaWxyrQU47mtNyAIQKIlA3HOzJBvDQhFAoGoFShagGP3pBFt7xSWqFpaMI4AAAggggAAC1S4Q534wTppqdyb/lScQ5ziPk6byZNjihiwQ5xiOk6YhG5I3BEohwHlWClWWiQAChQiULEDxrgtQrEOAopB9wbwIIIAAAggggECDEohzPxgnTYNCIzNVIRDnOI+TpiowyWTFCMQ5huOkqRgQNhSBMhHgPCuTHcFmIFDFAgQoqnjnk3UEEEAAAQQQQKCUAnF+8MZJU8o8sGwESiEQ5ziPk6YU284yEYgrEOcYjpMm7vaRDoFqFeA8q9Y9T74RKB8BAhTlsy/YEgQQQAABBBBAoEEJxPnBGydNg0IjM1UhEOc4j5OmKjDJZMUIxDmG46SpGBA2FIEyEeA8K5MdwWYgUMUCBCiqeOeTdQQQQAABBBBAoJQCcX7wxklTyjywbARKIRDnOI+TphTbzjIRiCsQ5xiOkybu9pEOgWoV4Dyr1j1PvhEoHwECFOWzL9gSBBBAAAEEEECg1gJjPx/vl/Hki4P865jPxvnXVVZoYysvv4x/v/v2m/vXUv+J84M3TppS54PlI1BsgTjHeZw0xd5ulodAbQTiHMNx0tRmG0mLQDUKcJ5V414nzwiUlwABivLaH2wNAggggAACCCAQS0CBCQUlQkAi10IUpCh1oCLOD944aXLllekIlJtAnOM8TppyyzfbU90CcY7hOGmqW5ncI1C4AOdZ4WakQACB4goQoCiuJ0tDAAEEEEA
AAQTqXEDBiV6970qsV7UlNEQDECFwEWpW+Bn+nSc6XxhfjNc4P3jjpCnGtrIMBOpSIM5xHidNXeaJdSGQSyDOMRwnTa7tYDoCCCQLcJ4le/AJAQTqXqDBBChCcwbhx3f4YR6aMqh7WtaIAAIIIIAAAgiUXkABhxB00P2Pgg257n+iabSFSlOKIEWcH7xx0pRemTUgUFyBOMd5nDTF3WqWhkDtBOIcw3HS1G4rSY1A9QlwnlXfPifHCJSbQMUHKHI1ZxDaWy7Fj+5y25lsDwIIIIAAAghUl0A00KB7nlN7HlgQwOV97k40CXXaUQflDGwUtHA3c5wfvHHSFLpdzI9AfQvEOc7jpKnvfLJ+BKICcY7hOGmi6+Q9AgjkFuA8y23EHAggUFqBig1QZApMhJoToSZFlK8UP7yjy+c9AggggAACCCBQlwKHnniBX12c4ETYzlIGKeL84I2TJuSFVwQqRSDOcR4nTaV4sJ3VIRDnGI6TJpvmdTfeZNfd2CfbLBmnHXd0Tzvu6CMzTmcCApUqUOzzrFId2G4EEKg/gYoMUESfFhSdakeEmhJRSgUxFKgIzR6EeaupNsXPP/9sY8aMsdatW9tSSy0V5eE9AnkL/P777/bpp5/aggsuaG3atMk7HTOWv8D48eNt+vTptsIKK9h8881X8AZ/+eWX9ssvv9hKK61k8847b8HpSYAAAvEFovdD/a4+N/6CXMoQpKhNoCPdBsT5wRsnTbp1xx035M2h9sDDj9hRPbrbKiuvFHcxpCuywNSp0+y2O++yldz31R677VLkpdf94uIc53HSFJKzSZMn29dff2NLLrmkLdqqZSFJmTeLANeU/3DiHMNx0vy3xprvyiFA8c8///hzbeqPP9oKyy3nfmM1qbmhFTDmjz/+sAnffmdTp061NsssYy1aNK+ArS7eJt540y2mfXnsUT2Kt9B6WlKxz7N6ygarRQCBChaouACFgg6hE0j9iFawIVc7y6m1LSq1JsWvv/5qs2bNsiZN8r+B+eijj2zSpEk255xzWvv27Sv4UG14m659+cknn9jE776zWS57G264oc0///xlmdGffvrJRo0a5bdP28nQcATefvtt07Vl7bXXtkUWWaTgjA0bNsx+++03W3/99X0Aq+AFFJjg/ffftx9++MGWX355H3gdMmRIjevbX3/9ZfrBNM888/j/Ba6i6LN/++23/lyXr5zffPNN+/PPP61t27a2wAILFH19LLB6BELtCd0LZXv4IsyXLYgRvb8q5n1SnB+8cdJk2utjxn5iO+2+V6bJSeOPPvIIO/G4o63LgYfasOFv24Fd97PzzzkjaR4+ZBaYMWOG/eCCCAs3bVqSwrbPv/jStt1pN9thu22tz/VXZ96QCpkS5ziPkyYfjrvuvd9u7HOL238/JGZv3qy5L3Q7oOu+iXF6U6z9PG3aNPvl1xk+EKLv6/oYOu13oL0zcpRdeP451nXfzrXehEx54pryH22cYzhOmv/WmP6drvFxh402bBs3qf3mHvq66JLL7alnn/P332FBy7oHwC698HzbsO36YVTRXzMdn3FWNHPmP3bjTTfbbXfclZQPPRTZdd9OdmT3w2yOOeaIs+iipZk5c6Z9+91Ea9x4XmvZokXRlhtd0Jrrt/P5/2LM+9HRFfm+FOdZRUKw0QggUG8CFRegCD+y4zzhV6qnA+tq7w0aONAXZG+++ebWqFGjvFarGhRjx461RRddlBoUeYnVzUzaL++9956pIDUMBCiCBK91KVBpAYoPPvjApkyZ4mt8LLbYYvbGG2/4H0AdOnRIsH3xxRf21VdfWatWrWy11VZLjK+vN9+5IKSuwyFAoaCKzv1yPufry4r15i9QSO0JzasARK7+KcJ9UkMKUHzx5Tg7+fSzk2DfHT3af1btiMaN/6s51tE9la/CWD3t/PBjT1iPww+lBkWSXPYPDz3ymJ1+9nl20vHH+Non2ecufCoBinh9uuSSPud/F9l9/R/0syn4s8zSS9mX48bbS6+86sftv9++dsG5ZyUWU6z9fOKpZ9gTTz1jD957l7XdYL3E8uvqzWeff2Hb7by7X93q7l7h6cdmG9Rm/ZnyxDXlP9U4haBx0vy3xvJ5N8M90LNP14PtQ/cAoQKAm3fYzBZZeGF77/0P7O133vEbet1Vl9uuO+9Yko3OdHzGWdklva70wQmlVXCl/aab2Pivv7ZBr7/hF7f1llvYtVdeVq8P4uj7f5sdd7WN221k9915W5xs5kxDgCInETMggAACeQtUVIAi+mM8zo/nUj0dmLd2LWeME6Co5SpJXgIBFZyqAFWDnp7W0+eqGlrOhZXUoCjBgVAmi6y0AEWoFbbyyiubAhQDXeA2tYZYuQUoVItN2928eXNbc8017a233vI1PDbeeGOaxSqT86ASNyPcE+WqPVFI3kKAIs5DIJnWE6dgKU6aTOtPNz48NT3w5edt6aWWTDcL42IIFKvgOtOqCVAUP0ChZrM22KSDvx999IF7baUVV0jwR2sfvfX6a4kmn4q1n4tZWJrY6ALeXHN9b7uhT99Eiheeedw3H5YYEeNNfecpxibXeZI41/c4aeo8Y3ms8JXXBlr3nsfYWmuuYXfe1tfXNgvJHnn8STv1jLN9Yf+rLzwdRhf1tVjHZ7gGaOOeevRBW2P1/x4G0nW6e89jXZBznJ160gk+0F/UTBSwMAIUBWC5WRvKeVZYrpkbAQTKSaCiAhSh9kRtfoyHH/S1/fH9/fff24+uzcgllljCFzRNnDjRN9nRrFkzW8a1v6hhsmvHVeP//vtva+qqu6s913RVmFX4qwIsteOuJ2xbtmyZ9LSBlqN2HfUUrgZVnVQNCvUpoSaBxrkbADVnoiZPNI+aP1Fb8KuuuqqvcvjNN9/Ywu7pDNWiiA5ar5at9udVUK5t17pTBxWga7maT1U11Q+B1j3XXHOlzpr0WYXu6rdA26KCxAkTJngzPdUsi2IZFrJ9mlfrVRVX5VnbIpvaDlquCkW1b1QImW14xz0h84urQbGsa2906aWXtsGDB/tjpNAAhZ4i139VtW/cuLE/xhZffPGk2jVRYz2xrc+aX8eN9mG65mXU34SOWxnpeNUxruMtnyaeovtc54GOPR27Om5k3cJVr1VhcupQaF7CftRxv+666yaqEBe6HKWPe+4WctxF8xs10v4K54XOE5lpn2hfjXPntWraaN/qvEx3bqqZIOVZ57yaDJOv/qe7zmi65tMxoOXrmqT16XjM1MRTrmuT8lXXTTypJoKuR7q+6ZqmAMXcc89tm266qTdQs2m6Nmv/yFTXNfWtoXMtap/umqT8aB4ds7omy0mesk9nmu+8clfTVKFGRzDbbLPNcl5HtU0MCKQTCMGEfO6JdO+jIVszUJoeHuSo7T2SlhWGOD9446QJ68vnNVeA4p1R79pI93/brbf0bWprmXrSe7K73h7YtYsNfvMte2vY2za3uw/axs2z/rrr+GvHCy+9YiPeGem+5+ay9dZd27bbZmv3PrmJCzU58d4HH9obg9+06e4av/FGba2tayJvoYUWzGfT/TzfT5
psw98e4Z64Hemewm3mCodW9U+vRq9TYXv19LueaB3+9jv+WnZk926J9egJ8reGDrex7l5NeWjnmi1ZbLHWienhje5PX3j5VddU3afuXvcvW3WVlWyTjdvZUksu4WdRnu64614b4ZrK0VP3m22ysbXfbBNbwn3H7bTDdmEx/v5j+IiR3m8hdy+5SbsNbe211vTX8MRM/76ZMuUHP5+WqaY5dtxhW5tzjjlp4unTCbbOirPdU83ifH7uhZfs6ONPst122dk/7Zy6jLvv7W+D3hhs3Q450DZqu0Fe+/mrr79xx9xg++Szz3yztKutsrJt0aF9otmvCa7ZQ6334UefsM8+/9y6dO5kbdosbRusv56tu/ZaiU1Qfxg6PnWcr7jC8v74VAAl31rkiQWleaPmadq138o3aXVVr0vspNPOtJ5HHG4nn3BsmrnN/0YbMfJdG+bOu7//+ts9bLCabdqunbvnb+bnz5Wn1GvKgIGv22fud4OeMl9u2TZJ6/zW3eM8+/yL/vxSjZYwlNIjrKMuXuNc3+OkqYu8FLqOcy+42O69/wHflNM+nfaqkVw10CZPnmKXX3Jh4tjSTNOn/2wj3x3tjz93s2sbbrC+rbfeOtZ0oYUSy9A965333Oevl6qZ8cqrA2zUu+/Zllt0sNVWXTmvcy6xsCxvdL1fcfV1/By9r7vadtz+v2M0JPvgo49ttz07+1oiQ9941f/207mjmiI6pvVb5I0hb9lE93rJBeclfhvme4zr/H11wED76OMx7n59sqtVvZz/DtP3SRiefvZ5e99916oJKv0+P+TArtbI/dO1LFxDCvk+Vu0XfY/q+38u91t2003a+e9dalAEcV4RQACB2gtUZIAiTu2JQBV+fOtzbZYT2kFX4bYKw6KDCv5UIKaCz+igAn0VQkd/QI52zQyoIDh1UIe1KsTX8OGHH/pAQuo8q6++uv+xGQq4FThQgaaGUGCnG4CPP/440bRIWEam9Wrb1UZ6aDNShZ9avwo3o4MKmVUwnK0/DBWeqq11zavl6cZJgwr7tO3FMCxk+1RY/pn7sZQ6hGZXUsfn81kFmeokOLiHp7qzpVWnxDII/U2E/VdIgOLdd9+tcdxpnSrcXm+99RI3etmMNf9aa63lC3D1XoM81YRO6qCgiwpZtc3azkxD2Oc61nUMqpA4Omi8nhqPBikKzUv0ONey1beKllfocmpz7hZy3EXzr/fBSNssDwVJooMK3seOHeMKu5LPuTau+rT+h0EBw1EjR/pm38I4vTZy/9dx56bO5TDo/B0+fHja/aFtUFAqtQ+KTNeI6LVJyw+F7bn6oNA6cg36waCgQrZB57DOZdVE0HGpmmXzuDQ6rvRDQ00+pQ5apqZH7dNdk5R+6NChiWtVWI5M10vpY6OQefUdoeNTQRFdI0aMGOELOwppri9sC68IBIHw0Eaue5nofU+2Pii03ELmDduR6zVOwVKcNLm2Izo9V4Di+t597dobettNN1xr22+7tU+66577+GY5Dtp/P1N7/dFBBZwqcFWBSXTovPeedtlF/0uM+v33P+yAQw/3bd4nRro3+u5+5rGHXNB46ejotO9feuU163H0cTWmrb/eur5fhtDOdtjeTnt1dAXBj/v5Nc/D99/t31982ZV2u+twOnXoe+N1LrCyVWL0x2PGWrceR9e4p9U297u5j2+aR/labZ0NEmnCm2izGuO/+tp26dgpqa1yzbeOu+e8/67bfRvhId27o9+3PffZL3xMvPZwwZW+t9xekj4oFPBTAC9Tv3Y6NxToy9VMWmJj83gT5ziPkybbprztAkb77H+QreAecnrikf42v/v9kmnIZz+rXf3jTzqtxiK0/Lv73WKtF21lb741zPY/5LAa8yg4oCCBhpdd4eoRR9UMFhx68IF29umnJNKqMP/gw470y73j1puS7i8TM6V58+ZQtw0HH2Y6R887+wxbfZ22SYWp0STjx39lu7jCVj3MER3URM9tfW/wQbZceUq9pjzw0KN25rnnW2p+tPwb+txs11x/Y9LT5/l6RLcvn/fVetwX0kl2at8Cy60yuxC8/939rNC+KHr3vcWuuvYG03VZ3w2hoDzbvhrnfjfqep56/Oka/PhD97tzdzmfXEGMdTbcxJ/L+n0emjI87uieLgi+XtZzrpDzKDSNplogTzzcP+Omv/DSy76PGQX6FUjpddW1dvOtt9teHfewRx9/IpHu49Ej/L1/vsf4T+73T4+jj/f9RCUW8u+bc848zQUi9vefwvd86jyffjjaXSfmcL998v8+nubu43feo1ON78H9XL81Tz79rN83qcdJ6noL/Vwp52ah+WJ+BBBAIJtAxQQoQs0HZSbXD+xsGda0fH/UZ1tOKPjVPHrCXP9VCPWpe3o3FCsqWq+ndlUwp0JfPW277LLLJmpYhII2FVSuuOKKvmaCajSMc09OawiFx6qBocL94a4zWi1bBcShIE+FbKGAW2n0RLYKv1U4rJuTdAEKPWGsTlvnmKORLbfc8j54oYJ2rVeFbnqyXrUxNITOXBUs0barMHXs2LGmJ6tVAKogRaYhFAZqutbVps2yPo0KC/VUeDEM890+5U99Pmho06aNN1JQQRbaL9E8+5my/FFhr/wUaFAeNSjopH0t/xDcybKIpElh/+UboNAxoqCRjgEVdmo/KH+qwaH9p2NJx6OGqLGe3lY+dYOroIr2ZSi41bwKJqgQW/nTU+fhiXPlU/tbQ74BCj+z+6Mn9LVeWet417GsAIMKszXEzYuCC8qj3JX/uMuJe+7me9z5TKb8iZ4XCg6ojwTlY8yYMb7QWrPrmqBAhbx1Xuppfp1DHTps7pema8rw4cN8ECPsK0342rX9qkCS5m3bdkMfKNX40CySjk2d2/ILx0wIPkYDFPlem7TsfAIUOubUlFSuQce0Cu2zDbqmqZk0uenHmQr+dT1ZZZVVfDLZ6JhVLQvZ6HyQp4yj9umuSaG5KwWYFYjRdVTrUkBKy2jnnpbUOA2FzKtzS98Buo4qSPG5e2JUgekNNqhZoOcXzh8E8hDI916mkKBDIfPmsYl+ljgFqnHS5Ls9mi8UXGRq4im1MFFpQoG/rjs3XnuV75viuRdetAsv6aXJflDh/rrrrOWfWg1BhMEDXrLF3Xmva+1xruD2meee9zUreh5xmL9PU+GGmplZ1t2bPOkKiLM9+DFq9Hu21z5d/brUqW+HzTa1r13A9vY77rYBg15Pego+bK9mVgGsnn7XtqvWw/0PPmxnn3eBX6f6Flh22WX8k6Fq/kPD0489bKuvNvua2tUV4r7lCnO7HXyQddlnb3+f8fhTT/tAgfrweO7JR30a1XjQ+Esvv8qOOOxQV/B6wOzvaFcwpQKlPTt39U1+HHNUD9vdPa2v74UbbrrFPeX7WtJ26wnanXfv5J9sP+yQg2zvvfawv/782/o/9LDd/8BDfl3F7iQ71EbSwtMF/KLnRUOrXaQOeztstYP3VhChm9tvm7vaDgokpBuy7WfVtNl48238vr380otsM1fLZoL7LlZQScG77t0OsdNPOdHfC6rA75zzL/I1b
vpcf42t754G17E/n/s+D09fa/1XX36p7zT4yy/H25nn/c/d53zjnrg+3/btPPvpc50/J5xyut/Ul597ypZfbtl0m11j3ClnnOMLSe/pd6t/Ejr1c0ig7dyjUxe/3kMOOsA6u4Llv/+eaU+7QMwtt9+RCGro/M6Wp9Rryo/uvnq9jTb7N/1rvsA0rHPrHXb150q4dhTiEZaRz2s1H/f1FaD44MOPbLe99vG7Z8vNO7jjeG9/bc5Ug05NsCk4pocedV3tuPuu/kkk1ZDTeaWyBn1vKDAdAhRauMYrkLfySiv64MDCCzfNenwWch4989wLduyJp5gK5y9y30P5DiFAofkV6N9l5x39tumc/dDVhFCNCw25zvk+N99qV15zve9X4oxTT7JmiyxsQ12NxpNPP8unHzlssG86S987n3zymQ/AKhB+c+9rrZH716JF84K+j1Vb45DDe/iaE7o2Ht3jcF+7+rWBg5K+/4sZoKikc9Oj8wcBBBAoloC7oSrJMOqTb4q63CdeGDjrkBP+N6tX77tqvVwtQ8sa89m42Mtyhd2zBgwYMMs1e5O0DFcg7Me7Qruk8a5A2I93TyUnxg8aNNCPc4W3iXF64wqaa8yr8QPd+ga4/65AXR8Tg3ti2I9XutTB3dD4aa4QLzFp4MDZ63UFmYlxeuMK4WYNGjRoVth2V9jt0w5w69T7MOi9K/yb5QqFw6i0r64APJHePe1dY57aGhayfa4ZG78trrA3aTtcwbsf//rrryeNT/fBFW7Ock1WeaMB/+6LkSNHznIFjelmz3tc2H/ux3peaXS8uADJrNTjxhUq+7zINQzB2BWmhlH+VdusPOiYCkNI74IUYZR/1fHm2sz384djI2mGyIfoPtcxHx2Uv3AMy1JDnLy4p8+ji429nLjnbiHHXY0NdSOiRq65rcQsMtE+0X8XPEiM15twrXCF7358uJ6k7itNlI+W4QrB/bz6E9xTl+sCH37e1HWG9aUeY+muTa7GgV9GunM8bIC2W8dgrv+p+ySkL/RVeVeeXCAvKWnUPnV7lVel0bXABdKS0gVTV3PDjy9k3qQF8QGBIgrkey+jex3d8+h/rqGY91phXXHuB+OkCevL53XvLgfMWnblNWa5p/rTzu4Kr/x012RTYvouHTv7ca7plcQ4venYuasff899/ZPGn3bWuX78K68N8ONdszf+84677Tnrt99mX8tDAteJt5/mCnHDqLSvp555jp/PNbuTNN0VTM3S9m21/S6zfv33eyVsr+vsO2lefXCFyH45qfnXvHJxQYZEmsFD3pql/9F7T10jN9h4cz/vTz/9d3/34MOP+nE33nRzIr3ePP7k0378WeddkDTeNZnht1nrdAW2ftoDDz3i53XNDiXN6wqFZx18+JF+2pHHnJA0rbYfoudI6u+DbNNqu944x3mcNLm207UZn9if2hf6r2Pkwksvn+WaSKmRPNN+1nH4+uAhNdJ88ulnfpk6JqODCyz48a7ZlOjoWeHceeqZ55LGuzbt/fw6h8LggiKzLr7sill9br4tjMr5qjTK4xrrbZT4vtd2a5wL0iWlf+yJp/z4o447Kekc0Eyat8M2O8xyzZAl0mTKU7prio5xrTOa3jW35scdcMjhiWUW4pFIlMebbMd2tml5LDrrLHGO4Thpsm6Emzh02PC8/qcuR/tM/5U+zvD8iy/79GE5etW5cdsdd7vfd98lLTIcfy64nTReH8L3hs5HDboWh2WOGzfej0v9k+n4LOQ8uuzKa/x6Ur+HUteV+jmk03anDoUc47om6XyNfvdoececcIrfrgGD3kgsXtc2mex3ULfEOL0p5PvY1Rjxy9C5nvq7qHffW/00raOYQ7bzL9u02m5DKc6z2m4T6RFAoLoEKq4GRTGeXApR6Xzabc4UCHKBCP+ksp5G1lPpYXAFeP6J29CURxgfnvBeyD1JpiZ49JSvmhLRoCeXo4OettWT/XpSV+2qhyFTJ9nhCfyNNtoo8cR0SJNag0LL1hPPenJb7Z/nGsKy9fSx8qnaGXpaOZ8hPK2sJ4/VDE/qUFtDLS/f7Xv99UH+aXM1LaW8Rwc1ZaMhNBUUnRbe6+n20GSXnrxu3XoxXxNDLrUdQh7yrUERXZ+OI1fo6mvYaPv0pHc4xjRfMFZNBtWAiQ4uGOWfIAnt4LvCYV9TYqWVVvI1QaLz6ol0VyheUA2KDh061KhNEp46D82TRdeRb16W+7fvjmja6Pt8lxP33NW6wj4r9nkR9skWW2wRzVKiU+W2bdsmag2oxla6faXjQMdrqOGkPkdUMybTOe8K5H0tolCDotBrk64nuq7kauIpKUMl/hCuw6HPh7C6bNek0AScahWF2hghnaapZkpYXiHzhmXwikCxBcK9TK77oujT37lqoIbaqrW5P0rNZ5zaEHHSpK432+fa1KAY+sZr1srdC4VBbde7Anjf1IWavAjDTe7p1iuuvtauvaqX7bbzTu5J8dlNM6nWg54ijw5PPvOcb/ri+GOOsmNdDYNMQ3iyetTwIUltj6ebP9SgiHZurPl++GGqtd10dk21UPshpJ/4/SQ7tPuR7mn2/5qCCtP0FOl37vvFFQr5J+T3PeBgP+mN1170fU3oQ+g49aTjj7GjenT30/XnosuusH533u2brNnCtY0eHfTk+ocffWT3u+ZS1AdGaKP99r69TW2nRwe1KX7cSaeWpImn6HmidaomhQYXCPSv+pOudkViYow3cY7zOGny2TTVcB385lB76plnXf8ob/kaFSHdkd0Ps1NO/K9ZsUz7OcyvV9WmmOK+N3/55Vff14qaU1KTSG+/OVCT/ZCpw95NttjW33OrOTLV+okOO+0+u+bER++OSGoWLDpPrvd6+lzrjjavpPyv62o0qGbPeyPeStRk+t9Fl/om3dIdj+nWkylPqTUolFb9UHTrcZSvnXTW6Sf7xd3oahVdfd0N/inyPXbbxY8rpUe1H/ceuMA/tWniKazq559/sRddfz3qa0R9BEWHW2+6wfdNonEXXHyZ71ei3y03uX5ckq+dQ9z5qiYDQ00GFyD0TTwt26aNZepkO9PxGV1/rvehJsQF551t+3eZXRskVxpND+lUAzHaN5GmxT3GVeNu2rQfXVNSv/raDOrj4porLrPdd91Zi7Uvvhxn2+y4q69tcd+dt/lx+lPI93Hop+eIw7vZaScdn1iG3nz33UTbdMvZfXAUswaFll0p56a2lQEBBBAolkDFBCiiF+lcP7Bz4YRmEWrzAzwU/Kb2ORAKxtR0jJoWCYMKjtXERyg8DoGDMD3Ta7SgMleAQsGM0PxIWF5YT+hnIRReqhp1Ps2LqJ17FeC7p8bDIn1Bp5oyUkFxtiEUBmYqGK2todadz/b5H12uI+pcQyigTTdfsNe0dIXr6dLkOy4UdhcSoFAAS/tSzVOlDuEY0/hMxpqmtvq1X0OAYsiQIT7Qka6gWc0BudoYeQcoMgWlQlND0Sa1ipEX5ae2y8n33NW68jnuNF+6Idt5kW+AItu+0o9rBYLCeZfrnA8BhnD8h2tGum2PjgvXppA+3XETnb8u34d9
GQIKYd3Z7NVsmgLJ2YbQxFkh82ZbHtMQqI1AuC8qZoCiGPdHqXmKU6AaJ03qerN9rk2A4v13hiYVmoYARbRZJK375tvusF5XXp0IUOTTpMhOO2zvmo+6Mu2mqyme0E5+tJA37cxuZAhQpG5vaHs/U7owPhS2qPPf8y+81IYOf9sX3obp4TWfAEXwDmnSvaqpKXXoHeYd8OKzNfrk+PCjMS5fnUoSoNA2hXMq3fYVOzihdcQ5zuOkSZefXONU0Ke+VhR80xDtmyRbgEKFpu5JaR90Sl2Hgg06HsOQrrA0HOdhnkyvCq6pibE4Q5cDD/Xt16sgt/2mmyQWcfd99/v36lPGN6XjPmU7HhMJI2/S5UmT0wUoQlCk8byNbdjg1/xDPdvtvIfvOPyDkcP8PXddeHDcR3ZgHm+LEaCIrka/xV5zwSo1GfbOyFF+0rDBA3yzTdmOP12b22+1vYW+IEKAInyOriO8z3R8hun5vIZAcVcXnLjQBSnyHUKA4o5b+9rm7f97ALPQY1zz97riGh/c+WHqDzVWn0+AopDv42tcf1Q3uH6p1Gzd3h13T1qfe67Zll91LT8ufGcmzVDLD5VwbtYyiyRHAAEEkgQqMkBRmx8J0Qt9bQIdmQp+Q8FYrgBFKPDV3lBb56mDbloVbNBywhAKyVM7Vg0F3PkEKNTuuQIO0b4HwvKzverpYbWDr+0OnV2rfXf1k5FpyFYYqDS1NYyuN9v26eZBBb8aMlmrMFeFmZlqRMhNBeB6UlyD5lchu9qUV2F8bYaw//INUKjTcxUiq71+BSMUfNKxokJzjY8boAgFzaHz4WieQiF3KKCNTou+D/u8kRu5eUotAM2nY0+WoaPlYuWlGMvJ99yN5jfbcRedL/o+GIUAQnRavgGKbPsqXFvUj4JqVYXPqvmkPhRShxDsCAGKML/my3S+RK9NYVsqPUDhmm+zCRMm+GuA+l+JDiEQqOO/hWvnt5B5o8vhPQLFFIjez2S7L9J8rkkAv2o9mJFpCLUnNL0290epy49ToBonTep6s30OhT5x+qBILfDPN0DhmsOw8y+6xHVWur51O+TApM3TPZ+uM0sssbitu3b6+yrdy6y1wcY+SPDph+/mvPfIFKAIhfzaABU6pw4KcjdtupBttcXmfl3tOmztX9Xh9Zau/W1t40KuLyk9uashnwCFOjR9yT0trBoiqYXKWp8KrldecQUfkOh+1HG+Xwp1vhqtkaJ1hY6Ii90HhZYdhuh5FcZlO7/CPHFe4xzncdLE2baQJhzf0doGmQIUKlzVuaVh5x23t002budqTTRzfY81sa4HdfPt4r858OWwaF+LQbUZHrz3Lt/ZuiboPFhhtdk1yzMdnzpeVNsmU7v9iRWkeaPOgDfbcrs0U/4bFa1BdPTxJ5v6mnn0wfsynpv/pbS0edL0dAEKjb+k15V22x132cP97/Hn1fa77OE7UO518QWaXHIPvxL3h+M+SOR+LXaAIqxRgYr2W+/gH0ALAcFw7Ux3PQzX8q233MJU66KuAhSfuD4Fd9ilo6lfh8cevDdsfo1XBRJmuu82na/6zZopQFHoOX9I956+1on6zNlx+219J+Hq3/DOu+811/RTXjUoCvk+VqBWNanOOu2UGt/d0eBKKQIUQi33c7PGjmcEAgggUBuBUrVoVYo27EJ7y2ojOe5QrPaVQ9v+6gsgOoS2z11hdnS0769B7ZurLwQN7seob+9cbb3nO4R25N0NTFKS0IeBK/hMGq8PrmDZryf0QaG0A9x2aFmpy9FnV0A3yxVy11hOdERYppbhbiqik5Leh/betX3phtoaplumxqXbPtepsc+32tiszaA+K9ROvgz1XwZq5979wI692LD/8l1GmF/9B0QHV8DutykcY5qWyVjT1Na+8uACTvro86HP6osidXABAD+vK4xOnZT0OexzLSedtQvG+OW4YIpPV6y8FGM5+Z67SRmOfEh33EUmJ94GI21z6hD6h0kdn3r8upoo3jHdvgrHgY5LDeFao2Wnnq9hmvaXC0wkzZ/vtSmfPih0bGsduf67AI3fhtr+CfsyGITlZbMP/XFEz5+QLvW1kHlT0/IZgWIKhPsivdZ2CP1U1OYeK902xLkfjJMm3bozjatNHxSp323uiVTfBvUHH36ctLq+t/bz45985lk//u0R7/jPZ5x9ftJ8hXwIfTCkrkvL0PJfGzAo0aZ+6IMidXt1HVR72epjINegtrw1b7Q9fKXRd0nog+Ibd98YhtA3gdrbjw69+97il+MKo6Oj0753Bbl+XteRd43pwbTYfVCkrijaxrfel2qIc5zHSZNt+9UuvI4VHT/pBu0HHQPqjyIMmfZzaGNe7bJHh3Hjv/LLSD3mQnv4b6W05a/24rXO6LEVXV5t3odj6LwLLpnlCnST/qsfFK1X/7/+ZvZxffNts8/ju+65v8Zq1beGzrnJk6ckpmXKU7o+KJRI57LWp3401HeL3telR2LD3ZtqOu6vvaFPYl+Hfa5XjU8duhxwSNL4MH+0DwrNE/2cuozweV833+5771uj/4QwPfSj4Jog8qPUt4rW1//BR8IsidfQZ5CumRpCHxRafqYh0/GZaf504/XbIRi8/OqAdLPM+t79Rg3zhP6FwvVh4OuDa6TJ95wP56j6j0n9bgt9ckS/Z0IfFJ33OyhpnYV8H7vagz4v2jepw8h3RyfymTqtmJ/L+dwsZj5ZFgIIIGClIij2DbS2s7YX52j62v4Az1TwGwrGcgUolB/35LIvsFNhY3RQR6wqHEztDFjjVMCnzqyjQyiczSdAoXShsDO1Q1rXBJVffiigU0fKKsRWAaRuRsKgjn21HdECTBVAKn200DxbYaCWVVvDQrZPBZXaZnX2HA3MqLMr5VH/o+NDXjO9unb6Z6nQPhQoa9nqsDjfIEN0uWH/5Zs2zK+OysPganbMCuPD/tO0TMaalhqgULBN+VDQJXrT5/o68OM1rZAAhY6zaIF46IQ7GtgK21zbvBRjOfmeu4Ucd4WeF+F40v6JDuGcDftFQcR0+0rrC8uImobAkI6H6KBjRcvR/xCg0PRCrk35BCh0fGobcv3XsooxuP5SfJ50vkeHbNckXT/loP8KNkUHXdt0vQsBoULmjS6H9wgUWyB6X1ObIEUIdOTTkXaheYhzPxgnTSHbVR8BCt1vhAIb18xSYnP1PXnN9b19wEAdRGcbXL8WfsCTkNwAAEAASURBVBkqUNZ9SBjUibWWrU48w/1apgCF0qiwRvMrcBAdVNiqQmR1xq1BnzWfOgKPDu4Jej9e06KFyKEz7EO794zO7gtcNa+W7fq5SExTZ+GH9Tjaj//o4zF+fOisWAVQ0Q5jQ0elWk6pAxTaEJ1b+l/KIc5xHidNtjyoI2qZ6pyYMuWHpFlVyKgOqTU9WqiYaT9fdsXVft6rr7sxsRwdj6Fz99QARegY97Z+yQHWUJivgFx4iEYLVNBg25129//DeJ0H6lw4WiiZWHnKG51r6khe+RkzNvlBsjDrFVdf56eHTrfVgbXm1/E4LtLxsI5jjdO0aGfzufL0wkuvhFUlXpU
nBfy0bXr923UIHx0K8Yimi/O+Wo77fAMU0flC8EL7XP9DQELBidRxmezPOu8CP68CZKm/20e/935iOaED6GHDR/hxqddDHX8hSOyaVfOryydAken4LOQ80sruua9/YrvUsXt0cP0czQrB9Asu6ZWYlC1Ake8xPs39Jg3WU105RRje//CjxPkYvRbomhHmD9cMpSnk+zgERbScaAfcM9xvm3B91LRSD+V6bpY63ywfAQSqS6BimngKtUTy7RQyzB99rU3a6HL0vhjNE7nCRnMFhAoS+SaDVD3R3SAkmhFabbXVfLNDYd2hKRU1n7OA60NC/V8ojSv0M/cDwHeonasPCi1LneaqjXqtV80TqT8KV7Dpl6Hp6667ru9gV9O1bFdw7+dTU0Ia1KyNpkXbdw8dLKsKp5qg0uBuvMwVrCbawvcjI39qa1jI9ikP8tM2zTHHHN7N/VAxd4Pit6h169Y1OsaNbGrGt1rG119/7f9rH6T2SZIxYWRC2H/5NvEU+nHQcdDE7X/lR807uZ1is9y4uE08aZMS+9G9n99VyZWxjhd1DO5+1+XdB4W2TduidE2azD6uZa9h1VVXtUUXXdS/L1ZeirEcV/PAd3Cfq3m2Qo67hGee50W+TTwJT33aqG8bWWtfaZjhzmO5N2/e3NRUVxhczR9713WCrmlqWkpNFemc13kR9m1o4klpCrk2hetSOTXxFG2mStdENcukczPXNckF6XxTbjJQM3hy0jVC57aubWoyS01laShkXp+APwiUSCBa/T9XfxTpNiHcG2labfrmSrdsjYvTJE2cNJnWn258fTTxpO0IneLqvTrLXmGF5W3EOyNN7f2rA+EXnn7MXb+baXLaQdeiw3se65u30Pzbbr2lfeeadgydrF59+aUWOtbN1MSTFvzNhG+tY6f9fEfIakZJTdp89dXX9uqAgX69ockdfReoWSkNas5jww3WM1ew65vRULMd+h6JNvH0wUcf2257dvbza5lqiqnbwQf4z67g12665Tbf3MeWm3ewhRduaq+8NtA3Z7K5azqq3829/XVWM599/oV2/wMP+XnVMaw66H7hpZd9E0EugFyyPij8htbhnzjHeZw02bKkJkpcwMr3GaF9qmNqKdd8qQs8+Y5ktY+17++76zab79/vv0z7efjb71joPH2brbeypZda0jXLNdRcIaE/VnSvHW3i6Q7XJMuFl/Tym6f5u3Tay3eM7h4mMFfA6fuJWMotQ/1E/PHHn/bo40/4eU87+UQ74rBD/PvQga0+pOu3xM/07x9XAGwdO+/nmoRZ3l56dvayotP1Xse3OuLWege+9Jw/JkPH1ZqupqvmnHMuf67IpvPee9plF/1Pk/yQKU+ZmnhSomiaY4860jWF1nP2wv79W4hHUsIy/RDnGI6TJlf2h7l+ddING7nmw6LDfq7PEvXBo+G4o3vadTf28e/7393Pvb8padpxRx/pp2X6o+aR9tpnf38+6BjbbOON/TXf1cbxzeApXbQ5NX12AWy7oU/fxPVQ41zA0C8j2nFzPk08RY+16DlXyHmk9WsIHcjr/equzGKzTdqZC2ray68O8Num8+zufrdY60VbaZaMTTxpWiHHeOhDRn7bbLWlb4JanY2H76RoHxT6nbPOhpsmeV/0v3P8eV3I93Hod0Pbqu/uli1b2sBBszs3D/1glKqJJ62zroZSnGd1te2sBwEEGoZAxQUoUn+I68f0yssvk3VvKI3aVnaRZz9fMX6AZypcd0/u2vjx433fEek6yW7atKkPAIQNVnv8as/8N1cIrMJDDSpQU1oFAKKDCqG1XvcEgB8dOmsOBdz59EERlqcCS/WpEAo0NV7rXWONNXxwIsyngInWqZvwMKhAc/HFl0hqn155Vt4VMFFBpQZtp9q3T9fWvqYXwzDf7dP6VDjpalK4AsfpvrBd45SXJZdcKmeH35o316AOdtXuvwI+hQxh/+UboFABuQqnFSgKg9ar/jB0LEWPsUzGSudqHfgC6vbt2yfaslbARWl++unHhJFu+Nq0aePt8u2DQvtcx7COMd0catDxpX47ou37FysvxVhOIeduvsddoedFpgCFq1ngg5epx4j63tBxoEIrDQo4KjihIJAK1KOD+idREEY/AjRoujq613gVQq2zzjquwGjhRJJ8r00hQLHBBhsUfOwnVlaCNzr2XC0SH2QLfe7kuiZpMxRwdLXYEk4ap3NApqnndiHzajkMCJRKINp/hNaRz31OKe6N0uUvzg/eOGnSrTvTuH26HmxvuwdEXn/1RVvS9amQOtzQ52ZXKHRjUsfAe3Tq4gMJofPakMY1K2GPPfGkPfvEI7bqKiuH0b7DU/dEuV131eW26847JsY//+LLdm//B+2tocMS41ToqULXdNuSmOnfN7ofu+Lq6+2lV1/zhfsarc5+Tzv5BNtum60Ss2fa3jDDx2PGmmuKxwa9MbugS+NVEH3GKScm+gPQOBXYHnviqb7jXn3W9fCSC86zx1znyQqMDBnwsi22WGtN8kO/u+6xm26+3Qc/1G/FfXfe5sfr/kIBChUkaZlh6NG9mx3Ts0ei8Fvjda1We+UqNFNAQsMuO+1oBx2wn3XqcoBl60zcz1whf+Ic53HS5OL4/fc/TMdq6CQ6Or86Lj/rtJN90D46PtN+VkHeBRf38vtf86vPkSsuvdh3bq4CxUEvP59YjIIjl15+ld17/wN+3MknHGs9j5jdt8lP7jePglrqPDgcAwpwnOAK7zvt1TGxDNeEi+lYb+UKDHUONm48b2Ja6pvQ38Pp7hjv3u2Q1MmJz1vvsKt9OW6cPfXog7bG6qv58bf2u9Oeeub5ROffChAe0HVfO6pHd3fvNUcibaY8pbumhEST3O+Hdu1nn7uvPP+0LbdsmzAp8ZqvRyJBGb+JcwzHSVNMgmiQIixX/aBEAxe5ghMh3cSJ39vpZ5/nA71hnF51TB1/bE/ruu/sIG+Ypt84rmkyfz388KOP/GidV7oO9jziMPdbdvbxFwLK2TrJznR8FnIehe3S77srr73Bbr719jDKv+o7QoHyM911IwQ1NeHyq6+1vrfcbnfdfnNS5/Qhcb7HuKvRYKe4790QUFd6XTd0j375VdfU+M5VMOe8Cy929/ff+FV9+uHoxDlbyPfxfS5o/sBDjyauAXJWcDL0u6P+qSp9qO/zrNL92H4EEKi9QMUFKJTldD/E9dRgaqBCP74VlND8qUOpOrxLXU++n3XzoSfVVZCmwt1sg+bVTUGu+bItI0wL61UBd7jBCdOir5pPHURrnZk6ktaPz2zLiC6v2O/z2b7oOmWtwlx5V+oQ9p32hwr/iz3ISMsu5DhL94S6jhsdF9msi5WXYi0nX8t8jru6OC9C7ZRM52Y0Pyr4UUBD53w+QzDN59qUz/Lqch5tu66VOv4KvTbJSMGcXNdG5aeQeesy/6yrugRS742UewUqNOgeKQzhnig8tKHxpbwnivODN06akL9KeVVBkQLBKlgt5Hs2mr/vJ022JgvM74MG0fGFvFfthInfTzTVkp0/y/eCCo9ck0zW0tVIixbIpluXvv
d+d9fPedy9Sbq8aVkqFG/ZonnOa7MK8xZynXZn27Z021AJ4+Ic53HS5Guh/fa1K8T71gWFFl9sMVeTYoms+yfbfp7sanjO0WiOrDWCwnbpe1rft6qhmO67Wsf5PPPMbYtEHqIIafWq9HroIl3a6HzFeK9j9++//s6Zr1x5qs225PKozbLrIm2cYzhOmmLnJV2QQutQrYp8gxPRbdK9+xdfjvOdWy+77DL+2hqdnu69jj8NTRdaKN3kvMelOz7jnkeuqSP3cOZX5ppfsiV03XCByNqei/kc47pmTHEPaek7Kd/fP/q+SxfELOT7eOrUadbIPeCY6XqU904owxnL4TwrQxY2CQEE6lCgIgMU8onWpIh6hR/h0R/emh4CGOGHucaV8ge5ls+AQLUJpAtQVJsB+UUAAQTqUyBdoCLT9ujeSEGM1Ac8Ms0fZ3ycH7xx0sTZNtIgUJ8CcY7zOGnqM4+sG4FUgTjHcJw0qestxufUIEXc4EQxtoVlIFBsgXI5z4qdL5aHAAKVI1CxAYpAnOuHeOqP79TABkGKIMkrArUXIEBRe0OWgAACCBRDIDyQEWqTapnhIY5Qs6KUgYmQhzg/eOOkCevjFYFKEYhznMdJUykebGd1CMQ5huOkKZVmCFIQnCiVMMutL4FyOs/qy4D1IoBA/QpUfIAi8OkHuIZQcyL8CE/34zsapNB8p/Y8MCyGVwQQqIWAqvurLwc1B6Q+MRgQQAABBKpbIM4P3jhpqluZ3FeiQJzjPE6aSrRhmxuuQJxjOE6aUgqqg+3UzrRLuT6WjUBdCJTbeVYXeWYdCCBQXgINJkBRKKuCFHq6kOBEoXLMjwACCCCAAAII5CcQ5wdvnDT5bQ1zIVA+AnGO8zhpyifHbAkCZnGO4ThpsEYAgcIEOM8K82JuBBAovkDVBiiKT8kSEUAAAQQQQAABBKICcX7wxkkTXSfvEagEgTjHeZw0lWDBNlaPQJxjOE6a6hElpwgUR4DzrDiOLAUBBOILEKCIb0dKBBBAAAEEEEAAgSwCcX7wxkmTZROYhEBZCsQ5zuOkKcvMs1FVKxDnGI6TpmqByTgCMQU4z2LCkQwBBIomQICiaJQsCAEEEEAAAQQQQCAqEOcHb5w00XXyHoFKEIhznMdJUwkWbGP1CMQ5huOkqR5RcopAcQQ4z4rjyFIQQCC+AAGK+HakRAABBBBAAAEEEMgiEOcHb5w0WTaBSQiUpUCc4zxOmrLMPBtVtQJxjuE4aaoWmIwjEFOA8ywmHMkQQKBoAgQoikbJghBAAAEEEEAAAQSiAnF+8MZJE10n7xGoBIE4x3mcNJVgwTZWj0CcYzhOmuoRJacIFEeA86w4jiwFAQTiCxCgiG9HSgQQQAABBBBAAIEsAnF+8MZJk2UTmIRAWQrEOc7jpCnLzLNRVSsQ5xiOk6Zqgck4AjEFOM9iwpEMAQSKJlCyAMXoTyfY2isuUbQNZUEIIIAAAggggAAClSUQ534wTprKUmFrETCLc5zHSYM1AuUkEOcYjpOmnPLMtiBQCQKcZ5Wwl9hGBBq2QMkCFJ98NcmWaLmwLTDfPA1bkNwhgAACCCCAAAII1BD49bc/bcLkH22lpVvVmJZtBPeQ2XSY1hAEODcawl4kD4UKcNwXKsb8CNSNQNxzs262jrUggEC1CJQsQDF1+gz7ecbvtkzrZtViST4RQAABBBBAAAEE/hUYP3GqLTh/Y2u20PwFmXAPWRAXM1eggM6N+eadx1ot0qSgrefcKIiLmctMgOO+zHYIm4PAvwJx79cARAABBIopULIAhTbyiwk/uB+m81rLAm++i5lBloUAAggggAACCCBQtwKTp/1i036eUXDtibCV3EMGCV4bmgDnRkPbo+QnHwGO+3yUmAeBuheo7blZ91vMGhFAoKEKlDRA8edff9s3k36yOedsZC2aNqG5p4Z6FJEvBBBAAAEEEEDACaiZgCk//WIzZ86yJVs1tXnmniuWC/eQsdhIVMYCnBtlvHPYtJIJcNyXjJYFI1ArgWKdm7XaCBIjgAACEYGSBijCelQdecqPv9hvf/xls8JIXhFAAAEEEEAAAQQajEAjl5P55p3bFl5w/oKbrsmEwD1kJhnGV5JAODdaLNyk4CbPMuWTcyOTDOPLRYDjvlz2BNuBQLJAODeLeb+WvAY+IYAAAoUL1EmAovDNIgUCCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgg0ZAECFA1575I3BBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQTKVIAARZnuGDYLAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAIGGLECAoiHvXfKGAAIIIIAAAggggAACCCCAAAIIIIAAAggggECZChCgKNMdw2YhgAACCCCAAAIIIIAAAggggAACCCCAAAIIINCQBQhQNOS9S94QQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEChTAQIUZbpj2CwEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBqyAAGKhrx3yRsCCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAmUqQICiTHcMm4UAAggggAACCCCAAAIIIIAAAggggAACCCCAQEMWIEDRkPcueUMAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAoEwFCFCU6Y5hsxBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQaMgCBCga8t4lbwgggAACCCCAAAIIIIAAAggggAACCCCAAAIIlKkAAYoy3TFsFgIIIIAAAggggAACCCCAAAIIIIAAAggggAACDVmAAEVD3rvkDQEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQACBMhUgQFGmO4bNQgABBBBAAAEEEEAAAQQQQAABBBBAAAEEEECgIQsQoGjIe5e8IYAAAggggAACCCCAAAIIIIAAAggggAACCCBQpgIEKMp0x7BZCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgg0ZAECFA1575I3BBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQTKVIAARZnuGDYLAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAIGGLECAoiHvXfKGAAIIIIAAAggggAACCCCAAAIIIIAAAggggECZChCgKNMdw2YhgAACCCCAAAIIIIAAAggggAACCCCAAAIIINCQBQhQNOS9S94QQKCGwLMjJ9gjb423T76dXmMaIxqOwEqLL2R7b7yM7bzeEg0nU+QEAQQQQAABBBBAAAEEEEAAAQQQaGACBCga2A4lOwggkF5gwtQZNujD7633C2PTz8DYBimw0Uot7KRdV7Mlms3fIPNHphBAAAEEEEAAAQQQQAABBBBAAIFKFiBAUcl7j21HAIG8BU68a4QN+2RK3vMzY8MRUJDi6oM2aDgZIicIIIAAAggggAACCCCAAAIIIIBAAxEgQNFAdiTZQACBzAJf//Cr7Xv1G5lnYEqDFzhzrzVp7qnB72UyiAACCCCAAAIIIIAAAggggAAClSZAgKLS9hjbiwACBQsc0vtN+pwoWK1hJVCfFHcctUnDy
hS5QQABBBBAAAEEEEAAAQQQQAABBCpcgABFhe9ANh8BBHILbHrWC7lnYo4GLzDk4h0afB7JIAIIIIAAAggggAACCCCAAAIIIFBJAgQoKmlvsa0IlLHArFlmP0z70Vo0W7jstpIARdntknrZIAIU9cLOShFAAAEEEEAAAQQQQAABBBBAAIGMAhUZoLj30eft+ylTbbFWzW2/jrmfiP183Df2xIuDPMJeO21pbZZaPCNIQ5gwffrP9vU3E+znX362FZdf3po3b5Y2Wx9+NMb63XW3n9a92yG28korpp2PkeUpUOh5UOpcnHtFX/vmu0nWdp3V7cgD9yr16gpafrUEKNZbrpmd5fpaeO39idb7hbEFGVXyzKfusbqt0HpBO/+h9+zbqTMyZoUARUYaJiCAAAIIIIAAAggggAACCCCAAAL1IlCyAMVn4762S66/I69MNZ53Hutz6el5zauZTrrgWpv243RrvkhTu+Kc43KmGz7qQ+t7z6N+vqMO7mzrr7VKzjSVOMPkKVPssiuutseffDpp85daaknruNuudkzPI2zOOedMTBsw8HXr1uMo//mu22+29pvWbfvsP02f/n/27gK+qvKP4/iPHt3d3d0lJRggKgaY8LcVu1HAQhRQVMQObEBFEYOS7u7u7t5gsA3+z++Z53J32cbq3t34nP/L7fR5zvtc/q/tfPc8Pzlw4KC9ftkypSVbtmyutvnzzJTZi+TH38Yn2sQaVcrLsw/dleg+qd2Y3H8Hqb2eHr977wGJjomRHDmyS5GC+V2nPBN5Vnq/NMguJ/ffs+skXpxJLKCY8mpHCcty8d+FZzOios9L21cmea72y+Vh9zaWhhUKSlSMaXP/wGhz6UI5JX/OrLJl/ymJOBudItc/+7STArmyyWNfLZSlW48meA4CigRp2IAAAggggAACCCCAAAIIIIAAAgiki4DXAoqNW3fI28O/TdJNZcqUUb4Y0jdJ++pOyX0xGwoBxenTp6X7nf+TNWvXJujYrs0V8v47gyR37lx2n/QOKH4YOVr6vzbAtmXcmNFSq2aNBNvuTxsmzpgvo/9I/OVvxXKl5OXH7/Fqs5P77yAtGnP/cwMkxrz8Ll+mhPR78r44p/zr39kyd9EK6dKxtbRoVCfOtvReSCygmP56J8li/j8oMipGdJgqzyk8MkpuGDTdc7VfLtcvH9uDYtrqwOlB8euzbaR4/uwy9M+1Mmb+zhS5ElCkiI2DEEAAAQQQQAABBBBAAAEEEEAAgXQX8FpAER5xWlas3eS6wfPmzd+IUePscv58eaTbNe1c27JkzixN6td0LV9uJrkvZoM9oIgxf9H+0KNPypRp0y1d44YN5fYet0jdOrVk67btplfFe7J5yxa7reedt8srffvYeQIKy5DsL+4BRdsWDaVi2VKXnKOA+YxXr1z+kvVpuSK5/w7S4tqJBRRpcX5vnSMpAcXN78yQfcfOeKsJnDcBAQKKBGBYjQACCCCAAAIIIIAAAggggAACCISAgNcCCk87/cvke5953a5O6K/Ljxw7IeOnzZUdu/fJUTOEU4mihc3L35JyTbsWZvifrK5Tur+Yff25h+TPyTNl7aZtEpY1qzSoXc2GHfny5Hbtn1hAcdzUa1iyYp0sN2HKQVPXolqlclKrWkVpWLu6ZMyYwXUOf55xDxrKlysn48aMkpw5c7qarL0rrrn+Jtm1a7ddv2jODAkLyybux+kQT2fPnpOJk6fIps2bpX7dOtLl2qulYYP6rvPoTKQZymf0r2Nk5arVsmXrNilWtKjUqF5Nbrqxq5QsUcK17+IlS81QU3/Z5YcfvE9mzJpt/psjp06dkq8/+1gGvDVY1qxbZ8+jO3Vo11aKFC4s3W647pJruk7qJzPuAcVj93SX+rWqJtiyhcvXiH7+dOrcoaXpeVDSta9+xn/6fYJdrlW1omjYodP6zdtl9sLlsssMpxRpnknZksWkptnepnkDu9354v7vwBnq7Lfx02Tv/kOin/87b7rG2dX2DPjom5/tco0qFaR9y0aubWfPRckE8+9us6nVsv/gYcmXN7eUN3VarmzdRIoUiq1fMsf0jFi2eoMsXbXeHpclS2apXa2S5MgeJvf06GrXjRj9p0ScPiNlTHu7drrCdX6dWbV+i6zesEXWbdwqGU1vhZqmDXVrVJEqFcrE2e+rkX+IDheldWIa1akuk2bOl7Ubzb/tsKxSq0pFueHqtpI5c8LDMcU5mdtCWgQUTSsXkpualbXDEL328wq3s4vc3baC1CqdX+ZuOChjF+6SuuXyyx2tK8iCTYdkiqkH8VSX6lKpeG7ZfyxS/l6yW6avOSAa2npOWkfh+ialRWtJbD8YIROX75XZ6w7G2feW5mWlcaVC8sPMrVLR7N+pbnHJb4Y3unvYbFuHoVe7SrJk6xEZPWe7Pb37/iUL5pDODUpK3hxZ7T7D/l5vz31949LSuWFJyRmWWdbuOiFfT90cb1iT3PYVyp1NrjHXK22uu+foGflqyiZZu/uEbVetMvnk7jYVpUmlgpIlc0bZcShCdh85LTPXHZC/Fu920XRpVEra1SwqJQvkkIMnI2X5tmPyzfQtcv78RT96ULi4mEEAAQQQQAABBBBAAAEEEEAAAQQCSsBvAorlazaKvkDV4WM8J601ocPJ5Mkd+9LdeTGry1pTQetRuE+5cuaQt/r0lpxmnHydEgoo9AVx30Ef25fA7sfrfNP6teTBu7p5rvbL5Q8//kzeGzbctm30D99K40ZxX2Trhj//Hi9fffOd3Ud7UGgA4R5Q3NC1i4wdFxso2J3++zLq+2+kSePYF+f79x+QO3rdJ9u2b3ffxc5rIDLy+xFSq0Z1u/zzr7/Ji31fsfMaPji9O3TF8oVzpF6Tlnab55dBA9+QW7rd4Lnar5aTE1CsM8HZkE++t+1v06yB9Ly1i+teJpmhokb9N1TUvbddLy0b15Vf/vpXxk+d69rHfaaeCUIe+193yfBfbub8O3CvxdJv8KeyZ/9B0X8Dw9541nW4vsy979k37HKjujXkkZ432/nDR4/L6+99KdrjyXPSodeef6SnVC5fWr79+S+ZMX+p5y6iQcVng16y6x/p87b9t1SudHHp/9T9rn1/+2eq6PBP8U0aorRv2di1yemhUdjUtwg3YceZM5GubTpTwQSWfZ+4N866pCykRUCRN0cW+ePFdnY4qDHzd5ghidbZS+uL9s8ebGZDoAc+nWdfwP+vfSW5r0Ml+8K9WL7skjlT3LBz496T8r+P4j7nBzpWlp5tK15yO577fvFQM6lROp89dynz4t+ZrnrjXxM+lJLHO1eTTftOSq/hsed39t9y4JRULHoxuNXjth8KN0Wlz0iLqoWd09jvOtzV7e/PkgPHL/ont32b952yoUycE5uFV0evkMkr98ltrcrJo9dcWg9o0eYj8uSIRfawbx9tGe85tF13mUDGqVlBQOGpzDICCCCAAAIIIIAAAggggAACCCAQGAJ+E1A8/ep7or0Z9KWo9pjIa+okzFm8Qrbv2mclb+lypVzTvoWdd17MOsT6V9hFCxeUxSvXuV5o
6roXH+1ld4kvoDhn/mq8z9sfucIN/WvxUsWLypxFy2XTtl32uOuvaiP6n79P9z/8mCsAWLN8kWQPC0tSk90DCj1Aa1TUrVNb/vpngmtIqFYtmst3X39uz6cFuD//aoSdv7dXT6lTu6YsXbZcvv3hJ7uua5fOpsbF23bePaDQFdqzo0Xzpublegbp1+d5+XnM7zJv/kL5Z8JEu/+D990jpUqVlGZNGkvFCuXtOn/94h5Q3Nylg+0N4NlWfcGuvQv0j+R7vxT74t4zNHhz2NeyxfRa0OmTt/vIafMy/pnX3rPLGr5dbf4dREdHy7+zFsrJUxF2/avPPGB7KOiC8+8gNQGF9ljQ3hE6ad2ISiaM0B4cTq8Pp9i3rtuyY7eM+Xuq3Vfv5aq2zWyvpQ7m345O8QUU0+cuke9+/dtuz2482jVvaAo4x8j0uYslKiq2ILJ7LxQnoNADtNh2CxPa7Nl3UDZs2WHPoV9eevx/UqlcaddyUmaSElDoC/k9Ry8Nas6bzNTp7dCxTnF5tXtd+1xvM/vvOhwhf73U3hZ5/n3BTnlnXGwNGCeg0LZpwepvp2+VBRsPyZXm+FtblLMhk/a0GPLHGtt857y6oOv/XrpH6pXNLw92qmLDjQnL9sobv660+zqBgy5sNoWltW7DIdOzYMHGw6K9JRIKKHT/qaY3xw+ztkrzKoVNgFLZFXbNN8d+8e8mKVMoh7x8Ux17TfcQJqXt054cI6Zukeol88qdbSqYnhtZ5MTpKLn2zSm2MHmJAtnl/f81loKmp4X2CNEeI0dOnTP7nJMbm5aRZ7vWsPYfT9ggi7ccldbVi9jeKlozRHt5fDVls96WEFBYBr4ggAACCCCAAAIIIIAAAggggAACASfgFwFFtHlhOfO/v84uW7K46BBQOoVHnJHH+w2x8zoszbMP3WnnnRezunCdKch743/1LE6GR8iLb37o6hHxqXnpmzVrlnh7ULj/Zbu+ZL62fexf9J83byP7DflU9h04LO4vfu2F/fRL7YbNJCIiQkqXLiUzJo9PcivdA4pOV3aQTz58zwYI2oulXpMW9pzaM2Ll4nl2/fSZs+X48eOSP39+adM61ksvdsvtd8uSpcvs8FGrlsy313cPKNpc0Vo+G/6+eRZZ47QtmItk/88Me9S6ST17vz/8Nl6mzo79i/A3X3xEihcpJDqs0sMvvmW3161ZRZ64t4dob4aV6zbFrjPDH+nnTycdWunDr0fb+e5dO5pgoLmdd/4duH9Ok9uDYtHytXLKfHby5MoljerG9n7Rk784cLgd8syzgL0TIMRXJDu+gKK/+be02wQMOg144WE7bJvOb9y6Q94e/q3OmqGeKssT991m553z64JjpfM//jZBpsxeqLNm6Kpr4wxRZVde5ktSAoqETrF4yxF54uvY56f76Av1xmZYIh2OaJYZfkl7AmhA4F5I2z2gePqbxWaop8Ou099lXtQ/ZIKHyHMx0uG1yXb9qKdaS+lCOeWXeTvk/b9ie2bohsYVC8r79zSWc9Hnpd0rk+y+TkCh1+8+dKZd53zp3qJcggGF9qC4e9gcZ1f55IGmUseEIJ5tf+76mnKDGWZqw56Tcs/Hsb0wUtK+vSbsueXdi+3TcOHtOxvYcKdV39ihzbQxCdWg0LbpUFkrth+TlTuOudrd/5Y6clW9EmYoquNy/6ex/39DQOHiYQYBBBBAAAEEEEAAAQQQQAABBBAIKAG/CCjcxU6cCpcdptfEKTPkjP5F+cixsX9h7z5sjPNiVo/7Ykhf2+vCOYf7EDkvP3GPLWAcXw+KcZNmytgJ0+1hHVo1kfz5Lg598sfEGa6/7ta/bM9mQg5/ntIioBg6+C3RYZ6c6ZkXXjI1JP60ixo6uNe0OBMZKRs2bJLDR47YmhK6rzNtXb/KzroHFF9+Olzat720J0owBxRal6HVfwHFzj375dV3Y3uh3NS5valF0UqWmtBh+H+hg3sPAsW7YLpd6DGHjhy3/wa27dorM+Ytsa5dTCDnFJh3/h2kJqCwJzVftP6IXuf4iVO2jsSP/9XG0O1fD+3v7CZOgJCUgMJ9WKn46s4898YHonVn3HuWOOfXHiiDXn7Mdd01pn7Fu5/9aJe1DoVnjQvXjgnMJCWgOHkmSmLc6ho4p5pihiN6zy00yGrqJfzzcgfJnjWT3UV7ydzxwSxbQ8E5xgkoTplzXj1girPa9X32gKtt74UOr04WHU5p5htXSSZT80Z7VByPOOfaT2fevD22DoyeR8/nBBQjZ2+X4eNja4I4ByQWUPw0a5t8ZHoiONNL3WrbuhNa4+KFHy4O36X1KJ6/oaa9H+1VolNK2vfHol0yeGxsDxE9R0bTe2rGG53sd+1BoT0pdEoooLAb//tSrkguKZ4/u+2pcp2pSaHhxU7Te+W292LbR0DhrsU8AggggAACCCCAAAIIIIAAAgggEDgCfhNQbNy6U4aP+DnesfCVM76AQofBef+1Z+Joz128Ur78aaxd1+vW6+SKZvXj7UHxwZcjZYUpjH25qf9T99mCvZfbLz233/tgb5k2I/YvlVM6xJMWyW7dsoXrNga9+7589sVXdtkJKA4fPiIvvNzfdS3Xzm4z8QUUf4/9VapXu7SQdDAEFI/0vMUUZr/03jJkyOgaPkd5nCHMypQqJq8+/YB8/uPvMn/JKlvD4ZO3XjQF2TNaRa0/8dv4qfHWYtEd0jqgOGYCiQ+/HuUaSs02wuNLSgMK7YX0sqnxolPrpvXlf92vi3NmDRw0eNDpY2OgQzo5AYVnoKG9MLQ3hk7eCihufmdGvIWh7UU9vjSrUkje7dnIrvV8Ea8rnYDCvRaE+ykm9b9ScmbLLM98u1jWmaLRGnhcbnrpp2UywxTXdgKKzyZvlO/M0FHuU2IBhef+TkAxbfV+6Ttyues0WpS6z421XAGFDsuUVu2b9lon0YAnKQGFBhoDbq8nLasWuaSGhzaWgML1yJhBAAEEEEAAAQQQQAABBBBAAAEEAlbALwIKHdrm+QHDLKIOKVOrakUpW6q45Dbj3Dt/yR1fQOE5/Iye4J+pc+TXv2L/Yvmp+2+X2tUrxRtQfP/rPzLNjIOvk9a30KGgnOmUGSpKr61vmJvUqym5c5l5P56GffSpvP/hR7aFv/z0nTRsEPsX1+5NXr5ilUz8N9al2/XXSeVKFeMUyU5KQNGp8w2u2hRXdmgvjRrUMz1P8tnQwrlWqAUUnr0fHAfP79pbR3vt6PTRwBdsrYlI02tBayzcZwpk6zRr4XIZMWqcndd6DfVNUeyihQrYHg1aUFunpAYU+rJfX/o7k9Zceei/IaXci2Q/+cq7rvoWWoC6WsVyktsEf6P/K96tx6c0oND702GfdHIfxsmuMF9eeecz2bX3gO0B9fngvjbQCZSAYsBt9aRdrWL2VrReQpeB01x1KnSlE1DsO3ZGNPjwnKabF/VZzIv6Oz6YbV+0zzI9KHT6fkbcwEHX5c6exfac+H3hTlu02tcBRUbTsyOt2pecgGJor0bStHIh67r9YIQd5ulo+FmpXSa/HWKLgEI/HUwIIIAAAggggAA
CCCCAAAIIIIBAYAv4RUAxxRQBdoKIh+++WRrXq2FVz0SeNQWGB9n5+AIK3fDioz2lSoWydh/98sb7X8q2nXvtsvau0F4W8Q3xpH+9rn/FrtNzD98l1StfLMwcZQoT67UzmP/5ezih7Z86fYbc99CjOiuVKlaUP8aMilMoO8bU+Ljl9p6yfEVsMeQl82fZYMG9BsXlAorDR45Ku07X2ms8eP+98sIzT9p5/eLUoND5lAYUo77/Rpo0bqin8PvJvUh2UgMK9xBOa0hMnD7P3qcWcteC7jq998VPsmpdbNHf9183n91cOe365Ws2yrCvRtn5ywUU7p//If2ecNWx2LRtp7z14Tf2HE5AkVgPB6cGhR4QX0BRvGghefOFR+z5nC/x1aBweo5o4PKB+feYOXMmu3vE6TOi4YjWO3EfLioQAgqn94QWv9YhlwrkyibjTVHrAWNihzfTG3QCiuiYC9Lu1Umiw105U+E8YTL2hbb2xXvrvrFD2E3o28EGEQ9+Nl9W7zzu7Brvd18HFNqItGpfYgHFB3+vk5/nXiyGPuXVjraQtg4Tpb1UnEmHn9JhqAgoHBG+I4AAAggggAACCCCAAAIIIIAAAoEr4BcBxXQzvv53v/xtFdu1aCRaCDjy3Dn54sexriFgEgoo9C/F776lixQqkFemz1sqcxfFvoTXF6Ifvfm8PWd8AYWOfa9j4OukY+BrAe4yJYvZ4thDP//Rjo2vPTS0BkXmTLEvVe3Ofvgl2gQqGlDMnB1bALdVi+Zye49bpF6dOrJr9x4ZOmy4LFgYW+T3RtN74t1BA+1dJCegOHnylLRs19Eed0WrlvL+u4PMkDxhMvrXMfLagLdcKskJKLTGhVO/QutfPNH7YSlatKiEhWVznc8fZ9wDiitbN5FK5Utf0sz8eXNL5fKxwYOz0Slg7Sy7f0Z1nQ5xtnRVbE2Bh+66yQZ1e/YfkqFmKKTjxl+nywUUP4wxBbnnxD7rmqYnUrdr2kq0eZH+6fdj5Njxk/YcTkBx8MgxW1ReV5YuUVSeMf8GsplC5hOmzRWtw+JM7gHFgy8MdNVnefSe7lKxTEnJmyeX3TW+gMIZykp3aFC7mvS4vpOci4qSEaP/lC3bd9vjOl/ZSm66tr2d9/eAQocnGm+GYwoz9SeG/rlWVptCzV8/Ejs02sOfL3AVc3YCCr2pRabI9pNuRbadmgsHjkdKtyHT7X1/eG8TaVChgKnLcE66DZ5h61LoBq21MMwUydZJa1BovYr0CCjSqn3xBRROAW7PWhhOQKF1M7R+hk5aSPybR1vY4IKAwpLwBQEEEEAAAQQQQAABBBBAAAEEEAhoAb8IKPTFqRb8TWyKL6DQAEH/Aju+yf0v0+MLKPQY93oV8Z3jjhuvlg7mBXQgTOHh4XLrHT1l/YaNCTa3WtUqogWrSxQvbvdJTkChRbKv69Zd1qxdm+D5dUNyAoqNmzbL1dfdGOd8g958XW65Ke66ODv4wYJ7QJFQczxrKOh+k2cucBV91+WObZrJbeaFvTO5f06ddZ7fLxdQrNu0TYZ88r3nYbbWRVRUtF3vBBS64BTavuQAtxXuAcWgj76VDVsu/pV7liyZ5bNBL9m94wsotPj2K6ZA+MHDR93OeHFWnV7s3ctV6D49A4rTZ6PjLZIdHhntGqbpfRMWNK5YMM5f77/Ro560r11MwiOjpPPAqSYQuuDqQaFFt7X4deS5GDlwIlJKFMguWcz/b503lbX/N3yubN4fGzzlCsssY55rI7nCstjjdx+NkMymLkmpgrHDy83feMjUq4gtlJ4eAUVatS++gGLgHfWlTY2i9kNx5NRZ+X3hLhkxdbN8YKwbGWstQr776GnzbM5LGRNQaM82M/penGdAkeyL/6aYQwABBBBAAAEEEEAAAQQQQAABBAJJIF0CCh3SRgME90lfen70zS+uItn64vOeHl3lO1Mr4syZyDjDwDgvVQsXzG//IvvLkX/YffR8OvTMdR2vkGYNarlOv3D5Gvn0uzF2Wf/qu4EZ29+ZdNz/SdPny579B51VdlioHl07SbOGtV3rAmHmwMFD8sbAQfLPhNhhY5w2a7hw/XWd5eUXn4sz9JN7QPH9119IyxbNnEPEvUj26mULJUf27HLEDPOkPR6cnhq687NPPS779h+QH0eOtsc6AcUvv42VF17qZ9f988cY0XAkvmnsuL/k48++dNW2eOftN6XbDV3j29Vv1nkGDfE1LL6AIjzitDze7x3X7q8/95CUKl7EtawzGn789s9UVy8F/Yx3NyHG8K9jfbt2usIWidZ93f8dDHr5MV1lpymzF8nPf052nUN7ajza6xZXcNHY1FV5+O6b7L7aM+O9z3+ytSD+O1yuattMjhw9IYtXrrOr3AMK7Xn0hRkabcuO3TYcjC+gcB+ySU8QHnFGvvn5T1m7catoXQqdtNdSrWoVpafp/ZTNrf6L00OjsumV0uex/9l99Yv2JOk3+BO7fKPpFaL/xpMztXx5QoK7T3/d1IMwoUFCkwYObfpPlBZVC8uQuxvaF+a3Dp0pe81Lc520lsSE/3pVTFqxV177eaUroFhjeljsP35G2tcq7iqari/hP/h7vUxZtS/OJYvnzy7v9GwopQvmtKGGbtSAY8KyvTLwt4vDR33+UDOpWTqffDpp4yU1K25pXlae7FJdNu49Kf/7aK49f0L7ayFsLYg91RTJ7udeJLuhKZLdrZZsPxQud7w/29XGtGifE1BobxAdHksnLcI9/L6mUqFobE+cRZtNj5MRi6zrx/c1kRrmXp1J7XS4p3vaV4rTvnEvtpOCubPJI18skBXbjzm7X/J9zptXX7KOFQgggAACCCCAAAIIIIAAAggggAAC6Sfgs4Aiqbd4Kvy06JBF+fLmcb3QS8qx+qI1S5YsktO8jE3JpEWEdcgbHZonZ47sKTmF3xxz9Ogx2bFrl2ivinJly0qpkiWMpfmT4zSaIk19juPHj0vhwoXMX76nzfBXWifjgvlT6cyZM6dRKwP7NNqrSC1SUwNF617oX+oXLpDvss//rPn8a3H4/ObfnfZMSsoUHR1j903OZ0sDjoymZ4D+O/PllFhA4Y12OEM8aT0JrSuROVMGqVYyr+w4FOF6MZ/YdauUyCPao2P3kdgQJLF902ObN9qnw2dpT43jp6Pi1OxQuyol8soh0wPl0MnIVN0uAUWq+DgYAQQQQAABBBBAAAEEEEAAAQQQSHMBvwso0vwOOSECCIS8QHoHFCH/APwEgIDCTx4EzUAAAQQQQAABBBBAAAEEEEAAAQT+EyCg4KOAAAJBL0BAEfSPOEk3SECRJCZ2QgABBBBAAAEEEEAAAQQQQAABBHwmQEDhM2ouhAAC6SWg9Ri0LoOvphql8kq3pmVk8dYjtoaEr67LdRIW0GGpRvRukfAObEEAAQQQQAABBBBAAAEEEEAAAQQQ8LkAAYXPybkgAgj4WuDvpXtk4JiLhaZ9fX2ul/4CL91UWzo3KJn+DaEFCCCAAAIIIIAAAggggAACCCCAAAIuAQIKFwUzCCAQzAJPf7tYFmw8HMy3yL0lINC0Si
EZ2rNRAltZjQACCCCAAAIIIIAAAggggAACCCCQXgIEFOklz3URQMCnAnuOnpZ3/1xLSOFT9fS/mIYTz1xXQ0oWyJH+jaEFCCCAAAIIIIAAAggggAACCCCAAAJxBAgo4nCwgAACwS6gwz39Om+HT2tSBLupP96f1py4uXlZhnXyx4dDmxBAAAEEEEAAAQQQQAABBBBAAIH/BAgo+CgggAACCCCAAAIIIIAAAggggAACCCCAAAIIIICAzwUIKHxOzgURQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEECAgILPAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCPhcgIDC5+RcEAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBAgoAvwz8PXE9QF+B5c2/56rql26kjUIIIAAAggggAACCCCAAAIIIIAAAggggAACQSVAQBHgj7PVU2MD/A4ubf7s9264dCVrEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBIJKgIAiwB8nAUWAP0CajwACCCCAAAIIIIAAAggggAACCCCAAAIIhKgAAUWAP3gCigB/gDQfAQQQQAABBBBAAAEEEEAAAQQQQAABBBAIUQECigB/8AQUAf4AaT4CCCCAAAIIIIAAAggggAACCCCAAAIIIBCiAl4LKJZv2hOipNw2AggggAACCCCAAAIIIIAAAggggAACCCCAAAIIXE7AawFF+OnIy12b7QgggAACCCDgIbB5zxGpVLKgx1oWEUAAAQQQQAABBBBAAAEEEEAAgeATIKAIvmfKHSGAAAIIBLAAAUUAPzyajgACCCCAAAIIIIAAAggggAACyRIgoEgWFzsjgAACCCDgXQECCu/6cnYEEEAAAQQQQAABBBBAAAEEEPAfAQIK/3kWtAQBBBBAAAEhoOBDgAACCCCAAAIIIIAAAggggAACoSJAQBEqT5r7RAABBBAICAECioB4TDQSAQQQQAABBBBAAAEEEEAAAQTSQICAIg0QOQUCCCCAAAJpJUBAkVaSnAcBBBBAAAEEEEAAAQQQQAABBPxdgIDC358Q7UMAAQQQCCkBAoqQetzcLAIIIIAAAggggAACCCCAAAIhLUBAEdKPn5tHAAEEEPA3AQIKf3sitAcBBBBAAAEEEEAAAQQQQAABBLwlQEDhLVnOiwACCCCAQAoECChSgMYhCCCAAAIIIIAAAggggAACCCAQkAIEFAH52Gg0AggggECwChBQBOuT5b4QQAABBBBAAAEEEEAAAQQQQMBTgIDCU4RlBBBAAAEE0lGAgCId8bk0AggggAACCCCAAAIIIIAAAgj4VICAwqfcXAwBBBBAAIHEBQgoEvdhKwIIIIAAAggggAACCCCAAAIIBI8AAUXwPEvuBAEEEEAgCAQIKILgIXILCCCAAAIIIIAAAggggAACCCCQJAECiiQxsRMCCCCAAAK+ESCg8I0zV0EAAQQQQAABBBBAAAEEEEAAgfQXIKBI/2dACxBAAAEEEHAJEFC4KJhBAAEEEEAAAQQQQAABBBBAAIEgFwiogGLztl2ux1GpfGnXPDMIIIAAAggEiwABRbA8Se4DAQQQQAABBBBAAAEEEEAAAQQuJ+D3AcX4qXNE/0touqZ9S9H/mBBAAAEEEAgGAQKKYHiK3AMCCCCAAAIIIIAAAggggAACCCRFwG8DCs9gonL5MnHuZ9O2nXGWCSricLCAAAIIIBCgAgQUAfrgaDYCCCCAAAIIIIAAAggggAACCCRbwC8DCvdwQoMJDR8SGtLJfV9CimQ//zQ7YMa8JTLm7ylxzpchQwYpWriglClZTK5s3VSKFSkYZ7uvFpy2FS6YX17o3UuyZs0S59InToZLv8Ef23VvvPCI5M2dK852FhBAAAFfChBQ+FKbayGAAAIIIIAAAggggAACCCCAQHoK+F1A4R44PH7vbXGCiYRqUOj6YV+NtI6EFOnzcXJCAA0lcubIbhtxLipKzp2LsvO5cuaQ3r1ulZLFi/i8gU7b9MItm9ST7l07xWkDAUUcDhYQQCCdBQgo0vkBcHkEEEAAAQQQQAABBBBAAAEEEPCZgF8FFE444dlrQgMI3eY+rJPnPir24Vej7D6EFD77/Lgu5IQAhQrkk/5PP2DXx8TEyJqNW2X0H5PkVHiE1K5eWe6/40bXMb6acdrmXO/Bu26SmlUrOotCQOGiYAYBBPxAgIDCDx4CTUAAAQQQQAABBBBAAAEEEEAAAZ8I+FVA8Xjfwfamhw14Ps7NO8GDrtRgwgkqdP6xe3u49nXvSeHZ+8K1EzNeEXBCAPeAwrnQn5NmyuSZ823PioF9HhPTyULmLFou8xavlPJlSspNnTs4u8qO3fvklz8nS9YsWeTx+26z65et3iBTZi2ww0V17tBKxk6YLpu375KwbFmlUd0aclXbFpIpU0bXOTxnnLY56/Pkzil9HrvH1dMjsYBCrz1n4XLZd/CQZMyYUUoWKyLtWzaWKhXLOqcT9/Zd3a6Fad802bJ9t+TPl0duuKqtVK1UTmYtWCYz5i+R8PDTUrZ0Cel2TXtzPwVc59CZdZu2yeIVa2XT1p2SPXuYVKlQRlo3rS9FCsXdL85BLCCAQNAJEFAE3SPlhhBAAAEEEEAAAQQQQAABBBBAIAEBvwkonN4Tnr0f3EMH9+DCCTM8gwjnPJ7hRQL3z+o0EnBCgPgCCn05r6GDBgpvv/y4fdH/z5TZMmHaXKlRpYI8dPfNrlas37xdPv7mZ7vv4H5P2vXO8XruLCa4OHj4qGjvDGfqfGVrE1I0dxYv+e60rViRQrYnR8TpM1KnRhW57/Yb7L4JBRSTZy6QPyfNsPvky5tbTp+JtENWaVDxwJ3dbNt1o9M+rXGROXNmOXTkmERHR9vjtN5FuxaNZeL0uXabsz5fntzS98n7XPUw1mzYIl/8+LucP39e9FpnzdBYZ8z1dP7pB+8U3Z8JAQRCQ4CAIjSeM3eJAAIIIIAAAggggAACCCCAAAIifhNQOIGDewihD8gJKDwDB6dXhWdAocckdC7dxuQdAScE8AwotAbFsK9HyU7TM8J9iKeUBBTa8vatmsi1HVrK8ROnZOTYCbanQoH8eeXVZx5M8MactpUzPRduv/EaGfLJtxIVFS13dLtWmjaoJcdPnpL+gz+xx7/xvCmSnSeXCQou2Lom4adP214QtapVMqFDjIwY/YesWrdZGtSpLr1uvc4e4wQUutD1qja2h8WBQ0flA1MX5bQJQ7JkyWwCjZtMj4iysn7zNvn0u1/lwoULtvePfq51+KvXhn5uw487ul1j2lTbXn/0uIm2l0ll05PisXsu9hSyF+ULAggErQABRdA+Wm4MAQQQQAABBBBAAAEEEEAAAQQ8BPwmoHACh7TqQeF5Ho/7ZjGNBZwQIJvpJVHbvMzX6ey5c7Jhyw774l17Etx/+412uCPdlpKAQs89uO8TZogoM0aUmRYuWy0/jPnHLr/32jO2Z4bd4PHFaZsGFNobwVkOC8smL/buJRnN8FCeAYXHKVyLOgTTd7/8JUXM8Ex9n7jPrncCCi0EPrDPo659R
2KmL+nF+pYnlvef7r168pf9GSclsJFNaMifvJhdNGeZUuL7eZDUefcBk3eiQ1bhgzw9zlAPABAiAAAiAAAiAAAiAAAmYIsDjBIoW2/NLZm4pkT67dFSO29XiWixEg0EkQsJKAXQoUWnGChQkWH8yldNLWhUhh5ezbsPqr7Vs+m9bJ3OU43ZN75WrmDjvEfggUEZvGiAoUU/71oX/23qDMqd2lwMCCBAsKV+6+pDbVs9N3NXKE2aFnrwLp6zE7DcJDr0b5qEHp0IVDdcKghcdp95kHVCBLUqpSOA0FBr+jNQdv0a1H/tS6WjZqVzOnqopvEIgQAT1uavWIoBg8fCQtWbpcMpg3awZVEIv512/cpC7de5HvhYtyvxIo+Icyg+Zt9qH4olYNuiU8GLr16ifTKXHEwtH9uyh+/PhW1Z38ywz6ZcYsbpa+rF+X2rdtI6KqQmjA4OF0zsdH7rdUoFizboPoT195TtdOHanT9+0o4M0bGjl6HP27eo3crwQK/mHNmOTJ4qND5260ddt29VN+nzi8jzhKAwUEQAAEQAAEQAAEQAAEHJEAp3niwmIFm2THRHGC+6/HsxxfBwUEHI2A3QkUWsGhX+fWRsKEOQ8K3j9uxiI5NxAp9P8TDU3ttDNSF3bzrujQqZ4gUETszyMiAsXTl2+p/vDtUpyY2aUUJXJ1kRfnSJLOMw5KkWLF4IqUOqnrDb7BMgAANTtJREFUJ50a8edJ2nL8HrUQ0RCLt10lU4Hiid9bavDjdsqXKQnNEG07xY1jaPubMbtkOq31I6sZ9n9yAewAAQsI6HFTq4dAsWPnbvquY+dwR6wVKK5euy7Nr/2F4XRYZWDf3tSubWt5yJq6x06cpMbftPykSc9knvTk6RO531KB4tHjx1TSu9InbWl3aAUKa/qp2tCmxuJ9YaWaUnXxDQIgAAIgAAIgAAIgAAIgYD8E9HiWs5/RoicgYDsCdiVQKHHCNGqCBQg+pk3rZFqHkYyf+busA5HCdn8glrT0+uA+CrxxzZKqZuvEy5SFEpYqa/Z4WAfuPX1NAxYcl2/IswfAwi2X5eJzidzJqVLB1CKdknFqHm5jx6n7tPP0PTro+4hSJnGlErmSUz1RL1Mqd8MlZq+/QAfOP6Kx33rJNneI+m4JXGSbX5bNROmTJzTUnfD3WXnNWf8rbdjHGyv33aD/xFv1Y74tSmmSJSRzAsXtx/40Z/1FOnvjuUjFFES5MySR6YyqF01raE/1p79IDTV/8yU64POIutTPbVE6IZXiifsxb9MlOnX1qexPeeHnwJEE2lRHwSHv6e+91+nIxcd02PexjBBgPo28M5H7h0V+7pSlY9bOD19n3ZHb5HvzBeVM70Gtq2an8gWMvThuPHwluN2kHSfvybGzV0eT8pmp5fg9VqV42nn6Pg1ZdIK61BOMKmQxcOQNnpfJK32oszj2tcmxwxceU685R6ihGG/ZvCnltqlAcfmuH307cZ9ME9WtQV6jtrvNOkzHLz2hjaOqib8XZ6Nj+AEC1hDQ46ZWD4GCx7zin39p/MSpBiGgcKFC1KtbF2rZtr1EMnhAX2rb+qN4cNbnPE2fOZv2HThoMNZm/4ruXTrJ6ActR2vqrvpvLY0a+7OhH9zmlJ/H0VdNm8smWzVvRsOHDJDbxctUlPVqVq9GM6dN0l5Sbp86fYZ69h1oMMlmoWPc6B/px1FjZcRHlUoVae6voR4bfII1/eT6HJGRr3Bx3pRl+pSJ0utC/cY3CIAACIAACIAACIAACICAfRLQ41nOPkeOXoFA5AjYlUDRtucIOZr5k4YajUoJD7yThQklVPB2306tDHW1kRSm0ReGStiwOQG/tavonf+rSLUb182dPOpYl1/7xoNX1EIsXmdI4SbT6/DCd8okCWjv2YeyL72+Eul5ymQ09GuDWCAfvfSM/O2dPyU9Fm/D84I5+wfM7V6GUn14o169Rc/tPnn5hornTE5X772S10jr6Uq/9ShreCtfLUrvmVjLcB3emLPhIv2x9Qr90bccZRbiR1gCxf1nAdT4p53yvOI5PSmeixPtOxfa98HNClINr3TymOoP95MNrwtmSUa1i6enSoVSy+PhfbBAcf3+K0qYwEmaRHMbLHBwaVk1G3WoFZqOiD0bxi0/Q+sO35Y8imRPRr63XtDdJwFUNIcn/dy+GLk4hRpMWzpmNT8pxJw8ev6GyuZLSXeevJb94euzqMORCFyYz7eT9sl6nJYpo2B/9NJj8kyUQHK3xoOChaEJK84Kr4/8n4hUv669IE23vyybkXo2zCevzR9sxt1KsHou+vH34Ep0XvxdsFhhKlAwJ04BFfA2RPY/rWeoWOVz8zl9P/WAMEBPKUQpL0O72ACBiBDQ46ZWL4FCjZ8jD+LFi2dxmqIQ4fVy+84dSpokCXl4JFLNhPltTd379x/Qe/Ff5FSpUlLcuKH/mxZmoxbsfOHnR4GBgZTc05PiCBH2c8XSft65e5fKVa4hm2PxY/f2jeSaIMHnmsdxEAABEAABEAABEAABEIixBDi104kroUbZnOIpphY9nuViKhv0GwTCI2A3AoWKnjCNftCKDlrhQokZpkKEasdUvAgPAo5FjsDzZYsj18CHs5M0bWFVO2oBnE/SihGPXrwRwsVu6SPwt0jlw8LD/adCDBi1Uy6+L+ztTRxxwWXz8bs08s9TchF+ascScp8SBFiMmCfECBU9oEQH7eK2pYv1YQkULJisOXRbvsmvogmuC9GFIwZK5UlBE9oVM+oPCxJDmhcyCAXy4Gc+lEDxXc0c1KZadln7rhAJmo7eRSwcKPPsbSfu0fDFJ4UY40kT2hc3pCgaLxb62V+hQ+2c1LJKNnm+pWPWzs9vPcpQrvShBrnLdl2j6f/5kpbj6KWnacORO6TtJ6d3+nbiXilaWCNQXLknfCZ+3ivFgtFtvMTC4UdIfX47SgdFdAxH2UwU41Tlj21XZCTLkGaFqLpXWlLRFKYCBde/eMePRi45JYUW5hUsjHtPXH5KuTMmpsFfFzSKxlHt4xsErCGgx02t3gKFNeOPrXUvXrpMO3fvob9XrqbLV65IDMMGD6DWLZrFViQYNwiAAAiAAAiAAAiAQCwh4N1jldFIYZJthAM/QMDhCdiNQKEEB60IwfSVQGEqOKioClOBgs8x1xYfQ7E9gegWKLKlTUQLe3kbDUyl8hktUhuVE6mCVNqf/k0LyHRB2sqdph+kM9ee0c4JNeXCvBIoxrT1Im/x1r8qIWIhuvaQLTI11B99ysndli7WhyVQqHZNv5uN3U2vAoLovx+ryEOqPyysZEvz8U3i12+D6bV4k9+0uMV3Jtf4TnK3EihM0w61n7pfRo+s/6mqjAaZLEylVwpT6T/7lReRFm6GJt8EhlC1AZuNBBNLx6wECo6cGCtYqqIEEq1I0HR0aFTCqmGVxVvNHxUFTsk19PcTRimeWLgIEsbUpiWxm4sUb96JeWothA2OHOF0TV9+iKJZfeAm/b3nhjyNIzdUWi4VycKm1zM6l5KCRngCxdnrz2jq6vOSn7YPHD3RuW4eoxRg2uPYBgFLCU
CgsJSUY9X7ffFfNPyn0YZBcYqpSePHUIIEoYK64QA2QAAEQAAEQAAEQAAEQMCBCLBB9vyNoSbZalhta+aWZtnqd0z51uNZLqawQD9BwBoCdiNQKMHBVhEUpu1YAwV1rSMQ3Sme6pRML9L5FDDqNHstdJlxyPBGPvs4sOnxnG6lKU/G0LRC6oRfxGLz8t3XaVEfb8qaOhEpQWDFIGGknMzYSFktzm8ZU50SxHMi9TsiKZ74+udFGqVdwjPhkvA28BML734ixRCnVeKi2lT92Ty6ukF44ONqTLytLa2rZaN2NUNTN7FA8eJVoEHsUPV+/uccrd5/k1gQ8PSIT0qw2CVEGq1AwPW5jYfPA2jjT9Xl4r2lY1YCRfPKWanjF7nUpeV3uV4biNNITfuhpPDeCKaag7ZQ6bwpaPx3oVEjqvLNh/7UfNxuI4FC9VXVUd8TOxSXniL8m9N3dZ5+wMBS1fmhTi7iNE+1iqejgSLagcuwP07Q9pP3aV7PspQznYfcZ06gUP3hlFl9GxWQkTcsluw5+4CmCJGHo1IWCSFJGXPLxvABAlYS0OOmFhEUVk6KDtX37T9I6zZsotSpU1HePLmpSqUKFqWO0qFruAQIgAAIgAAIgAAIgAAIRBkBCBRRhhYNg0CMIWA3AoWKlGByplERSrzgYxxJYc6Dgo8jeoIp6FuiyyRbLYDzW/I9vjQ2LFaeAC2qZKXva+eiX/4TIsSu67SgV1nKnjZ0EVpRmrXuAv25/ar0ocidIbFBoPhXLN4nF4v32qJSBKmIBEsX68OKoNh07A79tOS0bJ7TA2USkQuJE8aTYgnv/JxAwYvoZ8Tb/KalkIgEKCZ8M7iwuMACgErlpOqaChSmIoSqx9/fTd5HF2/7kRIvLB2zmh+t14VqVytQPBcCSt1h26Rp9qg2RVUV+c1G201G7TISKDgSggUI08KeHVoDc47+OCfMx09fe0oegisLIk4i3xP7lnz/RU5qUTmb8Jp4Th2EdwTz//6DHwe3e07s/23DJZmGig2z2feDo1JUmq8RrYp84gHCJu3zNl4ilSbKtH/4DQKWEoBAYSkp1AMBEAABEAABEAABEAABEIjpBNh/ouuMvUbD2DvZOo9So5Oj8Ycez3LRODxcGgSijIDdCBQ8QiVEmKZzYvGCvSWUMMF1uQ5HSeTKlol/yqLOR/SEIqLPd9Cd2+S/d2ekLubmXZFc0qW3qg21AM6peWZ2KWV0rjLEHt6iMFUpkoY2HxNeE8I3QP3WVlaiw9ax1Sm+MKpWEQtTfihBXtk9tVWpnlhI54XqZQMryv09Zx+mIxef0NoRVSixWzxDXTacXiv8JcIzyW4j0hBdufuSFvcrJ8QJd3kumzDXH75NbpumeDKNoDBcLJwNSwUK1d+VQytRisQfzVg5rVXFPhupYNbQ9Ed8KUvHrObncwIFt2nKlfdxOej7iPrMPWokUIQeMf/JwgSnbWJDcO2c8BlLhf/FDOF/MbJ1EapYMLUh9Zf51kKPqPRaKtqCoySyatJtcS2VRkzro/G5dnEcBMIioMdNLSIowiKPfSAAAiAAAiAAAiAAAiAAAtFFgCMpuBTJlly8YBj60mV09SWi19XjWS6ifcN5IGDPBOxKoGBQSmTgbdNIChYqVNEKE1oBw1TcUPXxHbUEXm3fQsGPHkToIs4pUpF75WpWn6sWwPnECe2LUancKWQb7M3QbvJ+uvXI3yAQXBN+BK1ENAH7VczqWlqmZ+LKp4X3RGfhQZEzvYc0xOZ9SqBg4YNTEDk7hXoirDt8m8YuOyMFDxY6uExd5SN9DfjNf2V0/VL4RzT6aYc06Q5PoKgxaLNsY/WwKob+qAV5XlzXU6DgqISf/z5H9UpnoD6N8st+8QdHlnCEiTZNk6VjVvNjiUAxYMEx2nv2oVH0AadO6jTjoPR6sMYkW0Vd8Fz/1r2sYf6evnwrxJ/tUrj4s395mYaJIzF8RbSEafG9/YIWbblCdUtloDLCsLyIEKrcEjjTKsFpouDUpHxm6lIvj8GAm/s6aNFxOuDziKZ1KiluqJKZNonfIGAxAT1uaiFQWDwdqAgCIAACIAACIAACIAACIAACFhHQ41nOoo6gEgjEMAJ2J1AwP61IEV40hFaY4PMgTjCF6Ckhz57Sy80bxMVFCIBVJQ4lql6LnJJav6CrFsDZD+D1mxBiLwr2U9h15oE0SG5SITN1FYvIqijPhsyp3alCgdT0XPg9bDl+R547t3sZ4vROXJRAwe1mFJENvEB958lr2nT0LvG++T29KZ1nQll3r/AeGLDguNzmxew0wrNi7aFbBu+D8ASK0UtP04Yjd2TqIU4jdPdpgDSq5sb0FigCxQJ777lH6MTlp8Tm1QWFOMPRHWxSnSGFmzSU9kjoYtWY1fxYIlCwt0P7qfvkXLA/ROqkrrTP5yHdfuwv91kjUHAnFVtO3VSnRHopVq0R88J/J5O/L25IgSUHFMaHEop6NcpHDUpnNNRgkaPdlP306Pkb6T9RLIcncZQJR08wL77er11KG0QRw4nYAAErCOhxUwuBwooJQVUQAAEQAAEQAAEQAAEQAAEQsICAHs9yFnQDVUAgxhGwS4GCKXJKJ/6nLSxAcOSEiqTQpnwKT8jQtoHtqCMQmuqJ58xSkSIOuXlXsDq1kxqBWgBnnwkXp7i0YPNleYgX9zmtU+e6eYwWit+J/EmLhE/AluP35II1Vy4qFphbChNn5dnA+5RAwWmjpogICfZf4MIeBm2r56DCJm/HL95+hf7YdkUufnM9XmBPIvrw145rhvRNL4QBdp0hW+Ub+X0bh0YosDcEL6TvFoIKFxY/ejbMJz0oeAFcRVBwaipOUaWMuWVlCz84jRSbb5t6UEwUJtmrhEn26uGVKVmiUJ8Njvxgw/CD5x/RM+ELwf0pmSsFsbF0mmShgoy6rCVjVobSWtNudT57UDD7qR1LqF107NITmrfpEp0RUS1ceLH/f0Jg6iQiXMIyQjecGMZGUMg7miXMsNn8XFu61s8jox+0+8LaPiRSS/UWqaX6iLmqJ4QnbeG5nPTPWZl+igUPLvw3V7lwGupUNzfFc46rrY5tELCagB43tRAorJ4WnAACIAACIAACIAACIAACIBBFBNiHglM8qdRObWvkjqIrRW2zejzLRe0I0DoIRA8BuxUoFI6whAp1jL8hTGhpRP82R1IEnDj22XRPnNbJtYhXhCIn1CiVQKHe0A8OeU9P/N5QyiSuhtQ7qq7pN5tWJ4jnJD0nTI8pgUJ5PnBdp7hxyN01NILAtD7/Zu+Ix+LaiUQdbtea8jYoRIoILBTwdaK78FieiEiBZGLRPW44/YnMmMMbIws3wSIqIfGHiI3w6n7uGPtRXHvwiphqRmFCnjC+8+dOsfg4j//h8wByEuKYqZm6xY2gIgiEQUCPm1oIFGGAxy4QAAEQAAEQAAEQAAEQAAHdCYRlkv1LZ2+DWKF7hyJxQT2e5SLRPZwKAnZLwO4FCi05FTnB+7QeFNo62LYPAhxNEXTrBgU/fkTv/F/JTsV1cyfn5CnIJUOmCEdNaEdnKlBoj0Vm2
1SgiExbOBcEQAAErCWgx00tBAprZwX1QQAEQAAEQAAEQAAEQAAEooIAR07M3xhqkK3ab1szN8XEKAo9nuUUI3yDgCMRiFEChSOBx1giTwACReQZogUQAAH7I6DHTS0ECvubd/QIBEAABEAABEAABEAABGIjAQgUsXHWMWYQMCYAgcKYB37FIAKc0un+swBKJFIB2SIdkBo6GyG/fhsijbDjRH/GJdUtfIMACMQSAhAoYslEY5ggAAIgAAIgAAIgAAIgAAKSgHePVUYk9k5uYPQ7pvzQ41kuprBAP0HAGgIQKKyhhbogAAIgAAIgEMUE9LipRQRFFE8imgcBEAABEAABEAABEAABELCKgDTJzpY8RnpPqIHq8SynroVvEHAkAhAoHGk2MRYQAAEQAIEYT0CPm1oIFDH+zwQDAAEQAAEQAAEQAAEQAAEQsDMCejzL2dmQ0R0QsAkBCBQ2wYhGQAAEQAAEQMA2BPS4qYVAYZu5QisgAAIgAAIgAAIgAAIgAAIgoAjo8SynroVvEHAkAhAoHGk2MRYQAAEQAIEYT0CPm1oIFDH+zwQDAAEQAAEQAAEQAAEQAAEQsDMCejzL2dmQ0R0QsAkBCBQ2wYhGQAAEQAAEQMA2BPS4qYVAYZu5QisgAAIgAAIgAAIgAAIgAAIgoAjo8SynroVvEHAkAhAoHGk2MRYQAAEQAIEYT0CPm1oIFDH+zwQDAAEQAAEQAAEQAAEQAAEQsDMCejzL2dmQ0R0QsAkBCBQ2wYhGQAAEQAAEQMA2BPS4qYVAYZu5QisgAAIgAAIgAAIgAAIgAAIgoAjo8SynroVvEHAkAhAoHGk2MRYQAAEQAIEYT0CPm1oIFDH+zwQDAAEQAAEQAAEQAAEQAAEQsDMCejzL2dmQ0R0QsAkBCBQ2wYhGQAAEQAAEQMA2BPS4qYVAYZu5QisgAAIgAAIgAAIgAAIgAAIgoAjo8SynroVvEHAkAhAoHGk2MRYQAAEQAIEYT0CPm1oIFDH+zwQDAAEQAAEQAAEQAAEQAAEQsDMCejzL2dmQ0R0QsAkBCBQ2wYhGQAAEQAAEQMA2BPS4qYVAYZu5QisgAAIgAAIgAAIgAAIgAAIgoAjo8SynroVvEHAkAhAoHGk2MRYQAAEQAIEYT0CPm1oIFDH+zwQDAAEQAAEQAAEQAAEQAAEQsDMCejzL2dmQ0R0QsAkBCBQ2wYhGQAAEQAAEQMA2BPS4qYVAYZu5QisgAAIgAAIgAAIgAAIgAAIgoAjo8SynroVvEHAkAlEmUNx98sKROGEsIAACIAACIKALgYdPX1H2dJ5Rei0IFFGKF42DAAiAAAiAAAiAAAiAAAjEQgIQKGLhpGPINiEQZQKFTXqHRkAABEAABEAglhHQ46Y2ugSKt28DKX78eA49o/6vA2jfkVNUskh+SuzhrutYb965T9du3aWUnkkpT44sdO/BY/K5eJUqlPEiZycnXfsSkYsFh4RQ3DhxKG7cuBE5HeeAAAiAAAiAAAiAAAiAQLQS0ONZLloHiIuDQBQRgEARRWDRLAiAAAiAAAhEhIAeN7V6ChRPnr2gTTsP0CmfS/ToyTNycopLmTOkpSZ1q1GOLBkigsiuz1m7dS+tXL+dKnsXpxYNa+nWV2a87L8t8noF8mSnHu2b0djpC+ni1ZvUtW1TKpI/V6T7wnMZP54LubsltLgtv1f+tGDZGmLxpE3jOsR9My03bt+j1Zt305nzl6Q44VUgDzWoVVEKLaZ18RsEQAAEQAAEQAAEQAAE7JWAHs9y9jp29AsEIkMAAkVk6OFcEAABEAABELAxAT1uavUSKALevKVBY2fSc7+X5Jk0MRXKl5Oev3gpxIqLFBLyjtp+XY+8SxS2McHobY4X8ddu2UNVy5ekdKlT6NaZnsMnk39AAI3s+4NhYd/38nU6euo8NapThRLYIHKlbc8RVChvDurW7huLxsXXnrvkXwoKCpb1OzT/kkp5FTA6942Iquk1YgoFBgZS8UL5KODtWzp17iKlTJ6MRvfvhGgKI1r4AQIgAAIgAAIgAAIgYM8E9HiWs+fxo28gEFECECgiSg7ngQAIgAAIgEAUENDjplYvgWLUtPl05fptali7EtWpWs5A697DxzRy8m8UFBxMM8f0JxdnZ8MxtfH+/XuKI9L9fK6EVS+sfWG1Y0k9S+pw2+/evReL6Z/vL9f9XJufO85tmBYWD1TkhOkxc7/N9dnc9a0RKDZs308r1m6lpEk8qGyxgsSRJWEJFPP+Wi1TYv3QupEQKPLKrm4U0SDLRTRIverlqUHNiua6/8l+c+P5pKKZHebGra1uSR1tfWyDAAiAAAiAAAiAAAjEHgJ6PMvFHpoYaWwiAIEiNs02xgoCIAACIGD3BPS4qdVDoODUPt2HTqSsmdLR4G7ffcJ914HjIiXRZvrumwbkVTC3PM4eFUtWbRKpfi7LqIs0qZJTafHGvVbcWLVxJ23Zc5jafVOflvy7kThigaMzOrdpTC/9A2jh8jX07LmfTENUv0YFqiJSLXFhMaT7sElUpWwxYq+DbXuPyDf7Oc0UL4KzZ4Mqz0SUx0KRlsj3ynVZh9uvWamMoS2uN27GIik05MuVlXgxntufO2EwXbp2i6b89pfsH6dVUtetLK77wu8VHTpxVraZLnVK6tiqoYiySKkuSxeu3KB/N+ygKzduy30VSnlRksSJaMOO/TR2QBdK5P5paiX2u2BmAQFv5DmurgmkINDsy5q0fts+Wrd9H40f1JXcErrS+UvXaPrCFdSkTlXavu8I3br7wCAeMce5f/4rr83RLRzB0KpRbcqbMysdOeUjuK41ukZVwfXLWpUMfTfdWLxyA8UR/2lav5qIiLhEMxYuD1Og6DxoPLmLvo0TfVSFr9918Hgpbozq10nt/uTb3BzwtY+KPk/5sZfROX1GTqXc2TOLv7n6cr86v3jhfLRmy27ye+lPHoncqLlIzaXEEq4YHhujC+AHCIAACIAACIAACIBArCagx7NcrAaMwTssAQgUDju1GBgIgAAIgEBMJKDHTa0eAsVpITJMmbuEvmlQg6qJdEefK7woPWb6Arp6445cFM+YLpX0rWCj5y+qetNXtSvLJnjxebsQF1xcnKlU0QIyBdCuA8dkCiNOF1RMvIWfIlkS2rH/KPHv4b06UMZ0qeltYBD90H+M9MBgE2Y+N+TdOzp0/Ixsd0TvjsSCiAjcoF4/TpYCSRnx5r+b8Fs4cuKc/K19y7//6On08PFT2V6hvDkphTCmblqvGp27cIUmzv6TOrRoKK6R3+i6HoncqYRYDL949QZdu3mXcmXLRP06t5bXv3P/EQ2fOFtus8F2POH1cPDYGSlwMJvJw3uGabrN3g7Hz/jSf8LDgYWUssULUZaM6WQqJvakYG+KKSN6kYe7G504e4F+mb9MXoMFiHxCfChWKI9ctO8yeIJMs/RFFW/JdKsQgV75v6axQjgICgqiIyd95DU4IqKcSMuVM2tGOU+ysTA+goNDyNk51Jj72GnfMAWKQDEnHcWccNonjq7QFo6+uS4Mv1n0MVfMzcFMIcIcPX2e
5k8aanRq+z4/ibFmoV7fN5f71fn8t1SuRBE5x8ySfVJmjxsoOXDERHhs2JAcBQRAAARAAARAAARAAASYgB7PciANAo5IAAKFI84qxgQCIAACIBBjCehxU6uHQMGRDrxozgvwvBD/uXJYiACz/viHqlcoRV/Xry6rvxMCwo+T5so3/aeO6C0jCJRAwW+5q+iIFWu2yigDrTG1WoxXAokSKLjhYT3bU6b0aeQ1WCwYO32RIT0SL8pv2nWQsmfOIBf5udLjp8+p70/TpKjRoUXoQrpa3B7S/TspCMjGxIc5gYI9IH4e2p0SiggHLvz2PkdMcIorPjZ9wXIpNGh58XUHjJku/TrMCRSyMfHB6ZeKFshNXb5tonZJ0+ywBAqej76dWhlSaLHIMXziHBElUlqal3MDD4Wh+Z5DJ8hLtMmm5lysSfEkT/jwYU6gUNcNK5XTbyL1034RHTJtZG+zptzm5sBagWJoj3aGMXLUyd/rtkmTcU6Zpfr4OTba8WIbBEAABEAABEAABEAgdhLQ41kudpLFqB2dAAQKR59hjA8EQAAEQCBGEdDjplYPgUKJBoO6taVsmdJ/dg44XRO/tW+6EM8pjNinoLswZi4oDJqVQDFpeA9K4pFItsuRBnNEeiJVh3cqUaFutXIyFZESKLJkTEtDurcz6g8vdL8R5szalED85jx7ZXBaJv/Xb2jmohVGEQ98zmuRVokX0LXFnEBhKh5sFiLI0tWbpVDAaYfY5Jrf3J8wpJu2OVKL7aZcjCqJH9YIFJziiCMtVGEz884Dx8nrczotTk2VIW0qg4Ch6tlaoOBUVqOmzpfzw/OkLQtEii0WSLTzrD3O2+bmQDGzJILipRCkZozqa2j6zv2HNGT8LKpduaw0F7eUjaEBbIAACIAACIAACIAACMRaAno8y8VauBi4QxOAQOHQ04vBgQAIgAAIxDQCetzU6iFQsG/Br4v+plaNv6CKpb0+Ow0qosB0UZlNtjndz1dfVCZOP6QEChV5wA0rgaJ3xxaGtEPsG8CeA6YCRRmxMM/+FdoyWaSiYt+LeROHiN1xhLnzFtqy+5CMXNDW06Zk4sVxFjG03glc15xAUbGMl/B0+MLQ3P6jp+m3Jauozw8tpf8Fpx/iVFSm4ok54cbQ0IcNawSKzm2aGHw/VDs8/t//Xif9FngfR3XUqFia2MdDFVsLFCzwdBEeFN4iZVTbr+upy8jv8TN/F34eNz+b4imsObBGoDA9nwWpHsMnUa3KZaix8OrgYgkbWREfIAACIAACIAACIAACsZqAHs9ysRowBu+wBCBQOOzUYmAgAAIgAAIxkYAeN7V6CBRKIChWMA91EgbWpuX+oyd0XHgT5M+dTS7ML1qxjthLwjSlD5tKz/5jpTDBDl1Uj6xAwd4J/bu0MerOoHEzhf/AK/kmvRJWWIzg1EPp0qQkN1dX4ZUwWqZ9Up4RthYohk+aQ+y3MXN0fxnJoDrI+2/evv9JZIk6rr4jK1Codji10xmfS7ReGHOz2bhWYLK1QMHX/L7faOn9MbxnB9UF+d192ESKHy/eJwKQtpK5OVACxZzxgww+GHwe9z9frmxGHhSWCBTqmuGxUXXwDQIgAAIgAAIgAAIgEHsJ6PEsF3vpYuSOTAAChSPPLsYGAiAAAiAQ4wjocVOrh0DB4HuNmCIXubXm0ryfTZ9HTZsnTJDvSRNmNhpWqZwa1aki0+twPS78Jr3v5euGVD+RFSg4jdKo/p1JmRurlD55c2ah3h1bCt+GzcJY+qCRd4ZKRRSVERTKs4NNvls1qk0uzs60btteWrt1r+RgyxRPSuyRDYuP23cf0HnBmNNQsdE2FzbtHjL+VypXsgh927Su3McL/JkzpKGhPdrL35Z+mPOg4PPZUJyjTgb+71spAPE+JUpVKFWUWjepw7vCLOYECjWH2ogaFYljrUBhKZswO4idIAACIAACIAACIAACsYqAHs9ysQooBhtrCECgiDVTjYGCAAiAAAjEBAJ63NTqJVCwDwRHJwQFBcvF78L5cpK/SOuzVaRP4giLEkXyUceWX8lpefs2kIYJo+aHj58Sp0NKLyIXTpy9KBevS3kVoA7NQ82pIytQ8MU8ErkJg+0SMkUTG2IHiD4N6NqGcmTJKK55gX6Zv0y+1V+jQmnye/WK1mzZI8cQlQKFfJNfGGdfvHrT6M80W+b0xIvrUSlQKCNo9vSoX7MCebi70frt++R1uwnvj0LC+4PLiMlzpajEqZ9KFM5rZA5u1GmTH+EJFE9FlEa/UdPINUECqlPVm9grZO3WPSLywZkmClNxTjVlrpgTKE6eu0jT5i2V81y1XEkh9jjR6k27hM9IoNURFJayMddH7AcBEAABEAABEAABEIg9BPR4los9NDHS2EQAAkVsmm2MFQRAAARAwO4J6HFTq5dAwbB5cX2piErgb1U4iqFe9QpiQbqcMGJWe4n8XvrTr7//TZev35JRFrw4XbxwPmrduA7FjRta8c+VG2nb3sP065j+FP/D4rV64175OXCLvPDdW0RwcJqmBjUryoXvH/qPkebQbHx8/IyvvDAvynPURpliBeVvYSshoyjYxFqVpvWqyQXuTOnTyMgK3m9ucdzn4lX6edZi+r5lQypZJD8FigX3juK6lcoUo5YiMkKVA8LYe64w9u7bqRWxSTaXd+/e05Ubt6TnQTyR3qi0EGa27jkkIzom/9iTEidyl/XC+ggrxdPyNVto444DNHVEb0rknpDUwn2Xtk2pqDDC1haOYPlz5Qa5iM/7mX2TutWkWKTqnRKpn9iMnAUd0/GoOmF9Hxeiz3Qh+nRo0ZBKFc3/SZWjp87TX6s3yWgbPpg6hSe1aVqHcmbN9Eld7Q5zc8B1eOxaH5Eva1WSwkfubJmpR4dmspmwzn/xUnhQDJtkMMnmipawkQ3iAwRAAARAAARAAARAIFYT0ONZLlYDxuAdlgAECoedWgwMBEAABEAgJhLQ46ZWT4FCzUFwSIh4+/6ueKvd3ZBeSR0z/eaFeo5cYPHAVoXfzGeBQplV8+83QqhI7BH2oj/34dkLP9kHFlSiunDkBptCVxRChko/xVEV3YZOFOJKIM0aO9BIzImq/rzyfy2bdndLGOYlWMAJFP1xcXExiEZhVozAzvsPn4h2nQ1ppiLQhNEpzI+FKp5jZycno2MR+fE5NhFpE+eAAAiAAAiAAAiAAAg4DgE9nuUchxZGAgIfCUCg+MgCWzYk8ODyE7p/8Qk9ve1HAX5vZMuuHgkoWXoPSp0zOaXKnsyGV0NTIAACIOA4BPS4qY0OgSK6Z8hUoIju/phen3022G/D1TUBlS1WSBplc5THI2FaXaG0l4gi+cL0FPwGARAAARAAARAAARAAARCwIwJ6PMvZ0XDRFRCwGYEYJVBcuHLDMHDOA41ifwRePHhF53dcE8LEi3A7lyx9YspTKQslThX2m6vhnoyDIAACIODABPS4qY2NAkVQcDB1G/IzVS5bXKZ0ssc/IU6FtGz1ZilKcP+SJvGgciUKU/0aFXWJnrBHJugTCIAACIAACIAACIAACMQUAno8y8U
UFugnCFhDwO4FCjY15H/mSv0aFcSDewVzh7FfRwIPLj+lY6t8rLqiV4O8URJNccj3EfncfE6NymWmRK4uVvXJksoHRfvnRfuNRfvu4bT/4nUQ/bbhIpXMlZy886eypGnUsTEBTkdy54k/PX0ZSKmSJKBUSV3DvcKrgCC6+chfpAOJSxlTuFGCeJFPCxLuBXEQBEwI6HFTGxsFChPMdv2TUxPxv7hxoz61lF2DQOdAAARAAARAAARAAARAIAYR0ONZLgbhQFdBwGICditQmAoTykBSjYxTIWgLhAotDf23OXJi3x8nI3Thsi0L2zySYvp/52nZruu0bGAFSusZdh7tCHX2w0m/iPaXi/aXD6pAaZKZb3/n6fs0ZNEJyilSW83rUTYyl4z0ufefBdATv7eULU2iaF10f8kCwEN/wc2VkiWKb/G49p59QAMWHA+3fo+Gealh2Y/RVQ+fv6FBi46T782PET3e+VPSsOaFw2SwfPd1+mX1ecM1EiZwoiHNCpN3vpSGfdgAgagmoMdNLQSKqJ5FtA8CIAACIAACIAACIAACIBDbCOjxLBfbmGK8sYOAXQoUWnGChQkWH8yldNLWhUgRfX+0B5ee+WxaJ3O943RPpb4uYO5whPbbi0DxJjCEVh+4SfkzJ6V8mZJEaCy2OmnGGl9auvMaLehVlrKn9bBVs1a3s8/nIfWfd4xMxYTPNXT5rh+tOXQ7zGocLcMiRJ/G+aleqQyyzjvx9nHvOUfoiPBCqVQoNRXN4UnbT96jEyLSp0n5zNS1fh6jtk5eeUpdZx6iFCLK4ssyGcn/TTD9u/8GvX4TQisGVaTUQlBBAQE9COhxUwuBQo+ZxDVAAARAAARAAARAAARAAARiEwE9nuViE0+MNfYQsDuBQis49Ovc2kiYMOdBwfvHzVgkZw0ihf5/vBFJ7WTaS68GeUSqJ0/T3RH+bS8CRYQHEAUnxnSBwhySd+/eU+uJe+nh8wAhJFQij4ShKb3+O3iLJqw4S3VKpqd+TUIFME731GHafilmTO9ckgplDTVrZyGp2bjd9EhEXPw1oAKlTx4aFcOpvPrMPSrFjakdS5jrAvaDgE0J6HFTC4HCplOGxkAABEAABEAABEAABEAABECA9HiWA2YQcEQCdiVQKHHCNGqCBQg+pk3rZFqHJ2f8zN9lHYgU+v6pnlp/ke6It+IjU9LlTUmFaue0uglecF598CbtFqmU+E15TqXUump2On3t6ScpnviN+PmbLtHhi4/p+v1Xsm4Zcd021bKTU9w4hmuzb8TsdRfo2KXHdPdJABXIkpQalM5I1b3SGuqoFE9zupehf/Zep8O+j8nZOS4VE2/pd6mXx7BI/likVOo99wjVLJaOvq6QRZ4/e/0FOnD+EY1oWYTmbbpIvAge39mJuC8/1MlFid3iGa7DUQCr9t8kTm/E48uWNhF9VyOH8FR4TRuP3qGf2xen5B7hp0l6/TaYOk0/SPeevpbRAGk9Xck1vjP1F4v2uTMkJtUf/j1/8yU64POIutTPTU3Lh/aXvTbWH7lDhy88ooC3IVQqTwqqWiQtlRC+GqqsOXRLcLhBP4p0XZlSfjQ+P3fjOU34+6xkXLFgahrx50k6efWZFAKSusejZKLvHK1QX/CNaNl24h4NX3yS2tXKIedetdPnt6N0UHBeObQSpUicQO2mEyJS4n8iUqJZpaySNx/gCIzvpx6ght6ZqMeXeQ11eeN/vx6SURfrf6oaJX4mRhfDDxAQBPS4qYVAgT81EAABEAABEAABEAABEAABELAtAT2e5WzbY7QGAvZBwK4EirY9R0gq8ycNNaKjhAfeycKEEip4u2+nVoa62kgK0+gLQyVs2JzAjjlHKcDvTaTadfVIQJU6FLO6jaW7rtGM/3zleewtcO9pAF25+1L4TrhKcUF5UKi37FmYyJ0xMaUV6XpYGOD0PV+USE/9m4a+YR8U8o6ajt4lF9ArF05NSdzj0x4hDvCb9b0a5ZNCBV9MCRScDsjFKQ7lEmmqDokFfG6PUzn92rU0xRGaB4sCTUbtoiYVREohIVxw4UX6LcfvyT46CQPULKndafeZB/IYL/5PaPeRwwIhGMzfdJnYC6FYjuTEPhIXb/tRBmHefEsYOavxyZPNfHB0wM//nCWfGy/kOXyNxG4u1LJyNsqUyt3QHxYM3gaHUMEsyah28fQyLdKVey+pzc97ZctFsiej+C5OctGfd0zsUNwgUvy+9QrNFWbg7LPBIpEqhy88pl4izZJKvTRPCETHLz+h00Kk4Ho89vL5U1P5AhEzEA8OeU/Nx+2i5/6BtHJIZXJL4KwuTQ1H7pBzs2xgRcM+3mAe1QZsprLCV2JsWy95bPOxuzRyySka3qIwVSmSxqg+93nh5ss0V4hRLOiggEBUE9DjphYCRVTPItoHARAAARAAARAAARAAARCIbQT0eJaLbUwx3thBwG4EChU9YRr9oBUdtMKFEjNMhQjVjql4ETumM3pGuf7DAnZkr167t7dVTdx+/Jq+GbOLeGF9QS9v8vwQSbBs9zWavjpUtFAL+OxfMGmlDxUVaaTa1cwhr8OL2w1HbKdnrwJpx/ia5CyEhmNi8bz7r4epRZWs9H3tXLLec3GcRYVUSV0NqYKUQFEid3K5yO3iFFcufLeful9GZyjz7PAEimpF09DgZoUorlAyOLqDx8J9+XdYZRkVcVWIA60FWxZBeHyJP6QuWiNSF40XqYu4qPHJH5/5MJfiSQkm7NMwpHkhsagf19BS09E7pdAzrVNJKpItNB3SjYevqMW4PbLOljHVpdm0pQIFnxRRDwp5QZOPtSJyY9zyszISgiMiVFEiRPGcnjTp+09TM9UYtJmSiEgVJV5wZM0CIULM7FJKRsyodvhbXSMs8UJbD9sgYCsCetzUQqCw1WyhHRAAARAAARAAARAAARAAARAIJaDHsxxYg4AjErAbgUIJDloRgoErgcJUcFBRFaYCBZ9jri0+hmJ7AtElUKjUPn2b5Ke6JUONkXl0nPapufATsCTCYMq/PjI1kfId4Lf7uwmBgoWHId8UEhEUH9MtackpgWJC+2JUKncKwyFOl7R421Ua386LSufhiA7zERSmb+SPXXaG1h2+TbP+V1pGYfA27xvWopBMqWS4iNj4bvI+GUmhBAqO/HjhH6StIrddRNopJWx8TqBYKASibGkSGdp4ItJTNfhxO9UolpYGCxbaoiI7VF9tIVBwaq2g4Hfay8htjvbQiiaqwtugEGr8004Z9bFqaBWRtspJHTJwZxFoaPPChv1qo43wrOBImz0Ta8ld45afEULEbVrcr5xRiio+uPfcQxow/5hI3SXSXn1I06XawTcIRAUBPW5qIVBExcyhTRAAARAAARAAARAAARAAgdhMQI9nudjMF2N3XAJ2I1AowcFWERSm7TjuFEb/yKIrxdMs4RPx5/arNKdbacqTMYkRCE7Xw2l71AI+H3wo0jRtPn6H2Bfh2ctA8hML4ixicPlrQHlhjOxGgWKBvMfswzIFEe8vmDWpFCCqirQ/aZKFGifzfiVQLB1YgdJ5ftyvRJ
ORrYsQey6EJ1BsHFXNKCXRij3Xadqq86TMmycL8WSl8HUwFQ74+hP/OSe9KdT4lJkzH9MWTjfFIgKXzwkUm0dXN1rkPyRSYPUWBtGdxcK88s9QbXNKqkELjxvSXtlCoODoE9+bL9QlDN/aVFKGnWKDPS9YYPqfMFhvXC6z9pCcxyr9NhGnpZr2Q0mjY/yDIyg8EyWgJf3Ly2MLt1ymeRsvGdhrT1Bm2yNaFZFpr7THsA0CUUFAj5taCBRRMXNoEwRAAARAAARAAARAAARAIDYT0ONZLjbzxdgdl4DdCBQqUoJRm0ZFKPGCj3EkhTkPCj6O6AmmoG+JLpNsJRIs6FWWsqf96HvAo1dvxKsFfPZS6DT9gMEkmhfuEyWMJwUArq8ECt5mkWKfeGt+x6l7Bl8J3j/w6wJUS3gzcFHXVqmc5E7xYY1AYSoIqAV3JVCoRfNfRHqlwh/SK6nrDP39hOjffYMAc/uxP20SgoxpSSnMoeuWCo0usVagYGPuAQuOU4+Gealh2UxGTR84/5D6/iaiCj6YadtCoFh94CaxqbhpqeGVTohHH0UgPs5m3Q1GbJPm4ssHVZRppkzP4/RUQSKN18ohlYwO8bnVB24m9iwZ822oB4WaN05xVb3oRzN0PnGO8Nb4Q3hszOsp/DXSGf+dGTWMHyBgIwJ63NRCoLDRZKEZEAABEAABEAABEAABEAABEPhAQI9nOcAGAUckYDcCBcNVQoRpOicWL9hbQgkTXJfrcJRErmwfF07V+YieYEL6lQciLdIx8eZ/ZIpXg7yUSrztbk3ZcOQ2jV56JkxjY9MUSDPX+tJfO64ZiQx8rdFLT9OGI3eMBAptH0LevZcG1iwIsFH1hp+qSc8IPQSK/T4Pqd+8Y0YG29w39qn4esxOKbYoAUbbZ3Pb1goUKvqjfpmM1PurfEbN/rXzGs1c40vKm2Lx9is0e91FseBfVCz8fzS8VubTyiSbG7GFB4W6nta43KiD4kffeUfpgM8jg4ijjqvIEK3PyPlbL6jDlP1Up2R6g88I1+d0YR2mhUZ2mEa8qPbwDQK2JqDHTS0EClvPGtoDARAAARAAARAAARAAARCI7QT0eJaL7Ywxfsck8H8AAAD//+zJ4kAAAEAASURBVOydB5wUxdbFzybYZck555xzzkgSCYKYFRQkiH6gCIqAIiAgCOZABkFRUREMSI4SJOecc4677LKBr6r29TA7TC+7MNMz3X36/WB6uqqr6v5vPazuM3VvwB1xwI+O0V9/h70Hj6oRvd2rE0oUKeAY3b5DxxznrtfnLlih7itZtCD6v/qiox5PjCGw7scduHzy2gN1ljlvBtR8ulyK791/6jq6jPsXBXOmxcTetRGaKki1sXrnOQyYulmd//RuA+TOkgZDv9+KRZvP4JvXa6JswUyq7PzVKLwwZiUio+Iwa0B95M0ajiVbzmD9vgt4vHZ+lMqf0TGmZ0etxIkLEVg6ujlCggLxxbw9+HnFUfw8sAFyZU7jqCfvHzJzK4Z1qoSG5XPizOVIPPnhCjzZoCBeb1NK1dPGsnBEM4SlThizLPh19TF8Omc3vuxVAxUKZ8bNWzF48ePVuCDG2bVlMdQtkwMXr0Vh0oID2Hs8gbVmn2MASZxMFvdNW3jQMTatqt545L8MLQYtVHxmvl0PBbKnVbdcuXkbncasgvz8e/gjSBcWghU7zmLQtC14sr6ws22CnfGigX6TNuK/vRfRr2NZtKmZT92/cf9FvDF+A9rXLYA3Hi+tDSPZnzcElyeGL0N4aAh+GtAAIcGBbu9dsOkUhv+wHc2r5sagZyqoOnJML4s5c+j0DYzvXQul/+fj27Hx6CxYSx8727p613kMmLIJtUpnw+guVd32w4sk4GkCWw+cQtE8WTzdbKL2Dp66hIrF8iS6xi8kQAIkQAIkQAIkQAIkQAIkQAIPTsCIZ7kHHx3vJAH/JRDgbwKFROUsUrRt3gDyj7tDChaaMCHLKU64o2TMtWvnbuLfGVsfqLM6L1REhhwJL79T2sDYX3fh9zXHlUghBQH5Mv+v/04iTWiQerGuvcCfv+EkRggRJXeWMLSqng+3Y+Mwb+0J9ZJd9qkJFNsOX8ZrX61HprSp1Av0nJnCsGb3eSzbdlbclxfvPJUgpBghUMhxHT57Az2/WKtskd/lIQWZUvkyYP6GU9DsSyhJ+u+dR6+IttYp2x4VtrSrlR85M4c5xBtXwUS2tm7vBfSbuFHd07RKbiXOLNt2Bqcv3ULfDmXQTgg58rgghJP2Q5epc/kyv6IQWOS9Ww5eVtecBYqo23FoOmChut5W3N+0Ui4lyKgLyfhr0j8HMH3RQQwQotaj1fLq3iEFlv6TN2LdnguoUyY7qhXPKkSq09h17CqebVQYPR8rkeheZz5SPJEC0U9ChJJzaWb/+siWITRRfX4hAW8RMGJRS4HCW95juyRAAiRAAiRAAiRAAiRAAnYlYMSznF3Z0m5rE/BLgUIil8KD/ON8SAFC7pzQdlJoOy1knaSEDOc2eO49AufEy+hNv+9OUQdV2pVGjqKZU3SPc+UY8cv3L//Yi2VbzzjEhm6PFsf1yBj8uPwIZg9sqF7Cy1/OTxW7B+QOAu14SuxqCAwMwKxlR4RA0UDsoEjYCSF3A3z2+x4ldsi68gV1m5r50a1lccev9b8UOyjky+vZg0T7QsTQjiViHEOEUDO8cyU0KJcTZ6/cQsfhyyH7eu1/OyiG/bANCzedxqKRzRy7PuT9rjsotDblr/vlS/X9p64hj9itUbNUdrWD4zex48J1B4d2j7vP+Pg7GPPLTvy5/qQq/qxndVQumgV649HakOLM7FVHsePIFXWpSO50aF0jHzqIl/jOx3/7LmLUzzsc3MoUyAgphIyZvRP9nyyr7tHq/7TiCKYsPKCEly4tiqFz06JaUZKf0q+tBi9WQtP3bzdAcFBAkvXlLpmB0zc7dpzIynXLZsf7z1VMxF5rRNr5ufC9dkjfD362oti9kl27xE8S8DoBIxa1FCi87kZ2QAIkQAIkQAIkQAIkQAIkYDMCRjzL2QwpzbUJAb8VKDT+7oQKrUx+UphwpuH7c7mTYo944X+/cE8yrFOpRoUeeOeEq6VSgLh0PVr80j91ki+t5Uv6SzeikT5NCFKH3A2v5Nqe/B4RFYvomDhkTpfaXbHXr10W45TiQI6MoeKleg5Hf3IHQtsPlqjvfw9riiAhsqTkiI27g9i4eLcv6JNqJzI6FpJfWhHSKanjqgj9JIUfyfh+h7QlVUggAgNSZsP92nUul3Pj5IVIXI24jeyCpbOg5FxPO5chpE6KUE/SBhnWSgsdppXzkwS8TcCIRS0FCm97ke2TAAmQAAmQAAmQAAmQAAnYjYARz3J2Y0p77UHA7wUKZzdoOyfkNeccFM51eO4fBORuirMi18Dlk9dx63qUGlRY+lBkzpseOYtnEbsmvBtf3T8oPNwopJDw5IjlakfCc40LizBImXDuShSWihBLMnRSy2p58O7T5R+uE95NAiTgdwSMWNRSoPA7t3NAJEACJEACJEACJEACJEACJidgxLOcy
RFx+CTgloCpBAq3FvAiCViYwMHT1zF81naV1NnZzDa18qGPCI+llyDauS7PSYAEzEXAiEUtBQpzzQmOlgRIgARIgARIgARIgARIwP8JGPEs5/8UOEISSDkBChQpZ8Y7SMBQAjKsksylcOpyJNKkDkYhkVCcYYcMdQE7IwFDCRixqKVAYahL2RkJkAAJkAAJkAAJkAAJkIANCBjxLGcDjDTRhgQoUNjQ6TSZBEiABEjAfwkYsailQOG//ufISIAESIAESIAESIAESIAEzEnAiGc5c5LhqEkgaQIUKJLmw1ISIAESIAESMJSAEYtaChSGupSdkQAJkAAJkAAJkAAJkAAJ2ICAEc9yNsBIE21IgAKFDZ1Ok0mABEiABPyXgBGLWgoU/ut/jowESIAESIAESIAESIAESMCcBIx4ljMnGY6aBJImQIEiaT4sJQESIAESIAFDCRixqKVAYahL2RkJkAAJkAAJkAAJkAAJkIANCBjxLGcDjDTRhgQoUNjQ6TSZBEiABEjAfwkYsailQOG//ufISIAESIAESIAESIAESIAEzEnAiGc5c5LhqEkgaQIUKJLmw1ISIAESIAESMJSAEYtaChSGupSdkQAJkAAJkAAJkAAJkAAJ2ICAEc9yNsBIE21IgAKFDZ1Ok0mABEiABPyXgBGLWgoU/ut/jowESIAESIAESIAESIAESMCcBIx4ljMnGY6aBJImQIEiaT4sJQESIAESIAFDCRixqKVAYahL2RkJkAAJkAAJkAAJkAAJkIANCBjxLGcDjDTRhgQoUNjQ6TSZBEiABEjAfwkYsailQOG//ufISIAESIAESIAESIAESIAEzEnAiGc5c5LhqEkgaQIUKJLmw1ISIAESIAESMJSAEYtaChSGupSdkQAJkAAJkAAJkAAJkAAJ2ICAEc9yNsBIE21IgAKFDZ1Ok0mABEiABPyXgBGLWgoU/ut/jowESIAESIAESIAESIAESMCcBIx4ljMnGY6aBJImQIEiaT4sJQESIAESIAFDCRixqKVAYahL2RkJkAAJkAAJkAAJkAAJkIANCBjxLGcDjDTRhgQoUNjQ6TSZBEiABEjAfwkYsailQOG//ufISIAESIAESIAESIAESIAEzEnAiGc5c5LhqEkgaQJeEyhOX7qWdM8sJQESIAESIAESuIfA+cs3UTRPlnuue/ICBQpP0mRbJEACJEACJEACJEACJEACJABQoOAsIIEHI+A1geLBhsO7SIAESIAESMDeBIxY1FKgsPcco/UkQAIkQAIkQAIkQAIkQAKeJ2DEs5znR80WScD3BChQ+N4HHAEJkAAJkAAJOAgYsailQOHAzRMSIAESIAESIAESIAESIAES8AgBI57lPDJQNkICfkaAAoWfOYTDIQESIAESsDcBIxa1FCjsPcdoPQmQAAmQAAmQAAmQAAmQgOcJGPEs5/lRs0US8D0BChS+9wFHQAIkQAIkQAIOAkYsailQOHDzhARIgARIgARIgARIgARIgAQ8QsCIZzmPDJSNkICfEaBA4WcO4XBIgARIgATsTcCIRS0FCnvPMVpPAiRAAiRAAiRAAiRAAiTgeQJGPMt5ftRskQR8T4AChe99wBGQAAmQAAmQgIOAEYtaChQO3DwhARIgARIgARIgARIgARIgAY8QMOJZziMDZSMk4GcEKFD4mUM4HBIgARIgAXsTMGJRS4HC3nOM1pMACZAACZAACZAACZAACXiegBHPcp4fNVskAd8ToEDhex9wBCRAAiRAAiTgIGDEopYChQM3T0iABEiABEiABEiABEiABEjAIwSMeJbzyEDZCAn4GQEKFH7mEA6HBEiABEjA3gSMWNRSoLD3HKP1JEACJEACJEACJEACJEACnidgxLOc50fNFknA9wQoUPjeBxwBCZAACZAACTgIGLGo9ZVAERMTo+wMCQlx2MsTEvAEgTt3gIAAT7TENkiABEiABEjAOgTuiP9ABvA/kNZxqB9ZwrWXe2cY8SznvmdeJQFzE6BAYW7/cfQkQAIkQAIWI2DEotZXAsWQIUOUt7RPi7mO5viQwK3oGFy5GYnsGdMhOCjQhyNh11YkcO7yDYSFhiB9mlArmkebfEjgRmQ0bt6KQq4sGXw4CnZtRQLx8Xdw9sp1ZAwPQ5rQVFY0kTb5kMDVm7cQFx+PLOnDfTgK/+zaiGc5/7ScoyKBhyNAgeLh+PFuEiABEiABEvAoASMWtRQoPOoyNuYHBChQ+IETLDwEChQWdq6PTaNA4WMHWLh7ChQWdq4fmEaBQt8JRjzL6ffOEhIwLwEKFOb1HUdOAiRAAiRgQQJGLGopUFhw4tjcJAoUNp8AXjafAoWXAdu4eQoUNna+l02nQOFlwDZvngKF/gQw4llOv3eWkIB5CVCgMK/vOHISIAESIAELEjBiUUuBwoITx+YmUaCw+QTwsvkUKLwM2MbNU6CwsfO9bDoFCi8DtnnzFCj0J4ARz3L6vbOEBMxLgAKFeX3n1yO/uGIRLiz5G9e2bkTUmVNqrKG58iBDxarI1uRRZG3Q1K/Hz8GRAAmQgK8IGLGopUDhK++yX28RoEDhLbJsVxKgQMF54C0CFCi8RZbtUqDgHPAmAQoU+nSNeJbT750lJGBeAqYSKPYdOuYgXaJIAcc5T/yHwI29O3Fo3HBc3bI+yUFlrFQDRd4chHQlyyZZj4UkQAIkYDcCRixqKVDYbVZZ314KFNb3sS8tpEDhS/rW7psChbX960vrKFD4kr71+6ZAoe9jI57l9HtnCQmYl4DfCxRzF6yA/KN3tG3eAPIPD98TuLhyMXb27SYGcieZgwlA2bETkLX+I8msn7ha1O04zFp+GMVyp0fdsjkSFz7Et2Pnb2Lp1jOoJ9osKtpO6khJ3aTasXrZjVsxuBYRg9yZwxAYGGB1c2kfCTwUASMWtRQoHspFvNkPCVCg8EOnWGhIFCgs5Ew/M4UChZ85xELDoUBhIWf6oSkUKPSdYsSznH7vLCEB8xLwW4HCVZgoWbRgIsp7Dx5N9J1CRSIchn+ROyc2vdBG9JtccUIbYgCqzJj3QDsprkXcxmPvLcFjNfLi7SfLaQ0+9OfqnecwYOpmDH62AppVyZ1ke6t3nceAKZsw6NnyaF4lT5J1zVC4eMtpfDBzmxpqv45l0aZmvkTDnrboICb/cyDRNe1LrdLZMLpLVe2r+ty4/yLG/bYbJy5EOK43r5obb3Uoi9BUQY5rPCEBErhLwIhFrS8FiuyhqdG9e/e7BvOMBDxAIPp2LK5HRiFzujQICgr0QItsggTuErh0LQKhqUMQHprq7kWekYAHCERE3cYt8SdrxrQeaI1NkMBdAlKguHQ9AunCUqt/v+6W8IwEHp6AFFfj4+ORIW2Y28aCMmV2e90OF414lrMDR9poPwJ+KVA4ixNSmJDig15IJ+e6FCl8N4G3dnv6vmGd9EYnwz1VnPCjXrHudQoUumgeqODKzdt4euRyREbFqfv7PlEG7WrlT9TWx7/uwtw1x9G+7r0h1grlSIt2te/W/2/fRfSdsAFpQoPwTMPCyJg2FdbsPo+1uy+gcrEs
+KxH9URt8wsJkEACASMWtb4UKPqUKkpXkwAJkAAJkAAJkAAJkAAJWJBAcKYsSNuspQUtS55JRjzLJW8krEUC5iLgdwKFs+Dwdq9OiYQJvRwU8vpHX01X5ClSGD8BZULsnW/J0E4PfpT9WIR6SmHibAoUD87b3Z1Dv9+KRZvP4PkmhTFzyWG4EygGTN2Ew2du4Kd3G7prItG1d8TOkn/FDpPZAxsipwjtJI/4O3fw+tfrsf3wFcx5vzGypk+d6B5+IQESAIxY1FKg4EwjARIgARIgARIgARIgARLwNAEKFKdQNE8WT2NleyRgeQJ+JVBo4oTrrgkpQMgy57BOrnWkp0Z//Z2qQ5HC2Hm75703cG7+7w/VaY6W7VBq6CcpasNZoGhYPicmLTiAvcevoVLRzCp/RIc6Be7Jd7Bs21ks334G6/ZeQPaMYaheIiva1MiHAuLX/9qhF+JJ7gj4e8NJrN1zHhnDU6G1CH+UN2s4Bk/fkqIQTyt2nMXUhQfx6mMlIUMqrdtzAaULZMSol6uoIZy8GIEJf+/HzmNXEREVg5L5MqJV9bxoVjkh3NQkEWJp9a5zGPJ8RRR0Grd4948+4//DHXHyec8amjnJ+tR2O8idEXVKZ1c7H9wJFK98tgapggPxVa+a92139qqjYktxCFpUTRz6StuFMf2tuiicK91922EFErAbAQoUdvM47SUBEiABEiABEiABEiABaxCgQEGBwhozmVYYTcCvBIqX3xyq7J8y7r1EHDThQV6UwoQmVMjz/q++6KjrvJPCdfeFoxJPPE5gXZt6iDpz8qHaDc2VFzXnrUpRG5pAkTtLGE5fuoWCOdMif7ZwbDxwUYUpeqlZUbzcvJijzflCXBjx4w71vW7Z7Lh4PVoJGplE6KGJfWojR6aEX/m7Eyh2Hr2Cnl+sU/dKASRYJHresP8S8on+ZH6FlOSg+GPdCYyevROyXxlWSbZXvlAmdG1RHGev3ELH4ctVP9WKZ0GqkCC1C0Fe0PpYueMcBk7bjC4tiqFz07uhUvadvIaun6xRYZb6diij2kjOX9ExcXhxzCpcFTk9fhnUCHuEyCNDM7kTKNoPW6bG+uIjRbBi+1ncEonKyxfKjIqFMyGtECPud1y4FoUu4/5V1X4XOyiYMPt+xFhuRwIUKOzoddpMAiRAAiRAAiRAAiRAAuYnkJRAcezYccz982+cPnMGZUqVQscOjyNU5KfTO2S9RUuW4eChw0idOjUa1a+HalUrI1WqhJxQ585fwO/z/rzn9iqVKqBqlcr3XDfighHPckbYwT5IwGgCfiNQaLsnXHc/OIsOzsKFJma4ChFaO67ihdFg7dTf8mqFhbkpTY7tSigADTccdr2Y5HdNoJCVXm5eFC81SxAjbtyKQaePV+PC1ShMfrMOiudJj7OXxYv/D5crUWCa+OV+5nQJ/xFcuPk0hn2/LVFOBHcCRZv3lygx4dOe1VGlaMJ2vV1ih0OPz9eqMWriQZID/l+hJlBIcePzV2skCnMkRZQ/1p/E0w0KoX65HOqOo+du4oXRq1CzVDaM6VoVt2Pj0fr9xWoHyIx+9Rxdyp0V00US669fq4lyQvBI7jFjySG1Y0NLCq7tpnAVKOQOjfpvzYcmCDm3ny1jKL59vZYYU6jzZXUuxZ0NIln2uStRWCZ2r4SHhqjdItIvPEiABO4lYMSiliGe7uXOKyRAAiRAAiRAAiRAAiRAAg9HQE+gOH7iJFq1ewIlSxRHxfLllbBQvHhRzJw6EQEBAW47HTVmHH6dMw+NG9VHZGQk/pq/ABUrVMDsH6YjKCgIW7ZtR4ennkON6tWQ+n+ihWyoXZvH1B+3jXr5ohHPcl42gc2TgE8I+I1AoQkOziKEJKIJFK6Cg7arwlWgkPfotSXLeHiegK8FCrkTwfXX+DKU03vfbUG/jmXRRoRiWi5+7S9DMb3zVDkVLsmZwqtfrsOOI1ewfEwLBImdEa4CxXkhdHQQOweaVs6F956r6Hwrxv22C3P+Pe7Y3SAL5c4Md4eWb0ETKN7sUBqP17432bS7e58dtRI3hfAy74Mmqljrd+bb9VAge0J4qqdGLEdM3B2xC6IhAsV/4COjY8WfhITXzm2Gpw5GWOogdUnbsSEFDRm2Sa4L9AQKKfw8Omixuk/u0GhSKRduifbnrDmmclZIweX7t+urNpz7+2nlEXw5d6/j0gti90V7EX5L4+Eo4AkJkIAiYMSilgIFJxsJkAAJkAAJkAAJkAAJkICnCegJFB+N/RQLFy3B/Hm/qh0Qe/buU4LF7FkzUKVS4vcs2piOHjuGXDlzqt0T8tovc+ai/4BB+OePOSherKhDoFi3aimyZ8um3ebTTyOe5XxqIDsnAS8R8BuBQhMcPLWDwrUdL/Fjs4KAr0M81SmT3ZG/QXPIsfM38fxHqxzhjsb/vU+9RJ/QuxZK5c+oVVOfX8zdg59XHsX0fiInQs509wgUMl9Fv4kb0atNSbWzwfnmP9efwEc/73QIFBFRsWgxcJFzFcf5opHNEJoqCJpAMbZbNZUDw1Hhfyd7TlxT4ZMOnL6O65ExuC5CL8kQVvJYNbal+tRCTnVvVRzPNy6CQyJxdWexa8R5J4lms7rB6a9OTYuocFLy0vsztmDp1rOOnSbymp5AIW2TeSVK5suAmiUT/8dfikFSFJICRf7s4bIZxxEffwex4o/cxbJG5O+YuvCACsE1a0ADkcMjjaMeT0iABBIIGLGopUDB2UYCJEACJEACJEACJEACJOBpAnoCRbuOz6iwS4Pe6efoslrthujy0ovo8crLjmtJnSxdvgJde7yGv+f+qnZiaDsoKFAkRY1lJGAOAn4jUGg7JSQ2110Rmnghy+ROCr0cFLKcuyckBWMPXyfJblQhJ4a+WCmR0acvReKpESvQsloevPt0eXwxT4gQK45iat86KJo7cWihb//ah++XHlZ5KOTLd9cdFKt2nsO7Uzfjjfal1S//nTtauEmEiPphm0OgiImLx3eLDzlXcZx3eqQogoMCkhQoFmw6heE/bFf3lMyfQeyOCEeGNKmUgCIvagJFvIi39ITIVZE+TQim9a2LaSK002QR4slZIJBCww4RXsn1qCB2S1QtnlXkmriKbp+theyne8vijmq7xPVJ8w/g8Tr5VcJsmWNC23HhqORy8td/JzHqpx3QwkS5FCf6qgknrjlCElXiFxKwMQEKFDZ2Pk0nARIgARIgARIgARIgARMT0BMopBjR9eVO6N71JYd17kQLR6HLyR3xDqT7a32wc9durFw8H8HBwY4dFLJqeHi4Ei16duuCxg0buNxt3FcjnuWMs4Y9kYBxBPxGoJAma0KEazgnKV7I3BKaMCHryjpyl0SJIndD5Gj3c/eEJGTccXHFIux8q9tDdVj24wnI2qBpitrQclDInAg/vdsw0b1rxS/1+0/ahP9rJxIv1SsITUgY8nxFFZrIuXK/SRuxbs8FLB7VDKlDgu4RKE5ejMQzI1egbe38eMsl+bQmbjxIDgp3Oyg6j12NQ6dvwDl0k8z90HbIEjVkLcST/KKJEnInQn9hQ3hYMCb
2ru1sWpLnWtirJCvJfkTOjiK50qldGnLHSINyOVGxSOZEt/2w7DC++XMfPuleTYkfMsyVzG1RSdRrWD5norrbDl/Ga1+td+xuSVTILyRAAjBiUcsdFJxoJEACJEACJEACJEACJEACniagJ1CUq1ITfV7vhS6dX3B02fHZF1GieDEMHzLYcU3vZMy4z/DNhEmYOW0SatesoarJJNmbNm9B1ixZEBERgVmzf8XiJUvx++xZKF+urF5TXr1uxLOcVw1g4yTgIwJ+JVBIBprIIM9dd1JIoUI7nIUJZwHDVdzQ6vPTuwS2dnsaV7esf6BOMlaqgYoTfkzxvZpAIW90/uW+3MXwungBLpNYf9mrBioUzowjZ2/ixTGrUCR3OpXMWYZaksd2kXuil8hBUTxvekx+o4665rqDQoYoatDvH1XmLBycuRwJKShERsU5dlCoSvf5K6kQT80HLlR3z32/iQoHJb9oIaZkrg1ngeLkxQghnKxEsyq5lQAj80K0EyJKcg8pIuwVuyVcj70nr4lk24fQWuTuqC0Sc1cSScHDQ4NVknCZLLxMgYwquXeq4EB1q8yN0VMwPCoYLxjRFGlEjos4wezRwYtUQmy5w0Pu9NCOL8Vulp/EbpYhLwixqGIu7TI/SYAE/kfAiEUtBQpONxIgARIgARIgARIgARIgAU8T0BMomrRojfbtWqNXj7s/bn20bQc0faQx3hDCRVLHtxOnYPTYT/DVZ+PQsrn+D1vj4uJQsXodvNr9FcidFL44jHiW84Vd7JMEvE3A7wQKabCzSJHUbghnYULeR3FCUvDNcWPvTmx6oY3oXPzcP0VHAKrMmId0JVOubmsCRZrQICUSNK+aG3mypMHqXeex/+R1ldR68LMVHUmbtdBCBXOmVbsArorcDos2n1L3TuxTW+VWkEN3FSjktSVbzmDIzK2QIkFTIQiIXNJKFIiOjfOoQDHix+2Yv+GUEAUyq/BKp0Xeht9WJwhzrgKFHFePz9cqIUaeS/FC1nnYQxNE+j4hBI9aiQUPbXwyLFSrankh7Zfjlbs+nm9SGN0fLeHoXttVkS1jKB6rnhcZwlNh44GLgu95yITaMh9I2rC7woXjRp6QgM0JGLGopUBh80lG80mABEiABEiABEiABEjACwT0BIou3XshLCwMX376seo1KioapStWxUcfDkXHDo+7HUmc+PHpyDFjMWXad5g68Vs0qJfwo1K3lcVFWb9i9dpKBEluXgu9th70uhHPcg86Nt5HAv5MwC8FCglMhnSSf5wPKUDInRPaTgrnkE9JCRnObfDcewQurlyMnX2lGp5ckSIAZceK0E71H3mgQV0TCaQfG7xY/dJfhiD6dM5u1Y4ULBpXyIXe7Uo7diHIApm3YbrI1bBo8xmcuBCh6lYulgUvNC6swhKpC+IvKXAMmLIJg5+rgGaVc2uX8du/x/DLqmOOe5tWziWSRWdXOSicd3A4btA50RJruwvxJBNRSxFg5Y5z6m5py5vty6gcFBeuRiXaQSEr/L72OMb+sgvuEoXrdH/fy+tFUvC3RFLwfh3Loo3YReF8yJ0RX/+5V+Xz0K7LMT7TsDA6Ny2qXXJ8/r3hpGKuJfmWdWWC7beeKIt0FCccnHhCAs4EjFjUUqBwJs5zEiABEiABEiABEiABEiABTxDQEyj++Gs+evftr4SGihXKYfykKfh2wmRsWrcKmTJmVF3LMFDt2jyGYe8PUt/fGfQ+fv7lNwwTIaAqOIVsyijq582TG5OmTEfGTBlRvWoVpE6dGuMnTsa0Gd87kmh7wp6UtmHEs1xKx8T6JGAGAn4rUGjw3AkVWpn8pDDhTMP353InxaFxw+8b7kmGdSry5qAH2jmhZ6V8eX75RjSypEuNwEC5x0H/kLsvZJgnmXMipYfsQ4Yx0sJEpfT+5NSPjonDdSHAZBa2BCVhi5aH4sPOlVG/XI7kNO2ROjKM1rFzN8XYAlEgRzgCA5LmfUOEgZL25MoUdl/feGSAbIQETEzAiEUtBQoTTxAOnQRIgARIgARIgARIgAT8lICeQCHDL304aowSELShj//qczRt0kj7isIly4kwUG3x8ajh6ppMor19x05HuXai1Zk4ZRpGjh6rXVaJskcMfR+tW7V0XDP6xIhnOaNtYn8kYAQBvxconCFoOyfkNeccFM51eO4fBGTi7AtL/sa1rRsRdeaUGlRorjzIILbwZWvyaIoTYvuHVf4zinNXbmH38Wt477stKlzSzP71+OLff9zDkZDAQxEwYlFLgeKhXMSbSYAESIAESIAESIAESIAE3BDQEyi0qreionBeJLfOmycPgoIS8lpqZQ/yGRMTg/MXLqhbc+XMKd6LPHybDzIO7R4jnuW0vvhJAlYiYCqBwkrgaQsJPAwBLfeEDJk0rlt1lbj6YdrjvSRAAv5DwIhFLQUK//E3R0ICJEACJEACJEACJEACViFwP4HCKnbq2WHEs5xe37xOAmYmQIHCzN7j2G1LYM/xqyKnBlAwR1qEhwbblgMNJwErEjBiUesrgWLE0KEokT4tWrdubUXX0SYfErgdG4fI6NtIlyYUQfcJO+jDYbJrkxK4HhGNVCIsaGgqrrlM6kK/HXbU7VhEi1//ZggP89sxcmDmJCDSL+Ja5C2kSZVK/ftlTis4an8lcCs6BnFikqUNTXXPEO/cjkbqUmXvuW6XC0Y8y9mFJe20FwEKFPbyN60lARIgARLwcwJGLGp9JVAMGTJE0dc+/dwVHJ6JCMgH5Ss3I5E9YzoEeyBcgIlM51ANIHDu8g2EhYYgvRDAeJCAJwnciIzGzVtRyJUlgyebZVskgHjxa7azV64joxC/0rh5iUxEJPAwBK7evIW4+HhkSR/+MM1Y8l4jnuUsCY5G2Z4ABQrbTwECIAESIAES8CcCRixqKVD4k8c5Fk8QoEDhCYpsQ48ABQo9Mrz+sAQoUDwsQd6vR4AChR4ZXvcEAQoU+hSNeJbT750lJGBeAhQozOs7jpwESIAESMCCBIxY1FKgsODEsblJFChsPgG8bD4FCi8DtnHzFChs7Hwvm06BwsuAbd48BQr9CWDEs5x+7ywhAfMSoEBhXt9x5CRAAiRAAhYkYMSilgKFBSeOzU2iQGHzCeBl8ylQeBmwjZunQGFj53vZdAoUXgZs8+YpUOhPACOe5fR7ZwkJmJcABQrz+o4jJwESIAESsCABIxa1FCgsOHFsbhIFCptPAC+bT4HCy4Bt3DwFChs738umU6DwMmCbN0+BQn8CGPEsp987S0jAvAQoUJjXdxw5CZAACZCABQkYsailQGHBiWNzkyhQ2HwCeNl8ChReBmzj5ilQ2Nj5XjadAoWXAdu8eQoU+hPAiGc5/d5ZQgLmJUCBwry+48hJgARIgAQsSMCIRS0FCgtOHJubRIHC5hPAy+ZToPAyYBs3T4HCxs73sukUKLwM2ObNU6DQnwBGPMvp984SEjAvAQoU5vUdR04CJEACJGBBAkYsailQWHDi2NwkChQ2nwBeNp8ChZcB27h5ChQ2dr6XTadA4WXANm+eAoX+BDDiWU6/d5aQgHkJUK
Awr+84chIgARIgAQsSMGJRS4HCghPH5iZRoLD5BPCy+RQovAzYxs1ToLCx871sOgUKLwO2efMUKPQngBHPcvq9s4QEzEuAAoV5fceRkwAJkAAJWJCAEYtaChQWnDg2N4kChc0ngJfNp0DhZcA2bp4ChY2d72XTKVB4GbDNm6dAoT8BjHiW0++dJSRgXgIUKMzrO46cBEiABEjAggSMWNRSoLDgxLG5SRQobD4BvGw+BQovA7Zx8xQobOx8L5tOgcLLgG3ePAUK/QlgxLOcfu8sIQHzEqBAYV7fceQkQAIkQAIWJGDEopYChQUnjs1NokBh8wngZfMpUHgZsI2bp0BhY+d72XQKFF4GbPPmKVDoTwAjnuX0e2cJCZiXAAUK8/qOIycBEiABErAgASMWtRQoLDhxbG4SBQqbTwAvm0+BwsuAbdw8BQobO9/LplOg8DJgmzdPgUJ/AhjxLKffO0tIwLwEvCZQnL50zbxUOHISIAESIAES8BGB85dvomieLF7tnQKFV/GycR8QoEDhA+g26pIChY2cbbCpFCgMBm6j7ihQ2MjZPjCVAoU+dAoU+mxYQgJJEfCaQJFUpywjARIgARIgARJwT8CIRS0FCvfsedW8BChQmNd3Zhg5BQozeMmcY6RAYU6/mWHUFCjM4CXzjpEChb7vjHiW0++dJSRgXgIUKMzrO46cBEiABEjAggSMWNT6UqBo+Nd0C3qNJpEACZAACZAACZAACfiSQMFXeqNgtz6+HIJt+qZAoe9qI57l9HtnCQmYlwAFCvP6jiMnARIgARKwIAEjFrUUKCw4cWgSCZAACZAACZAACdiYAAUK45xPgUKftRHPcvq9s4QEzEuAAoV5fceRkwAJkAAJWJCAEYtaChQWnDg0iQRIgARIgARIgARsTIAChXHOp0Chz9qIZzn93llCAuYlQIHCvL7jyEmABEiABCxIwIhFLQUKC04cmkQCJEACJEACJEACNiaQlEDx34ZNWLp8BaKiolGjelW0bN5Ul1R8fDxWrFyNjZu34Nz5C6hYoRwaNaiHPLlzO+7ZuWs3VqxajaPHTiB/vrxo3LABSpcqgYCAAEcdK59QoND3rhHPcvq9s4QEzEuAAoV5fceRkwAJkAAJWJCAEYtaChQWnDg0iQRIgARIgARIgARsTEBPoFi4eCl6vNYbbR5rhXTp0uL7WT/hzd6v47We3dzSunb9OipVr4O6tWuhcKFCWLNuPQ4eOoQpE75Bw/p11T2Ptu2gPqtWqYxDh49grajT/ZUueLuvPXJgUKBwO3XURSOe5fR7ZwkJmJcABQrz+o4jJwESIAESsCABIxa1FCgsOHFoEgmQAAmQAAmQAAnYmICeQNG6/VMoU7okRg3/QNGZ8f2PeH/Yh9i1dQPCQkPvIRYXF4cjR4+haJHCqkx+r9ekBapUqoAvPvlYXdu3/wCKFyvq2DHx1juDsGDRYuzYtO6e9qx4gQKFvleNeJbT750lJGBeAhQozOs7jpwESIAESMCCBIxY1FKgsODEoUkkQAIkQAIkQAIkYGMC7gSKiIgIlKtSE99++RmaPdJY0Tl2/AQaNXsUs2fNEKJDxWQRkzsmChYogK8/H+e2/qgx4zBh8lQc3rvDbbnVLlKg0PeoEc9y+r2zhATMS4AChXl9x5GTAAmQAAlYkIARi1oKFBacODSJBEiABEiABEiABGxMwJ1AoYkRv/70PSpVKK/oaKLF+K8+R9Mmje5LTOabaNPhKXw0Yhg6tm93T30ZEuqRFm1Qv15tjP1oxD3lVrxAgULfq0Y8y+n3zhISMC8BChTm9R1HTgIkQAIkYEECRixqKVBYcOLQJBIgARIgARIgARKwMQF3AsX+AwfRovXjmPvLjyhXtoyiIxNll65YFZ+NHY3WrVomSezkqdN4vOOzKFWyOKZO/AZBQUGJ6kdHR6PzKz1x8OBh/DV3NrJny5ao3KpfKFDoe9aIZzn93llCAuYlQIHCvL7jyEmABEiABCxIwIhFLQUKC04cmkQCJEACJEACJEACNibgTqA4f+ECatZrjJ9mTke1qpUVnStXr6JKzXqYMWUi6tSuqUvs9JkzeObFl5XoME2IE+Hh4YnqSqGjz1tvY9PmrZg1Y4ojZ0WiShb9QoFC37FGPMvp984SEjAvAQoU5vUdR04CJEACJGBBAkYsailQWHDi0CQSIAESIAESIAESsDEBdwKFTHBdrExFDH1/EJ5/5ilFZ8PGzXjq+U5YPP8PFC5U0C2xvfv244WXuqFihXL4/JMx9yTTlmGderzWB1LEkEJH/nx53bZj1YsUKPQ9a8SznH7vLCEB8xKgQGFe33HkJEACJEACFiRgxKKWAoUFJw5NIgESIAESIAESIAEbE3AnUEgcg4YMw6p/12DqhIRdEG/2H4Dr12/gj99+UrT27N2HJ5/rhOFDBqNt61Y4cPAQmj/WDmVKlxbCxkCEBAc7qBYvVlSdt2zTAXJ3xheffIysWTI7yvPkyY1MGTM6vlv1hAKFvmeNeJbT750lJGBeAhQozOs7jpwESIAESMCCBIxY1FKgsODEoUkkQAIkQAIkQAIkYGMCegKFDOn06v+9ifX/bVB08ondDlPGf40ihQup79u278DjTz6L0SOH44nH2yoxo1OX7m5JLlvwF7JmzYJyVdyHhtLacHuzhS5SoNB3phHPcvq9s4QEzEuAAoV5fefXI9+1cCW2/7UERzZsw5WTZ9RYM+XNhULVKqB8qyYo06y+X4+fgyMBEiABXxEwYlFLgcJX3mW/JEACJEACJEACJEAC3iCgJ1BofUmhIjr6NnLmyK5d4ucDEqBAoQ/OiGc5/d5ZQgLmJWAqgWLfoWMO0iWKFHCc88R/CJzasRfzhn6Kw+u3JDmowjUqoc17fZCnXMkk67GQBEiABOxGwIhFLQUKu80q2ksCJEACJEACJEAC1iZwP4HC2tYbax0FCn3eRjzL6ffOEhIwLwG/FyjmLlgB+UfvaNu8AeQfHr4nsGvRSkzr0i/ZAwkQNTtNHoMyTbmbwhXa/lPXMeffY+hYvyAK50znWmzI93V7L2DP8avoWK8g0oaFeKTPY+dvYunWM6hXNgeK5k7vkTa91cjt2HgcOXsD4aHByJ05DQID5YzlQQLeJ2DEotZXAsUH772H4rvWo0qVKt4HyR5sRSA2Lh7RMbEISx2CwAD+e20r5xtgbGR0DIKDApEqOMiA3tiFnQjcjo1DrPiTJjSVncymrQYQuHMHiBS7BVKHBKt/vwzo0uddBKUJR5E+A30+DjsMgAKFvpeNeJbT750lJGBeAn4rULgKEyWLFkxEee/Bo4m+U6hIhMPwL3LnxGetOkGsg1J89Plrum13Uty4FYPj5yOQK3MYMqdL7WD3yZzd+G31MTzbqDB6PlbCcd3Iky/m7cHPK47i54ENxPjSeKTr1TvPYcDUzRj8bAU0q5LbI23KRhZvOY0PZm5T7fXrWBZtauZz2/bVm7fxyZxdQiQ5izplsmPUy/e+JI2IisWIH7dj5Y5zjjYypU2F956rgKrFszqu8YQEvEXAiEWtrwSKIUOGK
4PO+Xd+z5LW2pLC3vrBkwkmCE3i4uiU9DwuOV7mJUgRQyroLnCrjSeQzemfMJropgi/XLBlA6VIloy1CQODrPi2KSKGCC+w5fTftOxVC9vELZqw4Sb+sPUO/9K1CebKkInYl1H7iVilMFMqVVgR+9pMv+dl1EYsB/d8pRqH3I6jp8PVUtqA/TfqonKW9EU8iqXb/1ZQzU0r6rX9VeX/d/ms0fN4B6ZqorIjNcOHGQ1l2uUIZaXyn1yxWBpZCdE44XsTbozbKVK43aRIf2nb0hrwe/G5xqlcmuzwf8esBWrPvGrHo4CP8lb4irBm0OBGapUL440ia8NcROnbhLl26+VBaMKRNmUQKLrkFg5gOrY5f+1UVgovtr2+q9FkR1Y73SlGNEgHyvN6g1VREiBVjhWizZt9VOiWEIo4/UTq/PxXJnS5GBqv2XqFRvx2izm8UpHa18sXUPKSDgCkIqFjUQqAwxVRBJ0AABEAABEAABAxCwJFAceiwsDR/u7UIaL2IAgsWkD2ZNn0Gzfn5N9qzfWOMPdMTKPhB3ttyDIoUYu9YpUZterNpYxoxdFCMZSIDCMSGAAQKfUoq9nL6tSMFBIxLwGMECs16wt76wVp0sBYuNDHDXojQyrEXL4w7RJ7f8gdLol7Ix7elqZptdqqIvadDqNf3u6ltrbz00RuB8lkOyMwv0bMIC4Z+wiqAj9gKFPxL/0mLjsmX5x/Uj1ogPo18Ts1HrKfbotwN4+uTr08i6jvrP9px7CYtG1FLWiVwHTuO36C+P+6lro0LUevqr8i4DM1HbJBiwQ89KgnxJClnI80CofdbhenNSrnlvZj+rNhzmf7ZdZlaVXuFqhaLMvM9f/2BtCzQhAcuQxMP6pTOSoPfLSHdL7FFSOuxm2T7Fw+rSRnTJJPVxdXF0+LtF2jSX8eoe9NXqWXVPLIs/sPCR50BUVY0HzcMpDY189I9Yd3ScMha4vYEXbonBRHLA+KkQdnsUvTR3ERZp/H5k6dCgBJtZ4uRRUNqUsrkvvZZcA0CpiSgYlELgcKUUwedAgEQAAEQAAEQ8FACjgSK58INU/W6b9CrgYE0avgQuhUSQu3e70wt3mpG/fr0kj2ZN/8PGjfha1q2eCHlzpVT3rt95w5FPo2Urpxat2xB/C9NmtSUNGnUnvP8hQuUOlUaOnb6HP326zzaum07rfxnEWXP9sL63kMxoVkGIQCBQn+gVOzl9GtHCggYl4DHCBSa4GAtQjBWTaCwFxw0qwp7gYKf0SuL03AkPAF3CRT7hEDRUwgUbJEwpHUJiwhg38PYChT2z2nXmqgwf0A1ypExhXDZFExDf94vYjAUEy/Zc8hsY34/RCv2XKG/htSgzOmSkxarYYgQCqzdTbFgUH/QGvnSfmibkvJZR4GhOUETE2QmB3/e/XIzPXj0hJZ+UUumagLFTOF+ydpd05d/HKbluy9LF1dstcCHnkARU1uuhIRRK2G1wgfHlGCBJEQEtmZrFLZS4aN9nXz0Qf2CdPLyPer09TZ5j60mRgjLileFZQpbkoxbcFima2KGzGT3Z8mOizTxz6P0SZNC9I4QZ3CAgLcQULGohUDhLbMJ/QQBEAABEAABEPAEAo4ECm7X0WNB9KFw6RQcHOWRoEa1qjRl4jhKlSrKuv3H2XNpzPgJtGHVcsqdO5fsSuPm74jnjtl0a8rE8dS4YQN5r2uP3rRy9Rp5Xr5cWRo3egTlyhm1b7V5CBcgEEcCECj0wanYy+nXjhQQMC4BjxEoNMEhoSwo7Msx7hB5fsvd5eLpsfiF/ac/7KZDZ29LSMXzpqcKhTJRbREbImuGFBZwzggUN4R7qNX7rtBREevh9v3H0gqAXSHxMX9AVSFQpLRYC1QsnEm6auJf+jcatla6MtLcPv248iTNXXNGPsfuoqyPoIt3pZsjFhY0wcI6XTtfM1bE5UjqIy+Pi6DSm0Q8i1PCyoMtE+6J+A0cc4MPzXWVJlCsHF3Hxtpg4Zbz9M2S4/Rtt/JUIm8G+YwjgSK2bTl4NpQ+mbZLlqP9SZHcR1iO5KVZK09ZhBtrMcM+ZoXmKovZzOxZSSvG8skWGS1Hb5TXCwZVt3CwZMAJCJiYgIpFLQQKE08gdA0EQAAEQAAEQMDjCOgJFFpDr167JgNYp02TRrsV58+79+5RaOht8vFLRRnTphYuh6MsK+JcIB4EATsCECjsgFhdqtjLWVWHUxAwDQGPESg0Swkma28VoYkXnMaWFHoxKDgd1hNMQe3hziDZLFJwPAYORL3rxE0Z+Jl7b23dEFuB4sy1+yKA9A5L8Gi2NkidIiktEkGh+dAECj5nC4Blwu3Sv6Nq03EhOPSZsYesrSWmLj1OCzadJ34Bnz9ran7EcnCg63IiJgW7SeI4Gj+LOBiOjva180uXUlocBs7D5eUWsR/SinYt2HxePmYvUKweU1cEvo4SNjgDB7VmK5CYBIrYtEVWKP6wK60DQqg4eeUeZfdPQRWFJQWPw/iFR+j77hWoaJ70sm81+66Sj2yaUF+6nNKe50+2rmArC2shRkv/fdM5mrY0iD5tXpiavx47V1jas/gEAaMTULGohUBh9FmC9oMACIAACIAACBiJQEwCRUL3RYtBkS6lHwSKhIaL8sSPOcPoqXiXkSldzHEsvQ2Xir2ctzFFf72DgMcIFIxbEyJYhLAPfs2xJTRhgvNyHraSCMz34uWl9jysJ5iQuuPpta0UvmtgvCpMXn4M+WatHK8yIkUgMA4Kze6X+Bf9K0bVkS/Fewsriz0nQ2xiRnBFmsigBcn+blkQzd9wzkbc4Hya+yZrgeLAmVDq/t0uGtKmBB06d5v+3n6RrC0XVv53hUbPP0SjO5S2xI3gsuJydBCBu8+I4NLz+lUR4kTUAkC4KxXButfJ4uxdPMVVoIhN21iYuCOsNwJEjA/NukN7TovN8ffwmpQhdVSsi+YjN8ig5dZstPxNhq2jCOE7deWoupQokXaXpFVJ85HrKZmvDy0cXJ2SicBuOEDAmwioWNS6U6AQQXGoV68ov8reNK7oq2sJRDx+SvcfhVP61CnIJ3Fi11aG0r2OQOjdh8K3uy+l8ota33gdAHTYZQTCwh9TWMRj8St3vORzGWQPKThdtiyUyj+9stZAoFCG2isrgkChP+wq9nL6tSMFBIxLwKMECsaoiQx8bm9JwVYW2mEtTPB9TcCwFze0/Ph0LYFHW7pTZMjBOFXi41+C/KpMdfrZdfujrCberJRLxDaIiq3AhXBsBnbLtH58PUrik5imLDlGf265YCMW3BexG1qM2iCtJTSBQnORpFkAcFns8qndV5tlPmuBghd8zb5YLy0a2OVRlaJZaLCIg6EdZ4U1RvsJW6nYK+np64/KWl6yc4yHb4V1RT5hVdGuVj4te7Sf9QZFBZ/+e1gtiyiwM+gmfT7zP4urKC5Aa39sBIpZq07RnNWnaWT7UlS9eEC09Vsnrt1/lb6Yd5BaVMlNPZsVtiTtEu35TLRHc3ulJfy24Sx9v+wEdapfgDrUya/dJh674fMO2MTi0BLnrDktXUX1fbsoNa4QFQhOS8M
nCHgDARWLWrcKFGIQJ0yY4A1DiT4qJPAo4onQvsJEHKjUwvoQAoVC9F5R1fXQ+5Q8mS+lFb9ExgECCUngfliEiCkXTln9bV3CJmQdKMs7CUCg8M5xV9VrCBT6pFXs5fRrRwoIGJeAxwkUjNJapIjOGsJamODnIE4wBfccz+6cpLCNH8Sp8hTVf6TE6Qo6/awWC4EDMDevnFv+qn/7MXb3FEwNy+Wg/u8Uk2VuPXKdBvy0T57zC++sGfyEe6ZLlhgOmkCxYs9lYS1xmLL5+4nnc9Jj8ev+pTsuiRcej+Wz1gIF3/hRxFuYK16m8zGxc1kqF5hRnmt/pi8/Qb+uP0v5sqWWIgBbeLClBZc39v3SVFmIGrE5NAuOUvkz0OuFM9PV0EcWt1Pc97hYUBw5f5u6TN0pBY43BKtmFXNRgOAS0/E08jm1GbdJsqslYn2UKeBP+0Ww8jX7rkmrld/6VSP/NC9+XajFkuA+c7Dw0vn96ZRwC8Vup9jKZWqXClQwxws/q3eFdUajoeuEqWhy+mNgNSkwxdQmpIOA2QioWNRCoDDbrEF/IFBgDriSAAQKV9L17rIhUHj3+Luy9xAoXEkXZUOg0J8DKvZy+rUjBQSMS8AjBQrGyRYR/M/6YAGCLSc0Swprl0/RCRnWZeDcdQTi4uopvq6dNh0OFhYSx6UbIe4Zv/RuUiEXdW5QkJL4vvgF5bz1Z+iXdWcsMSoalM1O6cTLfXbppLlOeib8Jv0krArYskA73qmWhxInTiTzzR9QTQTJfhF8+8L1B9R2/Bb5kn/xsJrCpYSVnyJRgFYeWwtogbZZ/OjSqJBTVgscvJpFCnZfxQf3sXfzIjIGxU1h4aEJFCN/O0ir9159KaaDoxgUvGD96s8jMo4GlzmlSzkpHvB5TAcHuB4l6mK3WdrBgsKgVsWlYKHd0z7ZamTsH4dod9At7RblCUglXGkVp1dz2v5aTBN9BrUuTvVfy27JjxMQ8CYCKha1ECi8aUZ5R18hUHjHOLurlxAo3EXe/PVCoDD/GLurhxAo3EXeO+qFQKE/zir2cvq1IwUEjEvAYwUKDakjoUJL408IE9Y03H/OlhQRh6fG6O6J3TolK9Y9TpYTjnrJL/EjnkRaYh84ysNxG27dC6fUfkksrpIc5ePFXIh4CZ8mRRKLayZH+Zy5x7EbfHwSybqdec46L/fvXtgT2Ud7McQ6nzPnbBHBwa3s40nEpgxuy3kh0qTy86VcmVLJgN7RPcfCxtWQMCHypJTiUHR5kQYC3kxAxaIWAoU3zzBz9h0ChTnH1VN6BYHCU0bCfO2AQGG+MfWUHkGg8JSRMGc7IFDoj6uKvZx+7UgBAeMS8HiBwhqtZjnB96xjUFjnwblnEGBriqdXNwqh4hA9DwuWjUqUIoB8/IuTb7bq8Q6I7Rm9RCtAAARAIOEJqFjUQqBI+HFDie4lAIHCvfzNXjsECrOPsPv6B4HCfezNXjMECrOPsHv7B4FCn7+KvZx+7UgBAeMSMJRAYVzMaDkIgAAIgAAIxI6AikUtBIrYjQVyGYcABArjjJURWwqBwoijZow2Q6AwxjgZsZUQKIw4asZpMwQK/bFSsZfTrx0pIGBcAhAojDt2aDkIgAAIgIAJCahY1EKgMOHE8fIuQaDw8gng4u5DoHAxYC8uHgKFFw++i7sOgcLFgL28eAgU+hNAxV5Ov3akgIBxCUCgMO7YoeUgAAIgAAImJKBiUesugcKEw4UugQAIgAAIgAAIgAAIgAAIgIAkoGIvB9QgYEYCECjMOKroEwiAAAiAgGEJqFjUQqAw7PRAw0EABEAABEAABEAABEAABDyUgIq9nId2Hc0CgXgRgEARL3x4GARAAARAAAQSloCKRS0EioQdM5QGAiAAAiAAAiAAAiAAAiAAAir2cqAMAmYkAIHCjKOKPoEACIAACBiWgIpFLQQKw04PNBwEQAAEQAAEQAAEQAAEQMBDCajYy3lo19EsEIgXAQgU8cKHh0EABEAABEAgYQmoWNRCoEjYMUNpIAACIAACIAACIAACIAACIKBiLwfKIGBGAhAozDiq6BMIgAAIgIBhCahY1EKgMOz0QMNBAARAAARAAARAAARAAAQ8lICKvZyHdh3NAoF4EYBAES98eBgEQAAEQAAEEpaAikWtOwSKp0+f0ukzZylRokSUP19e8vHxSVhwKM3wBMLCwujkqTPk75+BcubIHuf+xLac58+fR1sHz1Uc5iYQ27kSGwqPHz+myGfPyC958thkRx4TEQgJCaXzFy9S3jy5KX369PHq2aPwcPJJnJiSJk2qW05031343tLFZqqEhPjuCnv0iK5cuUq3b9+hLJkzUQ7x/y7WZqaaJjF2JiG+u+7cvUuXxTyKiIig7Nmyybnk6Hsouu8tbqijZ2LsgIdmULGX89Cuo1kgEC8CECjihQ8PgwAIgAAIgEDCElCxqFUpUPCGZOp3P9DkqdNsQPXt8yl9/GFHm3u48E4C/KKld9+BtHrtOguAlClT0vSpk+n1ShUs92I6caacJUuXiToH6BbZpFFDmjzhS910JBibgDNzJbY9fadNB9qzdy+dDToc20eQz+AELly4SB927SHE9zOWnhQKLEgzvptKObJns9yL7QnPy6Kly1Pd2rVo+reTHT7Wf/AwWvDnIodpfHPMiOHUquVbuulIMDaBhPjuOnn6NI35cgJt3rrNBoZ/Bn/6YuhAeqN+XZv7uDAfgYT47lq9dr1c2wedOGkDqGSJEjRy2GAqUriQzf1K1etQcHCwzT3ri9XL/5Y/YLK+Z9RzFXs5o7JBu0EgOgIQKKKjgzQQAAEQAAEQUExAxaJWpUDxzbTpcgMTEBBAb7/VjJ4/e07z//iTQkJDaEDfPvRhxw6KCaM6TyOgvdjlTW3d2jXFBvY6/fzrb7KZC+f/QmVKlYxVk50pZ/Hf/1CffgMpZ84c9Eru3C+VX75cWerSudNL93HDHAScmSvR9Zhf8pw5d45WrFpLfy1eIrNCoIiOmHnSbt0KoRr1GtLDhw+pYYN6VKJYMfpv334ptPKL3rUrl1LaNGli1eHDR47SeTGXfv51Pu0VZUQnUPQbNJQW/rVYvPwrTP4ZXrbW6PBeW6petXKs6kUm4xFIiO+uf1eupk969aFX8uShKq9XolSpUtL6jZtIe9H869xZVLF8OePBQYtjRSChvruGjxwr12ply5Sh0qVKULiw/lr27yq5vucfmWxYtZwyZvS3tKlspeoyrWrl1y33rE/GjBxG2bJmtb5l2HMVeznDwkHDQSAaAhAoooGDJBAAARAAARBQTUDFolaVQBEeHkFlX68mES5bvJBy58opz8+cPUd13mgiXq74047Na8nX11c1ZtTnIQQOHT5Czd5uTcWLFaU/5s2hZMmSyZZpFg784m/q1xNibK2z5WgCxee9e0GIiJGuuTI4O1ei6/1n/QfToiV/22SBQGGDw7QXs376mUaP+4o6tGtDQwf1t/RTs3AYOXwItWnV0nI/uhPtxZ2WJzYChTPirVYuPo1NIKG+u44eC6Lg68FUo1pVSizciWmHJn692b
QxTRw3RruNT5MRSKjvrjXrNkhBwdpSIjIykqrUqi8tJcaNGUlvN29moad9z505fshU7pwsHbQ6UbGXs6oOpyBgGgIQKEwzlOgICIAACICAGQioWNSqEijWrt9Inbt2J0fucj7s0p3WbdhIv8ye6ZQbHzOMMfrwgsBo4WZi1py5L7kl4U1uyXKvy18nH/pvh/iFZ6oXDzk4c7YcCBQOIHrJLWfnSnRYrt+4SXfv3ZVZ6jd6U35CoIiOmHnS3mj6lvzFub1bkiNHj1GTt94htghb9Me8WHX4/IUL9PjJEzp//iJ9/EnPWFlQQKCIFVpTZUrI7y5HYDZt2Ubvf/ixU3PXUTm459kEEvK7y1FPv5o0hb6f8SN17/oxfdqjmyULBAoLCpyAAAjoEIBAoQMGt0EABEAABEDAHQTMJFD89PM8GjlmHA0bPIDat33XBuf0mbNp/MSvX3oxbZMJF6Yn0LlbT1q7bj39s2jhS/6KO3zwsfSRbf8C0BEUZ8vRBIpPunxE7dq0Il8RtD1t2rQ2vyZ1VA/uGZ+As3Mltj0uVqaCFNQgUMSWmLHz5S1UTHbA/tfALK4WKFKS2MXJ4b07nerkqdNnqF6jZrESKGZNn0ZFixamZEmTUZo0qZ2qB5mNScBV310ajWnTZ9DEyVOp0RsN6JtJ47Xb+DQZAVd8d1kjatPhA9qxcxeNHjGMWrdsYUnSBIr/tm8mjk+XLl1a0wZlV7GXs4DFCQiYiAAEChMNJroCAiAAAiBgfAIqFrWqLCjGT5pM02fMkq4C2GWA9TF/wZ80aOgX4tdVn4hfWX1knYRzLyLA7p3YbcXGNSsol4gHYX306N1X+DNeQfN/+YnKl33NOumlc2fL0QQK+4LeerMZffLxh5Q7dy77JFybhICzcyW23YZAEVtSxs/34MEDKv5aRemmcM/2jS91SJsLJw7voyRJkryUrnfDGYHCugwWQ94XsSc6d+oQo7WZ9XM4NxYBV313MYVHIn5Ao2Zv07nz5+mHad9QnVo1jAUHrY0VAVd9d2mVnz13nmo3iFrv79yynjJnyqQlkSZQWG6Ik/z58tH77dtSq7ffMpXbJxV7OWuOOAcBsxBwmUBxNSTK3NksoNAPEAABEAABEFBB4EboA8qf/UVQOVfUqUqgGDR0BM1fsJCmTZlEDerVsemK9oK4U4f2NKj/ZzZpuPAeApWq15G+iu03skxA84c947upVLtmdb6lezhbzsbNW2nBn4soR/Zs5JfCj0JCQmnV6nUygCPHRln61++UNWuAbn1IMC4BZ+dKbHuqvZSGBUVsiRk339Vr16hyjboyyPC6lf+81BHtRdzenVsofbp0L6Xr3YiNQPG9EP1PnzlDmTNnJh8RP4DbwjF7+KhcqSLN+mGaU6KIXltw3/MIuOq7i61+en3Wj5avWEW1alQXAsUUWBN63vAnSItc9d3FjQsNvU1vv/ueFLkG9fucOr3/nk2b+UcnqYW7Tn//DPREuLQLOnmKNm3eIvP0/KQr9fyki01+I19AoDDy6KHt7iTgMoHCnZ1C3SAAAiAAAiBgVAIqFrWqBIpxEyfTDzNn0ddffUlNGze0GRLNgqJX927Uo9vHNmm48B4CjZu/Q0ePHaMt61dR9mzZbDquWVD89vNsqlCurE2a/UVClMMb5o8/6UUbNm2mPr26U7ePO9tXg2sTEEiIueIIAwQKR1TMee/evfsiRk4lCggIoO0b17zUSW0uuMKC4qXKxA2OhfJmy3el2PvHvLlU9rXSjrLhnsEJuOK7i8WJvgOHEP9ohOOm/DxrOqxwDD5Pomu+q767WJxo1e59KZ6+1+Zd4dq1f6wsInbv2Sue6yCbHHRoLyVNmjS65hsmTcVezjAw0FAQcIIABAonYCErCIAACIAACLiagIpFrSqBYtacX2j0l+NpxLDB1Lb1OzbofvjxJxo3YdJLPmptMuHC9AS0YOkrli6iwIIFbPqrxaBYtWwJFcifzybN/iKhymHLio6du0TrA96+blwbi0BCzRX7XmsvpWFBYU/GnNfsx91RnInIyGciBkUJh2kxkYiNBYVeGdoPAr4YMkjG1dHLh/vGJZDQ313s1qlnn34yDlTFCuVp+tTJlDp1KuMCQstjRSChv7suXrosgqt3kZYTH33Qkfr26RUrcUJrrCa8LV/yJ71aKFC7behPFXs5QwNC40FAhwAECh0wuA0CIAACIAAC7iCgYlGrSqBYuXotde3xKbFf/6/GjrTByb9UX712Hc2e8T1Vr1rZJg0X3kNgxOgvac4vv9L4saOoxZtNLR3nl3z8C+WHDx/Sgd3bYwwCm1DlaHO2ebOmNOHLUZb24MQ8BBJqrtgTgUBhT8Tc17XqN5Yv5DasWm4Ts+Z40Alq2KwFFS9WlJYsnO8UhPgIFHrz2qkGILNHE9AbY2f/v+RO3rh5kzp37SFjQDVp1JDGjxlhml+ve/QgekDjEvK768DBw9Su44dyrTZs8ABq3/Zdp3uo1x6nC/KgB1Ts5Tyou2gKCCQYAQgUCYYSBYEACIAACIBA/AmoWNSqEijCHj2i8pWjAi2uFr+C13z6X7hwkWrUayh/Ybp3x2ZsiuM/bQxbwt79B+jt1u2oTOlS9NvcWRbf6cv+XUk9en/+kiUD5/91/h9y7owUljna4Ww5fy7+m+rXqWXjyoJ/Tdqm/Qd04OBBGjNiOLVq+ZZWPD5NRMDZuaI35+yRQKCwJ2Lu6+kzZ9P4iV8T/2K432efWjo75ItR8jtq+OCB9F7b1pb7C/9aTDt27aZSwo1OuzatLPetT2ISKG7dCiGej7VrVicfHx/Lo/wLZhZFWNBdvfxvEXg2ryUNJ+YhkFDfXUePBRFbKIaEhhC72fykS2fEnDDPNImxJ85+d43+coKcK2+/9SZVLF/OUj7Hvundd4C8/nH6t1SzejVLmv3JocNHKPLZM/H9V9wmafXa9cK1Zk/i2F+7tq43zTxUsZezAYkLEDAJAQgUJhlIdAMEQAAEQMAcBFQsalUJFDwiX38zjaZ+N51y5sxBzZs2oefPn9MC8aImODiYPu/di7p07mSOgUMv4kSA50PLNu1p7779VLZMGapdqzpduxYsrSq4QHt/6v8sXyFcUvR9yX2Ks+WwiwM+3qhfT7qPingcIQLNLpfzslBgQREk+w/y9fWVefDHXAScnSt6c46psMXN8RMnJKCp06bLz+7/j6lTrEiRGIO7ywfwx5AEOO5D7QaNpSjQrEkjKlK4sBQ3OdBR/TDbAABAAElEQVQwu37iuDrp0qa19G3w8JH02+8LiH+tPnnCl5b7fDJz9hx6IMSFWzdDaP6ChfL/Sy6TjwZ16xB/J/HBL5YbN39bxr54vWIFypM7p4w/Me+332U6/3qZf8WMw5wEEuq7q/unn8mA2Eypbu1aDmF9IAIcv1YGsUwcwjH4TWe/u6rVaUCXhAg6duRweuftFz/c0ER5jsVTvGgRh1TGjhpO6dOlo5/nzafho8bI78kypUpS5swZ6djxE/TvylXyue++mUT1xXedWQ4VezmzsEI/QMCaAAQKaxo4BwEQAAEQA
AE3E1CxqFUpUPCGevK335H28k7DC3FCI4FP/tXvp30HSj/YGg1+wccb1iqvV9Juyc/oXhY7U87AIV/QP/+ukC8XrSto2aI59f+8t82LRet0nJuDgDNzJbo59/mAIfTX4iUOobzbqiWNGj7EYRpumoPA2XPnxa9/e8nAsFqPWKjg766cObJrt+SnJlCw8DBp/FibtLKVqstfKNvc/P/FlInjqXHDBvKKXywOHDKcNmza/FLWQf0+pw7vtbGxrHgpE24YnkBCfHf16N2Xlon//6I7vpn0FTV6o350WZBmYALOfHdpAsWXo74gXiNphyZQaNeOPrdvWkcBWTLTnv/20Ygx44TIeswmG4sbo8X/kzWqV7W5b/QLFXs5ozNC+0HAEQEIFI6o4B4IgAAIgAAIuImAikWtSoFCwxgZGUlnzp6TgfPyvpIHL1E0MPi0EGCXYGfOnKUMGdJT9mzZLPedPYltOTwnr9+4QTdu3KIUKf0od86clCxZMmerQ34DE4jtXDFwF9F0BQRu375NFy5eFrEocspfC7u6SnZHd+P6DboVGkqZMmaU35c+PoldXS3K9yAC+O7yoMEwcFNUf3fdvXdPrLluSouxHNmzye8vA+PTbbqKvZxu5UgAAQMTgEBh4MFD00EABEAABMxHQMWi1h0ChflGCj0CARAAARAAARAAARAAARAAgRcEVOzlXtSGMxAwDwEIFOYZS/QEBEAABEDABARULGohUJhgoqALIAACIAACIAACIAACIAACHkVAxV7OozqMxoBAAhGAQJFAIFEMCIAACIAACCQEARWLWggUCTFSKAMEQAAEQAAEQAAEQAAEQAAEXhBQsZd7URvOQMA8BCBQmGcs0RMQAAEQAAETEFCxqIVAYYKJgi6AAAiAAAiAAAiAAAiAAAh4FAEVezmP6jAaAwIJRAACRQKBRDEgAAIgAAIgkBAEVCxqIVAkxEihDBAAARAAARAAARAAARAAARB4QUDFXu5FbTgDAfMQgEBhnrFET0AABEAABExAQMWiFgKFCSYKugACIAACIAACIAACIAACIOBRBFTs5Tyqw2gMCCQQAQgUCQQSxYAACIAACIBAQhBQsaiFQJEQI4UyQAAEQAAEQAAEQAAEQAAEQOAFARV7uRe14QwEzEMAAoV5xtKjerJ//37avXs3nTp1im7duiXbljFjRipQoACVK1eOSpUq5VHtRWNAAARAwFMIqFjUQqDwlNFGO0AABEAABEAABEAABEAABMxCQMVeziys0A8QsCZgKIHixJkLlrYH5sttOceJ5xA4f/48zZ8/n06cOBFtowIDA6l169aUJ0+eaPMhEQRAAAS8jYCKRS0ECm+bVegvCIAACIAACIAACIAACICAqwmo2Mu5ug8oHwTcQcDjBYq/V20i/qd3NK1XjfgfDvcTYKuJKVOmONWQnj17eo01xbPnz2nDwWA6efkeJfFNTB/UL+AUq51BN+n4xTv0dpU8lMoviVPPIrNjAs+ePaeroY8obcoklBpMHUPCXeUEVCxqIVAoH1ZUCAIgAAIgAAIgAAIgAAIgYHICKvZyJkeI7nkpAY8VKOyFiUL589gMUdDp8zbXECpscCi/YMuJ4cOHx6lefi6ulhTBtx9RyL0Iypc1NSVP6hOn+mPz0Nlr9+nx02dUKGfa2GR3mOe7ZUE0f8M5mVbslfT03ScVHObTuzl16XFasOk8LRhUjbJmSKGXzaPux2d85qw5TbNWnnLYn4qFM9H4Tq85TNt4KJjG/nGIwsIjaWavSg7HLPxxJE1adJRW7LliKSObvx/1easolQvMaLmHExBwBwEVi1oIFO4YWdQJAiAAAiAAAiAAAiAAAiBgZgIq9nJm5oe+eS8BjxQorMUJFiZYfNBz6WSdFyKF+yby2LFjY3TrpNc6dvc0YMAAveRo70/7J4h+33iOfurzOuXPlibavPFJ7DBxK525ep+2TGwQ52KaDFtHEU8jhcBQg9KmcN4CwogCRXzGZ8JfR+nv7RepeeWX3bm9kiUVNauUy2Ys7oY9oSmLj9Kafdcs92cIgeJVB6JS12930uFzt6lqsSxUVggSdx48pvkbz0pR49tu5alE3gyWMnACAqoJqFjUQqBQPaqoDwRAAARAAARAAARAAARAwOwEVOzlzM4Q/fNOAh4nUFgLDv26tbcRJvRiUPD9cdPmyhGESKF+IsfFtZN9K+Pq6ik+L8Dt2xDddXwFCv7Ffp0Bq+UL8dEdSkdXlW6atwkUA37aS2y58sfA6rpMtISH4U+p9dhNdFsIDY0r5CRfn0S0eNtFciRQBF26Sx9O3k5NhcDx2VtFtCLov5O36NMf9khB5NM3C1vu4wQEVBNQsaiFQKF6VFEfCIAACIAACIAACIAACICA2Qmo2MuZnSH6550EPEqg0MQJe6sJFiA4zdqtk30eHr7x3/0s80CkUDuZf/jhB9qxY0e8Kq1YsSJ99NFHTpXBosG10DD5q3d2z+OXzJf6tyxmcemz+8Qt+nXDWQq6dIeS+fpQkTzpqH3t/JZ0rTKOC7F4+wXafzqUuBz+9fzHDQMpQ+pkdPBsKH29+Ji0nuD8+bKllo/92Ot1+RKcL/iF9797LtNGUY5fMh+qVDgzNauYi3KLX/nz8cu6M7R631U6H/xAXnMZuTKlpBHvlaKTV+7RmN8PUduaeal2qWwynf9EPImkj77ZQRVfzUQfvREo78dVoDgn6uX+bT16gx6GP6GaJbJSm5r5aPDcfVS9eAB1qJPfUu8t4S7rr63n6b9TIXTxxgMqlc9ftqFJhVyUKJElG3HshsXCumHl3isUdPEu5RT9KSr4dm1UiNKlSkphEU+JrRSiG58XpTk++3DKdkoqYnVM6xazKywWJtp/tYUGtCom2puZZqw4Sb+sPeNQoOAYIP+dukXVRN+z+79wlXXjTji9NXID1SgRIMfGcatwFwRcT0DFohYChevHETWAAAiAAAiAAAiAAAiAAAh4FwEVeznvIoreegsBjxIoOvYeIbnPnjTUhr8mPPBNFiY0oYLP+3Z9z5LX2pLC3vrCkgknCU7gs88+o1u3bsWr3IwZM9KECROcKmPU/IN07MJdunTzIVUQL/I50HE78eKdhQGOQzBk7n5KkdyHyhbMSKH3H0uXPlzBL32rUJ7/iwcrhLAw5vfDUpioUjQLXRRl7Th2k9KLl+x/DqlB568/oAWbz9GWI9elEFLvtSgRYcA7xckncSK6INI7f7NdppUrlJGeiDgVLHTw87P7VKaMaZKJOAeXaZcQS9btvybvc77M6fyoc4OCUgD5ZNou6tHsVRn8WgPwKCKS6g5cTbVKZaXhbUvK23ERKG7eDadOk7ZJywIWRnJkTEF7hKWAf+rkklvDcjmo/zvFZPlshdBt2k4pxuQJSEXsRmnXiZuybx3q5qdO9QpozaPJQrT5a+sFypQuOZXMm16INPdkeSzwzOtXlSIjn9OEv47ojo+loGhOmguxoLiI1fFe7Xy0SYznI2GFUvyVDLI++yDhkUIweSDar7nOik6g0Kvyp9WnaPaq09S3ZVFqXD6nXjbcBwGX
E1CxqIVA4fJhRAUgAAIgAAIgAAIgAAIgAAJeRkDFXs7LkKK7XkLAYwQKzXrC3vrBWnSwFi40McNeiNDKsRcvvGQ83dLNDh06JEi9c+bMcbocPRdP/AL91NV7xEICv5TnY+nOS/TVwiPUpVEgvVsjr7ynxSL4d1RtSu2XRN5btO0CrRViQrfGhahI7nTynp6Lp3e/3CxfzH/fvYKwIEgv82riiPUv8TUXTzVLBtAX7UrJfPyHLTRcKVAM/Xk/sYVI96avUsuqeWS9Dx49oc5TdrwkUGgxHz5oUEBamnBmbvfHU3dI0WKaCOrNggGLMH1m7qF0KZPSwFbFLcHJh887IEWY6T0qWrjpjY9sSDR/nj8nqvrZCikcXQ15ZJOTRZHp3SsKkSe5zX3ri9gKFHPXnpZ9PCTiURw6e5veEvEuWCxKbG0uYl0wzkFAAQEVi1oIFAoGElWAAAiAAAiAAAiAAAiAAAh4FQEVezmvAorOeg0BjxEoNMHBWoTgUdAECnvBQbOqsBco+Bm9sjgNR8IT8ESBwlEvQ+9HUNPh66lB2ezyxTrnYYsBfjE95v3S9HqRzLovph0JFFp5jcrnoH7CtZT1we6J2EXSqtF15W1XCBRPIp/R3YdPrKuV50mEWyTNkoADc7PbKfs4DjuDbtLnM/8jawsKFltYvFgyvKYNh72nQ6jX97tthJ2XKhU3NGFmoHCz1KBsDplFT6BgV1KODrY44eO+aMcbg9fK8z4iTgRbkrBVCbuqmrfurHQp9auw1NDTEWIrUFTps0LWwX/YaqSrEK/KB2aixMI6BgcIuIuAikUtBAp3jS7qBQEQAAEQAAEQAAEQAAEQMCsBFXs5s7JDv7ybgMcIFJrgkFAWFPblePcwu7b37nLxxL3SewHOaRxzYf+ZEDp99b588c6xB/hgN02DW5eQ59uO3aD+s/bKc3bLxK6iODZBucCMlMQnsbzPfxwJFBzjos+MPTJPoVxpLXn5hOMy8LFoaA3KlDa5/JU+B8lOSAsKTWSQFVn9YasPtmIIESJAsy/WO4ypwK6fmo/YYBEo2L1T/UFrqGLhTDS+02tWpZF0D8VCh3XbWXBZu/8qHb1wR7rFuhf2xBJjg+NAvBGNQKHVZVPJ/y/WjK0rLTI4z8It52W8kAqFMtlk1axCWKDIlTmlTZp2EVuBgq1BHot/bMnCFjbbxJyxD56tlYlPEFBFQMWiFgKFqtFEPSAAAiAAAiAAAiAAAiAAAt5CQMVezltYop/eRcBjBArNUoLx21tFaOIFp7ElhV4MCk6H9QRTUHu4K0g291JPoJi06Cgt3nZRgihb0J+ypPcjjlOwYs8VG4GCM1y88VAGsd5x/AZpIkbBHGno64/KUZoUUW6fHAkUHJdi4E/7ZB1sRWF9HBEv7osKoaBT/YIyDoUrLCgu33pIq/Zeta5WnmcWgkjjCjktokip/Bnomy7lbfKduXafOkzYahEo7j58TI2GrqOqxbLQ6A6lbfKyVUUDYc3AFiZfdiwjLRl6TN8lRRiO8fFagYyUXgQU3y7Enpsi0HRMAgVbfvwsAlg7OjiIua9P9NYLy3dfpi//OExD3i1Bdcu8CCxuXV5sBQrrZ+4KkaXRkCirDU0osU7HOQioIqBiUQuBQtVooh4QAAEQAAEQAAEQAAEQAAFvIaBiL+ctLNFP7yLgMQIFY9eECHt3TixecGwJTZjgvJyHrSQC8+XmS3loz8N6QiOi5nP//v00ZcqUeFXWs2dPKlXqRWyG2BbmSKDQXjTnzJSSZvd+3RIj4fKtMGo9dtNLAoV1XddCw+ibv4/T1iM3qE+LItSsYi6Z7EiguBISRq3GbJKxHTjGQ3SHnkBx5Pxt6jJ1J7Wsloe6N3lRBrej5ehN8Q6SrbX7LxHw2zpmw5w1p2nWylMWgYLbzkGpkwhxwN4d1LGLd+gjEbOiU/0C1KFOfilE9BNWJ+we6vO3i8pg4fy8FnA8JoGC88Z0sICybNclqlYsgErmy2CT/bcNZ+n7ZSeEgFSWXhMB0B0d0QkULHCcvHKPPn4jULq/sn6+x/e7ZJBzzfLFOg3nIKCKgIpFLQQKVaOJekAABEAABEAABEAABEAABLyFgIq9nLewRD+9i4BHCRSMXhMZ+NzekoKFCu2wFiasBQx7cUPLj0/XEhg7diydOHEiTpUEBgbSgAED4vTsrFWnaM7q0zSyfSmqLlwz8XHh+gNqO34LlSuUkSZ+WNZS7g//npDxCzQXTywasKUFv7jvWK+AJe7CpsPBNHjOfuoqgmS3rv6KfF4Lpr1kWE3y/3+cBA7kXH/wapn+U+/KIqBzVDButtSYKkQOjqMwSASR5ngGegKFFseC4x/88nkVS1vnrT9DPyw/GW+B4q+tF4gDhrNFyIcNCkp3UzuP36Tpy6PGyjoGxbBf9tP6A8E0rG0Jql0qyjLhmehLv9l7iZ8Z/0EZqvhqZvpHuEIaL4KNf9wwkNrUjAo2zm6SPv1ht4znYS1QOBofSyejObn94DGxWyl2V/VN1/KUVMTV4IOtObp8u1O6k1o1pg6lSObrsJToBIol2y/SxL+O2gQO50Ku335ELUZtlIG57UUah5XgJgi4iICKRS0EChcNHooFARAAARAAARAAARAAARDwWgIq9nJeCxcdNzUBjxMomLa1SBGdNYS1MMHPQZxgCu45zp8/T8OHD49T5fxcnjx54vSsZoHA8SPeEL/oZ4sHdufUdPg6GTuBxYiC2dPKGAObD1+XdWgCBV9ov5ivXDQzvV44sxAVntKcNacoLDyS5g+oSjkyRsU40AQDjjXB+d6rlU8KD+zWiK0JuP46wt1QhlTJaP3Ba9JV1DvCKuKT/1tF6AkU3IZ2X22RL9zzZUsthIGs0uUUu6Lig4NDD29bUp5PXXqcFmw6TwsGVaOsGaLEEJkQw5+v/jxCS3dcssnV+Y2CNOPfkzYWFMGhj6izCO7N4gAz4r5znI3D527LdgxtU0KKOOxaqvXYzcTunZpUyEXpUia19JkrsRYoHI1PQAY/m7boXYz5/ZB0ycXMG4qYFhFPI+X1GRFTpG2tvPSRsIDQO6ITKLh/7QVz/uS4GsVfyUAhIoD6MiG88L2+wiqEXWThAAF3EVCxqIVA4a7RRb0gAAIgAAIgAAIgAAIgAAJmJaBiL2dWduiXdxPwSIGCh4RdOvE/64MFCLac0CwprF0+RSdkWJeBc9cRiIurp7i6dtJ6wb/w5xfwy3ZdlremdClHpfP7SyuKYfMOEL/M5oPdPfVuXlj8yn8PNSibnQYKywY+2B3UlMVHac2+a/Ka/7C1QZ+3ilDhXOks926I2ArjFh6m3UG35L314+pRkv//qn/d/msyoDMHjOaDX9y/WyMvta2Zz+L+SBMorAUHmVn8CRa/3B/y835LYG0WO/q2LEYDhOVCndJZaWibKIHiWyFQ/CEEioWDq1OAEGGcOVh82CcChj+NfE4l82aglMl9ZQBtjp3RT9SlHez6aLawSuFg0XxkEtYlVYpmoW7CmkSzYuD7HH/jK2FFwS/0+eDg4jVKBNDY3w8LtsUE46iYHHr
jIx+K4Q9bony3LEiKMlpWZtu6el7pakq75+jzx5Unae6aMzSzVyUZaNs+D1uusBWI1k9O5znCVibcDxwg4E4CKha1ECjcOcKoGwRAAARAAARAAARAAARAwIwEVOzlzMgNfQIBjxUotKFxJFRoafwJYcKahvvP2ZJi/vz5Mbp7YrdOrVu3jrPlhH1P+cX7UxF8OXlSH5skdrPEafzSP7qDX6Tzr+g5KHayJLZlWD8X8SSSEiVKZPOyXkt/GP6UOD29sKIQWZw+tOfTibYmjksBDmrcKoQGdl3E7q8011Sc7Y/N5+jbv4OoR7NX6e0qeV56kpndC3tMGUTw6+gOftHPwkUqv6hg4np59cZHL7/1fQ6qzW67fBInptxZUiYYG66DRRAWiHjcU8fQB+s24RwEXElAxaIWAoUrRxBlgwAIgAAIgAAIgAAIgAAIeCMBFXs5b+SKPpufgMcLFNZDoFlO8D3rGBTWeXDuGQTYmmL37t106tQpunUryuogY8aMVKBAASpXrlycAmJ7Rs+M1Yp/91yWVg3sPoqFCBZq9p4OkQGo2Y3Vr/2qUq7MUW6sjNUztBYEzEtAxaIWAoV55w96BgIgAAIgAAIgAAIgAAIg4B4CKvZy7ukZagUB1xIwlEDhWhQoHQTMR4AtFzh+xqyVp2w6x0LF2I5lZBBqmwRcgAAIuJ2AikUtBAq3DzMaAAIgAAIgAAIgAAIgAAIgYDICKvZyJkOG7oCAJACBAhMBBLyAALuP4uDW90XMDY61kDmdX5zcUHkBKnQRBNxOQMWiFgKF24cZDQABEAABEAABEAABEAABEDAZARV7OZMhQ3dAQBKAQIGJAAIgAAIgAAIeREDFohYChQcNOJoCAiAAAiAAAiAAAiAAAiBgCgIq9nKmAIVOgIAdAQgUdkBwCQIgAAIgAALuJKBiUQuBwp0jjLpBAARAAARAAARAAARAAATMSEDFXs6M3NAnEIBAgTkAAiAAAiAAAh5EQMWiFgKFBw04mgICIAACIAACIAACIAACIGAKAir2cqYAhU6AgB0BCBR2QHAJAiAAAiAAAu4koGJRC4HCnSOMukEABEAABEAABEAABEAABMxIQMVezozc0CcQgECBOQACIAACIAACHkRAxaIWAoUHDTiaAgIgAAIgAAIgAAIgAAIgYAoCKvZypgCFToCAHQEIFHZAcAkCIAACIAAC7iSgYlELgcKdI4y6QQAEQAAEQAAEQAAEQAAEzEhAxV7OjNzQJxCAQIE5AAIgAAIgAAIeREDFohYChQcNOJoCAiAAAiAAAiAAAiAAAiBgCgIq9nKmAIVOgIAdAQgUdkBwCQIgAAIgAALuJKBiUQuBwp0jjLpBAARAAARAAARAAARAAATMSEDFXs6M3NAnEIBAgTkAAiAAAiAAAh5EQMWiFgKFBw04mgICIAACIAACIAACIAACIGAKAir2cqYAhU6AgB0BCBR2QHAJAiAAAiAAAu4koGJRC4HCnSOMukEABEAABEAABEAABEAABMxIQMVezozc0CcQgECBOQACIAACIAACHkRAxaIWAoUHDTiaAgIgAAIgAAIgAAIgAAIgYAoCKvZypgCFToCAHQEIFHZAcAkCIAACIAAC7iSgYlELgcKdI4y6QQAEQAAEQAAEQAAEQAAEzEhAxV7OjNzQJxCAQIE5AAIgAAIgAAIeREDFohYChQcNOJoCAiAAAiAAAiAAAiAAAiBgCgIq9nKmAIVOgIAdAZcJFFdD7tpVhUsQAAEQAAEQAIGYCNwIfUD5s/vHlC1e6RAo4oUPD4MACIAACIAACIAACIAACIDASwQgULyEBDdAIFYEXCZQxKp2ZAIBEAABEAABELAhoGJRC4HCBjkuQAAEQAAEQAAEQAAEQAAEQCDeBFTs5eLdSBQAAh5IAAKFBw4KmgQCIAACIOC9BFQsaiFQeO/8Qs9BAARAAARAAARAAARAAARcQ0DFXs41LUepIOBeAhAo3MsftYMACIAACICADQEVi1oIFDbIcQECIAACIAACIAACIAACIAAC8SagYi8X70aiABDwQAIQKDxwUNAkEAABEAAB7yWgYlELgcJ75xd6DgIgAAIgAAIgAAIgAAIg4BoCKvZyrmk5SgUB9xKAQOFe/qgdBEAABEAABGwIqFjUQqCwQY4LEAABEAABEAABEAABEAABEIg3ARV7uXg3EgWAgAcSgEDhgYOCJoEACIAACHgvARWLWggU3ju/0HMQAAEQAAEQAAEQAAEQAAHXEFCxl3NNy1EqCLiXAAQK9/JH7SAAAiAAAiBgQ0DFohYChQ1yXIAACIAACIAACIAACIAACIBAvAmo2MvFu5EoAAQ8kAAECg8cFDQJBEAABEDAewmoWNRCoPDe+YWegwAIgAAIgAAIgAAIgAAIuIaAir2ca1qOUkHAvQQgULiXP2oHARAAARAAARsCKha1EChskOMCBEAABEAABEAABEAABEAABOJNQMVeLt6NRAEg4IEEIFB44KCgSSAAAiAAAt5LQMWiFgKF984v9BwEQAAEQAAEQAAEQAAEQMA1BFTs5VzTcpQKAu4lAIHCvfxROwiAAAiAAAjYEFCxqIVAYYMcFyAAAiAAAiAAAiAAAiAAAiAQbwIq9nLxbiQKAAEPJACBwgMHBU0CARAAARDwXgIqFrUQKLx3fqHnIAACIAACIAACIAACIAACriGgYi/nmpajVBBwLwEIFO7lj9pBAARAAARAwIaAikUtBAob5LgAARAAARAAARAAARAAARAAgXgTULGXi3cjUQAIeCABCBQeOChoEgiAAAiAgPcSULGohUDhvfMLPQcBEAABEAABEAABEAABEHANARV7Ode0HKWCgHsJQKBwL3/T1r7l5H7aELSHDl06ScF3b8l+BqTNSMVzFqQahcpSlYKlTNt3dAwEQAAE4kNAxaIWAkV8RgjPggAIgAAIgAAIgAAIgAAIgMDLBFTs5V6uFXdAwPgEDCVQnDhzwUI8MF9uyzlOPIfAieAL9M3a3+jgxRPRNqpErkDqUftdCgzAOEYLCokgAAJeR0DFohYChddNK3QYBEAABEAABEAABEAABEDAxQRU7OVc3AUUDwJuIeDxAsXfqzYR/9M7mtarRvwPh/sJbD21n/ovnOJUQ758uydVLhA3a4q7YU/oxxUnqXxgRqpcNItT9cY180+rT5GvT2JqVytfXItQ8lzEk0j6bcNZyp8tDVVRxEZJx2JRSeSz53QtNIwypE5GKZL5xuKJ2GV5/pzoSshDCr3/mLKkS05Z0vvF7kHkAgEnCahY1EKgcHJQkB0EQAAEQAAEQAAEQAAEQAAEYiCgYi8XQxOQDAKGJOCxAoW9MFEofx4bwEGnz9tcQ6iwwaH8gi0nOs0eFqd6Z3X8Ik6WFBsPBdOQufupYI40NOvT152uO/j2Iwq5F0H5sqam5El9Ynye8zb7Yr3M9++o2pTaL0mMz7grA4s3jYaspYblclD/d4q5qxlK6w29H0HjFx6hbUdvWOrNE5CKejUrTGUK+FvuOTr56s8jtHTHJZn0S98qlCdLKptsN+6E06C5+y
jo4l3L/cpFM9OwNiVjNXcsD+EEBGJBQMWiFgJFLAYCWUAABEAABEAABEAABEAABEDACQIq9nJONAdZQcAwBDxSoLAWJ1iYYPFBz6WTdV6IFO6bd93mjY3RrZNe69jd07S2A/SSde+HP46kv3dcpKJ50lOR3Ol08+klTPsniH7feI5+6vO6tDTQy2d9f/Xeq+STOBHVKpXV+rbHnXubQBEW8ZRajdlEtx88pnqvZaMSr2SgG3fDacHmcxQWHkkTPnyNyhfK5HCc9p4OoV7f77akzf28MuUNSG25fiZMJz6bsYf2nAyhGiUCqLQQO9YfuEb7T4dSy6p5qHvTVy15cQICCUFAxaIWAkVCjBTKAAEQAAEQAAEQAAEQAAEQAIEXBFTs5V7UhjMQMA8BjxMorAWHft3a2wgTejEo+P64aXPlqECkUD85OSD2gD+dc+1k38qxLXoqD5wdF4HCvt2eeu1tAoVmTdOmZl76uGGgZViOX7pLnSdvp5olA+iLdi+7EmORq/2ELfQoIlK6Cftn5yWyFyiWintfCcuMRuVzUL+WUdYo7O6p8zfbpUXFt93KU4m8GSx14gQE4ktAxaIWAkV8RwnPgwAIgAAIgAAIgAAIgAAIgIAtARV7OdsacQUC5iDgUQKFJk7YW02wAMFp1m6d7PPwcIz/7meZByKF2sk5YukMWn1ke7wqrVu0Eg1t0tmpMm4Jl0ufzdxD9V/LTq2qvSKf/eHfE7Tj+E0aIV5Gz1p1knYG3aRkvj5UqXBm6tIokNKmTEr8a/uu3+6UcQr41/XZ/P3IT8Qq6C9ePhfKmZa0Mvh6tog5sePYTfqkaSF6p+or9PmP/5GfcAc14r0XL7vvP3pCf2w6T1uPXqczV+9TuUIZqVujQjR37WlRVyR99cFrsm3/7LpEf229IF6Ul6TcmV+4EDp64Q6xi6EOdfJT9eIBFgZB4uX6v3su08aDwaJ9UX1oVjEX5bZzP3ThxgNatO0ibRC/6ueD43HwL/vbjd8SJxdP/LKf/+04foPSCV7FXklPneoVoKwZUlja9kzEeVi8/SKt3HtFvqTPmSmlsGRJR11Fv9OlSirzcRyIAT/toyYVctKDR09p5X9X6NLNh7RlYgOZ3nP6bsqUNplo6ys0fVmQtFBgd10cM4NZc59jeyzadoG+XnSMRnUoRdWKvWD4JPIZ1ey7Svbhu08qvFTcDBHD5Je1Z+R4nrp6T57bCxQ85jvFnFo0tIZob3JLGfvPhFKP73bRuzXyyrllScAJCMSTgIpFLQSKeA4SHgcBEAABEAABEAABEAABEAABOwIq9nJ2VeISBExBwKMEio69R0iosycNtYGrCQ98k4UJTajg875d37PktbaksLe+sGTCSYITaDHtMwq+eyte5QakzUh/dpvgVBn8Arzl6E3Usppws9Mkys3OiF8P0Jp916To4JM4Mb0iYhBsPnxdllvh1UxSLOBfzU/46wgdu3BXvjDn+2lTJqF2NfPJl/9aGenFi/aIp5FUXLgLeqNsDunep/nIDZQyuS/98nkVWebTyOfU98co9z+ZRODkYuIl/eHzd+hh+BOZzuLI0i9qyfOfxYvwmeKFOMfL4Bfx2rH7xC3qI1wIff52Ufkyn+9fuP5A/kKfBRQWPJ48fSZdCnGbZvepTBnTJJOP3334mN6ftI1uihgJHG8hlxAK/jt1i/xTJ5d9czYGBbuwGvnbQVk2x1i4FvpIii4pkvvQzJ6vU67MKWXa5MXHpNjCfS6ZNz0FXbon62OxZ16/qpREBBLnPrQVIgm3mV0vcZ/5Bf+XHcvIMuoNWi3FI2acMnkSyY7dJnFetngY3rYUJUoks8b457qIJ9Ji1EYqW9CfxrxfxhIXggUhbmuPZq/S21Xy2JSjtU+bF5pYYS9Q8Jgn8UlEfwysbvM8z6M6A1bT60UyW/pkkwEXIBBHAioWtRAo4jg4eAwEQAAEQAAEQAAEQAAEQAAEdAio2MvpVI3bIGBoAh4jUGjWE/bWD9aig7VwoYkZ9kKEVo69eGHoUfLwxlce0yFBWrh14BynyolOoKhTOisNfrcEJRZvuB+GP6XWY6PiEyweVtPycl/PxZMmUHC8gSFtSsiX7VrD7AWKJcKKYOJfR4XVQmYa3aG0rI/d/4yaf5D4ZT+/nI+LQPHul5vlC//vu1eQMTa4fs2NEbdLs+AY8/shWrHnCnWqX0BaYHA+du/0/sStUrRwRqDgoOFvi5f8LEbM61vVYi2wQlhxjPn9MDWtlIs+e6uIFEv6CMsVtq4Y2Kq4RQwYPu8Ardt/jab3qChjgmgCALeJrRfYEsP6YIGCBRhrMYIFn57Td9Ghs7dlH7mvsT3WCQuS8QsPS9GjsIhJwv1hi5YWVXLTR28EWtqpldf7h93SamP+gGqUI2MKciRQaCIECx+TPiqnPWr55D4wB3vxwpIBJyAQBwIqFrUQKOIwMHgEBEAABEAABEAABEAABEAABKIhoGIvF031SAIBwxLwGIFCExysRQimqgkU9oKDZlVhL1DwM3plcRqOhCfgiQLFzF6VpLsmrbdf/nGYlu++bHl5zvdjEijmfFaZ8mV9ESyZn7EXKFiIWPXfVfpzcHXKkt6Ps8jjgXD71GDw2jgJFKH3I6jp8PU2MQ+0cj+csp0uCpdOq0bXlbfeGbNRxk9YIoSXxCJ4t3ZsEG6hhv6838bFEwsXbIlhf7D1CFs8aAKItSWHlpddZSVP4kMl8+nHWtCeH9iqGDUQFieaQGEtqGjl8acmUCwbWZvSpkhiSTotXC29P3GbxTKG3TTdfRhlkWLJJE6S+Ca2eW6VcDc1bWmQtMCwzscWNuyiKoVw46UdWls71M0v0/i+I4FCE8FY8BrapqT2uOWzgxCCWATR3FZZEnACAvEgoGJRC4EiHgOER0EABEAABEAABEAABEAABEDAAQEVezkH1eIWCBiegMcIFJrgkFAWFPblGH6kPLgDnujiaeXoOtIVk4Zt4Zbz9M2S42Qd0DgmgWL1mLovxUGwFyjY0iHkfrhFMNDq409NPHDWgkJz+cRlFMqVlj8sR9DFu/Kc4yHwC/f6g9ZQxcKZaHynqDgXWsaLNx5Sm3GbbQQKFje057V8/Dmxc1kqF5hRxt6Yt+4szehZkV7Nlc46y0vnbFmwdv9V4vgZF0VciXtC/Dgf/EDmGyAECnaJpQkU7Wrno84NCr5UBgsU7Npp0ZAaNmla3IjiwnXUtG4VZByRz2f+Z5OHL4oIKwm21uCDLTfYgoPdXLGVR/5saejW3XD6Y/N54sDXVYtlkRYunJfb/q5g81SINQsGVbdYVjgSKB6LPLX6raJS+TPQN13K8+M2B/eB3Wn91r+qzX1cgEB8CKhY1EKgiM8I4VkQAAEQAAEQAAEQAAEQAAEQeJmAir3cy7XiDggYn4DHCBSapQQjtbeK0MQLTmNLCr0YFJwO6wmmoPZwV5Bs7dftjmJQ2IsLWiyChBYouk3bKd0RrRtXj5KKX/RrB7t5qj84KsaCswLFliPXaaAILs1Ho/I5tCLl5xEhCBQVL+Y71S9IvsJiovGwdTYv3
7XMGhtrF09/77hIHFjc/qhXJrt0cfTt0uMy2Pes3iJGRvYXMTLs8z8Sgb97CDdMLHawO6jXCmSk9KmT0fZjN6RbKWcECkcv9zkAd7XPV0pxZmbPSnT51kNaJdxl2R+ZRTyLxiIANx+a+GLtwkvL3+P7XTJ+x0IhRgRk8KPfN52Tlhb8bE0rF1IcxHz9gWDqIwSOnCLWRpn8/rIIFpqeCNdT9kIKc6g7cLV07zVWxL3AAQIJRUDFohYCRUKNFsoBARAAARAAARAAARAAARAAgSgCKvZyYA0CZiTgMQIFw9WECHt3TixecGwJTZjgvJyHrSQC8+XmS3loz8N6QiOi5nPLyf004M8p8apsbIueVKVgKafK0F7Cu1Og+G5ZEM3fcI7Gvl9avKjOYmn/vtMh1PP73TYunuatP0M/LD/5Ul4tMLXmWulKSBi1GiOCf1cVwb+bRgX/thRsd9JECBR+yXxeioHALpnY6sBaoLB79KXL1ftEgOxfD9IQEbujbplsNul3ROBqHxEoOrVfEilE9Ju1V5bNbfb5v2spLVaFMwIFx6BYM7auxYqBK718K0zGDNFiXtg0ROeCLRn4WDmq7kuBtb8WQbIXiWDZU7qUo9JCdNCudYqSt1l40dxo9Z31H+04dlMwriaCr6ewPLZLMP5MMG5bK6+McWFJwAkIxJOAikUtBIp4DhIeBwEQAAEQAAEQAAEQAAEQAAE7Air2cnZV4hIETEHAowQKJqqJDHxub0nBQoV2WAsT1gKGvbih5cenawl0mzeWDl48EadKSuQKpGltBzj9bHwFilmrTtGc1adpZPtSVL34i2DMWpBseysMbqC9i6fjl+5S58nbpSVBn+ZFqYCwPDh77T6N//OwDABtHSR70+FgGjxnv43w8EyYWnz+43+0O+gWaQKFZn3B9f3Uu7LlpXiksCyY+vdxui/iWwwSwak55sSAn/bS1iM3bEQFjjPRVVh2sIWDMwKF5hYqm78fzRb1pkweFbPhwJlQ6v7dLtJiSbDLpPELj9DHDQOpTc283ExiV0ifiqDTHNzaWYHC/gW/FtdD4yEriOHPsF/2S+sH+7Fki5GOIk7EbSGwaELIGTE+14QIZH8sF8HAmSWLQrmEBUWFQplkFo5tMeq3Q1TvtWw0uHUJeY/HreOkbTL+xA/CJVbhGFxi2deFaxCIjoCKRS0EiuhGAGkgAAIgAAIgAAIgAAIgAAIg4DwBFXs551uFJ0DA8wl4nEDByKxFiuisIayFCX4O4gRTcM9xIvgCdZo9LE6Vz+r4BQUGvLCEiW0h8RUojpy/TV2m7pRWDm+Uy0HNKuaSLoCcESi4rZoFhHW7+WX2aRE8OVS8INdcPN0UMRGaj9ggs3HciJJ5M8j4CvtPh8p71i/k2V0SWymwwFFHWDNkSJWM1h+8Ricv36N3RNDnT5pEWVawqPDhlG1SDGlQNjsFiEDd28Sz7BaJrROcESi4EXPWnKZZK09RzkwpqXaprPLF/up9V2RZHO+B4z5w2a3HbpaiTJMKuShdyqSWtnEZzggUnJ/bWbloZgrMkVYIHKG052TI/9q7DzCpqvOP46/03ll679KkF0EEBCTBEoPGoAYiVlAw8hdExIYGRVFRiRqNihLFGrGDIF1AFKRJ731h6b35P+9ZzzAzbptlyt2d730edmfunHvuuZ+7zxNzf3POa5d30voT/stmadvUNhei6Odqr0HR3oMnRWd1aDjx147VpF+Puqkdbve/+tUq0Roc4+5rJ9XLniuOroGRzqKYt2K3XFw/QVrULiXfmtkmWn+jV8fqcmePOmn2y4cIhCoQjf+oJaAI9a7QHgEEEEAAAQQQQAABBBBIWyAa/18u7RHwKQJZU8CTAYVS6pJO+s9/0wBCZ064mRT+Sz6lFWT498HryAnMXrNI7v8wtKWenrx2oLSrFdrSTu4Kdu47Jtc+Pj3ggf2IdxfbwMB9W961TakGhdY6ePqjZfLF/K22mVsCKLU+tJHOoChSILe8Naid69r+1lkNGjRoCKEPxxtVKy5aQPv4qTMBtQu0APaTHyy1tRr0QH3gr+HI02ZGwuDrGsgVrZJrKuhnWvhZi3vrg3DddNkhfSB+Y6cavmWVdP9Pa5JEZ4Ms3bBP39qH+wNMgNHvpXm2hsWQ6xra/Rn5oQ/j3566Vib9uF22mOLXumlYoeFJkxolfF1onQwdsz781611vdJ2hsXICUvlAVMku7spku1mZPTuUkNuMTUzgjddlqlyQiG5wVzTsx8v9/XVoVEZGXj1hVLa1JgIZdOZEaNNP85Bj9XZIH81/Wv4lN7mAop3BreXqmUKBTRP3H9cho1bGFBkXEOVh2+4KGB5qoCDeINAJgWi8R+1WxMPSKUyxaVg/jyZHCWHIYAAAggggAACCCCAAAIIOIEjx07Kll37pGJCUbeL3wggkEEBzwYUbvwpBRXuM/1NMOGvEfvXOpPihSnvprvcky7rNOCyXpmaORHuqzxtCiCfPnM2Uw+adUbDUjMTQ4OGupXO/Y/QSrP0061m6Sd9iJ1SAWWt6aBLNGnYkd525PhpOWGCjuJmFsUFF6TeWtudNqFL0Qz0mXov5z45cOSkmcGQ09a4OLc38NXeQyfsLIdC+dO/jsAjzUyH3wIKLYStwcieg8ftbIzcfsXGg4/JyHu9nxpe6ewTt0xVRo5Lr40u67R191HZb1wSiuWzs1XSO4bPEciMQDQCiv2HjokuG1el3LngMTNj5RgEEEAAAQQQQAABBBBAAAGRTTv22i+TFiucHw4EEAhRwPMBhf/1uJkTus+/BoV/G157Q0ALZ09buUCWbFktOw/ssYMqW7SUNKpUWzrWbRFyQWxvXNXvR+FmCejshr7daksVU7tg5dYDvhkIbjbB749kj39AgQYCCJwTiEZAoWfTWRQlixaU0sUDZwydGwmvEEAAAQQQQAABBBBAAAEE0hPYve+wJB04wuyJ9KD4HIFUBLJUQJHKNbAbgZgKTF+yU0a+v8TWUvAfyKCe9TO0tJD/MfH0moAinu421xqKQLQCilOnz8iuvYfNLKncUqpYIZZ7CuUm0RYBBBBAAAEEEEAAAQTiXkCXddqz/7AcO3FKypQoJLnNKhRsCCAQugABRehmHIHA7wROnT4r25OOyi5Tq0CX/9G6DTnNEk5sqQtokfNcOXOEXGsi9R75BIHsIRCtgMJp6XJPh81/WB8/ddrt4jcCCCCAAAIIIIAAAggggEA6Avly55JCpq4fyzqlA8XHCKQjQECRDhAfI4AAAgggEE2BaAcU0bw2zoUAAggggAACCCCAAAIIIIAAAgj4CxBQ+GvwGgEEEEAAgRgLEFDE+AZwegQQQAABBBBAAAEEEEAAAQQQiJoAAUXUqDkRAggggAAC6QsQUKRvRAsEEEAAAQQQQAABBBBAAAEEEMgeAgQU2eM+chUIIIAAAtlEgIAim9xILgMBBBBAAAEEEEAAAQQQQAABBNIVIKBIl4gGCCCAAAIIRE+AgCJ61pwJAQQQQAABBBBAAAEEEEAAAQRiK0BAEVt/zo4AAggggECAAAFFAAdv
EEAAAQQQQAABBBBAAAEEEEAgGwsQUGTjm8ulIYAAAghkPQECiqx3zxgxAggggAACCCCAAAIIIIAAAghkToCAInNuHIUAAggggEBEBAgoIsJKpwgggAACCCCAAAIIIIAAAggg4EEBAgoP3hSGhAACCCAQvwIEFPF777lyBBBAAAEEEEAAAQQQQAABBOJNgIAi3u4414sAAggg4GkBAgpP3x4GhwACCCCAAAIIIIAAAggggAACYRQgoAgjJl0hgAACCCBwvgIEFOcryPEIIIAAAggggAACCCCAAAIIIJBVBAgossqdYpwIIIAAAnEhQEARF7eZi0QAAQQQQAABBBBAAAEEEEAAASNAQMGfAQIIIIAAAh4SIKDw0M1gKAgggAACCCCAAAIIIIAAAgggEFGBiAUU25MORHTgdI4AAggggEB2FEjce1hqViiZHS+Na0IAAQQQQAABBBBAAAEEEEAAAQQCBCIWUASchTcIIIAAAgggkCEBZlBkiIlGCCCAAAIIIIAAAggggAACCCCQDQQIKLLBTeQSEEAAAQSyjwABRfa5l1wJAggggAACCCCAAAIIIIAAAgikLUBAkbYPnyKAAAIIIBBVAQKKqHJzMgQQQAABBBBAAAEEEEAAAQQQiKEAAUUM8Tk1AggggAACwQIEFMEivEcAAQQQQAABBBBAAAEEEEAAgewqQECRXe8s14UAAgggkCUFCCiy5G1j0AgggAACCCCAAAIIIIAAAgggkAkBAopMoHEIAggggAACkRIgoIiULP0igAACCCCAAAIIIIAAAggggIDXBAgovHZHGA8CCCCAQFwLEFDE9e3n4hFAAAEEEEAAAQQQQAABBBCIKwECiri63VwsAggggIDXBQgovH6HGB8CCCCAAAIIIIAAAggggAACCIRLgIAiXJL0gwACCCCAQBgECCjCgEgXCCCAAAIIIIAAAggggAACCCCQJQQIKLLEbWKQCCCAAALxIkBAES93mutEAAEEEEAAAQQQQAABBBBAAAECCv4GEEAAAQQQ8JAAAYWHbgZDQQABBBBAAAEEEEAAAQQQQACBiAoQUESUl84RQAABBBAITYCAIjQvWiOAAAIIIIAAAggggAACCCCAQNYVIKDIuveOkSOAAAIIZEMBAopseFO5JAQQQAABBBBAAAEEEEAAAQQQSFGAgCJFFnYigAACCCAQGwECiti4c1YEEEAAAQQQQAABBBBAAAEEEIi+AAFF9M3j4ozztv0iczYvkeW7N0ri0X32mhMKFJf6pavKxZUbSesKF8aFAxeJAAIIhCpAQBGqGO0RQAABBBBAAAEEEEAAAQQQQCCrCmSpgGLVuk0+5zo1qvhe88I7Auv2bZPXFn4hyxLXpzmoBgnV5damPaRG8QpptuNDBBBAIN4ECCji7Y5zvQgggAACCCCAAAIIIIAAAgjEr4DnA4qJk2aI/kttu6pbB9F/bLEXmG9mTYyYNU7k1wyO5QKR4e17S6sIzKaYv3K3/LJ5v/RsX1UK58+dwQElN/v0+81y4vQZ+csl1UI6LtqNpy3eKYvWJclt3WtLoRCvMdpj3ZR4WL77eYe0b1BGapYvEvLpj588I9uSjsqJU2eketnCki9PzjT7SNx/XJIOHpdKpQuma3P27K+yfe8xKVowd8h/K2kOgg8RyKQAAUUm4TgMAQQQQAABBBBAAAEEEEAAAQSynIBnA4rgYKJuzaoBuCvXbgx4T1ARwBH1NzpzYuCkFzIeTrgRmpBiTLcBYZ9J8dJnK+T9GRvl/Qc6SPmSBdzZMvT7pqdnyZHjp+WT4R0z1D5WjXo9OVO27D4io25pJm3qJcRqGBk67+xlu2TomwtleK/G0rVZ+Qwd4xp98+M2ee5/y+Xo8TNul3RrXl4GXdNA8ucNDCqmL9kpL0xcIbtNQOG2P7asKHdfVU8K5svldtnfGno8+8ly+XrBNt/+8iXzy6A/N5CWdUr59vECgWgLEFBEW5zzIYAAAggggAACCCCAAAIIIIBArAQ8GVD4hxMaTGj4kNqSTv5tCSli9Wckcv/UV9Nd1im10elyT092vj21jzO1Px4CiqUb9smKLQfkqjaVJG/uwAf1mUKL4EGZDSh0FsztY+ZKgXw5pUerSlKzXGH5zswcmbdit7SoXVKevb2lb9Q6o+ShtxfZttd3qCZliueXmUt3yZzlidKwWnEZ27+1XGACMbf1e2meqOElDctICxNI7D98Ut6bvt4GIS/1byWNq5dwTfmNQFQFCCiiys3JEEAAAQQQQAABBBBAAAEEEEAghgKeCyj8A4ch/XsHBBOp1aDQ/U+NHWcZCSmi/9ekBbEfn5nsn9mzP3hJ77AWzo6HgCKz1rE4LjMBxbETZ6TP6FmyPemYfDCsg5QrkTwT5lezhNgDb/0ks5clyisD2kj9KsXsJbkZJW8Outi3jJS2HfXhUvli/lZ57G9NpGPjsrbtShPs3Pr893JV28ryf3+u7yP5cfUe+cerC+SadlXkH3+ikLsPhhdRFSCgiCo3J0MAAQQQQAABBBBAAAEEEEAAgRgKeCqgcOFE8KwJDSD0M/9lnYLbqOGof71t2xBSRPcvavTcCTJt46LzOmnHqk1kUJvrQ+5DH0BPnLdZZpqlfRasTpLaFYtI78tqypINe1Nc4kmXAJq2eIfMMzUqKicUkrb1Ssu1pk6Ffw0Ht8TT8+bb+S9/uVIWrk2ShGL5bf2E60zbYoXyBIxzz8ET8vHsjfLjmiTZbGotNKlR0iy5VFqubF3Z9439HXuP2iWOrmxdSXKYr/F/uWCrrNx8wDde/Ra///bDqj3y32nrZeWW/ZI3V06pX7WYva66lYr6mr317VrR63mpXys7fv9zHD52WnRpJF0Catbo7r5j0nqR3nVsNzUgHnhroVzevILoDAW3vWiWVPrJGD15czMpa2Yt6JZkTAa9tkA6X1RObupcw4QJoS/xtGjdXhnwr/ny96415eZutdzp7G+tRaHLcOl9y5Mrhz3f1Y9+J20uLC2j+jYPaLtuxyHp88zsgM9Wbz1o7tce6dCorFTwWwJMa1f8ecQ0G2RooMGGQCwECChioc45EUAAAQQQQAABBBBAAAEEEEAgFgKeCihuvvcxa/DGsw8FWLjgQXdqMOGCCn09uN/ffG39Z1IEz77wNeJF2AVu/uxJSTyy77z6TShYXN648v6Q+5gwY4OM/WylPa5dgwTZYYodr9t+yNSdyG+/ee9fg2Lqoh3yyPif7RJALWqXkk2JR2TjzsPSsm4p+1A7Z47k9X80oEjcf8wGA9pxk5olZOnG/bauQdNaZlmh21qIa6sPyfuPnWfPWbVsIalWppDMX7XbLhPUxzxY7/vbg/VNuw7LjaNmSeli+Ww/F9dPsEWf9fy6+c8E0NBh+LjkpYp0nHsPnbRLEWm7dwa3l6rmHLo99UHyzIAvHutsCjznEXeO4iZA2WeWK9KwpnTRfDY4sAek8SMj16HFpLsP/9ZcY2E7Xu3ujNn3B7NP60Pc/5eGovUedHPXMPwGU3OiaflMBRSfz9tiZj8sk3/d1Vry5M4hc82yThqiNKp
aXPQ+lCqS155Lf2w0vjcZX61voXUu/Ldd+45Jz8en27+J9x+41P+j371+c/IaeWPSWhl8XQO5wiwpxYZALAQIKGKhzjkRQAABBBBAAAEEEEAAAQQQQCAWAp4JKNzsieDZD/6hg39w4cKM4CDC9RMcXsQCN17O2WPCkNCLYwfjmGzgi+ufCt6b5vute47KX0fOEH0g/+agdlLytwfW78/cIC9NTA4tUiU8wgAAJeFJREFUXECx+8BxueaxafYh9asD2vpmQTz/v1/M7IdNcu+fL5Q/ta1iz6cBhQYH/mGEztR4cNxCW9PgHrP0z5/NEkC6PfPxcpn4/Wa5pXstO8NB92nx5TtenGtDi7Hm4XojU//AhQf6+ev/aCt1KibPhHjfBCwvmYDlTxdXlnuvSV5qSMe0ZvtBGfqXRlKxVPKyRp+Zh/VPm4f1d/aoI706VtduUg0o9DN9qK91FzK6ZfQ6Rry7WCb/tF0m/7OrLVC9fNN+ueOFufY0nS4qK4/elDzrQGdVfDBzo3z6cCd7XzIzg2Ls5ytlwvQN0rtLTRlnZov4b1qT4sV+raV2hSJ291lzg7o/+K0NlSaYwugF8p4riD1x7mZ55qPltt30py/3hUuuv3FT1tp7tsTUo1iyfp+9twOurmdnurg2/EYgmgIEFNHU5lwIIIAAAggggAACCCCAAAIIIBBLAc8EFC5w8A8hFMYFFMGBg5tVERxQ6DGp9aWfsYVfIFYBhZsREfxtdw0Tbnhqpl3eyAUUM5bulAffWmS/Xa/fsnebzhy4fNi30qVpOXnohovsbhdQ/HfIJWYZqIKuqf32/p/MMkKdm5STR25Mbqt1Dw4fOyWfPtIp4IG2Lnl0z8s/+AIFF1DozAldCsltumzSX/45w87iGH1rC7f7d7/3HjohVz3ynXRvUUEeuL6R/Ty1GRRaZ8F/eaJTZ87KgSOnftdnbrM0UtECue3+jF6HhhMaUjx/Z0tpVrOk6MP9d81SVFe1qSwaBHw1oosNAPqMnm37fcsER7qlFFDobIiUNjczYuibyXUmtM11HaraZaW0GPi3C7eLhjgaUnwyvJMUzJccRjxn9n1iwiad8XL7H+rYYGS+WcrLhRPaz7cju0q+PIEFxdsP+lo/spvOgulnQqBWdUpLjt9m1LjP+I1AtAQIKKIlzXkQQAABBBBAAAEEEEAAAQQQQCDWAp4JKFzgEK4ZFMH9xBo6O58/Vks8vfLlKvnvd+vl3wPbSL3KyYWSnbP7pr8LKF7/ZrX5Fv46+3HdyufqOOgOrQWhszA+e7Sz/dwFFDPMt+2DH1J3GzZZipnllHSpIBdupFT3QJdYuvLhqeJmFbiA4oZO1eWOP9ax53E/9AG5PlR/4c5WbpfMXp4oi9YlyVqzXJUGIFozQbduzcvLg39NXsIotYDipstqyG3da/v60nob9732o++9e6HFpXVpqVCuw81E6Xt5LeljZjbo7IkyxfPJNRdXkbvGzrf9VSpdUP44fIrc2Lm6DQr0fMEBhTunG4v/bxci/HPCEvl6wbYUl2Z68TMzQ2PGRhltlttqWaeUPfz0mV9l+NsLzbkS/buz90DDkC1mSS93j/0bnDp9Vk6af4vX7xWdqTLH2AcXz/Zvz2sEIi1AQBFpYfpHAAEEEEAAAQQQQAABBBBAAAGvCHgmoHAzJRQmeFaECy/0M51JkVoNCv2c2ROqEN0tVkWy3UPqNwddLDXLJy/1467cPbx3AYVrq+FEzXKFXTP7e5lZpqilqfVw91X17HtXg2LSE10D2ukbDR100wfdB46clB4PTRUtcP1En6Z2v/uhoUL3B6eImzHhAorg8EDbBwcUz36yXP43Z7PtqkXtkiYAyG9rPejD+swEFFv3HJFJZuZD8JZg6lNcYYp2h3Id2of6aKCj1/wHc40PXN9QLjN1JjoNnmSXutJllwa//pOMMbMsmppZFroFBxQ6q+PtKcmBkW3g90OLnOfKeYGdnfH612tEa4uM/Pu5WSfadO6KRHsOF5S4w7VOhi6PtXDtXjltznGhCa6amXoVGizVN6+fNYXP09oOHD0lPUy4opsLStJqz2cIREKAgCISqvSJAAIIIIAAAggggAACCCCAAAJeFPBMQKE4LogIXs5JwwutLeGCCW2rbXSWRJ0aybUA/I9n9oRqRG+bt+0XeXzmuPM64YOX9JbWFS4MqY+vF2yVf05Yapdb0mWX/Le+z82xsw5cQPHNj9vkifeW2IfqGiiktbkZFJ881NEWmXZtXejg/8D8mhHTJLd5mB5cfPmXzfvl9jFzxT1Az2hA4R6Q6yyEN+692Lcckau3kZmAwo0/rd8ZvQ7tw81cefjGxvLo+MXinB56e5FdBqtx9eIyfup6mfpUN8ljlpHSLTigsDvT+TFl0Xbbv/9MDHfIV+bejzT33i3vpct6bU48LHnN8k1lTaDjv60w9+I2cy+0bojWD9Htyx+2yuptB+UOsxRU/ryBSz4NeHm+LDIBh7su/754jUA0BAgooqHMORBAAAEEEEAAAQQQQAABBBBAwAsCngooFMSFFPo6eCaFBhVu8w8m/AOM4HDDted3ZAXun/qqLEtcn6mTNEioLk92vj3kY/UBc99n54jWDXhtYFvfw3z3MFw7dAHF+h2HpPczs23h6OdubyFay0A3XfrnJbNcUA0zq+KmzjXsPhdQ9GhVUYZc19Du0x+uaLN/QeyH31kk3/28U/Rh/WVNkmtb6Lf4h7zxk8xbsVtG3dJM2tRL8BXJTm8GhQsyWtYtJf41KV79apV96B+pgCKj16EOP60x9TVe+UE0RMlpwpl37muvu8WFBuVL5pcqZQrJqL7N7X794e7J8F6Nxb8GiK9BCi90GahrRnxnC1//x4Q1pc2MD92Onjgtd/9rvg2gxg9pL1USComa//XJGbI96Zh8+OClvpBCi2f3e3GeaDFvXc5Kl7XS7VNT2Hy0KXCus2auu6Sq3ac/du07Jj0fn57islK+RrxAIMICBBQRBqZ7BBBAAAEEEEAAAQQQQAABBBDwjIDnAgqV8Q8p0poN4R9M6HGEE6oQm23dvm0ycNILIuab7CFtF4iM6TZAahSvENJhrrE+ZNaHzRpSXNqorOzef9x+O14LKB89fsYXUGh7983/GuUL27ZnzEPtieZYrRcx8u9NzVJCyTMrXEChx2hQ0LBqcVm19YCtbaAP5V+7p62vMPPOvcfMt/O/t31oeFCxVEH5YdUeWbphny2m/dANjW3xbBc8pBdQ6EyAqx6Z6uuvdoWitjbCzKW7dDiZWuLJHpjOj4xeh3Zz/OQZ6TJ0su3Rf3aDe7ivH/zjmgttXQrbyPzITEChx35qCm+P/mi5lC6WT/7UtrINoXQ2jNbk0ILhQ//SSC4wf0O6uVkyuvxUL1PrQ5d4+uqHbbZYep+uNaVvt1rJDc1Pvee9zVJV+lvrhDSqVkKSTCHyL0wNCt03+NoGdvkr3wG8QCCKAgQUUcTmVAgggAACCCCAAAIIIIAAAgggEFMBTwYUKqJLOuk//00DCJ054WZS+C/5lFaQ4d8HryMnMN8s9TRi1r
iMhxTmwfLw9r2lVYhLO/lfgRY4funzlTLt5x32wbJ+dtsfastBU0tgwvQN8uEw8236EslL/ui36d+cvFamLtphH1prW/22/5096trAQt/r1mf0bHv8sOsbyROmULOGHrppPYl+pm3lhIL2vfuhMznemLTGFlfWffowvb0JO/pfUde3xNFmU6D5hqdmSu8uNeSWy88VsNb2WoOiqamTMOaO5PoIGmY8PP5nWWcKZOumoci95oH/P15dYB/KP2DGpduoD5fJ5+aB+hcjLpOiBXKbJY5SP4c9IJ0fGbkO18V9r/9oZ4i80K+VNKlRwu229Sk27jws4webmQ1mFoXbtOj3UDOrZLgJbLqaehWhbG4pL3eMhk/dm1e0sx9y5vgtnfjtw+C2uluDjPtM4JA7Z/JyU66fvSaQUEMtiu02tb7VFBjv2Lis28VvBKIuQEARdXJOiAACCCCAAAIIIIAAAggggAACMRLwbEDhPFIKKtxn+ptgwl8j9q91JsVrC79Id7knXdbp1qY9Mj1zIvhKNXxIMss1FS+U1xZYDv48+P1+8y15XZ6ocP7cwR8FvNcZDfsOn5ACeXP5lpAKaOD35vSZX02wcVJKFM7rtzfzLw+ZQtvap84IiOYW7usIx9i1qLYGMLpVNcFHcDDhfw6dGbNl9xE5ZEKqCqUKpHs/tP1Os7RTERPypPf34H8eXiMQKQECikjJ0i8CCCCAAAIIIIAAAggggAACCHhNwPMBhT+Ymzmh+/xrUPi34bU3BLRw9pzNS2T57o2SeHSfHVRCgeJSv3RVubhyo5ALYnvjqhgFAgggEHkBAorIG3MGBBBAAAEEEEAAAQQQQAABBBDwhkCWCii8QcYoEEAAAQQQiJwAAUXkbOkZAQQQQAABBBBAAAEEEEAAAQS8JUBA4a37wWgQQAABBOJcgIAizv8AuHwEEEAAAQQQQAABBBBAAAEE4kiAgCKObjaXigACCCDgfQECCu/fI0aIAAIIIIAAAggggAACCCCAAALhESCgCI8jvSCAAAIIIBAWAQKKsDDSCQIIIIAAAggggAACCCCAAAIIZAEBAooscJMYIgIIIIBA/AgQUMTPveZKEUAAAQQQQAABBBBAAAEEEIh3AQKKeP8L4PoRQAABBDwlQEDhqdvBYBBAAAEEEEAAAQQQQAABBBBAIIICBBQRxKVrBBBAAAEEQhUgoAhVjPYIIIAAAggggAACCCCAAAIIIJBVBQgosuqdY9wIIIAAAtlSgIAiW95WLgoBBBBAAAEEEEAAAQQQQAABBFIQIKBIAYVdCCCAAAIIxEqAgCJW8pwXAQQQQAABBBBAAAEEEEAAAQSiLUBAEW1xzocAAggggEAaAgQUaeDwEQIIIIAAAggggAACCCCAAAIIZCsBAopsdTu5GAQQQACBrC5AQJHV7yDjRwABBBBAAAEEEEAAAQQQQACBjAoQUGRUinYIIIAAAghEQYCAIgrInAIBBBBAAAEEEEAAAQQQQAABBDwhQEDhidvAIBBAAAEEEEgWIKDgLwEBBBBAAAEEEEAAAQQQQAABBOJFgIAiXu4014kAAgggkCUECCiyxG1ikAgggAACCCCAAAIIIIAAAgggEAYBAoowINIFAggggAAC4RIgoAiXJP0ggAACCCCAAAIIIIAAAggggIDXBSIWUGxPOuD1a2d8CCCAAAIIeE4gce9hqVmhpOfGxYAQQAABBBBAAAEEEEAAAQQQQACBcAtELKAI90DpDwEEEEAAgXgQYAZFPNxlrhEBBBBAAAEEEEAAAQQQQAABBFSAgIK/AwQQQAABBDwkQEDhoZvBUBBAAAEEEEAAAQQQQAABBBBAIKICBBQR5aVzBBBAAAEEQhMgoAjNi9YIIIAAAggggAACCCCAAAIIIJB1BQgosu69Y+QIIIAAAtlQgIAiG95ULgkBBBBAAAEEEEAAAQQQQAABBFIUIKBIkYWdCCCAAAIIxEaAgCI27pwVAQQQQAABBBBAAAEEEEAAAQSiL0BAEX1zzogAAggggECqAgQUqdLwAQIIIIAAAggggAACCCCAAAIIZDMBAopsdkO5HAQQQACBrC1AQJG17x+jRwABBBBAAAEEEEAAAQQQQACBjAsQUGTcipYIIIAAAghEXICAIuLEnAABBBBAAAEEEEAAAQQQQAABBDwiQEDhkRvBMBBAAAEEEFABAgr+DhBAAAEEEEAAAQQQQAABBBBAIF4ECCji5U5znQgggAACWUKAgCJL3CYGiQACCCCAAAIIIIAAAggggAACYRAgoAgDIl0ggAACCCAQLgECinBJ0g8CCCCAAAIIIIAAAggggAACCHhdgIDC63eI8SGAAAIIxJUAAUVc3W4uFgEEEEAAAQQQQAABBBBAAIG4FiCgiOvbz8UjgAACCHhNgIDCa3eE8SCAAAIIIIAAAggggAACCCCAQKQECCgiJUu/CCCAAAIIZEKAgCITaByCAAIIIIAAAggggAACCCCAAAJZUoCAIkveNu8PetbqRTJt5QJZsmW17Dywxw64bNFS0qhSbelYt4W0r93E+xfBCBFAAIEYCBBQxACdUyKAAAIIIIAAAggggAACCCCAQEwEslRAsWrdJh9SnRpVfK954R2BVTs3yQtT3pXFm1elOajGlevIgMt6SZ2y3Mc0ofgQAQTiToCAIu5uOReMAAIIIIAAAggggAACCCCAQNwKeD6gmDhphui/1LarunUQ/ccWe4HZaxbJ/R+OCWkgT147UNrVytxsiuMnz8h709dLrfJFpF2DMiGd93wbr952UP43Z5Nce0lVqV628Pl2x/F+AvNW7pYVm/fLte2rSqH8uf0+Ob+Xv/4qkrj/mOTOlUNKFM57fp1xNAIRFCCgiCAuXSOAAAIIIIAAAggggAACCCCAgKcEPBtQBAcTdWtWDYBbuXZjwHuCigCOqL/RmRN933g4U+f9z82PZmomxYEjJ6XHQ1OlR6uKMuS6hpk6d2YPeu5/v8gnszdJr47V5c4edTLbTVwft3PfMUk6eEJqlCss+fLk9Fm8+NkK+WDGRvlgWAcpV6KAb39mX5w6c1Ze/XKVfD5/ixw9fsZ2U7xQHrmhc3X5yyXV0uz26Y+WyWdzt9g27wxuL1XLFEqzPR8iEA4BAopwKNIHAggggAACCCCAAAIIIIAAAghkBQFPBhT+4YQGExo+pLakk39bQorY/cn1Hz8y3WWdUhudLvc09sahqX2c6v5YBhQ79x6TqT/vkC5Ny0tCsXypjpEPUhcY+/lKmTB9g7w56GKpaWbBuC3cAcWwtxbKzKW7pGG14tL5onJy8vRZ+XzeFtmy+4j07lJDbrm8tjt1wO+f1ibJPS//4Ns37r52zJbxafAikgIEFJHUpW8EEEAAAQQQQAABBBBAAAEEEPCSgOcCCv/AYUj/3gHBRGo1KHT/U2PHWVdCiuj/eWlB7KEfhba0U/AoR/YcGHLh7FgGFMHj533oAtEIKHSGxtWPfif1qxSTsXe1lpw5LrADPXL8tPx15Aw5cfqMfDWii2+/uwpdPqz3M7Pk2IkzdvkwDTQIKJwOvyMtQEARa
WH6RwABBBBAAAEEEEAAAQQQQAABrwh4KqBw4UTwrAkNIPQz/2Wdgtso6Kh/vW3bEFJE98/rsc/+LZOXfX9eJ+3aoK08dOVtIfWRUkChS/Ks235IXhnQJqCvT0y9iM/MQ+aRf29qlw3asfeoDH1zoVzZupLoEkATv98iSYeOS5t6CXLP1RfK3sMn5OUvVsm8FbulfMn80rFxObmte23J8dsD7rkrEuXVr1bLQNO2SY0S9lwDX/lBShfNK39sWUnembJWFqxOssf2aFXJLgXlHo5r49NnfpWPZm80bfbIDyv32G/3t6xTSnq2q+Kru3D27K9y83NzpFmtknL3lfUCrufx9xabegrH5YU7W/n2r9txSF7/ZrUs37jfPnhvVK2E3NytltSrVNTXJr0XM5bulDcnr7Xnm7Zkp8w0/3S7sk1l+XvXmjLXeLw5eY2s3npQ6lYuKte2qypdm5UP6HaPCQU+Ntf245ok2Zx42PiUNK6ljXVlucDkA0dPnJZ+L80TvQe65JL65s+bS+43y3TVNWN1Myj+fU9b24/65DJ1I5obh7uMQ5ECGa9LsXb7Qfn76DnSs30Ve6/8B6r3a6EZ4zdPdJGC+XL5fyT//nq1uYfr5LG/NZE1pg99TUARQMSbCAoQUEQQl64RQAABBBBAAAEEEEAAAQQQQMBTAp4KKG6+9zGL88azDwUgueBBd2ow4YIKfT243998bf1nUgTPvvA14kXYBXqO/T/ZeWDPefVbtmgp+aj/MyH1kVJA4R46zxrdPaAv98DZ1RHYtOuw3DhqlpQ2yzMdOX5KWtctLet3HpaN5l+j6sVFH2wnFMtvCnAXlh9N0LDv8Em5pXst6X1ZTdvv5J+2y4h3F9vAwxXo7jZssu+cumRRXvNQXUMK3bROhdar0E2LNT/1wVL58oetorUQmtQsISu3HJDtScekqXkI/8ytzSV3zhxyxgQUl973jfkGf4I5TzN7rPtx65jv7cP/SU90tbvW7zwkvZ+ebV9fY0IOvaZZy3bZAOCt/2tn6zy4Y9P6rTMFRn24zLpou4uMhbv+SxqWsUsl6RgL588lM5bssl292K+VXPRbSKMzE/qPnWdDoqplC0k1U7Nh/qrddhx9TMDR1wQmOjvhmY+XyS+bDthlllqb8KJowdxyU6caUsW0dwGF3pvcOS+QOhWL+vrQmRAv393GBh1pXYf7TK2vHzndzoTQ0Kp8yeSaFr+YIty3j5mboq3729BxPX1Lc19YQUDhVPkdaQECikgL0z8CCCCAAAIIIIAAAggggAACCHhFwDMBhZs9ETz7wT908A8uXJgRHES4foLDC6+AZ8dxtPtnn7Bc1uwH3gqpn3AEFAXy5ZTXBl4slRMK2pkUd4+dL8s37Rd9GD/CfHteZ0xovYlrn5hulwlyMzNSCyh0RsCgnvXlajPjQLcVJni47fnv7SyB9x+41O6bumiHPDL+Z2lRu6Q8fWsL3/JCGgxoQHDbH2rLTZ1rhBRQ6KyGNyatNf01t2GLnmjJhn3yiikOrTMc3HjsANL44QKK2hWLyNj+rW3xap0R8SezTJJud11V11dYesqi7fLo+MWmjkNNU8ehlv38mY+Xm9komwPCHA0k7nhxrg0tdJmlRqYWhG7pLfHUsm4pefLmZjas0T40lNEAKdTi2au3HbRhkh6r5qdN8LNo7V47A+TB6xvZUMQO6Lcf9776gw2W3hvaQSqWKkBA4Y/D66gIEFBEhZmTIIAAAggggAACCCCAAAIIIICABwQ8E1C4wME/hFAfF1AEBw5uVkVwQKHHpNaXfsYWfoGsHFB0aVpOHrrhIh/KW9+ulf98sybgQb9+2OvJmXYJKDdjIa2AYsbTl/uWgtJjrxkxTXab5ZhmPGP2mzWOnvvfL/LJ7E3y3yGX2GBE2+imD+G7DJ0s7pv7ocygcOO+sXN16WMCg7y5cyZ3GvRTw4aUtlJF8trdLqAYfG0DucIsf+U2nRWxZP2+gOWQ9h46IVc98p0pFH7OUJ0OHzslnz7SyV6rO94VnPafSZJeQOEftmg/r361SsZPXS+jbmlml+LSfeldj7ZZtnGfjJm4QlZuPqBvfZvOTOl/RT0bQrid082SVsPHLRI320P3u9k3zKBwSvyOtAABRaSF6R8BBBBAAAEEEEAAAQQQQAABBLwi4JmAwgUO4ZpBEdyPV8Cz4ziy8hJPN11Ww9aWcPflv9+tt7MO/j2wjdSrXMzttt/e13oK6QUUFUsVlP/842Lfcfpi6Js/yexliTLdBBdah0JnAujD8uAgQ9ve9PQsU1vimHzzeFc5a9YnyugST9uSjsqdL8y1S1FpP/rwve2FCXJpo7JmOabcusss+3RaLh/2rX0d/OPbkV3tbAkXUIy+rYVoTQy3DXptga2V4b90lhaQ7vrAZF9A4fpvc2FpGdW3uTvU/tYlsq58eKp0uqisPHpTE7svvYBiwgMdpMJvSzLpAW7myYjeTex1ufPZzoJ+uOvZnHhEbnhqpuhMmcE9G9oltE6dPmuXv3reBEW6jNQ4swSWGmlA1Mu0PW0+/2DYpdZDuyWgCMLlbcQFCCgiTswJEEAAAQQQQAABBBBAAAEEEEDAIwKeCSjcTAl1CZ4V4cIL/UxnUqRWg0I/Z/aEKkR381KR7FBrUIQ7oKicUMgsGdU24AYEBxT+IYQWjfbf+j43xxag1vDClE/IcEChfeiSV1N/3iEzlu6yxZ91nz6Yf7Ffa6ldoYhdwuptU+w5pU1ra+Qy9R7OJ6BwS27p8lhP9GkacBqdVdH9wSlycf0Eu2yTfpheQBG8lFNwQKHFzdO7HhcuaLHrjo3LBozJzToZ3quxXQZrwowNMvazlXbmSCe/tp/P3yLf/bxTBv25vlQyS4E1q1kyoB/eIBBuAQKKcIvSHwIIIIAAAggggAACCCCAAAIIeFXAMwGFArkgQkOI4OLXWlvCBRPaVtvoLIk6NaroW7u545k94USi83vW6kUy9KMx53WykT0HSvvayd+sz2hH7oF4j1YVZch1De1hrn7AF491NoWX8/i60qLUX8zfKsFFsmMRULixfPJQRyldNJ9vjG5JJy3SrfUfzppaCR1MkWwtNv3Ofe197fSFLhulhbDdjI6AD80bnV3w3vQNMs4sWeU/ayG4XfD78wkotC8dlxa2dvU2XP+uKHXfy2vZJah0//kGFK7vtH4//M4iGy7oLInq5QoHNHXLObkxuaW3AhoFvdHAJzXzoKa8RSDTAgQUmabjQAQQQAABBBBAAAEEEEAAAQQQyGICngoo1M6FDPo6eCaFzrJwm38woftdgBEcbrj2/I6sQP/xI2Xx5lWZOknjynVk7I1DQz42pYBizKe/yEezNtlv8Os3+XU7ZL693/PxaaIFrL0QUEycu1me+Wi5XNmmktzXs4Hvut3yUjd0qi53/LGO3a81HbbsPiITTU2HEoWT60RoTYU7X5xnZ0e4h+VvTFpj6zHcbo4rWiB5SSe3rJKraeE7URovzjegcIHAwzc2lsualLdn0qBlyBs/ybwVuwPqR/zHjPmtyWvFLdnkhvXiZyvkgxkb
f1cMO3gGhWuf1u9PjfVoY33dJVXlrivriZuxoss8DRu3UOb+slte6NdKmtQoIet2HJIdZqms4O3LBVvtEl13X1XP1gxpXbd0cBPeIxBWAQKKsHLSGQIIIIAAAggggAACCCCAAAIIeFjAcwGFWvmHFGnNhvAPJvQ4wglViM22aucm6fvGw5k6+X9uflTqlD03EyajnaQUUMxetsvUfFhou9Aiz+VK5DczJ7bI9qRjdp8XAoqT5uH4/5maDovW7pWWdUtJo2rFZd32QzJt8U6pVLqgvDKgjRT5LWRwD+uLF8ojPcz16MN+DTJ08/82/7gpa+X1r9dI7YpF5PLmFSSfKZL98ZxNtt8HezWSbs0q2GPS+3G+AcXOvcfkNlNjQ8ORbs3LmwLUBeWHVXtk6YZ90rmJFtNu7Cue7YIWvbY/tKwoV7epLGXN/XLXnN4ST+ldi36uhbxvef57W6S8aa2S0tz805kqOntCzetWLiov39XGLm+VWn+uODdFslMTYn+4BQgowi1KfwgggAACCCCAAAIIIIAAAggg4FUBTwYUiqUzIvSf/6YBhM6ccDMp/Jd8SivI8O+D15ETmL1mkdz/YWhLPT157UBpVyu0pZ3cFRw4ekp6DJ9iawYMvvbcTITx362Td6auszMmtG33FhWkmHkI/t60DTJ+SHupYupEuOLJvbvUkFsur+26lHenrZeXv1glr93TVupWKurbf4cpQL1h1yHf8j6TF26XEf9dLCNvbibtTF0F3boNmyzVyhS2AYPvQPNi2FsLZaapC+GKZOtnOqvjxYkr7KwCfZivYUOrOqXlzh51TKhSwHe4ftP/iQlLbIFot3PA1fXse//x6EN3veb3pq/3XbcWgL7VLKnUvUVFd2i6vzXMeeqDZZKRItlaVLrL0Mm2foPWcXDb6m0HRWd0zFmeaHfpONo3KCP9r6greXLlcM1s2PL0R8vs0lu6c8ydLaWpqe/wkplB8b6ZQfHhg5dK2eL5fe21vsYj7/wsj/dpIh0aBtaT8DVK4YX+nTz78TKZt3K3z0ZDkU4XlZN+QWNK4XBxAYULt1Jqwz4EwilAQBFOTfpCAAEEEEAAAQQQQAABBBBAAAEvC3g2oHBoKQUV7jP9TTDhrxH71zqT4oUp76a73JMu6zTgsl6ZmjmRkav89VcxSx4dl8L5c0u+PDkzckhM2ug4k8y3/EuYB+Y5cgRVzPYbkRaE3m+CjOKF8qb5bX89RAMPLXit1x7L7fSZX+Xg0ZO+palSG4u2O22uL9L3Sa0T9x+TnDlzSKkiyctlpTYm9iMQSwECiljqc24EEEAAAQQQQAABBBBAAAEEEIimgOcDCn8MN3NC9/nXoPBvw2tvCGjh7GkrF8iSLatl54E9dlBli5rljCrVlo51W4RcENsbV8UoEEAAgcgLEFBE3pgzIIAAAggggAACCCCAAAIIIICANwSyVEDhDTJGgQACCCCAQOQECCgiZ0vPCCCAAAIIIIAAAggggAACCCDgLQECCm/dD0aDAAIIIBDnAgQUcf4HwOUjgAACCCCAAAIIIIAAAgggEEcCBBRxdLO5VAQQQAAB7wsQUHj/HjFCBBBAAAEEEEAAAQQQQAABBBAIjwABRXgc6QUBBBBAAIGwCBBQhIWRThBAAAEEEEAAAQQQQAABBBBAIAsIEFBkgZvEEBFAAAEE4keAgCJ+7jVXigACCCCAAAIIIIAAAggggEC8CxBQxPtfANePAAIIIOApAQIKT90OBoMAAggggAACCCCAAAIIIIAAAhEUIKCIIC5dI4AAAgggEKoAAUWoYrRHAAEEEEAAAQQQQAABBBBAAIGsKkBAkVXvHONGAAEEEMiWAgQU2fK2clEIIIAAAggggAACCCCAAAIIIJCCAAFFCijsQgABBBBAIFYCBBSxkue8CCCAAAIIIIAAAggggAACCCAQbQECimiLcz4EEEAAAQTSECCgSAOHjxBAAAEEEEAAAQQQQAABBBBAIFsJEFBkq9vJxSCAAAIIZHUBAoqsfgcZPwIIIIAAAggggAACCCCAAAIIZFSAgCKjUrRDAAEEEEAgCgIEFFFA5hQIIIAAAggggAACCCCAAAIIIOAJAQIKT9wGBoEAAggggECyAAEFfwkIIIAAAggggAACCCCAAAIIIBAvAgQU8XKnuU4EEEAAgSwhQECRJW4Tg0QAAQQQQAABBBBAAAEEEEAAgTAIEFCEAZEuEEAAAQQQCJcAAUW4JOkHAQQQQAABBBBAAAEEEEAAAQS8LhCxgGJ70gGvXzvjQwABBBBAwHMCiXsPS80KJT03LgaEAAIIIIAAAggggAACCCCAAAIIhFsgYgFFuAdKfwgggAACCMSDADMo4uEuc40IIIAAAggggAACCCCAAAIIIKACBBT8HSCAAAIIIOAhAQIKD90MhoIAAggggAACCCCAAAIIIIAAAhEVIKCIKC+dI4AAAgggEJoAAUVoXrRGAAEEEEAAAQQQQAABBBBAAIGsK/D/X46oT1m0J2MAAAAASUVORK5CYII=" + } + }, + "cell_type": "markdown", + "id": "8a991198", + "metadata": {}, + "source": [ + "![image.png](attachment:image.png)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "pquant", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.12" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/pyproject.toml b/pyproject.toml index 4db9f20..33c6a9b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,10 +4,9 @@ build-backend = "setuptools.build_meta" requires = [ "setuptools>=46.1", "setuptools-scm[toml]>=5" ] [project] -name = "pquant" +name = "pquant-ml" description = "Pruning and Quantization of ML models" readme = "README.md" - license = { text = "Apache-2.0" } authors = [ { name = "NGT 1.3 team" } ] requires-python = ">=3.10" @@ -24,7 +23,7 
@@ -24,7 +23,7 @@ classifiers = [
   "Topic :: Software Development :: Libraries :: Python Modules",
 ]
 dynamic = [ "version" ]
-dependencies = [ "keras>=3", "pyyaml>=6.0.1", "quantizers>=1.1", "torch>=2.1" ]
+dependencies = [ "keras>=3", "pyyaml>=6.0.1", "quantizers>=1.1", "torch>=2.1", "pydantic>=2.0"]
 urls.repository = "https://github.com/nroope/PQuant"
 
 [tool.setuptools]
diff --git a/src/pquant/configs/config_ap.yaml b/src/pquant/configs/config_ap.yaml
index 17c9c02..2ea7b03 100644
--- a/src/pquant/configs/config_ap.yaml
+++ b/src/pquant/configs/config_ap.yaml
@@ -27,7 +27,7 @@ fitcompress_parameters:
   optimize_pruning : false
   greedy_astar : true
   approximate : true
-  lambda : 1
+  f_lambda : 1
 training_parameters:
   epochs: 100
   fine_tuning_epochs: 0
diff --git a/src/pquant/configs/config_autosparse.yaml b/src/pquant/configs/config_autosparse.yaml
index 2e7854b..7b10edb 100644
--- a/src/pquant/configs/config_autosparse.yaml
+++ b/src/pquant/configs/config_autosparse.yaml
@@ -30,7 +30,7 @@ fitcompress_parameters:
   optimize_pruning : false
   greedy_astar : true
   approximate : true
-  lambda : 1
+  f_lambda : 1
 training_parameters:
   epochs: 100
   fine_tuning_epochs: 0
diff --git a/src/pquant/configs/config_cs.yaml b/src/pquant/configs/config_cs.yaml
index ae26211..512f6aa 100644
--- a/src/pquant/configs/config_cs.yaml
+++ b/src/pquant/configs/config_cs.yaml
@@ -26,7 +26,7 @@ fitcompress_parameters:
   optimize_pruning : false
   greedy_astar : true
   approximate : true
-  lambda : 1
+  f_lambda : 1
 training_parameters:
   epochs: 85
   fine_tuning_epochs: 85
diff --git a/src/pquant/configs/config_dst.yaml b/src/pquant/configs/config_dst.yaml
index f9be222..6116b38 100644
--- a/src/pquant/configs/config_dst.yaml
+++ b/src/pquant/configs/config_dst.yaml
@@ -28,8 +28,7 @@ fitcompress_parameters:
   optimize_pruning : false
   greedy_astar : true
   approximate : true
-  lambda : 1
-
+  f_lambda : 1
 training_parameters:
   epochs: 160
   fine_tuning_epochs: 0
diff --git a/src/pquant/configs/config_mdmm.yaml b/src/pquant/configs/config_mdmm.yaml
index 70bbc83..71720d6 100644
--- a/src/pquant/configs/config_mdmm.yaml
+++ b/src/pquant/configs/config_mdmm.yaml
@@ -44,7 +44,7 @@ fitcompress_parameters:
   optimize_pruning : false
   greedy_astar : true
   approximate : true
-  lambda : 1
+  f_lambda : 1
 batch_size: 64
 cosine_tmax: 200
 gamma: 0.1
diff --git a/src/pquant/configs/config_pdp.yaml b/src/pquant/configs/config_pdp.yaml
index b9a8290..1ff44fa 100644
--- a/src/pquant/configs/config_pdp.yaml
+++ b/src/pquant/configs/config_pdp.yaml
@@ -28,7 +28,7 @@ fitcompress_parameters:
   optimize_pruning : true
   greedy_astar : true
   approximate : true
-  lambda : 1
+  f_lambda : 1
 training_parameters:
   epochs: 100
   fine_tuning_epochs: 100
diff --git a/src/pquant/configs/config_wanda.yaml b/src/pquant/configs/config_wanda.yaml
index 3707b03..b9f4a2e 100644
--- a/src/pquant/configs/config_wanda.yaml
+++ b/src/pquant/configs/config_wanda.yaml
@@ -30,7 +30,7 @@ fitcompress_parameters:
   optimize_pruning : true
   greedy_astar : true
   approximate : true
-  lambda : 0.5
+  f_lambda : 0.5
 training_parameters:
   epochs: 200
   fine_tuning_epochs: 0
diff --git a/src/pquant/configs/finetuning.yaml b/src/pquant/configs/finetuning.yaml
new file mode 100644
index 0000000..5b4cdb0
--- /dev/null
+++ b/src/pquant/configs/finetuning.yaml
@@ -0,0 +1,59 @@
+pruning_parameters:
+  disable_pruning_for_layers:
+    []
+  pruning_method: pdp
+quantization_parameters:
+  default_integer_bits: 0.
+  default_fractional_bits: 7.
+  enable_quantization: true
+  hgq_gamma: 0.0003
+  hgq_heterogeneous: True
+  layer_specific: []
+  use_high_granularity_quantization: false
+  use_real_tanh: false
+  use_symmetric_quantization: false
+training_parameters:
+  batch_size: 128
+  optimizer: sgd
+  plot_frequency: 100
+  label_smoothing: 0
+  model: "resnet18"
+  dataset: "cifar10"
+  l2_decay: 0.001
+  momentum: 0.9
+  lr_schedule: "cosine"
+  milestones: [30, 80]
+  gamma: 0.1
+  cosine_tmax: 200
+  learning_rate: 0.001
+  prune_ratio: 10
+  default_integer_bits: 0
+  epochs: 2
+  fine_tuning_epochs: 2
+  pretraining_epochs: 0
+  pruning_first: false
+  rewind: post-ticket-search
+  rounds: 2
+  save_weights_epoch: 2
+fitcompress_parameters:
+  enable_fitcompress : false
+  optimize_quantization : true
+  quantization_schedule : [7.,4.,3.,2.,1.]
+  pruning_schedule : {start : 0, end : -3, steps : 40}
+  compression_goal : 0.04
+  optimize_pruning : true
+  greedy_astar : true
+  approximate : true
+  f_lambda : 0.5
+finetuning_parameters:
+  experiment_name: resnet_18_experiment_2
+  num_trials: 10
+  sampler:
+    type: TPESampler
+  hyperparameter_search:
+    numerical:
+      learning_rate: [1e-5, 1e-3, 0.2]
+      batch_size: [16, 128, 32]
+      default_integer_bits: [0, 8, 1]
+    categorical:
+      lr_schedule: ["cosine", "multistep"]
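
The new finetuning.yaml above is the input for the pydantic-based TuningConfig loader introduced further down in this patch. A minimal usage sketch (the relative path assumes a source checkout; the printed values are the defaults from the file):

    from pquant.core.finetuning import TuningConfig

    # pruning_parameters.pruning_method ("pdp" here) selects the concrete
    # pruning model class via constants.PRUNING_MODEL_REGISTRY.
    config = TuningConfig.load_from_file("src/pquant/configs/finetuning.yaml")
    print(config.training_parameters.learning_rate)  # 0.001
    print(config.fitcompress_parameters.f_lambda)    # 0.5
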
diff --git a/src/pquant/core/activations_quantizer.py b/src/pquant/core/activations_quantizer.py
index e5da9e9..fa8f922 100644
--- a/src/pquant/core/activations_quantizer.py
+++ b/src/pquant/core/activations_quantizer.py
@@ -5,18 +5,23 @@
 from quantizers import get_fixed_quantizer
 
 
+@keras.saving.register_keras_serializable(package="PQuant")
 class QuantizedTanh(keras.layers.Layer):
-    def __init__(self, config, i, f):
+    def __init__(self, config, i, f, **kwargs):
         super().__init__()
+        if isinstance(config, dict):
+            from pquant.core.finetuning import TuningConfig
+
+            config = TuningConfig.load_from_config(config)
         self.i = convert_to_tensor(i)
         self.f = convert_to_tensor(f)
         self.k = convert_to_tensor(1.0)
         self.config = config
-        self.use_high_granularity_quantization = config["quantization_parameters"]["use_high_granularity_quantization"]
+        self.use_high_granularity_quantization = config.quantization_parameters.use_high_granularity_quantization
         self.is_pretraining = True
-        self.overflow = "SAT_SYM" if config["quantization_parameters"]["use_symmetric_quantization"] else "SAT"
-        self.use_real_tanh = config["quantization_parameters"]["use_real_tanh"]
-        self.hgq_heterogeneous = config["quantization_parameters"]["hgq_heterogeneous"]
+        self.overflow = "SAT_SYM" if config.quantization_parameters.use_symmetric_quantization else "SAT"
+        self.use_real_tanh = config.quantization_parameters.use_real_tanh
+        self.hgq_heterogeneous = config.quantization_parameters.hgq_heterogeneous
 
     def build(self, input_shape):
         super().build(input_shape)
@@ -53,9 +58,9 @@ def set_activation_bits(self, i, f):
     def hgq_loss(self):
         if self.is_pretraining:
             return 0.0
-        return (ops.sum(self.hgq.quantizer.i) + ops.sum(self.hgq.quantizer.f)) * self.config["quantization_parameters"][
-            "hgq_gamma"
-        ]
+        return (
+            ops.sum(self.hgq.quantizer.i) + ops.sum(self.hgq.quantizer.f)
+        ) * self.config.quantization_parameters.hgq_gamma
 
     def post_pre_train_function(self):
         self.is_pretraining = False
@@ -69,22 +74,33 @@ def call(self, x):
             x = self.quantizer(x, k=1.0, i=convert_to_tensor(0.0), f=self.f, training=True)
         return x
 
+    def get_config(self):
+        config = super().get_config()
+        config.update({"config": self.config.get_dict(), "i": float(self.i), "f": float(self.f)})
+        return config
+
 
+@keras.saving.register_keras_serializable(package="PQuant")
 class QuantizedReLU(keras.layers.Layer):
-    def __init__(self, config, i, f):
+    def __init__(self, config, i, f, **kwargs):
         super().__init__()
+        if isinstance(config, dict):
+            from pquant.core.finetuning import TuningConfig
+
+            config = TuningConfig.load_from_config(config)
         self.config = config
         self.i = convert_to_tensor(i)
         self.f = convert_to_tensor(f)
         self.k = convert_to_tensor(0.0)
-        self.use_high_granularity_quantization = config["quantization_parameters"]["use_high_granularity_quantization"]
+        self.use_high_granularity_quantization = config.quantization_parameters.use_high_granularity_quantization
         self.is_pretraining = True
         self.overflow = "SAT"
-        self.use_multiplier = config["quantization_parameters"]["use_relu_multiplier"]
-        self.hgq_heterogeneous = config["quantization_parameters"]["hgq_heterogeneous"]
-        self.use_fitcompress = config["fitcompress_parameters"]["enable_fitcompress"]
+        self.use_multiplier = config.quantization_parameters.use_relu_multiplier
+        self.hgq_heterogeneous = config.quantization_parameters.hgq_heterogeneous
+        self.use_fitcompress = config.fitcompress_parameters.enable_fitcompress
         self.post_fitcompress_calibration = False
         self.saved_inputs = []
+
 
     def build(self, input_shape):
         super().build(input_shape)
@@ -125,9 +141,9 @@ def post_pre_train_function(self):
     def hgq_loss(self):
         if self.is_pretraining:
             return 0.0
-        return (ops.sum(self.hgq.quantizer.i) + ops.sum(self.hgq.quantizer.f)) * self.config["quantization_parameters"][
-            "hgq_gamma"
-        ]
+        return (
+            ops.sum(self.hgq.quantizer.i) + ops.sum(self.hgq.quantizer.f)
+        ) * self.config.quantization_parameters.hgq_gamma
 
     def call(self, x):
         if self.use_high_granularity_quantization:
@@ -145,6 +161,17 @@
             x = self.quantizer(x, k=convert_to_tensor(0.0), i=convert_to_tensor(self.i), f=convert_to_tensor(self.f), training=True)
         return x
 
+    def get_config(self):
+        config = super().get_config()
+        config.update(
+            {
+                "config": self.config.get_dict(),
+                "i": float(self.i),
+                "f": float(self.f),
+            }
+        )
+        return config
+
 
 def hard_sigmoid(x):
     """Computes hard_sigmoid function that saturates between 0 and 1."""
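
Registering both activation layers with @keras.saving.register_keras_serializable and emitting the config as a plain dict in get_config() (which __init__ converts back via TuningConfig.load_from_config) lets quantized models survive a save/load round trip. A minimal sketch, with illustrative layer sizes and file names:

    import keras
    from pquant.core.activations_quantizer import QuantizedReLU
    from pquant.core.finetuning import TuningConfig

    config = TuningConfig.load_from_file("src/pquant/configs/finetuning.yaml")
    model = keras.Sequential([keras.layers.Dense(16), QuantizedReLU(config, i=0.0, f=7.0)])
    model.build((None, 8))
    model.save("quantized.keras")
    # Because the layers are registered under the "PQuant" package, reload
    # needs no custom_objects; config comes back as a dict and is rebuilt
    # into a TuningConfig by the isinstance check in __init__.
    reloaded = keras.saving.load_model("quantized.keras")
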
diff --git a/src/pquant/core/compressed_layers.py b/src/pquant/core/compressed_layers.py
index 0a02ea9..f8bf81f 100644
--- a/src/pquant/core/compressed_layers.py
+++ b/src/pquant/core/compressed_layers.py
@@ -78,8 +78,8 @@ def post_training_prune(model, calibration_data, config):
         remove_pruning_from_model_torch,
     )
 
-    t_delta = config["pruning_parameters"]["t_delta"]
-    config["pruning_parameters"]["t_start_collecting_batch"] = 0
+    t_delta = config.pruning_parameters.t_delta
+    config.pruning_parameters.t_start_collecting_batch = 0
     for i in range(t_delta):
         inputs = calibration_data[i]
         if i == 0:
@@ -94,8 +94,8 @@ def post_training_prune(model, calibration_data, config):
         remove_pruning_from_model_tf,
     )
 
-    t_delta = config["pruning_parameters"]["t_delta"]
-    config["pruning_parameters"]["t_start_collecting_batch"] = 0
+    t_delta = config.pruning_parameters.t_delta
+    config.pruning_parameters.t_start_collecting_batch = 0
     for i in range(t_delta):
         inputs = calibration_data[i]
diff --git a/src/pquant/core/constants.py b/src/pquant/core/constants.py
new file mode 100644
index 0000000..7f23200
--- /dev/null
+++ b/src/pquant/core/constants.py
@@ -0,0 +1,77 @@
+import optuna
+
+from pquant.data_models.pruning_model import (
+    ActivationPruningModel,
+    AutoSparsePruningModel,
+    CSPruningModel,
+    DSTPruningModel,
+    MDMMPruningModel,
+    PDPPruningModel,
+    WandaPruningModel,
+)
+from pquant.pruning_methods.constraint_functions import (
+    EqualityConstraint,
+    GreaterThanOrEqualConstraint,
+    LessThanOrEqualConstraint,
+)
+from pquant.pruning_methods.metric_functions import (
+    StructuredSparsityMetric,
+    UnstructuredSparsityMetric,
+)
+
+PRUNING_MODEL_REGISTRY = {
+    "cs": CSPruningModel,
+    "dst": DSTPruningModel,
+    "pdp": PDPPruningModel,
+    "wanda": WandaPruningModel,
+    "autosparse": AutoSparsePruningModel,
+    "activation_pruning": ActivationPruningModel,
+    "mdmm": MDMMPruningModel,
+}
+
+SAMPLER_REGISTRY = {
+    "GridSampler": optuna.samplers.GridSampler,
+    "RandomSampler": optuna.samplers.RandomSampler,
+    "TPESampler": optuna.samplers.TPESampler,
+    "CmaEsSampler": optuna.samplers.CmaEsSampler,
+    "GPSampler": optuna.samplers.GPSampler,
+    "NSGAIISampler": optuna.samplers.NSGAIISampler,
+    "NSGAIIISampler": optuna.samplers.NSGAIIISampler,
+    "QMCSampler": optuna.samplers.QMCSampler,
+    "BruteForceSampler": optuna.samplers.BruteForceSampler,
+}
+
+
+try:
+    import mlflow
+    LOG_FUNCTIONS_REGISTRY = {
+        "torch": mlflow.pytorch.log_model,
+        "tensorflow": mlflow.tensorflow.log_model,
+    }
+except ModuleNotFoundError:
+    LOG_FUNCTIONS_REGISTRY = {}
+
+
+TRACKING_URI = "http://0.0.0.0:5000/"
+DB_STORAGE = "sqlite:///optuna_study.db"
+
+JAX_BACKEND = "jax"
+
+FINETUNING_DIRECTION = {"maximize", "minimize"}
+CONFIG_FILE = "config.yaml"
+
+N_JOBS = 1
+TORCH_BACKEND = "torch"
+TF_BACKEND = 'tensorflow'
+
+
+METRIC_REGISTRY = {
+    "UnstructuredSparsity": UnstructuredSparsityMetric,
+    "StructuredSparsity": StructuredSparsityMetric,
+}
+
+CONSTRAINT_REGISTRY = {
+    "Equality": EqualityConstraint,
+    "LessThanOrEqual": LessThanOrEqualConstraint,
+    "GreaterThanOrEqual": GreaterThanOrEqualConstraint,
+}
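
The registries in constants.py map the string identifiers used in the YAML configs onto classes, keeping config parsing declarative. Illustrative lookups:

    from pquant.core import constants

    pruning_cls = constants.PRUNING_MODEL_REGISTRY["pdp"]           # PDPPruningModel
    sampler_cls = constants.SAMPLER_REGISTRY["TPESampler"]          # optuna.samplers.TPESampler
    metric_cls = constants.METRIC_REGISTRY["UnstructuredSparsity"]  # UnstructuredSparsityMetric
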
diff --git a/src/pquant/core/finetuning.py b/src/pquant/core/finetuning.py
new file mode 100644
index 0000000..564db78
--- /dev/null
+++ b/src/pquant/core/finetuning.py
+import copy
+import json
+import logging
+import os
+from typing import Annotated, Callable, Dict, Optional, Union
+
+import keras
+import optuna
+import torch
+import yaml
+from pydantic import BaseModel, Field, field_validator
+
+from pquant.core import constants
+from pquant.core.compressed_layers import add_compression_layers
+from pquant.core.train import iterative_train
+from pquant.data_models.finetuning_model import BaseFinetuningModel
+from pquant.data_models.fitcompress_model import BaseFitCompressModel
+from pquant.data_models.pruning_model import (
+    ActivationPruningModel,
+    AutoSparsePruningModel,
+    BasePruningModel,
+    CSPruningModel,
+    DSTPruningModel,
+    MDMMPruningModel,
+    PDPPruningModel,
+    WandaPruningModel,
+)
+from pquant.data_models.quantization_model import BaseQuantizationModel
+from pquant.data_models.training_model import BaseTrainingModel
+
+
+def get_sampler(sampler_type, **kwargs):
+    try:
+        return constants.SAMPLER_REGISTRY[sampler_type](**kwargs)
+    except KeyError:
+        raise ValueError(f"Unknown sampler type: {sampler_type}")
+
+
+def log_model_by_backend(model, name, signature=None, registered_model_name=None):
+    backend = keras.backend.backend()
+
+    kwargs = {
+        "artifact_path": name,
+        "signature": signature,
+        "registered_model_name": registered_model_name,
+    }
+
+    if backend == constants.JAX_BACKEND:
+        raise NotImplementedError("JAX is not supported yet.")
+
+    if backend not in constants.LOG_FUNCTIONS_REGISTRY:
+        raise ValueError(f"Unsupported backend: {backend}")
+
+    return constants.LOG_FUNCTIONS_REGISTRY[backend](model, **kwargs)
+
+
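+# Example (illustrative, not part of the committed file): get_sampler()
+# resolves the name given under finetuning_parameters.sampler.type, e.g.
+#     sampler = get_sampler("TPESampler", seed=42)
+# Unregistered names raise ValueError("Unknown sampler type: ...").
+
+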
MetricFunction(BaseModel): + function_name: Callable + direction: str + + @field_validator('direction') + def validate_direction(cls, direction): + if direction not in constants.FINETUNING_DIRECTION: + raise ValueError("direction must be 'maximize' or 'minimize'") + return direction + + +class TuningConfig(BaseModel): + finetuning_parameters: BaseFinetuningModel + pruning_parameters: Annotated[ + Union[ + CSPruningModel, + DSTPruningModel, + PDPPruningModel, + WandaPruningModel, + AutoSparsePruningModel, + ActivationPruningModel, + MDMMPruningModel, + ], + Field(discriminator="pruning_method"), + ] + quantization_parameters: BaseQuantizationModel + training_parameters: BaseTrainingModel + fitcompress_parameters: BaseFitCompressModel + + @classmethod + def load_from_file(cls, path_to_config_file): + if path_to_config_file.endswith(('.yaml', '.yml')): + with open(path_to_config_file) as f: + config_data = yaml.safe_load(f) + elif path_to_config_file.endswith('.json'): + with open(path_to_config_file) as f: + config_data = json.load(f) + else: + raise ValueError("Unsupported file type. Use .yaml, .yml, or .json") + + return cls.load_from_config(config_data) + + @classmethod + def load_from_config(cls, config): + pruning_section = config.get("pruning_parameters", {}) + pruning_method = pruning_section.get("pruning_method", "cs") + pruning_model_cls = constants.PRUNING_MODEL_REGISTRY.get(pruning_method, BasePruningModel) + + return cls( + finetuning_parameters=BaseFinetuningModel(**config.get("finetuning_parameters", {})), + pruning_parameters=pruning_model_cls(**config.get("pruning_parameters", {})), + quantization_parameters=BaseQuantizationModel(**config.get("quantization_parameters", {})), + training_parameters=BaseTrainingModel(**config.get("training_parameters", {})), + fitcompress_parameters=BaseFitCompressModel(**config.get("fitcompress_parameters", {})), + ) + + def get_dict(self): + return self.model_dump() + + +class TuningTask: + def __init__(self, config: TuningConfig): + self.config = config + self.hyperparameters = {} + self.objectives: Dict[str, MetricFunction] = {} + self._training_function: Optional[Callable] = None + self._validation_function: Optional[Callable] = None + self._optimizer_function: Optional[Callable] = None + self._scheduler_function: Optional[Callable] = None + self.device = "cuda" if torch.cuda.is_available() else "cpu" + self.enable_mlflow = False + self.tracking_uri = None + self.storage_db = None + + def set_tracking_uri(self, tracking_uri: str): + self.tracking_uri = tracking_uri + os.environ["MLFLOW_TRACKING_URI"] = tracking_uri + + def set_user(self, user_email: str, access_token: str): + os.environ.pop("MLFLOW_TRACKING_TOKEN", None) + os.environ["MLFLOW_TRACKING_USERNAME"] = user_email + os.environ["MLFLOW_TRACKING_PASSWORD"] = access_token + os.environ["NO_PROXY"] = "ngt.cern.ch" + + def set_storage_db(self, storage_db: str): + self.storage_db = storage_db + + def set_enable_mlflow(self): + self.enable_mlflow = True + + def get_dict(self): + return self.config.model_dump() + + def set_objective_function(self, name: str, fn: Callable, direction: str): + if not callable(fn): + raise TypeError("Objective function must be callable.") + self.objectives[name] = MetricFunction(function_name=fn, direction=direction) + + def set_training_function(self, fn: Callable): + if not callable(fn): + raise TypeError("Training function must be callable.") + self._training_function = fn + + def set_validation_function(self, fn: Callable): + if not callable(fn): + raise 
TypeError("Validation function must be callable.") + self._validation_function = fn + + def set_optimizer_function(self, fn: Callable): + if not callable(fn): + raise TypeError("Optimizer function must be callable.") + self._optimizer_function = fn + + def set_scheduler_function(self, fn: Callable): + if not callable(fn): + raise TypeError("Scheduler function must be callable.") + self._scheduler_function = fn + + def get_training_function(self) -> Callable: + if not self._training_function: + raise ValueError("Training function is not set.") + return self._training_function + + def get_validation_function(self) -> Callable: + if not self._validation_function: + raise ValueError("Validation function is not set.") + return self._validation_function + + def get_optimizer_function(self) -> Callable: + if not self._optimizer_function: + raise ValueError("Optimizer function is not set.") + return self._optimizer_function + + def get_scheduler_function(self) -> Callable: + if not self._scheduler_function: + raise ValueError("Scheduler function is not set.") + return self._scheduler_function + + def set_hyperparameters(self): + hp_config = self.config.finetuning_parameters.hyperparameter_search + numerical_params = hp_config.numerical + categorical_params = hp_config.categorical + + if numerical_params: + self.set_numerical_params(numerical_params) + elif categorical_params: + self.set_categorical_params(categorical_params) + + def set_numerical_params(self, numerical_params): + try: + for param, value in numerical_params.items(): + if not isinstance(value, list) or len(value) < 2: + continue + start_value, end_value = value[0], value[1] + step = value[2] if len(value) == 3 else None + use_float = any(isinstance(x, float) for x in (start_value, end_value, step) if x is not None) + suggest_func = optuna.trial.Trial.suggest_float if use_float else optuna.trial.Trial.suggest_int + + if use_float: + start_value, end_value = float(start_value), float(end_value) + if step is not None: + step = float(step) + else: + start_value, end_value = int(start_value), int(end_value) + if step is not None: + step = int(step) + + if step is None: + self.register_hyperparameter(param, suggest_func, param, start_value, end_value) + else: + self.register_hyperparameter(param, suggest_func, param, start_value, end_value, step=step) + + except Exception as e: + logging.error(f"Failed to register numerical hyperparameter '{param}': {e}") + + def set_categorical_params(self, categorical_params): + for param, choices in categorical_params.items(): + if not isinstance(choices, list) or len(choices) == 0: + continue + try: + self.register_hyperparameter(param, optuna.trial.Trial.suggest_categorical, param, choices) + except Exception as e: + logging.error(f"Failed to register categorical hyperparameter '{param}': {e}") + + def register_hyperparameter(self, name, optuna_func, *args, **kwargs): + self.hyperparameters[name] = (optuna_func, args, kwargs) + + def objective(self, trial, model, train_func, valid_func, **kwargs): + for param_name, (optuna_func, func_args, func_kwargs) in self.hyperparameters.items(): + new_value = optuna_func(trial, *func_args, **func_kwargs) + logging.info(f"Suggested {param_name} = {new_value}") + + applied = False + for sub_config in [self.config.training_parameters, self.config.finetuning_parameters]: + if hasattr(sub_config, param_name): + setattr(sub_config, param_name, new_value) + applied = True + break + if not applied: + logging.error(f"'{param_name}' not found in config: value not 
applied.") + + trainloader = kwargs['trainloader'] + raw_input_batch = next(iter(trainloader)) + sample_input = raw_input_batch[0] + sample_output = model(sample_input.to(next(model.parameters()).device)) + + input_shape = sample_input.shape + compressed_model = add_compression_layers(model, self.config, input_shape) + optimizer_func = self.get_optimizer_function() + optimizer = optimizer_func(self.config, compressed_model) + scheduler_func = self.get_scheduler_function() + scheduler = scheduler_func(optimizer, self.config) + + trained_model = iterative_train( + compressed_model, + self.config, + train_func, + valid_func, + optimizer=optimizer, + scheduler=scheduler, + device=self.device, + writer=None, + **kwargs, + ) + trained_model.eval() + objectives = [ + metric_object.function_name(trained_model, device=self.device, **kwargs) + for _, metric_object in self.objectives.items() + ] + + if self.enable_mlflow: + import mlflow + from mlflow.models import infer_signature + + with mlflow.start_run(nested=True): + mlflow.log_params({param_name: getattr(self.config, param_name) for param_name in self.config.model_fields}) + mlflow.log_metrics({key: val for key, val in zip(self.objectives.keys(), objectives)}) + signature = infer_signature(sample_input.cpu().numpy(), sample_output.detach().cpu().numpy()) + + mlflow.log_text(yaml.safe_dump(self.get_dict()), "config.yaml") + log_model_by_backend( + model=trained_model, + name=self.config.training_parameters.model, + signature=signature, + registered_model_name=self.config.training_parameters.model, + ) + + return objectives if len(objectives) > 1 else objectives[0] + + def run_optimization(self, model, **kwargs): + if self.enable_mlflow: + import mlflow + + if not self.tracking_uri: + raise ValueError("Tracking URI must be set when MLflow logging is enabled.") + mlflow.set_tracking_uri(self.tracking_uri) + finetuning_parameters = self.config.finetuning_parameters + mlflow.set_experiment(finetuning_parameters.experiment_name) + + sampler = get_sampler(finetuning_parameters.sampler.type, **finetuning_parameters.sampler.params) + study = optuna.create_study( + study_name=finetuning_parameters.experiment_name, + storage=self.storage_db, + sampler=sampler, + load_if_exists=True, + directions=[metric_object.direction for _, metric_object in self.objectives.items()], + ) + + num_trials = finetuning_parameters.num_trials + study.optimize( + lambda trial: self.objective( + trial, + copy.deepcopy(model.cpu()).to(self.device), + self.get_training_function(), + self.get_validation_function(), + **kwargs, + ), + n_trials=num_trials, + n_jobs=1, + ) + + return study.best_params diff --git a/src/pquant/core/tf_impl/compressed_layers_tf.py b/src/pquant/core/tf_impl/compressed_layers_tf.py index 42f9d13..04d5a9f 100644 --- a/src/pquant/core/tf_impl/compressed_layers_tf.py +++ b/src/pquant/core/tf_impl/compressed_layers_tf.py @@ -23,22 +23,22 @@ class CompressedLayerBase(keras.layers.Layer): def __init__(self, config, layer, layer_type): super().__init__() - i_bits = config["quantization_parameters"]["default_integer_bits"] - f_bits = config["quantization_parameters"]["default_fractional_bits"] + i_bits = config.quantization_parameters.default_integer_bits + f_bits = config.quantization_parameters.default_fractional_bits self.i_weight = ops.convert_to_tensor(i_bits) self.f_weight = ops.convert_to_tensor(f_bits) self.i_bias = ops.convert_to_tensor(i_bits) self.f_bias = ops.convert_to_tensor(f_bits) self.pruning_layer = get_pruning_layer(config=config, 
layer_type=layer_type) - self.pruning_method = config["pruning_parameters"]["pruning_method"] - self.overflow = "SAT_SYM" if config["quantization_parameters"]["use_symmetric_quantization"] else "SAT" - self.hgq_gamma = config["quantization_parameters"]["hgq_gamma"] - - self.pruning_first = config["training_parameters"]["pruning_first"] - self.enable_quantization = config["quantization_parameters"]["enable_quantization"] - self.use_high_granularity_quantization = config["quantization_parameters"]["use_high_granularity_quantization"] - self.hgq_heterogeneous = config["quantization_parameters"]["hgq_heterogeneous"] - self.enable_pruning = config["pruning_parameters"]["enable_pruning"] + self.pruning_method = config.pruning_parameters.pruning_method + self.overflow = "SAT_SYM" if config.quantization_parameters.use_symmetric_quantization else "SAT" + self.hgq_gamma = config.quantization_parameters.hgq_gamma + + self.pruning_first = config.training_parameters.pruning_first + self.enable_quantization = config.quantization_parameters.enable_quantization + self.use_high_granularity_quantization = config.quantization_parameters.use_high_granularity_quantization + self.hgq_heterogeneous = config.quantization_parameters.hgq_heterogeneous + self.enable_pruning = config.pruning_parameters.enable_pruning self.do_transpose_data = None self.weight_transpose = None self.data_transpose = None @@ -366,16 +366,16 @@ def call(self, x, training=None): class QuantizedPooling(keras.layers.Layer): def __init__(self, config, layer): super().__init__() - self.i = ops.convert_to_tensor(config["quantization_parameters"]["default_integer_bits"]) - self.f = ops.convert_to_tensor(config["quantization_parameters"]["default_fractional_bits"]) + self.i = ops.convert_to_tensor(config.quantization_parameters.default_integer_bits) + self.f = ops.convert_to_tensor(config.quantization_parameters.default_fractional_bits) self.is_pretraining = True - self.overflow = "SAT_SYM" if config["quantization_parameters"]["use_symmetric_quantization"] else "SAT" - self.hgq_gamma = config["quantization_parameters"]["hgq_gamma"] + self.overflow = "SAT_SYM" if config.quantization_parameters.use_symmetric_quantization else "SAT" + self.hgq_gamma = config.quantization_parameters.hgq_gamma - self.use_high_granularity_quantization = config["quantization_parameters"]["use_high_granularity_quantization"] - self.hgq_heterogeneous = config["quantization_parameters"]["hgq_heterogeneous"] + self.use_high_granularity_quantization = config.quantization_parameters.use_high_granularity_quantization + self.hgq_heterogeneous = config.quantization_parameters.hgq_heterogeneous self.pool_size = layer.pool_size self.strides = layer.strides self.padding = layer.padding @@ -676,9 +676,8 @@ def post_pretrain_functions(model, config): layer.pointwise_conv.pruning_layer.post_pre_train_function() elif isinstance(layer, (QuantizedReLU, QuantizedTanh, QuantizedPooling)): layer.post_pre_train_function() - if config["pruning_parameters"]["pruning_method"] == "pdp" or ( - config["pruning_parameters"]["pruning_method"] == "wanda" - and config["pruning_parameters"]["calculate_pruning_budget"] + if config.pruning_parameters.pruning_method == "pdp" or ( + config.pruning_parameters.pruning_method == "wanda" and config.pruning_parameters.calculate_pruning_budget ): pdp_setup(model, config) @@ -713,7 +712,7 @@ def pdp_setup(model, config): abs_global_weights = ops.abs(global_weights) global_weight_topk, _ = ops.top_k(abs_global_weights, ops.size(abs_global_weights)) - threshold = 
global_weight_topk[int((1 - config["pruning_parameters"]["sparsity"]) * float(ops.size(global_weight_topk)))] + threshold = global_weight_topk[int((1 - config.pruning_parameters.sparsity) * float(ops.size(global_weight_topk)))] global_weights_below_threshold = ops.where(abs_global_weights < threshold, 1, 0) idx = 0 for layer in model.layers: @@ -856,7 +855,7 @@ def check_activation(layer, config): Replaces activations with quantized activations. The activation can be a part of another layer such as Conv2D, or an Activation layer """ - quantization_enabled = config["quantization_parameters"]["enable_quantization"] + quantization_enabled = config.quantization_parameters.enable_quantization act = None if hasattr(layer.activation, "__name__"): if layer.activation.__name__ == "relu": @@ -954,9 +953,9 @@ def add_compression_layers_tf(model, config, input_shape=None): act = check_activation(layer, config) # Activation layers elif isinstance(layer, ReLU): - if config["quantization_parameters"]["enable_quantization"]: - i_bits = config["quantization_parameters"]["default_integer_bits"] - f_bits = config["quantization_parameters"]["default_fractional_bits"] + if config.quantization_parameters.enable_quantization: + i_bits = config.quantization_parameters.default_integer_bits + f_bits = config.quantization_parameters.default_fractional_bits i_bits, f_bits = get_quantization_bits_activations(config, layer) new_layer = QuantizedReLU(config, i_bits, f_bits) new_layer.build(layer.input.shape) @@ -968,7 +967,7 @@ def add_compression_layers_tf(model, config, input_shape=None): if new_layer is not None: x = new_layer(x) elif isinstance(layer, (AveragePooling1D, AveragePooling2D, AveragePooling3D)): - if config["quantization_parameters"]["enable_quantization"]: + if config.quantization_parameters.enable_quantization: i_bits, f_bits = get_quantization_bits_activations(config, layer) new_layer = QuantizedPooling(config, layer) new_layer.set_quantization_bits(i_bits, f_bits) @@ -985,11 +984,11 @@ def add_compression_layers_tf(model, config, input_shape=None): def get_quantization_bits_activations(config, layer): - i_bits = config["quantization_parameters"]["default_integer_bits"] - f_bits = config["quantization_parameters"]["default_fractional_bits"] + i_bits = config.quantization_parameters.default_integer_bits + f_bits = config.quantization_parameters.default_fractional_bits if isinstance(layer, ReLU): f_bits += 1 # Unsigned, add 1 bit to default value only - layer_specific = config["quantization_parameters"]["layer_specific"] + layer_specific = config.quantization_parameters.layer_specific if layer.name in layer_specific: if hasattr(layer, "activation") and layer.activation.__name__ in layer_specific[layer.name]: i_bits = layer_specific[layer.name][layer.activation.__name__]["integer_bits"] @@ -1001,10 +1000,10 @@ def get_quantization_bits_activations(config, layer): def get_quantization_bits_weights_biases(config, layer): - layer_specific = config["quantization_parameters"]["layer_specific"] + layer_specific = config.quantization_parameters.layer_specific if isinstance(layer, SeparableConv2D): - dw_i_bits_w = pw_i_bits_w = pw_i_bits_b = config["quantization_parameters"]["default_integer_bits"] - dw_f_bits_w = pw_f_bits_w = pw_f_bits_b = config["quantization_parameters"]["default_fractional_bits"] + dw_i_bits_w = pw_i_bits_w = pw_i_bits_b = config.quantization_parameters.default_integer_bits + dw_f_bits_w = pw_f_bits_w = pw_f_bits_b = config.quantization_parameters.default_fractional_bits if layer.name in 
layer_specific:
             if "depthwise" in layer_specific[layer.name]:
                 if "weight" in layer_specific[layer.name]["depthwise"]:
@@ -1019,8 +1018,8 @@ def get_quantization_bits_weights_biases(config, layer):
                 pw_f_bits_b = layer_specific[layer.name]["pointwise"]["bias"]["fractional_bits"]
         return dw_i_bits_w, dw_f_bits_w, pw_i_bits_w, pw_f_bits_w, pw_i_bits_b, pw_f_bits_b
     else:
-        i_bits_w = i_bits_b = config["quantization_parameters"]["default_integer_bits"]
-        f_bits_w = f_bits_b = config["quantization_parameters"]["default_fractional_bits"]
+        i_bits_w = i_bits_b = config.quantization_parameters.default_integer_bits
+        f_bits_w = f_bits_b = config.quantization_parameters.default_fractional_bits
         if layer.name in layer_specific:
             if "weight" in layer_specific[layer.name]:
                 i_bits_w = layer_specific[layer.name]["weight"]["integer_bits"]
@@ -1032,16 +1031,16 @@
 
 
 def get_enable_pruning(layer, config):
-    enable_pruning = config["pruning_parameters"]["enable_pruning"]
+    enable_pruning = config.pruning_parameters.enable_pruning
     if isinstance(layer, SeparableConv2D):
         enable_pruning_depthwise = enable_pruning_pointwise = True
-        if layer.name + "_depthwise" in config["pruning_parameters"]["disable_pruning_for_layers"]:
+        if layer.name + "_depthwise" in config.pruning_parameters.disable_pruning_for_layers:
             enable_pruning_depthwise = False
-        if layer.name + "pointwise" in config["pruning_parameters"]["disable_pruning_for_layers"]:
+        if layer.name + "_pointwise" in config.pruning_parameters.disable_pruning_for_layers:
             enable_pruning_pointwise = False
         return enable_pruning_depthwise, enable_pruning_pointwise
     else:
-        if layer.name in config["pruning_parameters"]["disable_pruning_for_layers"]:
+        if layer.name in config.pruning_parameters.disable_pruning_for_layers:
             enable_pruning = False
     return enable_pruning
@@ -1088,6 +1087,6 @@ def add_default_layer_quantization_pruning_to_config_tf(model, config):
                 custom_scheme["disable_pruning_for_layers"].append(layer.name + "_pointwise")
         elif layer.__class__ in [Activation, ReLU, AveragePooling1D, AveragePooling2D, AveragePooling3D]:
             custom_scheme["layer_specific"][layer.name] = {"integer_bits": 0.0, "fractional_bits": 7.0}
-    config["quantization_parameters"]["layer_specific"] = custom_scheme["layer_specific"]
-    config["pruning_parameters"]["disable_pruning_for_layers"] = custom_scheme["disable_pruning_for_layers"]
+    config.quantization_parameters.layer_specific = custom_scheme["layer_specific"]
+    config.pruning_parameters.disable_pruning_for_layers = custom_scheme["disable_pruning_for_layers"]
     return config
diff --git a/src/pquant/core/tf_impl/train_tf.py b/src/pquant/core/tf_impl/train_tf.py
index d19b650..8b80127 100644
--- a/src/pquant/core/tf_impl/train_tf.py
+++ b/src/pquant/core/tf_impl/train_tf.py
@@ -15,31 +15,31 @@ def iterative_train_tf(model, config, train_func, valid_func, **kwargs):
     Generic training loop, user provides training and validation functions
     """
     epoch = keras.ops.convert_to_tensor(0)  # Keeps track of all the epochs completed
-    training_config = config["training_parameters"]
-    if training_config["pretraining_epochs"] > 0:
-        for e in range(training_config["pretraining_epochs"]):
-            pre_epoch_functions(model, e, training_config["pretraining_epochs"])
+    training_config = config.training_parameters
+    if training_config.pretraining_epochs > 0:
+        for e in range(training_config.pretraining_epochs):
+            pre_epoch_functions(model, e, training_config.pretraining_epochs)
             train_func(model, epoch=epoch, **kwargs)
valid_func(model, epoch=epoch, **kwargs) - post_epoch_functions(model, e, training_config["pretraining_epochs"]) + post_epoch_functions(model, e, training_config.pretraining_epochs) epoch += 1 post_pretrain_functions(model, config) - for r in range(training_config["rounds"]): - for e in range(training_config["epochs"]): - if r == 0 and training_config["save_weights_epoch"] == e: + for r in range(training_config.rounds): + for e in range(training_config.epochs): + if r == 0 and training_config.save_weights_epoch == e: save_weights_functions(model) - pre_epoch_functions(model, e, training_config["epochs"]) + pre_epoch_functions(model, e, training_config.epochs) train_func(model, epoch=epoch, **kwargs) valid_func(model, epoch=epoch, **kwargs) - post_epoch_functions(model, e, training_config["epochs"]) + post_epoch_functions(model, e, training_config.epochs) epoch += 1 - call_post_round_functions(model, training_config["rewind"], training_config["rounds"], r) + call_post_round_functions(model, training_config.rewind, training_config.rounds, r) pre_finetune_functions(model) - if training_config["fine_tuning_epochs"] > 0: - for e in range(training_config["fine_tuning_epochs"]): - pre_epoch_functions(model, e, training_config["fine_tuning_epochs"]) + if training_config.fine_tuning_epochs > 0: + for e in range(training_config.fine_tuning_epochs): + pre_epoch_functions(model, e, training_config.fine_tuning_epochs) train_func(model, epoch=epoch, **kwargs) valid_func(model, epoch=epoch, **kwargs) - post_epoch_functions(model, e, training_config["fine_tuning_epochs"]) + post_epoch_functions(model, e, training_config.fine_tuning_epochs) epoch += 1 return model diff --git a/src/pquant/core/torch_impl/compressed_layers_torch.py b/src/pquant/core/torch_impl/compressed_layers_torch.py index 73f1d62..e55e38a 100644 --- a/src/pquant/core/torch_impl/compressed_layers_torch.py +++ b/src/pquant/core/torch_impl/compressed_layers_torch.py @@ -2,6 +2,7 @@ import torch.nn as nn import torch.nn.functional as F from hgq.quantizer import Quantizer +from keras import ops from quantizers import get_fixed_quantizer from torch.fx import symbolic_trace @@ -18,25 +19,24 @@ class CompressedLayerBase(nn.Module): def __init__(self, config, layer, layer_type): super().__init__() - self.f_weight = torch.tensor(config["quantization_parameters"]["default_fractional_bits"]) - self.i_weight = torch.tensor(config["quantization_parameters"]["default_integer_bits"]) - self.f_bias = torch.tensor(config["quantization_parameters"]["default_fractional_bits"]) - self.i_bias = torch.tensor(config["quantization_parameters"]["default_integer_bits"]) - + self.f_weight = torch.tensor(config.quantization_parameters.default_fractional_bits) + self.i_weight = torch.tensor(config.quantization_parameters.default_integer_bits) + self.f_bias = torch.tensor(config.quantization_parameters.default_fractional_bits) + self.i_bias = torch.tensor(config.quantization_parameters.default_integer_bits) self.weight = nn.Parameter(layer.weight.clone()) self.pruning_layer = get_pruning_layer(config=config, layer_type=layer_type) - self.pruning_method = config["pruning_parameters"]["pruning_method"] - self.overflow = "SAT_SYM" if config["quantization_parameters"]["use_symmetric_quantization"] else "SAT" + self.pruning_method = config.pruning_parameters.pruning_method + self.overflow = "SAT_SYM" if config.quantization_parameters.use_symmetric_quantization else "SAT" self.quantizer = get_fixed_quantizer(overflow_mode=self.overflow) - self.hgq_heterogeneous = 
config["quantization_parameters"]["hgq_heterogeneous"] + self.hgq_heterogeneous = config.quantization_parameters.hgq_heterogeneous self.bias = nn.Parameter(layer.bias.clone()) if layer.bias is not None else None self.init_weight = self.weight.clone() - self.pruning_first = config["training_parameters"]["pruning_first"] - self.enable_quantization = config["quantization_parameters"]["enable_quantization"] - self.use_high_granularity_quantization = config["quantization_parameters"]["use_high_granularity_quantization"] - self.enable_pruning = config["pruning_parameters"]["enable_pruning"] - self.hgq_gamma = config["quantization_parameters"]["hgq_gamma"] + self.pruning_first = config.training_parameters.pruning_first + self.enable_quantization = config.quantization_parameters.enable_quantization + self.use_high_granularity_quantization = config.quantization_parameters.use_high_granularity_quantization + self.enable_pruning = config.pruning_parameters.enable_pruning + self.hgq_gamma = config.quantization_parameters.hgq_gamma def build(self, input_shape): if self.use_high_granularity_quantization: @@ -142,9 +142,8 @@ def __init__(self, config, layer, layer_type): super().__init__(config, layer, layer_type) self.in_features = layer.in_features self.out_features = layer.out_features - self.use_fitcompress = config["fitcompress_parameters"]["enable_fitcompress"] - self.is_pretraining = True - + self.use_fitcompress = config.fitcompress_parameters.enable_fitcompress + self.is_pretraining = True def post_pre_train_function(self): self.is_pretraining = False @@ -174,9 +173,8 @@ def __init__(self, config, layer, layer_type): self.out_channels = layer.out_channels self.kernel_size = layer.kernel_size self.padding_mode = layer.padding_mode - self.use_fitcompress = config["fitcompress_parameters"]["enable_fitcompress"] - self.is_pretraining = True - + self.use_fitcompress = config.fitcompress_parameters.enable_fitcompress + self.is_pretraining = True def post_pre_train_function(self): self.is_pretraining = False @@ -214,9 +212,8 @@ def __init__(self, config, layer, layer_type): self.out_channels = layer.out_channels self.kernel_size = layer.kernel_size self.padding_mode = layer.padding_mode - self.use_fitcompress = config["fitcompress_parameters"]["enable_fitcompress"] - self.is_pretraining = True - + self.use_fitcompress = config.fitcompress_parameters.enable_fitcompress + self.is_pretraining = True def post_pre_train_function(self): self.is_pretraining = False @@ -256,18 +253,18 @@ class QuantizedPooling(nn.Module): def __init__(self, config, layer): super().__init__() - self.f = torch.tensor(config["quantization_parameters"]["default_fractional_bits"]) - self.i = torch.tensor(config["quantization_parameters"]["default_integer_bits"]) - self.overflow = "SAT_SYM" if config["quantization_parameters"]["use_symmetric_quantization"] else "SAT" + self.f = torch.tensor(config.quantization_parameters.default_fractional_bits) + self.i = torch.tensor(config.quantization_parameters.default_integer_bits) + self.overflow = "SAT_SYM" if config.quantization_parameters.use_symmetric_quantization else "SAT" self.config = config - self.hgq_heterogeneous = config["quantization_parameters"]["hgq_heterogeneous"] + self.hgq_heterogeneous = config.quantization_parameters.hgq_heterogeneous self.is_pretraining = True - self.use_high_granularity_quantization = config["quantization_parameters"]["use_high_granularity_quantization"] + self.use_high_granularity_quantization = 
config.quantization_parameters.use_high_granularity_quantization self.pooling = layer - self.use_fitcompress = config["fitcompress_parameters"]["enable_fitcompress"] + self.use_fitcompress = config.fitcompress_parameters.enable_fitcompress self.post_fitcompress_calibration = False - self.hgq_gamma = config["quantization_parameters"]["hgq_gamma"] self.saved_inputs = [] + self.hgq_gamma = config.quantization_parameters.hgq_gamma def build(self, input_shape): if self.use_high_granularity_quantization: @@ -306,9 +303,9 @@ def post_pre_train_function(self): def hgq_loss(self): if self.is_pretraining: return 0.0 - return (torch.sum(self.hgq.quantizer.i) + torch.sum(self.hgq.quantizer.f)) * self.config["quantization_parameters"][ - "hgq_gamma" - ] + return ( + torch.sum(self.hgq.quantizer.i) + torch.sum(self.hgq.quantizer.f) + ) * self.config.quantization_parameters.hgq_gamma def quantize(self, x): if not hasattr(self, "hgq") or not hasattr(self, "quantizer"): @@ -321,7 +318,7 @@ def quantize(self, x): # Save inputs self.saved_inputs.append(x) # During FITcompress, we do not use any quantized pooling - return ops.average_pool(x, pool_size = 1) + return ops.average_pool(x, pool_size=1) x = self.quantizer(x, k=torch.tensor(1.0), i=self.i, f=self.f, training=True) return x @@ -333,41 +330,37 @@ def forward(self, x): def add_layer_specific_quantization_to_model(module, config): for name, layer in module.named_modules(): if isinstance(layer, CompressedLayerBase): - if name in config["quantization_parameters"]["layer_specific"]: - if "weight" in config["quantization_parameters"]["layer_specific"][name]: - weight_int_bits = config["quantization_parameters"]["layer_specific"][name]["weight"]["integer_bits"] - weight_fractional_bits = config["quantization_parameters"]["layer_specific"][name]["weight"][ - "fractional_bits" - ] + if name in config.quantization_parameters.layer_specific: + if "weight" in config.quantization_parameters.layer_specific[name]: + weight_int_bits = config.quantization_parameters.layer_specific[name]["weight"]["integer_bits"] + weight_fractional_bits = config.quantization_parameters.layer_specific[name]["weight"]["fractional_bits"] layer.i_weight = torch.tensor(weight_int_bits) layer.f_weight = torch.tensor(weight_fractional_bits) - if "bias" in config["quantization_parameters"]["layer_specific"][name]: - bias_int_bits = config["quantization_parameters"]["layer_specific"][name]["bias"]["integer_bits"] - bias_fractional_bits = config["quantization_parameters"]["layer_specific"][name]["bias"][ - "fractional_bits" - ] + if "bias" in config.quantization_parameters.layer_specific[name]: + bias_int_bits = config.quantization_parameters.layer_specific[name]["bias"]["integer_bits"] + bias_fractional_bits = config.quantization_parameters.layer_specific[name]["bias"]["fractional_bits"] layer.i_bias = torch.tensor(bias_int_bits) layer.f_bias = torch.tensor(bias_fractional_bits) layer.build(None) elif layer.__class__ in [QuantizedPooling, QuantizedReLU, QuantizedTanh]: - if name in config["quantization_parameters"]["layer_specific"]: - i = config["quantization_parameters"]["layer_specific"][name]["integer_bits"] - f = config["quantization_parameters"]["layer_specific"][name]["fractional_bits"] + if name in config.quantization_parameters.layer_specific: + i = config.quantization_parameters.layer_specific[name]["integer_bits"] + f = config.quantization_parameters.layer_specific[name]["fractional_bits"] layer.set_activation_bits(i, f) return module def 
add_quantized_activations_to_model_layer(module, config): - if not config["quantization_parameters"]["enable_quantization"]: + if not config.quantization_parameters.enable_quantization: return module # Replaces ReLU and Tanh layers with quantized versions for name, layer in module.named_children(): - i = config["quantization_parameters"]["default_integer_bits"] - f = config["quantization_parameters"]["default_fractional_bits"] + i = config.quantization_parameters.default_integer_bits + f = config.quantization_parameters.default_fractional_bits if layer.__class__ in [nn.ReLU]: # For ReLU, if using default values, add 1 bit since values are unsigned. # Otherwise user provides bits. TODO: Find better way to do this - f = config["quantization_parameters"]["default_fractional_bits"] + 1 + f = config.quantization_parameters.default_fractional_bits + 1 relu = QuantizedReLU(config, i=i, f=f) setattr(module, name, relu) elif layer.__class__ in [nn.Tanh]: @@ -383,28 +376,28 @@ def add_quantized_activations_to_model_layer(module, config): def add_quantized_activations_to_model_functional(module, config): # Currently not in use. TODO: Fix this - if config["quantization_parameters"]["use_high_granularity_quantization"]: + if config.quantization_parameters.use_high_granularity_quantization: return module # Replaces functional activation calls with quantized versions traced_model = symbolic_trace(module) for node in traced_model.graph.nodes: if node.op in ["call_method", "call_function"] and (node.target == "tanh" or "function relu" in str(node.target)): with traced_model.graph.inserting_after(node): - if node.name in config["quantization_parameters"]["layer_specific"]: - bits = config["quantization_parameters"]["layer_specific"][node.name]["bits"] + if node.name in config.quantization_parameters.layer_specific: + bits = config.quantization_parameters.layer_specific[node.name]["bits"] else: bits = ( - config["quantization_parameters"]["default_integer_bits"] - + config["quantization_parameters"]["default_fractional_bits"] + config.quantization_parameters.default_integer_bits + + config.quantization_parameters.default_fractional_bits + 1 ) # 1 sign bit kwargs = {"bits": bits} if node.target == "tanh": - kwargs["use_real_tanh"] = config["quantization_parameters"]["use_real_tanh"] - kwargs["use_symmetric"] = config["quantization_parameters"]["use_symmetric_quantization"] + kwargs["use_real_tanh"] = config.quantization_parameters.use_real_tanh + kwargs["use_symmetric"] = config.quantization_parameters.use_symmetric_quantization # new_node = traced_model.graph.call_function(quantized_tanh, node.args, kwargs) else: - kwargs = {"integer_bits": config["quantization_parameters"]["default_integer_bits"], "bits": bits} + kwargs = {"integer_bits": config.quantization_parameters.default_integer_bits, "bits": bits} # new_node = traced_model.graph.call_function(quantized_relu, node.args, kwargs) # node.replace_all_uses_with(new_node) traced_model.graph.erase_node(node) @@ -416,7 +409,7 @@ def add_quantized_activations_to_model_functional(module, config): def disable_pruning_from_layers(module, config): for name, layer in module.named_modules(): - enable_pruning = name not in config["pruning_parameters"]["disable_pruning_for_layers"] + enable_pruning = name not in config.pruning_parameters.disable_pruning_for_layers if layer.__class__ in [CompressedLayerLinear, CompressedLayerConv2d, CompressedLayerConv1d] and not enable_pruning: layer.enable_pruning = enable_pruning return module @@ -444,8 +437,8 @@ def 
add_pruning_to_model(module, config): def remove_pruning_from_model_torch(module, config): for name, layer in module.named_children(): if isinstance(layer, CompressedLayerLinear): - if config["pruning_parameters"]["pruning_method"] == "pdp": # Find better solution later - if config["training_parameters"]["pruning_first"]: + if config.pruning_parameters.pruning_method == "pdp": # Find better solution later + if config.training_parameters.pruning_first: weight = layer.weight if layer.enable_pruning: weight = layer.pruning_layer.get_hard_mask(weight) * weight @@ -465,8 +458,8 @@ def remove_pruning_from_model_torch(module, config): if getattr(module, name).bias is not None: getattr(module, name).bias.data.copy_(bias_values.data) elif isinstance(layer, (CompressedLayerConv2d, CompressedLayerConv1d)): - if config["pruning_parameters"]["pruning_method"] == "pdp": # Find better solution later - if config["training_parameters"]["pruning_first"]: + if config.pruning_parameters.pruning_method == "pdp": # Find better solution later + if config.training_parameters.pruning_first: weight = layer.weight if layer.enable_pruning: weight = layer.pruning_layer.get_hard_mask(weight) * weight @@ -548,28 +541,28 @@ def pre_finetune_functions(model): layer.pruning_layer.pre_finetune_function() -def post_pretrain_functions(model, config, train_loader = None, loss_func=None): +def post_pretrain_functions(model, config, train_loader=None, loss_func=None): - if config["fitcompress_parameters"]["enable_fitcompress"]: + if config.fitcompress_parameters.enable_fitcompress: from pquant.core.torch_impl.fit_compress import call_fitcompress + config, pruning_mask_importance_scores = call_fitcompress(config, model, train_loader, loss_func) - #idx = 0 + # idx = 0 for layer in model.modules(): if isinstance(layer, (CompressedLayerConv2d, CompressedLayerConv1d, CompressedLayerLinear)): layer.pruning_layer.post_pre_train_function() layer.post_pre_train_function() - - #layer.pruning_layer.mask = pruning_mask_importance_scores[idx] - #idx += 1 + + # layer.pruning_layer.mask = pruning_mask_importance_scores[idx] + # idx += 1 elif isinstance(layer, (QuantizedReLU, QuantizedTanh, QuantizedPooling)): layer.post_pre_train_function() - if config["pruning_parameters"]["pruning_method"] == "pdp" or ( - config["pruning_parameters"]["pruning_method"] == "wanda" - and config["pruning_parameters"]["calculate_pruning_budget"] + if config.pruning_parameters.pruning_method == "pdp" or ( + config.pruning_parameters.pruning_method == "wanda" and config.pruning_parameters.calculate_pruning_budget ): - # pass + # pass pdp_setup(model, config) @@ -588,7 +581,7 @@ def pdp_setup(model, config): abs_global_weights = torch.abs(global_weights) global_weight_topk, _ = torch.topk(abs_global_weights, abs_global_weights.numel()) - threshold = global_weight_topk[int((1 - config["pruning_parameters"]["sparsity"]) * global_weight_topk.numel())] + threshold = global_weight_topk[int((1 - config.pruning_parameters.sparsity) * global_weight_topk.numel())] global_weights_below_threshold = torch.where(abs_global_weights < threshold, 1, 0) idx = 0 for layer in model.modules(): @@ -661,6 +654,6 @@ def create_default_layer_quantization_pruning_config(model): def add_default_layer_quantization_pruning_to_config_torch(model, config): custom_scheme = create_default_layer_quantization_pruning_config(model) - config["quantization_parameters"]["layer_specific"] = custom_scheme["layer_specific"] - config["pruning_parameters"]["disable_pruning_for_layers"] = 
custom_scheme["disable_pruning_for_layers"] - return config \ No newline at end of file + config.quantization_parameters.layer_specific = custom_scheme["layer_specific"] + config.pruning_parameters.disable_pruning_for_layers = custom_scheme["disable_pruning_for_layers"] + return config diff --git a/src/pquant/core/torch_impl/fit_compress.py b/src/pquant/core/torch_impl/fit_compress.py index d72adde..53f1f40 100644 --- a/src/pquant/core/torch_impl/fit_compress.py +++ b/src/pquant/core/torch_impl/fit_compress.py @@ -1,1677 +1,1767 @@ +import logging +import math +import random +import string +import typing +import numpy as np import torch import torch.nn as nn -import numpy as np -import random -import string -import math from quantizers import get_fixed_quantizer -quantizer = get_fixed_quantizer(overflow_mode="SAT", round_mode="RND") -import typing -if typing.TYPE_CHECKING: - from pquant.core.torch_impl.compressed_layers_torch import CompressedLayerBase, CompressedLayerLinear, CompressedLayerConv2d, QuantizedPooling, QuantizedReLU, QuantizedTanh - - -def call_fitcompress(config, trained_uncompressed_model, train_loader, loss_func): - """ - Calls the path-finding algorithm of FITcompress to find an optimal configuration for quantization - (layer-wise) and pruning (global sparsity value) of weights for the uncompressed network. - - - Args: - config : configuration settings - trained_uncompressed_model : pre-trained, uncompressed model - train_loader : training data loader - loss_func : loss function - - Returns: - config : configuration settings , but with updated pruning sparsity and - layer-wise quantization bits for weights and activations. - - """ - - from pquant.core.torch_impl.compressed_layers_torch import add_layer_specific_quantization_to_model - from pquant.core.compressed_layers import add_compression_layers - - # Set the device - device = "cuda" if torch.cuda.is_available() else "cpu" - - # Check that we have a pruning method active which has a global pruning sparsity target - if config["fitcompress_parameters"]["optimize_pruning"]: - assert config["pruning_parameters"]["pruning_method"] in ["pdp", "wanda"], "Pruning method must be either 'pdp' or 'wanda' if FITcompress should find a global pruning target." - - - def enable_quantization(model): - """ - - Helper function to enable quantization for weights, activations and pooling. - - Args : - model - current model - - Returns : - model - current model with quantization enabled - - """ - - from pquant.core.torch_impl.compressed_layers_torch import CompressedLayerBase, QuantizedPooling, QuantizedReLU, QuantizedTanh - - - for m in model.modules(): - if isinstance(m, CompressedLayerBase): - m.enable_quantization = True - if m.__class__ in [QuantizedReLU, QuantizedTanh, QuantizedPooling]: - m.enable_quantization = True - return model - - def add_quantization_settings_to_config(model, quant_info_weights, config, activ_int_bits, activ_frac_bits, pool_int_bits, pool_frac_bits): - """ - - - Helper function to add the found optimal configuration for quantization - regarding weights, activations and - pooling - to the config file. 
- - - Args : - model - current model - quant_info_weights - the found optimal quantization settings for weights, layerwise - config - current configuration file - activ_int_bits - the found optimal integer bits for activations - activ_frac_bits - the found optimal fractional bits for activations - pool_int_bits - the found optimal integer bits for pooling layer(s) - pool_frac_bits - the found optimal fractional bits for pooling layer(s) - - - Notes : - 1. This logic needs to be changed such that it can work with any model, i.e. by tracing the model beforehand - - - """ - - from pquant.core.torch_impl.compressed_layers_torch import CompressedLayerLinear, CompressedLayerConv2d, QuantizedReLU, QuantizedPooling +if typing.TYPE_CHECKING: + from pquant.core.torch_impl.compressed_layers_torch import ( + CompressedLayerBase, + CompressedLayerConv2d, + CompressedLayerLinear, + QuantizedPooling, + QuantizedReLU, + QuantizedTanh, + ) - # Counter for activations - counter = 0 - # Since in config currently a list, but dictionary makes it easier - config["quantization_parameters"]["layer_specific"] = {} - - - for name, layer in model.named_modules(): - - # For weights - if (isinstance(layer, (CompressedLayerLinear, CompressedLayerConv2d))): - config["quantization_parameters"]["layer_specific"][name] = { - "weight": {"integer_bits": quant_info_weights[name][0], "fractional_bits": quant_info_weights[name][1]}, - } - # For activations (in this case only ReLU since we are working on res20) - if layer.__class__ in [QuantizedReLU]: - - config["quantization_parameters"]["layer_specific"][name] = {"integer_bits": activ_int_bits[counter], "fractional_bits": activ_frac_bits[counter]} - counter += 1 - - # NOTE : This is specific to res20 - if layer.__class__ in [QuantizedPooling]: - config["quantization_parameters"]["layer_specific"][name] = {"integer_bits": pool_int_bits, "fractional_bits": pool_frac_bits} - - def print_bits(model): - """ - Print integer bits and fractional bits for all weight layers and activations and pooling layers. - - Args: - model - current model - - """ - from pquant.core.torch_impl.compressed_layers_torch import CompressedLayerConv1d, CompressedLayerConv2d, CompressedLayerLinear, QuantizedPooling, QuantizedReLU, QuantizedTanh - - for n,m in model.named_modules(): - if isinstance(m, (CompressedLayerConv2d, CompressedLayerConv1d, CompressedLayerLinear)): - print(f"Layer {n}: {m.i_weight, m.f_weight} bits") - elif isinstance(m, (QuantizedReLU, QuantizedTanh, QuantizedPooling)): - print(f"Layer {n}: {m.i, m.f} bits") - - - - ## LOAD UNCOMPRESSED MODEL - # Save the this model's state dict (i.e. 
uncompressed version) - trained_uncompressed_model_state_dict = trained_uncompressed_model.state_dict() - - - print("Starting FITcompress ...") - - # Instantiate FITcompress - fit_compress_computer = FITcompress(model = trained_uncompressed_model, - device = device, - dataloader = train_loader, - criterion = loss_func, - config = config, - layerwise_pruning = False) - - - # Start A* (path-finding through compression space) - optimal_node, quant_prune_config, trained_uncompressed_model, activ_int_bits, activ_frac_bits, pool_int_bits, pool_frac_bits, optimal_node_pruning_mask = fit_compress_computer.astar() - - print("Finished FITcompress") - - # Reset the model's state dict to the uncompressed version (for the next training phases), since FITcompress - # only finds optimal pruning and quantization settings, but shouldn't change the model's weights/quantization settings - trained_uncompressed_model.load_state_dict(trained_uncompressed_model_state_dict) - - - ## SET PRUNING - # Only in PDP and Wanda we have a global pruning sparsity target, which can be found via fitcompress - if config["pruning_parameters"]["pruning_method"] in ["pdp", "wanda"]: - # Create copy of default sparsity - default_sparsity_target = float(config["pruning_parameters"]["sparsity"]) - - # Set the optimal sparsity target for pruning - if config["fitcompress_parameters"]["optimize_pruning"]: - config["pruning_parameters"]["sparsity"] = float(quant_prune_config["pruning_metrics"]["percentage"]) - - # If 0 was found as optimal, set to default sparsity target - if config["pruning_parameters"]["sparsity"] == 0: - # Set to the previous default value - config["pruning_parameters"]["sparsity"] = default_sparsity_target - - - ## SET QUANTIZATION - # Enable quantization for the model - if config["quantization_parameters"]["enable_quantization"]: - trained_uncompressed_model = enable_quantization(trained_uncompressed_model) - - if config["fitcompress_parameters"]["optimize_quantization"]: - # Set layer specific quantization in config file - add_quantization_settings_to_config(trained_uncompressed_model, quant_prune_config["quant_config"], config, activ_int_bits, activ_frac_bits, pool_int_bits, pool_frac_bits) - # Now add the layer specific configuration to the model - add_layer_specific_quantization_to_model(trained_uncompressed_model, config) +quantizer = get_fixed_quantizer(overflow_mode="SAT", round_mode="RND") +def call_fitcompress(config, trained_uncompressed_model, train_loader, loss_func): + """ + Calls the path-finding algorithm of FITcompress to find an optimal configuration for quantization + (layer-wise) and pruning (global sparsity value) of weights for the uncompressed network. - ## PRINT OF NEW FOUND OPTIMAL PRUNING + QUANTIZATION SETTINGS - if config['fitcompress_parameters']['optimize_pruning']: - print("Pruning Sparsity after FITcompress : ", config["pruning_parameters"]["sparsity"]) + Args: + config : configuration settings + trained_uncompressed_model : pre-trained, uncompressed model + train_loader : training data loader + loss_func : loss function - print("Layerwise quantization bits after FITcompress : ", config["quantization_parameters"]["layer_specific"]) + Returns: + config : configuration settings , but with updated pruning sparsity and + layer-wise quantization bits for weights and activations. 
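+
+    Example (hypothetical usage sketch, not from this patch; assumes a trained
+    torch model, a DataLoader and a loss function are already in scope):
+
+        loss_func = torch.nn.functional.cross_entropy
+        config, pruning_mask = call_fitcompress(config, model, train_loader, loss_func)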
-    print_bits(trained_uncompressed_model)
+    """
+    from pquant.core.torch_impl.compressed_layers_torch import (
+        add_layer_specific_quantization_to_model,
+    )
+
+    # Set the device
+    device = "cuda" if torch.cuda.is_available() else "cpu"
+
+    # Check that we have a pruning method active which has a global pruning sparsity target
+    if config.fitcompress_parameters.optimize_pruning:
+        assert config.pruning_parameters.pruning_method in [
+            "pdp",
+            "wanda",
+        ], "Pruning method must be either 'pdp' or 'wanda' if FITcompress should find a global pruning target."
+
+    def enable_quantization(model):
+        """
+
+        Helper function to enable quantization for weights, activations and pooling.
+
+        Args :
+            model - current model
+
+        Returns :
+            model - current model with quantization enabled
+
+        """
+        from pquant.core.torch_impl.compressed_layers_torch import (
+            CompressedLayerBase,
+            QuantizedPooling,
+            QuantizedReLU,
+            QuantizedTanh,
+        )
+
+        for m in model.modules():
+            if isinstance(m, CompressedLayerBase):
+                m.enable_quantization = True
+            if m.__class__ in [QuantizedReLU, QuantizedTanh, QuantizedPooling]:
+                m.enable_quantization = True
+        return model
+
+    def add_quantization_settings_to_config(
+        model, quant_info_weights, config, activ_int_bits, activ_frac_bits, pool_int_bits, pool_frac_bits
+    ):
+        """
+
+
+        Helper function to add the found optimal configuration for quantization - regarding weights, activations and
+        pooling - to the config file.
+
+
+        Args :
+            model - current model
+            quant_info_weights - the found optimal quantization settings for weights, layerwise
+            config - current configuration file
+            activ_int_bits - the found optimal integer bits for activations
+            activ_frac_bits - the found optimal fractional bits for activations
+            pool_int_bits - the found optimal integer bits for pooling layer(s)
+            pool_frac_bits - the found optimal fractional bits for pooling layer(s)
+
+
+        Notes :
+            1. This logic needs to be changed such that it can work with any model, i.e. by tracing the model beforehand
+
+
+        """
+
+        from pquant.core.torch_impl.compressed_layers_torch import (
+            CompressedLayerConv2d,
+            CompressedLayerLinear,
+            QuantizedPooling,
+            QuantizedReLU,
+        )
+
+        # Counter for activations
+        counter = 0
+        # Since in config currently a list, but dictionary makes it easier
+        config.quantization_parameters.layer_specific = {}
+
+        for name, layer in model.named_modules():
+
+            # For weights
+            if isinstance(layer, (CompressedLayerLinear, CompressedLayerConv2d)):
+                config.quantization_parameters.layer_specific[name] = {
+                    "weight": {"integer_bits": quant_info_weights[name][0], "fractional_bits": quant_info_weights[name][1]},
+                }
+            # For activations (in this case only ReLU since we are working on res20)
+            if layer.__class__ in [QuantizedReLU]:
+
+                config.quantization_parameters.layer_specific[name] = {
+                    "integer_bits": activ_int_bits[counter],
+                    "fractional_bits": activ_frac_bits[counter],
+                }
+                counter += 1
+
+            # NOTE : This is specific to res20
+            if layer.__class__ in [QuantizedPooling]:
+                config.quantization_parameters.layer_specific[name] = {
+                    "integer_bits": pool_int_bits,
+                    "fractional_bits": pool_frac_bits,
+                }
+
+    def print_info_bits(model):
+        """
+        Log integer bits and fractional bits for all weight layers and activations and pooling layers.
+
+        Args:
+            model - current model
+
+        """
+        from pquant.core.torch_impl.compressed_layers_torch import (
+            CompressedLayerConv1d,
+            CompressedLayerConv2d,
+            CompressedLayerLinear,
+            QuantizedPooling,
+            QuantizedReLU,
+            QuantizedTanh,
+        )
+
+        for n, m in model.named_modules():
+            if isinstance(m, (CompressedLayerConv2d, CompressedLayerConv1d, CompressedLayerLinear)):
+                logging.info(f"Layer {n}: {m.i_weight, m.f_weight} bits")
+            elif isinstance(m, (QuantizedReLU, QuantizedTanh, QuantizedPooling)):
+                logging.info(f"Layer {n}: {m.i, m.f} bits")
+
+    # Save this model's state dict (i.e. the uncompressed version)
+    trained_uncompressed_model_state_dict = trained_uncompressed_model.state_dict()
+
+    logging.info("Starting FITcompress ...")
+
+    # Instantiate FITcompress
+    fit_compress_computer = FITcompress(
+        model=trained_uncompressed_model,
+        device=device,
+        dataloader=train_loader,
+        criterion=loss_func,
+        config=config,
+        layerwise_pruning=False,
+    )
+
+    # Start A* (path-finding through compression space)
+    (
+        optimal_node,
+        quant_prune_config,
+        trained_uncompressed_model,
+        activ_int_bits,
+        activ_frac_bits,
+        pool_int_bits,
+        pool_frac_bits,
+        optimal_node_pruning_mask,
+    ) = fit_compress_computer.astar()
+
+    logging.info("Finished FITcompress")
+
+    # Reset the model's state dict to the uncompressed version (for the next training phases), since FITcompress
+    # only finds optimal pruning and quantization settings, but shouldn't change the model's weights/quantization settings
+    trained_uncompressed_model.load_state_dict(trained_uncompressed_model_state_dict)
+
+    # Only PDP and Wanda have a global pruning sparsity target, which can be found via FITcompress
+    if config.pruning_parameters.pruning_method in ["pdp", "wanda"]:
+        # Create copy of default sparsity
+        default_sparsity_target = float(config.pruning_parameters.sparsity)
+
+        # Set the optimal sparsity target for pruning
+        if config.fitcompress_parameters.optimize_pruning:
+            config.pruning_parameters.sparsity = float(quant_prune_config["pruning_metrics"]["percentage"])
+
+        # If 0 was found as optimal, set to default sparsity target
+        if config.pruning_parameters.sparsity == 0:
+            # Set to the previous default value
+            config.pruning_parameters.sparsity = default_sparsity_target
+
+    # Enable quantization for the model
+    if config.quantization_parameters.enable_quantization:
+        trained_uncompressed_model = enable_quantization(trained_uncompressed_model)
+
+    if config.fitcompress_parameters.optimize_quantization:
+        # Set layer specific quantization in config file
+        add_quantization_settings_to_config(
+            trained_uncompressed_model,
+            quant_prune_config["quant_config"],
+            config,
+            activ_int_bits,
+            activ_frac_bits,
+            pool_int_bits,
+            pool_frac_bits,
+        )
+        # Now add the layer specific configuration to the model
+        add_layer_specific_quantization_to_model(trained_uncompressed_model, config)
+
+    if config.fitcompress_parameters.optimize_pruning:
+        logging.info(f"Pruning sparsity after FITcompress: {config.pruning_parameters.sparsity}")
+
+    logging.info(f"Layerwise quantization bits after FITcompress: {config.quantization_parameters.layer_specific}")
+
+    print_info_bits(trained_uncompressed_model)
+
+    return config, optimal_node_pruning_mask
 
-    return config, optimal_node_pruning_mask
 
 class node:
-    def __init__(self, matrices_params_layerwise, FeM, quant_config,pruning_metrics,gscore,fscore, full_dist, state, curr_compression_rate, unquantized_weights, int_bits, frac_bits):
-
-        """
-        Setup a node (i.e. 
a current model) in the compression space ; we can then go from node-to-node in - the compression space. - - Args: - matrices_params_layerwise - current parameters of the model, layerwise - FeM - FeM, layerwise - quant_config - the quantization config to use, layerwise (UPDATED VIA QUANTIZATION SCHEDULE!) //part of the configuration c in paper - pruning_metrics - the pruning metrics used based on the pruning method (sparsity, only for PDP and Wanda) - gscore - the current score of g (as in paper) , i.e. path cost/distance between initial model and current model - fscore - the current score of f (as in paper), i.e. heuristic cost/distance between current model and goal model - full_dist - the full distance : gscore + lambda * fscore (as in paper) - state - Describes the current "state" of the model (this is important for the schedulers to know what value to use) - compression - The current compression rate (alpha_j in the papers pseudo code) of the model - unquantized_weights - refers to the original weights of the model that are only affected by pruning. These are needed such that we can always quantize based on 32 bit values and not loose - precision. - int_bits - the current integer bits for quantization of the weights, layerwise - frac_bits - the current fractional bits for quantization of the weights, layerwise - """ - - self.parameters = matrices_params_layerwise - self.FeM = FeM - self.quant_config = quant_config - self.pruning_metrics = pruning_metrics - self.gscore = gscore - self.fscore = fscore - self.full_dist = full_dist - self.state = state - self.curr_compression_rate = curr_compression_rate - self.unquantized_weights = unquantized_weights - self.int_bits = int_bits - self.frac_bits = frac_bits - self.key = ''.join(random.choices(string.ascii_uppercase + string.digits, k=20)) - - def extract_config_from_node(self, layer_names): - """ - Extract the quantization and pruning configuration from the node. - This is used to get the configuration of the model after the path finding process. - - Returns : - config - A dictionary containing the quantization and pruning configuration of the node. - """ - - assert len(self.quant_config) == len(layer_names), "Quantization config length does not match number of layers" - - # Create a dictionary to store the quantization config w.r.t layer names - quant_config = {layer_name: [i_bits, f_bits] for layer_name, i_bits, f_bits in zip(layer_names, self.int_bits, self.frac_bits)} - - config = { - 'quant_config': quant_config, - 'pruning_metrics': self.pruning_metrics - } - - return config - -class FITcompress: - - def __init__(self, model, device, dataloader, criterion, config, layerwise_pruning = False): - - """ - Calculating intial EF of uncompressed model and set up quantization & pruning schedules as - well as the initial node in the compression space. - - - Notes : - 1. We use the "FeM" value for FIT calculation (as in original code) and not directly the EF trace (as in the paper). 
-class FITcompress:
-
-    def __init__(self, model, device, dataloader, criterion, config, layerwise_pruning = False):
-
-        """
-        Calculates the initial EF of the uncompressed model and sets up the quantization & pruning schedules as
-        well as the initial node in the compression space.
-
-        Notes :
-            1. We use the "FeM" value for the FIT calculation (as in the original code) and not directly the EF trace (as in the paper).
-
-        Args :
-            model : Pre-trained model (uncompressed, but loaded with compression layers)
-            device : Device to use for the model
-            dataloader : Dataloader for data
-            criterion : loss function (Cross-Entropy)
-            config : config
-            layerwise_pruning : Whether or not to find layerwise pruning targets (NOTE : this has not been tested)
-
-        Config Args :
-            compression_goal : desired compression constraint alpha in the paper
-            quantization_schedule : the bits that are to be used for quantization during the search through the compression space
-            pruning_schedule (start, end, steps) : starting point and end point for the scheduler as well as how many steps it should take
-        """
-        self.model = model
-        self.device = device
-        self.dataloader = dataloader
-        self.criterion = criterion
-        self.config = config
-        self.layerwise_pruning = layerwise_pruning
-
-        # Initialize an instance of the FIT class based on the uncompressed model.
-        # This marks which weights & activations can be pruned/quantized.
-        # We can then reuse this instance and its corresponding .get_EF() function
-        # and get_FIT() functions, passing the appropriate empirical Fisher traces.
-        self.fit_computer = FIT(self.model, self.device, input_spec = (3, 32, 32))
-
-        # Calculate the EF trace of the uncompressed model (i.e. the initial EF trace), based only on weights
-        self.FeM, self.EF_trace_params_layerwise_uncompressed, _, _, _ = self.fit_computer.get_EF(self.model, self.dataloader, self.criterion, tolerance = 0.01, min_iterations = 100, max_iterations = 100)
-
-        # Get the number of layers in the model
-        self.n_layers = len(self.EF_trace_params_layerwise_uncompressed)
-
-        # Get the current model weights
-        matrices_params_layerwise, _, self.layer_names = self.fit_computer.get_model_weights(self.model)
-
-        # Store a copy of the original weights
-        import copy
-        self.original_matrices_params_layerwise = copy.deepcopy(matrices_params_layerwise)
-
-        ## SET COMPRESSION CONSTRAINT
-        self.compression_goal = config['fitcompress_parameters']['compression_goal']
-        ## SET QUANTIZATION SCHEDULER
-        self.quant_schedule = config['fitcompress_parameters']['quantization_schedule']
-        ## SET PRUNING SCHEDULER
-        self.pruning_schedule = 1 - np.logspace(config['fitcompress_parameters']['pruning_schedule']['start'],
-                                                config['fitcompress_parameters']['pruning_schedule']['end'],
-                                                base=10, num=config['fitcompress_parameters']['pruning_schedule']['steps'])
-
-        # For N:M pruning in Wanda, use a 50% pruning cap during FITcompress
-        if (self.config['pruning_parameters']['pruning_method'] == 'wanda' and isinstance(self.config['pruning_parameters']['N'], int)):
-            self.pruning_schedule = 0.5 * (1 - np.logspace(config['fitcompress_parameters']['pruning_schedule']['start'],
-                                                           config['fitcompress_parameters']['pruning_schedule']['end'],
-                                                           base=10, num=config['fitcompress_parameters']['pruning_schedule']['steps']))
-
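As a quick sanity check of the schedule shape, a standalone sketch assuming the default start/end/steps values from the config files (0, -3, 40); the sparsity target ramps from 0 towards 0.999:

import numpy as np

# 1 - logspace(0, -3) decays from 10**0 to 10**-3, so sparsity climbs from 0.0 to 0.999.
schedule = 1 - np.logspace(0, -3, base=10, num=40)
print(schedule[0], schedule[20], schedule[-1])  # 0.0, ~0.97, 0.999
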
-        # The dictionary structure allows us to possibly iterate over multiple different pruning metrics,
-        # but currently there is only one, as in FITcompress: the target pruning sparsity, i.e. percentage
-        pruning_metrics = {'percentage' : 0}
-
-        # If we want to find sparsity targets per layer (not part of the FITcompress paper)
-        if layerwise_pruning:
-            self.pruning_schedulers_layerwise = self.get_pruning_schedulers_layer_specific(matrices_params_layerwise, None, mode = 'fit')
-            # Add the layer-specific starting pruning percentages to the current metric
-            pruning_metrics = pruning_metrics | {f'{self.layer_names[i]}_percentage' : 0 for i in range(self.n_layers)}
-
-        # Initialize the first node in the compression space
-        self.initial_node = node(
-            matrices_params_layerwise = matrices_params_layerwise,
-            FeM = self.FeM.copy(),
-            quant_config = [31 for _ in range(self.n_layers)], # 31 bits for each layer (since one goes to the sign)
-            pruning_metrics = pruning_metrics, # Initial pruning defined in config.yaml
-            gscore = 0.0, # Initial gscore
-            fscore = np.inf, # Initial fscore
-            full_dist = np.inf, # Initial full distance (gscore + lambda * fscore)
-            # -1 so that index 0 is used for the first neighbouring nodes
-            state = [-1 for _ in range(self.n_layers)] + [0], # Initial state for all layers (for quantization) and one for global pruning
-            curr_compression_rate = 1.0, # Initial compression rate (no compression yet)
-            unquantized_weights = self.original_matrices_params_layerwise, # Unquantized weights
-            int_bits = [0 for _ in range(self.n_layers)], # Initial integer bits (this could be set to any number, we just need the list structure)
-            frac_bits = [31 for _ in range(self.n_layers)] # Initial fractional bits (this could be set to any number, we just need the list structure)
-        )
-
-        # Initialize a list to store nodes that can be traversed during the path-finding process
-        self.potential_nodes = [self.initial_node]
-
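To make the state bookkeeping concrete, a small sketch with toy numbers of how a quantization step advances one layer's index into the schedule, while the trailing entry tracks the pruning schedule:

quant_schedule = [7., 4., 3., 2.]      # bit-widths tried per layer, coarse to fine
state = [-1, -1, -1] + [0]             # one index per layer, plus one for global pruning

# Advancing layer 1 by one step selects the next scheduled bit-width for that layer only.
state[1] += 1
bits_layer_1 = quant_schedule[state[1]]  # 7.0 on the first visit
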
-
-        Returns :
-            Dict of pruning schedulers (one per layer, keyed by layer name)
-        """
-
-        schedulers = {}
-        if mode == 'fit':
-            # Get the layer-wise FIT scores of the initial model
-            _, FIT_layerwise = self.fit_computer.get_FIT_old(FeM = self.FeM, params_after = matrices_params_layerwise, same_theta = True)
-
-            # For each layer, first take the sum over the FIT values (of each weight)
-            FIT_layerwise_summed = [torch.sum(layer).item() for layer in FIT_layerwise]
-            # Find min and max importance
-            min_importance = min(FIT_layerwise_summed)
-            max_importance = max(FIT_layerwise_summed)
-
-            ## SCHEDULER CREATION
-            for layer_idx, importance in enumerate(FIT_layerwise_summed):
-
-                # Scale importance between 0 and 1
-                importance_ratio = (importance - min_importance) / (max_importance - min_importance)
-
-                # Decides the "strength" of the difference between the flattest/steepest curves (the higher the value, the more difference)
-                scale_factor = 5
-
-                # The higher the importance score, the bigger the exponent, the flatter the curve
-                exponent = scale_factor * importance_ratio
-
-                scheduler_curve = []
-                for step in range(40):
-                    # Normalize to [0,1] range
-                    t = step / 39
-                    pruning_at_step = t ** exponent
-                    scheduler_curve.append(pruning_at_step)
-
-                schedulers[self.layer_names[layer_idx]] = np.array(scheduler_curve)
-
-        # Adjust each layer-specific scheduler such that, at each step, the sum of all layer-specific scheduler values equals the global sparsity scheduler value at that step
-        if global_sparsity_scheduler is not None:
-            for step in range(len(global_sparsity_scheduler)):
-                if step == 0: # So we do not get a problem with division by 0
-                    continue
-                # Calculate the sum of all layer-specific scheduler values at that step
-                sum_layer_schedulers = sum(curr_scheduler[step] for curr_scheduler in schedulers.values())
-                # Scale each layer-specific scheduler value at that step such that the sum equals the global sparsity scheduler value at that step
-                for layer_name in self.layer_names:
-                    schedulers[layer_name][step] *= global_sparsity_scheduler[step] / sum_layer_schedulers
-
-                assert np.isclose(sum(curr_scheduler[step] for curr_scheduler in schedulers.values()), global_sparsity_scheduler[step]), f"Sum of layer-specific schedulers at step {step} does not match global sparsity scheduler value {global_sparsity_scheduler[step]}"
-
-        return schedulers
-
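For intuition on the t ** exponent shaping above: a higher exponent keeps the curve flat for longer (more important layers are pruned later), a lower one ramps up early. A quick standalone sketch with made-up exponents:

import numpy as np

t = np.linspace(0, 1, 40)
flat_curve = t ** 5      # high-importance layer: stays near 0 until late steps
steep_curve = t ** 0.5   # low-importance layer would get an exponent near 0; 0.5 shown for contrast
print(flat_curve[10], steep_curve[10])  # ~0.0011 vs ~0.51
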
-    def assign_parameters(self, model, params):
-        """
-        Update the model's actual parameters with new values from the compression search process.
-
-        Args:
-            model : The model to update the parameters of.
-            params : Array of new parameters to assign to the model's layers.
-
-        Notes:
-            1. This only deals with weights. We do not look at any activations & biases, as
-               this is not done in the original code.
-        """
-
-        from pquant.core.torch_impl.compressed_layers_torch import CompressedLayerBase, CompressedLayerLinear, CompressedLayerConv2d, QuantizedPooling, QuantizedReLU, QuantizedTanh
-
-        i = 0
-        for name, module in model.named_modules():
-            if (isinstance(module, (CompressedLayerLinear, CompressedLayerConv2d))):
-                for name_param, matrix_param in list(module.named_parameters()):
-                    if name_param.endswith('weight'):
-                        matrix_param.data = nn.parameter.Parameter(params[i].to(self.device))
-                        matrix_param.collect = True
-                        i += 1
-
-    def add_quantization(self, model, params, quant_config, reset = False):
-        """
-        Quantizes weights of the model based on fixed-point quantization. Given the
-        bit-width of the current quantization configuration of a layer, bits are
-        distributed between the integer and fractional part based on the maximum absolute
-        value of the weights of that layer.
-
-        Args :
-            model : The model to quantize
-            params : List of unquantized (but possibly pruned) parameters of the model, layerwise
-            quant_config : List of current quantization configurations, layerwise
-            reset : Flag that allows quantizing based on unpruned weights
-
-        Returns :
-            neighbour_node_parameters_layerwise : List of quantized parameters of the model, layerwise
-            all_int_bits : List of integer bits for parameters, layerwise
-            all_frac_bits : List of fractional bits for parameters, layerwise
-        """
-
-        from pquant.core.torch_impl.compressed_layers_torch import CompressedLayerBase, CompressedLayerLinear, CompressedLayerConv2d, QuantizedPooling, QuantizedReLU, QuantizedTanh
-
-        neighbour_node_parameters_layerwise = []
-        all_int_bits = []
-        all_frac_bits = []
-
-        # Get integer & fractional bits based on the max abs value of each layer's weights
-        for idx, param_layer in enumerate(params):
-            max_abs = torch.max(torch.abs(param_layer.detach().cpu()))
-            eps = 1e-12
-            int_bits = max(0, math.ceil(math.log2(max_abs + eps)))
-            fractional_bits = quant_config[idx] - int_bits
-
-            all_int_bits.append(int_bits)
-            all_frac_bits.append(fractional_bits)
-
-        for idx, param_layer in enumerate(params):
-            # If reset is inactive, we quantize the weights given the unquantized, but possibly pruned, weights
-            # of the current node.
-            # Note that this doesn't quantize the actual module, but just returns us the weights
-            if not reset:
-                new_weight = quantizer(param_layer, k=torch.tensor(1.0), i=torch.tensor(all_int_bits[idx]), f=torch.tensor(all_frac_bits[idx]), training=True)
-            # If reset is active, we quantize based on unpruned weights
-            else:
-                new_weight = quantizer(self.original_matrices_params_layerwise[idx], k=torch.tensor(1.0), i=torch.tensor(all_int_bits[idx]), f=torch.tensor(all_frac_bits[idx]), training=True)
-
-            neighbour_node_parameters_layerwise.append(new_weight)
-
-        return neighbour_node_parameters_layerwise, all_int_bits, all_frac_bits
-
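A worked example of the bit split above, assuming a 7-bit budget for a layer (the budget here excludes the sign bit, hence the 31 for 32-bit weights): with max |w| = 2.7, ceil(log2(2.7)) = 2 integer bits, leaving 5 fractional bits. The numbers are made up:

import math

total_bits = 7   # bit budget for the layer from the quantization schedule (assumed)
max_abs = 2.7    # largest absolute weight in the layer (made-up value)
int_bits = max(0, math.ceil(math.log2(max_abs + 1e-12)))  # -> 2
frac_bits = total_bits - int_bits                         # -> 5
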
-    def add_pruning(self, current_node, params, importance_score, pruning_percentage):
-        """
-        Unstructured, global pruning based on a pruning percentage.
-        This provides the pruning mask P as described in the FITCompress paper.
-
-        Notes :
-            1. Pruning importance scores are calculated based on the node that
-               we are currently searching neighbours for : we prune the current node's weights
-               to create the neighbour node based on pruning.
-
-        Args :
-            current_node : The current best model
-            params : parameters after quantization
-            importance_score : importance scores for each weight of the current best model
-            pruning_percentage : The percentage of weights to prune from the current node's weights.
-
-        Returns :
-            current_node_matrices_params_layerwise : List of pruned parameters, layerwise.
-            current_node_matrices_unquantized_params_layerwise : List of unquantized pruned parameters, layerwise.
-        """
-
-        # Get the shape of each layer's weight matrix
-        current_node_matrices_params_shapes_layerwise = [layer.shape for layer in params]
-        # Get the total number of each layer's weights
-        current_node_matrices_params_numel_layerwise = [layer.numel() for layer in params]
-        # Get the cumulative sum of weights per layer (for indexing purposes)
-        current_node_matrices_params_cumsum_layerwise = list(np.cumsum(current_node_matrices_params_numel_layerwise))
-        # Add 0 at the beginning for easier indexing
-        current_node_matrices_params_cumsum_layerwise.insert(0, 0)
-
-        # Flatten parameters & importance scores and concatenate to have single vectors containing everything
-        current_node_params_flat = torch.cat([layer.view(-1) for layer in params]).detach().cpu()
-        # Same for importance scores
-        current_node_importance_scores_flat = torch.cat([layer.view(-1) for layer in importance_score]).detach().cpu()
-
-        # Also create an instance for the unquantized weights
-        current_node_unquantized_params_flat = torch.cat([layer.view(-1) for layer in current_node.unquantized_weights]).detach().cpu()
-
-        # Calculate the number of parameters to prune (percentage * number of all parameters in the model)
-        num_params_to_prune = int(pruning_percentage * len(current_node_params_flat))
-        # Based on the negative importance scores of all the weights, find the indices of the weights with the
-        # smallest importance scores (closest to 0) (i.e. the ones that are not affected by perturbation as much)
-        _, indices_params_to_prune = torch.topk(-current_node_importance_scores_flat, num_params_to_prune)
-        # Set those parameters/weights to 0
-        current_node_pruned_params_flat = torch.scatter(current_node_params_flat, -1, indices_params_to_prune, 0.)
-
-        # Also for the unquantized weights
-        current_node_pruned_unquantized_params_flat = torch.scatter(current_node_unquantized_params_flat, -1, indices_params_to_prune, 0.)
-
-        # Now reconstruct the correct shape
-        current_node_matrices_params_layerwise = []
-
-        for i in range(self.n_layers):
-            current_node_matrices_params_layerwise.append(torch.reshape(current_node_pruned_params_flat[current_node_matrices_params_cumsum_layerwise[i]:current_node_matrices_params_cumsum_layerwise[i+1]], current_node_matrices_params_shapes_layerwise[i]))
-
-        current_node_matrices_unquantized_params_layerwise = []
-        for i in range(self.n_layers):
-            current_node_matrices_unquantized_params_layerwise.append(torch.reshape(current_node_pruned_unquantized_params_flat[current_node_matrices_params_cumsum_layerwise[i]:current_node_matrices_params_cumsum_layerwise[i+1]], current_node_matrices_params_shapes_layerwise[i]))
-
-        # Put everything on the GPU again
-        current_node_matrices_params_layerwise = [layer.to(self.device) for layer in current_node_matrices_params_layerwise]
-        current_node_matrices_unquantized_params_layerwise = [layer.to(self.device) for layer in current_node_matrices_unquantized_params_layerwise]
-
-        return current_node_matrices_params_layerwise, current_node_matrices_unquantized_params_layerwise
-
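The flatten / topk / scatter mechanics above in isolation, on a toy tensor (a sketch, not the class method; magnitudes stand in for the FIT importance scores):

import torch

params = torch.tensor([0.9, -0.1, 0.4, -0.8, 0.05, 0.3])
scores = params.abs()              # stand-in for the FIT importance scores
k = int(0.5 * params.numel())      # prune 50% of the weights

# Indices of the k smallest importance scores (negate so topk finds the minima)...
_, idx = torch.topk(-scores, k)
# ...and zero them out; pruned == [0.9, 0.0, 0.4, -0.8, 0.0, 0.0]
pruned = torch.scatter(params, -1, idx, 0.)
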
-    def add_pruning_layer_specific(self, current_node, pruning_metrics, layer_idx = None):
-        """
-        NOTE : IDEA, UNTESTED
-        The idea of this function is to find pruning sparsity targets for each layer, instead of
-        globally for the whole network. This could be used to e.g. set initial sparsity targets for each layer
-        for PDP.
-
-        The actual pruning that is done per layer is importance pruning based on the FIT value (here using
-        the FIT of the corresponding layer).
-
-        Notes :
-            1. There are two ways I could see this being used :
-               A. Either we create a new node for each possible pruning layer, similar to how it was done for quantization;
-                  this would be an expensive process, since for pruning, at least according to the paper's theory, the FIT
-                  would need to be recalculated each time.
-               B. We only create one new node for pruning, but instead of simple global pruning on all parameters, we take
-                  into account some pruning sparsity target for each layer (which could e.g. come from PDP_setup(), but also from
-                  e.g. FIT(theta_i, theta_i) of each layer), and prune per layer based on that current pruning sparsity target.
-        """
-
-        ### B
-
-        ## CALCULATE IMPORTANCE SCORES
-        # As in the original code, the importance scores are calculated
-        # as FIT(theta_i, theta_i) of the current model, i.e. the current node.
-        _, FIT_layerwise = self.fit_computer.get_FIT_old(FeM = current_node.FeM, params_after = current_node.parameters, same_theta = True)
-
-        # Get the shape of each layer's weight matrix
-        current_node_matrices_params_shapes_layerwise = [layer.shape for layer in current_node.parameters]
-        # Flatten parameters & importance scores
-        current_node_params_flat_layerwise = [layer.view(-1).detach().cpu() for layer in current_node.parameters]
-        # Same for importance scores
-        current_node_importance_scores_flat_layerwise = [layer.view(-1).detach().cpu() for layer in FIT_layerwise]
-
-        current_node_matrices_params_layerwise = []
-        # Now iterate through all layers
-        for idx, curr_pruning_percentage in enumerate(pruning_metrics.values()):
-
-            if idx == 0: # Global pruning percentage
-                continue
-
-            # Calculate the number of parameters to prune in this layer, based on the current pruning percentage
-            num_params_to_prune = int(curr_pruning_percentage * len(current_node_params_flat_layerwise[idx - 1]))
-
-            # Based on the negative importance scores of all the weights, find the indices of the weights with the
-            # smallest importance scores (closest to 0) (i.e. the ones that are not affected by perturbation as much)
-            _, indices_params_to_prune = torch.topk(-current_node_importance_scores_flat_layerwise[idx - 1], num_params_to_prune)
-            # Set those parameters/weights to 0
-            current_node_pruned_params_flat = torch.scatter(current_node_params_flat_layerwise[idx - 1], -1, indices_params_to_prune, 0.)
-            # Now reconstruct the correct shape
-            current_node_matrices_params_layerwise.append(torch.reshape(current_node_pruned_params_flat, current_node_matrices_params_shapes_layerwise[idx - 1]))
-
-        # Put everything on the GPU again
-        current_node_matrices_params_layerwise = [layer.to(self.device) for layer in current_node_matrices_params_layerwise]
-
-        return current_node_matrices_params_layerwise
-
-    def post_fitcompress_calibration(self, best_node_quant_config, calibration_epochs=50):
-        """
-        After the path-finding process, we need to find the optimal integer and fractional bits for activations,
-        pooling layers and inputs into the model. This is done based on forward passes through the quantized, but
-        unpruned, model (i.e. the quantization settings for the weights found during FITcompress, but without applying
-        the optimal pruning sparsity), using some calibration epochs.
-        The allocation of integer and fractional bits for activations is calculated by taking the maximum absolute value
-        of the inputs to the activation units during the calibration passes and then distributing integer and fractional bits
-        based on that value. The total bit-width is given by the corresponding weight layer (i.e. the optimal bit-width that
-        FITcompress found for that weight layer) of that activation unit.
-        The allocation of integer and fractional bits for pooling layers and inputs is done similarly, but based on the inputs to the
-        pooling layers and the model, respectively. Furthermore, as these modules don't have any corresponding weight layer, the bit-width
-        is set to 7 bits (1 bit goes to the sign).
-
-        Args:
-            best_node_quant_config: The quantization configuration FITcompress found for each weight layer
-            calibration_epochs: The number of calibration epochs to run.
-
-        Returns:
-            activ_int_bits - Number of integer bits for each activation unit
-            activ_frac_bits - Number of fractional bits for each activation unit
-            pool_int_bits - Number of integer bits for the only pooling layer in res20
-            pool_frac_bits - Number of fractional bits for the only pooling layer in res20
-        """
-        from pquant.core.torch_impl.compressed_layers_torch import QuantizedReLU, QuantizedPooling
-
-        # To avoid numerical issues
-        eps = 1e-12
-        # Store input data, as we also need to quantize the input (which is currently done in resnet.py of pquant-dev)
-        data_input = []
-
-        # Set post calibration in QuantizedReLU + QuantizedPooling to true; this means we now collect quantized inputs into activation functions.
-        # Note that after the pre_training flag is set to False, post_fitcompress_calibration is not used anywhere else, therefore it is not
-        # set to False anywhere and can stay True
-        for m in self.model.modules():
-            if m.__class__ in [QuantizedReLU, QuantizedPooling]:
-                m.post_fitcompress_calibration = True
-
-        # Trigger forward passes through the model
-        self.model.eval()
-        counter = 0
-
-        # Collect calibration batches; stop after calibration_epochs distinct mini-batches
-        for num_mini_batch, data in enumerate(self.dataloader):
-            if counter >= calibration_epochs:
-                break
-            self.model.zero_grad()
-            data_batch, target_batch = data[0].to(self.device), data[1].to(self.device)
-            data_input.append(data_batch)
-            _ = self.model(data_batch)
-            counter += 1
-
-        # Get ranges of activation inputs
-        activation_ranges = []
-        # Access the inputs to the ReLU
-        for name, m in self.model.named_modules():
-            if m.__class__ in [QuantizedReLU]:
-                # Average over calibration data
-                avg_relu = torch.stack(m.saved_inputs, dim = 0).mean(dim = 0)
-                # Now get the activation range
-                range_relu = (avg_relu.min().item(), avg_relu.max().item())
-                activation_ranges.append((name, range_relu))
-
-        # Get ranges of the data input
-        avg_inputs = torch.stack(data_input, dim = 0).mean(dim = 0)
-        range_inputs = (avg_inputs.min().item(), avg_inputs.max().item())
-
-        # Get ranges of the pooling layer input(s)
-        activation_ranges_pool = []
-        # And for the pooling layer (specific to res20)
-        for m in self.model.modules():
-            if m.__class__ in [QuantizedPooling]:
-                # Average over calibration data
-                avg_pool = torch.stack(m.saved_inputs, dim = 0).mean(dim = 0)
-                # Now get the activation range
-                range_pool = (avg_pool.min().item(), avg_pool.max().item())
-                activation_ranges_pool.append(range_pool)
-
-        activ_int_bits = []
-        activ_frac_bits = []
-        for idx, (name, layer) in enumerate(activation_ranges):
-            max_abs = np.abs(np.max(layer))
-            # Find the corresponding quant config of the weight layer that belongs to this activation unit
-            try:
-                curr_quant_config = best_node_quant_config[name.replace("relu", "conv")]
-            except KeyError:
-                curr_quant_config = best_node_quant_config["conv1"]
-
-            # curr_quant_config[0] : integer bits of the weights, curr_quant_config[1] : fractional bits of the weights.
-            # Total bit-width of the corresponding weight layer; + 1 since ReLUs don't need the sign bit
-            total_bits = curr_quant_config[0] + curr_quant_config[1] + 1
-            int_bits = min(max(0, math.ceil(math.log2(max_abs + eps))), total_bits)
-            activ_int_bits.append(int_bits)
-            frac_bits = total_bits - int_bits
-            activ_frac_bits.append(frac_bits)
-
-        # Same logic for the data input (using 7 bits as standard, 1 goes to the sign)
-        max_abs_input = np.abs(np.max(range_inputs))
-        int_bits_input = min(max(0, math.ceil(math.log2(max_abs_input + eps))), 7)
-        frac_bits_input = 7 - int_bits_input
-
-        # Same logic for the pooling layer (using 7 bits as standard, 1 goes to the sign); just one pooling layer in res20
-        for idx, layer in enumerate(activation_ranges_pool):
-            max_abs = np.abs(np.max(layer))
-            pool_int_bits = min(max(0, math.ceil(math.log2(max_abs + eps))), 7)
-            pool_frac_bits = 7 - pool_int_bits
-
-        print(f"SET INT BITS INPUT: {int_bits_input}  SET FRAC BITS INPUT: {frac_bits_input}")
-        print(f"INT BITS POOLING: {pool_int_bits}, FRAC BITS POOLING: {pool_frac_bits}")
-
-        return activ_int_bits, activ_frac_bits, pool_int_bits, pool_frac_bits
-
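A worked example of the activation allocation, assuming the matching conv layer got i=2, f=5 from the search (so 2 + 5 + 1 = 8 total bits, the sign bit being reusable after a ReLU) and a made-up calibration maximum of 5.3:

import math

w_int, w_frac = 2, 5             # weight-layer bits found by FITcompress (assumed)
total_bits = w_int + w_frac + 1  # ReLU outputs need no sign bit, so +1 is usable
max_abs = 5.3                    # calibration max |input| to the ReLU (made up)

int_bits = min(max(0, math.ceil(math.log2(max_abs + 1e-12))), total_bits)  # -> 3
frac_bits = total_bits - int_bits                                          # -> 5
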
-    def astar(self):
-        """
-        The actual search algorithm of FITcompress, which is based on the A* algorithm.
-        Either find a node with an optimal configuration (i.e. a compression rate lower than the goal) and stop, or
-        expand the node with the lowest distance between the initial & optimal model among the potential best nodes.
-
-        Config Args :
-            greedy_astar : If set to True, remove all other current neighbour nodes and only keep the current best node (i.e. remove the A* fallback mechanism)
-
-        Returns :
-            A node descriptor of the best node found in the compression space, i.e. the current model with the best configuration and
-            the found quantization/pruning configuration
-        """
-        from pquant.core.torch_impl.compressed_layers_torch import CompressedLayerBase, QuantizedPooling, QuantizedReLU, QuantizedTanh
-
-        iterations = 0
-        while len(self.potential_nodes) > 0 and iterations < 1000:
-            print(f'Iteration : {iterations} ')
-
-            next_best_node = None
-
-            print(f"Finding the next best node among the {len(self.potential_nodes)} neighbour nodes...")
-            # Iterate through all potential next nodes to visit in the compression space
-            for p_node in self.potential_nodes:
-                # If we find a node with the wanted compression rate, we can return it and stop the A* algorithm
-                if p_node.curr_compression_rate < self.compression_goal:
-
-                    print(f"Optimal node found with full distance {p_node.full_dist}, compression rate {p_node.curr_compression_rate}, quantization config {p_node.quant_config} and pruning metrics {p_node.pruning_metrics}")
-
-                    # Based on the unquantized, but pruned, weights, get the pruning mask that was applied based on the importance scores, layerwise
-                    p_node_pruning_mask_layerwise = [(p_node.unquantized_weights[i] != 0).float() for i in range(self.n_layers)]
-
-                    # Reset the model's pruning (i.e. remove pruning), but keep quantization, such that we can do post-FITcompress calibration
-                    params_quantized_unpruned, _, _ = self.add_quantization(
-                        model = self.model,
-                        params = p_node.unquantized_weights.copy(),
-                        quant_config = p_node.quant_config.copy(),
-                        reset = True
-                    )
-
-                    self.assign_parameters(self.model, params_quantized_unpruned)
-
-                    activ_int_bits, activ_frac_bits, pool_int_bits, pool_frac_bits = self.post_fitcompress_calibration(p_node.extract_config_from_node(self.layer_names)['quant_config'])
-
-                    return p_node, p_node.extract_config_from_node(self.layer_names), self.model, activ_int_bits, activ_frac_bits, pool_int_bits, pool_frac_bits, p_node_pruning_mask_layerwise
-
-                # Find the node with the lowest distance between the initial & optimal model
-                if next_best_node is None or p_node.full_dist < next_best_node.full_dist:
-                    next_best_node = p_node
-
-            print(f"Next best node found with full distance {next_best_node.full_dist}, compression rate {next_best_node.curr_compression_rate}, quantization config {next_best_node.quant_config} and pruning metrics {next_best_node.pruning_metrics}")
-
-            # In greedy search, keep only the found next best neighbouring node to our current node and remove all other potential nodes
-            if self.config['fitcompress_parameters']['greedy_astar']:
-                self.potential_nodes = [next_best_node]
-
-            # After the next best node was found, set the model parameters
-            self.assign_parameters(self.model, next_best_node.parameters.copy())
-
-            # Now that we found the next best neighbouring node to our current best node, we can start exploring its neighbours.
-            # The other neighbouring nodes, if not greedy_astar, are still in the potential nodes list (this is the A* fallback
-            # mechanism, i.e. they might still get explored if our greedy path doesn't lead to an optimal solution)
-            self.create_neighbours(next_best_node)
-
-            iterations += 1
-
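A toy illustration (not the method above) of how the greedy flag changes the open list between iterations; all numbers are made up:

open_list = [
    {"full_dist": 0.61, "compression": 0.42},
    {"full_dist": 0.58, "compression": 0.35},
    {"full_dist": 0.72, "compression": 0.12},
]
goal = 0.10

# No node satisfies the compression goal yet, so expand the cheapest one by g + lambda * f ...
best = min(open_list, key=lambda n: n["full_dist"])
# ... greedy mode discards the alternatives; plain A* keeps them as a fallback.
greedy_open_list = [best]
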
-    def create_neighbours(self, current_node):
-        """
-        Create the neighbours of the current node in the compression space that are then again
-        explored with the A* algorithm (i.e. astar()).
-        In particular, this function generates the neighbours' quantization/pruning settings based on the schedule and the
-        current node's state, applies quantization or pruning, and then passes the new parameters to the create_new_node() function.
-
-        We create L new nodes, where L refers to the # of layers, for quantization settings per layer,
-        and 1 for global unstructured pruning, based on the importance scores calculated as FIT(theta_i, theta_i).
-
-        Args :
-            current_node : The current node in the compression space, i.e. the current model.
-        Config Args :
-            use_quantization : If set to True, we create neighbours based on quantization settings.
-            use_pruning : If set to True, we create neighbours based on pruning settings.
-            approximate : If set to True, we use the previous FeM to calculate FIT values for quantization (in the paper : Computational Details section)
-
-        Notes :
-            1. In the original code, during the creation of neighbours based on quantization we also prune (and vice versa).
-               Since this is not coherent with the paper's theory, I implemented it such that we either quantize or prune
-               when creating the neighbours.
-            2. For the approximate flag, we use the previous FeM (i.e. of the current best node) to calculate the FIT values
-               for quantization of the neighbour nodes. According to the paper, this is done only for the quantization part, but
-               not for pruning. In the original code, it was also used for pruning, but in this implementation we do not use it for pruning.
-        """
-
-        if self.config['fitcompress_parameters']['approximate']:
-
-            # Update the FeM for the best node and use it when creating the neighbours for quantization.
-            # This leads to num_layers fewer FIT calculations, as we do not need to calculate the FeM again,
-            # which reduces runtime.
-            # Assign the current parameters to the model (i.e. the ones of the current node)
-            self.assign_parameters(self.model, current_node.parameters)
-            # Calculate the FeM of the current model
-            curr_FeM, _, _, _, _ = self.fit_computer.get_EF(self.model, self.dataloader, self.criterion, min_iterations = 100, max_iterations = 100)
-            # Set the current node's FeM to the calculated one
-            current_node.FeM = curr_FeM.copy()
-
-        # Calculate the importance score of the current node (FIT(theta_i, theta_i))
-        _, FIT_layerwise = self.fit_computer.get_FIT_old(FeM = current_node.FeM, params_after = current_node.parameters, same_theta = True)
-
-        current_node_state = current_node.state.copy()
-
-        print("Current node states for quantization & pruning: ", current_node_state)
-
-        ### CREATING NEIGHBOUR NODES BASED ON QUANTIZATION PER LAYER ###
-        if self.config['fitcompress_parameters']['optimize_quantization']:
-            for layer_idx in range(self.n_layers):
-
-                # Set the neighbour state to the current state
-                neighbour_node_state = current_node_state.copy()
-
-                # Check that we do not go out of bounds; if the scheduler ends, we don't create a new node for this layer anymore
-                if neighbour_node_state[layer_idx] < len(self.quant_schedule) - 1:
-                    # Move one step forward in the state for the current layer
-                    neighbour_node_state[layer_idx] += 1
-                    # Set the neighbour quant config to the current quant config
-                    neighbour_node_quant_config = current_node.quant_config.copy()
-                    # Update the neighbour quant config for the current layer based
-                    # on the quantization schedule and the neighbour node state
-                    neighbour_node_quant_config[layer_idx] = self.quant_schedule[neighbour_node_state[layer_idx]]
-
-                    # If we want to skip a layer's quantization
-                    if neighbour_node_quant_config[layer_idx] == 0:
-                        continue
-
-                    # Get the current node's pruning metrics (they will not be changed during quantization)
-                    neighbour_node_pruning_metrics = current_node.pruning_metrics.copy()
-
-                    ## QUANTIZATION
-                    neighbour_node_parameters_layerwise, neighbour_node_int_bits, neighbour_node_frac_bits = self.add_quantization(
-                        model = self.model,
-                        params = current_node.unquantized_weights.copy(),
-                        quant_config = neighbour_node_quant_config,
-                    )
-
-                    # Create the node structure based on the applied quantization setting for the current layer
-                    neighbour_node = self.create_new_node(
-                        current_node = current_node,
-                        neighbour_node_parameters_layerwise = neighbour_node_parameters_layerwise,
-                        neighbour_node_quant_config = neighbour_node_quant_config,
-                        neighbour_node_state = neighbour_node_state,
-                        neighbour_node_pruning_metrics = neighbour_node_pruning_metrics,
-                        neighbour_node_unquantized_parameters_layerwise = current_node.unquantized_weights.copy(), # Take the current node's unquantized weights, because during the quantization steps these do not change
-                        neighbour_node_int_bits = neighbour_node_int_bits,
-                        neighbour_node_frac_bits = neighbour_node_frac_bits,
-                        approximate = self.config['fitcompress_parameters']['approximate']
-                    )
-
-                    # Add the neighbour node to the potential nodes list
-                    self.potential_nodes.append(neighbour_node)
-
-        ### CREATE NEIGHBOURS BASED ON PRUNING (GLOBAL, UNSTRUCTURED) ###
- if self.config['fitcompress_parameters']['optimize_pruning']: - - # Set neighbour state to current state - neighbour_node_state = current_node_state.copy() - - # Check that we do not go out of bounds ; if the scheduler ends, we don't create new nodes for pruning anymore - if neighbour_node_state[-1] + 1 < len(self.pruning_schedule): - # Move one step forward in the state for pruning - neighbour_node_state[-1] += 1 - - # Get current quantization config of the current node (will not be changed during pruning) - neighbour_node_quant_config = current_node.quant_config.copy() - neighbour_node_int_bits = current_node.int_bits.copy() - neighbour_node_frac_bits = current_node.frac_bits.copy() - - neighbour_node_pruning_percentage = self.pruning_schedule[neighbour_node_state[-1]] - - neighbour_node_pruning_percentages = [neighbour_node_pruning_percentage] - - # layerwise pruning (works, but not tested for performance) ; probably outdated - if self.layerwise_pruning: - neighbour_node_pruning_percentage_layerwise = [layer_scheduler[neighbour_node_state[-1]] for layer_scheduler in self.pruning_schedulers_layerwise.values()] # Layerwise pruning percentage - neighbour_node_pruning_percentages += neighbour_node_pruning_percentage_layerwise # Add the layerwise pruning percentages to the list - - neighbour_node_pruning_metrics = current_node.pruning_metrics.copy() - - # Update the pruning metrics for the neighbour node - for idx,key in enumerate(neighbour_node_pruning_metrics.keys()): - neighbour_node_pruning_metrics[key] = neighbour_node_pruning_percentages[idx] - - - ## PRUNING - if self.layerwise_pruning: - neighbour_node_parameters_layerwise = self.add_pruning_layer_specific( - current_node = current_node, - pruning_metrics = neighbour_node_pruning_metrics - ) - else: - - neighbour_node_parameters_layerwise, neighbour_node_unquantized_parameters_layerwise = self.add_pruning( - current_node = current_node, - params = current_node.parameters.copy(), - importance_score = FIT_layerwise, - pruning_percentage = neighbour_node_pruning_percentage - ) - - - # Create node structure based on applied pruning - neighbour_node = self.create_new_node( - current_node = current_node, - neighbour_node_parameters_layerwise = neighbour_node_parameters_layerwise, - neighbour_node_quant_config = neighbour_node_quant_config, - neighbour_node_state = neighbour_node_state, - neighbour_node_pruning_metrics = neighbour_node_pruning_metrics, - neighbour_node_unquantized_parameters_layerwise = neighbour_node_unquantized_parameters_layerwise, # Take the newly created pruned, but unquantized weights of the neighbour node - neighbour_node_int_bits = neighbour_node_int_bits, - neighbour_node_frac_bits = neighbour_node_frac_bits, - approximate = False - ) - - # Add the neighbour node to the potential nodes list - self.potential_nodes.append(neighbour_node) - - - # Remove the current node from the potential nodes list, as we have now created all its neighbours - current_node_key = current_node.key - for idx, node in enumerate(self.potential_nodes): - if node.key == current_node_key: - del self.potential_nodes[idx] - break - - def create_new_node(self, current_node, neighbour_node_parameters_layerwise, neighbour_node_quant_config, neighbour_node_state, neighbour_node_pruning_metrics, neighbour_node_unquantized_parameters_layerwise, neighbour_node_int_bits, neighbour_node_frac_bits, approximate = False): - """ - Create a new node in the compression space based on the current node and the new parameters. 
- - Args : - current_node : The current node, i.e. the current model. - neighbour_node_parameters_layerwise : The parameters of the neighbour node, layerwise. - neighbour_node_quant_config : The quantization configuration of the neighbour node, layerwise. - neighbour_node_state : The state of the neighbour node (which determine current quantization setting per layer + pruning settings). - neighbour_node_pruning_metrics : Sparsity goal for pruning of the neighbour node - neighbour_node_unquantized_parameters_layerwise : The unquantized parameters of the neighbour node, layerwise. - neighbour_node_int_bits : The integer bits for weights of the neighbour node, layerwise. - neighbour_node_frac_bits : The fractional bits for weights of the neighbour node, layerwise. - approximate : If set to True, we use the previous FeM to calculate FIT values for Quantization (see Computational Details in FITcompress paper). - - Returns : - The newly created neighbour node. - """ - - - if approximate: - # If approximate is set to True, we do not recalculate the FeM, but use the one from the current node - neighbour_node_FeM = current_node.FeM.copy() - - - else: - # First, assign the new parameters of the neighbour node to the model - # This is done in order that we can calculate the FeM based on these new parameters - self.assign_parameters(self.model, neighbour_node_parameters_layerwise) - - # Then, compute the new FeM based on the new parameters of the neighbour node - neighbour_node_FeM, _, _, _, _ = self.fit_computer.get_EF( - model = self.model, - data_loader = self.dataloader, - loss_func = self.criterion, - tolerance = 1e-3, - min_iterations = 100, - max_iterations = 100 - ) - - # Calculate the gscore, fscore, full distance and current compression rate - neighbour_node_gscore, neighbour_node_fscore, neighbour_node_full_dist, neighbour_node_compression_rate = self.calculate_path_cost( - current_node = current_node, - neighbour_node_parameters_layerwise = neighbour_node_parameters_layerwise, - neighbour_node_FeM = neighbour_node_FeM, - neighbour_node_quant_config = neighbour_node_quant_config, - ) - - # Create the instance for the neighbour node - neighbour_node = node( - matrices_params_layerwise = neighbour_node_parameters_layerwise, - FeM = neighbour_node_FeM, - quant_config = neighbour_node_quant_config, - pruning_metrics = neighbour_node_pruning_metrics, - gscore = neighbour_node_gscore, - fscore = neighbour_node_fscore, - full_dist = neighbour_node_full_dist, - state = neighbour_node_state, - curr_compression_rate = neighbour_node_compression_rate, - unquantized_weights = neighbour_node_unquantized_parameters_layerwise, - int_bits = neighbour_node_int_bits, - frac_bits = neighbour_node_frac_bits - ) - - - - return neighbour_node - - def calculate_path_cost(self, current_node, neighbour_node_parameters_layerwise, neighbour_node_FeM, neighbour_node_quant_config): - """ - - Calculates the g and f score to evaluate the cost of the path from initial model to current model (g score) - and the heuristic cost to the goal model (f score). Furthermore, calculates the full distance based on both scores. - Additionally, the compression rate of the neighbour node (i.e. model) is calculated. - - Args : - current_node : The current node in the compression space, i.e. the current model. - neighbour_node_parameters_layerwise : The parameters of the neighbour node, layerwise. - neighbour_node_FeM : The FeM of the neighbour node, layerwise. 
- neighbour_node_quant_config : The quantization configuration of the neighbour node, layerwise. - - Config Args: - lambda : The lambda value to use for the full distance calculation (default is 1.0, as in original code). - - Returns : - neighbour_node_gscore : The g score of the neighbour node, i.e. the cost of the path from initial node to neighbour node. - neighbour_node_fscore : The f score of the neighbour node, i.e. the heuristic cost to the goal model from the neighbour node. - neighbour_node_full_dist : The full distance from initial node to final node, given we use the neighbour node, i.e. g score + lambda * f score. - neighbour_node_compression_rate : The compression rate of the neighbour node, i.e. how much of the original model is still active. - - """ - - ## CALCULATION G SCORE - # curr_g_score + sqrt(FIT(params_current_node, params_neighbour_node)) - neighbour_node_gscore = current_node.gscore + np.sqrt(self.fit_computer.get_FIT_old( - params_before = current_node.parameters, - FeM = current_node.FeM, - params_after = neighbour_node_parameters_layerwise, - same_theta = False - )) - - ## CALCULATION F SCORE - # abs(neighbour_node_compression_rate - compression_goal) * sqrt(FIT(params_neighbour_node, params_neighbour_node)) - # First, calculate the compression rate of the neighbour node (i.e. model) - neighbour_node_compression_rate = self.calculate_current_compression_rate( - params_layerwise = neighbour_node_parameters_layerwise, - quant_config = neighbour_node_quant_config - ) - # Then get FIT(params_neighbour_node,params_neighbour_node) - neighbour_node_FIT, _ = self.fit_computer.get_FIT_old( - params_after = neighbour_node_parameters_layerwise, - FeM = neighbour_node_FeM, - same_theta = True - ) - # Finally, calculate the f score - neighbour_node_fscore = np.sqrt(((np.abs(neighbour_node_compression_rate - self.compression_goal)**2)*neighbour_node_FIT)) - - ## CALCULATION FULL DISTANCE - # g_score + lambda * f_score - neighbour_node_full_dist = neighbour_node_gscore + self.config['fitcompress_parameters']['lambda'] * neighbour_node_fscore - - return neighbour_node_gscore, neighbour_node_fscore, neighbour_node_full_dist, neighbour_node_compression_rate - - def calculate_current_compression_rate(self, params_layerwise, quant_config): - """ - - Calculates the compression ratio of the model (what fraction of the original model in bytes is still "active" after applying - compression) - - Args: - params_layerwise : The current parameters (theta) of the model, layer-wise. - quant_config : The current (per layer) quantization config. - - Returns: - The compression ratio, i.e. how many bytes are still active after pruning and quantization - (= active bytes / uncompressed bytes) - """ - - active_bytes = 0.0 - uncompressed = 0.0 - - for params_layer , quant_conf_layer in zip(params_layerwise, quant_config): - - # Count which parameters are non-zero (i.e. which were not pruned), i.e non_zero is simply the number of non-zero parameters in the current layer - non_zero = torch.sum(torch.where(torch.abs(params_layer)<10e-8, 0, 1)).detach().cpu().numpy() - - # For all non-zero parameters in that layer, calculate their bytes (by multiplying bits with the quantization config c and dividing by 8 to convert bits to bytes) - active_bytes += non_zero*quant_conf_layer/8 # Gives us the number of total bytes needed to store the parameters in the current layer (and takes into account the quantization & pruning effect !!) 
- - # For the uncompressed version, we simply look at ALL parameters that are in that layer (p.numel(), i.e. we assume that all the parameters of a layer are active and unpruned) - # and simply multiply by 4, since each original parameter is 32 bits (i.e. 4 bytes) - uncompressed += (params_layer.numel()*4) - - return active_bytes/uncompressed - -class FIT: - - def __init__(self, model, device, input_spec): - """ - Initialize the FIT class, which is used to compute the FIT values for quantization and pruning. - Args: - model (torch.nn.Module): The model for which to compute the FIT values. - device (torch.device): The device on which the model is located. - input_spec (tuple): The input specification for the model, e.g. (3, 32, 32) for CIFAR-10. - """ - self.hooks = [] - self.device = device - - - self.matrices_params_layerwise, self.matrices_params_sizes_layerwise, _ = self.get_model_weights(model) - self.hook_layers(model) - - # Dummy Forward Pass to trigger hooks & collect activations - _ = model(torch.randn(input_spec)[None, ...].to(self.device)) - - # List of sizes of tensors of activation inputs - self.matrices_activs_sizes_layerwise = [] - - for name, module in model.named_modules(): - if module.act_quant: - self.matrices_activs_sizes_layerwise.append(module.activ_in[0].size()) - - # Sanity check - print(len(self.matrices_activs_sizes_layerwise), "# Layers from which we extract activations") - print(len(self.matrices_params_sizes_layerwise), "# Sizes of weight matrices for each layer") - - def get_model_weights(self, model): - """ - Set collect flag to True for all weights of the layers of interest in the model. - This will give us easy access to the weights that we want to quantize/prune later on. - Furthermore, we can also access the weights, which we need for the FIT calculation. - - Notes: - 1.This is only called once initially. Its main purpose is to set the .collect flag to True, - such that we can then later on access the weights easily. - - Args: - model (torch.nn.Module): The model from which to get the weights. - Returns: - matrices_params_layerwise (list): A list of the weight matrices for each layer of interest. - matrices_params_sizes_layerwise (list): A list of sizes of the weight matrices for each layer of interest. - layer_names (list): A list of the names of the layers of interest. - """ - - - from pquant.core.torch_impl.compressed_layers_torch import CompressedLayerBase, CompressedLayerLinear, CompressedLayerConv2d, QuantizedPooling, QuantizedReLU, QuantizedTanh - - matrices_params_layerwise = [] - layer_names = [] - # Iterate through all modules in the model - for name, module in model.named_modules(): - - if (isinstance(module, (CompressedLayerLinear,CompressedLayerConv2d))): - layer_names.append(name) - for name_param, matrix_param in list(module.named_parameters()): - # Search for the weights - if name_param.endswith('weight'): - matrices_params_layerwise.append(matrix_param) - # Set their collect flag to True (later on we can then access them easily like this) - matrix_param.collect = True - else: - matrix_param.collect = False - continue - - # For Batch Normalization layers etc. 
we do not collect any weights
-            for matrix_param in list(module.parameters()):
-                if matrix_param.requires_grad:
-                    matrix_param.collect = False
-
-        # Collect the sizes of the weight matrices
-        matrices_params_sizes_layerwise = [param.size() for param in matrices_params_layerwise]
-
-        return matrices_params_layerwise, matrices_params_sizes_layerwise, layer_names
-
-    def hook_layers(self, model):
-        """
-        Used to get the activation inputs during the forward pass, which are
-        needed for computing the FIT (if calculated with the noise model) w.r.t. activations.
-
-        Args :
-            model (torch.nn.Module): The model to hook the layers of.
-        """
-        from pquant.core.torch_impl.compressed_layers_torch import CompressedLayerBase, CompressedLayerLinear, CompressedLayerConv2d, QuantizedPooling, QuantizedReLU, QuantizedTanh
-
-        def hook_inp(module, inp, outp):
-            """
-            Store the activation input of the module.
-            """
-            module.activ_in = inp
-
-        for name, module in model.named_modules():
-            if (isinstance(module, (CompressedLayerLinear, CompressedLayerConv2d))):
-                # Forward hook to get the inputs into the activation function
-                hook = module.register_forward_hook(hook_inp)
-                self.hooks.append(hook) # Store hooks so we can remove them later
-                module.act_quant = True # mark it
-            else:
-                module.act_quant = False
-
-    def hook_removal(self):
-        """
-        Remove all hooks that were registered to the model.
-        """
-        for hook in self.hooks:
-            hook.remove()
-
-        self.hooks.clear()
-        assert len(self.hooks) == 0, "Hooks were not removed properly!"
-
-    def get_loss(self, model, data_batch, target_batch, loss_func, mode = 'mini-batch'):
-        """
-        This function triggers the loss calculation of a model.
-        We use it such that we can then calculate the gradients which are
-        needed for the EF trace (which is one part of the FIT).
-
-        Notes:
-            1. I sketch here how we could do it for both mini-batch and per-sample loss calculation.
-               But since the original code works with the mini-batch implementation, I only implement that one
-               from here on.
-
-        Args:
-            model (torch.nn.Module): The (trained) model to calculate the loss for.
-            data_batch (Tensor): Current input data mini-batch for the model.
-            target_batch (Tensor): Current target data mini-batch for the model.
-            loss_func (callable): Loss function to use for the calculation.
-            mode (str): Mode of loss calculation, either 'mini-batch' or 'sample'.
-                        'mini-batch' calculates the loss for each mini-batch (by summing losses
-                        and averaging over the mini-batch),
-                        'sample' calculates the loss for each sample (which should be closer
-                        to the actual paper's definition).
-
-        Returns:
-            loss : Loss for the current mini-batch (either averaged over the mini-batch or per sample).
-        """
-
-        output = model(data_batch)
-
-        if mode == 'mini-batch':
-            # Check which loss_func instance is active
-            if isinstance(loss_func, torch.nn.CrossEntropyLoss):
-                # Calculate the loss based on the mini-batch and averaged over it
-                loss_func = torch.nn.CrossEntropyLoss()
-
-        if mode == 'sample':
-            if isinstance(loss_func, torch.nn.CrossEntropyLoss):
-                # Calculate the loss for each sample
-                loss_func = torch.nn.CrossEntropyLoss(reduction = 'none')
-
-        loss = loss_func(output, target_batch)
-
-        return loss
-
-    def get_gradients(self, model, loss, matrices_layerwise, batch_size):
-        """
-        This function calculates the gradients & squared gradients.
-        These are then used to calculate the EF trace down the line,
-        either for parameters or activations.
-
-        Args:
-            model (torch.nn.Module): The model to calculate the loss for.
- loss (Tensor): The loss tensor for which to calculate gradients. - matrices_layerwise (list): List of parameters/activation matrices for which to calculate gradients. - batch_size (int): Size of the mini-batch used for the loss calculation. - - Returns: - squared_grad : Squared gradients for the passed parameters/activations, layer-wise. - - """ - grads = torch.autograd.grad(loss, [*matrices_layerwise], retain_graph = True) - squared_grads = [batch_size * g ** 2 for g in grads] - - return squared_grads - - def get_EF(self,model, data_loader, loss_func, tolerance = 1e-3, min_iterations = 100, max_iterations = 100): - """ - Calculate the approximate Empirical Fisher trace (EF trace) /approximate Fisher Information Metric (FIM) for the model. - - Notes : - 1. The calculations are done per mini-batch, i.e. we calculate the EF trace for each mini-batch and then accumulate it (over as many mini-batches before convergence flag or max_iterations reached). - This is different compared to the paper's theory, where the EF trace is calculated for each sample and then accumulated (over all samples in the dataset). - 2. The difference between FeM and EF_trace_params_layerwise_cpu is that in the former, we didn't sum over the parameters per layer, but rather kept them as tensors. - 3. The returned ranges are only needed when FIT is calculated with the noise model. - - Args : - model (torch.nn.Module): The model to calculate the EF trace for. - data_loader (DataLoader): DataLoader for the training data. - loss_func (callable): Loss function to use for the calculation. - tolerance (float): Tolerance for convergence check. - min_iterations (int): Minimum number of iterations before convergence check. - max_iterations (int): Maximum number of iterations to perform. 
- - Returns : - FeM - The EF of parameters stored as tensors and accumulated over mini-batches, layer-wise - EF_trace_params_layerwise_cpu - The EF trace of parameters accumulated over mini-batches , layer-wise - EF_trace_activs_layerwise_cpu - The EF trace of activations accumulated over mini-batches, layer-wise - per_batch_layerwise_minmax_range_params - The min/max range of parameters for each layer , for each mini-batch - per_batch_layerwise_minmax_range_activs - The min/max range of activations for each layer, for each mini-batch - """ - - # Convergence flag based on variance of change in EF between current mini-batch estimation and accumulated EF trace (of all mini-batches) - convergence_flag = False - total_batches = 0 - model.eval() - - # Hook layers again (needed for when we recalculate EF traces during FITcompress, this could be made cleaner possibly) - self.hook_layers(model) - - # Initialize list to store accumulated EF of parameters (weights) over mini-batches - batch_accum_EF_matrices_params_layerwise = [torch.zeros(size).to(self.device) for size in self.matrices_params_sizes_layerwise] - # Initialize list to store accumulated EF of activations over mini-batches ; NOTE: We do not store the first layer's activations, since it is the input layer - batch_accum_EF_matrices_activs_layerwise = [torch.zeros(size).to(self.device) for size in self.matrices_activs_sizes_layerwise[1:]] - - - # These ranges will be needed for the noise model - # NOTE : layerwise here means that each element itself is a list of the ranges for each layer for the current mini-batch - per_batch_layerwise_minmax_range_params = [] - per_batch_layerwise_minmax_range_activs = [] - - # These will be needed for the convergence check - # NOTE : layerwise here means that each element itself is a list of the ranges for each layer for the current mini-batch - per_batch_layerwise_grad_sum_squared_params = [] - per_batch_layerwise_grad_sum_squared_activs = [] - - - # Iterate over mini-batches in the data loader as long as we have not reached the max_iterations or convergence flag is not set - while (total_batches < max_iterations and not convergence_flag): - for num_mini_batch, data in enumerate(data_loader): - - model.zero_grad() - data_batch, target_batch = data[0].to(self.device), data[1].to(self.device) - batch_size = data_batch.size(0) - - loss = self.get_loss(model, data_batch, target_batch, loss_func, mode = 'mini-batch') - - - ## GET CURRENT BATCH PARAMETERS (weights) - curr_batch_matrices_params_layerwise = [] - curr_batch_minmax_range_params_layerwise = [] - for weights in model.parameters(): - if weights.collect: - curr_batch_matrices_params_layerwise.append(weights) - curr_batch_minmax_range_params_layerwise.append((torch.max(weights.data) - torch.min(weights.data)).detach().cpu().numpy()) - - per_batch_layerwise_minmax_range_params.append(curr_batch_minmax_range_params_layerwise) - - - ## GET CURRENT BATCH ACTIVATIONS - curr_batch_matrices_activs_layerwise = [] - curr_batch_minmax_range_activs_layerwise = [] - for name, module in model.named_modules(): - if module.act_quant: - curr_batch_matrices_activs_layerwise.append(module.activ_in[0]) - curr_batch_minmax_range_activs_layerwise.append((torch.max(module.activ_in[0]) - torch.min(module.activ_in[0])).detach().cpu().numpy()) - - per_batch_layerwise_minmax_range_activs.append(curr_batch_minmax_range_activs_layerwise) - - ## SQUARED GRADIENTS : 1/mini_batch_size * GRAD(f_theta(z))**2, where z current mini-batch - # Calculate squared gradients for current 
mini-batch - curr_batch_squared_grads_params_layerwise = self.get_gradients(model, loss, curr_batch_matrices_params_layerwise, batch_size) - curr_batch_squared_grads_activs_layerwise = self.get_gradients(model, loss, curr_batch_matrices_activs_layerwise[1:], batch_size) # skip first layer activations, since it is the input layer - - ###### - - # NOTE : We need this for early stopping based on convergence , it is not necessary for the EF calculation per se - - # Take the sum of squared gradients for parameters/activations of each layer - curr_batch_summed_squared_grads_params_layerwise = np.array([torch.sum(param_matrix).detach().cpu().numpy() for param_matrix in curr_batch_squared_grads_params_layerwise]) - curr_batch_summed_squared_grads_activs_layerwise = np.array([torch.sum(activ_matrix).detach().cpu().numpy() for activ_matrix in curr_batch_squared_grads_activs_layerwise]) - # Append the current mini-batch squared gradients to the list of per-batch squared gradients - per_batch_layerwise_grad_sum_squared_params.append(curr_batch_summed_squared_grads_params_layerwise) - per_batch_layerwise_grad_sum_squared_activs.append(curr_batch_summed_squared_grads_activs_layerwise) - - ###### - - ## ACCUMULATION OVER MINI-BATCHES : 1/mini_batch_size * (GRAD(f_theta(z_0))**2) + ... + GRAD(f_theta(z_total_batches-1))**2) - # Accumulate the current mini-batch squared gradient values into already accumulated data from previous mini-batches... - # ... for parameters (weights) - batch_accum_EF_matrices_params_layerwise = [curr_val_layer + curr_squared_grad_layer + 0. for curr_val_layer, curr_squared_grad_layer in zip(batch_accum_EF_matrices_params_layerwise, curr_batch_squared_grads_params_layerwise)] - # ... for activations - batch_accum_EF_matrices_activs_layerwise = [curr_val_layer + curr_squared_grad_layer + 0. for curr_val_layer, curr_squared_grad_layer in zip(batch_accum_EF_matrices_activs_layerwise, curr_batch_squared_grads_activs_layerwise)] - - total_batches += 1 - - ## NORMALIZATION OVER BATCHES: 1/N * (GRAD(f_theta(z_0))**2 + ... + GRAD(f_theta(z_total_batches-1))**2), where N : number of samples - # NOTE : Only when we iterated over all mini-batches and would stop there (i.e. while loop breaks), it is 1/N ! - batch_accum_EF_matrices_params_normalized_layerwise = [accum_grad_layer / float(total_batches) for accum_grad_layer in batch_accum_EF_matrices_params_layerwise] - batch_accum_EF_matrices_activs_normalized_layerwise = [accum_grad_layer / float(total_batches) for accum_grad_layer in batch_accum_EF_matrices_activs_layerwise] - - # FeM of the original code and in usage for the current FITcompress implementation - FeM = [value.detach().cpu() for value in batch_accum_EF_matrices_params_normalized_layerwise] - - ## EMPIRICAL FISHER TRACE/FIM (kind of) 1/N * (||GRAD(f_theta(z_0)) + ... 
+ GRAD(f_theta(z_total_batches-1))||**2) - EF_trace_params_layerwise = [torch.sum(value) for value in batch_accum_EF_matrices_params_normalized_layerwise] - EF_trace_activs_layerwise = [torch.sum(value) for value in batch_accum_EF_matrices_activs_normalized_layerwise] - - EF_trace_params_layerwise_cpu = np.array([value.detach().cpu().numpy() for value in EF_trace_params_layerwise]) - EF_trace_activs_layerwise_cpu = np.array([value.detach().cpu().numpy() for value in EF_trace_activs_layerwise]) - - # Convergence check - if total_batches >= 2: - # Calculate variance of the change in EF trace for parameters and activations - params_var = np.var((per_batch_layerwise_grad_sum_squared_params - EF_trace_params_layerwise_cpu)/EF_trace_params_layerwise_cpu)/total_batches - activs_var = np.var((per_batch_layerwise_grad_sum_squared_activs - EF_trace_activs_layerwise_cpu)/EF_trace_activs_layerwise_cpu)/total_batches - - if activs_var < tolerance and params_var < tolerance and total_batches > min_iterations: - convergence_flag = True - #print(f"Convergence reached after {total_batches} mini-batches.") - - if convergence_flag or total_batches >= max_iterations: - #print(f"Stopping after {total_batches} mini-batches.") - break - - # Remove hooks after the forward pass - self.hook_removal() - - self.FeM = FeM - self.EF_trace_params_layerwise_cpu = EF_trace_params_layerwise_cpu - self.EF_trace_activs_layerwise_cpu = EF_trace_activs_layerwise_cpu - self.per_batch_layerwise_minmax_range_params = per_batch_layerwise_minmax_range_params - self.per_batch_layerwise_minmax_range_activs = per_batch_layerwise_minmax_range_activs - - - return FeM, EF_trace_params_layerwise_cpu, EF_trace_activs_layerwise_cpu, per_batch_layerwise_minmax_range_params, per_batch_layerwise_minmax_range_activs - - def squared_step_width_quantization(self, ranges_layerwise, quant_bit_precision_layerwise): - """ - Calculate the squared step width of the quantization (reference can be found in FIT paper (Appendix E), - this is the formula for Delta). - This is needed for the uniform noise model, which will calculate delta_theta - - Notes: - Since this was part of the FIT paper, the noise model is based solely on quantization, not pruning. - - Args: - ranges: min-max range of the weights/activations, layer-wise - quant_bit_precision: quantization bit precision, layer-wise - Returns: - squared step width of the quantization, layer-wise - - """ - - return (ranges_layerwise/(2**quant_bit_precision_layerwise - 1))**2 - - def get_FIT_noise_model(self, EF_trace_params_layerwise_cpu, quant_bit_precision_params_layerwise, quant_bit_precision_activs_layerwise = None, EF_trace_activs_layerwise_cpu = None, use_activations = False): - """ - Calculate the FIT for the model, based on the empirical Fisher trace and the squared step width of the quantization - that comes from the noise model. This implements the FIT formula from the FITCompress paper. - - Notes: - 1. The original FITCompress code does not use the noise model, but rather the actual parameter values. - 2. The original FIT was implemented only based on activations. I here also include parameters. - 3. TODO : FOR FITCOMPRESS : Since FITCompress works only with parameters, does it even make sense to include activations? - since they will not change during the path-finding process, i.e. they are not quantized - 4. TODO : FOR FITCOMPRESS : How to deal with the FIT(theta_i, theta_i) calculation ? 
When using the actual parameter values, - we simply square their values, but with the noise model we can approximate the effect of - quantization or pruning without actually ever using the parameter values. But the noise model - only takes into account the current bit precision and the min-max range of the parameters/activations, - so how can this be adjusted to the FIT(theta_i, theta_i) calculation? - - - Args: - EF_trace_params_layerwise_cpu (list): empirical Fisher trace values parameters (weights), layer-wise - EF_trace_activs_layerwise_cpu (list): empirical Fisher trace values activations, layer-wise - quant_bit_precision_params (list): quantization bit precision for the parameters (weights), layer-wise - quant_bit_precision_activs (list): quantization bit precision for the activations, layer-wise - use_activations (bool): whether to include activations in the FIT calculation or not - - Returns: - FIT_full (float): FIT value - """ - - - # Get the mean across all stored mini-batches for each layer - mean_range_params_layerwise = np.mean(self.per_batch_layerwise_minmax_range_params, axis=0) - - # Calculate the squared step width of the quantization for parameters and activations - # TODO : here we need to deal with the FIT(theta_i, theta_i) calculation ; I don't really know what to do here for that - squared_step_width_quant_params_layerwise = self.squared_step_width_quantization(mean_range_params_layerwise, np.array(quant_bit_precision_params_layerwise)) - - FIT_params_layerwise = squared_step_width_quant_params_layerwise * EF_trace_params_layerwise_cpu - - # print("Type of FIT_params_layerwise: ", type(FIT_params_layerwise)) # np..ndarray - # print("Length of FIT_params_layerwise: ", len(FIT_params_layerwise)) # 20 - - # Calculate full FIT by summing over all layers - # TODO : missing is the 1/n(l) normalization, but cancels itself out according to Adrian - FIT_full = np.sum(FIT_params_layerwise) - - - - if use_activations: - mean_range_activs_layerwise = np.mean(self.per_batch_layerwise_minmax_range_activs, axis=0)[1:] # # skip first layer activations, since it is the input layer - squared_step_width_quant_activs_layerwise = self.squared_step_width_quantization(mean_range_activs_layerwise, np.array(quant_bit_precision_activs_layerwise[1:])) # 1: depends on the setting (whether first layer is included in config or not) - FIT_activs_layerwise = squared_step_width_quant_activs_layerwise * EF_trace_activs_layerwise_cpu - FIT_full += np.sum(FIT_activs_layerwise) - - - self.FIT_full = FIT_full - - print("FIT value : ", self.FIT_full) - - - - return FIT_full - - def get_FIT_real_values(self, params_before, EF_trace_params_layerwise, params_after = None, same_theta = False): - """ - Calculate the actual FIT for the model, based on the parameter values between two - consecutive models during the FITCompress path-finding process. - - Notes: - 1. This deviates from the code for FITCompress. In the original code, - they use the still layer-wise EF and not the trace and do element-wise multiplication. - Here I implement the actual FIT formula from the theoretical paper. This means it still has to be tested. - 3. Does not include any activations until now since original FITCompress code doesn't include - them, so would need to be extended. - - Args: - params_before (list): List of parameters (weights) before the path-finding step. - params_after (list): List of parameters (weights) after the path-finding step. 
-        EF_trace_params_layerwise (list): List of empirical Fisher trace values for each layer.
-        same_theta (bool) : When we need to calculate FIT(theta,theta) for the f heuristic (see FITCompress paper, section 3.4)
-
-        Returns:
-        curr_FIT (float): FIT value
-
-
-        """
-
-        curr_FIT = 0
-
-
-        if not same_theta:
-            for (theta_before, theta_after, EF_trace) in zip(params_before, params_after, EF_trace_params_layerwise):
-                # Calculate the squared difference between the parameters before and after
-                delta_theta = torch.sum((theta_before.detach().cpu() - theta_after.detach().cpu())**2).numpy()
-                # Calculate the FIT for the current layer
-                curr_FIT += EF_trace * delta_theta
-
-        else:
-
-            for (theta, EF_trace) in zip(params_before, EF_trace_params_layerwise):
-                # Calculate the squared difference between the parameters before and after
-                delta_theta = torch.sum(theta.detach().cpu()**2)
-                # Calculate the FIT for the current layer
-                curr_FIT += EF_trace * delta_theta
-
-        return curr_FIT
-
-    def get_FIT_old(self, params_before = None, FeM = None, params_after = None, same_theta = False):
-        """
-        Implementation of the original FIT code (compute_fake_FIT_params()), with the addition that we add the
-        same_theta mode). Calculates the FIT of the model.
-
-        Args:
-        params_before (list): List of parameters (weights) before the path-finding step.
-        FeM (list) : The EF of parameters stored as tensors and accumulated over mini-batches, layer-wise
-        params_after (list): List of parameters (weights) after the path-finding step.
-        same_theta (bool) : When we need to calculate FIT(theta,theta), i.e. the importance score, for the f heuristic (see FITCompress paper, section 3.4)
-
-        Notes :
-        1. This is currently used in the FITCompress path-finding process.
-
-        Returns:
-        curr_FIT (float) : FIT value
-        FIT_layerwise (list) : FIT value per layer (only returned when same_theta is True)
-
-
-        """
-
-        curr_FIT = 0
-
-        if not same_theta:
-
-            # Taken from compute_fake_FIT_params()
-            for (theta_before, theta_after, layer_FeM) in zip(params_before, params_after, FeM):
-
-                curr_FIT_layer = torch.sum(layer_FeM * (theta_before.detach().cpu() - theta_after.detach().cpu())**2).numpy()
-                curr_FIT += curr_FIT_layer
-
-            return curr_FIT
-
-
-        else:
-            FIT_layerwise = []
-
-            # Taken from generate_FIT_pruning_importance()
-            for (theta_after, layer_FeM) in zip(params_after, FeM):
-
-                curr_FIT_layer = layer_FeM * (theta_after.detach().cpu()**2)
-                FIT_layerwise.append(curr_FIT_layer)
-
-            # Taken from renorm_heuristic()
-            final_FIT = torch.sum(torch.cat([FIT_score.view(-1) for FIT_score in FIT_layerwise])).detach().cpu().numpy()
-
-            curr_FIT += final_FIT
-
-            return curr_FIT, FIT_layerwise
-
-
-
-
-
-
-
-
-
-
-
+    def __init__(
+        self,
+        matrices_params_layerwise,
+        FeM,
+        quant_config,
+        pruning_metrics,
+        gscore,
+        fscore,
+        full_dist,
+        state,
+        curr_compression_rate,
+        unquantized_weights,
+        int_bits,
+        frac_bits,
+    ):
+        """
+        Set up a node (i.e. a current model) in the compression space; we can then go from node to node in
+        the compression space.
+
+        Args:
+            matrices_params_layerwise - current parameters of the model, layerwise
+            FeM - FeM, layerwise
+            quant_config - the quantization config to use, layerwise
+            pruning_metrics - the pruning metrics used based on the pruning method (sparsity, only for PDP and Wanda)
+            gscore - the current score of g (as in paper), path cost/distance between initial model and current model
+            fscore - the current score of f (as in paper), heuristic cost/distance between current model and goal model
+            full_dist - the full distance : gscore + lambda * fscore (as in paper)
+            state - the current "state" of the model (this is important for the schedulers to know what value to use)
+            curr_compression_rate - the current compression rate (alpha_j in the paper's pseudo-code) of the model
+            unquantized_weights - refers to the original weights of the model that are only affected by pruning.
+                These are needed such that we can always quantize based on 32 bit values and not lose precision.
+            int_bits - the current integer bits for quantization of the weights, layerwise
+            frac_bits - the current fractional bits for quantization of the weights, layerwise
+        """
+
+        self.parameters = matrices_params_layerwise
+        self.FeM = FeM
+        self.quant_config = quant_config
+        self.pruning_metrics = pruning_metrics
+        self.gscore = gscore
+        self.fscore = fscore
+        self.full_dist = full_dist
+        self.state = state
+        self.curr_compression_rate = curr_compression_rate
+        self.unquantized_weights = unquantized_weights
+        self.int_bits = int_bits
+        self.frac_bits = frac_bits
+        self.key = ''.join(random.choices(string.ascii_uppercase + string.digits, k=20))
+
+    def extract_config_from_node(self, layer_names):
+        """
+        Extract the quantization and pruning configuration from the node.
+        This is used to get the configuration of the model after the path finding process.
+
+        Returns :
+            config - A dictionary containing the quantization and pruning configuration of the node.
+        """
+
+        assert len(self.quant_config) == len(layer_names), "Quantization config length does not match number of layers"
+
+        # Create a dictionary to store the quantization config w.r.t layer names
+        quant_config = {
+            layer_name: [i_bits, f_bits] for layer_name, i_bits, f_bits in zip(layer_names, self.int_bits, self.frac_bits)
+        }
+
+        config = {'quant_config': quant_config, 'pruning_metrics': self.pruning_metrics}
+
+        return config
+
+
+class FITcompress:
+    def __init__(self, model, device, dataloader, criterion, config, layerwise_pruning=False):
+        """
+        Calculate initial EF of the uncompressed model and set up quantization &
+        pruning schedules, as well as the initial node in the compression space.
+
+        Notes:
+            1) We use the FeM value for FIT calculation (as in the original code),
+               not the EF trace directly (as in the paper).
+
+        Args:
+            model: Pre-trained model (uncompressed, but loaded with compression layers).
+            device: Device to use for the model.
+            dataloader: Dataloader for training data.
+            criterion: Loss function (e.g., CrossEntropyLoss).
+            config: Configuration object.
+            layerwise_pruning: Whether to find layerwise pruning targets
+                (NOTE: not tested).
+
+        Config Args:
+            compression_goal: Desired compression constraint (alpha in the paper).
+            quantization_schedule: Bit-widths to try during the search.
+            pruning_schedule: A schedule with (start, end, steps) describing the
+                sparsity progression.
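+
+        Example (illustrative, using the default schedule values from the
+        config): 1 - np.logspace(0, -3, num=40) sweeps the sparsity target
+        from 0.0 up towards ~0.999, so early pruning neighbours are mild and
+        later ones approach full sparsity.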
+ """ + self.model = model + self.device = device + self.dataloader = dataloader + self.criterion = criterion + self.config = config + self.layerwise_pruning = layerwise_pruning + + # Initialize an instance of the FIT class based on the uncompressed model. + # This marks which weights & activations can be pruned/quantized. + # We can then reuse this instance and its corresponding .get_EF() function + # and get_FIT() functions, passing the appropriate empirical Fisher traces. + self.fit_computer = FIT(self.model, self.device, input_spec=(3, 32, 32)) + + # Calculate the EF trace of the uncompressed model (i.e. initial EF trace), only based on weights + self.FeM, self.EF_trace_params_layerwise_uncompressed, _, _, _ = self.fit_computer.get_EF( + self.model, self.dataloader, self.criterion, tolerance=0.01, min_iterations=100, max_iterations=100 + ) + + # Get the number of layers in the model + self.n_layers = len(self.EF_trace_params_layerwise_uncompressed) + + # Get the current model weights + matrices_params_layerwise, _, self.layer_names = self.fit_computer.get_model_weights(self.model) + + # Store a copy of the original weights + import copy + + self.original_matrices_params_layerwise = copy.deepcopy(matrices_params_layerwise) + + self.compression_goal = config.fitcompress_parameters.compression_goal + self.quant_schedule = config.fitcompress_parameters.quantization_schedule + self.pruning_schedule = 1 - np.logspace( + config.fitcompress_parameters.pruning_schedule.start, + config.fitcompress_parameters.pruning_schedule.end, + base=10, + num=config.fitcompress_parameters.pruning_schedule.steps, + ) + + # for N:M pruning in Wanda, use 50% pruning cap during FITcompress + if self.config.pruning_parameters.pruning_method == 'wanda' and type(self.config.pruning_parameters.N) is int: + self.pruning_schedule = 0.5 * ( + 1 + - np.logspace( + config.fitcompress_parameters.pruning_schedule.start, + config.fitcompress_parameters.pruning_schedule.end, + base=10, + num=config.fitcompress_parameters.pruning_schedule.steps, + ) + ) + + # Dictionary structure allows us to possibly iterate over multiple different pruning metrics + # but currently only one as in FITcompress, the target pruning sparsity, i.e. 
percentage
+        pruning_metrics = {'percentage': 0}
+
+        # If we want to find sparsity targets per layer (not part of FITcompress paper)
+        if layerwise_pruning:
+            self.pruning_schedulers_layerwise = self.get_pruning_schedulers_layer_specific(
+                matrices_params_layerwise, None, mode='fit'
+            )
+            # Add the layer-specific starting pruning percentages to the current metric
+            pruning_metrics = pruning_metrics | {f'{self.layer_names[i]}_percentage': 0 for i in range(self.n_layers)}
+
+        # Initialize the first node in the compression space
+        self.initial_node = node(
+            matrices_params_layerwise=matrices_params_layerwise,
+            FeM=self.FeM.copy(),
+            quant_config=[31 for _ in range(self.n_layers)],  # 31 bits for each layer (since one goes to sign)
+            pruning_metrics=pruning_metrics,  # Initial pruning defined in config.yaml
+            gscore=0.0,  # Initial gscore
+            fscore=np.inf,  # Initial fscore
+            full_dist=np.inf,  # Initial full distance (gscore + lambda * fscore)
+            # -1 such that 0 will be the first for first neighbouring nodes
+            state=[-1 for _ in range(self.n_layers)]
+            + [0],  # Initial state for all layers (for quantization) and one for global pruning
+            curr_compression_rate=1.0,  # Initial compression rate (no compression yet)
+            unquantized_weights=self.original_matrices_params_layerwise,  # Unquantized weights
+            int_bits=[
+                0 for _ in range(self.n_layers)
+            ],  # Initial integer bits (this could be set to any number, we just need the list structure)
+            frac_bits=[
+                31 for _ in range(self.n_layers)
+            ],  # Initial fractional bits (this could be set to any number, we just need the list structure)
+        )
+
+        # Initialize a list to store nodes that can be traversed during the path finding process
+        self.potential_nodes = [self.initial_node]
+
+    def get_pruning_schedulers_layer_specific(self, matrices_params_layerwise, global_sparsity_scheduler, mode='fit'):
+        """
+        Calculates layer-specific pruning schedulers. The idea is that layers whose weights
+        are not affected much by perturbation should be pruned more/faster than layers whose
+        weights are more affected by perturbation.
+        The "speed" of each scheduler is calculated depending on the mode.
+        For mode 'fit' : via the layer-wise FIT score of the initial
+        model.
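+
+        Example (illustrative): with scale_factor = 5, a layer at
+        importance_ratio = 1.0 follows t**5 (kept dense until late in the
+        schedule), while importance_ratio = 0.0 gives t**0 = 1, i.e. that
+        layer is driven to its full sparsity target immediately.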
+
+        Returns :
+            List of pruning schedulers (one per layer)
+
+        """
+
+        schedulers = {}
+        if mode == 'fit':
+            # Get the layer-wise FIT scores of the initial model
+            _, FIT_layerwise = self.fit_computer.get_FIT_old(
+                FeM=self.FeM, params_after=matrices_params_layerwise, same_theta=True
+            )
+
+            # For each layer, first take the sum over the FIT values (of each weight)
+            FIT_layerwise_summed = [torch.sum(layer).item() for layer in FIT_layerwise]
+            # Find min and max importance
+            min_importance = min(FIT_layerwise_summed)
+            max_importance = max(FIT_layerwise_summed)
+
+            for layer_idx, importance in enumerate(FIT_layerwise_summed):
+
+                # Scale importance between 0 and 1
+                importance_ratio = (importance - min_importance) / (max_importance - min_importance)
+
+                # Decides the "strength" of the difference between the flattest and steepest curves
+                # (the higher the value, the bigger the difference)
+                scale_factor = 5
+
+                # The higher the importance score, the bigger the exponent, the flatter the curve
+                exponent = scale_factor * importance_ratio
+
+                scheduler_curve = []
+                for step in range(40):
+                    # Normalize to [0,1] range
+                    t = step / 39
+                    pruning_at_step = t**exponent
+                    scheduler_curve.append(pruning_at_step)
+
+                schedulers[self.layer_names[layer_idx]] = np.array(scheduler_curve)
+
+        if global_sparsity_scheduler is not None:
+            for step in range(len(global_sparsity_scheduler)):
+                if step == 0:  # So we do not get a problem with division by 0
+                    continue
+                # Calculate the sum of all layer-specific scheduler values at that step
+                sum_layer_schedulers = sum(curr_scheduler[step] for curr_scheduler in schedulers.values())
+                for layer_name in self.layer_names:
+                    schedulers[layer_name][step] *= global_sparsity_scheduler[step] / sum_layer_schedulers
+
+                assert np.isclose(
+                    sum(sched[step] for sched in schedulers.values()),
+                    global_sparsity_scheduler[step],
+                ), (
+                    f"Sum of layer-specific schedulers at step {step} does not match "
+                    f"global sparsity scheduler value {global_sparsity_scheduler[step]}"
+                )
+
+        return schedulers
+
+    def assign_parameters(self, model, params):
+        """
+        Update the model's actual parameters with new values from the compression search process.
+
+        Args:
+            model : The model to update the parameters of.
+            params : Array of new parameters to assign to the model's layers.
+
+        Notes:
+            1. This only deals with weights. We do not look at any activations & bias, as
+               this is not done in the original code.
+        """
+        i = 0
+        for _, module in model.named_modules():
+            if isinstance(module, (CompressedLayerLinear, CompressedLayerConv2d)):
+                for name_param, matrix_param in list(module.named_parameters()):
+                    if name_param.endswith('weight'):
+                        matrix_param.data = nn.parameter.Parameter(params[i].to(self.device))
+                        matrix_param.collect = True
+                        i += 1
+
+    def add_quantization(self, model, params, quant_config, reset=False):
+        """
+        Quantizes weights of the model based on fixed-point quantization. Given the
+        bit-width of the current quantization configuration of a layer, bits are
+        distributed between integer and fractional part based on the maximum absolute
+        value of the weights of that layer.
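+
+        Example (illustrative): for a layer with max |w| = 3.2 and a 7-bit
+        value budget (quant_config[idx] = 7, the sign bit is accounted for
+        separately, as in the initial 31-bit config), int_bits =
+        ceil(log2(3.2)) = 2 and fractional_bits = 7 - 2 = 5.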
+
+        Args :
+            model : The model to quantize
+            params : List of unquantized (but possibly pruned) parameters of the model, layerwise
+            quant_config : List of current quantization configuration, layerwise
+            reset : Flag that allows quantizing based on unpruned weights
+
+        Returns :
+            neighbour_node_parameters_layerwise : List of quantized parameters of the model, layerwise
+            all_int_bits : List of integer bits for parameters, layerwise
+            all_frac_bits : List of fractional bits for parameters, layerwise
+
+        """
+        neighbour_node_parameters_layerwise = []
+        all_int_bits = []
+        all_frac_bits = []
+
+        # Get integer & fractional bits based on the max abs value of each layer's weights
+        for idx, param_layer in enumerate(params):
+            max_abs = torch.max(torch.abs(param_layer.detach().cpu()))
+            eps = 1e-12
+            int_bits = max(0, math.ceil(math.log2(max_abs + eps)))
+            fractional_bits = quant_config[idx] - int_bits
+
+            all_int_bits.append(int_bits)
+            all_frac_bits.append(fractional_bits)
+
+        for idx, param_layer in enumerate(params):
+            # If reset is inactive, we quantize the unquantized, but possibly pruned, weights
+            # of the current node.
+            # Note that this doesn't quantize the actual module, but just returns us the weights
+            if not reset:
+                new_weight = quantizer(
+                    param_layer,
+                    k=torch.tensor(1.0),
+                    i=torch.tensor(all_int_bits[idx]),
+                    f=torch.tensor(all_frac_bits[idx]),
+                    training=True,
+                )
+
+            # If reset is active, we quantize based on the unpruned weights
+            else:
+                new_weight = quantizer(
+                    self.original_matrices_params_layerwise[idx],
+                    k=torch.tensor(1.0),
+                    i=torch.tensor(all_int_bits[idx]),
+                    f=torch.tensor(all_frac_bits[idx]),
+                    training=True,
+                )
+
+            neighbour_node_parameters_layerwise.append(new_weight)
+
+        return neighbour_node_parameters_layerwise, all_int_bits, all_frac_bits
+
+    def add_pruning(self, current_node, params, importance_score, pruning_percentage):
+        """
+        Unstructured, global pruning based on a pruning percentage.
+        This provides the pruning mask P as described in the FITCompress paper.
+
+        Notes :
+            1. Pruning importance scores are calculated based on the node that
+               we are currently searching neighbours for : We prune the current node's weights
+               to create the neighbour node based on pruning.
+
+        Args :
+            current_node : The current best model
+            params : parameters after quantization
+            importance_score : importance scores for each weight of the current best model
+            pruning_percentage : The percentage of weights to prune from the current node's weights.
+
+        Returns :
+            current_node_matrices_params_layerwise : List of pruned parameters, layerwise.
+            current_node_matrices_unquantized_params_layerwise : List of unquantized pruned parameters, layerwise.
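+
+        Example (illustrative): with flattened importance scores
+        [0.9, 0.1, 0.5, 0.2] and pruning_percentage = 0.5, topk on the
+        negated scores selects the two smallest entries (0.1 and 0.2), and
+        scatter() zeroes the corresponding weights.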
+        """
+
+        # Get shape of each layer's weight matrix
+        current_node_matrices_params_shapes_layerwise = [layer.shape for layer in params]
+        # Get total number of each layer's weights
+        current_node_matrices_params_numel_layerwise = [layer.numel() for layer in params]
+        # Get cumulative sum of weights per layer (for indexing purposes)
+        current_node_matrices_params_cumsum_layerwise = list(np.cumsum(current_node_matrices_params_numel_layerwise))
+        # Add 0 at the beginning for easier indexing
+        current_node_matrices_params_cumsum_layerwise.insert(0, 0)
+
+        # Flatten parameters & importance scores and concatenate to have single vectors containing everything
+        current_node_params_flat = torch.cat([layer.view(-1) for layer in params]).detach().cpu()
+        # Same for importance scores
+        current_node_importance_scores_flat = torch.cat([layer.view(-1) for layer in importance_score]).detach().cpu()
+
+        # Also create an instance for the unquantized weights
+        current_node_unquantized_params_flat = (
+            torch.cat([layer.view(-1) for layer in current_node.unquantized_weights]).detach().cpu()
+        )
+
+        # Calculate the number of parameters to prune (percentage * number of all parameters in the model)
+        num_params_to_prune = int(pruning_percentage * len(current_node_params_flat))
+        # Based on the negated importance scores of all the weights, find the indices of weights with the
+        # smallest importance scores (closest to 0), i.e. the ones that are least affected by perturbation
+        _, indices_params_to_prune = torch.topk(-current_node_importance_scores_flat, num_params_to_prune)
+        # Set those parameters/weights to 0
+        current_node_pruned_params_flat = torch.scatter(current_node_params_flat, -1, indices_params_to_prune, 0.0)
+
+        # Also for unquantized weights
+        current_node_pruned_unquantized_params_flat = torch.scatter(
+            current_node_unquantized_params_flat, -1, indices_params_to_prune, 0.0
+        )
+
+        # Now reconstruct the correct shape
+        current_node_matrices_params_layerwise = []
+
+        for i in range(self.n_layers):
+            current_node_matrices_params_layerwise.append(
+                torch.reshape(
+                    current_node_pruned_params_flat[
+                        current_node_matrices_params_cumsum_layerwise[i] : current_node_matrices_params_cumsum_layerwise[
+                            i + 1
+                        ]
+                    ],
+                    current_node_matrices_params_shapes_layerwise[i],
+                )
+            )
+
+        current_node_matrices_unquantized_params_layerwise = []
+        for i in range(self.n_layers):
+            current_node_matrices_unquantized_params_layerwise.append(
+                torch.reshape(
+                    current_node_pruned_unquantized_params_flat[
+                        current_node_matrices_params_cumsum_layerwise[i] : current_node_matrices_params_cumsum_layerwise[
+                            i + 1
+                        ]
+                    ],
+                    current_node_matrices_params_shapes_layerwise[i],
+                )
+            )
+
+        # Put everything on GPU again
+        current_node_matrices_params_layerwise = [layer.to(self.device) for layer in current_node_matrices_params_layerwise]
+        current_node_matrices_unquantized_params_layerwise = [
+            layer.to(self.device) for layer in current_node_matrices_unquantized_params_layerwise
+        ]
+
+        return current_node_matrices_params_layerwise, current_node_matrices_unquantized_params_layerwise
+
+    def add_pruning_layer_specific(self, current_node, pruning_metrics, layer_idx=None):
+        """
+        NOTE: Experimental (untested).
+
+        This function aims to estimate per-layer pruning sparsity targets instead of a
+        single global target. It could, for example, initialize layerwise targets for
+        PDP.
+
+        Per-layer pruning is performed via importance pruning based on the layer's FIT
+        value (currently using the FIT of the corresponding layer).
+
+        Notes:
+            A) Create a new node for each candidate pruned layer (similar to the
+               quantization step). This is expensive because, per the paper, FIT would
+               need to be recomputed each time.
+            B) Create a single node for pruning, but apply per-layer sparsity targets
+               (e.g., from PDP_setup() or FIT(theta_i, theta_i)) instead of one global
+               target, and prune each layer according to its current target.
+        """
+
+        # As in the original code, the importance scores are calculated
+        # as FIT(theta_i, theta_i) of the current model, i.e. the current node.
+        _, FIT_layerwise = self.fit_computer.get_FIT_old(
+            FeM=current_node.FeM, params_after=current_node.parameters, same_theta=True
+        )
+
+        # Get shape of each layer's weight matrix
+        current_node_matrices_params_shapes_layerwise = [layer.shape for layer in current_node.parameters]
+        # Flatten parameters & importance scores
+        current_node_params_flat_layerwise = [layer.view(-1).detach().cpu() for layer in current_node.parameters]
+        # Same for importance scores
+        current_node_importance_scores_flat_layerwise = [layer.view(-1).detach().cpu() for layer in FIT_layerwise]
+
+        current_node_matrices_params_layerwise = []
+        # Now iterate through all layers
+        for idx, curr_pruning_percentage in enumerate(pruning_metrics.values()):
+
+            if idx == 0:  # Global pruning percentage
+                continue
+
+            # Calculate number of parameters to prune in this layer, based on the current pruning percentage
+            num_params_to_prune = int(curr_pruning_percentage * len(current_node_params_flat_layerwise[idx - 1]))
+
+            # Based on the negated importance scores of all the weights, find the indices of weights with the
+            # smallest importance scores (closest to 0), i.e. the ones that are least affected by perturbation
+            _, indices_params_to_prune = torch.topk(
+                -current_node_importance_scores_flat_layerwise[idx - 1], num_params_to_prune
+            )
+            # Set those parameters/weights to 0
+            current_node_pruned_params_flat = torch.scatter(
+                current_node_params_flat_layerwise[idx - 1], -1, indices_params_to_prune, 0.0
+            )
+            # Now reconstruct the correct shape
+            current_node_matrices_params_layerwise.append(
+                torch.reshape(current_node_pruned_params_flat, current_node_matrices_params_shapes_layerwise[idx - 1])
+            )
+
+        # Put everything on GPU again
+        current_node_matrices_params_layerwise = [layer.to(self.device) for layer in current_node_matrices_params_layerwise]
+
+        return current_node_matrices_params_layerwise
+
+    def post_fitcompress_calibration(self, best_node_quant_config, calibration_epochs=50):
+        """
+        Calibrate integer/fractional bit allocation for activations, pooling layers,
+        and model inputs *after* the FITcompress path search.
+
+        We run forward passes through the **quantized but unpruned** model (weights
+        use the FITcompress-found quant settings; pruning is not applied). For each
+        module, we collect input ranges during these calibration passes and derive
+        bit splits accordingly.
+
+        - Activations: integer/fractional bits are chosen from the max absolute
+          activation input. Total bit-width equals that of the corresponding weight
+          layer selected by FITcompress for that activation unit.
+        - Pooling & inputs: computed similarly from their input ranges. Since they
+          have no corresponding weight layer, the total bit-width is fixed at 7
+          bits (1 bit is the sign).
+
+        Args:
+            best_node_quant_config: Per-layer quantization configuration produced
+                by FITcompress (mapping from layer name to [int_bits, frac_bits]).
+            calibration_epochs: Number of calibration forward passes to run.
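+
+        Example (illustrative): if FITcompress assigned a weight layer the
+        split [2, 5] (2 integer + 5 fractional bits), the matching activation
+        gets an 8-bit budget (2 + 5 + 1, since ReLU outputs need no sign bit);
+        with max |input| = 6.3 this splits into int_bits = ceil(log2(6.3)) = 3
+        and frac_bits = 8 - 3 = 5.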
+
+        Returns:
+            activ_int_bits: List of integer bits per activation unit.
+            activ_frac_bits: List of fractional bits per activation unit.
+            pool_int_bits: Integer bits for the (single) pooling layer (res20).
+            pool_frac_bits: Fractional bits for the (single) pooling layer (res20).
+        """
+        from pquant.core.torch_impl.compressed_layers_torch import (
+            QuantizedPooling,
+            QuantizedReLU,
+        )
+
+        # To avoid numerical issues
+        eps = 1e-12
+        # Store input data, as we also need to quantize input (which is currently done in resnet.py of pquant-dev)
+        data_input = []
+        for m in self.model.modules():
+            if m.__class__ in [QuantizedReLU, QuantizedPooling]:
+                m.post_fitcompress_calibration = True
+
+        # Trigger forward passes through the model
+        self.model.eval()
+        counter = 0
+
+        for _, data in enumerate(self.dataloader):
+            # Stop once we have run the requested number of calibration passes
+            # (a while-loop here would repeat the very first batch only)
+            if counter >= calibration_epochs:
+                break
+            self.model.zero_grad()
+            data_batch = data[0].to(self.device)
+            data_input.append(data_batch)
+            _ = self.model(data_batch)
+            counter += 1
+
+        # Get ranges of activation inputs
+        activation_ranges = []
+        # Access the inputs to the ReLU
+        for name, m in self.model.named_modules():
+            if m.__class__ in [QuantizedReLU]:
+                # Average over calibration data
+                avg_relu = torch.stack(m.saved_inputs, dim=0).mean(dim=0)
+                # Now get the activation range
+                range_relu = (avg_relu.min().item(), avg_relu.max().item())
+                activation_ranges.append((name, range_relu))
+
+        # Get ranges of data input
+        avg_inputs = torch.stack(data_input, dim=0).mean(dim=0)
+        range_inputs = (avg_inputs.min().item(), avg_inputs.max().item())
+
+        # Get ranges of pooling layer input(s)
+        activation_ranges_pool = []
+        # And for the pooling layer (specific to res20)
+        for m in self.model.modules():
+            if m.__class__ in [QuantizedPooling]:
+                # Average over calibration data
+                avg_pool = torch.stack(m.saved_inputs, dim=0).mean(dim=0)
+                # Now get the activation range
+                range_pool = (avg_pool.min().item(), avg_pool.max().item())
+                activation_ranges_pool.append(range_pool)
+
+        activ_int_bits = []
+        activ_frac_bits = []
+        for name, layer in activation_ranges:
+            # Upper end of the averaged activation range
+            max_abs = np.abs(np.max(layer))
+            # Find the corresponding quant config of the weight layer that belongs to this activation unit
+            try:
+                curr_quant_config = best_node_quant_config[name.replace("relu", "conv")]
+            except KeyError:
+                curr_quant_config = best_node_quant_config["conv1"]
+
+            # Total budget: integer + fractional bits of the weights + 1, since ReLUs don't need the sign bit
+            total_bits = curr_quant_config[0] + curr_quant_config[1] + 1
+            # Cap the integer bits at the total budget
+            int_bits = min(total_bits, max(0, math.ceil(math.log2(max_abs + eps))))
+            activ_int_bits.append(int_bits)
+            frac_bits = total_bits - int_bits
+            activ_frac_bits.append(frac_bits)
+
+        # Same logic for the data input (using 7 bits as standard, 1 goes to sign)
+        max_abs_input = np.abs(np.max(range_inputs))
+        int_bits_input = min(7, max(0, math.ceil(math.log2(max_abs_input + eps))))
+        frac_bits_input = 7 - int_bits_input
+
+        # Same logic for the pooling layer (using 7 bits as standard, 1 goes to sign); just one pooling layer in res20
+        for layer in activation_ranges_pool:
+            max_abs = np.abs(np.max(layer))
+            pool_int_bits = min(7, max(0, math.ceil(math.log2(max_abs + eps))))
+            pool_frac_bits = 7 - pool_int_bits
+
+        logging.info(f"SET INT BITS INPUT: {int_bits_input}, SET FRAC BITS INPUT: {frac_bits_input}")
+        logging.info(f"INT BITS POOLING: {pool_int_bits}, FRAC BITS POOLING: {pool_frac_bits}")
+
+        return activ_int_bits, activ_frac_bits, pool_int_bits, pool_frac_bits
+
+    def astar(self):
+        """
+        The actual search algorithm of FITcompress, based on the A* algorithm.
+        Either return the first node with an optimal configuration (i.e. a compression
+        rate below the goal), or continue the search from the node with the lowest
+        distance between the initial & optimal model among the potential nodes.
+        Config Args :
+            greedy_astar : If set to True, remove all other current neighbour nodes and only keep the current best node
+        Returns :
+            The best node found in the compression space, its extracted config, the calibrated
+            model, the activation/pooling bit splits, and the layerwise pruning mask.
+        """
+        iterations = 0
+        while len(self.potential_nodes) > 0 and iterations < 1000:
+            logging.info(f'Iteration : {iterations} ')
+
+            next_best_node = None
+
+            logging.info(f"Finding the next best node among the {len(self.potential_nodes)} neighbour nodes...")
+            # Iterate through all potential next nodes to visit in the compression space
+            for p_node in self.potential_nodes:
+                # If we find a node with the wanted compression rate, we can return it and stop the A* algorithm
+                if p_node.curr_compression_rate < self.compression_goal:
+
+                    logging.info(
+                        f"Optimal node found with full distance {p_node.full_dist}, "
+                        f"compression rate {p_node.curr_compression_rate}, "
+                        f"quantization config {p_node.quant_config} and "
+                        f"pruning metrics {p_node.pruning_metrics}"
+                    )
+                    # Based on the unquantized, but pruned weights, get the pruning mask based on the importance scores
+                    p_node_pruning_mask_layerwise = [
+                        (p_node.unquantized_weights[i] != 0).float() for i in range(self.n_layers)
+                    ]
+
+                    # Reset model's pruning, keep quantization, such that we can do post fitcompress calibration
+                    params_quantized_unpruned, _, _ = self.add_quantization(
+                        model=self.model,
+                        params=p_node.unquantized_weights.copy(),
+                        quant_config=p_node.quant_config.copy(),
+                        reset=True,
+                    )
+
+                    self.assign_parameters(self.model, params_quantized_unpruned)
+
+                    activ_int_bits, activ_frac_bits, pool_int_bits, pool_frac_bits = self.post_fitcompress_calibration(
+                        p_node.extract_config_from_node(self.layer_names)['quant_config']
+                    )
+
+                    return (
+                        p_node,
+                        p_node.extract_config_from_node(self.layer_names),
+                        self.model,
+                        activ_int_bits,
+                        activ_frac_bits,
+                        pool_int_bits,
+                        pool_frac_bits,
+                        p_node_pruning_mask_layerwise,
+                    )
+
+                # Find the node with the lowest distance between the initial & optimal model
+                if next_best_node is None or p_node.full_dist < next_best_node.full_dist:
+                    next_best_node = p_node
+
+            logging.info(
+                f"Next best node found with full distance {next_best_node.full_dist}, "
+                f"compression rate {next_best_node.curr_compression_rate}, "
+                f"quantization config {next_best_node.quant_config} and "
+                f"pruning metrics {next_best_node.pruning_metrics}"
+            )
+
+            # Keep only the found best neighbouring node to our current node and remove all other nodes in greedy search
+            if self.config.fitcompress_parameters.greedy_astar:
+                self.potential_nodes = [next_best_node]
+
+            # After the next best node was found, set the model parameters
+            self.assign_parameters(self.model, next_best_node.parameters.copy())
+            self.create_neighbours(next_best_node)
+
+            iterations += 1
+
+    def
create_neighbours(self, current_node):
+        """
+        Create neighbour nodes in the compression space for A* exploration.
+
+        This expands the current node by advancing **either** quantization (per
+        layer) **or** pruning (global, optionally layerwise) one step according to
+        the configured schedules and the node's current state. The transformed
+        parameters are passed to `create_new_node()`, and the resulting neighbours
+        are added to `self.potential_nodes`.
+
+        Specifically:
+        • Quantization: create L neighbours (one per weight layer), each moving
+          that layer to the next bit-width in `quantization_schedule`.
+        • Pruning: create one neighbour that advances global sparsity to the next
+          value in `pruning_schedule` (importance based on FIT(theta_i, theta_i)).
+
+        Args:
+            current_node: The node to expand (i.e., the current best model state).
+
+        Config flags:
+            fitcompress_parameters.optimize_quantization: enable quantization neighbours.
+            fitcompress_parameters.optimize_pruning: enable pruning neighbours.
+            fitcompress_parameters.approximate: reuse previous FeM for quantization
+                neighbours (computational shortcut).
+
+        Notes:
+            1) Unlike the original code (which combined pruning during quantization
+               neighbour creation), we **separate** the two steps to match the
+               paper's formulation.
+            2) With `approximate=True`, we reuse the current node's FeM when
+               building quantization neighbours (per the paper's computational
+               details). We do **not** use this shortcut for pruning.
+        """
+
+        if self.config.fitcompress_parameters.approximate:
+
+            # Update FeM for the best node and use it when creating the neighbours for quantization.
+            # This saves num_layers FIT calculations, as we do not need to calculate the FeM again,
+            # which reduces runtime.
+            # Assign the current parameters to the model (i.e. the ones of the current node)
+            self.assign_parameters(self.model, current_node.parameters)
+            # Calculate the FeM of the current model
+            curr_FeM, _, _, _, _ = self.fit_computer.get_EF(
+                self.model, self.dataloader, self.criterion, min_iterations=100, max_iterations=100
+            )
+            # Set the current node's FeM to the calculated one
+            current_node.FeM = curr_FeM.copy()
+
+        # Calculate importance score of the current node (FIT(theta_i, theta_i))
+        _, FIT_layerwise = self.fit_computer.get_FIT_old(
+            FeM=current_node.FeM, params_after=current_node.parameters, same_theta=True
+        )
+
+        current_node_state = current_node.state.copy()
+
+        logging.info(f"Current node states for quantization & pruning: {current_node_state}")
+        if self.config.fitcompress_parameters.optimize_quantization:
+            for layer_idx in range(self.n_layers):
+
+                # Set neighbour state to current state
+                neighbour_node_state = current_node_state.copy()
+
+                # Check that we do not go out of bounds; if the scheduler ends, don't create a new node for this layer
+                if neighbour_node_state[layer_idx] < len(self.quant_schedule) - 1:
+                    # Move one step forward in the state for the current layer
+                    neighbour_node_state[layer_idx] += 1
+                    # Set neighbour quant config to current quant config
+                    neighbour_node_quant_config = current_node.quant_config.copy()
+                    # Update the neighbour quant config for the current layer based
+                    # on the quantization schedule and the neighbour node state
+                    neighbour_node_quant_config[layer_idx] = self.quant_schedule[neighbour_node_state[layer_idx]]
+
+                    # If we want to skip a layer's quantization
+                    if neighbour_node_quant_config[layer_idx] == 0:
+                        continue
+
+                    # Get the current node's pruning metrics (will not be changed during quantization)
+                    neighbour_node_pruning_metrics = current_node.pruning_metrics.copy()
+                    neighbour_node_parameters_layerwise, neighbour_node_int_bits, neighbour_node_frac_bits = (
+                        self.add_quantization(
+                            model=self.model,
+                            params=current_node.unquantized_weights.copy(),
+                            quant_config=neighbour_node_quant_config,
+                        )
+                    )
+
+                    # Create node structure based on applied quantization setting for the current layer
+                    neighbour_node = self.create_new_node(
+                        current_node=current_node,
+                        neighbour_node_parameters_layerwise=neighbour_node_parameters_layerwise,
+                        neighbour_node_quant_config=neighbour_node_quant_config,
+                        neighbour_node_state=neighbour_node_state,
+                        neighbour_node_pruning_metrics=neighbour_node_pruning_metrics,
+                        neighbour_node_unquantized_parameters_layerwise=current_node.unquantized_weights.copy(),
+                        neighbour_node_int_bits=neighbour_node_int_bits,  # required; create_new_node has no default for it
+                        neighbour_node_frac_bits=neighbour_node_frac_bits,
+                        approximate=self.config.fitcompress_parameters.approximate,
+                    )
+
+                    # Add the neighbour node to the potential nodes list
+                    self.potential_nodes.append(neighbour_node)
+
+        if self.config.fitcompress_parameters.optimize_pruning:
+
+            # Set neighbour state to current state
+            neighbour_node_state = current_node_state.copy()
+
+            # Check that we do not go out of bounds; if the scheduler ends, we don't create new nodes for pruning anymore
+            if neighbour_node_state[-1] + 1 < len(self.pruning_schedule):
+                # Move one step forward in the state for pruning
+                neighbour_node_state[-1] += 1
+
+                # Get current quantization config of the current node (will not be changed during pruning)
+                neighbour_node_quant_config = current_node.quant_config.copy()
+                neighbour_node_int_bits = current_node.int_bits.copy()
+                neighbour_node_frac_bits = current_node.frac_bits.copy()
+
+                neighbour_node_pruning_percentage = self.pruning_schedule[neighbour_node_state[-1]]
+
+                neighbour_node_pruning_percentages = [neighbour_node_pruning_percentage]
+
+                # Layerwise pruning (works, but not tested for performance); probably outdated
+                if self.layerwise_pruning:
+                    neighbour_node_pruning_percentage_layerwise = [
+                        layer_scheduler[neighbour_node_state[-1]]
+                        for layer_scheduler in self.pruning_schedulers_layerwise.values()
+                    ]  # Layerwise pruning percentage
+                    neighbour_node_pruning_percentages += (
+                        neighbour_node_pruning_percentage_layerwise  # Add the layerwise pruning percentages to the list
+                    )
+
+                neighbour_node_pruning_metrics = current_node.pruning_metrics.copy()
+
+                # Update the pruning metrics for the neighbour node
+                for idx, key in enumerate(neighbour_node_pruning_metrics.keys()):
+                    neighbour_node_pruning_metrics[key] = neighbour_node_pruning_percentages[idx]
+
+                if self.layerwise_pruning:
+                    neighbour_node_parameters_layerwise = self.add_pruning_layer_specific(
+                        current_node=current_node, pruning_metrics=neighbour_node_pruning_metrics
+                    )
+                    # The experimental layerwise path does not produce pruned unquantized weights,
+                    # so keep the current node's unquantized weights to avoid an unbound name below
+                    neighbour_node_unquantized_parameters_layerwise = current_node.unquantized_weights.copy()
+                else:
+
+                    neighbour_node_parameters_layerwise, neighbour_node_unquantized_parameters_layerwise = self.add_pruning(
+                        current_node=current_node,
+                        params=current_node.parameters.copy(),
+                        importance_score=FIT_layerwise,
+                        pruning_percentage=neighbour_node_pruning_percentage,
+                    )
+
+                # Create node structure based on applied pruning
+                neighbour_node = self.create_new_node(
+                    current_node=current_node,
+                    neighbour_node_parameters_layerwise=neighbour_node_parameters_layerwise,
+                    neighbour_node_quant_config=neighbour_node_quant_config,
+                    neighbour_node_state=neighbour_node_state,
+                    neighbour_node_pruning_metrics=neighbour_node_pruning_metrics,
+                    neighbour_node_unquantized_parameters_layerwise=neighbour_node_unquantized_parameters_layerwise,
+                    neighbour_node_int_bits=neighbour_node_int_bits,
+                    neighbour_node_frac_bits=neighbour_node_frac_bits,
+                    approximate=False,
+                )
+
+                # Add the neighbour node to the potential nodes list
+                self.potential_nodes.append(neighbour_node)
+
+        # Remove the current node from the potential nodes list, as we have now created all its neighbours
+        current_node_key = current_node.key
+        for idx, p_node in enumerate(self.potential_nodes):  # avoid shadowing the node class
+            if p_node.key == current_node_key:
+                del self.potential_nodes[idx]
+                break
+
+    def create_new_node(
+        self,
+        current_node,
+        neighbour_node_parameters_layerwise,
+        neighbour_node_quant_config,
+        neighbour_node_state,
+        neighbour_node_pruning_metrics,
+        neighbour_node_unquantized_parameters_layerwise,
+        neighbour_node_int_bits,
+        neighbour_node_frac_bits,
+        approximate=False,
+    ):
+        """
+        Create a new node in the compression space based on the current node and the new parameters.
+        Args :
+            current_node : The current node, i.e. the current model.
+            neighbour_node_parameters_layerwise : The parameters of the neighbour node, layerwise.
+            neighbour_node_quant_config : The quantization configuration of the neighbour node, layerwise
+            neighbour_node_state : The state of the neighbour node.
+            neighbour_node_pruning_metrics : Sparsity goal for pruning of the neighbour node
+            neighbour_node_unquantized_parameters_layerwise: Unquantized parameters of the neighbour node, layerwise.
+            neighbour_node_int_bits : The integer bits for weights of the neighbour node, layerwise.
+            neighbour_node_frac_bits : The fractional bits for weights of the neighbour node, layerwise.
+            approximate : If set to True, we use the previous FeM to calculate FIT values for quantization.
+        Returns :
+            The newly created neighbour node.
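+
+        Example (illustrative, plain-text form of the scores computed in
+        calculate_path_cost() below):
+
+            g' = g + sqrt(FIT(theta_current, theta_neighbour))
+            f' = sqrt(|compression_rate - compression_goal|**2 * FIT(theta', theta'))
+            full_dist = g' + f_lambda * f'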
+        """
+
+        if approximate:
+            # If approximate is set to True, we do not recalculate the FeM, but use the one from the current node
+            neighbour_node_FeM = current_node.FeM.copy()
+
+        else:
+            # First, assign the new parameters of the neighbour node to the model,
+            # so that we can calculate the FeM based on these new parameters
+            self.assign_parameters(self.model, neighbour_node_parameters_layerwise)
+
+            # Then, compute the new FeM based on the new parameters of the neighbour node
+            neighbour_node_FeM, _, _, _, _ = self.fit_computer.get_EF(
+                model=self.model,
+                data_loader=self.dataloader,
+                loss_func=self.criterion,
+                tolerance=1e-3,
+                min_iterations=100,
+                max_iterations=100,
+            )
+
+        # Calculate the gscore, fscore, full distance and current compression rate
+        neighbour_node_gscore, neighbour_node_fscore, neighbour_node_full_dist, neighbour_node_compression_rate = (
+            self.calculate_path_cost(
+                current_node=current_node,
+                neighbour_node_parameters_layerwise=neighbour_node_parameters_layerwise,
+                neighbour_node_FeM=neighbour_node_FeM,
+                neighbour_node_quant_config=neighbour_node_quant_config,
+            )
+        )
+
+        # Create the instance for the neighbour node
+        neighbour_node = node(
+            matrices_params_layerwise=neighbour_node_parameters_layerwise,
+            FeM=neighbour_node_FeM,
+            quant_config=neighbour_node_quant_config,
+            pruning_metrics=neighbour_node_pruning_metrics,
+            gscore=neighbour_node_gscore,
+            fscore=neighbour_node_fscore,
+            full_dist=neighbour_node_full_dist,
+            state=neighbour_node_state,
+            curr_compression_rate=neighbour_node_compression_rate,
+            unquantized_weights=neighbour_node_unquantized_parameters_layerwise,
+            int_bits=neighbour_node_int_bits,
+            frac_bits=neighbour_node_frac_bits,
+        )
+
+        return neighbour_node
+
+    def calculate_path_cost(
+        self, current_node, neighbour_node_parameters_layerwise, neighbour_node_FeM, neighbour_node_quant_config
+    ):
+        """
+        Calculates the g and f scores to evaluate the cost of the path from the initial model to the current model (g score)
+        and the heuristic cost to the goal model (f score). Furthermore, calculates the full distance based on both scores.
+        Additionally, the compression rate of the neighbour node (i.e. model) is calculated.
+        Args :
+            current_node : The current node in the compression space, i.e. the current model.
+            neighbour_node_parameters_layerwise : The parameters of the neighbour node, layerwise.
+            neighbour_node_FeM : The FeM of the neighbour node, layerwise.
+            neighbour_node_quant_config : The quantization configuration of the neighbour node, layerwise.
+        Config Args:
+            f_lambda : The lambda value to use for the full distance calculation.
+        Returns:
+            neighbour_node_gscore : The g score of the neighbour node, the cost from the initial node to the neighbour node.
+            neighbour_node_fscore : The f score of the neighbour node, the heuristic cost from the neighbour node to the goal model.
+            neighbour_node_full_dist : The full distance from initial node to final node,
+                given we use the neighbour node, i.e. g score + f_lambda * f score.
+            neighbour_node_compression_rate: Compression rate of the neighbour node, i.e. the fraction of the original model that is still active
+        """
+        # curr_g_score + sqrt(FIT(params_current_node, params_neighbour_node))
+        neighbour_node_gscore = current_node.gscore + np.sqrt(
+            self.fit_computer.get_FIT_old(
+                params_before=current_node.parameters,
+                FeM=current_node.FeM,
+                params_after=neighbour_node_parameters_layerwise,
+                same_theta=False,
+            )
+        )
+
+        # abs(neighbour_node_compression_rate - compression_goal) * sqrt(FIT(params_neighbour_node, params_neighbour_node))
+        # First, calculate the compression rate of the neighbour node (i.e. model)
+        neighbour_node_compression_rate = self.calculate_current_compression_rate(
+            params_layerwise=neighbour_node_parameters_layerwise, quant_config=neighbour_node_quant_config
+        )
+        # Then get FIT(params_neighbour_node, params_neighbour_node)
+        neighbour_node_FIT, _ = self.fit_computer.get_FIT_old(
+            params_after=neighbour_node_parameters_layerwise, FeM=neighbour_node_FeM, same_theta=True
+        )
+        # Finally, calculate the f score
+        neighbour_node_fscore = np.sqrt(
+            (np.abs(neighbour_node_compression_rate - self.compression_goal) ** 2) * neighbour_node_FIT
+        )
+
+        # g_score + lambda * f_score
+        neighbour_node_full_dist = (
+            neighbour_node_gscore + self.config.fitcompress_parameters.f_lambda * neighbour_node_fscore
+        )
+
+        return neighbour_node_gscore, neighbour_node_fscore, neighbour_node_full_dist, neighbour_node_compression_rate
+
+    def calculate_current_compression_rate(self, params_layerwise, quant_config):
+        """
+        Calculates the compression ratio of the model (what fraction of the original model in bytes is
+        still "active" after applying compression)
+
+        Args:
+            params_layerwise : The current parameters (theta) of the model, layer-wise.
+            quant_config : The current (per layer) quantization config.
+
+        Returns:
+            The compression ratio, i.e. how many bytes are still active after pruning and quantization
+            (= active bytes / uncompressed bytes)
+        """
+
+        active_bytes = 0.0
+        uncompressed = 0.0
+        for params_layer, quant_conf_layer in zip(params_layerwise, quant_config):
+            # Count the non-zero parameters in the current layer (treating |w| < 1e-7 as zero)
+            non_zero = torch.sum(torch.where(torch.abs(params_layer) < 1e-7, 0, 1)).detach().cpu().numpy()
+            # Total bytes needed to store the non-zero parameters of the current layer
+            active_bytes += non_zero * quant_conf_layer / 8
+
+            # For the uncompressed version, we simply look at ALL parameters that are in that layer
+            # and multiply by 4, since each original parameter is 32 bits
+            uncompressed += params_layer.numel() * 4
+
+        return active_bytes / uncompressed
+
+
+class FIT:
+    def __init__(self, model, device, input_spec):
+        """
+        Initialize the FIT class, which is used to compute the FIT values for quantization and pruning.
+        Args:
+            model (torch.nn.Module): The model for which to compute the FIT values.
+            device (torch.device): The device on which the model is located.
+            input_spec (tuple): The input specification for the model, e.g. (3, 32, 32) for CIFAR-10.
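+
+        Example (illustrative, mirroring how FITcompress uses this class):
+
+            fit_computer = FIT(model, device, input_spec=(3, 32, 32))
+            FeM, EF_params, EF_activs, _, _ = fit_computer.get_EF(
+                model, dataloader, criterion, tolerance=0.01,
+                min_iterations=100, max_iterations=100,
+            )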
+        """
+        self.hooks = []
+        self.device = device
+
+        self.matrices_params_layerwise, self.matrices_params_sizes_layerwise, _ = self.get_model_weights(model)
+        self.hook_layers(model)
+
+        # Dummy forward pass to trigger hooks & collect activations
+        _ = model(torch.randn(input_spec)[None, ...].to(self.device))
+
+        # List of sizes of tensors of activation inputs
+        self.matrices_activs_sizes_layerwise = []
+
+        for _, module in model.named_modules():
+            if module.act_quant:
+                self.matrices_activs_sizes_layerwise.append(module.activ_in[0].size())
+
+    def get_model_weights(self, model):
+        """
+        Set the collect flag to True for all weights of the layers of interest in the model.
+        This will give us easy access to the weights that we want to quantize/prune later on.
+        Furthermore, we can also access the weights, which we need for the FIT calculation.
+
+        Notes:
+            1. This is only called once initially. Its main purpose is to set the .collect flag to True,
+               such that we can then later on access the weights easily.
+
+        Args:
+            model (torch.nn.Module): The model from which to get the weights.
+        Returns:
+            matrices_params_layerwise (list): A list of the weight matrices for each layer of interest.
+            matrices_params_sizes_layerwise (list): A list of sizes of the weight matrices for each layer of interest.
+            layer_names (list): A list of the names of the layers of interest.
+        """
+
+        matrices_params_layerwise = []
+        layer_names = []
+        # Iterate through all modules in the model
+        for name, module in model.named_modules():
+
+            if isinstance(module, (CompressedLayerLinear, CompressedLayerConv2d)):
+                layer_names.append(name)
+                for name_param, matrix_param in list(module.named_parameters()):
+                    # Search for the weights
+                    if name_param.endswith('weight'):
+                        matrices_params_layerwise.append(matrix_param)
+                        # Set their collect flag to True (later on we can then access them easily like this)
+                        matrix_param.collect = True
+                    else:
+                        matrix_param.collect = False
+                # Skip the fallback below for layers of interest
+                continue
+
+            # For Batch Normalization layers etc. we do not collect any weights
+            for matrix_param in list(module.parameters()):
+                if matrix_param.requires_grad:
+                    matrix_param.collect = False
+
+        # Collect the sizes of the weight matrices
+        matrices_params_sizes_layerwise = [param.size() for param in matrices_params_layerwise]
+
+        return matrices_params_layerwise, matrices_params_sizes_layerwise, layer_names
+
+    def hook_layers(self, model):
+        """
+        Used to get the activation inputs during the forward pass, which are
+        needed for computing the FIT (if calculated with the noise model) w.r.t activations.
+
+        Args :
+            model (torch.nn.Module): The model to hook the layers of.
+        """
+
+        def hook_inp(module, inp, outp):
+            """
+            Store the activation input of the module.
+            """
+            module.activ_in = inp
+
+        for _, module in model.named_modules():
+            if isinstance(module, (CompressedLayerLinear, CompressedLayerConv2d)):
+                # Forward hook to get inputs into the activation function
+                hook = module.register_forward_hook(hook_inp)
+                self.hooks.append(hook)  # Store hooks so we can remove them later
+                module.act_quant = True  # mark it
+            else:
+                module.act_quant = False
+
+    def hook_removal(self):
+        """
+        Remove all hooks that were registered to the model.
+        """
+        for hook in self.hooks:
+            hook.remove()
+
+        self.hooks.clear()
+        assert len(self.hooks) == 0, "Hooks were not removed properly!"
+
+    def get_loss(self, model, data_batch, target_batch, loss_func, mode='mini-batch'):
+        """
+        This function triggers the loss calculation of a model.
+        We use it such that we can then calculate gradients which are
+        needed for the EF trace (which is one part of the FIT).
+
+        Notes:
+            1. I here give the idea how we could do it for both mini-batch and sample loss calculation.
+
+        Args:
+            model (torch.nn.Module): The (trained) model to calculate the loss for.
+            data_batch (Tensor): Current input data mini-batch for the model.
+            target_batch (Tensor): Current target data mini-batch for the model.
+            loss_func (callable): Loss function to use for the calculation.
+            mode (str): Mode of loss calculation, either 'mini-batch' or 'sample'.
+                'mini-batch' calculates the loss for each mini-batch (by summing losses
+                and averaging over the mini-batch),
+                'sample' calculates the loss for each sample (which should be closer
+                to the actual paper's definition).
+        Returns:
+            loss : Loss for the current mini-batch (either averaged over the mini-batch or per sample).
+
+        """
+
+        output = model(data_batch)
+
+        if mode == 'mini-batch':
+            # Check which loss_func instance is active
+            if isinstance(loss_func, torch.nn.CrossEntropyLoss):
+                # Calculate loss based on the mini-batch and averaged over it
+                loss_func = torch.nn.CrossEntropyLoss()
+
+        if mode == 'sample':
+            if isinstance(loss_func, torch.nn.CrossEntropyLoss):
+                # Calculate the loss for each sample (the deprecated `reduce` flag is
+                # redundant with reduction='none')
+                loss_func = torch.nn.CrossEntropyLoss(reduction='none')
+
+        loss = loss_func(output, target_batch)
+
+        return loss
+
+    def get_gradients(self, model, loss, matrices_layerwise, batch_size):
+        """
+        This function calculates the gradients & squared gradients.
+        These are then used to calculate the EF trace down the line,
+        either for parameters or activations.
+
+        Args:
+            model (torch.nn.Module): The model to calculate the loss for.
+            loss (Tensor): The loss tensor for which to calculate gradients.
+            matrices_layerwise (list): List of parameters/activation matrices for which to calculate gradients.
+            batch_size (int): Size of the mini-batch used for the loss calculation.
+        Returns:
+            squared_grads : Squared gradients for the passed parameters/activations, layer-wise.
+        """
+        grads = torch.autograd.grad(loss, [*matrices_layerwise], retain_graph=True)
+        squared_grads = [batch_size * g**2 for g in grads]
+
+        return squared_grads
+
+    def get_EF(self, model, data_loader, loss_func, tolerance=1e-3, min_iterations=100, max_iterations=100):
+        """
+        Calculate the approximate Empirical Fisher (EF) trace / approximate Fisher
+        Information Metric (FIM) for the model.
+
+        Notes:
+            1) Computed per mini-batch: we accumulate EF over mini-batches. This
+               differs from the paper, which defines EF per sample and then
+               accumulates.
+            2) FeM vs. EF_trace_params_layerwise_cpu: FeM keeps layer tensors
+               (no per-layer sum), while EF_trace_params_layerwise_cpu stores
+               per-layer scalar traces (sum over parameters).
+            3) The returned min/max ranges are only needed when using the noise
+               model for FIT.
+
+        Args:
+            model (torch.nn.Module): Model to evaluate.
+            data_loader (DataLoader): Training data loader.
+            loss_func (Callable): Loss function used to compute gradients.
+            tolerance (float): Convergence tolerance for EF accumulation.
+            min_iterations (int): Minimum mini-batches before checking convergence.
+            max_iterations (int): Maximum mini-batches to process.
+
+        Returns:
+            FeM (list[Tensor]): Layer-wise EF tensors accumulated over mini-batches.
+            EF_trace_params_layerwise_cpu (np.ndarray): Layer-wise EF traces
+                (parameters), accumulated over mini-batches.
+    def get_EF(self, model, data_loader, loss_func, tolerance=1e-3, min_iterations=100, max_iterations=100):
+        """
+        Calculate the approximate Empirical Fisher (EF) trace / approximate Fisher
+        Information Metric (FIM) for the model.
+
+        Notes:
+            1) Computed per mini-batch: we accumulate EF over mini-batches. This
+               differs from the paper, which defines EF per sample and then
+               accumulates.
+            2) FeM vs. EF_trace_params_layerwise_cpu: FeM keeps layer tensors
+               (no per-layer sum), while EF_trace_params_layerwise_cpu stores
+               per-layer scalar traces (sum over parameters).
+            3) The returned min/max ranges are only needed when using the noise
+               model for FIT.
+
+        Args:
+            model (torch.nn.Module): Model to evaluate.
+            data_loader (DataLoader): Training data loader.
+            loss_func (Callable): Loss function used to compute gradients.
+            tolerance (float): Convergence tolerance for EF accumulation.
+            min_iterations (int): Minimum mini-batches before checking convergence.
+            max_iterations (int): Maximum mini-batches to process.
+
+        Returns:
+            FeM (list[Tensor]): Layer-wise EF tensors accumulated over mini-batches.
+            EF_trace_params_layerwise_cpu (np.ndarray): Layer-wise EF traces
+                (parameters), accumulated over mini-batches.
+            EF_trace_activs_layerwise_cpu (np.ndarray): Layer-wise EF traces
+                (activations), accumulated over mini-batches.
+            per_batch_layerwise_minmax_range_params (list[list[float]]): Per-batch
+                min–max ranges for parameters per layer.
+            per_batch_layerwise_minmax_range_activs (list[list[float]]): Per-batch
+                min–max ranges for activations per layer.
+        """
+
+        # Convergence flag based on the variance of the change in EF between the current mini-batch estimate and the accumulated EF trace
+        convergence_flag = False
+        total_batches = 0
+        model.eval()
+
+        # Hook layers again, needed for when we recalculate EF traces during FITcompress
+        self.hook_layers(model)
+
+        # Initialize list to store accumulated EF of parameters (weights) over mini-batches
+        batch_accum_EF_matrices_params_layerwise = [
+            torch.zeros(size).to(self.device) for size in self.matrices_params_sizes_layerwise
+        ]
+        # Initialize list to store accumulated EF of activations over mini-batches
+        batch_accum_EF_matrices_activs_layerwise = [
+            torch.zeros(size).to(self.device) for size in self.matrices_activs_sizes_layerwise[1:]
+        ]
+        # NOTE: layerwise means that each element itself is a list of the ranges for each layer for the current mini-batch
+        per_batch_layerwise_minmax_range_params = []
+        per_batch_layerwise_minmax_range_activs = []
+        # These will be needed for the convergence check; each element is a list of the summed squared gradients per layer
+        per_batch_layerwise_grad_sum_squared_params = []
+        per_batch_layerwise_grad_sum_squared_activs = []
+        # Iterate over mini-batches in the data loader until we reach max_iterations or the convergence flag is set
+        while total_batches < max_iterations and not convergence_flag:
+            for _, data in enumerate(data_loader):
+                model.zero_grad()
+                data_batch, target_batch = data[0].to(self.device), data[1].to(self.device)
+                batch_size = data_batch.size(0)
+
+                loss = self.get_loss(model, data_batch, target_batch, loss_func, mode='mini-batch')
+                curr_batch_matrices_params_layerwise = []
+                curr_batch_minmax_range_params_layerwise = []
+                for weights in model.parameters():
+                    if weights.collect:
+                        curr_batch_matrices_params_layerwise.append(weights)
+                        curr_batch_minmax_range_params_layerwise.append(
+                            (torch.max(weights.data) - torch.min(weights.data)).detach().cpu().numpy()
+                        )
+
+                per_batch_layerwise_minmax_range_params.append(curr_batch_minmax_range_params_layerwise)
+
+                curr_batch_matrices_activs_layerwise = []
+                curr_batch_minmax_range_activs_layerwise = []
+                for _, module in model.named_modules():
+                    if module.act_quant:
+                        curr_batch_matrices_activs_layerwise.append(module.activ_in[0])
+                        curr_batch_minmax_range_activs_layerwise.append(
+                            (torch.max(module.activ_in[0]) - torch.min(module.activ_in[0])).detach().cpu().numpy()
+                        )
+
+                per_batch_layerwise_minmax_range_activs.append(curr_batch_minmax_range_activs_layerwise)
+
+                # Calculate squared gradients for the current mini-batch
+                curr_batch_squared_grads_params_layerwise = self.get_gradients(
+                    model, loss, curr_batch_matrices_params_layerwise, batch_size
+                )
+                curr_batch_squared_grads_activs_layerwise = self.get_gradients(
+                    model, loss, curr_batch_matrices_activs_layerwise[1:], batch_size
+                )  # skip first layer activations, since it is the input layer
+                # NOTE: we need this for early stopping based on convergence; it is not necessary for the EF calculation.
+                # Take the sum of squared gradients for the parameters/activations of each layer.
+                curr_batch_summed_squared_grads_params_layerwise = np.array(
+                    [
+                        torch.sum(param_matrix).detach().cpu().numpy()
+                        for param_matrix in curr_batch_squared_grads_params_layerwise
+                    ]
+                )
+                curr_batch_summed_squared_grads_activs_layerwise = np.array(
+                    [
+                        torch.sum(activ_matrix).detach().cpu().numpy()
+                        for activ_matrix in curr_batch_squared_grads_activs_layerwise
+                    ]
+                )
+                # Append the current mini-batch squared gradients to the list of per-batch squared gradients
+                per_batch_layerwise_grad_sum_squared_params.append(curr_batch_summed_squared_grads_params_layerwise)
+                per_batch_layerwise_grad_sum_squared_activs.append(curr_batch_summed_squared_grads_activs_layerwise)
+                batch_accum_EF_matrices_params_layerwise = [
+                    curr_val_layer + curr_squared_grad_layer + 0.0
+                    for curr_val_layer, curr_squared_grad_layer in zip(
+                        batch_accum_EF_matrices_params_layerwise, curr_batch_squared_grads_params_layerwise
+                    )
+                ]
+                batch_accum_EF_matrices_activs_layerwise = [
+                    acc + grad + 0.0
+                    for acc, grad in zip(
+                        batch_accum_EF_matrices_activs_layerwise,
+                        curr_batch_squared_grads_activs_layerwise,
+                    )
+                ]
+                total_batches += 1
+                # NOTE: this is only exactly 1/N if we stop after iterating over all mini-batches!
+                batch_accum_EF_matrices_params_normalized_layerwise = [
+                    accum_grad_layer / float(total_batches) for accum_grad_layer in batch_accum_EF_matrices_params_layerwise
+                ]
+                batch_accum_EF_matrices_activs_normalized_layerwise = [
+                    accum_grad_layer / float(total_batches) for accum_grad_layer in batch_accum_EF_matrices_activs_layerwise
+                ]
+
+                # FeM from the original code, used by the current FITcompress implementation
+                FeM = [value.detach().cpu() for value in batch_accum_EF_matrices_params_normalized_layerwise]
+
+                EF_trace_params_layerwise = [
+                    torch.sum(value) for value in batch_accum_EF_matrices_params_normalized_layerwise
+                ]
+                EF_trace_activs_layerwise = [
+                    torch.sum(value) for value in batch_accum_EF_matrices_activs_normalized_layerwise
+                ]
+
+                EF_trace_params_layerwise_cpu = np.array(
+                    [value.detach().cpu().numpy() for value in EF_trace_params_layerwise]
+                )
+                EF_trace_activs_layerwise_cpu = np.array(
+                    [value.detach().cpu().numpy() for value in EF_trace_activs_layerwise]
+                )
+
+                # Convergence check
+                if total_batches >= 2:
+                    # Calculate the variance of the change in EF trace for parameters and activations
+                    params_var = (
+                        np.var(
+                            (per_batch_layerwise_grad_sum_squared_params - EF_trace_params_layerwise_cpu)
+                            / EF_trace_params_layerwise_cpu
+                        )
+                        / total_batches
+                    )
+                    activs_var = (
+                        np.var(
+                            (per_batch_layerwise_grad_sum_squared_activs - EF_trace_activs_layerwise_cpu)
+                            / EF_trace_activs_layerwise_cpu
+                        )
+                        / total_batches
+                    )
+
+                    if activs_var < tolerance and params_var < tolerance and total_batches > min_iterations:
+                        convergence_flag = True
+
+                if convergence_flag or total_batches >= max_iterations:
+                    break
+
+        # Remove hooks after the forward pass
+        self.hook_removal()
+
+        self.FeM = FeM
+        self.EF_trace_params_layerwise_cpu = EF_trace_params_layerwise_cpu
+        self.EF_trace_activs_layerwise_cpu = EF_trace_activs_layerwise_cpu
+        self.per_batch_layerwise_minmax_range_params = per_batch_layerwise_minmax_range_params
+        self.per_batch_layerwise_minmax_range_activs = per_batch_layerwise_minmax_range_activs
+
+        return (
+            FeM,
+            EF_trace_params_layerwise_cpu,
+            EF_trace_activs_layerwise_cpu,
+            per_batch_layerwise_minmax_range_params,
+            per_batch_layerwise_minmax_range_activs,
+        )
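
(Aside: a toy numeric sketch of the convergence test above, i.e. the variance of the relative deviation between per-batch EF estimates and the accumulated trace; all numbers are made up.)

    import numpy as np

    tolerance = 1e-3
    running_trace = np.array([2.0, 5.0, 1.0])       # accumulated EF trace per layer
    per_batch = np.array([[2.1, 4.9, 1.0],
                          [1.9, 5.2, 1.1],
                          [2.0, 5.0, 0.9]])         # per-batch trace estimates

    total_batches = per_batch.shape[0]
    var = np.var((per_batch - running_trace) / running_trace) / total_batches
    print(var, var < tolerance)  # converged once var drops below the tolerance
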
+    def squared_step_width_quantization(self, ranges_layerwise, quant_bit_precision_layerwise):
+        """
+        Calculate the squared step width of the quantization (the formula for Delta;
+        see Appendix E of the FIT paper).
+        This is needed for the uniform noise model, which is used to calculate delta_theta.
+
+        Notes:
+            Since this was part of the FIT paper, the noise model is based solely on quantization, not pruning.
+
+        Args:
+            ranges_layerwise: min-max range of the weights/activations, layer-wise
+            quant_bit_precision_layerwise: quantization bit precision, layer-wise
+        Returns:
+            Squared step width of the quantization, layer-wise.
+        """
+
+        return (ranges_layerwise / (2**quant_bit_precision_layerwise - 1)) ** 2
+
+    def get_FIT_noise_model(
+        self,
+        EF_trace_params_layerwise_cpu,
+        quant_bit_precision_params_layerwise,
+        quant_bit_precision_activs_layerwise=None,
+        EF_trace_activs_layerwise_cpu=None,
+        use_activations=False,
+    ):
+        """
+        Calculate the FIT for the model, based on the empirical Fisher trace and the squared
+        step width of the quantization that comes from the noise model. This implements the
+        FIT formula from the FITCompress paper.
+
+        Notes:
+            1. The original FITCompress code does not use the noise model, but rather the actual parameter values.
+            2. The original FIT was implemented only based on activations. Here I also include parameters.
+            3. TODO: FOR FITCOMPRESS: FITCompress works only with parameters, since activations
+               will not change during the path-finding process, i.e. they are not quantized there.
+            4. TODO: FOR FITCOMPRESS: How to deal with the FIT(theta_i, theta_i) calculation?
+
+        Args:
+            EF_trace_params_layerwise_cpu (list): Empirical Fisher trace values for the parameters (weights), layer-wise.
+            EF_trace_activs_layerwise_cpu (list): Empirical Fisher trace values for the activations, layer-wise.
+            quant_bit_precision_params_layerwise (list): Quantization bit precision for the parameters (weights), layer-wise.
+            quant_bit_precision_activs_layerwise (list): Quantization bit precision for the activations, layer-wise.
+            use_activations (bool): Whether to include activations in the FIT calculation or not.
+
+        Returns:
+            FIT_full (float): FIT value.
+        """
+
+        # Get the mean across all stored mini-batches for each layer
+        mean_range_params_layerwise = np.mean(self.per_batch_layerwise_minmax_range_params, axis=0)
+
+        # Calculate the squared step width of the quantization for parameters and activations
+        # TODO: here we need to deal with the FIT(theta_i, theta_i) calculation
+        squared_step_width_quant_params_layerwise = self.squared_step_width_quantization(
+            mean_range_params_layerwise, np.array(quant_bit_precision_params_layerwise)
+        )
+
+        FIT_params_layerwise = squared_step_width_quant_params_layerwise * EF_trace_params_layerwise_cpu
+
+        # Calculate the full FIT by summing over all layers
+        # TODO: the 1/n(l) normalization is missing, but it cancels itself out according to Adrian
+        FIT_full = np.sum(FIT_params_layerwise)
+
+        if use_activations:
+            mean_range_activs_layerwise = np.mean(self.per_batch_layerwise_minmax_range_activs, axis=0)[
+                1:
+            ]  # skip first layer activations, since it is the input layer
+            squared_step_width_quant_activs_layerwise = self.squared_step_width_quantization(
+                mean_range_activs_layerwise, np.array(quant_bit_precision_activs_layerwise[1:])
+            )  # 1: depends on the setting (whether the first layer is included in the config or not)
+            FIT_activs_layerwise = squared_step_width_quant_activs_layerwise * EF_trace_activs_layerwise_cpu
+            FIT_full += np.sum(FIT_activs_layerwise)
+
+        self.FIT_full = FIT_full
+
+        return FIT_full
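
(Aside: a toy end-to-end computation of the noise-model FIT above, FIT = sum_l Delta_l^2 * tr(EF_l) with Delta = range / (2^bits - 1); the ranges, bit widths and traces are made-up values.)

    import numpy as np

    ranges = np.array([1.2, 0.8, 2.0])    # layer-wise min-max weight ranges
    bits = np.array([7.0, 4.0, 3.0])      # layer-wise bit precisions
    ef_trace = np.array([3.1, 0.7, 5.4])  # layer-wise EF traces

    delta_sq = (ranges / (2**bits - 1)) ** 2  # squared quantization step width
    fit = np.sum(delta_sq * ef_trace)
    print(fit)
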
+    def get_FIT_real_values(self, params_before, EF_trace_params_layerwise, params_after=None, same_theta=False):
+        """
+        Calculate the actual FIT for the model, based on the parameter values of two
+        consecutive models during the FITCompress path-finding process.
+
+        Notes:
+            1. This deviates from the code for FITCompress. In the original code, they use the
+               still-layer-wise EF (not the trace) and do element-wise multiplication.
+               Here I implement the actual FIT formula from the theoretical paper. This means it still has to be tested.
+            2. Does not include any activations yet, since the original FITCompress code doesn't include
+               them; this would need to be extended.
+        Args:
+            params_before (list): List of parameters (weights) before the path-finding step.
+            params_after (list): List of parameters (weights) after the path-finding step.
+            EF_trace_params_layerwise (list): List of empirical Fisher trace values for each layer.
+            same_theta (bool): Whether to calculate FIT(theta, theta) for the f heuristic.
+        Returns:
+            curr_FIT (float): FIT value.
+        """
+
+        curr_FIT = 0
+        if not same_theta:
+            for theta_before, theta_after, EF_trace in zip(params_before, params_after, EF_trace_params_layerwise):
+                # Calculate the squared difference between the parameters before and after
+                delta_theta = torch.sum((theta_before.detach().cpu() - theta_after.detach().cpu()) ** 2).numpy()
+                # Calculate the FIT for the current layer
+                curr_FIT += EF_trace * delta_theta
+
+        else:
+
+            for theta, EF_trace in zip(params_before, EF_trace_params_layerwise):
+                # Calculate the squared magnitude of the parameters
+                delta_theta = torch.sum(theta.detach().cpu() ** 2)
+                # Calculate the FIT for the current layer
+                curr_FIT += EF_trace * delta_theta
+
+        return curr_FIT
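
(Aside: a toy version of the path-finding FIT above: per-layer EF trace times the squared parameter displacement between two candidate models; the tensors and traces are made up.)

    import torch

    params_before = [torch.randn(4, 4), torch.randn(8)]
    params_after = [p + 0.01 * torch.randn_like(p) for p in params_before]
    ef_trace = [2.5, 0.3]

    # FIT(theta_before, theta_after) = sum_l tr(EF_l) * ||theta_before_l - theta_after_l||^2
    fit = sum(
        tr * torch.sum((b - a) ** 2).item()
        for tr, (b, a) in zip(ef_trace, zip(params_before, params_after))
    )
    print(fit)
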
+    def get_FIT_old(self, params_before=None, FeM=None, params_after=None, same_theta=False):
+        """
+        Implementation of the original FIT code (compute_fake_FIT_params()), with the addition
+        of the same_theta mode. Calculates the FIT of the model.
+
+        Args:
+            params_before (list): List of parameters (weights) before the path-finding step.
+            FeM (list): The EF of the parameters, stored as tensors and accumulated over mini-batches, layer-wise.
+            params_after (list): List of parameters (weights) after the path-finding step.
+            same_theta (bool): Whether to calculate FIT(theta, theta), the importance score.
+        Returns:
+            curr_FIT (float): FIT value.
+            FIT_layerwise (list): FIT value per layer (only returned when same_theta is True).
+        """
+
+        curr_FIT = 0
+
+        if not same_theta:
+
+            # Taken from compute_fake_FIT_params()
+            for theta_before, theta_after, layer_FeM in zip(params_before, params_after, FeM):
+
+                curr_FIT_layer = torch.sum(
+                    layer_FeM * (theta_before.detach().cpu() - theta_after.detach().cpu()) ** 2
+                ).numpy()
+                curr_FIT += curr_FIT_layer
+            return curr_FIT
+
+        else:
+            FIT_layerwise = []
+
+            # Taken from generate_FIT_pruning_importance()
+            for theta_after, layer_FeM in zip(params_after, FeM):
+
+                curr_FIT_layer = layer_FeM * (theta_after.detach().cpu() ** 2)
+                FIT_layerwise.append(curr_FIT_layer)
-
\ No newline at end of file
+            # Taken from renorm_heuristic()
+            final_FIT = torch.sum(torch.cat([FIT_score.view(-1) for FIT_score in FIT_layerwise])).detach().cpu().numpy()
+
+            curr_FIT += final_FIT
+
+            return curr_FIT, FIT_layerwise
diff --git a/src/pquant/core/torch_impl/train_torch.py b/src/pquant/core/torch_impl/train_torch.py
index 1aa7767..9ec1d64 100644
--- a/src/pquant/core/torch_impl/train_torch.py
+++ b/src/pquant/core/torch_impl/train_torch.py
@@ -15,37 +15,37 @@ def iterative_train_torch(model, config, train_func, valid_func, **kwargs):
     Generic training loop, user provides training and validation functions
     """
     epoch = torch.tensor(0)  # Keeps track of all the epochs completed
-    training_config = config["training_parameters"]
-    if training_config["pretraining_epochs"] > 0:
-        for e in range(training_config["pretraining_epochs"]):
+    training_config = config.training_parameters
+    if training_config.pretraining_epochs > 0:
+        for e in range(training_config.pretraining_epochs):
             model.train()
-            pre_epoch_functions(model, e, training_config["pretraining_epochs"])
+            pre_epoch_functions(model, e, training_config.pretraining_epochs)
             train_func(model, epoch=epoch, **kwargs)
             model.eval()
             valid_func(model, epoch=epoch, **kwargs)
-            post_epoch_functions(model, e, training_config["pretraining_epochs"])
+            post_epoch_functions(model, e, training_config.pretraining_epochs)
             epoch += 1
     post_pretrain_functions(model, config, kwargs['trainloader'], kwargs['loss_func'])
-    for r in range(training_config["rounds"]):
-        for e in range(training_config["epochs"]):
+    for r in range(training_config.rounds):
+        for e in range(training_config.epochs):
             model.train()
-            if r == 0 and training_config["save_weights_epoch"] == e:
+            if r == 0 and training_config.save_weights_epoch == e:
                 save_weights_functions(model)
-            pre_epoch_functions(model, e, training_config["epochs"])
+            pre_epoch_functions(model, e, training_config.epochs)
             train_func(model, epoch=epoch, **kwargs)
             model.eval()
             valid_func(model, epoch=epoch, **kwargs)
-            post_epoch_functions(model, e, training_config["epochs"])
+            post_epoch_functions(model, e, training_config.epochs)
             epoch += 1
-        call_post_round_functions(model, training_config["rewind"], training_config["rounds"], r)
+        call_post_round_functions(model, training_config.rewind, training_config.rounds, r)
     pre_finetune_functions(model)
-    if training_config["fine_tuning_epochs"] > 0:
-        for e in range(training_config["fine_tuning_epochs"]):
+    if training_config.fine_tuning_epochs > 0:
+        for e in range(training_config.fine_tuning_epochs):
             model.train()
-            pre_epoch_functions(model, e, training_config["fine_tuning_epochs"])
+            pre_epoch_functions(model, e, training_config.fine_tuning_epochs)
             train_func(model,
epoch=epoch, **kwargs) model.eval() valid_func(model, epoch=epoch, **kwargs) - post_epoch_functions(model, e, training_config["fine_tuning_epochs"]) + post_epoch_functions(model, e, training_config.fine_tuning_epochs) epoch += 1 return model diff --git a/src/pquant/core/utils.py b/src/pquant/core/utils.py index 4d66e2c..29852eb 100644 --- a/src/pquant/core/utils.py +++ b/src/pquant/core/utils.py @@ -6,13 +6,13 @@ from pquant.pruning_methods.autosparse import AutoSparse from pquant.pruning_methods.cs import ContinuousSparsification from pquant.pruning_methods.dst import DST +from pquant.pruning_methods.mdmm import MDMM from pquant.pruning_methods.pdp import PDP from pquant.pruning_methods.wanda import Wanda -from pquant.pruning_methods.mdmm import MDMM def get_pruning_layer(config, layer_type): - pruning_method = config["pruning_parameters"]["pruning_method"] + pruning_method = config.pruning_parameters.pruning_method if pruning_method == "dst": return DST(config, layer_type) elif pruning_method == "autosparse": @@ -56,7 +56,7 @@ def write_config_to_yaml(config, output_path, sort_keys=True): def validate_pruning_parameters(config): - pruning_method = config["pruning_parameters"]["pruning_method"] + pruning_method = config.pruning_parameters.pruning_method if pruning_method == "dst": valid_keys = [ "alpha", @@ -121,7 +121,7 @@ def validate_pruning_parameters(config): "sparsity", ] for k in valid_keys: - assert k in config["pruning_parameters"].keys(), f"missing pruning parameter: {k}" + assert k in config.pruning_parameters.keys(), f"missing pruning parameter: {k}" def validate_quantization_parameters(config): @@ -136,7 +136,7 @@ def validate_quantization_parameters(config): "use_symmetric_quantization", ] for k in valid_keys: - assert k in config["quantization_parameters"].keys(), f"missing quantization parameter: {k}" + assert k in config.quantization_parameters.keys(), f"missing quantization parameter: {k}" def validate_training_parameters(config): @@ -150,7 +150,7 @@ def validate_training_parameters(config): "save_weights_epoch", ] for k in valid_keys: - assert k in config["training_parameters"].keys(), f"missing training parameter: {k}" + assert k in config.training_parameters.keys(), f"missing training parameter: {k}" def validate_config(config): diff --git a/src/pquant/data_models/finetuning_model.py b/src/pquant/data_models/finetuning_model.py new file mode 100644 index 0000000..c2bf1ba --- /dev/null +++ b/src/pquant/data_models/finetuning_model.py @@ -0,0 +1,18 @@ +from typing import Dict, List, Optional, Union, Any +from pydantic import BaseModel, Field +from typing_extensions import Literal + +class HyperparameterSearch(BaseModel): + numerical: Dict[str, List[Union[int, float]]] = Field(default_factory=dict) + categorical: Optional[Dict[str, List[str]]] = Field(default_factory=dict) + +class Sampler(BaseModel): + type: str = Field(default="TPESampler") + params: Dict[str, Any] = Field(default_factory=dict) + + +class BaseFinetuningModel(BaseModel): + experiment_name: str = Field(default="experiment_1") + sampler: Sampler = Field(default_factory=Sampler) + num_trials: int = Field(default=0) + hyperparameter_search: HyperparameterSearch = Field(default_factory=HyperparameterSearch) diff --git a/src/pquant/data_models/fitcompress_model.py b/src/pquant/data_models/fitcompress_model.py new file mode 100644 index 0000000..8ea61d6 --- /dev/null +++ b/src/pquant/data_models/fitcompress_model.py @@ -0,0 +1,21 @@ +from typing import List + +from pydantic import BaseModel, Field + + +class 
PruningSchedule(BaseModel):
+    start: int = Field(default=0)
+    end: int = Field(default=-3)
+    steps: int = Field(default=40)
+
+
+class BaseFitCompressModel(BaseModel):
+    enable_fitcompress: bool = Field(default=False)
+    optimize_quantization: bool = Field(default=True)
+    quantization_schedule: List[float] = Field(default_factory=lambda: [7.0, 4.0, 3.0, 2.0])
+    pruning_schedule: PruningSchedule = Field(default_factory=PruningSchedule)
+    compression_goal: float = Field(default=0.10)
+    optimize_pruning: bool = Field(default=False)
+    greedy_astar: bool = Field(default=True)
+    approximate: bool = Field(default=True)
+    f_lambda: float = Field(default=1)
diff --git a/src/pquant/data_models/pruning_model.py b/src/pquant/data_models/pruning_model.py
new file mode 100644
index 0000000..7acfacb
--- /dev/null
+++ b/src/pquant/data_models/pruning_model.py
@@ -0,0 +1,84 @@
+from enum import Enum
+from typing import List, Literal, Optional
+
+from pydantic import BaseModel, Field
+
+
+class BasePruningModel(BaseModel):
+    disable_pruning_for_layers: List[str] = Field(default_factory=list)
+    enable_pruning: bool = Field(default=True)
+    threshold_decay: float = Field(default=0.0)
+
+
+class CSPruningModel(BasePruningModel):
+    pruning_method: Literal["cs"] = "cs"
+    final_temp: int = Field(default=200)
+    threshold_init: int = Field(default=0)
+
+
+class DSTPruningModel(BasePruningModel):
+    pruning_method: Literal["dst"] = "dst"
+    alpha: float = Field(default=5.0e-06)
+    max_pruning_pct: float = Field(default=0.99)
+    threshold_init: float = Field(default=0.0)
+    threshold_type: str = Field(default="channelwise")
+
+
+class PDPPruningModel(BasePruningModel):
+    pruning_method: Literal["pdp"] = "pdp"
+    epsilon: float = Field(default=0.015)
+    sparsity: float = Field(default=0.8)
+    temperature: float = Field(default=1.0e-05)
+    structured_pruning: bool = Field(default=False)
+
+
+class WandaPruningModel(BasePruningModel):
+    pruning_method: Literal["wanda"] = "wanda"
+    M: Optional[int] = Field(default=None)
+    N: Optional[int] = Field(default=None)
+    sparsity: float = Field(default=0.9)
+    t_delta: int = Field(default=100)
+    t_start_collecting_batch: int = Field(default=100)
+    calculate_pruning_budget: bool = Field(default=True)
+
+
+class AutoSparsePruningModel(BasePruningModel):
+    pruning_method: Literal["autosparse"] = "autosparse"
+    alpha: float = Field(default=0.5)
+    alpha_reset_epoch: int = Field(default=90)
+    autotune_epochs: int = Field(default=10)
+    backward_sparsity: bool = Field(default=False)
+    threshold_init: float = Field(default=-5.0)
+    threshold_type: str = Field(default="channelwise")
+
+
+class ActivationPruningModel(BasePruningModel):
+    pruning_method: Literal["activation_pruning"] = "activation_pruning"
+    threshold: float = Field(default=0.3)
+    t_delta: int = Field(default=50)
+    t_start_collecting_batch: int = Field(default=50)
+
+
+class MetricType(str, Enum):
+    UNSTRUCTURED = "UnstructuredSparsity"
+    STRUCTURED = "StructuredSparsity"
+
+
+class ConstraintType(str, Enum):
+    EQUALITY = "Equality"
+    LEQ = "LessThanOrEqual"
+    GEQ = "GreaterThanOrEqual"
+
+
+class MDMMPruningModel(BasePruningModel):
+    pruning_method: Literal["mdmm"] = "mdmm"
+    constraint_type: ConstraintType = Field("Equality")
+    target_value: float = Field(default=0.0)
+    metric_type: MetricType = Field(default="UnstructuredSparsity")
+    target_sparsity: float = Field(default=0.9)
+    rf: int = Field(default=1)
+    epsilon: float = Field(default=1.0e-03)
+    scale: float = Field(default=10.0)
+    damping: float = Field(default=1.0)
+    use_grad:
bool = Field(default=False) + l0_mode: Literal["coarse", "smooth"] = Field(default="coarse") + scale_mode: Literal["mean", "sum"] = Field(default="mean") diff --git a/src/pquant/data_models/quantization_model.py b/src/pquant/data_models/quantization_model.py new file mode 100644 index 0000000..b043e48 --- /dev/null +++ b/src/pquant/data_models/quantization_model.py @@ -0,0 +1,16 @@ +from typing import List + +from pydantic import BaseModel, Field + + +class BaseQuantizationModel(BaseModel): + default_integer_bits: float = Field(default=0.0) + default_fractional_bits: float = Field(default=7.0) + enable_quantization: bool = Field(default=True) + hgq_gamma: float = Field(default=0.0003) + hgq_heterogeneous: bool = Field(default=True) + layer_specific: List = Field(default_factory=list) + use_high_granularity_quantization: bool = Field(default=False) + use_real_tanh: bool = Field(default=False) + use_symmetric_quantization: bool = Field(default=False) + use_relu_multiplier: bool = Field(default=True) diff --git a/src/pquant/data_models/training_model.py b/src/pquant/data_models/training_model.py new file mode 100644 index 0000000..3db4e54 --- /dev/null +++ b/src/pquant/data_models/training_model.py @@ -0,0 +1,27 @@ +from typing import Literal + +from pydantic import BaseModel, ConfigDict, Field + + +class BaseTrainingModel(BaseModel): + model_config = ConfigDict(extra='allow') + epochs: int = Field(default=200) + fine_tuning_epochs: int = Field(default=0) + pretraining_epochs: int = Field(default=50) + pruning_first: bool = Field(default=False) + rewind: str = Field(default="never") + rounds: int = Field(default=1) + save_weights_epoch: int = Field(default=-1) + batch_size: int = Field(default=128) + optimizer: str = Field(default="sgd") + plot_frequency: int = Field(default=100) + label_smoothing: float = Field(default=0.0) + model: str = Field(default="resnet18") + dataset: str = Field(default="cifar10") + l2_decay: float = Field(default=0.001) + momentum: float = Field(default=0.9) + lr_schedule: Literal["cosine", "step", "none"] = Field(default="cosine") + cosine_tmax: int = Field(default=200) + lr: float = Field(default=0.001) + prune_ratio: float = Field(default=10.0) + default_integer_bits: int = Field(default=0) diff --git a/src/pquant/pruning_methods/activation_pruning.py b/src/pquant/pruning_methods/activation_pruning.py index 5b4c742..6afb000 100644 --- a/src/pquant/pruning_methods/activation_pruning.py +++ b/src/pquant/pruning_methods/activation_pruning.py @@ -2,9 +2,13 @@ from keras import ops +@keras.saving.register_keras_serializable(package="Layers") class ActivationPruning(keras.layers.Layer): - def __init__(self, config, layer_type, *args, **kwargs): + if isinstance(config, dict): + from pquant.core.finetuning import TuningConfig + + config = TuningConfig.load_from_config(config) super().__init__(*args, **kwargs) self.config = config self.act_type = "relu" @@ -15,8 +19,8 @@ def __init__(self, config, layer_type, *args, **kwargs): self.total = 0.0 self.is_pretraining = True self.done = False - self.threshold = ops.convert_to_tensor(config["pruning_parameters"]["threshold"]) - self.t_start_collecting_batch = self.config["pruning_parameters"]["t_start_collecting_batch"] + self.threshold = ops.convert_to_tensor(config.pruning_parameters.threshold) + self.t_start_collecting_batch = self.config.pruning_parameters.t_start_collecting_batch def build(self, input_shape): self.shape = (input_shape[0], 1) @@ -46,7 +50,7 @@ def collect_output(self, output, training): gt_zero = 
ops.cast((output > 0), output.dtype) gt_zero = ops.sum(gt_zero, axis=0) # Sum over batch, take average during mask update self.activations += gt_zero - if self.batches_collected % self.config["pruning_parameters"]["t_delta"] == 0: + if self.batches_collected % self.config.pruning_parameters.t_delta == 0: pct_active = self.activations / self.total self.t = 0 self.total = 0 @@ -64,7 +68,7 @@ def collect_output(self, output, training): self.done = True def call(self, weight): # Mask is only updated every t_delta step, using collect_output - if self.is_pretraining and self.config["fitcompress_parameters"]["enable_fitcompress"]: + if self.is_pretraining and self.config.fitcompress_parameters.enable_fitcompress: return weight else: return self.mask * weight @@ -94,3 +98,9 @@ def post_epoch_function(self, epoch, total_epochs): if self.is_pretraining is False: self.t += 1 pass + + def get_config(self): + config = super().get_config() + + config.update({"config": self.config.get_dict(), "layer_type": self.layer_type}) + return config diff --git a/src/pquant/pruning_methods/autosparse.py b/src/pquant/pruning_methods/autosparse.py index a9ead18..507efa8 100644 --- a/src/pquant/pruning_methods/autosparse.py +++ b/src/pquant/pruning_methods/autosparse.py @@ -21,11 +21,11 @@ def cosine_sigmoid_decay(i, T): def get_threshold_size(config, weight_shape): - if config["pruning_parameters"]["threshold_type"] == "layerwise": + if config.pruning_parameters.threshold_type == "layerwise": return (1, 1) - elif config["pruning_parameters"]["threshold_type"] == "channelwise": + elif config.pruning_parameters.threshold_type == "channelwise": return (weight_shape[0], 1) - elif config["pruning_parameters"]["threshold_type"] == "weightwise": + elif config.pruning_parameters.threshold_type == "weightwise": return (weight_shape[0], np.prod(weight_shape[1:])) @@ -52,13 +52,19 @@ def grad(*args, upstream=None): return mask, grad +@keras.saving.register_keras_serializable(package="Layers") class AutoSparse(keras.layers.Layer): def __init__(self, config, layer_type, *args, **kwargs): super().__init__(*args, **kwargs) + if isinstance(config, dict): + from pquant.core.finetuning import TuningConfig + + config = TuningConfig.load_from_config(config) self.g = ops.sigmoid self.config = config + self.layer_type = layer_type global BACKWARD_SPARSITY - BACKWARD_SPARSITY = config["pruning_parameters"]["backward_sparsity"] + BACKWARD_SPARSITY = config.pruning_parameters.backward_sparsity self.is_pretraining = True def build(self, input_shape): @@ -66,10 +72,10 @@ def build(self, input_shape): self.threshold = self.add_weight( name="threshold", shape=self.threshold_size, - initializer=Constant(self.config["pruning_parameters"]["threshold_init"]), + initializer=Constant(self.config.pruning_parameters.threshold_init), trainable=True, ) - self.alpha = ops.convert_to_tensor(self.config["pruning_parameters"]["alpha"], dtype="float32") + self.alpha = ops.convert_to_tensor(self.config.pruning_parameters.alpha, dtype="float32") super().build(input_shape) def call(self, weight): @@ -77,7 +83,7 @@ def call(self, weight): sign(W) * ReLu(X), where X = |W| - sigmoid(threshold), with gradient: 1 if W > 0 else alpha. Alpha is decayed after each epoch. 
""" - if self.is_pretraining and self.config["fitcompress_parameters"]["enable_fitcompress"]: + if self.is_pretraining and self.config.fitcompress_parameters.enable_fitcompress: return weight else: mask = self.get_mask(weight) @@ -114,5 +120,16 @@ def post_pre_train_function(self): def post_epoch_function(self, epoch, total_epochs): self.alpha *= cosine_sigmoid_decay(epoch, total_epochs) - if epoch == self.config["pruning_parameters"]["alpha_reset_epoch"]: + if epoch == self.config.pruning_parameters.alpha_reset_epoch: self.alpha *= 0.0 + + def get_config(self): + config = super().get_config() + + config.update( + { + "config": self.config.get_dict(), + "layer_type": self.layer_type, + } + ) + return config diff --git a/src/pquant/pruning_methods/constraint_functions.py b/src/pquant/pruning_methods/constraint_functions.py new file mode 100644 index 0000000..0753996 --- /dev/null +++ b/src/pquant/pruning_methods/constraint_functions.py @@ -0,0 +1,132 @@ +import abc + +import keras +from keras import ops + + +@ops.custom_gradient +def flip_gradient(x, scale=-1.0): + def grad(*args, upstream=None): + if upstream is None: + (upstream,) = args + scale_t = ops.convert_to_tensor(scale, dtype=upstream.dtype) + return (ops.multiply(upstream, scale_t),) # ops.abs() + + return x, grad + + +# Abstract base class for constraints +@keras.utils.register_keras_serializable(name="Constraint") +class Constraint(keras.layers.Layer): + def __init__(self, lmbda_init=1.0, scale=1.0, damping=1.0, **kwargs): + self.use_grad_ = bool(kwargs.pop("use_grad", True)) + self.lr_ = float(kwargs.pop("lr", 0.0)) + super().__init__(**kwargs) + + self.scale = self.add_weight( + name='scale', + shape=(), + initializer=lambda shape, dtype: ops.convert_to_tensor(scale, dtype=dtype), + trainable=False, + ) + self.damping = self.add_weight( + name='damping', + shape=(), + initializer=lambda shape, dtype: ops.convert_to_tensor(damping, dtype=dtype), + trainable=False, + ) + self.lmbda = self.add_weight( + name=f'{self.name}_lmbda', + shape=(), + initializer=lambda shape, dtype: ops.convert_to_tensor(lmbda_init, dtype=dtype), + trainable=self.use_grad_, + ) + + if not self.use_grad_: + self.prev_infs = self.add_weight( + name=f'{self.name}_prev_infs', + shape=(), + initializer=lambda shape, dtype: ops.convert_to_tensor(0.0, dtype=dtype), + trainable=False, + ) + + def call(self, weight): + """Calculates the penalty from a given infeasibility measure.""" + raw_infeasibility = self.get_infeasibility(weight) + infeasibility = self.pipe_infeasibility(raw_infeasibility) + + if self.use_grad_: + ascent_lmbda = flip_gradient(self.lmbda) + # ascent_lmbda = ops.maximum(ascent_lmbda, 0.0) + else: + lmbda_step = self.lr_ * self.scale * self.prev_infs + ascent_lmbda = self.lmbda + lmbda_step + self.lmbda.assign_add(lmbda_step) + self.prev_infs.assign(infeasibility) + + l_term = ascent_lmbda * infeasibility + damp_term = self.damping * ops.square(infeasibility) / 2 + penalty = self.scale * (l_term + damp_term) + + return penalty + + @abc.abstractmethod + def get_infeasibility(self, weight): + """Must be implemented by subclasses to define the violation.""" + raise NotImplementedError + + def pipe_infeasibility(self, infeasibility): + """Optional transformation of raw infeasibility. + Default is identity. 
Subclasses may override.""" + return infeasibility + + def turn_off(self): + if not self.use_grad_: + self.lr_ = 0.0 + self.scale.assign(0.0) + self.lmbda.assign(0.0) + + +@keras.utils.register_keras_serializable(name="EqualityConstraint") +class EqualityConstraint(Constraint): + """Constraint for g(w) == target_value.""" + + def __init__(self, metric_fn, target_value=0.0, **kwargs): + super().__init__(**kwargs) + self.metric_fn = metric_fn + self.target_value = target_value + + def get_infeasibility(self, weight): + metric_value = self.metric_fn(weight) + infeasibility = metric_value - self.target_value + return ops.abs(infeasibility) + + +@keras.utils.register_keras_serializable(name="LessThanOrEqualConstraint") +class LessThanOrEqualConstraint(Constraint): + """Constraint for g(w) <= target_value.""" + + def __init__(self, metric_fn, target_value=0.0, **kwargs): + super().__init__(**kwargs) + self.metric_fn = metric_fn + self.target_value = target_value + + def get_infeasibility(self, weight): + metric_value = self.metric_fn(weight) + infeasibility = metric_value - self.target_value + return ops.maximum(infeasibility, 0.0) + + +@keras.utils.register_keras_serializable(name="GreaterThanOrEqualConstraint") +class GreaterThanOrEqualConstraint(Constraint): + """Constraint for g(w) >= target_value.""" + + def __init__(self, metric_fn, target_value=0.0, **kwargs): + super().__init__(**kwargs) + self.metric_fn = metric_fn + self.target_value = target_value + + def get_infeasibility(self, weight): + metric_value = self.metric_fn(weight) + infeasibility = self.target_value - metric_value + return ops.maximum(infeasibility, 0.0) diff --git a/src/pquant/pruning_methods/cs.py b/src/pquant/pruning_methods/cs.py index 3c5687c..c606fae 100644 --- a/src/pquant/pruning_methods/cs.py +++ b/src/pquant/pruning_methods/cs.py @@ -3,24 +3,30 @@ from keras.initializers import Constant +@keras.saving.register_keras_serializable(package="PQuant") class ContinuousSparsification(keras.layers.Layer): def __init__(self, config, layer_type, *args, **kwargs): super().__init__(*args, **kwargs) + if isinstance(config, dict): + from pquant.core.finetuning import TuningConfig + + config = TuningConfig.load_from_config(config) self.config = config self.beta = 1.0 - self.final_temp = config["pruning_parameters"]["final_temp"] + self.final_temp = config.pruning_parameters.final_temp self.do_hard_mask = False + self.layer_type = layer_type self.mask = None self.is_pretraining = True def build(self, input_shape): - self.s_init = ops.convert_to_tensor(self.config["pruning_parameters"]["threshold_init"] * ops.ones(input_shape)) + self.s_init = ops.convert_to_tensor(self.config.pruning_parameters.threshold_init * ops.ones(input_shape)) self.s = self.add_weight(name="threshold", shape=input_shape, initializer=Constant(self.s_init), trainable=True) self.scaling = 1.0 / ops.sigmoid(self.s_init) super().build(input_shape) def call(self, weight): - if self.is_pretraining and self.config["fitcompress_parameters"]["enable_fitcompress"]: + if self.is_pretraining and self.config.fitcompress_parameters.enable_fitcompress: return weight self.mask = self.get_mask() return self.mask * weight @@ -47,7 +53,7 @@ def post_epoch_function(self, epoch, total_epochs): self.beta *= self.final_temp ** (1 / (total_epochs - 1)) def get_hard_mask(self, weight=None): - if self.config["pruning_parameters"]["enable_pruning"]: + if self.config.pruning_parameters.enable_pruning: return ops.cast((self.s > 0), self.s.dtype) return ops.convert_to_tensor(1.0) @@ 
-58,8 +64,19 @@ def post_round_function(self): def calculate_additional_loss(self): return ops.convert_to_tensor( - self.config["pruning_parameters"]["threshold_decay"] * ops.norm(ops.ravel(self.get_mask()), ord=1) + self.config.pruning_parameters.threshold_decay * ops.norm(ops.ravel(self.get_mask()), ord=1) ) def get_layer_sparsity(self, weight): return ops.sum(self.get_hard_mask()) / ops.size(weight) + + def get_config(self): + config = super().get_config() + + config.update( + { + "config": self.config.get_dict(), + "layer_type": self.layer_type, + } + ) + return config diff --git a/src/pquant/pruning_methods/dst.py b/src/pquant/pruning_methods/dst.py index fb1d443..6ceb4ca 100644 --- a/src/pquant/pruning_methods/dst.py +++ b/src/pquant/pruning_methods/dst.py @@ -4,11 +4,11 @@ def get_threshold_size(config, weight_shape): - if config["pruning_parameters"]["threshold_type"] == "layerwise": + if config.pruning_parameters.threshold_type == "layerwise": return (1, 1) - elif config["pruning_parameters"]["threshold_type"] == "channelwise": + elif config.pruning_parameters.threshold_type == "channelwise": return (weight_shape[0], 1) - elif config["pruning_parameters"]["threshold_type"] == "weightwise": + elif config.pruning_parameters.threshold_type == "weightwise": return (weight_shape[0], np.prod(weight_shape[1:])) @@ -29,11 +29,17 @@ def grad(*args, upstream=None): return output, grad +@keras.saving.register_keras_serializable(package="PQuant") class DST(keras.layers.Layer): def __init__(self, config, layer_type, *args, **kwargs): super().__init__(*args, **kwargs) + if isinstance(config, dict): + from pquant.core.finetuning import TuningConfig + + config = TuningConfig.load_from_config(config) self.config = config self.is_pretraining = True + self.layer_type = layer_type def build(self, input_shape): self.threshold_size = get_threshold_size(self.config, input_shape) @@ -47,11 +53,11 @@ def call(self, weight): 0.4 if 0.4 < |W| <= 1 0 if |W| > 1 """ - if self.is_pretraining and self.config["fitcompress_parameters"]["enable_fitcompress"]: + if self.is_pretraining and self.config.fitcompress_parameters.enable_fitcompress: return weight mask = self.get_mask(weight) ratio = 1.0 - ops.sum(mask) / ops.cast(ops.size(mask), mask.dtype) - flag = ratio >= self.config["pruning_parameters"]["max_pruning_pct"] + flag = ratio >= self.config.pruning_parameters.max_pruning_pct self.threshold.assign(ops.where(flag, ops.ones(self.threshold.shape), self.threshold)) mask = self.get_mask(weight) masked_weight = weight * mask @@ -76,7 +82,7 @@ def get_layer_sparsity(self, weight): return ops.sum(self.get_mask(weight)) / ops.size(weight) def calculate_additional_loss(self): - return self.config["pruning_parameters"]["alpha"] * ops.sum(ops.exp(-self.threshold)) + return self.config.pruning_parameters.alpha * ops.sum(ops.exp(-self.threshold)) def pre_finetune_function(self): pass @@ -89,3 +95,14 @@ def post_pre_train_function(self): def post_round_function(self): pass + + def get_config(self): + config = super().get_config() + + config.update( + { + "config": self.config.get_dict(), + "layer_type": self.layer_type, + } + ) + return config diff --git a/src/pquant/pruning_methods/mdmm.py b/src/pquant/pruning_methods/mdmm.py index 29e908b..d87bc37 100644 --- a/src/pquant/pruning_methods/mdmm.py +++ b/src/pquant/pruning_methods/mdmm.py @@ -3,242 +3,26 @@ # modified by: -import abc +import inspect import keras from keras import ops - -@ops.custom_gradient -def flip_gradient(x, scale=-1.0): - def grad(*args, 
upstream=None): - if upstream is None: - (upstream,) = args - scale_t = ops.convert_to_tensor(scale, dtype=upstream.dtype) - return (ops.multiply(upstream, scale_t),) # ops.abs() - - return x, grad - - -# Abstract base class for constraints -@keras.utils.register_keras_serializable(name="Constraint") -class Constraint(keras.layers.Layer): - def __init__(self, lmbda_init=1.0, scale=1.0, damping=1.0, **kwargs): - self.use_grad_ = bool(kwargs.pop("use_grad", True)) - self.lr_ = float(kwargs.pop("lr", 0.0)) - super().__init__(**kwargs) - - self.scale = self.add_weight( - name='scale', - shape=(), - initializer=lambda shape, dtype: ops.convert_to_tensor(scale, dtype=dtype), - trainable=False, - ) - self.damping = self.add_weight( - name='damping', - shape=(), - initializer=lambda shape, dtype: ops.convert_to_tensor(damping, dtype=dtype), - trainable=False, - ) - self.lmbda = self.add_weight( - name=f'{self.name}_lmbda', - shape=(), - initializer=lambda shape, dtype: ops.convert_to_tensor(lmbda_init, dtype=dtype), - trainable=self.use_grad_, - ) - - if not self.use_grad_: - self.prev_infs = self.add_weight( - name=f'{self.name}_prev_infs', - shape=(), - initializer=lambda shape, dtype: ops.convert_to_tensor(0.0, dtype=dtype), - trainable=False, - ) - - def call(self, weight): - """Calculates the penalty from a given infeasibility measure.""" - raw_infeasibility = self.get_infeasibility(weight) - infeasibility = self.pipe_infeasibility(raw_infeasibility) - - if self.use_grad_: - ascent_lmbda = flip_gradient(self.lmbda) - # ascent_lmbda = ops.maximum(ascent_lmbda, 0.0) - else: - lmbda_step = self.lr_ * self.scale * self.prev_infs - ascent_lmbda = self.lmbda + lmbda_step - self.lmbda.assign_add(lmbda_step) - self.prev_infs.assign(infeasibility) - - l_term = ascent_lmbda * infeasibility - damp_term = self.damping * ops.square(infeasibility) / 2 - penalty = self.scale * (l_term + damp_term) - - return penalty - - @abc.abstractmethod - def get_infeasibility(self, weight): - """Must be implemented by subclasses to define the violation.""" - raise NotImplementedError - - def pipe_infeasibility(self, infeasibility): - """Optional transformation of raw infeasibility. - Default is identity. 
Subclasses may override.""" - return infeasibility - - def turn_off(self): - if not self.use_grad_: - self.lr_ = 0.0 - self.scale.assign(0.0) - self.lmbda.assign(0.0) - - -# ------------------------------------------------------------------- -# Generic Constraint Classes -# ------------------------------------------------------------------- - - -@keras.utils.register_keras_serializable(name="EqualityConstraint") -class EqualityConstraint(Constraint): - """Constraint for g(w) == target_value.""" - - def __init__(self, metric_fn, target_value=0.0, **kwargs): - super().__init__(**kwargs) - self.metric_fn = metric_fn - self.target_value = target_value - - def get_infeasibility(self, weight): - metric_value = self.metric_fn(weight) - infeasibility = metric_value - self.target_value - return ops.abs(infeasibility) - - -@keras.utils.register_keras_serializable(name="LessThanOrEqualConstraint") -class LessThanOrEqualConstraint(Constraint): - """Constraint for g(w) <= target_value.""" - - def __init__(self, metric_fn, target_value=0.0, **kwargs): - super().__init__(**kwargs) - self.metric_fn = metric_fn - self.target_value = target_value - - def get_infeasibility(self, weight): - metric_value = self.metric_fn(weight) - infeasibility = metric_value - self.target_value - return ops.maximum(infeasibility, 0.0) - - -@keras.utils.register_keras_serializable(name="GreaterThanOrEqualConstraint") -class GreaterThanOrEqualConstraint(Constraint): - """Constraint for g(w) >= target_value.""" - - def __init__(self, metric_fn, target_value=0.0, **kwargs): - super().__init__(**kwargs) - self.metric_fn = metric_fn - self.target_value = target_value - - def get_infeasibility(self, weight): - metric_value = self.metric_fn(weight) - infeasibility = self.target_value - metric_value - return ops.maximum(infeasibility, 0.0) - - -# ------------------------------------------------------------------- -# Metric Functions -# ------------------------------------------------------------------- - - -class UnstructuredSparsityMetric: - """L0-L1 based metric""" - - """Calculates the ratio of non-zero weights in a tensor.""" - - def __init__(self, l0_mode='coarse', scale_mode="mean", epsilon=1e-3, target_sparsity=0.8, alpha=100.0): - # Note: scale_mode:"sum" give very high losses for large model - assert l0_mode in ['coarse', 'smooth'], "Mode must be 'coarse' or 'smooth'" - assert scale_mode in ['sum', 'mean'], "Scale mode must be 'sum' or 'mean'" - assert 0 <= target_sparsity <= 1, "target_sparsity must be between 0 and 1" - self.l0_mode = l0_mode - self.scale_mode = scale_mode - self.target_sparsity = float(target_sparsity) - self.epsilon = float(epsilon) - self.alpha = float(alpha) - - self.l0_fn = None - self._scaling = None - - self.build() - - def build(self): - # l0 term -> number of zero weights/number of weights - if self.l0_mode == 'coarse': - self.l0_fn = self._coarse_l0 - elif self.l0_mode == 'smooth': - self.l0_fn = self._smooth_l0 - - if self.scale_mode == 'mean': - self._scaling = self._mean_scaling - elif self.scale_mode == 'sum': - self._scaling = self._sum_scaling - - def _sum_scaling(self, fn_value, num): - return fn_value - - def _mean_scaling(self, fn_value, num): - return fn_value / num - - def _coarse_l0(self, weight_vector): - return ops.mean(ops.cast(ops.abs(weight_vector) <= self.epsilon, "float32")) - - def _smooth_l0(self, weight_vector): - """Differentiable approximation of L0 norm using Keras ops.""" - return ops.mean(ops.exp(-self.alpha * ops.square(weight_vector))) - - def __call__(self, weight): - 
num_weights = ops.cast(ops.size(weight), weight.dtype) - weights_vector = ops.reshape(weight, [-1]) - - l0_term = self.l0_fn(weights_vector) - l1_term = ops.sum(ops.abs(weights_vector)) - - # farctor by constrction goes to zero when l0_term == target_sparsiity - factor = ops.square(self.target_sparsity) - ops.square(l0_term) - fn_value = factor * l1_term - fn_value = self._scaling(fn_value, num_weights) - - return fn_value - - -class StructuredSparsityMetric: - """Calculates the ratio of near-zero weight groups (based on Reuse Factor: rf).""" - - def __init__(self, rf=1, epsilon=1e-3): - self.rf = rf - self.epsilon = epsilon - - def __call__(self, weight): - original_shape = weight.shape - w_reshaped = ops.reshape(weight, (original_shape[0], -1)) - num_weights = ops.shape(w_reshaped)[1] - - padding = (self.rf - num_weights % self.rf) % self.rf - w_padded = ops.pad(w_reshaped, [[0, 0], [0, padding]]) - - groups = ops.reshape(w_padded, (original_shape[0], -1, self.rf)) - group_norms = ops.sqrt(ops.sum(ops.square(groups), axis=-1)) - zero_groups = ops.less_equal(group_norms, self.epsilon) - num_groups = ops.cast(ops.size(group_norms), "float32") - - return ops.sum(ops.cast(zero_groups, "float32")) / num_groups - +from pquant.core.constants import CONSTRAINT_REGISTRY, METRIC_REGISTRY # ------------------------------------------------------------------- # MDMM Layer # ------------------------------------------------------------------- +@keras.saving.register_keras_serializable(package="PQuant") class MDMM(keras.layers.Layer): def __init__(self, config, layer_type, *args, **kwargs): super().__init__(*args, **kwargs) + if isinstance(config, dict): + from pquant.core.finetuning import TuningConfig + + config = TuningConfig.load_from_config(config) self.config = config self.layer_type = layer_type self.constraint_layer = None @@ -247,42 +31,42 @@ def __init__(self, config, layer_type, *args, **kwargs): self.is_finetuning = False def build(self, input_shape): - metric_type = self.config["pruning_parameters"].get("metric_type", "UnstructuredSparsity") - constraint_type = self.config["pruning_parameters"].get("constraint_type", "GreaterThanOrEqual") - target_value = self.config["pruning_parameters"].get("target_value", 0.0) - target_sparsity = self.config["pruning_parameters"].get("target_sparsity", 0.9) - l0_mode = self.config["pruning_parameters"].get("l0_mode", "coarse") - scale_mode = self.config["pruning_parameters"].get("scale_mode", "mean") + pruning_parameters = self.config.pruning_parameters + metric_type = pruning_parameters.metric_type + constraint_type = pruning_parameters.constraint_type + target_value = pruning_parameters.target_value + target_sparsity = pruning_parameters.target_sparsity + l0_mode = pruning_parameters.l0_mode + scale_mode = pruning_parameters.scale_mode + + candidate_kwargs = { + "epsilon": pruning_parameters.epsilon, + "target_sparsity": target_sparsity, + "l0_mode": l0_mode, + "scale_mode": scale_mode, + "rf": pruning_parameters.rf, + } - if metric_type == "UnstructuredSparsity": - metric_fn = UnstructuredSparsityMetric( - epsilon=self.config["pruning_parameters"].get("epsilon", 1e-5), - target_sparsity=target_sparsity, - l0_mode=l0_mode, - scale_mode=scale_mode, - ) - elif metric_type == "StructuredSparsity": - metric_fn = StructuredSparsityMetric( - rf=self.config["rf"], epsilon=self.config["pruning_parameters"].get("epsilon", 1e-5) - ) + metric_cls = METRIC_REGISTRY.get(metric_type) + sig = inspect.signature(getattr(metric_cls, "__init__", metric_cls)) + 
metric_kwargs = {k: v for k, v in candidate_kwargs.items() if v is not None and k in sig.parameters} + if metric_cls: + metric_fn = metric_cls(**metric_kwargs) else: raise ValueError(f"Unknown metric_type: {metric_type}") common_args = { "metric_fn": metric_fn, "target_value": target_value, - "scale": self.config["pruning_parameters"].get("scale", 1.0), - "damping": self.config["pruning_parameters"].get("damping", 1.0), - "use_grad": self.config["pruning_parameters"].get("use_grad", True), - "lr": self.config.get("lr", 0.0), + "scale": self.config.pruning_parameters.scale, + "damping": self.config.pruning_parameters.damping, + "use_grad": self.config.pruning_parameters.use_grad, + "lr": self.config.training_parameters.lr, } - if constraint_type == "Equality": - self.constraint_layer = EqualityConstraint(**common_args) - elif constraint_type == "LessThanOrEqual": - self.constraint_layer = LessThanOrEqualConstraint(**common_args) - elif constraint_type == "GreaterThanOrEqual": - self.constraint_layer = GreaterThanOrEqualConstraint(**common_args) + constraint_type_cls = CONSTRAINT_REGISTRY.get(constraint_type) + if constraint_type_cls: + self.constraint_layer = constraint_type_cls(**common_args) else: raise ValueError(f"Unknown constraint_type: {constraint_type}") @@ -304,7 +88,7 @@ def call(self, weight): return weight def get_hard_mask(self, weight): - epsilon = self.config["pruning_parameters"].get("epsilon", 1e-5) + epsilon = self.config.pruning_parameters.epsilon return ops.cast(ops.abs(weight) > epsilon, weight.dtype) def get_layer_sparsity(self, weight): @@ -338,3 +122,14 @@ def post_pre_train_function(self): def post_round_function(self): pass + + def get_config(self): + config = super().get_config() + + config.update( + { + "config": self.config.get_dict(), + "layer_type": self.layer_type, + } + ) + return config diff --git a/src/pquant/pruning_methods/metric_functions.py b/src/pquant/pruning_methods/metric_functions.py new file mode 100644 index 0000000..0f22b5e --- /dev/null +++ b/src/pquant/pruning_methods/metric_functions.py @@ -0,0 +1,85 @@ +from keras import ops + + +class UnstructuredSparsityMetric: + """L0-L1 based metric""" + + """Calculates the ratio of non-zero weights in a tensor.""" + + def __init__(self, l0_mode='coarse', scale_mode="mean", epsilon=1e-3, target_sparsity=0.8, alpha=100.0): + # Note: scale_mode:"sum" give very high losses for large model + assert l0_mode in ['coarse', 'smooth'], "Mode must be 'coarse' or 'smooth'" + assert scale_mode in ['sum', 'mean'], "Scale mode must be 'sum' or 'mean'" + assert 0 <= target_sparsity <= 1, "target_sparsity must be between 0 and 1" + self.l0_mode = l0_mode + self.scale_mode = scale_mode + self.target_sparsity = float(target_sparsity) + self.epsilon = float(epsilon) + self.alpha = float(alpha) + + self.l0_fn = None + self._scaling = None + + self.build() + + def build(self): + # l0 term -> number of zero weights/number of weights + if self.l0_mode == 'coarse': + self.l0_fn = self._coarse_l0 + elif self.l0_mode == 'smooth': + self.l0_fn = self._smooth_l0 + + if self.scale_mode == 'mean': + self._scaling = self._mean_scaling + elif self.scale_mode == 'sum': + self._scaling = self._sum_scaling + + def _sum_scaling(self, fn_value, num): + return fn_value + + def _mean_scaling(self, fn_value, num): + return fn_value / num + + def _coarse_l0(self, weight_vector): + return ops.mean(ops.cast(ops.abs(weight_vector) <= self.epsilon, "float32")) + + def _smooth_l0(self, weight_vector): + """Differentiable approximation of L0 norm 
using Keras ops.""" + return ops.mean(ops.exp(-self.alpha * ops.square(weight_vector))) + + def __call__(self, weight): + num_weights = ops.cast(ops.size(weight), weight.dtype) + weights_vector = ops.reshape(weight, [-1]) + + l0_term = self.l0_fn(weights_vector) + l1_term = ops.sum(ops.abs(weights_vector)) + + # farctor by constrction goes to zero when l0_term == target_sparsiity + factor = ops.square(self.target_sparsity) - ops.square(l0_term) + fn_value = factor * l1_term + fn_value = self._scaling(fn_value, num_weights) + + return fn_value + + +class StructuredSparsityMetric: + """Calculates the ratio of near-zero weight groups (based on Reuse Factor: rf).""" + + def __init__(self, rf=1, epsilon=1e-3): + self.rf = rf + self.epsilon = epsilon + + def __call__(self, weight): + original_shape = weight.shape + w_reshaped = ops.reshape(weight, (original_shape[0], -1)) + num_weights = ops.shape(w_reshaped)[1] + + padding = (self.rf - num_weights % self.rf) % self.rf + w_padded = ops.pad(w_reshaped, [[0, 0], [0, padding]]) + + groups = ops.reshape(w_padded, (original_shape[0], -1, self.rf)) + group_norms = ops.sqrt(ops.sum(ops.square(groups), axis=-1)) + zero_groups = ops.less_equal(group_norms, self.epsilon) + num_groups = ops.cast(ops.size(group_norms), "float32") + + return ops.sum(ops.cast(zero_groups, "float32")) / num_groups diff --git a/src/pquant/pruning_methods/pdp.py b/src/pquant/pruning_methods/pdp.py index ce7afb7..88f9ba8 100644 --- a/src/pquant/pruning_methods/pdp.py +++ b/src/pquant/pruning_methods/pdp.py @@ -2,13 +2,18 @@ from keras import ops +@keras.saving.register_keras_serializable(package="PQuant") class PDP(keras.layers.Layer): def __init__(self, config, layer_type, *args, **kwargs): super().__init__(*args, **kwargs) - self.init_r = ops.convert_to_tensor(config["pruning_parameters"]["sparsity"]) - self.epsilon = ops.convert_to_tensor(config["pruning_parameters"]["epsilon"]) - self.r = config["pruning_parameters"]["sparsity"] - self.temp = config["pruning_parameters"]["temperature"] + if isinstance(config, dict): + from pquant.core.finetuning import TuningConfig + + config = TuningConfig.load_from_config(config) + self.init_r = ops.convert_to_tensor(config.pruning_parameters.sparsity) + self.epsilon = ops.convert_to_tensor(config.pruning_parameters.epsilon) + self.r = config.pruning_parameters.sparsity + self.temp = config.pruning_parameters.temperature self.is_pretraining = True self.config = config self.fine_tuning = False @@ -35,7 +40,7 @@ def post_round_function(self): def get_hard_mask(self, weight): if self.fine_tuning: return self.mask - if self.config["pruning_parameters"]["structured_pruning"]: + if self.config.pruning_parameters.structured_pruning: if self.layer_type == "conv": mask = self.get_mask_structured_channel(weight) else: @@ -70,7 +75,7 @@ def get_mask_structured_linear(self, weight): t = ops.ones(norm.shape) * 0.5 * (Wh + Wt) soft_input = ops.concatenate((t**2, norm**2), axis=0) / self.temp softmax_result = ops.softmax(soft_input, axis=0) - zw, mw = ops.unstack(softmax_result, axis=0) + _, mw = ops.unstack(softmax_result, axis=0) mw = ops.expand_dims(mw, 0) self.mask = mw return mw @@ -121,7 +126,7 @@ def get_mask(self, weight): t = self.t * (Wh + Wt) soft_input = ops.concatenate((t**2, weight_reshaped**2), axis=-1) / self.temp softmax_result = ops.softmax(soft_input, axis=-1) - zw, mw = ops.unstack(softmax_result, axis=-1) + _, mw = ops.unstack(softmax_result, axis=-1) mask = ops.reshape(mw, weight.shape) self.mask = mask return mask @@ -130,7 +135,7 
@@ def call(self, weight): if self.fine_tuning: mask = self.mask else: - if self.config["pruning_parameters"]["structured_pruning"]: + if self.config.pruning_parameters.structured_pruning: if self.layer_type == "conv": mask = self.get_mask_structured_channel(weight) else: @@ -149,3 +154,13 @@ def get_layer_sparsity(self, weight): def post_epoch_function(self, epoch, total_epochs): pass + + def get_config(self): + config = super().get_config() + config.update( + { + "config": self.config.get_dict(), + "layer_type": self.layer_type, + } + ) + return config diff --git a/src/pquant/pruning_methods/wanda.py b/src/pquant/pruning_methods/wanda.py index 3df8d9e..2477a87 100644 --- a/src/pquant/pruning_methods/wanda.py +++ b/src/pquant/pruning_methods/wanda.py @@ -2,10 +2,14 @@ from keras import ops +@keras.saving.register_keras_serializable(package="PQuant") class Wanda(keras.layers.Layer): - def __init__(self, config, layer_type, *args, **kwargs): super().__init__(*args, **kwargs) + if isinstance(config, dict): + from pquant.core.finetuning import TuningConfig + + config = TuningConfig.load_from_config(config) self.config = config self.act_type = "relu" self.t = 0 @@ -14,11 +18,11 @@ def __init__(self, config, layer_type, *args, **kwargs): self.inputs = None self.total = 0.0 self.done = False - self.sparsity = self.config["pruning_parameters"]["sparsity"] + self.sparsity = self.config.pruning_parameters.sparsity self.is_pretraining = True - self.N = self.config["pruning_parameters"]["N"] - self.M = self.config["pruning_parameters"]["M"] - self.t_start_collecting_batch = self.config["pruning_parameters"]["t_start_collecting_batch"] + self.N = self.config.pruning_parameters.N + self.M = self.config.pruning_parameters.M + self.t_start_collecting_batch = self.config.pruning_parameters.t_start_collecting_batch def build(self, input_shape): self.mask = ops.ones(input_shape) @@ -95,7 +99,7 @@ def collect_input(self, x, weight, training): self.total += 1 self.inputs = x if self.inputs is None else self.inputs + x - if self.batches_collected % (self.config["pruning_parameters"]["t_delta"]) == 0: + if self.batches_collected % (self.config.pruning_parameters.t_delta) == 0: inputs_avg = self.inputs / self.total self.prune(inputs_avg, weight) self.done = True @@ -135,3 +139,14 @@ def post_epoch_function(self, epoch, total_epochs): if self.is_pretraining is False: self.t += 1 pass + + def get_config(self): + config = super().get_config() + + config.update( + { + "config": self.config.get_dict(), + "layer_type": self.layer_type, + } + ) + return config diff --git a/tests/test_ap.py b/tests/test_ap.py index c5f8b79..0ee480c 100644 --- a/tests/test_ap.py +++ b/tests/test_ap.py @@ -9,6 +9,7 @@ def config(): return { "pruning_parameters": { + "pruning_method": "activation_pruning", "disable_pruning_for_layers": [], "enable_pruning": True, "threshold": 0.3, diff --git a/tests/test_keras_compression_layers.py b/tests/test_keras_compression_layers.py index 589bf27..0cc9090 100644 --- a/tests/test_keras_compression_layers.py +++ b/tests/test_keras_compression_layers.py @@ -1,3 +1,5 @@ +from types import SimpleNamespace + import keras import numpy as np import pytest @@ -27,6 +29,15 @@ remove_pruning_from_model_tf, ) + +def _to_obj(x): + if isinstance(x, dict): + return SimpleNamespace(**{k: _to_obj(v) for k, v in x.items()}) + if isinstance(x, list): + return [_to_obj(v) for v in x] + return x + + BATCH_SIZE = 4 OUT_FEATURES = 32 IN_FEATURES = 16 @@ -36,7 +47,7 @@ @pytest.fixture def config_pdp(): - return { + cfg = { 
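        # The fixtures below build plain dicts and convert them with _to_obj
        # (defined above), so the tests use the same attribute access
        # (cfg.pruning_parameters.sparsity) that the compression layers now
        # expect from TuningConfig-style config objects.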
"pruning_parameters": { "disable_pruning_for_layers": [], "enable_pruning": True, @@ -62,11 +73,12 @@ def config_pdp(): "training_parameters": {"pruning_first": False}, "fitcompress_parameters": {"enable_fitcompress": False}, } + return _to_obj(cfg) @pytest.fixture def config_ap(): - return { + cfg = { "pruning_parameters": { "disable_pruning_for_layers": [], "enable_pruning": True, @@ -91,11 +103,12 @@ def config_ap(): "training_parameters": {"pruning_first": False}, "fitcompress_parameters": {"enable_fitcompress": False}, } + return _to_obj(cfg) @pytest.fixture def config_wanda(): - return { + cfg = { "pruning_parameters": { "calculate_pruning_budget": False, "disable_pruning_for_layers": [], @@ -123,11 +136,12 @@ def config_wanda(): "training_parameters": {"pruning_first": False}, "fitcompress_parameters": {"enable_fitcompress": False}, } + return _to_obj(cfg) @pytest.fixture def config_cs(): - return { + cfg = { "pruning_parameters": { "disable_pruning_for_layers": [], "enable_pruning": True, @@ -151,6 +165,7 @@ def config_cs(): "training_parameters": {"pruning_first": False}, "fitcompress_parameters": {"enable_fitcompress": False}, } + return _to_obj(cfg) @pytest.fixture @@ -214,7 +229,7 @@ def test_separable_conv2d_call(config_pdp, conv2d_input): def test_separable_conv2d_add_remove_layers(config_pdp, conv2d_input): # Case pruning not quantizing - config_pdp["pruning_parameters"]["enable_pruning"] = True + config_pdp.pruning_parameters.enable_pruning = True inputs = keras.Input(shape=conv2d_input.shape[1:]) out = SeparableConv2D(OUT_FEATURES, KERNEL_SIZE, use_bias=False, padding="same")(inputs) model = keras.Model(inputs=inputs, outputs=out, name="test_conv2d") @@ -249,7 +264,7 @@ def test_separable_conv2d_add_remove_layers(config_pdp, conv2d_input): def test_separable_conv2d_get_layer_keep_ratio(config_pdp, conv2d_input): - config_pdp["pruning_parameters"]["enable_pruning"] = True + config_pdp.pruning_parameters.enable_pruning = True inputs = keras.Input(shape=conv2d_input.shape[1:]) out = SeparableConv2D(OUT_FEATURES, KERNEL_SIZE, use_bias=False, padding="same")(inputs) model = keras.Model(inputs=inputs, outputs=out, name="test_conv2d") @@ -276,7 +291,7 @@ def test_separable_conv2d_get_layer_keep_ratio(config_pdp, conv2d_input): def test_separable_conv2d_trigger_post_pretraining(config_pdp, conv2d_input): - config_pdp["quantization_parameters"]["enable_quantization"] = True + config_pdp.quantization_parameters.enable_quantization = True inputs = keras.Input(shape=conv2d_input.shape[1:]) out = SeparableConv2D(OUT_FEATURES, KERNEL_SIZE, use_bias=False, padding="same")(inputs) act1 = Activation("tanh")(out) @@ -313,7 +328,7 @@ def test_conv1d_call(config_pdp, conv1d_input): def test_dense_add_remove_layers(config_pdp, dense_input): - config_pdp["pruning_parameters"]["enable_pruning"] = True + config_pdp.pruning_parameters.enable_pruning = True inputs = keras.Input(shape=(dense_input.shape[1:])) out = Dense(OUT_FEATURES, use_bias=False)(inputs) model = keras.Model(inputs=inputs, outputs=out, name="test_dense") @@ -335,7 +350,7 @@ def test_dense_add_remove_layers(config_pdp, dense_input): def test_conv2d_add_remove_layers(config_pdp, conv2d_input): - config_pdp["pruning_parameters"]["enable_pruning"] = True + config_pdp.pruning_parameters.enable_pruning = True inputs = keras.Input(shape=conv2d_input.shape[1:]) out = Conv2D(OUT_FEATURES, KERNEL_SIZE, use_bias=False)(inputs) model = keras.Model(inputs=inputs, outputs=out, name="test_conv2d") @@ -357,7 +372,7 @@ def 
test_conv2d_add_remove_layers(config_pdp, conv2d_input): def test_depthwise_conv2d_add_remove_layers(config_pdp, conv2d_input): - config_pdp["pruning_parameters"]["enable_pruning"] = True + config_pdp.pruning_parameters.enable_pruning = True inputs = keras.Input(shape=conv2d_input.shape[1:]) out = DepthwiseConv2D(KERNEL_SIZE, use_bias=False)(inputs) model = keras.Model(inputs=inputs, outputs=out, name="test_conv2d") @@ -379,7 +394,7 @@ def test_depthwise_conv2d_add_remove_layers(config_pdp, conv2d_input): def test_conv1d_add_remove_layers(config_pdp, conv1d_input): - config_pdp["pruning_parameters"]["enable_pruning"] = True + config_pdp.pruning_parameters.enable_pruning = True inputs = keras.Input(shape=conv1d_input.shape[1:]) out = Conv1D(OUT_FEATURES, KERNEL_SIZE, use_bias=False)(inputs) model = keras.Model(inputs=inputs, outputs=out, name="test_conv1d") @@ -401,7 +416,7 @@ def test_conv1d_add_remove_layers(config_pdp, conv1d_input): def test_dense_get_layer_keep_ratio(config_pdp, dense_input): - config_pdp["pruning_parameters"]["enable_pruning"] = True + config_pdp.pruning_parameters.enable_pruning = True inputs = keras.Input(shape=(dense_input.shape[1:])) out = Dense(OUT_FEATURES, use_bias=False)(inputs) model = keras.Model(inputs=inputs, outputs=out, name="test_dense") @@ -421,7 +436,7 @@ def test_dense_get_layer_keep_ratio(config_pdp, dense_input): def test_conv2d_get_layer_keep_ratio(config_pdp, conv2d_input): - config_pdp["pruning_parameters"]["enable_pruning"] = True + config_pdp.pruning_parameters.enable_pruning = True inputs = keras.Input(shape=conv2d_input.shape[1:]) out = Conv2D(OUT_FEATURES, KERNEL_SIZE, use_bias=False)(inputs) model = keras.Model(inputs=inputs, outputs=out, name="test_conv2d") @@ -441,7 +456,7 @@ def test_conv2d_get_layer_keep_ratio(config_pdp, conv2d_input): def test_depthwise_conv2d_get_layer_keep_ratio(config_pdp, conv2d_input): - config_pdp["pruning_parameters"]["enable_pruning"] = True + config_pdp.pruning_parameters.enable_pruning = True inputs = keras.Input(shape=conv2d_input.shape[1:]) out = DepthwiseConv2D(KERNEL_SIZE, use_bias=False)(inputs) model = keras.Model(inputs=inputs, outputs=out, name="test_conv2d") @@ -461,7 +476,7 @@ def test_depthwise_conv2d_get_layer_keep_ratio(config_pdp, conv2d_input): def test_conv1d_get_layer_keep_ratio(config_pdp, conv1d_input): - config_pdp["pruning_parameters"]["enable_pruning"] = True + config_pdp.pruning_parameters.enable_pruning = True inputs = keras.Input(shape=conv1d_input.shape[1:]) out = Conv1D(OUT_FEATURES, KERNEL_SIZE, use_bias=False)(inputs) model = keras.Model(inputs=inputs, outputs=out, name="test_conv1d") @@ -489,7 +504,7 @@ def test_check_activation(config_pdp, dense_input): assert isinstance(model.layers[2], ReLU) - config_pdp["quantization_parameters"]["enable_quantization"] = True + config_pdp.quantization_parameters.enable_quantization = True inputs = keras.Input(shape=dense_input.shape[1:]) out = Dense(OUT_FEATURES, use_bias=False, activation="relu")(inputs) model = keras.Model(inputs=inputs, outputs=out, name="test_dense") @@ -497,7 +512,7 @@ def test_check_activation(config_pdp, dense_input): assert isinstance(model.layers[2], QuantizedReLU) # Tanh - config_pdp["quantization_parameters"]["enable_quantization"] = False + config_pdp.quantization_parameters.enable_quantization = False inputs = keras.Input(shape=dense_input.shape[1:]) out = Dense(OUT_FEATURES, use_bias=False, activation="tanh")(inputs) model = keras.Model(inputs=inputs, outputs=out, name="test_dense") @@ -506,7 +521,7 @@ def 
test_check_activation(config_pdp, dense_input): assert isinstance(model.layers[2], Activation) assert model.layers[2].activation.__name__ == "tanh" - config_pdp["quantization_parameters"]["enable_quantization"] = True + config_pdp.quantization_parameters.enable_quantization = True inputs = keras.Input(shape=dense_input.shape[1:]) out = Dense(OUT_FEATURES, use_bias=False, activation="tanh")(inputs) model = keras.Model(inputs=inputs, outputs=out, name="test_dense") @@ -515,8 +530,8 @@ def test_check_activation(config_pdp, dense_input): def test_hgq_activation_built(config_pdp, conv2d_input): - config_pdp["quantization_parameters"]["enable_quantization"] = True - config_pdp["quantization_parameters"]["use_high_granularity_quantization"] = True + config_pdp.quantization_parameters.enable_quantization = True + config_pdp.quantization_parameters.use_high_granularity_quantization = True inputs = keras.Input(shape=conv2d_input.shape[1:]) out = Conv2D(OUT_FEATURES, KERNEL_SIZE, use_bias=True, padding="same")(inputs) act = ReLU()(out) @@ -1218,8 +1233,8 @@ def test_cs_dense_channels_last_transpose(config_cs, dense_input): def test_calculate_pruning_budget(config_wanda, dense_input): sparsity = 0.75 - config_wanda["pruning_parameters"]["calculate_pruning_budget"] = True - config_wanda["pruning_parameters"]["sparsity"] = sparsity + config_wanda.pruning_parameters.calculate_pruning_budget = True + config_wanda.pruning_parameters.sparsity = sparsity inputs = keras.Input(shape=dense_input.shape[1:]) out = Dense(OUT_FEATURES, use_bias=False)(inputs) @@ -1249,7 +1264,7 @@ def test_calculate_pruning_budget(config_wanda, dense_input): def test_trigger_post_pretraining(config_pdp, conv2d_input): - config_pdp["quantization_parameters"]["enable_quantization"] = True + config_pdp.quantization_parameters.enable_quantization = True inputs = keras.Input(shape=conv2d_input.shape[1:]) out = Dense(OUT_FEATURES, use_bias=False)(inputs) act1 = Activation("tanh")(out) @@ -1273,8 +1288,8 @@ def test_trigger_post_pretraining(config_pdp, conv2d_input): def test_hgq_weight_shape(config_pdp, dense_input): - config_pdp["quantization_parameters"]["enable_quantization"] = True - config_pdp["quantization_parameters"]["use_high_granularity_quantization"] = True + config_pdp.quantization_parameters.enable_quantization = True + config_pdp.quantization_parameters.use_high_granularity_quantization = True inputs = keras.Input(shape=dense_input.shape[1:]) out = Dense(OUT_FEATURES, use_bias=False)(inputs) act1 = Activation("tanh")(out) @@ -1287,7 +1302,7 @@ def test_hgq_weight_shape(config_pdp, dense_input): layer_2_input_shape = [1] + list(model.layers[2].input.shape[1:]) assert model.layers[2].hgq.quantizer._i.shape == layer_2_input_shape - config_pdp["quantization_parameters"]["hgq_heterogeneous"] = False + config_pdp.quantization_parameters.hgq_heterogeneous = False inputs = keras.Input(shape=dense_input.shape[1:]) out = Dense(OUT_FEATURES, use_bias=False)(inputs) act1 = Activation("tanh")(out) @@ -1301,8 +1316,8 @@ def test_hgq_weight_shape(config_pdp, dense_input): def test_replace_weight_with_original_value(config_pdp, conv2d_input, conv1d_input, dense_input): - config_pdp["quantization_parameters"]["enable_quantization"] = False - config_pdp["pruning_parameters"]["enable_pruning"] = False + config_pdp.quantization_parameters.enable_quantization = False + config_pdp.pruning_parameters.enable_pruning = False # Case Dense inputs = keras.Input(shape=dense_input.shape[1:]) out = Dense(OUT_FEATURES, use_bias=True)(inputs) @@ -1334,8 
+1349,8 @@ def test_replace_weight_with_original_value(config_pdp, conv2d_input, conv1d_inp def test_set_activation_custom_bits_hgq(config_pdp, conv2d_input): - config_pdp["quantization_parameters"]["enable_quantization"] = True - config_pdp["quantization_parameters"]["use_high_granularity_quantization"] = True + config_pdp.quantization_parameters.enable_quantization = True + config_pdp.quantization_parameters.use_high_granularity_quantization = True inputs = keras.Input(shape=conv2d_input.shape[1:]) out = Conv2D(OUT_FEATURES, kernel_size=KERNEL_SIZE, use_bias=True)(inputs) out = ReLU()(out) @@ -1371,7 +1386,7 @@ def test_set_activation_custom_bits_hgq(config_pdp, conv2d_input): assert ops.all(m.hgq.quantizer.i == 0.0) assert ops.all(m.hgq.quantizer.f == 7.0) - config_pdp["quantization_parameters"]["layer_specific"] = { + config_pdp.quantization_parameters.layer_specific = { 'conv2d_17': { 'weight': {'integer_bits': 1.0, 'fractional_bits': 3.0}, 'bias': {'integer_bits': 2.0, 'fractional_bits': 4.0}, @@ -1418,8 +1433,8 @@ def test_set_activation_custom_bits_hgq(config_pdp, conv2d_input): def test_set_activation_custom_bits_quantizer(config_pdp, conv2d_input): - config_pdp["quantization_parameters"]["enable_quantization"] = True - config_pdp["quantization_parameters"]["use_high_granularity_quantization"] = False + config_pdp.quantization_parameters.enable_quantization = True + config_pdp.quantization_parameters.use_high_granularity_quantization = False inputs = keras.Input(shape=conv2d_input.shape[1:]) out = Conv2D(OUT_FEATURES, kernel_size=KERNEL_SIZE, use_bias=True)(inputs) out = ReLU()(out) @@ -1445,7 +1460,7 @@ def test_set_activation_custom_bits_quantizer(config_pdp, conv2d_input): assert m.i == 0.0 assert m.f == 7.0 - config_pdp["quantization_parameters"]["layer_specific"] = { + config_pdp.quantization_parameters.layer_specific = { 'conv2d_19': { 'weight': {'integer_bits': 1.0, 'fractional_bits': 3.0}, 'bias': {'integer_bits': 2.0, 'fractional_bits': 4.0}, diff --git a/tests/test_pdp.py b/tests/test_pdp.py index 3d58b16..0cebf16 100644 --- a/tests/test_pdp.py +++ b/tests/test_pdp.py @@ -9,6 +9,7 @@ def config(): return { "pruning_parameters": { + "pruning_method": "pdp", "disable_pruning_for_layers": [], "enable_pruning": True, "epsilon": 1.0, diff --git a/tests/test_torch_compression_layers.py b/tests/test_torch_compression_layers.py index bb19914..e3b7ec4 100644 --- a/tests/test_torch_compression_layers.py +++ b/tests/test_torch_compression_layers.py @@ -1,3 +1,5 @@ +from types import SimpleNamespace + import keras import numpy as np import pytest @@ -21,6 +23,15 @@ remove_pruning_from_model_torch, ) + +def _to_obj(x): + if isinstance(x, dict): + return SimpleNamespace(**{k: _to_obj(v) for k, v in x.items()}) + if isinstance(x, list): + return [_to_obj(v) for v in x] + return x + + BATCH_SIZE = 4 OUT_FEATURES = 32 IN_FEATURES = 16 @@ -30,7 +41,7 @@ @pytest.fixture def config_pdp(): - return { + cfg = { "pruning_parameters": { "disable_pruning_for_layers": [], "enable_pruning": True, @@ -56,11 +67,12 @@ def config_pdp(): "training_parameters": {"pruning_first": False}, "fitcompress_parameters": {"enable_fitcompress": False}, } + return _to_obj(cfg) @pytest.fixture def config_ap(): - return { + cfg = { "pruning_parameters": { "disable_pruning_for_layers": [], "enable_pruning": True, @@ -85,11 +97,12 @@ def config_ap(): "training_parameters": {"pruning_first": False}, "fitcompress_parameters": {"enable_fitcompress": False}, } + return _to_obj(cfg) @pytest.fixture def config_wanda(): - 
return { + cfg = { "pruning_parameters": { "calculate_pruning_budget": True, "disable_pruning_for_layers": [], @@ -117,11 +130,12 @@ def config_wanda(): "training_parameters": {"pruning_first": False}, "fitcompress_parameters": {"enable_fitcompress": False}, } + return _to_obj(cfg) @pytest.fixture def config_cs(): - return { + cfg = { "pruning_parameters": { "disable_pruning_for_layers": [], "enable_pruning": True, @@ -145,6 +159,7 @@ def config_cs(): "training_parameters": {"pruning_first": False}, "fitcompress_parameters": {"enable_fitcompress": False}, } + return _to_obj(cfg) @pytest.fixture @@ -210,7 +225,7 @@ def test_conv1d_call(config_pdp, conv1d_input): def test_dense_add_remove_layers(config_pdp, dense_input): - config_pdp["pruning_parameters"]["enable_pruning"] = True + config_pdp.pruning_parameters.enable_pruning = True layer = Linear(IN_FEATURES, OUT_FEATURES, bias=False) model = TestModel(layer) model = add_compression_layers_torch(model, config_pdp, dense_input.shape) @@ -230,7 +245,7 @@ def test_dense_add_remove_layers(config_pdp, dense_input): def test_conv2d_add_remove_layers(config_pdp, conv2d_input): - config_pdp["pruning_parameters"]["enable_pruning"] = True + config_pdp.pruning_parameters.enable_pruning = True layer = Conv2d(IN_FEATURES, OUT_FEATURES, KERNEL_SIZE, bias=False) model = TestModel(layer) model = add_compression_layers_torch(model, config_pdp, conv2d_input.shape) @@ -251,7 +266,7 @@ def test_conv2d_add_remove_layers(config_pdp, conv2d_input): def test_conv1d_add_remove_layers(config_pdp, conv1d_input): - config_pdp["pruning_parameters"]["enable_pruning"] = True + config_pdp.pruning_parameters.enable_pruning = True layer = Conv1d(IN_FEATURES, OUT_FEATURES, KERNEL_SIZE, bias=False) model = TestModel(layer) model = add_compression_layers_torch(model, config_pdp, conv1d_input.shape) @@ -272,7 +287,7 @@ def test_conv1d_add_remove_layers(config_pdp, conv1d_input): def test_dense_get_layer_keep_ratio(config_pdp, dense_input): - config_pdp["pruning_parameters"]["enable_pruning"] = True + config_pdp.pruning_parameters.enable_pruning = True layer = Linear(IN_FEATURES, OUT_FEATURES, bias=False) model = TestModel(layer) model = add_compression_layers_torch(model, config_pdp, dense_input.shape) @@ -291,7 +306,7 @@ def test_dense_get_layer_keep_ratio(config_pdp, dense_input): def test_conv2d_get_layer_keep_ratio(config_pdp, conv2d_input): - config_pdp["pruning_parameters"]["enable_pruning"] = True + config_pdp.pruning_parameters.enable_pruning = True layer = Conv2d(IN_FEATURES, OUT_FEATURES, KERNEL_SIZE, bias=False) model = TestModel(layer) model = add_compression_layers_torch(model, config_pdp, conv2d_input.shape) @@ -310,7 +325,7 @@ def test_conv2d_get_layer_keep_ratio(config_pdp, conv2d_input): def test_conv1d_get_layer_keep_ratio(config_pdp, conv1d_input): - config_pdp["pruning_parameters"]["enable_pruning"] = True + config_pdp.pruning_parameters.enable_pruning = True layer = Conv1d(IN_FEATURES, OUT_FEATURES, KERNEL_SIZE, bias=False) model = TestModel(layer) model = add_compression_layers_torch(model, config_pdp, conv1d_input.shape) @@ -335,20 +350,20 @@ def test_check_activation(config_pdp, dense_input): model = add_compression_layers_torch(model, config_pdp, dense_input.shape) assert isinstance(model.activation, ReLU) - config_pdp["quantization_parameters"]["enable_quantization"] = True + config_pdp.quantization_parameters.enable_quantization = True layer = Linear(IN_FEATURES, OUT_FEATURES, bias=False) model = TestModel(layer, "relu") model = 
add_compression_layers_torch(model, config_pdp, dense_input.shape) assert isinstance(model.activation, QuantizedReLU) # Tanh - config_pdp["quantization_parameters"]["enable_quantization"] = False + config_pdp.quantization_parameters.enable_quantization = False layer = Linear(IN_FEATURES, OUT_FEATURES, bias=False) model = TestModel(layer, "tanh") model = add_compression_layers_torch(model, config_pdp, dense_input.shape) assert isinstance(model.activation, Tanh) - config_pdp["quantization_parameters"]["enable_quantization"] = True + config_pdp.quantization_parameters.enable_quantization = True layer = Linear(IN_FEATURES, OUT_FEATURES, bias=False) model = TestModel(layer, "tanh") model = add_compression_layers_torch(model, config_pdp, dense_input.shape) @@ -386,8 +401,8 @@ def forward(self, x): def test_hgq_activation_built(config_pdp, conv2d_input): - config_pdp["quantization_parameters"]["enable_quantization"] = True - config_pdp["quantization_parameters"]["use_high_granularity_quantization"] = True + config_pdp.quantization_parameters.enable_quantization = True + config_pdp.quantization_parameters.use_high_granularity_quantization = True layer = Conv2d(IN_FEATURES, OUT_FEATURES, KERNEL_SIZE, bias=True) model = TestModelWithAvgPool(layer, "relu") model = add_compression_layers_torch(model, config_pdp, conv2d_input.shape) @@ -404,12 +419,12 @@ def test_hgq_activation_built(config_pdp, conv2d_input): def test_post_training_wanda(config_wanda, conv2d_input): - config_wanda["pruning_parameters"]["calculate_pruning_budget"] = False + config_wanda.pruning_parameters.calculate_pruning_budget = False layer = Conv2d(IN_FEATURES, OUT_FEATURES, KERNEL_SIZE, bias=True) model = TestModel(layer, "relu") calibration_dataset = [conv2d_input, conv2d_input] model = post_training_prune(model, calibration_dataset, config_wanda) - assert get_layer_keep_ratio_torch(model) == 1 - config_wanda["pruning_parameters"]["sparsity"] + assert get_layer_keep_ratio_torch(model) == 1 - config_wanda.pruning_parameters.sparsity class TestModel2(nn.Module): @@ -445,8 +460,8 @@ def forward(self, x): def test_calculate_pruning_budget(config_wanda, dense_input): sparsity = 0.75 - config_wanda["pruning_parameters"]["calculate_pruning_budget"] = True - config_wanda["pruning_parameters"]["sparsity"] = sparsity + config_wanda.pruning_parameters.calculate_pruning_budget = True + config_wanda.pruning_parameters.sparsity = sparsity layer = Linear(IN_FEATURES, OUT_FEATURES, bias=False) layer2 = Linear(OUT_FEATURES, OUT_FEATURES, bias=False) @@ -476,7 +491,7 @@ def test_calculate_pruning_budget(config_wanda, dense_input): def test_trigger_post_pretraining(config_pdp, dense_input): - config_pdp["quantization_parameters"]["enable_quantization"] = True + config_pdp.quantization_parameters.enable_quantization = True layer = Linear(IN_FEATURES, OUT_FEATURES, bias=False) layer2 = Linear(OUT_FEATURES, OUT_FEATURES, bias=False) model = TestModel2(layer, layer2, "relu", "tanh") @@ -497,8 +512,8 @@ def test_trigger_post_pretraining(config_pdp, dense_input): def test_hgq_weight_shape(config_pdp, dense_input): - config_pdp["quantization_parameters"]["enable_quantization"] = True - config_pdp["quantization_parameters"]["use_high_granularity_quantization"] = True + config_pdp.quantization_parameters.enable_quantization = True + config_pdp.quantization_parameters.use_high_granularity_quantization = True layer = Linear(IN_FEATURES, OUT_FEATURES, bias=False) layer2 = Linear(OUT_FEATURES, OUT_FEATURES, bias=False) model = TestModel2(layer, layer2, "relu", 
"tanh") @@ -509,7 +524,7 @@ def test_hgq_weight_shape(config_pdp, dense_input): assert model.submodule.hgq_weight.quantizer._i.shape == model.submodule.weight.shape assert model.activation.hgq.quantizer._i.shape == (1, OUT_FEATURES) - config_pdp["quantization_parameters"]["hgq_heterogeneous"] = False + config_pdp.quantization_parameters.hgq_heterogeneous = False layer = Linear(IN_FEATURES, OUT_FEATURES, bias=False) layer2 = Linear(OUT_FEATURES, OUT_FEATURES, bias=False) model = TestModel2(layer, layer2, "relu", "tanh") @@ -522,8 +537,8 @@ def test_hgq_weight_shape(config_pdp, dense_input): def test_set_activation_custom_bits_hgq(config_pdp, conv2d_input): - config_pdp["quantization_parameters"]["enable_quantization"] = True - config_pdp["quantization_parameters"]["use_high_granularity_quantization"] = True + config_pdp.quantization_parameters.enable_quantization = True + config_pdp.quantization_parameters.use_high_granularity_quantization = True layer = Conv2d(IN_FEATURES, OUT_FEATURES, KERNEL_SIZE, bias=True) layer2 = AvgPool2d(2) model = TestModel2(layer, layer2, "relu", "tanh") @@ -557,7 +572,7 @@ def test_set_activation_custom_bits_hgq(config_pdp, conv2d_input): assert torch.all(m.hgq.quantizer.i == 0.0) assert torch.all(m.hgq.quantizer.f == 7.0) - config_pdp["quantization_parameters"]["layer_specific"] = { + config_pdp.quantization_parameters.layer_specific = { 'submodule': { 'weight': {'integer_bits': 1, 'fractional_bits': 3}, 'bias': {'integer_bits': 2, 'fractional_bits': 4}, @@ -599,8 +614,8 @@ def test_set_activation_custom_bits_hgq(config_pdp, conv2d_input): def test_set_activation_custom_bits_quantizer(config_pdp, conv2d_input): - config_pdp["quantization_parameters"]["enable_quantization"] = True - config_pdp["quantization_parameters"]["use_high_granularity_quantization"] = False + config_pdp.quantization_parameters.enable_quantization = True + config_pdp.quantization_parameters.use_high_granularity_quantization = False layer = Conv2d(IN_FEATURES, OUT_FEATURES, KERNEL_SIZE, bias=True) layer2 = AvgPool2d(2) model = TestModel2(layer, layer2, "relu", "tanh") @@ -617,7 +632,7 @@ def test_set_activation_custom_bits_quantizer(config_pdp, conv2d_input): assert m.i == 0.0 assert m.f == 8.0 - config_pdp["quantization_parameters"]["layer_specific"] = { + config_pdp.quantization_parameters.layer_specific = { 'submodule': { 'weight': {'integer_bits': 1.0, 'fractional_bits': 3.0}, 'bias': {'integer_bits': 1.0, 'fractional_bits': 3.0}, diff --git a/tests/test_wanda.py b/tests/test_wanda.py index 440de0f..609572d 100644 --- a/tests/test_wanda.py +++ b/tests/test_wanda.py @@ -9,6 +9,7 @@ def config(): return { "pruning_parameters": { + "pruning_method": "wanda", "disable_pruning_for_layers": [], "enable_pruning": True, "sparsity": 0.75, From 9e3c73c8c8b77c2987fb8077d5bfc3eeb7d0a4f0 Mon Sep 17 00:00:00 2001 From: Roope Niemi Date: Mon, 29 Sep 2025 19:05:08 +0200 Subject: [PATCH 03/37] initial PQBatchNorm for keras and torch. 
Initial input output quantizer option for weight layers --- src/pquant/configs/config_ap.yaml | 4 +- src/pquant/configs/config_autosparse.yaml | 4 +- src/pquant/configs/config_cs.yaml | 4 +- src/pquant/configs/config_dst.yaml | 4 +- src/pquant/configs/config_mdmm.yaml | 2 + src/pquant/configs/config_pdp.yaml | 15 +- src/pquant/configs/config_wanda.yaml | 2 + src/pquant/core/activations_quantizer.py | 144 ++++---- .../core/tf_impl/compressed_layers_tf.py | 297 ++++++++++++--- .../torch_impl/compressed_layers_torch.py | 347 ++++++++++++------ .../pruning_methods/activation_pruning.py | 2 +- src/pquant/pruning_methods/autosparse.py | 2 +- src/pquant/pruning_methods/cs.py | 2 +- src/pquant/pruning_methods/dst.py | 7 +- tests/test_keras_compression_layers.py | 66 ++-- tests/test_torch_compression_layers.py | 66 ++-- 16 files changed, 621 insertions(+), 347 deletions(-) diff --git a/src/pquant/configs/config_ap.yaml b/src/pquant/configs/config_ap.yaml index 2ea7b03..9031b93 100644 --- a/src/pquant/configs/config_ap.yaml +++ b/src/pquant/configs/config_ap.yaml @@ -18,6 +18,8 @@ quantization_parameters: use_real_tanh: false use_relu_multiplier: true use_symmetric_quantization: false + overflow: SAT + round_mode: RND fitcompress_parameters: enable_fitcompress : false optimize_quantization : true @@ -36,7 +38,7 @@ training_parameters: rewind: never rounds: 1 save_weights_epoch: -1 -batch_size: 64 +batch_size: 256 cosine_tmax: 200 gamma: 0.1 l2_decay: 0.0001 diff --git a/src/pquant/configs/config_autosparse.yaml b/src/pquant/configs/config_autosparse.yaml index 7b10edb..623fa78 100644 --- a/src/pquant/configs/config_autosparse.yaml +++ b/src/pquant/configs/config_autosparse.yaml @@ -21,6 +21,8 @@ quantization_parameters: use_real_tanh: false use_relu_multiplier: true use_symmetric_quantization: false + overflow: SAT + round_mode: RND fitcompress_parameters: enable_fitcompress : false optimize_quantization : true @@ -39,7 +41,7 @@ training_parameters: rewind: never rounds: 1 save_weights_epoch: -1.0 -batch_size: 64 +batch_size: 256 cosine_tmax: 200 gamma: 0.1 l2_decay: 3.0517578125e-05 diff --git a/src/pquant/configs/config_cs.yaml b/src/pquant/configs/config_cs.yaml index 512f6aa..070a7d7 100644 --- a/src/pquant/configs/config_cs.yaml +++ b/src/pquant/configs/config_cs.yaml @@ -17,6 +17,8 @@ quantization_parameters: use_real_tanh: false use_relu_multiplier: true use_symmetric_quantization: false + overflow: SAT + round_mode: RND fitcompress_parameters: enable_fitcompress : false optimize_quantization : true @@ -35,7 +37,7 @@ training_parameters: rewind: post-ticket-search rounds: 3 save_weights_epoch: 2 -batch_size: 64 +batch_size: 256 cosine_tmax: 200 gamma: 0.1 l2_decay: 0.0001 diff --git a/src/pquant/configs/config_dst.yaml b/src/pquant/configs/config_dst.yaml index 6116b38..9fbc0ff 100644 --- a/src/pquant/configs/config_dst.yaml +++ b/src/pquant/configs/config_dst.yaml @@ -7,7 +7,7 @@ pruning_parameters: pruning_method: dst threshold_decay: 0.0 threshold_init: 0.0 - threshold_type: channelwise + threshold_type: weightwise quantization_parameters: default_integer_bits: 0. default_fractional_bits: 7. 
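A note on the two keys added to every quantization_parameters block in this commit: overflow and round_mode are passed straight through to the fixed-point quantizer. A minimal sketch of their semantics, using the quantizers package these modules already import (the tensor values are illustrative only):

    from keras import ops
    from quantizers import get_fixed_quantizer

    # "SAT" saturates out-of-range values instead of wrapping around; "RND"
    # rounds to nearest. Both strings match the new YAML defaults.
    quantizer = get_fixed_quantizer(round_mode="RND", overflow_mode="SAT")
    x = ops.convert_to_tensor([0.3, 1.7, -2.5])
    # k=1.0 (signed), i=0 integer bits, f=7 fractional bits are the config
    # defaults, giving steps of 2**-7 on roughly [-1, 1).
    y = quantizer(x, k=1.0, i=ops.convert_to_tensor(0.0), f=ops.convert_to_tensor(7.0), training=True)
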
@@ -19,6 +19,8 @@ quantization_parameters: use_real_tanh: false use_relu_multiplier: true use_symmetric_quantization: false + overflow: SAT + round_mode: RND fitcompress_parameters: enable_fitcompress : false optimize_quantization : true diff --git a/src/pquant/configs/config_mdmm.yaml b/src/pquant/configs/config_mdmm.yaml index 71720d6..7513efc 100644 --- a/src/pquant/configs/config_mdmm.yaml +++ b/src/pquant/configs/config_mdmm.yaml @@ -27,6 +27,8 @@ quantization_parameters: use_real_tanh: false use_relu_multiplier: true use_symmetric_quantization: false + overflow: SAT + round_mode: RND training_parameters: epochs: 200 fine_tuning_epochs: 30 diff --git a/src/pquant/configs/config_pdp.yaml b/src/pquant/configs/config_pdp.yaml index 1ff44fa..f91bbdc 100644 --- a/src/pquant/configs/config_pdp.yaml +++ b/src/pquant/configs/config_pdp.yaml @@ -4,10 +4,10 @@ pruning_parameters: enable_pruning: true epsilon: 0.015 pruning_method: pdp - sparsity: 0.8 + sparsity: 0.4 temperature: 1.0e-05 threshold_decay: 0. - structured_pruning: false + structured_pruning: true quantization_parameters: default_integer_bits: 0. default_fractional_bits: 7. @@ -17,8 +17,9 @@ quantization_parameters: layer_specific: [] use_high_granularity_quantization: false use_real_tanh: false - use_relu_multiplier: true - use_symmetric_quantization: false + use_relu_multiplier: false + overflow: "SAT" + round_mode: RND fitcompress_parameters: enable_fitcompress : false optimize_quantization : true @@ -31,13 +32,13 @@ fitcompress_parameters: f_lambda : 1 training_parameters: epochs: 100 - fine_tuning_epochs: 100 - pretraining_epochs: 0 + fine_tuning_epochs: 20 + pretraining_epochs: 20 pruning_first: false rewind: never rounds: 1 save_weights_epoch: -1 -batch_size: 64 +batch_size: 256 cosine_tmax: 200 gamma: 0.1 l2_decay: 0.0001 diff --git a/src/pquant/configs/config_wanda.yaml b/src/pquant/configs/config_wanda.yaml index b9f4a2e..f34f746 100644 --- a/src/pquant/configs/config_wanda.yaml +++ b/src/pquant/configs/config_wanda.yaml @@ -21,6 +21,8 @@ quantization_parameters: use_real_tanh: false use_relu_multiplier: true use_symmetric_quantization: false + overflow: SAT + round_mode: RND fitcompress_parameters: enable_fitcompress : false optimize_quantization : true diff --git a/src/pquant/core/activations_quantizer.py b/src/pquant/core/activations_quantizer.py index fa8f922..521919c 100644 --- a/src/pquant/core/activations_quantizer.py +++ b/src/pquant/core/activations_quantizer.py @@ -1,8 +1,8 @@ import keras -from hgq.quantizer import Quantizer from keras import ops from keras.ops import convert_to_tensor, maximum, minimum, tanh -from quantizers import get_fixed_quantizer + +from pquant.core.quantizer_functions import create_quantizer @keras.saving.register_keras_serializable(package="PQuant") @@ -17,41 +17,38 @@ def __init__(self, config, i, f, **kwargs): self.f = convert_to_tensor(f) self.k = convert_to_tensor(1.0) self.config = config - self.use_high_granularity_quantization = config.quantization_parameters.use_high_granularity_quantization + + self.use_hgq = config.quantization_parameters.use_high_granularity_quantization self.is_pretraining = True - self.overflow = "SAT_SYM" if config.quantization_parameters.use_symmetric_quantization else "SAT" + self.round_mode = config.quantization_parameters.round_mode + self.overflow = config.quantization_parameters.overflow self.use_real_tanh = config.quantization_parameters.use_real_tanh self.hgq_heterogeneous = config.quantization_parameters.hgq_heterogeneous def build(self, 
input_shape): super().build(input_shape) - if self.use_high_granularity_quantization: - if self.hgq_heterogeneous: - self.hgq = Quantizer( - k0=self.k, - i0=self.i, - f0=self.f, - round_mode="RND", - overflow_mode=self.overflow, - q_type="kif", - homogeneous_axis=(0,), - ) - else: - self.hgq = Quantizer( - k0=self.k, - i0=self.i, - f0=self.f, - round_mode="RND", - overflow_mode=self.overflow, - q_type="kif", - heterogeneous_axis=(), - ) - self.hgq.build(input_shape) - - else: - self.quantizer = get_fixed_quantizer(round_mode="RND", overflow_mode=self.overflow) + self.quantizer = create_quantizer( + k=self.k, + i=self.i, + f=self.f, + overflow=self.overflow, + round_mode=self.round_mode, + is_data=True, + is_heterogeneous=self.use_hgq, + ) + self.input_quantizer = create_quantizer( + k=self.k, + i=self.i, + f=self.f, + overflow=self.overflow, + round_mode=self.round_mode, + is_data=True, + is_heterogeneous=self.use_hgq, + ) + if self.use_hgq: + self.quantizer.build(input_shape) - def set_activation_bits(self, i, f): + def set_bits(self, i, f): self.i = convert_to_tensor(i) self.f = convert_to_tensor(f) @@ -66,13 +63,14 @@ def post_pre_train_function(self): self.is_pretraining = False def call(self, x): - if self.use_high_granularity_quantization: - x = tanh(x) if self.use_real_tanh else hard_tanh(x) - return self.hgq(x) + if self.use_hgq: + x = self.input_quantizer(x) else: - x = tanh(x) if self.use_real_tanh else hard_tanh(x) - x = self.quantizer(x, k=1.0, i=convert_to_tensor(0.0), f=self.f, training=True) - return x + x = self.input_quantizer(x, k=self.k, i=self.i, f=self.f) + x = tanh(x) if self.use_real_tanh else hard_tanh(x) + if self.use_hgq: + return self.quantizer(x) + return self.quantizer(x, k=self.k, i=self.i, f=self.f) def get_config(self): config = super().get_config() @@ -92,46 +90,38 @@ def __init__(self, config, i, f, **kwargs): self.i = convert_to_tensor(i) self.f = convert_to_tensor(f) self.k = convert_to_tensor(0.0) - self.use_high_granularity_quantization = config.quantization_parameters.use_high_granularity_quantization + + self.use_hgq = config.quantization_parameters.use_high_granularity_quantization self.is_pretraining = True - self.overflow = "SAT" + self.round_mode = config.quantization_parameters.round_mode + self.overflow = config.quantization_parameters.overflow self.use_multiplier = config.quantization_parameters.use_relu_multiplier self.hgq_heterogeneous = config.quantization_parameters.hgq_heterogeneous self.use_fitcompress = config.fitcompress_parameters.enable_fitcompress + + self.post_fitcompress_calibration = False self.saved_inputs = [] def build(self, input_shape): super().build(input_shape) - if self.use_high_granularity_quantization: - if self.hgq_heterogeneous: - self.hgq = Quantizer( - k0=self.k, - i0=self.i, - f0=self.f, - round_mode="RND", - overflow_mode=self.overflow, - q_type="kif", - homogeneous_axis=(0,), - ) - else: - self.hgq = Quantizer( - k0=self.k, - i0=self.i, - f0=self.f, - round_mode="RND", - overflow_mode=self.overflow, - q_type="kif", - heterogeneous_axis=(), - ) - self.hgq.build(input_shape) - else: - self.quantizer = get_fixed_quantizer(round_mode="RND", overflow_mode=self.overflow) + self.quantizer = create_quantizer( + k=self.k, + i=self.i, + f=self.f, + overflow=self.overflow, + round_mode=self.round_mode, + is_data=True, + is_heterogeneous=self.use_hgq, + ) + if self.use_hgq: + self.quantizer.build(input_shape) + if self.use_multiplier: self.multiplier = self.add_weight(shape=(1,), trainable=True, 
initializer=keras.initializers.Constant(-1.0))

-    def set_activation_bits(self, i, f):
+    def set_bits(self, i, f):
         self.i = convert_to_tensor(i)
         self.f = convert_to_tensor(f)
@@ -146,20 +136,18 @@ def hgq_loss(self):
         ) * self.config.quantization_parameters.hgq_gamma

     def call(self, x):
-        if self.use_high_granularity_quantization:
-            return self.hgq(x)
-        else:
-            if self.use_fitcompress and self.is_pretraining:
-                if self.post_fitcompress_calibration:
-                    # Save quantized input into ReLU
-                    self.saved_inputs.append(x)
-                # During FITcompress, we do not use any quantized activations
-                return ops.relu(x)
-            # Multiplier after fitcompress if condition, such that we don't use any relu multiplier during FITcompress search
-            if self.use_multiplier:
-                x = x * 2 ** (ops.stop_gradient(ops.round(self.multiplier) - self.multiplier) + self.multiplier)
-            x = self.quantizer(x, k=convert_to_tensor(0.0), i=convert_to_tensor(self.i), f=convert_to_tensor(self.f), training=True)
-            return x
+        if self.use_fitcompress and self.is_pretraining:
+            if self.post_fitcompress_calibration:
+                # Save quantized input into ReLU
+                self.saved_inputs.append(x)
+            # During FITcompress, we do not use any quantized activations
+            return ops.relu(x)
+        # Apply the multiplier only after the FITCompress branch above, so no ReLU multiplier is used during the FITCompress search
+        if self.use_multiplier:
+            x = x * 2 ** (ops.stop_gradient(ops.round(self.multiplier) - self.multiplier) + self.multiplier)
+        if self.use_hgq:
+            return self.quantizer(x)
+        return self.quantizer(x, k=self.k, i=self.i, f=self.f)

     def get_config(self):
         config = super().get_config()
@@ -183,4 +171,4 @@ def hard_sigmoid(x):

 def hard_tanh(x):
     """Computes hard_tanh function that saturates between -1 and 1."""
-    return 2.0 * hard_sigmoid(x) - 1.0
\ No newline at end of file
+    return 2.0 * hard_sigmoid(x) - 1.0
diff --git a/src/pquant/core/tf_impl/compressed_layers_tf.py b/src/pquant/core/tf_impl/compressed_layers_tf.py
index 04d5a9f..0a2ce3e 100644
--- a/src/pquant/core/tf_impl/compressed_layers_tf.py
+++ b/src/pquant/core/tf_impl/compressed_layers_tf.py
@@ -6,6 +6,7 @@
     AveragePooling1D,
     AveragePooling2D,
     AveragePooling3D,
+    BatchNormalization,
     Conv1D,
     Conv2D,
     Dense,
@@ -17,11 +18,12 @@
 from quantizers import get_fixed_quantizer

 from pquant.core.activations_quantizer import QuantizedReLU, QuantizedTanh
+from pquant.core.quantizer_functions import create_quantizer
 from pquant.core.utils import get_pruning_layer


 class CompressedLayerBase(keras.layers.Layer):
-    def __init__(self, config, layer, layer_type):
+    def __init__(self, config, layer_type, use_input_quantizer=False, use_output_quantizer=False):
         super().__init__()
         i_bits = config.quantization_parameters.default_integer_bits
         f_bits = config.quantization_parameters.default_fractional_bits
@@ -29,19 +31,27 @@ def __init__(self, config, layer, layer_type):
         self.f_weight = ops.convert_to_tensor(f_bits)
         self.i_bias = ops.convert_to_tensor(i_bits)
         self.f_bias = ops.convert_to_tensor(f_bits)
+
+        self.i_input = self.i_output = ops.convert_to_tensor(i_bits)
+        self.f_input = self.f_output = ops.convert_to_tensor(f_bits)
         self.pruning_layer = get_pruning_layer(config=config, layer_type=layer_type)
+
         self.pruning_method = config.pruning_parameters.pruning_method
-        self.overflow = "SAT_SYM" if config.quantization_parameters.use_symmetric_quantization else "SAT"
+        self.round_mode = config.quantization_parameters.round_mode
+        self.overflow = config.quantization_parameters.overflow
         self.hgq_gamma = config.quantization_parameters.hgq_gamma
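        # Input/output quantization is opt-in: use_input_quantizer and
        # use_output_quantizer default to False and are switched on per layer via
        # set_input_output_quantization(), driven by a layer_specific config entry
        # (hypothetical example, layer name and bit widths illustrative):
        #   "dense": {"input": {"integer_bits": 0.0, "fractional_bits": 7.0, "quantize_input": True}}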
self.pruning_first = config.training_parameters.pruning_first self.enable_quantization = config.quantization_parameters.enable_quantization - self.use_high_granularity_quantization = config.quantization_parameters.use_high_granularity_quantization + self.use_hgq = config.quantization_parameters.use_high_granularity_quantization self.hgq_heterogeneous = config.quantization_parameters.hgq_heterogeneous self.enable_pruning = config.pruning_parameters.enable_pruning + self.do_transpose_data = None self.weight_transpose = None self.data_transpose = None + self.use_input_quantizer = use_input_quantizer + self.use_output_quantizer = use_output_quantizer def set_quantization_bits(self, i_bits_w, f_bits_w, i_bits_b, f_bits_b): self.i_weight = ops.convert_to_tensor(i_bits_w) @@ -49,58 +59,40 @@ def set_quantization_bits(self, i_bits_w, f_bits_w, i_bits_b, f_bits_b): self.i_bias = ops.convert_to_tensor(i_bits_b) self.f_bias = ops.convert_to_tensor(f_bits_b) + def set_input_output_quantization(self, input_quantization, output_quantization): + self.use_input_quantizer = input_quantization + self.use_output_quantizer = output_quantization + + def set_input_output_quantization_bits(self, i_input, f_input, i_output, f_output): + self.i_input = i_input + self.f_input = f_input + self.i_output = i_output + self.f_output = f_output + def set_enable_pruning(self, enable_pruning): self.enable_pruning = enable_pruning def build(self, input_shape): super().build(input_shape) - if self.use_high_granularity_quantization: - if self.hgq_heterogeneous: - self.hgq_weight = Quantizer( - k0=1.0, - i0=self.i_weight, - f0=self.f_weight, - round_mode="RND", - overflow_mode=self.overflow, - q_type="kif", - homogeneous_axis=(), - ) - self.hgq_weight.build(self.weight.shape) - if self.use_bias: - self.hgq_bias = Quantizer( - k0=1.0, - i0=self.i_bias, - f0=self.f_bias, - round_mode="RND", - overflow_mode=self.overflow, - q_type="kif", - homogeneous_axis=(), - ) - self.hgq_bias.build(self.bias.shape) - else: - self.hgq_weight = Quantizer( - k0=1.0, - i0=self.i_weight, - f0=self.f_weight, - round_mode="RND", - overflow_mode=self.overflow, - q_type="kif", - heterogeneous_axis=(), - ) - self.hgq_weight.build(self.weight.shape) - if self.use_bias: - self.hgq_bias = Quantizer( - k0=1.0, - i0=self.i_bias, - f0=self.f_bias, - round_mode="RND", - overflow_mode=self.overflow, - q_type="kif", - heterogeneous_axis=(), - ) - self.hgq_bias.build(self.bias.shape) - else: - self.quantizer = get_fixed_quantizer(round_mode="RND", overflow_mode=self.overflow) + self.weight_quantizer = create_quantizer( + k=ops.convert_to_tensor(1.0), + i=self.i_weight, + f=self.f_weight, + overflow=self.overflow, + round_mode=self.round_mode, + is_heterogeneous=self.use_hgq, + is_data=False, + ) + if self.use_bias: + self.bias_quantizer = create_quantizer( + k=ops.convert_to_tensor(1.0), + i=self.i_bias, + f=self.f_bias, + overflow=self.overflow, + round_mode=self.round_mode, + is_heterogeneous=self.use_hgq, + is_data=False, + ) def save_weights(self): self.init_weight = self.weight.value @@ -123,17 +115,17 @@ def handle_transpose(self, x, transpose, do_transpose=False): def quantize_i(self, weight, bias): if self.enable_quantization: - if self.use_high_granularity_quantization: - weight = self.hgq_weight(weight) - bias = None if bias is None else self.hgq_bias(bias) + if self.use_hgq: + weight = self.weight_quantizer(weight) + bias = None if bias is None else self.bias_quantizer(bias) else: - weight = self.quantizer( + weight = self.weight_quantizer( weight, 
k=ops.convert_to_tensor(1.0), i=self.i_weight, f=self.f_weight, training=True ) bias = ( None if bias is None - else self.quantizer(bias, k=ops.convert_to_tensor(1.0), i=self.i_bias, f=self.f_bias, training=True) + else self.bias_quantizer(bias, k=ops.convert_to_tensor(1.0), i=self.i_bias, f=self.f_bias, training=True) ) return weight, bias @@ -170,7 +162,7 @@ def collect_output(self, x, training): class CompressedLayerDepthwiseConv2dKeras(CompressedLayerBase): def __init__(self, config, layer, layer_type): - super().__init__(config, layer, layer_type) + super().__init__(config, layer_type) self.depthwise_regularizer = layer.depthwise_regularizer self.use_bias = layer.use_bias self.strides = layer.strides @@ -211,7 +203,7 @@ def call(self, x, training=None): class CompressedLayerConv2dKeras(CompressedLayerBase): def __init__(self, config, layer, layer_type): - super().__init__(config, layer, layer_type) + super().__init__(config, layer_type) self.kernel_regularizer = layer.kernel_regularizer self.filters = layer.filters self.use_bias = layer.use_bias @@ -283,7 +275,7 @@ def call(self, x, training=None): class CompressedLayerConv1dKeras(CompressedLayerBase): def __init__(self, config, layer, layer_type): - super().__init__(config, layer, layer_type) + super().__init__(config, layer_type) self.kernel_regularizer = layer.kernel_regularizer self.filters = layer.filters self.use_bias = layer.use_bias @@ -328,7 +320,7 @@ def call(self, x, training=None): class CompressedLayerDenseKeras(CompressedLayerBase): def __init__(self, config, layer, layer_type): - super().__init__(config, layer, layer_type) + super().__init__(config, layer_type) self.kernel_regularizer = layer.kernel_regularizer self.use_bias = layer.use_bias self.units = layer.units @@ -363,6 +355,118 @@ def call(self, x, training=None): return x +class PQBatchNormalization(keras.layers.BatchNormalization): + + def __init__( + self, + config, + axis=-1, + momentum=0.99, + epsilon=1e-3, + center=True, + scale=True, + beta_initializer="zeros", + gamma_initializer="ones", + moving_mean_initializer="zeros", + moving_variance_initializer="ones", + beta_regularizer=None, + gamma_regularizer=None, + beta_constraint=None, + gamma_constraint=None, + synchronized=False, + **kwargs, + ): + super().__init__( + axis, + momentum, + epsilon, + center, + scale, + beta_initializer, + gamma_initializer, + moving_mean_initializer, + moving_variance_initializer, + beta_regularizer, + gamma_regularizer, + beta_constraint, + gamma_constraint, + synchronized, + **kwargs, + ) + self.overflow = config["quantization_parameters"]["overflow"] + self.round_mode = config["quantization_parameters"]["round_mode"] + self.config = config + self.f = ops.convert_to_tensor(config["quantization_parameters"]["default_fractional_bits"]) + self.i = ops.convert_to_tensor(config["quantization_parameters"]["default_integer_bits"]) + + def build(self, input_shape): + super().build(input_shape) + self.parameter_quantizer = create_quantizer( + k=1.0, + i=self.i, + f=self.f, + overflow=self.overflow, + round_mode=self.round_mode, + is_heterogeneous=False, + is_data=True, + ) + + def set_bits(self, i, f): + self.i = ops.convert_to_tensor(i) + self.f = ops.convert_to_tensor(f) + + def call(self, inputs, training=None, mask=None): + # Check if the mask has one less dimension than the inputs. + if mask is not None: + if len(mask.shape) != len(inputs.shape) - 1: + # Raise a value error + raise ValueError( + "The mask provided should be one dimension less " + "than the inputs. 
Received: " + f"mask.shape={mask.shape}, inputs.shape={inputs.shape}" + ) + + compute_dtype = keras.backend.result_type(inputs.dtype, "float32") + # BN is prone to overflow with float16/bfloat16 inputs, so we upcast to + # float32 for the subsequent computations. + inputs = ops.cast(inputs, compute_dtype) + + moving_mean = ops.cast(self.moving_mean, inputs.dtype) + moving_variance = ops.cast(self.moving_variance, inputs.dtype) + + if training and self.trainable: + mean, variance = self._moments(inputs, mask) + + self.moving_mean.assign(moving_mean * self.momentum + mean * (1.0 - self.momentum)) + self.moving_variance.assign(moving_variance * self.momentum + variance * (1.0 - self.momentum)) + else: + mean = moving_mean + variance = moving_variance + + if self.scale: + gamma = self.parameter_quantizer(self.gamma, k=1.0, i=self.i, f=self.f) + gamma = ops.cast(gamma, inputs.dtype) + else: + gamma = None + + if self.center: + beta = self.parameter_quantizer(self.beta, k=1.0, i=self.i, f=self.f) + beta = ops.cast(beta, inputs.dtype) + else: + beta = None + + outputs = ops.batch_normalization( + x=inputs, + mean=mean, + variance=variance, + axis=self.axis, + offset=beta, + scale=gamma, + epsilon=self.epsilon, + ) + return ops.cast(outputs, self.compute_dtype) + + class QuantizedPooling(keras.layers.Layer): def __init__(self, config, layer): super().__init__() @@ -834,7 +938,7 @@ def get_model_losses_tf(model, losses): ), ): loss = layer.pruning_layer.calculate_additional_loss() - if layer.enable_quantization and layer.use_high_granularity_quantization: + if layer.enable_quantization and layer.use_hgq: loss += layer.hgq_loss() losses += loss elif isinstance(layer, CompressedLayerSeparableConv2dKeras): @@ -880,6 +984,11 @@ def add_compression_layers_tf(model, config, input_shape=None): new_layer = CompressedLayerDepthwiseConv2dKeras(config, layer, layer_type="conv") i_bits_w, f_bits_w, i_bits_b, f_bits_b = get_quantization_bits_weights_biases(config, layer) new_layer.set_quantization_bits(i_bits_w, f_bits_w, i_bits_b, f_bits_b) + i_input, f_input, i_output, f_output, quantize_input, quantize_output = get_quantization_bits_inputs_outputs( + config, layer + ) + new_layer.set_input_output_quantization(quantize_input, quantize_output) + new_layer.set_input_output_quantization_bits(i_input, f_input, i_output, f_output) enable_pruning = get_enable_pruning(layer, config) new_layer.set_enable_pruning(enable_pruning) pruning_layer_input = layer.kernel @@ -893,6 +1002,11 @@ def add_compression_layers_tf(model, config, input_shape=None): new_layer = CompressedLayerConv2dKeras(config, layer, layer_type="conv") i_bits_w, f_bits_w, i_bits_b, f_bits_b = get_quantization_bits_weights_biases(config, layer) new_layer.set_quantization_bits(i_bits_w, f_bits_w, i_bits_b, f_bits_b) + i_input, f_input, i_output, f_output, quantize_input, quantize_output = get_quantization_bits_inputs_outputs( + config, layer + ) + new_layer.set_input_output_quantization(quantize_input, quantize_output) + new_layer.set_input_output_quantization_bits(i_input, f_input, i_output, f_output) enable_pruning = get_enable_pruning(layer, config) new_layer.set_enable_pruning(enable_pruning) pruning_layer_input = layer.kernel @@ -908,6 +1022,13 @@ def add_compression_layers_tf(model, config, input_shape=None): ) new_layer.depthwise_conv.set_quantization_bits(dw_i_bits_w, dw_f_bits_w, pw_i_bits_b, pw_f_bits_b) new_layer.pointwise_conv.set_quantization_bits(pw_i_bits_w, pw_f_bits_w, pw_i_bits_b, pw_f_bits_b) + i_input, f_input, i_output, f_output, 
quantize_input, quantize_output = get_quantization_bits_inputs_outputs( + config, layer + ) + new_layer.depthwise_conv.set_input_output_quantization(quantize_input, False) + new_layer.pointwise_conv.set_input_output_quantization(False, quantize_output) + new_layer.depthwise_conv.set_input_output_quantization_bits(i_input, f_input, i_output, f_output) + new_layer.pointwise_conv.set_input_output_quantization_bits(i_input, f_input, i_output, f_output) enable_pruning_depthwise, enable_pruning_pointwise = get_enable_pruning(layer, config) new_layer.depthwise_conv.set_enable_pruning(enable_pruning_depthwise) new_layer.pointwise_conv.set_enable_pruning(enable_pruning_pointwise) @@ -930,6 +1051,11 @@ def add_compression_layers_tf(model, config, input_shape=None): new_layer = CompressedLayerConv1dKeras(config, layer, layer_type="conv") i_bits_w, f_bits_w, i_bits_b, f_bits_b = get_quantization_bits_weights_biases(config, layer) new_layer.set_quantization_bits(i_bits_w, f_bits_w, i_bits_b, f_bits_b) + i_input, f_input, i_output, f_output, quantize_input, quantize_output = get_quantization_bits_inputs_outputs( + config, layer + ) + new_layer.set_input_output_quantization(quantize_input, quantize_output) + new_layer.set_input_output_quantization_bits(i_input, f_input, i_output, f_output) enable_pruning = get_enable_pruning(layer, config) new_layer.set_enable_pruning(enable_pruning) pruning_layer_input = layer.kernel @@ -943,6 +1069,11 @@ def add_compression_layers_tf(model, config, input_shape=None): new_layer = CompressedLayerDenseKeras(config, layer, layer_type="linear") i_bits_w, f_bits_w, i_bits_b, f_bits_b = get_quantization_bits_weights_biases(config, layer) new_layer.set_quantization_bits(i_bits_w, f_bits_w, i_bits_b, f_bits_b) + i_input, f_input, i_output, f_output, quantize_input, quantize_output = get_quantization_bits_inputs_outputs( + config, layer + ) + new_layer.set_input_output_quantization(quantize_input, quantize_output) + new_layer.set_input_output_quantization_bits(i_input, f_input, i_output, f_output) enable_pruning = get_enable_pruning(layer, config) new_layer.set_enable_pruning(enable_pruning) pruning_layer_input = layer.kernel @@ -957,13 +1088,16 @@ def add_compression_layers_tf(model, config, input_shape=None): i_bits = config.quantization_parameters.default_integer_bits f_bits = config.quantization_parameters.default_fractional_bits i_bits, f_bits = get_quantization_bits_activations(config, layer) + new_layer = QuantizedReLU(config, i_bits, f_bits) new_layer.build(layer.input.shape) x = new_layer(x) + else: x = layer(x) elif isinstance(layer, Activation): new_layer = check_activation(layer, config) + if new_layer is not None: x = new_layer(x) elif isinstance(layer, (AveragePooling1D, AveragePooling2D, AveragePooling3D)): @@ -973,6 +1107,30 @@ def add_compression_layers_tf(model, config, input_shape=None): new_layer.set_quantization_bits(i_bits, f_bits) new_layer.build(layer.output.shape) x = new_layer(x) + + elif isinstance(layer, (BatchNormalization)): + if config["quantization_parameters"]["enable_quantization"]: + i_bits, f_bits = get_quantization_bits_activations(config, layer) + new_layer = PQBatchNormalization( + config, + layer.axis, + layer.momentum, + layer.epsilon, + layer.center, + layer.scale, + layer.beta_initializer, + layer.gamma_initializer, + layer.moving_mean_initializer, + layer.moving_variance_initializer, + layer.beta_regularizer, + layer.gamma_regularizer, + layer.beta_constraint, + layer.gamma_constraint, + layer.synchronized, + ) + 
new_layer.set_bits(i_bits, f_bits)
+                new_layer.build(layer.output.shape)
+                x = new_layer(x)
+        else:
+            x = layer(x)
     else:
         x = layer(x)
@@ -1030,6 +1188,25 @@ def get_quantization_bits_weights_biases(config, layer):
     return i_bits_w, f_bits_w, i_bits_b, f_bits_b


+def get_quantization_bits_inputs_outputs(config, layer):
+    layer_specific = config["quantization_parameters"]["layer_specific"]
+    i_input = i_output = config["quantization_parameters"]["default_integer_bits"]
+    f_input = f_output = config["quantization_parameters"]["default_fractional_bits"]
+    quantize_input = quantize_output = False
+    if layer.name in layer_specific:
+        if "input" in layer_specific[layer.name]:
+            i_input = layer_specific[layer.name]["input"]["integer_bits"]
+            f_input = layer_specific[layer.name]["input"]["fractional_bits"]
+            if "quantize_input" in layer_specific[layer.name]["input"]:
+                quantize_input = layer_specific[layer.name]["input"]["quantize_input"]
+        if "output" in layer_specific[layer.name]:
+            i_output = layer_specific[layer.name]["output"]["integer_bits"]
+            f_output = layer_specific[layer.name]["output"]["fractional_bits"]
+            if "quantize_output" in layer_specific[layer.name]["output"]:
+                quantize_output = layer_specific[layer.name]["output"]["quantize_output"]
+    return i_input, f_input, i_output, f_output, quantize_input, quantize_output
+
+
 def get_enable_pruning(layer, config):
     enable_pruning = config.pruning_parameters.enable_pruning
     if isinstance(layer, SeparableConv2D):
diff --git a/src/pquant/core/torch_impl/compressed_layers_torch.py b/src/pquant/core/torch_impl/compressed_layers_torch.py
index e55e38a..8024290 100644
--- a/src/pquant/core/torch_impl/compressed_layers_torch.py
+++ b/src/pquant/core/torch_impl/compressed_layers_torch.py
@@ -1,89 +1,97 @@
+import typing
+
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
-from hgq.quantizer import Quantizer
-from keras import ops
-from quantizers import get_fixed_quantizer
+
 from torch.fx import symbolic_trace

 from pquant.core.activations_quantizer import QuantizedReLU, QuantizedTanh
 from pquant.core.utils import get_pruning_layer

-import typing

 if typing.TYPE_CHECKING:
-    from pquant.core.torch_impl.fit_compress import call_fitcompress
-
+    from pquant.core.torch_impl.fit_compress import call_fitcompress  # noqa: F401
 from keras import ops

+from pquant.core.quantizer_functions import create_quantizer
+
+
 class CompressedLayerBase(nn.Module):
     def __init__(self, config, layer, layer_type):
         super().__init__()
-        self.f_weight = torch.tensor(config.quantization_parameters.default_fractional_bits)
-        self.i_weight = torch.tensor(config.quantization_parameters.default_integer_bits)
-        self.f_bias = torch.tensor(config.quantization_parameters.default_fractional_bits)
-        self.i_bias = torch.tensor(config.quantization_parameters.default_integer_bits)
+        self.i_weight = self.i_bias = self.i_input = self.i_output = torch.tensor(
+            config.quantization_parameters.default_integer_bits
+        )
+        self.f_weight = self.f_bias = self.f_input = self.f_output = torch.tensor(
+            config.quantization_parameters.default_fractional_bits
+        )
+
         self.weight = nn.Parameter(layer.weight.clone())
         self.pruning_layer = get_pruning_layer(config=config, layer_type=layer_type)
         self.pruning_method = config.pruning_parameters.pruning_method
-        self.overflow = "SAT_SYM" if config.quantization_parameters.use_symmetric_quantization else "SAT"
-        self.quantizer = get_fixed_quantizer(overflow_mode=self.overflow)
+        self.hgq_heterogeneous = config.quantization_parameters.hgq_heterogeneous
+
self.use_input_quantizer = False + self.use_output_quantizer = False self.bias = nn.Parameter(layer.bias.clone()) if layer.bias is not None else None self.init_weight = self.weight.clone() self.pruning_first = config.training_parameters.pruning_first self.enable_quantization = config.quantization_parameters.enable_quantization - self.use_high_granularity_quantization = config.quantization_parameters.use_high_granularity_quantization + self.round_mode = config.quantization_parameters.round_mode + self.overflow = config.quantization_parameters.overflow + self.use_hgq = config.quantization_parameters.use_high_granularity_quantization self.enable_pruning = config.pruning_parameters.enable_pruning + self.use_fitcompress = config.fitcompress_parameters.enable_fitcompress self.hgq_gamma = config.quantization_parameters.hgq_gamma - def build(self, input_shape): - if self.use_high_granularity_quantization: - if self.hgq_heterogeneous: - self.hgq_weight = Quantizer( - k0=1.0, - i0=self.i_weight, - f0=self.f_weight, - round_mode="RND", - overflow_mode=self.overflow, - q_type="kif", - homogeneous_axis=(), - ) - self.hgq_weight.build(self.weight.shape) - if self.bias is not None: - self.hgq_bias = Quantizer( - k0=1.0, - i0=self.i_bias, - f0=self.f_bias, - round_mode="RND", - overflow_mode=self.overflow, - q_type="kif", - homogeneous_axis=(), - ) - self.hgq_bias.build(self.bias.shape) - else: - self.hgq_weight = Quantizer( - k0=1.0, - i0=self.i_weight, - f0=self.f_weight, - round_mode="RND", - overflow_mode=self.overflow, - q_type="kif", - heterogeneous_axis=(), - ) - self.hgq_weight.build(self.weight.shape) - if self.bias is not None: - self.hgq_bias = Quantizer( - k0=1.0, - i0=self.i_bias, - f0=self.f_bias, - round_mode="RND", - overflow_mode=self.overflow, - q_type="kif", - heterogeneous_axis=(), - ) - self.hgq_bias.build(self.bias.shape) + def build(self): + # Build function to delay quantizer creation until after custom i,f bits have been set + self.weight_quantizer = PyTorchQuantizer( + torch.tensor(1.0), + self.i_weight, + self.f_weight, + self.overflow, + self.round_mode, + self.use_hgq, + False, + self.hgq_gamma, + ) + + if self.bias is not None: + self.bias_quantizer = PyTorchQuantizer( + torch.tensor(1.0), + self.i_bias, + self.f_bias, + self.overflow, + self.round_mode, + self.use_hgq, + False, + self.hgq_gamma, + ) + if self.use_input_quantizer: + self.input_quantizer = PyTorchQuantizer( + torch.tensor(1.0), + self.i_input, + self.f_input, + self.overflow, + self.round_mode, + self.use_hgq, + True, + self.hgq_gamma, + ) + if self.use_output_quantizer: + self.output_quantizer = PyTorchQuantizer( + torch.tensor(1.0), + self.i_output, + self.f_output, + self.overflow, + self.round_mode, + self.use_hgq, + True, + self.hgq_gamma, + ) def save_weights(self): self.init_weight = self.weight.clone() @@ -92,25 +100,30 @@ def rewind_weights(self): self.weight.data = self.init_weight.clone() def hgq_loss(self): - if self.pruning_layer.is_pretraining: + if self.pruning_layer.is_pretraining or not self.use_hgq: return 0.0 - loss = (torch.sum(self.hgq_weight.quantizer.i) + torch.sum(self.hgq_weight.quantizer.f)) * self.hgq_gamma + loss = ( + torch.sum(self.weight_quantizer.quantizer.quantizer.i) + torch.sum(self.weight_quantizer.quantizer.quantizer.f) + ) * self.hgq_gamma if self.bias is not None: - loss += (torch.sum(self.hgq_bias.quantizer.i) + torch.sum(self.hgq_bias.quantizer.f)) * self.hgq_gamma + loss += ( + torch.sum(self.bias_quantizer.quantizer.quantizer.i) + 
torch.sum(self.bias_quantizer.quantizer.quantizer.f) + ) * self.hgq_gamma + if self.use_input_quantizer: + loss += ( + torch.sum(self.input_quantizer.quantizer.quantizer.i) + torch.sum(self.input_quantizer.quantizer.quantizer.f) + ) * self.hgq_gamma + if self.use_output_quantizer: + loss += ( + torch.sum(self.output_quantizer.quantizer.quantizer.i) + + torch.sum(self.output_quantizer.quantizer.quantizer.f) + ) * self.hgq_gamma return loss def quantize(self, weight, bias): if self.enable_quantization: - if self.use_high_granularity_quantization: - weight = self.hgq_weight(weight) - bias = None if bias is None else self.hgq_bias(bias) - else: - weight = self.quantizer(weight, k=torch.tensor(1.0), i=self.i_weight, f=self.f_weight, training=False) - bias = ( - None - if bias is None - else self.quantizer(bias, k=torch.tensor(1.0), i=self.i_bias, f=self.f_bias, training=True) - ) + weight = self.weight_quantizer(weight) + bias = None if bias is None else self.bias_quantizer(bias) return weight, bias def prune(self, weight): @@ -127,15 +140,27 @@ def prune_and_quantize(self, weight, bias): weight = self.prune(weight) return weight, bias - def forward(self, x): - weight, bias = self.prune_and_quantize(self.weight, self.bias) + def pre_forward(self, weight, bias, x): + if self.use_input_quantizer and not self.use_fitcompress and not self.pruning_layer.is_pretraining: + x = self.input_quantizer(x) if self.pruning_method == "wanda": self.pruning_layer.collect_input(x, self.weight, self.training) - x = F.linear(x, weight, bias) + weight, bias = self.prune_and_quantize(weight, bias) + return weight, bias, x + + def post_forward(self, x): + if self.use_output_quantizer and not self.use_fitcompress and not self.pruning_layer.is_pretraining: + x = self.output_quantizer(x) if self.pruning_method == "activation_pruning": self.pruning_layer.collect_output(x, self.training) return x + def forward(self, x): + weight, bias, x = self.pre_forward(self.weight, self.bias, x) + x = F.linear(x, weight, bias) + x = self.post_forward(x) + return x + class CompressedLayerLinear(CompressedLayerBase): def __init__(self, config, layer, layer_type): @@ -259,7 +284,10 @@ def __init__(self, config, layer): self.config = config self.hgq_heterogeneous = config.quantization_parameters.hgq_heterogeneous self.is_pretraining = True - self.use_high_granularity_quantization = config.quantization_parameters.use_high_granularity_quantization + + self.overflow = config.quantization_parameters.overflow + self.round_mode = config.quantization_parameters.round_mode + self.use_hgq = config.quantization_parameters.use_high_granularity_quantization self.pooling = layer self.use_fitcompress = config.fitcompress_parameters.enable_fitcompress self.post_fitcompress_calibration = False @@ -267,33 +295,20 @@ def __init__(self, config, layer): self.hgq_gamma = config.quantization_parameters.hgq_gamma def build(self, input_shape): - if self.use_high_granularity_quantization: - if self.hgq_heterogeneous: - self.hgq = Quantizer( - k0=1.0, - i0=self.i, - f0=self.f, - round_mode="RND", - overflow_mode=self.overflow, - q_type="kif", - homogeneous_axis=(0,), - ) - - else: - self.hgq = Quantizer( - k0=1.0, - i0=self.i, - f0=self.f, - round_mode="RND", - overflow_mode=self.overflow, - q_type="kif", - heterogeneous_axis=(), - ) - self.hgq.build(input_shape) - else: - self.quantizer = get_fixed_quantizer(round_mode="RND", overflow_mode=self.overflow) + self.quantizer = PyTorchQuantizer( + k=torch.tensor(1.0), + i=self.i, + f=self.f, + overflow=self.overflow, 
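As a reference for the k/i/f parameters these wrappers pass around: a minimal, self-contained sketch of the fixed-point behaviour they parameterize, assuming SAT overflow and round-to-nearest (the actual quantizers/hgq implementations additionally handle gradients, SAT_SYM and other rounding modes):

    import torch

    def kif_quantize(x, k, i, f):
        # k: keep-negatives (sign) bit, i: integer bits, f: fractional bits
        scale = 2.0 ** f
        lo = -(2.0 ** i) if k else 0.0   # smallest representable value
        hi = 2.0 ** i - 2.0 ** -f        # largest representable value (SAT saturates here)
        return torch.clamp(torch.round(x * scale) / scale, lo, hi)

    x = torch.tensor([-1.3, 0.26, 3.9])
    kif_quantize(x, k=1.0, i=0.0, f=7.0)  # tensor([-1.0000, 0.2578, 0.9922])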
+ round_mode=self.round_mode, + is_heterogeneous=self.use_hgq, + is_data=True, + hgq_gamma=self.hgq_gamma, + ) + if self.use_hgq: + self.quantizer.quantizer.build(input_shape) - def set_activation_bits(self, i, f): + def set_bits(self, i, f): self.i = torch.tensor(i) self.f = torch.tensor(f) @@ -303,23 +318,19 @@ def post_pre_train_function(self): def hgq_loss(self): if self.is_pretraining: return 0.0 - return ( - torch.sum(self.hgq.quantizer.i) + torch.sum(self.hgq.quantizer.f) - ) * self.config.quantization_parameters.hgq_gamma + + return (torch.sum(self.quantizer.quantizer.i) + torch.sum(self.quantizer.quantizer.f)) * self.config.quantization_parameters.hgq_gamma def quantize(self, x): - if not hasattr(self, "hgq") or not hasattr(self, "quantizer"): + if not hasattr(self, "quantizer"): self.build(x.shape) - if self.use_high_granularity_quantization: - x = self.hgq(x) - else: - if self.use_fitcompress and self.is_pretraining: - if self.post_fitcompress_calibration: - # Save inputs - self.saved_inputs.append(x) - # During FITcompress, we do not use any quantized pooling - return ops.average_pool(x, pool_size=1) - x = self.quantizer(x, k=torch.tensor(1.0), i=self.i, f=self.f, training=True) + if self.use_fitcompress and self.is_pretraining: + if self.post_fitcompress_calibration: + # Save inputs + self.saved_inputs.append(x) + # During FITcompress, we do not use any quantized pooling + return ops.average_pool(x, pool_size=1) + x = self.quantizer(x) return x def forward(self, x): @@ -327,6 +338,90 @@ def forward(self, x): return self.quantize(x) +class PQBatchNorm2d(nn.BatchNorm2d): + + def __init__( + self, + config, + num_features: int, + eps: float = 1e-5, + momentum: typing.Optional[float] = 0.1, + affine: bool = True, + track_running_stats: bool = True, + device=None, + dtype=None, + ): + super().__init__(num_features, eps, momentum, affine, track_running_stats, device=device, dtype=dtype) + self.f = torch.tensor(config["quantization_parameters"]["default_fractional_bits"]) + self.i = torch.tensor(config["quantization_parameters"]["default_integer_bits"]) + self.overflow = config["quantization_parameters"]["overflow"] + self.round_mode = config["quantization_parameters"]["round_mode"] + self.config = config + self.parameter_quantizer = PyTorchQuantizer( + k=torch.tensor(1.0), + i=self.i, + f=self.f, + round_mode=self.round_mode, + overflow=self.overflow, + is_data=False, + is_heterogeneous=False, + ) + self._weight = nn.Parameter(self.weight.clone()) + self._bias = nn.Parameter(self.bias.clone()) + del self._parameters["weight"] + del self._parameters["bias"] + + def set_bits(self, i, f): + self.i = torch.tensor(i) + self.f = torch.tensor(f) + + @property + def weight(self): + return self.parameter_quantizer(self._weight) + + @property + def bias(self): + return self.parameter_quantizer(self._bias) + + def set_quantization_bits(self, i, f): + self.i = torch.tensor(i) + self.f = torch.tensor(f) + + def forward(self, input: torch.Tensor) -> torch.Tensor: + return super().forward(input) + + +class PyTorchQuantizer(nn.Module): + # HGQ quantizer wrapper + def __init__(self, k, i, f, overflow, round_mode, is_heterogeneous, is_data, hgq_gamma=0): + super().__init__() + self.k = 1.0 + self.i = i + self.f = f + self.overflow = overflow + self.round_mode = round_mode + self.use_hgq = is_heterogeneous + self.quantizer = create_quantizer(self.k, self.i, self.f, overflow, round_mode, is_heterogeneous, is_data) + self.is_pretraining = False + self.hgq_gamma = hgq_gamma + + def post_pretrain(self): + 
self.is_pretraining = True + + def forward(self, x): + if self.use_hgq: + x = self.quantizer(x) + else: + x = self.quantizer(x, k=self.k, i=self.i, f=self.f) + return x + + def hgq_loss(self): + if self.is_pretraining or not self.use_hgq: + return 0.0 + loss = (torch.sum(self.quantizer.quantizer.i) + torch.sum(self.quantizer.quantizer.f)) * self.hgq_gamma + return loss + + def add_layer_specific_quantization_to_model(module, config): for name, layer in module.named_modules(): if isinstance(layer, CompressedLayerBase): @@ -342,11 +437,11 @@ def add_layer_specific_quantization_to_model(module, config): layer.i_bias = torch.tensor(bias_int_bits) layer.f_bias = torch.tensor(bias_fractional_bits) layer.build(None) - elif layer.__class__ in [QuantizedPooling, QuantizedReLU, QuantizedTanh]: + elif layer.__class__ in [PQBatchNorm2d, QuantizedPooling, QuantizedReLU, QuantizedTanh]: if name in config.quantization_parameters.layer_specific: i = config.quantization_parameters.layer_specific[name]["integer_bits"] f = config.quantization_parameters.layer_specific[name]["fractional_bits"] - layer.set_activation_bits(i, f) + layer.set_bits(i, f) return module @@ -369,6 +464,16 @@ def add_quantized_activations_to_model_layer(module, config): elif layer.__class__ in [nn.AvgPool1d, nn.AvgPool2d, nn.AvgPool3d]: new_layer = QuantizedPooling(config, layer) setattr(module, name, new_layer) + elif layer.__class__ == nn.BatchNorm2d: + new_layer = PQBatchNorm2d( + config, + num_features=layer.num_features, + eps=layer.eps, + momentum=layer.momentum, + affine=layer.affine, + track_running_stats=layer.track_running_stats, + ) + setattr(module, name, new_layer) else: layer = add_quantized_activations_to_model_layer(layer, config) return module @@ -589,10 +694,10 @@ def pdp_setup(model, config): weight_size = layer.weight.numel() w = torch.sum(global_weights_below_threshold[idx : idx + weight_size]) layer.pruning_layer.init_r = w / weight_size - print(f"PDP Layer {layer} target: {layer.pruning_layer.init_r}") layer.pruning_layer.sparsity = w / weight_size # Wanda idx += weight_size + @torch.no_grad def get_layer_keep_ratio_torch(model): total_w = 0 @@ -626,11 +731,11 @@ def get_model_losses_torch(model, losses): for layer in model.modules(): if isinstance(layer, (CompressedLayerConv2d, CompressedLayerConv1d, CompressedLayerLinear)): loss = layer.pruning_layer.calculate_additional_loss() - if layer.use_high_granularity_quantization: + if layer.use_hgq: loss += layer.hgq_loss() losses += loss elif isinstance(layer, (QuantizedReLU, QuantizedTanh, QuantizedPooling)): - if layer.use_high_granularity_quantization: + if layer.use_hgq: losses += layer.hgq_loss() return losses diff --git a/src/pquant/pruning_methods/activation_pruning.py b/src/pquant/pruning_methods/activation_pruning.py index 6afb000..fe90326 100644 --- a/src/pquant/pruning_methods/activation_pruning.py +++ b/src/pquant/pruning_methods/activation_pruning.py @@ -68,7 +68,7 @@ def collect_output(self, output, training): self.done = True def call(self, weight): # Mask is only updated every t_delta step, using collect_output - if self.is_pretraining and self.config.fitcompress_parameters.enable_fitcompress: + if self.is_pretraining: return weight else: return self.mask * weight diff --git a/src/pquant/pruning_methods/autosparse.py b/src/pquant/pruning_methods/autosparse.py index 507efa8..3025f2d 100644 --- a/src/pquant/pruning_methods/autosparse.py +++ b/src/pquant/pruning_methods/autosparse.py @@ -83,7 +83,7 @@ def call(self, weight): sign(W) * ReLu(X), where X = 
|W| - sigmoid(threshold), with gradient: 1 if W > 0 else alpha. Alpha is decayed after each epoch. """ - if self.is_pretraining and self.config.fitcompress_parameters.enable_fitcompress: + if self.is_pretraining: return weight else: mask = self.get_mask(weight) diff --git a/src/pquant/pruning_methods/cs.py b/src/pquant/pruning_methods/cs.py index c606fae..96c125a 100644 --- a/src/pquant/pruning_methods/cs.py +++ b/src/pquant/pruning_methods/cs.py @@ -26,7 +26,7 @@ def build(self, input_shape): super().build(input_shape) def call(self, weight): - if self.is_pretraining and self.config.fitcompress_parameters.enable_fitcompress: + if self.is_pretraining: return weight self.mask = self.get_mask() return self.mask * weight diff --git a/src/pquant/pruning_methods/dst.py b/src/pquant/pruning_methods/dst.py index 6ceb4ca..66c8fa4 100644 --- a/src/pquant/pruning_methods/dst.py +++ b/src/pquant/pruning_methods/dst.py @@ -44,7 +44,7 @@ def __init__(self, config, layer_type, *args, **kwargs): def build(self, input_shape): self.threshold_size = get_threshold_size(self.config, input_shape) self.threshold = self.add_weight(shape=self.threshold_size, initializer="zeros", trainable=True) - self.mask = ops.ones(input_shape) + self.mask = self.add_weight(shape=input_shape, initializer="ones") def call(self, weight): """ @@ -53,14 +53,16 @@ def call(self, weight): 0.4 if 0.4 < |W| <= 1 0 if |W| > 1 """ - if self.is_pretraining and self.config.fitcompress_parameters.enable_fitcompress: + if self.is_pretraining: return weight mask = self.get_mask(weight) ratio = 1.0 - ops.sum(mask) / ops.cast(ops.size(mask), mask.dtype) flag = ratio >= self.config.pruning_parameters.max_pruning_pct self.threshold.assign(ops.where(flag, ops.ones(self.threshold.shape), self.threshold)) mask = self.get_mask(weight) + self.mask.assign(mask) masked_weight = weight * mask + self.add_loss(self.calculate_additional_loss()) return masked_weight def get_hard_mask(self, weight): @@ -72,7 +74,6 @@ def get_mask(self, weight): pre_binarystep_weights = ops.abs(weights_reshaped) - self.threshold mask = binary_step(pre_binarystep_weights) mask = ops.reshape(mask, weight_orig_shape) - self.mask = mask return mask def pre_epoch_function(self, epoch, total_epochs): diff --git a/tests/test_keras_compression_layers.py b/tests/test_keras_compression_layers.py index 0cc9090..0dab228 100644 --- a/tests/test_keras_compression_layers.py +++ b/tests/test_keras_compression_layers.py @@ -69,6 +69,8 @@ def config_pdp(): "use_real_tanh": False, "use_relu_multiplier": True, "use_symmetric_quantization": False, + "round_mode": "RND", + "overflow": "SAT", }, "training_parameters": {"pruning_first": False}, "fitcompress_parameters": {"enable_fitcompress": False}, @@ -99,6 +101,8 @@ def config_ap(): "use_real_tanh": False, "use_relu_multiplier": True, "use_symmetric_quantization": False, + "round_mode": "RND", + "overflow": "SAT", }, "training_parameters": {"pruning_first": False}, "fitcompress_parameters": {"enable_fitcompress": False}, @@ -132,6 +136,8 @@ def config_wanda(): "use_real_tanh": False, "use_relu_multiplier": True, "use_symmetric_quantization": False, + "round_mode": "RND", + "overflow": "SAT", }, "training_parameters": {"pruning_first": False}, "fitcompress_parameters": {"enable_fitcompress": False}, @@ -161,6 +167,8 @@ def config_cs(): "use_real_tanh": False, "use_relu_multiplier": True, "use_symmetric_quantization": False, + "round_mode": "RND", + "overflow": "SAT", }, "training_parameters": {"pruning_first": False}, "fitcompress_parameters": 
{"enable_fitcompress": False}, @@ -1298,21 +1306,9 @@ def test_hgq_weight_shape(config_pdp, dense_input): model = keras.Model(inputs=inputs, outputs=act2, name="test_conv2d") model = add_compression_layers_tf(model, config_pdp, dense_input.shape) - assert model.layers[1].hgq_weight.quantizer._i.shape == model.layers[1].weight.shape + assert model.layers[1].weight_quantizer.quantizer._i.shape == model.layers[1].weight.shape layer_2_input_shape = [1] + list(model.layers[2].input.shape[1:]) - assert model.layers[2].hgq.quantizer._i.shape == layer_2_input_shape - - config_pdp.quantization_parameters.hgq_heterogeneous = False - inputs = keras.Input(shape=dense_input.shape[1:]) - out = Dense(OUT_FEATURES, use_bias=False)(inputs) - act1 = Activation("tanh")(out) - out2 = Dense(OUT_FEATURES, use_bias=False)(act1) - act2 = ReLU()(out2) - model = keras.Model(inputs=inputs, outputs=act2, name="test_conv2d") - - model = add_compression_layers_tf(model, config_pdp, dense_input.shape) - assert model.layers[1].hgq_weight.quantizer._i.shape == (1, 1) - assert model.layers[2].hgq.quantizer._i.shape == (1, 1) + assert model.layers[2].quantizer.quantizer._i.shape == layer_2_input_shape def test_replace_weight_with_original_value(config_pdp, conv2d_input, conv1d_input, dense_input): @@ -1363,23 +1359,23 @@ def test_set_activation_custom_bits_hgq(config_pdp, conv2d_input): if isinstance(m, (CompressedLayerConv2dKeras)): assert m.i_weight == 0.0 assert m.i_bias == 0.0 - assert ops.all(m.hgq_weight.quantizer.i == 0.0) - assert ops.all(m.hgq_bias.quantizer.i == 0.0) + assert ops.all(m.weight_quantizer.quantizer.i == 0.0) + assert ops.all(m.bias_quantizer.quantizer.i == 0.0) assert m.f_weight == 7.0 assert m.f_bias == 7.0 - assert ops.all(m.hgq_weight.quantizer.f == 7.0) - assert ops.all(m.hgq_bias.quantizer.f == 7.0) + assert ops.all(m.weight_quantizer.quantizer.f == 7.0) + assert ops.all(m.bias_quantizer.quantizer.f == 7.0) elif isinstance(m, (QuantizedTanh)): assert m.i == 0.0 assert m.f == 7.0 - assert ops.all(m.hgq.quantizer.i == 0.0) - assert ops.all(m.hgq.quantizer.f == 7.0) + assert ops.all(m.quantizer.quantizer.i == 0.0) + assert ops.all(m.quantizer.quantizer.f == 7.0) elif isinstance(m, (QuantizedReLU)): assert m.i == 0.0 assert m.f == 8.0 - assert ops.all(m.hgq.quantizer.i == 0.0) - assert ops.all(m.hgq.quantizer.f == 8.0) + assert ops.all(m.quantizer.quantizer.i == 0.0) + assert ops.all(m.quantizer.quantizer.f == 8.0) elif isinstance(m, (QuantizedPooling)): assert m.i == 0.0 assert m.f == 7.0 @@ -1391,9 +1387,9 @@ def test_set_activation_custom_bits_hgq(config_pdp, conv2d_input): 'weight': {'integer_bits': 1.0, 'fractional_bits': 3.0}, 'bias': {'integer_bits': 2.0, 'fractional_bits': 4.0}, }, - 're_lu_7': {'integer_bits': 1.0, 'fractional_bits': 3.0}, + 're_lu_6': {'integer_bits': 1.0, 'fractional_bits': 3.0}, 'average_pooling2d_2': {'integer_bits': 1.0, 'fractional_bits': 3.0}, - 'activation_7': {'integer_bits': 0.0, 'fractional_bits': 3.0}, + 'activation_6': {'integer_bits': 0.0, 'fractional_bits': 3.0}, } inputs = keras.Input(shape=conv2d_input.shape[1:]) @@ -1403,28 +1399,27 @@ def test_set_activation_custom_bits_hgq(config_pdp, conv2d_input): out = Activation("tanh")(out) model = keras.Model(inputs=inputs, outputs=out) model = add_compression_layers_tf(model, config_pdp, conv2d_input.shape) - for m in model.layers: if isinstance(m, (CompressedLayerConv2dKeras)): assert m.i_weight == 1.0 assert m.i_bias == 2.0 - assert ops.all(m.hgq_weight.quantizer.i == 1.0) - assert ops.all(m.hgq_bias.quantizer.i 
== 2.0) + assert ops.all(m.weight_quantizer.quantizer.i == 1.0) + assert ops.all(m.bias_quantizer.quantizer.i == 2.0) assert m.f_weight == 3.0 assert m.f_bias == 4.0 - assert ops.all(m.hgq_weight.quantizer.f == 3.0) - assert ops.all(m.hgq_bias.quantizer.f == 4.0) + assert ops.all(m.weight_quantizer.quantizer.f == 3.0) + assert ops.all(m.bias_quantizer.quantizer.f == 4.0) elif isinstance(m, (QuantizedTanh)): assert m.i == 0.0 assert m.f == 3.0 - assert ops.all(m.hgq.quantizer.i == 0.0) - assert ops.all(m.hgq.quantizer.f == 3.0) + assert ops.all(m.quantizer.quantizer.i == 0.0) + assert ops.all(m.quantizer.quantizer.f == 3.0) elif isinstance(m, (QuantizedReLU)): assert m.i == 1.0 assert m.f == 3.0 - assert ops.all(m.hgq.quantizer.i == 1.0) - assert ops.all(m.hgq.quantizer.f == 3.0) + assert ops.all(m.quantizer.quantizer.i == 1.0) + assert ops.all(m.quantizer.quantizer.f == 3.0) elif isinstance(m, (QuantizedPooling)): assert m.i == 1.0 assert m.f == 3.0 @@ -1465,9 +1460,9 @@ def test_set_activation_custom_bits_quantizer(config_pdp, conv2d_input): 'weight': {'integer_bits': 1.0, 'fractional_bits': 3.0}, 'bias': {'integer_bits': 2.0, 'fractional_bits': 4.0}, }, - 're_lu_9': {'integer_bits': 1.0, 'fractional_bits': 3.0}, + 're_lu_8': {'integer_bits': 1.0, 'fractional_bits': 3.0}, 'average_pooling2d_4': {'integer_bits': 1.0, 'fractional_bits': 3.0}, - 'activation_9': {'integer_bits': 0.0, 'fractional_bits': 3.0}, + 'activation_8': {'integer_bits': 0.0, 'fractional_bits': 3.0}, } inputs = keras.Input(shape=conv2d_input.shape[1:]) @@ -1477,7 +1472,6 @@ def test_set_activation_custom_bits_quantizer(config_pdp, conv2d_input): out = Activation("tanh")(out) model = keras.Model(inputs=inputs, outputs=out) model = add_compression_layers_tf(model, config_pdp, conv2d_input.shape) - for m in model.layers: if isinstance(m, (CompressedLayerConv2dKeras)): assert m.i_weight == 1.0 diff --git a/tests/test_torch_compression_layers.py b/tests/test_torch_compression_layers.py index e3b7ec4..7b90774 100644 --- a/tests/test_torch_compression_layers.py +++ b/tests/test_torch_compression_layers.py @@ -63,6 +63,8 @@ def config_pdp(): "use_real_tanh": False, "use_relu_multiplier": True, "use_symmetric_quantization": False, + "round_mode": "RND", + "overflow": "SAT", }, "training_parameters": {"pruning_first": False}, "fitcompress_parameters": {"enable_fitcompress": False}, @@ -93,6 +95,8 @@ def config_ap(): "use_real_tanh": False, "use_relu_multiplier": True, "use_symmetric_quantization": False, + "round_mode": "RND", + "overflow": "SAT", }, "training_parameters": {"pruning_first": False}, "fitcompress_parameters": {"enable_fitcompress": False}, @@ -126,6 +130,8 @@ def config_wanda(): "use_real_tanh": False, "use_relu_multiplier": True, "use_symmetric_quantization": False, + "round_mode": "RND", + "overflow": "SAT", }, "training_parameters": {"pruning_first": False}, "fitcompress_parameters": {"enable_fitcompress": False}, @@ -155,6 +161,8 @@ def config_cs(): "use_real_tanh": False, "use_relu_multiplier": True, "use_symmetric_quantization": False, + "round_mode": "RND", + "overflow": "SAT", }, "training_parameters": {"pruning_first": False}, "fitcompress_parameters": {"enable_fitcompress": False}, @@ -371,10 +379,9 @@ def test_check_activation(config_pdp, dense_input): def check_keras_layer_is_built(module, is_built): - for m in module.children(): + for m in module.modules(): if hasattr(m, "built"): is_built.append(m.built) - is_built = check_keras_layer_is_built(m, is_built) return is_built @@ -521,19 +528,8 @@ def 
test_hgq_weight_shape(config_pdp, dense_input): model = add_compression_layers_torch(model, config_pdp, dense_input.shape) post_pretrain_functions(model, config_pdp) - assert model.submodule.hgq_weight.quantizer._i.shape == model.submodule.weight.shape - assert model.activation.hgq.quantizer._i.shape == (1, OUT_FEATURES) - - config_pdp.quantization_parameters.hgq_heterogeneous = False - layer = Linear(IN_FEATURES, OUT_FEATURES, bias=False) - layer2 = Linear(OUT_FEATURES, OUT_FEATURES, bias=False) - model = TestModel2(layer, layer2, "relu", "tanh") - - model = add_compression_layers_torch(model, config_pdp, dense_input.shape) - post_pretrain_functions(model, config_pdp) - - assert model.submodule.hgq_weight.quantizer._i.shape == (1, 1) - assert model.activation.hgq.quantizer._i.shape == (1, 1) + assert model.submodule.weight_quantizer.quantizer.quantizer._i.shape == model.submodule.weight.shape + assert model.activation.quantizer.quantizer._i.shape == (1, OUT_FEATURES) def test_set_activation_custom_bits_hgq(config_pdp, conv2d_input): @@ -548,29 +544,29 @@ def test_set_activation_custom_bits_hgq(config_pdp, conv2d_input): if isinstance(m, (CompressedLayerBase)): assert m.i_weight == 0.0 assert m.i_bias == 0.0 - assert torch.all(m.hgq_weight.quantizer.i == 0.0) - assert torch.all(m.hgq_bias.quantizer.i == 0.0) + assert torch.all(m.weight_quantizer.quantizer.quantizer.i == 0.0) + assert torch.all(m.weight_quantizer.quantizer.quantizer.i == 0.0) assert m.f_weight == 7.0 assert m.f_bias == 7.0 - assert torch.all(m.hgq_weight.quantizer.f == 7.0) - assert torch.all(m.hgq_bias.quantizer.f == 7.0) + assert torch.all(m.weight_quantizer.quantizer.quantizer.f == 7.0) + assert torch.all(m.weight_quantizer.quantizer.quantizer.f == 7.0) elif isinstance(m, (QuantizedTanh)): assert m.i == 0.0 assert m.f == 7.0 - assert torch.all(m.hgq.quantizer.i == 0.0) - assert torch.all(m.hgq.quantizer.f == 7.0) + assert torch.all(m.quantizer.quantizer.i == 0.0) + assert torch.all(m.quantizer.quantizer.f == 7.0) elif isinstance(m, (QuantizedReLU)): assert m.i == 0.0 assert m.f == 8.0 - assert torch.all(m.hgq.quantizer.i == 0.0) - assert torch.all(m.hgq.quantizer.f == 8.0) + assert torch.all(m.quantizer.quantizer.i == 0.0) + assert torch.all(m.quantizer.quantizer.f == 8.0) elif isinstance(m, QuantizedPooling): assert m.i == 0.0 assert m.f == 7.0 - assert torch.all(m.hgq.quantizer.i == 0.0) - assert torch.all(m.hgq.quantizer.f == 7.0) + assert torch.all(m.quantizer.quantizer.quantizer.i == 0.0) + assert torch.all(m.quantizer.quantizer.quantizer.f == 7.0) config_pdp.quantization_parameters.layer_specific = { 'submodule': { @@ -589,28 +585,28 @@ def test_set_activation_custom_bits_hgq(config_pdp, conv2d_input): if isinstance(m, (CompressedLayerBase)): assert m.i_weight == 1.0 assert m.i_bias == 2.0 - assert torch.all(m.hgq_weight.quantizer.i == 1.0) - assert torch.all(m.hgq_bias.quantizer.i == 2.0) + assert torch.all(m.weight_quantizer.quantizer.quantizer.i == 1.0) + assert torch.all(m.bias_quantizer.quantizer.quantizer.i == 2.0) assert m.f_weight == 3.0 assert m.f_bias == 4.0 - assert torch.all(m.hgq_weight.quantizer.f == 3.0) - assert torch.all(m.hgq_bias.quantizer.f == 4.0) + assert torch.all(m.weight_quantizer.quantizer.quantizer.f == 3.0) + assert torch.all(m.bias_quantizer.quantizer.quantizer.f == 4.0) elif isinstance(m, (QuantizedTanh)): assert m.i == 0.0 assert m.f == 3.0 - assert torch.all(m.hgq.quantizer.i == 0.0) - assert torch.all(m.hgq.quantizer.f == 3.0) + assert torch.all(m.quantizer.quantizer.i == 0.0) + 
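# Nesting assumed by the assertions in these tests (as read off the diffs, not
# an API contract): in the torch implementation weight_quantizer is the
# PyTorchQuantizer wrapper, so the inner kif bits sit two levels down, while
# the Keras layers hold the create_quantizer result directly:
#   torch:  layer.weight_quantizer.quantizer.quantizer.i
#   keras:  layer.weight_quantizer.quantizer.i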
assert torch.all(m.quantizer.quantizer.f == 3.0) elif isinstance(m, (QuantizedReLU)): assert m.i == 0.0 assert m.f == 4.0 - assert torch.all(m.hgq.quantizer.i == 0.0) - assert torch.all(m.hgq.quantizer.f == 4.0) + assert torch.all(m.quantizer.quantizer.i == 0.0) + assert torch.all(m.quantizer.quantizer.f == 4.0) elif isinstance(m, QuantizedPooling): assert m.i == 1.0 assert m.f == 3.0 - assert torch.all(m.hgq.quantizer.i == 1.0) - assert torch.all(m.hgq.quantizer.f == 3.0) + assert torch.all(m.quantizer.quantizer.quantizer.i == 1.0) + assert torch.all(m.quantizer.quantizer.quantizer.f == 3.0) def test_set_activation_custom_bits_quantizer(config_pdp, conv2d_input): From 0167deada3e2c01ea658cc7b52618d8032268b05 Mon Sep 17 00:00:00 2001 From: Roope Niemi Date: Mon, 29 Sep 2025 19:35:27 +0200 Subject: [PATCH 04/37] add file that handles quantizer creation --- src/pquant/core/quantizer_functions.py | 43 ++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) create mode 100644 src/pquant/core/quantizer_functions.py diff --git a/src/pquant/core/quantizer_functions.py b/src/pquant/core/quantizer_functions.py new file mode 100644 index 0000000..1776e3e --- /dev/null +++ b/src/pquant/core/quantizer_functions.py @@ -0,0 +1,43 @@ +def create_fixed_quantizer(k, i, f, overflow, round_mode): + from quantizers import get_fixed_quantizer + + quantizer = get_fixed_quantizer(round_mode=round_mode, overflow_mode=overflow) + return quantizer + + +def create_hgq_parameters_quantizer(k, i, f, overflow, round_mode): + from hgq.quantizer import Quantizer + + return Quantizer( + k0=k, + i0=i, + f0=f, + round_mode=round_mode, + overflow_mode=overflow, + q_type="kif", + homogeneous_axis=(), + ) + + +def create_hgq_data_quantizer(k, i, f, overflow, round_mode): + from hgq.quantizer import Quantizer + + return Quantizer( + k0=k, + i0=i, + f0=f, + round_mode=round_mode, + overflow_mode=overflow, + q_type="kif", + homogeneous_axis=(0,), + ) + + +def create_quantizer(k, i, f, overflow, round_mode, is_heterogeneous, is_data): + if is_heterogeneous: + if is_data: + return create_hgq_data_quantizer(k, i, f, overflow, round_mode) + else: + return create_hgq_parameters_quantizer(k, i, f, overflow, round_mode) + else: + return create_fixed_quantizer(k, i, f, overflow, round_mode) From 01d09b1c75096582f06e4814320617acf157c0fc Mon Sep 17 00:00:00 2001 From: Roope Niemi Date: Mon, 29 Sep 2025 20:00:46 +0200 Subject: [PATCH 05/37] force cuda in qbatchnorm --- src/pquant/core/torch_impl/compressed_layers_torch.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/pquant/core/torch_impl/compressed_layers_torch.py b/src/pquant/core/torch_impl/compressed_layers_torch.py index 8024290..fb87733 100644 --- a/src/pquant/core/torch_impl/compressed_layers_torch.py +++ b/src/pquant/core/torch_impl/compressed_layers_torch.py @@ -348,10 +348,9 @@ def __init__( momentum: typing.Optional[float] = 0.1, affine: bool = True, track_running_stats: bool = True, - device=None, dtype=None, ): - super().__init__(num_features, eps, momentum, affine, track_running_stats, device=device, dtype=dtype) + super().__init__(num_features, eps, momentum, affine, track_running_stats, device="cuda", dtype=dtype) self.f = torch.tensor(config["quantization_parameters"]["default_fractional_bits"]) self.i = torch.tensor(config["quantization_parameters"]["default_integer_bits"]) self.overflow = config["quantization_parameters"]["overflow"] From 5f87f6d6ecb71fc33ae50fe0020f8479147d6d09 Mon Sep 17 00:00:00 2001 From: Roope Niemi Date: Mon, 29 Sep 
2025 20:04:40 +0200 Subject: [PATCH 06/37] no forcing cuda, but put model to device before example input --- src/pquant/core/torch_impl/compressed_layers_torch.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/pquant/core/torch_impl/compressed_layers_torch.py b/src/pquant/core/torch_impl/compressed_layers_torch.py index fb87733..c7fe677 100644 --- a/src/pquant/core/torch_impl/compressed_layers_torch.py +++ b/src/pquant/core/torch_impl/compressed_layers_torch.py @@ -264,12 +264,13 @@ def forward(self, x): return x -def add_compression_layers_torch(model, config, input_shape): +def add_compression_layers_torch(model, config, input_shape, device="cuda"): model = add_quantized_activations_to_model_layer(model, config) # model = add_quantized_activations_to_model_functional(model, config) model = add_pruning_to_model(model, config) model = disable_pruning_from_layers(model, config) model = add_layer_specific_quantization_to_model(model, config) + model.to(device) model(torch.rand(input_shape, device=next(model.parameters()).device)) return model @@ -348,9 +349,10 @@ def __init__( momentum: typing.Optional[float] = 0.1, affine: bool = True, track_running_stats: bool = True, + device=None, dtype=None, ): - super().__init__(num_features, eps, momentum, affine, track_running_stats, device="cuda", dtype=dtype) + super().__init__(num_features, eps, momentum, affine, track_running_stats, device=device, dtype=dtype) self.f = torch.tensor(config["quantization_parameters"]["default_fractional_bits"]) self.i = torch.tensor(config["quantization_parameters"]["default_integer_bits"]) self.overflow = config["quantization_parameters"]["overflow"] From 5b91d07a445a3708c2e0ba77a371eec532e0933a Mon Sep 17 00:00:00 2001 From: Roope Niemi Date: Mon, 29 Sep 2025 20:08:07 +0200 Subject: [PATCH 07/37] add PQBatchNorm into default config --- src/pquant/core/torch_impl/compressed_layers_torch.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pquant/core/torch_impl/compressed_layers_torch.py b/src/pquant/core/torch_impl/compressed_layers_torch.py index c7fe677..61e1fd5 100644 --- a/src/pquant/core/torch_impl/compressed_layers_torch.py +++ b/src/pquant/core/torch_impl/compressed_layers_torch.py @@ -753,7 +753,7 @@ def create_default_layer_quantization_pruning_config(model): "bias": {"integer_bits": 0, "fractional_bits": 7}, } config["disable_pruning_for_layers"].append(name) - elif layer.__class__ in [nn.Tanh, nn.ReLU, nn.AvgPool1d, nn.AvgPool2d, nn.AvgPool3d]: + elif layer.__class__ in [nn.BatchNorm2d, nn.Tanh, nn.ReLU, nn.AvgPool1d, nn.AvgPool2d, nn.AvgPool3d]: config["layer_specific"][name] = {"integer_bits": 0, "fractional_bits": 7} return config From 145bcd32377647d9d248db339d21ae02ddb10dcb Mon Sep 17 00:00:00 2001 From: Roope Niemi Date: Mon, 13 Oct 2025 12:53:48 +0200 Subject: [PATCH 08/37] separate i and f bits for input output quantizers, separate default k values for data and weights --- src/pquant/__init__.py | 4 +- src/pquant/configs/config_ap.yaml | 8 +- src/pquant/configs/config_autosparse.yaml | 2 + src/pquant/configs/config_cs.yaml | 2 + src/pquant/configs/config_dst.yaml | 2 + src/pquant/configs/config_mdmm.yaml | 2 + src/pquant/configs/config_pdp.yaml | 6 +- src/pquant/configs/config_wanda.yaml | 2 +- src/pquant/core/activations_quantizer.py | 121 ++- src/pquant/core/compressed_layers.py | 18 +- .../core/tf_impl/compressed_layers_tf.py | 703 +++++++++--------- .../torch_impl/compressed_layers_torch.py | 456 +++++++----- 
tests/test_keras_compression_layers.py | 157 ++-- tests/test_torch_compression_layers.py | 153 ++-- 14 files changed, 922 insertions(+), 714 deletions(-) diff --git a/src/pquant/__init__.py b/src/pquant/__init__.py index c04302c..56f5c95 100644 --- a/src/pquant/__init__.py +++ b/src/pquant/__init__.py @@ -2,10 +2,10 @@ from .core.compressed_layers import ( add_compression_layers, add_default_layer_quantization_pruning_to_config, + apply_final_compression_model, get_layer_keep_ratio, get_model_losses, post_training_prune, - remove_pruning_from_model, ) from .core.train import iterative_train from .core.utils import get_default_config @@ -13,7 +13,7 @@ __all__ = [ "iterative_train", "add_compression_layers", - "remove_pruning_from_model", + "apply_final_compression_model", "get_model_losses", "get_default_config", "add_default_layer_quantization_pruning_to_config", diff --git a/src/pquant/configs/config_ap.yaml b/src/pquant/configs/config_ap.yaml index 9031b93..187b1f9 100644 --- a/src/pquant/configs/config_ap.yaml +++ b/src/pquant/configs/config_ap.yaml @@ -5,11 +5,13 @@ pruning_parameters: pruning_method: activation_pruning threshold: 0.15 threshold_decay: 0. - t_delta: 50 - t_start_collecting_batch: 50 + t_delta: 100 + t_start_collecting_batch: 100 quantization_parameters: default_integer_bits: 0. default_fractional_bits: 7. + default_data_keep_negatives: 0. + default_weight_keep_negatives: 1. enable_quantization: true hgq_gamma: 0.0003 hgq_heterogeneous: True @@ -31,7 +33,7 @@ fitcompress_parameters: approximate : true f_lambda : 1 training_parameters: - epochs: 100 + epochs: 200 fine_tuning_epochs: 0 pretraining_epochs: 0 pruning_first: false diff --git a/src/pquant/configs/config_autosparse.yaml b/src/pquant/configs/config_autosparse.yaml index 623fa78..58ece98 100644 --- a/src/pquant/configs/config_autosparse.yaml +++ b/src/pquant/configs/config_autosparse.yaml @@ -13,6 +13,8 @@ pruning_parameters: quantization_parameters: default_integer_bits: 0. default_fractional_bits: 7. + default_data_keep_negatives: 0. + default_weight_keep_negatives: 1. enable_quantization: true hgq_gamma: 0.0003 hgq_heterogeneous: True diff --git a/src/pquant/configs/config_cs.yaml b/src/pquant/configs/config_cs.yaml index 070a7d7..ef8833a 100644 --- a/src/pquant/configs/config_cs.yaml +++ b/src/pquant/configs/config_cs.yaml @@ -9,6 +9,8 @@ pruning_parameters: quantization_parameters: default_integer_bits: 0. default_fractional_bits: 7. + default_data_keep_negatives: 0. + default_weight_keep_negatives: 1. enable_quantization: true hgq_gamma: 0.0003 hgq_heterogeneous: True diff --git a/src/pquant/configs/config_dst.yaml b/src/pquant/configs/config_dst.yaml index 9fbc0ff..3d05b77 100644 --- a/src/pquant/configs/config_dst.yaml +++ b/src/pquant/configs/config_dst.yaml @@ -11,6 +11,8 @@ pruning_parameters: quantization_parameters: default_integer_bits: 0. default_fractional_bits: 7. + default_data_keep_negatives: 0. + default_weight_keep_negatives: 1. enable_quantization: true hgq_gamma: 0.0003 hgq_heterogeneous: True diff --git a/src/pquant/configs/config_mdmm.yaml b/src/pquant/configs/config_mdmm.yaml index 7513efc..34ccaa8 100644 --- a/src/pquant/configs/config_mdmm.yaml +++ b/src/pquant/configs/config_mdmm.yaml @@ -20,6 +20,8 @@ quantization_parameters: enable_quantization: true default_integer_bits: 0. default_fractional_bits: 7. + default_data_keep_negatives: 0. + default_weight_keep_negatives: 1. 
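The two new keep-negatives defaults above feed the quantizers' k (sign) bit: weights stay signed (k=1), while data/activation quantizers default to unsigned (k=0), which suits post-ReLU tensors. Reusing the kif_quantize sketch from earlier (values illustrative):

    kif_quantize(torch.tensor([-0.5, 0.5]), k=1.0, i=0.0, f=7.0)  # tensor([-0.5000, 0.5000])
    kif_quantize(torch.tensor([-0.5, 0.5]), k=0.0, i=0.0, f=7.0)  # tensor([0.0000, 0.5000])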
hgq_gamma: 0.0003 hgq_heterogeneous: True layer_specific: [] diff --git a/src/pquant/configs/config_pdp.yaml b/src/pquant/configs/config_pdp.yaml index f91bbdc..ce306a4 100644 --- a/src/pquant/configs/config_pdp.yaml +++ b/src/pquant/configs/config_pdp.yaml @@ -4,13 +4,15 @@ pruning_parameters: enable_pruning: true epsilon: 0.015 pruning_method: pdp - sparsity: 0.4 + sparsity: 0.8 temperature: 1.0e-05 threshold_decay: 0. - structured_pruning: true + structured_pruning: false quantization_parameters: default_integer_bits: 0. default_fractional_bits: 7. + default_data_keep_negatives: 0. + default_weight_keep_negatives: 1. enable_quantization: true hgq_gamma: 0.0003 hgq_heterogeneous: True diff --git a/src/pquant/configs/config_wanda.yaml b/src/pquant/configs/config_wanda.yaml index f34f746..cf667df 100644 --- a/src/pquant/configs/config_wanda.yaml +++ b/src/pquant/configs/config_wanda.yaml @@ -36,7 +36,7 @@ fitcompress_parameters: training_parameters: epochs: 200 fine_tuning_epochs: 0 - pretraining_epochs: 0 + pretraining_epochs: 50 pruning_first: false rewind: never rounds: 1 diff --git a/src/pquant/core/activations_quantizer.py b/src/pquant/core/activations_quantizer.py index 521919c..041b62a 100644 --- a/src/pquant/core/activations_quantizer.py +++ b/src/pquant/core/activations_quantizer.py @@ -7,15 +7,21 @@ @keras.saving.register_keras_serializable(package="PQuant") class QuantizedTanh(keras.layers.Layer): - def __init__(self, config, i, f, **kwargs): + def __init__(self, config, i_input=0.0, f_input=7.0, i_output=0.0, f_output=7.0, quantize_input=True, quantize_output=True + ): super().__init__() if isinstance(config, dict): from pquant.core.finetuning import TuningConfig config = TuningConfig.load_from_config(config) - self.i = convert_to_tensor(i) - self.f = convert_to_tensor(f) + self.i = convert_to_tensor(i_input) + self.f = convert_to_tensor(f_input) self.k = convert_to_tensor(1.0) + + self.i_output = convert_to_tensor(i_output) + self.f_output = convert_to_tensor(f_output) + self.k = convert_to_tensor(1.0) + self.config = config self.use_hgq = config.quantization_parameters.use_high_granularity_quantization @@ -24,13 +30,15 @@ def __init__(self, config, i, f, **kwargs): self.overflow = config.quantization_parameters.overflow self.use_real_tanh = config.quantization_parameters.use_real_tanh self.hgq_heterogeneous = config.quantization_parameters.hgq_heterogeneous + self.quantize_input = quantize_input + self.quantize_output = quantize_output def build(self, input_shape): super().build(input_shape) - self.quantizer = create_quantizer( + self.output_quantizer = create_quantizer( k=self.k, - i=self.i, - f=self.f, + i=self.i_output, + f=self.f_output, overflow=self.overflow, round_mode=self.round_mode, is_data=True, @@ -38,19 +46,16 @@ def build(self, input_shape): ) self.input_quantizer = create_quantizer( k=self.k, - i=self.i, - f=self.f, + i=self.i_input, + f=self.f_input, overflow=self.overflow, round_mode=self.round_mode, is_data=True, is_heterogeneous=self.use_hgq, ) if self.use_hgq: - self.quantizer.build(input_shape) - - def set_bits(self, i, f): - self.i = convert_to_tensor(i) - self.f = convert_to_tensor(f) + self.input_quantizer.build(input_shape) + self.output_quantizer.build(input_shape) def hgq_loss(self): if self.is_pretraining: @@ -62,15 +67,27 @@ def hgq_loss(self): def post_pre_train_function(self): self.is_pretraining = False + def pre_activation(self, x): + if self.quantize_input: + if self.use_hgq: + x = self.input_quantizer(x) + else: + x = self.input_quantizer(x, 
k=self.k, i=self.i_input, f=self.f_input) + return x + + def post_activation(self, x): + if self.quantize_output: + if self.use_hgq: + return self.output_quantizer(x) + else: + return self.output_quantizer(x, k=self.k, i=self.i_input, f=self.f_output) + return x + def call(self, x): - if self.use_hgq: - x = self.input_quantizer(x) - else: - x = self.input_quantizer(x, k=self.k, i=self.i, f=self.f) + x = self.pre_activation(x) x = tanh(x) if self.use_real_tanh else hard_tanh(x) - if self.use_hgq: - return self.quantizer(x) - return self.quantizer(x, k=self.k, i=self.i, f=self.f) + x = self.post_activation(x) + return x def get_config(self): config = super().get_config() @@ -80,15 +97,21 @@ def get_config(self): @keras.saving.register_keras_serializable(package="PQuant") class QuantizedReLU(keras.layers.Layer): - def __init__(self, config, i, f, **kwargs): + def __init__( + self, config, i_input=0.0, f_input=8.0, i_output=0.0, f_output=8.0, quantize_input=True, quantize_output=True + ): super().__init__() if isinstance(config, dict): from pquant.core.finetuning import TuningConfig config = TuningConfig.load_from_config(config) self.config = config - self.i = convert_to_tensor(i) - self.f = convert_to_tensor(f) + self.i_input = convert_to_tensor(i_input) + self.f_input = convert_to_tensor(f_input) + self.k = convert_to_tensor(0.0) + + self.i_output = convert_to_tensor(i_output) + self.f_output = convert_to_tensor(f_output) self.k = convert_to_tensor(0.0) self.use_hgq = config.quantization_parameters.use_high_granularity_quantization @@ -102,29 +125,36 @@ def __init__(self, config, i, f, **kwargs): self.post_fitcompress_calibration = False self.saved_inputs = [] - + self.quantize_input = quantize_input + self.quantize_output = quantize_output def build(self, input_shape): super().build(input_shape) - self.quantizer = create_quantizer( + self.output_quantizer = create_quantizer( + k=self.k, + i=self.i_output, + f=self.f_output, + overflow=self.overflow, + round_mode=self.round_mode, + is_data=True, + is_heterogeneous=self.use_hgq, + ) + self.input_quantizer = create_quantizer( k=self.k, - i=self.i, - f=self.f, + i=self.i_input, + f=self.f_input, overflow=self.overflow, round_mode=self.round_mode, is_data=True, is_heterogeneous=self.use_hgq, ) if self.use_hgq: - self.quantizer.build(input_shape) + self.input_quantizer.build(input_shape) + self.output_quantizer.build(input_shape) if self.use_multiplier: self.multiplier = self.add_weight(shape=(1,), trainable=True, initializer=keras.initializers.Constant(-1.0)) - def set_bits(self, i, f): - self.i = convert_to_tensor(i) - self.f = convert_to_tensor(f) - def post_pre_train_function(self): self.is_pretraining = False @@ -135,6 +165,24 @@ def hgq_loss(self): ops.sum(self.hgq.quantizer.i) + ops.sum(self.hgq.quantizer.f) ) * self.config.quantization_parameters.hgq_gamma + def pre_activation(self, x): + if self.quantize_input: + if self.use_hgq: + x = self.input_quantizer(x) + else: + x = self.input_quantizer(x, k=self.k, i=self.i_input, f=self.f_input) + if self.use_multiplier: + x = x * 2 ** (ops.stop_gradient(ops.round(self.multiplier) - self.multiplier) + self.multiplier) + return x + + def post_activation(self, x): + if self.quantize_output: + if self.use_hgq: + return self.output_quantizer(x) + else: + return self.output_quantizer(x, k=self.k, i=self.i_input, f=self.f_output) + return x + def call(self, x): if self.use_fitcompress and self.is_pretraining: if self.post_fitcompress_calibration: @@ -143,11 +191,10 @@ def call(self, x): # During 
FITcompress, we do not use any quantized activations return ops.relu(x) # Multiplier after fitcompress if condition, such that we don't use any relu multiplier during FITcompress search - if self.use_multiplier: - x = x * 2 ** (ops.stop_gradient(ops.round(self.multiplier) - self.multiplier) + self.multiplier) - if self.use_hgq: - return self.quantizer(x) - return self.quantizer(x, k=self.k, i=self.i, f=self.f) + x = self.pre_activation(x) + x = ops.relu(x) + x = self.post_activation(x) + return x def get_config(self): config = super().get_config() diff --git a/src/pquant/core/compressed_layers.py b/src/pquant/core/compressed_layers.py index f8bf81f..0f7a8be 100644 --- a/src/pquant/core/compressed_layers.py +++ b/src/pquant/core/compressed_layers.py @@ -55,27 +55,27 @@ def get_model_losses(model, losses): return get_model_losses_tf(model, losses) -def remove_pruning_from_model(model, config): +def apply_final_compression_model(model): if keras.backend.backend() == "torch": from pquant.core.torch_impl.compressed_layers_torch import ( - remove_pruning_from_model_torch, + apply_final_compression_torch, ) - return remove_pruning_from_model_torch(model, config) + return apply_final_compression_torch(model) else: from pquant.core.tf_impl.compressed_layers_tf import ( - remove_pruning_from_model_tf, + apply_final_compression_tf, ) - return remove_pruning_from_model_tf(model, config) + return apply_final_compression_tf(model) def post_training_prune(model, calibration_data, config): if keras.backend.backend() == "torch": from pquant.core.torch_impl.compressed_layers_torch import ( add_compression_layers_torch, + apply_final_compression_torch, post_pretrain_functions, - remove_pruning_from_model_torch, ) t_delta = config.pruning_parameters.t_delta @@ -86,12 +86,12 @@ def post_training_prune(model, calibration_data, config): model = add_compression_layers_torch(model, config, inputs.shape) post_pretrain_functions(model, config) model(inputs) - return remove_pruning_from_model_torch(model, config) + return apply_final_compression_torch(model) else: from pquant.core.tf_impl.compressed_layers_tf import ( add_compression_layers_tf, + apply_final_compression_tf, post_pretrain_functions, - remove_pruning_from_model_tf, ) t_delta = config.pruning_parameters.t_delta @@ -103,4 +103,4 @@ def post_training_prune(model, calibration_data, config): model = add_compression_layers_tf(model, config, inputs.shape) post_pretrain_functions(model, config) model(inputs, training=True) # True so pruning works - return remove_pruning_from_model_tf(model, config) + return apply_final_compression_tf(model, config) diff --git a/src/pquant/core/tf_impl/compressed_layers_tf.py b/src/pquant/core/tf_impl/compressed_layers_tf.py index 0a2ce3e..815f423 100644 --- a/src/pquant/core/tf_impl/compressed_layers_tf.py +++ b/src/pquant/core/tf_impl/compressed_layers_tf.py @@ -1,5 +1,4 @@ import keras -from hgq.quantizer import Quantizer from keras import ops from keras.layers import ( Activation, @@ -15,18 +14,19 @@ ReLU, SeparableConv2D, ) -from quantizers import get_fixed_quantizer from pquant.core.activations_quantizer import QuantizedReLU, QuantizedTanh from pquant.core.quantizer_functions import create_quantizer from pquant.core.utils import get_pruning_layer -class CompressedLayerBase(keras.layers.Layer): - def __init__(self, config, layer_type, use_input_quantizer=False, use_output_quantizer=False): +class PQWeightBiasBase(keras.layers.Layer): + def __init__(self, config, layer_type, quantize_input=True, quantize_output=False): 
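Together with the __init__.py rename shown earlier in this patch, the intended end-to-end flow is roughly the following sketch (the model, training loop and get_default_config argument are placeholders, elided here; apply_final_compression_model folds the pruning mask and quantization into the stored weights via each layer's apply_final_compression):

    from pquant import (
        add_compression_layers,
        apply_final_compression_model,
        get_default_config,
        get_model_losses,
    )

    config = get_default_config(...)  # elided: pick one of the method configs
    model = add_compression_layers(model, config, input_shape)
    # ... train, adding get_model_losses(model, loss) to the objective ...
    model = apply_final_compression_model(model)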
super().__init__() i_bits = config.quantization_parameters.default_integer_bits f_bits = config.quantization_parameters.default_fractional_bits + self.data_k = config.quantization_parameters.default_data_keep_negatives + self.weight_k = config.quantization_parameters.default_weight_keep_negatives self.i_weight = ops.convert_to_tensor(i_bits) self.f_weight = ops.convert_to_tensor(f_bits) self.i_bias = ops.convert_to_tensor(i_bits) @@ -40,28 +40,22 @@ def __init__(self, config, layer_type, use_input_quantizer=False, use_output_qua self.round_mode = config.quantization_parameters.round_mode self.overflow = config.quantization_parameters.overflow self.hgq_gamma = config.quantization_parameters.hgq_gamma - self.pruning_first = config.training_parameters.pruning_first self.enable_quantization = config.quantization_parameters.enable_quantization self.use_hgq = config.quantization_parameters.use_high_granularity_quantization self.hgq_heterogeneous = config.quantization_parameters.hgq_heterogeneous self.enable_pruning = config.pruning_parameters.enable_pruning - + self.use_fitcompress = config.fitcompress_parameters.enable_fitcompress + self.final_compression_done = False self.do_transpose_data = None self.weight_transpose = None self.data_transpose = None - self.use_input_quantizer = use_input_quantizer - self.use_output_quantizer = use_output_quantizer - - def set_quantization_bits(self, i_bits_w, f_bits_w, i_bits_b, f_bits_b): - self.i_weight = ops.convert_to_tensor(i_bits_w) - self.f_weight = ops.convert_to_tensor(f_bits_w) - self.i_bias = ops.convert_to_tensor(i_bits_b) - self.f_bias = ops.convert_to_tensor(f_bits_b) + self.quantize_input = quantize_input + self.quantize_output = quantize_output def set_input_output_quantization(self, input_quantization, output_quantization): - self.use_input_quantizer = input_quantization - self.use_output_quantizer = output_quantization + self.quantize_input = input_quantization + self.quantize_output = output_quantization def set_input_output_quantization_bits(self, i_input, f_input, i_output, f_output): self.i_input = i_input @@ -75,7 +69,7 @@ def set_enable_pruning(self, enable_pruning): def build(self, input_shape): super().build(input_shape) self.weight_quantizer = create_quantizer( - k=ops.convert_to_tensor(1.0), + k=ops.convert_to_tensor(self.weight_k), i=self.i_weight, f=self.f_weight, overflow=self.overflow, @@ -85,7 +79,7 @@ def build(self, input_shape): ) if self.use_bias: self.bias_quantizer = create_quantizer( - k=ops.convert_to_tensor(1.0), + k=ops.convert_to_tensor(self.weight_k), i=self.i_bias, f=self.f_bias, overflow=self.overflow, @@ -93,6 +87,33 @@ def build(self, input_shape): is_heterogeneous=self.use_hgq, is_data=False, ) + if self.quantize_input: + self.input_quantizer = create_quantizer( + k=self.data_k, + i=self.i_input, + f=self.f_input, + overflow=self.overflow, + round_mode=self.round_mode, + is_heterogeneous=self.use_hgq, + is_data=True, + ) + if self.quantize_output: + self.output_quantizer = create_quantizer( + k=self.data_k, + i=self.i_output, + f=self.f_output, + overflow=self.overflow, + round_mode=self.round_mode, + is_heterogeneous=self.use_hgq, + is_data=True, + ) + + def apply_final_compression(self): + weight, bias = self.prune_and_quantize(self.weight, self.bias) + self.weight.assign(weight) + if self.bias is not None: + self.bias.assign(bias) + self.final_compression_done = True def save_weights(self): self.init_weight = self.weight.value @@ -137,6 +158,8 @@ def prune(self, weight): return weight def 
prune_and_quantize(self, weight, bias): + if self.final_compression_done: + return weight, bias weight = ops.cast(weight, weight.dtype) bias = ops.cast(bias, bias.dtype) if bias is not None else None if self.pruning_first: @@ -150,6 +173,33 @@ def prune_and_quantize(self, weight, bias): def call(self, x): return x + def pre_forward(self, weight, bias, x, training=None): + if self.quantize_input: + if self.use_hgq and not self.input_quantizer.quantizer.built: + self.input_quantizer.quantizer.build(x.shape) + if not self.pruning_layer.is_pretraining and not self.use_fitcompress: + if self.use_hgq: + x = self.input_quantizer(x) + else: + x = self.input_quantizer(x, k=self.data_k, i=self.i_input, f=self.f_input) + if self.pruning_method == "wanda": + self.collect_input(x, self.weight, training) + weight, bias = self.prune_and_quantize(weight, bias) + return weight, bias, x + + def post_forward(self, x, training=None): + if self.quantize_output: + if self.use_hgq and not self.output_quantizer.quantizer.built: + self.output_quantizer.quantizer.build(x.shape) + if not self.pruning_layer.is_pretraining and not self.use_fitcompress: + if self.use_hgq: + x = self.output_quantizer(x) + else: + x = self.output_quantizer(x, k=self.data_k, i=self.i_output, f=self.f_output) + if self.pruning_method == "activation_pruning": + self.collect_output(x, training) + return x + def collect_input(self, x, weight, training): collect_x = self.handle_transpose(x, self.data_transpose, self.do_transpose_data) weight_channels_first = self.handle_transpose(weight, self.weight_transpose, True) @@ -160,9 +210,9 @@ def collect_output(self, x, training): self.pruning_layer.collect_output(collect_x, training) -class CompressedLayerDepthwiseConv2dKeras(CompressedLayerBase): - def __init__(self, config, layer, layer_type): - super().__init__(config, layer_type) +class PQDepthwiseConv2d(PQWeightBiasBase): + def __init__(self, config, layer, layer_type, quantize_input=True, quantize_output=True): + super().__init__(config, layer_type, quantize_input, quantize_output) self.depthwise_regularizer = layer.depthwise_regularizer self.use_bias = layer.use_bias self.strides = layer.strides @@ -190,20 +240,17 @@ def build(self, input_shape): super().build(input_shape) def call(self, x, training=None): - weight, bias = self.prune_and_quantize(self.weight, self.bias) - if self.pruning_method == "wanda": - self.collect_input(x, weight, training) + weight, bias, x = self.pre_forward(self.weight, self.bias, x, training) x = ops.depthwise_conv( x, weight, strides=self.strides, padding=self.padding, data_format=None, dilation_rate=self.dilation_rate ) - if self.pruning_method == "activation_pruning": - self.collect_output(x, training) + x = self.post_forward(x, training) return x -class CompressedLayerConv2dKeras(CompressedLayerBase): - def __init__(self, config, layer, layer_type): - super().__init__(config, layer_type) +class PQConv2d(PQWeightBiasBase): + def __init__(self, config, layer, layer_type, quantize_input=True, quantize_output=False): + super().__init__(config, layer_type, quantize_input, quantize_output) self.kernel_regularizer = layer.kernel_regularizer self.filters = layer.filters self.use_bias = layer.use_bias @@ -234,21 +281,18 @@ def build(self, input_shape): super().build(input_shape) def call(self, x, training=None): - weight, bias = self.prune_and_quantize(self.weight, self.bias) - if self.pruning_method == "wanda": - self.collect_input(x, weight, training) + weight, bias, x = self.pre_forward(self.weight, self.bias, x, 
training) x = ops.conv( x, weight, strides=self.strides, padding=self.padding, data_format=None, dilation_rate=self.dilation_rate ) if self.bias is not None: x = ops.add(x, bias) - if self.pruning_method == "activation_pruning": - self.collect_output(x, training) + x = self.post_forward(x, training) return x -class CompressedLayerSeparableConv2dKeras(Layer): - def __init__(self, config, layer): +class PQSeparableConv2d(Layer): + def __init__(self, config, layer, quantize_input=True, quantize_output=True): super().__init__() self.weight_transpose = (3, 2, 0, 1) self.weight_transpose_back = (2, 3, 1, 0) @@ -256,26 +300,30 @@ def __init__(self, config, layer): layer.kernel = layer.depthwise_kernel bias = layer.use_bias layer.use_bias = False - self.depthwise_conv = CompressedLayerDepthwiseConv2dKeras(config, layer, "conv") + self.depthwise_conv = PQDepthwiseConv2d(config, layer, "conv", quantize_input, False) layer.kernel_regularizer = layer.pointwise_regularizer layer.kernel_size = 1 layer.kernel = layer.pointwise_kernel layer.use_bias = bias - self.pointwise_conv = CompressedLayerConv2dKeras(config, layer, "conv") + self.pointwise_conv = PQConv2d(config, layer, "conv", False, quantize_output) self.do_transpose_data = layer.data_format == "channels_last" def build(self, input_shape): super().build(input_shape) + def apply_final_compression(self): + self.depthwise_conv.apply_final_compression() + self.pointwise_conv.apply_final_compression() + def call(self, x, training=None): x = self.depthwise_conv(x, training=training) x = self.pointwise_conv(x, training=training) return x -class CompressedLayerConv1dKeras(CompressedLayerBase): - def __init__(self, config, layer, layer_type): - super().__init__(config, layer_type) +class CompressedLayerConv1dKeras(PQWeightBiasBase): + def __init__(self, config, layer, layer_type, quantize_input=True, quantize_output=False): + super().__init__(config, layer_type, quantize_input, quantize_output) self.kernel_regularizer = layer.kernel_regularizer self.filters = layer.filters self.use_bias = layer.use_bias @@ -305,20 +353,17 @@ def build(self, input_shape): super().build(input_shape) def call(self, x, training=None): - weight, bias = self.prune_and_quantize(self.weight, self.bias) - if self.pruning_method == "wanda": - self.collect_input(x, weight, training) + weight, bias, x = self.pre_forward(self.weight, self.bias, x, training) x = ops.conv( x, weight, strides=self.strides, padding=self.padding, data_format=None, dilation_rate=self.dilation_rate ) if self.bias is not None: x = ops.add(x, bias) - if self.pruning_method == "activation_pruning": - self.collect_output(x, training) + x = self.post_forward(x, training) return x -class CompressedLayerDenseKeras(CompressedLayerBase): +class CompressedLayerDenseKeras(PQWeightBiasBase): def __init__(self, config, layer, layer_type): super().__init__(config, layer_type) self.kernel_regularizer = layer.kernel_regularizer @@ -344,14 +389,11 @@ def build(self, input_shape): super().build(input_shape) def call(self, x, training=None): - weight, bias = self.prune_and_quantize(self.weight, self.bias) - if self.pruning_method == "wanda": - self.collect_input(x, weight, training) + weight, bias, x = self.pre_forward(self.weight, self.bias, x, training) x = ops.matmul(x, weight) if self.bias is not None: x = ops.add(x, bias) - if self.pruning_method == "activation_pruning": - self.collect_output(x, training) + x = self.post_forward(x, training) return x @@ -374,6 +416,7 @@ def __init__( beta_constraint=None, 
@@ -374,6 +416,7 @@ def __init__(
         self,
         beta_constraint=None,
         gamma_constraint=None,
         synchronized=False,
+        quantize_input=True,
         **kwargs,
     ):
         super().__init__(
@@ -395,25 +438,50 @@ def __init__(
         )
         self.overflow = config["quantization_parameters"]["overflow"]
         self.round_mode = config["quantization_parameters"]["round_mode"]
+        self.hgq_gamma = config["quantization_parameters"]["hgq_gamma"]
+        self.data_k = config["quantization_parameters"]["default_data_keep_negatives"]
+        self.weight_k = config["quantization_parameters"]["default_weight_keep_negatives"]
+        self.enable_quantization = config["quantization_parameters"]["enable_quantization"]
+        self.use_hgq = config["quantization_parameters"]["use_high_granularity_quantization"]
+        self.quantize_input = quantize_input
         self.config = config
-        self.f = ops.convert_to_tensor(config["quantization_parameters"]["default_fractional_bits"])
-        self.i = ops.convert_to_tensor(config["quantization_parameters"]["default_integer_bits"])
+        self.f_input = self.f_weight = ops.convert_to_tensor(config["quantization_parameters"]["default_fractional_bits"])
+        self.i_input = self.i_weight = ops.convert_to_tensor(config["quantization_parameters"]["default_integer_bits"])
+        self.final_compression_done = False
 
     def build(self, input_shape):
         super().build(input_shape)
         self.parameter_quantizer = create_quantizer(
-            k=1.0,
-            i=self.i,
-            f=self.f,
+            k=self.weight_k,
+            i=self.i_weight,
+            f=self.f_weight,
+            overflow=self.overflow,
+            round_mode=self.round_mode,
+            is_heterogeneous=self.use_hgq,
+            is_data=True,
+        )
+        self.input_quantizer = create_quantizer(
+            k=self.data_k,
+            i=self.i_input,
+            f=self.f_input,
             overflow=self.overflow,
             round_mode=self.round_mode,
-            is_heterogeneous=False,
             is_data=True,
         )
 
-    def set_bits(self, i, f):
-        self.i = ops.convert_to_tensor(i)
-        self.f = ops.convert_to_tensor(f)
+    def apply_final_compression(self):
+        gamma, beta = self.gamma, self.beta
+        if self.enable_quantization:
+            if self.use_hgq:
+                gamma = self.parameter_quantizer(gamma)
+                beta = self.parameter_quantizer(beta)
+            else:
+                gamma = self.parameter_quantizer(self.gamma, k=self.weight_k, i=self.i_weight, f=self.f_weight)
+                beta = self.parameter_quantizer(self.beta, k=self.weight_k, i=self.i_weight, f=self.f_weight)
+        self.gamma.assign(gamma)
+        self.beta.assign(beta)
+        self.final_compression_done = True
 
     def call(self, inputs, training=None, mask=None):
         # Check if the mask has one less dimension than the inputs.
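
For reference, a usage sketch of the create_quantizer factory wired up in build() above; the bit widths and modes here are illustrative example values, not defaults taken from this patch:

    # Example: a signed fixed-point data quantizer with 0 integer and
    # 7 fractional bits, mirroring how input_quantizer is constructed.
    input_quantizer = create_quantizer(
        k=1.0,                   # keep negatives (sign bit)
        i=0.0,                   # integer bits
        f=7.0,                   # fractional bits
        overflow="SAT",          # example overflow mode
        round_mode="RND",        # example rounding mode
        is_heterogeneous=False,  # True would select the HGQ quantizer
        is_data=True,            # activation/data rather than weight quantizer
    )
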
@@ -430,6 +498,11 @@ def call(self, inputs, training=None, mask=None):
         # BN is prone to overflow with float16/bfloat16 inputs, so we upcast to
         # float32 for the subsequent computations.
         inputs = ops.cast(inputs, compute_dtype)
+        if self.quantize_input and self.enable_quantization:
+            if self.use_hgq:
+                inputs = self.input_quantizer(inputs)
+            else:
+                inputs = self.input_quantizer(inputs, k=self.data_k, i=self.i_input, f=self.f_input)
 
         moving_mean = ops.cast(self.moving_mean, inputs.dtype)
         moving_variance = ops.cast(self.moving_variance, inputs.dtype)
@@ -444,13 +517,15 @@ def call(self, inputs, training=None, mask=None):
             variance = moving_variance
 
         if self.scale:
-            gamma = self.parameter_quantizer(self.gamma, k=1.0, i=self.i, f=self.f)
+            if self.enable_quantization and not self.final_compression_done:
+                gamma = self.parameter_quantizer(self.gamma, k=self.weight_k, i=self.i_weight, f=self.f_weight)
+            else:
+                gamma = self.gamma
             gamma = ops.cast(gamma, inputs.dtype)
         else:
             gamma = None
 
         if self.center:
-            beta = self.parameter_quantizer(self.beta, k=1.0, i=self.i, f=self.f)
+            if self.enable_quantization and not self.final_compression_done:
+                beta = self.parameter_quantizer(self.beta, k=self.weight_k, i=self.i_weight, f=self.f_weight)
+            else:
+                beta = self.beta
             beta = ops.cast(beta, inputs.dtype)
         else:
             beta = None
@@ -468,7 +543,7 @@
 
 
 class QuantizedPooling(keras.layers.Layer):
-    def __init__(self, config, layer):
+    def __init__(self, config, layer, quantize_input=True):
         super().__init__()
-        self.i = ops.convert_to_tensor(config.quantization_parameters.default_integer_bits)
-        self.f = ops.convert_to_tensor(config.quantization_parameters.default_fractional_bits)
+        self.i_input = ops.convert_to_tensor(config.quantization_parameters.default_integer_bits)
+        self.f_input = ops.convert_to_tensor(config.quantization_parameters.default_fractional_bits)
@@ -477,76 +552,53 @@ def __init__(self, config, layer):
-        self.overflow = "SAT_SYM" if config.quantization_parameters.use_symmetric_quantization else "SAT"
         self.hgq_gamma = config.quantization_parameters.hgq_gamma
-
-        self.use_high_granularity_quantization = config.quantization_parameters.use_high_granularity_quantization
-        self.hgq_heterogeneous = config.quantization_parameters.hgq_heterogeneous
+        self.data_k = config.quantization_parameters.default_data_keep_negatives
+        self.use_hgq = config.quantization_parameters.use_high_granularity_quantization
+        self.hgq_heterogeneous = config.quantization_parameters.hgq_heterogeneous
+        self.enable_quantization = config.quantization_parameters.enable_quantization
+        self.round_mode = config.quantization_parameters.round_mode
+        self.overflow = config.quantization_parameters.overflow
         self.pool_size = layer.pool_size
         self.strides = layer.strides
         self.padding = layer.padding
         self.data_format = layer.data_format
+        self.quantize_input = quantize_input
         self.dimensions = layer.__class__.__name__[-2]
 
     def post_pre_train_function(self):
         self.is_pretraining = False
 
-    def set_quantization_bits(self, i_bits, f_bits):
-        self.i = ops.convert_to_tensor(i_bits)
-        self.f = ops.convert_to_tensor(f_bits)
-
     def build(self, input_shape):
         super().build(input_shape)
-        if self.use_high_granularity_quantization:
-            if self.hgq_heterogeneous:
-                self.hgq = Quantizer(
-                    k0=1.0,
-                    i0=self.i,
-                    f0=self.f,
-                    round_mode="RND",
-                    overflow_mode=self.overflow,
-                    q_type="kif",
-                    homogeneous_axis=(0,),
-                )
-                self.hgq.build(input_shape)
-            else:
-                self.hgq = Quantizer(
-                    k0=1.0,
-                    i0=self.i,
-                    f0=self.f,
-                    round_mode="RND",
-                    overflow_mode=self.overflow,
-                    q_type="kif",
-                    heterogeneous_axis=(),
-                )
-                self.hgq.build(input_shape)
-
-            self.hgq_gamma = self.hgq_gamma
-        else:
-            self.quantizer = get_fixed_quantizer(round_mode="RND", overflow_mode=self.overflow)
+        self.input_quantizer = create_quantizer(
+            k=self.data_k,
+            i=self.i_input,
+            f=self.f_input,
+            overflow=self.overflow,
+            round_mode=self.round_mode,
+            is_heterogeneous=self.use_hgq,
+            is_data=True,
+        )
 
     def hgq_loss(self):
-        if self.is_pretraining:
+        if self.is_pretraining or not self.use_hgq:
             return 0.0
-        loss = (ops.sum(self.hgq_weight.quantizer.i) + ops.sum(self.hgq_weight.quantizer.f)) * self.hgq_gamma
-        if self.bias is not None:
-            loss += (ops.sum(self.hgq_bias.quantizer.i) + ops.sum(self.hgq_bias.quantizer.f)) * self.hgq_gamma
-        return loss
-
-    def quantize_i(self, x):
-        if self.use_high_granularity_quantization:
-            x = self.hgq(x)
-        else:
-            x = self.quantizer(x, k=ops.convert_to_tensor(1.0), i=self.i, f=self.f, training=True)
-        return x
+        return (ops.sum(self.input_quantizer.quantizer.i) + ops.sum(self.input_quantizer.quantizer.f)) * self.hgq_gamma
 
     def call(self, x):
-        x = ops.average_pool(
+        if self.quantize_input and self.enable_quantization:
+            if self.use_hgq:
+                x = self.input_quantizer(x)
+            else:
+                x = self.input_quantizer(x, k=self.data_k, i=self.i_input, f=self.f_input)
+        return ops.average_pool(
             x,
             pool_size=self.pool_size,
             strides=self.strides,
             padding=self.padding,
             data_format=self.data_format,
         )
-        return self.quantize_i(x)
 
     def get_config(self):
         config = super().get_config()
@@ -573,88 +625,12 @@ def call_post_round_functions(model, rewind, rounds, r):
         post_round_functions(model)
 
 
-def _prune_and_quantize_layer(layer, use_bias):
-    layer_weights = layer.get_weights()
-    layer_weight = ops.cast(layer_weights[0], layer_weights[0].dtype)
-
-    layer_bias = ops.cast(layer_weights[1], layer_weights[1].dtype) if use_bias else None
-    weight, bias = layer.prune_and_quantize(layer_weight, layer_bias)
-    return weight, bias
-
-
-def remove_pruning_from_model_tf(model, config):
+def apply_final_compression_tf(model):
     x = model.layers[0].output
     for layer in model.layers[1:]:
-        if isinstance(layer, CompressedLayerDepthwiseConv2dKeras):
-            new_layer = DepthwiseConv2D(
-                kernel_size=layer.kernel_size,
-                strides=layer.strides,
-                padding=layer.padding,
-                dilation_rate=layer.dilation_rate,
-                use_bias=layer.use_bias,
-                depthwise_regularizer=layer.depthwise_regularizer,
-                activity_regularizer=layer.activity_regularizer,
-            )
-            x = new_layer(x)
-            use_bias = layer.use_bias
-            weight, bias = _prune_and_quantize_layer(layer, use_bias)
-            new_layer.set_weights([weight, bias] if use_bias else [weight])
-        elif isinstance(layer, CompressedLayerConv2dKeras):
-            new_layer = Conv2D(
-                filters=layer.filters,
-                kernel_size=layer.kernel_size,
-                strides=layer.strides,
-                padding=layer.padding,
-                dilation_rate=layer.dilation_rate,
-                use_bias=layer.use_bias,
-                kernel_regularizer=layer.kernel_regularizer,
-                activity_regularizer=layer.activity_regularizer,
-            )
-            x = new_layer(x)
-            use_bias = layer.use_bias
-            weight, bias = _prune_and_quantize_layer(layer, use_bias)
-            new_layer.set_weights([weight, bias] if use_bias else [weight])
-        elif isinstance(layer, CompressedLayerSeparableConv2dKeras):
-            new_layer = SeparableConv2D(
-                filters=layer.pointwise_conv.filters,
-                kernel_size=layer.depthwise_conv.kernel_size,
-                strides=layer.depthwise_conv.strides,
-                padding=layer.depthwise_conv.padding,
-                dilation_rate=layer.depthwise_conv.dilation_rate,
-                use_bias=layer.pointwise_conv.use_bias,
-                depthwise_regularizer=layer.depthwise_conv.depthwise_regularizer,
-                pointwise_regularizer=layer.pointwise_conv.kernel_regularizer,
-                activity_regularizer=layer.activity_regularizer,
-            )
-            x = new_layer(x)
-            use_bias = layer.pointwise_conv.use_bias
-            depthwise_weight, _ = _prune_and_quantize_layer(layer.depthwise_conv, False)
-            pointwise_weight, bias = _prune_and_quantize_layer(layer.pointwise_conv, layer.pointwise_conv.use_bias)
-            new_layer.set_weights(
-                [depthwise_weight, pointwise_weight, bias] if use_bias else [depthwise_weight, pointwise_weight]
-            )
-
-        elif isinstance(layer, CompressedLayerConv1dKeras):
-            new_layer = Conv1D(
-                filters=layer.filters,
-                kernel_size=layer.kernel_size,
-                strides=layer.strides,
-                padding=layer.padding,
-                dilation_rate=layer.dilation_rate,
-                use_bias=layer.use_bias,
-                kernel_regularizer=layer.kernel_regularizer,
-                activity_regularizer=layer.activity_regularizer,
-            )
-            x = new_layer(x)
-            use_bias = layer.use_bias
-            weight, bias = _prune_and_quantize_layer(layer, use_bias)
-            new_layer.set_weights([weight, bias] if use_bias else [weight])
-        elif isinstance(layer, CompressedLayerDenseKeras):
-            new_layer = Dense(units=layer.units, use_bias=layer.use_bias, kernel_regularizer=layer.kernel_regularizer)
-            x = new_layer(x)
-            use_bias = new_layer.use_bias
-            weight, bias = _prune_and_quantize_layer(layer, use_bias)
-            new_layer.set_weights([weight, bias] if use_bias else [weight])
+        if isinstance(layer, (PQWeightBiasBase, PQSeparableConv2d, PQBatchNormalization)):
+            layer.apply_final_compression()
+            x = layer(x)
         else:
             x = layer(x)
     replaced_model = keras.Model(inputs=model.inputs, outputs=x)
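
A usage sketch for the new one-shot entry point (train_model is a placeholder for whatever training loop is in use):

    model = add_compression_layers_tf(model, config, input_shape)
    train_model(model)                         # hypothetical training loop
    model = apply_final_compression_tf(model)  # bake masks + quantization into weights
    model.predict(x_test)                      # later calls skip re-pruning/re-quantizing

Unlike the removed remove_pruning_from_model_tf, the PQ layers stay in the graph: each one freezes its compressed weights in apply_final_compression and then short-circuits prune_and_quantize via final_compression_done.
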
@@ -666,14 +642,14 @@ def post_epoch_functions(model, epoch, total_epochs, **kwargs):
         if isinstance(
             layer,
             (
-                CompressedLayerDepthwiseConv2dKeras,
-                CompressedLayerConv2dKeras,
+                PQDepthwiseConv2d,
+                PQConv2d,
                 CompressedLayerConv1dKeras,
                 CompressedLayerDenseKeras,
             ),
         ):
             layer.pruning_layer.post_epoch_function(epoch, total_epochs, **kwargs)
-        elif isinstance(layer, CompressedLayerSeparableConv2dKeras):
+        elif isinstance(layer, PQSeparableConv2d):
             layer.depthwise_conv.pruning_layer.post_epoch_function(epoch, total_epochs, **kwargs)
             layer.pointwise_conv.pruning_layer.post_epoch_function(epoch, total_epochs, **kwargs)
 
@@ -683,14 +659,14 @@ def pre_epoch_functions(model, epoch, total_epochs):
         if isinstance(
             layer,
             (
-                CompressedLayerDepthwiseConv2dKeras,
-                CompressedLayerConv2dKeras,
+                PQDepthwiseConv2d,
+                PQConv2d,
                 CompressedLayerConv1dKeras,
                 CompressedLayerDenseKeras,
             ),
         ):
             layer.pruning_layer.pre_epoch_function(epoch, total_epochs)
-        elif isinstance(layer, CompressedLayerSeparableConv2dKeras):
+        elif isinstance(layer, PQSeparableConv2d):
             layer.depthwise_conv.pruning_layer.pre_epoch_function(epoch, total_epochs)
             layer.pointwise_conv.pruning_layer.pre_epoch_function(epoch, total_epochs)
 
@@ -700,14 +676,14 @@ def post_round_functions(model):
         if isinstance(
             layer,
             (
-                CompressedLayerDepthwiseConv2dKeras,
-                CompressedLayerConv2dKeras,
+                PQDepthwiseConv2d,
+                PQConv2d,
                 CompressedLayerConv1dKeras,
                 CompressedLayerDenseKeras,
             ),
         ):
             layer.pruning_layer.post_round_function()
-        elif isinstance(layer, CompressedLayerSeparableConv2dKeras):
+        elif isinstance(layer, PQSeparableConv2d):
             layer.depthwise_conv.pruning_layer.post_round_function()
             layer.pointwise_conv.pruning_layer.post_round_function()
 
@@ -717,14 +693,14 @@ def save_weights_functions(model):
         if isinstance(
             layer,
             (
-                CompressedLayerDepthwiseConv2dKeras,
-                CompressedLayerConv2dKeras,
+                PQDepthwiseConv2d,
+                PQConv2d,
                 CompressedLayerConv1dKeras,
                 CompressedLayerDenseKeras,
             ),
         ):
             layer.save_weights()
-        elif isinstance(layer, CompressedLayerSeparableConv2dKeras):
+        elif isinstance(layer, PQSeparableConv2d):
             layer.depthwise_conv.save_weights()
             layer.pointwise_conv.save_weights()
@@ -734,14 +710,14 @@ def rewind_weights_functions(model):
         if isinstance(
             layer,
             (
-                CompressedLayerDepthwiseConv2dKeras,
-                CompressedLayerConv2dKeras,
+                PQDepthwiseConv2d,
+                PQConv2d,
                 CompressedLayerConv1dKeras,
                 CompressedLayerDenseKeras,
             ),
         ):
             layer.rewind_weights()
-        elif isinstance(layer, CompressedLayerSeparableConv2dKeras):
+        elif isinstance(layer, PQSeparableConv2d):
             layer.depthwise_conv.rewind_weights()
             layer.pointwise_conv.rewind_weights()
 
@@ -751,14 +727,14 @@ def pre_finetune_functions(model):
         if isinstance(
             layer,
             (
-                CompressedLayerDepthwiseConv2dKeras,
-                CompressedLayerConv2dKeras,
+                PQDepthwiseConv2d,
+                PQConv2d,
                 CompressedLayerConv1dKeras,
                 CompressedLayerDenseKeras,
             ),
         ):
             layer.pruning_layer.pre_finetune_function()
-        elif isinstance(layer, CompressedLayerSeparableConv2dKeras):
+        elif isinstance(layer, PQSeparableConv2d):
             layer.depthwise_conv.pruning_layer.pre_finetune_function()
             layer.pointwise_conv.pruning_layer.pre_finetune_function()
 
@@ -768,14 +744,14 @@ def post_pretrain_functions(model, config):
         if isinstance(
             layer,
             (
-                CompressedLayerDepthwiseConv2dKeras,
-                CompressedLayerConv2dKeras,
+                PQDepthwiseConv2d,
+                PQConv2d,
                 CompressedLayerConv1dKeras,
                 CompressedLayerDenseKeras,
             ),
         ):
             layer.pruning_layer.post_pre_train_function()
-        elif isinstance(layer, CompressedLayerSeparableConv2dKeras):
+        elif isinstance(layer, PQSeparableConv2d):
             layer.depthwise_conv.pruning_layer.post_pre_train_function()
             layer.pointwise_conv.pruning_layer.post_pre_train_function()
         elif isinstance(layer, (QuantizedReLU, QuantizedTanh, QuantizedPooling)):
@@ -796,8 +772,8 @@ def pdp_setup(model, config):
         if isinstance(
             layer,
             (
-                CompressedLayerDepthwiseConv2dKeras,
-                CompressedLayerConv2dKeras,
+                PQDepthwiseConv2d,
+                PQConv2d,
                 CompressedLayerConv1dKeras,
                 CompressedLayerDenseKeras,
             ),
@@ -806,7 +782,7 @@ def pdp_setup(model, config):
             if global_weights is None:
                 global_weights = ops.ravel(layer.weight)
             else:
                 global_weights = ops.concatenate((global_weights, ops.ravel(layer.weight)))
-        elif isinstance(layer, CompressedLayerSeparableConv2dKeras):
+        elif isinstance(layer, PQSeparableConv2d):
             if global_weights is None:
                 global_weights = ops.ravel(layer.depthwise_conv.weight)
             global_weights = ops.concatenate((global_weights, ops.ravel(layer.pointwise_conv.weight)))
@@ -823,8 +799,8 @@ def pdp_setup(model, config):
         if isinstance(
             layer,
             (
-                CompressedLayerDepthwiseConv2dKeras,
-                CompressedLayerConv2dKeras,
+                PQDepthwiseConv2d,
+                PQConv2d,
                 CompressedLayerConv1dKeras,
                 CompressedLayerDenseKeras,
             ),
@@ -834,7 +810,7 @@ def pdp_setup(model, config):
             layer.pruning_layer.init_r = ops.convert_to_tensor(w / weight_size, dtype=layer.weight.dtype)
             layer.pruning_layer.sparsity = ops.convert_to_tensor(w / weight_size, dtype=layer.weight.dtype)  # Wanda
             idx += weight_size
-        elif isinstance(layer, CompressedLayerSeparableConv2dKeras):
+        elif isinstance(layer, PQSeparableConv2d):
             weight_size = ops.size(layer.depthwise_conv.weight)
             w = ops.sum(global_weights_below_threshold[idx : idx + weight_size])
             layer.depthwise_conv.pruning_layer.init_r = ops.convert_to_tensor(
@@ -863,8 +839,8 @@ def get_layer_keep_ratio_tf(model):
         if isinstance(
             layer,
             (
-                CompressedLayerDepthwiseConv2dKeras,
-                CompressedLayerConv2dKeras,
+                PQDepthwiseConv2d,
+                PQConv2d,
                 CompressedLayerConv1dKeras,
                 CompressedLayerDenseKeras,
            ),
@@ -881,7 +857,7 @@ def get_layer_keep_ratio_tf(model):
             total_w += ops.size(weight)
             rem = ops.count_nonzero(weight)
             remaining_weights += rem
-        elif isinstance(layer, CompressedLayerSeparableConv2dKeras):
+        elif isinstance(layer, PQSeparableConv2d):
             depthwise_weight = ops.cast(layer.depthwise_conv.weight, layer.depthwise_conv.weight.dtype)
             pointwise_weight = ops.cast(layer.pointwise_conv.weight, layer.pointwise_conv.weight.dtype)
             bias = (
@@ -931,8 +907,8 @@ def get_model_losses_tf(model, losses):
         if isinstance(
             layer,
             (
-                CompressedLayerDepthwiseConv2dKeras,
-                CompressedLayerConv2dKeras,
+                PQDepthwiseConv2d,
+                PQConv2d,
                 CompressedLayerConv1dKeras,
                 CompressedLayerDenseKeras,
             ),
@@ -941,7 +917,7 @@ def get_model_losses_tf(model, losses):
             if layer.enable_quantization and layer.use_hgq:
                 loss += layer.hgq_loss()
             losses += loss
-        elif isinstance(layer, CompressedLayerSeparableConv2dKeras):
+        elif isinstance(layer, PQSeparableConv2d):
             loss = layer.depthwise_conv.pruning_layer.calculate_additional_loss()
             loss += layer.pointwise_conv.pruning_layer.calculate_additional_loss()
             if layer.enable_quantization and layer.use_high_granularity_quantization:
@@ -963,12 +939,16 @@ def check_activation(layer, config):
     act = None
     if hasattr(layer.activation, "__name__"):
         if layer.activation.__name__ == "relu":
-            i_bits, f_bits = get_quantization_bits_activations(config, layer)
-            act = QuantizedReLU(config, i_bits, f_bits) if quantization_enabled else ReLU()
+            act = QuantizedReLU(config) if quantization_enabled else ReLU()
+            if quantization_enabled:
+                get_quantization_bits_activations(config, layer, act)
             act.build(layer.input.shape)
         elif layer.activation.__name__ == "tanh":
-            i_bits, f_bits = get_quantization_bits_activations(config, layer)
-            act = QuantizedTanh(config, i=i_bits, f=f_bits) if quantization_enabled else Activation(activation="tanh")
+            act = QuantizedTanh(config) if quantization_enabled else Activation(activation="tanh")
+            if quantization_enabled:
+                get_quantization_bits_activations(config, layer, act)
+            act.build(layer.input.shape)
         else:
             act = None
     return act
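
A hedged sketch of where get_model_losses_tf fits, assuming a custom training step (everything here other than get_model_losses_tf is a placeholder, not part of this patch):

    import tensorflow as tf

    @tf.function
    def train_step(model, x, y, loss_fn, optimizer):
        with tf.GradientTape() as tape:
            loss = loss_fn(y, model(x, training=True))
            # add per-layer pruning penalties and HGQ bit-width losses on top
            loss = get_model_losses_tf(model, loss)
        grads = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))
        return loss
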
@@ -981,14 +961,9 @@ def add_compression_layers_tf(model, config, input_shape=None):
     for layer in model.layers[1:]:
         act = None
         if isinstance(layer, DepthwiseConv2D):
-            new_layer = CompressedLayerDepthwiseConv2dKeras(config, layer, layer_type="conv")
-            i_bits_w, f_bits_w, i_bits_b, f_bits_b = get_quantization_bits_weights_biases(config, layer)
-            new_layer.set_quantization_bits(i_bits_w, f_bits_w, i_bits_b, f_bits_b)
-            i_input, f_input, i_output, f_output, quantize_input, quantize_output = get_quantization_bits_inputs_outputs(
-                config, layer
-            )
-            new_layer.set_input_output_quantization(quantize_input, quantize_output)
-            new_layer.set_input_output_quantization_bits(i_input, f_input, i_output, f_output)
+            new_layer = PQDepthwiseConv2d(config, layer, layer_type="conv")
+            set_quantization_bits_weight_layers(config, layer, new_layer)
+
             enable_pruning = get_enable_pruning(layer, config)
             new_layer.set_enable_pruning(enable_pruning)
             pruning_layer_input = layer.kernel
@@ -999,14 +974,8 @@ def add_compression_layers_tf(model, config, input_shape=None):
             x = new_layer(x)
             act = check_activation(layer, config)
         elif isinstance(layer, Conv2D):
-            new_layer = CompressedLayerConv2dKeras(config, layer, layer_type="conv")
-            i_bits_w, f_bits_w, i_bits_b, f_bits_b = get_quantization_bits_weights_biases(config, layer)
-            new_layer.set_quantization_bits(i_bits_w, f_bits_w, i_bits_b, f_bits_b)
-            i_input, f_input, i_output, f_output, quantize_input, quantize_output = get_quantization_bits_inputs_outputs(
-                config, layer
-            )
-            new_layer.set_input_output_quantization(quantize_input, quantize_output)
-            new_layer.set_input_output_quantization_bits(i_input, f_input, i_output, f_output)
+            new_layer = PQConv2d(config, layer, layer_type="conv")
+            set_quantization_bits_weight_layers(config, layer, new_layer)
             enable_pruning = get_enable_pruning(layer, config)
             new_layer.set_enable_pruning(enable_pruning)
             pruning_layer_input = layer.kernel
@@ -1016,19 +985,9 @@ def add_compression_layers_tf(model, config, input_shape=None):
             x = new_layer(x)
             act = check_activation(layer, config)
         elif isinstance(layer, SeparableConv2D):
-            new_layer = CompressedLayerSeparableConv2dKeras(config, layer)
-            dw_i_bits_w, dw_f_bits_w, pw_i_bits_w, pw_f_bits_w, pw_i_bits_b, pw_f_bits_b = (
-                get_quantization_bits_weights_biases(config, layer)
-            )
-            new_layer.depthwise_conv.set_quantization_bits(dw_i_bits_w, dw_f_bits_w, pw_i_bits_b, pw_f_bits_b)
-            new_layer.pointwise_conv.set_quantization_bits(pw_i_bits_w, pw_f_bits_w, pw_i_bits_b, pw_f_bits_b)
-            i_input, f_input, i_output, f_output, quantize_input, quantize_output = get_quantization_bits_inputs_outputs(
-                config, layer
-            )
-            new_layer.depthwise_conv.set_input_output_quantization(quantize_input, False)
-            new_layer.pointwise_conv.set_input_output_quantization(False, quantize_output)
-            new_layer.depthwise_conv.set_input_output_quantization_bits(i_input, f_input, i_output, f_output)
-            new_layer.pointwise_conv.set_input_output_quantization_bits(i_input, f_input, i_output, f_output)
+            new_layer = PQSeparableConv2d(config, layer)
+            set_quantization_bits_weight_layers(config, layer, new_layer)
+
             enable_pruning_depthwise, enable_pruning_pointwise = get_enable_pruning(layer, config)
             new_layer.depthwise_conv.set_enable_pruning(enable_pruning_depthwise)
             new_layer.pointwise_conv.set_enable_pruning(enable_pruning_pointwise)
@@ -1049,13 +1008,7 @@ def add_compression_layers_tf(model, config, input_shape=None):
             act = check_activation(layer, config)
         elif isinstance(layer, Conv1D):
             new_layer = CompressedLayerConv1dKeras(config, layer, layer_type="conv")
-            i_bits_w, f_bits_w, i_bits_b, f_bits_b = get_quantization_bits_weights_biases(config, layer)
-            new_layer.set_quantization_bits(i_bits_w, f_bits_w, i_bits_b, f_bits_b)
-            i_input, f_input, i_output, f_output, quantize_input, quantize_output = get_quantization_bits_inputs_outputs(
-                config, layer
-            )
-            new_layer.set_input_output_quantization(quantize_input, quantize_output)
-            new_layer.set_input_output_quantization_bits(i_input, f_input, i_output, f_output)
+            set_quantization_bits_weight_layers(config, layer, new_layer)
             enable_pruning = get_enable_pruning(layer, config)
             new_layer.set_enable_pruning(enable_pruning)
             pruning_layer_input = layer.kernel
@@ -1067,13 +1020,7 @@ def add_compression_layers_tf(model, config, input_shape=None):
             act = check_activation(layer, config)
         elif isinstance(layer, Dense):
             new_layer = CompressedLayerDenseKeras(config, layer, layer_type="linear")
-            i_bits_w, f_bits_w, i_bits_b, f_bits_b = get_quantization_bits_weights_biases(config, layer)
-            new_layer.set_quantization_bits(i_bits_w, f_bits_w, i_bits_b, f_bits_b)
-            i_input, f_input, i_output, f_output, quantize_input, quantize_output = get_quantization_bits_inputs_outputs(
-                config, layer
-            )
-            new_layer.set_input_output_quantization(quantize_input, quantize_output)
-            new_layer.set_input_output_quantization_bits(i_input, f_input, i_output, f_output)
+            set_quantization_bits_weight_layers(config, layer, new_layer)
             enable_pruning = get_enable_pruning(layer, config)
             new_layer.set_enable_pruning(enable_pruning)
             pruning_layer_input = layer.kernel
@@ -1084,12 +1031,9 @@ def add_compression_layers_tf(model, config, input_shape=None):
             act = check_activation(layer, config)
         # Activation layers
         elif isinstance(layer, ReLU):
-            if config.quantization_parameters.enable_quantization:
-                i_bits = config.quantization_parameters.default_integer_bits
-                f_bits = config.quantization_parameters.default_fractional_bits
-                i_bits, f_bits = get_quantization_bits_activations(config, layer)
-
-                new_layer = QuantizedReLU(config, i_bits, f_bits)
+            if config["quantization_parameters"]["enable_quantization"]:
+                new_layer = QuantizedReLU(config)
+                get_quantization_bits_activations(config, layer, new_layer)
                 new_layer.build(layer.input.shape)
 
                 x = new_layer(x)
@@ -1102,15 +1046,13 @@ def add_compression_layers_tf(model, config, input_shape=None):
             x = new_layer(x)
         elif isinstance(layer, (AveragePooling1D, AveragePooling2D, AveragePooling3D)):
             if config.quantization_parameters.enable_quantization:
-                i_bits, f_bits = get_quantization_bits_activations(config, layer)
                 new_layer = QuantizedPooling(config, layer)
-                new_layer.set_quantization_bits(i_bits, f_bits)
+                get_quantization_bits_activations(config, layer, new_layer)
                 new_layer.build(layer.output.shape)
                 x = new_layer(x)
         elif isinstance(layer, (BatchNormalization)):
             if config["quantization_parameters"]["enable_quantization"]:
-                i_bits, f_bits = get_quantization_bits_activations(config, layer)
                 new_layer = PQBatchNormalization(
                     config,
                     layer.axis,
@@ -1127,8 +1069,9 @@ def add_compression_layers_tf(model, config, input_shape=None):
                     layer.beta_constraint,
                     layer.gamma_constraint,
                     layer.synchronized,
+                    quantize_input=True,
                 )
-                new_layer.set_bits(i_bits, f_bits)
+                get_quantization_bits_activations(config, layer, new_layer)
                 new_layer.build(layer.output.shape)
                 x = new_layer(x)
             else:
@@ -1141,70 +1084,113 @@ def add_compression_layers_tf(model, config, input_shape=None):
     return replaced_model
 
 
-def get_quantization_bits_activations(config, layer):
-    i_bits = config.quantization_parameters.default_integer_bits
-    f_bits = config.quantization_parameters.default_fractional_bits
+def get_quantization_bits_activations(config, layer, new_layer):
+    i_input = i_output = config.quantization_parameters.default_integer_bits
+    f_input = f_output = config.quantization_parameters.default_fractional_bits
     if isinstance(layer, ReLU):
-        f_bits += 1  # Unsigned, add 1 bit to default value only
-    layer_specific = config.quantization_parameters.layer_specific
-    if layer.name in layer_specific:
-        if hasattr(layer, "activation") and layer.activation.__name__ in layer_specific[layer.name]:
-            i_bits = layer_specific[layer.name][layer.activation.__name__]["integer_bits"]
-            f_bits = layer_specific[layer.name][layer.activation.__name__]["fractional_bits"]
+        f_input += 1
+        f_output += 1  # Unsigned, add 1 bit to default value only
+    if layer.name in config.quantization_parameters.layer_specific:
+        layer_config = config.quantization_parameters.layer_specific[layer.name]
+        if hasattr(layer, "activation") and layer.activation.__name__ in layer_config:
+            if "input" in layer_config[layer.activation.__name__]:
+                if "integer_bits" in layer_config[layer.activation.__name__]["input"]:
+                    i_input = layer_config[layer.activation.__name__]["input"]["integer_bits"]
+                if "fractional_bits" in layer_config[layer.activation.__name__]["input"]:
+                    f_input = layer_config[layer.activation.__name__]["input"]["fractional_bits"]
+                if "quantize" in layer_config[layer.activation.__name__]["input"]:
+                    new_layer.quantize_input = layer_config[layer.activation.__name__]["input"]["quantize"]
+            if "output" in layer_config[layer.activation.__name__]:
+                if "integer_bits" in layer_config[layer.activation.__name__]["output"]:
+                    i_output = layer_config[layer.activation.__name__]["output"]["integer_bits"]
+                if "fractional_bits" in layer_config[layer.activation.__name__]["output"]:
+                    f_output = layer_config[layer.activation.__name__]["output"]["fractional_bits"]
+                if "quantize" in layer_config[layer.activation.__name__]["output"]:
+                    new_layer.quantize_output = layer_config[layer.activation.__name__]["output"]["quantize"]
         else:
-            i_bits = layer_specific[layer.name]["integer_bits"]
-            f_bits = layer_specific[layer.name]["fractional_bits"]
-    return i_bits, f_bits
-
-
-def get_quantization_bits_weights_biases(config, layer):
-    layer_specific = config.quantization_parameters.layer_specific
+            if "input" in layer_config:
+                if "integer_bits" in layer_config["input"]:
+                    i_input = layer_config["input"]["integer_bits"]
+                if "fractional_bits" in layer_config["input"]:
+                    f_input = layer_config["input"]["fractional_bits"]
+                if "quantize" in layer_config["input"]:
+                    new_layer.quantize_input = layer_config["input"]["quantize"]
+            if "output" in layer_config:
+                if "integer_bits" in layer_config["output"]:
+                    i_output = layer_config["output"]["integer_bits"]
+                if "fractional_bits" in layer_config["output"]:
+                    f_output = layer_config["output"]["fractional_bits"]
+                if "quantize" in layer_config["output"]:
+                    new_layer.quantize_output = layer_config["output"]["quantize"]
+    new_layer.i_input = i_input
+    new_layer.f_input = f_input
+    new_layer.i_output = i_output
+    new_layer.f_output = f_output
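
An illustrative layer_specific entry, in the YAML form the config files use, matching the input/weight/bias/output (plus fused-activation) schema the readers above and below expect; the layer name dense_0 is hypothetical:

    quantization_parameters:
      layer_specific:
        dense_0:
          input:  {quantize: true, integer_bits: 0.0, fractional_bits: 7.0}
          weight: {integer_bits: 0.0, fractional_bits: 7.0}
          bias:   {integer_bits: 0.0, fractional_bits: 7.0}
          output: {quantize: false, integer_bits: 0.0, fractional_bits: 7.0}
          relu:   # fused activation of dense_0
            input:  {quantize: true, integer_bits: 0.0, fractional_bits: 8.0}
            output: {quantize: true, integer_bits: 0.0, fractional_bits: 8.0}
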
layer_config["pointwise"]["weight"]["fractional_bits"] + if "bias" in layer_config: + pw_i_bits_b = layer_config["pointwise"]["bias"]["integer_bits"] + pw_f_bits_b = layer_config["pointwise"]["bias"]["fractional_bits"] + if "output" in layer_config: + if "quantize" in layer_config["output"]: + new_layer.quantize_input = layer_config["output"]["quantize"] + if "integer_bits" in layer_config["output"]: + i_output = layer_config["output"]["integer_bits"] + if "fractional_bits" in layer_config["output"]: + f_output = layer_config["output"]["fractional_bits"] + new_layer.depthwise_conv.i_input = i_input + new_layer.depthwise_conv.f_input = f_input + new_layer.depthwise_conv.i_weight = dw_i_bits_w + new_layer.depthwise_conv.f_weight = dw_f_bits_w + new_layer.pointwise_conv.i_weight = pw_i_bits_w + new_layer.pointwise_conv.f_weight = pw_f_bits_w + new_layer.pointwise_conv.i_bias = pw_i_bits_b + new_layer.pointwise_conv.f_bias = pw_f_bits_b + new_layer.pointwise_conv.i_output = i_output + new_layer.pointwise_conv.f_output = f_output else: i_bits_w = i_bits_b = config.quantization_parameters.default_integer_bits f_bits_w = f_bits_b = config.quantization_parameters.default_fractional_bits if layer.name in layer_specific: - if "weight" in layer_specific[layer.name]: - i_bits_w = layer_specific[layer.name]["weight"]["integer_bits"] - f_bits_w = layer_specific[layer.name]["weight"]["fractional_bits"] - if "bias" in layer_specific[layer.name]: - i_bits_b = layer_specific[layer.name]["bias"]["integer_bits"] - f_bits_b = layer_specific[layer.name]["bias"]["fractional_bits"] - return i_bits_w, f_bits_w, i_bits_b, f_bits_b - - -def get_quantization_bits_inputs_outputs(config, layer): - layer_specific = config["quantization_parameters"]["layer_specific"] - i_input = i_output = config["quantization_parameters"]["default_integer_bits"] - f_input = f_output = config["quantization_parameters"]["default_fractional_bits"] - quantize_input = quantize_output = False - if layer.name in layer_specific: - if "input" in layer_specific[layer.name]: - i_input = layer_specific[layer.name]["input"]["integer_bits"] - f_input = layer_specific[layer.name]["input"]["fractional_bits"] - if "quantize_input" in layer_specific[layer.name]["input"]: - quantize_input = layer_specific[layer.name]["input"]["quantize_input"] - if "output" in layer_specific[layer.name]: - i_output = layer_specific[layer.name]["output"]["integer_bits"] - f_output = layer_specific[layer.name]["output"]["fractional_bits"] - if "quantize_output" in layer_specific[layer.name]["output"]: - quantize_output = layer_specific[layer.name]["output"]["quantize_output"] - return i_input, f_input, i_output, f_output, quantize_input, quantize_output + layer_config = layer_specific[layer.name] + if "input" in layer_config: + if "quantize" in layer_config["input"]: + new_layer.quantize_input = layer_config["input"]["quantize"] + if "weight" in layer_config: + i_bits_w = layer_config["weight"]["integer_bits"] + f_bits_w = layer_config["weight"]["fractional_bits"] + if "bias" in layer_config: + i_bits_b = layer_config["bias"]["integer_bits"] + f_bits_b = layer_config["bias"]["fractional_bits"] + if "output" in layer_config: + if "quantize" in layer_config["output"]: + new_layer.quantize_output = layer_config["output"]["quantize"] + new_layer.i_weight = i_bits_w + new_layer.f_weight = f_bits_w + new_layer.i_bias = i_bits_b + new_layer.f_bias = f_bits_b def get_enable_pruning(layer, config): @@ -1223,6 +1209,9 @@ def get_enable_pruning(layer, config): def 
@@ -1223,6 +1209,9 @@ def add_default_layer_quantization_pruning_to_config_tf(model, config):
+    """Create a default config, where all the layers are added to the disable_pruning list and have their
+    own default quantization bits in layer_specific. By default input/output quantization is enabled.
+    """
     custom_scheme = {"layer_specific": {}, "disable_pruning_for_layers": []}
     for layer in model.layers:
         if layer.__class__ in [Dense, Conv2D, Conv1D, DepthwiseConv2D]:
@@ -1230,18 +1219,26 @@ def add_default_layer_quantization_pruning_to_config_tf(model, config):
             custom_scheme["layer_specific"][layer.name] = {
                 "weight": {"integer_bits": 0.0, "fractional_bits": 7.0},
                 "bias": {"integer_bits": 0.0, "fractional_bits": 7.0},
+                "input": {"quantize": True, "integer_bits": 0.0, "fractional_bits": 7.0},
+                "output": {"quantize": True, "integer_bits": 0.0, "fractional_bits": 7.0},
             }
         else:
-            custom_scheme["layer_specific"][layer.name] = {"weight": {"integer_bits": 0.0, "fractional_bits": 7.0}}
+            custom_scheme["layer_specific"][layer.name] = {
+                "input": {"integer_bits": 0.0, "fractional_bits": 7.0, "quantize": True},
+                "weight": {"integer_bits": 0.0, "fractional_bits": 7.0},
+                "bias": {"integer_bits": 0.0, "fractional_bits": 7.0},
+                "output": {"integer_bits": 0.0, "fractional_bits": 7.0, "quantize": True},
+            }
         if hasattr(layer.activation, "__name__") and layer.activation.__name__ in ["relu", "tanh"]:
             custom_scheme["layer_specific"][layer.name][layer.activation.__name__] = {
-                "integer_bits": 0.0,
-                "fractional_bits": 7.0,
+                "input": {"quantize": True, "integer_bits": 0.0, "fractional_bits": 7.0},
+                "output": {"quantize": True, "integer_bits": 0.0, "fractional_bits": 7.0},
             }
         custom_scheme["disable_pruning_for_layers"].append(layer.name)
"output": {"quantize": True, "integer_bits": 0.0, "fractional_bits": 7.0}, + } + elif layer.__class__ == BatchNormalization: + custom_scheme["layer_specific"][layer.name] = { + "input": {"quantize": True, "integer_bits": 0.0, "fractional_bits": 7.0}, + "weight": {"integer_bits": 0.0, "fractional_bits": 7.0}, + } config.quantization_parameters.layer_specific = custom_scheme["layer_specific"] config.pruning_parameters.disable_pruning_for_layers = custom_scheme["disable_pruning_for_layers"] return config diff --git a/src/pquant/core/torch_impl/compressed_layers_torch.py b/src/pquant/core/torch_impl/compressed_layers_torch.py index 61e1fd5..f0cc137 100644 --- a/src/pquant/core/torch_impl/compressed_layers_torch.py +++ b/src/pquant/core/torch_impl/compressed_layers_torch.py @@ -17,8 +17,8 @@ from pquant.core.quantizer_functions import create_quantizer -class CompressedLayerBase(nn.Module): - def __init__(self, config, layer, layer_type): +class PQWeightBiasBase(nn.Module): + def __init__(self, config, layer, layer_type, quantize_input=True, quantize_output=False): super().__init__() self.i_weight = self.i_bias = self.i_input = self.i_output = torch.tensor( config.quantization_parameters.default_integer_bits @@ -30,11 +30,11 @@ def __init__(self, config, layer, layer_type): self.weight = nn.Parameter(layer.weight.clone()) self.pruning_layer = get_pruning_layer(config=config, layer_type=layer_type) self.pruning_method = config.pruning_parameters.pruning_method - self.hgq_heterogeneous = config.quantization_parameters.hgq_heterogeneous - self.use_input_quantizer = False - self.use_output_quantizer = False - + self.quantize_input = quantize_input + self.quantize_output = quantize_output + self.data_k = config.quantization_parameters.default_data_keep_negatives + self.weight_k = config.quantization_parameters.default_weight_keep_negatives self.bias = nn.Parameter(layer.bias.clone()) if layer.bias is not None else None self.init_weight = self.weight.clone() self.pruning_first = config.training_parameters.pruning_first @@ -45,11 +45,12 @@ def __init__(self, config, layer, layer_type): self.enable_pruning = config.pruning_parameters.enable_pruning self.use_fitcompress = config.fitcompress_parameters.enable_fitcompress self.hgq_gamma = config.quantization_parameters.hgq_gamma + self.final_compression_done = False def build(self): # Build function to delay quantizer creation until after custom i,f bits have been set self.weight_quantizer = PyTorchQuantizer( - torch.tensor(1.0), + torch.tensor(self.weight_k), self.i_weight, self.f_weight, self.overflow, @@ -61,7 +62,7 @@ def build(self): if self.bias is not None: self.bias_quantizer = PyTorchQuantizer( - torch.tensor(1.0), + torch.tensor(self.weight_k), self.i_bias, self.f_bias, self.overflow, @@ -70,20 +71,19 @@ def build(self): False, self.hgq_gamma, ) - if self.use_input_quantizer: - self.input_quantizer = PyTorchQuantizer( - torch.tensor(1.0), - self.i_input, - self.f_input, - self.overflow, - self.round_mode, - self.use_hgq, - True, - self.hgq_gamma, - ) - if self.use_output_quantizer: + self.input_quantizer = PyTorchQuantizer( + torch.tensor(self.data_k), + self.i_input, + self.f_input, + self.overflow, + self.round_mode, + self.use_hgq, + True, + self.hgq_gamma, + ) + if self.quantize_output: self.output_quantizer = PyTorchQuantizer( - torch.tensor(1.0), + torch.tensor(self.data_k), self.i_output, self.f_output, self.overflow, @@ -93,6 +93,13 @@ def build(self): self.hgq_gamma, ) + def apply_final_compression(self): + weight, bias = 
diff --git a/src/pquant/core/torch_impl/compressed_layers_torch.py b/src/pquant/core/torch_impl/compressed_layers_torch.py
index 61e1fd5..f0cc137 100644
--- a/src/pquant/core/torch_impl/compressed_layers_torch.py
+++ b/src/pquant/core/torch_impl/compressed_layers_torch.py
@@ -17,8 +17,8 @@ from pquant.core.quantizer_functions import create_quantizer
 
 
-class CompressedLayerBase(nn.Module):
-    def __init__(self, config, layer, layer_type):
+class PQWeightBiasBase(nn.Module):
+    def __init__(self, config, layer, layer_type, quantize_input=True, quantize_output=False):
         super().__init__()
         self.i_weight = self.i_bias = self.i_input = self.i_output = torch.tensor(
             config.quantization_parameters.default_integer_bits
@@ -30,11 +30,11 @@ def __init__(self, config, layer, layer_type):
         self.weight = nn.Parameter(layer.weight.clone())
         self.pruning_layer = get_pruning_layer(config=config, layer_type=layer_type)
         self.pruning_method = config.pruning_parameters.pruning_method
-        self.hgq_heterogeneous = config.quantization_parameters.hgq_heterogeneous
-        self.use_input_quantizer = False
-        self.use_output_quantizer = False
-
+        self.quantize_input = quantize_input
+        self.quantize_output = quantize_output
+        self.data_k = config.quantization_parameters.default_data_keep_negatives
+        self.weight_k = config.quantization_parameters.default_weight_keep_negatives
         self.bias = nn.Parameter(layer.bias.clone()) if layer.bias is not None else None
         self.init_weight = self.weight.clone()
         self.pruning_first = config.training_parameters.pruning_first
@@ -45,11 +45,12 @@ def __init__(self, config, layer, layer_type):
         self.enable_pruning = config.pruning_parameters.enable_pruning
         self.use_fitcompress = config.fitcompress_parameters.enable_fitcompress
         self.hgq_gamma = config.quantization_parameters.hgq_gamma
+        self.final_compression_done = False
 
     def build(self):
         # Build function to delay quantizer creation until after custom i,f bits have been set
         self.weight_quantizer = PyTorchQuantizer(
-            torch.tensor(1.0),
+            torch.tensor(self.weight_k),
             self.i_weight,
             self.f_weight,
             self.overflow,
@@ -61,7 +62,7 @@ def build(self):
 
         if self.bias is not None:
             self.bias_quantizer = PyTorchQuantizer(
-                torch.tensor(1.0),
+                torch.tensor(self.weight_k),
                 self.i_bias,
                 self.f_bias,
                 self.overflow,
@@ -70,20 +71,19 @@ def build(self):
                 False,
                 self.hgq_gamma,
             )
-        if self.use_input_quantizer:
-            self.input_quantizer = PyTorchQuantizer(
-                torch.tensor(1.0),
-                self.i_input,
-                self.f_input,
-                self.overflow,
-                self.round_mode,
-                self.use_hgq,
-                True,
-                self.hgq_gamma,
-            )
-        if self.use_output_quantizer:
+        self.input_quantizer = PyTorchQuantizer(
+            torch.tensor(self.data_k),
+            self.i_input,
+            self.f_input,
+            self.overflow,
+            self.round_mode,
+            self.use_hgq,
+            True,
+            self.hgq_gamma,
+        )
+        if self.quantize_output:
             self.output_quantizer = PyTorchQuantizer(
-                torch.tensor(1.0),
+                torch.tensor(self.data_k),
                 self.i_output,
                 self.f_output,
                 self.overflow,
@@ -93,6 +93,13 @@ def build(self):
                 self.hgq_gamma,
             )
 
+    def apply_final_compression(self):
+        weight, bias = self.prune_and_quantize(self.weight, self.bias)
+        self.weight.data = weight
+        if self.bias is not None:
+            self.bias.data = bias
+        self.final_compression_done = True
+
     def save_weights(self):
         self.init_weight = self.weight.clone()
 
@@ -109,11 +116,11 @@ def hgq_loss(self):
             loss += (
                 torch.sum(self.bias_quantizer.quantizer.quantizer.i) + torch.sum(self.bias_quantizer.quantizer.quantizer.f)
             ) * self.hgq_gamma
-        if self.use_input_quantizer:
+        if self.quantize_input:
             loss += (
                 torch.sum(self.input_quantizer.quantizer.quantizer.i) + torch.sum(self.input_quantizer.quantizer.quantizer.f)
             ) * self.hgq_gamma
-        if self.use_output_quantizer:
+        if self.quantize_output:
             loss += (
                 torch.sum(self.output_quantizer.quantizer.quantizer.i) + torch.sum(self.output_quantizer.quantizer.quantizer.f)
             ) * self.hgq_gamma
@@ -132,6 +139,8 @@ def prune(self, weight):
         return weight
 
     def prune_and_quantize(self, weight, bias):
+        if self.final_compression_done:
+            return weight, bias
         if self.pruning_first:
             weight = self.prune(weight)
             weight, bias = self.quantize(weight, bias)
@@ -141,16 +150,22 @@ def prune_and_quantize(self, weight, bias):
         return weight, bias
 
     def pre_forward(self, weight, bias, x):
-        if self.use_input_quantizer and not self.use_fitcompress and not self.pruning_layer.is_pretraining:
-            x = self.input_quantizer(x)
+        if self.quantize_input:
+            if self.use_hgq and not self.input_quantizer.quantizer.built:
+                self.input_quantizer.quantizer.build(x.shape)
+            if not self.pruning_layer.is_pretraining and not self.use_fitcompress:
+                x = self.input_quantizer(x)
         if self.pruning_method == "wanda":
             self.pruning_layer.collect_input(x, self.weight, self.training)
         weight, bias = self.prune_and_quantize(weight, bias)
         return weight, bias, x
 
     def post_forward(self, x):
-        if self.use_output_quantizer and not self.use_fitcompress and not self.pruning_layer.is_pretraining:
-            x = self.output_quantizer(x)
+        if self.quantize_output:
+            if self.use_hgq and not self.output_quantizer.quantizer.built:
+                self.output_quantizer.quantizer.build(x.shape)
+            if not self.pruning_layer.is_pretraining and not self.use_fitcompress:
+                x = self.output_quantizer(x)
         if self.pruning_method == "activation_pruning":
             self.pruning_layer.collect_output(x, self.training)
         return x
@@ -162,7 +177,7 @@ def forward(self, x):
         return x
 
 
-class CompressedLayerLinear(CompressedLayerBase):
+class PQDense(PQWeightBiasBase):
     def __init__(self, config, layer, layer_type):
         super().__init__(config, layer, layer_type)
         self.in_features = layer.in_features
@@ -174,20 +189,13 @@ def post_pre_train_function(self):
         self.is_pretraining = False
 
     def forward(self, x):
-        if self.use_fitcompress and self.is_pretraining:
-            weight, bias = self.weight, self.bias
-        else:
-            weight, bias = self.prune_and_quantize(self.weight, self.bias)
-
-        if self.pruning_method == "wanda":
-            self.pruning_layer.collect_input(x, self.weight, self.training)
+        weight, bias, x = self.pre_forward(self.weight, self.bias, x)
         x = F.linear(x, weight, bias)
-        if self.pruning_method == "activation_pruning":
-            self.pruning_layer.collect_output(x, self.training)
+        x = self.post_forward(x)
         return x
 
 
-class CompressedLayerConv2d(CompressedLayerBase):
+class PQConv2d(PQWeightBiasBase):
     def __init__(self, config, layer, layer_type):
         super().__init__(config, layer, layer_type)
         self.stride = layer.stride
@@ -205,12 +213,8 @@ def post_pre_train_function(self):
         self.is_pretraining = False
 
     def forward(self, x):
-        if self.use_fitcompress and self.is_pretraining:
-            weight, bias = self.weight, self.bias
-        else:
-            weight, bias = self.prune_and_quantize(self.weight, self.bias)
-        if self.pruning_method == "wanda":
-            self.pruning_layer.collect_input(x, weight, self.training)
+        weight, bias, x = self.pre_forward(self.weight, self.bias, x)
         x = F.conv2d(
             input=x,
             weight=weight,
@@ -220,12 +224,11 @@ def forward(self, x):
             dilation=self.dilation,
             groups=self.groups,
         )
-        if self.pruning_method == "activation_pruning":
-            self.pruning_layer.collect_output(x, self.training)
+        x = self.post_forward(x)
         return x
 
 
-class CompressedLayerConv1d(CompressedLayerBase):
+class PQConv1d(PQWeightBiasBase):
     def __init__(self, config, layer, layer_type):
         super().__init__(config, layer, layer_type)
 
@@ -244,12 +247,7 @@ def post_pre_train_function(self):
         self.is_pretraining = False
 
     def forward(self, x):
-        if self.use_fitcompress and self.is_pretraining:
-            weight, bias = self.weight, self.bias
-        else:
-            weight, bias = self.prune_and_quantize(self.weight, self.bias)
-        if self.pruning_method == "wanda":
-            self.pruning_layer.collect_input(x, self.weight, self.training)
+        weight, bias, x = self.pre_forward(self.weight, self.bias, x)
         x = F.conv1d(
             input=x,
             weight=weight,
@@ -259,8 +257,7 @@ def forward(self, x):
             dilation=self.dilation,
             groups=self.groups,
         )
-        if self.pruning_method == "activation_pruning":
-            self.pruning_layer.collect_output(x, self.training)
+        x = self.post_forward(x)
         return x
 
 
@@ -277,11 +274,11 @@ def add_compression_layers_torch(model, config, input_shape, device="cuda"):
 
 
 class QuantizedPooling(nn.Module):
-    def __init__(self, config, layer):
+    def __init__(self, config, layer, quantize_input=True, quantize_output=False):
         super().__init__()
-        self.f = torch.tensor(config.quantization_parameters.default_fractional_bits)
-        self.i = torch.tensor(config.quantization_parameters.default_integer_bits)
-        self.overflow = "SAT_SYM" if config.quantization_parameters.use_symmetric_quantization else "SAT"
+        self.f_output = self.f_input = torch.tensor(config.quantization_parameters.default_fractional_bits)
+        self.i_output = self.i_input = torch.tensor(config.quantization_parameters.default_integer_bits)
+        self.overflow = config.quantization_parameters.overflow
         self.config = config
         self.hgq_heterogeneous = config.quantization_parameters.hgq_heterogeneous
         self.is_pretraining = True
@@ -290,16 +287,29 @@ def __init__(self, config, layer):
         self.round_mode = config.quantization_parameters.round_mode
         self.use_hgq = config.quantization_parameters.use_high_granularity_quantization
         self.pooling = layer
+        self.enable_quantization = config.quantization_parameters.enable_quantization
         self.use_fitcompress = config.fitcompress_parameters.enable_fitcompress
         self.post_fitcompress_calibration = False
         self.saved_inputs = []
         self.hgq_gamma = config.quantization_parameters.hgq_gamma
+        self.quantize_input = quantize_input
+        self.quantize_output = quantize_output
 
     def build(self, input_shape):
-        self.quantizer = PyTorchQuantizer(
+        self.input_quantizer = PyTorchQuantizer(
+            k=torch.tensor(1.0),
+            i=self.i_input,
+            f=self.f_input,
+            overflow=self.overflow,
+            round_mode=self.round_mode,
+            is_heterogeneous=self.use_hgq,
+            is_data=True,
+            hgq_gamma=self.hgq_gamma,
+        )
+        self.output_quantizer = PyTorchQuantizer(
             k=torch.tensor(1.0),
-            i=self.i,
-            f=self.f,
+            i=self.i_output,
+            f=self.f_output,
             overflow=self.overflow,
             round_mode=self.round_mode,
             is_heterogeneous=self.use_hgq,
@@ -307,7 +317,9 @@ def build(self, input_shape):
             hgq_gamma=self.hgq_gamma,
         )
         if self.use_hgq:
-            self.quantizer.quantizer.build(input_shape)
+            self.input_quantizer.quantizer.build(input_shape)
+            output_shape = self.pooling(torch.rand(input_shape)).shape
+            self.output_quantizer.quantizer.build(output_shape)
 
     def set_bits(self, i, f):
         self.i = torch.tensor(i)
@@ -319,11 +331,10 @@ def post_pre_train_function(self):
 
     def hgq_loss(self):
         if self.is_pretraining:
             return 0.0
+        return (torch.sum(self.input_quantizer.quantizer.i) + torch.sum(self.input_quantizer.quantizer.f)) * self.config.quantization_parameters.hgq_gamma
 
-        return (torch.sum(self.quantizer.quantizer.i) + torch.sum(self.quantizer.quantizer.f)) * self.config.quantization_parameters.hgq_gamma
-
-    def quantize(self, x):
-        if not hasattr(self, "quantizer"):
+    def pre_pooling(self, x):
+        if not hasattr(self, "input_quantizer"):
             self.build(x.shape)
         if self.use_fitcompress and self.is_pretraining:
             if self.post_fitcompress_calibration:
@@ -331,12 +342,20 @@ def quantize(self, x):
                 self.saved_inputs.append(x)
             # During FITcompress, we do not use any quantized pooling
             return ops.average_pool(x, pool_size=1)
-        x = self.quantizer(x)
+        if self.quantize_input and self.enable_quantization:
+            x = self.input_quantizer(x)
+        return x
+
+    def post_pooling(self, x):
+        if self.quantize_output and self.enable_quantization:
+            x = self.output_quantizer(x)
         return x
 
     def forward(self, x):
+        x = self.pre_pooling(x)
         x = self.pooling(x)
-        return self.quantize(x)
+        x = self.post_pooling(x)
+        return x
 
 
 class PQBatchNorm2d(nn.BatchNorm2d):
@@ -351,47 +370,87 @@ def __init__(
         track_running_stats: bool = True,
         device=None,
         dtype=None,
+        quantize_input=True,
     ):
         super().__init__(num_features, eps, momentum, affine, track_running_stats, device=device, dtype=dtype)
-        self.f = torch.tensor(config["quantization_parameters"]["default_fractional_bits"])
-        self.i = torch.tensor(config["quantization_parameters"]["default_integer_bits"])
+        self.f_weight = self.f_input = torch.tensor(config["quantization_parameters"]["default_fractional_bits"])
+        self.i_weight = self.i_input = torch.tensor(config["quantization_parameters"]["default_integer_bits"])
         self.overflow = config["quantization_parameters"]["overflow"]
         self.round_mode = config["quantization_parameters"]["round_mode"]
+        self.use_hgq = config["quantization_parameters"]["use_high_granularity_quantization"]
+        self.hgq_gamma = config["quantization_parameters"]["hgq_gamma"]
+        self.enable_quantization = config["quantization_parameters"]["enable_quantization"]
         self.config = config
+        self.quantize_input = quantize_input
+        self._weight = nn.Parameter(self.weight.clone())
+        self._bias = nn.Parameter(self.bias.clone())
+        del self._parameters["weight"]
+        del self._parameters["bias"]
+        self.built = False
+        self.final_compression_done = False
+
+    def build(self):
+        self.built = True
+        self.input_quantizer = PyTorchQuantizer(
+            k=torch.tensor(1.0),
+            i=self.i_input,
+            f=self.f_input,
+            overflow=self.overflow,
+            round_mode=self.round_mode,
+            is_heterogeneous=self.use_hgq,
+            is_data=True,
+            hgq_gamma=self.hgq_gamma,
+        )
         self.parameter_quantizer = PyTorchQuantizer(
             k=torch.tensor(1.0),
-            i=self.i,
-            f=self.f,
+            i=self.i_weight,
+            f=self.f_weight,
             round_mode=self.round_mode,
             overflow=self.overflow,
             is_data=False,
             is_heterogeneous=False,
         )
-        self._weight = nn.Parameter(self.weight.clone())
-        self._bias = nn.Parameter(self.bias.clone())
-        del self._parameters["weight"]
-        del self._parameters["bias"]
 
-    def set_bits(self, i, f):
-        self.i = torch.tensor(i)
-        self.f = torch.tensor(f)
+    def apply_final_compression(self):
+        self._weight.data = self.weight
+        self._bias.data = self.bias
+        self.final_compression_done = True
 
     @property
     def weight(self):
-        return self.parameter_quantizer(self._weight)
+        if self.enable_quantization and not self.final_compression_done:
+            return self.parameter_quantizer(self._weight)
+        return self._weight
 
     @property
     def bias(self):
-        return self.parameter_quantizer(self._bias)
+        if self.enable_quantization and not self.final_compression_done:
+            return self.parameter_quantizer(self._bias)
+        return self._bias
 
     def set_quantization_bits(self, i, f):
-        self.i = torch.tensor(i)
-        self.f = torch.tensor(f)
+        self.i_weight = torch.tensor(i)
+        self.f_weight = torch.tensor(f)
 
     def forward(self, input: torch.Tensor) -> torch.Tensor:
+        if not self.built:
+            self.build()
+        if self.quantize_input and self.enable_quantization:
+            if self.use_hgq and not self.input_quantizer.quantizer.built:
+                self.input_quantizer.quantizer.build(input.shape)
+            input = self.input_quantizer(input)
         return super().forward(input)
 
 
+class QuantizedActivationTorchWrapper(torch.nn.Module):
+    def __init__(self, activation):
+        super().__init__()
+        self.activation = activation
+
+    def forward(self, x):
+        return self.activation(x)
+
+
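
The parameter-shadowing trick PQBatchNorm2d relies on, shown in isolation as a runnable sketch (QuantizedScale and its round-based fake quantizer are stand-ins, not part of this patch):

    import torch
    import torch.nn as nn

    class QuantizedScale(nn.BatchNorm2d):
        def __init__(self, num_features):
            super().__init__(num_features)
            # re-register the learnable tensor under a private name ...
            self._weight = nn.Parameter(self.weight.clone())
            del self._parameters["weight"]

        @property
        def weight(self):
            # ... so every read, including super().forward(), sees the
            # fake-quantized view while _weight keeps full-precision gradients
            return torch.round(self._weight * 128) / 128

    bn = QuantizedScale(4)
    print(bn.weight)   # quantized view
    print(bn._weight)  # underlying full-precision parameter
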
 class PyTorchQuantizer(nn.Module):
     # HGQ quantizer wrapper
     def __init__(self, k, i, f, overflow, round_mode, is_heterogeneous, is_data, hgq_gamma=0):
@@ -425,24 +484,106 @@ def hgq_loss(self):
 
 
 def add_layer_specific_quantization_to_model(module, config):
     for name, layer in module.named_modules():
-        if isinstance(layer, CompressedLayerBase):
+        if isinstance(layer, PQWeightBiasBase):
             if name in config.quantization_parameters.layer_specific:
-                if "weight" in config.quantization_parameters.layer_specific[name]:
-                    weight_int_bits = config.quantization_parameters.layer_specific[name]["weight"]["integer_bits"]
-                    weight_fractional_bits = config.quantization_parameters.layer_specific[name]["weight"]["fractional_bits"]
+                layer_config = config.quantization_parameters.layer_specific[name]
+                if "weight" in layer_config:
+                    weight_int_bits = layer_config["weight"]["integer_bits"]
+                    weight_fractional_bits = layer_config["weight"]["fractional_bits"]
                     layer.i_weight = torch.tensor(weight_int_bits)
                     layer.f_weight = torch.tensor(weight_fractional_bits)
-                if "bias" in config.quantization_parameters.layer_specific[name]:
-                    bias_int_bits = config.quantization_parameters.layer_specific[name]["bias"]["integer_bits"]
-                    bias_fractional_bits = config.quantization_parameters.layer_specific[name]["bias"]["fractional_bits"]
+                if "bias" in layer_config:
+                    bias_int_bits = layer_config["bias"]["integer_bits"]
+                    bias_fractional_bits = layer_config["bias"]["fractional_bits"]
                     layer.i_bias = torch.tensor(bias_int_bits)
                     layer.f_bias = torch.tensor(bias_fractional_bits)
-                layer.build(None)
-        elif layer.__class__ in [PQBatchNorm2d, QuantizedPooling, QuantizedReLU, QuantizedTanh]:
+                if "input" in layer_config:
+                    if "integer_bits" in layer_config["input"]:
+                        input_int_bits = torch.tensor(layer_config["input"]["integer_bits"])
+                        layer.i_input = input_int_bits
+                    if "fractional_bits" in layer_config["input"]:
+                        input_fractional_bits = torch.tensor(layer_config["input"]["fractional_bits"])
+                        layer.f_input = input_fractional_bits
+                    if "quantize" in layer_config["input"]:
+                        quantize = layer_config["input"]["quantize"]
+                        layer.quantize_input = quantize
+                if "output" in layer_config:
+                    if "integer_bits" in layer_config["output"]:
+                        output_int_bits = torch.tensor(layer_config["output"]["integer_bits"])
+                        layer.i_output = output_int_bits
+                    if "fractional_bits" in layer_config["output"]:
+                        output_fractional_bits = torch.tensor(layer_config["output"]["fractional_bits"])
+                        layer.f_output = output_fractional_bits
+                    if "quantize" in layer_config["output"]:
+                        quantize = layer_config["output"]["quantize"]
+                        layer.quantize_output = quantize
+
+            layer.build()
+        elif layer.__class__ in [PQBatchNorm2d]:
+            if name in config.quantization_parameters.layer_specific:
+                layer_config = config.quantization_parameters.layer_specific[name]
+                if "weight" in layer_config:
+                    i = torch.tensor(layer_config["weight"]["integer_bits"])
+                    f = torch.tensor(layer_config["weight"]["fractional_bits"])
+                    layer.i_weight = i
+                    layer.f_weight = f
+                if "input" in layer_config:
+                    if "integer_bits" in layer_config["input"]:
+                        input_int_bits = torch.tensor(layer_config["input"]["integer_bits"])
+                        layer.i_input = input_int_bits
+                    if "fractional_bits" in layer_config["input"]:
+                        input_fractional_bits = torch.tensor(layer_config["input"]["fractional_bits"])
+                        layer.f_input = input_fractional_bits
+                    if "quantize" in layer_config["input"]:
+                        quantize = layer_config["input"]["quantize"]
+                        layer.quantize_input = quantize
+        elif layer.__class__ == QuantizedPooling:
+            if name in config.quantization_parameters.layer_specific:
+                layer_config = config.quantization_parameters.layer_specific[name]
+                if "input" in layer_config:
+                    if "integer_bits" in layer_config["input"]:
+                        input_int_bits = torch.tensor(layer_config["input"]["integer_bits"])
+                        layer.i_input = input_int_bits
+                    if "fractional_bits" in layer_config["input"]:
+                        input_fractional_bits = torch.tensor(layer_config["input"]["fractional_bits"])
+                        layer.f_input = input_fractional_bits
+                    if "quantize" in layer_config["input"]:
+                        quantize = layer_config["input"]["quantize"]
+                        layer.quantize_input = quantize
+                if "output" in layer_config:
+                    if "integer_bits" in layer_config["output"]:
+                        output_int_bits = torch.tensor(layer_config["output"]["integer_bits"])
+                        layer.i_output = output_int_bits
+                    if "fractional_bits" in layer_config["output"]:
+                        output_fractional_bits = torch.tensor(layer_config["output"]["fractional_bits"])
+                        layer.f_output = output_fractional_bits
+                    if "quantize" in layer_config["output"]:
+                        quantize = layer_config["output"]["quantize"]
+                        layer.quantize_output = quantize
+
+        elif layer.__class__ == QuantizedActivationTorchWrapper:
             if name in config.quantization_parameters.layer_specific:
-                i = config.quantization_parameters.layer_specific[name]["integer_bits"]
-                f = config.quantization_parameters.layer_specific[name]["fractional_bits"]
-                layer.set_bits(i, f)
+                layer_config = config.quantization_parameters.layer_specific[name]
+                if "input" in layer_config:
+                    if "integer_bits" in layer_config["input"]:
+                        input_int_bits = torch.tensor(layer_config["input"]["integer_bits"])
+                        layer.activation.i_input = input_int_bits
+                    if "fractional_bits" in layer_config["input"]:
+                        input_fractional_bits = torch.tensor(layer_config["input"]["fractional_bits"])
+                        layer.activation.f_input = input_fractional_bits
+                    if "quantize" in layer_config["input"]:
+                        quantize = layer_config["input"]["quantize"]
+                        layer.activation.quantize_input = quantize
+                if "output" in layer_config:
+                    if "integer_bits" in layer_config["output"]:
+                        output_int_bits = torch.tensor(layer_config["output"]["integer_bits"])
+                        layer.activation.i_output = output_int_bits
+                    if "fractional_bits" in layer_config["output"]:
+                        output_fractional_bits = torch.tensor(layer_config["output"]["fractional_bits"])
+                        layer.activation.f_output = output_fractional_bits
+                    if "quantize" in layer_config["output"]:
+                        quantize = layer_config["output"]["quantize"]
+                        layer.activation.quantize_output = quantize
     return module
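
End to end, the torch-side plumbing is exercised roughly like this (train is a placeholder; the two functions named here are the ones defined in this file):

    model = add_compression_layers_torch(model, config, input_shape=(1, 3, 32, 32))
    train(model)                                  # hypothetical training loop
    model = apply_final_compression_torch(model)  # freeze masks + quantization in place

Unlike the removed remove_pruning_from_model_torch, which swapped plain nn layers back in, apply_final_compression_torch keeps the PQ modules in the model; each one writes its compressed tensors into weight/bias and sets final_compression_done.
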
+598,10 @@ def add_quantized_activations_to_model_layer(module, config): # For ReLU, if using default values, add 1 bit since values are unsigned. # Otherwise user provides bits. TODO: Find better way to do this f = config.quantization_parameters.default_fractional_bits + 1 - relu = QuantizedReLU(config, i=i, f=f) + relu = QuantizedActivationTorchWrapper(QuantizedReLU(config, i_input=i, f_input=f, i_output=i, f_output=f)) setattr(module, name, relu) elif layer.__class__ in [nn.Tanh]: - tanh = QuantizedTanh(config, i=0.0, f=f) + tanh = QuantizedActivationTorchWrapper(QuantizedTanh(config, i_input=i, f_input=f, i_output=i, f_output=f)) setattr(module, name, tanh) elif layer.__class__ in [nn.AvgPool1d, nn.AvgPool2d, nn.AvgPool3d]: new_layer = QuantizedPooling(config, layer) @@ -473,6 +614,7 @@ def add_quantized_activations_to_model_layer(module, config): momentum=layer.momentum, affine=layer.affine, track_running_stats=layer.track_running_stats, + quantize_input=True, ) setattr(module, name, new_layer) else: @@ -516,7 +658,7 @@ def add_quantized_activations_to_model_functional(module, config): def disable_pruning_from_layers(module, config): for name, layer in module.named_modules(): enable_pruning = name not in config.pruning_parameters.disable_pruning_for_layers - if layer.__class__ in [CompressedLayerLinear, CompressedLayerConv2d, CompressedLayerConv1d] and not enable_pruning: + if layer.__class__ in [PQDense, PQConv2d, PQConv1d] and not enable_pruning: layer.enable_pruning = enable_pruning return module @@ -524,15 +666,15 @@ def disable_pruning_from_layers(module, config): def add_pruning_to_model(module, config): for name, layer in module.named_children(): if layer.__class__ is nn.Linear: - sparse_layer = CompressedLayerLinear(config, layer, "linear") + sparse_layer = PQDense(config, layer, "linear") sparse_layer.pruning_layer.build(layer.weight.shape) setattr(module, name, sparse_layer) elif layer.__class__ is nn.Conv2d: - sparse_layer = CompressedLayerConv2d(config, layer, "conv") + sparse_layer = PQConv2d(config, layer, "conv") sparse_layer.pruning_layer.build(layer.weight.shape) setattr(module, name, sparse_layer) elif layer.__class__ is nn.Conv1d: - sparse_layer = CompressedLayerConv1d(config, layer, "conv") + sparse_layer = PQConv1d(config, layer, "conv") sparse_layer.pruning_layer.build(layer.weight.shape) setattr(module, name, sparse_layer) else: @@ -540,65 +682,10 @@ def add_pruning_to_model(module, config): return module -def remove_pruning_from_model_torch(module, config): - for name, layer in module.named_children(): - if isinstance(layer, CompressedLayerLinear): - if config.pruning_parameters.pruning_method == "pdp": # Find better solution later - if config.training_parameters.pruning_first: - weight = layer.weight - if layer.enable_pruning: - weight = layer.pruning_layer.get_hard_mask(weight) * weight - weight, bias = layer.quantize(weight, layer.bias) - else: - weight, bias = layer.quantize(layer.weight, layer.bias) - if layer.enable_pruning: - weight = layer.pruning_layer.get_hard_mask(weight) * weight - else: - weight, bias = layer.prune_and_quantize(layer.weight, layer.bias) - out_features = layer.out_features - bias_values = bias - in_features = layer.in_features - bias = True if bias_values is not None else False - setattr(module, name, nn.Linear(in_features=in_features, out_features=out_features, bias=bias)) - getattr(module, name).weight.data.copy_(weight) - if getattr(module, name).bias is not None: - getattr(module, name).bias.data.copy_(bias_values.data) - elif 
isinstance(layer, (CompressedLayerConv2d, CompressedLayerConv1d)): - if config.pruning_parameters.pruning_method == "pdp": # Find better solution later - if config.training_parameters.pruning_first: - weight = layer.weight - if layer.enable_pruning: - weight = layer.pruning_layer.get_hard_mask(weight) * weight - weight, bias = layer.quantize(weight, layer.bias) - else: - weight, bias = layer.quantize(layer.weight, layer.bias) - if layer.enable_pruning: - weight = layer.pruning_layer.get_hard_mask(weight) * weight - else: - weight, bias = layer.prune_and_quantize(layer.weight, layer.bias) - bias_values = bias - bias = True if bias_values is not None else False - conv = nn.Conv2d if isinstance(layer, CompressedLayerConv2d) else nn.Conv1d - setattr( - module, - name, - conv( - layer.in_channels, - layer.out_channels, - layer.kernel_size, - layer.stride, - layer.padding, - layer.dilation, - layer.groups, - bias, - layer.padding_mode, - ), - ) - getattr(module, name).weight.data.copy_(weight) - if getattr(module, name).bias is not None: - getattr(module, name).bias.data.copy_(bias_values.data) - else: - remove_pruning_from_model_torch(layer, config) +def apply_final_compression_torch(module): + for layer in module.modules(): + if isinstance(layer, (PQWeightBiasBase, PQBatchNorm2d)): + layer.apply_final_compression() return module @@ -613,37 +700,37 @@ def call_post_round_functions(model, rewind, rounds, r): def post_epoch_functions(model, epoch, total_epochs, **kwargs): for layer in model.modules(): - if isinstance(layer, (CompressedLayerConv2d, CompressedLayerConv1d, CompressedLayerLinear)): + if isinstance(layer, (PQConv2d, PQConv1d, PQDense)): layer.pruning_layer.post_epoch_function(epoch, total_epochs, **kwargs) def pre_epoch_functions(model, epoch, total_epochs): for layer in model.modules(): - if isinstance(layer, (CompressedLayerConv2d, CompressedLayerConv1d, CompressedLayerLinear)): + if isinstance(layer, (PQConv2d, PQConv1d, PQDense)): layer.pruning_layer.pre_epoch_function(epoch, total_epochs) def post_round_functions(model): for layer in model.modules(): - if isinstance(layer, (CompressedLayerConv2d, CompressedLayerConv1d, CompressedLayerLinear)): + if isinstance(layer, (PQConv2d, PQConv1d, PQDense)): layer.pruning_layer.post_round_function() def save_weights_functions(model): for layer in model.modules(): - if isinstance(layer, (CompressedLayerConv2d, CompressedLayerConv1d, CompressedLayerLinear)): + if isinstance(layer, (PQConv2d, PQConv1d, PQDense)): layer.save_weights() def rewind_weights_functions(model): for layer in model.modules(): - if isinstance(layer, (CompressedLayerConv2d, CompressedLayerConv1d, CompressedLayerLinear)): + if isinstance(layer, (PQConv2d, PQConv1d, PQDense)): layer.rewind_weights() def pre_finetune_functions(model): for layer in model.modules(): - if isinstance(layer, (CompressedLayerConv2d, CompressedLayerConv1d, CompressedLayerLinear)): + if isinstance(layer, (PQConv2d, PQConv1d, PQDense)): layer.pruning_layer.pre_finetune_function() @@ -656,15 +743,15 @@ def post_pretrain_functions(model, config, train_loader=None, loss_func=None): # idx = 0 for layer in model.modules(): - if isinstance(layer, (CompressedLayerConv2d, CompressedLayerConv1d, CompressedLayerLinear)): + if isinstance(layer, (PQConv2d, PQConv1d, PQDense)): layer.pruning_layer.post_pre_train_function() layer.post_pre_train_function() # layer.pruning_layer.mask = pruning_mask_importance_scores[idx] # idx += 1 - elif isinstance(layer, (QuantizedReLU, QuantizedTanh, QuantizedPooling)): - 
layer.post_pre_train_function()
+        elif isinstance(layer, QuantizedActivationTorchWrapper):
+            layer.activation.post_pre_train_function()
+        elif isinstance(layer, QuantizedPooling):
+            layer.post_pre_train_function()
     if config.pruning_parameters.pruning_method == "pdp" or (
         config.pruning_parameters.pruning_method == "wanda" and config.pruning_parameters.calculate_pruning_budget
     ):
@@ -679,7 +766,7 @@ def pdp_setup(model, config):
     """
     global_weights = None
     for layer in model.modules():
-        if isinstance(layer, (CompressedLayerConv2d, CompressedLayerConv1d, CompressedLayerLinear)):
+        if isinstance(layer, (PQConv2d, PQConv1d, PQDense)):
             if global_weights is None:
                 global_weights = layer.weight.flatten()
             else:
@@ -691,7 +778,7 @@
     global_weights_below_threshold = torch.where(abs_global_weights < threshold, 1, 0)
     idx = 0
     for layer in model.modules():
-        if isinstance(layer, (CompressedLayerConv2d, CompressedLayerConv1d, CompressedLayerLinear)):
+        if isinstance(layer, (PQConv2d, PQConv1d, PQDense)):
             weight_size = layer.weight.numel()
             w = torch.sum(global_weights_below_threshold[idx : idx + weight_size])
             layer.pruning_layer.init_r = w / weight_size
@@ -704,7 +791,7 @@ def get_layer_keep_ratio_torch(model):
     total_w = 0
     remaining_weights = 0
     for layer in model.modules():
-        if isinstance(layer, (CompressedLayerConv2d, CompressedLayerConv1d, CompressedLayerLinear)):
+        if isinstance(layer, (PQConv2d, PQConv1d, PQDense)):
             if layer.pruning_first:
                 weight = layer.weight
                 if layer.enable_pruning:
@@ -730,12 +817,15 @@ def get_layer_keep_ratio_torch(model):

 def get_model_losses_torch(model, losses):
     for layer in model.modules():
-        if isinstance(layer, (CompressedLayerConv2d, CompressedLayerConv1d, CompressedLayerLinear)):
+        if isinstance(layer, (PQConv2d, PQConv1d, PQDense)):
             loss = layer.pruning_layer.calculate_additional_loss()
             if layer.use_hgq:
                 loss += layer.hgq_loss()
             losses += loss
-        elif isinstance(layer, (QuantizedReLU, QuantizedTanh, QuantizedPooling)):
+        elif isinstance(layer, (QuantizedActivationTorchWrapper)):
+            if layer.activation.use_hgq:
+                losses += layer.activation.hgq_loss()
+        elif isinstance(layer, QuantizedPooling):
             if layer.use_hgq:
                 losses += layer.hgq_loss()
     return losses
@@ -746,15 +836,29 @@ def create_default_layer_quantization_pruning_config(model):
     for name, layer in model.named_modules():
         if layer.__class__ in [nn.Linear, nn.Conv1d, nn.Conv2d]:
             if layer.bias is None:
-                config["layer_specific"][name] = {"weight": {"integer_bits": 0, "fractional_bits": 7}}
+                config["layer_specific"][name] = {
+                    "input": {"integer_bits": 0, "fractional_bits": 7, "quantize": True},
+                    "weight": {"integer_bits": 0, "fractional_bits": 7},
+                    "output": {"integer_bits": 0, "fractional_bits": 7, "quantize": True},
+                }
             else:
                 config["layer_specific"][name] = {
+                    "input": {"integer_bits": 0, "fractional_bits": 7, "quantize": True},
                     "weight": {"integer_bits": 0, "fractional_bits": 7},
                     "bias": {"integer_bits": 0, "fractional_bits": 7},
+                    "output": {"integer_bits": 0, "fractional_bits": 7, "quantize": True},
                 }
             config["disable_pruning_for_layers"].append(name)
-        elif layer.__class__ in [nn.BatchNorm2d, nn.Tanh, nn.ReLU, nn.AvgPool1d, nn.AvgPool2d, nn.AvgPool3d]:
-            config["layer_specific"][name] = {"integer_bits": 0, "fractional_bits": 7}
+        elif layer.__class__ in [nn.Tanh, nn.ReLU, nn.AvgPool1d, nn.AvgPool2d, nn.AvgPool3d]:
+            config["layer_specific"][name] = {
+                "input": {"quantize": True, "integer_bits": 0.0, "fractional_bits": 7.0},
+                "output": {"quantize": True, "integer_bits": 0.0, "fractional_bits": 7.0},
+            }
+        elif layer.__class__ in
[nn.BatchNorm2d]: + config["layer_specific"][name] = { + "input": {"quantize": True, "integer_bits": 0.0, "fractional_bits": 7.0}, + "weight": {"integer_bits": 0, "fractional_bits": 7.0}, + } return config diff --git a/tests/test_keras_compression_layers.py b/tests/test_keras_compression_layers.py index 0dab228..05856fb 100644 --- a/tests/test_keras_compression_layers.py +++ b/tests/test_keras_compression_layers.py @@ -18,15 +18,15 @@ from pquant.core.activations_quantizer import QuantizedReLU, QuantizedTanh from pquant.core.tf_impl.compressed_layers_tf import ( CompressedLayerConv1dKeras, - CompressedLayerConv2dKeras, CompressedLayerDenseKeras, - CompressedLayerSeparableConv2dKeras, + PQConv2d, + PQSeparableConv2d, QuantizedPooling, add_compression_layers_tf, + apply_final_compression_tf, get_layer_keep_ratio_tf, post_pretrain_functions, pre_finetune_functions, - remove_pruning_from_model_tf, ) @@ -45,6 +45,11 @@ def _to_obj(x): STEPS = 16 +@pytest.fixture(autouse=True) +def run_around_tests(): + keras.backend.clear_session() + + @pytest.fixture def config_pdp(): cfg = { @@ -61,6 +66,8 @@ def config_pdp(): "quantization_parameters": { "default_integer_bits": 0.0, "default_fractional_bits": 7.0, + "default_data_keep_negatives": 0.0, + "default_weight_keep_negatives": 1.0, "enable_quantization": False, "hgq_gamma": 0.0003, "hgq_heterogeneous": True, @@ -93,6 +100,8 @@ def config_ap(): "quantization_parameters": { "default_integer_bits": 0.0, "default_fractional_bits": 7.0, + "default_data_keep_negatives": 0.0, + "default_weight_keep_negatives": 1.0, "enable_quantization": False, "hgq_gamma": 0.0003, "hgq_heterogeneous": True, @@ -128,6 +137,8 @@ def config_wanda(): "quantization_parameters": { "default_integer_bits": 0.0, "default_fractional_bits": 7.0, + "default_data_keep_negatives": 0.0, + "default_weight_keep_negatives": 1.0, "enable_quantization": False, "hgq_gamma": 0.0003, "hgq_heterogeneous": True, @@ -159,6 +170,8 @@ def config_cs(): "quantization_parameters": { "default_integer_bits": 0.0, "default_fractional_bits": 7.0, + "default_data_keep_negatives": 0.0, + "default_weight_keep_negatives": 1.0, "enable_quantization": False, "hgq_gamma": 0.0003, "hgq_heterogeneous": True, @@ -214,7 +227,7 @@ def test_conv2d_call(config_pdp, conv2d_input): layer_to_replace = Conv2D(OUT_FEATURES, KERNEL_SIZE, use_bias=False, padding="same") layer_to_replace.build(conv2d_input.shape) out = layer_to_replace(conv2d_input) - layer = CompressedLayerConv2dKeras(config_pdp, layer_to_replace, "conv") + layer = PQConv2d(config_pdp, layer_to_replace, "conv") layer.build(conv2d_input.shape) layer.weight.assign(layer_to_replace.kernel) out2 = layer(conv2d_input) @@ -225,7 +238,7 @@ def test_separable_conv2d_call(config_pdp, conv2d_input): layer_to_replace = SeparableConv2D(OUT_FEATURES, KERNEL_SIZE, use_bias=False, padding="same") layer_to_replace.build(conv2d_input.shape) out = layer_to_replace(conv2d_input) - layer = CompressedLayerSeparableConv2dKeras(config_pdp, layer_to_replace) + layer = PQSeparableConv2d(config_pdp, layer_to_replace) layer.depthwise_conv.build(conv2d_input.shape) layer.pointwise_conv.build(conv2d_input.shape) layer.depthwise_conv.weight.assign(layer_to_replace.depthwise_kernel) @@ -258,16 +271,16 @@ def test_separable_conv2d_add_remove_layers(config_pdp, conv2d_input): output1 = model(conv2d_input) - model = remove_pruning_from_model_tf(model, config_pdp) + model = apply_final_compression_tf(model) output2 = model(conv2d_input) assert ops.all(ops.equal(output1, output2)) 
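    # Illustrative note (added for exposition, not part of the original test): the
    # invariant exercised here is that apply_final_compression_tf bakes the hard
    # pruning mask and the weight quantization into the stored weights, so the
    # forward pass is unchanged (the output-equality assert above) while the
    # kernels themselves become sparse; the nonzero-count checks just below
    # verify that sparsity against the 50% masks.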
expected_nonzero_count_depthwise = ops.count_nonzero(mask_50pct_dw) - nonzero_count_depthwise = ops.count_nonzero(model.layers[1].depthwise_kernel) + nonzero_count_depthwise = ops.count_nonzero(model.layers[1].depthwise_conv.weight) assert ops.equal(expected_nonzero_count_depthwise, nonzero_count_depthwise) expected_nonzero_count_pointwise = ops.count_nonzero(mask_50pct_pw) - nonzero_count_pointwise = ops.count_nonzero(model.layers[1].pointwise_kernel) + nonzero_count_pointwise = ops.count_nonzero(model.layers[1].pointwise_conv.weight) assert ops.equal(expected_nonzero_count_pointwise, nonzero_count_pointwise) @@ -291,7 +304,7 @@ def test_separable_conv2d_get_layer_keep_ratio(config_pdp, conv2d_input): model.layers[1].pointwise_conv.pruning_layer.mask = mask_50pct_pw ratio1 = get_layer_keep_ratio_tf(model) - model = remove_pruning_from_model_tf(model, config_pdp) + model = apply_final_compression_tf(model) ratio2 = get_layer_keep_ratio_tf(model) assert ops.equal(ratio1, ratio2) @@ -349,11 +362,11 @@ def test_dense_add_remove_layers(config_pdp, dense_input): mask_50pct = ops.reshape(keras.random.shuffle(mask_50pct), model.layers[1].pruning_layer.mask.shape) model.layers[1].pruning_layer.mask = mask_50pct output1 = model(dense_input) - model = remove_pruning_from_model_tf(model, config_pdp) + model = apply_final_compression_tf(model) output2 = model(dense_input) assert ops.all(ops.equal(output1, output2)) expected_nonzero_count = ops.count_nonzero(mask_50pct) - nonzero_count = ops.count_nonzero(model.layers[1].kernel) + nonzero_count = ops.count_nonzero(model.layers[1].weight) assert ops.equal(expected_nonzero_count, nonzero_count) @@ -371,11 +384,11 @@ def test_conv2d_add_remove_layers(config_pdp, conv2d_input): mask_50pct = ops.reshape(keras.random.shuffle(mask_50pct), model.layers[1].pruning_layer.mask.shape) model.layers[1].pruning_layer.mask = mask_50pct output1 = model(conv2d_input) - model = remove_pruning_from_model_tf(model, config_pdp) + model = apply_final_compression_tf(model) output2 = model(conv2d_input) assert ops.all(ops.equal(output1, output2)) expected_nonzero_count = ops.count_nonzero(mask_50pct) - nonzero_count = ops.count_nonzero(model.layers[1].kernel) + nonzero_count = ops.count_nonzero(model.layers[1].weight) assert ops.equal(expected_nonzero_count, nonzero_count) @@ -393,11 +406,11 @@ def test_depthwise_conv2d_add_remove_layers(config_pdp, conv2d_input): mask_50pct = ops.reshape(keras.random.shuffle(mask_50pct), model.layers[1].pruning_layer.mask.shape) model.layers[1].pruning_layer.mask = mask_50pct output1 = model(conv2d_input) - model = remove_pruning_from_model_tf(model, config_pdp) + model = apply_final_compression_tf(model) output2 = model(conv2d_input) assert ops.all(ops.equal(output1, output2)) expected_nonzero_count = ops.count_nonzero(mask_50pct) - nonzero_count = ops.count_nonzero(model.layers[1].kernel) + nonzero_count = ops.count_nonzero(model.layers[1].weight) assert ops.equal(expected_nonzero_count, nonzero_count) @@ -415,11 +428,11 @@ def test_conv1d_add_remove_layers(config_pdp, conv1d_input): mask_50pct = ops.reshape(keras.random.shuffle(mask_50pct), model.layers[1].pruning_layer.mask.shape) model.layers[1].pruning_layer.mask = mask_50pct output1 = model(conv1d_input) - model = remove_pruning_from_model_tf(model, config_pdp) + model = apply_final_compression_tf(model) output2 = model(conv1d_input) assert ops.all(ops.equal(output1, output2)) expected_nonzero_count = ops.count_nonzero(mask_50pct) - nonzero_count = 
ops.count_nonzero(model.layers[1].kernel) + nonzero_count = ops.count_nonzero(model.layers[1].weight) assert ops.equal(expected_nonzero_count, nonzero_count) @@ -437,7 +450,7 @@ def test_dense_get_layer_keep_ratio(config_pdp, dense_input): mask_50pct = ops.reshape(keras.random.shuffle(mask_50pct), model.layers[1].pruning_layer.mask.shape) model.layers[1].pruning_layer.mask = mask_50pct ratio1 = get_layer_keep_ratio_tf(model) - model = remove_pruning_from_model_tf(model, config_pdp) + model = apply_final_compression_tf(model) ratio2 = get_layer_keep_ratio_tf(model) assert ops.equal(ratio1, ratio2) assert ops.equal(ops.count_nonzero(mask_50pct) / ops.size(mask_50pct), ratio1) @@ -457,7 +470,7 @@ def test_conv2d_get_layer_keep_ratio(config_pdp, conv2d_input): mask_50pct = ops.reshape(keras.random.shuffle(mask_50pct), model.layers[1].pruning_layer.mask.shape) model.layers[1].pruning_layer.mask = mask_50pct ratio1 = get_layer_keep_ratio_tf(model) - model = remove_pruning_from_model_tf(model, config_pdp) + model = apply_final_compression_tf(model) ratio2 = get_layer_keep_ratio_tf(model) assert ops.equal(ratio1, ratio2) assert ops.equal(ops.count_nonzero(mask_50pct) / ops.size(mask_50pct), ratio1) @@ -477,7 +490,7 @@ def test_depthwise_conv2d_get_layer_keep_ratio(config_pdp, conv2d_input): mask_50pct = ops.reshape(keras.random.shuffle(mask_50pct), model.layers[1].pruning_layer.mask.shape) model.layers[1].pruning_layer.mask = mask_50pct ratio1 = get_layer_keep_ratio_tf(model) - model = remove_pruning_from_model_tf(model, config_pdp) + model = apply_final_compression_tf(model) ratio2 = get_layer_keep_ratio_tf(model) assert ops.equal(ratio1, ratio2) assert ops.equal(ops.count_nonzero(mask_50pct) / ops.size(mask_50pct), ratio1) @@ -497,7 +510,7 @@ def test_conv1d_get_layer_keep_ratio(config_pdp, conv1d_input): mask_50pct = ops.reshape(keras.random.shuffle(mask_50pct), model.layers[1].pruning_layer.mask.shape) model.layers[1].pruning_layer.mask = mask_50pct ratio1 = get_layer_keep_ratio_tf(model) - model = remove_pruning_from_model_tf(model, config_pdp) + model = apply_final_compression_tf(model) ratio2 = get_layer_keep_ratio_tf(model) assert ops.equal(ratio1, ratio2) assert ops.equal(ops.count_nonzero(mask_50pct) / ops.size(mask_50pct), ratio1) @@ -1308,7 +1321,7 @@ def test_hgq_weight_shape(config_pdp, dense_input): model = add_compression_layers_tf(model, config_pdp, dense_input.shape) assert model.layers[1].weight_quantizer.quantizer._i.shape == model.layers[1].weight.shape layer_2_input_shape = [1] + list(model.layers[2].input.shape[1:]) - assert model.layers[2].quantizer.quantizer._i.shape == layer_2_input_shape + assert model.layers[2].input_quantizer.quantizer._i.shape == layer_2_input_shape def test_replace_weight_with_original_value(config_pdp, conv2d_input, conv1d_input, dense_input): @@ -1356,7 +1369,7 @@ def test_set_activation_custom_bits_hgq(config_pdp, conv2d_input): model = add_compression_layers_tf(model, config_pdp, conv2d_input.shape) for m in model.layers: - if isinstance(m, (CompressedLayerConv2dKeras)): + if isinstance(m, (PQConv2d)): assert m.i_weight == 0.0 assert m.i_bias == 0.0 assert ops.all(m.weight_quantizer.quantizer.i == 0.0) @@ -1367,31 +1380,31 @@ def test_set_activation_custom_bits_hgq(config_pdp, conv2d_input): assert ops.all(m.weight_quantizer.quantizer.f == 7.0) assert ops.all(m.bias_quantizer.quantizer.f == 7.0) elif isinstance(m, (QuantizedTanh)): - assert m.i == 0.0 - assert m.f == 7.0 - assert ops.all(m.quantizer.quantizer.i == 0.0) - assert 
ops.all(m.quantizer.quantizer.f == 7.0) + assert m.i_input == 0.0 + assert m.f_input == 7.0 + assert ops.all(m.input_quantizer.quantizer.i == 0.0) + assert ops.all(m.input_quantizer.quantizer.f == 7.0) elif isinstance(m, (QuantizedReLU)): - assert m.i == 0.0 - assert m.f == 8.0 - assert ops.all(m.quantizer.quantizer.i == 0.0) - assert ops.all(m.quantizer.quantizer.f == 8.0) + assert m.i_input == 0.0 + assert m.f_input == 8.0 + assert ops.all(m.input_quantizer.quantizer.i == 0.0) + assert ops.all(m.input_quantizer.quantizer.f == 8.0) elif isinstance(m, (QuantizedPooling)): - assert m.i == 0.0 - assert m.f == 7.0 - assert ops.all(m.hgq.quantizer.i == 0.0) - assert ops.all(m.hgq.quantizer.f == 7.0) + assert m.i_input == 0.0 + assert m.f_input == 7.0 + assert ops.all(m.input_quantizer.quantizer.i == 0.0) + assert ops.all(m.input_quantizer.quantizer.f == 7.0) config_pdp.quantization_parameters.layer_specific = { - 'conv2d_17': { + 'conv2d': { 'weight': {'integer_bits': 1.0, 'fractional_bits': 3.0}, 'bias': {'integer_bits': 2.0, 'fractional_bits': 4.0}, }, - 're_lu_6': {'integer_bits': 1.0, 'fractional_bits': 3.0}, - 'average_pooling2d_2': {'integer_bits': 1.0, 'fractional_bits': 3.0}, - 'activation_6': {'integer_bits': 0.0, 'fractional_bits': 3.0}, + 're_lu': {"input": {'integer_bits': 1.0, 'fractional_bits': 3.0}}, + 'average_pooling2d': {"input": {'integer_bits': 1.0, 'fractional_bits': 3.0}}, + 'activation': {"input": {'integer_bits': 0.0, 'fractional_bits': 3.0}}, } - + keras.backend.clear_session() inputs = keras.Input(shape=conv2d_input.shape[1:]) out = Conv2D(OUT_FEATURES, kernel_size=KERNEL_SIZE, use_bias=True)(inputs) out = ReLU()(out) @@ -1400,7 +1413,7 @@ def test_set_activation_custom_bits_hgq(config_pdp, conv2d_input): model = keras.Model(inputs=inputs, outputs=out) model = add_compression_layers_tf(model, config_pdp, conv2d_input.shape) for m in model.layers: - if isinstance(m, (CompressedLayerConv2dKeras)): + if isinstance(m, (PQConv2d)): assert m.i_weight == 1.0 assert m.i_bias == 2.0 assert ops.all(m.weight_quantizer.quantizer.i == 1.0) @@ -1411,20 +1424,20 @@ def test_set_activation_custom_bits_hgq(config_pdp, conv2d_input): assert ops.all(m.weight_quantizer.quantizer.f == 3.0) assert ops.all(m.bias_quantizer.quantizer.f == 4.0) elif isinstance(m, (QuantizedTanh)): - assert m.i == 0.0 - assert m.f == 3.0 - assert ops.all(m.quantizer.quantizer.i == 0.0) - assert ops.all(m.quantizer.quantizer.f == 3.0) + assert m.i_input == 0.0 + assert m.f_input == 3.0 + assert ops.all(m.input_quantizer.quantizer.i == 0.0) + assert ops.all(m.input_quantizer.quantizer.f == 3.0) elif isinstance(m, (QuantizedReLU)): - assert m.i == 1.0 - assert m.f == 3.0 - assert ops.all(m.quantizer.quantizer.i == 1.0) - assert ops.all(m.quantizer.quantizer.f == 3.0) + assert m.i_input == 1.0 + assert m.f_input == 3.0 + assert ops.all(m.input_quantizer.quantizer.i == 1.0) + assert ops.all(m.input_quantizer.quantizer.f == 3.0) elif isinstance(m, (QuantizedPooling)): - assert m.i == 1.0 - assert m.f == 3.0 - assert ops.all(m.hgq.quantizer.i == 1.0) - assert ops.all(m.hgq.quantizer.f == 3.0) + assert m.i_input == 1.0 + assert m.f_input == 3.0 + assert ops.all(m.input_quantizer.quantizer.i == 1.0) + assert ops.all(m.input_quantizer.quantizer.f == 3.0) def test_set_activation_custom_bits_quantizer(config_pdp, conv2d_input): @@ -1439,32 +1452,32 @@ def test_set_activation_custom_bits_quantizer(config_pdp, conv2d_input): model = add_compression_layers_tf(model, config_pdp, conv2d_input.shape) for m in model.layers: - if 
isinstance(m, (CompressedLayerConv2dKeras)): + if isinstance(m, (PQConv2d)): assert m.i_weight == 0.0 assert m.i_bias == 0.0 assert m.f_weight == 7.0 assert m.f_bias == 7.0 elif isinstance(m, (QuantizedTanh)): - assert m.i == 0.0 - assert m.f == 7.0 + assert m.i_input == 0.0 + assert m.f_input == 7.0 elif isinstance(m, (QuantizedReLU)): - assert m.i == 0.0 - assert m.f == 8.0 + assert m.i_input == 0.0 + assert m.f_input == 8.0 elif isinstance(m, (QuantizedPooling)): - assert m.i == 0.0 - assert m.f == 7.0 + assert m.i_input == 0.0 + assert m.f_input == 7.0 config_pdp.quantization_parameters.layer_specific = { - 'conv2d_19': { + 'conv2d': { 'weight': {'integer_bits': 1.0, 'fractional_bits': 3.0}, 'bias': {'integer_bits': 2.0, 'fractional_bits': 4.0}, }, - 're_lu_8': {'integer_bits': 1.0, 'fractional_bits': 3.0}, - 'average_pooling2d_4': {'integer_bits': 1.0, 'fractional_bits': 3.0}, - 'activation_8': {'integer_bits': 0.0, 'fractional_bits': 3.0}, + 're_lu': {"input": {'integer_bits': 1.0, 'fractional_bits': 3.0}}, + 'average_pooling2d': {"input": {'integer_bits': 1.0, 'fractional_bits': 3.0}}, + 'activation': {"input": {'integer_bits': 0.0, 'fractional_bits': 3.0}}, } - + keras.backend.clear_session() inputs = keras.Input(shape=conv2d_input.shape[1:]) out = Conv2D(OUT_FEATURES, kernel_size=KERNEL_SIZE, use_bias=True)(inputs) out = ReLU()(out) @@ -1473,18 +1486,18 @@ def test_set_activation_custom_bits_quantizer(config_pdp, conv2d_input): model = keras.Model(inputs=inputs, outputs=out) model = add_compression_layers_tf(model, config_pdp, conv2d_input.shape) for m in model.layers: - if isinstance(m, (CompressedLayerConv2dKeras)): + if isinstance(m, (PQConv2d)): assert m.i_weight == 1.0 assert m.i_bias == 2.0 assert m.f_weight == 3.0 assert m.f_bias == 4.0 elif isinstance(m, (QuantizedTanh)): - assert m.i == 0.0 - assert m.f == 3.0 + assert m.i_input == 0.0 + assert m.f_input == 3.0 elif isinstance(m, (QuantizedReLU)): - assert m.i == 1.0 - assert m.f == 3.0 + assert m.i_input == 1.0 + assert m.f_input == 3.0 elif isinstance(m, (QuantizedPooling)): - assert m.i == 1.0 - assert m.f == 3.0 + assert m.i_input == 1.0 + assert m.f_input == 3.0 diff --git a/tests/test_torch_compression_layers.py b/tests/test_torch_compression_layers.py index 7b90774..1c62cb4 100644 --- a/tests/test_torch_compression_layers.py +++ b/tests/test_torch_compression_layers.py @@ -6,21 +6,22 @@ import torch from keras import ops from torch import nn -from torch.nn import AvgPool2d, Conv1d, Conv2d, Linear, ReLU, Tanh +from torch.nn import AvgPool2d, BatchNorm2d, Conv1d, Conv2d, Linear, ReLU, Tanh from pquant import post_training_prune from pquant.core.activations_quantizer import QuantizedReLU, QuantizedTanh from pquant.core.torch_impl.compressed_layers_torch import ( - CompressedLayerBase, - CompressedLayerConv1d, - CompressedLayerConv2d, - CompressedLayerLinear, + PQConv1d, + PQConv2d, + PQDense, + PQWeightBiasBase, + QuantizedActivationTorchWrapper, QuantizedPooling, add_compression_layers_torch, + apply_final_compression_torch, get_layer_keep_ratio_torch, post_pretrain_functions, pre_finetune_functions, - remove_pruning_from_model_torch, ) @@ -55,6 +56,8 @@ def config_pdp(): "quantization_parameters": { "default_integer_bits": 0.0, "default_fractional_bits": 7.0, + "default_data_keep_negatives": 0.0, + "default_weight_keep_negatives": 1.0, "enable_quantization": False, "hgq_gamma": 0.0003, "hgq_heterogeneous": True, @@ -87,6 +90,8 @@ def config_ap(): "quantization_parameters": { "default_integer_bits": 0.0, 
"default_fractional_bits": 7.0, + "default_data_keep_negatives": 0.0, + "default_weight_keep_negatives": 1.0, "enable_quantization": False, "hgq_gamma": 0.0003, "hgq_heterogeneous": True, @@ -122,6 +127,8 @@ def config_wanda(): "quantization_parameters": { "default_integer_bits": 0.0, "default_fractional_bits": 7.0, + "default_data_keep_negatives": 0.0, + "default_weight_keep_negatives": 1.0, "enable_quantization": False, "hgq_gamma": 0.0003, "hgq_heterogeneous": True, @@ -208,7 +215,7 @@ def forward(self, x): def test_dense_call(config_pdp, dense_input): layer_to_replace = Linear(IN_FEATURES, OUT_FEATURES, bias=False) out = layer_to_replace(dense_input) - layer = CompressedLayerLinear(config_pdp, layer_to_replace, "linear") + layer = PQDense(config_pdp, layer_to_replace, "linear") layer.weight.data = layer_to_replace.weight.data out2 = layer(dense_input) assert ops.all(ops.equal(out, out2)) @@ -217,7 +224,7 @@ def test_dense_call(config_pdp, dense_input): def test_conv2d_call(config_pdp, conv2d_input): layer_to_replace = Conv2d(IN_FEATURES, OUT_FEATURES, KERNEL_SIZE, bias=False, padding="same") out = layer_to_replace(conv2d_input) - layer = CompressedLayerConv2d(config_pdp, layer_to_replace, "conv") + layer = PQConv2d(config_pdp, layer_to_replace, "conv") layer.weight.data = layer_to_replace.weight.data out2 = layer(conv2d_input) assert ops.all(ops.equal(out, out2)) @@ -226,7 +233,7 @@ def test_conv2d_call(config_pdp, conv2d_input): def test_conv1d_call(config_pdp, conv1d_input): layer_to_replace = Conv1d(IN_FEATURES, OUT_FEATURES, KERNEL_SIZE, stride=2, bias=False) out = layer_to_replace(conv1d_input) - layer = CompressedLayerConv1d(config_pdp, layer_to_replace, "conv") + layer = PQConv1d(config_pdp, layer_to_replace, "conv") layer.weight.data = layer_to_replace.weight.data out2 = layer(conv1d_input) assert ops.all(ops.equal(out, out2)) @@ -244,7 +251,7 @@ def test_dense_add_remove_layers(config_pdp, dense_input): mask_50pct = ops.reshape(keras.random.shuffle(mask_50pct), model.submodule.pruning_layer.mask.shape) model.submodule.pruning_layer.mask = mask_50pct output1 = model(dense_input) - model = remove_pruning_from_model_torch(model, config_pdp) + model = apply_final_compression_torch(model) output2 = model(dense_input) assert ops.all(ops.equal(output1, output2)) expected_nonzero_count = ops.count_nonzero(mask_50pct) @@ -265,7 +272,7 @@ def test_conv2d_add_remove_layers(config_pdp, conv2d_input): mask_50pct = ops.reshape(keras.random.shuffle(mask_50pct), model.submodule.pruning_layer.mask.shape) model.submodule.pruning_layer.mask = mask_50pct output1 = model(conv2d_input) - model = remove_pruning_from_model_torch(model, config_pdp) + model = apply_final_compression_torch(model) output2 = model(conv2d_input) assert ops.all(ops.equal(output1, output2)) expected_nonzero_count = ops.count_nonzero(mask_50pct) @@ -286,7 +293,7 @@ def test_conv1d_add_remove_layers(config_pdp, conv1d_input): mask_50pct = ops.reshape(keras.random.shuffle(mask_50pct), model.submodule.pruning_layer.mask.shape) model.submodule.pruning_layer.mask = mask_50pct output1 = model(conv1d_input) - model = remove_pruning_from_model_torch(model, config_pdp) + model = apply_final_compression_torch(model) output2 = model(conv1d_input) assert ops.all(ops.equal(output1, output2)) expected_nonzero_count = ops.count_nonzero(mask_50pct) @@ -307,7 +314,7 @@ def test_dense_get_layer_keep_ratio(config_pdp, dense_input): mask_50pct = ops.reshape(keras.random.shuffle(mask_50pct), model.submodule.pruning_layer.mask.shape) 
model.submodule.pruning_layer.mask = mask_50pct ratio1 = get_layer_keep_ratio_torch(model) - model = remove_pruning_from_model_torch(model, config_pdp) + model = apply_final_compression_torch(model) ratio2 = get_layer_keep_ratio_torch(model) assert ops.equal(ratio1, ratio2) assert ops.equal(ops.count_nonzero(mask_50pct) / ops.size(mask_50pct), ratio1) @@ -326,7 +333,7 @@ def test_conv2d_get_layer_keep_ratio(config_pdp, conv2d_input): mask_50pct = ops.reshape(keras.random.shuffle(mask_50pct), model.submodule.pruning_layer.mask.shape) model.submodule.pruning_layer.mask = mask_50pct ratio1 = get_layer_keep_ratio_torch(model) - model = remove_pruning_from_model_torch(model, config_pdp) + model = apply_final_compression_torch(model) ratio2 = get_layer_keep_ratio_torch(model) assert ops.equal(ratio1, ratio2) assert ops.equal(ops.count_nonzero(mask_50pct) / ops.size(mask_50pct), ratio1) @@ -345,7 +352,7 @@ def test_conv1d_get_layer_keep_ratio(config_pdp, conv1d_input): mask_50pct = ops.reshape(keras.random.shuffle(mask_50pct), model.submodule.pruning_layer.mask.shape) model.submodule.pruning_layer.mask = mask_50pct ratio1 = get_layer_keep_ratio_torch(model) - model = remove_pruning_from_model_torch(model, config_pdp) + model = apply_final_compression_torch(model) ratio2 = get_layer_keep_ratio_torch(model) assert ops.equal(ratio1, ratio2) assert ops.equal(ops.count_nonzero(mask_50pct) / ops.size(mask_50pct), ratio1) @@ -362,7 +369,7 @@ def test_check_activation(config_pdp, dense_input): layer = Linear(IN_FEATURES, OUT_FEATURES, bias=False) model = TestModel(layer, "relu") model = add_compression_layers_torch(model, config_pdp, dense_input.shape) - assert isinstance(model.activation, QuantizedReLU) + assert isinstance(model.activation, QuantizedActivationTorchWrapper) # Tanh config_pdp.quantization_parameters.enable_quantization = False @@ -375,7 +382,7 @@ def test_check_activation(config_pdp, dense_input): layer = Linear(IN_FEATURES, OUT_FEATURES, bias=False) model = TestModel(layer, "tanh") model = add_compression_layers_torch(model, config_pdp, dense_input.shape) - assert isinstance(model.activation, QuantizedTanh) + assert isinstance(model.activation, QuantizedActivationTorchWrapper) def check_keras_layer_is_built(module, is_built): @@ -413,8 +420,8 @@ def test_hgq_activation_built(config_pdp, conv2d_input): layer = Conv2d(IN_FEATURES, OUT_FEATURES, KERNEL_SIZE, bias=True) model = TestModelWithAvgPool(layer, "relu") model = add_compression_layers_torch(model, config_pdp, conv2d_input.shape) - is_built = check_keras_layer_is_built(model, []) + torch.save(model.state_dict(), "test_model.pt") assert all(is_built) layer = Conv2d(IN_FEATURES, OUT_FEATURES, KERNEL_SIZE, bias=True) @@ -506,16 +513,16 @@ def test_trigger_post_pretraining(config_pdp, dense_input): model = add_compression_layers_torch(model, config_pdp, dense_input.shape) assert model.submodule.pruning_layer.is_pretraining is True - assert model.activation.is_pretraining is True + assert model.activation.activation.is_pretraining is True assert model.submodule2.pruning_layer.is_pretraining is True - assert model.activation2.is_pretraining is True + assert model.activation2.activation.is_pretraining is True post_pretrain_functions(model, config_pdp) assert model.submodule.pruning_layer.is_pretraining is False - assert model.activation.is_pretraining is False + assert model.activation.activation.is_pretraining is False assert model.submodule2.pruning_layer.is_pretraining is False - assert model.activation2.is_pretraining is False + assert 
model.activation2.activation.is_pretraining is False def test_hgq_weight_shape(config_pdp, dense_input): @@ -529,7 +536,19 @@ def test_hgq_weight_shape(config_pdp, dense_input): post_pretrain_functions(model, config_pdp) assert model.submodule.weight_quantizer.quantizer.quantizer._i.shape == model.submodule.weight.shape - assert model.activation.quantizer.quantizer._i.shape == (1, OUT_FEATURES) + assert model.activation.activation.input_quantizer.quantizer._i.shape == (1, OUT_FEATURES) + + +def test_qbn_build(config_pdp, conv2d_input): + config_pdp["quantization_parameters"]["enable_quantization"] = True + config_pdp["quantization_parameters"]["use_high_granularity_quantization"] = True + layer = Conv2d(IN_FEATURES, OUT_FEATURES, KERNEL_SIZE, bias=False) + layer2 = BatchNorm2d(OUT_FEATURES) + model = TestModel2(layer, layer2, None, "tanh") + + model = add_compression_layers_torch(model, config_pdp, conv2d_input.shape) + post_pretrain_functions(model, config_pdp) + assert model.submodule.weight_quantizer.quantizer.quantizer._i.shape == model.submodule.weight.shape def test_set_activation_custom_bits_hgq(config_pdp, conv2d_input): @@ -541,7 +560,7 @@ def test_set_activation_custom_bits_hgq(config_pdp, conv2d_input): model = add_compression_layers_torch(model, config_pdp, conv2d_input.shape) for m in model.modules(): - if isinstance(m, (CompressedLayerBase)): + if isinstance(m, (PQWeightBiasBase)): assert m.i_weight == 0.0 assert m.i_bias == 0.0 assert torch.all(m.weight_quantizer.quantizer.quantizer.i == 0.0) @@ -552,37 +571,37 @@ def test_set_activation_custom_bits_hgq(config_pdp, conv2d_input): assert torch.all(m.weight_quantizer.quantizer.quantizer.f == 7.0) assert torch.all(m.weight_quantizer.quantizer.quantizer.f == 7.0) elif isinstance(m, (QuantizedTanh)): - assert m.i == 0.0 - assert m.f == 7.0 - assert torch.all(m.quantizer.quantizer.i == 0.0) - assert torch.all(m.quantizer.quantizer.f == 7.0) + assert m.i_input == 0.0 + assert m.f_input == 7.0 + assert torch.all(m.output_quantizer.quantizer.i == 0.0) + assert torch.all(m.output_quantizer.quantizer.f == 7.0) elif isinstance(m, (QuantizedReLU)): - assert m.i == 0.0 - assert m.f == 8.0 - assert torch.all(m.quantizer.quantizer.i == 0.0) - assert torch.all(m.quantizer.quantizer.f == 8.0) + assert m.i_input == 0.0 + assert m.f_input == 8.0 + assert torch.all(m.input_quantizer.quantizer.i == 0.0) + assert torch.all(m.input_quantizer.quantizer.f == 8.0) elif isinstance(m, QuantizedPooling): - assert m.i == 0.0 - assert m.f == 7.0 - assert torch.all(m.quantizer.quantizer.quantizer.i == 0.0) - assert torch.all(m.quantizer.quantizer.quantizer.f == 7.0) + assert m.i_input == 0.0 + assert m.f_input == 7.0 + assert torch.all(m.input_quantizer.quantizer.quantizer.i == 0.0) + assert torch.all(m.input_quantizer.quantizer.quantizer.f == 7.0) config_pdp.quantization_parameters.layer_specific = { 'submodule': { 'weight': {'integer_bits': 1, 'fractional_bits': 3}, 'bias': {'integer_bits': 2, 'fractional_bits': 4}, }, - 'submodule2': {'integer_bits': 1, 'fractional_bits': 3}, - 'activation': {'integer_bits': 0, 'fractional_bits': 4}, - 'activation2': {'integer_bits': 0, 'fractional_bits': 3}, + 'submodule2': {"input": {'integer_bits': 1, 'fractional_bits': 3}}, + 'activation': {"input": {'integer_bits': 1, 'fractional_bits': 4}}, + 'activation2': {"input": {'integer_bits': 0, 'fractional_bits': 3}}, } model = TestModel2(layer, layer2, "relu", "tanh") model = add_compression_layers_torch(model, config_pdp, conv2d_input.shape) for m in model.modules(): - 
if isinstance(m, (CompressedLayerBase)): + if isinstance(m, (PQWeightBiasBase)): assert m.i_weight == 1.0 assert m.i_bias == 2.0 assert torch.all(m.weight_quantizer.quantizer.quantizer.i == 1.0) @@ -593,20 +612,20 @@ def test_set_activation_custom_bits_hgq(config_pdp, conv2d_input): assert torch.all(m.weight_quantizer.quantizer.quantizer.f == 3.0) assert torch.all(m.bias_quantizer.quantizer.quantizer.f == 4.0) elif isinstance(m, (QuantizedTanh)): - assert m.i == 0.0 - assert m.f == 3.0 - assert torch.all(m.quantizer.quantizer.i == 0.0) - assert torch.all(m.quantizer.quantizer.f == 3.0) + assert m.i_input == 0.0 + assert m.f_input == 3.0 + assert torch.all(m.input_quantizer.quantizer.i == 0.0) + assert torch.all(m.input_quantizer.quantizer.f == 3.0) elif isinstance(m, (QuantizedReLU)): - assert m.i == 0.0 - assert m.f == 4.0 - assert torch.all(m.quantizer.quantizer.i == 0.0) - assert torch.all(m.quantizer.quantizer.f == 4.0) + assert m.i_input == 1.0 + assert m.f_input == 4.0 + assert torch.all(m.input_quantizer.quantizer.i == 1.0) + assert torch.all(m.input_quantizer.quantizer.f == 4.0) elif isinstance(m, QuantizedPooling): - assert m.i == 1.0 - assert m.f == 3.0 - assert torch.all(m.quantizer.quantizer.quantizer.i == 1.0) - assert torch.all(m.quantizer.quantizer.quantizer.f == 3.0) + assert m.i_input == 1.0 + assert m.f_input == 3.0 + assert torch.all(m.input_quantizer.quantizer.quantizer.i == 1.0) + assert torch.all(m.input_quantizer.quantizer.quantizer.f == 3.0) def test_set_activation_custom_bits_quantizer(config_pdp, conv2d_input): @@ -618,39 +637,39 @@ def test_set_activation_custom_bits_quantizer(config_pdp, conv2d_input): model = add_compression_layers_torch(model, config_pdp, conv2d_input.shape) for m in model.modules(): - if isinstance(m, (CompressedLayerBase)): + if isinstance(m, (PQWeightBiasBase)): assert m.i_weight == 0.0 assert m.f_bias == 7.0 elif isinstance(m, (QuantizedTanh)): - assert m.i == 0.0 - assert m.f == 7.0 + assert m.i_input == 0.0 + assert m.f_input == 7.0 elif isinstance(m, (QuantizedReLU)): - assert m.i == 0.0 - assert m.f == 8.0 + assert m.i_input == 0.0 + assert m.f_input == 8.0 config_pdp.quantization_parameters.layer_specific = { 'submodule': { 'weight': {'integer_bits': 1.0, 'fractional_bits': 3.0}, 'bias': {'integer_bits': 1.0, 'fractional_bits': 3.0}, }, - 'submodule2': {'integer_bits': 1.0, 'fractional_bits': 3.0}, - 'activation': {'integer_bits': 0.0, 'fractional_bits': 4.0}, - 'activation2': {'integer_bits': 0.0, 'fractional_bits': 3.0}, + 'submodule2': {"input": {'integer_bits': 1.0, 'fractional_bits': 3.0}}, + 'activation': {"input": {'integer_bits': 0.0, 'fractional_bits': 4.0}}, + 'activation2': {"input": {'integer_bits': 0.0, 'fractional_bits': 3.0}}, } model = TestModel2(layer, layer2, "relu", "tanh") model = add_compression_layers_torch(model, config_pdp, conv2d_input.shape) for m in model.modules(): - if isinstance(m, (CompressedLayerBase)): + if isinstance(m, (PQWeightBiasBase)): assert m.i_weight == 1.0 assert m.f_bias == 3.0 elif isinstance(m, (QuantizedTanh)): - assert m.i == 0.0 - assert m.f == 3.0 + assert m.i_input == 0.0 + assert m.f_input == 3.0 elif isinstance(m, (QuantizedReLU)): - assert m.i == 0.0 - assert m.f == 4.0 + assert m.i_input == 0.0 + assert m.f_input == 4.0 elif isinstance(m, QuantizedPooling): - assert m.i == 1.0 - assert m.f == 3.0 + assert m.i_input == 1.0 + assert m.f_input == 3.0 From 4a0f0c4eff56cc02864049971923b8dae9f61472 Mon Sep 17 00:00:00 2001 From: Roope Niemi Date: Mon, 13 Oct 2025 15:46:18 +0200 Subject: 
[PATCH 09/37] fix QBatchNorm in keras --- .../core/tf_impl/compressed_layers_tf.py | 99 +++++++++++++------ 1 file changed, 68 insertions(+), 31 deletions(-) diff --git a/src/pquant/core/tf_impl/compressed_layers_tf.py b/src/pquant/core/tf_impl/compressed_layers_tf.py index 815f423..c852c9d 100644 --- a/src/pquant/core/tf_impl/compressed_layers_tf.py +++ b/src/pquant/core/tf_impl/compressed_layers_tf.py @@ -121,12 +121,28 @@ def save_weights(self): def rewind_weights(self): self.weight.assign(self.init_weight) + def ebops(self): + return 0.0 + def hgq_loss(self): - if self.pruning_layer.is_pretraining: + if self.pruning_layer.is_pretraining or not self.use_hgq: return 0.0 - loss = (ops.sum(self.hgq_weight.quantizer.i) + ops.sum(self.hgq_weight.quantizer.f)) * self.hgq_gamma + loss = self.ebops() + loss += ( + ops.sum(self.weight_quantizer.quantizer.quantizer.i) + ops.sum(self.weight_quantizer.quantizer.quantizer.f) + ) * self.hgq_gamma if self.bias is not None: - loss += (ops.sum(self.hgq_bias.quantizer.i) + ops.sum(self.hgq_bias.quantizer.f)) * self.hgq_gamma + loss += ( + ops.sum(self.bias_quantizer.quantizer.quantizer.i) + ops.sum(self.bias_quantizer.quantizer.quantizer.f) + ) * self.hgq_gamma + if self.quantize_input: + loss += ( + ops.sum(self.input_quantizer.quantizer.quantizer.i) + ops.sum(self.input_quantizer.quantizer.quantizer.f) + ) * self.hgq_gamma + if self.quantize_output: + loss += ( + ops.sum(self.output_quantizer.quantizer.quantizer.i) + ops.sum(self.output_quantizer.quantizer.quantizer.f) + ) * self.hgq_gamma return loss def handle_transpose(self, x, transpose, do_transpose=False): @@ -321,7 +337,7 @@ def call(self, x, training=None): return x -class CompressedLayerConv1dKeras(PQWeightBiasBase): +class PQConv1d(PQWeightBiasBase): def __init__(self, config, layer, layer_type, quantize_input=True, quantize_output=False): super().__init__(config, layer_type, quantize_input, quantize_output) self.kernel_regularizer = layer.kernel_regularizer @@ -363,7 +379,7 @@ def call(self, x, training=None): return x -class CompressedLayerDenseKeras(PQWeightBiasBase): +class PQDense(PQWeightBiasBase): def __init__(self, config, layer, layer_type): super().__init__(config, layer_type) self.kernel_regularizer = layer.kernel_regularizer @@ -376,6 +392,7 @@ def __init__(self, config, layer, layer_type): self.weight_transpose = (1, 0) self.weight_transpose_back = (1, 0) self.data_transpose = (0, 1) # Always (BATCH_SIZE, OUT_FEATURES) + self.parallelization_factor = -1 def build(self, input_shape): self.weight = self.add_weight( @@ -387,6 +404,20 @@ def build(self, input_shape): else None ) super().build(input_shape) + self.input_shape = input_shape + self.n_parallel = ops.prod(input_shape[1:-1]) + self.parallelization_factor = self.parallelization_factor if self.parallelization_factor > 0 else self.n_parallel + + def ebops(self, shape): + bw_inp = self.input_quantizer.bits_(shape) + bw_ker = self.weight_quantizer.bits_(ops.shape(self.weight)) + ebops = ops.sum(ops.matmul(bw_inp, bw_ker)) + ebops = ebops * self.n_parallel / self.parallelization_factor + if self.use_bias: + bw_bias = self.bias_quantizer.bits_(ops.shape(self.bias)) + size = ops.cast(ops.prod(self.input_shape), self.dtype) + ebops += ops.mean(bw_bias) * size + return ebops def call(self, x, training=None): weight, bias, x = self.pre_forward(self.weight, self.bias, x, training) @@ -518,14 +549,20 @@ def call(self, inputs, training=None, mask=None): if self.scale: if self.enable_quantization and not self.final_compression_done: - 
gamma = self.parameter_quantizer(self.gamma, k=1.0, i=self.i, f=self.f) + if self.use_hgq: + gamma = self.parameter_quantizer(self.gamma) + else: + gamma = self.parameter_quantizer(self.gamma, k=self.weight_k, i=self.i_weight, f=self.f_weight) gamma = ops.cast(gamma, inputs.dtype) else: gamma = None if self.center: if self.enable_quantization and not self.final_compression_done: - beta = self.parameter_quantizer(self.beta, k=self.weight_k, i=self.i, f=self.f) + if self.use_hgq: + beta = self.parameter_quantizer(self.beta) + else: + beta = self.parameter_quantizer(self.beta, k=self.weight_k, i=self.i_weight, f=self.f_weight) beta = ops.cast(beta, inputs.dtype) else: beta = None @@ -644,8 +681,8 @@ def post_epoch_functions(model, epoch, total_epochs, **kwargs): ( PQDepthwiseConv2d, PQConv2d, - CompressedLayerConv1dKeras, - CompressedLayerDenseKeras, + PQConv1d, + PQDense, ), ): layer.pruning_layer.post_epoch_function(epoch, total_epochs, **kwargs) @@ -661,8 +698,8 @@ def pre_epoch_functions(model, epoch, total_epochs): ( PQDepthwiseConv2d, PQConv2d, - CompressedLayerConv1dKeras, - CompressedLayerDenseKeras, + PQConv1d, + PQDense, ), ): layer.pruning_layer.pre_epoch_function(epoch, total_epochs) @@ -678,8 +715,8 @@ def post_round_functions(model): ( PQDepthwiseConv2d, PQConv2d, - CompressedLayerConv1dKeras, - CompressedLayerDenseKeras, + PQConv1d, + PQDense, ), ): layer.pruning_layer.post_round_function() @@ -695,8 +732,8 @@ def save_weights_functions(model): ( PQDepthwiseConv2d, PQConv2d, - CompressedLayerConv1dKeras, - CompressedLayerDenseKeras, + PQConv1d, + PQDense, ), ): layer.save_weights() @@ -712,8 +749,8 @@ def rewind_weights_functions(model): ( PQDepthwiseConv2d, PQConv2d, - CompressedLayerConv1dKeras, - CompressedLayerDenseKeras, + PQConv1d, + PQDense, ), ): layer.rewind_weights() @@ -729,8 +766,8 @@ def pre_finetune_functions(model): ( PQDepthwiseConv2d, PQConv2d, - CompressedLayerConv1dKeras, - CompressedLayerDenseKeras, + PQConv1d, + PQDense, ), ): layer.pruning_layer.pre_finetune_function() @@ -746,8 +783,8 @@ def post_pretrain_functions(model, config): ( PQDepthwiseConv2d, PQConv2d, - CompressedLayerConv1dKeras, - CompressedLayerDenseKeras, + PQConv1d, + PQDense, ), ): layer.pruning_layer.post_pre_train_function() @@ -774,8 +811,8 @@ def pdp_setup(model, config): ( PQDepthwiseConv2d, PQConv2d, - CompressedLayerConv1dKeras, - CompressedLayerDenseKeras, + PQConv1d, + PQDense, ), ): if global_weights is None: @@ -801,8 +838,8 @@ def pdp_setup(model, config): ( PQDepthwiseConv2d, PQConv2d, - CompressedLayerConv1dKeras, - CompressedLayerDenseKeras, + PQConv1d, + PQDense, ), ): weight_size = ops.size(layer.weight) @@ -841,8 +878,8 @@ def get_layer_keep_ratio_tf(model): ( PQDepthwiseConv2d, PQConv2d, - CompressedLayerConv1dKeras, - CompressedLayerDenseKeras, + PQConv1d, + PQDense, ), ): # weight, bias = layer.prune_and_quantize(layer.weight, layer.bias) @@ -909,8 +946,8 @@ def get_model_losses_tf(model, losses): ( PQDepthwiseConv2d, PQConv2d, - CompressedLayerConv1dKeras, - CompressedLayerDenseKeras, + PQConv1d, + PQDense, ), ): loss = layer.pruning_layer.calculate_additional_loss() @@ -1007,7 +1044,7 @@ def add_compression_layers_tf(model, config, input_shape=None): x = new_layer(x) act = check_activation(layer, config) elif isinstance(layer, Conv1D): - new_layer = CompressedLayerConv1dKeras(config, layer, layer_type="conv") + new_layer = PQConv1d(config, layer, layer_type="conv") set_quantization_bits_weight_layers(config, layer, new_layer) enable_pruning = 
get_enable_pruning(layer, config) new_layer.set_enable_pruning(enable_pruning) @@ -1019,7 +1056,7 @@ def add_compression_layers_tf(model, config, input_shape=None): x = new_layer(x) act = check_activation(layer, config) elif isinstance(layer, Dense): - new_layer = CompressedLayerDenseKeras(config, layer, layer_type="linear") + new_layer = PQDense(config, layer, layer_type="linear") set_quantization_bits_weight_layers(config, layer, new_layer) enable_pruning = get_enable_pruning(layer, config) new_layer.set_enable_pruning(enable_pruning) From 64fede79ca322a734b8aea399857032e96d207a2 Mon Sep 17 00:00:00 2001 From: Roope Niemi Date: Fri, 17 Oct 2025 19:44:01 +0200 Subject: [PATCH 10/37] ebops calculations, refactoring quantizers to wrapper classes --- src/pquant/configs/config_ap.yaml | 1 + src/pquant/configs/config_autosparse.yaml | 1 + src/pquant/configs/config_cs.yaml | 1 + src/pquant/configs/config_dst.yaml | 1 + src/pquant/configs/config_mdmm.yaml | 1 + src/pquant/configs/config_pdp.yaml | 1 + src/pquant/configs/config_wanda.yaml | 1 + src/pquant/core/activations_quantizer.py | 105 +++- .../core/tf_impl/compressed_layers_tf.py | 556 +++++++++++++----- .../torch_impl/compressed_layers_torch.py | 262 +++++++-- 10 files changed, 702 insertions(+), 228 deletions(-) diff --git a/src/pquant/configs/config_ap.yaml b/src/pquant/configs/config_ap.yaml index 187b1f9..7ea2dae 100644 --- a/src/pquant/configs/config_ap.yaml +++ b/src/pquant/configs/config_ap.yaml @@ -13,6 +13,7 @@ quantization_parameters: default_data_keep_negatives: 0. default_weight_keep_negatives: 1. enable_quantization: true + hgq_beta: 1e-5 hgq_gamma: 0.0003 hgq_heterogeneous: True layer_specific: [] diff --git a/src/pquant/configs/config_autosparse.yaml b/src/pquant/configs/config_autosparse.yaml index 58ece98..28ae0af 100644 --- a/src/pquant/configs/config_autosparse.yaml +++ b/src/pquant/configs/config_autosparse.yaml @@ -16,6 +16,7 @@ quantization_parameters: default_data_keep_negatives: 0. default_weight_keep_negatives: 1. enable_quantization: true + hgq_beta: 1e-5 hgq_gamma: 0.0003 hgq_heterogeneous: True layer_specific: [] diff --git a/src/pquant/configs/config_cs.yaml b/src/pquant/configs/config_cs.yaml index ef8833a..9bd2528 100644 --- a/src/pquant/configs/config_cs.yaml +++ b/src/pquant/configs/config_cs.yaml @@ -12,6 +12,7 @@ quantization_parameters: default_data_keep_negatives: 0. default_weight_keep_negatives: 1. enable_quantization: true + hgq_beta: 1e-5 hgq_gamma: 0.0003 hgq_heterogeneous: True layer_specific: [] diff --git a/src/pquant/configs/config_dst.yaml b/src/pquant/configs/config_dst.yaml index 3d05b77..956fdc9 100644 --- a/src/pquant/configs/config_dst.yaml +++ b/src/pquant/configs/config_dst.yaml @@ -14,6 +14,7 @@ quantization_parameters: default_data_keep_negatives: 0. default_weight_keep_negatives: 1. enable_quantization: true + hgq_beta: 1e-5 hgq_gamma: 0.0003 hgq_heterogeneous: True layer_specific: [] diff --git a/src/pquant/configs/config_mdmm.yaml b/src/pquant/configs/config_mdmm.yaml index 34ccaa8..6ecde47 100644 --- a/src/pquant/configs/config_mdmm.yaml +++ b/src/pquant/configs/config_mdmm.yaml @@ -22,6 +22,7 @@ quantization_parameters: default_fractional_bits: 7. default_data_keep_negatives: 0. default_weight_keep_negatives: 1. 
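  # gloss: hgq_beta (added below) weights the EBOPs estimate that this patch
  # folds into the HGQ loss, while the existing hgq_gamma weights the bitwidth
  # (integer + fractional bits) regularizer.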
+ hgq_beta: 1e-5 hgq_gamma: 0.0003 hgq_heterogeneous: True layer_specific: [] diff --git a/src/pquant/configs/config_pdp.yaml b/src/pquant/configs/config_pdp.yaml index ce306a4..20e1ed7 100644 --- a/src/pquant/configs/config_pdp.yaml +++ b/src/pquant/configs/config_pdp.yaml @@ -14,6 +14,7 @@ quantization_parameters: default_data_keep_negatives: 0. default_weight_keep_negatives: 1. enable_quantization: true + hgq_beta: 1e-5 hgq_gamma: 0.0003 hgq_heterogeneous: True layer_specific: [] diff --git a/src/pquant/configs/config_wanda.yaml b/src/pquant/configs/config_wanda.yaml index cf667df..1609182 100644 --- a/src/pquant/configs/config_wanda.yaml +++ b/src/pquant/configs/config_wanda.yaml @@ -14,6 +14,7 @@ quantization_parameters: default_integer_bits: 0. default_fractional_bits: 7. enable_quantization: true + hgq_beta: 1e-5 hgq_gamma: 0.0003 hgq_heterogeneous: True layer_specific: [] diff --git a/src/pquant/core/activations_quantizer.py b/src/pquant/core/activations_quantizer.py index 041b62a..b94a9c5 100644 --- a/src/pquant/core/activations_quantizer.py +++ b/src/pquant/core/activations_quantizer.py @@ -7,7 +7,8 @@ @keras.saving.register_keras_serializable(package="PQuant") class QuantizedTanh(keras.layers.Layer): - def __init__(self, config, i_input=0.0, f_input=7.0, i_output=0.0, f_output=7.0, quantize_input=True, quantize_output=True + def __init__( + self, config, i_input=0.0, f_input=7.0, i_output=0.0, f_output=7.0, quantize_input=True, quantize_output=False ): super().__init__() if isinstance(config, dict): @@ -28,6 +29,8 @@ def __init__(self, config, i_input=0.0, f_input=7.0, i_output=0.0, f_output=7.0, self.is_pretraining = True self.round_mode = config.quantization_parameters.round_mode self.overflow = config.quantization_parameters.overflow + self.hgq_beta = config.quantization_parameters.hgq_beta + self.hgq_gamma = config.quantization_parameters.hgq_gamma self.use_real_tanh = config.quantization_parameters.use_real_tanh self.hgq_heterogeneous = config.quantization_parameters.hgq_heterogeneous self.quantize_input = quantize_input @@ -35,6 +38,7 @@ def __init__(self, config, i_input=0.0, f_input=7.0, i_output=0.0, f_output=7.0, def build(self, input_shape): super().build(input_shape) + self.input_shape = input_shape self.output_quantizer = create_quantizer( k=self.k, i=self.i_output, @@ -57,12 +61,46 @@ def build(self, input_shape): self.input_quantizer.build(input_shape) self.output_quantizer.build(input_shape) + def get_input_quantization_bits(self): + if self.use_hgq: + return self.input_quantizer.quantizer.i, self.input_quantizer.quantizer.f + else: + return self.i_input, self.f_input + + def set_input_quantization_bits(self, i, f): + if self.use_hgq: + self.input_quantizer.quantizer._i.assign(self.input_quantizer.quantizer._i * 0.0 + i) + self.input_quantizer.quantizer._f.assign(self.input_quantizer.quantizer._f * 0.0 + f) + else: + self.i_input = i + self.f_input = f + + def get_output_quantization_bits(self): + if self.use_hgq: + return self.output_quantizer.quantizer.i, self.output_quantizer.quantizer.f + else: + return self.i_output, self.f_output + + def set_output_quantization_bits(self, i, f): + if self.use_hgq: + self.output_quantizer.quantizer._i.assign(self.output_quantizer.quantizer._i * 0.0 + i) + self.output_quantizer.quantizer._f.assign(self.output_quantizer.quantizer._f * 0.0 + f) + else: + self.i_output = i + self.f_output = f + + def ebops(self): + bw_inp = self.input_quantizer.bits_(self.input_shape) + bw_out = self.output_quantizer.bits_(self.input_shape) + 
return ops.sum((2.0**bw_inp) * bw_out) * 1e-4 # type: ignore + def hgq_loss(self): - if self.is_pretraining: - return 0.0 - return ( - ops.sum(self.hgq.quantizer.i) + ops.sum(self.hgq.quantizer.f) - ) * self.config.quantization_parameters.hgq_gamma + if self.is_pretraining or not self.use_hgq: + return ops.convert_to_tensor(0.0) + loss = self.beta * self.ebops() + loss += (ops.sum(self.input_quantizer.quantizer.i) + ops.sum(self.input_quantizer.quantizer.f)) * self.hgq_gamma + loss += (ops.sum(self.output_quantizer.quantizer.i) + ops.sum(self.output_quantizer.quantizer.f)) * self.hgq_gamma + return loss def post_pre_train_function(self): self.is_pretraining = False @@ -87,6 +125,7 @@ def call(self, x): x = self.pre_activation(x) x = tanh(x) if self.use_real_tanh else hard_tanh(x) x = self.post_activation(x) + self.add_loss(self.hgq_loss()) return x def get_config(self): @@ -98,7 +137,7 @@ def get_config(self): @keras.saving.register_keras_serializable(package="PQuant") class QuantizedReLU(keras.layers.Layer): def __init__( - self, config, i_input=0.0, f_input=8.0, i_output=0.0, f_output=8.0, quantize_input=True, quantize_output=True + self, config, i_input=0.0, f_input=8.0, i_output=0.0, f_output=8.0, quantize_input=True, quantize_output=False ): super().__init__() if isinstance(config, dict): @@ -119,8 +158,11 @@ def __init__( self.round_mode = config.quantization_parameters.round_mode self.overflow = config.quantization_parameters.overflow self.use_multiplier = config.quantization_parameters.use_relu_multiplier + self.hgq_beta = config.quantization_parameters.hgq_beta + self.hgq_gamma = config.quantization_parameters.hgq_gamma self.hgq_heterogeneous = config.quantization_parameters.hgq_heterogeneous self.use_fitcompress = config.fitcompress_parameters.enable_fitcompress + self.post_fitcompress_calibration = False @@ -130,6 +172,7 @@ def __init__( def build(self, input_shape): super().build(input_shape) + self.input_shape = input_shape self.output_quantizer = create_quantizer( k=self.k, i=self.i_output, @@ -155,15 +198,50 @@ def build(self, input_shape): if self.use_multiplier: self.multiplier = self.add_weight(shape=(1,), trainable=True, initializer=keras.initializers.Constant(-1.0)) + def get_input_quantization_bits(self): + if self.use_hgq: + return self.input_quantizer.quantizer.i, self.input_quantizer.quantizer.f + else: + return self.i_input, self.f_input + + def set_input_quantization_bits(self, i, f): + if self.use_hgq: + self.input_quantizer.quantizer._i.assign(self.input_quantizer.quantizer._i * 0.0 + i) + self.input_quantizer.quantizer._f.assign(self.input_quantizer.quantizer._f * 0.0 + f) + else: + self.i_input = i + self.f_input = f + + def get_output_quantization_bits(self): + if self.use_hgq: + return self.output_quantizer.quantizer.i, self.output_quantizer.quantizer.f + else: + return self.i_output, self.f_output + + def set_output_quantization_bits(self, i, f): + if self.use_hgq: + self.output_quantizer.quantizer._i.assign(self.output_quantizer.quantizer._i * 0.0 + i) + self.output_quantizer.quantizer._f.assign(self.output_quantizer.quantizer._f * 0.0 + f) + else: + self.i_output = i + self.f_output = f + def post_pre_train_function(self): self.is_pretraining = False + def ebops(self): + bw_inp = self.input_quantizer.bits_(self.input_shape) + bw_out = self.output_quantizer.bits_(self.input_shape) + return ops.sum((2.0**bw_inp) * bw_out) * 1e-4 # type: ignore + def hgq_loss(self): - if self.is_pretraining: - return 0.0 - return ( - ops.sum(self.hgq.quantizer.i) + 
@@ -98,7 +137,7 @@ def get_config(self): @keras.saving.register_keras_serializable(package="PQuant") class QuantizedReLU(keras.layers.Layer): def __init__( - self, config, i_input=0.0, f_input=8.0, i_output=0.0, f_output=8.0, quantize_input=True, quantize_output=True + self, config, i_input=0.0, f_input=8.0, i_output=0.0, f_output=8.0, quantize_input=True, quantize_output=False ): super().__init__() if isinstance(config, dict): @@ -119,8 +158,11 @@ def __init__( self.round_mode = config.quantization_parameters.round_mode self.overflow = config.quantization_parameters.overflow self.use_multiplier = config.quantization_parameters.use_relu_multiplier + self.hgq_beta = config.quantization_parameters.hgq_beta + self.hgq_gamma = config.quantization_parameters.hgq_gamma self.hgq_heterogeneous = config.quantization_parameters.hgq_heterogeneous self.use_fitcompress = config.fitcompress_parameters.enable_fitcompress + self.post_fitcompress_calibration = False @@ -130,6 +172,7 @@ def __init__( def build(self, input_shape): super().build(input_shape) + self.input_shape = input_shape self.output_quantizer = create_quantizer( k=self.k, i=self.i_output, @@ -155,15 +198,50 @@ if self.use_multiplier: self.multiplier = self.add_weight(shape=(1,), trainable=True, initializer=keras.initializers.Constant(-1.0)) + def get_input_quantization_bits(self): + if self.use_hgq: + return self.input_quantizer.quantizer.i, self.input_quantizer.quantizer.f + else: + return self.i_input, self.f_input + + def set_input_quantization_bits(self, i, f): + if self.use_hgq: + self.input_quantizer.quantizer._i.assign(self.input_quantizer.quantizer._i * 0.0 + i) + self.input_quantizer.quantizer._f.assign(self.input_quantizer.quantizer._f * 0.0 + f) + else: + self.i_input = i + self.f_input = f + + def get_output_quantization_bits(self): + if self.use_hgq: + return self.output_quantizer.quantizer.i, self.output_quantizer.quantizer.f + else: + return self.i_output, self.f_output + + def set_output_quantization_bits(self, i, f): + if self.use_hgq: + self.output_quantizer.quantizer._i.assign(self.output_quantizer.quantizer._i * 0.0 + i) + self.output_quantizer.quantizer._f.assign(self.output_quantizer.quantizer._f * 0.0 + f) + else: + self.i_output = i + self.f_output = f + def post_pre_train_function(self): self.is_pretraining = False + def ebops(self): + bw_inp = self.input_quantizer.bits_(self.input_shape) + bw_out = self.output_quantizer.bits_(self.input_shape) + return ops.sum((2.0**bw_inp) * bw_out) * 1e-4 # type: ignore + def hgq_loss(self): - if self.is_pretraining: - return 0.0 - return ( - ops.sum(self.hgq.quantizer.i) + ops.sum(self.hgq.quantizer.f) - ) * self.config.quantization_parameters.hgq_gamma + if self.is_pretraining or not self.use_hgq: + return ops.convert_to_tensor(0.0) + loss = self.hgq_beta * self.ebops() + loss += (ops.sum(self.input_quantizer.quantizer.i) + ops.sum(self.input_quantizer.quantizer.f)) * self.hgq_gamma + loss += (ops.sum(self.output_quantizer.quantizer.i) + ops.sum(self.output_quantizer.quantizer.f)) * self.hgq_gamma + return loss def pre_activation(self, x): if self.quantize_input: @@ -194,6 +272,7 @@ def call(self, x): x = self.pre_activation(x) x = ops.relu(x) x = self.post_activation(x) + self.add_loss(self.hgq_loss()) return x def get_config(self): @@ -218,4 +297,4 @@ def hard_sigmoid(x): def hard_tanh(x): """Computes hard_tanh function that saturates between -1 and 1.""" - return 2.0 * hard_sigmoid(x) - 1.0 + return 2.0 * hard_sigmoid(x) - 1.0 \ No newline at end of file diff --git a/src/pquant/core/tf_impl/compressed_layers_tf.py b/src/pquant/core/tf_impl/compressed_layers_tf.py index c852c9d..567e1e9 100644 --- a/src/pquant/core/tf_impl/compressed_layers_tf.py +++ b/src/pquant/core/tf_impl/compressed_layers_tf.py @@ -1,5 +1,6 @@ import keras from keras import ops +from keras.initializers import Constant from keras.layers import ( Activation, AveragePooling1D, @@ -14,6 +15,7 @@ ReLU, SeparableConv2D, ) +from keras.src.ops.operation_utils import compute_pooling_output_shape from pquant.core.activations_quantizer import QuantizedReLU, QuantizedTanh from pquant.core.quantizer_functions import create_quantizer @@ -52,61 +54,70 @@ def __init__(self, config, layer_type, quantize_input=True, quantize_output=Fals self.data_transpose = None self.quantize_input = quantize_input self.quantize_output = quantize_output - - def set_input_output_quantization(self, input_quantization, output_quantization): - self.quantize_input = input_quantization - self.quantize_output = output_quantization - - def set_input_output_quantization_bits(self, i_input, f_input, i_output, f_output): - self.i_input = i_input - self.f_input = f_input - self.i_output = i_output - self.f_output = f_output + self.parallelization_factor = -1 def set_enable_pruning(self, enable_pruning): self.enable_pruning = enable_pruning + def get_weight_quantization_bits(self): + return self.weight_quantizer.get_quantization_bits() + + def get_bias_quantization_bits(self): + return self.bias_quantizer.get_quantization_bits() + + def get_input_quantization_bits(self): + return self.input_quantizer.get_quantization_bits() + + def get_output_quantization_bits(self): + return self.output_quantizer.get_quantization_bits() + def build(self, input_shape): super().build(input_shape) - self.weight_quantizer = create_quantizer( - k=ops.convert_to_tensor(self.weight_k), - i=self.i_weight, - f=self.f_weight, - overflow=self.overflow, - round_mode=self.round_mode, - is_heterogeneous=self.use_hgq, - is_data=False, + self.weight_quantizer = KerasQuantizer( + self.weight_k, + self.i_weight, + self.f_weight, + self.overflow, + self.round_mode, + self.use_hgq, + False, + self.hgq_gamma, ) - if self.use_bias: - self.bias_quantizer = create_quantizer( - k=ops.convert_to_tensor(self.weight_k), - i=self.i_bias, - f=self.f_bias, - overflow=self.overflow, - round_mode=self.round_mode, - is_heterogeneous=self.use_hgq, - is_data=False, - ) - if self.quantize_input: - self.input_quantizer = create_quantizer( - k=self.data_k, - i=self.i_input, - f=self.f_input, - overflow=self.overflow, - round_mode=self.round_mode, -
is_heterogeneous=self.use_hgq, - is_data=True, - ) - if self.quantize_output: - self.output_quantizer = create_quantizer( - k=self.data_k, - i=self.i_output, - f=self.f_output, - overflow=self.overflow, - round_mode=self.round_mode, - is_heterogeneous=self.use_hgq, - is_data=True, - ) + + # if self.use_bias: + self.bias_quantizer = KerasQuantizer( + self.weight_k, + self.i_bias, + self.f_bias, + self.overflow, + self.round_mode, + self.use_hgq, + False, + self.hgq_gamma, + ) + self.input_quantizer = KerasQuantizer( + self.data_k, + self.i_input, + self.f_input, + self.overflow, + self.round_mode, + self.use_hgq, + True, + self.hgq_gamma, + ) + self.output_quantizer = KerasQuantizer( + self.data_k, + self.i_output, + self.f_output, + self.overflow, + self.round_mode, + self.use_hgq, + True, + self.hgq_gamma, + ) + self.input_shape = input_shape + self.n_parallel = ops.prod(input_shape[1:-1]) + self.parallelization_factor = self.parallelization_factor if self.parallelization_factor > 0 else self.n_parallel def apply_final_compression(self): weight, bias = self.prune_and_quantize(self.weight, self.bias) @@ -121,28 +132,20 @@ def save_weights(self): def rewind_weights(self): self.weight.assign(self.init_weight) - def ebops(self): + def ebops(self, shape): return 0.0 - def hgq_loss(self): + def hgq_loss(self, shape): if self.pruning_layer.is_pretraining or not self.use_hgq: - return 0.0 - loss = self.ebops() - loss += ( - ops.sum(self.weight_quantizer.quantizer.quantizer.i) + ops.sum(self.weight_quantizer.quantizer.quantizer.f) - ) * self.hgq_gamma + return ops.convert_to_tensor(0.0) + loss = self.hgq_beta * self.ebops(shape) + loss += self.weight_quantizer.hgq_loss() if self.bias is not None: - loss += ( - ops.sum(self.bias_quantizer.quantizer.quantizer.i) + ops.sum(self.bias_quantizer.quantizer.quantizer.f) - ) * self.hgq_gamma + loss += self.bias_quantizer.hgq_loss() if self.quantize_input: - loss += ( - ops.sum(self.input_quantizer.quantizer.quantizer.i) + ops.sum(self.input_quantizer.quantizer.quantizer.f) - ) * self.hgq_gamma + loss += self.input_quantizer.hgq_loss() if self.quantize_output: - loss += ( - ops.sum(self.output_quantizer.quantizer.quantizer.i) + ops.sum(self.output_quantizer.quantizer.quantizer.f) - ) * self.hgq_gamma + loss += self.output_quantizer.hgq_loss() return loss def handle_transpose(self, x, transpose, do_transpose=False): @@ -152,18 +155,8 @@ def handle_transpose(self, x, transpose, do_transpose=False): def quantize_i(self, weight, bias): if self.enable_quantization: - if self.use_hgq: - weight = self.weight_quantizer(weight) - bias = None if bias is None else self.bias_quantizer(bias) - else: - weight = self.weight_quantizer( - weight, k=ops.convert_to_tensor(1.0), i=self.i_weight, f=self.f_weight, training=True - ) - bias = ( - None - if bias is None - else self.bias_quantizer(bias, k=ops.convert_to_tensor(1.0), i=self.i_bias, f=self.f_bias, training=True) - ) + weight = self.weight_quantizer(weight) + bias = None if bias is None else self.bias_quantizer(bias) return weight, bias def prune(self, weight): @@ -192,12 +185,9 @@ def call(self, x): def pre_forward(self, weight, bias, x, training=None): if self.quantize_input: if self.use_hgq and not self.input_quantizer.quantizer.built: - self.input_quantizer.quantizer.build(x.shape) + self.input_quantizer.build(x.shape) if not self.pruning_layer.is_pretraining and not self.use_fitcompress: - if self.use_hgq: - x = self.input_quantizer(x) - else: - x = self.input_quantizer(x, k=self.data_k, i=self.i_input, 
f=self.f_input) + x = self.input_quantizer(x) if self.pruning_method == "wanda": self.collect_input(x, self.weight, training) weight, bias = self.prune_and_quantize(weight, bias) @@ -206,12 +196,9 @@ def pre_forward(self, weight, bias, x, training=None): def post_forward(self, x, training=None): if self.quantize_output: if self.use_hgq and not self.output_quantizer.quantizer.built: - self.output_quantizer.quantizer.build(x.shape) + self.output_quantizer.build(x.shape) if not self.pruning_layer.is_pretraining and not self.use_fitcompress: - if self.use_hgq: - x = self.output_quantizer(x) - else: - x = self.output_quantizer(x, k=self.data_k, i=self.i_output, f=self.f_output) + x = self.output_quantizer(x) if self.pruning_method == "activation_pruning": self.collect_output(x, training) return x @@ -245,6 +232,7 @@ def __init__(self, config, layer, layer_type, quantize_input=True, quantize_outp self.do_transpose_data = layer.data_format == "channels_last" def build(self, input_shape): + super().build(input_shape) self.weight = self.add_weight( self.weight_shape, initializer=self.init_weight, trainable=True, regularizer=self.depthwise_regularizer ) @@ -253,10 +241,39 @@ def build(self, input_shape): if self.bias_shape is not None else None ) - super().build(input_shape) + + def ebops(self, shape): + bw_inp = self.input_quantizer.quantizer.bits_(shape) + bw_ker = self.weight_quantizer.quantizer.bits_(ops.shape(self.weight)) + if self.parallelization_factor < 0: + ebops = ops.sum( + ops.depthwise_conv( + bw_inp, + bw_ker, + strides=self.strides, + padding=self.padding, + data_format=None, + dilation_rate=self.dilation_rate, + ) + ) + else: + reduce_axis_kernel = tuple(range(0, 3)) + if self.do_transpose_data: # Is channels last + reduce_axis_input = reduce_axis_kernel + else: + reduce_axis_input = (0,) + tuple(range(2, 4)) + bw_inp = ops.max(bw_inp, axis=reduce_axis_input) + reduce_axis_kernel = tuple(range(0, 2)) + bw_ker = ops.sum(bw_ker, axis=reduce_axis_kernel) + ebops = ops.sum(bw_inp[:, None] * bw_ker) + if self.bias is not None: + size = ops.cast(ops.prod(shape), self.dtype) + bw_bias = self.bias_quantizer.quantizer.bits_(ops.shape(self.bias)) + ebops += ops.mean(bw_bias) * size + return ebops def call(self, x, training=None): - weight, bias, x = self.pre_forward(self.weight, self.bias, x, training) + weight, _, x = self.pre_forward(self.weight, self.bias, x, training) x = ops.depthwise_conv( x, weight, strides=self.strides, padding=self.padding, data_format=None, dilation_rate=self.dilation_rate ) @@ -286,15 +303,49 @@ def __init__(self, config, layer, layer_type, quantize_input=True, quantize_outp self.do_transpose_data = layer.data_format == "channels_last" def build(self, input_shape): + super().build(input_shape) self.weight = self.add_weight( self.weight_shape, initializer=self.init_weight, trainable=True, regularizer=self.kernel_regularizer ) + self.weight_quantizer.build(self.weight.shape) self.bias = ( self.add_weight(self.bias_shape, initializer=self.init_bias, trainable=True) if self.bias_shape is not None else None ) - super().build(input_shape) + if self.use_bias: + self.bias_quantizer.build(self.bias.shape) + + def ebops(self, shape): + bw_inp = self.input_quantizer.quantizer.bits_(shape) + bw_ker = self.weight_quantizer.quantizer.bits_(ops.shape(self.weight)) + if self.parallelization_factor < 0: + ebops = ops.sum( + ops.conv( + bw_inp, + bw_ker, + strides=self.strides, + padding=self.padding, + data_format=None, + dilation_rate=self.dilation_rate, + ) + ) + else: + 
reduce_axis_kernel = tuple(range(0, 3)) + if self.do_transpose_data: # Is channels last + reduce_axis_input = reduce_axis_kernel + else: + reduce_axis_input = (0,) + tuple(range(2, 4)) + bw_inp = ops.max(bw_inp, axis=reduce_axis_input) + reduce_axis_kernel = tuple(range(0, 2)) + bw_ker = ops.sum(bw_ker, axis=reduce_axis_kernel) + + ebops = ops.sum(bw_inp[:, None] * bw_ker) + if self.bias is not None: + size = ops.cast(ops.prod(shape), self.dtype) + bw_bias = self.bias_quantizer.quantizer.bits_(ops.shape(self.bias)) + ebops += ops.mean(bw_bias) * size + return ebops def call(self, x, training=None): weight, bias, x = self.pre_forward(self.weight, self.bias, x, training) @@ -358,15 +409,46 @@ def __init__(self, config, layer, layer_type, quantize_input=True, quantize_outp self.do_transpose_data = layer.data_format == "channels_last" def build(self, input_shape): + super().build(input_shape) self.weight = self.add_weight( self.weight_shape, initializer=self.init_weight, trainable=True, regularizer=self.kernel_regularizer ) + self.weight_quantizer.build(self.weight.shape) self.bias = ( self.add_weight(self.bias_shape, initializer=self.init_bias, trainable=True) if self.bias_shape is not None else None ) - super().build(input_shape) + + def ebops(self, shape): + bw_inp = self.input_quantizer.quantizer.bits_(shape) + bw_ker = self.weight_quantizer.quantizer.bits_(ops.shape(self.weight)) + if self.parallelization_factor < 0: + ebops = ops.sum( + ops.conv( + bw_inp, + bw_ker, + strides=self.strides, + padding=self.padding, + data_format=None, + dilation_rate=self.dilation_rate, + ) + ) + else: + reduce_axis_kernel = tuple(range(0, 2)) + if self.do_transpose_data: # Is channels last + reduce_axis_input = reduce_axis_kernel + else: + reduce_axis_input = (0,) + tuple(range(2, 3)) + bw_inp = ops.max(bw_inp, axis=reduce_axis_input) + reduce_axis_kernel = tuple(range(0, 1)) + bw_ker = ops.sum(bw_ker, axis=reduce_axis_kernel) + ebops = ops.sum(bw_inp[:, None] * bw_ker) + if self.bias is not None: + size = ops.cast(ops.prod(shape), self.dtype) + bw_bias = self.bias_quantizer.quantizer.bits_(ops.shape(self.bias)) + ebops += ops.mean(bw_bias) * size + return ebops def call(self, x, training=None): weight, bias, x = self.pre_forward(self.weight, self.bias, x, training) @@ -392,39 +474,41 @@ def __init__(self, config, layer, layer_type): self.weight_transpose = (1, 0) self.weight_transpose_back = (1, 0) self.data_transpose = (0, 1) # Always (BATCH_SIZE, OUT_FEATURES) - self.parallelization_factor = -1 def build(self, input_shape): + super().build(input_shape) self.weight = self.add_weight( self.weight_shape, initializer=self.init_weight, trainable=True, regularizer=self.kernel_regularizer ) + self.weight_quantizer.build(self.weight.shape) self.bias = ( self.add_weight(self.bias_shape, initializer=self.init_bias, trainable=True) if self.bias_shape is not None else None ) - super().build(input_shape) - self.input_shape = input_shape - self.n_parallel = ops.prod(input_shape[1:-1]) - self.parallelization_factor = self.parallelization_factor if self.parallelization_factor > 0 else self.n_parallel + if self.use_bias: + self.bias_quantizer.build(self.bias.shape) def ebops(self, shape): - bw_inp = self.input_quantizer.bits_(shape) - bw_ker = self.weight_quantizer.bits_(ops.shape(self.weight)) + bw_inp = self.input_quantizer.quantizer.bits_(shape) + bw_ker = self.weight_quantizer.quantizer.bits_(ops.shape(self.weight)) ebops = ops.sum(ops.matmul(bw_inp, bw_ker)) ebops = ebops * self.n_parallel / 
self.parallelization_factor if self.use_bias: - bw_bias = self.bias_quantizer.bits_(ops.shape(self.bias)) - size = ops.cast(ops.prod(self.input_shape), self.dtype) + bw_bias = self.bias_quantizer.quantizer.bits_(ops.shape(self.bias)) + size = ops.cast(ops.prod(shape), self.dtype) ebops += ops.mean(bw_bias) * size return ebops def call(self, x, training=None): + input_shape = x.shape weight, bias, x = self.pre_forward(self.weight, self.bias, x, training) x = ops.matmul(x, weight) if self.bias is not None: x = ops.add(x, bias) x = self.post_forward(x, training) + if self.use_hgq and self.enable_quantization: + self.add_loss(self.hgq_loss(input_shape)) return x @@ -474,46 +558,82 @@ def __init__( self.weight_k = config["quantization_parameters"]["default_weight_keep_negatives"] self.enable_quantization = config["quantization_parameters"]["enable_quantization"] self.use_hgq = config["quantization_parameters"]["use_high_granularity_quantization"] + self.hgq_beta = config["quantization_parameters"]["hgq_beta"] self.quantize_input = quantize_input self.config = config - self.f_input = self.f_weight = ops.convert_to_tensor(config["quantization_parameters"]["default_fractional_bits"]) - self.i_input = self.i_weight = ops.convert_to_tensor(config["quantization_parameters"]["default_integer_bits"]) + self.f_input = self.f_weight = self.f_bias = ops.convert_to_tensor( + config["quantization_parameters"]["default_fractional_bits"] + ) + self.i_input = self.i_weight = self.i_bias = ops.convert_to_tensor( + config["quantization_parameters"]["default_integer_bits"] + ) self.final_compression_done = False + self.is_pretraining = True def build(self, input_shape): super().build(input_shape) - self.parameter_quantizer = create_quantizer( - k=self.weight_k, - i=self.i_weight, - f=self.f_weight, + self.input_quantizer = KerasQuantizer( + k=1.0, + i=self.i_input, + f=self.f_input, overflow=self.overflow, round_mode=self.round_mode, is_heterogeneous=self.use_hgq, is_data=True, + hgq_gamma=self.hgq_gamma, ) - self.input_quantizer = create_quantizer( - k=self.data_k, - i=self.i_input, - f=self.f_input, + self.weight_quantizer = KerasQuantizer( + k=1.0, + i=self.i_weight, + f=self.f_weight, + round_mode=self.round_mode, overflow=self.overflow, + is_data=False, + is_heterogeneous=self.use_hgq, + ) + self.bias_quantizer = KerasQuantizer( + k=1.0, + i=self.i_bias, + f=self.f_bias, round_mode=self.round_mode, + overflow=self.overflow, + is_data=False, is_heterogeneous=self.use_hgq, - is_data=True, ) + self.input_quantizer.build(input_shape) + self.weight_quantizer.build(self.moving_variance.shape) + self.bias_quantizer.build(self.moving_mean.shape) + shape = [1] * len(input_shape) + shape[self.axis] = input_shape[self.axis] + self._shape = tuple(shape) def apply_final_compression(self): gamma, beta = self.gamma, self.beta if self.enable_quantization: - if self.use_hgq: - gamma = self.parameter_quantizer(gamma) - beta = self.parameter_quantizer(beta) - else: - gamma = self.parameter_quantizer(self.gamma, k=self.data_k, i=self.i_weight, f=self.f_weight) - beta = self.parameter_quantizer(self.beta, k=self.data_k, i=self.i_weight, f=self.f_weight) + gamma = self.weight_quantizer(gamma) + beta = self.bias_quantizer(beta) self.gamma.assign(gamma) self.beta.assign(beta) self.final_compression_done = True + def ebops(self, shape): + bw_inp = self.input_quantizer.quantizer.bits_(shape) + bw_ker = ops.reshape(self.weight_quantizer.quantizer.bits_(self.moving_mean.shape), self._shape) + bw_bias = 
ops.reshape(self.bias_quantizer.quantizer.bits_(self.moving_mean.shape), self._shape) + size = ops.cast(ops.prod(shape), self.dtype) + ebops = ops.sum(bw_inp * bw_ker) + ops.mean(bw_bias) * size + return ebops + + def hgq_loss(self, shape): + if self.is_pretraining or not self.use_hgq: + return ops.convert_to_tensor(0.0) + loss = self.hgq_beta * self.ebops(shape) + loss += self.weight_quantizer.hgq_loss() + loss += self.bias_quantizer.hgq_loss() + if self.quantize_input: + loss += self.input_quantizer.hgq_loss() + return loss + def call(self, inputs, training=None, mask=None): # Check if the mask has one less dimension than the inputs. if mask is not None: @@ -530,11 +650,7 @@ def call(self, inputs, training=None, mask=None): # float32 for the subsequent computations. inputs = ops.cast(inputs, compute_dtype) if self.quantize_input and self.enable_quantization: - if self.use_hgq: - inputs = self.input_quantizer(inputs) - else: - inputs = self.input_quantizer(inputs, k=self.data_k, i=self.i_input, f=self.f_input) - + inputs = self.input_quantizer(inputs) moving_mean = ops.cast(self.moving_mean, inputs.dtype) moving_variance = ops.cast(self.moving_variance, inputs.dtype) @@ -549,20 +665,14 @@ def call(self, inputs, training=None, mask=None): if self.scale: if self.enable_quantization and not self.final_compression_done: - if self.use_hgq: - gamma = self.parameter_quantizer(self.gamma) - else: - gamma = self.parameter_quantizer(self.gamma, k=self.weight_k, i=self.i_weight, f=self.f_weight) + gamma = self.weight_quantizer(self.gamma) gamma = ops.cast(gamma, inputs.dtype) else: gamma = None if self.center: if self.enable_quantization and not self.final_compression_done: - if self.use_hgq: - beta = self.parameter_quantizer(self.beta) - else: - beta = self.parameter_quantizer(self.beta, k=self.weight_k, i=self.i_weight, f=self.f_weight) + beta = self.bias_quantizer(self.beta) beta = ops.cast(beta, inputs.dtype) else: beta = None @@ -576,19 +686,33 @@ def call(self, inputs, training=None, mask=None): scale=gamma, epsilon=self.epsilon, ) + self.add_loss(self.hgq_loss(inputs.shape)) return ops.cast(outputs, self.compute_dtype) + def get_input_quantization_bits(self): + return self.input_quantizer.get_quantization_bits() + + def get_weight_quantization_bits(self): + return self.weight_quantizer.get_quantization_bits() + + def get_bias_quantization_bits(self): + return self.bias_quantizer.get_quantization_bits() + + def post_pre_train_function(self): + self.is_pretraining = False + class QuantizedPooling(keras.layers.Layer): - def __init__(self, config, layer, quantize_input=True): + def __init__(self, config, layer, quantize_input=True, quantize_output=False): super().__init__() - self.i = ops.convert_to_tensor(config.quantization_parameters.default_integer_bits) - self.f = ops.convert_to_tensor(config.quantization_parameters.default_fractional_bits) + self.i_input = ops.convert_to_tensor(config.quantization_parameters.default_integer_bits) + self.f_input = ops.convert_to_tensor(config.quantization_parameters.default_fractional_bits) self.is_pretraining = True self.overflow = "SAT_SYM" if config.quantization_parameters.use_symmetric_quantization else "SAT" self.hgq_gamma = config.quantization_parameters.hgq_gamma + self.hgq_beta = config.quantization_parameters.hgq_beta self.data_k = config.quantization_parameters.default_data_keep_negatives self.use_hgq = config.quantization_parameters.use_high_granularity_quantization self.hgq_heterogeneous = config.hgq_heterogeneous @@ -601,41 +725,80 @@ def 
__init__(self, config, layer, quantize_input=True): self.data_format = layer.data_format self.quantize_input = quantize_input self.dimensions = layer.__class__.__name__[-2] + self.quantize_output = quantize_output def post_pre_train_function(self): self.is_pretraining = False def build(self, input_shape): - super().build(input_shape) - self.input_quantizer = create_quantizer( - k=self.data_k, + self.input_quantizer = KerasQuantizer( + k=1.0, i=self.i_input, f=self.f_input, overflow=self.overflow, round_mode=self.round_mode, is_heterogeneous=self.use_hgq, is_data=True, + hgq_gamma=self.hgq_gamma, + ) + self.output_quantizer = KerasQuantizer( + k=1.0, + i=self.i_output, + f=self.f_output, + overflow=self.overflow, + round_mode=self.round_mode, + is_heterogeneous=self.use_hgq, + is_data=True, + hgq_gamma=self.hgq_gamma, + ) + if self.use_hgq: + self.input_quantizer.build(input_shape) + self.output_quantizer.build(self.compute_output_shape(input_shape)) + + def get_input_quantization_bits(self): + return self.input_quantizer.get_quantization_bits() + + def get_output_quantization_bits(self): + return self.output_quantizer.get_quantization_bits() + + def compute_output_shape(self, input_shape): + return compute_pooling_output_shape( + input_shape, + self.pool_size, + self.strides, + self.padding, + self.data_format, ) - self.hgq_gamma = self.hgq_gamma - def hgq_loss(self): + def ebops(self, shape): + bw_inp = self.input_quantizer.quantizer.bits_(shape) + return ops.sum(bw_inp) + + def hgq_loss(self, shape): if self.is_pretraining or not self.use_hgq: - return 0.0 - return (ops.sum(self.input_quantizer.quantizer.i) + ops.sum(self.input_quantizer.quantizer.f)) * self.hgq_gamma + return ops.convert_to_tensor(0.0) + loss = self.hgq_beta * self.ebops(shape) + if self.quantize_input: + loss += self.input_quantizer.hgq_loss() + if self.quantize_output: + loss += self.output_quantizer.hgq_loss() + return loss def call(self, x): + input_shape = x.shape if self.quantize_input and self.enable_quantization: - if self.use_hgq: - x = self.input_quantizer(x) - else: - x = self.input_quantizer(x, k=self.data_k, i=self.i_input, f=self.f_input) - return ops.average_pool( + x = self.input_quantizer(x) + x = ops.average_pool( x, pool_size=self.pool_size, strides=self.strides, padding=self.padding, data_format=self.data_format, ) + self.add_loss(self.hgq_loss(input_shape)) + if self.quantize_output and self.enable_quantization: + x = self.output_quantizer(x) + return x def get_config(self): config = super().get_config() @@ -653,6 +816,59 @@ def get_config(self): return config +class KerasQuantizer(keras.layers.Layer): + # HGQ quantizer wrapper + def __init__(self, k, i, f, overflow, round_mode, is_heterogeneous, is_data, hgq_gamma=0): + super().__init__() + self.k = k + self.i = i + self.f = f + self.overflow = overflow + self.round_mode = round_mode + self.use_hgq = is_heterogeneous + self.quantizer = create_quantizer(self.k, self.i, self.f, overflow, round_mode, is_heterogeneous, is_data) + self.is_pretraining = False + self.hgq_gamma = hgq_gamma + + def build(self, input_shape): + super().build(input_shape) + self.i = self.add_variable((), Constant(self.i), dtype="float32", trainable=False) + self.f = self.add_variable((), Constant(self.f), dtype="float32", trainable=False) + if self.use_hgq: + self.quantizer.build(input_shape) + + def get_quantization_bits(self): + if self.use_hgq: + return self.quantizer.quantizer.i, self.quantizer.quantizer.f + else: + return self.i, self.f + + def set_quantization_bits(self, 
i, f): + if self.use_hgq: + self.quantizer.quantizer._i.assign(self.quantizer.quantizer._i * 0.0 + i) + self.quantizer.quantizer._f.assign(self.quantizer.quantizer._f * 0.0 + f) + self.i = i + self.f = f + + def post_pretrain(self): + self.is_pretraining = True + + def call(self, x): + if not self.built: + self.build(x.shape) + if self.use_hgq: + x = self.quantizer(x) + else: + x = self.quantizer(x, k=self.k, i=self.i, f=self.f) + return x + + def hgq_loss(self): + if self.is_pretraining or not self.use_hgq: + return 0.0 + loss = (ops.sum(self.quantizer.quantizer.i) + ops.sum(self.quantizer.quantizer.f)) * self.hgq_gamma + return loss + + def call_post_round_functions(model, rewind, rounds, r): if rewind == "round": rewind_weights_functions(model) @@ -791,7 +1007,7 @@ def post_pretrain_functions(model, config): elif isinstance(layer, PQSeparableConv2d): layer.depthwise_conv.pruning_layer.post_pre_train_function() layer.pointwise_conv.pruning_layer.post_pre_train_function() - elif isinstance(layer, (QuantizedReLU, QuantizedTanh, QuantizedPooling)): + elif isinstance(layer, (QuantizedReLU, QuantizedTanh, QuantizedPooling, PQBatchNormalization)): layer.post_pre_train_function() if config.pruning_parameters.pruning_method == "pdp" or ( config.pruning_parameters.pruning_method == "wanda" and config.pruning_parameters.calculate_pruning_budget @@ -957,12 +1173,12 @@ def get_model_losses_tf(model, losses): elif isinstance(layer, PQSeparableConv2d): loss = layer.depthwise_conv.pruning_layer.calculate_additional_loss() loss += layer.pointwise_conv.pruning_layer.calculate_additional_loss() - if layer.enable_quantization and layer.use_high_granularity_quantization: + if layer.enable_quantization and layer.use_hgq: loss += layer.depthwise_conv.hgq_loss() loss += layer.pointwise_conv.hgq_loss() losses += loss - elif isinstance(layer, (QuantizedReLU, QuantizedTanh, QuantizedPooling)): - if layer.use_high_granularity_quantization: + elif isinstance(layer, (QuantizedReLU, QuantizedTanh, QuantizedPooling, PQBatchNormalization)): + if layer.enable_quantization and layer.use_hgq: losses += layer.hgq_loss() return losses @@ -979,12 +1195,12 @@ def check_activation(layer, config): act = QuantizedReLU(config) if quantization_enabled else ReLU() if quantization_enabled: - get_quantization_bits_activations(config, layer, act) + set_quantization_bits_activations(config, layer, act) act.build(layer.input.shape) elif layer.activation.__name__ == "tanh": act = QuantizedTanh(config) if quantization_enabled else Activation(activation="tanh") if quantization_enabled: - get_quantization_bits_activations(config, layer, act) + set_quantization_bits_activations(config, layer, act) act.build(layer.input.shape) else: act = None @@ -1070,7 +1286,7 @@ def add_compression_layers_tf(model, config, input_shape=None): elif isinstance(layer, ReLU): if config["quantization_parameters"]["enable_quantization"]: new_layer = QuantizedReLU(config) - get_quantization_bits_activations(config, layer, new_layer) + set_quantization_bits_activations(config, layer, new_layer) new_layer.build(layer.input.shape) x = new_layer(x) @@ -1084,8 +1300,8 @@ def add_compression_layers_tf(model, config, input_shape=None): elif isinstance(layer, (AveragePooling1D, AveragePooling2D, AveragePooling3D)): if config.quantization_parameters.enable_quantization: new_layer = QuantizedPooling(config, layer) - get_quantization_bits_activations(config, layer, new_layer) - new_layer.build(layer.output.shape) + set_quantization_bits_activations(config, layer, new_layer) 
+ new_layer.build(x.shape) x = new_layer(x) elif isinstance(layer, (BatchNormalization)): @@ -1108,8 +1324,8 @@ def add_compression_layers_tf(model, config, input_shape=None): layer.synchronized, quantize_input=True, ) - get_quantization_bits_activations(config, layer, new_layer) - new_layer.build(layer.output.shape) + set_quantization_bits_activations(config, layer, new_layer) + new_layer.build(x.shape) x = new_layer(x) else: x = layer(x) @@ -1122,8 +1338,8 @@ def add_compression_layers_tf(model, config, input_shape=None): def get_quantization_bits_activations(config, layer, new_layer): - i_input = i_output = config.quantization_parameters.default_integer_bits - f_input = f_output = config.quantization_parameters.default_fractional_bits + i_input = i_output = i_weight = i_bias = config.quantization_parameters.default_integer_bits + f_input = f_output = f_weight = f_bias = config.quantization_parameters.default_fractional_bits if isinstance(layer, ReLU): f_input += 1 f_output += 1 # Unsigned, add 1 bit to default value only @@ -1152,13 +1368,28 @@ def get_quantization_bits_activations(config, layer, new_layer): f_input = layer_config["input"]["fractional_bits"] if "quantize" in layer_config["input"]: new_layer.quantize_input = layer_config["input"]["quantize"] + if "weight" in layer_config: + if "integer_bits" in layer_config["weight"]: + i_weight = layer_config["weight"]["integer_bits"] + if "fractional_bits" in layer_config["weight"]: + f_weight = layer_config["weight"]["fractional_bits"] + if "bias" in layer_config: + if "integer_bits" in layer_config["bias"]: + i_bias = layer_config["bias"]["integer_bits"] + if "fractional_bits" in layer_config["bias"]: + f_bias = layer_config["bias"]["fractional_bits"] if "output" in layer_config: if "integer_bits" in layer_config["output"]: i_output = layer_config["output"]["integer_bits"] if "fractional_bits" in layer_config["output"]: f_output = layer_config["output"]["fractional_bits"] if "quantize" in layer_config["output"]: - new_layer.quantize_input = layer_config["output"]["quantize"] + new_layer.quantize_output = layer_config["output"]["quantize"] + if isinstance(layer, BatchNormalization): + new_layer.i_weight = i_weight + new_layer.f_weight = f_weight + new_layer.i_bias = i_bias + new_layer.f_bias = f_bias new_layer.i_input = i_input new_layer.f_input = f_input new_layer.i_output = i_output @@ -1215,6 +1446,10 @@ def set_quantization_bits_weight_layers(config, layer, new_layer): if "input" in layer_config: if "quantize" in layer_config["input"]: new_layer.quantize_input = layer_config["input"]["quantize"] + if "integer_bits" in layer_config["input"]: + new_layer.i_input = layer_config["input"]["integer_bits"] + if "fractional_bits" in layer_config["input"]: + new_layer.f_input = layer_config["input"]["fractional_bits"] if "weight" in layer_config: i_bits_w = layer_config["weight"]["integer_bits"] f_bits_w = layer_config["weight"]["fractional_bits"] @@ -1224,6 +1459,10 @@ def set_quantization_bits_weight_layers(config, layer, new_layer): if "output" in layer_config: if "quantize" in layer_config["output"]: new_layer.quantize_output = layer_config["output"]["quantize"] + if "integer_bits" in layer_config["output"]: + new_layer.i_output = layer_config["output"]["integer_bits"] + if "fractional_bits" in layer_config["output"]: + new_layer.f_output = layer_config["output"]["fractional_bits"] new_layer.i_weight = i_bits_w new_layer.f_weight = f_bits_w new_layer.i_bias = i_bits_b @@ -1313,6 +1552,7 @@ def 
add_default_layer_quantization_pruning_to_config_tf(model, config): custom_scheme["layer_specific"][layer.name] = { "input": {"quantize": True, "integer_bits": 0.0, "fractional_bits": 7.0}, "weight": {"integer_bits": 0.0, "fractional_bits": 7.0}, + "bias": {"integer_bits": 0.0, "fractional_bits": 7.0}, } config.quantization_parameters.layer_specific = custom_scheme["layer_specific"] config.pruning_parameters.disable_pruning_for_layers = custom_scheme["disable_pruning_for_layers"] diff --git a/src/pquant/core/torch_impl/compressed_layers_torch.py b/src/pquant/core/torch_impl/compressed_layers_torch.py index f0cc137..9439b28 100644 --- a/src/pquant/core/torch_impl/compressed_layers_torch.py +++ b/src/pquant/core/torch_impl/compressed_layers_torch.py @@ -46,9 +46,23 @@ def __init__(self, config, layer, layer_type, quantize_input=True, quantize_outp self.use_fitcompress = config.fitcompress_parameters.enable_fitcompress self.hgq_gamma = config.quantization_parameters.hgq_gamma self.final_compression_done = False + self.built = False + self.parallelization_factor = -1 + self.hgq_beta = config["quantization_parameters"]["hgq_beta"] + self.input_shape = None - def build(self): + def build(self, input_shape): # Build function to delay quantizer creation until after custom i,f bits have been set + self.input_quantizer = PyTorchQuantizer( + torch.tensor(self.data_k), + self.i_input, + self.f_input, + self.overflow, + self.round_mode, + self.use_hgq, + True, + self.hgq_gamma, + ) self.weight_quantizer = PyTorchQuantizer( torch.tensor(self.weight_k), self.i_weight, @@ -71,16 +85,7 @@ def build(self): False, self.hgq_gamma, ) - self.input_quantizer = PyTorchQuantizer( - torch.tensor(self.data_k), - self.i_input, - self.f_input, - self.overflow, - self.round_mode, - self.use_hgq, - True, - self.hgq_gamma, - ) + if self.quantize_output: self.output_quantizer = PyTorchQuantizer( torch.tensor(self.data_k), @@ -93,6 +98,23 @@ def build(self): self.hgq_gamma, ) + self.n_parallel = ops.prod(tuple(input_shape)[1:-1]) + self.parallelization_factor = self.parallelization_factor if self.parallelization_factor > 0 else self.n_parallel + self.built = True + self.input_shape = input_shape + + def get_weight_quantization_bits(self): + return self.weight_quantizer.get_quantization_bits() + + def get_bias_quantization_bits(self): + return self.bias_quantizer.get_quantization_bits() + + def get_input_quantization_bits(self): + return self.input_quantizer.get_quantization_bits() + + def get_output_quantization_bits(self): + return self.output_quantizer.get_quantization_bits() + def apply_final_compression(self): weight, bias = self.prune_and_quantize(self.weight, self.bias) self.weight.data = weight @@ -106,25 +128,20 @@ def save_weights(self): def rewind_weights(self): self.weight.data = self.init_weight.clone() + def ebops(self): + return 0.0 + def hgq_loss(self): if self.pruning_layer.is_pretraining or not self.use_hgq: return 0.0 - loss = ( - torch.sum(self.weight_quantizer.quantizer.quantizer.i) + torch.sum(self.weight_quantizer.quantizer.quantizer.f) - ) * self.hgq_gamma + loss = self.hgq_beta * self.ebops() + loss += self.weight_quantizer.hgq_loss() if self.bias is not None: - loss += ( - torch.sum(self.bias_quantizer.quantizer.quantizer.i) + torch.sum(self.bias_quantizer.quantizer.quantizer.f) - ) * self.hgq_gamma + loss += self.bias_quantizer.hgq_loss() if self.quantize_input: - loss += ( - torch.sum(self.input_quantizer.quantizer.quantizer.i) + torch.sum(self.input_quantizer.quantizer.quantizer.f) - ) * 
self.hgq_gamma + loss += self.input_quantizer.hgq_loss() if self.quantize_output: - loss += ( - torch.sum(self.output_quantizer.quantizer.quantizer.i) - + torch.sum(self.output_quantizer.quantizer.quantizer.f) - ) * self.hgq_gamma + loss += self.output_quantizer.hgq_loss() return loss def quantize(self, weight, bias): @@ -150,6 +167,8 @@ def prune_and_quantize(self, weight, bias): return weight, bias def pre_forward(self, weight, bias, x): + if not self.built: + self.build(x.shape) if self.quantize_input: if self.use_hgq and not self.input_quantizer.quantizer.built: self.input_quantizer.quantizer.build(x.shape) @@ -188,6 +207,17 @@ def __init__(self, config, layer, layer_type): def post_pre_train_function(self): self.is_pretraining = False + def ebops(self): + bw_inp = self.input_quantizer.quantizer.bits_(self.input_shape) + bw_ker = self.weight_quantizer.quantizer.bits_(ops.shape(self.weight)) + ebops = ops.sum(F.linear(bw_inp, bw_ker)) + ebops = ebops * self.n_parallel / self.parallelization_factor + if self.bias is not None: + bw_bias = self.bias_quantizer.quantizer.bits_(ops.shape(self.bias)) + size = ops.cast(ops.prod(list(self.input_shape)), self.weight.dtype) + ebops += ops.mean(bw_bias) * size + return ebops + def forward(self, x): weight, bias, x = self.pre_forward(self.weight, self.bias, x) x = F.linear(x, weight, bias) @@ -212,6 +242,24 @@ def __init__(self, config, layer, layer_type): def post_pre_train_function(self): self.is_pretraining = False + def ebops(self): + bw_inp = self.input_quantizer.quantizer.bits_(self.input_shape) + bw_ker = self.weight_quantizer.quantizer.bits_(ops.shape(self.weight)) + if self.parallelization_factor < 0: + ebops = ops.sum(F.conv2d(bw_inp, bw_ker, stride=self.stride, padding=self.padding, dilation=self.dilation)) + else: + reduce_axis_kernel = tuple(range(2, 4)) + reduce_axis_input = (0,) + tuple(range(2, 4)) + + bw_inp = ops.max(bw_inp, axis=reduce_axis_input) + bw_ker = ops.sum(bw_ker, axis=reduce_axis_kernel) + ebops = ops.sum(bw_inp[None, :] * bw_ker) + if self.bias is not None: + size = ops.cast(ops.prod(list(self.input_shape)), self.weight.dtype) + bw_bias = self.bias_quantizer.quantizer.bits_(ops.shape(self.bias)) + ebops += ops.mean(bw_bias) * size + return ebops + def forward(self, x): weight, bias, x = self.pre_forward(self.weight, self.bias, x) self.pre_forward(self.weight, self.bias, x) @@ -246,6 +294,24 @@ def __init__(self, config, layer, layer_type): def post_pre_train_function(self): self.is_pretraining = False + def ebops(self): + bw_inp = self.input_quantizer.quantizer.bits_(self.input_shape) + bw_ker = self.weight_quantizer.quantizer.bits_(ops.shape(self.weight)) + if self.parallelization_factor < 0: + ebops = ops.sum(F.conv1d(bw_inp, bw_ker, stride=self.stride, padding=self.padding, dilation=self.dilation)) + else: + reduce_axis_kernel = tuple(range(2, 3)) + reduce_axis_input = (0,) + tuple(range(2, 3)) + + bw_inp = ops.max(bw_inp, axis=reduce_axis_input) + bw_ker = ops.sum(bw_ker, axis=reduce_axis_kernel) + ebops = ops.sum(bw_inp[None, :] * bw_ker) + if self.bias is not None: + size = ops.cast(ops.prod(list(self.input_shape)), self.weight.dtype) + bw_bias = self.bias_quantizer.quantizer.bits_(ops.shape(self.bias)) + ebops += ops.mean(bw_bias) * size + return ebops + def forward(self, x): weight, bias, x = self.pre_forward(self.weight, self.bias, x) x = F.conv1d( @@ -288,6 +354,8 @@ def __init__(self, config, layer, quantize_input=True, quantize_output=False): self.use_hgq = 
config.quantization_parameters.use_high_granularity_quantization self.pooling = layer self.enable_quantization = config.quantization_parameters.enable_quantization + self.hgq_gamma = config.quantization_parameters.hgq_gamma + self.hgq_beta = config.quantization_parameters.hgq_beta self.use_fitcompress = config.fitcompress_parameters.enable_fitcompress self.post_fitcompress_calibration = False self.saved_inputs = [] @@ -316,22 +384,34 @@ def build(self, input_shape): is_data=True, hgq_gamma=self.hgq_gamma, ) + self.input_shape = input_shape if self.use_hgq: self.input_quantizer.quantizer.build(input_shape) output_shape = self.pooling(torch.rand(input_shape)).shape self.output_quantizer.quantizer.build(output_shape) - def set_bits(self, i, f): - self.i = torch.tensor(i) - self.f = torch.tensor(f) + def get_input_quantization_bits(self): + return self.input_quantizer.get_quantization_bits() + + def get_output_quantization_bits(self): + return self.output_quantizer.get_quantization_bits() def post_pre_train_function(self): self.is_pretraining = False + def ebops(self): + bw_inp = self.input_quantizer.quantizer.bits_(self.input_shape) + return torch.sum(bw_inp) + def hgq_loss(self): - if self.is_pretraining: - return 0.0 - return (torch.sum(self.input_quantizer.quantizer.i) + torch.sum(self.input_quantizer.quantizer.f)) * self.config.quantization_parameters.hgq_gamma + if self.is_pretraining or not self.use_hgq: + return torch.tensor(0.0) + loss = self.hgq_beta * self.ebops() + if self.quantize_input: + loss += self.input_quantizer.hgq_loss() + if self.quantize_output: + loss += self.output_quantizer.hgq_loss() + return loss def pre_pooling(self, x): if not hasattr(self, "input_quantizer"): @@ -373,12 +453,15 @@ def __init__( quantize_input=True, ): super().__init__(num_features, eps, momentum, affine, track_running_stats, device=device, dtype=dtype) - self.f_weight = self.f_input = torch.tensor(config["quantization_parameters"]["default_fractional_bits"]) - self.i_weight = self.i_input = torch.tensor(config["quantization_parameters"]["default_integer_bits"]) + self.f_weight = self.f_bias = self.f_input = torch.tensor( + config["quantization_parameters"]["default_fractional_bits"] + ) + self.i_weight = self.i_bias = self.i_input = torch.tensor(config["quantization_parameters"]["default_integer_bits"]) self.overflow = config["quantization_parameters"]["overflow"] self.round_mode = config["quantization_parameters"]["round_mode"] self.use_hgq = config["quantization_parameters"]["use_high_granularity_quantization"] self.hgq_gamma = config["quantization_parameters"]["hgq_gamma"] + self.hgq_beta = config["quantization_parameters"]["hgq_beta"] self.enable_quantization = config["quantization_parameters"]["enable_quantization"] self.config = config self.quantize_input = quantize_input @@ -388,8 +471,9 @@ def __init__( del self._parameters["bias"] self.built = False self.final_compression_done = False + self.is_pretraining = True - def build(self): + def build(self, input_shape): self.built = True self.input_quantizer = PyTorchQuantizer( k=torch.tensor(1.0), @@ -401,40 +485,79 @@ def build(self): is_data=True, hgq_gamma=self.hgq_gamma, ) - self.parameter_quantizer = PyTorchQuantizer( + self.weight_quantizer = PyTorchQuantizer( k=torch.tensor(1.0), i=self.i_weight, f=self.f_weight, round_mode=self.round_mode, overflow=self.overflow, is_data=False, - is_heterogeneous=False, + is_heterogeneous=self.use_hgq, + ) + self.bias_quantizer = PyTorchQuantizer( + k=torch.tensor(1.0), + i=self.i_bias, + 
f=self.f_bias, + round_mode=self.round_mode, + overflow=self.overflow, + is_data=False, + is_heterogeneous=self.use_hgq, ) + shape = [1] * len(input_shape) + shape[1] = input_shape[1] + self._shape = tuple(shape) + self.input_shape = input_shape def apply_final_compression(self): self._weight.data = self.weight self._bias.data = self.bias self.final_compression_done = True + def get_input_quantization_bits(self): + return self.input_quantizer.get_quantization_bits() + + def get_weight_quantization_bits(self): + return self.weight_quantizer.get_quantization_bits() + + def get_bias_quantization_bits(self): + return self.bias_quantizer.get_quantization_bits() + @property def weight(self): if self.enable_quantization and not self.final_compression_done: - return self.parameter_quantizer(self._weight) + return self.weight_quantizer(self._weight) return self._weight @property def bias(self): if self.enable_quantization and not self.final_compression_done: - return self.parameter_quantizer(self._bias) + return self.bias_quantizer(self._bias) return self._bias - def set_quantization_bits(self, i, f): - self.i = torch.tensor(i) - self.f = torch.tensor(f) + def ebops(self): + bw_inp = self.input_quantizer.quantizer.bits_(self.input_shape) + bw_ker = ops.reshape(self.weight_quantizer.quantizer.bits_(self.running_mean.shape), self._shape) + bw_bias = ops.reshape(self.bias_quantizer.quantizer.bits_(self.running_mean.shape), self._shape) + size = ops.cast(ops.prod(list(self.input_shape)), self._weight.dtype) + ebops = ops.sum(bw_inp * bw_ker) + ops.mean(bw_bias) * size + return ebops + + def hgq_loss(self): + if self.is_pretraining or not self.use_hgq: + return ops.convert_to_tensor(0.0) + loss = self.hgq_beta * self.ebops() + loss += self.weight_quantizer.hgq_loss() + loss += self.bias_quantizer.hgq_loss() + if self.quantize_input: + loss += self.input_quantizer.hgq_loss() + return loss + + def post_pretrain_function(self): + self.is_pretraining = False def forward(self, input: torch.Tensor) -> torch.Tensor: if not self.built: - self.build() + self.build(input.shape) if self.quantize_input and self.enable_quantization: if self.use_hgq and not self.input_quantizer.quantizer.built: self.input_quantizer.quantizer.build(input.shape) @@ -442,11 +565,14 @@ def forward(self, input: torch.Tensor) -> torch.Tensor: return super().forward(input) -class QuantizedActivationTorchWrapper(torch.nn.Module): +class QuantizedActivation(torch.nn.Module): def __init__(self, activation): super().__init__() self.activation = activation + def hgq_loss(self): + return self.activation.hgq_loss() + def forward(self, x): return self.activation(x) @@ -455,9 +581,9 @@ class PyTorchQuantizer(nn.Module): # HGQ quantizer wrapper def __init__(self, k, i, f, overflow, round_mode, is_heterogeneous, is_data, hgq_gamma=0): super().__init__() - self.k = 1.0 - self.i = i - self.f = f + self.k = torch.nn.Parameter(torch.tensor(k), requires_grad=False) + self.i = torch.nn.Parameter(torch.tensor(i), requires_grad=False) + self.f = torch.nn.Parameter(torch.tensor(f), requires_grad=False) self.overflow = overflow self.round_mode = round_mode self.use_hgq = is_heterogeneous @@ -465,6 +591,19 @@ def __init__(self, k, i, f, overflow, round_mode, is_heterogeneous, is_data, hgq self.is_pretraining = False self.hgq_gamma = hgq_gamma + def get_quantization_bits(self): + if self.use_hgq: + return self.quantizer.quantizer.i, self.quantizer.quantizer.f + else: + return self.i, self.f + + def set_quantization_bits(self, i, f): + if self.use_hgq: + 
self.quantizer.quantizer._i.assign(self.quantizer.quantizer._i * 0.0 + i) + self.quantizer.quantizer._f.assign(self.quantizer.quantizer._f * 0.0 + f) + self.i.data = i + self.f.data = f + def post_pretrain(self): self.is_pretraining = True @@ -509,16 +648,15 @@ def add_layer_specific_quantization_to_model(module, config): layer.quantize_input = quantize if "output" in layer_config: if "integer_bits" in layer_config["output"]: - input_int_bits = torch.tensor(layer_config["output"]["integer_bits"]) - layer.i_input = input_int_bits + output_int_bits = torch.tensor(layer_config["output"]["integer_bits"]) + layer.i_output = output_int_bits if "fractional_bits" in layer_config["output"]: input_fractional_bits = torch.tensor(layer_config["output"]["fractional_bits"]) - layer.f_input = input_fractional_bits + layer.f_output = input_fractional_bits if "quantize" in layer_config["output"]: quantize = layer_config["output"]["quantize"] layer.quantize_output = quantize - layer.build() elif layer.__class__ in [PQBatchNorm2d]: if name in config.quantization_parameters.layer_specific: layer_config = config.quantization_parameters.layer_specific[name] @@ -527,6 +665,11 @@ def add_layer_specific_quantization_to_model(module, config): i = torch.tensor(layer_config["weight"]["integer_bits"]) f = torch.tensor(layer_config["weight"]["fractional_bits"]) layer.i_weight = i layer.f_weight = f + if "bias" in layer_config: + i = torch.tensor(layer_config["bias"]["integer_bits"]) + f = torch.tensor(layer_config["bias"]["fractional_bits"]) + layer.i_bias = i + layer.f_bias = f if "input" in layer_config: if "integer_bits" in layer_config["input"]: input_int_bits = torch.tensor(layer_config["input"]["integer_bits"]) @@ -561,7 +704,7 @@ def add_layer_specific_quantization_to_model(module, config): quantize = layer_config["output"]["quantize"] layer.quantize_output = quantize - elif layer.__class__ == QuantizedActivationTorchWrapper: + elif layer.__class__ == QuantizedActivation: if name in config.quantization_parameters.layer_specific: layer_config = config.quantization_parameters.layer_specific[name] if "input" in layer_config: @@ -598,10 +741,10 @@ def add_quantized_activations_to_model_layer(module, config): # For ReLU, if using default values, add 1 bit since values are unsigned. # Otherwise user provides bits. TODO: Find better way to do this f = config.quantization_parameters.default_fractional_bits + 1 - relu = QuantizedActivationTorchWrapper(QuantizedReLU(config, i_input=i, f_input=f, i_output=i, f_output=f)) + relu = QuantizedActivation(QuantizedReLU(config, i_input=i, f_input=f, i_output=i, f_output=f)) setattr(module, name, relu) elif layer.__class__ in [nn.Tanh]: - tanh = QuantizedActivationTorchWrapper(QuantizedTanh(config, i_input=i, f_input=f, i_output=i, f_output=f)) + tanh = QuantizedActivation(QuantizedTanh(config, i_input=i, f_input=f, i_output=i, f_output=f)) setattr(module, name, tanh) elif layer.__class__ in [nn.AvgPool1d, nn.AvgPool2d, nn.AvgPool3d]: new_layer = QuantizedPooling(config, layer) @@ -750,8 +893,10 @@ def post_pretrain_functions(model, config, train_loader=None, loss_func=None): # layer.pruning_layer.mask = pruning_mask_importance_scores[idx] # idx += 1 - elif isinstance(layer, (QuantizedActivationTorchWrapper, QuantizedPooling)): + elif isinstance(layer, (QuantizedActivation, QuantizedPooling)): layer.activation.post_pre_train_function() + elif isinstance(layer, PQBatchNorm2d): + layer.post_pretrain_function() if config.pruning_parameters.pruning_method == "pdp" or ( config.pruning_parameters.pruning_method == "wanda" and config.pruning_parameters.calculate_pruning_budget ): @@ -816,16 +961,18 @@ def get_layer_keep_ratio_torch(model): def get_model_losses_torch(model, losses): for layer in model.modules(): + loss = 0.0 if isinstance(layer, (PQConv2d, PQConv1d, PQDense)): - loss = layer.pruning_layer.calculate_additional_loss() + if layer.enable_pruning: + loss += layer.pruning_layer.calculate_additional_loss() if layer.use_hgq: loss += layer.hgq_loss() losses += loss - elif isinstance(layer, (QuantizedActivationTorchWrapper)): + elif isinstance(layer, (QuantizedActivation)): if layer.activation.use_hgq: - losses += layer.activation.hgq_loss() - elif isinstance(layer, QuantizedPooling): + losses += layer.hgq_loss() + elif isinstance(layer, (QuantizedPooling, PQBatchNorm2d)): if layer.use_hgq: losses += layer.hgq_loss() return losses @@ -858,6 +1005,7 @@ def create_default_layer_quantization_pruning_config(model): config["layer_specific"][name] = { "input": {"quantize": True, "integer_bits": 0.0, "fractional_bits": 7.0}, "weight": {"integer_bits": 0, "fractional_bits": 7.0}, + "bias": {"integer_bits": 0, "fractional_bits": 7.0}, } return config
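For context, a minimal sketch (assumed usage, not the library's API) of how the per-layer hgq_loss() terms collected by get_model_losses_torch above fold into a training objective; total_loss and task_loss are hypothetical names:

import torch

def total_loss(model, task_loss):
    # task_loss: e.g. cross-entropy; the HGQ terms act as bit-width regularizers
    reg = torch.tensor(0.0)
    for m in model.modules():
        if hasattr(m, "hgq_loss"):
            reg = reg + m.hgq_loss()
    return task_loss + reg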
From 648d3a7e848b50d9818be78381e9a59556f048e5 Mon Sep 17 00:00:00 2001 From: Roope Niemi Date: Tue, 28 Oct 2025 12:49:03 +0100 Subject: [PATCH 11/37] separate weight and data default quantization values --- src/pquant/configs/config_ap.yaml | 10 ++- src/pquant/configs/config_autosparse.yaml | 10 ++- src/pquant/configs/config_cs.yaml | 10 ++- src/pquant/configs/config_dst.yaml | 10 ++- src/pquant/configs/config_mdmm.yaml | 10 ++- src/pquant/configs/config_pdp.yaml | 10 ++- src/pquant/configs/config_wanda.yaml | 10 ++- .../core/tf_impl/compressed_layers_tf.py | 70 ++++++++++--------- .../torch_impl/compressed_layers_torch.py | 62 ++++++++-------- 9 files changed, 119 insertions(+), 83 deletions(-)
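Taken together, the split defaults introduced by this patch compose with per-layer overrides roughly as below (an illustrative sketch, not part of the patch; the layer name dense_1 is hypothetical, the keys mirror the diffs that follow):

quantization_parameters:
  default_weight_integer_bits: 0.
  default_weight_fractional_bits: 7.
  default_data_integer_bits: 0.
  default_data_fractional_bits: 7.
  quantize_input: true
  quantize_output: false
  layer_specific:
    dense_1:
      input: {quantize: true, integer_bits: 0., fractional_bits: 7.}
      weight: {integer_bits: 0., fractional_bits: 5.}
      bias: {integer_bits: 0., fractional_bits: 7.}
      output: {quantize: false, integer_bits: 0., fractional_bits: 7.}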
diff --git a/src/pquant/configs/config_ap.yaml b/src/pquant/configs/config_ap.yaml index 7ea2dae..fd46147 100644 --- a/src/pquant/configs/config_ap.yaml +++ b/src/pquant/configs/config_ap.yaml @@ -8,10 +8,14 @@ pruning_parameters: t_delta: 100 t_start_collecting_batch: 100 quantization_parameters: - default_integer_bits: 0. - default_fractional_bits: 7. - default_data_keep_negatives: 0. default_weight_keep_negatives: 1. + default_weight_integer_bits: 0. + default_weight_fractional_bits: 7. + default_data_keep_negatives: 0. + default_data_integer_bits: 0. + default_data_fractional_bits: 7. + quantize_input: true + quantize_output: false enable_quantization: true hgq_beta: 1e-5 hgq_gamma: 0.0003 diff --git a/src/pquant/configs/config_autosparse.yaml b/src/pquant/configs/config_autosparse.yaml index 28ae0af..6f66bf1 100644 --- a/src/pquant/configs/config_autosparse.yaml +++ b/src/pquant/configs/config_autosparse.yaml @@ -11,10 +11,14 @@ pruning_parameters: threshold_init: -5.0 threshold_type: channelwise quantization_parameters: - default_integer_bits: 0. - default_fractional_bits: 7. - default_data_keep_negatives: 0. default_weight_keep_negatives: 1. + default_weight_integer_bits: 0. + default_weight_fractional_bits: 7. + default_data_keep_negatives: 0. + default_data_integer_bits: 0. + default_data_fractional_bits: 7. + quantize_input: true + quantize_output: false enable_quantization: true hgq_beta: 1e-5 hgq_gamma: 0.0003 diff --git a/src/pquant/configs/config_cs.yaml b/src/pquant/configs/config_cs.yaml index 9bd2528..65b085d 100644 --- a/src/pquant/configs/config_cs.yaml +++ b/src/pquant/configs/config_cs.yaml @@ -7,10 +7,14 @@ pruning_parameters: threshold_decay: 1.0e-09 threshold_init: 0 quantization_parameters: - default_integer_bits: 0. - default_fractional_bits: 7. - default_data_keep_negatives: 0. default_weight_keep_negatives: 1. + default_weight_integer_bits: 0. + default_weight_fractional_bits: 7. + default_data_keep_negatives: 0. + default_data_integer_bits: 0. + default_data_fractional_bits: 7. + quantize_input: true + quantize_output: false enable_quantization: true hgq_beta: 1e-5 hgq_gamma: 0.0003 diff --git a/src/pquant/configs/config_dst.yaml b/src/pquant/configs/config_dst.yaml index 956fdc9..074c64f 100644 --- a/src/pquant/configs/config_dst.yaml +++ b/src/pquant/configs/config_dst.yaml @@ -9,10 +9,14 @@ pruning_parameters: threshold_init: 0.0 threshold_type: weightwise quantization_parameters: - default_integer_bits: 0. - default_fractional_bits: 7. - default_data_keep_negatives: 0. default_weight_keep_negatives: 1. + default_weight_integer_bits: 0. + default_weight_fractional_bits: 7. + default_data_keep_negatives: 0. + default_data_integer_bits: 0. + default_data_fractional_bits: 7. + quantize_input: true + quantize_output: false enable_quantization: true hgq_beta: 1e-5 hgq_gamma: 0.0003 diff --git a/src/pquant/configs/config_mdmm.yaml b/src/pquant/configs/config_mdmm.yaml index 6ecde47..4a79da6 100644 --- a/src/pquant/configs/config_mdmm.yaml +++ b/src/pquant/configs/config_mdmm.yaml @@ -18,10 +18,14 @@ pruning_parameters: quantization_parameters: enable_quantization: true - default_integer_bits: 0. - default_fractional_bits: 7. - default_data_keep_negatives: 0. default_weight_keep_negatives: 1. + default_weight_integer_bits: 0. + default_weight_fractional_bits: 7. + default_data_keep_negatives: 0. + default_data_integer_bits: 0. + default_data_fractional_bits: 7. + quantize_input: true + quantize_output: false hgq_beta: 1e-5 hgq_gamma: 0.0003 hgq_heterogeneous: True diff --git a/src/pquant/configs/config_pdp.yaml b/src/pquant/configs/config_pdp.yaml index 20e1ed7..a561393 100644 --- a/src/pquant/configs/config_pdp.yaml +++ b/src/pquant/configs/config_pdp.yaml @@ -9,10 +9,14 @@ pruning_parameters: threshold_decay: 0. structured_pruning: false quantization_parameters: - default_integer_bits: 0. - default_fractional_bits: 7. - default_data_keep_negatives: 0.
default_weight_keep_negatives: 1. + default_weight_integer_bits: 0. + default_weight_fractional_bits: 7. + default_data_keep_negatives: 0. + default_data_integer_bits: 0. + default_data_fractional_bits: 7. + quantize_input: true + quantize_output: false enable_quantization: true hgq_beta: 1e-5 hgq_gamma: 0.0003 diff --git a/src/pquant/configs/config_wanda.yaml b/src/pquant/configs/config_wanda.yaml index 1609182..46273ac 100644 --- a/src/pquant/configs/config_wanda.yaml +++ b/src/pquant/configs/config_wanda.yaml @@ -11,8 +11,14 @@ pruning_parameters: t_delta: 100 t_start_collecting_batch: 100 quantization_parameters: - default_integer_bits: 0. - default_fractional_bits: 7. + default_weight_keep_negatives: 1. + default_weight_integer_bits: 0. + default_weight_fractional_bits: 7. + default_data_keep_negatives: 0. + default_data_integer_bits: 0. + default_data_fractional_bits: 7. + quantize_input: true + quantize_output: false enable_quantization: true hgq_beta: 1e-5 hgq_gamma: 0.0003 diff --git a/src/pquant/core/tf_impl/compressed_layers_tf.py b/src/pquant/core/tf_impl/compressed_layers_tf.py index 567e1e9..2a8c212 100644 --- a/src/pquant/core/tf_impl/compressed_layers_tf.py +++ b/src/pquant/core/tf_impl/compressed_layers_tf.py @@ -25,8 +25,8 @@ class PQWeightBiasBase(keras.layers.Layer): def __init__(self, config, layer_type, quantize_input=True, quantize_output=False): super().__init__() - i_bits = config.quantization_parameters.default_integer_bits - f_bits = config.quantization_parameters.default_fractional_bits + i_bits = config.quantization_parameters.default_weight_integer_bits + f_bits = config.quantization_parameters.default_weight_fractional_bits self.data_k = config.quantization_parameters.default_data_keep_negatives self.weight_k = config.quantization_parameters.default_weight_keep_negatives self.i_weight = ops.convert_to_tensor(i_bits) @@ -34,8 +34,10 @@ def __init__(self, config, layer_type, quantize_input=True, quantize_output=Fals self.i_bias = ops.convert_to_tensor(i_bits) self.f_bias = ops.convert_to_tensor(f_bits) - self.i_input = self.i_output = ops.convert_to_tensor(i_bits) - self.f_input = self.f_output = ops.convert_to_tensor(f_bits) + self.i_input = self.i_output = ops.convert_to_tensor(config["quantization_parameters"]["default_data_integer_bits"]) + self.f_input = self.f_output = ops.convert_to_tensor( + config["quantization_parameters"]["default_data_fractional_bits"] + ) self.pruning_layer = get_pruning_layer(config=config, layer_type=layer_type) self.pruning_method = config.pruning_parameters.pruning_method @@ -214,8 +216,8 @@ def collect_output(self, x, training): class PQDepthwiseConv2d(PQWeightBiasBase): - def __init__(self, config, layer, layer_type, quantize_input=True, quantize_output=True): - super().__init__(config, layer_type, quantize_input, quantize_output) + def __init__(self, config, layer, quantize_input=True, quantize_output=False): + super().__init__(config, "conv", quantize_input, quantize_output) self.depthwise_regularizer = layer.depthwise_regularizer self.use_bias = layer.use_bias self.strides = layer.strides @@ -282,8 +284,8 @@ def call(self, x, training=None): class PQConv2d(PQWeightBiasBase): - def __init__(self, config, layer, layer_type, quantize_input=True, quantize_output=False): - super().__init__(config, layer_type, quantize_input, quantize_output) + def __init__(self, config, layer, quantize_input=True, quantize_output=False): + super().__init__(config, "conv", quantize_input, quantize_output) self.kernel_regularizer = 
layer.kernel_regularizer self.filters = layer.filters self.use_bias = layer.use_bias @@ -367,12 +369,12 @@ def __init__(self, config, layer, quantize_input=True, quantize_output=True): layer.kernel = layer.depthwise_kernel bias = layer.use_bias layer.use_bias = False - self.depthwise_conv = PQDepthwiseConv2d(config, layer, "conv", quantize_input, False) + self.depthwise_conv = PQDepthwiseConv2d(config, layer, quantize_input, False) layer.kernel_regularizer = layer.pointwise_regularizer layer.kernel_size = 1 layer.kernel = layer.pointwise_kernel layer.use_bias = bias - self.pointwise_conv = PQConv2d(config, layer, "conv", False, quantize_output) + self.pointwise_conv = PQConv2d(config, layer, False, quantize_output) self.do_transpose_data = layer.data_format == "channels_last" def build(self, input_shape): @@ -389,8 +391,8 @@ def call(self, x, training=None): class PQConv1d(PQWeightBiasBase): - def __init__(self, config, layer, layer_type, quantize_input=True, quantize_output=False): - super().__init__(config, layer_type, quantize_input, quantize_output) + def __init__(self, config, layer, quantize_input=True, quantize_output=False): + super().__init__(config, "conv", quantize_input, quantize_output) self.kernel_regularizer = layer.kernel_regularizer self.filters = layer.filters self.use_bias = layer.use_bias @@ -462,8 +464,8 @@ def call(self, x, training=None): class PQDense(PQWeightBiasBase): - def __init__(self, config, layer, layer_type): - super().__init__(config, layer_type) + def __init__(self, config, layer, quantize_input=True, quantize_output=False): + super().__init__(config, "linear", quantize_input, quantize_output) self.kernel_regularizer = layer.kernel_regularizer self.use_bias = layer.use_bias self.units = layer.units @@ -561,12 +563,12 @@ def __init__( self.hgq_beta = config["quantization_parameters"]["hgq_beta"] self.quantize_input = quantize_input self.config = config - self.f_input = self.f_weight = self.f_bias = ops.convert_to_tensor( - config["quantization_parameters"]["default_fractional_bits"] - ) - self.i_input = self.i_weight = self.i_bias = ops.convert_to_tensor( - config["quantization_parameters"]["default_integer_bits"] + self.f_weight = self.f_bias = ops.convert_to_tensor( + config["quantization_parameters"]["default_weight_fractional_bits"] ) + self.i_weight = self.i_bias = ops.convert_to_tensor(config["quantization_parameters"]["default_weight_integer_bits"]) + self.i_input = ops.convert_to_tensor(config["quantization_parameters"]["default_data_integer_bits"]) + self.f_input = ops.convert_to_tensor(config["quantization_parameters"]["default_data_fractional_bits"]) self.final_compression_done = False self.is_pretraining = True @@ -705,8 +707,8 @@ def post_pre_train_function(self): class QuantizedPooling(keras.layers.Layer): def __init__(self, config, layer, quantize_input=True, quantize_output=False): super().__init__() - self.i_input = ops.convert_to_tensor(config.quantization_parameters.default_integer_bits) - self.f_input = ops.convert_to_tensor(config.quantization_parameters.default_fractional_bits) + self.i_input = self.i_output = ops.convert_to_tensor(config.quantization_parameters.default_data_integer_bits) + self.f_input = self.f_output = ops.convert_to_tensor(config.quantization_parameters.default_data_fractional_bits) self.is_pretraining = True @@ -1214,7 +1216,7 @@ def add_compression_layers_tf(model, config, input_shape=None): for layer in model.layers[1:]: act = None if isinstance(layer, DepthwiseConv2D): - new_layer = PQDepthwiseConv2d(config, 
layer, layer_type="conv") + new_layer = PQDepthwiseConv2d(config, layer) set_quantization_bits_weight_layers(config, layer, new_layer) enable_pruning = get_enable_pruning(layer, config) @@ -1227,7 +1229,7 @@ def add_compression_layers_tf(model, config, input_shape=None): x = new_layer(x) act = check_activation(layer, config) elif isinstance(layer, Conv2D): - new_layer = PQConv2d(config, layer, layer_type="conv") + new_layer = PQConv2d(config, layer) set_quantization_bits_weight_layers(config, layer, new_layer) enable_pruning = get_enable_pruning(layer, config) new_layer.set_enable_pruning(enable_pruning) @@ -1260,7 +1262,7 @@ def add_compression_layers_tf(model, config, input_shape=None): x = new_layer(x) act = check_activation(layer, config) elif isinstance(layer, Conv1D): - new_layer = PQConv1d(config, layer, layer_type="conv") + new_layer = PQConv1d(config, layer) set_quantization_bits_weight_layers(config, layer, new_layer) enable_pruning = get_enable_pruning(layer, config) new_layer.set_enable_pruning(enable_pruning) @@ -1272,7 +1274,7 @@ def add_compression_layers_tf(model, config, input_shape=None): x = new_layer(x) act = check_activation(layer, config) elif isinstance(layer, Dense): - new_layer = PQDense(config, layer, layer_type="linear") + new_layer = PQDense(config, layer) set_quantization_bits_weight_layers(config, layer, new_layer) enable_pruning = get_enable_pruning(layer, config) new_layer.set_enable_pruning(enable_pruning) @@ -1337,9 +1339,9 @@ def add_compression_layers_tf(model, config, input_shape=None): return replaced_model -def get_quantization_bits_activations(config, layer, new_layer): - i_input = i_output = i_weight = i_bias = config.quantization_parameters.default_integer_bits - f_input = f_output = f_weight = f_bias = config.quantization_parameters.default_fractional_bits +def set_quantization_bits_activations(config, layer, new_layer): + i_input = i_output = i_weight = i_bias = config.quantization_parameters.default_data_integer_bits + f_input = f_output = f_weight = f_bias = config.quantization_parameters.default_data_fractional_bits if isinstance(layer, ReLU): f_input += 1 f_output += 1 # Unsigned, add 1 bit to default value only @@ -1397,10 +1399,12 @@ def get_quantization_bits_activations(config, layer, new_layer): def set_quantization_bits_weight_layers(config, layer, new_layer): - layer_specific = config["quantization_parameters"]["layer_specific"] + layer_specific = config.quantization_parameters.layer_specific if isinstance(layer, SeparableConv2D): - dw_i_bits_w = pw_i_bits_w = pw_i_bits_b = i_input = i_output = config.quantization_parameters.default_integer_bits - dw_f_bits_w = pw_f_bits_w = pw_f_bits_b = f_input = f_output = config.quantization_parameters.default_fractional_bits + dw_i_bits_w = pw_i_bits_w = pw_i_bits_b = config.quantization_parameters.default_weight_integer_bits + dw_f_bits_w = pw_f_bits_w = pw_f_bits_b = config.quantization_parameters.default_weight_fractional_bits + i_input = i_output = config.quantization_parameters.default_data_integer_bits + f_input = f_output = config.quantization_parameters.default_data_fractional_bits if layer.name in layer_specific: layer_config = layer_specific[layer.name] if "input" in layer_config: @@ -1439,8 +1443,8 @@ def set_quantization_bits_weight_layers(config, layer, new_layer): new_layer.pointwise_conv.i_output = i_output new_layer.pointwise_conv.f_output = f_output else: - i_bits_w = i_bits_b = config.quantization_parameters.default_integer_bits - f_bits_w = f_bits_b = 
config.quantization_parameters.default_fractional_bits + i_bits_w = i_bits_b = config.quantization_parameters.default_weight_integer_bits + f_bits_w = f_bits_b = config.quantization_parameters.default_weight_fractional_bits if layer.name in layer_specific: layer_config = layer_specific[layer.name] if "input" in layer_config: diff --git a/src/pquant/core/torch_impl/compressed_layers_torch.py b/src/pquant/core/torch_impl/compressed_layers_torch.py index 9439b28..277c99a 100644 --- a/src/pquant/core/torch_impl/compressed_layers_torch.py +++ b/src/pquant/core/torch_impl/compressed_layers_torch.py @@ -20,12 +20,8 @@ class PQWeightBiasBase(nn.Module): def __init__(self, config, layer, layer_type, quantize_input=True, quantize_output=False): super().__init__() - self.i_weight = self.i_bias = self.i_input = self.i_output = torch.tensor( - config.quantization_parameters.default_integer_bits - ) - self.f_weight = self.f_bias = self.f_input = self.f_output = torch.tensor( - config.quantization_parameters.default_fractional_bits - ) + self.i_weight = self.i_bias = torch.tensor(config.quantization_parameters.default_weight_integer_bits) + self.f_weight = self.f_bias = torch.tensor(config.quantization_parameters.default_weight_fractional_bits) self.weight = nn.Parameter(layer.weight.clone()) self.pruning_layer = get_pruning_layer(config=config, layer_type=layer_type) @@ -35,6 +31,8 @@ def __init__(self, config, layer, layer_type, quantize_input=True, quantize_outp self.quantize_output = quantize_output self.data_k = config.quantization_parameters.default_data_keep_negatives self.weight_k = config.quantization_parameters.default_weight_keep_negatives + self.i_input = self.i_output = config.quantization_parameters.default_data_integer_bits + self.f_input = self.f_output = config.quantization_parameters.default_data_fractional_bits self.bias = nn.Parameter(layer.bias.clone()) if layer.bias is not None else None self.init_weight = self.weight.clone() self.pruning_first = config.training_parameters.pruning_first @@ -197,8 +195,8 @@ def forward(self, x): class PQDense(PQWeightBiasBase): - def __init__(self, config, layer, layer_type): - super().__init__(config, layer, layer_type) + def __init__(self, config, layer, quantize_input=True, quantize_output=False): + super().__init__(config, layer, "linear", quantize_input, quantize_output) self.in_features = layer.in_features self.out_features = layer.out_features self.use_fitcompress = config.fitcompress_parameters.enable_fitcompress @@ -226,8 +224,8 @@ def forward(self, x): class PQConv2d(PQWeightBiasBase): - def __init__(self, config, layer, layer_type): - super().__init__(config, layer, layer_type) + def __init__(self, config, layer, quantize_input=True, quantize_output=False): + super().__init__(config, layer, "conv", quantize_input, quantize_output) self.stride = layer.stride self.dilation = layer.dilation self.padding = layer.padding @@ -277,8 +275,8 @@ def forward(self, x): class PQConv1d(PQWeightBiasBase): - def __init__(self, config, layer, layer_type): - super().__init__(config, layer, layer_type) + def __init__(self, config, layer, quantize_input=True, quantize_output=False): + super().__init__(config, layer, "conv", quantize_input, quantize_output) self.stride = layer.stride self.dilation = layer.dilation @@ -342,8 +340,8 @@ class QuantizedPooling(nn.Module): def __init__(self, config, layer, quantize_input=True, quantize_output=False): super().__init__() - self.f_output = self.f_input = 
torch.tensor(config.quantization_parameters.default_fractional_bits) - self.i_output = self.i_input = torch.tensor(config.quantization_parameters.default_integer_bits) + self.f_output = self.f_input = torch.tensor(config.quantization_parameters.default_data_fractional_bits) + self.i_output = self.i_input = torch.tensor(config.quantization_parameters.default_data_integer_bits) self.overflow = config.quantization_parameters.overflow self.config = config self.hgq_heterogeneous = config.quantization_parameters.hgq_heterogeneous @@ -396,7 +394,7 @@ def get_input_quantization_bits(self): def get_output_quantization_bits(self): return self.output_quantizer.get_quantization_bits() - def post_pre_train_function(self): + def post_pretrain_function(self): self.is_pretraining = False def ebops(self): @@ -450,13 +448,13 @@ def __init__( track_running_stats: bool = True, device=None, dtype=None, - quantize_input=True, + quantize_input=False, ): super().__init__(num_features, eps, momentum, affine, track_running_stats, device=device, dtype=dtype) - self.f_weight = self.f_bias = self.f_input = torch.tensor( - config["quantization_parameters"]["default_fractional_bits"] - ) - self.i_weight = self.i_bias = self.i_input = torch.tensor(config["quantization_parameters"]["default_integer_bits"]) + self.f_weight = self.f_bias = torch.tensor(config["quantization_parameters"]["default_weight_fractional_bits"]) + self.i_weight = self.i_bias = torch.tensor(config["quantization_parameters"]["default_weight_integer_bits"]) + self.i_input = config["quantization_parameters"]["default_data_integer_bits"] + self.f_input = config["quantization_parameters"]["default_data_fractional_bits"] self.overflow = config["quantization_parameters"]["overflow"] self.round_mode = config["quantization_parameters"]["round_mode"] self.use_hgq = config["quantization_parameters"]["use_high_granularity_quantization"] @@ -733,21 +731,23 @@ def add_layer_specific_quantization_to_model(module, config): def add_quantized_activations_to_model_layer(module, config): if not config.quantization_parameters.enable_quantization: return module + quantize_input = config["quantization_parameters"]["quantize_input"] + quantize_output = config["quantization_parameters"]["quantize_output"] # Replaces ReLU and Tanh layers with quantized versions for name, layer in module.named_children(): - i = config.quantization_parameters.default_integer_bits - f = config.quantization_parameters.default_fractional_bits + i = config.quantization_parameters.default_data_integer_bits + f = config.quantization_parameters.default_data_fractional_bits if layer.__class__ in [nn.ReLU]: # For ReLU, if using default values, add 1 bit since values are unsigned. # Otherwise user provides bits. 
TODO: Find better way to do this - f = config.quantization_parameters.default_fractional_bits + 1 + f = config.quantization_parameters.default_data_fractional_bits + 1 relu = QuantizedActivation(QuantizedReLU(config, i_input=i, f_input=f, i_output=i, f_output=f)) setattr(module, name, relu) elif layer.__class__ in [nn.Tanh]: tanh = QuantizedActivation(QuantizedTanh(config, i_input=i, f_input=f, i_output=i, f_output=f)) setattr(module, name, tanh) elif layer.__class__ in [nn.AvgPool1d, nn.AvgPool2d, nn.AvgPool3d]: - new_layer = QuantizedPooling(config, layer) + new_layer = QuantizedPooling(config, layer, quantize_input, quantize_output) setattr(module, name, new_layer) elif layer.__class__ == nn.BatchNorm2d: new_layer = PQBatchNorm2d( @@ -757,7 +757,7 @@ def add_quantized_activations_to_model_layer(module, config): momentum=layer.momentum, affine=layer.affine, track_running_stats=layer.track_running_stats, - quantize_input=True, + quantize_input=quantize_input, ) setattr(module, name, new_layer) else: @@ -807,17 +807,19 @@ def disable_pruning_from_layers(module, config): def add_pruning_to_model(module, config): + quantize_input = config["quantization_parameters"]["quantize_input"] + quantize_output = config["quantization_parameters"]["quantize_output"] for name, layer in module.named_children(): if layer.__class__ is nn.Linear: - sparse_layer = PQDense(config, layer, "linear") + sparse_layer = PQDense(config, layer, quantize_input, quantize_output) sparse_layer.pruning_layer.build(layer.weight.shape) setattr(module, name, sparse_layer) elif layer.__class__ is nn.Conv2d: - sparse_layer = PQConv2d(config, layer, "conv") + sparse_layer = PQConv2d(config, layer, quantize_input, quantize_output) sparse_layer.pruning_layer.build(layer.weight.shape) setattr(module, name, sparse_layer) elif layer.__class__ is nn.Conv1d: - sparse_layer = PQConv1d(config, layer, "conv") + sparse_layer = PQConv1d(config, layer, quantize_input, quantize_output) sparse_layer.pruning_layer.build(layer.weight.shape) setattr(module, name, sparse_layer) else: @@ -893,9 +895,9 @@ def post_pretrain_functions(model, config, train_loader=None, loss_func=None): # layer.pruning_layer.mask = pruning_mask_importance_scores[idx] # idx += 1 - elif isinstance(layer, (QuantizedActivation, QuantizedPooling)): + elif isinstance(layer, QuantizedActivation): layer.activation.post_pre_train_function() - elif isinstance(layer, PQBatchNorm2d): + elif isinstance(layer, (PQBatchNorm2d, QuantizedPooling)): layer.post_pretrain_function() if config.pruning_parameters.pruning_method == "pdp" or ( config.pruning_parameters.pruning_method == "wanda" and config.pruning_parameters.calculate_pruning_budget From a37e4f58998a4c5b6c1411ba19c15f757b26567b Mon Sep 17 00:00:00 2001 From: Roope Niemi Date: Wed, 29 Oct 2025 11:36:06 +0100 Subject: [PATCH 12/37] direct layer use torch --- src/pquant/core/activations_quantizer.py | 16 +- .../torch_impl/compressed_layers_torch.py | 744 +++++++++++++----- 2 files changed, 566 insertions(+), 194 deletions(-) diff --git a/src/pquant/core/activations_quantizer.py b/src/pquant/core/activations_quantizer.py index b94a9c5..83367eb 100644 --- a/src/pquant/core/activations_quantizer.py +++ b/src/pquant/core/activations_quantizer.py @@ -63,9 +63,9 @@ def build(self, input_shape): def get_input_quantization_bits(self): if self.use_hgq: - return self.input_quantizer.quantizer.i, self.input_quantizer.quantizer.f + return self.input_quantizer.quantizer.k, self.input_quantizer.quantizer.i, self.input_quantizer.quantizer.f else: 
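        # Here, and in the matching input/output accessors below, the non-HGQ branch
        # now also returns the keep-negatives flag k, so both branches of every
        # accessor yield a (k, i, f) triple instead of a bare (i, f) pair.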
- return self.i_input, self.f_input + return self.k, self.i_input, self.f_input def set_input_quantization_bits(self, i, f): if self.use_hgq: @@ -77,9 +77,9 @@ def set_input_quantization_bits(self, i, f): def get_output_quantization_bits(self): if self.use_hgq: - return self.output_quantizer.quantizer.i, self.output_quantizer.quantizer.f + return self.output_quantizer.quantizer.k, self.output_quantizer.quantizer.i, self.output_quantizer.quantizer.f else: - return self.i_output, self.f_output + return self.k, self.i_output, self.f_output def set_output_quantization_bits(self, i, f): if self.use_hgq: @@ -200,9 +200,9 @@ def build(self, input_shape): def get_input_quantization_bits(self): if self.use_hgq: - return self.input_quantizer.quantizer.i, self.input_quantizer.quantizer.f + return self.input_quantizer.quantizer.k, self.input_quantizer.quantizer.i, self.input_quantizer.quantizer.f else: - return self.i_input, self.f_input + return self.k, self.i_input, self.f_input def set_input_quantization_bits(self, i, f): if self.use_hgq: @@ -214,9 +214,9 @@ def set_input_quantization_bits(self, i, f): def get_output_quantization_bits(self): if self.use_hgq: - return self.output_quantizer.quantizer.i, self.output_quantizer.quantizer.f + return self.output_quantizer.quantizer.k, self.output_quantizer.quantizer.i, self.output_quantizer.quantizer.f else: - return self.i_output, self.f_output + return self.k, self.i_output, self.f_output def set_output_quantization_bits(self, i, f): if self.use_hgq: diff --git a/src/pquant/core/torch_impl/compressed_layers_torch.py b/src/pquant/core/torch_impl/compressed_layers_torch.py index 277c99a..ad2e5e8 100644 --- a/src/pquant/core/torch_impl/compressed_layers_torch.py +++ b/src/pquant/core/torch_impl/compressed_layers_torch.py @@ -1,10 +1,12 @@ import typing +from typing import Optional, Tuple, TypeVar, Union import torch import torch.nn as nn import torch.nn.functional as F from torch.fx import symbolic_trace +from torch.nn.common_types import _size_1_t, _size_2_t from pquant.core.activations_quantizer import QuantizedReLU, QuantizedTanh from pquant.core.utils import get_pruning_layer @@ -16,25 +18,57 @@ from pquant.core.quantizer_functions import create_quantizer +T = TypeVar("T") + class PQWeightBiasBase(nn.Module): - def __init__(self, config, layer, layer_type, quantize_input=True, quantize_output=False): - super().__init__() - self.i_weight = self.i_bias = torch.tensor(config.quantization_parameters.default_weight_integer_bits) - self.f_weight = self.f_bias = torch.tensor(config.quantization_parameters.default_weight_fractional_bits) + def __init__( + self, + config, + layer_type, + quantize_input=True, + quantize_output=False, + input_quantization_bits: Tuple[T, T, T] = None, + weight_quantization_bits: Tuple[T, T, T] = None, + bias_quantization_bits: Tuple[T, T, T] = None, + output_quantization_bits: Tuple[T, T, T] = None, + *args, + **kwargs, + ): + super().__init__(**kwargs) + + if input_quantization_bits is not None: + self.k_input, self.i_input, self.f_input = input_quantization_bits + else: + self.k_input = config.quantization_parameters.default_data_keep_negatives + self.i_input = config.quantization_parameters.default_data_integer_bits + self.f_input = config.quantization_parameters.default_data_fractional_bits + + if weight_quantization_bits is not None: + self.k_weight, self.i_weight, self.f_weight = weight_quantization_bits + else: + self.k_weight = config.quantization_parameters.default_weight_keep_negatives + self.i_weight = 
config.quantization_parameters.default_weight_integer_bits + self.f_weight = config.quantization_parameters.default_weight_fractional_bits + if bias_quantization_bits is not None: + self.k_bias, self.i_bias, self.f_bias = bias_quantization_bits + else: + self.k_bias = config.quantization_parameters.default_weight_keep_negatives + self.i_bias = config.quantization_parameters.default_weight_integer_bits + self.f_bias = config.quantization_parameters.default_weight_fractional_bits + + if output_quantization_bits is not None: + self.k_output, self.i_output, self.f_output = output_quantization_bits + else: + self.k_output = config.quantization_parameters.default_data_keep_negatives + self.i_output = config.quantization_parameters.default_data_integer_bits + self.f_output = config.quantization_parameters.default_data_fractional_bits - self.weight = nn.Parameter(layer.weight.clone()) self.pruning_layer = get_pruning_layer(config=config, layer_type=layer_type) self.pruning_method = config.pruning_parameters.pruning_method - self.hgq_heterogeneous = config.quantization_parameters.hgq_heterogeneous self.quantize_input = quantize_input self.quantize_output = quantize_output - self.data_k = config.quantization_parameters.default_data_keep_negatives - self.weight_k = config.quantization_parameters.default_weight_keep_negatives - self.i_input = self.i_output = config.quantization_parameters.default_data_integer_bits - self.f_input = self.f_output = config.quantization_parameters.default_data_fractional_bits - self.bias = nn.Parameter(layer.bias.clone()) if layer.bias is not None else None - self.init_weight = self.weight.clone() + self.pruning_first = config.training_parameters.pruning_first self.enable_quantization = config.quantization_parameters.enable_quantization self.round_mode = config.quantization_parameters.round_mode @@ -46,15 +80,15 @@ def __init__(self, config, layer, layer_type, quantize_input=True, quantize_outp self.final_compression_done = False self.built = False self.parallelization_factor = -1 - self.hgq_beta = config["quantization_parameters"]["hgq_beta"] + self.hgq_beta = config.quantization_parameters.hgq_beta self.input_shape = None def build(self, input_shape): # Build function to delay quantizer creation until after custom i,f bits have been set self.input_quantizer = PyTorchQuantizer( - torch.tensor(self.data_k), - self.i_input, - self.f_input, + torch.tensor(self.k_input), + torch.tensor(self.i_input), + torch.tensor(self.f_input), self.overflow, self.round_mode, self.use_hgq, @@ -62,9 +96,9 @@ def build(self, input_shape): self.hgq_gamma, ) self.weight_quantizer = PyTorchQuantizer( - torch.tensor(self.weight_k), - self.i_weight, - self.f_weight, + torch.tensor(self.k_weight), + torch.tensor(self.i_weight), + torch.tensor(self.f_weight), self.overflow, self.round_mode, self.use_hgq, @@ -72,29 +106,27 @@ def build(self, input_shape): self.hgq_gamma, ) - if self.bias is not None: - self.bias_quantizer = PyTorchQuantizer( - torch.tensor(self.weight_k), - self.i_bias, - self.f_bias, - self.overflow, - self.round_mode, - self.use_hgq, - False, - self.hgq_gamma, - ) + self.bias_quantizer = PyTorchQuantizer( + torch.tensor(self.k_bias), + torch.tensor(self.i_bias), + torch.tensor(self.f_bias), + self.overflow, + self.round_mode, + self.use_hgq, + False, + self.hgq_gamma, + ) - if self.quantize_output: - self.output_quantizer = PyTorchQuantizer( - torch.tensor(self.data_k), - self.i_output, - self.f_output, - self.overflow, - self.round_mode, - self.use_hgq, - True, - self.hgq_gamma, 
- ) + self.output_quantizer = PyTorchQuantizer( + torch.tensor(self.k_output), + torch.tensor(self.i_output), + torch.tensor(self.f_output), + self.overflow, + self.round_mode, + self.use_hgq, + True, + self.hgq_gamma, + ) self.n_parallel = ops.prod(tuple(input_shape)[1:-1]) self.parallelization_factor = self.parallelization_factor if self.parallelization_factor > 0 else self.n_parallel @@ -114,11 +146,7 @@ def get_output_quantization_bits(self): return self.output_quantizer.get_quantization_bits() def apply_final_compression(self): - weight, bias = self.prune_and_quantize(self.weight, self.bias) - self.weight.data = weight - if self.bias is not None: - self.bias.data = bias - self.final_compression_done = True + pass def save_weights(self): self.init_weight = self.weight.clone() @@ -142,29 +170,17 @@ def hgq_loss(self): loss += self.output_quantizer.hgq_loss() return loss - def quantize(self, weight, bias): + def quantize(self, x, quantizer): if self.enable_quantization: - weight = self.weight_quantizer(weight) - bias = None if bias is None else self.bias_quantizer(bias) - return weight, bias + return quantizer(x) if x is not None else x + return x def prune(self, weight): if self.enable_pruning: weight = self.pruning_layer(weight) return weight - def prune_and_quantize(self, weight, bias): - if self.final_compression_done: - return weight, bias - if self.pruning_first: - weight = self.prune(weight) - weight, bias = self.quantize(weight, bias) - else: - weight, bias = self.quantize(weight, bias) - weight = self.prune(weight) - return weight, bias - - def pre_forward(self, weight, bias, x): + def pre_forward(self, x): if not self.built: self.build(x.shape) if self.quantize_input: @@ -174,8 +190,7 @@ def pre_forward(self, weight, bias, x): x = self.input_quantizer(x) if self.pruning_method == "wanda": self.pruning_layer.collect_input(x, self.weight, self.training) - weight, bias = self.prune_and_quantize(weight, bias) - return weight, bias, x + return x def post_forward(self, x): if self.quantize_output: @@ -187,20 +202,52 @@ def post_forward(self, x): self.pruning_layer.collect_output(x, self.training) return x - def forward(self, x): - weight, bias, x = self.pre_forward(self.weight, self.bias, x) - x = F.linear(x, weight, bias) - x = self.post_forward(x) - return x - -class PQDense(PQWeightBiasBase): - def __init__(self, config, layer, quantize_input=True, quantize_output=False): - super().__init__(config, layer, "linear", quantize_input, quantize_output) - self.in_features = layer.in_features - self.out_features = layer.out_features +class PQDense(PQWeightBiasBase, nn.Linear): + def __init__( + self, + config, + in_features: int, + out_features: int, + bias: bool = True, + quantize_input=True, + quantize_output=False, + device=None, + dtype=None, + input_quantization_bits: Tuple[T, T, T] = None, + weight_quantization_bits: Tuple[T, T, T] = None, + bias_quantization_bits: Tuple[T, T, T] = None, + output_quantization_bits: Tuple[T, T, T] = None, + **kwargs, + ): + super().__init__( + in_features=in_features, + out_features=out_features, + bias=bias, + device=device, + dtype=dtype, + config=config, + layer_type="linear", + quantize_input=quantize_input, + quantize_output=quantize_output, + input_quantization_bits=input_quantization_bits, + weight_quantization_bits=weight_quantization_bits, + bias_quantization_bits=bias_quantization_bits, + output_quantization_bits=output_quantization_bits, + **kwargs, + ) + self.in_features = in_features + self.out_features = out_features 
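        # Below, the trainable tensors are re-registered as _weight/_bias and the
        # inherited weight/bias slots are deleted from _parameters, so that
        # nn.Linear.forward resolves weight and bias through the pruned/quantized
        # property views defined further down. Hypothetical usage: for a layer
        # built as PQDense(config, 16, 8), .weight returns the compressed view
        # while ._weight remains the raw trainable parameter.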
self.use_fitcompress = config.fitcompress_parameters.enable_fitcompress self.is_pretraining = True + self._weight = nn.Parameter(self.weight.clone()).to(self.weight.device) + if bias: + self._bias = nn.Parameter(self.bias.clone()).to(self.bias.device) + else: + self.register_parameter("_bias", None) + del self._parameters["weight"] + del self._parameters["bias"] + self.pruning_layer.build(self._weight.shape) def post_pre_train_function(self): self.is_pretraining = False @@ -216,26 +263,96 @@ def ebops(self): ebops += ops.mean(bw_bias) * size return ebops + @property + def weight(self): + if self.final_compression_done: + return self._weight + if self.pruning_first: + weight = self.prune(self._weight) + return self.quantize(weight, self.weight_quantizer) + else: + weight = self.quantize(self._weight, self.weight_quantizer) + return self.prune(weight) + + @property + def bias(self): + if self.final_compression_done: + return self._bias + bias = self.quantize(self._bias, self.bias_quantizer) + return bias + + def apply_final_compression(self): + self._weight.data = self.weight + if self._bias is not None: + self._bias.data = self.bias + self.final_compression_done = True + def forward(self, x): - weight, bias, x = self.pre_forward(self.weight, self.bias, x) - x = F.linear(x, weight, bias) + x = self.pre_forward(x) + x = super().forward(x) x = self.post_forward(x) return x + def extra_repr(self) -> str: + """ + Return the extra representation of the module. + """ + return ( + f"in_features={self.in_features} " + f"out_features={self.out_features} " + f"bias={self._bias is not None} " + f"quantize_input={self.quantize_input} " + f"quantize_output={self.quantize_output} " + ) + -class PQConv2d(PQWeightBiasBase): - def __init__(self, config, layer, quantize_input=True, quantize_output=False): - super().__init__(config, layer, "conv", quantize_input, quantize_output) - self.stride = layer.stride - self.dilation = layer.dilation - self.padding = layer.padding - self.groups = layer.groups - self.in_channels = layer.in_channels - self.out_channels = layer.out_channels - self.kernel_size = layer.kernel_size - self.padding_mode = layer.padding_mode +class PQConv2d(PQWeightBiasBase, nn.Conv2d): + def __init__( + self, + config, + in_channels: int, + out_channels: int, + kernel_size: _size_2_t, + stride: _size_2_t = 1, + padding: Union[str, _size_2_t] = 0, + dilation: _size_2_t = 1, + groups: int = 1, + bias: bool = True, + padding_mode: str = "zeros", # TODO: refine this type + device=None, + dtype=None, + quantize_input=True, + quantize_output=False, + **kwargs, + ): + super().__init__( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups, + bias=bias, + padding_mode=padding_mode, + device=device, + dtype=dtype, + config=config, + layer_type="conv", + quantize_input=quantize_input, + quantize_output=quantize_output, + **kwargs, + ) self.use_fitcompress = config.fitcompress_parameters.enable_fitcompress self.is_pretraining = True + self._weight = nn.Parameter(self.weight.clone()).to(self.weight.device) + if bias: + self._bias = nn.Parameter(self.bias.clone()).to(self.bias.device) + else: + self.register_parameter("_bias", None) + del self._parameters["weight"] + del self._parameters["bias"] + self.pruning_layer.build(self._weight.shape) def post_pre_train_function(self): self.is_pretraining = False @@ -258,36 +375,103 @@ def ebops(self): ebops += ops.mean(bw_bias) * size return ebops + @property + def 
weight(self):
+        if self.final_compression_done:
+            return self._weight
+        if self.pruning_first:
+            weight = self.prune(self._weight)
+            return self.quantize(weight, self.weight_quantizer)
+        else:
+            weight = self.quantize(self._weight, self.weight_quantizer)
+            return self.prune(weight)
+
+    @property
+    def bias(self):
+        if self.final_compression_done:
+            return self._bias
+        bias = self.quantize(self._bias, self.bias_quantizer)
+        return bias
+
+    def apply_final_compression(self):
+        self._weight.data = self.weight
+        if self._bias is not None:
+            self._bias.data = self.bias
+        self.final_compression_done = True
+
     def forward(self, x):
-        weight, bias, x = self.pre_forward(self.weight, self.bias, x)
-        self.pre_forward(self.weight, self.bias, x)
-        x = F.conv2d(
-            input=x,
-            weight=weight,
-            bias=bias,
-            stride=self.stride,
-            padding=self.padding,
-            dilation=self.dilation,
-            groups=self.groups,
-        )
+        x = self.pre_forward(x)
+        x = super().forward(x)
         x = self.post_forward(x)
         return x
 
-
-class PQConv1d(PQWeightBiasBase):
-    def __init__(self, config, layer, quantize_input=True, quantize_output=False):
-        super().__init__(config, layer, "conv", quantize_input, quantize_output)
-
-        self.stride = layer.stride
-        self.dilation = layer.dilation
-        self.padding = layer.padding
-        self.groups = layer.groups
-        self.in_channels = layer.in_channels
-        self.out_channels = layer.out_channels
-        self.kernel_size = layer.kernel_size
-        self.padding_mode = layer.padding_mode
+    def extra_repr(self):
+        s = "{in_channels}, {out_channels}, kernel_size={kernel_size}, stride={stride}"
+        if self.padding != (0,) * len(self.padding):
+            s += ", padding={padding}"
+        if self.dilation != (1,) * len(self.dilation):
+            s += ", dilation={dilation}"
+        if self.output_padding != (0,) * len(self.output_padding):
+            s += ", output_padding={output_padding}"
+        if self.groups != 1:
+            s += ", groups={groups}"
+        if self._bias is None:
+            s += ", bias=False"
+        if self.padding_mode != "zeros":
+            s += ", padding_mode={padding_mode}"
+        s += ", quantize_input={quantize_input}"
+        s += ", quantize_output={quantize_output}"
+
+        return s.format(**self.__dict__)
+
+
+class PQConv1d(PQWeightBiasBase, nn.Conv1d):
+    def __init__(
+        self,
+        config,
+        in_channels: int,
+        out_channels: int,
+        kernel_size: _size_1_t,
+        stride: _size_1_t = 1,
+        padding: Union[str, _size_1_t] = 0,
+        dilation: _size_1_t = 1,
+        groups: int = 1,
+        bias: bool = True,
+        padding_mode: str = "zeros",  # TODO: refine this type
+        device=None,
+        dtype=None,
+        quantize_input=True,
+        quantize_output=False,
+        **kwargs,
+    ):
+        super().__init__(
+            in_channels=in_channels,
+            out_channels=out_channels,
+            kernel_size=kernel_size,
+            stride=stride,
+            padding=padding,
+            dilation=dilation,
+            groups=groups,
+            bias=bias,
+            padding_mode=padding_mode,
+            device=device,
+            dtype=dtype,
+            config=config,
+            layer_type="conv",
+            quantize_input=quantize_input,
+            quantize_output=quantize_output,
+            **kwargs,
+        )
         self.use_fitcompress = config.fitcompress_parameters.enable_fitcompress
         self.is_pretraining = True
+        self._weight = nn.Parameter(self.weight.clone()).to(self.weight.device)
+        if bias:
+            self._bias = nn.Parameter(self.bias.clone()).to(self.bias.device)
+        else:
+            self.register_parameter("_bias", None)
+        del self._parameters["weight"]
+        del self._parameters["bias"]
+        self.pruning_layer.build(self._weight.shape)
 
     def post_pre_train_function(self):
         self.is_pretraining = False
@@ -310,20 +494,54 @@ def ebops(self):
             ebops += ops.mean(bw_bias) * size
         return ebops
 
+    @property
+    def weight(self):
+        if 
self.final_compression_done:
+            return self._weight
+        if self.pruning_first:
+            weight = self.prune(self._weight)
+            return self.quantize(weight, self.weight_quantizer)
+        else:
+            weight = self.quantize(self._weight, self.weight_quantizer)
+            return self.prune(weight)
+
+    @property
+    def bias(self):
+        if self.final_compression_done:
+            return self._bias
+        bias = self.quantize(self._bias, self.bias_quantizer)
+        return bias
+
+    def apply_final_compression(self):
+        self._weight.data = self.weight
+        if self._bias is not None:
+            self._bias.data = self.bias
+        self.final_compression_done = True
+
     def forward(self, x):
-        weight, bias, x = self.pre_forward(self.weight, self.bias, x)
-        x = F.conv1d(
-            input=x,
-            weight=weight,
-            bias=bias,
-            stride=self.stride,
-            padding=self.padding,
-            dilation=self.dilation,
-            groups=self.groups,
-        )
+        x = self.pre_forward(x)
+        x = super().forward(x)
         x = self.post_forward(x)
         return x
 
+    def extra_repr(self):
+        s = "{in_channels}, {out_channels}, kernel_size={kernel_size}, stride={stride}"
+        if self.padding != (0,) * len(self.padding):
+            s += ", padding={padding}"
+        if self.dilation != (1,) * len(self.dilation):
+            s += ", dilation={dilation}"
+        if self.output_padding != (0,) * len(self.output_padding):
+            s += ", output_padding={output_padding}"
+        if self.groups != 1:
+            s += ", groups={groups}"
+        if self._bias is None:
+            s += ", bias=False"
+        if self.padding_mode != "zeros":
+            s += ", padding_mode={padding_mode}"
+        s += ", quantize_input={quantize_input}"
+        s += ", quantize_output={quantize_output}"
+        return s.format(**self.__dict__)
+
 
 def add_compression_layers_torch(model, config, input_shape, device="cuda"):
     model = add_quantized_activations_to_model_layer(model, config)
@@ -336,21 +554,37 @@
     return model
 
 
-class QuantizedPooling(nn.Module):
+class PQAvgPoolBase(nn.Module):
 
-    def __init__(self, config, layer, quantize_input=True, quantize_output=False):
-        super().__init__()
-        self.f_output = self.f_input = torch.tensor(config.quantization_parameters.default_data_fractional_bits)
-        self.i_output = self.i_input = torch.tensor(config.quantization_parameters.default_data_integer_bits)
+    def __init__(
+        self,
+        config,
+        quantize_input=True,
+        quantize_output=False,
+        input_quantization_bits: Tuple[T, T, T] = None,
+        output_quantization_bits: Tuple[T, T, T] = None,
+        **kwargs,
+    ):
+        super().__init__(**kwargs)
+        if input_quantization_bits is not None:
+            self.k_input, self.i_input, self.f_input = input_quantization_bits
+        else:
+            self.k_input = config.quantization_parameters.default_data_keep_negatives
+            self.i_input = config.quantization_parameters.default_data_integer_bits
+            self.f_input = config.quantization_parameters.default_data_fractional_bits
+
+        if output_quantization_bits is not None:
+            self.k_output, self.i_output, self.f_output = output_quantization_bits
+        else:
+            self.k_output = config.quantization_parameters.default_data_keep_negatives
+            self.i_output = config.quantization_parameters.default_data_integer_bits
+            self.f_output = config.quantization_parameters.default_data_fractional_bits
         self.overflow = config.quantization_parameters.overflow
         self.config = config
-        self.hgq_heterogeneous = config.quantization_parameters.hgq_heterogeneous
         self.is_pretraining = True
-        self.overflow = config.quantization_parameters.overflow
         self.round_mode = config.quantization_parameters.round_mode
        self.use_hgq = config.quantization_parameters.use_high_granularity_quantization
-        self.pooling = 
layer
         self.enable_quantization = config.quantization_parameters.enable_quantization
         self.hgq_gamma = config.quantization_parameters.hgq_gamma
         self.hgq_beta = config.quantization_parameters.hgq_beta
@@ -363,9 +597,9 @@ def __init__(self, config, layer, quantize_input=True, quantize_output=False):
 
     def build(self, input_shape):
         self.input_quantizer = PyTorchQuantizer(
-            k=torch.tensor(1.0),
-            i=self.i_input,
-            f=self.f_input,
+            k=torch.tensor(self.k_input),
+            i=torch.tensor(self.i_input),
+            f=torch.tensor(self.f_input),
             overflow=self.overflow,
             round_mode=self.round_mode,
             is_heterogeneous=self.use_hgq,
@@ -373,9 +607,9 @@
             hgq_gamma=self.hgq_gamma,
         )
         self.output_quantizer = PyTorchQuantizer(
-            k=torch.tensor(1.0),
-            i=self.i_output,
-            f=self.f_output,
+            k=torch.tensor(self.k_output),
+            i=torch.tensor(self.i_output),
+            f=torch.tensor(self.f_output),
             overflow=self.overflow,
             round_mode=self.round_mode,
             is_heterogeneous=self.use_hgq,
@@ -383,10 +617,6 @@
             hgq_gamma=self.hgq_gamma,
         )
         self.input_shape = input_shape
-        if self.use_hgq:
-            self.input_quantizer.quantizer.build(input_shape)
-            output_shape = self.pooling(torch.rand(input_shape)).shape
-            self.output_quantizer.quantizer.build(output_shape)
 
     def get_input_quantization_bits(self):
         return self.input_quantizer.get_quantization_bits()
@@ -419,7 +649,7 @@
             # Save inputs
             self.saved_inputs.append(x)
             # During FITcompress, we do not use any quantized pooling
-            return ops.average_pool(x, pool_size=1)
+            return x
         if self.quantize_input and self.enable_quantization:
             x = self.input_quantizer(x)
         return x
@@ -429,9 +659,82 @@ def post_pooling(self, x):
             x = self.output_quantizer(x)
         return x
 
+    def extra_repr(self) -> str:
+        return f"kernel_size={self.kernel_size}, stride={self.stride}, padding={self.padding}, quantize_input={self.quantize_input}, quantize_output={self.quantize_output}"  # noqa: E501
+
+
+class PQAvgPool1d(PQAvgPoolBase, nn.AvgPool1d):
+
+    def __init__(
+        self,
+        config,
+        kernel_size: _size_1_t,
+        stride: _size_1_t = None,
+        padding: _size_1_t = 0,
+        ceil_mode: bool = False,
+        count_include_pad: bool = True,
+        quantize_input=True,
+        quantize_output=False,
+        input_quantization_bits: Tuple[T, T, T] = None,
+        output_quantization_bits: Tuple[T, T, T] = None,
+        **kwargs,
+    ):
+        super().__init__(
+            kernel_size=kernel_size,
+            stride=stride,
+            padding=padding,
+            ceil_mode=ceil_mode,
+            count_include_pad=count_include_pad,
+            config=config,
+            quantize_input=quantize_input,
+            quantize_output=quantize_output,
+            input_quantization_bits=input_quantization_bits,
+            output_quantization_bits=output_quantization_bits,
+            **kwargs,
+        )
+
+    def forward(self, x):
+        x = self.pre_pooling(x)
+        x = super().forward(x)
+        x = self.post_pooling(x)
+        return x
+
+
+class PQAvgPool2d(PQAvgPoolBase, nn.AvgPool2d):
+
+    def __init__(
+        self,
+        config,
+        kernel_size: _size_2_t,
+        stride: _size_2_t = None,
+        padding: _size_2_t = 0,
+        ceil_mode: bool = False,
+        count_include_pad: bool = True,
+        divisor_override: Optional[int] = None,
+        quantize_input=True,
+        quantize_output=False,
+        input_quantization_bits: Tuple[T, T, T] = None,
+        output_quantization_bits: Tuple[T, T, T] = None,
+        **kwargs,
+    ):
+        super().__init__(
+            kernel_size=kernel_size,
+            stride=stride,
+            padding=padding,
+            ceil_mode=ceil_mode,
+            count_include_pad=count_include_pad,
+            divisor_override=divisor_override,
+            config=config,
+            quantize_input=quantize_input,
+            quantize_output=quantize_output,
+            
input_quantization_bits=input_quantization_bits, + output_quantization_bits=output_quantization_bits, + **kwargs, + ) + + def forward(self, x): + x = self.pre_pooling(x) + x = super().forward(x) x = self.post_pooling(x) return x @@ -448,13 +751,31 @@ def __init__( track_running_stats: bool = True, device=None, dtype=None, - quantize_input=False, + quantize_input=True, + input_quantization_bits: Tuple[T, T, T] = None, + weight_quantization_bits: Tuple[T, T, T] = None, + bias_quantization_bits: Tuple[T, T, T] = None, ): super().__init__(num_features, eps, momentum, affine, track_running_stats, device=device, dtype=dtype) - self.f_weight = self.f_bias = torch.tensor(config["quantization_parameters"]["default_weight_fractional_bits"]) - self.i_weight = self.i_bias = torch.tensor(config["quantization_parameters"]["default_weight_integer_bits"]) - self.i_input = config["quantization_parameters"]["default_data_integer_bits"] - self.f_input = config["quantization_parameters"]["default_data_fractional_bits"] + if input_quantization_bits is not None: + self.k_input, self.i_input, self.f_input = input_quantization_bits + else: + self.k_input = config["quantization_parameters"]["default_data_keep_negatives"] + self.i_input = config["quantization_parameters"]["default_data_integer_bits"] + self.f_input = config["quantization_parameters"]["default_data_fractional_bits"] + + if weight_quantization_bits is not None: + self.k_weight, self.i_weight, self.f_weight = weight_quantization_bits + else: + self.k_weight = config["quantization_parameters"]["default_weight_keep_negatives"] + self.i_weight = config["quantization_parameters"]["default_weight_integer_bits"] + self.f_weight = config["quantization_parameters"]["default_weight_fractional_bits"] + if bias_quantization_bits is not None: + self.k_bias, self.i_bias, self.f_bias = bias_quantization_bits + else: + self.k_bias = config["quantization_parameters"]["default_weight_keep_negatives"] + self.i_bias = config["quantization_parameters"]["default_weight_integer_bits"] + self.f_bias = config["quantization_parameters"]["default_weight_fractional_bits"] self.overflow = config["quantization_parameters"]["overflow"] self.round_mode = config["quantization_parameters"]["round_mode"] self.use_hgq = config["quantization_parameters"]["use_high_granularity_quantization"] @@ -474,9 +795,9 @@ def __init__( def build(self, input_shape): self.built = True self.input_quantizer = PyTorchQuantizer( - k=torch.tensor(1.0), - i=self.i_input, - f=self.f_input, + k=torch.tensor(self.k_input), + i=torch.tensor(self.i_input), + f=torch.tensor(self.f_input), overflow=self.overflow, round_mode=self.round_mode, is_heterogeneous=self.use_hgq, @@ -484,18 +805,18 @@ def build(self, input_shape): hgq_gamma=self.hgq_gamma, ) self.weight_quantizer = PyTorchQuantizer( - k=torch.tensor(1.0), - i=self.i_weight, - f=self.f_weight, + k=torch.tensor(self.k_weight), + i=torch.tensor(self.i_weight), + f=torch.tensor(self.f_weight), round_mode=self.round_mode, overflow=self.overflow, is_data=False, is_heterogeneous=self.use_hgq, ) self.bias_quantizer = PyTorchQuantizer( - k=torch.tensor(1.0), - i=self.i_bias, - f=self.f_bias, + k=torch.tensor(self.k_bias), + i=torch.tensor(self.i_bias), + f=torch.tensor(self.f_bias), round_mode=self.round_mode, overflow=self.overflow, is_data=False, @@ -507,8 +828,6 @@ def build(self, input_shape): self.input_shape = input_shape def apply_final_compression(self): - self._weight.data = self.weight - self._bias.data = self.bias self.final_compression_done = True def 
get_input_quantization_bits(self): @@ -591,9 +910,9 @@ def __init__(self, k, i, f, overflow, round_mode, is_heterogeneous, is_data, hgq def get_quantization_bits(self): if self.use_hgq: - return self.quantizer.quantizer.i, self.quantizer.quantizer.f + return self.quantizer.quantizer.k, self.quantizer.quantizer.i, self.quantizer.quantizer.f else: - return self.i, self.f + return self.k, self.i, self.f def set_quantization_bits(self, i, f): if self.use_hgq: @@ -678,7 +997,7 @@ def add_layer_specific_quantization_to_model(module, config): if "quantize" in layer_config["input"]: quantize = layer_config["input"]["quantize"] layer.quantize_input = quantize - elif layer.__class__ == QuantizedPooling: + elif layer.__class__ == PQAvgPool1d: if name in config.quantization_parameters.layer_specific: layer_config = config.quantization_parameters.layer_specific[name] if "input" in layer_config: @@ -746,8 +1065,30 @@ def add_quantized_activations_to_model_layer(module, config): elif layer.__class__ in [nn.Tanh]: tanh = QuantizedActivation(QuantizedTanh(config, i_input=i, f_input=f, i_output=i, f_output=f)) setattr(module, name, tanh) - elif layer.__class__ in [nn.AvgPool1d, nn.AvgPool2d, nn.AvgPool3d]: - new_layer = QuantizedPooling(config, layer, quantize_input, quantize_output) + elif layer.__class__ == nn.AvgPool1d: + new_layer = PQAvgPool1d( + config, + layer.kernel_size, + layer.stride, + layer.padding, + layer.ceil_mode, + layer.count_include_pad, + quantize_input, + quantize_output, + ) + setattr(module, name, new_layer) + elif layer.__class__ == nn.AvgPool2d: + new_layer = PQAvgPool2d( + config, + layer.kernel_size, + layer.stride, + layer.padding, + layer.ceil_mode, + layer.count_include_pad, + layer.divisor_override, + quantize_input, + quantize_output, + ) setattr(module, name, new_layer) elif layer.__class__ == nn.BatchNorm2d: new_layer = PQBatchNorm2d( @@ -811,16 +1152,58 @@ def add_pruning_to_model(module, config): quantize_output = config["quantization_parameters"]["quantize_output"] for name, layer in module.named_children(): if layer.__class__ is nn.Linear: - sparse_layer = PQDense(config, layer, quantize_input, quantize_output) + sparse_layer = PQDense( + config, layer.in_features, layer.out_features, layer.bias is not None, quantize_input, quantize_output + ) sparse_layer.pruning_layer.build(layer.weight.shape) + sparse_layer._weight.data = layer.weight.data + if layer.bias is not None: + sparse_layer._bias.data = layer.bias.data + setattr(module, name, sparse_layer) elif layer.__class__ is nn.Conv2d: - sparse_layer = PQConv2d(config, layer, quantize_input, quantize_output) + sparse_layer = PQConv2d( + config, + layer.in_channels, + layer.out_channels, + layer.kernel_size, + layer.stride, + layer.padding, + layer.dilation, + layer.groups, + layer.bias is not None, + layer.padding_mode, + layer.weight.device, + layer.weight.dtype, + quantize_input, + quantize_output, + ) sparse_layer.pruning_layer.build(layer.weight.shape) + sparse_layer._weight.data = layer.weight.data + if layer.bias is not None: + sparse_layer._bias.data = layer.bias.data setattr(module, name, sparse_layer) elif layer.__class__ is nn.Conv1d: - sparse_layer = PQConv1d(config, layer, quantize_input, quantize_output) + sparse_layer = PQConv1d( + config, + layer.in_channels, + layer.out_channels, + layer.kernel_size, + layer.stride, + layer.padding, + layer.dilation, + layer.groups, + layer.bias is not None, + layer.padding_mode, + layer.weight.device, + layer.weight.dtype, + quantize_input, + quantize_output, + ) 
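            # As with PQDense and PQConv2d above: the replacement layer is rebuilt from
            # the original layer's hyperparameters, the pruning mask is built for the
            # weight shape, and the pretrained tensors are copied into the shadow
            # _weight/_bias parameters before the module is swapped into the parent.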
sparse_layer.pruning_layer.build(layer.weight.shape) + sparse_layer._weight.data = layer.weight.data + if layer.bias is not None: + sparse_layer._bias.data = layer.bias.data setattr(module, name, sparse_layer) else: add_pruning_to_model(layer, config) @@ -897,7 +1280,7 @@ def post_pretrain_functions(model, config, train_loader=None, loss_func=None): elif isinstance(layer, QuantizedActivation): layer.activation.post_pre_train_function() - elif isinstance(layer, (PQBatchNorm2d, QuantizedPooling)): + elif isinstance(layer, (PQBatchNorm2d, PQAvgPool1d, PQAvgPool2d)): layer.post_pretrain_function() if config.pruning_parameters.pruning_method == "pdp" or ( config.pruning_parameters.pruning_method == "wanda" and config.pruning_parameters.calculate_pruning_budget @@ -915,9 +1298,9 @@ def pdp_setup(model, config): for layer in model.modules(): if isinstance(layer, (PQConv2d, PQConv1d, PQDense)): if global_weights is None: - global_weights = layer.weight.flatten() + global_weights = layer._weight.flatten() else: - global_weights = torch.concat((global_weights, layer.weight.flatten())) + global_weights = torch.concat((global_weights, layer._weight.flatten())) abs_global_weights = torch.abs(global_weights) global_weight_topk, _ = torch.topk(abs_global_weights, abs_global_weights.numel()) @@ -926,7 +1309,7 @@ def pdp_setup(model, config): idx = 0 for layer in model.modules(): if isinstance(layer, (PQConv2d, PQConv1d, PQDense)): - weight_size = layer.weight.numel() + weight_size = layer._weight.numel() w = torch.sum(global_weights_below_threshold[idx : idx + weight_size]) layer.pruning_layer.init_r = w / weight_size layer.pruning_layer.sparsity = w / weight_size # Wanda @@ -939,21 +1322,10 @@ def get_layer_keep_ratio_torch(model): remaining_weights = 0 for layer in model.modules(): if isinstance(layer, (PQConv2d, PQConv1d, PQDense)): - if layer.pruning_first: - weight = layer.weight - if layer.enable_pruning: - weight = layer.pruning_layer.get_hard_mask(weight) * weight - weight, bias = layer.quantize(weight, layer.bias) - total_w += weight.numel() - rem = torch.count_nonzero(weight) - remaining_weights += rem - else: - weight, bias = layer.quantize(layer.weight, layer.bias) - if layer.enable_pruning: - weight = layer.pruning_layer.get_hard_mask(weight) * weight - total_w += weight.numel() - rem = torch.count_nonzero(weight) - remaining_weights += rem + weight, _ = layer.weight, layer.bias + total_w += weight.numel() + rem = torch.count_nonzero(weight) + remaining_weights += rem elif layer.__class__ in (nn.Conv2d, nn.Conv1d, nn.Linear): total_w += layer.weight.numel() remaining_weights += torch.count_nonzero(layer.weight) @@ -974,7 +1346,7 @@ def get_model_losses_torch(model, losses): elif isinstance(layer, (QuantizedActivation)): if layer.activation.use_hgq: losses += layer.hgq_loss() - elif isinstance(layer, (QuantizedPooling, PQBatchNorm2d)): + elif isinstance(layer, (PQAvgPool1d, PQBatchNorm2d)): if layer.use_hgq: losses += layer.hgq_loss() return losses From ab73256d13f549ef4e9211a5bdcd98c99a0a5f8c Mon Sep 17 00:00:00 2001 From: Roope Niemi Date: Mon, 3 Nov 2025 16:23:07 +0100 Subject: [PATCH 13/37] rebase dev --- src/pquant/core/activations_quantizer.py | 8 +- .../core/tf_impl/compressed_layers_tf.py | 40 +- .../torch_impl/compressed_layers_torch.py | 55 +- tests/test_keras_compression_layers.py | 211 +++++-- tests/test_torch_compression_layers.py | 531 +++++++++++++++++- 5 files changed, 704 insertions(+), 141 deletions(-) diff --git a/src/pquant/core/activations_quantizer.py 
b/src/pquant/core/activations_quantizer.py index 83367eb..f9e87fe 100644 --- a/src/pquant/core/activations_quantizer.py +++ b/src/pquant/core/activations_quantizer.py @@ -15,8 +15,8 @@ def __init__( from pquant.core.finetuning import TuningConfig config = TuningConfig.load_from_config(config) - self.i = convert_to_tensor(i_input) - self.f = convert_to_tensor(f_input) + self.i_input = convert_to_tensor(i_input) + self.f_input = convert_to_tensor(f_input) self.k = convert_to_tensor(1.0) self.i_output = convert_to_tensor(i_output) @@ -162,8 +162,6 @@ def __init__( self.hgq_gamma = config.quantization_parameters.hgq_gamma self.hgq_heterogeneous = config.quantization_parameters.hgq_heterogeneous self.use_fitcompress = config.fitcompress_parameters.enable_fitcompress - - self.post_fitcompress_calibration = False self.saved_inputs = [] @@ -297,4 +295,4 @@ def hard_sigmoid(x): def hard_tanh(x): """Computes hard_tanh function that saturates between -1 and 1.""" - return 2.0 * hard_sigmoid(x) - 1.0 \ No newline at end of file + return 2.0 * hard_sigmoid(x) - 1.0 diff --git a/src/pquant/core/tf_impl/compressed_layers_tf.py b/src/pquant/core/tf_impl/compressed_layers_tf.py index 2a8c212..9defe6b 100644 --- a/src/pquant/core/tf_impl/compressed_layers_tf.py +++ b/src/pquant/core/tf_impl/compressed_layers_tf.py @@ -34,16 +34,15 @@ def __init__(self, config, layer_type, quantize_input=True, quantize_output=Fals self.i_bias = ops.convert_to_tensor(i_bits) self.f_bias = ops.convert_to_tensor(f_bits) - self.i_input = self.i_output = ops.convert_to_tensor(config["quantization_parameters"]["default_data_integer_bits"]) - self.f_input = self.f_output = ops.convert_to_tensor( - config["quantization_parameters"]["default_data_fractional_bits"] - ) + self.i_input = self.i_output = ops.convert_to_tensor(config.quantization_parameters.default_data_integer_bits) + self.f_input = self.f_output = ops.convert_to_tensor(config.quantization_parameters.default_data_fractional_bits) self.pruning_layer = get_pruning_layer(config=config, layer_type=layer_type) self.pruning_method = config.pruning_parameters.pruning_method self.round_mode = config.quantization_parameters.round_mode self.overflow = config.quantization_parameters.overflow self.hgq_gamma = config.quantization_parameters.hgq_gamma + self.hgq_beta = config.quantization_parameters.hgq_beta self.pruning_first = config.training_parameters.pruning_first self.enable_quantization = config.quantization_parameters.enable_quantization self.use_hgq = config.quantization_parameters.use_high_granularity_quantization @@ -553,22 +552,20 @@ def __init__( synchronized, **kwargs, ) - self.overflow = config["quantization_parameters"]["overflow"] - self.round_mode = config["quantization_parameters"]["round_mode"] - self.hgq_gamma = config["quantization_parameters"]["hgq_gamma"] - self.data_k = config["quantization_parameters"]["default_data_keep_negatives"] - self.weight_k = config["quantization_parameters"]["default_weight_keep_negatives"] - self.enable_quantization = config["quantization_parameters"]["enable_quantization"] - self.use_hgq = config["quantization_parameters"]["use_high_granularity_quantization"] - self.hgq_beta = config["quantization_parameters"]["hgq_beta"] + self.overflow = config.quantization_parameters.overflow + self.round_mode = config.quantization_parameters.round_mode + self.hgq_gamma = config.quantization_parameters.hgq_gamma + self.data_k = config.quantization_parameters.default_data_keep_negatives + self.weight_k = 
config.quantization_parameters.default_weight_keep_negatives + self.enable_quantization = config.quantization_parameters.enable_quantization + self.use_hgq = config.quantization_parameters.use_high_granularity_quantization + self.hgq_beta = config.quantization_parameters.hgq_beta self.quantize_input = quantize_input self.config = config - self.f_weight = self.f_bias = ops.convert_to_tensor( - config["quantization_parameters"]["default_weight_fractional_bits"] - ) - self.i_weight = self.i_bias = ops.convert_to_tensor(config["quantization_parameters"]["default_weight_integer_bits"]) - self.i_input = ops.convert_to_tensor(config["quantization_parameters"]["default_data_integer_bits"]) - self.f_input = ops.convert_to_tensor(config["quantization_parameters"]["default_data_fractional_bits"]) + self.f_weight = self.f_bias = ops.convert_to_tensor(config.quantization_parameters.default_weight_fractional_bits) + self.i_weight = self.i_bias = ops.convert_to_tensor(config.quantization_parameters.default_weight_integer_bits) + self.i_input = ops.convert_to_tensor(config.quantization_parameters.default_data_integer_bits) + self.f_input = ops.convert_to_tensor(config.quantization_parameters.default_data_fractional_bits) self.final_compression_done = False self.is_pretraining = True @@ -712,12 +709,11 @@ def __init__(self, config, layer, quantize_input=True, quantize_output=False): self.is_pretraining = True - self.overflow = "SAT_SYM" if config.quantization_parameters.use_symmetric_quantization else "SAT" self.hgq_gamma = config.quantization_parameters.hgq_gamma self.hgq_beta = config.quantization_parameters.hgq_beta self.data_k = config.quantization_parameters.default_data_keep_negatives self.use_hgq = config.quantization_parameters.use_high_granularity_quantization - self.hgq_heterogeneous = config.hgq_heterogeneous + self.hgq_heterogeneous = config.quantization_parameters.hgq_heterogeneous self.enable_quantization = config.quantization_parameters.enable_quantization self.round_mode = config.quantization_parameters.round_mode self.overflow = config.quantization_parameters.overflow @@ -1286,7 +1282,7 @@ def add_compression_layers_tf(model, config, input_shape=None): act = check_activation(layer, config) # Activation layers elif isinstance(layer, ReLU): - if config["quantization_parameters"]["enable_quantization"]: + if config.quantization_parameters.enable_quantization: new_layer = QuantizedReLU(config) set_quantization_bits_activations(config, layer, new_layer) new_layer.build(layer.input.shape) @@ -1307,7 +1303,7 @@ def add_compression_layers_tf(model, config, input_shape=None): x = new_layer(x) elif isinstance(layer, (BatchNormalization)): - if config["quantization_parameters"]["enable_quantization"]: + if config.quantization_parameters.enable_quantization: new_layer = PQBatchNormalization( config, layer.axis, diff --git a/src/pquant/core/torch_impl/compressed_layers_torch.py b/src/pquant/core/torch_impl/compressed_layers_torch.py index ad2e5e8..a222ce2 100644 --- a/src/pquant/core/torch_impl/compressed_layers_torch.py +++ b/src/pquant/core/torch_impl/compressed_layers_torch.py @@ -4,7 +4,6 @@ import torch import torch.nn as nn import torch.nn.functional as F - from torch.fx import symbolic_trace from torch.nn.common_types import _size_1_t, _size_2_t @@ -582,7 +581,6 @@ def __init__( self.overflow = config.quantization_parameters.overflow self.config = config self.is_pretraining = True - self.overflow = config.quantization_parameters.overflow self.round_mode = 
config.quantization_parameters.round_mode self.use_hgq = config.quantization_parameters.use_high_granularity_quantization self.enable_quantization = config.quantization_parameters.enable_quantization @@ -591,7 +589,6 @@ def __init__( self.use_fitcompress = config.fitcompress_parameters.enable_fitcompress self.post_fitcompress_calibration = False self.saved_inputs = [] - self.hgq_gamma = config.quantization_parameters.hgq_gamma self.quantize_input = quantize_input self.quantize_output = quantize_output @@ -760,28 +757,28 @@ def __init__( if input_quantization_bits is not None: self.k_input, self.i_input, self.f_input = input_quantization_bits else: - self.k_input = config["quantization_parameters"]["default_data_keep_negatives"] - self.i_input = config["quantization_parameters"]["default_data_integer_bits"] - self.f_input = config["quantization_parameters"]["default_data_fractional_bits"] + self.k_input = config.quantization_parameters.default_data_keep_negatives + self.i_input = config.quantization_parameters.default_data_integer_bits + self.f_input = config.quantization_parameters.default_data_fractional_bits if weight_quantization_bits is not None: self.k_weight, self.i_weight, self.f_weight = weight_quantization_bits else: - self.k_weight = config["quantization_parameters"]["default_weight_keep_negatives"] - self.i_weight = config["quantization_parameters"]["default_weight_integer_bits"] - self.f_weight = config["quantization_parameters"]["default_weight_fractional_bits"] + self.k_weight = config.quantization_parameters.default_weight_keep_negatives + self.i_weight = config.quantization_parameters.default_weight_integer_bits + self.f_weight = config.quantization_parameters.default_weight_fractional_bits if bias_quantization_bits is not None: self.k_bias, self.i_bias, self.f_bias = bias_quantization_bits else: - self.k_bias = config["quantization_parameters"]["default_weight_keep_negatives"] - self.i_bias = config["quantization_parameters"]["default_weight_integer_bits"] - self.f_bias = config["quantization_parameters"]["default_weight_fractional_bits"] - self.overflow = config["quantization_parameters"]["overflow"] - self.round_mode = config["quantization_parameters"]["round_mode"] - self.use_hgq = config["quantization_parameters"]["use_high_granularity_quantization"] - self.hgq_gamma = config["quantization_parameters"]["hgq_gamma"] - self.hgq_beta = config["quantization_parameters"]["hgq_beta"] - self.enable_quantization = config["quantization_parameters"]["enable_quantization"] + self.k_bias = config.quantization_parameters.default_weight_keep_negatives + self.i_bias = config.quantization_parameters.default_weight_integer_bits + self.f_bias = config.quantization_parameters.default_weight_fractional_bits + self.overflow = config.quantization_parameters.overflow + self.round_mode = config.quantization_parameters.round_mode + self.use_hgq = config.quantization_parameters.use_high_granularity_quantization + self.hgq_gamma = config.quantization_parameters.hgq_gamma + self.hgq_beta = config.quantization_parameters.hgq_beta + self.enable_quantization = config.quantization_parameters.enable_quantization self.config = config self.quantize_input = quantize_input self._weight = nn.Parameter(self.weight.clone()) @@ -997,7 +994,7 @@ def add_layer_specific_quantization_to_model(module, config): if "quantize" in layer_config["input"]: quantize = layer_config["input"]["quantize"] layer.quantize_input = quantize - elif layer.__class__ == PQAvgPool1d: + elif layer.__class__ in [PQAvgPool1d, 
PQAvgPool2d]: if name in config.quantization_parameters.layer_specific: layer_config = config.quantization_parameters.layer_specific[name] if "input" in layer_config: @@ -1050,8 +1047,8 @@ def add_layer_specific_quantization_to_model(module, config): def add_quantized_activations_to_model_layer(module, config): if not config.quantization_parameters.enable_quantization: return module - quantize_input = config["quantization_parameters"]["quantize_input"] - quantize_output = config["quantization_parameters"]["quantize_output"] + quantize_input = config.quantization_parameters.quantize_input + quantize_output = config.quantization_parameters.quantize_output # Replaces ReLU and Tanh layers with quantized versions for name, layer in module.named_children(): i = config.quantization_parameters.default_data_integer_bits @@ -1148,8 +1145,8 @@ def disable_pruning_from_layers(module, config): def add_pruning_to_model(module, config): - quantize_input = config["quantization_parameters"]["quantize_input"] - quantize_output = config["quantization_parameters"]["quantize_output"] + quantize_input = config.quantization_parameters.quantize_input + quantize_output = config.quantization_parameters.quantize_output for name, layer in module.named_children(): if layer.__class__ is nn.Linear: sparse_layer = PQDense( @@ -1265,7 +1262,7 @@ def pre_finetune_functions(model): def post_pretrain_functions(model, config, train_loader=None, loss_func=None): if config.fitcompress_parameters.enable_fitcompress: - from pquant.core.torch_impl.fit_compress import call_fitcompress + from pquant.core.torch_impl.fit_compress import call_fitcompress # noqa: 811 config, pruning_mask_importance_scores = call_fitcompress(config, model, train_loader, loss_func) @@ -1357,26 +1354,26 @@ def create_default_layer_quantization_pruning_config(model): for name, layer in model.named_modules(): if layer.__class__ in [nn.Linear, nn.Conv1d, nn.Conv2d]: if layer.bias is None: - config["layer_specific"][name] = { + config.layer_specific[name] = { "input": {"integer_bits": 0, "fractional_bits": 7, "quantize": True}, "weight": {"integer_bits": 0, "fractional_bits": 7}, "output": {"integer_bits": 0, "fractional_bits": 7, "quantize": True}, } else: - config["layer_specific"][name] = { + config.layer_specific[name] = { "input": {"integer_bits": 0, "fractional_bits": 7, "quantize": True}, "weight": {"integer_bits": 0, "fractional_bits": 7}, "bias": {"integer_bits": 0, "fractional_bits": 7}, "output": {"integer_bits": 0, "fractional_bits": 7, "quantize": True}, } - config["disable_pruning_for_layers"].append(name) + config.disable_pruning_for_layers.append(name) elif layer.__class__ in [nn.Tanh, nn.ReLU, nn.AvgPool1d, nn.AvgPool2d, nn.AvgPool3d]: - config["layer_specific"][name] = { + config.layer_specific[name] = { "input": {"quantize": True, "integer_bits": 0.0, "fractional_bits": 7.0}, "output": {"quantize": True, "integer_bits": 0.0, "fractional_bits": 7.0}, } elif layer.__class__ in [nn.BatchNorm2d]: - config["layer_specific"][name] = { + config.layer_specific[name] = { "input": {"quantize": True, "integer_bits": 0.0, "fractional_bits": 7.0}, "weight": {"integer_bits": 0, "fractional_bits": 7.0}, "bias": {"integer_bits": 0, "fractional_bits": 7.0}, diff --git a/tests/test_keras_compression_layers.py b/tests/test_keras_compression_layers.py index 05856fb..6d9959d 100644 --- a/tests/test_keras_compression_layers.py +++ b/tests/test_keras_compression_layers.py @@ -7,6 +7,7 @@ from keras.layers import ( Activation, AveragePooling2D, + 
BatchNormalization, Conv1D, Conv2D, Dense, @@ -17,9 +18,9 @@ from pquant.core.activations_quantizer import QuantizedReLU, QuantizedTanh from pquant.core.tf_impl.compressed_layers_tf import ( - CompressedLayerConv1dKeras, - CompressedLayerDenseKeras, + PQConv1d, PQConv2d, + PQDense, PQSeparableConv2d, QuantizedPooling, add_compression_layers_tf, @@ -64,12 +65,17 @@ def config_pdp(): "structured_pruning": False, }, "quantization_parameters": { - "default_integer_bits": 0.0, - "default_fractional_bits": 7.0, + "default_weight_integer_bits": 0.0, + "default_weight_fractional_bits": 7.0, + "default_data_integer_bits": 0.0, + "default_data_fractional_bits": 7.0, "default_data_keep_negatives": 0.0, "default_weight_keep_negatives": 1.0, + "quantize_input": True, + "quantize_output": False, "enable_quantization": False, "hgq_gamma": 0.0003, + "hgq_beta": 1e-5, "hgq_heterogeneous": True, "layer_specific": [], "use_high_granularity_quantization": False, @@ -98,12 +104,17 @@ def config_ap(): "t_delta": 1, }, "quantization_parameters": { - "default_integer_bits": 0.0, - "default_fractional_bits": 7.0, + "default_weight_integer_bits": 0.0, + "default_weight_fractional_bits": 7.0, + "default_data_integer_bits": 0.0, + "default_data_fractional_bits": 7.0, "default_data_keep_negatives": 0.0, "default_weight_keep_negatives": 1.0, + "quantize_input": True, + "quantize_output": False, "enable_quantization": False, "hgq_gamma": 0.0003, + "hgq_beta": 1e-5, "hgq_heterogeneous": True, "layer_specific": [], "use_high_granularity_quantization": False, @@ -135,12 +146,17 @@ def config_wanda(): "M": None, }, "quantization_parameters": { - "default_integer_bits": 0.0, - "default_fractional_bits": 7.0, + "default_weight_integer_bits": 0.0, + "default_weight_fractional_bits": 7.0, + "default_data_integer_bits": 0.0, + "default_data_fractional_bits": 7.0, "default_data_keep_negatives": 0.0, "default_weight_keep_negatives": 1.0, + "quantize_input": True, + "quantize_output": False, "enable_quantization": False, "hgq_gamma": 0.0003, + "hgq_beta": 1e-5, "hgq_heterogeneous": True, "layer_specific": [], "use_high_granularity_quantization": False, @@ -168,12 +184,17 @@ def config_cs(): "threshold_init": 0.1, }, "quantization_parameters": { - "default_integer_bits": 0.0, - "default_fractional_bits": 7.0, + "default_weight_integer_bits": 0.0, + "default_weight_fractional_bits": 7.0, + "default_data_integer_bits": 0.0, + "default_data_fractional_bits": 7.0, "default_data_keep_negatives": 0.0, "default_weight_keep_negatives": 1.0, + "quantize_input": True, + "quantize_output": False, "enable_quantization": False, "hgq_gamma": 0.0003, + "hgq_beta": 1e-5, "hgq_heterogeneous": True, "layer_specific": [], "use_high_granularity_quantization": False, @@ -216,7 +237,7 @@ def test_dense_call(config_pdp, dense_input): layer_to_replace = Dense(OUT_FEATURES, use_bias=False) layer_to_replace.build((BATCH_SIZE, IN_FEATURES)) out = layer_to_replace(dense_input) - layer = CompressedLayerDenseKeras(config_pdp, layer_to_replace, "linear") + layer = PQDense(config_pdp, layer_to_replace, "linear") layer.build(dense_input.shape) layer.weight.assign(layer_to_replace.kernel) out2 = layer(dense_input) @@ -341,7 +362,7 @@ def test_conv1d_call(config_pdp, conv1d_input): layer_to_replace = Conv1D(OUT_FEATURES, KERNEL_SIZE, strides=2, use_bias=False) layer_to_replace.build(conv1d_input.shape) out = layer_to_replace(conv1d_input) - layer = CompressedLayerConv1dKeras(config_pdp, layer_to_replace, "conv") + layer = PQConv1d(config_pdp, layer_to_replace, 
"conv") layer.build(conv1d_input.shape) layer.weight.assign(layer_to_replace.kernel) out2 = layer(conv1d_input) @@ -1319,7 +1340,7 @@ def test_hgq_weight_shape(config_pdp, dense_input): model = keras.Model(inputs=inputs, outputs=act2, name="test_conv2d") model = add_compression_layers_tf(model, config_pdp, dense_input.shape) - assert model.layers[1].weight_quantizer.quantizer._i.shape == model.layers[1].weight.shape + assert model.layers[1].weight_quantizer.quantizer.quantizer._i.shape == model.layers[1].weight.shape layer_2_input_shape = [1] + list(model.layers[2].input.shape[1:]) assert model.layers[2].input_quantizer.quantizer._i.shape == layer_2_input_shape @@ -1370,30 +1391,24 @@ def test_set_activation_custom_bits_hgq(config_pdp, conv2d_input): for m in model.layers: if isinstance(m, (PQConv2d)): - assert m.i_weight == 0.0 - assert m.i_bias == 0.0 - assert ops.all(m.weight_quantizer.quantizer.i == 0.0) - assert ops.all(m.bias_quantizer.quantizer.i == 0.0) - - assert m.f_weight == 7.0 - assert m.f_bias == 7.0 - assert ops.all(m.weight_quantizer.quantizer.f == 7.0) - assert ops.all(m.bias_quantizer.quantizer.f == 7.0) + iw, fw = m.get_weight_quantization_bits() + ib, fb = m.get_bias_quantization_bits() + assert ops.all(iw == 0.0) + assert ops.all(ib == 0.0) + assert ops.all(fw == 7.0) + assert ops.all(fb == 7.0) elif isinstance(m, (QuantizedTanh)): - assert m.i_input == 0.0 - assert m.f_input == 7.0 - assert ops.all(m.input_quantizer.quantizer.i == 0.0) - assert ops.all(m.input_quantizer.quantizer.f == 7.0) + k_input, i_input, f_input = m.get_input_quantization_bits() + assert ops.all(i_input == 0.0) + assert ops.all(f_input == 7.0) elif isinstance(m, (QuantizedReLU)): - assert m.i_input == 0.0 - assert m.f_input == 8.0 - assert ops.all(m.input_quantizer.quantizer.i == 0.0) - assert ops.all(m.input_quantizer.quantizer.f == 8.0) + k_input, i_input, f_input = m.get_input_quantization_bits() + assert ops.all(i_input == 0.0) + assert ops.all(f_input == 8.0) elif isinstance(m, (QuantizedPooling)): - assert m.i_input == 0.0 - assert m.f_input == 7.0 - assert ops.all(m.input_quantizer.quantizer.i == 0.0) - assert ops.all(m.input_quantizer.quantizer.f == 7.0) + i_input, f_input = m.get_input_quantization_bits() + assert ops.all(i_input == 0.0) + assert ops.all(f_input == 7.0) config_pdp.quantization_parameters.layer_specific = { 'conv2d': { @@ -1414,30 +1429,24 @@ def test_set_activation_custom_bits_hgq(config_pdp, conv2d_input): model = add_compression_layers_tf(model, config_pdp, conv2d_input.shape) for m in model.layers: if isinstance(m, (PQConv2d)): - assert m.i_weight == 1.0 - assert m.i_bias == 2.0 - assert ops.all(m.weight_quantizer.quantizer.i == 1.0) - assert ops.all(m.bias_quantizer.quantizer.i == 2.0) - - assert m.f_weight == 3.0 - assert m.f_bias == 4.0 - assert ops.all(m.weight_quantizer.quantizer.f == 3.0) - assert ops.all(m.bias_quantizer.quantizer.f == 4.0) + iw, fw = m.get_weight_quantization_bits() + ib, fb = m.get_bias_quantization_bits() + assert ops.all(iw == 1.0) + assert ops.all(ib == 2.0) + assert ops.all(fw == 3.0) + assert ops.all(fb == 4.0) elif isinstance(m, (QuantizedTanh)): - assert m.i_input == 0.0 - assert m.f_input == 3.0 - assert ops.all(m.input_quantizer.quantizer.i == 0.0) - assert ops.all(m.input_quantizer.quantizer.f == 3.0) + k_input, i_input, f_input = m.get_input_quantization_bits() + assert ops.all(i_input == 0.0) + assert ops.all(f_input == 3.0) elif isinstance(m, (QuantizedReLU)): - assert m.i_input == 1.0 - assert m.f_input == 3.0 - assert 
ops.all(m.input_quantizer.quantizer.i == 1.0) - assert ops.all(m.input_quantizer.quantizer.f == 3.0) + k_input, i_input, f_input = m.get_input_quantization_bits() + assert ops.all(i_input == 1.0) + assert ops.all(f_input == 3.0) elif isinstance(m, (QuantizedPooling)): - assert m.i_input == 1.0 - assert m.f_input == 3.0 - assert ops.all(m.input_quantizer.quantizer.i == 1.0) - assert ops.all(m.input_quantizer.quantizer.f == 3.0) + i_input, f_input = m.get_input_quantization_bits() + assert ops.all(i_input == 1.0) + assert ops.all(f_input == 3.0) def test_set_activation_custom_bits_quantizer(config_pdp, conv2d_input): @@ -1501,3 +1510,95 @@ def test_set_activation_custom_bits_quantizer(config_pdp, conv2d_input): elif isinstance(m, (QuantizedPooling)): assert m.i_input == 1.0 assert m.f_input == 3.0 + + +def test_ebops_dense(config_pdp, dense_input): + config_pdp.quantization_parameters.use_high_granularity_quantization = True + config_pdp.quantization_parameters.enable_quantization = True + inputs = keras.Input(shape=dense_input.shape[1:]) + out = Dense(OUT_FEATURES, use_bias=False)(inputs) + act = ReLU()(out) + model = keras.Model(inputs=inputs, outputs=act, name="test_dense") + model = add_compression_layers_tf(model, config_pdp, dense_input.shape) + post_pretrain_functions(model, config_pdp) + model.layers[1].hgq_loss(dense_input.shape) + + inputs = keras.Input(shape=dense_input.shape[1:]) + out = Dense(OUT_FEATURES, use_bias=True)(inputs) + act = ReLU()(out) + model = keras.Model(inputs=inputs, outputs=act, name="test_dense") + model = add_compression_layers_tf(model, config_pdp, dense_input.shape) + post_pretrain_functions(model, config_pdp) + model.layers[1].hgq_loss(dense_input.shape) + + +def test_ebops_conv2d(config_pdp, conv2d_input): + config_pdp.quantization_parameters.use_high_granularity_quantization = True + config_pdp.quantization_parameters.enable_quantization = True + inputs = keras.Input(shape=conv2d_input.shape[1:]) + out = Conv2D(OUT_FEATURES, kernel_size=KERNEL_SIZE, use_bias=False)(inputs) + act = ReLU()(out) + model = keras.Model(inputs=inputs, outputs=act, name="test_conv2d") + model = add_compression_layers_tf(model, config_pdp, conv2d_input.shape) + post_pretrain_functions(model, config_pdp) + model.layers[1].hgq_loss(conv2d_input.shape) + + config_pdp.quantization_parameters.use_high_granularity_quantization = True + config_pdp.quantization_parameters.enable_quantization = True + inputs = keras.Input(shape=conv2d_input.shape[1:]) + out = Conv2D(OUT_FEATURES, kernel_size=KERNEL_SIZE, use_bias=True)(inputs) + act = ReLU()(out) + model = keras.Model(inputs=inputs, outputs=act, name="test_conv2d") + model = add_compression_layers_tf(model, config_pdp, conv2d_input.shape) + post_pretrain_functions(model, config_pdp) + model.layers[1].hgq_loss(conv2d_input.shape) + + +def test_ebops_conv1d(config_pdp, conv1d_input): + config_pdp.quantization_parameters.use_high_granularity_quantization = True + config_pdp.quantization_parameters.enable_quantization = True + inputs = keras.Input(shape=conv1d_input.shape[1:]) + out = Conv1D(OUT_FEATURES, kernel_size=KERNEL_SIZE, use_bias=False)(inputs) + act = ReLU()(out) + model = keras.Model(inputs=inputs, outputs=act, name="test_dense") + model = add_compression_layers_tf(model, config_pdp, conv1d_input.shape) + post_pretrain_functions(model, config_pdp) + model.layers[1].hgq_loss(conv1d_input.shape) + + config_pdp.quantization_parameters.use_high_granularity_quantization = True + config_pdp.quantization_parameters.enable_quantization = 
True + inputs = keras.Input(shape=conv1d_input.shape[1:]) + out = Conv1D(OUT_FEATURES, kernel_size=KERNEL_SIZE, use_bias=True)(inputs) + act = ReLU()(out) + model = keras.Model(inputs=inputs, outputs=act, name="test_dense") + model = add_compression_layers_tf(model, config_pdp, conv1d_input.shape) + post_pretrain_functions(model, config_pdp) + model.layers[1].hgq_loss(conv1d_input.shape) + + +def test_ebops_bn(config_pdp, conv2d_input): + config_pdp.quantization_parameters.use_high_granularity_quantization = True + config_pdp.quantization_parameters.enable_quantization = True + inputs = keras.Input(shape=conv2d_input.shape[1:]) + out = Conv2D(OUT_FEATURES, KERNEL_SIZE)(inputs) + axis = 1 if keras.backend.image_data_format() == "channels_first" else -1 + + out = BatchNormalization(axis=axis)(out) + act = ReLU()(out) + model = keras.Model(inputs=inputs, outputs=act, name="test_bn") + model = add_compression_layers_tf(model, config_pdp, conv2d_input.shape) + post_pretrain_functions(model, config_pdp) + if keras.backend.image_data_format == "channels_first": + model.layers[2].hgq_loss((1, 32, 30, 30)) # Does not work, TODO: Fix + else: + model.layers[2].hgq_loss((1, 30, 30, 32)) + + +def test_ebops_activations(config_pdp, dense_input): + config_pdp.quantization_parameters.use_high_granularity_quantization = True + config_pdp.quantization_parameters.enable_quantization = True + inputs = keras.Input(shape=dense_input.shape[1:]) + act = ReLU()(inputs) + act2 = Activation("tanh")(act) + model = keras.Model(inputs=inputs, outputs=act2, name="test_activations") + model = add_compression_layers_tf(model, config_pdp, dense_input.shape) diff --git a/tests/test_torch_compression_layers.py b/tests/test_torch_compression_layers.py index 1c62cb4..ec7bd0a 100644 --- a/tests/test_torch_compression_layers.py +++ b/tests/test_torch_compression_layers.py @@ -6,17 +6,27 @@ import torch from keras import ops from torch import nn -from torch.nn import AvgPool2d, BatchNorm2d, Conv1d, Conv2d, Linear, ReLU, Tanh +from torch.nn import ( + AvgPool2d, + BatchNorm2d, + Conv1d, + Conv2d, + Linear, + ReLU, + Tanh, +) from pquant import post_training_prune from pquant.core.activations_quantizer import QuantizedReLU, QuantizedTanh from pquant.core.torch_impl.compressed_layers_torch import ( + PQAvgPool1d, + PQAvgPool2d, + PQBatchNorm2d, PQConv1d, PQConv2d, PQDense, PQWeightBiasBase, - QuantizedActivationTorchWrapper, - QuantizedPooling, + QuantizedActivation, add_compression_layers_torch, apply_final_compression_torch, get_layer_keep_ratio_torch, @@ -54,12 +64,17 @@ def config_pdp(): "structured_pruning": False, }, "quantization_parameters": { - "default_integer_bits": 0.0, - "default_fractional_bits": 7.0, + "default_weight_integer_bits": 0.0, + "default_weight_fractional_bits": 7.0, + "default_data_integer_bits": 0.0, + "default_data_fractional_bits": 7.0, "default_data_keep_negatives": 0.0, "default_weight_keep_negatives": 1.0, + "quantize_input": True, + "quantize_output": False, "enable_quantization": False, "hgq_gamma": 0.0003, + "hgq_beta": 1e-5, "hgq_heterogeneous": True, "layer_specific": [], "use_high_granularity_quantization": False, @@ -88,12 +103,17 @@ def config_ap(): "t_delta": 1, }, "quantization_parameters": { - "default_integer_bits": 0.0, - "default_fractional_bits": 7.0, + "default_weight_integer_bits": 0.0, + "default_weight_fractional_bits": 7.0, + "default_data_integer_bits": 0.0, + "default_data_fractional_bits": 7.0, "default_data_keep_negatives": 0.0, "default_weight_keep_negatives": 1.0, + 
"quantize_input": True, + "quantize_output": False, "enable_quantization": False, "hgq_gamma": 0.0003, + "hgq_beta": 1e-5, "hgq_heterogeneous": True, "layer_specific": [], "use_high_granularity_quantization": False, @@ -113,24 +133,29 @@ def config_ap(): def config_wanda(): cfg = { "pruning_parameters": { - "calculate_pruning_budget": True, + "calculate_pruning_budget": False, "disable_pruning_for_layers": [], "enable_pruning": True, "pruning_method": "wanda", "sparsity": 0.75, "t_start_collecting_batch": 0, "threshold_decay": 0.0, - "t_delta": 2, + "t_delta": 1, "N": None, "M": None, }, "quantization_parameters": { - "default_integer_bits": 0.0, - "default_fractional_bits": 7.0, + "default_weight_integer_bits": 0.0, + "default_weight_fractional_bits": 7.0, + "default_data_integer_bits": 0.0, + "default_data_fractional_bits": 7.0, "default_data_keep_negatives": 0.0, "default_weight_keep_negatives": 1.0, + "quantize_input": True, + "quantize_output": False, "enable_quantization": False, "hgq_gamma": 0.0003, + "hgq_beta": 1e-5, "hgq_heterogeneous": True, "layer_specific": [], "use_high_granularity_quantization": False, @@ -158,10 +183,17 @@ def config_cs(): "threshold_init": 0.1, }, "quantization_parameters": { - "default_integer_bits": 0.0, - "default_fractional_bits": 7.0, + "default_weight_integer_bits": 0.0, + "default_weight_fractional_bits": 7.0, + "default_data_integer_bits": 0.0, + "default_data_fractional_bits": 7.0, + "default_data_keep_negatives": 0.0, + "default_weight_keep_negatives": 1.0, + "quantize_input": True, + "quantize_output": False, "enable_quantization": False, "hgq_gamma": 0.0003, + "hgq_beta": 1e-5, "hgq_heterogeneous": True, "layer_specific": [], "use_high_granularity_quantization": False, @@ -215,8 +247,10 @@ def forward(self, x): def test_dense_call(config_pdp, dense_input): layer_to_replace = Linear(IN_FEATURES, OUT_FEATURES, bias=False) out = layer_to_replace(dense_input) - layer = PQDense(config_pdp, layer_to_replace, "linear") - layer.weight.data = layer_to_replace.weight.data + layer = PQDense( + config_pdp, layer_to_replace.in_features, layer_to_replace.out_features, layer_to_replace.bias is not None + ) + layer._weight.data = layer_to_replace.weight.data out2 = layer(dense_input) assert ops.all(ops.equal(out, out2)) @@ -224,8 +258,21 @@ def test_dense_call(config_pdp, dense_input): def test_conv2d_call(config_pdp, conv2d_input): layer_to_replace = Conv2d(IN_FEATURES, OUT_FEATURES, KERNEL_SIZE, bias=False, padding="same") out = layer_to_replace(conv2d_input) - layer = PQConv2d(config_pdp, layer_to_replace, "conv") - layer.weight.data = layer_to_replace.weight.data + layer = PQConv2d( + config_pdp, + layer_to_replace.in_channels, + layer_to_replace.out_channels, + layer_to_replace.kernel_size, + layer_to_replace.stride, + layer_to_replace.padding, + layer_to_replace.dilation, + layer_to_replace.groups, + layer_to_replace.bias is not None, + layer_to_replace.padding_mode, + layer_to_replace.weight.device, + layer_to_replace.weight.dtype, + ) + layer._weight.data = layer_to_replace.weight.data out2 = layer(conv2d_input) assert ops.all(ops.equal(out, out2)) @@ -233,8 +280,21 @@ def test_conv2d_call(config_pdp, conv2d_input): def test_conv1d_call(config_pdp, conv1d_input): layer_to_replace = Conv1d(IN_FEATURES, OUT_FEATURES, KERNEL_SIZE, stride=2, bias=False) out = layer_to_replace(conv1d_input) - layer = PQConv1d(config_pdp, layer_to_replace, "conv") - layer.weight.data = layer_to_replace.weight.data + layer = PQConv1d( + config_pdp, + 
layer_to_replace.in_channels, + layer_to_replace.out_channels, + layer_to_replace.kernel_size, + layer_to_replace.stride, + layer_to_replace.padding, + layer_to_replace.dilation, + layer_to_replace.groups, + layer_to_replace.bias is not None, + layer_to_replace.padding_mode, + layer_to_replace.weight.device, + layer_to_replace.weight.dtype, + ) + layer._weight.data = layer_to_replace.weight.data out2 = layer(conv1d_input) assert ops.all(ops.equal(out, out2)) @@ -242,11 +302,13 @@ def test_conv1d_call(config_pdp, conv1d_input): def test_dense_add_remove_layers(config_pdp, dense_input): config_pdp.pruning_parameters.enable_pruning = True layer = Linear(IN_FEATURES, OUT_FEATURES, bias=False) + orig_weight = layer.weight.data + model = TestModel(layer) model = add_compression_layers_torch(model, config_pdp, dense_input.shape) post_pretrain_functions(model, config_pdp) pre_finetune_functions(model) - + assert torch.all(orig_weight == model.submodule._weight.data) mask_50pct = ops.cast(ops.linspace(0, 1, num=OUT_FEATURES * IN_FEATURES) < 0.5, "float32") mask_50pct = ops.reshape(keras.random.shuffle(mask_50pct), model.submodule.pruning_layer.mask.shape) model.submodule.pruning_layer.mask = mask_50pct @@ -262,12 +324,13 @@ def test_dense_add_remove_layers(config_pdp, dense_input): def test_conv2d_add_remove_layers(config_pdp, conv2d_input): config_pdp.pruning_parameters.enable_pruning = True layer = Conv2d(IN_FEATURES, OUT_FEATURES, KERNEL_SIZE, bias=False) + orig_weight = layer.weight.data model = TestModel(layer) model = add_compression_layers_torch(model, config_pdp, conv2d_input.shape) model(conv2d_input) post_pretrain_functions(model, config_pdp) pre_finetune_functions(model) - + assert torch.all(orig_weight == model.submodule._weight.data) mask_50pct = ops.cast(ops.linspace(0, 1, num=OUT_FEATURES * IN_FEATURES * KERNEL_SIZE * KERNEL_SIZE) < 0.5, "float32") mask_50pct = ops.reshape(keras.random.shuffle(mask_50pct), model.submodule.pruning_layer.mask.shape) model.submodule.pruning_layer.mask = mask_50pct @@ -369,7 +432,7 @@ def test_check_activation(config_pdp, dense_input): layer = Linear(IN_FEATURES, OUT_FEATURES, bias=False) model = TestModel(layer, "relu") model = add_compression_layers_torch(model, config_pdp, dense_input.shape) - assert isinstance(model.activation, QuantizedActivationTorchWrapper) + assert isinstance(model.activation, QuantizedActivation) # Tanh config_pdp.quantization_parameters.enable_quantization = False @@ -382,7 +445,7 @@ def test_check_activation(config_pdp, dense_input): layer = Linear(IN_FEATURES, OUT_FEATURES, bias=False) model = TestModel(layer, "tanh") model = add_compression_layers_torch(model, config_pdp, dense_input.shape) - assert isinstance(model.activation, QuantizedActivationTorchWrapper) + assert isinstance(model.activation, QuantizedActivation) def check_keras_layer_is_built(module, is_built): @@ -417,6 +480,7 @@ def forward(self, x): def test_hgq_activation_built(config_pdp, conv2d_input): config_pdp.quantization_parameters.enable_quantization = True config_pdp.quantization_parameters.use_high_granularity_quantization = True + config_pdp.quantization_parameters.quantize_output = True layer = Conv2d(IN_FEATURES, OUT_FEATURES, KERNEL_SIZE, bias=True) model = TestModelWithAvgPool(layer, "relu") model = add_compression_layers_torch(model, config_pdp, conv2d_input.shape) @@ -488,8 +552,8 @@ def test_calculate_pruning_budget(config_wanda, dense_input): weight2 = ops.linspace(0.01, 0.99, OUT_FEATURES * OUT_FEATURES) model = 
add_compression_layers_torch(model, config_wanda, dense_input.shape) - model.submodule.weight.data = ops.reshape(weight, model.submodule.weight.shape) - model.submodule2.weight.data = ops.reshape(weight2, model.submodule2.weight.shape) + model.submodule._weight.data = ops.reshape(weight, model.submodule.weight.shape) + model.submodule2._weight.data = ops.reshape(weight2, model.submodule2.weight.shape) # Triggers calculation of pruning budget for PDP and Wanda post_pretrain_functions(model, config_wanda) @@ -540,8 +604,8 @@ def test_hgq_weight_shape(config_pdp, dense_input): def test_qbn_build(config_pdp, conv2d_input): - config_pdp["quantization_parameters"]["enable_quantization"] = True - config_pdp["quantization_parameters"]["use_high_granularity_quantization"] = True + config_pdp.quantization_parameters.enable_quantization = True + config_pdp.quantization_parameters.use_high_granularity_quantization = True layer = Conv2d(IN_FEATURES, OUT_FEATURES, KERNEL_SIZE, bias=False) layer2 = BatchNorm2d(OUT_FEATURES) model = TestModel2(layer, layer2, None, "tanh") @@ -581,7 +645,7 @@ def test_set_activation_custom_bits_hgq(config_pdp, conv2d_input): assert torch.all(m.input_quantizer.quantizer.i == 0.0) assert torch.all(m.input_quantizer.quantizer.f == 8.0) - elif isinstance(m, QuantizedPooling): + elif isinstance(m, PQAvgPool2d): assert m.i_input == 0.0 assert m.f_input == 7.0 assert torch.all(m.input_quantizer.quantizer.quantizer.i == 0.0) @@ -621,7 +685,7 @@ def test_set_activation_custom_bits_hgq(config_pdp, conv2d_input): assert m.f_input == 4.0 assert torch.all(m.input_quantizer.quantizer.i == 1.0) assert torch.all(m.input_quantizer.quantizer.f == 4.0) - elif isinstance(m, QuantizedPooling): + elif isinstance(m, PQAvgPool2d): assert m.i_input == 1.0 assert m.f_input == 3.0 assert torch.all(m.input_quantizer.quantizer.quantizer.i == 1.0) @@ -670,6 +734,413 @@ def test_set_activation_custom_bits_quantizer(config_pdp, conv2d_input): elif isinstance(m, (QuantizedReLU)): assert m.i_input == 0.0 assert m.f_input == 4.0 - elif isinstance(m, QuantizedPooling): + elif isinstance(m, PQAvgPool2d): assert m.i_input == 1.0 assert m.f_input == 3.0 + + +def test_ebops_dense(config_pdp, dense_input): + config_pdp.quantization_parameters.enable_quantization = True + config_pdp.quantization_parameters.use_high_granularity_quantization = True + layer = Linear(IN_FEATURES, OUT_FEATURES, bias=False) + model = TestModel(layer, "relu") + model = add_compression_layers_torch(model, config_pdp, dense_input.shape) + post_pretrain_functions(model, config_pdp) + model.submodule.hgq_loss() + + layer = Linear(IN_FEATURES, OUT_FEATURES, bias=True) + model = TestModel(layer, "relu") + model = add_compression_layers_torch(model, config_pdp, dense_input.shape) + post_pretrain_functions(model, config_pdp) + model.submodule.hgq_loss() + + +def test_ebops_conv2d(config_pdp, conv2d_input): + config_pdp.quantization_parameters.enable_quantization = True + config_pdp.quantization_parameters.use_high_granularity_quantization = True + layer = Conv2d(IN_FEATURES, OUT_FEATURES, KERNEL_SIZE, bias=False) + model = TestModel(layer, "relu") + model = add_compression_layers_torch(model, config_pdp, conv2d_input.shape) + post_pretrain_functions(model, config_pdp) + model.submodule.hgq_loss() + + layer = Conv2d(IN_FEATURES, OUT_FEATURES, KERNEL_SIZE, bias=True) + model = TestModel(layer, "relu") + model = add_compression_layers_torch(model, config_pdp, conv2d_input.shape) + post_pretrain_functions(model, config_pdp) + 
model.submodule.hgq_loss() + + +def test_ebops_conv1d(config_pdp, conv1d_input): + config_pdp.quantization_parameters.enable_quantization = True + config_pdp.quantization_parameters.use_high_granularity_quantization = True + layer = Conv1d(IN_FEATURES, OUT_FEATURES, KERNEL_SIZE, bias=False) + model = TestModel(layer, "relu") + model = add_compression_layers_torch(model, config_pdp, conv1d_input.shape) + post_pretrain_functions(model, config_pdp) + model.submodule.hgq_loss() + + layer = Conv1d(IN_FEATURES, OUT_FEATURES, KERNEL_SIZE, bias=True) + model = TestModel(layer, "relu") + model = add_compression_layers_torch(model, config_pdp, conv1d_input.shape) + post_pretrain_functions(model, config_pdp) + model.submodule.hgq_loss() + + +def test_ebops_bn(config_pdp, conv2d_input): + config_pdp.quantization_parameters.enable_quantization = True + config_pdp.quantization_parameters.use_high_granularity_quantization = True + layer = Conv2d(IN_FEATURES, OUT_FEATURES, KERNEL_SIZE, bias=False) + layer2 = BatchNorm2d(OUT_FEATURES) + model = TestModel2(layer, layer2, None, "relu") + shape = [1] + list(conv2d_input.shape[1:]) + model = add_compression_layers_torch(model, config_pdp, shape) + post_pretrain_functions(model, config_pdp) + model.submodule2.hgq_loss() + + +def test_linear_direct(config_pdp, dense_input): + config_pdp.quantization_parameters.enable_quantization = True + layer = PQDense(config_pdp, IN_FEATURES, OUT_FEATURES, quantize_output=True) + layer(dense_input) + assert layer.get_input_quantization_bits() == (0, 0, 7) + assert layer.get_weight_quantization_bits() == (1, 0, 7) + assert layer.get_bias_quantization_bits() == (1, 0, 7) + assert layer.get_output_quantization_bits() == (0, 0, 7) + + layer = PQDense( + config_pdp, + IN_FEATURES, + OUT_FEATURES, + quantize_output=True, + input_quantization_bits=(1, 2, 5), + weight_quantization_bits=(1, 0, 3), + bias_quantization_bits=(1, 0, 3), + output_quantization_bits=(1, 2, 5), + ) + layer(dense_input) + assert layer.get_input_quantization_bits() == (1, 2, 5) + assert layer.get_weight_quantization_bits() == (1, 0, 3) + assert layer.get_bias_quantization_bits() == (1, 0, 3) + assert layer.get_output_quantization_bits() == (1, 2, 5) + + +def test_linear_direct_hgq(config_pdp, dense_input): + config_pdp.quantization_parameters.use_high_granularity_quantization = True + config_pdp.quantization_parameters.enable_quantization = True + layer = PQDense(config_pdp, IN_FEATURES, OUT_FEATURES, quantize_output=True) + layer(dense_input) + k, i, f = layer.get_input_quantization_bits() + assert torch.all(k == 0) + assert torch.all(i == 0) + assert torch.all(f == 7) + k, i, f = layer.get_weight_quantization_bits() + assert torch.all(k == 1) + assert torch.all(i == 0) + assert torch.all(f == 7) + k, i, f = layer.get_bias_quantization_bits() + assert torch.all(k == 1) + assert torch.all(i == 0) + assert torch.all(f == 7) + + k, i, f = layer.get_output_quantization_bits() + assert torch.all(k == 0) + assert torch.all(i == 0) + assert torch.all(f == 7) + + layer = PQDense( + config_pdp, + IN_FEATURES, + OUT_FEATURES, + quantize_output=True, + input_quantization_bits=(1, 2, 5), + weight_quantization_bits=(1, 0, 3), + bias_quantization_bits=(1, 0, 3), + output_quantization_bits=(1, 2, 5), + ) + layer(dense_input) + k, i, f = layer.get_input_quantization_bits() + assert torch.all(k == 1) + assert torch.all(i == 2) + assert torch.all(f == 5) + k, i, f = layer.get_weight_quantization_bits() + assert torch.all(k == 1) + assert torch.all(i == 0) + assert torch.all(f 
== 3) + k, i, f = layer.get_bias_quantization_bits() + assert torch.all(k == 1) + assert torch.all(i == 0) + assert torch.all(f == 3) + + k, i, f = layer.get_output_quantization_bits() + assert torch.all(k == 1) + assert torch.all(i == 2) + assert torch.all(f == 5) + + +def test_conv2d_direct(config_pdp, conv2d_input): + config_pdp.quantization_parameters.enable_quantization = True + layer = PQConv2d(config_pdp, IN_FEATURES, OUT_FEATURES, KERNEL_SIZE, quantize_output=True) + layer(conv2d_input) + assert layer.get_input_quantization_bits() == (0, 0, 7) + assert layer.get_weight_quantization_bits() == (1, 0, 7) + assert layer.get_bias_quantization_bits() == (1, 0, 7) + assert layer.get_output_quantization_bits() == (0, 0, 7) + layer = PQConv2d( + config_pdp, + IN_FEATURES, + OUT_FEATURES, + KERNEL_SIZE, + quantize_output=True, + input_quantization_bits=(1, 2, 5), + weight_quantization_bits=(1, 0, 3), + bias_quantization_bits=(1, 0, 3), + output_quantization_bits=(1, 2, 5), + ) + layer(conv2d_input) + assert layer.get_input_quantization_bits() == (1, 2, 5) + assert layer.get_weight_quantization_bits() == (1, 0, 3) + assert layer.get_bias_quantization_bits() == (1, 0, 3) + assert layer.get_output_quantization_bits() == (1, 2, 5) + + +def test_conv2d_direct_hgq(config_pdp, conv2d_input): + config_pdp.quantization_parameters.use_high_granularity_quantization = True + config_pdp.quantization_parameters.enable_quantization = True + layer = PQConv2d(config_pdp, IN_FEATURES, OUT_FEATURES, KERNEL_SIZE, quantize_output=True) + layer(conv2d_input) + k, i, f = layer.get_input_quantization_bits() + assert torch.all(k == 0) + assert torch.all(i == 0) + assert torch.all(f == 7) + k, i, f = layer.get_weight_quantization_bits() + assert torch.all(k == 1) + assert torch.all(i == 0) + assert torch.all(f == 7) + k, i, f = layer.get_bias_quantization_bits() + assert torch.all(k == 1) + assert torch.all(i == 0) + assert torch.all(f == 7) + + k, i, f = layer.get_output_quantization_bits() + assert torch.all(k == 0) + assert torch.all(i == 0) + assert torch.all(f == 7) + + layer = PQConv2d( + config_pdp, + IN_FEATURES, + OUT_FEATURES, + KERNEL_SIZE, + quantize_output=True, + input_quantization_bits=(1, 2, 5), + weight_quantization_bits=(1, 0, 3), + bias_quantization_bits=(1, 0, 3), + output_quantization_bits=(1, 2, 5), + ) + layer(conv2d_input) + k, i, f = layer.get_input_quantization_bits() + assert torch.all(k == 1) + assert torch.all(i == 2) + assert torch.all(f == 5) + k, i, f = layer.get_weight_quantization_bits() + assert torch.all(k == 1) + assert torch.all(i == 0) + assert torch.all(f == 3) + k, i, f = layer.get_bias_quantization_bits() + assert torch.all(k == 1) + assert torch.all(i == 0) + assert torch.all(f == 3) + + k, i, f = layer.get_output_quantization_bits() + assert torch.all(k == 1) + assert torch.all(i == 2) + assert torch.all(f == 5) + + +def test_conv1d_direct(config_pdp, conv1d_input): + config_pdp.quantization_parameters.enable_quantization = True + layer = PQConv1d(config_pdp, IN_FEATURES, OUT_FEATURES, KERNEL_SIZE, quantize_output=True) + layer(conv1d_input) + assert layer.get_input_quantization_bits() == (0, 0, 7) + assert layer.get_weight_quantization_bits() == (1, 0, 7) + assert layer.get_bias_quantization_bits() == (1, 0, 7) + assert layer.get_output_quantization_bits() == (0, 0, 7) + layer = PQConv1d( + config_pdp, + IN_FEATURES, + OUT_FEATURES, + KERNEL_SIZE, + quantize_output=True, + input_quantization_bits=(1, 2, 5), + weight_quantization_bits=(1, 0, 3), + bias_quantization_bits=(1, 
0, 3), + output_quantization_bits=(1, 2, 5), + ) + layer(conv1d_input) + assert layer.get_input_quantization_bits() == (1, 2, 5) + assert layer.get_weight_quantization_bits() == (1, 0, 3) + assert layer.get_bias_quantization_bits() == (1, 0, 3) + assert layer.get_output_quantization_bits() == (1, 2, 5) + + +def test_conv1d_direct_hgq(config_pdp, conv1d_input): + config_pdp.quantization_parameters.use_high_granularity_quantization = True + config_pdp.quantization_parameters.enable_quantization = True + layer = PQConv1d(config_pdp, IN_FEATURES, OUT_FEATURES, KERNEL_SIZE, quantize_output=True) + layer(conv1d_input) + k, i, f = layer.get_input_quantization_bits() + assert torch.all(k == 0) + assert torch.all(i == 0) + assert torch.all(f == 7) + k, i, f = layer.get_weight_quantization_bits() + assert torch.all(k == 1) + assert torch.all(i == 0) + assert torch.all(f == 7) + k, i, f = layer.get_bias_quantization_bits() + assert torch.all(k == 1) + assert torch.all(i == 0) + assert torch.all(f == 7) + + k, i, f = layer.get_output_quantization_bits() + assert torch.all(k == 0) + assert torch.all(i == 0) + assert torch.all(f == 7) + + layer = PQConv1d( + config_pdp, + IN_FEATURES, + OUT_FEATURES, + KERNEL_SIZE, + quantize_output=True, + input_quantization_bits=(1, 2, 5), + weight_quantization_bits=(1, 0, 3), + bias_quantization_bits=(1, 0, 3), + output_quantization_bits=(1, 2, 5), + ) + layer(conv1d_input) + k, i, f = layer.get_input_quantization_bits() + assert torch.all(k == 1) + assert torch.all(i == 2) + assert torch.all(f == 5) + k, i, f = layer.get_weight_quantization_bits() + assert torch.all(k == 1) + assert torch.all(i == 0) + assert torch.all(f == 3) + k, i, f = layer.get_bias_quantization_bits() + assert torch.all(k == 1) + assert torch.all(i == 0) + assert torch.all(f == 3) + + k, i, f = layer.get_output_quantization_bits() + assert torch.all(k == 1) + assert torch.all(i == 2) + assert torch.all(f == 5) + + +def test_avgpool_direct(config_pdp, conv1d_input, conv2d_input): + config_pdp.quantization_parameters.enable_quantization = True + layer = PQAvgPool1d(config_pdp, kernel_size=3) + layer(conv1d_input) + assert layer.get_input_quantization_bits() == (0, 0, 7) + assert layer.get_output_quantization_bits() == (0, 0, 7) + layer = PQAvgPool1d( + config_pdp, KERNEL_SIZE, quantize_output=True, input_quantization_bits=(1, 2, 5), output_quantization_bits=(1, 2, 5) + ) + layer(conv1d_input) + assert layer.get_input_quantization_bits() == (1, 2, 5) + assert layer.get_output_quantization_bits() == (1, 2, 5) + + layer = PQAvgPool2d(config_pdp, kernel_size=3) + layer(conv2d_input) + assert layer.get_input_quantization_bits() == (0, 0, 7) + assert layer.get_output_quantization_bits() == (0, 0, 7) + + layer = PQAvgPool2d( + config_pdp, KERNEL_SIZE, quantize_output=True, input_quantization_bits=(1, 2, 5), output_quantization_bits=(1, 2, 5) + ) + layer(conv2d_input) + assert layer.get_input_quantization_bits() == (1, 2, 5) + assert layer.get_output_quantization_bits() == (1, 2, 5) + + +def test_avgpool_direct_hgq(config_pdp, conv1d_input, conv2d_input): + config_pdp.quantization_parameters.use_high_granularity_quantization = True + config_pdp.quantization_parameters.enable_quantization = True + layer = PQAvgPool1d(config_pdp, kernel_size=3, quantize_output=True) + layer(conv1d_input) + k, i, f = layer.get_input_quantization_bits() + assert torch.all(k == 0) + assert torch.all(i == 0) + assert torch.all(f == 7) + k, i, f = layer.get_output_quantization_bits() + assert torch.all(k == 0) + assert 
torch.all(i == 0) + assert torch.all(f == 7) + + layer = PQAvgPool1d( + config_pdp, KERNEL_SIZE, quantize_output=True, input_quantization_bits=(1, 2, 5), output_quantization_bits=(1, 2, 5) + ) + layer(conv1d_input) + k, i, f = layer.get_input_quantization_bits() + assert torch.all(k == 1) + assert torch.all(i == 2) + assert torch.all(f == 5) + + k, i, f = layer.get_output_quantization_bits() + assert torch.all(k == 1) + assert torch.all(i == 2) + assert torch.all(f == 5) + + # 2D + layer = PQAvgPool2d(config_pdp, kernel_size=3, quantize_output=True) + layer(conv2d_input) + k, i, f = layer.get_input_quantization_bits() + assert torch.all(k == 0) + assert torch.all(i == 0) + assert torch.all(f == 7) + k, i, f = layer.get_output_quantization_bits() + assert torch.all(k == 0) + assert torch.all(i == 0) + assert torch.all(f == 7) + + layer = PQAvgPool2d( + config_pdp, KERNEL_SIZE, quantize_output=True, input_quantization_bits=(1, 2, 5), output_quantization_bits=(1, 2, 5) + ) + layer(conv2d_input) + k, i, f = layer.get_input_quantization_bits() + assert torch.all(k == 1) + assert torch.all(i == 2) + assert torch.all(f == 5) + + k, i, f = layer.get_output_quantization_bits() + assert torch.all(k == 1) + assert torch.all(i == 2) + assert torch.all(f == 5) + + +def test_batchnorm2d_direct(config_pdp, conv2d_input): + config_pdp.quantization_parameters.enable_quantization = True + layer = PQBatchNorm2d(config_pdp, IN_FEATURES) + layer(conv2d_input) + assert layer.get_input_quantization_bits() == (0, 0, 7) + layer = PQBatchNorm2d(config_pdp, IN_FEATURES, input_quantization_bits=(1, 2, 5)) + layer(conv2d_input) + assert layer.get_input_quantization_bits() == (1, 2, 5) + + +def test_batchnorm2d_direct_hgq(config_pdp, conv2d_input): + config_pdp.quantization_parameters.use_high_granularity_quantization = True + config_pdp.quantization_parameters.enable_quantization = True + layer = PQBatchNorm2d(config_pdp, IN_FEATURES) + layer(conv2d_input) + k, i, f = layer.get_input_quantization_bits() + assert torch.all(k == 0) + assert torch.all(i == 0) + assert torch.all(f == 7) + layer = PQBatchNorm2d(config_pdp, IN_FEATURES, input_quantization_bits=(1, 2, 5)) + layer(conv2d_input) + k, i, f = layer.get_input_quantization_bits() + assert torch.all(k == 1) + assert torch.all(i == 2) + assert torch.all(f == 5) From 2f58069a3ffb8efbf931b96b2c8bc38f6ea58b87 Mon Sep 17 00:00:00 2001 From: Roope Niemi Date: Tue, 4 Nov 2025 11:15:44 +0100 Subject: [PATCH 14/37] fix fitcompress pretraining boolean check for torch --- .../torch_impl/compressed_layers_torch.py | 29 +++++++++++++------ 1 file changed, 20 insertions(+), 9 deletions(-) diff --git a/src/pquant/core/torch_impl/compressed_layers_torch.py b/src/pquant/core/torch_impl/compressed_layers_torch.py index a222ce2..4677c80 100644 --- a/src/pquant/core/torch_impl/compressed_layers_torch.py +++ b/src/pquant/core/torch_impl/compressed_layers_torch.py @@ -179,13 +179,16 @@ def prune(self, weight): weight = self.pruning_layer(weight) return weight + def is_fitcompress_pretraining(self): + return self.pruning_layer.is_pretraining and self.use_fitcompress + def pre_forward(self, x): if not self.built: self.build(x.shape) if self.quantize_input: if self.use_hgq and not self.input_quantizer.quantizer.built: self.input_quantizer.quantizer.build(x.shape) - if not self.pruning_layer.is_pretraining and not self.use_fitcompress: + if not self.is_fitcompress_pretraining(): x = self.input_quantizer(x) if self.pruning_method == "wanda": self.pruning_layer.collect_input(x, self.weight, 
self.training) @@ -195,7 +198,7 @@ def post_forward(self, x): if self.quantize_output: if self.use_hgq and not self.output_quantizer.quantizer.built: self.output_quantizer.quantizer.build(x.shape) - if not self.pruning_layer.is_pretraining and not self.use_fitcompress: + if not self.is_fitcompress_pretraining(): x = self.output_quantizer(x) if self.pruning_method == "activation_pruning": self.pruning_layer.collect_output(x, self.training) @@ -264,7 +267,7 @@ def ebops(self): @property def weight(self): - if self.final_compression_done: + if self.final_compression_done or self.is_fitcompress_pretraining(): return self._weight if self.pruning_first: weight = self.prune(self._weight) @@ -275,7 +278,7 @@ def weight(self): @property def bias(self): - if self.final_compression_done: + if self.final_compression_done or self.is_fitcompress_pretraining(): return self._bias bias = self.quantize(self._bias, self.bias_quantizer) return bias @@ -638,10 +641,13 @@ def hgq_loss(self): loss += self.output_quantizer.hgq_loss() return loss + def is_fitcompress_pretraining(self): + return self.is_pretraining and self.use_fitcompress + def pre_pooling(self, x): if not hasattr(self, "input_quantizer"): self.build(x.shape) - if self.use_fitcompress and self.is_pretraining: + if self.is_fitcompress_pretraining(): if self.post_fitcompress_calibration: # Save inputs self.saved_inputs.append(x) @@ -652,7 +658,7 @@ def pre_pooling(self, x): return x def post_pooling(self, x): - if self.quantize_output and self.enable_quantization: + if self.quantize_output and self.enable_quantization and not self.is_fitcompress_pretraining(): x = self.output_quantizer(x) return x @@ -779,6 +785,7 @@ def __init__( self.hgq_gamma = config.quantization_parameters.hgq_gamma self.hgq_beta = config.quantization_parameters.hgq_beta self.enable_quantization = config.quantization_parameters.enable_quantization + self.use_fitcompress = config.fitcompress_parameters.enable_fitcompress self.config = config self.quantize_input = quantize_input self._weight = nn.Parameter(self.weight.clone()) @@ -836,15 +843,18 @@ def get_weight_quantization_bits(self): def get_bias_quantization_bits(self): return self.bias_quantizer.get_quantization_bits() + def is_fitcompress_pretraining(self): + return self.is_pretraining and self.use_fitcompress + @property def weight(self): - if self.enable_quantization and not self.final_compression_done: + if self.enable_quantization and not self.final_compression_done and not self.is_fitcompress_pretraining(): return self.weight_quantizer(self._weight) return self._weight @property def bias(self): - if self.enable_quantization and not self.final_compression_done: + if self.enable_quantization and not self.final_compression_done and not self.is_fitcompress_pretraining(): return self.bias_quantizer(self._bias) return self._bias @@ -875,7 +885,8 @@ def forward(self, input: torch.Tensor) -> torch.Tensor: if self.quantize_input and self.enable_quantization: if self.use_hgq and not self.input_quantizer.quantizer.built: self.input_quantizer.quantizer.build(input.shape) - input = self.input_quantizer(input) + if not self.is_fitcompress_pretraining(): + input = self.input_quantizer(input) return super().forward(input) From d6a729fd592af8c53af1336710e867993c2bd967 Mon Sep 17 00:00:00 2001 From: Roope Niemi Date: Wed, 5 Nov 2025 15:48:29 +0100 Subject: [PATCH 15/37] Separate torch activation layers from Keras, combine tanh and relu into one layer. 
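
(The merged layer, added below as `PQActivation`, picks its function from an `activation_registry` and derives the keep-negatives bit from the name: `k = 0` for relu, `k = 1` for tanh. It also carries the learnable ReLU multiplier, which scales the input by a power of two before quantization. A minimal standalone sketch of that straight-through rounding trick, with an illustrative initial value and tensor shape that are not part of the patch:

    import torch

    multiplier = torch.nn.Parameter(torch.tensor(-1.0))
    x = torch.randn(8)
    # The forward pass sees the rounded exponent, so x is scaled by an exact
    # power of two; (round(m) - m).detach() + m lets the gradient bypass the
    # non-differentiable round() and reach the unrounded multiplier.
    exponent = (torch.round(multiplier) - multiplier).detach() + multiplier
    y = x * 2.0 ** exponent

The same expression appears in `pre_activation` below, so the exponent stays trainable while inference always applies a hardware-friendly power-of-two scale.)
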
Fixed bugs, added new values to quantization config --- src/pquant/core/torch_impl/activations.py | 169 ++++ .../torch_impl/compressed_layers_torch.py | 193 ++-- src/pquant/core/torch_impl/quantizer.py | 47 + src/pquant/data_models/quantization_model.py | 14 +- tests/test_torch_compression_layers.py | 849 +++++++++++++++++- 5 files changed, 1114 insertions(+), 158 deletions(-) create mode 100644 src/pquant/core/torch_impl/activations.py create mode 100644 src/pquant/core/torch_impl/quantizer.py diff --git a/src/pquant/core/torch_impl/activations.py b/src/pquant/core/torch_impl/activations.py new file mode 100644 index 0000000..b8ba44c --- /dev/null +++ b/src/pquant/core/torch_impl/activations.py @@ -0,0 +1,169 @@ +import torch +import torch.nn as nn +from torch import maximum, minimum, relu, tanh + +from pquant.core.torch_impl.quantizer import Quantizer + + +def hard_sigmoid(x): + """Computes hard_sigmoid function that saturates between 0 and 1.""" + x = torch.tensor(0.5) * x + torch.tensor(0.5) + x = maximum(x, torch.tensor(0.0)) + x = minimum(x, torch.tensor(1.0)) + return x + + +def hard_tanh(x): + """Computes hard_tanh function that saturates between -1 and 1.""" + return 2.0 * hard_sigmoid(x) - 1.0 + + +activation_registry = {"relu": relu, "tanh": tanh, "hard_tanh": hard_tanh} + + +class PQActivation(nn.Module): + def __init__( + self, + config, + activation="relu", + i_input=0.0, + f_input=8.0, + i_output=0.0, + f_output=7.0, + quantize_input=True, + quantize_output=False, + ): + super().__init__() + if isinstance(config, dict): + from pquant.core.finetuning import TuningConfig + + config = TuningConfig.load_from_config(config) + self.config = config + self.i_input = i_input + self.f_input = f_input + self.k = 0.0 if activation.lower() == "relu" else 1.0 + + self.i_output = i_output + self.f_output = f_output + + self.activation_name = activation.lower() + self.activation_function = activation_registry.get(self.activation_name) + + self.enable_quantization = config.quantization_parameters.enable_quantization + self.use_hgq = config.quantization_parameters.use_high_granularity_quantization + self.is_pretraining = True + self.round_mode = config.quantization_parameters.round_mode + self.overflow = config.quantization_parameters.overflow + self.use_multiplier = config.quantization_parameters.use_relu_multiplier + self.hgq_beta = config.quantization_parameters.hgq_beta + self.hgq_gamma = config.quantization_parameters.hgq_gamma + self.hgq_heterogeneous = config.quantization_parameters.hgq_heterogeneous + self.use_fitcompress = config.fitcompress_parameters.enable_fitcompress + + self.post_fitcompress_calibration = False + self.saved_inputs = [] + self.quantize_input = quantize_input + self.quantize_output = quantize_output + self.built = False + + def check_is_built(self, input_shape): + if self.built: + return + self.built = True + self.input_shape = input_shape + self.output_quantizer = Quantizer( + k=self.k, + i=self.i_output, + f=self.f_output, + overflow=self.overflow, + round_mode=self.round_mode, + is_data=True, + is_heterogeneous=self.use_hgq, + hgq_gamma=self.hgq_gamma, + ) + self.input_quantizer = Quantizer( + k=self.k, + i=self.i_input, + f=self.f_input, + overflow=self.overflow, + round_mode=self.round_mode, + is_data=True, + is_heterogeneous=self.use_hgq, + hgq_gamma=self.hgq_gamma, + ) + if self.use_hgq: + self.input_quantizer.quantizer.build(input_shape) + self.output_quantizer.quantizer.build(input_shape) + + if self.use_multiplier: + self.multiplier = 
nn.Parameter(torch.tensor(-1.0), requires_grad=True) + + def get_input_quantization_bits(self): + return self.input_quantizer.get_quantization_bits() + + def set_input_quantization_bits(self, i, f): + self.input_quantizer.set_quantization_bits(i, f) + + def get_output_quantization_bits(self): + return self.output_quantizer.get_quantization_bits() + + def set_output_quantization_bits(self, i, f): + self.output_quantizer.set_quantization_bits(i, f) + + def post_pre_train_function(self): + self.is_pretraining = False + + def ebops(self): + bw_inp = self.input_quantizer.quantizer.bits_(self.input_shape) + bw_out = self.output_quantizer.quantizer.bits_(self.input_shape) + return torch.sum((2.0**bw_inp) * bw_out) * 1e-4 # type: ignore + + def hgq_loss(self): + if self.is_pretraining or not self.use_hgq: + return torch.tensor(0.0) + loss = self.hgq_beta * self.ebops() + if self.quantize_input: + loss += self.input_quantizer.hgq_loss() + if self.quantize_output: + loss += self.output_quantizer.hgq_loss() + return loss + + def pre_activation(self, x): + if self.use_multiplier: + x = x * 2 ** ((torch.round(self.multiplier) - self.multiplier).detach() + self.multiplier) + if self.quantize_input and self.enable_quantization: + x = self.input_quantizer(x) + return x + + def post_activation(self, x): + if self.quantize_output and self.enable_quantization: + return self.output_quantizer(x) + return x + + def forward(self, x): + self.check_is_built(x.shape) + if self.use_fitcompress and self.is_pretraining and self.activation_name == "relu": + if self.post_fitcompress_calibration: + # Save quantized input into ReLU + self.saved_inputs.append(x) + # During FITcompress, we do not use any quantized activations + return relu(x) + # Multiplier after fitcompress if condition, such that we don't use any relu multiplier during FITcompress search + x = self.pre_activation(x) + x = self.activation_function(x) + x = self.post_activation(x) + return x + + def get_config(self): + config = super().get_config() + config.update( + { + "config": self.config.get_dict(), + "i": float(self.i), + "f": float(self.f), + } + ) + return config + + def extra_repr(self): + return f"quantize_input = {self.quantize_input}, quantize_output = {self.quantize_output}" diff --git a/src/pquant/core/torch_impl/compressed_layers_torch.py b/src/pquant/core/torch_impl/compressed_layers_torch.py index 4677c80..39ef0d5 100644 --- a/src/pquant/core/torch_impl/compressed_layers_torch.py +++ b/src/pquant/core/torch_impl/compressed_layers_torch.py @@ -7,7 +7,8 @@ from torch.fx import symbolic_trace from torch.nn.common_types import _size_1_t, _size_2_t -from pquant.core.activations_quantizer import QuantizedReLU, QuantizedTanh +from pquant.core.torch_impl.activations import PQActivation +from pquant.core.torch_impl.quantizer import Quantizer from pquant.core.utils import get_pruning_layer if typing.TYPE_CHECKING: @@ -15,8 +16,6 @@ from keras import ops -from pquant.core.quantizer_functions import create_quantizer - T = TypeVar("T") @@ -81,10 +80,13 @@ def __init__( self.parallelization_factor = -1 self.hgq_beta = config.quantization_parameters.hgq_beta self.input_shape = None + self.is_pretraining = True - def build(self, input_shape): + def check_is_built(self, input_shape): + if self.built: + return # Build function to delay quantizer creation until after custom i,f bits have been set - self.input_quantizer = PyTorchQuantizer( + self.input_quantizer = Quantizer( torch.tensor(self.k_input), torch.tensor(self.i_input), torch.tensor(self.f_input), @@ 
-94,7 +96,7 @@ def build(self, input_shape): True, self.hgq_gamma, ) - self.weight_quantizer = PyTorchQuantizer( + self.weight_quantizer = Quantizer( torch.tensor(self.k_weight), torch.tensor(self.i_weight), torch.tensor(self.f_weight), @@ -105,7 +107,7 @@ def build(self, input_shape): self.hgq_gamma, ) - self.bias_quantizer = PyTorchQuantizer( + self.bias_quantizer = Quantizer( torch.tensor(self.k_bias), torch.tensor(self.i_bias), torch.tensor(self.f_bias), @@ -116,7 +118,7 @@ def build(self, input_shape): self.hgq_gamma, ) - self.output_quantizer = PyTorchQuantizer( + self.output_quantizer = Quantizer( torch.tensor(self.k_output), torch.tensor(self.i_output), torch.tensor(self.f_output), @@ -147,17 +149,22 @@ def get_output_quantization_bits(self): def apply_final_compression(self): pass + def post_pre_train_function(self): + self.is_pretraining = False + if self.pruning_layer is not None: + self.pruning_layer.post_pre_train_function() + def save_weights(self): - self.init_weight = self.weight.clone() + self.init_weight = self._weight.clone() def rewind_weights(self): - self.weight.data = self.init_weight.clone() + self._weight.data = self.init_weight.clone() def ebops(self): return 0.0 def hgq_loss(self): - if self.pruning_layer.is_pretraining or not self.use_hgq: + if self.is_pretraining or not self.use_hgq: return 0.0 loss = self.hgq_beta * self.ebops() loss += self.weight_quantizer.hgq_loss() @@ -170,7 +177,7 @@ def hgq_loss(self): return loss def quantize(self, x, quantizer): - if self.enable_quantization: + if self.enable_quantization and not self.is_fitcompress_pretraining(): return quantizer(x) if x is not None else x return x @@ -180,26 +187,19 @@ def prune(self, weight): return weight def is_fitcompress_pretraining(self): - return self.pruning_layer.is_pretraining and self.use_fitcompress + return self.is_pretraining and self.use_fitcompress def pre_forward(self, x): - if not self.built: - self.build(x.shape) + self.check_is_built(x.shape) if self.quantize_input: - if self.use_hgq and not self.input_quantizer.quantizer.built: - self.input_quantizer.quantizer.build(x.shape) - if not self.is_fitcompress_pretraining(): - x = self.input_quantizer(x) + x = self.quantize(x, self.input_quantizer) if self.pruning_method == "wanda": self.pruning_layer.collect_input(x, self.weight, self.training) return x def post_forward(self, x): if self.quantize_output: - if self.use_hgq and not self.output_quantizer.quantizer.built: - self.output_quantizer.quantizer.build(x.shape) - if not self.is_fitcompress_pretraining(): - x = self.output_quantizer(x) + x = self.quantize(x, self.output_quantizer) if self.pruning_method == "activation_pruning": self.pruning_layer.collect_output(x, self.training) return x @@ -241,7 +241,6 @@ def __init__( self.in_features = in_features self.out_features = out_features self.use_fitcompress = config.fitcompress_parameters.enable_fitcompress - self.is_pretraining = True self._weight = nn.Parameter(self.weight.clone()).to(self.weight.device) if bias: self._bias = nn.Parameter(self.bias.clone()).to(self.bias.device) @@ -251,17 +250,14 @@ def __init__( del self._parameters["bias"] self.pruning_layer.build(self._weight.shape) - def post_pre_train_function(self): - self.is_pretraining = False - def ebops(self): bw_inp = self.input_quantizer.quantizer.bits_(self.input_shape) - bw_ker = self.weight_quantizer.quantizer.bits_(ops.shape(self.weight)) + bw_ker = self.weight_quantizer.quantizer.bits_(ops.shape(self._weight)) ebops = ops.sum(F.linear(bw_inp, bw_ker)) ebops = ebops 
* self.n_parallel / self.parallelization_factor if self.bias is not None: bw_bias = self.bias_quantizer.quantizer.bits_(ops.shape(self.bias)) - size = ops.cast(ops.prod(list(self.input_shape)), self.weight.dtype) + size = ops.cast(ops.prod(list(self.input_shape)), self._weight.dtype) ebops += ops.mean(bw_bias) * size return ebops @@ -346,7 +342,6 @@ def __init__( **kwargs, ) self.use_fitcompress = config.fitcompress_parameters.enable_fitcompress - self.is_pretraining = True self._weight = nn.Parameter(self.weight.clone()).to(self.weight.device) if bias: self._bias = nn.Parameter(self.bias.clone()).to(self.bias.device) @@ -356,9 +351,6 @@ def __init__( del self._parameters["bias"] self.pruning_layer.build(self._weight.shape) - def post_pre_train_function(self): - self.is_pretraining = False - def ebops(self): bw_inp = self.input_quantizer.quantizer.bits_(self.input_shape) bw_ker = self.weight_quantizer.quantizer.bits_(ops.shape(self.weight)) @@ -465,7 +457,6 @@ def __init__( **kwargs, ) self.use_fitcompress = config.fitcompress_parameters.enable_fitcompress - self.is_pretraining = True self._weight = nn.Parameter(self.weight.clone()).to(self.weight.device) if bias: self._bias = nn.Parameter(self.bias.clone()).to(self.bias.device) @@ -475,9 +466,6 @@ def __init__( del self._parameters["bias"] self.pruning_layer.build(self._weight.shape) - def post_pre_train_function(self): - self.is_pretraining = False - def ebops(self): bw_inp = self.input_quantizer.quantizer.bits_(self.input_shape) bw_ker = self.weight_quantizer.quantizer.bits_(ops.shape(self.weight)) @@ -596,7 +584,7 @@ def __init__( self.quantize_output = quantize_output def build(self, input_shape): - self.input_quantizer = PyTorchQuantizer( + self.input_quantizer = Quantizer( k=torch.tensor(self.k_input), i=torch.tensor(self.i_input), f=torch.tensor(self.f_input), @@ -606,7 +594,7 @@ def build(self, input_shape): is_data=True, hgq_gamma=self.hgq_gamma, ) - self.output_quantizer = PyTorchQuantizer( + self.output_quantizer = Quantizer( k=torch.tensor(self.k_output), i=torch.tensor(self.i_output), f=torch.tensor(self.f_output), @@ -652,7 +640,7 @@ def pre_pooling(self, x): # Save inputs self.saved_inputs.append(x) # During FITcompress, we do not use any quantized pooling - return super().forward() + return x if self.quantize_input and self.enable_quantization: x = self.input_quantizer(x) return x @@ -796,9 +784,11 @@ def __init__( self.final_compression_done = False self.is_pretraining = True - def build(self, input_shape): + def check_is_built(self, input_shape): + if self.built: + return self.built = True - self.input_quantizer = PyTorchQuantizer( + self.input_quantizer = Quantizer( k=torch.tensor(self.k_input), i=torch.tensor(self.i_input), f=torch.tensor(self.f_input), @@ -808,7 +798,7 @@ def build(self, input_shape): is_data=True, hgq_gamma=self.hgq_gamma, ) - self.weight_quantizer = PyTorchQuantizer( + self.weight_quantizer = Quantizer( k=torch.tensor(self.k_weight), i=torch.tensor(self.i_weight), f=torch.tensor(self.f_weight), @@ -817,7 +807,7 @@ def build(self, input_shape): is_data=False, is_heterogeneous=self.use_hgq, ) - self.bias_quantizer = PyTorchQuantizer( + self.bias_quantizer = Quantizer( k=torch.tensor(self.k_bias), i=torch.tensor(self.i_bias), f=torch.tensor(self.f_bias), @@ -826,6 +816,8 @@ def build(self, input_shape): is_data=False, is_heterogeneous=self.use_hgq, ) + if self.use_hgq: + self.input_quantizer.quantizer.build(input_shape) shape = [1] * len(input_shape) shape[1] = input_shape[1] self._shape = tuple(shape) 
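The check_is_built refactor in the hunk above is easiest to see in isolation: the quantizers cannot be constructed in __init__ because the input shape is only known at the first forward call, so the build is deferred and made idempotent, and the HGQ input quantizer is built eagerly once that shape is available. A minimal, self-contained sketch of the pattern follows; PassThroughQuantizer and LazyQuantizedBN are illustrative stand-ins, not the library's actual Quantizer or PQBatchNorm2d classes.

import torch
import torch.nn as nn


class PassThroughQuantizer(nn.Module):
    # Stand-in for the real Quantizer wrapper; quantization omitted.
    def forward(self, x):
        return x


class LazyQuantizedBN(nn.Module):
    def __init__(self):
        super().__init__()
        self.built = False

    def check_is_built(self, input_shape):
        # Idempotent: repeated forward calls build the quantizers only once.
        if self.built:
            return
        self.built = True
        self.input_quantizer = PassThroughQuantizer()
        # Broadcast shape over all axes except the channel axis,
        # mirroring the BatchNorm hunk above.
        shape = [1] * len(input_shape)
        shape[1] = input_shape[1]
        self._shape = tuple(shape)

    def forward(self, x):
        self.check_is_built(x.shape)
        return self.input_quantizer(x)


bn = LazyQuantizedBN()
bn(torch.randn(2, 3, 8, 8))
assert bn.built and bn._shape == (1, 3, 1, 1)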
@@ -833,6 +825,8 @@ def build(self, input_shape): def apply_final_compression(self): self.final_compression_done = True + self._weight.data = self.weight + self._bias.data = self.bias def get_input_quantization_bits(self): return self.input_quantizer.get_quantization_bits() @@ -880,72 +874,13 @@ def post_pretrain_function(self): self.is_pretraining = False def forward(self, input: torch.Tensor) -> torch.Tensor: - if not self.built: - self.build(input.shape) + self.check_is_built(input.shape) if self.quantize_input and self.enable_quantization: - if self.use_hgq and not self.input_quantizer.quantizer.built: - self.input_quantizer.quantizer.build(input.shape) if not self.is_fitcompress_pretraining(): input = self.input_quantizer(input) return super().forward(input) -class QuantizedActivation(torch.nn.Module): - def __init__(self, activation): - super().__init__() - self.activation = activation - - def hgq_loss(self): - return self.activation.hgq_loss() - - def forward(self, x): - return self.activation(x) - - -class PyTorchQuantizer(nn.Module): - # HGQ quantizer wrapper - def __init__(self, k, i, f, overflow, round_mode, is_heterogeneous, is_data, hgq_gamma=0): - super().__init__() - self.k = torch.nn.Parameter(torch.tensor(k), requires_grad=False) - self.i = torch.nn.Parameter(torch.tensor(i), requires_grad=False) - self.f = torch.nn.Parameter(torch.tensor(f), requires_grad=False) - self.overflow = overflow - self.round_mode = round_mode - self.use_hgq = is_heterogeneous - self.quantizer = create_quantizer(self.k, self.i, self.f, overflow, round_mode, is_heterogeneous, is_data) - self.is_pretraining = False - self.hgq_gamma = hgq_gamma - - def get_quantization_bits(self): - if self.use_hgq: - return self.quantizer.quantizer.k, self.quantizer.quantizer.i, self.quantizer.quantizer.f - else: - return self.k, self.i, self.f - - def set_quantization_bits(self, i, f): - if self.use_hgq: - self.quantizer.quantizer._i.assign(self.quantizer.quantizer._i * 0.0 + i) - self.quantizer.quantizer._f.assign(self.quantizer.quantizer._f * 0.0 + f) - self.i.data = i - self.f.data = f - - def post_pretrain(self): - self.is_pretraining = True - - def forward(self, x): - if self.use_hgq: - x = self.quantizer(x) - else: - x = self.quantizer(x, k=self.k, i=self.i, f=self.f) - return x - - def hgq_loss(self): - if self.is_pretraining or not self.use_hgq: - return 0.0 - loss = (torch.sum(self.quantizer.quantizer.i) + torch.sum(self.quantizer.quantizer.f)) * self.hgq_gamma - return loss - - def add_layer_specific_quantization_to_model(module, config): for name, layer in module.named_modules(): if isinstance(layer, PQWeightBiasBase): @@ -1029,29 +964,29 @@ def add_layer_specific_quantization_to_model(module, config): quantize = layer_config["output"]["quantize"] layer.quantize_output = quantize - elif layer.__class__ == QuantizedActivation: + elif layer.__class__ == PQActivation: if name in config.quantization_parameters.layer_specific: layer_config = config.quantization_parameters.layer_specific[name] if "input" in layer_config: if "integer_bits" in layer_config["input"]: input_int_bits = torch.tensor(layer_config["input"]["integer_bits"]) - layer.activation.i_input = input_int_bits + layer.i_input = input_int_bits if "fractional_bits" in layer_config["input"]: input_fractional_bits = torch.tensor(layer_config["input"]["fractional_bits"]) - layer.activation.f_input = input_fractional_bits + layer.f_input = input_fractional_bits if "quantize" in layer_config["input"]: quantize = layer_config["input"]["quantize"] - 
layer.activation.quantize_input = quantize + layer.quantize_input = quantize if "output" in layer_config: if "integer_bits" in layer_config["output"]: output_int_bits = torch.tensor(layer_config["output"]["integer_bits"]) - layer.activation.i_output = output_int_bits + layer.i_output = output_int_bits if "fractional_bits" in layer_config["output"]: output_fractional_bits = torch.tensor(layer_config["output"]["fractional_bits"]) - layer.activation.f_output = output_fractional_bits + layer.f_output = output_fractional_bits if "quantize" in layer_config["output"]: quantize = layer_config["output"]["quantize"] - layer.activation.quantize_output = quantize + layer.quantize_output = quantize return module @@ -1068,10 +1003,29 @@ def add_quantized_activations_to_model_layer(module, config): # For ReLU, if using default values, add 1 bit since values are unsigned. # Otherwise user provides bits. TODO: Find better way to do this f = config.quantization_parameters.default_data_fractional_bits + 1 - relu = QuantizedActivation(QuantizedReLU(config, i_input=i, f_input=f, i_output=i, f_output=f)) + relu = PQActivation( + config, + "relu", + i_input=i, + f_input=f, + i_output=i, + f_output=f, + quantize_input=quantize_input, + quantize_output=quantize_output, + ) setattr(module, name, relu) elif layer.__class__ in [nn.Tanh]: - tanh = QuantizedActivation(QuantizedTanh(config, i_input=i, f_input=f, i_output=i, f_output=f)) + type_of_tanh = "tanh" if config.quantization_parameters.use_real_tanh else "hard_tanh" + tanh = PQActivation( + config, + type_of_tanh, + i_input=i, + f_input=f, + i_output=i, + f_output=f, + quantize_input=quantize_input, + quantize_output=quantize_output, + ) setattr(module, name, tanh) elif layer.__class__ == nn.AvgPool1d: new_layer = PQAvgPool1d( @@ -1280,14 +1234,13 @@ def post_pretrain_functions(model, config, train_loader=None, loss_func=None): # idx = 0 for layer in model.modules(): if isinstance(layer, (PQConv2d, PQConv1d, PQDense)): - layer.pruning_layer.post_pre_train_function() layer.post_pre_train_function() # layer.pruning_layer.mask = pruning_mask_importance_scores[idx] # idx += 1 - elif isinstance(layer, QuantizedActivation): - layer.activation.post_pre_train_function() + elif isinstance(layer, (PQActivation, Quantizer)): + layer.post_pre_train_function() elif isinstance(layer, (PQBatchNorm2d, PQAvgPool1d, PQAvgPool2d)): layer.post_pretrain_function() if config.pruning_parameters.pruning_method == "pdp" or ( @@ -1343,18 +1296,16 @@ def get_layer_keep_ratio_torch(model): def get_model_losses_torch(model, losses): - loss = 0.0 + for layer in model.modules(): + loss = 0.0 if isinstance(layer, (PQConv2d, PQConv1d, PQDense)): if layer.enable_pruning: loss += layer.pruning_layer.calculate_additional_loss() if layer.use_hgq: loss += layer.hgq_loss() losses += loss - elif isinstance(layer, (QuantizedActivation)): - if layer.activation.use_hgq: - losses += layer.hgq_loss() - elif isinstance(layer, (PQAvgPool1d, PQBatchNorm2d)): + elif isinstance(layer, (PQAvgPool1d, PQAvgPool2d, PQBatchNorm2d, PQActivation)): if layer.use_hgq: losses += layer.hgq_loss() return losses diff --git a/src/pquant/core/torch_impl/quantizer.py b/src/pquant/core/torch_impl/quantizer.py new file mode 100644 index 0000000..cb7e78e --- /dev/null +++ b/src/pquant/core/torch_impl/quantizer.py @@ -0,0 +1,47 @@ +import torch +import torch.nn as nn + +from pquant.core.quantizer_functions import create_quantizer + + +class Quantizer(nn.Module): + def __init__(self, k, i, f, overflow, round_mode, is_heterogeneous, 
is_data, hgq_gamma=0): + super().__init__() + self.k = torch.nn.Parameter(torch.tensor(k), requires_grad=False) + self.i = torch.nn.Parameter(torch.tensor(i), requires_grad=False) + self.f = torch.nn.Parameter(torch.tensor(f), requires_grad=False) + self.overflow = overflow + self.round_mode = round_mode + self.use_hgq = is_heterogeneous + self.quantizer = create_quantizer(self.k, self.i, self.f, overflow, round_mode, is_heterogeneous, is_data) + self.is_pretraining = False + self.hgq_gamma = hgq_gamma + + def get_quantization_bits(self): + if self.use_hgq: + return self.quantizer.quantizer.k, self.quantizer.quantizer.i, self.quantizer.quantizer.f + else: + return self.k, self.i, self.f + + def set_quantization_bits(self, i, f): + if self.use_hgq: + self.quantizer.quantizer._i.assign(self.quantizer.quantizer._i * 0.0 + i) + self.quantizer.quantizer._f.assign(self.quantizer.quantizer._f * 0.0 + f) + self.i.data = i + self.f.data = f + + def post_pre_train_function(self): + self.is_pretraining = False + + def forward(self, x): + if self.use_hgq: + x = self.quantizer(x) + else: + x = self.quantizer(x, k=self.k, i=self.i, f=self.f) + return x + + def hgq_loss(self): + if self.is_pretraining or not self.use_hgq: + return 0.0 + loss = (torch.sum(self.quantizer.quantizer.i) + torch.sum(self.quantizer.quantizer.f)) * self.hgq_gamma + return loss diff --git a/src/pquant/data_models/quantization_model.py b/src/pquant/data_models/quantization_model.py index b043e48..86c47f1 100644 --- a/src/pquant/data_models/quantization_model.py +++ b/src/pquant/data_models/quantization_model.py @@ -4,13 +4,21 @@ class BaseQuantizationModel(BaseModel): - default_integer_bits: float = Field(default=0.0) - default_fractional_bits: float = Field(default=7.0) + default_weight_keep_negatives: float = Field(default=1.0) + default_weight_integer_bits: float = Field(default=0.0) + default_weight_fractional_bits: float = Field(default=7.0) + default_data_keep_negatives: float = Field(default=0.0) + default_data_integer_bits: float = Field(default=0.0) + default_data_fractional_bits: float = Field(default=7.0) + quantize_input: bool = Field(default=True) + quantize_output: bool = Field(default=False) enable_quantization: bool = Field(default=True) hgq_gamma: float = Field(default=0.0003) + hgq_beta: float = Field(default=1e-5) hgq_heterogeneous: bool = Field(default=True) layer_specific: List = Field(default_factory=list) use_high_granularity_quantization: bool = Field(default=False) use_real_tanh: bool = Field(default=False) - use_symmetric_quantization: bool = Field(default=False) + overflow: str = Field(default="SAT") + round_mode: str = Field(default="RND") use_relu_multiplier: bool = Field(default=True) diff --git a/tests/test_torch_compression_layers.py b/tests/test_torch_compression_layers.py index ec7bd0a..5f5a015 100644 --- a/tests/test_torch_compression_layers.py +++ b/tests/test_torch_compression_layers.py @@ -17,7 +17,7 @@ ) from pquant import post_training_prune -from pquant.core.activations_quantizer import QuantizedReLU, QuantizedTanh +from pquant.core.torch_impl.activations import PQActivation from pquant.core.torch_impl.compressed_layers_torch import ( PQAvgPool1d, PQAvgPool2d, @@ -26,10 +26,10 @@ PQConv2d, PQDense, PQWeightBiasBase, - QuantizedActivation, add_compression_layers_torch, apply_final_compression_torch, get_layer_keep_ratio_torch, + get_model_losses_torch, post_pretrain_functions, pre_finetune_functions, ) @@ -248,7 +248,11 @@ def test_dense_call(config_pdp, dense_input): layer_to_replace = 
Linear(IN_FEATURES, OUT_FEATURES, bias=False) out = layer_to_replace(dense_input) layer = PQDense( - config_pdp, layer_to_replace.in_features, layer_to_replace.out_features, layer_to_replace.bias is not None + config_pdp, + layer_to_replace.in_features, + layer_to_replace.out_features, + layer_to_replace.bias is not None, + quantize_input=False, ) layer._weight.data = layer_to_replace.weight.data out2 = layer(dense_input) @@ -271,6 +275,7 @@ def test_conv2d_call(config_pdp, conv2d_input): layer_to_replace.padding_mode, layer_to_replace.weight.device, layer_to_replace.weight.dtype, + quantize_input=False, ) layer._weight.data = layer_to_replace.weight.data out2 = layer(conv2d_input) @@ -293,6 +298,7 @@ def test_conv1d_call(config_pdp, conv1d_input): layer_to_replace.padding_mode, layer_to_replace.weight.device, layer_to_replace.weight.dtype, + quantize_input=False, ) layer._weight.data = layer_to_replace.weight.data out2 = layer(conv1d_input) @@ -432,7 +438,7 @@ def test_check_activation(config_pdp, dense_input): layer = Linear(IN_FEATURES, OUT_FEATURES, bias=False) model = TestModel(layer, "relu") model = add_compression_layers_torch(model, config_pdp, dense_input.shape) - assert isinstance(model.activation, QuantizedActivation) + assert isinstance(model.activation, PQActivation) # Tanh config_pdp.quantization_parameters.enable_quantization = False @@ -445,7 +451,7 @@ def test_check_activation(config_pdp, dense_input): layer = Linear(IN_FEATURES, OUT_FEATURES, bias=False) model = TestModel(layer, "tanh") model = add_compression_layers_torch(model, config_pdp, dense_input.shape) - assert isinstance(model.activation, QuantizedActivation) + assert isinstance(model.activation, PQActivation) def check_keras_layer_is_built(module, is_built): @@ -577,16 +583,16 @@ def test_trigger_post_pretraining(config_pdp, dense_input): model = add_compression_layers_torch(model, config_pdp, dense_input.shape) assert model.submodule.pruning_layer.is_pretraining is True - assert model.activation.activation.is_pretraining is True + assert model.activation.is_pretraining is True assert model.submodule2.pruning_layer.is_pretraining is True - assert model.activation2.activation.is_pretraining is True + assert model.activation2.is_pretraining is True post_pretrain_functions(model, config_pdp) assert model.submodule.pruning_layer.is_pretraining is False - assert model.activation.activation.is_pretraining is False + assert model.activation.is_pretraining is False assert model.submodule2.pruning_layer.is_pretraining is False - assert model.activation2.activation.is_pretraining is False + assert model.activation2.is_pretraining is False def test_hgq_weight_shape(config_pdp, dense_input): @@ -600,7 +606,7 @@ def test_hgq_weight_shape(config_pdp, dense_input): post_pretrain_functions(model, config_pdp) assert model.submodule.weight_quantizer.quantizer.quantizer._i.shape == model.submodule.weight.shape - assert model.activation.activation.input_quantizer.quantizer._i.shape == (1, OUT_FEATURES) + assert model.activation.input_quantizer.quantizer.quantizer._i.shape == (1, OUT_FEATURES) def test_qbn_build(config_pdp, conv2d_input): @@ -634,16 +640,16 @@ def test_set_activation_custom_bits_hgq(config_pdp, conv2d_input): assert m.f_bias == 7.0 assert torch.all(m.weight_quantizer.quantizer.quantizer.f == 7.0) assert torch.all(m.weight_quantizer.quantizer.quantizer.f == 7.0) - elif isinstance(m, (QuantizedTanh)): - assert m.i_input == 0.0 - assert m.f_input == 7.0 - assert torch.all(m.output_quantizer.quantizer.i == 0.0) - assert 
torch.all(m.output_quantizer.quantizer.f == 7.0) - elif isinstance(m, (QuantizedReLU)): - assert m.i_input == 0.0 - assert m.f_input == 8.0 - assert torch.all(m.input_quantizer.quantizer.i == 0.0) - assert torch.all(m.input_quantizer.quantizer.f == 8.0) + elif isinstance(m, PQActivation) and m.activation_name == "tanh": + k_input, i_input, f_input = m.get_input_quantization_bits() + + assert torch.all(i_input == 0.0) + assert torch.all(f_input == 7.0) + elif isinstance(m, PQActivation) and m.activation_name == "relu": + k_input, i_input, f_input = m.get_input_quantization_bits() + + assert torch.all(i_input == 0.0) + assert torch.all(f_input == 8.0) elif isinstance(m, PQAvgPool2d): assert m.i_input == 0.0 @@ -675,16 +681,16 @@ def test_set_activation_custom_bits_hgq(config_pdp, conv2d_input): assert m.f_bias == 4.0 assert torch.all(m.weight_quantizer.quantizer.quantizer.f == 3.0) assert torch.all(m.bias_quantizer.quantizer.quantizer.f == 4.0) - elif isinstance(m, (QuantizedTanh)): - assert m.i_input == 0.0 - assert m.f_input == 3.0 - assert torch.all(m.input_quantizer.quantizer.i == 0.0) - assert torch.all(m.input_quantizer.quantizer.f == 3.0) - elif isinstance(m, (QuantizedReLU)): - assert m.i_input == 1.0 - assert m.f_input == 4.0 - assert torch.all(m.input_quantizer.quantizer.i == 1.0) - assert torch.all(m.input_quantizer.quantizer.f == 4.0) + elif isinstance(m, PQActivation) and m.activation_name == "tanh": + k_input, i_input, f_input = m.get_input_quantization_bits() + + assert torch.all(i_input == 0.0) + assert torch.all(f_input == 3.0) + elif isinstance(m, PQActivation) and m.activation_name == "relu": + k_input, i_input, f_input = m.get_input_quantization_bits() + + assert torch.all(i_input == 1.0) + assert torch.all(f_input == 4.0) elif isinstance(m, PQAvgPool2d): assert m.i_input == 1.0 assert m.f_input == 3.0 @@ -704,10 +710,10 @@ def test_set_activation_custom_bits_quantizer(config_pdp, conv2d_input): if isinstance(m, (PQWeightBiasBase)): assert m.i_weight == 0.0 assert m.f_bias == 7.0 - elif isinstance(m, (QuantizedTanh)): + elif isinstance(m, PQActivation) and m.activation_name == "tanh": assert m.i_input == 0.0 assert m.f_input == 7.0 - elif isinstance(m, (QuantizedReLU)): + elif isinstance(m, PQActivation) and m.activation_name == "relu": assert m.i_input == 0.0 assert m.f_input == 8.0 @@ -728,10 +734,10 @@ def test_set_activation_custom_bits_quantizer(config_pdp, conv2d_input): if isinstance(m, (PQWeightBiasBase)): assert m.i_weight == 1.0 assert m.f_bias == 3.0 - elif isinstance(m, (QuantizedTanh)): + elif isinstance(m, PQActivation) and m.activation_name == "tanh": assert m.i_input == 0.0 assert m.f_input == 3.0 - elif isinstance(m, (QuantizedReLU)): + elif isinstance(m, PQActivation) and m.activation_name == "relu": assert m.i_input == 0.0 assert m.f_input == 4.0 elif isinstance(m, PQAvgPool2d): @@ -1144,3 +1150,778 @@ def test_batchnorm2d_direct_hgq(config_pdp, conv2d_input): assert torch.all(k == 1) assert torch.all(i == 2) assert torch.all(f == 5) + + +class DummyLayer(nn.Module): + + def __init__(self, is_pretraining=False): + super().__init__() + self.built = True + self.layer_called = 0 + self.is_pretraining = is_pretraining + + def forward(self, x): + self.layer_called += 1 + return x + + def extra_repr(self): + return f"Layer called = {self.layer_called} times." 
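DummyLayer is a call-counting stub: each test first runs a real forward pass so the layer lazily builds its quantizers, then swaps them for DummyLayer instances and asserts how many fire on the next pass. The recurring pattern, roughly (count_quantizer_calls is a hypothetical helper for illustration, not part of the test suite):

def count_quantizer_calls(layer, x):
    # Hypothetical helper showing the stub-and-count pattern used below.
    layer(x)  # first call builds the real quantizers lazily
    layer.input_quantizer = DummyLayer()
    layer.output_quantizer = DummyLayer()
    layer(x)  # second call routes through the stubs
    return layer.input_quantizer.layer_called, layer.output_quantizer.layer_called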
+ + +def test_dense_input_parameters_quantizers_called(config_pdp, dense_input): + config_pdp.quantization_parameters.enable_quantization = True + layer = PQDense(config_pdp, IN_FEATURES, OUT_FEATURES, bias=True) + + layer(dense_input) # Builds quantizers + layer.input_quantizer = DummyLayer() + layer.weight_quantizer = DummyLayer() + layer.bias_quantizer = DummyLayer() + layer.output_quantizer = DummyLayer() + layer(dense_input) + + assert layer.input_quantizer.layer_called == 1 + assert layer.weight_quantizer.layer_called == 1 + assert layer.bias_quantizer.layer_called == 1 + assert layer.output_quantizer.layer_called == 0 + + +def test_dense_output_parameters_quantizers_called(config_pdp, dense_input): + config_pdp.quantization_parameters.enable_quantization = True + layer = PQDense(config_pdp, IN_FEATURES, OUT_FEATURES, bias=True, quantize_input=False, quantize_output=True) + + layer(dense_input) # Builds quantizers + layer.input_quantizer = DummyLayer() + layer.weight_quantizer = DummyLayer() + layer.bias_quantizer = DummyLayer() + layer.output_quantizer = DummyLayer() + layer(dense_input) + + assert layer.input_quantizer.layer_called == 0 + assert layer.weight_quantizer.layer_called == 1 + assert layer.bias_quantizer.layer_called == 1 + assert layer.output_quantizer.layer_called == 1 + + +def test_dense_quantizers_not_called_when_global_disabled(config_pdp, dense_input): + config_pdp.quantization_parameters.enable_quantization = False + layer = PQDense(config_pdp, IN_FEATURES, OUT_FEATURES, bias=True, quantize_input=True, quantize_output=True) + + layer(dense_input) # Builds quantizers + layer.input_quantizer = DummyLayer() + layer.weight_quantizer = DummyLayer() + layer.bias_quantizer = DummyLayer() + layer.output_quantizer = DummyLayer() + layer(dense_input) + + assert layer.input_quantizer.layer_called == 0 + assert layer.weight_quantizer.layer_called == 0 + assert layer.bias_quantizer.layer_called == 0 + assert layer.output_quantizer.layer_called == 0 + + +def test_dense_quantizers_not_called_when_fitcompress_pretraining(config_pdp, dense_input): + config_pdp.quantization_parameters.enable_quantization = True + config_pdp.fitcompress_parameters.enable_fitcompress = True + layer = PQDense(config_pdp, IN_FEATURES, OUT_FEATURES, bias=True, quantize_input=True, quantize_output=True) + + layer(dense_input) + layer.is_pretraining = True + layer.pruning_layer = DummyLayer() + layer.input_quantizer = DummyLayer() + layer.weight_quantizer = DummyLayer() + layer.bias_quantizer = DummyLayer() + layer.output_quantizer = DummyLayer() + + assert layer.pruning_layer.layer_called == 0 + assert layer.input_quantizer.layer_called == 0 + assert layer.weight_quantizer.layer_called == 0 + assert layer.bias_quantizer.layer_called == 0 + assert layer.output_quantizer.layer_called == 0 + + layer.is_pretraining = False + layer(dense_input) + assert layer.pruning_layer.layer_called == 1 + assert layer.input_quantizer.layer_called == 1 + assert layer.weight_quantizer.layer_called == 1 + assert layer.bias_quantizer.layer_called == 1 + assert layer.output_quantizer.layer_called == 1 + + +def test_dense_pruning_layer_not_called_when_global_disabled(config_pdp, dense_input): + config_pdp.pruning_parameters.enable_pruning = False + layer = PQDense(config_pdp, IN_FEATURES, OUT_FEATURES, bias=True, quantize_input=False, quantize_output=False) + layer(dense_input) # Builds quantizers + layer.input_quantizer = DummyLayer() + layer.weight_quantizer = DummyLayer() + layer.bias_quantizer = DummyLayer() + 
layer.output_quantizer = DummyLayer() + layer.pruning_layer = DummyLayer() + + layer(dense_input) + + assert layer.pruning_layer.layer_called == 0 + + +def test_dense_pruning_layer_called_when_global_enabled(config_pdp, dense_input): + config_pdp.pruning_parameters.enable_pruning = True + layer = PQDense(config_pdp, IN_FEATURES, OUT_FEATURES, bias=True, quantize_input=False, quantize_output=False) + layer(dense_input) # Builds quantizers + layer.input_quantizer = DummyLayer() + layer.weight_quantizer = DummyLayer() + layer.bias_quantizer = DummyLayer() + layer.output_quantizer = DummyLayer() + layer.pruning_layer = DummyLayer() + + layer(dense_input) + + assert layer.pruning_layer.layer_called == 1 + + +# Conv1d + + +def test_conv1d_input_parameters_quantizers_called(config_pdp, conv1d_input): + config_pdp.quantization_parameters.enable_quantization = True + layer = PQConv1d(config_pdp, IN_FEATURES, OUT_FEATURES, KERNEL_SIZE, bias=True) + + layer(conv1d_input) # Builds quantizers + layer.input_quantizer = DummyLayer() + layer.weight_quantizer = DummyLayer() + layer.bias_quantizer = DummyLayer() + layer.output_quantizer = DummyLayer() + layer(conv1d_input) + + assert layer.input_quantizer.layer_called == 1 + assert layer.weight_quantizer.layer_called == 1 + assert layer.bias_quantizer.layer_called == 1 + assert layer.output_quantizer.layer_called == 0 + + +def test_conv1d_output_parameters_quantizers_called(config_pdp, conv1d_input): + config_pdp.quantization_parameters.enable_quantization = True + layer = PQConv1d( + config_pdp, IN_FEATURES, OUT_FEATURES, KERNEL_SIZE, bias=True, quantize_input=False, quantize_output=True + ) + + layer(conv1d_input) # Builds quantizers + layer.input_quantizer = DummyLayer() + layer.weight_quantizer = DummyLayer() + layer.bias_quantizer = DummyLayer() + layer.output_quantizer = DummyLayer() + layer(conv1d_input) + + assert layer.input_quantizer.layer_called == 0 + assert layer.weight_quantizer.layer_called == 1 + assert layer.bias_quantizer.layer_called == 1 + assert layer.output_quantizer.layer_called == 1 + + +def test_conv1d_quantizers_not_called_when_global_disabled(config_pdp, conv1d_input): + config_pdp.quantization_parameters.enable_quantization = False + layer = PQConv1d( + config_pdp, IN_FEATURES, OUT_FEATURES, KERNEL_SIZE, bias=True, quantize_input=True, quantize_output=True + ) + + layer(conv1d_input) # Builds quantizers + layer.input_quantizer = DummyLayer() + layer.weight_quantizer = DummyLayer() + layer.bias_quantizer = DummyLayer() + layer.output_quantizer = DummyLayer() + layer(conv1d_input) + + assert layer.input_quantizer.layer_called == 0 + assert layer.weight_quantizer.layer_called == 0 + assert layer.bias_quantizer.layer_called == 0 + assert layer.output_quantizer.layer_called == 0 + + +def test_conv1d_quantizers_not_called_when_fitcompress_pretraining(config_pdp, conv1d_input): + config_pdp.quantization_parameters.enable_quantization = True + config_pdp.fitcompress_parameters.enable_fitcompress = True + layer = PQConv1d( + config_pdp, IN_FEATURES, OUT_FEATURES, KERNEL_SIZE, bias=True, quantize_input=True, quantize_output=True + ) + + layer(conv1d_input) + layer.is_pretraining = True + layer.pruning_layer = DummyLayer() + layer.input_quantizer = DummyLayer() + layer.weight_quantizer = DummyLayer() + layer.bias_quantizer = DummyLayer() + layer.output_quantizer = DummyLayer() + + assert layer.pruning_layer.layer_called == 0 + assert layer.input_quantizer.layer_called == 0 + assert layer.weight_quantizer.layer_called == 0 + assert 
layer.bias_quantizer.layer_called == 0 + assert layer.output_quantizer.layer_called == 0 + + layer.is_pretraining = False + layer(conv1d_input) + assert layer.pruning_layer.layer_called == 1 + assert layer.input_quantizer.layer_called == 1 + assert layer.weight_quantizer.layer_called == 1 + assert layer.bias_quantizer.layer_called == 1 + assert layer.output_quantizer.layer_called == 1 + + +def test_conv1d_pruning_layer_not_called_when_global_disabled(config_pdp, conv1d_input): + config_pdp.pruning_parameters.enable_pruning = False + layer = PQConv1d( + config_pdp, IN_FEATURES, OUT_FEATURES, KERNEL_SIZE, bias=True, quantize_input=True, quantize_output=True + ) + layer(conv1d_input) # Builds quantizers + layer.input_quantizer = DummyLayer() + layer.weight_quantizer = DummyLayer() + layer.bias_quantizer = DummyLayer() + layer.output_quantizer = DummyLayer() + layer.pruning_layer = DummyLayer() + + layer(conv1d_input) + + assert layer.pruning_layer.layer_called == 0 + + +def test_conv1d_pruning_layer_called_when_global_enabled(config_pdp, conv1d_input): + config_pdp.pruning_parameters.enable_pruning = True + layer = PQConv1d( + config_pdp, IN_FEATURES, OUT_FEATURES, KERNEL_SIZE, bias=True, quantize_input=True, quantize_output=True + ) + layer(conv1d_input) # Builds quantizers + layer.input_quantizer = DummyLayer() + layer.weight_quantizer = DummyLayer() + layer.bias_quantizer = DummyLayer() + layer.output_quantizer = DummyLayer() + layer.pruning_layer = DummyLayer() + + layer(conv1d_input) + + assert layer.pruning_layer.layer_called == 1 + + +# Conv2d + + +def test_conv2d_input_parameters_quantizers_called(config_pdp, conv2d_input): + config_pdp.quantization_parameters.enable_quantization = True + layer = PQConv2d(config_pdp, IN_FEATURES, OUT_FEATURES, KERNEL_SIZE, bias=True) + + layer(conv2d_input) # Builds quantizers + layer.input_quantizer = DummyLayer() + layer.weight_quantizer = DummyLayer() + layer.bias_quantizer = DummyLayer() + layer.output_quantizer = DummyLayer() + layer(conv2d_input) + + assert layer.input_quantizer.layer_called == 1 + assert layer.weight_quantizer.layer_called == 1 + assert layer.bias_quantizer.layer_called == 1 + assert layer.output_quantizer.layer_called == 0 + + +def test_conv2d_output_parameters_quantizers_called(config_pdp, conv2d_input): + config_pdp.quantization_parameters.enable_quantization = True + layer = PQConv2d( + config_pdp, IN_FEATURES, OUT_FEATURES, KERNEL_SIZE, bias=True, quantize_input=False, quantize_output=True + ) + + layer(conv2d_input) # Builds quantizers + layer.input_quantizer = DummyLayer() + layer.weight_quantizer = DummyLayer() + layer.bias_quantizer = DummyLayer() + layer.output_quantizer = DummyLayer() + layer(conv2d_input) + + assert layer.input_quantizer.layer_called == 0 + assert layer.weight_quantizer.layer_called == 1 + assert layer.bias_quantizer.layer_called == 1 + assert layer.output_quantizer.layer_called == 1 + + +def test_conv2d_quantizers_not_called_when_global_disabled(config_pdp, conv2d_input): + config_pdp.quantization_parameters.enable_quantization = False + layer = PQConv2d( + config_pdp, IN_FEATURES, OUT_FEATURES, KERNEL_SIZE, bias=True, quantize_input=True, quantize_output=True + ) + + layer(conv2d_input) # Builds quantizers + layer.input_quantizer = DummyLayer() + layer.weight_quantizer = DummyLayer() + layer.bias_quantizer = DummyLayer() + layer.output_quantizer = DummyLayer() + layer(conv2d_input) + + assert layer.input_quantizer.layer_called == 0 + assert layer.weight_quantizer.layer_called == 0 + assert 
layer.bias_quantizer.layer_called == 0 + assert layer.output_quantizer.layer_called == 0 + + +def test_conv2d_quantizers_not_called_when_fitcompress_pretraining(config_pdp, conv2d_input): + config_pdp.quantization_parameters.enable_quantization = True + config_pdp.fitcompress_parameters.enable_fitcompress = True + layer = PQConv2d( + config_pdp, IN_FEATURES, OUT_FEATURES, KERNEL_SIZE, bias=True, quantize_input=True, quantize_output=True + ) + + layer(conv2d_input) + layer.is_pretraining = True + layer.pruning_layer = DummyLayer() + layer.input_quantizer = DummyLayer() + layer.weight_quantizer = DummyLayer() + layer.bias_quantizer = DummyLayer() + layer.output_quantizer = DummyLayer() + + assert layer.pruning_layer.layer_called == 0 + assert layer.input_quantizer.layer_called == 0 + assert layer.weight_quantizer.layer_called == 0 + assert layer.bias_quantizer.layer_called == 0 + assert layer.output_quantizer.layer_called == 0 + + layer.is_pretraining = False + layer(conv2d_input) + assert layer.pruning_layer.layer_called == 1 + assert layer.input_quantizer.layer_called == 1 + assert layer.weight_quantizer.layer_called == 1 + assert layer.bias_quantizer.layer_called == 1 + assert layer.output_quantizer.layer_called == 1 + + +def test_conv2d_pruning_layer_not_called_when_global_disabled(config_pdp, conv2d_input): + config_pdp.pruning_parameters.enable_pruning = False + layer = PQConv2d( + config_pdp, IN_FEATURES, OUT_FEATURES, KERNEL_SIZE, bias=True, quantize_input=True, quantize_output=True + ) + layer(conv2d_input) # Builds quantizers + layer.input_quantizer = DummyLayer() + layer.weight_quantizer = DummyLayer() + layer.bias_quantizer = DummyLayer() + layer.output_quantizer = DummyLayer() + layer.pruning_layer = DummyLayer() + + layer(conv2d_input) + + assert layer.pruning_layer.layer_called == 0 + + +def test_conv2d_pruning_layer_called_when_global_enabled(config_pdp, conv2d_input): + config_pdp.pruning_parameters.enable_pruning = True + layer = PQConv2d( + config_pdp, IN_FEATURES, OUT_FEATURES, KERNEL_SIZE, bias=True, quantize_input=True, quantize_output=True + ) + layer(conv2d_input) # Builds quantizers + layer.input_quantizer = DummyLayer() + layer.weight_quantizer = DummyLayer() + layer.bias_quantizer = DummyLayer() + layer.output_quantizer = DummyLayer() + layer.pruning_layer = DummyLayer() + + layer(conv2d_input) + + assert layer.pruning_layer.layer_called == 1 + + +# AvgPool + + +def test_avgpool2d_input_parameters_quantizers_called(config_pdp, conv2d_input): + config_pdp.quantization_parameters.enable_quantization = True + layer = PQAvgPool2d(config_pdp, KERNEL_SIZE) + + layer(conv2d_input) # Builds quantizers + layer.input_quantizer = DummyLayer() + layer.output_quantizer = DummyLayer() + layer(conv2d_input) + + assert layer.input_quantizer.layer_called == 1 + assert layer.output_quantizer.layer_called == 0 + + +def test_avgpool2d_output_parameters_quantizers_called(config_pdp, conv2d_input): + config_pdp.quantization_parameters.enable_quantization = True + layer = PQAvgPool2d(config_pdp, KERNEL_SIZE, quantize_input=False, quantize_output=True) + + layer(conv2d_input) # Builds quantizers + layer.input_quantizer = DummyLayer() + layer.output_quantizer = DummyLayer() + layer(conv2d_input) + + assert layer.input_quantizer.layer_called == 0 + assert layer.output_quantizer.layer_called == 1 + + +def test_avgpool2d_quantizers_not_called_when_global_disabled(config_pdp, conv2d_input): + config_pdp.quantization_parameters.enable_quantization = False + layer = PQAvgPool2d(config_pdp, 
KERNEL_SIZE, quantize_input=True, quantize_output=True) + + layer(conv2d_input) # Builds quantizers + layer.input_quantizer = DummyLayer() + layer.output_quantizer = DummyLayer() + layer(conv2d_input) + + assert layer.input_quantizer.layer_called == 0 + assert layer.output_quantizer.layer_called == 0 + + +def test_avgpool2d_quantizers_not_called_when_fitcompress_pretraining(config_pdp, conv2d_input): + config_pdp.quantization_parameters.enable_quantization = True + config_pdp.fitcompress_parameters.enable_fitcompress = True + layer = PQAvgPool2d(config_pdp, KERNEL_SIZE, quantize_input=True, quantize_output=True) + + layer(conv2d_input) + layer.is_pretraining = True + layer.input_quantizer = DummyLayer() + layer.output_quantizer = DummyLayer() + + assert layer.input_quantizer.layer_called == 0 + assert layer.output_quantizer.layer_called == 0 + + layer.is_pretraining = False + layer(conv2d_input) + assert layer.input_quantizer.layer_called == 1 + assert layer.output_quantizer.layer_called == 1 + + +# BatchNorm + + +def test_batchnorm2d_input_parameters_quantizers_called(config_pdp, conv2d_input): + config_pdp.quantization_parameters.enable_quantization = True + layer = PQBatchNorm2d(config_pdp, IN_FEATURES) + + layer(conv2d_input) # Builds quantizers + layer.input_quantizer = DummyLayer() + layer.weight_quantizer = DummyLayer() + layer.bias_quantizer = DummyLayer() + layer(conv2d_input) + + assert layer.input_quantizer.layer_called == 1 + assert layer.weight_quantizer.layer_called == 1 + assert layer.bias_quantizer.layer_called == 1 + + +def test_batchnorm2d_input_quantizer_disabled_parameters_quantizers_called(config_pdp, conv2d_input): + config_pdp.quantization_parameters.enable_quantization = True + layer = PQBatchNorm2d(config_pdp, IN_FEATURES, quantize_input=False) + + layer(conv2d_input) # Builds quantizers + layer.input_quantizer = DummyLayer() + layer.weight_quantizer = DummyLayer() + layer.bias_quantizer = DummyLayer() + layer(conv2d_input) + + assert layer.input_quantizer.layer_called == 0 + assert layer.weight_quantizer.layer_called == 1 + assert layer.bias_quantizer.layer_called == 1 + + +def test_batchnorm2d_quantizers_not_called_when_global_disabled(config_pdp, conv2d_input): + config_pdp.quantization_parameters.enable_quantization = False + layer = PQBatchNorm2d(config_pdp, IN_FEATURES) + + layer(conv2d_input) # Builds quantizers + layer.input_quantizer = DummyLayer() + layer.weight_quantizer = DummyLayer() + layer.bias_quantizer = DummyLayer() + layer(conv2d_input) + + assert layer.input_quantizer.layer_called == 0 + assert layer.weight_quantizer.layer_called == 0 + assert layer.bias_quantizer.layer_called == 0 + + +def test_batchnorm2d_quantizers_not_called_when_fitcompress_pretraining(config_pdp, conv2d_input): + config_pdp.quantization_parameters.enable_quantization = True + config_pdp.fitcompress_parameters.enable_fitcompress = True + layer = PQBatchNorm2d(config_pdp, IN_FEATURES) + + layer(conv2d_input) + layer.is_pretraining = True + layer.input_quantizer = DummyLayer() + layer.weight_quantizer = DummyLayer() + layer.bias_quantizer = DummyLayer() + + assert layer.input_quantizer.layer_called == 0 + assert layer.weight_quantizer.layer_called == 0 + assert layer.bias_quantizer.layer_called == 0 + + layer.is_pretraining = False + layer(conv2d_input) + assert layer.input_quantizer.layer_called == 1 + assert layer.weight_quantizer.layer_called == 1 + assert layer.bias_quantizer.layer_called == 1 + + +# Activations + + +def 
test_activation_input_parameters_quantizers_called(config_pdp, conv2d_input): + config_pdp.quantization_parameters.enable_quantization = True + layer = PQActivation(config_pdp, "relu") + + layer(conv2d_input) # Builds quantizers + layer.input_quantizer = DummyLayer() + layer.output_quantizer = DummyLayer() + layer(conv2d_input) + + assert layer.input_quantizer.layer_called == 1 + assert layer.output_quantizer.layer_called == 0 + + +def test_activation_output_parameters_quantizers_called(config_pdp, conv2d_input): + config_pdp.quantization_parameters.enable_quantization = True + layer = PQActivation(config_pdp, "relu", quantize_input=False, quantize_output=True) + + layer(conv2d_input) # Builds quantizers + layer.input_quantizer = DummyLayer() + layer.output_quantizer = DummyLayer() + layer(conv2d_input) + + assert layer.input_quantizer.layer_called == 0 + assert layer.output_quantizer.layer_called == 1 + + +def test_activation_quantizers_not_called_when_global_disabled(config_pdp, conv2d_input): + config_pdp.quantization_parameters.enable_quantization = False + + layer = PQActivation(config_pdp, "relu", quantize_input=True, quantize_output=True) + + layer(conv2d_input) # Builds quantizers + layer.input_quantizer = DummyLayer() + layer.output_quantizer = DummyLayer() + layer(conv2d_input) + + assert layer.input_quantizer.layer_called == 0 + assert layer.output_quantizer.layer_called == 0 + + +def test_activation_quantizers_not_called_when_fitcompress_pretraining(config_pdp, conv2d_input): + config_pdp.quantization_parameters.enable_quantization = True + config_pdp.fitcompress_parameters.enable_fitcompress = True + layer = PQActivation(config_pdp, "relu", quantize_input=True, quantize_output=True) + + layer(conv2d_input) + layer.is_pretraining = True + layer.input_quantizer = DummyLayer() + layer.output_quantizer = DummyLayer() + + assert layer.input_quantizer.layer_called == 0 + assert layer.output_quantizer.layer_called == 0 + + layer.is_pretraining = False + layer(conv2d_input) + assert layer.input_quantizer.layer_called == 1 + assert layer.output_quantizer.layer_called == 1 + + +def dummy_ebops(): + return 0.0 + + +def dummy_hgq_loss(): + return 1.0 + + +class ModelWithAllLayers(nn.Module): + + def __init__(self, use_bias=True): + super().__init__() + self.conv = Conv2d(IN_FEATURES, OUT_FEATURES, KERNEL_SIZE, bias=use_bias) + self.bn = BatchNorm2d(OUT_FEATURES) + self.relu = ReLU() + self.avgpool2d = nn.AvgPool2d(2) + self.conv1d = Conv1d(OUT_FEATURES, 4, KERNEL_SIZE, bias=use_bias) + self.avgpool1d = nn.AvgPool1d(2) + self.tanh = Tanh() + self.flatten = nn.Flatten() + self.dense = nn.Linear(444, 2, bias=use_bias) + + def forward(self, x): + x = self.relu(self.bn(self.conv(x))) + x = self.avgpool2d(x) + x = self.tanh(x) + x = torch.reshape(x, list(x.shape[:-2]) + [x.shape[-2] * x.shape[-1]]) + x = self.conv1d(x) + x = self.avgpool1d(x) + x = self.flatten(x) + x = self.dense(x) + return x + + +def test_hgq_loss_calc_no_qoutput(config_pdp, conv2d_input): + config_pdp.quantization_parameters.enable_quantization = True + config_pdp.quantization_parameters.use_high_granularity_quantization = True + config_pdp.quantization_parameters.hgq_beta = 0.0 + + # Bias in weight layers, don't quantize output + model = ModelWithAllLayers() + model = add_compression_layers_torch(model, config_pdp, conv2d_input.shape) + post_pretrain_functions(model, config_pdp) + expected_loss = 0.0 + for m in model.modules(): + if isinstance(m, (PQWeightBiasBase)): + m.ebops = dummy_ebops + m.input_quantizer.hgq_loss = 
dummy_hgq_loss + m.weight_quantizer.hgq_loss = dummy_hgq_loss + m.bias_quantizer.hgq_loss = dummy_hgq_loss + m.output_quantizer.hgq_loss = dummy_hgq_loss # Won't be called + expected_loss += 3.0 + elif isinstance(m, (PQAvgPool1d, PQAvgPool2d, PQActivation)): + m.ebops = dummy_ebops + m.input_quantizer.hgq_loss = dummy_hgq_loss + m.output_quantizer.hgq_loss = dummy_hgq_loss # Won't be called + expected_loss += 1.0 + elif isinstance(m, (PQBatchNorm2d)): + m.ebops = dummy_ebops + m.input_quantizer.hgq_loss = dummy_hgq_loss + m.weight_quantizer.hgq_loss = dummy_hgq_loss + m.bias_quantizer.hgq_loss = dummy_hgq_loss + expected_loss += 3.0 + + losses = get_model_losses_torch(model, torch.tensor(0.0)) + assert losses == expected_loss + + +def test_hgq_loss_calc_no_bias_no_qoutput(config_pdp, conv2d_input): + config_pdp.quantization_parameters.enable_quantization = True + config_pdp.quantization_parameters.use_high_granularity_quantization = True + config_pdp.quantization_parameters.hgq_beta = 0.0 + + # No bias in weight layers, don't quantize output + model = ModelWithAllLayers(use_bias=False) + model = add_compression_layers_torch(model, config_pdp, conv2d_input.shape) + post_pretrain_functions(model, config_pdp) + + expected_loss = 0.0 + for m in model.modules(): + if isinstance(m, (PQWeightBiasBase)): + m.ebops = dummy_ebops + m.input_quantizer.hgq_loss = dummy_hgq_loss + m.weight_quantizer.hgq_loss = dummy_hgq_loss + m.bias_quantizer.hgq_loss = dummy_hgq_loss # Won't be called + m.output_quantizer.hgq_loss = dummy_hgq_loss # Won't be called + expected_loss += 2.0 + elif isinstance(m, (PQAvgPool1d, PQAvgPool2d, PQActivation)): + m.ebops = dummy_ebops + m.input_quantizer.hgq_loss = dummy_hgq_loss + m.output_quantizer.hgq_loss = dummy_hgq_loss # Won't be called + expected_loss += 1.0 + elif isinstance(m, (PQBatchNorm2d)): + m.ebops = dummy_ebops + m.input_quantizer.hgq_loss = dummy_hgq_loss + m.weight_quantizer.hgq_loss = dummy_hgq_loss + m.bias_quantizer.hgq_loss = dummy_hgq_loss + expected_loss += 3.0 + + losses = get_model_losses_torch(model, torch.tensor(0.0)) + assert losses == expected_loss + + +def test_hgq_loss_calc_qoutput(config_pdp, conv2d_input): + config_pdp.quantization_parameters.enable_quantization = True + config_pdp.quantization_parameters.use_high_granularity_quantization = True + config_pdp.quantization_parameters.hgq_beta = 0.0 + + # Bias in weight layers, quantize output + config_pdp.quantization_parameters.quantize_output = True + model = ModelWithAllLayers() + model = add_compression_layers_torch(model, config_pdp, conv2d_input.shape) + post_pretrain_functions(model, config_pdp) + expected_loss = 0.0 + for m in model.modules(): + if isinstance(m, (PQWeightBiasBase)): + m.ebops = dummy_ebops + m.input_quantizer.hgq_loss = dummy_hgq_loss + m.weight_quantizer.hgq_loss = dummy_hgq_loss + m.bias_quantizer.hgq_loss = dummy_hgq_loss + m.output_quantizer.hgq_loss = dummy_hgq_loss + expected_loss += 4.0 + elif isinstance(m, (PQAvgPool1d, PQAvgPool2d, PQActivation)): + m.ebops = dummy_ebops + m.input_quantizer.hgq_loss = dummy_hgq_loss + m.output_quantizer.hgq_loss = dummy_hgq_loss + expected_loss += 2.0 + elif isinstance(m, (PQBatchNorm2d)): + m.ebops = dummy_ebops + m.input_quantizer.hgq_loss = dummy_hgq_loss + m.weight_quantizer.hgq_loss = dummy_hgq_loss + m.bias_quantizer.hgq_loss = dummy_hgq_loss + expected_loss += 3.0 + + losses = get_model_losses_torch(model, torch.tensor(0.0)) + assert losses == expected_loss + + +def test_hgq_loss_calc_no_qinput(config_pdp, conv2d_input): + 
config_pdp.quantization_parameters.enable_quantization = True + config_pdp.quantization_parameters.use_high_granularity_quantization = True + config_pdp.quantization_parameters.hgq_beta = 0.0 + + config_pdp.quantization_parameters.quantize_output = True + config_pdp.quantization_parameters.quantize_input = False + # Bias in weight layers, don't quantize input + model = ModelWithAllLayers() + model = add_compression_layers_torch(model, config_pdp, conv2d_input.shape) + post_pretrain_functions(model, config_pdp) + expected_loss = 0.0 + for m in model.modules(): + if isinstance(m, (PQWeightBiasBase)): + m.ebops = dummy_ebops + m.input_quantizer.hgq_loss = dummy_hgq_loss # Won't be called + m.weight_quantizer.hgq_loss = dummy_hgq_loss + m.bias_quantizer.hgq_loss = dummy_hgq_loss + m.output_quantizer.hgq_loss = dummy_hgq_loss + expected_loss += 3.0 + elif isinstance(m, (PQAvgPool1d, PQAvgPool2d, PQActivation)): + m.ebops = dummy_ebops + m.input_quantizer.hgq_loss = dummy_hgq_loss # Won't be called + m.output_quantizer.hgq_loss = dummy_hgq_loss + expected_loss += 1.0 + elif isinstance(m, (PQBatchNorm2d)): + m.ebops = dummy_ebops + m.input_quantizer.hgq_loss = dummy_hgq_loss # Won't be called + m.weight_quantizer.hgq_loss = dummy_hgq_loss + m.bias_quantizer.hgq_loss = dummy_hgq_loss + expected_loss += 2.0 + + losses = get_model_losses_torch(model, torch.tensor(0.0)) + assert losses == expected_loss + + +# After final compression done + + +def test_conv1d_parameter_quantizers_not_called_when_final_compression_done(config_pdp, conv1d_input): + config_pdp.quantization_parameters.enable_quantization = True + config_pdp.quantization_parameters.quantize_output = True + layer = Conv1d(IN_FEATURES, OUT_FEATURES, KERNEL_SIZE, bias=True) + model = TestModel(layer) + model = add_compression_layers_torch(model, config_pdp, conv1d_input.shape) + model = apply_final_compression_torch(model) + model.submodule.input_quantizer = DummyLayer() + model.submodule.weight_quantizer = DummyLayer() + model.submodule.bias_quantizer = DummyLayer() + model.submodule.output_quantizer = DummyLayer() + model(conv1d_input) + + assert model.submodule.input_quantizer.layer_called == 1 + assert model.submodule.weight_quantizer.layer_called == 0 + assert model.submodule.bias_quantizer.layer_called == 0 + assert model.submodule.output_quantizer.layer_called == 1 + + +def test_batchnorm2d_parameter_quantizers_not_called_when_final_compression_done(config_pdp, conv2d_input): + config_pdp.quantization_parameters.enable_quantization = True + config_pdp.quantization_parameters.quantize_output = True + layer = BatchNorm2d(IN_FEATURES) + model = TestModel(layer) + model = add_compression_layers_torch(model, config_pdp, conv2d_input.shape) + model = apply_final_compression_torch(model) + model.submodule.input_quantizer = DummyLayer() + model.submodule.weight_quantizer = DummyLayer() + model.submodule.bias_quantizer = DummyLayer() + model(conv2d_input) + + assert model.submodule.input_quantizer.layer_called == 1 + assert model.submodule.weight_quantizer.layer_called == 0 + assert model.submodule.bias_quantizer.layer_called == 0 From 088e8e1e34703627c185b3a6ee935e1f743f20c9 Mon Sep 17 00:00:00 2001 From: Roope Niemi Date: Fri, 7 Nov 2025 11:39:47 +0100 Subject: [PATCH 16/37] multiplier only for relu when not using hgq. Add custom quantization bits in one loop. 
Enable pruning as parameter to layers, quantization bits as parameters to conv layers --- src/pquant/core/compressed_layers.py | 4 +- src/pquant/core/torch_impl/activations.py | 2 +- .../torch_impl/compressed_layers_torch.py | 314 +++++++++++------- tests/test_torch_compression_layers.py | 20 ++ 4 files changed, 214 insertions(+), 126 deletions(-) diff --git a/src/pquant/core/compressed_layers.py b/src/pquant/core/compressed_layers.py index 0f7a8be..4f4064d 100644 --- a/src/pquant/core/compressed_layers.py +++ b/src/pquant/core/compressed_layers.py @@ -74,8 +74,8 @@ def post_training_prune(model, calibration_data, config): if keras.backend.backend() == "torch": from pquant.core.torch_impl.compressed_layers_torch import ( add_compression_layers_torch, - apply_final_compression_torch, post_pretrain_functions, + remove_compression_layers, ) t_delta = config.pruning_parameters.t_delta @@ -86,7 +86,7 @@ def post_training_prune(model, calibration_data, config): model = add_compression_layers_torch(model, config, inputs.shape) post_pretrain_functions(model, config) model(inputs) - return apply_final_compression_torch(model) + return remove_compression_layers(model, config) else: from pquant.core.tf_impl.compressed_layers_tf import ( add_compression_layers_tf, diff --git a/src/pquant/core/torch_impl/activations.py b/src/pquant/core/torch_impl/activations.py index b8ba44c..3836578 100644 --- a/src/pquant/core/torch_impl/activations.py +++ b/src/pquant/core/torch_impl/activations.py @@ -129,7 +129,7 @@ def hgq_loss(self): return loss def pre_activation(self, x): - if self.use_multiplier: + if not self.use_hgq and self.use_multiplier and self.activation_name == "relu": x = x * 2 ** ((torch.round(self.multiplier) - self.multiplier).detach() + self.multiplier) if self.quantize_input and self.enable_quantization: x = self.input_quantizer(x) diff --git a/src/pquant/core/torch_impl/compressed_layers_torch.py b/src/pquant/core/torch_impl/compressed_layers_torch.py index 39ef0d5..3e11922 100644 --- a/src/pquant/core/torch_impl/compressed_layers_torch.py +++ b/src/pquant/core/torch_impl/compressed_layers_torch.py @@ -26,6 +26,7 @@ def __init__( layer_type, quantize_input=True, quantize_output=False, + enable_pruning: bool = None, input_quantization_bits: Tuple[T, T, T] = None, weight_quantization_bits: Tuple[T, T, T] = None, bias_quantization_bits: Tuple[T, T, T] = None, @@ -72,7 +73,7 @@ def __init__( self.round_mode = config.quantization_parameters.round_mode self.overflow = config.quantization_parameters.overflow self.use_hgq = config.quantization_parameters.use_high_granularity_quantization - self.enable_pruning = config.pruning_parameters.enable_pruning + self.enable_pruning = enable_pruning if enable_pruning is not None else config.pruning_parameters.enable_pruning self.use_fitcompress = config.fitcompress_parameters.enable_fitcompress self.hgq_gamma = config.quantization_parameters.hgq_gamma self.final_compression_done = False @@ -214,6 +215,7 @@ def __init__( bias: bool = True, quantize_input=True, quantize_output=False, + enable_pruning: bool = None, device=None, dtype=None, input_quantization_bits: Tuple[T, T, T] = None, @@ -232,6 +234,7 @@ def __init__( layer_type="linear", quantize_input=quantize_input, quantize_output=quantize_output, + enable_pruning=enable_pruning, input_quantization_bits=input_quantization_bits, weight_quantization_bits=weight_quantization_bits, bias_quantization_bits=bias_quantization_bits, @@ -321,6 +324,11 @@ def __init__( dtype=None, quantize_input=True, 
quantize_output=False, + enable_pruning: bool = None, + input_quantization_bits: Tuple[T, T, T] = None, + weight_quantization_bits: Tuple[T, T, T] = None, + bias_quantization_bits: Tuple[T, T, T] = None, + output_quantization_bits: Tuple[T, T, T] = None, **kwargs, ): super().__init__( @@ -339,6 +347,11 @@ def __init__( layer_type="conv", quantize_input=quantize_input, quantize_output=quantize_output, + enable_pruning=enable_pruning, + input_quantization_bits=input_quantization_bits, + weight_quantization_bits=weight_quantization_bits, + bias_quantization_bits=bias_quantization_bits, + output_quantization_bits=output_quantization_bits, **kwargs, ) self.use_fitcompress = config.fitcompress_parameters.enable_fitcompress @@ -436,6 +449,11 @@ def __init__( dtype=None, quantize_input=True, quantize_output=False, + enable_pruning: bool = None, + input_quantization_bits: Tuple[T, T, T] = None, + weight_quantization_bits: Tuple[T, T, T] = None, + bias_quantization_bits: Tuple[T, T, T] = None, + output_quantization_bits: Tuple[T, T, T] = None, **kwargs, ): super().__init__( @@ -454,6 +472,11 @@ def __init__( layer_type="conv", quantize_input=quantize_input, quantize_output=quantize_output, + enable_pruning=enable_pruning, + input_quantization_bits=input_quantization_bits, + weight_quantization_bits=weight_quantization_bits, + bias_quantization_bits=bias_quantization_bits, + output_quantization_bits=output_quantization_bits, **kwargs, ) self.use_fitcompress = config.fitcompress_parameters.enable_fitcompress @@ -535,10 +558,7 @@ def extra_repr(self): def add_compression_layers_torch(model, config, input_shape, device="cuda"): model = add_quantized_activations_to_model_layer(model, config) - # model = add_quantized_activations_to_model_functional(model, config) model = add_pruning_to_model(model, config) - model = disable_pruning_from_layers(model, config) - model = add_layer_specific_quantization_to_model(model, config) model.to(device) model(torch.rand(input_shape, device=next(model.parameters()).device)) return model @@ -881,122 +901,122 @@ def forward(self, input: torch.Tensor) -> torch.Tensor: return super().forward(input) -def add_layer_specific_quantization_to_model(module, config): - for name, layer in module.named_modules(): - if isinstance(layer, PQWeightBiasBase): - if name in config.quantization_parameters.layer_specific: - layer_config = config.quantization_parameters.layer_specific[name] - if "weight" in layer_config: - weight_int_bits = layer_config["weight"]["integer_bits"] - weight_fractional_bits = layer_config["weight"]["fractional_bits"] - layer.i_weight = torch.tensor(weight_int_bits) - layer.f_weight = torch.tensor(weight_fractional_bits) - if "bias" in layer_config: - bias_int_bits = layer_config["bias"]["integer_bits"] - bias_fractional_bits = layer_config["bias"]["fractional_bits"] - layer.i_bias = torch.tensor(bias_int_bits) - layer.f_bias = torch.tensor(bias_fractional_bits) - if "input" in layer_config: - if "integer_bits" in layer_config["input"]: - input_int_bits = torch.tensor(layer_config["input"]["integer_bits"]) - layer.i_input = input_int_bits - if "fractional_bits" in layer_config["input"]: - input_fractional_bits = torch.tensor(layer_config["input"]["fractional_bits"]) - layer.f_input = input_fractional_bits - if "quantize" in layer_config["input"]: - quantize = layer_config["input"]["quantize"] - layer.quantize_input = quantize - if "output" in layer_config: - if "integer_bits" in layer_config["output"]: - output_int_bits = 
torch.tensor(layer_config["output"]["integer_bits"]) - layer.i_output = input_int_bits - if "fractional_bits" in layer_config["output"]: - input_fractional_bits = torch.tensor(layer_config["output"]["fractional_bits"]) - layer.f_output = input_fractional_bits - if "quantize" in layer_config["output"]: - quantize = layer_config["output"]["quantize"] - layer.quantize_output = quantize - - elif layer.__class__ in [PQBatchNorm2d]: - if name in config.quantization_parameters.layer_specific: - layer_config = config.quantization_parameters.layer_specific[name] - if "weight" in layer_config: - i = torch.tensor(layer_config["weight"]["integer_bits"]) - f = torch.tensor(layer_config["weight"]["fractional_bits"]) - layer.i_weight = i - layer.f_weight = f - if "bias" in layer_config: - i = torch.tensor(layer_config["bias"]["integer_bits"]) - f = torch.tensor(layer_config["bias"]["fractional_bits"]) - layer.i_bias = i - layer.f_biast = f - if "input" in layer_config: - if "integer_bits" in layer_config["input"]: - input_int_bits = torch.tensor(layer_config["input"]["integer_bits"]) - layer.i_input = input_int_bits - if "fractional_bits" in layer_config["input"]: - input_fractional_bits = torch.tensor(layer_config["input"]["fractional_bits"]) - layer.f_input = input_fractional_bits - if "quantize" in layer_config["input"]: - quantize = layer_config["input"]["quantize"] - layer.quantize_input = quantize - elif layer.__class__ in [PQAvgPool1d, PQAvgPool2d]: - if name in config.quantization_parameters.layer_specific: - layer_config = config.quantization_parameters.layer_specific[name] - if "input" in layer_config: - if "integer_bits" in layer_config["input"]: - input_int_bits = torch.tensor(layer_config["input"]["integer_bits"]) - layer.i_input = input_int_bits - if "fractional_bits" in layer_config["input"]: - input_fractional_bits = torch.tensor(layer_config["input"]["fractional_bits"]) - layer.f_input = input_fractional_bits - if "quantize" in layer_config["input"]: - quantize = layer_config["input"]["quantize"] - layer.quantize_input = quantize - if "output" in layer_config: - if "integer_bits" in layer_config["output"]: - output_int_bits = torch.tensor(layer_config["output"]["integer_bits"]) - layer.i_output = output_int_bits - if "fractional_bits" in layer_config["output"]: - output_fractional_bits = torch.tensor(layer_config["output"]["fractional_bits"]) - layer.f_output = output_fractional_bits - if "quantize" in layer_config["output"]: - quantize = layer_config["output"]["quantize"] - layer.quantize_output = quantize - - elif layer.__class__ == PQActivation: - if name in config.quantization_parameters.layer_specific: - layer_config = config.quantization_parameters.layer_specific[name] - if "input" in layer_config: - if "integer_bits" in layer_config["input"]: - input_int_bits = torch.tensor(layer_config["input"]["integer_bits"]) - layer.i_input = input_int_bits - if "fractional_bits" in layer_config["input"]: - input_fractional_bits = torch.tensor(layer_config["input"]["fractional_bits"]) - layer.f_input = input_fractional_bits - if "quantize" in layer_config["input"]: - quantize = layer_config["input"]["quantize"] - layer.quantize_input = quantize - if "output" in layer_config: - if "integer_bits" in layer_config["output"]: - output_int_bits = torch.tensor(layer_config["output"]["integer_bits"]) - layer.i_output = output_int_bits - if "fractional_bits" in layer_config["output"]: - output_fractional_bits = torch.tensor(layer_config["output"]["fractional_bits"]) - layer.f_output = 
output_fractional_bits
-                    if "quantize" in layer_config["output"]:
-                        quantize = layer_config["output"]["quantize"]
-                        layer.quantize_output = quantize
-    return module
-
-
-def add_quantized_activations_to_model_layer(module, config):
+def add_layer_specific_quantization_to_model(name, layer, config):
+    if isinstance(layer, PQWeightBiasBase):
+        if name in config.quantization_parameters.layer_specific:
+            layer_config = config.quantization_parameters.layer_specific[name]
+            if "weight" in layer_config:
+                weight_int_bits = layer_config["weight"]["integer_bits"]
+                weight_fractional_bits = layer_config["weight"]["fractional_bits"]
+                layer.i_weight = torch.tensor(weight_int_bits)
+                layer.f_weight = torch.tensor(weight_fractional_bits)
+            if "bias" in layer_config:
+                bias_int_bits = layer_config["bias"]["integer_bits"]
+                bias_fractional_bits = layer_config["bias"]["fractional_bits"]
+                layer.i_bias = torch.tensor(bias_int_bits)
+                layer.f_bias = torch.tensor(bias_fractional_bits)
+            if "input" in layer_config:
+                if "integer_bits" in layer_config["input"]:
+                    input_int_bits = torch.tensor(layer_config["input"]["integer_bits"])
+                    layer.i_input = input_int_bits
+                if "fractional_bits" in layer_config["input"]:
+                    input_fractional_bits = torch.tensor(layer_config["input"]["fractional_bits"])
+                    layer.f_input = input_fractional_bits
+                if "quantize" in layer_config["input"]:
+                    quantize = layer_config["input"]["quantize"]
+                    layer.quantize_input = quantize
+            if "output" in layer_config:
+                if "integer_bits" in layer_config["output"]:
+                    output_int_bits = torch.tensor(layer_config["output"]["integer_bits"])
+                    layer.i_output = output_int_bits
+                if "fractional_bits" in layer_config["output"]:
+                    output_fractional_bits = torch.tensor(layer_config["output"]["fractional_bits"])
+                    layer.f_output = output_fractional_bits
+                if "quantize" in layer_config["output"]:
+                    quantize = layer_config["output"]["quantize"]
+                    layer.quantize_output = quantize
+
+    elif layer.__class__ in [PQBatchNorm2d]:
+        if name in config.quantization_parameters.layer_specific:
+            layer_config = config.quantization_parameters.layer_specific[name]
+            if "weight" in layer_config:
+                i = torch.tensor(layer_config["weight"]["integer_bits"])
+                f = torch.tensor(layer_config["weight"]["fractional_bits"])
+                layer.i_weight = i
+                layer.f_weight = f
+            if "bias" in layer_config:
+                i = torch.tensor(layer_config["bias"]["integer_bits"])
+                f = torch.tensor(layer_config["bias"]["fractional_bits"])
+                layer.i_bias = i
+                layer.f_bias = f
+            if "input" in layer_config:
+                if "integer_bits" in layer_config["input"]:
+                    input_int_bits = torch.tensor(layer_config["input"]["integer_bits"])
+                    layer.i_input = input_int_bits
+                if "fractional_bits" in layer_config["input"]:
+                    input_fractional_bits = torch.tensor(layer_config["input"]["fractional_bits"])
+                    layer.f_input = input_fractional_bits
+                if "quantize" in layer_config["input"]:
+                    quantize = layer_config["input"]["quantize"]
+                    layer.quantize_input = quantize
+    elif layer.__class__ in [PQAvgPool1d, PQAvgPool2d]:
+        if name in config.quantization_parameters.layer_specific:
+            layer_config = config.quantization_parameters.layer_specific[name]
+            if "input" in layer_config:
+                if "integer_bits" in layer_config["input"]:
+                    input_int_bits = torch.tensor(layer_config["input"]["integer_bits"])
+                    layer.i_input = input_int_bits
+                if "fractional_bits" in layer_config["input"]:
+                    input_fractional_bits = torch.tensor(layer_config["input"]["fractional_bits"])
+                    layer.f_input = input_fractional_bits
+                if "quantize" in layer_config["input"]:
+                    quantize = 
layer_config["input"]["quantize"] + layer.quantize_input = quantize + if "output" in layer_config: + if "integer_bits" in layer_config["output"]: + output_int_bits = torch.tensor(layer_config["output"]["integer_bits"]) + layer.i_output = output_int_bits + if "fractional_bits" in layer_config["output"]: + output_fractional_bits = torch.tensor(layer_config["output"]["fractional_bits"]) + layer.f_output = output_fractional_bits + if "quantize" in layer_config["output"]: + quantize = layer_config["output"]["quantize"] + layer.quantize_output = quantize + + elif layer.__class__ == PQActivation: + if name in config.quantization_parameters.layer_specific: + layer_config = config.quantization_parameters.layer_specific[name] + if "input" in layer_config: + if "integer_bits" in layer_config["input"]: + input_int_bits = torch.tensor(layer_config["input"]["integer_bits"]) + layer.i_input = input_int_bits + if "fractional_bits" in layer_config["input"]: + input_fractional_bits = torch.tensor(layer_config["input"]["fractional_bits"]) + layer.f_input = input_fractional_bits + if "quantize" in layer_config["input"]: + quantize = layer_config["input"]["quantize"] + layer.quantize_input = quantize + if "output" in layer_config: + if "integer_bits" in layer_config["output"]: + output_int_bits = torch.tensor(layer_config["output"]["integer_bits"]) + layer.i_output = output_int_bits + if "fractional_bits" in layer_config["output"]: + output_fractional_bits = torch.tensor(layer_config["output"]["fractional_bits"]) + layer.f_output = output_fractional_bits + if "quantize" in layer_config["output"]: + quantize = layer_config["output"]["quantize"] + layer.quantize_output = quantize + return layer + + +def add_quantized_activations_to_model_layer(module, config, prefix=""): if not config.quantization_parameters.enable_quantization: return module quantize_input = config.quantization_parameters.quantize_input quantize_output = config.quantization_parameters.quantize_output # Replaces ReLU and Tanh layers with quantized versions for name, layer in module.named_children(): + full_name = f"{prefix}.{name}" if prefix else name i = config.quantization_parameters.default_data_integer_bits f = config.quantization_parameters.default_data_fractional_bits if layer.__class__ in [nn.ReLU]: @@ -1013,6 +1033,7 @@ def add_quantized_activations_to_model_layer(module, config): quantize_input=quantize_input, quantize_output=quantize_output, ) + relu = add_layer_specific_quantization_to_model(full_name, relu, config) setattr(module, name, relu) elif layer.__class__ in [nn.Tanh]: type_of_tanh = "tanh" if config.quantization_parameters.use_real_tanh else "hard_tanh" @@ -1026,6 +1047,7 @@ def add_quantized_activations_to_model_layer(module, config): quantize_input=quantize_input, quantize_output=quantize_output, ) + tanh = add_layer_specific_quantization_to_model(full_name, tanh, config) setattr(module, name, tanh) elif layer.__class__ == nn.AvgPool1d: new_layer = PQAvgPool1d( @@ -1038,6 +1060,7 @@ def add_quantized_activations_to_model_layer(module, config): quantize_input, quantize_output, ) + new_layer = add_layer_specific_quantization_to_model(full_name, new_layer, config) setattr(module, name, new_layer) elif layer.__class__ == nn.AvgPool2d: new_layer = PQAvgPool2d( @@ -1051,6 +1074,7 @@ def add_quantized_activations_to_model_layer(module, config): quantize_input, quantize_output, ) + new_layer = add_layer_specific_quantization_to_model(full_name, new_layer, config) setattr(module, name, new_layer) elif layer.__class__ == 
nn.BatchNorm2d: new_layer = PQBatchNorm2d( @@ -1062,9 +1086,10 @@ def add_quantized_activations_to_model_layer(module, config): track_running_stats=layer.track_running_stats, quantize_input=quantize_input, ) + new_layer = add_layer_specific_quantization_to_model(full_name, new_layer, config) setattr(module, name, new_layer) else: - layer = add_quantized_activations_to_model_layer(layer, config) + layer = add_quantized_activations_to_model_layer(layer, config, full_name) return module @@ -1101,18 +1126,18 @@ def add_quantized_activations_to_model_functional(module, config): return traced_model -def disable_pruning_from_layers(module, config): - for name, layer in module.named_modules(): - enable_pruning = name not in config.pruning_parameters.disable_pruning_for_layers - if layer.__class__ in [PQDense, PQConv2d, PQConv1d] and not enable_pruning: - layer.enable_pruning = enable_pruning - return module +def disable_pruning_from_layers(name, layer, config): + enable_pruning = name not in config.pruning_parameters.disable_pruning_for_layers + if layer.__class__ in [PQDense, PQConv2d, PQConv1d] and not enable_pruning: + layer.enable_pruning = enable_pruning + return layer -def add_pruning_to_model(module, config): +def add_pruning_to_model(module, config, prefix=""): quantize_input = config.quantization_parameters.quantize_input quantize_output = config.quantization_parameters.quantize_output for name, layer in module.named_children(): + full_name = f"{prefix}.{name}" if prefix else name if layer.__class__ is nn.Linear: sparse_layer = PQDense( config, layer.in_features, layer.out_features, layer.bias is not None, quantize_input, quantize_output @@ -1122,6 +1147,8 @@ def add_pruning_to_model(module, config): if layer.bias is not None: sparse_layer._bias.data = layer.bias.data + sparse_layer = add_layer_specific_quantization_to_model(full_name, sparse_layer, config) + sparse_layer = disable_pruning_from_layers(full_name, sparse_layer, config) setattr(module, name, sparse_layer) elif layer.__class__ is nn.Conv2d: sparse_layer = PQConv2d( @@ -1144,6 +1171,8 @@ def add_pruning_to_model(module, config): sparse_layer._weight.data = layer.weight.data if layer.bias is not None: sparse_layer._bias.data = layer.bias.data + sparse_layer = add_layer_specific_quantization_to_model(full_name, sparse_layer, config) + sparse_layer = disable_pruning_from_layers(full_name, sparse_layer, config) setattr(module, name, sparse_layer) elif layer.__class__ is nn.Conv1d: sparse_layer = PQConv1d( @@ -1166,9 +1195,11 @@ def add_pruning_to_model(module, config): sparse_layer._weight.data = layer.weight.data if layer.bias is not None: sparse_layer._bias.data = layer.bias.data + sparse_layer = add_layer_specific_quantization_to_model(full_name, sparse_layer, config) + sparse_layer = disable_pruning_from_layers(full_name, sparse_layer, config) setattr(module, name, sparse_layer) else: - add_pruning_to_model(layer, config) + add_pruning_to_model(layer, config, full_name) return module @@ -1348,3 +1379,40 @@ def add_default_layer_quantization_pruning_to_config_torch(model, config): config.quantization_parameters.layer_specific = custom_scheme["layer_specific"] config.pruning_parameters.disable_pruning_for_layers = custom_scheme["disable_pruning_for_layers"] return config + + +def remove_compression_layers(module, config): + for name, layer in module.named_children(): + if isinstance(layer, PQDense): + out_features = layer.out_features + in_features = layer.in_features + bias = True if layer.bias is not None else False + 
setattr(module, name, nn.Linear(in_features=in_features, out_features=out_features, bias=bias))
+            getattr(module, name).weight.data.copy_(layer.weight)
+            if getattr(module, name).bias is not None:
+                getattr(module, name).bias.data.copy_(layer.bias)
+        elif isinstance(layer, (PQConv1d, PQConv2d)):
+            bias_values = layer.bias if layer.bias is not None else None
+            bias = True if bias_values is not None else False
+            conv = nn.Conv2d if isinstance(layer, PQConv2d) else nn.Conv1d
+            setattr(
+                module,
+                name,
+                conv(
+                    layer.in_channels,
+                    layer.out_channels,
+                    layer.kernel_size,
+                    layer.stride,
+                    layer.padding,
+                    layer.dilation,
+                    layer.groups,
+                    bias,
+                    layer.padding_mode,
+                ),
+            )
+            getattr(module, name).weight.data.copy_(layer.weight)
+            if getattr(module, name).bias is not None:
+                getattr(module, name).bias.data.copy_(bias_values.data)
+        else:
+            remove_compression_layers(layer, config)
+    return module
diff --git a/tests/test_torch_compression_layers.py b/tests/test_torch_compression_layers.py
index 5f5a015..3b61f2b 100644
--- a/tests/test_torch_compression_layers.py
+++ b/tests/test_torch_compression_layers.py
@@ -698,6 +698,26 @@ def test_set_activation_custom_bits_hgq(config_pdp, conv2d_input):
     assert torch.all(m.input_quantizer.quantizer.quantizer.f == 3.0)
 
 
+def test_disable_pruning_from_single_layer(config_pdp, conv2d_input):
+    config_pdp.quantization_parameters.enable_quantization = True
+    config_pdp.quantization_parameters.use_high_granularity_quantization = True
+    config_pdp.pruning_parameters.enable_pruning = True
+    layer = Conv2d(IN_FEATURES, OUT_FEATURES, KERNEL_SIZE, bias=True)
+    layer2 = Conv2d(OUT_FEATURES, OUT_FEATURES, KERNEL_SIZE)
+    model = TestModel2(layer, layer2, "relu", "tanh")
+    model = add_compression_layers_torch(model, config_pdp, conv2d_input.shape)
+
+    assert model.submodule.enable_pruning
+    assert model.submodule2.enable_pruning
+
+    config_pdp.pruning_parameters.disable_pruning_for_layers = ["submodule2"]
+    model = TestModel2(layer, layer2, "relu", "tanh")
+    model = add_compression_layers_torch(model, config_pdp, conv2d_input.shape)
+
+    assert model.submodule.enable_pruning
+    assert not model.submodule2.enable_pruning
+
+
 def test_set_activation_custom_bits_quantizer(config_pdp, conv2d_input):
     config_pdp.quantization_parameters.enable_quantization = True
     config_pdp.quantization_parameters.use_high_granularity_quantization = False
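For reference, everything the test above toggles is driven from the config object. A minimal usage sketch (Python; `model`, `config`, and the `submodule*` names mirror the test fixtures above, and the bit-width values are made up for illustration, not library defaults):

    # `config` is assumed to be a loaded pquant config (e.g. parsed from one
    # of the YAML files); `model` and the submodule names follow the test.
    config.pruning_parameters.disable_pruning_for_layers = ["submodule2"]
    config.quantization_parameters.layer_specific = {
        "submodule": {
            "weight": {"integer_bits": 2, "fractional_bits": 6},
            "output": {"integer_bits": 0, "fractional_bits": 7, "quantize": True},
        }
    }
    model = add_compression_layers_torch(model, config, input_shape=(1, 3, 32, 32), device="cpu")
    assert not model.submodule2.enable_pruning

From e474f0206d8197efa7ddcc8fdfae07f9b5b0c38e Mon Sep 17 00:00:00 2001
From: Anastasiia Petrovych
Date: Tue, 18 Nov 2025 15:12:44 +0100
Subject: [PATCH 17/37] Direct Keras layers usage

---
 src/pquant/core/tf_impl/activations.py          |  165 +++
 .../core/tf_impl/compressed_layers_tf.py        | 1176 ++++++++++++-----
 src/pquant/core/tf_impl/quantizer.py            |   57 +
 tests/test_keras_compression_layers.py          |  330 +++--
 4 files changed, 1278 insertions(+), 450 deletions(-)
 create mode 100644 src/pquant/core/tf_impl/activations.py
 create mode 100644 src/pquant/core/tf_impl/quantizer.py

diff --git a/src/pquant/core/tf_impl/activations.py b/src/pquant/core/tf_impl/activations.py
new file mode 100644
index 0000000..5140373
--- /dev/null
+++ b/src/pquant/core/tf_impl/activations.py
@@ -0,0 +1,165 @@
+import keras
+from keras.ops import maximum, minimum, relu, tanh
+
+from pquant.core.tf_impl.quantizer import Quantizer
+
+
+def hard_sigmoid(x):
+    """Computes hard_sigmoid function that saturates between 0 and 1."""
+    x = 0.5 * x + 0.5
+    x = maximum(x, 0.0)
+    x = minimum(x, 1.0)
+    return x
+
+
+def hard_tanh(x):
+    """Computes hard_tanh function that saturates between -1 and 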
1.""" + return 2.0 * hard_sigmoid(x) - 1.0 + + +activation_registry = {"relu": relu, "tanh": tanh, "hard_tanh": hard_tanh} + + +class PQActivation(keras.layers.Layer): + def __init__( + self, + config, + activation="relu", + i_input=0.0, + f_input=8.0, + i_output=0.0, + f_output=7.0, + quantize_input=True, + quantize_output=False, + ): + super().__init__() + if isinstance(config, dict): + from pquant.core.finetuning import TuningConfig + + config = TuningConfig.load_from_config(config) + self.config = config + self.i_input = i_input + self.f_input = f_input + self.k = 0.0 if activation.lower() == "relu" else 1.0 + + self.i_output = i_output + self.f_output = f_output + + self.activation_name = activation.lower() + self.activation_function = activation_registry.get(self.activation_name) + + self.enable_quantization = config.quantization_parameters.enable_quantization + self.use_hgq = config.quantization_parameters.use_high_granularity_quantization + self.is_pretraining = True + self.round_mode = config.quantization_parameters.round_mode + self.overflow = config.quantization_parameters.overflow + self.use_multiplier = config.quantization_parameters.use_relu_multiplier + self.hgq_beta = config.quantization_parameters.hgq_beta + self.hgq_gamma = config.quantization_parameters.hgq_gamma + self.hgq_heterogeneous = config.quantization_parameters.hgq_heterogeneous + self.use_fitcompress = config.fitcompress_parameters.enable_fitcompress + + self.post_fitcompress_calibration = False + self.saved_inputs = [] + self.quantize_input = quantize_input + self.quantize_output = quantize_output + self.built = False + + def build(self, input_shape): + super().build(input_shape) + self.input_shape = input_shape + self.output_quantizer = Quantizer( + k=self.k, + i=self.i_output, + f=self.f_output, + overflow=self.overflow, + round_mode=self.round_mode, + is_data=True, + is_heterogeneous=self.use_hgq, + hgq_gamma=self.hgq_gamma, + ) + self.input_quantizer = Quantizer( + k=self.k, + i=self.i_input, + f=self.f_input, + overflow=self.overflow, + round_mode=self.round_mode, + is_data=True, + is_heterogeneous=self.use_hgq, + hgq_gamma=self.hgq_gamma, + ) + if self.use_hgq: + self.input_quantizer.build(input_shape) + self.output_quantizer.build(input_shape) + + if self.use_multiplier: + self.multiplier = self.add_weight(shape=(1,), trainable=True, initializer=keras.initializers.Constant(-1.0)) + + def get_input_quantization_bits(self): + return self.input_quantizer.get_quantization_bits() + + def set_input_quantization_bits(self, i, f): + self.input_quantizer.set_quantization_bits(i, f) + + def get_output_quantization_bits(self): + return self.output_quantizer.get_quantization_bits() + + def set_output_quantization_bits(self, i, f): + self.output_quantizer.set_quantization_bits(i, f) + + def post_pre_train_function(self): + self.is_pretraining = False + + def ebops(self): + bw_inp = self.input_quantizer.quantizer.bits_(self.input_shape) + bw_out = self.output_quantizer.quantizer.bits_(self.input_shape) + return keras.ops.sum((2.0**bw_inp) * bw_out) * 1e-4 # type: ignore + + def hgq_loss(self): + if self.is_pretraining or not self.use_hgq: + return 0.0 + loss = self.hgq_beta * self.ebops() + if self.quantize_input: + loss += self.input_quantizer.hgq_loss() + if self.quantize_output: + loss += self.output_quantizer.hgq_loss() + return loss + + def pre_activation(self, x): + if not self.use_hgq and self.use_multiplier and self.activation_name == "relu": + x = x * 2 ** 
(keras.ops.stop_gradient(keras.ops.round(self.multiplier) - self.multiplier) + self.multiplier)
+        if self.quantize_input and self.enable_quantization:
+            x = self.input_quantizer(x)
+        return x
+
+    def post_activation(self, x):
+        if self.quantize_output and self.enable_quantization:
+            return self.output_quantizer(x)
+        return x
+
+    def call(self, x):
+        if self.use_fitcompress and self.is_pretraining and self.activation_name == "relu":
+            if self.post_fitcompress_calibration:
+                # Save quantized input into ReLU
+                self.saved_inputs.append(x)
+            # During FITcompress, we do not use any quantized activations
+            return relu(x)
+        # The multiplier is only applied after the FITcompress branch above,
+        # so no ReLU multiplier is active during the FITcompress search
+        x = self.pre_activation(x)
+        x = self.activation_function(x)
+        x = self.post_activation(x)
+        return x
+
+    def get_config(self):
+        config = super().get_config()
+        config.update(
+            {
+                "config": self.config.get_dict(),
+                "i_input": float(self.i_input),
+                "f_input": float(self.f_input),
+                "i_output": float(self.i_output),
+                "f_output": float(self.f_output),
+            }
+        )
+        return config
+
+    def extra_repr(self):
+        return f"quantize_input = {self.quantize_input}, quantize_output = {self.quantize_output}"
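That closes the new activations module. One detail in `pre_activation` is worth spelling out: ReLU inputs are scaled by an exact power of two while the exponent stays trainable. A self-contained sketch of that straight-through rounding trick, assuming only the Keras 3 `keras.ops` API (the function name is illustrative, not part of this patch):

    import keras

    def power_of_two_scale(x, multiplier):
        # Forward pass uses round(multiplier), so the scale is an exact power
        # of two; stop_gradient makes the backward pass see the unrounded
        # value, so `multiplier` keeps receiving gradients.
        exponent = keras.ops.stop_gradient(keras.ops.round(multiplier) - multiplier) + multiplier
        return x * 2.0**exponent

diff --git a/src/pquant/core/tf_impl/compressed_layers_tf.py b/src/pquant/core/tf_impl/compressed_layers_tf.py
index 9defe6b..9a9ea92 100644
--- a/src/pquant/core/tf_impl/compressed_layers_tf.py
+++ b/src/pquant/core/tf_impl/compressed_layers_tf.py
@@ -1,6 +1,7 @@
+from typing import Tuple, TypeVar
+
 import keras
 from keras import ops
-from keras.initializers import Constant
 from keras.layers import (
     Activation,
     AveragePooling1D,
@@ -17,45 +18,74 @@
 )
 from keras.src.ops.operation_utils import compute_pooling_output_shape
 
-from pquant.core.activations_quantizer import QuantizedReLU, QuantizedTanh
-from pquant.core.quantizer_functions import create_quantizer
+from pquant.core.tf_impl.activations import PQActivation
+from pquant.core.tf_impl.quantizer import Quantizer
 from pquant.core.utils import get_pruning_layer
 
+T = TypeVar("T")
+
 
 class PQWeightBiasBase(keras.layers.Layer):
-    def __init__(self, config, layer_type, quantize_input=True, quantize_output=False):
-        super().__init__()
-        i_bits = config.quantization_parameters.default_weight_integer_bits
-        f_bits = config.quantization_parameters.default_weight_fractional_bits
-        self.data_k = config.quantization_parameters.default_data_keep_negatives
-        self.weight_k = config.quantization_parameters.default_weight_keep_negatives
-        self.i_weight = ops.convert_to_tensor(i_bits)
-        self.f_weight = ops.convert_to_tensor(f_bits)
-        self.i_bias = ops.convert_to_tensor(i_bits)
-        self.f_bias = ops.convert_to_tensor(f_bits)
+    def __init__(
+        self,
+        config,
+        layer_type,
+        quantize_input=True,
+        quantize_output=False,
+        input_quantization_bits: Tuple[T, T, T] = None,
+        weight_quantization_bits: Tuple[T, T, T] = None,
+        bias_quantization_bits: Tuple[T, T, T] = None,
+        output_quantization_bits: Tuple[T, T, T] = None,
+        *args,
+        **kwargs,
+    ):
+        super().__init__(**kwargs)
+        if input_quantization_bits is not None:
+            self.k_input, self.i_input, self.f_input = input_quantization_bits
+        else:
+            self.k_input = config.quantization_parameters.default_data_keep_negatives
+            self.i_input = config.quantization_parameters.default_data_integer_bits
+            self.f_input = config.quantization_parameters.default_data_fractional_bits
-        self.i_input = self.i_output = ops.convert_to_tensor(config.quantization_parameters.default_data_integer_bits)
-        self.f_input = self.f_output = ops.convert_to_tensor(config.quantization_parameters.default_data_fractional_bits)
-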
self.pruning_layer = get_pruning_layer(config=config, layer_type=layer_type) + if weight_quantization_bits is not None: + self.k_weight, self.i_weight, self.f_weight = weight_quantization_bits + else: + self.k_weight = config.quantization_parameters.default_weight_keep_negatives + self.i_weight = config.quantization_parameters.default_weight_integer_bits + self.f_weight = config.quantization_parameters.default_weight_fractional_bits + if bias_quantization_bits is not None: + self.k_bias, self.i_bias, self.f_bias = bias_quantization_bits + else: + self.k_bias = config.quantization_parameters.default_weight_keep_negatives + self.i_bias = config.quantization_parameters.default_weight_integer_bits + self.f_bias = config.quantization_parameters.default_weight_fractional_bits + + if output_quantization_bits is not None: + self.k_output, self.i_output, self.f_output = output_quantization_bits + else: + self.k_output = config.quantization_parameters.default_data_keep_negatives + self.i_output = config.quantization_parameters.default_data_integer_bits + self.f_output = config.quantization_parameters.default_data_fractional_bits + self.pruning_layer = get_pruning_layer(config=config, layer_type=layer_type) self.pruning_method = config.pruning_parameters.pruning_method - self.round_mode = config.quantization_parameters.round_mode - self.overflow = config.quantization_parameters.overflow - self.hgq_gamma = config.quantization_parameters.hgq_gamma - self.hgq_beta = config.quantization_parameters.hgq_beta + self.quantize_input = quantize_input + self.quantize_output = quantize_output + self.pruning_first = config.training_parameters.pruning_first self.enable_quantization = config.quantization_parameters.enable_quantization + self.round_mode = config.quantization_parameters.round_mode + self.overflow = config.quantization_parameters.overflow self.use_hgq = config.quantization_parameters.use_high_granularity_quantization - self.hgq_heterogeneous = config.quantization_parameters.hgq_heterogeneous self.enable_pruning = config.pruning_parameters.enable_pruning self.use_fitcompress = config.fitcompress_parameters.enable_fitcompress + self.hgq_gamma = config.quantization_parameters.hgq_gamma self.final_compression_done = False - self.do_transpose_data = None - self.weight_transpose = None - self.data_transpose = None - self.quantize_input = quantize_input - self.quantize_output = quantize_output + self.built = False self.parallelization_factor = -1 + self.hgq_beta = config.quantization_parameters.hgq_beta + self.input_shape = None + self.is_pretraining = True def set_enable_pruning(self, enable_pruning): self.enable_pruning = enable_pruning @@ -74,10 +104,10 @@ def get_output_quantization_bits(self): def build(self, input_shape): super().build(input_shape) - self.weight_quantizer = KerasQuantizer( - self.weight_k, - self.i_weight, - self.f_weight, + self.weight_quantizer = Quantizer( + ops.convert_to_tensor(self.k_weight), + ops.convert_to_tensor(self.i_weight), + ops.convert_to_tensor(self.f_weight), self.overflow, self.round_mode, self.use_hgq, @@ -86,30 +116,30 @@ def build(self, input_shape): ) # if self.use_bias: - self.bias_quantizer = KerasQuantizer( - self.weight_k, - self.i_bias, - self.f_bias, + self.bias_quantizer = Quantizer( + ops.convert_to_tensor(self.k_bias), + ops.convert_to_tensor(self.i_bias), + ops.convert_to_tensor(self.f_bias), self.overflow, self.round_mode, self.use_hgq, False, self.hgq_gamma, ) - self.input_quantizer = KerasQuantizer( - self.data_k, - self.i_input, - self.f_input, + 
self.input_quantizer = Quantizer( + ops.convert_to_tensor(self.k_input), + ops.convert_to_tensor(self.i_input), + ops.convert_to_tensor(self.f_input), self.overflow, self.round_mode, self.use_hgq, True, self.hgq_gamma, ) - self.output_quantizer = KerasQuantizer( - self.data_k, - self.i_output, - self.f_output, + self.output_quantizer = Quantizer( + ops.convert_to_tensor(self.k_output), + ops.convert_to_tensor(self.i_output), + ops.convert_to_tensor(self.f_output), self.overflow, self.round_mode, self.use_hgq, @@ -121,11 +151,7 @@ def build(self, input_shape): self.parallelization_factor = self.parallelization_factor if self.parallelization_factor > 0 else self.n_parallel def apply_final_compression(self): - weight, bias = self.prune_and_quantize(self.weight, self.bias) - self.weight.assign(weight) - if self.bias is not None: - self.bias.assign(bias) - self.final_compression_done = True + pass def save_weights(self): self.init_weight = self.weight.value @@ -154,11 +180,11 @@ def handle_transpose(self, x, transpose, do_transpose=False): x = ops.transpose(x, transpose) return x - def quantize_i(self, weight, bias): + # Quantize i? + def quantize_i(self, x, quantizer): if self.enable_quantization: - weight = self.weight_quantizer(weight) - bias = None if bias is None else self.bias_quantizer(bias) - return weight, bias + return quantizer(x) if x is not None else x + return x def prune(self, weight): if self.enable_pruning: @@ -167,32 +193,15 @@ def prune(self, weight): weight = self.handle_transpose(weight, self.weight_transpose_back, True) return weight - def prune_and_quantize(self, weight, bias): - if self.final_compression_done: - return weight, bias - weight = ops.cast(weight, weight.dtype) - bias = ops.cast(bias, bias.dtype) if bias is not None else None - if self.pruning_first: - weight = self.prune(weight) - weight, bias = self.quantize_i(weight, bias) - else: - weight, bias = self.quantize_i(weight, bias) - weight = self.prune(weight) - return weight, bias - - def call(self, x): - return x - - def pre_forward(self, weight, bias, x, training=None): + def pre_forward(self, x, training=None): if self.quantize_input: if self.use_hgq and not self.input_quantizer.quantizer.built: self.input_quantizer.build(x.shape) if not self.pruning_layer.is_pretraining and not self.use_fitcompress: x = self.input_quantizer(x) if self.pruning_method == "wanda": - self.collect_input(x, self.weight, training) - weight, bias = self.prune_and_quantize(weight, bias) - return weight, bias, x + self.collect_input(x, self._kernel, training) + return x def post_forward(self, x, training=None): if self.quantize_output: @@ -214,38 +223,135 @@ def collect_output(self, x, training): self.pruning_layer.collect_output(collect_x, training) -class PQDepthwiseConv2d(PQWeightBiasBase): - def __init__(self, config, layer, quantize_input=True, quantize_output=False): - super().__init__(config, "conv", quantize_input, quantize_output) - self.depthwise_regularizer = layer.depthwise_regularizer - self.use_bias = layer.use_bias - self.strides = layer.strides - self.dilation_rate = layer.dilation_rate - self.padding = layer.padding - self.kernel_size = layer.kernel_size - self.bias_shape = layer.bias.shape if layer.use_bias else None - self.init_bias = layer.bias.value if layer.use_bias else None - self.weight_shape = layer.kernel.shape - self.init_weight = layer.kernel.value +class PQDepthwiseConv2d(PQWeightBiasBase, keras.layers.DepthwiseConv2D): + def __init__( + self, + config, + kernel_size, + strides=(1, 1), + 
padding="valid",
+        depth_multiplier=1,
+        data_format=None,
+        dilation_rate=(1, 1),
+        activation=None,
+        use_bias=False,
+        depthwise_initializer="glorot_uniform",
+        bias_initializer="zeros",
+        depthwise_regularizer=None,
+        bias_regularizer=None,
+        activity_regularizer=None,
+        depthwise_constraint=None,
+        bias_constraint=None,
+        quantize_input=True,
+        quantize_output=False,
+        bias: bool = True,
+        device=None,
+        dtype=None,
+        input_quantization_bits: Tuple[T, T, T] = None,
+        weight_quantization_bits: Tuple[T, T, T] = None,
+        bias_quantization_bits: Tuple[T, T, T] = None,
+        output_quantization_bits: Tuple[T, T, T] = None,
+        **kwargs,
+    ):
+        super().__init__(
+            kernel_size=kernel_size,
+            strides=strides,
+            padding=padding,
+            depth_multiplier=depth_multiplier,
+            data_format=data_format,
+            dilation_rate=dilation_rate,
+            activation=activation,
+            use_bias=use_bias,
+            depthwise_initializer=depthwise_initializer,
+            bias_initializer=bias_initializer,
+            depthwise_regularizer=depthwise_regularizer,
+            bias_regularizer=bias_regularizer,
+            activity_regularizer=activity_regularizer,
+            depthwise_constraint=depthwise_constraint,
+            bias_constraint=bias_constraint,
+            config=config,
+            layer_type="conv",
+            quantize_input=quantize_input,
+            quantize_output=quantize_output,
+            input_quantization_bits=input_quantization_bits,
+            weight_quantization_bits=weight_quantization_bits,
+            bias_quantization_bits=bias_quantization_bits,
+            output_quantization_bits=output_quantization_bits,
+            **kwargs,
+        )
+        self.depthwise_regularizer = depthwise_regularizer
+        self.use_bias = use_bias
+        self.strides = strides
+        self.dilation_rate = dilation_rate
+        # self.weight_transpose = (2, 3, 0, 1)
+        # self.weight_transpose_back = (2, 3, 1, 0)
         self.weight_transpose = (3, 2, 0, 1)
         self.weight_transpose_back = (2, 3, 1, 0)
         self.data_transpose = (0, 3, 1, 2)
-        self.do_transpose_data = layer.data_format == "channels_last"
+        self.do_transpose_data = self.data_format == "channels_last"
+        self._weight = None
+        self._bias = None
 
     def build(self, input_shape):
         super().build(input_shape)
-        self.weight = self.add_weight(
-            self.weight_shape, initializer=self.init_weight, trainable=True, regularizer=self.depthwise_regularizer
-        )
-        self.bias = (
-            self.add_weight(self.bias_shape, initializer=self.init_bias, trainable=True)
-            if self.bias_shape is not None
-            else None
-        )
+        input_channel = input_shape[-1]
+        self._kernel = self.add_weight(
+            name="kernel",
+            shape=self.kernel.shape,
+            initializer=self.depthwise_initializer,
+            regularizer=self.depthwise_regularizer,
+            constraint=self.depthwise_constraint,
+            trainable=True,
+            dtype=self.dtype,
+        )
+        if self.use_bias:
+            self.bias = self.add_weight(
+                name="bias",
+                shape=(self.depth_multiplier * input_channel,),
+                initializer=self.bias_initializer,
+                regularizer=self.bias_regularizer,
+                constraint=self.bias_constraint,
+                trainable=True,
+                dtype=self.dtype,
+            )
+        else:
+            self.bias = None
+        if self.use_hgq:
+            self.input_quantizer.build(input_shape)
+            self.weight_quantizer.build(self._kernel.shape)
+            if self.use_bias:
+                self.bias_quantizer.build(self._bias.shape)
+            self.output_quantizer.build(self.compute_output_shape(input_shape))
+
+    @property
+    def kernel(self):
+        if self.final_compression_done:
+            return self._kernel
+        if self.pruning_first:
+            weight = self.prune(self._kernel)
+            return self.quantize_i(weight, self.weight_quantizer)
+        else:
+            weight = self.quantize_i(self._kernel, self.weight_quantizer)
+            return self.prune(weight)
+
+    @kernel.setter
+    def kernel(self, kernel):
+        self._kernel = kernel
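The `kernel` property above is the heart of this refactor: the raw trainable variable lives in `_kernel`, and every read of `kernel`, including the one inside Keras' own forward pass, returns the pruned and quantized view. A toy, self-contained illustration of the pattern (stand-in callables, not the library classes):

    class CompressedWeight:
        # Hypothetical stand-in: prune_fn and quant_fn play the roles of
        # self.prune and self.weight_quantizer in the layer above.
        def __init__(self, raw, prune_fn, quant_fn, pruning_first=True):
            self._kernel = raw
            self._prune = prune_fn
            self._quant = quant_fn
            self.pruning_first = pruning_first

        @property
        def kernel(self):
            # Same ordering logic as the layer: prune-then-quantize or the
            # reverse, decided by the training config.
            if self.pruning_first:
                return self._quant(self._prune(self._kernel))
            return self._prune(self._quant(self._kernel))

+
+    @property
+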
def bias(self):
+        if self.final_compression_done:
+            return self._bias
+        bias = self.quantize_i(self._bias, self.bias_quantizer)
+        return bias
+
+    @bias.setter
+    def bias(self, bias):
+        self._bias = bias
 
     def ebops(self, shape):
         bw_inp = self.input_quantizer.quantizer.bits_(shape)
-        bw_ker = self.weight_quantizer.quantizer.bits_(ops.shape(self.weight))
+        bw_ker = self.weight_quantizer.quantizer.bits_(ops.shape(self.kernel))
         if self.parallelization_factor < 0:
             ebops = ops.sum(
                 ops.depthwise_conv(
@@ -274,52 +380,142 @@
         return ebops
 
     def call(self, x, training=None):
-        weight, _, x = self.pre_forward(self.weight, self.bias, x, training)
-        x = ops.depthwise_conv(
-            x, weight, strides=self.strides, padding=self.padding, data_format=None, dilation_rate=self.dilation_rate
-        )
+        input_shape = x.shape
+        x = self.pre_forward(x, training)
+        x = super().call(x)
         x = self.post_forward(x, training)
+        if self.use_hgq and self.enable_quantization:
+            self.add_loss(self.hgq_loss(input_shape))
         return x
 
+    # TODO: confirm that writing the compressed values back into the raw
+    # variables is the intended final-compression behavior here
+    def apply_final_compression(self):
+        self._kernel.assign(self.kernel)
+        if self._bias is not None:
+            self._bias.assign(self.bias)
+        self.final_compression_done = True
+
+    def extra_repr(self) -> str:
+        """
+        Return the extra representation of the module.
+        """
+        return (
+            f"bias={self._bias is not None} "
+            f"quantize_input={self.quantize_input} "
+            f"quantize_output={self.quantize_output} "
+        )
+
+
+class PQConv2d(PQWeightBiasBase, keras.layers.Conv2D):
+    def __init__(
+        self,
+        config,
+        filters,
+        kernel_size,
+        quantize_input=True,
+        quantize_output=False,
+        strides=(1, 1),
+        padding="valid",
+        data_format=None,
+        dilation_rate=(1, 1),
+        groups=1,
+        activation=None,
+        use_bias=False,
+        kernel_initializer="glorot_uniform",
+        bias_initializer="zeros",
+        kernel_regularizer=None,
+        bias_regularizer=None,
+        activity_regularizer=None,
+        kernel_constraint=None,
+        bias_constraint=None,
+        input_quantization_bits: Tuple[T, T, T] = None,
+        weight_quantization_bits: Tuple[T, T, T] = None,
+        bias_quantization_bits: Tuple[T, T, T] = None,
+        output_quantization_bits: Tuple[T, T, T] = None,
+        **kwargs,
+    ):
+        super().__init__(
+            filters=filters,
+            kernel_size=kernel_size,
+            strides=strides,
+            padding=padding,
+            data_format=data_format,
+            dilation_rate=dilation_rate,
+            groups=groups,
+            activation=activation,
+            use_bias=use_bias,
+            kernel_initializer=kernel_initializer,
+            bias_initializer=bias_initializer,
+            kernel_regularizer=kernel_regularizer,
+            bias_regularizer=bias_regularizer,
+            activity_regularizer=activity_regularizer,
+            kernel_constraint=kernel_constraint,
+            bias_constraint=bias_constraint,
+            config=config,
+            layer_type="conv",
+            quantize_input=quantize_input,
+            quantize_output=quantize_output,
+            input_quantization_bits=input_quantization_bits,
+            weight_quantization_bits=weight_quantization_bits,
+            bias_quantization_bits=bias_quantization_bits,
+            output_quantization_bits=output_quantization_bits,
+            **kwargs,
+        )
 
-class PQConv2d(PQWeightBiasBase):
-    def __init__(self, config, layer, quantize_input=True, quantize_output=False):
-        super().__init__(config, "conv", quantize_input, quantize_output)
-        self.kernel_regularizer = layer.kernel_regularizer
-        self.filters = layer.filters
-        self.use_bias = layer.use_bias
-        self.strides = layer.strides
-        self.dilation_rate = layer.dilation_rate
-        self.padding = layer.padding
-        self.kernel_size = layer.kernel_size
-        if hasattr(layer, "groups"):
-
self.groups = layer.groups
-        self.bias_shape = layer.bias.shape if layer.use_bias else None
-        self.init_bias = layer.bias.value if layer.use_bias else None
-        self.weight_shape = layer.kernel.shape
-        self.init_weight = layer.kernel.value
         self.weight_transpose = (3, 2, 0, 1)
         self.weight_transpose_back = (2, 3, 1, 0)
         self.data_transpose = (0, 3, 1, 2)
-        self.do_transpose_data = layer.data_format == "channels_last"
+        self.do_transpose_data = self.data_format == "channels_last"
+        self.use_bias = use_bias
 
     def build(self, input_shape):
         super().build(input_shape)
-        self.weight = self.add_weight(
-            self.weight_shape, initializer=self.init_weight, trainable=True, regularizer=self.kernel_regularizer
-        )
-        self.weight_quantizer.build(self.weight.shape)
-        self.bias = (
-            self.add_weight(self.bias_shape, initializer=self.init_bias, trainable=True)
-            if self.bias_shape is not None
-            else None
-        )
         if self.use_bias:
-            self.bias_quantizer.build(self.bias.shape)
+            self._bias = self.add_weight(
+                name="bias",
+                shape=(self.filters,),
+                initializer=self.bias_initializer,
+                regularizer=self.bias_regularizer,
+                constraint=self.bias_constraint,
+                trainable=True,
+                dtype=self.dtype,
+            )
+        else:
+            self._bias = None
+        if self.use_hgq:
+            self.input_quantizer.build(input_shape)
+            self.weight_quantizer.build(self._kernel.shape)
+            if self.use_bias:
+                self.bias_quantizer.build(self._bias.shape)
+            self.output_quantizer.build(self.compute_output_shape(input_shape))
+
+    @property
+    def kernel(self):
+        if self.final_compression_done:
+            return self._kernel
+        if self.pruning_first:
+            weight = self.prune(self._kernel)
+            return self.quantize_i(weight, self.weight_quantizer)
+        else:
+            weight = self.quantize_i(self._kernel, self.weight_quantizer)
+            return self.prune(weight)
+
+    @property
+    def bias(self):
+        if self.final_compression_done:
+            return self._bias
+        bias = self.quantize_i(self._bias, self.bias_quantizer)
+        return bias
+
+    @bias.setter
+    def bias(self, bias):
+        self._bias = bias
 
     def ebops(self, shape):
         bw_inp = self.input_quantizer.quantizer.bits_(shape)
-        bw_ker = self.weight_quantizer.quantizer.bits_(ops.shape(self.weight))
+        bw_ker = self.weight_quantizer.quantizer.bits_(ops.shape(self.kernel))
         if self.parallelization_factor < 0:
             ebops = ops.sum(
                 ops.conv(
@@ -349,32 +545,80 @@
         return ebops
 
     def call(self, x, training=None):
-        weight, bias, x = self.pre_forward(self.weight, self.bias, x, training)
-        x = ops.conv(
-            x, weight, strides=self.strides, padding=self.padding, data_format=None, dilation_rate=self.dilation_rate
-        )
-        if self.bias is not None:
-            x = ops.add(x, bias)
+        input_shape = x.shape
+        x = self.pre_forward(x, training)
+        x = super().call(x)
         x = self.post_forward(x, training)
+        if self.use_hgq and self.enable_quantization:
+            self.add_loss(self.hgq_loss(input_shape))
         return x
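The `ebops` methods above estimate energy cost by pushing per-element bit-width tensors through the layer's own op, so positions involved in more multiply-accumulates weigh more. A rough dense-case check with made-up numbers (NumPy, illustrative only, not the library API):

    import numpy as np

    bw_inp = np.full((1, 4), 8.0)  # 8-bit inputs, 4 features
    bw_ker = np.full((4, 2), 6.0)  # 6-bit weights, 2 units
    # Each of the 2 outputs accumulates 4 products of 8x6 bit-pairs:
    ebops = (bw_inp @ bw_ker).sum()  # 2 * 4 * 8 * 6 = 384.0


 class PQSeparableConv2d(Layer):
-    def __init__(self, config, layer, quantize_input=True, quantize_output=True):
+    def __init__(
+        self,
+        config,
+        filters,
+        kernel_size,
+        strides=(1, 1),
+        padding="valid",
+        data_format=None,
+        dilation_rate=(1, 1),
+        depth_multiplier=1,
+        use_bias=True,
+        depthwise_initializer="glorot_uniform",
+        pointwise_initializer="glorot_uniform",
+        bias_initializer="zeros",
+        depthwise_regularizer=None,
+        pointwise_regularizer=None,
+        bias_regularizer=None,
+        depthwise_constraint=None,
+        pointwise_constraint=None,
+        bias_constraint=None,
+        quantize_input=True,
+        quantize_output=True,
+        **kwargs,
+    ):
         super().__init__()
         self.weight_transpose = (3, 2, 0, 1)
         self.weight_transpose_back = 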
(2, 3, 1, 0)
         self.data_transpose = (0, 3, 1, 2)
-        layer.kernel = layer.depthwise_kernel
-        bias = layer.use_bias
-        layer.use_bias = False
-        self.depthwise_conv = PQDepthwiseConv2d(config, layer, quantize_input, False)
-        layer.kernel_regularizer = layer.pointwise_regularizer
-        layer.kernel_size = 1
-        layer.kernel = layer.pointwise_kernel
-        layer.use_bias = bias
-        self.pointwise_conv = PQConv2d(config, layer, False, quantize_output)
-        self.do_transpose_data = layer.data_format == "channels_last"
+        self.depthwise_conv = PQDepthwiseConv2d(
+            config,
+            kernel_size,
+            strides,
+            padding,
+            depth_multiplier,
+            data_format,
+            dilation_rate,
+            None,
+            use_bias=False,
+            depthwise_initializer=depthwise_initializer,
+            depthwise_regularizer=depthwise_regularizer,
+            depthwise_constraint=depthwise_constraint,
+            quantize_input=quantize_input,
+            quantize_output=False,
+        )
+
+        self.pointwise_conv = PQConv2d(
+            config,
+            filters=filters,
+            kernel_size=1,
+            quantize_input=False,
+            quantize_output=quantize_output,
+            padding="same",
+            data_format=data_format,
+            groups=1,
+            activation=None,
+            use_bias=use_bias,
+            kernel_initializer=pointwise_initializer,
+            bias_initializer=bias_initializer,
+            kernel_regularizer=pointwise_regularizer,
+            bias_regularizer=bias_regularizer,
+            kernel_constraint=pointwise_constraint,
+            bias_constraint=bias_constraint,
+        )
+        self.do_transpose_data = data_format == "channels_last"
 
     def build(self, input_shape):
         super().build(input_shape)
@@ -389,41 +633,115 @@
     def call(self, x, training=None):
         return x
 
-class PQConv1d(PQWeightBiasBase):
-    def __init__(self, config, layer, quantize_input=True, quantize_output=False):
-        super().__init__(config, "conv", quantize_input, quantize_output)
-        self.kernel_regularizer = layer.kernel_regularizer
-        self.filters = layer.filters
-        self.use_bias = layer.use_bias
-        self.strides = layer.strides
-        self.dilation_rate = layer.dilation_rate
-        self.padding = layer.padding
-        self.kernel_size = layer.kernel_size
-        self.groups = layer.groups
-        self.bias_shape = layer.bias.shape if layer.use_bias else None
-        self.init_bias = layer.bias.value if layer.use_bias else None
-        self.weight_shape = layer.kernel.shape
-        self.init_weight = layer.kernel.value
+class PQConv1d(PQWeightBiasBase, keras.layers.Conv1D):
+    def __init__(
+        self,
+        config,
+        filters,
+        kernel_size,
+        quantize_input=True,
+        quantize_output=False,
+        input_quantization_bits: Tuple[T, T, T] = None,
+        weight_quantization_bits: Tuple[T, T, T] = None,
+        bias_quantization_bits: Tuple[T, T, T] = None,
+        output_quantization_bits: Tuple[T, T, T] = None,
+        strides=1,
+        padding="valid",
+        data_format=None,
+        dilation_rate=1,
+        groups=1,
+        activation=None,
+        use_bias=False,
+        kernel_initializer="glorot_uniform",
+        bias_initializer="zeros",
+        kernel_regularizer=None,
+        bias_regularizer=None,
+        activity_regularizer=None,
+        kernel_constraint=None,
+        bias_constraint=None,
+        **kwargs,
+    ):
+
+        super().__init__(
+            filters=filters,
+            kernel_size=kernel_size,
+            strides=strides,
+            padding=padding,
+            data_format=data_format,
+            dilation_rate=dilation_rate,
+            groups=groups,
+            activation=activation,
+            use_bias=use_bias,
+            kernel_initializer=kernel_initializer,
+            bias_initializer=bias_initializer,
+            kernel_regularizer=kernel_regularizer,
+            bias_regularizer=bias_regularizer,
+            activity_regularizer=activity_regularizer,
+            kernel_constraint=kernel_constraint,
+            bias_constraint=bias_constraint,
+            config=config,
+            layer_type="conv",
+            quantize_input=quantize_input,
+            quantize_output=quantize_output,
+
input_quantization_bits=input_quantization_bits, + weight_quantization_bits=weight_quantization_bits, + bias_quantization_bits=bias_quantization_bits, + output_quantization_bits=output_quantization_bits, + **kwargs, + ) + self.weight_transpose = (2, 1, 0) self.weight_transpose_back = (2, 1, 0) self.data_transpose = (0, 2, 1) - self.do_transpose_data = layer.data_format == "channels_last" + self.do_transpose_data = self.data_format == "channels_last" + self.use_bias = use_bias def build(self, input_shape): super().build(input_shape) - self.weight = self.add_weight( - self.weight_shape, initializer=self.init_weight, trainable=True, regularizer=self.kernel_regularizer - ) - self.weight_quantizer.build(self.weight.shape) - self.bias = ( - self.add_weight(self.bias_shape, initializer=self.init_bias, trainable=True) - if self.bias_shape is not None - else None - ) + if self.use_bias: + self._bias = self.add_weight( + name="bias", + shape=(self.filters,), + initializer=self.bias_initializer, + regularizer=self.bias_regularizer, + constraint=self.bias_constraint, + trainable=True, + dtype=self.dtype, + ) + else: + self._bias = None + if self.use_hgq: + self.input_quantizer.build(input_shape) + self.weight_quantizer.build(self._kernel.shape) + if self.use_bias: + self.bias_quantizer.build(self._bias.shape) + self.output_quantizer.build(self.compute_output_shape(input_shape)) + + @property + def kernel(self): + if self.final_compression_done: + return self._kernel + if self.pruning_first: + weight = self.prune(self._kernel) + return self.quantize_i(weight, self.weight_quantizer) + else: + weight = self.quantize_i(self._kernel, self.weight_quantizer) + return self.prune(weight) + + @property + def bias(self): + if self.final_compression_done: + return self._bias + bias = self.quantize_i(self._bias, self.bias_quantizer) + return bias + + @bias.setter + def bias(self, bias): + self._bias = bias def ebops(self, shape): bw_inp = self.input_quantizer.quantizer.bits_(shape) - bw_ker = self.weight_quantizer.quantizer.bits_(ops.shape(self.weight)) + bw_ker = self.weight_quantizer.quantizer.bits_(ops.shape(self.kernel)) if self.parallelization_factor < 0: ebops = ops.sum( ops.conv( @@ -452,47 +770,114 @@ def ebops(self, shape): return ebops def call(self, x, training=None): - weight, bias, x = self.pre_forward(self.weight, self.bias, x, training) - x = ops.conv( - x, weight, strides=self.strides, padding=self.padding, data_format=None, dilation_rate=self.dilation_rate - ) - if self.bias is not None: - x = ops.add(x, bias) + input_shape = x.shape + x = self.pre_forward(x, training) + x = super().call(x) x = self.post_forward(x, training) + if self.use_hgq and self.enable_quantization: + self.add_loss(self.hgq_loss(input_shape)) return x -class PQDense(PQWeightBiasBase): - def __init__(self, config, layer, quantize_input=True, quantize_output=False): - super().__init__(config, "linear", quantize_input, quantize_output) - self.kernel_regularizer = layer.kernel_regularizer - self.use_bias = layer.use_bias - self.units = layer.units - self.bias_shape = layer.bias.shape if layer.use_bias else None - self.init_bias = layer.bias.value if layer.use_bias else None - self.weight_shape = layer.kernel.shape - self.init_weight = layer.kernel.value +class PQDense(PQWeightBiasBase, keras.layers.Dense): + def __init__( + self, + config, + units, + device=None, + dtype=None, + quantize_input=True, + quantize_output=False, + input_quantization_bits: Tuple[T, T, T] = None, + weight_quantization_bits: Tuple[T, T, T] = None, + 
bias_quantization_bits: Tuple[T, T, T] = None, + output_quantization_bits: Tuple[T, T, T] = None, + activation=None, + use_bias=True, + kernel_initializer="glorot_uniform", + bias_initializer="zeros", + kernel_regularizer=None, + bias_regularizer=None, + activity_regularizer=None, + kernel_constraint=None, + bias_constraint=None, + lora_rank=None, + lora_alpha=None, + **kwargs, + ): + super().__init__( + units=units, + activation=activation, + use_bias=use_bias, + kernel_initializer=kernel_initializer, + bias_initializer=bias_initializer, + kernel_regularizer=kernel_regularizer, + bias_regularizer=bias_regularizer, + activity_regularizer=activity_regularizer, + kernel_constraint=kernel_constraint, + bias_constraint=bias_constraint, + lora_rank=lora_rank, + lora_alpha=lora_alpha, + config=config, + layer_type="linear", + quantize_input=quantize_input, + quantize_output=quantize_output, + input_quantization_bits=input_quantization_bits, + weight_quantization_bits=weight_quantization_bits, + bias_quantization_bits=bias_quantization_bits, + output_quantization_bits=output_quantization_bits, + **kwargs, + ) self.weight_transpose = (1, 0) self.weight_transpose_back = (1, 0) self.data_transpose = (0, 1) # Always (BATCH_SIZE, OUT_FEATURES) + self.do_transpose_data = False + self.use_bias = use_bias def build(self, input_shape): super().build(input_shape) - self.weight = self.add_weight( - self.weight_shape, initializer=self.init_weight, trainable=True, regularizer=self.kernel_regularizer - ) - self.weight_quantizer.build(self.weight.shape) - self.bias = ( - self.add_weight(self.bias_shape, initializer=self.init_bias, trainable=True) - if self.bias_shape is not None - else None - ) if self.use_bias: - self.bias_quantizer.build(self.bias.shape) + self._bias = self.add_weight( + name="bias", + shape=(self.units,), + initializer=self.bias_initializer, + regularizer=self.bias_regularizer, + constraint=self.bias_constraint, + ) + else: + self._bias = None + if self.use_hgq: + self.input_quantizer.build(input_shape) + self.weight_quantizer.build(self._kernel.shape) + if self.use_bias: + self.bias_quantizer.build(self._bias.shape) + self.output_quantizer.build(self.compute_output_shape(input_shape)) + + @property + def kernel(self): + if self.final_compression_done: + return self._kernel + if self.pruning_first: + weight = self.prune(self._kernel) + return self.quantize_i(weight, self.weight_quantizer) + else: + weight = self.quantize_i(self._kernel, self.weight_quantizer) + return self.prune(weight) + + @property + def bias(self): + if self.final_compression_done: + return self._bias + bias = self.quantize_i(self._bias, self.bias_quantizer) + return bias + + @bias.setter + def bias(self, bias): + self._bias = bias def ebops(self, shape): bw_inp = self.input_quantizer.quantizer.bits_(shape) - bw_ker = self.weight_quantizer.quantizer.bits_(ops.shape(self.weight)) + bw_ker = self.weight_quantizer.quantizer.bits_(ops.shape(self.kernel)) ebops = ops.sum(ops.matmul(bw_inp, bw_ker)) ebops = ebops * self.n_parallel / self.parallelization_factor if self.use_bias: @@ -503,10 +888,8 @@ def ebops(self, shape): def call(self, x, training=None): input_shape = x.shape - weight, bias, x = self.pre_forward(self.weight, self.bias, x, training) - x = ops.matmul(x, weight) - if self.bias is not None: - x = ops.add(x, bias) + x = self.pre_forward(x, training) + x = super().call(x) x = self.post_forward(x, training) if self.use_hgq and self.enable_quantization: self.add_loss(self.hgq_loss(input_shape)) @@ -514,7 +897,6 @@ def 
call(self, x, training=None): class PQBatchNormalization(keras.layers.BatchNormalization): - def __init__( self, config, @@ -571,7 +953,7 @@ def __init__( def build(self, input_shape): super().build(input_shape) - self.input_quantizer = KerasQuantizer( + self.input_quantizer = Quantizer( k=1.0, i=self.i_input, f=self.f_input, @@ -581,7 +963,7 @@ def build(self, input_shape): is_data=True, hgq_gamma=self.hgq_gamma, ) - self.weight_quantizer = KerasQuantizer( + self.weight_quantizer = Quantizer( k=1.0, i=self.i_weight, f=self.f_weight, @@ -590,7 +972,7 @@ def build(self, input_shape): is_data=False, is_heterogeneous=self.use_hgq, ) - self.bias_quantizer = KerasQuantizer( + self.bias_quantizer = Quantizer( k=1.0, i=self.i_bias, f=self.f_bias, @@ -701,35 +1083,52 @@ def post_pre_train_function(self): self.is_pretraining = False -class QuantizedPooling(keras.layers.Layer): - def __init__(self, config, layer, quantize_input=True, quantize_output=False): - super().__init__() - self.i_input = self.i_output = ops.convert_to_tensor(config.quantization_parameters.default_data_integer_bits) - self.f_input = self.f_output = ops.convert_to_tensor(config.quantization_parameters.default_data_fractional_bits) +class PQAvgPoolBase(keras.layers.Layer): + def __init__( + self, + config, + quantize_input=True, + quantize_output=False, + input_quantization_bits: Tuple[T, T, T] = None, + output_quantization_bits: Tuple[T, T, T] = None, + **kwargs, + ): - self.is_pretraining = True + super().__init__(**kwargs) - self.hgq_gamma = config.quantization_parameters.hgq_gamma - self.hgq_beta = config.quantization_parameters.hgq_beta + if input_quantization_bits is not None: + self.k_input, self.i_input, self.f_input = input_quantization_bits + else: + self.k_input = config.quantization_parameters.default_data_keep_negatives + self.i_input = config.quantization_parameters.default_data_integer_bits + self.f_input = config.quantization_parameters.default_data_fractional_bits + + if output_quantization_bits is not None: + self.k_output, self.i_output, self.f_output = output_quantization_bits + else: + self.k_output = config.quantization_parameters.default_data_keep_negatives + self.i_output = config.quantization_parameters.default_data_integer_bits + self.f_output = config.quantization_parameters.default_data_fractional_bits + + self.overflow = config.quantization_parameters.overflow + self.config = config + self.is_pretraining = True + self.round_mode = config.quantization_parameters.round_mode self.data_k = config.quantization_parameters.default_data_keep_negatives self.use_hgq = config.quantization_parameters.use_high_granularity_quantization - self.hgq_heterogeneous = config.quantization_parameters.hgq_heterogeneous self.enable_quantization = config.quantization_parameters.enable_quantization - self.round_mode = config.quantization_parameters.round_mode - self.overflow = config.quantization_parameters.overflow - self.pool_size = layer.pool_size - self.strides = layer.strides - self.padding = layer.padding - self.data_format = layer.data_format + self.hgq_gamma = config.quantization_parameters.hgq_gamma + self.hgq_beta = config.quantization_parameters.hgq_beta + self.hgq_heterogeneous = config.quantization_parameters.hgq_heterogeneous + self.saved_inputs = [] self.quantize_input = quantize_input - self.dimensions = layer.__class__.__name__[-2] self.quantize_output = quantize_output def post_pre_train_function(self): self.is_pretraining = False def build(self, input_shape): - self.input_quantizer = KerasQuantizer( + 
self.input_quantizer = Quantizer( k=1.0, i=self.i_input, f=self.f_input, @@ -739,7 +1138,7 @@ def build(self, input_shape): is_data=True, hgq_gamma=self.hgq_gamma, ) - self.output_quantizer = KerasQuantizer( + self.output_quantizer = Quantizer( k=1.0, i=self.i_output, f=self.f_output, @@ -768,6 +1167,18 @@ def compute_output_shape(self, input_shape): self.data_format, ) + def pre_pooling(self, x, training): + if not hasattr(self, "input_quantizer"): + self.build(x.shape) + if self.quantize_input and self.enable_quantization: + x = self.input_quantizer(x, training) + return x + + def post_pooling(self, x, training): + if self.quantize_output and self.enable_quantization: + x = self.output_quantizer(x, training) + return x + def ebops(self, shape): bw_inp = self.input_quantizer.quantizer.bits_(shape) return ops.sum(bw_inp) @@ -782,22 +1193,6 @@ def hgq_loss(self, shape): loss += self.output_quantizer.hgq_loss() return loss - def call(self, x): - input_shape = x.shape - if self.quantize_input and self.enable_quantization: - x = self.input_quantizer(x) - x = ops.average_pool( - x, - pool_size=self.pool_size, - strides=self.strides, - padding=self.padding, - data_format=self.data_format, - ) - self.add_loss(self.hgq_loss(input_shape)) - if self.quantize_output and self.enable_quantization: - x = self.output_quantizer(x) - return x - def get_config(self): config = super().get_config() config.update( @@ -814,58 +1209,82 @@ def get_config(self): return config -class KerasQuantizer(keras.layers.Layer): - # HGQ quantizer wrapper - def __init__(self, k, i, f, overflow, round_mode, is_heterogeneous, is_data, hgq_gamma=0): - super().__init__() - self.k = k - self.i = i - self.f = f - self.overflow = overflow - self.round_mode = round_mode - self.use_hgq = is_heterogeneous - self.quantizer = create_quantizer(self.k, self.i, self.f, overflow, round_mode, is_heterogeneous, is_data) - self.is_pretraining = False - self.hgq_gamma = hgq_gamma - - def build(self, input_shape): - super().build(input_shape) - self.i = self.add_variable((), Constant(self.i), dtype="float32", trainable=False) - self.f = self.add_variable((), Constant(self.f), dtype="float32", trainable=False) - if self.use_hgq: - self.quantizer.build(input_shape) +class PQAvgPool1d(PQAvgPoolBase, keras.layers.AveragePooling1D): + def __init__( + self, + config, + pool_size, + quantize_input=True, + quantize_output=False, + input_quantization_bits: Tuple[T, T, T] = None, + output_quantization_bits: Tuple[T, T, T] = None, + strides=None, + padding="valid", + data_format=None, + name=None, + **kwargs, + ): + super().__init__( + pool_size=pool_size, + strides=strides, + padding=padding, + data_format=data_format, + name=name, + config=config, + quantize_input=quantize_input, + quantize_output=quantize_output, + input_quantization_bits=input_quantization_bits, + output_quantization_bits=output_quantization_bits, + **kwargs, + ) - def get_quantization_bits(self): - if self.use_hgq: - return self.quantizer.quantizer.i, self.quantizer.quantizer.f - else: - return self.i, self.f + def call(self, x, training=None): + input_shape = x.shape + x = self.pre_pooling(x, training) + x = super().call(x) + x = self.post_pooling(x, training) + if self.use_hgq and self.enable_quantization: + self.add_loss(self.hgq_loss(input_shape)) + return x - def set_quantization_bits(self, i, f): - if self.use_hgq: - self.quantizer.quantizer._i.assign(self.quantizer.quantizer._i * 0.0 + i) - self.quantizer.quantizer._f.assign(self.quantizer.quantizer._f * 0.0 + f) - self.i = i - 
self.f = f
-
-    def post_pretrain(self):
-        self.is_pretraining = True
 
+class PQAvgPool2d(PQAvgPoolBase, keras.layers.AveragePooling2D):
+    def __init__(
+        self,
+        config,
+        pool_size,
+        quantize_input=True,
+        quantize_output=False,
+        input_quantization_bits: Tuple[T, T, T] = None,
+        output_quantization_bits: Tuple[T, T, T] = None,
+        strides=None,
+        padding="valid",
+        data_format=None,
+        name=None,
+        **kwargs,
+    ):
+        super().__init__(
+            pool_size=pool_size,
+            strides=strides,
+            padding=padding,
+            data_format=data_format,
+            name=name,
+            config=config,
+            quantize_input=quantize_input,
+            quantize_output=quantize_output,
+            input_quantization_bits=input_quantization_bits,
+            output_quantization_bits=output_quantization_bits,
+            **kwargs,
+        )
 
-    def call(self, x):
-        if not self.built:
-            self.build(x.shape)
-        if self.use_hgq:
-            x = self.quantizer(x)
-        else:
-            x = self.quantizer(x, k=self.k, i=self.i, f=self.f)
+    def call(self, x, training=None):
+        input_shape = x.shape
+        x = self.pre_pooling(x, training)
+        x = super().call(x)
+        x = self.post_pooling(x, training)
+        if self.use_hgq and self.enable_quantization:
+            self.add_loss(self.hgq_loss(input_shape))
         return x
 
-    def hgq_loss(self):
-        if self.is_pretraining or not self.use_hgq:
-            return 0.0
-        loss = (ops.sum(self.quantizer.quantizer.i) + ops.sum(self.quantizer.quantizer.f)) * self.hgq_gamma
-        return loss
-
 
 def call_post_round_functions(model, rewind, rounds, r):
     if rewind == "round":
@@ -879,7 +1298,7 @@ def apply_final_compression_tf(model):
     x = model.layers[0].output
     for layer in model.layers[1:]:
-        if isinstance(layer, (PQWeightBiasBase, PQSeparableConv2d, PQBatchNormalization)):
+        if isinstance(layer, (PQWeightBiasBase, PQSeparableConv2d, PQBatchNormalization, PQDepthwiseConv2d)):
             layer.apply_final_compression()
             x = layer(x)
         else:
@@ -1005,7 +1424,7 @@ def post_pretrain_functions(model, config):
         elif isinstance(layer, PQSeparableConv2d):
             layer.depthwise_conv.pruning_layer.post_pre_train_function()
             layer.pointwise_conv.pruning_layer.post_pre_train_function()
-        elif isinstance(layer, (QuantizedReLU, QuantizedTanh, QuantizedPooling, PQBatchNormalization)):
+        elif isinstance(layer, (PQActivation, PQAvgPoolBase, PQBatchNormalization)):
            layer.post_pre_train_function()
    if config.pruning_parameters.pruning_method == "pdp" or (
        config.pruning_parameters.pruning_method == "wanda" and config.pruning_parameters.calculate_pruning_budget
    ):
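The `pdp_setup` hunk that follows gathers every weight into one vector and sorts the magnitudes with `top_k` to derive a single global pruning threshold, which is then split back into per-layer budgets. A hedged NumPy sketch of that global thresholding (`all_layer_weights` and `target_sparsity` are hypothetical; the real code operates on `ops` tensors and the config's budget):

    import numpy as np

    # Hypothetical inputs: a list of weight arrays and a target sparsity.
    all_layer_weights = [np.random.randn(64, 32), np.random.randn(32, 10)]
    target_sparsity = 0.5

    magnitudes = np.sort(np.abs(np.concatenate([w.ravel() for w in all_layer_weights])))[::-1]
    cut = int(len(magnitudes) * (1.0 - target_sparsity))
    threshold = magnitudes[cut]  # weights below this magnitude are prunable
    below = [np.abs(w) < threshold for w in all_layer_weights]
    per_layer_sparsity = [b.mean() for b in below]  # analogous to init_r below

@@ -1030,16 +1449,16 @@ def pdp_setup(model, config):
         ),
         ):
             if global_weights is None:
-                global_weights = ops.ravel(layer.weight)
+                global_weights = ops.ravel(layer.kernel)
             else:
-                global_weights = ops.concatenate((global_weights, ops.ravel(layer.weight)))
+                global_weights = ops.concatenate((global_weights, ops.ravel(layer.kernel)))
         elif isinstance(layer, PQSeparableConv2d):
             if global_weights is None:
-                global_weights = ops.ravel(layer.depthwise_conv.weight)
-                global_weights = ops.concatenate((global_weights, ops.ravel(layer.pointwise_conv.weight)))
+                global_weights = ops.ravel(layer.depthwise_conv.kernel)
+                global_weights = ops.concatenate((global_weights, ops.ravel(layer.pointwise_conv.kernel)))
             else:
-                global_weights = ops.concatenate((global_weights, ops.ravel(layer.depthwise_conv.weight)))
-                global_weights = ops.concatenate((global_weights, ops.ravel(layer.pointwise_conv.weight)))
+                global_weights = ops.concatenate((global_weights, ops.ravel(layer.depthwise_conv.kernel)))
+                global_weights = ops.concatenate((global_weights, ops.ravel(layer.pointwise_conv.kernel)))
 
     abs_global_weights 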
= ops.abs(global_weights) global_weight_topk, _ = ops.top_k(abs_global_weights, ops.size(abs_global_weights)) @@ -1056,29 +1475,29 @@ def pdp_setup(model, config): PQDense, ), ): - weight_size = ops.size(layer.weight) + weight_size = ops.size(layer.kernel) w = ops.sum(global_weights_below_threshold[idx : idx + weight_size]) - layer.pruning_layer.init_r = ops.convert_to_tensor(w / weight_size, dtype=layer.weight.dtype) - layer.pruning_layer.sparsity = ops.convert_to_tensor(w / weight_size, dtype=layer.weight.dtype) # Wanda + layer.pruning_layer.init_r = ops.convert_to_tensor(w / weight_size, dtype=layer.kernel.dtype) + layer.pruning_layer.sparsity = ops.convert_to_tensor(w / weight_size, dtype=layer.kernel.dtype) # Wanda idx += weight_size elif isinstance(layer, PQSeparableConv2d): - weight_size = ops.size(layer.depthwise_conv.weight) + weight_size = ops.size(layer.depthwise_conv.kernel) w = ops.sum(global_weights_below_threshold[idx : idx + weight_size]) layer.depthwise_conv.pruning_layer.init_r = ops.convert_to_tensor( - w / weight_size, dtype=layer.depthwise_conv.weight.dtype + w / weight_size, dtype=layer.depthwise_conv.kernel.dtype ) layer.depthwise_conv.pruning_layer.sparsity = ops.convert_to_tensor( - w / weight_size, dtype=layer.depthwise_conv.weight.dtype + w / weight_size, dtype=layer.depthwise_conv.kernel.dtype ) # Wanda idx += weight_size - weight_size = ops.size(layer.pointwise_conv.weight) + weight_size = ops.size(layer.pointwise_conv.kernel) w = ops.sum(global_weights_below_threshold[idx : idx + weight_size]) layer.pointwise_conv.pruning_layer.init_r = ops.convert_to_tensor( - w / weight_size, dtype=layer.pointwise_conv.weight.dtype + w / weight_size, dtype=layer.pointwise_conv.kernel.dtype ) layer.pointwise_conv.pruning_layer.sparsity = ops.convert_to_tensor( - w / weight_size, dtype=layer.pointwise_conv.weight.dtype + w / weight_size, dtype=layer.pointwise_conv.kernel.dtype ) # Wanda idx += weight_size @@ -1097,43 +1516,36 @@ def get_layer_keep_ratio_tf(model): ), ): # weight, bias = layer.prune_and_quantize(layer.weight, layer.bias) - weight = ops.cast(layer.weight, layer.weight.dtype) - bias = ops.cast(layer.bias, layer.bias.dtype) if layer.bias is not None else None - weight, bias = layer.quantize_i(weight, bias) - transpose = layer.weight_transpose - if layer.enable_pruning: - weight = layer.pruning_layer.get_hard_mask(ops.transpose(weight, transpose)) * ops.transpose( - weight, transpose - ) + weight = layer.kernel total_w += ops.size(weight) rem = ops.count_nonzero(weight) remaining_weights += rem elif isinstance(layer, PQSeparableConv2d): - depthwise_weight = ops.cast(layer.depthwise_conv.weight, layer.depthwise_conv.weight.dtype) - pointwise_weight = ops.cast(layer.pointwise_conv.weight, layer.pointwise_conv.weight.dtype) + depthwise_weight = ops.cast(layer.depthwise_conv.kernel, layer.depthwise_conv.kernel.dtype) + pointwise_weight = ops.cast(layer.pointwise_conv.kernel, layer.pointwise_conv.kernel.dtype) bias = ( ops.cast(layer.pointwise_conv.bias, layer.pointwise_conv.bias.dtype) if layer.pointwise_conv.bias is not None else None ) - depthwise_weight, _ = layer.depthwise_conv.quantize_i(depthwise_weight, None) + depthwise_weight = layer.depthwise_conv.quantize_i(depthwise_weight, None) transpose = layer.depthwise_conv.weight_transpose if layer.depthwise_conv.enable_pruning: depthwise_weight = layer.depthwise_conv.pruning_layer.get_hard_mask( ops.transpose(depthwise_weight, transpose) ) * ops.transpose(depthwise_weight, transpose) - total_w += 
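# A minimal sketch of the per-layer assignment above (hypothetical helper,
# not from the patch): each layer's init_r/sparsity is the mean of its own
# slice of the global below-threshold flags, walked in concatenation order.
def per_layer_sparsity(below_threshold, layer_sizes):
    out, idx = [], 0
    for n in layer_sizes:
        out.append(float(below_threshold[idx:idx + n].sum()) / n)
        idx += n
    return out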
ops.size(layer.depthwise_conv.weight) + total_w += ops.size(layer.depthwise_conv.kernel) rem = ops.count_nonzero(depthwise_weight) remaining_weights += rem - pointwise_weight, _ = layer.pointwise_conv.quantize_i(pointwise_weight, bias) + pointwise_weight = layer.pointwise_conv.quantize_i(pointwise_weight, bias) transpose = layer.pointwise_conv.weight_transpose if layer.pointwise_conv.enable_pruning: pointwise_weight = layer.pointwise_conv.pruning_layer.get_hard_mask( ops.transpose(pointwise_weight, transpose) ) * ops.transpose(pointwise_weight, transpose) - total_w += ops.size(layer.pointwise_conv.weight) + total_w += ops.size(layer.pointwise_conv.kernel) rem = ops.count_nonzero(pointwise_weight) remaining_weights += rem @@ -1175,7 +1587,7 @@ def get_model_losses_tf(model, losses): loss += layer.depthwise_conv.hgq_loss() loss += layer.pointwise_conv.hgq_loss() losses += loss - elif isinstance(layer, (QuantizedReLU, QuantizedTanh, QuantizedPooling, PQBatchNormalization)): + elif isinstance(layer, (PQActivation, PQAvgPoolBase, PQBatchNormalization)): if layer.enable_quantization and layer.use_hgq: losses += layer.hgq_loss() return losses @@ -1190,13 +1602,12 @@ def check_activation(layer, config): act = None if hasattr(layer.activation, "__name__"): if layer.activation.__name__ == "relu": - - act = QuantizedReLU(config) if quantization_enabled else ReLU() + act = PQActivation(config, "relu") if quantization_enabled else ReLU() if quantization_enabled: set_quantization_bits_activations(config, layer, act) act.build(layer.input.shape) elif layer.activation.__name__ == "tanh": - act = QuantizedTanh(config) if quantization_enabled else Activation(activation="tanh") + act = PQActivation(config, "tanh") if quantization_enabled else Activation(activation="tanh") if quantization_enabled: set_quantization_bits_activations(config, layer, act) act.build(layer.input.shape) @@ -1212,7 +1623,25 @@ def add_compression_layers_tf(model, config, input_shape=None): for layer in model.layers[1:]: act = None if isinstance(layer, DepthwiseConv2D): - new_layer = PQDepthwiseConv2d(config, layer) + new_layer = PQDepthwiseConv2d( + config, + kernel_size=layer.kernel_size, + strides=layer.strides, + padding=layer.padding, + depth_multiplier=layer.depth_multiplier, + data_format=layer.data_format, + dilation_rate=layer.dilation_rate, + activation=layer.activation, + use_bias=layer.use_bias, + bias_initializer=layer.bias_initializer, + depthwise_initializer=layer.depthwise_initializer, + bias_regularizer=layer.bias_regularizer, + activity_regularizer=layer.activity_regularizer, + depthwise_constraint=layer.depthwise_constraint, + bias_constraint=layer.bias_constraint, + bias=layer.bias, + dtype=layer.dtype, + ) set_quantization_bits_weight_layers(config, layer, new_layer) enable_pruning = get_enable_pruning(layer, config) @@ -1225,7 +1654,25 @@ def add_compression_layers_tf(model, config, input_shape=None): x = new_layer(x) act = check_activation(layer, config) elif isinstance(layer, Conv2D): - new_layer = PQConv2d(config, layer) + new_layer = PQConv2d( + config=config, + filters=layer.filters, + kernel_size=layer.kernel_size, + strides=layer.strides, + padding=layer.padding, + data_format=layer.data_format, + dilation_rate=layer.dilation_rate, + groups=layer.groups, + activation=layer.activation, + use_bias=layer.use_bias, + kernel_initializer=layer.kernel_initializer, + bias_initializer=layer.bias_initializer, + kernel_regularizer=layer.kernel_regularizer, + bias_regularizer=layer.bias_regularizer, + 
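# A minimal sketch of what get_layer_keep_ratio_tf now computes for the
# plain weight layers (hypothetical helper, not from the patch): the keep
# ratio reduces to nonzero-over-total across the kernels.
from keras import ops

def keep_ratio(kernels):
    total = sum(int(ops.size(k)) for k in kernels)
    kept = sum(int(ops.count_nonzero(k)) for k in kernels)
    return kept / total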
activity_regularizer=layer.activity_regularizer, + kernel_constraint=layer.kernel_constraint, + bias_constraint=layer.bias_constraint, + ) set_quantization_bits_weight_layers(config, layer, new_layer) enable_pruning = get_enable_pruning(layer, config) new_layer.set_enable_pruning(enable_pruning) @@ -1233,10 +1680,33 @@ def add_compression_layers_tf(model, config, input_shape=None): transpose_shape = new_layer.weight_transpose pruning_layer_input = ops.transpose(pruning_layer_input, transpose_shape) new_layer.pruning_layer.build(pruning_layer_input.shape) + new_layer.build(x.shape) x = new_layer(x) + new_layer._kernel.assign(layer._kernel) + if layer.use_bias: + new_layer._bias.assign(layer.bias) act = check_activation(layer, config) elif isinstance(layer, SeparableConv2D): - new_layer = PQSeparableConv2d(config, layer) + new_layer = PQSeparableConv2d( + config, + layer.filters, + layer.kernel_size, + layer.strides, + layer.padding, + layer.data_format, + layer.dilation_rate, + layer.depth_multiplier, + layer.use_bias, + layer.depthwise_initializer, + layer.pointwise_initializer, + layer.bias_initializer, + layer.depthwise_regularizer, + layer.pointwise_regularizer, + layer.bias_regularizer, + layer.depthwise_constraint, + layer.pointwise_constraint, + layer.bias_constraint, + ) set_quantization_bits_weight_layers(config, layer, new_layer) enable_pruning_depthwise, enable_pruning_pointwise = get_enable_pruning(layer, config) @@ -1258,7 +1728,18 @@ def add_compression_layers_tf(model, config, input_shape=None): x = new_layer(x) act = check_activation(layer, config) elif isinstance(layer, Conv1D): - new_layer = PQConv1d(config, layer) + new_layer = PQConv1d( + config=config, + filters=layer.filters, + kernel_size=layer.kernel_size, + strides=layer.strides, + padding=layer.padding, + data_format=layer.data_format, + dilation_rate=layer.dilation_rate, + groups=layer.groups, + activation=None, + use_bias=layer.use_bias, + ) set_quantization_bits_weight_layers(config, layer, new_layer) enable_pruning = get_enable_pruning(layer, config) new_layer.set_enable_pruning(enable_pruning) @@ -1266,11 +1747,26 @@ def add_compression_layers_tf(model, config, input_shape=None): transpose_shape = new_layer.weight_transpose pruning_layer_input = ops.transpose(pruning_layer_input, transpose_shape) new_layer.pruning_layer.build(pruning_layer_input.shape) - + new_layer.build(x.shape) x = new_layer(x) + new_layer._kernel.assign(layer._kernel) + if layer.use_bias: + new_layer._bias.assign(layer.bias) act = check_activation(layer, config) elif isinstance(layer, Dense): - new_layer = PQDense(config, layer) + new_layer = PQDense( + config=config, + units=layer.units, + activation=layer.activation, + use_bias=layer.use_bias, + kernel_initializer=layer.kernel_initializer, + bias_initializer=layer.bias_initializer, + kernel_regularizer=layer.kernel_regularizer, + bias_regularizer=layer.bias_regularizer, + activity_regularizer=layer.activity_regularizer, + kernel_constraint=layer.kernel_constraint, + bias_constraint=layer.bias_constraint, + ) set_quantization_bits_weight_layers(config, layer, new_layer) enable_pruning = get_enable_pruning(layer, config) new_layer.set_enable_pruning(enable_pruning) @@ -1279,11 +1775,14 @@ def add_compression_layers_tf(model, config, input_shape=None): pruning_layer_input = ops.transpose(pruning_layer_input, transpose_shape) new_layer.pruning_layer.build(pruning_layer_input.shape) x = new_layer(x) + new_layer._kernel.assign(layer._kernel) + if layer.use_bias: + 
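# A plain-Keras sketch of the build-then-copy pattern each branch above uses
# (stand-in Dense layers, not from the patch): the replacement layer must be
# built before the old kernel can be assigned, because the destination
# variable only exists after build().
import keras

src = keras.layers.Dense(8); src.build((None, 4))
dst = keras.layers.Dense(8); dst.build((None, 4))
dst.kernel.assign(src.kernel)  # shapes now match, weights copied verbatim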
new_layer._bias.assign(layer.bias) act = check_activation(layer, config) # Activation layers elif isinstance(layer, ReLU): if config.quantization_parameters.enable_quantization: - new_layer = QuantizedReLU(config) + new_layer = PQActivation(config, "relu") set_quantization_bits_activations(config, layer, new_layer) new_layer.build(layer.input.shape) x = new_layer(x) @@ -1295,13 +1794,34 @@ def add_compression_layers_tf(model, config, input_shape=None): if new_layer is not None: x = new_layer(x) - elif isinstance(layer, (AveragePooling1D, AveragePooling2D, AveragePooling3D)): + elif isinstance(layer, AveragePooling1D): if config.quantization_parameters.enable_quantization: - new_layer = QuantizedPooling(config, layer) + new_layer = PQAvgPool1d( + config=config, + pool_size=layer.pool_size, + strides=layer.strides, + padding=layer.padding, + data_format=layer.data_format, + name=layer.name, + input_quantization_bits=layer.input_quantization_bits, + output_quantization_bits=layer.output_quantization_bits, + ) + set_quantization_bits_activations(config, layer, new_layer) + new_layer.build(x.shape) + x = new_layer(x) + elif isinstance(layer, AveragePooling2D): + if config.quantization_parameters.enable_quantization: + new_layer = PQAvgPool2d( + config=config, + pool_size=layer.pool_size, + strides=layer.strides, + padding=layer.padding, + data_format=layer.data_format, + name=layer.name, + ) set_quantization_bits_activations(config, layer, new_layer) new_layer.build(x.shape) x = new_layer(x) - elif isinstance(layer, (BatchNormalization)): if config.quantization_parameters.enable_quantization: new_layer = PQBatchNormalization( diff --git a/src/pquant/core/tf_impl/quantizer.py b/src/pquant/core/tf_impl/quantizer.py new file mode 100644 index 0000000..d79f6ee --- /dev/null +++ b/src/pquant/core/tf_impl/quantizer.py @@ -0,0 +1,57 @@ +import keras +from keras.initializers import Constant + +from pquant.core.quantizer_functions import create_quantizer + + +class Quantizer(keras.layers.Layer): + # HGQ quantizer wrapper + def __init__(self, k, i, f, overflow, round_mode, is_heterogeneous, is_data, hgq_gamma=0): + super().__init__() + self.k = k + self.i = i + self.f = f + self.overflow = overflow + self.round_mode = round_mode + self.use_hgq = is_heterogeneous + self.quantizer = create_quantizer(self.k, self.i, self.f, overflow, round_mode, is_heterogeneous, is_data) + self.is_pretraining = False + self.hgq_gamma = hgq_gamma + + def build(self, input_shape): + super().build(input_shape) + self.i = self.add_variable((), Constant(self.i), dtype="float32", trainable=False) + self.f = self.add_variable((), Constant(self.f), dtype="float32", trainable=False) + if self.use_hgq: + self.quantizer.build(input_shape) + + def get_quantization_bits(self): + if self.use_hgq: + return self.quantizer.quantizer.k, self.quantizer.quantizer.i, self.quantizer.quantizer.f + else: + return self.k, self.i, self.f + + def set_quantization_bits(self, i, f): + if self.use_hgq: + self.quantizer.quantizer._i.assign(self.quantizer.quantizer._i * 0.0 + i) + self.quantizer.quantizer._f.assign(self.quantizer.quantizer._f * 0.0 + f) + self.i = i + self.f = f + + def post_pretrain(self): + self.is_pretraining = True + + def call(self, x, training=None): + if not self.built: + self.build(x.shape) + if self.use_hgq: + x = self.quantizer(x) + else: + x = self.quantizer(x, k=self.k, i=self.i, f=self.f) + return x + + def hgq_loss(self): + if self.is_pretraining or not self.use_hgq: + return 0.0 + loss = 
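# A stand-alone sketch of the `v * 0.0 + x` idiom that set_quantization_bits
# uses above (not from the patch): it broadcasts a scalar into a variable of
# any shape without naming that shape.
import keras
from keras import ops

v = keras.Variable(ops.zeros((2, 3)))
v.assign(v * 0.0 + 5.0)  # every element becomes 5.0, shape preserved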
(keras.ops.sum(self.quantizer.quantizer.i) + keras.ops.sum(self.quantizer.quantizer.f)) * self.hgq_gamma + return loss diff --git a/tests/test_keras_compression_layers.py b/tests/test_keras_compression_layers.py index 6d9959d..616e1d0 100644 --- a/tests/test_keras_compression_layers.py +++ b/tests/test_keras_compression_layers.py @@ -16,13 +16,16 @@ SeparableConv2D, ) -from pquant.core.activations_quantizer import QuantizedReLU, QuantizedTanh +from pquant.core.tf_impl.activations import PQActivation from pquant.core.tf_impl.compressed_layers_tf import ( + PQAvgPool1d, + PQAvgPool2d, + PQBatchNormalization, PQConv1d, PQConv2d, PQDense, + PQDepthwiseConv2d, PQSeparableConv2d, - QuantizedPooling, add_compression_layers_tf, apply_final_compression_tf, get_layer_keep_ratio_tf, @@ -210,7 +213,7 @@ def config_cs(): return _to_obj(cfg) -@pytest.fixture +@pytest.fixture(scope="function", autouse=True) def conv2d_input(): if keras.backend.image_data_format() == "channels_first": inp = ops.convert_to_tensor(np.random.rand(BATCH_SIZE, IN_FEATURES, 32, 32)) @@ -219,7 +222,7 @@ def conv2d_input(): return inp -@pytest.fixture +@pytest.fixture(scope="function", autouse=True) def conv1d_input(): if keras.backend.image_data_format() == "channels_first": inp = ops.convert_to_tensor(np.random.rand(BATCH_SIZE, IN_FEATURES, 32)) @@ -228,7 +231,7 @@ def conv1d_input(): return inp -@pytest.fixture +@pytest.fixture(scope="function", autouse=True) def dense_input(): return ops.convert_to_tensor(np.random.rand(BATCH_SIZE, IN_FEATURES)) @@ -237,9 +240,15 @@ def test_dense_call(config_pdp, dense_input): layer_to_replace = Dense(OUT_FEATURES, use_bias=False) layer_to_replace.build((BATCH_SIZE, IN_FEATURES)) out = layer_to_replace(dense_input) - layer = PQDense(config_pdp, layer_to_replace, "linear") + layer = PQDense( + config_pdp, + units=OUT_FEATURES, + use_bias=False, + quantize_input=False, + quantize_output=False, + ) layer.build(dense_input.shape) - layer.weight.assign(layer_to_replace.kernel) + layer._kernel.assign(layer_to_replace.kernel) out2 = layer(dense_input) assert ops.all(ops.equal(out, out2)) @@ -248,9 +257,9 @@ def test_conv2d_call(config_pdp, conv2d_input): layer_to_replace = Conv2D(OUT_FEATURES, KERNEL_SIZE, use_bias=False, padding="same") layer_to_replace.build(conv2d_input.shape) out = layer_to_replace(conv2d_input) - layer = PQConv2d(config_pdp, layer_to_replace, "conv") + layer = PQConv2d(config_pdp, OUT_FEATURES, KERNEL_SIZE, padding="same", quantize_output=True, use_bias=True) layer.build(conv2d_input.shape) - layer.weight.assign(layer_to_replace.kernel) + layer._kernel.assign(layer_to_replace.kernel) out2 = layer(conv2d_input) assert ops.all(ops.equal(out, out2)) @@ -259,11 +268,30 @@ def test_separable_conv2d_call(config_pdp, conv2d_input): layer_to_replace = SeparableConv2D(OUT_FEATURES, KERNEL_SIZE, use_bias=False, padding="same") layer_to_replace.build(conv2d_input.shape) out = layer_to_replace(conv2d_input) - layer = PQSeparableConv2d(config_pdp, layer_to_replace) + layer = PQSeparableConv2d( + config_pdp, + layer_to_replace.filters, + layer_to_replace.kernel_size, + layer_to_replace.strides, + layer_to_replace.padding, + layer_to_replace.data_format, + layer_to_replace.dilation_rate, + layer_to_replace.depth_multiplier, + layer_to_replace.use_bias, + layer_to_replace.depthwise_initializer, + layer_to_replace.pointwise_initializer, + layer_to_replace.bias_initializer, + layer_to_replace.depthwise_regularizer, + layer_to_replace.pointwise_regularizer, + layer_to_replace.bias_regularizer, + 
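# A minimal sketch of the equivalence pattern these call-tests share (plain
# Dense stand-ins, not from the patch): copy the reference kernel into the
# wrapped layer, then require bit-identical outputs while pruning and
# quantization are still inactive.
import numpy as np
import keras
from keras import ops

x = ops.convert_to_tensor(np.random.rand(4, 16))
ref = keras.layers.Dense(8, use_bias=False); ref.build(x.shape)
dup = keras.layers.Dense(8, use_bias=False); dup.build(x.shape)
dup.kernel.assign(ref.kernel)
assert ops.all(ops.equal(ref(x), dup(x)))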
layer_to_replace.depthwise_constraint, + layer_to_replace.pointwise_constraint, + layer_to_replace.bias_constraint, + ) layer.depthwise_conv.build(conv2d_input.shape) layer.pointwise_conv.build(conv2d_input.shape) - layer.depthwise_conv.weight.assign(layer_to_replace.depthwise_kernel) - layer.pointwise_conv.weight.assign(layer_to_replace.pointwise_kernel) + layer.depthwise_conv._kernel.assign(layer_to_replace.depthwise_kernel) + layer.pointwise_conv._kernel.assign(layer_to_replace.pointwise_kernel) out2 = layer(conv2d_input) assert ops.all(ops.equal(out, out2)) @@ -282,11 +310,11 @@ def test_separable_conv2d_add_remove_layers(config_pdp, conv2d_input): pre_finetune_functions(model) # Set Depthwise mask to 50% 0s - mask_50pct_dw = ops.cast(ops.linspace(0, 1, num=ops.size(model.layers[1].depthwise_conv.weight)) < 0.5, "float32") + mask_50pct_dw = ops.cast(ops.linspace(0, 1, num=ops.size(model.layers[1].depthwise_conv.kernel)) < 0.5, "float32") mask_50pct_dw = ops.reshape(keras.random.shuffle(mask_50pct_dw), model.layers[1].depthwise_conv.pruning_layer.mask.shape) model.layers[1].depthwise_conv.pruning_layer.mask = mask_50pct_dw # Set Pointwise mask to 50% 0s - mask_50pct_pw = ops.cast(ops.linspace(0, 1, num=ops.size(model.layers[1].pointwise_conv.weight)) < 0.5, "float32") + mask_50pct_pw = ops.cast(ops.linspace(0, 1, num=ops.size(model.layers[1].pointwise_conv.kernel)) < 0.5, "float32") mask_50pct_pw = ops.reshape(keras.random.shuffle(mask_50pct_pw), model.layers[1].pointwise_conv.pruning_layer.mask.shape) model.layers[1].pointwise_conv.pruning_layer.mask = mask_50pct_pw @@ -297,11 +325,11 @@ def test_separable_conv2d_add_remove_layers(config_pdp, conv2d_input): assert ops.all(ops.equal(output1, output2)) expected_nonzero_count_depthwise = ops.count_nonzero(mask_50pct_dw) - nonzero_count_depthwise = ops.count_nonzero(model.layers[1].depthwise_conv.weight) + nonzero_count_depthwise = ops.count_nonzero(model.layers[1].depthwise_conv.kernel) assert ops.equal(expected_nonzero_count_depthwise, nonzero_count_depthwise) expected_nonzero_count_pointwise = ops.count_nonzero(mask_50pct_pw) - nonzero_count_pointwise = ops.count_nonzero(model.layers[1].pointwise_conv.weight) + nonzero_count_pointwise = ops.count_nonzero(model.layers[1].pointwise_conv.kernel) assert ops.equal(expected_nonzero_count_pointwise, nonzero_count_pointwise) @@ -316,11 +344,11 @@ def test_separable_conv2d_get_layer_keep_ratio(config_pdp, conv2d_input): pre_finetune_functions(model) # Set Depthwise mask to 50% 0s - mask_50pct_dw = ops.cast(ops.linspace(0, 1, num=ops.size(model.layers[1].depthwise_conv.weight)) < 0.5, "float32") + mask_50pct_dw = ops.cast(ops.linspace(0, 1, num=ops.size(model.layers[1].depthwise_conv.kernel)) < 0.5, "float32") mask_50pct_dw = ops.reshape(keras.random.shuffle(mask_50pct_dw), model.layers[1].depthwise_conv.pruning_layer.mask.shape) model.layers[1].depthwise_conv.pruning_layer.mask = mask_50pct_dw # Set Pointwise mask to 50% 0s - mask_50pct_pw = ops.cast(ops.linspace(0, 1, num=ops.size(model.layers[1].pointwise_conv.weight)) < 0.5, "float32") + mask_50pct_pw = ops.cast(ops.linspace(0, 1, num=ops.size(model.layers[1].pointwise_conv.kernel)) < 0.5, "float32") mask_50pct_pw = ops.reshape(keras.random.shuffle(mask_50pct_pw), model.layers[1].pointwise_conv.pruning_layer.mask.shape) model.layers[1].pointwise_conv.pruning_layer.mask = mask_50pct_pw @@ -362,9 +390,9 @@ def test_conv1d_call(config_pdp, conv1d_input): layer_to_replace = Conv1D(OUT_FEATURES, KERNEL_SIZE, strides=2, use_bias=False) 
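# A minimal sketch of the 50%-zeros mask these tests keep rebuilding (not
# from the patch): a 0..1 ramp thresholded at 0.5 gives exactly half ones,
# which is then shuffled and reshaped to the pruning mask's shape.
import keras
from keras import ops

flat = ops.cast(ops.linspace(0, 1, num=12) < 0.5, "float32")
mask = ops.reshape(keras.random.shuffle(flat), (3, 4))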
layer_to_replace.build(conv1d_input.shape) out = layer_to_replace(conv1d_input) - layer = PQConv1d(config_pdp, layer_to_replace, "conv") + layer = PQConv1d(config_pdp, OUT_FEATURES, KERNEL_SIZE, strides=2) layer.build(conv1d_input.shape) - layer.weight.assign(layer_to_replace.kernel) + layer._kernel.assign(layer_to_replace.kernel) out2 = layer(conv1d_input) assert ops.all(ops.equal(out, out2)) @@ -387,7 +415,7 @@ def test_dense_add_remove_layers(config_pdp, dense_input): output2 = model(dense_input) assert ops.all(ops.equal(output1, output2)) expected_nonzero_count = ops.count_nonzero(mask_50pct) - nonzero_count = ops.count_nonzero(model.layers[1].weight) + nonzero_count = ops.count_nonzero(model.layers[1].kernel) assert ops.equal(expected_nonzero_count, nonzero_count) @@ -409,7 +437,7 @@ def test_conv2d_add_remove_layers(config_pdp, conv2d_input): output2 = model(conv2d_input) assert ops.all(ops.equal(output1, output2)) expected_nonzero_count = ops.count_nonzero(mask_50pct) - nonzero_count = ops.count_nonzero(model.layers[1].weight) + nonzero_count = ops.count_nonzero(model.layers[1].kernel) assert ops.equal(expected_nonzero_count, nonzero_count) @@ -423,7 +451,7 @@ def test_depthwise_conv2d_add_remove_layers(config_pdp, conv2d_input): post_pretrain_functions(model, config_pdp) pre_finetune_functions(model) - mask_50pct = ops.cast(ops.linspace(0, 1, num=ops.size(model.layers[1].weight)) < 0.5, "float32") + mask_50pct = ops.cast(ops.linspace(0, 1, num=ops.size(model.layers[1].kernel)) < 0.5, "float32") mask_50pct = ops.reshape(keras.random.shuffle(mask_50pct), model.layers[1].pruning_layer.mask.shape) model.layers[1].pruning_layer.mask = mask_50pct output1 = model(conv2d_input) @@ -431,7 +459,7 @@ def test_depthwise_conv2d_add_remove_layers(config_pdp, conv2d_input): output2 = model(conv2d_input) assert ops.all(ops.equal(output1, output2)) expected_nonzero_count = ops.count_nonzero(mask_50pct) - nonzero_count = ops.count_nonzero(model.layers[1].weight) + nonzero_count = ops.count_nonzero(model.layers[1].kernel) assert ops.equal(expected_nonzero_count, nonzero_count) @@ -445,7 +473,7 @@ def test_conv1d_add_remove_layers(config_pdp, conv1d_input): post_pretrain_functions(model, config_pdp) pre_finetune_functions(model) - mask_50pct = ops.cast(ops.linspace(0, 1, num=ops.size(model.layers[1].weight)) < 0.5, "float32") + mask_50pct = ops.cast(ops.linspace(0, 1, num=ops.size(model.layers[1].kernel)) < 0.5, "float32") mask_50pct = ops.reshape(keras.random.shuffle(mask_50pct), model.layers[1].pruning_layer.mask.shape) model.layers[1].pruning_layer.mask = mask_50pct output1 = model(conv1d_input) @@ -453,7 +481,7 @@ def test_conv1d_add_remove_layers(config_pdp, conv1d_input): output2 = model(conv1d_input) assert ops.all(ops.equal(output1, output2)) expected_nonzero_count = ops.count_nonzero(mask_50pct) - nonzero_count = ops.count_nonzero(model.layers[1].weight) + nonzero_count = ops.count_nonzero(model.layers[1].kernel) assert ops.equal(expected_nonzero_count, nonzero_count) @@ -507,7 +535,7 @@ def test_depthwise_conv2d_get_layer_keep_ratio(config_pdp, conv2d_input): post_pretrain_functions(model, config_pdp) pre_finetune_functions(model) - mask_50pct = ops.cast(ops.linspace(0, 1, num=ops.size(model.layers[1].weight)) < 0.5, "float32") + mask_50pct = ops.cast(ops.linspace(0, 1, num=ops.size(model.layers[1].kernel)) < 0.5, "float32") mask_50pct = ops.reshape(keras.random.shuffle(mask_50pct), model.layers[1].pruning_layer.mask.shape) model.layers[1].pruning_layer.mask = mask_50pct ratio1 = 
get_layer_keep_ratio_tf(model) @@ -518,6 +546,7 @@ def test_depthwise_conv2d_get_layer_keep_ratio(config_pdp, conv2d_input): def test_conv1d_get_layer_keep_ratio(config_pdp, conv1d_input): + config_pdp.pruning_parameters.enable_pruning = True inputs = keras.Input(shape=conv1d_input.shape[1:]) out = Conv1D(OUT_FEATURES, KERNEL_SIZE, use_bias=False)(inputs) @@ -527,7 +556,7 @@ def test_conv1d_get_layer_keep_ratio(config_pdp, conv1d_input): post_pretrain_functions(model, config_pdp) pre_finetune_functions(model) - mask_50pct = ops.cast(ops.linspace(0, 1, num=ops.size(model.layers[1].weight)) < 0.5, "float32") + mask_50pct = ops.cast(ops.linspace(0, 1, num=ops.size(model.layers[1].kernel)) < 0.5, "float32") mask_50pct = ops.reshape(keras.random.shuffle(mask_50pct), model.layers[1].pruning_layer.mask.shape) model.layers[1].pruning_layer.mask = mask_50pct ratio1 = get_layer_keep_ratio_tf(model) @@ -551,7 +580,8 @@ def test_check_activation(config_pdp, dense_input): out = Dense(OUT_FEATURES, use_bias=False, activation="relu")(inputs) model = keras.Model(inputs=inputs, outputs=out, name="test_dense") model = add_compression_layers_tf(model, config_pdp, dense_input.shape) - assert isinstance(model.layers[2], QuantizedReLU) + assert isinstance(model.layers[2], PQActivation) + assert model.layers[2].activation_name == "relu" # Tanh config_pdp.quantization_parameters.enable_quantization = False @@ -568,7 +598,8 @@ def test_check_activation(config_pdp, dense_input): out = Dense(OUT_FEATURES, use_bias=False, activation="tanh")(inputs) model = keras.Model(inputs=inputs, outputs=out, name="test_dense") model = add_compression_layers_tf(model, config_pdp, dense_input.shape) - assert isinstance(model.layers[2], QuantizedTanh) + assert isinstance(model.layers[2], PQActivation) + assert model.layers[2].activation_name == "tanh" def test_hgq_activation_built(config_pdp, conv2d_input): @@ -613,14 +644,18 @@ def test_hgq_activation_built(config_pdp, conv2d_input): def test_ap_conv2d_channels_last_transpose(config_ap, conv2d_input): + if keras.backend.image_data_format() == "channels_last": + conv2d_input = ops.transpose(conv2d_input, (0, 3, 1, 2)) keras.backend.set_image_data_format("channels_first") inp = ops.reshape(ops.linspace(0, 1, ops.size(conv2d_input)), conv2d_input.shape) inputs = keras.Input(shape=inp.shape[1:]) - out = Conv2D(OUT_FEATURES, KERNEL_SIZE, use_bias=False, padding="same")(inputs) + out = Conv2D(OUT_FEATURES, KERNEL_SIZE, use_bias=False, padding="same", data_format="channels_first")(inputs) model_cf = keras.Model(inputs=inputs, outputs=out, name="test_conv2d") + model_cf(conv2d_input) + model_cf = add_compression_layers_tf(model_cf, config_ap, inp.shape) - weight_cf = model_cf.layers[1].weight + weight_cf = model_cf.layers[1].kernel post_pretrain_functions(model_cf, config_ap) model_cf(inp, training=True) @@ -634,34 +669,37 @@ def test_ap_conv2d_channels_last_transpose(config_ap, conv2d_input): out = Conv2D(OUT_FEATURES, KERNEL_SIZE, use_bias=False, padding="same")(inputs) model_cl = keras.Model(inputs=inputs, outputs=out, name="test_conv2d1") model_cl = add_compression_layers_tf(model_cl, config_ap, inp.shape) - model_cl.layers[1].weight.assign(weight_cf) + model_cl.layers[1]._kernel.assign(weight_cf) post_pretrain_functions(model_cl, config_ap) model_cl(inp, training=True) model_cl(inp, training=True) out_cl = model_cl(inp, training=True) cf_mask = model_cf.layers[1].pruning_layer.mask - cf_weight = ops.transpose(model_cf.layers[1].weight, model_cf.layers[1].weight_transpose) + cf_weight 
= ops.transpose(model_cf.layers[1].kernel, model_cf.layers[1].weight_transpose) cf_masked_weight = cf_mask * cf_weight cl_mask = model_cl.layers[1].pruning_layer.mask - cl_weight = ops.transpose(model_cl.layers[1].weight, model_cl.layers[1].weight_transpose) + cl_weight = ops.transpose(model_cl.layers[1].kernel, model_cl.layers[1].weight_transpose) cl_masked_weight = cl_mask * cl_weight out_cl_transposed = ops.transpose(out_cl, (model_cl.layers[1].data_transpose)) assert ops.all(ops.equal(ops.ravel(cf_mask), ops.ravel(cl_mask))) assert ops.all(ops.equal(cf_masked_weight, cl_masked_weight)) - np.testing.assert_allclose(out_cf, out_cl_transposed, rtol=0, atol=5e-6) + np.testing.assert_allclose(out_cf, out_cl_transposed, rtol=0, atol=5e-4) def test_ap_conv1d_channels_last_transpose(config_ap, conv1d_input): + if keras.backend.image_data_format() == "channels_last": + conv1d_input = ops.transpose(conv1d_input, (0, 2, 1)) keras.backend.set_image_data_format("channels_first") + inp = ops.reshape(ops.linspace(0, 1, ops.size(conv1d_input)), conv1d_input.shape) inputs = keras.Input(shape=inp.shape[1:]) - out = Conv1D(OUT_FEATURES, KERNEL_SIZE, use_bias=False, padding="same")(inputs) + out = Conv1D(OUT_FEATURES, KERNEL_SIZE, use_bias=False, padding="same", data_format="channels_first")(inputs) model_cf = keras.Model(inputs=inputs, outputs=out, name="test_conv1d") model_cf = add_compression_layers_tf(model_cf, config_ap, inp.shape) - weight_cf = model_cf.layers[1].weight + weight_cf = model_cf.layers[1]._kernel post_pretrain_functions(model_cf, config_ap) model_cf(inp, training=True) @@ -675,17 +713,17 @@ def test_ap_conv1d_channels_last_transpose(config_ap, conv1d_input): out = Conv1D(OUT_FEATURES, KERNEL_SIZE, use_bias=False, padding="same")(inputs) model_cl = keras.Model(inputs=inputs, outputs=out, name="test_conv1d1") model_cl = add_compression_layers_tf(model_cl, config_ap, inp.shape) - model_cl.layers[1].weight.assign(weight_cf) + model_cl.layers[1]._kernel.assign(weight_cf) post_pretrain_functions(model_cl, config_ap) model_cl(inp, training=True) model_cl(inp, training=True) out_cl = model_cl(inp, training=True) cf_mask = model_cf.layers[1].pruning_layer.mask - cf_weight = ops.transpose(model_cf.layers[1].weight, model_cf.layers[1].weight_transpose) + cf_weight = ops.transpose(model_cf.layers[1]._kernel, model_cf.layers[1].weight_transpose) cf_masked_weight = cf_mask * cf_weight cl_mask = model_cl.layers[1].pruning_layer.mask - cl_weight = ops.transpose(model_cl.layers[1].weight, model_cl.layers[1].weight_transpose) + cl_weight = ops.transpose(model_cl.layers[1]._kernel, model_cl.layers[1].weight_transpose) cl_masked_weight = cl_mask * cl_weight out_cl_transposed = ops.transpose(out_cl, (model_cl.layers[1].data_transpose)) @@ -702,7 +740,7 @@ def test_ap_depthwiseconv2d_channels_last_transpose(config_ap, conv2d_input): out = DepthwiseConv2D(KERNEL_SIZE, use_bias=False, padding="same")(inputs) model_cf = keras.Model(inputs=inputs, outputs=out, name="test_dwconv2d") model_cf = add_compression_layers_tf(model_cf, config_ap, inp.shape) - weight_cf = model_cf.layers[1].weight + weight_cf = model_cf.layers[1]._kernel post_pretrain_functions(model_cf, config_ap) model_cf(inp, training=True) @@ -716,17 +754,17 @@ def test_ap_depthwiseconv2d_channels_last_transpose(config_ap, conv2d_input): out = DepthwiseConv2D(KERNEL_SIZE, use_bias=False, padding="same")(inputs) model_cl = keras.Model(inputs=inputs, outputs=out, name="test_dwconv2d1") model_cl = add_compression_layers_tf(model_cl, config_ap, 
inp.shape) - model_cl.layers[1].weight.assign(weight_cf) + model_cl.layers[1]._kernel.assign(weight_cf) post_pretrain_functions(model_cl, config_ap) model_cl(inp, training=True) model_cl(inp, training=True) out_cl = model_cl(inp, training=True) cf_mask = model_cf.layers[1].pruning_layer.mask - cf_weight = ops.transpose(model_cf.layers[1].weight, model_cf.layers[1].weight_transpose) + cf_weight = ops.transpose(model_cf.layers[1]._kernel, model_cf.layers[1].weight_transpose) cf_masked_weight = cf_mask * cf_weight cl_mask = model_cl.layers[1].pruning_layer.mask - cl_weight = ops.transpose(model_cl.layers[1].weight, model_cl.layers[1].weight_transpose) + cl_weight = ops.transpose(model_cl.layers[1]._kernel, model_cl.layers[1].weight_transpose) cl_masked_weight = cl_mask * cl_weight out_cl_transposed = ops.transpose(out_cl, (model_cl.layers[1].data_transpose)) @@ -743,7 +781,7 @@ def test_ap_dense_channels_last_transpose(config_ap, dense_input): out = Dense(OUT_FEATURES, use_bias=False)(inputs) model_cf = keras.Model(inputs=inputs, outputs=out, name="test_dense") model_cf = add_compression_layers_tf(model_cf, config_ap, inp.shape) - weight_cf = model_cf.layers[1].weight + weight_cf = model_cf.layers[1]._kernel post_pretrain_functions(model_cf, config_ap) model_cf(inp, training=True) @@ -756,17 +794,17 @@ def test_ap_dense_channels_last_transpose(config_ap, dense_input): out = Dense(OUT_FEATURES, use_bias=False)(inputs) model_cl = keras.Model(inputs=inputs, outputs=out, name="test_dense1") model_cl = add_compression_layers_tf(model_cl, config_ap, inp.shape) - model_cl.layers[1].weight.assign(weight_cf) + model_cl.layers[1]._kernel.assign(weight_cf) post_pretrain_functions(model_cl, config_ap) model_cl(inp, training=True) model_cl(inp, training=True) out_cl = model_cl(inp, training=True) cf_mask = model_cf.layers[1].pruning_layer.mask - cf_weight = ops.transpose(model_cf.layers[1].weight, model_cf.layers[1].weight_transpose) + cf_weight = ops.transpose(model_cf.layers[1]._kernel, model_cf.layers[1].weight_transpose) cf_masked_weight = cf_mask * cf_weight cl_mask = model_cl.layers[1].pruning_layer.mask - cl_weight = ops.transpose(model_cl.layers[1].weight, model_cl.layers[1].weight_transpose) + cl_weight = ops.transpose(model_cl.layers[1]._kernel, model_cl.layers[1].weight_transpose) cl_masked_weight = cl_mask * cl_weight out_cl_transposed = ops.transpose(out_cl, (model_cl.layers[1].data_transpose)) @@ -786,7 +824,7 @@ def test_wanda_conv2d_channels_last_transpose(config_wanda, conv2d_input): out = Conv2D(OUT_FEATURES, KERNEL_SIZE, use_bias=False, padding="same")(inputs) model_cf = keras.Model(inputs=inputs, outputs=out, name="test_conv2d") model_cf = add_compression_layers_tf(model_cf, config_wanda, inp.shape) - weight_cf = model_cf.layers[1].weight + weight_cf = model_cf.layers[1]._kernel post_pretrain_functions(model_cf, config_wanda) model_cf(inp, training=True) @@ -800,17 +838,17 @@ def test_wanda_conv2d_channels_last_transpose(config_wanda, conv2d_input): out = Conv2D(OUT_FEATURES, KERNEL_SIZE, use_bias=False, padding="same")(inputs) model_cl = keras.Model(inputs=inputs, outputs=out, name="test_conv2d1") model_cl = add_compression_layers_tf(model_cl, config_wanda, inp.shape) - model_cl.layers[1].weight.assign(weight_cf) + model_cl.layers[1]._kernel.assign(weight_cf) post_pretrain_functions(model_cl, config_wanda) model_cl(inp, training=True) model_cl(inp, training=True) out_cl = model_cl(inp, training=True) cf_mask = model_cf.layers[1].pruning_layer.mask - cf_weight = 
ops.transpose(model_cf.layers[1].weight, model_cf.layers[1].weight_transpose) + cf_weight = ops.transpose(model_cf.layers[1]._kernel, model_cf.layers[1].weight_transpose) cf_masked_weight = cf_mask * cf_weight cl_mask = model_cl.layers[1].pruning_layer.mask - cl_weight = ops.transpose(model_cl.layers[1].weight, model_cl.layers[1].weight_transpose) + cl_weight = ops.transpose(model_cl.layers[1]._kernel, model_cl.layers[1].weight_transpose) cl_masked_weight = cl_mask * cl_weight out_cl_transposed = ops.transpose(out_cl, (model_cl.layers[1].data_transpose)) @@ -827,7 +865,7 @@ def test_wanda_conv1d_channels_last_transpose(config_wanda, conv1d_input): out = Conv1D(OUT_FEATURES, KERNEL_SIZE, use_bias=False, padding="same")(inputs) model_cf = keras.Model(inputs=inputs, outputs=out, name="test_conv1d") model_cf = add_compression_layers_tf(model_cf, config_wanda, inp.shape) - weight_cf = model_cf.layers[1].weight + weight_cf = model_cf.layers[1]._kernel post_pretrain_functions(model_cf, config_wanda) model_cf(inp, training=True) @@ -841,17 +879,17 @@ def test_wanda_conv1d_channels_last_transpose(config_wanda, conv1d_input): out = Conv1D(OUT_FEATURES, KERNEL_SIZE, use_bias=False, padding="same")(inputs) model_cl = keras.Model(inputs=inputs, outputs=out, name="test_conv1d1") model_cl = add_compression_layers_tf(model_cl, config_wanda, inp.shape) - model_cl.layers[1].weight.assign(weight_cf) + model_cl.layers[1]._kernel.assign(weight_cf) post_pretrain_functions(model_cl, config_wanda) model_cl(inp, training=True) model_cl(inp, training=True) out_cl = model_cl(inp, training=True) cf_mask = model_cf.layers[1].pruning_layer.mask - cf_weight = ops.transpose(model_cf.layers[1].weight, model_cf.layers[1].weight_transpose) + cf_weight = ops.transpose(model_cf.layers[1]._kernel, model_cf.layers[1].weight_transpose) cf_masked_weight = cf_mask * cf_weight cl_mask = model_cl.layers[1].pruning_layer.mask - cl_weight = ops.transpose(model_cl.layers[1].weight, model_cl.layers[1].weight_transpose) + cl_weight = ops.transpose(model_cl.layers[1]._kernel, model_cl.layers[1].weight_transpose) cl_masked_weight = cl_mask * cl_weight out_cl_transposed = ops.transpose(out_cl, (model_cl.layers[1].data_transpose)) @@ -868,7 +906,7 @@ def test_wanda_depthwiseconv2d_channels_last_transpose(config_wanda, conv2d_inpu out = DepthwiseConv2D(KERNEL_SIZE, use_bias=False, padding="same")(inputs) model_cf = keras.Model(inputs=inputs, outputs=out, name="test_dwconv2d") model_cf = add_compression_layers_tf(model_cf, config_wanda, inp.shape) - weight_cf = model_cf.layers[1].weight + weight_cf = model_cf.layers[1]._kernel post_pretrain_functions(model_cf, config_wanda) model_cf(inp, training=True) @@ -882,17 +920,17 @@ def test_wanda_depthwiseconv2d_channels_last_transpose(config_wanda, conv2d_inpu out = DepthwiseConv2D(KERNEL_SIZE, use_bias=False, padding="same")(inputs) model_cl = keras.Model(inputs=inputs, outputs=out, name="test_dwconv2d1") model_cl = add_compression_layers_tf(model_cl, config_wanda, inp.shape) - model_cl.layers[1].weight.assign(weight_cf) + model_cl.layers[1]._kernel.assign(weight_cf) post_pretrain_functions(model_cl, config_wanda) model_cl(inp, training=True) model_cl(inp, training=True) out_cl = model_cl(inp, training=True) cf_mask = model_cf.layers[1].pruning_layer.mask - cf_weight = ops.transpose(model_cf.layers[1].weight, model_cf.layers[1].weight_transpose) + cf_weight = ops.transpose(model_cf.layers[1]._kernel, model_cf.layers[1].weight_transpose) cf_masked_weight = cf_mask * cf_weight cl_mask = 
model_cl.layers[1].pruning_layer.mask - cl_weight = ops.transpose(model_cl.layers[1].weight, model_cl.layers[1].weight_transpose) + cl_weight = ops.transpose(model_cl.layers[1]._kernel, model_cl.layers[1].weight_transpose) cl_masked_weight = cl_mask * cl_weight out_cl_transposed = ops.transpose(out_cl, (model_cl.layers[1].data_transpose)) @@ -909,7 +947,7 @@ def test_wanda_dense_channels_last_transpose(config_wanda, dense_input): out = Dense(OUT_FEATURES, use_bias=False)(inputs) model_cf = keras.Model(inputs=inputs, outputs=out, name="test_dense") model_cf = add_compression_layers_tf(model_cf, config_wanda, inp.shape) - weight_cf = model_cf.layers[1].weight + weight_cf = model_cf.layers[1]._kernel post_pretrain_functions(model_cf, config_wanda) model_cf(inp, training=True) @@ -922,17 +960,17 @@ def test_wanda_dense_channels_last_transpose(config_wanda, dense_input): out = Dense(OUT_FEATURES, use_bias=False)(inputs) model_cl = keras.Model(inputs=inputs, outputs=out, name="test_dense1") model_cl = add_compression_layers_tf(model_cl, config_wanda, inp.shape) - model_cl.layers[1].weight.assign(weight_cf) + model_cl.layers[1]._kernel.assign(weight_cf) post_pretrain_functions(model_cl, config_wanda) model_cl(inp, training=True) model_cl(inp, training=True) out_cl = model_cl(inp, training=True) cf_mask = model_cf.layers[1].pruning_layer.mask - cf_weight = ops.transpose(model_cf.layers[1].weight, model_cf.layers[1].weight_transpose) + cf_weight = ops.transpose(model_cf.layers[1]._kernel, model_cf.layers[1].weight_transpose) cf_masked_weight = cf_mask * cf_weight cl_mask = model_cl.layers[1].pruning_layer.mask - cl_weight = ops.transpose(model_cl.layers[1].weight, model_cl.layers[1].weight_transpose) + cl_weight = ops.transpose(model_cl.layers[1]._kernel, model_cl.layers[1].weight_transpose) cl_masked_weight = cl_mask * cl_weight out_cl_transposed = ops.transpose(out_cl, (model_cl.layers[1].data_transpose)) @@ -952,7 +990,7 @@ def test_pdp_conv2d_channels_last_transpose(config_pdp, conv2d_input): out = Conv2D(OUT_FEATURES, KERNEL_SIZE, use_bias=False, padding="same")(inputs) model_cf = keras.Model(inputs=inputs, outputs=out, name="test_conv2d") model_cf = add_compression_layers_tf(model_cf, config_pdp, inp.shape) - weight_cf = model_cf.layers[1].weight + weight_cf = model_cf.layers[1]._kernel post_pretrain_functions(model_cf, config_pdp) model_cf(inp, training=True) @@ -966,17 +1004,17 @@ def test_pdp_conv2d_channels_last_transpose(config_pdp, conv2d_input): out = Conv2D(OUT_FEATURES, KERNEL_SIZE, use_bias=False, padding="same")(inputs) model_cl = keras.Model(inputs=inputs, outputs=out, name="test_conv2d1") model_cl = add_compression_layers_tf(model_cl, config_pdp, inp.shape) - model_cl.layers[1].weight.assign(weight_cf) + model_cl.layers[1]._kernel.assign(weight_cf) post_pretrain_functions(model_cl, config_pdp) model_cl(inp, training=True) model_cl(inp, training=True) out_cl = model_cl(inp, training=True) cf_mask = model_cf.layers[1].pruning_layer.mask - cf_weight = ops.transpose(model_cf.layers[1].weight, model_cf.layers[1].weight_transpose) + cf_weight = ops.transpose(model_cf.layers[1]._kernel, model_cf.layers[1].weight_transpose) cf_masked_weight = cf_mask * cf_weight cl_mask = model_cl.layers[1].pruning_layer.mask - cl_weight = ops.transpose(model_cl.layers[1].weight, model_cl.layers[1].weight_transpose) + cl_weight = ops.transpose(model_cl.layers[1]._kernel, model_cl.layers[1].weight_transpose) cl_masked_weight = cl_mask * cl_weight out_cl_transposed = ops.transpose(out_cl, 
(model_cl.layers[1].data_transpose)) @@ -993,7 +1031,7 @@ def test_pdp_conv1d_channels_last_transpose(config_pdp, conv1d_input): out = Conv1D(OUT_FEATURES, KERNEL_SIZE, use_bias=False, padding="same")(inputs) model_cf = keras.Model(inputs=inputs, outputs=out, name="test_conv1d") model_cf = add_compression_layers_tf(model_cf, config_pdp, inp.shape) - weight_cf = model_cf.layers[1].weight + weight_cf = model_cf.layers[1]._kernel post_pretrain_functions(model_cf, config_pdp) model_cf(inp, training=True) @@ -1007,17 +1045,17 @@ def test_pdp_conv1d_channels_last_transpose(config_pdp, conv1d_input): out = Conv1D(OUT_FEATURES, KERNEL_SIZE, use_bias=False, padding="same")(inputs) model_cl = keras.Model(inputs=inputs, outputs=out, name="test_conv1d1") model_cl = add_compression_layers_tf(model_cl, config_pdp, inp.shape) - model_cl.layers[1].weight.assign(weight_cf) + model_cl.layers[1]._kernel.assign(weight_cf) post_pretrain_functions(model_cl, config_pdp) model_cl(inp, training=True) model_cl(inp, training=True) out_cl = model_cl(inp, training=True) cf_mask = model_cf.layers[1].pruning_layer.mask - cf_weight = ops.transpose(model_cf.layers[1].weight, model_cf.layers[1].weight_transpose) + cf_weight = ops.transpose(model_cf.layers[1]._kernel, model_cf.layers[1].weight_transpose) cf_masked_weight = cf_mask * cf_weight cl_mask = model_cl.layers[1].pruning_layer.mask - cl_weight = ops.transpose(model_cl.layers[1].weight, model_cl.layers[1].weight_transpose) + cl_weight = ops.transpose(model_cl.layers[1]._kernel, model_cl.layers[1].weight_transpose) cl_masked_weight = cl_mask * cl_weight out_cl_transposed = ops.transpose(out_cl, (model_cl.layers[1].data_transpose)) @@ -1034,7 +1072,7 @@ def test_pdp_depthwiseconv2d_channels_last_transpose(config_pdp, conv2d_input): out = DepthwiseConv2D(KERNEL_SIZE, use_bias=False, padding="same")(inputs) model_cf = keras.Model(inputs=inputs, outputs=out, name="test_dwconv2d") model_cf = add_compression_layers_tf(model_cf, config_pdp, inp.shape) - weight_cf = model_cf.layers[1].weight + weight_cf = model_cf.layers[1]._kernel post_pretrain_functions(model_cf, config_pdp) model_cf(inp, training=True) @@ -1048,17 +1086,17 @@ def test_pdp_depthwiseconv2d_channels_last_transpose(config_pdp, conv2d_input): out = DepthwiseConv2D(KERNEL_SIZE, use_bias=False, padding="same")(inputs) model_cl = keras.Model(inputs=inputs, outputs=out, name="test_dwconv2d1") model_cl = add_compression_layers_tf(model_cl, config_pdp, inp.shape) - model_cl.layers[1].weight.assign(weight_cf) + model_cl.layers[1]._kernel.assign(weight_cf) post_pretrain_functions(model_cl, config_pdp) model_cl(inp, training=True) model_cl(inp, training=True) out_cl = model_cl(inp, training=True) cf_mask = model_cf.layers[1].pruning_layer.mask - cf_weight = ops.transpose(model_cf.layers[1].weight, model_cf.layers[1].weight_transpose) + cf_weight = ops.transpose(model_cf.layers[1]._kernel, model_cf.layers[1].weight_transpose) cf_masked_weight = cf_mask * cf_weight cl_mask = model_cl.layers[1].pruning_layer.mask - cl_weight = ops.transpose(model_cl.layers[1].weight, model_cl.layers[1].weight_transpose) + cl_weight = ops.transpose(model_cl.layers[1]._kernel, model_cl.layers[1].weight_transpose) cl_masked_weight = cl_mask * cl_weight out_cl_transposed = ops.transpose(out_cl, (model_cl.layers[1].data_transpose)) @@ -1074,7 +1112,7 @@ def test_pdp_dense_channels_last_transpose(config_pdp, dense_input): out = Dense(OUT_FEATURES, use_bias=False)(inputs) model_cf = keras.Model(inputs=inputs, outputs=out, name="test_dense") 
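# A numpy sketch of the layout invariant every *_channels_last_transpose test
# asserts (not from the patch): identical kernels run under channels_first
# and channels_last must agree once one output is permuted into the other
# layout; NCHW -> NHWC is axes (0, 2, 3, 1) and NHWC -> NCHW is (0, 3, 1, 2).
import numpy as np

x_nchw = np.random.rand(2, 3, 8, 8).astype("float32")
x_nhwc = np.transpose(x_nchw, (0, 2, 3, 1))
assert np.array_equal(np.transpose(x_nhwc, (0, 3, 1, 2)), x_nchw)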
model_cf = add_compression_layers_tf(model_cf, config_pdp, inp.shape) - weight_cf = model_cf.layers[1].weight + weight_cf = model_cf.layers[1]._kernel post_pretrain_functions(model_cf, config_pdp) model_cf(inp, training=True) @@ -1087,17 +1125,17 @@ def test_pdp_dense_channels_last_transpose(config_pdp, dense_input): out = Dense(OUT_FEATURES, use_bias=False)(inputs) model_cl = keras.Model(inputs=inputs, outputs=out, name="test_dense1") model_cl = add_compression_layers_tf(model_cl, config_pdp, inp.shape) - model_cl.layers[1].weight.assign(weight_cf) + model_cl.layers[1]._kernel.assign(weight_cf) post_pretrain_functions(model_cl, config_pdp) model_cl(inp, training=True) model_cl(inp, training=True) out_cl = model_cl(inp, training=True) cf_mask = model_cf.layers[1].pruning_layer.mask - cf_weight = ops.transpose(model_cf.layers[1].weight, model_cf.layers[1].weight_transpose) + cf_weight = ops.transpose(model_cf.layers[1].kernel, model_cf.layers[1].weight_transpose) cf_masked_weight = cf_mask * cf_weight cl_mask = model_cl.layers[1].pruning_layer.mask - cl_weight = ops.transpose(model_cl.layers[1].weight, model_cl.layers[1].weight_transpose) + cl_weight = ops.transpose(model_cl.layers[1].kernel, model_cl.layers[1].weight_transpose) cl_masked_weight = cl_mask * cl_weight out_cl_transposed = ops.transpose(out_cl, (model_cl.layers[1].data_transpose)) assert ops.all(ops.equal(ops.ravel(cf_mask), ops.ravel(cl_mask))) @@ -1116,7 +1154,7 @@ def test_cs_conv2d_channels_last_transpose(config_cs, conv2d_input): out = Conv2D(OUT_FEATURES, KERNEL_SIZE, use_bias=False, padding="same")(inputs) model_cf = keras.Model(inputs=inputs, outputs=out, name="test_conv2d") model_cf = add_compression_layers_tf(model_cf, config_cs, inp.shape) - weight_cf = model_cf.layers[1].weight + weight_cf = model_cf.layers[1]._kernel s = model_cf.layers[1].pruning_layer.s.value new_s = np.zeros_like(s) + 0.1 new_s = np.reshape(new_s, -1) @@ -1133,7 +1171,7 @@ def test_cs_conv2d_channels_last_transpose(config_cs, conv2d_input): out = Conv2D(OUT_FEATURES, KERNEL_SIZE, use_bias=False, padding="same")(inputs) model_cl = keras.Model(inputs=inputs, outputs=out, name="test_conv2d1") model_cl = add_compression_layers_tf(model_cl, config_cs, inp.shape) - model_cl.layers[1].weight.assign(weight_cf) + model_cl.layers[1]._kernel.assign(weight_cf) model_cl.layers[1].pruning_layer.s.assign(new_s) post_pretrain_functions(model_cl, config_cs) @@ -1141,10 +1179,10 @@ def test_cs_conv2d_channels_last_transpose(config_cs, conv2d_input): out_cl = model_cl(inp, training=True) cf_mask = model_cf.layers[1].pruning_layer.get_hard_mask(None) - cf_weight = ops.transpose(model_cf.layers[1].weight, model_cf.layers[1].weight_transpose) + cf_weight = ops.transpose(model_cf.layers[1]._kernel, model_cf.layers[1].weight_transpose) cf_masked_weight = cf_mask * cf_weight cl_mask = model_cl.layers[1].pruning_layer.get_hard_mask(None) - cl_weight = ops.transpose(model_cl.layers[1].weight, model_cl.layers[1].weight_transpose) + cl_weight = ops.transpose(model_cl.layers[1]._kernel, model_cl.layers[1].weight_transpose) cl_masked_weight = cl_mask * cl_weight out_cl_transposed = ops.transpose(out_cl, (model_cl.layers[1].data_transpose)) assert ops.all(ops.equal(ops.ravel(cf_mask), ops.ravel(cl_mask))) @@ -1160,7 +1198,7 @@ def test_cs_conv1d_channels_last_transpose(config_cs, conv1d_input): out = Conv1D(OUT_FEATURES, KERNEL_SIZE, use_bias=False, padding="same")(inputs) model_cf = keras.Model(inputs=inputs, outputs=out, name="test_conv1d") model_cf = 
add_compression_layers_tf(model_cf, config_cs, inp.shape) - weight_cf = model_cf.layers[1].weight + weight_cf = model_cf.layers[1]._kernel post_pretrain_functions(model_cf, config_cs) model_cf(inp, training=True) @@ -1174,17 +1212,17 @@ def test_cs_conv1d_channels_last_transpose(config_cs, conv1d_input): out = Conv1D(OUT_FEATURES, KERNEL_SIZE, use_bias=False, padding="same")(inputs) model_cl = keras.Model(inputs=inputs, outputs=out, name="test_conv1d1") model_cl = add_compression_layers_tf(model_cl, config_cs, inp.shape) - model_cl.layers[1].weight.assign(weight_cf) + model_cl.layers[1]._kernel.assign(weight_cf) post_pretrain_functions(model_cl, config_cs) model_cl(inp, training=True) model_cl(inp, training=True) out_cl = model_cl(inp, training=True) cf_mask = model_cf.layers[1].pruning_layer.mask - cf_weight = ops.transpose(model_cf.layers[1].weight, model_cf.layers[1].weight_transpose) + cf_weight = ops.transpose(model_cf.layers[1]._kernel, model_cf.layers[1].weight_transpose) cf_masked_weight = cf_mask * cf_weight cl_mask = model_cl.layers[1].pruning_layer.mask - cl_weight = ops.transpose(model_cl.layers[1].weight, model_cl.layers[1].weight_transpose) + cl_weight = ops.transpose(model_cl.layers[1]._kernel, model_cl.layers[1].weight_transpose) cl_masked_weight = cl_mask * cl_weight out_cl_transposed = ops.transpose(out_cl, (model_cl.layers[1].data_transpose)) @@ -1201,7 +1239,7 @@ def test_cs_depthwiseconv2d_channels_last_transpose(config_cs, conv2d_input): out = DepthwiseConv2D(KERNEL_SIZE, use_bias=False, padding="same")(inputs) model_cf = keras.Model(inputs=inputs, outputs=out, name="test_dwconv2d") model_cf = add_compression_layers_tf(model_cf, config_cs, inp.shape) - weight_cf = model_cf.layers[1].weight + weight_cf = model_cf.layers[1]._kernel post_pretrain_functions(model_cf, config_cs) model_cf(inp, training=True) @@ -1215,17 +1253,17 @@ def test_cs_depthwiseconv2d_channels_last_transpose(config_cs, conv2d_input): out = DepthwiseConv2D(KERNEL_SIZE, use_bias=False, padding="same")(inputs) model_cl = keras.Model(inputs=inputs, outputs=out, name="test_dwconv2d1") model_cl = add_compression_layers_tf(model_cl, config_cs, inp.shape) - model_cl.layers[1].weight.assign(weight_cf) + model_cl.layers[1]._kernel.assign(weight_cf) post_pretrain_functions(model_cl, config_cs) model_cl(inp, training=True) model_cl(inp, training=True) out_cl = model_cl(inp, training=True) cf_mask = model_cf.layers[1].pruning_layer.mask - cf_weight = ops.transpose(model_cf.layers[1].weight, model_cf.layers[1].weight_transpose) + cf_weight = ops.transpose(model_cf.layers[1]._kernel, model_cf.layers[1].weight_transpose) cf_masked_weight = cf_mask * cf_weight cl_mask = model_cl.layers[1].pruning_layer.mask - cl_weight = ops.transpose(model_cl.layers[1].weight, model_cl.layers[1].weight_transpose) + cl_weight = ops.transpose(model_cl.layers[1]._kernel, model_cl.layers[1].weight_transpose) cl_masked_weight = cl_mask * cl_weight out_cl_transposed = ops.transpose(out_cl, (model_cl.layers[1].data_transpose)) @@ -1242,7 +1280,7 @@ def test_cs_dense_channels_last_transpose(config_cs, dense_input): out = Dense(OUT_FEATURES, use_bias=False)(inputs) model_cf = keras.Model(inputs=inputs, outputs=out, name="test_dense") model_cf = add_compression_layers_tf(model_cf, config_cs, inp.shape) - weight_cf = model_cf.layers[1].weight + weight_cf = model_cf.layers[1]._kernel post_pretrain_functions(model_cf, config_cs) model_cf(inp, training=True) @@ -1255,17 +1293,17 @@ def test_cs_dense_channels_last_transpose(config_cs, 
dense_input): out = Dense(OUT_FEATURES, use_bias=False)(inputs) model_cl = keras.Model(inputs=inputs, outputs=out, name="test_dense1") model_cl = add_compression_layers_tf(model_cl, config_cs, inp.shape) - model_cl.layers[1].weight.assign(weight_cf) + model_cl.layers[1]._kernel.assign(weight_cf) post_pretrain_functions(model_cl, config_cs) model_cl(inp, training=True) model_cl(inp, training=True) out_cl = model_cl(inp, training=True) cf_mask = model_cf.layers[1].pruning_layer.mask - cf_weight = ops.transpose(model_cf.layers[1].weight, model_cf.layers[1].weight_transpose) + cf_weight = ops.transpose(model_cf.layers[1]._kernel, model_cf.layers[1].weight_transpose) cf_masked_weight = cf_mask * cf_weight cl_mask = model_cl.layers[1].pruning_layer.mask - cl_weight = ops.transpose(model_cl.layers[1].weight, model_cl.layers[1].weight_transpose) + cl_weight = ops.transpose(model_cl.layers[1]._kernel, model_cl.layers[1].weight_transpose) cl_masked_weight = cl_mask * cl_weight out_cl_transposed = ops.transpose(out_cl, (model_cl.layers[1].data_transpose)) assert ops.all(ops.equal(ops.ravel(cf_mask), ops.ravel(cl_mask))) @@ -1290,8 +1328,8 @@ def test_calculate_pruning_budget(config_wanda, dense_input): weight2 = ops.reshape(ops.linspace(0.01, 0.99, OUT_FEATURES * OUT_FEATURES), (OUT_FEATURES, OUT_FEATURES)) model = add_compression_layers_tf(model, config_wanda, dense_input.shape) - model.layers[1].weight.assign(weight) - model.layers[2].weight.assign(weight2) + model.layers[1]._kernel.assign(weight) + model.layers[2]._kernel.assign(weight2) # Triggers calculation of pruning budget for PDP and Wanda post_pretrain_functions(model, config_wanda) total_weights = IN_FEATURES * OUT_FEATURES + OUT_FEATURES * OUT_FEATURES @@ -1299,7 +1337,7 @@ def test_calculate_pruning_budget(config_wanda, dense_input): for layer in model.layers: if hasattr(layer, "pruning_layer"): calculated_sparsity = layer.pruning_layer.sparsity - remaining_weights += (1 - calculated_sparsity) * ops.cast(ops.size(layer.weight), "float32") + remaining_weights += (1 - calculated_sparsity) * ops.cast(ops.size(layer.kernel), "float32") # First layer should have 50% sparsity, total sparsity should be around 75% assert model.layers[1].pruning_layer.sparsity == 0.5 np.testing.assert_allclose(remaining_weights / total_weights, 1 - sparsity, atol=1e-3, rtol=0) @@ -1340,9 +1378,9 @@ def test_hgq_weight_shape(config_pdp, dense_input): model = keras.Model(inputs=inputs, outputs=act2, name="test_conv2d") model = add_compression_layers_tf(model, config_pdp, dense_input.shape) - assert model.layers[1].weight_quantizer.quantizer.quantizer._i.shape == model.layers[1].weight.shape + assert model.layers[1].weight_quantizer.quantizer.quantizer._i.shape == model.layers[1].kernel.shape layer_2_input_shape = [1] + list(model.layers[2].input.shape[1:]) - assert model.layers[2].input_quantizer.quantizer._i.shape == layer_2_input_shape + assert model.layers[2].input_quantizer.quantizer.quantizer._i.shape == layer_2_input_shape def test_replace_weight_with_original_value(config_pdp, conv2d_input, conv1d_input, dense_input): @@ -1391,22 +1429,22 @@ def test_set_activation_custom_bits_hgq(config_pdp, conv2d_input): for m in model.layers: if isinstance(m, (PQConv2d)): - iw, fw = m.get_weight_quantization_bits() - ib, fb = m.get_bias_quantization_bits() + _, iw, fw = m.get_weight_quantization_bits() + _, ib, fb = m.get_bias_quantization_bits() assert ops.all(iw == 0.0) assert ops.all(ib == 0.0) assert ops.all(fw == 7.0) assert ops.all(fb == 7.0) - elif isinstance(m, 
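# The arithmetic behind test_calculate_pruning_budget, with example numbers
# (not from the patch): per-layer sparsities weighted by layer size must
# reproduce the global target, e.g. 0.5 and 1.0 over two equally sized
# layers combine to 75% overall sparsity.
sizes = [64 * 64, 64 * 64]
sparsities = [0.5, 1.0]
remaining = sum((1 - s) * n for s, n in zip(sparsities, sizes))
overall = 1 - remaining / sum(sizes)  # 0.75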
(QuantizedTanh)): + elif isinstance(m, PQActivation) and m.activation_name == "tanh": k_input, i_input, f_input = m.get_input_quantization_bits() assert ops.all(i_input == 0.0) assert ops.all(f_input == 7.0) - elif isinstance(m, (QuantizedReLU)): + elif isinstance(m, PQActivation) and m.activation_name == "relu": k_input, i_input, f_input = m.get_input_quantization_bits() assert ops.all(i_input == 0.0) assert ops.all(f_input == 8.0) - elif isinstance(m, (QuantizedPooling)): - i_input, f_input = m.get_input_quantization_bits() + elif isinstance(m, (PQAvgPool2d)): + _, i_input, f_input = m.get_input_quantization_bits() assert ops.all(i_input == 0.0) assert ops.all(f_input == 7.0) @@ -1429,22 +1467,22 @@ def test_set_activation_custom_bits_hgq(config_pdp, conv2d_input): model = add_compression_layers_tf(model, config_pdp, conv2d_input.shape) for m in model.layers: if isinstance(m, (PQConv2d)): - iw, fw = m.get_weight_quantization_bits() - ib, fb = m.get_bias_quantization_bits() + _, iw, fw = m.get_weight_quantization_bits() + _, ib, fb = m.get_bias_quantization_bits() assert ops.all(iw == 1.0) assert ops.all(ib == 2.0) assert ops.all(fw == 3.0) assert ops.all(fb == 4.0) - elif isinstance(m, (QuantizedTanh)): + elif isinstance(m, PQActivation) and m.activation_name == "tanh": k_input, i_input, f_input = m.get_input_quantization_bits() assert ops.all(i_input == 0.0) assert ops.all(f_input == 3.0) - elif isinstance(m, (QuantizedReLU)): + elif isinstance(m, PQActivation) and m.activation_name == "relu": k_input, i_input, f_input = m.get_input_quantization_bits() assert ops.all(i_input == 1.0) assert ops.all(f_input == 3.0) - elif isinstance(m, (QuantizedPooling)): - i_input, f_input = m.get_input_quantization_bits() + elif isinstance(m, (PQAvgPool2d)): + _, i_input, f_input = m.get_input_quantization_bits() assert ops.all(i_input == 1.0) assert ops.all(f_input == 3.0) @@ -1467,13 +1505,13 @@ def test_set_activation_custom_bits_quantizer(config_pdp, conv2d_input): assert m.f_weight == 7.0 assert m.f_bias == 7.0 - elif isinstance(m, (QuantizedTanh)): + elif isinstance(m, PQActivation) and m.activation_name == "tanh": assert m.i_input == 0.0 assert m.f_input == 7.0 - elif isinstance(m, (QuantizedReLU)): + elif isinstance(m, PQActivation) and m.activation_name == "relu": assert m.i_input == 0.0 assert m.f_input == 8.0 - elif isinstance(m, (QuantizedPooling)): + elif isinstance(m, (PQAvgPool2d)): assert m.i_input == 0.0 assert m.f_input == 7.0 @@ -1501,13 +1539,13 @@ def test_set_activation_custom_bits_quantizer(config_pdp, conv2d_input): assert m.f_weight == 3.0 assert m.f_bias == 4.0 - elif isinstance(m, (QuantizedTanh)): + elif isinstance(m, PQActivation) and m.activation_name == "tanh": assert m.i_input == 0.0 assert m.f_input == 3.0 - elif isinstance(m, (QuantizedReLU)): + elif isinstance(m, PQActivation) and m.activation_name == "relu": assert m.i_input == 1.0 assert m.f_input == 3.0 - elif isinstance(m, (QuantizedPooling)): + elif isinstance(m, (PQAvgPool2d)): assert m.i_input == 1.0 assert m.f_input == 3.0 @@ -1582,13 +1620,12 @@ def test_ebops_bn(config_pdp, conv2d_input): inputs = keras.Input(shape=conv2d_input.shape[1:]) out = Conv2D(OUT_FEATURES, KERNEL_SIZE)(inputs) axis = 1 if keras.backend.image_data_format() == "channels_first" else -1 - out = BatchNormalization(axis=axis)(out) act = ReLU()(out) model = keras.Model(inputs=inputs, outputs=act, name="test_bn") model = add_compression_layers_tf(model, config_pdp, conv2d_input.shape) post_pretrain_functions(model, config_pdp) - if 
keras.backend.image_data_format == "channels_first": + if keras.backend.image_data_format() == "channels_first": model.layers[2].hgq_loss((1, 32, 30, 30)) # Does not work, TODO: Fix else: model.layers[2].hgq_loss((1, 30, 30, 32)) @@ -1602,3 +1639,52 @@ def test_ebops_activations(config_pdp, dense_input): act2 = Activation("tanh")(act) model = keras.Model(inputs=inputs, outputs=act2, name="test_activations") model = add_compression_layers_tf(model, config_pdp, dense_input.shape) + + +def test_linear_direct(config_pdp, dense_input): + config_pdp.quantization_parameters.enable_quantization = True + layer = PQDense(config_pdp, OUT_FEATURES, quantize_output=True, use_bias=True) + layer(dense_input) + assert True + + +def test_1dconv_direct(config_pdp, conv1d_input): + config_pdp.quantization_parameters.enable_quantization = True + layer = PQConv1d(config_pdp, OUT_FEATURES, KERNEL_SIZE, quantize_output=True, use_bias=True) + layer(conv1d_input) + assert True + + +def test_2dconv_direct(config_pdp, conv2d_input): + config_pdp.quantization_parameters.enable_quantization = True + layer = PQConv2d(config_pdp, OUT_FEATURES, KERNEL_SIZE, quantize_output=True, use_bias=True) + layer(conv2d_input) + assert True + + +def test_batch_normalization(config_pdp, conv2d_input): + config_pdp.quantization_parameters.enable_quantization = True + layer = PQBatchNormalization(config_pdp) + layer(conv2d_input) + assert True + + +def test_2dconv_depth(config_pdp, conv2d_input): + config_pdp.quantization_parameters.enable_quantization = True + layer = PQDepthwiseConv2d(config_pdp, KERNEL_SIZE) + layer(conv2d_input) + assert True + + +def test_avg_pool2d(config_pdp, conv2d_input): + config_pdp.quantization_parameters.enable_quantization = True + layer = PQAvgPool2d(config_pdp, KERNEL_SIZE) + layer(conv2d_input) + assert True + + +def test_avg_pool1d(config_pdp, conv1d_input): + config_pdp.quantization_parameters.enable_quantization = True + layer = PQAvgPool1d(config_pdp, KERNEL_SIZE) + layer(conv1d_input) + assert True From 7114f7be58eb26227dbbd4d95a2e7387cc4ce4a9 Mon Sep 17 00:00:00 2001 From: Roope Niemi Date: Fri, 21 Nov 2025 19:29:07 +0100 Subject: [PATCH 18/37] some tests, bug fixes etc. 
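Details: the per-tensor quantization arguments (i_input, f_input,
i_output, f_output and friends) are consolidated into single (k, i, f)
tuples named in_quant_bits, weight_quant_bits, bias_quant_bits and
out_quant_bits across the Keras and Torch layer implementations, with
defaults taken from the quantization_parameters config section. The
standalone activations_quantizer module (QuantizedTanh, QuantizedReLU)
is removed in favour of PQActivation, the quantization-bit getters now
return (k, i, f) triples, and new tests patch Quantizer with a
call-counting DummyLayer to check when quantizers actually run.

A minimal usage sketch of the consolidated arguments, mirroring the
updated test_linear_direct in tests/test_torch_compression_layers.py
(config_pdp, dense_input, IN_FEATURES and OUT_FEATURES are the
existing test fixtures and constants):

    # k = keep_negatives, i = integer bits, f = fractional bits
    layer = PQDense(
        config_pdp,
        IN_FEATURES,
        OUT_FEATURES,
        quantize_output=True,
        in_quant_bits=(1, 2, 5),
        weight_quant_bits=(1, 0, 3),
        bias_quant_bits=(1, 0, 3),
        out_quant_bits=(1, 2, 5),
    )
    layer(dense_input)
    assert layer.get_input_quantization_bits() == (1, 2, 5)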
--- src/pquant/core/activations_quantizer.py | 298 ------------------ src/pquant/core/tf_impl/activations.py | 33 +- .../core/tf_impl/compressed_layers_tf.py | 184 ++++++----- src/pquant/core/torch_impl/activations.py | 33 +- .../torch_impl/compressed_layers_torch.py | 130 ++++---- tests/test_keras_compression_layers.py | 183 +++++++++++ tests/test_torch_compression_layers.py | 68 ++-- 7 files changed, 416 insertions(+), 513 deletions(-) delete mode 100644 src/pquant/core/activations_quantizer.py diff --git a/src/pquant/core/activations_quantizer.py b/src/pquant/core/activations_quantizer.py deleted file mode 100644 index f9e87fe..0000000 --- a/src/pquant/core/activations_quantizer.py +++ /dev/null @@ -1,298 +0,0 @@ -import keras -from keras import ops -from keras.ops import convert_to_tensor, maximum, minimum, tanh - -from pquant.core.quantizer_functions import create_quantizer - - -@keras.saving.register_keras_serializable(package="PQuant") -class QuantizedTanh(keras.layers.Layer): - def __init__( - self, config, i_input=0.0, f_input=7.0, i_output=0.0, f_output=7.0, quantize_input=True, quantize_output=False - ): - super().__init__() - if isinstance(config, dict): - from pquant.core.finetuning import TuningConfig - - config = TuningConfig.load_from_config(config) - self.i_input = convert_to_tensor(i_input) - self.f_input = convert_to_tensor(f_input) - self.k = convert_to_tensor(1.0) - - self.i_output = convert_to_tensor(i_output) - self.f_output = convert_to_tensor(f_output) - self.k = convert_to_tensor(1.0) - - self.config = config - - self.use_hgq = config.quantization_parameters.use_high_granularity_quantization - self.is_pretraining = True - self.round_mode = config.quantization_parameters.round_mode - self.overflow = config.quantization_parameters.overflow - self.hgq_beta = config.quantization_parameters.hgq_beta - self.hgq_gamma = config.quantization_parameters.hgq_gamma - self.use_real_tanh = config.quantization_parameters.use_real_tanh - self.hgq_heterogeneous = config.quantization_parameters.hgq_heterogeneous - self.quantize_input = quantize_input - self.quantize_output = quantize_output - - def build(self, input_shape): - super().build(input_shape) - self.input_shape = input_shape - self.output_quantizer = create_quantizer( - k=self.k, - i=self.i_output, - f=self.f_output, - overflow=self.overflow, - round_mode=self.round_mode, - is_data=True, - is_heterogeneous=self.use_hgq, - ) - self.input_quantizer = create_quantizer( - k=self.k, - i=self.i_input, - f=self.f_input, - overflow=self.overflow, - round_mode=self.round_mode, - is_data=True, - is_heterogeneous=self.use_hgq, - ) - if self.use_hgq: - self.input_quantizer.build(input_shape) - self.output_quantizer.build(input_shape) - - def get_input_quantization_bits(self): - if self.use_hgq: - return self.input_quantizer.quantizer.k, self.input_quantizer.quantizer.i, self.input_quantizer.quantizer.f - else: - return self.k, self.i_input, self.f_input - - def set_input_quantization_bits(self, i, f): - if self.use_hgq: - self.input_quantizer.quantizer._i.assign(self.input_quantizer.quantizer._i * 0.0 + i) - self.input_quantizer.quantizer._f.assign(self.input_quantizer.quantizer._f * 0.0 + f) - else: - self.i_input = i - self.f_input = f - - def get_output_quantization_bits(self): - if self.use_hgq: - return self.output_quantizer.quantizer.k, self.output_quantizer.quantizer.i, self.output_quantizer.quantizer.f - else: - return self.k, self.i_output, self.f_output - - def set_output_quantization_bits(self, i, f): - if self.use_hgq: - 
self.output_quantizer.quantizer._i.assign(self.output_quantizer.quantizer._i * 0.0 + i) - self.output_quantizer.quantizer._f.assign(self.output_quantizer.quantizer._f * 0.0 + f) - else: - self.i_output = i - self.f_output = f - - def ebops(self): - bw_inp = self.input_quantizer.bits_(self.input_shape) - bw_out = self.output_quantizer.bits_(self.input_shape) - return ops.sum((2.0**bw_inp) * bw_out) * 1e-4 # type: ignore - - def hgq_loss(self): - if self.is_pretraining or not self.use_hgq: - return ops.convert_to_tensor(0.0) - loss = self.beta * self.ebops() - loss += (ops.sum(self.input_quantizer.quantizer.i) + ops.sum(self.input_quantizer.quantizer.f)) * self.hgq_gamma - loss += (ops.sum(self.output_quantizer.quantizer.i) + ops.sum(self.output_quantizer.quantizer.f)) * self.hgq_gamma - return loss - - def post_pre_train_function(self): - self.is_pretraining = False - - def pre_activation(self, x): - if self.quantize_input: - if self.use_hgq: - x = self.input_quantizer(x) - else: - x = self.input_quantizer(x, k=self.k, i=self.i_input, f=self.f_input) - return x - - def post_activation(self, x): - if self.quantize_output: - if self.use_hgq: - return self.output_quantizer(x) - else: - return self.output_quantizer(x, k=self.k, i=self.i_input, f=self.f_output) - return x - - def call(self, x): - x = self.pre_activation(x) - x = tanh(x) if self.use_real_tanh else hard_tanh(x) - x = self.post_activation(x) - self.add_loss(self.hgq_loss()) - return x - - def get_config(self): - config = super().get_config() - config.update({"config": self.config.get_dict(), "i": float(self.i), "f": float(self.f)}) - return config - - -@keras.saving.register_keras_serializable(package="PQuant") -class QuantizedReLU(keras.layers.Layer): - def __init__( - self, config, i_input=0.0, f_input=8.0, i_output=0.0, f_output=8.0, quantize_input=True, quantize_output=False - ): - super().__init__() - if isinstance(config, dict): - from pquant.core.finetuning import TuningConfig - - config = TuningConfig.load_from_config(config) - self.config = config - self.i_input = convert_to_tensor(i_input) - self.f_input = convert_to_tensor(f_input) - self.k = convert_to_tensor(0.0) - - self.i_output = convert_to_tensor(i_output) - self.f_output = convert_to_tensor(f_output) - self.k = convert_to_tensor(0.0) - - self.use_hgq = config.quantization_parameters.use_high_granularity_quantization - self.is_pretraining = True - self.round_mode = config.quantization_parameters.round_mode - self.overflow = config.quantization_parameters.overflow - self.use_multiplier = config.quantization_parameters.use_relu_multiplier - self.hgq_beta = config.quantization_parameters.hgq_beta - self.hgq_gamma = config.quantization_parameters.hgq_gamma - self.hgq_heterogeneous = config.quantization_parameters.hgq_heterogeneous - self.use_fitcompress = config.fitcompress_parameters.enable_fitcompress - - self.post_fitcompress_calibration = False - self.saved_inputs = [] - self.quantize_input = quantize_input - self.quantize_output = quantize_output - - def build(self, input_shape): - super().build(input_shape) - self.input_shape = input_shape - self.output_quantizer = create_quantizer( - k=self.k, - i=self.i_output, - f=self.f_output, - overflow=self.overflow, - round_mode=self.round_mode, - is_data=True, - is_heterogeneous=self.use_hgq, - ) - self.input_quantizer = create_quantizer( - k=self.k, - i=self.i_input, - f=self.f_input, - overflow=self.overflow, - round_mode=self.round_mode, - is_data=True, - is_heterogeneous=self.use_hgq, - ) - if self.use_hgq: - 
self.input_quantizer.build(input_shape) - self.output_quantizer.build(input_shape) - - if self.use_multiplier: - self.multiplier = self.add_weight(shape=(1,), trainable=True, initializer=keras.initializers.Constant(-1.0)) - - def get_input_quantization_bits(self): - if self.use_hgq: - return self.input_quantizer.quantizer.k, self.input_quantizer.quantizer.i, self.input_quantizer.quantizer.f - else: - return self.k, self.i_input, self.f_input - - def set_input_quantization_bits(self, i, f): - if self.use_hgq: - self.input_quantizer.quantizer._i.assign(self.input_quantizer.quantizer._i * 0.0 + i) - self.input_quantizer.quantizer._f.assign(self.input_quantizer.quantizer._f * 0.0 + f) - else: - self.i_input = i - self.f_input = f - - def get_output_quantization_bits(self): - if self.use_hgq: - return self.output_quantizer.quantizer.k, self.output_quantizer.quantizer.i, self.output_quantizer.quantizer.f - else: - return self.k, self.i_output, self.f_output - - def set_output_quantization_bits(self, i, f): - if self.use_hgq: - self.output_quantizer.quantizer._i.assign(self.output_quantizer.quantizer._i * 0.0 + i) - self.output_quantizer.quantizer._f.assign(self.output_quantizer.quantizer._f * 0.0 + f) - else: - self.i_output = i - self.f_output = f - - def post_pre_train_function(self): - self.is_pretraining = False - - def ebops(self): - bw_inp = self.input_quantizer.bits_(self.input_shape) - bw_out = self.output_quantizer.bits_(self.input_shape) - return ops.sum((2.0**bw_inp) * bw_out) * 1e-4 # type: ignore - - def hgq_loss(self): - if self.is_pretraining or not self.use_hgq: - return ops.convert_to_tensor(0.0) - loss = self.beta * self.ebops() - loss = self.beta * self.ebops() - loss += (ops.sum(self.input_quantizer.quantizer.i) + ops.sum(self.input_quantizer.quantizer.f)) * self.hgq_gamma - loss += (ops.sum(self.output_quantizer.quantizer.i) + ops.sum(self.output_quantizer.quantizer.f)) * self.hgq_gamma - return loss - - def pre_activation(self, x): - if self.quantize_input: - if self.use_hgq: - x = self.input_quantizer(x) - else: - x = self.input_quantizer(x, k=self.k, i=self.i_input, f=self.f_input) - if self.use_multiplier: - x = x * 2 ** (ops.stop_gradient(ops.round(self.multiplier) - self.multiplier) + self.multiplier) - return x - - def post_activation(self, x): - if self.quantize_output: - if self.use_hgq: - return self.output_quantizer(x) - else: - return self.output_quantizer(x, k=self.k, i=self.i_input, f=self.f_output) - return x - - def call(self, x): - if self.use_fitcompress and self.is_pretraining: - if self.post_fitcompress_calibration: - # Save quantized input into ReLU - self.saved_inputs.append(x) - # During FITcompress, we do not use any quantized activations - return ops.relu(x) - # Multiplier after fitcompress if condition, such that we don't use any relu multiplier during FITcompress search - x = self.pre_activation(x) - x = ops.relu(x) - x = self.post_activation(x) - self.add_loss(self.hgq_loss()) - return x - - def get_config(self): - config = super().get_config() - config.update( - { - "config": self.config.get_dict(), - "i": float(self.i), - "f": float(self.f), - } - ) - return config - - -def hard_sigmoid(x): - """Computes hard_sigmoid function that saturates between 0 and 1.""" - x = 0.5 * x + 0.5 - x = maximum(x, 0.0) - x = minimum(x, 1.0) - return x - - -def hard_tanh(x): - """Computes hard_tanh function that saturates between -1 and 1.""" - return 2.0 * hard_sigmoid(x) - 1.0 diff --git a/src/pquant/core/tf_impl/activations.py 
b/src/pquant/core/tf_impl/activations.py index 5140373..091ea8a 100644 --- a/src/pquant/core/tf_impl/activations.py +++ b/src/pquant/core/tf_impl/activations.py @@ -1,3 +1,6 @@ +from typing import Tuple +from typing import TypeVar as T + import keras from keras.ops import maximum, minimum, relu, tanh @@ -25,10 +28,8 @@ def __init__( self, config, activation="relu", - i_input=0.0, - f_input=8.0, - i_output=0.0, - f_output=7.0, + in_quant_bits: Tuple[T, T, T] = None, + out_quant_bits: Tuple[T, T, T] = None, quantize_input=True, quantize_output=False, ): @@ -37,13 +38,19 @@ def __init__( from pquant.core.finetuning import TuningConfig config = TuningConfig.load_from_config(config) - self.config = config - self.i_input = i_input - self.f_input = f_input - self.k = 0.0 if activation.lower() == "relu" else 1.0 - - self.i_output = i_output - self.f_output = f_output + if in_quant_bits is None: + self.k_input = config.quantization_parameters.default_data_keep_negatives + self.i_input = config.quantization_parameters.default_data_integer_bits + self.f_input = config.quantization_parameters.default_data_fractional_bits + else: + self.k_input, self.i_input, self.f_input = in_quant_bits + + if out_quant_bits is None: + self.k_output = config.quantization_parameters.default_data_keep_negatives + self.i_output = config.quantization_parameters.default_data_integer_bits + self.f_output = config.quantization_parameters.default_data_fractional_bits + else: + self.k_output, self.i_output, self.f_output = out_quant_bits self.activation_name = activation.lower() self.activation_function = activation_registry.get(self.activation_name) @@ -69,7 +76,7 @@ def build(self, input_shape): super().build(input_shape) self.input_shape = input_shape self.output_quantizer = Quantizer( - k=self.k, + k=self.k_output, i=self.i_output, f=self.f_output, overflow=self.overflow, @@ -79,7 +86,7 @@ def build(self, input_shape): hgq_gamma=self.hgq_gamma, ) self.input_quantizer = Quantizer( - k=self.k, + k=self.k_input, i=self.i_input, f=self.f_input, overflow=self.overflow, diff --git a/src/pquant/core/tf_impl/compressed_layers_tf.py b/src/pquant/core/tf_impl/compressed_layers_tf.py index 9a9ea92..dfaa68a 100644 --- a/src/pquant/core/tf_impl/compressed_layers_tf.py +++ b/src/pquant/core/tf_impl/compressed_layers_tf.py @@ -32,36 +32,36 @@ def __init__( layer_type, quantize_input=True, quantize_output=False, - input_quantization_bits: Tuple[T, T, T] = None, - weight_quantization_bits: Tuple[T, T, T] = None, - bias_quantization_bits: Tuple[T, T, T] = None, - output_quantization_bits: Tuple[T, T, T] = None, + in_quant_bits: Tuple[T, T, T] = None, + weight_quant_bits: Tuple[T, T, T] = None, + bias_quant_bits: Tuple[T, T, T] = None, + out_quant_bits: Tuple[T, T, T] = None, *args, **kwargs, ): super().__init__(**kwargs) - if input_quantization_bits is not None: - self.k_input, self.i_input, self.f_input = input_quantization_bits + if in_quant_bits is not None: + self.k_input, self.i_input, self.f_input = in_quant_bits else: self.k_input = config.quantization_parameters.default_data_keep_negatives self.i_input = config.quantization_parameters.default_data_integer_bits self.f_input = config.quantization_parameters.default_data_fractional_bits - if weight_quantization_bits is not None: - self.k_weight, self.i_weight, self.f_weight = weight_quantization_bits + if weight_quant_bits is not None: + self.k_weight, self.i_weight, self.f_weight = weight_quant_bits else: self.k_weight = config.quantization_parameters.default_weight_keep_negatives 
self.i_weight = config.quantization_parameters.default_weight_integer_bits self.f_weight = config.quantization_parameters.default_weight_fractional_bits - if bias_quantization_bits is not None: - self.k_bias, self.i_bias, self.f_bias = bias_quantization_bits + if bias_quant_bits is not None: + self.k_bias, self.i_bias, self.f_bias = bias_quant_bits else: self.k_bias = config.quantization_parameters.default_weight_keep_negatives self.i_bias = config.quantization_parameters.default_weight_integer_bits self.f_bias = config.quantization_parameters.default_weight_fractional_bits - if output_quantization_bits is not None: - self.k_output, self.i_output, self.f_output = output_quantization_bits + if out_quant_bits is not None: + self.k_output, self.i_output, self.f_output = out_quant_bits else: self.k_output = config.quantization_parameters.default_data_keep_negatives self.i_output = config.quantization_parameters.default_data_integer_bits @@ -153,6 +153,11 @@ def build(self, input_shape): def apply_final_compression(self): pass + def post_pre_train_function(self): + self.is_pretraining = False + if self.pruning_layer is not None: + self.pruning_layer.post_pre_train_function() + def save_weights(self): self.init_weight = self.weight.value @@ -195,20 +200,14 @@ def prune(self, weight): def pre_forward(self, x, training=None): if self.quantize_input: - if self.use_hgq and not self.input_quantizer.quantizer.built: - self.input_quantizer.build(x.shape) - if not self.pruning_layer.is_pretraining and not self.use_fitcompress: - x = self.input_quantizer(x) + x = self.quantize_i(x, self.input_quantizer) if self.pruning_method == "wanda": self.collect_input(x, self._kernel, training) return x def post_forward(self, x, training=None): if self.quantize_output: - if self.use_hgq and not self.output_quantizer.quantizer.built: - self.output_quantizer.build(x.shape) - if not self.pruning_layer.is_pretraining and not self.use_fitcompress: - x = self.output_quantizer(x) + x = self.quantize_i(x, self.output_quantizer) if self.pruning_method == "activation_pruning": self.collect_output(x, training) return x @@ -234,7 +233,7 @@ def __init__( data_format=None, dilation_rate=(1, 1), activation=None, - use_bias=False, + use_bias=True, depthwise_initializer="glorot_uniform", bias_initializer="zeros", depthwise_regularizer=None, @@ -247,10 +246,10 @@ def __init__( bias: bool = True, device=None, dtype=None, - input_quantization_bits: Tuple[T, T, T] = None, - weight_quantization_bits: Tuple[T, T, T] = None, - bias_quantization_bits: Tuple[T, T, T] = None, - output_quantization_bits: Tuple[T, T, T] = None, + in_quant_bits: Tuple[T, T, T] = None, + weight_quant_bits: Tuple[T, T, T] = None, + bias_quant_bits: Tuple[T, T, T] = None, + out_quant_bits: Tuple[T, T, T] = None, **kwargs, ): super().__init__( @@ -260,7 +259,7 @@ def __init__( depth_multiplier=depth_multiplier, data_format=data_format, dilation_rate=dilation_rate, - activation=activation, + activation=None, use_bias=use_bias, depthwise_initializer=depthwise_initializer, bias_initializer=bias_regularizer, @@ -273,10 +272,10 @@ def __init__( layer_type="conv", quantize_input=quantize_input, quantize_output=quantize_output, - input_quantization_bits=input_quantization_bits, - weight_quantization_bits=weight_quantization_bits, - bias_quantization_bits=bias_quantization_bits, - output_quantization_bits=output_quantization_bits, + in_quant_bits=in_quant_bits, + weight_quant_bits=weight_quant_bits, + bias_quant_bits=bias_quant_bits, + out_quant_bits=out_quant_bits, 
**kwargs, ) self.depthwise_regularizer = depthwise_regularizer @@ -294,10 +293,18 @@ def __init__( def build(self, input_shape): super().build(input_shape) - input_channel = input_shape[-1] + if self.data_format == "channels_last": + input_channel = input_shape[-1] + else: + input_channel = input_shape[1] + + depthwise_shape = self.kernel_size + ( + input_channel, + self.depth_multiplier, + ) self._kernel = self.add_weight( name="kernel", - shape=self.kernel.shape, + shape=depthwise_shape, initializer=self.depthwise_initializer, regularizer=self.depthwise_regularizer, constraint=self.depthwise_constraint, @@ -305,7 +312,7 @@ def build(self, input_shape): dtype=self.dtype, ) if self.use_bias: - self.bias = self.add_weight( + self._bias = self.add_weight( name="bias", shape=(self.depth_multiplier * input_channel,), initializer=self.bias_initializer, @@ -351,7 +358,7 @@ def bias(self, bias): def ebops(self, shape): bw_inp = self.input_quantizer.quantizer.bits_(shape) - bw_ker = self.weight_quantizer.quantizer.bits_(ops.shape(self.kernel)) + bw_ker = self.weight_quantizer.quantizer.bits_(ops.shape(self._kernel)) if self.parallelization_factor < 0: ebops = ops.sum( ops.depthwise_conv( @@ -365,7 +372,7 @@ def ebops(self, shape): ) else: reduce_axis_kernel = tuple(range(0, 3)) - if self.do_transpose_data: # Is channels last + if self.data_format == "channels_last": # Is channels last reduce_axis_input = reduce_axis_kernel else: reduce_axis_input = (0,) + tuple(range(2, 4)) @@ -375,7 +382,7 @@ def ebops(self, shape): ebops = ops.sum(bw_inp[:, None] * bw_ker) if self.bias is not None: size = ops.cast(ops.prod(shape), self.dtype) - bw_bias = self.bias_quantizer.quantizer.bits_(ops.shape(self.bias)) + bw_bias = self.bias_quantizer.quantizer.bits_(ops.shape(self._bias)) ebops += ops.mean(bw_bias) * size return ebops @@ -430,10 +437,10 @@ def __init__( activity_regularizer=None, kernel_constraint=None, bias_constraint=None, - input_quantization_bits: Tuple[T, T, T] = None, - weight_quantization_bits: Tuple[T, T, T] = None, - bias_quantization_bits: Tuple[T, T, T] = None, - output_quantization_bits: Tuple[T, T, T] = None, + in_quant_bits: Tuple[T, T, T] = None, + weight_quant_bits: Tuple[T, T, T] = None, + bias_quant_bits: Tuple[T, T, T] = None, + out_quant_bits: Tuple[T, T, T] = None, **kwargs, ): super().__init__( @@ -444,7 +451,7 @@ def __init__( data_format=data_format, dilation_rate=dilation_rate, groups=groups, - activation=activation, + activation=None, use_bias=use_bias, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer, @@ -457,10 +464,10 @@ def __init__( layer_type="conv", quantize_input=quantize_input, quantize_output=quantize_output, - input_quantization_bits=input_quantization_bits, - weight_quantization_bits=weight_quantization_bits, - bias_quantization_bits=bias_quantization_bits, - output_quantization_bits=output_quantization_bits, + in_quant_bits=in_quant_bits, + weight_quant_bits=weight_quant_bits, + bias_quant_bits=bias_quant_bits, + out_quant_bits=out_quant_bits, **kwargs, ) @@ -641,10 +648,10 @@ def __init__( kernel_size, quantize_input=True, quantize_output=False, - input_quantization_bits: Tuple[T, T, T] = None, - weight_quantization_bits: Tuple[T, T, T] = None, - bias_quantization_bits: Tuple[T, T, T] = None, - output_quantization_bits: Tuple[T, T, T] = None, + in_quant_bits: Tuple[T, T, T] = None, + weight_quant_bits: Tuple[T, T, T] = None, + bias_quant_bits: Tuple[T, T, T] = None, + out_quant_bits: Tuple[T, T, T] = None, strides=1, padding="valid", 
data_format=None, @@ -670,7 +677,7 @@ def __init__( data_format=data_format, dilation_rate=dilation_rate, groups=groups, - activation=activation, + activation=None, use_bias=use_bias, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer, @@ -683,10 +690,10 @@ def __init__( layer_type="conv", quantize_input=quantize_input, quantize_output=quantize_output, - input_quantization_bits=input_quantization_bits, - weight_quantization_bits=weight_quantization_bits, - bias_quantization_bits=bias_quantization_bits, - output_quantization_bits=output_quantization_bits, + in_quant_bits=in_quant_bits, + weight_quant_bits=weight_quant_bits, + bias_quant_bits=bias_quant_bits, + out_quant_bits=out_quant_bits, **kwargs, ) @@ -788,10 +795,10 @@ def __init__( dtype=None, quantize_input=True, quantize_output=False, - input_quantization_bits: Tuple[T, T, T] = None, - weight_quantization_bits: Tuple[T, T, T] = None, - bias_quantization_bits: Tuple[T, T, T] = None, - output_quantization_bits: Tuple[T, T, T] = None, + in_quant_bits: Tuple[T, T, T] = None, + weight_quant_bits: Tuple[T, T, T] = None, + bias_quant_bits: Tuple[T, T, T] = None, + out_quant_bits: Tuple[T, T, T] = None, activation=None, use_bias=True, kernel_initializer="glorot_uniform", @@ -807,7 +814,7 @@ def __init__( ): super().__init__( units=units, - activation=activation, + activation=None, use_bias=use_bias, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer, @@ -822,10 +829,10 @@ def __init__( layer_type="linear", quantize_input=quantize_input, quantize_output=quantize_output, - input_quantization_bits=input_quantization_bits, - weight_quantization_bits=weight_quantization_bits, - bias_quantization_bits=bias_quantization_bits, - output_quantization_bits=output_quantization_bits, + in_quant_bits=in_quant_bits, + weight_quant_bits=weight_quant_bits, + bias_quant_bits=bias_quant_bits, + out_quant_bits=out_quant_bits, **kwargs, ) self.weight_transpose = (1, 0) @@ -889,7 +896,10 @@ def ebops(self, shape): def call(self, x, training=None): input_shape = x.shape x = self.pre_forward(x, training) - x = super().call(x) + x = ops.matmul(x, self.kernel) + bias = self.bias + if bias is not None: + x = ops.add(x, bias) x = self.post_forward(x, training) if self.use_hgq and self.enable_quantization: self.add_loss(self.hgq_loss(input_shape)) @@ -989,13 +999,15 @@ def build(self, input_shape): self._shape = tuple(shape) def apply_final_compression(self): + self.final_compression_done = True gamma, beta = self.gamma, self.beta if self.enable_quantization: - gamma = self.weight_quantizer(gamma) - beta = self.bias_quantizer(beta) - self.gamma.assign(gamma) - self.beta.assign(beta) - self.final_compression_done = True + if gamma is not None: + gamma = self.weight_quantizer(gamma) + self.gamma.assign(gamma) + if beta is not None: + beta = self.bias_quantizer(beta) + self.beta.assign(beta) def ebops(self, shape): bw_inp = self.input_quantizer.quantizer.bits_(shape) @@ -1045,6 +1057,7 @@ def call(self, inputs, training=None, mask=None): variance = moving_variance if self.scale: + gamma = self.gamma if self.enable_quantization and not self.final_compression_done: gamma = self.weight_quantizer(self.gamma) gamma = ops.cast(gamma, inputs.dtype) @@ -1052,6 +1065,7 @@ def call(self, inputs, training=None, mask=None): gamma = None if self.center: + beta = self.beta if self.enable_quantization and not self.final_compression_done: beta = self.bias_quantizer(self.beta) beta = ops.cast(beta, inputs.dtype) @@ -1089,22 +1103,22 @@ def 
__init__( config, quantize_input=True, quantize_output=False, - input_quantization_bits: Tuple[T, T, T] = None, - output_quantization_bits: Tuple[T, T, T] = None, + in_quant_bits: Tuple[T, T, T] = None, + out_quant_bits: Tuple[T, T, T] = None, **kwargs, ): super().__init__(**kwargs) - if input_quantization_bits is not None: - self.k_input, self.i_input, self.f_input = input_quantization_bits + if in_quant_bits is not None: + self.k_input, self.i_input, self.f_input = in_quant_bits else: self.k_input = config.quantization_parameters.default_data_keep_negatives self.i_input = config.quantization_parameters.default_data_integer_bits self.f_input = config.quantization_parameters.default_data_fractional_bits - if output_quantization_bits is not None: - self.k_output, self.i_output, self.f_output = output_quantization_bits + if out_quant_bits is not None: + self.k_output, self.i_output, self.f_output = out_quant_bits else: self.k_output = config.quantization_parameters.default_data_keep_negatives self.i_output = config.quantization_parameters.default_data_integer_bits @@ -1216,8 +1230,8 @@ def __init__( pool_size, quantize_input=True, quantize_output=False, - input_quantization_bits: Tuple[T, T, T] = None, - output_quantization_bits: Tuple[T, T, T] = None, + in_quant_bits: Tuple[T, T, T] = None, + out_quant_bits: Tuple[T, T, T] = None, strides=None, padding="valid", data_format=None, @@ -1233,8 +1247,8 @@ def __init__( config=config, quantize_input=quantize_input, quantize_output=quantize_output, - input_quantization_bits=input_quantization_bits, - output_quantization_bits=output_quantization_bits, + in_quant_bits=in_quant_bits, + out_quant_bits=out_quant_bits, **kwargs, ) @@ -1255,8 +1269,8 @@ def __init__( pool_size, quantize_input=True, quantize_output=False, - input_quantization_bits: Tuple[T, T, T] = None, - output_quantization_bits: Tuple[T, T, T] = None, + in_quant_bits: Tuple[T, T, T] = None, + out_quant_bits: Tuple[T, T, T] = None, strides=None, padding="valid", data_format=None, @@ -1272,8 +1286,8 @@ def __init__( config=config, quantize_input=quantize_input, quantize_output=quantize_output, - input_quantization_bits=input_quantization_bits, - output_quantization_bits=output_quantization_bits, + in_quant_bits=in_quant_bits, + out_quant_bits=out_quant_bits, ) def call(self, x, training=None): @@ -1803,8 +1817,8 @@ def add_compression_layers_tf(model, config, input_shape=None): padding=layer.padding, data_format=layer.data_format, name=layer.name, - input_quantization_bits=layer.input_quantization_bits, - output_quantization_bits=layer.output_quantization_bits, + in_quant_bits=layer.in_quant_bits, + out_quant_bits=layer.out_quant_bits, ) set_quantization_bits_activations(config, layer, new_layer) new_layer.build(x.shape) diff --git a/src/pquant/core/torch_impl/activations.py b/src/pquant/core/torch_impl/activations.py index 3836578..5b0a44e 100644 --- a/src/pquant/core/torch_impl/activations.py +++ b/src/pquant/core/torch_impl/activations.py @@ -1,9 +1,13 @@ +from typing import Tuple, TypeVar + import torch import torch.nn as nn from torch import maximum, minimum, relu, tanh from pquant.core.torch_impl.quantizer import Quantizer +T = TypeVar("T") + def hard_sigmoid(x): """Computes hard_sigmoid function that saturates between 0 and 1.""" @@ -26,10 +30,8 @@ def __init__( self, config, activation="relu", - i_input=0.0, - f_input=8.0, - i_output=0.0, - f_output=7.0, + in_quant_bits: Tuple[T, T, T] = None, + out_quant_bits: Tuple[T, T, T] = None, quantize_input=True, quantize_output=False, ): 
@@ -39,12 +41,19 @@ def __init__( config = TuningConfig.load_from_config(config) self.config = config - self.i_input = i_input - self.f_input = f_input - self.k = 0.0 if activation.lower() == "relu" else 1.0 - - self.i_output = i_output - self.f_output = f_output + if in_quant_bits is None: + self.k_input = config.quantization_parameters.default_data_keep_negatives + self.i_input = config.quantization_parameters.default_data_integer_bits + self.f_input = config.quantization_parameters.default_data_fractional_bits + else: + self.k_input, self.i_input, self.f_input = in_quant_bits + + if out_quant_bits is None: + self.k_output = config.quantization_parameters.default_data_keep_negatives + self.i_output = config.quantization_parameters.default_data_integer_bits + self.f_output = config.quantization_parameters.default_data_fractional_bits + else: + self.k_output, self.i_output, self.f_output = out_quant_bits self.activation_name = activation.lower() self.activation_function = activation_registry.get(self.activation_name) @@ -72,7 +81,7 @@ def check_is_built(self, input_shape): self.built = True self.input_shape = input_shape self.output_quantizer = Quantizer( - k=self.k, + k=self.k_output, i=self.i_output, f=self.f_output, overflow=self.overflow, @@ -82,7 +91,7 @@ def check_is_built(self, input_shape): hgq_gamma=self.hgq_gamma, ) self.input_quantizer = Quantizer( - k=self.k, + k=self.k_input, i=self.i_input, f=self.f_input, overflow=self.overflow, diff --git a/src/pquant/core/torch_impl/compressed_layers_torch.py b/src/pquant/core/torch_impl/compressed_layers_torch.py index 3e11922..e35f946 100644 --- a/src/pquant/core/torch_impl/compressed_layers_torch.py +++ b/src/pquant/core/torch_impl/compressed_layers_torch.py @@ -27,37 +27,37 @@ def __init__( quantize_input=True, quantize_output=False, enable_pruning: bool = None, - input_quantization_bits: Tuple[T, T, T] = None, - weight_quantization_bits: Tuple[T, T, T] = None, - bias_quantization_bits: Tuple[T, T, T] = None, - output_quantization_bits: Tuple[T, T, T] = None, + in_quant_bits: Tuple[T, T, T] = None, + weight_quant_bits: Tuple[T, T, T] = None, + bias_quant_bits: Tuple[T, T, T] = None, + out_quant_bits: Tuple[T, T, T] = None, *args, **kwargs, ): super().__init__(**kwargs) - if input_quantization_bits is not None: - self.k_input, self.i_input, self.f_input = input_quantization_bits + if in_quant_bits is not None: + self.k_input, self.i_input, self.f_input = in_quant_bits else: self.k_input = config.quantization_parameters.default_data_keep_negatives self.i_input = config.quantization_parameters.default_data_integer_bits self.f_input = config.quantization_parameters.default_data_fractional_bits - if weight_quantization_bits is not None: - self.k_weight, self.i_weight, self.f_weight = weight_quantization_bits + if weight_quant_bits is not None: + self.k_weight, self.i_weight, self.f_weight = weight_quant_bits else: self.k_weight = config.quantization_parameters.default_weight_keep_negatives self.i_weight = config.quantization_parameters.default_weight_integer_bits self.f_weight = config.quantization_parameters.default_weight_fractional_bits - if bias_quantization_bits is not None: - self.k_bias, self.i_bias, self.f_bias = bias_quantization_bits + if bias_quant_bits is not None: + self.k_bias, self.i_bias, self.f_bias = bias_quant_bits else: self.k_bias = config.quantization_parameters.default_weight_keep_negatives self.i_bias = config.quantization_parameters.default_weight_integer_bits self.f_bias = 
config.quantization_parameters.default_weight_fractional_bits - if output_quantization_bits is not None: - self.k_output, self.i_output, self.f_output = output_quantization_bits + if out_quant_bits is not None: + self.k_output, self.i_output, self.f_output = out_quant_bits else: self.k_output = config.quantization_parameters.default_data_keep_negatives self.i_output = config.quantization_parameters.default_data_integer_bits @@ -218,10 +218,10 @@ def __init__( enable_pruning: bool = None, device=None, dtype=None, - input_quantization_bits: Tuple[T, T, T] = None, - weight_quantization_bits: Tuple[T, T, T] = None, - bias_quantization_bits: Tuple[T, T, T] = None, - output_quantization_bits: Tuple[T, T, T] = None, + in_quant_bits: Tuple[T, T, T] = None, + weight_quant_bits: Tuple[T, T, T] = None, + bias_quant_bits: Tuple[T, T, T] = None, + out_quant_bits: Tuple[T, T, T] = None, **kwargs, ): super().__init__( @@ -235,10 +235,10 @@ def __init__( quantize_input=quantize_input, quantize_output=quantize_output, enable_pruning=enable_pruning, - input_quantization_bits=input_quantization_bits, - weight_quantization_bits=weight_quantization_bits, - bias_quantization_bits=bias_quantization_bits, - output_quantization_bits=output_quantization_bits, + in_quant_bits=in_quant_bits, + weight_quant_bits=weight_quant_bits, + bias_quant_bits=bias_quant_bits, + out_quant_bits=out_quant_bits, **kwargs, ) self.in_features = in_features @@ -325,10 +325,10 @@ def __init__( quantize_input=True, quantize_output=False, enable_pruning: bool = None, - input_quantization_bits: Tuple[T, T, T] = None, - weight_quantization_bits: Tuple[T, T, T] = None, - bias_quantization_bits: Tuple[T, T, T] = None, - output_quantization_bits: Tuple[T, T, T] = None, + in_quant_bits: Tuple[T, T, T] = None, + weight_quant_bits: Tuple[T, T, T] = None, + bias_quant_bits: Tuple[T, T, T] = None, + out_quant_bits: Tuple[T, T, T] = None, **kwargs, ): super().__init__( @@ -348,10 +348,10 @@ def __init__( quantize_input=quantize_input, quantize_output=quantize_output, enable_pruning=enable_pruning, - input_quantization_bits=input_quantization_bits, - weight_quantization_bits=weight_quantization_bits, - bias_quantization_bits=bias_quantization_bits, - output_quantization_bits=output_quantization_bits, + in_quant_bits=in_quant_bits, + weight_quant_bits=weight_quant_bits, + bias_quant_bits=bias_quant_bits, + out_quant_bits=out_quant_bits, **kwargs, ) self.use_fitcompress = config.fitcompress_parameters.enable_fitcompress @@ -450,10 +450,10 @@ def __init__( quantize_input=True, quantize_output=False, enable_pruning: bool = None, - input_quantization_bits: Tuple[T, T, T] = None, - weight_quantization_bits: Tuple[T, T, T] = None, - bias_quantization_bits: Tuple[T, T, T] = None, - output_quantization_bits: Tuple[T, T, T] = None, + in_quant_bits: Tuple[T, T, T] = None, + weight_quant_bits: Tuple[T, T, T] = None, + bias_quant_bits: Tuple[T, T, T] = None, + out_quant_bits: Tuple[T, T, T] = None, **kwargs, ): super().__init__( @@ -473,10 +473,10 @@ def __init__( quantize_input=quantize_input, quantize_output=quantize_output, enable_pruning=enable_pruning, - input_quantization_bits=input_quantization_bits, - weight_quantization_bits=weight_quantization_bits, - bias_quantization_bits=bias_quantization_bits, - output_quantization_bits=output_quantization_bits, + in_quant_bits=in_quant_bits, + weight_quant_bits=weight_quant_bits, + bias_quant_bits=bias_quant_bits, + out_quant_bits=out_quant_bits, **kwargs, ) self.use_fitcompress = 
config.fitcompress_parameters.enable_fitcompress @@ -571,20 +571,20 @@ def __init__( config, quantize_input=True, quantize_output=False, - input_quantization_bits: Tuple[T, T, T] = None, - output_quantization_bits: Tuple[T, T, T] = None, + in_quant_bits: Tuple[T, T, T] = None, + out_quant_bits: Tuple[T, T, T] = None, **kwargs, ): super().__init__(**kwargs) - if input_quantization_bits is not None: - self.k_input, self.i_input, self.f_input = input_quantization_bits + if in_quant_bits is not None: + self.k_input, self.i_input, self.f_input = in_quant_bits else: self.k_input = config.quantization_parameters.default_data_keep_negatives self.i_input = config.quantization_parameters.default_data_integer_bits self.f_input = config.quantization_parameters.default_data_fractional_bits - if output_quantization_bits is not None: - self.k_output, self.i_output, self.f_output = output_quantization_bits + if out_quant_bits is not None: + self.k_output, self.i_output, self.f_output = out_quant_bits else: self.k_output = config.quantization_parameters.default_data_keep_negatives self.i_output = config.quantization_parameters.default_data_integer_bits @@ -686,8 +686,8 @@ def __init__( count_include_pad: bool = True, quantize_input=True, quantize_output=False, - input_quantization_bits: Tuple[T, T, T] = None, - output_quantization_bits: Tuple[T, T, T] = None, + in_quant_bits: Tuple[T, T, T] = None, + out_quant_bits: Tuple[T, T, T] = None, **kwargs, ): super().__init__( @@ -699,8 +699,8 @@ def __init__( config=config, quantize_input=quantize_input, quantize_output=quantize_output, - input_quantization_bits=input_quantization_bits, - output_quantization_bits=output_quantization_bits, + in_quant_bits=in_quant_bits, + out_quant_bits=out_quant_bits, **kwargs, ) @@ -724,8 +724,8 @@ def __init__( divisor_override: Optional[int] = None, quantize_input=True, quantize_output=False, - input_quantization_bits: Tuple[T, T, T] = None, - output_quantization_bits: Tuple[T, T, T] = None, + in_quant_bits: Tuple[T, T, T] = None, + out_quant_bits: Tuple[T, T, T] = None, **kwargs, ): super().__init__( @@ -738,8 +738,8 @@ def __init__( config=config, quantize_input=quantize_input, quantize_output=quantize_output, - input_quantization_bits=input_quantization_bits, - output_quantization_bits=output_quantization_bits, + in_quant_bits=in_quant_bits, + out_quant_bits=out_quant_bits, **kwargs, ) @@ -763,26 +763,26 @@ def __init__( device=None, dtype=None, quantize_input=True, - input_quantization_bits: Tuple[T, T, T] = None, - weight_quantization_bits: Tuple[T, T, T] = None, - bias_quantization_bits: Tuple[T, T, T] = None, + in_quant_bits: Tuple[T, T, T] = None, + weight_quant_bits: Tuple[T, T, T] = None, + bias_quant_bits: Tuple[T, T, T] = None, ): super().__init__(num_features, eps, momentum, affine, track_running_stats, device=device, dtype=dtype) - if input_quantization_bits is not None: - self.k_input, self.i_input, self.f_input = input_quantization_bits + if in_quant_bits is not None: + self.k_input, self.i_input, self.f_input = in_quant_bits else: self.k_input = config.quantization_parameters.default_data_keep_negatives self.i_input = config.quantization_parameters.default_data_integer_bits self.f_input = config.quantization_parameters.default_data_fractional_bits - if weight_quantization_bits is not None: - self.k_weight, self.i_weight, self.f_weight = weight_quantization_bits + if weight_quant_bits is not None: + self.k_weight, self.i_weight, self.f_weight = weight_quant_bits else: self.k_weight = 
config.quantization_parameters.default_weight_keep_negatives self.i_weight = config.quantization_parameters.default_weight_integer_bits self.f_weight = config.quantization_parameters.default_weight_fractional_bits - if bias_quantization_bits is not None: - self.k_bias, self.i_bias, self.f_bias = bias_quantization_bits + if bias_quant_bits is not None: + self.k_bias, self.i_bias, self.f_bias = bias_quant_bits else: self.k_bias = config.quantization_parameters.default_weight_keep_negatives self.i_bias = config.quantization_parameters.default_weight_integer_bits @@ -1026,10 +1026,8 @@ def add_quantized_activations_to_model_layer(module, config, prefix=""): relu = PQActivation( config, "relu", - i_input=i, - f_input=f, - i_output=i, - f_output=f, + in_quant_bits=(0, i, f), + out_quant_bits=(0, i, f), quantize_input=quantize_input, quantize_output=quantize_output, ) @@ -1040,10 +1038,8 @@ def add_quantized_activations_to_model_layer(module, config, prefix=""): tanh = PQActivation( config, type_of_tanh, - i_input=i, - f_input=f, - i_output=i, - f_output=f, + in_quant_bits=(0, i, f), + out_quant_bits=(0, i, f), quantize_input=quantize_input, quantize_output=quantize_output, ) diff --git a/tests/test_keras_compression_layers.py b/tests/test_keras_compression_layers.py index 616e1d0..894a610 100644 --- a/tests/test_keras_compression_layers.py +++ b/tests/test_keras_compression_layers.py @@ -1,4 +1,5 @@ from types import SimpleNamespace +from unittest.mock import patch import keras import numpy as np @@ -1688,3 +1689,185 @@ def test_avg_pool1d(config_pdp, conv1d_input): layer = PQAvgPool1d(config_pdp, KERNEL_SIZE) layer(conv1d_input) assert True + + +class DummyLayer(keras.layers.Layer): + + def __init__(self, *args, **kwargs): + super().__init__() + self.built = True + self.layer_called = 0 + + def call(self, x, *args, **kwargs): + self.layer_called += 1 + return x + + def extra_repr(self): + return f"Layer called = {self.layer_called} times." 
+ + +def test_avgpool_quant_called(config_pdp, conv1d_input): + config_pdp.quantization_parameters.enable_quantization = True + with patch('pquant.core.tf_impl.compressed_layers_tf.Quantizer', DummyLayer): + layer = PQAvgPool1d(config_pdp, KERNEL_SIZE, quantize_input=True) + layer(conv1d_input) + assert layer.input_quantizer.layer_called == 1 + assert layer.output_quantizer.layer_called == 0 + + layer = PQAvgPool1d(config_pdp, KERNEL_SIZE, quantize_input=False, quantize_output=True) + layer(conv1d_input) + assert layer.input_quantizer.layer_called == 0 + assert layer.output_quantizer.layer_called == 1 + + config_pdp.quantization_parameters.enable_quantization = False + layer = PQAvgPool1d(config_pdp, KERNEL_SIZE, quantize_input=True, quantize_output=True) + layer(conv1d_input) + assert layer.input_quantizer.layer_called == 0 + assert layer.output_quantizer.layer_called == 0 + assert True + + +def test_batchnorm_quant_called(config_pdp, conv2d_input): + config_pdp.quantization_parameters.enable_quantization = True + axis = -1 if keras.backend.image_data_format() == "channels_last" else 1 + with patch('pquant.core.tf_impl.compressed_layers_tf.Quantizer', DummyLayer): + layer = PQBatchNormalization(config_pdp, axis=axis, quantize_input=True) + layer(conv2d_input) + assert layer.input_quantizer.layer_called == 1 + assert layer.weight_quantizer.layer_called == 1 + assert layer.bias_quantizer.layer_called == 1 + + layer = PQBatchNormalization(config_pdp, axis=axis, quantize_input=False) + layer(conv2d_input) + assert layer.input_quantizer.layer_called == 0 + assert layer.weight_quantizer.layer_called == 1 + assert layer.bias_quantizer.layer_called == 1 + + config_pdp.quantization_parameters.enable_quantization = False + layer = PQBatchNormalization(config_pdp, axis=axis, quantize_input=True) + layer(conv2d_input) + assert layer.input_quantizer.layer_called == 0 + assert layer.weight_quantizer.layer_called == 0 + assert layer.bias_quantizer.layer_called == 0 + assert True + + +def test_pqconv2d_quant_called(config_pdp, conv2d_input): + config_pdp.quantization_parameters.enable_quantization = True + with patch('pquant.core.tf_impl.compressed_layers_tf.Quantizer', DummyLayer): + layer = PQConv2d(config_pdp, OUT_FEATURES, KERNEL_SIZE, quantize_input=True, use_bias=True) + layer.post_pre_train_function() + layer(conv2d_input) + assert layer.input_quantizer.layer_called == 1 + assert layer.weight_quantizer.layer_called == 1 + assert layer.bias_quantizer.layer_called == 1 + assert layer.output_quantizer.layer_called == 0 + + layer = PQConv2d(config_pdp, OUT_FEATURES, KERNEL_SIZE, quantize_input=False, quantize_output=True, use_bias=True) + layer.post_pre_train_function() + layer(conv2d_input) + assert layer.input_quantizer.layer_called == 0 + assert layer.weight_quantizer.layer_called == 1 + assert layer.bias_quantizer.layer_called == 1 + assert layer.output_quantizer.layer_called == 1 + + config_pdp.quantization_parameters.enable_quantization = False + layer.post_pre_train_function() + layer = PQConv2d(config_pdp, OUT_FEATURES, KERNEL_SIZE, quantize_input=True, quantize_output=True, use_bias=True) + layer(conv2d_input) + assert layer.input_quantizer.layer_called == 0 + assert layer.weight_quantizer.layer_called == 0 + assert layer.bias_quantizer.layer_called == 0 + assert layer.output_quantizer.layer_called == 0 + assert True + + +def test_pqdepthwiseconv2d_quant_called(config_pdp, conv2d_input): + config_pdp.quantization_parameters.enable_quantization = True + + with 
patch('pquant.core.tf_impl.compressed_layers_tf.Quantizer', DummyLayer): + layer = PQDepthwiseConv2d(config_pdp, KERNEL_SIZE, quantize_input=True, use_bias=True) + layer.post_pre_train_function() + layer(conv2d_input) + assert layer.input_quantizer.layer_called == 1 + assert layer.weight_quantizer.layer_called == 1 + assert layer.bias_quantizer.layer_called == 1 + assert layer.output_quantizer.layer_called == 0 + + layer = PQDepthwiseConv2d(config_pdp, KERNEL_SIZE, quantize_input=False, quantize_output=True, use_bias=True) + layer.post_pre_train_function() + layer(conv2d_input) + assert layer.input_quantizer.layer_called == 0 + assert layer.weight_quantizer.layer_called == 1 + assert layer.bias_quantizer.layer_called == 1 + assert layer.output_quantizer.layer_called == 1 + + config_pdp.quantization_parameters.enable_quantization = False + layer.post_pre_train_function() + layer = PQDepthwiseConv2d(config_pdp, KERNEL_SIZE, quantize_input=True, quantize_output=True, use_bias=True) + layer(conv2d_input) + assert layer.input_quantizer.layer_called == 0 + assert layer.weight_quantizer.layer_called == 0 + assert layer.bias_quantizer.layer_called == 0 + assert layer.output_quantizer.layer_called == 0 + assert True + + +def test_pqconv1d_quant_called(config_pdp, conv1d_input): + config_pdp.quantization_parameters.enable_quantization = True + with patch('pquant.core.tf_impl.compressed_layers_tf.Quantizer', DummyLayer): + layer = PQConv1d(config_pdp, OUT_FEATURES, KERNEL_SIZE, quantize_input=True, use_bias=True) + layer.post_pre_train_function() + layer(conv1d_input) + assert layer.input_quantizer.layer_called == 1 + assert layer.weight_quantizer.layer_called == 1 + assert layer.bias_quantizer.layer_called == 1 + assert layer.output_quantizer.layer_called == 0 + + layer = PQConv1d(config_pdp, OUT_FEATURES, KERNEL_SIZE, quantize_input=False, quantize_output=True, use_bias=True) + layer.post_pre_train_function() + layer(conv1d_input) + assert layer.input_quantizer.layer_called == 0 + assert layer.weight_quantizer.layer_called == 1 + assert layer.bias_quantizer.layer_called == 1 + assert layer.output_quantizer.layer_called == 1 + + config_pdp.quantization_parameters.enable_quantization = False + layer.post_pre_train_function() + layer = PQConv1d(config_pdp, OUT_FEATURES, KERNEL_SIZE, quantize_input=True, quantize_output=True, use_bias=True) + layer(conv1d_input) + assert layer.input_quantizer.layer_called == 0 + assert layer.weight_quantizer.layer_called == 0 + assert layer.bias_quantizer.layer_called == 0 + assert layer.output_quantizer.layer_called == 0 + assert True + + +def test_dense_quant_called(config_pdp, dense_input): + config_pdp.quantization_parameters.enable_quantization = True + with patch('pquant.core.tf_impl.compressed_layers_tf.Quantizer', DummyLayer): + layer = PQDense(config_pdp, OUT_FEATURES, quantize_input=True, use_bias=True) + layer.post_pre_train_function() + layer(dense_input) + assert layer.input_quantizer.layer_called == 1 + assert layer.weight_quantizer.layer_called == 1 + assert layer.bias_quantizer.layer_called == 1 + assert layer.output_quantizer.layer_called == 0 + + layer = PQDense(config_pdp, OUT_FEATURES, quantize_input=False, quantize_output=True, use_bias=True) + layer.post_pre_train_function() + layer(dense_input) + assert layer.input_quantizer.layer_called == 0 + assert layer.weight_quantizer.layer_called == 1 + assert layer.bias_quantizer.layer_called == 1 + assert layer.output_quantizer.layer_called == 1 + + config_pdp.quantization_parameters.enable_quantization 
= False + layer.post_pre_train_function() + layer = PQDense(config_pdp, OUT_FEATURES, quantize_input=True, quantize_output=True, use_bias=True) + layer(dense_input) + assert layer.input_quantizer.layer_called == 0 + assert layer.weight_quantizer.layer_called == 0 + assert layer.bias_quantizer.layer_called == 0 + assert layer.output_quantizer.layer_called == 0 + assert True diff --git a/tests/test_torch_compression_layers.py b/tests/test_torch_compression_layers.py index 3b61f2b..61deab8 100644 --- a/tests/test_torch_compression_layers.py +++ b/tests/test_torch_compression_layers.py @@ -839,10 +839,10 @@ def test_linear_direct(config_pdp, dense_input): IN_FEATURES, OUT_FEATURES, quantize_output=True, - input_quantization_bits=(1, 2, 5), - weight_quantization_bits=(1, 0, 3), - bias_quantization_bits=(1, 0, 3), - output_quantization_bits=(1, 2, 5), + in_quant_bits=(1, 2, 5), + weight_quant_bits=(1, 0, 3), + bias_quant_bits=(1, 0, 3), + out_quant_bits=(1, 2, 5), ) layer(dense_input) assert layer.get_input_quantization_bits() == (1, 2, 5) @@ -879,10 +879,10 @@ def test_linear_direct_hgq(config_pdp, dense_input): IN_FEATURES, OUT_FEATURES, quantize_output=True, - input_quantization_bits=(1, 2, 5), - weight_quantization_bits=(1, 0, 3), - bias_quantization_bits=(1, 0, 3), - output_quantization_bits=(1, 2, 5), + in_quant_bits=(1, 2, 5), + weight_quant_bits=(1, 0, 3), + bias_quant_bits=(1, 0, 3), + out_quant_bits=(1, 2, 5), ) layer(dense_input) k, i, f = layer.get_input_quantization_bits() @@ -918,10 +918,10 @@ def test_conv2d_direct(config_pdp, conv2d_input): OUT_FEATURES, KERNEL_SIZE, quantize_output=True, - input_quantization_bits=(1, 2, 5), - weight_quantization_bits=(1, 0, 3), - bias_quantization_bits=(1, 0, 3), - output_quantization_bits=(1, 2, 5), + in_quant_bits=(1, 2, 5), + weight_quant_bits=(1, 0, 3), + bias_quant_bits=(1, 0, 3), + out_quant_bits=(1, 2, 5), ) layer(conv2d_input) assert layer.get_input_quantization_bits() == (1, 2, 5) @@ -959,10 +959,10 @@ def test_conv2d_direct_hgq(config_pdp, conv2d_input): OUT_FEATURES, KERNEL_SIZE, quantize_output=True, - input_quantization_bits=(1, 2, 5), - weight_quantization_bits=(1, 0, 3), - bias_quantization_bits=(1, 0, 3), - output_quantization_bits=(1, 2, 5), + in_quant_bits=(1, 2, 5), + weight_quant_bits=(1, 0, 3), + bias_quant_bits=(1, 0, 3), + out_quant_bits=(1, 2, 5), ) layer(conv2d_input) k, i, f = layer.get_input_quantization_bits() @@ -998,10 +998,10 @@ def test_conv1d_direct(config_pdp, conv1d_input): OUT_FEATURES, KERNEL_SIZE, quantize_output=True, - input_quantization_bits=(1, 2, 5), - weight_quantization_bits=(1, 0, 3), - bias_quantization_bits=(1, 0, 3), - output_quantization_bits=(1, 2, 5), + in_quant_bits=(1, 2, 5), + weight_quant_bits=(1, 0, 3), + bias_quant_bits=(1, 0, 3), + out_quant_bits=(1, 2, 5), ) layer(conv1d_input) assert layer.get_input_quantization_bits() == (1, 2, 5) @@ -1039,10 +1039,10 @@ def test_conv1d_direct_hgq(config_pdp, conv1d_input): OUT_FEATURES, KERNEL_SIZE, quantize_output=True, - input_quantization_bits=(1, 2, 5), - weight_quantization_bits=(1, 0, 3), - bias_quantization_bits=(1, 0, 3), - output_quantization_bits=(1, 2, 5), + in_quant_bits=(1, 2, 5), + weight_quant_bits=(1, 0, 3), + bias_quant_bits=(1, 0, 3), + out_quant_bits=(1, 2, 5), ) layer(conv1d_input) k, i, f = layer.get_input_quantization_bits() @@ -1070,9 +1070,7 @@ def test_avgpool_direct(config_pdp, conv1d_input, conv2d_input): layer(conv1d_input) assert layer.get_input_quantization_bits() == (0, 0, 7) assert layer.get_output_quantization_bits() 
== (0, 0, 7) - layer = PQAvgPool1d( - config_pdp, KERNEL_SIZE, quantize_output=True, input_quantization_bits=(1, 2, 5), output_quantization_bits=(1, 2, 5) - ) + layer = PQAvgPool1d(config_pdp, KERNEL_SIZE, quantize_output=True, in_quant_bits=(1, 2, 5), out_quant_bits=(1, 2, 5)) layer(conv1d_input) assert layer.get_input_quantization_bits() == (1, 2, 5) assert layer.get_output_quantization_bits() == (1, 2, 5) @@ -1082,9 +1080,7 @@ def test_avgpool_direct(config_pdp, conv1d_input, conv2d_input): assert layer.get_input_quantization_bits() == (0, 0, 7) assert layer.get_output_quantization_bits() == (0, 0, 7) - layer = PQAvgPool2d( - config_pdp, KERNEL_SIZE, quantize_output=True, input_quantization_bits=(1, 2, 5), output_quantization_bits=(1, 2, 5) - ) + layer = PQAvgPool2d(config_pdp, KERNEL_SIZE, quantize_output=True, in_quant_bits=(1, 2, 5), out_quant_bits=(1, 2, 5)) layer(conv2d_input) assert layer.get_input_quantization_bits() == (1, 2, 5) assert layer.get_output_quantization_bits() == (1, 2, 5) @@ -1104,9 +1100,7 @@ def test_avgpool_direct_hgq(config_pdp, conv1d_input, conv2d_input): assert torch.all(i == 0) assert torch.all(f == 7) - layer = PQAvgPool1d( - config_pdp, KERNEL_SIZE, quantize_output=True, input_quantization_bits=(1, 2, 5), output_quantization_bits=(1, 2, 5) - ) + layer = PQAvgPool1d(config_pdp, KERNEL_SIZE, quantize_output=True, in_quant_bits=(1, 2, 5), out_quant_bits=(1, 2, 5)) layer(conv1d_input) k, i, f = layer.get_input_quantization_bits() assert torch.all(k == 1) @@ -1130,9 +1124,7 @@ def test_avgpool_direct_hgq(config_pdp, conv1d_input, conv2d_input): assert torch.all(i == 0) assert torch.all(f == 7) - layer = PQAvgPool2d( - config_pdp, KERNEL_SIZE, quantize_output=True, input_quantization_bits=(1, 2, 5), output_quantization_bits=(1, 2, 5) - ) + layer = PQAvgPool2d(config_pdp, KERNEL_SIZE, quantize_output=True, in_quant_bits=(1, 2, 5), out_quant_bits=(1, 2, 5)) layer(conv2d_input) k, i, f = layer.get_input_quantization_bits() assert torch.all(k == 1) @@ -1150,7 +1142,7 @@ def test_batchnorm2d_direct(config_pdp, conv2d_input): layer = PQBatchNorm2d(config_pdp, IN_FEATURES) layer(conv2d_input) assert layer.get_input_quantization_bits() == (0, 0, 7) - layer = PQBatchNorm2d(config_pdp, IN_FEATURES, input_quantization_bits=(1, 2, 5)) + layer = PQBatchNorm2d(config_pdp, IN_FEATURES, in_quant_bits=(1, 2, 5)) layer(conv2d_input) assert layer.get_input_quantization_bits() == (1, 2, 5) @@ -1164,7 +1156,7 @@ def test_batchnorm2d_direct_hgq(config_pdp, conv2d_input): assert torch.all(k == 0) assert torch.all(i == 0) assert torch.all(f == 7) - layer = PQBatchNorm2d(config_pdp, IN_FEATURES, input_quantization_bits=(1, 2, 5)) + layer = PQBatchNorm2d(config_pdp, IN_FEATURES, in_quant_bits=(1, 2, 5)) layer(conv2d_input) k, i, f = layer.get_input_quantization_bits() assert torch.all(k == 1) From 975be74de13d75656f07cbc5f9543aa85621b97d Mon Sep 17 00:00:00 2001 From: Roope Niemi Date: Mon, 24 Nov 2025 16:06:30 +0100 Subject: [PATCH 19/37] rename compressed layers, import automatically based on keras backend in __init__, bug fixes for keras layer replacement. 
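With this change the backend implementation is selected once, at
import time, from the KERAS_BACKEND environment variable (default:
tensorflow), and the per-backend submodules are forwarded under the
top-level package. Rough usage sketch; the variable must be set before
pquant is first imported:

    import os
    os.environ["KERAS_BACKEND"] = "torch"

    import pquant
    # pquant.layers, pquant.activations and pquant.quantizer now
    # resolve to the implementations under pquant.core.torch
    from pquant import add_compression_layers, train_model

Also in this patch: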
Training parameter to quantizers to enable WRAP mode --- src/pquant/__init__.py | 71 +++--- src/pquant/core/compressed_layers.py | 106 --------- src/pquant/core/finetuning.py | 5 +- src/pquant/core/keras/__init__.py | 0 .../core/{tf_impl => keras}/activations.py | 2 +- .../layers.py} | 169 +++++++++----- .../core/{tf_impl => keras}/quantizer.py | 4 +- .../{tf_impl/train_tf.py => keras/train.py} | 4 +- src/pquant/core/torch/__init__.py | 0 .../core/{torch_impl => torch}/activations.py | 2 +- .../{torch_impl => torch}/fit_compress.py | 10 +- .../layers.py} | 36 ++- .../core/{p_optim.py => torch/optimizers.py} | 0 .../core/{torch_impl => torch}/quantizer.py | 4 +- .../train_torch.py => torch/train.py} | 4 +- src/pquant/core/train.py | 12 - tests/test_keras_compression_layers.py | 219 ++++++++++-------- tests/test_torch_compression_layers.py | 122 +++++----- 18 files changed, 379 insertions(+), 391 deletions(-) delete mode 100644 src/pquant/core/compressed_layers.py create mode 100644 src/pquant/core/keras/__init__.py rename src/pquant/core/{tf_impl => keras}/activations.py (99%) rename src/pquant/core/{tf_impl/compressed_layers_tf.py => keras/layers.py} (93%) rename src/pquant/core/{tf_impl => keras}/quantizer.py (93%) rename src/pquant/core/{tf_impl/train_tf.py => keras/train.py} (93%) create mode 100644 src/pquant/core/torch/__init__.py rename src/pquant/core/{torch_impl => torch}/activations.py (99%) rename src/pquant/core/{torch_impl => torch}/fit_compress.py (99%) rename src/pquant/core/{torch_impl/compressed_layers_torch.py => torch/layers.py} (98%) rename src/pquant/core/{p_optim.py => torch/optimizers.py} (100%) rename src/pquant/core/{torch_impl => torch}/quantizer.py (91%) rename src/pquant/core/{torch_impl/train_torch.py => torch/train.py} (93%) delete mode 100644 src/pquant/core/train.py diff --git a/src/pquant/__init__.py b/src/pquant/__init__.py index 56f5c95..5007ca4 100644 --- a/src/pquant/__init__.py +++ b/src/pquant/__init__.py @@ -1,26 +1,45 @@ -from . import configs, pruning_methods -from .core.compressed_layers import ( - add_compression_layers, - add_default_layer_quantization_pruning_to_config, - apply_final_compression_model, - get_layer_keep_ratio, - get_model_losses, - post_training_prune, -) -from .core.train import iterative_train -from .core.utils import get_default_config - -__all__ = [ - "iterative_train", - "add_compression_layers", - "apply_final_compression_model", - "get_model_losses", - "get_default_config", - "add_default_layer_quantization_pruning_to_config", - "post_training_prune", - "get_layer_keep_ratio", - "pruning_methods", - "configs", - "pSGD", - "pAdam", -] +import importlib +import os +import sys + +# flake8: noqa +backend = os.getenv("KERAS_BACKEND", "tensorflow") +if backend == "torch": + from . import configs, pruning_methods + from .core.torch import activations, layers, optimizers, quantizer + from .core.torch.layers import add_compression_layers, post_training_prune + from .core.torch.train import train_model + + _forwards = ["activations", "layers", "quantizer", "optimizers"] + + for name in _forwards: + mod = importlib.import_module(f".core.torch.{name}", package="pquant") + sys.modules[f"{__name__}.{name}"] = mod + setattr(sys.modules[__name__], name, mod) + + _forwards.append("train_model") + _forwards.append("add_compression_layers") + _forwards.append("configs") + _forwards.append("pruning_methods") + _forwards.append("post_training_prune") + __all__ = _forwards + +else: + from . 
import configs, pruning_methods + from .core.keras import activations, layers, quantizer + from .core.keras.layers import add_compression_layers, post_training_prune + from .core.keras.train import train_model + + _forwards = ["activations", "layers", "quantizer"] + + for name in _forwards: + mod = importlib.import_module(f".core.keras.{name}", package="pquant") + sys.modules[f"{__name__}.{name}"] = mod + setattr(sys.modules[__name__], name, mod) + + _forwards.append("train_model") + _forwards.append("add_compression_layers") + _forwards.append("configs") + _forwards.append("pruning_methods") + _forwards.append("post_training_prune") + __all__ = _forwards diff --git a/src/pquant/core/compressed_layers.py b/src/pquant/core/compressed_layers.py deleted file mode 100644 index 4f4064d..0000000 --- a/src/pquant/core/compressed_layers.py +++ /dev/null @@ -1,106 +0,0 @@ -import keras - - -def add_default_layer_quantization_pruning_to_config(model, config): - if keras.backend.backend() == "torch": - from pquant.core.torch_impl.compressed_layers_torch import ( - add_default_layer_quantization_pruning_to_config_torch, - ) - - return add_default_layer_quantization_pruning_to_config_torch(model, config) - else: - from pquant.core.tf_impl.compressed_layers_tf import ( - add_default_layer_quantization_pruning_to_config_tf, - ) - - return add_default_layer_quantization_pruning_to_config_tf(model, config) - - -def add_compression_layers(model, config, input_shape): - if keras.backend.backend() == "torch": - from pquant.core.torch_impl.compressed_layers_torch import ( - add_compression_layers_torch, - ) - - return add_compression_layers_torch(model, config, input_shape) - else: - from pquant.core.tf_impl.compressed_layers_tf import add_compression_layers_tf - - return add_compression_layers_tf(model, config, input_shape) - - -def get_layer_keep_ratio(model): - if keras.backend.backend() == "torch": - from pquant.core.torch_impl.compressed_layers_torch import ( - get_layer_keep_ratio_torch, - ) - - return get_layer_keep_ratio_torch(model) - else: - from pquant.core.tf_impl.compressed_layers_tf import get_layer_keep_ratio_tf - - return get_layer_keep_ratio_tf(model) - - -def get_model_losses(model, losses): - if keras.backend.backend() == "torch": - from pquant.core.torch_impl.compressed_layers_torch import ( - get_model_losses_torch, - ) - - return get_model_losses_torch(model, losses) - else: - from pquant.core.tf_impl.compressed_layers_tf import get_model_losses_tf - - return get_model_losses_tf(model, losses) - - -def apply_final_compression_model(model): - if keras.backend.backend() == "torch": - from pquant.core.torch_impl.compressed_layers_torch import ( - apply_final_compression_torch, - ) - - return apply_final_compression_torch(model) - else: - from pquant.core.tf_impl.compressed_layers_tf import ( - apply_final_compression_tf, - ) - - return apply_final_compression_tf(model) - - -def post_training_prune(model, calibration_data, config): - if keras.backend.backend() == "torch": - from pquant.core.torch_impl.compressed_layers_torch import ( - add_compression_layers_torch, - post_pretrain_functions, - remove_compression_layers, - ) - - t_delta = config.pruning_parameters.t_delta - config.pruning_parameters.t_start_collecting_batch = 0 - for i in range(t_delta): - inputs = calibration_data[i] - if i == 0: - model = add_compression_layers_torch(model, config, inputs.shape) - post_pretrain_functions(model, config) - model(inputs) - return remove_compression_layers(model, config) - else: - from 
pquant.core.tf_impl.compressed_layers_tf import ( - add_compression_layers_tf, - apply_final_compression_tf, - post_pretrain_functions, - ) - - t_delta = config.pruning_parameters.t_delta - config.pruning_parameters.t_start_collecting_batch = 0 - - for i in range(t_delta): - inputs = calibration_data[i] - if i == 0: - model = add_compression_layers_tf(model, config, inputs.shape) - post_pretrain_functions(model, config) - model(inputs, training=True) # True so pruning works - return apply_final_compression_tf(model, config) diff --git a/src/pquant/core/finetuning.py b/src/pquant/core/finetuning.py index 564db78..f23ece0 100644 --- a/src/pquant/core/finetuning.py +++ b/src/pquant/core/finetuning.py @@ -10,9 +10,8 @@ import yaml from pydantic import BaseModel, Field, field_validator +from pquant import add_compression_layers, train_model from pquant.core import constants -from pquant.core.compressed_layers import add_compression_layers -from pquant.core.train import iterative_train from pquant.data_models.finetuning_model import BaseFinetuningModel from pquant.data_models.fitcompress_model import BaseFitCompressModel from pquant.data_models.pruning_model import ( @@ -267,7 +266,7 @@ def objective(self, trial, model, train_func, valid_func, **kwargs): scheduler_func = self.get_scheduler_function() scheduler = scheduler_func(optimizer, self.config) - trained_model = iterative_train( + trained_model = train_model( compressed_model, self.config, train_func, diff --git a/src/pquant/core/keras/__init__.py b/src/pquant/core/keras/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/pquant/core/tf_impl/activations.py b/src/pquant/core/keras/activations.py similarity index 99% rename from src/pquant/core/tf_impl/activations.py rename to src/pquant/core/keras/activations.py index 091ea8a..5c12121 100644 --- a/src/pquant/core/tf_impl/activations.py +++ b/src/pquant/core/keras/activations.py @@ -4,7 +4,7 @@ import keras from keras.ops import maximum, minimum, relu, tanh -from pquant.core.tf_impl.quantizer import Quantizer +from pquant.core.keras.quantizer import Quantizer def hard_sigmoid(x): diff --git a/src/pquant/core/tf_impl/compressed_layers_tf.py b/src/pquant/core/keras/layers.py similarity index 93% rename from src/pquant/core/tf_impl/compressed_layers_tf.py rename to src/pquant/core/keras/layers.py index dfaa68a..95ee8a0 100644 --- a/src/pquant/core/tf_impl/compressed_layers_tf.py +++ b/src/pquant/core/keras/layers.py @@ -18,8 +18,8 @@ ) from keras.src.ops.operation_utils import compute_pooling_output_shape -from pquant.core.tf_impl.activations import PQActivation -from pquant.core.tf_impl.quantizer import Quantizer +from pquant.core.keras.activations import PQActivation +from pquant.core.keras.quantizer import Quantizer from pquant.core.utils import get_pruning_layer T = TypeVar("T") @@ -168,11 +168,12 @@ def ebops(self, shape): return 0.0 def hgq_loss(self, shape): + shape = (1,) + shape[1:] if self.pruning_layer.is_pretraining or not self.use_hgq: return ops.convert_to_tensor(0.0) loss = self.hgq_beta * self.ebops(shape) loss += self.weight_quantizer.hgq_loss() - if self.bias is not None: + if self._bias is not None: loss += self.bias_quantizer.hgq_loss() if self.quantize_input: loss += self.input_quantizer.hgq_loss() @@ -185,12 +186,6 @@ def handle_transpose(self, x, transpose, do_transpose=False): x = ops.transpose(x, transpose) return x - # Quantize i? 
- def quantize_i(self, x, quantizer): - if self.enable_quantization: - return quantizer(x) if x is not None else x - return x - def prune(self, weight): if self.enable_pruning: weight = self.handle_transpose(weight, self.weight_transpose, True) @@ -199,15 +194,15 @@ def prune(self, weight): return weight def pre_forward(self, x, training=None): - if self.quantize_input: - x = self.quantize_i(x, self.input_quantizer) + if self.quantize_input and self.enable_quantization: + x = self.input_quantizer(x, training=training) if self.pruning_method == "wanda": self.collect_input(x, self._kernel, training) return x def post_forward(self, x, training=None): - if self.quantize_output: - x = self.quantize_i(x, self.output_quantizer) + if self.quantize_output and self.enable_quantization: + x = self.output_quantizer(x, training=training) if self.pruning_method == "activation_pruning": self.collect_output(x, training) return x @@ -336,9 +331,13 @@ def kernel(self): return self._kernel if self.pruning_first: weight = self.prune(self._kernel) - return self.quantize_i(weight, self.weight_quantizer) + if self.enable_quantization: + weight = self.weight_quantizer(weight) + return weight else: - weight = self.quantize_i(self._kernel, self.weight_quantizer) + weight = self._kernel + if self.enable_quantization: + weight = self.weight_quantizer(weight) return self.prune(weight) @kernel.setter @@ -347,9 +346,11 @@ def kernel(self, kernel): @property def bias(self): - if self.final_compression_done: + if self.final_compression_done or self._bias is None: return self._bias - bias = self.quantize_i(self._bias, self.bias_quantizer) + bias = self._bias + if self.enable_quantization: + bias = self.bias_quantizer(self._bias) return bias @bias.setter @@ -504,16 +505,22 @@ def kernel(self): return self._kernel if self.pruning_first: weight = self.prune(self._kernel) - return self.quantize_i(weight, self.weight_quantizer) + if self.enable_quantization: + weight = self.weight_quantizer(weight) + return weight else: - weight = self.quantize_i(self._kernel, self.weight_quantizer) + weight = self._kernel + if self.enable_quantization: + weight = self.weight_quantizer(weight) return self.prune(weight) @property def bias(self): - if self.final_compression_done: + if self.final_compression_done or self._bias is None: return self._bias - bias = self.quantize_i(self._bias, self.bias_quantizer) + bias = self._bias + if self.enable_quantization: + bias = self.bias_quantizer(self._bias) return bias @bias.setter @@ -545,9 +552,9 @@ def ebops(self, shape): bw_ker = ops.sum(bw_ker, axis=reduce_axis_kernel) ebops = ops.sum(bw_inp[:, None] * bw_ker) - if self.bias is not None: + if self._bias is not None: size = ops.cast(ops.prod(shape), self.dtype) - bw_bias = self.bias_quantizer.quantizer.bits_(ops.shape(self.bias)) + bw_bias = self.bias_quantizer.quantizer.bits_(ops.shape(self._bias)) ebops += ops.mean(bw_bias) * size return ebops @@ -583,7 +590,7 @@ def __init__( pointwise_constraint=None, bias_constraint=None, quantize_input=True, - quantize_output=True, + quantize_output=False, **kwargs, ): super().__init__() @@ -730,16 +737,22 @@ def kernel(self): return self._kernel if self.pruning_first: weight = self.prune(self._kernel) - return self.quantize_i(weight, self.weight_quantizer) + if self.enable_quantization: + weight = self.weight_quantizer(weight) + return weight else: - weight = self.quantize_i(self._kernel, self.weight_quantizer) + weight = self._kernel + if self.enable_quantization: + weight = self.weight_quantizer(weight) 
return self.prune(weight) @property def bias(self): - if self.final_compression_done: + if self.final_compression_done or self._bias is None: return self._bias - bias = self.quantize_i(self._bias, self.bias_quantizer) + bias = self._bias + if self.enable_quantization: + bias = self.bias_quantizer(self._bias) return bias @bias.setter @@ -770,9 +783,9 @@ def ebops(self, shape): reduce_axis_kernel = tuple(range(0, 1)) bw_ker = ops.sum(bw_ker, axis=reduce_axis_kernel) ebops = ops.sum(bw_inp[:, None] * bw_ker) - if self.bias is not None: + if self._bias is not None: size = ops.cast(ops.prod(shape), self.dtype) - bw_bias = self.bias_quantizer.quantizer.bits_(ops.shape(self.bias)) + bw_bias = self.bias_quantizer.quantizer.bits_(ops.shape(self._bias)) ebops += ops.mean(bw_bias) * size return ebops @@ -866,16 +879,22 @@ def kernel(self): return self._kernel if self.pruning_first: weight = self.prune(self._kernel) - return self.quantize_i(weight, self.weight_quantizer) + if self.enable_quantization: + weight = self.weight_quantizer(weight) + return weight else: - weight = self.quantize_i(self._kernel, self.weight_quantizer) + weight = self._kernel + if self.enable_quantization: + weight = self.weight_quantizer(weight) return self.prune(weight) @property def bias(self): - if self.final_compression_done: + if self.final_compression_done or self._bias is None: return self._bias - bias = self.quantize_i(self._bias, self.bias_quantizer) + bias = self._bias + if self.enable_quantization: + bias = self.bias_quantizer(self._bias) return bias @bias.setter @@ -1018,6 +1037,7 @@ def ebops(self, shape): return ebops def hgq_loss(self, shape): + shape = (1,) + shape[1:] if self.is_pretraining or not self.use_hgq: return ops.convert_to_tensor(0.0) loss = self.hgq_beta * self.ebops(shape) @@ -1043,7 +1063,7 @@ def call(self, inputs, training=None, mask=None): # float32 for the subsequent computations. 
inputs = ops.cast(inputs, compute_dtype) if self.quantize_input and self.enable_quantization: - inputs = self.input_quantizer(inputs) + inputs = self.input_quantizer(inputs, training=training) moving_mean = ops.cast(self.moving_mean, inputs.dtype) moving_variance = ops.cast(self.moving_variance, inputs.dtype) @@ -1185,12 +1205,12 @@ def pre_pooling(self, x, training): if not hasattr(self, "input_quantizer"): self.build(x.shape) if self.quantize_input and self.enable_quantization: - x = self.input_quantizer(x, training) + x = self.input_quantizer(x, training=training) return x def post_pooling(self, x, training): if self.quantize_output and self.enable_quantization: - x = self.output_quantizer(x, training) + x = self.output_quantizer(x, training=training) return x def ebops(self, shape): @@ -1198,6 +1218,7 @@ def ebops(self, shape): return ops.sum(bw_inp) def hgq_loss(self, shape): + shape = (1,) + shape[1:] if self.is_pretraining or not self.use_hgq: return ops.convert_to_tensor(0.0) loss = self.hgq_beta * self.ebops(shape) @@ -1309,7 +1330,7 @@ def call_post_round_functions(model, rewind, rounds, r): post_round_functions(model) -def apply_final_compression_tf(model): +def apply_final_compression(model): x = model.layers[0].output for layer in model.layers[1:]: if isinstance(layer, (PQWeightBiasBase, PQSeparableConv2d, PQBatchNormalization, PQDepthwiseConv2d)): @@ -1516,7 +1537,7 @@ def pdp_setup(model, config): idx += weight_size -def get_layer_keep_ratio_tf(model): +def get_layer_keep_ratio(model): total_w = 0 remaining_weights = 0 for layer in model.layers: @@ -1537,13 +1558,8 @@ def get_layer_keep_ratio_tf(model): elif isinstance(layer, PQSeparableConv2d): depthwise_weight = ops.cast(layer.depthwise_conv.kernel, layer.depthwise_conv.kernel.dtype) pointwise_weight = ops.cast(layer.pointwise_conv.kernel, layer.pointwise_conv.kernel.dtype) - bias = ( - ops.cast(layer.pointwise_conv.bias, layer.pointwise_conv.bias.dtype) - if layer.pointwise_conv.bias is not None - else None - ) - depthwise_weight = layer.depthwise_conv.quantize_i(depthwise_weight, None) + depthwise_weight = layer.depthwise_conv.kernel transpose = layer.depthwise_conv.weight_transpose if layer.depthwise_conv.enable_pruning: depthwise_weight = layer.depthwise_conv.pruning_layer.get_hard_mask( @@ -1553,7 +1569,7 @@ def get_layer_keep_ratio_tf(model): rem = ops.count_nonzero(depthwise_weight) remaining_weights += rem - pointwise_weight = layer.pointwise_conv.quantize_i(pointwise_weight, bias) + pointwise_weight = layer.pointwise_conv.kernel transpose = layer.pointwise_conv.weight_transpose if layer.pointwise_conv.enable_pruning: pointwise_weight = layer.pointwise_conv.pruning_layer.get_hard_mask( @@ -1579,7 +1595,7 @@ def get_layer_keep_ratio_tf(model): return 0.0 -def get_model_losses_tf(model, losses): +def get_model_losses(model, losses): for layer in model.layers: if isinstance( layer, @@ -1613,15 +1629,25 @@ def check_activation(layer, config): The activation can be a part of another layer such as Conv2D, or an Activation layer """ quantization_enabled = config.quantization_parameters.enable_quantization + quantize_input = config.quantization_parameters.quantize_input + quantize_output = config.quantization_parameters.quantize_output act = None if hasattr(layer.activation, "__name__"): if layer.activation.__name__ == "relu": - act = PQActivation(config, "relu") if quantization_enabled else ReLU() + act = ( + PQActivation(config, "relu", quantize_input=quantize_input, quantize_output=quantize_output) + if 
quantization_enabled + else ReLU() + ) if quantization_enabled: set_quantization_bits_activations(config, layer, act) act.build(layer.input.shape) elif layer.activation.__name__ == "tanh": - act = PQActivation(config, "tanh") if quantization_enabled else Activation(activation="tanh") + act = ( + PQActivation(config, "tanh", quantize_input=quantize_input, quantize_output=quantize_output) + if quantization_enabled + else Activation(activation="tanh") + ) if quantization_enabled: set_quantization_bits_activations(config, layer, act) act.build(layer.input.shape) @@ -1630,10 +1656,12 @@ def check_activation(layer, config): return act -def add_compression_layers_tf(model, config, input_shape=None): +def add_compression_layers(model, config, input_shape=None): # Pruning algorithms assume channels_first format # Creates a new functional model from model, replacing certain layers with compressed / quantized variants x = model.layers[0].output + quantize_input = config.quantization_parameters.quantize_input + quantize_output = config.quantization_parameters.quantize_output for layer in model.layers[1:]: act = None if isinstance(layer, DepthwiseConv2D): @@ -1655,6 +1683,8 @@ def add_compression_layers_tf(model, config, input_shape=None): bias_constraint=layer.bias_constraint, bias=layer.bias, dtype=layer.dtype, + quantize_input=quantize_input, + quantize_output=quantize_output, ) set_quantization_bits_weight_layers(config, layer, new_layer) @@ -1686,6 +1716,8 @@ def add_compression_layers_tf(model, config, input_shape=None): activity_regularizer=layer.activity_regularizer, kernel_constraint=layer.kernel_constraint, bias_constraint=layer.bias_constraint, + quantize_input=quantize_input, + quantize_output=quantize_output, ) set_quantization_bits_weight_layers(config, layer, new_layer) enable_pruning = get_enable_pruning(layer, config) @@ -1720,6 +1752,8 @@ def add_compression_layers_tf(model, config, input_shape=None): layer.depthwise_constraint, layer.pointwise_constraint, layer.bias_constraint, + quantize_input=quantize_input, + quantize_output=quantize_output, ) set_quantization_bits_weight_layers(config, layer, new_layer) @@ -1753,6 +1787,8 @@ def add_compression_layers_tf(model, config, input_shape=None): groups=layer.groups, activation=None, use_bias=layer.use_bias, + quantize_input=quantize_input, + quantize_output=quantize_output, ) set_quantization_bits_weight_layers(config, layer, new_layer) enable_pruning = get_enable_pruning(layer, config) @@ -1780,6 +1816,8 @@ def add_compression_layers_tf(model, config, input_shape=None): activity_regularizer=layer.activity_regularizer, kernel_constraint=layer.kernel_constraint, bias_constraint=layer.bias_constraint, + quantize_input=quantize_input, + quantize_output=quantize_output, ) set_quantization_bits_weight_layers(config, layer, new_layer) enable_pruning = get_enable_pruning(layer, config) @@ -1796,7 +1834,7 @@ def add_compression_layers_tf(model, config, input_shape=None): # Activation layers elif isinstance(layer, ReLU): if config.quantization_parameters.enable_quantization: - new_layer = PQActivation(config, "relu") + new_layer = PQActivation(config, "relu", quantize_input=quantize_input, quantize_output=quantize_output) set_quantization_bits_activations(config, layer, new_layer) new_layer.build(layer.input.shape) x = new_layer(x) @@ -1957,7 +1995,7 @@ def set_quantization_bits_weight_layers(config, layer, new_layer): pw_f_bits_b = layer_config["pointwise"]["bias"]["fractional_bits"] if "output" in layer_config: if "quantize" in 
layer_config["output"]: - new_layer.quantize_input = layer_config["output"]["quantize"] + new_layer.quantize_output = layer_config["output"]["quantize"] if "integer_bits" in layer_config["output"]: i_output = layer_config["output"]["integer_bits"] if "fractional_bits" in layer_config["output"]: @@ -2005,7 +2043,7 @@ def set_quantization_bits_weight_layers(config, layer, new_layer): def get_enable_pruning(layer, config): enable_pruning = config.pruning_parameters.enable_pruning - if isinstance(layer, SeparableConv2D): + if isinstance(layer, (SeparableConv2D, PQSeparableConv2d)): enable_pruning_depthwise = enable_pruning_pointwise = True if layer.name + "_depthwise" in config.pruning_parameters.disable_pruning_for_layers: enable_pruning_depthwise = False @@ -2018,13 +2056,13 @@ def get_enable_pruning(layer, config): return enable_pruning -def add_default_layer_quantization_pruning_to_config_tf(model, config): +def populate_config_with_all_layers(model, config): """Create a default config, where all the layers are added to the disable_pruning list, and have their own default quantization bits in layer_specific. By default input/output quantization is disabled. """ custom_scheme = {"layer_specific": {}, "disable_pruning_for_layers": []} for layer in model.layers: - if layer.__class__ in [Dense, Conv2D, Conv1D, DepthwiseConv2D]: + if isinstance(layer, (Dense, Conv2D, Conv1D, DepthwiseConv2D, PQWeightBiasBase, PQDepthwiseConv2d)): if layer.use_bias: custom_scheme["layer_specific"][layer.name] = { "weight": {"integer_bits": 0.0, "fractional_bits": 7.0}, @@ -2045,7 +2083,7 @@ def add_default_layer_quantization_pruning_to_config_tf(model, config): "output": {"quantize": True, "integer_bits": 0.0, "fractional_bits": 7.0}, } custom_scheme["disable_pruning_for_layers"].append(layer.name) - if layer.__class__ == SeparableConv2D: + if isinstance(layer, (SeparableConv2D, PQSeparableConv2d)): if layer.use_bias: custom_scheme["layer_specific"][layer.name] = { "input": {"quantize": True, "integer_bits": 0.0, "fractional_bits": 7.0}, @@ -2077,12 +2115,14 @@ def add_default_layer_quantization_pruning_to_config_tf(model, config): } custom_scheme["disable_pruning_for_layers"].append(layer.name + "_depthwise") custom_scheme["disable_pruning_for_layers"].append(layer.name + "_pointwise") - elif layer.__class__ in [Activation, ReLU, AveragePooling1D, AveragePooling2D, AveragePooling3D]: + elif isinstance( + layer, (Activation, ReLU, AveragePooling1D, AveragePooling2D, AveragePooling3D, PQActivation, PQAvgPoolBase) + ): custom_scheme.layer_specific[layer.name] = { "input": {"quantize": True, "integer_bits": 0.0, "fractional_bits": 7.0}, "output": {"quantize": True, "integer_bits": 0.0, "fractional_bits": 7.0}, } - elif layer.__class__ == BatchNormalization: + elif isinstance(layer, (BatchNormalization, PQBatchNormalization)): custom_scheme["layer_specific"][layer.name] = { "input": {"quantize": True, "integer_bits": 0.0, "fractional_bits": 7.0}, "weight": {"integer_bits": 0.0, "fractional_bits": 7.0}, @@ -2091,3 +2131,16 @@ def add_default_layer_quantization_pruning_to_config_tf(model, config): config.quantization_parameters.layer_specific = custom_scheme["layer_specific"] config.pruning_parameters.disable_pruning_for_layers = custom_scheme["disable_pruning_for_layers"] return config + + +def post_training_prune(model, config, calibration_data): + t_delta = config.pruning_parameters.t_delta + config.pruning_parameters.t_start_collecting_batch = 0 + + for i in range(t_delta): + inputs = calibration_data[i] + if i == 
0: + model = add_compression_layers(model, config, inputs.shape) + post_pretrain_functions(model, config) + model(inputs, training=True) # True so pruning works + return apply_final_compression(model) diff --git a/src/pquant/core/tf_impl/quantizer.py b/src/pquant/core/keras/quantizer.py similarity index 93% rename from src/pquant/core/tf_impl/quantizer.py rename to src/pquant/core/keras/quantizer.py index d79f6ee..0c46fff 100644 --- a/src/pquant/core/tf_impl/quantizer.py +++ b/src/pquant/core/keras/quantizer.py @@ -45,9 +45,9 @@ def call(self, x, training=None): if not self.built: self.build(x.shape) if self.use_hgq: - x = self.quantizer(x) + x = self.quantizer(x, training=training) else: - x = self.quantizer(x, k=self.k, i=self.i, f=self.f) + x = self.quantizer(x, k=self.k, i=self.i, f=self.f, training=training) return x def hgq_loss(self): diff --git a/src/pquant/core/tf_impl/train_tf.py b/src/pquant/core/keras/train.py similarity index 93% rename from src/pquant/core/tf_impl/train_tf.py rename to src/pquant/core/keras/train.py index 8b80127..59de0dc 100644 --- a/src/pquant/core/tf_impl/train_tf.py +++ b/src/pquant/core/keras/train.py @@ -1,6 +1,6 @@ import keras -from pquant.core.tf_impl.compressed_layers_tf import ( +from pquant.core.keras.layers import ( call_post_round_functions, post_epoch_functions, post_pretrain_functions, @@ -10,7 +10,7 @@ ) -def iterative_train_tf(model, config, train_func, valid_func, **kwargs): +def train_model(model, config, train_func, valid_func, **kwargs): """ Generic training loop, user provides training and validation functions """ diff --git a/src/pquant/core/torch/__init__.py b/src/pquant/core/torch/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/pquant/core/torch_impl/activations.py b/src/pquant/core/torch/activations.py similarity index 99% rename from src/pquant/core/torch_impl/activations.py rename to src/pquant/core/torch/activations.py index 5b0a44e..67b9337 100644 --- a/src/pquant/core/torch_impl/activations.py +++ b/src/pquant/core/torch/activations.py @@ -4,7 +4,7 @@ import torch.nn as nn from torch import maximum, minimum, relu, tanh -from pquant.core.torch_impl.quantizer import Quantizer +from pquant.core.torch.quantizer import Quantizer T = TypeVar("T") diff --git a/src/pquant/core/torch_impl/fit_compress.py b/src/pquant/core/torch/fit_compress.py similarity index 99% rename from src/pquant/core/torch_impl/fit_compress.py rename to src/pquant/core/torch/fit_compress.py index 53f1f40..fcc646b 100644 --- a/src/pquant/core/torch_impl/fit_compress.py +++ b/src/pquant/core/torch/fit_compress.py @@ -10,7 +10,7 @@ from quantizers import get_fixed_quantizer if typing.TYPE_CHECKING: - from pquant.core.torch_impl.compressed_layers_torch import ( + from pquant.core.torch.layers import ( CompressedLayerBase, CompressedLayerConv2d, CompressedLayerLinear, @@ -39,7 +39,7 @@ def call_fitcompress(config, trained_uncompressed_model, train_loader, loss_func layer-wise quantization bits for weights and activations.
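        Returns the updated config together with per-layer pruning-mask
        importance scores (unpacked by post_pretrain_functions in
        core/torch/layers.py).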
""" - from pquant.core.torch_impl.compressed_layers_torch import ( + from pquant.core.torch.layers import ( add_layer_specific_quantization_to_model, ) @@ -98,7 +98,7 @@ def add_quantization_settings_to_config( """ - from pquant.core.torch_impl.compressed_layers_torch import ( + from pquant.core.torch.layers import ( CompressedLayerConv2d, CompressedLayerLinear, QuantizedPooling, @@ -141,7 +141,7 @@ def print_info_bits(model): model - current model """ - from pquant.core.torch_impl.compressed_layers_torch import ( + from pquant.core.torch.layers import ( CompressedLayerConv1d, CompressedLayerConv2d, CompressedLayerLinear, @@ -742,7 +742,7 @@ def post_fitcompress_calibration(self, best_node_quant_config, calibration_epoch pool_int_bits: Integer bits for the (single) pooling layer (res20). pool_frac_bits: Fractional bits for the (single) pooling layer (res20). """ - from pquant.core.torch_impl.compressed_layers_torch import ( + from pquant.core.torch.layers import ( QuantizedPooling, QuantizedReLU, ) diff --git a/src/pquant/core/torch_impl/compressed_layers_torch.py b/src/pquant/core/torch/layers.py similarity index 98% rename from src/pquant/core/torch_impl/compressed_layers_torch.py rename to src/pquant/core/torch/layers.py index e35f946..3f74ca3 100644 --- a/src/pquant/core/torch_impl/compressed_layers_torch.py +++ b/src/pquant/core/torch/layers.py @@ -7,12 +7,12 @@ from torch.fx import symbolic_trace from torch.nn.common_types import _size_1_t, _size_2_t -from pquant.core.torch_impl.activations import PQActivation -from pquant.core.torch_impl.quantizer import Quantizer +from pquant.core.torch.activations import PQActivation +from pquant.core.torch.quantizer import Quantizer from pquant.core.utils import get_pruning_layer if typing.TYPE_CHECKING: - from pquant.core.torch_impl.fit_compress import call_fitcompress # noqa: 401 + from pquant.core.torch.fit_compress import call_fitcompress # noqa: 401 from keras import ops @@ -133,7 +133,7 @@ def check_is_built(self, input_shape): self.n_parallel = ops.prod(tuple(input_shape)[1:-1]) self.parallelization_factor = self.parallelization_factor if self.parallelization_factor > 0 else self.n_parallel self.built = True - self.input_shape = input_shape + self.input_shape = (1,) + input_shape[1:] def get_weight_quantization_bits(self): return self.weight_quantizer.get_quantization_bits() @@ -556,7 +556,7 @@ def extra_repr(self): return s.format(**self.__dict__) -def add_compression_layers_torch(model, config, input_shape, device="cuda"): +def add_compression_layers(model, config, input_shape, device="cuda"): model = add_quantized_activations_to_model_layer(model, config) model = add_pruning_to_model(model, config) model.to(device) @@ -624,7 +624,7 @@ def build(self, input_shape): is_data=True, hgq_gamma=self.hgq_gamma, ) - self.input_shape = input_shape + self.input_shape = (1,) + input_shape[1:] def get_input_quantization_bits(self): return self.input_quantizer.get_quantization_bits() @@ -841,7 +841,7 @@ def check_is_built(self, input_shape): shape = [1] * len(input_shape) shape[1] = input_shape[1] self._shape = tuple(shape) - self.input_shape = input_shape + self.input_shape = (1,) + input_shape[1:] def apply_final_compression(self): self.final_compression_done = True @@ -1199,7 +1199,7 @@ def add_pruning_to_model(module, config, prefix=""): return module -def apply_final_compression_torch(module): +def apply_final_compression(module): for layer in module.modules(): if isinstance(layer, (PQWeightBiasBase, PQBatchNorm2d)): 
layer.apply_final_compression() @@ -1254,7 +1254,7 @@ def pre_finetune_functions(model): def post_pretrain_functions(model, config, train_loader=None, loss_func=None): if config.fitcompress_parameters.enable_fitcompress: - from pquant.core.torch_impl.fit_compress import call_fitcompress # noqa: 811 + from pquant.core.torch.fit_compress import call_fitcompress # noqa: 811 config, pruning_mask_importance_scores = call_fitcompress(config, model, train_loader, loss_func) @@ -1305,7 +1305,7 @@ def pdp_setup(model, config): @torch.no_grad -def get_layer_keep_ratio_torch(model): +def get_layer_keep_ratio(model): total_w = 0 remaining_weights = 0 for layer in model.modules(): @@ -1322,7 +1322,7 @@ def get_layer_keep_ratio_torch(model): return 0.0 -def get_model_losses_torch(model, losses): +def get_model_losses(model, losses): for layer in model.modules(): loss = 0.0 @@ -1370,7 +1370,7 @@ def create_default_layer_quantization_pruning_config(model): return config -def add_default_layer_quantization_pruning_to_config_torch(model, config): +def populate_config_with_all_layers(model, config): custom_scheme = create_default_layer_quantization_pruning_config(model) config.quantization_parameters.layer_specific = custom_scheme["layer_specific"] config.pruning_parameters.disable_pruning_for_layers = custom_scheme["disable_pruning_for_layers"] @@ -1412,3 +1412,15 @@ def remove_compression_layers(module, config): else: remove_compression_layers(layer, config) return module + + +def post_training_prune(model, config, calibration_data): + t_delta = config.pruning_parameters.t_delta + config.pruning_parameters.t_start_collecting_batch = 0 + for i in range(t_delta): + inputs = calibration_data[i] + if i == 0: + model = add_compression_layers(model, config, inputs.shape) + post_pretrain_functions(model, config) + model(inputs) + return remove_compression_layers(model, config) diff --git a/src/pquant/core/p_optim.py b/src/pquant/core/torch/optimizers.py similarity index 100% rename from src/pquant/core/p_optim.py rename to src/pquant/core/torch/optimizers.py diff --git a/src/pquant/core/torch_impl/quantizer.py b/src/pquant/core/torch/quantizer.py similarity index 91% rename from src/pquant/core/torch_impl/quantizer.py rename to src/pquant/core/torch/quantizer.py index cb7e78e..38224c6 100644 --- a/src/pquant/core/torch_impl/quantizer.py +++ b/src/pquant/core/torch/quantizer.py @@ -35,9 +35,9 @@ def post_pre_train_function(self): def forward(self, x): if self.use_hgq: - x = self.quantizer(x) + x = self.quantizer(x, training=self.training) else: - x = self.quantizer(x, k=self.k, i=self.i, f=self.f) + x = self.quantizer(x, k=self.k, i=self.i, f=self.f, training=self.training) return x def hgq_loss(self): diff --git a/src/pquant/core/torch_impl/train_torch.py b/src/pquant/core/torch/train.py similarity index 93% rename from src/pquant/core/torch_impl/train_torch.py rename to src/pquant/core/torch/train.py index 9ec1d64..71aae0c 100644 --- a/src/pquant/core/torch_impl/train_torch.py +++ b/src/pquant/core/torch/train.py @@ -1,6 +1,6 @@ import torch -from pquant.core.torch_impl.compressed_layers_torch import ( +from pquant.core.torch.layers import ( call_post_round_functions, post_epoch_functions, post_pretrain_functions, @@ -10,7 +10,7 @@ ) -def iterative_train_torch(model, config, train_func, valid_func, **kwargs): +def train_model(model, config, train_func, valid_func, **kwargs): """ Generic training loop, user provides training and validation functions """ diff --git a/src/pquant/core/train.py 
b/src/pquant/core/train.py deleted file mode 100644 index 776a1d7..0000000 --- a/src/pquant/core/train.py +++ /dev/null @@ -1,12 +0,0 @@ -import keras - - -def iterative_train(model, config, train_func, valid_func, **kwargs): - if keras.backend.backend() == "torch": - from pquant.core.torch_impl.train_torch import iterative_train_torch - - return iterative_train_torch(model, config, train_func, valid_func, **kwargs) - else: - from pquant.core.tf_impl.train_tf import iterative_train_tf - - return iterative_train_tf(model, config, train_func, valid_func, **kwargs) diff --git a/tests/test_keras_compression_layers.py b/tests/test_keras_compression_layers.py index 894a610..d8575a4 100644 --- a/tests/test_keras_compression_layers.py +++ b/tests/test_keras_compression_layers.py @@ -17,8 +17,8 @@ SeparableConv2D, ) -from pquant.core.tf_impl.activations import PQActivation -from pquant.core.tf_impl.compressed_layers_tf import ( +from pquant.activations import PQActivation +from pquant.layers import ( PQAvgPool1d, PQAvgPool2d, PQBatchNormalization, @@ -27,9 +27,9 @@ PQDense, PQDepthwiseConv2d, PQSeparableConv2d, - add_compression_layers_tf, - apply_final_compression_tf, - get_layer_keep_ratio_tf, + add_compression_layers, + apply_final_compression, + get_layer_keep_ratio, post_pretrain_functions, pre_finetune_functions, ) @@ -304,7 +304,7 @@ def test_separable_conv2d_add_remove_layers(config_pdp, conv2d_input): inputs = keras.Input(shape=conv2d_input.shape[1:]) out = SeparableConv2D(OUT_FEATURES, KERNEL_SIZE, use_bias=False, padding="same")(inputs) model = keras.Model(inputs=inputs, outputs=out, name="test_conv2d") - model = add_compression_layers_tf(model, config_pdp, conv2d_input.shape) + model = add_compression_layers(model, config_pdp, conv2d_input.shape) model(conv2d_input) post_pretrain_functions(model, config_pdp) @@ -321,7 +321,7 @@ def test_separable_conv2d_add_remove_layers(config_pdp, conv2d_input): output1 = model(conv2d_input) - model = apply_final_compression_tf(model) + model = apply_final_compression(model) output2 = model(conv2d_input) assert ops.all(ops.equal(output1, output2)) @@ -339,7 +339,7 @@ def test_separable_conv2d_get_layer_keep_ratio(config_pdp, conv2d_input): inputs = keras.Input(shape=conv2d_input.shape[1:]) out = SeparableConv2D(OUT_FEATURES, KERNEL_SIZE, use_bias=False, padding="same")(inputs) model = keras.Model(inputs=inputs, outputs=out, name="test_conv2d") - model = add_compression_layers_tf(model, config_pdp, conv2d_input.shape) + model = add_compression_layers(model, config_pdp, conv2d_input.shape) model(conv2d_input) post_pretrain_functions(model, config_pdp) pre_finetune_functions(model) @@ -353,9 +353,9 @@ def test_separable_conv2d_get_layer_keep_ratio(config_pdp, conv2d_input): mask_50pct_pw = ops.reshape(keras.random.shuffle(mask_50pct_pw), model.layers[1].pointwise_conv.pruning_layer.mask.shape) model.layers[1].pointwise_conv.pruning_layer.mask = mask_50pct_pw - ratio1 = get_layer_keep_ratio_tf(model) - model = apply_final_compression_tf(model) - ratio2 = get_layer_keep_ratio_tf(model) + ratio1 = get_layer_keep_ratio(model) + model = apply_final_compression(model) + ratio2 = get_layer_keep_ratio(model) assert ops.equal(ratio1, ratio2) assert ops.equal(ops.count_nonzero(mask_50pct_dw) / ops.size(mask_50pct_dw), ratio1) @@ -371,7 +371,7 @@ def test_separable_conv2d_trigger_post_pretraining(config_pdp, conv2d_input): act2 = ReLU()(out2) model = keras.Model(inputs=inputs, outputs=act2, name="test_conv2d") - model = add_compression_layers_tf(model, config_pdp, 
conv2d_input.shape) + model = add_compression_layers(model, config_pdp, conv2d_input.shape) assert model.layers[1].depthwise_conv.pruning_layer.is_pretraining is True assert model.layers[1].pointwise_conv.pruning_layer.is_pretraining is True assert model.layers[2].is_pretraining is True @@ -403,7 +403,7 @@ def test_dense_add_remove_layers(config_pdp, dense_input): inputs = keras.Input(shape=(dense_input.shape[1:])) out = Dense(OUT_FEATURES, use_bias=False)(inputs) model = keras.Model(inputs=inputs, outputs=out, name="test_dense") - model = add_compression_layers_tf(model, config_pdp, dense_input.shape) + model = add_compression_layers(model, config_pdp, dense_input.shape) model(dense_input) post_pretrain_functions(model, config_pdp) pre_finetune_functions(model) @@ -412,7 +412,7 @@ def test_dense_add_remove_layers(config_pdp, dense_input): mask_50pct = ops.reshape(keras.random.shuffle(mask_50pct), model.layers[1].pruning_layer.mask.shape) model.layers[1].pruning_layer.mask = mask_50pct output1 = model(dense_input) - model = apply_final_compression_tf(model) + model = apply_final_compression(model) output2 = model(dense_input) assert ops.all(ops.equal(output1, output2)) expected_nonzero_count = ops.count_nonzero(mask_50pct) @@ -425,7 +425,7 @@ def test_conv2d_add_remove_layers(config_pdp, conv2d_input): inputs = keras.Input(shape=conv2d_input.shape[1:]) out = Conv2D(OUT_FEATURES, KERNEL_SIZE, use_bias=False)(inputs) model = keras.Model(inputs=inputs, outputs=out, name="test_conv2d") - model = add_compression_layers_tf(model, config_pdp, conv2d_input.shape) + model = add_compression_layers(model, config_pdp, conv2d_input.shape) model(conv2d_input) post_pretrain_functions(model, config_pdp) pre_finetune_functions(model) @@ -434,7 +434,7 @@ def test_conv2d_add_remove_layers(config_pdp, conv2d_input): mask_50pct = ops.reshape(keras.random.shuffle(mask_50pct), model.layers[1].pruning_layer.mask.shape) model.layers[1].pruning_layer.mask = mask_50pct output1 = model(conv2d_input) - model = apply_final_compression_tf(model) + model = apply_final_compression(model) output2 = model(conv2d_input) assert ops.all(ops.equal(output1, output2)) expected_nonzero_count = ops.count_nonzero(mask_50pct) @@ -447,7 +447,7 @@ def test_depthwise_conv2d_add_remove_layers(config_pdp, conv2d_input): inputs = keras.Input(shape=conv2d_input.shape[1:]) out = DepthwiseConv2D(KERNEL_SIZE, use_bias=False)(inputs) model = keras.Model(inputs=inputs, outputs=out, name="test_conv2d") - model = add_compression_layers_tf(model, config_pdp, conv2d_input.shape) + model = add_compression_layers(model, config_pdp, conv2d_input.shape) model(conv2d_input) post_pretrain_functions(model, config_pdp) pre_finetune_functions(model) @@ -456,7 +456,7 @@ def test_depthwise_conv2d_add_remove_layers(config_pdp, conv2d_input): mask_50pct = ops.reshape(keras.random.shuffle(mask_50pct), model.layers[1].pruning_layer.mask.shape) model.layers[1].pruning_layer.mask = mask_50pct output1 = model(conv2d_input) - model = apply_final_compression_tf(model) + model = apply_final_compression(model) output2 = model(conv2d_input) assert ops.all(ops.equal(output1, output2)) expected_nonzero_count = ops.count_nonzero(mask_50pct) @@ -469,7 +469,7 @@ def test_conv1d_add_remove_layers(config_pdp, conv1d_input): inputs = keras.Input(shape=conv1d_input.shape[1:]) out = Conv1D(OUT_FEATURES, KERNEL_SIZE, use_bias=False)(inputs) model = keras.Model(inputs=inputs, outputs=out, name="test_conv1d") - model = add_compression_layers_tf(model, config_pdp, conv1d_input.shape) + 
model = add_compression_layers(model, config_pdp, conv1d_input.shape) model(conv1d_input) post_pretrain_functions(model, config_pdp) pre_finetune_functions(model) @@ -478,7 +478,7 @@ def test_conv1d_add_remove_layers(config_pdp, conv1d_input): mask_50pct = ops.reshape(keras.random.shuffle(mask_50pct), model.layers[1].pruning_layer.mask.shape) model.layers[1].pruning_layer.mask = mask_50pct output1 = model(conv1d_input) - model = apply_final_compression_tf(model) + model = apply_final_compression(model) output2 = model(conv1d_input) assert ops.all(ops.equal(output1, output2)) expected_nonzero_count = ops.count_nonzero(mask_50pct) @@ -491,7 +491,7 @@ def test_dense_get_layer_keep_ratio(config_pdp, dense_input): inputs = keras.Input(shape=(dense_input.shape[1:])) out = Dense(OUT_FEATURES, use_bias=False)(inputs) model = keras.Model(inputs=inputs, outputs=out, name="test_dense") - model = add_compression_layers_tf(model, config_pdp, dense_input.shape) + model = add_compression_layers(model, config_pdp, dense_input.shape) model(dense_input) post_pretrain_functions(model, config_pdp) pre_finetune_functions(model) @@ -499,9 +499,9 @@ def test_dense_get_layer_keep_ratio(config_pdp, dense_input): mask_50pct = ops.cast(ops.linspace(0, 1, num=OUT_FEATURES * IN_FEATURES) < 0.5, "float32") mask_50pct = ops.reshape(keras.random.shuffle(mask_50pct), model.layers[1].pruning_layer.mask.shape) model.layers[1].pruning_layer.mask = mask_50pct - ratio1 = get_layer_keep_ratio_tf(model) - model = apply_final_compression_tf(model) - ratio2 = get_layer_keep_ratio_tf(model) + ratio1 = get_layer_keep_ratio(model) + model = apply_final_compression(model) + ratio2 = get_layer_keep_ratio(model) assert ops.equal(ratio1, ratio2) assert ops.equal(ops.count_nonzero(mask_50pct) / ops.size(mask_50pct), ratio1) @@ -511,7 +511,7 @@ def test_conv2d_get_layer_keep_ratio(config_pdp, conv2d_input): inputs = keras.Input(shape=conv2d_input.shape[1:]) out = Conv2D(OUT_FEATURES, KERNEL_SIZE, use_bias=False)(inputs) model = keras.Model(inputs=inputs, outputs=out, name="test_conv2d") - model = add_compression_layers_tf(model, config_pdp, conv2d_input.shape) + model = add_compression_layers(model, config_pdp, conv2d_input.shape) model(conv2d_input) post_pretrain_functions(model, config_pdp) pre_finetune_functions(model) @@ -519,9 +519,9 @@ def test_conv2d_get_layer_keep_ratio(config_pdp, conv2d_input): mask_50pct = ops.cast(ops.linspace(0, 1, num=OUT_FEATURES * IN_FEATURES * KERNEL_SIZE * KERNEL_SIZE) < 0.5, "float32") mask_50pct = ops.reshape(keras.random.shuffle(mask_50pct), model.layers[1].pruning_layer.mask.shape) model.layers[1].pruning_layer.mask = mask_50pct - ratio1 = get_layer_keep_ratio_tf(model) - model = apply_final_compression_tf(model) - ratio2 = get_layer_keep_ratio_tf(model) + ratio1 = get_layer_keep_ratio(model) + model = apply_final_compression(model) + ratio2 = get_layer_keep_ratio(model) assert ops.equal(ratio1, ratio2) assert ops.equal(ops.count_nonzero(mask_50pct) / ops.size(mask_50pct), ratio1) @@ -531,7 +531,7 @@ def test_depthwise_conv2d_get_layer_keep_ratio(config_pdp, conv2d_input): inputs = keras.Input(shape=conv2d_input.shape[1:]) out = DepthwiseConv2D(KERNEL_SIZE, use_bias=False)(inputs) model = keras.Model(inputs=inputs, outputs=out, name="test_conv2d") - model = add_compression_layers_tf(model, config_pdp, conv2d_input.shape) + model = add_compression_layers(model, config_pdp, conv2d_input.shape) model(conv2d_input) post_pretrain_functions(model, config_pdp) pre_finetune_functions(model) @@ -539,9 +539,9 
@@ def test_depthwise_conv2d_get_layer_keep_ratio(config_pdp, conv2d_input): mask_50pct = ops.cast(ops.linspace(0, 1, num=ops.size(model.layers[1].kernel)) < 0.5, "float32") mask_50pct = ops.reshape(keras.random.shuffle(mask_50pct), model.layers[1].pruning_layer.mask.shape) model.layers[1].pruning_layer.mask = mask_50pct - ratio1 = get_layer_keep_ratio_tf(model) - model = apply_final_compression_tf(model) - ratio2 = get_layer_keep_ratio_tf(model) + ratio1 = get_layer_keep_ratio(model) + model = apply_final_compression(model) + ratio2 = get_layer_keep_ratio(model) assert ops.equal(ratio1, ratio2) assert ops.equal(ops.count_nonzero(mask_50pct) / ops.size(mask_50pct), ratio1) @@ -552,7 +552,7 @@ def test_conv1d_get_layer_keep_ratio(config_pdp, conv1d_input): inputs = keras.Input(shape=conv1d_input.shape[1:]) out = Conv1D(OUT_FEATURES, KERNEL_SIZE, use_bias=False)(inputs) model = keras.Model(inputs=inputs, outputs=out, name="test_conv1d") - model = add_compression_layers_tf(model, config_pdp, conv1d_input.shape) + model = add_compression_layers(model, config_pdp, conv1d_input.shape) model(conv1d_input) post_pretrain_functions(model, config_pdp) pre_finetune_functions(model) @@ -560,9 +560,9 @@ def test_conv1d_get_layer_keep_ratio(config_pdp, conv1d_input): mask_50pct = ops.cast(ops.linspace(0, 1, num=ops.size(model.layers[1].kernel)) < 0.5, "float32") mask_50pct = ops.reshape(keras.random.shuffle(mask_50pct), model.layers[1].pruning_layer.mask.shape) model.layers[1].pruning_layer.mask = mask_50pct - ratio1 = get_layer_keep_ratio_tf(model) - model = apply_final_compression_tf(model) - ratio2 = get_layer_keep_ratio_tf(model) + ratio1 = get_layer_keep_ratio(model) + model = apply_final_compression(model) + ratio2 = get_layer_keep_ratio(model) assert ops.equal(ratio1, ratio2) assert ops.equal(ops.count_nonzero(mask_50pct) / ops.size(mask_50pct), ratio1) @@ -572,7 +572,7 @@ def test_check_activation(config_pdp, dense_input): inputs = keras.Input(shape=dense_input.shape[1:]) out = Dense(OUT_FEATURES, use_bias=False, activation="relu")(inputs) model = keras.Model(inputs=inputs, outputs=out, name="test_dense") - model = add_compression_layers_tf(model, config_pdp, dense_input.shape) + model = add_compression_layers(model, config_pdp, dense_input.shape) assert isinstance(model.layers[2], ReLU) @@ -580,7 +580,7 @@ def test_check_activation(config_pdp, dense_input): inputs = keras.Input(shape=dense_input.shape[1:]) out = Dense(OUT_FEATURES, use_bias=False, activation="relu")(inputs) model = keras.Model(inputs=inputs, outputs=out, name="test_dense") - model = add_compression_layers_tf(model, config_pdp, dense_input.shape) + model = add_compression_layers(model, config_pdp, dense_input.shape) assert isinstance(model.layers[2], PQActivation) assert model.layers[2].activation_name == "relu" @@ -589,7 +589,7 @@ def test_check_activation(config_pdp, dense_input): inputs = keras.Input(shape=dense_input.shape[1:]) out = Dense(OUT_FEATURES, use_bias=False, activation="tanh")(inputs) model = keras.Model(inputs=inputs, outputs=out, name="test_dense") - model = add_compression_layers_tf(model, config_pdp, dense_input.shape) + model = add_compression_layers(model, config_pdp, dense_input.shape) assert isinstance(model.layers[2], Activation) assert model.layers[2].activation.__name__ == "tanh" @@ -598,7 +598,7 @@ def test_check_activation(config_pdp, dense_input): inputs = keras.Input(shape=dense_input.shape[1:]) out = Dense(OUT_FEATURES, use_bias=False, activation="tanh")(inputs) model = keras.Model(inputs=inputs, 
outputs=out, name="test_dense") - model = add_compression_layers_tf(model, config_pdp, dense_input.shape) + model = add_compression_layers(model, config_pdp, dense_input.shape) assert isinstance(model.layers[2], PQActivation) assert model.layers[2].activation_name == "tanh" @@ -611,7 +611,7 @@ def test_hgq_activation_built(config_pdp, conv2d_input): act = ReLU()(out) avg = AveragePooling2D(2)(act) model = keras.Model(inputs=inputs, outputs=avg, name="test_conv2d_hgq") - model = add_compression_layers_tf(model, config_pdp, conv2d_input.shape) + model = add_compression_layers(model, config_pdp, conv2d_input.shape) is_built = [] for layer in model.layers: @@ -627,7 +627,7 @@ def test_hgq_activation_built(config_pdp, conv2d_input): out = Conv2D(OUT_FEATURES, KERNEL_SIZE, use_bias=True)(inputs) act = Activation("tanh")(out) model = keras.Model(inputs=inputs, outputs=act, name="test_conv2d_hgq") - model = add_compression_layers_tf(model, config_pdp, conv2d_input.shape) + model = add_compression_layers(model, config_pdp, conv2d_input.shape) is_built = [] for layer in model.layers: @@ -655,7 +655,7 @@ def test_ap_conv2d_channels_last_transpose(config_ap, conv2d_input): model_cf = keras.Model(inputs=inputs, outputs=out, name="test_conv2d") model_cf(conv2d_input) - model_cf = add_compression_layers_tf(model_cf, config_ap, inp.shape) + model_cf = add_compression_layers(model_cf, config_ap, inp.shape) weight_cf = model_cf.layers[1].kernel post_pretrain_functions(model_cf, config_ap) @@ -669,7 +669,7 @@ def test_ap_conv2d_channels_last_transpose(config_ap, conv2d_input): inputs = keras.Input(shape=inp.shape[1:]) out = Conv2D(OUT_FEATURES, KERNEL_SIZE, use_bias=False, padding="same")(inputs) model_cl = keras.Model(inputs=inputs, outputs=out, name="test_conv2d1") - model_cl = add_compression_layers_tf(model_cl, config_ap, inp.shape) + model_cl = add_compression_layers(model_cl, config_ap, inp.shape) model_cl.layers[1]._kernel.assign(weight_cf) post_pretrain_functions(model_cl, config_ap) @@ -699,7 +699,7 @@ def test_ap_conv1d_channels_last_transpose(config_ap, conv1d_input): inputs = keras.Input(shape=inp.shape[1:]) out = Conv1D(OUT_FEATURES, KERNEL_SIZE, use_bias=False, padding="same", data_format="channels_first")(inputs) model_cf = keras.Model(inputs=inputs, outputs=out, name="test_conv1d") - model_cf = add_compression_layers_tf(model_cf, config_ap, inp.shape) + model_cf = add_compression_layers(model_cf, config_ap, inp.shape) weight_cf = model_cf.layers[1]._kernel post_pretrain_functions(model_cf, config_ap) @@ -713,7 +713,7 @@ def test_ap_conv1d_channels_last_transpose(config_ap, conv1d_input): inputs = keras.Input(shape=inp.shape[1:]) out = Conv1D(OUT_FEATURES, KERNEL_SIZE, use_bias=False, padding="same")(inputs) model_cl = keras.Model(inputs=inputs, outputs=out, name="test_conv1d1") - model_cl = add_compression_layers_tf(model_cl, config_ap, inp.shape) + model_cl = add_compression_layers(model_cl, config_ap, inp.shape) model_cl.layers[1]._kernel.assign(weight_cf) post_pretrain_functions(model_cl, config_ap) @@ -740,7 +740,7 @@ def test_ap_depthwiseconv2d_channels_last_transpose(config_ap, conv2d_input): inputs = keras.Input(shape=inp.shape[1:]) out = DepthwiseConv2D(KERNEL_SIZE, use_bias=False, padding="same")(inputs) model_cf = keras.Model(inputs=inputs, outputs=out, name="test_dwconv2d") - model_cf = add_compression_layers_tf(model_cf, config_ap, inp.shape) + model_cf = add_compression_layers(model_cf, config_ap, inp.shape) weight_cf = model_cf.layers[1]._kernel post_pretrain_functions(model_cf, 
config_ap) @@ -754,7 +754,7 @@ def test_ap_depthwiseconv2d_channels_last_transpose(config_ap, conv2d_input): inputs = keras.Input(shape=inp.shape[1:]) out = DepthwiseConv2D(KERNEL_SIZE, use_bias=False, padding="same")(inputs) model_cl = keras.Model(inputs=inputs, outputs=out, name="test_dwconv2d1") - model_cl = add_compression_layers_tf(model_cl, config_ap, inp.shape) + model_cl = add_compression_layers(model_cl, config_ap, inp.shape) model_cl.layers[1]._kernel.assign(weight_cf) post_pretrain_functions(model_cl, config_ap) @@ -781,7 +781,7 @@ def test_ap_dense_channels_last_transpose(config_ap, dense_input): inputs = keras.Input(shape=inp.shape[1:]) out = Dense(OUT_FEATURES, use_bias=False)(inputs) model_cf = keras.Model(inputs=inputs, outputs=out, name="test_dense") - model_cf = add_compression_layers_tf(model_cf, config_ap, inp.shape) + model_cf = add_compression_layers(model_cf, config_ap, inp.shape) weight_cf = model_cf.layers[1]._kernel post_pretrain_functions(model_cf, config_ap) @@ -794,7 +794,7 @@ def test_ap_dense_channels_last_transpose(config_ap, dense_input): inputs = keras.Input(shape=inp.shape[1:]) out = Dense(OUT_FEATURES, use_bias=False)(inputs) model_cl = keras.Model(inputs=inputs, outputs=out, name="test_dense1") - model_cl = add_compression_layers_tf(model_cl, config_ap, inp.shape) + model_cl = add_compression_layers(model_cl, config_ap, inp.shape) model_cl.layers[1]._kernel.assign(weight_cf) post_pretrain_functions(model_cl, config_ap) @@ -824,7 +824,7 @@ def test_wanda_conv2d_channels_last_transpose(config_wanda, conv2d_input): inputs = keras.Input(shape=inp.shape[1:]) out = Conv2D(OUT_FEATURES, KERNEL_SIZE, use_bias=False, padding="same")(inputs) model_cf = keras.Model(inputs=inputs, outputs=out, name="test_conv2d") - model_cf = add_compression_layers_tf(model_cf, config_wanda, inp.shape) + model_cf = add_compression_layers(model_cf, config_wanda, inp.shape) weight_cf = model_cf.layers[1]._kernel post_pretrain_functions(model_cf, config_wanda) @@ -838,7 +838,7 @@ def test_wanda_conv2d_channels_last_transpose(config_wanda, conv2d_input): inputs = keras.Input(shape=inp.shape[1:]) out = Conv2D(OUT_FEATURES, KERNEL_SIZE, use_bias=False, padding="same")(inputs) model_cl = keras.Model(inputs=inputs, outputs=out, name="test_conv2d1") - model_cl = add_compression_layers_tf(model_cl, config_wanda, inp.shape) + model_cl = add_compression_layers(model_cl, config_wanda, inp.shape) model_cl.layers[1]._kernel.assign(weight_cf) post_pretrain_functions(model_cl, config_wanda) @@ -865,7 +865,7 @@ def test_wanda_conv1d_channels_last_transpose(config_wanda, conv1d_input): inputs = keras.Input(shape=inp.shape[1:]) out = Conv1D(OUT_FEATURES, KERNEL_SIZE, use_bias=False, padding="same")(inputs) model_cf = keras.Model(inputs=inputs, outputs=out, name="test_conv1d") - model_cf = add_compression_layers_tf(model_cf, config_wanda, inp.shape) + model_cf = add_compression_layers(model_cf, config_wanda, inp.shape) weight_cf = model_cf.layers[1]._kernel post_pretrain_functions(model_cf, config_wanda) @@ -879,7 +879,7 @@ def test_wanda_conv1d_channels_last_transpose(config_wanda, conv1d_input): inputs = keras.Input(shape=inp.shape[1:]) out = Conv1D(OUT_FEATURES, KERNEL_SIZE, use_bias=False, padding="same")(inputs) model_cl = keras.Model(inputs=inputs, outputs=out, name="test_conv1d1") - model_cl = add_compression_layers_tf(model_cl, config_wanda, inp.shape) + model_cl = add_compression_layers(model_cl, config_wanda, inp.shape) model_cl.layers[1]._kernel.assign(weight_cf) 
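+    # reuse the channels_first kernel so the channels_last model starts from
+    # identical weights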
post_pretrain_functions(model_cl, config_wanda) @@ -906,7 +906,7 @@ def test_wanda_depthwiseconv2d_channels_last_transpose(config_wanda, conv2d_inpu inputs = keras.Input(shape=inp.shape[1:]) out = DepthwiseConv2D(KERNEL_SIZE, use_bias=False, padding="same")(inputs) model_cf = keras.Model(inputs=inputs, outputs=out, name="test_dwconv2d") - model_cf = add_compression_layers_tf(model_cf, config_wanda, inp.shape) + model_cf = add_compression_layers(model_cf, config_wanda, inp.shape) weight_cf = model_cf.layers[1]._kernel post_pretrain_functions(model_cf, config_wanda) @@ -920,7 +920,7 @@ def test_wanda_depthwiseconv2d_channels_last_transpose(config_wanda, conv2d_inpu inputs = keras.Input(shape=inp.shape[1:]) out = DepthwiseConv2D(KERNEL_SIZE, use_bias=False, padding="same")(inputs) model_cl = keras.Model(inputs=inputs, outputs=out, name="test_dwconv2d1") - model_cl = add_compression_layers_tf(model_cl, config_wanda, inp.shape) + model_cl = add_compression_layers(model_cl, config_wanda, inp.shape) model_cl.layers[1]._kernel.assign(weight_cf) post_pretrain_functions(model_cl, config_wanda) @@ -947,7 +947,7 @@ def test_wanda_dense_channels_last_transpose(config_wanda, dense_input): inputs = keras.Input(shape=inp.shape[1:]) out = Dense(OUT_FEATURES, use_bias=False)(inputs) model_cf = keras.Model(inputs=inputs, outputs=out, name="test_dense") - model_cf = add_compression_layers_tf(model_cf, config_wanda, inp.shape) + model_cf = add_compression_layers(model_cf, config_wanda, inp.shape) weight_cf = model_cf.layers[1]._kernel post_pretrain_functions(model_cf, config_wanda) @@ -960,7 +960,7 @@ def test_wanda_dense_channels_last_transpose(config_wanda, dense_input): inputs = keras.Input(shape=inp.shape[1:]) out = Dense(OUT_FEATURES, use_bias=False)(inputs) model_cl = keras.Model(inputs=inputs, outputs=out, name="test_dense1") - model_cl = add_compression_layers_tf(model_cl, config_wanda, inp.shape) + model_cl = add_compression_layers(model_cl, config_wanda, inp.shape) model_cl.layers[1]._kernel.assign(weight_cf) post_pretrain_functions(model_cl, config_wanda) @@ -990,7 +990,7 @@ def test_pdp_conv2d_channels_last_transpose(config_pdp, conv2d_input): inputs = keras.Input(shape=inp.shape[1:]) out = Conv2D(OUT_FEATURES, KERNEL_SIZE, use_bias=False, padding="same")(inputs) model_cf = keras.Model(inputs=inputs, outputs=out, name="test_conv2d") - model_cf = add_compression_layers_tf(model_cf, config_pdp, inp.shape) + model_cf = add_compression_layers(model_cf, config_pdp, inp.shape) weight_cf = model_cf.layers[1]._kernel post_pretrain_functions(model_cf, config_pdp) @@ -1004,7 +1004,7 @@ def test_pdp_conv2d_channels_last_transpose(config_pdp, conv2d_input): inputs = keras.Input(shape=inp.shape[1:]) out = Conv2D(OUT_FEATURES, KERNEL_SIZE, use_bias=False, padding="same")(inputs) model_cl = keras.Model(inputs=inputs, outputs=out, name="test_conv2d1") - model_cl = add_compression_layers_tf(model_cl, config_pdp, inp.shape) + model_cl = add_compression_layers(model_cl, config_pdp, inp.shape) model_cl.layers[1]._kernel.assign(weight_cf) post_pretrain_functions(model_cl, config_pdp) @@ -1031,7 +1031,7 @@ def test_pdp_conv1d_channels_last_transpose(config_pdp, conv1d_input): inputs = keras.Input(shape=inp.shape[1:]) out = Conv1D(OUT_FEATURES, KERNEL_SIZE, use_bias=False, padding="same")(inputs) model_cf = keras.Model(inputs=inputs, outputs=out, name="test_conv1d") - model_cf = add_compression_layers_tf(model_cf, config_pdp, inp.shape) + model_cf = add_compression_layers(model_cf, config_pdp, inp.shape) weight_cf = 
model_cf.layers[1]._kernel post_pretrain_functions(model_cf, config_pdp) @@ -1045,7 +1045,7 @@ def test_pdp_conv1d_channels_last_transpose(config_pdp, conv1d_input): inputs = keras.Input(shape=inp.shape[1:]) out = Conv1D(OUT_FEATURES, KERNEL_SIZE, use_bias=False, padding="same")(inputs) model_cl = keras.Model(inputs=inputs, outputs=out, name="test_conv1d1") - model_cl = add_compression_layers_tf(model_cl, config_pdp, inp.shape) + model_cl = add_compression_layers(model_cl, config_pdp, inp.shape) model_cl.layers[1]._kernel.assign(weight_cf) post_pretrain_functions(model_cl, config_pdp) @@ -1072,7 +1072,7 @@ def test_pdp_depthwiseconv2d_channels_last_transpose(config_pdp, conv2d_input): inputs = keras.Input(shape=inp.shape[1:]) out = DepthwiseConv2D(KERNEL_SIZE, use_bias=False, padding="same")(inputs) model_cf = keras.Model(inputs=inputs, outputs=out, name="test_dwconv2d") - model_cf = add_compression_layers_tf(model_cf, config_pdp, inp.shape) + model_cf = add_compression_layers(model_cf, config_pdp, inp.shape) weight_cf = model_cf.layers[1]._kernel post_pretrain_functions(model_cf, config_pdp) @@ -1086,7 +1086,7 @@ def test_pdp_depthwiseconv2d_channels_last_transpose(config_pdp, conv2d_input): inputs = keras.Input(shape=inp.shape[1:]) out = DepthwiseConv2D(KERNEL_SIZE, use_bias=False, padding="same")(inputs) model_cl = keras.Model(inputs=inputs, outputs=out, name="test_dwconv2d1") - model_cl = add_compression_layers_tf(model_cl, config_pdp, inp.shape) + model_cl = add_compression_layers(model_cl, config_pdp, inp.shape) model_cl.layers[1]._kernel.assign(weight_cf) post_pretrain_functions(model_cl, config_pdp) @@ -1112,7 +1112,7 @@ def test_pdp_dense_channels_last_transpose(config_pdp, dense_input): inputs = keras.Input(shape=inp.shape[1:]) out = Dense(OUT_FEATURES, use_bias=False)(inputs) model_cf = keras.Model(inputs=inputs, outputs=out, name="test_dense") - model_cf = add_compression_layers_tf(model_cf, config_pdp, inp.shape) + model_cf = add_compression_layers(model_cf, config_pdp, inp.shape) weight_cf = model_cf.layers[1]._kernel post_pretrain_functions(model_cf, config_pdp) @@ -1125,7 +1125,7 @@ def test_pdp_dense_channels_last_transpose(config_pdp, dense_input): inputs = keras.Input(shape=inp.shape[1:]) out = Dense(OUT_FEATURES, use_bias=False)(inputs) model_cl = keras.Model(inputs=inputs, outputs=out, name="test_dense1") - model_cl = add_compression_layers_tf(model_cl, config_pdp, inp.shape) + model_cl = add_compression_layers(model_cl, config_pdp, inp.shape) model_cl.layers[1]._kernel.assign(weight_cf) post_pretrain_functions(model_cl, config_pdp) model_cl(inp, training=True) @@ -1154,7 +1154,7 @@ def test_cs_conv2d_channels_last_transpose(config_cs, conv2d_input): inputs = keras.Input(shape=inp.shape[1:]) out = Conv2D(OUT_FEATURES, KERNEL_SIZE, use_bias=False, padding="same")(inputs) model_cf = keras.Model(inputs=inputs, outputs=out, name="test_conv2d") - model_cf = add_compression_layers_tf(model_cf, config_cs, inp.shape) + model_cf = add_compression_layers(model_cf, config_cs, inp.shape) weight_cf = model_cf.layers[1]._kernel s = model_cf.layers[1].pruning_layer.s.value new_s = np.zeros_like(s) + 0.1 @@ -1171,7 +1171,7 @@ def test_cs_conv2d_channels_last_transpose(config_cs, conv2d_input): inputs = keras.Input(shape=inp.shape[1:]) out = Conv2D(OUT_FEATURES, KERNEL_SIZE, use_bias=False, padding="same")(inputs) model_cl = keras.Model(inputs=inputs, outputs=out, name="test_conv2d1") - model_cl = add_compression_layers_tf(model_cl, config_cs, inp.shape) + model_cl = 
add_compression_layers(model_cl, config_cs, inp.shape) model_cl.layers[1]._kernel.assign(weight_cf) model_cl.layers[1].pruning_layer.s.assign(new_s) @@ -1198,7 +1198,7 @@ def test_cs_conv1d_channels_last_transpose(config_cs, conv1d_input): inputs = keras.Input(shape=inp.shape[1:]) out = Conv1D(OUT_FEATURES, KERNEL_SIZE, use_bias=False, padding="same")(inputs) model_cf = keras.Model(inputs=inputs, outputs=out, name="test_conv1d") - model_cf = add_compression_layers_tf(model_cf, config_cs, inp.shape) + model_cf = add_compression_layers(model_cf, config_cs, inp.shape) weight_cf = model_cf.layers[1]._kernel post_pretrain_functions(model_cf, config_cs) @@ -1212,7 +1212,7 @@ def test_cs_conv1d_channels_last_transpose(config_cs, conv1d_input): inputs = keras.Input(shape=inp.shape[1:]) out = Conv1D(OUT_FEATURES, KERNEL_SIZE, use_bias=False, padding="same")(inputs) model_cl = keras.Model(inputs=inputs, outputs=out, name="test_conv1d1") - model_cl = add_compression_layers_tf(model_cl, config_cs, inp.shape) + model_cl = add_compression_layers(model_cl, config_cs, inp.shape) model_cl.layers[1]._kernel.assign(weight_cf) post_pretrain_functions(model_cl, config_cs) @@ -1239,7 +1239,7 @@ def test_cs_depthwiseconv2d_channels_last_transpose(config_cs, conv2d_input): inputs = keras.Input(shape=inp.shape[1:]) out = DepthwiseConv2D(KERNEL_SIZE, use_bias=False, padding="same")(inputs) model_cf = keras.Model(inputs=inputs, outputs=out, name="test_dwconv2d") - model_cf = add_compression_layers_tf(model_cf, config_cs, inp.shape) + model_cf = add_compression_layers(model_cf, config_cs, inp.shape) weight_cf = model_cf.layers[1]._kernel post_pretrain_functions(model_cf, config_cs) @@ -1253,7 +1253,7 @@ def test_cs_depthwiseconv2d_channels_last_transpose(config_cs, conv2d_input): inputs = keras.Input(shape=inp.shape[1:]) out = DepthwiseConv2D(KERNEL_SIZE, use_bias=False, padding="same")(inputs) model_cl = keras.Model(inputs=inputs, outputs=out, name="test_dwconv2d1") - model_cl = add_compression_layers_tf(model_cl, config_cs, inp.shape) + model_cl = add_compression_layers(model_cl, config_cs, inp.shape) model_cl.layers[1]._kernel.assign(weight_cf) post_pretrain_functions(model_cl, config_cs) @@ -1280,7 +1280,7 @@ def test_cs_dense_channels_last_transpose(config_cs, dense_input): inputs = keras.Input(shape=inp.shape[1:]) out = Dense(OUT_FEATURES, use_bias=False)(inputs) model_cf = keras.Model(inputs=inputs, outputs=out, name="test_dense") - model_cf = add_compression_layers_tf(model_cf, config_cs, inp.shape) + model_cf = add_compression_layers(model_cf, config_cs, inp.shape) weight_cf = model_cf.layers[1]._kernel post_pretrain_functions(model_cf, config_cs) @@ -1293,7 +1293,7 @@ def test_cs_dense_channels_last_transpose(config_cs, dense_input): inputs = keras.Input(shape=inp.shape[1:]) out = Dense(OUT_FEATURES, use_bias=False)(inputs) model_cl = keras.Model(inputs=inputs, outputs=out, name="test_dense1") - model_cl = add_compression_layers_tf(model_cl, config_cs, inp.shape) + model_cl = add_compression_layers(model_cl, config_cs, inp.shape) model_cl.layers[1]._kernel.assign(weight_cf) post_pretrain_functions(model_cl, config_cs) @@ -1328,7 +1328,7 @@ def test_calculate_pruning_budget(config_wanda, dense_input): weight = ops.reshape(ops.convert_to_tensor(weight), (IN_FEATURES, OUT_FEATURES)) weight2 = ops.reshape(ops.linspace(0.01, 0.99, OUT_FEATURES * OUT_FEATURES), (OUT_FEATURES, OUT_FEATURES)) - model = add_compression_layers_tf(model, config_wanda, dense_input.shape) + model = add_compression_layers(model, 
config_wanda, dense_input.shape) model.layers[1]._kernel.assign(weight) model.layers[2]._kernel.assign(weight2) # Triggers calculation of pruning budget for PDP and Wanda @@ -1353,7 +1353,7 @@ def test_trigger_post_pretraining(config_pdp, conv2d_input): act2 = ReLU()(out2) model = keras.Model(inputs=inputs, outputs=act2, name="test_conv2d") - model = add_compression_layers_tf(model, config_pdp, conv2d_input.shape) + model = add_compression_layers(model, config_pdp, conv2d_input.shape) assert model.layers[1].pruning_layer.is_pretraining is True assert model.layers[2].is_pretraining is True @@ -1378,7 +1378,7 @@ def test_hgq_weight_shape(config_pdp, dense_input): act2 = ReLU()(out2) model = keras.Model(inputs=inputs, outputs=act2, name="test_conv2d") - model = add_compression_layers_tf(model, config_pdp, dense_input.shape) + model = add_compression_layers(model, config_pdp, dense_input.shape) assert model.layers[1].weight_quantizer.quantizer.quantizer._i.shape == model.layers[1].kernel.shape layer_2_input_shape = [1] + list(model.layers[2].input.shape[1:]) assert model.layers[2].input_quantizer.quantizer.quantizer._i.shape == layer_2_input_shape @@ -1393,7 +1393,7 @@ def test_replace_weight_with_original_value(config_pdp, conv2d_input, conv1d_inp model = keras.Model(inputs=inputs, outputs=out) orig_output = model(dense_input) - model = add_compression_layers_tf(model, config_pdp, dense_input.shape) + model = add_compression_layers(model, config_pdp, dense_input.shape) output = model(dense_input) assert ops.all(ops.equal(orig_output, output)) @@ -1403,7 +1403,7 @@ def test_replace_weight_with_original_value(config_pdp, conv2d_input, conv1d_inp model = keras.Model(inputs=inputs, outputs=out) orig_output = model(conv2d_input) - model = add_compression_layers_tf(model, config_pdp, conv2d_input.shape) + model = add_compression_layers(model, config_pdp, conv2d_input.shape) output = model(conv2d_input) assert ops.all(ops.equal(orig_output, output)) # Case Conv1D @@ -1412,7 +1412,7 @@ def test_replace_weight_with_original_value(config_pdp, conv2d_input, conv1d_inp model = keras.Model(inputs=inputs, outputs=out) orig_output = model(conv1d_input) - model = add_compression_layers_tf(model, config_pdp, conv1d_input.shape) + model = add_compression_layers(model, config_pdp, conv1d_input.shape) output = model(conv1d_input) assert ops.all(ops.equal(orig_output, output)) @@ -1426,7 +1426,7 @@ def test_set_activation_custom_bits_hgq(config_pdp, conv2d_input): out = AveragePooling2D(2)(out) out = Activation("tanh")(out) model = keras.Model(inputs=inputs, outputs=out) - model = add_compression_layers_tf(model, config_pdp, conv2d_input.shape) + model = add_compression_layers(model, config_pdp, conv2d_input.shape) for m in model.layers: if isinstance(m, (PQConv2d)): @@ -1465,7 +1465,7 @@ def test_set_activation_custom_bits_hgq(config_pdp, conv2d_input): out = AveragePooling2D(2)(out) out = Activation("tanh")(out) model = keras.Model(inputs=inputs, outputs=out) - model = add_compression_layers_tf(model, config_pdp, conv2d_input.shape) + model = add_compression_layers(model, config_pdp, conv2d_input.shape) for m in model.layers: if isinstance(m, (PQConv2d)): _, iw, fw = m.get_weight_quantization_bits() @@ -1497,7 +1497,7 @@ def test_set_activation_custom_bits_quantizer(config_pdp, conv2d_input): out = AveragePooling2D(2)(out) out = Activation("tanh")(out) model = keras.Model(inputs=inputs, outputs=out) - model = add_compression_layers_tf(model, config_pdp, conv2d_input.shape) + model = 
add_compression_layers(model, config_pdp, conv2d_input.shape) for m in model.layers: if isinstance(m, (PQConv2d)): @@ -1532,7 +1532,7 @@ def test_set_activation_custom_bits_quantizer(config_pdp, conv2d_input): out = AveragePooling2D(2)(out) out = Activation("tanh")(out) model = keras.Model(inputs=inputs, outputs=out) - model = add_compression_layers_tf(model, config_pdp, conv2d_input.shape) + model = add_compression_layers(model, config_pdp, conv2d_input.shape) for m in model.layers: if isinstance(m, (PQConv2d)): assert m.i_weight == 1.0 @@ -1558,7 +1558,7 @@ def test_ebops_dense(config_pdp, dense_input): out = Dense(OUT_FEATURES, use_bias=False)(inputs) act = ReLU()(out) model = keras.Model(inputs=inputs, outputs=act, name="test_dense") - model = add_compression_layers_tf(model, config_pdp, dense_input.shape) + model = add_compression_layers(model, config_pdp, dense_input.shape) post_pretrain_functions(model, config_pdp) model.layers[1].hgq_loss(dense_input.shape) @@ -1566,7 +1566,7 @@ def test_ebops_dense(config_pdp, dense_input): out = Dense(OUT_FEATURES, use_bias=True)(inputs) act = ReLU()(out) model = keras.Model(inputs=inputs, outputs=act, name="test_dense") - model = add_compression_layers_tf(model, config_pdp, dense_input.shape) + model = add_compression_layers(model, config_pdp, dense_input.shape) post_pretrain_functions(model, config_pdp) model.layers[1].hgq_loss(dense_input.shape) @@ -1578,7 +1578,7 @@ def test_ebops_conv2d(config_pdp, conv2d_input): out = Conv2D(OUT_FEATURES, kernel_size=KERNEL_SIZE, use_bias=False)(inputs) act = ReLU()(out) model = keras.Model(inputs=inputs, outputs=act, name="test_conv2d") - model = add_compression_layers_tf(model, config_pdp, conv2d_input.shape) + model = add_compression_layers(model, config_pdp, conv2d_input.shape) post_pretrain_functions(model, config_pdp) model.layers[1].hgq_loss(conv2d_input.shape) @@ -1588,7 +1588,7 @@ def test_ebops_conv2d(config_pdp, conv2d_input): out = Conv2D(OUT_FEATURES, kernel_size=KERNEL_SIZE, use_bias=True)(inputs) act = ReLU()(out) model = keras.Model(inputs=inputs, outputs=act, name="test_conv2d") - model = add_compression_layers_tf(model, config_pdp, conv2d_input.shape) + model = add_compression_layers(model, config_pdp, conv2d_input.shape) post_pretrain_functions(model, config_pdp) model.layers[1].hgq_loss(conv2d_input.shape) @@ -1600,7 +1600,7 @@ def test_ebops_conv1d(config_pdp, conv1d_input): out = Conv1D(OUT_FEATURES, kernel_size=KERNEL_SIZE, use_bias=False)(inputs) act = ReLU()(out) model = keras.Model(inputs=inputs, outputs=act, name="test_dense") - model = add_compression_layers_tf(model, config_pdp, conv1d_input.shape) + model = add_compression_layers(model, config_pdp, conv1d_input.shape) post_pretrain_functions(model, config_pdp) model.layers[1].hgq_loss(conv1d_input.shape) @@ -1610,7 +1610,7 @@ def test_ebops_conv1d(config_pdp, conv1d_input): out = Conv1D(OUT_FEATURES, kernel_size=KERNEL_SIZE, use_bias=True)(inputs) act = ReLU()(out) model = keras.Model(inputs=inputs, outputs=act, name="test_dense") - model = add_compression_layers_tf(model, config_pdp, conv1d_input.shape) + model = add_compression_layers(model, config_pdp, conv1d_input.shape) post_pretrain_functions(model, config_pdp) model.layers[1].hgq_loss(conv1d_input.shape) @@ -1624,7 +1624,7 @@ def test_ebops_bn(config_pdp, conv2d_input): out = BatchNormalization(axis=axis)(out) act = ReLU()(out) model = keras.Model(inputs=inputs, outputs=act, name="test_bn") - model = add_compression_layers_tf(model, config_pdp, conv2d_input.shape) + 
model = add_compression_layers(model, config_pdp, conv2d_input.shape) post_pretrain_functions(model, config_pdp) if keras.backend.image_data_format() == "channels_first": model.layers[2].hgq_loss((1, 32, 30, 30)) # Does not work, TODO: Fix @@ -1639,7 +1639,7 @@ def test_ebops_activations(config_pdp, dense_input): act = ReLU()(inputs) act2 = Activation("tanh")(act) model = keras.Model(inputs=inputs, outputs=act2, name="test_activations") - model = add_compression_layers_tf(model, config_pdp, dense_input.shape) + model = add_compression_layers(model, config_pdp, dense_input.shape) def test_linear_direct(config_pdp, dense_input): @@ -1708,7 +1708,7 @@ def extra_repr(self): def test_avgpool_quant_called(config_pdp, conv1d_input): config_pdp.quantization_parameters.enable_quantization = True - with patch('pquant.core.tf_impl.compressed_layers_tf.Quantizer', DummyLayer): + with patch('pquant.layers.Quantizer', DummyLayer): layer = PQAvgPool1d(config_pdp, KERNEL_SIZE, quantize_input=True) layer(conv1d_input) assert layer.input_quantizer.layer_called == 1 @@ -1730,7 +1730,7 @@ def test_avgpool_quant_called(config_pdp, conv1d_input): def test_batchnorm_quant_called(config_pdp, conv2d_input): config_pdp.quantization_parameters.enable_quantization = True axis = -1 if keras.backend.image_data_format() == "channels_last" else 1 - with patch('pquant.core.tf_impl.compressed_layers_tf.Quantizer', DummyLayer): + with patch('pquant.layers.Quantizer', DummyLayer): layer = PQBatchNormalization(config_pdp, axis=axis, quantize_input=True) layer(conv2d_input) assert layer.input_quantizer.layer_called == 1 @@ -1754,7 +1754,7 @@ def test_batchnorm_quant_called(config_pdp, conv2d_input): def test_pqconv2d_quant_called(config_pdp, conv2d_input): config_pdp.quantization_parameters.enable_quantization = True - with patch('pquant.core.tf_impl.compressed_layers_tf.Quantizer', DummyLayer): + with patch('pquant.layers.Quantizer', DummyLayer): layer = PQConv2d(config_pdp, OUT_FEATURES, KERNEL_SIZE, quantize_input=True, use_bias=True) layer.post_pre_train_function() layer(conv2d_input) @@ -1785,7 +1785,7 @@ def test_pqconv2d_quant_called(config_pdp, conv2d_input): def test_pqdepthwiseconv2d_quant_called(config_pdp, conv2d_input): config_pdp.quantization_parameters.enable_quantization = True - with patch('pquant.core.tf_impl.compressed_layers_tf.Quantizer', DummyLayer): + with patch('pquant.layers.Quantizer', DummyLayer): layer = PQDepthwiseConv2d(config_pdp, KERNEL_SIZE, quantize_input=True, use_bias=True) layer.post_pre_train_function() layer(conv2d_input) @@ -1815,7 +1815,7 @@ def test_pqdepthwiseconv2d_quant_called(config_pdp, conv2d_input): def test_pqconv1d_quant_called(config_pdp, conv1d_input): config_pdp.quantization_parameters.enable_quantization = True - with patch('pquant.core.tf_impl.compressed_layers_tf.Quantizer', DummyLayer): + with patch('pquant.layers.Quantizer', DummyLayer): layer = PQConv1d(config_pdp, OUT_FEATURES, KERNEL_SIZE, quantize_input=True, use_bias=True) layer.post_pre_train_function() layer(conv1d_input) @@ -1845,7 +1845,7 @@ def test_pqconv1d_quant_called(config_pdp, conv1d_input): def test_dense_quant_called(config_pdp, dense_input): config_pdp.quantization_parameters.enable_quantization = True - with patch('pquant.core.tf_impl.compressed_layers_tf.Quantizer', DummyLayer): + with patch('pquant.layers.Quantizer', DummyLayer): layer = PQDense(config_pdp, OUT_FEATURES, quantize_input=True, use_bias=True) layer.post_pre_train_function() layer(dense_input) @@ -1871,3 +1871,26 @@ def 
test_dense_quant_called(config_pdp, dense_input): assert layer.bias_quantizer.layer_called == 0 assert layer.output_quantizer.layer_called == 0 assert True + + +def test_layer_replacement_quant_called(config_pdp, conv2d_input): + config_pdp.quantization_parameters.enable_quantization = True + config_pdp.quantization_parameters.quantize_input = True + config_pdp.quantization_parameters.quantize_output = True + config_pdp.quantization_parameters.use_high_granularity_quantization = True + with patch('pquant.layers.Quantizer', DummyLayer): + inp = keras.Input(shape=conv2d_input.shape[1:]) + x = Conv2D(OUT_FEATURES, KERNEL_SIZE)(inp) + + x = ReLU()(x) + x = keras.layers.Flatten()(x) + out = Dense(4)(x) + + model = keras.Model(inputs=inp, outputs=out) + model.summary() + + model = add_compression_layers(model, config_pdp, conv2d_input.shape) + model(conv2d_input, training=True) + assert model.layers[-1].output_quantizer.layer_called == 1 + model(conv2d_input, training=False) + assert model.layers[-1].output_quantizer.layer_called == 2 diff --git a/tests/test_torch_compression_layers.py b/tests/test_torch_compression_layers.py index 61deab8..c3eaf57 100644 --- a/tests/test_torch_compression_layers.py +++ b/tests/test_torch_compression_layers.py @@ -17,8 +17,8 @@ ) from pquant import post_training_prune -from pquant.core.torch_impl.activations import PQActivation -from pquant.core.torch_impl.compressed_layers_torch import ( +from pquant.activations import PQActivation +from pquant.layers import ( PQAvgPool1d, PQAvgPool2d, PQBatchNorm2d, @@ -26,10 +26,10 @@ PQConv2d, PQDense, PQWeightBiasBase, - add_compression_layers_torch, - apply_final_compression_torch, - get_layer_keep_ratio_torch, - get_model_losses_torch, + add_compression_layers, + apply_final_compression, + get_layer_keep_ratio, + get_model_losses, post_pretrain_functions, pre_finetune_functions, ) @@ -311,7 +311,7 @@ def test_dense_add_remove_layers(config_pdp, dense_input): orig_weight = layer.weight.data model = TestModel(layer) - model = add_compression_layers_torch(model, config_pdp, dense_input.shape) + model = add_compression_layers(model, config_pdp, dense_input.shape) post_pretrain_functions(model, config_pdp) pre_finetune_functions(model) assert torch.all(orig_weight == model.submodule._weight.data) @@ -319,7 +319,7 @@ def test_dense_add_remove_layers(config_pdp, dense_input): mask_50pct = ops.reshape(keras.random.shuffle(mask_50pct), model.submodule.pruning_layer.mask.shape) model.submodule.pruning_layer.mask = mask_50pct output1 = model(dense_input) - model = apply_final_compression_torch(model) + model = apply_final_compression(model) output2 = model(dense_input) assert ops.all(ops.equal(output1, output2)) expected_nonzero_count = ops.count_nonzero(mask_50pct) @@ -332,7 +332,7 @@ def test_conv2d_add_remove_layers(config_pdp, conv2d_input): layer = Conv2d(IN_FEATURES, OUT_FEATURES, KERNEL_SIZE, bias=False) orig_weight = layer.weight.data model = TestModel(layer) - model = add_compression_layers_torch(model, config_pdp, conv2d_input.shape) + model = add_compression_layers(model, config_pdp, conv2d_input.shape) model(conv2d_input) post_pretrain_functions(model, config_pdp) pre_finetune_functions(model) @@ -341,7 +341,7 @@ def test_conv2d_add_remove_layers(config_pdp, conv2d_input): mask_50pct = ops.reshape(keras.random.shuffle(mask_50pct), model.submodule.pruning_layer.mask.shape) model.submodule.pruning_layer.mask = mask_50pct output1 = model(conv2d_input) - model = apply_final_compression_torch(model) + model = 
apply_final_compression(model) output2 = model(conv2d_input) assert ops.all(ops.equal(output1, output2)) expected_nonzero_count = ops.count_nonzero(mask_50pct) @@ -353,7 +353,7 @@ def test_conv1d_add_remove_layers(config_pdp, conv1d_input): config_pdp.pruning_parameters.enable_pruning = True layer = Conv1d(IN_FEATURES, OUT_FEATURES, KERNEL_SIZE, bias=False) model = TestModel(layer) - model = add_compression_layers_torch(model, config_pdp, conv1d_input.shape) + model = add_compression_layers(model, config_pdp, conv1d_input.shape) model(conv1d_input) post_pretrain_functions(model, config_pdp) pre_finetune_functions(model) @@ -362,7 +362,7 @@ def test_conv1d_add_remove_layers(config_pdp, conv1d_input): mask_50pct = ops.reshape(keras.random.shuffle(mask_50pct), model.submodule.pruning_layer.mask.shape) model.submodule.pruning_layer.mask = mask_50pct output1 = model(conv1d_input) - model = apply_final_compression_torch(model) + model = apply_final_compression(model) output2 = model(conv1d_input) assert ops.all(ops.equal(output1, output2)) expected_nonzero_count = ops.count_nonzero(mask_50pct) @@ -374,7 +374,7 @@ def test_dense_get_layer_keep_ratio(config_pdp, dense_input): config_pdp.pruning_parameters.enable_pruning = True layer = Linear(IN_FEATURES, OUT_FEATURES, bias=False) model = TestModel(layer) - model = add_compression_layers_torch(model, config_pdp, dense_input.shape) + model = add_compression_layers(model, config_pdp, dense_input.shape) model(dense_input) post_pretrain_functions(model, config_pdp) pre_finetune_functions(model) @@ -382,9 +382,9 @@ def test_dense_get_layer_keep_ratio(config_pdp, dense_input): mask_50pct = ops.cast(ops.linspace(0, 1, num=OUT_FEATURES * IN_FEATURES) < 0.5, "float32") mask_50pct = ops.reshape(keras.random.shuffle(mask_50pct), model.submodule.pruning_layer.mask.shape) model.submodule.pruning_layer.mask = mask_50pct - ratio1 = get_layer_keep_ratio_torch(model) - model = apply_final_compression_torch(model) - ratio2 = get_layer_keep_ratio_torch(model) + ratio1 = get_layer_keep_ratio(model) + model = apply_final_compression(model) + ratio2 = get_layer_keep_ratio(model) assert ops.equal(ratio1, ratio2) assert ops.equal(ops.count_nonzero(mask_50pct) / ops.size(mask_50pct), ratio1) @@ -393,7 +393,7 @@ def test_conv2d_get_layer_keep_ratio(config_pdp, conv2d_input): config_pdp.pruning_parameters.enable_pruning = True layer = Conv2d(IN_FEATURES, OUT_FEATURES, KERNEL_SIZE, bias=False) model = TestModel(layer) - model = add_compression_layers_torch(model, config_pdp, conv2d_input.shape) + model = add_compression_layers(model, config_pdp, conv2d_input.shape) model(conv2d_input) post_pretrain_functions(model, config_pdp) pre_finetune_functions(model) @@ -401,9 +401,9 @@ def test_conv2d_get_layer_keep_ratio(config_pdp, conv2d_input): mask_50pct = ops.cast(ops.linspace(0, 1, num=OUT_FEATURES * IN_FEATURES * KERNEL_SIZE * KERNEL_SIZE) < 0.5, "float32") mask_50pct = ops.reshape(keras.random.shuffle(mask_50pct), model.submodule.pruning_layer.mask.shape) model.submodule.pruning_layer.mask = mask_50pct - ratio1 = get_layer_keep_ratio_torch(model) - model = apply_final_compression_torch(model) - ratio2 = get_layer_keep_ratio_torch(model) + ratio1 = get_layer_keep_ratio(model) + model = apply_final_compression(model) + ratio2 = get_layer_keep_ratio(model) assert ops.equal(ratio1, ratio2) assert ops.equal(ops.count_nonzero(mask_50pct) / ops.size(mask_50pct), ratio1) @@ -412,7 +412,7 @@ def test_conv1d_get_layer_keep_ratio(config_pdp, conv1d_input): 
config_pdp.pruning_parameters.enable_pruning = True layer = Conv1d(IN_FEATURES, OUT_FEATURES, KERNEL_SIZE, bias=False) model = TestModel(layer) - model = add_compression_layers_torch(model, config_pdp, conv1d_input.shape) + model = add_compression_layers(model, config_pdp, conv1d_input.shape) model(conv1d_input) post_pretrain_functions(model, config_pdp) pre_finetune_functions(model) @@ -420,9 +420,9 @@ def test_conv1d_get_layer_keep_ratio(config_pdp, conv1d_input): mask_50pct = ops.cast(ops.linspace(0, 1, num=ops.size(model.submodule.weight)) < 0.5, "float32") mask_50pct = ops.reshape(keras.random.shuffle(mask_50pct), model.submodule.pruning_layer.mask.shape) model.submodule.pruning_layer.mask = mask_50pct - ratio1 = get_layer_keep_ratio_torch(model) - model = apply_final_compression_torch(model) - ratio2 = get_layer_keep_ratio_torch(model) + ratio1 = get_layer_keep_ratio(model) + model = apply_final_compression(model) + ratio2 = get_layer_keep_ratio(model) assert ops.equal(ratio1, ratio2) assert ops.equal(ops.count_nonzero(mask_50pct) / ops.size(mask_50pct), ratio1) @@ -431,26 +431,26 @@ def test_check_activation(config_pdp, dense_input): # ReLU layer = Linear(IN_FEATURES, OUT_FEATURES, bias=False) model = TestModel(layer, "relu") - model = add_compression_layers_torch(model, config_pdp, dense_input.shape) + model = add_compression_layers(model, config_pdp, dense_input.shape) assert isinstance(model.activation, ReLU) config_pdp.quantization_parameters.enable_quantization = True layer = Linear(IN_FEATURES, OUT_FEATURES, bias=False) model = TestModel(layer, "relu") - model = add_compression_layers_torch(model, config_pdp, dense_input.shape) + model = add_compression_layers(model, config_pdp, dense_input.shape) assert isinstance(model.activation, PQActivation) # Tanh config_pdp.quantization_parameters.enable_quantization = False layer = Linear(IN_FEATURES, OUT_FEATURES, bias=False) model = TestModel(layer, "tanh") - model = add_compression_layers_torch(model, config_pdp, dense_input.shape) + model = add_compression_layers(model, config_pdp, dense_input.shape) assert isinstance(model.activation, Tanh) config_pdp.quantization_parameters.enable_quantization = True layer = Linear(IN_FEATURES, OUT_FEATURES, bias=False) model = TestModel(layer, "tanh") - model = add_compression_layers_torch(model, config_pdp, dense_input.shape) + model = add_compression_layers(model, config_pdp, dense_input.shape) assert isinstance(model.activation, PQActivation) @@ -489,14 +489,14 @@ def test_hgq_activation_built(config_pdp, conv2d_input): config_pdp.quantization_parameters.quantize_output = True layer = Conv2d(IN_FEATURES, OUT_FEATURES, KERNEL_SIZE, bias=True) model = TestModelWithAvgPool(layer, "relu") - model = add_compression_layers_torch(model, config_pdp, conv2d_input.shape) + model = add_compression_layers(model, config_pdp, conv2d_input.shape) is_built = check_keras_layer_is_built(model, []) torch.save(model.state_dict(), "test_model.pt") assert all(is_built) layer = Conv2d(IN_FEATURES, OUT_FEATURES, KERNEL_SIZE, bias=True) model = TestModelWithAvgPool(layer, "tanh") - model = add_compression_layers_torch(model, config_pdp, conv2d_input.shape) + model = add_compression_layers(model, config_pdp, conv2d_input.shape) is_built = check_keras_layer_is_built(model, []) assert all(is_built) @@ -507,8 +507,8 @@ def test_post_training_wanda(config_wanda, conv2d_input): layer = Conv2d(IN_FEATURES, OUT_FEATURES, KERNEL_SIZE, bias=True) model = TestModel(layer, "relu") calibration_dataset = [conv2d_input, 
conv2d_input] - model = post_training_prune(model, calibration_dataset, config_wanda) - assert get_layer_keep_ratio_torch(model) == 1 - config_wanda.pruning_parameters.sparsity + model = post_training_prune(model, config_wanda, calibration_dataset) + assert get_layer_keep_ratio(model) == 1 - config_wanda.pruning_parameters.sparsity class TestModel2(nn.Module): @@ -557,7 +557,7 @@ def test_calculate_pruning_budget(config_wanda, dense_input): weight = ops.convert_to_tensor(weight) weight2 = ops.linspace(0.01, 0.99, OUT_FEATURES * OUT_FEATURES) - model = add_compression_layers_torch(model, config_wanda, dense_input.shape) + model = add_compression_layers(model, config_wanda, dense_input.shape) model.submodule._weight.data = ops.reshape(weight, model.submodule.weight.shape) model.submodule2._weight.data = ops.reshape(weight2, model.submodule2.weight.shape) @@ -580,7 +580,7 @@ def test_trigger_post_pretraining(config_pdp, dense_input): layer2 = Linear(OUT_FEATURES, OUT_FEATURES, bias=False) model = TestModel2(layer, layer2, "relu", "tanh") - model = add_compression_layers_torch(model, config_pdp, dense_input.shape) + model = add_compression_layers(model, config_pdp, dense_input.shape) assert model.submodule.pruning_layer.is_pretraining is True assert model.activation.is_pretraining is True @@ -602,7 +602,7 @@ def test_hgq_weight_shape(config_pdp, dense_input): layer2 = Linear(OUT_FEATURES, OUT_FEATURES, bias=False) model = TestModel2(layer, layer2, "relu", "tanh") - model = add_compression_layers_torch(model, config_pdp, dense_input.shape) + model = add_compression_layers(model, config_pdp, dense_input.shape) post_pretrain_functions(model, config_pdp) assert model.submodule.weight_quantizer.quantizer.quantizer._i.shape == model.submodule.weight.shape @@ -616,7 +616,7 @@ def test_qbn_build(config_pdp, conv2d_input): layer2 = BatchNorm2d(OUT_FEATURES) model = TestModel2(layer, layer2, None, "tanh") - model = add_compression_layers_torch(model, config_pdp, conv2d_input.shape) + model = add_compression_layers(model, config_pdp, conv2d_input.shape) post_pretrain_functions(model, config_pdp) assert model.submodule.weight_quantizer.quantizer.quantizer._i.shape == model.submodule.weight.shape @@ -627,7 +627,7 @@ def test_set_activation_custom_bits_hgq(config_pdp, conv2d_input): layer = Conv2d(IN_FEATURES, OUT_FEATURES, KERNEL_SIZE, bias=True) layer2 = AvgPool2d(2) model = TestModel2(layer, layer2, "relu", "tanh") - model = add_compression_layers_torch(model, config_pdp, conv2d_input.shape) + model = add_compression_layers(model, config_pdp, conv2d_input.shape) for m in model.modules(): if isinstance(m, (PQWeightBiasBase)): @@ -668,7 +668,7 @@ def test_set_activation_custom_bits_hgq(config_pdp, conv2d_input): } model = TestModel2(layer, layer2, "relu", "tanh") - model = add_compression_layers_torch(model, config_pdp, conv2d_input.shape) + model = add_compression_layers(model, config_pdp, conv2d_input.shape) for m in model.modules(): if isinstance(m, (PQWeightBiasBase)): @@ -705,14 +705,14 @@ def test_disable_pruning_from_single_layer(config_pdp, conv2d_input): layer = Conv2d(IN_FEATURES, OUT_FEATURES, KERNEL_SIZE, bias=True) layer2 = Conv2d(OUT_FEATURES, OUT_FEATURES, KERNEL_SIZE) model = TestModel2(layer, layer2, "relu", "tanh") - model = add_compression_layers_torch(model, config_pdp, conv2d_input.shape) + model = add_compression_layers(model, config_pdp, conv2d_input.shape) assert model.submodule.enable_pruning assert model.submodule2.enable_pruning 
config_pdp.pruning_parameters.disable_pruning_for_layers = ["submodule2"] model = TestModel2(layer, layer2, "relu", "tanh") - model = add_compression_layers_torch(model, config_pdp, conv2d_input.shape) + model = add_compression_layers(model, config_pdp, conv2d_input.shape) assert model.submodule.enable_pruning assert not model.submodule2.enable_pruning @@ -724,7 +724,7 @@ def test_set_activation_custom_bits_quantizer(config_pdp, conv2d_input): layer = Conv2d(IN_FEATURES, OUT_FEATURES, KERNEL_SIZE, bias=True) layer2 = AvgPool2d(2) model = TestModel2(layer, layer2, "relu", "tanh") - model = add_compression_layers_torch(model, config_pdp, conv2d_input.shape) + model = add_compression_layers(model, config_pdp, conv2d_input.shape) for m in model.modules(): if isinstance(m, (PQWeightBiasBase)): @@ -748,7 +748,7 @@ def test_set_activation_custom_bits_quantizer(config_pdp, conv2d_input): } model = TestModel2(layer, layer2, "relu", "tanh") - model = add_compression_layers_torch(model, config_pdp, conv2d_input.shape) + model = add_compression_layers(model, config_pdp, conv2d_input.shape) for m in model.modules(): if isinstance(m, (PQWeightBiasBase)): @@ -770,13 +770,13 @@ def test_ebops_dense(config_pdp, dense_input): config_pdp.quantization_parameters.use_high_granularity_quantization = True layer = Linear(IN_FEATURES, OUT_FEATURES, bias=False) model = TestModel(layer, "relu") - model = add_compression_layers_torch(model, config_pdp, dense_input.shape) + model = add_compression_layers(model, config_pdp, dense_input.shape) post_pretrain_functions(model, config_pdp) model.submodule.hgq_loss() layer = Linear(IN_FEATURES, OUT_FEATURES, bias=True) model = TestModel(layer, "relu") - model = add_compression_layers_torch(model, config_pdp, dense_input.shape) + model = add_compression_layers(model, config_pdp, dense_input.shape) post_pretrain_functions(model, config_pdp) model.submodule.hgq_loss() @@ -786,13 +786,13 @@ def test_ebops_conv2d(config_pdp, conv2d_input): config_pdp.quantization_parameters.use_high_granularity_quantization = True layer = Conv2d(IN_FEATURES, OUT_FEATURES, KERNEL_SIZE, bias=False) model = TestModel(layer, "relu") - model = add_compression_layers_torch(model, config_pdp, conv2d_input.shape) + model = add_compression_layers(model, config_pdp, conv2d_input.shape) post_pretrain_functions(model, config_pdp) model.submodule.hgq_loss() layer = Conv2d(IN_FEATURES, OUT_FEATURES, KERNEL_SIZE, bias=True) model = TestModel(layer, "relu") - model = add_compression_layers_torch(model, config_pdp, conv2d_input.shape) + model = add_compression_layers(model, config_pdp, conv2d_input.shape) post_pretrain_functions(model, config_pdp) model.submodule.hgq_loss() @@ -802,13 +802,13 @@ def test_ebops_conv1d(config_pdp, conv1d_input): config_pdp.quantization_parameters.use_high_granularity_quantization = True layer = Conv1d(IN_FEATURES, OUT_FEATURES, KERNEL_SIZE, bias=False) model = TestModel(layer, "relu") - model = add_compression_layers_torch(model, config_pdp, conv1d_input.shape) + model = add_compression_layers(model, config_pdp, conv1d_input.shape) post_pretrain_functions(model, config_pdp) model.submodule.hgq_loss() layer = Conv1d(IN_FEATURES, OUT_FEATURES, KERNEL_SIZE, bias=True) model = TestModel(layer, "relu") - model = add_compression_layers_torch(model, config_pdp, conv1d_input.shape) + model = add_compression_layers(model, config_pdp, conv1d_input.shape) post_pretrain_functions(model, config_pdp) model.submodule.hgq_loss() @@ -820,7 +820,7 @@ def test_ebops_bn(config_pdp, conv2d_input): 
layer2 = BatchNorm2d(OUT_FEATURES) model = TestModel2(layer, layer2, None, "relu") shape = [1] + list(conv2d_input.shape[1:]) - model = add_compression_layers_torch(model, config_pdp, shape) + model = add_compression_layers(model, config_pdp, shape) post_pretrain_functions(model, config_pdp) model.submodule2.hgq_loss() @@ -1767,7 +1767,7 @@ def test_hgq_loss_calc_no_qoutput(config_pdp, conv2d_input): # Bias in weight layers, don't quantize output model = ModelWithAllLayers() - model = add_compression_layers_torch(model, config_pdp, conv2d_input.shape) + model = add_compression_layers(model, config_pdp, conv2d_input.shape) post_pretrain_functions(model, config_pdp) expected_loss = 0.0 for m in model.modules(): @@ -1790,7 +1790,7 @@ def test_hgq_loss_calc_no_qoutput(config_pdp, conv2d_input): m.bias_quantizer.hgq_loss = dummy_hgq_loss expected_loss += 3.0 - losses = get_model_losses_torch(model, torch.tensor(0.0)) + losses = get_model_losses(model, torch.tensor(0.0)) assert losses == expected_loss @@ -1801,7 +1801,7 @@ def test_hgq_loss_calc_no_bias_no_qoutput(config_pdp, conv2d_input): # No bias in weight layers, don't quantize output model = ModelWithAllLayers(use_bias=False) - model = add_compression_layers_torch(model, config_pdp, conv2d_input.shape) + model = add_compression_layers(model, config_pdp, conv2d_input.shape) post_pretrain_functions(model, config_pdp) expected_loss = 0.0 @@ -1825,7 +1825,7 @@ def test_hgq_loss_calc_no_bias_no_qoutput(config_pdp, conv2d_input): m.bias_quantizer.hgq_loss = dummy_hgq_loss expected_loss += 3.0 - losses = get_model_losses_torch(model, torch.tensor(0.0)) + losses = get_model_losses(model, torch.tensor(0.0)) assert losses == expected_loss @@ -1837,7 +1837,7 @@ def test_hgq_loss_calc_qoutput(config_pdp, conv2d_input): # Bias in weight layers, quantize output config_pdp.quantization_parameters.quantize_output = True model = ModelWithAllLayers() - model = add_compression_layers_torch(model, config_pdp, conv2d_input.shape) + model = add_compression_layers(model, config_pdp, conv2d_input.shape) post_pretrain_functions(model, config_pdp) expected_loss = 0.0 for m in model.modules(): @@ -1860,7 +1860,7 @@ def test_hgq_loss_calc_qoutput(config_pdp, conv2d_input): m.bias_quantizer.hgq_loss = dummy_hgq_loss expected_loss += 3.0 - losses = get_model_losses_torch(model, torch.tensor(0.0)) + losses = get_model_losses(model, torch.tensor(0.0)) assert losses == expected_loss @@ -1873,7 +1873,7 @@ def test_hgq_loss_calc_no_qinput(config_pdp, conv2d_input): config_pdp.quantization_parameters.quantize_input = False # Bias in weight layers, don't quantize input model = ModelWithAllLayers() - model = add_compression_layers_torch(model, config_pdp, conv2d_input.shape) + model = add_compression_layers(model, config_pdp, conv2d_input.shape) post_pretrain_functions(model, config_pdp) expected_loss = 0.0 for m in model.modules(): @@ -1896,7 +1896,7 @@ def test_hgq_loss_calc_no_qinput(config_pdp, conv2d_input): m.bias_quantizer.hgq_loss = dummy_hgq_loss expected_loss += 2.0 - losses = get_model_losses_torch(model, torch.tensor(0.0)) + losses = get_model_losses(model, torch.tensor(0.0)) assert losses == expected_loss @@ -1908,8 +1908,8 @@ def test_conv1d_parameter_quantizers_not_called_when_final_compression_done(conf config_pdp.quantization_parameters.quantize_output = True layer = Conv1d(IN_FEATURES, OUT_FEATURES, KERNEL_SIZE, bias=True) model = TestModel(layer) - model = add_compression_layers_torch(model, config_pdp, conv1d_input.shape) - model = 
apply_final_compression_torch(model) + model = add_compression_layers(model, config_pdp, conv1d_input.shape) + model = apply_final_compression(model) model.submodule.input_quantizer = DummyLayer() model.submodule.weight_quantizer = DummyLayer() model.submodule.bias_quantizer = DummyLayer() @@ -1927,8 +1927,8 @@ def test_batchnorm2d_parameter_quantizers_not_called_when_final_compression_done config_pdp.quantization_parameters.quantize_output = True layer = BatchNorm2d(IN_FEATURES) model = TestModel(layer) - model = add_compression_layers_torch(model, config_pdp, conv2d_input.shape) - model = apply_final_compression_torch(model) + model = add_compression_layers(model, config_pdp, conv2d_input.shape) + model = apply_final_compression(model) model.submodule.input_quantizer = DummyLayer() model.submodule.weight_quantizer = DummyLayer() model.submodule.bias_quantizer = DummyLayer() From d445140f42a774ed78c6934dd71096c32d3e1f1a Mon Sep 17 00:00:00 2001 From: Roope Niemi Date: Sat, 29 Nov 2025 20:42:58 +0100 Subject: [PATCH 20/37] add example notebook for jet tagger. Fix input quantization batch size for EBOPs calculation to 1. Enable EBOPs as a metric for non-HGQ quantizers, allow multiplying kernel bitwidths by the pruning mask --- examples/example_jet_tagging.ipynb | 423 ++++++++++++++++++ src/pquant/__init__.py | 63 ++- src/pquant/core/finetuning.py | 57 +++ src/pquant/core/keras/activations.py | 10 +- src/pquant/core/keras/layers.py | 154 ++++--- src/pquant/core/keras/quantizer.py | 7 + src/pquant/core/torch/activations.py | 10 +- src/pquant/core/torch/layers.py | 72 +-- src/pquant/core/torch/quantizer.py | 7 + .../pruning_methods/activation_pruning.py | 2 +- src/pquant/pruning_methods/autosparse.py | 2 +- src/pquant/pruning_methods/cs.py | 13 +- src/pquant/pruning_methods/dst.py | 2 +- src/pquant/pruning_methods/mdmm.py | 7 +- src/pquant/pruning_methods/pdp.py | 4 +- tests/test_keras_compression_layers.py | 27 +- tests/test_torch_compression_layers.py | 16 + 17 files changed, 749 insertions(+), 127 deletions(-) create mode 100644 examples/example_jet_tagging.ipynb diff --git a/examples/example_jet_tagging.ipynb b/examples/example_jet_tagging.ipynb new file mode 100644 index 0000000..4d6396f --- /dev/null +++ b/examples/example_jet_tagging.ipynb @@ -0,0 +1,423 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "2ac0177a-0354-437b-b13a-947144dba15e", + "metadata": {}, + "outputs": [], + "source": [ + "!pip install scikit-learn\n", + "!pip install pandas\n", + "!pip install da4ml\n", + "# For da4ml, also required: !conda install conda-forge::verilator -y" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "94033d11-b0c5-4ca6-9664-cd6dd0193c14", + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "import random\n", + "import numpy as np\n", + "import torch.nn.functional as F\n", + "import torch\n", + "from torch.utils.data import TensorDataset, DataLoader\n", + "\n", + "os.environ[\"KERAS_BACKEND\"] = \"torch\"\n", + "import keras\n", + "keras.backend.set_image_data_format(\"channels_first\")\n", + "from pquant.layers import PQDense\n", + "from pquant.activations import PQActivation\n", + "from pquant import get_ebops\n", + "from da4ml.trace.ops import quantize, relu\n", + "from da4ml.trace import comb_trace, FixedVariableArrayInput, FixedVariableArray\n", + "from da4ml.codegen import VerilogModel\n", + "import random\n", + "\n", + "def set_seed(seed):\n", + " random.seed(seed)\n", + " np.random.seed(seed)\n", + " 
torch.manual_seed(seed)\n", + " torch.cuda.manual_seed(seed)\n", + " torch.cuda.manual_seed_all(seed)\n", + "set_seed(42)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1473a61e-955d-430a-9737-7fd2fc7ba59b", + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "\n", + "import pickle as pkl\n", + "from pathlib import Path\n", + "\n", + "from sklearn.datasets import fetch_openml\n", + "from sklearn.model_selection import train_test_split\n", + "from sklearn.preprocessing import StandardScaler\n", + "\n", + "\n", + "def get_data(data_path: Path, seed=42):\n", + " try:\n", + " import zstd\n", + " except ImportError:\n", + " zstd = None\n", + " if not os.path.exists(data_path):\n", + " print('Downloading data...')\n", + " data = fetch_openml('hls4ml_lhc_jets_hlf')\n", + " buf = pkl.dumps(data)\n", + " with open(data_path, 'wb') as f:\n", + " if zstd is not None:\n", + " buf = zstd.compress(buf)\n", + " f.write(buf)\n", + " else:\n", + " os.makedirs(data_path.parent, exist_ok=True)\n", + " with open(data_path, 'rb') as f:\n", + " buf = f.read()\n", + " if zstd is not None:\n", + " buf = zstd.decompress(buf)\n", + " data = pkl.loads(buf)\n", + "\n", + " X, y = data['data'], data['target']\n", + " codecs = {'g': 0, 'q': 1, 't': 4, 'w': 2, 'z': 3}\n", + " y = np.array([codecs[i] for i in y])\n", + "\n", + " X_train_val, X_test, y_train_val, y_test = train_test_split(X, y, test_size=0.2, random_state=seed)\n", + "\n", + " X_train_val, X_test, y_train_val, y_test = X_train_val.astype(np.float32), X_test.astype(np.float32), y_train_val, y_test\n", + "\n", + " scaler = StandardScaler()\n", + " X_train_val = scaler.fit_transform(X_train_val)\n", + " X_test = scaler.transform(X_test)\n", + "\n", + " X_train_val = X_train_val.astype(np.float32)\n", + " y_train_val = y_train_val.astype(np.float32)\n", + "\n", + " return X_train_val, X_test, y_train_val, y_test\n", + "\n", + "\n", + "X_train, X_test, y_train, y_test = get_data(Path('/tmp/inp_data.zst'))\n", + "np.random.seed(42)\n", + "random.seed(42)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f2659f88-9f0c-45f4-a09d-8ef346915187", + "metadata": {}, + "outputs": [], + "source": [ + "import torch.nn.functional as F\n", + "import torch\n", + "from torch.utils.data import TensorDataset, DataLoader\n", + "\n", + "X_train_t = torch.from_numpy(X_train).float()\n", + "X_test_t = torch.from_numpy(X_test).float()\n", + "\n", + "y_train_idx = torch.from_numpy(y_train).long()\n", + "y_test_idx = torch.from_numpy(y_test).long()\n", + "\n", + "y_train_oh = F.one_hot(y_train_idx, num_classes=5).float()\n", + "y_test_oh = F.one_hot(y_test_idx, num_classes=5).float()\n", + "\n", + "train_ds = TensorDataset(X_train_t, y_train_oh)\n", + "test_ds = TensorDataset(X_test_t, y_test_oh)\n", + "\n", + "\n", + "train_loader = DataLoader(train_ds, batch_size=33200, shuffle=True, num_workers=4)\n", + "test_loader = DataLoader(test_ds, batch_size=33200, shuffle=False, num_workers=4)\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ea03f950-15d5-47df-86bd-921956e98d83", + "metadata": {}, + "outputs": [], + "source": [ + "from pquant import cs_config, dst_config\n", + "\n", + "def build_model(config):\n", + " class Model(torch.nn.Module):\n", + " def __init__(self, config):\n", + " super().__init__()\n", + " self.dense1 = PQDense(config, 16, 64, \n", + " in_quant_bits = (1, 3, 3))\n", + " self.relu = PQActivation(config, \"relu\")\n", + " self.dense2 = PQDense(config, 64, 32)\n", + " 
self.dense3 = PQDense(config, 32, 32)\n", + " self.dense4 = PQDense(config, 32, 5, \n", + " quantize_output=True, \n", + " out_quant_bits=(1, 3, 3))\n", + "\n", + " def forward(self, x):\n", + " x = self.relu(self.dense1(x))\n", + " x = self.relu(self.dense2(x))\n", + " x = self.relu(self.dense3(x))\n", + " x = self.dense4(x)\n", + " return x\n", + " return Model(config)\n", + "\n", + "config = dst_config()\n", + "config.training_parameters.epochs = 1000\n", + "config.quantization_parameters.default_data_integer_bits = 3.\n", + "config.quantization_parameters.default_data_fractional_bits = 2.\n", + "config.quantization_parameters.default_weight_fractional_bits = 3.\n", + "config.quantization_parameters.use_relu_multiplier = False\n", + "model = build_model(config)\n", + "\n", + "model.to(\"cuda\")\n", + "model(torch.rand(1, 16).to(\"cuda\")) # Call once to build Keras layers" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "10cc72e0-a4c5-4e19-a31d-ed9de6bee9f7", + "metadata": {}, + "outputs": [], + "source": [ + "model" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7c2f58dd-d4ab-4f2b-a1d1-5ac220569d81", + "metadata": {}, + "outputs": [], + "source": [ + "loss_func = torch.nn.CrossEntropyLoss()\n", + "optimizer = torch.optim.Adam(lr=1e-2, params=model.parameters())\n", + "scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[600, 800], gamma=0.1)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "983c6bad-310e-4d7c-a6a6-f0164f01808c", + "metadata": {}, + "outputs": [], + "source": [ + "from pquant import get_layer_keep_ratio, get_model_losses\n", + "train_accuracies = []\n", + "\n", + "def training_loop(model, trainloader, device, loss_func, optimizer, epoch, scheduler=None, *args, **kwargs):\n", + " for data in trainloader:\n", + " inputs, labels = data\n", + " inputs, labels = inputs.to(device), labels.to(device)\n", + " optimizer.zero_grad()\n", + " outputs = model(inputs)\n", + " loss = loss_func(outputs, labels)\n", + " losses = get_model_losses(model, torch.tensor(0.).to(device))\n", + " loss += losses\n", + " loss.backward()\n", + " optimizer.step()\n", + " epoch += 1\n", + " accuracy = torch.mean((torch.argmax(outputs, dim=1) == torch.argmax(labels, dim=1)).float())\n", + " if scheduler is not None:\n", + " scheduler.step()\n", + " train_accuracies.append(accuracy.cpu().numpy())\n", + "\n", + "val_accuracies = []\n", + "remaining_weights = []\n", + "ebops = []\n", + "def validate_loop(model, testloader, device, loss_func, epoch, *args, **kwargs):\n", + " correct = 0\n", + " total = 0\n", + " model.eval()\n", + " with torch.no_grad():\n", + " for data in testloader:\n", + " inputs, labels = data\n", + " inputs, labels = inputs.to(device), labels.to(device)\n", + " outputs = model(inputs)\n", + " loss = loss_func(outputs, labels)\n", + " accuracy = torch.mean((torch.argmax(outputs, dim=1) == torch.argmax(labels, dim=1)).float())\n", + " val_accuracies.append(accuracy.cpu().numpy())\n", + " ratio = get_layer_keep_ratio(model)\n", + " remaining_weights.append(ratio.cpu().numpy())\n", + " ebops.append(get_ebops(model).cpu().numpy())\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e67b9c3b-00eb-4945-822f-ae9e0864d5d5", + "metadata": {}, + "outputs": [], + "source": [ + "from pquant import train_model\n", + "model.to(\"cuda\")\n", + "trained_model = train_model(model = model, \n", + " config = config, \n", + " train_func = training_loop, \n", + " valid_func = validate_loop, 
\n", + " trainloader = train_loader, \n", + " device=\"cuda\",\n", + " testloader = test_loader, \n", + " loss_func = loss_func,\n", + " optimizer = optimizer,\n", + " scheduler=scheduler\n", + " )\n", + "print(f\"Remaining weights={remaining_weights[-1] * 100:.2f}%\", f\" EBOPs={int(ebops[-1])}\", f\" Accuracy={val_accuracies[-1]*100:.2f}:%\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a3bd9d0a-476b-4530-93aa-41ea2578a521", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c94d83bf-d7e6-4900-91ec-92cfc80132f9", + "metadata": {}, + "outputs": [], + "source": [ + "import matplotlib.pyplot as plt\n", + "\n", + "plt.plot(train_accuracies, label=\"Train\")\n", + "plt.plot(val_accuracies, label=\"Valid\")\n", + "plt.ylabel(\"Accuracy\")\n", + "plt.xlabel('Epoch')\n", + "plt.ylim(0.6, 0.77)\n", + "plt.legend()\n", + "plt.show()\n", + "\n", + "plt.plot(remaining_weights)\n", + "plt.ylabel('Remaining weights')\n", + "plt.xlabel('Epoch')\n", + "plt.show()\n", + "\n", + "plt.plot(ebops)\n", + "plt.ylabel('EBOPs')\n", + "plt.xlabel('Epoch')\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "id": "7dc9fead-b8ee-4d78-b2eb-5bd012b8a9b3", + "metadata": {}, + "source": [ + "# da4ml\n", + "For this part you need to have verilator installed (conda install conda-forge::verilator -y). We extract the weight and bias matrices from the model as numpy arrays, and build the forward pass of the model using numpy operations." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b1beb03d-69c8-4f13-914d-376ae11578fe", + "metadata": {}, + "outputs": [], + "source": [ + "w0, b0 = model.dense1.weight.detach().cpu().numpy(), model.dense1.bias.detach().cpu().numpy()\n", + "w1, b1 = model.dense2.weight.detach().cpu().numpy(), model.dense2.bias.detach().cpu().numpy()\n", + "w2, b2 = model.dense3.weight.detach().cpu().numpy(), model.dense3.bias.detach().cpu().numpy()\n", + "w3, b3 = model.dense4.weight.detach().cpu().numpy(), model.dense4.bias.detach().cpu().numpy()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d54328c4-f12c-4c36-bf60-44d2d161f077", + "metadata": {}, + "outputs": [], + "source": [ + "data_i = int(config.quantization_parameters.default_data_integer_bits)\n", + "data_f = int(config.quantization_parameters.default_data_fractional_bits)\n", + "data_np_test = np.clip(X_test_t, -(2**data_i), 2**data_i-2**(-data_f))\n", + "import yaml\n", + "if True:\n", + " inp = FixedVariableArrayInput((16))\n", + " x = quantize(inp, k=1, i=data_i, f=data_f, overflow_mode=\"WRAP\", round_mode=\"RND\")\n", + "\n", + " x = w0 @ x\n", + " x = x + b0\n", + " x = quantize(x, k=0, i=data_i, f=data_f, overflow_mode=\"SAT\", round_mode=\"RND\") \n", + " x = w1 @ x\n", + " x = x + b1\n", + " x = quantize(x, k=0, i=data_i, f=data_f, overflow_mode=\"SAT\", round_mode=\"RND\") \n", + " x = w2 @ x\n", + " x = x + b2\n", + " x = quantize(x, k=0, i=data_i, f=data_f, overflow_mode=\"SAT\", round_mode=\"RND\") \n", + " x = w3 @ x\n", + " x = x + b3\n", + " out = quantize(x, k=1, i=data_i, f=data_f, overflow_mode=\"SAT\", round_mode=\"RND\") \n", + "\n", + "\n", + " comb_logic = comb_trace(inp, out)\n", + " verilog_model = VerilogModel(comb_logic, \"vmodel\", \"path_to_model_folder\", latency_cutoff=5, clock_uncertainty=0., part_name=\"xcu250-figd2104-2L-e\")\n", + " verilog_model.write()\n", + " verilog_model.compile(verbose=True)\n", + " \n", + "verilog_model" + ] + }, + { + "cell_type": 
"code", + "execution_count": null, + "id": "8b2ac250-3896-4270-89bf-758282478a25", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "319f4e6d-15c1-4154-843b-f998bb5c94d8", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0f562e58-e6ed-4e5a-9e0f-04fdb74f8b47", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4068cd60-1069-4400-adf1-87b4ff109b8f", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "pquantml-dev-kernel", + "language": "python", + "name": "pquantml-dev-kernel" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.2" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/src/pquant/__init__.py b/src/pquant/__init__.py index 5007ca4..1285546 100644 --- a/src/pquant/__init__.py +++ b/src/pquant/__init__.py @@ -6,8 +6,26 @@ backend = os.getenv("KERAS_BACKEND", "tensorflow") if backend == "torch": from . import configs, pruning_methods + from .core.finetuning import ( + ap_config, + autosparse_config, + cs_config, + dst_config, + load_from_dictionary, + load_from_file, + mdmm_config, + pdp_config, + wanda_config, + ) from .core.torch import activations, layers, optimizers, quantizer - from .core.torch.layers import add_compression_layers, post_training_prune + from .core.torch.layers import ( + add_compression_layers, + apply_final_compression, + get_ebops, + get_layer_keep_ratio, + get_model_losses, + post_training_prune, + ) from .core.torch.train import train_model _forwards = ["activations", "layers", "quantizer", "optimizers"] @@ -20,14 +38,44 @@ _forwards.append("train_model") _forwards.append("add_compression_layers") _forwards.append("configs") + _forwards.append("get_layer_keep_ratio") + _forwards.append("get_model_losses") _forwards.append("pruning_methods") _forwards.append("post_training_prune") + _forwards.append("ap_config") + _forwards.append("autosparse_config") + _forwards.append("cs_config") + _forwards.append("dst_config") + _forwards.append("mdmm_config") + _forwards.append("pdp_config") + _forwards.append("wanda_config") + _forwards.append("load_from_file") + _forwards.append("load_from_dictionary") + _forwards.append("get_ebops") __all__ = _forwards else: from . 
import configs, pruning_methods + from .core.finetuning import ( + ap_config, + autosparse_config, + cs_config, + dst_config, + load_from_dictionary, + load_from_file, + mdmm_config, + pdp_config, + wanda_config, + ) from .core.keras import activations, layers, quantizer - from .core.keras.layers import add_compression_layers, post_training_prune + from .core.keras.layers import ( + add_compression_layers, + apply_final_compression, + get_ebops, + get_layer_keep_ratio, + get_model_losses, + post_training_prune, + ) from .core.keras.train import train_model _forwards = ["activations", "layers", "quantizer"] @@ -40,6 +88,17 @@ _forwards.append("train_model") _forwards.append("add_compression_layers") _forwards.append("configs") + _forwards.append("get_layer_keep_ratio") + _forwards.append("get_model_losses") _forwards.append("pruning_methods") _forwards.append("post_training_prune") + _forwards.append("ap_config") + _forwards.append("autosparse_config") + _forwards.append("cs_config") + _forwards.append("dst_config") + _forwards.append("mdmm_config") + _forwards.append("pdp_config") + _forwards.append("wanda_config") + _forwards.append("load_from_file") + _forwards.append("load_from_dictionary") __all__ = _forwards diff --git a/src/pquant/core/finetuning.py b/src/pquant/core/finetuning.py index f23ece0..51a7464 100644 --- a/src/pquant/core/finetuning.py +++ b/src/pquant/core/finetuning.py @@ -335,3 +335,60 @@ def run_optimization(self, model, **kwargs): ) return study.best_params + + +def ap_config(): + yaml_name = "config_ap.yaml" + parent = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + path = os.path.join(parent, "configs", yaml_name) + return TuningConfig.load_from_file(path) + + +def autosparse_config(): + yaml_name = "config_autosparse.yaml" + parent = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + path = os.path.join(parent, "configs", yaml_name) + return TuningConfig.load_from_file(path) + + +def cs_config(): + yaml_name = "config_cs.yaml" + parent = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + path = os.path.join(parent, "configs", yaml_name) + return TuningConfig.load_from_file(path) + + +def dst_config(): + yaml_name = "config_dst.yaml" + parent = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + path = os.path.join(parent, "configs", yaml_name) + return TuningConfig.load_from_file(path) + + +def mdmm_config(): + yaml_name = "config_mdmm.yaml" + parent = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + path = os.path.join(parent, "configs", yaml_name) + return TuningConfig.load_from_file(path) + + +def pdp_config(): + yaml_name = "config_pdp.yaml" + parent = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + path = os.path.join(parent, "configs", yaml_name) + return TuningConfig.load_from_file(path) + + +def wanda_config(): + yaml_name = "config_wanda.yaml" + parent = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + path = os.path.join(parent, "configs", yaml_name) + return TuningConfig.load_from_file(path) + + +def load_from_file(path): + return TuningConfig.load_from_file(path) + + +def load_from_dictionary(config): + return TuningConfig.load_from_config(config) diff --git a/src/pquant/core/keras/activations.py b/src/pquant/core/keras/activations.py index 5c12121..7cd81cc 100644 --- a/src/pquant/core/keras/activations.py +++ b/src/pquant/core/keras/activations.py @@ -54,7 +54,7 @@ def __init__( self.activation_name = activation.lower() self.activation_function = 
activation_registry.get(self.activation_name) - + self.config = config self.enable_quantization = config.quantization_parameters.enable_quantization self.use_hgq = config.quantization_parameters.use_high_granularity_quantization self.is_pretraining = True @@ -74,7 +74,7 @@ def __init__( def build(self, input_shape): super().build(input_shape) - self.input_shape = input_shape + self.input_shape = (1,) + input_shape[1:] self.output_quantizer = Quantizer( k=self.k_output, i=self.i_output, @@ -162,8 +162,10 @@ def get_config(self): config.update( { "config": self.config.get_dict(), - "i": float(self.i), - "f": float(self.f), + "i_input": float(self.i_input), + "f_input": float(self.f_input), + "i_output": float(self.i_output), + "f_output": float(self.f_output), } ) return config diff --git a/src/pquant/core/keras/layers.py b/src/pquant/core/keras/layers.py index 95ee8a0..e1d868d 100644 --- a/src/pquant/core/keras/layers.py +++ b/src/pquant/core/keras/layers.py @@ -146,7 +146,7 @@ def build(self, input_shape): True, self.hgq_gamma, ) - self.input_shape = input_shape + self.input_shape = (1,) + input_shape[1:] self.n_parallel = ops.prod(input_shape[1:-1]) self.parallelization_factor = self.parallelization_factor if self.parallelization_factor > 0 else self.n_parallel @@ -164,14 +164,13 @@ def save_weights(self): def rewind_weights(self): self.weight.assign(self.init_weight) - def ebops(self, shape): + def ebops(self): return 0.0 - def hgq_loss(self, shape): - shape = (1,) + shape[1:] + def hgq_loss(self): if self.pruning_layer.is_pretraining or not self.use_hgq: return ops.convert_to_tensor(0.0) - loss = self.hgq_beta * self.ebops(shape) + loss = self.hgq_beta * self.ebops() loss += self.weight_quantizer.hgq_loss() if self._bias is not None: loss += self.bias_quantizer.hgq_loss() @@ -317,13 +316,14 @@ def build(self, input_shape): dtype=self.dtype, ) else: - self.bias = None + self._bias = None if self.use_hgq: self.input_quantizer.build(input_shape) self.weight_quantizer.build(self._kernel.shape) if self.use_bias: self.bias_quantizer.build(self._bias.shape) self.output_quantizer.build(self.compute_output_shape(input_shape)) + self.input_shape = (1,) + input_shape[1:] @property def kernel(self): @@ -357,9 +357,12 @@ def bias(self): def bias(self, bias): self._bias = bias - def ebops(self, shape): - bw_inp = self.input_quantizer.quantizer.bits_(shape) - bw_ker = self.weight_quantizer.quantizer.bits_(ops.shape(self._kernel)) + def ebops(self, include_mask=False): + bw_inp = self.input_quantizer.get_total_bits(self.input_shape) + bw_ker = self.weight_quantizer.get_total_bits(ops.shape(self._kernel)) + if include_mask: + mask = self.handle_transpose(self.pruning_layer.get_hard_mask(), self.weight_transpose_back, do_transpose=True) + bw_ker = bw_ker * mask if self.parallelization_factor < 0: ebops = ops.sum( ops.depthwise_conv( @@ -381,19 +384,18 @@ def ebops(self, shape): reduce_axis_kernel = tuple(range(0, 2)) bw_ker = ops.sum(bw_ker, axis=reduce_axis_kernel) ebops = ops.sum(bw_inp[:, None] * bw_ker) - if self.bias is not None: - size = ops.cast(ops.prod(shape), self.dtype) - bw_bias = self.bias_quantizer.quantizer.bits_(ops.shape(self._bias)) + if self.use_bias: + size = ops.cast(ops.prod(self.input_shape), self.dtype) + bw_bias = self.bias_quantizer.get_total_bits(ops.shape(self._bias)) ebops += ops.mean(bw_bias) * size return ebops def call(self, x, training=None): - input_shape = x.shape x = self.pre_forward(x, training) x = super().call(x) x = self.post_forward(x, training) if self.use_hgq 
and self.enable_quantization: - self.add_loss(self.hgq_loss(input_shape)) + self.add_loss(self.hgq_loss()) return x # Is it supposed to be like this? @@ -527,9 +529,12 @@ def bias(self): def bias(self, bias): self._bias = bias - def ebops(self, shape): - bw_inp = self.input_quantizer.quantizer.bits_(shape) - bw_ker = self.weight_quantizer.quantizer.bits_(ops.shape(self.kernel)) + def ebops(self, include_mask=False): + bw_inp = self.input_quantizer.get_total_bits(self.input_shape) + bw_ker = self.weight_quantizer.get_total_bits(ops.shape(self._kernel)) + if include_mask: + mask = self.handle_transpose(self.pruning_layer.get_hard_mask(), self.weight_transpose_back, do_transpose=True) + bw_ker = bw_ker * mask if self.parallelization_factor < 0: ebops = ops.sum( ops.conv( @@ -552,19 +557,18 @@ def ebops(self, shape): bw_ker = ops.sum(bw_ker, axis=reduce_axis_kernel) ebops = ops.sum(bw_inp[:, None] * bw_ker) - if self._bias is not None: - size = ops.cast(ops.prod(shape), self.dtype) - bw_bias = self.bias_quantizer.quantizer.bits_(ops.shape(self._bias)) + if self.use_bias: + size = ops.cast(ops.prod(self.input_shape), self.dtype) + bw_bias = self.bias_quantizer.get_total_bits(ops.shape(self._bias)) ebops += ops.mean(bw_bias) * size return ebops def call(self, x, training=None): - input_shape = x.shape x = self.pre_forward(x, training) x = super().call(x) x = self.post_forward(x, training) if self.use_hgq and self.enable_quantization: - self.add_loss(self.hgq_loss(input_shape)) + self.add_loss(self.hgq_loss()) return x @@ -759,9 +763,12 @@ def bias(self): def bias(self, bias): self._bias = bias - def ebops(self, shape): - bw_inp = self.input_quantizer.quantizer.bits_(shape) - bw_ker = self.weight_quantizer.quantizer.bits_(ops.shape(self.kernel)) + def ebops(self, include_mask=False): + bw_inp = self.input_quantizer.get_total_bits(self.input_shape) + bw_ker = self.weight_quantizer.get_total_bits(ops.shape(self._kernel)) + if include_mask: + mask = self.handle_transpose(self.pruning_layer.get_hard_mask(), self.weight_transpose_back, do_transpose=True) + bw_ker = bw_ker * mask if self.parallelization_factor < 0: ebops = ops.sum( ops.conv( @@ -783,19 +790,18 @@ def ebops(self, shape): reduce_axis_kernel = tuple(range(0, 1)) bw_ker = ops.sum(bw_ker, axis=reduce_axis_kernel) ebops = ops.sum(bw_inp[:, None] * bw_ker) - if self._bias is not None: - size = ops.cast(ops.prod(shape), self.dtype) - bw_bias = self.bias_quantizer.quantizer.bits_(ops.shape(self._bias)) + if self.use_bias: + size = ops.cast(ops.prod(self.input_shape), self.dtype) + bw_bias = self.bias_quantizer.get_total_bits(ops.shape(self._bias)) ebops += ops.mean(bw_bias) * size return ebops def call(self, x, training=None): - input_shape = x.shape x = self.pre_forward(x, training) x = super().call(x) x = self.post_forward(x, training) if self.use_hgq and self.enable_quantization: - self.add_loss(self.hgq_loss(input_shape)) + self.add_loss(self.hgq_loss()) return x @@ -901,19 +907,21 @@ def bias(self): def bias(self, bias): self._bias = bias - def ebops(self, shape): - bw_inp = self.input_quantizer.quantizer.bits_(shape) - bw_ker = self.weight_quantizer.quantizer.bits_(ops.shape(self.kernel)) + def ebops(self, include_mask=False): + bw_inp = self.input_quantizer.get_total_bits(self.input_shape) + bw_ker = self.weight_quantizer.get_total_bits(ops.shape(self._kernel)) + if include_mask: + mask = self.handle_transpose(self.pruning_layer.get_hard_mask(), self.weight_transpose_back, do_transpose=True) + bw_ker = bw_ker * mask ebops = 
ops.sum(ops.matmul(bw_inp, bw_ker)) ebops = ebops * self.n_parallel / self.parallelization_factor if self.use_bias: - bw_bias = self.bias_quantizer.quantizer.bits_(ops.shape(self.bias)) - size = ops.cast(ops.prod(shape), self.dtype) + bw_bias = self.bias_quantizer.get_total_bits(ops.shape(self._bias)) + size = ops.cast(ops.prod(self.input_shape), self.dtype) ebops += ops.mean(bw_bias) * size return ebops def call(self, x, training=None): - input_shape = x.shape x = self.pre_forward(x, training) x = ops.matmul(x, self.kernel) bias = self.bias @@ -921,7 +929,7 @@ def call(self, x, training=None): x = ops.add(x, bias) x = self.post_forward(x, training) if self.use_hgq and self.enable_quantization: - self.add_loss(self.hgq_loss(input_shape)) + self.add_loss(self.hgq_loss()) return x @@ -1016,6 +1024,7 @@ def build(self, input_shape): shape = [1] * len(input_shape) shape[self.axis] = input_shape[self.axis] self._shape = tuple(shape) + self.input_shape = (1,) + input_shape[1:] def apply_final_compression(self): self.final_compression_done = True @@ -1028,19 +1037,18 @@ def apply_final_compression(self): beta = self.bias_quantizer(beta) self.beta.assign(beta) - def ebops(self, shape): - bw_inp = self.input_quantizer.quantizer.bits_(shape) - bw_ker = ops.reshape(self.weight_quantizer.quantizer.bits_(self.moving_mean.shape), self._shape) - bw_bias = ops.reshape(self.bias_quantizer.quantizer.bits_(self.moving_mean.shape), self._shape) - size = ops.cast(ops.prod(shape), self.dtype) + def ebops(self): + bw_inp = self.input_quantizer.get_total_bits(self.input_shape) + bw_ker = ops.reshape(self.weight_quantizer.get_total_bits(self.moving_mean.shape), self._shape) + bw_bias = ops.reshape(self.bias_quantizer.get_total_bits(self.moving_mean.shape), self._shape) + size = ops.cast(ops.prod(self.input_shape), self.dtype) ebops = ops.sum(bw_inp * bw_ker) + ops.mean(bw_bias) * size return ebops - def hgq_loss(self, shape): - shape = (1,) + shape[1:] + def hgq_loss(self): if self.is_pretraining or not self.use_hgq: return ops.convert_to_tensor(0.0) - loss = self.hgq_beta * self.ebops(shape) + loss = self.hgq_beta * self.ebops() loss += self.weight_quantizer.hgq_loss() loss += self.bias_quantizer.hgq_loss() if self.quantize_input: @@ -1101,7 +1109,7 @@ def call(self, inputs, training=None, mask=None): scale=gamma, epsilon=self.epsilon, ) - self.add_loss(self.hgq_loss(inputs.shape)) + self.add_loss(self.hgq_loss()) return ops.cast(outputs, self.compute_dtype) def get_input_quantization_bits(self): @@ -1185,6 +1193,7 @@ def build(self, input_shape): if self.use_hgq: self.input_quantizer.build(input_shape) self.output_quantizer.build(self.compute_output_shape(input_shape)) + self.input_shape = (1,) + input_shape[1:] def get_input_quantization_bits(self): return self.input_quantizer.get_quantization_bits() @@ -1213,15 +1222,14 @@ def post_pooling(self, x, training): x = self.output_quantizer(x, training=training) return x - def ebops(self, shape): - bw_inp = self.input_quantizer.quantizer.bits_(shape) + def ebops(self): + bw_inp = self.input_quantizer.get_total_bits(self.input_shape) return ops.sum(bw_inp) - def hgq_loss(self, shape): - shape = (1,) + shape[1:] + def hgq_loss(self): if self.is_pretraining or not self.use_hgq: return ops.convert_to_tensor(0.0) - loss = self.hgq_beta * self.ebops(shape) + loss = self.hgq_beta * self.ebops() if self.quantize_input: loss += self.input_quantizer.hgq_loss() if self.quantize_output: @@ -1232,8 +1240,10 @@ def get_config(self): config = super().get_config() config.update( { 
- "i": self.i, - "f": self.f, + "i_input": self.i_input, + "f_input": self.f_input, + "i_output": self.i_output, + "f_output": self.f_output, "is_pretraining": self.is_pretraining, "overflow": self.overflow, "hgq_gamma": self.hgq_gamma, @@ -1274,12 +1284,11 @@ def __init__( ) def call(self, x, training=None): - input_shape = x.shape x = self.pre_pooling(x, training) x = super().call(x) x = self.post_pooling(x, training) if self.use_hgq and self.enable_quantization: - self.add_loss(self.hgq_loss(input_shape)) + self.add_loss(self.hgq_loss()) return x @@ -1312,12 +1321,11 @@ def __init__( ) def call(self, x, training=None): - input_shape = x.shape x = self.pre_pooling(x, training) x = super().call(x) x = self.post_pooling(x, training) if self.use_hgq and self.enable_quantization: - self.add_loss(self.hgq_loss(input_shape)) + self.add_loss(self.hgq_loss()) return x @@ -1550,11 +1558,19 @@ def get_layer_keep_ratio(model): PQDense, ), ): - # weight, bias = layer.prune_and_quantize(layer.weight, layer.bias) - weight = layer.kernel - total_w += ops.size(weight) - rem = ops.count_nonzero(weight) - remaining_weights += rem + if layer.pruning_first: + weight = ops.transpose(layer.pruning_layer.get_hard_mask(), layer.weight_transpose_back) * layer._kernel + if layer.enable_quantization: + weight = layer.weight_quantizer(weight) + weight = weight + else: + weight = layer._kernel + if layer.enable_quantization: + weight = layer.weight_quantizer(weight) + weight = ops.transpose(layer.pruning_layer.get_hard_mask(), layer.weight_transpose_back) * weight + total_w += ops.size(weight) + rem = ops.count_nonzero(weight) + remaining_weights += rem elif isinstance(layer, PQSeparableConv2d): depthwise_weight = ops.cast(layer.depthwise_conv.kernel, layer.depthwise_conv.kernel.dtype) pointwise_weight = ops.cast(layer.pointwise_conv.kernel, layer.pointwise_conv.kernel.dtype) @@ -1854,9 +1870,6 @@ def add_compression_layers(model, config, input_shape=None): strides=layer.strides, padding=layer.padding, data_format=layer.data_format, - name=layer.name, - in_quant_bits=layer.in_quant_bits, - out_quant_bits=layer.out_quant_bits, ) set_quantization_bits_activations(config, layer, new_layer) new_layer.build(x.shape) @@ -1869,7 +1882,6 @@ def add_compression_layers(model, config, input_shape=None): strides=layer.strides, padding=layer.padding, data_format=layer.data_format, - name=layer.name, ) set_quantization_bits_activations(config, layer, new_layer) new_layer.build(x.shape) @@ -2144,3 +2156,13 @@ def post_training_prune(model, config, calibration_data): post_pretrain_functions(model, config) model(inputs, training=True) # True so pruning works return apply_final_compression(model, config) + + +def get_ebops(model): + ebops = 0 + for m in model.layers: + if isinstance(m, (PQWeightBiasBase)): + ebops += m.ebops(include_mask=True) + elif isinstance(m, (PQAvgPoolBase, PQBatchNormalization, PQActivation)): + ebops += m.ebops() + return ebops diff --git a/src/pquant/core/keras/quantizer.py b/src/pquant/core/keras/quantizer.py index 0c46fff..f292cb1 100644 --- a/src/pquant/core/keras/quantizer.py +++ b/src/pquant/core/keras/quantizer.py @@ -25,6 +25,13 @@ def build(self, input_shape): if self.use_hgq: self.quantizer.build(input_shape) + def get_total_bits(self, shape): + if self.use_hgq: + return self.quantizer.bits_(shape) + else: + b = self.i + self.f + self.k + return keras.ops.ones(shape) * b + def get_quantization_bits(self): if self.use_hgq: return self.quantizer.quantizer.k, self.quantizer.quantizer.i, 
self.quantizer.quantizer.f diff --git a/src/pquant/core/torch/activations.py b/src/pquant/core/torch/activations.py index 67b9337..d6e2418 100644 --- a/src/pquant/core/torch/activations.py +++ b/src/pquant/core/torch/activations.py @@ -123,8 +123,8 @@ def post_pre_train_function(self): self.is_pretraining = False def ebops(self): - bw_inp = self.input_quantizer.quantizer.bits_(self.input_shape) - bw_out = self.output_quantizer.quantizer.bits_(self.input_shape) + bw_inp = self.input_quantizer.get_total_bits(self.input_shape) + bw_out = self.output_quantizer.get_total_bits(self.input_shape) return torch.sum((2.0**bw_inp) * bw_out) * 1e-4 # type: ignore def hgq_loss(self): @@ -168,8 +168,10 @@ def get_config(self): config.update( { "config": self.config.get_dict(), - "i": float(self.i), - "f": float(self.f), + "i_input": float(self.i_input), + "f_input": float(self.f_input), + "i_output": float(self.i_output), + "f_output": float(self.f_output), } ) return config diff --git a/src/pquant/core/torch/layers.py b/src/pquant/core/torch/layers.py index 3f74ca3..0efcf14 100644 --- a/src/pquant/core/torch/layers.py +++ b/src/pquant/core/torch/layers.py @@ -169,7 +169,7 @@ def hgq_loss(self): return 0.0 loss = self.hgq_beta * self.ebops() loss += self.weight_quantizer.hgq_loss() - if self.bias is not None: + if self._bias is not None: loss += self.bias_quantizer.hgq_loss() if self.quantize_input: loss += self.input_quantizer.hgq_loss() @@ -253,13 +253,15 @@ def __init__( del self._parameters["bias"] self.pruning_layer.build(self._weight.shape) - def ebops(self): - bw_inp = self.input_quantizer.quantizer.bits_(self.input_shape) - bw_ker = self.weight_quantizer.quantizer.bits_(ops.shape(self._weight)) + def ebops(self, include_mask=False): + bw_inp = self.input_quantizer.get_total_bits(self.input_shape) + bw_ker = self.weight_quantizer.get_total_bits(ops.shape(self._weight)) + if include_mask: + bw_ker = bw_ker * self.pruning_layer.get_hard_mask() ebops = ops.sum(F.linear(bw_inp, bw_ker)) ebops = ebops * self.n_parallel / self.parallelization_factor - if self.bias is not None: - bw_bias = self.bias_quantizer.quantizer.bits_(ops.shape(self.bias)) + if self._bias is not None: + bw_bias = self.bias_quantizer.get_total_bits(ops.shape(self._bias)) size = ops.cast(ops.prod(list(self.input_shape)), self._weight.dtype) ebops += ops.mean(bw_bias) * size return ebops @@ -364,9 +366,11 @@ def __init__( del self._parameters["bias"] self.pruning_layer.build(self._weight.shape) - def ebops(self): - bw_inp = self.input_quantizer.quantizer.bits_(self.input_shape) - bw_ker = self.weight_quantizer.quantizer.bits_(ops.shape(self.weight)) + def ebops(self, include_mask=False): + bw_inp = self.input_quantizer.get_total_bits(self.input_shape) + bw_ker = self.weight_quantizer.get_total_bits(ops.shape(self._weight)) + if include_mask: + bw_ker = bw_ker * self.pruning_layer.get_hard_mask() if self.parallelization_factor < 0: ebops = ops.sum(F.conv2d(bw_inp, bw_ker, stride=self.stride, padding=self.padding, dilation=self.dilation)) else: @@ -376,9 +380,9 @@ def ebops(self): bw_inp = ops.max(bw_inp, axis=reduce_axis_input) bw_ker = ops.sum(bw_ker, axis=reduce_axis_kernel) ebops = ops.sum(bw_inp[None, :] * bw_ker) - if self.bias is not None: + if self._bias is not None: size = ops.cast(ops.prod(list(self.input_shape)), self.weight.dtype) - bw_bias = self.bias_quantizer.quantizer.bits_(ops.shape(self.bias)) + bw_bias = self.bias_quantizer.get_total_bits(ops.shape(self._bias)) ebops += ops.mean(bw_bias) * size return ebops @@ -489,9 
+493,11 @@ def __init__( del self._parameters["bias"] self.pruning_layer.build(self._weight.shape) - def ebops(self): - bw_inp = self.input_quantizer.quantizer.bits_(self.input_shape) - bw_ker = self.weight_quantizer.quantizer.bits_(ops.shape(self.weight)) + def ebops(self, include_mask=False): + bw_inp = self.input_quantizer.get_total_bits(self.input_shape) + bw_ker = self.weight_quantizer.get_total_bits(ops.shape(self._weight)) + if include_mask: + bw_ker = bw_ker * self.pruning_layer.get_hard_mask() if self.parallelization_factor < 0: ebops = ops.sum(F.conv1d(bw_inp, bw_ker, stride=self.stride, padding=self.padding, dilation=self.dilation)) else: @@ -503,7 +509,7 @@ def ebops(self): ebops = ops.sum(bw_inp[None, :] * bw_ker) if self.bias is not None: size = ops.cast(ops.prod(list(self.input_shape)), self.weight.dtype) - bw_bias = self.bias_quantizer.quantizer.bits_(ops.shape(self.bias)) + bw_bias = self.bias_quantizer.get_total_bits(ops.shape(self._bias)) ebops += ops.mean(bw_bias) * size return ebops @@ -636,7 +642,7 @@ def post_pretrain_function(self): self.is_pretraining = False def ebops(self): - bw_inp = self.input_quantizer.quantizer.bits_(self.input_shape) + bw_inp = self.input_quantizer.get_total_bits(self.input_shape) return torch.sum(bw_inp) def hgq_loss(self): @@ -873,9 +879,9 @@ def bias(self): return self._bias def ebops(self): - bw_inp = self.input_quantizer.quantizer.bits_(self.input_shape) - bw_ker = ops.reshape(self.weight_quantizer.quantizer.bits_(self.running_mean.shape), self._shape) - bw_bias = ops.reshape(self.bias_quantizer.quantizer.bits_(self.running_mean.shape), self._shape) + bw_inp = self.input_quantizer.get_total_bits(self.input_shape) + bw_ker = ops.reshape(self.weight_quantizer.get_total_bits(self.running_mean.shape), self._shape) + bw_bias = ops.reshape(self.bias_quantizer.get_total_bits(self.running_mean.shape), self._shape) size = ops.cast(ops.prod(list(self.input_shape)), self._weight.dtype) ebops = ops.sum(bw_inp * bw_ker) + ops.mean(bw_bias) * size return ebops @@ -1138,7 +1144,6 @@ def add_pruning_to_model(module, config, prefix=""): sparse_layer = PQDense( config, layer.in_features, layer.out_features, layer.bias is not None, quantize_input, quantize_output ) - sparse_layer.pruning_layer.build(layer.weight.shape) sparse_layer._weight.data = layer.weight.data if layer.bias is not None: sparse_layer._bias.data = layer.bias.data @@ -1163,7 +1168,6 @@ def add_pruning_to_model(module, config, prefix=""): quantize_input, quantize_output, ) - sparse_layer.pruning_layer.build(layer.weight.shape) sparse_layer._weight.data = layer.weight.data if layer.bias is not None: sparse_layer._bias.data = layer.bias.data @@ -1187,7 +1191,6 @@ def add_pruning_to_model(module, config, prefix=""): quantize_input, quantize_output, ) - sparse_layer.pruning_layer.build(layer.weight.shape) sparse_layer._weight.data = layer.weight.data if layer.bias is not None: sparse_layer._bias.data = layer.bias.data @@ -1310,9 +1313,18 @@ def get_layer_keep_ratio(model): remaining_weights = 0 for layer in model.modules(): if isinstance(layer, (PQConv2d, PQConv1d, PQDense)): - weight, _ = layer.weight, layer.bias - total_w += weight.numel() - rem = torch.count_nonzero(weight) + if layer.pruning_first: + weight = layer.pruning_layer.get_hard_mask() * layer._weight + if layer.enable_quantization: + weight = layer.weight_quantizer(weight) + weight = weight + else: + weight = layer._weight + if layer.enable_quantization: + weight = layer.weight_quantizer(weight) + weight = 
layer.pruning_layer.get_hard_mask() * weight + total_w += ops.size(weight) + rem = ops.count_nonzero(weight) remaining_weights += rem elif layer.__class__ in (nn.Conv2d, nn.Conv1d, nn.Linear): total_w += layer.weight.numel() @@ -1424,3 +1436,13 @@ def post_training_prune(model, config, calibration_data): post_pretrain_functions(model, config) model(inputs) return remove_compression_layers(model, config) + + +def get_ebops(model): + ebops = 0 + for m in model.modules(): + if isinstance(m, (PQWeightBiasBase)): + ebops += m.ebops(include_mask=True) + elif isinstance(m, (PQAvgPoolBase, PQBatchNorm2d, PQActivation)): + ebops += m.ebops() + return ebops diff --git a/src/pquant/core/torch/quantizer.py b/src/pquant/core/torch/quantizer.py index 38224c6..8f7e27f 100644 --- a/src/pquant/core/torch/quantizer.py +++ b/src/pquant/core/torch/quantizer.py @@ -23,6 +23,13 @@ def get_quantization_bits(self): else: return self.k, self.i, self.f + def get_total_bits(self, shape): + if self.use_hgq: + return self.quantizer.bits_(shape) + else: + b = self.i + self.f + self.k + return torch.ones(shape).to(b.device) * b + def set_quantization_bits(self, i, f): if self.use_hgq: self.quantizer.quantizer._i.assign(self.quantizer.quantizer._i * 0.0 + i) diff --git a/src/pquant/pruning_methods/activation_pruning.py b/src/pquant/pruning_methods/activation_pruning.py index fe90326..fd7d597 100644 --- a/src/pquant/pruning_methods/activation_pruning.py +++ b/src/pquant/pruning_methods/activation_pruning.py @@ -73,7 +73,7 @@ def call(self, weight): # Mask is only updated every t_delta step, using collec else: return self.mask * weight - def get_hard_mask(self, weight): + def get_hard_mask(self, weight=None): return self.mask def post_pre_train_function(self): diff --git a/src/pquant/pruning_methods/autosparse.py b/src/pquant/pruning_methods/autosparse.py index 3025f2d..a3e33e3 100644 --- a/src/pquant/pruning_methods/autosparse.py +++ b/src/pquant/pruning_methods/autosparse.py @@ -90,7 +90,7 @@ def call(self, weight): self.mask = ops.reshape(mask, weight.shape) return ops.sign(weight) * ops.reshape(mask, weight.shape) - def get_hard_mask(self, weight): + def get_hard_mask(self, weight=None): return self.mask def get_mask(self, weight): diff --git a/src/pquant/pruning_methods/cs.py b/src/pquant/pruning_methods/cs.py index 96c125a..9a2de5c 100644 --- a/src/pquant/pruning_methods/cs.py +++ b/src/pquant/pruning_methods/cs.py @@ -12,24 +12,25 @@ def __init__(self, config, layer_type, *args, **kwargs): config = TuningConfig.load_from_config(config) self.config = config - self.beta = 1.0 self.final_temp = config.pruning_parameters.final_temp self.do_hard_mask = False self.layer_type = layer_type - self.mask = None self.is_pretraining = True def build(self, input_shape): self.s_init = ops.convert_to_tensor(self.config.pruning_parameters.threshold_init * ops.ones(input_shape)) self.s = self.add_weight(name="threshold", shape=input_shape, initializer=Constant(self.s_init), trainable=True) self.scaling = 1.0 / ops.sigmoid(self.s_init) + self.beta = self.add_weight(name="beta", shape=(), initializer=Constant(1.0), trainable=False) + self.mask = self.add_weight(name="mask", shape=input_shape, initializer=Constant(1.0), trainable=False) super().build(input_shape) def call(self, weight): if self.is_pretraining: return weight - self.mask = self.get_mask() - return self.mask * weight + mask = self.get_mask() + self.mask.assign(mask) + return mask * weight def pre_finetune_function(self): self.do_hard_mask = True @@ -50,7 +51,7 @@ def 
pre_epoch_function(self, epoch, total_epochs): pass def post_epoch_function(self, epoch, total_epochs): - self.beta *= self.final_temp ** (1 / (total_epochs - 1)) + self.beta.assign(self.beta * self.final_temp ** (1 / (total_epochs - 1))) def get_hard_mask(self, weight=None): if self.config.pruning_parameters.enable_pruning: @@ -60,7 +61,7 @@ def get_hard_mask(self, weight=None): def post_round_function(self): min_beta_s_s0 = ops.minimum(self.beta * self.s, self.s_init) self.s.assign(min_beta_s_s0) - self.beta = 1 + self.beta.assign(1.0) def calculate_additional_loss(self): return ops.convert_to_tensor( diff --git a/src/pquant/pruning_methods/dst.py b/src/pquant/pruning_methods/dst.py index 66c8fa4..45774c0 100644 --- a/src/pquant/pruning_methods/dst.py +++ b/src/pquant/pruning_methods/dst.py @@ -65,7 +65,7 @@ def call(self, weight): self.add_loss(self.calculate_additional_loss()) return masked_weight - def get_hard_mask(self, weight): + def get_hard_mask(self, weight=None): return self.mask def get_mask(self, weight): diff --git a/src/pquant/pruning_methods/mdmm.py b/src/pquant/pruning_methods/mdmm.py index d87bc37..8140335 100644 --- a/src/pquant/pruning_methods/mdmm.py +++ b/src/pquant/pruning_methods/mdmm.py @@ -84,10 +84,13 @@ def call(self, weight): weight = weight * self.get_hard_mask(weight) else: self.penalty_loss = self.constraint_layer(weight) - + epsilon = self.config.pruning_parameters.epsilon + self.hard_mask = ops.cast(ops.abs(weight) > epsilon, weight.dtype) return weight - def get_hard_mask(self, weight): + def get_hard_mask(self, weight=None): + if weight is None: + return self.hard_mask epsilon = self.config.pruning_parameters.epsilon return ops.cast(ops.abs(weight) > epsilon, weight.dtype) diff --git a/src/pquant/pruning_methods/pdp.py b/src/pquant/pruning_methods/pdp.py index 88f9ba8..0b5272d 100644 --- a/src/pquant/pruning_methods/pdp.py +++ b/src/pquant/pruning_methods/pdp.py @@ -37,9 +37,11 @@ def pre_epoch_function(self, epoch, total_epochs): def post_round_function(self): pass - def get_hard_mask(self, weight): + def get_hard_mask(self, weight=None): if self.fine_tuning: return self.mask + if weight is None: + return ops.cast((self.mask >= 0.5), self.mask.dtype) if self.config.pruning_parameters.structured_pruning: if self.layer_type == "conv": mask = self.get_mask_structured_channel(weight) diff --git a/tests/test_keras_compression_layers.py b/tests/test_keras_compression_layers.py index d8575a4..aae58a0 100644 --- a/tests/test_keras_compression_layers.py +++ b/tests/test_keras_compression_layers.py @@ -1560,7 +1560,7 @@ def test_ebops_dense(config_pdp, dense_input): model = keras.Model(inputs=inputs, outputs=act, name="test_dense") model = add_compression_layers(model, config_pdp, dense_input.shape) post_pretrain_functions(model, config_pdp) - model.layers[1].hgq_loss(dense_input.shape) + model.layers[1].hgq_loss() inputs = keras.Input(shape=dense_input.shape[1:]) out = Dense(OUT_FEATURES, use_bias=True)(inputs) @@ -1568,7 +1568,7 @@ def test_ebops_dense(config_pdp, dense_input): model = keras.Model(inputs=inputs, outputs=act, name="test_dense") model = add_compression_layers(model, config_pdp, dense_input.shape) post_pretrain_functions(model, config_pdp) - model.layers[1].hgq_loss(dense_input.shape) + model.layers[1].hgq_loss() def test_ebops_conv2d(config_pdp, conv2d_input): @@ -1580,7 +1580,7 @@ def test_ebops_conv2d(config_pdp, conv2d_input): model = keras.Model(inputs=inputs, outputs=act, name="test_conv2d") model = add_compression_layers(model, config_pdp, 
conv2d_input.shape) post_pretrain_functions(model, config_pdp) - model.layers[1].hgq_loss(conv2d_input.shape) + model.layers[1].hgq_loss() config_pdp.quantization_parameters.use_high_granularity_quantization = True config_pdp.quantization_parameters.enable_quantization = True @@ -1590,7 +1590,7 @@ def test_ebops_conv2d(config_pdp, conv2d_input): model = keras.Model(inputs=inputs, outputs=act, name="test_conv2d") model = add_compression_layers(model, config_pdp, conv2d_input.shape) post_pretrain_functions(model, config_pdp) - model.layers[1].hgq_loss(conv2d_input.shape) + model.layers[1].hgq_loss() def test_ebops_conv1d(config_pdp, conv1d_input): @@ -1602,7 +1602,7 @@ def test_ebops_conv1d(config_pdp, conv1d_input): model = keras.Model(inputs=inputs, outputs=act, name="test_dense") model = add_compression_layers(model, config_pdp, conv1d_input.shape) post_pretrain_functions(model, config_pdp) - model.layers[1].hgq_loss(conv1d_input.shape) + model.layers[1].hgq_loss() config_pdp.quantization_parameters.use_high_granularity_quantization = True config_pdp.quantization_parameters.enable_quantization = True @@ -1612,7 +1612,7 @@ def test_ebops_conv1d(config_pdp, conv1d_input): model = keras.Model(inputs=inputs, outputs=act, name="test_dense") model = add_compression_layers(model, config_pdp, conv1d_input.shape) post_pretrain_functions(model, config_pdp) - model.layers[1].hgq_loss(conv1d_input.shape) + model.layers[1].hgq_loss() def test_ebops_bn(config_pdp, conv2d_input): @@ -1626,20 +1626,19 @@ def test_ebops_bn(config_pdp, conv2d_input): model = keras.Model(inputs=inputs, outputs=act, name="test_bn") model = add_compression_layers(model, config_pdp, conv2d_input.shape) post_pretrain_functions(model, config_pdp) - if keras.backend.image_data_format() == "channels_first": - model.layers[2].hgq_loss((1, 32, 30, 30)) # Does not work, TODO: Fix - else: - model.layers[2].hgq_loss((1, 30, 30, 32)) + model.layers[2].hgq_loss() -def test_ebops_activations(config_pdp, dense_input): - config_pdp.quantization_parameters.use_high_granularity_quantization = True - config_pdp.quantization_parameters.enable_quantization = True +def test_ebops_activations(config_cs, dense_input): + config_cs.quantization_parameters.use_high_granularity_quantization = True + config_cs.quantization_parameters.enable_quantization = True inputs = keras.Input(shape=dense_input.shape[1:]) act = ReLU()(inputs) act2 = Activation("tanh")(act) model = keras.Model(inputs=inputs, outputs=act2, name="test_activations") - model = add_compression_layers(model, config_pdp, dense_input.shape) + model = add_compression_layers(model, config_cs, dense_input.shape) + post_pretrain_functions(model, config_cs) + model.layers[1].hgq_loss() def test_linear_direct(config_pdp, dense_input): diff --git a/tests/test_torch_compression_layers.py b/tests/test_torch_compression_layers.py index c3eaf57..bb6c26c 100644 --- a/tests/test_torch_compression_layers.py +++ b/tests/test_torch_compression_layers.py @@ -1937,3 +1937,19 @@ def test_batchnorm2d_parameter_quantizers_not_called_when_final_compression_done assert model.submodule.input_quantizer.layer_called == 1 assert model.submodule.weight_quantizer.layer_called == 0 assert model.submodule.bias_quantizer.layer_called == 0 + + +def test_ebops_dense_nonhgq(config_pdp, dense_input): + config_pdp.quantization_parameters.enable_quantization = True + # config_pdp.quantization_parameters.use_high_granularity_quantization = True + layer = Linear(IN_FEATURES, OUT_FEATURES, bias=False) + model = TestModel(layer, 
"relu") + model = add_compression_layers(model, config_pdp, dense_input.shape) + post_pretrain_functions(model, config_pdp) + model.submodule.ebops(include_mask=True) + + layer = Linear(IN_FEATURES, OUT_FEATURES, bias=True) + model = TestModel(layer, "relu") + model = add_compression_layers(model, config_pdp, dense_input.shape) + post_pretrain_functions(model, config_pdp) + model.submodule.ebops(include_mask=True) From 02923de6d19da2e46eb803589654a3b0e3bb8e84 Mon Sep 17 00:00:00 2001 From: Roope Niemi Date: Sat, 29 Nov 2025 21:18:37 +0100 Subject: [PATCH 21/37] fix finetune import bug --- src/pquant/core/finetuning.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/pquant/core/finetuning.py b/src/pquant/core/finetuning.py index 51a7464..5c429b3 100644 --- a/src/pquant/core/finetuning.py +++ b/src/pquant/core/finetuning.py @@ -10,7 +10,6 @@ import yaml from pydantic import BaseModel, Field, field_validator -from pquant import add_compression_layers, train_model from pquant.core import constants from pquant.data_models.finetuning_model import BaseFinetuningModel from pquant.data_models.fitcompress_model import BaseFitCompressModel @@ -241,6 +240,8 @@ def register_hyperparameter(self, name, optuna_func, *args, **kwargs): self.hyperparameters[name] = (optuna_func, args, kwargs) def objective(self, trial, model, train_func, valid_func, **kwargs): + from pquant import add_compression_layers, train_model + for param_name, (optuna_func, func_args, func_kwargs) in self.hyperparameters.items(): new_value = optuna_func(trial, *func_args, **func_kwargs) logging.info(f"Suggested {param_name} = {new_value}") From af45936bc67b875b5d9461e1fdabcaf2a5a3fcca Mon Sep 17 00:00:00 2001 From: Roope Niemi Date: Mon, 1 Dec 2025 12:14:22 +0100 Subject: [PATCH 22/37] hard_tanh was missing from keras layer replacement, added it --- src/pquant/core/keras/layers.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/pquant/core/keras/layers.py b/src/pquant/core/keras/layers.py index e1d868d..0cfdecb 100644 --- a/src/pquant/core/keras/layers.py +++ b/src/pquant/core/keras/layers.py @@ -1659,8 +1659,9 @@ def check_activation(layer, config): set_quantization_bits_activations(config, layer, act) act.build(layer.input.shape) elif layer.activation.__name__ == "tanh": + type_of_tanh = "tanh" if config.quantization_parameters.use_real_tanh else "hard_tanh" act = ( - PQActivation(config, "tanh", quantize_input=quantize_input, quantize_output=quantize_output) + PQActivation(config, type_of_tanh, quantize_input=quantize_input, quantize_output=quantize_output) if quantization_enabled else Activation(activation="tanh") ) From a1e84d17bd8849b2f5f44eb37fa43cef76899664 Mon Sep 17 00:00:00 2001 From: Anastasiia Date: Mon, 8 Dec 2025 11:55:08 +0100 Subject: [PATCH 23/37] Create ci.yml --- .github/workflows/ci.yml | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) create mode 100644 .github/workflows/ci.yml diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 0000000..ccb2687 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,29 @@ +name: ci +on: + push: + branches: + - master + - dev +permissions: + contents: write +jobs: + deploy: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Configure Git Credentials + run: | + git config user.name github-actions[bot] + git config user.email 41898282+github-actions[bot]@users.noreply.github.com + - uses: actions/setup-python@v5 + with: + python-version: 3.x + - run: echo 
"cache_id=$(date --utc '+%V')" >> $GITHUB_ENV + - uses: actions/cache@v4 + with: + key: mkdocs-material-${{ env.cache_id }} + path: ~/.cache + restore-keys: | + mkdocs-material- + - run: pip install mkdocs-material + - run: mkdocs gh-deploy --force From 2440baefe4a96b16c7fc7b7c5c6523064a95711f Mon Sep 17 00:00:00 2001 From: Anastasiia Petrovych Date: Mon, 8 Dec 2025 12:00:34 +0100 Subject: [PATCH 24/37] Trial documentation --- docs/index.md | 17 +++++++++++++++++ mkdocs.yml | 1 + 2 files changed, 18 insertions(+) create mode 100644 docs/index.md create mode 100644 mkdocs.yml diff --git a/docs/index.md b/docs/index.md new file mode 100644 index 0000000..000ea34 --- /dev/null +++ b/docs/index.md @@ -0,0 +1,17 @@ +# Welcome to MkDocs + +For full documentation visit [mkdocs.org](https://www.mkdocs.org). + +## Commands + +* `mkdocs new [dir-name]` - Create a new project. +* `mkdocs serve` - Start the live-reloading docs server. +* `mkdocs build` - Build the documentation site. +* `mkdocs -h` - Print help message and exit. + +## Project layout + + mkdocs.yml # The configuration file. + docs/ + index.md # The documentation homepage. + ... # Other markdown pages, images and other files. diff --git a/mkdocs.yml b/mkdocs.yml new file mode 100644 index 0000000..c97182f --- /dev/null +++ b/mkdocs.yml @@ -0,0 +1 @@ +site_name: My Docs From d07e0bb1b48c98afce76788ca5e20755c62ed726 Mon Sep 17 00:00:00 2001 From: Anastasiia Date: Mon, 8 Dec 2025 14:01:12 +0100 Subject: [PATCH 25/37] Create .readthedocs.yaml --- .readthedocs.yaml | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) create mode 100644 .readthedocs.yaml diff --git a/.readthedocs.yaml b/.readthedocs.yaml new file mode 100644 index 0000000..575f578 --- /dev/null +++ b/.readthedocs.yaml @@ -0,0 +1,23 @@ +# Read the Docs configuration file +# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details + +# Required +version: 2 + +# Set the OS, Python version, and other tools you might need +build: + os: ubuntu-24.04 + tools: + python: "3.13" + +# Build documentation in the "docs/" directory with Sphinx +sphinx: + configuration: docs/conf.py + +# Optionally, but recommended, +# declare the Python requirements required to build your documentation +# See https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html +# python: +# install: +# - requirements: docs/requirements.txt + From 5d28a28ac1fc2cfb1221f652a69e0cf8a74375e2 Mon Sep 17 00:00:00 2001 From: Anastasiia Date: Mon, 15 Dec 2025 14:23:16 +0100 Subject: [PATCH 26/37] Add documentation page (#18) Add documentation page for PQuantML using readthedocs --- .github/workflows/ci.yml | 29 --- .github/workflows/python-publish.yml | 33 ++++ .github/workflows/sphinx-build.yml | 59 ++++++ README.md | 8 +- docs/Makefile | 21 ++ docs/_static/pquant.png | Bin 27797 -> 0 bytes docs/index.md | 17 -- docs/make.bat | 35 ++++ docs/pruning_methods.md | 111 ----------- docs/quantization_parameters.md | 11 -- docs/requirements.txt | 5 + docs/source/_static/custom.css | 242 ++++++++++++++++++++++++ docs/source/_static/overview_pquant.png | Bin 0 -> 84938 bytes docs/source/_static/pquant.png | Bin 0 -> 24115 bytes docs/source/conf.py | 70 +++++++ docs/source/faq.md | 30 +++ docs/source/getting_started.md | 211 +++++++++++++++++++++ docs/source/index.rst | 64 +++++++ docs/source/install.md | 8 + docs/source/reference.md | 208 ++++++++++++++++++++ docs/source/status.md | 17 ++ mkdocs.yml | 1 - src/pquant/configs/config_pdp.yaml | 5 +- src/pquant/configs/finetuning.yaml | 11 +- 24 
files changed, 1014 insertions(+), 182 deletions(-) delete mode 100644 .github/workflows/ci.yml create mode 100644 .github/workflows/python-publish.yml create mode 100644 .github/workflows/sphinx-build.yml create mode 100644 docs/Makefile delete mode 100644 docs/_static/pquant.png delete mode 100644 docs/index.md create mode 100644 docs/make.bat delete mode 100644 docs/pruning_methods.md delete mode 100644 docs/quantization_parameters.md create mode 100644 docs/requirements.txt create mode 100644 docs/source/_static/custom.css create mode 100644 docs/source/_static/overview_pquant.png create mode 100644 docs/source/_static/pquant.png create mode 100644 docs/source/conf.py create mode 100644 docs/source/faq.md create mode 100644 docs/source/getting_started.md create mode 100644 docs/source/index.rst create mode 100644 docs/source/install.md create mode 100644 docs/source/reference.md create mode 100644 docs/source/status.md delete mode 100644 mkdocs.yml diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml deleted file mode 100644 index ccb2687..0000000 --- a/.github/workflows/ci.yml +++ /dev/null @@ -1,29 +0,0 @@ -name: ci -on: - push: - branches: - - master - - dev -permissions: - contents: write -jobs: - deploy: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - name: Configure Git Credentials - run: | - git config user.name github-actions[bot] - git config user.email 41898282+github-actions[bot]@users.noreply.github.com - - uses: actions/setup-python@v5 - with: - python-version: 3.x - - run: echo "cache_id=$(date --utc '+%V')" >> $GITHUB_ENV - - uses: actions/cache@v4 - with: - key: mkdocs-material-${{ env.cache_id }} - path: ~/.cache - restore-keys: | - mkdocs-material- - - run: pip install mkdocs-material - - run: mkdocs gh-deploy --force diff --git a/.github/workflows/python-publish.yml b/.github/workflows/python-publish.yml new file mode 100644 index 0000000..ac7cc16 --- /dev/null +++ b/.github/workflows/python-publish.yml @@ -0,0 +1,33 @@ +name: Upload Python Package + +on: + release: + types: [published] + +permissions: + contents: read + +jobs: + deploy: + environment: + name: pypi + url: https://pypi.org/p/pquant-ml + runs-on: ubuntu-latest + permissions: + id-token: write + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + - name: Set up Python + uses: actions/setup-python@v3 + with: + python-version: "3.10" + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install build + - name: Build package + run: python -m build + - name: Publish package + uses: pypa/gh-action-pypi-publish@release/v1 diff --git a/.github/workflows/sphinx-build.yml b/.github/workflows/sphinx-build.yml new file mode 100644 index 0000000..3851f33 --- /dev/null +++ b/.github/workflows/sphinx-build.yml @@ -0,0 +1,59 @@ +name: Documentation + +on: + # Runs on pushes targeting the default branch + push: + branches: ["dev"] + + # Allows you to run this workflow manually from the Actions tab + workflow_dispatch: + +# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages +permissions: + contents: read + pages: write + id-token: write + +# Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued. +# However, do NOT cancel in-progress runs as we want to allow these production deployments to complete. 
+concurrency: + group: "pages" + cancel-in-progress: false + +jobs: + # Build job + build: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 0 + - name: Setup Pages + uses: actions/configure-pages@v3 + - uses: actions/setup-python@v4 + with: + python-version: '3.11' + - name: Install dependencies + run: | + pip3 install ".[docs]" + - name: Build + run: | + cd docs + make html + - name: Upload artifact + uses: actions/upload-pages-artifact@v3 + with: + path: 'docs/_build/html' + + # Deployment job + deploy: + environment: + name: github-pages + url: ${{ steps.deployment.outputs.page_url }} + runs-on: ubuntu-latest + needs: build + steps: + - name: Deploy to GitHub Pages + id: deployment + uses: actions/deploy-pages@v4 diff --git a/README.md b/README.md index 1ffeaae..78a44e4 100644 --- a/README.md +++ b/README.md @@ -3,6 +3,9 @@ ## Prune and Quantize ML models PQuant is a library for training compressed machine learning models, developed at CERN as part of the [Next Generation Triggers](https://nextgentriggers.web.cern.ch/t13/) project. +Installation via pip: ```pip install pquant-ml```. +To run the code, [HGQ2](https://github.com/calad0i/HGQ2) is also needed. + PQuant replaces the layers and activations it finds with a Compressed (in the case of layers) or Quantized (in the case of activations) variant. These automatically handle the quantization of the weights, biases and activations, and the pruning of the weights. Both PyTorch and TensorFlow models are supported. @@ -29,11 +32,6 @@ A description of the pruning methods and their hyperparameters can be found [her A description of the quantization parameters can be found [here](docs/quantization_parameters.md). -### Installation - -```pip install .``` for regular install, ```pip install -e .``` if you wish to install as a local editable package -To run the code, [HGQ2](https://github.com/calad0i/HGQ2) is also needed. For now it only has local install available, so download the repository and install it locally. - ### Authors - Roope Niemi (CERN) - Anastasiia Petrovych (CERN) diff --git a/docs/Makefile b/docs/Makefile new file mode 100644 index 0000000..5647f38 --- /dev/null +++ b/docs/Makefile @@ -0,0 +1,21 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line, and also +# from the environment for the first two. +SPHINXOPTS ?= +SPHINXBUILD ?= sphinx-build +SOURCEDIR = . +BUILDDIR = _build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). +%: Makefile + @sphinx-apidoc -f -o autodoc/ ../src/HGQ + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) \ No newline at end of file diff --git a/docs/_static/pquant.png b/docs/_static/pquant.png deleted file mode 100644 index 086778bb6aa3e53a792592eedb586a94ee657308..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 27797 zcmeFYWmH_vx;5HBpb!qBZ%>ZeUJ z!#H}DNWz}!@Ga-3LN7&r(eS~3o6+(*lmiY_eG+q3>U~HJfh5hDx&!!rvmRgyA$_i# z*}|=Z^e^02PEJ!ZPhb{Q&CO2sC$ugtXn0aPujxg#f(yG0fJ@`s?@qeAxhC3volnmX z?Vu$URDKoi?xBYo=I;r<^1ARiKR@7XO=9bpBPLhzrwk`%+|F*+JiMZ! 
zqC8MO9zH%Uqy?9|kCO-7o6E_a;Q``r40%g;gqy94hpn>{_yH4c?)=h25&}Wyga6q+ zM;A4;#H(Xw=Re(8p= zRPeNP@?iLP2n)o&%DcRDbNI_03j~j)gQX+V)E!wX?|-WDiL#pJze+q%U~TK@@>eM& z+5gGX!`A9Q$ofxhd&v3Aoqul#viQGn|C9AUegDfCX{DwnChv@R`OrLNc}d7a{bCl* z2wMxWzb*wuEqVDYpdwr%g2GTP0U>^2E)f9{OD<6>At8Q#OLHp=5sQC=Qg(9pfIA^9 zAE1!n+_p#@WN|AvpRgbo+#G7fCBSRJ&m|%%V#S5v6BQER72*@*w)O zZ5`p(mOL&_)_-L@5H2R8sVoWM_@Qa8-1^#aHUwC?!Zth4WK49`fx%vOjdFU1~q%%lj;SV}R0{jJw^hHe0 z%@Xe6?55-F>>vqwfB-*~{8zRbGMp^n9&mZMhb0me$|ooW6%gYU)ZrBp<3(NwaX|UR zp#Ns?Y+-BV^Z#Z25IkUszq?$?)*V^D&tFM@N0hdu>)&^O-#Xa-6-r?6UqK-TNBr%A zJKWRq@BT(&{hfudfje1SB1eyZ$n~G;w*M0;i11tT2?`-BxCHqvM3HI|LiQh@6(1Me zN*F3&Au4PoDlGCB3;#lQcee8Ig1cGDSR;8va)p%VUtED%|LQ2V|7`7LWBJevNW!?F z0$fmD9bR5BK>;xVk$(`z^MBq&OSpw7p8!&w=At4<7p?d#xkN;eIu*7M5wH@mv^3{K z{0{{Fueyko0NVlmE>w z@{92ciSdd4-*b^q#1anYgNkxl2_ZXBz*6vG0JIW8>J}j^%E!xZDZ-2R&(ix}>Bawu z?EgWi5AFw9k+$OG;lesepDQjjUwE=tec0RUW*hd&g++xHa6L`)B5H3iHaARz$(vG6QQ zH2?qxD9g*}c+c+z__Q-xzC3pyGx{EIy(I2+4yUBBh(;Kwl4`oDqyiMxHTuVPl`uShuWtDul3LWm7UnY}D7~*6_7Y8hKCLWC@)* zwJYy0#(ODx_7yB-oc_3brQP>8Q; zE@ME--tFRG%G2qGFM4z}Vk8U-nK-DMld1;*&~{(eV=>eEVtFn4*Xh*p8#Mf6rFQEF zeQ|1xe(ALj%+#dx(|818KJ030;l1wmJXpLuC-3?-u;qC|uR5B5C>4%i&~xbiwNX~> zbSBn*L36Kq9fbyt#hvGY(I<&ZTDc_x%$iu4e7Zm@?}B`2I~{@Y%|}M2e|jT|+K(b| zO%?;1es~U$2i{r=)q<%MEaX`krK*F^=KM_ zP_K1ybgY|^?di8zyJ!laL!O>mp?G>Ug*Ac-N8WS<3bl)f!MKZ?LL@;gNpv>6a-bxB z27MqiSU)~~e|)b!oGjNu@e`iK+-egLW(!OMr7|nh#9~p-vBG%i5weQs{PBJowm_mO z34N7-PovQ4xaBAIql=4~0dRRQHfTKP-Pc})Ho&|@xZ^_8=RgojH$^nz{f}uCV7$|n zvcuH?|0E@q+}y5Aa(5D{40sd_5Dr@J+m>{x3Rsb^o~RRQvEN!K@;ngJ$7()}q}s2P z$Ag4GRwi=p(aXi}fZKDnNuR-kFkS{Kqr{dHHvjvMLs>Xbwp-G)182s66#>~7B9J}+ zQU@#^tBFC6j9*`KQv!+T?Hk@+*E+vHPV>pmG14nYDF5VC4M82iQRteLh62nLjd+#p ziE0*-LvpUtOb5oht^`%MtG({NsuWyw2aLHkM>%zOdR}g9EgfwN%MKsxIa(#PQO07w z%f5AxxPG6(^GEdJ{z$0hG~h5YL`E@uUq6si-}urJ3R0K2cl@F)WY)^w!(DI+Hx>Hk zQ}!UaVEq86PyX4(0GB3mf_yYw5U}Y$j~2o zoF)XF?`~e`0T=}P$55xGg&vHdwALQSEc~bk?0cS?je7xfU))$swGfv+XtA``*Kfvy zhKxiH4HgUvlNfiJ<$(Ygetz5IcG~ru*tn<7FV6d^NnMXEzZmefeHk_SrN-}2jX<)j zM;z7W8=q&mJ#WsWYhnN^LEM)g{dW_Ba1ahAWzoT!jq#p4%ckJcACYgeP$+vAr@2Hf zA60eNX!u=)f(BWZ{%o0BS@YIg;m3pUUM2ken9)fZiv@3lc6liMYc@V0F6N}Zl zfKKR@TZ^agtk=fz)Db1!S>kcmRg<&v5r5b3O0Umh%PrXK;{ZJy!iM9IrjnRLlDCu| zeUJ2q1rLMhiur+TDlXX!&Q0)o#pkx`WqLoomQI&k77q@Lt1c&Y?^k*s?am-(!i=hS z#f33m6hMXS9(Qe6-TX*VClV9@GDp67BiqG#^kljHpy7jxj>BU=kLg{nab_y)ZVD7u zlxf|_eADdu91#E-s%8uf6!DsRYpxH=ono~Vm64Pk{j^7#iis5!2!ssgQ3)+puU7YI z1d6aoe{-7qv3yB}FY{dY)WmDIZ5jL7a~HlhkehznA}-&>h5P-0+&uVhvwRZU;PJTM zQfO6H>snOiO^+kJZ_NdJtN4DfutIo@cwqIm^^(^Y!P$U)VL5|gRgnWGYZO4^QUhh1 zb2}LjJF(@QsZu(hmLBw@;jF8w|D3aqN$29W2M&P1ctt_P{BCLF0M_e)2*6{^Bi8-G z!+IB=Y;jRG-?I+UZbBR@786hFp{h6&U5u3;PNMh+y^4S8qNQvYM}v5*s*oXx67#x)>yKB}5hvrYqhCKoxVn z9Qq`ime@;w$XZ(f?FkZ8C5S^%dZKG)=({T`7F@V*0%*fSbRwXe2w=1{J z2P3BGx>3}ji0m5+;^c(Ks3(a^{_Pw2wXky8-c6X&%ws=J*?@#s0k;Su1YlK@?;_V> zk@I$+9Lj&UNwT%D-11V!Atwi)BXPbB6#S7WVu4;BV61n4E%OP@7vxhIGTh@Q9Uc54 z+JVk#?vIPc5_m}h6`i1`oqCo6i$niwJ%O^#agp)sAi4EgrIMPLe?SzU#HS~1@%cNc z>^vwV2h-s;bA0hM0eA#6$otctsd{&bE ze1b;$;TL&QY5xJ0b1*2pUIwg*MiK0K(cS#RXtDBIDK^*e-I0mNS7NUE>X^kab+Y%s zJaN$$Zyb{M^8M_kEiLG#*QL1k`$5LgjE>8y_48K&!yEpSZ8HZ=rV zR5BZ)vH9|4X+0D@i~YdPd!EC(XOXjqIEr#{4%EgSo9$Jm=Z9N3fHA%oe#Zl|XJ=Kd z#JQ5^pGk5;wF6IeZ4&UpGAHdewl8qs-@6wH#$E7M*RtEiy~X&jOK~02j!+8B=#0Wr z@z~#^$AodOW6nij1JXd-`-FK-YxJ{UTCp367 z<&n3B>YTMl0Vj{hdCyPlv91(an$a|=wJ=@Vm+k8j#@P> z%cXB|we=|DZ#Rv+{Yta|w?ykg^BIB*$Hlb7bnF63O?afr59E8tIEaUFJ%uy+LzIVk zW+e70y%(o)LgFdQgdH+7P>J~25xH1n%FyWT%BX;or!6PS9`d!-!gW=y>IrY4jaFZX z31D;RyVT$ElLz|Lbo*g`I8J7|)2}k<+$g>_a)hEkA+MXBm_RolZ3(et3a+_uUAjf+ 
zb2q3j1irg;kwagaVuN8dN87muFaMFada@ei@xtrzVh512>AVtc_=V(;oN``J!|yf; zCUM5g>If`O09)_Wn2+d#6q`l6?F&v#@MIJzHkry30m*#8;;3U90J#A zL+jip-0N7at!pl8$LEQDJd#(op~ZNn%j7#Kg$;aH%c4dD2=VX42N?a5u z4Uv(d+L3)vnGaivo`3Y4jYlEn?LYg{ZO|S^^B%XUP21pfQU=f{u^qJZ{YOUzFCa?i zN@0wf@Y9O-x|Ax&4POkEkgcZHOxMqYR7ICEnz~UDD*z)-@^{?=oiBcjsNF<7J!bMMK^rFVaBlu!+9AZt5Rcc)R+tc<^5)&YXXqR z0-*dPfT?=JW2g|Nmm?l=-HK6vkp~2NkoQ>Jm%-)x%v;^osJ(R(jlx~0c6GxFAsFfh z!TEi9Rdw~%(U~qR^s7AHvd&!0w-?vUT#&UPJ>$)0e0<1{zTfu6bLEjQ(9ig|x{_GA7Mvj_pJLXYaK&(^PyC)@> z`-!`qTy*|dDR~JiG*2oYJ*Mg_>+*jT=sl$dCaTZO)Z^@TB?QB6+D*E;7_qiTQr5>f z8g_oPX}rsN(~`eE_VZTyg1Q=^R_KjQkD41l`;6^~CiNw;m9x1%>w@yhRY83GH-J?m zXhad<^Uc5%)oWnQR;nY{%AN==kI~Ks;fT=ZhKSNv2t|xK-6HlL{SWuj#J6z(-G7D z>H8a0n;|*x-NK=e$czZg>_yqM5Z~L_w6e;ZvNQW1y>-Gf(Ybq`8LAq+iTf_W-%u#V zG;L@Af(qTS9}>HdH3H^CRnDcCE(tEj+U`;2DYnX?uO4SLElkrFGL}ZUVzT)}XOaib ztgqJxFe!T+6x!x3=})gp_z!6U3ERvh(&R7$$h&@?Sv%j)>w411$P9+R`O#Lr zzvKrT49=X~!81PEk-kmQe0h{0zZzVjf7iuh?r~<91X{W8bnmg}U9~?Bo{r;x909Q@ zqK=8)-0w`*Wg!}MT5j1sjy2(FUqVX{Q#TUr{>G2OW=s5hLWX?cY+X*dMqH*LdZ@bI zDhwDCUip^P%=Eds&mOAhWq`*TFFH}ocvY22#ED-hsA-{MXbGkGJPDtx{+msY;_lF% zV!27%xh3GJbNSl12Dgv*EWYPEa<^ES9w7$ILN@$vrc}_YU?Xp!Q1&S6|a<4I@7$Rt^E*Y()svKYn3e46~ z2r<7FRL|2pV6eJ3e2> zxAYSFZTUyP_>HaqpZ3#_X~-jmL3pciszS(e*ER{hs12bJTS3LgXQOMkvQ79l%tVo_ zP8Zx8q`}V+U!fWnp%Jo~|lIj1} zJ?)S%8{HR)OKgz1lNhj_{A4wJ*@vc{xA``VE_f2->+a8N6>c|3VdCnZ6H<>bGX2W1 z0^S*|{GUH3(lQir;)*nDM)=G?aVr@1i6X=mkUOWH(R%NY56`w#rR_L*Flo|F}Cwvi_oKA?MHgt?>KH zcL*V@wt$6cN3NETXqQnA)3}_#yVK)+!dt4wz@1ZW^qSW@VQer_!7M-@DKvUOTL4#u z=yfDh12pBN;Na@8W25b4($v2*OlI$ve^68(l39v^aS?fYNq2vlZ@NFM9mTXJ5x)2u z^;y_UA6Z#4-*}ns_9<-KQE3Z#n3O>M6K4W1>wW85g|jyw*sCF-5CTH~`5~zy^aub$ z6iws$aR4HMWsPXhnhjNm+K*G+d!9|;e2OJvTz_;pGXr4gP*(4z-U|a*5rL+id{Gm{ zFWG*l6lK9dzRXbmOu%X@gCmy_#sIKDBs!NH{lIE}8eNC-TFh$wDZTwhE>kPpre3m7XlpOWVF?SaYAl8z*?>@YONy2(qlXkb;eCBbAfD!%5UjDIraf?2NVqbT# zUarIt=$+91B$dn85!0AC@^&okwKa;zl;|-baEy3GUeO3n?OZ*oZ+3aY;!*bZn^nC| z#el{0<2aY^1sP9~o0+2a74!xc-+@wH;hWLruHBESa|({~-OnSJev?`i7P)?7E)(!r zRWXTQjh~`d2>D8}{uF-pOAz<>!tcAYmgz z1ZHZ5|GXP0{c?X5a3zkL`L*9vByViI@CwwgQcynk6$BGnmV$ACAcN{N)3S-hhG9p)nfEG)?u@NL(SN<7Gw?q zkL~whbs7QZDJL=G#_=U@u&G-&HAlxr;RCaLs^jWwzrHME4V#v`J}Z6^ff4etLARvb z6_6n4nLGQUD$aTF@?|&0V*5s0<%e-0`vF&fLsAr?5#8zFEnEFC!hsKmougrgU!{A1Yx)86-|CK_VvSx zI~QYLBE_oEgUzje_154&(e~Op-;KHaoIfL-aXMYlx}Aj&DQ*w2%`OSDt$OitBfFX4 zoK$`MC7seCg79hUJmz~DSu#7qhR2u_vptUFT{rscBrMcuC{-51zr$n3zUW38-EGEiBCyv3PXDx#c%CW0B;bXJ2 z`G?xO1W~l(dv3%kZyuS~|CURN&sCAtCjmTCaX}?wA70F+9PH_rc|4}T4>zo4{L~>$ zCoN+?Y41;ZE2?&_kH8F%na%{4h?BEF*ri(u8tVhv`#oz8aNB|9d*@H}}v z=~pOPI&+8~LRN)}|8ent_9Iz@%#G$7M!}=~jaVuq6*5jwTev^Thtf7Ch#Hxjaq0d5 zJ8tE8(}iBk&_G;maDCF)L7BC)3>kcq!fT2LJzs3`xT>&Om3AISWf0{&!3AF(GouQg zBZyIc`*Hg>arBN)RFh1mZ%3I1u)2_k7KV{4qJrXG*H(KSaEoSm#(GBg-q=boPWM#L zv>#0ZK&o(XfxK%5Q3>M;?Bq}@%>JQKJz*%LqwwlGM|rbM%`aozcY`Izxd)ugpBjvp zj8jZfbP8T2Id>7}Ty0~bXAxLgSyekYh$=owhkJTHo~q*)`F+%vo7y{8617MH4O4BX zaquSPubX1^2+^CCEl^=igU?O4=@j;+>6^yLW3e7=8%_ij7e`&boZ&sZK!xe!lT_D) zb#D%O_?RTBg3$_u->p-|kj$XX9*r8VqgI72;OV4K3VC$MbnC??@`leRR+slgf zb82SV%mla}9}!TVC_SRUY&e@aM8?4tj~V07|OuxAgp22sHM4^2Zkk}y?t zT1vU}rsI?fZbMS035nP0z;fCt10kJ|G>no(E(v@J{ILb_y`eIt=Q+Q%Z+sN(Q*WV9 zM3h<9BKCHVB_EL-0zQMP4_Nders-VOuU}&e$RD5fiv=HJTJ05g$b2G1^JqHDf2>J- z)wMonz3RAiZ~O&2hPSwW@U37Ok8z#V{>WRb+#0m`!Ned)VWT68TP@BvU=r_2eq8y! zVz(a~zc5CE3=y+tC19-l93omz=o9}wA{4bM?E^Ul8ii!_pC3u;Yr##NOgrI|U%0JJ zEqpn?1H^fLQ>GPu!hRBYne=-7I>zhi)ZOHJX zG)wRB=JCrG^x1}K>zQx2*h%dOu^WX5a%*k}KWB;e5}0T!VNAHeo0l1})iI3oJN?;DCP@w0Sd4@XEt?-wSGAzyyf}yjo_(PaPX1O5F&s2LzNV? 
z5v32(H5+#EU_k}!p`G@oW;&+$^@Cp%Z_?Xr`D9fjJNQr1XEC-A=Qr|<80u0 zVY(m`Go>uL+*OW!6|qkE18)Kj=?LxT+0S_a&wG^_@%8=Tll zSlx!gSz8cJo#wfa-HaZ=RsO4FlD@!{?Su;Ca?n!ei@4vZ>TX+yn1q-|Z2k0#nJ3pU z5Am_mE;5MCgX5>hs{@-ceVVCnD`$5d&F$OU_;b9lK23V`SugylLk>9JUoyAy5G~ZI z)HP$9`PT~H_w%l8-uMEoSt>E_xaSrsPiishYI!SOL%}EHV!{8rEn`R%?IurdRs0vz zq$(_fJ79RJ_WsivwiY?=(nUT_26}MExR9Sa5v8I?<*zj44G*4|R@vZcQFIF_r7I~V zi8vSzE^PJdOB~wsKi)J^1wGE_82ZE&h-_sqt?KT&^fE^fy4A8UHr11motVD_PT-}g zAoA}nGpt4bBuLM#Ix3|)9XHf!LV*##1jY5^!gp1@2*d3V)Tp95_xZb_U-1V_l2{yA z3yC9=U!B(+k>9+^0Iy;yZuKX!t5{lQNnw*nO-#tboCfA90^A0-U7?zQ>nO%SL!pEH zm-n>3IH?qA(tw5$?3*e-sNjQqk%>GO%9vIJMn^IR;q{O|1S-n|9h z^oro`Z|7)yDfzM20gd?^bYrbym5db)ciGdYn(&^PMeW`<7|x|LZuS+19)S@8cjx@RcU z(|;w5v@nZ4hO%oBahtlI^O-6TIC0JjbG-bawo@&|k}Eae{GRh(8DzT8hdHxR#4{|1 zmLp21NKt0jI4kV;lr%9)Hz@Wz=#fv8f=x?9g>~PQD``NsL`m;d#kmAd^%xd`iuOR2 zzx(SZX`94kGh2#D?o}WJXxg`7`MNDu_8`Z?aa%R3xoAQCFz2rT8a)tTnjIj8JUuMl|5`!S-gwpnxEyC@dhBFln{S9SP(cnx|{%-Znn4!Qp z1`8QvW;~!Kk}^*Uu3VljdwE1T^o>oE^JZ>#^q=q_Cfgf_K^q!Z9w)F#;st}I0P(ZB zc)`kt!F~jW;Cda2kz=up(!jRmrVr#}slqNORx-x;+@QvVTFH<}(?el9qOLA$uV6kD ztv_82FE2~&ZKUlL;KyqCL~EzpUL%!0X%TBEZeCv9dOMH@0l6=aXW%Y4k3cFZ1r+47GD(&;MC{|WN(rH*mt2w4GRx8b-2^(bkVmTrL7 z*u-Sk*1mBdvEn9!xS%LC?sMMv68oX(6`u zTzeKGsH69`5kwn&%yxpic(ru$MD;*X+6~EHIe;|=_{`S?v~K;zaSW+r*g0V(qN>(f z)Gom8CTP?pu3t=`?|jx7*Y5i1tWI2#{y5~BTB!U46CnU@{_zATW>j76o<3=)M(3k$_Zr3y@p}} z^wyV&1@Caf=JKxjT0C~wTmVC4u-<>a#2-{{*Rwr&Hx%L2DJx=T`~MoRHFYHLK(l$&>a6q0s&XI!X4~x&d87 z^}?`*tNzmDxDJ0CpdMUPv|>>RUtsaMYY^>*hhLvE$|xkv+vtW1vP{SM;x&LLUr90x zm(Rg|Sp89Y=!`B=Gj+ohFM&v>KlK1=hbh(d@6Rx;t+c*~_-Qp9<+n@XFS!TYTB3sC zBCaa8zA#lHyPQ3fV$&SQ?8u4o_qyqnnfP^G`__?Tgdx&0Xuk=*!b+CZhd5(^Z(O(| z!nng{h(V%W+~7z^H-U?(oiG@A^svnTt@=1pxDb9_EtO?1Uc})xW_|L^0eBFWg+STN zEkr$tE^)JOjyGT~m)kK3EZl)k0pMFiAhfp~Br}dxnb1;B6*CRIVY| z??t>KRz9*CuXyj8-SnoJy^pQ?zNVl|k|%9)wPRGawv2hxKY8YP3>B?epajGvPr0={ zXPp-1EN4vdpa}2nqaUa$)_oPj8GY2JBVEO*5cf6n+JN!j@iiKEM=A@V}S7fwtNc1PIC zllz`Smb^GAEK=R0U0tgbF}IVoW|0!`EC;|{R%a%WODz1}rJ=xCGP>cp;kBbYaJ(+C zcfv4S>3q}~nhmYZNe=L#UTE7pjO{x%Z6ud4^a=VVaS2`u+JTuQN<~N?2*YG0w+t{hg z24}HeygZl(1OvXaqQ2q9WEM#}YA1sh3npdC+BEKeQ(J!!{fYG%(?FY)lRa$H&*kR}8O4ivfzzIJ)=$ORx!1tY`xbDE&y41ossk^pGqZo_|8@7S_g{*Bp zBwG|&yQM-Rje{M2hltwlNr&MPp)Z*ZDwC=u-MG!YCuFzA1*LE;psclm_h#>@rkaRW z^jhU44G7Oor7`-Yk_}d+wkt)p;NOC=7Theka}? zUf^w+MMNAnqE1sM4VZzl5VcD#f-%Q(+~<_I#`ZJH7Ova@UVAr7>A>2O8<(}1;2;mt z(g$Hc76T{z;2=3blBVsNIN-Q6kc3drMpxRPyM-^6N1jR8frNlRT5~KIhKJdBx#!~&$f^8|kr*@zD8nk#bv;w*)5TP>lbt=T z-iK?#e@=*1k^EyI)om2(<)imRSk;*+bWR#(!cTgy*^)$_G(%~LC#E;VwpO7oZ+vqs zC+qMIlg*welrvNSTTMNZLa^#6N`SfmBaRCD16P#K?DFs&6Ekz&OeGHSHjj)Fl2ZA< z+Ar;DHM{j#MGmn{!46;VO~w$`kI{%AR=1@!u@&OV=(SEd&}a^!m(V$igwF_7^G#aG z;O6`%lBh(xNh3k`7&YrDG4UkTA5)cF-&n#_%hgFpvJ(E9dgwm~cntaDF!(xfL*PH+ zM@Jqh8*2g+xH(yUgpr06V4i@_#Zd+Y+a=8YPMy}^sIG?1U|LDdYmmJH{+wY{uxT8m zGug(jPP`nwY@l*tyRt3Hx-|tabA~NOP#0YqP!v+*_%BT3VCdAdFkA|UZpfGQO^S^@ z4To_ti(wh!xs7r5dtThV(AQu;X%fgm6>q@P`7ipHelMIp3A|nwJ91+EfzWFUena?; za=y!xa-mtAZTc@gK3>^$|M-Mh_;qIUUrSmv7$~wo1nneu@$ID&Absf)HNFxf+k=8l z8-5yw{Z_MXDz4a1DOo>Wq`HG=5)<}2Fc-$N!TE}@dyZgz<=V|QVGnh;m&{4|M97xl7x1>!SSqK}AA1Q@^qr73PMhc_O7nw#8 z!@c@GD-#43%#><@bfCU1JBD7ZAmS7tW;Jw!2nC$5>PS zl%dJNiEa0$DgwHeiIDIYN$UDZ^zblUpi~;8Pd4@sL#Ivf544+;ZA}Du7Ar6!#$Z>w z*xEiNfLAAo;2YubyEl6tyP;eanqbb4qY>KP+x9~oxY+^OU@isuoY;3)C*2|~3jnsl zjn3=9nE{wSRmzIM(M3JQdl5056>Ay1;}OpX@tpk~8%;tZ=Y_g zQD|5*HK^8pkTE?Y`zii(Y4dS;TWgx)0*{vd|2tcONUL 2>NUL +if errorlevel 9009 ( + echo. + echo.The 'sphinx-build' command was not found. 
-#### [Continuous Sparsification](https://arxiv.org/abs/1912.04427)
-A multi-round pruning algorithm.
-```math
-x = W \cdot M,
-```
-where
-```math
-M = \frac{\sigma(\beta s)}{\sigma(s_{init})}.
-```
-$\beta$ starts from its initial value at the beginning of each round and is increased exponentially until it reaches a final value. $s$ is a learnable matrix with the same shape as the weight matrix, and $s_{init}$ is the initial value of $s$.
-
-During each round, as the $s$ matrix is learned and $\beta$ is increased, the values of the mask get pushed more and more towards 0 and 1. After each round, $\beta$ is reset, the positive values of $s$ are set to the $s_{init}$ value, and the negative values are kept as they are. This means that the weights pruned by $s$ stay pruned after each round, while weights that have not been pruned previously can still be pruned in a later round, since their values in $s$ are reset. A sketch of the mask and the end-of-round reset follows the hyperparameter list below.
-
-Before fine-tuning, the mask is frozen and converted to a hard mask of 0s and 1s, and all the weights are rewound back to an earlier state.
-
-**Hyperparameters**
-- `final_temp`: Value up to which $\beta$ is increased during each round. 200 in the default config.
-- `threshold_decay`: L1 decay for the $s$ matrix. 1.0e-09 in the default config.
-- `threshold_init`: Initial value for $s$. 0 in the default config. A lower value means more pruning, a higher value less pruning.
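For illustration (not part of the original file), a minimal PyTorch sketch of the soft mask and the end-of-round reset, using the default `threshold_init` and `final_temp` values. The exponential ramp in `beta_at` is one plausible schedule, not necessarily the one pquant uses, and all names are hypothetical.

```python
import torch

s_init, final_temp = 0.0, 200.0  # threshold_init and final_temp from the default config


def soft_mask(s, beta):
    # M = sigmoid(beta * s) / sigmoid(s_init); the layer then computes W * M.
    return torch.sigmoid(beta * s) / torch.sigmoid(torch.tensor(s_init))


def beta_at(epoch, epochs_per_round, beta_init=1.0):
    # Exponential ramp from beta_init up to final_temp over one round.
    return beta_init * (final_temp / beta_init) ** (epoch / max(epochs_per_round - 1, 1))


def end_of_round_reset(s):
    # Positive (kept) entries of s are reset to s_init; negative (pruned) entries stay.
    return torch.where(s > 0, torch.full_like(s, s_init), s)
```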
-#### [DST](https://arxiv.org/abs/2005.06870)
-$x = ReLU(|W| - T)$.
-```math
-g = \begin{cases}
-    2 - 4\cdot|W|, & \text{if } |x| \leq 0.4 \\
-    0.4, & \text{if } 0.4 < |x| \leq 1 \\
-    0, & \text{if } |x| > 1.
-\end{cases}
-```
-The threshold $T$ is controlled by an additional loss term, calculated as
-```math
-\alpha \cdot \sum_{i,j}{e^{-T_{i,j}}}.
-```
-A sketch of the mask, the surrogate gradient, and this loss follows the hyperparameter list below.
-
-**Hyperparameters**
-- `alpha`: Used to control the threshold via the loss. 5.0e-06 in the default config.
-- `max_pruning_pct`: The algorithm has a tendency to prune whole layers, so if the pruned fraction goes above this value, the threshold is reset. 0.99 in the default config.
-- `threshold_decay`: Threshold decay for the optimizer. 0 in the default config.
-- `threshold_init`: Initial value for the threshold. 0 in the default config.
-- `threshold_type`: weightwise/channelwise/layerwise. Defines whether each weight has its own threshold, whether a threshold is shared between the weights in a channel, or whether the whole layer has a single threshold.
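For illustration (not part of the original file), a minimal PyTorch sketch of the hard mask, the piecewise surrogate gradient from the cases above, and the threshold loss. Names are hypothetical, and wiring the surrogate into autograd is omitted.

```python
import torch

alpha = 5.0e-06  # from the default config


def dst_mask(weight, threshold):
    # Hard step mask applied in the forward pass: keep where |W| - T > 0.
    return (weight.abs() - threshold > 0).float()


def surrogate_grad(weight, x):
    # Piecewise gradient of the step function, with x = ReLU(|W| - T) as above.
    g = torch.where(x.abs() <= 0.4, 2 - 4 * weight.abs(), torch.full_like(x, 0.4))
    return torch.where(x.abs() > 1, torch.zeros_like(x), g)


def threshold_loss(threshold):
    # Added to the task loss; exp(-T) pushes the thresholds upward, toward more pruning.
    return alpha * torch.exp(-threshold).sum()
```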
-#### [PDP](https://arxiv.org/abs/2305.11203)
-Captures the weight distribution of each layer and calculates a threshold, then takes a softmax between each weight and this value, creating a soft mask.
-
-$`W_h = topK(|W|, (1-r) \cdot n(W))`$\
-$`W_i = bottomK(|W|, r \cdot n(W))`$\
-$`t = 0.5 \cdot (min(W_h) + max(W_i))`$\
-$`z_w, m_w = softmax(\frac{(t^2, w^2)}{\tau})\text{ for } w \text{ in } W`$\
-$`w = m_w \cdot w`$,
-
-where $\tau$ is the temperature, $r$ is the target sparsity of the layer for that iteration, and $n(W)$ is the number of weights. The $m_w$ in the above equation holds the softmax values between each weight and the threshold; if a weight is above the threshold, the softmax result goes towards 1 very quickly because of the temperature. $r$ is increased linearly during training. The layerwise sparsity budget is calculated after a pre-training phase, such that the total sparsity of the model matches the target sparsity given in the config. A sketch of the soft-mask computation follows the hyperparameter list below.
-
-PDP has an unstructured, an N:M (not yet implemented here), and a channel pruning variant.
-
-**Hyperparameters**
-- `epsilon`: How fast to increase the sparsity during training. After each epoch, the sparsity is increased by this amount until the value reaches 1 (100% of the target sparsity). 0.015 in the default config, which means the target sparsity is reached after ~70 epochs.
-- `sparsity`: Target sparsity for the whole model.
-- `temperature`: Temperature of the softmax. 1e-5 in the default config.
-- `threshold_decay`: Not used.
-- `structured_pruning`: Whether to use the structured pruning variant or not. Structured pruning uses the L2 norms of the channels/neurons instead of the absolute values of the weights when calculating the threshold, and prunes whole channels/neurons using that threshold value.
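For illustration (not part of the original file), a minimal PyTorch sketch of the unstructured soft mask from the equations above. It assumes 0 < r < 1 so that both splits are non-empty; `pdp_soft_mask` is a hypothetical name, not pquant's API.

```python
import torch


def pdp_soft_mask(weight, r, tau=1e-5):
    # Split |W| into the bottom r fraction (W_i) and the top 1 - r fraction (W_h).
    flat = weight.abs().flatten()
    k = int(r * flat.numel())  # assumes 0 < r < 1
    sorted_w, _ = torch.sort(flat)
    t = 0.5 * (sorted_w[k:].min() + sorted_w[:k].max())  # threshold between the splits
    # Per-weight softmax over the pair (t^2, w^2); m_w (the keep weight) is entry 1.
    logits = torch.stack([t.pow(2).expand_as(weight), weight.pow(2)], dim=-1) / tau
    return torch.softmax(logits, dim=-1)[..., 1]


w = torch.randn(16, 16)
soft_w = w * pdp_soft_mask(w, r=0.5)  # soft-masked weights used during training
```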
-#### [Wanda](https://arxiv.org/abs/2306.11695)
-One-shot pruning; originally a post-training pruning method without fine-tuning (implementing the post-training version is on the to-do list).
-
-Using a calibration data set, calculate a metric based on the average input to the layer, and multiply the absolute values of the weights with that metric. Weights are then pruned in order of this product (lowest values first) until the target sparsity has been reached.
-
-For linear layers, the metric is the L2 norm over the batch dimension. For convolutions, the batch dimension is first reduced by averaging, and the L2 norm is then calculated over a flattened kernel dimension. A sketch for the linear case follows the hyperparameter list below.
-
-**Hyperparameters**
-- `calculate_pruning_budget`: If true, calculates a per-layer pruning budget while keeping the overall target sparsity. If false, prunes every layer at the target sparsity.
-- `M`: If doing N:M pruning, N and M should be non-null (N < M).
-- `N`: If doing N:M pruning, N and M should be non-null (N < M).
-- `threshold_decay`: Not used.
-- `sparsity`: Target sparsity. 0.9 in the default config.
-- `t_delta`: How many batches to collect as calibration data.
-- `t_start_collecting`: Training step at which collection starts.
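For illustration (not part of the original file), a minimal PyTorch sketch of the Wanda metric and mask for the linear case. It assumes the calibration batches are stacked into a single (samples, in_features) tensor and ranks weights within the whole layer; all names are hypothetical.

```python
import torch


def wanda_mask(weight, calib_inputs, sparsity):
    # Linear case: score = |W| * L2 norm of each input feature over the calibration batch.
    feature_norm = calib_inputs.norm(p=2, dim=0)  # (in_features,)
    score = weight.abs() * feature_norm           # broadcasts over the output rows
    k = int(sparsity * score.numel())             # number of weights to prune
    if k == 0:
        return torch.ones_like(score)
    cutoff = score.flatten().kthvalue(k).values   # k-th smallest score
    return (score > cutoff).float()


w = torch.randn(32, 64)                 # (out_features, in_features)
x_calib = torch.randn(256, 64)          # stacked calibration inputs
pruned_w = w * wanda_mask(w, x_calib, sparsity=0.9)
```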
diff --git a/docs/quantization_parameters.md b/docs/quantization_parameters.md
deleted file mode 100644
index 4da5270..0000000
--- a/docs/quantization_parameters.md
+++ /dev/null
@@ -1,11 +0,0 @@
-# Descriptions of the quantization parameters
- - `default_integer_bits`: Default integer bits used for quantization.
- - `default_fractional_bits`: Default fractional bits used for quantization. For ReLU, because it is unsigned and no bit is used for the sign, 1 bit is added to the default value when the compression layers are added.
- - `enable_quantization`: Enables quantization.
- - `hgq_gamma`: Scales the loss of HGQ. If too high, it can prune the whole model.
- - `hgq_heterogeneous`: If true, HGQ learns one set of bits for each weight in the model. If false, it learns one set of bits for each layer in the model.
- - `layer_specific`: Layers that use non-default quantization bits should be added here. A default config with all the layers can be created using the function `pquant.add_default_layer_quantization_pruning_to_config`.
- - `use_high_granularity_quantization`: If true, uses HGQ instead of fixed quantizers.
- - `use_real_tanh`: If true, uses a real tanh function before quantization. If false, uses hard tanh.
- - `use_relu_multiplier`: If true, multiplies the input of QuantizedReLU with a learned multiplier before the QuantizedReLU operation. The multiplication is `inputs_to_relu = inputs_to_relu * 2 ** (round(learned_multiplier))`. The learned multiplier is initialized at -1, so at the beginning of training the inputs are multiplied by 0.5 before QuantizedReLU.
- - `use_symmetric_quantization`: If true, `minimum_quantized_value == -maximum_quantized_value`.
diff --git a/docs/requirements.txt b/docs/requirements.txt
new file mode 100644
index 0000000..726ada1
--- /dev/null
+++ b/docs/requirements.txt
@@ -0,0 +1,5 @@
+sphinx
+furo
+myst-parser
+sphinx_rtd_theme
+sphinx-autodoc-typehints
diff --git a/docs/source/_static/custom.css b/docs/source/_static/custom.css
new file mode 100644
index 0000000..3300cdf
--- /dev/null
+++ b/docs/source/_static/custom.css
@@ -0,0 +1,242 @@
+html body nav.wy-nav-top,
+.wy-nav-top,
+.wy-header,
+.wy-header .wy-side-nav-search,
+.wy-nav-top > *,
+.wy-header > * {
+    background-color: #ffffff !important;
+    background-image: none !important;
+}
+
+.wy-nav-content {
+    max-width: 2000px !important;
+    width: 100% !important;
+}
+
+.rst-content,
+.rst-content .section {
+    max-width: 2000px !important;
+    width: 100% !important;
+}
+
+.wy-table-responsive {
+    overflow-x: visible !important;
+}
+
+.wy-table-responsive table,
+.rst-content table.docutils,
+.docutils {
+    width: 100% !important;
+    max-width: 100% !important;
+    table-layout: fixed !important;
+}
+
+.wy-table-responsive table td,
+.wy-table-responsive table th,
+.rst-content table.docutils td,
+.rst-content table.docutils th {
+    white-space: normal !important;
+    word-break: break-word !important;
+    overflow-wrap: break-word !important;
+}
+
+.wy-table-responsive table {
+    display: table !important;
+}
+
+.rst-content .literal-block,
+.rst-content .line-block,
+.rst-content .topic {
+    max-width: 2000px !important;
+}
+
+.wy-body-for-nav {
+    background-color: #ffffff !important;
+}
+
+.wy-nav-content {
+    background-color: #ffffff !important;
+    max-width: 1200px !important;
+}
+
+.wy-side-nav-search {
+    background-color: #b30000 !important;
+}
+
+.wy-side-nav-search .wy-logo {
+    background-color: #ffffff !important;
+}
+.wy-logo {
+    background-color: #ffffff !important;
+}
+
+.wy-nav-side {
+    background-color: #b30000 !important;
+}
+.wy-menu-vertical a {
+    color: #ffffff !important;
+}
+
+.wy-menu-vertical a:hover {
+    background-color: #990000 !important;
+}
+
+.wy-menu-vertical li.current > a,
+.wy-menu-vertical li.toctree-l1.current > a {
+    background-color: #660000 !important;
+    color: #ffffff !important;
+}
+
+a {
+    color: #cc0000 !important;
+}
+
+a:hover {
+    color: #990000 !important;
+    text-decoration: underline;
+}
+
+h1, h2, h3, h4, h5, h6 {
+    color: #b30000 !important;
+}
+
+.wy-header,
+.wy-nav-top {
+    background-color: #ffffff !important;
+}
+
+.wy-header .wy-logo {
+    background-color: #ffffff !important;
+}
+
+.rst-content > .document > .toctree-wrapper,
+.rst-content > .document > .section {
+    background-color: #ffffff !important;
+}
+
+.rst-content table {
+    border-color: #b30000 !important;
+}
+
+.rst-content table th {
+    background-color: #ffe6e6 !important;
+    color: #b30000 !important;
+}
+
+.rst-content table td {
+    background-color: #ffffff !important;
+}
+
+.highlight {
+    border-left: 4px solid #cc0000 !important;
+}
+
+.wy-nav-top {
+    background-color: #b30000 !important;
+}
+
+.wy-nav-top {
+    background-color: #b30000 !important;
+    background-image: none !important;
+}
+
+.wy-header {
+    background-color: #b30000 !important;
+    background-image: none !important;
+}
+/* Force the search bar container to show borders */
+.wy-side-nav-search {
+    padding: 0 !important;
+    background-color: #b30000 !important;
+}
+
+.wy-side-nav-search input[type="text"] {
+    border: 2px solid #ffffff !important;
+    background: #ffffff !important;
+    color: #000000 !important;
+    padding: 8px !important;
+    margin: 12px !important;
+    width: calc(100% - 24px) !important;
+    box-sizing: border-box !important;
+}
+
+.wy-side-nav-search input[type="text"]:focus {
+    border: 2px solid #990000 !important;
+    outline: none !important;
+    box-shadow: 0 0 5px #cc0000 !important;
+}
+
+.wy-side-nav-search {
+    background-color: #ffffff !important;
+    padding: 20px 15px !important;
+    border-bottom: 2px solid #b30000 !important;
+}
+
+.wy-nav-side {
+    background-color: #b30000 !important;
+}
+
+.wy-side-nav-search .wy-logo,
+.wy-logo {
+    background-color: #ffffff !important;
+}
+
+.wy-side-nav-search .version-switch {
+    color: #b30000 !important;
+    font-weight: 600 !important;
+}
+
+.wy-side-nav-search .version-switch :hover {
+    color: #990000 !important;
+}
+
+.wy-side-nav-search .fa-caret-down {
+    color: #b30000 !important;
+}
+.wy-side-nav-search select.version-switch {
+    color: #b30000 !important;
+    background-color: #ffffff !important;
+    border: 2px solid #b30000 !important;
+    font-weight: 600 !important;
+    padding: 5px !important;
+    border-radius: 4px !important;
+}
+
+.wy-side-nav-search select.rtd-version-select {
+    color: #b30000 !important;
+    background-color: #ffffff !important;
+    border: 2px solid #b30000 !important;
+    font-weight: 600 !important;
+    padding: 6px !important;
+    border-radius: 4px !important;
+    width: 100% !important;
+    box-sizing: border-box !important;
+}
+
+.wy-side-nav-search select.rtd-version-select::-ms-expand {
+    color: #b30000 !important;
+}
+.wy-side-nav-search select.rtd-version-select option {
+    color: #000000 !important;
+}
+
+.wy-side-nav-search input[type="search"] {
+    height: 100% !important;
+    width: 100% !important;
+    font-size: 18px !important;
+    padding: 10px 14px !important;
+    margin: 0 !important;
+    border-radius: 6px !important;
+    border: none !important;
+    box-shadow: none !important;
+    background: #ffffff !important;
+    color: #000000 !important;
+    box-sizing: border-box !important;
+}
+
+.wy-side-nav-search input[type="search"] {
+    -webkit-appearance: none !important;
+    appearance: none !important;
+}
diff --git a/docs/source/_static/overview_pquant.png b/docs/source/_static/overview_pquant.png
new file mode 100644
index 0000000000000000000000000000000000000000..5006f924225bc7e4d9ecad544a0d2f3c192b59f7
GIT binary patch
literal 84938
[base85 binary data omitted]
zz|?YoGUwxcz5Ec3k9P%-Am{<0#Ikgd212{+XzuQ&G0`jOta5U|bYag)8ZD3|${gHj z=VXtE8aOH@#6;~sWLy{s4M3wpG~8E8Rkc@^0HO>TII|W|=uC~Oc_*P3?~bUZzq$Y* zwH57do2OvqrRD1eG&gL#7hr^jH8>_QGBG9g>*%i|6=}mCu`06pVF^h|NurI^9Uaz- zO@&SGJKCPq;g}a{1KD~^*7mIKu(oOy|9}%>=F644YTfM_99#0z`w6{AvjLh`9Z5hw z854b1>2r{e3V!UTcR6&c|7SzqHztD_rc@~ee(ETO)8h?gkt@kRDQ)W z?^My@ZR_b>j{$t#-h6s=qNGh`d)%Z3v5dq9JIPm&m%POc_oO(C4QI)Y$&4MJAKV>^ zdnNWNHj%301=TsY6OFFF;mnTB@J;d?AN}Z~N#}m~$WRtP3m;{<}C5cjvrbsapWmo!Y2YEKK4C%Jv;L;(L5ugQ*^hVI==;(omWa;v#h*=qO zpy9XF0%R?vmDDVV*#cr&ShzJqiTt#Uu$93kjH4OLZIVS|{Yn>>coBn(0f!74-4ZwF zoS;!-jk~hvii?5P=IY;Rd^mq2@YT=v*{W1fq*$=xIDzUT z0QcdC#>Pj>mH3D&EC6nSxRgDO%_=6nDjGTtIexT>l!Ff2p(l?UGX3=*cgm0cwWaT) zeKiEYc|d*R-9iB$I!O%(>N`$X;s8zv*wg&TJF(hARFkJ!6We6ci|ptz{hyQlI_^a4 zAqN=G7(m1i;4q!e7of(-k1kFMYR+4fPrN23t&uYb&ZqTlK7BI5^J;RG^wI}xV@A2`MYlc!I#D`bYq9E0`TJ{&9*W;$;>~xB0vJI2>l>^>nKsHmMBAkp@s?Fp6KxIuxNbz*1aHm?^buB0% zpPrjH*X`e)}% z)f0MC2mMc0MH}T%9OTdLUwj9#(|_9&=2XPrWL+O|VBz(fdEin{2f3#3*eWs*_(wwt zEC4G%KCD8g1k`vFTfiqj-!ur5prV}aEwIjw9*^$~(N4k2=`7i%#k>@Ei$PJz^CFcS z;Er%Uy#D?(Rpf!107a(PP@(~kNGZGKttQP0dUo~RY&FOo^qGlX{jDzOd!@mF$m;gz z&z`^Nl-ZCU93i}v-+M{*m)p~&`ap2Hq^RVh6K5ct-*}Q)QZKE<*RtxV#cssOm_%H1 zVq)UPH+%ulh{a7p4*a?xthKPb>6yX7^UKh_@*^;;JcXH;sm1^(9A%_*>(}@^-iY&` z_WE^yu1_XeUkGwww#eZHvXH0$7+6vu307Y2NJQeL%0TeB8O$k3Qh+{s z{uKcZOpHQXU%!|ZB9R9D*FAxMy4VcH*R93hL6?`7iFM77W7H7&fxfeCc`Y$Wgoze8}Bz%A>TrCBm2pk zszUk{oTfOZB46JiqhPoUrwLApXX4d9Mce>Uh-g@wfjmcsw^j8EBOVoT8=Ivwh>uM= zmB<4q!}bIru?E9eJTJ`rYyWN(AkIKz$BOG3@IY7B<8*d8L|HZl0B3_~^);hcxi9v= z0@23>Z38kAP$x?KpBI4gG-EC)39$aKPGfE1)Q> zYG7d7bQgETh@$Y{i6SHWMj_a>cc7K`WrrbIqvDt>te=LLm!6geV&AlKxTgOyI69YC zeorY44HW}K9)dpYAyd;;_2iLdh{y;a4Xhs``S)Tgn(i+)CX`hJNTO;_c{w{lT6{cD zpyH@B!2R6d z3o`+hvO8j&rHze?sz!@jA9Y z0h9zdLI|7t2RrsvE@tNDq%kFhL?K@S;d1SM?G1eSl5_ca=ov2z=xyw8er7U=lDNTs zSI3hH$oqi7=7*YeR(A$t10TW{Wb{>O_g~u&I zs`T24EAjK;Fsap02SW*Nunj(YchA2{t<+!N+#oi4DPe6k@=IG<7J2!pbug>eZNMdW zqk#nPw%5-;Wfur?rHXj7i5FxCV}1C)KU^LahRf@vPH4yR!PT`+G!r@lQ+$ulg(66s zm8ZxDNhT69F?&1$dDDNKb|*NLnnp0tJ|TwFWm~_1X=F`OR!bn)dNKoa16YHDgA7Lc z-#a>EFoHl zqa1*wlGAb3{6shhPk;dhH_$pR7H7UdQME`I`uzEGbL|OMWJDP$$@qjgkoDlWo}Zed z%n%J~t4-YN`97oyK!bnJeFn}BWF_0ov~}lzd{oIP{FE`kAF9I-Ab!luAh-k$76Vir zejONg+A1Qy&^sL*X=R4gsSw)~c83Mr!h@Z3TSx<$<(kYl5bv556elnVR^oUA>|$}V z0Vl$Y^ohRuBpPV1<w4^ObNGfnV z(Q_Z4t^v%M>Fk(VgX1BMv1)G>L86%^;7u96*NP^{0wNJ6)0X?&DQ|?K>TsG{C&VMeX2rT&8I=4 zQGS)*fIpzsozWH|bPg0??~gjMGkR-mmSG8%&qDt9Tj?(hOnJsc6*B+Z!FZOzkwFQV z4yX<^>^OfkOKkmi2$BfEgoTxi>2n5lNJk;>{DdJ?$sjGfPo8wEEQ+njgFZ{=A-c1P=@el!6`OhizFH7z*?= zb8=`1?jNH~(a|M-0~%y?G~^d6S2(FC zw+HL%>z4C1R`%|Dl_SFF*w~;qRsiS7zqqtSA(@+#lMyX5Xbs3$|F6);(Jw8sQ3Zcn@rYpO;4=t}7WMgU*3D0&P_O(rhF6`?hBZ#QMXt@xo2cr`2?xl)?=G(zNF7wIT6 z8RhMpydRV5MF+&k1RS6sEdOq@ufI|)(8chrZAf)>bxnckg!QsSAn@>dK0bu^8f4Hyo zTUFWPnCu)JNc#cIj~wksP+tGl)g?*beSKHoAyLoTp@dl3Pn5syhb2r2Js8V9Gdmi6{1YND`KEj zt_F7%upW7d3q1kTJv-^gWJ;OU><9&~+SFsPXy4W4CF6|p*>B;jzxe?7Jhp9o{CGiE zAd54JNgIxdjb;A)jQ3!eH4u?^i%ZYQhEmcJp2abc&%$?`mq?Xgi!CW(^nwAz}Fnur; zOMp#W3&c9XKvLr8w0Z~2*`)@9+$(uN&*(7;%BzfB znF9}6gj8yGri|WX5{VFy4A};M?k&1_iWDHmjFTb`uNweHZbyF@#K6z|vgF~{vU+D? 
z^5^sUFTmx3LZKKpj+1&8!b%S+-R5A=hDiK8P`)cnW5UEPrKqENP9gZAl4kTNc5bkC z^~sqThx!Fl9+j?+Bq|VNWM~=6d+eXZU3}Hpidz@Kn$M_ZH55}f9AT{D)oKZEfOAav zXIHaxB$x|}vh2@_H2dCLaFqo0Ma0ETnWlRW7;|U}jfG&KTYs_zvrLuQYtC*`;*KQ= z#U7T9j=}z1h14}sQ_om2o5CW#D#$DO4ckrC+5qVn+U_3BIBlvnUXBU&O}Mfj&)2T6 zkq8*4R!e_3!NY@aqL7g>(itSNG6tb)@ZIYV5-+GID14D?x-JoHTt+Snwg8rfH8`7y zu4u#EUX&iu)aRzOR*b&*xEi!d~1N{IA9-e;|*YKYPmg@S5{W;O7_K~5G+t^98MM3+})j@ zmga5klF)XwFCHIo5@)WZ!RU?ModV}~=4FSycI^@1rE<*!VPj8C`4F3b~6k|tRSC$;sJR#y}Lt?TKLW)Yf+tb7(O9Z>uP+Sl=s zBlQ2oqgKF<(txD(mW(8CDk+RVly^ zk;0jd`|bVwIj*0Jau*_%)wEkrZ)FK#25go3kUD3}?_^mk26)GnEc) zzu*Khkq{BsNev1^Wdn|%esRnZb&m`hnkt@UE9k&OT-J!e&Y?OiME z;OJVUxx)iwm&3zKwprt=$(xqSxNhXQ)YKM8asXlk+9>GQ=pvpgqR!HECFRGuT;EJl zXnA?Ky#LN_h7lFp>AVbTYBW&xj&T0DY zQijq?62kp+Vj{+s)yKz&ATBw%*lU-|#w>%l4#*=({oL5vdixcOZB8JZk>I$-O8s5| zxFqzDH}sA;o!q4$wJRZg$^K-`zTS4K8wO*IxWz91VDkIqiUB=&h$5!fPnGsrcNB}u?0yHt| zOi>&%yyKqTGVHZygz1c6;qtErU6yrYl448V2eN6aHmRhrMmN)-@Kvfq&?xqUFeqGAfP?{EK`I(5h#i+?U;`c;TI-XwEFC~&BVj!F>@H4@o6JL*-bNl6x4t9#Eh6jzKpA@X~T3X-nGNk2PARZ zNBeV18+l)CU>bRf<&SYS8fX534G(XHyi#-WXEmU*V{7H*etp&TJA28R!V!%yK4ble zR`W6pFoKvWr?zOS!9`2SNH*~C_4f9@iYfsY7zh{X*V(?PK$Ua<+f6(&KO6R$dbLE& z_UHQwWpX}#hGqDJwtJCl_nVACA=NF$;>?=i-(MFQpCzw$P$yb;MHKK`eURIu9&I*A z{W_Yla#&;KV>8rfQ>t^dl*A3Pf2`B9wZ2V>c0wT&8m)l;TzEBqC_pe?Wt%WHNQ1|?p=KeR zlFx@Lgd|7zXRhKyuo~=acLqIBJoZT^Hw(D<%#WW=5IzN&{T?ga62Co~fH9VRdk>;y z^;cQEM6UaLTPSE43(e3Am^xnLg?*LbI1xi#O=7tW=CH_Rs$5 zLX9LL)PZPJxu01~RX-3jn$%|%mWlH8Dx)BZuZF_%^5WxTt7>Z+8XE?j&uN$^kPy={ z$aZ$sW1`uRk-DX&cw^pL)R>WcxZk{lp`1pqIe7dsSX|L8`PJrVa#I5IIv>kGil&NN z)|ml28GwyMyaI&~5fRbcryiU9%@YSj2FPH4zgR_fJ2!aRv0+;uc?<9%6F+*-4;JNr zBBH4_OJg9x)3*I9>z1@jIcP0z)WsMMG2oONB-ol z)Q>H)?+!g{WNYT2T%W6QYJAv8#2WThkv<^Yn)AeVsmXQL>T%pzbv*@uAOFUX4 z18_-&nfR;fW!bUbFUE>cA*EKm>*#5dKfAdziF%L=2SbTNK$fvlZVC(P@8CBG2rm50o ztZ!;ndOrRsxsI+bkbLYU>o{gJsQWdzMk!)n6{i$DY*qkDMC`o0GfTt6GxNw@{>g;6 z4Y>0;y{|Z$2%i6zWU}^h@fL>%azFnh1=Et9+^bD*3xFdf2vbotGb=bxxVikh#-2d& zd#+GDKi2r<_*iAn%vRC-W*B>&Q!|u%>?zEA_Jf?Q)$Vj@(M-sjbb|lqxdae8IaW6! zicJX7$w zPKlX|_7f$LSWAMcqLN-Ld8DhsOw(`Z3-H|0DNeW?rOw0UVvqat)3m-IT8V80XhqIn zZ8iBcUKmz8rE$8zTtpF=G6P?Tsy2FyAj}zCS}G`DFB#hYeJX_EZdh3G_t{T{|6l=v zDZFX&%fILVcr=EjoTp;Hk%GMU=W$SK(uhxZNx}6;Hn%JArzk459Z)@Jt^Jmt>|w2? 
zgdaLNGcU6sn$cK%Gg*OuHpMN2MSkk&ba;43jP@D4$~L}Ii~gj!p{H79dQuj#Zt0=j zt51ADNp1msbI(RwGt)dxkfJQJMVMB2Y<@_HhpC}5`vZwb_+*y>F8BCNKgK^D(Svqd zMQuec$UI;lR+9$R(fRpjh|W$<&!-uxr$ z+prMSR>Jl+_g`kegO7e)S&-)9%Ky>Fhd8?!z*#gTa{yd)FRtLch9^;)nFxelJ~otU z5^LB3&gHXvLD$z)#aiEFr(>Ibl@PNzJ2=22#wR3rc(@~8Ud>ERi;L8Ot`mn~$5cNb z?d!t&Le%}&{HQvZ=)hOPPYQuxmI>Yu3L;o~Gz4C?+;BFTi5VlGyJmi`tBnM9Ka;xY zr6rgiKFz9}DH6)ebeR#*Di7{>NlhJVZ%}9Jr^6fs)dLOyG~IM`f*8Aq4%F(-H}c9< zV<`N zsHHRQyA$wx?kn!9F&7gzD;-huR3hK_yLLN^)Wx9k> zcy7HXaLTTtkOCQX#u25@tVBtrULDeLR$D9JB{Yj>WXB7aKnVQMQH{hjI5|l`=M7Fe zw}q7ePmykk;}tVB>q>+-obT}sR=%B__q&{Bico2>;CMKhK#3I zHHLJwZ?YF>z)3YXUkecs;yMrpg3=?Wq$B^y#c^R-=}YUbLVa^dBObs9q(G~Vc7m4G z@iG7&&Vi$Hi=_&`H-hqZ4SRl#zNDlCbT9ha_d#1xKFsFec6;-(An`Ks7iblHuDb*} zHBl;WnsSm_v^6`&r5}UWf6Nal3ItwGSy|?{AkegpddYq}Z_~QJwI!;Wo0~gV3o9%{ z{lv`3cm>4PtgNhRt>%$Fgz6Ka1d2QM1R!JMnSRA0V}htRIA@^#{reZNrme(nO(KEr z%6~<#6@iVoFt^K~taI<`JLFs)?Xs0(-R{X@VS?jD?AEMPl=K&2%FPkRn`~A7ev)-2 zz??HoY?)uSOPET0WS`{JlKQ^$Di1mdYzX58+Vt48ch7mw5+4CmdL662 zyZyl9h(c9`Zlm))pe?IT!71tj?UMa!?CH|^`!n8i)4iRdpc-D<>%w*xrL!p<=KaUU z(GCSD1;zbts|J?&{PYy(#M#+djUo2_-kX4$D9q<-gLq#*Aw9k9+MLhpn(M2Rt*y!z z;|iSuEKgy|z-M$|zhz}rt*A5r&SQ)eq+kCPg+@6oIN4eqQa&s%dHK#big=TcWM9YP zG-^So88m5Vw5CeZEmV`I)1>xEJ#2E%>74hP#rM>>&gowJzl|gr;_^1)bH8>vc$@My zW}(SwWb^9oEX{62=wiXaUgW`Hgfy#V>2HjwG@tjZ4O77y`GO6M<7y6ZWnx|(8vG-!FH z*bng(Y|O-mCl9Cd6&ofp$EUB{eNhx7Cn9Tb-zZwLtGRdQ=d;}rFzw@hD~PY_0~&G zjPZ(hBMADiDVTx2@PpFE;UV~o^lqQ9$ln|VXc@qX)gwLe72hX{Hm;wpksv-h8&2FM z+PJ(rCf4X}-NS#!FAfuQo5E#vySZsKf^D!J<(k||i`1$F_vAog*gScl_x3}TA$-LW z+3K(>#AY=gS*H6JEs-HI!b2$=2B?|~vYhv;gwNJrm3-fs9@HdNXPrSKW@H-qxORTG zlVQR#-yh(9N?L%G1uW0Ntf6v1+Hf+r{z1O6-Th%wwNF{cIR~}zM8jEgqtkMg_g$sw zND{A&;jx4_QBgc}_2Jg-5=G=e!IxnMk4j*GyBSZTo{kGTZ8?%Aw0g+uMrX|Hx-(gb z&2OrxTC7#7n9S49J4EoHHOQR_dP*SC!~lCIgL7l$;&Q?7w4;#7F6PDF9uNKU`a_Sa zn;S?E0s{lDuCKprXj^YnB|!6P@f8UeW}ZAzDOeO&W1iu6+AnQcLRB}B;q#W=9N)Rz zdCtD^u|G-i`S18;k$XB9^r^3u`Wtf|Z0i@gNv2UFl3_~qbmPqzPn@EyJRi7&?%5gHZuLG&u{bB;;fy*3yJCF;KrP`5pm4sOA)EC>BGBB>3Wm-So{Ij z6cHcW^i61UEyw6j6^B{w4WD!*#8g2$d>OVOXHJC;VBM zDXD<7WxB8z*h>7EO*xhI@p7OMD$N!_5et3tgkjS91!Fw4vsa;J^8J;}v^3`*mPMCe z^35u9->P>D*UEGn6*=!|ZG4=z^1oLyVD~83^Ej_Mo{0^a8&P{ff(s?Gbzyf-*A4u# zjYf?%*2}tAF30Qrah%7o3z#UHhxUzKpi?%MzHbh&sxQ2n_x!X8t0WU1N1W~s^T_VO=_ehsrud4_7X^zkTY z&lgkT*pZu$X85TNlH&i@%I7cc3tN_&jMlYb(Rp2)?dUfeg|T9*INpbLb*{FnlYBdu ztD~^*$qVnfuj>Pk-`&PSZ?HTHvGZh;wOaJIz|Nczr#g?Z&Pd87u#_o8bL%^weQmOs z@`>&JD`>BmT4}N|lDM0PB}cHKcYV4YzVs!Ayc*HtbZd;VRT&i>b7H3Cds-2lWRoW; z0g2Eq3QZKjPkaJR*$Wv5Kd6S9N02J2z{%75XRI(M26Lj~u zi?G`rdF$yPsrJ!xEUiB4`j{a;uhxtj?`VJk@y?o+fa71cBk}JeU4;(O^vWouVsBpZS+(i6awQmpJOt*_m^W`IGSKi{&SJ` z1<@M1Zs&X`l8MEnQXZ)$_65?4uX9;kHYH|aV-`6&z}?tYGeqyMMqFb&`vbrC+iv|_ zQoWHcn#;k0ZsVL+`(C|=%ieK|9jBlmWTKF9BNYY!p+{=3!-@LFeQXN1AnSARj;NY-4=uL^tm zBtxY&fTr7iNaWb~*1kEK$!SMCFf7N}HgaY#vz4`A?LpY9MX;s0*$a?k4oAMozRhsI z^L}t;o|(2;1bo*V0uEj2R6DI3_CxM`8x9Zk?gweqRt=6W@ZuYG)*=}3y^NZFl3(NJ zoo#Wd2x}{ujaS8Sc#FT9kzC(s^0s3afK>n~5;)g1WF|~tXF2ZCJm06BYLeBg5 zWtcho@^4v4p0L~d^*WnaU1Wg!$rFMdmq;2tSZEy<#iyDW zJTOnr<28&oba{syuA!{PIhEIG<1JP;8MRglIr(-wRkZC-$e{DK3E@mcz;Oo_LHUVq z>M|kzxFo68!iIl15_fF~7VK|Cr1grYT1MV?Ll2mnsqZO(7h(&OPc_-^QiZ)JOyaSc1C&5R-mJHG zcVKMWs91|7J3;k-)|;abyOLQ_o+$hrmPf$^PNlRFJNda!!lsePWYG=agy>rnh_^Lt zF)23rdC5F;`_d<0w^%7anbQ<^FdjUX=4;Gz@F3EXuhA7B%wpB`ou5Y=t<(s~{Dg&P zX45^Igy$^a>Fj&&?|2>RhD|@8M2@R`zr3J@|k zI&Xa8@IU@KX45k1z3(0GLpz@R(c-QB&1EiNC+pjxvx?=ZctO_2PGpyK`6&V#k) zLXqaK>rIO(V=NTy@kUqy{vAqgi^;E~oPb_X1W3n&_W^it$F>5$ApUC}fthH4sN3Jq z-2v0RsAYxs@M;-6ivL}cVw`GaF6 zw6@^#Yl}?!D5P6mDh1gqg81+k%|DMl7FrXD?8ml2Y0?nOy6LulT0O6C_e+vrIhsQLy^f%%;44HwsGbAue;t^(5v 
z!s{s@1r2yiDnH(L054~~R986MHO{?w+Du*F=Rq5~xGA1;eb9I~iG4Pmnih2Ukzs}) z7V4{*1%ZDeDpqgua(l_c*FZzBdY53sv*CQthW~wK{62<{pB+INy710mRc$S=yMz+g zQk)T+5lnZV!p%A9N^iee|7k4q)uPMto!3sRyZ^6yuQX5u;1Js0GqAci%$-oWbuvEw z96GodZ`f$RF*}jZW+cs!VBqmUkx_A;wZziqv8YGjA}rN`OHN+mofShX|7ZW%$N%33 z^F-e$Q?JjzOe|8Z9NrIZR~+d$J__=W?Ai@UQLA<&ERv(;>ZzZ^(8Dw6DkXSc6ZwihXU9`s z%O1(oHtcs}xtGk84=2A?-A)o%4DA7H{h1QVEffCOSe3c@+1di#QtR<|%#9>*H+2lp zmkcw$bvO?jNtN=X;Kqu=N!Ck8yPFK@msp?AR+{p<>nm+ds{e!;HET6EZbuJmDd}=9 zmrWJiHy@@73=*;N`J9*7Cn*1I1pp*JKRSUUFp?GIz_Ux zwI=HcrA`;N`&j6MToiSn4tCFy?)(-JT)9~DO+(KKGN1N=ATWTw2qE_2G-s~M9_L_= zzF5B{At7Nb*}*?A9vVi($_(g+|14#w+S}PNs^|TLJr|I{pdh0-KQ~NoZ*TW`F3HsPbiFdM;EGe>W2Q3{?Ak`k5$Of;{pvAJdbK zjN97M5%=k@B;4%Y-V@|m)?>H`>BI3SPe!vKFV~xmg0PIl z@Z?+L%LpmdjU;MQ*n+W8oz1ULD;{H|9&FiRC~ecsIzO0%MY@+R(jKiK9^C-9_!6^G zv+Clp$jx45WF$^@?bc^`KCAuhiF}}feV^elosui?Y_dq#VR481)j~YQugxp2twQ%% z?t?zm#V(%j!oa=7_duE$*SD0yh09m{QfF0;5QL1g>h8{?D921}bRS3d73~Y3DCCzMgiJ$ci z4Mmi{jk~{)wrHz@SopGjd;Ufj`#E2G+eOFjD$C=-^0=&~Z+}nB{JmmaYPc!?6%)&_ zZG6g#`Q!=9;}T7N^;PWj9ImY3;NZMFl};69+|UEW;Q0}$DW~_$j{O}{#=<*8Xj_vw z5*G)Hl`*&Xz0931j22)$ozh>4b~Duz40~FhFM6E6Ziskd*<0c!zk2nu$#z?>g5G>G zr*N#1>&cU<$7LlK*HX;S&lk`8fX4gRr2>40vmhk&%^^0us;yv<8v6{!vU;r_{l2i^ zPU7xT0NL*(whhoWIlK%eX_0?io=)}kzV<3ocoNx-bBdrx=N3O)IW9oe4qCPPfG-ua z9>=JlClz$e`M+P9@O!);>i_NAx3PIA@tk-lAT?#yEQ)nFJs>K&r_#8+x!IYrS#Efa zi`$M04IwlgNsAMyU!zlc?P*FKY_DjymRGBTjfh6Xar^u`yIay&F2XzX1>U`ohY_2Y zJevn2#^l-qcJ~BOk|5e3dvUn>3@7Q8HtX5T(en2a*|oa}a*W-E(vqi#6SX-sJCvW} zfm*jCxsbgE2>09zxwDNh@tQ#yi~iTdG%#Z{x<5V=7O$@J*>o<)!WKQq7UiFW&aW48 zVqP7Y^hK)fNvE}5_HNygK6#=S3;leXASJKepWjwu)Vj2#DB!2&UU6bNqFHcM>S>&0 z-mP`)#a*QBLqw9p1-&IBcwPIkAMc7&0D55D>GCp31y-1JI}R!N@;;lM^geIl2-9LQ zuXJo5AyVh;8T;jZOvvTKy7jz+@G0Cc4;1Ja*mv-$)qj3wS85i>(R{z4M8+CG_fYb_9TS|2rN&xlhFaV7xDJ4xz zr^v-JY^W7Uo%{i0p5C;}F9es*)fbUO&)>3{W z6Byf?hI=~-QV34yQgq|fOeAlOZ*B&J9^XbB*16ny!T!5}{ZEwL=J>RJxDQMLr~5Ny z+nlUDClEsi+*~XYv6+J(_kBD!jELphDT*3Ieq|aJ#tFlzAVu;91&YBC;2*ono9^OLkploJea&whzJFt)nYX-oq@hHmCiaJH(`p=;}})m#m4L+=y3FT(~6cfwZZ z!(Kj1?nfO**xRE~_+3lc9wWlihdoc8uz#};$KkAZe?B+^Szb*^0_mrV!FPFlZ|i!wuu&ea!&w`v2G7m&Zfde*aJPlor~ftVxkQ%g8c# zk~M4gWl$2v`ZPk;>3N7~A-l09jD(TBu}wV@#+Jy~A~N=U-{*Vgv;O=0_xE~z{Kc62 zp1H5c7xqR)J;5v^$u-+z~vXkid z;n4vDxuY92QWvi7F>~bmbcJp|Zgc(|ll857W;+r*4O0TaBm7x$5L|Eek@2=a2JV9?lv1c*-tvarZ>o^FgnDh&wEx zW^UULN5|oVT6=ppJ2ExYJyw4=d@w(#sG*^;DYXF+ooioZ!$RuKhbkn8sjU~ulvum7T~yIjY5Q&!Ict>dSH-4Sk6{@67uXk3Z%e#?9f!^fn^l1?Ipr7~jaNcX^eBmo z7q%oxHK*5h#6^-s8) zQ~mNn6T}+E%Xy_XHGVKanZ0K7K)K+>i?5Ax^zbLF%UydRA9ZizIPX>lT1uOA-&pL!&9K*hK*MxgzjN#654PDJ^gjyFJSTuVwlLcQ^sdFa5z zVH*CNa#w(VOaw&xYb5%>f=|TB=+{g4X8r8w3->b$pAX*Nhd3z;=QH4sm;Tm*R>YLa z1o}w*Gafl?-Gu&uDD&B#qF=y5k=eE1oyUFGPp!giFxlE(rRG0co3R)aUkYC}%AKM! z<8ojD1c<>J;fldEHM>uqCA|30ttXm3=M*}&SwN_z-g)VwZEXOTf;XBR6lh=&)sfD8 zF~fgnqo}INA5tUO@iwEtUK><=8;33z!!XkcGxK_oriT zeiT|?RiA$YOCkpTz{+@EfrfSED!2gGL=T?6N5C)Dhe?pY zTo(=WRszpxvSduj&wZx>6`Fy5_U#)jFZTNG`s6YTj(nK+i$nXLF_hyO+7&kH^C<|# z1_D;7P8xiymt4w%W<+$n^L{l-te})bOCq1fODKbsmzRH^A06>;E&!ud-2SNFp6Xh= zF4&c@aga9-C)yMnWNN;;w9G0(qF)|Zi1f}6!HUWgS`!PZw>ipv+4tg4sXV8EDuKIR z_CDw}eO=P8;y1*{unaWcW$&eH6W`;xwk^vr$>5(&|Ja8x5;V+m`ue`3kENf5F4EwB zmHGzr_h_#7s|0gPQd@WVJ{8S}*p)6?3JLpc_xG|XBTbxqT0~Y+rMH9>>8gghLRtIL~XS6wQ}bg z=CJb6EtS7Z`I-HOc+5S;#tA!Mr(7|2aIV+KX|DQ7;MQ6mpjc`bn;0X(`paFEofh8r zF0*p&bT%)Xz@Wphv%wYlC-`;C^ejZX#(cH1$NNl1ok=?ezi4FlLIvkX<%Hi-l z17hG?t|}DR?fP4uBEtc+Zobb$H&irJ2tl81V+*cAKlpG!s^`SA&Dl$bo?p*qcG^W$Nzo>tL>jVNJp+ZtW%o|c? 
z0?8iVy%Dv~>dL~nsE1R1+k|$$WCDgzrPtdGNmYX7;= z&Tehs;N+!wvdErK&xBSeIXNUM(Q)teI?8uh(Du6D+N8&dJ6OV=8~?rJ>a}YV6=I69 zWF72i3lh>hpNEW8p4PhdduDww(xUxBSLy904^})M{DeT9bOBG47arj#%`*Q(0L?Ph zk0p+*fApMKF{WYP=OGIf%@xm<=_z_`VQxT3bI@|Z_})$Rd1E*o_y4mjqO8_6-Zg4@ z zL@3s!XpT5vvv=T-V{5$irsoy4h_mPNj5#FM)-!X&IQG2HsI=Ku(HrvB%BG|Z#1fs@ ze5JnXk-ZdrhNC+aV4+7&g6;}AdD;vPoPt&PZ2qp1Vwg0hOWE3xppzu*!=3Pr7WCGn z!fU$?fX$u2t(~~(a*u)9^pmHnEJ_}%KPR~dheb|*&Gk>+deIzLm?)0BvALXhNiBk6 z@~*&~^ku^OAOf)ny8K^)Qy;7xxr_9h37HzAC$2vW@J2k3kusaR6-VZfeI#pUasY7| zIn@#M&-c>|i1p;h8j)?bA0~#2JT@atV)*qZ``cG5#C{y+ElTEA^pE6Feh}VYoLaS( zqUbX?KYTM&c?*F!|6fVnhd>}?7+%Fc1^55kKidhEGIr$j`8~x5c$C|n#cmo5WD4=I zyjPRNdiwOG_}=V?a-Yc>r&JE}I>Vlsai+DURk^M_I1TUm9bJ@*71bXM2*pKS0@3M` zO%M`3uFocJEwv?k1m19w*aNp=XBUc>Gnwl8_q#^hwx7tNek|0w{pU8Xu(Oa*q1bW! z>x*P^CeLkeas;OQ26s7W=9+~*qqH@_SJWfW?#r=f!uYbM7+Daxtk94U_Q0rry)6gb zZ2S7P`3%?g%L{vM|9BU~uc5NnvbB=CC;03yy??X_13-Q|bjT7v3QP_Mf!-<~j{q^! z6u6>Gb}gw2*I6BFar7YHF&ur@N2HBYi-`lC^g*3BrHm&iUzD}q4Q5V~bs5WXseg_0 z6cZu3E&I8W3jh9p*kd_pz7Wq3UT$2kmSw5asmNn{=~X`SlxMGmOx~HN_yWW=niR>U zAj9hTGAo$n2;#AL5aqG=iq;y2Df)aFJ2_cd`Ww7FBF#n+ZSEu**wofd*s*I-@sU_JGtmH z?gJn0%J0ES2~r=Qs1N-O@jGWxDpvQZ>428VCQk|Lg}qc*WzEy=OGvAFUO|8@)Ok40 zFbUK8AOg~9V9cwau`gsm-eZQB4h3aKz6cK{pusQ^x8?y#uk~BK#ogQ7tj$dVcMlSkKdOB_fte6| zh-GT9W*deSjJ2iuLx1K8@u?UAeaH&%^3QzFqh@Kv?CouP&=yY!QDGr?(|i_weyZ_b z=Z4Yg1l{B`Wt{u{9di+?%1s9Hpa7^W3}kzja1A@!Smr_W`ss_F{vN=u@IokU##c(-=)Fs9KlMH2$yPU|P;$NT6}!HkW8n8Bx#9s?TfMR~xup#}^;J46jUa1_T7eGS!#!#=nOi zTgzju%d4x%)G1&+;dWOO%rzxmnrDyjfCLsViAWZSg1>j~?~o&Q&^@6NV48?^wS0ZA za~1-oZEw_`)e(9!HBKz9<>hJ?>k)GusUHUc<`x0rj#-P z{j`;7s=i3#WC;%DjUipStFg3*xTf9RT{xg~Fn`&5*s9zWGkHA}u8G%3le{1C=noJI zsD7_AnNO^qE z#d^L>^%v1}8CH*7XlgsEWlBURDfqZ|o{Q$ad8jBZ^vM$o%cP*7AOMTEf!ME;W0Lk3 zGUKK}C4y-cfE_#JoU6MeYX1J?Oi0H-)Zd>!*Z146g;v`=F@|Vjq9ZdtUUyIAnTZhh zk;8{KfK62|Lr5~oHO%gEXGc7)(^LEiN?hiTuLea6sg-LoJIUw3vACT4>2pJP@NJHu zGwg17>@1AOTf)8MxUR>&iKB0)%TGY;hlVjtsN!C%sI2S<7>5&2Icx%PyJ%|juj%P+ zsKT+pUjvr_?=O-lN`>vcXDQLBw7X)Y8}s(5FUfmufP^MtEpVIQXZBZkTvym6T31Oc zlaX@!T7*;EBUHjL{)OYRWvjiF*8ac+PaiJc) zpTs&-%AkH6ug3^P?vFlw`V?|emcBgCqa#w2Y{Lvi%ZV;&Cv2Y9ULwQJWPx|zb67Fe2aNr{NG z3=Z0Gbrs!fxY}H{u&}_Gm2Sa$^(dQ|>sBUeF$-Y$Zh=_((_$$zA~a!EbKJ+t$w{;& z6O&{@X?>9NlY-Nd>c}>|20be9O%)zPVtXGV+D7fJv?O2Gmm(3+CD`SoYM=f(SRn19 zQf%KO{o*+REo@W$DE67WOSecXmO{fnV*8e*m6*GI6fG#cG~=z~({z=owu%+fndeo9T8m zEOolwnS?+@O}q`0;Si*T__45u)8_k1$p2cIPs|*OTAZQp0@jr?Bg4YOF+h28vszU) z%F;Y_`t+OUz6(EZ`04-^2-XEiATG7-&`O~JzT5UH)fQObE`1q0P$`R-d(Mu$C8Y<# z6vn~9Kzo^{IIPDpBpPF*qRO;yL#f>47aNDM}L6uXnBI5bd@C%R$4U@2*`9;ROg7Yr1Dh3^x(b7vAf!Dz*=x8TaxB88o=UC$} z5$j6h7X5fSux7g)ME1#SLlo6FK<=ECJtm=AX$zQbM zi}x#z_hY-K=U+-*y&1}UPT15~>~_w=>S|ohIgU7ozMnsTf->w^vr*L-sW!*hHPKGx zduT#)o9_HS!Q3kaZW(h8f6LZpkfcz5)zs9Cu`+?88q@fIXZhAPU;(l0W%;QGt=<%5 z#*~jo;TJ>luy`o0nc-SxeXG59{Q&~&E$FuL>e?#_Q7V+53MhXg;~>)n((>*Em`rn& zT@8A=uPhHG4W|!kqraiHn-j!oy?ttFPMOD7S5{(~!jJ=dfsPZhFP10rkcXoUMx2;St^*|5h3TjfC(#K@o0R`M6D`WTB$o#?CQtLsvo( zFuV%fq^z+JWZWu?mS--q=t|;XbUl@7wtVIn& zkOJ5(bM#UNRZWPPu^ni;ooJ0{E!3^IRh<%4yVvkCjww#ms(Xf0IxqrF#7v#fK2utI z8k{%Y6A@Y@g2e&wxh1*A0bv9bSE_&us4L?7$&fDYyrgwI*YJw34yj*Kl&VwcY*ypv zBO%5}d{~U`c|;${(`b01ZgEUU${_tpPAEAtB{FX~QWg%n|L7G@Dg*-HDk; z3C*ord-exM%&Z$=@&wPXDYcG%VMjp`aDgH)0x~OMwx7%mI0$m5ij`sdquG%|dzJ5P zcGcB+4xHgBfiI}vWZx967_{>UD(#n?pb16by)tx_=lDy4I8;>2R3X4N>m`V#qQ>9| z$KAkdQ=t&CbA}Vht`{Tu6I_7H7Q+-LWHOZ{hsTbCPMhL{G$1qpz$1T-kF&L6o&d|Q zxl_htaNt`anZ8$rB0LhEx~NNv7c}DUY?_ADQlymA*Nd%(@iFC+4lP$1!Ug$|lF}6X z^t8v1N)xUu3;F@3<&?0(Pm2ueNCPQUe(ef>D=n=qw`W$!~!(QFbJv& z$`t)p4CqEX9dFPZEw@PCD-*YUzk;B--oJ1wUPLew<-g`iTTBny(h3R~`0&6%fb2bs 
zN8v%*h-cVzm&EsAtpH@~Xjy~kw!77H;j(%ydZtV(1=KDPak@SZH>G_ zJN80Qdfdc9oEp2#*$O%CR}C;bJ6B%Qh=>vEC2WD+E2wsHkdu{d^qd$VdB(40qvFbP$t!mQJ#l>zb%P?`9>aVEO9Vy3F$4&5Q37+SrouW{B zAaNp!YzRd76F+oG8CJKaGB9DuvfHiKWg|fhVpN%wAKOFL*IefQs#oXPJ$mvs=cJ2&67E zX)gJPnyJBa*jYgGmqJFw6{nR^U2oyMph&&x8-miS@t|+zuga4nKs*R$ALy^Ky0!Lc zt7g(Zb7-$90$R#S?+C>36>H^5F~I)wJw76O9-PN9J8y^Zgh?7L+u|{3a%ZsxM=h8y z`v%wF3yy75aM0GIfcCUOi_x;`5($fMUjlDmXZ+g32?Jk%IuVaq3_$sq6OR<|;W+X9 zZhgx1TDuK1amMo}EmP+lDJ!|M16?$x48J5%Y6!r??@oZaHQBpKgQ*NgUy2Ngn0MKu zO)*xv@zU8%@c-Bbn8|fjSqVO&B*vxfR7^7xh>*Tf0Hr&2{PFYSKCk1I&1HFJg?%;i zpaB>%0l+E^qw)OwT}%suZGA5YDzq3Uh(Jd=sUm)<@ny5~`d-pDW6Qsg%V`2DqxBcew`1P8$AMinNG#L}^I}6O_CfkKs(a)Z= zCjVdVQNy+$K{NN*M4Sz@q6DPz<=`H#`7J!<*QVhK$D*LLLGMDtEz;V?;=NX5LdB%9 zs;X+1PLd(FC)gIsYrRzYrbw5$U%3$0RxwVAZjz)Lch~D1!6&ShVO1c6bL##G>;_#8 ze~9*g8mH@DA!c3K*eT;|U!>H%^Y%SBB2%w7#Hm1x-~|FmD5)(4r~)^3&&@%YD&5%)Q#eU7!p2t=YDjyAq>V7mih`;ya0 z;Ttw_o2$XnJ}m%ARrGcaNt1WM@r3ROz6O8pKiDD`JlUlCE^$Cq8feu^r4I86?`#Ie z9NSWWJ801Gz|%7(2SlZy(o!1uU!xMVs(nB?;s+nUe#JuCNA2V&M;5rji^QbBY`pLU zIJ|=oQy^@)iVNDF>6O~F$v(=%lm6h>muyfV;Cb2pib#4UfxPeJX z^E(`lug?!r(GE6ZR0w@CHa7=91v4Q%;hSrwp-AS>VTVp!kW?j|dMCtBvxIX#idi0S z_wX|{gNTFnz;AUb8^v&Ri;R~pmMAs?&LN$rS!gowU) z>va6F&20U2rK?vprP@CJc>v~V0gbbx+IJa55AtTJzGzo%$Z+MONyXSs!=hF|brO}- zfag!kF{o<8bP|wGUtc?QyrEdc-}~|$1gV4+Q6Cd#ITTfwLdy^n6C*sC2c?A^f8;!; z#3&ol#CQF|izc}Ioi#)^u%U(smaxzzA!ku{ND>!bg|(mjo{)g}~=Ig)4_io4pce?1>6wgi$C|R|d4;!!MW542n}j zAa-rKxjfp$=Mw+mZS=2qhxF3 znfRzgee#vT8(zN^H9;~QvD>1js5w-hQQvz>2ILgBM-j@J#E^N;NN2A@9)g? z6dgQzHi{`+E!u;;!S4XeQQ>agIufqD&1mre=&yZSiiMP>(F>@3an_?4*;v;{oXa!G zTZ36z&WC`W%;;9Q#Q9u^I&(?i_^gKk1Hc<^FT4;7!_QkUICh9o2NSo;uz;=rC?_=7 z3hpuprww zm)F;+0ome!_o3WkX?@9!K?Tt1kRXuTRA+nRN0ra@{`{Koo!xr?b%^RFLk3ycj0hh6 zjX(&&&p?!rNiwa;g`G^&JO3Fwt2=5k41ObKHy4iOy-D|XP(>73oYovCoX~6a>I!! 
zt|l*{Ep!EXu~XuNH8#Hi!hP4fm0`%E`By*08&iMZdR^LR`_xvck&>E z7p3|tFs!E7p_v#^H1NSUtV2U8*)AJ>Mmpj3w}5kzdL)8u;MPM30f079fH|tPIU0jU zx&Ux1M4_R}oZ|@hPIKG@X^rJMuWms+0xhl>C?ywcTJy!dLwUxz(UUKZpOp}&4ywvL zhdMNw-9NLeM?Eexo#1wz)O#HPOk1$_fL>DxF%VlgLj4fl#Uqzl5H7Isv-O z*p<)uF8@ix+W{I9Cu}C8s4WKP2|!iAj>@Hnp~VxJav-Q;I^!^GzPFVZUnURw-t^jD zbCXa3B&bEt1{%LX2wYMj{9-S(6w}FHM3klq11iMp3=Btrk{mHA|2Fi|1|)to$*<!wc%86oPpglX)MMMP;=y4oAydc95Zn^UuJ=K%5uOO>ptJgu_cX3}ii|4_>cv z59S}~j~yPGfK51`SYj4{b6=ZtrscwzGG^|*m0qdLNHYdW>i`1r-@ow?2IJo z9T7^|`i3I&#J7Tk4Vi3WnE0{4?=|a>e{AcY-;0Zcv>4}odWRI0}{ z&Ie{jY9rn-xU`7pY9xm*zZ&$%TCB`74C5dW&lRpyGID)R0h zKEvjr%q+8`zzb(`VtgXEIj?*6{B?|a!;oM1TZ{3$!e`mue;D|zj>wu5(3{GHcE1Up zWS@ZpCvzBP@o^Ebcou7XJl0Y+y{3>YP%V{uFsa1e%UjTBE%_+JK)M1-(25tW%2xDs zW(u2k=}JM}?*o7Rh0+pY;|^@`prD-J%3DVI4iU{tDVvb@*;|?_OIClG)KhH&1byMt zu*Y4PzUtNYuYQ^4+su27{neoXtTl)r&og$J@$>r3|9<=58vIWV{&yDsp$`7v^+8NM z)=l}J*C3j~f{DaKk@P=)-h?wu0pkaFWdHtc(v4(#>YuNE49l}Do1*VqJ1InX@`qV9 P<5ev+JyoL0oxlDMQb=HG literal 0 HcmV?d00001 diff --git a/docs/source/_static/pquant.png b/docs/source/_static/pquant.png new file mode 100644 index 0000000000000000000000000000000000000000..69bc184d62f3e569a533089e7ba7bc2a1a242b3b GIT binary patch literal 24115 zcmeIacUY6z_b>W_0xBxw%qUm@br3|VsPv*TsC2~%EkvS7?XvjWfs`*(af^m`KsVgQ9Ol>I{iC}b^bc_a{sNL#JCIuB})ryt_^9@$<8|y z`FC=eDAN^F#1-xc-0?YO;{PG{D35`jcIGE;YztjnzN#?!x^`5{Uo9D_$wA7;7#p^p zyO6X{^;=Yex>ipRd7rTGPGAT=?!F~+-@hU3)8<_3UpH-*`aX2vGSB*#{bKpw2X5T> z{u=QPg?oMEr=MY$=;!ncbn6Im^4t$1KV|0O0^LuZjljvTX{-;acAEu#v!rqTkLl>B<=c6<#)*KW*SR#nb`u^mjBR7!eigG#l zRiUQ4Tr6(H;S5?u;PIY5#qF+t}g8m9u;# zdzh`!Q3WzX6d8}viY7(xKP^P0$2mRNAe5BQ_4V>by4H;iY((t;pRfAyMRkoJ(mhun z5bS0e(sDV=!2AXoECw^RF5SWF2!dIckV|TS$Gxp3y*_%M<2sM1 zBV3G{<>7V0lqa>QUbpJ}cM2LW9D{Q0BC%donxD-?-|o`=?|d6U{>O^Weq?H|&~gLqZw{|DN@>o|ZDDOdl;g42zL&-S|70@s zM|LDWFv-8Zo8%yvdlB>4eS(!}2=Xfk z&Sek#zaRc89=|iKtb*<5-t#n9O>!93v4cP z{Y*>T9o6RN>u6fA8Ie~GLKS4?wr({D<6kWbqU&*Ilvnn;G2!>VSx3Ua|KU3Pr1Sb} zC)c6mK?(2$0z35o6%8l>+p#E&KV66Mc6B~fukC-VtL6W(aq>Oj;Jpnh-4q4`t*hpk zO4hh)j;a4QR0V~-#x*PRIRkwwlZ;&|4U$ER1BPexEvMAyvvFn7Gq3AIw6OJmB1nHB zDqf;(px>*ynhLKWM@;oEA~j!J7-TDp*I$%rfp!8BZOJ7DbRTN6-N6n};urJR4!dvztI?Cjc#nU*SJSZ*L7 zo3@G|&W5Nxv|pK#(zDCaDt(-X;XN_Aq`W0{W)YWn==X}W%*>hJ=~01egK1i8(`-)C z8Ykl}j|xI`wY!aWncCHF|4Ur_6A~K{ddI(Fpmbd^dmhVx*%YUL$?-u+IEpU+Xzs*b z(guX0h`vf}c4eaByI%!Gp?TAF0iRO#OFicMg)0q z6n*&J_x{B<5d4nHE-5SM1}HWE+Alo*KbMpaIcU72J1vjZ$E+M+RX!`4IUB|q@2FMU zrC$;*EDBO^nzN7U=h7*!Xu^0m-tp)1df-QgU% zYSA{}**7KQT&SAL0`r9e zv#QVh;!17?xnqLbq=likMk~vsty0<~GYkgjcYGueL)-1Ife=ZT*Np=9@c+_!WOl3v zZQ{g1RAKCKJJf0a`4EYAgay3?xPX;QY`Gk z*`;RpWLj=#!J!rBK1Wts)sn}muL)xy>dT9IKz=vj$b>G!atVR-{xE--p3tD<*DcmDpe~6-qsLu=y{oKIOxudK?3s-vsx^;>R&qJ*7@02HP)t*3 zMV(;R@Pl=1@kR;p?XAq17|OF8Z!0|Txs2W=XO>0O0*vba8LSn*EqZAcAG%jsPMX${ zH}4c+E?1B8;~%p)Q9W<0;a2n61J-hYGX~<^*C|wZ2bdo!XBy;kt=haSHc5tiWNlzy zX<4^(2<{Y3kpxPiG}*}!UvsvpQByML(Bu5>@~Me8s7$Y-{QT+JXABCZxO7$@k+W`_ z#%Ku0#H+^2tj>iw)Mzy|AE+at7;PSePEN8mZ4)F#w*Sl;3vgL1AG?XYl~z+|%eHBT z2*OrQjRpblxB%1J&!N-*UKF2Svzo~}BGf$!GhZgQnw7rJbt?TcsPwh_0HIK#$xTB* za;i85J@try+>y?yFvFBJ^LHlIjY?s2$Cfm(=}b?D;z0^wwF&$eTXVc;3n!+^z25Est6#&p7OQXt!#4C`I9S4i*ny_j z`b?y&F?$$*u^t_-eLc{0sl%H`DBX|&+m1n{##{IhP2A{Ch0^9$el|k~-lFCqb9{0sJa zT}vn$gZkCcRZV6LCl>NRwVCMLd~~a&pjrV}CkRN$9gsUQ-f*H%YKe@{v+tq71%D9~ zb`pb$Sqe451ljfhk&Ww(g!Nectrvm`ItZ#h1{3N0MiZ6Om%sBSyBEm&XL> zydLjRUw*3*o(19NW3<{=qr?iZEN+*yNR(&9 z_cpp}<;r4k;^6+qg&2KQKjuvm=tJMy)!I{9fs z*qWe}x*%+fJB!&#72c)Jz|ZSV%D?5VDtI+VnL(>Akx~^$(7cTOI+n`Ypl=3YnS6C*n(^J zDeA-HWwaiLP2XlDGOE56Skuzorc>d)+q=OFjxEhBT2R%T!yG`tAoJ6p$deagFa;DR z-#8U-RZ0f@MEbQ+4euXk3^Q6Ma@HOZWDxDIPm}K5 
zH!i-v=5P+^MAxqdhwkb@TYM+A(Ut#j^C_ru!wJ0mkWb)Z&+o;7Q+?+t0b~*`7UqEC zr&E`8N$!@g0#c=q5;5(o_DUU$`p%!?Npz@pHCR)hRXv^WiKYbfMARg)52r4v=uQX3 zCB-zOJ1I>7>uNs&j4c8eZPiysAj3x}CMX|Lb3Iagx|hB?MEgN>$a#V>EC`d2H=4b# zM$JE;h`t~Af802NM5m*Jn^!!RYQ?O@0(z(-?TQ8S^VuZl_B-E=y7On?Ma?!$Gb1Zy zok6;?h8yZ4?-J2hRYDe;bmJH{7tLuf>tN%RE@q3 zht1os)oLY7i!wdOyOu9XW{58~sI8WbhAG!3UK#R~Q&p@8Rd3xt5*3=cZMlys+;#89j0 zAE+{g()(ACH+{>x`@FFZ*cIiAX#4h}BCaT6YwRmY9N~mt#qv$L4 zJ18(lH{Ago<~NoQhoZy)Owk3`tqK1I-5@L&LquKcj4?{?0QWBVE~JGn8nM|NzT>OF zA(5C(>)(Udzi&1t;^tEZTTwiv<4~{VJx%8xf25T+SFr9Xeh0uXgzhL^XRd^4h81aLWQZk; zEgorPXY+tB(YK>U9c>^=Nu30<6Qf0)T7WkwM^Ulw=mOB+N(JjAzCrixa?Lq>vfwkN zU3@FdVWR#@(IJ1P%y!)f+1YgiC8vk3RQiWjr0xB|BA>1sxKa`r22#H+kD+y0rciXL zvWZEC{GdkS4%ENLZ-u5%T45*q-6MP0SAY&t?I(2nfphJo#QQo_-~3V~OjJbj2w1PU zZfyL%LjmuKh3d!ZBHkWjXJ{EtTg!N+hIg-6u8GDk=ZJn=nh|rK#Jw+AvDaO_y;Ec= zd^MwQ*>0M)Jem<&@pR@;6FzcZ#{BNb3NxvHXMo z3BJUIz>=~B`Nqpsqr`@o=4&aI8>LcKxt!$D1>YUD;X#NC)T3Wki^LdlRc=tgQM-Y+}QoEc$@D$itnum?q81%0n& z5XJt+3vrHd4R8KJbrpo_gmMA63WysJ3n(d5(q+{t7)}@C)|VDS_wxpXBdD7$5wHqx zU#1d=jyOEC6@t~n*SGW&NK|RmuF&d@t;(ye2Sd7*P4KK=27NXBgZpk@ksfIn*oyiv z@p)xdiftUQcf2wDOxAm*bs208Y1jB3;t}(|Tx)$h*f{0QoA?EzqrBA+5J)+`QecMX?gcbtVn(0mEx~q+59W$ z0$)T_VJr9tAVl0vnfR(6OM{UYxmMZy=k3fGpzxIJom>cg(|S&EgM3rAOhe1fd{6C+ zYNHPI%i!3e)h&?50;*!7%q(c0rax^xOG}$&gryC(`h08-=e2#NDaRS^C>d6=%K~1< zP@KR=&duQB%DQ3>xvN?K+!b3gPZM9HajY4f;i%OE%0P@4WsZKDd{e%$@?&$G;?;`z zpsv4HI@=Ph%-+UZxi%0yC#Gb3;wPA5y1tyoOEExeB0ca?y>-?LSt zcQ{bUtEWz8&H5y;ibZo6^h0^DE}iDXSq4kimo%*V?%Q>!B^UHnFN^o!ni{(*?=qZ8 zWj!TZVJ%_T(?q$Seoqm)Wr3AR$Y9u*ynYul}k%|oI zHV00Q5~z1<6R(;&S36jsM3B4#=GU$r&iZ!MDy_Myr*QNWCzGk_Y_dZXqo;Dtt6fV( zN!DJN%qL^7YKagrBb9;1Ej_P1TZ-@PJd*X=)aME9+;rDYVfLEgG>tT3K3u{a*eUFM zh~w-?eN=16X*gmvI5A(Ydy(^mD5_zf9NGAm>B6v3hIXH zO%r_O{?Mg5f2kB(dzRp3s;Ay86~XGjTNf9R`d7%J!J0M49jOy9 z-nzE=D1|)m`rEBC&voA!nw#|*Iya)mKJ#GOcEAf_!x;)U{CGBlqj`MpWHZ}lZ5 z$QnqyCO1UZuK=apj3ixIp8_8}NeSJzHhoispgV57%@xi9NU1j1kmOJRN;MOre>shB zlxk*VDcGVXIrN}!o}J<|C7?KPf$BU}F}V{F%F#TpqU-hdAiJ~;as$EZJBdRf7xh%S zN_ZwsnmpqyT$b8eSZL~->Gj)rPjARZHi7$@4miej$lj1zxk9&ybGJEw-$Y(t1ghnX zlaO8nfqYDdJxkTCB(wHCW}6(mF;se- z{x#sv;O!ORB80wY^B+)IM&Wtz)kY1f?pXqu8W$X&{*qBSG?NaXYZ;V{57CXvE z+JY%<@Uxjp=O!N50ql+|0=pay;nqjKeLY_c*{bFh7dK3B;Z?E{o`NVKOwjG3uiOa{ z%@z8CmPY37G~UDFLNZ(VjueS$ok68B1JX$O>M-Ih%W;diu<>MfSGbtHP1@21L?*!` zdzAaG1X=^*_oLIiTd2f^QNd0|i%eBfkBI#Z?=Tb?B2I&qsc27HszkZU_%^nJ>evU- z2lW6}6pL`|YuT*Sm&{$2BiPz;2-nD@5juW5viM#PYZ{n#sIP)30l5>TbS}LY*xYe` zpK~ddBDVCR?+c)G0?v~7N77ze?TjnXmuzL~pbJFb1i8(K6!&h=*IHxAIOET!uZLO$ zFAe;DM^twU32XM07rw{(Nl1y%{lvzGS=)e~q=Gv}2Z@WNW4GQa;_&*B!so z?8ADm6W4Kebpq4y;c-}R>B%XZn$b#ASxZ3_lI8!nz7zi|=VouFNz;s2PDd=yl%r?LzyJEhZ4#`=>xIUJpfTBhokb2K2osq-uTl(f1d0pFG$O-XZxT5F$j zm?hD4T{gGrxNTf5Q=7R&8r_bPGpc&9yJ$<& zTvft!NSqv&?h3krE% z3UdvsN5$5>sma&#z9{Lgra&;dyLu&jm~*_g`|?KyRuG|V+bBg>RBk*Mes`OS6OO(m zq|g2N3FQ)|MHuU&bk`2Dx@^|IS)2-;FTq57^s}9Yf!DjWucy|QW*N85f;(BdvTn;A z@r{V5z+Xn>N8V?hep8yTH6RG^u076r|iRP_vVjfKQU=Kvc1S@ zt^7rq`~AzjQem{`-YYf{0pVR*5U=oiH<=RO=apQFu5h+Zs{0xxCYX|bp^h_W zy(G|z%WTcJyD&ATN~6}-j56jFaId_M&_UKou$Zf7q9?CZYgJI>*IZ%J*Y=dzm3P{A zrB=o>97{93hvfHeG1ZeK{4#9O5@pFQuKRPz*~y{nX7}ePSBoc8f;l-W1N;3dE5n|U zdn2s57c_kWl4V_z4<(nRTbCSJ*%cGMT;67B7MRB=ksne{*|sFIa2tHPpfyBmKKK^v z-LALVm2Z@?5(w2X*cr}tgtCR^h=UX4*Or3v=K&4OuZc!bS6EB4eiKGm)-&$g9JSfY z5(y??IpB7~x~n#ETH=eU8*r)C4;x0^jK0Y>GO`k8R#)U5Eq6lJg2wq)TtQzJjt|p9 zs1Ht?+@rd;|GXO{th=9Yq3q;n)L562ZO(Cco+a{2=Y3KAZCfPAW5|N7YTEyVz(p}y zc`-X<(xelY(IT_;_DYGj#qAm2Ys*y$ULVRu+{$-7Sx&v|I}Rnjjijyk0}$z6fCbkp z#IeflSV0Q+SsRh)E_dtsM+9O0&ItCV(5wW7iL$>qD!5znFFzJ)e^GAq%SwR#$A>mc 
z?vcz=m(EgWcx-%Sx*LwJky^kZuHb5IYAWZ7p#^x}IqeX1(P9so%iA93kZ5+h z7o0z~tDjT&g)Ci=!E?NxYi{9d8gyESM#w*2cpLTl9Z)p$Q79FL9*wm2QcZ57C zoamC+)`KZmsGm?t>3o0w&?q^1J=&oy7UFyMl-ItY)4!7lL)dG!-Dx~F+eI{Ur|NgJ zeet)Y6BMq73_m(=D#rD|36CdUL%Nbf_>^>mmI=a3*dWXnP8S48Q9#+Af=4%a!J?Cc;1$tLkmH~|aZMD@Tsdmk zoY6`1?)u6>duP>2^FFg8lvy}hk^bmZv*UOs);1v>LQDxWig{Ch3mJ-HUg-L{7=-04 zxneQGkJ-h_=M)^>2~1o1J>e2rVvtJg^Fe91`-8=v+aSl50xQTD zmHDZ!qnXfsP37|n+ZmNCMP69ycw6Kw;9Yo-N;LjvoiLOR2>m3a+lo`*6kpO^P(NZ^!yuC>C=ofdD2)^B76LbSwkm^|`AHq{&aK=Qtw?WJ6m27*rvSStFt0^>Qm&z4V@Cnz}LHln2pg1>{N zxHB*A_>!OP6zcAQP5S{DUcFebRKipm?bS+3QSOVAPAodif?7QskheWGp2@Q{fyib4 z!Cck!l0@?S<%as%;l0}SgYa^61lkHZg{HLi;#7Jq!8FPHA~^NsVMy)w$`%m~H$* zUas1H$%E28hMUW9_5!VhjfU~+z2+FGTrC=-A6c?>Cp$E(^peFFJv-n+iN%^Pr1^Ge z5wWD*tfL5LsQS}7Yi!3g^-9Swk z8F7;#sj}z3dEs=Rk3xpsgFf)+WY7*2vZd>CiHnXYh?q4u;*E>#Ykb zthec_6MC_%+Z(DLKx-Cj;O9LuHKJu73z0cK)viid8r>&CUoUM`oyr-S^tTn-h_{AI zMW>xhGV*y^Gpf&7A)-JH=0($+eP#k>6IISZs@K;v7Es?&4O#;e>jov5XV~u*G8dq0 zbhkhH3fxR;aJ?=LOBW0Huok#gdm!>E)GXJ6lCM&8Q;u;4_RXDVqPq zT+Y-#A7f$LFpx7!U5NY)S)V)t74UlMhp@lao}BLx4S%-vF|Y5tM_jMz(U+y1h zHf?M55ys_N?TaAn{`6^TD0llG&S~T^^9^&i1A=Cr-&xjXbMyA^A6QK1#ZyAzj1rnl z3XK=o2hUu+rFDEmG_+bCBzWy9(yt#^aPB@+tToxF8e4tlzX_$yau}D5e2Fz_dYs^8 zrB`uo`XY7r9_o&I_F|0e)4O>knv5+|)IK7l%zz`}Xw6!@9b*^smY>dxcn%>j*&Rm@`3^6nwRB$s|M7N}GM~(Szn6(j z-9Mq6vsVFRkPhA(6_hsUkH;P9`g9o}M~MAsjLd`g05EifUf&UeqoOwjoP4~mvAm%( zA^|dMz=;Q3a@KIfy*g`EEQ{X^@^>)_Ti}>i)b1u%?0B^AW#T1ZZA%>QMl*=}~ye?ld_$jhk32d&W2tYBsBD?~TDk0Ar^CB2@k)Vp&R!vPeTA<6hS0)?Gx?piY;HV`v)?vw;%NPy`<_3WZ$Rj; zAX~r@LQbu?Y{iDiIOMxGh?ljh)XcNb>eh^l0+<;18LC@*zfxe5=+djtPH^tb$>VN$ z37y(95k2Ru1g!;W2o?OYtumXSt2y(xUsmv|fVdn|RQYX_redaQ7yW6h<`=))OAV~7 z1VLRR<{z!*;di4z|Ao-}7`>iwp~e(!^Bo*cw)4u7R|1(@84>|S4{ve~?DjtU{8Vq~ zGV_u3`HU zP3Y3r4jsrme?l^{}OjmOy~HY@4AT^f+K=~(M)0n+##U@(%liGvNB zFaR>TLL^=dbVa$rXYoGKV#Q4!5}LSaF^hj9g-OV7#N1KP7kq40 zEDaY?i3@1%QP3*ZR|>6=hOC6vL%Reci|3g4DlwrBI6BcK5L+R2{caibs_=D}vV%zJ zZI+C^ED#9&Ix1LaCY5+d<6K$4a`o_9nNal}DyGw0Mgp3dwpBQP*2hG5p9REP=>81KwaPff1~PRwXbIh0 zk%BYD{3-SJ2-%pd*#a(7=@o1}Tg8fA$+;3ry&53e-r)$Hq^#i<$ZP4X0!i!a{(NA@-bWVMtzRBULPDm9Cfv^~WC zhNWtrPWt4J9f7e8b59Zq|Gqr^TkonbJ~GNP`nl;}#oWH0nkFV0158N}a? 
zK@Tp8G0M8rI4E03n#G|1d4WVI9e>V|Y0VB%daWDB*DT^a7%G2tDZEBHE1&RF031qC zF=VuAe2{tI#*vVb<#K{Bf^{0*8{~Uyq0Pe(T7^uV=$AJ$qKFF+3&$ghe1tc3{VJrc z2hD^bow)d&JkrqPupjNUuB?)OiX9qAw>M1tb!8X6FMpujrj=o~yOzz2!c_5+UV#Ws z9)r*}PQ>`^SZsf(u^e%W0FMs zZ4}a!`#sCoY|in&)SfS%x~Ga8I<<7(Hig=pc)}4!zYZ4T@y5K=DEuf~x}sl)q7LaD zqZzBaTGbaMm^iWbAf?yfo>1jLKwL9ZC;xrCSeSHH)u=sXpX{+nXw4-I=H108iAcn} zReM>vsQI~4bCbp9hfO5VlleOUm_|8a%}Sw7wU*m1r_gvH(t@_7d_W?%WTq@DCQp$| z%92bjWmmS&J_=iH|McnWruo+Ew>aQL&o$Jb}U(x1J7-YRBnW+9Yv>_K(e zZ6I?TtYX^Z_ftP1bO$JMd?C4fOp^*tinyW`yFw9!0+IMPN44>?mdC2gdmd6vW&hzu z&-$ZhAKwHgs((aFFV#@y)0~vw&>gr5!FQoDWO&ADmC9a!?$|?8&@W5tGS|>6i(x?z zN3?}-_Kn-3ALQyf!e{!+ZUKoiSD!AL(3#4ALY06=54F{xMamQi6rDM!nCJI_N|c!{ z4jU0^M}tu_z7Vp-Q^NUO&Cvrh;};}{iZgclxrYdLUyjoy&H3{{qT#Z$md)kc3WcRm zg8hbjJB~UBhnm-)%KIhBB-RG6;qv0%=fq$OJr%aSzs%!=X-a~Zxn95}G_&-Mb8o!~ zr%2cb_O)g)sPcG{0D`{^`JHf^K%vs`wM?BzQo%*h`4ZXij}@cA zPHP#Kd2Dg1mmcufpiZ8mejz-Ct5MS)oBDu_t$&i}h;P>g)L!1b7TqI|Pa6dugi;~5 z=zgv%X*+?vK6mY#&W$7V&&Rtae?st2Q0e6}1Jj|l7C!#fi&*P`wr}2=w<0k7MV27 zYVwU#UW`TikD-5Y&AscUm%i;TjzsSd3hecp& zBENOz#8=hEOlC=UZM>M){2Zetq_=v?8!jWLy(kc|2@bj}7(F-VU;(ZSQi6*#L@`kQ zBU&W~y5|(r+i?)Z7ei>d^d$a@lsrNnEsNyj z!q#7wt(fY>GjDc#FsTS{!-;2UuU+ubyd7UFxJuy%j_7vjYEGn_9pv%9;H1w?#UaQ7 zdJy*(YEP6fi^P&hZ2XY)2Zb|`pB%puWrPs-f+Cjx^03XZ+SaKlm9^FOq4zCp!h5D8 zR4E*;`RkVu1ZH#_l^1%ai&qGpUeF7huqdbYG^3a7e&6sy5q}thxWmJQD@&RP(HZ7e zC3)BmZ+#k6cbZdx6IU^q&L$j4VY=^F2!STdQ*fhxs0Su5vZp}2ygI-y-)1x47R*A^ zh1xyoXIV`Ifcg+RCC}*7{dRHG?H8Wa;_iNzAb1|}_L$&~npbYJV&l1dx;0YhA#kf< zMirAI-uN|}p@u(O0zCws;t!E}s>!A2ByC%urGdL!=0%4&b#^$#6&*=JK67prSYfxTIHg>?k4(%D}TF_Nc=|3u0hNyol4D`94e3C6I$-2nmiYjUD8obEm&U~k6S?^_U_$MvSy-+S2={sBb zYkA1)C^5c}vkW%F&Z?T zTo0ypny!@Mt*`p=>RM;BO)QFQM_qb&WIjHp4j+F`oj6}UO+iaQZD=pyWx$cR6VoVv zyWLIU$ULZNDi>@iu)E?SeXr9FPL!yXPs1Frjh!JGlT;OpS*_WOOzrKzp%&ZR`M1Z= zga4fliOeHSYxZ^fSnMQsEdA!k7~j0^4}OySt_HK(Y~ueh@@rpQiyT{AYtzSYx^%rd9g&T?1}!tbqs@X^G{<#(D+`|+MTOIlaE(|GfIlPBG# zr``{Y^qxt217R5N8;L#!l14!^wr!IIH((~Y4p$-1A%iePZ|yZQEQ zn7jP?PNiuqAa7m^Bb0Nni&d@Q3gyy-*%lGdQ1>1k>aQ|7;{+n3lzvaZ$(?$)GZ!q4=GAW zslHW(V1_o-;e1b(U1=8ajWAjhyuyu)Krna6p1Pt!-Bq}JpsVPcM+$4Sx4y57S#k>G6x~ZXLAby~@;q%^&4~*+XW~R6IU_{BSw9)*d7O%Cn8>wg zf*V)5cDi=W-Gobe0qKM6wuF-6W*@&60nNjQp*;c(6-!lvX?#ph(BEaE?2bK*+AXuk zG_nJhRA(H*b7+CC%Eo@xsqSC8Puf5N&|9;r?r5P;Wo0Uj)`a53DukL?6qBa?f>KVT zEW$Rl>dac13rUfS@QNu@_`o1t3uI1-Az#otsJsQ%`)HOM)8JQJD-Y&Xd#=1(Y@1OH z#j^#kye;hY!0*Uh;mQ=4*iP0UlSw*#IcB14rGSM2*UQ0!)S0^o>2q1HOubEOWMbwwv_3X=c!5p@O@7${V8xRwMxx=PL}_3S;gzFo3{ohuDopH zu1hgo!!v1ba&+OYc@--<8w(*1!3%@6%Ad|k81M;yqSRBZ(CM~*kc@#lDfDx5*vM2n z@2*Fqrr>gjZ?8M*%iYEhA(^U%(=OUM!Lom>KS_i8?(N)J3E@6HQVYUfOGl|_nU6ez z`;4Wh*UO$ebA*DCbuzcmxvFqKdM4j({;%)LrK4jCFm}9gs`ci<<`xoEZpgdVXYi|& z{Df(F+(J{*g8Gf`lbs<~rSHlssd(uGSq=;sFsg!n@iKkNcr;fIg>zzZ;gAJ~Z?KjH|2 m{}EGvBnMEp{jZxGY*D|p=e$B_^8)}qL|4b?ck*fc_5Tgg5ZMF( literal 0 HcmV?d00001 diff --git a/docs/source/conf.py b/docs/source/conf.py new file mode 100644 index 0000000..64ac78d --- /dev/null +++ b/docs/source/conf.py @@ -0,0 +1,70 @@ +# Configuration file for the Sphinx documentation builder. 
+#
+# For the full list of built-in configuration values, see the documentation:
+# https://www.sphinx-doc.org/en/master/usage/configuration.html
+
+# -- Project information -----------------------------------------------------
+# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
+
+import os
+import sys
+
+sys.path.insert(0, os.path.abspath('../'))
+
+project = 'PQuantML'
+copyright = '2025, Roope Niemi'
+author = 'Roope Niemi, Anastasiia Petrovych'
+release = "1.0.0"
+version = release
+
+# -- General configuration ---------------------------------------------------
+# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
+
+myst_enable_extensions = [
+    "amsmath",
+    "deflist",
+    "dollarmath",
+    "fieldlist",
+    "html_admonition",
+    "html_image",
+    "replacements",
+    "smartquotes",
+    "strikethrough",
+    "substitution",
+    "tasklist",
+]
+
+
+autosummary_generate = True
+
+extensions = ['myst_parser', 'sphinx.ext.autodoc', 'sphinx.ext.autosummary', 'sphinx.ext.napoleon', 'sphinx_rtd_theme']
+
+source_suffix = ['.rst', '.md']
+
+templates_path = ['_templates']
+exclude_patterns = ['_build']
+
+html_logo = "_static/pquant.png"
+html_theme_options = {
+    'logo_only': True,
+    'display_version': True,
+}
+
+html_context = {
+    'display_github': True,  # Integrate GitHub
+    'github_user': 'nroope',  # Username
+    'github_repo': "PQuant",  # Repo name
+    'github_version': 'master',  # Version
+    'conf_py_path': '/docs/',  # Path in the checkout to the docs root
+}
+
+# -- Options for HTML output -------------------------------------------------
+# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
+
+html_theme = "sphinx_rtd_theme"
+html_static_path = ['_static']
+html_favicon = '_static/pquant.png'
+
+html_css_files = [
+    'custom.css',
+]
\ No newline at end of file
diff --git a/docs/source/faq.md b/docs/source/faq.md
new file mode 100644
index 0000000..5a3f9f1
--- /dev/null
+++ b/docs/source/faq.md
@@ -0,0 +1,30 @@
+# FAQs
+
+## What model formats does PQuantML currently support?
+PQuantML primarily supports PyTorch and TensorFlow/Keras models, and offers both direct construction of compressed models and automatic layer replacement using `add_compression_layers(...)`.
+
+## What are the requirements to use PQuantML?
+Install PyTorch with the CUDA build that matches your system, along with any other frameworks you need, such as TensorFlow. This prevents version mismatches and GPU compatibility issues.
+
+For example, to install PyTorch with CUDA 13.0:
+
+```bash
+pip3 install torch torchvision --index-url https://download.pytorch.org/whl/cu130
+```
+
+## Can I use MLflow locally?
+Yes.
+
+PQuantML integrates with MLflow for experiment tracking and model logging, and local usage is fully supported.
+
+
+### Start the local MLflow UI:
+```bash
+mlflow ui --host 0.0.0.0 --port 5000
+```
+
+### Use a local or remote database for Optuna tuning:
+```python
+from pquant.core.finetuning import TuningTask
+tuner = TuningTask(config)
+tuner.set_storage_db("sqlite:///optuna_study.db")
+```
diff --git a/docs/source/getting_started.md b/docs/source/getting_started.md
new file mode 100644
index 0000000..63b5f0b
--- /dev/null
+++ b/docs/source/getting_started.md
@@ -0,0 +1,211 @@
+# Quick User Guide
+
+```{note}
+This section provides an overview of how to use the PQuantML library: defining models with pruning and quantization, running fine-tuning, and optionally converting the final model to hls4ml.
+```
+
+## Model definition & training
+
+To compress a model with PQuantML, all layers must be replaced with their PQuantML equivalents. For example, replace `Dense` with `PQDense`, `ReLU` with `PQActivation`, etc.
+
+Model compression behaviour (pruning strength, quantization bit-widths, training parameters, and so on) is controlled through the configuration object, which is a Pydantic model.
+
+### Load a default configuration
+```python
+from pquant import dst_config
+
+# Load the default DST config
+config = dst_config()
+
+config.training_parameters.epochs = 1000
+config.quantization_parameters.default_data_integer_bits = 3.
+config.quantization_parameters.default_data_fractional_bits = 2.
+config.quantization_parameters.default_weight_fractional_bits = 3.
+config.quantization_parameters.use_relu_multiplier = False
+```
+
+### Building a model
+PQuantML supports two ways of defining compressed models. Below we illustrate both approaches using a simple jet-tagging architecture.
+
+### Direct layer usage
+
+```python
+import torch
+
+from pquant.layers import PQDense
+from pquant.activations import PQActivation
+
+def build_model(config):
+    class Model(torch.nn.Module):
+        def __init__(self):
+            super().__init__()
+            self.dense1 = PQDense(config, 16, 64,
+                                  in_quant_bits=(1, 3, 3))
+            self.relu = PQActivation(config, "relu")
+            self.dense2 = PQDense(config, 64, 32)
+            self.dense3 = PQDense(config, 32, 32)
+            self.dense4 = PQDense(config, 32, 5,
+                                  quantize_output=True,
+                                  out_quant_bits=(1, 3, 3))
+
+        def forward(self, x):
+            x = self.relu(self.dense1(x))
+            x = self.relu(self.dense2(x))
+            x = self.relu(self.dense3(x))
+            x = self.dense4(x)
+            return x
+
+    # Model captures `config` from the enclosing scope, so the
+    # constructor itself takes no arguments.
+    return Model()
+```
+
+### Layer-replacement usage
+```python
+import torch.nn as nn
+
+from pquant import add_compression_layers
+
+def build_model():
+    class Model(nn.Module):
+        def __init__(self):
+            super().__init__()
+            self.dense1 = nn.Linear(16, 64)
+            self.relu = nn.ReLU()
+            self.dense2 = nn.Linear(64, 32)
+            self.dense3 = nn.Linear(32, 32)
+            self.dense4 = nn.Linear(32, 5)
+
+        def forward(self, x):
+            x = self.relu(self.dense1(x))
+            x = self.relu(self.dense2(x))
+            x = self.relu(self.dense3(x))
+            x = self.dense4(x)
+            return x
+
+    return Model()
+
+# Convert to a PQuantML-compressed model
+model = build_model()
+model = add_compression_layers(model, config)
+```
+
+### Fine-Tuning with PQuantML
+PQuantML provides an automated fine-tuning and hyperparameter-optimization workflow through the `TuningTask` API. This allows you to search for optimal pruning, quantization, and training parameters using your own training, validation, and objective functions.
+
+```python
+from pquant.core.finetuning import TuningTask, TuningConfig
+
+# Load the YAML config file into a TuningConfig object
+config = TuningConfig.load_from_file(CONFIG_PATH)
+
+# Create the fine-tuning task
+tuner = TuningTask(config)
+
+# (Optional) Enable MLflow logging
+tuner.set_enable_mlflow()
+tuner.set_tracking_uri("https://ngt.cern.ch/models")
+tuner.set_user("your_email@cern.ch", "your_access_token")
+
+# Register training, validation and objective functions
+tuner.set_training_function(train_resnet)
+tuner.set_validation_function(validate_resnet)
+tuner.set_objective_function(name="accuracy", fn=calculate_accuracy, direction="maximize")
+
+# Set optimizer, scheduler and hyperparameters
+tuner.set_hyperparameters()
+tuner.set_optimizer_function(get_optimizer)
+tuner.set_scheduler_function(get_scheduler)
+```
+
+To run optimization:
+```python
+device = "cuda" if torch.cuda.is_available() else "cpu"
+model = model.to(device)
+
+best_params = tuner.run_optimization(model,
+                                     trainloader=...,
+                                     testloader=...,
+                                     loss_func=...)
+```
+```{note}
+`tuner.run_optimization()` automatically runs multiple compression–fine-tuning cycles, evaluates each trial using your objective function, and returns the best hyperparameters.
+```
+
+All other training code remains unchanged.
+
+### Train a model
+
+```python
+loss_func = torch.nn.CrossEntropyLoss()
+optimizer = torch.optim.Adam(lr=1e-2, weight_decay=1e-4, params=model.parameters())
+scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[600, 800], gamma=0.1)
+```
+Training is handled through the `train_model(...)` wrapper, which takes your per-epoch training and validation functions (a sketch of these functions is given at the end of this page):
+
+```python
+from pquant import train_model
+
+trained_model = train_model(model=model,
+                            config=config,
+                            train_func=...,
+                            valid_func=...,
+                            trainloader=...,
+                            device="cuda",
+                            testloader=...,
+                            loss_func=loss_func,
+                            optimizer=optimizer,
+                            scheduler=scheduler
+                            )
+```
+
+### Using different quantization settings per layer
+```{note}
+For FITCompress, HGQ, or architectures where activations require different quantization bit-widths, each activation layer must be instantiated separately.
+```
+```python
+def build_model(config):
+    class Model(torch.nn.Module):
+        def __init__(self):
+            super().__init__()
+            self.dense1 = PQDense(config, 16, 64,
+                                  in_quant_bits=(1, 3, 3))
+            self.relu1 = PQActivation(config, "relu")
+            self.relu2 = PQActivation(config, "relu")
+            self.relu3 = PQActivation(config, "relu")
+            self.dense2 = PQDense(config, 64, 32)
+            self.dense3 = PQDense(config, 32, 32)
+            self.dense4 = PQDense(config, 32, 5,
+                                  quantize_output=True,
+                                  out_quant_bits=(1, 3, 3))
+
+        def forward(self, x):
+            x = self.relu1(self.dense1(x))
+            x = self.relu2(self.dense2(x))
+            x = self.relu3(self.dense3(x))
+            x = self.dense4(x)
+            return x
+
+    return Model()
+```
+
+## Conversion to hls4ml
+After training, the PQuantML model can be exported to hls4ml for HLS synthesis.
+
+```python
+from hls4ml.converters import convert_from_pytorch_model
+from hls4ml.utils import config_from_pytorch_model
+
+hls_config = config_from_pytorch_model(
+    model,
+    input_shape=input_shape,
+)
+
+hls_model = convert_from_pytorch_model(
+    model,
+    io_type="io_parallel",
+    output_dir=...,
+    backend="vitis",
+    hls_config=hls_config,
+)
+hls_model.compile()
+```
+
+For a complete example, please refer to this [notebook](https://github.com/nroope/PQuant/blob/dev/examples/example_jet_tagging.ipynb).
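+
+### Sketch: user-supplied training, validation, and objective functions
+
+The exact signatures expected by `train_model(...)` and `TuningTask` are defined by PQuantML itself; the snippet below is only a minimal sketch of the kind of per-epoch functions referred to above (`train_resnet`, `validate_resnet`, `calculate_accuracy`). The argument names and ordering here are illustrative assumptions, not the library's API.
+
+```python
+import torch
+
+
+def train_resnet(model, trainloader, loss_func, optimizer, device):
+    # One training epoch: forward, loss, backward, optimizer step.
+    # NOTE: argument order is an assumption for illustration only.
+    model.train()
+    total_loss = 0.0
+    for inputs, targets in trainloader:
+        inputs, targets = inputs.to(device), targets.to(device)
+        optimizer.zero_grad()
+        loss = loss_func(model(inputs), targets)
+        loss.backward()
+        optimizer.step()
+        total_loss += loss.item()
+    return total_loss / len(trainloader)
+
+
+def validate_resnet(model, testloader, loss_func, device):
+    # One validation pass without gradient tracking.
+    model.eval()
+    total_loss = 0.0
+    with torch.no_grad():
+        for inputs, targets in testloader:
+            inputs, targets = inputs.to(device), targets.to(device)
+            total_loss += loss_func(model(inputs), targets).item()
+    return total_loss / len(testloader)
+
+
+def calculate_accuracy(model, testloader, device):
+    # Objective function: fraction of correctly classified samples.
+    model.eval()
+    correct, total = 0, 0
+    with torch.no_grad():
+        for inputs, targets in testloader:
+            preds = model(inputs.to(device)).argmax(dim=1)
+            correct += (preds == targets.to(device)).sum().item()
+            total += targets.numel()
+    return correct / total
+```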
diff --git a/docs/source/index.rst b/docs/source/index.rst new file mode 100644 index 0000000..344ba5d --- /dev/null +++ b/docs/source/index.rst @@ -0,0 +1,64 @@ +.. PQuantMLdocumentation master file, created by + sphinx-quickstart on Mon Dec 8 16:28:11 2023. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +=========================== +PQuantML +=========================== + +.. image:: https://img.shields.io/badge/license-Apache%202.0-green.svg + :target: LICENSE +.. image:: https://github.com/calad0i/HGQ/actions/workflows/sphinx-build.yml/badge.svg + :target: https://github.com/nroope/PQuant +.. image:: https://badge.fury.io/py/hgq.svg + :target: https://pypi.org/project/pquant-ml/ + +Welcome to the official documentation for **PQuantML**, a hardware-aware model compression framework supporting: + +- Joint pruning + quantization +- Layer-wise precision configuration +- Flexible training pipelines +- PyTorch and TensorFlow backends +- Integration with hardware-friendly toolchains (e.g., hls4ml) + +PQuantML enables efficient deployment of compact neural networks on resource-constrained hardware such as FPGAs and embedded accelerators. + + +.. rst-class:: light +.. image:: _static/overview_pquant.png + :alt: PQuantML-overview + :width: 100% + :align: center + + + + +Key Features +------------ + +- **Joint Quantization + Pruning:** Combine bit-width reduction with structured pruning. +- **Flexible Precision Control:** Per-layer and mixed-precision configuration. +- **Hardware-Aware Objective:** Include resource constraints (DSP, LUT, BRAM) in training. +- **Simple API:** Configure compression through a single YAML or Python object. +- **PyTorch Integration:** Works with custom training/validation loops. +- **Export Support:** Model conversion towards hardware toolchains. + +Contents +========================= + +.. toctree:: + :maxdepth: 2 + + status + install + getting_started + reference + faq + + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`search` diff --git a/docs/source/install.md b/docs/source/install.md new file mode 100644 index 0000000..ef039ab --- /dev/null +++ b/docs/source/install.md @@ -0,0 +1,8 @@ +# Installation + +Use `pip install pquant-ml` to install the latest version from PyPI. You will need an environment with `python>=3.10,<=3.12` installed. + + +```{warning} +PQuantML v1.0 requires `tensorflow>=2.17`, `mlflow>=2.0,<3.0`, and `python>=3.10,<=3.12`. +``` diff --git a/docs/source/reference.md b/docs/source/reference.md new file mode 100644 index 0000000..6692474 --- /dev/null +++ b/docs/source/reference.md @@ -0,0 +1,208 @@ +# Usage Reference + +## Config file + +The most important part of the library is a user-defined config yaml file. It has five separate sections: training, pruning, quantization, finetuning, and fitcompress section, `currently maintained by TensorFlow only`, parameters. By default, the parameters in the config are the following: + +### Training parameters +The following table outlines the primary parameters used to configure the training process: + +| **Field** | **Type** | **Default** | **Description** | +|------------------------|---------------------------------------------|---------------|------------------------------------------------------------| +| `epochs` | int | `200` | Total number of training epochs. | +| `fine_tuning_epochs` | int | `0` | Additional epochs for fine-tuning. | +| `pretraining_epochs` | int | `50` | Pretraining / warm-up epochs. 
| +| `rewind` | str | `"never"` | Weight rewinding policy. | +| `rounds` | int | `1` | Number of prune–fine-tune cycles. | +| `save_weights_epoch` | int | `-1` | Save checkpoint at this epoch (`-1` disables). | + +```{note} +If you require additional parameters for the training or optimization loops, please define them directly in the config.yaml file. +``` + +### Quantization parameters + +| **Field** | **Type** | **Default** | **Description** | +|----------------------------------|----------|-------------|------------------------------------------------------------------------| +| `default_data_keep_negatives` | bool | `0` | Default **k** value for data quantization (0 = clamp negatives, 1 = keep). | +| `default_data_integer_bits` | int | `0` | Default integer bitwidth **i** for data quantization. | +| `default_data_fractional_bits` | int | `0` | Default fractional bitwidth **f** for data quantization. | +| `default_weight_keep_negatives` | bool | `0` | Default **k** value for weight quantization (0 or 1). | +| `default_weight_integer_bits` | int | `0` | Default integer bitwidth **i** for weight quantization. | +| `default_weight_fractional_bits` | int | `0` | Default fractional bitwidth **f** for weight quantization. | +| `quantize_input` | bool | `true` | Whether inputs to layers are quantized by default. | +| `quantize_output` | bool | `true` | Whether outputs of layers are quantized by default. | +| `enable_quantization` | bool | `true` | Global switch to enable or disable quantization. | +| `hgq_gamma` | float | `0.0` | HGQ regularization coefficient for bitwidth stability. | +| `hgq_beta` | float | `0.0` | HGQ loss coefficient scaling EBOPs. | +| `layer_specific` | dict | `{}` | Dictionary for per-layer quantization overrides. | +| `use_hgq` | bool | `false` | Enable or disable High Granularity Quantization (HGQ). | +| `use_real_tanh` | bool | `false` | Use a real `tanh` instead of hard/approximate `tanh`. | +| `overflow` | str | `"SAT"` | Overflow handling mode (`SAT`, `SAT_SYM`, `WRAP`, `WRAP_SM`). | +| `round_mode` | str | `"RND"` | Rounding mode (`TRN`, `RND`, `RND_CONV`, `RND_ZERO`, etc.). | +| `use_relu_multiplier` | bool | `true` | Enable a learned bit-shift multiplier inside ReLU layers. | + + +### Fine-tuning parameters + +| **Field** | **Type** | **Default** | **Description** | +|-------------------------|--------------------------|--------------------|-----------------------------------------------| +| `experiment_name` | str | `"experiment_1"` | Name of the study. | +| `model_name` | str | `"resnet18"` | Model architecture name. | +| `sampler` | str | `GridSampler` | Sampler selection for the search space. | +| `num_trials` | int | `0` | Number of trials. | +| `hyperparameter_search` | HyperparameterSearch | `{}` | Ranges for non-grid samplers. | + +#### Samplers + +| **Field** | **Type** | **Default** | **Description** | +|-----------|-------------------|--------------------|-----------------------------------------------------------------| +| `type` | str | `"TPESampler"` | Sampler class name (e.g., `TPESampler`, `GridSampler`). | +| `params` | Dict[str, Any] | `{}` | Sampler-specific kwargs (e.g., `seed`, `search_space`). 
| + +More about samplers can be found in {[optuna documentation](https://optuna.readthedocs.io/en/stable/reference/samplers/index.html)} + + +#### HyperparameterSearch + +| **Field** | **Type** | **Default** | **Description** | +|------------------|------------------------------------------|-------------|----------------------------------------| +| `numerical` | Dict[str, List[Union[int, float]]] | `{}` | Numeric ranges `[low, high, step]`. | +| `categorical` | Optional[Dict[str, List[str]]] | `{}` | Categorical choices. | + + +### Pruning methods +PQuantML supports seven different pruning methods. +#### Method Overview + +| **Method** | **Model** | +|----------------------|----------------------------| +| `cs` | `CSPruningModel` | +| `dst` | `DSTPruningModel` | +| `pdp` | `PDPPruningModel` | +| `wanda` | `WandaPruningModel` | +| `autosparse` | `AutoSparsePruningModel` | +| `activation_pruning` | `ActivationPruningModel` | +| `mdmm` | `MDMMPruningModel` | + + +There are the parameters shared by all methods: + +| **Field** | **Type** | **Default** | **Description** | +|------------------------------|--------------|-------------|-----------------------------------------------| +| `disable_pruning_for_layers` | List[str] | `[]` | Layer names to exclude from pruning. | +| `enable_pruning` | bool | `true` | Master pruning on/off switch. | +| `threshold_decay` | float | `0.0` | Optional pruning threshold decay term. | + + +```{note} +Layer names in `disable_pruning_for_layers` field must match your framework’s naming (e.g., Keras `layer.name`). +``` + +There are more details about every pruning method: +#### CS Pruning + +| **Field** | **Type** | **Default** | **Description** | +|-------------------|----------|-------------|--------------------------------------------------| +| `pruning_method` | str | `cs` | Selects this pruning schema. | +| `final_temp` | int | `200` | Target temperature at the end of the schedule. | +| `threshold_init` | int | `0` | Initial sparsification threshold. | + + +#### DST Pruning + +| **Field** | **Type** | **Default** | **Description** | +|--------------------|----------|------------------|--------------------------------------------| +| `pruning_method` | str | `dst` | Selects this pruning schema. | +| `alpha` | float | `5.0e-06` | Mask dynamics update coefficient. | +| `max_pruning_pct` | float | `0.99` | Upper bound on total pruning ratio. | +| `threshold_init` | float | `0.0` | Initial threshold value. | +| `threshold_type` | str | `"channelwise"` | Thresholding granularity. | + +#### PDP Pruning + +| **Field** | **Type** | **Default** | **Description** | +|----------------------|----------|-------------|---------------------------------------------------| +| `pruning_method` | str | `pdp` | Selects this pruning schema. | +| `epsilon` | float | `0.015` | Smoothing/regularization factor for gating. | +| `sparsity` | float | `0.8` | Target sparsity level (0–1). | +| `temperature` | float | `1.0e-05` | Annealing temperature. | +| `structured_pruning` | bool | `false` | Enable structured pruning. | + +#### Wanda Pruning + +| **Field** | **Type** | **Default** | **Description** | +|-----------------------------|------------------|-------------|--------------------------------------------------| +| `pruning_method` | str | `wanda` | Selects this pruning schema. | +| `M` | Optional[int] | `null` | Optional grouping constant. | +| `N` | Optional[int] | `null` | Optional grouping constant. | +| `sparsity` | float | `0.9` | Target sparsity level (0–1). 
| +| `t_delta` | int | `100` | Window size / steps for stats collection. | +| `t_start_collecting_batch` | int | `100` | Warm-up steps before collecting statistics. | +| `calculate_pruning_budget` | bool | `true` | Auto-compute pruning budget from data. | + +#### Autosparse Pruning + +| **Field** | **Type** | **Default** | **Description** | +|-----------------------|----------|--------------|---------------------------------------------------| +| `pruning_method` | str | `autosparse` | Selects this pruning schema. | +| `alpha` | float | `0.5` | Weight/penalty coefficient. | +| `alpha_reset_epoch` | int | `90` | Epoch at which `alpha` is reset/tuned. | +| `autotune_epochs` | int | `10` | Number of epochs in the tuning window. | +| `backward_sparsity` | bool | `false` | Apply sparsity in backward pass (if supported). | +| `threshold_init` | float | `-5.0` | Initial threshold (often in logit space). | +| `threshold_type` | str | `"channelwise"` | Thresholding granularity. | + +#### Activation Pruning +| **Field** | **Type** | **Default** | **Description** | +|----------------------------|----------|-------------|--------------------------------------------| +| `pruning_method` | str | `activation_pruning` | Selects this pruning schema. | +| `threshold` | float | `0.3` | Activation magnitude cutoff. | +| `t_delta` | int | `50` | Steps used to aggregate statistics. | +| `t_start_collecting_batch` | int | `50` | Steps to skip before collecting statistics. | + +#### MDMM Pruning + +| **Field** | **Type** | **Default** | **Description** | +|--------------------|-----------------------|----------------------------|--------------------------------------------------------------| +| `pruning_method` | str | `mdmm` | Selects this pruning schema. | +| `constraint_type` | ConstraintType | `"Equality"` | Constraint form: equality / ≤ / ≥. | +| `target_value` | float | `0.0` | Target value for the chosen metric. | +| `metric_type` | MetricType | `"UnstructuredSparsity"` | Specifies which metric is constrained. | +| `target_sparsity` | float | `0.9` | Target sparsity when constraining sparsity. | +| `rf` | int | `1` | Regularization / frequency parameter. | +| `epsilon` | float | `1.0e-03` | Feasibility tolerance. | +| `scale` | float | `10.0` | Penalty scaling for constraint violation. | +| `damping` | float | `1.0` | Damping term for numerical stability. | +| `use_grad` | bool | `false` | Use gradient information during updates. | +| `l0_mode` | `"coarse"` \| `"smooth"` | `"coarse"` | L0 approximation mode. | +| `scale_mode` | `"mean"` \| `"sum"` | `"mean"` | Aggregation mode for penalties. | + + +Optionally, there is also FITCompress method implemented for PyTorch: +### FitCompress method +| **Field** | **Type** | **Default** | **Description** | +|---------------------------|----------|-------------|---------------------------------------------------------------------------------| +| `enable_fitcompress` | bool | `false` | Master switch that enables or disables FITCompress. | +| `optimize_quantization` | bool | `true` | Whether FITCompress searches over quantization bit-width candidates. | +| `quantization_schedule` | List[float] | `[7., 4., 3., 2.]` | Candidate bit-widths evaluated during quantization search. | +| `pruning_schedule` | dict | `{start: 0, end: -3, steps: 40}` | Logarithmic pruning curve (base 10) with defined start, end, and step count. | +| `compression_goal` | float | `0.10` | Target compression ratio for the search procedure. 
+| `optimize_pruning` | bool | `false` | Whether FITCompress searches over pruning ratios. |
+| `greedy_astar` | bool | `true` | Disables fallback in the A* search: once a candidate is selected, all others are discarded. |
+| `approximate` | bool | `true` | Use Fisher Trace approximations to speed up FIT score estimation. |
+| `f_lambda` | float | `1` | Multiplicative factor λ in the distance function (g + λf). |
+
+
+### Quantization layers in PQuantML
+
+- `PQConv*D`: Convolutional layers.
+- `PQAvgPool*D`: Average pooling layers.
+- `PQBatchNorm*D`: BatchNorm layers.
+- `PQDense`: Linear layer.
+- `PQActivation`: Activation layers (ReLU, Tanh).
+
+```{note}
+Currently, PQuantML supports two quantization modes: layer-wise fixed-point quantization, where each tensor uses a single
+bit-width configuration, and High-Granularity Quantization (HGQ).
+```
diff --git a/docs/source/status.md b/docs/source/status.md
new file mode 100644
index 0000000..15f1aa9
--- /dev/null
+++ b/docs/source/status.md
@@ -0,0 +1,17 @@
+# PQuantML Status
+
+This page tracks the development status of PQuantML features.
+
+## Release: v1.0.0
+
+| Feature | Status | Notes |
+|---------------------------------|-----------------|-------|
+| Compression pipeline | ✅ Complete | Included in v1.0.0 |
+| Pruning methods (7 variants) | ✅ Complete | All documented |
+| Quantization (fixed-point) | ✅ Complete | Supports per-layer overrides |
+| HGQ support | ✅ Complete | Supports HGQ quantization |
+| hls4ml integration | ✅ Complete | Works in v1.0.0 |
+| FITCompress | 🚧 Partially implemented | Works through PyTorch only |
+| Documentation | 🚧 Improving | Expanded daily |
+
+
diff --git a/mkdocs.yml b/mkdocs.yml
deleted file mode 100644
index c97182f..0000000
--- a/mkdocs.yml
+++ /dev/null
@@ -1 +0,0 @@
-site_name: My Docs
diff --git a/src/pquant/configs/config_pdp.yaml b/src/pquant/configs/config_pdp.yaml
index a561393..305bbbf 100644
--- a/src/pquant/configs/config_pdp.yaml
+++ b/src/pquant/configs/config_pdp.yaml
@@ -24,9 +24,8 @@ quantization_parameters:
   layer_specific: []
   use_high_granularity_quantization: false
   use_real_tanh: false
-  use_relu_multiplier: false
-  overflow: "SAT"
-  round_mode: RND
+  use_relu_multiplier: true
+  use_symmetric_quantization: false
 fitcompress_parameters:
   enable_fitcompress : false
   optimize_quantization : true
diff --git a/src/pquant/configs/finetuning.yaml b/src/pquant/configs/finetuning.yaml
index 5b4cdb0..3baba94 100644
--- a/src/pquant/configs/finetuning.yaml
+++ b/src/pquant/configs/finetuning.yaml
@@ -46,14 +46,15 @@ fitcompress_parameters:
   approximate : true
   f_lambda : 0.5
 finetuning_parameters:
-  experiment_name: resnet_18_experiment_2
-  num_trials: 10
+  experiment_name: resnet_18_experiment_3
+  num_trials: 50
   sampler:
-    type: TPESampler
+    type: RandomSampler
   hyperparameter_search:
     numerical:
       learning_rate: [1e-5, 1e-3, 0.2]
-      batch_size: [16, 128, 32]
-      default_integer_bits: [0, 8, 1]
+      epochs: [20, 100, 20]
+      batch_size: [16, 256, 32]
+      default_integer_bits: [0, 8, 2]
     categorical:
       lr_schedule: ["cosine", "multistep"]
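The `finetuning_parameters` block above corresponds to an Optuna study over the listed hyperparameters. Below is a minimal sketch of how the `[low, high, step]` triples and the `sampler.type` field could map onto Optuna's API; the `objective` body and the `train_and_evaluate` helper are hypothetical placeholders, not PQuantML API — only the ranges and the sampler name come from `finetuning.yaml`.

```python
import optuna

def objective(trial):
    # Ranges taken from finetuning.yaml, read as [low, high, step];
    # how PQuantML applies the float step to learning_rate is an assumption.
    lr = trial.suggest_float("learning_rate", 1e-5, 1e-3)
    epochs = trial.suggest_int("epochs", 20, 100, step=20)
    # Optuna trims the upper bound (here to 240) with a warning when the
    # range is not divisible by the step.
    batch_size = trial.suggest_int("batch_size", 16, 256, step=32)
    int_bits = trial.suggest_int("default_integer_bits", 0, 8, step=2)
    lr_schedule = trial.suggest_categorical("lr_schedule", ["cosine", "multistep"])
    # Hypothetical helper: train the compressed model with these settings
    # and return the validation metric to optimize.
    return train_and_evaluate(lr, epochs, batch_size, int_bits, lr_schedule)

# sampler.type: RandomSampler selects optuna.samplers.RandomSampler
study = optuna.create_study(sampler=optuna.samplers.RandomSampler())
study.optimize(objective, n_trials=50)  # num_trials: 50
```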
From ccae31da8687b86a72cd8e978ccd3c524b46ba1c Mon Sep 17 00:00:00 2001
From: Anastasiia
Date: Mon, 15 Dec 2025 14:27:43 +0100
Subject: [PATCH 27/37] Update README.md

---
 README.md | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 78a44e4..4439106 100644
--- a/README.md
+++ b/README.md
@@ -11,7 +11,7 @@ Both PyTorch and TensorFlow models are supported.
 Layers that can be compressed: Conv2D and Linear layers, Tanh and ReLU activations for both TensorFlow and PyTorch. For PyTorch, also Conv1D.
 
-![alt text](docs/_static/pquant_transform.png)
+![alt text](docs/_static/pquant.png)
 
 The various pruning methods have different training steps, such as a pre-training step and fine-tuning step.
 PQuant provides a training function, where the user provides the functions to train and validate an epoch, and PQuant handles the training while triggering the different training steps.
@@ -31,6 +31,7 @@ A description of the pruning methods and their hyperparameters can be found [her
 
 ### Quantization parameters
 A description of the quantization parameters can be found [here](docs/quantization_parameters.md).
+For detailed documentation, check the [PQuantML documentation](https://pquantml.readthedocs.io/en/latest/).
 
 ### Authors
 - Roope Niemi (CERN)
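For the per-layer overrides referenced in the quantization-parameters documentation above, `quantization_parameters.layer_specific` accepts entries keyed by layer name. A minimal sketch, assuming the dictionary form that the FITCompress code later in this series writes into the config (`{"weight": {"integer_bits": ..., "fractional_bits": ...}}`); the layer name `conv1` is a placeholder:

```yaml
quantization_parameters:
  enable_quantization: true
  layer_specific:
    conv1:                 # placeholder; must match the model's layer naming
      weight:
        integer_bits: 3    # integer bits for this layer's weights
        fractional_bits: 4 # fractional bits for this layer's weights
```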
zJugq~Y$RhWF&HtB2|nWG7mKNWeT~(*Otbe*E~0JU-j}CbMZYd_9pL-*Md|k~porgk z+4F|q&VZ`nw=?{9hTn*}=L5gf1geJre>ua19X-0~(8)!`xkH@hf@79+Cc8gyT~gD; zy1q40U*n^QX$Cb)9|YO%hLq{~Q6or3Q%)T^2|*(6G4B1way_SjFQ4?YWI%zAdo1H= z@l8Jj!S?>^Ex?+TY{>Q|T|WXW3;q0etNfpF%S-oP#A=-QV=R2|#B$jns*n;tLwaQR z$=;rHS=UaR$DcCqm*M23WN;PXluh~W8*CIBViry!L#m$bv9JzTG47)jB^-k_r6RXU zs$XHPN(MjBmg(y~QYJ`2?>uy(VP`g6LD+qP3Q2wNKSAgO1t?Kr#Z~{0)@(~BJ8Esy zXv3Y!>dp||@Q?-2Xnj-z8Df>Vr>t9#Pk$%=k#|XYdDZvo&t-7QfyD3e!2^#%p0Ib3 zBjZo^?n=}x?XXD8Tj38{Ha1ZqLlQ$S{=1H3IxbTc;H>LMiD1+AoEzl&PtzfMJJBA3 z=Ckac&kx&Vp}Mz9`QJ{uJ#tr3eIgCZlYg{s*LiPDoRo6*Zi&>&rb2iR>3CVk{-;s&>s565s4u8y1Wk?DN{A-PUkj;Ggw- zfqC_+VUA1Ew|hkC+{n+O4=;r`hm=X(CVhU{#8F+!&VW&Oeb^b|J#nvfgNswtcxhoK zxOC1j{p>#E`eD=?v5z}r4O69ZhfqiBUW!MMys^jsEm)d1K8d%gUZn+K&D(>Mo2XME zOUnPUD^tl56rl0TzrccRPjRN3|Ld5l>z$UNmnQ>y4Bf+HQQ*&*hprPWZQRgn zFFUsMcSpafap2NRjcy6|@y5I%-}5IZ+ymFoHf&^XF}#&|eyo(+gAKtxWI#{znQyK^bZ1Z4N z-vct_Kj6m}O`3wv#JzkxzlVpP6IAwckhl83s8*LU0Ub8I@UK;U9U2t)s(($Yx`>9( z&B0u-GhS%#iYb{t!-B#L3xY)dN>DVzp;1u{#nuR-d+mSR9r&xRb8l&`{}6Q#De<&= z0jt@oKnL&#{^bH_=pPDoRMalMv-cu`w9)UyJ-2f;idf56{$)F;r~FC{u_hcQq4y*NQ$Wg@p5632iXijKJMZEC#QKw*PVC-WnYH z#9NK{X{4ync*+>-J#08HQ1kNJ$Izgk@9t9db?7hU|J~v6INqu>Z-w*w-&g&QFw~Ol z-wh>R+MW3pb-wP(+~A{-*FPn8r#6;`Hk$-$e|GkSG<1{&TFY<_2JY0yg*dCb0tYgd zM}_)u_2d*^FI&O|c)9q9e#b{DeuV1s4PXY-lCC1GKqJsdEt0n6}l z63ypIt#s)9Cx`nC7~TdoS~yM;M$HJ_IFy*UWM)7_#BOgcmI_ z@n;8?7r4c%B!2ohFaT&$e_pnV&2Pw_yJq?ih&_at8a&TChd;ZDgW>M1YvNFJ_2vV6 z+1&=kx4U--T|F*b3o&a4AQoAzNgkYIFAk*HE;h+(SYXmf2^5kH+NnQ~BKj$Jt=YO2 zki%+qLn-Cz2Z8S&%cQl_GJ2kbB*m(WOA;bOx~FY(CU$JU3fHDA9v_)yVB$Ai|HdS9 zO&-FmfiOGk1ObO-`AQM}BxL>XbcWVF zBdHdF^B$Fx+zfQ3eVUt(ZX?LWrZ7fi_)JaHihVvuH+FB8FU(QlLtC7{Esww54*Qu|K5H=mA0EC3DQ!4aeugY#`c+?;%p>xJG4WvP z!o-RtM&Q;miKPIca*7}hoqqii7r$rtoMM$n(_6FYk1?+4J+BTn2D1tbJ@f{ z1s?Xw$?x~4Cr9=Q8gU!uz|h^YFqqA}_3dMB42y_ey!>?VG3y*W`%vLO0ZX^ZdoMa$ zg<&CW)~u`P@uXL_^ra7hhpE0ZgCweVAT#;r@QuyBvYoxx7pz7gc~v_Ur_+)i7nfCB zyh-_%LuXo-Ecs|984`OCd{ul}{c3-<%!fO^AXiRzxPa4;yQ~{}qTQ!#Lu|pBmDm1;CC5me9 zr#W}ZS9w*YwHGW|1t??hR~%9=y<*9?%1N-Bridy&ybS`!nn4nnk0`;lUQ=7L^(ZYf z=avo^Fxi!25ouPRR)}$(R*apAeMjwn^2OEZU|cad{ROD}u;nOi$a6cr8hCP6aG__# zi?w%q(XfFvsxxuBJkAdP6HZGIy`~P5kr+QxnvRDbajb{}GAx7aL zX5us!#zv2rt~9LKAKK^cG}WpEW;dQu-D7{9Z0`fpR#}mzTVI9mqG<(bKnY=9XSACb zr);(~+O5Z{qLvl2Qz<{nkU~r*$9@$2CkNqQq7Qmolx|4>mIB+Tn+E`ds6GPouqJe) z`FWlbRX@X9w2(DL*?eN`-grLhb($X-Q;!6{0?@sv0DpdgKllCmbIcGGZi5)@7VuS= zf#K2{8Jfdur@z-T%6Q3?+SlH6a-SAnHNfoGl6k?z0y}P_F(d<}Cfu`f4id9wE$Dzd$|f^j@L5($O|EzJ zLXd!#L(!SQnpQ&AU1uZ^U1Q*$S$o_Q1(=LnzA>k(hM^%z6_H5(%RL5!|Bl+x7;@@q za~aNA9rit|m_t~niaGL(gfQQ8H-MF~6sN!gs_j&Iyqstd-g{74{4_dS@1xhHnhSILYaCE}VSXB5MyPs8F2fw^!NVN7cTyZUv@+@HZpt? 
z!a|pNU^W^y%IsB|Uv*Pat?;H_2t|=GT2q;NC813pJXdjgKVXD4!-C$`DlI?u$sQ?& z3#=l|Wep^vtzuh&^D_>uTI)B&J=n`i=unWQD?<-`` z8OffWoh2s2w{sO7^@G_mRJ;WxxG32!sNMuKm?gr#tl9ruCqzA&-0^eu27|7;LhMs%&(p6(GF3|Jgp3&`mz?~8UToe%*7 zhOxo6--G{5dX@Gcsx&~X8s|0*v-vDp8BPS`N>sU7GRs_hmvqtEH~bEQOuJsALGs%G zO?x$oBh95^r~$+;pb9$Z$a8{W!Rcy$o-}%nm@kK#+_rlFy+bt~9Eg_T3x_dxHLzB6`~ccz!`q@T^`)?Kub+l;CrPhT7)Q>Y0qe24*kGE}XMEsq zY!7Nq*5`c#;0p;+il~EFp>@zahynrq%UbvX-y3^YDnW^ta~t^Ps#huMf^u{9d&Pwn z>fJ81U|$FWDl-gvhS-MIIqIo=V}&8Qp&=UQ!_i%QMqr=A_h9pVnz@P}id_Ox3d$=2 zqbFbUSu*=sm;TIN{1e9H&Vcjcc|0l&oUVSTWN@url}1Nv(WHF9My1D2MK3&r{Zyp0WVxUqCmi^)QwORd&l>=xI?@Nppa#CTr`houa%m=vBH20UQ7ucz60O6<$DPv#|J0DKY z16**@wcIt_9*n@+^bg-dq6hctO@Ou>sRN0h)UQ?zjQA!nylo%xWnc(8%&_bBxwV>u zp)b>DUTmUuceS=Pc{{Jec%p1)vZt`K)^~BV$emt&v2i!>+xyZQUA>L)NuK0@iK&jz zp@7Y0R}OobHD@M%|A|Hd0aGc~l@^>AMb9p+8@GBKL8{PJVC<>143-bZF6B2cM`D!{ z1{~LXI|9@K2PfhgT(hKFZOsDw$CUGe7`A~+`hWFb(HL9}WWyz-B- z7R;t+P>lQKVuE)8n67@%zxt2C@%G@f$i+8%&;rmIR$a&g_hMO2gNuGHE5W&9_&*kR zOF5i?(O3r5w26}~&fo)0a!iKcdt)K%MSDJ$&x`sP#TV3` z|iATKAJCS|SE}mU{kx7@zx9y^03ytOm`J)?`+dGzTCuc5F z2`eUx-SWO!c=`&Rom-29c@2`>H5uhvs2%?WjiOp=`~HA zg9)T*#NMQ3E^m$89jrhLNfl7GT=ChM z7$!`{u`SWYzLH}g2Hxoa7_qy%xyW1~#n8AV!ar(Mdn5uC<18u$?GKQ95q?}F1S0)8 zjQHV-YHTV6g3a-E^C|ar^YQV`Z2G9bH{N1hyS-{af}rR=S}X1A5qrzk-LJQZ*>`1^ zYokl5f5cdMxR#3CvxC{M;JU4(#BtN0tf1ZS-LlQJ-JfIaX?n`T!m(TZ@ODYYw@@M^ zf&7$YLt|qu2_?71rZmL{rLf2<8-rtl+`Bs*y9?(U-TJnCU`0Y$r7!BQ_NXBJAlDM; zG?XhydC@bX%kli3jPV@Q>1Q<*;aA$i=;GqJM!kr_gxCII7BWnOE44+au7>#`9U_mv z{9|O)1;Qiz<3{_qoKM6k`0+D>HLodv$?%f{2aK$gb`nSP7%E_*;J!P64i(^Gi{HU4 zjY(}|FsJt>$d9l2;zf_hmG14L{QBaTc=Sm{scB#>rFDEfzsXsj&N7scajT=kMJ|Dx zMvd||_MqzIj?46@WIZcka}%wmB2R-SnSMc+IwMhAJ{F@G9;TVWl+f^p1zdHwj!I@+ zuGcljrQ}5PgsByu)A~RLEEQ^?{L)$V3(;KQ1H!G2^Dp);XrLGHPe55sTuoe=7@}8P z=6->2gH8pl-3m|C70Uuv`G~ zaJ7hWn9%lZ_rVqJ(r-;4uT=au)22VcQ<}9d$E@fW32cpdMrb>|kHI11Kejng@#Rd= zm}vXd>|vqkBjzt*qkE)sP4tYv9(TF&0n;6eh}~)V(ic1vxQ9Ux^C$qcwse zs0iF6dya;nCd!RS9M~SDk~FcpFasLL|2c{7hHe`i*lh5Su~pno1-oi6bQTcqE1+%U zs9VRjP$=@ib&DRHW!HiPcwkD6uG&pLM}tPXc9HA+llv8yV_#3_89W0EHKge=RmKv zW$GoNM^!{HswAXnzS&&wKz?eZDq^7_3A% zexcD(N7X#OfVg8HvFpDyH`d(M*{t>ymPsucmlQm}L2Vm>k+L2vHFh4?qKlLsNG%#&Bm30AyF0vvxe^() zF$j^gRtwGA>8I$X;gkA>2_g|o_jzc`ckSS$T4qkU-~AuFyGxkXKXRUov)_kF_mqpKb=ndVRQ3m;u zZjC#P-2%q*b>%HHe`sW_IWAg!-e`vV2S_5;gWH3CwuBbuF%^hp0VGOYb&+Zo$TTUw$;Q(Ed*a`8}6r^W|60v zW)fH*@JU#d-{!2`PHmoJOaFNRlQ`L5A3U*18krIWe~uceAZPHN~iz zenF44Z}6hVqC1JR#33dx(x-mITYbimY+fm)lpisD!G6{!d9)HoUsId@^Ask+)!0>7 zq!35Hn#C2Qv8c8xa7&4BVcjAkNcn(4SYI?b;$fb=*t0UBdMN}=7N}mkPq(^l1^?*7 zy*}8ImhO9f2`5PM8Ii1?+-RP01i0&aV8f2kaAL;c@91Z5G&A7+6 zl+n5D{7gu#ul0i9JWsfmWKN*k0Rt<(P%`A2S@dH0aMwCI$@C5*b~QN@D8hx}jw^hc z6+(fZg_%l+N|+eR5WH-{n5d~wd(jaD8wOdde#ADSHvMz3jI(~@db8G@*Bpf@_Ib2w z!~2jeOGYvvJ*E(oBTKyLbz{7ZW__C1tfPfGdt2Mz6&w=K+?U-UzKWPWvSP>w^iXfx zCul67nej2cl+Qv~UF$Nk^uW1Go^YI>$#t3xe}grH*G<5b4C&q{PlGRGd+FhvD4Ls= zo`#d(ozrBe_Zo2}yj>HjFhEjuf_YL->nPRO>O+U&9*DU)Z$!Z9ZHW#CL=;#(v3=jW z!HwQ0h9PHU$^vMOOQlHVXw|wL96)VAr*$Wt!`#%AmN>wd`GkZZvg&wX&{()dK;z2a)={YvBE^Ki9OY%HL}XV=!2Yk#Lw;uYWC5SXB;G)45^Um1O;WP zrf?q8E3#iVIuE_vy**oqQ<$4-l~-o^^1_u3$){&T_4hae77DVS!c<2WatY`&y{iv= zoKJz2STdkyc!g>K^HjFWUgF79OXWb--2rVxkUNxG7cd>5A@;$Jk`X9X_jJAg*L z@?=2T%6;}j;xb|=LR1@?h!p)DoQyRoZ|(w26_!{caxvpPb7_|p(LEdki+D)4tamkv zD_^>O*pn_IF>}cCE|PlRM;X7E4MHL}0QFRxY)SiMzYAI-JE@+h90od;%bfr+5&T_{ zF2nM(5#PjxL-n!{CNJUn@af|Q$&xS7Ii=L!VBVSx#!Y=khq-}Q3Cd_(%zZbL#x+#T zf5$4j3m^_E(Nqf4c*Y!>vbV-D_qSKXM~tQ51P3ua4L)tFU&KFLZ=2n8c~LFG2Ssv7 zO!Bah8kOy(fbPEuh!j^NY!e-KxC;waFg{sxHchHb=9aKELg_KSt;WU(!OQqZ-P*tp zNf73khGnakw$r#;MROhc#}BPle_G}+VV@NS?vVu=f)A}?+dkFKj~xEH+xL6IHTOMK 
zARJrM`toCkwf_5rf497{PLu73Bzh+$B>&ZckIgfd5qbnu~tz&X9hTQQ7SK|oV zTw$>oPec2;l)$PkHtoWMJT01a*|ADoZnSov5IX$=a4)`+)C1J;A!o2T|NC$Qf*7*-rj>3d`SC{Ym=OoZCjc3p0s8q zBil(YRp~{v^CJC|j{M1|xi9h*@q~ni&$Jbf{Smcq-CMCt8{5Z~(aA=Tq5ZqH338jH zdQS(tlDYu3@`N!pWkg70%BwfS5+c$vgRM+;MHdC{)yJt6!y`cP9F0CuCeOH-G{hN; zHF-ujYY{c|X1#~9aQLxS9{JXsd8vMe0(Z7zpq3J=~xxyn6OZX%L9 z!g*#=LQU^_;5j-?Xja#ILfRym6!V)>0TuLa{TsGqdIF#)#p{{S{fP03f8Z)1eV1`@pIT7^pA_5ATZI+I67#JGrm zzocMgIotTE-Edkz-8gGTRmOv5(b~Bm27?=G3IsWVc2(Hgz-F1WvV?k9!{n6=!GOp< zi>Y&Cx(2%^R-4^YXCAMt6gn8;qh}UfcR4GQ_%~~!(F`C0k+z-v1*6j>%Es+8SP^pD zKV?DVHtaH_m>~Q||>xGD3h%%UtELOCqsLlBIXYvJ(2-hAV951k)2d z1Km%&$np$G%nN_J;t}Vp8TP!8dKt(B54wC(W^1Zt@(i6DUrE`F(I(Fby&f@FD5)iS zWSZY*!%=ao=L4xIK_wvBRb61QCQm5o_Eud#-3<=QM4gwiUpHl|IPkSfELbovlHu4l0kT}(3Lh`x3li$*0Fj!ch0DrEKZ z1nhM3Q-LSh>0(eXVw_&P<@#)a&q;s8mG==LP(qo7tLFNmhSE;75LTM_Zu7acTDk7G z2FP;#yD>aTQjhz1(-THhG?Qy@vfWhjXw@F5zaKnX;fb5jz&ps%axX@{ukc`6e9dp`ls3Q=K;?$?6?s&ftm-9+X}`P?%8`*cQ%E0)!QX zh9<7S&Rn=9F#WCjJQizS{6Cj_y|8ev+-?lLJfe(p~fW2JN)W_bhny-h|M zs271a2%)xLk=7=*fm*upS-7khnQta|+EFF>>3_IkmP2hIiizGfP??l&%tlj?Y78LO zj_)D;k_!qqTgeS!XZUq?&f_^D0SLwdgZ}MLfVY zX04+Gskloncy{~K^z}z2-woN)O&^-7V{gFojCir0YSPWFW1OAi-42Xqd0)tNHYXG^ zRXkW>t%KyD+GWt|l$S)6SC~YX@^(r>^~XxL`DWPnN9%8W;zx@-8Ir`C0)u(&GZ`k~ z?1Tc;qG7;=;(>DCq*Yw{kmmt_i|%KZqSzxhKPE%N!_~ zD4|iZp6|0suNN`1pRc;baVNu*OseF`L?Dx|OigTV2a&=>ph7>e)nc)MaRJ(|cq}Mm zPEwU6c3omSn76B^#ndm4jjEcGzSy_AHpALnb)0A5$m5}rDb+SGcewXfAnc$;G7rmkw7}2Us}Y5TYs=ZJ z<8|5vN{a@JIkG@`Uej+N4s`Rv_wzNUC8UeBYVs%Q7W3Uz7l%5D8PZv7e4W3yT~?aZ zu9!-#*}E%=rTo|oP(8MVPoGIHDpGecaVz1hguNm37LEeX@||}l?UP*Q?km6SUUT1Z zmH;T%pAg2VFQvNp!js{KL|E$mEvYalr?6y@WiqC~y&u0%SnjE|e8Q-o@P)G{PwUQF zWvK>~|3NKUjB#^~Be?vM3C}m(NoXENVkyDNh2uo0jhqdb+OD^fisV#eFjuS8eCWkB z0<60ShNP5zq%E+tcsd{vVoC>(@*e~>P_CWpGwAlLNuP1*gKbNHf@e<`*Dd!P zSe0zl;mJ#hq!rs9R8V$VuvCsQq=%?UJ;;yrmKLJoy0C77E507EA-Y z!3KjnCCifc8S@Pkc|tALhQ((tkFwukygSjaBajDY3)iO6hJavfB-?Zy{+OU;b>c{k zMjx_q?o59WG5>)Ei@T!*CTUc9jCrfg)hqVP1z`<6dh3JZEaT=;9bYj_tw0at4)v`D zc>G0<`~*{(9^W}`bOK4x&*0@v=~tH#X_|Te!-BagD8ZH1jsId;!-$2I(#A-SIlwB3TmgM4z}Tq*fdQ~1w*Ug(kh94Gxvh}sG6nbqFO zH0WQwvV3^9mD8p?oP^8mE_^g(bVl!`>RC*5pp>B%q)Q~#8FaeCXZ{w0HPRmoy#0A# zer%F_$Zs-r+4BcwUFq}SwIqmX$$4(e>Xgn0edX5=u$g4E&PKx^NRa?iI1gFJN_-?^ z9vsLGU%K@3H9eKkQbJxKSGe=eKpp3@g^;}w{!_J*M9YJN&AFQVJL1Z|1r{vl-1)wHh*A6Vgh@*=kuM^!?_^DEJn z<2-CaR0iFmv@?<&?1yD-f$eN}Jzm<_63C%pCJPE8^P2$AK>iw2BWS1fw=(Y{OuZFyn3pM>-D@~YNofZj+Xga>mT z35Ol4a%ICzy&{`z;63w~Uhw%0@;+xiSq(#!s^Zz9mU`Sl!acR@EO|Us4jhRx>|vUeCHnU%3$Jal-Fa=*%>?g7^B!+0$<{{?DYPK`yOYGq z59!=gJKytJVUv|M5h{mZrJcZ~xl$gQ!+FkGY@LFA?ST)x1}c#57nPx%tsT}jYAO4> zfiNh*-e=0C@PWxws=;4L+udW!QfDzc+v1I(2?m<`N-YUE*DO9_8|kYVC3###9ajU- zbAs#s^FS0`B8O zjl!gO;0ZTTzQwC~C(5`r3Z$2Yw!v_q@zBQF;y4LvzZXqwLEhheXT`+{t0{R3%1liG z(khGYgoWZyE~o0gk=rpRXp{D(e)(HjETQFC{i}&JC&)80@-%IC^9q8@>SJ>tjotPk z;Eeh_7N_zNFHt(H!e2tT`(y+0EOKF!S5bkW^e{)8I|zb-9QCT*U8iXwxpPSP2?o$D=Y};Sy>GgfnLy3HEb8$FrOuO+rte5wXkB%nh9NbXj#(zV24>PSnb_mYYxr zbb^8*Gw=6o&rDxf`|O*qQqWOr8Qn$$wag%Y9^=^s+y=ld|>_ zqYS373c^2R+V^;ch&lA#R z8rcCwh|@`Y*YbKd(3c-*-a(OsBABrE#VZ>f{wq5l{sVM z)8((RDse3n_Kxqv-Jh;i)vKK1?K5(>EcxgN7j)N6gD(^^5XA;kv)sr<~QG&<8gb!SjZPlVyt|(Kx zh~rC$T8{$aMpux&ga}mqZ;d?T5@&5LK{_ombG{h%^h}QtGjzBFeNEtm3)?8JHboit zh>3?hK9sA8tfaJ}XHbb?;VFn0^;6l<+2ZWGn zw^_Yw4M*iE{zv9gJKCEY{++$r5utD;0nSVXaZ5P5`#coX;gLTRzdUq{H`^Bz!xP@= zv+9?sDb|!({drZAk5Q?n?@Ni_MHP;ToGzAeR^fX<&ahT!_Bg7@(kc=3*+VBq`@EpRX0vSfc`XkL8MQ+-QX- zgTXRy+a1h*|Zt zVwp>FVr*aR@haxiaA-(tOEBWYu)U-e&ZRmr^wLnRq+(<9^}C2P9M&p%`$T?Q^P}EkUkdmakx2c&mDla3!&X zKUHh!rN>`M8LCE@A+lUO0%?IOAp6Rf_rO)y;w(4iCFgA$Sh_qOAV@Rn#1A5IJSD@q z!~xzW+|-coigd(NEIAs;YAZO 
From de12da21ce024af8d597de3aae24cc8fecb5829b Mon Sep 17 00:00:00 2001
From: Anastasiia
Date: Mon, 15 Dec 2025 14:36:06 +0100
Subject: [PATCH 30/37] Update README.md

---
 README.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index 3030e0d..b676830 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,4 @@
-![alt text](docs/source/_static/pquant.png)
+![alt text](docs/source/_static/pquant_white_font.png)
 
 ## Prune and Quantize ML models
 PQuant is a library for training compressed machine learning models, developed at CERN as part of the [Next Generation Triggers](https://nextgentriggers.web.cern.ch/t13/) project.
@@ -13,7 +13,7 @@ Layers that can be compressed: Conv2D and Linear layers, Tanh and ReLU activatio
 
 The various pruning methods have different training steps, such as a pre-training step and fine-tuning step.
 PQuant provides a training function, where the user provides the functions to train and validate an epoch, and PQuant handles the training while triggering the different training steps.
-![alt text](docs/source/_static/overview.png)
+![alt text](docs/source/_static/overview_pquant.png)
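The training-function contract described in the README text above can be sketched as follows. Only the overall shape comes from the README (the user supplies per-epoch train and validate functions, and PQuant drives the schedule); the callback signatures and the entry-point name `train_compressed_model` are assumptions for illustration, so check the PQuant API for the actual names.

```python
import torch

def train_epoch(model, optimizer, dataloader, loss_func, device):
    # User-supplied: one pass over the training data.
    model.train()
    for x, y in dataloader:
        optimizer.zero_grad()
        loss = loss_func(model(x.to(device)), y.to(device))
        loss.backward()
        optimizer.step()

def validate_epoch(model, dataloader, loss_func, device):
    # User-supplied: one pass over the validation data.
    model.eval()
    with torch.no_grad():
        for x, y in dataloader:
            loss_func(model(x.to(device)), y.to(device))

# Hypothetical entry point: PQuant calls the two callbacks above while
# triggering the pre-training, pruning and fine-tuning steps configured
# for the chosen pruning method.
model = train_compressed_model(model, config, train_epoch, validate_epoch)
```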
From b43a7ccb94bf6f73e596def014db18fb0cc547dd Mon Sep 17 00:00:00 2001
From: Anastasiia
Date: Mon, 15 Dec 2025 14:54:18 +0100
Subject: [PATCH 31/37] Update README.md

---
 README.md | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/README.md b/README.md
index b676830..fab985f 100644
--- a/README.md
+++ b/README.md
@@ -9,10 +9,16 @@ To run the code, [HGQ2](https://github.com/calad0i/HGQ2) is also needed.
 PQuant replaces the layers and activations it finds with a Compressed (in the case of layers) or Quantized (in the case of activations) variant. These automatically handle the quantization of the weights, biases and activations, and the pruning of the weights.
 Both PyTorch and TensorFlow models are supported.
 
-Layers that can be compressed: Conv2D and Linear layers, Tanh and ReLU activations for both TensorFlow and PyTorch. For PyTorch, also Conv1D.
+Layers that can be compressed:
+ PQConv*D: Convolutional layers
+ PQAvgPool*D: Average pooling layers
+ PQBatchNorm*D: BatchNorm layers
+ PQDense: Linear layer
+ PQActivation: Activation layers (ReLU, Tanh)
 
 The various pruning methods have different training steps, such as a pre-training step and fine-tuning step.
 PQuant provides a training function, where the user provides the functions to train and validate an epoch, and PQuant handles the training while triggering the different training steps.
+
 
 ![alt text](docs/source/_static/overview_pquant.png)
 
@@ -24,6 +30,8 @@ Example notebook can be found [here](https://github.com/nroope/PQuant/tree/main/
 3. Loading a default pruning configuration of a pruning method.
 4. Using the configuration, the model, and the training and validation functions, call the training function of PQuant to train and compress the model.
 5. Creating a custom quantization and pruning configuration for a given model (disable pruning for some layers, different quantization bitwidths for different layers).
+ 6. Direct layer usage and layer replacement approaches.
+ 7. Usage of the fine-tuning platform.
 
 ### Pruning methods
 A description of the pruning methods and their hyperparameters can be found [here](docs/pruning_methods.md).

From c42efb3ef8cc6ca07605908917c14cfd042cf335 Mon Sep 17 00:00:00 2001
From: Anastasiia
Date: Mon, 15 Dec 2025 15:02:14 +0100
Subject: [PATCH 32/37] Update README.md

---
 README.md | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/README.md b/README.md
index fab985f..8ea0ac0 100644
--- a/README.md
+++ b/README.md
@@ -9,12 +9,13 @@ To run the code, [HGQ2](https://github.com/calad0i/HGQ2) is also needed.
 PQuant replaces the layers and activations it finds with a Compressed (in the case of layers) or Quantized (in the case of activations) variant. These automatically handle the quantization of the weights, biases and activations, and the pruning of the weights.
 Both PyTorch and TensorFlow models are supported.
 
-Layers that can be compressed:
- PQConv*D: Convolutional layers
- PQAvgPool*D: Average pooling layers
- PQBatchNorm*D: BatchNorm layers
- PQDense: Linear layer
- PQActivation: Activation layers (ReLU, Tanh)
+### Layers that can be compressed
+
+* **PQConv*D**: Convolutional layers
+* **PQAvgPool*D**: Average pooling layers
+* **PQBatchNorm*D**: BatchNorm layers
+* **PQDense**: Linear layer
+* **PQActivation**: Activation layers (ReLU, Tanh)
 
 The various pruning methods have different training steps, such as a pre-training step and fine-tuning step.
PQuant provides a training function, where the user provides the functions to train and validate an epoch, and PQuant handles the training while triggering the different training steps. From d98d53c372883d74497b21808bbdea056ee739fd Mon Sep 17 00:00:00 2001 From: Roope Niemi Date: Mon, 15 Dec 2025 16:35:44 +0100 Subject: [PATCH 33/37] FITCompress for all torch models, BatchNorm1d for Pytorch models, ebops with pruning mask and quantization effect for non-hgq use as metric --- src/pquant/core/keras/layers.py | 18 +- src/pquant/core/torch/activations.py | 2 +- src/pquant/core/torch/fit_compress.py | 282 +++++++++---------------- src/pquant/core/torch/layers.py | 247 +++++++++++++++++++--- src/pquant/core/torch/quantizer.py | 8 +- src/pquant/pruning_methods/dst.py | 12 +- tests/test_keras_compression_layers.py | 1 + 7 files changed, 356 insertions(+), 214 deletions(-) diff --git a/src/pquant/core/keras/layers.py b/src/pquant/core/keras/layers.py index 0cfdecb..af98a1a 100644 --- a/src/pquant/core/keras/layers.py +++ b/src/pquant/core/keras/layers.py @@ -363,6 +363,10 @@ def ebops(self, include_mask=False): if include_mask: mask = self.handle_transpose(self.pruning_layer.get_hard_mask(), self.weight_transpose_back, do_transpose=True) bw_ker = bw_ker * mask + _, _, f = self.get_weight_quantization_bits() + quantization_step_size = 2 ** (-f - 1) + step_size_mask = ops.cast((ops.abs(self._kernel) > quantization_step_size), self._kernel.dtype) + bw_ker = bw_ker * step_size_mask if self.parallelization_factor < 0: ebops = ops.sum( ops.depthwise_conv( @@ -535,6 +539,10 @@ def ebops(self, include_mask=False): if include_mask: mask = self.handle_transpose(self.pruning_layer.get_hard_mask(), self.weight_transpose_back, do_transpose=True) bw_ker = bw_ker * mask + _, _, f = self.get_weight_quantization_bits() + quantization_step_size = 2 ** (-f - 1) + step_size_mask = ops.cast((ops.abs(self._kernel) > quantization_step_size), self._kernel.dtype) + bw_ker = bw_ker * step_size_mask if self.parallelization_factor < 0: ebops = ops.sum( ops.conv( @@ -769,6 +777,10 @@ def ebops(self, include_mask=False): if include_mask: mask = self.handle_transpose(self.pruning_layer.get_hard_mask(), self.weight_transpose_back, do_transpose=True) bw_ker = bw_ker * mask + _, _, f = self.get_weight_quantization_bits() + quantization_step_size = 2 ** (-f - 1) + step_size_mask = ops.cast((ops.abs(self._kernel) > quantization_step_size), self._kernel.dtype) + bw_ker = bw_ker * step_size_mask if self.parallelization_factor < 0: ebops = ops.sum( ops.conv( @@ -913,6 +925,10 @@ def ebops(self, include_mask=False): if include_mask: mask = self.handle_transpose(self.pruning_layer.get_hard_mask(), self.weight_transpose_back, do_transpose=True) bw_ker = bw_ker * mask + _, _, f = self.get_weight_quantization_bits() + quantization_step_size = 2 ** (-f - 1) + step_size_mask = ops.cast((ops.abs(self._kernel) > quantization_step_size), self._kernel.dtype) + bw_ker = bw_ker * step_size_mask ebops = ops.sum(ops.matmul(bw_inp, bw_ker)) ebops = ebops * self.n_parallel / self.parallelization_factor if self.use_bias: @@ -2163,7 +2179,7 @@ def get_ebops(model): ebops = 0 for m in model.layers: if isinstance(m, (PQWeightBiasBase)): - ebops += m.ebops(include_mask=True) + ebops += m.ebops(include_mask=m.enable_pruning) elif isinstance(m, (PQAvgPoolBase, PQBatchNormalization, PQActivation)): ebops += m.ebops() return ebops diff --git a/src/pquant/core/torch/activations.py b/src/pquant/core/torch/activations.py index d6e2418..d9e9157 100644 --- 
a/src/pquant/core/torch/activations.py +++ b/src/pquant/core/torch/activations.py @@ -79,7 +79,7 @@ def check_is_built(self, input_shape): if self.built: return self.built = True - self.input_shape = input_shape + self.input_shape = (1,) + input_shape[1:] self.output_quantizer = Quantizer( k=self.k_output, i=self.i_output, diff --git a/src/pquant/core/torch/fit_compress.py b/src/pquant/core/torch/fit_compress.py index fcc646b..47e8342 100644 --- a/src/pquant/core/torch/fit_compress.py +++ b/src/pquant/core/torch/fit_compress.py @@ -10,19 +10,20 @@ from quantizers import get_fixed_quantizer if typing.TYPE_CHECKING: - from pquant.core.torch.layers import ( - CompressedLayerBase, - CompressedLayerConv2d, - CompressedLayerLinear, - QuantizedPooling, - QuantizedReLU, - QuantizedTanh, + from pquant.core.torch.activations import PQActivation # noqa: F401 + from pquant.core.torch.layers import ( # noqa: F401 + PQAvgPoolBase, + PQConv1d, + PQConv2d, + PQDense, + PQWeightBiasBase, ) + quantizer = get_fixed_quantizer(overflow_mode="SAT", round_mode="RND") -def call_fitcompress(config, trained_uncompressed_model, train_loader, loss_func): +def call_fitcompress(config, trained_uncompressed_model, train_loader, loss_func, input_shape): """ Calls the path-finding algorithm of FITcompress to find an optimal configuration for quantization (layer-wise) and pruning (global sparsity value) of weights for the uncompressed network. @@ -39,19 +40,11 @@ def call_fitcompress(config, trained_uncompressed_model, train_loader, loss_func layer-wise quantization bits for weights and activations. """ - from pquant.core.torch.layers import ( - add_layer_specific_quantization_to_model, - ) # Set the device device = "cuda" if torch.cuda.is_available() else "cpu" # Check that we have a pruning method active which has a global pruning sparsity target - if config.fitcompress_parameters.optimize_pruning: - assert config.pruning_parameters.pruning_method in [ - "pdp", - "wanda", - ], "Pruning method must be either 'pdp' or 'wanda' if FITcompress should find a global pruning target." 
def enable_quantization(model): """ @@ -65,16 +58,18 @@ def enable_quantization(model): model - current model with quantization enabled """ + from pquant.activations import PQActivation # noqa: F811 + from pquant.layers import PQAvgPoolBase, PQWeightBiasBase # noqa: F811 + for m in model.modules(): - if isinstance(m, CompressedLayerBase): + if isinstance(m, PQWeightBiasBase): m.enable_quantization = True - if m.__class__ in [QuantizedReLU, QuantizedTanh, QuantizedPooling]: + m.enable_pruning = True + if m.__class__ in [PQActivation, PQAvgPoolBase]: m.enable_quantization = True return model - def add_quantization_settings_to_config( - model, quant_info_weights, config, activ_int_bits, activ_frac_bits, pool_int_bits, pool_frac_bits - ): + def add_quantization_settings_to_config(model, quant_info_weights, config): """ @@ -98,40 +93,23 @@ def add_quantization_settings_to_config( """ - from pquant.core.torch.layers import ( - CompressedLayerConv2d, - CompressedLayerLinear, - QuantizedPooling, - QuantizedReLU, + from pquant.core.torch.activations import PQActivation # noqa: F401, F811 + from pquant.core.torch.layers import ( # noqa: F401, F811 + PQAvgPoolBase, + PQBatchNorm2d, + PQConv2d, + PQDense, ) - # Counter for activations - counter = 0 # Since in config currently a list, but dictionary makes it easier config.quantization_parameters.layer_specific = {} - for name, layer in model.named_modules(): - # For weights - if isinstance(layer, (CompressedLayerLinear, CompressedLayerConv2d)): + if isinstance(layer, (PQDense, PQConv2d)): config.quantization_parameters.layer_specific[name] = { "weight": {"integer_bits": quant_info_weights[name][0], "fractional_bits": quant_info_weights[name][1]}, } - # For activations (in this case only ReLU since we are working on res20) - if layer.__class__ in [QuantizedReLU]: - - config.quantization_parameters.layer_specific[name] = { - "integer_bits": activ_int_bits[counter], - "fractional_bits": activ_frac_bits[counter], - } - counter += 1 - - # NOTE : This is specific to res20 - if layer.__class__ in [QuantizedPooling]: - config.quantization_parameters.layer_specific[name] = { - "integer_bits": pool_int_bits, - "fractional_bits": pool_frac_bits, - } + layer.weight_quantizer.set_quantization_bits(quant_info_weights[name][0], quant_info_weights[name][1]) def print_info_bits(model): """ @@ -141,20 +119,19 @@ def print_info_bits(model): model - current model """ - from pquant.core.torch.layers import ( - CompressedLayerConv1d, - CompressedLayerConv2d, - CompressedLayerLinear, - QuantizedPooling, - QuantizedReLU, - QuantizedTanh, + from pquant.core.torch.activations import PQActivation # noqa: F811 + from pquant.core.torch.layers import ( # noqa: F811 + PQAvgPoolBase, + PQConv1d, + PQConv2d, + PQDense, ) for n, m in model.named_modules(): - if isinstance(m, (CompressedLayerConv2d, CompressedLayerConv1d, CompressedLayerLinear)): - logging.info(f"Layer {n}: {m.i_weight, m.f_weight} bits") - elif isinstance(m, (QuantizedReLU, QuantizedTanh, QuantizedPooling)): - logging.info(f"Layer {n}: {m.i, m.f} bits") + if isinstance(m, (PQConv2d, PQConv1d, PQDense)): + logging.info(f"Layer {n}: {m.get_weight_quantization_bits()} bits") + elif isinstance(m, (PQActivation, PQAvgPoolBase)): + logging.info(f"Layer {n}: {m.get_input_quantization_bits()} bits") # Save the this model's state dict (i.e. 
uncompressed version) trained_uncompressed_model_state_dict = trained_uncompressed_model.state_dict() @@ -169,6 +146,7 @@ def print_info_bits(model): criterion=loss_func, config=config, layerwise_pruning=False, + input_shape=input_shape, ) # Start A* (path-finding through compression space) @@ -176,12 +154,8 @@ def print_info_bits(model): optimal_node, quant_prune_config, trained_uncompressed_model, - activ_int_bits, - activ_frac_bits, - pool_int_bits, - pool_frac_bits, optimal_node_pruning_mask, - ) = fit_compress_computer.astar() + ) = fit_compress_computer.astar(config) logging.info("Finished FITcompress") @@ -189,23 +163,8 @@ def print_info_bits(model): # only finds optimal pruning and quantization settings, but shouldn't change the model's weights/quantization settings trained_uncompressed_model.load_state_dict(trained_uncompressed_model_state_dict) - # Only in PDP and Wanda we have a global pruning sparsity target, which can be found via fitcompress - if config.pruning_parameters.pruning_method in ["pdp", "wanda"]: - # Create copy of default sparsity - default_sparsity_target = float(config.pruning_parameters.sparsity) - - # Set the optimal sparsity target for pruning - if config.fitcompress_parameters.optimize_pruning: - config.pruning_parameters.sparsity = float(quant_prune_config["pruning_metrics"]["percentage"]) - - # If 0 was found as optimal, set to default sparsity target - if config.pruning_parameters.sparsity == 0: - # Set to the previous default value - config.pruning_parameters.sparsity = default_sparsity_target - # Enable quantization for the model - if config.quantization_parameters.enable_quantization: - trained_uncompressed_model = enable_quantization(trained_uncompressed_model) + trained_uncompressed_model = enable_quantization(trained_uncompressed_model) if config.fitcompress_parameters.optimize_quantization: # Set layer specific quantization in config file @@ -213,16 +172,9 @@ def print_info_bits(model): trained_uncompressed_model, quant_prune_config["quant_config"], config, - activ_int_bits, - activ_frac_bits, - pool_int_bits, - pool_frac_bits, ) # Now add the layer specific configuration to the model - add_layer_specific_quantization_to_model(trained_uncompressed_model, config) - - if config.fitcompress_parameters.optimize_pruning: - logging.info("Pruning Sparsity after FITcompress : ", config.pruning_parameters.sparsity) + # add_layer_specific_quantization_to_model(trained_uncompressed_model, config) logging.info("Layerwise quantization bits after FITcompress : ", config.quantization_parameters.layer_specific) @@ -305,7 +257,7 @@ def extract_config_from_node(self, layer_names): class FITcompress: - def __init__(self, model, device, dataloader, criterion, config, layerwise_pruning=False): + def __init__(self, model, device, dataloader, criterion, config, layerwise_pruning=False, input_shape=None): """ Calculate initial EF of the uncompressed model and set up quantization & pruning schedules, as well as the initial node in the compression space. @@ -340,7 +292,7 @@ def __init__(self, model, device, dataloader, criterion, config, layerwise_pruni # This marks which weights & activations can be pruned/quantized. # We can then reuse this instance and its corresponding .get_EF() function # and get_FIT() functions, passing the appropriate empirical Fisher traces. - self.fit_computer = FIT(self.model, self.device, input_spec=(3, 32, 32)) + self.fit_computer = FIT(self.model, self.device, input_spec=input_shape) # Calculate the EF trace of the uncompressed model (i.e. 
initial EF trace), only based on weights self.FeM, self.EF_trace_params_layerwise_uncompressed, _, _, _ = self.fit_computer.get_EF( @@ -495,8 +447,10 @@ def assign_parameters(self, model, params): this is not done in the original code. """ i = 0 + from pquant.layers import PQConv2d, PQDense # noqa: F811 + for _, module in model.named_modules(): - if isinstance(module, (CompressedLayerLinear, CompressedLayerConv2d)): + if isinstance(module, (PQDense, PQConv2d)): for name_param, matrix_param in list(module.named_parameters()): if name_param.endswith('weight'): matrix_param.data = nn.parameter.Parameter(params[i].to(self.device)) @@ -714,7 +668,25 @@ def add_pruning_layer_specific(self, current_node, pruning_metrics, layer_idx=No return current_node_matrices_params_layerwise - def post_fitcompress_calibration(self, best_node_quant_config, calibration_epochs=50): + def set_activation_bits(self, layer): + if layer.quantize_input: + max_abs = torch.max(torch.tensor([torch.max(torch.abs(e)) for e in layer.saved_inputs])) + k, i, f = layer.get_input_quantization_bits() + bits = k + i + f + int_bits = math.ceil(math.log2(max_abs)) + frac_bits = bits - int_bits - k + layer.saved_inputs = [] + layer.input_quantizer.set_quantization_bits(int_bits, frac_bits) + if layer.quantize_output: + max_abs = torch.max(torch.tensor([torch.max(torch.abs(e)) for e in layer.saved_outputs])) + k, i, f = layer.get_output_quantization_bits() + bits = k + i + f + int_bits = math.ceil(math.log2(max_abs)) + frac_bits = bits - int_bits - k + layer.saved_outputs = [] + layer.output_quantizer.set_quantization_bits(int_bits, frac_bits) + + def post_fitcompress_calibration(self, best_node_quant_config, config, calibration_epochs=50): """ Calibrate integer/fractional bit allocation for activations, pooling layers, and model inputs *after* the FITcompress path search. @@ -742,17 +714,19 @@ def post_fitcompress_calibration(self, best_node_quant_config, calibration_epoch pool_int_bits: Integer bits for the (single) pooling layer (res20). pool_frac_bits: Fractional bits for the (single) pooling layer (res20). 
""" - from pquant.core.torch.layers import ( - QuantizedPooling, - QuantizedReLU, + from pquant.core.torch.activations import PQActivation # noqa: F811 + from pquant.core.torch.layers import ( # noqa: F811 + PQAvgPoolBase, + PQWeightBiasBase, ) # To avoid numerical issues - eps = 1e-12 # Store input data, as we also need to quantize input (which is currently done in resnet.py of pquant-dev) data_input = [] for m in self.model.modules(): - if m.__class__ in [QuantizedReLU, QuantizedPooling]: + if isinstance(m, (PQAvgPoolBase, PQWeightBiasBase)): + m.post_fitcompress_calibration = True + elif m.__class__ == PQActivation and m.activation_name == "relu": m.post_fitcompress_calibration = True # Trigger forward pass through model @@ -767,78 +741,18 @@ def post_fitcompress_calibration(self, best_node_quant_config, calibration_epoch _ = self.model(data_batch) counter += 1 - # Get ranges of activation inputs - activation_ranges = [] - # Access the inputs to the ReLU - for name, m in self.model.named_modules(): - if m.__class__ in [QuantizedReLU]: - # Average over calibration data - avg_relu = torch.stack(m.saved_inputs, dim=0).mean(dim=0) - # Now get the activation range - range_relu = (avg_relu.min().item(), avg_relu.max().item()) - activation_ranges.append((name, range_relu)) - - # Get ranges of data input - avg_inputs = torch.stack(data_input, dim=0).mean(dim=0) - range_inputs = (avg_inputs.min().item(), avg_inputs.max().item()) - - # Get ranges of pooling layer input(s) - activation_ranges_pool = [] - # And for the pooling layer (specific to res20) for m in self.model.modules(): - if m.__class__ in [QuantizedPooling]: - # Average over calibration data - avg_pool = torch.stack(m.saved_inputs, dim=0).mean(dim=0) - # Now get the activation range - range_pool = (avg_pool.min().item(), avg_pool.max().item()) - activation_ranges_pool.append(range_pool) - - activ_int_bits = [] - activ_frac_bits = [] - for _, (name, layer) in enumerate(activation_ranges): - max_abs = np.abs(np.max(layer)) # np.abs(layer[1]) - # Find the corresponding quant config of the weight layer that belongs to this activation unit - try: - curr_quant_config = best_node_quant_config[name.replace("relu", "conv")] - except KeyError: - curr_quant_config = best_node_quant_config["conv1"] - - # curr_quant_config[0] : integer bits of weights, curr_quant_config[1] : fractional bits of weights - int_bits = ( - (curr_quant_config[0] + curr_quant_config[1] + 1) - if max(0, math.ceil(math.log2(max_abs + eps))) > (curr_quant_config[0] + curr_quant_config[1] + 1) - else max(0, math.ceil(math.log2(max_abs + eps))) - ) - activ_int_bits.append(int_bits) - # + 1 since ReLUs don't need the sign bit - frac_bits = (curr_quant_config[0] + curr_quant_config[1] + 1) - int_bits - activ_frac_bits.append(frac_bits) - - # Same logic for data input (using 7 bits as standard, 1 goes to sign) - max_abs_input = np.abs(np.max(range_inputs)) # np.abs(range_inputs[1]) - int_bits_input = ( - (7) - if max(0, math.ceil(math.log2(max_abs_input + eps))) > (7) - else max(0, math.ceil(math.log2(max_abs_input + eps))) - ) - frac_bits_input = (7) - int_bits_input - - # Same logic for pooling layer (using 7 bits as standard, 1 goes to sign) ; just one pooling layer in res20 - for _, layer in enumerate(activation_ranges_pool): - max_abs = np.abs(np.max(layer)) # np.abs(layer[1]) - int_bits = ( - (7) if max(0, math.ceil(math.log2(max_abs + eps))) > (7) else max(0, math.ceil(math.log2(max_abs + eps))) - ) - pool_int_bits = int_bits - frac_bits = (7) - int_bits - pool_frac_bits 
= frac_bits - - logging.info("SET INT BITS INPUT:", int_bits_input, " SET FRAC BITS INPUT:", frac_bits_input) - logging.info(f"INT BITS POOLING: {pool_int_bits}, FRAC BITS POOLING: {pool_frac_bits}") - - return activ_int_bits, activ_frac_bits, pool_int_bits, pool_frac_bits - - def astar(self): + if isinstance(m, PQAvgPoolBase): + m.post_fitcompress_calibration = False + self.set_activation_bits(m) + elif m.__class__ == PQActivation and m.activation_name == "relu": + m.post_fitcompress_calibration = False + self.set_activation_bits(m) + elif isinstance(m, PQWeightBiasBase): + m.post_fitcompress_calibration = False + self.set_activation_bits(m) + + def astar(self, config): """ The actual search algorithm of FITcompress, which is based on the A* algorithm. Find either the node that has an optimal configuration (i.e. compression rate lower than the goal) and break or find @@ -881,18 +795,14 @@ def astar(self): self.assign_parameters(self.model, params_quantized_unpruned) - activ_int_bits, activ_frac_bits, pool_int_bits, pool_frac_bits = self.post_fitcompress_calibration( - p_node.extract_config_from_node(self.layer_names)['quant_config'] + self.post_fitcompress_calibration( + p_node.extract_config_from_node(self.layer_names)['quant_config'], config ) return ( p_node, p_node.extract_config_from_node(self.layer_names), self.model, - activ_int_bits, - activ_frac_bits, - pool_int_bits, - pool_frac_bits, p_node_pruning_mask_layerwise, ) @@ -1011,6 +921,7 @@ def create_neighbours(self, current_node): neighbour_node_state=neighbour_node_state, neighbour_node_pruning_metrics=neighbour_node_pruning_metrics, neighbour_node_unquantized_parameters_layerwise=current_node.unquantized_weights.copy(), + neighbour_node_int_bits=neighbour_node_int_bits, neighbour_node_frac_bits=neighbour_node_frac_bits, approximate=self.config.fitcompress_parameters.approximate, ) @@ -1234,7 +1145,7 @@ def calculate_current_compression_rate(self, params_layerwise, quant_config): uncompressed = 0.0 for params_layer, quant_conf_layer in zip(params_layerwise, quant_config): # Count which parameters are non-zero, non_zero is simply the number of non-zero parameters in the current layer - non_zero = torch.sum(torch.where(torch.abs(params_layer) < 10e-8, 0, 1)).detach().cpu().numpy() + non_zero = torch.sum(torch.where(torch.abs(params_layer) < 2**-quant_conf_layer, 0, 1)).detach().cpu().numpy() active_bytes += ( non_zero * quant_conf_layer / 8 ) # Gives us the number of total bytes needed to store the parameters in the current layer @@ -1289,17 +1200,18 @@ def get_model_weights(self, model): matrices_params_sizes_layerwise (list): A list of sizes of the weight matrices for each layer of interest. layer_names (list): A list of the names of the layers of interest. """ + from pquant.layers import PQConv2d, PQDense # noqa: F811 matrices_params_layerwise = [] layer_names = [] # Iterate through all modules in the model for name, module in model.named_modules(): - if isinstance(module, (CompressedLayerLinear, CompressedLayerConv2d)): + if isinstance(module, (PQDense, PQConv2d)): layer_names.append(name) for name_param, matrix_param in list(module.named_parameters()): # Search for the weights - if name_param.endswith('weight'): + if name_param.endswith('_weight'): matrices_params_layerwise.append(matrix_param) # Set their collect flag to True (later on we can then access them easily like this) matrix_param.collect = True @@ -1325,6 +1237,7 @@ def hook_layers(self, model): Args : model (torch.nn.Module): The model to hook the layers of. 
""" + from pquant.layers import PQConv2d, PQDense # noqa: F811 def hook_inp(module, inp, outp): """ @@ -1334,7 +1247,7 @@ def hook_inp(module, inp, outp): module.activ_in = inp for _, module in model.named_modules(): - if isinstance(module, (CompressedLayerLinear, CompressedLayerConv2d)): + if isinstance(module, (PQDense, PQConv2d)): # Forward Hook to get inputs into activation function hook = module.register_forward_hook(hook_inp) self.hooks.append(hook) # Store hooks so we can remove them later @@ -1471,21 +1384,26 @@ def get_EF(self, model, data_loader, loss_func, tolerance=1e-3, min_iterations=1 per_batch_layerwise_grad_sum_squared_params = [] per_batch_layerwise_grad_sum_squared_activs = [] # Iterate over mini-batches in the data loader untill we reach the max_iterations or convergence flag is not set + batch_size = None while total_batches < max_iterations and not convergence_flag: for _, data in enumerate(data_loader): model.zero_grad() data_batch, target_batch = data[0].to(self.device), data[1].to(self.device) - batch_size = data_batch.size(0) + if batch_size is None: + batch_size = data_batch.size(0) # Only save once + if data_batch.size(0) != batch_size: + continue # Uneven batches break loop loss = self.get_loss(model, data_batch, target_batch, loss_func, mode='mini-batch') curr_batch_matrices_params_layerwise = [] curr_batch_minmax_range_params_layerwise = [] for weights in model.parameters(): - if weights.collect: - curr_batch_matrices_params_layerwise.append(weights) - curr_batch_minmax_range_params_layerwise.append( - (torch.max(weights.data) - torch.min(weights.data)).detach().cpu().numpy() - ) + if hasattr(weights, "collect"): + if weights.collect: + curr_batch_matrices_params_layerwise.append(weights) + curr_batch_minmax_range_params_layerwise.append( + (torch.max(weights.data) - torch.min(weights.data)).detach().cpu().numpy() + ) per_batch_layerwise_minmax_range_params.append(curr_batch_minmax_range_params_layerwise) diff --git a/src/pquant/core/torch/layers.py b/src/pquant/core/torch/layers.py index 0efcf14..2fa5e43 100644 --- a/src/pquant/core/torch/layers.py +++ b/src/pquant/core/torch/layers.py @@ -82,6 +82,9 @@ def __init__( self.hgq_beta = config.quantization_parameters.hgq_beta self.input_shape = None self.is_pretraining = True + self.post_fitcompress_calibration = False + self.saved_inputs = [] + self.saved_outputs = [] def check_is_built(self, input_shape): if self.built: @@ -192,6 +195,9 @@ def is_fitcompress_pretraining(self): def pre_forward(self, x): self.check_is_built(x.shape) + if self.post_fitcompress_calibration: + self.saved_inputs.append(x) + return x if self.quantize_input: x = self.quantize(x, self.input_quantizer) if self.pruning_method == "wanda": @@ -199,6 +205,9 @@ def pre_forward(self, x): return x def post_forward(self, x): + if self.post_fitcompress_calibration: + self.saved_outputs.append(x) + return x if self.quantize_output: x = self.quantize(x, self.output_quantizer) if self.pruning_method == "activation_pruning": @@ -258,12 +267,16 @@ def ebops(self, include_mask=False): bw_ker = self.weight_quantizer.get_total_bits(ops.shape(self._weight)) if include_mask: bw_ker = bw_ker * self.pruning_layer.get_hard_mask() + _, _, f = self.get_weight_quantization_bits() + quantization_step_size = 2 ** (-f - 1) + step_size_mask = (torch.abs(self._weight) >= quantization_step_size).float() + bw_ker = bw_ker * step_size_mask ebops = ops.sum(F.linear(bw_inp, bw_ker)) - ebops = ebops * self.n_parallel / self.parallelization_factor if self._bias is not 
None: bw_bias = self.bias_quantizer.get_total_bits(ops.shape(self._bias)) - size = ops.cast(ops.prod(list(self.input_shape)), self._weight.dtype) + size = ops.cast(ops.prod(self.input_shape[:-1]) * self.out_features, self._weight.dtype) ebops += ops.mean(bw_bias) * size + ebops = ebops * self.n_parallel / self.parallelization_factor return ebops @property @@ -371,6 +384,10 @@ def ebops(self, include_mask=False): bw_ker = self.weight_quantizer.get_total_bits(ops.shape(self._weight)) if include_mask: bw_ker = bw_ker * self.pruning_layer.get_hard_mask() + _, _, f = self.get_weight_quantization_bits() + quantization_step_size = 2 ** (-f - 1) + step_size_mask = (torch.abs(self._weight) > quantization_step_size).float() + bw_ker = bw_ker * step_size_mask if self.parallelization_factor < 0: ebops = ops.sum(F.conv2d(bw_inp, bw_ker, stride=self.stride, padding=self.padding, dilation=self.dilation)) else: @@ -498,6 +515,10 @@ def ebops(self, include_mask=False): bw_ker = self.weight_quantizer.get_total_bits(ops.shape(self._weight)) if include_mask: bw_ker = bw_ker * self.pruning_layer.get_hard_mask() + _, _, f = self.get_weight_quantization_bits() + quantization_step_size = 2 ** (-f - 1) + step_size_mask = (torch.abs(self._weight) > quantization_step_size).float() + bw_ker = bw_ker * step_size_mask if self.parallelization_factor < 0: ebops = ops.sum(F.conv1d(bw_inp, bw_ker, stride=self.stride, padding=self.padding, dilation=self.dilation)) else: @@ -638,7 +659,7 @@ def get_input_quantization_bits(self): def get_output_quantization_bits(self): return self.output_quantizer.get_quantization_bits() - def post_pretrain_function(self): + def post_pre_train_function(self): self.is_pretraining = False def ebops(self): @@ -809,6 +830,164 @@ def __init__( self.built = False self.final_compression_done = False self.is_pretraining = True + self.post_fitcompress_calibration = False + self.saved_inputs = [] + + def check_is_built(self, input_shape): + if self.built: + return + self.built = True + self.input_quantizer = Quantizer( + k=torch.tensor(self.k_input), + i=torch.tensor(self.i_input), + f=torch.tensor(self.f_input), + overflow=self.overflow, + round_mode=self.round_mode, + is_heterogeneous=self.use_hgq, + is_data=True, + hgq_gamma=self.hgq_gamma, + ) + self.weight_quantizer = Quantizer( + k=torch.tensor(self.k_weight), + i=torch.tensor(self.i_weight), + f=torch.tensor(self.f_weight), + round_mode=self.round_mode, + overflow=self.overflow, + is_data=False, + is_heterogeneous=self.use_hgq, + ) + self.bias_quantizer = Quantizer( + k=torch.tensor(self.k_bias), + i=torch.tensor(self.i_bias), + f=torch.tensor(self.f_bias), + round_mode=self.round_mode, + overflow=self.overflow, + is_data=False, + is_heterogeneous=self.use_hgq, + ) + if self.use_hgq: + self.input_quantizer.quantizer.build(input_shape) + shape = [1] * len(input_shape) + shape[1] = input_shape[1] + self._shape = tuple(shape) + self.input_shape = (1,) + input_shape[1:] + + def apply_final_compression(self): + self.final_compression_done = True + self._weight.data = self.weight + self._bias.data = self.bias + + def get_input_quantization_bits(self): + return self.input_quantizer.get_quantization_bits() + + def get_weight_quantization_bits(self): + return self.weight_quantizer.get_quantization_bits() + + def get_bias_quantization_bits(self): + return self.bias_quantizer.get_quantization_bits() + + def is_fitcompress_pretraining(self): + return self.is_pretraining and self.use_fitcompress + + @property + def weight(self): + if self.enable_quantization 
and not self.final_compression_done and not self.is_fitcompress_pretraining(): + return self.weight_quantizer(self._weight) + return self._weight + + @property + def bias(self): + if self.enable_quantization and not self.final_compression_done and not self.is_fitcompress_pretraining(): + return self.bias_quantizer(self._bias) + return self._bias + + def ebops(self): + bw_inp = self.input_quantizer.get_total_bits(self.input_shape) + bw_ker = ops.reshape(self.weight_quantizer.get_total_bits(self.running_mean.shape), self._shape) + bw_bias = ops.reshape(self.bias_quantizer.get_total_bits(self.running_mean.shape), self._shape) + size = ops.cast(ops.prod(list(self.input_shape)), self._weight.dtype) + ebops = ops.sum(bw_inp * bw_ker) + ops.mean(bw_bias) * size + return ebops + + def hgq_loss(self): + if self.is_pretraining or not self.use_hgq: + return ops.convert_to_tensor(0.0) + loss = self.hgq_beta * self.ebops() + loss += self.weight_quantizer.hgq_loss() + loss += self.bias_quantizer.hgq_loss() + if self.quantize_input: + loss += self.input_quantizer.hgq_loss() + return loss + + def post_pre_train_function(self): + self.is_pretraining = False + + def forward(self, input: torch.Tensor) -> torch.Tensor: + self.check_is_built(input.shape) + if self.quantize_input and self.enable_quantization: + if not self.is_fitcompress_pretraining(): + input = self.input_quantizer(input) + else: + if self.post_fitcompress_calibration: + self.saved_inputs.append(input) + return super().forward(input) + + +class PQBatchNorm1d(nn.BatchNorm1d): + + def __init__( + self, + config, + num_features: int, + eps: float = 1e-5, + momentum: typing.Optional[float] = 0.1, + affine: bool = True, + track_running_stats: bool = True, + device=None, + dtype=None, + quantize_input=True, + in_quant_bits: Tuple[T, T, T] = None, + weight_quant_bits: Tuple[T, T, T] = None, + bias_quant_bits: Tuple[T, T, T] = None, + ): + super().__init__(num_features, eps, momentum, affine, track_running_stats, device=device, dtype=dtype) + if in_quant_bits is not None: + self.k_input, self.i_input, self.f_input = in_quant_bits + else: + self.k_input = config.quantization_parameters.default_data_keep_negatives + self.i_input = config.quantization_parameters.default_data_integer_bits + self.f_input = config.quantization_parameters.default_data_fractional_bits + + if weight_quant_bits is not None: + self.k_weight, self.i_weight, self.f_weight = weight_quant_bits + else: + self.k_weight = config.quantization_parameters.default_weight_keep_negatives + self.i_weight = config.quantization_parameters.default_weight_integer_bits + self.f_weight = config.quantization_parameters.default_weight_fractional_bits + if bias_quant_bits is not None: + self.k_bias, self.i_bias, self.f_bias = bias_quant_bits + else: + self.k_bias = config.quantization_parameters.default_weight_keep_negatives + self.i_bias = config.quantization_parameters.default_weight_integer_bits + self.f_bias = config.quantization_parameters.default_weight_fractional_bits + self.overflow = config.quantization_parameters.overflow + self.round_mode = config.quantization_parameters.round_mode + self.use_hgq = config.quantization_parameters.use_high_granularity_quantization + self.hgq_gamma = config.quantization_parameters.hgq_gamma + self.hgq_beta = config.quantization_parameters.hgq_beta + self.enable_quantization = config.quantization_parameters.enable_quantization + self.use_fitcompress = config.fitcompress_parameters.enable_fitcompress + self.config = config + self.quantize_input = quantize_input 
+ self._weight = nn.Parameter(self.weight.clone()) + self._bias = nn.Parameter(self.bias.clone()) + del self._parameters["weight"] + del self._parameters["bias"] + self.built = False + self.final_compression_done = False + self.is_pretraining = True + self.post_fitcompress_calibration = False + self.saved_inputs = [] def check_is_built(self, input_shape): if self.built: @@ -904,6 +1083,9 @@ def forward(self, input: torch.Tensor) -> torch.Tensor: if self.quantize_input and self.enable_quantization: if not self.is_fitcompress_pretraining(): input = self.input_quantizer(input) + else: + if self.post_fitcompress_calibration: + self.saved_inputs.append(input) return super().forward(input) @@ -942,7 +1124,7 @@ def add_layer_specific_quantization_to_model(name, layer, config): quantize = layer_config["output"]["quantize"] layer.quantize_output = quantize - elif layer.__class__ in [PQBatchNorm2d]: + elif layer.__class__ in [PQBatchNorm2d, PQBatchNorm1d]: if name in config.quantization_parameters.layer_specific: layer_config = config.quantization_parameters.layer_specific[name] if "weight" in layer_config: @@ -1090,6 +1272,18 @@ def add_quantized_activations_to_model_layer(module, config, prefix=""): ) new_layer = add_layer_specific_quantization_to_model(full_name, new_layer, config) setattr(module, name, new_layer) + elif layer.__class__ == nn.BatchNorm1d: + new_layer = PQBatchNorm1d( + config, + num_features=layer.num_features, + eps=layer.eps, + momentum=layer.momentum, + affine=layer.affine, + track_running_stats=layer.track_running_stats, + quantize_input=quantize_input, + ) + new_layer = add_layer_specific_quantization_to_model(full_name, new_layer, config) + setattr(module, name, new_layer) else: layer = add_quantized_activations_to_model_layer(layer, config, full_name) return module @@ -1204,7 +1398,7 @@ def add_pruning_to_model(module, config, prefix=""): def apply_final_compression(module): for layer in module.modules(): - if isinstance(layer, (PQWeightBiasBase, PQBatchNorm2d)): + if isinstance(layer, (PQWeightBiasBase, PQBatchNorm2d, PQBatchNorm1d)): layer.apply_final_compression() return module @@ -1254,25 +1448,29 @@ def pre_finetune_functions(model): layer.pruning_layer.pre_finetune_function() -def post_pretrain_functions(model, config, train_loader=None, loss_func=None): +def post_pretrain_functions(model, config, train_loader=None, loss_func=None, input_shape=None): if config.fitcompress_parameters.enable_fitcompress: from pquant.core.torch.fit_compress import call_fitcompress # noqa: 811 - config, pruning_mask_importance_scores = call_fitcompress(config, model, train_loader, loss_func) - - # idx = 0 - for layer in model.modules(): - if isinstance(layer, (PQConv2d, PQConv1d, PQDense)): - layer.post_pre_train_function() - - # layer.pruning_layer.mask = pruning_mask_importance_scores[idx] - # idx += 1 - - elif isinstance(layer, (PQActivation, Quantizer)): - layer.post_pre_train_function() - elif isinstance(layer, (PQBatchNorm2d, PQAvgPool1d, PQAvgPool2d)): - layer.post_pretrain_function() + config, pruning_mask_importance_scores = call_fitcompress( + config, model, train_loader, loss_func, input_shape=input_shape + ) + idx = 0 + for layer in model.modules(): + if isinstance(layer, (PQConv2d, PQConv1d, PQDense)): + layer.post_pre_train_function() + # set_data_quantization_bits(model) + layer.pruning_layer.mask.assign(pruning_mask_importance_scores[idx]) + layer.pruning_layer.pre_finetune_function() # So mask is not updated during training anymore + idx += 1 + return + else: + for layer 
in model.modules(): + if isinstance( + layer, (PQConv2d, PQConv1d, PQDense, PQActivation, PQBatchNorm2d, PQBatchNorm1d, PQAvgPoolBase, Quantizer) + ): + layer.post_pre_train_function() if config.pruning_parameters.pruning_method == "pdp" or ( config.pruning_parameters.pruning_method == "wanda" and config.pruning_parameters.calculate_pruning_budget ): @@ -1339,12 +1537,13 @@ def get_model_losses(model, losses): for layer in model.modules(): loss = 0.0 if isinstance(layer, (PQConv2d, PQConv1d, PQDense)): - if layer.enable_pruning: + + if layer.enable_pruning and not layer.use_fitcompress: loss += layer.pruning_layer.calculate_additional_loss() if layer.use_hgq: loss += layer.hgq_loss() losses += loss - elif isinstance(layer, (PQAvgPool1d, PQAvgPool2d, PQBatchNorm2d, PQActivation)): + elif isinstance(layer, (PQAvgPool1d, PQAvgPool2d, PQBatchNorm2d, PQBatchNorm1d, PQActivation)): if layer.use_hgq: losses += layer.hgq_loss() return losses @@ -1442,7 +1641,7 @@ def get_ebops(model): ebops = 0 for m in model.modules(): if isinstance(m, (PQWeightBiasBase)): - ebops += m.ebops(include_mask=True) - elif isinstance(m, (PQAvgPoolBase, PQBatchNorm2d, PQActivation)): + ebops += m.ebops(include_mask=m.enable_pruning) + elif isinstance(m, (PQAvgPoolBase, PQBatchNorm1d, PQBatchNorm2d, PQActivation)): ebops += m.ebops() return ebops diff --git a/src/pquant/core/torch/quantizer.py b/src/pquant/core/torch/quantizer.py index 8f7e27f..eb530f7 100644 --- a/src/pquant/core/torch/quantizer.py +++ b/src/pquant/core/torch/quantizer.py @@ -34,8 +34,8 @@ def set_quantization_bits(self, i, f): if self.use_hgq: self.quantizer.quantizer._i.assign(self.quantizer.quantizer._i * 0.0 + i) self.quantizer.quantizer._f.assign(self.quantizer.quantizer._f * 0.0 + f) - self.i.data = i - self.f.data = f + self.i.data = torch.tensor(i) + self.f.data = torch.tensor(f) def post_pre_train_function(self): self.is_pretraining = False @@ -50,5 +50,7 @@ def forward(self, x): def hgq_loss(self): if self.is_pretraining or not self.use_hgq: return 0.0 - loss = (torch.sum(self.quantizer.quantizer.i) + torch.sum(self.quantizer.quantizer.f)) * self.hgq_gamma + loss = 0 + for layer_loss in self.quantizer.quantizer.losses: + loss += layer_loss return loss diff --git a/src/pquant/pruning_methods/dst.py b/src/pquant/pruning_methods/dst.py index 45774c0..f7e92bd 100644 --- a/src/pquant/pruning_methods/dst.py +++ b/src/pquant/pruning_methods/dst.py @@ -40,11 +40,12 @@ def __init__(self, config, layer_type, *args, **kwargs): self.config = config self.is_pretraining = True self.layer_type = layer_type + self.is_finetuning = False def build(self, input_shape): self.threshold_size = get_threshold_size(self.config, input_shape) self.threshold = self.add_weight(shape=self.threshold_size, initializer="zeros", trainable=True) - self.mask = self.add_weight(shape=input_shape, initializer="ones") + self.mask = self.add_weight(shape=input_shape, initializer="ones", trainable=False) def call(self, weight): """ @@ -55,6 +56,8 @@ def call(self, weight): """ if self.is_pretraining: return weight + if self.is_finetuning: + return weight * self.mask mask = self.get_mask(weight) ratio = 1.0 - ops.sum(mask) / ops.cast(ops.size(mask), mask.dtype) flag = ratio >= self.config.pruning_parameters.max_pruning_pct @@ -83,10 +86,13 @@ def get_layer_sparsity(self, weight): return ops.sum(self.get_mask(weight)) / ops.size(weight) def calculate_additional_loss(self): - return self.config.pruning_parameters.alpha * ops.sum(ops.exp(-self.threshold)) + if self.is_finetuning: + return 
0.0 + loss = self.config.pruning_parameters.alpha * ops.sum(ops.exp(-self.threshold)) + return loss def pre_finetune_function(self): - pass + self.is_finetuning = True def post_epoch_function(self, epoch, total_epochs): pass diff --git a/tests/test_keras_compression_layers.py b/tests/test_keras_compression_layers.py index aae58a0..75dcd5f 100644 --- a/tests/test_keras_compression_layers.py +++ b/tests/test_keras_compression_layers.py @@ -595,6 +595,7 @@ def test_check_activation(config_pdp, dense_input): assert model.layers[2].activation.__name__ == "tanh" config_pdp.quantization_parameters.enable_quantization = True + config_pdp.quantization_parameters.use_real_tanh = True inputs = keras.Input(shape=dense_input.shape[1:]) out = Dense(OUT_FEATURES, use_bias=False, activation="tanh")(inputs) model = keras.Model(inputs=inputs, outputs=out, name="test_dense") From 89d7eaffc9a477efe8de3e88fe2f48f4790b0ecc Mon Sep 17 00:00:00 2001 From: Roope Niemi Date: Mon, 15 Dec 2025 18:39:23 +0100 Subject: [PATCH 34/37] make activation pruning a continuous method instead of a single one-shot --- src/pquant/pruning_methods/activation_pruning.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/pquant/pruning_methods/activation_pruning.py b/src/pquant/pruning_methods/activation_pruning.py index fd7d597..2cd4f2c 100644 --- a/src/pquant/pruning_methods/activation_pruning.py +++ b/src/pquant/pruning_methods/activation_pruning.py @@ -18,7 +18,6 @@ def __init__(self, config, layer_type, *args, **kwargs): self.activations = None self.total = 0.0 self.is_pretraining = True - self.done = False self.threshold = ops.convert_to_tensor(config.pruning_parameters.threshold) self.t_start_collecting_batch = self.config.pruning_parameters.t_start_collecting_batch @@ -37,7 +36,7 @@ def collect_output(self, output, training): linear/convolution layer are over 0. 
Every t_delta steps, uses these values to update the mask to prune those channels and neurons that are active less than a given threshold """ - if self.done or not training or self.is_pretraining: + if not training or self.is_pretraining: # Don't collect during validation return if self.activations is None: @@ -54,6 +53,7 @@ def collect_output(self, output, training): pct_active = self.activations / self.total self.t = 0 self.total = 0 + self.batches_collected = 0 if self.layer_type == "linear": self.mask = ops.expand_dims(ops.cast((pct_active > self.threshold), pct_active.dtype), 1) else: @@ -65,7 +65,6 @@ def collect_output(self, output, training): else: self.mask = ops.reshape(pct_active_above_threshold, list(pct_active_above_threshold.shape) + [1, 1, 1]) self.activations *= 0.0 - self.done = True def call(self, weight): # Mask is only updated every t_delta step, using collect_output if self.is_pretraining: From b9e13230256cb8673e806924c98ccdfa09d481ff Mon Sep 17 00:00:00 2001 From: Anastasiia Date: Mon, 15 Dec 2025 23:21:07 +0100 Subject: [PATCH 35/37] Updated training config (#19) Took away redundant parameters from training section --- src/pquant/configs/finetuning.yaml | 8 ++++---- src/pquant/core/finetuning.py | 6 ++++-- src/pquant/data_models/finetuning_model.py | 1 + src/pquant/data_models/training_model.py | 14 -------------- 4 files changed, 9 insertions(+), 20 deletions(-) diff --git a/src/pquant/configs/finetuning.yaml b/src/pquant/configs/finetuning.yaml index 3baba94..7ccf9a3 100644 --- a/src/pquant/configs/finetuning.yaml +++ b/src/pquant/configs/finetuning.yaml @@ -17,7 +17,6 @@ training_parameters: optimizer: sgd plot_frequency: 100 label_smoothing: 0 - model: "resnet18" dataset: "cifar10" l2_decay: 0.001 momentum: 0.9 @@ -32,7 +31,7 @@ training_parameters: fine_tuning_epochs: 2 pretraining_epochs: 0 pruning_first: false - rewind: post-ticket-search + rewind: never rounds: 2 save_weights_epoch: 2 fitcompress_parameters: @@ -46,8 +45,9 @@ fitcompress_parameters: approximate : true f_lambda : 0.5 finetuning_parameters: - experiment_name: resnet_18_experiment_3 - num_trials: 50 + experiment_name: resnet_18_experiment_2 + model_name: resnet18 + num_trials: 10 sampler: type: RandomSampler hyperparameter_search: diff --git a/src/pquant/core/finetuning.py b/src/pquant/core/finetuning.py index 5c429b3..681a826 100644 --- a/src/pquant/core/finetuning.py +++ b/src/pquant/core/finetuning.py @@ -10,6 +10,7 @@ import yaml from pydantic import BaseModel, Field, field_validator + from pquant.core import constants from pquant.data_models.finetuning_model import BaseFinetuningModel from pquant.data_models.fitcompress_model import BaseFitCompressModel @@ -294,11 +295,12 @@ def objective(self, trial, model, train_func, valid_func, **kwargs): signature = infer_signature(sample_input.cpu().numpy(), sample_output.detach().cpu().numpy()) mlflow.log_text(yaml.safe_dump(self.get_dict()), "config.yaml") + model_name = self.config.finetuning_parameters.model_name log_model_by_backend( model=trained_model, - name=self.config.training_parameters.model, + name=model_name, signature=signature, - registered_model_name=self.config.training_parameters.model, + registered_model_name=model_name, ) return objectives if len(objectives) > 1 else objectives[0] diff --git a/src/pquant/data_models/finetuning_model.py b/src/pquant/data_models/finetuning_model.py index c2bf1ba..a39ec62 100644 --- a/src/pquant/data_models/finetuning_model.py +++ b/src/pquant/data_models/finetuning_model.py @@ -13,6 +13,7 @@ class 
Sampler(BaseModel): class BaseFinetuningModel(BaseModel): experiment_name: str = Field(default="experiment_1") + model_name: str = Field(default="example_model") sampler: Sampler = Field(default_factory=Sampler) num_trials: int = Field(default=0) hyperparameter_search: HyperparameterSearch = Field(default_factory=HyperparameterSearch) diff --git a/src/pquant/data_models/training_model.py b/src/pquant/data_models/training_model.py index 3db4e54..78d0c37 100644 --- a/src/pquant/data_models/training_model.py +++ b/src/pquant/data_models/training_model.py @@ -8,20 +8,6 @@ class BaseTrainingModel(BaseModel): epochs: int = Field(default=200) fine_tuning_epochs: int = Field(default=0) pretraining_epochs: int = Field(default=50) - pruning_first: bool = Field(default=False) rewind: str = Field(default="never") rounds: int = Field(default=1) save_weights_epoch: int = Field(default=-1) - batch_size: int = Field(default=128) - optimizer: str = Field(default="sgd") - plot_frequency: int = Field(default=100) - label_smoothing: float = Field(default=0.0) - model: str = Field(default="resnet18") - dataset: str = Field(default="cifar10") - l2_decay: float = Field(default=0.001) - momentum: float = Field(default=0.9) - lr_schedule: Literal["cosine", "step", "none"] = Field(default="cosine") - cosine_tmax: int = Field(default=200) - lr: float = Field(default=0.001) - prune_ratio: float = Field(default=10.0) - default_integer_bits: int = Field(default=0) From 42df07304ee41205dfe079738038338635424875 Mon Sep 17 00:00:00 2001 From: Roope Niemi Date: Fri, 19 Dec 2025 14:31:01 +0100 Subject: [PATCH 36/37] fitcompress pruning method --- src/pquant/configs/config_ap.yaml | 5 +- src/pquant/configs/config_autosparse.yaml | 3 +- src/pquant/configs/config_cs.yaml | 5 +- src/pquant/configs/config_dst.yaml | 3 +- src/pquant/configs/config_fitcompress.yaml | 57 ++++++++++++++++++++++ src/pquant/configs/config_mdmm.yaml | 3 +- src/pquant/configs/config_pdp.yaml | 12 ++--- src/pquant/configs/config_wanda.yaml | 3 +- src/pquant/core/constants.py | 3 ++ src/pquant/core/finetuning.py | 10 +++- src/pquant/core/torch/fit_compress.py | 19 ++++++-- src/pquant/core/torch/layers.py | 2 +- src/pquant/core/torch/train.py | 4 +- src/pquant/core/utils.py | 4 ++ src/pquant/data_models/finetuning_model.py | 6 ++- src/pquant/data_models/pruning_model.py | 12 +++-- src/pquant/pruning_methods/autosparse.py | 5 +- src/pquant/pruning_methods/fitcompress.py | 52 ++++++++++++++++++++ 18 files changed, 174 insertions(+), 34 deletions(-) create mode 100644 src/pquant/configs/config_fitcompress.yaml create mode 100644 src/pquant/pruning_methods/fitcompress.py diff --git a/src/pquant/configs/config_ap.yaml b/src/pquant/configs/config_ap.yaml index fd46147..a03cc73 100644 --- a/src/pquant/configs/config_ap.yaml +++ b/src/pquant/configs/config_ap.yaml @@ -1,9 +1,8 @@ pruning_parameters: - disable_pruning_for_layers: # Disable pruning for these layers, even if enable_pruning is true - - + disable_pruning_for_layers: [] # Disable pruning for these layers, even if enable_pruning is true enable_pruning: true pruning_method: activation_pruning - threshold: 0.15 + threshold: 0.2 threshold_decay: 0. 
t_delta: 100 t_start_collecting_batch: 100 diff --git a/src/pquant/configs/config_autosparse.yaml b/src/pquant/configs/config_autosparse.yaml index 6f66bf1..57ad178 100644 --- a/src/pquant/configs/config_autosparse.yaml +++ b/src/pquant/configs/config_autosparse.yaml @@ -3,8 +3,7 @@ pruning_parameters: alpha_reset_epoch: 90 autotune_epochs: 10 backward_sparsity: false - disable_pruning_for_layers: # Disable pruning for these layers, even if enable_pruning is true - - + disable_pruning_for_layers: [] # Disable pruning for these layers, even if enable_pruning is true enable_pruning: true pruning_method: autosparse threshold_decay: 0 diff --git a/src/pquant/configs/config_cs.yaml b/src/pquant/configs/config_cs.yaml index 65b085d..e36c4d4 100644 --- a/src/pquant/configs/config_cs.yaml +++ b/src/pquant/configs/config_cs.yaml @@ -1,11 +1,10 @@ pruning_parameters: - disable_pruning_for_layers: # Disable pruning for these layers, even if enable_pruning is true - - + disable_pruning_for_layers: [] # Disable pruning for these layers, even if enable_pruning is true enable_pruning: true final_temp: 200 pruning_method: cs threshold_decay: 1.0e-09 - threshold_init: 0 + threshold_init: 0. quantization_parameters: default_weight_keep_negatives: 1. default_weight_integer_bits: 0. diff --git a/src/pquant/configs/config_dst.yaml b/src/pquant/configs/config_dst.yaml index 074c64f..060ab6c 100644 --- a/src/pquant/configs/config_dst.yaml +++ b/src/pquant/configs/config_dst.yaml @@ -1,7 +1,6 @@ pruning_parameters: alpha: 5.0e-06 - disable_pruning_for_layers: # Disable pruning for these layers, even if enable_pruning is true - - + disable_pruning_for_layers: [] # Disable pruning for these layers, even if enable_pruning is true enable_pruning: true max_pruning_pct: 0.99 pruning_method: dst diff --git a/src/pquant/configs/config_fitcompress.yaml b/src/pquant/configs/config_fitcompress.yaml new file mode 100644 index 0000000..fd9489c --- /dev/null +++ b/src/pquant/configs/config_fitcompress.yaml @@ -0,0 +1,57 @@ +pruning_parameters: + disable_pruning_for_layers: [] # Disable pruning for these layers, even if enable_pruning is true + enable_pruning: true + threshold_decay: 0. + pruning_method: fitcompress + min_frac_bits: 2. +quantization_parameters: + default_weight_keep_negatives: 1. + default_weight_integer_bits: 0. + default_weight_fractional_bits: 7. + default_data_keep_negatives: 0. + default_data_integer_bits: 0. + default_data_fractional_bits: 7. + quantize_input: true + quantize_output: false + enable_quantization: true + hgq_beta: 1e-5 + hgq_gamma: 0.0003 + hgq_heterogeneous: True + layer_specific: [] + use_high_granularity_quantization: false + use_real_tanh: false + use_relu_multiplier: true + use_symmetric_quantization: false + overflow: SAT + round_mode: RND +fitcompress_parameters: + enable_fitcompress : true + optimize_quantization : true + quantization_schedule : [7., 6., 5., 4.,3.] 
+ pruning_schedule : {start : 0, end : -2, steps : 40} + compression_goal : 0.007 + optimize_pruning : true + greedy_astar : true + approximate : true + f_lambda : 1 +training_parameters: + epochs: 200 + fine_tuning_epochs: 0 + pretraining_epochs: 100 + pruning_first: false + rewind: never + rounds: 1 + save_weights_epoch: -1 +batch_size: 256 +cosine_tmax: 200 +gamma: 0.1 +l2_decay: 0.0001 +label_smoothing: 0.0 +lr: 0.1 +lr_schedule: cosine +milestones: +- -1 +- -1 +momentum: 0.9 +optimizer: adam +plot_frequency: 100 diff --git a/src/pquant/configs/config_mdmm.yaml b/src/pquant/configs/config_mdmm.yaml index 4a79da6..2ed0ac6 100644 --- a/src/pquant/configs/config_mdmm.yaml +++ b/src/pquant/configs/config_mdmm.yaml @@ -3,8 +3,7 @@ pruning_parameters: pruning_method: mdmm enable_pruning: true - disable_pruning_for_layers: - - + disable_pruning_for_layers: [] # Disable pruning for these layers, even if enable_pruning is true constraint_type: "Equality" target_value: 0.0 metric_type: "UnstructuredSparsity" diff --git a/src/pquant/configs/config_pdp.yaml b/src/pquant/configs/config_pdp.yaml index 305bbbf..197d60a 100644 --- a/src/pquant/configs/config_pdp.yaml +++ b/src/pquant/configs/config_pdp.yaml @@ -1,6 +1,5 @@ pruning_parameters: - disable_pruning_for_layers: # Disable pruning for these layers, even if enable_pruning is true - - + disable_pruning_for_layers: [] # Disable pruning for these layers, even if enable_pruning is true enable_pruning: true epsilon: 0.015 pruning_method: pdp @@ -24,13 +23,14 @@ quantization_parameters: layer_specific: [] use_high_granularity_quantization: false use_real_tanh: false - use_relu_multiplier: true - use_symmetric_quantization: false + use_relu_multiplier: false + overflow: SAT + round_mode: RND fitcompress_parameters: enable_fitcompress : false optimize_quantization : true quantization_schedule : [7.,4.,3.,2.,1.] 
-  pruning_schedule : {start : 0, end : -3, steps : 40}
+  pruning_schedule : {start : 0, end : -2, steps : 40}
   compression_goal : 0.10
   optimize_pruning : true
   greedy_astar : true
@@ -39,7 +39,7 @@ fitcompress_parameters:
 training_parameters:
   epochs: 100
   fine_tuning_epochs: 20
-  pretraining_epochs: 20
+  pretraining_epochs: 1
   pruning_first: false
   rewind: never
   rounds: 1
diff --git a/src/pquant/configs/config_wanda.yaml b/src/pquant/configs/config_wanda.yaml
index 46273ac..4755ed2 100644
--- a/src/pquant/configs/config_wanda.yaml
+++ b/src/pquant/configs/config_wanda.yaml
@@ -1,7 +1,6 @@
 pruning_parameters:
   calculate_pruning_budget: True
-  disable_pruning_for_layers: # Disable pruning for these layers, even if enable_pruning is true
-  -
+  disable_pruning_for_layers: [] # Disable pruning for these layers, even if enable_pruning is true
   enable_pruning: true
   M: null
   N: null
diff --git a/src/pquant/core/constants.py b/src/pquant/core/constants.py
index 7f23200..7fa5482 100644
--- a/src/pquant/core/constants.py
+++ b/src/pquant/core/constants.py
@@ -5,6 +5,7 @@
     AutoSparsePruningModel,
     CSPruningModel,
     DSTPruningModel,
+    FITCompressPruningModel,
     MDMMPruningModel,
     PDPPruningModel,
     WandaPruningModel,
@@ -22,6 +23,7 @@
 PRUNING_MODEL_REGISTRY = {
     "cs": CSPruningModel,
     "dst": DSTPruningModel,
+    "fitcompress": FITCompressPruningModel,
     "pdp": PDPPruningModel,
     "wanda": WandaPruningModel,
     "autosparse": AutoSparsePruningModel,
@@ -44,6 +46,7 @@
 try:
     import mlflow
+
     LOG_FUNCTIONS_REGISTRY = {
         "torch": mlflow.pytorch.log_model,
         "tensorflow": mlflow.tensorflow.log_model,
diff --git a/src/pquant/core/finetuning.py b/src/pquant/core/finetuning.py
index 681a826..4d4fb94 100644
--- a/src/pquant/core/finetuning.py
+++ b/src/pquant/core/finetuning.py
@@ -10,7 +10,6 @@
 import yaml
 from pydantic import BaseModel, Field, field_validator
-
 from pquant.core import constants
 from pquant.data_models.finetuning_model import BaseFinetuningModel
 from pquant.data_models.fitcompress_model import BaseFitCompressModel
@@ -20,6 +19,7 @@
     BasePruningModel,
     CSPruningModel,
     DSTPruningModel,
+    FITCompressPruningModel,
     MDMMPruningModel,
     PDPPruningModel,
     WandaPruningModel,
@@ -70,6 +70,7 @@ class TuningConfig(BaseModel):
         Union[
             CSPruningModel,
             DSTPruningModel,
+            FITCompressPruningModel,
             PDPPruningModel,
             WandaPruningModel,
             AutoSparsePruningModel,
@@ -368,6 +369,13 @@ def dst_config():
     return TuningConfig.load_from_file(path)


+def fitcompress_config():
+    yaml_name = "config_fitcompress.yaml"
+    parent = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+    path = os.path.join(parent, "configs", yaml_name)
+    return TuningConfig.load_from_file(path)
+
+
 def mdmm_config():
     yaml_name = "config_mdmm.yaml"
     parent = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
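A quick sanity check of the new fitcompress_config() loader added above. This is a sketch, not part of the patch: it assumes the package is importable and that attribute names follow config_fitcompress.yaml.

    from pquant.core.finetuning import fitcompress_config

    config = fitcompress_config()  # defaults loaded from configs/config_fitcompress.yaml
    assert config.pruning_parameters.pruning_method == "fitcompress"
    assert config.fitcompress_parameters.enable_fitcompress
    print(config.pruning_parameters.min_frac_bits)  # 2.0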
diff --git a/src/pquant/core/torch/fit_compress.py b/src/pquant/core/torch/fit_compress.py
index 47e8342..9794931 100644
--- a/src/pquant/core/torch/fit_compress.py
+++ b/src/pquant/core/torch/fit_compress.py
@@ -485,10 +485,11 @@ def add_quantization(self, model, params, quant_config, reset=False):
             max_abs = torch.max(torch.abs(param_layer.detach().cpu()))
             eps = 1e-12
             int_bits = max(0, math.ceil(math.log2(max_abs + eps)))
-            fractional_bits = quant_config[idx] - int_bits
+            fractional_bits = max(1, quant_config[idx] - int_bits)
             all_int_bits.append(int_bits)
             all_frac_bits.append(fractional_bits)
+            logging.info(f"Weights int bits={int_bits}, fractional bits={fractional_bits}")

         for idx, param_layer in enumerate(params):
             # If reset is inactive, we quantize weights given the unquantized, but possibly pruned weights
@@ -674,17 +675,20 @@ def set_activation_bits(self, layer):
         k, i, f = layer.get_input_quantization_bits()
         bits = k + i + f
         int_bits = math.ceil(math.log2(max_abs))
-        frac_bits = bits - int_bits - k
+        frac_bits = max(self.config.pruning_parameters.min_frac_bits, bits - int_bits - k)
         layer.saved_inputs = []
         layer.input_quantizer.set_quantization_bits(int_bits, frac_bits)
+        logging.info(f"Set input quantization bits from {i}, {f} to {int_bits}, {frac_bits}")
+
         if layer.quantize_output:
             max_abs = torch.max(torch.tensor([torch.max(torch.abs(e)) for e in layer.saved_outputs]))
             k, i, f = layer.get_output_quantization_bits()
             bits = k + i + f
             int_bits = math.ceil(math.log2(max_abs))
-            frac_bits = bits - int_bits - k
+            frac_bits = max(self.config.pruning_parameters.min_frac_bits, bits - int_bits - k)
             layer.saved_outputs = []
             layer.output_quantizer.set_quantization_bits(int_bits, frac_bits)
+            logging.info(f"Set output quantization bits from {i}, {f} to {int_bits}, {frac_bits}")

     def post_fitcompress_calibration(self, best_node_quant_config, config, calibration_epochs=50):
         """
@@ -1145,7 +1149,14 @@ def calculate_current_compression_rate(self, params_layerwise, quant_config):
         uncompressed = 0.0
         for params_layer, quant_conf_layer in zip(params_layerwise, quant_config):
             # Count which parameters are non-zero, non_zero is simply the number of non-zero parameters in the current layer
-            non_zero = torch.sum(torch.where(torch.abs(params_layer) < 2**-quant_conf_layer, 0, 1)).detach().cpu().numpy()
+            int_bits = max(0, math.ceil(math.log2(torch.max(torch.abs(params_layer)))))
+            frac_bits_round_threshold = max(
+                1, quant_conf_layer - int_bits - 2
+            )  # minus one bit for the sign and one guard bit lost to rounding
+            non_zero = (
+                torch.sum(torch.where(torch.abs(params_layer) < 2**-frac_bits_round_threshold, 0, 1)).detach().cpu().numpy()
+            )
+            logging.info(f"Bit budget {quant_conf_layer}: values below 2**-{frac_bits_round_threshold} round to zero")
             active_bytes += (
                 non_zero * quant_conf_layer / 8
             )  # Gives us the number of total bytes needed to store the parameters in the current layer
diff --git a/src/pquant/core/torch/layers.py b/src/pquant/core/torch/layers.py
index 2fa5e43..6e67655 100644
--- a/src/pquant/core/torch/layers.py
+++ b/src/pquant/core/torch/layers.py
@@ -1408,7 +1408,7 @@ def call_post_round_functions(model, rewind, rounds, r):
             rewind_weights_functions(model)
     elif rewind == "post-ticket-search" and r == rounds - 1:
         rewind_weights_functions(model)
-    else:
+    elif r != rounds - 1:
         post_round_functions(model)


diff --git a/src/pquant/core/torch/train.py b/src/pquant/core/torch/train.py
index 71aae0c..a4e940a 100644
--- a/src/pquant/core/torch/train.py
+++ b/src/pquant/core/torch/train.py
@@ -10,7 +10,7 @@
 )


-def train_model(model, config, train_func, valid_func, **kwargs):
+def train_model(model, config, train_func, valid_func, input_shape=None, **kwargs):
     """
     Generic training loop, user provides training and validation functions
     """
@@ -25,7 +25,7 @@
         valid_func(model, epoch=epoch, **kwargs)
         post_epoch_functions(model, e, training_config.pretraining_epochs)
         epoch += 1
-    post_pretrain_functions(model, config, kwargs['trainloader'], kwargs['loss_func'])
+    post_pretrain_functions(model, config, kwargs['trainloader'], kwargs['loss_function'], input_shape=input_shape)
     for r in range(training_config.rounds):
         for e in range(training_config.epochs):
             model.train()
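The bit-budget arithmetic in set_activation_bits above splits a fixed total width into sign, integer, and fractional bits, with the fractional part now floored at min_frac_bits. A standalone worked example of that split (split_bits is an illustrative helper, not part of pquant; the zero-clamp on int_bits mirrors what add_quantization does):

    import math

    def split_bits(total_bits, max_abs, keep_negatives=1, min_frac_bits=2):
        # Integer bits cover the observed dynamic range; whatever remains of
        # the budget goes to fractional bits, floored at min_frac_bits.
        int_bits = max(0, math.ceil(math.log2(max_abs)))
        frac_bits = max(min_frac_bits, total_bits - int_bits - keep_negatives)
        return int_bits, frac_bits

    print(split_bits(8, 5.3))    # (3, 4): 3 integer bits cover |x| < 8, 1 sign bit, 4 fractional
    print(split_bits(4, 100.0))  # (7, 2): the range exhausts the budget, so the floor kicks in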
diff --git a/src/pquant/core/utils.py b/src/pquant/core/utils.py
index 29852eb..fe9e575 100644
--- a/src/pquant/core/utils.py
+++ b/src/pquant/core/utils.py
@@ -6,6 +6,7 @@
 from pquant.pruning_methods.autosparse import AutoSparse
 from pquant.pruning_methods.cs import ContinuousSparsification
 from pquant.pruning_methods.dst import DST
+from pquant.pruning_methods.fitcompress import FITCompress
 from pquant.pruning_methods.mdmm import MDMM
 from pquant.pruning_methods.pdp import PDP
 from pquant.pruning_methods.wanda import Wanda
@@ -27,6 +28,8 @@ def get_pruning_layer(config, layer_type):
         return Wanda(config, layer_type)
     elif pruning_method == "mdmm":
         return MDMM(config, layer_type)
+    elif pruning_method == "fitcompress":
+        return FITCompress(config)


 def get_default_config(pruning_method: str):
@@ -35,6 +38,7 @@
         "ap",
         "cs",
         "dst",
+        "fitcompress",
         "pdp",
         "wanda",
         "mdmm",
diff --git a/src/pquant/data_models/finetuning_model.py b/src/pquant/data_models/finetuning_model.py
index a39ec62..c51be21 100644
--- a/src/pquant/data_models/finetuning_model.py
+++ b/src/pquant/data_models/finetuning_model.py
@@ -1,11 +1,13 @@
-from typing import Dict, List, Optional, Union, Any
+from typing import Any, Dict, List, Optional, Union
+
 from pydantic import BaseModel, Field
-from typing_extensions import Literal
+

 class HyperparameterSearch(BaseModel):
     numerical: Dict[str, List[Union[int, float]]] = Field(default_factory=dict)
     categorical: Optional[Dict[str, List[str]]] = Field(default_factory=dict)
+

 class Sampler(BaseModel):
     type: str = Field(default="TPESampler")
     params: Dict[str, Any] = Field(default_factory=dict)
diff --git a/src/pquant/data_models/pruning_model.py b/src/pquant/data_models/pruning_model.py
index 7acfacb..3c89828 100644
--- a/src/pquant/data_models/pruning_model.py
+++ b/src/pquant/data_models/pruning_model.py
@@ -1,5 +1,6 @@
 from enum import Enum
 from typing import List, Literal, Optional
+
 from pydantic import BaseModel, Field


@@ -12,7 +13,7 @@ class BasePruningModel(BaseModel):
 class CSPruningModel(BasePruningModel):
     pruning_method: Literal["cs"] = "cs"
     final_temp: int = Field(default=200)
-    threshold_init: int = Field(default=0)
+    threshold_init: float = Field(default=0)


 class DSTPruningModel(BasePruningModel):
@@ -23,6 +24,11 @@
     threshold_type: str = Field(default="channelwise")


+class FITCompressPruningModel(BasePruningModel):
+    pruning_method: Literal["fitcompress"] = "fitcompress"
+    min_frac_bits: float = Field(default=2.0)
+
+
 class PDPPruningModel(BasePruningModel):
     pruning_method: Literal["pdp"] = "pdp"
     epsilon: float = Field(default=0.015)
@@ -33,8 +39,8 @@

 class WandaPruningModel(BasePruningModel):
     pruning_method: Literal["wanda"] = "wanda"
-    M: Optional[int] = Field(default=None),
-    N: Optional[int] = Field(default=None),
+    M: Optional[int] = Field(default=None)
+    N: Optional[int] = Field(default=None)
     sparsity: float = Field(default=0.9)
     t_delta: int = Field(default=100)
     t_start_collecting_batch: int = Field(default=100)
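The Wanda fields above originally ended with trailing commas, which makes the right-hand side a one-element tuple rather than a Field default. A minimal standalone pydantic sketch of the pitfall (class names are illustrative; pydantic v2 does not validate defaults unless asked, so the tuple typically slips through silently):

    from typing import Optional

    from pydantic import BaseModel, Field

    class WithComma(BaseModel):
        M: Optional[int] = Field(default=None),  # trailing comma: the default is a 1-tuple

    class Fixed(BaseModel):
        M: Optional[int] = Field(default=None)

    print(Fixed().M)      # None
    print(WithComma().M)  # (FieldInfo(...),) -- a tuple wrapping the Field, not None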
diff --git a/src/pquant/pruning_methods/autosparse.py b/src/pquant/pruning_methods/autosparse.py
index a3e33e3..a8b7cca 100644
--- a/src/pquant/pruning_methods/autosparse.py
+++ b/src/pquant/pruning_methods/autosparse.py
@@ -66,6 +66,7 @@ def __init__(self, config, layer_type, *args, **kwargs):
         global BACKWARD_SPARSITY
         BACKWARD_SPARSITY = config.pruning_parameters.backward_sparsity
         self.is_pretraining = True
+        self.is_finetuning = False

     def build(self, input_shape):
         self.threshold_size = get_threshold_size(self.config, input_shape)
@@ -85,6 +86,8 @@ def call(self, weight):
         """
         if self.is_pretraining:
             return weight
+        if self.is_finetuning:
+            return self.mask * weight
         else:
             mask = self.get_mask(weight)
             self.mask = ops.reshape(mask, weight.shape)
@@ -110,7 +113,7 @@ def calculate_additional_loss(*args, **kwargs):
         return 0

     def pre_finetune_function(self):
-        pass
+        self.is_finetuning = True

     def post_round_function(self):
         pass
diff --git a/src/pquant/pruning_methods/fitcompress.py b/src/pquant/pruning_methods/fitcompress.py
new file mode 100644
index 0000000..07fa30a
--- /dev/null
+++ b/src/pquant/pruning_methods/fitcompress.py
@@ -0,0 +1,52 @@
+import keras
+
+
+@keras.saving.register_keras_serializable(package="Layers")
+class FITCompress(keras.layers.Layer):
+    def __init__(self, config, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        if isinstance(config, dict):
+            from pquant.core.finetuning import TuningConfig
+
+            config = TuningConfig.load_from_config(config)
+        self.config = config
+        self.is_pretraining = True
+        self.is_finetuning = False
+
+    def build(self, input_shape):
+        self.mask = self.add_weight(shape=input_shape, initializer="ones", trainable=False)
+        super().build(input_shape)
+
+    def call(self, weight):
+        # The mask is all ones until FITCompress assigns importance-based values
+        return self.mask * weight
+
+    def get_hard_mask(self, weight=None):
+        return self.mask
+
+    def pre_epoch_function(self, epoch, total_epochs):
+        pass
+
+    def calculate_additional_loss(self, *args, **kwargs):
+        return 0
+
+    def pre_finetune_function(self):
+        self.is_finetuning = True
+
+    def post_round_function(self):
+        pass
+
+    def post_pre_train_function(self):
+        self.is_pretraining = False
+
+    def post_epoch_function(self, epoch, total_epochs):
+        pass
+
+    def get_config(self):
+        config = super().get_config()
+        config.update(
+            {
+                "config": self.config.get_dict(),
+            }
+        )
+        return config

From 0b167018b42c610046cdfb06f163973be9471b84 Mon Sep 17 00:00:00 2001
From: Roope Niemi
Date: Fri, 19 Dec 2025 16:20:05 +0100
Subject: [PATCH 37/37] optional dependencies, fix depthwise bug

---
 pyproject.toml                         |  6 +++++-
 src/pquant/core/keras/layers.py        | 11 ++++++-----
 tests/test_keras_compression_layers.py |  7 +++++--
 3 files changed, 16 insertions(+), 8 deletions(-)

diff --git a/pyproject.toml b/pyproject.toml
index 33c6a9b..4dd443f 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -23,7 +23,11 @@ classifiers = [
   "Topic :: Software Development :: Libraries :: Python Modules",
 ]
 dynamic = [ "version" ]
-dependencies = [ "keras>=3", "pyyaml>=6.0.1", "quantizers>=1.1", "torch>=2.1", "pydantic>=2.0"]
+dependencies = [ "hgq2", "keras>=3", "optuna", "pydantic>=2", "pyyaml>=6.0.1", "quantizers>=1.1" ]
+optional-dependencies.all = [ "pytest>=8.4", "tensorflow>=2.17,<=2.20", "torch>=2.1" ]
+optional-dependencies.tensorflow = [ "tensorflow>=2.17,<=2.20" ]
+optional-dependencies.test = [ "pytest>=8.4" ]
+optional-dependencies.torch = [ "torch>=2.1" ]
 urls.repository = "https://github.com/nroope/PQuant"

 [tool.setuptools]
diff --git a/src/pquant/core/keras/layers.py b/src/pquant/core/keras/layers.py
index af98a1a..b615cbe 100644
--- a/src/pquant/core/keras/layers.py
+++ b/src/pquant/core/keras/layers.py
@@ -16,6 +16,7 @@
     ReLU,
     SeparableConv2D,
 )
+from keras.src.layers.input_spec import InputSpec
 from keras.src.ops.operation_utils import compute_pooling_output_shape

 from pquant.core.keras.activations import PQActivation
@@ -276,10 +277,8 @@
         self.use_bias = use_bias
         self.strides = strides
         self.dilation_rate = dilation_rate
-        # self.weight_transpose = (2, 3, 0, 1)
-        # self.weight_transpose_back = (2, 3, 1, 0)
-        self.weight_transpose = (3, 2, 0, 1)
-        self.weight_transpose_back = (2, 3, 1, 0)
+        self.weight_transpose = (2, 3, 0, 1)
+        self.weight_transpose_back = (2, 3, 0, 1)
         self.data_transpose = (0, 3, 1, 2)
         self.do_transpose_data = self.data_format == "channels_last"
         self._weight = None
@@ -288,10 +287,12 @@
     def build(self, input_shape):
         super().build(input_shape)
         if self.data_format == "channels_last":
+            channel_axis = -1
             input_channel = input_shape[-1]
         else:
+            channel_axis = 1
             input_channel = input_shape[1]
-
+        self.input_spec = InputSpec(min_ndim=self.rank + 2, axes={channel_axis: input_channel})
         depthwise_shape = self.kernel_size + (
             input_channel,
             self.depth_multiplier,
diff --git a/tests/test_keras_compression_layers.py b/tests/test_keras_compression_layers.py
index 75dcd5f..87dfa57 100644
--- a/tests/test_keras_compression_layers.py
+++ b/tests/test_keras_compression_layers.py
@@ -735,15 +735,18 @@ def test_ap_conv1d_channels_last_transpose(config_ap, conv1d_input):


 def test_ap_depthwiseconv2d_channels_last_transpose(config_ap, conv2d_input):
+    if keras.backend.image_data_format() == "channels_last":
+        conv2d_input = ops.transpose(conv2d_input, (0, 3, 1, 2))
     keras.backend.set_image_data_format("channels_first")
     inp = ops.reshape(ops.linspace(0, 1, ops.size(conv2d_input)), conv2d_input.shape)
     inputs = keras.Input(shape=inp.shape[1:])
-    out = DepthwiseConv2D(KERNEL_SIZE, use_bias=False, padding="same")(inputs)
+    out = DepthwiseConv2D(KERNEL_SIZE, use_bias=False, padding="same", data_format="channels_first")(inputs)
     model_cf = keras.Model(inputs=inputs, outputs=out, name="test_dwconv2d")
     model_cf = add_compression_layers(model_cf, config_ap, inp.shape)
-    weight_cf = model_cf.layers[1]._kernel
+    weight_cf = model_cf.layers[1]._kernel
+    model_cf.summary()
     post_pretrain_functions(model_cf, config_ap)
     model_cf(inp, training=True)
     model_cf(inp, training=True)
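The depthwise fix above uses (2, 3, 0, 1) for both weight_transpose and weight_transpose_back, which works because that permutation is an involution: applying it twice restores the original axis order. A standalone check with keras ops (the kernel shape is illustrative):

    import numpy as np
    from keras import ops

    w = np.random.rand(3, 3, 8, 1)  # (kh, kw, in_channels, depth_multiplier)
    perm = (2, 3, 0, 1)
    wt = ops.convert_to_numpy(ops.transpose(w, perm))     # channels-first view, shape (8, 1, 3, 3)
    back = ops.convert_to_numpy(ops.transpose(wt, perm))  # the same perm maps it back
    assert back.shape == w.shape and np.array_equal(back, w)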