From 186fc4fc73907878f6e4adb8b6a54451906f25c0 Mon Sep 17 00:00:00 2001 From: Beck <164545837+validbeck@users.noreply.github.com> Date: Mon, 9 Feb 2026 12:14:43 -0800 Subject: [PATCH 01/14] code_samples > use_cases & updated notebooks/README.md --- notebooks/README.md | 6 +++--- .../model_validation/xgb_model_champion.pkl | Bin 117919 -> 0 bytes .../agents/agentic_ai_template.yaml | 0 .../agents/banking_test_dataset.py | 0 .../agents/banking_tools.py | 4 ++-- .../agents/document_agentic_ai.ipynb | 0 .../agents/utils.py | 0 .../capital_markets_template.yaml | 0 .../quickstart_option_pricing_models.ipynb | 0 ...kstart_option_pricing_models_quantlib.ipynb | 0 .../customer_churn_full_suite.py | 6 +++--- ...del_source_code_documentation_template.yaml | 0 .../quickstart_code_explainer_demo.ipynb | 0 .../credit_risk/CreditRiskData.xlsx | Bin .../application_scorecard_executive.ipynb | 0 .../application_scorecard_full_suite.ipynb | 0 .../application_scorecard_with_bias.ipynb | 0 .../application_scorecard_with_ml.ipynb | 0 .../ScoreBandDiscriminationMetrics.py | 0 .../document_excel_application_scorecard.ipynb | 0 .../custom_tests/implement_custom_tests.ipynb | 0 .../integrate_external_test_providers.ipynb | 0 .../validate_application_scorecard.ipynb | 0 .../nlp_and_llm/datasets/bbc_text_cls.csv | 0 .../datasets/bbc_text_cls_reference.csv | 0 .../cnn_dailymail_100_with_predictions.csv | 0 .../cnn_dailymail_500_with_predictions.csv | 0 .../nlp_and_llm/datasets/sentiments.csv | 0 .../datasets/sentiments_with_predictions.csv | 0 .../foundation_models_integration_demo.ipynb | 0 .../foundation_models_summarization_demo.ipynb | 0 .../nlp_and_llm/gen_ai_rag_template.yaml | 0 .../hugging_face_integration_demo.ipynb | 0 .../hugging_face_summarization_demo.ipynb | 0 .../nlp_and_llm/llm_summarization_demo.ipynb | 0 .../nlp_and_llm/prompt_validation_demo.ipynb | 0 .../nlp_and_llm/rag_benchmark_demo.ipynb | 0 .../nlp_and_llm/rag_documentation_demo.ipynb | 0 
...lication_scorecard_ongoing_monitoring.ipynb | 0 ...art_customer_churn_ongoing_monitoring.ipynb | 0 .../ongoing_monitoring/xgboost_model.model | Bin .../quickstart_regression_full_suite.ipynb | 0 .../quickstart_time_series_full_suite.ipynb | 0 .../quickstart_time_series_high_code.ipynb | 0 44 files changed, 8 insertions(+), 8 deletions(-) delete mode 100644 notebooks/code_samples/model_validation/xgb_model_champion.pkl rename notebooks/{code_samples => use_cases}/agents/agentic_ai_template.yaml (100%) rename notebooks/{code_samples => use_cases}/agents/banking_test_dataset.py (100%) rename notebooks/{code_samples => use_cases}/agents/banking_tools.py (99%) rename notebooks/{code_samples => use_cases}/agents/document_agentic_ai.ipynb (100%) rename notebooks/{code_samples => use_cases}/agents/utils.py (100%) rename notebooks/{code_samples => use_cases}/capital_markets/capital_markets_template.yaml (100%) rename notebooks/{code_samples => use_cases}/capital_markets/quickstart_option_pricing_models.ipynb (100%) rename notebooks/{code_samples => use_cases}/capital_markets/quickstart_option_pricing_models_quantlib.ipynb (100%) rename notebooks/{code_samples => use_cases}/code_explainer/customer_churn_full_suite.py (98%) rename notebooks/{code_samples => use_cases}/code_explainer/model_source_code_documentation_template.yaml (100%) rename notebooks/{code_samples => use_cases}/code_explainer/quickstart_code_explainer_demo.ipynb (100%) rename notebooks/{code_samples => use_cases}/credit_risk/CreditRiskData.xlsx (100%) rename notebooks/{code_samples => use_cases}/credit_risk/application_scorecard_executive.ipynb (100%) rename notebooks/{code_samples => use_cases}/credit_risk/application_scorecard_full_suite.ipynb (100%) rename notebooks/{code_samples => use_cases}/credit_risk/application_scorecard_with_bias.ipynb (100%) rename notebooks/{code_samples => use_cases}/credit_risk/application_scorecard_with_ml.ipynb (100%) rename notebooks/{code_samples => 
use_cases}/credit_risk/custom_tests/ScoreBandDiscriminationMetrics.py (100%) rename notebooks/{code_samples => use_cases}/credit_risk/document_excel_application_scorecard.ipynb (100%) rename notebooks/{code_samples => use_cases}/custom_tests/implement_custom_tests.ipynb (100%) rename notebooks/{code_samples => use_cases}/custom_tests/integrate_external_test_providers.ipynb (100%) rename notebooks/{code_samples => use_cases}/model_validation/validate_application_scorecard.ipynb (100%) rename notebooks/{code_samples => use_cases}/nlp_and_llm/datasets/bbc_text_cls.csv (100%) rename notebooks/{code_samples => use_cases}/nlp_and_llm/datasets/bbc_text_cls_reference.csv (100%) rename notebooks/{code_samples => use_cases}/nlp_and_llm/datasets/cnn_dailymail_100_with_predictions.csv (100%) rename notebooks/{code_samples => use_cases}/nlp_and_llm/datasets/cnn_dailymail_500_with_predictions.csv (100%) rename notebooks/{code_samples => use_cases}/nlp_and_llm/datasets/sentiments.csv (100%) rename notebooks/{code_samples => use_cases}/nlp_and_llm/datasets/sentiments_with_predictions.csv (100%) rename notebooks/{code_samples => use_cases}/nlp_and_llm/foundation_models_integration_demo.ipynb (100%) rename notebooks/{code_samples => use_cases}/nlp_and_llm/foundation_models_summarization_demo.ipynb (100%) rename notebooks/{code_samples => use_cases}/nlp_and_llm/gen_ai_rag_template.yaml (100%) rename notebooks/{code_samples => use_cases}/nlp_and_llm/hugging_face_integration_demo.ipynb (100%) rename notebooks/{code_samples => use_cases}/nlp_and_llm/hugging_face_summarization_demo.ipynb (100%) rename notebooks/{code_samples => use_cases}/nlp_and_llm/llm_summarization_demo.ipynb (100%) rename notebooks/{code_samples => use_cases}/nlp_and_llm/prompt_validation_demo.ipynb (100%) rename notebooks/{code_samples => use_cases}/nlp_and_llm/rag_benchmark_demo.ipynb (100%) rename notebooks/{code_samples => use_cases}/nlp_and_llm/rag_documentation_demo.ipynb (100%) rename notebooks/{code_samples 
=> use_cases}/ongoing_monitoring/application_scorecard_ongoing_monitoring.ipynb (100%) rename notebooks/{code_samples => use_cases}/ongoing_monitoring/quickstart_customer_churn_ongoing_monitoring.ipynb (100%) rename notebooks/{code_samples => use_cases}/ongoing_monitoring/xgboost_model.model (100%) rename notebooks/{code_samples => use_cases}/regression/quickstart_regression_full_suite.ipynb (100%) rename notebooks/{code_samples => use_cases}/time_series/quickstart_time_series_full_suite.ipynb (100%) rename notebooks/{code_samples => use_cases}/time_series/quickstart_time_series_high_code.ipynb (100%) diff --git a/notebooks/README.md b/notebooks/README.md index a641f3e27..f84c2cfb9 100644 --- a/notebooks/README.md +++ b/notebooks/README.md @@ -5,9 +5,9 @@ Our [Jupyter Notebook](https://jupyter.org/) code samples are designed to showca Sample notebooks are organized into the following folders: * `notebooks/quickstart` — Quick guides to get you started with ValidMind -* `notebooks/tutorials` — Get step-by-step instructions and learn about ValidMind concepts in depth -* `notebooks/how_to` — Learn how to use specific ValidMind features, for example how to list all test suites -* `notebooks/code_samples` — Showcase end-to-end functionality for documenting or validating models +* `notebooks/tutorials` — High-level courses covering usage of ValidMind for specific roles or concepts, such as model development +* `notebooks/how_to` — Learn how to use specific ValidMind features, for example how to run tests +* `notebooks/use_cases` — Demo end-to-end use cases for ValidMind, such as documenting or validating specific kinds of models * `notebooks/code_sharing` — Share your own notebooks or document code internally ## Getting started diff --git a/notebooks/code_samples/model_validation/xgb_model_champion.pkl b/notebooks/code_samples/model_validation/xgb_model_champion.pkl deleted file mode 100644 index 
bfe7349b673d72f09dc01d5d1640bd3d62a5aa07..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 117919 zcmeFa2VB)k`u|U{fS^jcN*qAPISv~h=@@5rHk%X|=_=xgs zbGIffyqbG?d3!haXxYr&-P_GeE^eF<7CAUFF<8@j`2qH$9D->}k1#5<&<$Q4xW9}$w66dw^B6A~Sf(5s%DZd_hjwUC&Yq>!lK$e6I; zVX=SfmDSLQcv@SS=2U_c5@|IFzbjBJB07$CEM`FBz~7~tM8?q4QBlzmF^Rv+FrWhy z{ks(Nfw9pM!Lh^Wz$OfgjQd@lQCw1dTx>$b?-Gq;<04{$L&ARF)vAfHw88%)**KDR zfL@BO|^B>P6~DR*0+^St+t|q(P)%WR*yx zNaM(=k<}tiB26RBBF!T$A}u4UN7jh6inK0oRMY3knEtVUtInEM_fP#NS5{He`|m0$ z9{}Uf5SW+|kpl)MCiJQwUjAp5A`SjhL2a|jpB=Z?UftI|MtWvy!xT>&uk`$QTpfLX zo4U!3TWZsGyQOg-KNBW*4oh0xV@sz!RYu;hRwsG&R{N{EsD1jLQde5sP+jXxMJaIF z0BKgq8Fug6bcrcx%62;v$W9dPl)c*>V7ZohO2rCGWk5v><+=?=)#;=AsH4lUG~4m4 zx{dDvHCH`FZM)~2+M~o?+CJ;By570D(m8_->WUXzk-gK$NNY<+lA|vorK(kTDh=x$ zlddc@Bu`x%OD^Z$u&a(VWqK66V2eJCRC-?Ts+@K88MC*nGrM`Bcm%StGuY>GwbQeh6zORkFBE zmK$#-GhM@r77QX%wM8G9K9Ra?yBnvO{QM76o%_$pcHK&3+$k5ujn`{Q@r*P@VV$?W zPv3814V;@0=MGPqY5mVI!|YqJ)wrF^wIjJ~7UrEe%JDPQnYur|4by0iC$!QWcdJ9g+2y{4UCKmkB^A?ehi_rx)gm% zMM`CgA;pMNm106MqgYU?Q>-X8DYYrKl)4mqNS(v;#(X+~*IX-V;> z_)-{(lEQtXfyS63Qd&{kP})%fC>zhM z;y9c&dIN48H=JVz=%6=?Qu#$MQuzpTOWbf$CVy9q<8#dV@(V8_G&)>I+4jB;G6>f} zcRFt5zRl3#7Dd^=X0kyP% zIRk98fN2BjXsJtMz)p(;je&Yv>eCo#pv95KfRh$y8UqcrxX>7I)zXN@Kw~XUXbd#f z;znb@U5f{efo57fX$&;i(t^f7OD$eB2E4WS&=>$S3@EgKNd{Ozo)FdI;c@JTw-1;D zaNXfr!}W!03fB>?9b7NCMsQu=TEO_jn8P^3*xFHG9AWHWykLxATwpBV{NbG8JmK8n zeBd0Q-_T#^BckwN!f7l}{3xv{Z7IxV zM2VurP~s@@ltju<%5cg^N;0(jGX@w~2&M^fQFI9%78?^D3Cr@|-$K}R`n{wMoS^J6 zp*@?uWuuJJY4S*EZ65~!w!cR%L;BjiC+?#LvzEc4s+Vyb%dV^oX%I8pzI4&#(|x7h z&{M2_YP2$0oT?~kGJttFs6>-*AJmNLyzUD;utOTO!BqODfEenPk7s%YCO|o8Lq^jE zlr?j_u7S1zbwWPGa{HQNVISJ0bpV96s~i%S^|L>)SN&~f(Di3z*zcdlQ|P<_10frK zm`8E|BLaBO z0C`aEd!P*{3o!t7%l&{@ZeMQe`#R*~XtAF!$OfazZ_@%~pZ|ReCZhx@s|<{wN?!4d$>QoWE5y3_eqv7P z7tvyZuQ1NJop6vDB{nfyCDg85D)jN5k5g;x5a-mXBvjp6S+p8+8tYZ|7SXkL!pZ)X zg*WNz@qT4}!EyCryr*5BkiX+TE-?2OdL|nPPcH`WR;B9&vusB2&_lS<)gk-`9Dwz+ z4~eP!Z3SN43S0Pm#3zqT7v9|-i?2jwaSsMQ=MNS(7o1-#6>Ahs!AD4tu&~7@q59EK 
zVfDPvIAe7>|Fv@lzPaiH?%LfOd+X8~b6W`AdJ6blQ!C!K3BpsN#^L$9I||0N%7hH( zFG8n&i}CJ5<{0H27u@;;pi?6-A9F07zu6_9e-v98T@?m!(Mff9+O{xY$$YWZk}lX~ zZDYaOQo-Npy_?tbh`^kfhC{pL3$32s5N19x$DX!NaC)B^T$?z}>=d+Ww)q`jfqQW$`*;ph-5KF{L%0W1Nh?%?cE{MjhvCI+)`D z#;F`VmVa`17=J!18LvHXi7#?<=bw*8{4&3JJRJjmzN;ty^_v$q-O^X^XdJ~K(2c@N zV^ru#QXP(_^J{xO!hUNk%4e)b_WHd@X9W5iRrDefBf`rSI9)gS5KXpL94^n+bN7(* zHS(QCr6|wUclT&6XPbiJ0J3!d;Gq#=RE91urR$!O@;?h@W26>67f7F`e`8(y=BO3@ zhDbYiKV*_>4q-1hXsY&4yQ{7ias(T4&bEYISA0-fwcL}e`H)IJoC#$6OtNBvo7`Z>H}+w#+`OuEX(FoaCNyT( zsOOQT&m38+;M=UP<9zmZ->vNDd$~$xcTFiRJXV^#BV8JPXNDviY><{seJh17F_*fW zLXwA5wuJUiA}+HkOO>L}N`AZBOMx>o$-~SW64$aC$@Y0G`DSNH)puu;GmE;aaviLt z&biY_@r}x4u-ybj^@3yMm`flT>f4%h;HLXdSbd&sPc&gZuUn}&XY_`7>y$&@pK~Hv zOX`yO7T1f1M4cdy5+H35@xyX=hoXM~z z5k~CboFpkd{3uh$r=j9YEW=i)ev4!~FJpEe$WqjE3uZo?eXJ_ndzjgjf1g=BEQM`S zxheDT`c>9*;6v8-xWJBTe@vc4e=Wgm%zyZfD32OQX1emuk0mqcu6)~vTRc2Az@q>> z62O`j)~T>Ih4m<`L1EnqYfV^R!kQA+k@l2&lm-+hN<)e(1=fGC=7V(}tnFYu2WvQ3 zx4}9M)@DiyN5K@4(u&fC(vA{9=|Jg3=|bsB=}zfE=|$;738sWl!YC1x0hCC}U`jM4 zmNJBrKuMwuql}=8qNMzc>z(h2uIQuN|qzkSLN zo^8julzjaC-sa}`&lqwLHJ+}M>@xGm`=?9^#G$ktki z$+Oo{#&p+Fu4|a8oKU3%ndOx!-}ScREZ3HxITAwAP6#b--ogqJUkY~pqNoZrn(0TOT=zs|UKt2KVwaBX;Ff9PcDgc-n09F!! zp%!_?1m*`Y)>4(m0LU-^m?Z!t830TaV6Fws6#!BV0HzB7*#-bJ27rVEfJp;D&H=!@ z0U+%FVCn#nc>pkbfSne58Uqem>d_deucZNv0Y@!PGzOftG^8=$qQ#ZQKqD=UX$&;c z(v-%4n-+H(10GtM(HQX5(wxRX3oR{a40vhrrZM27#h1naNK*iqY5-)ae?&?fL(9QD zfa?#}8?G^2TezNZ{otCxwSsE{*8{Erj5~}qjISL9#uCO4#tg;@#sGF60s&%3lLO?*KwA(X27?7RC@@}d=r$ITCgJ78N{BOw6^bI-2%hJs zQ?c$0_xaO7{8?E?sNLv2u62atFK=+=-?bPkMhvB8E^oq>97FN3J{5!qc}MVtrG?z> zItBdYhu(rsMn}h3hV_Om{ z=ta8V%l;DtwnMbgVpId1{&@wyyLqHgq_*epJ!;GuHtNIeh?s|0%xr=ib}tiZ=GI1s zpECU2CwaVW*Ry>3nkBqPWpl1Wtq1(oE{;Om{!>MdE6auYncMjEMHhIJF8%qvv_D?= z^l{kfih_5=3}JE&L+qct6New!z^!b45(>*op>?? z>7@&NuRX!I!Yd`#xxAM59oL?};`xAQvmfA^Rfppp?dD)Fm(6%|Oh3FnrZzt2CSf0! 
zSfTRsiSp#A^6wJcTFNc{--~Tk9%o4xw!C6ioikR~VYf>QDvu#;bR*fY^p)x+a~nvv z9iB@ikFQIO*7PQ?9~&@BEjqE48s20V$8}J*$SzeIf7>9vTYNxT`t_!CVsHj|EFO~3 zhvzK2rIo~%`YC3Y&SDfU&g}TeWb&$03%21N59U*t9lQNxnY#61Z*`NfMQSXLQX5ut zSL+t7l}b&fN)LDGN}UfikRF7ukam9PrM6A+BkLTeNUEhtBDgzmnTrfgY3ig+8TVd#8v;#!pCeA-5;(kPECckU?} zO|h4(Ud&PKUe-pD<#3BEZy6&wyqzoQPT3(E+`{WEtKdDK8`)0208iERmV43j9sC?hDND8IxuItUPcEVc;+q4jawjE=bb>1^KI zpdTM(n$P#z>dtf55oaB$gU0Hnau2QZxxm?FyvhCsT&D-YT^KjmpwdPOG1) zTxynzx)yCjS1;)x)yP$x&JKSxIm?A>rW40yRvFA$Oz4a16mCSRk1ug^r~pYx4sPoejG?#>^$~qJhXOq6{?+FGPE%hN7I7 zb-8gl!_lPK2I&5d zLx6PH2Ehy3f_>Nq(M#U`UI>!~GWmEYC$|B@nGB>sfV$*1pez7sAjm--ve*Xu5FlT! zTa)lJ{!ca_e_63@aZIwGQPeuWmT_&xj&tqB^^@9)svZx;%^7P%>zWIMP(4#|z)*eR zkyuYm*q9?$)JLM{*a*>W!VBTetmXJ&d?%rieIMLC{jk_!&|+ND%S5<#eHOnz=%iTR z|BbNl04rQSVvkoo7>PIXtXOB1t)O=biza9L3DxiK7FHLC+^xfp@RjB>1cP@W*m%VY ze$S0W{!x8V@H+gEchKPtQmzk8I8pci8&yy|Cw`>Y$yqY1jWXy^>V<~ruT z3{(gfyG(>;K?!)%k|BJx`i^+&2$8Gmm5ok}yv|o2S%@7fY{Eu{HMo$}A>55+m-yKo zGle#;eTBo}k2t;eXZXONB>qvITX?PadtvtHFSyP{Z+_d_JwnIh`Q>x>uP3&VlXmx{ z`PQeHf)RVvdQtzJlaZstv#|}|9aql5r=@=#*uxq69AIwNaE0;>Or8H^rmQ=~l zue(%pQyH`Wv%R#BM36`CPLT_pIo96zn6yOQL~=MdT#DSVUBRtCEqQnI-1$}Q&fE-X z$-b_tN7Oz&nLD?#S;LD#Y)+t~RQ;U+TgmVd%Ukqf&)?fG#ZPD;wVilPazE9K#3^n` zIj#q#`qNKJerpDjuQw)5UPyVoz1c$|;WYxo<*NE?Rm*MS6<2%Z zGDXHa$cxo?nN(duj_x0%y60&`cCBKHPk5|REh;#|1P`7^#s|Cm$ZL<`d zg8?|t{M1379lyl3Ut-%YvF(@G_6IkRKTeVj$NwO<$uEn-3!UG;EJ{7kz8BjxLKX;E z;8{k?fuN-k-Ke0Z5y!MIi)utJS_aZI-eq)K6Kg!!)_u`#^fi)|pN$zEaK--#jZd$A3~DG;z^feQpH2#^lq7>HdU*vVxe4f15s4Yr{Sq<=4h z$@?@}Qx?k0Vi}~#0-1cgoF@xtPzJW49su$I*oI>uK)a9zu`J9%{8IaNFi{!~yHC3VQc;;mnK)!k3ikLWh)(LWRVZc+uWo_`>N+ z;#8yFV$Iz7xG?R#kiOd&f4p%{m@)4ar!oz|dvkjUHQO}eDqS+1ILnZA0&(mFbK4_B$)_)gK?>LswShL$am__RqSD zH5!y)hxVrU9d5?&j>UM^{fS&7DV4vKbC}P+x{A-ZW2;);$PjjZH|y)>vSe31P8Klbz?>LQ9oaYDVD6!(Y|WSL2aZLbyKDJ1ySVP z?Rrw$p@Sr+bEeeTY7dF`I4!k(zLc~wxIhwac{AaGpUIw+H<@v7?kG-EIS32%?Uz*!zw$$C&N-`p=n4>+tq$!Ui^7gW^ zq*Jw-&)g{nQdTXN| zTql`%xnPazvBd`F$kC3hzg>XT<=!o+!iCY&@SDpdkHooB{hg_jp5r>n{n2&O>+>jP 
z;(@`EkU^bD8Z9G>44owdQJ>v#J;e9T;UQ9o+on1Q7DOWF(b{NZ(yQtrz--Fs+n=7^O$AJhA9@>tWeD;j$^pCZ;Cc6 zcQ9eTrzML~nM}&W*5%Xom)Q1C+Mb{)|rM1_uFw^{f==~5hKyNOvG`M;*=ru zm)8Qs5@hI>iYC3WK&J~4nmoiCrHxKSb8Q--9aXMyRd&8W6Hmr*-N(#8^WA%KRaMu~ z*qx4?`MMzGn+qk%k_9EG$@NrJ!TvegdZ<8|qfABQb8S?S?S#e<#OYTvKwO_>G%ENd zimK>_Jc4vk{ViwEfXW{c+sqEx>1{?^nMvqNe+Se;w-O4pzl*$tZJbBId?bvrQGT## z%^8e-iyS7JBAbYXh&eou>+QUp^Y{dhz&~}6XU8wG?U&g0OKkfkw*A2k%pFEa2_0}%{f@ze@# z8c#R*byF(Rfe@w<)wEu48m}|D4fVse2G|B+4LsziD5fb-kAr?dnkJq03+mP!4|Rf& zrWN#L&%EEf+kWd4)Ti-kqh&PtR9yV^X#d-t_0PpN5XeAO``)un7R=yy5UfDB0-*}F z;f^hfSn@W+Ac{d55WZj=L^C)}4&NUGc~BMtl!tBk*zfn@7`ZRdhAf~#x?CTWhdekQ z(&T!ftSqd_b<(8r<3LROse^1l{<30QUi~NH6Yi3q_sS>2=Zik#dZS6=&c>I-qWU_* z))5bcYEpsNuVkL^cI^kz!wL2-Kx)%xt1(moxGbf6z9z4YR zw@wt;y-C9Cwrauk<{;r)*;!%yv0Gw=Y!<(%xk5lI#|Z8Cy<+^TkGM`*JRZL>Pl#?b zf}gW}6dxE~0l(dzjXvmK<;HiODUh9E!l_zA@btmdgDzEczVa957UPkZ zU-JRJZ}8|(5&WSQRrt3}Z{z6;rt|YF7U4d}p5iP09`VzjUJ@FwyDW5_?9J_%ohF(N zs#HFA|AJy$L`p5GdB@?5-I9vZ>RMmP)3&F*BNMWilAvAEzA64vcfHlpu*Ux6fOP}1 z?0r0gQ(iLerK{NLM%~n=Qzl7L=4WYqVSUMg875`zOD7(6)l&UyhQu%9y5znPkq&Il zBKwvhj5^Lx@?f)>h=iA9VBJNsBXGf1Z~Idf+`Ghjr2a-h%?s zdH30>m8*gjD@!k`UOVM0F6(qud}N+6n;P7e6rv$naQ+(Eu;3iCzpgJ+dU+A4RO@b0 z>!V*4&(<)ir=d%j?w1ZRdCL+Mob4p0{^Id$@$prRSv?i=*<)AvwEZQv{S&uddDK8` zt0?dM65D=>ZGW&ZlBdU?h;6;Te&!eQiQG11!VMkQkL&-Yj4N4qlJgF|jS~Hk@@e)o zl-@p$OYfYDj@_{2j9RJCf#`Q!;9G=xE!ROY2X#>YYXgzXGB@PCBo1x2P2(zG+lb~z zo1trW^Ej8uce&cZ#whuK7g|zr9*VO#g!X)xi!QaXQ?^@^idxSZggl4qprK!eDl1hj zL3N%UV$Vx%%DIm$k;{2IG;4TQWD{P1qVHco-RA0`E(Y(>)MpbpiwEB5+MZP8W1NXH zdyGS)FO{O*Zs|u2GTXc81S=g6EpH4UH!oh;&ef zigz0CG_7YFl+heV>xXSC`lx3fq=AU2DMz>ASdDO} z73F?C+W&UP{ByBQ_H6rJ7z2?C1S|-$hy?k~LEoS(oC9nFP(RcOG39^hK=XiOGXffYjR2>KrcVAd0PQ12DI9jEl;Mv&gC8n$c3-!^I|NCy+~mXFtQAxRV5zP96cJP8r29?ufJcPNBvZY9En{_lln zUMk`9>NNhHrL|aL=XzmqyE0+^^P&9mi|@GC`#K5hClAHC*I(m~5659EBOhLWaT;FQ zrW5{PpNTtdtIJ&-(SlE#YAN*iq$jLjJXQEQ=e`iR&JHhY^AyK4br4eT`w30HgyTam zN(EJNTf8)-wQxGyjo<#QnETvm7|zUoBHWwS4}TbLkFVBUBseJz1oOqW`Irxp97!@j 
z=dQKj)K&Ki%?t#=#e602z++J-ai9U!@y@-d%0TNj)3$l~*7f-J}YB7&${&`{ong zGG!MwI;FPhwRHr?YIRXwVrLxd9)#Deu_>Rse>t%&g*bcOk}OZZQOsL?NSbcj zgBjBTGi&dVw6W2xP`b*jB~y-Dps>CEk#R#J*yl8=5y z4>JDzH75JK3;W)9iPT7;uSiPi&o1lcmvH zn6`C>5~fZGxs&OmT3E)AH7`1_R?Wi{RlaqXj9bhnNeerXHkX>KZZ>z4>a42HI(!^Q zS{3wDESR;CBusj#n()#|b@Y;vZ|<75q_je9X4|$ds&_htOrBVklqR=i{D)1I^lZ+O zv`MeX$d4mc*~6czTIAGWOtRWbHC{AUz3rnSx}M_{s(bH=qgj#SblsVX)qB#IG4v~~ zV<#Uc%k$0`PszW_Fp*J=TeTXhJ1@5TF8olact&5VtXSy*ely&bQkP;+sYhu*aiTP&xKiMEyy3UI;rF_mQQ&vE;kUTq_qX9U zw_)9;q;M2W5h<-GZ7A*F>7B*_ln#_mlrEI6l$18;Y{#1F6{7_m;;U5Veyz}RNT;3@|&fsuIu11alDs0PgwQKc7UQ$1$ zj=@H>^HeJ8zUd>HbEG|*=syu%oY9u6YRD=_uP#B#OGsI-B2tdZor`+!EkSKl@{!^A z3(EKFwjrg%86*zkk?X^cXaK2!UQP@|2fVwX?#b!s{WVWzQ3a$tRS%)K)BTX?%Q$4b z;tEo3?8k-;>rJns4sy)why=UmsLGWnG-#y{^6q>axwKFzAM72gJYKOrnldC@SyJ~s z8gJ1JHSC;*{JTv-UC1Sb1A8eS9&v(3?Ts4GT!r|jJy98I z4Q>34L7o+VB&q%SlIZ`x&me#~1e~>iX#`xffSCjuX#tZ7z)$}JU_JqOlNkV03cx1; z05Gcnd>#M*6AQqn0st_#0DLw80MiS=Cj15CJt<&J`cnE)LMh>t{*-}~L6j&;3?+^d zPf4T;^KKrXBEugSP0t z#wU&*tMQxD_7l=HG8?>PO3P@D2e}N6(Y7f|aM0Gz{=oiY=?n~p5e1BdA?5o3=@7t} z$mr2+U5fl{YCyLuYxf~dp8^45C9$0UP|af}a@h*hhGLWU%nfb%2nU=_wvRyIuGgy%jHC zFiPmC9ECqtb4Ops2Z}9>MJ#<=EnIsV&0oEK56`~YQ@H4HLeL3y5Zu zwSqriGS>%>-~JQ_^u2(eBzDEm5*!2@vw>Xf$4_X-kt}XRVS7H;wmEmF&MjWiZwucb zwE+99OvfYI*$C}Qh6xqhSz%+_M%;z%ru@n&Cvo3x^Ki3-0&INqHGj2hBwjS|CN}7_ zLHX@%A^&;Q0ny>O3r@-N=UYzC!fT4sg_%9_M7__c%93N75Q)y^Q}PG!xjiTH=Zp-n zIzv~8diWWis&QAI6#ur58~n?@B$}dno%c)%=$WQ6KAtHh2302SO1hCi7eB?j)IuqL zO_0>R&JZcOjXk;0V;Y%jH$>$xrVvvDU52e=C2iF&lzJ6sNb?WemzsQzmfqU-lMJp{ zOLe^3kd#KP$<62O33FjEsae^EvGGnO+f8RMsmt7UUO61WWF0C|SL$Ie<%LZpGivvj z_AXFL*B?HU#@sz6`99vp*0LWe<;=s<)CbAZtqMyO^P4x3hE>@?%uo0#KAl=X&goy0 zID>Cw#px81^T^X@L992)J*lh0dzbkN>VvG~0#|86wasMQ=QKru_feLg;-J{wP=|!< zyvA5m9?6>6?~!7yu1N-y^oet|f#l7zEN0o_aAITYD%m6l6+1e0lsw!D$!_z{l8;js znK;3Y#NQsHD(W31RjT@mxiI%5`TD9hvo-9H)aLbi-&+f7Fkz2M$?m1~$eJ(y#67>3 z!mfG{>2C8;)opzmo7%91ytr~iHR(wj*;X=F^~LTg8TRsqYQcerq@8K5YI;F7u?@CT zOb@TAI(@mA88df*YWEf=MZ=f5s;$OeiglhR*}4Yz7@hkki2kIbQr$|+iR0Bz#kF%& 
zRT*(9iiNKSG4BqiDb6GfBhshIBXGTe&;hfvp*fx9$<%DtKLO= zi{2x}8B^ui_QO##Cnai8@4a%NzAu`8xeNtOk5k^-Sb{Q6uR@oru0-AYvB+bzf@^9q zR5|i&8S45qRXOL89kO0>M!BM}qw@5v_sa2}^xL0vpD8zXnht*s%1ud4&lhLd|y&}TbjD0M{l0*)c&qt7U!U@IEYdJ>w@`4bxBm50VMw#t2X z{7{d@H&N(C2_0G75)}_PgwC{g{)hk6{>uu=v*MRf_W#cJ?3YmXZxYJr-=;<+)80OQ z31$BeZXbW@YnB3^D**8be6%#86ZIMUO-!Pql-9osL?_u_OZFiHK}zFSrWL_J=zo~|@@f5WE|3Pt|LhO!e=d~C0+}3xumnLCrC=Wf zCRxyu1tz(ioCiV|2ySp32vrcvb;u&x_ktMI1N(41)CI@NA{gw;_0Y}VwGGF~eS%{l z$Z3!cbwc}c4E=&_$dl`bH2GM%`Ev#N$>J|6ludiNoPMp-&oA}PRbk!O$AaT@rTFUo z5WmXD(=nOl310){7ta46n3H@l@2Hp9qFcOhe)lHv;`$(*k<^#JGPVbvZtEj7TW`;; z9?_T|@TC`?*f0~%>$Foec)w3{Uc6R#J@lavH~5V(@$NRE=Jq#Y1wB7;_PXYLD%%PB zAD+p_n^x!j-Y(=0XxrK0PX%*3uvV1PbJ(#z>y%yK4lEy!rUW?z` z<~pskQHO~7|u@8Vin zRfO%_IKe){TlbFNYq34K8cE;uYY z?7z#S>|Z97C8##NN|$P#>{NX0K%g|L#ucgJ1}oLsfrA*Uf@M;4Ag8uAT`Eb{^SN6hTTq1}Q7@!^vU# zTV;ihhN&J|lqinY^!9Pvoyw#w`K-9OXdZLY?UK5l?rtgV(_5(^!$wk$NSD%PG$-q- zJ(h-t#Y#q9H%ftStH?6vm+GdzXBeB^Na}rEN9psyTk;KmPR2KWO!fxt^yy#Pj-1;) zPPO3PD^-uZrdL`;B=62}vfcX(Y15)JY17k2RWi7eWVn4H<2!nw@7O&V zq-MzwrfB~IY4VgBs{3|LnWKx+*`yH{Rr}rJB#V2WNe6dVQe)^+pZB((ijTW1h{w{0 zs)6&v6tylGk)g&pzRia(QRRMk@2%gXgy?>&?b~6_Ek#S?`HbuQ`LXN;U6gOxaEpdpH9U^MztvTxm{80p7L@7~D@sjD zZHg_W+&@gazGy(xoG7rq1IGt&b^r$laB2WY25?>ghXrs_0LKJ|Qtl%rKbq4tk>&M#-_+J zC|vn0F%@Oqe!!XDxq|pZiOBnqi}FmG4%@Y?pEBXADU)$Dm0l4YRO#F_<+wa+B(AG~ za)xz9=Z7bwJ8uJ()9>Cu-UXM?z#Uec!=sldzxx=}ae^_sr(v9Olc6rcbNef$Byjc6 z1m)y7H)Tdg19ZI64pe=cpYp3`vT~Wv3CL6i72QPe6_A6H)zzu;Tr%4D!|fBbh2; z44vEGoi+T41MsviKmEg#IXsO6@TU*RD+e$G06eJ!U=jdWDF9#|0Anp+Du8NQz-$1f zTEK+JW&}(L_zMAGQULHB0>Hcg)>^>S0JXG$*#W@62mliVfUgk%<_NIUVozhhK}!P~ z1CCn2Q~}Of8qyeW(b9;<0GKhLi54(vfSVRDZ-9puFm-^Z7BG823oT#*0r(>Tz#IZT zTEH{{;0L9UFLbr|$N>WrgTn?!Muo>m#C-pyodjAAt}9$ixQ1}e;5xzefolTS!JGnP z4&w~t3u6jn2;*i)fibENCXvQ4J}@RQ4siZ(-f*sPesE539?);-FZ7Y6APP?rpbiLh z+mF(k(v||J(Vo&#n+JcGfs}5PAWBb4Z%SWEKT0SioYJ2%kTQr8MTw!rQQ|d?{1^TY z1`5KT%1Q|!EB(Enl}1*g@{R2ORPM6`at-y@Qc~Y6jZ8z!Xrv|ZaiWj6koKEjRaYBX 
z>XD@?kanP~<`qIJ?Z`1mNg7`%ka@I^x0=@xL5c!szQ*s1Zo}hkxuit*HFZ$AO7j{d z)I((^c%894o%Rd*4*MFp367z@UjOZnw?CGP;13)OFa+6XLkvNW0>&B)I0RXKk&QIu zK^X|J4FO^>5@0Ok#n;eu;ZVfRoMr|LW!lxEi;ObW~572H6D=EouV|k=XEl4 zY;zKqpI^+Tw3HUd)+U8IW68$6evGr_dexJVG-i~Oi(+p#%<8>)Ev-6WCK(ydV|N^_ zBQZCfrRG&05TisVGNsKD_UapjlxSflg#|2RtBvR;*+$$YuX|P`ibMy+jA7eJ?BZr> z`*V&`^wZv?%GJ}-v^+zmQN78M#{~z~l2RMCalPJT^Zl8sgkhIS@yWW()*6LHEsmXG z>@A9jX>DuP-q%a2z2YsIJmsOPU;H-7`|&U$G%aI&RPC9K*+yhvy;tPQ40qCM!Wcz? zK~*w!>seCKBh9x@j{>R59Se5by->-#`WIry93l-y4`;4-IZDx~q7D>6vm*B6TQ)2-;;yP9H0Rt4ggR+qfU7)oj{yujEbc@iY( zOEz^+ky^9*GcDS=OP1X?5Nq;UQNciu=+qj;JTOc5O?60TjB1`HCY_tBF5h%1n)dPn z2~uol{BM^j()*oee3$oBB{{Y9ef(yGYTS?tiYd<-)jXvId#v<2llA%;qx(p=e7gRn z#GjOZw+a7Ud-9lp3}h(p{8$Ep&VGOEh1)SaZonf1tm|McC$HaNy#{MESeLjiK5`X1KSqn5K0wB=bWrQZS;(`r8#pC*W9E!$nE1M$w{Ef<0i+7=rJWplh#MVkaLO{6Z3+1kElTcL)f8@Gs zhca-N4(j-MIm$6CL5*YgBfO_bd9w_mx|4RH9o+|`%#KmYPhCo({+}_(v*M3rrC+ZJ z{<*va<`1Z^1xz6Tath$A1xzF0q6N$(&`1lIOaNpT0L&-wZ;_j5J7AIlkahmjuL;ur zx7Xf2I@0a`Ysrb$kw_Uz8BQ5V`SqIMKlGX)NKzmNX{0IeUZRqZM()wdNE*oqJd)to zv#9(d|B{u)O9{NE!25|xUQiCCDb2CqX{42ne)~17zsXk`&nRu(AaBVsmd3k@N>7jm z5A2$9Adf-2;Q0i7)8@;bRPbwBnmpM{iyYIoh@q_925if9 zLw!&e1jvVNNP__7pe_iI2itN!v2m?2-^Kk zIJN2)yu{+UVCB43oO?D?EWW-9x7@GhJ8u~+G*~f4&=09XJ(G^$p|%&qx;1VHUk#1; ziG8o()MRg*ld}-dQ5_MJolgn<4W5b1mEDELdNZ+Jye;3g^8_w)d?UWv^ZSDSoJ?%c z@|vJ~WenOaHs@?sjN{LJzRb_RJsvx)$;DS<97X*Jw!)5gu|k!4Ex8VB4)Fa>9mZ`< zUh%yaf8t-hbHGj4t-&3S8u9yj)D#4#V6k1DkGM)LCjqBN2;Y1oME|l+{F!bG1@mEQ z-kTZDuM|h~EtYpjZ$f6H%?5;PQSl>syYv)S<~5JE=o^4v9}xL%$=~>9cShk2cB^qi zw^gW#jtlBGQORBAAMp8&?D=Yj4!o=SJ|8+sm%nD_g7dAu@h^fF2>uo;g~bLD!f4$M z_}--uT(8Dcyu0W<=T$sFxjp?GU%lioH>*Bf9!8GgXZqjg&AR(?Ik{=PqERHDQ}&cA z-Xvk`mI=7hB7Humw+Y{1i@=9}eZn7bu@+o;8+_VR5Z!hR;ZN+l$yd5^5q~~C1W$DD zDYzQf7FKNy5)3!A7hRH^_>*7Fm9y$u;?x-|UfKuAlj7eclzIHigt7#c@90Fb+AqDh z?}aqUW=l^pso^7{pK7EqeHbK}9G{}r`OsI=Ej>tjcRxg)?1)oI6E+cNrja7&Vc{hiX^Zpjn0%EL~ z5O7#BKXy&g-_?>tTO=vgju|0M+!rBr8qkzYwi!&;1nnlVE=nfi)fiQaoky6_mh^YN 
zS~Mf6YxK$Wv!8r>ES;@t*vwY3OYB^nX?>BIVKGRmeCG+%%C&v*xG#+eo+lFf_ID)4 zxPtGJo5|9k%k>mb4`z|FWAmf|P3|bN_6}t_E$KsE4{~KTH_0Yd#s;Tmr;i~Y(BT3nVy084ii#>V zs;Oe1Tu_W(|5$Onz>DmAe}fb_%x4y74pg03^ig3vaT)P6Jx@?a(s^KSnid*=aGMY867P@-yM6?4Lf0T4t5vv9gwRLnW&oH1b5QNgd? z<7q$l?l61r?%kQ4aq4+~R(Ex!4!l+M_w7DtMtcfdE1IC|k9kbhypQhpN<^DSUB*pj zH$|`8xZr_rcH%1Cub~O|HltS?R$$%Yd*n|3hk$iq;BIDmx~@Wuf^Q3v2A1E8)0;H3l9(E=Vj z0I2Q&JPd%XmU<)xK#d3NwAhmvaM0pNVgMcr0Hi43pOvFz8OoEC0lc&{Co#}MOG^?1 zt+cczG2pGmhr|HZeI|tJ1Tz9$cevJYec_tI zb%bjN*UOFo*QOz%F#)a#TniTh$Zv4IaGr2(a6WJj&~NB3^br$S0#ES$M>9&&{0IRA zcxKxXIuL>hp@hy_^W9Y&cPD8*2)zh>2>l5C2?GcN34;jHgrS7tguk2@6kH?-|D5*| z$TjlIW*{wTq#^hPLXdvQF_3v=35g~jeFqsw_AzlhwW*@pf>8^p^>Q=V-9Rj0QV%SG@94iXOpT>|`}!yh@!*C2q@0`oUG z25DdfAb=65qy>%vP!94!88`;pa)5dv9Rk!L&k3O(xh$0b#~`->F_eQk1JJgd2I+D+IVMSejUfMlS0Dqyz5cy}+@*@dpHaTEzEtBM7%F`ex_EJ`27B^i z^l(+$)@Ht*<+j|?wF-A;R0vmXWHBd{Oyef4 zU&|*3*HsNTlC1LX?7~O(Ojqf;UT2P(N8&rDkFjqf1a{$#olLu?5zHO~JJz~Pft!Sx zGxmc{u*o%UG0r2MSw+WJc!MKiN*WumPU~y3JEzy@%#SB=LC>c$6Qq7z)h^y#%CG{q zJW$CVKjX>;L^WWZjcd!!Wy{z(mj-jQ)f4%V^EUJId&aZOo!VT$u#SAfkt4kM!$9sJ z$DY`Od6id8m; zEV)~IBe)m8T;-odd2?>-%H^B=dx<*#KJTXl!PsGiaIIdfQjxn;Trek7SaEQK(8j4) z`Fej1QP{s!)MNML?^rRYB#)lIx@h-;^Pkm~QlqHsI2|E9Xc)EF$4okuTqHi4 z+*v$XE*maD7C# z7#dn%k<{t7RLk>-(qe6{n0T@lwe|f8rP1)d!Ut)Q`a$Ygs#?@&^_nfi#0S$3tK}eV=bQokr4IVI2suSBv44Uvo3i3QE zR41a)*vgsIyxikrk#B;!xWxvkjmcc9)q?_xX_rMmDY6otPOGAbYoVu@TYm!;ls;FO zEbXPP>opZFEpk*GZuV7ubH1fAr*b9}@>K&tsH(hEu^63!ChTX;{L@3ISKZrf?L0q-#F*H|yA9nPfizhg|#!EY-Bf%vfcS({^$;U+8 z`dTwQ#&t5f_3SVXd9VfRJlujOz1xVZzn_i=mh{2>HYA~e)0*Jv@o&++#vRb99vg6d z$LqM&=w|rvzIc@KCJp;dxj~+gpE}4-$FB)a2?^wH<%g>yuQlX#0hkj2=J)`t1prtp z0AOZ*ocn{x0btz$fcXJTw174ZfVcsGP7Q$20f1%=fan2OXaNBP_`LK#bCeK19sYh(sV5X+L)k{zxE7X~Zrf zDrp2J5WE2JVS-odi0}lW64U|50T8V~MAFm;`9N?2NM4PY1tJ%u10asUF(N=|`T>HM z_DfJ8q=EmFrafqr^i3mR5#j2)sPr?Fp$^00RRe z4+t=jKmdaRh6MIyQ3_v;*Eema;$J@G2d3@d z_EkH?&!~Nv_pnIh^SsxyS;=*{nOm-~N!$YVtL=04!3rxb+1rTe7!ioyXTIduQEB|M zs*&uGwcfmq)f_%0-kiJNFrGg;?Fu*PXbE3!@NzEDumRKTLNSx$Ofk>v1+nw2j&XHs 
zWN=x^Ih@;>a`xC2Z4rMvQt%XJ(b@9`@Y2wru3YE6n=Kact~>-h8EYTT~5u z`fvv(jpg+({lbO?PvWxH#c*TZn)02urt-$Nj$HjK;e4J^06H+zgN+_Cn%V0;fwN4v z;VKm`W~(n~%Dj2>1;3v^hb`S+%&vLo&Kd@tW=E7TT+oL^y!e?PqhIQX`?qd}lcrzc zo*1v^JHGG9Kk{&8>$Wi=-ePyzR%_>RlNv4Nc2!N`;;irT!7T=JCmYOU-3?dr+nUzo zeBRq|=9?a|cTQd7#C7_d&%l0cqocN*{lXRU!|{8GZJw><7R^RQ#YXgwj_5m}_t=Q= z(ZfcD$VYTNCQgi#cTD>A9Tm|#CIUXKFerQwUld;z4wALkF+~?w9p$>|n0jrX5a%49rJf+Xr-aql z1?-t4K7DpmY-8S>dg-1ZjPKfn4ry0KN=)w}KI#2Y*=Js?n2R%&=c-oZ>?i^I}}6$z5hh&GZt*!S<>2kcS;;+v=;N z$1c;vkQAQMKS$F=r>&)F9=mDk{ZOf3`XlLvVH?ry!X6>j{RY*y#V5(()O2dc+nvdKy!03Yv?>PGhMaZ5N3B^)6AX-b@qT zCy!7cF;1h;)LTh)-%w3#etQ%Z_TZ+F7MLk1(jE%WA3RkojeRMk)f%kK3#dyEVO~=! zLuyd3hqjSwEbdI3c0VY&pKK$pe_caLzLhGa9GxLf>zGEZ>$*y~z#db+*zG9o&DJDwen2vPI6Z@36ZSRU-w!fvD z)U2r#7=JpaUuWU?rbSez{R`xWpvvE3+rKch$X5--HWPX0Z?WxfvF#t1L}XunJbpuL zi@l9-y%+ZA+JebA{dz9$qlm_>$D3n*fPfE}X5mv4KcZ8gvav(=nK*4k5!UP636%r} z;sIt4@kPT4ND#;4VeCgdsDCnE*sdcU`$@!u$_r3otzOu+N@u+0;zjJ+t0OA2-G>dk zvuK<8DDK|Y2Dhnt6|ZP@7w_*g8>30{P|1`Fc<$C^cxLSqTz#s5lk5BA)nV?~*-Zyq zy^6<9FGiuv(nM6RiGX%H_Cz_gV^Fv9acJZ0V!XVh5$-*@2R5Cw5glHXgY8!2qfaiK zkw7`1ds~iT>o;ZSka+~!x27-d(d95MPFjYWj%e^N{L%d-v&v7$-(uUp?%f3s5Ae6x z_W#$de~WFuVuJsQNwVSiFT^$?Mrl4@t`W+<32q>6$zqu7-33CKMnof?Wgxi80-8n? 
zBl%?!?z`6(5%InWa70KW0-DBSi-=$Hx3@s#gR(!?`8_{~Z$ub_I%RQ9Q$G>#K%mp) zBXw!SKG+A(Fc8v+7zg#s>EB+Q{8pEyJ*}skMxY~t+uu|BZzJY!i)|oAfgmRHt;{#U z4FoA!!~%gz7OWtTEP{a`_G6IqfQToHT5?_xzd*19;5d|(^OMc*`Jr7%hX8q@90XaQ zgL)wi`V4vHdSo$9PKP$+x?mgn0YKeQ$4?z(1M=q;+o~S)^_#z4waz%tw@$4lD(ba6 ze{Zt4Dm0i?-A}Vp)on7IFIUv%$mo3(7fTryzb`t^1ta(hsWU4m79dJB;g9{|X;a zJc3Uy8OmSx6I2lq@myAq6TEYYk~1FW%V#dmVs8{(=XNKy;DzCNtoKoCuA%2=#(LCi z?wR#`ZrjQM%=)iw@w1c7nGA=??An>Vn7t;w*}FBS@V$ad`9@c-u(#6huw0Y6TttMQ zs?o70?B=TJY>P_q%=;akIppuk<$U?TJ)K#?JvsM)uT}Ddc{_*YFON-NHoc#t>Tf)c z8**|ED_*qZ>>I4-E3XM=Ypxm29TV^W#DOy?8l*h8(3aJDT1_*VKWIRn$hyk6%w z>}ubKOj&pd)A&?lw$A8auEW_;Ovd6IF3o1T%KUIwyruRrcI_tz-1Xip)^W5iQ#2zH zU(W5u85ZiYy_7q?KfAxz$J+mGVq2Vgk>^*DsMJrdOnbGkH?gX_tgc zug=n@TegZPsZCM~i;v>i=uyZ`DiTuiCklZ=XXVX(orOjABb9{%FG_Pp-=p)oEZk!o z<1ZH8nJvC=v0gN+Jb-Ssxz3)t57pA3-ihMa>5oL$?M}+edl(@%(}FHYcu%$Cx9%}N z(OdeQanjqoldja}Ymrcxe_gEIY&%LR&68dyeki;@aGqFWf3To?B3PLm){j1SqfAV? z6Q=xn)k5fU?=)q*pj^29E?jDVXSo<1)?e~_8%Eb2)j)h#GmvV3qODkdq@Eb`?y17r zd#(6p%OHC2nC;@B=)qE)^9JftXo%^y6or#R+CU5@YpN=~h2LadwfpXmH6zc+_iD;m&!k>RtJTKKDis5T*sZ zP!3L{B!{@Av~Pb-cwRQ1;=6^brFusx!@Hu;Xui2}L46gIt=(QA%KA$26~t@fZGVTihz3vxP^c#2)t_q?-0o*RY)dJirz^wvYDd7Dcc&A4}fcJI~tSm^(5IBO0(1zeo2qXj% z+7mhwLI|A*6?(F1>xv+GA_-B1!Gs}%VT2Kc{~)kI{+}txk3)q*S{$~`4nbZz+p+o6 zAT+d9p9aT#+(_6R%Yj8{`FWiuR|$%qWFx5+;Boe zN9xdn-pxm^To^P{YKxSsbx>KH8M;xa2%mN*!rSY8Sig2TUU~I33Y^*!cL`XAeWTvs zpbT?VZ-8bhOr8sb)CpZu2JM_#&lKQ-LikN#EDk)-KnT-(tqWd>1c3=& zTO{6G8V@nz5hi{rKyK_zG?E1vYJ;T!848Y>ANQz*#@yo<7Eb41S9!1({T85mn`yuPzL=W z`%q3JvXOeA45a_8)9{LgFfb5c5Fmizhrt122rw{!!H|K0L^dLj2Mi0OK|Ux4z`l%J z4$|ei$fl+~h@lR-E!c;;<+2dVeSxxIz@R?ZhIRqihQSGr!8t&F2rxjwHUQ~zT~G#& zK|N3g(toNT8;A;oFL)OIs33Q(A`i1T-_#u&_>6sBeJ4jHbCKP8^3yIg<5v{A_>NE% zGsnILazR%{@e{^2XP546%vatM#<$pB?prw`PIYbLe5O`RCnj(l!D^6E2HhpZT$Mbnt|cGr2A zanpE*=i~XyVJWJZj<@mAj!)3N_;|E^r!7-+4~0%on1!RK7BGwTNgL%)*&4mBurD7s z#K!bPbnoynys*kVJlw1`-sEM>CcGWYMz21{Ts<|7x4Rk1Sl&IAOmczAe zZNWZYK)&%iyaY|k{K_O1O~E-;F7amWtJ!zwUUL^M5A%WM+f=;`Z>!vztmB4G%;pVN 
z+v1oz1x!lAOulx#N$iHx9^4^xpPSgyhR^;qj}xa{RasAQ;|o^yl^=%R&7;gqZt;KW zQ5L7}+h(G0vul92Q_L!)(9u8NOw3bAt@)n}u^F@UV z75-EmYd7`uC;g?V_Z4)PR;6N#%bv)w^){+tRuy5%!qyUU@)J(1*($EMS5)}kNKog5 zkk=C522e#yd=$~82I7=WQA*csabga$P|EPgp+8odA}xpvqn3~9Ax%BL81+BYPz*Vd zCe3T%LD%{`fJ$BeR`jlpXy0z{r4`lF#ADAAy%#bK#ZMobimj5*iG!0%sCqVWg6*2u z6q9y|Qme;FJ+t3Y(KbPJtKrsSt>+!6)^r`o&@D)~wmgIC-R+>d*1ZAZBx zy4hT6(ReTQKG#{|iNF}Kap4^G?C5EV^gFF7#~wOriz>7yerWz&9%TO)tUQQZ)#c0ge^}%qqaj1!2r5Dw zSWS@FUmFLK?I1#XLPx@XDRhxM5rjxW6k#x72w@mu1mSOyi(G_?MJ`l4_#tlA{v$dX zuY-GB^+MTp9_X~iS=`e9EcQND4|hyoh%GN2#Lg)hxMd4H^tyU8yn4byJTZ(znL1Y3 z*!mN8ZI^&PZJ3So+*Rnq`dC+Oj|MUn;rl4hR>N+3>Y%4og2;F!h}i{#a`0pA`Y zURxlrf%v2m#E57G^};k9%EL4q#4e2&8hC1H+k;QrYw9O$!?xyA_aK0Qcr`&aMCUtlTO2bTjU~tQ{P1)5P?Aa0l^7GA`pW>9D)G*5I~rM zJRlZ<$OOkB4a&kkhwdJWxNBf#dQv>_hn< z`vc{m4736HFRM-8h za#fGMW)1otbl<=OLXs+OW zy7c3=ZkoaC7TfbWb=vao_8(OFDf(RLx@UZ#Wg@ry z-G07WLKYv;{*uZ$xQN+3b^!hy?7$55jbK-24aYGifjHf08}7V40EwMnGkvEpTk&aj;1T;NO?Ho}mW_tUor?+S? z)CxJGoSccIGN(*>#_W+&&h?$5MfpHM|CJ;zR+rEwWk%9Lh3y`@oTg&Z!!bgbT1E9= zWhw4R{o>Q@R-7=y^QH3a$d%%u+r@jZSDDmxwLenWZk8tZTrEB^z9GKI@fZ5p+!VIG z((^G6D^fO3HaN3Tg6Qdj}@Ie zr_nmzf;dt7Ah{X~%7d}xlwHY4(V%r7$!uq#V%2LmDsA9E<^20Ll5yH-di32gk-w8J zO>1^cyf*01ZM0T(r z)qAroRbNa~3@cO!do%l}XN{X9=54$t3>LA78uV4H?08%eQ1cDtx!TSr@APG56#nJ= zWBI4@+=}^w#y9zDfuLn7?^G;kK{sJ=hRFjA(DHx{19dfmDWL|zf&gw2wF%(ya3@d@z-<5#7?@O(nA658vfYN@PY5Jb zC}_!(agryD(38-c(3cQSh#*7~q6mWt|4G=A%l}nl`Sw>RV8x)BJ5$ljM*1k&vNbwf zyCq6*UKbs`(HG@s^~Kf)$D*C}+MqRe6j)Jd2yUd?9&PLM0lS=Ahdy^-k1lU?$0`>S z^oXm1x-3dS)i?LUm%6V-`IDdFXR}|Tnw2bZ6`$w0aaKHbFPn~ghp6!QIf-~wXl?XB z#}c=kR}VQh00$2pyskDlN$8*__VL&%ayDL}%0@SDx}vj++IaE&DR^e&KHR=I0qsAi zpfmbmT-EG7u~RZIeT=6^VQFaZF}-2pHM0IWj*Fbx3A>j5wmz>o8P znD4`y008p=m}sd+VxYPfFe`wW7BDdYc>VxjZU9RyV0wUBTEGke@bCa&k^t}&0>C^0 z;57t*sRF=*2&k{cj>Le2mWCt-8fj@vV!%;L6A}Yp;s7Tt&Ljq0v@|0z;Hm{oAmFJ5 z%pvec2~;tpZMe^HZ{dEzeT4f4_Y3Y3+!weHaQ)$$!!?HM3fB^@pB(|N8C)y4K5$Lo zTEO|kIlB|!T;cp$5?T{{2ucD?zyub|E{S<<>`S)&2myq)gm#1ugkVA_p);W?p*!S< 
zK(>1jdJ*~%`VsmQ1`q}k1`(nOLkYtPBMGAkW51RD>;4ND55jK>QZX72DfoCjyy`~; zB#o#;ystp?(ReZu!Ac|g5K#w2CXGiH$*1v>BI1sw9J~?;o>c%5hBRp)RDl2mACo5{ zmFAU4t>^>-7yu%XMra~aW_XPdrpy}eC?YI@hm~Abe*I8W2WdmDOP(@o#3#}}%}4mj zxybv_M~w&tF{vN=4kDN)jr8qjnKD-_K!E`Oga4049E?7sK@0(m0_@8H3g zmiHkI3=06p1?mCdIJ774!(ahyxTgXLD2@y7PQ3 zKfPtL7uayGqgJSveYRuY-L>GBc->Oft6NhQ>a<<8=k{!7@}2#7@qxZfJe|*##AK=F zHD1T9Y;%ph9eJ2>KHC{TTQ`_nFsY-;*s_4j_PoQ1y*sLu@kjW)^@XYh?^olShbu9) zdQU~Uu{Uv-6QOwQmM#pQ-<>^bFoxUp-k85VC0ONIbp;pgOg^lCID`o(RI`W7M&P?= zH{z%aU7Q>2iYu*++jo(pWMhr&o>-LdrPll53hK}QWeV{=pBPC zZ|z~-b~D_Xue$uv%!ZuntFe6fjBb2jojsi6vkPp>#9>TCV+%I1!94s+<~F?iR59~) z>jZXMgR?lJ%2xEb{u4CY5n+e9eOc34Yq^eTyIBi|r%d4WA=q)}5$lJj^K#xCf+h~3b?lv|@B!e2}q?txnYKj-l?cFm&s zYcK@hI2l z$+<*!6%!3!C7Uwb~bLzC>@fS}qqqM8S(ejMaCeKQE#xcrsi&qL; zd%G)A+TWoZFOQL&m+TVgSto?kdLGKuq;9l$%TLnXR3x3+(hXJf8Guwy9cj0w
J zbfqJk??|(ne-*v)9ranJdq_ikV5f z3SV~zQ%S?x)5{Kic>S&wO%Hb{F(d|BrlJrk)5=MHKN?jj#(}uN=Qq$Zv(WBBV>FRIJ(i>`T zr`H~Ntb976xftncrK~e+hR>84NNV(Qt{8amgjBPdITh<{Bv!rQCq4&S z3Zk0drD{HjSJcn36z#JvQ2ULXm9Z0hDZAG0L8o4##q|q1tJj-17rb-MDwc(pQx;yW zh5SQZq?9IE!h)3A>PL+pDA&!MEI&MdE>Eh7e}q~8-llxbKzcHhcPf^iptJIn0ET!N z!eRJ_;TqP8zl-!l+TjQ)LK}iVA&?M6Xiw-!2qAPLbRl#jR46rR*CbIS&tSq3!Z5-J z!YINR0t~(532}ej)yHq2nfzE(C`nC3{(7I#i5*5LrNJC*l(7#p-BQri>dR5%OjF$2 z@hiTe7{wUct;Hc7&!FcQx})Pb5?#Fe4rThCMThujs1w%5C+tgc@u#iW!Y~adHX4b| zR7I%3HW@vy9FL9?S?j_ybDZKb8Y?Uspbdq6(8_RcY;o}%>bVqSJpMG+38{w4;&X6^ z+5&d+%EMOtYaEufnqJ%yC;1Y|b&`IdRgOfNo zd;wBNZ$RtgFOs{hgR&Ekqmq(+a$6H|cOPuPu4$3BL< zKVy)eieHneCMJ+4>W7O0CPSXZ!%SXYQNW4;08;|M%pZ_f2C&)yz{CKs8UVoD047?% z^Z+1(0APjykVJqQTFgldSZJ{%G4Q`FRoRm^00%7%NDMU8(ul-BV=ay(2AXJTN@BoC zi!+G<7cI?547h4>BQfBv1!OG1QwzvgKnpD(_W-T5fb;`+YXKPuP-p>32%xlpJoL}J zNC?9R+<&;|aG&Ac!u^E%2=@){72F@VC$$Jxgnzq)Me5`Uz63u)00Exmc7zUuU_vON zGeIL=k+?$13JfPXK8O%a7)lsU7)cmS7)uyOh$T#bygy@riwog5rLBpw{G<62705$G z;`-*Nw5bgIus~jdeUPF+8j@utke)PB7LmEYp9%6n3^JJJYgc3+WG=`r`!<1ert!hj z%21Fl%Tb#8z(7@_+&{KfYxdU%T#b?lpH!HCY+O@0{9>&&a#TpJU&uthzks_xGyI z)u&C^3wynJhh2mC{nId8X}1S^Gc*q0-{j4@`!~hc&Xlos-b=W+HHK_*s1G}LsSSJJ z)p*Xr)S3?+)sp*sFPS|O6vZ8E7QyCsyQrG%d==}()Z`=gPvQ0bt$F>S7kTdGMW$8l z4g8t&2ULdbj&dpf=IkwFiQh3KlC$(3!nlq9j4$c!Vh?wCj#nBR^Y#$nwG%lUb)Em*6PnL{E&d-*T=AzEGKir>x|-; z4Eiox{a!+uXAAi;{GSSC356H->Iu{RU3{9}&JhdFtAv)f{MACdTJfT&j$}?_>gqEe zrNdfhu~PRYf`@O4a%28MVf~io%6`u!sm0~7w85G6^pg9%B;V+4O7D(Rv^_XPs?tAQ zSfA@Fwy(O9eAjA{IJUGh)yHw2^62iDV*4(w)ZQo6$^+zkQ>E+oNLi^@(z!X~q|@#4sl$DTDyuxS61Obzpf)xS5$89y*<%*DUfjHIkov*VOX7lz z+ti4_V5J`wE^fWwf^MPq65rRqqujaHr!b9MwMU1Nuf@5jYamS=m>eB=l*Y(uH!F?c_9W%V?w-o$9hb=Ylch08;F zia|TNp=FS$dSWF_zgI5Wj&3bZ?LC`n7O6)yJbhoi*gRcKDCLEY#&^U*kH^#ii)mtG zD?_SpN{Fz*w4oHfW&riXDpIVub&`6Uej4?_sFUFMMiARQ3|6nI>r2f%wxV$Q?5|2o z@0DV$Ic0^pwpS>dK8bXiiHEqtiWah~@C9S4+@KV@T2ZIcrzlRCf$U% zGhO6|=+7mXCH!&5U(>RD)j%?rRwNY=4qU? 
zdkqpjkE2khJhX4)GF&--D_XR;2YU9YGg|+81!~#M1TBo}g{t(JhjJEl!fvC|(Wy#m zl%4SgPe>kvj+tFXi%M3L`dx6JI~(wb*JilU;SiKPZ9A^s_B{5vT7Y%SkK%@$K2BR_ zhX*G+W47rET&Kli?D1hG`lZYXjnyBH?ijdY>y4kjU7MfxU)gy4nuPZEFGYSceRkyW z0KfzS4qCt*0u8l*X#{?639UJ46ZpNQG#FN3g$OIgzl4-V&f}kw(MZ0(e<_k&1PH$= zsU?8a1TvP!J4=3jkw{P=If0a=k;sTF1|DDFK?Zpt)_9Y_Ylz@=MIM9nrxbVMSP%WLE}BGG+&CGy*^PikZ|QeN}RmZXoe zlqP$Bfm{Z%pC%vF4PI*6kJ*23leD4vwih|qzo+)!o{);AH83C$DiibxFdx+=fD|Pg z7Lc&uH~{ljIfiXGCQDSZdy|}rK(|ypKH9Q&5PkrOePtOrlrw3f-SN3SY@7!Kjwbx?> z`=~`K(?9DHleO>`6Kv+gx9(WV>#X9& zl*Y0}#Vjis8#C>rM{rFGQ~02VmHA2WZMeYM`l@*kD4upX!7k|7f?MEjz`KmR$@!an z;GR%N*=H-}a0BR6-agBQO)RRVG95BOWqm!8&u*N<+9y9_oiBZ0Zp_<@=R4%$?WyZ= zQm=)$^?^_5RM}NrbwD&WPMXSadq?1uu!h*4E)jQPt4jL*TPT)pb`c|XMzW=H#X zypKsdH}AF`S1s9?9oOKMs_xVpDkFX%uOC~&^*c11O?|4yZS$PjwdQtQr_NvaDwlV2 z1_RUBC5mwN)6E&&h0PsUo48Y~RsX8&>Os@lL~jGGv4b)D_RavlvT-SEH?JPE_ogk^ z$iJFO-|8v9Y5yC3+S_Y9?drmt*KaI89KV~`_V4?6ec{HE6N<{iuwZ}pqk8O!-pUD< zUxf5C>y<-mHV~FrpZAIBx0Z5Q8(O$`Qkc+Tk++z#I#ck+F;x3kuj#CfSA=c0>5@h7 z_X?X^OQ@{dIm#vPCkv-d##72G2ZhL5t?A@9FX*#}rid9~+k_`>QFMRT!PJpc_mm0V z%cNEvk0G;DO~npDDL#oO+Ddr^q13h1w~9+`y9-^nVe0MYuF97yzEG8xFO}#@!F1ze z>B5Im$0dICC~9K$+QRi59mU=unNn&-6Z+x)`P8w(6{vCpJ83~>9np6cB}Glo7Mk{2 zMBl&ifcm)ekoxI67dmM8Kx)8};o{Um{iX0RXOdWgHt%jn>_&qcixF9fFhEFqw1 znX=B6Ws?48LuycSXUUqSsVfdMl$piFK8t6Eiq2K<7oPEZrt}I3pt^Q(6Q?z^R&F1> zg|c0$P$+!gD4sJNrO+8q=vfDbD;_`67m{jCF5GsZrEv1_S!w+2+M@9y2jwpAvM{`D zUv-t`n-$4UdBWl)tLfsj7u4#KNYOoXG`&h^h}fK|BRLp9SL}9m5%t5Ti(5{8q&m&X zqwJiE1wU*i&O5$_9(Cz}Q27>waXNxRat{6T?g!2vJ_34eh@di^&0KkI1h(8l+btXS)tjRSL23OO0;{46GmOe zp`azZaBRc6cx6XD-0Iw9?4EZP&9C(syRT`37Ce24P7hv#ZcSDqJ6#ua@ZLxCi^~_h z)OsTde_RjkkaEyIyAHT%oxSKyxQL6d=HcYr=4eV}dAjKkHX+n9Sk1@it(L4F#3 zO*o57Ab%S_TnsP~0IU%JSPuXooB&`t09aedYYlk~^5Z%L))fG(9RL$8U}^yHUjjhS z2f&vJ0Bs)tza{{5e!x--X#N2BI{~$|fbSCk{}%v&9~4kmiw%hZ_&)*wUhxC!Yq29S zV6Vl2#6SZrzn5_4N!kOvwEW&e84MF}-~ZG?8JK%=4*dxOwA%y8_8>wup+dhi$on$| z*)aU3Fc$YstkQn73tl%Q-`0|S!$9x?;MWp0!kN}j3j`<-x-?=F5u(2Pc7g8~*_Zvt 
zWIr|dt`-PzkPfeHLOKXw8ox4<2I3b8X|kZD$qV^Gv?HRLMr70anQ6o@Z9g3_@QHx_$9*+vljPbndci0sIspeBp*6BF48!-JeOu^$_2f0uVNEMbN> zFJ#TbTe6=%Msd2;vbo6F=h;VT)v?k1R(SrwFjN#1z^KkF;_5}swh^$m7dz3&5CHmuRrI^RZa-wuz4hFl4{NcysOMLYPFpA za<9xb=-|L4wA+E#70zJ|{I>J{@q2jh4)58fZaMgn(GE7yY6kDxCyVdbSmI;#?(oyS zQ`wI#yP$mcP=@k|XH0aKqTQJ+Yni@@Sxr7h-@Nt^&hK7lw%aEKqqAxi`(Z;P-po7` zXMF0z*zTHv3Xb(>O%`5b+xVBVOF~AnA8y>o>n`TAjSe5;Xq$CJ4C}zQ6XWEE;dc|c z{%zm&N+_IuJ4u+)VWYQ~Yq~hOcd($>)*5^ zdxgg#A%$mNURE4%%n~!_-lP&g-j?1+RTXouoRJn&Zd7io%LTdyeMQrE3k7HUN|cHF zT5&=29mVR_m6UTzOoU4Liv;_G5~by^`I7Oh!F0`5o$2F!mI^ruH$>5J@19yOo=C?h zIxE*&XYZ-=fTQE;-4xlk#dO=0XraaWOzOzBlj6?a2b57;Z3^>R9hV$yhe&me{VC7M z`y|WR)kX7L2dUm44vFngT^EmcjaTleHd3g5-k8qI%Ty*k+%C?r>ZM4U@PMk+CrsSv zpjUY7{d?N$=v=W~j0L^0&_vl?Xh!S2l7x+=;Sw{k4qaJqn3QO*Cms%dC54U{K;`N` zrflp2q{`L#U*N` z@Rm}FM+)i-YmM}%$nMV-hIj~-Y_?Y@$vmVS%Iu&Gj?55!9gZmsJ^CpRWQ~xCHzq) zU)!#H)j;^NkasE;zM#7>L4b(^4C37!PopP``xZld#f}oI@H*$4CY3`wD*WIo3*|?ABa_>O2^sFZ} zt@CQSUn5(zrf4lT?lB#i?|qG(jc231Uc<1v>KvM1Z!>mVeH}Haeh&$a>!Pn7199jz zOB^@f9qXn75>dRY<^yibO=cf+x)A4J<*xw#y ze|wbaR38}?IdF9E@PSd${YOR&4Qm+PKz?RmSozZnW#k_}in5 zTm%TeDV!z9AEnoPPmBmpninA9*Axlhk)=tOJ-uL`2xcI70U)yd=s5<06r_VtCW}-W zu}o8s_N7nq5~s#%41_T7A|qlQpm}i;>LkLO#zPFsXuZO|_lfmjFWkVp1pgL-6f zP2*9f73M&Y(+Gj^5+?{}&>rcV+{WKi`)^N3#X=ew5D>4xh`|4($U+kcRRGLa<#fm+ z&tD-1U``8jS(wwxKsuz!b-}irpKN|_4~|0_s2_j;`5?%1WjU7n0A&D32O!Ax$ZbNJ zETF+TKt7mf1JEAigCLjxw)YeMD~}=-2y5_c{E@hCfx!LRn-lq%Zbt*_tYP_BO247SPE_U!Dg=lKb?$5fS+kN9&7tyMe9+pDe_bZ7UJ&tnsQ zF=E=z%VnO{J&di?dw$q!oKu)2~-p$B#|%{bB2vX4iA^gdUd}osyb( z?)W^GdXj(&KlDL2>=xm%`DJ)l@ijcZ%N11qQpu$0yu+h6A4R)@n&QegC!*w$W+>%g zAydzKJd-o-6cgp0!lXwBvv1BQxMvIQvpuKoU~J<*vPlbDvWN1vb5%1_*rtnjGX8$o z_?g{DvL1y`na!P&nS~=G81>*MoY$L;yrXG%j%_fCRgB)xU0!sSjaa{mD|TAQK3Lp| zTQOu3SIsm4AJ+3^N3GQ5r!HN>Et{-VO|f15|yn8FCJvp%=1W~tvVwoofB=qvpVKdPe( zPKYKCKML~#4;8p2J1N$?I8cs#8wwfm_mykwnA3U9JBcn9tE6-*M!+ZXseHdrh2iHG ziEj4imHw6D3NupXQRfspwjavk(-@bjL zPCNb!UH^<7UFH4`Av$uMxJi{QEa@{^TJ}T|;@i@~&eiv*PUkw)Ij2>U|IQ`Ug#oXm zROj)OHWJ|7CM=_1s=H#e; 
zb?znAPw%78p6n08pvOa$#pdzilVd^R%%D(ZS@v2 z?i;rYPYVJSJ9`<>gR`QAV@)P2H>WG8tXGtBtIY%IO71wR>gV@LJ@QRA^UoiIHT$~= zGnR--N6(3L`}_IgjVm35vM(2vOF!r-u55cE)y{rFIZfT5_;`AwIOj@N8ZF)_R`O^= zUH!CFylz&NvN`5R&?_EoNA-h`A%jSk-u&bKIXZB>^+Ij+4I&Fo=btEr7 z9?ZsWrYdxMizzAyJ&OFSU*PeGk_O^qyGn57Sw+}y(pB8`f+3#WIUDl_@8eFifO7X;!q2vD z#a?wfptIrd^y(nXv5nAKbrGIYeIU9w$pg~<%0a&Czb18!Ng&Vr4<`?Q@&K$lxCnb4KcozR2Oi_nMAkI&2t!*MzcP@uNFRTeiEzb| z6a2x0AqT)98);)O_CJP7WCxBx41jTm0EQpRfpM4fKv_5r$lFjB3<~6dARhzc0%;IK zUg!q|s1Ja2xnHmk?SXNE@MGIhKkP%0%Rw4!L-?^Dkp7nr@*jK!Qqmv0(5Og28sj^2 zMqABTbj(wS+JZjzJ8crkrBuqyEclA92bk5>^#KhSRGgGEvd?N zTlSRmc{_}sadH8FDDesoYu<}{c%(XeYCs|vY}A7b-+7#CHDwxKyZ#iu=&BVPGUYYL zOo`$0jui2WJ47?ORbo)NK`~>oA(U-hcAiUIu$xOFTEXO*Nqwt~NL~M1^o?p=V z6uWrWK4!s~O>9=V5wpu>p0f`<570Un|JGJVfDm)mrlOv!LkA9@5*lLlJV6zBJ~r6os0$*IdGD%MS3kv+kZRxR2k)-hPB?i-XRj(A^6n|9qU z^||o>+B+AxnzFu+OU0-ZDHVxGZlMgN&e?mdeK|+DBoYz2sU#_ePwvL0hFmi)<(4}S#x3bx|1)PBGw+z0=Xu_b=N)E!KHt{f>%P|B{r6hG&)WM$ zP#q&5s5aOAfvR6(D10^2NopuFR*gD(nwm)sQ_ep#MkuYmC*13=q%vAA7RrsnXalG5 zLjU5g=&aE#CBpYx>76%E2+y9ZrPjSDp&W|((;g9Rlt`~k9ldwCFmAh2oy1L1_uM~- zT2wGy$l7z4%Dr4mx2Bp3rB5~qDQ8bA+s#`;RdeP-A?B3ZswYdGH*sq7uC>&{X)BfO z7VZ)zZ5${$8t_bcbx^tBWVnQCVp&BMWFqD1?@TD;%w586yMxl(P4}v)P5lL<4Jt|T zpY#o8QU;^*CRRAQ z*F97@vlVXAbrf=qZHAVQxs84-G{vSmk8z25m!^?lz#sAI|CVI*SNU~CFgXCo7XX+a z;6Hs`5KI=(S__yj0HhEAOc?;O2+&STd*TCN-hhr;z|;W_TEOf9Afo_a0s)Xz05FFD z$SVMtMgXK10L&!dtOZOa&{GSTPvGB`j^asPSjoZa3|4xu%7WFK5y6(7*<1ijD2GD2dC-e>a1O0%uzn6}*=`jgo2|Phga3}O8z`g59@FEN(3?d9B_!9n< zbVSmPBSaGvgjm97|Er3k>4(7s_q!5OJjgB@iG|2LAjg1=1CowLTGH$;^0C%PJVXN0 z>|7$dk~HaIrx3_Ou=9!RL;`6?vm*+o%tTrezk8*TcZhTZa*igyR!Y*ugPcRA%$m9& zjdoX(raq9VK=J{ZibzG8G+NmRUQ+}KOCv9lFijk^1JXhna7@BAJF7^!;A(#DQUfc_gHVW3g<= z1Lpw5frI>T4rPE3^@(|*EJFgM5sx7r9E3xd*e*DRGN4>IhBBZ$@S)9O969{_ooE0) zA}NX2{!GEO%92P4LMo(H2kk;}$DD&bt6)cBI+P&7Ah4Vyf766bDk zo-ZA@RpvEq0=Ir%DQ7cRDGL%(WednIp}KAv{7zhh{pSVYV}mQ18M7q#mem$!K;!)^ z-ZY$j9QvG{{&o&t;CTz1EU01~<P zAIR>18p~GP64;($db5__hOU@RXzO 
z>@4ia-CR71U7D@rmuL3n_pjZ=*_?@Cv({f>Jxm+0Ze>@Qjh7g9S5#-VizSQgp1$T5 zyJhp8laBM-_YB|{Z{N)-mOteq5@yO;^`F6~z6xW5$_Dd=$KtuL1C6+%7@Du^{F1wF zznkmP_c*ukySw7W|5Uq<{_lQWF|o+?+&tB?%z=CB8cBuDO{P-v#gkO+&$M>yM42g9 z4^0xDhWXO%9O?@TnuQ3KFA9}gJGYcHy(3i?uR(OvtmlI9L{w}RI!MS~7B0Q>I+cFt zlCHLhHAK<@-zw+%ey?u*Vi)E7xU;IslyyaiPQ^;DOJ1syqZHB=X-Cwj8#~eqTg?^* zKmD5SVBVfu#CjEXx7bA)WW7;$xObcO=C7!850xO-!7f6Co3%RW@J*`qtpa-PIS=Ka z+FR;vTOLT6z&GmB{=)>HsswdWo5s}IJReE)xSc}%$mJ5(zWY>OFJhIZISZx91sQ_X z$R$$UF{_mA94=BxgVTkEb8o8KTUn_%<5Ifb@i^&~m>*R~rr4_;uoZO>uMn=E+OMA3 zd?&55Z>Z|Q5LWHHznLoYNfrII<1p1f9`}%*f0Clc5pSuZ@BT@vSc(o_6et{Ax>q=! z|CZX3FwtPyORY*Bm1R?zK-HCELWq)C&WY*qK@=PYp5oOIyP6T-ewPF0gwsdnFf zM%A$F2IXCDT6FSpKWXWVOv!|rN>wf1p{!iPOWN1HS@d}1MnUhvGs!CF*~+|=>C)c% z^_4vupHd}6-jd3{_ExPZ&y(s64-@R1S4yUZ^p}P{DW}RK`n|tB|574T;-@y@@9PuC z3uLCI;>pKlCTJ);hQ&uYJeFZv0FUv81QSAILK6b4_bdojgpbNhB(4Ji)?Bd8g0&T_ zr(g{Q>n0ZhtdBGj6Y+l`FKO2vy-2tRp%0-Sp+8{&!JFVi7$VN6$?;Iaza=%1eDPvA zpSv&K1|OB15@~LY9QoU>rj7bVp)3O*^wxVd-64Jt?U+9RmGo6m3rFhE@gjz~1)*vHbp4w087i$h zfL9fmT9k$5t5@U5 zZHd?_{Gn3+xE`Grx?O zNYcUehHDJh6|N;*Ke%pio#6Vw^?>UD;|^mDV+>W`AI7Wfz0&N3wBzmikw3ljT}Vk(8y3ANrAj2?vNsrYmk;;rxO5@ z5~SD2QBWuOd_1HTC)Z*e)GzLi(nwuI!h$x6yRI}}*MfGDwn92hc_4FrVUz2R%TZtu zz=06}M8jTJd;Z~r0*nO!4+3~tfP?f92Ir6#9vW~CK9m7zMMDGUVtJ4c&LIwfV*ngH zNFW~oh6?hCbwE90I&%1nI7km^Aq^ahY2g_924w(nF8bu~^LL^__=qee-UFY0AjF1! 
zOqfcPr)Bx@2GTTnGC!Ux+1rO}I5msU9{o=Kjo$$-bx1Z>@8Bpl(_tl>T9(1l_U8Pw zbrQMz)VH!eQI_o2zEUo%yfxFFYRbNv+LrZrQ_ke84zfw^-{HLFK3vndMy$;tGj?uu z7C(NYuIz{8MzWm=RWjpzUp~fAhcTG8f^D?>9Osk2fteqC39XJk!kBe=#|DpxVrQR< zWb0>|Ff-3(F+ryG_)+OW)^6NJW{>O?o_1^pTGo3y7G76yjb<@ScdJn5#y4?z*>aYh z!IbefQG2*tMV4%3`h5P`t!kOuR12A-VmH6EaT-6PmmS|)Kb^}lxQ&PGiNzc3YZlYWn_{(CMRg;79 zUEdvy*}Kz>(RLTSNw|y(eX8(*L=U!cxe;EJe+Mrl-?Uyh;VjPRB1gqtMziMrUhMf{ z<+$)t9Ny(SgxS0z4qr>q#RFDNLnpfFGtW+2vqsCB^2U{^Y{K;$T<^y_IGrC8_)yyj z*|cM$_?wo|?Awd^eC{J>zSQJ3ze8Qb=iBe(XWZgAry$G?9h}U2^~_~c+VtZaRoZb& zy`q_T&&%RX@TmlnGW4c3o(BBdDkZ&6$w+h34Fs1Vwoe>)9bd;W+vrCm&Z-!+228Gmm&wX`^qO)r2!j;JWWGb~P ze7<@@Qkd%4@T1a-Zv&`(wVhPe=Spd%MXp2=k*`X+&{|>>RU{Pd>nUB{dAAUg(vY^I zU8PGF8LMtJeI*FTt*N{l_o#v7)jGSZXm!&1MM9U9W`g&DXVkEi7u1|od)3#~2~_4h zbJgB$ro#0_Wh#Hq7eb$7snTy;rRwJ8>4ICc8uhG+70Lx$i)q9BL&A)e_f>WGrU~kJ ziFDSReXbL;a+IY__DBvbTPWF7U{F+Aubpb%?RC;tltNXu;H;2eryDiP&03f{q5(D8 zem@;~*@K#QXp%JBSuQ*&TqwPC-$G&>QzGbQ&yjxLZ?W?6i(>I+`D^`p;eX$zgFs~= z4&MJ$feKavf*_K-< zF8M8;TvP|^IWNFE0h3S<7lh1Dd*e`(PVyX%l*wuwoNtc;g95P0-AQQsp#W6y;0pfgdI#Dr*@fK4o1lx^XX2C= z!*N{H9kjIlx8zFHK}$9)#5#90$MoB>JX90tfiGpGV(Ydo=tD{0K^ly51L$KGr_kdy z2hg*nW3b}&5gfs}ptGaO(AiK2Y;mJIHr(QcQl^r2UB$>{Xaef7uL_5d6MCrFmrz_B;{s1_UJ$jnD(a6bL&+L;_I<#3KmT>_sABAW&)cDv@*`25H1D zcvX+=W6}VxxRE#zg*1Dqw6Eo9Ue6<6nS#l+MuZ~56HKn*bvP&=ehW|(r(kjo!j#BQ zb^r8*C`}tlJ49g(_F`$i0wwPAB5_bZlm~mLG=iG8J&^88n_T~~KqU%GAl`_G0uP8e zqR0f|5rl(q1YsaFLD;VaCJ=-G5R4!W%7lE7R?GuFI4BF^zOs4`=Xmj6As)cfMlG z7KgI>4HTT4$u{o3Y6usS)tWtX!Bb`!RwHkG^#?xPZ30^nlgFDM9L!g2-irqw%Vw^G zU&TL0&Bk^Y>hQfhv$^?;>amL0Dcr^E1Z=Cj317^6!Cbqz3FqCZViyFqXDih`IG6rm zT&CGtuIb%}>M%2 z+1jLhwpFKBT!M`U>#NLX_fODerukX16LsBjvzDFN`L^enlc%2J&3c`D)us`Mp+V8Wa^36YC$Uh>G+NoDiZ24Es5Q&ihmi#>GYY2agSmiFw}~9Y?WYh7cJV z5*QozL1lHFx{2`y5FZ<;2n&h*Ik(gMel&;*8WR#68$KcAed~YOag(6%ae<0Nx3N)S z;W4q{!D6|_ijXk>u~9KGzv_FVnBc&%A^y=(G5+x(@V(LxYn}YQ-uk4sG2v0;{Mv{G N>W0@1*9q5~{6Chflf(c3 diff --git a/notebooks/code_samples/agents/agentic_ai_template.yaml 
b/notebooks/use_cases/agents/agentic_ai_template.yaml similarity index 100% rename from notebooks/code_samples/agents/agentic_ai_template.yaml rename to notebooks/use_cases/agents/agentic_ai_template.yaml diff --git a/notebooks/code_samples/agents/banking_test_dataset.py b/notebooks/use_cases/agents/banking_test_dataset.py similarity index 100% rename from notebooks/code_samples/agents/banking_test_dataset.py rename to notebooks/use_cases/agents/banking_test_dataset.py diff --git a/notebooks/code_samples/agents/banking_tools.py b/notebooks/use_cases/agents/banking_tools.py similarity index 99% rename from notebooks/code_samples/agents/banking_tools.py rename to notebooks/use_cases/agents/banking_tools.py index eb9bb0076..a3f08c6eb 100644 --- a/notebooks/code_samples/agents/banking_tools.py +++ b/notebooks/use_cases/agents/banking_tools.py @@ -279,8 +279,8 @@ def _handle_recommend_product(customer): def _handle_get_info(customer, customer_id): """Handle get info action.""" - credit_tier = ('Excellent' if customer['credit_score'] >= 750 else - 'Good' if customer['credit_score'] >= 700 else + credit_tier = ('Excellent' if customer['credit_score'] >= 750 else + 'Good' if customer['credit_score'] >= 700 else 'Fair' if customer['credit_score'] >= 650 else 'Poor') return f"""CUSTOMER ACCOUNT INFORMATION diff --git a/notebooks/code_samples/agents/document_agentic_ai.ipynb b/notebooks/use_cases/agents/document_agentic_ai.ipynb similarity index 100% rename from notebooks/code_samples/agents/document_agentic_ai.ipynb rename to notebooks/use_cases/agents/document_agentic_ai.ipynb diff --git a/notebooks/code_samples/agents/utils.py b/notebooks/use_cases/agents/utils.py similarity index 100% rename from notebooks/code_samples/agents/utils.py rename to notebooks/use_cases/agents/utils.py diff --git a/notebooks/code_samples/capital_markets/capital_markets_template.yaml b/notebooks/use_cases/capital_markets/capital_markets_template.yaml similarity index 100% rename from 
notebooks/code_samples/capital_markets/capital_markets_template.yaml rename to notebooks/use_cases/capital_markets/capital_markets_template.yaml diff --git a/notebooks/code_samples/capital_markets/quickstart_option_pricing_models.ipynb b/notebooks/use_cases/capital_markets/quickstart_option_pricing_models.ipynb similarity index 100% rename from notebooks/code_samples/capital_markets/quickstart_option_pricing_models.ipynb rename to notebooks/use_cases/capital_markets/quickstart_option_pricing_models.ipynb diff --git a/notebooks/code_samples/capital_markets/quickstart_option_pricing_models_quantlib.ipynb b/notebooks/use_cases/capital_markets/quickstart_option_pricing_models_quantlib.ipynb similarity index 100% rename from notebooks/code_samples/capital_markets/quickstart_option_pricing_models_quantlib.ipynb rename to notebooks/use_cases/capital_markets/quickstart_option_pricing_models_quantlib.ipynb diff --git a/notebooks/code_samples/code_explainer/customer_churn_full_suite.py b/notebooks/use_cases/code_explainer/customer_churn_full_suite.py similarity index 98% rename from notebooks/code_samples/code_explainer/customer_churn_full_suite.py rename to notebooks/use_cases/code_explainer/customer_churn_full_suite.py index 6bd9c2a8a..3f7c82809 100644 --- a/notebooks/code_samples/code_explainer/customer_churn_full_suite.py +++ b/notebooks/use_cases/code_explainer/customer_churn_full_suite.py @@ -7,7 +7,7 @@ Welcome! Let's get you started with the basic process of documenting models with ValidMind. -You will learn how to initialize the ValidMind Library, load a sample dataset to train a simple classification model, +You will learn how to initialize the ValidMind Library, load a sample dataset to train a simple classification model, and then run a ValidMind test suite to quickly generate documentation about the data and model. This script uses the Bank Customer Churn Prediction sample dataset from Kaggle to train the classification model. 
@@ -162,8 +162,8 @@ def load_model(self, version: str) -> Any: ) vm_test_ds = vm.init_dataset( - dataset=test_df, - input_id="test_dataset", + dataset=test_df, + input_id="test_dataset", target_column=customer_churn.target_column ) diff --git a/notebooks/code_samples/code_explainer/model_source_code_documentation_template.yaml b/notebooks/use_cases/code_explainer/model_source_code_documentation_template.yaml similarity index 100% rename from notebooks/code_samples/code_explainer/model_source_code_documentation_template.yaml rename to notebooks/use_cases/code_explainer/model_source_code_documentation_template.yaml diff --git a/notebooks/code_samples/code_explainer/quickstart_code_explainer_demo.ipynb b/notebooks/use_cases/code_explainer/quickstart_code_explainer_demo.ipynb similarity index 100% rename from notebooks/code_samples/code_explainer/quickstart_code_explainer_demo.ipynb rename to notebooks/use_cases/code_explainer/quickstart_code_explainer_demo.ipynb diff --git a/notebooks/code_samples/credit_risk/CreditRiskData.xlsx b/notebooks/use_cases/credit_risk/CreditRiskData.xlsx similarity index 100% rename from notebooks/code_samples/credit_risk/CreditRiskData.xlsx rename to notebooks/use_cases/credit_risk/CreditRiskData.xlsx diff --git a/notebooks/code_samples/credit_risk/application_scorecard_executive.ipynb b/notebooks/use_cases/credit_risk/application_scorecard_executive.ipynb similarity index 100% rename from notebooks/code_samples/credit_risk/application_scorecard_executive.ipynb rename to notebooks/use_cases/credit_risk/application_scorecard_executive.ipynb diff --git a/notebooks/code_samples/credit_risk/application_scorecard_full_suite.ipynb b/notebooks/use_cases/credit_risk/application_scorecard_full_suite.ipynb similarity index 100% rename from notebooks/code_samples/credit_risk/application_scorecard_full_suite.ipynb rename to notebooks/use_cases/credit_risk/application_scorecard_full_suite.ipynb diff --git 
a/notebooks/code_samples/credit_risk/application_scorecard_with_bias.ipynb b/notebooks/use_cases/credit_risk/application_scorecard_with_bias.ipynb similarity index 100% rename from notebooks/code_samples/credit_risk/application_scorecard_with_bias.ipynb rename to notebooks/use_cases/credit_risk/application_scorecard_with_bias.ipynb diff --git a/notebooks/code_samples/credit_risk/application_scorecard_with_ml.ipynb b/notebooks/use_cases/credit_risk/application_scorecard_with_ml.ipynb similarity index 100% rename from notebooks/code_samples/credit_risk/application_scorecard_with_ml.ipynb rename to notebooks/use_cases/credit_risk/application_scorecard_with_ml.ipynb diff --git a/notebooks/code_samples/credit_risk/custom_tests/ScoreBandDiscriminationMetrics.py b/notebooks/use_cases/credit_risk/custom_tests/ScoreBandDiscriminationMetrics.py similarity index 100% rename from notebooks/code_samples/credit_risk/custom_tests/ScoreBandDiscriminationMetrics.py rename to notebooks/use_cases/credit_risk/custom_tests/ScoreBandDiscriminationMetrics.py diff --git a/notebooks/code_samples/credit_risk/document_excel_application_scorecard.ipynb b/notebooks/use_cases/credit_risk/document_excel_application_scorecard.ipynb similarity index 100% rename from notebooks/code_samples/credit_risk/document_excel_application_scorecard.ipynb rename to notebooks/use_cases/credit_risk/document_excel_application_scorecard.ipynb diff --git a/notebooks/code_samples/custom_tests/implement_custom_tests.ipynb b/notebooks/use_cases/custom_tests/implement_custom_tests.ipynb similarity index 100% rename from notebooks/code_samples/custom_tests/implement_custom_tests.ipynb rename to notebooks/use_cases/custom_tests/implement_custom_tests.ipynb diff --git a/notebooks/code_samples/custom_tests/integrate_external_test_providers.ipynb b/notebooks/use_cases/custom_tests/integrate_external_test_providers.ipynb similarity index 100% rename from 
notebooks/code_samples/custom_tests/integrate_external_test_providers.ipynb rename to notebooks/use_cases/custom_tests/integrate_external_test_providers.ipynb diff --git a/notebooks/code_samples/model_validation/validate_application_scorecard.ipynb b/notebooks/use_cases/model_validation/validate_application_scorecard.ipynb similarity index 100% rename from notebooks/code_samples/model_validation/validate_application_scorecard.ipynb rename to notebooks/use_cases/model_validation/validate_application_scorecard.ipynb diff --git a/notebooks/code_samples/nlp_and_llm/datasets/bbc_text_cls.csv b/notebooks/use_cases/nlp_and_llm/datasets/bbc_text_cls.csv similarity index 100% rename from notebooks/code_samples/nlp_and_llm/datasets/bbc_text_cls.csv rename to notebooks/use_cases/nlp_and_llm/datasets/bbc_text_cls.csv diff --git a/notebooks/code_samples/nlp_and_llm/datasets/bbc_text_cls_reference.csv b/notebooks/use_cases/nlp_and_llm/datasets/bbc_text_cls_reference.csv similarity index 100% rename from notebooks/code_samples/nlp_and_llm/datasets/bbc_text_cls_reference.csv rename to notebooks/use_cases/nlp_and_llm/datasets/bbc_text_cls_reference.csv diff --git a/notebooks/code_samples/nlp_and_llm/datasets/cnn_dailymail_100_with_predictions.csv b/notebooks/use_cases/nlp_and_llm/datasets/cnn_dailymail_100_with_predictions.csv similarity index 100% rename from notebooks/code_samples/nlp_and_llm/datasets/cnn_dailymail_100_with_predictions.csv rename to notebooks/use_cases/nlp_and_llm/datasets/cnn_dailymail_100_with_predictions.csv diff --git a/notebooks/code_samples/nlp_and_llm/datasets/cnn_dailymail_500_with_predictions.csv b/notebooks/use_cases/nlp_and_llm/datasets/cnn_dailymail_500_with_predictions.csv similarity index 100% rename from notebooks/code_samples/nlp_and_llm/datasets/cnn_dailymail_500_with_predictions.csv rename to notebooks/use_cases/nlp_and_llm/datasets/cnn_dailymail_500_with_predictions.csv diff --git a/notebooks/code_samples/nlp_and_llm/datasets/sentiments.csv 
b/notebooks/use_cases/nlp_and_llm/datasets/sentiments.csv similarity index 100% rename from notebooks/code_samples/nlp_and_llm/datasets/sentiments.csv rename to notebooks/use_cases/nlp_and_llm/datasets/sentiments.csv diff --git a/notebooks/code_samples/nlp_and_llm/datasets/sentiments_with_predictions.csv b/notebooks/use_cases/nlp_and_llm/datasets/sentiments_with_predictions.csv similarity index 100% rename from notebooks/code_samples/nlp_and_llm/datasets/sentiments_with_predictions.csv rename to notebooks/use_cases/nlp_and_llm/datasets/sentiments_with_predictions.csv diff --git a/notebooks/code_samples/nlp_and_llm/foundation_models_integration_demo.ipynb b/notebooks/use_cases/nlp_and_llm/foundation_models_integration_demo.ipynb similarity index 100% rename from notebooks/code_samples/nlp_and_llm/foundation_models_integration_demo.ipynb rename to notebooks/use_cases/nlp_and_llm/foundation_models_integration_demo.ipynb diff --git a/notebooks/code_samples/nlp_and_llm/foundation_models_summarization_demo.ipynb b/notebooks/use_cases/nlp_and_llm/foundation_models_summarization_demo.ipynb similarity index 100% rename from notebooks/code_samples/nlp_and_llm/foundation_models_summarization_demo.ipynb rename to notebooks/use_cases/nlp_and_llm/foundation_models_summarization_demo.ipynb diff --git a/notebooks/code_samples/nlp_and_llm/gen_ai_rag_template.yaml b/notebooks/use_cases/nlp_and_llm/gen_ai_rag_template.yaml similarity index 100% rename from notebooks/code_samples/nlp_and_llm/gen_ai_rag_template.yaml rename to notebooks/use_cases/nlp_and_llm/gen_ai_rag_template.yaml diff --git a/notebooks/code_samples/nlp_and_llm/hugging_face_integration_demo.ipynb b/notebooks/use_cases/nlp_and_llm/hugging_face_integration_demo.ipynb similarity index 100% rename from notebooks/code_samples/nlp_and_llm/hugging_face_integration_demo.ipynb rename to notebooks/use_cases/nlp_and_llm/hugging_face_integration_demo.ipynb diff --git 
a/notebooks/code_samples/nlp_and_llm/hugging_face_summarization_demo.ipynb b/notebooks/use_cases/nlp_and_llm/hugging_face_summarization_demo.ipynb similarity index 100% rename from notebooks/code_samples/nlp_and_llm/hugging_face_summarization_demo.ipynb rename to notebooks/use_cases/nlp_and_llm/hugging_face_summarization_demo.ipynb diff --git a/notebooks/code_samples/nlp_and_llm/llm_summarization_demo.ipynb b/notebooks/use_cases/nlp_and_llm/llm_summarization_demo.ipynb similarity index 100% rename from notebooks/code_samples/nlp_and_llm/llm_summarization_demo.ipynb rename to notebooks/use_cases/nlp_and_llm/llm_summarization_demo.ipynb diff --git a/notebooks/code_samples/nlp_and_llm/prompt_validation_demo.ipynb b/notebooks/use_cases/nlp_and_llm/prompt_validation_demo.ipynb similarity index 100% rename from notebooks/code_samples/nlp_and_llm/prompt_validation_demo.ipynb rename to notebooks/use_cases/nlp_and_llm/prompt_validation_demo.ipynb diff --git a/notebooks/code_samples/nlp_and_llm/rag_benchmark_demo.ipynb b/notebooks/use_cases/nlp_and_llm/rag_benchmark_demo.ipynb similarity index 100% rename from notebooks/code_samples/nlp_and_llm/rag_benchmark_demo.ipynb rename to notebooks/use_cases/nlp_and_llm/rag_benchmark_demo.ipynb diff --git a/notebooks/code_samples/nlp_and_llm/rag_documentation_demo.ipynb b/notebooks/use_cases/nlp_and_llm/rag_documentation_demo.ipynb similarity index 100% rename from notebooks/code_samples/nlp_and_llm/rag_documentation_demo.ipynb rename to notebooks/use_cases/nlp_and_llm/rag_documentation_demo.ipynb diff --git a/notebooks/code_samples/ongoing_monitoring/application_scorecard_ongoing_monitoring.ipynb b/notebooks/use_cases/ongoing_monitoring/application_scorecard_ongoing_monitoring.ipynb similarity index 100% rename from notebooks/code_samples/ongoing_monitoring/application_scorecard_ongoing_monitoring.ipynb rename to notebooks/use_cases/ongoing_monitoring/application_scorecard_ongoing_monitoring.ipynb diff --git 
a/notebooks/code_samples/ongoing_monitoring/quickstart_customer_churn_ongoing_monitoring.ipynb b/notebooks/use_cases/ongoing_monitoring/quickstart_customer_churn_ongoing_monitoring.ipynb similarity index 100% rename from notebooks/code_samples/ongoing_monitoring/quickstart_customer_churn_ongoing_monitoring.ipynb rename to notebooks/use_cases/ongoing_monitoring/quickstart_customer_churn_ongoing_monitoring.ipynb diff --git a/notebooks/code_samples/ongoing_monitoring/xgboost_model.model b/notebooks/use_cases/ongoing_monitoring/xgboost_model.model similarity index 100% rename from notebooks/code_samples/ongoing_monitoring/xgboost_model.model rename to notebooks/use_cases/ongoing_monitoring/xgboost_model.model diff --git a/notebooks/code_samples/regression/quickstart_regression_full_suite.ipynb b/notebooks/use_cases/regression/quickstart_regression_full_suite.ipynb similarity index 100% rename from notebooks/code_samples/regression/quickstart_regression_full_suite.ipynb rename to notebooks/use_cases/regression/quickstart_regression_full_suite.ipynb diff --git a/notebooks/code_samples/time_series/quickstart_time_series_full_suite.ipynb b/notebooks/use_cases/time_series/quickstart_time_series_full_suite.ipynb similarity index 100% rename from notebooks/code_samples/time_series/quickstart_time_series_full_suite.ipynb rename to notebooks/use_cases/time_series/quickstart_time_series_full_suite.ipynb diff --git a/notebooks/code_samples/time_series/quickstart_time_series_high_code.ipynb b/notebooks/use_cases/time_series/quickstart_time_series_high_code.ipynb similarity index 100% rename from notebooks/code_samples/time_series/quickstart_time_series_high_code.ipynb rename to notebooks/use_cases/time_series/quickstart_time_series_high_code.ipynb From 24e92d96c03c3f8e92bb68f241429563ba75c283 Mon Sep 17 00:00:00 2001 From: Beck <164545837+validbeck@users.noreply.github.com> Date: Mon, 9 Feb 2026 12:19:40 -0800 Subject: [PATCH 02/14] Fixing changed link references pt1 --- 
.cursorrules | 2 +- .gitignore | 2 +- .../model_validation/xgb_model_champion.pkl | Bin 0 -> 117919 bytes scripts/run_e2e_notebooks.py | 28 +++++++++--------- 4 files changed, 16 insertions(+), 16 deletions(-) create mode 100644 notebooks/use_cases/model_validation/xgb_model_champion.pkl diff --git a/.cursorrules b/.cursorrules index 888a87ee9..6d310e7f7 100644 --- a/.cursorrules +++ b/.cursorrules @@ -166,7 +166,7 @@ You are an expert in Python package development, data science, and machine learn ## Documentation and Examples ### Notebook Organization and Structure -- Organize notebooks by purpose: `/quickstart/` for getting started, `/tutorials/` for step-by-step guides, `/how_to/` for specific features, `/code_samples/` for end-to-end examples. +- Organize notebooks by purpose: `/quickstart/` for quick guides to get started with ValidMind, `/tutorials/` for high-level courses covering usage for specific roles or concepts (e.g., model development), `/how_to/` for learning specific ValidMind features (e.g., how to run tests), `/use_cases/` for end-to-end use cases such as documenting or validating specific kinds of models, `/code_sharing/` for sharing notebooks or documenting code internally. - Include clear README files explaining directory purposes and notebook contents. - Use consistent template-based structure across notebooks for uniformity. - Organize content by domain (credit_risk, nlp_and_llm, time_series, etc.) when appropriate. diff --git a/.gitignore b/.gitignore index 42e56c29e..af32788ff 100644 --- a/.gitignore +++ b/.gitignore @@ -194,7 +194,7 @@ lending_club_loan_data_*.csv # Pickle files *.pkl # Sample application scorecard model for validation notebook — do not remove! -!notebooks/code_samples/model_validation/xgb_model_champion.pkl +!notebooks/use_cases/model_validation/xgb_model_champion.pkl # Sample logistic regression model for validation series — do not remove! 
!notebooks/tutorials/model_validation/lr_model_champion.pkl # Sample XGBoost model for validation quickstart — do not remove! diff --git a/notebooks/use_cases/model_validation/xgb_model_champion.pkl b/notebooks/use_cases/model_validation/xgb_model_champion.pkl new file mode 100644 index 0000000000000000000000000000000000000000..bfe7349b673d72f09dc01d5d1640bd3d62a5aa07 GIT binary patch literal 117919 zcmeFa2VB)k`u|U{fS^jcN*qAPISv~h=@@5rHk%X|=_=xgs zbGIffyqbG?d3!haXxYr&-P_GeE^eF<7CAUFF<8@j`2qH$9D->}k1#5<&<$Q4xW9}$w66dw^B6A~Sf(5s%DZd_hjwUC&Yq>!lK$e6I; zVX=SfmDSLQcv@SS=2U_c5@|IFzbjBJB07$CEM`FBz~7~tM8?q4QBlzmF^Rv+FrWhy z{ks(Nfw9pM!Lh^Wz$OfgjQd@lQCw1dTx>$b?-Gq;<04{$L&ARF)vAfHw88%)**KDR zfL@BO|^B>P6~DR*0+^St+t|q(P)%WR*yx zNaM(=k<}tiB26RBBF!T$A}u4UN7jh6inK0oRMY3knEtVUtInEM_fP#NS5{He`|m0$ z9{}Uf5SW+|kpl)MCiJQwUjAp5A`SjhL2a|jpB=Z?UftI|MtWvy!xT>&uk`$QTpfLX zo4U!3TWZsGyQOg-KNBW*4oh0xV@sz!RYu;hRwsG&R{N{EsD1jLQde5sP+jXxMJaIF z0BKgq8Fug6bcrcx%62;v$W9dPl)c*>V7ZohO2rCGWk5v><+=?=)#;=AsH4lUG~4m4 zx{dDvHCH`FZM)~2+M~o?+CJ;By570D(m8_->WUXzk-gK$NNY<+lA|vorK(kTDh=x$ zlddc@Bu`x%OD^Z$u&a(VWqK66V2eJCRC-?Ts+@K88MC*nGrM`Bcm%StGuY>GwbQeh6zORkFBE zmK$#-GhM@r77QX%wM8G9K9Ra?yBnvO{QM76o%_$pcHK&3+$k5ujn`{Q@r*P@VV$?W zPv3814V;@0=MGPqY5mVI!|YqJ)wrF^wIjJ~7UrEe%JDPQnYur|4by0iC$!QWcdJ9g+2y{4UCKmkB^A?ehi_rx)gm% zMM`CgA;pMNm106MqgYU?Q>-X8DYYrKl)4mqNS(v;#(X+~*IX-V;> z_)-{(lEQtXfyS63Qd&{kP})%fC>zhM z;y9c&dIN48H=JVz=%6=?Qu#$MQuzpTOWbf$CVy9q<8#dV@(V8_G&)>I+4jB;G6>f} zcRFt5zRl3#7Dd^=X0kyP% zIRk98fN2BjXsJtMz)p(;je&Yv>eCo#pv95KfRh$y8UqcrxX>7I)zXN@Kw~XUXbd#f z;znb@U5f{efo57fX$&;i(t^f7OD$eB2E4WS&=>$S3@EgKNd{Ozo)FdI;c@JTw-1;D zaNXfr!}W!03fB>?9b7NCMsQu=TEO_jn8P^3*xFHG9AWHWykLxATwpBV{NbG8JmK8n zeBd0Q-_T#^BckwN!f7l}{3xv{Z7IxV zM2VurP~s@@ltju<%5cg^N;0(jGX@w~2&M^fQFI9%78?^D3Cr@|-$K}R`n{wMoS^J6 zp*@?uWuuJJY4S*EZ65~!w!cR%L;BjiC+?#LvzEc4s+Vyb%dV^oX%I8pzI4&#(|x7h z&{M2_YP2$0oT?~kGJttFs6>-*AJmNLyzUD;utOTO!BqODfEenPk7s%YCO|o8Lq^jE zlr?j_u7S1zbwWPGa{HQNVISJ0bpV96s~i%S^|L>)SN&~f(Di3z*zcdlQ|P<_10frK zm`8E|BLaBO 
z0C`aEd!P*{3o!t7%l&{@ZeMQe`#R*~XtAF!$OfazZ_@%~pZ|ReCZhx@s|<{wN?!4d$>QoWE5y3_eqv7P z7tvyZuQ1NJop6vDB{nfyCDg85D)jN5k5g;x5a-mXBvjp6S+p8+8tYZ|7SXkL!pZ)X zg*WNz@qT4}!EyCryr*5BkiX+TE-?2OdL|nPPcH`WR;B9&vusB2&_lS<)gk-`9Dwz+ z4~eP!Z3SN43S0Pm#3zqT7v9|-i?2jwaSsMQ=MNS(7o1-#6>Ahs!AD4tu&~7@q59EK zVfDPvIAe7>|Fv@lzPaiH?%LfOd+X8~b6W`AdJ6blQ!C!K3BpsN#^L$9I||0N%7hH( zFG8n&i}CJ5<{0H27u@;;pi?6-A9F07zu6_9e-v98T@?m!(Mff9+O{xY$$YWZk}lX~ zZDYaOQo-Npy_?tbh`^kfhC{pL3$32s5N19x$DX!NaC)B^T$?z}>=d+Ww)q`jfqQW$`*;ph-5KF{L%0W1Nh?%?cE{MjhvCI+)`D z#;F`VmVa`17=J!18LvHXi7#?<=bw*8{4&3JJRJjmzN;ty^_v$q-O^X^XdJ~K(2c@N zV^ru#QXP(_^J{xO!hUNk%4e)b_WHd@X9W5iRrDefBf`rSI9)gS5KXpL94^n+bN7(* zHS(QCr6|wUclT&6XPbiJ0J3!d;Gq#=RE91urR$!O@;?h@W26>67f7F`e`8(y=BO3@ zhDbYiKV*_>4q-1hXsY&4yQ{7ias(T4&bEYISA0-fwcL}e`H)IJoC#$6OtNBvo7`Z>H}+w#+`OuEX(FoaCNyT( zsOOQT&m38+;M=UP<9zmZ->vNDd$~$xcTFiRJXV^#BV8JPXNDviY><{seJh17F_*fW zLXwA5wuJUiA}+HkOO>L}N`AZBOMx>o$-~SW64$aC$@Y0G`DSNH)puu;GmE;aaviLt z&biY_@r}x4u-ybj^@3yMm`flT>f4%h;HLXdSbd&sPc&gZuUn}&XY_`7>y$&@pK~Hv zOX`yO7T1f1M4cdy5+H35@xyX=hoXM~z z5k~CboFpkd{3uh$r=j9YEW=i)ev4!~FJpEe$WqjE3uZo?eXJ_ndzjgjf1g=BEQM`S zxheDT`c>9*;6v8-xWJBTe@vc4e=Wgm%zyZfD32OQX1emuk0mqcu6)~vTRc2Az@q>> z62O`j)~T>Ih4m<`L1EnqYfV^R!kQA+k@l2&lm-+hN<)e(1=fGC=7V(}tnFYu2WvQ3 zx4}9M)@DiyN5K@4(u&fC(vA{9=|Jg3=|bsB=}zfE=|$;738sWl!YC1x0hCC}U`jM4 zmNJBrKuMwuql}=8qNMzc>z(h2uIQuN|qzkSLN zo^8julzjaC-sa}`&lqwLHJ+}M>@xGm`=?9^#G$ktki z$+Oo{#&p+Fu4|a8oKU3%ndOx!-}ScREZ3HxITAwAP6#b--ogqJUkY~pqNoZrn(0TOT=zs|UKt2KVwaBX;Ff9PcDgc-n09F!! 
zp%!_?1m*`Y)>4(m0LU-^m?Z!t830TaV6Fws6#!BV0HzB7*#-bJ27rVEfJp;D&H=!@ z0U+%FVCn#nc>pkbfSne58Uqem>d_deucZNv0Y@!PGzOftG^8=$qQ#ZQKqD=UX$&;c z(v-%4n-+H(10GtM(HQX5(wxRX3oR{a40vhrrZM27#h1naNK*iqY5-)ae?&?fL(9QD zfa?#}8?G^2TezNZ{otCxwSsE{*8{Erj5~}qjISL9#uCO4#tg;@#sGF60s&%3lLO?*KwA(X27?7RC@@}d=r$ITCgJ78N{BOw6^bI-2%hJs zQ?c$0_xaO7{8?E?sNLv2u62atFK=+=-?bPkMhvB8E^oq>97FN3J{5!qc}MVtrG?z> zItBdYhu(rsMn}h3hV_Om{ z=ta8V%l;DtwnMbgVpId1{&@wyyLqHgq_*epJ!;GuHtNIeh?s|0%xr=ib}tiZ=GI1s zpECU2CwaVW*Ry>3nkBqPWpl1Wtq1(oE{;Om{!>MdE6auYncMjEMHhIJF8%qvv_D?= z^l{kfih_5=3}JE&L+qct6New!z^!b45(>*op>?? z>7@&NuRX!I!Yd`#xxAM59oL?};`xAQvmfA^Rfppp?dD)Fm(6%|Oh3FnrZzt2CSf0! zSfTRsiSp#A^6wJcTFNc{--~Tk9%o4xw!C6ioikR~VYf>QDvu#;bR*fY^p)x+a~nvv z9iB@ikFQIO*7PQ?9~&@BEjqE48s20V$8}J*$SzeIf7>9vTYNxT`t_!CVsHj|EFO~3 zhvzK2rIo~%`YC3Y&SDfU&g}TeWb&$03%21N59U*t9lQNxnY#61Z*`NfMQSXLQX5ut zSL+t7l}b&fN)LDGN}UfikRF7ukam9PrM6A+BkLTeNUEhtBDgzmnTrfgY3ig+8TVd#8v;#!pCeA-5;(kPECckU?} zO|h4(Ud&PKUe-pD<#3BEZy6&wyqzoQPT3(E+`{WEtKdDK8`)0208iERmV43j9sC?hDND8IxuItUPcEVc;+q4jawjE=bb>1^KI zpdTM(n$P#z>dtf55oaB$gU0Hnau2QZxxm?FyvhCsT&D-YT^KjmpwdPOG1) zTxynzx)yCjS1;)x)yP$x&JKSxIm?A>rW40yRvFA$Oz4a16mCSRk1ug^r~pYx4sPoejG?#>^$~qJhXOq6{?+FGPE%hN7I7 zb-8gl!_lPK2I&5d zLx6PH2Ehy3f_>Nq(M#U`UI>!~GWmEYC$|B@nGB>sfV$*1pez7sAjm--ve*Xu5FlT! 
zTa)lJ{!ca_e_63@aZIwGQPeuWmT_&xj&tqB^^@9)svZx;%^7P%>zWIMP(4#|z)*eR zkyuYm*q9?$)JLM{*a*>W!VBTetmXJ&d?%rieIMLC{jk_!&|+ND%S5<#eHOnz=%iTR z|BbNl04rQSVvkoo7>PIXtXOB1t)O=biza9L3DxiK7FHLC+^xfp@RjB>1cP@W*m%VY ze$S0W{!x8V@H+gEchKPtQmzk8I8pci8&yy|Cw`>Y$yqY1jWXy^>V<~ruT z3{(gfyG(>;K?!)%k|BJx`i^+&2$8Gmm5ok}yv|o2S%@7fY{Eu{HMo$}A>55+m-yKo zGle#;eTBo}k2t;eXZXONB>qvITX?PadtvtHFSyP{Z+_d_JwnIh`Q>x>uP3&VlXmx{ z`PQeHf)RVvdQtzJlaZstv#|}|9aql5r=@=#*uxq69AIwNaE0;>Or8H^rmQ=~l zue(%pQyH`Wv%R#BM36`CPLT_pIo96zn6yOQL~=MdT#DSVUBRtCEqQnI-1$}Q&fE-X z$-b_tN7Oz&nLD?#S;LD#Y)+t~RQ;U+TgmVd%Ukqf&)?fG#ZPD;wVilPazE9K#3^n` zIj#q#`qNKJerpDjuQw)5UPyVoz1c$|;WYxo<*NE?Rm*MS6<2%Z zGDXHa$cxo?nN(duj_x0%y60&`cCBKHPk5|REh;#|1P`7^#s|Cm$ZL<`d zg8?|t{M1379lyl3Ut-%YvF(@G_6IkRKTeVj$NwO<$uEn-3!UG;EJ{7kz8BjxLKX;E z;8{k?fuN-k-Ke0Z5y!MIi)utJS_aZI-eq)K6Kg!!)_u`#^fi)|pN$zEaK--#jZd$A3~DG;z^feQpH2#^lq7>HdU*vVxe4f15s4Yr{Sq<=4h z$@?@}Qx?k0Vi}~#0-1cgoF@xtPzJW49su$I*oI>uK)a9zu`J9%{8IaNFi{!~yHC3VQc;;mnK)!k3ikLWh)(LWRVZc+uWo_`>N+ z;#8yFV$Iz7xG?R#kiOd&f4p%{m@)4ar!oz|dvkjUHQO}eDqS+1ILnZA0&(mFbK4_B$)_)gK?>LswShL$am__RqSD zH5!y)hxVrU9d5?&j>UM^{fS&7DV4vKbC}P+x{A-ZW2;);$PjjZH|y)>vSe31P8Klbz?>LQ9oaYDVD6!(Y|WSL2aZLbyKDJ1ySVP z?Rrw$p@Sr+bEeeTY7dF`I4!k(zLc~wxIhwac{AaGpUIw+H<@v7?kG-EIS32%?Uz*!zw$$C&N-`p=n4>+tq$!Ui^7gW^ zq*Jw-&)g{nQdTXN| zTql`%xnPazvBd`F$kC3hzg>XT<=!o+!iCY&@SDpdkHooB{hg_jp5r>n{n2&O>+>jP z;(@`EkU^bD8Z9G>44owdQJ>v#J;e9T;UQ9o+on1Q7DOWF(b{NZ(yQtrz--Fs+n=7^O$AJhA9@>tWeD;j$^pCZ;Cc6 zcQ9eTrzML~nM}&W*5%Xom)Q1C+Mb{)|rM1_uFw^{f==~5hKyNOvG`M;*=ru zm)8Qs5@hI>iYC3WK&J~4nmoiCrHxKSb8Q--9aXMyRd&8W6Hmr*-N(#8^WA%KRaMu~ z*qx4?`MMzGn+qk%k_9EG$@NrJ!TvegdZ<8|qfABQb8S?S?S#e<#OYTvKwO_>G%ENd zimK>_Jc4vk{ViwEfXW{c+sqEx>1{?^nMvqNe+Se;w-O4pzl*$tZJbBId?bvrQGT## z%^8e-iyS7JBAbYXh&eou>+QUp^Y{dhz&~}6XU8wG?U&g0OKkfkw*A2k%pFEa2_0}%{f@ze@# z8c#R*byF(Rfe@w<)wEu48m}|D4fVse2G|B+4LsziD5fb-kAr?dnkJq03+mP!4|Rf& zrWN#L&%EEf+kWd4)Ti-kqh&PtR9yV^X#d-t_0PpN5XeAO``)un7R=yy5UfDB0-*}F z;f^hfSn@W+Ac{d55WZj=L^C)}4&NUGc~BMtl!tBk*zfn@7`ZRdhAf~#x?CTWhdekQ 
z(&T!ftSqd_b<(8r<3LROse^1l{<30QUi~NH6Yi3q_sS>2=Zik#dZS6=&c>I-qWU_* z))5bcYEpsNuVkL^cI^kz!wL2-Kx)%xt1(moxGbf6z9z4YR zw@wt;y-C9Cwrauk<{;r)*;!%yv0Gw=Y!<(%xk5lI#|Z8Cy<+^TkGM`*JRZL>Pl#?b zf}gW}6dxE~0l(dzjXvmK<;HiODUh9E!l_zA@btmdgDzEczVa957UPkZ zU-JRJZ}8|(5&WSQRrt3}Z{z6;rt|YF7U4d}p5iP09`VzjUJ@FwyDW5_?9J_%ohF(N zs#HFA|AJy$L`p5GdB@?5-I9vZ>RMmP)3&F*BNMWilAvAEzA64vcfHlpu*Ux6fOP}1 z?0r0gQ(iLerK{NLM%~n=Qzl7L=4WYqVSUMg875`zOD7(6)l&UyhQu%9y5znPkq&Il zBKwvhj5^Lx@?f)>h=iA9VBJNsBXGf1Z~Idf+`Ghjr2a-h%?s zdH30>m8*gjD@!k`UOVM0F6(qud}N+6n;P7e6rv$naQ+(Eu;3iCzpgJ+dU+A4RO@b0 z>!V*4&(<)ir=d%j?w1ZRdCL+Mob4p0{^Id$@$prRSv?i=*<)AvwEZQv{S&uddDK8` zt0?dM65D=>ZGW&ZlBdU?h;6;Te&!eQiQG11!VMkQkL&-Yj4N4qlJgF|jS~Hk@@e)o zl-@p$OYfYDj@_{2j9RJCf#`Q!;9G=xE!ROY2X#>YYXgzXGB@PCBo1x2P2(zG+lb~z zo1trW^Ej8uce&cZ#whuK7g|zr9*VO#g!X)xi!QaXQ?^@^idxSZggl4qprK!eDl1hj zL3N%UV$Vx%%DIm$k;{2IG;4TQWD{P1qVHco-RA0`E(Y(>)MpbpiwEB5+MZP8W1NXH zdyGS)FO{O*Zs|u2GTXc81S=g6EpH4UH!oh;&ef zigz0CG_7YFl+heV>xXSC`lx3fq=AU2DMz>ASdDO} z73F?C+W&UP{ByBQ_H6rJ7z2?C1S|-$hy?k~LEoS(oC9nFP(RcOG39^hK=XiOGXffYjR2>KrcVAd0PQ12DI9jEl;Mv&gC8n$c3-!^I|NCy+~mXFtQAxRV5zP96cJP8r29?ufJcPNBvZY9En{_lln zUMk`9>NNhHrL|aL=XzmqyE0+^^P&9mi|@GC`#K5hClAHC*I(m~5659EBOhLWaT;FQ zrW5{PpNTtdtIJ&-(SlE#YAN*iq$jLjJXQEQ=e`iR&JHhY^AyK4br4eT`w30HgyTam zN(EJNTf8)-wQxGyjo<#QnETvm7|zUoBHWwS4}TbLkFVBUBseJz1oOqW`Irxp97!@j z=dQKj)K&Ki%?t#=#e602z++J-ai9U!@y@-d%0TNj)3$l~*7f-J}YB7&${&`{ong zGG!MwI;FPhwRHr?YIRXwVrLxd9)#Deu_>Rse>t%&g*bcOk}OZZQOsL?NSbcj zgBjBTGi&dVw6W2xP`b*jB~y-Dps>CEk#R#J*yl8=5y z4>JDzH75JK3;W)9iPT7;uSiPi&o1lcmvH zn6`C>5~fZGxs&OmT3E)AH7`1_R?Wi{RlaqXj9bhnNeerXHkX>KZZ>z4>a42HI(!^Q zS{3wDESR;CBusj#n()#|b@Y;vZ|<75q_je9X4|$ds&_htOrBVklqR=i{D)1I^lZ+O zv`MeX$d4mc*~6czTIAGWOtRWbHC{AUz3rnSx}M_{s(bH=qgj#SblsVX)qB#IG4v~~ zV<#Uc%k$0`PszW_Fp*J=TeTXhJ1@5TF8olact&5VtXSy*ely&bQkP;+sYhu*aiTP&xKiMEyy3UI;rF_mQQ&vE;kUTq_qX9U zw_)9;q;M2W5h<-GZ7A*F>7B*_ln#_mlrEI6l$18;Y{#1F6{7_m;;U5Veyz}RNT;3@|&fsuIu11alDs0PgwQKc7UQ$1$ zj=@H>^HeJ8zUd>HbEG|*=syu%oY9u6YRD=_uP#B#OGsI-B2tdZor`+!EkSKl@{!^A 
z3(EKFwjrg%86*zkk?X^cXaK2!UQP@|2fVwX?#b!s{WVWzQ3a$tRS%)K)BTX?%Q$4b z;tEo3?8k-;>rJns4sy)why=UmsLGWnG-#y{^6q>axwKFzAM72gJYKOrnldC@SyJ~s z8gJ1JHSC;*{JTv-UC1Sb1A8eS9&v(3?Ts4GT!r|jJy98I z4Q>34L7o+VB&q%SlIZ`x&me#~1e~>iX#`xffSCjuX#tZ7z)$}JU_JqOlNkV03cx1; z05Gcnd>#M*6AQqn0st_#0DLw80MiS=Cj15CJt<&J`cnE)LMh>t{*-}~L6j&;3?+^d zPf4T;^KKrXBEugSP0t z#wU&*tMQxD_7l=HG8?>PO3P@D2e}N6(Y7f|aM0Gz{=oiY=?n~p5e1BdA?5o3=@7t} z$mr2+U5fl{YCyLuYxf~dp8^45C9$0UP|af}a@h*hhGLWU%nfb%2nU=_wvRyIuGgy%jHC zFiPmC9ECqtb4Ops2Z}9>MJ#<=EnIsV&0oEK56`~YQ@H4HLeL3y5Zu zwSqriGS>%>-~JQ_^u2(eBzDEm5*!2@vw>Xf$4_X-kt}XRVS7H;wmEmF&MjWiZwucb zwE+99OvfYI*$C}Qh6xqhSz%+_M%;z%ru@n&Cvo3x^Ki3-0&INqHGj2hBwjS|CN}7_ zLHX@%A^&;Q0ny>O3r@-N=UYzC!fT4sg_%9_M7__c%93N75Q)y^Q}PG!xjiTH=Zp-n zIzv~8diWWis&QAI6#ur58~n?@B$}dno%c)%=$WQ6KAtHh2302SO1hCi7eB?j)IuqL zO_0>R&JZcOjXk;0V;Y%jH$>$xrVvvDU52e=C2iF&lzJ6sNb?WemzsQzmfqU-lMJp{ zOLe^3kd#KP$<62O33FjEsae^EvGGnO+f8RMsmt7UUO61WWF0C|SL$Ie<%LZpGivvj z_AXFL*B?HU#@sz6`99vp*0LWe<;=s<)CbAZtqMyO^P4x3hE>@?%uo0#KAl=X&goy0 zID>Cw#px81^T^X@L992)J*lh0dzbkN>VvG~0#|86wasMQ=QKru_feLg;-J{wP=|!< zyvA5m9?6>6?~!7yu1N-y^oet|f#l7zEN0o_aAITYD%m6l6+1e0lsw!D$!_z{l8;js znK;3Y#NQsHD(W31RjT@mxiI%5`TD9hvo-9H)aLbi-&+f7Fkz2M$?m1~$eJ(y#67>3 z!mfG{>2C8;)opzmo7%91ytr~iHR(wj*;X=F^~LTg8TRsqYQcerq@8K5YI;F7u?@CT zOb@TAI(@mA88df*YWEf=MZ=f5s;$OeiglhR*}4Yz7@hkki2kIbQr$|+iR0Bz#kF%& zRT*(9iiNKSG4BqiDb6GfBhshIBXGTe&;hfvp*fx9$<%DtKLO= zi{2x}8B^ui_QO##Cnai8@4a%NzAu`8xeNtOk5k^-Sb{Q6uR@oru0-AYvB+bzf@^9q zR5|i&8S45qRXOL89kO0>M!BM}qw@5v_sa2}^xL0vpD8zXnht*s%1ud4&lhLd|y&}TbjD0M{l0*)c&qt7U!U@IEYdJ>w@`4bxBm50VMw#t2X z{7{d@H&N(C2_0G75)}_PgwC{g{)hk6{>uu=v*MRf_W#cJ?3YmXZxYJr-=;<+)80OQ z31$BeZXbW@YnB3^D**8be6%#86ZIMUO-!Pql-9osL?_u_OZFiHK}zFSrWL_J=zo~|@@f5WE|3Pt|LhO!e=d~C0+}3xumnLCrC=Wf zCRxyu1tz(ioCiV|2ySp32vrcvb;u&x_ktMI1N(41)CI@NA{gw;_0Y}VwGGF~eS%{l z$Z3!cbwc}c4E=&_$dl`bH2GM%`Ev#N$>J|6ludiNoPMp-&oA}PRbk!O$AaT@rTFUo z5WmXD(=nOl310){7ta46n3H@l@2Hp9qFcOhe)lHv;`$(*k<^#JGPVbvZtEj7TW`;; z9?_T|@TC`?*f0~%>$Foec)w3{Uc6R#J@lavH~5V(@$NRE=Jq#Y1wB7;_PXYLD%%PB 
zAD+p_n^x!j-Y(=0XxrK0PX%*3uvV1PbJ(#z>y%yK4lEy!rUW?z` z<~pskQHO~7|u@8Vin zRfO%_IKe){TlbFNYq34K8cE;uYY z?7z#S>|Z97C8##NN|$P#>{NX0K%g|L#ucgJ1}oLsfrA*Uf@M;4Ag8uAT`Eb{^SN6hTTq1}Q7@!^vU# zTV;ihhN&J|lqinY^!9Pvoyw#w`K-9OXdZLY?UK5l?rtgV(_5(^!$wk$NSD%PG$-q- zJ(h-t#Y#q9H%ftStH?6vm+GdzXBeB^Na}rEN9psyTk;KmPR2KWO!fxt^yy#Pj-1;) zPPO3PD^-uZrdL`;B=62}vfcX(Y15)JY17k2RWi7eWVn4H<2!nw@7O&V zq-MzwrfB~IY4VgBs{3|LnWKx+*`yH{Rr}rJB#V2WNe6dVQe)^+pZB((ijTW1h{w{0 zs)6&v6tylGk)g&pzRia(QRRMk@2%gXgy?>&?b~6_Ek#S?`HbuQ`LXN;U6gOxaEpdpH9U^MztvTxm{80p7L@7~D@sjD zZHg_W+&@gazGy(xoG7rq1IGt&b^r$laB2WY25?>ghXrs_0LKJ|Qtl%rKbq4tk>&M#-_+J zC|vn0F%@Oqe!!XDxq|pZiOBnqi}FmG4%@Y?pEBXADU)$Dm0l4YRO#F_<+wa+B(AG~ za)xz9=Z7bwJ8uJ()9>Cu-UXM?z#Uec!=sldzxx=}ae^_sr(v9Olc6rcbNef$Byjc6 z1m)y7H)Tdg19ZI64pe=cpYp3`vT~Wv3CL6i72QPe6_A6H)zzu;Tr%4D!|fBbh2; z44vEGoi+T41MsviKmEg#IXsO6@TU*RD+e$G06eJ!U=jdWDF9#|0Anp+Du8NQz-$1f zTEK+JW&}(L_zMAGQULHB0>Hcg)>^>S0JXG$*#W@62mliVfUgk%<_NIUVozhhK}!P~ z1CCn2Q~}Of8qyeW(b9;<0GKhLi54(vfSVRDZ-9puFm-^Z7BG823oT#*0r(>Tz#IZT zTEH{{;0L9UFLbr|$N>WrgTn?!Muo>m#C-pyodjAAt}9$ixQ1}e;5xzefolTS!JGnP z4&w~t3u6jn2;*i)fibENCXvQ4J}@RQ4siZ(-f*sPesE539?);-FZ7Y6APP?rpbiLh z+mF(k(v||J(Vo&#n+JcGfs}5PAWBb4Z%SWEKT0SioYJ2%kTQr8MTw!rQQ|d?{1^TY z1`5KT%1Q|!EB(Enl}1*g@{R2ORPM6`at-y@Qc~Y6jZ8z!Xrv|ZaiWj6koKEjRaYBX z>XD@?kanP~<`qIJ?Z`1mNg7`%ka@I^x0=@xL5c!szQ*s1Zo}hkxuit*HFZ$AO7j{d z)I((^c%894o%Rd*4*MFp367z@UjOZnw?CGP;13)OFa+6XLkvNW0>&B)I0RXKk&QIu zK^X|J4FO^>5@0Ok#n;eu;ZVfRoMr|LW!lxEi;ObW~572H6D=EouV|k=XEl4 zY;zKqpI^+Tw3HUd)+U8IW68$6evGr_dexJVG-i~Oi(+p#%<8>)Ev-6WCK(ydV|N^_ zBQZCfrRG&05TisVGNsKD_UapjlxSflg#|2RtBvR;*+$$YuX|P`ibMy+jA7eJ?BZr> z`*V&`^wZv?%GJ}-v^+zmQN78M#{~z~l2RMCalPJT^Zl8sgkhIS@yWW()*6LHEsmXG z>@A9jX>DuP-q%a2z2YsIJmsOPU;H-7`|&U$G%aI&RPC9K*+yhvy;tPQ40qCM!Wcz? 
zK~*w!>seCKBh9x@j{>R59Se5by->-#`WIry93l-y4`;4-IZDx~q7D>6vm*B6TQ)2-;;yP9H0Rt4ggR+qfU7)oj{yujEbc@iY( zOEz^+ky^9*GcDS=OP1X?5Nq;UQNciu=+qj;JTOc5O?60TjB1`HCY_tBF5h%1n)dPn z2~uol{BM^j()*oee3$oBB{{Y9ef(yGYTS?tiYd<-)jXvId#v<2llA%;qx(p=e7gRn z#GjOZw+a7Ud-9lp3}h(p{8$Ep&VGOEh1)SaZonf1tm|McC$HaNy#{MESeLjiK5`X1KSqn5K0wB=bWrQZS;(`r8#pC*W9E!$nE1M$w{Ef<0i+7=rJWplh#MVkaLO{6Z3+1kElTcL)f8@Gs zhca-N4(j-MIm$6CL5*YgBfO_bd9w_mx|4RH9o+|`%#KmYPhCo({+}_(v*M3rrC+ZJ z{<*va<`1Z^1xz6Tath$A1xzF0q6N$(&`1lIOaNpT0L&-wZ;_j5J7AIlkahmjuL;ur zx7Xf2I@0a`Ysrb$kw_Uz8BQ5V`SqIMKlGX)NKzmNX{0IeUZRqZM()wdNE*oqJd)to zv#9(d|B{u)O9{NE!25|xUQiCCDb2CqX{42ne)~17zsXk`&nRu(AaBVsmd3k@N>7jm z5A2$9Adf-2;Q0i7)8@;bRPbwBnmpM{iyYIoh@q_925if9 zLw!&e1jvVNNP__7pe_iI2itN!v2m?2-^Kk zIJN2)yu{+UVCB43oO?D?EWW-9x7@GhJ8u~+G*~f4&=09XJ(G^$p|%&qx;1VHUk#1; ziG8o()MRg*ld}-dQ5_MJolgn<4W5b1mEDELdNZ+Jye;3g^8_w)d?UWv^ZSDSoJ?%c z@|vJ~WenOaHs@?sjN{LJzRb_RJsvx)$;DS<97X*Jw!)5gu|k!4Ex8VB4)Fa>9mZ`< zUh%yaf8t-hbHGj4t-&3S8u9yj)D#4#V6k1DkGM)LCjqBN2;Y1oME|l+{F!bG1@mEQ z-kTZDuM|h~EtYpjZ$f6H%?5;PQSl>syYv)S<~5JE=o^4v9}xL%$=~>9cShk2cB^qi zw^gW#jtlBGQORBAAMp8&?D=Yj4!o=SJ|8+sm%nD_g7dAu@h^fF2>uo;g~bLD!f4$M z_}--uT(8Dcyu0W<=T$sFxjp?GU%lioH>*Bf9!8GgXZqjg&AR(?Ik{=PqERHDQ}&cA z-Xvk`mI=7hB7Humw+Y{1i@=9}eZn7bu@+o;8+_VR5Z!hR;ZN+l$yd5^5q~~C1W$DD zDYzQf7FKNy5)3!A7hRH^_>*7Fm9y$u;?x-|UfKuAlj7eclzIHigt7#c@90Fb+AqDh z?}aqUW=l^pso^7{pK7EqeHbK}9G{}r`OsI=Ej>tjcRxg)?1)oI6E+cNrja7&Vc{hiX^Zpjn0%EL~ z5O7#BKXy&g-_?>tTO=vgju|0M+!rBr8qkzYwi!&;1nnlVE=nfi)fiQaoky6_mh^YN zS~Mf6YxK$Wv!8r>ES;@t*vwY3OYB^nX?>BIVKGRmeCG+%%C&v*xG#+eo+lFf_ID)4 zxPtGJo5|9k%k>mb4`z|FWAmf|P3|bN_6}t_E$KsE4{~KTH_0Yd#s;Tmr;i~Y(BT3nVy084ii#>V zs;Oe1Tu_W(|5$Onz>DmAe}fb_%x4y74pg03^ig3vaT)P6Jx@?a(s^KSnid*=aGMY867P@-yM6?4Lf0T4t5vv9gwRLnW&oH1b5QNgd? 
z<7q$l?l61r?%kQ4aq4+~R(Ex!4!l+M_w7DtMtcfdE1IC|k9kbhypQhpN<^DSUB*pj zH$|`8xZr_rcH%1Cub~O|HltS?R$$%Yd*n|3hk$iq;BIDmx~@Wuf^Q3v2A1E8)0;H3l9(E=Vj z0I2Q&JPd%XmU<)xK#d3NwAhmvaM0pNVgMcr0Hi43pOvFz8OoEC0lc&{Co#}MOG^?1 zt+cczG2pGmhr|HZeI|tJ1Tz9$cevJYec_tI zb%bjN*UOFo*QOz%F#)a#TniTh$Zv4IaGr2(a6WJj&~NB3^br$S0#ES$M>9&&{0IRA zcxKxXIuL>hp@hy_^W9Y&cPD8*2)zh>2>l5C2?GcN34;jHgrS7tguk2@6kH?-|D5*| z$TjlIW*{wTq#^hPLXdvQF_3v=35g~jeFqsw_AzlhwW*@pf>8^p^>Q=V-9Rj0QV%SG@94iXOpT>|`}!yh@!*C2q@0`oUG z25DdfAb=65qy>%vP!94!88`;pa)5dv9Rk!L&k3O(xh$0b#~`->F_eQk1JJgd2I+D+IVMSejUfMlS0Dqyz5cy}+@*@dpHaTEzEtBM7%F`ex_EJ`27B^i z^l(+$)@Ht*<+j|?wF-A;R0vmXWHBd{Oyef4 zU&|*3*HsNTlC1LX?7~O(Ojqf;UT2P(N8&rDkFjqf1a{$#olLu?5zHO~JJz~Pft!Sx zGxmc{u*o%UG0r2MSw+WJc!MKiN*WumPU~y3JEzy@%#SB=LC>c$6Qq7z)h^y#%CG{q zJW$CVKjX>;L^WWZjcd!!Wy{z(mj-jQ)f4%V^EUJId&aZOo!VT$u#SAfkt4kM!$9sJ z$DY`Od6id8m; zEV)~IBe)m8T;-odd2?>-%H^B=dx<*#KJTXl!PsGiaIIdfQjxn;Trek7SaEQK(8j4) z`Fej1QP{s!)MNML?^rRYB#)lIx@h-;^Pkm~QlqHsI2|E9Xc)EF$4okuTqHi4 z+*v$XE*maD7C# z7#dn%k<{t7RLk>-(qe6{n0T@lwe|f8rP1)d!Ut)Q`a$Ygs#?@&^_nfi#0S$3tK}eV=bQokr4IVI2suSBv44Uvo3i3QE zR41a)*vgsIyxikrk#B;!xWxvkjmcc9)q?_xX_rMmDY6otPOGAbYoVu@TYm!;ls;FO zEbXPP>opZFEpk*GZuV7ubH1fAr*b9}@>K&tsH(hEu^63!ChTX;{L@3ISKZrf?L0q-#F*H|yA9nPfizhg|#!EY-Bf%vfcS({^$;U+8 z`dTwQ#&t5f_3SVXd9VfRJlujOz1xVZzn_i=mh{2>HYA~e)0*Jv@o&++#vRb99vg6d z$LqM&=w|rvzIc@KCJp;dxj~+gpE}4-$FB)a2?^wH<%g>yuQlX#0hkj2=J)`t1prtp z0AOZ*ocn{x0btz$fcXJTw174ZfVcsGP7Q$20f1%=fan2OXaNBP_`LK#bCeK19sYh(sV5X+L)k{zxE7X~Zrf zDrp2J5WE2JVS-odi0}lW64U|50T8V~MAFm;`9N?2NM4PY1tJ%u10asUF(N=|`T>HM z_DfJ8q=EmFrafqr^i3mR5#j2)sPr?Fp$^00RRe z4+t=jKmdaRh6MIyQ3_v;*Eema;$J@G2d3@d z_EkH?&!~Nv_pnIh^SsxyS;=*{nOm-~N!$YVtL=04!3rxb+1rTe7!ioyXTIduQEB|M zs*&uGwcfmq)f_%0-kiJNFrGg;?Fu*PXbE3!@NzEDumRKTLNSx$Ofk>v1+nw2j&XHs zWN=x^Ih@;>a`xC2Z4rMvQt%XJ(b@9`@Y2wru3YE6n=Kact~>-h8EYTT~5u z`fvv(jpg+({lbO?PvWxH#c*TZn)02urt-$Nj$HjK;e4J^06H+zgN+_Cn%V0;fwN4v z;VKm`W~(n~%Dj2>1;3v^hb`S+%&vLo&Kd@tW=E7TT+oL^y!e?PqhIQX`?qd}lcrzc 
zo*1v^JHGG9Kk{&8>$Wi=-ePyzR%_>RlNv4Nc2!N`;;irT!7T=JCmYOU-3?dr+nUzo zeBRq|=9?a|cTQd7#C7_d&%l0cqocN*{lXRU!|{8GZJw><7R^RQ#YXgwj_5m}_t=Q= z(ZfcD$VYTNCQgi#cTD>A9Tm|#CIUXKFerQwUld;z4wALkF+~?w9p$>|n0jrX5a%49rJf+Xr-aql z1?-t4K7DpmY-8S>dg-1ZjPKfn4ry0KN=)w}KI#2Y*=Js?n2R%&=c-oZ>?i^I}}6$z5hh&GZt*!S<>2kcS;;+v=;N z$1c;vkQAQMKS$F=r>&)F9=mDk{ZOf3`XlLvVH?ry!X6>j{RY*y#V5(()O2dc+nvdKy!03Yv?>PGhMaZ5N3B^)6AX-b@qT zCy!7cF;1h;)LTh)-%w3#etQ%Z_TZ+F7MLk1(jE%WA3RkojeRMk)f%kK3#dyEVO~=! zLuyd3hqjSwEbdI3c0VY&pKK$pe_caLzLhGa9GxLf>zGEZ>$*y~z#db+*zG9o&DJDwen2vPI6Z@36ZSRU-w!fvD z)U2r#7=JpaUuWU?rbSez{R`xWpvvE3+rKch$X5--HWPX0Z?WxfvF#t1L}XunJbpuL zi@l9-y%+ZA+JebA{dz9$qlm_>$D3n*fPfE}X5mv4KcZ8gvav(=nK*4k5!UP636%r} z;sIt4@kPT4ND#;4VeCgdsDCnE*sdcU`$@!u$_r3otzOu+N@u+0;zjJ+t0OA2-G>dk zvuK<8DDK|Y2Dhnt6|ZP@7w_*g8>30{P|1`Fc<$C^cxLSqTz#s5lk5BA)nV?~*-Zyq zy^6<9FGiuv(nM6RiGX%H_Cz_gV^Fv9acJZ0V!XVh5$-*@2R5Cw5glHXgY8!2qfaiK zkw7`1ds~iT>o;ZSka+~!x27-d(d95MPFjYWj%e^N{L%d-v&v7$-(uUp?%f3s5Ae6x z_W#$de~WFuVuJsQNwVSiFT^$?Mrl4@t`W+<32q>6$zqu7-33CKMnof?Wgxi80-8n? zBl%?!?z`6(5%InWa70KW0-DBSi-=$Hx3@s#gR(!?`8_{~Z$ub_I%RQ9Q$G>#K%mp) zBXw!SKG+A(Fc8v+7zg#s>EB+Q{8pEyJ*}skMxY~t+uu|BZzJY!i)|oAfgmRHt;{#U z4FoA!!~%gz7OWtTEP{a`_G6IqfQToHT5?_xzd*19;5d|(^OMc*`Jr7%hX8q@90XaQ zgL)wi`V4vHdSo$9PKP$+x?mgn0YKeQ$4?z(1M=q;+o~S)^_#z4waz%tw@$4lD(ba6 ze{Zt4Dm0i?-A}Vp)on7IFIUv%$mo3(7fTryzb`t^1ta(hsWU4m79dJB;g9{|X;a zJc3Uy8OmSx6I2lq@myAq6TEYYk~1FW%V#dmVs8{(=XNKy;DzCNtoKoCuA%2=#(LCi z?wR#`ZrjQM%=)iw@w1c7nGA=??An>Vn7t;w*}FBS@V$ad`9@c-u(#6huw0Y6TttMQ zs?o70?B=TJY>P_q%=;akIppuk<$U?TJ)K#?JvsM)uT}Ddc{_*YFON-NHoc#t>Tf)c z8**|ED_*qZ>>I4-E3XM=Ypxm29TV^W#DOy?8l*h8(3aJDT1_*VKWIRn$hyk6%w z>}ubKOj&pd)A&?lw$A8auEW_;Ovd6IF3o1T%KUIwyruRrcI_tz-1Xip)^W5iQ#2zH zU(W5u85ZiYy_7q?KfAxz$J+mGVq2Vgk>^*DsMJrdOnbGkH?gX_tgc zug=n@TegZPsZCM~i;v>i=uyZ`DiTuiCklZ=XXVX(orOjABb9{%FG_Pp-=p)oEZk!o z<1ZH8nJvC=v0gN+Jb-Ssxz3)t57pA3-ihMa>5oL$?M}+edl(@%(}FHYcu%$Cx9%}N z(OdeQanjqoldja}Ymrcxe_gEIY&%LR&68dyeki;@aGqFWf3To?B3PLm){j1SqfAV? 
z6Q=xn)k5fU?=)q*pj^29E?jDVXSo<1)?e~_8%Eb2)j)h#GmvV3qODkdq@Eb`?y17r zd#(6p%OHC2nC;@B=)qE)^9JftXo%^y6or#R+CU5@YpN=~h2LadwfpXmH6zc+_iD;m&!k>RtJTKKDis5T*sZ zP!3L{B!{@Av~Pb-cwRQ1;=6^brFusx!@Hu;Xui2}L46gIt=(QA%KA$26~t@fZGVTihz3vxP^c#2)t_q?-0o*RY)dJirz^wvYDd7Dcc&A4}fcJI~tSm^(5IBO0(1zeo2qXj% z+7mhwLI|A*6?(F1>xv+GA_-B1!Gs}%VT2Kc{~)kI{+}txk3)q*S{$~`4nbZz+p+o6 zAT+d9p9aT#+(_6R%Yj8{`FWiuR|$%qWFx5+;Boe zN9xdn-pxm^To^P{YKxSsbx>KH8M;xa2%mN*!rSY8Sig2TUU~I33Y^*!cL`XAeWTvs zpbT?VZ-8bhOr8sb)CpZu2JM_#&lKQ-LikN#EDk)-KnT-(tqWd>1c3=& zTO{6G8V@nz5hi{rKyK_zG?E1vYJ;T!848Y>ANQz*#@yo<7Eb41S9!1({T85mn`yuPzL=W z`%q3JvXOeA45a_8)9{LgFfb5c5Fmizhrt122rw{!!H|K0L^dLj2Mi0OK|Ux4z`l%J z4$|ei$fl+~h@lR-E!c;;<+2dVeSxxIz@R?ZhIRqihQSGr!8t&F2rxjwHUQ~zT~G#& zK|N3g(toNT8;A;oFL)OIs33Q(A`i1T-_#u&_>6sBeJ4jHbCKP8^3yIg<5v{A_>NE% zGsnILazR%{@e{^2XP546%vatM#<$pB?prw`PIYbLe5O`RCnj(l!D^6E2HhpZT$Mbnt|cGr2A zanpE*=i~XyVJWJZj<@mAj!)3N_;|E^r!7-+4~0%on1!RK7BGwTNgL%)*&4mBurD7s z#K!bPbnoynys*kVJlw1`-sEM>CcGWYMz21{Ts<|7x4Rk1Sl&IAOmczAe zZNWZYK)&%iyaY|k{K_O1O~E-;F7amWtJ!zwUUL^M5A%WM+f=;`Z>!vztmB4G%;pVN z+v1oz1x!lAOulx#N$iHx9^4^xpPSgyhR^;qj}xa{RasAQ;|o^yl^=%R&7;gqZt;KW zQ5L7}+h(G0vul92Q_L!)(9u8NOw3bAt@)n}u^F@UV z75-EmYd7`uC;g?V_Z4)PR;6N#%bv)w^){+tRuy5%!qyUU@)J(1*($EMS5)}kNKog5 zkk=C522e#yd=$~82I7=WQA*csabga$P|EPgp+8odA}xpvqn3~9Ax%BL81+BYPz*Vd zCe3T%LD%{`fJ$BeR`jlpXy0z{r4`lF#ADAAy%#bK#ZMobimj5*iG!0%sCqVWg6*2u z6q9y|Qme;FJ+t3Y(KbPJtKrsSt>+!6)^r`o&@D)~wmgIC-R+>d*1ZAZBx zy4hT6(ReTQKG#{|iNF}Kap4^G?C5EV^gFF7#~wOriz>7yerWz&9%TO)tUQQZ)#c0ge^}%qqaj1!2r5Dw zSWS@FUmFLK?I1#XLPx@XDRhxM5rjxW6k#x72w@mu1mSOyi(G_?MJ`l4_#tlA{v$dX zuY-GB^+MTp9_X~iS=`e9EcQND4|hyoh%GN2#Lg)hxMd4H^tyU8yn4byJTZ(znL1Y3 z*!mN8ZI^&PZJ3So+*Rnq`dC+Oj|MUn;rl4hR>N+3>Y%4og2;F!h}i{#a`0pA`Y zURxlrf%v2m#E57G^};k9%EL4q#4e2&8hC1H+k;QrYw9O$!?xyA_aK0Qcr`&aMCUtlTO2bTjU~tQ{P1)5P?Aa0l^7GA`pW>9D)G*5I~rM zJRlZ<$OOkB4a&kkhwdJWxNBf#dQv>_hn< z`vc{m4736HFRM-8h za#fGMW)1otbl<=OLXs+OW zy7c3=ZkoaC7TfbWb=vao_8(OFDf(RLx@UZ#Wg@ry 
z-G07WLKYv;{*uZ$xQN+3b^!hy?7$55jbK-24aYGifjHf08}7V40EwMnGkvEpTk&aj;1T;NO?Ho}mW_tUor?+S? z)CxJGoSccIGN(*>#_W+&&h?$5MfpHM|CJ;zR+rEwWk%9Lh3y`@oTg&Z!!bgbT1E9= zWhw4R{o>Q@R-7=y^QH3a$d%%u+r@jZSDDmxwLenWZk8tZTrEB^z9GKI@fZ5p+!VIG z((^G6D^fO3HaN3Tg6Qdj}@Ie zr_nmzf;dt7Ah{X~%7d}xlwHY4(V%r7$!uq#V%2LmDsA9E<^20Ll5yH-di32gk-w8J zO>1^cyf*01ZM0T(r z)qAroRbNa~3@cO!do%l}XN{X9=54$t3>LA78uV4H?08%eQ1cDtx!TSr@APG56#nJ= zWBI4@+=}^w#y9zDfuLn7?^G;kK{sJ=hRFjA(DHx{19dfmDWL|zf&gw2wF%(ya3@d@z-<5#7?@O(nA658vfYN@PY5Jb zC}_!(agryD(38-c(3cQSh#*7~q6mWt|4G=A%l}nl`Sw>RV8x)BJ5$ljM*1k&vNbwf zyCq6*UKbs`(HG@s^~Kf)$D*C}+MqRe6j)Jd2yUd?9&PLM0lS=Ahdy^-k1lU?$0`>S z^oXm1x-3dS)i?LUm%6V-`IDdFXR}|Tnw2bZ6`$w0aaKHbFPn~ghp6!QIf-~wXl?XB z#}c=kR}VQh00$2pyskDlN$8*__VL&%ayDL}%0@SDx}vj++IaE&DR^e&KHR=I0qsAi zpfmbmT-EG7u~RZIeT=6^VQFaZF}-2pHM0IWj*Fbx3A>j5wmz>o8P znD4`y008p=m}sd+VxYPfFe`wW7BDdYc>VxjZU9RyV0wUBTEGke@bCa&k^t}&0>C^0 z;57t*sRF=*2&k{cj>Le2mWCt-8fj@vV!%;L6A}Yp;s7Tt&Ljq0v@|0z;Hm{oAmFJ5 z%pvec2~;tpZMe^HZ{dEzeT4f4_Y3Y3+!weHaQ)$$!!?HM3fB^@pB(|N8C)y4K5$Lo zTEO|kIlB|!T;cp$5?T{{2ucD?zyub|E{S<<>`S)&2myq)gm#1ugkVA_p);W?p*!S< zK(>1jdJ*~%`VsmQ1`q}k1`(nOLkYtPBMGAkW51RD>;4ND55jK>QZX72DfoCjyy`~; zB#o#;ystp?(ReZu!Ac|g5K#w2CXGiH$*1v>BI1sw9J~?;o>c%5hBRp)RDl2mACo5{ zmFAU4t>^>-7yu%XMra~aW_XPdrpy}eC?YI@hm~Abe*I8W2WdmDOP(@o#3#}}%}4mj zxybv_M~w&tF{vN=4kDN)jr8qjnKD-_K!E`Oga4049E?7sK@0(m0_@8H3g zmiHkI3=06p1?mCdIJ774!(ahyxTgXLD2@y7PQ3 zKfPtL7uayGqgJSveYRuY-L>GBc->Oft6NhQ>a<<8=k{!7@}2#7@qxZfJe|*##AK=F zHD1T9Y;%ph9eJ2>KHC{TTQ`_nFsY-;*s_4j_PoQ1y*sLu@kjW)^@XYh?^olShbu9) zdQU~Uu{Uv-6QOwQmM#pQ-<>^bFoxUp-k85VC0ONIbp;pgOg^lCID`o(RI`W7M&P?= zH{z%aU7Q>2iYu*++jo(pWMhr&o>-LdrPll53hK}QWeV{=pBPC zZ|z~-b~D_Xue$uv%!ZuntFe6fjBb2jojsi6vkPp>#9>TCV+%I1!94s+<~F?iR59~) z>jZXMgR?lJ%2xEb{u4CY5n+e9eOc34Yq^eTyIBi|r%d4WA=q)}5$lJj^K#xCf+h~3b?lv|@B!e2}q?txnYKj-l?cFm&s zYcK@hI2l z$+<*!6%!3!C7Uwb~bLzC>@fS}qqqM8S(ejMaCeKQE#xcrsi&qL; zd%G)A+TWoZFOQL&m+TVgSto?kdLGKuq;9l$%TLnXR3x3+(hXJf8Guwy9cj0w
J zbfqJk??|(ne-*v)9ranJdq_ikV5f z3SV~zQ%S?x)5{Kic>S&wO%Hb{F(d|BrlJrk)5=MHKN?jj#(}uN=Qq$Zv(WBBV>FRIJ(i>`T zr`H~Ntb976xftncrK~e+hR>84NNV(Qt{8amgjBPdITh<{Bv!rQCq4&S z3Zk0drD{HjSJcn36z#JvQ2ULXm9Z0hDZAG0L8o4##q|q1tJj-17rb-MDwc(pQx;yW zh5SQZq?9IE!h)3A>PL+pDA&!MEI&MdE>Eh7e}q~8-llxbKzcHhcPf^iptJIn0ET!N z!eRJ_;TqP8zl-!l+TjQ)LK}iVA&?M6Xiw-!2qAPLbRl#jR46rR*CbIS&tSq3!Z5-J z!YINR0t~(532}ej)yHq2nfzE(C`nC3{(7I#i5*5LrNJC*l(7#p-BQri>dR5%OjF$2 z@hiTe7{wUct;Hc7&!FcQx})Pb5?#Fe4rThCMThujs1w%5C+tgc@u#iW!Y~adHX4b| zR7I%3HW@vy9FL9?S?j_ybDZKb8Y?Uspbdq6(8_RcY;o}%>bVqSJpMG+38{w4;&X6^ z+5&d+%EMOtYaEufnqJ%yC;1Y|b&`IdRgOfNo zd;wBNZ$RtgFOs{hgR&Ekqmq(+a$6H|cOPuPu4$3BL< zKVy)eieHneCMJ+4>W7O0CPSXZ!%SXYQNW4;08;|M%pZ_f2C&)yz{CKs8UVoD047?% z^Z+1(0APjykVJqQTFgldSZJ{%G4Q`FRoRm^00%7%NDMU8(ul-BV=ay(2AXJTN@BoC zi!+G<7cI?547h4>BQfBv1!OG1QwzvgKnpD(_W-T5fb;`+YXKPuP-p>32%xlpJoL}J zNC?9R+<&;|aG&Ac!u^E%2=@){72F@VC$$Jxgnzq)Me5`Uz63u)00Exmc7zUuU_vON zGeIL=k+?$13JfPXK8O%a7)lsU7)cmS7)uyOh$T#bygy@riwog5rLBpw{G<62705$G z;`-*Nw5bgIus~jdeUPF+8j@utke)PB7LmEYp9%6n3^JJJYgc3+WG=`r`!<1ert!hj z%21Fl%Tb#8z(7@_+&{KfYxdU%T#b?lpH!HCY+O@0{9>&&a#TpJU&uthzks_xGyI z)u&C^3wynJhh2mC{nId8X}1S^Gc*q0-{j4@`!~hc&Xlos-b=W+HHK_*s1G}LsSSJJ z)p*Xr)S3?+)sp*sFPS|O6vZ8E7QyCsyQrG%d==}()Z`=gPvQ0bt$F>S7kTdGMW$8l z4g8t&2ULdbj&dpf=IkwFiQh3KlC$(3!nlq9j4$c!Vh?wCj#nBR^Y#$nwG%lUb)Em*6PnL{E&d-*T=AzEGKir>x|-; z4Eiox{a!+uXAAi;{GSSC356H->Iu{RU3{9}&JhdFtAv)f{MACdTJfT&j$}?_>gqEe zrNdfhu~PRYf`@O4a%28MVf~io%6`u!sm0~7w85G6^pg9%B;V+4O7D(Rv^_XPs?tAQ zSfA@Fwy(O9eAjA{IJUGh)yHw2^62iDV*4(w)ZQo6$^+zkQ>E+oNLi^@(z!X~q|@#4sl$DTDyuxS61Obzpf)xS5$89y*<%*DUfjHIkov*VOX7lz z+ti4_V5J`wE^fWwf^MPq65rRqqujaHr!b9MwMU1Nuf@5jYamS=m>eB=l*Y(uH!F?c_9W%V?w-o$9hb=Ylch08;F zia|TNp=FS$dSWF_zgI5Wj&3bZ?LC`n7O6)yJbhoi*gRcKDCLEY#&^U*kH^#ii)mtG zD?_SpN{Fz*w4oHfW&riXDpIVub&`6Uej4?_sFUFMMiARQ3|6nI>r2f%wxV$Q?5|2o z@0DV$Ic0^pwpS>dK8bXiiHEqtiWah~@C9S4+@KV@T2ZIcrzlRCf$U% zGhO6|=+7mXCH!&5U(>RD)j%?rRwNY=4qU? 
zdkqpjkE2khJhX4)GF&--D_XR;2YU9YGg|+81!~#M1TBo}g{t(JhjJEl!fvC|(Wy#m zl%4SgPe>kvj+tFXi%M3L`dx6JI~(wb*JilU;SiKPZ9A^s_B{5vT7Y%SkK%@$K2BR_ zhX*G+W47rET&Kli?D1hG`lZYXjnyBH?ijdY>y4kjU7MfxU)gy4nuPZEFGYSceRkyW z0KfzS4qCt*0u8l*X#{?639UJ46ZpNQG#FN3g$OIgzl4-V&f}kw(MZ0(e<_k&1PH$= zsU?8a1TvP!J4=3jkw{P=If0a=k;sTF1|DDFK?Zpt)_9Y_Ylz@=MIM9nrxbVMSP%WLE}BGG+&CGy*^PikZ|QeN}RmZXoe zlqP$Bfm{Z%pC%vF4PI*6kJ*23leD4vwih|qzo+)!o{);AH83C$DiibxFdx+=fD|Pg z7Lc&uH~{ljIfiXGCQDSZdy|}rK(|ypKH9Q&5PkrOePtOrlrw3f-SN3SY@7!Kjwbx?> z`=~`K(?9DHleO>`6Kv+gx9(WV>#X9& zl*Y0}#Vjis8#C>rM{rFGQ~02VmHA2WZMeYM`l@*kD4upX!7k|7f?MEjz`KmR$@!an z;GR%N*=H-}a0BR6-agBQO)RRVG95BOWqm!8&u*N<+9y9_oiBZ0Zp_<@=R4%$?WyZ= zQm=)$^?^_5RM}NrbwD&WPMXSadq?1uu!h*4E)jQPt4jL*TPT)pb`c|XMzW=H#X zypKsdH}AF`S1s9?9oOKMs_xVpDkFX%uOC~&^*c11O?|4yZS$PjwdQtQr_NvaDwlV2 z1_RUBC5mwN)6E&&h0PsUo48Y~RsX8&>Os@lL~jGGv4b)D_RavlvT-SEH?JPE_ogk^ z$iJFO-|8v9Y5yC3+S_Y9?drmt*KaI89KV~`_V4?6ec{HE6N<{iuwZ}pqk8O!-pUD< zUxf5C>y<-mHV~FrpZAIBx0Z5Q8(O$`Qkc+Tk++z#I#ck+F;x3kuj#CfSA=c0>5@h7 z_X?X^OQ@{dIm#vPCkv-d##72G2ZhL5t?A@9FX*#}rid9~+k_`>QFMRT!PJpc_mm0V z%cNEvk0G;DO~npDDL#oO+Ddr^q13h1w~9+`y9-^nVe0MYuF97yzEG8xFO}#@!F1ze z>B5Im$0dICC~9K$+QRi59mU=unNn&-6Z+x)`P8w(6{vCpJ83~>9np6cB}Glo7Mk{2 zMBl&ifcm)ekoxI67dmM8Kx)8};o{Um{iX0RXOdWgHt%jn>_&qcixF9fFhEFqw1 znX=B6Ws?48LuycSXUUqSsVfdMl$piFK8t6Eiq2K<7oPEZrt}I3pt^Q(6Q?z^R&F1> zg|c0$P$+!gD4sJNrO+8q=vfDbD;_`67m{jCF5GsZrEv1_S!w+2+M@9y2jwpAvM{`D zUv-t`n-$4UdBWl)tLfsj7u4#KNYOoXG`&h^h}fK|BRLp9SL}9m5%t5Ti(5{8q&m&X zqwJiE1wU*i&O5$_9(Cz}Q27>waXNxRat{6T?g!2vJ_34eh@di^&0KkI1h(8l+btXS)tjRSL23OO0;{46GmOe zp`azZaBRc6cx6XD-0Iw9?4EZP&9C(syRT`37Ce24P7hv#ZcSDqJ6#ua@ZLxCi^~_h z)OsTde_RjkkaEyIyAHT%oxSKyxQL6d=HcYr=4eV}dAjKkHX+n9Sk1@it(L4F#3 zO*o57Ab%S_TnsP~0IU%JSPuXooB&`t09aedYYlk~^5Z%L))fG(9RL$8U}^yHUjjhS z2f&vJ0Bs)tza{{5e!x--X#N2BI{~$|fbSCk{}%v&9~4kmiw%hZ_&)*wUhxC!Yq29S zV6Vl2#6SZrzn5_4N!kOvwEW&e84MF}-~ZG?8JK%=4*dxOwA%y8_8>wup+dhi$on$| z*)aU3Fc$YstkQn73tl%Q-`0|S!$9x?;MWp0!kN}j3j`<-x-?=F5u(2Pc7g8~*_Zvt 
zWIr|dt`-PzkPfeHLOKXw8ox4<2I3b8X|kZD$qV^Gv?HRLMr70anQ6o@Z9g3_@QHx_$9*+vljPbndci0sIspeBp*6BF48!-JeOu^$_2f0uVNEMbN> zFJ#TbTe6=%Msd2;vbo6F=h;VT)v?k1R(SrwFjN#1z^KkF;_5}swh^$m7dz3&5CHmuRrI^RZa-wuz4hFl4{NcysOMLYPFpA za<9xb=-|L4wA+E#70zJ|{I>J{@q2jh4)58fZaMgn(GE7yY6kDxCyVdbSmI;#?(oyS zQ`wI#yP$mcP=@k|XH0aKqTQJ+Yni@@Sxr7h-@Nt^&hK7lw%aEKqqAxi`(Z;P-po7` zXMF0z*zTHv3Xb(>O%`5b+xVBVOF~AnA8y>o>n`TAjSe5;Xq$CJ4C}zQ6XWEE;dc|c z{%zm&N+_IuJ4u+)VWYQ~Yq~hOcd($>)*5^ zdxgg#A%$mNURE4%%n~!_-lP&g-j?1+RTXouoRJn&Zd7io%LTdyeMQrE3k7HUN|cHF zT5&=29mVR_m6UTzOoU4Liv;_G5~by^`I7Oh!F0`5o$2F!mI^ruH$>5J@19yOo=C?h zIxE*&XYZ-=fTQE;-4xlk#dO=0XraaWOzOzBlj6?a2b57;Z3^>R9hV$yhe&me{VC7M z`y|WR)kX7L2dUm44vFngT^EmcjaTleHd3g5-k8qI%Ty*k+%C?r>ZM4U@PMk+CrsSv zpjUY7{d?N$=v=W~j0L^0&_vl?Xh!S2l7x+=;Sw{k4qaJqn3QO*Cms%dC54U{K;`N` zrflp2q{`L#U*N` z@Rm}FM+)i-YmM}%$nMV-hIj~-Y_?Y@$vmVS%Iu&Gj?55!9gZmsJ^CpRWQ~xCHzq) zU)!#H)j;^NkasE;zM#7>L4b(^4C37!PopP``xZld#f}oI@H*$4CY3`wD*WIo3*|?ABa_>O2^sFZ} zt@CQSUn5(zrf4lT?lB#i?|qG(jc231Uc<1v>KvM1Z!>mVeH}Haeh&$a>!Pn7199jz zOB^@f9qXn75>dRY<^yibO=cf+x)A4J<*xw#y ze|wbaR38}?IdF9E@PSd${YOR&4Qm+PKz?RmSozZnW#k_}in5 zTm%TeDV!z9AEnoPPmBmpninA9*Axlhk)=tOJ-uL`2xcI70U)yd=s5<06r_VtCW}-W zu}o8s_N7nq5~s#%41_T7A|qlQpm}i;>LkLO#zPFsXuZO|_l
fmjFWkVp1pgL-6f zP2*9f73M&Y(+Gj^5+?{}&>rcV+{WKi`)^N3#X=ew5D>4xh`|4($U+kcRRGLa<#fm+ z&tD-1U``8jS(wwxKsuz!b-}irpKN|_4~|0_s2_j;`5?%1WjU7n0A&D32O!Ax$ZbNJ zETF+TKt7mf1JEAigCLjxw)YeMD~}=-2y5_c{E@hCfx!LRn-lq%Zbt*_tYP_BO247SPE_U!Dg=lKb?$5fS+kN9&7tyMe9+pDe_bZ7UJ&tnsQ zF=E=z%VnO{J&di?dw$q!oKu)2~-p$B#|%{bB2vX4iA^gdUd}osyb( z?)W^GdXj(&KlDL2>=xm%`DJ)l@ijcZ%N11qQpu$0yu+h6A4R)@n&QegC!*w$W+>%g zAydzKJd-o-6cgp0!lXwBvv1BQxMvIQvpuKoU~J<*vPlbDvWN1vb5%1_*rtnjGX8$o z_?g{DvL1y`na!P&nS~=G81>*MoY$L;yrXG%j%_fCRgB)xU0!sSjaa{mD|TAQK3Lp| zTQOu3SIsm4AJ+3^N3GQ5r!HN>Et{-VO|f15|yn8FCJvp%=1W~tvVwoofB=qvpVKdPe( zPKYKCKML~#4;8p2J1N$?I8cs#8wwfm_mykwnA3U9JBcn9tE6-*M!+ZXseHdrh2iHG ziEj4imHw6D3NupXQRfspwjavk(-@bjL zPCNb!UH^<7UFH4`Av$uMxJi{QEa@{^TJ}T|;@i@~&eiv*PUkw)Ij2>U|IQ`Ug#oXm zROj)OHWJ|7CM=_1s=H#e; zb?znAPw%78p6n08pvOa$#pdzilVd^R%%D(ZS@v2 z?i;rYPYVJSJ9`<>gR`QAV@)P2H>WG8tXGtBtIY%IO71wR>gV@LJ@QRA^UoiIHT$~= zGnR--N6(3L`}_IgjVm35vM(2vOF!r-u55cE)y{rFIZfT5_;`AwIOj@N8ZF)_R`O^= zUH!CFylz&NvN`5R&?_EoNA-h`A%jSk-u&bKIXZB>^+Ij+4I&Fo=btEr7 z9?ZsWrYdxMizzAyJ&OFSU*PeGk_O^qyGn57Sw+}y(pB8`f+3#WIUDl_@8eFifO7X;!q2vD z#a?wfptIrd^y(nXv5nAKbrGIYeIU9w$pg~<%0a&Czb18!Ng&Vr4<`?Q@&K$lxCnb4KcozR2Oi_nMAkI&2t!*MzcP@uNFRTeiEzb| z6a2x0AqT)98);)O_CJP7WCxBx41jTm0EQpRfpM4fKv_5r$lFjB3<~6dARhzc0%;IK zUg!q|s1Ja2xnHmk?SXNE@MGIhKkP%0%Rw4!L-?^Dkp7nr@*jK!Qqmv0(5Og28sj^2 zMqABTbj(wS+JZjzJ8crkrBuqyEclA92bk5>^#KhSRGgGEvd?N zTlSRmc{_}sadH8FDDesoYu<}{c%(XeYCs|vY}A7b-+7#CHDwxKyZ#iu=&BVPGUYYL zOo`$0jui2WJ47?ORbo)NK`~>oA(U-hcAiUIu$xOFTEXO*Nqwt~NL~M1^o?p=V z6uWrWK4!s~O>9=V5wpu>p0f`<570Un|JGJVfDm)mrlOv!LkA9@5*lLlJV6zBJ~r6os0$*IdGD%MS3kv+kZRxR2k)-hPB?i-XRj(A^6n|9qU z^||o>+B+AxnzFu+OU0-ZDHVxGZlMgN&e?mdeK|+DBoYz2sU#_ePwvL0hFmi)<(4}S#x3bx|1)PBGw+z0=Xu_b=N)E!KHt{f>%P|B{r6hG&)WM$ zP#q&5s5aOAfvR6(D10^2NopuFR*gD(nwm)sQ_ep#MkuYmC*13=q%vAA7RrsnXalG5 zLjU5g=&aE#CBpYx>76%E2+y9ZrPjSDp&W|((;g9Rlt`~k9ldwCFmAh2oy1L1_uM~- zT2wGy$l7z4%Dr4mx2Bp3rB5~qDQ8bA+s#`;RdeP-A?B3ZswYdGH*sq7uC>&{X)BfO z7VZ)zZ5${$8t_bcbx^tBWVnQCVp&BMWFqD1?@TD;%w586yMxl(P4}v)P5lL<4Jt|T 
zpY#o8QU;^*CRRAQ z*F97@vlVXAbrf=qZHAVQxs84-G{vSmk8z25m!^?lz#sAI|CVI*SNU~CFgXCo7XX+a z;6Hs`5KI=(S__yj0HhEAOc?;O2+&STd*TCN-hhr;z|;W_TEOf9Afo_a0s)Xz05FFD z$SVMtMgXK10L&!dtOZOa&{GSTPvGB`j^asPSjoZa3|4xu%7WFK5y6(7*<1ijD2GD2dC-e>a1O0%uzn6}*=`jgo2|Phga3}O8z`g59@FEN(3?d9B_!9n< zbVSmPBSaGvgjm97|Er3k>4(7s_q!5OJjgB@iG|2LAjg1=1CowLTGH$;^0C%PJVXN0 z>|7$dk~HaIrx3_Ou=9!RL;`6?vm*+o%tTrezk8*TcZhTZa*igyR!Y*ugPcRA%$m9& zjdoX(raq9VK=J{ZibzG8G+NmRUQ+}KOCv9lFijk^1JXhna7@BAJF7^!;A(#DQUfc_gHVW3g<= z1Lpw5frI>T4rPE3^@(|*EJFgM5sx7r9E3xd*e*DRGN4>IhBBZ$@S)9O969{_ooE0) zA}NX2{!GEO%92P4LMo(H2kk;}$DD&bt6)cBI+P&7Ah4Vyf766bDk zo-ZA@RpvEq0=Ir%DQ7cRDGL%(WednIp}KAv{7zhh{pSVYV}mQ18M7q#mem$!K;!)^ z-ZY$j9QvG{{&o&t;CTz1EU01~<P zAIR>18p~GP64;($db5__hOU@RXzO z>@4ia-CR71U7D@rmuL3n_pjZ=*_?@Cv({f>Jxm+0Ze>@Qjh7g9S5#-VizSQgp1$T5 zyJhp8laBM-_YB|{Z{N)-mOteq5@yO;^`F6~z6xW5$_Dd=$KtuL1C6+%7@Du^{F1wF zznkmP_c*ukySw7W|5Uq<{_lQWF|o+?+&tB?%z=CB8cBuDO{P-v#gkO+&$M>yM42g9 z4^0xDhWXO%9O?@TnuQ3KFA9}gJGYcHy(3i?uR(OvtmlI9L{w}RI!MS~7B0Q>I+cFt zlCHLhHAK<@-zw+%ey?u*Vi)E7xU;IslyyaiPQ^;DOJ1syqZHB=X-Cwj8#~eqTg?^* zKmD5SVBVfu#CjEXx7bA)WW7;$xObcO=C7!850xO-!7f6Co3%RW@J*`qtpa-PIS=Ka z+FR;vTOLT6z&GmB{=)>HsswdWo5s}IJReE)xSc}%$mJ5(zWY>OFJhIZISZx91sQ_X z$R$$UF{_mA94=BxgVTkEb8o8KTUn_%<5Ifb@i^&~m>*R~rr4_;uoZO>uMn=E+OMA3 zd?&55Z>Z|Q5LWHHznLoYNfrII<1p1f9`}%*f0Clc5pSuZ@BT@vSc(o_6et{Ax>q=! 
z|CZX3FwtPyORY*Bm1R?zK-HCELWq)C&WY*qK@=PYp5oOIyP6T-ewPF0gwsdnFf zM%A$F2IXCDT6FSpKWXWVOv!|rN>wf1p{!iPOWN1HS@d}1MnUhvGs!CF*~+|=>C)c% z^_4vupHd}6-jd3{_ExPZ&y(s64-@R1S4yUZ^p}P{DW}RK`n|tB|574T;-@y@@9PuC z3uLCI;>pKlCTJ);hQ&uYJeFZv0FUv81QSAILK6b4_bdojgpbNhB(4Ji)?Bd8g0&T_ zr(g{Q>n0ZhtdBGj6Y+l`FKO2vy-2tRp%0-Sp+8{&!JFVi7$VN6$?;Iaza=%1eDPvA zpSv&K1|OB15@~LY9QoU>rj7bVp)3O*^wxVd-64Jt?U+9RmGo6m3rFhE@gjz~1)*vHbp4w087i$h zfL9fmT9k$5t5@U5 zZHd?_{Gn3+xE`Grx?O zNYcUehHDJh6|N;*Ke%pio#6Vw^?>UD;|^mDV+>W`AI7Wfz0&N3wBzmikw3ljT}Vk(8y3ANrAj2?vNsrYmk;;rxO5@ z5~SD2QBWuOd_1HTC)Z*e)GzLi(nwuI!h$x6yRI}}*MfGDwn92hc_4FrVUz2R%TZtu zz=06}M8jTJd;Z~r0*nO!4+3~tfP?f92Ir6#9vW~CK9m7zMMDGUVtJ4c&LIwfV*ngH zNFW~oh6?hCbwE90I&%1nI7km^Aq^ahY2g_924w(nF8bu~^LL^__=qee-UFY0AjF1! zOqfcPr)Bx@2GTTnGC!Ux+1rO}I5msU9{o=Kjo$$-bx1Z>@8Bpl(_tl>T9(1l_U8Pw zbrQMz)VH!eQI_o2zEUo%yfxFFYRbNv+LrZrQ_ke84zfw^-{HLFK3vndMy$;tGj?uu z7C(NYuIz{8MzWm=RWjpzUp~fAhcTG8f^D?>9Osk2fteqC39XJk!kBe=#|DpxVrQR< zWb0>|Ff-3(F+ryG_)+OW)^6NJW{>O?o_1^pTGo3y7G76yjb<@ScdJn5#y4?z*>aYh z!IbefQG2*tMV4%3`h5P`t!kOuR12A-VmH6EaT-6PmmS|)Kb^}lxQ&PGiNzc3YZlYWn_{(CMRg;79 zUEdvy*}Kz>(RLTSNw|y(eX8(*L=U!cxe;EJe+Mrl-?Uyh;VjPRB1gqtMziMrUhMf{ z<+$)t9Ny(SgxS0z4qr>q#RFDNLnpfFGtW+2vqsCB^2U{^Y{K;$T<^y_IGrC8_)yyj z*|cM$_?wo|?Awd^eC{J>zSQJ3ze8Qb=iBe(XWZgAry$G?9h}U2^~_~c+VtZaRoZb& zy`q_T&&%RX@TmlnGW4c3o(BBdDkZ&6$w+h34Fs1Vwoe>)9bd;W+vrCm&Z-!+228Gmm&wX`^qO)r2!j;JWWGb~P ze7<@@Qkd%4@T1a-Zv&`(wVhPe=Spd%MXp2=k*`X+&{|>>RU{Pd>nUB{dAAUg(vY^I zU8PGF8LMtJeI*FTt*N{l_o#v7)jGSZXm!&1MM9U9W`g&DXVkEi7u1|od)3#~2~_4h zbJgB$ro#0_Wh#Hq7eb$7snTy;rRwJ8>4ICc8uhG+70Lx$i)q9BL&A)e_f>WGrU~kJ ziFDSReXbL;a+IY__DBvbTPWF7U{F+Aubpb%?RC;tltNXu;H;2eryDiP&03f{q5(D8 zem@;~*@K#QXp%JBSuQ*&TqwPC-$G&>QzGbQ&yjxLZ?W?6i(>I+`D^`p;eX$zgFs~= z4&MJ$feKavf*_K-< zF8M8;TvP|^IWNFE0h3S<7lh1Dd*e`(PVyX%l*wuwoNtc;g95P0-AQQsp#W6y;0pfgdI#Dr*@fK4o1lx^XX2C= z!*N{H9kjIlx8zFHK}$9)#5#90$MoB>JX90tfiGpGV(Ydo=tD{0K^ly51L$KGr_kdy z2hg*nW3b}&5gfs}ptGaO(AiK2Y;mJIHr(QcQl^r2UB$>{Xaef7uL_5d6MCrFmrz_B;{s1_UJ$jnD(a6bL&+L;_I<#3KmT>_sABAW&)cDv@*`25H1D 
zcvX+=W6}VxxRE#zg*1Dqw6Eo9Ue6<6nS#l+MuZ~56HKn*bvP&=ehW|(r(kjo!j#BQ zb^r8*C`}tlJ49g(_F`$i0wwPAB5_bZlm~mLG=iG8J&^88n_T~~KqU%GAl`_G0uP8e zqR0f|5rl(q1YsaFLD;VaCJ=-G5R4!W%7lE7R?GuFI4BF^zOs4`=Xmj6As)cfMlG z7KgI>4HTT4$u{o3Y6usS)tWtX!Bb`!RwHkG^#?xPZ30^nlgFDM9L!g2-irqw%Vw^G zU&TL0&Bk^Y>hQfhv$^?;>amL0Dcr^E1Z=Cj317^6!Cbqz3FqCZViyFqXDih`IG6rm zT&CGtuIb%}>M%2 z+1jLhwpFKBT!M`U>#NLX_fODerukX16LsBjvzDFN`L^enlc%2J&3c`D)us`Mp+V8Wa^36YC$Uh>G+NoDiZ24Es5Q&ihmi#>GYY2agSmiFw}~9Y?WYh7cJV z5*QozL1lHFx{2`y5FZ<;2n&h*Ik(gMel&;*8WR#68$KcAed~YOag(6%ae<0Nx3N)S z;W4q{!D6|_ijXk>u~9KGzv_FVnBc&%A^y=(G5+x(@V(LxYn}YQ-uk4sG2v0;{Mv{G N>W0@1*9q5~{6Chflf(c3 literal 0 HcmV?d00001 diff --git a/scripts/run_e2e_notebooks.py b/scripts/run_e2e_notebooks.py index 86e99bd4e..63e84116e 100644 --- a/scripts/run_e2e_notebooks.py +++ b/scripts/run_e2e_notebooks.py @@ -7,10 +7,10 @@ Notebooks Tested: - notebooks/quickstart/quickstart_model_documentation.ipynb - - notebooks/code_samples/time_series/quickstart_time_series_full_suite.ipynb - - notebooks/code_samples/regression/quickstart_regression_full_suite.ipynb - - notebooks/code_samples/custom_tests/external_test_providers.ipynb - - notebooks/code_samples/custom_tests/implement_custom_tests.ipynb + - notebooks/use_cases/time_series/quickstart_time_series_full_suite.ipynb + - notebooks/use_cases/regression/quickstart_regression_full_suite.ipynb + - notebooks/use_cases/custom_tests/external_test_providers.ipynb + - notebooks/use_cases/custom_tests/implement_custom_tests.ipynb To add more notebooks to the list, simply add the path to the `NOTEBOOKS_TO_RUN` list. This will use the default project id for the notebook. 
If you want to use a different @@ -40,33 +40,33 @@ NOTEBOOKS_TO_RUN = [ "notebooks/quickstart/quickstart_model_documentation.ipynb", - "notebooks/code_samples/time_series/quickstart_time_series_high_code.ipynb", - "notebooks/code_samples/regression/quickstart_regression_full_suite.ipynb", + "notebooks/use_cases/time_series/quickstart_time_series_high_code.ipynb", + "notebooks/use_cases/regression/quickstart_regression_full_suite.ipynb", "notebooks/how_to/run_unit_metrics.ipynb", - "notebooks/code_samples/custom_tests/integrate_external_test_providers.ipynb", - "notebooks/code_samples/custom_tests/implement_custom_tests.ipynb", + "notebooks/use_cases/custom_tests/integrate_external_test_providers.ipynb", + "notebooks/use_cases/custom_tests/implement_custom_tests.ipynb", "notebooks/how_to/explore_tests.ipynb", ] DATA_TEMPLATE_NOTEBOOKS = [ { # [Demo] Foundation Model - Text Summarization - "path": "notebooks/code_samples/nlp_and_llm/llm_summarization_demo.ipynb", + "path": "notebooks/use_cases/nlp_and_llm/llm_summarization_demo.ipynb", "model": "cm4lr52wy00ck0jpbw6kqhyjl", }, { # [Demo] Hugging Face - Text Summarization - "path": "notebooks/code_samples/nlp_and_llm/hugging_face_summarization_demo.ipynb", + "path": "notebooks/use_cases/nlp_and_llm/hugging_face_summarization_demo.ipynb", "model": "cm4lr52ut00c60jpbe2fxt8ss", }, { # [Demo] Foundation Model - Text Sentiment Analysis - "path": "notebooks/code_samples/nlp_and_llm/llm_summarization_demo.ipynb", + "path": "notebooks/use_cases/nlp_and_llm/llm_summarization_demo.ipynb", "model": "cm4lr52ss00br0jpbtgxxe8w8", }, { # [Demo] Hugging Face - Text Sentiment Analysis - "path": "notebooks/code_samples/nlp_and_llm/hugging_face_summarization_demo.ipynb", + "path": "notebooks/use_cases/nlp_and_llm/hugging_face_summarization_demo.ipynb", "model": "cm4lr52qo00bc0jpbm0vmxxhy", }, { @@ -76,12 +76,12 @@ }, { # [Demo] Credit Risk Model - "path": "notebooks/code_samples/credit_risk/application_scorecard_demo.ipynb", + "path": 
"notebooks/use_cases/credit_risk/application_scorecard_demo.ipynb", "model": "cm4lr52j9009w0jpb4gr7z5o0", }, { # [Demo] Interest Rate Time Series Forecasting Model - "path": "notebooks/code_samples/time_series/quickstart_time_series_full_suite.ipynb", + "path": "notebooks/use_cases/time_series/quickstart_time_series_full_suite.ipynb", "model": "cm4lr52od00ar0jpb9dyra8v8", }, ] From 510f851d5680a6565ccd0457eaba1fb28269cb2f Mon Sep 17 00:00:00 2001 From: Beck <164545837+validbeck@users.noreply.github.com> Date: Mon, 9 Feb 2026 12:35:31 -0800 Subject: [PATCH 03/14] Fixing changed link references in notebooks pt1 --- .../operational_deposit_poc.ipynb | 12 ++++++------ notebooks/how_to/use_dataset_model_objects.ipynb | 16 ++++++++-------- .../3-integrate_custom_tests.ipynb | 2 +- .../4-finalize_testing_documentation.ipynb | 12 ++++++------ .../4-finalize_validation_reporting.ipynb | 6 +++--- .../validate_application_scorecard.ipynb | 4 ++-- 6 files changed, 26 insertions(+), 26 deletions(-) diff --git a/notebooks/code_sharing/operational_deposit/operational_deposit_poc.ipynb b/notebooks/code_sharing/operational_deposit/operational_deposit_poc.ipynb index 890e60d5f..5617a34ab 100644 --- a/notebooks/code_sharing/operational_deposit/operational_deposit_poc.ipynb +++ b/notebooks/code_sharing/operational_deposit/operational_deposit_poc.ipynb @@ -1085,17 +1085,17 @@ "\n", "### Use cases\n", "\n", - "- [Application scorecard demo](../code_samples/credit_risk/application_scorecard_demo.ipynb)\n", - "- [Linear regression documentation demo](../code_samples/regression/quickstart_regression_full_suite.ipynb)\n", - "- [LLM model documentation demo](../code_samples/nlp_and_llm/foundation_models_integration_demo.ipynb)\n", + "- [Application scorecard demo](../use_cases/credit_risk/application_scorecard_demo.ipynb)\n", + "- [Linear regression documentation demo](../use_cases/regression/quickstart_regression_full_suite.ipynb)\n", + "- [LLM model documentation 
demo](../use_cases/nlp_and_llm/foundation_models_integration_demo.ipynb)\n", "\n", "\n", "\n", "### More how-to guides and code samples\n", "\n", "- [Explore available tests in detail](../how_to/explore_tests.ipynb)\n", - "- [In-depth guide for implementing custom tests](../code_samples/custom_tests/implement_custom_tests.ipynb)\n", - "- [In-depth guide to external test providers](../code_samples/custom_tests/integrate_external_test_providers.ipynb)\n", + "- [In-depth guide for implementing custom tests](../use_cases/custom_tests/implement_custom_tests.ipynb)\n", + "- [In-depth guide to external test providers](../use_cases/custom_tests/integrate_external_test_providers.ipynb)\n", "- [Configuring dataset features](../how_to/configure_dataset_features.ipynb)\n", "- [Introduction to unit and composite metrics](../how_to/run_unit_metrics.ipynb)\n", "\n", @@ -1105,7 +1105,7 @@ "\n", "All notebook samples can be found in the following directories of the ValidMind Library GitHub repository:\n", "\n", - "- [Code samples](https://github.com/validmind/validmind-library/tree/main/notebooks/code_samples)\n", + "- [Use cases](https://github.com/validmind/validmind-library/tree/main/notebooks/use_cases)\n", "- [How-to guides](https://github.com/validmind/validmind-library/tree/main/notebooks/how_to)" ] }, diff --git a/notebooks/how_to/use_dataset_model_objects.ipynb b/notebooks/how_to/use_dataset_model_objects.ipynb index 76faa2ffd..1dd315b1f 100644 --- a/notebooks/how_to/use_dataset_model_objects.ipynb +++ b/notebooks/how_to/use_dataset_model_objects.ipynb @@ -540,7 +540,7 @@ "\n", "Other high-level APIs (attributes and methods) of the dataset object are listed [here](https://docs.validmind.ai/validmind/validmind/vm_models.html#VMDataset).\n", "\n", - "If you've gone through the [Implement custom tests notebook](../code_samples/custom_tests/implement_custom_tests.ipynb), you should have a good understanding of how custom tests are implemented in details. 
If you haven't, we recommend going through that notebook first." + "If you've gone through the [Implement custom tests notebook](../use_cases/custom_tests/implement_custom_tests.ipynb), you should have a good understanding of how custom tests are implemented in details. If you haven't, we recommend going through that notebook first." ] }, { @@ -604,7 +604,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "You can move custom tests into separate modules in a folder. It allows you to take one-off tests and move them into an organized structure that makes it easier to manage, maintain and share them. We have provided a seperate notebook with detailed explaination [here](../code_samples/custom_tests/integrate_external_test_providers.ipynb) " + "You can move custom tests into separate modules in a folder. It allows you to take one-off tests and move them into an organized structure that makes it easier to manage, maintain and share them. We have provided a seperate notebook with detailed explaination [here](../use_cases/custom_tests/integrate_external_test_providers.ipynb) " ] }, { @@ -898,17 +898,17 @@ "\n", "### Use cases\n", "\n", - "- [Document an application scorecard model](../code_samples/credit_risk/application_scorecard_full_suite.ipynb)\n", - "- [Linear regression documentation demo](../code_samples/regression/quickstart_regression_full_suite.ipynb)\n", - "- [LLM model documentation demo](../code_samples/nlp_and_llm/foundation_models_integration_demo.ipynb)\n", + "- [Document an application scorecard model](../use_cases/credit_risk/application_scorecard_full_suite.ipynb)\n", + "- [Linear regression documentation demo](../use_cases/regression/quickstart_regression_full_suite.ipynb)\n", + "- [LLM model documentation demo](../use_cases/nlp_and_llm/foundation_models_integration_demo.ipynb)\n", "\n", "\n", "\n", "### More how-to guides and code samples\n", "\n", "- [Explore available tests in detail](../how_to/explore_tests.ipynb)\n", - "- [In-depth guide 
for implementing custom tests](../code_samples/custom_tests/implement_custom_tests.ipynb)\n", - "- [In-depth guide to external test providers](../code_samples/custom_tests/integrate_external_test_providers.ipynb)\n", + "- [In-depth guide for implementing custom tests](../use_cases/custom_tests/implement_custom_tests.ipynb)\n", + "- [In-depth guide to external test providers](../use_cases/custom_tests/integrate_external_test_providers.ipynb)\n", "- [Configuring dataset features](../how_to/configure_dataset_features.ipynb)\n", "- [Introduction to unit and composite tests](../how_to/run_unit_metrics.ipynb)\n", "\n", @@ -918,7 +918,7 @@ "\n", "All notebook samples can be found in the following directories of the ValidMind Library GitHub repository:\n", "\n", - "- [Code samples](https://github.com/validmind/validmind-library/tree/main/notebooks/code_samples)\n", + "- [Use cases](https://github.com/validmind/validmind-library/tree/main/notebooks/use_cases)\n", "- [How-to guides](https://github.com/validmind/validmind-library/tree/main/notebooks/how_to)" ] }, diff --git a/notebooks/tutorials/model_development/3-integrate_custom_tests.ipynb b/notebooks/tutorials/model_development/3-integrate_custom_tests.ipynb index 49df9a5b5..10e068dd2 100644 --- a/notebooks/tutorials/model_development/3-integrate_custom_tests.ipynb +++ b/notebooks/tutorials/model_development/3-integrate_custom_tests.ipynb @@ -13,7 +13,7 @@ "- The function can be as simple or as complex as you need it to be — it can use external libraries, make API calls, or do anything else that you can do in Python.\n", "- The only requirement is that the function signature and return values can be \"understood\" and handled by the ValidMind Library. 
As such, custom tests offer added flexibility by extending the default tests provided by ValidMind, enabling you to document any type of model or use case.\n", "\n", - "**For a more in-depth introduction to custom tests,** refer to our [Implement custom tests](../../code_samples/custom_tests/implement_custom_tests.ipynb) notebook.\n", + "**For a more in-depth introduction to custom tests,** refer to our [Implement custom tests](../../use_cases/custom_tests/implement_custom_tests.ipynb) notebook.\n", "\n", "
Learn by doing\n", "

\n", diff --git a/notebooks/tutorials/model_development/4-finalize_testing_documentation.ipynb b/notebooks/tutorials/model_development/4-finalize_testing_documentation.ipynb index 0af95d90a..a989bb8d1 100644 --- a/notebooks/tutorials/model_development/4-finalize_testing_documentation.ipynb +++ b/notebooks/tutorials/model_development/4-finalize_testing_documentation.ipynb @@ -930,9 +930,9 @@ "\n", "#### Use cases\n", "\n", - "- [Document an application scorecard model](../../code_samples/credit_risk/application_scorecard_full_suite.ipynb)\n", - "- [Linear regression documentation demo](../../code_samples/regression/quickstart_regression_full_suite.ipynb)\n", - "- [LLM model documentation demo](../../code_samples/nlp_and_llm/foundation_models_integration_demo.ipynb)" + "- [Document an application scorecard model](../../use_cases/credit_risk/application_scorecard_full_suite.ipynb)\n", + "- [Linear regression documentation demo](../../use_cases/regression/quickstart_regression_full_suite.ipynb)\n", + "- [LLM model documentation demo](../../use_cases/nlp_and_llm/foundation_models_integration_demo.ipynb)" ] }, { @@ -945,8 +945,8 @@ "\n", "- [Explore available tests in detail](../../how_to/explore_tests.ipynb)\n", "- [In-depth guide on running dataset based tests](../../how_to/run_tests/1_run_dataset_based_tests.ipynb)\n", - "- [In-depth guide for implementing custom tests](../../code_samples/custom_tests/implement_custom_tests.ipynb)\n", - "- [In-depth guide to external test providers](../../code_samples/custom_tests/integrate_external_test_providers.ipynb)\n", + "- [In-depth guide for implementing custom tests](../../use_cases/custom_tests/implement_custom_tests.ipynb)\n", + "- [In-depth guide to external test providers](../../use_cases/custom_tests/integrate_external_test_providers.ipynb)\n", "- [Configuring dataset features](../../how_to/configure_dataset_features.ipynb)\n", "- [Introduction to unit and composite metrics](../../how_to/run_unit_metrics.ipynb)" ] @@ 
-961,7 +961,7 @@ "\n", "All notebook samples can be found in the following directories of the ValidMind Library GitHub repository:\n", "\n", - "- [Code samples](https://github.com/validmind/validmind-library/tree/main/notebooks/code_samples)\n", + "- [Use cases](https://github.com/validmind/validmind-library/tree/main/notebooks/use_cases)\n", "- [How-to guides](https://github.com/validmind/validmind-library/tree/main/notebooks/how_to)" ] }, diff --git a/notebooks/tutorials/model_validation/4-finalize_validation_reporting.ipynb b/notebooks/tutorials/model_validation/4-finalize_validation_reporting.ipynb index fe4e221c9..4660928aa 100644 --- a/notebooks/tutorials/model_validation/4-finalize_validation_reporting.ipynb +++ b/notebooks/tutorials/model_validation/4-finalize_validation_reporting.ipynb @@ -13,7 +13,7 @@ "- The function can be as simple or as complex as you need it to be — it can use external libraries, make API calls, or do anything else that you can do in Python.\n", "- The only requirement is that the function signature and return values can be \"understood\" and handled by the ValidMind Library. As such, custom tests offer added flexibility by extending the default tests provided by ValidMind, enabling you to document any type of model or use case.\n", "\n", - "**For a more in-depth introduction to custom tests,** refer to our [Implement custom tests](../../code_samples/custom_tests/implement_custom_tests.ipynb) notebook.\n", + "**For a more in-depth introduction to custom tests,** refer to our [Implement custom tests](../../use_cases/custom_tests/implement_custom_tests.ipynb) notebook.\n", "\n", "
Learn by doing\n", "

\n", @@ -1179,7 +1179,7 @@ "- [Explore available tests in detail](../../how_to/explore_tests.ipynb)\n", "- [In-depth guide on running dataset based tests](../../how_to/run_tests/1_run_dataset_based_tests.ipynb)\n", "- [In-depth guide for running comparison tests](../../how_to/run_tests/2_run_comparison_tests.ipynb)\n", - "- [In-depth guide for implementing custom tests](../../code_samples/custom_tests/implement_custom_tests.ipynb)" + "- [In-depth guide for implementing custom tests](../../use_cases/custom_tests/implement_custom_tests.ipynb)" ] }, { @@ -1192,7 +1192,7 @@ "\n", "All notebook samples can be found in the following directories of the ValidMind Library GitHub repository:\n", "\n", - "- [Code samples](https://github.com/validmind/validmind-library/tree/main/notebooks/code_samples)\n", + "- [Use cases](https://github.com/validmind/validmind-library/tree/main/notebooks/use_cases)\n", "- [How-to guides](https://github.com/validmind/validmind-library/tree/main/notebooks/how_to)\n", "\n", "Or, visit our [documentation](https://docs.validmind.ai/) to learn more about ValidMind." diff --git a/notebooks/use_cases/model_validation/validate_application_scorecard.ipynb b/notebooks/use_cases/model_validation/validate_application_scorecard.ipynb index 58953711e..3f5c602fd 100644 --- a/notebooks/use_cases/model_validation/validate_application_scorecard.ipynb +++ b/notebooks/use_cases/model_validation/validate_application_scorecard.ipynb @@ -1586,7 +1586,7 @@ "source": [ "
Want to learn more about custom tests?\n", "

\n", - "Refer to our in-depth introduction to custom tests: Implement custom tests
" + "Refer to our in-depth introduction to custom tests: Implement custom tests
" ] }, { @@ -1787,7 +1787,7 @@ "\n", "All notebook samples can be found in the following directories of the ValidMind Library GitHub repository:\n", "\n", - "- [Code samples](https://github.com/validmind/validmind-library/tree/main/notebooks/code_samples)\n", + "- [Use cases](https://github.com/validmind/validmind-library/tree/main/notebooks/use_cases)\n", "- [How-to guides](https://github.com/validmind/validmind-library/tree/main/notebooks/how_to)\n", "\n", "Or, visit our [documentation](https://docs.validmind.ai/) to learn more about ValidMind." From 53ad2f912e29303da31e53c30065077f7cb36d93 Mon Sep 17 00:00:00 2001 From: Beck <164545837+validbeck@users.noreply.github.com> Date: Mon, 9 Feb 2026 12:38:08 -0800 Subject: [PATCH 04/14] Fixing changed link references in notebooks pt2 --- notebooks/how_to/enable_pii_detection.ipynb | 2 +- .../model_development/3-integrate_custom_tests.ipynb | 2 +- .../model_validation/4-finalize_validation_reporting.ipynb | 4 ++-- notebooks/use_cases/agents/document_agentic_ai.ipynb | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/notebooks/how_to/enable_pii_detection.ipynb b/notebooks/how_to/enable_pii_detection.ipynb index 3d648e82c..ca47502df 100644 --- a/notebooks/how_to/enable_pii_detection.ipynb +++ b/notebooks/how_to/enable_pii_detection.ipynb @@ -289,7 +289,7 @@ "source": [ "
Want to learn more about custom tests?\n", "

\n", - "Check out our extended introduction to custom tests — Implement custom tests
" + "Check out our extended introduction to custom tests — Implement custom tests
" ] }, { diff --git a/notebooks/tutorials/model_development/3-integrate_custom_tests.ipynb b/notebooks/tutorials/model_development/3-integrate_custom_tests.ipynb index 10e068dd2..11c82417e 100644 --- a/notebooks/tutorials/model_development/3-integrate_custom_tests.ipynb +++ b/notebooks/tutorials/model_development/3-integrate_custom_tests.ipynb @@ -817,7 +817,7 @@ "\n", "
Want to learn more about test providers?\n", "

\n", - "An extended introduction to test providers can be found in: Integrate external test providers
" + "An extended introduction to test providers can be found in: Integrate external test providers" ] }, { diff --git a/notebooks/tutorials/model_validation/4-finalize_validation_reporting.ipynb b/notebooks/tutorials/model_validation/4-finalize_validation_reporting.ipynb index 4660928aa..ab17670da 100644 --- a/notebooks/tutorials/model_validation/4-finalize_validation_reporting.ipynb +++ b/notebooks/tutorials/model_validation/4-finalize_validation_reporting.ipynb @@ -480,7 +480,7 @@ "\n", "
Want to learn more about custom tests?\n", "

\n", - "Refer to our in-depth introduction to custom tests: Implement custom tests
" + "Refer to our in-depth introduction to custom tests: Implement custom tests" ] }, { @@ -856,7 +856,7 @@ "\n", "
Want to learn more about test providers?\n", "

\n", - "An extended introduction to test providers can be found in: Integrate external test providers
" + "An extended introduction to test providers can be found in: Integrate external test providers" ] }, { diff --git a/notebooks/use_cases/agents/document_agentic_ai.ipynb b/notebooks/use_cases/agents/document_agentic_ai.ipynb index bffd3863f..6ab549980 100644 --- a/notebooks/use_cases/agents/document_agentic_ai.ipynb +++ b/notebooks/use_cases/agents/document_agentic_ai.ipynb @@ -2100,7 +2100,7 @@ "Learn more about the ValidMind Library tools we used in this notebook:\n", "\n", "- [Custom prompts](https://docs.validmind.ai/notebooks/how_to/customize_test_result_descriptions.html)\n", - "- [Custom tests](https://docs.validmind.ai/notebooks/code_samples/custom_tests/implement_custom_tests.html)\n", + "- [Custom tests](https://docs.validmind.ai/notebooks/use_cases/custom_tests/implement_custom_tests.html)\n", "- [ValidMind scorers](https://docs.validmind.ai/notebooks/how_to/assign_scores_complete_tutorial.html)\n", "\n", "We also offer many more interactive notebooks to help you document models:\n", From ebba556f395c696d1afefe0620615e7cb9f6ad02 Mon Sep 17 00:00:00 2001 From: Beck <164545837+validbeck@users.noreply.github.com> Date: Mon, 9 Feb 2026 12:53:50 -0800 Subject: [PATCH 05/14] Moving use_cases/custom_tests to how_to --- .../operational_deposit/operational_deposit_poc.ipynb | 4 ++-- notebooks/how_to/enable_pii_detection.ipynb | 2 +- .../custom_tests => how_to}/implement_custom_tests.ipynb | 0 .../integrate_external_test_providers.ipynb | 0 notebooks/how_to/use_dataset_model_objects.ipynb | 8 ++++---- .../model_development/3-integrate_custom_tests.ipynb | 4 ++-- .../4-finalize_testing_documentation.ipynb | 4 ++-- .../4-finalize_validation_reporting.ipynb | 8 ++++---- notebooks/use_cases/agents/document_agentic_ai.ipynb | 2 +- .../model_validation/validate_application_scorecard.ipynb | 2 +- scripts/run_e2e_notebooks.py | 8 ++++---- 11 files changed, 21 insertions(+), 21 deletions(-) rename notebooks/{use_cases/custom_tests => how_to}/implement_custom_tests.ipynb 
(100%) rename notebooks/{use_cases/custom_tests => how_to}/integrate_external_test_providers.ipynb (100%) diff --git a/notebooks/code_sharing/operational_deposit/operational_deposit_poc.ipynb b/notebooks/code_sharing/operational_deposit/operational_deposit_poc.ipynb index 5617a34ab..3a0943fb2 100644 --- a/notebooks/code_sharing/operational_deposit/operational_deposit_poc.ipynb +++ b/notebooks/code_sharing/operational_deposit/operational_deposit_poc.ipynb @@ -1094,8 +1094,8 @@ "### More how-to guides and code samples\n", "\n", "- [Explore available tests in detail](../how_to/explore_tests.ipynb)\n", - "- [In-depth guide for implementing custom tests](../use_cases/custom_tests/implement_custom_tests.ipynb)\n", - "- [In-depth guide to external test providers](../use_cases/custom_tests/integrate_external_test_providers.ipynb)\n", + "- [In-depth guide for implementing custom tests](../../how_to/implement_custom_tests.ipynb)\n", + "- [In-depth guide to external test providers](../../how_to/integrate_external_test_providers.ipynb)\n", "- [Configuring dataset features](../how_to/configure_dataset_features.ipynb)\n", "- [Introduction to unit and composite metrics](../how_to/run_unit_metrics.ipynb)\n", "\n", diff --git a/notebooks/how_to/enable_pii_detection.ipynb b/notebooks/how_to/enable_pii_detection.ipynb index ca47502df..7d5779da1 100644 --- a/notebooks/how_to/enable_pii_detection.ipynb +++ b/notebooks/how_to/enable_pii_detection.ipynb @@ -289,7 +289,7 @@ "source": [ "
Want to learn more about custom tests?\n", "

\n", - "Check out our extended introduction to custom tests — Implement custom tests
" + "Check out our extended introduction to custom tests — Implement custom tests" ] }, { diff --git a/notebooks/use_cases/custom_tests/implement_custom_tests.ipynb b/notebooks/how_to/implement_custom_tests.ipynb similarity index 100% rename from notebooks/use_cases/custom_tests/implement_custom_tests.ipynb rename to notebooks/how_to/implement_custom_tests.ipynb diff --git a/notebooks/use_cases/custom_tests/integrate_external_test_providers.ipynb b/notebooks/how_to/integrate_external_test_providers.ipynb similarity index 100% rename from notebooks/use_cases/custom_tests/integrate_external_test_providers.ipynb rename to notebooks/how_to/integrate_external_test_providers.ipynb diff --git a/notebooks/how_to/use_dataset_model_objects.ipynb b/notebooks/how_to/use_dataset_model_objects.ipynb index 1dd315b1f..e63d62044 100644 --- a/notebooks/how_to/use_dataset_model_objects.ipynb +++ b/notebooks/how_to/use_dataset_model_objects.ipynb @@ -540,7 +540,7 @@ "\n", "Other high-level APIs (attributes and methods) of the dataset object are listed [here](https://docs.validmind.ai/validmind/validmind/vm_models.html#VMDataset).\n", "\n", - "If you've gone through the [Implement custom tests notebook](../use_cases/custom_tests/implement_custom_tests.ipynb), you should have a good understanding of how custom tests are implemented in details. If you haven't, we recommend going through that notebook first." + "If you've gone through the [Implement custom tests notebook](implement_custom_tests.ipynb), you should have a good understanding of how custom tests are implemented in details. If you haven't, we recommend going through that notebook first." ] }, { @@ -604,7 +604,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "You can move custom tests into separate modules in a folder. It allows you to take one-off tests and move them into an organized structure that makes it easier to manage, maintain and share them. 
We have provided a seperate notebook with detailed explaination [here](../use_cases/custom_tests/integrate_external_test_providers.ipynb) " + "You can move custom tests into separate modules in a folder. It allows you to take one-off tests and move them into an organized structure that makes it easier to manage, maintain and share them. We have provided a seperate notebook with detailed explaination [here](integrate_external_test_providers.ipynb) " ] }, { @@ -907,8 +907,8 @@ "### More how-to guides and code samples\n", "\n", "- [Explore available tests in detail](../how_to/explore_tests.ipynb)\n", - "- [In-depth guide for implementing custom tests](../use_cases/custom_tests/implement_custom_tests.ipynb)\n", - "- [In-depth guide to external test providers](../use_cases/custom_tests/integrate_external_test_providers.ipynb)\n", + "- [In-depth guide for implementing custom tests](implement_custom_tests.ipynb)\n", + "- [In-depth guide to external test providers](integrate_external_test_providers.ipynb)\n", "- [Configuring dataset features](../how_to/configure_dataset_features.ipynb)\n", "- [Introduction to unit and composite tests](../how_to/run_unit_metrics.ipynb)\n", "\n", diff --git a/notebooks/tutorials/model_development/3-integrate_custom_tests.ipynb b/notebooks/tutorials/model_development/3-integrate_custom_tests.ipynb index 11c82417e..229a46456 100644 --- a/notebooks/tutorials/model_development/3-integrate_custom_tests.ipynb +++ b/notebooks/tutorials/model_development/3-integrate_custom_tests.ipynb @@ -13,7 +13,7 @@ "- The function can be as simple or as complex as you need it to be — it can use external libraries, make API calls, or do anything else that you can do in Python.\n", "- The only requirement is that the function signature and return values can be \"understood\" and handled by the ValidMind Library. 
As such, custom tests offer added flexibility by extending the default tests provided by ValidMind, enabling you to document any type of model or use case.\n", "\n", - "**For a more in-depth introduction to custom tests,** refer to our [Implement custom tests](../../use_cases/custom_tests/implement_custom_tests.ipynb) notebook.\n", + "**For a more in-depth introduction to custom tests,** refer to our [Implement custom tests](../../how_to/implement_custom_tests.ipynb) notebook.\n", "\n", "
Learn by doing\n", "

\n", @@ -817,7 +817,7 @@ "\n", "
Want to learn more about test providers?\n", "

\n", - "An extended introduction to test providers can be found in: Integrate external test providers
" + "An extended introduction to test providers can be found in: Integrate external test providers
" ] }, { diff --git a/notebooks/tutorials/model_development/4-finalize_testing_documentation.ipynb b/notebooks/tutorials/model_development/4-finalize_testing_documentation.ipynb index a989bb8d1..9961e9230 100644 --- a/notebooks/tutorials/model_development/4-finalize_testing_documentation.ipynb +++ b/notebooks/tutorials/model_development/4-finalize_testing_documentation.ipynb @@ -945,8 +945,8 @@ "\n", "- [Explore available tests in detail](../../how_to/explore_tests.ipynb)\n", "- [In-depth guide on running dataset based tests](../../how_to/run_tests/1_run_dataset_based_tests.ipynb)\n", - "- [In-depth guide for implementing custom tests](../../use_cases/custom_tests/implement_custom_tests.ipynb)\n", - "- [In-depth guide to external test providers](../../use_cases/custom_tests/integrate_external_test_providers.ipynb)\n", + "- [In-depth guide for implementing custom tests](../../how_to/implement_custom_tests.ipynb)\n", + "- [In-depth guide to external test providers](../../how_to/integrate_external_test_providers.ipynb)\n", "- [Configuring dataset features](../../how_to/configure_dataset_features.ipynb)\n", "- [Introduction to unit and composite metrics](../../how_to/run_unit_metrics.ipynb)" ] diff --git a/notebooks/tutorials/model_validation/4-finalize_validation_reporting.ipynb b/notebooks/tutorials/model_validation/4-finalize_validation_reporting.ipynb index ab17670da..999b07d4e 100644 --- a/notebooks/tutorials/model_validation/4-finalize_validation_reporting.ipynb +++ b/notebooks/tutorials/model_validation/4-finalize_validation_reporting.ipynb @@ -13,7 +13,7 @@ "- The function can be as simple or as complex as you need it to be — it can use external libraries, make API calls, or do anything else that you can do in Python.\n", "- The only requirement is that the function signature and return values can be \"understood\" and handled by the ValidMind Library. 
As such, custom tests offer added flexibility by extending the default tests provided by ValidMind, enabling you to document any type of model or use case.\n", "\n", - "**For a more in-depth introduction to custom tests,** refer to our [Implement custom tests](../../use_cases/custom_tests/implement_custom_tests.ipynb) notebook.\n", + "**For a more in-depth introduction to custom tests,** refer to our [Implement custom tests](../../how_to/implement_custom_tests.ipynb) notebook.\n", "\n", "
Learn by doing\n", "

\n", @@ -480,7 +480,7 @@ "\n", "
Want to learn more about custom tests?\n", "

\n", - "Refer to our in-depth introduction to custom tests: Implement custom tests
" + "Refer to our in-depth introduction to custom tests: Implement custom tests
" ] }, { @@ -856,7 +856,7 @@ "\n", "
Want to learn more about test providers?\n", "

\n", - "An extended introduction to test providers can be found in: Integrate external test providers
" + "An extended introduction to test providers can be found in: Integrate external test providers" ] }, { @@ -1179,7 +1179,7 @@ "- [Explore available tests in detail](../../how_to/explore_tests.ipynb)\n", "- [In-depth guide on running dataset based tests](../../how_to/run_tests/1_run_dataset_based_tests.ipynb)\n", "- [In-depth guide for running comparison tests](../../how_to/run_tests/2_run_comparison_tests.ipynb)\n", - "- [In-depth guide for implementing custom tests](../../use_cases/custom_tests/implement_custom_tests.ipynb)" + "- [In-depth guide for implementing custom tests](../../how_to/implement_custom_tests.ipynb)" ] }, { diff --git a/notebooks/use_cases/agents/document_agentic_ai.ipynb b/notebooks/use_cases/agents/document_agentic_ai.ipynb index 6ab549980..1454b11a7 100644 --- a/notebooks/use_cases/agents/document_agentic_ai.ipynb +++ b/notebooks/use_cases/agents/document_agentic_ai.ipynb @@ -2100,7 +2100,7 @@ "Learn more about the ValidMind Library tools we used in this notebook:\n", "\n", "- [Custom prompts](https://docs.validmind.ai/notebooks/how_to/customize_test_result_descriptions.html)\n", - "- [Custom tests](https://docs.validmind.ai/notebooks/use_cases/custom_tests/implement_custom_tests.html)\n", + "- [Custom tests](https://docs.validmind.ai/notebooks/how_to/implement_custom_tests.html)\n", "- [ValidMind scorers](https://docs.validmind.ai/notebooks/how_to/assign_scores_complete_tutorial.html)\n", "\n", "We also offer many more interactive notebooks to help you document models:\n", diff --git a/notebooks/use_cases/model_validation/validate_application_scorecard.ipynb b/notebooks/use_cases/model_validation/validate_application_scorecard.ipynb index 3f5c602fd..e839f3720 100644 --- a/notebooks/use_cases/model_validation/validate_application_scorecard.ipynb +++ b/notebooks/use_cases/model_validation/validate_application_scorecard.ipynb @@ -1586,7 +1586,7 @@ "source": [ "
Want to learn more about custom tests?\n", "

\n", - "Refer to our in-depth introduction to custom tests: Implement custom tests
" + "Refer to our in-depth introduction to custom tests: Implement custom tests" ] }, { diff --git a/scripts/run_e2e_notebooks.py b/scripts/run_e2e_notebooks.py index 63e84116e..94c586eab 100644 --- a/scripts/run_e2e_notebooks.py +++ b/scripts/run_e2e_notebooks.py @@ -9,8 +9,8 @@ - notebooks/quickstart/quickstart_model_documentation.ipynb - notebooks/use_cases/time_series/quickstart_time_series_full_suite.ipynb - notebooks/use_cases/regression/quickstart_regression_full_suite.ipynb - - notebooks/use_cases/custom_tests/external_test_providers.ipynb - - notebooks/use_cases/custom_tests/implement_custom_tests.ipynb + - notebooks/how_to/integrate_external_test_providers.ipynb + - notebooks/how_to/implement_custom_tests.ipynb To add more notebooks to the list, simply add the path to the `NOTEBOOKS_TO_RUN` list. This will use the default project id for the notebook. If you want to use a different @@ -43,8 +43,8 @@ "notebooks/use_cases/time_series/quickstart_time_series_high_code.ipynb", "notebooks/use_cases/regression/quickstart_regression_full_suite.ipynb", "notebooks/how_to/run_unit_metrics.ipynb", - "notebooks/use_cases/custom_tests/integrate_external_test_providers.ipynb", - "notebooks/use_cases/custom_tests/implement_custom_tests.ipynb", + "notebooks/how_to/integrate_external_test_providers.ipynb", + "notebooks/how_to/implement_custom_tests.ipynb", "notebooks/how_to/explore_tests.ipynb", ] From 7029a70294f06f8a0bc0bee6dcb7e58ef680fe5f Mon Sep 17 00:00:00 2001 From: Beck <164545837+validbeck@users.noreply.github.com> Date: Mon, 9 Feb 2026 14:20:49 -0800 Subject: [PATCH 06/14] Reorg how_to --- .../{ => data_and_datasets}/dataset_image.png | Bin .../configure_dataset_features.ipynb | 0 .../dataset_inputs}/load_datasets_predictions.ipynb | 0 .../how_to/{ => data_and_datasets}/model_image.png | Bin .../understand_utilize_rawdata.ipynb | 0 .../use_dataset_model_objects.ipynb | 0 .../{ => metrics}/log_metrics_over_time.ipynb | 0 .../how_to/{ => 
metrics}/run_unit_metrics.ipynb | 0 .../assign_scores_complete_tutorial.ipynb | 0 .../{ => security}/enable_pii_detection.ipynb | 0 .../custom_tests}/implement_custom_tests.ipynb | 0 .../integrate_external_test_providers.ipynb | 0 .../explore_tests}/explore_test_suites.ipynb | 0 .../explore_tests}/explore_test_suites_output.png | Bin .../{ => tests/explore_tests}/explore_tests.ipynb | 0 .../explore_tests}/test_suite_output1.png | Bin .../explore_tests}/test_suite_output2.png | Bin .../run_tests/1_run_dataset_based_tests.ipynb | 0 .../run_tests/2_run_comparison_tests.ipynb | 0 .../customize_test_result_descriptions.ipynb | 0 .../configure_tests}/filter_input_columns.ipynb | 0 .../run_tests_that_require_multiple_datasets.ipynb | 0 ...ocument_multiple_results_for_the_same_test.ipynb | 0 .../run_documentation_sections.ipynb | 0 .../run_documentation_tests_with_config.ipynb | 0 25 files changed, 0 insertions(+), 0 deletions(-) rename notebooks/how_to/{ => data_and_datasets}/dataset_image.png (100%) rename notebooks/how_to/{ => data_and_datasets/dataset_inputs}/configure_dataset_features.ipynb (100%) rename notebooks/how_to/{ => data_and_datasets/dataset_inputs}/load_datasets_predictions.ipynb (100%) rename notebooks/how_to/{ => data_and_datasets}/model_image.png (100%) rename notebooks/how_to/{ => data_and_datasets}/understand_utilize_rawdata.ipynb (100%) rename notebooks/how_to/{ => data_and_datasets}/use_dataset_model_objects.ipynb (100%) rename notebooks/how_to/{ => metrics}/log_metrics_over_time.ipynb (100%) rename notebooks/how_to/{ => metrics}/run_unit_metrics.ipynb (100%) rename notebooks/how_to/{ => scoring}/assign_scores_complete_tutorial.ipynb (100%) rename notebooks/how_to/{ => security}/enable_pii_detection.ipynb (100%) rename notebooks/how_to/{ => tests/custom_tests}/implement_custom_tests.ipynb (100%) rename notebooks/how_to/{ => tests/custom_tests}/integrate_external_test_providers.ipynb (100%) rename notebooks/how_to/{ => 
tests/explore_tests}/explore_test_suites.ipynb (100%) rename notebooks/how_to/{ => tests/explore_tests}/explore_test_suites_output.png (100%) rename notebooks/how_to/{ => tests/explore_tests}/explore_tests.ipynb (100%) rename notebooks/how_to/{ => tests/explore_tests}/test_suite_output1.png (100%) rename notebooks/how_to/{ => tests/explore_tests}/test_suite_output2.png (100%) rename notebooks/how_to/{ => tests}/run_tests/1_run_dataset_based_tests.ipynb (100%) rename notebooks/how_to/{ => tests}/run_tests/2_run_comparison_tests.ipynb (100%) rename notebooks/how_to/{ => tests/run_tests/configure_tests}/customize_test_result_descriptions.ipynb (100%) rename notebooks/how_to/{ => tests/run_tests/configure_tests}/filter_input_columns.ipynb (100%) rename notebooks/how_to/{ => tests/run_tests/configure_tests}/run_tests_that_require_multiple_datasets.ipynb (100%) rename notebooks/how_to/{ => tests/run_tests/documentation_tests}/document_multiple_results_for_the_same_test.ipynb (100%) rename notebooks/how_to/{ => tests/run_tests/documentation_tests}/run_documentation_sections.ipynb (100%) rename notebooks/how_to/{ => tests/run_tests/documentation_tests}/run_documentation_tests_with_config.ipynb (100%) diff --git a/notebooks/how_to/dataset_image.png b/notebooks/how_to/data_and_datasets/dataset_image.png similarity index 100% rename from notebooks/how_to/dataset_image.png rename to notebooks/how_to/data_and_datasets/dataset_image.png diff --git a/notebooks/how_to/configure_dataset_features.ipynb b/notebooks/how_to/data_and_datasets/dataset_inputs/configure_dataset_features.ipynb similarity index 100% rename from notebooks/how_to/configure_dataset_features.ipynb rename to notebooks/how_to/data_and_datasets/dataset_inputs/configure_dataset_features.ipynb diff --git a/notebooks/how_to/load_datasets_predictions.ipynb b/notebooks/how_to/data_and_datasets/dataset_inputs/load_datasets_predictions.ipynb similarity index 100% rename from 
notebooks/how_to/load_datasets_predictions.ipynb rename to notebooks/how_to/data_and_datasets/dataset_inputs/load_datasets_predictions.ipynb diff --git a/notebooks/how_to/model_image.png b/notebooks/how_to/data_and_datasets/model_image.png similarity index 100% rename from notebooks/how_to/model_image.png rename to notebooks/how_to/data_and_datasets/model_image.png diff --git a/notebooks/how_to/understand_utilize_rawdata.ipynb b/notebooks/how_to/data_and_datasets/understand_utilize_rawdata.ipynb similarity index 100% rename from notebooks/how_to/understand_utilize_rawdata.ipynb rename to notebooks/how_to/data_and_datasets/understand_utilize_rawdata.ipynb diff --git a/notebooks/how_to/use_dataset_model_objects.ipynb b/notebooks/how_to/data_and_datasets/use_dataset_model_objects.ipynb similarity index 100% rename from notebooks/how_to/use_dataset_model_objects.ipynb rename to notebooks/how_to/data_and_datasets/use_dataset_model_objects.ipynb diff --git a/notebooks/how_to/log_metrics_over_time.ipynb b/notebooks/how_to/metrics/log_metrics_over_time.ipynb similarity index 100% rename from notebooks/how_to/log_metrics_over_time.ipynb rename to notebooks/how_to/metrics/log_metrics_over_time.ipynb diff --git a/notebooks/how_to/run_unit_metrics.ipynb b/notebooks/how_to/metrics/run_unit_metrics.ipynb similarity index 100% rename from notebooks/how_to/run_unit_metrics.ipynb rename to notebooks/how_to/metrics/run_unit_metrics.ipynb diff --git a/notebooks/how_to/assign_scores_complete_tutorial.ipynb b/notebooks/how_to/scoring/assign_scores_complete_tutorial.ipynb similarity index 100% rename from notebooks/how_to/assign_scores_complete_tutorial.ipynb rename to notebooks/how_to/scoring/assign_scores_complete_tutorial.ipynb diff --git a/notebooks/how_to/enable_pii_detection.ipynb b/notebooks/how_to/security/enable_pii_detection.ipynb similarity index 100% rename from notebooks/how_to/enable_pii_detection.ipynb rename to notebooks/how_to/security/enable_pii_detection.ipynb diff 
--git a/notebooks/how_to/implement_custom_tests.ipynb b/notebooks/how_to/tests/custom_tests/implement_custom_tests.ipynb similarity index 100% rename from notebooks/how_to/implement_custom_tests.ipynb rename to notebooks/how_to/tests/custom_tests/implement_custom_tests.ipynb diff --git a/notebooks/how_to/integrate_external_test_providers.ipynb b/notebooks/how_to/tests/custom_tests/integrate_external_test_providers.ipynb similarity index 100% rename from notebooks/how_to/integrate_external_test_providers.ipynb rename to notebooks/how_to/tests/custom_tests/integrate_external_test_providers.ipynb diff --git a/notebooks/how_to/explore_test_suites.ipynb b/notebooks/how_to/tests/explore_tests/explore_test_suites.ipynb similarity index 100% rename from notebooks/how_to/explore_test_suites.ipynb rename to notebooks/how_to/tests/explore_tests/explore_test_suites.ipynb diff --git a/notebooks/how_to/explore_test_suites_output.png b/notebooks/how_to/tests/explore_tests/explore_test_suites_output.png similarity index 100% rename from notebooks/how_to/explore_test_suites_output.png rename to notebooks/how_to/tests/explore_tests/explore_test_suites_output.png diff --git a/notebooks/how_to/explore_tests.ipynb b/notebooks/how_to/tests/explore_tests/explore_tests.ipynb similarity index 100% rename from notebooks/how_to/explore_tests.ipynb rename to notebooks/how_to/tests/explore_tests/explore_tests.ipynb diff --git a/notebooks/how_to/test_suite_output1.png b/notebooks/how_to/tests/explore_tests/test_suite_output1.png similarity index 100% rename from notebooks/how_to/test_suite_output1.png rename to notebooks/how_to/tests/explore_tests/test_suite_output1.png diff --git a/notebooks/how_to/test_suite_output2.png b/notebooks/how_to/tests/explore_tests/test_suite_output2.png similarity index 100% rename from notebooks/how_to/test_suite_output2.png rename to notebooks/how_to/tests/explore_tests/test_suite_output2.png diff --git a/notebooks/how_to/run_tests/1_run_dataset_based_tests.ipynb 
b/notebooks/how_to/tests/run_tests/1_run_dataset_based_tests.ipynb similarity index 100% rename from notebooks/how_to/run_tests/1_run_dataset_based_tests.ipynb rename to notebooks/how_to/tests/run_tests/1_run_dataset_based_tests.ipynb diff --git a/notebooks/how_to/run_tests/2_run_comparison_tests.ipynb b/notebooks/how_to/tests/run_tests/2_run_comparison_tests.ipynb similarity index 100% rename from notebooks/how_to/run_tests/2_run_comparison_tests.ipynb rename to notebooks/how_to/tests/run_tests/2_run_comparison_tests.ipynb diff --git a/notebooks/how_to/customize_test_result_descriptions.ipynb b/notebooks/how_to/tests/run_tests/configure_tests/customize_test_result_descriptions.ipynb similarity index 100% rename from notebooks/how_to/customize_test_result_descriptions.ipynb rename to notebooks/how_to/tests/run_tests/configure_tests/customize_test_result_descriptions.ipynb diff --git a/notebooks/how_to/filter_input_columns.ipynb b/notebooks/how_to/tests/run_tests/configure_tests/filter_input_columns.ipynb similarity index 100% rename from notebooks/how_to/filter_input_columns.ipynb rename to notebooks/how_to/tests/run_tests/configure_tests/filter_input_columns.ipynb diff --git a/notebooks/how_to/run_tests_that_require_multiple_datasets.ipynb b/notebooks/how_to/tests/run_tests/configure_tests/run_tests_that_require_multiple_datasets.ipynb similarity index 100% rename from notebooks/how_to/run_tests_that_require_multiple_datasets.ipynb rename to notebooks/how_to/tests/run_tests/configure_tests/run_tests_that_require_multiple_datasets.ipynb diff --git a/notebooks/how_to/document_multiple_results_for_the_same_test.ipynb b/notebooks/how_to/tests/run_tests/documentation_tests/document_multiple_results_for_the_same_test.ipynb similarity index 100% rename from notebooks/how_to/document_multiple_results_for_the_same_test.ipynb rename to notebooks/how_to/tests/run_tests/documentation_tests/document_multiple_results_for_the_same_test.ipynb diff --git 
a/notebooks/how_to/run_documentation_sections.ipynb b/notebooks/how_to/tests/run_tests/documentation_tests/run_documentation_sections.ipynb similarity index 100% rename from notebooks/how_to/run_documentation_sections.ipynb rename to notebooks/how_to/tests/run_tests/documentation_tests/run_documentation_sections.ipynb diff --git a/notebooks/how_to/run_documentation_tests_with_config.ipynb b/notebooks/how_to/tests/run_tests/documentation_tests/run_documentation_tests_with_config.ipynb similarity index 100% rename from notebooks/how_to/run_documentation_tests_with_config.ipynb rename to notebooks/how_to/tests/run_tests/documentation_tests/run_documentation_tests_with_config.ipynb From edabbdfd0077a9f24e2651be095e189c50cd8375 Mon Sep 17 00:00:00 2001 From: Beck <164545837+validbeck@users.noreply.github.com> Date: Mon, 9 Feb 2026 14:41:53 -0800 Subject: [PATCH 07/14] Updating changed links --- .../operational_deposit_poc.ipynb | 10 +++++----- notebooks/code_sharing/r/r_custom_tests.Rmd | 2 +- .../configure_dataset_features.ipynb | 2 +- .../load_datasets_predictions.ipynb | 2 +- .../use_dataset_model_objects.ipynb | 16 ++++++++-------- .../how_to/metrics/log_metrics_over_time.ipynb | 2 +- .../how_to/security/enable_pii_detection.ipynb | 4 ++-- .../custom_tests/implement_custom_tests.ipynb | 2 +- .../explore_tests/explore_test_suites.ipynb | 4 ++-- .../tests/explore_tests/explore_tests.ipynb | 4 ++-- .../run_tests/1_run_dataset_based_tests.ipynb | 6 +++--- .../tests/run_tests/2_run_comparison_tests.ipynb | 6 +++--- ...un_tests_that_require_multiple_datasets.ipynb | 2 +- ...ment_multiple_results_for_the_same_test.ipynb | 2 +- .../run_documentation_sections.ipynb | 2 +- .../run_documentation_tests_with_config.ipynb | 2 +- .../quickstart_model_documentation.ipynb | 2 +- .../quickstart/quickstart_model_validation.ipynb | 2 +- notebooks/templates/_about-validmind.ipynb | 2 +- .../model_development/1-set_up_validmind.ipynb | 2 +- .../2-start_development_process.ipynb | 2 +- 
.../3-integrate_custom_tests.ipynb | 4 ++-- .../4-finalize_testing_documentation.ipynb | 12 ++++++------ .../1-set_up_validmind_for_validation.ipynb | 2 +- .../2-start_validation_process.ipynb | 2 +- .../4-finalize_validation_reporting.ipynb | 14 +++++++------- .../use_cases/agents/document_agentic_ai.ipynb | 8 ++++---- .../quickstart_option_pricing_models.ipynb | 2 +- ...ickstart_option_pricing_models_quantlib.ipynb | 2 +- .../quickstart_code_explainer_demo.ipynb | 2 +- .../application_scorecard_executive.ipynb | 2 +- .../application_scorecard_full_suite.ipynb | 2 +- .../application_scorecard_with_bias.ipynb | 2 +- .../application_scorecard_with_ml.ipynb | 2 +- .../document_excel_application_scorecard.ipynb | 4 ++-- .../validate_application_scorecard.ipynb | 6 +++--- .../nlp_and_llm/prompt_validation_demo.ipynb | 2 +- ...pplication_scorecard_ongoing_monitoring.ipynb | 2 +- ...start_customer_churn_ongoing_monitoring.ipynb | 4 ++-- .../quickstart_time_series_full_suite.ipynb | 2 +- .../quickstart_time_series_high_code.ipynb | 2 +- scripts/run_e2e_notebooks.py | 12 ++++++------ 42 files changed, 84 insertions(+), 84 deletions(-) diff --git a/notebooks/code_sharing/operational_deposit/operational_deposit_poc.ipynb b/notebooks/code_sharing/operational_deposit/operational_deposit_poc.ipynb index 3a0943fb2..5d088808a 100644 --- a/notebooks/code_sharing/operational_deposit/operational_deposit_poc.ipynb +++ b/notebooks/code_sharing/operational_deposit/operational_deposit_poc.ipynb @@ -1093,11 +1093,11 @@ "\n", "### More how-to guides and code samples\n", "\n", - "- [Explore available tests in detail](../how_to/explore_tests.ipynb)\n", - "- [In-depth guide for implementing custom tests](../../how_to/implement_custom_tests.ipynb)\n", - "- [In-depth guide to external test providers](../../how_to/integrate_external_test_providers.ipynb)\n", - "- [Configuring dataset features](../how_to/configure_dataset_features.ipynb)\n", - "- [Introduction to unit and composite 
metrics](../how_to/run_unit_metrics.ipynb)\n", + "- [Explore available tests in detail](../how_to/tests/explore_tests/explore_tests.ipynb)\n", + "- [In-depth guide for implementing custom tests](../../how_to/tests/custom_tests/implement_custom_tests.ipynb)\n", + "- [In-depth guide to external test providers](../../how_to/tests/custom_tests/integrate_external_test_providers.ipynb)\n", + "- [Configuring dataset features](../how_to/data_and_datasets/dataset_inputs/configure_dataset_features.ipynb)\n", + "- [Introduction to unit and composite metrics](../how_to/metrics/run_unit_metrics.ipynb)\n", "\n", "\n", "\n", diff --git a/notebooks/code_sharing/r/r_custom_tests.Rmd b/notebooks/code_sharing/r/r_custom_tests.Rmd index 2a89f051b..63e52f96a 100644 --- a/notebooks/code_sharing/r/r_custom_tests.Rmd +++ b/notebooks/code_sharing/r/r_custom_tests.Rmd @@ -54,7 +54,7 @@ Signing up is FREE — \n", "\n", diff --git a/notebooks/how_to/metrics/log_metrics_over_time.ipynb b/notebooks/how_to/metrics/log_metrics_over_time.ipynb index 058a54e19..3f843b5ee 100644 --- a/notebooks/how_to/metrics/log_metrics_over_time.ipynb +++ b/notebooks/how_to/metrics/log_metrics_over_time.ipynb @@ -108,7 +108,7 @@ "- **model**: A single model that has been initialized in ValidMind with [`vm.init_model()`](https://docs.validmind.ai/validmind/validmind.html#init_model).\n", "- **dataset**: Single dataset that has been initialized in ValidMind with [`vm.init_dataset()`](https://docs.validmind.ai/validmind/validmind.html#init_dataset).\n", "- **models**: A list of ValidMind models - usually this is used when you want to compare multiple models in your custom test.\n", - "- **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom test. 
See this [example](https://docs.validmind.ai/notebooks/how_to/run_tests_that_require_multiple_datasets.html) for more information.\n", + "- **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom test. See this [example](https://docs.validmind.ai/notebooks/how_to/tests/run_tests/configure_tests/run_tests_that_require_multiple_datasets.html) for more information.\n", "\n", "**Parameters**: Additional arguments that can be passed when running a ValidMind test, used to pass additional information to a test, customize its behavior, or provide additional context.\n", "\n", diff --git a/notebooks/how_to/security/enable_pii_detection.ipynb b/notebooks/how_to/security/enable_pii_detection.ipynb index 7d5779da1..2eb9ae011 100644 --- a/notebooks/how_to/security/enable_pii_detection.ipynb +++ b/notebooks/how_to/security/enable_pii_detection.ipynb @@ -124,7 +124,7 @@ " - **model**: A single model that has been initialized in ValidMind with [`vm.init_model()`](https://docs.validmind.ai/validmind/validmind.html#init_model).\n", " - **dataset**: Single dataset that has been initialized in ValidMind with [`vm.init_dataset()`](https://docs.validmind.ai/validmind/validmind.html#init_dataset).\n", " - **models**: A list of ValidMind models - usually this is used when you want to compare multiple models in your custom metric.\n", - " - **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom metric. (Learn more: [Run tests with multiple datasets](https://docs.validmind.ai/notebooks/how_to/run_tests_that_require_multiple_datasets.html))\n", + " - **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom metric. 
(Learn more: [Run tests with multiple datasets](https://docs.validmind.ai/notebooks/how_to/tests/run_tests/configure_tests/run_tests_that_require_multiple_datasets.html))\n", "\n", "**Parameters**: Additional arguments that can be passed when running a ValidMind test, used to pass additional information to a metric, customize its behavior, or provide additional context.\n", "\n", @@ -289,7 +289,7 @@ "source": [ "
Want to learn more about custom tests?\n", "

\n", - "Check out our extended introduction to custom tests — Implement custom tests
" + "Check out our extended introduction to custom tests — Implement custom tests" ] }, { diff --git a/notebooks/how_to/tests/custom_tests/implement_custom_tests.ipynb b/notebooks/how_to/tests/custom_tests/implement_custom_tests.ipynb index 0fdd6e0d1..b5b4952c7 100644 --- a/notebooks/how_to/tests/custom_tests/implement_custom_tests.ipynb +++ b/notebooks/how_to/tests/custom_tests/implement_custom_tests.ipynb @@ -105,7 +105,7 @@ " - **model**: A single model that has been initialized in ValidMind with [`vm.init_model()`](https://docs.validmind.ai/validmind/validmind.html#init_model).\n", " - **dataset**: Single dataset that has been initialized in ValidMind with [`vm.init_dataset()`](https://docs.validmind.ai/validmind/validmind.html#init_dataset).\n", " - **models**: A list of ValidMind models - usually this is used when you want to compare multiple models in your custom test.\n", - " - **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom test. See this [example](https://docs.validmind.ai/notebooks/how_to/run_tests_that_require_multiple_datasets.html) for more information.\n", + " - **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom test. 
See this [example](https://docs.validmind.ai/notebooks/how_to/tests/run_tests/configure_tests/run_tests_that_require_multiple_datasets.html) for more information.\n", "\n", "**Parameters**: Additional arguments that can be passed when running a ValidMind test, used to pass additional information to a test, customize its behavior, or provide additional context.\n", "\n", diff --git a/notebooks/how_to/tests/explore_tests/explore_test_suites.ipynb b/notebooks/how_to/tests/explore_tests/explore_test_suites.ipynb index f9f0a7286..a14d06463 100644 --- a/notebooks/how_to/tests/explore_tests/explore_test_suites.ipynb +++ b/notebooks/how_to/tests/explore_tests/explore_test_suites.ipynb @@ -85,7 +85,7 @@ " - **model**: A single model that has been initialized in ValidMind with [`vm.init_model()`](https://docs.validmind.ai/validmind/validmind.html#init_model).\n", " - **dataset**: Single dataset that has been initialized in ValidMind with [`vm.init_dataset()`](https://docs.validmind.ai/validmind/validmind.html#init_dataset).\n", " - **models**: A list of ValidMind models - usually this is used when you want to compare multiple models in your custom test.\n", - " - **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom test. See this [example](https://docs.validmind.ai/notebooks/how_to/run_tests_that_require_multiple_datasets.html) for more information.\n", + " - **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom test. See this [example](https://docs.validmind.ai/notebooks/how_to/tests/run_tests/configure_tests/run_tests_that_require_multiple_datasets.html) for more information.\n", "\n", "**Parameters**: Additional arguments that can be passed when running a ValidMind test, used to pass additional information to a test, customize its behavior, or provide additional context.\n", "\n", @@ -672,7 +672,7 @@ "\n", "
Learn more about the individual tests available in the ValidMind Library\n", "

\n", - "Check out our Explore tests notebook for more code examples and usage of key functions.
\n", + "Check out our Explore tests notebook for more code examples and usage of key functions.\n", "\n", "\n", "\n", diff --git a/notebooks/how_to/tests/explore_tests/explore_tests.ipynb b/notebooks/how_to/tests/explore_tests/explore_tests.ipynb index 9fc1d3292..ef5680e8f 100644 --- a/notebooks/how_to/tests/explore_tests/explore_tests.ipynb +++ b/notebooks/how_to/tests/explore_tests/explore_tests.ipynb @@ -86,7 +86,7 @@ " - **model**: A single model that has been initialized in ValidMind with [`vm.init_model()`](https://docs.validmind.ai/validmind/validmind.html#init_model).\n", " - **dataset**: Single dataset that has been initialized in ValidMind with [`vm.init_dataset()`](https://docs.validmind.ai/validmind/validmind.html#init_dataset).\n", " - **models**: A list of ValidMind models - usually this is used when you want to compare multiple models in your custom test.\n", - " - **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom test. See this [example](https://docs.validmind.ai/notebooks/how_to/run_tests_that_require_multiple_datasets.html) for more information.\n", + " - **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom test. See this [example](https://docs.validmind.ai/notebooks/how_to/tests/run_tests/configure_tests/run_tests_that_require_multiple_datasets.html) for more information.\n", "\n", "**Parameters**: Additional arguments that can be passed when running a ValidMind test, used to pass additional information to a test, customize its behavior, or provide additional context.\n", "\n", @@ -4371,7 +4371,7 @@ "\n", "
Learn about the tests suites available in the ValidMind Library.\n", "

\n", - "Check out our Explore test suites notebook for more code examples and usage of key functions.
\n", + "Check out our Explore test suites notebook for more code examples and usage of key functions.\n", "\n", "\n", "\n", diff --git a/notebooks/how_to/tests/run_tests/1_run_dataset_based_tests.ipynb b/notebooks/how_to/tests/run_tests/1_run_dataset_based_tests.ipynb index d032c6c77..263ee130a 100644 --- a/notebooks/how_to/tests/run_tests/1_run_dataset_based_tests.ipynb +++ b/notebooks/how_to/tests/run_tests/1_run_dataset_based_tests.ipynb @@ -14,7 +14,7 @@ "- Initialize a ValidMind dataset \n", "- Pass the dataset to the `run_test` fuction for any test that takes a `dataset` input\n", "\n", - "**We recommended that you first complete the [Explore tests](../explore_tests.ipynb) notebook,** to understand the basics of how to find and describe all the available tests in the ValidMind Library before moving on to this advanced guide.\n", + "**We recommended that you first complete the [Explore tests](../explore_tests/explore_tests.ipynb) notebook,** to understand the basics of how to find and describe all the available tests in the ValidMind Library before moving on to this advanced guide.\n", "\n", "This interactive notebook provides a step-by-step guide for listing and filtering available tests, building a sample dataset, initializing the required ValidMind objects, running the test, and then logging the results to ValidMind. " ] @@ -105,7 +105,7 @@ "- **model**: A single model that has been initialized in ValidMind with [`vm.init_model()`](https://docs.validmind.ai/validmind/validmind.html#init_model).\n", "- **dataset**: Single dataset that has been initialized in ValidMind with [`vm.init_dataset()`](https://docs.validmind.ai/validmind/validmind.html#init_dataset).\n", "- **models**: A list of ValidMind models - usually this is used when you want to compare multiple models in your custom test.\n", - "- **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom test. 
See this [example](https://docs.validmind.ai/notebooks/how_to/run_tests_that_require_multiple_datasets.html) for more information.\n", + "- **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom test. See this [example](https://docs.validmind.ai/notebooks/how_to/tests/run_tests/configure_tests/run_tests_that_require_multiple_datasets.html) for more information.\n", "\n", "**Parameters**: Additional arguments that can be passed when running a ValidMind test, used to pass additional information to a test, customize its behavior, or provide additional context.\n", "\n", @@ -239,7 +239,7 @@ "\n", "Before we run a test, let's find a suitable test for this demonstration. Let's assume you want to generate the *pearson correlation matrix* for a dataset. A Pearson correlation matrix is a table that shows the [Pearson correlation coefficients](https://en.wikipedia.org/wiki/Pearson_correlation_coefficient) between several variables. \n", "\n", - "In the [Explore tests](../explore_tests.ipynb) notebook, we learned how to pass a `filter` to the `list_tests` function. We'll do the same here to find the test ID for the pearson correlation matrix:" + "In the [Explore tests](../explore_tests/explore_tests.ipynb) notebook, we learned how to pass a `filter` to the `list_tests` function. 
We'll do the same here to find the test ID for the pearson correlation matrix:" ] }, { diff --git a/notebooks/how_to/tests/run_tests/2_run_comparison_tests.ipynb b/notebooks/how_to/tests/run_tests/2_run_comparison_tests.ipynb index ebd3f6236..4ce26f074 100644 --- a/notebooks/how_to/tests/run_tests/2_run_comparison_tests.ipynb +++ b/notebooks/how_to/tests/run_tests/2_run_comparison_tests.ipynb @@ -15,7 +15,7 @@ "- Initialize a ValidMind model and assign predictions to a dataset\n", "- Run a comparison test with `run_test` function\n", "\n", - "**We recommended that you first complete the [Explore tests](../explore_tests.ipynb) and the [Run dataset based tests](./1_run_dataset_based_tests.ipynb) notebooks** to understand the basics of how to find and describe all the available tests in the ValidMind Library and how to run tests before moving on to this guide.\n", + "**We recommended that you first complete the [Explore tests](../explore_tests/explore_tests.ipynb) and the [Run dataset based tests](./1_run_dataset_based_tests.ipynb) notebooks** to understand the basics of how to find and describe all the available tests in the ValidMind Library and how to run tests before moving on to this guide.\n", "\n", "This interactive notebook provides a step-by-step guide for listing and filtering available tests, building a sample dataset, training a model, initializing the required ValidMind objects, running a comparison test, and then logging the results to ValidMind. 
" ] @@ -113,7 +113,7 @@ "- **model**: A single model that has been initialized in ValidMind with [`vm.init_model()`](https://docs.validmind.ai/validmind/validmind.html#init_model).\n", "- **dataset**: Single dataset that has been initialized in ValidMind with [`vm.init_dataset()`](https://docs.validmind.ai/validmind/validmind.html#init_dataset).\n", "- **models**: A list of ValidMind models - usually this is used when you want to compare multiple models in your custom test.\n", - "- **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom test. See this [example](https://docs.validmind.ai/notebooks/how_to/run_tests_that_require_multiple_datasets.html) for more information.\n", + "- **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom test. See this [example](https://docs.validmind.ai/notebooks/how_to/tests/run_tests/configure_tests/run_tests_that_require_multiple_datasets.html) for more information.\n", "\n", "**Parameters**: Additional arguments that can be passed when running a ValidMind test, used to pass additional information to a test, customize its behavior, or provide additional context.\n", "\n", @@ -265,7 +265,7 @@ "\n", "Before we run a comparison test, let's find a suitable test for this demonstration. Let's assume you want to evaluate the performance results for a model.\n", "\n", - "In the [Explore tests](../explore_tests.ipynb) notebook, we learned how to pass a `filter` to the `list_tests` function. We'll do the same here to find the test ID for the confusion matrix:" + "In the [Explore tests](../explore_tests/explore_tests.ipynb) notebook, we learned how to pass a `filter` to the `list_tests` function. 
We'll do the same here to find the test ID for the confusion matrix:" ] }, { diff --git a/notebooks/how_to/tests/run_tests/configure_tests/run_tests_that_require_multiple_datasets.ipynb b/notebooks/how_to/tests/run_tests/configure_tests/run_tests_that_require_multiple_datasets.ipynb index 55c0857c7..74e67f61b 100644 --- a/notebooks/how_to/tests/run_tests/configure_tests/run_tests_that_require_multiple_datasets.ipynb +++ b/notebooks/how_to/tests/run_tests/configure_tests/run_tests_that_require_multiple_datasets.ipynb @@ -105,7 +105,7 @@ " - **model**: A single model that has been initialized in ValidMind with [`vm.init_model()`](https://docs.validmind.ai/validmind/validmind.html#init_model).\n", " - **dataset**: Single dataset that has been initialized in ValidMind with [`vm.init_dataset()`](https://docs.validmind.ai/validmind/validmind.html#init_dataset).\n", " - **models**: A list of ValidMind models - usually this is used when you want to compare multiple models in your custom metric.\n", - " - **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom metric. See this [example](https://docs.validmind.ai/notebooks/how_to/run_tests_that_require_multiple_datasets.html) for more information.\n", + " - **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom metric. 
See this [example](https://docs.validmind.ai/notebooks/how_to/tests/run_tests/configure_tests/run_tests_that_require_multiple_datasets.html) for more information.\n", "\n", "**Parameters**: Additional arguments that can be passed when running a ValidMind test, used to pass additional information to a metric, customize its behavior, or provide additional context.\n", "\n", diff --git a/notebooks/how_to/tests/run_tests/documentation_tests/document_multiple_results_for_the_same_test.ipynb b/notebooks/how_to/tests/run_tests/documentation_tests/document_multiple_results_for_the_same_test.ipynb index 59edac4cb..0feb61865 100644 --- a/notebooks/how_to/tests/run_tests/documentation_tests/document_multiple_results_for_the_same_test.ipynb +++ b/notebooks/how_to/tests/run_tests/documentation_tests/document_multiple_results_for_the_same_test.ipynb @@ -110,7 +110,7 @@ " - **model**: A single model that has been initialized in ValidMind with [`vm.init_model()`](https://docs.validmind.ai/validmind/validmind.html#init_model).\n", " - **dataset**: Single dataset that has been initialized in ValidMind with [`vm.init_dataset()`](https://docs.validmind.ai/validmind/validmind.html#init_dataset).\n", " - **models**: A list of ValidMind models - usually this is used when you want to compare multiple models in your custom test.\n", - " - **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom test. See this [example](https://docs.validmind.ai/notebooks/how_to/run_tests_that_require_multiple_datasets.html) for more information.\n", + " - **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom test. 
See this [example](https://docs.validmind.ai/notebooks/how_to/tests/run_tests/configure_tests/run_tests_that_require_multiple_datasets.html) for more information.\n", "\n", "**Parameters**: Additional arguments that can be passed when running a ValidMind test, used to pass additional information to a test, customize its behavior, or provide additional context.\n", "\n", diff --git a/notebooks/how_to/tests/run_tests/documentation_tests/run_documentation_sections.ipynb b/notebooks/how_to/tests/run_tests/documentation_tests/run_documentation_sections.ipynb index 340b6b864..0952c0c9c 100644 --- a/notebooks/how_to/tests/run_tests/documentation_tests/run_documentation_sections.ipynb +++ b/notebooks/how_to/tests/run_tests/documentation_tests/run_documentation_sections.ipynb @@ -103,7 +103,7 @@ " - **model**: A single model that has been initialized in ValidMind with [`vm.init_model()`](https://docs.validmind.ai/validmind/validmind.html#init_model).\n", " - **dataset**: Single dataset that has been initialized in ValidMind with [`vm.init_dataset()`](https://docs.validmind.ai/validmind/validmind.html#init_dataset).\n", " - **models**: A list of ValidMind models - usually this is used when you want to compare multiple models in your custom metric.\n", - " - **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom metric. See this [example](https://docs.validmind.ai/notebooks/how_to/run_tests_that_require_multiple_datasets.html) for more information.\n", + " - **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom metric. 
See this [example](https://docs.validmind.ai/notebooks/how_to/tests/run_tests/configure_tests/run_tests_that_require_multiple_datasets.html) for more information.\n", "\n", "**Parameters**: Additional arguments that can be passed when running a ValidMind test, used to pass additional information to a metric, customize its behavior, or provide additional context.\n", "\n", diff --git a/notebooks/how_to/tests/run_tests/documentation_tests/run_documentation_tests_with_config.ipynb b/notebooks/how_to/tests/run_tests/documentation_tests/run_documentation_tests_with_config.ipynb index 669f240c4..548c020cd 100644 --- a/notebooks/how_to/tests/run_tests/documentation_tests/run_documentation_tests_with_config.ipynb +++ b/notebooks/how_to/tests/run_tests/documentation_tests/run_documentation_tests_with_config.ipynb @@ -107,7 +107,7 @@ " - **model**: A single model that has been initialized in ValidMind with [`vm.init_model()`](https://docs.validmind.ai/validmind/validmind.html#init_model).\n", " - **dataset**: Single dataset that has been initialized in ValidMind with [`vm.init_dataset()`](https://docs.validmind.ai/validmind/validmind.html#init_dataset).\n", " - **models**: A list of ValidMind models - usually this is used when you want to compare multiple models in your custom metric.\n", - " - **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom metric. See this [example](https://docs.validmind.ai/notebooks/how_to/run_tests_that_require_multiple_datasets.html) for more information.\n", + " - **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom metric. 
See this [example](https://docs.validmind.ai/notebooks/how_to/tests/run_tests/configure_tests/run_tests_that_require_multiple_datasets.html) for more information.\n", "\n", "**Parameters**: Additional arguments that can be passed when running a ValidMind test, used to pass additional information to a metric, customize its behavior, or provide additional context.\n", "\n", diff --git a/notebooks/quickstart/quickstart_model_documentation.ipynb b/notebooks/quickstart/quickstart_model_documentation.ipynb index 933a7d331..012860187 100644 --- a/notebooks/quickstart/quickstart_model_documentation.ipynb +++ b/notebooks/quickstart/quickstart_model_documentation.ipynb @@ -152,7 +152,7 @@ " - **model**: A single model that has been initialized in ValidMind with [`vm.init_model()`](https://docs.validmind.ai/validmind/validmind.html#init_model).\n", " - **dataset**: Single dataset that has been initialized in ValidMind with [`vm.init_dataset()`](https://docs.validmind.ai/validmind/validmind.html#init_dataset).\n", " - **models**: A list of ValidMind models - usually this is used when you want to compare multiple models in your custom metric.\n", - " - **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom metric. (Learn more: [Run tests with multiple datasets](https://docs.validmind.ai/notebooks/how_to/run_tests_that_require_multiple_datasets.html))\n", + " - **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom metric. 
(Learn more: [Run tests with multiple datasets](https://docs.validmind.ai/notebooks/how_to/tests/run_tests/configure_tests/run_tests_that_require_multiple_datasets.html))\n", "\n", "**Parameters**: Additional arguments that can be passed when running a ValidMind test, used to pass additional information to a metric, customize its behavior, or provide additional context.\n", "\n", diff --git a/notebooks/quickstart/quickstart_model_validation.ipynb b/notebooks/quickstart/quickstart_model_validation.ipynb index ff59a7514..4621eb0d4 100644 --- a/notebooks/quickstart/quickstart_model_validation.ipynb +++ b/notebooks/quickstart/quickstart_model_validation.ipynb @@ -159,7 +159,7 @@ " - **model**: A single model that has been initialized in ValidMind with [`vm.init_model()`](https://docs.validmind.ai/validmind/validmind.html#init_model).\n", " - **dataset**: Single dataset that has been initialized in ValidMind with [`vm.init_dataset()`](https://docs.validmind.ai/validmind/validmind.html#init_dataset).\n", " - **models**: A list of ValidMind models - usually this is used when you want to compare multiple models in your custom metric.\n", - " - **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom metric. (Learn more: [Run tests with multiple datasets](https://docs.validmind.ai/notebooks/how_to/run_tests_that_require_multiple_datasets.html))\n", + " - **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom metric. 
(Learn more: [Run tests with multiple datasets](https://docs.validmind.ai/notebooks/how_to/tests/run_tests/configure_tests/run_tests_that_require_multiple_datasets.html))\n", "\n", "**Parameters**: Additional arguments that can be passed when running a ValidMind test, used to pass additional information to a metric, customize its behavior, or provide additional context.\n", "\n", diff --git a/notebooks/templates/_about-validmind.ipynb b/notebooks/templates/_about-validmind.ipynb index d3da11296..b1e9e9b0b 100644 --- a/notebooks/templates/_about-validmind.ipynb +++ b/notebooks/templates/_about-validmind.ipynb @@ -60,7 +60,7 @@ " - **model**: A single model that has been initialized in ValidMind with [`vm.init_model()`](https://docs.validmind.ai/validmind/validmind.html#init_model).\n", " - **dataset**: Single dataset that has been initialized in ValidMind with [`vm.init_dataset()`](https://docs.validmind.ai/validmind/validmind.html#init_dataset).\n", " - **models**: A list of ValidMind models - usually this is used when you want to compare multiple models in your custom metric.\n", - " - **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom metric. (Learn more: [Run tests with multiple datasets](https://docs.validmind.ai/notebooks/how_to/run_tests_that_require_multiple_datasets.html))\n", + " - **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom metric. 
(Learn more: [Run tests with multiple datasets](https://docs.validmind.ai/notebooks/how_to/tests/run_tests/configure_tests/run_tests_that_require_multiple_datasets.html))\n", "\n", "**Parameters**: Additional arguments that can be passed when running a ValidMind test, used to pass additional information to a metric, customize its behavior, or provide additional context.\n", "\n", diff --git a/notebooks/tutorials/model_development/1-set_up_validmind.ipynb b/notebooks/tutorials/model_development/1-set_up_validmind.ipynb index ad72e1abf..61ee21e2b 100644 --- a/notebooks/tutorials/model_development/1-set_up_validmind.ipynb +++ b/notebooks/tutorials/model_development/1-set_up_validmind.ipynb @@ -139,7 +139,7 @@ " - **model**: A single model that has been initialized in ValidMind with [`vm.init_model()`](https://docs.validmind.ai/validmind/validmind.html#init_model).\n", " - **dataset**: Single dataset that has been initialized in ValidMind with [`vm.init_dataset()`](https://docs.validmind.ai/validmind/validmind.html#init_dataset).\n", " - **models**: A list of ValidMind models - usually this is used when you want to compare multiple models in your custom metric.\n", - " - **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom metric. (Learn more: [Run tests with multiple datasets](https://docs.validmind.ai/notebooks/how_to/run_tests_that_require_multiple_datasets.html))\n", + " - **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom metric. 
(Learn more: [Run tests with multiple datasets](https://docs.validmind.ai/notebooks/how_to/tests/run_tests/configure_tests/run_tests_that_require_multiple_datasets.html))\n", "\n", "**Parameters**: Additional arguments that can be passed when running a ValidMind test, used to pass additional information to a metric, customize its behavior, or provide additional context.\n", "\n", diff --git a/notebooks/tutorials/model_development/2-start_development_process.ipynb b/notebooks/tutorials/model_development/2-start_development_process.ipynb index 1d62e9c08..23442b3d3 100644 --- a/notebooks/tutorials/model_development/2-start_development_process.ipynb +++ b/notebooks/tutorials/model_development/2-start_development_process.ipynb @@ -221,7 +221,7 @@ "source": [ "
Want to learn more about navigating ValidMind tests?\n", "

\n", - "Refer to our notebook outlining the utilities available for viewing and understanding available ValidMind tests: Explore tests
" + "Refer to our notebook outlining the utilities available for viewing and understanding available ValidMind tests: Explore tests" ] }, { diff --git a/notebooks/tutorials/model_development/3-integrate_custom_tests.ipynb b/notebooks/tutorials/model_development/3-integrate_custom_tests.ipynb index 229a46456..9a3cbe7ef 100644 --- a/notebooks/tutorials/model_development/3-integrate_custom_tests.ipynb +++ b/notebooks/tutorials/model_development/3-integrate_custom_tests.ipynb @@ -13,7 +13,7 @@ "- The function can be as simple or as complex as you need it to be — it can use external libraries, make API calls, or do anything else that you can do in Python.\n", "- The only requirement is that the function signature and return values can be \"understood\" and handled by the ValidMind Library. As such, custom tests offer added flexibility by extending the default tests provided by ValidMind, enabling you to document any type of model or use case.\n", "\n", - "**For a more in-depth introduction to custom tests,** refer to our [Implement custom tests](../../how_to/implement_custom_tests.ipynb) notebook.\n", + "**For a more in-depth introduction to custom tests,** refer to our [Implement custom tests](../../how_to/tests/custom_tests/implement_custom_tests.ipynb) notebook.\n", "\n", "
Learn by doing\n", "

\n", @@ -817,7 +817,7 @@ "\n", "
Want to learn more about test providers?\n", "

\n", - "An extended introduction to test providers can be found in: Integrate external test providers
" + "An extended introduction to test providers can be found in: Integrate external test providers
" ] }, { diff --git a/notebooks/tutorials/model_development/4-finalize_testing_documentation.ipynb b/notebooks/tutorials/model_development/4-finalize_testing_documentation.ipynb index 9961e9230..04c745225 100644 --- a/notebooks/tutorials/model_development/4-finalize_testing_documentation.ipynb +++ b/notebooks/tutorials/model_development/4-finalize_testing_documentation.ipynb @@ -943,12 +943,12 @@ "\n", "#### More how-to guides and code samples\n", "\n", - "- [Explore available tests in detail](../../how_to/explore_tests.ipynb)\n", - "- [In-depth guide on running dataset based tests](../../how_to/run_tests/1_run_dataset_based_tests.ipynb)\n", - "- [In-depth guide for implementing custom tests](../../how_to/implement_custom_tests.ipynb)\n", - "- [In-depth guide to external test providers](../../how_to/integrate_external_test_providers.ipynb)\n", - "- [Configuring dataset features](../../how_to/configure_dataset_features.ipynb)\n", - "- [Introduction to unit and composite metrics](../../how_to/run_unit_metrics.ipynb)" + "- [Explore available tests in detail](../../how_to/tests/explore_tests/explore_tests.ipynb)\n", + "- [In-depth guide on running dataset based tests](../../how_to/tests/run_tests/1_run_dataset_based_tests.ipynb)\n", + "- [In-depth guide for implementing custom tests](../../how_to/tests/custom_tests/implement_custom_tests.ipynb)\n", + "- [In-depth guide to external test providers](../../how_to/tests/custom_tests/integrate_external_test_providers.ipynb)\n", + "- [Configuring dataset features](../../how_to/data_and_datasets/dataset_inputs/configure_dataset_features.ipynb)\n", + "- [Introduction to unit and composite metrics](../../how_to/metrics/run_unit_metrics.ipynb)" ] }, { diff --git a/notebooks/tutorials/model_validation/1-set_up_validmind_for_validation.ipynb b/notebooks/tutorials/model_validation/1-set_up_validmind_for_validation.ipynb index 212e341eb..6a2e9e128 100644 --- 
a/notebooks/tutorials/model_validation/1-set_up_validmind_for_validation.ipynb +++ b/notebooks/tutorials/model_validation/1-set_up_validmind_for_validation.ipynb @@ -141,7 +141,7 @@ " - **model**: A single model that has been initialized in ValidMind with [`vm.init_model()`](https://docs.validmind.ai/validmind/validmind.html#init_model).\n", " - **dataset**: Single dataset that has been initialized in ValidMind with [`vm.init_dataset()`](https://docs.validmind.ai/validmind/validmind.html#init_dataset).\n", " - **models**: A list of ValidMind models - usually this is used when you want to compare multiple models in your custom metric.\n", - " - **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom metric. (Learn more: [Run tests with multiple datasets](https://docs.validmind.ai/notebooks/how_to/run_tests_that_require_multiple_datasets.html))\n", + " - **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom metric. (Learn more: [Run tests with multiple datasets](https://docs.validmind.ai/notebooks/how_to/tests/run_tests/configure_tests/run_tests_that_require_multiple_datasets.html))\n", "\n", "**Parameters**: Additional arguments that can be passed when running a ValidMind test, used to pass additional information to a metric, customize its behavior, or provide additional context.\n", "\n", diff --git a/notebooks/tutorials/model_validation/2-start_validation_process.ipynb b/notebooks/tutorials/model_validation/2-start_validation_process.ipynb index 48e4dbff6..77383ce0f 100644 --- a/notebooks/tutorials/model_validation/2-start_validation_process.ipynb +++ b/notebooks/tutorials/model_validation/2-start_validation_process.ipynb @@ -237,7 +237,7 @@ "source": [ "
Want to learn more about navigating ValidMind tests?\n", "

\n", - "Refer to our notebook outlining the utilities available for viewing and understanding available ValidMind tests: Explore tests
" + "Refer to our notebook outlining the utilities available for viewing and understanding available ValidMind tests: Explore tests" ] }, { diff --git a/notebooks/tutorials/model_validation/4-finalize_validation_reporting.ipynb b/notebooks/tutorials/model_validation/4-finalize_validation_reporting.ipynb index 999b07d4e..6103fa2d4 100644 --- a/notebooks/tutorials/model_validation/4-finalize_validation_reporting.ipynb +++ b/notebooks/tutorials/model_validation/4-finalize_validation_reporting.ipynb @@ -13,7 +13,7 @@ "- The function can be as simple or as complex as you need it to be — it can use external libraries, make API calls, or do anything else that you can do in Python.\n", "- The only requirement is that the function signature and return values can be \"understood\" and handled by the ValidMind Library. As such, custom tests offer added flexibility by extending the default tests provided by ValidMind, enabling you to document any type of model or use case.\n", "\n", - "**For a more in-depth introduction to custom tests,** refer to our [Implement custom tests](../../how_to/implement_custom_tests.ipynb) notebook.\n", + "**For a more in-depth introduction to custom tests,** refer to our [Implement custom tests](../../how_to/tests/custom_tests/implement_custom_tests.ipynb) notebook.\n", "\n", "
Learn by doing\n", "

\n", @@ -480,7 +480,7 @@ "\n", "
Want to learn more about custom tests?\n", "

\n", - "Refer to our in-depth introduction to custom tests: Implement custom tests
" + "Refer to our in-depth introduction to custom tests: Implement custom tests
" ] }, { @@ -856,7 +856,7 @@ "\n", "
Want to learn more about test providers?\n", "

\n", - "An extended introduction to test providers can be found in: Integrate external test providers
" + "An extended introduction to test providers can be found in: Integrate external test providers" ] }, { @@ -1176,10 +1176,10 @@ "\n", "#### More how-to guides and code samples\n", "\n", - "- [Explore available tests in detail](../../how_to/explore_tests.ipynb)\n", - "- [In-depth guide on running dataset based tests](../../how_to/run_tests/1_run_dataset_based_tests.ipynb)\n", - "- [In-depth guide for running comparison tests](../../how_to/run_tests/2_run_comparison_tests.ipynb)\n", - "- [In-depth guide for implementing custom tests](../../how_to/implement_custom_tests.ipynb)" + "- [Explore available tests in detail](../../how_to/tests/explore_tests/explore_tests.ipynb)\n", + "- [In-depth guide on running dataset based tests](../../how_to/tests/run_tests/1_run_dataset_based_tests.ipynb)\n", + "- [In-depth guide for running comparison tests](../../how_to/tests/run_tests/2_run_comparison_tests.ipynb)\n", + "- [In-depth guide for implementing custom tests](../../how_to/tests/custom_tests/implement_custom_tests.ipynb)" ] }, { diff --git a/notebooks/use_cases/agents/document_agentic_ai.ipynb b/notebooks/use_cases/agents/document_agentic_ai.ipynb index 1454b11a7..8042b5f60 100644 --- a/notebooks/use_cases/agents/document_agentic_ai.ipynb +++ b/notebooks/use_cases/agents/document_agentic_ai.ipynb @@ -162,7 +162,7 @@ " - **model**: A single model that has been initialized in ValidMind with [`vm.init_model()`](https://docs.validmind.ai/validmind/validmind.html#init_model).\n", " - **dataset**: Single dataset that has been initialized in ValidMind with [`vm.init_dataset()`](https://docs.validmind.ai/validmind/validmind.html#init_dataset).\n", " - **models**: A list of ValidMind models - usually this is used when you want to compare multiple models in your custom metric.\n", - " - **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom metric. 
(Learn more: [Run tests with multiple datasets](https://docs.validmind.ai/notebooks/how_to/run_tests_that_require_multiple_datasets.html))\n", + " - **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom metric. (Learn more: [Run tests with multiple datasets](https://docs.validmind.ai/notebooks/how_to/tests/run_tests/configure_tests/run_tests_that_require_multiple_datasets.html))\n", "\n", "**Parameters**: Additional arguments that can be passed when running a ValidMind test, used to pass additional information to a metric, customize its behavior, or provide additional context.\n", "\n", @@ -2099,9 +2099,9 @@ "\n", "Learn more about the ValidMind Library tools we used in this notebook:\n", "\n", - "- [Custom prompts](https://docs.validmind.ai/notebooks/how_to/customize_test_result_descriptions.html)\n", - "- [Custom tests](https://docs.validmind.ai/notebooks/how_to/implement_custom_tests.html)\n", - "- [ValidMind scorers](https://docs.validmind.ai/notebooks/how_to/assign_scores_complete_tutorial.html)\n", + "- [Custom prompts](https://docs.validmind.ai/notebooks/how_to/tests/run_tests/configure_tests/customize_test_result_descriptions.html)\n", + "- [Custom tests](https://docs.validmind.ai/notebooks/how_to/tests/custom_tests/implement_custom_tests.html)\n", + "- [ValidMind scorers](https://docs.validmind.ai/notebooks/how_to/scoring/assign_scores_complete_tutorial.html)\n", "\n", "We also offer many more interactive notebooks to help you document models:\n", "\n", diff --git a/notebooks/use_cases/capital_markets/quickstart_option_pricing_models.ipynb b/notebooks/use_cases/capital_markets/quickstart_option_pricing_models.ipynb index 695395924..a218950c5 100644 --- a/notebooks/use_cases/capital_markets/quickstart_option_pricing_models.ipynb +++ b/notebooks/use_cases/capital_markets/quickstart_option_pricing_models.ipynb @@ -117,7 +117,7 @@ " - **model**: A single model that has been initialized in 
ValidMind with [`vm.init_model()`](https://docs.validmind.ai/validmind/validmind.html#init_model).\n", " - **dataset**: Single dataset that has been initialized in ValidMind with [`vm.init_dataset()`](https://docs.validmind.ai/validmind/validmind.html#init_dataset).\n", " - **models**: A list of ValidMind models - usually this is used when you want to compare multiple models in your custom test.\n", - " - **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom test. See this [example](https://docs.validmind.ai/notebooks/how_to/run_tests_that_require_multiple_datasets.html) for more information.\n", + " - **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom test. See this [example](https://docs.validmind.ai/notebooks/how_to/tests/run_tests/configure_tests/run_tests_that_require_multiple_datasets.html) for more information.\n", "\n", "**Parameters**: Additional arguments that can be passed when running a ValidMind test, used to pass additional information to a test, customize its behavior, or provide additional context.\n", "\n", diff --git a/notebooks/use_cases/capital_markets/quickstart_option_pricing_models_quantlib.ipynb b/notebooks/use_cases/capital_markets/quickstart_option_pricing_models_quantlib.ipynb index 3755fbf63..3d6b0d15c 100644 --- a/notebooks/use_cases/capital_markets/quickstart_option_pricing_models_quantlib.ipynb +++ b/notebooks/use_cases/capital_markets/quickstart_option_pricing_models_quantlib.ipynb @@ -156,7 +156,7 @@ " - **model**: A single model that has been initialized in ValidMind with [`vm.init_model()`](https://docs.validmind.ai/validmind/validmind.html#init_model).\n", " - **dataset**: Single dataset that has been initialized in ValidMind with [`vm.init_dataset()`](https://docs.validmind.ai/validmind/validmind.html#init_dataset).\n", " - **models**: A list of ValidMind models - usually this is used 
when you want to compare multiple models in your custom test.\n", - " - **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom test. See this [example](https://docs.validmind.ai/notebooks/how_to/run_tests_that_require_multiple_datasets.html) for more information.\n", + " - **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom test. See this [example](https://docs.validmind.ai/notebooks/how_to/tests/run_tests/configure_tests/run_tests_that_require_multiple_datasets.html) for more information.\n", "\n", "**Parameters**: Additional arguments that can be passed when running a ValidMind test, used to pass additional information to a test, customize its behavior, or provide additional context.\n", "\n", diff --git a/notebooks/use_cases/code_explainer/quickstart_code_explainer_demo.ipynb b/notebooks/use_cases/code_explainer/quickstart_code_explainer_demo.ipynb index 6d77ce74b..91a44e0c7 100644 --- a/notebooks/use_cases/code_explainer/quickstart_code_explainer_demo.ipynb +++ b/notebooks/use_cases/code_explainer/quickstart_code_explainer_demo.ipynb @@ -119,7 +119,7 @@ " - **model**: A single model that has been initialized in ValidMind with [`vm.init_model()`](https://docs.validmind.ai/validmind/validmind.html#init_model).\n", " - **dataset**: Single dataset that has been initialized in ValidMind with [`vm.init_dataset()`](https://docs.validmind.ai/validmind/validmind.html#init_dataset).\n", " - **models**: A list of ValidMind models - usually this is used when you want to compare multiple models in your custom test.\n", - " - **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom test. 
See this [example](https://docs.validmind.ai/notebooks/how_to/run_tests_that_require_multiple_datasets.html) for more information.\n", + " - **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom test. See this [example](https://docs.validmind.ai/notebooks/how_to/tests/run_tests/configure_tests/run_tests_that_require_multiple_datasets.html) for more information.\n", "\n", "**Parameters**: Additional arguments that can be passed when running a ValidMind test, used to pass additional information to a test, customize its behavior, or provide additional context.\n", "\n", diff --git a/notebooks/use_cases/credit_risk/application_scorecard_executive.ipynb b/notebooks/use_cases/credit_risk/application_scorecard_executive.ipynb index 693144634..6ac66e945 100644 --- a/notebooks/use_cases/credit_risk/application_scorecard_executive.ipynb +++ b/notebooks/use_cases/credit_risk/application_scorecard_executive.ipynb @@ -98,7 +98,7 @@ "- **model**: A single model that has been initialized in ValidMind with [`vm.init_model()`](https://docs.validmind.ai/validmind/validmind.html#init_model).\n", "- **dataset**: Single dataset that has been initialized in ValidMind with [`vm.init_dataset()`](https://docs.validmind.ai/validmind/validmind.html#init_dataset).\n", "- **models**: A list of ValidMind models - usually this is used when you want to compare multiple models in your custom test.\n", - "- **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom test. See this [example](https://docs.validmind.ai/notebooks/how_to/run_tests_that_require_multiple_datasets.html) for more information.\n", + "- **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom test. 
See this [example](https://docs.validmind.ai/notebooks/how_to/tests/run_tests/configure_tests/run_tests_that_require_multiple_datasets.html) for more information.\n", "\n", "**Parameters**: Additional arguments that can be passed when running a ValidMind test, used to pass additional information to a test, customize its behavior, or provide additional context.\n", "\n", diff --git a/notebooks/use_cases/credit_risk/application_scorecard_full_suite.ipynb b/notebooks/use_cases/credit_risk/application_scorecard_full_suite.ipynb index 06c98f584..1666b1d25 100644 --- a/notebooks/use_cases/credit_risk/application_scorecard_full_suite.ipynb +++ b/notebooks/use_cases/credit_risk/application_scorecard_full_suite.ipynb @@ -112,7 +112,7 @@ "- **model**: A single model that has been initialized in ValidMind with [`vm.init_model()`](https://docs.validmind.ai/validmind/validmind.html#init_model).\n", "- **dataset**: Single dataset that has been initialized in ValidMind with [`vm.init_dataset()`](https://docs.validmind.ai/validmind/validmind.html#init_dataset).\n", "- **models**: A list of ValidMind models - usually this is used when you want to compare multiple models in your custom test.\n", - "- **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom test. See this [example](https://docs.validmind.ai/notebooks/how_to/run_tests_that_require_multiple_datasets.html) for more information.\n", + "- **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom test. 
See this [example](https://docs.validmind.ai/notebooks/how_to/tests/run_tests/configure_tests/run_tests_that_require_multiple_datasets.html) for more information.\n", "\n", "**Parameters**: Additional arguments that can be passed when running a ValidMind test, used to pass additional information to a test, customize its behavior, or provide additional context.\n", "\n", diff --git a/notebooks/use_cases/credit_risk/application_scorecard_with_bias.ipynb b/notebooks/use_cases/credit_risk/application_scorecard_with_bias.ipynb index d5d5757eb..f845c74e2 100644 --- a/notebooks/use_cases/credit_risk/application_scorecard_with_bias.ipynb +++ b/notebooks/use_cases/credit_risk/application_scorecard_with_bias.ipynb @@ -113,7 +113,7 @@ "- **model**: A single model that has been initialized in ValidMind with [`vm.init_model()`](https://docs.validmind.ai/validmind/validmind.html#init_model).\n", "- **dataset**: Single dataset that has been initialized in ValidMind with [`vm.init_dataset()`](https://docs.validmind.ai/validmind/validmind.html#init_dataset).\n", "- **models**: A list of ValidMind models - usually this is used when you want to compare multiple models in your custom test.\n", - "- **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom test. See this [example](https://docs.validmind.ai/notebooks/how_to/run_tests_that_require_multiple_datasets.html) for more information.\n", + "- **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom test. 
See this [example](https://docs.validmind.ai/notebooks/how_to/tests/run_tests/configure_tests/run_tests_that_require_multiple_datasets.html) for more information.\n", "\n", "**Parameters**: Additional arguments that can be passed when running a ValidMind test, used to pass additional information to a test, customize its behavior, or provide additional context.\n", "\n", diff --git a/notebooks/use_cases/credit_risk/application_scorecard_with_ml.ipynb b/notebooks/use_cases/credit_risk/application_scorecard_with_ml.ipynb index a5067f2da..7e0cce645 100644 --- a/notebooks/use_cases/credit_risk/application_scorecard_with_ml.ipynb +++ b/notebooks/use_cases/credit_risk/application_scorecard_with_ml.ipynb @@ -125,7 +125,7 @@ "- **model**: A single model that has been initialized in ValidMind with [`vm.init_model()`](https://docs.validmind.ai/validmind/validmind.html#init_model).\n", "- **dataset**: Single dataset that has been initialized in ValidMind with [`vm.init_dataset()`](https://docs.validmind.ai/validmind/validmind.html#init_dataset).\n", "- **models**: A list of ValidMind models - usually this is used when you want to compare multiple models in your custom test.\n", - "- **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom test. See this [example](https://docs.validmind.ai/notebooks/how_to/run_tests_that_require_multiple_datasets.html) for more information.\n", + "- **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom test. 
See this [example](https://docs.validmind.ai/notebooks/how_to/tests/run_tests/configure_tests/run_tests_that_require_multiple_datasets.html) for more information.\n", "\n", "**Parameters**: Additional arguments that can be passed when running a ValidMind test, used to pass additional information to a test, customize its behavior, or provide additional context.\n", "\n", diff --git a/notebooks/use_cases/credit_risk/document_excel_application_scorecard.ipynb b/notebooks/use_cases/credit_risk/document_excel_application_scorecard.ipynb index 5d875def2..6d693eb53 100644 --- a/notebooks/use_cases/credit_risk/document_excel_application_scorecard.ipynb +++ b/notebooks/use_cases/credit_risk/document_excel_application_scorecard.ipynb @@ -109,7 +109,7 @@ "- **model**: A single model that has been initialized in ValidMind with [`vm.init_model()`](https://docs.validmind.ai/validmind/validmind.html#init_model).\n", "- **dataset**: Single dataset that has been initialized in ValidMind with [`vm.init_dataset()`](https://docs.validmind.ai/validmind/validmind.html#init_dataset).\n", "- **models**: A list of ValidMind models - usually this is used when you want to compare multiple models in your custom test.\n", - "- **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom test. See this [example](https://docs.validmind.ai/notebooks/how_to/run_tests_that_require_multiple_datasets.html) for more information.\n", + "- **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom test. 
See this [example](https://docs.validmind.ai/notebooks/how_to/tests/run_tests/configure_tests/run_tests_that_require_multiple_datasets.html) for more information.\n", "\n", "**Parameters**: Additional arguments that can be passed when running a ValidMind test, used to pass additional information to a test, customize its behavior, or provide additional context.\n", "\n", @@ -486,7 +486,7 @@ "\n", "
Want to learn more about navigating ValidMind tests?\n", "

\n", - "Refer to our notebook outlining the utilities available for viewing and understanding available ValidMind tests: Explore tests
" + "Refer to our notebook outlining the utilities available for viewing and understanding available ValidMind tests: Explore tests" ] }, { diff --git a/notebooks/use_cases/model_validation/validate_application_scorecard.ipynb b/notebooks/use_cases/model_validation/validate_application_scorecard.ipynb index e839f3720..5cc55f847 100644 --- a/notebooks/use_cases/model_validation/validate_application_scorecard.ipynb +++ b/notebooks/use_cases/model_validation/validate_application_scorecard.ipynb @@ -151,7 +151,7 @@ " - **model**: A single model that has been initialized in ValidMind with [`vm.init_model()`](https://docs.validmind.ai/validmind/validmind.html#init_model).\n", " - **dataset**: Single dataset that has been initialized in ValidMind with [`vm.init_dataset()`](https://docs.validmind.ai/validmind/validmind.html#init_dataset).\n", " - **models**: A list of ValidMind models - usually this is used when you want to compare multiple models in your custom metric.\n", - " - **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom metric. (Learn more: [Run tests with multiple datasets](https://docs.validmind.ai/notebooks/how_to/run_tests_that_require_multiple_datasets.html))\n", + " - **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom metric. (Learn more: [Run tests with multiple datasets](https://docs.validmind.ai/notebooks/how_to/tests/run_tests/configure_tests/run_tests_that_require_multiple_datasets.html))\n", "\n", "**Parameters**: Additional arguments that can be passed when running a ValidMind test, used to pass additional information to a metric, customize its behavior, or provide additional context.\n", "\n", @@ -902,7 +902,7 @@ "source": [ "
Want to learn more about navigating ValidMind tests?\n", "

\n", - "Refer to our notebook outlining the utilities available for viewing and understanding available ValidMind tests: Explore tests
" + "Refer to our notebook outlining the utilities available for viewing and understanding available ValidMind tests: Explore tests" ] }, { @@ -1586,7 +1586,7 @@ "source": [ "
Want to learn more about custom tests?\n", "

\n", - "Refer to our in-depth introduction to custom tests: Implement custom tests
" + "Refer to our in-depth introduction to custom tests: Implement custom tests" ] }, { diff --git a/notebooks/use_cases/nlp_and_llm/prompt_validation_demo.ipynb b/notebooks/use_cases/nlp_and_llm/prompt_validation_demo.ipynb index eff5de7fb..ec70da33c 100644 --- a/notebooks/use_cases/nlp_and_llm/prompt_validation_demo.ipynb +++ b/notebooks/use_cases/nlp_and_llm/prompt_validation_demo.ipynb @@ -94,7 +94,7 @@ " - **model**: A single model that has been initialized in ValidMind with [`vm.init_model()`](https://docs.validmind.ai/validmind/validmind.html#init_model).\n", " - **dataset**: Single dataset that has been initialized in ValidMind with [`vm.init_dataset()`](https://docs.validmind.ai/validmind/validmind.html#init_dataset).\n", " - **models**: A list of ValidMind models - usually this is used when you want to compare multiple models in your custom test.\n", - " - **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom test. See this [example](https://docs.validmind.ai/notebooks/how_to/run_tests_that_require_multiple_datasets.html) for more information.\n", + " - **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom test. 
See this [example](https://docs.validmind.ai/notebooks/how_to/tests/run_tests/configure_tests/run_tests_that_require_multiple_datasets.html) for more information.\n", "\n", "**Parameters**: Additional arguments that can be passed when running a ValidMind test, used to pass additional information to a test, customize its behavior, or provide additional context.\n", "\n", diff --git a/notebooks/use_cases/ongoing_monitoring/application_scorecard_ongoing_monitoring.ipynb b/notebooks/use_cases/ongoing_monitoring/application_scorecard_ongoing_monitoring.ipynb index caa2aabac..ebdbaeae0 100644 --- a/notebooks/use_cases/ongoing_monitoring/application_scorecard_ongoing_monitoring.ipynb +++ b/notebooks/use_cases/ongoing_monitoring/application_scorecard_ongoing_monitoring.ipynb @@ -108,7 +108,7 @@ " - **model**: A single model that has been initialized in ValidMind with [`vm.init_model()`](https://docs.validmind.ai/validmind/validmind.html#init_model).\n", " - **dataset**: Single dataset that has been initialized in ValidMind with [`vm.init_dataset()`](https://docs.validmind.ai/validmind/validmind.html#init_dataset).\n", " - **models**: A list of ValidMind models - usually this is used when you want to compare multiple models in your custom test.\n", - " - **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom test. See this [example](https://docs.validmind.ai/notebooks/how_to/run_tests_that_require_multiple_datasets.html) for more information.\n", + " - **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom test. 
See this [example](https://docs.validmind.ai/notebooks/how_to/tests/run_tests/configure_tests/run_tests_that_require_multiple_datasets.html) for more information.\n", "\n", "**Parameters**: Additional arguments that can be passed when running a ValidMind test, used to pass additional information to a test, customize its behavior, or provide additional context.\n", "\n", diff --git a/notebooks/use_cases/ongoing_monitoring/quickstart_customer_churn_ongoing_monitoring.ipynb b/notebooks/use_cases/ongoing_monitoring/quickstart_customer_churn_ongoing_monitoring.ipynb index 14f14821a..3cac66c03 100644 --- a/notebooks/use_cases/ongoing_monitoring/quickstart_customer_churn_ongoing_monitoring.ipynb +++ b/notebooks/use_cases/ongoing_monitoring/quickstart_customer_churn_ongoing_monitoring.ipynb @@ -106,7 +106,7 @@ " - **model**: A single model that has been initialized in ValidMind with [`vm.init_model()`](https://docs.validmind.ai/validmind/validmind.html#init_model).\n", " - **dataset**: Single dataset that has been initialized in ValidMind with [`vm.init_dataset()`](https://docs.validmind.ai/validmind/validmind.html#init_dataset).\n", " - **models**: A list of ValidMind models - usually this is used when you want to compare multiple models in your custom test.\n", - " - **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom test. See this [example](https://docs.validmind.ai/notebooks/how_to/run_tests_that_require_multiple_datasets.html) for more information.\n", + " - **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom test. 
See this [example](https://docs.validmind.ai/notebooks/how_to/tests/run_tests/configure_tests/run_tests_that_require_multiple_datasets.html) for more information.\n", "\n", "**Parameters**: Additional arguments that can be passed when running a ValidMind test, used to pass additional information to a test, customize its behavior, or provide additional context.\n", "\n", @@ -523,7 +523,7 @@ "source": [ "Next, let's run *comparison tests*, which will allow comparing differences between the training dataset and monitoring datasets. To run a test in comparison mode, you only need to pass an `input_grid` parameter to the `run_test()` method instead of `inputs`.\n", "\n", - "For more information about comparison tests, see this [notebook](../../how_to/run_tests/2_run_comparison_tests.ipynb)." + "For more information about comparison tests, see this [notebook](../../how_to/tests/run_tests/2_run_comparison_tests.ipynb)." ] }, { diff --git a/notebooks/use_cases/time_series/quickstart_time_series_full_suite.ipynb b/notebooks/use_cases/time_series/quickstart_time_series_full_suite.ipynb index 4b4865f0b..c154f66f6 100644 --- a/notebooks/use_cases/time_series/quickstart_time_series_full_suite.ipynb +++ b/notebooks/use_cases/time_series/quickstart_time_series_full_suite.ipynb @@ -106,7 +106,7 @@ " - **model**: A single model that has been initialized in ValidMind with [`vm.init_model()`](https://docs.validmind.ai/validmind/validmind.html#init_model).\n", " - **dataset**: Single dataset that has been initialized in ValidMind with [`vm.init_dataset()`](https://docs.validmind.ai/validmind/validmind.html#init_dataset).\n", " - **models**: A list of ValidMind models - usually this is used when you want to compare multiple models in your custom metric.\n", - " - **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom metric. 
See this [example](https://docs.validmind.ai/notebooks/how_to/run_tests_that_require_multiple_datasets.html) for more information.\n", + " - **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom metric. See this [example](https://docs.validmind.ai/notebooks/how_to/tests/run_tests/configure_tests/run_tests_that_require_multiple_datasets.html) for more information.\n", "\n", "**Parameters**: Additional arguments that can be passed when running a ValidMind test, used to pass additional information to a metric, customize its behavior, or provide additional context.\n", "\n", diff --git a/notebooks/use_cases/time_series/quickstart_time_series_high_code.ipynb b/notebooks/use_cases/time_series/quickstart_time_series_high_code.ipynb index 55c24a46a..da5b9051b 100644 --- a/notebooks/use_cases/time_series/quickstart_time_series_high_code.ipynb +++ b/notebooks/use_cases/time_series/quickstart_time_series_high_code.ipynb @@ -107,7 +107,7 @@ " - **model**: A single model that has been initialized in ValidMind with [`vm.init_model()`](https://docs.validmind.ai/validmind/validmind.html#init_model).\n", " - **dataset**: Single dataset that has been initialized in ValidMind with [`vm.init_dataset()`](https://docs.validmind.ai/validmind/validmind.html#init_dataset).\n", " - **models**: A list of ValidMind models - usually this is used when you want to compare multiple models in your custom metric.\n", - " - **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom metric. See this [example](https://docs.validmind.ai/notebooks/how_to/run_tests_that_require_multiple_datasets.html) for more information.\n", + " - **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom metric. 
See this [example](https://docs.validmind.ai/notebooks/how_to/tests/run_tests/configure_tests/run_tests_that_require_multiple_datasets.html) for more information.\n", "\n", "**Parameters**: Additional arguments that can be passed when running a ValidMind test, used to pass additional information to a metric, customize its behavior, or provide additional context.\n", "\n", diff --git a/scripts/run_e2e_notebooks.py b/scripts/run_e2e_notebooks.py index 94c586eab..99bce5056 100644 --- a/scripts/run_e2e_notebooks.py +++ b/scripts/run_e2e_notebooks.py @@ -9,8 +9,8 @@ - notebooks/quickstart/quickstart_model_documentation.ipynb - notebooks/use_cases/time_series/quickstart_time_series_full_suite.ipynb - notebooks/use_cases/regression/quickstart_regression_full_suite.ipynb - - notebooks/how_to/integrate_external_test_providers.ipynb - - notebooks/how_to/implement_custom_tests.ipynb + - notebooks/how_to/tests/custom_tests/integrate_external_test_providers.ipynb + - notebooks/how_to/tests/custom_tests/implement_custom_tests.ipynb To add more notebooks to the list, simply add the path to the `NOTEBOOKS_TO_RUN` list. This will use the default project id for the notebook. 
If you want to use a different @@ -42,10 +42,10 @@ "notebooks/quickstart/quickstart_model_documentation.ipynb", "notebooks/use_cases/time_series/quickstart_time_series_high_code.ipynb", "notebooks/use_cases/regression/quickstart_regression_full_suite.ipynb", - "notebooks/how_to/run_unit_metrics.ipynb", - "notebooks/how_to/integrate_external_test_providers.ipynb", - "notebooks/how_to/implement_custom_tests.ipynb", - "notebooks/how_to/explore_tests.ipynb", + "notebooks/how_to/metrics/run_unit_metrics.ipynb", + "notebooks/how_to/tests/custom_tests/integrate_external_test_providers.ipynb", + "notebooks/how_to/tests/custom_tests/implement_custom_tests.ipynb", + "notebooks/how_to/tests/explore_tests/explore_tests.ipynb", ] DATA_TEMPLATE_NOTEBOOKS = [ From fca979d6c47e5d0b341050e46f4efb5644278de2 Mon Sep 17 00:00:00 2001 From: Beck <164545837+validbeck@users.noreply.github.com> Date: Mon, 9 Feb 2026 15:03:15 -0800 Subject: [PATCH 08/14] Moving images --- .../composite-metric-in-template-preview.png | Bin .../insert-test-driven-block-correlations.png | Bin ...est-driven-block-custom-class-imbalance.jpg | Bin ...st-driven-block-custom-confusion-matrix.png | Bin .../images/my_tests_directory.png | Bin .../images/selecting-composite-metric.png | Bin .../metrics}/add_metric_over_time_block.png | Bin .../metrics}/log_metric_accuracy.png | Bin .../metrics}/log_metric_attention.png | Bin .../metrics}/log_metric_auc_1.png | Bin .../metrics}/log_metric_auc_2.png | Bin .../metrics}/log_metric_auc_3.png | Bin .../metrics}/log_metric_auc_4.png | Bin .../metrics}/log_metric_f1.png | Bin .../metrics}/log_metric_precision.png | Bin .../metrics}/log_metric_recall.png | Bin .../metrics}/log_metric_satisfactory.png | Bin .../metrics}/log_metric_satisfactory_2.png | Bin .../external-data-custom-test.png | Bin .../hyperparameters-custom-metric.png | Bin .../custom_tests}/image-in-custom-metric.png | Bin .../insert-test-driven-block-custom.png | Bin 
.../insert-test-driven-block-test-provider.png | Bin .../custom_tests}/insert-test-driven-block.png | Bin .../multiple-tables-plots-custom-metric.png | Bin .../parameterized-custom-metric.png | Bin .../pearson-correlation-matrix-test-output.png | Bin .../pearson-correlation-matrix.png | Bin .../images/high-pearson-correlation-block.png | Bin 280332 -> 0 bytes ...selecting-high-pearson-correlation-test.png | Bin 484028 -> 0 bytes 30 files changed, 0 insertions(+), 0 deletions(-) rename notebooks/{ => code_sharing}/images/composite-metric-in-template-preview.png (100%) rename notebooks/{ => code_sharing}/images/insert-test-driven-block-correlations.png (100%) rename notebooks/{ => code_sharing}/images/insert-test-driven-block-custom-class-imbalance.jpg (100%) rename notebooks/{ => code_sharing}/images/insert-test-driven-block-custom-confusion-matrix.png (100%) rename notebooks/{ => code_sharing}/images/my_tests_directory.png (100%) rename notebooks/{ => code_sharing}/images/selecting-composite-metric.png (100%) rename notebooks/{images => how_to/metrics}/add_metric_over_time_block.png (100%) rename notebooks/{images => how_to/metrics}/log_metric_accuracy.png (100%) rename notebooks/{images => how_to/metrics}/log_metric_attention.png (100%) rename notebooks/{images => how_to/metrics}/log_metric_auc_1.png (100%) rename notebooks/{images => how_to/metrics}/log_metric_auc_2.png (100%) rename notebooks/{images => how_to/metrics}/log_metric_auc_3.png (100%) rename notebooks/{images => how_to/metrics}/log_metric_auc_4.png (100%) rename notebooks/{images => how_to/metrics}/log_metric_f1.png (100%) rename notebooks/{images => how_to/metrics}/log_metric_precision.png (100%) rename notebooks/{images => how_to/metrics}/log_metric_recall.png (100%) rename notebooks/{images => how_to/metrics}/log_metric_satisfactory.png (100%) rename notebooks/{images => how_to/metrics}/log_metric_satisfactory_2.png (100%) rename notebooks/{images => 
how_to/tests/custom_tests}/external-data-custom-test.png (100%) rename notebooks/{images => how_to/tests/custom_tests}/hyperparameters-custom-metric.png (100%) rename notebooks/{images => how_to/tests/custom_tests}/image-in-custom-metric.png (100%) rename notebooks/{images => how_to/tests/custom_tests}/insert-test-driven-block-custom.png (100%) rename notebooks/{images => how_to/tests/custom_tests}/insert-test-driven-block-test-provider.png (100%) rename notebooks/{images => how_to/tests/custom_tests}/insert-test-driven-block.png (100%) rename notebooks/{images => how_to/tests/custom_tests}/multiple-tables-plots-custom-metric.png (100%) rename notebooks/{images => how_to/tests/custom_tests}/parameterized-custom-metric.png (100%) rename notebooks/{images => how_to/tests/custom_tests}/pearson-correlation-matrix-test-output.png (100%) rename notebooks/{images => how_to/tests/custom_tests}/pearson-correlation-matrix.png (100%) delete mode 100644 notebooks/images/high-pearson-correlation-block.png delete mode 100644 notebooks/images/selecting-high-pearson-correlation-test.png diff --git a/notebooks/images/composite-metric-in-template-preview.png b/notebooks/code_sharing/images/composite-metric-in-template-preview.png similarity index 100% rename from notebooks/images/composite-metric-in-template-preview.png rename to notebooks/code_sharing/images/composite-metric-in-template-preview.png diff --git a/notebooks/images/insert-test-driven-block-correlations.png b/notebooks/code_sharing/images/insert-test-driven-block-correlations.png similarity index 100% rename from notebooks/images/insert-test-driven-block-correlations.png rename to notebooks/code_sharing/images/insert-test-driven-block-correlations.png diff --git a/notebooks/images/insert-test-driven-block-custom-class-imbalance.jpg b/notebooks/code_sharing/images/insert-test-driven-block-custom-class-imbalance.jpg similarity index 100% rename from notebooks/images/insert-test-driven-block-custom-class-imbalance.jpg 
rename to notebooks/code_sharing/images/insert-test-driven-block-custom-class-imbalance.jpg diff --git a/notebooks/images/insert-test-driven-block-custom-confusion-matrix.png b/notebooks/code_sharing/images/insert-test-driven-block-custom-confusion-matrix.png similarity index 100% rename from notebooks/images/insert-test-driven-block-custom-confusion-matrix.png rename to notebooks/code_sharing/images/insert-test-driven-block-custom-confusion-matrix.png diff --git a/notebooks/images/my_tests_directory.png b/notebooks/code_sharing/images/my_tests_directory.png similarity index 100% rename from notebooks/images/my_tests_directory.png rename to notebooks/code_sharing/images/my_tests_directory.png diff --git a/notebooks/images/selecting-composite-metric.png b/notebooks/code_sharing/images/selecting-composite-metric.png similarity index 100% rename from notebooks/images/selecting-composite-metric.png rename to notebooks/code_sharing/images/selecting-composite-metric.png diff --git a/notebooks/images/add_metric_over_time_block.png b/notebooks/how_to/metrics/add_metric_over_time_block.png similarity index 100% rename from notebooks/images/add_metric_over_time_block.png rename to notebooks/how_to/metrics/add_metric_over_time_block.png diff --git a/notebooks/images/log_metric_accuracy.png b/notebooks/how_to/metrics/log_metric_accuracy.png similarity index 100% rename from notebooks/images/log_metric_accuracy.png rename to notebooks/how_to/metrics/log_metric_accuracy.png diff --git a/notebooks/images/log_metric_attention.png b/notebooks/how_to/metrics/log_metric_attention.png similarity index 100% rename from notebooks/images/log_metric_attention.png rename to notebooks/how_to/metrics/log_metric_attention.png diff --git a/notebooks/images/log_metric_auc_1.png b/notebooks/how_to/metrics/log_metric_auc_1.png similarity index 100% rename from notebooks/images/log_metric_auc_1.png rename to notebooks/how_to/metrics/log_metric_auc_1.png diff --git 
a/notebooks/images/log_metric_auc_2.png b/notebooks/how_to/metrics/log_metric_auc_2.png similarity index 100% rename from notebooks/images/log_metric_auc_2.png rename to notebooks/how_to/metrics/log_metric_auc_2.png diff --git a/notebooks/images/log_metric_auc_3.png b/notebooks/how_to/metrics/log_metric_auc_3.png similarity index 100% rename from notebooks/images/log_metric_auc_3.png rename to notebooks/how_to/metrics/log_metric_auc_3.png diff --git a/notebooks/images/log_metric_auc_4.png b/notebooks/how_to/metrics/log_metric_auc_4.png similarity index 100% rename from notebooks/images/log_metric_auc_4.png rename to notebooks/how_to/metrics/log_metric_auc_4.png diff --git a/notebooks/images/log_metric_f1.png b/notebooks/how_to/metrics/log_metric_f1.png similarity index 100% rename from notebooks/images/log_metric_f1.png rename to notebooks/how_to/metrics/log_metric_f1.png diff --git a/notebooks/images/log_metric_precision.png b/notebooks/how_to/metrics/log_metric_precision.png similarity index 100% rename from notebooks/images/log_metric_precision.png rename to notebooks/how_to/metrics/log_metric_precision.png diff --git a/notebooks/images/log_metric_recall.png b/notebooks/how_to/metrics/log_metric_recall.png similarity index 100% rename from notebooks/images/log_metric_recall.png rename to notebooks/how_to/metrics/log_metric_recall.png diff --git a/notebooks/images/log_metric_satisfactory.png b/notebooks/how_to/metrics/log_metric_satisfactory.png similarity index 100% rename from notebooks/images/log_metric_satisfactory.png rename to notebooks/how_to/metrics/log_metric_satisfactory.png diff --git a/notebooks/images/log_metric_satisfactory_2.png b/notebooks/how_to/metrics/log_metric_satisfactory_2.png similarity index 100% rename from notebooks/images/log_metric_satisfactory_2.png rename to notebooks/how_to/metrics/log_metric_satisfactory_2.png diff --git a/notebooks/images/external-data-custom-test.png 
b/notebooks/how_to/tests/custom_tests/external-data-custom-test.png similarity index 100% rename from notebooks/images/external-data-custom-test.png rename to notebooks/how_to/tests/custom_tests/external-data-custom-test.png diff --git a/notebooks/images/hyperparameters-custom-metric.png b/notebooks/how_to/tests/custom_tests/hyperparameters-custom-metric.png similarity index 100% rename from notebooks/images/hyperparameters-custom-metric.png rename to notebooks/how_to/tests/custom_tests/hyperparameters-custom-metric.png diff --git a/notebooks/images/image-in-custom-metric.png b/notebooks/how_to/tests/custom_tests/image-in-custom-metric.png similarity index 100% rename from notebooks/images/image-in-custom-metric.png rename to notebooks/how_to/tests/custom_tests/image-in-custom-metric.png diff --git a/notebooks/images/insert-test-driven-block-custom.png b/notebooks/how_to/tests/custom_tests/insert-test-driven-block-custom.png similarity index 100% rename from notebooks/images/insert-test-driven-block-custom.png rename to notebooks/how_to/tests/custom_tests/insert-test-driven-block-custom.png diff --git a/notebooks/images/insert-test-driven-block-test-provider.png b/notebooks/how_to/tests/custom_tests/insert-test-driven-block-test-provider.png similarity index 100% rename from notebooks/images/insert-test-driven-block-test-provider.png rename to notebooks/how_to/tests/custom_tests/insert-test-driven-block-test-provider.png diff --git a/notebooks/images/insert-test-driven-block.png b/notebooks/how_to/tests/custom_tests/insert-test-driven-block.png similarity index 100% rename from notebooks/images/insert-test-driven-block.png rename to notebooks/how_to/tests/custom_tests/insert-test-driven-block.png diff --git a/notebooks/images/multiple-tables-plots-custom-metric.png b/notebooks/how_to/tests/custom_tests/multiple-tables-plots-custom-metric.png similarity index 100% rename from notebooks/images/multiple-tables-plots-custom-metric.png rename to 
notebooks/how_to/tests/custom_tests/multiple-tables-plots-custom-metric.png diff --git a/notebooks/images/parameterized-custom-metric.png b/notebooks/how_to/tests/custom_tests/parameterized-custom-metric.png similarity index 100% rename from notebooks/images/parameterized-custom-metric.png rename to notebooks/how_to/tests/custom_tests/parameterized-custom-metric.png diff --git a/notebooks/images/pearson-correlation-matrix-test-output.png b/notebooks/how_to/tests/custom_tests/pearson-correlation-matrix-test-output.png similarity index 100% rename from notebooks/images/pearson-correlation-matrix-test-output.png rename to notebooks/how_to/tests/custom_tests/pearson-correlation-matrix-test-output.png diff --git a/notebooks/images/pearson-correlation-matrix.png b/notebooks/how_to/tests/custom_tests/pearson-correlation-matrix.png similarity index 100% rename from notebooks/images/pearson-correlation-matrix.png rename to notebooks/how_to/tests/custom_tests/pearson-correlation-matrix.png diff --git a/notebooks/images/high-pearson-correlation-block.png b/notebooks/images/high-pearson-correlation-block.png deleted file mode 100644 index dbe44392d146dd9f414762f12a226978d1e73462..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 280332 zcmeFZby$?$);NqP3Me2TAR>aKw9*~YCEX?6Gr-Ud3P?+LcQ*q=iAZ;M$H2hQFf`xz zoOqx2_nz-u&-wq%b=};1?t8Db_g-u5ReKG=ik~F0FkfM!prBw$ONl9?pkOqkprCz1 ze~f%mV`iIzg7QqnQdCq?T2z!=(E(^?X=93lA{88`@kCRlmoQx?`nxdtb75hbZTaU> zc)~L1OunZSGw9O3iXWdnBH*hoT+vpn^2O34P}=jI!Q~4zI(SNt*$2_AWB@7)PkU{8 zoOpqtZTG2{!$~0X{g#I)a4HSPAqftY9^6e?BmZ0C_2_8gT!8NbCgF#UD_xtL!CFd6 zt0?j9=LegM^1eND*4V6NKIiw-*}X&?M<~MCE011PMO$J32ZtJpE};5`yh>n@ej{yt!^rR~08_SCa z!p6dTF)=ayLELCYBo>r`n)$nVi68e$D~n zk${=tVJ!47Gtbw^m7K*Iq7~%vAKQ}$GCWG78G!y0DSdMEQAGc9>6lpz6_>=$(=vsa zN-dc?ehmr)!^xRqGQqGitZnGIUF=n&yP@t%u;Y)+o;Pg} z{G#t(!??5)p+7xhxA#^}rTEnRK8EB~-nS^<%En!rKN{f(o)~Dfyz5j^9_4 z&2B-XV9u~|QTl4#`$RJF(jw7IGn})Vy^LGY^UF%C>sdUVZ=^ql3=1ls&M1{WcwdRH 
zNjVil8nc=;c_!8hdEC<3v)=2^`_t!w#Iw3uZi{D<_l(<1>2&jHe(%z#ZyG~7Gj6{5 z2Ak$#DEVhQG5&7cXXdo#L?|Z9M4h|4^0hZje%tll(5?FizKCEyKR?Rl#u0p!Jy6j6 z{h`ZHAchcWF-oe?K#UCLi`UFe&sTK(AG;(AzJI*+|UG6U(`Nll1Rw39{ zuVCAu1+5%$<7D^`cM+}c9+J8WEcqLB@Er->kod{Gr<%d%$9Vmd`y;O*eb!e+kr@eY zG46*5WujHmD5(i*ZT!VmeK`sflzillz4&S;|G~szO$*92tQMoY#yD($yr{?5M6NmC^vQtW~XEPc`I?R!C33&Lb{L%4yUD z6>Omm0+^!XcLEMqMEXFBYqfSF0dgR{Wh%N>kU znX~il#gXgW;SrpP;W2~q<;?pZC|s3n-#8PkD%)4WL=NoWtNI;o9kM|OWZu)yIcnL$ zKBO8pbXHDQF89p5n~}1qo!YKds;DZj%COHrl38Ex5}qA^JfYX-k*2c`15U6fa-@WI@)0t|SZq_>1Jlj0{^OsR+r(CFZXj8n2 zY`$sWrd2;E{-CV146lq(lS9)|kEe*3&kBMs;2nOg+bHTVpdluU{n! z*M+n0(ZiYNbIt+hAFt2O;gsl<{1V?JR3jTUGq-*?yt6BG+IF09dgj9DU}NXWGiGFR z+_OnJ9DcL40gHv#PVOwdD^tqK36bCkcZb_R2SDYHt%AwCycVaaqv1IQjWbQ?x||IQ z&UsA{PI@bIdsmZ#i}Gvg(5ZF6YWkY~rgv!3V3-lJSvrE;^}=J|h#Ph^IzGs@3~GZK z=ws_^>GQQfx-}zl!)AH6s=!HH(;hW0K{y?>o#rtLK@~+T)8IKoR7NA&GB9PCEuM)X zrY`hsY`NE9&8O#HU_M45)Qc76DLVE$EQ_ZTm|-}&L>KRS7~csLau%}8aJWhw_CEh8 zrP*`*UFS6=cBD$j&Qm9xG=e$2d_qE!dgeW)2E7KVW=lq1PRG5FDg-4Xr5G+&6nQj$ zBgL^utkXnnA?4_=?ZWN8?Sc)XidM3VwwtyK z#N{gFH2v9fSV)-uTb+ha7xZjYAmgp;(w1?4h*UDO=~obiyZkn!R@hat?*GI?BVT)SXi z-hlm9%TRF@$aYBFOgzXYZ$6YV%M;3BePUzc4BOk{*#d>Q6N4Io^6FyFa39scmH!;{loyx-{C(pvHq^(#_<}fXzsaFg>NV=&EYR&3UyL4ZT9U zqM3%|YHbL_SpFiptB%k*q3pJr*?2Kr=DtrrEl@4Ac&P$f(FSP;iY`jzgqxBG+2R2p zEj+FuuR1lSPQtZ#e=wD7UfuG!!tvn5i|!h*k2AmQ>FbucpSjsyEP5)McP4dvdes^j z78uc$+jZWN6_k?FoQzFs&tH98xHH;k%-bFRrr41wgBd z`@+sH(Y?~nXP_(~0j8s;gLAuob)CftuD|Nv zPavsq)wQ%D8~45`KA2RQMlDZk8EW+2k@yvUo^r$mTu!ztS?9cWMx^cWRqPWk+0QTpqyQ;^w43)KuMSzKAOw+!xaf>&rZs?sWGH8-^Rv0BkHZ(d zYH16heJ4l5P5R#qJolL{RZfPB6F6CL?lut`*2@j8i%4=ae;Mty2-%lhZH?h5aM z4jh)eybZ2I?mHqP2O^<@)ZXsrZZM*;1P%k+Hcqdn*UmoV=AVTJ;_NotP+yi>Xum?K z06!F#NA<-XHjh0DImu`y4`C~L?RWo`z+BAD{$VEeU7IB0XIqL6%HASoq=8}v=L6q& z^eFakP)G&b3$fEDH=r_FZ-0V*16$v@i?58k0X{Np()3QCYA%A>#c`Gmax z{(MJXzhVA%e;6Hz@&x$}4|%y}qW+^dMsw!Ff3(s5fD};?m6k?!QZaTgH3fhyfQ}8d zt~baB7fhG`X=SQIB>ovoRZT}tc{yHVpe?hZ3DC%t+11t#*&79g-<22H 
zv^8}!BzLv70f2a21t|XL!HaDFe#}Ba{zn%_YXJ&Pc|~$jpo1wnC-Zye_Y{Jd0K2SyMy3BV_9I6UKJ5DN zA5#T&#~;NX>{ykuP4yQA?SIxp?z;Z+FU#jY!2BFVP}+4MGxCmj|ABP>4f=Vr{jLU+C@8@M8SO0*AiD{rI1V{;Kql>mgYY&+Lq!_VurV{DU~Zx8cM8SknHx z4gU?v{<{tTr6T$7HvD&*>%ZIZpBm}U((_+y_-}0Ff34xawCDe|hX2li_^(s?XEiKL zkAI=Ju?*Z;_VIpydog%(-G6h<6?gp$_S@Z0rF+Dhh$YZ{(Qp2Z$S`HB>Q>;ftea?N0Zt^}GJRi0`UuS7Gjzn3sC}RFsCuxr2 z=OgsN2#hoAi)D)cd-b_KsKi5%&TmTqIW`ji6{f*GOkUK#=+JUMMAsYwbPxRlSE}G6 zBn3T^?cGHWov&VN*kJq{z4m8^8yRF?=-mli=I>Bi%8g${WPAMfy!aoGcTsp;OxU-}s*YI~uz{BHWnIzj)H76)W=Fz6wL$+7+4f zxM~!i6SnIVubFTDMO=PtUw*XFB9gIgPX=Z*>wgDQKP+WebSYi?Gm3SBR+!X|Y8?J- zxc|(<|HrcbND^6Dj;Tn(x<&j67x@?5W&-KSGSS2M;tMUFzeux{0rJCx-Zb;B!0Nc) zOZfysC1xd*pJYPH#D9jycLhE$Gi*bRWZ@OQFj7bljO(BOc^Cg1O4)V&hUg+gMfp7i zd}ILsDTb8TitX;^M8R#fQ)6RYK`<_wIz*K%Lrub{9=xJ*$dH9YjaX)ZQB__7|onE2kE0oy0}a*$dx{EE-$`3 z*^EkGT!j0aopC8&-pw7)LJc0l1n08q9N}Yv@6+%o+iuUB*lp(K>WX2L^s-;B>hw(M zI7X{Z){l40GiV`@XT+>Mc?r4{2?pLtx=xb>%FPyrbo!;07CaC9VyOEuW4pphPHXncxvgo}f;cQu% zTS`*t?sJ2tLFr|!@hvw6zsEN;LvA^U`3$YZ^!gG%JZY=MQ?Qa3)J>hzuN+ z#QM8R#z?Nu`xB4$?_A&upB$ilxD{$+fY%r{?9*{cAi{>J_wE;;8#m)qbWNqxzHMni zVI>u@c%30qqZDXvQ_>WNQf>yWs{sz<;nOKTTs`t!i|-m8mZ*~eE!4r*IlPa=UU676 zwB21<>nh46y;m-8y((^~mCyNCd?#o69dHw#qk8=|Cf_}g9FHD;pH0|$u_oJVkegZp znCv4kf#oWdS4+{f-rqgxjRH56zyvmBdYiRk^h~{;Jd2qH5jFNX8XZ`(^wmx4y?q&7lsAD2_5=KD~)g$kpn{ zE>}Ojs~CPDc18K#{LW}+99USgp2Afcy*FFk&=<`cm1>@*187xBW-n%8O6G{I8Cuh# z#XG0&SC3k!75tWKrASIpGF7UtW~EwT^r$}J$sjVT>HVmB^Bv)8z9~Vjaj~+KC!)>vs;*0)Z`ndfV%NR-8#wSsjOK&XawWfeG zuP5%V>iQWagENWk=`VI@k!os_u(=K}E9Q=Xy(-`c1lFrI7CY=+1RuuXCFwML_D`Tqd0aSm<#L<8(1GX8)Yj8^jREV_uCfs* z{eh~vb`O&Hy}#;$1}B|yJDxu>IS&?F^5KGNyshpLu0_40wNe>k7)Mqk%Z?GLd z-&dZz40DN-QJ0xyu#FG2XzNthap>)wK>UZ>;fsURGlGh1O0lo}!RFJp(VXx_W-IV= zytMwpO<1b!SZ$taYJ@+RX~N~u{O08ugSder-t%t+ew30+F^}c7C1ahr7I=XTcf49h zr~FCDj}u z^ueg5+rXZMOIh7c+NJr7VlM|L8dAB^`NQT~tNG5!aq6e$QoU>9dZweF$X%b(Xq10+ z8iiW$VG{N*lq58%NX(zS{1;A_of*DVs6QBE@kx)z6YNIkFwA9Tol|GW`?r_S4QY+G zRD5WXoK%7?Qa3tunmTYE3~a6=BKwbnYc?X6y<=cZB+~UajcE-pgM_OwZqEBFs!ax7 
zT&|=ca%~2SmGjNwOu$L$B!37p3&$fM#4@K#F8afIfu>GTk>%E4GL=^KLefW_w94dK zX4i4{?`X$R3?qlJg=V4KQ_$t!3Cl)?&mBf`1)AO)TLwMey0bLNw~wM_R=UZ2qFFaf zdXZXtv&kZUz(qa{V(v#g8ty7r^mwk%xt0a|MP4cFbSDOIA>)M&s?^ACi1@G6Mg2*nE*#|4RR28rK zLU&6Ye=fNlB}*If`DN&>|EE5YAS)&c`6t<{Y*HxtDYr23~5}XJgs{4hR8q z$%t06?fAP^of;#<&&jy|$~pOrFJrgT9RM zeJy@0dqj_r-de!Gb_!;8e5IbqWB2v-Le>0SH3{n?g+P7;KNdJzUPc~OXSy^&O$Dm0 z*W>CeL0tK{Sl9D1uV^+;RgVj4vVUf1`*SVwdlj7{m9y-*jg|^Efs!_Uvzs1wc`Hn2 z7VKf&L%Dj;b|0T}KpMkyLfF5S$Dn&Qv1mJtG!Stby8HCzsHmgQx6fS-w2n`UGsQuh z%{mZg01>NKcK$YeuBD>66&jvtI!R~txh-8}^95D7usdP%PJI5k7rABay3YLYs_k~B z;8>wZ+INP#dGyQ#MmI1e7r|rp68qi{E0u;!IIRKV>22s>a@_IZqDJyx1~MB)e=l1g=wVc1liZBfBJuX zt>+%7mePm$nW4 zq|q?@o8H+a4mQG`;pKbs1U;`jRa<2i8u(-{w0r2(t9(?iIL_5Q9ir-0NukCZS3A@j=fXc&m9ZdM;#Ea&<5t=|<`M+@u z&jVv`p;D4;mzqJHdLehufdId#@SCr&1G>$8s|t+6f_Osg7J}0|((Z*PPnpmB^-Ewx zEOE_N(_ByD5u@ALCRR3fq}b`Bv8ENKD<+}a_%Mk>GnJXkIY5zGMUj9d7DFrxZB2fp zOI#-i$rb*ce9w7H@x!f!WABF6 zbE|1d+*qwtfWxuSiHkl3FY3b-=kGwl>yLm2xcbfqQ?VI|)_WMq&U9tCTEXeVxLh+5 zL{oF+(}Jj^CIeS-!iYqRGl}6k-A>HQ^F4zYwEeQ_b4@z=r%O6AxpGBo z%C%A=!F5-BJpN#Xa25S5BLt+L8knwoya($nXKlkEplWHo#&X*oF=SXAf*N>QDy!(z zf|uMw7XZu?kTRs_8nv7q9AETs@$Mn8_X(LN73}=%i*&dfoI@kU%Q-EU^Ysa|Kkc2Q z9$=7dS8vy9qO!b@{4gg{C~DGrs`t7(BIVTE;1z9jjJo!qeGdLaM|r?q%Ef9BnYh7u zi`RwT=(P-i4+!Rz5i!9x1l6=IU zb?jC&agK7<{+!aNPce4!$=tt$X3*N|aDnqVZet)kmfE>2tvAZ7>2XSc_t*4DqnxWf zDdW(N27MAAIw_?U)2E8aVat}boT?Yq(7IlbFlQU9Rq`Tc&rB#`ocML!{YjHfIr6pF z@Z2)YH*sC57_*sBtquoMS()?&o-&C0>o=aewlRPUw#)T4M?1EHh?xF? 
zFV2M{w}sbN+a3oxVP}a`FXEBZuZbJdRQW|iIEuM+nkAZsh)5Rnyjbm?x;jgC4(Aba zx{YI4u*MQCyC0c9c&X2#UE^~N=SpJriYT#K_3wH!X9toHU02P&mZ#)FJkZl$9B|(A z)LY`1-fs0`slq+d9R7hdbwTE>Y;!A>J{Inz>k0=p!XjxABt zi%7JNBz0M`PieLhql*$_ET8PfZSFL>bq{wnE4$V=Bm=o;jPDLFwE+9fX$VRMWw$xB z#u=tvGi9p3@?k)aQl^}=x>-1c@Xi*PZ1!nQ2SB~E#B37PPyJyQm^-8rJ%!Lh!N^pP zeV(oKRB1rtCe1FlM$n+e$KhM(ln{%@`F@SErZ-{{;SiwjF(6~;Vx$$kct7%sAi;q7$rAbjx0%Kiw4NVLM(cW*Sp|D{`Y~VI>I#pMolIj}zbK)8i6n{Henwk3OmLRF}p+iZrQWk4ms!%aovMK~*#2&VIU7 z8%L1bSOVKVE+AQo`8uhF=v%(}wCie~Mk!PV3V8z`nd3EsvwEeMsMlo+?C{9}GCWCc zD-;jPbie>`x>6elbXtB3{6*h-zb}1KX1)ljp-kC03?msl8Jq{US(m9-s9e3?Lptya zEZ`*vP-f5N9`U=dRjE?aCGTsv%WNws;kBTrSVW?EamK)rLr~+BC5IDvhi-V%05Itz zt`IFRmx@XtaOt>uk8`?zcz~?Zn`4*{!g6+(%mQTI<(bf0elxwU7OBIx#iVJ(GdDMW z#a9S`C!gnXFzV!g-3D?Y^L4*N$3N$5{Mb^2wQ-wmH5+s}lYOGZtQwnV{kfcGUsZmV znJEVtfVJWkQt7S`_P5u%%Z>P($JXVTP0!fgJ`X|h=kjUSd8cn~h163o=1z*;5 zhHhDV8Y^4v-tbu!zM%g;td&}Nw1ITeI}y``ELYhR*e!|R;DK${0=sX&= zJj|_+0hp4>?B&3`@(IFoC7845~j%IM4C$uzS zz6Q#S=}*rUG113t+x%$|JMu!VE40@HS zk=i7X=_Cw_NugTU5-# zN$=MXG#)D|ul)tcf?4u8S*dYF1Y$A1H7Ru0-uLR&Zd7)&T*xe^W>@wL=AsEFdyBJR z(2H=-GHMaJyBO!4y1!V*ImoU)I7|5QmHlc=8*E;M%Ut}DU3J?{r}NZSc)6PiSG&+% znxL6(yda?>>L?~(>myhZZsmr|8swTIB)tNF{pzJr%!}?pI{X&tDvbDWFX0NN8Yp9Q z_%QHDzOH~_lIU!*L@hpkkR-s9*w%%aDGk_b?>Y{*oU9N19sRUeI*i`m(H=C{F}#O! z;q_ZPvHOEH%a;%IkACJ)#*aWGhwnsw>ZJswThpJv|QpVEuW~ zPB_?CI&D_Kwn)um1`wMK2WYY8rz(@%8Rwky)992wlshlhPuQsJJAh z`(z|jYE)`%SugI(Ij<3%NA&7t)b0!OS<|!Nb2z$(Zg+@R6dpqBqoc=SFcyPavxb=* z@vaV&y4NvhIYNCAnA7zM(*vlcp+m67$%Q=-#V^?j(`o@t)0RT%fNi4k*S7#nHLhfb zgBs0RWC-+)$9f2Lcb&+q9G!sP&bt)kLCIQzTO3lNi{=d_Yy*fa$`q#awwpZc1vd0C1=g5r3o70rEjp~ioSex;? 
zDsqV{JWYeg9-;=%Ic&z}95N1vo^q7!Q=CBMuo&L)x*e14@O>^E(5YlsWoz9eovVv| zqsNEL0NniSAFCDjnC5pHzWl{)c^S}J>a!`1=r@u_%;ij@P=3NBq`s`n`q}DMLrV+e zy%_7C&y~L}$J?h)0ed-FzZwCXOBG;Lph!k8_7U{!tYHFmyvN<2DWtLYAp%+qZtYA( zGhP|D9D=5_-kXQSGU4la+b>2!gwnFGRq3-&0@E|#e9kz<2W8Myb(iW(v$o#}$C?=q z{hhnHc>EN-LL(k0cl*{JFm%Umtzk9ZwND|FV4r)!bb011TDCO5+&;2)pOKz7n7y1G zTg;uYI40GAO8oGA5WFlMs!rMw@}y-~@$KfbEWva=hc8(vKmv~%)M>>J2_w(0 zF_yH!HWX}T!#E7DYuXDn-6xOHvZ1;S-?sSGt2(QxYs5)kiw0ntsV3%iyX9t4z-fcd zBc*Wux*)@)P-==2XCOy51eUibiXfI_nAOXVyC9Dz_ zu6f=&TMu#D2_lB=5n9I5D@{PaLD%1@;)h{K?M6*Q74FCC=h>j zl>+a5d;3xFi!~pta5ht#p&yNR%(=SjU-+7Nlyi;K`{Cs10^Bu1Y1~z_ z3DE&YlOXfSE*t2#*sO)PDu=VdksBlBEyrh2gB~s7p=h1zrMF4drkrqU6J6JXmHFe< zrURYO_;P(M*7URwBLlr{3Vl-?yJePo_j+h2kGI?;@k&bQxDh~~MQuh8KYjH+z-?Sc>2fi_px=#1mpt6#s;&RN^CtXs(6*viBg2zM zh`1w-VX$d>mvxaU2?TvE&qHGA31r>(ay(7+v>6-mw!JFxYV0%N?bj$y)U_MEr?90N zb%zKX>@5P}_@qM1%}-l?p7H>DDID^88MleLgDDJ3K8oZI(Z?1JHI+C@M7F#+}MQM0FCpr$K*F$4r4E0kj_-iTj{7g z&oEz--|_BA7Xmw#yY2E77}tdlaT*ImtT0W9iPk1ep#hW4D%BThr12~+Y>VO1o2gJ* zSI|(TiLQaFt*f(EwTkBsB$~nXM3#65fp_6+Z(F1WF51*2=;h?ywSW!8DGcKiw7ZHL zObAZ3L>4}$Nmeur$#5M}`1QQptD<9`Z5Z(VJPo45S+^R0GNb~Ii#9M10@TlpyyiqlDMH8eqvSf{Nf@};+S*uY?>T}M|Hzv+iK zf;M>qayC2%p7L}zd&$(TKYvJ}0k1yZy#+C;6{P0|>O)nxv!U^%bWEgtTqm@g-BQvZ;ms;~X0TjEU8wu?^s>~UjGACqdx<@tGf>BTq=Jo-SqEyVQU`!~ zL+jpcI)`nidmPjL>7uu4$rEvPHH0BdjIXN9{8Fu#KyydX10N|c48y3r=m_Nw@^C45i3`nGZK7@2H@Z0&r-?4{=_XP`c)c+N_hHl>m^!N#`L^wm%s5LBPj ztudRd2G1@GTqb!icOt9aYdxIuj8AZOh21zM zTivF7NS4s_6wMr8ZvVsf!ZZY%Kr;e9CfRy(F`l{4Fz>8zUU%N)Vpb4R*CH-5K4i>U zWjxY&7;&a!(&2Au749ZGmx>)x#G|Y?9kg-Lbz%n)IEYcYs<@Erb?ePmf8(WWaoVl~zN(}Mc-j+^ z2l{fAIN`qmv2#^PE`A5^$yzK2eD(NHFvNyDxs10SkiA4irhP{m_{-KqOU1T&dr5R> z+R+KX;u{?1=@e1qfu8t$+XQ;P;Cnh@@WBFga(*u{L}7X6*5P}J3>7kHnuE;d5?mwG zBxNsR>N`>(kQYp9O3g*|Y-aqjm_+pCTCB@&iBf%9Jr75_kFRzm^zH}2L(E#ASm@oD9r4h$Od6G#4p{x;Vs*M&KNjipl zR=0xZXC7WRgY69K*ctlzV-Vu0w&6F_HfvFMP;vh@?iW!;+@>|b+aO}Yh3w47lRNo} ze$5X3OkDk3%bvzVmz43sKXjDP(2}Rr^+KAX5?ADBEsGbCA&Lfeas_>0N=(hv$2qAHI*N8VtamY6ErcFE^D-E#B71bM=~U?P3dH-!435TXt(| 
zzPMo%q3A{G9`ACgIU)k|;uXbVGN-bMgqC^zMcj>VdEIBn)CF%$43@5n zHk%)$l*`u)OPjB5uYM_mPgLh}HwWNF6uzs=ZIoCBe0B9nezLQ^wd-$7GnqAArT~@= z=!F=xM5zyP!4pD2lVx4Rut#|jioxQ$&XK*KU9QIB07%!bU2W2$6(;>%g*|=yMW<=0 z4qd#qm=VU4iJjiii3(cNtnDRK=JVPGywhL9>im5`rFQuH_ zj9$lif5;!^4$gb4@RUwIG_HJGhPWug^BMsFec-uT`p{T${3Uyse`&;{5aJ8F-2j0G z7f-rUTFYA*GSaODXt@(AluEL8A3pOM;`RKTP@$t*Xx&;5zTL!XEy_M~)5!2ht1?*) zBUfqwc^j=);Wh8LJ8Ux0(cjdFR-81|1M6wa4EbkvpO6|PSFEs~Ho`(#T5G%D_or?{ zuZR5J8Rbvf_eZ#%_5wW47sR_}T@wX}U&26Aem8EV(71}WySdYA%GtB4^dFucVnU@o zDYOa59P#>5sbKFyg#jzVHXJwzOs14v1|xA+r!4ahnM)AN1>u3_`l-yfy$HvxXM%I~ zq2CnYJ{I1Ru_cGTH-NhtDAp}$4q5T`iJv$7(OmhW$S@m3?p>5!=#+$FQ!i8D2k>ne4Ui*7W4QmDL&l! zU!JL5;Y-=J>u^U!U`DX3UaS*Y>K&e!xRH(2sI!&q!j#Xpw?#x!vY1Vu1}c1SBlXci z{sLBKq^+Q2J&}}naJl9U@rYM1YLwvPuys5t3H44V!0atL{_# z5w}|U%<|J=d^dMmSz6p-x;pq4kAZEdjn!_HZm{csroVUKK?f5NtF`ffk2%T1a1uaM zr$ZeGbESHt_pNQv8HRgzQGXF%YJCm`3m5*Sf)I}9w{jy^f;e$} z$bE*@xcr3pCsFXO_6gv$x}@So8lv&CDF?HjUDamN-Hv75(>a2p?A{icx6$|7ce9>z z<{J-24mU|ZevcW)Li&7@iI}Yu9X(PD8`yStBbymfb0-QZmvIAO=YX$ zS2$6UO#o~I(G6_Vy4L9|dNs%5WYoNP0#;2UDzTlYUn4+YS`(idFV85Xjy3*bKAI^_ z@P4yLXF<(dp;0-0KDy?;a#lx^WQ5G649??Vq6s(S@Age2s*yt@u>}I|X9xnK%RKfo zStScFLUGV)tir`bDJzwfo<5n^xcnJ0XCE`>Gh`)?0f3q|`b1ed$?6qj!J+iHm=#3I zJS>A-FEH}e0b{k8$ipi^fO7VvV^PVO_*l;%!On$K#V z%P^TcG@a0ZB_|_Xk@y*2EFR5LaqZWc!mF4toe+?GHQH3xXEt;Aq55-_4pkOQpIU5Q z(SGVG?dP#ecTYMp0!>~~gUou1+roD$=3%b{rC<}qtBf*Z4H;4tx>xm79B=ZgGeX1E zr(?lo+MI*}cd1!BB7BZ8Tq<>&(bBBbgYgcn$ybPc_Bw{b?Gl%Y?$X;rob)Ahmlg}t zF&a)=-DM4%Z1~<%je_dCRV=4fk5Uj{LXXPuYTdBy3nzA!w@GYf@?>PO+7kz^N?9f2 z0ru1TEythXT|@b$N7p5)-$fp#qsyq2vi#iTw7{i$vDk7Fkh!i#qAX&!e?zSY50X_W zC7^7ch**kdA5e-bRX?p78dc1SpU76j2K6kW}{6A1sZEn0qCksYuV=~(+! 
zzi%14VHvy<~&)#r|dc1 zuD#W9U&;-&CDFk2^bUcT6?s=`(7sYhD<0=qu9kMQqBOS<&0lFhifc ze_E;b^QZ?SZVgE9-1Xht5)H+I>_o7fl{>+XBuTGZ{b_lPQceBwnruPq_Bd$wBFd}W z>c|LKA*M1hq*FPOUK>astK6rrpq2|%M!pUeI14OM7Dxm_G$@2;@cdiE;eW4ZVfkE@%B*`$DaLeqE1^tU{={9O%qn^oeSV08EC+U+dxO-)9LS_Sr{W2ASAG!>dA>&$KBtr7vsAU;0^WZZ^ zX=nPOOPZaM0rJczVeuZC-SOpTg@=IN#3BbfD4?Vhc|^&+@3@F=8kJ~L$ZIDrmV3hR2@rOVH5PnBV@ z{2cnYrp-f+bZ4H!=}M(rD16JS38I|LOZ08Sb+PdXO^eq#xo##;Lm!Yv;+{Txl!E&N zpxvyjuYH!P=6JN3ATw;4v+2{N+DlxuBw;>Z^-A)PkRdv_HfKoV5;MVRQW^ZriB8%o zmEGq?!!U>~n$mT5(){~?!9{HikL=zDW7te)f|N70ld6(P==3Bdoj+It3)V9t;Gb^*TnCMCG+`r|xi`sQIl!PqgYo?*b0RbtkECj%@*KLBYOP^V#t3=r zx3{TJBNvwvk5o)15~Y!+8P7ILv)Rv>3{Mc;tkXoH&$vLaK3;z8N<#EAE? zU5oZxB9*&AZD+e+&D({nn+T$c72CK*{i2wp){DCJ_E!j<>^IPkA_0XU*@UwAa)E{E zc14OlSIi}|A${`OCjy&$$#KpOQ>Pej#mTdj*Q|$z)Qh>JCqg3Ph5vhmA-%%8!eI$Vz@5g}f+{+Q}~E^-%H zc(uNCBi2d!?Itbw5^lwWXToDs_)kvWhSpL?*!&` zDZ0}t;PKjL3qt9qt>=5mdD+A6-dV%wlU7qW4lHAkB0AS`+w%F$#{Nb5>)bU8C$c)x z&MSGHTCV-#Ce&e~J~Mc6r-;9!2l3+l6@T__Wyf%f9BJCb;v}&*!q>Q4;hW%iK!^;% zK^(D47sTxT^kg`iQlUyX!^x~6HlaMAL++RIVLswf3m5l#8rZp4{Qf#a?S;bmId2TT zYp8j~>E+pZshjS#+(Fs8#1p>O*;|)Z(v++z=KJ#JE!^qDFlVx{nicu9G1XtY zVWnEE{l%sy{^^K4Uf6jl1y2P&T&5<>`;l>wxA5`jG#aERzCPLzri?G&p|7EJG$3GO z9?S4PZ&b@=ZevQ}`duMAr-mx}2>9Sd*uN-E{*La4Ebj;axr_rODO^EQvL-4q0(^A- zT=E7y)+;^2?xR6u*5-}&JbYOcb%?wEO6B{i`s_7^wwdG8S8mQjM%M9|4XQnx&j>n; zWK#nn9Znhi6z>G`hps0~YF^roeVYTQYsVuit>$p#{fe;7i4twhyjNdH7H^k~7y7l% zU*vokcsbq(F4R2aH#W)nN$ms#Nh}BH^y6$9Pn5{R`e@A}ux`pGv?Nu^tYn7dX4zyn z1M8UDlYhq=FjziCiOVuK`Sg-Qjy-~8Z+>~%`%>*O@rRU<Eb^4``fI=y9UVR#i51hP`WjH|l zO-=+NA6#ZMXTeTEYWf$g)n+6M2Wv(x;Uc8pL>~BqUkZoiVmP5A>t1!K)N+dP%U0wk zhhc;TboB6%go`UYZG(4TpSHblqIc&p(PjPZVU3=81NYsi508hC=cF1+8kaKr31q<- z4tu5@SUCjE@=1c-(@vk#o=&v?h)B?1qQOdmusn|9b}mXY4Q7qgW${-duXVpfZQA*v zI4&Vj~MkjQHp3&d;}Hf2*TRYlWi^7eltge%&KiRc+9T2Ga~jKJ}7s4b)nWt&JF z+D%+f6|%X|WGE}ickeeE>v1#QRxvg?p3o{^+llFx+LYPUGhGTc80>7a3qEG1<&+Q4 zEs{)IrTuY)`7G@We$*V$*YHTbGnAlSjOF|$7kjcU+hh2yyz@r6xsjd<^l`ZNwOdG2 z>YyrjqiN1HTGO)C&1GhhS=}9G 
z9j9WGKTH8LDl8XtF0@|hg2rFpT~zOGD`pU-8rcyfT8HTI{m%U53D*~JFcsyqI78Uy zPEth)6idF(5$h-9|Giu@Rjt&cU`ez?yzsQro#`Bs7--!MOvmhJw}p^ic@pk>-=$Pl zdRy)~Ynjzg(^*Yzs=Ytm+AnMUNaSQaw!5fZ11wQ5C#PqBMx?I0WUo-VsShd-=n6r0-RRZ_OyvmbjengE_^=L(V@i8`A%%7EP-Qtd+9!WKhWonXn*8<>)Oh>9CJHt5UWr zKADYSfbyF6Zs>nuGMv=f0&37dQaREroH^g4(fVMi{@EE>cThCLggnLY^LCTm!Xlvs zCbrEpbm+5n^_WJK+ATroSygCWalEl)G__K4(SmTXbBaOR!EAKwvx#!-KGw$B^_a38v z^jKr9)zwustLA*>^USI-Nqc%;0yU~2sB2yd6&9HQjr4hvNxHru0mM4=PmI-x&J?WE z5;yS6$SRiY`bVYATV(LqdH))R7Zlk)zV32g1G7~U_q&>l>EKkk_j#@~`&rI#tGar@ zMf2IYUAHm7k|?gtL44<}1(;~1+%`QS4PDLPY^NFycry+!G zSM}y8glu|mM6WLsF%)0&QMa@^^E;is1vC$(wLcn18a2RxCq(;cN{yHbbW%;nbXSw| z2}^6TXjRnmEiI-qR0ZB>(#+Ap4e{NFM{c!52}Mtol(>|v)>s(_u$8ubX+9}Y$fihB z*Osyk4>mzp7JHNgp%7CWbhb^F1=v$=7f;*~=D=JkGLg8+fP9I;LaP4qq*>R}K$`l}#y4W7SJ=Xi{hMlw2H3^d8h(>UGN^pTbd-HITcoaI?VU6N#Rr z*ZFMO=()ow3a-U1^U*rIvk9i59vI24Y-LGnH0~Jl;cqQ$^iO(%K8ADSe%<#O86yGc zEtH9+(9tvP#kZN!9@n4z(NnsDeYta}uxG~hkJ@Y2ceKt6Hr|!&C`vZ1KV@cktzPqy zP{4OJP@HYQ=$tQ2mCUR;^b zju^j(Gl;g}ut^kWFa`5_8?HLYhv8jc8Hn|Oyq*}sx@;~#A7#MK&AguYh@!MRU9im> zQ(st#{fZ+$LNG`~t*B`u17r&oDjaIeZ6BHs_WO$6-ZITkU#Wl*0I3}g-R#!>*`S^< zcjtmN6{ie20(k8yOQ-x%5) zU*#avE6$41R~}v$qP9wsDZ}XUg(Y08H13g8oW*y}5uHGygz-!e0)4;a{;+IHvQ&}H zqY|9Oe0eGjtN_iFM%f|gQd+M_*)!}TqP$$=y$d5zO8Y&@o;>S!MCvraG!3yG)0zp1d^RuadBIkuIgKDey} z=am5D0nqZt2P<&gY<<$6ss<0l_4EvItqT}NYeE*C4nldV4AG50aU#;u{( zYq%*Z1?=|ijW3o%u3)<1`J8<@vctfBg2|xMei68_%4WfEXi1~*dOz>|x-2by*3?AV zvXYyNylw20g5Q-Mp+yUgU=LEsk+lCYzh^~7v2=KHuY@| zdZf6y=1ri&@TA@%|3SNOPZtZwbb8cN=Y;x+szIf?^X%vXifAc(Mayg-WXQSLTdw7; zXW9Z6c1|ubX9J}N?)O{D=i0zAakZmp4;QW7UQ57+iJvh$CpN_qn|!o(5j_-kj)pk9 z9e*X*jQ@NeAZZt4WJ>Gfv@#Vv!T=*(Z&8eCmcv^Mb0xavpV@enAt|Zw(7pz}YN4}q z-Qn-L%R1W7+Wl~L>H&Ri^E}6HlSyx~)T?sfnqXA*2nNaaV_G7e2&fXf=?bfcCHqum zT$8OX({rt_4skq#O9&%5R0}?>uT|#MtvDYpd^?5yX8s+qw`q+Trrqpp8mkE;1mN+H zW)VK`lRvb`t_`O!JCDemU)fK0HfC)YGqz+61qNM?OVQvuuRncU^P#wkHm9Hu&{I$* zioVwGOmD6z-i_emj>-gIN4$UwFBmo;WE(lHeZ)ff#QGTs6jb9l}#(gJ)`)(-W?q+6B?)-#={1(daP6#(Fo&BH+9 
za-+PsU#TbAHnK6YgUC8P<6F|j%0^l8H1NGWVw`po%%%NY!T_=_Ca~3KOrC~Y0Yh)- zS9gI9AYgCOxw4GfbdNU&2w&PjYXY~=XLD(HM1)N081pGdvRY)Xu^g$^)c7ojT)HsxSE5ZF@=_@ZW{tjpgCi5_@!x!eN2vU01$E18>TZ1d`4KbRcnUr?TCX2^ z4Nfg0EnR$k^9r)tZ#oYm3_b4Z$DQY_67Wtk#8aI=C~m==Ru6G4)q2+I=OC{F@@~qn z;h117Q_?9a>lfy>+Vy?icYGV+H2yASJ8urbC8qVGV?p2qidL5pTQ#2)v4@hQ8i!ph zou-H-d)~C9y(?#h_Sd}|Y3Z~RVU46minnI3H_2fqV7dNgNl`&h#rErUt|5;R4YpUQ z@jwSr1kQOBJuf%j;#Dt@nElS{A)_vT2wv@~oWfJ|=QgSY4=b^@->tHc{NnHA|IR`k zb;ip8O|jafbB_$sCm6ac*-YVp zLaXWK%*3DpkS8eE;b1=}Y1WB;f%*FWsXJxlSFXi4Y7@oMqzT}kiKgBC&83LB_SvL% z-a=#t9vhy-MXz&!shJTTf<2vV8mJQP{G0np(2*9+uD@bi?2>on1h6M~LDtS8g7;TO zYZ;)uBfj(f6dN9V%B;Pb>iiH=Q(GP{_3^obZ;8B!j{>`?y^A!*IJsiFs* zt3_Q&A-c&kCdz4e0g&*kz6Vgt)@d-H$Y6jIeO-7iYSRj2<3yT^C zYedGHbD1yEV0D$ETiaJQq0O=IdO0IGlvIL4Wz+Qi$xXs8>qSWh^Ah9H$u~O4^CQ0| zYgdpiN19jq%$VG5?`37iPJUv6_`SC(G^kQ$HjX|}0YjSQGk#ZbBNG2>iOv%nKAD^J zo8(X_r4Ikwgj%;^F^m)OZ(JQfMk=QoDTK+DhIkz zQk|-OZIM{!&^S_1GyJ}=OG_S+>vo(|a8{a2)M6cH$h6m3+h`I_s*?b)Mu*ttk6%#+@a+fL`}NCE zxe1)UfC>=WMpNAtY+zYUpE-*d{7u%?7_W@qKCE1jGO|}>Ce=L3p|FfV?QiiMsC;z}%5HPYN+Xc901BzTz*3zUAY5&` zs|RE~h9^Ipa|jyY$6#xbOx!A45RfEyuMo=uB#9p7JFPqwZcg1S(en0Q@PlS&gMb+m zWHO~RM(?ZlkaU91`L5of^&`fobCLSp?giXhozvykYeu`(jMt}B%y8e$FET=P%G?w! 
zzMpfq+QF8}%0YOoSD9s_<%TbQ6Pdi2CmjoIbx?Fbo0^u59|bVN1>5FKix$qB@s>V$ zG0a|fG4*S=_;PpaqQOqFG2!;*U7SMp9c&4$$9XN;M;263%WNEYO%F)UZcDF{jl5^gAB8sk=Gyj9J}LLy*!_AMtF(K?zQ#&ERbAxm zzV1`@5|E^b{angwnYQ{+pKtw!%*7UAsFX+hvV|%aDah4q#7+}@F)3*N+H@L?uXUxt zpnEu(s^iA^bVk^9+eqMe^GyoQ(Kri7N`I8bQZVry$44?)D*Qb!wDI!p`h)|@3W^-E z9S2*yehjf4zTB8Lq-XY=(B(A^!G+m1^l7Lc5|x>$3Q^i}>arp^TtlGjT5!Etb^8vX zbQjVM--i#6E^aS=Q-KGdXta=3twDu-EVO_o_mBCOQ=F+l(i{IvIA6RrK%VMKDrhuv zH=zWh07OP(`H;C?i21-SUcR+FrB4Z|U7f>(gnSCKJ=^_IRfqhMI^l0!?X%g82*O(P zAy#4&@9N)r8hJzk$Bo;bx5&3IVCKoh;E^!xp5>3-lzxc2wd06S))Rh0#$HCgcV~QF z$%tR+rRT$UD1|nli|+7g&Ou1-1Ns^L+%ROzMavqxZb~v!WtqcTf$CUSB_+SWnC*S7 zm*c?9?PTH=rnq7~zpDD`3)4Cql&AvVz z81pxG>DisxO4~_2bPh}vnBsq(cG~M&kaVJSlK^mCy}VjWs;Ay;esZMYErmC=JMMpGIA9uc`%c+rrkdY4vKg&r1vM~*e6q-i7t z1h!cdwxwL}R}PaCEi;LYy`@O`9Cr5tteWlHu!@((G>d<6WJpd3Y}&T(J&44t4}MBX zWY$tV-}9NkYvP}uOq@&8pC?Nj(VR#t4I7Rgp_rFllRjfr`)UN11U=T25sw11%|)Vb z?YlP`v+Yc`w0s!VnwioDLg)gBoHc2rEIPt*_rw|V6#X0M%DpxQP{$)gXBRYo7LV3d zwKhu{FSZZSd)Mvqho;FpFLEi+7YRJw58JA3Zi_x;cAxiKW-B=sH}c{t&qW`ujQD;u zRv&97A;yok@w{Y|7mj27fN?=4lRkOV1trcpyWnDPLs#6j~9}Da?c5{K+(%K%V5jU6pV2(Hf0w-(glVo-VB~MY( znl@~hkBYRLyevM6N@qG3vIR_Cs!*;fX#d%4^@kBvNPT@R@c<(t`2KhOLM0N|dvvlrV{`+g30biW zIAtQcLcmR0?4(Jg(BuegF@j>MT{}!}@@`z9R8kO0Wl0?~5RZ+1=#68IdR;~8R9F|RynU_ydJ|I4>M2`*! 
z7_2y;V`u5A{ubGJ?cv8#)^eZonV_1nD_?L`uwp$y3zd2!S1WxyN{JEg0rJqrX>g(R zG^Njnoti2CS>})Y-tR1XR%2W#TJ+fUT%RzrYskE^1YG{%7A50H@wE(%JQG{ufzJxK1Zv#?5R^7UvT6aKnv$6Nkt{A@cJ1b-$ zJWT}*=!LL$?x26${Cdq0JE&S29<;=H4-^H6ryt6cvttG-zf<|L_R61=+c|6Fat)}L4N#{wV`^CHN+%UbfJsn@bOSJ*)bmsx{goBR z(SA9OXWU+6SJZkq4$VoN&|lmoA{n&=wRr7l9d2G=W=(n(3{$7&Hutq3!<9WecFSsf z`q`&X;ueh~EuIznWiN}pr**GIx40S&+I`iuUCdNT9g^;zhLf)d$%^Gr#etY39e%yk+?b-pJI;q5A@geHF1 zO_(-N&R6q5B9P<>X?=o2$F2NtdVBFZ4$Y6o&H7pC>9IU4fM2uN*M4RY4lZszZniBz zEQIGa6Vy6^zH6WeMk&tL%5HHMy*8Dqi60O_8ECn|B8@YUW>MM+W=JqDeh#bj5;siw zN1WDdR817v+qI%}Q@G>HLtiP4&|6!FlT|_bB<~;=*WMo1T3ueuN-^cu@%v$CymR|~ zPGQSz$=uy_Nx*hV<#QWOz^2zZWcM&fLaD=e>Vo`Q=qf8aML$#$w(2 z|#8NY(zKbC`m4=e!18|3Hx3ft`GWLTVdZZb%#| ztOjQE!|A+czVWaN$_thJdLG}ORtS*&mR(`OYBaEri!@5ol-c$ zeY0pJpY9qLmay#~8gt#L2bXD73d+uYI+7wDn3vwJ*zctU3K;EN;6^m-?ay8Xlw~t@ zik1XG61O#8+{jHNm3o~4yJ*?`onlo0Ux(#k+Em%#C1>u#4@(E<@=#B^ z$ry<&Bgnv%OYssCD$MZf*Z)%?4e;EkG5r(qT9vj%zsr-6fLS=xljeYYs0!I+UEl_5 zA=z)@?SS$3yGor2tI{xqB_5bfO<3nIktCJY9bwHq&oHZeRrG=r#yL05vAObhYV^Tw5 zPa~>5t1e|AV88o5J6fp^niA4EfYe1Hyp+%zqBmM^KSid0avS|dB>K!9`Xl4Uo=$Xs z*R%iK@FhYFbV_DUj-f(o*GJ!H_-;9`q_P>Ht8F!#@c`;A?HBmgj6e;kx_KZxOMZf~ zg!R!MoHJ)VPG)R5*|m})$NUSK=2#apn;SrR@FA(-DBM@&gXowTJE&Y^J-p&8B=?An1b?pSc4)Ga z!kh!c{#x?5D5gK1uXW2#66DUHi;1WK)LK-Han@uP$t-NnmP+GlEC*F;KxGV%?*MOe zB+T4b8DG+x(!-gqyIO3m|c_;`cWpBadA9uKBW?86U+r zTX0*A&(*v4-WKACYi5g_U^%D5&ObPKkcz*j;t>c=a9K2<_4* zrJ};e3`!jOrEfVCu=p<{1A!PSO#;wKjue|*(2$6hchtVLx~(_zTF-4(GTdN(r^Ok~ zH>t#>(;i>>eo=IBL=|`7>_XClVDLgW>zNS=J;En zQip#N-%0PV_0l0oY_W=G+)pPJUutn8hQr zIzwWK%J)+3n232zP+5V9gIR&7S*pgLVw*(mNCl`w#F{&$#0OhWwLLH)wAH;3-}-W9 zo1JVZpyGqPmSGuFmN4d%6zCV!@y)va!*Kj;CDGI?(nrZA^74qb*@G|H+Zq$2rQ$4x z?i7~w2ljN^&G*gAVwBsh*kac`P9sxCs+^$lmnHhMcimP0QFBc-Qnnmf7FI(*b5}}w^GXEK_>wF@8G9L%8m|S)Axd)x;2Q2Z&KnEDvBWYpAX^g#2qM+`@57(j#bEr%0NkhX+u~V~(saI0v(Qg>1Ju~NJ zaKish;6tvzQAcWq5L#h(XAE)rSj zfl5`bL{hBW>D=aEOfpeu{l}yC!R7})hG6BX8az@l{;Y<1sdZWtZwdFSbSC*Q<2Q88B-%lUp1iG+`HMd8Ut(r5uC 
z#k;RUz0BbN7^T}j+=wSAWoA50(}5*C-cVb<^lg?$L1Xs15chV(`zn3{6(FQGni`P+ zqVn1wJe1+fO2k%6B`w7`KS{#Fcj_ul9`2Axt#`Z?Dgi#NoY?GG0$#mlcwUYBo30rC zjsF$h0}iWs5pc4V=MH9v?HA|v2uAs&iao&x&A$pIUJ7Zt=RGE{R3sIXpnSFSy*n5@ zp_%1sHK^RyQSxgV3v{E=8N;P%RlTs_w#N+_@i=#Hd&*YfvUFZ2vQYD>{=-n(Y}OZ> z=TU%8(LaUFWVkt-U#HokSAr>a>5e|jnConA=ZmAVojzjQtG|4UZmZ2?s2dID(w?28 zI>d9Db_Z>zcq3sh#@G}@k9VKV>rtm!;*wqaQg3bSutBGBqqpM05+S@?N&fKa-WH)g zMmeX!9IhJwR6a4*PrI(9fV=JjrZyyN6ghgp( z)8Z=%vs@bW@yqsTuG+em%Li8=kLW{R>QZNa-^ZrP)1_mdSrb7=Xh6m1bPHlY5wg$$ z_MRSxsHj~X&%3CAYY>7*T&PWN7d_=2e;^I*Ri*uTxTR259MlodQa>fsOIWj$tS1^r zM4v9!jr8|1n2}dXpC{Wvi9&{u5yW<$@536lKmY0J4p(K~KH{MgIT;r7QZ7<_k+*xz(A0lDGg1erZaYe>Sd9D$c^u&JxcFZjg}s0XyoJAas%w|}tDFVDIN zCTcM)>=T(&7`PeQIjiv7^f`nePPuGRl6_Tg^qSjNzSz`o35+;f7-#98V*Wsb3Xj^48<$nPgzWFKsTJEMo;6r zr!!J6Kz5ixGd$Szdp>cIE^+3STypx^b$`&LruY_gmPRh*F58}VpQ^ABlvr*jbLNIm zy1KN!((ZP?Vo7#2-?5dWAb#T9<5 z>3sK%FLaBmQtrb^69nFU?Y@v(SzQJX+>n$XeH>SWOS#FRrm6pIXU7$mlObD&yAi_m z(j9|QiOO86cKKOA*kEbk$-11|5r&{ai<@0lCaNP{P{QcJqL=wWK<{glI|PBEHK`iDGXO$Y}Lt?8?HH>Q5!Yq{+$kqL%0*i7>HGkST9;h+WnC7p{ zS>P$TmC6I3s;Drk{WWWjT_Eh4KP5z&d4`Cgq^-9V*=+FSMsrl`6?8qI*C-n0O{g^8 zX+gyER-|DBRh)nRWxLz3Ki-1CdDE=xCH`uUq+|J;|?)vHN=N!=9R~kwLWiftG5>K|Lajg9e zsh8`=Snd*y77Ho3(c<1;2!+eoUQ*ecNNl}TW(mU96%U1f+ zRsJx+BF2cr?O>tCdGLQgq9e$x?lXG9t{{ zOIW|Eff`g^ zXh2m zwr(tuqLK#LVTE?5D8mY{B{*!M@RobXjFSTPK9)L4yS_jBX!|w<2eZUWo~32%b`XTV ziqohlE^ew~86FSYzigbKBC&pQU_`P?$GSfMBDa!Mi@`n+wwm7~m)FLd+0+}w(V?sw) ziT+Bo#&e39wgJUHn=nm_7|n`(n#xzf5{6G{nAPD%nvb~~jrL}m4DQth)-oR-yiy?Y zVNhl&rVj8fht^)-);eHJlQjx&4ksuGfmgiKO35#%vugUgnkLARo%kdVpJF*adSeOU zp(ZbO^z%rntP3E}#GEKpVzC^}Ob~DuOEG8BHqe<`)1oYSL8aokWampAz{e;<@=d1S zdzi7bOmLwrsJCdNKfA%&las;zPV?;LW>+XmMl* z-B~7N^34==6Kvta1CFbcr4j^3NAa#*-(J&k+4Wp6Wu{Pl(xwvWoTUP{k%KapgX5#0 zrS0RN(8KIkqyr}*$`=~K7qL$l_&?=+k%g5#!EKdtdWa)x_5ZZr{7hI~e@)*83h+w# zQrGGLxD=>tb~^KkO94BM@?@UJ1a4UldNY4+WDYGlDX%@{ucQ%9$0&9tA0;kWGpM$y zP)xrsNw+1}4*5n^BD9}p%Lp+n$6mRPeDG3s6zgl4p7gO9Pb{f=E<)YVmnR%Iur(BUG>fkw}`+VIM 
zqX#_osqPeAYhh&{SLL?Rg%|3*n)s`ok0P$y<{@{mFOzYuI>I2EA&;}T?*bqW0l$@i zP)DeK9mLf%v+*6mCA$a6lE7zH7y3WhO>ZjspV$}d23?pH)YYND%xSp$S{#(mjDsx4 zbrf_4FR_c<3qpJv#{*yQ=O5Wiz5Tt8txuLrsGqmX{4B85ZG+RP%Nj9?*)6h9QC6^4 z@p+Hcvo6r(qQg{URJK@SdG`gqG|_hQj&SA1XoKk?*UUuKpH5<~jla10c(JC5Ngl3* zgsXsula-A;Q{|>{sGfFyIyveug?qS9Uw)Wl-50fe=&x4ZUyA2Td)R8VM)&99emepE z-j6aWVj$z!2r>*$VJwTGKBW9X{Rr>Be;Vu2dJ31H70`G}?kzP-Vt1dRNI(J#BZk)G zg9uAdwGTr)pQ{n~+46Y5DHVBGLRtSB`uVH%Z~(`L#nlaV2;?h$o<_|^#H zis@)GuU~dJE$TC~eX!dvC7KT4n_T(cU{s^t)V6==n;>J7|_gsrARs2$725RYNOrh@A}T;v00hKi^Ch8mtB8>79oTgb`|H;@C(l6O@*^f*eQ z_v6mV_3x$4ys)I@F`xP`St3*t*9O~0Z|AqupS0LoicJcub^<96=e;0ho))z;IU1fy z9{bTOElS^OG#}v%^@aC~vh`#7F*DH`heQSekL;=T1NgYj`pN~y+cmIb(hDFAt8e0z zK$!W?lSqcT4GvdOMY!p!01U{pskb{$Nek)(c?nkIp$ANLd-Ip+4K1&7C0%zd5bWgl zgC`B%dngHo4iC97qMOW^kz9dVaHBxCIO=DY64y5|OI;sXf(#BVRIV)MGneG}Cp$|< zn2dcZg3C3#0#zR$u$86w9VCxTB;qSQCf8uZ#HJ9WjY{t*4E_kc>Wr2=TVn;o9I*BB#{xEw zo7=!7stcUvxr-Ht>)@^3(nN4pX6MWZ6DY4_T8R4~3 z5#wn|7jyGHTko#MxZ|&QhO4nw&Q#52A3KQr#i84sYg8XF|j}$wszX5G5}h6YEhjwps<;+b6#@ zmFw?c`A)aUFj`+m)P5c@_RX1a?KH@lpPdf;TJF=LJzh7)5`AC}?2ok_P8+52N_tut zKEtB>lHRfY*j$QlYm`TYFCx4jB(123mz#TQ(MDf-yv?}c39C^h;Z{k?V=^D z#%;}2R*tsroIIOKF^LrwS(I{igtfF7O3|~n<0-0Dg5nqm*;|#Via#zkNM;q^wb~jL z#i=mFB*`%AxajwI{2_7^e+q!meh^VUa@o*AU^uUVp= z5+fzp^$9Fo**|A%wh6l08+$DLJ~go+JA8U8B#H|AygEcbRM_OSE74z3^D36vAWpo>_FdhK{$NKe*b|$DZBN(vma=f}!Eq0@-{sv) z#xx}UbJxyPu)>A!Yag&>Gs8`@@`NMa^@l*{&o0v}1w{*x_^#^DRTvAo&c z@mwR@{yOcMa|<-sHw2GKRRTm)U$lS(li^pm;9UZCq3&20WjZpgd$+WsV9=UrP8_qA zy#KeqI93_FfhP_o&WulGm#i9k9s4G+>jQE7fYWtM@4A7C9toRv9{{TBS4{q29>Nwq z;+8-{k2zV&$c8S3_L$e;Co4#9_#F)tyBfx7}FZm>%9x8*&dJ zj-Gp@iLXdO)mX)RfKWfpsf2!ek*(NDh1K07aR9@pcTSj2`iQY?g@g;{0dlWJnPe3w z0TeM&ja2MP8%Z+**Raob7kkjL;VASAI{vCoYwlm)I%3`l`33NmudR=4aoBAaFijK~ zsEnS*Ic^6gnDEn)2SAQD1_$3IJtkHTyr*TKx$(-Hofwd=QFiBTD;UQRYNEL@L!Q>) zpAbOKYKwE%;k%JQU(#2ZzVM_{D81*jo^C)OBOb?v&7i~X8(rjpb2G|}@&hZV4!YaC zT6Zt8$7c-ISJJ66S&fZ7BsE$5DUyAiL!$R0 zs{OAoME4l5zKw2$J_^uyk1pH7?FKruk5D?gS^%{~mzIe3GsB(Anm 
zeM>q|#*Z?g`k(gfvSrsoxFuUOOXW=B*p2jerrHve73%Eoo=%tI1XgGqpaRg58g&KT zxX@El9F<&W5(@A8$9V&uAX{5EoC30AV%ahDqns(VU)E$UlGxH%zYmLGPO zUB-fNwOTv5{WLGDB>o%xTh;(8&6Z9BG5xNCE{cfDMO2hBYfrDac*IXAhJNo4IVmM+k1Nn$AbC+mwnL{mLXK8&eu3nplt4R=gK z!h8A%9k#m7-z9=(jb3pi9(-IXNiCiDE)(HEZZbbFB!gfw&=;(CH5m;z))ZK^84L| z{8VWY@&U3x(jJc*>@?L`IEX{Q`9n&YiY9TRV{QW3dx|nM4c?>!r_;kn_ARrGJ2E7% z+=J#@N0_w;7d8XXGWhrzMN@eVqFF|^CIwvXyO2|6ICMtHk3S5P9xv2_3RF99I|9b6FG)*R;v|D=Ma0iErY)jp+(Uev487oKAR@Cl=(O% z3H>5PC-bwEnK^RgNWYqy=&vnw_4`bnes)mWWSU)N#_UG$^;to(VAbPlJ}J-jZ0!30 z8~FF?gQcSM)KkGP!*^XJs)>-xp+Y$6Y(Z6v$j!MevBbitDx-_HcGY>9Nj36Y^>bxC z6A29dR}zX`N=>IUN)uEt zWUKVLU22`RRo}@)_dJ<7d`kwS5|e}a-c@9%O_OG!$i=^aUJw|X#Y`77SGAr7h@b9o z06XCm2yTA=RauR3v@^wp0zZ4FK=sGemb)yL^MG!js*rSO&w$3F>l2zdzW#JagxI7d zvbuuAciX5(J}tJDP!Hubci$+jtyM~MAD@GaX1)@)($gJ)yTsg6cLo<_+DLj%8|#%& z@BK}$jGRWN;!66wM6*D)&{H`nlg^8`>10IM2_a&>z9Ns!50vEMv zWXm-e8N<6ZmW%^)2VRF4NmRZ+rY)`)=i%?JKMeOz`joYg+DYhHZS8A59U1+Z`dUu; zOt_beN9ADQ`f7iDbGNPy;t^+o{piZlnP~8L1Y9ZNRp0A#@9lARvV2wOz4}C5Cmhwb#BKP)K)gryCcoGM0&c2Mwb-eYnW-Ic~B+3AvZ001IWQO>uLU*-R-$#q}jS4X6_vvCw+N ze!Br3W<+D8bcEDFc>b=W^Av2cSxy@|Cv^HULgu0p559!U)nUYIn{OsO@sCNxk$ zg3R>6-D%39<7cv8(;fKu3+M-|nsXHmT4lnl8Vt=vrA`L^_bq0KFXYAxjylvn zK0=HAta{j&Al`hw&5D&wZZ^bxv~6U`fB%6OIBJi> zHRUI6=$cXdf*G50yO!h)Dfy5pO*+`0pu{9%Yc;EXLBShHVAQF!k6N!t9(I|V5N}u! z{PQgNV=@tE8p-~)O>X~K-w=^}L-%)kQCa-ghs0HB|I6kMxdDqZP?TV=+1ZwAA^@PCVG9oG<B4uI}MKB@uBZt1GRz(0Y~y?E8A~=* zp0L#FNcQAOWDn#Knmep1e?LZzh5AB1L*~WUFwa)rG8+~ax7#0Mk3aStaM4i;=)2(O z|Hx~sa{9lO8!VnHYAs;{A{FrJ)`;ByU^#zCge*g89M`gaX!%AK1fN+b{C<*}23OXl z<%PWAHW(5X07>^n)&1wY_Yq#B7V7tneVM5lGKhTOtnzQTQI;ABPcH98p_YMtWeeFm zK$9Y_%*6XYBxe55$j?Zt$o^!E8+~G8{gI_L@&A}-oJe@|BUOnOdijr00E_taTo>&R z*yf*SJ)V zdJ4^F7M>FZ+r@vs^C;r8yjdE*$s~UZi3>q12_AX;KWX!S-`2zF&oa2P|82ZAGkopN z$?YHiTNLd@vk}nBGA;FM!+i_Zg5`Y+)h*F~VbDJ#u_9Hz9Glmp_iZWOG}3-}D7-H^ zNk_(!`Fs2Wknl=s5=tI4nBokcZfqy(Mag9U8vZM1c{p}C_Me{j|8)*hmjWv!(Nt(? 
z(myQye;Snk>G~_Am+o9I_+R`h^YB046=w5$2RvIb-T33w`oCVp1Og6BG^~HAfB*BP zd1=2_Yae%Afq!9c{%6yvA4z@=;FAoH{C|4bznB5oi8QGwwWI%^M(sP!zlZ!k2lAi) z^6w%4x0U}F3;t(J_up3j&-Bv&dC$KW_J1$z|JBR*C%*rCFaPKFvEO%6zro}8SgHAV zb@gRW)b)+8$XRGcv%8+}%?U@-@ib@=MlcXB8=^U|5sk@2f3a^}@UG!hU3l8g$?G~1 zEgppobb|ImT}$@=qwKAN;_QPh;RKi9mf#XRSaA2?6Wk@ZGq`)uU?Es=cLsvHLkR9n zaM!`z+2_4?<=g%Ct=d~P#UE2O56{T2yU*#Scu9e&frZcVrWfq3Yv6{>^AN^twEU?RZ@41 zEah7`xx`ZpxDm61fQa?CSwIbWVb=2x0rOvaoF%?mTC(u1H6jEHz2|iZDaM%~6V{|l znPE){uLQ4)52?aYey8?Ql}r7=Xs~Km!Qff@dj0+XGr0dxJRtV?p546PA@%wy=ojkp zAwH$A&vDSBYE@6>wR8quPy~)F;<_H1z#HPO|2m2QFif8UTx(oLib|A6NJb0S-%m8N z*7;@9?*rysyB&tE00xT~>N63H@7NQJ(Fgs)lTG;8{Qh(D`@`+u#c;sf?`N4PH`@P~{}JRV z|K%h|vSr5yi`2b7#}Tl2JQJ#Piv92SOe_b%!oS^!?!zkH-T{uz$@UQ9_Mo$GtTX~E zy4(Bjx`n(-xlSs8_-&gtFm3nUzwIg%X3IHF=Yoj0j_45m2 z$9v1;P$X7y?OuCN<$;n(+8(dG*p+`k7-Kq5m!nA^blN59P0i zKVH`S#+ge^I0izvw7Bn1H`;%Q+P3VKy|qoqK`r>4DFGb!jGRR)FexI~o%-K7njlY! zK)?*ZIY(RRpEnPX1(=V{>c0bojB3mWKiH?u5xMStl$lrQAl}4-utrA@h7}ki4}~^tWUG z$`%D#y$>8SYRabk{tZ89Y_;dk1$Or3McY7Kae!+{OPTI}FcxoKHx*w}Tvd|_I7{eq zRWKKRBjSqUdxIC$EAx41{-RUvx+0YpMwZ&6ZnK1MO3=DamZw>d+;h!pOwDnb|8kWA>Q+qaGa6T@YbuFb+Yxbk9xu$>gbzA6lu+My@U*{g7<)Cl+QsuMd zl0KXe9dAm2@A5+;L96+}0=$aB(=Npb+ZEH&yID80ZCt`2^}z)BA^bsH&3xawuwY&a z$EJvXMSgjqf$g=ttOo??>ZK(V0>^QgeEH>R%rb<7sZX&v0+Qz)D2+2Np%Xp8Ebo8cq;i``FVx-BBCxd?%A!^(4;vcpbhqnyNIE#7bDt=oFhN zL=zsDF{8wwc?*)uW{uHvxuy`Jwj4*?y^d!CW4e9#*pjqq+v5#BuP0AVV$;*-&qcHh zZLo*FzVp8THJ4lYvk(t?rWe26t?H~C-hudB9!CKt2K!S**IG&1EJioIJa?YgDXLwz?j0YMxYfbIulSJQeyK3FHmyx)alR{+`c2hc_?u&n&y1&xoac zEp(#m`ZiC&tL9!(RV|Y99dF93xWUnF3W^*5^B!|Ar@o;Q1#=tANNDE`i6~`b@n>rz z{K9bh#x#lZGwp-;I zt1uhMrJ2h9mtY$>DdA-#f6C%#cL%IU!9U&WO+#D5Juo66$r>7fHKyz|+zWwbg*hvaTH?B}E;iXi6-h>w3<8IKFO7v*jNfh!H(?nJ&nnX|FN$mW+JpRXKR zJC@ns2f4gIAf*^4J%0ukHFAmwUsM2#K9CyY7~B)agd14FlkYv#c`QqvJ)@e7U$5Mx zyZ^cBd?7?zp>|R zvOz~sr0bAM`B!;6fka3!FKL~Fy-@<`yaU$l?U6c&H$QPYCh~UX4tzj*;COqhQ)3wr zt%o~=l59}&fN|sQ=u7-CDb!r1KdM@!$clKrgI{1J=zUn%MTHJ>x1KKep&65%ASC~H z7a!X{`uI| 
zeirTTG_a#7O;pLr^*#LcwS9F~slxU!5H4R>Y6MUBqOn^W6Nh7F7A~VVhW5+^xMCewLZKogd^95a>L=ey=)-U}W`BOeDYv=G70! zHldaKp^fC`Bbk|Bx|HTsiwF)d>|ipukAu3|+@GtdFdCxN#eUk#63~#}hc@ddWBzbJ zrp2W1p_M={7@G4){2KwwlRLEl-z+YFLK`DOTRAlA;Ut4@l%3897^mPu2ey_%SGSfs zHo0bqV8TT!6biI+88l$EI>rTomLk@)7^zHQ6#tb_|96Oj_mi|jH3g%^tTUMAt!qc?$U}e7-OA=Y zw%xolaOK6#%r-P}+A=#&TiX`F6~}!+%=l9Lv&<p3zXLYyF^o&?U+0FvOh(Hk%ts zIFY)&{&wtVtKwY6YTk4YSA~(nF^k#=yg&As+K)rdsC4}Mfc9f{iAil9RX3EXF@E{z&YLZYklGKW< z6)P>L|L4jvCR5>nr_h} zcPGcjX3&zTn1#RjZMRxnDC2cFFJC+DCD29Br7&*zzc$(b!JCG7>>xHPDSfn+iU$mC zNU6aoQg~I+5I5y_w&F>g<>pmB+E=PxN-r^b(x*tqx5;nu(%!%=#Mt+b1WLN7- z(%&27S_>sNM8inCOD3BNT*^r5bM&em@9(9zebG5emS3Pe8nq2$n-$1ppf_x zSdN8>+DI}+u&oH~*!5X3sI-_x-WnvvnorLurXSa@P@AASdfz8^{geI3W&U=P)sHkj zYi&2#@$i)pDc%sv-OnPMzgrghJ6W~4>Z^M#cUKF|3G8Zg!VaF4L^K+ND)-?F|<+;p< zq(2`s#VV}hIxCG*G*2+6sb#<>Tn{`!ioGnqZYBnU&s8W~#c0wxb!CoOom|w?8ekl{ zju7E1ZE3H^Ni_1&+_h2o$bslD2;kZswiJz3hh#w)%amA%;W-2If`vBVl<2Ef-O?H) zZrA4q^hp+V@I9^~ZJjih>OVXmGydoAU{~H3udV*=OU$9L`(!(gIEYp$Lss>@0<~7v zOTMb?KtUoXle#pVW={0LG|4vEEqE!$_DW)YTA>qZTYoy99Jq(Zwb9do0onXdoosk? zij%cz+_vw*byhZn12ALUEAh|wTrYlK2&zan^Uv|y?gD$I{}rS=)}r#Ejz3FsZ>a*w8D}3YH1_e|8R1xm$*L@mX6m)pHiitB!&Q%P4EJ0+2+e#fCD*|Uf{Sw<~NBamDQT|M+;PkYY5TI9l2 zg-xN)c6|8I0hiyw>W16G_(tf-Ws6{OLk|4eUL@x^Y#nbQ_rvhfI}L#eeF5%bHF1!+ zm~ZV(m+{^9gcDaAw8w68*K-RxAZi8f4c|A5SAb- z2SO5V5>G5iOo_=avk(VDuQ$-j7nP!MgR;yPBzIe!hRv&T=UABNxoLZ+JJT$KYN|j# zmY(}y-+s1j(BFwH7B}G$@091Z4@ch7DjOvvs{ysL{O) zp0I2Em*mOKm4GA0heXjva#4?GUcVRlJW}fTPj7E}{`OhZ_QA@Qh;d{^4_ztHir0>A zM4x9$ZO1cODFe!Vw2Ky}U=Jv&uNUFp#FqLj(Qaq)h&R8dc056ll(KMp8Sop|`)P@k z#UY!L!TCTI8?0V@g^Wy2QtnGyU(4$xdG{4M3A4f9a9C? zCsSZ-YIc(x;Gs^Yd$Ij$;9UylZTfbsG44;QRW&cBdGgI97SV$;HTEa426f!9nobu_erOhw^IZK&XHFkzwlbm1QyQ~Pb4O6rO)3wicI;Xs}7QW*DU(ClF4Z(c8UJ4@D}Cyxx1 z|HIRd#|l2`U*LidGT*K-08fd!!!mCTYG{Y z9WOxkx|&y{4aKAH`e)$saeHT($Ktf^FCd^B9X$V(%ii^xX^vMxoy~+}42E=OKQ~<# zD?S}Ew=Yg7bqmsy zT0J3VSfa2B9ZO`N# zO{fu9-ynKQVfSfLb*21dMx$Kj!$~3{iGNaN_&Dr*LC7m*Df#yke5L}la(JCH#q1^? 
z-MhU!pug!POf{%liTwNGbnzSW>6}JX_0zCb`d0U&8oNX`NCJi0u#j z2tRx`O43tSEyjGh@%V}qgu0?gBj|p>6h*`~dEyd9G@6vb?60A6K8#(K_M=?_S# z(5bbucy8c$=wZjTZ%79J<}6ULR!VB(r23~Ky4$O<|F@`jmKCVsX%S-OG|2$#FoB(h zfk;p=zhh#-aq;Qjp+(dQT#*&`lg4Q@8&T3p+$$Ib^E#&0e+ea|&>sF&d;x$Vl&K#6 zW*Z$un&riI*3Ks;-J_)HMELvwtxaHPf1Zh)%eE&BnUK{U=(SNmZXn@pka z{KokLdVSk*&k7+*|4O!y%mj`FMub2X#6Vh}a1@*g88|XO6!p}$VAE#|cycpft2+DR zr~7uhI=A-?pNoO21XBc3o7;v-tw?Y2-k4qM+mVCkxxM}c)#KwlOrOY9a{|hG&-%(l zCY1?TOZSIYSkretaSNiVBLax3C)9{+eQGoh4NkBxT*JQ%J}2?uJ+}ALXS2qGdF^I1 z0hjo;g9cqYuNH#qlMp#IR4Vw&VS}=K!36~HUYk zLC(?$oVIHjwvZ5;)I>_=L|`Dz`L#N0SVXR{-ae+25C1xLn?ciUxNoy_dj}F_NBt*& zg+a^W`J*%k58U)ziA`Y7U2iw^ zVNcwN0)`qr35~6BQ^6-l_Qp-&Sr(tLt!g!wx{3tN5mUsaMnecOV($0q&&cE);xxBi`v?9ezkYlJMpq0c|?I-7Qb z40A@Q{zl{uii^IYgiGfLNOcF*J6QfcVb+tFtxntI;DD{Qq!%G5c@otiWqDw)cTT%R zXOUTVr)#vK6)#*`s}1LHOmBJ6Hr0e1+)^AkJy{Qkyc0CfVikGYeK<$Hj7a^>zx z-%#|u5z3vo|CbyuVzN_hfMDFQShg%=;L1ic?_~XYd+x^e;Ujn&K2eZEWTE;~Q>EAA zT+rHatJ31~QU&CpM}`-^g$H_qICp13dXr&p%kxJf32E6s%ajb|mD7n6ZfOrc(MeL! zZ^@e^)y~4D)g-uOo+TIy&`${a@Rt~7Lh&I@W2MxkkrkGnN$={Vwy2J?G_uQ#Lkf3O zc3GOB5KsE9PT79*ne(IhEcxBVw+sFf-t)8KZM^UL5A3#H5I`JlI0qIN%?;luuP_&pe0u){go0P;uqIC6tJ>3~Tc@6Vd?LHVX3`eZA0M+$OL-<4~U+`+>_7HV+w3vF@0dD%you%5>3EwEzpc~>5W2pSNWjA z=hCra+LjC<{#rSKSw=4as0X3Izn;`9|2^(~dvSu^HA0B5-ZXhbIpjv*zpfLg!1W zKN8VpYe3&^v&S2|`#p+Hwl6zs6EiMiL^}+9&JiT_@vt*7*y89gOOm&I=-2f-#)8c< zSpkKk(2?NR@8CY<+RF}sI$sqdUd>6>I=;4fSj;{!n?%|76M|5K&2W7%q0R5o+$!dv zY3=GXN-LM9Ett0b$GcUJyW{Seepv;>X`#g5{_+l8CX;iQRUV1fq5CSzMiXbX7_Wk88sw+IPDXNY|l((mXk*`-lAyjW$E@>zIuRaC3$ zi8ep+FAj|QG^48>^1X+jAxZu66_NPzM zojYy_xhPbE#(R^YJUAUK$MffV8`967uwVQY;L1Oq|{L{9?25&f^~0&muXs zE@{qOh{4Jz!J{0(Ygo{a89Qrq-(Xm=me`NcWHW|FRhkIchZ0f|4~%e&J6f?0Um|Ya z(nuh~KoBE$yr1dLYnq{TT`2nbj=mw&+d)Qa+8(m4&KsUI8>**B*V%p@s+?Wbe@aPT zomB*WC0Vc~+6(j6?|x@pf+HJe--&#Hh#5fmL5JC>@)i`87Jg~9?F7VkR8LL5*hrlG z&7VEh*b2z

clYDm#_*A@Ka!dQFfo-}sD=1PR+j?;F@ZG!x8 zEQ|tbTdc|E<7@XiH{&iQ1kO<@7-vid*4o6X|HPOf8Ygy1h{0hp$)j`Go2#wWpT2f< z2|l<_eatL>e1E{JCV#aqUHD^u`-(~0tk?evljDS5i&!1{?RB(}Ax}!1=62_*z-v3) zb0)xZgu;(YBkWC#m%lN+PR$b4O(JlPZwwQa(i^OhKk_yCJx(G3;^Rg=;fmnK%l#>v zGrc0k)B-@8yS3DOQKQEH+d!*FeW@`VgNz|gM0eiahm#}Vd3UN_k4-po^{GFxR^Mh@ z@qYUAqW(tn5SCWrWPW(65*#@av!`S?WYt?nvr?a>{~KZad~LD!q3sQ@o7<-M_rcdn z(b^zc3cFCAF);mPL=7_ z3+7L>L;06(u$aXAz}RNvO^cQ^iz6=g(KnkBYVuA?w-j(t-4C@A@*sL`}oBSRn54;I@8|Th=z_Q z!6lISVK@CTkv-+Kmu+3z>%TdSqE!N&&8o5+bI*V;B%h~+$2O$=WF~9s9~h;bbTfdbNZK5KZAy5m=6^!L2jxg;CANy-gl0x97HgRb3rE%<*{!8Lzq* z7v+VbtY-IgSahfG8bq50FqAiJ`HTqF81tyC*ej#35$qT1jYQN-RjBX-Wf4qpZ0#Pc z8$p?9XIxS({fP=m{4$Q?_sRw*UxId3(k-ssd`7N=<|Zq^W9Tzj$KNMOKb=|}&vxj~ zR~)f#@Y2NAu-3-u4f9Ae0WtCy+tJp2i?j2SmVU4;QXXT0=qE1qys3v{#@vBX3Q`w+ zil#UdAD^|Kl66?|L?K2`0=zExR`ZSMfUkpkh@J-V(h$Lrr#=Y-$-_V^?v!{>Hw0*A z9JbEDxuA^VU)eS^DY8*7Tt)Xni#An zL)*szjnj$6B0pV+KenzCx(Qj*EBdKZ5bl`;`q`k2y5msdV@Df=_luiS#80akVRISv zCbBVo&cmf%@9gT+ojN}>twiHVJTn-o817@`=K1v(Y7YC?D&s-^0tZYx*@0&l>Ld2vN_i%MT={7+9luCsiMQl2klX z|0-M%uW5%WqL)9EYfDztt3ZrRpgMrWO%S@KQ2Ehb+XeS#o*1;~F&0?)(cAd?#W-OUtrCNc1 zJI{`OJz{Qe6; zTk6K3J*+g= ztUy2^L7>o8ZG)x!@2KO)Sf`&n$8r{_8f%CgG6QpLUVKft(>`4#arLzkqSaZ9yotF^ z=UokciMPW{AAP`6q_ll}IsLZSDtw{!EZVV{T&@N4@R?s^)6v8+1WqO+!9BW2r1lHU zxhCu&jiL;(Fh?zM;R(5Y76bdvcnaI_Onv?u*@0v>{v!PTvmlZY5jS$m?+0%fp&Lw! 
zyF#R=-V7?=mZkq`SOtS@T^}VWH<{P4xg8aw^I!Ejf#A@r;5tu!{iXmdE zgRF^S9MO(b$Yuq)!}tf{f8c;pLB_%!!2lF)sxf~s9L0xQW+FH_`-^ytNA?gsd(11# zrUOFVsK6=t`DRQ_RxZj1t&+p#SZ_Eu2T4LBWI5h77`Ipaa_2|{KD@CxN(<&1)3gJz z{tgexFCEU1E_Z7YSo$h++;z5G5-)BhfR9)3+RG^0!&7FjY=!d>jwRse7h0As69?VW??>rh!Mp+6#{5&K z646L1jQaV9<4l`?^HJsjYCH(&u-a0+s}$7_zYcuWw%CPL??c^ZV6eb-IfQNpp`gt9 z025@(AYI?v!*2@TG>qkWb9f)7KpN#XDonnK{VIjY5If)DHBO$FWrRXE3DbJt9K=~1 zp zNtT%>Cu!j>?^nP$jw!!eCD@(z$C(cV9>fAh)XJp9gUV~wwAE(rUF2gw2R9=1Y7GH& zR%ROMOq((Sm{Hy450%cQL{k2m*15ZjanVBEEBfumRsRG5HyeejECYk#3Yx#72k}&r-Z~Ag@mf$W z7>|VzupDQhewATTm&BlNfm?w#-xdDOl({|&Y@rI zSk{fY9jR%us+~`9i`caTBn6OK+$BDM(>nfMBmQrk7HVW*{RJBoM^TW7wH=3xR{c|Uxa+=!$I=h(-L_N-1G95}VggkS=54LOoznPn>*E=hCN)won$ zkv5KTyatunb04$(CHRYlHjXnkAZ=fEX_O8FCn;dA)v~n6IG9Uyhi6YkWFNhDOoT}) zxs(jO7a(6rsoxgpNewCEfhrX(beh8X=%c5<3)7a*r#SKWYV)78DMK4ulaoXR*eB!P z$s!ElnUe`ReQIU_P4-mmRL#Y=D&RXG^)Tt8X`J#%6%wrkoJN!@7ULA)NZ)rV)BIE0 z#I#(#h(&3Ux12a^AE?q4Fm+uyJsf8zv9Famzh*8Zi};R)L(Lt*@cn$IK>n-jwfVMc zh%J=dF&)P5`U6CBZPo95Zg<2~sR0~#fS*Tb2UW|V)~)FxRiFLo0P`E#f)-P~dXrue z@4sp1moyfJHU9&W4hL2zBGeD9m8uOWO+M-yVY7+9M_(sL@LlN<@;Icn=G$5O+v|p5 zU3?ae#sycJ4_H`1zh7f=6b0zsQ=iXLJi_|p%kMDf^de#KeEYR`UI8%v?GbHo53 zqZa^|MvXo#lT_8iOi5`~SoOCY$5H5_f4-$2UvJm-5#EI`j-TPY3aO!sR&W$ZB6t4M zNEmz5dPUMWEK}Ptkq($zjgs)H-6)5x#FksrtUx{iVi4R^8ZyqME9@psr#2kN=Ob>j zALQZ8oT`Ii%p2!FUH3%|Tlc(NK%Dk9hAIDe^&RxvKGpI#?4e*B#N&1+oRy#90!0Bcvc7y!*!Dj3Iu3##ST+m~% z)T-)2mF_cjcAfJxWZx|xo9Gs|4N>*|(jPP#F6A&xY@|meW9Hvz$^m`Z+VA*Q{YoTRqWWZrMag#?miDmF_ znlh^NJeeXF+mHMcWfmJx5{aH~zOkBQ;nov;&=!l!=5talgLh4{%x>SRt%vcvvoxvo@R2% z@zCRch=M?zu31#y$?cNld+}%pt+wdkg1X>?`S^&m`!zp#e(`wCnd!yQ7?839*T@78 zs`H=T`7j#=KwL)a>AuI_Dvd8&4GJ7g;0&%SE>dYn){O# zuXD!)g;x{Gnfw;i;xv8?P+Z~X=`5ZDOGZ^&KS|r-IBW`P47ID^iOgQC*gw^5-9a^m zbU&pRjVr_ldo4Gh4EqwVaYMS)H%7(5wtVd~cL>2*)eN$07b0 zQfIzDT}0@*MNLp446))nuxIOC7(6Veamta;j!>`J9wtnI8lJj}{Ypm1u`KQ|y{Zs1 z_!7L13QZ6{QNrUJ$!g)<~MkZdw@Vg!E=8ci{WW_G12@%}HCN9NqzC)y|zURtPGG;9FK@V_8Z!*CW{&yT3JRbV^1Amg0eC 
zA&hfwdFD$@o|2i=XB{R4Z`0kw6;8t}S+m+f#_iAt+>|xa{&v(Gr;OUYLzzcN$o!{Q zwnWMTM_3ntn`gH%?y;(;NR}2O$vR)egUCj38D3U=1n8g{y!8I+&D8*~8(Zqef{bP) z{L8x@bDy`n+#}&p83|wW{77J2$7)SRg`gzWRnGOa{ciy>99T*Vm(#4mbpc|p!};qd zsD~Qc`Bcd@2T)C1|IrXgxPj2Oo3ccF#9PWPmr-C|wyN2t9o$N6Q#ult2wFop?W^ZZ zJbmunpPJ$Z2d8%vZ?AsIL+{c%Rw_%eY!nYw8Fiehu2Pa`ndH$mP6-dV!B6d9cd@77 zX<|HL?HZBa=Y>V>P0f~1;PNZ@=;`#)tU^DBz)G+g4#t_^H@F#!DG1B)WIv-6swuT639u0mu#=GuMsf`%aeLf@!?C_CUXRk3xbDooy#cMYd( zVoeeCcp*55m?F1t+GRF?)X<5&Xka>rZ zRmw_m!e(#8QZbEv(n`VlyQ=NMqZ=9e%b#+LwX=*%>*+fS$%h4`9KJDbeUy>M=a+Q zG(IWNZp%MQwJclswgCwtTh2B#6EwPX{ys8oN z(O*bln`vJ+GjTAD$%lgV^i`5{iDDyHq(p*daJEwe6G)F!B39(}5sblgVL%hfTcD5z zqy8CW!kb9I3hcOv(@>Xi-sN(kJ7T{^p8Pl_YTNbXn1x#`0R(g1(gi{hgl?trto&_ z-JNfnZG`+>O#mH1bjmA91zfdPeDh1W>=*vj%hy^!wiewuqFr`I*3(Y7obNB+HM<|B z5srxS5m|*$d+N2gHrLrh~a^pfgzNx!!qfAPN%vF6T zJyb{`4A?kC_Sc$h0M3YC_x)d^0f0>Ye|rQ);Gu+{en^xLGwlQcMVR?dT=X2@*4~*M z4z+E|Z6Wicz~1|RN@<(m#aK2Rqy`yn!G^V42GGC4s z1=QCgHdxjs5Q^!V{&q)BXlWy|0xQ(J&!rZo$tCGZxH6!wHX6ls-arpU0MY;S=!PQbx=EydU@J&OMB)GW2lE27$ND4b z&7=_4B(^{}SPcq#ZsXtXE3I|DytS5T>=WxV+euwl4**|$FKakyJ?9A(`&j?FH_cSO&3}3UP*=EWX_>s5zdX%vqaEP{u#<92gonYKk5Xq+6SHP{ifr;belE>( zF2-f*jbWSb97jaqiC2A#!`Pkfl-n<|_Ql6&9+%17noh1*bz;<<%3!c3IMqk zIz^=|Iq0iGBHneZuxz~lC3Ov6NBE}bD znXAr4`fx?(DVZkf`Ycf})9J#wtmCHLe1EBtX^-L)p2ORnj`K^5k|*yoN#kWn3*|6H z6GvAP@}yEBX2>fh2i^sqT!?T-Zt zwuH@Le8t87*GXAnd`)%E1`>(wtr5%wmO0S!q!dxA!j;IQ@r`ZR)BUOK)Ztb2x0UEyQkd9M zQ8-UfdRFOkj$K4Tg##Ek$9M9f?QXmN+k5<4TJU3*j$)iayRVm?*R19Y3vI$SON1** zX=$iDd!HZ%aU8mbh`HJTi%%AMvFh-bDciS2N{dZ3n73_loeT?l2=sLi86H6%%GO6t zMzw;>_sbJC5{t+Amz)bJj#y*yo{dufAlJy&!UKo=b-v1-OzKU0dB@e}X5UV)@Gh3y zq}^=nHlK|rgnj#U(xoC8$~KRC@1)ZV#Dq#QWb^0G%WKb1TsF#jl!P8=O+>xhzqHr? 
z@U=;Aq=nNQuA9=EXSX14@SC_{yU7CsdIhhdIGz^oe5+hdwz${;5KH{Vh>@PHDXK1G zVGC@y-RdlxPJi-4B-=ij;yP=t{K3VuTvMR9&Nr_>QKw#O$IpX(Z>CI#v)~6u4D)Td zu6FW+ytRPe{fB+gFA*!BV@c-=4TZ1lnB7vAQrW7U38Up8=;Fjux)F}(E76PZ5X>aQ zFQE^oH}v)kbpZ9u(Yg8riQ3LZSve572tHhFG_si~VUk38(W;V55KUh1l~E{DFWq`L zncKTD2D|~&!w*9~`fRpvBlQ7j-XlnyMsRLh(`4`FAEzyI;)$0$D) zSET*_mNx&tSu`j_@;@qAs}R#pAXD!B-JkKO0M%ZaU4<7EIs2yq6BzP^m-+d6>Ac+} zA^dxn;*|BtRrqe@9m^j8VJ+089MA&6xJv%ADtF7iM+BpH-Y;yoz@6!Q!yi=P9uOm; zG?YsaNP2T{9i~U0QqMAnZv;ZL*JQQD7Ne?gP@MbN!{}LyhM%QoQVuY)PG!jhLIqV4 zUDdx2){KoN3wlG@3$?=O?)es>kw<_kpwl*m!nHtR=9MvBh~hIGS;`)zbdD)MxYEv$ zm!pyl+fn?_HjBi%HR)+v(!RaO!b0s+fOSM0gec_#0#^wrVn4aQEomhQq2ipaPX-1E zj@D9W76Zua_Ehhl;zZqC)R^PZhb{>}nMEA6*87dtr|oOIkD{0$K!uk8w@AY(O+(-t zsxWErmoLcJZ@47qZm2YEFZQh1WA8n%`* zn0b6^baT9{)o-y<){dyQ@$XIAi5aG7I_~_-akGD;^b2%eePhPp&3M-JAkPi8kPZJ< zR|2ZGJz@KJ-aGHDk`vtMkr>EA_vCDyS+rQH&pCb{edZ>r|H}*_?{%NKG9u?`u9|j_ z`|Zxfv$kVg`$VIt@;PkhFLe^l)NL58i081=5tplR%d{{-Mpm+`FYOeqqLF(W4AYC%}M^({7&d@hJ3Muf3Pf2GyMG` zsm>-Phdsh=a8w`h5D z=XihHfxwVII;8c~vG|;1KG2e*S0H z`Q9Wta4dZdW25H*-7;yeUVi8dJwU#3pX95%ha{r>&LG2bHZz|3vZR66pNaVMNf|X- zmmS3#Z;cUT{Gy7NrxT6&2EXt`7B$`9aTG`L%ht)JP1e(dUiHR!!puv-jpV}jLmAB% zi2I5uzV9x4W?D`hqmh9w`ZtY;DeT@bj6@ra@}{!0ejDRhm#1-$W6lA7pT;qMpA%ss zkqhr`{+q+Y<3k@?_v^-56w1I?aCq@zI6;=Pj`$p_1%bRt12;1{P$6=UJqMmw$JF^N z9~xGj$?*JUO;o$my=?_H3XB_MzP;26YeKB?w~`WnI3#XkygduTwQ1`(z3fe}P&+&E zCT)HxB}%3E{cPv$s<(7l-Ah+GeQP$Ob*v#jR5#A5_jdW`HKRV^m594*r-5CZ*fpk- zV#|GzPmEy__jGg&Z-Xn13}eP8mDgZzf;_{>k}aP-r+}^7@zwXEXs@PWg=W(}n}!yd zS;EeW%{3o7AMevKbX!;2;S_}nf$dVPd-M)v0Z&hj{3c!O`Cn2rl8uHA>#pvd=0`lP z3G@drJNhtS?PtX9{K;{nT993@lqcw;mCu-*g114zXI&7Y?|xD&nnYPKK9XZ0qG!p- zfpEo3PLzI}sQ2s^rk=&kG0*8C^Kv;ScSi%gy_Se|w(!B$Z^MWg!NnHaiZ$dUKc>}q z`0(bgmAwwg%9LTi!`GU}NzvzDRpO7=t<{CuPs|UPD?WlmugYF~-9RVG(*k+zTMu$& zhVeV#n$`Ip3X=nQ11?r>B=AAd0I0v~AE(_GEmX^^(}ovvy?L!=?(35E0qE}droR&m z)c*GBsJ3)JYeMq-d0DvVRoL5;4v8x;X?-wGaKi-Dwa$K|IV*C-+5=77xKmgYXFycdE!c{YKB|EFQH*8WZ^lqo`Vqt_`1F`c~%=wIha>& 
zPcI|-)J|MKwPmzfs)d$q0Uxq?ZQ8z|(d{LFMSQB+=dJOi-M)I@mg95M)?6M{dh9$C zB-m^^vkpJz(%xz<*m{&TU~n>UfY`jJ-X}2Wq=7|V(WI|c3v(ywC*(Mc;b2};542oS2{RF9E55ZCVuUmpIyG8 z>RsjPH6l)G)t^@651sdE?~e&yaqvB$;e5Uk9X&@MH*PSp7r)pksot5=yw)xNT5d-62uGT#2h8&ciEf3x%(Ds5{Luy{Y?ysYJv z4G$8Uvuif@5-ZGJr%f?jH}op&jKcKv*;|(xr3xqAx48P7f0K?$W^qw#f6ng!3~Nnj zCj>Eys^gyWTn|u;x-TBtbv&LkPnW96JSou((92!1alQ4*pL2S6-u9S%6N(|>{keP8!Y1pVGa?T%(2s;P* zQCjU*w`oslzwI^6D8XqF$&2NpA$Q{SVtb>G^BH1(KluE{iHAYHN;c}c2bVSK(|P-@ zG(GkTw)$sVw4qZVnBV}xq<-EUx_sna8ZkCJ&Utj|0fE&3KIS`5!I8{4_c=u53sDQ8|i4DG(N`|RD$tDBFn z{kZvg)BePSw<#;WVYLUzvTBJ%plq`F=q*VX1Yg*4mnA>D#840dWv~@VGZHRlUsKw> zjIkewjH(!+`ZOlW0C77_bE#UYdrStSZNgs5yZ%>jn6D~W75Q3pan|(57#MAW|G7?K z`IeXs+?5`{T@oDoS@?Rjx{Bdy+jIQdo4fDYzwp1Y_nlEqZtJ>&0tzC!1W^&NAtKVG zDP3KHfb=FEM0yFmBqV?=!3L;wA_VCz1SttE0YO2fNev`{fJi7I2@sOdxpD7v_TA^~ zb;mh-+`sD&VAgE-Ic zdDkblfL)_L7IA+XnBDU8E!|&TXE^bw7-;!O$oG$WS=ElGRO5LBLO*&L@j_SbPQ{`k zrlPRt{NibkOUU*64&ZEC;9?2KGLLxhA5j}K-43?^n8mZhO>|BpQgmn1zH&TwE)^mk zIkf6Q?^BhU^ZpOFl8erJR|{TSz48U;(e~TQn1_uhm)-%6L&tUg>z^#aLoW_%RCbmB z&83t-&{}EFlq+Wy*T=H?e%Q2=@a7zrnB6r0u8-?cnd%`4G;rmq4O`<3!R=gvpN+Hu z?f0$Qp#fJb64fS;n=GwR&3E8}-!ed=e09 zrjr~%GI0f%Le?jQNqc&EinLcdBStDx4@(aZ)Cc=MJYBD(aH#dGW4D06-qI~|&HE%H zUM)WW;CMIsi8Z?V9b_*|<@?;e1MTLLsE0y}V|gM!`kIL*y^QBxagk0|Rq%*?J5=DD zCE_*jgXEGx{2&=y3-rk8=U!XTMZbOW;#eIba74i#{kYubD%!wCzq`oODSDXEb=i4N zpV#;%mieeG_LwLw91poxC`s+w>_O`Kmyzedue?iE$-{jJi?A?8@g6rn?jxIBy}SFZUguoH%jo{Kd;jn#V`K$A(jv&YUwKCyM(qh`%r)puocM2^@z=*Iz_^Q?0~NQgJ;LmfMxM|IW`QT4XIyHPs&Lgu za~5;8dTTI#3&w3?PfxCxo4bsxt2`rv@QP2Kd{4_*s(nq~ynHtLwQ9Sx%(RgzV4I=c z-6u&Nt&knw-y7y%>EwAw3uw0f(SA#m4MLjR*l5aQCP(F61;E7cF_gy4GLRt0h?>{OanS}Dwi z;Mr(me3S#QXO+zh3`Dm~=zK4D%XwUGb3j!0c9=o4LUq3Iq!&wHlTYe?U5+Jy1E7Wx zQ!HDx%Iy|!ZW-if%0I;$Z-9naUI=AiUnTI)y1pt&XuQJai`3%7V(9lA6Z3YwFCbYd z(+N~c4(c(Hl+C&~|7v)kZHz1b8f+|=b2)T}PPXVS%=L@)U2iDm zL0Qp=nk{%)tSb7-Y6D6uFpL|$W~S)}agVq^kpAMAqtrmoIf$SY(1pl zf=Lc(a(XBvUm8MjQaNB5muwJqa4L=Uf}98yb_VG=_q=@)B72ikQ%e-9L%*L|Z<{hz 
z*1=CZg!KBX%f~)xUDs*1t7OPyr-Iu=5VsCaDaCJ#gQkwgOrH^6scvh~?$h+-5!qo? z!e8!K2BTNjFAc0i~V zv~^^=$t{mm>GELoE_iY!&5aoz*Y53J5&jTP{vE%n@9Ah(!B=rEclqGqwh3a}HlzxF zB@|kvY=xXnoxNlNJr6%%EGIZ+c?Ywms85)bJ{y96Jw= zAWuFPU!7gF`WiuC74e(hDRu4x!P8(@mnu8N>CpN{(yIX>9 za|Fcf3`SBaeT(Y`&iXljo~Zj8Iue4P+CJx^o3huzKOC8>J^+X(=r+a5PbG8KxFgw~ zXfdHSphLfQx+&+zz$i(^q zuXgC#HPhSk{1lp&)cvTW^h{;wgxi=qc4`hdtIr!be3O^jxXMfWwz$R|r@R|eE}6GEZY@oc zV=?yKLWR{3ItO5B)CfJ53m!}|`Zga=+>y>rACTliZWh^qE06t_sRWh~KV)Osj!#BL zsBp)_B-Wr^omQt%ofZ zKcA4p@i-&V7T(00n+xd9+eR4atow1kuvc3Zf~Iqd)*3MtYVbMzX2%McB6OAfMeTiD~#V zMg@=dF)c`0Wk{9FLG=iHKw<4Y^A$wD>rxV6<%1E3cD=`8R=lD5q~@ph?fo^0T0;8n zHUe!xuXbz-JBRXi;Ifh)w~O<0^B3*oQS^Q2&<=$l!eBO&x97!22ZTaT8!bgeu@Or@ zx$vH1O#j1KZ#yi`Nnaa@=Gtz?!19uX)cV8iADt+SRrcTa)}O6}R5$?MUW!w9uMMP(~liP(leU|al46>p~qsqpJV(&9mq6+&WqdF)O=wl!JwYtOv zMlnJtTbsNij(>3L8`OijcPhtJFF9~L|jY0Yox+lR`jO_*H>3nPw03!7&uoz8gBC4CO+&t`f2WO$q4#ShDh}9X z?BnvNVo_`E!p{m~KLPJACC%D{QmycnPbP6&YUMn`W3ur9$M#)7W|lP5y73s^$n5c< z1-Ukb(xXi{Zm6-VF;bJ`DPrq-u&sM}R5@0J@jytp>YcJSqv&S94x$=WQ14dztswmo=U zXJ@P`SC(|Sm;wOo6w1z%MJq(xMQcl@gAS^n=sc%Yq2Q?UtPKEAOKWXd& z0(fLZ<^9xwd6WR;caUpk(e+W1%p;hM&TJN-Eke_(=P}N^G<^PzPqm{ZV&nr^x&`J< z{4U*T)wYg!SRH~4tA`Tci=Eq;CIUPU8-vu6(LvAzCB@^v{5Kp67Q z)nosvdTci2y^b$eUc*>)r#nEJC?2`Yg_V8-MBI7|qu9a6^*I1=>wLBB)hMP_@DjM9 z$=Tjo@};4|aWCsQasc?d24ja@&B~qY>^Lqw(b4eo}k(EIRe=u>Gz&$N$wb#elCbDl(()+6rY zB6F=mSVJQT!cCuFLUO>LrzM#}@&3gTsvJ+lxEWtg374JXS&S#HjVV;*nh;ty`QyrB zpG5iUE8_E>X4WI@a!(JH4LU;ymfP%xh8%@Uc<^=y&Zt_s^rp0BYuy!tZ#zwmd>rEf zRr{&+X6@zlj68Ke&nd*)tCUNb^j}*R^O1h%=jb6h4VSgydwS=;LOy@5bEd`P#uoZ6 zx0VymA&zc)F5>o39G*^8C%Dq|GYnJNu?~sjJh3vC26+dT>7S9z19;?l14DfF!AJVN z4L->oK5weWiD+HTYg)&(IJ}PXWuCU<*B|eJa*nGwdCqk_nF9+1xyCx!)62Wkg6yo@ zv8c*i@(;cN&n-%We?uX5G!nRQ56jtb8LbQXxH(_${@bgUIu-3!DXpH>;I+aL4Y+0? 
z-bxJ$x6Wy6c=>Th@V@?9wllZ$M!qJfRQAMqz#zxF4(sDl?g47gZ1FIar<)bh9= zUm-Sm6Z$QEePqawt77)ZJ{uMweLvZ+7&0*N@thmzxNqO@?e-SC5xNi`*~T^N;gGoi z53OnIe`hp-{$xvO*} zya%QA#VWhqRWi?J;_w>+Uj%gl=<#ZYaAiMwWmUK@KBdjnVuFPSOV{a?Hu>goU%K>K zqK5X_1p3B#FD)Hzl_00VdgbvwdD7l}syB2<&`Wjmr$^UfNlwwJJhs~Ugw@sjsoGlQ zk2y(JI0gVCDy^&My0_U1+xrWy_h=pMLicKf3{0&(bxHo)qp9^eN(+5%S2z4mh#VK> zUf#CJ-Z~GO6Wtt~4@i^$(q|HXK3eXh5O=0D_glTMYShaU;skc5vY@u|KRhYr{@>Woqs~;HgrNcXF{0zxWNIEiMc{x_J2KiLCGnrbmTT z>6g^7f}XqW_}@_mw&(vOiT~#{ESa1?ien+j)?13XJeTH4HRdJ^3?LgPQ}8x^v~gpDpfBzLO6+?7;W}A~lz4`5>pJMk?_t(h(~5 zaHaiE99`!1N8-Fo-gP|CwAxlN8_T$QnFAp6zE|VFBh-Jk@L%8A{r)#^D3Z7Loj_kJ z>)<2`E+8T7``Jq-)w7Q&8c$yA`S{>?mO)$qrBpuEBtTWiTt7Az^&8hOv;Jp!{Xaim zITa@!lkGxfGYs^f>drR16&C2`P&N@M?;Dzn@0I7*^fS7S*VU9ubaGU)+72&4@2WOk zl+iajbU4!YPKUt%PN?iog49?b7(|(+^@5!DytKAGS~p3h#NL6nJ#WZ9T@IDSE1%>z5a?`|L_t2un#sv^8W>7Tspq7US~#{jL^$j^kjp6%`102l5cqk zB&-Sjqs;mzKOTp?u!41t$&Mh3c~ zqJ4MxWM5}xa{QTD_erubFBkQ#mvVjGu;^1e=9Ab_8^>~`AM9yDuqaZKFo77-Klt%_ z^OufdSO3MHT>*_p$v3xtr+1EjD1J1cL4QC>%7opZ;EbHM;Lqg5W7f*g?0&WhFz<N}I%LS?rag7dNwv#lO0`6rD^lj)Z|`xSmRVD++s zW@8jAjFgLQ-**S!>hfy3?%xD?_%o_$AuJEc{qlMQF8Xu=J2APR@03Cxxr<{mC%%85 zUNR7N1~#B|vrX-?7-P1>4v?=+#cjKpZ^WY?@1gfyNxN^vH(-SMgDk?M=5X6yFyOiW z5B92+*~cX(UWxBl-r5`QG#VJSmX#KW5tN?BXWCU9)A&$D)GyC19G9uyEVV|=kC2?# z)^9y{g;kyBZyC{89Z@Xi(#Gu1D5Fv{uXJU;ICS%h^{4c#CJDU8Z+stP^$lDIjjN>Z zmr@iFXqArT(FozXc`z+48xrDV%Ds^ zKe2l>`>qc)ByP8`PC>KYd&2C_j#*CCK_r{f1JCkLI|~2 z{LzQ%Ya`|Lv~DbS{zI-KQBAkTqHt%gR}j4EE~nW=*)-SE>P-{PPl#OP_Cy2>qAL)g zJN&wiI~$H<&vTm>c7}{D*WxiM8!^!nG+_Spgrs<2Tf>OTUm#*oo1c|G^V_6UjBn4v5YlT+=8Iux_5_I6RskP z8)>t!|F1*>0M)=CRLKv(Yj8!x$6Ld9vWo z>m*+s7wJ>DnO696N=f%O-)v$GAUbE`J$vZgOF86yh1X#>(&JQOSJk(_rLcSAWJ7+s z?%EYuVs5{UMU@b|w{3~wAOInlo^(=DSxOmL|G3@W_Z;Xqn_y}F z#+ed)FI}$nCke9jwHxYPnp=`?Fr-nJE7wzFxrI#;ibH?~q|L(cTAob8e)r_i3h*)_5t;6~^>q`}DAg6wL`Wl_TP*@v--_K_qIq;hR}4@!&ec8$S6UJM}VsW0Mvx9ozDB`DxFZ%aGbNM+mR{-39 zDnKky!6Gnoh}uH+GFH3Ld9)W>UN{jt%WxBUd~HJ4?$Wdj4MNBuC*I4;q*u}HkmZ4d zi#?*oC^3vp@G(TZ5#p+W!9FWye;)YNOb5C& 
ze90*a_luwQV`hwBsn;e1;~hPAW5XN=d5;8W6S zNTL$5+cA95v~CaRD{;#EQ43}wEM>R1Gev9gag^NI@QjJbPDaj7zd)aKyTXUl#0)?B z)sjySij@ytyT$ZOHvwpZR%W_;*-EkqjCJz*$W^QB&75IOW!PWVnQAtAL-&Jw8yWQA z?7Dg*e*EIN=C`cclaJjrXudgg&pgkPEyAd8)f%Wt1;V+=6*JiuVB&;#xBptQT^OdG zuNO(RRW=Y2`=D$*@^?vz7~}Rvl~mg-rA_tkP8N+U+-|pTu(u4GwF>5Nm^^p763%B? zn{<3-OvASYo|lRDX`)wsHt3&;D$x^T^Wu*A5_pcA+#?tau0!yc_EO}wi_Yx9s1)sJ zlzvN8F|<BX0-Yw4?bq-Md4A;{|UGjOC0K)mOpYd!r=-*vwLJHBn!A8m@ee$6Gjy0W`}C zQOvnq*U#WMeC#2i+RN;tY@{ zIHz(v{ZXUzV0)Q}yoQHgZ)psye~CZ9*}zx-Sp7hEEDmd9pLItf>^$=MqcJc0uyu;z zp;jxT=mP`Fx%96}&eT>4LMF->{@5Ejd)n+PIO+M~7O7UcT0!!dhD?UDW*TPe$4L*Q ztu#$^okf()8g!2KhHNc*)_v*V%Q@q5HD)(r`KSeCBy`_gk} zBA2Fa2xzRAZ7x+RP=Dla2T#6odRoDqW-wGYfcA0%&MDJ3=L`D;E$UTY%GTyD4!JAT z-AS3dmi@riNt0`xXip1moAhx`U7><5O-7^xb{S`#2TR3~}iprR{IB+imuE#2b26FlYRoZPX`2 zhjWtY{cgZ&%!@^z+#f@Lw%veg3{zLNY_@g(hKPSnfP)GPnlaeM9Wl#~Ri^jT5#y~l z8h5KflQrzqa>|wPT5~BFaB!Q1r}eHQ{lMW@X}3-AlV|~r<+T7Wih5WMc87s=CSiEQ z6c89@!X|ZFytjbKS*OPe@K`L)P_i(|-S1dIv$W+?#;g0Z>*!h0YhNiFhFNpXGVZ5r zA74;yn|F?!XbnJF6k}Q#AMjW~DkyK?2{IQ(U#mp4RN29_L_xi-dzH3b{eaIx&6#`2 zkTa_!JV{7B$wYgb5Dmx-u?Ahhl*!<)h`<3J?}8G%n6+)-_{XPLt{76J5pAm(o9rd# z&iOEfYJ0-%oBaq2;_NI)39+}gQfT6kxr&pF*_^YfTe*{)>us|A4JTFl?nXdb1!Q|R zMf;W0tsRm*4HrW@9fqj#2R)t3Y}kM6IGdq|xYINo0HdMz-}~2Zd=cly41o76Y@(l!9tD|*gs4_qe zC`5mfI}f;Fq&Zg>)nmL5sp;0Fw7rx;KAUjaZRiuE;FQlu4hTf>w-L+UP~XWUW*=wa`$xtHqE9{&4~=B*L6GY~}bXIS>H z$|STF|BSh6W$pBlUABzL`(7ZzBA0CGFDCc(CMeFs&cH>9zBT5?&jF$IL#<#}epuw% znTS_ItTQFBe*ak@<4N+@jk~Sg70~q1LgE+aVKFoP z`XLirpL5XGsy#AL$SY&@#HHw{iF9o9z4iHXaHu^%JirOKX%#`W0_&>`T8^l_GG>+R zPX%=CZKSGqtxtkW7p8}ZGeLf~pa3V#c5Siipxoa0&7NMxY=6{LfznNl6k6k?b#d2% z59&>$?r#*RT1=TNd-GzNLMr;j`6fbOOQ%MQe3-LETdtZ|6x!&`{9I8ObzTT!oF)Hp zY*hRPZ34JwgUrK4#ceuFRwZf*n`lplR%6ag#&oQ*E0>#&U<7GgI5%O>Iz^+x1mjLN zfF*Acdc9M?biCV03G=j2ge;tNPE3`!0GI);hWU)5Lz8VB?HO;HC<-QFuai_Bmi`dY zyyqxjZO=+p*Iqa~r@L+7K57*sraHCUOlNvfltH@Qy#v}QA&i<6NZAtqpsq6>@^l;- z!5DKK?$vfKy|S4KSz&w~ENqk4#)){+cYFgPP6pfsQEv4qSx6?4ASw3R*E0L%#G^Of 
zpaVAguDg*Ho2Q178hujy^`9O_h=$eyo=cokHI$neb*!`w9;P+TJKvDQg+PynLNeS` z?WxMZYM(362MzNZmWxfbLpy^_2|e~-8_b7-wA8@6aNypoiH84_$maV&W;w9S0m=X9 z76@Vog6BQQHjJJ-IJ9(Ou-0l=Wv2ARe6_3He2Nw`t%aH8TYYOXyn?=Ry{>mzPv)nE zyN$>v5G^H?axDZOEy_io)ZIjC7;?`u(`hHV?&5|6q6gdzN@P70}ul z*Vye1>38s_Rl=KV8M4~UGSOuxyl%d)rQ?A2##_ghM&G(3-~5$#bNCjkgo{+ir&@cU zNXjW>$mYym9WEqOT$`d8knQU&&LAslI4{`gA_gs?P@HE%E=bq!O#wbUZ!3`S$BPP+ zKNvUzs#SNkb)BeH$Z`t(a10--{GOkEDXyZ^(sH8Hy9dF@wwo37YgDb0aJt{JVnT8Ni_&Lv~ZoyIQp%sSfM%F`FE*p!MgZEfK); zTzY^y&4v5e%`4##H)ai)+|Ok#L*sAB!hH@#8Q)qQAJ<7~+?fIJ2HJbZ%XGpXwGo4Q z8mbi(&XpOAeJ#y%uWp%Xqb|M+%)&@}Rs5>cQ;jun4^%1jgCD81Mn3jsx{Cz_0Re5R z1Vzf{ag@r#%d%3Zm+Egzj*CRoRnN4pEs-Z;ljM4<>UtkJ#cVzw7ZItrDL0HR-E>`k zsJb_(ckVHBJA!y+cu0UZ`WB?UJYbw%3OBLP+pmIDU+>neJX%=lk&~Znmhnc8`$$G~ zFr#+nj}Dsv0-E=NMT$m7_XW4h^E2wpY#dG4j#D zg}~wo_e==&p?9gR7fL^KyXa=lqlhtz_UucfKx%SoGMbc#9wVEUeDp%i3eAoQa%_ax zd{YYu=2dE0@3=5iWJnzKD(E^1qc&5TKe>FyY_BV_@FF)1SV!Bw*5Y74UmwAud%-EG8WV(sX#mxY1a+VuK3kJ+R#WqbdY$|!ba zv3s@HMDuyn)azkN-&Pmh%vR6YWI|94`gyGl(Xj(&q&&Y;d*t%Y3A=J*xYLPnN(+qd z)AQs4m({`)jmGPd$~oQpjw1{gco=PA)0NsZABvUcy|E@mEy)q9QQI0%eMFY3psi0m zviODRScqdbw#b0qH@haG9NIw)TDJfB70mXsD{~SQ;hc5_HpnQ9&7*8DFFq+M$QF7K z(vxI!>bD1JVU+w@a#w;_jz0Sc-BJwUYjNU8K~xwV)bT+tZB(;UV`f!x;8%=;INA0< z!{CDbSn>JY`rT3_$E1=taO$3}s>kfxY5?~*v=xd^WEGU>xaq7DPUn{Fw`3rX7+rFJ zz$G0KG*J5t^J50j&UFJ=g)|Jf7Gxw=BJEzb(!;M3$#_*apvnf5{Fk$uWe1fX*n#4b zX${DePZdq1Qv<$G>t|I+Uplvxj|3dUyLT);C60P97I~5cXq(|r10&a4NGG*4-b6sZ zs3k~0oL+l5kOclNsX_e)Y4P7{GCtO0Lyl@Fqg3aB4SNRPVJYt|#v2U7V0L}QHEuoxfnND>3gw=It|#aq7< zH2tkle9y5j{4^E6_s)*oU?Ic!>XUNf{oVELR=(>ecFVsK2a(hoeb@cNafvE zon^YklEgKy=nhOqkp*(B?WO|vt#enubZ%!;mK*x`X%*wg=YKuvm@IuOm0gwxE^fE zq7E&4=6LJUndE_tJ*M*zl2X4_Bkl~lH!4}X)L_f1C@dMBItV9?ivvG_1z1us{YU9S zR?NVhAR@V31~ulp-8jb=Q&|!)+YLxoDJa0LM4#y#_^!agmBN(1o7+EIOnhG39P}_ZM?B&=7n(L`A*{9d{N-1_&b+)+ zbbXVTj$xZMy}H3LVhm*GYpDkfEW(~05kc=9Ss#&jYY)Z>xQJMl2#cF z(LRD_=@e8o_7vB1DeqQaMDtZ73RVBY*P5%-h_F{WeM9-mMiHL`V?iFWgFtrIm@%a$ zj|999xV%u}R6j4Me8*w0&B--^7!(hxAe!{VEG0BvE7F3u4y}<$WW-8L2AEopZ=3x5 
zE?}y4Zz$DWRX@5|1s7Rz^g7+1V?pqwRycb3`U z?4xZ>bHMc8XzAlddjo^YpY1{>`9UpWI-v)*`mnI){IAVcioa2Z-}BHsk7=zC_XdVa z#}?mCT8AAA;=g`rD!O|agWc0jca{(>O*83s1KK*n-xb_`@`z_lKD0IgsE+5S`JA=ht!Ip6pRHQ|RR4DQR91{u#4tJ4M7K%j z$}ML$Gea7mH3?GLyEtXi8RJ8=TuIKmZeiZ^IKNa*xtCR6;EHPt6FUOZ_NKU*z_&{? zD#sY5A*r&Ew2oOszk<(YGjl-pynyB;cwpPPG1n=4)l_l0qbKQXLxbkhPQ`)M0e{2| z$fS2OdhB7}RGG+Gk*a*?P|NZMXK#BO2&1;ljm>H8`fJsrm&&?rB`7epyn8UM(VF@Y zuZNzdEOOmX(4`XE)Gk3MqE79~;k)zg z5-o7^3K1We`K)y#e7oE^a8*cF$1%niUDZalgS=Mv*lG_`9^X!AXIGWqw+JEJ_^nTQ zvRAE4pcV7?M=x|hXO=!QaFw#vWh`4EOwR622dq!54A@*T;eb z@2;QxGZQu0tLJr9KZo(3H-rz{12DKM)S$Jjy>+|&3C44~6fh)e-u$DdZGTk~@B1`{6OnGhm|#QS z4Ik;^8Jttt51NdMz2i0}gSP@oCz(>Ci$8C+X(pC=N;A~Xh^43y{Vo+ICjS7g@MBYe z1>*tTnuJZgZimqmqgj>Y<1ov_!MnPH`5Nnc&GU7H?VZ;VmziY7hGq&TFV*HvHW3o1ywE z&T#*eIhKt(Y5}|cc&ibW{GzQl8go@5Uk&W$*>gaS-;?pqNAER{J0))Z#9MVG?SGE; zL6q(?oq-4D&P7D+S#&L(p1 zN*j&(CC*?RKQ&mu*>ODigst)3`_-qv#nRMv%4Q9*CP7g@Aj1vVgUoa1t~7*{^*Q4Wr$RqQ zd5#ILTmP=9>kelR`t$yy`%_~`MBY|>Nnu)sDxuBF(-MGRioDApRx>e)Qw6iMeJ{Ia zlR?jIc;6{JdnBmz)>fU`gTpk%B^minTH}YWVYZD9_XsWK*+b@y;O>T~5g^q7iV{C1 z8*+kD2mvoee^}Ker{gv(VKBGkW8wRXr1W`lMqLE#eLSSBNNZWr(h_{p4l%kejkXcZ z8LRzOin7G?VKZ7n5u$dM6R9Lx3?h@w2vt*dZ%BZxEl`8?IfY4OWh$)&8NVvfvfg|3 zwyzA?O4n=dt7ZZ)Pcs5)lEV16`XoilhUC{g8uztHxli57cLDiBkBd%r58fr%wFL%cd#>6-$+)H#gVK^W2*eQ!dV)4>RT$J@e%F zQD$TAojc<1>ySyC!cHZ(=U$eu8xqj2A3tY=4cGCP41VNCLW5MRx($4l&oEamnWTOD z@E|3RUsPRgB5*V&!0Y>sjrBe@@xENUjVX@yg`=9pEt%d2v(BoJ(QZ=2@<- zZeVzKpg~1;E^29U{#(MfcJp=g+a9-rFv8wNb&<4Lo)!}jy~4Hapn#l_VUPd83q5NK zDfJYyPG#eVSMa98E`VB<@mPNT@E&(*ZvK}tAD%$5(R{L_F3!b260Ej}SmzY)g;uXw zWc}|bD*{rq8p@vVRQb{AkIan4vb}>jy%!zCnyw>%^71no-}qG?mZxEzHdOrTI;q9% z>(HkSIt!d%jMD~gl3Ryb*~1Kq7p@bCYEByzGZ-#o_ASK@m1Z)`pUUI_&CDkFI@hWK z&ere$q2H|mAo%7QiR|sQ1ZgHjtwp0WvFYD}2+!lq^`2Ab)l9}k!ug5&D|fv$KtZE{ z%SYl3hDV~#J9rkqV!lML+I}l_O9s39#dp~ZF)QdVnR>LS&sNxG*U=qMSdMRv>H&k{ zp%0@|BiD=!ASj{&LG&Pd_9|Z(;iaL9SF@4~-q3bp(`eJZb~}$JZS5U|)D)B&)Yh>6 zq4e3V1(#g#?!*+zz*?}>~Sgc(ULsSHWs50-{jQ81W~0$7!d`sDikO@Gt*wuMy! 
zH&N-G^5ch6DU`{2Kdrr#zlGd~MTP>ZGWZs^cz+lF8da~!%b^Y{ry0qOqJ>H5lNz)r%-)Ek-M3NWn_q`hT(~Z`5s=OE=Fx<6n`< zr788zJ4ZLU7LSg}q!YPqHf^mK*%9ozHO#&ekVq~cqX*xFH-=R=zb2N{_l-|+_l&`+ z&lszRl=Yk(JE%PA#XE21+oVh>NCwX|nGYXDq$-K~*I_W_%`N>{Hcx&_LoI+ne4nnZ zbxh0c{)BSQQj+RawEf~nK0{DbcFS8hO!J}Tq`HRo!oy17t7@3vv|YS**OrM)JlM$C zTfIJMo0_#u?mJuE{8StH^&@7>P&-p%G|qaF4Fakqo`d+Ms^wPCcW5++zcifL8bnr+ zc|=2YqwdUlwHzUDk>Av-QwN|aU(Reme~G||D)ujY#Y)-W`0I5*EIa-unK5t z@h(AY0mTQZGcUAB4^93ax^6PR3!;K+mT~tPYX_gGs~1&T}modN^g~S}A3sxk^-@yxL({eI2gnh|nWP z&IGX%paH(Mep~OK(R~K-j!t^;o{Z*zl+Q1B7Vw#GnIY~2_MS-()R+TR4Ex_5CBwTu zrrfKF54;shsA$M7eD=56wL->}a$W?00mtK%I@R9b4C+(1-7=Q$Hx#wvT^j*dAcgJy z&3kfLr_G+5NwAa)!aD5mhA(d(v#G86R;d>DFmYtH2b&n`+zs&Es^?HFPetA4cgJ2i zs~0v-musw_d7}plWJwnIH$b+SA4yk+$08w(TBEX+^m?X$gJC7*{_N8z%l$GY#@dXL zbun49<8aFJpm}PQn)~6LU-2$%z%Z}*7%v%(Ohj`}+I*K6g<6#kF{`(QU5CI0HDJAx zK|$HfWJUv_<%O|2l@_R-#BO2;qP~E7uA>68cNt)rzePER3)&46zM`8<#!Bn^T0tvu z*#fN-W4^vk?3i}ygQ!*Ta!NIEf%x~J7CKQ#PuXsHOFrEO78=a3A87y+Zbax8n<8s_n--=((6(v|X7Hy>dp)sO!#>iU; zTg96Y@Oq>DqSbgRCy%npq{9l?QdPgBNaRp+*-#)C?X$#WGAOHJufM@uv$eT?kM z-ezWjAj-2mABapnF>?q*T(ppi;CzeM5ob}<3_mtoj}L)iTr0>ow%Z_ z8-3hNUjzhP`|=^@vYxg!zQ_K@Wk2kGar8IDtdr8B2ZA%GA%G2J(m&ObpE#=)DvOHt z{2#{NJDTnPeIGAv)hMb~t!Qhtw%WU`s-mG(?NL=F)ZQzqR#j2dp0zh6h&@siwMSG! 
z5PQTH5{Ym6dcEJ@@8@%V|NPE*{uw9ddE&nA>%Ok*aX%9DwYdX6&!|RQ1}B+c5|GEC z@(~*jZ8Cx_@@t116DnSY%~aJN66|@tZ9Zz&EGIm_AbA-C*Q46qX1~qGANCJFJSfUi zgDHd-x&3y#oWaQmOCu?cunoQ-lpi*@#Wn$mJToON-K=gP<$VxvNtv|FVKi~Iy@(CC zxUbSo7dTIH{aq?+H%@O-tl%~_UOt*rzzXI7JWKdi=Hl|Vnnk7>VaEnre)5@yo>`6^ zG#uqlyQWd)z3q?lAt;ctL-QEZaQU=5tKYVnbB|@!c6rqg>m_0pF62b@>XGC8|3GP_ zvbC ziL-JSckX{ld&~6fsI>XDRQQ$#{Ki;hLm)V2tGIA0(jk#NByI5&Y+2bs(suWA zVnJgCjs~h+7ID0MJ6kF_4W;&3D*orc6^uyxGfMt5hd_2u9a*o&@$+3}^@~H?tSbI5 zyawtyF`6d3p1lH3btN_g)H|KpPSS2TBmgLn;FGh}iADK4aMfq|C!g0<3@4BCk{{8- z&aOF4t|l4;PkJ>}JNDnX|Dhm3;e{R|ELm+0&;izA3v2*?2A(916b8akzK$l@dr%Ia ziPn;Tp|dZ!)mbbQcnaI4alfu{dut(X^_y?~_#MdQB@Ttg%7i;I?uevij(H|zd^Og?4z(be_}o+9f$dwO~P z{m5XUx5diD8M6IOiGj?8x-G<||9(dDti2FWeahyu!b!>bqJ#tnnPph~MKsuSew~g+ZyIZN zW(b~YYO>C^r^q6v{1V@PPQ}{R%+<)U0SMNIl{n~m^Rsf9l}gTgqg^Q?qU%DL9UrBb zMsv^Ilp6|Onb!&4S_I}nGho*8M@;E1OhzA!j+Pe!F7!?Em1FKNJk}j|Wb+Kk&A5hh zwt}+`cL&i2AaUzx-Sq3&|AOOu(fGJDoZunJgV_k0yROgX<)Oq*$&}?RC8th;#a00a z)ga|ZEGH!k!St$xtIhtwpZ>~TgBjDGD%`vzmWzvDm)UC}0AAm#J0A>b$hIdq8=rx2 zmM_0`q+8B+b4YET-MP!xkTp+u|}`(`)6Jfs|?*{AE`wlW2m?Qh;C zi)>n)Ekb_`?Piz^h{~DfOR?ah8#uPmJ-%nGIWf8~RDMk&3x0m?iPYAjPSsH~a!^8* z7dm(LNJH=I{!f$}6c!0bln(Q3KH?F%--)Q+g}#0hQ*M*w9}LuXKpsu#(Z`N`^QNQS zx-RIUpWB%8j$vz5S7_&@Y{2=04b4^igOC|&>Pgj=S2ygXN{iW5v!EQ$NMJU5W4-uQ zzsypl^vHT*2iTuDfn6{E&}OPX|5wlvw3?MDnBPP!`j;KIa_R^*4^M@bp)6~wY?n8- z8rU@^Vv1ZX>yul4+2qha8;|LMd^0pETjtD`p;r}#jLT$s2_LM<{VCTN5SQ-z$MPUS z*SvOZ0{z=*H~HB?Z#n021^v!sKutm%;KevjOM6oq?t*t~i;dXQ=&}U$yOjIi$WL$1 z+Z3i=e(86_wgZw=aM>~2oHB;HS?@aSwyD-aojMVU2d>L7HvHXFHFS`+EzuV#$ceTRtLIZxK(R=Yk#1o&1G+a&E&-to*jx1j44H zv#`7^P>uctNO7ve{w)XOTaBt*G>KI< z2A0auf^3iFviV&Y6Tk=UoQ}77RS6GMck%J#GCOEi_5|WEh?J!h4^^Fch4PV{leBs; zp}|7iLcod0ZunDyG9`c7JN9l|vB=zXc@Pm3fYIfJHx0sRkBY)cKgN!DvkTmPXL-=f zF-U)+Kj6#W;qSvVYpFCu`~J1TtkgOytJPkPSD@odDu|%)xze zet#4%r`Wc^!!u?B27WyIN$pwD4&y9IP@x=e8KVufvYBlz+yg(5YwO4Z+3tBLmx7bmNcvw2;W@*UaP2B4was~ ztR_V(zniD8Zomw!U^?+Em=7$|^b@?Muz?43<>~J64o#paU;S*iyu(uTkJ!%>XgckK 
z=g?8aDPLcY%HBI}tUlGSDnz%C8P$aQ%Z&-qBpK;?K1tm56DbYpGg4S$AL*ukX&Psc zZs*JMk~P6vzcahz#Z=;56V2hp2@_n)>^JO_EFYY|dPQ(;3%og_Cw7&Cw_ay*Jt3(( z!uQoQ>|2BKCP=Cy=H83DLg8(MIq;gY$)Akt+~D2Ig=ZkMxRcS*w_&N=-790ntmN+3 zpY2_~sPAGOo+lJGwAkBZpgICWqjuF7KIQ8Q3z3$~f`fia>wv6|Va$}7lZtwOo|K>G zCqy*6s=}{on~raNAv|~zP&C=T7%H1+d7nwgY-^O8H80t^yL433Ry_cukixe(IKX9vDw-2c*pgx|C($hSGlfWbh!Y{Tu>PY&RyVmkHaE)9hH;`%SV%Xqp7X(*Zh6%pzZ!~(hKA4W45{~ntgo1 zpW1a)hSfqiz+4;FU$qkbZ|PNPG9dPv}2WWnjD$ z3AJ}8}2 zTZwbbAKTN~e5g+PdkNmz-hu}!z(qG%|B2RL|Bv8|*!%}F!2f}a2C%dxgI5;h%+Z+P zuM}!Zl0vipgBj=-)38s()bfYKf8YYy=-B2xU^ZWYD=13D-ir6h9!ObhkI4C#*>G63 zSLj|X@Ury$L%PtK{!Ak@wc8)k03?BYO85M3l5X|S=i1E}eU*!mMikENT;2qByr2a! zSIN1!&Yk)y^0Fp<_v!hVZig-iJT+}2HFTF0@sW`5$4s}5Z+>G36#a#dEHCTL%c2)0ZlaSLq*ceI@9OH1ur4L66E~%^T{tFwQ z-)B$)qi14C;pYHE!H8?l=N3AV=55Ut%HZl{g@{H9Bi{}WsqR-%+esbI6BmG;kd#;h@>DkY$BlIeVq0ZFci_~uGvA=#HkFJ`5Ty@Rk zJgjSE|MAjEN5s-P>H0LzJlm7DTVWOvakxNF(1V;9c8l7m`I>{Yz(Ss?ePLlM^^03T^Z+B~Dz_)$Gzk|X_NHB;_-fatei^SJlvu_(I9 z(VSvzIE2x6_Qbd6)kSNBF>5&ppC1vfOwH?cKYKYBHO2F3*V%NiTELPCnL4GWFE`}s zH_v@$EmzWy-Rmz|-|~$nHKv8q_3{9(#K(hM0b3KrL`&-=uYrLI4E(r-$k*nrp?iu~ z+Vp#d|Lh$2ic!f}=I|GHlGNm@+8jq2B8A}*bHwIYw^&AOj|n0_0{_8WddVPNwEd`T z{5+2=-fd$1Jn+mDIhZjiUgnV${s{H*d;AcGLi*n}eq!|06uO>>Z9Gbx~ zw$H@7nPLrBJ`v(T-h_uHO}D`g)v{M6B)YDhILQ0VfJP{;rWYO?Gv74_+xig{Ig^j( zxI3ge>~+T%as9K&FnH+GYU{*RE+Qb=LU+w5quU6nfjx$THYrNzA+aY33;@-`Oq5W+ z)pET)-ytpS(d`&=qCw=SbeQwx8A0g&dQF1O2UNpS)e1oxgsiFgLJWY4saLPTRz^Is z&5H1w&hq(Nh!LT!iIku;fO1_eKb;uPe@yj=U)Kq1a&f-Z0mDkv!#c8(LtQ^tUuL}--hDbS_HpzIdv>FLHF5f1NRBzrPmWxf{ zkD#0;J89`n9jj3dB+oJ1D>qOj{4f@vYt~#TJ5v90M$r(tKOkl_Z8yJDGajV4W2vs6 zCRnNKppeALld#G`P#y`QRn_71>V}+*-;IN{zxN~E6;oD?UY}_E9)T7d{f+%rp+x;P zdaG$?hLN3n8#@uOUuXpDmpZHZAY}!gl(UYcOfL?eF@zs7Q1z<f;P+G0_$DaMPT7wN7`XMBXTtEzjgY zvI)B+-KnuXTC^p{W!8=lE&;|5yyVt&927a2RdM>02RoQ_>h?~Cq8&Zcafp_6&cx}b z-|YNBPQGr3IyjM$TAW)3zWuT99J@~i>3-@QkduxIeGa8mBWbage0crKt%c%XlWH<* zfi$nj*AF)XX4T)G4!^ZZZq3;oX!?sxH%?5YN!GFZ&{J8bpg;K`H%SVVl{ONhj#+^f$DIS;wKbY+vU(FYR`yoaXTlru7SIW*O}!RM;mk!9 
ziH@`Cp_Y-B_PXzNtq?m^wOW#H6KX3_y0^$#7GJ)h?LX3?RdUGq zzq3NAEIC>MMG=hMp?^;l<5Wa7UGTG2TMRN>ps(^oN&6 z;A*(k8I4oh$DU$W?x|X#jR~<}-~7&Nk3kBW6jnjfMP1l}jpFaRSC>oR;uZ!gZHt?O zx3DIfY8HI>rq!g$8x=+jVS@(!KGMFg(kII^a{5TekA^udx}Fo|O3{}3`f;=h?>01F z?x3w+NqoNw&M{`uYzF@2Rc&2|sxI>sH#UgtTV3=f2ol2e=&neygIc&*Q`o zB?~SeL8t`m*2DKK4oye=S-_#;X0ned4bD0%#S@F#M-nuaa6;`DZi&3@b5J}a=o6G^ zor?D=?a1QKlA?3ld9eMnDf~f%QYh89R4?;*WJ`P0;b0>NE2eczg|en4I>Jbt>?bJW3sm5I(cUf}X8!M)W8E)LNF=`y~{ z=~bg0S~i~Nn~n)1ror!iBFByw`GJEQ1S@L9o z7X83Bd|$7Le`G(8gP+zc*)eq*D;$(7yz|&RSaVRcw;VL+eCq=0FFWEby&ISkZUc-# zz)o=^9f<2aEQ*{`or(xC~L+G5{)nyg|CjCGL;)HKSH9X(+HMSIa)=!U0EW_SFc zBfgG#f+JaM%`M-YtDLQzUI;k9XkrtYqw&~#3GNq zPPJfanl3PVDAztatJNi;Dl|L1>-3~$_pHWRBzbi;G$va^HR?MeSkJuvt(N4maPDRZ3N7kDAG-85IU?60xxnI?!WIQ zC-(I8(d#0w=*+JGz+1y)+b0tTc{I_qjqjPafA?L<^}eh3)LQ)6M3jBw_1Tp9;9Ixf zJfseJmcxblaVcQ2C*@B_0T#505N7l;8IUKxA`>s!xpUG_&-gUovn$t9XjDWxEtv7>WNqRD45A#|EjX z4;icK+Hb||-$>7fzOfTOcodhfD_HXMl~OfTh-}yoq?9K*}e+|T;K z8pgF`fDuDO*W)>#YUf~9432+T4CQPNE!V?#1#=pOwE74=My{-kr4FtpH$>lw_Ki3DXP&_Do*RuIbU1`CG|oR>((8i zy}+D;9;Ka>yWC0ni$k=ESLz0&)9GJm6iFNZdJGNv4zD=2W5GKI)^8WK;cNf6;uU}3st;m z7N>4q8OynnSm6~mBC)M9)Eog$DbzLQKy%RA%}h;+<{$gb3dLR=0dpjA6li{EqNlJ} zyU*HzIjzh3VG*tNw>DvdbK`JH0XMoiQDZLAK%n=S`}`=XE~9XVC`8>F@`}$XWkg|n z9yqA9s(~J#I7SU6>e=#P{EUNdVNwt|qhP1Z9KGH1dWRIyc;-h%qZLQDSC%H5U;_VU zxyPT!=tc!^*e)q>`FW)xC8sAcZDMOSyYDSgV^-23w9>I|9<2Ao7kVYq#oCm5 zsJWr0fZInOUHPb5t@Bi4+@$wy$K=)l;nY+6ph2E(K9Q0_D%pec^6EB|TPg>=XRWFVhW26j?QMS! 
zYcNacMOeft$!sSYjMP4#w7tK!r{r*Vh8Y<)ixWO$@UCN*y--VhnBew`BY*yomZG9L z^@CLBW7%0IPTlB(kN&S{Hywe+B`1|Kn*dtwDw9Dw>2a~aOEa_qGcK|@c>qlt&qnUq zcY7~F`vr_9?v8UsPo}=@a&yOwg|741o8k#qp{($T-eWc?VK;aM=+>IyjH0mjUFS$H zH%Jg|mw!yT8UMan&endeU=4HQ>-2xSwlRJ``ae}J(q}SZdiFhMH6tD4QmiRJn~#`2 z(};oTWSW)rllxJT7i;#;2{Rb3qR$WJVrhJ&H&x)loOYj@ubRRmGwTnvAlA z=K@M$Dkw#p6F^kQzFOYR?#t#3L}NLSN1B^CM*p;KLWX;c!`=rLb4V%Zy3Dr^8LHrc zzrB0+gjtA7i0^FcgtlwtbiSB1w~1p`aLzsU2!75eL%cR9?Dfz9@ku2t8LhO+pRlyM z7NT`!9$BNom4U&tdgrY9GN3Q5L|r|uKy&UL0+Ur<0Q|^_j(>VuHD(yx!ME};=*>AL z#8m5{1xqX#SuX92B~Z1%B^)dmeJ5&?C*Wf)b>?+NkIYqQ4`m&)VU>voCAy0~P9w*- zeXG=~^9P%{A2s<%#cr@WQQ=rJCmhs0%Zal0o!eUY&hwENbK)^>ImZ7;DA{pT%%>w5 zpXD-6_>5~bXQOvqVbvzTWKFyYNWMl4gP!f=^`Ljf&kX1RL zd=>$lEhM${DlYD>&JfGf^hED-PO(RO&kc z2QFbIe$%uZ3j2*P+zCp2j^wl)Z^YoG+`M*-JS-mdRr>vwZ?ax>xmm*hx33qNk@`B4 zOkBZH+IrlR&QvXW+RRI_mbfgx5 zg8j}7tLJ$w?{Htz$Rdr3S}h-<%gh0ON1AYlv0bQ+h9y^#t{uV8Q^>nPfQ@pi>GCUA zF?fMf34`cLczC~Dof@L8(jvI)X~!6qCREmL*gS~SPZ{3|4negq=$L~&S=$DbW#k^b zux;(s%S`A8n<>U zlc(dcI5qaP9m%8lQtSH*s0jJz|2w`^<7%y*_Cof+Ik1yV;l0&{9F1(qV3 z*+Xq8@1p_fH-hqK86xRgrm3$u&%AbYWOaV>|2jMDhOFiiBBn*xEH)7r+0N}o?jk*h z(n-`|Cqlj1yqA!hK?5d_Odhj6e}D;NKpq1#tw_Ay)hHixNbuQp`w5Vuvu`7hscP>Z zkHDiM=2|McWq3|e3#qQOIjHQlG*!|Z*s`NGz6{}p*Wl?@PD>W3OX?kiUE`(S8@y1N z`Sh)K{T&K-1b}%`J1TK>K)AC>Cl+Q5LQ=%wY6u5WW;&PyGExb>xSB zX2-f@S<%Da*Is+P{uaVwWNQQrkv+{>Lq0J-hb@^#b!KpQyY_r^k>}st&+F~qHA-fb z2~7NNA8)c{<$w=JyB#}c?>Nk#iPH4R$@s-0(C1@53LQ-R#<||44n74&N{a@e+b4WL zI0c7Bq+`0&FZMW60}qoJ!5N^bfhuyzG2hAwL0poxBLM z{A2i=fj|T5;>Dmi=ul;K;CM2z#{0fDg5Yz**F(}cy|>8})^pNf=WE&(E~ug`uiXQoh^x`D#@}KQZ5;58Uv+iYoHRcPQKY4=$4O%1 z>rQ)h^Pw&T9}v64^Ys$%2iy9eB|X-NiZ?!M@$5x#4QoEVmSlXzL;BJ}3-o;5f%4~A zkXO$`$+XoiVe&xm>A&^5Da)^N!@Gbg%vu_ddg>md$Z&E_BX%_uro^xF)Lc<;k5%J! 
zk_~e2k+0D3+xu&-0uFa?Me7+~BA4V0H*^Z^c>?6Nx+M48rNqw&UxE$vc(#_m9q^NW zMQIq?O`Sy9x?+L-8E%47|F<^7m1wOdr-q70D;6E*uH$=|$+6*!5@}_Ai~Uq1VL*H8w`dfn%?sGzabL)X!_D+tO03T3CpvMArf!nIS zI`u4K==u$6!_a=up$rg?dmUk8B-lPi`+Sq7${A;1wlKteNdY$#+|)%$h>_sM$)9?J z$0C8(h(%w;BWps`(jKKwNtJei{Ytu<7wclI=cvhpYmT^Um-+|Hza50WZ) zW~ff(RfnTTl@To0$jJV+^-bTwI#ntbKRAP{lf7(?N# zPxn-gnrHn8T6xtTz52m(%wq27>O9@nxa1x^;ed1jXI)3l2k`#IOm6su1Cr?SBj&V4 z2`904XQWx6ROjc$!-3tt9PsBaxf%0JzIfq;m&j;ngAhhBXsyV$@G#$1vTC;kyeYj) ztZC-(irLa#mX;;!uqe{>pl0$NY;Y6MCVu{hdvhnDy0eSZSgTe!^$IK9r_VRf{!1&S=~5nP&eFmj}i}yQ(E+YdB9=ImH1HZ&46lq?oaohm!E$QVICLKvcHw{<_BBir2*#S zY!^>E@z~U8Ny_pfStRW4i`3HoW`XL1G#?x&ZN zo-c(>jER;T0!~eUA&sr$FW3s2@|?OQ)<1FZ%hv1OH1yuTesVjtGO_W5(hL3co90WD zT|bW-fm!wXe{?jdm0Ku*WBMS~r7QCe&Hrd==bAh2nP#sibM>?{;$B4dQe`yH&VAio zsB&34KiJ_9SFCP!S^aHt)t+-_8tohKABewnB)Z}Z*kNrEt zP?}fPv-golr45}N-K7MhqpX_#djq)^Y>{j87i8wURDQ5B{KDeG5}ZjMv@YA0TOvvFa2 z_Bb4Rz$vD(H)``J>yi)pI_~Q;M`8PREHU!Cp`+RYt!a#x4n2sek%7SmU{Cgx{gk{` z^Vr$)pXToLr>x&Czbn5V9($&IZ| z5qVaXZZ9y(R&%q+HoE9*s`CE>6r9rj?>p~3@Bi5Q5}w_eM()x>?&1fr7?8(o7V(XL zPdL*%Wt7d{gT)&K%f%6&Sb8vi%Kt}#i|I5=ZbsQ{qXg>H{37SDs@wBi6Z}J1yM8N8 zk0w)DqX4eS7?0U@VEyXfk=+BGaHQyDK9%KACA2KRRasIhOiNgrq3bR^C01#}d?>vzeQ0DR}< zB4mPJDFR6OHxEs@BQh2agV3y6R0~Y?*lWzI`={n3T!}?VK#b%+$npHf-k3)lgN$%$ z2eH>IB$s>Uk(_>g)81^N2{@(pfV1s~n?xel$+I^j+ z-qY>+NuYH?!70?4AGok=f|;`N8O<}#AEzIginT-^q;mKOlRsf=%m$iDMY~@fIi`GU z-~RK->u>$~!;4RmPsGDSy{xay^0`6WQ@gI{W3vS)&U2k|IX%$8V4YvD^RCkTvF@D< ziGVz|JQ><^bf)YMZNC5eb^NbN^rGS9T^Ar++3(v(>jeFo(cn{kGp1fd(CX?-DAi&` zW08(W<&};*u6e|}R;Nys<3=c+IoNd;!#Sxif&+Eyo$G!D*2d;b*-gKyMFpBxbn;y0-y+EQ$LDA{N6S7OL|>(8Z~ zA^QQTU5^`EZgVuwq&Luga^yJ7%=i=?Y-}#X?0X0UNCVVR=CT5>RetTlmgOyf`f9P7 zywIg%lX)P#dBVm5s23&8_^G9|BwZ9H(lV+8m^NHYLuedHbuqe2_|! 
zRstg19&MtI=iY+>##sx9xMtl9`nu#D=#Q7A;UVT_y29g~qNe8bwZmJg61y4{FSIkS<)X658qG$QvsIMk*8Tk!80*xd<3c#TRsmvXWs}89>@V1Ri?tZj|NlRzbFWf?quS);8IFIE>-XMJ*4LnyUu=_Ny28e_^K4R zs`H9LkOs)-<%_CGrEXRGy(wJT`e(9C>WoeR|74<+K#!NhAx1pp^7zwE_LplH#;%^JoztD)O27399!lcEGFXOF?)OR$fTBkhc z5;e}QBq~jeDBrxoDE|6$I5%fb+7$GRiDvbln*Bs|i}jZ&z_ssel(p9T#)E7JPmH;* zoKbk)n$ORAN;*ip*jKSYYfzw;`%s$L1hR1Y%c;a^%zWT?d^tx;9hcX9uPNe4nD{&$ zr=krQs|CoEZbtI_ZR;y?z>85@8;i|?l3e7RaLo?Co5CLmTo-%Eb+-BteM18kfHY=BoYE(ZDFIvD{~NwPk6*@ABQ9)ue)cP>oyMRu=^?WOwFDxEmAB;TH`im4UD> zo-f=*Lx@K7p=|K->l5FVyYHqu3_nrn>o!kyo9N>Xz?~f4@D0tt%#d;LEQt0B+)}(P z_Ao$VDO~kvWi;}TmW6V^au()R`HW_!N8;7G8rK+MW?Th$Sd|R$= ziN8EcrgsvbZNG+TyI6dU&P7yzV)WHEyT(f*VM9RP9+WOz>+1XhU?3h*47+l!EvEb9 zP;j>$8!|OsLcGoW82v1@-=p=D`1jqCv-Z9*0p^<5aZOk4e?F1-IV2y@HL%z$a!YXi zfUtV3c+)Q7;L4^e^ZG+2ah<@U{o?n3)U(44+27Yp%H_4~h09G$w)jE*qN^yjM8X|_ z_J&lK0qKR&GEKdO0`rCckT&1}%`SRecBEa$-1y)xWeYp2BuGLmtof^PlU^+itNwH1 z3ym$tCQ+Scvw^Ag?;W|B4}07D%T-G;MWsBG&a>qJhKS}%PJd?1`Vip!i=e;mX^hLc zI1ZbtF?K?3bksD?4heG3_)Mz@Cc@4ew!pKfb`q^!1_)JSZtH%Z>s_UzENlugt7j|{ zz60bu-0WL^^%qcI7%mFhedwm7=u7^91Sgb_Et1)(jT$OYaqiwJU?Eq%ntk@Jz$7gnbS26pYoUz*e zu}z7mF9uT8N7r~NZu&|nDnCYT!T6Gm9}~UCf4GlRX>8^HHsayE)21sw2#%EF!MMHq z`HQ26lK;`y2gB({n}2}ADu+xao@46T+i7I6Us}PCAQiuH8~y5e|Bx(*QrBK8n#iY0 zJbk(6G!r+CXiV6uX1jZF+DC0&9crfw%;bG)&janaz_^XK{xR%o!{;9|Xhi?nci|>Q zhn@laUbo_#mmAZ5Gx0F-ZB?`-w>~&4&BHSCs|Z;HaN6b2vd`$!ZA|`HZwolw7m)n9 z?b{HiqtsrUIab_=S2%T22zNi%ERISmpv!JjZT7_Q5=7<9bd(87k8rTtVs~|uZJxvK zX>j^gr1pQv8;QR*tlXlzw@unbm-2FCU8pFYF8fTCv>}m!E%2=84NLEp{kgYj+Lg3o zmawsUM;6bYl>N^lPj(96V{h}EefyVZ1B{>?xi0$59y(#&f$&R#PX|0Q^Fu23RwzM!=ur`BV1sB3_gNaAV z-X-4fS)F*IMw8Fnj@{-Zp4*PxB8}U%2g4a+l$(;M)Tmp3YC(=?XDL?O^y*1g=j!F%zkSy69T#G>0t#Fg@oS4!4L zJCX&h^58OCnygE<*JbB$HXd=o%hxQDj9F#pYvaw8j~@K-&Bs{}7x=$pNiNiGw0v`g z>h4Z;%kZrGNS3Vjh@}7mSEY#-c|PZ>5k=6;@9Pg|rT=zzcyZ6KnEq>Hin%*glP&UN z^Ks1m#J9&&hEwpadYR2^t-)dnm60RG-H+NUMH1*qP(aL*)!Y7-{lg8?{^|Rnkzs49 zfuBzZ3DLvMko(rFD@K+{fW9LZ$$s{eSrtS@SJ8M~AH$m~OWTA`)}im)FyI^Th@u@y 
zC}t%oqAJ6!V`%y7t*+Qy9fd>4ynDBsvYP6n_FJxJABk&A(IWgWt%6k=E)p8rbu%=U zjPYau8BVX!k2QG}JR?aqkVOg(0h*vXPncbia{e9{Tk9C>$5^Kb~Wd*pa2b0!#E2~vDZJ^_n3G|~_ z@mcb1w3$njrT470B1)?);0u>HAYiGq*k-4?5u@NfL2ER)O3M*8E7|SzhCh>W?drmC zsvhj>PMJ&XbdJdmtV5|+OOpsbn}U0{s_e{R3HKdZKMmm%VvH zd;{IH37J(1*?6X4+3nLduR)oQD*n-{z?JrvUR?D{6bIP@$!d2k@%hiVz4iR&fhM~w zc7={m7t(mI<9~8>!>hYA%0sziO^n+&e!D?CWAt@?upj<3V5bG-qLa*QbgogNtR(fg z;0?>-4jwb7FsQzk!wixIW#(NK9cBP&H~xfE@}(jq?R zT{ce~Zt}SC-v%SQTJrlTCEj;c!-X36a3%F$g@8kGWh!U#w5XD25}aQC=T!6^Bjh!m z9_^}7QJb`hp)uGm$iS{o_R%FZ@av&Vwm{ld?8$CT<2=9R7|ZucqfqIDwpkJT-M#BJ z8a(SF?+&X>soF3<5W@{FaZSFdBJ9%N`ncQ6e~PY3j>;5DCr!@n@l=DK1H5$86&MJA zNS6?pe-YQdm*-?Em5At@d%e2Z@}!E-_CCtEE{sq>Tq12Ix&6)~#3CQ&+b<1TDmA>Sno&qCpWl+?v_hve1mgZ8l0070!wA6R~{B*;N`#m|6e&lr_<9M zq<^DRM|;kMymTJIuSt&d5pafob7VE8b`1+H-vPp|(b|lD(I1YFs^UJ~(PHMX23&Zp zD;t>F7)g7NJEk5{)2hCEqk5zmbgMC6kVe=3)<+5fBXoMiWz$Q!BAuwF0_(l%8QXk~ zhBrP+-jANJ`S>76K5AaY)e&^Tc6pQ9rM{HMYPY;^zVW%yO_<#Z2B>GGM94mweExxH zcq70qKDq9mh|K8qbqB9eDaW~YV&x$;m{OJmURsy3Du803t3i1Sj1`l2{_ya2Y**9+ zx3e(ud6pD$*7#F48^J+ z7mvGFV-~1~$Oh;X3zxpMgmR+_CEgt%Ft?8mFK;f7otfR!)4 zXY6!n*k zw+^+`yu>!BE!j2fb3Z?*VtEpn__mg0X&ih?M%7sS1RIZUQy+CiHS4~NtvjM$)Fm-( zJI&$(Z~165In$d#*4ykFX4JG_N97{c#4$71NH#^ZmFA-N+%ReRS2FjJqE}yYvY7ez zIVw}ksERz3CGRf)z z^iH>(HmL7$L|Dp6%J- z&oqn&UCe3`tmZ(oO%^GyRQ2!1KL5a;;|wWEP`J-Zpy0GBMSHJ&*t(MySIf>k<}$hF z#y)=oBjxpQw&BNkQnaXthSEYX%J5@W`l0;(0 z10>ynMedb?vxf3#H}#n@*K|5i6-cVJD@!V9T8dbx`B%);TrB0K9sdbCR^3t3C_FCR zvU3_bZno~+Z6$rMQtGdksUzuK22qWwDc3a9C?QB0WwKwYw5fV3c_9@=N+F~JP+>g` zrYCcLKuFZD36twO_O6KB5_#hIEkcbWzg0WPHj41Se1;i>(dU6zD+m6$X}IVP?OLTzVhrP5U({W z>cxX$PfjLD3B84O=5hz?^lMW&C7#ZAzkm#o6Ar#GQX-CcFZjq$KdsDgX6gd!=rpuq zsL&7>{c@&7Du{zn#qROMZ)g#83tdHm9*lvb#iO}u&tc8E#ebAc_W3eVePavy+^cI7 zCUtL5)~wRG*gBqt0%enFH>VPNScIlB4d=doYmVjWdDr!{ow&+j;xzT@J#^aCF-AZ7 z75EK*yKV0m-4zd*JzIWZ#XN2+y{3Y@2@f6UGO^Fx_1o?b>8;)1ZgrLt6iL&bu>&h; zgD-Y`>V)irO#CHA-gpboWwxv4pzSdtZYUsXUKsphO{;7 zV2!EWhptcI!PpWX(EO{Cu!)OmKi3oYx6c^*&QCrW-;!_dNs 
z`Oo_a;2Y^%1~-H4X}WcUzM-Vy#&eiW;YQ3usNS_)y2z&qF)|fadI-Y_FI~$O$&n+q zv#{Pr_Wt|Q>b!*k$XMy^$I^Y|rb9^_&d`vGw&`@uVdl0zjJQ2zrqjVt?^6%E%ZVE3 z*~q-Oa^FNFc0-9zO^OzpfPQ81J9I@NeRpyUk4i+bjY6M|fA@`6z2V>dnRx5&R!%nwpwmcFglya{Y0v-T zv{TYnzm=f;s5efM5$~aYiqi1P&Pqa+m(3elR-34{ zTsv9sPi^*?gJUldy*r)k!3!yBTv z@&!S1GK~yP-FiOv#apUwC{Uzfd`c3D?(5Tu+Pp{nTMJ4clgJ`uD< z3BM>nrLy+2ldWE=8#d!@$~scn^*;@zS51iIK6^PKu^LW4(-EXE{&_yMuO1#lm%Zpo zo5{{-E50T>cx2Xb0gjy_QUIO(9bz45V2zr^)E~m-%U<#;5h`A*P^DkMEdzjxEHif1 zbGD5Zh}C(P)u}j=)pc#l;p!oM@w>3Bz?(@rf$bk2*3VWs496uWdjH>vR*xf%s` ziK8*H+;#8}ZJf0U$k9t~2R3;;naYM8qM5vM_^R}rm(P$~jnl{S#Y$*kBMX2OJ23Md z|9pop#GLZAo5=P(6V!f|H^?P!#@KcxY3K)Nt5=X28Bhrp2MixbQ|~EPP;t{5Dbx`M{d=Xluv-kQ-;3lVpLx*{jZ0Y;lt?SX!+Aw z8=X7r0NdN^(7D(EifSw}h_z|Ix%^`Q@peeZO|s!QVv>HM+no@7@=TbJZMOh#jrfh+ zPDJ-tt?N8g%TGEv&vveoXX#y}{RqPK>8}u(*}5#F4K)D14`b4tRS}vC{CO#x;#+rH3pWb*1aN!ifCrI|q z%SihF8v+uXJ+1Kp7))( zXU(r~Efx#uLvzmFRlDk{tFGdL9hyXdlmYg2voERVvX=={Mgj-BRKf!=Oo<)HMU0Pr z;<2Can_CR(95#C-bJ*4>kG@;{*`gcxWJ?PEmcG~Zrw9+i&`O}=iqiAET_n0Am1(7K zE7ZQpXd;@LK*HL;rXXL%ys9=Hlw|!jiQ9_ue0TbNZ5uW=Ygiy1zhruf)GIg4 zvnKb0DZHu<%SBh~ErUKpb%i|yRh@*e)ApYjO2_@Dd-g#pPL8J+P95|41b;v1r;GKdN2cGxWM*u6v$o%{fxeCItoEADH>cSv5h``mS# z^-1fAky-0ox?bPUz5+uM;zZ-LU*GOkOJ$Qa{of15(N46eaUr>O32ShmP;T-X?AO# zZ?&qbem-Bm`y-ZU_lyfY`*jO%Lv*@X7}5?gzyq8tN7A?ZkWPapk_6M<{5HNlZW`aL z5ccDw)X3lc+^BMpi%nyRqzAiL6TQlPs{m)kVpWV*C-x2x*E`phx$KChu zCmV6PA&jpo<_ZcE2TymUqq;Qh#1*i9BQ#R|(-TLd74faLWA^;`m;d`~6aHf>{*OK@ zcB3gS7l*$=^B4bLU5_gd(79K%uA=|RK>yiT|NF1g!|~EvG|6OeQG)+h*Hb0~o4l>= zi2i@{VE^L@|JxTLWYoZ;9vqjO>uB_{pX z4fi)!{^LD-?SNwubE_@)f3(j3*S>HHee@4%eylwIyX#i~k4ZQbs385{uke5I+kdk7 z|Kb(@;{N{S+qOq8Q!-}Sum8e^{&lJU^UEgKfJawoh~BpR-@op^`fUI{uxojUgx>tm zSNoq{nP>QT*mOS+X#Hn1{vWORUw`HQzXorBK9Iz%u;>97PdJ~odQ6;*`k6qz4*kra zTP)qbU8^sT20`Au{m!IsZ@9Tiq1Jp<7MLbNKP-6zyCrvh&rBg8%cbTm_;Lh}m|y!) 
zzb!mu_HNkD^ek}-E{U}0>R51hH9lN1@mc0$VeH`U`vJ>}T$Wg@T`ZX5upo+rx>n^R$8o85PH3z+lM7Wv zA>dz$Im)WAl!o^6lj!O7+9j_zf>>So8k@Q795$&NRk{~@YKL>X-SJ2FgflS5OYQZp z-4cw0{%eFFmL?8~90mB7hto9phx?mzqtl=y=~ENUm^H0c?j^6=l!oIe7`xLBM*_Ut zQ{AJu+6tx)6h^);jUoPxOPW9doR8ZC3qW*{)DfN&N)gy*qj&k`y?A#$R%EQ%YJfZt zsm(9bj5$=%TzIUjvr-~C^Jh~xxMh*-fnI4TmFC}zB^nWGa&eU!5q3@I! zpQ6pjB5|r-=;hY}XuwIH#~Z3+*~f$@Jx^UGP<9lBBX%Y=$vr&9dZARRXL0`6e-uvy z2K@p3XE{kNk1Q?IfInV0*IU07%BXhdD-~i)G`Fa)xPTUrjlEN5E>Kew-LYk)1YzWY zi#0=g5O#J~{c}Fx!2R!hmm&%Zf2 z)go9LA*GDaJ@!c7yj~$`BT9>F32kea6Kqv&fcj~4BWqr{>&Mo+Q&>uqg2Vgaf;Dl$ z>+V>}e9Q)&`U-v+?P4pJxP zF3-g2f|+U;>}#zQZNWf%pTbz)@jTV`dSUbB!JSx4gv`hvpFrmLf8g$_3Q9UcR1drL zpMhrxt2L(urWe#8zQz6Wot;yCD6{Rz#a(`JG(Y7M(Ys2)Gb*-aqIM}JnA}U&dCuNr zHsid;TsTYcIT~Pl$aEI#QUC!Oz^|7q?-MUJjTrvmZ`jLSNN2Vw!iIM(piwT6oy-ZBL&b8>Xkkf{d{=1W0!H3n6|!Y; zt@KmJ0C(Qa`BRq2QSK!Z;Xq2zs!z@HhgDaV39x@jgN9E0T}AcNeRrIB4~7U77F4e0 z|K4TiVLTe88j(tCc_8OJ`NN1N5Fi-j#{_fML6TLLi&aEMl%Ate{RD3Q8 z#F7>q8y2F? z<-D6?UJ`W)*_(URXEn?#@iwH>DU+Uez0YQyhVP9Y?u3o*&!=LJYr}vk9w~q@u(mtt zIR6L_x5E{|nAf2o*s_84pN^#e$Km6vK_%kLjdR=fmZzpH2-Z&FF5_FP$XI>y6VbNX zFC~@vHPVUBvhIh>LA^)EE@A^1TP&9Gf9&Y~U#jETJEPtAGLQW@mIU1~2SAkEGmOo1 zh2{o`i5UadT;A=(J#RJs0jkeZ0uVd2zV>chOeW->$Kr?YzBCyWN;=!a9IM-9yKu?% zzldv%(JWBHdz;9tK@r+M7AS!6Uk7vsai{Xik)6;RPEzuOXqM@oi~d<-#>`z^gD zS+V|0#+~`j?G~6*+LxrXRF1<(;qY@A5>Mx!hKz^y?^*y^hU8o!q=!ih_qty9VOCWu zC}UNeaRMGCNC3lB+Q+W40Rsn%=YnJfMfdq!#VrV#r)r?4birz7RLtY>F z29x8XLBYqJaqJdnkmdV2(@S)(!^SO}$I^K#m{)lI3T(O>oaLKH+@+b2<)NZ=Op%}R z{w_87D0Cq8zV7Z;Sa^oocX?`Y-#N}WR-JDu1sGBIp@~h3pt$_T!lC71zj(ovMvz58 z;JnJyW;H^9^{xOJQ^_@9vT64ke!m3o&5BT9y}lO5pJFjmjtWZRocW@`VM|gV-FD9v z(gK^P&FuxIchbKTzL#wRP@|%XzP_$BIin@;T(9s$x!s2Ob0H*%F-CmC~%BYe|g2bNi6cf#zO zM4^En)>wj)g3RXw$vlJa&L4s)V1|a?4;w`4mSK~>>++|Ur&dE)$hW_c9+t6+^Y<)V z3cXeuXk$Ea1>%5>jx`w~mHvF{S5LPaTn_??jOU;fGqhyv&1;{@hC4vgTYR za)WYU>0_U@8u%0bbxkV`sF#`l@C6|m&}_{j?d*NEA_a4n<+Mhd>-8=w^DC-dbh;j{Ah}RifVq( z`jR^w|3>)!EXqp%21_%+8F{;D#vo+Q;j(82Oae!a9l!X=0(wV#Y&GJr1|9Zpf9)n5z)4p)u!k;AD82CGIu0*Jz 
z6fh~+Nz;4DY53_$ig~Wy#e%5<%xjlF%p+FdY_xlP!%g$Dfkl(s+F-_t<$Ow4HKj^* zA`I6?R2a`|4ZOxgGAj^vhWUQ>sBQ>QvI$7_CMT09M>%ldXyr3kY6 zL%QTGI?k&X>x-DjJk8woXBTXUVETGWS-mv$tsaEpu>qb}MI{t;#1=!i+)VabsLi z5=7gLGgP*4x7G=B)=zFuY)W1p7d&lq(zi>%>_$}*>*^G;!(!{XlsH{icJ}5rxd}4d zpZ>mOBmq+u@!bPF%fPNLOQS66;~l^Xu&!jP#WPE7A6>4EXrvk-NsIi?$;iJZ86sgo zc99f3PegiqSPGAM%IC!CXCv&&)yX`+=+sR-JHeP4_;RT@FK`w(7a5*1L)q1=oEF;J z#`1Q9UuMLf|86B@+Aq`*gnnmu5&$#>#RO>H2m43>wWB_!wS%9J5f=rjDx*r8^Un9N z>i!I81Yy^e`xdc-a@yp#K)l-NQMpWL-(b~(Tw%Do|6`E?sQyJR<#n=ghyT=e`A(Rp z=%GKIeI-h_5;`s)yB5Ape&Kes_Xt_O8>(x;_@Kwu!_81!l}S2XC{@H?ZwBi?n-x51 zAsf4mt_=M-;_s`3+t5C$V^J|TeP{z+v#IkunnYY7U(%z3$ZW|wNTw}D`tj@>@>!O? zDV5csELwq{b|onpkHoYW`}o1JFJ+jjqe&z32BE9_A5&ELl5{<~WtlZa1RraQw|hEqON_zJuA>f7q?Q4%wp~6L6=B?E(Ua=S5ssgmoMu%duj7^m zLQL4P-qjzLl&h$b`rR$KC+h{Q>M)k0`i6@>WvTFc_l5<2 z`9w@nRU4c#Z@<;(cL7Wqj;gVC{)yMlj6;wLJ*%K%ze_~+d)cVK({-Wh$!!NeVJPkM z%}H((;n{zHb`ws*NG{8qhlj;Yi8G?au@B7`s<8RsqTifne)_6V+D)0JCDmsX670AX z&7|QfzOO^aUq9Q87w2<26UI4++5^OZZSmJ- z=6ah_m|V8e@87N>?DUGuEg(4VjY?tMcmhppgPUQRz25NKLV~`g%OTCxBGohO?zEZ( zYu#Oxo$|t99hFFryZFBWz)oHP;aEgA!+>w6KfZgK4d)NNM@!-37g*>65pU{j4_*6*l9lAr&@CNKBGdL*(Qu)RjO5p#*Zq_Y~fHpB$#P zX^D94`ys~;r!%nqcV)_JIoGYI2eeWYlghdToeTxZ%G4?Hd@E+WKv&~z{S&b;x>~wQ z_`{{QUzupu3ncqKwg<#J3L5-nw70}&QtwGbq6UhFg)rmAW)3gPxns?qe}+o|&80n> zt~Gsq4E>TIYs!A+%Fxjc?taL5UG)zE_jV`MUK?oDsDgiCQiSKCO?;$enrxarQR6Wi z5?O2h?@=Gr6%fGl!P}9XCrETjteNj+Z#&lGpU zt%(82rJlnwAbkDj6#F}#<0X%}>r)hFFfp${y*v!hc5Ae9v3Sk_k(NS_Vf^BpSKtUP zWNXZwnS^(yv-Nm$iAUY}{3K&wU#^=l@mTvrMS8rkiqWTo84QIi{L)y!EHy!|2 zq@!{1=A|nNj|=iSCLw{^8F&{Hh?eqE|QPyDzO{nrBV| z>akIrUUO|&jb|n_y1*UV31U<;%xkvFX*DyLJVU5(HgN$zb6HijOdL0YjpE3v5!a!r5>TIIVs{jb6rE3%{|ke?s%*<1{SFH zt-!;2hu^Fy#SA;9S)hEd6VBdCEW1|z%i!Hu^`5V~vr8r!w04GBgNX#8gi(I#+1Pst zSzjInL#mxMsuu;zFkQfFH_GOam5b(^Wc({r!Y>$`c|(5cxK)dZfhBF2QM%0Wf$J?p zd83B+lnI?M)e{TCAG_?B zB;Mcbra9HkNu=Z)Pkja1UE_kazcw25*zFSTWq3k2^UGg=ViKb%T2>#v=!uLcM6Ay~8C#9j&>eTWp^L_0#< zamQl0323t4jfWjTy1li(rWvWODJz+4ynw`mvND+Te7q!*M|sl87aDqNY7ULKKE62r 
zt?Rxq`Km;+EU!Zv@D)|scIhQ&Q43#(vKH77lMvu0#m>wE7aakwbWEOpTxkno!QA8E zy!IMOu9I-KTMJ@fEX{ma%-p{4<+oG1ZMY&|LpaMFv1unSp#yz5b&yNF7Y(42(UHSA zJ#JLMQA^j2>msIVSq4?Eyc(oU*q4px$xkGfC1v%uB_Avs)7dljI2J;HK-m zaLf)=Y~$;V%jH01qwW33er~JbXH-k>G(aOf)qV%`a{2O0Y9`$l{JHGewRvk*)5(R* zDCvE@F)eXu7ggH=3i$F>v`)H`>_-4dYd9>3E2*Xoa|KD47)K0uC!E)d^Ggrfft<(X zz1Y26k_Hsw7h?N$l<;65c3g%?_+m|#@D`VETtb4;3M9vsArD__a0{farvP=(%PPv; zV`*Lm<7SVFnKef|H}L6xtf?Pm4Zj47xBCUVp*JWqq)^V|#mM8*O*H6pe3j^8I|zh6 z!KtMr##pIm^=IFiqTS?~e{Px86^gl21nZW<{3rrZYWGt3#&4s&Hf`w@KP*(LYg8xl z%sxYa|m%unn_2bn!=D)+gEtIr8K6t_eq!9EPyg2RvdHk4g3 zoR=OAA4%I&Da;{+b<(H_3*W$TZTs=?mkd#B=^+qvT=3Dmnu1j^?NxL?1Oi~`W>ZD; z+79EicbO>v28L06xM=!I=Oa{vCrctTPulEUaaKTRc)>5P%)^5)=l#IzNku68cmby) z&Z{g1I>Ar?@cpu!#*iJ`sj%_ws7;VS&uKvm5h_HHoWdbGaR4$v0)6f>3r+q<=(Z z(531?HTy1iyi`rvuD;r5Yt_u zwj+{W_Oj31Cu_Wsd3@s&T-QPqKEbOa!%w2MV()ONk$BpVX7c;u{NbC*X2B<6n$uV3i69fRNEwZ)!NTyP_CHJB>|jY;T1Un>c>R-% z_Z=_Ki!gHZ%9n^zeF#AlpfV4ju_p$y#(Z>G)i3nsZ~^rL%LZv|%_LpzQ7I7L#^WaS zw>%yl%8Hn7^s@dx>o+X)CQShiv`Q^_D@q&rClrz{j>eLPoy6ysCCja0+%XmcPQpGY zpNdUaXintnQ0x`P_MT=qV0nB_A|k-vKy%T`U|VV`++eb3f70|x&LywXv-Oh`e&X4H zeY88seShwr2gDsMqf4hP?4y^2F(W#ocniX{5c52;jvvYQDz(S4yqp&Z@GMBQJJ)O! 
zGgBCUvacJh_hwJr2hX{EPO3H>9skv5xSiZ%%-GA})J}BHAA%bRLe(`)JrqJ(z*R+) zzUlfuTeqZY!B?+gb3BBZp3D+5pAJ^weh|4O35d{M3+f*(94RQcCf#@|9hJr_B!osZ z)<7EN;*G1{^Gh|V?O9l>9!{)z7_SA#@-LIUpX()scwbHtZp8i^##47h32k}&4QCM?@w;M)-D33@4a{x`Q16Gxe+5FE~X%x)k z5u|UnZG^9#MhO#rGYJQ@g)3&@rd#Awx>po$)RyauNs8<|b8IJ;$^yNYY$gOx;w_T> zRZ{346*8;G(6{4XKf@^YE<^S0aR>$ct;;XM<(2NoCgS^mEL7obIuk zu42nO_d%?8;dpZFZj*S@56gmmWUDV!4uE#lLpvk(wrnx$%SE_dq!=gE7Z2n?%e-GcK4rrQ^V zzNBkn9V~h5Y0@tKU=MyqV1(?5wk3XAaG`yp<7%Evl+W3au^;+w?$U>5g6FAqAew3X z6kl!lao{&k?x?+~NkH`eeYX9R((EwQ52j3xO_Bz+%pWYEI~u~Xa$6r^DF6_OMf`hP z90Kyb;~K?^OH<8wqcpzyOQ!Uw(x&$wUqO&;Mk+DR@Zi%gfQA@eyyob{q4f; zY&x~IGggL8yvfxh?hB8VvFHoTJB1Uu=|Fghfd`1*GslU(x$xTz#4lzzkB7A8_%#hc3_-U}WlU>T97J=|%%-7HP&P{SXA41aZe{`%RMbno9N@$4_#E z@`nRP@wb9v<%L#z0M6>pZevC|QsyDL`Mr|jathn$Yx{`E6F2IHSJ-WkpG z`1CstH^*O8e(<0G3p5`6F;q~OtG!3nQTGuk-$Xy^=3$X9;~CTcH=5)Bqu%+Clh^kx zo*L2thMY(XnnX{}t`zJR0Meq|&!;dKuZ4BM`*Lh;{1?YSB`y*G*a_6XJ6axMrx@+P z4*Nbb6w@6x9E***=yR8a*y=6g+&Kc0cqM{&;h43&1+sh;_Em7{f?J^eC$Z>xKKlx) z3qB8vN48`0Gm#jM)3xC>pRGs$n%h9;&BLh`J8v0?8eMD$3?NpO_cj-wE^SgaGEavc zbWLvrw`Yku7~NsEkBzmw2k{39O{)EII$ZCAbRGR0!AFal#=ALkXUgm6y5J$a$}-cX z=DW72iUU4NROhf7iYSVl-hY7WUTnX;*gpMr*;cFkO`d&%jMvjMv zt2G6KDzxvsLduIChvK3)5(xlqq?qcq(>gM3Qm$(;t z+N$9vZ0rsjwW7Y9zx|nB)P@L3>4D{CvqQInxRLLjE!fXbC^@DN`VpquH*aPMde*P zCpY~HFx!zYT(F$tctb+t;3-(&z9CVx_jDRMep(EGKvhPdVC~Tcf8F`h#C&ZXULEar zy4hO--Ft0lLyFf>8s<<4h!@&Moromf8H8I^8hZka8DaFB6mdKh@w-4Ap;2cSSebS<_VjbL5NRB9n621|w zFMPyo$4t8S&$-S>h%Oo3(9U`HZm+v9zg{+C<5GVyzD@fRTu&oI7G=cEtW%QamXp70 zGjRxj;k308&C^Y(03+j(g7RLS^M3UVoU7knN|Z%ER(}pqegHDFCene4ba(xfg8auV z%3ttXyvc)jLrLScpuyte#{xllmefsJ?0l9i06nB$TmxV}ovPLpOBp^<$-`5W{%@92 z>d*)w9vu?fS-LCcyTB?PgUg~my7r07$Q%Vy!jRx-r3bW-?CSZ1Dpy)Z8 zt+3vJ$sCi^#h52GZYTSdwWAFnY|$=62uTl^yt#G0)Q0uRachwoL`D27b>Lr^i0PY; zr;i~y?ZAq97y+sO0#B_oTSy5Y>Sk|&kQ*VKKxOR$n?}hD9>Rr>Zi^O2$cG^v7FNOjSPePb&v+BiK1T(t&Zi)C`UE` zVzpk`hf;~R4Y$3MW-gRPSQ-1T4Xb$ig0O08rabkRAa-@&civAwM5i&;z$7Rsb%B3% 
z*OdI;IpCd)PS}q)As6n>Y%Gc{%bo~U7NJW>QS)N`j#i24*)~7-7<+_zie_gC3a_WR4V$*u>NKVq0;aTF&xa$_el$^W(%>t*@6O zD)vZ76$;#%fn*=E`%m%AZD`lRbH2?R>hY}L?MSCO7S`c97;|IhSN#R zhjNu5hHG{lM`+qC%n7yIn_3)^O0l+=kgM*K2Xh0JeT(WpBrXA(k)7~`_hjZ8Q5#b@ zs;Spp+h#3kUJtxImeS66@-t~yXO+lVzw`B~&Iu;DdHh~lCq4RZ#%|)ag*v4h?LL)e0f9`p~EZD2zo>7y;2myYhNQ4ky!!0}6KftKw&n@MflJP8d&%5I-5=zcK zjAwHdBvZ?cc0p0Pcj~(TtjwJCqi8F~v)ca>oWtD^57R8Y-Y=LNEgan>wo@dR98Ycv zZ`h0pt{^x6Vb})_W;E<&w&~jwC(El*i?IQ4fsp6$2{JFVKcrWzd zAqsNyn|=+J6}3?glTt8F4Is~Ja;8~e9;&iS={d(zkmLO|>2C^?W4Jy!6ePhSa_9}` zLK{VTE6cxCb)3X-AI7!w@^e~1go#eAzo(CL15jylbJfa%TjZilFp*@IOGlTLR^5R* zs-%(*ZgYG68RId-{!_GldRoVIor2*nJ(zxY4{cv!B{W?h!*X-0Nzd*tE1{Jywrq3T z>rrJXzv7uXn?Y--0#0t@27_0?=M$48LHk~iyyh(hymx}+PW&FI> z(!0#|O-;!_qAsW01%H|2cVS7){IJ?l=P!216(1QN4qJWUdn9&ZO}!Q)wj&PiBDp2V zjuST7_>x&BdM^67uF#VLOTQzrZ#d_w(Y*Tur%$2wz~izspq&4~hr2I)Rowh(tqovc zz1(O3lTRNQ_lmorH3;4hIEs3P#m>&y;ZV34>ct`2UIWTX)m{!I`{DE;IwD*iIYTXd z4aaA$qqO!bB#!<2qKh2e{6t_P=d~IFx`>VVhFgC$@yC^kGH>Z1l}jU*u(BEBqO@pN z%%pzFNIV$t>#YxNe*GH1Ydj(bYtD5G>yOpWRzEF>~d{Mz&bSSOh9wnU@{V34CZGM`t{d1o+LMhzyx zu0+?~CqGj2dlmLYmj>dm%}J*a`3|f^(eS{TAAYniwID9@4CzBbGV8^)uO*^F5}$N$S)Vu02HzKw}Z8Im4E^v6EwTNWfJnm#>>|y#bVq4=$EDf zHRC5I!bRi3QecTqlkam;B+RCMyjvPhP%?uUGxVcfmy-I{X0(@JH7T47lNyuGXcHqX zQW{|}(@~hQtUlw|X=Q!@BtYd^Nh~MHt0FU*z93YkT+XnlD&7?9l;2m1=(2=`ZwY3yuACp zaOcnWew?v{BKi$!mCS$3)D}UEc@A)%q*&$S+bcC90iQ7k;;=9$!m*Q8nD?tLT)+y! z0=9`b=`i1ArrxKu6{*ytEbXs2qCWvlQwai;UomyPuAN4qs=6MPcGm>$7*ga;Vk>~M z?pyb*MG@!^=FH`I?j;CGE}-!0Q0%yM(fIlZXC>rai8krNI*FOP7$nkbxcTVUZNYji z^c&hBVbga_#moIwp^$r2a(BjWhjm+*IF8J&C^B&`veH`Lz(TPyldus zP+{CbWy70eSpTQ{yb0};u89xik1|d zz|wU0c4P;g3p1)E35K%by2yhc$MxV+;$!_rDzR;$enMfWa>2ejeO2uSB1eU*Ax;)? 
z7Z?VNgUYtkSHfxvOs`;J+{z*23atXlpy|)32m3kQE>Q7;1OU;oML;(d<@jradN==c zdWuqU=E&mhAoo&zsnY!v*7FbkJP$kZ9#zNU5dtLA8gcE=?3T5dp-AfJ((H=1@j--`53dTRHu2hU4mrLgmZ2?Bcrx~+KU^%W*^8Fgs~gy}_KEOFtr5PN8S#Xr6q7L?i6~}1_c!!8bCoI!d(PG3 zm&d%h?>(~eMyFom2Dgb$jsr+nS-w#a#epxOe`n{`NfLgTu>C3Sx?&=u%7!2&X zHl9QppjvdAWqN2o@glr0P|)nTConl8WUnq!-0nD2Qxx&|)+WBK_4><^4NnT8gvH|OI1b_n z->LEZ`Dr5o?7&K1$4!*ED@wYv?V{Nd{8n4Ejy%dw@jhR)QZ{1Qmf$TP1nkR4us# zgb>A-yF;pIB~MpX1nq*5#+Uo-DW_aixOJEeNKUwMlm190?xxqRh95V5g58(n z+ZZW|Tq6JBBGrdnV9>fc8|rknooKqu_)D;1bBbcHmUV7f1P9CX`s|}0By{y(P7H5W zq!s-+$2~H%c_EkH`fi}%>?FEPnE#SauYIsPZGf(P6+`Y2aw-%Ez=I6s{-`4yTNbPf z&cT9W5&KDD{?xb<}!*_kmh`+n5f~@ z`!C=?@t-g%X$Q6IXDYBSpVXPEIDvDzwpdocMLm@{ZL$N1g#`SenU8T=QOYFIRo_V&z~ zk3k{@zAo)o{K*WN^SCn&*NaYZ*`++)946}6{V9*yBhPQc0vsHY1HG9iE5Q^Y>aX#$ z7KXekFElLs92JCL$*?p=1H_bF$1XRAM@^?JfV2GT{$fFsP}7R-u_p%*N9c#DDBU}+ z0m^3mA1-ZzWo{cUe^$#9hf2P3yQu4Bx9T`IAcPkI>T1M{kNwQwVUjJPK(~<}xTzIo zw_m!Of@v^`GbTnwRn5HPQad}4TuhO%67#!fSIy=-edjkh{y1O|RRsK7Cw=R= z9~WFFA8q~?SZxmfTHsLmfT#(yiGi)n?ypV z``QB-nAO4fRs9x=#Of7lV%4mD@cwT=B;*I4#!mpJ61O1{lz*VRqY7cEfy5)T&Hh6y zj-2sC+&1({-kHz_qeqAxz;4yA=zGi1KP8v+p?#1`+42|7;j|>*OJ7koje08{Q?Iie z#-^)P(fIK@5wZ~gS)1AcFsJY8dU1d_zH~UxU7CYlH+IEAy=DI+_JN<~CLikaExyyh zveq<$di~B(f#o<8p!y+ihX_EXQ=xtGG+;tq1A8{9)E{_OG5iM&s#9u)P5{_(am$>- z-}{^BlF5PLKf!Os-chDxD^M~g_@wI6hmd_|bRCxJiO(lPAV7CceTleO!GJK(zdM-B z;is1KHgant+z@Ci^LKY2Y4OS#+Wn(RU(by>$8NhQ@Tf0mG!RDpV{s*irmy(W{P#8C zL52`hpA+(*)vO9x-_Vpp?VXY0v%vVF+ zKR(zM;=9(y<8oZFrA-eSsjesHFWF8c0Xl=0W$qHPX)n+a2V1())k=xsY<@)Kh(#I9 zM;r3zU8dE}UqN1xqLR*cMo%WRzFRNNk*kT`&Jld?7AUG!AMfC~czwSwI7aS98N%il zHkY7!eA{LQGCp#P z+k-q!Y(702MBq8jYkY>mPWQx5G49mQ0`AXNY2OH8AW1c8NS5#$CWd zn>Hg;vsnLquzwTnvuN{k!vGbgq<(-#N^PP92=`>`*3pzcP~N$8(~K`BGcuH1ML75q zc)EZ6Jl#}N?b7)%QU~chE-s!# zr%oB-=NF22pE`oD7%~kc@Q?{Q!^sGsdI+5*dg2|6PEHF1E=_li&7|z6^1TVYDf~na zZmAwy(aF)eb4lxRuOj4<$j@+U=NvFaQ>p>QAe5yw#B=rAp#|eszrdU866n`RbZ)H# zQ`G%aeJO0*Gjx5*EOH2ZajeLk+V@!m*Ui4mVw`DoXnu5cb;|jarYg;%YjwrA+ES7? 
zrVv~VD2DvZVKsx@<~zvzUAO+EoDYUARhx7lDOI8Suk;sz+>l`D@BJkZm5EV}Oy@In z4=RvVS<^hv0fxHiezkG1ymHjUhN7#uNQnxDE|ISLZ)`)+mLgO+67J~2Zkn8=%BSj|@GCJj}Rrx%*CybS%qmxAlXK=tG2{b68cZ@fabtQFVlEs)tt)spB5& zwH&?No0oLYLYuC%w`BuO7o@B;l-;0jS3}qxJ*<@Xt|uhuf+t(a#UoHEq6C(3CblIhhXz)mdl?Sj<Nv$|;uD@~S~PJUG_bC{QT9Sb{@A`*cTL8V9`Q`M zPf9ZX-NzLy{1d4Qa=Ow1_d!p>>yYm^l|!T@F$V2~zjli3Udkwq$SS$|FXib5(<58c zy-5e-Bu&Ape}YOHwNl+Tk&799FFWZPR-mdnDaC+VNn>Zi&G`;Sv|dI$f)eP(BCb4L zd&;{V1GbNIu7q5u6p&J&VFtrtcO80GSk{&_xBZ!Z;%vgks;+bMWMb`-;g%sN1+uXIM{zYmxDsgvvnDMLgdh z8onosNW3ftSN3*+dciJBukIswMH#v!HlD(~=BioTaDn;IZJ>E92H2YVjEMBrgNF^l z5jv=&Tp~$c^hT8J;5rx6*pY5toL+6ehR8W{*$akpj>N7Q?R_Ek*3)%{3bn73C|P+K z~2&`ZmY5V%UiJ%$d(Dljf2yulIG) zeD~XX2ZjbJt$4*|ue;ZPyo_UOA6Ou`wM0`Yf8i@i9@$pr3#$UF?Zf1IGb;~B(C79P7lldaWCGb?C>foB-WH_w=`A`<$f|6tv?e5Svgw!z zY8VRguAFy)x3?b1Y?9G1LJ#MZhrU2j4T9_G)>c_)R?DN3W}SK=m7SYTX9sHhRhHB{ z-zL8Va(f1oT}(t;XNIqg8zT(^UR?e_8Ty@gO~a%=MgI3&Yzg``J(r>k+B7PPf)oWS zV-i^tY=IlPM*Qb}%E`WnqjtaNk2}sx-;+tlnp$^X?60q44o~r?h?b+~kYd;#q5d!y z+ChVC7?fFpXrlEqUucDbE|@iD5U}}MMcp`uAhJiWrFy!CRxjSZVszE~>nVu8u2Xid zY=hH}fzSMCV!i|>3r0&2Y=GfJod9ND7U95p91Dv%>EB2EEe9)VnzI(&XK(vR%p-Q;C@J<@RrwM1kC=34DA?hezM4#Oo|4%6vJL`&MQ$u&v3Q896_i zb-rzAW!d}1v)(a2Q6gizMIS7>oj<)IXxu?h`-g1$X9;Vy#0r+agd5*!GNzyQkE3TO z+d>9c=0pI5McI4C;%kAViFVKx(c=W>52UZC(h&nkJ{ZPT`fmyYMkP4Y*P zST>#fR0!k_z~Wyw2wvu1^prY$@!fRh{%l>p8{PTq$HItTtH`yX`RIY%S6C8~RhVfk zcVei>8zu+*rR(%e^<4A*6OaT)*O8^e+>4ri|#VK!q!scNo1UnRlUopHJwyunT6 z&XZl(32HC!OvB08wQAjV^Q!81FOnwYi!?jcHyr6dNM!O!;2~p<~}zca3w?>_=4ggcPIU&$oQ|YKvN3I0^dU@oI~32 zk{_GPtaMB({S2d5n`GR>vo<#wIs*W$MR=4{G~}X9}|-WC=-cN)wOL z7vWly#d(!p@Ozxu8ZSZcx+~jxm3<|R_rBpSjY7T9N!~8wiunJ>-dl%7-FExpVxY7N z5|WD2-3^L#H%JT!NDj>m2m>M_N_R*~cXtgT-8~H50yA{y@5{5_z4vp@^`8CibFSZ? 
z`@eyi;rg=fd-Z31)?7229FA6FeWf*gGnY?j(`}cDPzEFgO8sMyFp zHH}XIsqGkmhfxkIOUMaW%Sd04Q%^>-5qy|bjoL{WFFn#{I3tnqt)K8#!fzDe(G0 zH(^Gj&}P)P`?AK}uO%3nv`2@wPq5aJ5n%y54Q9emg4=<>08_JpMCj=)P2 znboB(gXjN31^xy*;Rl|?d3j})IK*PBtaom|JNuw=x-sMrqeU{rspW~8K*-wCu&MbH zE7a2CXw!~WC3n`?0OEH*Who?k_mwIi@O5T#>re0Qz9u|Xr2H|PYeS${VHvJ%MDBj> zAh=ZGi#=nwdq%=_m@&E61?U!cP26dU`xjoyuGf0YdfAxq#+2=+CyJTzstehZ zgk2Xnv&>bVeUiP)E3;4xzaHr%qrI>6lP99l!=<>kdB_5|RTg&1b;3yhdCsxtG^T5&Rxj~P?sz;M>%ZjK`G>AZyZ}`28P_7=fB(k*?UO#(uK=yU zRwTmw@h_T#|NQh)!vOhX$M0hOi$d2wS8Ry~Pg7g0tijT~%4& zX#5mUzy7~H`(5Cg72YNEd-L+Y25bKx-etHI-CH`(xBu;{^dAonyioPIpR=p&^P`pK zU)b2c8NI(tk?2-*F9^l{YZ{Zk*{Q#sI>yr+iCg>G{LUoopP%dBl<}W_0Q**SVOS}D zf1>~XYfT#9&T1K_amfB(ANgM*^t%;Z*H@K)`e^_1mrS>JmWe}+^ws|&lK+#a{`cSe zyuG#9W&lg*-;eXZ{rcJMoqZO^fcy0SO=bQ^-Tz13|F7@u|ET-_sQbS%>HoxT{*QP6 z|INEgaNZ|oegA65n?r>d$Yv)DEx=YYHNOh#dYIY;+U@^|@c&QemHP5FI+l;QO?^{5 z&*P6WYI;K`kq@Y^uTE~vIuwfkTmaPbI-8UE+V6>iO%}ZOVcgYklqtn1@@huP&%+zy1yy&M~{| zybfcS_q=Gt|8M~To7b+!unN`G8YoMHAkhZmUJ)IT&?S6E5Dbd=EOWO~FDv)g^l(Ig31#VW7~$;5!{YAaMulkdx>et8s?kAJn)TfDNYvbGh+Z!;o0p}U+SrIFJQ z)*cQ&-u<=Xbw-~u5DUmAQ*z>CeawEScfrbws6FM=!(xLos?t5OZ8ss$WnYQc$8x># zIxvpDr4Ems!RQ_DQ#J1op+^>uz7V-oiu<~uvA#|lUQzj=&GPN@z3h~gB&hgMcrgfS zGiB_1r0`J9m#yJo`@-qGOKrc{`|1Vlpn#qbx<(T38(qSrg2qkjdZHX(PleK5zxzyT z*KD}2Z$l;E+!F*9V7Rtp^FNzaWm21xU1h-}a zT|UQvyxilDMl{Oi>b5kM?~p~jj%W@eREhT3@s{lb&()WkMQdrZ2_;{Jki8tG^3Geh zVD2!Wd5EgkX>k_oQocR4>={>hVg}J?6FQ|DZOHZrr!b4xo4ex@sZ~8=f!Eji@2j?1 zNEyCujBarV?w~aCeI6$Tz5LvAZCAfT-SFId_Sp!mx~N5z9k|s@2@--^LkV=sG2!c) z!ILxG=4rO`kovU6u?fEXKaV`lhoEqZWe?JI-}ti`o?HzJ4Va&q& z!N&H7yU`Q$tWYCE#WWF}V}U@?2@uu%1`bvYp|R-pKwg$@O!@6GecIeu&(xWJ^L#p! 
zB!ym-09l>&_|snX{IK=q<&V5QXWJ!C81l+!hSi_;9|t5j^3987>OPawwNaTBH{_M7 z2oRU*p>lRVw#e3?W-9$}*V(!ADU&;<;R>DSG*~n+5@7JvWIB5R1OKaC>3Kr-TZPp{ z4MNLu02Sjid1xG4n8aNSP!d1t-LTcVXr)RQ-2X^KqAoXS_LjHX3Rw|;TkgdArCSG@ zbzFz-?=zS4e8rIoR@4+Sc)y8sVruCBDJo-Ef5p}hw@;x377Bb*F#L9Twhp@Mx20@- zl6!TW%hant`2Aqa?C@m%hKAg};a8&5NEjk#a;>FN^mv=mzDcW+2D8Ub^;5p`a!-X& zn`*$cSZ}+E_!Ubb6rp#0S#EtYN>5#{QUz8mj#_wjRXFJOjPj9}?A^*RiP}w7riO9P zHHVE@i*V}R!aED2aHmy|jJiRGNl&)-Ws<4`V!8PudIJsbx5tihd6tAxdYf=CxQ_vX7~o zXm3PXT3DHtRP```M^zgwRH_5bZW2T63qjmPngCO|o3)KOP;{D$_ zmrV--UG!Pvw9a#G`a9qJE)^y3jw?0mtoP4hnXGKkZ<)I13!@YQE)dX?^Ubbc?d^!t$MQwDAyr}1AvhT+^Q?Y2I`KKN2RMjxX&+k3U=L>4n;F+^5dY+p7t z5vH!=UY6)SPAgBJ?a34@;2zha9s`V*msh-hbH6Nub88PoXnG!RbbOdc02=)<5g; zO8Y8DHml13B1W$~p^bhi6&>-2C^WZZBRM)IW*}Jj$uRckRiP0VE0n7L=od)Qn8Iq4N0f* zMlI9&DF_@(4_m=bk*5`FHLVZNWUGKLyP{u&y$;q3olJINm63%_7%Kd1)fYUqIBKhG zG#purj}$oj$t}0v*3hdy`O`k8Vm>pxWrU0Vw`13ce7?<sd-*HjWFRLGq0pHY zUQw{F)m2~CqI8e5i;z%y45a!>s)O4WF<9|V%y*)u;i$tjcuwJBNJxLgRYf9f1j(jy z-i0L=E$#WW6GE$CXzAU#g_NXGcwSCbEV}VNB<#GP9uVRc;~H{KeX;P<$V&x7ebE;= z?mDsmdDNi)<)}xhTc;E38)-wu&ni5Ro<%iiN21?Xs(Sa|Ivy8Xwdl%t_C}_1qXgO% z?paK>+1pqlN_;B7vrLh~Y%wXGn|d3gJBa4Zq2-5pNygiw0oHD(gWg4K_HeN_ zJv}r~B8!-|gDZ3xxi)&$rzo*1dHGPRn> zP+J;yGl7d=n>BsjY4rnj4Z<&(Zpot{HYc-?@MxWD8N{bK*vRL}G^xOqjs*MB<1KJ` zw98bk>0I@(($CpgP&})BjwjA&@!%p8oRuAZv*+J%xo_)k(xFhc6u>OsiI2`u+KM?x zLN3xn#kCTtbj}WOqQzq(F^{(7C)tuRl#`UeHtv%rN-*7~t8EWG??zX1IP4gh!MZ>> zyD$9e92;)KIdU{+;Flfk=bL@CGhM)M1#p>N?0DZW?=*_*%z3TDj!u##ObD;5!K3O| zU<>_zPs(TO7AYQT)Ya!eUk7V{pI0X&#D!#aqqH+6f?#bxBCKHP%J<>*}j(09l``I^H@(_i&wYSh+ueI|H0)S4c@D8We4h7l*c zBN=}^=DZ(|eBlC6f<=GKv$Gse%N@(F4z7A1&`Q2Phucntc~8HCg1-iy=P@x<-ODI` zaUwd>Fd~B~c7EytgI^RGx}HhS3QJLz`-@y{+22IW$xx;Be%uXrRZ|p-AG8q)jdxfe z`Eu<`pfVb|gqS)CIpfPKlB)GwXpwP!esgv5(rPL#-gdIA);4`pKchCE(Lz6j$6J;X zeX^0A(rwSwPtF*R;ZA7cU*=BiSABD}uB3@6f{7_6+KMn*F6%CW0y9^<+n+oKJ67 zA|?Tsk>+!H;nQ-EhvY~2^*1lGSV`gQeyygNCiD`7*Uq7suWPId*Hw4$UIohj;Cmiz 
zd5=gtM`hb*n=YHt6ffly=G*>LZvG}$gLu)3jnrokK}GLk$kgPzX-kaOIcFipO4rZ*x(vzBrux#Ms%Y|Uq{cVyQH0zdXFg+ z4}w*0Vpi>4#FaBkeXe}uzsXQbdX{yC4>mV^9ZDC^;N8pUn^ye8GeC@#7z(c2=om@U zBixwDmGtSeS#msSBJ)CJc=!tVX^CO{3*%8i;7cr5W!mJYw$Jq3*kq z^*UQ65%v03l?y~>Kfp=&#y#RG*XG?PUD=@`#~GffE1ShlXWB=>{3I2fg<79pq#zB@ z5ahc~i<;mBJufsQBdcRP4Vj(+;mC(2$S|&o=BZfMgF)#j2R|dK*Yh1Ks5Pk_z@kRw z!G{RbYDAMV=e}7?L%?plP-fiTAj7tv`Nsq`q-)ejSE%^aD@*!$w*&CmQebffYN*1I zb}DmAxzyDtO1yo|m&Ns$3EoY9{WX5)Fx#ZwYFmkZ`>|Tu3f`DMsW8m2Tac7Ix42`E zvuLKkDpE6^jiRW+gTrlwvyY4=%wBUAm+XTxZ&RKB^R3RiR|1i*gM-P(^+IDKV${s} za_xBcew??@^e$UdJjyD4-nlvCb-`fcKFHyJXX??~knc%o!G(M{WqHRSLS-xVWkdVe z;wgTI#cZ_JVj*Jx{j>eNoAGgW|1C&kiAMJECn{jxHY0qNy(k2d6?e>66M+c4GMcQ6Jt=vkw39hH?57wlMFje22N7qBg@dr2wnQfZUYGeewC| zP@o`8p_fkcCV?<(j8@4%#bqq7@+b7j#Ye1Soi&ql$^<+|T~O}F{{ z9wCW=XYGv8ney4eV0({i2U`@Tvf$Z$GM7WOHMNzUNsu>lcA{5RGVa$UO+-QM+H6qDZmUO-{_1Ds#Py) zILDk0XzExQlB1`h?>yWw+{GtQY*-jbCz#89cK3Etv(T9M+8C7im><&jAf8CkmVac) z4wY6ubrVN;pHN2))AyB*p`+t)J@&U{TjqByaNdd2?1qbuzPrUcs@MwY?Dg+=tVzAF zq{3+Ng=Limtz^wQsgB#OU)qC&?cO37MQgc_lBZn+ICAFdcS_$+aoRf4^5()e+Sldj zdEdmC$#>tVD0)kWY9kday9h z%+}fb0!A#0ha8`Kj0?n{EOwe?ABqwg52~G6r8Gj=-QXKB!8pl&)bFCP#>V+oezHu4 zro4)?{LZC1LL@6!s;1u4$@3W??S zY2a$yBPZmbRTkG_u9xagN5I^pHd7?97 zZoNoxF1*d8RTR1H(}^F*8ml|w&@E-r8-kvef+e38M7f<=`V!tdxnaknjeKZmHdJx_ zcylDupOU^YN?mtmd)CCe_qFfaZuB9y%mevJ6KP#p#|g4F+UaIzX~UPx^fPJVwb#gA zv(W9?)|B#8fXV7`dP9w>TKCxxdC&00CGKiuv@BVD zSDEXA(k(;#{yX=iv&)JGpDuOtJEsX+bV~V>i&>#ngE7TB@my*u@}@1+2tPbxRw{ls z30v{mB9fE&YX6`{RBH)gOJnHa$?NI+K4ok}C*Wx>lB@g6jLXIKt)J>opJMwMLZ|uN zb{AlgM_&RKd|XXbem8L~%QY?#6i3!PqZR*lFaW=~tVTgq*)#tf=RfKqx`3QM&MGxx~}Y-vpkQ5QznP zFRV_uY+ZyluDNou;(xNE-dn%NFPG)>b|W(uL_3)4_e1StX0&#Mm^E&DOnGqQhm~%J z>mR-OVrI_?y(5*|yZY#g*x(Su%H2`u3IHi!mWm*}k1b)MVNtGhC~&Eq1jU6j)v&*& zHp|87)$>k}&5E`mIUB0cvZj8`Vqal7{4UhTWJ%Qaagnfn%Htq&|9OkrhrVjoDD!K+sr2_i+J|Ryg4z)%KDW*QQZx+N%r#0 zyQ*7wj=kMqy_GY1XVv-JrvG$c4Xi6fZ`lH+qeE#&b6QZ3a&iIefZU)jaqp)N@ynR^ zM#1{yQ|$iKR(qUQnzLv{PBb_w(LlfBH|p?+*FxR;)@u;&?W#eWDM44&>K$2u=AU=# 
z<2J*tL%_-k-y7KCt`+W5t`(LnDk6+o zhz~?nrMJeRVO~i*_UiHpnu=Ex`0P72-)@~o?S;Bq+dGt0dZ#t0^yfoici7<`{ioPx zghUK-CTR=<#;!w-^g&(Br;lf1)dE;`9gX|2nG0wDNHgkIEjR)?j>4JFoWfqU~EfG5dd)Xh z^k81Ev1+$(yC;+m4H7nUq`0r%cCT;Q!^i7wOw2KG-Hbob%7;IwM^X&0le%7RRwLPj z8W3_g^+v`r3@BX-Tu4LxTSgZtG>ggoy_^B{C>q-t+?%YaN^&NKR7y`vQ2&QEyh)w< zl0tfFuS3UFhZ-NG)%x(mtS(?kI_(+DC&ls`-rY8*=;4c3yW=f9>ZFu#8gE<>cR)(Yhq6?0t@@?L)@={KM0GoBU|Y= zlGekjtEWCO_dj@k@OZ)`_0PeK1_92#OcW#UOs zqwj`ykjCJoT)PA}n619XKote0Uz7Zsvv>>8qouEWIAD^^A7|F>oIL^%l)z~- ztYQg=rNfn?2!KPUdVOiEoP4h^zL(I;#9Uked90h-i;B=XnWPx+1?*kTkDJALuPzD{XUVFsUeJ=mKZW=LNPE* zX#!2M>j|_jzV4FyQ7hgxQRnE1`st<0bPMh|VzWQa$nQ5EtIf8UE3F<8kSc8}*QcM) z6e`s?w5g{+?)j^KFLWWb%eRRH=tVfKu`jMMlxFsIfU=7d<*=Xd@4A( zSm%=5T4@9wyJ$>VPb)_;&DZQ-3zPcML`l{>^zM84q>EQK=ko)cs@x(clgW8iU3_wu zWz& zkLR2Rp8txvi$8v{Mh$64S6y76^U0k7*~gGObhb+3V>0(v?Ux@)^x(gbUI)?_Dj4KEe8HyqCPD5+S#Y0nC>+f7I5YEGvwhPjUP{8+Y?4zI!y?Y zSe@a!lM8<=%!B;saKoWn3KG;rBi+0GEE1*{CW}ZSiRMr>i_0HI=3{LM?~5(nb*FNt z#>TLRbJ98)+{JnJgxd5Q9wm#wGaB2WP@9o7-)-^qBG}Tf$314bnSgL=aMQ^=ET2Ny ztj4I}TPewDuw&7K6n0#sP>|_NcGMgVh}hKg+@k(j^v;teh4Hj%=CQj(rb7h3b+Mv| zR8xHD&zVli_a+775(jfs8vH^ErFs zH9zE1@8NGh3z}c}{U~U1;r^Ym9qRl&i*jR|-Dakn|JKKKW(Asju5jTTe{m6w3}I1ne%l@- zdEPT$rFd5`I}}-Qf1ew7E$2ovfp`{r!s^pX&iApogKn^qgS+d!+JSalJ(&|~_)BoS z@zPqMJoq*_^8I{}_7N*bQE^#90?&4hZz?VPdduT*4X&sY* zEZNtiN=?RNZIf??tt3BWs~q%jQvTPgYV$^7=97mC(Pd~PJJzbZ=ZT{zeT*7ZBnCwodOGZL** zCGd%6LsGkW?S{n$LriWE%cBR0UqSu)(4O-8*<(W|>(?-4#@1E4WEYR^8bal6@NBAi zG9@oV+`4iMc_p*PLd;WaC+q2jhQaahh}vyYd~8(CZ2$h;vNZZ#tj&C`hw;O`o8$VO z15_?$?mmjfFg<85-?cso$7uRY#dRNhZkk{Ol?=W9L(^dGr)k%jbljCI*W8Si;in#j zjiR9pvunJ-AIL3~z2~tjgDxOM*i@VHXKG~P|HLIDEXE9;-aMRkPLXtySM4eHxEb?z z#`{@;H={26_?v-ay;9%>kjzC+=Xq&Sy`9rc+M;OUTq~9ebfO)s<#^QjeRPnsnCt4N z04?0EN-L70Cq`YhI~RixtsFOMc%q1RdEY+jMP#Tk$woSa84HzzZ*S%n1pmg1%552H zX4@pKcS5a@Hg6B}l_K7U)#rRR z*=>G8rF8Y%=sw*??ymxUCtG{C-A1u&_YwAcQ z;NE=Ea?wu=WWy`d>hVgYtSa*y2kJYH_>`y?noi0J((2Ui-FMw1ZV&tb+2$kPUa9L5~bg zwc`F2tBj;4EId;&PF06Y&>`FKxaaEXWa-m9!tt#CDSHaIL;-f!1e1+2?0;eb%M2bHX4l 
z3GH~E!dpMX*xpRDElnk~5fxqw45)H0d=|O~Q}$?B0%kV}<`RIYGDzM7aS(kiW=2l9 zT>}a#j`Q3DdYmr{k3FwpAkDf#PB2|Mm9(u|W0TA9_cuhvFRri%SBSFJFn>9TwgtM!ze zhD{B?UGO_e=5m!&&ga+!-6WLxLNLmflp^WYX(pfho*FvO2KxjB?%nlBoI|t~7kl2R zHrQ%)8lQg@$$mDZL){+353rbB(akx@%r!h%@!}@_QZeT~340u!7A(q!AFq(q7r7pz zELfZQ>((ncvh+Cz@y2ugXflrw+|TjxncYPcuupN? zTn1fj7PSFwD~KTW310KOjytOkT!dAOF1TlW;PWpV51ZcZO0XjC#+LKIZyh+%W%*x@ zfclC*#dK7U^t0a2T>Pm)9pMkHO+a_moXr0)8V-)XfABU=rmf)!*nVZY`_MVuEmir` zBc#nAMdOBMzCXp1op;di*RdugkeD|d6WOX z*37FPqRn|BSi!jC*AGvyTc7ZjSc|ou&^1^}DDaZL5gFh)h4Jl+@=?`ywxpxb`QC3_5n4=Kib!jW5&@=iqO6nHOzwYy$(|rZ5q# zk2>)x$RCcPka*9c@`rT4;Su3;Jh5ELg79=DyemUfJDo0JIO^B-AUOBF=&M!*Xk@%I zwYtx*s~U_?BL= zuqN~npAZV67nf%oIwsk8s`V`_Tgm7?4nvVNSq5PbCQ71|uIIGp##&1{j7{}33u9Q%Hsw2kHa4Z@)q5#o6YT%peB-z3@Uc{LcKa*Dz{fQ zUt5OLi4l2vypoNki+a;cVioAq$8a0p?*}s0pGwvK;EE&ieOcZec^bSaVElQWfk&m5 z+56=TgrsM#qwwOxrqU!nFH)6ZaLj&8`lfvxx+`7?<|IT|rYvn*F))07MkmtNz6}Cw6HF~;&mD{>BgPP);h_Y!n*n#9r*{rnNLp1v`2W4cWpvdO!rI? zr+p6{rYqT4bgs}EVu#~0rK@;d;;I5cT%^6*h)ipi`=)}6e4?8PITpI#ZF-2K)KImA z(_%B7Zn>r8^a^!ZoHJFuCb>s7gB2CMw(kwCxC81t+$!~4J_|7oTmp2VEpPjGKggz( znNFONU5p$0U)grF<+U1*^V zY=Tnk++M6R%aH7IY+x?i)SwJthu}PYSTBl-mczsjs#zl?sN{ckw!FPlE<+{|I?q-m8 zYMuPV2;Ye+tb0NuRv%OD;ZGh94>Pt)47Z;Kf(pDU@uH%K9C;uS+QBKXxpp5>M5sQR z;G}dbOzk;`-L!D?=h&%AtR2q9r^%no+pGx00D8R;)nn-K5vQ-p*rH5L>(7oVQ3KOS z@&fCQltyw)Ock|ZTHGN!>GVjmvEv4e@{%(AChjk8nik!}U(+#>VDDS2GcJ{m^RPBp z|M)k4E0qQ6M;b#6ozcSK5bfwv;xJ(zE7D~msMVeOIDIIvqGmdPTs1j>Tend)pmaGX zX*Qy-Fp@;&w@K^kZW=G?MJaG0$awr%2TZBIJ14?HXe=99(|s4gOKAOAyVZOIdc89O zrcOG}Sw~R{DaP#ypPokga@~K?!zU*AT#Ed+uzdLBF-g73m-I8q=g({m9=OzN5cn;< zS{6>~iSLY9dgis>gFipz*c>7w*HM7*b8LcGl4wU>Vy!X#z>X9%N1}D(@v}?zfsJ3xpuO8Ttlx_ z*YF9?Se!O~dF?8C*o(FfvM%n$S(ar~hbS~Sc85;@Sa;#v_Glx4&KF;3`<3C~+aIjf z!vScv6?qN%d57g!7tu{sSA}+DMD-2{B@+19y*Z1`Q#jJCJ=O@k-?%-L6I*|Fv(Kr( znPgqkbCU7p!75~95dQ6RfIjZgQfQK8T3=@h3zdEI+cJh_NzAU7kuPzrTp3WOmKU~u z{46YGIy7yeB`q)MBeMaX+|q_gNF(Vu!AbmeW)Gh`MriGux&*#^*t{n99(-*3FhFkq zV6qEpVhWM{>ftcSXEkHYA9!iGo}{5H+m<1Rq{$W9MhpgPPgH^sT5+y0NZg;!BhTm)bzr?b 
zbk~pS3E~CWw%2M4dA@K$e$x*ai}Z}ylX8Dsf`dKR?$vC)CyB|wK7LJoj!H-DV-j1? zlL^DpASX)a6AfG9yI)g_+P6F9FipRGH1X1X(d)VXv!ld5woQG@5oGnlValAZu;?p{ z#JQvE;NAd2_eGDfaz}-S?r$n{&wo^Kp_JQyWwW}A{T6@4FpOed^nXP$%_@_}6ETrb znvgtn+LX?8V&|C)DVarnFltws+$(+$gMR;H32n@U#};`S#|PsfG$@6(#K%h;Likwq z9Af)`5lQqOHrK-MyzJc2$ZG6xQ;cYdOJ)>4#XPD7dsfMH7*3u~A=d5tfl-+RT5oD> zdj<-t4wsP9_uRG)xY>-CkME8KH9GOl3@cwuc-gBswA@T*&)C2xCJ_(Xzv97FBLcYu zutyQT9d;we7)|{09&6vkphQ|w*-`zaQ@9YRFB-oB#2J33sMRSlBX6Zx!9apZ^edYCNLLX{YncJ(W5|4 zD@X6s;Z9#3Nh6is>h`Y~^Kk;{wsEGvmS%)bOPYifyU48wArAMLX__Ai3-|@^kq|=V zn90yT;5D-%3ss`0kCPeplWx$L#B&%=O)z9M?9!tgVUE9xqNLRx8C_uax02{ z2CcMw2+sd(2i=U+rlX~38qG~~XTg!tfjEr*#@O&1cpFg~I*qqusj*!}5y0|@n`qQ< zKfVz0d|TjEyWr(OGU~UZkXna@KeEPtM<}pP*cP%`1ywyx_U1Hd;lE6(JxTJ?6Hcb1 z$h_9(&t_TtE1GZxN8ZK~B2OM^L#~!2*BnjGdkQAMl-+4Z@R;__o`~CZ7stB;bmaVPOsU{Xkc9ILR+qRB;7N?2Ag_ORX zGLFR66AObCIZvDQ6bGXt*3r0ATJejp_~j+L7@@CD$7D?~B8FA%VMJwBuX`=jS&xO+etD#P0%x#(?>_Jb~rI=%1 zs(b(HH;?IB?JnM?^Yp3h{~i*E5zU2AMTuEjSIquOeZmJ|nxDEPmVO{Gkv~sx>!tL8 z@$j^hTJ|JFZ{)n=c=Igtv1xIlPa}glwk>@ybZyW);`iJ&(fkxPE2J$1I_p7^qeK6} z@52~K*PYF7{1XMeZKG8y71dD41>I(?Xc*Nt!@H?YZhZtkHNVxIlxvLb8v*Oo*<<$p zqFL4c6nOJB&kj;sLLQlm(G&R1VvO?_osj?)cR#KOM0Y`(v)(?Vow7#`BG}v`f%`cF z%@akuz3V}^o>PH4&3iAZBterP4!XubsMzC2M&|7pq!7VL=pkEOLVgVW*n01{)i0 zSC|f;%{8Ji|NUuBmQ(erz!!ou7Wr_o00Ja-BzqgBKf}cu8J*ZKiqBmB+NIH#aE+XV=Nw+$@=GNtx_Lec?gvkUqJU+46K|X4KF>R(qUVaC& zo<@djezkQsII_6pqR_-ANw74M6H^+-?%?p3qYrRA)q75BN?q^qTpM7>+z-5(p?Kma zMQdVu$?A`5t#V1@X@v`r}vPcI^H6y)s7 zA>-FgA@#$H0Ew>?srBzZ-BME^(CPwJMp7+Yh$cCg{GM=Zd;LfqByTxPYz;f%+DHCbJ%uq>E99e2X zdmAZe?V5qIi&ZuGr;8<$K7(Vk4*a&ZKpR&BOre8w(AsDPUoykFAWvBp4TRI-##wf3 zH;}5dD}OsiSdM%!0$jNl;?|X;KaH=SW-Fx3NPXlBTJi!1g35ifxhrpWL;8O$%;bbuLkhy?EVcmVg;W@rPT-{q+LV#Jf-WH6nbbJ<)BS%%|bo`3zsv^s5^5d-3VT1=s#3x|l{d zN=!ZvaEJqtr4rNA0Qh>f!_ia&?(=}xhw12fU2O!AN-Bf~t^m6=%V96MmJCq42<^?K5fg<5^?v!#1}Sl*+duttPXR- zRb9@5gO6R!f~&ld-wDl!hGhi3ZS*zeJ<~$MheLsW8RV9l>`G|=6VMt8e4a73V-&xa zv1Y@( zqm;Ypg!4Xt@wGGhp%Nl*B7IB7P%pZF7-=p6H}Vm8B*{@xEpAS2l4%45sWEGr0}8aI 
zG7mG&0(Lrx(5fH$#H3ScIZ8KgP)s(em7dd{O%`YNjw7H=3*SqyZxDJUGxGpRpCR4{zl18#*aIr zUuk4_tLkl%ki>WTZ2-7}fS-rJzG=`2!c?brdy~`Ws=#VA&6WOa`-0l6Uc29XQ?&ZQ zb{x+9>dWB2kYMue5@P^n=*zVscw8&fbt`71Qh|O-quo8Ja((llS39KmtE?B zII0U{jc;_PZmWRRzON{Pw%@<#`aJB-337)+t&_^i-4S50=>eDyY`#3XsE7I6eK{@y z+T2}z>(qng*$~A@Vz5x)&#w_x;HpTEokY?G5HOW8Qs!zgYi|hNGYq;i`nQ)hEg-MeZ$9ZlDO89*FUGaGB|`TE9P=Z5XTdl$mUBb2`GRsIFfz5 zaQkX(I=YhUcdQdbuFxZ3d)Ckb`1Pm#bLPSO#}S)S^BbT%pp7ZCdUw%oCasZb8Cfp_ ztzGLGJ`V(X1~1T%f)B1Il>7?yX#K6Gax$4u+qwStYULSIyKc#4FGTI(*B{=@3;syJ zG9o@3=G|#VPZc-5kbffU;oM zn;x}9PWl^XuUsht1n6LFPcC0bz8~4bT;+6}sB{2Xf; zPYUjkbII^xLm-Zei}LpDp zTMS?>h;h|Xjj$Ry_sopaD_pO~Rv=OCWHKayX48xo zyjnDEA3=$%#42Dm$59jhwRxd$-=qxLG|-~Ehch9Sds`5yMVzSlU*y$)X3C{FV=3Ov zr?}eBIHkd5v%2rWA%-NYSw_y)b#*%o#>tZr<`4YUrZkSuEaJ6QC=`}Q!_TM^*uMFg zJh?8En_{Pk$+!S|c3r<`T`5Koo%XqB(^_Gm0{1_vlFe zWTD|^QuNWF{+V6mLbVNm#G(IpB#zD(fE-Orek_2t^YOaA@ZQ%qPD^1-QvR3x-OxX7 zzwT)VwWNw;-UOd*ZrQ)H`jDV6xUz6%ekiqm|3r<%di8sv$cMdGXTg)qe${qbcDUqT zSvi!3X=2Wwi21vTPdG6@a!TMhpJlH1`dsW#TJY|jSy?30Ta(-Y(-+_po%}M?Q%Qlq zd2&GfB|l$%w|Me(6jjHksgT!7{yBg4_sZrLT~}ME;wWL|o0pe252AAaTKV(mGpw~Z zaXWHk1hQ~J?L|~69B{=jdL*hr*K;f{H%O}E4O8ShAGrD6HujJ@+SDI+a=7L5OH5)D z4%s0^$JPZZx&jVIAl9cqpTDq9?Kz$_pyqe^IwO8$0@DYe5`X1%eY>t_g+B(A1gD*Z zL#aI%nz+^?4%AC;cf9A#iYiRwKq+<~Z@uDhb34eGK5|98QEcF~Mo3iN6t0vp7mXLg z7#iOEN>#a8zJ-eUH;(ljku$9>1D38=NdDJbO2;DfJI03i(D7~iG)T7Ab|alNzy5*1 zX?yCQxWK_@ipJ9&(DP@bdlhYjO+`e?iw>`6>mgL5D^VrXa=so-qns${shH9?r$XAr zqTbXz9F6*90)KuqRS&MJH3yvyQuAjk`4WcS8}wZML*ZYFked&`;QCX zQG4w;?Yy5uSL_YzIAA5%P*to_l0e%a#9rshbF_f`x>B=dOo#D_cWOu(;Ku44%DL~* zEP2xuEc-us4;ZH$+KwvAb-(*OAmac4z26Rn(z2WQZcxWlBE9?MF{|aC`H+KfU_%VM zKDgFTU+0jTy~0^#OCgyOhaQ%($h*V@u+jo($Na7hi9v?pre-^IZ_#HXgOdSsG`ACMoizhG3+9 zAU|c|`{0Xf_0)TaW}rGJK1OXu-LpBsBw6iMFQZ;^PamfjDJ50UIQ8~)IR!6vU^c=v=~Q*>BdxUvZ9_FWGx7{4p5x69#gvVcf}_FkFby z4DpQ7iIg6nWmgFxXbp>zrb|3M8Xjn-jNKzgIEP1?{AG^Hx1Ir$ZcXQzw+87*3mV76 zRCV*QgD(i+2}?h`U&bMO&|5^}8DwG37-?lvKF_}4{Fd&vXkD_y>|NjU3Za{2)ytIW 
z65AjCSepCz^VV)dbkfPLSRht!*qZ*|>rFTEs&3rAxsUdjLuZyNm}cME&$>>AAB};C zeKDSD3Jg`Sb{L<~pkA8cVv3IIO}E_tcUDm=BbjR?GsL)M4rA?0AX}tOF`V%fU-(Z! z-6m5D3@PRN$#u2CSy`c+xS<+aPYy6sCVE*%@9P`4?A^t_6B}HpemBl(I{0?3Fc*YR z%A-jIL&<^dLy(z0%DoxV;(;`_HkvZL97TBJ5e2&+pJMwS**Bb3dzn4Ds<=jZ$n3&js{Yh9@o z`lO9AEJ%Hf)21n{!w?po z+@g}c>P^$dwnuy}WcQun^-^E#0Wq=h!$E+8?D=o3`BP;$3#AI-TFt!t5)ny9&U#T& zCkq$erQGwGgQO6(y|R?dZq*{!|Bt=*3~Opz*M^s(qN1PzqEr<{ktV%^ih^{dN)1Iy zs8T}4X^&&^Tyt2!R$QZwuJQ|mmisgN$FR33np8WEJ@b-t0-HwRw;)_#B1+4D_t?djI? z1uyBE4Y7qYmeotW&z!3AR3gN&#sMa6NP^hlEx#h~pK%c4h>hme1ea#}r72sML#0Ix zpv-|jGa;i26ArZah_XwuavSY>kEVR{-4lcKZ;7;++vlX!dyB6DjM_XuQuNdN&boP` zpsk+~Nd5sLe_5ogd%OsMC?#Qw6`1sXWN3zufiuDcu3mHe%Dn{9Vp}L(E@cG3q! z_O5Hx^Go9N5xn$5%HGYDHvY@2y1TT_vUG&o0-at4)RY==Q0qwJXhZWpbgKCFrqh;} zUe8(Qau?$cYjBathyZ?Cwgtp{LT2TS_G>}?C9e}zS;3#y5QC@)z5RPRFU@ZA+}YNt zs7rQE4Co@iE~I067;%MzSL?n*5QQVwE-QP=$^(LeRxLB_iN%8Bn>)nR*s`yblW{ku;#z!4S+zBop{h2%>B)3`of5Qu zD6N1a$7gEU${BqM;txrb2kRx*%%d6L-%OAWds_pb@qs*&K^u7RfoMyVb~DqA*hv2S zh-fJ%kq{dL^*9Mw3*&OU6#sRY_(`G(MYkDx5**jC(_SiYdTBhxYh=6gplLZ;<7ysd z^sCcGij?6qc}2;``_wSZWj-N2AW!V_DAl3Wkbn-_*n3y~to4JWo1!dQft|Pi91D7T z$%meFr{ZN%&6$j0S>aZd)pm$ftZ?MXsF#&5t*CPVB3yuz7phQe=tvc=6j;0nb1j?B z<@~sEp1I2b{4=lWEmIKR5ID=r=>`B^Szu*c@{rqu$Ahc z%XyxvH#I9kWg_Ml`?AzZ{#ExeM|=ae1kv|DL7_BVQ+Xr5TAx$>16;EEV$35VSfDgJ z9PHaCcrHqV%*(WTjRybnRqyf7DZ}T5Li|IyVL;0amqW)q?jO1G+A1NfXxd{TmZ#F^ zCfvWW{q7*yJ-l(642*2}c!eLe2Vmc*$P2bj&&fa@aDaG$>B=VN=gbXEZcFwcK}~Y? 
zdRw#4rL~9L<&#`{Us}a=2CS|pKpPg8Plcq0}Co^5IcuM54S4hTa_tS-RP4COu z<66T^OguU!iYrO-2~6{0nnQxBsc&j;0u{Z(BKxP-DC|0O;`4q5&|y%OnH9yz3Z2hL z4OA#vE(}d^2r1TTYVgA*ZZE~Zj<+{iEM585!@VF?cwjv%%i%^-bna^%v%g++2}puh zne=twUqmdng0R`WwosW z%+$wYKd*ZBWVSpoN0V{XtPf3+7&#ddI|qONXoLQ_5Ue61F`7}~(yyTJLc0>j*J`UaQe9*!P{wlK<4y8L83*-zISIs`( zDDbi#X%u}UpceB>b9tW;g$PD{S9nW}o#g*qt7Wrz_wikwIrHdVgV$p~uC>d21vgo{ zn&VAEyjJY}jIviK7rBeN`cY}qpx^>=X+yZ(au#1!R>rn)ShRF7o5*LFT!2TmDB^eU z4I`IZozh+5{Il7kDF6_l$?!W|AR-sUTwx3EInkO^sVS7+dBFQbz85rCb{VLWZHz@7-2LX0-pfEc1E*KpSmnr$=25 zZXg--G^uU(oeCDGcPWRt*Yd~&f|oKW)o~EK<4eQJRh%f@L$u(B-eu3Fng5aWr%NJ~ zpWiGrbgq3Fv|Um#xY-GW7gW9<3B5(5-zk%ta%?1n&iHf8ix28(guk3hWOO{ZYNN}i zH{_RW4FLYaOv@&ajilDrvm#z$M@Y4cE1(MGS+iwCkngh@4Kald%0hg{r*H}xMgf4hKSfe;SEm(T4l zDsx(U-sfKq(0JQ`rFX) zVTE#J76QFdM4ls?kR+}V-F`>PG!Bt|N6P#|h?C8}Fl7@}6WwT?FI%JF%0t*&eodX-_in$b9EPrE1{38r zAeR%TVtOv~9OQKDm=(10A{0gFdE#O;uu;|KZ@0}f3gBF&y24+_1o z6x}XSes}A2r~oz(n9D3>h%kC8Cd`9*=51y{MWqcsH9Twm>pUieUGf@%-gn|Rf{`Oe z+i3+AxxV!4)N6EMq5WtXBhHL2Xmf^5>K%<7=v{MElT#&R9u49Lizhyz?o}w*6x$DH zchgM~rHmp%Tl>YbsZKRr*;5yK{>*AGY&eip{X%o+@ojc#5Ktex*3DD!NpPi>pAmQ2 z@*%ppfsyOA28X2GTIf3%49>=p*;gMa$8OHK6QvxKJGNT~%C{sR$6P?%3<`{oR!WIg z$hl`1oD9Rraj{&~NC)JaW&Xv9k3n>c$w=?+XnmbJaO6FG=mP4K-a|BV6SKIX{XHM*fXDEh))L+*`8h8|5nE`e1C~H)Z_}@ zZ*lAiX7xglpmm*Xj+5`~&_=31QNwGn55b_VoKn(IzjQAOu>o%bDjaZw%8TXpB?R{} zN_e%#;*w1s$oQ15Nn-wFfji~Z_hx``L*(w7VJu;~-G9aRi+YlbE&9xX_4|0Nm;`G?F8QXSIeB6G<7X4)k>#mm zXLN_TjVQ0;{R1HfV@mmSw!~x2%G9#9d!Ax~q1S&_G=9yL*nr~$uz49Yue~gOigkWI z0Pl9_8;kOboBeu9VOuE6{I(nOV6yHqX*BNqJw3D57u^vz<*Z&62!R1p4_l_<0j4gv zp4ffma*AeHjqNNl!K+x^ux5^j0{tQ8S!q%PzbgJ<)@7+TJK}ZMfJyZ-{SJ3p5VqLv z#&8V1ABRnM<+!x|&PC7Ybz0m=w$5Yz%IQOeP@893-=D-+$)!#^h(uoJ`{guvbB7VJ z7!)n5r)SjESI25RBH%X*L)8~b1PEr^>AQ;+(S~s%8ayGZJqcw#qc(=t+@)N82L2rqybSnngA7Y+5X-vvQon}} zp``>QEy)0q70U^lE;pvEmu)v>J<~yN0)c}G0DF0?fv7i7wzSAD`0Drd9M@F656u)wqy5S9Biv zSbV(OYG!e+xi}G(lo@kzOExVlD~rM=RiAwD}dbGU=>%|JfR|8;o0oMK_$tgEn&Gvu9lC3HX<@e1xEw z+A(8La_Q@cOGgTf4coa=;?3uo%~#y 
z6XK&y=CLIP4?@}aCSzvr{c_?G9oI;yV_8t19KoV7i|1!SH9EX=dUn1$!Pws zJ{b09lRmD*ee?R>^T#%x=M>3ekd+WG)`SAEWsLoVr&mxty7lC!%7lH+c`nFASSqNS z!GChJ)@|SrSQ}kuiMu5T&}9q6a8~4lJW!$$Z0|-+vO2!4X}c4FIDMFAX!%TBcvfcV zVht1~sh3Yqe6&`>Gj3JR^KccW1^dcb{VZDz#%HuApj~pguqpD-5_u3;I;YvNUYT+9!!Uhw1aAPCIXd2WH3DSm?RP0}g{!1}xHL6FT>OOLIuL^kiX6cTdb^ zD{cqB@_z%vJwJu}S7F_7x^Q_JvDoQ4Sy`NiuYG+tK@0xO-VyRoB0{yjm7kouZ5smU zo3MhCl6GH+Uf>wspB(Q#kErn5+7jP2*|1iB0nA6q8Y323wRJ51xV<4CeLLMx@i1p# z0Mr79JJl!QpPWn}cVF_%2Wn+T0b>(HzOBy%L6m#A&Ige*L!)Mm?})3Xus)ZZEQ_8> z*slrl6lP;q% z>LR_9zEbZ9X{dHbxL*7r}kmZr=vEX4Ssrw=1>zgObwB55m)2N?> z5Jwhe-ue}s5}X3Yj>Q85opP7m@rAH3%*qFyfcncN4Oj7<-jjUK&c?oW{Vk}Z3k_6? z1`y>1=v}+@E~WH$S`!zu9_8cV05R!0&=U>_(H9mI0K+AZzdNcIIqf&UtY#C+wWRX|P?KxjC} zl%s6W-FaN=&rKg$IQ8Dl&TRoxJcAkaKJF{AT{m*73Untpv&MtuUXzwbY|=~D+Th<%xnt^&003($1TSz%@8)E* zx_mK&?Iu$MGgzA5SOM{HD0s+*Agew7iCd{ryBEbN7NB^^Y{eT5$w6L!3!8aiA<<}a}%dzKKT z?B`!zTgnw@0%&B-wpOXMGXciEa_9yYvO8pT1(-Y`>#4tHtZ|r1Hi>5m=<(+X*EaO( z@D+W)qo5(9i~kB;aGUy4LL9bN4xLfYdO)deGAxYEyBH{P^{y&q;>Jd$(Br}0h!7#l z{On8a2CFW1$C9VDC{V-0NhGnPgosX?FZ0k^crm?|mxnlnkpR(`&6lD6AFL@dAa$C)YH zvPC-*E1TSpE&-X1+`UN-o_TV?K}kcKKFiXjrkje-@FSG#KjMA22Gs{z4hMY5AM8cu zvIS3n;Aw6d_0}yB+>N5sOuf;@k90_L18@tU9`qe&9I6_Td%b_BotWQoRpQs3JWv3O z3BezrWvJ$GoHF@A!OEjXQ{Ihkk42jd{-d z;bXF1hm+r07vVCTA44V%ljktCB%P35JMsmaF!RqGR8*2D<_!VPj2Ay#noqA2?O;WB z?S$?Ft*0Sz(q?oebs2XH1^3NZ;(fZ5cFCU{m%bs536(wUuXlSAkWLt7Puqc}(Nh?^ z2N}7sgWoA}Mh-;_Kb2mSSA%K9{U&e7*O_YJzWa`R){I1ps(GDj`$vsWq~czTZ~HIv z^&gg+CmuYM?$2C97+MQWdh@{d=&nMxtRBc;uylKu?s#Y?cYmEAEqP{&g~5i-y<8sl zPbB!okVFCmX&st2fx*LyM1AOcHeLDiTM0=GpY(08QOUZpB+Tf6&R97r_hflVN54^P zX%C0c_ja;J>}`|vNP$Ey8VFOOq(I$8ajwxTARwP-B>Q#*;lU%?*`trp-9itW8{@o5 zP*HZTAoloR`eeuI_cB}Cy+792Ks&W3`pWr5h-*eBTjD_fi3j)k^-)0SwA^t1+}U7! 
zy4@v++{S%dH(Y;_`s)5rNy7rHkV}bfj?V_cT1x58iCrdqZvqmJV=}FyOT#J6#3>>YbU@O`KA*2d$0AXK&aYr^2hf?>DALa+tE9feZMoY z%&4>2L*of(vZ*{wA7U>#u^KepUJ4M&0Dae9;~DDR?70DdXmvr{c&vUGb-nVmD#1VK zv15GN6`yl@bh7@RSp$|{J@Orof139CrOi3!%&S;AYbWi1uE3Djv4YR2?j-C$%wfxR;jiI}&h3P1_F~0zKQ*j8DW_^507!PGykh--5p@{@=3{aNMaa9n$<5HZflm(kpBaCmk?Z~@C)Sv(Q8$%{R~hb%PfLDw zIz>Cj39t{T=ybX}=Egk=7GEt1UcV<#W{>6s2)N!}8!KUsFFS|a8Ql<`O0Wz$^JesB zDL_vAV=ffiN5yCE3rKE~!z}o?$+{!VrI>@%jmsF1pwB zKg4IdrBhSOhnCbzqP+`_j^iGnU)rZcT{0hHqenPIf=2tw&TPz738N zJKM8r^#$Hd$y)SXLA&wZ!=oBO@5(pB70!<4IJ2JBJ{g}j{6d!UQaX5_Bh+4@kBmKD_?Egc4pZs!n2_rK&838rO~9 z?9ev9nf3!?ph7k@?Dm}*qSEZFyTHc=w?4@F<~|Ph&bYS)idHS3!e`#6VKM2*3+56V ziS-{sH1)9nG86?CBkYx<7 z4CNK{b%<-f+I%xfi+#wEr6fHp9(w|pzf*0C6^CBdEZX6qiNzVs`|K)YY;?+%H(m<> zS0VbPT|s@j%%gg~>z2z`r>9jos4wr=&TdC{KH+SNUvWaDJp@r{%3t@F9^vhX(DfO! z^FO7IxiI3IoH{}=l{$G}pk}W93CL*JNb4iNEWBrtn{PhF7eM$D%9gy!<%0Cwi$6W& zzL^$~s)4K+wwRPySfx@`5djtU+q@$Bcj@>~6LAW#t#bHgd@n##p_)1CwR_CQUiIvy zEbQ5NrlD!oEJA4C+<_ew!Ho$)AbL1{%Lcv8xdmPEo!?P1I;WZ~e>?^9Ju5W8k2L#m zaN$?UH(kA!fCE@>VK+8%Sr(!KH&OoWbD5v&Vjs%%mPZx|547*_0Muw-?}V}U6)5+p z^&nT9t8*G-YJ3G*#u{9h$kD8!v8Q)9s+y;Klt7&)LXWKkLpm1To>D zL=5V@{S#WzEENoD%EG!9^3Br)!BLx|PsVZmadt9v>(x9^f%dkXX2Y6**@4`q*?Jv) zQMA(hkJ)g$lQ^vrtki6s1o2JJFeNMn7fCnhjI|E9@M+;kyb8JU(}20DO%`FYZ>}u@ zWxYZ4*j&rhKBZ{hA3XssJTLQ<_v!Pzew*e99LQ)Iw!?1PK`K%)RqFGq$=<)phL5j+ z$UAO5n|HfYs6V-=mY>rNuZZ>6(D2C4s;M=)D_rf*6<4I&flt&~BzN}~mFeu78WoZv z?4SpF9s97}U;4$w0}lFh%CNI;&!zBn=`Sh)v<8K<5GA?jDcMi{6*cK@MLeWZM4(&=DLj zWo`pdhGMQ6>J|2$9U`*#hY$8>4jf2A!Bd+*L<)A#@)V90q=}S|f;>lZ#)x`Sz3Vk3 zSqUNaG2vO%K{DDb@Ug$VkUE|IHmt#67&+P=lg$S)Ny7T~iHh6y>w|ZPb^8a+rtoky zdYYbR)CdfU(~p1Kp{ad}nCe?>?Wo$5oOwOMC|OLrPj}t}CYq{)+qaWB*r;A_?xo(@ zs4UFOUtiCPO!eR`TqNk@?+Ml5ryP|d!BUBvxHyb>3@srUtrxbS1>;uVAnIhKH}$&l zVnT>OmO5LNzf`Lj(`_%ME~!uB)7nWhjOWR}>Z5{J zK`v4iC_f)#+qG#$pen}BDZe`>@4cqS0UHXK_W_>`^|OoajjN+`GOF=3qOMNTHgfEU z%mTmCN^eL0>~7pQB54}Ugp@Y>?5U;V@6VH=s`XV6KsR=Sh3^V>jKt$+X)=Zo+gESb z@b?T9)!7z%uUSX3EhnpFi>Nc@^)G(Cgs&6uFPRtbAuG@OKHMEnMW^Ku3_gKH<03`@ 
z;X9?@J);t4m_N&*DI@B-xa@h=U?uKcPuZDya+t{U6MNOHfkL9q&&_@4I@l(nJv}rF znkFaB;^W+sxiq?Z&LXF zyiGVU`-#588h%iS9aKVc(dWgzRsGVw+u)1ty=J5xBP2XudjiL-|x z2RuA81>G#Z9myN^N?vGLQ*|CItX(s$s#K(VKBs$eU)9}kH~oN#fLomOFxHZAApT`yajmqKrj+v-QIm>!`xKg2J;UMrv^>}G;MZz zjYCNzz=CjWV^(>;a^;j!7;jh|$Pm@BrLtSuJz!q_R0-FtG+;v!J2ottAV)QW0Oyyk zr%#uA`iRO&drrx6muGxh@AIOA&-m(R!81D1rW^kGTcqjbG0j`hn2K^{kj;Aa-5sBi zC~Qj}?obHeCLQUHGaiVo=Y{yhDNyuAUgicVaC6S4XX6DSl&#~8&iYn%A(~HKtXiHN z3&MO_R@_N_nk64n&@|8#;RjU2GDHe}cBP&tM43$yrI*vJ%(@wJcCeN`*vRz(A0K&V z$GrZ5Or3yR1Z-h>{eBmZ*_#SMR*9vh=^mZ8g!637^=GxGp+*TMLgyDHHq?ZzwEe^F z#+X1v()Pp#D2Go4nB|P^-jroC=X-`t(-^Ttp(5jF?ruku+_JtC=-?ku-=+xJHt?6O6c4Ve?a468EVni(VE1k_+e zgLXNx!w~36HYRCl?$f0m1sd%{|E<2&I`@p8M&4jJZy6r(ahljfal8)?SE_q= zLiW9BDYpOs2Z||}gZ4#Fp~t4ZpDDzSwCIewdT>|GMBK!D zVM2uR!e+F)cO=h5TVn{;fn}Q)P0EhAs&^%8wjvnpwM$Hd#$+?8ZK1?BDNlONLkMzC z?Xsj4L631bNxE96^q?CggEmNr@OgcvNx&~HxmX*NflSiQDw&=8}&^+ED1p{;yd&RQpBMCx|9T&x_d z29&U{I}7sXS0fXX>vDO8!;5GA0Otzff$fvhTgLv@XQy9|CdO9eT)5Jjy`ZlB87sEP zE2ukOs!|>gsEYgy=5)t=sB?J%9B_)t9TfE8(iGi*1Ks|Xq@DOwL@4E;+Be?bwT5C` zSo#WZ3MvNPZhFrIGl4#&$@(;;xDl6oPnXXdvhBzWnT}Mi+awW((;{-PFXw%KZSGvB z?1@1&lN6Clb6Op<5A^*63T9^Oi?(G|Zc$SGn_}b?_GTv|OR==`5V4G1w_(Ad=Dwvi zl~F~T#7(J!CTx8E*o++}B!{TGSOcr^Eo@r!aQ6q{aFno>$-X^6P7#rv>F;$nAY6U> z=(29ouw4x#(0SU|_S?%qc5SxcHfe(g%naqbgFEwwVi z7ds|fVMT38zW9&HU!Czo&F(!W7@9MuQ6679ZQo>Po>&{2HD(Creqqt@?xM?{U`SIo zi}TNk!W_&)%4BsigJKc6$ZKzTAT4~YnFCYl82o=01T67>#%aIJuI6F@`Oz>D%E#(U zJ6P2>qVc7&qXNgx?zI|K&@QTPdPP;B{1I%L?f<+}hQjB2u^vlX13?pWoU|XT;!I2W zRQ>lGnTosZ79a1*Npw6~?{*>VlV$;FaNQUaITO0AKil) z2R>Bby~UAReB$^!v~p(Ychb-LZx4W)*KVh=#~XX<>J>Ssl~6bSf2vJuv@% zF4ykup6D(kP_b?$2L}Cm1s)>(eeVf$9jW4=&3{@!1a;yE{3vlhenZv~=0cQ?! 
z)DiDF0`>%vS*F@*7dJYj^@+lzYfT&iZ)w>slb94e$P2LXp*4K_J!lGq2o^l>bBp9V zu@dx$^ILfS3&?keb4JJ&dr=E_NGSZSh-~n3Ys(P471d4I?Oz#DCAYu^3y{!eNt-m* zc`}G5PzzYQ*7?PZ$G?)DG2yZ#xjRmn$)XQM;#P3(i3yrhO?5PUPoH@Q|J%L&*QtjU z9-pUErjuUZ_5vEkhEa|31>YP}{MU;Gcc!At4LTqE@r3S4oph?UddUi@0}Cz%(HYe^ zFlYJ2r&fg4kOoDY*eZ-*)2z0}hg-Q!7WAKm&{l~@ND-&JNHt&h5LTa*xgtFJ4P}XA zzW3F{hBEo$ilhJV`oAymKIhp{D1mCNR~vgdTLKDSC5+AW7aFx2B(RwmN$xg`x^-M^ z+cIhr$qwIcc!~Y@Q_rOe6mi`-ALJCSZ|coQP9u(eAv6P#Tw_Ww0X&;js>xfj`cRPv z9^}s&tV_hk3 zo9}&BSaqV2$hl8ElOO9NI~LIU3#t|^w=OUDdgzfS->yx_gCgd0lEu>emeMr(reFL~ z9sh?r^3SC-nF1=fIM+E+jyNSjMunD28+ydYQjJgD4nXYoz{MSLMH5r$6Y&`_jjcJ1_qxIcj|WS7FtA z*?(8u|9uO4O@VI{n=Ho|PvDxW`8^bl^s|MBz^^isivT41_R@wxiL8+$+C@!b{KPG# zWVrw8IA*ecB;*D4Zl|14!MDAV4jM)xY8x9rnP`(93Bmiy12cU#=KAbEBxDelif0yJ z4Vlpz-4l}F*`HtPEmlAJmQ2D5N9C=P=Ec{*<9jf8)pb+vt@3TV?SbOH%PdLX-X4~n?lkPRUJN2sWVwRz zn(`C|$hoSDm*E4JgWu>ZTe*F$NL6z;XquKVspaK8OSk{qRr!N~G2^{(!HZ|Up*~XT zdM9SJ!~t7nlX?jTuhwemnt|O;+pCa({;C=Z)Yg=e5_|4k$Q7LCyWc1=-zz-%J%rt{ zL>`}D_WF#);zP6L&s4-th^RM;gbuN2bA)sfO?<`;1jpQa8l)DT*|`hi1e92-0~|{G zvWKj}JJAvHLSnl;Qr^WAlNES>CMJ-N6)>4TYi)-K^OpR9`T+UqwcTurw<|2LYQ1)( zT^M`C$hkrlwYd(P)@{5Qt;UMnw+b2D!H$SX#}RTxRy}pE&p{@*sU{~QU062kGnA&QKhH6n4mCX7jyB#3L-St5 zm#$KqNUxPVYRj)_vZ2AwG%g}E>NSRJ2HO0AmozZ=)F**$cUf5}x>3AK5l6 zy1zZy>`>atO0V84jN=Kjj#tcRi&qyjl-H0Te)B7BvAh*nkr5%u-6?tSX~KIlOjx5` z)CRvjml2tKf|Q-+lV7<=T;!BBJlcj`L7q5)YuyzF64a}F;b`Js4_(tudQD@!iGdCg zb?{lcEUOxi{;(cRN#X!i@B!d_!FVO&vt`s(lRVuCvYzi-Zm08(XdGmLX*P)!#QzQ()(l(_?P1_ z*w^MuX}Gwt01*!%9qKrPyxt;d8tusatylFEp#u8?Bk@I~oUH-=!n;P!0;_S^Nn@4- zs?w4kb!qh_B=*%@#ZtGRhxu@?dulJpCJp-FaCV46qDtL|4rX)eqj`oVhdQqyR7Jr& zC)H1_0JFl;rm|O^_}Vhh!LYU7E>d>?#bsVqZNF@=Mvr?AA0L>ZvU~u~D;;y(@-{of zE7~F}utLOY_G<9cv0?i{T&CK*B6tT78v)GTC18riqfysIwju7IB8&{n^}@|PKWKDE zMPvxOx6Fv&!kRBpr zgPhk^_4?z1p|}RTh^ezfI+=i{&s9`8l~QskD*P^DP%GevqL?aYR@Mc5RdcvF&QIE! 
zTa8Hy=QSw`GGlR(tS^@J(qCZBnC&GRnJB|yyLac{2LMILMuk9!Ime3y1bGnD+4Alt$H6M@l=%kb9{b!BQJv4{M7B72|qCGyKxd z9Aw7qLDtym5Dx!$kI#-*i})De(WT8$1t&W#b?_@kk_jZNhBTVhvbG-HyCJ*{+Kw$7r?77SCKo5XJ4hNCXYElffS1%IVJQ(#!QDWkno!#E+)r-c~->;4^?df zYehbpGCLO4Ovm(KKL82ETQp5IJ8+w?zND4q)Cq~BiL%FyYpeYam9nA+E*tt)@ots_ z8JVW@1IAjsq7$>}mg9|ap;*lJjmySn=*s16*oWp;eXZJM#CHXNk9j2=;P{o5+aoDW zKmIV1A-qbg3l(TQjBaz%z4=W8CJUeRq#zJwOkUEn?a`LueR@vIn-lwtdWJkmy+fN& zsyPZ8P1iG|K~c6RBLzoTX~ellqQc1p^sU`?IU^;FXL0`}cKn0m_FEu7FYp#BvrE3U z<1RppV57uj7`>*fr?Z;0BcWi5icaoQvSYa=!hiE3btAys(ra;Ee9X^xshn^gtLwhkXbg# z#|B4}vFdNY-Q!8!*c8S#4=*~$vB^%=LM=j;5$*jD{NgKz*7mg~7J)S1^}qzb^_HZ* zmpyzzmIGs?@F-3t>t?NM4#Cs$;lY;^#H|y(v!7(7WW6#lj9bK5qYM&oW&MEmH5_KP zcV>m7b`3qpTDX!$`i*Ql4jnzqB1v*n7$)($)&&8+icwFSCDbBr$Ok|p#AW++Xz-E+ zP7Y|ja?HMJ^RAhlxqHgi9(Y(1&H>8Qnr)KmGz^|73B@|On)WhH2lK?Kv?5ZP_1$xaj?{5DC1kNUJka9o@zTl*OH>}jH?7Lx8{oS7w zb4Ic*463REuT)qucr!v~?^Rqr_X^-p3s?a4&~!(ElGWob8|U>eM`p$LA9tP~d>FbF zswqd|wO`!Ol#lT=IGU$n`xJ1^#H9AiFsbynQ-$vFto@)0n(WgDMJ_{zWk=9+dzeNQ zO1!ft@!dsEi=v(QFaW%Rz7Ug)ybyy#7$o{NA5ie|f&tf06WatZy)vtwO|=^nZ=$h3 z_@ThC4Mo6*pYE_FdJRT3H=krONn1X&_H}OJ#%{``QYw%b4XR|tR#ncKSboQoIL|@v z$SZ!$V+L!t*=wY&6a1^YwF{4SEI;r6ek?>lVZFl3C^-YRp^Y47EpZdJWQ?6*8;_4DfWYA z?V%qQ>Ck=e%-;sYj2xMjSciR9?Me$h!$k6jM0z$O$Ztm(qSeK~r7)R|C7R+Py?}`E zdQ4|TpVTUGbVHyiY0=@6$>J+0>0K&h6k(9;zrPcI<=eo1)N8LDE#+U1>n*oKk%UUY z>}1cRy|RhPzz|l(cvF85M9r_`8;ylQ`|OAQ?V6V%pycC@zu|$S{RQvEzYd}D+;NNE z*a&y)#N3RKn#pt!R_9F4j^;DbGP#}ZsVB3!YFYf<$KHza+-FqkXo}y&2(aR*`&Bd5 z4~hpsbWU0e5k~Ovotac*3%Wa8gv~bTvIrlP+{wOJSfjY-pO|HFDD%K9*z0M2yH%Cj zKtdos3iF8Mf^1%eV~TcSvca3BB2c>|MUPPzWMg_iNP3qK>AvUG(l`l_3WVQI_u+S9 zk+}&lMZBLI^Ig~m7@$jt6--_uFM~X8T+Q-j`L-BQylp|#+(56+;EdUDGWMI-HAv0- zx-uET5=7(Vh80!PbDj?XZnIVuL%)$PrQ^oQ_3n~Zm4J6y@uoNYD;g{WTW-=fCkJ;; z@*^jbuDFt*Lu>DSh+))(8oumQ1O$ zG!B3sb@Ae#7L*ch740s`UE=#g{Ln^$$frS5GW(?kZMG&4>W@kHI)Y`{&h|<%+QzL>y`0BM7+thbHe-nuq`By}ADW{4jP|uCP+- zG**iK+<-+Q5l}*z)Eqt{p1f*~Ns;dcEFK4^YHNyGrFiXKkZ6F 
zzhHmhQa*uTJffXJ0CwgirF4;<%vf(vy@JtSDy#2jOks2x#&@A4KxD#r4kl~!}vm@ib2=Xa0?6+66s6uz2k}V zAe2cD)1YKuVhe}n+;iw;#1F%EF7s-gSrr~&CKXWmX^a{xp<&pE-CT`pMU!x3m(Crg z=I|h=P-iqnOkUPE#l2hEqA!-*{GED<4g8@mF78klBL;nPwr5r0K#q%_LEqTNDYNSr z?-BjF5FoK;JC{QWfj&K*P{SbWyZKIXc8R4&Q+(OB(rzSvdj(0D1m@g@n+GE(#EMcy zyj*GX8+whOL}f-+^)F@m2U|XyNv>xbHP-m9Y}5 z2^!W52UqutIKuv<2=-?eZ1>UyFBp8k+wf?IcUq(XIK6t$8kVCeyI1$2Kpn47KLE^k zG#zn*Sr1SW=15%l(M-kt&uAdJUVvfBj^?)=JAR%A5UFn_gr+lp^yvQE`ksOSW3<%= zGE_ZMy!Kn*TSg#3I9LRkJlfs3kShRK?cD`|!dpj5NWHpW52Q^+9R`ptM?ACMc;L*S zTk-uztf_o92P_mbVsY$<4*iJ*Rp89fyQ=m_+|r-lfGs^gvgMTWCs*>%&SC2o5CelK zS>;FEQrqXimcCu~5!X24nf)yWe|4As7K6W(&cDUrufmkSt-)U^yuYo%zh^)Hbi4kx z27hG+|8|Q0((eEN(J2aiON*ii2~Fgrb1Lr12zC4H+FbujtMt3e+u(Gt`k~wj&*BIEe~H>PV73Gl#U;F zmXCP1U39^-^k_ag0O(shE_?P%#Tv50-%pJdoD0;6gTEOu4;`_p0xV^jCNm1hHTUpK z#oIHet3!p9+c}s1O0NEStL`w(sc_baKKivBU-#Lm9wT02v_sz|_Gg~!pFHvI#e^4i zE?-9^Vq*#%6W^XjLCKRpwgIHs!~E1g)p!5-3`fBZ-C zjUyeVnfqDy;Ae-w8=>R8fj($)&(7F*zijxQ|K`s=34*Lngua!qlQ_F`1wMb}U;pFim=%FYs>y_JRnO9_?`RS5&wD^HCAwkaPZ~f^e|A!G42?K)2aHJuyyxRPu1^=JTfzaG;v=3O`DLvVv zNyquaYUgCpWiOcEw}m4f&6VGeru^@t`Tyi-iUw*&Xq@*xwipX~c|E&>uehInrUbe{ zx+?EG3NUhnS{3|>%>T!A0>1NE54gDHpC|wX%@lY;A#{HIbQl+!F5CIXwS)g3p_`ZVI(&L z26U!AaU!SegIOT=7D7(o6~{Ugk0>cjK>o)`t54QMa#gS6*_fdMy^p>(f+mjq52T^VN4leq;k#>UW%hXQ!{;u{b+J@MHi5x zXR)wV6d-;q^yU>Q`KAiaBaTRD7yuE$gg^5)w(JnKpll7WOm8fXH)w0h@|k+=2lppS zbxIs!qnmZK)fr2%`y3LU;s5q>Z?i`;)oGnp+JS@06JOY~rBnJEfJaHokvB^%VrlqL zPu&T)n#!@R*i^`_s?7E;1;%s$DqZp1s!w;~rvhEX``c-i6B`T#HMPK$iwg&SYmF1Z zKAJt!)@j}|M@nMXCIE;qSh0`(SyW+J8+C?X#dJog2bT-^BUL*oD}_2)ko9U%b9T7*NKvpo@d=Wbl7mMTN!JaKpz@( zv9MwS7zT0&VZBl{`{6xTQf{S5bvI*<2p=6vc5{_Va5_diP7JBAI2~BwJ)0`-vzhSi zc{4D3CIQ9W1(=I?Z^5oCX^`jbxhmN6gYp4WwvBdaO&25#-Yd20IpSE+P*B=2 z9A>_~3ZLwhUJ2ujl(`EOxMnBT+_2KpaR%0j6Np&B+t*gbW|mA6|I+r^ngHVAk(|#u ze>Sk9Gh!v~TC0-PqC@KRmhMU4b2?h#wIhdjf&;qw%n#tC7c{!gD%Nr{I;=Z2v#V#H zG9I!9pjQ;(+`f$>J!jRM|fVX5iXa-nU#T>!~F^Rs(oRN9;;jXv&Q9qy>40m z)$bv}zDucNvj9Gz$FNI`6bZ-@&Uz=LFQ5LU~JGCQrjW#u(OFq&6v6 
z)3KN7HDz2v2h^8Y#Fm1#XBOd>omDeoE;Non16t8m`oZ39xt*z=1fPXGi{1GgcO#?#Xid5I<^;OX7ebS2!a$#i#)w7NGjF!C=TWJDDU0{x84ewT3isezBO4_M z&n?5xSopDgku0!HoDLQc!v!@)lZnyJ6F5`oR^Jtg)dH_y`_xIJZN1u>Y@vJz+a+}K zieUY@<&fwV%)x%;+S>_;V)i3ZFNyyneCsHo$(3Fvzq7G z{glq3p)XOZb)g0l_v9rGF@0Z8!YL%YI8EdI})-&>xKxTwOCMgaJo_%yx6@mF;Yn~$ z3=*6_WCEe6@|LYW?C8Y1M|Vu@%z|tY7{BJL0n)Pjt09c75T@528J=T0T{R>e6LMMV z^K+YOMJxd6_6ZM^+p8L?!a(E&O#Qdwci+~2A9b7W%jCnrf*;J>O7HFOU^gq(J z^Nzi38NQGC`H)8y)N(4tQz(4CA8Cje>*+W5Ac^yM|9ETD{21uRdUQUj3KZW*QM*-m zBDg&Ui_0E{%^{|yIbR(oc8wJecr2EUcdUSr%EmlFgi7Yru+}9lopwf)LPQnI(^jMzEqt7)~IVL=1d}LZK!uEf0zZ$HK(-KOF-H}Bp{2mW_No&&#H6Tw}khu%9p zXy90BOL5TWI|5znzaHAIpH!Cp{vKTaG!IYz$=ZlBy@1F17n=F%DOmgh@VrV}@>zJ7FAKL0~3Tt+5nNe|ty}qfwi{2|+;|Jfu?$e8?$+TR>dvMSR zmLspI+GdHRc#VZ#p(CvVH!4b`>=M^IxH1|>v}~-;wh9w2_Oig>E{>(t--Lp*%?wlz z%EeC-3)T`H3va=)Vq%ZLt9>bvK@8eH`!-EvtKUlsLjqDF&c?OkRxx~T6-^v@&A?5C zrBi`oB7vL*4CH7bg4LsD9QoOvK;I?^4o&4a6P(p8oEXN4!y4)K=x(LB-T~w@pXQo% zqJ3tl`+%t|pdQt1Eq`Sy?ItXO#~kXX;fg;Vl-R>PhgWd+u-?9pEn|wj0bJC5_gc#Y z7Ci$6)Zkb^ClOEm&ZGTArV4h>0V*C6mLo{NhQgyva(_tR-NtR4 zrqVp`CVLFcH16%i&av61h!<5L8d>z4ikrFghlRLTZjlp|=?OM-(D9tC)DDjb*~N;L z5hV(n(qnsDdNHC0VPTtZrRysb(DkYt4lhnW1~n32zDHN!2{vIMm2R`@EzIQROD-SE zb!-5P{`cG(y~+E?+Y?h-s(S?n3dqlIzXLm~g?n3of4E<@bn!eOejuEy?OOmu3eg=Z;XMJo1GBshw>g;)OCr2(W}F-@CJ_; zKkumE)U4CrLjApVf#`s}IdlAaa^>iMO2iuc58xFaley|g%W-=^J)o@6cGPGp83Ef} zfM@(((RMkgqBV{=6N6YDd3ePtBOv<89p~uFkUM}g$spVWjJf)+*14~Dt27SH%;c;d`vkRzl~_A3hN3O*^eL-an)0)zK}Xt6&s;wHaD-~(Ty~N zVIgzAB?^B5dpQWu)xQv+3&5OTZ@lJWF9?$14Y9Aaq@bns>zOkmf^kgyxyx`b6e;1r zi1TiNsC#6GtL&YO8E`^b*^&b+ioKPBhu^AY84hcrT>8m{Z_*=WGAkq_wl=EBf(cNv zD@9)9SshO!gfH7|ccq!-t48C|g?v!4qE@bRvMIk$!W!}iS1ab9wW=z8GzbN?&YL7y zO>xUjNV~d|b@Ron1Hqp4{Oxy{Fg5Lp$u=3sLgVC3u?S|$_fn0C5Sfj^MR%N`uo++- z&vWp3ZH_6X!B>0;@PjEYP%yn|=J8(tc>T)C#0f^(`Q?hmp}Dnob{AaAN)mZjh8~hw z;}OE`A;2TMyR1~orx_;Os;5;aM7OXy(HTIxe8EEAeny7}~(= z?SobaN6dEPl)2c&jk)D3jA68F$iEJ9z4C!#J~N0+)+?WxvlCqlq#>b=Y*E&6dx($w zLB`xfe#IQX8Qqmxkk8DK6|Vg8iB%=1ty4`q!v8j?hxbEx$71x&LM6=tt}ql=ZwwfF 
zza1N2xxp2uHkGh+^9*)lwx6HJ|Hqfzr&UoY7}%o`K{b}7%y6hZ9!6LgUuizjlw0rX z)wmC8tuVrN>u3n~Ih>iVY0X7X#*18{9a0b!qC+}J^PppkoE@GDm)TyjH*gHB=UT@C5E8kM~P|zVz=;t9V z_<2a5$R}RVm0cdN(@u=l$V;fw+o@>gscQBhz>Nh`m3JOr@h?X`k(v2iJoaV}lHxu9 z6Ms0sas=M1E(-;fO^yhNZzwPRG{4nH^R?vP)s9#QvI^k6hgX`L5*-UPheg3Q<(hbk z9VbP~PgYcb8X$}^+#QnrIS2PSS$pt;pOYl^ss#`q0KegE+&E zk&I5j%IXNM{K)fP>94$xm@WCrNN9dR^mKIDiI8M`bA?f{q^0kJ8vU;4m7rQi>Gctt z^x%llI)YWS8Y```2ciCZ@z88uBj}8AvtpT@z;R<9HMa;tk9nD!F>-OvX$0wj~QVl8HPVhWaTgX)?XKJRq|? znWuI>sZhdxj&643p-QG0(eA6-80fQXJ%A}ux7dW8N-VPx;Muz;rM#!Z2 z@tVnq8_z4OanQTI@K9SRlkc)>oWlu{$B^@s*YwAbu(*m1R1yS*x8a9*T1|kiB8r7t z$OUXN5z)Qxh<;T~YdM zua;M=_`DWzcpNACJD zc$A662EZV9XLEfcn!g<&E1~Xx(%5!PZv$j^b*1rvSBgns@_O`Z5lFx(EvIEq) z{d18`b%cyK6V@Ba&n>$rUNUq>k@sLNV;(B{x6WX7*DxukurXwF6|)l)Q$yDe9c^F= zXaF@;8Fcs~RmVHJcD~3+_q;tv5|TQom)hv-edFmdnGg^*LEr0*LY?*vFu<-%au)x1 zrz*kVmEo!!&VH^_ZtV%}f{EZvT+0pGKZbA(TwSNt5KTW*6Cvg(T!q`)&BVRGzY%5T zGSgT*Xc@+eR@HBX%3kown{`)#j4TAPnT4xKkjlQ5Oz0Umm>*yOoZ&Yyl@`Evj1C?- z0g9w*pN(6JT>rXlny0Y3MUI zXG|FyT~B^qUokO|1PQ*F*@h%tvkx}IzkeEv+1I3@>mU!s)&Qf zQLDCQ^9tWkVO(yA&(F1MlD)f03_zvYjEj-A!x zKu*gaTfMrZ-|&(_=;^S!MQbCg>8A>LATn({BWcD4nB<1e<~5)^^|%VaY200Z-ow%1 zzye`fDWxzAP&2nM{)N@r=Q=8|%6FQBT3dnhU>y6Xr5`)g?ikPStf zE2R_AMS#2t7{%whlDsEFS8lK?@_27`(_-f8SNyn{@6tu!C*kf)-kvf&o0a0><1CB( zE?r(T zyR*6bfQi2e2>P_JW`$6Ij{nm>^Ab?Td z>7hS|;XZ}UV9=j)f9ena^>TCuWMNJ23FNzHMk;eW#>$?m8Yv*<_DJ(Nl~K&74FGJ7 zmmrvG{SEEu(TyABIbm0t!c>JSZ?c-$eYyUi=cXa>q4kyn27GFp zSrL_!iTp_}<@i7e4;|6vSRvQdM_COI*i46;Sd10=66%a~3ysgSC%LrpjeiW`wJZKY zL-wOJXH$K4LY>5FSL;J~J?@+MF1R-m|5F5k{W#4nBgTneA|LE~M(-h^>(kv=juYLK zN`17pfynNv$L^ZCV5-dWcTr0KmeO8R55?ZQtVnSbvSvTZp_k4dEazyuK0{K37$w-6sR* zjD6?c4@qGmz~;1^MXrrMd&s@{&k;eQ62}It?u9%+_fk9IIgQN#m6leQ-*`VQt3qat z%1XhR2PkoQH)x6g`=aaCD56KwplhLg;ygfG+K$3Qm{ZZ5u&u1?xSQl`ux?m0!Y?2$ zKVjNhHz1GCej;xKO)a)}LtBYjeKTDFT^nnH#Ne~NX=0V9a}_d%`40>Tu?%;SNn`0z z3b+1xuMh^*R|5W20{$``QP0G_G2SA0hLC$%Hdc8QGq%bP)bf<^Xbt}ql4y$LRI+Q+ z5~#}aD=ZJLr++Qiv7q+?b&H^g*E07?Z-7ebTrXPVl32O%6SBn*3JXtCgNMVYW7C 
z&9;qT@TIjQQSzH!Cf*a|aiS-wC6$wiiMUYOE@3Qy+Onh}f0i}dlWk$BZ5rLa5tJSr zOU$#6+5pd!Fy~6r27zQ6Q#w9@eWJ(wv9Qqe+?y&F%DS9p z^r5)q&mX@@`}14&U0?_}6RTo7tu}~k(zx?$d;+_(Kd0$#`e(2i1Rm~>k``zV#VQ~JEs%>HL-FW$$XP(a6*k$EEp~XVMz|_%?ZlF6>RR%Y^pq>3@BVhj z+Rt7)5C+IQv1{q9UTqf*D2HaTpSQq`6I|M4UO?XIsKfjst%v}^+r(}*9m(bRz%SB@ zh=3R&IHe-E94lJ1_-jFQS_0f!X+;o#lLOsEho9KHkx}Uu1m!zrQsFE|feyWO#p`zRFsnc=zzI z$wZ!r+@T0K=<@^B*s}BT!Z4}w>H39cn঎&2B(_kZtX?tFvMS0W(op{4?52m{+ zDew!w@lS;#kG7B>J(m*C)SSXjtPLCcvUoXWOy3D&;gx7?jp48QzE^SXm-fprFgQ4P znwSMgVP>hc^KpcBKKO^Li;RQ5oKg9O6!TO((CBnE_!zq~;257a`LY;o{Y25pA$R0; z@GlWKpPB|LAgBX_( zYv-#y`NiH@{O?ly|MbE7QtMs>8V@u)plt~|xLC&vo>Gp$Szb!~$`|-k4O9!|ao!b+I z{+vvTt6oDkT@uF4$#^zEu#lJVQg?a2y_y*>Q!S!bXc&#!+rg>7R#N62^dX&Si)((L z6Ng%a=RY54SP^kqI>+fadAQ{8sYM{oat>Cpz3J9xp%l{aT1 zO}y-YOu5^7%5OMXNh0s5#GjdCCHw`UlKUHJWgeT~`9!>`prV2U3qsp#qRxZ`hTb=? zlo!~>HDtYsU+(3U6>cz=6}wJfN+0VhZbmzQUe!GU7o0o-j|rfJZv~v}yEn3s0RCl0 z9f7w9UJdyEPSIfj>Pemr^UelCrZ zgdyvF6kCqvQanh8H}X_14e9l6%<}lzX%4%Rk0RaiQLljwN5ql;>5Tp_pzOhXx&h|v zUn}j5AY$U;8PU^MFD##C@2VyGI>2B;WogmsnbBg2^5gaFa$C{X3yQFgOG&ynIsees z0MOgd)3L$ym6Mc)bL@^4mpU4GrFDZGy?5BSJf;{R;X@AHMW*J;+p20 zcpJbIou>J3=2fmH{JvP9L)M6*0KhDG9t&|Kpy->qUGJYJT0b-n={x+i>#|n3j#jTs zkB9Z-x=xrS*7=j+?^>lvECE zOAgdk2A!KVRFn`(Hf8QrzDwmQ;nGg)m^Zg#zb_BG6kgVAjO*&+X3GY=r{F{i`{C5J?lXk!i^d*lc)0%dum7A^*d5n5IgIJg*9$BIK5P3MN>TS;p|e+ z|FkdNXceyB#cg%Fqx1FS_Flj5Se7XIdp6a)f|2vE9sQ%on;yGL!HwlQ7HA0jhjcpe z_6nu;Ek+D)_68OI$g%Aizmf9%$t5n&hlYfXZ_-w!^wzJeZpnx*qfWJ@+X$uSR)KT6 z3jz<~%o&nqpwUTeOo6?r57AtD^PAM{H<1ioduhWvVumu{f%0%(&UD5>NkQ~RXFGad zAk}{}s6?rSqo<`WaRG|2GwxZ8PiKsRL@9i#*6y)3jGi!5MbE3;A3^G<9LlqE+CS23 z(;neT@|LWA+l1GKbbUa!!5sQ?`-50UZB$m1o?xP%^!3&T)RuVskIg!XeE?z-miY)g z;egA-QTqey5%{Br^O?qrH;?9ma$qLthwGgP+nIsxYij!W1_?sEiTMct<50qc+dDgS z{W}uGc~qHIBI}IrR1sJrdaqO#%x^CGwpsd&9*un>XoRs=ynFRr_o@xzF2;EEL~}ZRiYe*6<9JGU}~w)J8Ksr}m|zqy4z*msL`$o!+`av2X`EVxlF*+4e)7MEX=ol*}chA@ToxYqg% zzg?LSjuB2sOn%A%F3A&w%Zj$WV$}jh)sM1U+T*g4Mhg?yM1`X$^rhHqgy~m|z~IL` zIaWhCJin!>aW^apx*lXXnQHf#(US>wU{qgKfxspO0%>oMdBTFXt7* 
zj|DX~`X=%RJ?rT5FDgMDtB?ZwCi6=W0&=$1ZQ4NLQh&OHy3s z%@`E_R2Y=<%#v+ThW(@^Kl=yB2S^0+nWtw2BWjTQgQaIW#M8p!#D3?HXUBG0QXlbo z)Kn?Sc9G~?2)L|Mr6|g(9EDoYKihdPwD$S-W>)QbGnJv!WFbmowhHcO9)HZXvwGRZ zKDtVv`u_Z&XP(!=tEjPx3!UL(0m{BhctzlYMapk?SAs>H2xjbzL=!Rk`DOJb z8_K{FgxnMu!V_(pW|NpRUXjii3E=i--=%=~jig)qo9K&>uF(^HRg$I^n>)sq)32he z+^d!_N+v$r{VQLY6m6!CG7%KP+$M8g3gKOIdw=afa^HJuhP_Ro+RMcAE7EW;Hi`1O zU(fJk-)1v!z#9ak)F(Gm-xoQ+b)qhn8f&ZlLf{tsbL2}1Z&+SoH6LdScuelB@#dl? z2Y3=&?wr1v{8d?Vi%M%tD%3Y__RWd*1lMekq!iMNmsGfY9sJf;vJ5vyL*_Y4JMOqb z)W$<^q(!t9R9w$9_inU?Bq4qJ9en$ovl>-+5n6LbkQ=tiE^cLxI8^a$s1G91J}W7^ z({bLSSFJm@I5*U#I;M_r-+A-zQ_OZbf#~&OSmi=$#o8jZR6~kb*4uC{WRY`$fQ~9Y zg(W)U_MGPyC{`2?W5mQX(J>vYko5ew3&8DA#b)YY2p%;ph2zS=jNSDffz~a`HacF} z=cF01=~dgHb<=T~#4xq-^-`fX=L!!bWi@KwYh{Dr&@g^zd$2n}=MVV2gG+UHftP&% zjTLq729K`+>bAztNLI4H*)FBs^UkaHcMZHg(#?8{*>>C=LGmoI3j@IK>?v%-if;FZ zYbVQo)Dfn;)5qg4blkia5N8gXnJ!(Jgs^zY`i+DF|4?(A2EJs8DX!AE3SR#oD_%PZ zY$a>rn1b_38^c99`f}NvUUOX*lTtsH$4gkIw5AsuAWRV`D4~hXJUsmCiIjNLG>2Ti z(wHAS9Q#`abfoKPP9+A8?fq|}@+pYmnXIrFeWL=BJKLf4cVO&|=cJ&6NH#M+an3=- z9ZG0N3TtQ~gKQtS-(ay}!L!+%tTz`TrNxZOb8Fw|yH0W$JsyE>^y`J^WF_WyhYmff z7FYdPr_8DLnI7n$08c7(z25u#U#Gwd81Go6`ENepGBuD{que_p6JzQ@QXqM)A+Mvw z5Wq)f9Qayw3`NbTZhK@+Kmii(yNJ>TX6Q$p2P)$lfDcltIKe-OR9wH>)^J9W#6yu^ ze(C{T5Z8}Vlf?hQ!!CgZ(=H~(eRe9(bE^UWp?bZ%W;Xw~e21l$7k^GL$WeAxoxe05 zv?x4`%ZPi;O@)Dp)4{XTl{1m^Mk4x8DbW(19TxA7lbS4i`!I~rt0p5zW%CiJR!vZ< z{J;@GfQ~AB+f(9=oC_I_MP!!E+h#SgM$-I_WXZ6(cFRT=5KvJ89~Z0EcKpD<=>b75P;&E0c`)|jKV0HRdbQ?)4dGX7$uoQ zXYRU}|3--tr(Rqc14Fb3kA)#tk0@C=b(w`>z+{XP4gAVPx?z)2t|T8Z*&!Y7II{4g zwI60X2#v zzvB+rZLxj(u0O}kF!S%+#Zp}llii-DBZ3iEppHBJII>myfMJ#6u`3qdLV6`4K7<6J9+2+B>knGBEk zEOiTaNu?RwdexlSmK$mLI_5lHoJCai;^lh`R_>leQ88G}MuAq<*xl`0&Pu#~S9|ty z`u46}Ue;q?J5+YMyK#)i=1Y(h8U_UTnIMulHnj!Y31hm zK5xRa(IRh>X#qh45{O>?U{YLrk)9CGV>;L&w)g3AMot|83{>9>nkNorn4jPC91`M| zFtXVvgumUHy{iDovm>P}M*47aK9(i`^$IvdTpul?}A)Iy2bk!cK zA!0i&JyKQ>kT;^|do~H1kr!>_KVmThD?mtfv9wdd5$PB`Yeh?BI#C51CV^-puJ(Y- z%LBWa(VvnIRS;&g*(VUC2uFf3wHP#cW^MndxuH4ENs+wH+fLR7%L2 
zJ`ePUEXh;Z@=92Sw4Mvb$a@m_SPd&|g3xaME7tl=y`9*<8B0r6g6CLJ>I-^uP#4wS zRnQ9sUuTpcTD^McYsP_e4zZZ@UF9j~GpZSx!q7Q30redC{cI$vA71m>yA zPJIZXQ9mI5=_WsK#lK}e!l9i|HJ)tPWLeTO)bD7-4X z(bZ9NpH2;cb}Xk6{)+l|?HDOlIDgq=-yk1ELMWj6EXQk)QI`U~Dfn&2W`5;G+&OOU zuH;c&Y|d&paZt0n+a_Fm5O|bkq}^G-B|H5g-hU^t0S4j6W(31)Hq?{-hP?6e(4n#x ztwAuM)b&WM5i74Z2EQwVGf`@~c;$F;htUzZ+S!~#Av(dwBVoeIO%_KrZbsOLETF@! zs{{;Xgn53%xZd0(4^o1Uz;hP#%>$wZ@2>+nk)(&?=f8(KWSuUY2_p8WO&BLi47D8G zVl{UAIEcV;H}yGv>H|96+YDH>_qcYY>pCC6m+xU}%}PTWia=5IS~04^_{x^34YDLT z6i4aLmy83eWf6t#_0oAMI?`lH79@J_b8B;|myB}cu8bqv>SLkwJ`M*G z9nHSd-vbde5gS^|f?^dErbUaMcPXOnVoLcHesQb0^EGY(w^C#U&vQ>Drt2IK;I)lJ z3L}3?)XX190ZB5}H7Y0JaE0>+x>Itu-Mn@U26KdB+;NMn3q$2dNetG7af3%^s{Y7Y zCntlTlzB>?corYS+dEtu3L7Su*j7|>+TUxVqIC7mE@|*ncY%(*l1_-w6g!TPc> zah!5XU88XPK=v6u^&es2yo?-yU)XM3t5~^U2SCXc&dOgiCU{ao8~uE&Bm7mX>Jxtw zfd3bM$6q-#N`S83h(^q6?*rZIpnhLnDaK!yta3|MYH}=?L=W^|q(KvO0-0fnc!C z2oi5D$US)pENe)v!ms?`E!uwA0DSD$L;H#lLvr;ZEo~m`jsD~qgnDji3Ki<)n)y1I$&s? ziby-O1^^f&?W1y^^;#p}dvG#*e{bpyd)D1wn$3Sx`ruC3^mdc&K&VST$aAjAl7<73 zw7ME%;yPGlPGsaSEi&x`*t`RaJYp?={})aWZ86G@{mze_kYgb4uiJKLbp7JI@K~-A zZD8pA-;6SKeopeV|CE_5pv?%66b*)0m@wHI@M7;{V7b$NKt{Mc7MlFZg+WkeqbguNeJU+Xyb=*W;!gG zZK;AtKOtBBuT%Ob!#OYTD0Mo;CdoG4&^E<=tglxccIk>^Z%iTQJT zRElGnzuzt8NUn={$RY76_t0ysQT&A*atg$XkVB@PO?O9V*21=F-V!7PrFv@uXZ{J& z9NBGO+-UhFh?OhEtf_hiC^lJztJhtytE>Fopz(=!A25XU_v^LPMESz|-9x~@f9>C4 zLM*)wc%E~l1#Ef`fCqS-SO8(J(SSng`DzT=yL;X8Ybe;-~oN_iT zz${wh<`O?0dB8ZJT54zdt_a>LI!0ew2s~xT|MlBA=$e^y5$C_DJj)YA1GL6VWN!Ck zNJ!%oP$CX6In0bAi}(MyP;8NrKz#o7Z>jmNucDU#7PIxqi}|1TFwyW?U-nXZ=OzLI z9v;X@C7_^5g5jbLec3(bw37f7WhLZqH?o6&}hY(xY#3wGSmEb*u`h7 z9&R+2^gQNoO0Z2i2k<*#Favr5^2F&YQA&YmIh~D1fAE4J>u16_r#pf~g$t@V`bu5- zM>PS+tl&@Dt}j$U&@F}Ck*`v~G+`X@2wP@l-KNh< zo~~4|Np=oC#nItUKCZky@AQ!5c{s0_F1leU3#zjU;|$(!lN7vL5Fn@S#IQ|2f(Hxbz%6pVW}*d-@w@ zaj+P&Yr8xVbej;q!Qsl&=D7>2H;qs(Lr667JdMu%Dry_3!P2Q@$?zzJ9Vx=>I*Ga% zdC`WwR+rQvwf9{L@S01excnO|x~nKQXB0$DvEz}lkXi(#a~2I0(^4ZZ_Bf!%NWkuw 
z?!^A&4|N{X`n0tstPoYZw*W8hR_1J~oJwZ<8lrf+VsB#&g4)2Wt*)7;ka86*jx$LoZ3NdF)09DStKZV`cd~)n zl?G-D^=M^tLa~}|GbK-|6*w@<-Qhn8!Qj2QjaMET9ZapygETr*(_dx4Cg zn$lh?E^aul+8{xIT`2wD^YLRri6h^c)L2slqG1ki_5-}h={y=o!4@xHW@x;Xmw&!Z zh>+3kJE!*<(N>6=o%NcS&@VAx<|`6MjHakAjqV0f(v5Nbkc9dvr`}98qH*aO_Pf>; z6Ql3#RKBYubKVtYpsgi{%;lFh-D41b-)`%Cyauc~fA_tA zu)mk+Kzc?5jO{D!#4uqjvw6Utu0ZvAPeL~!`f|=7459r6cm!}B07TyYk>k|ohY?Yl zG)91+F!D?lJei37en+)=LVvfaB~qwn>)k^_c;=nwU2nTfr!v)0@cdO20Ty+xFydP- zb%QLOnc^|jl*sDBkk-sW9QZzq14%<_qb!>9|9+}`D5T2_-0g}7yM{$Kf*e{^>8C!w zFfw{4y70hMz`Ri4SQJiq=>t-f$3T1hki>evbEOA9;~)~4e%PD9T83afkZ}-((`SkLl34e3 zkyoEI(zr{!a|lv&bzsm<9z>m0=hUuT>!`9MBOH2$`2Lc7Rw#dEVGgk*Pm%C)r0Wf5U7?8WN-{15V&?F#}pjWw*P)7Ca6qcD8U% zf<(&8$abu?0n3`R#&F%}8-pc9MO~Ldc#@~aZsUP-J_gvCdfL5#Pun=f1r0 z#@r@h>Dv()Kyf;ec60VM(Ei^j@#UnB#`{0Z;vLTUF48=2D0LHw8o+en zZtwKUl|y5a;U zMv+##sunSrvUmDJOTNYRkG&N4)?I;r$=V8dp~DuP`P@)jHJz1O$mvA|bEd}@1!9HV zvK^);(Q0M}jq}8MQU6w$@9fMC%T~TD0gtx`4gO+o-|xRv)RcKm`+XVr+4o2@%IFDR1}WvdR$8F@pzpctRQCEvOC8$Yis!x(;aFVh5!_u<~vJTBgk(1r+5E3 zQLL~I1{@q6+axT0TMT$My{ESLLC^0Yt{?Y$9lcYwHY86WnPQU(6wyvO{tj8nZvMe3 zLtP>C@2D4%pzTOKM&qElr=5C#g47{57j6KFfZ;bFkqQ#WnQHFHrUFZqU0ME`rnfU(LPE)45Ph(aPRI|6$;#DE`GQ(vS&aAUqQEEOm8+@nTW~}3s>66vw zOZsDt+3wj3+e9IzM=cE5ot&DV@{k_iEiQXQ_(B)8iai=FTTV(&1ym z3YM>HtqhlTvY^hS)I2AD>;|K9t((Gh3~BzeGXP3%+eg@g$4O2EJ=HaOw*kt#wq0On z5_NC7NP`u7OF@>dev>c%Dg`XPZCs>`&(rnk;zz}p0m=e;PJ8F{30}XaPURc1<+Hsl zG?ynd$;Agq{$Y%xuGrY>Oi$$)0bYR@5TD&zD)~u^f=qWok z_K%zY`;qoPiY8c%o5O@_*nweJ62obY8`7h7Wh#-tqN8(<_+FkGo9=DsAPg@*Nw>h9%eicTiAhWUi6K9 z8G3@N$lM9N=h3+6h&TZIM=OxNPr3p{VYuuPrdmv;F*xU7BFp^ za%~(Cg|=U?jMeirwq4j5n=5qG>r%BSX6DE<_7}>>qBo7^SoMaexAHjTw%!=Hpy1r7rpg)p~Y5 z-SVXy+7FXs`)m2NI%Y+H&)Le z^ahCD3QOmVYK6CdDY?(vozso!Ch+p(MwAlikmZ-xmA<=45pw9}h#?jl~* zJ6JV0gG!6@XcX6bV(IEF{$@~?6Q=bNBRqzmu(Y#yspbu4{9&9J75vz_YOstRJg*~{ zbgh}wA+DhR43^tx@DgwhNs=7Ek~ZSZeE3jtop z&*=1Riz9jYSR>Tmtw$KX^MdEc#tq!{YN|-wo+MUoSr4u}YE$3zAS5#EU^i>C!`xmU z8zC`tb7!eX*!YRv6NHXqHMm5g;E5-|nHLST9Dznk+eB3@RpxzO1+rmVW5Y}aAc)uH 
za$63}z_cA8ytx8)L-E^CG?{}|;Cskr3e)=fT_7XRuu(Z9gL!YI&i^eV<_8DAy?apr zGwS^lRYYdVxVmh^NIlb1*`Bpg<$UlxcK(d)8@l!dbp#XF_84U3z~IzPp|^>%A&gau z)cJ^VU|N@fe4_pAR?ycb6itl%cJgLSO%U`6Wq-;H@9cc7!c9v~oty@e-k^PRN~r`8 z#Tw7=M46U=L1kmWZ=W4c@T?aO+9V||$g~l{+noW2<6gVE;zD@E$V-OO=EZ7?S#fWN ztc1*8`eyzaMg8+&Qa9C+!oWY-??fd`pE<~5PXpG+uQH$7-Qw%={z5mkSBTyr_Z+DF zxpQ#(;REm%M${qWviPTg*0E-&zcsg{V~u=r?hVeb7SAqzzMv7QDqJn#DEp&n4)QUG zm4miL*~N7PT9H0ifQHeUitB7$7qt%pCVW7)vjxEh^khE^AtO^kWQm>taIbT=NNRR3 z?PV6CPAO7^^!kkq1?l{?ma||N<$c_SjRzgp2aI7fUq2~@z4cz227FF}KPBt72K|PZ zZjVHE(&jXa{Y0MSqf-;ALe*CR9Fyr~|IQB%wuF|zq7Qqc$X4!&;(vh8v}k6Z-BO4 zp~|ygS3fjbT|opq2sV(f&?#K3J=4=W6oPm#6K`sNlfK?1$C*KZhy|)!Hk1~ zmF2JEoetSGIga+_HDwOw#8trhY7p8xc-sykV#;<*ved&GjT{SEIAa+~(iiT$7t>GVr`}9x$}E zT>xi7`uKdgZ90XP{ENGTldM2Cj<}Qa&qBF6FNV*xEt$3Lng7}lyn9^0dyf*|-Oy&s zMEg~S4;eub9%LxREB)*XRqJsziAYGKw1ZQyp)QoR=CFUSxtQmOAhKp4&vuEXxG&iU z+SQK1GTW$lP}Pg9k!>P@?lu2vz?UGlMnsVAh?ewJa;fD;L0;S}hit`Uq9bJF?W@QA z5PI1dj!C~RFm4C1-3@j5g&)c)vY^gcpgOV9T2hhTmRPUXHdtUQVJ_|Mp8A@M9`wT2Y8|V5n7=EtvTyaZghWVDh zsk{{7eA$XQQhU?PezARWtTqC816+Z&NY8&caYeAoNT9Ss#&Ouc8A)qp$1-{- zlc%vUH)cC!sNvIq>o_Cr&eY>SZ*RV-qva)vTpk#tn))w+B`e_a)8#^}ix^4*Rkzyi zT;0*us(g2Vo(eZlR-?Zc0MP1_^FP188)$Qm{Xg|#4Gg;yZ1DUeUDCYd0vCXh^1zF_ zexmAx4-)tp1D#mmg8zUF5NG`jv2$PyQt)#Og1Fu_9Ek4MpJSz29X%26*IAi{e7yqI z#v9CZr`z(4h-HFAq6{5yTmuiFQ%adv0qc*mncMRoE$R)GJF>kxVSX-*1@$#qXQGhZ zi=V9JY6;~|I`TK4UEE9S94_Bs5gN`lkTgJBnS*qzCtqrq=zTvFq8E?jy<|M***%B7 zjjM~r-7X>RJf|;(SnK-E7yHhaI35Hi_G2X6rn~B0O2rBBu%a(J&SQ^m^yLaGUX*Y% zm`4$K@Ye#-E*G2s8EUrU2?R=M(M^KqL4K(clk8R0pyk?iS^st5?KzdQV-4-OtY9ya z7OKhP@8vb$6M&o7l%){(fP+k3gO^jI9FXix<(&V|*#hQ6TTbP=RW^G<01|R;4A>`T zyU-V9JU0;;1Zzc5q>yd^d%V>ATK_$d$lrYzE=DP=+O5x_Rd+!xC(_JS+PooYqCdFg zlc=1QVEcevBWsiQKEw8NHKsR}2r*6+#p>>VmtDg0ZxXX?ID1zNN0Ga68}X2orc;N2;SWY=bu!Ep z_kG96t^2_kX08xgLVm?4a}`#TGr5EX9vP4Gt2>4Ol$Cu)zVzUr-qBt&wy!h36}(2s zdLzZlWx%O+Z!tPBt(6P_Oyy+f$(am=8#wLRRe2_1=}x@ zZUs#Kb(~bQzcXhLP+i*$radLP8^O%6R;JGax7&E%wykX7OnHGxgw+5g_2}l!-PM9e 
z*f_;}>Gsrs#qE11p@e*D*_}~ZP0N=V|7q7bV?4!kF^x{JH^|v=LuestpK=CcyC(qp zJLi@RwwN8K`4Q0QhPe-z3HK;I)2f^t%A-w{HWIVyrhMiKczOM|H1%#Q>qUK|`)Bk? zEbPyr4E}|Im{#ejRBqtjWb|qeJOk+4P0JkM0zzU9Wg1L9$rovEDX*YOnD~7j?5+!c z6O-cZ-{PDu;~V(RY&Bx^{(j0nV`^(-oC!03evzMcdtQM$EmHoce{lt;ImFS;SQO;( zHGx0eCv*=0zf~3&?GIe7AX(hh82>-Ajz@sUC5!Cmkf>1ReE{I(j0azm>@$*I0G_!~ zP!vhX%x5NJUi!qY`oxsiN4YwjW2u?*kGBzp=k+|wdWiPZUuBf;Qpht2|Rjj z`=`vG!}3d7Z*3$>pz)Ei^URUSXSS_pe~f;krV6(<#(V^MfAlaI z;MUW9^ek=q17MrWlL?-wMx77C5}mY9rXWC6!EXQ`qnoH9pEe`tn;QTne*)@{N)b@V zf(Ck?ht_!%=dOd-oz-6hr=!cV=5`;0#u}m{qsmTd65n|Y%C2{P9ghS^VJ^)^h5gQt zoq2elJ3())5_CJI|LJxn8L_}J^pP_@L)3v3QXG zb(=?b3nL_lu2M~AD*5`#K{u)r!YHaTr-v;4_&YDN`E~q%6!kfN_kbSwk!aQsJtjCp5sUWiC57ZXJ>X9 zP&XIGXKYiy55)y|gNx6gEKeU;c`>~?Gl^Miy6r)8XJIKXV`}CHJXSz2)Kv?_C^)Um68z;SPic#+{BtQDXu?H_TRZkqpMpf zZL55Fh9>T7dK##uGV%CXbGfb;8zx!xr8i#}p~$($k)kT67TNicSS_T2*5iP7@qrFK zyebRdI%+%;k*+>le6$&cG6b;jW`L(xG z>B2yFhHBiroZzC1#^_Q77!=xj)X*jaV5BuTG7H1Q%;um^Tkq!=E2E{>@B*!wWgZg01@)45$7XM^J~ znsu#9^=(8@h-v+l*?!STF0cDD`RA;?p zVBj2~a3v@kcx7e#Y_xNeZYg|_28_bOvO9!xDyIeLy5HS-q_qSav}}iqBZB=%X!bVTQNde325vmztKaxTaZOqm1WE)th{R zaAQg)&8$&fX1f6sSE{?RISl#pb3lWbPLJY;<7YSw3}O~v!ZrONCJmLqTppwyAjQx z2N0M;QO&O9PST1YKve{lwf%|CKUp^Lm;Ym}V`cWh}a_3cz>si@2sD40UlZ)A9U! 
ztJ3nWma()M_dWt&I}V(=qi7?+(whpkT1VlHD_Q0Pw#oiBo0V^Ht+kypbNTpI568k% z4}mHtHJ@s%|Nq6_dq*|7ZEvG{TR>4jMLEPI zgVMHy7#I}UrI%q0q|FS|nvB0po%wr|;Ywd~ctBZ>OwXO@=};NnDl$Jzd$!T-CsI>x z>@x<}wcV5OAH78wo{0?G^qR?0-m52@dQGt0pn^yo0IiHcLTNxN^bDW@cON7%AcjHx zF%!`K88nbxvN?j5A;AA8(DTSf4k3V_1+~!5$UhZX6v-BWU3|V z^w1(=5U454J;#}Hi^0r0eU+5OiUFD{vG6tSX!&dJG!FEuSenK8s($$UZViEU42qC8 zFkylSlr2EQB3UG?AjYhTtv-!F$2G->maK_^FWLTEgBakAIbD?Xz-6>_u_T-5SefjR z3#)-J$S36gRO3V{IJX024(uEX#jml|IgEJ*>v*i3UiyUidyM_>=s8aDC;{*NuVB}l z{up>~^_+&(u13S*zoSS_jiz*+wLOmzB3XF5ux1$qEy0yLfc-O@Mz-fWH{ z%_nbw|9y5dxs(DB$IdS}9#fk77Qq`R-ooUX1hGTKaVK26>Z=8G$Z?^S5BPj9$EHffRjmyuCDWPaAa6$AcP4;qMs-ya(~ zudT+rmaua3aNX}S3Q5iu0N3s<{p>#GN*}@Xdi*|PN}%tF=F>b5*2z^ux_{5%{`cmd z?4RuS%&|2_D;XZF2Ufo#&6GgCI6+jEtx!L#)ciNTxIae)2hs7*J&zN`=}i@eH>K!e zIf*hCZ~WZ)`=tL2{MFz@tgJ2|_n;(i5nn?CDk^QuHGlF6?p7VshJ)ziDp|&ccB={j zA?wrq>~p>(!$qD+5oN;9V4xGH_AO70GJbN5tBx);mW34nWxh>%zA98dv0v7|XoqA5 z8zruKdIcj7Kp4=h!00Kg#)TknE5tN={SRo-epoE?zk31v_vQZuI8r=@8-17+KVejD z=fkQC^zl`fqiW3V9V0;YlY*!EYyX~O{?ADR?Jg)bS8XDz9E>b2suM1`bM+|bQewkT zHU2~i$-hSbe&^AybGvTOU;W%Lp1Z0EY+$p&^e3HV{xnx}Umhq3(ETiU`6poFzxkb0 z4L+1VX&CoEUI!`^fGK)5_JKX-Qm+i?7+pY%KcTj}}LD3pG{JpJc{$DFYLc{e}$ zkar4@!-l_S54!o^srvWV2^1mx>|>tNc?HyK$*acwT%8RtI4_z~y#D_3e*ov4vVcpGrz#huTb?*ZQxg^`d3clSE%}@1_R{WelatDu>}8OX8s}q z`o+xr8OL%ozcl}UPeiUA4%gFpCw0g~&L9_|rnYkF-LYT}QloVu*u0z&&hSb${I$-_ z81N*qNd8)u+Gu6Eyu9o*BtQXGYu%HUB;iJ=!Ov|pm>te@qYUT4Jze96T^PcH-+n)F z?*5P;;*a_@~R?BZU&;OI-PBpmD{`9jP-PlbapW+nu?Vd~Q&yN9c+zD(A z<)6QZzj&Yv!M_DopE3ms#r7TvJB{$tUgW#(voRaqYc@g+48ODG2~R}5g7m>`^gw~3 z?ma6-h^6|=%J5> zxOpNF#J(nM*X8X4nf$%Y|5MFl*9Df#X}eG8R1#saBO452rgdIRSLr(vUFI=P5;)D}ga;$r7g-ZRFtH00d>pQ!#-Z3Du8ng`Z>1$Zp z21x{^QXXo9Q8YYVOc3*j$hlB&26S;kS z2Mhq|JS4-R+Imwaw2#Xejf%D;0%LGq5RR&HcINw-2ehjed%bf@uw;Ht3ujtw$& zP&v5X%Taj^szNEVZ(uiM`;91e!pIwAnv|`Q>)GF6dqXK%Q0b%NdD9P?guvI*?Uy`5 z9-O^j^yu$LcIw_~6M?|$Azb~4TqIK)53{~vRS_|H2ez?iNi!Z1_(`d@7?JBBnS+DK4!GrSUo# z3B_B7$SifGgl#PFDHi+Fel^e4NRi+6d(((9q2?KHO7ZDQ5}3FX!PpfoanI^*MDcRl 
zdwi!RA$R8yr%uDmD>4TaD2t7`?pqMDhY_dVZ7w;e|A4irY-*U=RJUZ*g`rYdi8?DI zpYCb8WvM6J-)7DS?P$(l`hi<|$ar;)VdCa!?E7O*;{MOZA}8j)ddYm{8CrX)Z5Hq# zoRRB-gG`KpbM|p3KtYJOc*mq^AwN~hucrXN6`v~U(_vlPb2h;Lg*$#gc+{2QW#NqN$Q|s&$=@HgsyZ4iq$`AdABslYl11GUlvGG1qH1 zBzX-2kxodG8K6wWa|_VzuFoe}MquADJJvLF*V(Z>tK(~_g?>8&v~1Q;F5bVD+Ol+a zY(g?jg9RXqvzAleTc8MK-BT?$mQgnEx%)Yurr{*B z>vs}cz`I0qu&Gsp80xWF0(a7?pHl=&fGfkjn8lM;-G*0#K3u|YNG=<;NJ|TsmW=x- zJc7=NS2&O)JS}C3y){fglW%*+muzNtN8`fQUcP@RA0uv3(|i`8SEtrPsxa5Zd}a1N zBiK+$;VohuFA++^d<(h1m*RH$1B;lBk)Yq#B%~3?Q($Zm&~GhEKmlHy)*MJQ)NVZQ z!y7-mt+jNSs`qgx57r{yI1Xt7annO|Ys8JQV|+Ja?o;p?b?8%hsuB9rCG~i5Yr`y# z3*V&9EiGI^AKZPd^m^+D)@H&Atd`emLN0ipu@I3q1-w2N(>c;ilngG~Bv~*YJVcpn zSbe~=G@M;sVZpUVwRR^4M=Gr4TNuR8X6a%2>~d!h)DFW&d{LjmtogVorRUMJJagavFb`I(NwZA}!SCZ3!0 zv+gk{^xaEsN{Q(wVrXcIv2nQ>oUlmm zWqw;sPVmx&O@3i$ipzIlj*SWrCkT;D8{1r0y>$@zwmL)`c`$EG+E9~$I3ecdONp5f zzx^Gw#yZfj*Z`0rhxp4p`qX5w_WDPNGyTjt?=#JuIZY|m%gGY2<|oM0DCD~`zJ{Aj z8P3&`wQkA;*0k(LTOC@pzF>*2jFrl|{x6j(sl_Y82EeB4Kn-how#rvGKGoPcuAyO> zfs~%T<9;o^mD$!r^>ySS8O2RQ;s;!INH?c$A>m4{BaUgkKLQ1;h_Z4gK|>A~w>)Tl z^3#jBlLVBu3h1*n2*tn4Sy{>K?ez2zpEqY%mRl2~SK2n%SSTm-Ka3Mq4yC2Cv|7vY zYN<=`(2OJ9?EEuxC4fWqkLL|}1 z)Oyaz`jFomuCKpal3pmWKyd1EetjW5fm;hEiB79(yx%O46!FZcP)gFe^&DT6-{W5z zmS`5B_UP?pf_uJJeuFB+0a*DzyoPsvJ1H<5e41~J{?uEyj(2xv*V_D6TYOobEj#t- z23-)qrgD|2g5?AoERq}NU9r$^sx09ccG^K&Z}L+rh9ok0YfI!_Tiv8*!$z^cRq(`k z^8;#6yS2*B7F-5Pu44=BK$zA26Kn_!q`^FCI$rvXEDokUJkU^l^>km>UEu`s%Au@w+0LSxdaXHqKtcMdSP z%eZu4p|Yq1vA?_V^+lspmQBn{yxP9H(=c2fFvH=4ZS`FeK|r02`uLJx^9{1!XbXh| z*Fnmug}T-OkzVlS6jFO?Zot8k9}LU&1zm(jHMOzgZp?Kg+>4M6aEClPIhJQmaO^M9 z5P|aUnSCV8#F?jA(Vky6%a;rDTpBkUh1=T&M{C#Ui=nZo=U|p><#??Rmxa`&_ja^% zC*y|Ju%@zCuoA+7B2v9ys;iXiBc)QG**)0{LP#8=aC%0ny`1QGf3jMy$b_r22&un~ zJ8YtPpF}sbTyra&F(xjqc65ryUj^!!o+JA5pTZ%k*Tl)T#{KZ9J`3_{?FXswH+QHhx$Q{w{GNn)p_o8g7fTmlsdL?0&$CdktO$7}?t!~uTy*Vt zkP!8iHpR>baON3ZOsx*JooybZ8Ygs)AW1AYC3nS4z}e8;=>qQN0J zyrG-LTf$EFyIpTq<(0Am;t7z~^RBC0i9Kb7jk_z=D-%-vRdQbS_fnBtR!JdDxoSzb 
z?`Yi=iAYuOdZH*7xi`~*+z?^TQc5$jQiq6TO}!oYLN-WOX5lC1@zG&9e0#azWsmsBPpkJ&C9oXBSG(O>vu3tzfM0U7-Gc{Gbk@$8I-To@qLvv2a|7 zx_G%ae9vf8nCNm`S99RSbXH>Ss2qo1wHFFhYTq%-SY)SEEfKLIXk$UCl6vxYb~C?L zpnLKLoA+3`OYr1x9nV3hW@0kiP7XgC_+}=>nxH@;h8s;KhZ9I5fT#LkH#3iu=dwx4 zNgrl*QUzJz+Hg*Ua!&g#cPtlJug*$vpeWr2%(o>BJ25mw#qBV$UF8*zY6!Vwjrbn^ zj?-}5x>n>f!(!diBmOOFck?gv0sS*-{hQwr;V9|ZM5Onx(U zoTnI?Qih-e>dAE%;Y`M*InC9%6ttTpuXm1&%OEuw5o@E8xch^8TOa(lcTYE~w;uRC z2wDNdN}p>LUeB{F6S_OL@x#@@g$}^HFU@Zrb29qA5c5Fp>3-RypLng?1mB82pDTl3 zVY?{T)Au%GbxD#AC)=K)$1PqCpTNRhQFXd!(&RwFiI3Dqzha5BqMc8Z-%EbqwMiFY z!Ixyy>kWVJDSj{PH3X`>1B;w>#8NroJ@B@4-sqX3BJ^-BXIeefOI%^werUt|9FI*d zy`{L};NEzAUBg)2!ts-g6M@&f4xgZz!p9mcYA1g)Eycl;6DcfIxuRuCij0IYT_Kx! z%_stl(?7>!dDuwsm9>$7UCnW)bkRBzTUkgw%(Ntbe{U?i!+d`KxYV+niRDQdM0(Qw z=Ia$~un)qH`_BcKXP$k3D*qgafP10?wwNl$uVeM&)24Jn_^36kdNcO^30_I-iy8iRT*$c`u5P3-eNDxV>)6!@!`PZ%&OnO&kw1iS}(h zh^J?7CbKO1eJhu*$e`X`rcO^fIAGl}8$gBl;Tt;*aTC(RZV~xTA4&IwiP|+#V?GTg zu1b)u+XsDBy0ZMWoDy>|uTebTyPWCqTYAote%nRT6LMh3=@VFK(Vduqg0TUyNTWQZ z+YaO}-JW?}n}Z*T!~8e8ioj7{w%i1Si7&+U3kMb9uGV=D)8k88-vTqFC${Qzi_K7{ z)ksB~0(6UDg`hg7Vd_euih@=t5#RS{DPfA9FRDM{eD0^7Jk=0<{}QGtMK8YKU`kF} zZrpY;+a=R?J33qZM6O=gcgArlp*1}q;He+g_+TP72ZVkGEzf%50LJFa!Uzt)_=|T< zr-i-VU)4OxNt8y&CF>Px^%baZXP{dpin3`24ZE2d;ZJw(xkl)2Edr&xv1%#RyC*vO zbJTA5c_MD*Yvs6aXUe^hmKI$O^*2p7+gg#jJ%X>-k38J3u&?6$S{rIK(4k1VZNHJ< zGYgdGavR>O=j=)I!d_b{`q+{rU|lrb$Mh9J9OtQ+=Z=1$an`FxPK~ulcY+Bom^hxw zo3^+CMLOq4A*+`sqhwerL%=W&4vi14+nLSrA=Gxpa;5X{CX3Du;-v_kv6h8%VslQke@~$iPR>jSVf>UIl^(<2*MbsqpS?NKyk>|Qk_c9_MqfzKc zCJ}rzm^|mt44VTT(MCs9{imV7(n{gaF8R0&04Ya6bTB1v*hIX?XstE*_KuHU1 zBlTX$uBze)l5`TkL%G1K5}J`*;>hE|WL;EaP?zIF^F)@hYS($U*{^n!$hxaJci-vh z3@B^ia|jP~x%8uph!Z!=!DLv2<%HW<6-kf4EaRCaXs6kRNHQmOdK@kXBIrwBQez6p z8qfpdbp}pc1H^Cg_~HIW*1JNJF>BGWH<$az=x?35_!Sf?;j#FLrO}YfI60@;n0hKX zXRu^bs@Y^^r38ufHP2@cJ~Lk-=P>C{N|Rd67#m=I6~0uL^la&wD){(B;xtynsIvI^ z$Z|%B1KR!4Y=3t67!<BCA^3ZOdn_ zQkrdZ>%4M=%9L7YQ+GV_)nfG6sJo3OwRAE=7^>Tu@Vk^JhLvnNx4bnkxSE~_(kr>X 
z*BCQW$1BdAAKzDgrRc-sMJgLE|zEp4Vc+$Y(T+I%WaNa!rjH<25g&ncd=+JWqY0(hf6dz1Xv?%he9V;iZr@ zp|Qo8^0`9X{qWsB^F;YG4TjA%k*2;YoVjl=ZP&m-6XQvkunKFGa?lrthY(Q_HLixG z4Vvm;W^c4+cfVy}YKx?FX^DG9$O-gOIkV#Te%&ik6nDe{Z6Ku?<4arsm_*fQq)Qo92K9 zuv5cUyw`Pf?mB+o7kOCdoitCAvv`ks4ra!W%C$@8CF)V3JH(70aHP(&90S% z#Y)9G@8kCJu#aP_m$i@4;F#4RPV~1BjZQ`_pM8&K8x7_;wj^x#=6+EubhU+`$1y18 z9N*RU>Gk>5lR1StrLyVTFw#w*_2)hJ(hRDezTI3T=T)c@oDir9wV=}MPK-N4rCr}J zs}b68D)|&cNIzAmyt2;bX>V$uZ|8k#o-t!sdDZNMkO5JWOWYXEsYWSd@XJE{LJ14s z0=bYx3}85MreTTH6>X7ex%VdTTGWc9fhqi3maWEHZ#^?VvYG&h(y`5uzEaR+#gTx+ zf@t3s1dMKOSfh&7GNz3j#EZ&lO6-%7;ZGQOO-hO1@qT_dM{%7@-(*zz6HNCiM)J!5 z25j0u%*NjsAhjFQYY{BKFz>OA8TR&_uoBUTEw^g}FW!&b9&lR^X>`AB1FOkJp(g}y z8y%+WNna}hqJNc1Gt)H3gJVv6OoQp5XJg+kc%z@yO?J93nzFpqH@+z&;sHtbg&ej- zZ7)=wo?hx57k^kyBev|gH=3sdD`p80=$r)m1wAi+xsWr*tEqLcGo#_BH|aEqEoUF~ ztD{+h$*HB}G@G$p9~2}aV4eG9>g>+Sm)Uydf-u)s*Yz*(#I7RprGh}zqpqJ=)rx^F zOigA#oo-m7t=OeX^bD!%jGp!!*&gDAHieQ*>b9;-X@_wwkVh>jsg77EFA*zD*Pe=1| zLtlpk9;;;M5f}Ykk-E|!?2Vi(c3a>}09u%|7eC3g;G%oc7Y?f3NQBFQ>ns2YX^!kw z{gBj$UEO?}v0%u1#Eu)+A0AADTO zw?)e8J?1{S#W0}O^ySHvO-V##^PU$xBxCSngfO*}&7=h3s}0KUaE7{VYZa?RLGzjl zRu{EGNVdtoLk-7`SB_CV?b(tz3!{+7cRz0Of%e_cqPQ`^lltUQ>D>VU>SoKYgr=E- zZWZoU_q7fW#A$rS$u(_^!D3Z4!l~{tG1RP6&8y=LKUVdW|%xC1H zR+zt(S>OFZthM+cuM@6iaPY}8^|#1I^ODLCr=hFK$ayfihL~-^M-5J@0+JIM1ZHf` zV19xzF{p0kaFg5u2aJ1@KZ<|L!<-R9w9F&MQIMI0Z9z0#QQL%D2;YYjGFHKQ?94km z+S_jZ%CGUDr+c3_FD;7=It+g*Xxa&^kYGtL47J|IG{h4HPa&Rl+T|7!1xA=(yNieL zp9fG;_1>%qlMG^MUY6H!?bLYY`%@(h_cn5Snb4&Q*4=78Zo=9FjV8yq1a z%i>@MY#QtZId6mMeac48p+!8Db+>`#{?L?sr7^TTwgN8kJHxG5PvF@6|*L zO+}G&f@nYmGI}Wt+Q54E8J+%pUkG+j@1vrjwQe_)?sFjYO5N4u0NPW%yQ8g`sE*rr ztZQR3>XYCGwd_?|eyYhUkoRDMmxhL;S;pBX zFhcNZq6u&*=nav{@1Exh{dLR1)f-0SK~pvWgNX#TIS@bx;iOg-mn4_=l|pAcvu*3g9I zkp_gHOg%}eevV*nbd zia@O;;5px7SLTZ!GTB3$hgkO4p5+_nhM{kbpEIQ_6fdA~P$M}{j>Ym$s96bTwY`@X z@!&}J4t2k)pUeNu{36REEIg6pEj>#eMvzE?hwQK$m7FK~qWOAf^7v=uLv=+Q7OFgk z&S8?e?BNPdQ{%>io3580N}Oi+q1P2WO8GG)MhEHhq*6IyH6MH|kea__-IE$`UmHFw 
z7qxVcp!pulhIz)6BZ!vr=_(G=lXDT@O>(Bh?ibP2EJnV${2X1E9V_g>R3AkF!S^Ig z=F4WwG)+Qp7&qZOS}~LUT)?LCUV9A)kGJDseFkf+s?FAW4H_`}VJfMOMvP@|wG_zi zx$OdL3yxDY2P!QL>eu|hEY)Q=i+F^0jzYH!xl9Go&NcgXo2Yy)-q9+(FFjThOMh^B zyCFl@C;5B9J1*l6YY6r-fxJ?jyg>8YVs8i~5x3@u@A3;e2L(4sJgIzOeFO9P3|}Mn zGxOI>{c43P-l5rU9<-o!*E*Y;^(HLt0jdpmspt@oAZ0bn!P3A>&aZt-C_zSLOPfnb z9Iw%w3!ymgt}>7vs+a`!Kp7bf+mw>!ieA!;VS~7SWvv=s9oqytUN`EE|I5u{Ug=5p z&)BIX3x=6~R}AH(9)NRd7yI8zZOWn1$ZNTnJnxAdCzi0HNk;b;&ZB37gbEF(Ct`Ze zWsO2UW$Dk=meHlD2}#BSOsF+hB7Lw@bxH>_SVH`$>Tdy`P~*B{cW#9^dFU*Rj&|Y& z?Tl&N3Rh~Svdp&Wll{H0ZRt^SaY_aHkp1a0Ga7ukhdj$NWGvU?W!cUR88dy5ro8*TT|GeY{r1b%liJ_I*8`$fTwud7_@J+pzh9^xmhfR4T!)*|;^BmyK-&OnO-}S9~^K7<|a72QQUkS(HH@c*+Xq*(D#~ zK0@T)GZ-JQkc#?29_Q*gi6a}HhpjPa=e2j|4?cn~zqXy$@m@Jxe3FE`Tfn5HI{0w& zk`Gmat1K<3cES0@8k$~&XX8VlTw?cvp#n3=$PDuY3{lJP2p~J z>L%V$XsnI`((1S&=Z44Ah~IK=#1{-EqG%wFC5_*|z}LSArm15zCp>~`0LCN=AV!T& zyA1a}=r8uri5H)U5+*o*O-u+mFsH8TsXA7Y<6~}aRqLZupFGH=GV^GQOykAW<7cP} zKFVC4-dl{0u3Q$b4Eb=-CP@!ffGHYc%X{s@jJmlE1~s76QUd9zan-R|G_)*5oKV)M z-J@^UovRyr`a)>Is-x8o7O2pTq}A1{Az?KXSmk&5?33WbAn-+}COyVS-`uo`bm7(8 z0=S*;P}p)#ltSSN&=ASbzYpk5<9rh9Zi8QsUlB#2CWV=#ON{G71by-vQPIdz4A`7B zCY)o*6gd!`WjoFE9_Rfd!BAk$>7#56A8()tOOH;dkdOnaxccU^kWH&0!n3{$@w2&^ zZL1#qy548>)Yu!3IWe88Z7uldL2{^8KJfR+vLI993hrQgU+Q`Ie00}EqQnM*@!?O! 
zpM$tSd9{G~-yG=11-QW0awK+%`#zT?H!u6Rd9J0eIo3>5_rzUm0!@i;BbNqm_&DM z)r~?02h-Imvz6kEq@}#`KgIThE*FFO6sBMfD^?ltbK|&fu`{!PoWzAe%9dJeJX_ls z@Y3GLyRCfkqU5OcHJ@4Pucujy_=lhFBM(;_!xEed;S{+?bqG*g#ebL_B3DK zo~8iP1nh*tR|e!FSt1IC^GV;U{C(UyRrn&q_)n}(s4vKg<=z?sTkPYpq)S-Ig!pqtqrWZe!=0j4oHqv>?*b z)4JE+vyx5OV0A;&(;v!-&=Ovl@nlAx?dYEQV4c_@>v~0#GSp; zJ2rCj%5m!(?0=dk|AQ*)YR^&1{OTKkZ{TWK+RlL)S8A@$_SjmHrH>GzX`sA5`ztCZ z@z2YX5NH`nD2M840JIl!#)GYn4=wmS_J)F<0CWG$6E5!znJG}AVQ!ht>rZ3l)+o-_ z@QdZgz3*sezZ}s)zn|)cFN1YI`7n zlLSFSizB+F`Uo)`I8Jk`Ns^aS{8-V8^eVtyYz;C1YF}Ll(UpY1$YLgxnuC%LofUN0 z1I67`TOQCKkVoCpY3nwwiqLx_sMcb6S%8AJ49I-B;EOP+>34!oMm#cTx!q`|%eN_v z*rqj*+p+BQh2AU+nM0@V*KIEnPKdz^KI+A}S3AGF+-yd-#b!5mwliszsd1xMfly03 z<27pPvfSuLNN{kHKe;38fnia&zQ&+ji89oia_FUdtb29qU>pI814HYeA8BBDa@@d+ zR?9CUrfQCnM=IK1TSO_W902*Sa@Rp$L*z8i?o>;ZeBrY_5X@NkJhfEHJWRCO1FhWd zJ1J#ViOM80`H?8<`8*V+$29{3Ru0ssxZJj6x8fhQt?B!sKS7$Mj*f?NsxQ0nI^Y1CXnggaYn(8*bRVr7X zaL-;*r-=`U9`?rNz7GZL)jrNp)m zXC4_|+Z#<3b|&i|Clhc1=6c0u91>o*gc9U|@aWpys1!g(l>4+$^%WG3x{JE9B$%YI znungI$rZ}eA7ko=2UaNMQS?y!m;inV=oW*UE+sal(8%StJWggPx}LHvw!fRGRc2?3 zHoYJlT&;K#yQwtSMS77c>Cq7oARlq@UG|wt_YWXZIsmXJ77W_W+Gh-wf4DTDv23gx zCHQW^k*VOeSwVQk?F(|r;u3CZi?nN#th+r0qQv~JR3QyO(${9VLbIz1%2d!RD@{}TI)_Z#LqS32K+D9O|S$)zlH{JvODbn_I zt2~NBM*()*Ak*{N#>FiCsL;8-14Qbs^AUNv8PH++F$-u4t7M%hzWFpoPYp*rX*~_0 z!>2#uvktCSHzf^UV{_Y28^cIgT&U)8vmdh!OLM=`O&vF@M&d!tL*# z(aMpf=D45NhPT)CMmNFg)qEU6&O+wJtoz0dwX;?8FZB3lvAf?^WMOcx6oTi}Kh48K zCjE>bav9n(i~GY>Q&@HR@vtqGTcPv?8yLZ>wf$5dk{H+mMclYIb}&omZBlY0)aIlL zUD41MUNdQl4gKu)bfz2>?1&udZe$Y5yD`)D2>d8z4&RphNRq1r!MbHw>5O;pv#75$ z1=>MA;F>k4P^VkC=>8tlP6zdxKdxpY|FvUqxt-BG0O#Jf?8ttTwH>7uNPy^>QK42R@OeNM+TcOk16z+(l@ z&BrpxYUqz46vC=2nB0`j6&78OPHs#am?RS|PHiL_as9lvgwOgrdx&4Iq-I0jxN8Lg zfZ!hi*{0%dhsSJT3DPB!G?z}iWmx{)Q&${T{wz6UlfCioXxAe@NtbV4?fNxu1Rkb& zUHZI3bJmCJl&w%14|Bp8if6RlkHMR3uvYG&5saW*yJ#D-oYx#Kc~Uuyq82{a9j7u> zZ1O~dau3DCmG@3)1raMX2JWjnLDgWZn{`aSL!2DJ$iodN;hg6u=zNhA(CQihb5$dD zTq&^dZ>f481aP%|Ek)r9j|naSeloGvy|XebW_Rj7A^Jsn-O9l?uC<&tqkjuvG&pcM 
z>7!m=_R(iAJBmbPAgU$D%if*^_(p4lmu!%`5l@k8wrk(Z+EIN156s8|P^ul*JGMm= zhoec2WR@}fqV|6Eie4dyUcL@CNd&l?ivb@qP^GX$iM1RHE*AD7sfH{uU@HL{q0vlZ zj?0^h1K=4vvyJ$>wHT&_YBT*DJ8YVsEu1grdxWyABrYRI*swc#uqU zx&QHZf$8;PR-L{$UjognRQvfOok ztue_F8o6o6wu&wN^*P*(tcnf1AGJjc)K#25H|zrrX#f+{K*F zSPxHnoC~{V0j62PI{DWvD!rN+_G+S&MzvBQb|h(uU4R(T>oGJ!dQ!HP%fx@WHF2Vl z&&Yzw+FY-x+mJZs#F08z=MW`*Jk0}p8gxn>+L+xjxaL{A(=DRlzqg4e8o;Hk9+YBU z>GJnk%lL95IQE6W)P9qPG+@u+T#zU}rUufo%f0rY9tnDK5_E+YTmlbS+42d%UoP~G zM`NhO%>;>W>Fp51RdU3L5jXluOttM~HzEnO2@us{q+Zg~rS-ucU-D|7b*gj@VZq#g4u4^-Q7}yw0WGr7<68#9+S-jvH|0SF+7)M@3WXghC^4zMVQuLZ zxuiZK-G=`rfA2y>07VU{JE{8i6n=}TmbAPOT^|et7g*lmO-f3cb+H_JJk1dK+b*DO zl@_ooLJ9l!5b)TQ3ezLLyDc4iT!Y5v%ffik)f$vf(q#ygCnB*}u#m4O4f%ODwq>~_#ot|XERWe4jRD?xWr*`HGI2RJWmd=8Jq-W8=#S9?I~Xxh3E!t59=>j9Dz8?6yv#@f)pd`)#BS=Ky?8$sc?J_X)K^ za~h*_)z|YIv&co~UKl*!N^>_d;cT$2OgxfQ4i0H$tKM5=&Yvtaircz@tT0U;$hTbB_5fVCn?76r zv@6`l!E=U@hY_|p=jZ_o z5VudSH@(9s73zEi(4T4h8ECtKw}d497q1rf8{U8_zcQLeQ5l5M`?`IYZ~7e}UznQ2 zkoPI27?p%#IyQW7z!R+|($R(CIEu@0J{wSnvB?;kG{1Hrq1=7_yFM^vQC%_h@6ife z#xQflS8Pee8n8ZPw;yS)g$rfVY-zn#xQr@$1i@rmO96qdOkRw|Whm1@5xC@iz@E5a zxpPiZC0mP2)MxTo9av`UV`K|F49ayF96)v!$K_| zE(1iRPrW;CWHSF#F0;+x9hh`B@o~QEAUH&mg@4t>cB~*tLr4=ra96a@PE#e8S#&>b zj~574@0e~eSCj`Ss(AVJGlf)E==!LK$_}9tmz=s{tf9~;{8Nok<7u9wJ3^RK)!J^3qOx`x54x$(OO;~urn6l%+@m-RQ4K$*BL`Um}WgA z=^c@Ceo02%sY2#oBo8x8eNbjLDBfjub)Fz*0{XRe^O?+|S@Ui7B9>CFJrlda8>29j7F=$~ zu4pXx1+E5my(jub=Q1mbdTs_V=g{CwpM9#V;rtYtqS=unSSpmN^^b#no^@pm-}s5^Tl)(fI)Il5I~Hv-TTTh z;)ovy*8eKcrYx12p7 zRTu^qu2^>0z0GK*a6(TrdR%%0P(8%=0rO?@DLon2ZUD()_A~Td97jCWb|d8exLlGQ zk~eKMoI*nty-}iDozFsbuXFQp3j-h}sjkLc8-qjZ-vUZziQ7wN43mM^-0WFimTDZf zrS-lxES@d_0=IK4dnhP!yB=3%YzZ)_eIY>zEnZ*sX;C{!p;o3%bjyUG_v0h&I+u#o zF0ga4<@eZl&)c6j`xeOCLqgJlVNU|9+bE+-Yn)iKfS{A&kDxq*}Aci5E zQXE&L6ME{$LP=Q9m+$xy?2)WeymOpViyPza=10m~R$1*(5_!GQX}q2h!6%rS-NIdG zEQ?=l-fWC$1QI29$&)NOa8EFpMa$NVAv_rDJ$xF z&mgqF2Sa;GiN|Hm{Y=h;x0pT(EtkQ?;we+t?XX&mFI9|_-gC~ z?3yAtV4Uaev%7(cQx_qR^Eu%-03p!&gx6#zpsU8AEOLW{(n|!uh@_n9ruBF`QUk*l~C5-Vx{> 
z{LXU8H7yo^;EC?=f`~K#=K8tx+QKNd_XfhGb$!N}O-?gP1&BtDnsZ&KTsYng72Nby_Pfh5rE6c9L&Q>2)y;8 zH(J`LOr!8_U669!xHv>bwRU)NZObZUE%romPQJn2&dGKa32l;RN0 zT4;SD8n$r5Q(m-!dpI*hL!{XBDP=8!EGx9;*!DvK>u0m6EUGINUHnZkhLMURO3pURVV#&*Dc>9A?v_~iVZ2~mXTo&IA#8x%@gxG zCPfX}={JESIgZUxZNWJ*d+d1~N7RT;M*^<}?cp5vDVOVh_zd-r?aW_KyD$a-ncRBK z5E?cXSa@W8MTqz)+|G_RF)#hHleP*BKF8YAHkw6~l2M@ESqs z%E7+MWr1yZ=(TSRV0SY)X$NDGL_i$s9^$Zt(MW8qj$jqIqJS$=3uW+MZ9jj{sX6vz zvkD89MyQSbz{j3!YSTk%25+>ZxPK8v=XlF+^}^L0prO3$MoOKVrvaPl0rK%xC_~i5 zM4g8tUli@KHQzb~_85YFb6nhOh2wxzAPKypS;VJyX@>5cz{)276{6xevTT=oanLK; zMqUoR2{sH3@<4LrKgcZxuTn)F@p523Z;y4+KCpL+UU7f8D`^`tWjZHN5`RKLO5h=< zKKDY!c$bZQog;$vd;TlSAzS$(pt$PVUR|0nIKhw`3%Y^^+X|nZ3+QjEZdBhSiyNp| z=<2Ni`4*`Lo6>;5A)n(;0NWJHp^^BB&HVcf<83{y^#IZKS-3!nZJUQ5vc5$uDmv&Jz6C3`{%3Qpt zwlQ(3H#-NoMx9CMS+>p3&SD_lkTV1BrzXW|^%!B{1@vP8;zzCaBh25VZ>-NLj}a{P zq*?QVpPsWZPPp-dYRvmPM_Fx$k@dl$yD^Uk?MJI@8DQV^u1d48;?uk`j@juJ>O=!I z4&oCY2Ho*P=TdWYi*Cwjgy*XQpMmIc3mG7>6OUWjy9oxYW;p0Z@0 zz@ed@8yn9%u*^-OeR#4*79j#`_hNmx)@ajC)pumD6)NM%e^e5X*fO4O{qGMSfFU`RLyRe;l>9FE{RirGfU{|dw)v(eidUIai zjA>C{L-(DY_7c^KLN}HQ**CyW0@WUqmSv%-1HJN>x$Fxb+<-Xjy&2g?WUc@Z-`3p8 zNd}DU1ywtejn~_O{b`TnlanIu-rb&huSocTN4vQ zPN>IR54I6iBp5nOH?VJd-1R!Rz;&*wzu0-Z6NOY32i3^J>){`2*lw;|b#v6FsJz{xQz(=}Qt(H3VJ?uGc?L*rk>BuX) z*wcenUyGuDp|}oV2!JJ{*6~ccnm{2#tByFJPZqsihRA59TA*@7x zXA|0~|H|ZRbQsc$sl}^I%2XJa!~-*M{G+@anA&9k(Ii$nqNR1w#G;VE) z#}o)l?VW`ewk>J~t?2QcHHgVJM*Uv?NtdS>2rt$LrUJh^CtUd_XI2~r06g!;R;~B< zjEh``Bdo>fE6X413FV;#PP3xu+47J)36Ety(CccsjFu=?Kreyr4ln`OrHj1pCao_E z{3s{<2e;K!c^jwi5^Ue^3a3KN;ZBqXC|wKiI0KyXV_=ru<3YJu%3E zEc}MK*Mvu`hz74umN{*{W}_;6>xYQ(kJl}>1d1E=rpuo52e=w5wKLK2D!dl6B>mQai{PC<$03bUf{D)!u6HEKQ{q_Ho z9TOh9B#A2h18W0X*f@ibR1#--EidB^!YV?P82USjUgBKzD`>Aw2!zQcE}u>MQ+jH{OCO%jg3CJDYIA5r zH&=sr*S0Z}3$vf0xoL%QoUPQck|T$+AuDmp{U=gY-PPZp{s)V})tAQ>V8@;=97yq5 z_Z-|vF1g2JI{GeSx8T)4aemWSJF@31o1UeJgYUKe9A)*dJl7`Prk?rvMZo0o@6<@L z?Oc)S8tzeHzcCY+b-d=N5S_Q1zGwX%H7cdFdxo;L6seRyf6A-TA5W;KNpm5g`(x6% 
zuXL7h$)_5~plq6`gZ13E8pAvf5WJ$7e|`uQKOV#P8iyF8JGPYKOuh>s*pE3O$u36f zP%&%TX9c!$O#k5CUfl0G#&IC&KlUn@obP0T=@sfucG}i2$ZZ4Kqm7k1+;9r zg6anJH#bZ6n#MF_4(}}RnVbs|Kx7avQ!4!Q+8Owe^GS%18ZeWdH}K%-!3_V4z4wf2 za$VYh1qBsE-69|;AR?mFO_z>hqbnf2sPr0;-a%B9Dj*;UO{GZh3B8Ddlz?BqfN-%Qjd!cD@-jYH1 zKfo9l{X?F{wLH3mdKy&h^88P`{l`yDO#3Co?ok?&_4bGNz6nkxC9?k~io&07_5ihh5|Bdku1Wi^Ie+zz&;Qq)|Eu5p*E@f8azF$m zGXfe$3PqGp$gJ)%V9>_hCTxG&_w4hDe}oAGv!*+aaLC?aQP8oJUTqFrNA+4fp6g<* zCc%w)lnI;K2u%&U-hx;Ypxboqgva{K6+^HNT3}MI33`!c(Hycu6umt#303Y$xN;?m zGf&u1bYj7aGc=uz2W8U*AB^(#eNh2cMInmYA18wLl-ExSSP9oGqm3>1(wtTXdb2rM z#Xki`*Gzq8xuq8BdtxNue^l@`Cn_!KExTv-FG~^75lby25|4_4n6z>4BDITwo zIQ*tl;xsynf2Q>;YGi{?^W3;dlR!HdSv;z}oDfsNc%<9dMcJY-e#pta%_w zqs*#af2CSB;cU2uVBUvZKXzfvj%PO1?SI_BB^2Di@T0%RIGIe zozTZdF(MKqGheXHw@z+2h~MtB=}AifeeBmu!z+xWw2H^G>yHEaGJ8Fz$<9|tR2AvX zQsp$k0;7F#u1S5vxv~ZehODiH;hVIZwVU;YMA@Y`a6z+*5&^T{TUI2UKUyX?KFF1q zUya3dZ}(vWKH+5v%uX_$Eav`JIOpb+!C4^HG>UQReoJyW9 zpD_DKWkryKXR((S_PYl8g4HaLUK@!3L%CCZ6P&I)$gPQu?^Y{ypVu*I+q3-G?U9!B zpob({|M!;lIF9yM`Fy$llA;-8d}jc(H`@y+S^~mPw;6fpeL$w)VDzccMzJwj$an+H z5mPp9Chk9P(bwQazipqsKw+ia=Je|@)B8;D!!A738bYl!mR{c>joakx@4@OTfi{iY zTi(;Q+o*b%Z%>Z-`@dm2WV~c{&PhgVB}H^AVT~&E0cYUmOL3>+Ub13KuOu1LOi_lk zxWy1}t-*E!J4HU(XcB^;I(WFVZm+=rtYg*fwKW=l$FKj$2<+|dt({!`CY<@G(zpAO z*CdkE7VP;!FYBE;Z2cW-P_{G>+&a7JBJAh-b8GOPf@!uU?21;5uUU6`<7D}myc?kM zbnZ%{D60`GpVfag2KtThEA9!ba`7U8-8LheOM)SMYaZPWNxIvZ)SV=i)RiW}GZHh7 z!I_gdK2@%&(qh1*Q4p2gQ`Td>DW;rh=lKYCDmg)qGl#VTy%wjZQc z`PJT=CfJ~)@ciK_Rsyi|)`|F5HEbdr{F2EW3Sa1|99{_CJu4MhW_#XjlrRu+(W{6!ea@Q* zny}g*45Ys!EJNBUn%BnOw$xNQd{Gltu-W$5V+T0uJSYE2rSnurf8wH|CXL?8 z+d)M0Z^_EoFc<55y3r^*)Cebs`L3QzOhfNbJGO(hF7aaBLfmCY)ri@l)WpNJextx9A}z#_bF+D6?ug% zxnstk&|vD!)iX5{R8{5f>aM`~YQZ`oqYq@;%tn8>Fi5(joiIqR5_i!xQ6%KAP~|U( z@y2x!37pYo@hJqXu#MLho$QiEdIkHabMM}VRT%?0GSYF>K$c&SVWo9_ZN9wB9V4vY zf^yjL2t6$kjlH+Hjx0C-TVGxDxlpkx;irWj)DQ#CeMN);-tLR~BGo9`^<0l*PCFa3 zzmbGw@1pnwH40+YDi~NLRn3B?`z`~LML-hJ+c3^RUexbC_b%Z+g%1#c_1X-fhOGSF 
zEGU%|@2EFW_rQtJuM*dq*Y}qT?!4f{+HVBZT^i&cmFcv7sauSdL`hZJOw`*tGn zkUD}NqLDdi!bBp&EbEr?&XS96fpc+H`>@d{Y_M|X|H$RY~M^JjMib|I|+3`gpA7$30$U0!BT zXso$souaGosX^Iv$2Ke9h8iyPzpb?O5EGOw*4WcpNGz1K{DJCe2m6u^!a&d=*W6LJ zNYA_h@x*DQ;?q(IS11?^mTAtbcDBTv*{QR&({%z0wWSRiZ}@@KB>el9;!&XT_+89y zw_Ll}Suk~TR5MM6XNNwCiQQscoDu?_MHm+E*#tyLRM#dFEAJ<0uwL)jjKuawl-teLW?}r;hxf0EOsMUVwpk4U16O|PnWn>r z9;;he*~dbKDRRSr>2Jsst&FxI|FA+L9kX}Dr=KT-b8CFU^ssNBwa>-IQAwhB4EODr z`1axk=bqyM>-UIKN6VvcQ{qF2!|f(^`1&@h$4o7!W!DCy<#4!!WH=eQ){U-IP$D3d zLf*~JfJDA!sNjXxy`sbCdsS1mdXM(Vwv@h?L^ViyCATE}R6CORm(LjoiJ^OTrEJXe z@;re;ahctC78f#A6aDmfBeYgYg)+bDnUE?a$+j%jTBo*pzCA5g98>QrCjhjz#6Q0A ztJbV7rV5cNKE83}{Hr&IC>1UpWX*eUKFsdz)a-0maykCq;p6jy^0h=^;k{Y?iJ_na zwycjedU3yu2t{cB^jZ6ZJek7(X)yuMCFKc`maeFVxi7gGg>Y@dddb3QJZk0B?=jMVE|FN%w_E{@v6+zoa3{(8jVtd1_gC*VsAbP5GGNw z?%V9;8~k~;FrbN}n`eMdJHB1}(C_^od74~G1li)k7JESEK}amYdDO`{`)D2OQ3^9K z#K9QY_wioCqF!$!Q+QI|l&%KqKmgFxl`#b*DgCr_z6;yBHg6?*Gccj}55_B2GVFsB zCgUxyqS?hh<{rQ*bnk4iTnvg8)r^fRRv6_4xGA(YLhGyJACkp*!qBdsv*+#(i@C}( zK98<~jSn{s@Js%kMd!Z*&;Cg9Y#)y|w-1DTy0~9i>s=2)=g0#1*~=weX*Z0&-#JGh zm6XdG?74=0zGcAFI#Ox(khh2Pvg!Rz^JP2ORx7`Pil^UOtfEYX-Ees2%uKruzHb?( zFTGbLEXg_bEim_6;PI7HOMq$b`vg|_&A9S(VPkfX_&n!{l{s=~7oemn$P{KRjUt@B zm0$h(Dx2+27qq-&T*`pejC5*hTI;y!q8C2lJ*v_j{6<$Bp?bU2VQv^2Z zJq-j+YhW{z0J`DQzY7}J2Mrd43G4peC}1wfKfm4AV>DoTslokqx)zwYLgtglC;`y|pSpXh~fX&Ci{*!+vNNTUVB?fb>5#SNjB?Pd4zQ-e90uk#Z4 zubM83+j0Ry&4V5+q@P`88-8%BJ&p%Z3Wu%`=AB(hNROFv;V(1#*~7K&hX=;mzS@!M z^@0!sdSC}!1!z^u(aMXMSR<=-X`Bb6x8kHxsh$%Q+d%-o)ih%+mMm|Dg(IwbFeLoq zGq)VImxsflMcv#XQejU(^YzZmA0>o$T`ZdvL>TO9!DD^^l5M8lG~rrI(R6IFS$XeD zyzDl~^X0&{CobW+#>hx#Mc!z`HvG_dSkcn{hViQQ4mvQPMx_xmzU1_1gKLGXhlE12tr@Kc6VuyyshY$w)bbJ`CmUJ8K0K~k=7-ZH;_24Pzg5M zz~*@_&loN%hFwi+C-1v)tGT9sb8;Y8A_|U}O>U+zYjpIJs z%M6;^z*a?Pv5KmfjmqNA$@`9Bl8hXOs&1y8S+9^UW<7wU_C5<@=MhTmG@wk%Q3-07 zzY^R%99>9?MI(3|hL5d-4xs^4=;VknLAWJloq(q&L?Lshz0Q`?SK^HK)>oZ+Z+l_UZow3dFtg0*TG)Z~+Ohm?H7bb} zI}-oXtUd4rjLX}@v)pFEgkYHjM1QLHg8RCG=N`n$Z5X$FOcWvo0~y8<%;CLXwey6+ 
zny`?rM6Q-w!?ozQh>9{5M|bRO^9K;dN$CyN&7=Qw_w<9;7X0Af(@YHkRNl+xfm=~gGaSj-9oKlv-B0>U zv*Wd%uPFkcD-%YYi9%dJ@Q{r`3IJj<$hx_xjB1LYdi?}oEZjjZ-p_P-yQ|$QgCR+m z-IPu93ejs4yf>$Pw}&|D>lnyCNNNNmL%Y6rp#hf*(`p~JJIWq>RqSHd%cTdl=&pbb zq92NWUQ|vux8z;?57M6JGx%`i$tl`#hPVr+d|cx^ee4&;1s`&q@8h~~q;H(-%!|wC z;g_FedFU}jasw*|5~g_r1vdCFf~X6=M={HwJ$xM$?8yXZVc|4?A+n65_J($R0d^QKimms# zRCO1%JQ6|M#l-j8+`j&<2A>?u22FcIy45dQpOd~cm(-z`l%Kan(rnKY!Yk~5+jt<+ zux!>@SXj%l_epu=CKKFU#|` zb#VF2LDMUPVsIy1WUy2%Z6_!Xnu7G3I2@TSPa-)Si&+QY1u-hO-q}~vzh1Q#-#GNq zw8mYfY*xs=f&1c}S4#L56Ja2ssda7!>oey|AaWK6Kum9nO%Rw)(7|-8zeilSp^`eUvwflWWmY z81Hv-krhF@nn=m^cNou4BC}*qUHR%;9b?sTg+hSKyc273d}Y{OgBpJ=nu=7T)O z4&V5kWUlg3G(n^Oqh1C&B^iZ$e37PZwmm`F%zmgZB`N#ez`|{LtFfoKqK-3H%kzfE z*`x{_Fc~NYys#yimZl;)Kt?xeckjX4xn4)eTWZXvfksb$fN-hyoV~Bn#JjZk))Ps7 z+L^24pDCyD@A@n*NNPr2t)%9b);H5F?DY7-WZp0QKI^rG3o25?{8Kitfbg0P5h#cRgXc&;O)c6PaemE5_R#7>Qh?ykwzy#qK zjtys++`~*=0nYdz7R2sxZPE_GdLmxS@^y^9z$O~oSD)UMCN@#9z1XW_BEhxp50?NMIUz^f_zP_cS`+88fX~Lvfg_0{Tn_Y3RWrB^HkzI4i#WLQ&hXMxzdmr zjEGRrC5So*O~YPa~foweC1r1GUhqaKbT1tMTV$~y0PqP_mv?@OGab( z%B$}VQleXVxs6fu_gvJ++ngFs)Qf1RApru5bL(Rz;mQUhaJmh+Tl`%7!@5q{8Bn* zoGl~gd;WsmmA^Mya4;h^uE+Qyd+n!48U$qYx{Qadn+9@D&_K08mQ*h-Ip(^sC@j8e ztvy`jS)Hjyv@HlmWU9m)oTxZUE0-R|p=yVGb#J_OBJEq@wR$5e^YfZ8$tZAPNV8uG z=~{)t)$T-|a*TnvuN@oZk-BEmXNadu%0_bNaiOR@%9DVr8~R zH$@REwU|;TYK(5|6AIQonJ$n?NL6n1^Hv2?hd0YB>~ilF#@RkOrPZ6AQD#q#Q=$U@^B;-B)O%o`+6EvZ?m0uxo4uiA_3vzMY?C z*NLB#8-Y9Xnab7+#AnypVI)yeJ%b-Eyno(!DPPf0n1QWL{Tfuz!P>CFGWwjtx=-Cf zjQB{yjbXlZq!h1uI+V58<*on2!*$7_RM~F_y1@C>>EGi5V~;UqDbpZIHLUo4DO{fIcg&kS{%eoHcxQMcQ`OfwcuaO-)lLEpn@P`7oV# zd5m9}Y+&&+4ZZgt_ItB$zXvDa>jPC{hN3<*v0*&=-Amd(B`A{+;hM%!9-D`yya(4- zHp?^0z3skwU7ht!uU>S|!PUv$c7HA|2wFLb@}q+aah{NMzqypg@^|4Et?)Wo2&&@A_@l(<_R z4xj3nQ(Mb}(w*l7YYM9AA_A$tDx$;i&LRui@@+qe6+J8pPU~l`Y&%}jSvDfSFt719 z?6dmfoW0$DEn4^ljt-g&wnvhXoRDRgR#<%IC$XCv6ZpL!e$*IB9-UVT$GeU?A==5~ zXEQx&g)WstIWnnS*p7G!QbBxoLqfx{t|zc)ps$)jMyk@({VbjfK@q>TMxL`wY$3S< z)PG|_z5m_ty#EB<5d{0j5 
za5waH1nq1|wI?Iq_-NwnxQB9hlOuADQ$aY6D*UE2Y0a{q#m-Q&7<61?;yh{{h+4f7 zfHY?}7m&1dhD+@7bN5z^<7bnnZg!Q)KQ;-S6h9+$w^z4lmQMjf-Ct3xOqA>_L}2Xm z+O}h=a$=s`>@_b$ z&n$?#$@of=g!DpIV_`IG2XaDOLZ~tB2kxNy?5p=0{H$=@Th_y5i%%bpidyet@fy~K zD4Z`fhV~d6mri!Q6CC@x6k3lo?-iC9e*AfiabjnYG+=Y~7kTg`XNb%etdqb1Lvn$u zC`2r>L!u|D9g1}ecjAVfg02lJ2$8;qUa=a7TAgw<86lr?+ZI~R^60#jjkG=DU2bgB zbEY0e1<{OhA`-{Jn@gt~rF3@o()Nu+jbW2;;?LJBmUsY7SXyhFvJRK`BG+vXFOs$= zY^)589ZqGN^oR{A_N2!1ihdU(EtW5XLSOJGAyF6pTu(Iv#;2SeO`qYEC?tiRTHqqUtveKu zYTjAo91M$GhrcC}R$F2)zUT-fCLpBMygng-8R+v&$>TfUxB25~&Fax2j{Nzlh(<4) z&m2U!u?hA}y~IWsB7nFxPg;v)9c?Q*X#TK^Bc~Nk+7feFy@Qk1NF_%%@;UBbX!Ya7 z5tGh|%lzkw_RzNSH;vpe**%{3_snSc2m(VJQZA%c!R(PO1vkOFnUM(OvxvlEgJ+*6J6Di&0$#2_(D_tX{J*27gBN}eB`!+f)-r_&dV+{GSDuVIl z*CX$3tm!$)`XJ1EyaXWJ;20dB7NWbMdX8BKT60d?uBR&76bo}7$iM%3toreb2Gg7? z#@ow^m+4u8CgsI~AS)7f4>g!-9!DWG(E?g3&%SqrTeHdfELJ42I!6?kY`yd%0FB%H zVVz;@W}20%{IpY#7uWr~FdmIXUF(g(wnh;hAG0c#-U96C6XnBO2{~z9mgvIfQ@!f; zVm3Wnl4AaMf^xU6d*EtFf=@t=qmmT8Tt`%WU3kIV-?hKCh~5|-bkVL)?h$+#H8L`x zbYz>M<-K`;Q}+bjg-!gudi$HKvi2p0d($!Ham9D-q>10ttxby^RviKg?vM)J1vrkw zbrcK{*8nh@(l8|A&oXA|zI=qi8~lYz`!BSWl#?%HnRhF6FPrP7!CM6j&r3+VOzMEO zwFZM`yX;!s6N(o(iw$hclD!Fou6Ed)4|?)0YCvzetLwI9;p)ValT$5Mc6l(874L9W z74r1Snh7$btx^nrS5S^%Ywz>p?O|lmo#Z}Ut=5+!1SM9CdzTq5DG{iXbnq`5L1&c| z%M7Lo?JHF+k+L@3zs2)h`ntq3a_d8>QxNIZw||K#1*9GO-;W}7(Y3hqI5&>Mf$tL9h)>Buu$C+$Q_d<7D`(0hLIVum=eU|=W6AHNt%`O_~ z$LklFtmU$LRX(gA5zPpSF_2v{&s)-g+Ve2H#-cawLrCCGrH%qtoqB9Ywm5^-q{i8@ zoRk5N9-sw^F0&k_vm0%2@E+7}j6OC$C_kqZ_$#{1ak_Z}mzGOTT@ih6p&H5SRLNW2 zr+qHyJimJ1!=%QqnCfvmT^#*=O0E3-*b)o&S`q1?}6OEjyvoOTki3Lj6N>pj1b3^PHW!XPb(ESb6gx<*A;`Uc$g zl@a*!$Rq3Hp1bpb)januOdty=xU1;s$goEw~sb8UERFjJUg4r~SbaQCA19Q*lzc9h z_5F#{tJWtjtty|gK^=~mh+i5mdx5eT#YcluM=;(jsKt@)^|w~DpPiME`O_tPQDute z4DOW^{%+Uln1pRS9VCZB{Ul#1oMq4hEgW81&t;MOmm*n#m}ZhxC&m0WGkp)rhxH^! 
zJCjJfk9w28W*a8kNOTtFc$4)U;KR!>MLg^`B)P!EhUlByy^+>p5wPf$T}Pwlb8sk8 zD20i)SH0k1LBcogw$7oWI$d)k!su5ILy0uUyvun#)|}cVWWbTo9RiaQisPPRW~Df8 zAfd1MWu>0#Kt1vJmmW0C7y3~Smf&3ZAz?tEKZ#b$jnEc=g|nAgeCOCm->k4h-QnP8 z3MhT_tm}1dnr}loAV*Yyz)?Gl^92G1nRlE0UGWQ>S%+pSG6nKS4a>nH%@{ zMh+k7iA8kF=)7P~H4@iptzyN=Eqe=0iQQ`SUI?RHYnFT|?|$xOnaX5lVijWebL{clm$~83pxC7}!R|DXr8>y|5+|D*NQYfa5WL+johIvh8TK`649Ev{a#ml=il##imZS?VKOD!!5kqj5#mm zs*mcNA?mY3HMU3=`8p43X@wlwvNo=%h*4d8^b9l|{er=R+$MSoej+c)-SF_Fgqmo2 z5NfXG?$L77jO-1UGX271|20jgoO~l2RO&uI)+sJw|HEP3KZ;LNTfQ26O9U$1G!rSAXm|JxU( zL-apA=faPqr#k=n^3p$9trxC=Uz)UXJyWUHz1ltW7t_kWYu{ZJI(fy_z|z}o7u|RM z7nKco-T(FgCjs*mv`7@e>(A=UI{S-`@4rB!KmOb%*N{6eTQ0P~rV)v>|4BmpFFzn; z{uHI2B(h%lDr71&{&x@atAC6B{1bMqi^+NI zsgf_Nb3cF^|AUuvzUI&oZ?}cOTXYp_NorF6_S*l>6hHqhc>0e9N$yeY1U( z`;&kDSpRzTCaeBv?5r89h5p~aEm+B&f=v2Ik*95wgp!>9e1!k}sg33W&+&+3kmv~` zw*Mrx``35yPHo>MZT{<$e{0|Wy5zr}?0@lq|GMP=>X-ljaY;fCB{LuYN11t*lhOO4 z)q2oY_)%}7{?5WbS(vKw0qLihh6zG#y(R1m2j zX78iLZJCgO{wI_#!7QX(2pnCqKQE)Kg>#gYiDpqRDNW+++f=Q_= zPjenFka-=HRHst|D_20#W~t9^Q){uI4?>T3Os!4anoo5 zzka560?X_dn{>6xyadZ1U#T9Nr_i1OREnnlMs7Q8K!-9OY9q7oIgi41giISwAC7k81XCyef=F>aJK0f}G=H*`(mGZ+A> zm0A19VA`I!DV5lKXe4>wQjqumBp%PD?5ncVMEWMK!cKXNkh20xO`{C7cyZ2lDoDP5 zal%~T2uvqceuJSsNec13LOHPe%}&~8kjIN$uTDY47dX=4OP-ZZXL8PqT{4jqF8EE* z*{D&Q!*{-Ul`qUt?I0V!t}n*eukYzSAp50)BOuFMraq}$F)OQ+9?y=#sv(NlAEgp{ z-?z;Gl{L!IEwd#Hw5ROk8W0WtzCaDZG8)ubR!4G(UDHe>2x%(E7zpP!^fg53=AILK z&XRLoPU@T@xzK&F4@KuLdy%jXhp42JMUv%QE94S|3UNhOv7goCczHF`!pr5q(mEm3 z7fywE!j?6SZBJ7AJYS#^*qOzF{89p%)rgL%OZffmSH8kMr-pT&bugoAe+q zaAAGq_Rd^avgsVshZnqCne+W({ouH(##=k7i$NWgCIIiXe66F7kVcLu+6$tcI5Sf^}_T=?qCx&ECwQQ;R+7|vn-^f{qTF` zWjrczLu@UY7olrNot0XgS@IhGbo*U|6Ew5XC?I`vov8qTbU$9_xTb5a}dbD4o^1osyt_^Z-z^0nYA0qw=zqsxTxVi zd27ps8Y3R8b5OdnE1g(0^CfM|ToI!Aw^1krX8NFa>V!I6qW(C0<5J&()s_2YRe&gE zBrZ1mY%)Be68Wlx^px^S={^Y_h_8+SNSFj`*0x80u79Y9u;0Zl%D# zhAo4$72DJJqi-{Zbv@5u5XNsKSxv%!Lid-NcfN6rpLw57a>;~(;k$RhSL$m5fv6(@;r6r>;=fcy1rBc&$ 
z%fH|BEj>XP>jK93EOdM2&sHpe&D@yk0-1yRYQh_*IXO<1X9+tee4Nrcf#s^C=L=GOgZZXc~Dl4X9(3b()IVU)J5yb2=r%tx&>FCu!M5>~N(cku+Cg)h*h-}C& z>@A*j<7%cZtN8dqgwbj$GdJ6LLSa19Z2V2BMLAa_XL_LT4ZiyUz5V^ee3(G7ODDS~ z-a-w9{E9waYFdfaJ<&<1fDA_(ucdGRY~T`qgi^Ic zh4%*|Onf3o(9Yiqszi3!)od`x_s_$+by?HQI!KkuTe8(|4E>32P`xa*tp`ZlwG$)jjfo2<;i$?~I&IUVz*j7WI0Shh;Yu1`x}z5h3cE+ashr7+Yb5U@4^aD!5>{?mS;r$}^IpT+&1Qx2@=#@@&X=oYbVb?`K zhhSS6tc9HxG`ifakRx^VH?RvR?muJT6VL&h+Z3<58+weCz%J_rTcCC~$2%HYrb)cA zJRw)0T?juXx0!oXwL+`Zh?vz{7C;mV((pylh|Fi1OX$=Rk6F6%bY-4J6AfKbTzbLz+QTc2T5#uRAG;Am zEQdU?leobQMb^TLlfe20QSYb&>6U?at4;e5Gg>g?6a|9=V%DU4)96kkIgV8f6Tbjy z04#LR*5c@HgKSn^F>7_4R}e6kQF5?Uqow0M)!JhpdeFq4S;hBu4D{7F-RAz9h3f39 zGkjknzNx0%K+SXG{me4td%y+~0V1pV0nL66XY?!@P+y4#z*exD4Zj&jh}A9aHWX{V5AWN>j>aG`zWD;iyA8ItWc`kA zS``|{G(29Nd?-yKZLbq?LEa~NshG7E1`&QKQdd$pITRm;N3u$_qf#l!E)Gqn+27^a z)=0j!xvh!NU{maXnf`77E|GGdZ8sm=<&fX%XKZa5y-NQxCH}Q&r}Sq}!kf@evYF2H z+1Y_a&%#V6*c?|(rIrI*I!mUq43!IqVIO{xX z-U{HyUr>DHtK`#&V~}te7q2+CgFmy+r=AtWrxb$@-cbmYaX=uJC|Q zub;^l)Wd3m92zHXr#;-T75{3F3SlFJ>T(Dz-1q|EJ)$1hAS*~RV;68xFcc=t&h+P^ ztkWBV5rBni%%|&X=5DVc*fUtbCy}oo>$nFyb^l$2>7 z47w@s9~(Vxae_A2P^r;vi&X{P-J*{-y|Gk8+TO^=Y43DWC7pG(Iufre41e06y&s^K zN)3*uf$$SLjric%YGWrB6Wz zqlEb=7%5M;SZ~<>?OHz~Xt6&PN~3vLRD4Jy6tijMe8W|AQlqCwAz?`tQZMw3p6U0T z08p4@S>}enONS0V%@Bw3=W#7{c7m+_HqO^U-O_KOu`z&6%9KzS3F& zQ~X;mzYoDuNRBFBq>N6`PMOc69)s^rPig+%qDT0EMbij`t5*!)8GI$w=!CZ054=pi zpXu-&|8BUmC};la8e$^tRcDeA5a0;pIea33Pc1Rh2r+IS(5#jCgvKv6b%FG4Ds{Xft^kwa;|3yL!FB@IEVkSbwd|U>&XN zf(L$C@s4%3uX#m!wHqe(+;BNTUSTLnLjL?1;FQ;IFAeibm={305r7MCHS&5bu*Xy? 
zP1Z}Z!`N_FGv%t?;!`?r?0`*hos>;OuB_kO+0wGZ(5T(N&6Wp3UQy#qXP#}?sK5)7 zl{wsI%6pKJ=Zs2y%5S}+vK!R-qj40CdK&#~@pS&@aOUXpp7L)w0H5wYBP;SlyYH?$ z4r|Ae*WA~e4sOE?)g&R9Ippb_HX10?XS)b+E2K(2t%%MiN%)f`Oqxh%WU<4g8ttsK z*qn6VK3~HhRg_Ikutqn@fX0rOM~HtgZ*zO;|I^4Ws3Svwls1<~NECXdSTPqeIov$8>=-5SbNE`6o_ZR9GVSrV{F`SizP3CK0^`TcTPwSRR z>K5$QfjyDxTrZaiNtKizp8FtBuUPQ#@j!btpv-wLWB7&p?H}MhmcHs4RoYI;cTYSy zeq6kwtj>;A%>GW(0kNRARvQ@aq@Y6EG1h)_(6BnSdJ+k)0P3!ohdrmEn@Q-<`#pn} zb@4#harZ-fAQmr(e50+CUMOtTBKT#^A{Q7#kE`3UYp4}0ysQMqEw_0ap~b9>x~lkZ z*mfTgH60mhi&HeKzQLiEB$?Tz-`0>Q+S3~8hW5)%+*}?PoDpf}J4EN*2^t5lh0W3G z$-cPy?H^846j(j#yM_nZ=JB^Fnp~68x-pNzRM*8t_pEx~6DN^Ja)enNBx`NLSzg}n zv5NqQUiEuXy~WmBGq4AGGL`kS8AvtG*)Ch^EqN!%or?OcX3zd0ZCx;H;#g%~X1TT;ocx+NGbNlJ1*L`x&Kg|&2h~EubjGSy zxP5+n3zN&iZo6T2Klc^hHZKaTq8D#ZtR=qKX4lGDR=+Pp$Z zS;Oc;s7e^VFd+Yv0|_zF{iWN~PyKm0sMnHu6o?<$SmbS=3{`NYLLC=pO4m10TCMq} z6CJp&H-g0tlKqa|@v=Ex$t1l3K275~Qnvu>?=Ei;Cjt%>pO`wBaGx{iLgA>VczMZu z7mrNn(f>g}D!@dEpq#o?efz^k&nvQPZ}6&1c(<;{Vl8Stm1#b3z5hx)mj|URi&5-` zwzNgDTGK=(MGffwY+by2MEOZC&g|z#CXgGm4a>xh6BAvYga9plbv`|x=6+#=^XazF zrGjqeqzc@tc{iM8uB}&W#FjGxnxs5xcxr1u3M(3PDP0$TIG5x=w-~(7iabtHPg{SE z<9b+#J?j#pJ^6e<>6I!7rePeucg06jb!KOBahYRo1L{o~IY`8WziIgCXpt?)4bm~Z zl?L|t+10sBuSzx#^(G9v*BIAfyz`p;>{Toj-q^Y!oV@&&8ne`#bdazeH&SWeY8S)s z3tVc_q>w20M-OK8U_m{lCQk1_vvO*f!DTsLztzISWBxA;bmG=uIDZ~BWZo*+w4 z&i&+TF}>gF))45-4vNa+^S0XDi^u z|NV}YQe<#|fFY+U_Kow0chi+J(I2daUg>A~R(8BSUOh)55l$q66k^@kXL>XLM@qbV?v#67To=)#h?;=E$dSko|SqCd66*aA;p;4ojq*1*IGNO%CRLVLgozulrqvha7W@ns*)^Qe=&ExpXbX;A}E>T7IIB4 zk#=>otmCZIt{=`HJ}l^e5Yrk7yEx6OE)sBzV@RvaIzUD*s9A624pdh=C-9;{f&<3u zJvD||$cQhs79Z_lMozJ~U!wOUzz|jQ&Dz_ouU0Sh)|{H{$EQbUCmWpNuC6UN-{Q2oRe`k;M32yO2iK43kLXlFkBJy{&J_+P`2W! 
zEQ9@*TL*QAz}!Q(*l=(uAlv)5-nD7$6){YJ7U9I1N6<5ho>g-CMUe>U96@nzje*5V z4PpYH0*i|^6Yi)1J-)z2`S5lU+d%%1S2+cGQQjf_Ri;%uXhNz1N2CmhVAy)%N^Pd< zEFER73=sBjsU$2u1Y3xu!#O%oxC7@#9W)s6g1j|31D+wI1dQ?NVT%O5TaTJxw8gW6 ztC7_uHl2xaFQ;$Q*4^=Y6-#?*3pdLBz;t{Kz_Ks-#S9W2;*SFJ6Y%o)-rXpTzM{Qr zxYG?)B*90W<3YLjd6+Z5tWvwy*uiv<&aFC@KiGOwuBt0)*e9Fz11G^4H412tC4rt@G!xd3P~OWOnu9|#b(UaUY^ z&aaACyL|{o0Df7NMZi z9l3{bZDTL<8ocDNEA?Gj_AHw%WLrcJ>iEjMfsU#~w$JkVa+=exeRK?NScb7h2E80~B; zBmzk@5q&sz1Q6{fB!odxKq(G}Z#Kacx!@Gvl}g{o(d7U~qtt}K{3zwBt2RgA53b>= zmG{AH_>9aQT+gd>dY1kl-l1abAG}Vq(`i+EX%pTmlc}FhwT?-7(_c!)kMd(rfB3AX zz#ByN>2}0}5yMo(d+nm@9_3mS)+oftg|}lbf~EXqNFwi_-vP|+n>jQC&F9!p8p3}B zRBxe;NoEb80MPWsidRFQn0Gb}623(Aw2j?vWO?9JVIBltJD8YtE)F zHI0YN@t9H%r>3y2G`EuUiN*VBJf@=UBqDi6ihV4&-*y9Hz`pd0}?vtS7 zy<2>Qm(mJ<3OJ+22I_<6S3uKysact`*yKV?mfG3xk9yKbI_pdV4>7-^auo*)4LNe6 z8yRYWjtq;v#P7ABjen~(>g7@U_5J}zCjzIBV0%0-g1U0c!S>dU4=X&!@8dDpYnYl3P+HcMy89t_g-KG?GEfLUf&@KE*^KMCG` z>lbyXU!YFdUB zKMv~f4teL~#M(jXxRswri{^vI%ss+ZRQu%A@Lu}}LmwCqmK)U@S%KU9?)Ma3O_wP$ z1#^&I?3cJ5yWV7O{|FRwM3O*KbgJJQ=S8#+*ZeX5#k}on<@1#G)*OxP#D)d?etRUT-d`<)k^M#KY!jrXz@i6#;|i$jCfid2xm)|S$)+GkoR8^UQ)T++J#pyxXlT@N~?cFdw|lwXCFaCgKR}hI^k7%YWBuKs59t6 z4#Rz-D5$+O4{ndTrdMQS0bdB}mi4gZ_t}`icjpwDdt>|^vIPvPZn$H%fIL}C{}ysJ z`z(u~*`?H)O;MswmhYW8WHQ$k9fb$zWkqpglUaLV=tiHNUXpOCjp+MftVxy2z38Bg zsNvTFhGkbFtHW4YV^7h^Ww7vgr+R&0=2N?lHJbs$u?>G$)PWMZgme~UUH0&}yZzTl zR#cmGB{&kkO3fFtX2UoG#hb$%3J?c9It+t8tKdNLjs@uSQ4MG*yipGbFi8=Ts5O74 zDuLYc^Q5KsCV~|Ro}jmJ+4dhM<+c3&C=>tR!z`r-A>rsVGvBz=VRWgo4FZBtsc@ZY zt;P^h2fRtS{9UEQXx11G+2+VzxMS~)aONv#@>3~I>kP5+Hv+@#U|mf(S9h)2z(bDz z|6%VvgPPFxe&KBaHzMj55d{$&B3*h1u}f8YM?t{QA_NF6L_|PEL6KesAxQ5vgd);A zp+g8r2>}w27Dy;}MfZ7U?%AGupZDXPIbZzXj2go#|MjbKzPP--M?D!TtXI^ZqQuub zJW#YmK0Ou9q}}neQlJRWGucK`i*>xqKS(K}ay{0|mSf*tPlJDD0rW(buUk7I8~B(V zX>`5};8{yUwxd!VY2el2T!|}nOqZ}58L5iTmETd!4}~4){B>Wyx<)^e0S<5#kO7hg zgTHX%u_c~Z!`lalZmK$3KPVLOq5qHMZl44ah^Sb}Opn-~_!O8KPXZYl&o-cLv5;%b zhAJu?>L;&u-zzF6nm8Kk=Neunaw7#DO8&ak)H2Nnji&CRvbKTlLA@~-JTWMx8FkJ8 
zJ9lJ=M+k5fr}7}EO{tnBLfT%|&GyQg=hC=jD3LhoWh^n5V>;Zc??qJzlU>zW`51w5 zpd=#1+_x9#$+$Y{59^~+Z7G_-@LivHUn6~}4^|WT9hk*c&X5Ggv@0wmJiC3FRTYcZ zi`-7nPN+FQyZr*V{^CM9jEludM8%L$WOneA|6oCT`&`K|*Cg-tTw8@P-7f^X=879F zxliHB+;NOOp9FtKCjfFKu0XY@ZsW5(rFvYn+ItCeR|;xqZ9ZM1_rbt|lYgmwb8pd3 zZtCvtw&zR>uUPAhTGY%$dQeCCdLXc`KQ0?{39j0(TY7#h*=?*`1Fbp!NR-PzTf5># z>iyMMKAvj=!-dx9Y?GQ6`8&f|cWog<=Vvce?o=tXX&QNqjH2N+RS(d~9yVp&qAQjS zhw0_B9b1mYHhV)32~!Qh>>U@9JG-#LNxyEtTd{QbEVe>OtNObOzzcJ1W0-Cim7n4> zeXwHE6^Pv3VqaC9tY^z=7qmP!hD_SttRZxQ%V-38kU+P(n8tx2U~u--(r0i^X^8xN zv2XnZw9)pT=W3s))5n50QG!tY1DJP&jD;l~@sNnD2e`%71c9Kcz41}!Ok#Kj$CI>} zaS6k=vgy_sGXVBM7&(g*Bd-HwuRA3JuV^&oGg_Dzh0M)9QaV1pDxHv_m}T~<9llYz zpZ;LqPdZkNqfnK0)?Vr6JRId%EVtZ0DS?iwX}4GO+K8eMgd{qsoXdamPH6bIa!A?n z$zd`un~5LEuvU}N-hByIIz~PH|8k3i_NZJP9W?_zwfy%TwKd6xj@f$HqzuJ?JsIiZa%TIVEJ=42__&m^7n0Ofa-k<0Ah#|EIu{ z7`{fP#m>DwTlc2(s2F4$n8}5;^6jo%)k;6mA2d;?dTfDVr15c8?c&RT9R-OZvnVIT zt)>}&wuFG&wpPi%Xp(2*hrt|;8q7t?o0NJr9Kj4Pt0cQa?v=(|g;q@F_}9qU-V zO7?b{>z*`3KwS5%Leq*+nvT4n!7#L>t6J=De$WOBK<=IDfTwxfPlo{m&*Fl8B#qHB z9!J<_etBJ!0=@Qh-aDaG5-{;AlPe5#N@_e4MAty9S=@CNxa8eE4}IL9)&u`K3VR{< zUc8f3qSC%s3wMmnwo7CH979RaX-6ky`oYy|eb06yxTO^Z-ZB_U6 zGMBE%5ZF0CDbu_FC|XXSZF$nldh1QZN}EVc<4BHpj6}A@V5>U&YM57c!cQFRCz~&G zW*HUbv%OhnGtztD(6I(;;wSOP{W&`?T*-Z%tvApQM@p6~UjqI3ecy*N8el#;{}OD> z3(w<6xPWfV>z$(oSunO&5Bs?*@r*b_hRz^aS6)2(kJ8-(mM96t!n+pj;(K-PHINh> zMAmu->*pt-{|N`AajXeqlDa=U=Hgz_(ULd~AzjX%dP>qmu9&qXy5W|f`N^vzn#g$P2f zCk!1bN*(W|}FAM5vV|JG`2`x&g0p|XUJ z)j`H1XRknO#M_QUI+~V+i4;i(#_0c^xcJxEuszg7UBBK$Fsn21@;o^6S~fNA&e zeiQRz?at~VNUy7OUlQ1%t^X;bk)G(U*<)0#twGOv)(9-c$xGb}yZjp3OeE^YYT0(! 
zqWTtlFdy$fM&6W5>EXQisYakHoL>ZxD|~sF5fh-emGlsAK%&C$acS}PKflc1<3)e?eWCwv(nkMlYxuXH$$xz*|6vXN z*O&79?fYL}%5Owl^fmPVnlEMjV3lH+ScRJ6|1UQ+@bH7|%7}?fy_8_7js9DQT^EZi zS^mRU@;k~N*ahk4XrR(2zyrnq=O;KI!`bM>oMq^L{n)=dvi=*9@E@*z`r7<&M8f}Z z2>WkD!vA4b{eLyJ&OqJakivY(__z+ge|1d zhiAJk(NWMEyT6JYm-9vG>D_$h#qW=B_FC+QV$jb&X4*~3>|QlA9NHIl`1EsEY1K>j z;OV_r`7$K#1wDWH{6sH~YyXR@j2HKxa<}rMB@z}vP;QY>cBU^rmhM3T0b!Cn@kG`EV$g{E!?p71~;vK5UOhs_{1Q z&vZxwh1{_{6V1^Dg(i(?{+oB-exQk?qTbm@ecSH(IM@O^^v`ei+I~D@53@LowgAFB z9_8K5urch0qk8M(}~pA$_gFP9mnG^%W^=G81JF;BM%8JM~08Pb%0{J0NT>CpJC z6@jfb2PDM|J_2itnfl(jRMb`TK2$TlxT%^zMo;_E#9z9d8zGKWz-$(GqpRGexq4PB zr#42XeTxwH#dXxOLyH1;&J%xHX_k{tY_2aF$!^Y??BiZ#Gk7%?CYCm4y}LW^JDTGX zApGfaRFID{;Dkn(Vi-_Q@ssjpb(=toib>n>buH~#1B-t9Wp{YeO&tfp3?M| zL$E(n|M0MgD3^pxgt(To{MD3ENrvr`Ga*|_d!6H_x;o|^XasXf}c4v?-L!>S+4 zpBXx8C$u*D4o6>~{Ayrpx(12NGgS+zLi9Ny=`CWMXle7cYD?#vP4h`|X01&q|LlM_ z5k)of)D2&KV+nf?KCK|D-$XY*W~ReeV$Q9uo{)LLt(id!R)=)m4(Orz|cC8Zx+9s~XJ2`M)<@%yX)}X_C;IhGDS^;nds{OuHoUU?+(PkPb_%)RH z8J_Z3+Ob+Otl+JeYd|km>m>T5q)0fLTC7R^Jtp3LUK@Am!#BJY!wA0KVQi6axZE=i z@o~8^_npg3gu#`$*`w+_hG(9rXdfJ3d&ZXd#WC+Gc9g>%+Z_kxlzIBn2lxwImN0O- z<D$iUjR{gf-6iW@HTM069OtmdjfzK)DQ{mz96C}EBxJ*1rf8Iwg4gL*$pNhq zua8zkM)Pe7Dk>#D;;Rb^yP2G22}RcRE4R#e<{i>iq*SKmp2|bJB9rNtFKCr zd-yMol-_Z|%tQ{!-|zhMpBv>F)6=xVD?m#J3aS*;+ytpMI zez6sQ`!RCx`S4pKj%(fzLHIK~k931!958XZ^OANvatB^u*GzLo_rUSQZu?=(75h#M z(Q4Sk%}r zeU!DSzrmLR;rE~yZq;xN_TJelP_^J9L&9yJpHiaDQ08wxJC>s60z78h4Z=WCg)DLx zYQl3bi($*2Tsu0=bV-AWg5Gvh%NImYi&G3@Z&xCmXVpC2b{E11)bWNDmPR#cbIhj% z1mx}|KjvW(YOq9)9e`3RTTQ+c`#@Cp%*2 z?LX&9Gc&5N%Q-0tjb&zU<$i~y-ECTCsS5D&@jE)uZ@Nw$dBKy|e{fvkq$vF6f%wo_ zKAZI7xgQR9Ju@{{qvh{v{_dy1T;7zZjq?s&TO2ofbFvdEGEJi{K%XpbMm-sbADmtS z0Q-VY>U%adiwN~&Ph6KS&XGQ*YpO-rs@FQ~sf;dFF62fmf^=+-9g7s5Z9ldrrDmle`ig{hrLk-d; z`%m#Nqf{yiBkY>Hl$(MU&k5+6jws`LHMhJ-C4nl2ZBagp&T6X3cjh~ zB9!Q*jAz_GSC?P(Nl;c;eD8MphGSLd>h>0FW%p2%}h}owTE8gU}sS#HRD$bk8p^!3Y%YBvgR{9p9f|4#&dZDZ-httYF&LB zaBy7syjdmzIvRr0&U>upeG%jczTY`?ba>Rb(BhDL%J2zRkKkF)+useV{iHFiYZK!_ 
zDgIuQrAfY2aQcf(o_UES{aF=|1&2`evigV2FT_ic5QVTdyNd5`1cWR%`RMj$=(D%R;o#Z7(X9eNS+H5^;QRL%_KFdAm1{FDJn{ULI*~H{f&; zde#Fg-20<~+~pdcZNweMYcEmU2DV!wd8i|Bp%@0DLDOJCrS9^05uh|FvdZ&PXJK?Q zuW2u1ru;%kU)|papU5%pS4|NQWUvFJs8JG5yP#vWy3M^MLL4pVjd1!z7W}oFjxqgf zH{B+v+h(e~hKDYG;DnafEyb8-)`sIhGQlqRl`%hvn%}Y?%s>koRrZ1mXsmE#maa~* zSbv&_IMo958Hp=f&Rx$iE1C~rS9xV3(iJa&pnC5Gy{I+$vskUlIM3^xluNc^@FTw& zHJ{kCze&k?AL%IHGB3%Q{ZiAwIK|_`bAFB^*xKk2qlI=ZBQfRU`FTa(w^S}YS?P<} z*x!Aw%RTJMyZI5)g4EN$^e1w@vX(fqox$O&%{Xrf6mHpb47J6SmHX^;l%?pr^5iMw;RbJbZd6|ed7bnX@oR-xYMvMMlUCfKm&PW3(bo#8rd1;ZO{8;| zIpaes4z)7ifDY-)Gno8UL%-^0cYc%=&I$^GKjM$j`2;<>zvLbRN*9v|M9`Ywud@o z6LzO{(2g66MjA1+vo3>R&LLSktLo&et(Q6*&?p@dQPMb% V-yl~H|RIYd?>T@#z z5P)yc@)%{hO)A5aia+Ib#9YC7v%HsGJvC%5Uz#1ZM(nLD{S%>+_ua6_=_8+&k2+w8 zVHJc#G-)+Pk#?z+Bc?`C(4s$h04_R?C_t)A7d z^QHRYm?P%aBnzDCPmL~p2r7Iji6a)W+22Z^=+`gPm+YCh-;J#vr*JD=PKq8>U70qx zKdXLb<^t>phC@;p#$RQPn7$zExl+t^ye)G5v5{k|T+pbhg1(WGg6~&N6`xU3Sqn&| zsLx$S>Bu>ZRdpoN@@f(z4w1VaA#L8o@I7`rJbGOt{j530>9ZM0;gHwu`N?`{TZozW zbXHz5t)j(2bD}I35Yjbv1MB+`81`cT3S`*T)kSj^4#kx5k~b4^MYpVzx+l zY_Q<@WoNQ%T$CnfZ^3-KUAI)WV=3WVF>(c6@7aO#Y$B~ZiFmV-Z&F4I2W>e+_!wzZ zBnA*AV+*cFYt~D3v$dOqv+RA39pxPBvR8!v6Eg5KJlLG!{3FL9^5%>AHpM#;Ke) z=^9zHTXB|>-KD=ymg^JROqZpAOI6mg&*O}uQ=#GvfkqKS+D<8R|Dc=(4;*sfr$ zSjg8JNo)k(Ye8rhRz^z$C7An^(^qj_AUcF^shZ-q)xM@ktH=oKT@p_^LuiY`ZZmJo z7I`awptkp8w48npmZryFMbKO9g%x-oqSGc5F7ogXO$G3?b7s`WMaJa}eu+>-F?yO? 
zTx(mJ+5x%tsn1@oNWD#eT$;lf>3zep(ht^&*JzzI|qdyb`A#!?DM_ z>7jucj4X(U<}vbkbG`{dc(wgH+P1Rz;~kdyp)5-L%x*QeoX@RF_XPp&v$Shxo#zuiv-MmjM&tbJFYy=?UZuH4Co$>O10$SX8#`BLpH#^g6Ut%RBAe-pOe^0huP|Z9Yrw zu}TkBBLwG-OsidldZg>C**YbjM2JI~A*Ziu-z1yKjnd$Z(wIBue!`GD6UmLRzc!iXbO8}Hdg1Co=(GODiZfd-=QzR9S* z#>{&rZK4>yX$j5Eb+GUF=Rn-ryJ{#l%OGh9uLDXb7tyWb_+n4Agxx7+9)G1nG2(H1 z4!Su47IFE>XyPAVfPU|bies0%(1`2FovuJAR;AW&HhM7q-X!>g@a*{7xWMx?Wo6yY zq&;~RB-eQ#=WopB{;5oIF$^Czag$ZE@t-V%Iu)nadDw3>OXGb$o>id%Tg0_j#J1c-qV>`xh_kM3#I7&xF3|G9(s?k5AcMf zq#u>#`qD?GmuKBRT_YDe4(hGFuy7oe&I><6#1b_)e!&Z(LeE6~%2?t`RawPLGD_1& zB+4dT@P{&zx&}X}0e%YhbHF-p4!Who&6x`JlYGm`0yAFj3ztCyAuMmBk_#+%a$n4>HZ+u5TphG7 z6fxM*iXSt0j@UyDcj$ zmQ`tmeba6{(?8p+riE-XHNvBkuG^hyM(QZe9g%}S6-bb83_>sNU$yR!T?ftV@Vtr0 zO4G;t&C}cUcr~&6N@K9Ruk_Vd2jh=NF^;di&c}ycTt)$7LKhcO zG&O{YOSZh;-mCus;hWd5*-1E}w&CFT*gh|0gT@-vaozacBK|4D)cE=I?%OldQme9| zzLD!acX<^Wj_qfdb(LqyB+~X1itZvrQj((P@i;h0&!$#@#v4HwVv4LwP>f^?F6_Iw z0cE%19UU`%rgGlK#c(!i&l;yByNZb44NkM!J-{8skE`FnR}G$GP%N>@8S$pZp4aTI zPS*DY&n4&3>8npDQ3A-&T=ztKWT=Jw zT-U7vZ8k-M&5ZzOly(9^(&~nlvszSMNZZOi(AewP=-_M9fm9GS84V5 zB^AX0cE-@P`1GZzWq7sVZ_>$AgqC%KY!#>2tF4~+bvBq@T^w%Ig);XEO}lUjxHd|- zofUjQoBP(3C<=kqImOpl&CBG8lTkE@i2nT~;W`&Cd06LX@4Q{60bX~%kPK<3jd0r} zk&uM&A+zqkAJS@99-$7eZ?lL8T>2^(>db1s;vnuub;xsEgYk)HMcQKcR{pX*%7J`wPPAdx^`!3#SCSou04^$?Ub57rB4-)_XCUykCb&>d%5lbt!3yR>#hexj08V|su%$|1wRcIJw`h(vgZ42yLCpUO7MnnUN!D{ zVDU-H_q(oE%{XG`(E8a+yvwM^;siGtMBv^t6s)RXN@V-cW6&1lUa%^D`s%XON&{2k zj{Std18kBIZ=V6`#=6W-=yP<%0Vv0t(Ne22?-Ig>IF9BOlAbKZ!c4U%H>K>8L3R{q zb0ahy@x;AoYHaUv`qY;^2>w(Ho=bB=Bh(h<#&Sm2wR0v7QlvmQJzwEoI!PCW{=0HT zV;`ou_l%XO-ovkQYaqu+aWtCP@^z%qA{`v7l{q0s^6Wq68^RQmI8T>7{jnm$B|jK^ zl$l**NtSmfUtW}y#5nG%zCMRu8a*~16mpu?;Cn%9A`CK;Viq4_)cfmcuvzwJxw60! 
z>17+6$dzpp_A3_|6`-~TyNamkDNxg*@aK(^Ynsf^>pzUs;lBukpI>Yb^m=!OWNawuwCRexF zV`!Zl3EsEbChW0%tZwtS(jkyh2if3POo6X812USbwr1nv`_&$*AsGU~>)|L( z$li3z$F^kY(pI6eRP|@ol@kC-&etiXnh}i+t6X_RlP}XYk1gSCP{~!TCHmi9VyAbG zDTQ-FtLn}|O==OqgKzVw#c8x;728{2W}mlHzC=g;iSmzLndxRVe$-AM_(<+AH3j%= zy|6v#ygM>CS`R*Hh&U#-hEJm;$-5QD1_B^M?iN*mpb@vj2ZcPd9XH?a&iZ5yk5Xqo zv+_2W-nLwlhK^jPDh_tgPx?$rj#r+nPMC4|LTMFt+gQznUe|%O*X~p!R()2T7_D>V z%Ki?+`3iMXm9X-LxRF@`ImC*NSRbCE3{1?yshkGQ)V@v@S;RQWvhE-o!^w zM4Pcc<6a$o6#@aa@nZhLKH^LZzjmpYkF_DgEl$h)S4p#$0#BWncS z1=-R!rFL*>gk+&^tD_aHa{s;+z|OdEFr;cHR|49vbSrO@SYPUBOziJ69pk-HY}52d zXbrsE-t3e7>{q2OLkr7BRUZoBt@r%OO`G^$_=qTe*DDdwK_ycI)eCFBjYo*7O+^W* z0M*~sKV-V0l`bKvQgx7~>9(YpqEpzPqhkihr^rW&zs|nnuk`%BX+q?lb^jS7=7vJZ z01P4$FeFsOQ|@!h7l`e;pR1G9D&^y1UM>=p7*+@$0`fYa<%K!3qGBogtQHH;U>n=4 z(%yLIz{v+`RehdIe`R!;CRnZf)VrQfzMegu|7R7Ck5iaW3v7q6?X3}v3TN zV-^a8d)Ri#A*IJIB&uQBBNad*Rw^ji;2 z_->Taf*FM*pJiwyQF!?yz}sRh*QhKbn%{WCSLT3rBFoO$=61073Gm*C(Zn)_uU+ei z?TZ49LcB65F!G?e_dAw#NO>0oK+PXOht4osjlJV97HLqi2-vsJqFGYT@%2IN|tG=T#|D9qyq9T&4SacddpXS$~G{ahYXQbOz@M zhbc6auvfw|xlKh}iVI`XW8)lR{rMR+Hmy2p1Mxpp zL|IPTb$6G^>Q|cjKj--K$bfMv=`w1&n3UpQBKIXjC=9GzB={r;YVRIfid@J=>$3+r zfUYfeM0Ih%VVA(MGhDyHT>jWkuGN1o?C$*Lx+ zzqL8V+S~*=NO8CTC-Q>*bMiiRd2TuTpa)aI7Rc2cb?+^<7)65Q7KlCBChPcH>m`19 zz zct%zL8p=G2IlEkBm&alTzql2XWnVV-(;K;b>H0*+F;#8BH*8NSR@mBijL>38!{wa# zlV89+N#&*jq*&>JL6=L8mtK(vpQaucnE_emxLk4(obRSkqt6#$QoSTkO)z( zXFaHW7-6oddS!Q&=OQez#ieYOYwtqz8)30A#t#^)u8oazDZ0GZMHUtowJyPPl+yL) zip}PM@AwVBeukBRFLCRnfcAsTm{k$|;`d|j3qqAw7iV*QPhEYQ#@2B7B;RM9Go#XJ z)Y(1@*w_27&m37T<2F0h0$#;k>YV4j(wfcSI6fL2S{%{iS|OF=i_+^Z=7RcGQ%~4s|_%i-QiO{IKYMxTps4#W5@{wPaIgX1LK0*4ZrTp}zoq=sk9j$^~)vEX09SNN^v`f|y*d{6=;;hm_w>G^Gz0e0X zkB+sRmG48FgM_uHMpf|k8!@}H0uLg0GgUEU@;^N`PY6aEKl<{%AgROX$9d`w7f-xLZcjR9>i_mI!edZoxem#SoD~Ka>18)BJJUyZPXZZo>e-7^Tufz<=R18wtwr^_+%Yj3#m8TEeo5`Xw=2m7N1d_-qpiRwFZRHh+ri#*1Sk=k zR?>C@@p{@eH#ww%{;Zs+5&lJ?o4qhdk^hud7cn9^MC>F1XIcx!S3&8#kKv)9Od2BFo8k|xGVECQW=gP01y-h@u$sKV`OHmy^-&j7U^&@ 
z{TP)v5<#3)DE*2{J6c8DBK8N*XLh|t6C>UPFwaK4VQZr|f6u%T$bAOz7zA)lcwY71 z*Q4sean;Lekg?)x)xPz?%zlT6k#40?@Nj&Sbn)4`~^BL}wU)TM5YDv|A`~_k7goganbt%Lune!>u zjeOm7=8d)W_wv6ghe{v$NyBGk(vjtOgg=~IYm$#wVF&yx-nLQ;KWj)tYD>jsgx{2; zNVFOEJQR?(3!Yv&io-WBj*sQp;|UKbYQK5&2$x*{II9)h>7GnlyL-1v_-qODj*oMf z=7~!i{Gf{cxhPYF%?6yEXq(UBp$WMXQn!~Epu38KBie$#MHFR?!%ai znkw2XUj(d`StuT6VfIuw458wxuHWQfAcQqCeGz(}HhQe>*Ma-0^tvTDY_^^N=IbWo zgyaM&X#wCk_s6}LqJ;wgnk(1>JI*4(=x$aOg0e#~kF(qNcK?=(+E!uc@E`Gk+XqVS z`OI=W#s9*k%g=c%&~3ke^N13?WH{>0&Ug z1yqTFYqyLoYv6udI$9=3bB&^shJ9Ucl+o~coq4QzZAY@$$>4*#L=*_nNo_>>rT{r_7uKF-2T(h+y z?x=w3#uBAvm$uM1;io(8JI@Uff&&A% zMJzgmG10_pKJHCszoACI?@y@Q2n)z-vhdgpjNw!9w@i)=8J^z^E>)BtXhCG?1dM!J zqCVRXYLd2WM86fx0D!*(HBUcmQG z)B{5OztEzz=ivWEfo3pP=%A{M$knTwpdM84F`K1Pr9*@(NZxcE(Z;*u+y+|@P;A35 z+LRXXs2Pl$H~mx_=aeNb`z}es3|O)SCAHUeKrs5VT-gjB|hVhHi#G7$QzzKob)XbzEw8ao&~U1J~*&8l7%rNvL+;nIFV>%|}EK#TD)H zBHI&5f58I2+nX=2dh_-I?y*|@5z4^XEg&ChADTp**<%* zeTjU0v<&A}zZss?&R?^hS3mRACbClw=O%HlIh0{bV`plx)^ev(t!`914mB5ux%+Ip=3p&21gy^q5Jwvp zsx`f1!FVH|YtrZQObHCe<_kNx`jgOOf_F53_quRU)Q+P&&%Iz`7v|w)I7@#u%f7eckn|`R}-9i zgw?;X!>=`3G>GBNh@7c!^yY^e0I{^HIB3-r()J3Z+^xs5xJPIXX6i7N|I`l2uAE|L z(SdMo`*2h2YRbIV&P^CqKkl&1`y)BGdxw6XyU=wUhn5zPalBYtyjuG%_0KAz-g%Oo zTiM-sNkod!V+DYtm?&znJDH+^NXPel-I)?VZvfSoeH3&!f6P=b?1D6zm`efd0C0zI{@`^g4R@s zPlvMtMnI&<{Pi)DO8@K++qh(cOM#y>B|M8Gx9>gAZ-n`;c>KyjsxHxGRa*D62=;6{ zk2(-48NP;!w3*j+m50pOH79bwi=fbvCX9Y!e}Cd%SpZ3tkj0V<=uh&J0%}UTFVE7J zFID^q3jPCfF_Mgokk&D7an{OnP%$U_e;uh_3$mZWmLev;_J^#nXH1^ zSR+~)d?aqG>b?)vN<1vEJd6Gat)ZEtx;$<-YQ@_yyU!J)j4>OfCeR-wj%8n&qzcBd zXttk2x;Y(bOZgf%h&_RkX9$icVasr(Y9s`TKTvy9JDRm>5oKFRDGBQ1yCA9X*?`Q2 zpddJ(_2$1vk#~SVU|b?5Nx;I{gvbv_JavZbO|h-rkTZ5MiSU@i{f=e)SE{6aU44vb z*GDV3k=cjF*F>K(O^l)VhXBU$DDR7cyK4<1F}{g#bQ==Emm|fN37KnSGT-7{^a5;e zp+f@)nltAHs=s-^B+CQ4S2}Tw)2{gdP6r6Sf%9~;PP@5E2HXCoSmIIDs2u(!X=g@xR_Ug%mMplAS zv{20sI=kpqjyc&ZzkcU>iO1bi)7uV)Lh*;8WJg>Xr<9|B+{aicc)>w<(xbAQh*=H? 
zlYyT|8=cJty!B~J_9(Aybto2DFaX3qLc)5LK_q3Jf~ylmN&EhB>=MvN?%!aB2k_C( z*^c`RA#B==WIC(r^5d#K=W@#Xh};Ej8drHc)m!G^Q3otepIh-c3^2su8hpYwGC4zk zW*AJO3;u5J#9^gPG0*kKss_zNK^Bk*>zdGmX=ijEP3*}#ah-El43`AEo9u7gOPm6# zAFNTtWxT&45=d%w#L(Uoa!SJVY?gXr7CLdqxWX%T}d%Fuypvw z=D}6r)p}2oJ1W)$C}s(m%K6pFDkY6S1?s%uws8EJqS*?A;ZfU8AL6h~`E1_tWBkUL|a zl0_Ox!)Rr49*l5e4FP6vbY7sw=w-un=EV8sY_q(kv0~3S!dZ=pJ}7c$n0aGUL+=)P z_ruwM?@Dq%3h)RS<}xQd8^s~i*V%r+=`9TerN=jH&>A(^J8xU~`^ApBH81!0RSx4X zbuOf}6=8NZVA;hM6{?co)sJ^WmCa<3lP5y;NM0SRYha=d`BpVM#%rnS>fI@Nq9zVU zo0a+v9L(nj#B^Sb*HB&2tk25iW}ogiErkMJh$LfN+xa;Ai%v}iq1*zo^A0c6-q4?@ z^tn-eZL|r~?Ndu4TIA?oqGUcVqj1Yi52FB|u#ZMO9U~5u0M)oD8=iB>ixa*^zm)UW z1(~X7YWylqUQ*gl1EbgwY=0ECaADfnS0*nm!$=LSVpt8z=;HV*H7+E#2 zH7;7D=(%BK^ZiG5NS|$PBA3en`ldVoy#Zgm#mOlFkZN&4~`Ct+{Ht4rf@R4$iXpq&XgYP8@+c>kMj$ z?|0v2f7<;866~G`^R^~$AKY=xZEpCBOv!$su=LJuNp~=wF>d@BH0y3vP?3c}e>0Z$o92d7!4g zdGF@aPAd`P!WWnXDXUT$#q>uVOs&LHn-Ra~bpo(;+jz1T4f2Uz`Jk0=!uyTw>T)S` zPo+y1)pDA<7`uaktziz*dKr@ULzyisW``}RZ-j2KAm@EupKA{;6s(DERtEyAM59Zf z+g`QaO7pZ1jaWe~y|+ug*KXg5IM-ZZ(J_gmfDzqN%oXk{ zsLhr=?A_V3a{*rauKf)lB;;}-nN6M>dF*wJm5){@;(|yrIU#05Wz4-e(0>gsx7r$& z^I_n~ud?xSp_M&Qu5o>ny(u?Ga5O5MUEO}S&Qb+I5{fu>&~`Q%E4Tnm-jYGPgI3~B z{N`qAy2WKB*!tqzU$w*+7%X{~8^clTndIB+kr{&4s5QY0sFg4U{mk?)d15@tvyl))fw*Re`m;=PC7&g zL_~mO0o8E@*%@$~9v8}y_gZ|?Lp)moxjz=6W>KW$J;JcJXW6@F$?x9sp)7QS=Ztvj z#}JaSTFBYqTNLlb`p{eH=g0CPS<`JF)jECFBcdO@6Z)7pvx10=Y?3~p;_gg!g;+O5 z>Q$S4tz7cmb%&Y=04#cABv)t9-13IHyAkS;nCd#OfZF7Ztr_4vXpIr60V=W5=5H<7 z+v!bxQu!9g=y@WT9E-rvv%GYHFU8GnByio{b0DCyz_H0GV=zCBGz;Y)x)~JuimB(d zCgPI@P2!8mO|d8UMDPGwnDbJsW{MtOrz#v<#AC-dc6x!5Vg)Ff-JL-M+{|m$8J?J! 
zOzreb{e!XC6M)j#5)xntj%3s6?;KW`>k7jX2}#HgVWG+wEaJC%g}o%a^*ys)omVT{ zE)hF)tAQXrjHS`cLZs6Q={YasS>?)P%bvJY2PhvoS)Cj?3T(C+WL0;$k-ZSdq|!35 z50QcEwnip^%1{kh2_1!XZ8YCpnK7^cH9;yU`0Xr3%# zlj=fct3|BbOL;J;=yVCtERf8^~Yw_}5m>rXT6x#3d3KG>G4Nc+}) z{QQ+#n?c)qNt%%ur6$cY^S%mlN439r$jTz9I_UCtD;{gAnibBh$|*=3kTKU_8(F>Z z+DCNNIvUY(;x-Sw^X|-KP+_y%pqap|zo#cH0d5Vtpb`rzsc!bIpZqYaZ~yX6sNGw7 zDW8_%Zc%FaZq6eogvH)_uCAT#)9}2ecBO)6ro&|ocWiO9XhZz5%;?>wQuAwsu(7vp ze-nbQ-2CooLNY`3l-0!hpOET1!->8bHcAu$?eb5l6(SMQ!wu8#4g@`EY_C{>kKt#b z)-JS)a==_^B%c;#5Loopj;*P1vo|OM>I>U9sOHyOiwpU0C6j`x_f)nC4OEIJaE+AK ztj;9)&dx6eE&ZH&lNl_nRdx15lasP8FZ5sr{}7Gnb2z^+XM*POT$yGHnfco=v2f~L z8oVnv@mx)!q5zMoL1b5r%q3vSiEuD1GF)Q`fQs{2raS3fZ1w0-@hPBA3DDE4|6AO&vKJM6|X=5a_k38B0qYx>=dGQF2RG%(w$ z1rKnMCT;;!QJm}yE91D5RB}39G3dCBfgJYuBZ-Yn=(3p5NQw%4H~K2$;A~ME#SG!N zh9Dbpo^u^2KVVV0+s$dhdgPWYVh1s(k~+4fHdGsX^c>enO~+A}3R-L{)Dm@pO|k1y zj`ixDUi96l=l<$c#y-=T3!3j+h6l5WtZBL}DV=8b9X6kS`|?A~YYb3x93j2$b-$nv zz~03vTl>GSqt17!(!t^#?2o$Z_-cTy*>uH-08V&r*mbR0MQUiyXrl5jaG9##6(GsO z%MN_V)z}%&HZ8~owk_qlRWJHmOEbqrDngDWc62XwQ_;C7-=pB`O7Ayzp4)F0_Pc$! 
zw@tpcXs@Yd(>B1>Te}^Cm2!Px!8uX{-+b{;5c%7e+Gcj%=3pyc21T^@QiVyRpx15J zBmHH>h8FPVWW_}5#xYKnLd_vi8tsJBghm;z8WtLz>72s;PiTjP7kc7IUL6UPu2B%+vdv$x>|r>yS*e_L@ranVm1W^h*o)^5psDQ(;%d zxGaHX=W~w$R{ga;+HPc86jZTg9K?PCyUIFR(dL8*k;g(|~trabs&`Hgb^ARa}OsS@fQIt{x-T zrk~qu=SW8BeUFXJv=P#HCDIq+f{M>kkKHb8k6n$;IX$y@_rPOgS&jNTWq9{2UPJnj z0G_Oz-Mu95zqNes5nq;mEkZb|f7hlnkyqTNV^x<6cGP8xMH-a$*nR*K_!F2nb-2E4 zx~=-@6!=~&K31$m=0l626+CHYhl=j|;9ttqjsXn@ zLUQpb>atW`Ooh#HvY9Q6rf5_s06Zv1dUs-u`eyY)(g7R19^e+l9c!zO5nDS>+TsbAN3c^ z5pE8jJ+X|vnbuZ4Kbo&=WR)M`lpT2cSK>#}&y2z5L#My+Tvlvto{mTm>5OThJQvW>H* zd;Zp3NHYPvU{=9hNHaNNEdESZ{0y`aY6Or!Ju*&ll{gEZU*lbAV7F*>EW`81aF<(1G=~puG;t$`q&4Jl zGiLDd$Nol>jLB<;MdqFwUhAIY=iDc=#23RqyjBeQwlN5-99z>m-s=-T)$roBg*u<+ zBaRhxOPqIKnTBjF7Kz-t`O3IgSrpZ2)!pQ|y~)H%tu*e99eiod;>j1MUn%cKEq31f z`tn%U8%?1&DGd&J4>!IpfiWxO#uodAOm)Yj5J<>YEf^ znA@`vnsyu?47-)%er7(Nena;;ui2$7Rs@e^#n^+|l}mv~kCZSi{4)v2FZx6`zoVDv z_hUA1@?C9wg!MAL>(4P`MZZ2VxWa54v6rM^$mK2TInTNUd{xun5$e*!Rn~^EF)n_j zQiXJ>dN4XaGHo@px{nU}Ue3J`nK3X*V;Ka|E!EhJS0(q!g>z!~~26Ieg4DvB5E=bD?G=U%W( z*PO~Qkc+q z28m>8?Nk4&y*H1Cx^LfyuartEMF^plBD7dSmJ-UojlmdF*$vrulA@AmLCC&k9s6L$ z5+VD(j(s1@*bTCdS$LJ@OPyc(>w1_^GH7s1n<%CdXnJ;f4inf$(e)y`kw#hdB2A7Rmq>T zzmgWt`OCrMUn1Cl`_PDppX)OYtk0CdCX@eUQvO_fey{%IVV?B08aVg=zGD98Q=hui zop1~4Tx0w1Xa1WR{_kx5+x7bM7yn(f|JTC+X!igAX>TsbXpiIH!R+2{ef)9!_rT!a z>@hG%)aO-wqhuRyvw5_ZQD>m?82-bmdrLn(rWD83E zBs?q8VdUweYX0A@+y6N{pH-Ri?_F2w0wAoo<9PZ%f7=q9pD&w!X_@7c#qs~-j{oza z5!pYlh@t}{i3eK##tku+&kS;}l(cZbR*Dxt`k&trhQ&1CS?k@5Zb6K~f&a-J{%bq< zHR&<#{AwbJIVL#L9xiY&-GN2Th z-^qW{Fa??q!X%f<|fL2Q`>TUqiTM&fz7bFVw%|Bn~>&=F_gW`$yvQffLYOH7~DDK!8wNebO+!iM9JE zNhpEaR&4VSGkiDjdy)yq&v0&AUD3R7_#RbbM@0FeY82_b z_u&iIY1eUhM|BlcF!r43FD_hQ;I&#*&E7NS zF^=7Cluktx%DOzJ-GpKNv$Bf6md&F(V?YLabtVBt6WhoPH9QAMC0ATW+yjSPu-S=3 z%U|!i1#Xr#<_C!+FnQP#`=CNAY=`u!yJKn)rG6Au{YczMjHE)z8aaj}P;>;1d zhCTP39{3?eKWT{_CFEI!y}9wIpZ?^KBw8s;w)M9BVpXuMm~HdZE5?MCT-R|Ow(ett z#kPWQ2^762))%IDETq+A8WKsIr|I}6<%(&qW`elHD$B40>cOMJ=s@4tFUTr&h<*dM|J72i4?y6 
zHJgsC_fAW_;xldHgS`C}lLh%!8xmLG86_%_!aRe4vu^);n$A9rq@SaGEu|eOBoV7T@aQNxs*`7zXYOBIA?h8+6j}7B ziP{a=j$T|GDpt$EtTrBCe@%xG=dgbGXt)0)T~3r^ui}KHAL{;P1zT>=SF7YWT{|BZ zG$rV`A%6>B$>=7zx~XY7P*&~*I<3ZY`_u2IsdEYuha)7OP>;uyH8eu35i*=lyWRxj zy3$0T!IVn_1yU0O(f|`TzmR=g%zpQ4Zfnf|Pj3Ykwtvq`?mF~BJwxm9xlx#wp)34D z2ba$RB3(DBVwk!ZshzCxZA;ev17|4bZ%{*(ufiCBxy%f+{DJgK*V<>G?UWjA%XrLL zT-7|p&3*8|l~5UE!R5szgC0b2X}iY z82CBq21V`RrhS|uS;mSxn>d1N+zj9LspP zr2dwZi@$A~Qz0uB*B@bYM#8>RIP#`l#oR6En^RYwG$fWfP|0kJi@fd6wSn9dip$%S z>L-R^<*X?K#Cxg7Fba*G#PLmvC-;Ft)92Hq=2Phx6y6mz0j!iq==Lb@9qv(Db__qP zBExHEVAd$}F6~Hn^4)3n!~%JXOr5+h!q$UTD&(jKA9&RTxns8U!jXCxn0P9l^es4L zJDRB%K~@)ODt?{3Cz1fyCEACjgOILkWbQ%Obr*M8EFphUG8gNtTp4uYNi+>IPRU4* zVA&HV@uTMaOdE(`y1Q0aK-szYh@-hW0I=7#m;q6z*w%o#u<>IB&Mn`=8?Ym|Tb@;m zILuY4Io1yJ_z+3ztc-4?wCC@KN5sH1FO8zox{*-+DL+AC6?X56HEoo^l^Q_O#=K1) z#y#tLv3Gl|I+1c>(~k(-6gzA`e@1TmvaNN5IXLca@!jnkH(b_ppUnWyMd%%Jioyop zJ|L#J&8meqV%meWZMcv9aK?8^Kcwmz-5C?Wn47QnzrJC+dkThkTuANtz}F)+j<_%M zd%yq`AANG+c@C-_M{L-iMpNfiBn$lPT#0VZk@d<0Y=^u^Tj)w#G7oN5MO__&x?Sqn z*#3>2qF#9?-?5Ji{TfuVD!pl8x@kt*+OMhc~JVxO^UQl$=7xs0jqGbIHRkxN7?`1)boSEPx zlF%lHtd9Gmrvf-~i2NVGrY1aW9op_hQO}~0eKRd5I3M1BqcA3LMMuad4%8swy2i+4 z!#o?mWc3KqxpzXuDi}6RG6?Wm=uxW)YHRUW#2}`{wl7it0&XO`pd3${4iaj;54<@S zClrn?D8-Eod!gM3yr3S#5hG20{RycveEjtMsUk@fk|?>6H~YtR5C1Bdn|kv%F$U$9 z!C;$N7j@K!clh!&>|r~h^}D|#B7%qKvJP4IAqwf8$~r;VXRL>F%+!skOYAXpYWC=P z^YVju*M4a!#t~=Y@J56(Uw z&$Iu7^v4KX~Jk6ngz z@!AqrY+JIbHF^@|NyR{0YoJ1gyk2k>mZ$WNTgHSKCWXE5fxj=w<&PP_);U_3d7!Ci zeRoCxJFV?#8dqWBxix4ZZ!4`t1SJ>)g>^OUKic<2(^K7R{e?M5)B_1WwcK%rY~#YA@aM5EmufvV1*9>EmX8*=|(`0mRK*V zADS;*bhy=`8^90k(N*d0o8#~-c2l(!_iW8v1?8|e2S1vW{1HB*)@VAB>~58gt5t^nX!w|hKUELRYV&2fS}9+aJWN}G)zH*_ z$FL>yIA7be%=S3)`5E!vIKS_OpgB@*uj~O7fi#@T%!*um1d(7YNTd|E^dl}2lxqJ3 zBQb*G0~O<80=R=hduE&+fho#1Xq{|!I>Xk|wk@}54o{2UK-&GI$*KlrA1WPw6;;i^It8MbqlJ-ut3Y%_d&HCa z`Ha*~EIDnTBl(NGjAK(YZv=Ey)|Jv=F5;YC3M<0w_1p&J&Z@^EabD4n47j?0S9i_f@U}QVT z5?Qn2V_QnM}Pgk zAD*L4l?czsmIuBsuiVtqj?`LkCo%sGC9t!e5n4`#)aou}((&eu4Zo#}RMYy-{q 
zE{j7KfY)sC?8=j##v5L9RiU%OzyrwLr~N6`u0M327QTTOBY5u^WIS6d%pj3@qR?tM zVZY=w5Q;kDrs4|0Bq?sb^v*+6apu$f+;V;wPnCc>v^AhR-`UesHyb8W#0>XVUs;CP zHGR%wQr-7yxFU1WTR@>B*9ZTs;EFhh<-;6&7hL6ixX>4QqtOj=L(ql51IIoqP86^s z{T+S^9-s9C5!Qi=4+_q*?6(6#P_g*II|fBI?91PHMVDuepK9@}{bVNgjd^slCPYRF zSUFBLM+)Kj$}@!I0-PL>>Bl!~7-Z_8YB!-&%PzdRO7!P3nR(D=G-PsaxDRL~d|JnK zJ$SQ_^ASXzSS5IwVP=SG?5#oFg-#M-#^*NsP)Fy+s5h}(Dh^0U(E1@?e4IafIn@sZu!jeYkSw!I1nzM+pPhkyldQPRg%y;dj~G;B-^0CeG0^aC!H=R^4o-QI4xp(x^El-^#uXElW3sJY z?)hox<7T*0U;CN28!oyfHKr1^mM!d%7>Ln+>xz96>b!9rgxt}>2w=B6A%9zHD0atj zFkkhM`m>F9lD1;ul?}U4a}Pkgs{VpL9}}P^@+CbsP0jmJMlacfh(yj_`B>j?np>iSi`oxePVc)Wy)}!hhJEb|p8aD+;g2Ei!D_tUoL~yC|+ev(z0H&=2qpp5AslAPU4(%!!m$^xn$|7H+!G6`dS4;&K`VYbS*>RGZtV+=Sx>vM7mOA zBLcY^&+~`3qOi-qJR<${nD=%P^x4R%_@`}7$sud1+AoV-%2PL&5=Q!}+z#HX89<?a;e)|TYZpZeV8%&yZ_`ORQlHM)YegRO!hO+`g&ZFrL+zXH^e#z`jctt+yZ1#^9!QBU#;iX(3_{t}2V>(^LyP7sCQc zQ*=gY_=%~mE{wg>B`DdZ`Y+lk4e26vYpr)Wk8y}8&k9N*E-TdVc)B2zPUtuu@(`{{ zB6Y<-0Pd1f&|X}(AJD%p)DD^#7q!t)+=*(ms!$z0YuUcECuEm#WV-2o8v$-}xDJ;I zz2dYq&=)Z86j@=?XnB_(Xb`3Am8QJ*>~^CkY@BE78dN9Kfk_#pC0gl5|m?i+nBx&9;+Z@=OGh?hZOc};-s%S^vtgji)z0>pfuLy%8; zQri+d%QT5?fs~_{D)~yKe7@0a+q}|1b%<@XG8eD03Wmp)?<_pxxO}D^8SzOFUOmz$ zWF_K+t z3~fZmVT@E4UYx{zL2ATb|3=f;9dD@~8s|tg(vA<68Ft=*_=^yMfq2KO9fox{@|J zR9R!Kgx%s=G46}J;)|6Y9vA^xf(hx&lcw;V#|h_#(>6JZjIkZKLa_LH=K81&?%qEC#N5 zoSe>L)4olze}^D zG23V!jA^i*i}%sW)$mczqd6AAzZ$`9%f1 z5H8QDFBSsWx#m02m5Vn_zP@F<+3!P1z1l;~jkpZ|767=2r7aw~Zk%E|W2%SbQ!&Cd zTy}F~6w$?*B7f>=2ad;i@Bt!8;phimKIj%TYRirw8>0Iriqt5~@o;7S3BjWFR=IfQw* z*O_hNo#)ZHZtFZ}E znHJCfGHuIlm8DS{V9svHsngX*0WFz$f|(uqP^$~Vc!}BQHl>5^<~3$xc((q>gPjG6 zeFw0C1yxeDQzS(Ox98!l#Shqk_PVLO@~5J6N@Q{G$89XgOsioTZjY2^03%{DO2uct zj*2*?$aIydAFFDK6oIVHwoMWyN{0c+>mu<89)AZyW^)lc))6MCt?%SvhVUjiiKTp9 zC_%f~q7Ughn`i;ef-Z@LrRr&ABjsiGuC=YHuC`>um%Ir1Zvf(dpB=3mWY<#!2qo+# z&Ud2>Oge}n+4Wt2JWQ|Wd!VF^%QuRNN|XtqM5#q2beUOnRUX?Bd;S;O!SL-fBb_wX zK1)dl3zQ*Hw?5}OnKM`E)-m*~K;gyUd(R=;Q9=t{M(&G2qdpn^^4L93zw^g6U-DR)NSb**yMRPeKfGkrXU 
zA5W+$zt(?|SoSgOUf)4V70gyczFaqCxNPJhv>0Jp`-U^tX$|C$f!4UR3`-L^U*Rlw zpK^LX47sz}|4p?}x zTZ04ZTaU@2h>2<&D`UMP4^GnJnh>Tp()VdtKBMxm0x@ZcI3(6SvY8>v>9u?(C&gOW zCWWk%so7U_MJHD+`>y|8#}4mptY$`X`Extk>(w;C*nT-lWTpD%QaRO6GHcMN_p7D| z;mRVi3Z_Boo@4TSxZJK6mS^NTViqF_=YTU4;bA`tvlS-7B6luG4l1nW#G(d|0ntw> z(H=<(%R!D_LE>O<>}g{Ab0B73uI-GPgCu4fh>Ad7t}#&C0u3%?$~*KgqZ${T;D;9Q z5V4~iW#`lGOoj{QddwvR_RpwVt5zB()=(KfHVI~%FxAQ5&Ft8qELnvOwr9Tj8vc!) zmeWmJ50U_R*>}ncBJv|Qq4$*ItYS5WytEKAIeW9G3g$!ut)w^dv3Gt50?0$DEn#mV{fM-H+=9Lw z(VYfVCN7r+{~1O(?$5dBC5#hmx9KqHO3@ibmIvUWINhy^ua0-%NWSP9U691%2ZMHI z5o-Xo%`r_2DoWzr_6~Fu>k&yw6XT9pe7^a;{vSI3t0M$ps8ZeRLE#m?Z}EYdEjEhC#^?Zpy!rAguTHYKb>)q?Iiia!oQpCkX}P2M zmmDW)#7X?iqV5x?Gc|#{NBxV}+Nw+CbtTtpvWs0)6x#JDLwpN^0*pgSStq`Rm-<#* zxxw}|^LCC^pM(W#so)y*dZp9KmoxiXoeqvvP$`Ez9Z4|T8;I@IFakO@elUH$-|E;r zHzkhYC7KVKF=ou^3XAh4pRvFcJ3r1z=+B$v83=KpH9`W-;c8x@aP6_Wb!DY38c0Go_R2r$v@GLGSwDRJ!`$ z+er23vV;T2q)eF!+472r@8W}dcXayM%)i(+MKlLt@70%f zq&*h9OgXp(9}+DcelS}aHT0*P5NfX{S1#@uSea5g{g*jJJTOe3Z+wF*4G2FNVC zU~v zKYW{UQC@K(`JN9@d#olTNpYi%2p$zjuBLOQD`=c(;huB1#d-0=CA=o8WY}&xuj*y$ z7CEfr>QyaP4o*J|)B07l74^Y1=0~#q=7PM$vG1oj8di0acnioKOwufzSjKYDBa7!CQ|9>@vE}f2NAZbQk3=25Yh)UvXuS*E z@|AXisOG<}9Fo59T9RliAFUU>6d>b@0+(%R-~mXv@=)JRN0!gkkJ)M2+}5aas{5p9 zH#SX^+1<3i7P>cW3MQvcLXf%x{HWo6T8SQA5u_aR$U0gpTeIhU*zdMqm2f{e+9f(8 zc1rZP_|%DoQ6izFWzf()8%JXVAwU)1>IBSWi}+?=_nvpl)GB?%$Iz75QgL8JH`~Y> zZu{2h#^HI7P`7L4b;WpzvUe9u;CELU-MTPu%gnyQUImo7=zuItaJS1{_d7ic-+|@E z8EE-g+3z|%HfC*K7K?6639Y36qq3aExE!bWVBzzfr^)i5GzubAvYtE*1jQNs9a^CbdwaNn2GS|%}c|xRt}O}d%Om} z(R={w94#jeoOWA7ndn$3Q7sra_HJiC7TShxMfk_F?0v47K>~sEK@Bg#cWI=&*p{r& zN23F6#P2T4A!+)BUPspwX2q6^>*?DC^h)}T+{Q0?)?1&9VN(wC9zpkRkRr+eLI zuEsx0ze|B|8LXo4MRK$HSPM03y?Q-x{ZbKoS>I_LsrKyqV z#P%@^j%JCcWpEHSQrnqRhHaF0adq~lo;1|LuO8sFN8)TRJ+VVNqlgWaHhP#W}_Aw$3x9M1Z;U~Fqw5#BT zbq|J>jefTN!)PBWr3u=vgw&`V9T^#Kjd@w#4Rni&y|zs@Q>zRdxZ6-Bm!si7YyY25x;3?@o#ohX=Qt~6?YddkM)!3i#$fDaFb2qS zF~GfhF%VDa9aWQ02$<22l@R-|JSe&ol*NVXO+;R6WK3Pr%^9M#B_9;}n)<%4Dw>Dq 
z;Bk+Bos+64ra%MXSntiq)cnF4Zth84Rv>U*;p1<(?apaIUTeyBTVY~~(^4XmNe^)Q zDuSPTbdq$Jx>|;4>;|`6W6)V0%-Jf4n?cNyL*ar^k9lL95~DGeeH0JZ-S0|926M>u zFqrI36hix$MaqTc)Xnf&BaIGk%DQ;8$OT3>Cn4clp`AtDO#L?U)j5WFR%5-e$@gHE zEE%($o6PJ<+FuEJLQ_uFEA^fT)>hYSl}XY2B_A_pBaA6Ed^#1~QWC%&y(oc)zKxF~ z{o&VMW-F7EoE!6Ejvrq|*%+eDES>DhHLj{!MBO6z)eMx7s#DTmIpin}gtnxj*&!FQtbe${sT zvQFGTin&DbRc6I=yLX=?-0l${aUaTkew4ym0IbfDP3kh*D=)umCjUq`5S43Pc&dFX3~MfzMh!lAQXbZFC$& zZQ}QjnB5UQIMSPgec0z(F;G5s9FHulAZ{G-UU3IAq)0i@oJgx)`eC7CKZZy-)SIDt zNIO&Y811!rPXWOu_i=WzU&0@s?%J^vH_JzLL{u*z{lut`nI|m!#~d>C5->5a%`?RM zj`xaOx_r8&&BKB}s-o{YU*O_ic=r*;p^wu1!KxZ-!6+~HQD8RCSoBHLmmr2U zgG0%yfhMhrGZhzsl*;Cf$1`22?Xe0eO03wA+PrSlM3g}pjh%I@v0V(>$&tixq<&i_ znif8;s(x2M6>?sGI=tuIdjI1P`!<0dp`9eXo@U`F5rmCgd9K#P54$meAH4RXrNiny z+R)TkNHGRIr~{q6En=-?i*{jVTM)AvFIL_iz^rC049zO!*kOtrj!OyeTCwA+xL*wQ zRl)MxKIc#@U7ast21>h%dKX%;4&E^hqCc$11j>+06{J>CJNF)I5ptBH7A)9e8Cq#< z?m6D%Fikl%e4BtC$-LQiWwy9-xtiXlRl94V_pb~P??cAumZ~w|7^Ejexo=v1?#X6~ z3{>6R;*T=6oIjjMZ)QAeN1(YH={T8<4Pr!yo4x%4h!gy5y%WK1~qlr zi(p}gM9->;wn>}L9~Dzfnav3DakkG7n5h@x<>CxE&S44kNS?aZ2m3Tz^}RE^#}1F% zf+N&3bu#fY4jGVa8*N_2nRq7Dr1YTa%O%3trlN*7_N1-3>H$9kYp!mp}zoVahPbHP&u2DBAE&$lQ)j+3sRf zY1Ewk)=#YJ#8nWj^iZ5|@ide3ROw_SV?1*B$yYCN`)j{5Q zzGv|X$rU^Ygc;>T48pDA6|p?lmJ-&RXV9}1euK?~l5+HL|0TvZ_VlaEwZ3onk$3tb zc!SyT4qB8YxFn?ZLx?aGg_iiFdv((gDR3vhDtgd8BRj3Gib5N?`1-MpqYz+tt-Iz^x0vsBz+bOF6H{V2N5*`~q#LH=|M@jv_M$HD6ZzMdQzLR-?^iuL zH=lRRIwH_&ig$T)=uMAHSBun;G;^xYcnrZo_eLIQ!tM=n_QgU6+}9@-^l25xYlpnFMMBuutw ztJhuY+#zgCZTjr+=I5VVPVF-owtld))+Q5euq%ynGE&OsSbrpPt{+q!yC4@8xxGJS zo=A&}{%#)+Hp9819)1@6bgi_D6`x8-3`e=Fn0VD)o6OYWPNU}`^t;*Y!Yg0temqI4 z=XCW<$%rUXeNJPC+j)!Qdq``sQ9=D?eT?*>@3}5yt~#K)w8JU_bTPQSQ*y=u_+*Q3 z8lK_-Cpt!C-H7#>op(hlU_}KG?Ny$`h(CMD{-pujow*~0bkfL^QX8K1-q+5QS^LxWRWt!}f!y`m`0t0#H80zv~I=JLfd~jrU zUuamH)Phplq4!{cuz0`S$+81#L+-E$%V2|hNG^HHN{H#qO2uc@)%rv=6a=XOzpGwke{0G{ocax zsVZ9LaOQ{3uC%ud$NaB2rRe4yd4V&3I2jOTk8Mn=OL?E^bm}Us*v)nhB8)Zi9NtUR zSkDYC%z+hU@II&Qb9HoA?AzU>`I#KCegw)%)vM_dk&K-e`E*5FqF7CAw=ymkCNZiM 
zAuV8APS{GS?~BDUs#kC%7X>tgO0g ziH;DB4sRHP%uraT_Lm!o)bu33I=obwe##NnQtTx)!SR{OA1PlO5DYJw5dUGm%HV#yggybaUY4-_CLLk!beL`A866ynw@!v@ zp^a-et|Aliq;jAC zpNMRSIURWEd}Fv%r%fLESr{?WO4&i7hPAu6I~inzuPqlKj|o3;hAvg)kkbf#Wm&1g z+>&?2_SgF{x_y)b>JN`(`b?m>`J3r&*@M?Ch8$hh7u*n}Gss~o`bzWt%8%?KmZoT# zK_*Wxy)glJ+;vDR24mvLiqV;a?o%u}sCvc@Q#x!=R9g3P=hO=?50@!os;^{`lkS}A zg{&AP-zD7~-THL~bfWy5{?1E|_$kI(GTI(Boz8Op2t6zP{nasf z<**k4QjK)i-w*Ys#QPu5Pybk7_As29Fz;gD(-UJ@zh9uwFm&bxV?BQ~YH;#Gy*uj) zrzOgRl`YoAD*e0aBjVnjQd?4{rw^h%Lb2KOqb`iKM4)dgG(q@f*Ymm@I6B*T+}u~Py9pmakk+&k_*`pg9+uPIl8L0$LdH8M zDN9tJS=tr_{or|slbGl(e)ZIjRZo5-6qogZzolYwm}nCOjltu@*We7!5Em_L*`JAU z`QWDvczQsr+^|H^kiD=;p6P<9r^zHw^i5Ykw1=R_Td=BerA~y-mfqWyMq==Oe4St) zS~s}%4RZl%bYz^iilx>-h(*AlJKWRv3$1F-7BOjKW1(kd(=m^KfV6EM7d=>ZoX8i1 zW4Ic;fC<%$8*wecQiK-7Fcuj*$qB`w8LuMZ_QD}bu1@qH@FF%&6A&K!P{+pl8tJ1K zJEL^ha}!fv=dc=k4d4@%XWxn1Y+ZJhgaK9Q8&;9-<*q3yd~5QG;zCn8DGs!fwe;^P zix6&X`N9PldACgOtO^!VQi2I)le%iTFuMN|2n^O{qV4q8bgjC|`pEArdJA6{fmBl| z)*;^-u@Zpt>HbW2-9M<&*rLEpEs=rDH}8DcX>vp`8NnS1%lUVk!SnhsmX9+hAWp!r8P{sr6IGO& zo96?yzDO^=pfr!iQmquvXf_*xjOE(ZMVD&Uzi7mk_p|IW*Qfe1lw~`gv<3mk&=fSe_qa!L!GlFYMfOSDYj%vTl0tqc`Jjk_ zY$=b@dgFoOlwp~(4he2U$B8RyAbRIC+j%=6+KxW!u{3fYdKBIb2XKi?>wvlbdpQPY z)?QoR1!VElj(k1neyJr8`X6Xb!cS_?j{icRl2y+R`=R#mDiZhUiP*R|KEjC3A2AGq z_73)*Een4Xnl_0>&yzVPm({0o={Y=x zC+m-k73ufTMW}*M1WLQx&2r~ljZT2iK?3*FkSlYaY_{YykLae0-0NRf&!XLaAth+zp257f z8{+$o-ZMQ0v982Um7wAq!}v;kuU>Xz7IlKCr3nr{{?;0gKlQ@XYwHAhInm12S#M00 zziWjpNczxAJeT)X#)`Ls%DX;4&IR_@FdWZSICcTA&(yb2!Q(G-Ws!R}2G+~c9L)wx z&V18db{GHU<1tzv(vWwcvxTqenAuLC(e@D$Lr8U9@I3Z$hyqXIelfGZP{as5AF1> z6&6Gj!tL}Hb9Jq)=~Cu0%W&FEd)aVjSJC|-LHeDXRxaW?j!j~eY5nw{fo^W)flg3w zBt{##5`&nT=se)_bxDrbtrRn;qlZ9`^(|kH&3N*?2;A=QA4r&TV6N9X1if{*CPYR$t*sVo}xf1Hod|eUExLRZdpU4R2be}r;Kp@T- z{f$;U5jrI=CcV2{lQoM+?d82#Z}P!uK++-O7=lK;cd%B-viCHPD4In|m!K^w(C?5e zM!FeOATc}}&%YK6>{WSGqX?ATRT-fo_N=k6f=X0djLTwP{T6h(Bs=tTh8C{z0Hk%5 z0n#toVjbhI7(&p+da73k>@+Vh+z!rVDKH7y5NfjnNS)imWsT@t?qygw$go8}Pa#l? 
zr4XZy95MK1#inivRDCvl7!tE%Xlf0lHX(A%IHy|RJ-zS<`cR?wGig$9Jndbdk*Rr{ zvze29=h7jAF;yW2NNZhioDEP4#MiKa~$^R&=xvy>~P+wwl>Xs+R@o> zemA6pxmo~bIL^mGnIrqpV(9rw;g^6#fxkETZK?n6Qd1qPSZQDS5=Aqsf7~dn+7bT_ zi$+_$q&VL}G4DcNib9Tbw@)oBV2#%Aemhhz3himdjVwL4RBwG}jn6!`?pd@bZVoDJ zXza_8p8F;>qw^khg}hl>DM2-&rsBVq?aqdp?`sJ<=KbWwg4(!m?u@-KcklL&_3-Tg z-xzA~Og9&&Xs6H4wz=n%A7_!ndQ#N+5-g|{vfM_0hwEh>I@H2Yk9XoGfQUF%&O3Ka zjzjv<+lAAh&3Uwr6TW7aal0#J&AEB}d&ARTU8&KuVC@A}=rRj`mhd*q>QZ%bZ}^bb zzkmOdBNdso;CxJJ4{3DDc`$pGsSQ1kQHi{fGQ7Qzt2^o8CDGbrA@j4u=Qjmfwm*=P z5eQMBW_{i{tiRKgKP!`b+&S_y zK<_nbvhjI*y}~Fp{twmm&4GNCeRVP1K66Dbx4(6>6J^g>Eo|rY9np*12Y>AjY61i@ zG^}j2vHjjKK)Hv^QTN}UZyo!jtu&Rn@f^5wQopH@nyqn}ZB)y&@Wwu$#7*4pTW>;< z*~m2QTE*6cGI7XhBQ)o7<3ju7#*W%5pKL}lFjSkhA`+xxarJ`hnX+>YM%W>pcYS$X zs>MEJ1?@Qv=*_g%&+)TWe+9%uKI5ZW+p4dLUn+Nh^-)dY%Cxu% zp;KJd79p`f%24vLF zn1Ahm(YiNJ4Ig;1Y$@sti=SAEm%PQHOvUQIDi6CL3&BH!Xv)Lzd;RnjTW7(_YKVT6 z$m0(n6!G%&$X#1xwCFDh!&x0E3h{Cne6i@^aGT@6i65V=ZS?5jU$XwNSHND9Hu<*AqOdA12e$7#L%LF2~0$oM-& z;{T8({`{Yx1~?!SgmG~Dvj;{xNpA+Qe&^#GZ8Mfr^Q>y`B4cbC_D`p;-3H=U!8CDR ztLZ%%*nXd_aYYWPRn1H&N|&A3n)4rhVhF&b)^E*}0cv9@W8OC(#?O*V`sji_Z{1WC zOOR3%d^}jru^dP3Axx!(Ru3*=vP9sg5N@AMvWcla>I^zh$NQjtvn`Xu2AD4{C~D+MA_Z=?W;A}B+ma0 z8Kr#(jzq{7PvufZeakgVEGCUri)HI&_zAx-^nXF_|LF|=*KauVZtpJ~#&F(>V~f4= z?`HmgogRBX<{BRW=lClrSp3a(hrc5lbvD$+&*rCAB;HOy@~3&>e{}EceeKVFo45g* zn1qPGCxKW<|3vwC&=EUJU+cdmfv8ck_my+xW|q05<%WMpjO%bR{4C5tPsdG}1Z(7#e%3C`i)GE{rXBuiq-!dmyiNFX!&V G=l>7R^;$dt diff --git a/notebooks/images/selecting-high-pearson-correlation-test.png b/notebooks/images/selecting-high-pearson-correlation-test.png deleted file mode 100644 index 78a4cfd98af2cbf7cecebdcd57a0bf8b3c559ec4..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 484028 zcmcG#1yodR+bB$z5>iSD(v5V7f}{-HrG)g*%}_SoD5=zdlysL$$4EB=(lB(4)NnTX zys`fCzwhIB);Y7*%-(bFyRMG=+HW;MN`wT|1ZZezgeuAk+GuF_VrXbMH}PMOu!y}QCQ(adli6fO$#p4M% zVw}WN3DJCb6N^H!qF}*Tvpj^*j6w?$GI>if!gBWpCqZw6;b$%vZMg}^O28q+W3BZn 
z{S@}z!*-_`6K$K>fO}9;7_Ix(ikfBECDl@VJXJ0@1f53?({rJ7W#z4rmewL#a{I~d z%A9&g_p~FCKuPe))kJpB?d5$mxoif|%G$m=gA5*;c?X%=0Xjpk?_uGoCDkhmJGCbv zBz%dBp4g@Fi@MnJT(xf_p32W+goIP4aH-(RT@ox)VK;dee4NKEPk6&ejxJ{4rq|B<;a#tXk6PCG|ef^=7n%hqbR5X1IQ z$@lQz#tl%dZhPy4-`y;FFw3PH_2z})jUU7hTW0TCCdS_*d}}b7^QuWoq@U{h%gMJe zLcHgbBtICmycO!=HPp#)+!$VQVP&xPuYG$^jJyBvf%%i-;b#fVqKX?gN;DEa8>ynC z3>cAIhsT%@h*jeCclbG=UsdBovJux zE9-p|Ia($X>~pT8@-QJ4&B);vAvPD+mdX5^_t+l3MXuR?d&z$Ayn3(h;OC<0XD*Ce zTtS*jv7)?zuDpQ{(-)1NG7KVPv?4n=ejwQMq`|D+M9KTlNE(+ZzHxT_ zz&~|;z@BX}#q0Qx0CcSm5Ab0l*E~b5@vMy`z6rTqu9~jQIEhk##A5a{8I@e2UcM4PD#dQ|RX#lUl{-_dk;vGL47R zB`kg%J633Ez-jL2Ug`;x_!9h+Ca|L7@v8Wk#IYDe>uBXhe$V_+NCsCX@2z~>WkJKe z2!u~c?!ere97^N&(&DJC>D}*$zciJb zj+0bo8-CuTDD&~or&kU+iUkxBq&C^yLN}x2*|Im*$*V|s6jMGpes}P^Z5)&Nknf}D zccLTg=@|6K6eAsv`EeuPmX|AYQ>lGro^&cEFKYW#E>Vmxv*u`sJ01b~_EwC}c7ezh z`}M-U7;#n@tn>EL)*hX&^n94bSIK>u3z|^X`^=MMQuwrA#2!jK<^1?U^TDK|n1UE4 zQv3EIU7YeLi!s^UqWNRSXQZ|yx5*~RkR%$UGjC(wUNV!aWLaw*us({f%MH}Vx973H zy@9pCSr~Ej;E`U3LWdEHGKYH+*)mR3>Qis0!cb)Ejz0 zSFs1cJn_j%`X4AQiVPhi#_dbGJR`v%+Y!H`)1#6j!6RPh?3!v3B}tve#{sq8Y2I1U z$3Z;-M~Lm zJw92V`+lAa`%XLx8@4Egf4FKx6a6gsM&zBt=k|rj2fMD@i{@YbzN)?6r4O1Q5v~%9 zw>~Ug#1aVU=e4*OQ&bflrH+#HC>2M@s4!+%x5jbO;b&Hl| ztL59gT5;&}NZu_eE+H+UG!!=UGTAcnD85nRSh72#=^W^sKdC!oTFWvm;9Ti6F=JjX zziqlLfZ*Kboyj=?pFBK2KG|l%W0F#Qqo^BGxAI}N&7Iw~z-!%e)a#}Xx4V;Tp!l$* z^+ERv6D;atb$K&!yJ~D>p1nltV@|lDaFqYH(^|hrsb`B!s)U5yQTk9+jz#@sBc7>9 z-K=+BW3-pq!ZhM+Y+z3PhyL355_mE5hxtlSMBzZBCGWFLB!lnIfc|~4&HbT~0YSJ& z>zai*k-3q%WOGB8Va%<_DT&qc+V`Rp0hKzlD4|_!jNP)HOiu!_mW_@^19qV^xsyeDbWUjO)dxeKRssw#3JE;zRyFs@~pa zrFz_(>FFfkEZ}nxO8!vQvUf0%Z&n#BTq~QDV4O7FWp${3Xufy(lj~F(!{tH!f>ElE zv7fO|gU?y`QRYo}WO$_cJ(D`nPfkH*53AMl;_l*YBh0C{QvvR}?%M7|5mgFC*~z9$ zv-b7dmH}>W>#V}p67_V>n4iWw`zgcRAtd2u5ZgcN(6mb8%FZ^^f)IN1Y zVZ<0Zqi!K|Y4lWc(M52Ns)=eqP~CPgZAv^s*zwTG+IthRD!%FwzUDbVnAvD`*T#Ij z2_BIi-WxGLsF4oWb<&lp{khNA-a49GkfN*#=Gim;>~^n3v+lF=(E20C+Yqjm6mxD5 z&LZ~nig&d?f0X3}nH_Gm#aFR7tIZ{Y4HZpUjlDhUcMs@RQdeHC#27@HX|=|eS9q?> 
z=p`7K6}T2o)}>Y$H#AtO|4i+yrgTgxxvb!|nu}7s>Xp`er5EvOzHF_mwV~Zbeoiqb z%7#YPnH1d6Ebi+8sn+xAp!|^>#-mZo>LiV%Ijl`}#+m7@fz0tv&zCbjQ&NixP#*w& z%}{e3GPZYymEt#+B8VpE4A(Sk{bF`9E1I8_dF~!UIL^U4#53@~5Wf#QA6D8w$sft* z9V=fRowdA*d==f9+j;W!rxtf{meGD~Yb;yHHv_85!5?J%uZk{hoQ9 z4d`7_4RDK(|Ln#V&k&EZ?U%uHSsKriBk}QivbAOyee;6f;@+dxj#p~nluZ*e6XMIA zv-6K4wKZpbJ1I0}zNYpL^dmtRpLWM|CNN4fng{EHHWa@_ouuuHy1-Liite9mtownQ;G;5F=_>A;0Uj4DK&^Uz%0AnIuu-a zS<*MNiYh?8-gTdc1X-LtxcV9$(;u@Y!xH3w;!#!S;zF2zm%RCz{(`G&*Vj0;0^ZQ)sBIN_Szl| z>#sf_;ClUu1-{o~eqS-;U!maw|44za?+1*3^u`zafccMmoSVQivKtrQsx&ET7Xfy8t^H19A8hRS4KbEj^aprw$?P6)e>+9@# zJrA0cuLN-GY~%Tq!PnUd>>=SR&G@T_1aN=d%*V*^tBa?jG^3%qCWE|-yA6W~?|t6; zj4}ia3=C54*3Trg6%_v(4*Zj5wDa_ImEhy^@$upH5#)7ox8>s(7Z>NdFTf`tzytK) z@$dtCKK11RdocZ;_sY2$1Eua&?a ze}M%6$anpOkDvEG-#_ODhDu$xN@&{q+Bg|0*gFF_1J;n?7Zwte`ZeHxd-Sg*|81z@ zzlI73iroMAq5t;i|2tIA!^U0S#Ti)CQ|4a;`)lyOKm2Q;6yG)M|Avd-i2l_IU|NPi zitnFPlOZsClMxIMlGa{9Ll?LLYW3_z9^_7 zK^@Ypz7w|{tha4MW_s+d`8A`i1P}WKVf{^O>297anQZDYcclmMS*EZ-7pF0PL=)DJ zvk+@v@EJnTFz^^c7yJAdWgz zd^~>#M1YQ!j;q%=M^E6Y}c zQpJi^RhFo5LD+U1H{n~-^WSTdyp^NI0LAS+assvU!i*@fJiiqLc@>{oygfh3=qFNlNjM&P=x?ck-L&v?UCY%bu6gA zJ=9@bAYT)`kE4+lqH}Ye9E6XK6$mVGnyN`w4)lG4ztN@^5|VHmuSn|_03SuC!q$rr zo$R4u{$KPWzuwL*!T3_(lHJwvYh^r!C$||EtS6p`8v~onW>Jw%1`wncHerVk;47K= z5}PqIDlpUHL~3Xd@+_xXNMi}9Sl&V_7on-i>jSNI9R6M8`wF6Ie?*# z?hIhBOrl{U*Yp7zvGIU8&_(%i_(HGuo+u{Ef&p?1!nE2u!oLP?F_A>B%>dYZ8NTqX zYf`<7C29gD@(cY;O@WR9(t7&13gk}esw!k6p?E#zn@j-WbtA=5G63+OlT_EM0PRiy z6i#=z71aTRjXGeg0DYf}vJ7gx4be%5x80F~0KI*6 z#8|LM_9z$@mK`6ee*^k5vgTs)0TY=s6e~Vr0LUfC7Y#5*$emqg18GIq@O_hS8~3io z`3}(8a@iT+X+l+!VNPJe>@V-^G61>@8UIDIY=CC$mWsm^=uqXO$=iR? 
z3;}dSP;)B*ysJ~iywODpFr>;JH4m`wq4->SED~j1;P!2Anz1Ax-p0FuKg>kYq2DUi z2c)|!+U&!&y^Ruvjr2HNuF>c6{|~UFj+ItY*YMt%bkHx7kQxD-X2isukw%9; z$+LU?3vp-|_y87EmEUYH0&Gv96a~Qm{uAVWt#Jid`ejb13?4)5G5~dax|A?&Y>19S zU7t!o6!3psvYM9XnoI~LvAm1M#3IyJjw{Cchu6#sw?l7ocd@%lWP~2hctVF#nXWVKkr}pmr+5F)VrI2E^6qUX4AVfzLq?`5ZC6q(g5s>YqI>dE z%sGMoDO;tDyoedPZPXW;xI0t))wt=ydFotLWsmuSuOO-@X@~@((Oo4~jX7Z4k$I$= zYGrgo@L+3*%>4qH@DzC)CYj$Ek|1M1L=7v<>GasFxB)A4WC{d5s%-TQlHI6>)`X16 zUZJ>N6WvW*>WWe8EGmts6?a#D#Z`)TE}}RMrw_ikXB&z9Sv_G&b|U`S3PGT7hT3bX zutToR1wSr>W`~S)oCkjK1=%FhPFV0!30UKupS+lXza?w!Tm1TZrB(2%$7{lz>%E{& zvYO5${GN_`+r|YF`n(f<7~N}=V#YcZ*}7)tINRl_R&^%ne^hXBJYlgKpn63MvS+Er zPVZmqfK;@8NG09&{bodMJ z8UHTmvBenYK@#khYh`#Fw5lm}I^h&8!nqJ_|E1A8uG5vaG5zlam7;0If7CbI59h{7 z?$pUUOqB9$j6jj2xRoT^s4($o#kDE}=+NT#rOArIs%ooSrSaH(t?|buKCiqQ4`$!# zIn4UhPQ{G1*L$p4&k)P@T;!##Wa%v8LW@y9HiQ94PH=odV+2d$6x%>U^U|)Vj!v8X zfKjzAHF1r@6@50eaL*d;o;OXoL3Qe>V62D8O)Ht_R%!% zh>~O8C9aK#^bQ7hz1e8cC3O|pJGM?*$hngeX+P2sI;bKncm=e{n~Af*!Fn8e`H#7q>9XB<3pp05f|~c69fla;YkJ}kb1Hbz!vIqi=F5yL z^XmSS=1*3-JGRB1q(11-4KmA@yeflCKF?n#EQ`@%{g!CuYpKk>-2uS|taU=^FE#T+ zf1$ga>|ks{GH~$f^_*-Em6a#-;?%u2Z(~@z1|Fyb>PeG)ox@vQmJ9Ig1FNHOtsKR- z=04??9n1M?V1X<7Ljwy4OF+~WL+RE)nrMm|Z#{ECtzy;rjm-~k-`T5i(!qSOCJuT{ zCc}AZjwm!ra2toos^jv0Ck0E%5Ny@uq`M`HnC1#YI%C$$@otYZR@Mpi7MX2nVrAj7 zk_My?njex7U9ohTem3#*fm6h`_^}-xtk4d58#m^|2M~WXR=SAuJh!0jpiuBAk4{6C zxaG%ak#CEPnkVKND3jo$BJ%7o-(t^}zvywr*ZDA&d| zK&-qY-<7Fh2al&A{l*c1xN65sn}~SmCXP9dX9k0)7PD0;c5j^Nznso`l_qmX$PgtQ zrY<_~n?v5Pt#6f>v9(;*dX-EkS<|;89Wc-;soUHdo~VAWJu&f)t3=>>Y@`S()2H)} z-_}lJgi26bgbzzJy7B$f>>z_)4^SNi31yrB;rB3tdYV7p4D9B}AMx^?r`U0DO#srUH+ zktfTo9x0iAGOe#j6b<4o4@dQN-C|PgMqQrg1znzLSJ`Pf>acoU*f4cy z-T5vLcgIdYpHlF-y!X!KBUMTsk`p}8z-CHHlXzv%urRzgg{Gx)?o?>_N2sd1e9e~i z$!!|zEo6Z6jP{A81A!RCb5UHcXG@&L99GEqKn9vUXQAo`&3hFx!eJHO-vT*Lm7T72 zn$mV(>s_gbuxVr_qIO$dPYVBF8st3e*(xgxcN-GPC4!aSg-2Lq{Znyw@HJ-jtI(aAU}!*Ukp2sJ}s($Z7krD zSE?*CcDBB`)qt>WugU*`fK3B$d&HaE)mn4l}F9GQ&*uEj*<6T&s$ 
zhM2oLPzU9-@hfHds|@~94(A0&RdsY|j#5l7E`8t=lw;p*i@>44>T?%(iy7>*;TskIgh5j$IpVir~%|92gYB#Pwj}Fu>?9X_Loi1ROpk*v; zJooPIO4UtjNez@R2EmED+C0s2=(Nr9LRo81K~DrCP5a2*0bh*n&>v*BBW$xuG~@9} zkK|gqQ{v~X3-2vdxs9;C7j{e*msnhikzVyUUkygpi^uEOTUkRl$3C4pAxrUUTgh6l zz^K!-@t>gK&kr!`kH<~&0%GnyQwF^LTK9|6*XO7`pN^p&7~IXWMdl@LdjGjpK=f>) zXqgns&?9SxKe!!ZNxWK7HHDXDtVsWOYRBx>HmVpUODmyEL?^*rlIgDsuDgd)!UEZ& zL$BS0h0wLD%!&ND2=~*K6qDM)z{{syMDK<6FW`rnfelGkX)~%-XCss=y?3Vgt}ZUitoz?RaG#G=2lu3?9V|D@uYF1hs@EdA@;AL(U|~c=C&RiW63m55 zbT@7Uic07kv0xRiu#FQy&e~sW{V1Y4M;-2Ryq9n+w-!=N{A5uSW|dZ5ag}0W)6*Lj zFKR%|V=mR;YZ$efd+TbX)UHo#C+;zIfIqh+#l#yx@CyTJd=x03I zvR#@SqxAdvJATJw24v|i4AYn5Am%{>ZId<*t5-+bb(-cz)ozXa-GkyBItcwj!?Wnm z7@*n}7zd$aV!W40Q0z;tnz|OU%z&y_rVuZjjl;zwT|sp-fb}*!Y3Ye+3ej(8 z>db{*J87~LqSop9`GD$~^*gklYw$YwE>GN3f_>v`mgDlU)Zz*iKY>SC<$K|lwLMP} zcT_trMx22oVlrO*tk%V6mp8@SpF?NVkEv_l|Jr5fi~i*@L^P1!bxoDr4XyxR`ui1Z zJV#~e>=MT|`5oMWalqw5k86zba(JrxOV+ z%mT&513ug*{i2;-nf2%BtMJ1H&+X&|z;X=1H*}yjKR#|4C};BGunjY&V_uZTaAAh$at0flzUWd)gVl?n|!P8tl3B6XQ) zKV|%7bz<*eSg{TsKh{cF3_l7Yb~`%?7VtW;6!%8>^teGNds0u0nS_pBQyEG)Mh5EU zs8ZE)zzCx+*LY1?0+4jEoXDiujy}VI^Jv0xiAz)y>FE z$kpjuV&s{1vZ)PLL8@rUMqw=<;(X)w`@_=|cQur$cD1DnET2g!rf+$}qI>S;jb0Xe z-Z(-Fh;5{lXT3Flm>M+F(N1?w1{VB5+8^;EP$b~3pA7Z);UzkuFvHfn;?J-?^QN$R zaZ#-W=0RP|l-1GME+-PRXma#Mi;Z+t6WJGCKkLh&ep*d62h>(MPJl|APmGJEkA?rz zk8mmxK@dzz0$PF&Jy)Frs>C3R(!#H$$S;KtUO#65KOO3Jj^c#Ldfqt!9gCfxiEO2_j``+HtFKu84K|(jJK$EY2?fnphO}@v@K4kB(o)0QTleD`aUue7xA0 z<;HE^SckEw*0m(htAKg7C7EPtfE~^4()O-7Fe`ezYU+;4{^GM3<(7+6o2Y#{m&Guc zj#C?lbL!9YD3(#=**APLm~5GGtw^Q;FTCg`2BB-#mH8NN`%cc31#)K%6O^GnP42b9 ztNxuonfgKu5@z@ye{?UksQqjCH(33zn2|X&WRRWIg$0ZW+P`B-aI^}9ugb5Te`mbR z7~X{>1c`pXKNKTNpV&*W6KCSmv-DJQ|2#{;uai-*cytmE%RaW7Q@>4RLXD#J>rFZ% zi%pa;>g8(qm_^SC{YlS2C!XdQr`b)8M@$QzCH&4~^uZ_=8?|{BAIgR)2^QEE{OW>B zC-QJyD{XUvozEz8AxY1B=hR5XqN?+znXt2A2W`XbbaE;N=x_~|BJ4QcjY%xU@RER{ z0?=h6`RWNJCbhj*ow_m-!wMfrw^0~$T=O-zTnS_sbdTp5wJ%C91eOk~E1cJ0rL`^d zmn_RC81>d)`Y9@qYhO#9wT*26#dQhu1(gVA>Zk)8vaRI{deqpOc+#S}F(GK(O=6{* 
zCqG;+kK5y#C%E8b7SI}WxeSCQiQqZXyF-krm$zIicI0~fqE^r4^#DI>Ru#GzG7@`3 zr`5~ps^V%*_X-J_LrZC^X^wvEWL=Dz?Oz^%JfxNmn-)$Z4zeC7pfeLgx*RRO( zRlOkwk%8aAciJ|#AFX>!-f-!Cp68!zdnXunezsL|V0YNnzwtwg#*~tATLVg-DP^C1 ztA^9&Md@G+`wG68UI}YnBuBrtu?itN^q#!uC)G>yvH7dRPZn2)u%c44s5QL>hNnX3 zObg^RS?q*jdHL~aKwhI^&Xfz#M6n-2jwOx42ovfam|~}E8yEBhmEm9K6aKQ293+q1 z?6df%htpvOs;EHmJpVlHL88d&5;*F~i=-Om{p?zm9?RQTn1vLsy~)A}LWKrBwymvE z_B)C(Sffa!&3^(lD?)}b><;SziDak!Dn~geRGtt4GcFNrthe(_Z#Dko zM)Cm^kiXeEXPp&{0O$_8H+}gsz^tdmf`C};c;|5NbWOLm6fj9lig#dz7X2HGq<|@$ zgok0@SgC~rcKm5imiy1Wr&iXpf~%u`d8$dVruFV@=QH<=*?N>xp+y>M+OfsjVBbmXb3T-{dHQ)4X zcIP30?b%$t@)I`AGB|7gq)q}a%ATM{hfex(0m+=G^ERX3!rv;#{W9YRV5Q=ptB=pb zg(R@FU$X*Wc3LpnvC_v~P@r+=3;j<996YCo^Xnr$A1z{fM&C!U_zWVnp=ZsR+xl&V zPK95zm!nOjj!#}L(!*i}uQHNsjfL(9sY%IN+X`WTrYA;X#ttuVwa@vz{5qRfDz4g| z+Mn8)sRWlXtiL^w(WfFQSctyd7pB%NIwgE0%$0E@ikts#34xc?T{k`ek7{Css6pX7t7eW$1H&Ds^CGn@} zn~30sUbiKgIJ3@COAVPJ#yt;Cf=fbrer=+=QnJT&^I@JF!z!7x^(*5hH&pj6JD3Mp zLY1t`!6wD6*Ivow5a0?UJ&SCa&yOb>56Xm4nYGsYjD?1U6^UvKyMA#;qCM~Kdc!j3 zh7Wq6v?IKvuI|k|ZaRJ^k^9ZP@5YfA$WMbouz=aFP9!J9{5`zL{ml9HHo&W=d7xoJ zSrCnjWF`tJXEpy&L&a~ao<`tN#@b26U!m=jOPSRM{rQHss$>Cp@t+vYwALIxWJ{)kC%Ur4clD3$b zM4BLaaonD{(Yi|5N(Fl(CKo;83{2f#F1OrQU7S&D>j?x=F^Qc9gQAikvM6?#?2N?X znr`CR2*(EChJry#)z&@QrvrwA&M)NXgNyybMwU-aU2p2=0>t-aDsBuw^7<_gt-9;T zz=tA_DY*p!1_(SAC^RanA-`sT#gkgMuQ=~TeWGV~1qkVYeE$W*BxqySXenTfC}mgA z;10a#zz|;4VDZy@jHm5Twbg(M#&sB=M>%sV@HRY1(jPEa2gmBssfXy$!>jN67yH5s z*4Sx?vhk@7q&I*x$TG!sf{Wo!=rVz12fzV5-xE1Jo^fb?_^>R1k~dH0He!~4#U(Av ziSt~Q=J#>VujJR?{NSm}c}N9cdT9eGN=8|#E6qpp%U#iTk2jbLF*JJa0xaXiX_j9+ z>uqw6{toBgO!oV@>i2<lWmu0(T)$Vt0 z{FlD_??2z5Adn&IAeP^<^(Pzr7Tp#GTWgG7w$6Ae z^c zoe!=F=1&Kvy>c}_&Rm*v)BF#JP=i(De6GJb@SbijH~W^m>QRW!!^Qk90h{<_4H>$< zhf}{|>c835U#BwVZ1)|OT~D_s%R|^G?YlX&FhTe)(~f#olX#g{c6jLb82@LpeRl6~ zH>&j43uB{H>E;;10JXmWW=oZt)IC`4Nn3a{{GTcK*U{I%5MinI1m4nou(7Q2AVatA z9#jFyJJEoLxg0NN58YRisGc_{Q~ypL{Rymc2)uNuQ|L`9DfV?T0=8xH;}9M8Mb_qu z6iLr`SCEt)1@-@crrCIQ`s>X;=PyQm3XDD$;G{n!6tzDm%4m4u87lPKh5HjoLnm+f 
z)@qc+;))TlER$D<=!h+{+ORH=OS*^*{?2{>Z&>*ke*SS6s);SMkjs+zU7LM7ht!oo zbT*ewwM~VB{ZCl^myiOoNZ~qMsSz2g^9?wM{G#r!g_W)w8uk~H{Ovq)fdrG7Ab;#3 zB5@XbK#Sdc)W#tLf0r)&$0EnMK<7H#p!pDlUme8`s1?IuvLl1pX)@OX=p*Sro~4|R z#J)|X?7n6H@#xxTHdX#gR=x?X_3+~WKay)X^ zVZoZ$Nny%-(A&9MUcPOvf^X+KwKgTkO6$Z8I@DDr|zo=PjI3;4k zo<3PqkW0Km7UVh3Nu=p(cAqw|Ux|NAK)Ad`hUPmhE zKPJ$(w{o8WiA?{j=(ahO(^W4=Ilq`I=Un1EfBw^iZNd8O{;%q-KhQNKd>HgMX*n@* zh~jGas**+cq{xy+K#?*z?v@WcAGUGeF{l0;VgCDsvAP(iH^@(DIjQdfI?LLt!Lf_d zk)PKbC~oR-{=xjGs#H57#D{?+K2}HQT$T5#?uMA3h7#0u-fLV!XOv}PCVc5nt+oOq zNz@Zc%VhH z@6Uj5lGEDHnpAK%*V|cfqec#zsGG@Qm znoul5(uddjZY>1i1*rJVs|HVq@Uqos%s)1#<*X#^ZRB<)#b!zcYa>NnGx4nI)Q7?vg#GXKZl@}~h*Nq;NSVNYWdTNL@{HN@DQI&|L9mqu8ee z4NT65r0>KXOZWSOJIaaC*>IKGlK5H-8A~#q*X#t{e=-#M0=W@fg1_0m8_NP6cvvnW z86ESrZz)sW%k&>I3)b|~-;8Toy*EDD zbZ!5(jaqoL+I~C*ZS8XXxJHi}pUiH78V8q4)0A3gRMv7`Ivw0u00U%C=e6HL`WqPP znqnhp9AdV9#x3Ifc$?#RbKHHjoU{cfjN9Ax)BvjHPRm9AtHzK+;McuOn{L{R^ve#d z>6lM?;jI`7fH<3=p-GfBkb0X5!`@#8joqltA9{ z>V;?zPwgx>R3j_-IWBQeatXY9)-S6s`05fmK)W(00&LOO59 z!=AAklcG+6;N2oZPGIQ zNx;Hmxz0Q}kY9!$!}APduuAY`sUBK&B#Vbn(fV30YFKj9K5yccAK1h!d|b;ig_pkQ-{)T0_a zGNPU-rXPNri+Z#^ND7?Ybn5M*G=qM5uDoqLM&||Q0#09(j%h@CYMkMHgBgu03m?C_ ze+ z)ms{w!0SWjMW=baU=pD&t2)1We4NaaK1XB*Q6Xu!0JNAGfh(Io`UYzFY4>+8# zmiC>sOjr3uEVin7?@U|$w2g1_^H96HB<)@6h7z5P{p!{Ff>R^?rx{8#I5ZG>NpuJa zP)QZi{?Ini=^1^RtMK**P&lSP!E3qL(vbO1tFV2*@5;o^7s3J*Iwmc3X}n`%Tj@zq zQ%&K#ca}L4upkmP(j=~v_axMW)~w91ej3sDsfiKd?{`*6xw_e@ZqXCZ(l>w{2OQG= zhTf#zgk+w(!YL_bhAe5V)0W!~Eiw~l95A7m&tFIGy=Oc;wKc)MG|D)ex;YiRa9in$ zSP1;2skY2#bjkpz_)r)xHRF@Lv&%tew0Y3SD?G2}$Kq2;llP?EWO+~$Tr28hHs5Sp z)}MJO+af_@2(p_0 zvKGA5=3@aLvNE!Q&Ww)g3$ik{%3!>DoCY@~+NgUfjbIP52H=p|ulM2h>0h%tbqi1P zLio)bRF(wu*GN@{&g(nZEwXO;%Z}vMf1~9=0Hu(E4_lCZNd`EmO533yHU~$7&a}|` z{>x7aHCxhi23xX`KXYQ(d^5eqMd7w+H7%OH=SU5+R_9s7yZsSus{mxoMerE>w+2&E;P)nY&QgJqTTr*wU1ObvHi zRhG`8&*ZrLA!ke{P#s&WCK*EQ=Dk3!LaP_In_lysJ54{{m^`hPd%ZukN-3~!Pptw)IOKY))M@WhYpv8Q&dj z2)&oGN!8HoZF&C6%R#q{ajwBzAk3uwrP9&m*-ilcy9bF^sB_qShOM~sbk()XWq;Y9 
z;69Nt_+8C?-T;Q*z#Ulc5OuUO0<#qo3D6;=YdHt`3~9mV1N*2PF5u~~{*aVrNZI^u zPxlagz^ChyhxdXW6;jAAeHAuaqM81Vi+?#3sYTKO$4NlZoKaf;giCE{!qEl{S8lp$ z%WfVs;5k2&Y~sO6-*k~_%JHFj(=z1Qn=XeBPm_-wlk0FE3l#_leIYk$vMv;N&a93> z7U86u2bXxX?ZGUWMvry5*6_?6po{ILbMpcZeH)t_g(r@ija$u~{SJ?LgP??7bNq5E z>Gg+s+YuSGJVFbeh+4RUl2&DyZ8XkpeW4Fih4PYF0XuUd>9&=q_u0E?RDx^WS-}mm zX~%v>sQHQm^cyVK%}rYpJcjc_-mdsNbvpT)!8%z!?k-k>_N^E$g(G8B{) z32LM!b3ZmYzu+UK9^gpd;&RjAr?QE>h+7Qjl7f8*lQl{6$iS9OP0jX?VkYn0e*q6OqR=Kdo-Ser<;+7xbM~c4X z4eNNyCqF1l4~aN7)`bkr-#>_^J0EB%vaR2E9+lTM15|y?1++LuZ>~()aR)ZrM6C(_ z5b*~Ae&BMmPr$T~n^~p^B-wM_6D6kqT#<0AN3t1Sp+8_inA>#K{#wB9vmn~0-EwvI zV$EHb)>ToG4a#u8Pb*de{)oDeMUJie)ZwGIPL8B$?A>VERd(BRdvu;}8pCg3;vOZ1 zBj>hiAqB8in?8fy8_tqAH#7X{5kt?^SH{2@q=6z^mxzhr*g!GQT%X%P6MT&bfsHG3 zzC!boaE&xgXu;M+@0QFWD@5@oy#PbOlS~-C=I?i0{-ggQ_YPy|ChMchvip2&Kjwb0 zF67GkH&7JK`;UoSr~fuKFG6QaVtfl`tRGX1kK3vD!%83WSa&I2uGhO8+b0K zyr%clMxAYy#WoJxKWA0HvxF};DT%kDdts7&%XOHem|FH{f%uAU!P-Zid!RH4 z^Mkd<9xnsBZAIu|MJ9S_lqsk1=;0J}X}QMaMyg4|SxZBDe9mS@3#Rqt-cWRdtNtto zXr4)oWwZ=}1$_Z{n4Mha6-^hYbNWoM*SA_i$CQTXFU$7U+*Ni{)9FD2=CGW|7x^HB)&ae&XNv} zcg}JJ!s3e1psXs3$Jfb^I<|yJ{f`%Nn`I?>F3p?mB{mRs4s>WM3T3jxsXguCOj!Y% z8Y^+^3eg!ba(?b74LPeCExh`0vG?RV3ewliMiDYG=OEK)=j7tT8WFIOVWNDI&2js`C;I_h7^ILhkMIaL$x?Vh?Q3~?r(r= zo<^A{i$Q7n$)^t~&_IbfUyatq5p&sC7B$Kgph&TM%)o-hp#Z4_U97xKm{M$34WOpd zv$vc{DLU0`XRZ;}rF4lPejTt=Z({Cro_+uBc+A7R~ldfCC@l{q+Jn zJE?-Ib9>PL>n zyqbeh^!nu&sIf2PLYl{#W5g+Po8h6D^GT_~pQejw``ADiyvW2)3qPFz?^eCVw581r ze)%%kQ=(kJMpZR5?J(^@r%Mfl4IWrI;C9n&$UEZpU>uE0|Ken_W}6Bu&$BRq*`&s) z!UY~v0i*zi7BYPRF8rzgF;IMWGj@C~zo63R!^DT(=NQ=FWZ6iDsS|qd-qnE?3x%Wl z4-=oV1b0ahl}55LdXA$$bb1d8|Z1W2eH+gzQ;Z# zXOB6|KD{_aTp>p}rm7udip?tr%gajF-xx(u_KN_o1#~{a1c7&L2t_x@)ATsuwq9L- z=M1T^KEaO9z6GuTN${Uloq-TQ&gd-JEH9wm58p? 
z_o29sIXKU7h^Sc`Vw$gC+$uJ&@7`x>08elN8nI1n+MW@SxY-<2K7w^mD(Gf(OqP2NdK%C)rtUg!}d%zL^Ux zAGvpxR#)oJWIHrB6nqtQ5pCn$vL^#G$?9I*b~S2 zLfy>ncuRz3%>yRG!&lvSCwW9pYvq$yV3-3knTtodv6Yvax9WDa-@(QjX{lh>e(!Q!Vz_DBc;8M69`ZHa^K30OvStRMx-)q#>EIXHEXdW^{q4*@xoAI)S_w zf%&Gv5btx2G*~qdaG%X;5LRzi!q=KnbKZU}>H&gJp6wQR6(tX&RzxyC0i`(LU8^tZ zcOO#z2X*_8tnM3p;6*`xH<~cnx1#e1mZeS&g1WE56Q<5sq2X%#KOW`zmS=0{?q#6Q zn9v9-WTVq{d1`mY3`wk{;}l#Z-EBVWUFTn-O%s0eArvbBe^u$I#3I-cOCmjIY`vQ~ zp)=-IGjDD7gIuM%0n@fe%+fA|b%q_=XF{Kkdz6^^hhmDo=gYvdLByAzTW2lu;&bdR zanpr941F9ax{hTtmo~d4bXXz(i@o;@YiixvMqPG6WGPLNvJ@2Q(jhbp5PFr~1f)0V zQW8`Ilq$UwdJ8>-P7o208ba^Ag%+tH1j3o@_P*!ry}$MSJ=e8IPn7Obf@pLq7~&RbBGhxiJPc?OiQTjR!H#)(F=K$iX$Ai0t_>R+rhTeR|x<1&^&^U)_?<@Cj4S8cS) zoVW$l-%y%Q+&|ghgBH%tJ?On8SpH8ffR3M5^tJjmR)scS2RTTilC$X7c}9_Wk{ZA7Mbnmsm&B z0{dBoMJ$Ea_=rP?D5|{DCF+)w@?l?r?2>TGXM{Jc_RE#nS!%njS+(6eY8nMjmMjQ` zb_2tqxtehkS@XZasT&$T-)pQGIS@)@#t<8csGH1u?T8dozl41|D@NnRMH~x_|x)vK0l{%X~^w6pvCbbmRn5&xlPu_bkQ8wv!CBi zOI8Y?JkL&F)X=!62IE8O$8h0>qCb6MuSZ@|h+;pBw<%6-TsIsp($|)HcQ$>Qauj7D zk|bqOou6j-tex1D9qz%(hl}!)NV`i8`mQ{}rQgGja;kgO42iK8E8K>!zOTMg^K9{J z%c(5*8QH6;nT-kPOlqMN%T{gp+`d7L>aL!fb$|cXyOyh|LH7nlQG;IZv&k9bz%Ydw z*_WL78QtmIHiH^|d8(u_Och{Hf2;jKX;x08Mr_uH+-I__!u0hO36bqLJ>x}nSq}+S4>%CTlJ|kA1 zzZSZ&4&HaADf)7ip*V}>T^N(tm;uF+LGyfTL}8X%#_FU>m23U#4k9vX?Jsbte;NZcD=nUIKDDr>E`z_bgS>UxZaBMu@vs~ zjsfL4fs6}-F(sSz0KW*nQ}eB_;C8?giVW{_c=~r#K?q&XV`G!6OF4_}VhF#ZcViV* z&6jtL9BE|cH*>u2Gl)o0RnskUumq;O3i4IK&I3_lAK4#uzjED#6Kk)ePQ6h4Acv}C zo^&*=xt)4G69rUD9-rdfIqE91gC`mU5~k&Ly@|a}yT4wlNH`wc%S4m;Vuv!*Bu-zE z`^IhT!u-=?8*rG6>9%hR0ww8RZ}HX~RC$q$HjxahZAm|0rVBffkN6=E9XS&q+X8{9 z5Z@QwP5|}Ha|TiuNYnz_MK49wZE*Y_e_+83o)HN@~+A1YEx{1A60 zYvP+7+%$3X0_ECS#83gWIXj)^PfY>}m_(;P-D3gSzFnPkZYD1(qm7h) zIBkNR8yxYaXHT2iYOC(Mo#y9RuU_$HoDB2&V^dE$pFSB){jgSCf8-iSQ}fFJjpUZc z^GjET;o2@k1Vh5xHF=g9lLJ|t(OZdN2LMfp4izpLR{A8ofgW zq2+ZDuRZi1rwIA(uq3zTeBai_?tTw3{HAcG4D5Bf2$TI&`P$Cs z-^u=u%@=@(0}NlX=Q)Ajuw_5LNpZw=V*Z88xcakAC*OCRxbqU6v z_f>2F#0Zk-d-GAw5~N)HFipxLq6#-QjA^@H<`o4_5jq&My`T~B`7kT9>cgef!OxKW 
zoy!2K#O#as9N&~bbW^ZIKjV@L!M)xcdmNPAX$^xulqH+R7LZ#4$Y|ZJrmyu8xK*!3 zHFJegW{7-5IB4!QNl2^c(Cd-gHLmlZZHJ zcD;hcY^=Z?t=XBO$g90F?JiTSoK(8Encj~W-)a)wZzZ)C*z{?n zNif!Ptrb`u`p(vBK+v&aKtc8lD9o0}HsIl=ArW@80D>0YU82Ug?rg)qIiA*zxps9oQ5SUsFZSQnNxev;lvLKa`X8S*DaO6AXX~U zaF{Nq`BKy#W#phLU?c7M4@`U#{?MS$dy%3N=p#r#erN*TFr>avt6c%}(963|r2pg9 z7Qx@Gct_kYMM`Vqo3wZ6C0)y3Ju&TDf7mh#H2FJm&1dtr`SZFIEp{#qFi=xLP{c!S zr`7E;bc%Byn?Sxw^gfmsKl;h%Pvg|%^Eh=GHWWM!bah^pjXቶPVKd{qg_z+^~ z%)5QP?@f2ItW#IQ=%Cb4pmn+v`!cTSZZcr*=S34I1-;~^XjnvXw3yOku<;X4=6(MT zh86`p=60$(D;s>ML`& z;OGOmM0*bjfLXF&)D6Jc4Wr#Ivq`RM!$-TaBgOK6+xSObLO7hn$-r(OE;z>`|50j8 zrYK^v0+%bhc|3-$G(-z2(cbGnTZr|lpnmI`lGW$yNK2?aFVOTbqm?n=hCP905#^^@hd9`wjr3XFU+r*wW}pQJO@Qr z1>vs>pLp15Cy3*ou!2A_UbZ7Aor%{)J)p}T1o!ouXsof7ZS)V+VtEeFz38nI4r%LjM}OI|~P z4GXj(_r(0ZuF0Ez=!#X#S4t>#7ESg$#Ik9!e*6OlMCsW}$0pbFBnD2{^_#5H@(Y9h zsBtn`)*lUgo8IeaYQybl@>Vr-ZEdJJ>$gcFd>AqOS+7O7#JvHQsrq4cZK^tpr4DH~ zJ)*<1QUHbd34XsWT^glpirXH`<1F*sdJ-wUa?_n4n04`=_{4vs#YVpZs0-1q!CvKc zd$jT?o{S9Q(!;xa*eUEIu<;AdWlo)hQ=doVWV#> zPcV_3`TF;;%{2<3$qti;P5DxFa&LXgW}lrWlW6a?kErH-Nc@mxYkTkR%48EGkIya8 z!&e=LeNSb8W%C~`Wl^2huZ^{E%Y0GOwRtaw(SxMZD2m_YW(wT;$Zchh4YNhM9@L;tL!ulUcnl+Uux zHR7m8);#M0@|0#>ksC)wmh5&k;FPaGGGNddUeJq}rXjeY((%C-w$LXZcj2D!)IEtr z372AOUY()A_Sx@bX=Dz|9PFm3;#e+?DUamJPf(dr)O{gu8Pvosa`h=_;%EJ^l1#pU zQbE|MP2uC^W_vozO#<(fie&Dry#x4*P6f5p5F9Sb!1j((W)fO+1 z|C3B)Q?b~f5-sd#08T?zIuI1S7dlsqwfD3ghn{3+J;*j`zb+le5uX6m;oaK0miyz& z0NbY$$g||+okJZaPU0Hvl>KR*H0GHl`{Lj{prWtZRsLzDZj(Di)I{WRlM%wxVlNrM zY1fk@P_zXA>A|zm82=mW@mb1V67`d}h-{QE+RL&>&su$XgWI09l}1-|qORl6Z9iM$0-L-HIK&>tNVXrtlw8-nEKyb-bAI z=)PI^OdXWRe#Dkb`lP(ugj?f$e&|20Ui+yRkl^W`N5#|e*E?=!aulN@Q+jmT-8 z@w&mgve|iV)D&sEwkuU%ZjgSS%)N{ZjR?t>4Y-}Fl<*aW+y4qLLv^i7wb*{G%DieN zC~Cp0LO+@?BRN~8OW?Afd)~?cWK)7QmOyU0K3vFNcK5*z_<7?^?m}T>s+ikI5kJ~_ z=et{bcE7avx%phE83l0y0h8`;Pa+ch@Y`x{HSGC5^8L z+=-d2@&t;6CW4ML;V9-y`W_|_=+Y;*TmUZz(s=M1msXy~djuhbbD%pl2^JE;K~=Et z>tBA8^tN_f2CU1jdBnr=Z3siPuaB^5l5kXhX*uohSir)E{j5w1xjGU;%e#7A)b^Qk 
z&reFm^H}b|&|}}m-Kep-{gB_~#HRT<|5g@Zv7`rk)L0+m?Uol3@!EoS{!?u6Y3p>& z^Xy>DjZwN*A07V@_{^d0y(fp@TpX3*p8I# z1B&}Y$fBO80rww*z6z!92RFseVmx^X5pQDqpFrt*;Q9?w45Wk4r~Ey?CkC}_K1zH) z2sDjzH{i~bUIMMfi(!Q``%nVsaag}~V3W;t&+ly6o7w{iUDJC)?e``i^8}rN(q2S_ z=V9DnzS{lKF+F!O3HaJiW%>}L;!`u^rEAbA&;g+R#-^&*kW>cDSEnco$Q1$tG->}g z_`^RSsgc_rAnvr44ikXQdgpI^-BV?yX0vOj^Z0-dFJ=8%Yv>qnE3`fVxNqgU-K~*T zu>h4sbX{>uOF8J~HruGl#{G*Rx0SGc@BC&o<=}bS@Em?wV*~KagKJidwp~&N(Vjqf zbl*OT-k`+RqxX2L5&%Igv4~M1(_witJgt%c{5NNEMhffiY%CBi_YOvJ&#nMUX&(BB z%7om|m5{4po3dbzx~&Lb8L(evNQ8P;_7@``Y_5*>SebD>>H+9sXw#a_OVIjPLrlL~ zC>zWp%r$!}3mBvN7IigpQ#QbcUVcw^=4|feJ<*?HT@evqvqm$`F4kl{h_B}LbiX_Z z4BE3O?K2tO{Y|cw0hARvZ4%%Kx|lR+W8%Y5Q&?yqLHg|On;Wnw2sks)treu}(oYR` z(~6)qd80$k(d)Q_Z9SscuCTcrwnbsfq*d1ND~!fq@c4~a`=`^zgqVoO z$6_0U8!8wKb}A&|VGisD@zuKtRlM!g+5%BHQ9aD?%!%bOR?rEQW?0+ic$p1e_K#kp z0X$t7^0@k47EX~hgT|9 zZ%l0Gs%F6+_!8z$z5d2d<8b@L#)$1~aLnjWo)oBfemY@QJF+e@=3D15d5t5yU_(WNKiJeW&7$mvIl6&N@qzc=QdD<1 zGs1tYuKOm(TCCPgu2!(;?!Fc`!*I*V=TdRy6e%T;yU#GtxI8D3ooE<=Gdh#@uDUg- zg7XILy(1el@8_#9@&<$@N;vIoczsa#SyK(v>7lrVd$zLF;LGvJs|F@VZRXx9<8+KFf=fDi z?h0;0$z5?AiFlQK+Z16)wU~JVE2#zeJFEj$0;l9d1d>8@9dz+gF4{ z)MjbrZ%rX50p6&`<;3Vm+kAO2&PRms+a9G|i5d}1;^$Q0DA~{iv5EB%!wYg?_7riP z8`w}11}Ie}vZUG09*?WTD8n!Guas4TCf2)#I+>M#as#qXwTC)p=z*KBm1B0deJ?fI z$qLFGsSmcdTazBw^<8I9=Xa3ipD9y{Fa2d}ul)m{cHjL?XkL5q_RD1s&xtYC< z-6xFix3ov2kM`IA&Zlg?w()C;vXN4cAj^YVr|ca4)oh#_q}i8_Lc{hgyYa*x3;U_h z(q}(@Xl3zid1pf*`iW)cRQH2cIfYrg)GsXJ%(qF|0!Hzt(@f8z&-G9F*MCy(1MH`P z<2t(2po$jl?Cn>>$!8uwI6U1$H@8>4AC_tLB83D^32s{P8dgJD&f8sy^nEDFOiPL! 
zW!UkTrM|Ro%#iOoUq|~NI_|7oj0#;PtSb-?@$@EDaBh)y&6hRm=%{!-K#R&l;#B$m z=&$4mjvBKqP1tJiByvOEISxMJqU^gi(r21>^>XmdZC;?UziM+w@ku+%0$8}s4pkd{ zu<}4u%#`92^h&&%B(G8eff7@9;5=;Xy&PBhmUOc-T{1;``Q30-SJ6Fm?Rno$gL$)S zF;XZ-x53k0kn+Gpim<;Y_24;Unq7O<4XHtK(5)YT26tX(Edp>IndNZkD(FRTH)DNB z#3w3Mc9Y_QoA*J_N}E>bn;Y!ES}hes<~TQhh+uC%*?v8lp~V+|Pe`T|bd17s0)zys@=|;Z}d98bzSW8yKZiRW!H~L zquCmtj3217QaTH`#JgW#?>S8!AJkM&d<#X|YtyC8^q2q>CFPUqBjwWjWlF!uIeK}ZV{|np0tCE=a&&p-N8Se_G-3EX@t(5cy;>OGT zR~vZJYyFB{B@2Um1SvV`fIiFnDD&}o<3DTgV_h7u$~A|h%savKW~`D4pF+fXr^wNX zT8|zL>D2fGY}3XAPgsVWOntNBBkaVoC59PG#Q~y74}y-A%&tvTg}X!mb^KO6ccLPN z8kQP5>pXkAye>lc;Jiq1{abOjpkV)s30~6*#hL9HuqSTZUYh_JhI$-xGFox~O=;*P zHV9bx-zc3NhSV3oJ#SqCc%aJwbbl&5ims0^i3i{zfhXq9Yr-G3B26Hc`-v`xR=d6N zNY^$R>^WmTY7@?)g%$Le8O{12O3@yu={`}H*}iYS$Oh#IzQw5eR+nPD@zBn?F8^q{ z)X}4=QKw`QR2@#w_cN6qqk%>4Vu$Vx5aLD|kxp8cAbQsFtOv+yx%b^B@!SNM-)<-C zJ+b4(_YZmjLehg0LrGgzOPkqQUon8w8l)z489v|e>=D56-F-Xwc%X1G^m$W$6)y$# z;pVjNV--@5wc>M}g=Lp}TI1NE*kax6&jYk@+JYvS9Fc%wxuz7jyS%9#VVUNMga!mN zw5U6(mpa(8d?RLVxG*sOU)asLqO`|A&W3t`YG^!HTwJ@2=q!CET@7%{O2gQTXo3x z+4sGY;^FsYlU^sU!)U9%F$`PaOCrABX;z3|k5(G!QW05>0x-Bg%G{lppN?g)eD68)%Pcl+ zR@gs1XzepzxvngBvn*wAHRjHv5QR8J{Fl;ehWjXJg-s`cXg9V&I4{tsh6d-rz4XI5 zF4;aSd>^-TCm*dqC{`p?(?FIHJymuGpz)nnRlk|Bn~G)}E`6kBTVxjxyJH-8BpD4!a#&+OTb~cWveM3o-?t zXa=tJi=c^4v@|Y3I^6cu^?tU1ZSDI%^{jvY|2fnEu*<%z>dN?MH}bC`$ed^Am;B0e zRrsGv&a80~-*TT+HXF~HPHF2-4+~xUsIMV&n^pel!>d0oJme_&oWV--<2_lTfTH^I zHZD1t+p@~?!zrVk5`Hu_#`PUDw58i*&s#{`3|Ms8X@Sp$sfu9w)?4A`zq!^&2II#x)O3{v)pNf&Y*99|y_4eu4QN7kZzaQYxup zog59~89JOFe6)0Kc0`@*mThYoA|1?h46tX4KAYWT3>Hl9ewhtA)r_oUk%JR`I7?n> z`YpTfc%2<5=-2OC;^isUoy~=(-D9GeaBT@DeCKjWgj1ZqbuzBfO~GaOM;mY&m?`4* z{6jDJ!3YsCZr9Cizfe7O>0;sSqsdXzlu=Xv?zWm?+LWY&5mR1dt?rM{15VOKF)blF zraEk)IWA}MGx$}y=GV}Zomr=rI&6eEt8CSa;B1vr(xxvl4d0W|lAi?JevAjgmU7y@ zdeU>F*y7~<`|Cq*>imwPI(pXN9Vy2tP7XYxXGRF@Kpa z(tf^lvMjk(-40%4*3Z6A)c%wAPMfYWZZSf*gVDE&vfY~OGU{YO3DGms8NF3KIZ@m+ zV!-~RenED}a1&Sju1+SzTMaS$0WYoRBt=*f@OqnIC7z_MLx6a`e+b1K()|X1)}PCZ 
za_AF3yR!$#nJ2jg!JG`LY;@+xRA2rt3@Bs$vijR1Py>C#ZcvJoP=b+D~Yh3A3 z$ByCW*FSArdxwIVTq2LVLdkpW;3Wpj9njWoImryS(Vm0Rh=wlrDQk2OZuU4qWY%YL z(|^wFR%^xww&a#e#JEp6-4DQ`p%?Bum48SQU)_mbDEbn5c2I`vy`{oQP1(TU{=RN^ zm;9fr^Ui7nv2sLpwf2ao&L$S?qn9wWU~-#95pi0uy=sbC?_OVtVkONE5K)E=5i1C^ zw1~Jvb6OI}()Er6r{CR)rS;zLZA$UUi^Q8xXZ?tdBjGtR{2N#>B4X###q?(?aosQ| zX}rpz$y#naZxtii^Vv-m-#I5>d9h~7C%OXI} z?F*>=_cV$L^nx}{#5g6Si&W=O#C*^d{QQO!>&aCj(hz=2D?s;^iQW9;|G=8^ z0T;+rZPklcMBhlAESmP<T`6_Oe<3ed;ffg3G$(`#^WIG&5Dj*z-1+klz5eJu5HMG)Ze~nDO7Fz%$n78*-mr* zW&FV_X{FNp(i58qJd?JnmxpnpCsD%~`>OGYsCGYh9E7a zv{+Fg&Y+AMd4+k!l-vxsI$vJ_m+y&V;+^$_F=}Xj-lqK`vD)qrR(rkl?LmvLhp%VM zy=vL28JjJfxUYrM^%!&W-@|~J8^S+NeAT0SRD!SKk+$cvI2EKxwIL4rE?VXuv|M8b zm@H_uJUH@aZQj=2s0X_hPe!RUIX2*5kG5Ip;uF!5IkD0YwO0%8a39)Xykw*6MHBU+ zXbW_68Y7fA$yA?+wl&c)7`j{^%J~lFi%CbyzLm9;2Hea$r4VfJ&=S$a5Le#pY(UN~ zg`$~90MA3`GLvndMkv^EVtX-5;u}YVk()>0WT7DLxzV!1dslrZ;kdtbu($9f+Lh9i zRDe*4WPy6Dnn2YIyAL^AZT*1@8| z7q9r9PqFZF9n-%s`XMG@ci2TjIJ|hYTNFwkGNtT%BD^L9_u)?RQXCwr$6Vb8>n+Lm zR;A0*M8hYNPP^b`K!o!hr3>?-jw%Tore{{wQ|1B) zB}7`!EpZ2D9sOmz3x-uH9`G5s0VUoha}&sx#~!fbW`=5TEhyr)rSObRcB2bj!|Sst zx6s}_C6lK>JGPKOSUpP)7n%7fIPKy)Sn7Cx?~F8i*6mxi))`0+6A3ldTTd*j?BdvN znlK}_Te)?c;Pg|e5jFz8eMZFerj0Dyv2xV7FM4`0ORz1zZA|_S!^u7Z-Ge~)c`dI2 zkyrp;M!H7qbAEHUQKdHH3ddtt3~}yWV;Iy+ZUaGqvuaDL^_PRa3y+j~O;N{Pg3i?l zG3<{;3#}-fG#WZ@qY3KmEw32?r3BO)6X>!Ki-`Dv$n@A|1y~$09B^z-1ij^@M9}DiHn zz;&F_fu_;J-9T=+`NanR({71_A+9~rHRgKPEZ2&b`Ik6AaE+wicOu(YVw3mgYG31ngp3@V|XUYd5q z+pO?`QGf>BGUYNVxqb9F#o-OeW2!;(uKye#kqP1eldUw(Z*m(6_dzrHEb-AfH8|D) z+~^wj*#q8E)T_F5;VnMhQhySGZB%%>F=jb7pQ&5su3w?u{aE))h|q>} zIm&jVxNsauL1UHqq;P?zc$~BiXeEg`5C+Cyn>n0!0N&f1j@sUHv^Tcy%s~1oz zv~g-!bnF8KJ!-2Y0x=z$pPAQfd$^}U-<>0g} ze;);Sab7rxR!@pEkHA?~DO-e8QNwul%Z4>>3;$WqjyEZzXMK~iqv8VqF_V9kC=9mv z5&+WGD@@zll-SF0ls58Tr1XEP8;KI6k~+~G_LOt^F^%V(^oP~ViS4Cs5xX7r?@vZL zHBJjVv*MiosRa;s{MqMhwI>aCyk=k46mM?<8n5>bAJ(OeNx=J7SjtEB0<>G?*)g>L zw>17`-R75s?gW26t$fM#TP4oj@2u|&Cw$uK){wj<0xzvsbm-SS?sqA6TXx}F=C?H! 
zJN&TO@-}R=X)?3al!i_R(t@8cbuvLL#_F2-ww`P`n>I-O(lK#gB|rHobRYKh}21{BY%2Xx+Q)~Sm@?Z?GEr*^*(61MPV&K)A+TUFWP z7I;smK31MI5G|1Bp6uNh81Bn%+}rjQF2(gmRKo^(`kp&Dt_r$_mn<%*H8fLsMXptp zQ;_C^nN=3sC-3%>7Zlq%u@vSQqoFpd*gQ%8R=<-@w!SOY0+To`*aX_JEthi}!c%?H z(FV7$J%of>lD?DkCOuLzbytX~3`=+`HGtMWa>Jkn2Tjv(wgzZ}VehafHEXz{u!BYF zRMhFV_}*tmJPNsKyodHbX?3vZHWUz#n1%w*yzfKMftn-o)Mc3~ye_?HKX-7Qx5|06 ze$G!{&nYyiCIa>Hn6WxdVZ{Q@Pxrf*X~gsYsSo}q{){}6Z+OevR$DNbv0TYn%IBqm z8nqdC#F6|}dU=VoQoz#CaHD(;AUws~r!#iVy%86CA_n0||8VIzd8XswCS?i}7%!Y$ z-pMAS%3$q@`|AHtVD+T#{Z`($zH2ec#QLH^CBFN4z@L2 zwGkhK18md$e{e9SiTSb5;3-$-9BS`>Fk9{nV?P61rfiPa97g{I= zhBR7N%(u`)HSYMxyWVDU`rn!HuyA@|!B#YV^4jU($BmzRJY8H*38E+YGXWcAO&}dC z8#XJP_T#>Z-GZLCC?O(c^Pzv+!>VRA?e45*DQv!{Xn6CXoZHzYt?>1z0(rh-;|Ads z`i7&sHRW_tZm^jqu%M90Oe*>Q?^FktX&GaA>+mExsYz2 zr`Np1o>K87mr)vA;YUU#D~E?r_d7Ltzv#mc2J<2*LgHJ@^3xoy!u;yJ-P@J@LwoYq zllQIv_fxf-m*ljkBRT z-g;Y`l{K?BC>wB^Z)$gUL#Hl6Gy!EI$JZMp4HKTewd2Ja0gCvIspYpFs>Lg4@9)JH zGzPn!^I(QWYO^>zB0b&2_Bl^4Ud>}SF2VF}N@~4cKJin0*B^cz!<19sIH;Lsni8TG zz9s;2K-2r{{s#5%XV>J{d*BkP%`FC7>eIE0Ax3a8zLT92(ACDdj7PZpC`0#^~l z-P~${5D_2K@ECyDo1>+lb-0dda2B$kaF^F8Q=5IRih08M_4GVarvU3^OCxUpR~i z`IS4#y@p=V)o-&S3eV(B6h6CU38}8?jKwmI_naOh|BsWOP8LKtIpt6#@exa zVCLR5kr3)Q%L&p6Z!aOOuJMiDW8ZA+1gJLVJQ8O{g9T>GJ_hsI2QReSgXa@@c_>I@ z5U<&l*iQ%Iwln?*!=}Ah>+q)t*RUO-8Fl}K%I-2@=mW_KXJwx)clXL>yC?* zR3@hlmP6YQJQUO5pGOe%ROg65g`14#;}OvZsxDl|Qo6Y(U}oW7l(Tk{slcI+0Km$^ zX=1-QH{JFh9oKs25-FUNdlrqAo%x(Ug&(s)aPfm)n$iz3%VyUxJGsz_Lf=BS!>bp5^JBu-WmqVY3D%|VZM!0qIJW4l2U@SiJ4M^ zh;tnlR8vkJKB=~33*MU&iv+7R%u-Go8@H-Tv&C3zK8cdei7UQ1Ol0Qr5;vkzw-sI8 zG|OAn0RL#u4wzVmu75?OUU_(`V-!v=^ftuWsiv6Q{tow$5oGvL9CdfJa-RM7qoD`Z zqASS`b;MEyCrpp*Nq4hmm-$__OyG`*(mkYWo*p_`)H-3pv&B*4uUij#r8mWzTXj~e z^j!DCM|CCuJv2nuq%W=srU8mI?_=H)M}|(@ig@-chqNmgc*IBYj#?)Q1Bfh=>QF?b zzV}MPK{>^i8}wj!w)kQfIEg<)qV9C$%OPZ1RM=okYJ1T6t2cCqe7li$MO%%dly~bz zOmQ)xvTjS*P|V6@B=vix_cSuQ_tTA-u*WjF--a@tLqs}xL!~O`mUAJ*>7Ii{aL2Zu zvZjuo1mSxSHYDYTW}PGo_pMl#t)h7LH5Gh{fbbsO?iwkDen~eBZP$n=GQUd{FFSr1 
zTi*0;4kga1A4|^0>5(?Jkqp2aN^)ZI9oo)nFdsG5k++WZ73_+f_C(?>17ohH z+P&dce#jX{;dok6q`gF%3y3rVQph?|J-qj46kP-96YMVD1f$d1CSCpw8d1-HXDUddmvTmvIXyc!X4$( zMOKFTQ}IKVgi2&}*q3fqg>=?Y+r~zy=fobCY>_W-;PC6-IoOedxgJZD(?G*$r$Jz> z=0}-j$n?QT1@1i`c{=1Fl~lN=TvQ-la(gVhm;Gg*J6;`f1gNQ_YW2}SmKWQqg;2d0TL(jX5m?l{l>I0OvaIvbHANf zfRKumHhY#=mP_R|E7mFKJj2Bq25sEmg|B>Vjd1UXtkAJT-;?6vR5pODef7wcxHTDQ zw9KzyDz@?5_2fE{!i7xr9B^YK-r_eMtN0>wkbCU3-Ykz9=KQ_TsmXHS;8C~*Z#Au~ z(@ERM-0j>e2KrzUF!|r(YJX#>MnLm0@%6^$@qvhTx#Gh|t8ZDNBUc-o&z6gMloZsm z_@+N?l)C8nWJi4h0#8RUy#>Wx(I2vDWzt@O+Lbg~3ekI0$rW4nXPtI4X2DEL!gB0J>}0A` zPPJlMYMeamSP{EdvpXXV>w9vLtc}eEdh6ewa`{d?@pLbfa`MZl$eao0b(ob*T~sG# zgzMNBGdVxv-6BP#!@tT*zLs=;2^XwgVIF-OuK_vDsNQ_l5T%~7v5*5=ACFO8w>YM} zyU?D>v7u@8DrsnJCX_4O^sPzY_hRWhv|yM2UDIU9tY+e@5J3ul`CX)vfgW4nV)x5i z+eWt8R>|11LVQ&hI(a}sF7eD|7S2BcoZsNfHfyIUb#RkK>BoA0QQy(3bTiTe-I3%? zt6$L6aR^zM=z3Z@VMlCrjbAZR8$ea!O~qXU(%Ec8mPW};_L={!d{B4M zCH)Iw`|bJ=&3RyoOD!-50f9+yGt8~`k`|MzSL{X14K@aQgRP10b$|5dBvKpnHUAn2 z^GGoYRh_s|%-C|LjyCGbm$$*F#~;)xsT}BS>|8dI@|g*6YR+I-k)E;gSqFy!q`3j|(`M2C>7fFGkRA>o;yv}i)^ELNvD?05NuLQfy zf{n~;hUCiPwO?lZC#LrhB^zOGePXjVnsQW5T1>Uwb>r7@G`EV8YiAJIDHK1TJ?q({ncGY7#WyXrpl z^Qj0ML$L&s`-%Xw{gvprj7ueL*SG4aE^V#E!D(>SYE&pKhw=V76Y_6et(^v!LtH9Y zMyZp-cF1p5KFetPqdC+7U)#1FJ9NvSiPDw1`t`P!eyJ3h%9Q;#XeH7l%iR`p!~<7; zyqs8&qd}YP(tf&aA4jPA$`*BT@XBghiuQ_S+-5OCZd~@v+9>fiwzN5}mOAF<6LOe7 z)$f&LnT%^4`=G8QN?qgI@yy2Y03wygvhABFyh9qR=W!Uz_~1MGg3iuBS!~qcLJYh2 zml?_?HdIfrixRs=*OxnOE~ElCesU4eW8F9wbc3kH7lqKgnrZ&$`IW3(Danai^i|KE zx-KQ@Z+E`YmjEDbd`3O%o4-JdlEB z-&LGC>c0QX?LjU=J=8KRqN^q&L-gFvOv_(KiFNU;%U`9e3@N@;Nu%%KcY+j2gqJeq zm#Mz8dei%3RFYIn%WWws+x5%`j6-c$`FBB%{j^W>+w|C5OfstWq#pY9-2^7S|V zfVwDRHR`jXb?#HgKSw?{xCxr7zd#hk2J+bZlD>OH-5&G z$9xBcQ=Bz*^eym|3H@Y0c}4Qe#4Ua3&MC`|as4}|Vskx}VMoOT>KphOd`6FXJhk$* zLtKhW3#+V>bCjmlssGXZHT?FH=3BDKX9oeQNZP0P8T&}(lt1unl#=^4E5>o=ix!bm zUoEdF+u99Vsg1T5LL1N0$t;C)MK~(!Gf4@o2BWV!hzUwjS*0UbTfXbFE=bJTG08Vy zeb#msGS%mWbi)XPl6760L)ooSPMS@uZO0|WAEUVLY!A7E@hCYQOhGkWM=#Z*|?7E<#(Rh(X?6`Si^!_~)>-O7=}G6v&J>*6nsQ 
z4gUyyufFdjXpiY*4?S9}Ow5Jazk0z*={4mOfRUr)wLQ#~++g))s3vzghn>cy!6@|G zFgoLmH@|!@w59=Ri!J7o!?BR7Ti;?{zGd&C%QN(nZd7xO!FTIN7=LrR zENx(Ryvui=2jM$r10jq10_z6@W$*Zeb8TgCw(|lSHvM|iwLCF zABO2NirW21s#Y6%q{YnP)a!M#V)22CmZ>(Ry1-mh?{Gij)IsFAjaz@IPNx2;(U+C@ zpqr8c0<#Zf4STg@0e7t`NUEJJvPRyJ$0f;FObVbGqLJ*_LTjrOfj)+)@v++moNjdE z{=*vRV`0aXlA6kqWw|k7kIK{{2}?;r5bjoMzUCcYHkSRY%-f#do$;qjNLYF30qB+O?Vm<15KeV6|-p_aapa zbM!Fv(=)0MWXUe5_4z#!(8$_o`Z@4vN=?Xd;zug~T2Ar*L)Ux9HPvn1!jA@)&`Ss%{AwkV~lm54_XlX>%G;Pj2ywpizfb)By;==z`3FK_1cTHKN`ucH}*2F8mcv= z5)&)Na}HJaV@`o|1)OhNHDhQeoy+#i>Ma%HwBrtsYTYJ4$I z-=#8LMQ-_IQRwi^+fl$)bJ~nKmVSambwZ>GxK&k!gm(S%Q+|(_;X*ljd%%dWz=307 z4F|Pn``$-OI1}~)D{(ENAm5ux1euw`!KDKo;0;3IKUTEne6pz|Y;G-=BcK{*Dg8y? z?Td6l(?WIP!;Rx913Bjb4#%MLbcfXEd@c^|LWc%1flg0e4&9Wr%6++T8_Dn$I8%O` z)R&h=?5s2ePOSTyzuah;Db(28al!n#Oeo#qI(xZR3LiIi-r44ppxWh5)5MMbr{5Ih z#V%G6#hWdb~p-&vU-w9<{q~)F-$nw~N>Ad&82%EAO$*S(XEz;N|5i`5N z_)$_SG^K-5JM%}D8;+~!Uj9J706hmqLXG=V2z}n3@zkh2Qq|jidoC^(?4pGKB?uxs z&3s_3x_BRVDWzCK#=;uXA>UMPT$dH2qn7x+r*~__vNj)GF`m?7P$BB;a{?d#y56t1 z`KZs!)neTLGhu?uuzGzIc6Q?}tTXe39g)AWG7KZWi%Sy_FdchkrzW<0Lfhas>y}$^ zR=T@eI%2PXK?>Vjnsey_f7AZhXuPaLH236cr}EIirwsSLglRi*K-)*V$%?T-dw7V8 z11^_Y-mQ$}jlyfp&l$TT+4Ez&ClvJTiJn-h9a-=yi@MXu!xe>~6HRApJq0m3?*)jF zuH&|^I~2-w@rWKV=zi5$-3Y(g0aFLy?SuwQuGl}2U%BgtnJJ3j2 zi|6V0Cc%H?i8PfZDSM&Stubwt1zM$?Z?0!?rn3dLZJk8LUL5S_1rHN1db0&*kY696 z(c65;Aw7XBYg&0z!@`w|Q1zVr9*8ISL5rLIg)njp^(k&vKF*|DSBJk~wv&97T&s&# zY50?63r^1XJ;9}~@!W!*?d3cbVYkkWxEbZDDb=Xu&KK2G!#DFt3lUs2EY@+EDrd@C zU$aUh*9ZLiDSdkHqfeUVCXI2nCAjALY*{CtyUYPH1?y)U7Q%RH)J%P%E}eBMFM@W( z5&EPf+9v~17;efHNix9%`HI;Sd-{(lqp-*rijvpsla55Gud$uZ{k@-jg9#|y6ih{r zjm?&H%vIuBmZ$l{+cY>nvgMv17BJ@N@{c!z9f#{-M}^IWhiW=8Z5;y7y${O6T54j| z%^*y%<%TNyxeN znlid$D=_5CI`m||Mn3PVtf}K9yyjsHy2)nNQ6+)47%zkH^+sXgqRR|b%x>V9lr%iu zMt)Df0CiAm%7f0+1Y9TjD7d?{Bh z<`(X+^dDc4qi#C&)}rRDZCPqtPYQDPNaD64`w;P006n2z2d|5D_mnGkyU_CZMgF62 zHu4hMFh)whFm+(I+ z16#V$)Z*=ce5;i%L!65%B)0uAM`QVq;ReHJ+CVSBSa5_phj${o7XP@iu&DWQ8ig8o 
zn7oXslxN+PM48K!7%Kf=tl{lPKo@#j$xwrtzKum7?V2N!Z18E^t|ci_Io#m=$^9Xd zO42m?)&a_otEDutw&a*)h6EntgTb$Hj9orDx{-tZQ361S4KNZ&coj)nmmL~rTxZw3 zVrQw?Mjfi;yX`LUPd{vOd1dc!WX0#+ZjcXSxJyqs>lVcQ9OHZY3f;8f%RLJ=iq`++ zHV;90%B&ZYx=kZ6o5cXl!*4;PrcEjTjKJ(ng7hsNp?stGQWe?}0ovvtz_ z!HRj?>)NAYJ-lo6iB_ipZ#YA6!}m4@W`0}ve)}a{F2g#uO)hF(;3ZkaRV60X)nPqa zl<|R!@YlB0l8FSfiu*vK-7KG;K~(>2X+^2B|G;tm{J}^br&>cav`jxs@+W$_>QkH= z;zvHTP32rqo09v5-!N5(`rG@hETyfxprZ`GOZ(!draF9WibUr#w)K!_lH*v1U+VEo zS$2;P;^KMTx4N_`nlOW^Zm4~aVJ+6r!h44_jp{s()mM?E-^#Jn;qMyLvMPB= z4m;4-ipcp?H}Qh1Hvm=Eg&*yTiz4xepoZxX;hdCdSml5t3!E5cd{mepqfOWyH`6G5 z*4oySxh8~rzB=U!F3)m%b7G&@)q6Rux4LE_-i>=?lnJ>XZ_4+5!C_J$qJL)A;?CY8Qe8a79US|1bZ_@_vpwTLNN01pO$ z;~kXd{h~!C75zrk7qQTXNbd2QzeDBM&C|4~3hLBeNg5v)Wxe8O0jGYKiP8woHXPo$ z1;rR?M2TaIiXfiJ*Rj-x{#btG``Vb(=ELjIj-c$_LOj2~*ziDe)-!A;1)WnN97{i_rn({K%{0Ea-q4bCq00o+Qw? zN}E6XtYm?xN?)e$6GbdP&?W0$(O-j4Ho^Vky)%}canyA(6LbKc@h%BHH|KRNHAeM@ zPDe%cxGg<8OonV)7N&_a`<_IL+s^ci&(#AMh#8W9_I+kWZhK<%O3BO`cS-%J{ww+B zv_}HH$74JeMrJdHeI)MpQBa=tAX%I<@?ai@>P+uQffCqG!7ogh5Az$l=~dU}HlH;Q zHnzB5=dUD%(j&zL;`CVpZ6|xra~eQyvE(*#K!D7#F_*QOn!)_GyEo(!T+9Ix%rQ~m zwOR~z%~ov=A!`0Z*ptrJ97gV_HGZ}igc0i!UuWe2b+Xu4W8GSC>*u9>wUBzB**oua zZ~DErE4$TWH-tg}wo&FisQ~1%=P5rtraRDm44kt4)D%dqz*TcIxkg&JoO>j4r=khq zOP~2TA+jV==c`lzgdUdjvu%L2iP|Ll82VwvGh}D(v=NOzN6xaIp83KBp`!%?q5z6p1sCYZ3MGC=!IecXia#9r{TZPyfg}YcIoNScf?Eey z=OPrV`PI?Q+|D3$DCw2!Yyo6l!{tDovE_!tsE@>Y%VKE8?}6M!I#z9b+NjV<=Rj?y zz1ynK#J&{CZa@#y`)gxW69M|8(8%67sItLHI7(T(=mof8@Ks=f!-q6`H+V~rXOwH7 z45GF`vfqSW!y(!{MUo($`B@u?Pps-5EndpTx=0*}C^gCl!dmK$0{y^O_r>Zo(g?ym#v?$couVneeZ4UDBm zN^Q)|-8A+mC&>Lr?D{wm zJ>M8gGb-oeaT`?=#()sDte?A92V<+{nKS2i6Z-fhI}cT@TbaSzhzTm92kDBkGdt7Z=QB=tqCc+gTTEzq z3DDwQV9ti5ddd3&5w+;Xiqjn z7}ZWUI=n-+1#L<|8Cma}W}xv8b9A#)b3{+l%YW z2oG6&rL*^rXTB9Jk{zdKXp-<6mC5hgnVvj-b&HDdDqT#B-kM4a78!exVX?r^zJ4hf!z7TKbUE#=9O|Jp=4jn-dg?`X zt}xBVlwRKdy-M|%&?&fB0mV$ew@b9b^Kyg z$guYry*4Ff(EsIBKF+7QhDje`N73Lg{}}!9T}wAhIi7#scnk*PjX2KkU(DUKPHjGr zaxEcBM1%jsH7f%yC1HR83_?6PVc`W 
zph$Lkv{pTXd7TYdv**1t13ba*D{G%$V*R`iQIEQBT=Xt3irl+9J}p>46^mS*!-{g^ zcJ!8_rQ|f#N?EkT@n8nFbr|^iCV_0z4R_J;%=#ajza?iya_=#j^Y4C~_1K`AdsRwE z)_(kZg-{RA|5Fc>sD9?a=53^YTI!&^O1ol~ezH|m^LHSu5X{j|cqpC#*5t5tb(FZn z*+FFLa!N18D;U)O9!QtekaW93tP?(e_Bn0Ox{C<^I7J_+~5(DQ!w zl$O=x966z_sa~+aa7%*r<0EX}u}3&$pBek0zq_3^^tJG>qU?*#xWGO4d>s;qAd9>& zd~<{SX0L~9x?r^%^rru;mxsK)Q+Cdugx|=NLC1`;YsytrZjSfU>R;+Gg?@PScZ+Lm zaF=Hy;?NszgA(c0HoB-%qe{ahZz6%{VL;Y|flkXn7d-1Nly0z2^A~K@?dHkWB!z+s z9AwG(E6NaoPfyqZDv%PnL!pPa1=N3kB~p>M`D(6vX)Ug4Af0<4?3t!9WUGJ1u6UJ! z*=0@0xm`jxWIN;&HOfgE{cK|4g9x{qmMe8Z-F&l^h+YEfi063##(h|*Q7y28k|{6s ztd7}Qn9^Ao_EY^6fa}!*9E19{#aed=8?cwe$%TJeUTq)%LGbg1$PDzA>K}NpD%MS; z>PDLDi+p@Q(?j{R`RmnSsdN&Vk!Kt7N7J9-!#TkV3Zb2ykm^BE<^1vzXXECYQ6S+t zERI=qoCM|TYgLsM<4B{+sY$V^KDdW`%Az7YlC+_L(k^17JLV*xYU~>f=o9#%=r~kE zv7Fozw;io?sm*c(&OMrQ{MdLZG6L1&44rPGTWQU?Kw8ZQnFcz19bB4wRE7RD5Z?M7 z(u)m+a0;~#xRn*l>_mziOBh18hj>so!}Y3kZGd($UuC*4Px6jjb5JVZEE5NvHLHNn zaY9+IbcXl#Ij&64MwQ#*jdi@=X=c54>Qfb+6N@+JKc<{Q`}8Mk{W4k4Hr<&4@NM=Y zy8|)w_PBA?|FFOMSbW0j`}^jiqIAXK0m3;AshO%hA5iWe*c4VV6giMc2Qb(3n)&8O zOwxIGMa!DqG6$|}I_A9od+eO;x-|s_%sWI%RJimZ@Q*maOO;pp3!(=m#FP0d=} z7N+X7hSx#!AMx+UAb=2G)hbd|9~i2F+i46!+Wpx~bqw+yb-vT>_^Q7yLZ3NJX!QYY zjd7+`mR}H6btNceI`LKs5WEe#@@ISy}XHCCRSrkjF|W-^JHtYdI7hDJQ!uY~rV0`i^5d&U457 z=NXbdXnT_3cKt)37#1jxcqEhkgfZ!7gdx!dl~OwKYiUf`*sConxA_%csX5S^>23+4 z^XX^Ts&yyk4NUfcE0xex@RTyEA~CL3OV3vGFuLk-3|%Rw;gv}0%Ly*s&FB;o4*8Nb zf8MLwysM-`?r?Y__d(}3H~y7LG%BkbvsyD%>j zq1144kj18*2?Q0Mtm_JEt%w_utsfK5(NI3xaMACji}5(}&1DkkBJZu9;g6I`I<1N2 zt)6U%A~2%bl^zaoXVrkaR4z_2&f0k!pinu##v40cqUI%zB`Zm7=K#d(XWUM#@CXJJZ?wvj5g7M5GyQL> z)agbh-rMwYfe6@-7pbbi)^>XRKXEi5aHjud^NnEzz?wlm}!@14GIQ6(7 z+4E}KDJ`AhYYxOy>kh+d6r#B-Kcpa~MK58xpRUnflQ=a?>tGlpu~d3m&c1hnf6S4< z<~e?Rt6e8|-lpAfgRP(|EBSr*A9xM}R269!Z?mrA-OL{jtZ~I|uyL#FyXK;4?eQvT z&fqg;Qm9wP;6lD3zGP@WfED(#Z znP~5Ps6_*K*(xjG=2(fdvs3&-3do{?ECOT_Rh+w;Swnxx!Hme zl648oz!(mFBY?68Y6F1J`Pc9ijn%XC`Wk*B#;{<9`eDrAN9_W9C2|hsG@-dS!uDUP z_AVl3tW>rFn<<%c7KvdW9=FFS|GHk-?hE=060C7>Qc%&7X4G`CWeke|$u@4C^$MVH 
zj+meinS=e+fvzini|1Qb*>&+UvdEggXkDluv>dK}w@>`Btko;7a8YBFOAnEpvfds)UTzuRZzYA=0(Zw@wm!eRRvjJlcV-2QKbhba0<@UFq0)K3cK)Jo!^r!I z$Xf9NkCn=TuG-n3LyjugTK1u|3N@ucr#()-f#bQWm#QI!h6iyZ)a`!j z1QdC!rTyPiN<|qTxyCnodta)9c*c$FMMiRDO)F2PZG?N?3u%Y=j~HE@^P}KqZhoOz zVvGx1T?nh8UIVgqV}R2*d>bkpvj&$ht#6V1dKX>as_Urc-SNUNI?0rMH9)T45UEJY zK&q?l>L`~8EZ^jQ?|&`cFp!1RNX`Olp}+d6U7Lt@rfJ)_t?>&sX!MIzsc}eDIU!NiqRyB*K2rU7|8hg@ife|awEX6*V}?jy)k^6B z3{Gmea_*V*`9ywFI~eSLuoV5Th5P2@l*r=e&rDFaxb*?{cl?7;+XK-@UA zK-y>Oz>)0mc{OaQo=8)rhh(D>tH^cTrBl)dR5IOB)88O7yD@25W=DqP681I*ke(Lo zvmzbl0wp|Hxrjn`Gm%ev?ZT3X1c9fc@gdbAM%;F^(s-)V@>hY1Zjbh5tFMOWBV<3+ zX91Cj^P*REC@eji81({QcP9r>7=ul2pt{5Da*+AWo(}CzZVd;a56uxu6`6y)YQrP9W9WovVk3|LrwIXG zCjM>cvxR&g`lxc>HdRD=-HE+eCpeM-VfcfpyLziUUn5*jxNx++#C;g z0GjX1KLj~CO2>HvU_lY6lWseK!&kdDR&UEA$QuDecshH0f=BmN&=Bq&(4zS$^RR$2Gi%s6P! zxpGJLXHLC~qbE3$uJQ{wGUL-thUya9%C`(2`f{Ks%{t zv|)Y&;NpA|Cwsqi5$bXGfLS1S>JHSLvo7&@-khJ2DoJXKTM@%{HD{zUz!NLyhR+-n7*SQpTgNcUvG`r=-T@J)f|5pGgX@W0@$W@p&xXt@4usCq=h3-ze zq5nXoF6=p|if4{e7$OeEhg6kfI7PROBglscTt>AVY7B-_-NWbah6X(&z#N z`@E6MAmj&CyjYWMs5ky2(TB3<&iEATJ_zNOl#yqTEXx%9HbcJ-Y-Z^CG5I^cJB7gC=r*c(+A%`pwu zI+~7r7zyWg7K1;6iKft<`)=tZ#;dUjKzEJFFQ{@$zPlH>;Hg&=A8IOD z#X+v}y3L*OM3y*pSm7^__{L`XKR}|oLRu7{985ex%=orZ{oTHas;S22jP5Pq)H*&j zHL*cNwRS?EXEVLI`5#v%6}M|;rrSZ59P@pW4=q&nj!{se2-OKQ$0d0Y2DcXp298x8 z7vVMf8cu|a{EnHxDi{Ac5Rq0_tH;1KQQC%J5Uz3}Dy(AywrbPATlE%QQ?XQ+XY6m~ z*WJgd#7uONqE5|Io<-eq7MV#W0uvnQ<$$Y}FAxn~8gj`~(Y5JJibjTWpvgQ92(o%j zN2tHuDLC?4xY$`e;t)5U9oGAb?&w@~oB7BiJq~-ym zHj7yeSJ{MaLVPjQW5}{5%$rm*5OhT&w?7fhm ztc|~sS`RV0n_dtEH5C0keF5`XqED}9-jOudOn3WVz^3BHEi89-S%UzP#&KJ^7xkv^ zg7BHvMzRzLO$Yz^QvQKsbXikaAR$?EHfVeJ)7O^Q&uXlmDJBe{(%j~UX5Fy1LnFby zfd2Y^k8viS8vnprRw;l14w@T3b!+5k{8Ol|G|K&(nG{N3Y}^Rj(~MIV!}TIRXo&SH z@AN!OC+4MBEpojSTI9*HeKE6S-Hjo_{>UkUH_*)Lzv`$!G8 zmie2cXA0rE>$EFO-x#b&cDt%^);k>u9M&}Z>^|yIOl;3+>nwF5u_DP`4Z*DC60qu` z70DAKrMuTSbP^_@bQ(O=Z#wIKRiVWjiPbp*cJ)dDvW(;oCnnqHrmh>hTpB9hs8s?^ z6t-*p#mIl6PFh&KP?3AK1?Z(N>b40$>&X2)C24?{INo%%0Lkg`+pYUjY@jIn7tzq)0gFQ 
zGNz@lL|-k%W3pk&vVdcA=EI8a9+P$^Si)>ZYM!GeiRP@S4wcffWvg;>b z4WK_6%+!S)MB6VLVlgxJ<=oh^;B97CGOR^7`Kf&9C7T8*f#l?2Vv|MsT$*8-p$P0T zZNe?&>d&ur=EM{LbX&M3#M(~v{j+JXe`Gu~b)9)xa%j6Yu~Yy71Tfk2P_DHtMnyi@KFr<% z1N;8RI=h%GFF-NMtU~6XDOS)#BCZ-`q87jWC6$uY%&`pwt0(KF zA+I?xUBxFOMtbTPBZJbB5wq`QO4#0_16>8b=EOSYK39elq(5K?!~~h zCcSsXr+wB;I^5-qG#KXH0c#BS5ZXZv6=_Vllr;xP?f{Xt-HY`8a}ZR886o3qPm;xB zxc3y?5YAG2#<{d1nV@fNm#mS=#D*t`{JO8E(D>cVJJIEdu+coL?R-^1y4SiXA-Qf? zKYb>J6*QP)r3N*)MHc#Hh8+2BfZjNRskBV(K^YIq-kngwD&gsSq07S=+apEwbL&E&K5*3zjrQPsJdg+k? z%=B7pAy1>bJjUjfPfe<&4LkBdeE9|l_rVr8yfh(N{?yLO;8 zGehZK-JVy0Es*i&a80Nyh!y3X6Ie%R4<|=zz98k6x__98`LBw-N*S;jm>&2Vt)>Ei z`*y_VjW5JbTp~F3Yf9GD1W*HDGaK z`{_;cy-CLnEvatdM-&J=}%jFt7~4mf5SfP_K%ZyBVvbpPeNs zL0~3@*;UJmTZ<-l5=2YX_-a>9DCTZ{9(L^CGhRw8R+*-c?j#Ur=;#T;k&13G?kB%Mb0;LR{n6Y;JN zaD9PZ^;9c!ZYR7adjuT`iOD|Y*>``tVW(vLwmF-W1(7&7j4mHvjbvL?s0`6UGK@8! z{*kZ5&EFL$Vi}Sh5GTTBKHp&HTj+h`a7CqimW#;MBqh-n-6t6E?8h4rjpEKvuTjML zq?*LvjLTZgL5&nv5^K}kKa?=e{Z8AKs{a&R6`~HJgk0v@IT}t#$E{{L2F6S~)`MH& z1vV=T8{Vm4#HO`@VSmO|sxUF^^UK^Dxynv)nt5R?y|qLq8T#wi`;uzK{_35K3*w>O zQEZisxOp5}6iJ*a&Z?X(m1-U#bl^ZHdaev&<5gt@pd>=eGkWs&tMSGvRGo3_xfH}? zR4%{q;-M6%hHPGEq)R_gdvwtcLnUVYeYg%k}$wn)h705&)#yabPzq@eQeI- zvt(j8>L1gRH+EeC*bQ+em+}y&0tOiYYvn2w1&DQ^+vm)yTeIa0ccBR-bJAs&CtF&s z>i0T>K_sTPKDWLOO$TBG9P~>50gITT+w8AdV|Ge8F7AX`D-v{5bx6~;JbJbm@RJ|7 z3rO?8_$M7i%FLbJVn? 
zL-TR#m6^jlUIIDtahwdZu5Et8HjBjkt57zs5{o%T4xfGVk;8miz(!|auf{cFo2X|x z^qip3rYfH)$>}Q1qT`wN1kfOaWaJc+RIAi~&nYW*kCtiTN@wftOXr?wFeZ^IYXI`# z3rFhaMC$R9mV;Ejw^p?8xnAMfGJljedv{Wu!e~MKGHXA;IGX|_9%>k}aiYgicP>UA z#vd-3)D9aid9<|CiGH9D&oT7IXOuA)%Gj|^GodGu!WUd`6e zj8KY|o}0c{!BNNOVz~dzhvJI@8I%&%~}qpd%5vl1JCW+101Pqgxco{e>q&+U!t#h z3iEO=ohfC6#wXU!rcqW$0NypGF+*KVA?H`%EYfyV`4QgyecYYCdmWC41+^yI%l zg*u6AN0S&bO*z>Ld?k!u`RtD02W{A=OoMuUMaqp>{Ve_o(EN>Cfo85Zgii_P{_O=| z=rBJ8cyt(W|7y80$){((#kTcZktvYK7Fm1;VOLWVr4~?}`sVm|t1hVHH9y-@k4;&# zb%_ks60zQP2_hP~f!3Sz*XQYW__dG`n(~Lug0sXu0-M)Jzj^ci7m%9{BM9-)_9VIk02g#h8Nt={Yi_Y||6GYk9^ zmKwigm+b0{Q?~}2Y;)ybq;ZWrpw!4j{79=)?oHfgxYPO>sv{ruuHJ`^N(ceL__;S; ziT+z@BDO}3K1Y?Fz+Pt<>-X*Wm$(W`_k5mx`Jlq??%K(_SNt!X?k?vJ{%$0V#2rzX zS@ab;u+wO(t0_ES#|C(4ebhbL3c;mvIz~eTJ;H^LfBLxnj;B%Np&YA>Oj$g3j4fP@ z)k<3%iNSF95>hLL`hzLMx^H~m-03f6X{S%w^ZOCzk|3=3eK&)i zk^p^9y9$qNg~y6NjXsfy{;~5-W77I18@7tC{QA@82inl5s!>mO28~GsFxbwSBbm z+Lu@v1m{L>*Q4;=daa5N=*TVS@f}M=jOdgWe?K*(dWN1lkf52YkfXI63RK1)|5f7T z$u0odhT0oh9F4Ic+o%-FJb-Tk@r#*V4e`AN`riDE3w7ce&O71I-0GK!$3qpV?l8O$ zkV(5;h|22o)F|>Dz|S`4SDtBWcYAKGxDE*<0E`lZH-hJ!@!F<%zX%Uz^`=!xc5h0S7 zg;{SUQRkq0PMrr(c6yp{)PrN!jt`)M?G7r_k}+p%WIQm%W8TVCvmhPio$PrZN*aR`5c z{4`+J*UGX)73VksE5D9Zb2>#zwT>;5ljkcD!Wgb=p-tskZXLjeLSw!u({r`%!C+{<+1mh7}}0i^5!na{$)ILXRG@<&J<#R;@%9S{qpHV%5_uN==MCN_Bjf!Or|TMBj=P&k^)s{;r~;25>_ddh(ni^m(%gP-bF8T zu{0XPjG`|D2p4G3#A+VtwD2Sz5;CFN!GiO)a|0Q^!x zNKBhUyh;{{Nb#+!A}0Y+l7JDa4e&h2?*;+GbTMX|1-HWW4c%oc+0F%4L?zU>lWTWf zb;w5A<8cf#{MC@QdW-#NvSPjL0y}IrX;*`q6wpwUlP8C0&2mddLeDAym!?$z%mdvR zU4utLgP$6P?yPed`IyJauGcjjhtfHkcXd!H8Om`fV@_UFE7;dAov(9L>-SfYj+NG6 zf?apzoRHl20mlk-y0>S1+S{ub!3ul(k#Vr(S!IbEP==Yp65TM3>Zz~4ET=eJ@LH6K z2yNQL|2bWWT>tUa&tMSTR>vn;D{pD@%ZeZ+h+4=kz<$PitfgExN1&!SpP-aK?orPFi!j=9qeHGW_pLvv$(dQ7O4;?c(6DEu)E#AnUPbV45(zH&V2VXun)Sgs^0MskBAx{@4J@Vr1^eue^S6NjuX7Si9CRNzP}X%T1}9 z{E#&rz@7nm2!c2@mXCP{iym3TKj-K)$m_3z}``}c<7K|z)Zk2b;(IvC;eo40Rw6EC(<@ycf zH&GLM)U>u_8_m0W82ji8d;7VfSm6` 
zM?8gjo4y`U)G`Zf8IN42och-IDBuuGuNPBuy3W%>VCHZzR+06h>iDm0;Mk*0pn#4F zwP911jb)MiRgm`|Xe-`*%CbFavzM_Huft zza*yEQ)Zb#EcCeoYlO+&1Z^YdlBk}C96*6@ygFU{-Rg^1r?sWv$T_s7`@9QE7LmVd zNJFVZ25wM%{l$QZ3c$aL!$*s1qM!TrpV`syO;p{&VIiei%l(YTB?e6$*yo8;1(alw z5!U5CD-TV3we;+gSifnm;wC9)>YF{Tz7jeG4&7(g3aeko&%TdUiDsK-Y7nr7l^o~@ zM>Ed3mss17X*T3&lKOZfMR~g*OKl03u~N$|);E^in)RkSLq5|z#A9rWkxI*T2UJsH zkG2~!w%mr9pXWx^+Eh8CmP4g1W(j^nqq1h!v?G{x7EZS2daQyP4+L0-;AdNw87W+* zWubR^{%BD;t>j4t=)YHikDLIp5o#<6{2hVG(+VYVBM>~x_$DZS4q+&KXZ%Mnaed<7^JZEvrqf*R-Ytfv5 zJV=_`sZ;PR^h4CSODE)~+Z!;fG%89I1yoVP}0R1q-eUl%BIB z_{Ws0Q%u4zKm2zKVq->KrRTb(FeM9W?y>S39I^&)##V9K+5kE288I#b=go4Y3*>)w z_TXKGVIeMdJjUEcAOlR0sH=yvw3Rh2v&WQ9Ovt@LeGH;9T#{2{T#M${7T)AgD!PtA z=oG|t3_2P3j;RC~D4?aBdy8>=&TYAUuU9$@wuoDuq0BDc{gsEkpT9_Ock>}zu%e|f z!B(()2ugT68L9Ql?B@>}I~q=HN}uIic%)Mw@IV-3CUfomt~=i#|0;jNpehSObek)Os9` z{A>6#i0b_Q5n!n+B(GgKK&P4JHl!ndprz;5IUy{Y@#K zj#g@pS-dh&$B#Z6cmpM4mFmEFj(<$LU`dlbxHtDDv!pDuLSR+}QXeWwUvf0j?gied z%p2MWKj^^Mz*pq%nI3KN7%z;1Febh?yCEm1?^FXb z5XkKG@3u(ceGMqVL#j5j%7?x{n%ZXQh$fdVR+^+qQhAmNc)}f@T1rWuR;!&#sO&YGq&(rqET@?Eda83SG6B(@1vt0B4lGB{5^94_ zIm2S*kL1TFA}Jy=dR1yUg7b&;O>M3sfQHcP=k(1sy_zs zVyCmip@EHby-%en?lF~<#O>J*@c7}5c6u9IBwgvJ`KzBA&S#pYiW*&r+$sjnd_OVE zU+mi6m$eGJkrfuz8D74Q?c_<@DXou0I#WFl{;{NIF+zsOCq&Np6)I79PCL}&F2>l_ z1lZfD##S7-T{d58qk&F%7cg@Vlf7Sf%u7lUsX93_Mum!qxM`Suk-{$`6ya$cewEo> z$JUl+%~+b<9W>;ABpiN13!3-tFte7APvUu!&jSd2wE87`Q*1?6+3`EOW2OhaqdAPS zC!jGNgLbPCj2Bb=`X?5mirF3t<_T-r61mX_2c@JssX9DJibFK2pK#P!j z_cf2j?6K5%5}hNuefB<1*}P9rH#U@8*Ry*~bl50+0C3Kd)kXQXAqb#AeaVp#VER*E z%QN>`9-fizNBoX~Z7lz%!Jj)wOD{{?s_$b+@2$NJ#Fuy2=&=NqB`L=qlb@>6FVOlq zr+Y}zgqfznV~MGwl(u#@aqku&$xzP=IFj$;=0_VDpMW9}RmZ zKOSaAR&Bsv(7~<~-yos=jAy&J)@4=iJ#IZ?Au(b1kR<~(QD#q8!B?Voks})gw4^5M z!v72x0~EHr%e<6z2<`gFbN;Qh0c$LRL08LYmoU#b`@BClP)xfQ7W-xN;L`5|eN&-y z8+~g}tWK5kDH9sZW1Z<e{|)6*`0p$|ElzLg>en5{?iO-6>+Fmr=-jouel`4@eH z^IFY!;#P3q11a^ko%JdbZTFKv(ObRS=89&8*rJx54g@g4Q5`K4KJ3bH#k42uTljkmX8ykJt34rvtEo zi431hN$(^IRp;?ptaB(wg;nT9G%J)!wCkebppDE(Dnnn!B)E;t_Q21V86m$=3B#le 
z`_ygZF{D|hXRI|5+9qYx5M<~v&*0F0GIKH=_Rag40`k1bF2bur<6h3z2U8PW@uzOV zCVaVn^uPN`ot|c2wJXy~n|&$c*)^1BFPO*p?HM=V&|XuU=GMtS^ZwlRk1tfdY|6d# zW%OHUAKzMdHPX!CeZIB9<#d#=#g zrioAcunm-Zl>5b)MaKxT>q$lzQFOXzZ6vcm*{z?4>Ia> zgSjX>=*jg{*Ix+1apMN;WNZN}fBpF3y?X+@|EzFcKNG9*1MC~6sz%ID3ycl-ZNQ1Y zNv2srv1edCnYCXK6}vfM-^PsIYYFA(xV%gC!dfl~q3>{+RIwk3QdaH%pIp~fZ1u9qd3F_&WD#wodOYhX_UWhS zddx~m&3rJxNhm?n)T3W>Mu?p6F(_JVyG1?xdlv@%Z`z)1_(zIf((yejmkcW_DIpSC z&u=xsF6`*n*nj=k%>sV7JTN3x0fk;quV+XK(PM{?&IWlcz<08W0WLxI-1ez{?)}I4 za$mSi+kdYinYT-8my@4yIhlR6Rmhf2b)CM?_2B|S!M1Yy?}%7E{8IhL`B%`@nZWqW zhsrQ^yl}wO1}m`h{TBG^5cl7Cbp7GeNuNKf3!@zRu4m4d<(n(*;i1m`jZDJG(P8;^ z1`1o1h7YWi7mKZt%hfaJ-t-1zJ8*agN2LbqXbbK2ReliP6on;-4~YPy$0@s5Z{Hj$ z>@>3C4Y-{k#WH?t8~IQHCkH6AB}=)0U7~c%<1VLoTpRvpRpj1BtYr{1UHQt)I;^l) z?7-za@OwDO8ojWEJch?K|DMbFYQWrK+12TL_3!`udsbbqhW|T+|N2Aa;mw;$V}qg7 z|Np1{m;d~b!@HeYh%n_1{r_I)e|hoex4ghz(v;x%cZ~>-}OHGAJ-Kb-1qvf z?<$}5S!>Z(Wztg>@n;`CnymrcT1Ajn(Lp|ZLctXVd)qyKJ!fEJTuYy~(KC zdcw9v-J6B&+e!jwaqb(L+A}OZFj zx6FEeziDYz(WFH&Lg6x+Yi40c>@`Ka5_%NTmH=v`<-MurY(pzswZ24xlPo5ak8A}V zVc4deMTibQC!lZ38k%JRBqqJ8HGdExpG4@l!Lrja3~>HYUVr$`i&-G7wArBPwQ}T4O-28H@q3HbHO47uP9yGkWUA%N@TQUV}EqLfBcSb*2xp%%F3Dd+QqoWgxxki=$Kb2Pd4XQH|Yv(Pu(pfJLzFj z3v0ajfkJIk0z-zjzB`k?Bv`nxE?75%*|v#xdVd!ubG(t7>kLWHzcMjy^c^xAI;`S) z)#?BNtgpq{Gq$nJDB7|Ni9-kt9HWfl)wJRk@yS!GQ}9xKW(kbH2z2Y^lWBI$x-;9RAcSvD~0LR#Fa^JgFMU21AQfY}HMM zhb1@-^A9oGW;a>{93T@({s?Ge>(QL|6W!-*72%Z0BxFcYd)12|vv|i^!NSQJ8-)1C zd!KHAeRgqZu|SVGUQx_t_dBK$7!F`Y9DEU(HwloOk|wI_Pgid4uB(B;;Dz@l2a)n3 zHSY*@+ABU5-w@8hLCD2C1uzwJIG>?)r~l->J1qD#yJQc0GmPwS%q z%Ods~m8&yDf_4*i*4|^Mhw8p_sl;aUv!AhT(%F+cfCal13cPan3G85uvQ-UtF9~Qb zbzUfIh7n?IvFu{?fEjsOYFM^9S-@ekXebL+;+Nt=J1=W#XWSYqURN1X z6{InMd>L*4)>>Um0Ks{rp2rR<;`I@Y!`m{grmw*4!fBW^1 zIf2V@@uA)TldUDMbb8`Jj=K9A@H5hGc`kWAVBQIL;h+2osw=oN03a((@{qyfs3@Oq z`8aF#{%DJvbIaFzwk2+PU!!K-@V~v8DwIWTWF~dufGny zlC*bG)U zTJ0|CvN+diGO-3UShqPZcS>&!n_!PMGlT5DhL+Y5M;W-OPq5x9rfOX>FaJM!g3)TM z{Q->lp~Fl901B}RNqcz^4LLfP58w)3x=vaqou?x8ZI!Z8fUTBZ%!!a56sB3<}e 
z@LT`=)Boug`-)z_bye(+iVd_Re|6HzWT9;JDX#m1N|l>aFtq$^Lbk?v$?2=ntC=*I(Jw&cu1TB}NBKIJ zh{k*-8O^B<6g$=`+a!P0i1?@H{8wH6Kc&&3czEY6I2H+=`RFEx2(!qs5dc!NWXfxb zu%!I#>$6JTUGQZs>8}B(LvEVC{ruUAC4ALV8_l|V;e^5KZbW?F3|`jNpC+D@C98le z?AHBucuv`39#`bh^Z}%peUx)*jq|HQNYkLO5>v>;Z(|zc`Lo~DOib!B z1?6GcD_-i-KX=G?uAiJzu)_~jak@FehJ;?96xZyE(@f*P9}pg5H)-i0psVaSKb-aO zWWfI^p#S*FJuDZ*szv2zj`hxWrGr~~Dip|(HrA>&S+$@AQpz>s+E*tt5IF~bG(;Am zztE&b65Otsty@I+b*#tw^P0g5?(MjsAo1J9A(wZ^O3TE9;1joQiaF>6T>5HVv;pnD z07MDE#Dtl{Du6C$uY?^)QvN5?e89S9EVh=#=8~r_ONrCp(Qy1IUibwJeh>S-^vT}> zLu{%wo{F6?u$cz}7d<>5{J$%v|50ihVZdiuF_!pXpYEPeR=%raJvYK5e|}ScuXCOh z7g@1d^bf4(~Z ze8)`5Lx;EyAwF0399A(k+B$OpP3#5Gg_Gh@)9K8%Jak0?e;V!n>mSZmxl{89cp?Fm z5BTW^=~q90_a|^5Km?x1HH#*`sdnM&&t>eJ4de&sYNOI4d?JA- zGOz>$aLfI~0WLHCA4j78f8|J`7;Seaf(0gl?am*UoPZ6=0)!FA97KM(`^&aBum-pYK5BI zR)ohUn!dhh{w6&}*WL{}uKTky@8JB$TYu70ubI!z$qxpGP(a*< z1F!4*Zq18NeIZV)7nJx=55D^U-194+p{kE%t<`O^91r){MyuFp129fYtym4{G*RB$ ze|xRSfj6)C-la9tk)UX$<1vQN0kng>ZR`Uz{1r9!6-VfMv_UtU?QXjQM`MA}*Ps&o zYGYDRbAA~QsI-~?h>71OmQNSy40Wp%rhk^sM;vzWzU11IoU6+Mu~h-~0qRC+$^Cb! 
z`_CPyuPBhcQ!lu7$-BcUp(5Me?K0t;eWSnezI%C*JGt|{T#dy$^(?;!%7rk+kLp+} zw<9W{y*pc=r7)A@2_Kjwb~C@4%qBR!$aSetIB%O>#;{T!pr1 z9&W8!fX$P%W%0xofO_q4hR~Kafuard&iv6JL8pcOGU5oP9_SG~2kbb9UF|n_;4LyF>w5kykXEh%oGaLR zZ~deB=O|BW8BN?Jbr zGT4P@NX7Rfuod%Gn8fZIAWCjzobC4gPC)fW$1(8{2d!Xrg^dY})WzqFvlVaxvw5PO zVs1s-pjzM|g!(3h4OY!`{Ia8t$Ii4i_(Iwd-kTWxc=*iN2VNEzUj1mDY?qa+0-)7p zPY8hj3|zj6{>;97$fo**ZVkhC>WnX7hY-S+U9Pi+@tx_N{xk7KFpu|aPc61rlJMfh zeiJM}r18UVt+V+bLg1Wm+-#4yX&k%M!QCQe@pcV2pWBa=n@2yV;*W^{xK`OG>K^19zBlh{{C+q(qV=ScvAdh>%}^bIwhr~h!i zV!66K%$cBdUfaOSLiC-+I6bZ31Fl3{>%Ga&#>wwoNo_$ud1!Cogu2D66ybYMbh{ot zVKK0E%B|{S9oV1prst56+8S6Mb6KsADFSE7WGOq$$k!;Q-c47_UbE4Y(r#}Ss3@Cz zq2j+OIXyL*s#7x$lxV)clulI$Z%d3C6d$czmnUAv6{)3qZ5zEv1FU0bx!oE!!n{vO z0QG#>t$8z2+a0|H!%QxTJ9PvONQcSbSNdHM>=|wt;G7`Nqil^yx5|*-Xvg_?RaU6; zmH_rV5}M@28qsSy;yM$?1}l>^OMv&G(KglfP6JL}8yS0n^~{jpFc-x}9U;GI{}+$$ zKSU74VKwzJ`pn#7xA>aZ+c9Uzq26QB#5*4Q$ig-wz-nYQ z;cff6h{lF0BAJi3bl6@td@>v)ss5H5lQxf6x#%G;MQ%4Qx^JP8m4n}XD3k>Q3 zNcr{N88&F8=_xkRn-!ci&09G0d~4k=j#vCM_qS?BO`bp<5wWq*@~!YT`N-^7KH#if z+0b85Bgf*NdxE+j3(4E-UZ4HQVV6}>BHAWJs6@}7FCB#va%J&vDWwr35!b?5-!DQO zzesFmWX4tIdj7T3Iddba23fHf)jIS3n4AUl8#hfVKQKXMZWNYYwiVmltUXVWe)M}p z32~9$~6iSz=n*UqxHZ0P3k$FXQW z|1=ee`Lew%H$qPSjuNnME6jXq;h(brrdOK{x7zW&i1thHSvtQ5^Uql8Erx7LazTb= zb~Sec8m@&)MYE%K8mvqPOk=pE*eM<}1KW#3k+3+?U!^If&a3ne)5MAv?;}1 z^b3=fuo}FGNP1WGb;pSrNgN<1TMOr-98#g|Vsn|?BT9!5_xOhXI+o5n~1jw_|My!vm zetufpAk-J6LaUd4HY7}H;_au>N1Ljt(oF6uMnGp8%}h83=d94_p5~Wdz3XNzYl~gj zOE#gLIpS~^Sv#HV#}FQHd0)Y4E;JtXST&qVkG2kpjaK+eC%BQ4Y7?Ks|$iT(>BuF%*e5 z%-FkJNQ{BKS1xJ>@860Xx-9MXxU;il4Jf6ro$LFFKri_UR}CXE133o%vFGe#rH0=F zi#U5@Vmz}YsZQEW0H(B9jPL=+zFSJcj1Ju}Wd&BkIFAOGJD74L=2{!oSkyDcgcI`P z7$5RDeFY|gUw0aGB`)-QIljo^zKl{RG=?{JX+YhVL@YGYirOVb;^YOq*8(_R&%Bhe zGOqwgltCC-TUA`K{myMLa1#Pc2JMTiV~buRFUokhY*40I2j=-o`-P7s+24b7K7WgU zS1TMWP^~lUN>YZ;vJO66R9sX76Q%d(`~({>6=9k9en^ok$wP8$WVR-E;j;SzYXH0b z6CAu=^s@Wj_kL%LUlBNu-Im9cg6Pnqg{7OOM`^vWLZYSOCT@W`LNG_;V|~+a{Iwr$ 
zy3sNG7M%Vn?p(x=EKNpZ5L{ycMA4z^ZPKY~SfC~%1`whRJAoH;nfLeZbyeUyZOgW$}YGIIIcGsKZ}ig)W%^CS*!h$p0kG|S5$stE+6`x=aWR%<@pB3J_s^LSUN zR!RSc6I^U(v;mRid6|%hc~pY^p>E!+Uo2CVOM>z-`aH1Vuyb}#>f`};>Ap2y$O^A>#FKki)Y?~-!?B$vhQ8?R0i-v**glLXPM(ocU%bth>R!gZ;H<}H6|L&(4+Dlk)5O@P*nu81IAy}1Qw7&i71jF`d zr!e1e5ZNm#l)snyqD0C%#p^0tJ$LAgW?5Qi!ERo$ac=2IzDa1Wc5rMg(jP&vp%d?& zEcn4o|Bv^}H|YEa-EUc4waO=-{BQ>x4*@R9MV9laI_^HDT`bx_A) zk^XZ;m@a5cLx=bpKiIMQdY2Lhs}AKwR!XXKcu>ehu>MGe^RhUeA)tXd4klS&?g$jp zZv^}O$JhLEPcm=#zP_iD^)5!=?P(R-zFHLc>>Z4af+44(Ip2FJ-jNhN)-! zbOUqbx%tVHX>Yr@x!75h0FpNOxX5QL5XXHmUrG{D|8?Rk#e-3xaz!23HqATm-VI2r zD84bh+J++hq)GoxBn(J~^2aDnzzyE%{dM1NQ6#)Ev4`u(6wXgfJ76g?6YFe%%`C@qTiW*!4}t8Ru!JZ##Zn@cB1} zfdql7sXZO5)?Pv)Rc=61dWjkZIgJ`p-15q{YaQ^3ogMP_4=-EnG^4Z|6nXQWXcrf{ zrJpV6dic;nV(ra+<5Gxob_KG(!K%EY-Dzm~U4rGJ4_JaEySZbC#0ZLApXiOqm{;oV zp>8q;Ki0l6rm>J`7xLE^IEuSN=UbL~t@0z4-_`o=Kh)ZdLS``TZY^zvR!=j>HH?8( zGAYT-{iJnGY{S5;{#%@DzO-|DMs~AoM&T8;>c`{gl(X^bKAg%&h>NA8rr-wYJ)-+H z-YaA%rn3^e0JaroON7RfHm~*ma2?fTPFJlj6PR{F*h~ zIQXP;NmItVIE##nlNoHtz@6Eo{Qa-ATl+gLMXMe6DQ|kU$eOv8_LRASB6Hk;wt*{O zq6a={73jLC|7aeszN`;P8Eyb->X(NY;)O*9YS>Fc?>6qIEI9&K8GVn2jh-f6J>m-@ z@gjEA)4L`MjN2%wqi3NSMC!$QAZxwA>YFv$2P0Htpe^Nmvyn!07&->KRi zHGS=2O_q!U_YoE6*^E%q*Um;$h7VRqiOB{+ewd8sOeUpfc!WXrRA^o)POy4=qS;zF ztzxN6g4w_&1#NGh;OV6g--Hi%#p3yuUA3(xX8OwSM{61;ky!=*YiJ;rYoW3WhVG|2sYUU%Ze}|o=d{;Iv?d=0iHHCg}!g+MVBVH=_ zH}~K-V+gPyuqueRA`LbwW7iHTeXu-IMF)$p;c#3TU+wrx$pjhv9;}KEEiu9>ceMIS z7|$r(E&Lu6aZRD2hTZ!-K#uWp!hgZwm;i1AtjajaV{Nqm+1J)06DoUhHqjGMPp6;Y z;K{dX)W31_kx48ZjA%o;MYAoGZv}K~^kW@D^X|um(}IX)~O<*zxGskkC zQ{@eV%=Rs0)z)j5g;lO1C4jYeo?+@RKfv;wf~j@+fYBC4E15vAZ6OKf6exn#;oIF> zj6)GVnp2`eZwLcs=qWpA(rp35LICUT%9I#?FXV?zT6@l#bTS?u7wA(sI9!YO{el+K)!-^Jc27FF=ep(|1YFt; zxH+XU)rGzfWXSVnV|zvRcT;76z_p?0<%3PyStnMuFWiP(O|piO%7M`tYF5?&XYxLS z#H~0p=$gH97M3{b&EdY}d8d-R1&TB@D3zU!_ndr<(f|B@a&LfUyGnp8fKj<)DWU@OV_FgB(~GWn_tPyt?^#oQt=ww zbc}E|I5x~WPei+YsXJXMW-kj32U9~T2R7xC;^}Mc6q0}0L{QFTQu`jSmHD`I*BeFm zg=#CU{Mt$XA>};mo3qy@+O;8UVk3@LK#p!voa2}zy!(W;b*{KuCFrU7$Lnj|<(i5B 
z9j;J=>+9*?Q3XUH1q%<{_ewSdaRm0MQe(x0bU_tx}M#-ZxGx_hzQ!FMH;f zx+ttsDx82bPx8KO=n251w+StU+s46W-6#=_>PwB?Hg0u<%n2JI&7NAGJp4N#1kvl6JkuCMhFV{>AS&!b7FgbfWHTU@yrDgH|r2~F&Q^1 zi=(s3##{wx>=+NVOgoJ#AFq9Vip#cIhA?&xVB|)quvvGD<}Eic{uhAFyT%%JS&092 z_=SXCt#4VOj{2b(K5ehZP0uSt;K zvwi1Q){cp0bi@yrgB|hxYW)`H0x<_5kZB6My{C)re*8?@t!L`k_eX*Vye`lKZ|j|r-2GwrjfzQQ zc=+I8prBw(Q*-!2&+E8!{Lg$TRpy~5(oTQ_hOG?&Jmn2C0i>X;9Fu)Ybo%505Ym61 znnpZ8uc?Oz(*D=5fWNFPzhwbh29$ITLjMN(`R%BTW9)UF2S3x0AD8)#!JQgz0VCyu zeYhSdaBJ$vWS%*Ibpd1rl++QGuS5>^VJ3jmqhSa6bnhUiaP-8~K_KbB1^ySp{9E9E zG@*C?*6=@imj7KQ#l{u_GBO6ZWPpF;-~d{mOeS8P6fC4Kxql!EbS!_zRqf*YjbT75 zT)^C5xkFEnW1tFqcPCyS@IKEl8PB@EMVJ_G^xN4KY-K+_fZcs!=?jHa0MLkt=0!(< z__sOKRZC<`U@M$QoJ}RjTjB@P9UPa2xrz2pAz_;Fbyh}Nr3YwEDdzS)S$@dg(xJ#R zl*5qCq%pJ|pV(lF?=8}EY!02%P^OZ=@vRZ54GnM9Og^(Bo`IG5UAcOCUQ0gFJb*9f zyYjYH#l&97ciK-yx&r$kdPToX%AcUQ*F4Wb;1md6kO zEH4Li(-+Tl0p#Qn!YU+c^=z4k^*g|~q^~SOmA}U^-1lzENIws?Ji?w4^cRNjAOGd6 zb?#e0e3v6!sJ(|*68>1v#vS01A0dnFAvy&m#X@vqqmk~V_=_^R^c^|B{@jTrx1J1P zl@^UdBK5HRofX=pqS}=2yf?>$%OCnLB0A$K`HgZLv(182$v~j1g&klC_CDmBVcG zk8I?YG~|EqXv&brFm{u^k8WD(y|ECMLkP3TDB{cI*Qde*%N5%C8zLR2HP?j?q8&!% zAHPaYUHFkzH2||{R+dPjEVhb(F8e#bKaivzUjfj`+Z8_GROxP`m9Z}tzkK@CTo*{6 z82icNzUg}7I>FJ%3zQcwY1Y9=v{X6{+-h~$LGrC+dw^ZqwXF#@JQ|+??Rfz}#PY83 z2Vm=;d2#0l9ObK>S{y(VuSl4O9zeUde*K+6=?wXsBK=37_V3^M|2;+ee~y(Pfo^DO zZ)JLnLEGRkP!rXGmM4i-kbw*;gj!mvKoUp*}-eH&C&OWUka z3KiK5N^~EQv%}(u4cQpo>nbu9PaN?*StC0$3m(8qz11(rIm}{i7S>G&uD1w@%&xtQ zsiMFb8vI!dSRLAhoa0^gfR>11S>7F?jOo|uwmcr@LA@zTG5nnXd#GI2^Amtr7yi1q zrjtUBci0N0Vg7&_IBn>)qe&g4H2|N-#flLT8o)M@s4KT9Z(iotJhN1g#B z-wRnS(27OIilxscRqL%dSD<1zF$BXj^ONcYc4uX_K%zwE>jH*&(df#T-F9DV!v_Y; zllYjQJ|8#3EgH6yL7Y8(^2(*q!U@`DWjh;g*%(#3_3x*Jw-Qyfihrx{=WwWp_GdXD zKG^g`O(8Yf2sD18%8&mcovjv|_7-w*%7xN(MMR zmzD-mo?iKqsC?MGd2_WV>AB%(#;keqG0>Nje`Wyh{a!dd7In@x7N7v{xg~@GUALGp zMjl|DUx0DLMRV)uFUn%0tXv5mx=yVCn}P4N^i+)*WK?GyV7JRs3Anl6?t5dzX1Gu6 zRIV?%7Xm~-EW~S^#=H?Ip#b{|&yUW_}qetMwhu+YPEgi*O!wmlYi~uHe+XM&l|vYCYKxEWYRAoa#*rD!r4!xXo73 
z$vlt{V_^BcjLMEDzo)VfF1xIZFQ5T(rnr_KJj!e|HHEHZzzN|p*;@M9&`f$O{5YPe zVo5-(!2*$#z)l!0caYkjJ0fVs-y@jU9HBrT=iq)Ul1F)aM)`@qJ6W#ggHDnxyg>T)4Q-y0n7HuzK- z-8vD>#|jib9$8~>qO}Ri=d4Kn07%0Iut8w@&N%wjB#OT(7fh3OcS+oq)Nj>3Dok=O z{=TefLsFGm)!2iM`h7M_uwPp(1W3LpxAQrxR(I1Wl^y32_JgCaAspU&cG;yWlpSNB zWD>quG9>BMi5b;s)&~1#czG&a-Ke)W94#5KU+;IBq<8RbHR!EN{xis5SM!I5xGC@~ z{b;}kLwX5C*PzUiG3PbEiKqrbmyEw7Yof&yGN>-XV~ z>&KTcLNiI;)ts{Cb#KlV;_-y%96}bw9~$>ppFL8LvAik1HWjJbI6`_%E1Pka;j-rx z>}lrb?@>v)&m@khP-~Zao6$o>tP7vJYjiBLu5;$Jrs057p1ZF*|7^U`I_roEaGWwC zpBhHGvmhY8*|=T0v08V)v>8X-tME@5sa-G0xj#lN;!uv9&SIl+u*_3T@i`v`;uIaW zC1)|&h|M-%v_+G*DkgSV3|g#J#h2!e`JsAuQOWPT4PFGYRA(JR>=ktrvnm#(ZiR{O z5rGOiA)M!9zP1t)F-_1b9exUU1PM$Aw4J%vp^1m~m~?v6S&(<@h( zaMQ5SiKi2&O9@=YmKo}^%U8Y&cn@H>ORWnYbH4=9k9e@Eq^Y601iEKFx-VV0^{i^1 z*6;DxNw&%~MiqafKuvFy{X)gE!DgbEThE-eMxn5ByB~v$223M3N=x@DzZ7NR^W&@A z09|JGp5u|v=*7zJ({>wb0oWXKs|Cie4jocYz-D$K##Xty z>?H7TVQKAW!)3M}h|1y2%FmWF>p$D)94;LHb^4Sz6PikrEo_stV#?FW8q(Rgn&()s zVp7s`2+@sob*?iBT!IZ}SqLL<{$4OG%*ZucyA2>Jnu&NaypIOT@s(e=ujg!9 z-Mvn|x-dPNF=4{ZqB=R21flwCU5!hi_ZuHTAFlK@GAJ>Vam3&z*5y zH&m1%TZ8Tt7yP{W=ZS96#M46a8QJiS#UU;+NewbLJ}4?F(Ig6(=##jmv5RUYe@Lj5 z4pmkYa~+5t5twz49aP_@8_QBo*#vq*>QyQh)L6_O`DTuCRC+5#mU(}qQUAqk7UIoD z?bq6x0(jz5{8dwx$1p@zSuYQLuX;_p&U){rCL+F92#Ow$)d8Bkn)KJ3-1V;?i|0|z zaujLu-p?&gVo}7_QZQhdv7YzK7E}(@fFo|+1f<-!wKBRKEL?Ao5@_Aojjo1O@ew)1 zx*Y2*Kzsr0Zci2pNl3o7--gc*6u0eAraE+VF!i9XNB mQdW_A4T&QoFt5pBUxR z^+7yN^)Knuo!|Z{_vEbmUAg*~Cw`SpSYP5$73Oo)Cl1 z%B`dm;`0gANX*GBeBHV?n>ftI=GJ?K0OZo0mESQ~-E&7&22peZc$07e-Gwdt-C~k8 zMVCabYMeigr_Hj9>??udB*8@OA~Tgo7~4a$+d=KtJV)rt0_&Pjh+n@EWD3yVzkFN= zAIi-Gf*nL(VYl5~?RVb&qCqNHS;83$z6l8UYzd9Z=!q2$&Uh(&uOJwM9SYK91&Bf9@r#c zhdw9we$dPrDOHsr_h=o*t!8$WY~4MMM!GSEDQd6Lino3QjRNa5^@adBFK$nw)AT~) zkUqdN6=^82GpdjbFiuKfXV&^DKIDbuBMgq1k~ZG5BKzs!Enx5$8lGTp@63YNt#U(I z>;{=96}5`uTJTs<*J+8g5>kMti}^n3OF+r9FOwPk7NP@b@r%H=yf5xBMn~CF6PLoY z)s?NVMYj-5WTl*OwG84rQD!WWqrjUFjD5)?P`wc(z1ee{1ay=gnY05?Lta$&Lb3Zq2H=R@#s-E=U#S!zH^GCk>0xO>6qeq_5#mXS`(Y0 
z#>ZKu=IGXLFE2^{9`?8}5qObyiS@-Hry-kMwXestDz<#Rm}oA`6uM+VI&wr6$|!`#?!i88WuA8JjA9MGXFudbBA&1Q=1{3umEORjqk!uH)D%4 z=RC2fesGrh^&z`P_QyC;ykvZq$2ona%ien@OWy$Vq{)pee1*W&-Bw5CQIl{S(57TH zwC@Y?r=1qc<8ODRkv1m>ty==58|>P0b&&$4I+_Bs9hD=e)kM8yg+nA&dhNTet#i7MLb?bn55n3ie#hr4^r>Q*@AwbC!Oqq*iUd(NdC zSJ2ub4DSI0hV@zeVKg`?^2%g*$>iyw$d7411mcKks)QoLQhZFkrQaSl>64~vu zDOaqs+i%AGny;k*x5W)==IV4t%U0bmxy1uz@sxdnEl0CZSUtk=BV-q=E^$P2edaoU z_gdC6v3yp05Lkc2T3OmYOyp(%x|^6`!Ug^g@WieV#XK24Q05Mo((S6?gKK&9pic*+ z^9-uc+_p1H+wtFKyof+nIPlUxc-Z+hK-F%@*37pAVKdLhA8B2brDQE>vq^;53pBak zRokmNv$cjjQ#C-OkbJXf+Dk{h8c$2D{F1}s*jxr~LOsOB`hTfOJ?Y=hOz11sN)&Z|f%S3p~1MN2?X%=b_QX~roXjk!%W%|K3SI9B)4hpdBX?Y+Bq5l5FN@kZEkWf9m&4-_Nf%;Gan zzVRl;AZc{jZrsMQGMQMv;$~dcPBXeX0zox-n&+ygE86A{e?2WDzKv9Oz^-scHqcqU zExw)jZ|3F4Ki@w*bQmZ<2Tj-9v^d07HNP%UR_A|PSO&J#BE>rlMejIaO@pE_I~fsu zlG7ZE_>8-EBRWkQ21v^k?cix1%hgAaA(WN72GTY5t#%D}_9scWoMoXr;p6U@qh67j z4c?lC^8$SGg!bLrwjG+#{STd`I+GaQ#XY)O(pnc?GhiVM3k+Bc@b~DwF48lpe-K_p zKFE;-db$EaFSFMcF4UXVmxv=?}g>joQqNIx%+;!XAQpyc@4AQz3zq6kKJ%`5?J=ue^_oP>+gckg+EWFcY)cAO&_)Mg-l3`Zljn3-}a~VkkKcgWiJ(WGqev)5JQ9Pb>dQWC$hSGIIc_l6md?bR2+up)G*P$ISkZoC;T^UN zo!w0oV1g*Xr9*k_Dy%DTB~BBM%*)Osj2e_#P4rl)8#I&PR2ICbWB&;jQznE$A^-7sCTgB=C116?8yh}%MKcnd1=zP2`eZrPup za9Ru!vyplTzL{yIW2yE*e>MQ=N5i*6FFp?oxzCWleD5>La?wgC1D9mg?JvQM`W&4X z?zF6Tm13&O7qqv8xTEYR&v$P0zPn*pvs*me4e8nWYK1(HwnCPKu@5Qm$3Ts@H|N_h znOI8>kn{YE?jfxXhn;TKv?>haj62*5)41cwx8T{C&xIrnhg>XL=oJ};U(He1h zduUhuCJy7#ZaWEi9mAb@*M&Xg+P}sYzh8dqe_Z|(e1V6!$cJ}71q(PVV`j_E2mGtr zl&n&kG{}(<%;^#PWp`Hi}jIdfB~)s?t?bf>sqarS!V#{UhW_O6oIN4GSc{ zaoGHDrzW%2mrTXgmFmo22>=&V5Ow)ewCg^&3SGtWs%(Q`$%J|$LjFRsUaH(dt|>0t zVIj_it$r(!UJ*#>MUPH_%1x!^o^vAx27nO@cogZtqrO4d$}1V{@^A9+|G2;7i9=}* zbuLn{wRc?H?IiuNanQ@R6u@ObyTW=5S zU9Q3=2Cm+{>u*(NdNScWt!5?-pW#OEPJ-3Kb8niYc5lCS2p>o0jrSWhp6BgR55^2* zTA$VOuDBQ^82dOewwy!ZX%kbQ`1%6Nd(5fy*w(~@wR5gXM;tFl+&epz5nE?tGD z8EH#ttiHRadh;_{x9oFUrK4KMu?^wp@3sJM-h}$a?nMWWzU4`6OWtRa7F}fI5K969 z7)BdVvE|8_+Z^CjjCAJ;<+Uw9-kQzbswz0drFkhw-E$|%{2Zei)*j`NPKv?A$U&OF 
zHqL)b4ieu#-y%XN<~G(2z0uctCW^WW(f|>mg8hk;?dP2rAOB>E^JJ}p}%#}Y&-US>zZ4H?#KBd*faHKYNJL?#~S_6mK&zP zzE>1TVf50)7pGxjJkQknNr0YIn;!A&$Wz=lD1g|8Su=T1G}WxYV*1{pY}otT@eiFb zBp_?do!w#K(zWcCNM6^TO+?CCMzkh1l8(;4ZmY`di?3bAh%eE&dH=99>NSt@a#fO6 z-;zkrfMy3l{|9@8h($uI({LeV*!eCy*Zy+w^2#fl zc~u4!WqD}Dm&Em(&8LFKPg+ikaIxQXZ9Q%A&p|6b z0;`9#5N$>@t`;eV_iM5qYrQso&A^dZQSO5=%=Uh0rq{(sK#)@UW*bEoWS>1LiMtWe z998ENd5@o-t<>K4wEt~pDN(Mx1TVXM!?m1DtFo)wryuHWp|B&id4ZYf7s-bqMsH0d zooy$*zHlY#7{|~s>1FpnA@Hj5K|T<$731FcjMpB;iMDc^@3`#IIx7V16N)%iwnK9x z5f~=Bz+-`XHFlSY)AH;?h!J<@6t7+?&-tm&|_m_f{(qGFV$rT?82PA1Lj zoGu+4(W0U^TMOR3=>@Cs46&M4IIr=s^j1NiZ0^_Vl~p~&%g9q*vc9u^Yvo>uLa7E)5aH*a7w6Es}h22M;%jigGa+PjBtvd|D zhRzaGt}xY@Z;H#Forx83P@IrSwILYbc0NEekn}{xFb>Gqge3#R(r#|mxOLO$>E6Lt>1JaYr8%5{8u8;)6+GA{$(pnEX-teU{(vwq4OdE z$RkJI>(-9$2&-bBvFB&;L3qSHo>w&>H*e8wF_pEzxWAEGVfV3jAq<)uPjk{g+XjAZ z&KDS3w$qgfBN^)x%LB~YQTy6cdvEW1YiGlYj?v@Mr1X+im1i#4?UTs*=Zo&m%&iN3 zdeDv+$afrm`T}N$f1oXQDnQKtoCWYN@XLz8nUyNi-0{g>Ll>_g{N&~}qu!#-7{mlW z&_(cEPNjyMH)Dg2YLm9MfVoO1(e`oUP9FFpkqgOwr{8#-*FMhl9h@LqKs6|bTdv?lxL`dBdJj=P34}K%ZyX&t?9bC88Q;rzB0_7qQ|PFYma z%Kr3bc|c67m3~s(z0*;=uyHg`pk!Nd*cLk)ZKBTPuPR%9BkOvsI1%T~ul6I@$*y&7 z8{X+#Uh`ez(@pN>c;Tcf#59`>AaQk$Vcpw?6YVj_^t7@gX%Qx ziB~bIVg~&VCAWZ>W}_5@4z>b9r)@tl=XDydJ(*_1#$?^^>(}wROW|O?)O8IUF^lGH z&zkpLrF#&+&G}0fPcX==oU)hi_q2Y=F|C>58!9g-Y&(R?J_{B3?yc5WojGBUv72KB#vN^>Rz5c)2ddp^Zc&ZT_d)9!V^SZ?Is?L#{WO1Ab=O#R>ef_*`S2(R3q=&21 z4F`+>i*i&ABm95ty?H#8Z{I&$A|gxHN@zm}rR-ZsLiVk!sU+JN%h;DB2_YoeC5(L^ z%UB~6*~dD@lHHiG8@uOF-|N1w>-u*6ZvCF;ujh65^6Hgo&htFZ(_$uvE;1#j4^-S&+h5g;N^d1~VT-YV;Pq6u_THyim7LRj5qr;0#qkN%qG6MKUJg@ z$?$szK$_MnpY*`XtsT;u%tDaplEUsP5tj~rWxX7?5yaUa@KmZE5AVKlRmX@=$j)!{ z%bh_U@-XeBi{}!Wv229QuTr^auC!~GRdJHF!?g_?~~$>{sQ6~ZV%GVaNe>N>$x7Q{eIvlR_r%W{ zGYRhI+7bFefE@ocHLHVcUsg&>-sBfTq4xwf)6s@vQ7qdSjUqZtAdM86t&4PPJtkA8 znX|S*%K+`FEDQ0q=J8v03ne$okf)05RudoCGIk1J4s}!Kkzf&LS&&VhOH-b9DVq$3 zrmo{8|B2htUaV7@^}0=|uxOO@bP8ukF{K z92J*%$@Yq`2;#KSz})h)!0E>$_}g>u|3Gf9tZ4asQ#@Yw(uL*g1?h&U-~}K#%!eWt1-bGj>qnGTMN=!*7Wz2Hkv)&n 
z^{I3|5H`&dmGp%wF{tMeCf7o?V|y9&Y88#1WyT6erjG{1E)jh7FN-ZrbQ?K;(%_YT z^%Gs#sczUL$j%AgmKh>v;NDPSe?{)i>UCR=U`VKEO7Ir5dApo1Mrq{S;kRinTXK}s zQ%~C}pqfI*ua}YULxf}dy6we~#|7Ll}t?)q}U6c1gA=hG&H9fg(qC<$Y znQs>ZFrlSBGU`BRVlbeX{i>GU;3n^8d5wI|_NW}T9AEV*#Dnc|%Yjv<|Sqn=aHi=*0Cr_BM=&9-Klk?byOV{S{7Kk?Znbvr$o0gpl- z_#qKpuuvLfsqPJ{yc*Lc($5zo@s?{BW}5j753q!|yz~%qk&JCV*mx?<_aL6A$+?&d zl`O%|q9qkoYrSZQ{ZAG|h5PHTzUC&}`!HXp7~|Z86E63@slW?w=GC#!m?&M#gqo=v zV@_nsBGxa!j9irW&-F=vt7nJ67m?k;{`IsPrcsW0E8XrUgYB%I^LB()aJKgxzfDe? z-}sL0?-pBThBiN~a2}ET200B2GFY98y@SMi6}lRs1m@l=^HyCU?#MKAsF<9>I`F31 zSj2J@RL|P_vpC(&H7ksN+gEKa##Y~CJ!hUvUhYO2Q~r3h_SlQS)r@GKF7Iueu-0TM z|16QJmsgspP?Jnaq-^x?r*#>l#j3?RWr(x3VqWvsJ@z4cMN1k^!;E;Yva~g;*uj#( z7sTZB6QwWP2X?B3Dm_5gf%u;I0g&aQ-U{}K6LE{txRj01cT+MH`F?v`d!=VN5zjWO zR7T{ol)PE8yN*sZxQSb>bYoxBtKcMD77^dios)}u% z6gDyHxndog@zP+XY;4Oa+X($hfH%P@0l$ePWL=p_t&QRz8<&cOgrI2OwK+4yAERK6 zk0_R^8NOHF<;Ism(JZDQEbGm&P|_`B{BY_ zAFoV^LCs2yrp#^)hikuBFak+ih}q%v_f8%f8x9_e!m=2o!iM4|>lEy2TVhQuUsZOE zK_2q6ma|LsRsG`2&lKl0oofu&QOQZMc)K+Bw{^5-NC<jNnN+rnV&gKw>p3R=oi|09mW}Hvk98~%@?&xm9|Uo*A0{Ft7v{zFy)SfG&v)9 zNKL|uGok5M5Db@iXLvF{5vlI=p$BW4F6p0C98_OSG1-^FC)UKU>Tt0RN0EHq*3G|? 
z`4TiZ2xYcen`1eH_^_!&)_Tqka}5i1M5j#Fp_M%s#+s8*5SDP=WkSU9VEsYo5zlEP zt+v^tvM;~%9rXT1!TE5!;MgoY(Jn-Cdq_#E&o{4X+0aFIaA}9tU0=f#n8r5pOLCw$B$V3+gP=n4_(DO@4Ho{n-bi;Qn7VPn07z88{|@gR-L_7)bXTZCOR@P zDqLUxoA(}mBzncIfhmy3<>?o6PJaqde6A()Q}qc{Dd6CFSA&tp7G3~4GlZ9%Wl9hh zA$yf+knbpuIh`pk7l~R+aaWmJT@=k~F1i%@8n4g42HV?QJyT>DL`!l)WRu(m@n*e$ z$A6c*o2njTf5)h0*Y0djgV~%?J@1f8qEjNy#Ht1*zwE)9d!!A(DNN~x<%Jkp;`3TO z-xeyoGbwAHrWh!hhKRwaTFROey|+X=l^!7>iJE%(Ph0i3SBu(*5ZH($t86A?m+Pml z^c1!vxDAmc?P8b3@VoY7W1CEdkAk8kW;wX7!T@rV{h+8(*Q#dM-Y>I0Z)N;I%?Uo> z70E!{jhCBaz22s&%{M^oU)PsX`M&1bOfz;#UX(YvHC7xn;m(`0c~3GX1`Ey>u&e7& z$(39wGODS5F*LF|C_+M2uBPkuXJX$uU=w~mx2swuGIcE}$K zq;qG**S4)U1mnfn7U~pScgfE0#GvfFrBYSZfU;W;`&`ZDwefM5@ta`P`KCuA^a+D1 zvypPI28w5?qiyAJvt~T=pzkCUZBz_Rz?F?tL3X;6m>1sXI3N*{D{r)nf{MzW_<~N1 zZ5tR*d(O={N=l3-iQy}J_H?@n;`O**NA*VI2xX4w_61K3EZ4jz{;Qjck&9|m&%`Jx zxof2Oolr_-McniY^}Nc}d%3;TAY9hB9v%8G!T6tmx%Ek+fmd-#nq`W3;<&=B?X)1I zY(gr#=xBUMJkTd%SR!y$ZqD^M%00~e1x4OOe*Vn#wPLV|&%?ukis zuZMH{UPNw_FN8LBlf(tqalz0U?G&1ynT*>zSwaznVfPV4TC%QAvXW`2WKPlfqnwDd zLc$_bJ@IWIT-S%_qCt4aFe=nq^wvAf2OgCh;&?^P{DVjqueeA9Dd)^%6kLs|!&gv; z+$M1mU@02Y9YYyKB6FIvHW+z}4bz@$o%@GI;%>5J3#W6k?oUd<2(GVJ4kuGIAfELM z^G2QBA=k5<*2y^ST+4-4+FWE_jqC#OWVw6m80Ao=e#x_^bAi|MXz|Ad=JXS0MO2KO z?MUP3SD*D37p#Gi>)1Aam^X?&6`6MRu43P#xB8n}>}C2St{^L9{!^&56UHY@+y# z!HfYx!|JJ}O`m^XPbR#YurXD%CRqd7<-k|WyyYS7UuuHcRS25RfjpTR1Z1Z=$>5=p zGleEO9hAJ({@oVgsf)>v#pZSQxDl|(AbmAcyn|I=F2a{8WY6c#NYgzz5(VdxoTptE zHwEvyU03@zr2u~Q@eaFXd;uQL2d$C8Nd z+(yklxoMJ|@D0ANb>gXkaGTPBMoU~xqE426!!FWzWt>bTqe6(JpI%Dail6YN=0?Hn z1wG1AQh2LlJAfwiYXi6s5X;fKY2^1{!s!rF9ZeqFPwRT!9?JG-L&-%NT+cl*;CeSJ z`Klek*W`H9FMOHXdv6nNg_CbdedQilp;>Vd@EFlkf@fK)I?uo4xm84|tKtcLWdvIj z9JGF7K^>zZ*e!V5nUSE!)UE5itV%`vh;Wv^-^*0a{+*V%aj&kNGFqB1bjMx@U zOZ=!yiecm%-228E|1iqXLq?NAI(64jZ11I(q43KsJ8OTwkReAwJp{n&bud)!#=+L+mo)dM;%VQcd2tO*5vy_eDE8Px<4 zK0W0}o7ca#m)vLkn^8m4f10GrFK*SiC)}y*UJO((p~~iE6n~)IPZQ@ySu>(7oE1v$ z?+H08ScjsF3EcO0*<6l&VCj5+OgN+2Oh+Kgx=`_fm4mhwA+%S28)qeGIseQ6Ql1(M 
zH<9Lv`v?TE1qQh$b1f+(gU2_W<_-E`vz$ons_h9820txwtBLNqm+W;HDo}x{Hdhb8 zux0>;k-r(Qh!1}je7niHDakBnkd);a2tUEGv?;QFAD%vL@5+TncbSLVi z#QH#@0L@GG%yPcBj*yI=VqYq;+BB(a-)wHIU1;-cr$6^i+b4r>9o3Vdq|L53CxtB^ zdEW>uE<0Az0bO!4x6m;M6<+rAhL@GinK}y({8dGb+Eb0$f__o)Fg))aNH|fH5ghSr zmD--S0m*OQf?8i4H+mp>%`C=%?*QY9cZ}>Eqx`}GZMG$Wx|rPbLgOCa!^mcwtZ>W6 zdEep{PH2@&Rf-JP_tP}nIDt_s0@25Ts>-LWM!xQz6wa8;GsQdkpcPwrF68D752*$K z6u5b6^vjQ_BRt^O{E^|SsT!Y-3$3>q>}vHP-__JaH9E|`H(1EO{&>M@VfuY+Ua2l` zEa$L+&?RyK&k(f5FNblvdpK#ODF^0HEuXDx236NeuBda4tHZScIBSz$X;_jp0s z?Llafs`K{8U5(Pjv5s)+3mFuT{kYdk!}Q_d?(h%=^Z~xr9Jl6B1F1S+LnwukNejNE zE7dC*8UKOgzjJhIL8`QkWN7VyO)0KK=;`-;ypIM}H9jr(43BR1hnu1Xz9~#g#D6F8 z4JL})esP+jO0s2&+qVxS^jyJ%%k9c;AP_il(|V{5HL18VrGi^GuyvF2t$^OH*Uv)ZHKhLdjS3g*Wy|H5N<+z@70z{KZvB#O^-=t~XTkD-GgIFR5vTj}!D zk=Dp}C_Cu04x8xSjPSG2z-^JQC7M^d-BDgb)ez&ILTLejw4K7ujZ*=E>f~o78%@?8TL*@Ls_LM2TvLK>oDJX`yL18L$u^K@ z7STwFaY5B1F1;|>R%_ctcpCykZKyL_o>}W52!~q@6q7C>l=-o?mN+F$ngqg8W)PBh zK$oAx?lF8A+&>;1gASG6dt#|$-dU$$*kY^U6?2g4avv+iLRX}9Ox2FVwQ7FZ30w;p~7rD_6-iZbIDNQD|9L;DkzC zI(JgkYo#ZXS=!qLxrQ!klwiWs>c16ZM1Tc+V4NVncBKqxtb+*}gX=Op7NPtfV$Giw zwG?1*IUuzeb0+ux`fbgYtI@;WnGZCG{X2t`19Q1CaQAY{9If|IdR>9rASRX zJI}&=QJgw8!=^Io>-u)H5S z+QegZPr9*#P;dU8UkHKM>(J}~ApaiuPF9@jR7NkHAg@?K^ADNJQ5AhsO#m~^hujb_ z*w~=M_aJ+*ZWTcP{M~6f+2nnP7}Pdv1xBShex@e0o@)BMY=>3`_A5yG;gP z%+sE_&md&#Sd$wv7z)i&j$zZ`s{To%fy*lKx^MGe)`=ZEPH7h@Opf9DQq(@|&YmN7 zAVsN4ucblG_82|ZKC%BpTnj4C5Ub!kW10CxJAh|21;#~|6i6HC|5V~3#`oZMCa#HO))NrRBp&C4$<+!4*a2DS|6Mzk*SY|+0H&r(oxUoVdgP*E^yxxoStVA_+^ z1X6X77vYqq+y3M_F!f8YxfMHArRAiS7@M&=&TLcPE!|QG%R$yT9Ys9o)!$|rYqK}b z-9&do^A4CMVr4a08B9TBr3)aQyvpSmE8ZnAU;u^@^&@aq&VpQM3=$^YqcX=D=ivLo zq5VMk&Pt}o3cy-2ponU%+3jy2OJZ!qctV}mYi1Cx9;2jvWi-*gb8ligCWViz#|K=|CNQ-ap?@Cfw$Xf90bb}>qZ_-w?>({qm;C<8Gse$#Dcg-wL5Ber`{}Sw1_qAbkSBwa1VOy=8m7U7<%%f$xy;EPX^E2WKEsL_0rU zH^&o0m?4bp=LuTgA88CJ=PkG=a|htfU7lNw8l@lFEEr&^>x5wh+vtp8wyM!Q6< z^Tr=_A|q7ipB~Gd!8U$kb>wBS?-49%;S(I%m74PcJb~JZe)V@eV+T!A_scyr|I0o6 zlzL&b7?e@;X+Do>(#zt`oNzRH8Q+E3BD7?rgw}OoL(h?Y 
z1uer~#%=cW`-jN;lc`nw#YJrryh}r&UtH%mVVS8UEZbX?#dXSC4epk%I8d!N`%-IY zVs*P@m~TP)%2E)F_ndhx*AT7~cq_IC33KSgN`2A*hmjj33d@%#}UH$Xk| zktNTV43+M^4`P2eb#7(OxVbAqF_ck1a#3yVqRhi;(s-U!3R-BF82w2t7df*LPeQ># z*~c6)W_d&X<+^QG%C-$aTJ9Mg*#n7$kb!h7*R*j|)s3d16t6`seY0o1|1sgqqw$4=)7m#j<`Seu%yB7mK>boPSkj(P~ z=84YT+OD4aRdj|-l>vS)!n~X1x8EjOg}a;S`nNHE}#!+T17}2pVr&yEo)TaeXo7v zMRapZ3qSm0P1L&-kXperq{@@0jgXF`w6eB`qcQ+*|N&nbNL+Ov%#BEcNms4;v zu~)kSu9QWTB3#dlJQ|9GymoFgEu!3D_tdD6h7uE_gXKcM4E4q)N0T?>QgC(bbs1~` z`OPfDer)CW9dBJe9u=~Eb+`?>*J*rB^yAmXx`DGVP3lGd9^iUjyH8ASubH7W?Om0# zV~dv-zf;TQOvOG&$92PFV=+!!>5C!T{eTaVR+>n&T}NpY#ctWFUDA`yh}^E5A8!)J zO}JuSGuw1cY)3rn5qrb^;YK91)SPmz55dSA^PtL2VK7Zbm>iVC6y}iaeaq?9M+pS% zH+c*Jy*l!PU2SlaX~HoKPCop&!l|S!UaU;15CEs2-}8I-HilH75hpteSNoeFH;*5vq&#iX0}W(>QUyR`%yRcJ452#&2g+(@cMT>wUG3e#k$3h-e8h0n>i&RTr z$uu~x&GpO!+rC&-hR5ErRTfWrF5f$u0S;oW!v;f5GFiO}XwF2hss6!ItH=JNV`pI$ zpCEM}-=-wF)ibWealeFR&mch)S5CCq^jVoJj)*zCGDg4ORVi@ zK9=zy>Zh}Fb&yXd;TuY`FOJ7m@9&W^*>9XrPa?=)@Za_2JELPUZP2D3#7*9sjv$Zx z#o2jiYUM{j@b}($_RSm=2UTFSzh=O)81c%moZWR8K^xZX(lk?$i&o%uoa5pgoZPj? 
z3*+TK`|6+2>T}>m>Yj7Br;uer_*&~IZLl`Hy{-wx#9Uu7oo23vFaWo!JvnixP{gdWa^^;(f674h?)yOfM^CT`B|jJLmRYISa|Cojsnf{iP$% zZ$s6(8AdI73d?)Q^aV5Se|&+}oC_8nDTF5)Cu~qFg3naX;!BHmgny8d*DQpxLwdvCB$%Bz>;*YXOXS2F;HH*#l zs_R9#IKr_7d%6dv(v;J@Xet+8&GyCfDl(XudQ7+BCayBU+#rvlZLNq-&Tlp(yYKQ$ z0wgCk%_?tLOIt)}Df#N1x~77**2NKjxArZnou0mtesSBHSdl)2KH?f|oU*SR>xFt6 zSjQ9l(aBNyt(rBx`anWRBYS!^Eu_;kDb z72I@8uncPY^UZ&c*BwB!;k-Yq`wGr|Ol2RX-&4qYxSBOFP-L%v?kn-yq@D>0Lq1al zIm)qRHZealThsXnH*iL~+}5(@KFTPc*fMuTCgWvRvs_^N$+S9L30zQmV_S=pA{P7O z_T&aB%+&tuUAz%uMi|R<_ERS>%O*j+I7q?YW@WU zKkNd0(SB@6k}yssOuGvaD5%?D5>Dyhq;jk{~XTtJ{QXwbs^yX2!8zUv9iT zX|VzGGQ1##Foe6mR05`@ep6jjA{lbHT;h~rXK{LRN05{j$;o6TkR)EWAlI~XkZ+n} z`(!KRhsh_rQ;*IFz&+L?F8Vj%lHD_`|xICDYz!>7PyD#=L7YP(=^kU>=_c&$}Bt7%h)N2$F`el6au zv3PWz-hhZ{9yj0Y2Bkp)ROWiwJ zdU+)>dQ^ypphQOu^reY*DgKnx+FKvpf=48M$hT7rI<<;eFRw`oA-7c#FzhnYE#`{l znW^v~_i~d5{yYIdEeWqbk5K;sRT_PI`h-7wGNYFKj+0CfnI(e^uLNw)v6%S|FS8(T z$YcJ>;XGfHM4tNv%YIBode>u^)_7cZA60wVq!^c?LUFfsm|l9(8>!1KH4;GAICu(1<-8r8NZ0^=e}7q$y*s3XJJMF; zbIm}!Z?A!>RCz@OM`mPtI&HNvC9_*9?1cV^qTY{m6jfr;zIN@m8fg}$P^pp*sl5#? 
z!kGo&0;Vsl-f zo5E$8tNjF|O>&x7sqmqg7gl{ea(TgmqbWA&*y|(`C`~Pqp-&E;QASQBX);z)gWjHL z(MlHLQOUzW!@k@-J%JOT&hdTm-G)F0%o1iLuBfYoEPC}tO(H!!NGLkdO4G%A&1-EK zt`ZxKstbCrk)*@BcCswlR+A7o)@NRX5y9Jz*>KOp9}_^TfO7=wP4(hVcL1ODiUWTK zA=hr4qYPi6>8ncah!rnw^K)d6B`laQgJ@YEkL7*f6D^PHgdbBC5XEmq;MPXPlLB*1 zEM0jFe@HQ|?aJ`p;95oY=o$cd9jq}_;t`D6L)9~0D>Ld0HG zZ|}MHv3G;cK~uR}o310$dm9m1ANsCm^iDZZB8N5AGt+}gz)Xn5fhZFXGo37*0?+l~ z7F6A1ua-tAAJ=Fyo^i5(W2O!Q3aadkD7jX3IyChS9pm>VBI2w*67iE4_oi!H>V13- zza1;vKz=JL_G_J2UJp`=3h%6Mg2>dtcqZjz{25k1jKSpjX$S@PSKW8oAr~NdAg`qX zX7UCr#pxgV;#M}Eum!1{X03qZLiqC3t{V7O_SSp+PA_^JgJ!Z$uqy{8A=MW!O_5s^ z6OJtwZa)p`ll)m02ljOFAg%Pm2spJRw&UWj4;)+dA0+f{EgT!L$+xCLHd}J|-S6>| zk~S^;?0b|&zJISs`4?yS3C#f|aMh2U8o5fOy7Zus<6vbz!3LvXz8L~kau$9*#$S5L zH+QBh5?I`wV$QQq!7N;a)#sT324&ygZtfI=xY_^VA@B76lC7icAQ$et_0MoX(mRTD zbea<`w4i2htHm#JYYk_E30a828hLKbn#*8A^ad&TD<`Hbq7RaCtVB6}SH$~IpXFaC zqxqR@*K{3Vu%=Hh{ah5YPc1+s2VRGLqFs0bk9R8eTpwszA9_r^y-OIu`BF<4nziWm zBVpJf0R@!k5CjC&GPMW9md@V!iS7R9zxqEAlO1$0ct0Kug3H&6PW{@q+oNHdp#wmj z7J=lVi#Ty^t+Y4snDLWXn2Z|wNkpMV)Z^p7;mChJlhj`=B_4@_iRla!H<)D(vYz^1 z6ZhsCSvgEeJhH7>$@SbAYgz~MGw(-c>R9JV5{j$V2kWVFJ>T|ZA{OqfhPd+l4@YvF z;$XArJ)eA$hy;xmlCr7(9-{dhkO;oA5%t$D`-}b4F8>b?165FWIRCQwP@v@6FFN?2 z4(>PQng8{_%<)H#auTUF*sIe)xf!1dz4+&i`p-|bb|fN>BNJh0kx>C~=Z68Z|6$Aj zx>J7ViC-i{D)_%I_tzQxzdt$e?Opzo3;kWXkvt47>-~p3i|4hC7m}VwT$Jj`(&g*1 zof#aH%efaFmmkhHN4aSyea|yuHf>rTvavYUJbn8;C39C>Gs5HV(8E{-n|BfD4`MLc z%A-F}IFQ<`)V=-KOg3NGAwhr2DWkXo~jTPwR^QNC<02L|| zj&GOldypD9Y13^VOaFwS^7!v>^fy5=YQnKNZGyF4Td=YQ6_y1#%C7CE#BoX+pDVTq zh;;*KzD)j}nkgYpf^m3@OS{Whw)Lh#X^Vyo zdJo=Sur1*5Hv12UA<#sG7$a4j!2Bwqsm90U*A@Q)?yrf(5 zju3WQoR;VI-*Z&|cJ|;aduBvf-{H@_*}L7ndz(@2)3JJ2Z6moMC7oA5S0FRCRDJs` z!DrtTF+CYqX0&sMI7$=;3SY+C-*q|U1BBd;`o;9-nAm#nBKS^OYnD11Sl{Lq-lPVj zDWk5)*TX&^MY^q>-M(~pSmSpK{OxDyG^@nA8Je3(Vdom+gmPtTGw8h&$i;F_=sW6f z_nCk$M90xIDa#%$s;L_`1=#;Ap#Mw2|NP1sVtP|CKF zW*Q|S6LCH9>hKq+{{ZGiWLfA-7TLoQ)WLc2->UtleC?0~0xs~O1zI`di|6Hen!|DL z!AZCPTh92!g~MO)P6S-Rj=yB;|67{+9W&pm1?I^z?a9N6X_{L}Ux4c7wqrXX{__8L 
z5dT6;z}}9Cdic|b{+G}H<1w+T0t-E0`)>b*-`MZ}fr1u+DN;!+clZmg*numQh7{_k zE*x%EV7bqk{+Ct$msJi&WB8yLTd%CtJ`VAZiT8aKjN&nSKac^kDNlu5t4UQ}nvgJecCG z9P=Lk?Hebo=z{MyS_2nS0NUy~2dR`Vux4pZcR2Ffb=Pd;-LtexGnn~7f=-Ud(||}W zgWkcCg}F4HtP+IeP6z6X*m8v$r<3$>h$VWUs`^fFy!9+y=;dAp)-$jboKw?`9Yho7 zOO+qpy+F~f`iAujnFBPiV`toQPmwR;aB6A2lq2i69PnV1l8$TCv2Pr6qo@WI*b4i48R=^xWcbZJ{3b4S1;A8Gj)gH5a;1|e1yP~O}Uuq43$Gas{ z(6VUcMcee}iwmyN{i7Ga=aNB7Wp}A(VTx!~JplGV@~ZaIVsoGovZ~QUsw4yO>L}`x zXzErcbeY!|b@4O@{}jxo&6=h7%%~eAarX^qUt`%CE3~eLSw!1bBQDJeJCFBd>cAFw zEsp-S4cot3g1~ z$3Je-lfW_tk&|!sdG9Q7_7=;#6SR5Nb3B|fDCF32&aW8b3Ndik6{dc9CISpSJD3^~ zT(-_#LjfsU{RYQnoYiaQ0ZdhgeBD6k_<4HjqWOPcV4yhAUv)?Po47#Rbhha%Fnd(LV-sd32n zT3pY5*0SWNjl|{74k9efN{7R{N#YGGX2yEaP5r#3!?S5|{ktL+ZeNcGvpUz#HnDjZ z%3*KXV|87WFXTeYK~@9~@6O@RIZ(YlZ?-Zu@nPc(fENoay3Nbs!okQkv0~eC^XrGT zC9eI#kXjR4>;F zWSuJ(lmCgL{X3ie#0VMO*D1IB>tRd1j4fO zd-x04GoTtE>$YY5-O1H%)j}UV!>L9J5Yh^AC-uo1@Fva8WxI|79lk#2bW2~c2Hk0t zf&u=R8Z^EA;M+Ef91~1`X-#<8@^RxZuH+-=DWemw)c>r{$f;m!Ei`|d?Zea*#tFOD zyPmEK+4(PC+QwCsw#LU7y@{q2DJwxKARftiq?OSH)?d2l`i^!IYw*foeAi0>phE-?pokGtS{hmkrueKxAX0Ch0Z}ULAo67RPlFuz7cPNZxvzBn=S%UHR)#6 z#{oA$n0W0qShDq=zs{c*HYP{V$4bvaxtZqICD zh0Mae1DF}*YZZBuBDr-yN}O70Bmm;y zYhBB7>!?aZ!nK592Yr02wz}+v9Vr$M>!heZ0e(&38=-0jeh&{|uSi$mJNT++B(ShO z(STrcW~e5GGWq#CCitfQ7(Ikf#Y$&@Sx+w*vk2bI>weeZO~Zt7Kd3WD$&iML{Vu& z&Y`i+!1@rHn?hqY(wTD$qMbjUfzmgVN*@j5E5Yz1giFsWN?=LTgUUb{kh(Bu8cStR~0UX0kc5h$+FyGO{!~ zuDE*9%fBL`Th?y?4w|%nM2Ka6LhHh*aM>E4wt6et8StB_QLBY3ZV8i(Cf?(`SjX=t zAUplJ@$(&WR3$BWWnGdrl_m?dOWMM1hZZ88% zbrWd_@$7>r@qC9^@KEyHQx#0k<}!fC9(+o(u?DT0AYC(Pg$G#O)?I7itM>p+&g3r9 z*?jpLJEVo+)8TpcZFVSusbYFCp(Iv65VSGI-F$``tln5w)`Dg#g6f*bQ}vLQB6AhQ zt`!d32!;|o^glP;;YEeqK->AV%rDkc7l1ZM4lAtD;`QqT?rQ%xZ)L0vGR=lSul}c> z7S1iKJHT0-iyMNb1~_8hsmwe7th?M9oD+taky9QNx;XWJMA!`PkU+q+t)WM}F@lQL z?V!7YaF?=^gP6mH_R+jI6f?^%m<`?a@6oj`sH?KZ0d;AHX1Vh8KZz&QM3^Lt(^ zY+qjK&n}RDC>0}1uIA_6mqPi9Dfp3BEh1Qa@pE@DP?fG?kV#$8vPG#BlLo;-?mgi9 zzcSJ^K$oi~khuAzFXZbihbk{Uujiv`olNb{s%R$#X9OUG$wu4LqTikRz1)Ra*YaVJ 
zIo1nR$rX^;wM9d0xAViU7FDT?Sjsa)(iSn^;h>1J-;+EKZL7AB;{gA(t0lYxjNbXL z$9cokFfO}LEULh>>*k@2NqP`Mf+rUR+qBBFKtT~vO}0jnP+IPUHecnaVZ}^c0z~2* zhdrHu#7&E?4tzCs{VThdLDe9YbaFt>gCdUW-i^h;1{oyfNbpyvyO1)sp8;v-^}cvr zJj8PjFxEkPaQ5B|$TU4(iPvTjR^Y5x74;zwQvQYL&F=)q?>_r)d)JgmRZ-?6$o4B| z2)g8KKP6X`voPt4sSIS`DbtP1ByI7pNDuNbefDU)k#au4&iQ59<I(92KYiW8Gj}MRll$%G?(trwmgT@nYrpIG4TN{i?@7 zjJ$U3)Drpc1ucfIUFWxavRp-7_KksCyLdbV7kB@1h^L!#54Zb#L7eW#^(;gv;#u$I zg1yDBphgh=DO3{Afh$@t?#+}hTC{FFEPxnzA6OQrjup8b4njGw8?TfBx_W_LQu>2S z9v#@0C%?}fc3A+|90f{~9MZ(|2ba7CU^Bz@Ygr1vF>ilw0sFiFcq?}qOWtAaeSjTb zWCX^^m)dOBL%OtJG2{o7GwM`Hd-zKN?p3;-iyVhC|JgEnu*6>e2cgIfuSd>XIym65u#h_P>AXkM7wYpo& zMHHd5kLrjuFm{4f;YFX5aey!&}G$X$*zU{~w?sYl+neI^a-YNb|FE6q;C`@03R^}T(rbwAAR&{VAy1^A$A&9$KP zKX1;z3B8>x5Zu_!l7b9hJ^YfD50-2bD)qGflt0T8U=Ovhk5`1Z2wr+5`->Ko0WCOc zF!_ADeE78o+zKO&a~jO)eYHzKx18;jyvh_neQ`48)b$IPLN6rST&kC$9Fgj&5BClo zhDmxyMz36(VI5uAJ6=4VZ9P!<|H*mXCsMVa=}nE-8F0(GOv`p#DdxkJ;;$EP0WS{b z5k40Ak@v8Y9uT`wIW}^^D#%+!o zK?!li4lQ;xu0UD=1ERLOT7WlJi_o6@idMg3x+_A>fP|bXcwK>6qP`K7R zm%u|tpB8QD)AI+?4uYw{0yEkZN8w}j{%{-ZucTpxqUnEi0iJ9U9O@@h^(N*O+TB9$ z7dbPIYBC$0mU8it?Yx*;R$5k3A?G7o@B^RQ5L5yFy!)Z*)Ajzxux8+{deVk8XKB&p zf$a9-3a086iMLM#qQ6&$wPHNBbgaq&*@xl_q$>W(`d zIy6U){=v^8=}|^;$x}#LF2}&LqO|g-kl_S&aC>%zaJAZ|y7BvQDL{H9e zkUc;02S3DDea!dGj*nB_aSx4)12^s0!W%Ui*SIQEv+jLLP4z9CWnud6^C!0i&p&C`BG6Rp)}VO*Lo7mp4bI8y(~ZNVDO(~wDcZw9hTdfhp7@#Uw}oyo+qQI+^(#Hz-h zC=w_K_}<2*}BtjY*gC;r3RjlQkjP8J78KX!rY z4Z}i90ORSKofLn-PJs6|pk$T&NIG!FJ8_;Ss>Mf|-r)a?#%tXbMm;;@0bP&6r?%~r zNkpFh=^+k_6DfhEKVeIC9d6kZfdZR>wU%8gd46>`x@i z5g$UxuifOTRcjp;={#Wk_QaYLraa?LY@h8cE>?I|sJ1-m4;Mx=NcQp+3l*KvRX+-* zR~KJVtA$;ZPlB43qohxrL(=W94)t>rtKMNfS~}5A7wo5^mM8@Wg5hMD-6y#}+=2@< z8&@6?2hy^Ko4i!rL&XzM7x>h2iCDN7@O#HU+b3^I^G?{Yaikmk zWw~Dekpu=`R#zJh{+hkEyvFRd^n;m2Mt1S^vs6 z+$g=~1ZF6&nGUxWTe>b|$DNDW$l}h0op@`n!#UV(&Oh_5Xc}ru{&qYN@@Eft&hSrKLWiNWMSE6tX!v6y zx9sQyXMSdk4lG&HW9OZ5wTDXQC;7Cyr|PoiSO<^(5uXG4y*s+uqPIG;bR&@F!nuam zj1BPjI6Uw~>J=}c5f7whb=eq;0fZ9-e30)>t5A1VFkw!}|jkol(dmSWT< 
z`dX>tOR3b{C4RMvB=XWFALnjYxbI)DV%UAd%oD@r9+W2YM~d_au?KstTiCVaFcGOa z39vsNETMU5xzPz88EM@mja>1BG^+F_})Xr^Fl?{!CrVvLDqEqt?N_ym^}} zPIil|tiL9cPDMwY4;j?+M;aqakLnS)CU&pO5a zjP;*C=Z^OWZ?yFq=&>$^+J-%ygFD%E8f72x+KZvLa*HW;N7sA z+TU#;jiYRR@sp}gpLlu7d5COv9jEZwyy_vOdJf9?l8Rclt-dFV2^!J=Qu_4S(wzv? zk~Q1a`AL9D|mO*L2IK>7$$dM~GFWh*k+xnV`o5Zo|Fu8QY;4Tgs7Wtlls= z-xTjTbNZUsf$jPKu=bWgac*1JXf_FfpuvL$3vR)saS7hITX1*x;K4(158Aj}N{tDx9ZfbTLt~Ap}NAdbBlaYlLUTvOzffJ{?&u>=Zj~iw++@Mc{Tamgyv1iII{mZ$?`pcIyy#XT--&ei+`-VPpsZ{@W zkAR5V_f*dtXYbE)$p1Z?sXu4KFg}@kWrp;xvoZUOz#h~l@t24XI}jTH_s!oMTKiv_ zz^X9idy+$H1Yt%#N2Cw@e{MgJ!ProfHhad*L74LYezuTys@V@Pl(6rg<^uk8^aQl? z|CLMgMNJdvD(`*cS4YJCHvI4FAjI_-iLzHr=l@^-T#VoG&A&c)6V4l*rvAMN&cFTL z#>7`lOC;YInD5V)LVqll&?ge>zl*|c!QfXg5Y#86wp-v3mvLa;a~AOa1YWiZn%I;( zJ}Dnj5fS6P`ETytU+>G&zP0rS!La-><@bf}1XgpdPXz&B3vxcK^e#rJCfoDdNA}zG zZp3@L8$O_G40RxZkt0_kZJ-ld?r&cqK+KBKwj*H5=e`U2fA`nlKyQd`?Ss+x|71%7 zs~F$fB#xN;+9c>@w@M1oDGJQRP)^(ZgC{Fx#Gv^y4m$%9NAs*4vCyj1(HP3f9Cdu* zsbY0z#RRYwvXRTla<-(h`PfnGb7fqe*;F_%1j0-cB{Iom^f@<3Cj6mTtp#c7zQNu0 zaM_wDjr%5t-+HB%mIEi?9Yzpe$j3DOuJ3|&>s?tq>#keRow1vKGfM}2>6pYKo6nlDzf@#19Z;;wZBW^=JnDh9x#8AkU`uvu{Y zK993NT2L-ki!`6kH>N_gLWD#8nE5US0-ZV%M{{yXBd>j-IDQ| ztQI1$h?|Pz*m(c998wluV0H5y{Uj`KS$x~b0S7j3@Y**3q# zXsL-!$)UjTVF1HD$?4S_{NfojMR!JY9gCVz*E)d>fizT^d{CI$#^PrstdzRg}Ac41l>_QcJ(TA zCZnO#z)ZSoddu1BxOtb|ZRNVVstVT%(n+WyNHU!+MAgdE6#wdw_nVr?o=jI1P82XF zOk9~p^_QVU`vQIt(K@?KN{y40(hhRU+6w9~b7fk$D(c$*+X-jmJk@IQRJq z1u#?jI-1w}&>W;nz$DDT6aPv$MD5F-!| zI4tu}(%f$+ov_ufkCxcF7V*6HJwMwl7L5v!7;Eoc=-^Mj1i3q(5Uj&k+8itMp+56k z{<64faopIJS?C+j1Is+!JJ-=aFa!6Fw>eu(_IKi03=%Ey7wE-4EV#~*u>VARqTanJ zY%?3|z^Yb$luBaS6^phCy18(*1VTIz|M@=s?GfkyPmkF5S5QEk*+l%&WUV&N9IRC` zoteUXgArRGPJx>r?Xs9JMwL_&DVLR2>muaxOS=vKY?w~TcH^q-= zdQD@E7Q=L{OEU!EAY14coaGMK(~xPmN7KICoo}

izYPbcfjX-J7g7GohJh$Fm(> ztW!ytvWW$i#h? zPzKq5BO$R}_d3vRt4tXj+QXnyF2tN`x>b~zB&FtewWYp+k4J;CS?RrM11aHeLUOBzgYGLdMDzo3UavEV{3b)-^?#-vGU7tYXr>!E&lu zEI-j3uscQKf99gT0u=8}VJKM@;k}Dw|b32IytdH=86Y28r*@faT(c`UXx;WH3^;XS?dVS zen!kws7}mqotgD0$NKa1N&n;NliRL8U4-TF+q{UHP2`|NSCjiv*wV-sqHDLD-4Gf_mGopaA-iyi5W_;#FkdkIgOFTeQ=& zR>^CFmy<2pQZ!om-g})aoKD6@&y9?p+-pRW^R43-GJku)WU8-rWh3p4{&geq zP`5p5;+;z~WsdLM!B}=jUQ4Mr1uoT^GduHBPiMs_cPe=Ue%*2)x-wb1RzCVC^T+_F z=kYzgMH@ZHb}U$DHS3`ALfdwF<9_||>y2lH(*>Sn95b{u-tHO*@YS|s3cv`EwA}jQ z3USy)vH*p@gjWrDRF)a;6jwLF89^xcGAt&zgxP9ejjpm^xy&P8as&A8J(|$Ve{v6P zY`y~MoD~)rh^eWf;-RUFSe*UO7f~QYW$Gd*&e>xy_JIjQ4v|XfOi&uNE`#|jWFk07 zV9s@BL`~p@=aH!UJATRlNB&mz(qL`K(_Pc_5F=~lFCo*o?D~MLoP%l zs{lltc;}-<@x#@i({uq-98Nd>^BbGN4$;B!h?F|Z*Bcg_zn_VCB=^`%`{0vVTrx|L z({b6W`Ms1&DuFQ0;7Vru)1cYy06VTs&i~`fMHJomv2JhFVJ48M%?*6Mi2TL@LkYd| zheIt8EkbcTyp^r&W7q!d!o4+|GFVHX#R7y15@g>PyvXBP7~h!29Jzl`G%yJQ#P3p5 zp&WbfA{QF8T0dA`D6C@A_fSsemoXWET-Kj;eI`nzRQGziX&s*@aWz{9rpqL9&6TU@ zeW+L%pS{3hF2`dElUoeRv03XL;4;*wZ#E0Rfd#2q0sTMa>Zr+5YaI@Qo2zZRL`qOH zAh;LXjvD;$RU1G-b)*)9jKNVB)cf|BjN)f}FXbzHK#G2C3Di{oNB;cz(;G0OPSdY~ z3FAQq(}`krmc}^qHQh74X3s72hEJ(svmw5UnV-HBMg63d{Q|64ugirl)V=)Jo+Wna zC@@WCyHjB>R|IRuP)q3u1vX*bIs{$4l9XO!{&iqv#6lk8UA^ zOiD%VUTB<8ZV&8RjA{d){;QoyDn6Wn zs}k{d5V9uHgMJ+f_O~;{1E&LaixJgD8}LfPXF(V|HvRI<7H{7P?6bkFs@4ojzL(Ab zWMMM}NY*CxPx)hlIgvuLlX1>7H?Q4Od2Wwu0_}R+5H*1W8YfA5C;SY?vyJWvm(I1v z7B9HH&4LW812WN2@!@3l$@n`@-4@Rp=hWlxXF-Vgk?5boWp%xyBM0K>Wuj%w{4FDH zKPwhWW36F2&;1r!;?|vT@$&dEphaJ8u@c8_y=lTVOptst+N&S^5dUEsO^4^Qm2UDb?#IaovN&Mq2{K(-OXBc+vuTZ zYA)6ev*VATEqxMEYA|Fv`nYQP(i(E%xidoi*EUP^ipTvBG^miP(j>RZEgU9jzNXNk zH-h&5oU3hN1*4!Gg^=qD&WLW#1rCFP$eKp)jOKiG0k!OLDkhsGX6ZIF%$|D+j}e8P zJB61ksnV7~GOdk#M1-aPguq8o*}4~ELT<-cAjV3_c2~%}=WnSW8JEkI z+@j-0{-QE2#rma%ZgyNVL?Unk>-qi z=%&PydE`kEp{QiLjAkwVepR{CO{ok6|BD-$?%acW8MEVKE3EEGhKkidrNz4YJRKkH z9nZ!AwaQRpvVqEg5;c|0u@)w|q@ns$HZ>%X*YT258~x_qVTVDk>pNN(i;lv*mM2N{ z7tVr8vh3_t)*6Rx`pOa=VuA)WItu?2G`2$3=&+Gx*Lo3JnQ-#Ic*@a#nWM>Hc!-}_jOZHR0j1ORGBD_HV*P>ol86D 
zSzSI}58)pu+V zmuAmmkQr@;oe(xhg;x;kLe;BqJ`fnF8QQMz>IgewhZzo~_R94L{mhHNm(z_N=y?z-YVVvO~z`BQn+(%tK4Yy)2DEV4+sOc(-gRV==Ey#!qWb9%a+%c;?iVC)^Ke4jL+ z`(ckshncTghYj=t`f*;Rf!ZUHA4%_a$1uP}7g87LOCSpvbTKTKqh%?XP7S<|RKC67GXPfugF-j*pgQaJ67$K1k}bM-{xegTk- z>8{xA2iUNxbbcQ}gC6j>PDe@|ul*`qD8|e!99FBiHbYO9H2f52T4MIS- z;CIr}Z1c^6wveP!%z|tRnIw_`84kkQLE3?M1_^EMx*AOG5{>4_2IrFlM~{nvVSH6{ zeIbUMYlm+q^cUOoc}F0h_kKMb+uEjsbfzV8El6ySwUO^XaAW)1ZV5Cj-{SSF=I30) zK?xEYg_KKewrW-6Z9lNwd(5R;nL80yo6p5+)E@7iHmzE;s78&{I@;3Z?Tt619xZva zO_#a_O&kKcYFUp08=aluN=6iL6F)ZoWbss6`GL1rgFLU@NqOiWplFX}`}g8q*lzTD zH%7Ij}ws5e6@m5N3ritmlcj@1o|X7*>c^>Gahj* zK842__v~9dh$+DlAEbICxxB6QE4UDmHQ6ns&(<3$dyLy(*2xF=g0&k7msy~wbhi7H zU4H7!Cb8O;GV0Pg`Qqvel^~XpQd*Z<7nNSmmFYJoM z?-~o$nPb9jlR}22d(%;Vt4~=fm z%=5LbannvKqjh%4?0%RJt*>H|0{QMtRV!j|CVs6I;c)pY zz0)?nb=3o9LglmkCet$n$D@0v1$L(Hvg;_s9b}hmt40RraPY?ku`&J->zS_HLDd4i z?D*u@KbIuoU~yZd#j1>@ooR^m+S|Vgu1o+aBSE#k_M$=)=J8}ZmHtmQpIbXrHY;FS zt+}Fe?VT{^mYQf~j)d8cb9P(E#Ld+{%Ws7M{i(Nz(Ag<}-4Re2ug8tzg0nVTMNpOd zz0)D~@Uf=KI_?4CV@C6r--XnZnDpPUoOad|yS7e?{MhRoq z91Ep9$y)rj%&FbxMggjrHL<6c^X~7Z(iU&c3m!yYaR30j6!hoX3nW0iKR@s%* zR=+o5yJQ}O6_#|<-T@uU4pIow7ey*+LPNHkFCX9L+t}NAb>i8PW5(*dXei8|N*{nq zlpcg&Co@HZMW}+9a}WViZ?q=kw%@GJY=yhFavA)qR5Qw3sHt|k{3umDrr@iuXArZ_ zXT;Rmij(7|#x8wK)v;VfO&a;E4^yQRat;cP${D(gWm+u)4!-uvGgI!mu;9lzog7!U zPHTdYAhpknLhvm^9R!^{d4Z!CUSEK9HMI_$>+I*4RC@WtFhVV=pwIzji+4=hHbGN# zpO;#uT*I%h{=i;`*tw|K(e4M*eHkf-*SS$vn$zYetoO{S7a;H3=;S(6oBW4KxHldL4I5oE`=j=L-5=w6BEAQ(A5B zRT~}`P8@X~XIZuCPciu=Nbl)H6(mWPecaG}beaWQLoMICh`voM*XvMNXQiexn^(#L zR^)AknRsU(>@b+gU>9n|cLbuESUg`9%~YEl8CxaYWbg$Wkin`taplhMy%wvf9XA@4 ze)qG*!E?J~qw#ty`vH~Oz4$|Hv*}+Y%X1r$>R1C*k-RYRXfnm8k4M$^7e2CGHw2Jt z!n{_SCyA&)Jt50826FFlkyx6Egf8kgOA*2A!Ic~gT6rm?C9B$Fy2nxk(Mg{y+M6)% zHu8_zH!43y?c88MY79Cqh(cWOD?~D?F#BXGFuJN1p9gGqc7fj-rvqb#@~MK!`ckX( zD60V^!Q-7q73<#vT+Ita9ljZ4>i4Wcm`>XpCCcMZJ4amk{L@M2Ta!IqJH4ZDQRB+d z726rXQPme*P*?sQugLdIS=MDI#=M07MGmLhgMtoaf$d$u8_hg9a}7U5y9GrFH&`ZB zTMTRIwicU*-?J+NZYn39>fiAPUk2OtONvMIsU(jug0QLr$M&(A3UtoBnMRgTMzZM= 
zb$thNI}>~{Cr>O+_1sq=r}bB&Vb>LU36C9@DcljPVtAbh9N3&#^FKu$=oE8K2U)$_ zM;I}h-(i!gg>)Aaq^8zn@Fb13n|0=uObX`otv$P;KPsdvKIURjmlgY^18rrguuohq$P7DzGr zn@&xmaTqr^TboE-n3jGNpTWP0fR*$lni31XF8N-DVbWge2xMoU7iZK7TyEPKq?mFc zGrry{bjMFT@}&O6s;$fB>Dqc~o0}H5$yt%3FXvcA_zh!C>SX$3@qG0k`8_xttj1?% zMyvYV7q+LI8C;A8RkIx$Se+i{JkM{v2S@*E(W%;``g(U)>2ty^rH6bkO62k;Q41)4^>vrCbpzqUsOrw+ZERqcF~SNO?ar(YlPwpsmE<=yxx z1Y;uTYVdGec7R(Y%LO_zrR06w?|$;?)$;8B&H_kj>(?D^fAkMWh_f&^3nd4+Kn^kc z7mWl~KS_N3M>>20=lcVj(`{q-&0hU^vbTAA}*!{|zfwC{_-)w#J~cv!bdKlxX7NZ3I*$7(2Ex_AGcH zd;a>@!49MR`XqJ!gzFhuBN|3XHy3jjyzkiWI0t~iG4dU_p9|Y<>mj^6??&w8*7>?7 zH+QeA=!=j`VEkjvoi1FR!+`Hdy)F>v$ZuioqDXw|-1&fq+V%E^cfQ4tjd1xndluq# zjs5%_-K~4&Q9W~CYLlYKZomJrf6in68&-?IH{-BH1(3Ty6PjTHI# z3l*qK(7WKpK&t^uO|KPI&~a_Yi!sk1GnQL!>EsgvhQ%gHo)KMS2VU%l2|HH^y&d>ApCIN$CU?T?|%g+nD8 z$Bo|ap9}M_c>_Ltj^?mokW8RpTf!KjY<+T$=QK*-vfm#1B882oZ9U|)sIHQh6`7_w zlFDs4TT_+5C=v51lTMv^9I~FfVYGJY2p|ShCRrM~su01~ewb(XJtYbor|#aaeQ~rP zyN3*T-=V7mj=kT}*m#hemQopAfD!ifRXnac=rOnC$n?vAAtle`wQ?@s;5)CK@exp0vDXHPZOa9A@EtzgKNVINE2S zpv&pG`)XJ(YLEww(`w<_Kb~7=1$(p#o+;y&K%9aPPUky+b*WA>#bQO|EYz-o*kma= zHy%Z(^bNS(sky(RqMlK#wBv0Zm$T}<-3~}hNL`h`M!$%~oSatC2q`O}?!*&%Ns908{pyxNj_X}K|vv_t6J*se&MMn=e z2L-vPaoVe+5#w8h>fBuR8s&!oSxve&dC5;AbpLeY5Hwj{8BuGcA=k)0yp_eig_->a znuz7HPpjC6EZXj_o&FT~XJ z&=XE~C7XPwv^2;0sE?)`NX{f0pFLrk-S5V08ES9SqgpLY#7J=0x?D(Q9$d1kJIs1$ zj^E8dBzWJVHxAjJ-i#a~byu{AF zx~XsJlJnxud|nWOUy`1JF&dHj;TgSOrF~Y{*Po$jUZr;0W-QqH&F6eV=L&N6S$9^O zBqQ5~*zK~MX+P_7SdT67TQ4kIRJeW@62n5>hFFwTGz}ye$18<@G!|`HRS23)9jfGwd%Jdm+yKgtR=f&fV#VJzXc+B5oCzz z`y3y3uav?aK=~^UNPRM= zUQY4)bu29HoIN({B%?z8w|Hkk-QDA`BLK*ggCejU>Zqs|szq{M!L}+*!9mIomQ23o zCU((?rYR3l&*{VsVC4g)xRB?2VQe#^S{CEsY`xCj*@4Y)GBH@_a#{gUh7YkyT|d^s z!7OJe)8KnU2N>0_+DU)#H8+Ifj4C6UANX_!KhN65eD5tr#r}4<&>Xk^Jz<4;cInsG zF=pq(xhK=fJ`!gXRn!3YI-`_G$IIP!a{=sx^!w*$Q^Cri+&YX8+O^WupadEyZwAJp z{;w#4xvKGiicaK$PS>X=I8>4O7Hnt6hgqk9C9%sffk^|sXuew`49K~@Zx~s1LWNbD_C7UjJ3?JAV_za_>*XZi%_yr`C5M!VI)knuWresCL%TDL$tG*> 
zA>!k~>hQmGA?DA3fJAPptfj&tQt<=3wNljW<3z8oe{g{?RB$n6zGk^La=2pi-TnB1rF7wvt(D z1GU(3N3FA4kNUiX&dKr4s~K9IhM)0Ye$>UR<)6%mAXHj+s7L%c@sbkl=E;%azEC0? zoW+@xma;?6UKm&}OSQU1g+R5T$O_Ncc>dEM;h`mJy%D^f+}A6IFKcN%bzmlibWopDJsMHUXAfV?oF#h z&AFa)3antyprlSoff^0YRxU{!v*l%T_1rcy0GW0km30?{J6X{^+?s8Is@`2Z&y~Hr zZ=7^QZY?Pa86Set#-5!iS?MB1*)du0YOKROzVUp*qE&A{W~mT(<^cq|&g!0pQM!TT zAcztRMBniecVGd~piB|h_tSKN=sqKqo{Fw+mhF6{WuU_}kavZR-kyHcYA=WZxv)K} z{Q}_Ey#Gg zrXh9(57qJ-8uVVE$L6O1hLth$vusloY(Zt4rNs4Jg$%ofCQ&lVu@#{v3|bv6eL>9N zxv^gDc6ghE!cv|0>qG4!6LC!bZzM1jyT7c+jXM@l+fP!>gG8T?6?@j@WA^$Ym*z%p1 z)%94Xs<)A!z~sFhl`RewrdgjfUt5n1-CEbjTSpaH91B>be7NouDk3+?8&4AMt;Uv` zDx_G1m}}JXtuC-9yGC>a>l(w$i^&k@V8fQmIq|NJEXCz15BdrU9prq9v%_3w4k@mc zi-KZHY`rxLTYFIhI%j7KH10)~eT3|q8f9v{S5A&UXr=i#We98f{K_?eoY8rqgYulM zPMs(==J;hz7uJ<8H}-!K*^7nUy}L?ePtMw37Pi%%GKhUp>&TLuEcW9lFnlg(DnnA+XfU zYINZ_8d%uofspjop+SkQS>RsBtjtySjddS z>T2x33L{}JF&M}nI4aZc4%ulK5zw3D+HF8Sgw6pQU*(Ja0ts^mTO4}6nuIJjH!7TE z1zJtrksAPTbHZK--$S@cnU*OfW`)%!W6mQ?NX9rEqJcZSd<0+d8=N8Aji9ZrhlA2Wc8I46G^*yufc1eBJX*BGQ=kRU%`H4s(Z`Q*si-haR;*e$mF_i|}HFV`7 zo!%emVH3NApr(aY+qNr!VV|os%Ioe399G}w1wOK|48A!7UL~W-M30Q-L{#Gqt_t$U z+2EE^vy(*aWONyya4bJ$&Y)wlO1C8ak@+xqt}z6Dy+$xXxg zY}0Gkj{n#nj(dnzMi8w6q4~V)7}Y!sBJ*F;P}?*>DeWCKQE4A#@w8zlHo_> zkixa=PGAD`b>%$im61gSlD;uhJD8vmOj?D$X=I;@1kmZW&erI|4#ySV#9C*s;Z3#4 z_w3;C44>LK{h*AHySYUFlO+H(mE920V0mIe4jXU9x$*ESw@u9`8QKXicG%dam-$`n zGQGLenH}tGp>MA)3l=VF>(3Z%PYq&PN|-|$t$ySJNiHf92J%G^L;Qvl80*9FEg+tvr7kP}Y;jgz563HyYEsg~byEPcQG zAmebK(U*v>nnQZP=1{zJL#UOYxMLSule4EFKi9y<>ie?=S@q&@zAG!)f&WSR4Bdxf zvGJ|?_03!igjMc)g4$tMq>z`fX5-ffsnwi)g2^zS+s5{JLe2TKlc8|M*WN2lG4TzN z2gsVdqu}!1E2Vd-&uB?biHwatXV08$I)&1@rwoVs$GTK9=GByW6?LS#U$Rl`UT_X$#SPQwhlGx+3J?Hc~&;JH!areN-RaPgu%; zr)|oJ#D#sU;s*axN$h7Gs2{(NB`Q_!jWy&Yluh0PcoFk~PV%Wf>tK zX}ses4qzvj!pxq0C?4dZc zs&9*wz$(D419RB-s@x!A`+uBmTpAe@weD2JJ~tdxZ#x~2Sqkon!p|%-<{CXlIXox! 
zUn2JAZW&#@HFRo8gatS8>h`8STTjGWIV#nAm*`aVt=^N|kU&y{Dg-je?d4Orxlx)c$2hMR3R;#gtuDgO7wdb7yjT-2XQ>dmjX*AgdHR{|)4^yOo-%NtOxkC)ef2M+CD_$zW$*3ZaSn@+W_ zu6Jbs+kHGI<(=vVI4tKSk3fzrlSaMx&G1v@3Us#fqQ*J=QHF$K9Zb%j7oip$R_G65 z#HAqBnW8xS!Af!mYJeKGSDmjGD?9l<0UcQ7zXg9>v2iV3b8OtARY_2$gIK80=Y``g zKSi55A=~4DTHx-{X<)$*s04ZUE9@+eCi%9Qh4RgXnM1mVZj>siTF~J$=jR5WN==fL zBE$0UZlh5tTUkr7bEjNgqtmeGJ8bpg0|VRa{8EW>QZ|LaaxJGTMeMIt?=^`cwfuwt zm$J{7x6hTBamH*oQql{7npS?El`ki@8%{=6%9rCCpjSY>;2ozSwyn(aWB$+d310h+ zkmPE$Xil0#tUDOpQFRqlivbL%W;w|TKvR$2>(%U-V2l~D`d1aJU*3XtkgN$dca`cT zxeCXJ&Tev>hU>vz)v@M>mX}Y3&`Mk@Zs-oNj5vnMV|j8QKOMF`<^#KBc3h0{oC;hh zz!O#F4yMmdGhPfeWp3<3c;Q%scum4HKQz!WY&X!+=u;3VCr?8HzBGe;3&Q2Ff)pi2gp{KYYeUS}kY9CVG(b zHD^%nNeA7W-|7oS@&T%{I2!eE3R%a;%r>h^kkUM3q+!RD(Uqwf~VGgHp zwZ@V;>VwgRy1hRlH?Id&efO!-$f+=sDm6b{=d&g$9t64DcZbJiS(p zUd{63&$NlAiqBF+hufW{eD~$(rMokOZbdjRcioJe!6nI)!e~NLGHu}OHA|i9|L|8OX0rE23*sD|qzu>&d!2+N zKU!KK7b>KG6;%hS9Gwq#7jq$eSX@BB?2~c;;Gg?~I3z z)S6kHIjkm2mplq2CfI{0OokK10}-*x z-FgIn>>HMB)$Hapk&j$^hY=F*wMccl+1+Kq{%q3TZ1hqW7?-*DHn8i8;(-jJGU6r` zFqp_CNU=TiJD2Oss?B&Ap-aFt{bp^rc_5_)>hV%7DLR|4$7@boN^2tIA?CGi()kE0g{#s?+vf8)${(8s+p~_V z_~pVlB3}wpYd^-0TmnPs!w+|{gXLr0ziP6z*7;T=FF(dCzOitCZpXVg7fN@HLsttrKrvhn%D)IKgZm*t9i(0HrS`M084G39XN_Wu;b zHk+`LQW@KJ8?3&&&W2N{yfi~P7)Cr??E-@dUgOAS@4k4o4xk7$o3oFn+WpLtkRa^E z^%1eK-npwXnXDNAF)0(nr*?#$bOi&GopX~}ZW0pa>_U5QALm+QDJ;I2Q@e5_f;Fd0 z%)O3+eccAJA<0%Lc!j7|(;?QnjhS&Z1oOSevma>SIys-+*b%ga8IBk4WQxS~9W#xC09Ap49Z6D4x3r(d9YtUoxGI z>q+~%gXT;e-F_?!NFM<;(BwdLu60`3USkU{(p53G{Wi7GO+a%Nk_%y=p;u9X`l_LF zZyGbUrR7rPJOi6n0kiV%dhTE?g}vdZB;kBbB|Ru*uGu7GDQy4BB4G??Z-RS4TC;Zq z?(o@B6uzPqUVb{RNbuztA@J!A5eIpJQX)0c`N(ret&_HdAa#foRhX;+_SuOCa>OK7 zZ>vY0VmnQx-b58gul;lVos_)GdTDvnP+VPpW+B;FaOSuVBBkuJwC)sZVS>laq0kC@ zzCn>SdHt$G!l<5!9yq!Sh8?@X6<-Af9JW20vj|<;8osKv5?i?13T40tY{EIe>?ofb9D=zW znU|wDWs;VYdiOogL{=|_9)_3Kxg#JucNb}ePqDzR+3l1}?T6VzO1k(E zkhncr?1ayn>kk(-o4h`n$YG-cXd?u5drm_MxGK>=`~9mAUH8a9#HLsWIqd%s95T_| zYY&%%g9rylDx}??h^Q|j1FChgrHeS;x&y!u$oteU&OAWK;K0w13`)fOSkwyNBoU$s 
z!dR9<0LdE$gm(fCu_K04e6Xmj_o8IMY5GI|`MM|~2IAHbng&3xd!y7!iVYsl`j!>j zT5mdrKe~j2`00ML07fGF`r=Rj+5h?Tp8H)uKwH1EIk$vDX#9L!DM=Us*uS$|g3;D~ z7Hui?gPnomQmGZNI_|Pz{MX#vYq7Dt*McimSE+nK9b&f61}{~V;IYtpZ>oRZl&MdxUTs zx~xQkryrb!i90U8fc6FFV5x{>EP3yu9xo7)2yDUP;LD@2^qo}7pWnY*%)@P|+R5;o3 zH@1|gb|4_nCRd%y0}%i7WGAP{e!_!Zz2g+`v{0r2ufkNo2mGV}SZuTV%zric^^s`g z=1avf2VRRd6>l70uK2vvNWpEFw3$5y6tG(RopnyD-xD|x-I3$9O2_DJt!|DWIzyY? z%&|GS+}{~4kIdOupkwU;ywONuy;cIN1bQ-u3ql2cFxTwTmyGk#rsavPI|qoF*CnvG!c4wHpqbFj4SA{%}`B$5f1W;19vnVl-QpAXfJ5;23;KE{n$DNmjoE4Musl6KVs2qy5v}O z#7TegyFD{|S+=qt+nO9;+CF6LMWWsuLIMA&bS+dF?dEFhZV(-k84^JD^6j22YVRCV z(-_MF=nvDq;7l*_tQ^lLao4^$PY_4i)jq6Za}Mj4C*GqH81IfB6S1Q;#y~NEC4Q4# zyFkz-!hY2cAA2BkEymB*8oo2i)c&u*lB3%cr_0c_E4Kp{Y zG;Kk?Di8TeG{_A*p`uTks0ATaiIcN}7^=H++@=3jwSDI4nvnnZbO~rLp{f;wp9P8^ z8Tq=gvI3NX{?gb2X<7i++wNUE&p{BaqE=4JRFgx2SzxfK-9ZUuu!kunHkp` zU=PD)5NFB5@Ak4!qg0Ih)7p_-HEQouextmabN}pibJD1!!K29IdfK+8w`P#WqUtR3 zLtBE=3mE$TmzbUtzpc_tn!VWG_dn(S%4j>CbTK8t)orpPCY=f`)grl690N(D8Cspl z_xGG!St0iSNXdEM_-U?gmU>`d7>=O_7b!Vwke_9=ih>pB+8?UWGkLsE`RA}g(}+I; zAe%-Gr^n8$jV85m4^W(|j;7DxKzVhWoeW5bsSBn|c8Hu`t>-URf5a-#&`EK*UWwK5 zx7xc9@^1@Ke6q`Eeg{V7G&PgsW`**$rljy&@}w;~$YGl)VuQTWX<%~)<3lR3N5nEH zBu&*$hCA@r#J#U?siSpHh?m~;gP$IzGm@)fvuphn|J3fA6}#R1HQd#|7IQ;6@kn74 z+CsVw(`dc3uly&7?D0fY?Ow`S3w)l4FOaLg%3XcXzUof70;F!j!)NT`@;iw#0>$!w z_euun$7PjD7ofA+b%?8XAMFW8@33luvyF1Lxd%ti8E5Yj4w->((XR>z@q@h> zyB+JKN*1>=K>>69q|jDzsE{4p=s|&;8$}?jv9zY_;Sq>e%JQ9WHLJcaMYdMGBoFTkHocicd=7TqpMsb-5J!PSm)t! 
zT!hV;cDKb*s8thRlEE_pQB^gUr8L4h^*3_dixsWABp0XVtU~LwW#Ufx+N3J~Et| z>P$LPec$Y=(@So6Oy8I|@t!%(s{;jyq{MIvTaF~w;9eO0&K--x=_*>a;hd!Z!cQY3 zxWLFaU@6g*=6fVoOL9WAVKeGEv;9k1I94>$FqT;+@gN?Y;#7pWy)eR7Ph`L#u#eBL z@}it9;>Qzob0%=i)){;%ZnkyEK|5s+ln_Jf@h0So`e9{T_hAL0Uo#3hl+rQWaoSg7 z29)DD{XL8^hcs?-KbS@B=WC53masGBftqu)oyBB0JCn4e3e@hCa=L>CNpak^gabwc zvC98LZ3&YiV2hZ#=wx#qq%jb#E0?l%&G52ZZB!G1$;oB!52akbL9$-d$qDlyCrBcd zD^oY12EhH2K~oxZ8<>aVwNxLYhT zR2W&|Q!fo1xj4fR#@`J!?E}T?Zd+bja@fzQsE+ZSzN}o0n7voS3ym7aI|XLV!J(6Z zH;08mSo{RxDW6-!WfC={tole9B3Cj0j#n#%-TZ*e<|CrEz)4w(?G0-?7GjwetZpOW z;LLQP(Lh2N-u0g0g2>eeD`eVKK%_w;J%;%oS}Ph`{b-mltV_H5O~HFkb0x3nPB*Yc z*ItBSz*}F%5QVtiO7V?tuzI6}=f~s93&oem9eSAc_(QT~ctKIT0nlO^}JC)KeRVLspA=rz? zDGF76NQ79_$A$eW@$hbaW*AZ1_PzddEcp9CzeD^+j*76D){azmTE!klv#SA7>l5qa zCQ(+~Q&4=<+I7hRmFRg+Ejtfj3CK%mn|%%UW?O~27>F-QqHK00s4w2%O1J$(3@<5k19re>lX>f9aI< zLDm7*Erw!pJZpSE>G!m8#sWfzFP*Kvpm~V?b*swV*38PG!cwKjKvW0pkoth^_LEgM zc93I2sjEhb%O_i46rQ{NCXMB6fcE2TTFJ||6SL-{RNB^s?odaM0lkv#uB1I{O ztmMyTcoBpgbG68$xBIc=_=?oF%!s3IM8P*FIrR;UzaB?)v=q7QapF5C^5`{L^T345 zP%1o?=epI=2g;N6{hEWxVmFX5o?el6A2YaWp-@!Od?Am^jz$%iu2|1EqVOxnZ(jZu zDKDx8WT2OM)fFY)^ZIZ}_9$>x_obL}MqTF41`|xKCYeK4jR2oooc{~yKh?-WIKIHO zb6a2Dq!;yR;Dn-c+GVK9rb&5Bzx6VS21JbXqG?lVJuZ8c0c?UR!!Bv8sT~1cH@uN0~;1Fcce2+i#DCxW0N4-?VF1ZNj1W#kry zF?{9}iWMIxEVyt)@VLInpal82^~G+xqyo5}9 z6B^TdjVofFjsV(hX~yDYGoMd(K3S$r8Jjg4h;6Su+>_3PxkEB*FBTEl#&q&ru8cwB zx8x25<`76q#CVoM#W(xYVnVU$^9gx~l;f91@!3qmOg9L{l!QCYk!68_%;O_1aB)9}9+}$BK!QI{6U4wror0>tV z@4emM`|FMI#$b>dMdh5@`|Q2e+H1`sN)CC z49*$%rNkWH#5f>zn$6Cv0es3v5R@lkeWnxf-PMWVcS7-zdA zMVn?bQboH|K&4?S5$HM>cy=k!xQRD|eU|C9=ifB#4D^d%-pPgB0f4o$@^4Cqn1d9s z?G3wYxubNW{_*OVg+9tEWxC86hS4|H`qzdKwwSbyAqtC?-TCoZzn#_gk zgsL6xkKuccq8~pwxj$DOJ=GtqAau7)I^rgii0dvcKbUgNvG+BUdP>UhZco@|cSNC(41CkM{lFDq8l*J5an)NPg`hws7#%;B zX|dd@%2H}G$IMZ&4r{7z0mSQx-gB1YD}E&HjlNnd85&cTG6KrpYuH zVWX%>v1<|jp7_mW{be+To_wSI{zwwg_Bm1INCZ#kheRlN+agaW2z#0Say)-0Xf8?L zAE7*GqKej-QZF!tm#6k}Pn24*sIszCJ4BBBqVCQr<`Bo_FzmY*jot0Ae|8IUO(CZj 
z9ssO+8!BFDF$`4uqOBzyj#D&mc6a1cK5FztP^M?5BLy7`$Szgb&5P`eWyTVa)2gr~ z0GP~A6xJ0by4wgl^Y&p{Z#lh6nZ5=C-9Wm@XL8j1p*GL7FJIePMxfE(Xa8))_f8K| z(@%18!uL`190j;eaz7P5^IJZxd1wteZ#!V#SYlR$L5R)bbf0G^(C#D$55_S$v-A-0 z3=)gpcaQT*BEyvL& zD9TQsj3Z4N-q_LRN>Qhm43>n$BD&li*=C@WQzyA4raMwE%W@u1cOwfidU-dn0;$hM z-t`~-a&~weL~UPSQSehuIqilQ?eAHXUV&Ar-3M8K2KTC}ft{op+fgHlXe!_NszwZ8 zD3QF$-s;C|&Is8lb4?F#?!M{M-`y-?L$3E5#gb##)HB(MEEv(hGyW&hjt>v?cQT8U zj)rbk&&Vb<$Q;lBhf`u2fLh&$c+JUSyYYN%=Z43Qyrc14)o^`n`a?}!?)u(KG??2~ zEpTgl3zeO~4y!rnI5Zls9l*7XHv5^H=J!LBYYaDOP}kBo9$o=(W^K=y^r=!CBvVM8 zkm|m)Cwge9`TnOi%1-T|!?>G)#H}33o}TIDK>RGWh%8Ad{Ein*z4n#q-Q3ScY9jT0 zaws7&c65PVE~^xN)jLM$axmGC>CshL+Ity*58&(zWF;*9vcMAKvi#{xefq-$I9Zyv zXd$O?!zLDTA?vrp&|{|n20(1$=#)ESBWHrfc|PKKGvR{IBpu`x>YpRDJ45t% z`N+v=kg4S}g1?6ogXh?6*7(AVl34K{Ap{(wLp?t`Z&EC1tuSfYme1T>)O$;;JWk}N zUd^6dxG@;@J&osTXL~KJ_Id3dUuqtWO|K6kYONA!Dii>Ui=mAZ-5$*h7q*jgySf$@ zNk@Ng!O6Cd4mL)5*@(_(L4H1&uAVIg^O!Z6T};%hpXuWXD`J(qtZd&LOb$A&_4t03 zRU(!=UFvGQa+A#y&uT2B&e3qws?s#Tb+~Q&(UyMchL{iq$QE$l#B=mZ9IXT3p_@@&+gW%X0>x2_2(SazP>)_^Z&J-=9gdZmNR*5OwqB#EL1)9O-xwPEiQ_WWTj2CUdsv{ryGeLujV10g`zT8a(0hqmu9lAUI&Rvr;^Ni zFL*{5Dn&|+7j5L=#4jk->vLR3nl?l?8(uWa*jzWG^;_m_1~Xug^K+@XvY+m(OTL`U zEv6k$1YB4BASSy0$o&`oMNyW$`2fcCL9#k+R#%$`sKPjwcYXn&Zuq~<3@=vSXN2+wfW2dJ7Pr6R@*c0c#q*lfE2=fvlJsacM%4sX z6`bJ7)81R!KX)m#%~eyv4Fw7un+(Z=c)5(OuiE@v@$cah@ash2Maw2v)Q!)SUFNc0 z?&J}qPf)f$0Hg(26nOuR;a6B{yUSNQHxB=>QLvYHA$iWtYl3B{#Zwur(m$4?plQ7^ zT&yG05s>bDE=&k%HvG|L$!zKr_CeiCxk{H#Z!kufjssFQ+*2CuDRye$IGwPx^R1Btb(<}WW8rV}H z(-eh6t=M4Mi&^LT>Fn$AyvbC#?7^uR$4{-b>)>``HUAq~#rmN}0L4T<({dZZI`kC# zR{MHnox1A&l2-wV5)YRad*OcNB6G zz0oB42Aqek#oYLdL*w7(4Ttf&o(tQZiiReoe6S5(!Nj6D>9w++!!~ndx58w>IQ`)O z32DJk3T-EhgiHh`s_R-9^>Y({A**v{N5t~Ppj1rKsCUrH?+D=rjt-1mZ(wK6KkXKn zWl2`^R5)EFiz!w%rC!Bpz>U}PZTHHa0eYmA;csj0cf|lMtVtH&+q?&1aQ2ig9MSn4tYz;o`A~ zk?PH70S~F7QR7d_ia(7Zsw*nF3EZ)m{2p-W{Bacd1CN&9gs%?(xSXUUdA@jeg3$1W z(lvx;?3<=i|FP(-*yZAQ`KF=TrrVi_3wA%U2)B?#9UL0Tl+hfWf5XUq=<2z^lG>1(@(8)7a%pdn4FG_U8lY~dE!wed)p~MfhA=5O@b*008v>N 
zj;F!^1SvwZP2=v@QQl;YMFQ)%V`cdp0D==9qS53eMe@PElA&m73ztfs&3lnh92R3{Cx;L!3nHTMjDAnF!kzy+G>H-?D#b`I8{qxpzlW zlb3P|S8NSel+_}|k9_wQt#G^_>7Qt4&>mzp{HQz8u+&Qs z!|t)q_wpF@H*}|cF0A4-uXnsPC+Hxok;)EB6dUwv0(Y=4g!vWe9iK>9SZ)o;+|wer)h~}n%uH#pWVN)OyDo+vIVgD!Y-O4uuTx#3*yjFTtpVL zx%$k-ch>6N(bRRReR5jIAqDx#Wv3!Zx9!i1a3KqU>WUGs#S4m!SO+N!*rG9E zPkSKjBlH1w@905RYi${aeEBSi2D?>>*Tl4MUu9D;cRo3me1S#0 z4OALaH4o_qf&BF7^%vE%1f!KV`YV)0Cgbkb5;R5F20*W1H=c|6x^qE!*O8vn3ij%O z-!J3A5XoX%XFx~c#6#ovQO1X-Kz4@RT6@J*B|{b9Qqj&Pb_MKQX#!#0H9G>#IUxzmzp?{_vV+64l#>Z zEsiYuyTt)encY)8{r9d%`V8(_+d3^j(B!LZ!~tHYKa!k6e{H^@uwcxNk@a0f_e4iD z^<&=836OGlsN}3lQ6u}3hJuCQx>$hDUGFTts~y2ixg9h&O%aW1&@TQ$bM_*(wbSr2 zzCZq30QK9eB5K#T*&iC7y_4^fMeCHzS5yLt+r-+96|-yvm@(CL!yGL(=`4Hb?f3%m z-XgfhQ@m2Ye@o#U5Z%0eQHjf3>q_r*pA5wp%vMUNy6l3%q@ngotX8Zk`A6IoS%5H+ zaTd(oQ12o%IRSQ^a$#brhdyP5Luh#BM~(hy@~c?G?e$aav(D(<3e#hWgs;AGd$uom z$B1ZF567m|cNg3b9f24nr-mOuX~ht^5^KvYrdc*>Rlke;8&vsYT02aE}YFzVB~&tKBga+opSha-;&Wxzvwoh`uQk31Yo zP3aVqPHtGi-){(()s-`ue`@5ctJZb9Op@Vi6K%NR(p%T#8`#ER3&{4nMpFGiOAyFr zd~Gk6Be5PRqEt{di1gBg)!*sTDa`N4g=r+SH&!9167g2b)w;G$Qgj`YoSAtHQh2f1 zuPHRha~T1;`6?Z9vqZaNp@eI#7=xkq9ET+kAV1I>muAn=D5OH#T8xdi?4p?^ZAogX zHqBXB{*1@FP1=XH%80 zg38O5UZSFYRA^V%@|T%b;17{msK*?nTUl<0w2k&;4DwW*66r*R!|doMxeilhBnRuU zJJUxDIg41mwurE*TBawfUqWHZrh1j8)mb0?#uKE`$1*?LK<#W%hRqi6rcidAL-8{H z%<{OUrsp)N5TD&Hdb8t;d$_}Txzx(Eh>(00WrKdqT&yJagAgcw6jJ8hEY7p}@~xQ; zW_%ylvuQ$}GqUUj0ic1j0i~?2S-dTj_JeInZL&}2S^%L6chj1yKudF6qyziyPA@zL z0l}vM#T#O?mp^VleyDJWn`hUDCWoh;+#M$0-w_vqa~Cuky;hKTZ$d}pHUF~m<+N+e z3%x9tdrI*s4)yKmS0siVeS`&lv_EWFqN!D9s0Lk1+*)5>-}~3xf>}aA=;-~~V~t#w z>sRIpjNT38gH)Q_y!ejUH0pVzcU{off?Q^wYyiEc+M6U;gTV#Y4U_A=QIs5opal1C zk(c=dDCebAB&Xs6T=oaLcV8&d!yIBT4g*#!dm$MiKK@lwRcUtYO(xy+x5!F!RXNfj z+?Z_Q`Yz`Ldt2o~PsIpBoO=(rg|18eo6a3Cxi2Htl&oj2CqD#s#f?HT7tGg!o^6a} zTEm_TKd~mb_@g~6qx0+ovB7)x!O3k>A4-k5+dl=%-WMyl55=Z0Myi9thr;MPN;k=N z-@ZgkeTR3S)ovY3wYQ{%DoSTKn~q!A*p%;Geye8&^nV@P4T^E0WX87bj;6P2yif8F zdPCA3-YkA%Z+f5b`Z<1ji*0wQsnKvDx;RC1aEmuPy3e=O@7)IO$HCLg<~=slqQ7;p 
zpH?YP%bp$e4pK10vTI&Q1B6zqnhyOSfuW0fOFW!tf2W2M2nZI(@xVl>q_cpX=C{bE zo6J}t3k*C^0hgP2+}Ts`mK=4(taub}VK(lSr&KJFsjVa4_FLWMp57s7bWfmHtN~loAtuS8pt{|Mq!>DF#I%{9 z8jwO41@IWDKYSd>gc7)K^g-W4X=Y|$Cqk2#(n5!8x~UtC2h*HY8jnWIRGA7R;ZCF@ zV$xQf-{sGj2o&JovOZOuH9$>X>UU*85de)(*`$G@bv83t(tl zqt4gxxW}dw-!=!$KaR51%d_jVnhnbVUVq#jBCWdFLvltI4e1R824`a}u(>-t77oq0 zVzqtJZvR7WNo4j9+}Enr_fm_cVNycEA+k=;Az;cw4cy5>8;U>+WlyI0U-pQN4YV6Yr4oTf`?au9LA#{v987XWjIcLR z$y907y$HR6s^NTzpHk^wptHPriM*LGW(gS1O)rP%%>IVaP7#a<)E9h%qWErEpY^}> z<0@uTQ@tzSTQC=WwUalm=-X05YYj@>t~uSLJeYIjTrR6o^L}POG)8HtvE6so;r6M- z{!(Q3}bIFo;M!j7PbG`I*ZOZ8MOzk_%sy))#L$&u;Hg8?)?r`sG~X4Q#Vol`q; z-KBTzGZg*sX}tV0E26fWH zP%cZTy`TRl;^jBb0)8SU7%-zY8oT&RxozFd&NW#}R@pd}94FTN52_k!iwL(D#01la zLpN?l=e)aOuOsMJqA{)AGZ!~$OC78uT*Qebs1$0T=TCLbsql7h5!F^O4JiFZBg4!@ zKw6Ti0D!79B|Asxit(&}mXI^LHuOAUA$5(y=yoSGnyGx^f3=fQ z|5~rAS^T=>^3dG?$Ju(O(JX!bL(+-aSPlN$rflz%M$&bBvSXu4n+dfK)jwE4;lb7gHxV%m;a{#3U2J*5FeZZx-s?ZL8Q?f)D{L2^GyR!*eL+m#-{NVp42fff})+ zZVicg3#q!@b3PUICDJtr^n{6>vo#57b1$t1u1HxDW5-Byk-NXi9Kx zgH7kJ`wIlX-NdavgB>{8+%q#y@Sg(%(~x%EPP-IEqF;lP1k4-H-9&^LxyUt~vs`a{ z6_hz^00XmQM84ED6y_aS3j?ltrG&>ErxoJ^3zC)(8A%Vl%wBZR8j2((2*B!3*1=5b zFNws20gqI-`iBn5-N8~VM+_@TR{?jg)nI@bB3kDl5WMTI}XP!irKDiuVe#%g48c%aW$CXp}{f&w*f~IPd|c^ z1{0&2HNlE~Dn}R?7(&8Vh`lQ{;`R}Z1lRV(2mx5=s$K{fL6$eQ@i(N}iKO=tMm$M` zRT|&>)^#-nkdij&sWw*VNdw7CgCmOAzsleyL9F$k59~=Q*Ny^2=j`_@8Wfy5T}t0l z5a20yg{HZI4Cy5Tfb{%V$Bqn4%WjLXSgts;us?1tw@7D?!L#1{U69J9ryV!%(`;=>h+gU_UXoyS zv-nJuZ$b4U&SsTz7M?q`+eAh2$ax^K2CIhS`P=L5*sBn~7&8sa3m(#-p=fFa3+30; zh;(grBgQ11GrlxdY^bntD>d6egS=VV!sdR;*6-ixiT}vjWIK~no4Yy-Wb1QZITke- zvVIb!gewa3)6ufbp{>2JHL1>5EY952bcq%+SO)hD(zk(#^ND}DIAGalgw(Y_9Y3>9 zY*Ns|iupup5m9!zaEWTV7>)%5pUGCY=TY!IOkeiudQZEC)5nS*h%TO;hsxWtzcn2B z%;TW{e0}3VnN33F%B_6gJe%&py3797-#pSsz>2))an+MAuXdrwI3%f@&q7lV7p0fv z;~ZDs(L|O3H~%zv6_*Szxc81;7s_PXMtk6JEv$Z@)2=+9gka=g+vt$LqbLOzhvO+& zc-pvsmqgQ6WpBm!;(#arX>4&)@(IE5Tyv~=%Xm!!hpmC=VFTyud0snwU1_^Pt{Z6o zK-${m)f zyoq2zDyb^?T$}9^z1oIPg85b;4lw~Zy(oZxoL{afF0~DFB!*r5MQ{vlLaupP5M1*F 
zwU?l0r2k!*-J!wIoJ7V@o-ot(o}6m!pyczBJ$R9Nhm|1|F~ zp?g6!2Ildgxum(DN+h~u%$yiqk|62J#l^&Zp4esRu`PzfA#6=>C1a^+Z|(D4 zv1NYxdI{@|d+l8ES6x-+y&Fn{kT@bl^TUtmh zkRp@5C>zxKDfNpfJ3F0H)pI2LlQdzz)tun9=|G`2)s{WNM#k=&IH&1Ozo4zo%ByQy%6ui}y$vbO8jTZ09?#1X@uR2z!Haxq;r} z`8wOwQuAO`Ipoo}QT-fVM48Fr#jT@TrYA`m*|28{n_SD3UVWCUyqWabE~{?}Vryg^ z-7egBES-L&!uCzu-ApgLF>%nYV)4hYcQ;gO%_D$hyg0k(Z<;<6}ar=GtH0%(3$H}?00po!a1`TfGzn2xM9y;;BSVK>xd|A`#xCZoMde}#@C!~%P=uN(30@&xwp(e`&lRdv zc2(t!m-^&KlqiTq=(pEa_SS0~PcfR~`<^o;4;nO~`Fk`xG#=n(mDQ*&M)f?uIN-_; zX1G4NVviHvOa}jem@Jxpsp?lf{_Tp*AMW6$Eqxoo@yiLw?trO^I#qNX5osU-uGk;R zHEw6DXuBZC$sCVo1u1?&OB>&nXR=T=D9iQKSB^6Y1K57L7283VAFtF1nJ&2F&+klP z8-<9x4r4XlLIlZGA2j>6epdT+V6E7+(PY^r7NzbWtqT)&HVBS?tOB^b26kOCy$4~u ze{(m=&@d^SE;JMX&lCWNyXmF(Cv4y&ZKfHXQ3M%FBb6Yp%O8f+p7nhZHb{q1&-N-6 z4e3ZfFQNKS-m!nyl9F9`vWB}0u-8N>B)Q0`VYR4~gYOO@tzTVy+% zR>TtDyuZyqd0u9|=yu+J3Sqa-L)FHlipqAXS01T%M2`@Ds}w;qcHef9HF!G=-g-~^ z8eo1l7M4C*fqeECf|Q!xAP1?aOkoydZ&VnLVmaM3SZ>gefk5W62c&VVD`d-chUHsN z>&7rry8&o2?F!v7Id8q`Xw%0k9IKyk-q4i$u9u3=9} zokTPu89UWFQ$6}A??$c#Ieyesfm$XzWPdv~>5}?;AFzQwk=3Khq<)r3B!fqy9K=uw z=C-?K>vq-55YwuppihGpUpR_lc=vJ8BwwjmD;1!5iS_n^cH|K{<&H_7sW9S4x%d_; zw6;m3mwh9}b9v3p-DM<(ggYiDK$rgx8yU!1t!ZEna%_;MO@{2x#+Mgdkk#**L~hA~ z!MA6EAeoqK@r{We(yx^j+%xQ@LE)wOhA)G-O}Gz*Mb^@!dE5rU6n3AJxizH{MR?0z zOztJVAtm+M8P6BIJTA{RO^A^g&x(L^K8w8cjDZbFFSgaxm zfR$3YZ&k|m2i>aNHC!9pVramXKh*CAIB_#@324)++RDS`2&t8d6c0mX9Mf}@ONngY%qRz+9qtC%e%3*ChT;5-X0Qkr_AK%GX4iL~&>BlRMmObO+GR>A~fnXWj@<`mP->qUd-6Z3qZvjc}p^zk22B-aej!Do}I{Ec^;Gx!!BKd+jc>a-nM3L5yfC|}GaDd+HYCa?p zdS4Tx5E{>F`Y!TyNOeyu)W1V8{uPe#FRyTE31HCV8&&}H<&H`Z@tkDXUF z5trc>6ItbqH=hgd-!LHma?^i%-=p}aj1zU4%H%;7rt#L%foq;OzSF!k|M%}I)#Y0_ zz>N0(6gb0)mV^Pw%;6&+5Vk?Dx64*}K}FSEE9b1+ySa~Mx7%d)a{7gV8O(YYtc_i&6G)|Hi2dDo5 zPxfzv|F215BLdvn6N;{%+L)duVt|xY12z)yi>p#~li+9O|HBIh2_j|-ZY8Pn3XYv$8r{0^&`?ofvYq)!TO*EJ zWvk;KndJfh`9w%Fe(3G6&GhlQvh6HbLIU99$CcK6cjc$M7>)7rrG#s;L)5b~XEona 
z>rCXH|KXPZ{F4VA^eazyH2R=jMp({Y)9gkRCXpKT)v8X3cK5%#y{q0$-!>Do&VcaGbLkn3%$a0G!!P`aD%zIA|uRgW)gDfG&dPl1C!O3kwFh83B@C-=gtV zSVVk6ge$YEO z#KXzkBY*t|l>|O3BL)dD?TP}q z?KT%M<3?{@QURp8P-*m1Tk!_9dq1oSO8%N&s#YhTCHaW>*noBSuM%zv|#P+?&A*5BCbdqO|< z77*cr#KikMm$!Qy=BnD#07 zp7>C^?(wP8+2OYT`AqzpkAL~ij0OkyIh%+>z!OUUt%?uMuP=rFR95y>h7mJ;Z0rsG zMuEwrdC(bW=fwkN2zKj$Irrxbt%4V2u>Rr`AcR8WfEiIiND0C~wMeWEFu_YoJVWy% z`+2{v&wRh}S_&%nVGPX9MG0KF5SuVMrATh3_J$18yPEX{Y|1#s7~FymiEaUygeQ_iAd8eP{iuiTvInudc0Pait=Oh2VEYEu2kr0t zywEHQz1NX}eob>`UJdj)Ag?2mJpVv~`V+s5fPyBNU{cqdILG0HcQT$U2 zUp@uTyua=UdvvzuBJlZtclMwC+TX!$SH_3{M8B(Us_5}rLGvOEe{YDtL7~9^3Wb*6 zR44$*1YC*57bB0MP(Ov%-xRCoYbN@CM(pn_u-~BmpAq{P*Zj|I{NKO%e}fULrM8d| z7f%Ja`STAp0)kT@?5&iq7y<^`)ianj2jq;X!q>1VA)#L0-jpBPx+d9sEhVByieAg9 zdd%CUC5~+3;jO3&&-)}PG z5_-ygQ9%=Cb0U=8l!}7gnBkE-69s^FI3^|v@}s5XdI6pH*X`P`Pk#H|gtOKjM5Zmk zK?1R9-*rkSrM|xyi=ubeKtM*wV$xsT8d=ZJE51rU1+4u3uek8ojw|P{Ma=)SkzowK z%pc=H;o+bYpw_s6N%u9`NTHaIpPxUkoRTuPV!7CTt1Y*D=f$fXjAw)HkG|Z22Q>B< zbexALp;=!!rA@kz)5tbFSDjwEWG5gx$P^lvHLicHwIYGM}9C^wm#(k5)cmUexmM zT6xyrt^B9UO6*IKCHX3O_&l}Lj8>k0%?ntv*QCG`2@AoEdyEpzZ19EuZfwEzyXA)! 
z9Qx3X>nRrb6XsNe>Vzs;QQTu`lVC`vO;I) zMfbel2Gpx7G*DG_9=~-A7#id+;9HdRz;XEj38a3ZZ_*q9IrlfA?aKXNu!X zOhCeKQK?z{=5cu7+Mv;YZu39Hod5Bs=_tOP%F`$pJNirwj(0X!q{HW@LIFw?Z`nOT z5wcspaW}bc<%$?+;Ry#OV_{auFaNj>fHeM~X066-Q8uK(>Vh(!h7^C_Y?Vwx>{c zPoY(oF!0*E$nR?u+QE;p`&LunfeOG+sjF;n>-IGG#T5r{KVJ6On2q$b(wpeYy!I~-dD`RxCSqgzEMUf;8u@r^<7o;5zXYrTJdu0y#ACe4ic;~{J;4&br_X#4#m5Im zG82$tBpUR)f`T}w&~SIV2Fi>hs$ReyrPm}_B+|=6(#FY$l|&} zkO@(f0Wg*Vl;ZGsJt4y29z5r9_|P;DJedVsj7t7yBH zJNvp+-L1LHT<&vA#XRnn3s04UmI5F=OlQ2tKGWhINkV0|_ULQA4clV;_0vX|z*E@) z!mJGUjrYx&Bs69qeh*m^l{V<^ZgHElHuevdZYX#6gYX=-!d$E{)}=&&1L*uPX(#W$ zyEx_;ymJerR133O56IdgF2zV}dZS)v;kQDsxfEuX8|6YGAeYoaMrj|ggMo?{<4ci3 zUCjsuua5l^^v=`M&?|P2OYt#PaNA!d6Itcg7*LnyGLUI2uD(;Ti{CuiptCEYs16*j z5&RE-MF%ddvdTau;2$3He|~X<>Dku0OKCg%ju`8=r%#{BQo&>ez$l{rBuF1!x$lmP8=$=#^o*Z6buw6v=)sO!b*wlH2d~r zvei7M|AXzj(?wknvv~3dGLX{#p)!#*jsO2|d`D!S+&9S7r_LAIrt2vtxhpZRdt(}F z%)8r=;XImY`H3tqtt6)Jm=NQXGuu|OJczKKbTyJ|$V{6#YS&Zf)Pf#FEIkbtsmB%k19$c7 zaItEAf1I#nM`LN_zP{6y-MUDyOCq6fh};fRwN1$2%vyZU2b?t*c9vPj3q4MoDGOY| z>D~7~Eg-q`u%P*$8+jsU)Ub9D8j4S$#=aWAn;AYk;*yWshk13Uu|>A|vnLuOMUJlZ z$=`&LeEErXL1J?a!LoLx-zdX;308(hxL8WN0^BJl$w7GsJ?ntbdAs7Z*|8#r8-u6v zq7dcYG9@4dgh2pG6Os}}5Rtp*s>y*vjB61TRG!0lHyHt<>E0Cv zQc?VvcF3q>`)?A^50ucW^*n z{ub3kkkCTGMdKqdttxldvz}wsohD!L5sK<7FR$-9TsA5)6xQHE{7R8piu?tfE$u7M z7UeUHo`s|pK+~yrb5M5JDmr%$pSGr-h{M$Y1%LjMK00UG3n3;ig8R3$*y^XAdoPQ? zzYqM`Umy5j{YlYF^E#~aT+^Uu1YhP+*~Hf^&7YriJ)`>P(ZmDwO_S2sfUdegQqkqD z;1XLa4S~u11$QC%Z|hSKq7IhPW*#_Q6lls#WZ4mkd)^=iQ;-%qVyJx1>!7XxZuZj4 zM;m^Y{E%WEdoLJ^E~l~-B|wQsOG!mc?hipsKWPtopZ}p+W*B-nik^d4cr75UL@gXI zD@0f}N=SboDJ}8launub1_KN09m7>RWmp57x^}3PaIaFP?AwI06ePg1ZcY2+>5ZT~*@rza5XP~Bf{ zusDmq!6I(2^O_R+S>5~j!{n)#KQUT1j+?7dm^YScOL9uN_s|LdwO#52c^EJariY*^ z0rJm~`_)LK)Osen^xS%s-@KKYXd%Xy|}6BUcw zvM!A3I-ckc>Gk34%FjU((K_8OR3bQDB!6l9)s znru?#Zt4=E;N&b01qnG%uA1%ZXvw|r+bmQn=?uLIvfEQh`!*kML~5PZ8P%Hzq}N~C zq~u9)=;LN>8C;N~ww&^k`W$+jMEr_jSy}$#9;&8+nr(0Bdy&JMddQgWZt&z+Rp}H? 
zt==I{q~U?AZbmFK)teww>>|fABeuC{9+r}aA-pGwMg)z$_59^!>RW4N4$VeRT}4hF zp=eX7Q&Z`>3q1Kd`Y?JFf4JTR(@w7tePqw5Uj6NBX?htVM0RKap(Tty0>}s#YbHE| zR977}^EJ85+?6D=Qw#Km^FjF9h%gE9y zgG+ z@?2ileI21TUc*&o&yBU=0y5K?2{)sij@+#qH32SVPB(!RPXPV6c7-Q(_U69SPb&9&^0_PF>p8^)W78a zVuU%~BE>?0tX^lAc!AF$0>E=t+iBAnOWSjFP0-E@U6eXATmAA)GnMELT7+kumniVL zY#=~@QU;gGhDj|uRJdz>bOc{z`4ybpz=u{(p;W?$p^z^-a7Yt^$0^>cXqtkcr$d=# z0YxQK@!sjhZPLSGlAz7;h~9YUhiauN=PHxw#sm;LXnse3yhm@o_SrU$G>}AE&u1O- zl~l?R3Kv-0yPt0~CRDF;;^wxFx&8bZBA@Fb-dnl{;dJ-B*yrsDDi^Z->Rmj_TD~!Q z-Orh0Th5u|N^N{7#pn)(a7jK?>Q|a=oMIJc3HMIQ+;NO=HP)$avm7!xe>#*}U3jrp zZr4_VmV~VVaS5Cr_{KGxgnQE1V?J31RjG%fE=ClF=9zEYlr`4SBohV)VGD+wtdK&J zUuX!@h=EEY3&w()4IfgO&dxZkx5qMtYU9I%x?PBUgk{*Rd}K-mu5yjyM&#>1x?!=c zr$N&g^&~f;3S-fTbyIfRtj{Rab`WR1HzTo6660{OAe4e!p^xYG5f~z_ZP?!YMy1d| z7o9Wg69EEk4PDq)=MZV2{EzneRlcQ)7@a3AMfQ@5`i zH{;F6RcrV}+nyLyzMJ*g?W8QM&?40f107xP*IZsDaC`Xf5zjfuPKgyY3-1D zDZ};gSW7vy*L5gfRlx#kk~21!KTg*3nU8m;ZH~I&>+-bfQ!eJE{}6KTr(>8Y{T z=(Z=Howg{%r0MuyC?QtFJWoh`rNdKhQO&Mr`$Co2@7aZGGnM(1AhGO$9}4jGJTLZD zWx8RKnu;6VhU+3Xa+I)Q_7R$nW8;(u-!+{TR6(a~^Z7uE8@K~jR?)Yw+_;tb;%t5W zHeTsxZFSv-tQa#yI|e_T-}Pa!sn=M2`E*wyuVt=W{W6+*b%ArO(5_*kU{;{+3K4~U zx_q`FQT5Z+GMQ$R7&vR=H^e6KEDe#h zwsi?FS-zbMMNhW}{W>q>faBi*osDv@`7W~HAJ?aOhga=?q(uHJ08xj3f#~1({6@RC zgC_4p7iw5>Nj{N#jB6x4(v%5@py^@S-o}G_UF@2kHvUitg@5%mxVq%nCo!{(&^{s} zVmlwFX=q~KIqJ9c_nt2DwYoAA@7~Eyo}W!un43-(TjSmpOH$M69Ii055ModOQ} zm9Ud~W!ymmSi8Li>9R0taW>{9Up3M0Sej2IM?06i)gR*f1EI6~-4D|a-9LyBTQQOY z)H0zw$$DBwbjEe=>pO2j%ORxrKf6zCP4~VPkKwV-^oT8yUbdXJj{2lr0Y_wI7H3!tF+It zz4=_}#sJPi2!m#2X)i4J5@S1`{P6if@bK&KEA6nZ0(T3ctOCA*%O;S6;Zl}NfrI9@g7-$9$)yaB(@&CRJVMiqE2A0Y%Hi) zGu?Z3a~DPCr~)|7&cSTn2Cx0zCEDT-Rj=VVORLt4+=QhzXM zwZzvj$>huJKqAM`P^veRixOkcr`SRVAYuipaQ_}DYW&qmksTV?WWe+@R_}^`o*yDK`!+EnoN(JeTL5kFaU{IjA0ORy)d=?YaT5B4O?2eEG0dnQ9 zq#7~Tm)l?lJgRmv;`dih8(cx3ccVA=xM#(ti?asPrBvig*qvgm^v z5qOThvgBn`^ySUnv4>QCKYCv@7Prfb{C||aWl)>{);3z9|CZ81@#1Z8Z-L^{;z5GD z6?b=Uk>VB{0u*<5FAhP1ySt|Z2@vGXKKps+JRkO%+2=bm%rN}cz1F1y^LXC&!^`hn 
z(R>7~<(STa|NNerT*_4>BnnIYS>Xqwzv%WEsxiw=x(ENCuQHTx+V}W7U%sW_iBUi1 zmDIokGYWFa6A<>UX3~^VNR=56DL+|J%B9|ys^GDIOKq=3`^J=PJ z9T?QC$AiK(%BA=a$<_bBd#GPQCNV&)+FbcKtNu&TE12TnWUM2553b?cr=Hv$_xJGu1ZX> z0MIjsiE~5;^@?}wUS2QqxXi=tc`-J;uq8ChWwgBNq|8e>_-!;vF4or#h$wvT`dHLz zA9GhIehmUmF9%)``S)gYO6v)5<2$o~ zTL@3jqj&`g65XYF1>p#B_9{1_|EV(B$LZq5e!w+iya3#bjT9{=CedlloZe(XaeNp#__u`%<}g~}>ik3r|=)(hbLgX^#@7JdEQESZIT`Zr$8YkYWgj-HHZRmuM*T-wO7Oa3!+^RJJrv4lAqd*e%=)Ku=)am)KN= z&U_5GJT^FN7pl`| z%VhEUgs&8d!x;Kb{%;n*YeN_nIGm7C8W)!bPV3xB%eDQJ+A@m-z!t%npx|lYyhB3o zCZ6JPz0b(s_3YbCB$MigG&G@ZVkdgyJvq>Pt}$MtMmF7kb35$odF*CZtNN?_50IZ8 zj(egx&7xxuewFS97s&~lYjAUU1rcwmwcQgo)o$Ahlj3QPX@D=wdgG8Q8M4r_Xfpa` zHI*xqgq$mi=d>az&DhP8o9<{Y??CAaFQy@}Fc)X=yK*#*uQpe2)mOu2VBjfo>kj{D z3SHQ1ZWeOCf+5TwD0h8-^i}ZPEGQMIe(rb-S9&FalZCz;ITGW(>jjMhu!(-IdSX9K zdzU5s0!v)!jrUqSirW5EI%Eac@Zj{G=f$1I^}XrsabJl|u2-WB+^E?cxUj)(+q3Q@%hx4ZY*(+vC6UHNoAO~K0V#{w zdIUY+6znvIq7JI@3whg4XfMkc)brj&6hst~fzCgKSadktgy#z|&)MzrxrOej^RCR* zN71g$hwXkU>^IMg+p9e3I~hKhtT~feC!j;0w~F|^B2Uo^8z#gqsCdg749z|S{nb4Z zb|LDcFH|&o?O*&;1YhM^H~Ozbi9tS|8XLMe>)>0$pjF=eK=f1xbx5&kdD?WftoduR z);TtVTr_I|?aCelKh7;8UnF9}er&vi9uOZ^==P(h0gTEWO=++TNXgUuLy0bJ6 znk2urG`-r6Iv_|MO^y)*B(oQ`uv5l8+*&hJ@VlgxsF%y3Em`iC4>CJJK^Qv{2WYnk zRDFRXf5A?xgNnmXL^*isW+pep`Wb!h?QmcS+PH;vttS||dyk+dWVK>%;D$oomKrGP z{2!nC(HZ242ES%98#_!nLK%&iq5A#aKW5Ebadz`2t2O+aeMrYb%XK@RffncfA5rV1 z9^^lhNI!L=e`ujgk-I#Z9a!FyH-s8^U>7DbiCP=;g{z*KRikfpsSwbn6@L_5AN>PJMH_QA`&An#s}DFsVYN8 z1<M4OW+e$dyQ`lYP`kzh#M86$fr0tjlD z6E$7DyS+lakO^hjIM?4_Q(0+5o0fN3_2nayDsxi;ZuU*F=9$57!2Jb zX}VLG2%(Dbm;43ZRHUX;OQe}%CO+&!_fKzRh44v z1hAZJVyQ@tlREpE(6lwJ^nQiRED`niSykw&Y;sPnk{XFqiNs_a7 zJf4TTuGdiKcFn(F7;oZ++meX4<>WM9$;fx2eE!$wY1vQd=_M3}0@or&;9r#`PVfJP z?r7XBFfgKXGd$LGVca%reX6t%wD1pli#*u^M>C&lSe62%K;zz7KGuA0rsmtj3bCQE z5uo^?(>->zkY~Su4B+s@*We}Xs*{U8Lj0NzE`G#g-{?a*pG0W56`D-t&TyMhGuiPf z-(Rn-Ke`w~+?DL}5T3HeO#%;WxH+Ks*ZI9N0Jqipn4y15qPN!YLu~2=XZrWc4ic74 zI!qP!o>`BL@EoO(I^x!oh%pPWZw0k=!~> zcR^o7@?>F_d;vO&fANc6aGyctTPjgP=A=de9;03hbOaUObc?I3*U_jFm&e~v+BGhG 
zv^7sAqyZpO{%zC4@$8Ja9R~s5e&of1dHnwIg0uP_H8u6YL19*ocdNUr7K17Gdc_|` zHnwQ(#)IBW+sdh0*hQ`s)rl#$`GP^GA9WN73j1WWeE>`^0FbotF&FbXssAW*ZR$F? z`$xmL!BUcfNx_`u!zlFsGC%?U9RuVrj9W#>n@Wu7`&m;eA{-s>mwC7nl~lR#NB;-5 zqQpbPeSG`rP}Py71tl(RqG?ws3%2=o*U&3xI!RL{G|i6*408z*vJTb!Ms)4Hy0mI% zs0&ShA+*t^NNYrYHmeL4;?Zky3XZP&BSF;D=M1#h|6QieXY80Xf5A1Bb5q4bUld#y z;@MS7hXpcmG@eh#PyEi1t7t!2;K{vqFn)lv2KJM>_8cpUanxPzR5$t9&GXCKz zrFI#<@I8C$VL)qWOgI6wvV-r@=kcW;9&fRv?zL$spx1j=RzYN2^eqa2Mj>@v*$3YJ z^QFQd@bqAQSy@Z;Ewu%v@xTLu@u`VR?k3j@P=8Gl}d7J+B}h?*D({8r7Y18 zWKD836QS(X7S2k!4cXS9%*R9Ww~Oe;!M(LH(~nz}A*<aa4rebc%+NYSkE|P4wD{wC*Mm?Q?pm(?2;&iB_SbE^1z4CHmS2E z;uY$jL-&wS1mLJWvU$(d9c#+Uj+ zN*UBv71~IQspGMQJ7oAzIzx6ubLMdb&%bc6Og8 z{e}Z7>3s>}eudhJn@j|@C(o2?^))bS9nQbwCCNVEK(v^gHaSMU?E>FGPC6DEKKdZg z(OebzZCIg$+JX2R z@qSn+6`tygm{(zW$02weimcL*^v^jKbWlilk(f{VWq15ONBV4&^LECM&D*c0ersP- z5aa&uIftvvY9Lc0^H3%gwI!8zUBVHyG4VuY9y)={J|;^>N;84!QueR+uUGC)65HFT zo!2MZsOfhBGo`*{D{C{_k<^*Fex@-2l3;IO<`h_*M*YvvKdof`tBb#`5@0LIQhs@d zKnh&*p;ki+>3812tyWfvec8h-}r=*k#^Z*TcI^!88gyu%oWo5B< zOJ|-SqRrBwNlYMs6ZG}LU}RT&pE6_Vn%{Q^pW+hy6NRmqop$*6BrwPW7`jofrH2sVWzvcM_@$3T#Z;RZmt1 zC?T(V`zPz0Y7_o+uj;;tQ1lRId;LZ_XRhB@Zo)I5SO0o(?^~3(=@4GIkPR5#yCLpmm9tdf?I^l> zi;3xy4#!XCV=yk4d1+S$ICbK@b3*87gU!tnAjxWR8$>GdAe0+Etb@+=FuG1PH~4Kj z?3XJeNN+ZF*I!YdA@G>5-%e#5lcsUA!Yb!k5*{6%(<2of3yD{4cA`x~>|pr0@m`(o z4}V^AP2t>Lp@YC<${_#4>M{SflqmUAG=`{>5c!5y4YC;F!9T~(4~vqsdqBIBf|SnHZf!3m)9#P#W#m5JCeqJz z$JWt$I7C# zf4yx6)m+oBKIyHEVJSKQ0rtJN0@)XqMD#-w zn-6Xzdfyp~q~ypgWICJpUw9jUJ=9w=gu=rHDBQsUED!U4dE`-!acS0pS-vSbya}m! 
zj{$!U2CL&QutO9GKFY$p;W>+q8cv~Ff+>T})VVaU`s%F?^@uAo$Mn)?^q#XroC>{C zcy!Jyl4k^9BTMDeo7vIR54g{HWzaU)u_4Yf!YevFBpR-IG zI%r1Uf6tkaNwuxQ<;X?lk9osZ)E^)QroAt3qJ$`M!uuRz?73g`#!ceNA4RF|1+o>uD1b<0Az(p#(ZyrbgrXmWCJ z46K93izk4dA*rzJPqfVdBaTB79Zxv!#;=b-V4rhTnBC9#P($(k**Zhp!|{T+9euDH z?R098n(eS5GLq(FPs7Qgl>4gEiWS@Yy*`~~R2E33yU|JwI+-iPPgb}eg`5&KY;pD# zF^h=?lyG6Gu>R9NABW7xwU<}Ualn24c7JLGx;1TYeSIIaRv0VMf?}&I_F#l9ZX+kJJqknj>4zIJGy%Vbh>B$Zqw;y;JaqO%kezJUud^P;7ez;dT z5K5@^MotL56wy(JywcQvIS)81i9C^~_ z!FN`=0KNE2j8f!c$G8TNWa1n;ylQ^Q*Bci{BMy1RK?A(z#0=bJNF7j)6k-*eLp}|O zxcfEtUJL&{$2k9S#=@@+38<0|KceIMihi+sW-K*-7_-OF zz}JVRKI`gM_#Y+G0ewrF_Pddu-z z#0YDxw26-n9Bpcvk&DNn@1%(Sv;$YstWtKqhgw{$BH)yRJ-T^`UhF%gv&W~9RE{=} zCME|`ZntN8>kiSoG{~Q!Z%2x)L(Hc0zR9BlfO)VWG(iO(n6>hNY0@6iaL(f!>Rj_m zTcPta=3HU3jUHF4r5wt7>xpq6Xbl|QXxp<(QFj2>n{KwnXBskGwTTY}^+%&*Ok+rw zH8Al0p|kvx{_Rq~{ZPsZis@@_}!-ePPK$m?Vsu2IQD9@xOpf zEgF{JwkZM{w-%?c*1P58cNV`w*w35G8qf;UJcEOF&iBvgdsMdVu0ELOITe@(yh@`< zm5`U@JKyWrK0q^%qe=3I$^+cdYf9T~t(K>EgyaP_fnKwrjZ|B;cDKh8EPo!-+xZFe zh&7M5Si;_K?aYZ78GZ{mM0Xi!aA3?DDRggz?@w}vF-W&P0A&==6L{ci{&;4b#9RR# zJZQ_$&)qq9X(WML1(@R@z)8Fkdzy$LF(zW)i_y zsmGPB%=FOkBsMYPO1<3b_(J84XSmbuUUVIOTFk%X)HDw(!9MX zgqLe`VSvDVR?`lL%R~BkjYfS;ha%s9moz$bDX8WRj>1;a**D6ivlKVyr+^h-cY0Dj zqoPFLt*?v8e%prGcbgKOc}Ijp4EC#D00-@6r!Nr+nJQ-L6`H-PY`H9S-XaJL+xaTs z<<0-0B|ysw=|@*JK@5;{cq#s&WmdnNhbvsd5s zM`Z0M#)$*38uZR=5O-B|Rj=`KDxUcZ_)Z{15|AG-H8?i&2Tpo<@MUZ862c^V zcs4WF_=c`CXg8kbx>p4X3Ibftg#L0OvJn^hs?p@pnOk z6I%t4t*lRV7c`M4rGx(W-jz7Yd^Cp3&ro>VJLRhYnmkdt#fQz~XuPncTvpv_-)i}nc=o9kc?+Qx^ zqwPVUr}^x1ex28ffz_EV(Zq_pll;!2n=MSV^w*UrbM;=-pP|Hu!Fz6t`Il|H8Hb8L zHo`qD`XRJFX#YN@;an%I{eaur=XB8d@2KH(0)Ix9x#Rq}t(jVtwxLiB5_d4OFV|A~ z#g~XPO%_7*ZTOQ#a}+wng3fro-Sq9dgK%o18{mQSvm5Le)mYwUb$df@LJ>%sgo->3 z%~y<@GQfU8eETqaY`Fay4W0hV_Pr$7LpeM$QW3T5o{FaC41y(9>ACK1gM`F}%v0O4 zhR(HaP0}MDWI76;(HAotx#PYl+#>-(r4jjmhHP5B_lTzVqnD;g+s{z1LVkIt_YH#& z=ZgE0E%lAVWPOt>p$_sGHsDOpJK>Pbyht=ua8~>}E;a6lD%6br=TrlaHojE4%Co{x z=z%6x;Ly<+fcCEZp$ul=4SJ?|O#bIQlB$%;_%w^lal#EPI_%9VE+!zs9Wxcx+9!~t 
z%{f=BB~{xWADXJOh2hFXDHH_oohz1em=@{U0HH&fKB9o`{Wp4MwLev(+l45;!&C9i z9;83tQ11_Y>tXblbF`yZ6dF!!0=9iwxyEha2?iA>^y1u-Ph33{VJs9drEze|XFcP- zu*jnK)O~t^O53@MPQ5CNZ9y-^r#tnX?0Z#CSMaPMmLC;*g>i`Si_=!)V%Ji1XyhKd zrZ{`YHPd4!L!>P`^%=$vL)gnAZd-w3m8n8sd9{+J2mPI}KbB}L+~x5(PU)3dZD_I0 zLY=kiW-k!k>XPfF0aO?_z3?CZooqR=nUD4_@ZI{&x=W%{R8m-@7LK>k$w$AWXN7~% ziK7NolmCjop^w-SMgG5n^8f9H{VMR(K3Fo-ZE(qr0u%iv{H}HI6F>Y>vM9=Do|{s@ zQdb@$-(bRs^EqGARE%%?EQZp0Yu>99uO^)vDJ~)zjFxLrTRwuYwI<7rovG7_e-o9y zbHhv8%`cpK_-@B%x%hd}YON&hHHj&IzN!1^UE=YEVY~Jq`3;zbZIvKH`Z*SFFYJ!C zllkLlQ5R3oTq+q)Kfra(?}S9)X=?Macb2adx}dHR%h`B7wu-HWVK2f&|Jb$Rcd|Z; z-zLv|X~xI6T8Sto)1!$Q7)r8aLe%nx!sLW?xS@=kjN3SZSRL?1%GBn3*|6Gp&M)on zOpKM^TO%CHzHovXjzTfCvPWBzktd>!qh*ex#2Rw0y8bUo9MfG&<7&Fe&kJ8{c6wxN z=(%{FIcWDt*iF;_!OSn^rfwL8$8jQdMfWeR-Bl#Ur$;OwN#?_GYft)cm3dh;>~}frR^o?9U*F4 z(c63>KCyN$Iw%cashDQNH0UP5*ccmlEqWu^T&sWSB32>e>Fksrvy13I*A#E7tVwPy zgV>;Rel?^vb_6K`sU73zCs`dzdN+;E_P6Ztp4NH1;P<8bSH@l*1~UQ@>+&#ZN*Jzu zMSEcvmSYLiJ1e2vprTEc_|(awE00#bsN4uGOlh8wW%kq5rTa*R5)0z47LT`-Bp^E_ z+fl)zM|OpYfa483pbVF^2}fA(;usFO5rs( zycz+}NFm#Dg%@|j<{EmI;pkczW|%c#KQ;l9CaTj2S3+g*m6@Ll92NO)CAalpo8x$I zh5_6B!k^Te)xU;feus#6k}sI(ebQ>rG35148hvF;JXub!Yc|xY3X)qRvP|4#$x9S2pGQ|N2wxbK`W;gXEDAOnS!S zCJvziN84hchDT_{Tv?%QEj`B37aozt82Tqn$ z9sI%nIn^JPUaSGHL#Etxj%6G5O?E4e-5(H>a+n0wbeNn8GN~5*E~Y(N(W>>!i>r)L zN@dnyYw#F=Mt;^oZtvn^fOxb_LWEvHYOdA({|%2kc9jChrWe>^Cd|L%T>mJ+{VKW{ zhD0aPk{2~SnDjcA1p|A_xly8R3ai@0?%cp}#=5~3=sTI@h~`(f-H9z%{W}3??&!sO zo3FO%bB)!fE7>f&p?_t&DS_{qFr`e>N3gPS*5)#a7nv4`szd8_wchZw84&IT&+d74 z%}7@uK0#Ys2{#7vifa=G%dbunrGxBF(c-OT+o^`TN?G-(Zc|IF@y&&Af`YscJ{^qRe%D)9i5<^;M%S!^{UHxY+J2(PVmd`M}PDUr4KO-8Bigc0@Ssyh9 zP^e!g7dMT}2@bqx6Riw1G({r?W|*MK;sl0xWz)%DZ%L$mz1)VOLfYOS`Cx-CVCqE3`Yla z%4vg^uHP)GXn7B|VwIieAor_vYHpxP^Lr-c_=M{CHR^sk-+fOrED+)b4AYWWT#ORn z&32+579;`j)PnRUvsiOYsD~T?GDAW-%DXhSpfu2R?Zf@HOCd--_UN7w{jpuMjaE+F z=7Dmu+)LNe^8z`hCaE+^t@W_| zJqcXoDdg*Po0hRvPZlzQzcGI>)On{m66NVVq`cVNsQQ~@%$@6`Ow~{i57Gc@N;)Lm zX2OKQ1|ptm+JG_W4IZ|JYE*=XAToKnTv(fZH+>g~oSC{i*j7T%&Vl%3@?|?O-(UnS 
zgTKE0AYpqmmoZzpZfl@ZbEL)Eu$OYDUvjk%sbnAK&~{I^f`bc&joV zOhCSR6mZb*ANl)grDEN;`cE&w$Wf_FYX}IPKI?-S$6C-QZqih413R+!#6L>ym6Z(Z z;zFF7mRG|GOD&eJC|vf}4T(y(82I?|U5Eb%P3_$4>f(1jB3#+2OyHAzfW3irp!c_J#1zun3r%)G=@-8Y}P|Eg5CXDm`yEJ8_%mMj|E}>V6io@c- zB+Q4+wIIFnZrF`XK3YpV1S?0)DE5-+R6 z00Y#ve&Lqn;7iRYunO4&+qk?5o#j?0#mOtC-`3CjMf zclz4jzGdT`nl5Ip(+tzp9}=?>wjkGWU17fY1L2JQQe=F# zrYd6F$!5uYGF?_UfAEut)si6i7Momr<#ug^GCoHOWcWrJI-s-!LnhV`!ka%Fe#Ol_bt_D zFuHEudbxb+;G?FbSoC{f`&f;#1V|#_scCL=tEuaGU7r$P*Wd(JA0|?|xb|(-X_@d%VdVve zl`mikEA8mhHT^H{))&-u5H}oO zbYwE#wpVZg%UoSlXeH2pHNA5eK-{ta?s?2vpsZij!SAp)bU2Xw1=+8)tAfnTQkYn? z=wY`nu-I|*Bzf{;=c4I>{PoAjs}39dF5qNh-by#^XIC!?dOvBM)B?D{Y=DwHEpJ$g zN9-^IpGao=Am!cg4PmhwO*r%wsQZ4%Tvo;E z*2%JYxTy}pEU#R7ocKPW#zd;wW^yBU0ElOd?SM#FCd*p@(=jpe9$2%snBwDoF5u?L zU8lhVbFnhzhZwJDr_s#bHW(P|)T|={;wLFrH_+QR~Zw^~*hl+)9sswFi z6<7ZbfWXt@mgrGXQm?arM5V5zY4J&(MC2E@{hAn+PeY`5nZPI`XjsEm)%1k)OA&01 z)_F$AXQU{-cPJ3 zEkIz;6^j!|`hy-li+=j8<^rB;KOyrcy=06+J$n(k1u1uit|4Su6~UlU{IV=<>QJC* zw3j}3a~r62u;2n47E#odVKZ10f7+qXbi(+~;+-Ft~lVeo~D zc1}grhpgAhz zjapG{qNwI8dLT#z#Do5vZTV-j=72!@lp z_5Sae<^zb^L2bX7t&1_4DmHu?kdrj1H;27%jO3l6*=_RLUJ4(8poxw z3!{FogdnW==xd~*qRG*=(kVv;s81>mGV;UgT){Cj&*d4jkb@~pyeojO@X34)&RYjd zDQtCcLUl0QKOcI8**HOM_x&uVZP{q0FI|^X32NtRGndvM*|y+Sz0>}AX!!PHlPPUx z(VK|TrR(e)EzhC(Wag=g*WW5GOlSEC2_ZBTps}pd19A!z}4atE>6M#uw_#*fkYd+=c$YX-E^}~y(Bw@lg z?CmIyN-qi>4y#l`+qQ}P`RxtlR7|9DO~|DcA{JG=w_8+`mq-Y?@YUV!U9u-JX^&v) zO!hv+Bl%< zmp(ehAjx9jY@*qnEdXwNBD<@R7goe&2i2_)W3Guf)uADdv&>t888zfZE?8;b#hK0` z-Hb7zmI^VnH!OG6#yCPE8*6tcI<-r7bdWuZXV^YN_edlV=dak^WQ|Z_fG?gYZ|F1^N_=aWCnGT#v2B+GA=>p;z+~*ExB%*Ou)^0*gxfocW>(uIi>$(qJ%ECVnimc z3$v7lP1>TNhnAjlc@6cQm5P>%HHPx!wWPmi3=rO3Qo=a(O{qJcDKjyxO5YrukR%Q@ z{^}VgJxplBAVzWyO!`t4vwbn zk$=E>k&IjL`aIXcGd?^>$L)rOcDoyM@Y(Yi%$O)Pm1li}1tVh8Y!P%gGcL|*`x7|* zl_VaFZ#!xlSKFYKWR1rFPYYVx&8hT>p_-M0UK6j4OXM_){`8T)ejVez4(o;s z$LKoPV{+Tild+KOKwTwZYOFnE|AaOlhxbINtTKE(58pGdZH}VHX1JvPd|%GN%M{vU z+6x#x!eV1>{v|8NZKbGxJOE6jVZdgMqE2p1G?R7kI;91f8D%fTxv;HC*tb7M 
z9rmQI(>%MruH9USwZ5H9OcnqgCV=9>Dwc<(@S+C^BZAZkb=2;_49yz#Boj67kXuV$ zS)TjQ4F@;yCLV*^vH_|~I_K)D9iye(t)HpOD_=?cIKaVw(RVJh7>nnsoC(5% z(A784igpe+3*=Hf0U>HQVE$in~sdnG-}ZY zbwz0_tD7-pL<)!l%|S|Zk4PyquQYHmze^`Xlsa88&i&Pk;LKKgjii;y{Iw}!1HVzA z(N8Y_2b=UTQR9AL>7zOOya$bod_HIW+T)VY;eFSm)sUFh&;q$kaj_8WY8Flqi_ejs zS~qAHy#}y8n$ikATzk~F8fZMA?K@X0o!9G)AzJHv^gVa*=&_GRH<-8srM zdkW=e1X32rZ1zqVV=@_3m(j|w)Q3Oe5&2ngY|#g=ShX2efoc6mR1(1cBYPGkGTSi}C_RdN#~S612smV&x6qj|MW* ziQBAklaUDUsZ?RU=lS#DgZNW+YVBqY$(5K-S%U|DlagzG&w(;>wsz>j5i>RG-lDHQ zp8$CDU2?wSSlqaB+Rt2KpYBdx!b63;Je7>JetFXjF*~3 z)&-}=$yfW`Ia^;BMy4CgA91y-u9;^F6J`=PqWkGYFR(b*RG!6+G}5BIQVJE0jAAH) zVc%JKVfV^0ogBO|lT^+w;B4wA+sxb3+6c$2|2nuKm{lr9Nxr^{!c^;E^^J=OEDX-uR@zncZGWzzG_$ajc{e<$`F(H|Zf zqHZ?lUEDi#)j24I`>W{*p7*YxNk_X%fkqfio=@V)e0ih>kDiwV{poQ;Y{AtO7zg?S zRNO#MqYVBspYSFXn9wOG;2^e5Mh2_CKHQTQ$qZ14WQe9NG|N1*d`~Gip%irxV5T%( zZOamTE)rR^wG5rkC{Bt;+OBF261%Qw5Ztv*n|1D)uHEsb*a{;E_r-$-4F7pR) z@MTEKUMRnBf&cxnS1Y>FJN5JGWt$w-?DX4O0cevOFs;f%#c! zyk^5=Z}1R$*SjuZC zJ2u(9y*w_(+(C!S-^vYhwmWGeynUASUd%fMG*LMpAFZ#sP7Ux_#fxHJmVhO%E^>3B z(kwM`S}YeH$L*Q+0r5%#AW;ag~~!rJxM8?z2yy*;7A_p$;+7067&iqQ`b2ZKR#v;>6%mMZA`28WAM#VqXlI z3pE;yo)KpC5=sp9lE6o9NiQ|8%ZK9d8LB%pKQI!%}txQ2FZAdW?G&1>t471@+1 zt>YlFC87Clb16~W#}-JUjm9PDtv!cR_RSK~Oud1;@p-j}7TQe&Lz1V-U{YFnVHJS( z$tp70v=RM-zBsXoG`7X?d_C5%C`*V}Lkf!5XS)f{cbg}NIbfxQCkKOUY~RR#-V?E-@5{T^JOUm9l1oqG0z ze>f>nR#!Ub!v; zM8g2QtDO@cC%949K2n#zaX0n{uaOA@u!_|cRtCm898tslTJ9>VL-#B}-Zd`Uqo!Lv zH!)N<&NX1-#|^BXZsfc`m~T@U$b9#FUWm83=0X7j3)IEJ?pZ@Xl$G+0WWn?9GgEc} z#LVrZzeaz}IzbOhPj}D3C+C~1#*+)=nFn+fRQS)whv^12eEzr;(G9_sc4Mq?%LVY_ z#e2DLaia{JtYMqj@a()^&V*6q;Y9*LB51nUT;#Rgc%+Y5dTMt?Y~|T73S0G=il!!m z5b{_YUGxdlegEa1G&0H|4eIY zNFGa0jk!-{3xuu*La4An_3i(2ta<+T$J$pliCpNj;zPd-^#45D{X3ZgTcI~eH)9y| zTILJk^(DO-a>Iemq3E2;V_vz7t%e-{qSvq(%OKUN*FLq_Ha8AACcipFONSa`&v4jtyqz==al`ov$ zzX8wwrmBJp#tG5vK()K$P0gxfJIKG>rwSV2L0m-B1a&DN|L&7|Umv08=bjOIHc9tB z!3E@@t-h4Kz{N1GICF7^8d~$2ymgwK^-Tpo#qRO*MdCCVv~v?;zbQu&7u_UeN} 
zu|z1rg0%2U5+4{C{Bx3KMpFD6kfRp8VelI~HifI->ocW8y)AK8yvh0`lYPJ2vL=MK z?ZY@!m1WXy$6`;6Jkn8;IQ6e;ITX?cUuB`o*ZB;Toq*6%ItS$lJ8&729pqP&Shach zKG~76DyxY`^Et>ewbzohzTL75IHqEQ_C>=oU8ibP(rfp`W~b!E=~l37Fmhke2`O~S zJWkeKY@?T`{xh;mKpYiRD>WEOu#(e<^k87_e$^5 zc92&!f&_}&Ln|1^d+vJY-)2o%=7&ACcrci04{9szSe0>%Ias96XLhSkfo_e|3xwbC zDu-?yjx}oe$oSy|Bgx?s?60pxFfxleC5oLFZ1%3Czvo@knX}7W5?6VlYj|H^|HW=H zFRQrRo2wjD2QNLuqGOxtj?eZWCSTs)s(CZCSyK_Ng)evW5-yl_e;vVWuHGD+8Jm_2 z?0qQ9R-s9eqND&?QzL0U%TcYoIlNYq%=RdQ5Sb$N_*!K_=CP?0$KBkh{>P!?6~om> zaTKFv+@6@^gfM1yl}L#b1ljAwHytawZ?b&i@do;|cVf%yY+VjruixPXTF`jU$-15$ z6AI~7*v`~YDk?iEe7(JfzO4dH0&I|!*NG9Zu6fMVknDdBjASoXm{kk`4P|jfrHD2y zut`R+C}&vB)_qc|pt*2?x(oR9v4+kWDTi)12agCh9*_NFk;&)H6v>^+ZCm7IDg)B! zs!)|QsZ0y`r^A+Nl40|zENp6u7KQ_8=OoEf&2wKj6{zr`T1va!bfl6Z@EK-Cdn#Ec zHAn|rl}t^*3Q4=jdMdK&G%$F@tW{YP(Pb z=jyd=n}t$5Df~)bD|+o98G;9@nwpgIZ)VRJO3+=Y3={61mePX^Q%7Bh?q0lT*pI3_ zt4@gLHfkpo261D9KRb8I(E-%*Dw(&MOMYy=(c#KJEr#B*PhffcV6qYgO*gnWs$Wb*VHYdz@m_S>HsevxFuq}8MhB>w6%PAzqm@BjTt9y__LCg&vz3SP*{Ka*Ds~q4_UaQ>1>`VY`dTh*L_9GJ&aC=Z0kz{p_ z$z&ztnw!9*K=o`|>fyVS8-J4WtMl zP|iI@Z1fR6mT17Pd0 zMqar?*rF;sZ$d9&T+L7YH&Nd^%_#td;9g^yM=-G}WFVdwEoAj*hT0LTl+2VEjSl%Y z&Uc-?8}tp1n#Psb!`0o2hhoYH6V575#CZ%;OlN}XLX7V(U8{BEkGrnW_w-t|s{L$* z-V7^9T_DyDDOWhjBD!Rb{OZte_{FD=?dw?R6;=jjz?EZ6vBsKL+zRyhk zA_KKwYc>y7`4{K)-_G4!r%c3kdb9xlnin!bk>yR!1B&dli zqZR}OCudBR zUKpum5VD#HT=)FInS9S&ZLRfg(bqI_2*}GT=ZqDolp+p6h;QF{j4fKVtz+s^LOIq9 zzai`&co&}h@|`#cg0~%UkdM+mgkroy3v)IU$bOgf8r^2H}@AdymZN5JGuq8N%FXI^qcgdyeFmg zb9&x?|MM49{d^pCeqUvHfbjZ=ukrSzgJy&x{TUMyq3-M~h9kCy$~5C94bMk>-72+b zUpAA@s^kShcGG(ddQ;&{r1$!nveelU-q_r_t;JK`X|Pa`gB^|EGnFum=4DcT(ly)R zw!Zl>%Slfut&xBUDHrpaX@Ct5lX&_(Us)NE&im5PY9gOc5{i?~Gq(z9N;G4p7h2p^ zj4?LjHE$ZY@YJ1qo*)5FZ`P8^$*BOB(Kl{iF=Kcjc}GzVux=EObcs>qci;^6SI&6Q z9Xsw?QNUFx@+?}>Fln!WW=KQDEeAa2kFSGep|hRWD$D??XSV_CU{?v9P|$K|s& zhT6!&%=j>yEG$TMCLoe@zod$ZtF}O(YB*qu44iaF0C=hyf`7G9hyt5Hm2k=EbP-arQ z-9hFd@!<2@($5d)a5Z>gZ;;H5i*=2HRXAaF#^`V`#DI0p^=ha; 
zy&+c-RZNHSMg6|Q#~zQ21pFnYLdk|)*f=Yr&@0S~y+_su;_;Qzt>y!huInoPP4{Dc zSW?L%81DB;BR;Sk=$H{_=08x~{c&hHks~Gdh9C+8lxi(QD}BGMR_3rF7IM1rD50Sn zNj3{nI*k}M{R*a-epI77KrimDAE#zJRY8(a;%a~(BXpi?z$!$Agol63&TbnKQ}Iq= z-LY@I~#j`CM8f`uNp?$K>(?#>arw4&~eWvl~>p;g` z;@)3m!F2D909bARYZx}!?PKCdB<#D%zZvQQhY(CN)7(`&9!9*>98*b`2xAcOb!L zec0bKvr6-5Qzv{-7VsOF^>gupG4Xji+Nkq{*2h@grCF6%(+}Zc6Ma1tEAc*WOwB}> zClpZ!H?2CheP6P-%f?Z|?N*BrF-M}*bxkCgQ^lS&ozGI--jr9S8L`}9kL^f4{jq!B zkC)VDG@r0MB!Y!u?t+4fs0wQ&ow?2mK?3)}FzRN?>aHHc1J$AsLdOwB()u(s!McJM z41TRtf)uX{9hF~f>YTCs+VLrkQeZdCvUO=GK zdeRo1y`KY&oKt=UU)0o2;%f!ILE08>f_2vsDzEv8)XJIbI}KJ=#)Con>jid+Eg=y| zm(y{v=v!I?3(Y6xL7$t_W;0-URP4pTnB)l5;!vgDycK#!uxmvlFCc>~2Dd{zvVePg z^+bK)>PLzWYrU8lO^yCg6&7%v~MRARI{&Jb6yaU?kvOv`@powf zJ%OK(zrKjMjc9!)QdD7TP~lvUe4`{6b0-%pQ(mdOKrO-9F<&Wq0zgCWxI~=>VUd+0(J0>56qQW<2~FafV`+u`ju^k41w9H_h!8)KqkPjp|^^Tg;{a`_^H^l=M9c%ZlI`XoLbRIyMW zl8?yo#4GE)-x?VWY{{N-LZ_5CNA09UGRZ)u@;8z3y|mgUiguSS6_Sg(8?(+xZa&lO z1Bi=gBl&=Yull_ZZxGV~54L+`302iI3e~X2MDqkFnfR0t1=JeTHnw}`F!QlDD#s3c*}PVv3lnpuWJ%)mVSKxbc^6z{`%Q*FSVfgv@93iZqm#b#2!7%WXmo_2SNf1%#7l=r9GbP;9)c5My0PJr|N?o#@LSNQw zhMaRLQaS;|R8=f@(}!EgU3Z_}V@4rf+&-eaNAWzxB!jJXJTgYWnJrM!Z+T^<`0Jwl zWB5Epi!;dT<@FiI4e1;M6c6%p(;VZA%LG34_3MZ_yI)VLkYnz>?PhSm(42haP{hpX z$hR}B&sy%e@4cHEO6q#b{bNNX5vlQGgyd<(G05Z-p$aXDC*##?9lA$X{eH1d+uIm**K^^|&OEcgi$Ln{~yQJ&VKh|+1 zKSaB9AAfG|hSeRb8rc0&4fM{Q9PYj)`P_Ci?wN>!(oD*iAW^fANal^WeIG1G_d!|R z6Gska_Xhn>qbm&PzpfPe&t-%c2=pHs$Ci<%aLcpmwLMFS&kHo$qLI(Pa>*_Fv)-P? 
zkRpgBhLl)&Q^YB_CT38O1!~mOKf>GuBtS9xG1%OqdTRIPYs13YJippwD35{Lmq#|E z50adnOV|={#~!gS_`6kiP-Hp*3W#PO9@F*>p>HKF1sTn3?CmfWq}k3%H3t9%eM%5s zsEJ$FAV3@#B5 zUw5hom^9^lP#nUj5wMC}Il(yQ)LhuzJwS3_Q;qhMxsG@11Mh3MwSh`zk;jmB?60vvrD~Ro?^s`@S+yxb%aC;lgO204|S_s z_-na==)E?d(<1Ul@hfsLFjBJMP4;}VPkbUjMwJS=Je&z*Royvz5TB_2(!Oh1-A%tC zVu_5Q@dAyQ9RUfQRBk-%^N&8(OS7q|aKUBI*?Td+NflbVa$=Q)!}?sNx3&%zF5uoD zpKgWZGpKCwzW<0=)HngpSh$_}+xg7`eBR4aIfKeaem+SXtJa<=k9O|rC66R}S{PVa z9Dpo8ze@j$d-HDwp1BjkMcu=wdXj+*^`}(CDh(7=i;5TZGkD3c10jVJphIG;dtfvY7 zze1gH2KM;>ctHNA`M1MD_;FUj`wIzagYQP9)y|^pxx_HhjcL5C%oYQEUkE8CKCy?> zNWOco2&JT=c!7riB$7$(EJq+Vr5jC5%q~_#d9Xebcyh=fgHMp%NG`VNT_b$XF2)<5 zB+)Nh3{1QEE8ZPga30IkBVgSKBq6$yE^aRy028A2bof==P|^0@^Y5G8E$wsS8&f@q;n{w;#~RJwMK?vG(;7)Vy*#2Ti$vsvY~{8lD>FMfI%@oW zhz95aWz6}Ddzii}(bIDY?!38X^nOK87sh@33yVx`!-N1ulUGoM3HpYEnR=EsgH`vD zmSzLn_1CQX8)P+_aYG~lFc!7*kzdbvSL7|1zgA5DD+2%L@ANPk|MGhl^m^tex$aHY zU9maL>gccI?wiw!8^Ykyo`7#=kJap(f1iQmmcW1h_22od#QggVS@inMVtMXOYj3Fy zMX5(z-Cr{^Gw%bct|4T<^5ye*9G8D{IX@*NaVtl=QPpaj+)AMy$LDtHK zwvN1N_r{yx{_)BDzMfxeNfWz#^YY)H@b+^x{$dKBeetRcln-J4`#b;lH!W^G>fU>l#~=`q!{J+{?0ACa zwOuXBY#$z1%jo;xv-<;4e9rzgCbIFqXWTnHxS+oN+K-!&6pDXm5B}%x^f3OhGVA^W!pB;WTQ?Auf3gPRy)4*GK8pB7&< z;eC05l9uWz*Ldq}@%@*#DCOjuW7Lu@qi){HFwEf;x`(r<(Y)b&xu^{i%2gLNVS8kMj=b6~eST~j zXj^m3AQHMT79oD#(z3D~A8K^Iq(?m>&0THvku4|~7@jfgaV)RlmqEN_pBo;2=S>R4DSJa^9vFCIpj6V-xj!WD@j| zNS6&zyy`^sKOOr&o&44Z88*qt7tWVhi%gt^)Esp^K3-8yj!#a!mZbu@ZRqjwZO?2y zzJw;$``6KdPKz{u+JLz=0iZ}j@3Z3r5kG-BKOE^^Cbn`g zq&tyh)L_WxbY`x13Jv{cB8fZOOC(7yN=drpM=UmH>MD6MQos7L(N0ZUeq&7C7+be; z=Wp5&e;H>qgl`7-t}~-X!}1Eh3EB*2m*@@4g*^9UjNPNf-!EoPu~A+k6*pHu?kmEo zI!&a1e{~;{|2#C8nq#ipO|v%)x#@3XV7CqIW&iG7#$O=?(=CPmhq3U0PWCfVB9*td zwX|ij1E>^K+(4nOe}BJR@PFY&|8a3xO#a0`7xQ0l;y;%8cX0EMW&W|uKRELb0{^>! 
z;UCNVW0`;A8GJPHkE8s@QU2p7|8WQZxPyP(!9VWc|A)Z($5H;{DF1Pk|2WG3=N;vK z8<__#khFKBsL`5A$Lv+-Uoolo9&fe{9@nZvc8`-;%0hMu^HZWYe<~csIE(|g52;P4u2ZsRA2jw+KOhP z?nKSx0(JCx(f7qTZ6gu0Ae#c+t zmlN*}^+@cUrTtPliod)|UP<%o+!Qx<5HL=wS3F;C3&fot`B}7q`m3_3xmDvFdAc~v z>To7|CnMT(j%vEL;UhhR-CPEOUJ`RH1W)3FSR%YM15W4nczco^>KDw4DrYAxI+^3y zDEjVkZbW^QPZU|H(^Tz~gv^+X>Or==#zg&Pmfo{$i0-20vn1^gr~E#Jj^0Iu3ra#Z=pTf1SUtIPzgw34S*^|E8Lz3QsP{P+cp6>d z6E(6^&w`k*jAO6f6rO_~8A#quB1jQ-Q?Iv$%cQGcaxqOlNh;s28tRi&1lC(kS7*7P z@Rg-Ep)8IqYVFCvkeLmhy8Sbd#mBc)XBZawTd!r53C~s>p>2il_KGmA&_!N{do96ZFs82(4$^QG z+y7+RBYC9kK9G0WW3{FU95~f*>LF5 z+7iF3V;xnCPMzuzv!yQ0x_e`lSLp?aLsar;dT;q%vb`I@$g}3 z*vh+G9BZh}K~vJbdBvhCGhj7PL8X!9sIL~Cyd{R-j6t9)r0TssM2tEQ;mhM}N9T+s zOIII+o|(Y(fKulsS|Ck22&5l6k6Z#nLl zc)sM*ioOt`_%Fyy5B@_w3x>@xic-L77%}Ds*EPRB>CB0}Nu(l8KBVQ8)RB-Zx#v_m zJ|Bwp84@34Y)c_W^Ue9<>P!1zWd^XuliY>TtzP6pyB8T$D%K4Zl%*oTTlb?x@1XX? zrWe`(pr&8C`>iuMUjn|@)wN5_KdTccq2u%&x<&Ygi&Q6Pz{b2lH-2LbLh3R|*eGFp z?IQ*J{HPI^$gKwk7l`|qQdJ&o^q}Zd@+v-bO+7PP0vzSZ8{30HMqU^p7aSJUqXpJY z94*k(ApNS(Q_W5ZH)B`$-f7_T-7{;*eX3_^pCeN028ZRLtxrY_w97Kh&)&XaNu@T* zRIYU4X3EOC94*juO^@)N`{r$T@0|wVEHrwIqqE(}>=HCH`bpE8NYW0Icj5gg%jxu( z^Wo`3-hO#dACIUHB5gd|p5zaqknYap5IMA<~&E(Hr-f5U=b z-?y>pv@V)c1&||A3U#BKXPD;MsN#jSpg+X&ZwWHL1@Ur=fB;Z|3**DH7gxAr;bE)7 zRpIz$J;yKWJn0%Alx6e>TmJ>sFDM&7X0y7|tDZJ#x+YO_jAou4M7H)H9Ep(M1Vs=+ z+>K8vZd-qMB)xR&Im_z+5PyP=0k+`KJg?QoK2E%{a^`+@e=qjJE}t3DN=%1MREW;@ zNq~&$^~Jj)$3jP_lC}o?N|kbKrUhJF7?4iv%k2NsYJYS;{4MF~l0Rw683jb$hW-+C zpSjXb7iu)E1u;M$!B7%rvmW@kU~`XGsnmiqIb3A&vt7+vn?>|nYT>w>&Dhg9oG^$U zZZqrS5`j6+RPBlJ(;v()U#3wp9L*iKO!e%m`!U$fqkOtQHPj=Hu3MgJ^tENt-2gh& zV;jH%@1q2%=(9b<1eVmT%F$o<3t5xo&#q-a& z&lbC?TEovif7!0{tsNC^u&r5WJX^ALIZc7oE(E(1ix4(E+ax?wF^RHFMD0B=0@ofPBRNqw$skiJ<(Xw9&ID+#7~cG5iX=HQlMvV zeQq_e%#m7!b+D4u;b-1RWXV&`WadAB=;&u*H26h8=CN#&KBA@_pe13dy=P44IPOqLYTOjF8ijUM+ z_Y5|B69$=N?5C^nIh%+ludYAZ1VeGBS2Ea`&Y`<&sO1JAX_dEUmwd)8^!yaM&N54C zHyg8>8QCk1!vtE-`p}g&o%Hs4Z4`p3UNTDZq~6)vAL>s^?C<4g@H-2}p#5-`S}$LD z>XGI?;A=U9+Ox2C<&zb?B?Xjx4|J<-hF=3!js0n 
zrAzoq3qS0Jk;S~h5q^?HGPvPvowKynBAKSX2v}H*pYpUDMe(k1%>e%rs^=+>ui5$T zO5+FSc>U&4;=E+KT34P^sTpzW5MKr<7hF-IqbzX$X9OWO+W?X;9aP|q?_0PAg+g)I zxn7#QBAd}NHv_%e-jTDteug8%!?S+jzN3k);#qI><-M)Sx$50dAD8Njef^dbS8&s# zDKw8@wP%A)78^I$4m2dv3CCuvpvT{ha;#@e_LL3u$o#Zc!h74R^3k|+$lfPzOEmuN zC%PBOpYjVrHPQAZo$s8`+Y)B9(G)UTk_j3P^#IF=>b*X11Mwe$z6>h#d_DP9%|s+E z)Ye9{*A=UxC`7gX%nLDxL8x zC(lyZ1eF~08zW1mkwlmEA->#yBDsHlMF{Z{N_Sz8re$;2@keD-tvvte^-wuys;GDw1(ffwDGcS(ysTT9& zmHU%2^`z)=F$|@iUk?3{Na*@-xgBA2(I4#%@ z*c$USi!U*yq8;Z;;@&(~6J*KQsu@|CHO3zFw20etv9=jF*oh~{u8`fyxEWGe+09d# zKk%~U9j!30_$;4ko51ptwvmhVWQFOqQkzoEX{T!GljVk!q>UaN^lSjiE3(NO?_D_U zR?ZDWS?RCI?!BcOJ6%xs6XcxQlB;!{96jqiJQ&nh*Xf)}<6#v6W;M*XVuM-JoaZX^ z%I&R%=X_vS=c;o#hp+mWwUe(DZzZ;!534GlMyPCfw=aBH76DJE^I>z!KF#mgC{>4c zfAx$mi4s0!p0FlT(K*moPrRZ!Dsb0YiadX(BcFQHuaPxxnJsZT5_m%rO>>+5`OLcU)5p$ zbE1DTY2yF2o(CVSa;iDjY#jzK=a=?FVAf8LX(X!b&aNyU5%)NVx#oke(o{J}FQ|^T zrwz1ERj(MCHJr_NA9IWB}7c%{$s$he|Bx42x z#-H>VdVLRRZXFq-C8WM;Z9kl~)KQh$TpUei5k;~ZsLkskKK!F3WWWL^Uu?o~q;KJ%tr9wv&eA@9yXd7{GEr z+iN!y_A^TiyXzedYI$FXX;i1#u)UTQv)&|mI6KB3f;nGkUy2Pb*>%C=k?_NJ3>y@E z^ZeL7+}Tm%a!+ZWz}TE)sHV(GQ{z&^R0SB!b4Os3y_`zTand!kV!6l<&{B~s3t3&uf%o@&O#AU~Q z+fe*-Hn+16WKOa&(QT8d934_o)R6DozEkCE$BMc}|EPuJb6Z;UrR@Rvn?kh*tMrYU zX#K!4YD3p*H9uJl14KRDZmcjIFvsyYw?3pcA0k4(QYV3gd(Hmwzg4I%cCe&I^}(5IXRCD`5O{@Eyu@O zZ(?bA>VBvTtQ4ee=oz)dXhrX}kVo)|>mj!?R4$*i--E=c>B6QTU)#2f5ViXR*j**h z57?;h3qA3LiqlJK3vTBXm8&u3ZaS!s%XQmRyv&B`MvJ8v-^dU|V5xvu&zM@zDSX7Eu#xOd$y~cM*&Q+P zFfc1TMfA|X^n@KsC%*OlbX}R}^uT3Ugu!c-&th; zdZqV0OjV$$;qY0YcTIY5YxI{$k-UPW!tt-e*xoXqFjl5s2Z{c@!v)y`mj$PTem_Dd zV!n!)%0kd&>Dh_r#CS1fT5*J?dnB!Ja~)u%;F!DgTR>yw+)EUIxm$urMQ&=sBh}U? 
z-HHzJ6j@$bQv|M7tpsb;_3wTiwqAaHIjAcXWOm1C&}qABI*m2WXL57K&ks_IPjh>8 zEaCcnw(EGLwH!w`Cfhe~7aQfBD9tsprg((?B(uoOTe}rz&s{SI=0b19*#0ycy1l2C>VWo376EKF;#T4hHbYdOZ^+0E9Uvw2tun}4v5=kqVB^^f< z&rsABFK+bgeMyvN5dz7&G2xLI#qWh&sE2yF-OKf5ALQYrhfa?0>Wp%Dpl551>WepF zn-HzW2JeoaK+8}ySRB$gUL2(bu5d%gQ3a?ppl`A^PBU;D+6>s(YHM3LZ9S}TNGMM) zhI1AEN!ncSpS$!2-X;P58*5V(OO9hal8mH-1OxmwTak+DPpm4sy0*Wp&K%7`Ld0s^ z2d=E(1GG^c5RkU)NLq_og8`V7j>U)e8_F`g=G??HB0CnDb5(ohRsj$+^zuxI89QZu zZJ+M43#a1cSJ6X^?^({zfNBL*fKip)PC2Rs*L+}GD|Jz+8PihsUZ13FLdu@>Gi;=y z(;bP$>nrUENaFLwy$N?l)g723?JXzz%`}It^_0t`Qo~jU4vS_fa-2hgJ8X&)(Y7Nt ziUQwrcY;TA%lJ252L{zN9d_V$ySZ2L%j*sfYe!Fdt}!oBvzx`z-UvxmJ_$$3Qn#^Ls@m#yPUW+{gA=TTbVL^a>5(LK|0xEc<<@ z7n7Pt7_sYl1?%}=JNaSrxp5WF*XTbL0oLEkPCM<(6Up-GbX%lNjyN{#MjB~{-I2;~ zT0i%IDx^{j$5*1`*XeYu-%sC{y~!^dbni~e=;_GQZ~lQd1tN9x2&47f6U}=VX1;h z!=*){Ze9m1Lvy()D0Zw4T!8J2c?)09)^n(7(`j>|-b{-EKV=%i_2#aexo_uk^6;Nd z1xG#Kk>8pp#T}!n*V^(n%K6R(i1PTh@)+8k+o7T`)*0))>NL*o+@dlf19B!^=Jf>h z$Dn&=H}AsrSsF3RMHcOal3?j{`?uNaWc46?z!rwdtEbkn(D2bM8=uy9!^n~3KQXVb zg~IoKXkhM2?Fx!`(mTEE(FIKR=+YfGb67T7$8l$#tyZ~tyL7gil1b{vL-`X~ctkUzfc{ULn3%s?t8X_C#WQts`v<%$L$0G1% zJCO$$PQpULNtq+9z><T|V+icL9I(~!-YFqtSd^NNkC-}?q@o-kIT_g>x)W>xe-m}5^Kso zol_|J=DI+l9d{D2)DZWi+@H7pENSbM!9)tTNleWfcf~g_yWV?gWK7XlMw!6KcZvDn ze!rDv1zTbqLGXpYdg3}>i4qeKqnJTGvZ{K=4K_Fp`*@VO2GEijuIpDQ)sn zvXv9L2N5>l@QzM)XuwQA_~;yZ!)9Pksk@zRcEBOYs8P9ZDw1@hKYU($yK>ZtSg0Zf zRTz5540EtoQf$hp_VOg|N=M)|2K)0@r@79#mV(f-^exr~%Yp*y0snQTgOq1sqLL#4 zp!e>h{VpT*avO}uHP<|2PmF3jgB#D1gHG%Db}c&mL@|0E*#>KyaxL7{Ig4#}P|UMp zys3Nrk?F~o9O99(&)?(q&>D}H-`N&9`o4mwH!@Om7If9S-oq(ED`&D~U3E{CH@p)hu&zK$9RHKJxA|Vo+>$!|C*}{HrcmHr%p?yd=~-Et=gD({!+;%Rv7)c-y13VSgXu8{ z+7+Meg=yaN`1*0VDTu*I?$uy@Jj+S0H5Lo87lT_4X3goosF zhshxGLeV2?1E1;{UE{SuMz89}-a1c2yEqg&tQk)_9=?0o%~w#M-o-Fuwr@Mig!bT6 z9lgBR#U60XP?xldElSC!H>Hf$9S7$NzT`srV;@FH^s$eu>NC9mvSm}og!1<(sEbbq zb9ZoAP*%jiaeV8Xq8O;qMb>UX=MtWWoB}`|ung0#-+nP`;uN~7JH>`G5PLM&US?B_ z1Z96sPn7zkde_YY_*ii*RO-#cyC-y>U5R}m!4c)_stVNSA+L(X#yKQ9HfxV1cYoBI 
zE9WXci`g5>^cQ2l=~_NYhY(#zzCIT6SVthnuX=-3-(I(3IwQ4%t1ro8+njYc4hqmY zxMQs?b#ghVbn8%3y(=0?%r5klNY!bBSotoR&VBuR#pyup{(e}_0mZh@1)BV?ucM}U zW@>AI^f+j$X&fp0ji>QaWdZwD3lWZJgZ;BF?%g$}k+g>DyFa+Yx#A`o%b zOJp-TsDr2Rs%NVkWD4%1D-(45uxrcDu5mOG2ddKuo{?F&cKFlTxJPXejc~J_v&+4 zL5L!nRN|sXx%H}iqVM78u8%Z4+fh6{Sjgf>5bEiCSDzv)IF65*!_)A>tl^>sX3Y81 zwdtChy1hKCSF%8=QaOgSNJ`X~ci~(l9XBh;M@#fq@!7&yqzip2Xw$tc2@V_5ddQmM zm_K80hE>POgQvfJl#l1igi9}TY0G1rx?Jv!gz8YihsOqlKYUS@7 zV)w?|h%%KTjb$d;4#3ALr5nwKd|FS-;qWksVTH?KP1KoGLU81;;9a$t?!7el`HP7P zE-#ifZS%_h)XNIw;3Rx2jh3B*shtVl8mY|KD#6aIxH0D)cm1&^ZITYsD&OC+)y9x# zq*0aJ%zooPpSBc|#hcf#|D}I7uY@|6G{>o|H8V4B zMn}e+W=vh@kSZ3Lp z_cQFB0~gOzWmk*-wELz?1R}es_xPnvvG-KI%Xze$PBN zMb|Z2u6KN|-#K(=KcB++-Bgv_&X{Nhm7JA-(^|!O#tUexv#HZ ztu+I(+j~Cn(jp?iHmO;C@*xI>q-7Vw^x?k69b|88DO7au$Dq-^;%k%D-8?D)S@KxO zBq5F|;VAG?f4lkY;xm_)B2U#iwoR zRvVytl~aA}X+nDDlzU$HP^y4j9G_;UtP911d{DM|pBu$ebH&pzF8_=B#o=@4K#36QUX>Y+hGhkkM5L*UD1# zWbd@*e9UgyVF2khe>Pgp;Z{kM=uF*&TL84f&8fEK@$K?o-;w2?cS?cn@W*|>0ulH7U zZ8YZGDxLLQDf_6ERKz-6eRNnwe8J8VD>HS~=JYcY>r!oM{b`L}z1OzQwT2*(A6lI) z-(QNap)TrGmlY7UJSH!o>3})IV&Q_oJJmlGdKEEOh+9VNU5DBw-BReDTjmtfNV&s( zW42v4)uYI@prgfjw(MP(ymG3QZQJQ~^5(o>X?fqGUIzzr7ybU!$^`*zFlrjfO7EAR zN@l#sYesv!v*%}Mu5$T^JU>IfTJVjXQRXocd*TFZOetvY^<06c9(TOiuorx&D)nVX z(I!yvvD5U0j8UwHg!XCCgXo~L) z3vUjtT&SVq-}XFp`q))CZ&a2>RdchWTgIt>;WvsdL@kM!F{p!kg)0n0xRO)&cL_4V z6)<>(PMynq|AE$ErFypTi#h3#1UN;85GHj|kkF^8YDWbFX$fwRi?w{%=s9JNIocQ( zyuymngsX_Vd6=Ya-^ueR+NxbFtg?fzeQhWZk2tp?Ybf1nU-ee&I;@=YGO#9-*v&L5 zC#>Bc|DtpLzL0vYeR1oW)u{`Z_9044&$ogJb>)fraEzkv8&KZ4g|1ZFQ1tSc1MEOw z>o?Qgj@Z*}<=+1dk&=Hvq>rua6l8W(-=hft)lg`kY%^4dk5*K7BpT^!+pBv{#}xX8 zW+*03{h*qMR=VP`BvLTPVHBJ&YkxoK#F13mBT}4q*nc+id&E zqzDVr)B!J2h$XY zIW?+@b*M_#AME5lw6EmX*Tg=!+ zG35J`0gEMy8!V+U&C%>uB%)Tk`aITO&)bsixxsEsE{juc(_+Bb+N`DkY6zaw$7lqf8 zhUy=NfXq_XdI2t*2+x^#$;m<`g$E5KO8P2gU5aBcuJWgW`Zc_;LwrHVz^jbCJvq$T zbWb~3+;~QiY&|cuYUwHmWmbi|zWEZRBGp2M$8jUA{dAX3YD`Ib1^N5hI`W{de&H=~ z@eOA6;ktum!r+6KOt@*j1vv70${cuawsL-vlf?`ypTlkEJ_A-2`NXUv$8vQqOGrbC 
zqro|MNDz<#=Ia6||LB@-oDLr|(CH_7y07$;N>0@5$81esJSBZ3&j1kgDr<7+I4t-7 z@rD>WEhy?$q^SZ_=MSf+tHMRay!|+3-!NKmFA`5lPX=tt>y4I|G{8d_vsHW+Mpo8G z!gbJv_=qi)NIL1B8g?%h+#mmPo-}xXW6)l~ScBJsU9_{gYY_-xYI{tLmbJ$Wtlo5kKpV zJUy8gJkS!R!)cd(@owRM>7%C^m06n(9G9#ycOs2{W$)|lL7OXW3m;q!^Qtgt^Y_JZ z`89p2-tk7#yrE$QR{pJ2++tPW=Ha45*Zu-?Zc|QZV9-75GGwNH4894|D;x#b5-X9= z^T6%vfylH;0W88HdnGsTYUe5S7j2;~>t%~**85MSg(`x(`7(H-P z7n)4S*eXeL4=)C)iT7g8Lpem*?}3}3GY2BIjFc7gw;&}oes$$Hcu;S>bUy_J{zprN z=d8brFZRc^a6CjRBbNGXZ+=yZD3HUD-w={pcm)aAC=FtOE@^Kgm~9t&b!+{zXKT?G&r+OCBzdRMR@eITg&qPD zX%aejzV@lAPTHiiO{chTuT}I#Exoe_`Bj`FPp#qRNaJoTI(=QC`y5wLf+!Dg6_C?% zS?7G7JX4tx%@%RG3Hju?SGtp<2|8MJ}`oA1+a3TTB!%YEs9#KpV^oXoxX*6)T=Ln~T~KS2I6c zv*m8@Hl5_o+0c0;q@mOy?sUBAQaV$i7E^T5C@xfR#Ua5Zb*9F}vR2b<(X3Y{a7!0> zHotlInI16$P#^)t#Erod9*_rm4$|;53=L8=ikz3^_2^`_S9Pmb{2%h(JF3aGX&1L) z0R@#NA|Ohy3P>+X6_5_0grd@-i&V@8u%7pP6fBuDNFJ=pOUY0NeZbhOM@e2)lU0SZ34*ybT4W z+(gRm0FUju18e_jHQtdCp(1pCJ0Gj58g(Nf`c)|H1N#r|o81d5-SKMJD~?4+?H;g0 zVaqk`Gb33t!H*pMN$Fpqz;w&uGRHxS4P3ynadHJmnXKAq8bvMjJMz}-mTLJ#dqH~H zmpo#G^`}N^SJm$V~pH6 zM$|}*F1@u-7x32KGQUzT@pOZ>n#_ICDx*V3cTnY-k#|Ew76hBxmdlVmKXi-?_T>z) zJzpUaS$BV0z@bpl0aNEJ;lMq7Ynh50i?MT@JZL$ zBWrx|<*`3(JyWbB!KMHNip97LJkkw=5z9DqxnD$VJ^P)YSe?H&5@{ZoyvQ~8s(AW` z3~8`JRecpp1kQa&qO5@`F+Q;nQ#yXgSyVj`Dq7pA6bb5H>G5Vg(YOqJ{brG?#Ip6) zENORPxPFDfw|=NQS50<+HuHJf6=EFRVF2WDo9iRCR+kLj(4csf5!6J798N3ue(LqI zJa>{#?iuHUikn(IR-94ffvMKo7BloojwpRdKOm`K_9bwiftxXzXEauxD30mf?DISY zB=uvM)QH^^-o{dfWj+}ye^OW}E@jes?UsDkTQ3uiD?-F3n1tfviEk0@K&$8Fc*#I= zma1nvqIIygUl-gD8O}Dv%_Iva+2gNSOtV{7D?G~S=QBNMh92$Xy9RVFGQ4`{u@$(k z(0WcWIKXE#ed>O==(zqmAg~Ck7q_|=AE7~*!WcB{t+A#is7W66L6$@g_Tt*TqgWC# zhj{SZ7U7{~e6s8O9pCl}i1pO81$d^WVR;}&kq5C7e9*PNgmThfHEG^{MBkO^BxN^q z(co}Sa&J?s7kRk7qS=JNCMx4y>Gf6pzcKi12JL;X(^|zGr)f=e$0mFXQR6smV3fIW zT&;Li`aFy8{-A3<$PU~Ms782f+`qdQ`~ohNnUA}npVvn`>$n0))%lZ=MOfn8lGc(L zGvBQl-PvyuQVy$~`2(fc=@zp19jVj`kkc&6rK;}dtb($AbD)moafX-c()?6AIoVee)POnaYFcf@)BAe 
z+~3lGiIGP6UAiOOCobTUKw@(x|LRCKjI0cybISm;=Wz{r-8JRr(X6dWd7gqAD6K!qu6=M*+fLCMNtkIfGka6!R;{;)sOrX2T)yL$;VJPW9gY6+H|ygQ&!)-x zNbW$>3Y`?Q?MBl+C8fsD5Qm0j+sy*4U8lnVR{CVkE{=8TRNtQkf%(Qg(jwfSzCHQZ zb{{kH%ka4yN~*4*`Ag@CvKQC6xg>rUJr~TqC#+wpagJ5s+eyk&+bLGT&>*srHp9K@ zB}=lASQ1(P?|@vxdI1Tf6_1MfgaDPcr1!f8sfjO|9-Jb(Qw(v=ES-mzW-kf9?uS3R z&9spmoh&%?TtdtN+vr@uP!y(-6*LWqn}Z08s1jmT%{$$Ka*hCF+PFD}CJaz?Mjetv zY%(`aN|pw(;1DFgwH2#f-Qf!QV(Tk|it$nxg^da$_tYj)9VY&xrJ~Dm=2!KbUxkJg zp&l1k#CC;l;|o z)|T)fjNAD?(*@9K)++`lkh6|k()+%AS8=o(tEC$8HRxDSzP<%D4@Z=h2Y_Nj3a&Jp z)X?puUc0fQ#F9bb+Fi;?Be!qWpL3qW{Ifu2)xCwNQKj(!0ZiJ#hmkxDu;9^&yl5`P zo$Hz(dJejJ>lj2lcF{V2%6o@lx&Xd^qZc-2kqCWRF87g2jH}AbTRFWBCk}y4h;%C^`Fp8^9I^`5IR z2c)RqLd;s7s%v3Nk<>{e(fV|+83vFLxKx0Im}S?E+01SYpv)T$;33i9QmWc~Z!=Kl z-uBiE-mlpI$|M-srf;R2;1$>8n<5#^yYV%Ihkq`dW{#(UvY@DL)&pH(kz8t?Aos|G zzUgn#`C|nCVz|M zno>a-DBmgruKV+fxSb>B)c0N<^6^6~y;SIYCE$@eYVJR-(mQ67M^yw%5i4^t+@C4@ z%Nu=wG(~ZZmEQXcSJ`sU3X)}KXL#p1EXi!FWvqE^F7rl4`<75&e5$YQ$e~5ROkCHR zwhS*^^f{a|j3s})5+E-PE4@s+?lKT4x#A@$E*Zl7jfS=EbOW znM6TEyAf<`jv_UfLwIkGhKzTqCZJ;5`CEkx)ds;sxTrt+M6!4XMtDm7>?HU`WioDP z+&CIAFu(2gL)~(ZwNr6*oKKk-A~;%Hl{{+HGwQqn>1-R#nV6ol5l?pSoWk3rSP}&F zzzhgY3wQ-DXK7~kj%n*5Y#$ZM6?b(A_Muy8yE2NhNe84qtiQgyT-Fs9eP%jAx?b_; z)9!;-PF7)MwuILq=zb8yLO#_cZ`dv4+9b*`IlDi^egBTeT}Fx|GjFx?Bjkm(CFKYk zCKHV2q~4$%^xjg0DR1S_m^E-q?{Q%hYbW6sVd`7N9gCG}bRk_ZY6=+kI4S#yg=B|g zlA0f7rGmmlKi|BlF}gtzWbuv!x1c}F3OO_Ijd<`{g|slyA9>zB2Cd^lGA+vrnYRuf zv2!fxlS6#XI>At(>uLKHsbeYA&EEoVK^X49+(%aCX)s1Pv2UUR<;vS4%fhl5The;GVkt~y(7)VRKjx)5`&>9^kUSk5YW+%q|QsF1#V17=!~YX ze=NVu#NKIT%DJ8j8f=&`-&1L8tK>RP4hDCxZSX}9*|*q@*@@i`Xa`_|ab#F&-Ozy} zdtw@el5oXabc#-yx${mFwg+$qhDsE-rL-hc%*NQEpM9{Anafd3miy#tRSmDIsZG=Y z$5f0=cIGZ>$K}1z%K4LJ%hfro$6krau5r=S99BlpMr6nyNtd_jkER!=dL+Kl?v&MK?2^ywy+k>Jl*Puqid1A-ZG-sFv?+pLx!d&4%i#a3BL)jc*;@1#g= zzvOovWhU)j0G)OCM$F~NZM)PFV|&QXN^*Ni0N39CEz!0#tqMPDGqzPO4zJ4eO&Iwf#v9Fqma{~7- zreFdrwb+H(4Mp@x-M5GJXRGwP&>k9)kI~lCnN0RmTe>35T(Gn^QdV0pQzR8AqKkyP zhqxEL>hgAAqc1#rzjDM0LB~f6N_&Xmf{I`Tt97-)hs3H!XrCW`2O=}UY;-d|_f@MI 
zpiM5^W}|=(QaZ+{L23mUUen0{It_Q}S8L{CGS&Dv-Kl#P2Yz@r*B-1wQ@qx#;+E%9 zlkG|iY<7u3Yf$Fn+8lY@F*n+Kr9HkY!4iV=t*)R-7k_UUNDAqoj2l2qM-(mysbT5% zcCZ0%i81}CU^7|FOf-r1Tw&R`MW^+M2WWUb1-%%(moXRVqmblPUA0}EIEoO*>qK5P zYTH2E@)m2uOvW~?=Mq}E&AET^05h3Os>?WoO(9A*1IgHXJH7P_17Fj)%vvZ<<92F# z%$}B_`&AlLj@R2;V2yh+{UMP$ignrSVs8vwXU{YVJX8ya!{x!Yt(1EyzWWQ79rT{7OVd{+-F%&f()>pq zMp91(Ipk9eQQSHNY))ipB-J}#-lFyqK$W6z{Y=xWQ>&0;K~2tk%uO$?)o312VqS_- zj#!sm8m=*yE?pl`lxx(WP|a}_;lvZS!Q2UR3mLn7E(IP>%wL|&L8g?NMwG6sLTRgy zeGWSwDhxulZu!PK{XHIa#_!$l&S>w0cPgGlMA=z0Mh>rj4>+^jRjya0dRyMAz!MSV z5Pv^T-?3`N^*{v^srGs)YitEJ2J5VR#r8K)s3Y?Se zdvaH3fRguuqv>c`Af2fhf&Ka=d@JOICb7WOV@|&cV&`r-s)t=&9{0YX z9-hBQ$`>IiI=08j_wbWtq=1=ZXL&GN?M{x?)fnlH0Mjl(YtP6e&`WxYn1Sg@|J{$r zzujKL9a%*%ly8jjtE4zX7O3#?Y zs@g1b0Nlo!UuZNl#)jIN9@-?+1jsOWw1;&PN`dwpw=>;`xF?{Rjqxrelbq^DK&;k$ zrYj1q{>fo8X4~7xbU-|R;?H#9gHgHCv<_NIUtpu|D3Q4uhgn|Vy%EJqXM&zk)k;Vk zK)oFux~9D^4Vjjkaqm?T;zNxKFcN<~L_#pNd(*vY$vub4yy7~VuMu-CVnNYcF1f#1 zdekjiobgzQX5qIJ-z_QYl*EN%HkGKGF0tUuDZ>)0rZS~`7)qX|00cct1HTVk$f$kWSZlhbfCU@)U1?6#RL?n!U5KnoGnfb%m`BJ z?yL;0>;01$YD+1~@70QZu{p;F?NM6E((cD+FNx;E7o#NhhWx`w=aOMKA*{=|ev2c? 
zYWW%7wmqdOAtPTnIO)2CmZE-8O3Qu+Cx7%CEweOvCHQ^{+g#@3y5U(!2Mp&6om5ir zU{R~e16a0gmFwxJ890vpgk}?j#;Z$a_A4IMrcbe@jm#93bE?#}{jP^|O-+LM78#7Z zT4v<2`zBZlynE|}tIzL?X24fM;0ULMNq4yeGx1C2~+dg8h zVr4QgCYBy&;ykKH#43R?nzI9DmvBx>x5l0Ark#{-JTl}=vt=D9MJK-apR;PAuxeDPLIgNOTMXb)|>AYfO)wa@$heeR2| z0~y@=mVLi4s&=ZQ`x_Zc5Q0G3Jt{Mv`nx2iDvX}aJo78Um(WZ4waA8LT*Em2XCf51 z+xgXSy?z0E-o)l?l1NrtI9f#19TmHwJ|HIk>$6OUKbh2IA;7#yWW|r|=O;yJ)`Re( zamZ^S7wcO0)X*uXUT*BxZ9Y5t&}D$Fbey%q$$E7fNaYo+G-k(Vwy~&GtpNwtE57NX z>L}np2iTgBr;kiT;x59IZ1)L#u7(PkfS4URS>fZ5Gx=g$EJ5HJ`jXqloT zG@NXv!VSb?qA3)KT(l7ruS(TpVuI#$T$?qn0zM{42jc^F6C`h#09^STxEtjW2|G}s zE*VNS*b<@)I}t42@m^Nsk+<@qY*G=#1VFm=gnJDIYPtM%^huU(REJn8K>dpyJ+ zhVidaUYsOWHQ<8RC5(0&!5+*dJ9!Y+Jm!u-Lw^U}8_)9n2}Fts2y^% zT=)sNNQ&RoAcH&iVC|_`C-_L3U6{_LJxH}h*TLh(cGSrruxjXdfj;l26RnD81<0OQ zV?{bHpkOun9!?l2nE6-{s>C{hfwpmFl?^1O^vCY|Yz|eLqfuMJVUQNbd68{TpTJ~~ zAz-qz_0Cdo+vJ^p?alu3>Cd`owg0aiy_^?@CN&y$G8l8lj3I4XnB!jERiL&2MkN=< zvN2e`$8&2>JdHMXyjFj2ad06&S4|Du-yJyPdNRulbgw^E3>uWB#GNGX>Nt+?Kr=T1 z7!KyH)>#!gktQjo75E7ve4^h3jRsIBnIczfAhngdthbAAyX+5Vxh%zId1D1F;>~?Q zyL%JB78ZXpiOF^{vJxd$&Gx5`w;y9)^@3K;>vFmXJl73sQ&KYo2~-s!ERiM1Gz|xdqNvzipYe(P`xB09I^$yNeJ~dyp>(W^PGDZ|`o>sK#r% zSS}54FJ2q5CXq$ljP0$xq8r8w$(`fz@FhGkGvWb~0$8Mgw&Xr%T`xsd-+(RVTcI82 z6L?9|8?A9cX}*&l#>Y2_>)wD~nR!4Z)%&Q*^SHt#(NKdZ`vX2dM7@y#b8TSTR%pzv z;$(W7JXKSor-3xK^=qP`VZDgKHBPyHnh2>+Y%n$1KPhzV*AXe^?B)BbG5|M3 z0fnH`W4<{Cl1dC+rEz{Orn+B-JMtd`QFYh4iP4L$_#8?&?mj3ROhGF=89cbA<0`Y+ zD?NDDou==+<}8!!K9i{S*zIbv+!n?yzj6sg!QB`w^&Nx|y96*?n>O-gxXeT#LQ-(7 z#v^$~KqJ4y2`KgP(l^Yh1Cp~0C!iYzlHFtE zP{*ikjid|{@4-!4`^PPdS*PNmc=0}@Z>e2&T|*-+@)aX^-e2KR1w7Y3C=Fj z4(X74Y}9r%OAGBRAy0Bq+5$volhNFflTr=HkKJFdj`1eUrR`TKI)s@hFD*44E{bc9 zN=G2r?9vt+%6D{s?((R_KRzDEmhpjwUS~c8mmG1 z6vwQ1pfS^_+;flVSl`!AEAmmjQfn};5MH?G*)dk2&~UEJccMOb_VW`LuuU?*M>IBT z6vIpP@?N&wZG(c3)8#6u6uL9F^Cz?weL3)nUwD<}T8)}ZKWrsO#-hSiZ?lU9V=% z@%K{s8%mc*L)e6^D!RkPww%E=?6xr5OBvLT$we=$2~!Ra9wIjQPi-Q{yB9*RMf-BJgxQ17a+LnUjCs?)gb 
z)}zN_IUU?ZcW_;lu_oho!;UHva*gve`keE1ZS?X{Y?|WT18?fA9N9%&Fvq5*6Mo6~zD~rbU4p&x8aF7g@#>q#!JBY%Rd+Mxrh+EW!>(-%a;})OPhv%ORGLwU!!K?kTQlHcG&f( zfUZmfh7PP0C<5ny^iDd5Sjngong!;8Wwse9c`>0p$f0yq#T+~cp&TOJcchZA`C8<#-6?DZm|e+) z!qp+4O%zTB=Q1Grkx50b^i)e8LZuqW%5qAT6CC||7#nW04>dnBfzmm>9Se7F;}+y) zCtsl50ahNk0BU;L&p_D{0aSZa{1BWEr_(~i94m)te$kL;M0LXxYm+R!ka7~P;@7gm zy7@<0i@n!`>}Kku;N{tJf*GMiG?d2xsKx_)xZM~t-Vw+!m7g|gBs8I?P=Az%h8 zs7gF%AiblyzMKc^C#k<15fehvf7aAAhX2w zPR~b`_gJaKR)&VlZA$`%y*5;qzw}}X`jdq^DjlYj&)b%EIyXUt`VyZF27iGd$8s}Y zXNgBY$`KqG>Ag8{ZxC2rv%Id7za?Ow9@HpihpE4oQ}x>EDA#$S7+@LFGyQVCFOhOU z#L{d=MqVK*1|INY2ziP21CL^q;mUjWs8er27TwTK37bs}p+SW>r$KIuT9?6aRgy!} zQQ!CRqWFibGQ?Hw4!O5up_nD)P7j(fTl~{Ygs)ZcgO!smi4SK7wFJYYcMI_# z&R9BFhHq{xOT{LiP+Wt+8JB~*#Gx#D@RH-hU8w_}j-^)1gf~%AFS7h*3&mU=3@ai9 zBrmVbWv}Nd$Kglw=C2Ka2fgeN7uypaVM8mn{G$1RnX5Jlpam~7@g=P@VjM^=Tpyj# z^B`j@xeZGjh~@q!YloccYa>V3h4BirubAfVpMhl_OXxt6g}3We1nKFFMG43aHf5#T zpLO>~fCT^sc2xm;i(MwbUKsE3X9Bgg6)QDKG2=2+32vbkQBp;zu3?`6hBV3DtHt1HEKqYov~T* z%9G&tbOq91QezETIjnpvuy{|yRCSl1z-A+p(n$(v)0WqKB@9@dhiC9+>b6Tvrb6O- zUt{(uK9T^L;?^k!-;(9(@sWY^YLDerzFU-0ie-@wyyF;M_BGc@g#BFs&WjF9eQ4Pt z{h6hbSNPcWeu$al$$)3MUxGT`HYOfkkY2et3ZX9R2O*wu+CZk8jt}Q4AX_VPt=Vln zWuZEybxpur3`u3>p6~j$7Qr>`H&Rb#dB9*cB6?UI1=ZFk*1qEvR&mVCBK>2Y%iX=r z#DRt%eL}llY_~U&Q;t%;i^iJM@RcfMRNK&^$D-?S35c4=5ZpI1H79S&nTXL{`G z=hIV`Hc91S%lrqncX36EN-TLBFhKgd5qU@-*PFRF0c^JjgShLv)f0t9f77YcIkqj? 
z%R;Jc!OPAhWlm7!B_jm7$`Y(46I$Wv@k84(3> z+hh3=G(J5gvGVRav4>eJwoPMRu&>1#?UJsB9B(%$yUKOO>khUb0V{OIxlp5gv&vvg zRa7u(+>qQQ4UC`_RY~3k^{KTL8AXRopn8~KvdC3y6@+2HmeR`voMh*#3o28 zFGzn{*~f;?<`{6nt+Bzc7Gb>kg>fYMYH}}rk^DWsGb{f`FZM`=BFQ@WJk8HQj#p{Z zX0oVyJg`o|<0lCbfQS!eGCRr`2(?Pa3E+s*=fA82Pv*GI*C~lG8s&Qt;D-t zFZns3vs!adw@^0ty63eoVR+uKoflXN@-zspsk`vyg$5C8gW7q&Kzi45cl?$Rc}9NF z;7?ZiC6LzfO<)gQ0!A(|98UQI;G<*9Dx*Gqk~bs(HyN)6S>#o zy}8z8f$@`3@_()9wZQp5IG9$stQ49hQr4FgFyl86N9K z1z9FLlvbG$V(eb>1CmCt-zIKn!oy!FGDhG^HbHzc=LK^9 z-;rD|@A;xla8G7*2APDZ( zv%|10Q@u*T&lJ_HXw%wws_r^eR1$1Hd-pA5D7SVg@@4Ar0Xcuw6Wg&OY1hLDaiD$BTYK;DQw@*|D71b&kJ$POjA0B$CQ)QnNMv*6G#YUqBpTc* z#hsN=!WK17WkK>+EAgUEV=h8*i!vbOD-`ZY4Txa*C(h>7;eNA+AQZIW(pAsa_`B85}L| zpt7%0I-4-9Ht4!g5(s{rW6CYvjok9UNsH)zzwO;Q^j)-Ig`1;J@M?1w<+ehO)yF6FnxT&e+Dt^n(^<;ia?@p)E z!+SXs(RiT`OkTP)9Whdh>EH=hZ6a3j_kIR?7wbWiw>AaNg(P zUS9L&wEaO(`usM{c?#g#UCsw=Z}GI9d9ZiRwH0N^i>6;oBMQZ88lleN4Bob8FPHeQ zieu_#W4M^wGzGPO5@yILcxIH#0xHrtuW{=#$xXcpciY`4 z5A?;W>%-@4uIg|k=M?0PrlZD7SLGxk#O&v#>s!7<_SSVupZ|QhCyQViQLN=>^L!D* z2BMO&)5Nk(r@tYev`CyI8{xUSqDfiM>f!U1r1krL?-kB-jl9n)`*RHRJ)AMj3lr?f zbg!4w{38?<`eD|DdLhr229+2W6sOGL720V&pLo+zf?3M9hsmqV)|H>0-P+px^qoVO z+-I22`~tS7PaSl#m~!+IgAdaSVpTU%&RIMBI5?KqBo}qzOGP*yd%4})lPJnf{oDm@O-*W2Ez;!ZE z1oHMbfX&OO@YR)u8a2OGIRS@awjwZ`Q|R zoi_^C+H}M^%xSL2_$kT^b?fZ+aD!_do@HNK*dmD~Gf5IP(oa25=j9MyE_QTmFHulN z-0r^+oHDN_r(ypF!?XC9P4oFp@>tE~qw^FcZ86-OmPs1A)j9k0hsh6udQ?j=-Oe#a z>1@=Wc3*-qxxE&7)vKmD7ROwaob3~>qfMoft^abRDh5?KMh>gLzh1uG3M3uAC z2p73Cj0|!r}j@;7v+7q=roa27(S$eEu;bNTg z(u|pv(CYA=smcg`&Q zp1%I`9NAnjTyU>kXT=h7|91>pnik~h^!n9(anCURrWE1VxAKa+&nU7vK8a&3xSv@1{@hXjQVBQou*h9VI^kWNz$Iww|?yDvA=$y z{5940P%d0-e?GeI(baLgp-TOOtvM6lJjAe4zG5lgo0bGaa%!A%sX%=-C311p2vTLA zS^7GlzDt{E;F+X))YIod_{qo^#3!z!=_tf{%n0$>*82)~d|oN7p5@dN@+w#KA?&dt z?z~4>J1QF1C;*zF+O2Sqalp-6NW>a8nAN&^s_({S+F;NndOgE!U2w6v(sjER{Y6jw zJ3l>c`Yg8Ya0M>xxtliga!|t!w>0jir50L$do;}&t;e=%uJsF&P9RK2aT^59vOOvZ zfs!hn!{$FuyELFMt^@v=Zuo7xC}5agdi=>)sZ}qFL{2z2@s4I|S?>F#N*M?1Z|_v{ 
z#wt?Z#zYjZ^VAtp;|1c(A8TLK7>{e)G*7p68GiS+baZ%bTIF7am3P=^L9|d*P>ASt zAFqPeA_Q1U2b8%Tm%#!0fz4te!Fo#Bi8UN}3Hrz|-T_^^s}_DM3zgqtCazBZ(^K9P zKWE}IfREq{ymJ$6+-BiP4hFsO#Pz_Hv!kJ!Gx)sZMC;ubT~)#07oiBVbi=ErQ?-}! zUDdzJ)AMN5RDuY8Hx)T1^HNlpmy0bw|ESccc!9JX&Ko&kz5t5XQoK(A7lkck`QPT- zm9$bnZZCe`TOC+;l|n}9VA6>_Ymq2gx#|IF94NiV?N;kg-~2h{7;&~-M}^z<^FFfC zg67c=3}?{CFOBUr-k}TPGHO%2+d0= zxMA^nKat3{vsjxxkmvZ6TY|xjYa}8RH}9Uk+3a%e39)JwJ+v|6#DT7!je|U8`>3Gd z_FeFi-WKV?SXsW|_~cD;I(pNa@Ar?YapSs;;hx3^l1?MvGpV9p%KLXJ0nfbP*W3v*-e1>(Rn$a0#(?O+B8Yqxo%$I$&c{f+$1fjbY- zlH0={1IW`Tyy)Dz{MYAy)$CrS&D*Y);)k}SQq)Y|XL@{&>BGZw+V-uttS`!*V^Y6- z!4SzPb+GBQ;0Pwm)!bvMN|QA7LX2UhFaRSUB#qQ83JN-f?yn^Ac>oYe2zTXk$Q53dQe zQCp**G|P~YMmu#DntU9REiQbKagQU0TU7a$y@i)mtBl zqyeHhe8^O6+B8B$DO?lMlMEZy7liKX85@_&vR58|Cco4`U?js8Abdxcw-vbs})(<6%EK?F0oL z^IYFum`jB__KUW*Zl=C$B)VS2lPuP}1u5(ZVB;eNz7&?xk0jBE$7Zmwm)+k*<0*cm zV!GvqJGO6ugdlV7#Abg#*AriJN|5OueOG-wbqP>L-^#IBJ@vmzRffa+eugZ+07|mO z)Hn`)+HANTg+GtM2epYm{1;>Ra7{p|gDRetw#@9x>k^ zrJR1zLUQ1o3Nef)_;NS&^^)N9($aXe4~#TgCx74_5odO?0eFRO$YgsAmZ}b`iBI|x zoy$Kx?s=(&6^y(f%A=Y2UX!9y-fnqFm6z?Zb2IJ9*Z%P0T$xD%>mUjrec2^D*)`G* zHO8bDEys8kK%B34e{~4qlqwRk*&AAG8e%VJED+7L(AA=;izN+i{p?jcF~i@_P|B6c zo(=mMHKe{)Jv$b9S8=zHFM-oHQZ^2y_#DmjYlhmSRkp%>Oo0KSek{}qtRrDDZ@qJm z@_Dx+V!BW#KjC?W7tiJPt|Gk(16Q_E|2z4h6rdv|d`j zu$BMm(vks32=jrwPb}#P0^ZfVTxloMnS5MYU{ECw(MM^Z6k$VY?~y#joEgOrw6nKc zMq@jFwlxWM4LSY-ln}kx3wGKArKCf?QvH|Minw0UF~)G{q{F!aYZfT4e0VK(E{Aot zM!>{pZ;zVYW5`e*-9qQ|Kxc>6`>47OJ#flmcoBc-R05nrh^)%MJ-X`=M~ z7aX0rk}3T=zGy1?w^;%v5oy7=JHwGtwVr)F@+t4Xp!;tO|BoNI&J}Ko8}6CPadTz^ zSWLVs=7ViGF3*g^Rq4@HtjYd#b6eI!hk#i{H(&EX>VD)HU zi2N&x{>L})cXIsnF6Hl+5v6q7P*R47HBNw}p7*py5BMPK@lSvA^Bbpei-FTxUj+U} z3skH?)_3|cHb3<;|G$s_e|tZVIsTw{j6kJVW6Yag6waH~#Oo+#xpb*hbNTgovj_>- z!EUY|jPEhV{~lon=Z7K|zd1c%D5N0%s?%Y4e-g`t5e3dDh z5seW;Sj=*XXNnvPIT@QU!mt7~%d5xao9~Of16zLBQ+!0T4=|^;eC$c5xq~9(6|cAd z)I2W!HsYWgThb1T*%O=H3bf{v)6_a{a+@G0L{Y7h>!?GM*H4*xtNHu)r<+oJBI#mo z_Bk}sb5lm--4(;vL@gu18pE(u?{ZHL;hAq#$APClu**I{{;!bppFQcX|LgGE?$wei 
zso#O6h_W%qM_@^9B~`H0;j?g&eKwKj$A`oh4jbZ3TB0&i`UfL~2@k41w}=ppPMo7m z@+y(V%>R$L$yD~lh2s}#Es~221F5KSAKk2uPl%#Eu?>M-=dTU}W4>y+6Pvst(VeH3 zml=Io638|G`Vs%F57*;=qnd2E8>5JG+C*Qb!~I&lS*$&;`yi8 ztI0T_)b%Y!DmSx!hM!sx!r{4Ght@7|Gy7`Y3&;zY^9O4R{n=f=961iXnAzOp#T;=! zYu1pGz8iShbO>)<-yB>yMw?hwVcv-#QvH%>*fc_78n~2dg39Fz2YX88363j1r11*< zr-pX#kD=YdOFMyTWEDknWUo{e?8#jrVr65I*!*>$qy3(e{sNm-UvUcQaT_lt@?%Dd z%I!F+IAboj5?$t4_Zuxu!=I$5Xa2S+7WPk1ric0W$t+~Cj?Hn>dlyhhO`H({vp=L{ z*Oc|7&Yaln0ECCr)Y&#dCSGegb)Q41d1H6IJBPR6ZYAe8p&Y`@#hI>?rxsSB{fqsd zfz4gNoIh}9T|?3~_WH=1JXocLB57IpWUVnDq|w$pS8+-nNV>k^u|ZD2A5;II`eSsI zNyH%q_kE2{YAj(Ts@jvJ>7P88@BTiyv)%1O<{ZN2S6!^nR-GCGH>FqbKQ#o7KZbC7 z^{b--QTEq+OU)*y0VzCE4*IXa_1}wc=l(#(V*!5Gcw$u=nPh{<|1)r`QJ-7fEV@2; z65b**%(`j+*_qw-JM%m6l3(u#HOhLU)u^6L_2Cp6`!wsb|LN~czxlgh%vs~%f@#}x zaFvD=PBWJ&HvP?a|MV5S{*68tWcf|wfHde~UoO$~|A<}x|HahyN@qLFizIfU9cNmf ze!M5=T^z;uGBKy76xo4q~)-1JlCo)|wgoR6(k;4Kl>a;1(JMVE8Hk#fU!ZZ#& zg^IP8YJ>l+K>Ke|+2K#NH@b^k|Ohxz?aaB-Mg9g(0+G9-6dPCYphhNhMb*gyTI^uLi~D#*Ce7s&sonx}|-^6ZIkaP3P4*0dfvzXBkx0v zh;>kw?qKc&gqj@I=jr~Lfqr-$_#4sO(&N@rfs#{r-RlXT;nh*blnF3F2OYB^W?6(=9s5IgWeKCzj3OwKki{crLJbnj4IiKHA+-!+`g1-?m~_qKLZL5Ynb9hNqYh z7eYeMVGg$wxlBq7B#(AYhY4#`?A1$!nkzp_9d};@E)6 zv1;OkBeM@*Mg|93fP|9?y;2>bjd0YwC__qW-P%X9{>5J=vz-wRH^HSiHrK=b>7WWQ zLu7_y7$Xb!K!Ljvh8v3U67vXnro?!C_h%r(%q-ZWd32Ar&1B${dW4-&Tfc4v+uQ34 z=7l$#D;&9XMNV#^EOqR^O;-!F2wKx-T4$}bX^QT!POK-kZAoerHt{xWFojy9c--^r{Se4)8EUyVK;Ark_2o%oD&;TH{hzyl7R{%uLVvV%zy% zAXh%{!9m zW@3fr&0O+jtZLVq>Y5#+N~f5@3OXC3b$lo;mTL{!16Ji9+RdPmO)F^S!+p!v8E&4B zz@kbPoW#+P&lRt&{z1OQg@=3|?YmyrU}6n=X+KJgY^ruANrdBV{59raW8Rpn3~SMK z)z4STy_Dee*)#|7EqvUjK`$Qzu!z0tXIt(9^h9kqF$mB~a_?@sthx+CoylUVi~OXJ z8j|kZJoRT^b%4+^Tl=c7A0bS2uP?_%KAq0BVWtB4tH>SD@H@*L8B6PkCR?XUScP_ zznx7{GhWExaM}#8{RX6oNZjMdU#DS+Dag(b>p+pj0=t?9k5y|38k ztM&9vENr5T$6`NJKjNrdQ;_}$n_WYr{W3%oqJi}}npH`UB-N?_jY7L;xR7E2Tg2y> zuF}xhiSx8Mo6t9CEpw(!S?>BQmW#fq#xaAJ*YP(};aSZIN1|+u8orP_KMxrL3tBQS zaJnRz0hqR433?qoy?W0$+R*);cr(7_ISvmFOF#$5koeBq^B!2_o4LEpcyI#SPe8B$ 
z4Lq;R(m=3>rab!DKn6-HNU#d}9TUWe9G){nG(m&Y$lq3Kwxwn*3b~xkb5-sMI&s>j zN#(AhGZa6BH9N}fQ1%%CK}ZHa9>~tjIg8xJmQH61Y$T}}=6J@x^QRolm;ILL9&S7% zRoKx8gf&+fH0tDNpGFyZS>y6c=}dQypA%zm8?>ZK6Y`$DXC%qmoP$xw2a=*L=TN-8 z-pyYCEgul=_}|rivG#cR{*r<3fs%KAFE)uc4c5-Qqn*|4R1SCr<3prOj5{@$8eKp% zb+^ZD*Ng!@qp{oHb^Ib}V@k7=Nx9NlfTNWjC{6q-!kV2|cep{-f0xWH@s#hU*J4J1 zTEJ#K`dp(sNQ5kZkR_WNJY-}1A1fXAzd8X}x94%Hx*9X(wW5=Lf1j ztZF5FA1zhZ)p*haK1%xZ+x3$qDH)otvau%9O~B4vRlCaiE=zt`1M47Vc=y9nVZ2*X z`A{#w!WW-M$a9S(N+Js@4M=uLzb^mYpu{Lf!(TF`p*S-O)or&s|L|9Jw{x+$`1maQ zYjbMdn1`4%75da?=fz(?`rmS#0ViuRhld->6e$`xM&KcLsshfd?20CS+V2u+in|C3 zk8;rnlB2gkU(pCwyur=vV}hIR7M%KGwX7uJP3cG7{r+F9>XdL}WXw$xNAkG0zLY^B z?yH*0d>}|L_S^BUW!uB%{H%(~_T%^_>%lmE_mN@#qSP`j76@ag5}vmu20LcV3tf|G zBmYZm@W`Ki8_;m`o{1W=HN5C+-R_a#QmfMpeq!UOw^kL;m={KIiM9U~)wb~>x}#!6 zk|{ac5KTRd`LXhMF>UA8mbQ1ijz7`dbvkB^x2`9z(;RjTsyaU^u?h{8LOfD*TTfU1 z?{CMozV>tG<`B zisz(1frk5HPf%DFU$Vjo#Rpu}$fr`ymMAV#@{uZyEWdx)+7}XQ`d^uMssdoiCDnVv z&VNAp)ij4Iq__Obifl)&5k?1lgBOBk~x2Dob6W zusaD34l&%V;TQ@soD_@;xk1n0g#smdM%6fn0OD@AOcMSOCYeEbw4THC$ap7730z<_ zAY3M?K(VVRbOX(rDtSZH+<;4`vEpv^63it z<}8OEQK6hEkqRUvNGZ_hNynry=ghnr1C3Zr19y%YK|z-hIYN>(Z=;h_*npq2AT_}K z-0S0u)ie`xnHbev=scHdmC!7=lYF>SWKqvgPv+ESo!MGCwp+4P{mDe@@>OTigdv3u zM84lrl2F5%fTN))<1B5lT20-)0S8H9xX*@m-Ta$_LH^FD| zxHh0!E#Sr{vOQn`|BJ3^M)4%_{!m!U}uv67n#Y|)J;dY6N zXfct%!noi|;ZTcj96O0ZMu0(Fm3Ip>ZVV-4(s8L|zHnG*=L|t!z)4+X zV~<&NjhCfY8d?o5R|2Y0Cs)X7CJ*}hA|1{ny#p^-EeV8=uPh5ShrII?6q^tSOp^%| z)gAo;D!6--ag|2aLqh?_8aip*ou8mkeqzG@D3GWvY70x^!V%o79!c8^% zFqTD6CE!fhJ}EOtswA+ER(q>Ycaa0u3IMYaV!81KMYa8qoV!v~ynArjFuTy10Pg`Co;Im?9 zGXUo<)jMS_HP|Vf+G`gC$jJ}m2FoSM3|;pJ4$CeN$Yj$f#e&ESne%+)J!O_x8s>nEOe*AT-s>rIP{P*=BON?!d)j+*&xNx%+49 zsQm61z&2FzdfWii@3yEltVFByo5ua;zQTKbn-S-Q)WY?XQwUcVok`MMStt-oskWCv zlpv{-BAk+3xBpCjRH}>n$l=mz6oBq5Y_5|S2F`V$K4BdqkO|x<^zpp?JnLw>P0oqP zFoJ5QcTAs|OyJN-k3h{!hobR9L1qBs;%)ksn;#%I^ZON1+M0C@LqK;D*4F@9zNk^q zZ4F{xaGcI>lha<@>BRiwxO$`lXD4wAng<#-xLp<^@-PHD&qHGKkCy7J%m0potoKFR 
zN0B!BfeyD06S~#G$~vWR)UcYkRc{9y4_2nw=A_o?r*4&PTFb%ls{d}sWiQv9S& zJT6lZW)nW4IouVow%|5@e4^;Vi@|iBgIKjhpyF(J)9}E)O5R_|EOI$4c2+?cZK<@> zFRtCnR^LHil{K?=sl6N1_B$)U%ly+6+c_^~?>18E<)B!kFckG}sBn!FUa)&gmpFUM zN6qMl*`{d+QpuK3Y}}Wef0&Nqu~=MVOiT~(&IkrHxLzUMtLLJfHwOy(wf2q90r`@; z28~QAV!!m9u7$@oanH7Y7Qb`gf?f6j!Wt_8@mZJKgMe#Up{?r@`~5^#z}$O*+u7X+ z)oy#jh=gicbB~xekW3Gj>Rh}kHE484LF|T^isC>bRkgQp;hl)kz=t&-cdY^XBKIPq zoh3`Q$OSI*C*d<4_ecWmhb?|7((x^llrYF7@ zn`lD9y7CU#^;Bga+N8qy=(&FIn*TV%>*tuA&hb5j`FMTV%brzJC^_p@NwUo@8jqE_%%`~qhE3_?|ZP)z5yx@ zJHg-(qmy32kM7i4@tN8oJ2vd3T0vli6qPAhe|Ci%o+y5woLEta%J)C{bWj=YAD%iL ze1C-ReTQV97&jL7}wxSUJp z5`*vGwnVz=$7x!Q?aoe0qyhWV5)ru2n(2@b#~BJ^{ENz}7LB8ug~RK%5gX4huqqi*G%3P3K>p)Haqm8`ruwb|&%|G_-_UZ6{6s zUgNveBt7JEyn&;x!>&p@lDyjVDtkhx0A?!E zzV+w&h`Vn>b3eSrcJF#%_#Qw14=ovoN!hJjEFPcHI|6$^4y#i-5EGF631_OFJY>AN5vC>G(>PicU-K9V0*BG>oljR@ z735HqufeNtMwGdPR6$Wl{i0oC-RdA^(NfJeo!<2KYFTlnm!bmJ*|Ea(f9A;mU!cK7 zxM2CUSQ2eniG<*8-HTu+C8BCa+W(nC#Qv8)|B`>PF%1Kkl%ZF)Iyxp1#)W-A+z&_(?BAtKfRliaeT~ELyhKJ;Y2y zj{6Tn~lX35oU7Nu+f%~o%C_GD>? 
ztc?+3ke<1B?O{r-r$0`Y`xMus!C1uWMB6AdKy($uqxhQjb8ZfhHtN>X{d~z6gZ*$$ zSf55?gn*gPskSM;p&GKcE@gC^`Efu*uj-tL?0WGDIiOyc=C#`-m(pKrdU#_T?A!MV zb*=;Lwg|ZJgM&rXS?-TRJt+!rRV|*ra-uyw1~Ljc78}}oJK6X9#X)j8wBGA7%e9Wz zIF+9hd{@F;92-~#PZH<=fKW4%*yqG_^L&IN zgxTjXpRv)4&tj`kQ$8gXTw)d^&A{f`(5N{G* zKe=A#KhT9lF$ym<#Oz7bI%mvLW-Q%?_Szpd+xvVf5tx>QZj9j&Ye`Uw{-{||@MHdsr+P0h1xLuhmlm?mQTF908tx_c* zMM)%$xp{uwbjT5Pd9DNGg|vQxb7$$;Ctb;^Xm>^gc?A8q( z$~-m=M#^&9t!r?RpWHM)+}{qTPt&6fX|xBMPlrEqX?5u+N?Jv9_wgQmZ5OC@vq|t3 z^dV^OYpQlxeI@Eol2zrhrqSwsSV_j?3yOH@Y&oxTFk5ce*;%ki9bA0=w_%y8;jkGy z@8=avD>g!^o5eyxM*0bM6hWPS3hinFdW{z5ow5>x)Qnf<^J9i%Q5fpDyE`nh8Mi{M z?1!_Jmdg6-OR&{Lh}XAe`&!v_GqI!6uZg6tn_*-(wQ5G>yxTnb5BFFYz1F=91(!WJdR^!q z?67F!&yF#v0=fybuvj`wb1H2Ie}5C~-p^f4goIRq@>V?&A0;09NI{KOZ-{7CoCxAV z)_hb$SSn7TMlq(XUM`!~pR$(VESi=tIBzwBy+GNfMvc=lPZ^!f5rik7rcs!W{4ute z2_!ue4ScV^cYj9fha}d5+;}FiA8<08sP~xn_W$(E*zH9OjNjw;-XxaXPGR*pi<|UF zhu^W` zbAbW5BPZ*;t?KlbSb_|`b{mN(_@^ng%6v?cRfoj<_6h!vP?q<^UG>h}Xz;%H)?a9o zKJ!iRgu7|#vVlX$55(UUPMkG{w31W$DOpV%bH`Y_SC;bNl%mc2`bYlH+99pE@b7K# zPD6=3y~dU8Fhx`njx%8$$*09ENrR*{oDo+>(93tBA?n-d1&QDGw03rlLy>2_1MZf4 zknMnhr7G@%D~f@5ak~$yCrkkg&Tsy}1KP$i+UW8sJOJ8)RW>2}0!Lsrci_$8mluf9 zjAGEi;RS(0L09~yHJgVN1$rtSH}?lNfw>BO<#(khEmk_sfoK|J^_3f+v|V+Xi->Ib zWu`s~Z*RyH>eV6}=ZjRq+m+HC71slGx6W96ciu;M4%@PNQ#~UH0utPFaqwT)M4;wS;N(_Rb~O(-kL^i;#rn|t zhyhiyok&9)Cl6gB$`6yEA|)sL63& zXUZ>}?zvu2FF2E3CChJ2N3V#lyB;=5d+TFj`{~t=>)VHYWN$ z)%BH{se_6@+E@C>5icj__w8hQ0(Z(Rc?s0#h0lxF8{TZTA|%40pV{tiwVaadE0{M$ zdNT#=vRtAD@E5CPt}6`yhtO(lG6>YemYw!xh(JTEQxg%F!ns;*(h;*8>f?gG@JLQl zROR7q4xhzLJalPfL8q*?E1C|2N%wm3_@sNqqU};}AA!5mDKna)UEU>tLr&h4nx^(V zj;+-74SCf8i=Mn~Win{`t6*4QytWfO)S9D&xIIY-~mfD zz&uCwWbtZ(kFmnM@lVZNNTeya5ofDO_{Xg=dB{S?DcGNjnrMf+(>$52L^ZEHo+JE) zZLxSM@o`5gMnNDOV}+G7%q1_cZUi&7qJ=o8u>{m;B;0CBs4cvlKK=;&m*gdjQ#3!y4k$_M zta*_D4sQ-_wXo>D2uS>F*$UHi6zo#bM_&O_U9X9~j#}HAG^}bzU$u#yIH=N@XeMhH z0CC>p(=^f#lsfEY(F7Nj`;v5%YwKI2jc+zI#8Op{;6z^Ypfcsib{r|gU}z9R2iZ(O zgcQ2M*s-!K;sMzlU@;ojb^xlHIPYpSlptIJZqVOwEuiLeY%|6~66f3%& 
zQcIR@qujX$G6+g;Olg@8$mVRe-bv;m>ybSzu(Y&1!L^W5xJ5rvK?B(p&6Elf0HWG@ znM6=##tBgH0S^@3Lj{Jq%*Dn}bf`gFrSx6>Z?k;rQGK>?(^_qOJcl55BXGUW^OPMG7fh#CN2ARhDSl!Vfv(b?LpDzP`zhbW#(J z+l&9y(s;Pe42*5FKvP`%*A#8{EL~b6_nu_-P82XE>@Oq^f=Y&@2qa$;;N9rTW8h+r z07uYMZq?y=jzf)k;cznc*AcnS=Z4SIoL4nVA_? zWE2fD{_rBEbgm<(@cO=?Z@6FY^Wool&1R2`%*oT#H7Az*!6gWZ=l`s{B?9B7hB8`U z<6Wa7RwqYXK%+xIcEh1-k~eAL#wYKOv2JHdCj$HN?n+GrsrI5=`m0Cq?&|oDd*&HL zBcKnCPp#<>rq|}Ox-qRQaV?=&Dm;kkHpC;xMpb*%qT3cUPr@>RT35 z2nBRO;&vOhs+o89?5rE9F4*E7Q5TIfGP(B`JRPppWNg-p-zd!UoLStgh}109t6QOKWZl6job zZ~isJL=WVFb)v+BA&aaO&Vad zIXG_)*<=+m4;TXcGgaZGK7)3xUmcA4M%4|8N08 zh^QYKL2^ZYcEHKM)GS5JzbVLJZ>C({MYU$%=GWUS(=PDuH7Y}zY_$%3{4;)#8IfB7 zch|%!4N~Ah;{W0T>jU@Bp850ht#;c(r=s9Pb`rIH7l&$Kofv(tal>3tD5_?cs6S&T ziP4>b7h{{(VF9tqo`YbCs#^7x{9MNh%%xYgXW06lKm5S9{0^S@P#PVFGh(2^Rr%hr zvxUWe^jprz%BKup47n2}?R@u-4}beJ1NX(tF#?%iGs%9~SWsJtcebZr*?D* zeWq&XlfIEtUGX&oQOs2)$$UPkZY_}P`hv{Vv(sqqPoQN4oV%1KG!-U31UTxU!OC=0dr>LTkyZPe$V%T<)~?)sltYiF{Q(=s zKHCn8d6NQf^}U}Lj-X(J&&dUM<%66bqBDp7?q|1d_G6+SFT%xJ?LjZcf5R;p*wItp zPA5DjY*RuwsG3FWwAdFhC!eTy<`YDL>uX=f3`a$mnR?50wRwf$TVIhgXZV8Sz+I}_ zn#^GWZOx^1Z+wmO*QLp8&`&o4bP$VTs?%%OqUnx_^{{*2$mN=~e0Mum8< zEmt?KkF1D=b0Aw0zYCf!`}mow=uLt*52!vu)kpsT0+Grg>oCUHX^D~w#+05VY;aVw zy#AYu7$@^hz#WQ9Dn;R@ztC0&Or&39VMe;`HD$ErBkE+iUGv4dx@%1XEzjaG)(dZZ zeTROh<%vBas?J@DsAa{z$@uMp8Et;3iyr`b?BTH3p+1Oe)M#Uz9J20tgT!ZB+*q#} zeoNz}FL_fm9{@|yHAxw*9-(rG_=W>6mh6(>>TkyLMtLKSaX9xQE`l}~M}>2Xrr6o% zBB+JD&pidvBZQCEtg6ec67P>0BoY^E5je-D_xY2Xp`qCY{sO$+f?fTEX{A9YH&o@> zaHsGUN#lJ3geHUXVf@3L(R{}`Vg~^UR?Q&Cbz1|SmERuSeRHN-OU!^a1{_lrj;gPQ zGaR>R8Ped8znj90n!$a7y5vFq_el&<@by3nd6uWOX~#YhHQsR__a$8VY`|_egvVUw z99&H-rddb_y=@moYDq*$nZK_Q#*lK9?TUuXd*n#ra8#}2ctgeqnja^-3`t7L7Z?-} z8-|m{EJ4#NH>~&)R#GFhhYgCeN=Y8^AWrX^+c9Lorn}GI0nLj}ZE)!Mx35-ibx!6D-O2)yv;PaD#5n?Ii_V4w9hP}I-+YIDs$K;t`MyaieJ_puQ$fRALW3; zh%m93SYsb?>pJ0zka8rG5PIguDFvHXbfYGHO%PnIz*Rn{iXoPnD=b#$B`e$rrgDsYYwELd89N_n^%76jw&a z^hXgaYS@g!VV9JHW<(;2PbDLs>)p4WR|Pbt#MgudGLIO04wI~aV!2x1U^4GKHav>t 
z5$ZkSV`s#OuKZZ97u5D!&mSY)+9qpW?-F7|TZz5AS@+>OtNS2zc?K1YojezjdG+Sn zD|mL-pBKk^xPpSKM9lNOMAy+Es}5~Ku>V_$2)Zrbg72=S(33zI8F9UdS`3;SM(T%y z@P+XRoyyMWL)9+W~BdMpTX8ANxJoEKawKI)tf7XB0XhB6Hm>!jTiW5r6l(5e)<_|yN2QO5MU_%b432q!=GyYUOAvYWajjfGFr znY#IdYqwcY7g4yB-~RP!0Zy__dPQ&32QZb?osCj}1kVfS+kl<<$_s&x=G~^oV>V~-AJH0hSd3M%Q(1vd}c-emZ;-Z;~#fhhq2fdt~-&j^q4N${0= z<<#`;Wz}O0`DVq-%B5fi4~Gz3TfRKlF@mF8HO=}(Qq@&@N=8%#4e*?okQH)cd<5fNgXDZP)eHP6i~Up&EsVDdWQHLL zHjB(11BrWRblx{2Zr@(}*>st7UVQIh82kK-U=-j3_izafzvO14wLGkgqE(7DwpN9= zpI(ZNh?p7kORb5U>Up>FG{;=hthAW)Z0}~2IVFgolbV_ucDy!i-B`aI9i7T_{hkFR z9mgz8%1=+`kHIbg1~&m5|HIo{(m zcQXsQeBY#MHNAokk1jT=pEcsMg$tRZu6H75a)eBt}(H!<|^I%!$g|WBuCPi z4P~=ccS7`mu;U9x(2{la*r;H!Z3rS*LHANr#b*LLwmt7NPLFgj`)>qMwX?KZ@JWDx z=Hh=@F`E1MI8it%QdaKUt^di0^pwnPax%p|>c0PGw+Y@AwWtA7{S6q$CboX)hSDk7 zu>i{kxpcz{vr?@KsFcEMY*Q0%dMI5I40!!yFPFhfpn<+d|dW!u__8 zwUJ0ChdcdxBtMO?Tzs5w_XJH;AIn;UiRA?PZ_l9q{n;# zcD&ZI*Qzd4-lN)KP_oc1ceA&^MR{_kssnkpOymxhGqGmw3YCC#9D0!9s6X)7k`xjaD-CUr55Y_xDgrlNn^+{ia`NaI=Q=RwtoW>&0*c&bm z=O;5O!lkVM`8Ct=b+#D>2{FxSe6{gKHG>F*BiVT*Rk-;hZypTOa5r!DV{do80?JR) zTV!?#dvvZQDdPuJ0=P%UW+KU6aM(PPUg;4@O{w1qg@GMm_<0R>*o55|u^&qQ7K+TD zL0&X%wK%e662PcH#{;Dn0Ua)I&inEHtb}@PTUxe?|7>W2dZ<@7YWmsiR-`qY|4PIo zg4-`gP;Q{EHL^98xa-p#qFesC3_xkJZ`c12xzw|3ltL?-)X~&O7b})XjM;f>yZ+ zoz&So^zb=;hs9Y#L5oHS;yjI%lILTH&HNmZgLGCA0<+CW3Ug9~TF&GM^)2VsC~8KO85m{GFuXPF27mXm2OV9!JOv^=w^>~nUy`{K z@;|JEzdqs22CJAih)RD;*V-R%|6AZi6T2;z#`cNO!t2hvmu%!nYwEV@?xbI-2}to2 zW&!6DU#Vd}KbahncA@xaqqO3d!=BiuOdP#ctjNe{)P=C!%yzydzVFX?)G+Gl60q5E zahRfEw4P7g;FVWGI;Seze0deVcvFhFHL7)#3LQ{5L9+-60eCx|>Z|d#B*dbQukX>c z)QWU&5Hu@lKHj@E$;r}zD<@spDL+{^^~|vrdB*$a2c$a0;WonifT6~|;NU6qpWH55 zT_7)TxHGeK)@B$_a6Lc#wBCE?jOq=Y`lzQPvl%G*g<~q6IHKuo!Spy zsRoI;!B1bQ>Ggyq0P6Dv6(F>1tez9>wMqW}X8~*iC||Cbv$NWBsK#hFsE~ydO)RM@ z4%k|Nzu<@5fwiJg2M_H7%*M#xa0|r3qCY5Q=7wZSUT!*d$#(rk`b3TQf5751`$QR6 za>5_zgE@hXpIyVhqMd%#e587PA=Q4(kS)*}?Xfhyj!LZa!Q4sU{S=wq>h?^3V3BUm z@P!clrutHQ_>8@&&oUO@{X3*Y{B2DElG^uHf?Gtr@uoCPgz(zwCUy&rgX%@xC3Wj> 
z*q-Zg@ziD=Y~Otq{d7DvZ1zt^{Y4f1XS7{*Zl7NyZL}DclzOksVv%c1nyv~q6WCDA zc}lN~=k&YRKUDlU&0B^O5{2PbEQz-}SX-Qia{tm5hDouAI7zaeL_Zh*&(IS)Ok>tx zuSU!R30)30XSjLIUlJj&MPIZ1G365Ml?wp)^DN=WmWW;~rFKgB1-n12LKqp5g7;FP z2d{P{MLeE5c14VkS`Y+oG(pR7j08~`waez`Q+TV)am#lU$zJI=tw20BBpBf}PO0i< zy^Y-Z4Zlg8oVuGs&yx3vGaN3TrTSC8Zl!a33{f$^Emexf#bA<@r4_vgM{vQxAGvYk z-VqY3m>rXS2Xo|10R*c#W^U++|cG@47O zK`Q%>zFxduq|rju-&hVonPa7So*IIFu&N6+R~4)66&y#IDatxlrpLUE4$umLi9Kw8 zHrHtL=77pMjp7D}_w2r4W|EeFw#gBzQ)N)l#MLA2C>$=;&s_`AUOJ~C$|!xm` zo+b@_6YagOs2S8n1*6+gU2574dX-^#&hZAttC2T0{*2+hQL0qc*#|Wv*rED)wfOra zXG5l0SV9SW*+p$7SYWJ-JiYhj5re%tF8au(ttz>jaWF+l5;ZNgga+pNd9F8Rkw5by zgper_G8vIAcQMOP^cU|wiLDOe(!V8k-cowBV%_TV@g`5I58dg_d)^4V=325c-;~HB zczs+QtB7ezwiN14j%7mm4>E>pCqr?|h3Der341&OA2}@B7t+J;1QubQeK>5G-r@bu zLyYzJ`_{I_RQFxr-ITy`8)R?$M(^l0a6xejr;aV9@x>g!TfdHL*u{?>#XY8=c7=;p zgJ;+Zr|H=In>SP+yMVgbT&epY=H!J>`rV8Km@WIF8Z0a-g4zxX{l{k;36q-chc=m+ z?WAY|H=Ko=)Wbgsa*q0^c~!8mJ)JhP-)r4Y@qH|>a>B8Z`O!>?NFmImcIdjUR@S(# z_E8#4pKJ$|27=N502?WTS>IUFBrD7RS9Vq1DKC^_Ttp)nQ48Ztp}_YtB7=qplr} zTaLmhBnyH;gF#Ev_xSBjpD?(%zBw4|jBA6Py&Bio#m2ZCWqgef6IP-@(y^o9;&L~l zq#Q>Up}cXmt!=(P0?vQw)8xp(OlF3JOR)qfxb)Hoq|`Gks*;Jl4I&a{hi6S^wVco6 zlL*?UExo`L9rzTT9ddUjvs9nay!TIYo$=m7k6g_f+uAmIWA6nq|6{)uJ1=S7g5{LgIZ^T#Ye8T- zzTm!>Pb+4~PiMRN5ja~wr0QcGi*|^!-e@fGt&a;BE4jy@lk6nKQ z)39WdSO)#J%z_~}Dm9zoJ@X}D;!*M18Gt@{$sIfV-P7uwn%omrapX~dDkfkts__g< z$u+mov40`iSFP30EHvN;1|-5iW$ZVW%U{{LU@ALKhMYuoDq$*0C zM6)!(kzwW#sce5i4Yt2h)AtF}raJGq#w&y1SyuB#nd|x-Dm}j(*!#vbD`u~Ua7Pw6G?P$kC*sG|Igy0hmQeXp3mUtnOXEU<=&d}tSK@i`tAF%gg$US zsV-Q1)MNW6Vb{@>8{5bUM+Ep~Tho@thL*+#3?f|z7|z=p|+ zWsdkvw~phT&9q~P*g8@K?lUOwC|Q4nEmseRP*-@T0G?(y>NOAIWK4M$G9jnvm`qNsgOe&2iEGYlQ>$dd=D#qtaiuJ zthRnAuN3LFbc^Nu#gIWB|JBW3q~1^17Eg=>&PoJ30X?mWWP2CSaLlmI&-^qSh0<>f z9V%x?nuohrPCY1OTg*a5|Egx^djrYMVAK5s^OmvO_mQ}mqzQn|xd+t)C7p6M>{F6s z|D^RA)r;tBQcBWGz4vN?Mu_6g-Lx73f3+;w;ZU}ojG;#;#zc$98Mv|z9xw^>uKfoj z3j`hSyvHG2PvHD%LEKwDbMV<_0@276U2ueVSHugG5Oa)YZsAuf4b6@Lt(1a1c0W_Y zhKU_oYF~aIOL%EQt7l=crH|@uL$*G&o@qi`E 
z;nGOT`opW3i$r{C7t-`YJ_|-CSPT|ocfyaya}>RK-($!)3^^LF6maU-qvQTAZrJ-A z67UHl%Oe>h^aELTb*20=0uH!tb9qwBy6sVQo>(aWye9+Ios>ZsaTY0u39D|SW0Hn@ zL6ymce~yCX@|8Io%K#t-a>oNFg#Bqn>a=X0?G8e^3zqG1@Oz1~?kG7;RQUkiQNsAV zt}$uJ-3PchEZ(6GDrJp{new5;9X06?-d=A)kcxi2161JP#K1F(wHFJ9Wcs1hfWlCt z{DYV9?NR+7u$|ug4he*n1l$+Sa%W`$_c)@?t*0tqA11HvGLP4;Htd3WM={v$R05-N z)iSAJm{PqzGK1T#Zq?mz-Me5NU-voZiC|ek`hKOaqr0rAi_y3X^Cj>YT{x-gy|NO9 z&9>1>*&>?jYB=72!(~qAR~%@)pp>m+vB3KiY$Cs@!Y3P8kNPJM_X1wLma1Yi_qKE8 zCI;e$v9{203497+J~_bbN07KgyDR1h6t(jA*kyu1J!8hDVeXOn6!8yT;@}bz$gg zjM*}hx&G${neaU%32(+1iSKYo{I0E!suL-ijmSFE7tI3D_u@|4V={=h*_q$P0e>AJ z+;1F^^_UD8;|Knvlpk2V4_57YJ%Mhfm*LDV-^;st=#WZEPJYoTc_%>RYr+`2<*UKLfy7TXa(_oGqJ&ebG;n1T#`rL@vPXEK6ULFvx{(U8mB( zx<_F2i=tseGGJ7)a|o@W#O@)DxJa=OD4dOl6$U{L|Fj^P0F2h8emL3eR8=GKm3qi( zFCzhT*ea-h<0?545GG1y`Ql{gz4_(eN@L~ZNo3PSPb%{~#6md;7dIiCzi_|_|8}I)qHjYPr zyZj^HZ`N;*oSb(=-(3%GG9UJN1iWZjB>xG4w-0^pzDzi)(zcxZDopma?Wl zw!z5)`@ZaJf(ZE0^QlWE^|&!2g1hd_R;>RR6 z9R^}al4R!l$&+0t#5$Ze=*+-C1XK#r%oWg#hEyn z`hIl>(0-Gn9X%}BXj+`3VH~+BsVr7Yk9rKomy*MTEK$@pPK`GT$-zgKgeHG%3j9S? 
zuV+ppGc}`IgLlIbCb39I;Bg#hw@hS1bKfq7XSV#?l1Ew>wpdk>*N9(k->Z${KNkkG;F*P)jmzcszsFRMMkn}rL` z_#Hi-zj!56wlH^hH^AR!biyi!E?)0D%-chU-(UWQJPJ1_@!-C^x>Ep$D*(NFE29j6yppH+<cLv_wv_ zoP@Ir_en`_%=9a5)rt^F7b+=H=c|dT_A}o7nU?TeJX^)A&=f1@f?(XLQM#lY&+{Lr zOh@GW4~$fWr;*PD(&4RZzwjWp|5H*b{*@FmaJI7-67O+2x3^gC&mgmCLifAVA!gZb zBvCnmhjPy_f07lnJob`BXei^+6SmoLpQqLVMCZX=t=l-2Pd~Xu0_nrwNBxTgFsomv zlN!Fw5WBvspR@*|*{yHl^bJB8o-Ii-Eqy07 zdtsw0E~+iX(*p-Ye7Z${(gv`_$rBBgb?VE)<}L&usTQQ}1Jd$Dd;3FS=DKWcxF?U{ zql{peY!Rl>pF>f-rQmgRK&(^G3k}#uS9^)WTbGR`d)~COUeuWt z6*XJ7dK(7jhOAx#xNZ?=`p|nc>$zFWKf+bZVZ4R;E2PvqidZZI{6E3K3AcA7iNiBaXU)3ug1=khGX+NX0o8yD7K>0zOh8ua+72S3A0WSREXY0A zs=3Ibq`q{O8Z{lxL)xG(bxS2^3Pp%*Q|5&O@~C$SxDy|-b&aM?%#Qp5Z`PH9+E3pD zj7AW^Xjp@OQcc!C|Eo0YPP-<_2=pe-LFo-u#v4tybH!u$Q-O10%Mm=ZO`|?~i?8a<&xddA5M(h3L7XQd+)7X}J+Xi6x z8mvtuBS`yR{$8#*^fshEHj2+o=Z0PHDX|H?z%MSn9Accu7j7^T_OrXRBB~7ZSY98P z8C0)>OB_*CNsdk5zw3xKgjG5xaa0N3XYdYkNsMLwa!T1R_2I5#@pT6w5erNli13yEQJ#y?n-)Y!4m?LnPbzJKZI?glA zUmG+6t_Pn%&UEJX>WVh3s&zVU%1HfPCG@kXUbnuf^dHN#8fU;LW+v77Ry4jYfT9rT zcIH!yb~)JuBOQD1(^=|ZK}P~v%rmOA(U zkF~dqinC4DMibHm3mzbZ;O_2DaB182U4(C~A z2BaU}N<_gGpFz0rJGc|x|G_4_zH*GPf=qH#JtF&#^^YfxhLl-cH8V_O;%aqzcJe>_ zH-XIc>7fi37&E3AD%`T#SsL}i>>-0XCzmpJIB0Dvyiw#LetGUT{S=YuTlR79Jv~(4 zkNVS!?*z0PTyu(5mdB{2bO-Zp;EFJQ=7Pmtakd^sLK+YG4tQveXNT+GP5dO+f}wye zU03GBRlbs2Mj+b^se7UeZsqu-d=!X59g`DKU3n8@ zo!{@BA;5nx_xe8R9qu+^D*EP=YN4%HrH`hd#kDq7#UNP?N+*c7Lz8@6Lv8LWKSsbF z5DD-%R;w$W%Y!BV%VH`<`Q2IRtjnwnQ*KIFO{YMixl}>)R;8Bx^Kf!>9q#o#%c~@L z%!^-{3;U)oA!ji~wvTm&yG*y6ak}XG%QojBV{_wHZ=@X7ARb^q1Bu{fS7T8KFPhGW z{^UgsqQyVeCPk+q@?t{oNDa1rMEv!fZ^X4A6zuy(O-a4_Q^t{$-sp?7gpw*l>W>K} zAU`A*ts@t`@++8mczqYbf0#<;m2IK?L(D+8tU#lV%Bu;_U;9R)7hHO7Y=D?Bl0!~1^A$A(6RWWOV}qx2kKV=ZH{ttv;#y&C+sSfkT9ug>GtOZ$ndgu; zBBio?4aVDMcSAbHbg*xVbo*wFQ~eY?zr}Rg?udToRv5W^Q(x+mmCX$pO#A?hF-@ig zCh4wKjW$a&Zn;g2#y}q*eaYzvK7BO&L@n+J=*Up!&w&n=7i%s3eDQGfd)FpF#HqdI zRpWxkS%6sdKy{>NWME?oN;w?er&=rp6PvZA%mY)`kG$Ff$M$J<+Pz+O* zCm|UvUeMj?wDPFWhRbd<+-$q--3-uqetvvp$UH!S@o7&vOwJN^UP8BPSPfdXuKS4_ 
zg*Vn;MTy=L*NB0@RB1Jv?aaS|u!dUH9FVFM?Cd40RyU)~B6=86xL4qjY`^THm5u5Z z(7sIh+FP>R^_T4ST8EFVU+Du`^S=!@sx3L+YjaFXBJVG&N6ER5V7Wb|GtFjSg?9!* zMu$VurGMc-gwK=Jo!}L(sE`)$FE_GmY)YD9O1j*V*MY9}#EUK$5hbRqW_h90{iK6* z!RL+&wZHKT2TM?Mm_$Zc8fI3(dhX$r+y)>`_GhMml8{uVJ>5>8R+5Kfk7q?p8`GWU zrLP_?iq@4dmgCA(=3kAd)qy@c&^R3hW0jTPw2CP}E|W&;YOsSYLVr z%5E^q4KS#*hCy~x!gHmtV!a1y4zVa+16|C9uGRz2EqRZ^zYfccv?3@%dhMPDH_{tC zF^mKV#xLL2eEHyb>>Xea-$L!Gs}t}Sqm;GfgyrpQFUI+$aWvi0xvcStuIVXh`Q5vu zo3^*C8F%Hbk9JE_53i2tAPrx~C z9gR27D9s78=@QLJ`mk^QE&D7%$n<_W#5L{^$*Ru1H43rDw|U!ecy4vpNa=U|Y!cU| zh2I&}}dJRN*uq{)A7?Tm(cz@ZBafqHZ~lvMiH=BY|N6)#x}XOSFgfvP1`$!J_O$zhHYJDt8j9v)f-jI_eSF$5g==*5&{PqtbjN$OVd{G())Q>vbjV}H zadrIKxX733S&i8BJ_G~Khz($k+JZH5lWqnRc~$3Xj*C6`?k`{|3yTkhJkgjOhJv8X zeu>J(rb|QO;k#54wVh*@wvK@AT=^EL@HW=!Q%OAgnJM@xD)Ci`St+l7mJcrkeG5L* zIVe}?O(Nra*T_);gyZE2Fp=BfvBA%U^}sNtn6m1!5^4tUE+S4;SxnKy zl&0Yg?M$HJb%Xh^^ZM>1ZqCCw-}*JTI(9-Y&t9ebg;bH-pvo0Xl<$?>as49r8u0h9 zuk-C1_mBINKHoc3{_*&zUU7$(4&7fgYpN%^sEp*QmDCV9OQvLn*{=waZUCiZXV3=4 z5w9vM6F#C%Hf+QM_WA%j^cW(1AH-rA>p#rM{2v(+v zF**1Cm9wAdnDz_kpZ|(9j_yx~+<^YBYOkopbOEh$d`^{7r_LqK_tyZ(>Ax^M^-A!J zs@`9ImV2M<=1!zBQ?nVc>XewN<~F(E<$-Wz_<5~@s@yb05nV<3;l0(2l-nt_RF3l` z)$MB;8BlrTN%8CLelZ-jc&W3ghRp^=!{h1DBti73Ey3cUw(Qy6yvfrPlDZaca!;Xm zhGB~iail@pJ-{Rf6Fk}bcp$+kyLF4w_{(N}K3Uedj4j|Fq8vM44lDNI;9PJg^R7cT zr)bJKAJEpWeEyAUv_}g?HCEiMz4d|=LS@gV-T2N77)1>gmryl&uxM&Y?bl&e9O&?1 z|8A?+M>6thPN_2StJ3#X{Zn1rskA`RDFMaa-Q9Jcvuq3y^DP9MTl}CMGXFZMR{i*f z`+4o0ej{2D=L;#%1TM3&ba2%m-a{2mUC5+;A@V|+u^Zg=iMuJ~=aW{Ta+%3vsVW=S z&|LW?w34}4o$IbwPizEAs_L3`6|L!P(R#DH=q)bh5r%4YENNfh0MNuRyM*>rkRL%s zjN^Zyn?4qRZa9|{zh_gNC$UYB`oxrOck^0gCUb}exU_wZj4E$3=HL4ycMRk$q6b*`0P))VLSLXjgML>A!viv&~?tY>~Q5Au*e$4;kG; z2i2D+BVa$x=y5(yaM`n*rCc|j<6+-!FyCpE0#<{e7wC1fSlpqa54tfFL$P!%O#bn~ zv|^(6!rLVn_-LE-FaA)SKciieA%|1NIaF z5)dpz*DXFMRVXP<{b1>)EoC7OUDV!B(cgHEA*)Fyn?RP*XgePD8DU!{PH}H)S_NvZ z+JZFx%T-{1<&qIDj*^Pwdf4(+hpav|NBtUfQA_y|g2WV3BC|tFRsECb;gqjeR0Zs7 zWN)CVbol9L93Z7;Q$-ds#no=HHg5rgD)%gZ|Bg|8ZWK?j5rmer^wG)N2IdnP)+O>~ 
zx7|Y4)M05WeN<|?!OS07TqeQ3+$fP4v(T_BAZz$6#;G zo)_(I)21LZ0ZZHklyBMGVQ=>!^FQS;y8Oeyss)Qj^3&bxk>&r>In*P8S|13f_=Ytc zl18E3cs+Xb_*O%!niusu5Fx}>za$5k8-G9b0nZw$E$pKmzr(b&$@8rlUg9r&1hWqK zSD`X@gN#5H00IVB<&kk)FDLe1@B*+LAA{j=!!ugZ}6LPYP-lSA!>M2%eh9 zs>~2_{N!duuSW>$i}5~AwDE9e-tY2# zY`tMABoWBO{W8^N;Lp;m@O};~HrgGhTpzg%f4ez7>N*%WsEsmi;Uw3^+i!6{*-#ut zg{70cu&vnN-!&E+Jz8`j@%+_qmud+{{pwcdRATGxyG}K^Ck{;(S~1gNbL+)0zFP;Q zUiXF4(+~tf(zuEA?L;HhZn(*+3=_%*7nX{EW(N9rw!F7FawX#^ zhs_IJYgunQ_dQB8nY~X|OI>f(ZrMnfSGv!!yV-6e<|=e!Zn+CQh`iOa#!{X@#S8tx zvFz>Pm><4HmW`3sr51s6xwe?sCPGh57~4p70+V`~dKrZGVzZ@eW5}wmz}t?VeRSTO zyq`Sxb&=&RxAyd4UM%La(OmXv?ETBUle*VL0{t%Y8iyNiR_98btEmVfla^dsRrLA3 zptt0A!lS)HA|!PLN8)YmmoO~|9cI^fM_I)NYiZmL;M10v-7PO@>shoi+exr|9nE|t zr7e2~h9?KdB_i4TYFe~19EgNEQ{&Z~bfxa-`bgL#SrA#jl>+7Da{MJ)`Rzvp%O;`IadlYKFR%%OxzlT6fz_ zZyUGIix&Y>9#%ZBD)GfVEHkH5z63OLLEv_OC6a5^*9U-aUckW`5@<_TBTx;cTQtCj zE!P}!kGmXOV6Lw|d@^W@@!`X~BiJ#v6-@AD{Thv4*Xi>Fs?=NBS89U{mh{#vDd7h_ zG*n~M8OLRla3r)z)kdy}5sj7%HlQY7INg^MJ{DZM(H=0UZ#O8fGG?WcnLFE})iHNx zz9#`L>KX%MD(mDEA;0_Dv?!S}HtQn)T{F8@gHn;a>T7R07vB*q1Ed3zFMb;E)L+Z5 zpexXmrMG#%EL#@3CyePQP~5{mr@zZTA@dMa^TyFJx3RRj)~+(#h4-qaA#c6_+AD3& z!A^bHTT%~-rO}kYX3`?%)#uo?jgW2S@?#LZmvuYvY_Y!R0hv+xw@Wl_34>P`OFM}@ z$nJG&ugA(@Zo#ltLRgt$aOmIoFif1j;$-b}IN@3?HhgT*`}l6h5tH6k(Nd+F{}r!l zQcai|-aG4Muyq@&qH|Yv8;;Wzdek?D7!?K7C2y4P$7+J4`AIL7r{AKa8O0}MlFnZFk;@U~XamS}!b=`M;!!MW0VXG0i1mEoc z(01VgmltZ~UE#8PZF@&+xRarq>Q!UsKN+;*b6;{~W$eUW85U!$3+LZ~9Kgyr;pVUC zB&WZAX%@JxPnGJpnm%Ua6(vNJ@RUgPC3h|0E!jATJcTi+N$cC-B#ZsPH_-DqiZsg) zI3bkRXAK?F)*6keT=shfVw4U9V2mR^TL(+IaM@&*!L$7U7$&$+zZ&;rf2c85w!*wt zBp?^8$RK+^g8{|gGV?V96dS&zT`P^$Vtp9T@XAqh@by$10}a_AN+U-fAZmCKt>twS zdB-c^)-Lu|6>U49_gX$@pg>GDtx@O{YxYrzj8_U!)QII%>7Yh~AMhdrEp9L!M9*2J zpJA16#66;3L3T%0iIrOQnN4=dv>R+$-16PQm5t$Y&E#uXdMW2M-{w z!MvHehTXK0;wRl6M7^Z4S5F^-s$4-`Tn-9gHfk2L-R^`S-u&|pJnDp7o#I3%by`&3 z0E%UQSKrF{cFUGO{X$QTdphso{BsOO2@a%S8`=RMw$6EzNFLutb9e{F-6FB~EJ;by z?(xxUO|LL*?6hff3u??L=RGhV-Pw2*ZEw*yy=g;q;Z33guTHH2fY|(5ar%u3pLmmx 
z0^jwiQs~=_E+6OOSY_8+wXBJ&bse>IIn+kBY-LFQ;3h?BiH90KvQrNzx7J6?Q_A8UmUX)ni8q8s8q zz=3laAl%NAs*3B1zBx8wQ2Vj_Q#3VNp#}GQs#fD{{`HojA7&?MZ;OsAOXaf)lU;m4 zb=F?DKu5-)4fPUk%Pa|c^13M{ed3Zdogi{}u?}9#192$*MLGg4R;$w&o(Hcg=-=En z*W#r~0H8R@F0;FvzEt&PR+KK*Ao*_Sfm12_gw{D+pxfRH9WRm34r(%AhxOoglUh}A z+7fQVWrusJSnG>Lhk~cf)XNU+DjC(?byd~mqJ7I_P*H3-LH@=aPHCKv2mAE*+&O1O zH=47OzB=>@(dM{28A&hnF&59bqbJ0}zN+4JTy6H2t2<4h*2~}=sc*y_xO_HBXl)U# zuP^t+9r8Hr(H0vJi`&~{a$8nWNivO7aXyY457gi}+#y9=H2e2VII6@Tc3@lF`x!rW zl!YkWTDNuKITRaB*gk(5PJ7g<4MxTqw`R4#QbRYP5N}rdrMy=4#hB|@@XlWGvOgNB zUujmA0|p}C3oWstS)q<*(~g+%kqxv`r2zyw6sNQW~R6=KL{Uuys@XSDFk5%3F zG~4YYX9sH4bFl5eCOf&%oFEC_E%WU4Yo~64Ww_J$5nqx?SE{kIHC9n%FT%E+E;xoM zw_;V*3hcVz>IiIuS!`zAm~RMfCG7z#hLXeaN{%p{Jrf_KbK;^wpmEtKI;A` zV$LsCYh{ScC?jq})sPP6jrc-hz|P+cx8|~ccpJIlv%cE?8m+4UjFufLB4sV);mrkS z7U*=o5u2(P?$;aDr7$>ewlX6BaQ3#U_R0p<-Q^ml@{rC9Nl5H{H&MGend~(2d|`wxNgpn2Td!eTb-6bcQM&PqOw~x; zw?fma&2Dil5NMfIl>@}5k2{6o`{Wi`aoc9do8^v{$KTxEYKahf=w}ot6`*~JGyitZ>GGn4Ga+gKRXbiwYVz`loJd(S>xkqDD2DJiTW5`lb{@& zbP5O_D#(O@$lo}Y_vV@T-#`f7e$R<4Z?siLchgxp!TW>y$p3lSj>u#RrYZxT{wT5_0MSm4Xpk&dis>R zD@_e*n(AKztkDeNgx;y)D()KrS7NGl5Y~^|p~yisnIuMUg=r~ffTSC`!N*VR*stF( z=gCz0=WDQ_qB=diE^=;i?=J6|I9jE{?1fp?&nUL(g#RReNXF^UYqJsmVc`=>F~~c?d9+57uTh+Y*G<+*bz`u?t}v5-H*>^~?(BM} zewbohxbij_DXq=^n25lNSGu^7D!Fet(JdC<3sOO4$Y7P(+C}{$xMRZt^#ebg3bTU`qFyt2WlfFMdlag&`gW)J>4 zor9n$z#YEha2`5BgL0_I-wL+KHyqP*Tt?UABj`S-=#H1^m_w!l@4=Uv4iI$7aS) z6}OV!JIsvRab277H_!3>)(c(~d(zdq*5^UD#QyKgbAx0UG5TV1#+TD_59b}w{V_Ca z%n^6}B8eezHwnK$r;cu7fPTn+H7c`Z@KL(`&aShzPRVl56XPYRy9L+QlimKJ!Qjsd zxV-juH2m%F9IcPy=>lD?3Pfik+{NJ|T z9HM+X2|&*iFUo^mukxNeZaz&$Qv{Kl`*2Y#gk@R06s-B0^}}{NM18k$JkP|t91J2V zohx3O2rGEFdE5h$*V+HTn6+(F#_j5=Hd`dba*|Db_z{cHpIJ8GzF3I{E9G6zu|tx( z_hJocE_eU-`=mULDK%s(2KSTvY;Pt#4M7-sY-R4QWMuQs=zt2@om81w{-|p$ zYmQ|nKL1w7nmcccdk25dpHn`j4}4D^9jvKoDR8TVS|-|XLj`Z> z>F1~ha5Bh8q5G~y9hRl;Te%464P5OV)*G|!7=lmBT|O9aq8wcZg%G5>PDENPMCqiHo0 zmt5U`7?|73BDwG019g&Ue`^*SS3$p^)4sHVmg&`)Hah5Auw#!89q8LpcD$^JhE7x0 
zj{f(0t?xRRrPT~{Dqa=1wnQU`?WV|}hI5sYMATv`p=4Naou$xk6+_%A*zsG(NmS_EwNjU&7?gWgdiV^z+0W1O0l&wITZE;} z?MltPBx{&gs)M$o4o2I+#N7S~-|SRf7(?stJ;B)g7~J87jyAdt#o@QvOWWoWK7R+- zcC*=gj6;k>DmWEf{+ZdebTNEyra#7youpyC{Yim42Oc9aGn}wf<;V_gCk!w26{&u2=hF z!rKV13pM0^F4v|Rz#L}!ervN2A;8<{29H$omksH^{G3oO{lexWpg#yXR#S8_ry4|C z83?ltY}+!wLqj4|Rpbjsg^$o2`P@9_{ZR^uZ{fhQHtLF?a*s6hDc_&0%TJex5vf*$` zMQN1e@~Ec8_7vYX`#iITI?tCo-=CVb7pzFcX(Zcs)8LeaNFuzloW!g7WhE_f51{&n;+ z?`W~QCRiU;*}Y43yBTR%+o1YLP3SWGxNRe9Q@RKvCZJ<}0HCTgtK@Wchp6HH7|n9U z;ze;T6U=?E%k_sNS)M&s=WQkcI1=_zcZ>NNZ#4ZkDN zw+=8Q%WH58;oj1F#$S|| zcg$CHK2DL=fo$JPX};|61UL|3{QXIlo!hR8mi&mfAo6SzD0*`ZsNI(!TXu+OSZ|vt z^TL~tO|ak~-^P5tHZMlDTMCGp>GqY3{x|XL#bPHM)kmBj|FRY)5pzisVyx}t;NY0bYGsacjKvhlEpX2_t{f#nMbdko0A%_Hn<6kDYeBF2J(n# z>D>)%ePfpCFEZj^33ab$x6^7jkjQ*sPh7%CdNH9j$`>YY5PUov1!^A`NGI}6>NBbsbs}ue)A1<-Q6&=Oe~HY z(%zwQByB?R9Zh^Ztb_)f@Oh$PPwg2e-gNL+Vv@IscqaxIg)8+iL zOul3_m$Y>QtV|`}CojdyCm!Kke~I5r2OZ6i^i`F$O%+jJxU>D7nFeq*&gv#&R#4KjoPw<;S%9({rLEPq;uTXMThv?fR6DMXA_6 z5LkIWA_eKAJl^wV z)}Ss4mc1>OL0y~*O7`BOdJvvdc8W1~#5-^0Z2W+9q~Z9>_a(Z|sW~JH!|i8sn}M78 zF4se-sBii2zHm457&TPQ7^aaJ7{eB4{F-P=681t^^Ji(2#A;Mbzfi0Cf-U$pAHlzA zMj!dQpj>)|L~yM0o!-oe@g|WE9!#Icad(5n{!4Ga&=XRQRdpInfrJ*@SxKo;&8Zp# z#vQ{^;aeHWoQ6csP4rU{rbgSwE-3c$f~vF6(|t71@x!O+3X0`!#_o$+=}A z)H_<2pEQK`D80{B5ug?B?wn};z+6gtAQz88=A##D@KP8u>}-r*UB%8qgEGS2Vq+4` zx>~v)iLn>@85T8};GS9Wy5fc+oK_D%qaKCDV#8wP*my~Q)2S|S{5-!7Iur6yhn4m=bQ)g$&>eEJ`8&1y83GGm=QKx@ z{NANYNb7Ba z*`1Fl1yE7J&*&4Kp94am;P}=x;8MB3CiyrV-w5p8oVwE3EWbiV=Mfi(rE%75(EA!F zTZJjSRx!Wq%$J#EEvj%BvIf2p&lGjeRGfE77&ULf8t70%eu zM%_byXRw1EhQ~`Mj@7D|A;c~v#aBKQ$oRP6gy04T7Vq{+MLi{9TM{Qm3$R1V9g{Q# z%q3rp!UCbV+c_L7UvuZS29X@eO_Palvc2_L?-geP1gVaJESm~r5`$Xt3o?Qm=~Sli zMB|KwquEQ(O(MR(P;$&-v#`3D^Dfh2DcbA@JUgJ@d7v%!Q5@ues+P6 zWS(1AZTez9ELsIMjh$gTNH`37@^_^^6fzFd^JBxnctfgG@zpkxgOk;)DT5NtTG*c- z3>Sk>UXb0Wlxk6{*DXJvcrPUAR0_+RbYV-rWHHy<6DmbPhm*dD{;td`Qm3z=AP`?$ zrkNMZg?JC$h@MLpLy?xN=h({X;JI*Wx3$eY1`X@VZNE3MWapK!HsVpi01Zr*4FE;? 
z$KM!2yfV~ak8i$kPitR0f0&8+xsuXZB|y`Za<|28^7+Z*xiQiP!6S+^5GyKZ*IMwc zFdKs6LJzWim&f&@L-bZ83kXYP(-(oQj>(JlMXVf1PjEk9j?eXckdl+ZYj>x}x3Tm& zafh#%|Jg8!HC(cUL%_u8Vuq3H>zdi!W7u-e?p!jbjc{?*_e^{QL{)g^0(OKo0{bsy zHF_sbTEOg!WDZIU+1j@>1Pecn!b}-o1+v!_a-H|n+pdSq!XDizXxzf!0pqR_aj}j) zq5XNBY$S4QXqI2Pp^)|DwcN)t>AKKnn9nRWb)vNv7*`~rsyu*`c_ennbjXVFHrP{T zt0G+ZZ=IbTtE-6)W8^D=%R1l{?I0$OVotNh_>9isPg)- zelL^|OWJ?g)wM4;9#Rchj%?l=obs_j(SP>>(Ak`xs3Y#-Cd*^UohuvWyBDH*6cF`k z^i;bKbNploh2Hv*^daq2vfaur!CCOuTQ_#t&!h0sk$#;`*y}Ujo>S53QmxRkhmuii zA)~p+7awfVchDKG`bp0e2t0ITqaMItNv2roHyLRJ4^Y>c^%51$%X?C&#iWC}N;<8J?nsDK7kq!#?`A`% zBZu;Kki6BIgNEXEh3ChJNa%{{#Qu$8H%*&yvnhsI3i=&(GkAvmn_%q=WKsS!PGa^g?G!?9wd150SVP=C=)5Wq(#R^yCjwEICblAv2WfB9(Y=erz+st0=a zQuO=3ym%T0eaC`O&VEj0?t*223VuQS3pU#tBp^+#TvOEL#!uBd*fly2$ zlh8Y1PpqsBlD{}mb~XZzK@7al<$y`>o>vS@Hj%}WqG^4j#*zw%l*g3n+|%K{1@)ZU zGzMS@<1s)e--h`QOJlGoo(exe6>PjD8Vk);ipa9GH$EyD8MPLFJ_*&DQ8^OP~g1{7X0liFc)maAuo zDiv(ajb2O$wN||ndXW8Fd4n90T@yv!NIbZOGj@|R+xMK#$xVo#m|**QC8SSzq&v~A z%mu79ejaJxZB00UD#rluF)^b2wiUElzI8a6vuS0A4$3=j;g&haq=}79vwn<}qRWxS zE{m{ukmY}*X%1(ANDyLfmKAt?rb?yICGOA0b}C!!g{8jP;%xj-Dlauw<^A=cOv#Z2W^9B(8&S`??3+x0 zzw2>6)$_T&Tk`^x}-*3}|q;dFu9+0+PpITjo*$v?1jl%ulc8{D?-fIJ zgS$)BM6O4STj}3SN9cT^`t<>73b#Y(=a#eAk3eS9nMy^bNWOEmr0ph#s+wRJ9`_L4 z9t}$-4}@70Mo977Ii8Wg*H<1q;ZofDgna)^z<%#4L*NVa2zSt~ z=hXFBIK=H0Z@VWTnZVA+^BJ%KMeJXwD?ZFxWeWLPVw&MW?_Bdgp&SJwjpoYWkEmC` zA{EGG(1hS~N0tso_d5STe&_j21(nqmoEJwf6J_S1#_eM`Wui1Tdq!{-1aRV+%OYEg23>c{TpE3z6UjbEu+E zP(y*D@u*en^U-^QA4R1=oC#aQJy>zeg9TIyEr9!39m99|V|D(J; z!9JW1^RsABd?OSI#LWG1c^vGK%K`HTjQ=Tu!2-#sH}}V_q^qmv!`Hu|eHTa^v0bcA z=+F>*slQ0dA48=a2p9PENi^B@?BffP3}UfR`L5t;xdr@nb+d+i(fn!Bm>8-w36g3OPGX8hU16-^bQs@8foB!Jrq0mD8rGl<_rFpRs zAtvj&v=^f_hfD}1nygr*7(W|6`@w-rM?j7GfA~bp; z{+KTnd5{71c#oHx8uVl30uozt@e~_oh^fKSoCHkIT1`x0;dSP-cb^|>z5s@Pj{{Bn z1y!}}NEfXi?UWdFE&C#PllHyqzCJxT-R+IY-yNJk+HMWdUtG_6>@-ME2X2?;xH{SN zIiiUA#=u~uDo)=azRRY|>(TuK0F-FLLoEN7kh!=lIHp?|U#qi@ zCwD+9l&|K|j%6z$`B)%sJBE&)j3y^R3$5a2@MQEK?Jhy}Kdz_a`UgbFt{=X8_U=!e 
zD0O^5dduD3RB%h1)H0%->@$i8WV?Gi7l?0+<&~!!Eb0AVO&tJ8*G6 zY`nAHtUJ>3%CF?cl2+cQFNV%CrOfHtjf+5QTrgZcRKQedDyy=Nj3hz-->MpH*LOx$bm)pdpmq zGU^4EC3{oM($L|5IX{tA{TWd!ADxHrr@hXeu6Tif%ph2WeO?wrTCCi@w8O;yIbSC zgP-)2gTFkzhjhP+IPVMES>N5`?>>gv7b-%*Q7sz9jl!(WrOLKOPoHlN307N{5rtux zNbtUp0#mEBlEZ=Hvg>{DOc#CVnlHw}%N=uH&ulR0AKE<{P}&>Yz<}|r7_+6E_3p6o*`M2n znAFM?VlmV)wUr16U=(ZrEB7-_i{*+Wu!qN7LzuS|U41kW8ONZj8{gyQ^fDnTY4R5y zO277o>OweGJ2*oMG7jJNI1PC7o1{~W|4^2({+8tifc4sd1Lb-=KgIz*2e~9QPpi>@ zV@Ol(SrIIF??Ws9v#aRS^Lbh8PGS+WzcG_25S8^({M?+=U4YA7p4g#ncNw~eL8+UX z%*2D9%rZmwAvYEZ=jpTS|IzgL|AidS3E%JkFa=AofY{DAA|@y8+E&>P%;enRRH{_r%C;)M-?L+f_>+qs*AtpmjITz;%x+HX) z{n02LIc;ZZ>PD5Xa)Df#1=a4GLw3FJ$B4Iff5kIJJi8PAL)eG@8}>iO?||}Bo1N2! zUA1$|uX!#HO*G04tL>UC_H(0wNM4oxxys$A<(E#6bJEjtKfCO5uqj0}6p)j7T|(Kc zX2DK2##pgQl_R8aTA7xNkY1s+H9=F#a;#VB(07pFKZ+xL3=YkZ>l*A0N7ka<| zJgr+#W2sJS0@4RAsxs{)N@CX5#!{ZAtH$Gx{<1kzm^qcTm_Nm(s3J?|uSZIG)6kMf zwcVY#8p_l^N+z+DG?nzonL{KpM4~m|4?5Y`i2uWzO#b&x?h620_odeM-1gJACI>D= zrd1j>rw*lXi=}xug@=CN)^B#1lr-9Ov(rEPfxJ`XEjB^9IT(`yzpNmjVRE;B+`99V z9(e>XJU87?lFzmWMQT-x^n8u~uc{hO30SZWeZpf`u~M1+B?{IzkB3{VX5xkEl-o_! 
zLwUIbI)8gdyB}|C9ojXrMtYo9;BY?3YR{p~+ie!iSE2hP+@q3FRYV*@^ zOoFo!tdarKfJ!0%g))VrLS1Yl%Nsvnif+=B)tr!6D4Cim5pO8MD3Dz2SExiG69-s1 z9lp4l*6`j@+7rGX+f@JcI>CNi=@~V+=)}6;DJ*&tgFfHR+PA9Xqj{tIuSot|_}6@Y z?T#;x|IVxJ-5yBrKHD6`Ei1NepnUO?j-GOb_$wy%zv&2MkThzPbmw}+BuRY?ARO+G zualP@R!|-v&gn1i@jb?Xp#*{nWrGxC%k?{f7XbQi$nvLUlp%eHm`ZlPe?-1h9my%} zwZ>RfV4yduC;Ta5u8>V8oF6*tH7??~uB?ZQ%O^w*cBN%j$hD=E#9v9$9kAOOUCW`~ z@x{{3Oy;!A%=>|y3wX!yhNN$PNJN!9|tfnR=^iE~Q+ zvJFUFfkwn;^K!qxbl6)iV^=OwkY5bkKTd0l1nK}94e=}+C~-D>Q>aP!B_0t!qRDoW z7fl!EJ-tfH#usVKA~EPP`?4R4XuF#emoN3u5Bp9~Z~iL92N9QGEQUsYKs>pZHq!0> zdK}P3^gop=gg8sZdJkWHb>~54;B?34bp9~tVa%vpq!5sb$LKSr^S%Yr&&(|0dcIKa zVbbrXX;jF*V2z7GBS%02qw~!OFUv<@(tdo{h;k(yslAR0c*l zR-C-FC&q&Q;MrO!&WfBXzU5u1n~>&|)K#-!0Xa})16YKq6!8w}%9?C@@2c)L8C87f zZ*Wp0<${=W_@l0FCDAoi6A?kbgwPf@l)9tFadla>zV!AIPBU-+6I+GojPmyrXg-;BM#eWJ8nY{4^S=3U^-RxwQ|TxvS*=KRVQziI5kdP zCW(!LA8J-U3^+BeM;GK==^DgL3cqH?xoS);e2&Q zi|hq_uatsEdw7T9E*yi~-fqOyJB8-Y<1X*VzYV`p%=>-7AJc*0^~ELovRp-`ugR(x zXV;nx7?t1&I(8>?@6YwHSHe_GM;ObqGu< zV}$C({@Y!nCgbS5N4Uwh`fW^hlQ-(u${;-*x8z|Q&I0{yUu}?uC!J>3(P%l?R@-lPvCT&5C^y#lKIPj@J$~cSf_vIVX9kr8ge2+}3 zNi+0fW}#Tx1hLwM52mQSN~toP>$N>=tg7GugSU!xBJX8UiX-oDdJj_)R-(U4j~ruV z(-3sw@usDd_N(;uSQW{~B(l^?sW}hI0EQlW426!8j%lfqvWOfOR_x63Ta9te z05xpTPBf$KH>k;xcQ}cT`f6Wa&b~Z5lcJY9)TmAJpA!FYXL-VHSp-%j3u8vJQV{u6 z2_{ia<;0DAB+#^8i~I#kq6>QHqK^oMz6Nad619rJLZwpi7^)cA7J4^AWA`8$oo}1b zKvnKE44jv;4IaPKDnN~cY`xrD35-^kJ)U!HFs#s320b#(O6wDA+-_2f_NSw#wz9Xb zCW=x?wwlHJq&{sCb9NsBXeipEE*t+YKQUxn_BnpSsOw-pCwV3^F*xEw_$iJHxgU=W zo&p_|gm#`+u$h6#xK$~h*~0@5akv&LL+3dIhHTSUAd{u3^}FodpRkQ{Sv>{;8@NPaymq|$4WUn1dl~o;O48WybOfGL z$6Aq~IQg=rN29TJKg4X6YaLtCw_y*$iCZ2SVBtUsN!YXr$y`UhHq7KTPp&)@_?NAx zQl@waceIr!TC+-&=V9cWVQEb^!@qY!+MPd~m`b&Tme`Vj?IFaC5wck~?I2Eht?W9rH z`P2jWq#J2Lx|eRGq#LC{ zxF(}^rIvdCUiWj&JoDV|*PYp!{jlc|zdF{tjo9qJBc2+#u$KS3^;RZ{cm%+LAe zSUdH8=;*Qur#D=+$+19u^Zn1WnD4NeoyT|KiPrn)v3ZcEgEI!d=T>!v?ceSbVCTB` z(q!rwWc*@BK};pM^h%Rsz@$ZN|3?a5LeIr&E}yAF=IkvxvyS(}ie>W#^w0Eu1(f5s 
zPw{MO(S>+_d$5u=imm-T9Zx!cqa3lfOXTHI{4ZkR^Kdg2wncxicvMjk0Dn1E-jD=w z$-*1__T`K|YobEGAn^DSZMtr%=uE~soCXS_sm-evv$9Lo3DaE&j+joR9c^` z{Q`_ZgAgi)((gSGH-1kDA&Yi%TvQe>r9NL0K5r7Meq7C(b9|zCC2YzE>CC^8Y)~Ml z?JvysW^5G@16#NA*PUh8CL0`HHy*VW3~NpR6CU+T2gd_CQ+m`+2GuhOUybf<`Wv*p z(I@$%)fG_zx8tPGoJB}}N9DA?)WbF_)+WNw!LL`bckYLM43QfG>GyBf{EC=)^pp$a z60?`=OQkHQ3hP9u#_~K6`@h=|WE_rJs=TbOU1IytEDAU7mN74}~-# zcZ8L7F)xlbLcRfsVyjHAO-cC6i*Mt;y0o(I6BmniO+XaN@mPj{e1U{sL%1)6VY{I7 zAZ_yeZ_$bA?kX7))_spkgYq~FOq8G?M<{FcEE~=Gv@|eAD%5-I=K%(&SYL5mpCS9) zs50QLWKOsA!QBP!rNHMA$3SM00y#X)E=7@_04wIv{IL<{Q4BJOH9fajcN)`wcl9yt zLGMV9`6)2*Tlee5y8AQcB)8Vf*9NUJPEIR%529&X3rKdrxrctKH|Si$tnqN zKIF=*?hl4mSxzC=2oF48m%I#eHyEreQl8{I>v6h(D_f5)9)ev;hz(5PgzVY`bA1M5 zpXi3zpqp}dLlZ8!FVPxt@~hmot5kg?py`+=3LfGQX;Uq#sr)+E+o z_J3eW<3bip0=c(jkLa;X0`Y1G=NDVd$}E9IyoBh6J=Sct_IYXZ z9>VqKS(XC;tcLUpMrUjadYzJ-VB0zJ_*DhBB-+LUUwlK|+AqKux%<6RyJlef!y zHet~d<vz=AkLlC(BROXsYE_w9QRM`-JmH3lz6hw7k8O=KMR- zmEx9D26TOQ9>p;h@xw_?GYpL`Bh@3}xnq&r50nm-HSZtl_)JdE8}>+u&VrkQ;$#*{ z9<&6k>&t2?nG9NyJ`bmANFF>8|EQp4Se%L$UQ}9gJx<_f-^aGY@%#}-8?MSV!M38qlbA7ImeyZAU;P5r+3%?_8=<;pZb)JV{ghbwz%Wt2F_DlgbM-X*(=b2NqH>%7 zwC4#d;xKbI>hg^gV~HO^t({{NhGoD~x*-|R8jI_h3byin(xeXsS9C$*fh zKqbu6fb1zI&6`h*y4>n7<&(YksT9O7u^8nMrCtC(f3l^DO-U zA*GDSGvxPli!j8<+ACr99-wU|0ax`7AH*_cxM}c@b|rS-+Rq-XmII+5q=AyUm88^H zO@wE{BJBN&6G=`T)eWSoksg{A;N}T-=%+F-{p5T8AqGgFi}hUoQLx>Te0m@MK6wuD zJoW5~@WZ2g({7T0CPsDx(#3C`tm93j7Rtves zr4i;2zkw-{M*83G9Qj|}xgPWRC#a zm{lp;t>GrD_UaWsO~;04Vwy~t{h1GgtTe0@%g?eQIDN&NZZX%K>m^HMO|S>{znaV$ z%=Z^w4$Bfv-^vU%`vHD}Ply8F=6L7TZGTjj#8zKNSpfg@;&4pcV`Jh>&z%wl20Hmg ztGdzr2h`lfUwc`XBs8X%<8}R8L1B&Y*%(4XChs*W5c8zYd>HHb@l%3^D73XtopC{>tO} z0rN)57Qkd4I@QDoq-W-krU-tP(i(rfznom=F^rXvRv20mnf$T(vff?wRr|MDj(HZD zoJMazuzrA2dd@45w(rxA24q}6VYOf? 
zR3cM803eg9A=v5$wnlsiaq7NW%TzS4o3FCMVC|u-gssIg>1*!D5ukp2g z7HLK!V~?H`zk%%b+~eNVn95z$pb zjz;@=t=E_cq=AUYr9+8C9RrbJA`_ADE1RkgC2JPL8J-4DeLDD$i+F75PnmVe$hfv~ z#m}@!zHjG`&Y~`K_eM#7X-6PhygWxZMJ)!rItNy*0p)^F`{iH$`{Yu;lnF2H4l4Rs zwjYtMaxl3oa{ida$0gS`-hyPOJGD%^0r9Q!CZ6!57YJF24yqtz@9TmTG-ND}@NreJ zYq85>*gSs=@CRmBO|MP)KBkN(zuP1^g|~HT?OXhgcy(x=Nw&+Y)bAt1+?>>7qYRQD z;&2YC>!PA;Ab8mw%t%9koRQ{wHM;Y(3g3FPBD$Piz_A!M>0r`Dt3-!j5@UVuB)X~U z4%S*cw-|~L$Jb+H!6KGGK=&BZFkEO<5%)YC^p8pt3P_TtA>*`R{ye{$Ok2M?mzf!# zIUXd8v7M=u;e|cWtQ0U3%@*k!V%L1pSG5GX{6&UBG&{YlU2t?wEbkTMe$77gJNM-G zQD{KB6Z6R!?o)`&)2qwh-Uou3FdTh@rK~obL96z&&gN}(6}PXN4w}qEC=ap13JgLe zww>bAq25zlg<8W+bG)LVc&4l7S$_WG%Dm;o1$b#L?1dzP`^f!GIGdBhe7oKh{l=uP z(oT;9uHznbxQghZ5Xj0g2@}GNH+)7!-{kJX+T4lYY#m%rX#@HA5AP;>kvfm_*qEkz zIr6mUHFw))HjhQ;4Ap}rzw67lM3cifYVg`h8*)sfd!1sCZTBQ~8`Z(yYf_ z1-VcKy4d|0=UZaq_(o%O>vesmAEhMH8u zwPLlRo9U)u)0*^eJs~yGjxX`^PkC@HeK}tpo%P#OWr`D=Wc7ynLU(cT~_S^ry%KdL8>8 zIS}U=1vTV8Z;1U+cPaF=C^PyxUfIY2^4PI=P<0nYK>`wGPUZI?aXDHIovm1-SbA;r z8m&qVL4b*Z0=ZnfN#>Xty-qk!S>ZT>;SzL#xLqvG@0#2qI?WYRSYzIyGCh(7_euJ* zuhB@cQxV2=(~&7h3VE01pI=1>{-I>Cwj7Hlt!oa|4;uE#Mk@BZwaMaT-zC&AHQL*J zuP=AgjS?CAA|?^x7R@WaGg&}4c9;xWBTd%4?E{^oc-NzaQmWj|K=B%Qm>;vz5r(1>Z_~oT=?KSB=#-&$-H=^=4zsGD?T08D|{5s>r=LF zR{~opo@8!ug!8*AAWtjSjuhAIM9*aI#J=O1l{uL#{)wNg|2E_CQmbEo)l<7`v%m2o zzWmIiuF5%_Pf`;!?+$?UWPHc~3(e=dEW=)(N+)Y-h4v~!lTcIi{Y@$ZzBy|PGfN%z z33DG8&zE7k#a`lVLUjDK1fw#}Y(sk&tEWSs&?Q1W_#0H9!TCgCk7$jvuqF~b_UW9e z>(LDb7`AK%X8;*ZKi^N#{X8@@IZ5^hwxASZlgzN&;B3qesp$?5unEH|G$rW}6INWb9^IwDLi z#C{?zgEBN7*9>om1e9nnmdMXm7GJ(SxV>UTrO4|Fub3lg9)Z18x%%R!dxo7q;vr>Y z_S3a%l95dPsjsr*nQlpstD}!<$&B63VRE9k#9FrO#<{wRaBVLP%Ow)SG?#!gLEOqxtbR(8W-JHs0`tRxEEn=sQ<;^5Vi~X0Np@MJ$ znR%_@DxenzuWBlfSw@mx!hdkB63M_XoC_%OcL8R*8|lh8B>`^_>{4v%yVxR!0v;qK@G*F#Lj;8~bg zYW-@{J~yE{sr^Z;K_;DSFbPCuE@$$2M+Ef; zyP+49ia}>eHFIPGvd_L0FT){twg-$#n%Z8ysrT*h6cU@4nZr4cT=iwvyO@%cdx$^_*LbFgvy3>EF2=4kW8nPh+1p)y3g`LOPOibd$CI{0z!tNH~g5EvdQ!$?Ko;q#KkdS>Jc= 
zo-Up^fYA78wtQS>-^{(nWG*2QaMfe|WBUG(&hy@Zu{7)=WmEksmit8hhX(~E@ZywbcN|yCG*u4G; z8kHA{Dk)g&kIhWVwM$y*hV;u${^z-iYHR_x?Wl{mSfEUwFOANLd5h*Lv%Y#-(2sB* zTGejtkWZR2-AEA2LvQxdNB?PtHGat!M+B44>w(ex&xis8Hrdghjuw8+Ef@NfK5yP< zy~Fnd$*Iw36bUm9@NiBPDP*yyW%|0-{`oMpd^INgc2YQ(wT5Lr56`QP;{ zdWo&9gc#J1M7TF%SS3UEnun2;Uyw#$)!!LSFDHgfc$3dMxh*SBWvujx*T4gIp>j6k z2eT^s8xg>zZ_=b5QsZz?lQ?bP`EdfjkifKFWT%mZ0a89u-J{BD@~a zAO}3%Kc4YXr>u+#woVY2md$M$J2zLDjajbvs-5sn=_A7$YzzoCp>JD?LC%4lsTyYn z`a9KAMZj7fwPhgB=h2>a(=oe0%-IhYzqAjj^z~P#&U}Oou2>>q(NQurNT)D_y-LO#-V4>{d9N=V_?JKHGD{0&e|v}l zN0VqSg_daQ#MRKx>)i7_cG4CFtX`}K(pl)O#))snk3U0*utX1*{4k>_>h?+~Mkhg% zRGVUzT!LyJd%R%;8!FxMAoi4I^cXZ^z>>NquHWKZrFTn-uBFjSg5g5!g9|~NXx8JN z3E3i+3@C;c8gxMC5m>_zF^}bYgC9O~Wl3k8PPR3CuJ$^P7z@>&y*Wa?hCG2&-*tN$ zCcW`A6cTF5#M+_%k|$b@~Yfg8SL7HY@x5=3(^)OH)q#%27VTm+IA*q}b7E$2(wkzPSgI26jsBULz+o zIMX&DYbw~Lue&c6aMYS=n}B!c7k<-)`rx#s2OcBg@a}#Q54}OHK1LYB@CiN@i|tu) zRxNx#euZboYLz>J3fsRCP+SMs_9U7GbYP<1FU6N8zh4jh!ot!gKUx`^lMVUF*&sj@%ZdI=6G#3RP26@B0`{z@ zh&CFxAwXNVo^6+}>8_r?z8GbhmCz2J*{V@SsJ3u!Nf{{abx@dmukD1>IQ%VwhE^$} z-&3c0SGQp68g){|x@$SaXm5El*j`5t_ld97a0RNEPlX`ImlCfj_b!N->x*EUAcOH` zVA!~Ir9zzytEV7E^|FsLQ!%8A!fGbm4UCVJAohB!D~l)P#fnlPTAoxIhfKii?AOXi zRu~iZ!x6sUVr3q`c-de3scXe{Q4nlO`~g}Si?H|I26r{Dh@1|&BbDbb=c=m3vK`pV zT94m!VqH8ED=7Eq&ELzC$-T5CgTk&es*r;0Pazo}cU5jENy&Ua#?5RG28+zEMDxRS z*-fcVV1=ZN4!S;jj;`heA1yA;`Rv$bT{fnevZq?H{Hvu^&Ii^uwGrN z9C9oTO?z4-0i~ufEWKs>EQ~~#XGq9K9^6Y=q@NPo8kmXLV5$-2KrV-j6+8_mEffef zELEbmHs4=V5$n7BH^L4zZl>->3WFmLQNtElYJOzX7dbfqR3-C z_NON9j;wXYizmfWT1$D>Le5p?t&rEqwzoAq4@~s{}o+Q zITqX?6Nig0m|#RtGJtf}(IKg2J0x%4aUD(_i<3XSB+-IaDndb2Vd&*>oVy9OALrlD zT)pOf0w20`+ef|<%+?CdM#~wKUb83AaxvmxIo?(_3~Zw^lGO;EjPdO?rTJ?Mgxgw%O&`dy z`ZvIM0qpU1e6XmQctK#2lef&HCd~UZReC{RJIPQ>?NLyF$D%+zBiBSrdTx7p$48Dm zZ^?Yhe!wcRGD19~IbOMGSKSar>^tO=rnBziDn+nSjOW>jzJGq*rG$+OfjsqD_?mtd zV(kF3LitA6v6N{{K^=_>2?Ft-yX@09{<8^vuT$gzn!w@TCh*pOxHl*8fp4_m!4UI| zlTJD%+w@J)408Nmr<_?V%55TNb4$T8=j*oNQ1UW*E%gs9Kx9U|58b7&{ZW0bi?!I& 
z4_}NePWso_@Wgx+N)b|Y!}OC#(5HX|cr{xLDA~S=k;+?JA!gXQUj}#!=-3d&cT80u z!<(D!M&htCVrBIsiru_t!(o%|&cw_N$FUDu#e_lvJ!T?I6`UU!Ho$^;7T0 z96`y~yK5Bd60{>9ZPk6BYsj&u7%r4IgJQ3+9|j<2oC}nNU&OS8Dp`3LvwfK^hVoGI ze1TG)Do_e(^;N8s{%4iB^fFgFi9Q@-r(3PIHJwjC{wpX%Ho1Z+8xYNAbJx;JJ-U?u zBcKN3=@}=p>MD+M*_Enj9KM|qdQAN_=jm{xNjr(_CN z1RQdKwzy@1ff6lZe-hINYUA|SxOW&-ahOrX+{z*hMjc35b*nLg7293!0fxKS^6H`d zI*%*2verwTJ=j09lEO&^*EaLXCu3BxoOb^Nkr~wSd9J)s5*(5AO8}gYQfDf{?(8_5iK1zEmB9}#1biuqI*R`h>2D3e6}kJp z&VnfZwb0&~@f_E$dU!SUfdiXskXynxnq`13<-rUK6??yxq6+2p5tdzmQj>8W{;b&V zsmE{%ENYh%Ui#`>y$lgek;|9rCw_-&5uwFE7 z|DD0d7?bK>yC+;0m-*F{AoXUu2u!5M8GmuHx5+}0?{lTqTg@b!hVhd)SFr*W=#l@Y zY%BgbgL6Z??qT(!f8CX9GNa@84e?e)>(!Jby+_xf#7lehK|{G|%xKSz#IQ+>3w1!C zOoA;Gj@~|VxmN0U)-4_gR38WgGLjq_dhi0OMz^45YTXe`{%}puc+oZ=H zU->n62yd5N4K^}$VmjIB_>_WFmep`wnCX0>gWOGNZ{{eWJw2A2Wi2X!E-YFLj<{M$ zEu|N2_yyS;3_V3TqnPSNBOrZWK(&RlQpMlZSIMJpb-}Xm$Xg?Rynnf@vQn@@^0#lSwQ-xmZ4sgx4&+^ zOr;;i(tP>bcO2AEe5@H!JEwR}Loc;c3XNO9k>Ro;L7eAzrL|}kNpLi6%lsPw2V=P) z=adf_N%AY|V}F+4cc{KhlmO)G*W2^OBl|&l3Y6qGNvp>J>_TNFVxkp@gTku*V3+y$ zv7!ESd~V2HRZCk%6(pRSbat-Zw~^AUOQQ02fbEjBbl+js(;9gL<8!@fLKRY2HEMo{ z);B=qy{ZI5E6^u~Rm7e;N4QLuWvHc5vAjzN2+Z0xIW$fclHfZWLI_-a#|}SJ&2kSq zzu;JRlywM{AsMSoMhNDIx)H_a{Xuw}_p7GOd&kQ1C*dB2c|g7;LE|2{j{}L@!3bNB zy(ePbta+@5^+d-7XZ0WlQFp5IIohior_7)e3Xg=|;?_!zGi!D)(F5u*jHxjy8F|e| zK%*}ceVUP9C+o3!ZXW%W4HGpDON(V@u$8e9`Ja-zTx<6fH1cu3zX~M2W|n{1C>4@Wys2^sEx~H%xJ6ed8hMIF<2#P0s*-qD1enjNK#Ih~kV>X}_H)?WJd6jB9#oqr7$?IC~>=2Ad8M zH#oDYd79Wm8uu3`s6gc=%Jm7+;6vg@?hJnk|BRdludcPSk}>r~1@~H~VCN<+{oygO zbXIk*0=fCjqEC@-xM_8d_Xl#U@-_jC4;3Wb4raw2m3_7g8W^AG(w1)8in8Jen5J4J zrJHw@wdg{EO=#)Z|DA=^&NgnXO;|woBBEGC-1!~WHC9ou0i&xcx?;}Jg+~e2bhX5u z_nhqg>=QS61RL~mvO@NSwHD{ zKlgXw&GPij|7stT9h60jPyLwE^p)v#0A|m>AdZCKvyr2Ay_kzE|3ETN^fKQr_VP8p zYuWOxD462IcN5IO@y!B8IqQ!~J=j21{PwE7wYqyso(dV0NAc@i5U<7hUi3q2JAjQyqzL_lN=~pfcF5QP}`is-vBLgHO0e0+bzgNovzA z+1?aBSnKOxMS_%^{!M1Mq19{fVRU>V>063Ek=S=j5M-s_g+GEzYmF<4yB$>qIgnvB z>4)JeFcESJQ{|Wf@7RPf%z<#zjY5~K0|;zl$rN%!-bZ)%$ipl1afC~ 
z6_Z7|H`+?YlGpvG@}W)bXZbuA=uP(q)TRZfO0hn_+d&z9LQ=A85z^4>mKL=CsjvoF zm6JYV0UhOpPaSEp87iSLU+ZH@GhfS}Iek&^8gvVQZ}>Wt7^6nMbB(Te&0x^DaCY## zPajDx>2fq(gNWSPXOc9~H;*$X$HN69R{iiVVRdj0wW=%8wAwo*ErJ_M+Vjj zzcZmubl&o8eQur)2wL5zzN>@eO_qkB@=F!*#QO0|>65hR48Q&&Z8diM5X=^rT1EWT$x5GQ>c|5{AkM z1RUAW`=i=%LBY$9T*jSEB9>lO;rXNMi6mw@K3&=At0k$ z`dVmUOFH*#LVvQ&a}Tx;oTNi#}8+uJo}g$Ts?^WrWxMbF{UoMDyrV0 zL3SMOGs+X0UNhpTTIw!N8H0@*q-__OJ*AUk&FCNp`xtW#!pioK!73dF#)^r?ZEW&3 zZEUOZ%3~GuoH^0DK)jbRE_}Yt!sKM#Pe;mkYzTK-iP8^S2F6=lZuNT;zNd3!2R`58 zeFu&*6zfH*1Ow1ZC^Y8vk#oxuD&1RZy(_rgx>JSBh6}mi+-BsAVu^{hjB=1}A=UW~R5V;pmmuS~$fmtc5g+ZHv!#t=xgZ_uSh!#gt^SRq5uZZ6%TV1rxKZu6Q> zz^boNpnrS=QeuU$LI3S6vIS7Gb>=cM;m;_bp zlFzbNMc{5qa-jY@x0Rg*tpv?$?9(<|<7mOWJ~Bv_COc8Gp0kfhHUjOfCf$Bi%*kfb z$^ZJj$@S@{MHID4{Q)k?v0YUfUqg|f)5c`QZt{uo%>BWc(o{7ry`;xos_?@xs!o$5 z3n`zc`f5EB?(_#>!}jbu(oEbc)2`4iEgC`o-jk4@_XIUC?$P_50qTvJNOSG)Xg z|5=xX)(rYVm)s9CJQnlk5k5BX#wTO)fafAX@4qbeiv!pNzbO) z1TN(az;pLtC#=6wB+hI1D6_3QJ#-x@>5XEYd_xBE-AILuZ*;5XJU)Asij{$UfJ_Zb zJd86`7@F@f6W5N{N=*;W6?50a$%s$o2K2w1t$M|p1y??2 zF8JDSJ-{(}VQG!cu6U+0W--G4J8G${A>d1h;3T;#$a6O1tDO}#6uAmTPI(1|lUaBUC6v`~e0K)b`VqEw{pM~)vo_W>$&t-lN<5n_(It~s2xmkKBrvMX|eOwGfE zIH1BsJbv~1%e80@f4XsRg@R?kjAbS)-B4bN>g#rQ5h0kV-ZUZ`6sakmFaz8gS!+PzjI+LYU7(UySUS- zX#rM)<^a!|L#FGdrG|ETwq%CnD&*eN+&Y~f&8la%^aZ~ffs%%o{7*@XHD5VtoNHu| zpCz`q3K_poAN;$*x(bz7Z2ewy>ego~browd7!B;x+`91;({vHjp?h+lL;O_pcjDSn ze<_n-BT2EiI(Cq9BsSjvlLSoO6|qT%u1 zhI6R#-b+vhTt)d3|4LEN{y$O_`y%A@XHjM#>^eR`w>a``We%w>&*20=*%9k6Kvbf* zyTlH=D7i8u4g~IExjsu%LzN_WGzo9q$^A^QSQ^()N89%+Fy9!t^eP90^kamHNe)9L z_dLnvh5@CW)@@*ZgzO25agE<~>h)h|WO*+)6V3@mH2``(DjC3`iN2DQX~D=ixMI7p z{2r6unZT+hm)cpb+2Cpf|4D{sQI=+?UWT3*1ccGnv*u+ez46ad!jE;Bx z({S|P8u?oYjJx3i;s=AZarLZRbL8EzYSG_Ld#sCqexb=pp`((#UkvEQBK!B3{1`QN zoK}C_Jv}BlD=qZSW(ZDp8>=@WsOC@F9Ht&>oe$)o63j#-!j2L;td0@yXmQd3jkOqN zz0dSXqF|+=+e%%n%WzAkak?GCTY`1Icn>e%?Fcv~@fgXoR)+6g!Q0hKJmX=MR?U1!&kJU-(x8xFW5kl*(;i|Qjpa)upX7K=fDQRrX4x?JG^Es8zHkU=>jv>K!#=ex^ zem=}9HmV71irIm~ISS-}C?yF||LndGzGl*ljWh=qFqKnR 
z>Al6v#HYhJ5krF#^=d(Yv({}E59r1E&z7|UPG#-}oOnVazECSK_adhbh{FDsFw{c8 z<#aa9_su6?XMc?>6q75N41TTx$%tV#H&^QYWN9I#%BnDSfS-5$%gr5k5k39Sd6+Kjdaa{6)BMHW?d3h{ zwB{N8N{9IP#4qtX%cWz;6BkX56tfh`p;rXVxxzk(0Mv=V+?L+m%-R>^n`^)L=sfmm zc(Owl4dQ)&z@qL8(C+GjAYW5``Ex6g;{z{%Nt$EuT2$II*!i${UR^I_mHrLFzSh|& zjl9enFh2-J%9r~0{YU=S`;W@Mx=DNQ+3RBdL>KVutEj65Xn?o5X4>*%o{8l_9=fwc zw(5&yeJj;i@8VwMWLax2X-S z5@pcl*N6FbQ#VUlnH|L^F^7zu5aM6%7gPZd_wvsy6!-!0 z2hX6LnKPb&&G=QNgnS3bcX;}Zq15xN5ngxwe&a% zk|5bIIQllh#kCIu_x?WQh{*UjefE0UFlm3%41|w^91=618dU3a;_EOupej8*B0K*7v|1QeC_-GBt4-@HC6*oXAZp%xIx zy)L8wor29X?LgwjVd(6yr>NnLY?Y`vJ%=>^u4DL`FN;O`3vk9N%+b7=+5N|d^B;h{ z^*_&AI_vimOTxY_4p@ru=DAm*0`j(qLM3AvB&h_?mESg9@zKDi7 zWRVAbVe9+#@;}guy34y%LARsD*k-pjnI3KpVGV<=w^J}w9WOjfn8U6JFYuOl_J+A=gZ0-yOuG6DsybN|hL`AoO#$ANgeYxcUsIm_HE&~5^Pyx;EUfD;u|#M z_{r|EJaJU$4*%p*?^MYyo$T=hwk&ycJ6)7#(Q%0>5+^@U9wE0WwCnYFMIvPN3+(}L z;yPw~o%Yjl+DO4J^mLv*I57}O9$YQEAI{m9)4E@id;0RuULPxAD)H}fw=@e=KNoA=MJgCb#@yU=`%(8=mk>YtYZ{|x8glq!!{vZA?K_v6Im_)5d3jDw85DsKB5qqsWn zhkBM`zmEL*g;8hUy?6=2hA=;(&w>ws6d3iJgVw003v}%lYwu})jy8QQfeWS>LVS;u zO4ibL86J%1&L+xR!S}oSgy;`rg2uddB8n(+*PhXGJ0x4%YzXRM&;0OVkjcLpAHow^ z!*UGQKiqi`n-e`G3o{fJ+>^?6nu7z@Vl`~{PGA{Ubl%a>9AfOO6MU19?KIMfjN7n; zwX5u11Tt+os{XyuIcT#!z5P5pDTA~sS~cKz(Ys3;Uo#Qr=h?#ZL&YFoUKfx*m7WV0 z2sZ1}wqp{u;jwy$evH~FOtWu}HKIY$X)ylHN!01^29|zl3w8)MD1cd5F`YAmPSHcB zMm(h;z=Q-@E;)=H6_q8t%4q4MJMmpKQXL!sH@9x#vFd#ck!{=PBhHV-Jw13za!T*> z`DhjNOMHLU^`u4ixp2kFz_ikPf6UM8_O$3Bts&7IKzj0BDcjUN8BDIdAzKm&iuUAV ze|&U8PR56~1!u)`CAhv$3Z4B#Q@eidfv{fJc)Z2kKdVveub&*(_M3GsTZKR6Lr=H4 zVQ0*u;3!RI&UY0N=MHLU8)j{rRxkfJN3oS>*bY7Pn>4^iLE7-Y#tK$3djXn+I3W{W zB8SUg=l(*Rx%cqRbG4~@+ZWY&nW9MC!t%Q>5c7a+L0sc?P9680Zv=8NgHu(8EhyAu zo){Wf+*^dVB zlv~~FI3dY)IacZ3x#?bSc^Ak&B&PqkY_D?lpr{lovHRHrqhF}-qFUs@;hw;*#*PMk z4yc7E_K8~6l(0qQc46T@MEIlO7vA)oR@s4XA^xcvXD7HJ%|0Kk{qq#j^`Y5-tNRk% z&+Sqzun1-@A@9LiZ=ZV6t`28kfaREzQNzBtLN?JwvV z$#Yu2in^;WC(ja2oXc|SjVr7FXlMTNG(1O*C!ds_>3?0y69igphebSy89z8#{GIFz zJut6IG7G1`DvUI6OjsS0&JZZQ^n6-8jD9qG!{%*sBT`a9*s$SK2f#IO(Ur~gQY#j+ 
zJRv&2J;NYaXj}M%O+gEdv2kZQi8Cjf_Nd&ugWUbz8edl8TV?GyM&>Tc3>to&EX!kz zyLg}Xz(?!()*5Z#5D9NO{QOg#HwBE}YZja3t)In;?oV|QP1BrgOtO=Ql@UH9veWux zhDBecR>VitB(2Z*@m@hVzD}|?TqyEgac6kdDH9*4P2Rry33IRH7 zbyic_mqdJz#+IFyf7JD+m$GG>-qemw5)`e7T{k6C7R=$Q)B{HO;gyJYPBG`U^Ti0T zVllGDC83PP8hNRY6=o*gA{oh+uUY@_mUOTcHM|k}^SaM>;FUp8FC-vXeh^+P--(EK z;n)=Lf+Tdy;~!4^7HO;U#f_U7`aC9=C+>l>u?CzcMG`OoSw%`T%Dbz;+v;@aFm7+l zqC6MtFTWQPs`U$HLtLS_&NE7;z>8F*#%Vi?7FqI|px4if@lpgwz(SvQXx-&)8&ikv_aYhzK+c z{m}{RNmhw<^VL6NK&{p0OiJxM{hCkj@e*$9mosN3c({!}kbnmnQ7)C#bskIj6k@5| z#={FxA-euiGhWr~nbh6yQxF)lm8smP{j4Ty=Mm(BtovFu>37<2q=HlC8nLx)qAPAm5S+3_B?G9$) z8`@o72n#+~{^YmKgW%pKp7s=7vUMJ78iWY#4RV9$^K0h~$=q)~`Y?5R!d7B-sp)}u zI+Mp#XLn6uOd{3dzenbNE{hC*U-5KTxW{goND5UBV(NXIq2U3nfW><@|I8_K7eIib1=0e~@y}WYzcpjJ4WWcKT5$Pa~ z&&ImHPx>(sFfcHVG+%R7)k{BNRO7FfyY@}_xt=JMMOe63(N*Bj`U#IQWHY!INr6t( zcBwWdB6twxY7ZW%OslpRgE|Js;H8ztuk~_rYGb}~c@#40lz`ax#?bpYcv+~?d{5pX zL~0a*Rgo|k3Pf!Q?jlPVx|gW6ASe>@EZ$oz-#37rB&y?fknvAFkL?m=kWq?_*Ymxi zP!5&U5{K`PK8>#VJ}45^8ilI*KYCb>9J=p>kM5<}Ka0C1l~K^&7u&8U4BX~3`DIYn zIt#W&nUDI0^hGMBD!^Z_uWu7bE#SKbY<*dG{&=iqJzX2j9BxQ{u{m6++OH&Jw&*m` zi5wF5z6W+BmG37Ia7Z7m_<}WAp}M2vm>cn6b@560=;p_v<#ctsliZ1*+U>OcM|&>D zwz*2f2NX5e$}`mJUUHvfWbK50GE%W0#51>y_y@)r3Z@Bj+mo{?94?E@L@(-b$=n^T z-Cq483fS{Ptx3S7Kp309D{`g#`8LdsFxa$N<;074 zV(F}v-d>vPEBI9T7zY0SI?$@0elj&d9@K$8Kv-~E!YpnHk3R8TFz+MPP>37wH$G*+#*F5X)IJF96@`e{>SO5n8;2iA$i`I6qls?}C@G`C!+#p-$@7Tn4uJ#Ce(Mi* zijV1Y&ajKghTi@)5$8z;kom1GBuXYxq>QkPAK{OE5{ayT>_2AKqoo5lr~DHFTC605 zsO5YWg`kD$%eQ11SbE))(J#8^aUWh)%;^Er(QI3o`+X?|r9Anxt%!1mh04=k@k*dc z#8?&Q%)>^9sU+1k|7W7B6YOXzgD4|s5qKPwJ`iYC$%?m&G-@23@rWCw*)MY7?F6d=(_cJY)6 zVmht}WG-1^Qr_vt>=W8{Oe<{Ya_>#etjA{F2D@y8tb5DOO8aAFCq ziA2KQX4?~SxQDub;m9yk^GHfYxiQB9xlOI%r{B>8p&;49e?C2s$5RtNAq(FQ|Gt6q zq#3|EK1iO+gNW<4sneXyxaenG(w^eiaMrJoZGig2b^=#e^7UvMo1Cp^pvHOV?C($*r#6xCvyxf+m z>Wn~$?&5thBc<9XZR2ssURfO;pwF0qEd7=0xI!aquYY(~J5b|ggFY-dAmPMrD7F4z zxx{Djz1`-<+)cw^(?rgSPDOE|M2B0~ZGW+R*AoXbIk4fKuT}oYb zMy}6M90_Q0IS>{glD?>1Pj=n9|C-1*XW&pf8wkfNoYJJ6ACaaxVATZCyB*ynr)Y1C 
zL)l6sv}nf#2kC*M`)NG$>rgVCn1|zwnn7YOgTb+CfDtBwv~Of|(+wKa8>*chfW}o` z@Z?qBB(hOL{b9!ILVKpWeAQ;jZHmt*-gUX5U-zqASnTJO_UJFJFGhPzSg*RjoRSvz z38-0Im0C*XQRwW1Q7RjqQ9vT3@GQGi_F$y3qz)7Y%3_H4b@ZcXH1mDqzk z4|!AF`0dF%ditN{E2O&=NH^169W?uH`}7qD%N0IQK1}N;S3ZV;P{vTGuBEoR^|ZEy zc`fIALf*!5J>M~0%L9SPvbH_dm-5|gwJ2?(O>ai7S!wd9n7DCz)!8!<}BKW}Uy|r}13?qc~nBcVjkQ79G&k3#dpmORX4| z^>Xb2wano9ULWkK7I@EUHo$PbDP&#~pWNJmmBht4;w;sni7o~6J3JnkWqpA9H7@i* zW|x{MW7rl=+AWEO(Ux7D&_5|Kdz#c=;HS74jrWw-QTpyHlaA!6JV*XzQ&Lg2T9Imy zyq~uuUNKX;5Z#!)I1>}%NXeWfi9??Y@XN;Zye4-Un1*}z8St}mOUY`>6G)CehT%nt z2^U+eLq4C0L&l1e4E%`GT6s@g5!VM)BsH;cmt(E>mh_uR^eS3-Ex8rGBJM;TZkA-; zS(qg=WYnuaG)hEOD1|JfDJy7l)xd8MvN9M6zj58N226?CBGL9KqWIB^sq>ue0qMHhz+8`{!{>0JYX z`ZhD_&C^F1KN)TZaRI5jcC#SBt_T~rT+8(wZS#1^PlqwAM;PyX6z@LlMH;Wfxej(U z&Xh?R1lE7ERHK`&rP<0DEXxAAEK;^K<6R(}(r`tIP%=16z1F5~IV)lwDu^Iu-t@%o&1-De){CrY<93#t2bJ4Cl5a)9}PO`pkuj3e_ekGZ7_WlhX6Ji?(Gjxq| zYOfl|H?4XFwMgU!zgsd!d{2Xl$M1(*AEO#}dNq3Zcu96+{M-)@GBa7iGI5f(!!a-D zwlyUpM!Gjmc(Ls)C zWqxdXN7Zi6eQekq-ABd=?bQC}bnX)trI@46U-cB2ck%GjIkUhx%?eKcDU8hyc#U~K z@BTDo4?h+5*z^xu>bAw%4x_hc?$fhnu|eQMz1DFhs=IxM z$Hi$c4aPqkTvHdT>FqCU7_<$;ZF!=M@@)YRUH~eTq*Tr(AX-8l@jghu?oftc?u~|u z7Lrt~*r$srx*gguV|orm_VQm_w8@$UA=BeWN?U1x1Q|3eCRoSm0s|WA)N)$C0keq4l6Q>9X?bcA7bImfr?@{#OO@q(Yz;~ zbCN#CzvtoAY`F&t=}fx3{#~8rQfi&u8U=PCCqhsloy09Nl}P-AG^{`hb6^W%x@QEc ztfF#Y3ozgNnI+yzRUskJI&#BhCC=>07T?xzU`GAPQ(sTzG{KPj@*;A4*xvjw^BLbX zFlcWXG2NMK53{G9L&w{=fRGjen<3YbT(Tb#9{86yjR{_oGGdRP#!p&4o%6#8bDyR9 zCjevfo!t58AdqhSoVZxN62cMpxZ8uxOtS`8&Imo04!!$Lt1-LVYE3YquYGRC{d6s} zJ^Vf4kEY*Nh85W$C=YiZg9bp2L!sEYr%K12s=>RQ0po=Ei{dA|+{e|NXqKIUBPlpT zNa3e@e(`yz{dYKxR|j0&znC`L(gG6Nj)v*BlSa=wwe*3?8(O|;>IiU+$Jn6S#YeJV zUnsS0HJy4u#)yKA4@fzS0Nl}J>|ao{e;VcJMgb)v6IKhc`D)Cn&~-r5yH;(z!h%EPvTjBZ)A!oQZy8bBbWU zhp@|-PyiN*($*zV!26%WiY*dCJ--O|htuNU>w;TJz>WAJuNN>ySDoKtVdnU&Z<_*T zGk0L&XzDa&-+9pTRL-Rs_?zZ{Vj_U(*|k0~&X%mH(nd_wC5PL+iX4=mGI4Yn|0je# z-CM3il76yr=jj^L%cz{6XdWjKZQzPq8#Bk@ud`lJDrlvUa^>~i%-cSw!`GxlyhSky 
zkS)89@b&Rh40A2}B7hVmvF>~vgtF5b26L6K&X92L{&n6?R`sCk5qigBxC8F#eWz&t zC%-p5C8w=Uh*#`JO0jR1DN{B}+i;KJJ(u>nP?NysE zw0vD3w$4_y!HN=9MqnNatJxJLxVtCWie51Ua&)cVi^w;27ic{n zKz>~;xUrEUteK@gzB3E^2{GL|HuatWb3@%!J}adZBss}^V>d-_LH?Rw#uf{@2lgp< z!1Muf{I7=8xN>uLkNY9w+D1p&j}(O$uG)P1L1$PFQIlY?=F+6gsF7@`DQRY3fdv${ zY(eJ!(5;K5n(rv#>FKHLZvT~YfKSQd9DlZB@J|1G)E>1$%5@-E9}K8-D_U&05M9>F zFtv(Xf6v@zWu@+ayH`5q*#c8si5G2u!R_k4uC8 z?pXIhgETMMx=sAp`7e`4MdNz*gie)j$OsZviMM$Bo{Wsx&{uQFm|7#tK-xPyS84kF z-mN41mm6g`?M=(U_V&z`X!|MU+iD{Y-XNn(Y}B9(Bm~l z)hEA&Lv2v9qM{)mI?0H~StRAQq81??u9xxhu;mvlVTV)O5zPl{Yn_AGsM+vC0S*a= zJyE?>_5wnw!Cu6$MP2WhKtSPibDHYVkc!(DTArG`YpnP>nA1S6wy}9A@%tqcch)sA zw`_B8h!DR@N6G7!&;CG;2>d0&dU3Zm8*?Q!J=6i$JdU~u5m_0L{m7(ALY#qW1I3pw z(&)G&Kv4uUh{Ce(Q1r4(ddOi>IGr;crUdGp!LGpB2`NU+w1f0{)D!7_ zseJBclbzQ)&=xB#BEjqVer&%#M>0IqXJhwYbfi(7AG!VS4?kIXPk49|9@z{j1k9HE zaCw>>xeg*r$^!%O_)Jgu<;-M3^tIj`u`r9NL|hiPfr1oJlgvShgl35((qTkZ@jJ|s zECn1IZ|c4Rv98N5yeeu}+HxVs%k{kEH7je;9G4yRv2!_%Qq36VzSA|o&#s^ki>Y(v zdNQ7to9yqxt=A!mJiWyAIjqzH^}W+Pqh5G~ouvxV!Dm4^v<4b-BJc{t-@2htJLpx1L@o+y$!1pzg%EhmykW*hahxQAyaE8k;Xz_j46|C@n3dDFDpo5S&#)dAx#rqk$u&jv6^QXFBvQ z>RvFP!^$A;0>AHq4}O;lTtiYAkT5DpIl^~yR8qyQSY|4)V3xZar?iL=M2hq9Od7>& zxD923dvC3nxy)nHdWQOXVKDk5qNhfPB_s-FgluH-q5k|@u3eiL65cA42{(A|>hKrZ zleEB45<{f55cLr|nyadh0brz5KoPl1`Ox;**HgJ8dY{d21JuL{D-*BxQjD|5gdWex zPdse0j*lF7TSDL??a*`hg$pEpketvK(eL*Ut2fuoVm)pnRy4+})rc%Xu101Bpij;3 zciTh{`?^SZxWIoCEPBkl?5kQ`+aS>Yd~@tQXnq^?Fj1)5(u~!qNQ9A}b-`v|EnvEd zA1I~do&#z4SgcyylE_hwkO8~P#MIuR7SnBcKKJR^q}k^?y~wITTpeoP$KHzZfYKFB zcYjq)VMe=Od9MM5XuW~Emx8&F@=uQ{D*1CvGdXu}J!3eP9daB_ zFDMAcn}o!>Kh%{`yPfLfw{3KzC=9szyCI1-^o<^Apd;Ek17If@tb~-_F|1CEtZ%r8 zF!c~mq>s$&@9ARoA-06au2o{7HftKlD74*XBC)SnJ&J=nDZ2Wojji^-kkkk#1q#A% zG?2WAW(rVpdI;}2>NM0&hPL*xQW0^2+w|`iCi)LHb|%X4xqbjL{9og$j{zk`85opS z#_i5j{XLLI+bstWOd-q9gjZ3`p{!u&ky+z#iH?mv(Ike^!O`P7=Mzd+lX=yo_Ex&Jsc(H`7# z8y^QXSx9ft#!E?;vQ62$1P!E>KpgIc!%J1dWnW-ydkc(hpMTd6?KA>*(++Xh#X@_@ ztCDM5R7c5h>R4s1fg&0^>VV`la66NftQI=d3h?vbjO8 
z3Y^z3d*-F0n?2(MKWArOBBc?Hce!vZ?}(8eNStZ1+*cWXg6G@+%H)~?gk@PAmohh# zSg7JEi19+Rk*|{W`q1)cTE*j;068R<6Ef5rT-%kpoiW?+aY;v{K!uXqpp6>$_Y zSmkNcz(r1lq9S#7%k>GfDh|3SP4)0)`-iY%J8S;(*)D{I}&SA~2u0w$O!@f#J~UjZY5!JjZIeaec~pWU)81_cyXAdj!m8nN`n{oC54ivy#sTMrAlkoe}%H}}r7qb>v{5^64CAm$sk z!wImbuo<+V9DjW2e|aJ|g|$^?47uY}C7KS2zixn2iFyi`4qE#ycNctatbEKt7H=YU zoAs;9>mAmoW!OxFWQGi>jArncK1JZixf3A>*etq4KbShV%~!1gNPH}l;Wd!$zr~}Chpym3QSSEVhZ=XA?cBo?c8h@9@-sPfJNCV#g z@kE_iz2;{EGY_;n^Q)ndTKf%FdYlYxzQ*b^kor}aA)G+8jly?Yptai?3SzU5yXB(Oh0Ql zVL@M0Y(`!&?nAeg=nGGW>bLkToyFSA$jzsEL={4#e9*($g9y*Xz0Q5u`FDK&STT{;Jv@c zz`rqv_DAdeEpRmK81MdGidhT}=lh}Kdl!PGFS#AVzm{=ZS_b}1aBf@05+@~Ja_2BN z(oqZK4=fBTA-XZqMTk5dy}t;`d3wIxIFKa@nXRd7v+5l`;t~Kt*v08W4O0O^Un>l; z`6lkEl7L-q1`JR}PePMY%JFKe5j~2=!YcSv8Y7$W2|WxJ8Zp>3J-aWnxFZttzCY~W zvNfmTWVTN#$(e`Um*TwVZ|AozqkudRQq=;UZ`{~5SN;X(6SPg%p;kMX^FEWLnlVcL zh|8S>O9Xn-7*!%P9Bp9WKSI=hGp0f8MmR+_(5x3ZRuyCvh+cM-aUDv2ai!I(Zm5+G zyPy-m<9;nnZ#tU7qwsv_Z$#zY32|4f06Lx83iKWKKooHUoSq%SS{KX8ri3>X#d@|* zN$4+IH^-sTFfbGzL**(mLvA4G8B6t=1ydc7av)380XhAL8%I5U_%BRYPT2g-@9H8& zb}|^O)J0Q%e(91?Qrl?V@7|)~7n|AOp&t?R*RS_g8F}B?7({8VBX@Lz@kt2<8YhLh zvm4SCk~)x7^BORsa@@Iag!#ONjCGYh$Hb2dCgulhsQ5@$_Q-79suUj-Z{(}`yX5VG zNYQ~I3SRH*-a*Ec?Qo`5X?hp^_GIHrBvTUOt^+V^gu4lvd42Fm{hkjxwwB#q*+lXd zrS>LP@Lm(=*W-JS%iZ`;sd*%7JzN9@-Ju2w2E=s5a+0+i+V(#a9-t(ZiUT1WFh6&6 zqquI0coYGCUW~c-SD<{6HxCmP?T>hDnxBo~078jWge35aW0mmP)w?0$A%E9`{CL~HqTyGs&P^) znT~pfVlnDtnGwL=l{g=~1#hQF!9ipFNCz?uH8ECE7UqTu3cNxPv+0|SJI=1k$_!@v zu0UsPJnlh=&wgj#Y$`J9oBgdM8Nwz+yT?l1Bjg|5aCaeFh|L;9TYmX!nX z+Hf9)368h3#Hs{qG+1|fVAd;g&8^}R!IL1d5Ex`7B1x-Jzudr9I1_uj^Vh-pPe8yn z!;hDEKFAgapkKg~Awb*Y)-CGI)U6jLUU!ZC87<92bJ&Y&a3Pzo27nlH{87L6kel8?Kv?*2y?;`ZWM!Kc5=LJIRNCqR&e=cd0Q$4S-Tb>$jCA=#XNy$$G`=bB){uDk#T0X)xb}es@`)cAj6o2q79z{vcB>S zJ*ls`7;FF>`#a)U{vLEgdW2CM%slsN^#kmu{8q1<;bEpyJ;nz!GraU8BAT4N(9GUg zYhV&}x9;nSf0ib0#k`I=KKRsX@j%4c=-4 z9m2T0?I+82IH)y)Wu#C#=uUX>3CqqeEu#etyBi<%Ji^YBM5bX+;YjQt8TGAFgQo zt^<_RIIEv-5dAjPed!Cb1bsr=o=*92e?c-XR$!1yOyPIVLq?>tnT=0{>=}@_PVUHJ 
z#V}f?RwY0i@-kxP7+q`+Vh+AR8w$MWLM4?+N>d^DAva6Y+De53=j**o??dM5_;~cq zYQL_f(FB%UO+T$&vCWSfD=XW1jJA^fgt;n}*qu}tQ8%}ps*B7}guPx740+zMt{x*k z(^*Du;X^1%)GZYgzaYqF*2J=HVwp6!>z49u$N8d!F%L`(ErP^SL`~G0pn*`MtV|JW6yXw>#s=Al$z;b$MJWIR{3Ojkz6zsQ98s#FLC897t(`F41Ln= z)uM2~9ec zzRueFSiw=(TM9zbjT}}<^zqwt_*aMz&(_}xY9Tdz0Vh2kQ|V13XQ!8TpyIF}^!1fdkx=A9Rh z(OkM9eCVOb`e@GMUZFCOAwk5C*r4NZK~%}|1@W|T3Qp3G-4dGBG*SyGd@wZaqtDdu zeQhnAJs4YWggDy);D6?-{UIew{e)nRlk>apb?;u{hy4@My^qr=#2&bBJ9Q@0>{`W= z4Los8#^Yx-H1?7-v<`KO`J$PenDk!+`3c<_QTwkXg^R^pnkG+(Y1${%&-rN0C zZQNG=TSIhfqZtX4s@g5*KCb`flSdd5F7-+bWy_iFt3S4EQ)m#H4an90g17PUwayFD zS{HIh8qKGl<)zfNzW;~qu0=$L`0J-J-hZ?JHj0HCpWP5TzZF1jjsSLH>vpjnZ9Xu? z2XD$u1Ylre2hQM!dj7>**Z!1j-DKVQ^m`YF*>AH*5@V8&!(-i8S>m~U0E^Z4jo{sq zrFq@$Ee2Okz0d>L>5y&RkXF4GR$!(q*GlUn-F3t8|E2!4F3Gu-$rF)Jdo=f zWm79Z^O8B;do^z!Bb<=mlrsaqvv~{<6CUj5U=l))ry_7q&DLym;=!VGah>#e)%2@U)K%gRAf@u^&bMC=cv+lO!WU4PUdMqu2Z%R0gC59kDt&d z9Rv-|F_puoq&v0PNNTftkpd)`gD!}sq6nVttDXPTeVuy__4afLv~Og17I{Y2EfrNj z$anJ|c6a2PDCSVIz}))!Ju5_%FdoSnU@RN1sPK}z_VR!)m&uv1JLaXnyVzy}2$<8!Yx)Ab9FXgs8(-huzPHxJ4f%k zrT*hu7vMw~0398&mq;!}fq0e&0pPpr0zmlX-}U?RWalNFv zLNa+6DYrR*89c#NRnBMYXyXv7rHV}b?acD@Od7-#~dVS8w*BF|r_W6!n|sodlbLh^JMSP7CCS6Ubt zVSj&PfNP3oH@fHo22MYWnq3%7)#9h;#Ib{wnQl9rkN}U(IHG?xbu_%?__t&IJIL3; zg*umyp7$5S75k%7YU_*Z_~s04JE&1ILB(8A)jk7dyi;ZWtp9z<{E& z@y)Bb#DDor9q0ilg8U+M8iG{QGet_#A5q8ujyZ8GsDBe$l#KtC&@x>A?>K{89z#rko`3c3fIgShy~*1Cs>u_fZy>_;qX^dBOAnj_1yiB zB=o=i%fES%cESHzLo%-Yf3qj{NBjKwuNOq7PIa+my^ev~HDmJeH)lJc>8Hu*|Moop z&B_1THv!`t{?FC?pR4&lSM&e=YxMues5vjS4isv+Zfks!g+k9Mta?sJE1678daeig z*K9^H|MnsMDIl#EJmxozi?wM?yP@T2LK$s{YJXQxAJVj)82|a-!hMqK0Q=?O0pyn* z$=DQkO0WK|TXh#u?k&6#;$|H7dN}a_%)2Y(Z%oFiHQTSQo7tUjsqZ%)h%F{M1XZug zmE$V~94;_2crL@Yl}gPGn{GEF$FZYLQcU3M6n|0qY>az;-_tyYlDZwU;Uz#45#+f z-o5K-_)vS;xL3Tamw!;DBzEw4IVLlF%)^8L`v|!CIBf!C z$zp5r^NrAVdeL9QNgR=>a0W=x9WRhY=#=vL3r<)QOzn?vz1pOABEVp9q0ydq2W+|w z?EJ**^A?b+s1s8L~k5A8I_AAX8xEXK`1%%0`;>o3o<&I#AT}oiK z+@BtfDh^KS*AnI&_gX?&4U&0I1&7@n00&B- 
zUUm$4H$6oY9`m-BIS3sn%n%mA!jiaQQ?N0J3DIQ^>}B^m;;*a_a>Ww`7%si{R&I*b zGeWEA3*~SX02kDlj7yO|;dSsqY_aIhx9ZB1#oFxGPhR=jolk9L{Poxr?zo4`#SB-c z3kmSrP~`V7b=Ld5W4LFH-z6b;nKWg|i3rI- z3|idD5!g(Yve?_3Y?s)R14_mwe+B@qHfy4(tWR{;`z1PVic8meJ9k^LNQI$Dhkd72 z-q=BbC_rsMlJQUX*WLB6-CwdD8^j(x1PlsMPOpr{zCVki6$n4bAr}7kiV>;<8AkE7v=`RZ%TcnbMWc8T-GtU7L!a=n;Z*0)Mz^j zCH#QVHhxoyw^|?!$;3Uj`jm}FmzSVy$%Uh!Ddc>|VASy1&fz(FVG&^PTg{tVJ}NGU zKU+A|<|B27uq>~k>wCAB|3eo=k^}>S(^=R;05rVs^rFZ_-5HL~(qC(_UJJ~E zET&$60}&WM=NtkQ{O0DzKj)A)Cb;@I$>AqfvQY0G0wjF<0>mD+N)2g5Xo)DXoSa zR-VMADf_nJgFf3^0~y8uBuLgnqe7&eB&5f#vjJ+&?vLAz98428LXRYFA*o$2A-FQ` zlPqmtKmB>e7XS-+vU+DcJdT1(WfX6A6T|tuDDQFP&!f(|czcl`57`AK9y7b+=YfDV z@n=DEc7H)0pMb?RA&}M|c6x{ybzo-z?s^MD8V<$xM~l?GNv^e=Xnd||fOw*GFhw!N zYBe_e7U1M0>9TX}#}1<+++Xx%X`G5SqJ_^qHps zK&OK#eGa~sf$xv(q!oPXSHB1#qXI0_ci;q2*S^Ch>n{4AGF;jJuZ*Lt2vrhW8IYjH zeZp4rJ6Km4i0ux*J_?||M+(TIvOcJ=IqRW#YE1xZe~)UKw-9Um`U~_f*T|-eW<;|q zGUgoC6nGx3(M+6(>6>e)S^1n0hWD{EjKgaILipTIgsb1(2;B*;`e58}$^D|Zd)HuI zbK2iOw8*H)K_e4Mo{hl*Ml}pP*o|`IXB?92y22#nhyJYh%R=NoOT7`rXyLx^?N1h&XAjB%_^HxvtPe1Vf>CuDWsy~J0qW`nX zqYvuGH-+uv&Fr)9MjC8AGXb+_KMhFM@5t1-<4Of7Mzw0d76 zFzM)HU7m@Fgg)R0M}lI71ZDOz+=u`(^#ocqdMT~yV zztH~u5vxkbg-NmfMqv5)M&IJz-1MG-_lHyo>s7C#nM4br`+fG-N?UXv@7*XwDCOGs zq`ysBSG=d?GF!lDW(VvP7448H>$jG*D}@Y z>QHSzVwfpbX3qtJ2nLxj33Ll&T~vPCoE8teT)s%mb5Ge&-=*3elhobJA73e++n=t} zdNu~)x%Nvcp5*|hm*{U!4+P-Q+KX-QX`9Px%-h2!8kP@y>eavP04(o)Ms@jWe4w@z zt9QoD?Y!^8>wf!L_H{`0cOat_i#3TQ7P_B*uX5UZZ6t2D3?|Fqze4l5+tEne*-fK1 zE7vJ@9^3>oo!bU|IkBT=SnLJdokjW;(gE9O&$QngZud9s?MZ8zwrIN*-`vOdBHP|k z)Y^8N-ft%;>=09GM}Tj>17Zw&j}ndgShf!1={zzocjNWWThUwtoa$wVD}cu-HeW)R zAeT>MmftlGA?nEi(niFM%w$1L#B+`P24B&!UOxYwSmV*7;F%mfuz0oVbK`RlWZ;8N zm~V>t@H!T_&$9FU>6L0^yMhqGrhLksgi2* zJ8=Wdcfq{~M_hxxZ&5fb+==xZb}+|&KneDH=`!{GN-(m1JZ%OYT(@`>W$&xNW71Oh zE&2`E^D90FnvSQ7V$vmEZD$bO<8ze?AUyBM4UpNjUuN<**Q>h>(QiYd`<&ll*Y1WO zUHoz(lOv4(70qeut2D&e@FHB(R|V2}DF;?%82U=ZBjl>8=q5~-cw@>> zp&pmmgWnIfE&_tWERO0jz~OP1<;~B$G;~-r&Mf%@)#?+b5?f 
z(|Rp3+h15-zUrPSP?k}*_cG`g4Kj0xdkMCiM&^djfjRVuU*MP3LSKqc4e6p-Aq3jYFDK=sQ_53*MHi2>edTUF z4bMq&sU%#6F#(Mq&iNuIGTfSmza8E&w-+r@P6}o6yL=VAAi2ljYGB|?^Cc*u8sXh?DJ%5~bc1b+-~WbpmW2V{qo z_2>4&Xi=uQgY7^e*qlQiYoPWQ$&g-d9dU4f{pvSz)jMV?-Wsdgob{%7DiDcdeLdZGB7_a-aL)2Myte!9%k=ry?lRkSBvsodu3QberL z?8dtFHI6=hVq1^6;?Y}AE_Ti#-c`KLt%PZW)h zv@-;C4c;^(HmmG3C6WV;Wbpzt^IF*IanIm1xHTo0$HYs8&=?BAJ^QfW#;=$|KP>{h zXa#utb&5F>Jpnz(gq%@2t+SLbE!DPoVay=7rRf@23;pb{PeE!&l&jFoohaa?-dX5B zxhnMY%Q_Qquq{f{6qro1bRyWl(~|=&^LxgguXVsWYY9kBA*5+5}axlHK2UY3V&W&I}-@rHF zx&@#!;djNK8%JjqJ5om(VlVKcvGJosBy}s(YR;T^@jKs1b~c+NyC5VtQ1R3STrkmV zb)p7SYQDFMw!;%<$*)J&A$ef==wp$N?=}lCsWmnmeN62(ZI5gc?kA9}|1l>;cy#iv zNVha|*3U)Ia4Bt!YQRByPu~`_7FzmI2j)|k2(eXq6LKDxPrc!s*3{Cn5XG94^n;?_ zn5q*glTY4rj4~~qWU4XQ3$+zIKn0K5Fn?qB*`@EMw*}wn!-$aOOpzo`_X1ajcx$6p0(y11hgcHH`(V3ayiA*rUxIkS)yb; z`DY|(@xQD*UP53jBV_8y*6az=QI38H3}Ukx)3M7LOz3N3)TgH47^*S7dK>tW!*NEl z80>Zt_(0>EoN%MJr)JAenmB76KQF3`iFp^^ zZG`5YaIX$UZAE;W7>v)`-Rr6iW%W@77xd9tu zFw+20fyZvpNzcER?3qwh#g}ivc6~dbIQ?q z&!EC|-r%$zXe%_W8p#IXmPx-l-S_z}`4hilY^Ffd1hD5HN*aWp-^Z1RbpnxaZ7Xp9 z7DJITkY&?2H&hYVP!%`no4|bE*NV*@Q^bpf!mR0aQu6IUE-d7XV5r>d6Ky4!bL^1x zwM|pQj=5$K$^AiLRzg+C>n-#13=t7ivb`IJR?R%S<-Cye>>pq;qEiKc+oe)?F((Nq zdx$aV&a{p3WR{^zO6^CwXuotxH_72l2dD~ER@OOrO&QUY8;ew`rZ(?zr^w(C*=o1r zCF{XK(@{vPC9bh!q*k=G2quylu=EA)q*S4s1B-#dg7Y8tov4Jaf>ooSS?r}_pQ6#N z%n0T7#^JVR1fPhK>O%#mvc3u7tQE|%#nFYv$|@%U%b4~ruDO#Ch=dWzKi=)2v{aN; zyu+0Uh<(-*c%HYnoX;*dQUEd#n6pu>HZsfiIs6;D6$28ranuXHKiWAU1d<0a;K8)t zXgMv-88Owr@4={7_k~jSo4h79n^O4OUh|m&(nNBvi*x>X;{w}P!48cEYeZnh*ymb9 zl`n6lD-gC@;=5s}bk4-TB}Orgx!JKk$I^NoUX}dS8&kJR2gI~4^5bwXsag)fd-|NU z-QOI2*rJrpIZznq`~}sQVJsPY#*WyZg50}1Q_OZS4>P#q$lOgxOqh-!Tv6 zkiA%q7sgx%Cgu*VLSr9MuAEpzRHx5uVYi>+PgsdGkzEb)^(h73l zD7l`wv-X#Gg=Ob zJ`q)1I`UorNPd=`X>8oY{At6zRJ%sdmnxXg`#N!L0i}p{gNtMF)hDkO;(_Y|J2avo7aUa&aJ7Hq?3 zjl*iSZL$%?R!C}m*=RFyMG1Ea~nRfF~2T3QW-TEyvMfEb+>jOBDw z<3SyW!z}G5DHo}VkO$T0v7rSbuNy-8sL)@%ht9KV8dEt>eA|s}?}=pAx%~+)R+nH1 
zH9p{xN?7>GBQ`r*C?Mf;#ZK?2|E1SJa_L$CS}DO7p#GHc^pKE@R2_#NYUI72=oIUbV{dn269Y3E~E#$(AO5d?+Gr&MdP-HP}Z2V|`cbRTzS(z!< zhA?hy$)hy3P+kvaino0!v9uT~vv2Zk|Y41WQO3FhF1e;p}NHkToM zWXABU^x(UfKc)>z&KXH%9VD!`JSWsg|5dL0PT<`~G^|W@$L#^eukpkYgmjwQYV2DF z;m&UP@Q3Wzks+aVgiVv`InHX3`TUSMfYd{VAO@fJ#w1j5Mi+B~!>MH%>y%Ui8b#_;&)$z2|Z(D-fNSn&CM`P&6chx9-}_U0;<{PzjPcYuDEoi zUeIo4$~BgI$W~hxrt_3F1S(!c_YW6a-K-s6K^rX8nB>a@pO1W~GFHHKmh-lxAI*G< z(rr+brp}1v{k4WtZs-`*d#51uK-Pya#|EdMel$m)(9S*gp&!0E)AO2$+Rk|6+0XA$ zA@K;qP#G%li_Nd74>M0WzThc3ot8)m zdx6g=Y+BgDRowD5{>_+1ndY`tYDK0_%cl@UWyXX;UWE+eVC};1@M~)QV*+^^^t}kp zO-k7GO$P|l`O=t$P)3GOE-uQJ-eMM$j4^F@$S(WK86Q8s5!D$3I4pM=#t?Z}W9Os% zY&RALC(Y1nerWK~9<17)*@HlLS+_$cl&(nGl7?;ItA=IQ-Sl;nPmVQ0d1uzIN~cI8OXOloY=u|p#+LAEDIjLD-}QJZ zq0VLrR|ds+D$TA<34FCSL9?L-|HO{CiZ|1)!B|9=vqbjZ^TaM4K1|a@09o+`(8(BT zYxBNqHa{&b?wirX>2k|4Klt7#38JiMKc`%oTlY;ZHHmyUi0^!wAm;Nr-5}v;8%2wa3D-#pV1aebcsYBD zb;NFB?d!(jH@+7?P*Y;kChM2gQ-Y=dgSr~zXi7;E9)W^C;{3+l`mr|!A;^6PS9O-0 zVk`3bw2HcdDKDr3X+fnT>jPTR5a_BTmclpUbS4QCe82AVM7oRUw96RR+poI;O`7a? 
zx6|Is<-|+*@OsW{Z=8J$^K3mz>(cxfU7?qx!##~$O1%mEMMk{cQ=5@{gR<}HU^@Jxp zeJdsScnX6^5i=iTNA$oU0waEvJg*Tn3}Gb=-UJ)IeV^RT(x&S&i+SM*fEpHn*u+lN zec`=S9{$U=@l1;v3=Me)36|6M*bpf$z1<+HUmi7u&0{DNZnc&xH0AR9Fm@Q3;fjnQ zxECPCPeEbIB>3xEPxu-92XK?}X2YZ8KbFsU!yVoRyYkzOId2T!dRvkXK`%gnfCW6( z;cMhr*oZR$6ry>l3}9cfZ5I5jUUf&htuz9&;Q_m<97`;O_5jW2YA)vEjsjE!xP^*I+qR||qh41oV>WEyO2LH0J+R8+I!L@tY&{@mb`^AbE@~S&5XYsQJNYE)D{D{r~dofFn za)T}zFE^Vz=u0K%@r@GR*i(YpKY$47p6aaYq4TFjLjUyB=+e-qILzfJdYf9H?<$E- z@p`;odr`JA9GJ>}Hb^5=(#BCb{ zqFDa@px9s-%@y-ThunMceC~%!?ir2v)%tH>ye9hnS=WsFzDiF5_l*)CJ8~_0@f3f- z)kxYEJ)M}nlpD~>Q^ejb8k3-RERbKH_5UH-?f);dejV(BY@GJZ3p(?uo~kN)az#%R zLKf{N+TL!(p$^m{$KH#n=FjkjP==qrpIPQ&q-b)pdQ^3YLbqo5 z!1tD2#08xh5BCJtCd>rmN}s4k?xhozQ3 zB*E%59C;J?dODXpw8sN8kSkUsD6}7|xfvb`qves4K zkzFK}TIqxbX`F*}q&{f8*FN+410iLBd;4oK^V!3n)ta#5b8S9!qTF!wc(3TF86{)z z&~-{3E>*v1uPIvAA=GNfzMU}5nZYAzVg2}8%OI-z0T+A*d(cR!F{ab&rE8&j#=TZ@ zNpJ!#cbwfeac2s|Cg%zyRf^U|+75=a$WG}Yi`Fj}@VXv^lB6aTY4vpex)#b0ccU0-TV(nUYo?>V9xs@)7{bqRVu`CH@YPCE+4a#1)DPTkc=A?-} z?%{gLJF9lmI8d&VaB?^8d=NEw({wrZ9dfHQG0t+D^B!kAb|v@?3`(*@z&WNUzk0DQ zi#q$j=Y-xE4wK-(Hgux#<(BHatCV88A%CH5cIw$C7aL-P@L;`eb94m+5BR4yh>)%z z@4XF;;?OxM?HGk(o94{pz7+)LxpEMM9g$1SAb1;{mV?!ntfxVKv(&Di4zChG% zub<|Ksg~ilrZet^KO$F7qtogVBXe<3LQx2*`#lKg76Ac{86BJ=GP@{z_Z!L}{DCc; z5@F_F0rj{8pa@joY=ak4$1R9*QdOjT5-O+@p>^D%E`a(e8w`RYA{gM@^*|o}%{N0M#?Pp$B-WvtXCsI% zeAgxQ9^Q8Ckz%jH z*L-v(pH0ef?$?A6%j=hlO?1-V;lSLIHP~Q5&BPK;`G4qo>!7OtwfkF;ZX}fkrMtTn zk?!smN$K1)NTYOjcXvy7_ue2Ko9=u*e$P2`pZosJ^Di?DGYqV~u63>J^+pt$ZC7DW zAawjTXNg-XnHzjnWc1|GvtkXi?{}qR^tIDw*Vf|`VUf`p#?vtH)Ty#YZVJpHOlAAp+AM+zgo1^m~m`DA^A~X3dPiErVFrTCwL$B9Hy6>1wiQIY= z*;#fLb3Y4F^b%5z0qZyE2`ygrEo+U=PlS@r`r{z26Cdjcf(BEapn!!+jrP;D^5*99 zhU~_Daz{cTA+Q{7y6*v^?0FHC`xNL5C^vKo`10_$bnz@~)4kinsOGmf(AXhZ3ww-Q5)f?TY zv#Q#~Reu*>w)45fg+5c2^yr0Zu({2k+~CMOv2wj+MB`#olE6awG=~N7T%>lCN=k-! 
zrOD*?HT8!Xwog8R4G3$pBlwturyFiOEzhC~(QI$;zwCvi@YubJCBqKGtig*|ojk4_ zNz*!g^7|a+7Wi$UjYeJ1?fHa`yOTEgb<6iUymaMUNaS*Kj#tB$u?|#jU=+G|y*lzw z=D4Nq>a0hmAlHI%wN~=|{V+f3x$M@e$kB~;F6P`o1o{1!)EeVSK5B4yIT!p|msI3f z1{V{n)G-{^+RRA9CDzwuxV%)Z_0X4_%#L)7!E z{Qhq=f?T?cd;6~#Ooma@Em5n9Ty`tk&!;O$bn=X%S}ARv*9*s=Ba*WX)5@FX)^<~kok($Z%$E7m>9v(<$B2@+4i6WmQ*`jeM2Wj zB}UG!*EqPUm2H>)B6*U>Vkbu7;SMOK6bB*P#oUHSImdsy>34xNBU@;Wmc1W_XVK9N95C*P#o~tD zpc19-R?83hg%TzbV+lM+W|&R*iY2yHp>E=U>u_Y!Suu%>uZ|(P8uuKD>XgbG3x(h4 z;}`Cx&+yq}o8Bq-lU0swVw?EJN+%Izy#V89d)!JbVfH>)xm7RiP596F-#4#&D6#N) zWjS?@PBR3}f_2}&y&mjJQVOFhU#VxsNu(K2hzjbU(>j-5ismKw@)HxS_F&GAx)ak& zAeBcr(OPVQa*2Q_vnIGG|yw@3=$%)<%H( zxe3&2``p3=_1ArXViQ=Wv3Rl1i``|Z4(V6M-%p+DVK?H@H{P$oEJ&o_H2s&$AZ)kf zu0MP(J^Zfsf9?<>N#E1PRWG)@)Y~|0yj>8xcQsSKr;tmk{$v-cxRWb~&lrPg{RatE z#8so5%Ipncdk5&Ng0&J2bM{2&w;6h^IAMfM!FwCV@mzzzv>?#*C(Nro0*;2@Y+6C~ zB---7vsdtW7K``MRQ3>>kv*ov+bxdCG%b}P@w=LH%@+c5uPI2oBgI<6QSrh=9@6Z{ zvCGlI&J}n3a*e+}Ssa0M->y0nyT|0m)-t9Y>TgO&r0!`MjrfeZl-#hRu@JoFyqQ-2 z9yrtjAW}>yq!wls_7ZiYg6&?sz2lOJ9gDZeG%MD>5N?hM?>i{3EQ@~Yur5sd^(C8# zAZ0l2w_M;STSz3>YgccBtFcWJi`3eBg}!{B;IlE4j%!>atF0|=D%|W2o>05)DUKV1 z-Ro1Q;AVe$kJOOFNQsH@P>widO9~wIQhkLe_dU!^oL~{U{4CV$^~y%GL4;G%{J0a3 z=TrKyx|n9{ch_$>u!p2#1C`!$U;9=OhVq4lvXVAh$8Jf{Wu6|rqM{+h52AP?>Z^4O zL}c;`m+zs5ar^m7Oua7{)0<5%|HkSBAZHV#{>+Hm_^`@d>r23G&S^cZThnLU|FwGR z3(cTp(nwD!{g$d568BYUOy+UR-#qxV-(W5ntKkA?@?Wv~rC6rx%G0w(W zb}g#?AjracY|N$}yF*~Kg<=WMjLoPhY=jaAr-ZEnY?^vt5|WAUz0_&Z#cQU3cLrIGXm8#|^eF{? 
zWKb<4zrqd9TeN-~oMyvavTACD5z4;@t*$B}YZFPitRKHeAX%`f7oiD8y5P4S6y1Cf z*Gk`CL(86$pVG%`9?Cv5v2+m9`gYl>^q{2UP(5~CVDIuE7uig39!ANqI1kf2r`K5# zT}Z7L^1Sw0%R(tLDnQdMAl$YU zP~~p1QcAQrZ*$o(m$!hl>>_tga*mceFy$Owz{hgyx85dZbLnWCCM(_j0`%F}btV4Y z@c?1|KX9FScvc=eM7J+wP1uQq{o>TqOx@T48j$>g>E2i*YQm+U5lbHb?11IaZ+~)8 z!S)2YAKjSPuyLvLht+>lAg<@bh=+u(vtRg#|BTclv<;Mib3^g33cfvQnGEJ%)2eAp zJMG~*Y1vI25ada|HL59%Vb!T;VZW_YkJP;ZK2);UNE31~u|P#966< zZyiXU=E`AILUAK@9am9axZ&sp5}K=@WX+B^1Ajf0^V|&39aQU703E4clBEfllf41= z!zPVBktq!^wyoJ!!VdNty6k$sL6VtrMCt=;3kptchK7dAZwXb`7w+y!5Ywx!_LegB zv4Xu9C2GtrQh0ksEJ#_g0{zI4vZ>aM!BP#K*|W9;Eq@Ry{1MUjpk==VL*8gwpZ#!0 z=b%1sCUpAaPjd2?+Pb#gGYLbQ%lg(K7r6ST2;6~dcK`0^sjj!sDG^xZ=V{N=W840Y z(uw*Yfc5=@!OA-e)e;{vBprmlW}>Bv(lL)ci0y2LJlO<)ot+#&Fxx^1ZL2ug;6^6p ze`M2cqNOALa0)*zuiavue|Z{osJYrpX`nexK=dSzYq){8F;_*7#LncOxBJ|WZY45` zl>&<(O0J?w;+1;)x%$Vc;!TutHrh6{qrI}9$xAM+Y{fs3^bHok}Cx8;W7$VO*`|2C6{(Tux3t;QT=Hc842m9|2tcXJL@U1F10_szbZ)- zZjeY%EoJ{?L*$Wqo!U#TAmpw3Z`(Y^ofV=Jz@z1c=R)Q7s*H-8)zA?h5X@GgQUeX= z-uhY6N~N6x;0>6gGmPr*QzLFF?yY^V<)iIdGG3Wr^;7cP>$_F;_RA0=_{su^o*oY-#V#oZ4u6IYY+JBQbsLwS zozOP$&eory$~VUJL=}89A45XN=;Zw=K-rX1&IlQmWMJqwPG`xD>A(h?cQSYYG17Jx zG@GDnUE5BV_6V{WZ;#Qy)e=j*vf1-}JcxR@cl$Q&yay?dbWZp*{?rSa=%V>&djh)u zvht6+JwUFhF32L{r8w^j-qrB1RSvG^h{zDLM|w5zD(t^y5-E3}yZ?fHyL6b(uGzsX z&#!{M@IELC#&TVYY{LSeDO1=+D&}t*0L?yLp8s%fA+Z0?Lfsh#O+d9;W$sf8JO|oW z!!IV3Zp3~y#9Q^7iZ3&%5XcjlLW%xZ%5NO&9mPC%Q{+{uVb%AFjJYacWvQ$#%L4Ci30g4-JO}PqEJIx~~LCVlZf;tmd8)ZqL)5 zWbVDwomn^pm&kiWqGGtunv?XJO>vi__A*gNmpMp>*a@?ZKXxDS-~2QOf@*9q)!@F; zrx)QWtmr-}o*F$M0;yVB*x%HOA>k|`wT(3l-ET#TnIGd>M0{Q=m-D*`-Y1RmG8ycy|cV#!7Q3_X| z!yzpEtF@b`A^5xsy=9e9<@h}888~cG=f2^n-O>ei3T}}dY_Tq zQNF|}#sn$$Y1Esgovd~&%@lcW6;)TC`Uo`Ggk2PP0A2jI_gG%5;@E#ODA(Zw=wcjY zl`Y#qS9Y^jQNYiJ-_$iQNk99D}s6t*^xTk(0 z&?`_;n2w@dPlDt>gMUsG0k_!-d6N6w$j++Z7p<87N}D~KWjl=5AdmYZewaDTq%EaX zn^)OO5YVnr^P=3U~^&#yY}E8u=9Yy#X;0k!lPALd~RxUs`b3 zw^-QYv2>w$ER==t%g99FA1BoYe7JH>Pj$RTCCE=1@eavI4;wtky>ElxmOCidup3h% z)D!b|qfhR$D7Bz}#3A=eW|R?@-Kd9$TY>+s8P8u95a$nnBM!8*H6-w!QN#&8T^E$^ 
z$s-BGBQ5exhPh!4?8KNuikR*x5s#os}G z1i?Xt=JIpDjV{2dOfGR`4o>QZL=)#<5php7QWC!#v|Ng`0!D3lucMSxN+Bl{L6+`c zkZ9Ho{--l7e$Q*hbbe=T=JRq$(}(LM6{XTm4uB3ep{Qi&a2=~qWPRuPUL@8}1*nqb z^$_+f0ZUHfyA~2z-0U}4QJ5&@jZw$prMW#Q6i94R->KHRhENNE%^3w!4f$?fq2q#n zo%_hhId~`1YA*+sC1$|(r2*LwTVGjJvu&yuvIooP;%CCsMC9%E_+G7jSKoa_u1OZ% zPh>C<+I|zQgubx^3n$htd6OrB;oiM%aM+-?B`%C#pTL}Q`0Jo@M5*JiRM8fbUXTj> zN#!n9a1EE(dc3o3Z23yy^e0E5UpDrkbMoo%i?>YEIq)X)LU`cn9JF-2s_{PjERIBr1%55a z1Akok-ln<(}%xTwRztghGe8C&*0tn zPJ{ znS#W_+Iar8CV?X5n!vYX{z&zTj)NZd>#Y4GX zw1G5Dd8<|%^C@69QG`)OimR>pH;}dK3G4>k3w1urV`^<};S^wq;fv#m%q0Yjl3eLs zS*f#NQz+PJ0EmU!aLe1l_d$Xk$-jgtf;#RdTyyN6XH1kAqIyQMUIfV#l6faPm8`EP z+llt~onF&43KQ;ce6Ot9gNT8`)7YmnU$82ks5ZE^Ba(>72*By2b5_0XGET}5Y^@UwUK!h27m<90);f`NQ}UWlZaX&HNQ zuJ(evoU~kL+SLGm=>#zum`EGkEA2*9xu2F--6y7trli0r**;|&jrem|s9mo34DS{f zB>~9l{7PA|Tx!PDN(wX2QpU%UYr;C+k}|P==Tu)FJ_f^gBP_Y z-`cT5OZzhJgWI>GdlM4-uDO@FK#;fs1c}t6-{YD!RJFPbFlfJ~U!BczK9`(u|0p9i zP5pD7AwrMS%3-!Pi>Mzb{A~>IRaDMfi+k@(5C@RShnh}fZ_Rut}<{54pO|GC4S zviQHrp^GAnEYJFx@~1-tdHq(#jdEDnMS$K36to7mk2g$%9xnWWHw;owb1qOQ-As0$ zuSr_JuC>Tr?xfny45<|6A(Es=gN``u#d+J8D9cEZvMbKF#`9@-NK26;XS((m41Dt2 z?oK#`akePY!X1G6Zi1?u@?({Bs9&rjqEkRW=JZCd-S2Y@M^@uS!$N>Y=HWF(1-37(X{q(-S$m3Qymf$>Eegd)C?y=f2JyV?`YR|_<;v9+Hp_L9j_G*i)Yr z%M3QrXAwTR((t=G0mToju)%021-@~Km(p=cSN71s= ztT4hbcqNF3DkfFO)`CJ90KyGxlu@wo6d5G;N01)Nz8MUN9pgSKq;0Il@c z8_|Td{|_y0!HXNT{`G9WC)vHWTlXw7LgR-Z7_a(ot|}7PVMjUP%+q#=!cnEENc&-m zD4@K1ojga66x12Whv}}NN~T;}0EMLxl_@)unGtE4O*eRKPNULkr<$)2>Zj6+;Ear_ zOWpHlzh6!#54*SZgfvzlQ*PgW%yK<%#C3ZFBH?Hu=j=IqVi_%*}g4dY8wUrqS9G?Vx{p&jil#UitAJg z$%0O4K>*a!KjtQ;Lqztz3PBz?dSOIINKaZ9WO~r61aZo{M357#*xFH#$}UWM9B)(q zO{bC%zP0S|WCY*Y29(vpu>n6FzsD=HOHzTX9uvMZ%jEO7!7!tpBw7aAgA{+;^)0+< zR=UQ#mz!BEFuKSstGq&pz8Ll)9L*?*P$5A8bmL4FcJBUCkj!>FdFx_~ zqMS<_g8UCu{WevJA%+hx35;nnec=R=Lhqb@8Gl?-EB6q(U+>+lZtErHWqN*s(@bw% zc(0o;6AJV$ogdKIjRJOAeitjydEY)CevdA=;xWzDD?%}(3|Wf+uI3VB-#P0&{qeM- z5cj*NGASB3vHQF;!Y& zx00yun2^p%f$;5X9#ma2a?&)^moo~4NGp;TFc^-AxIlU+yh62x)zdNTidOL)8>v)q 
z@1)apl}j`#=`YlLNUw4@=(0ce?!fskb847%agUMaGg3smr0+pjHbJ-q zaXb(11kidq4KdiaE@`}W({1_7tG6v#(6y%H_(iCuCN|d+=zfX2SdV99t2Wa&7MTTK z>8-Z08_Q*d?oxdlhWc{}wcB;HrWcA-`5sz^_MdY@or!)-(~4gx;M-%gf|XwSLC z(;bD87!r95tS=n$YBa|{*r2X?^>YEZjJZj(B|*VZT8T*8W1!~fcd9QiMCEno3Gffe zv;vKxdkCGb2Vnf-(I3$^2oQPDyPfOXPgpbE`7YZc=NVW(ti~e3kYo}N;1D>^<1)Jo zT28-2f2M$WvsjAU^y|VNPrkqa0X~iW2|@gitJ&9G{E?p#Eu_hRSx(I|s z;ATfQ^@v{M*t-5!^Rv8Q_7_<_4j2CHUbMtmSb4%WE+y>{!(jLd)6hTK;j+yR7W+fX zo8iQjV;vhA<^IzQ>zp1(RP2Ac%hrl9qdgT5=w@`p#xriuQFz7PwUueV|82~y8ljMq zv|6!{^KExR43UH|T=;;zug^4bzl`!6{vs1&P7y~YkDsor4i9)6RrN?&6OnsIDIE^I zC$iI&|8Xa|$|MeEWx4)vJ0-!!NilNPUvK5T>x+pIxJlOQBlB>ORHyf&VsfKqCn&&L zafeC0LZ5;4t%)vE{!hbcDlKit(fXo+qSpku8}J*#!rRz>pA%UZie+52S{P-BA{sG} zJn?VF8h^mNaVauZB-lDylhqS4m-}@mth+m>Cak;QthB<>yShbXz0uL<%!9&diA(Pk zSf*yKUUlJ;%0A-{;ED87)u%768E493S#BR{vTL?~6hvL2WshO;?(8}e%DJ-JuwLzIJa^nkKzk};>2@ZetY!0x z_vvybe^H8N(C8a6EMje-+l@WACcK5r=9MnLb32CUlT^Vo>%{sGyYomM@GoZ~?qTXV zJMDV&@<#yB$Hr3pHL>PmzS%@z-$kZS(=AlKK$uMr0lh;Kave3{KitM6=*cGXg`?_* z@5GMdEaEVAq4Gz&N~OqSly<_#f-OrM^#SQSKKaj4AVM%4_2xMLA(}Vo_C)tLuVDBk zQ{6?LZ#@nl*5P8&8eLBCa8Ls8PPMtL2KneS`{L*xa-;dQC;Pkf+?1%e5LQy;{z}a_ z|AkYsfM<%7`SgJ4u$rd8JKO}lms_SXH=K;3cKfx)tNz0q0a^p*fgR>yglV#R+0-`b zO%ZE(@*+}uH3V{9&jZVCc(ZOR|Gupu&pMtF_lIl!&QK8I_86#N!qfYzo@cPdndoXK zs!`vEYq~!VCEn`bxkZC~I!@^;H|V0iUK^)0UH_z6kMt2PP1engdF`*_jHFgnL6QzN zYi$E;do?QExolO)xkngvcm=A^FHj-Owfonpx=|EA=Zm(dQY&epEhB%WM)`(j6B6XB zLVd87AW;?~7X&qN>64B8W>-7AaV;v;OKrhX9NzR-U@#GE0 z6LB4nef!BCGY@!DIrFBez0xoikK3#ltua|;lIJc2?u#!RE{ui)X+_V_rA)T3N&!Ul z%VPyfN~yj0uT-eZEJhM4Od$}nNL}EUJHN~N^L=#i0iL?)k=Ui!NhP+_9*pdyD`Vb8 zJbOmUW!@%uJE;j*T+erT;9mNd;C|nDqj1s@uw7heaX6yC5v*0ai)_u-yXN31xj#*3 zvK0X-#pQHoN+ zw}RH?8>jThOHy}G5N!912SE4`v{;|Gy?%eadwyV4VPu_ykJTG$0mR}c?`Y*kp2VwD zxKV~7pO1btIf(dcipLEUT?+Y>!uo>gkJoq^&Gu1@1l!7`JU=zD-tL^4uSRM~+vB-3bRus+ zFb;A^6dV0%z-*#}oJ0UpV)qgCvsk_qtpAKXi(sQ@7(--uzq5)r(&Y!>MKDSL4wq=I z(Wnd?pBj(6q&ysFt*&GHm|_y=LX+I^KAHV(1FO|Ep%bUsJJ58w34L!`rgnrM>pk@c 
zUaTGK?#t9x7c3+*550fM2KX}hp0<#SxB0-6I#t9DbSCiMdf}HPEZh7}45hPfQ@a%) zR|F;M&o@FrG@N1SDjhA{etNfiJ?DGBt39%pSctz$m1yT?bZq0<-@6?aDpx+6-RnnQ zlnloWY56mOHa;~c$Ub4?lHN+?sXWv+Bqe)wn6^L>kAE zXyT}0M)fV3`%bREa}D!&F+0-fWWEnBH(JalWrSTZcSizNu1)jNqnS19IQOXf7C51B466y?BP?XfVIUQ!uZR5BH4gb$_QMD{b?%-Vmqth>H5 zkxpTera!-WE)8#El;Ul*L;XGkjDa78{LR+{=(1Gt4XNS-bE?Sm(>mTI_U-6UzuJOJwPShlf% zQVfl`2z)~v4f34XlmLr0oe|Xc+C570 zeby@LJ5DRvojY60$NGS=qVR92_&9k-g;O}#eq)xLeqgUcWK+UC*a>L|l;Uel2_)+$ z#-AD)mIUnHlWSCF7n{WCbMV+3iw#O0Jp~77&0+&ZG#Qtr%*h9ywMJ_ z4}tzF7CG8|lMY#|sAV+B_~47Q!NABZ;XLW20ewVu2D6U4?e|G!ho2@IOzEaR1SZ!w zTO|xwMxfJQfH{}M#qZ<-V#~{!OSG`b0BZ~dx9gxHW=yxQ5Qp=b|Aj0rv^~7`XmWH-}(k9kS9mP*2NDYj=IYylj=^9?~q|)Ovix zGM1umDftsh>_3S0CAhUY{Eh#~c_xRQbsz37r0^x||c$2<`8MP~LKsk0%Ra(%1jaS1JV_6Z1nHM;0ju51;B z4oVD;TH2P(22TV57m?NB4=6^b;Da9OKZ>D;%-hU}I^>nPZt^g2;&h-Wm?@jO(Nan1 z9I@{p*Nt!Q|K_zd2-%^*lBZI!@`?HJ;E&UYE&Bzp3#nl-JigAlqA=L=Bw#Ukr0O}H z1Y<(V7w(!Fwqi?AHz?Nf9@l>Ok_agjq9*2uC;!Evb}ecd#*t#(^dtpIo3Z!Pz61f! zJpIYf^)y_2b^+roe2=lP-I_Btf{yz)u(AaGvsa$*Y3mb7Y(32oQ`CxHv8r?k$Z09=Aty~%6eAo4t9e|?3b zHFr%osV!=G4_TKIys9&*RzveNYwQnv0tp$Rre{f$OKg18!(Wiu0Uqm3XpP@I?5~T8UQm}N^ zD1+4ma6>RVWeoL)Cu~4a^yZR%=lgl*1@1(@C(k=~)HZV~Sh@W477gP;KYjHx>fc~obKIr-%EMDu9r zh*wkDVNQcZaPE-gVvo>c>X1|18VzY?5sl4v5@(KsJtyK>qK{*2#Ihgcy`8+o%Rjj2 zpuK>?$?uvw#4>u)rVQcbWD)5&qKkMQT_TJ^2wgrKQ{udcq)Ov z;o4|nfpqAfKKrGG?p4-NB9n+gz>_QO}Eu)-{g3 zct*9`>kw|!gWTxfa9DCFV^i1R9U}d&`ah^t^ej(hRUO?A*ctWZ4G;$`qb7Rm;22D@ z(Hd0r?=X*io@KW=Pm6e|;`7V$=rVg-HI|Z<;bsm-gP>b!F19}hU$d244%Qk#z*R{6 z2*4yVF5wv|>=J)af8~W=r@*MsDuW8Ma+c(1E#I_NQ*gdyv2yuAXN8j48dCjnymC$y z!k~LgtCdq%W^CIxkv#!-f`X6ia48r0=8@}$20@{YOKkE0n`O^gtb#}^|5Xk5 zac|m#=Lv{hCPApMe5y>HDARDwFY52u(gvqHx z*4ke5u5Jb&=bVJ!d1i=E*c#X&a$0&ouXwhHsTU418*iGltPEIhMzI4PNgHpb%*{sY z0fl<^GH9Cr?w*2V?kHK|Yc?@XC7-e6em&F5FrtnyXJTCgjcnKAad{(T?ygs!h?mh< z*F!2GX(%ldbg!je>8^HvNyMmVb+=&9dS#rI>5GFib0;^I{8_^{R$ z@oE%#k@=tegy2t5v2c=;f2r1mpu6enN9~UZ9fnVSlQI|hS~#jzoo&`bx_+VPn?3wh z{bW%_0rt1m0tJb>x(2A@8y@@>srg2`OZgXb7(0JDx^14eR4m;rah|~0C7@}l+$+kE 
z1&+FKr{h(jg0b!KZtv}b9XVt0zjW+c^dtpe4jxtL{y+kj6spY|96korf+*=8FS%GUh}6)-*A4G=C$p@xLaC)X31U zW)5AeHBu&lmYK}%>C=LhmAVj%B>s73AZFM~M@YBeD4DSacA5=j?fPa%=Y$@n%c`%A zt!cLr=~w0hBl-=&$pE^4dQuIG^M~(Tv(-=0(z!Fa+Ru7W~gKxVM?3=|Xz) zBGB0+p3A~vjUtN4E{#%k^kiO8+==VU?^uPQu5Q->Zq{35UYnKcX5HDUYUaZt0qUYE zWomAELyL>^STm;QkP~<$@2-c6XvNz|u3LeDs#BQq5pSrkxA_bxu z*bcy(F8|Q;3Ufi({P2-0KZ-ome3s*SeVZzDA=NI{drhFaW8=fyczBleJ^ycZlY|ix zgR>eV=3S4#TuFAZ!ivr?a!N+MZptE^u9){@H0>IS*rjJ9Ye)jx*fx$3HT~fe}g>;Rvyfq_pel~j1N{o zFrv1yV+WzV8adT?zRXJuLx|JI%HCnAv0m!~Kjy>r(U}_me@$^di)o^YLWYgN^4p49 zXoqm#!dJ=+DZDq^`v7D`P^?-lZhb*;a6A+(%v`%Wy0GILc`ee}(H_dNM+oG|d<0Og zp4xiV?DW3J;LO&isAvA@CXLE*`18{y#9H^>M~WTn4Z|GLf`fL99Yba{f8`Zn2x;p! z&3a&J#jw|6Bc9BU-QUc$g&E4)so;Qqx60a;U#!{6C$+aWiCT^#b0C}ySFYesno1*7 zkIT$qzBrGq^gi6Zv=DFh=NUYEabXUnli<1Biu*q15h6|aX4%Mg|7GO34~dSVVs|_@ zM=kpqtq4-FrSFn;?vqAci6pv;bx6q`Rv@)7KjleM{@b51tiy%VhR;%iH=~5+*CR*} z(OP6!kAnps$~a~<&qJH4oBvuiV*)dt13`JlsTUsywc0~C6S6WEug(ikA<~ZQE9dD# z4a~}gMLQ%r5mw`fcdtPd>2c0s@3>0(j?V(&z8Fr>tV@sgs_`aM%fl4PcKXHK>fv!} zYy`gNS%DUjuJ(P_PWVSitOl6R-EOB?;YG>9PykL4ZZE=Oc z%_uG&i>@mVM8}7(WGA}ul*4D`)DSy8uIL@+-xiiiv`3rv_RF1Lj=D?p?x>Wu2>Yj;bM&Q%x``N zuipxJcv-W(6?g*PCcrO*r^126ggf|kt9yUw^1F%b>!#Mwqu8b6pjVrVI`(D{Vy36O^Ys()=<> zvhEf7bjUr-JBzjy6(CT=!bmG^YZ*nH-Y%&KU(TSA=Z%e~4Nu4F5UN!7F7N(uxc@u$ zatFjduxp^p47F>~67iRqERZ8QH5+DQ=o9mSD_;HI10+yU3_so_0gdSP*HNY*>-HR~}gVbDE}Is67rYN{~M&?$F>B_1b?m*#L(5OR93*{cBXPCT?)ADceECH#G*UjaS%2K-%Y#Vw!P?ZjTtGUYe>n%>%fHgN0nd0($>H#vJGOt|D+6q)~o%J-1nm%lc7nme#zl=o6=0tohn$YA9@+ULupGcsv&PA$Wp2`E8GZ~S!?!AHBg zbqh$Cu)oonJCt3#&Vn#VsaIF(Et$-Q)5-?c$mkqTX*t|{?7xmAFr`ohMKhQAtoTee zb}A-ENhaCv7T+~-^ECWo4;#yDeVKk5V>9VLFsNrqJLzq(k-wf}FEzdHw+P&Se&_y}Br`h3$ z_Kh$3=InsQUcGRfSe@&|&Uj2sVM6yrQtjWw|0w5Z87i~LFUDwe@SC(TK67 zb48z}S8Kn?!BEzfbD;no9aye(SjpxF@W}1(xQUj32g0ne{6)5ZiE<8F&&Eq~kj$1# ze&_(B=*ByIV;&n#3e8!mc_W&QNE?Bn&sC$Bj z?Cd4(rzO-OKUO7y>o+{W*g5kjzHgk|*`fJOsJ;WWhlc*iisZQF2pzZK@a&I9ZXMD2AR@c3bgs1M3gDV}*=7_g$$*~od6CIa zBwIOk1kCphVwS8o{^w?oO@yhf^h8P{byA-Hswf=zrP02|s_Ra}KS1uh$6J2xk0NGf 
zq3hP;xWSuk@w=7C5Me;{)%Dq&Oxk_yxIKy4B5eL?0#q$j6L0a0EU)6K( zn3c0zgvhBm0Sf|8#DA|3Ih|2>&F;|yf1i|OHKWkHZ(g#Mg!%%(j5uu8SpI;G_aDx8 z8~oIql{Qnd9+!J11iF((!6TM1G{61Er?V(-_Cm29xiQ39gj!tZS_h1%hyyU2$N^ai zc{TRt!{k}Za&yba;ygiZsH(u`<;>+c*jQQyO&1r2<~HCC=`>TsN2y_Gqjy&PPteuD zm&%OB`;_^f3nfH4W|~$k76gvHQ~69`hJrEVg^emC$Kz&;5#J8i_ndQB^L}F}bcc4c zV!U1-2`$&xTQ4a;)YWte@j2(HDlG>pe|KBZ4Ad?fO&4oL(d`S{HVd9M-q>YLO5(LF z0bfD!Onvc-4&&((h~{8_L5$OpS%8EvaT^vVQS(zk0ky!1CM}(QS<-B=ii!vA)f2M| zcUQm5gz#&#wH9>kAjU;!s8{UjYJEUAp@4CZ`~1QsnNmH+mC4Lil-4Js0h+N_Muw`!z)HaFpXR2}Zogh*|o{k2OQ9O7lvt zpdXH3uIa0%Je`Vkv_h3-PteB)j^m$F+9t8C6dgIsLE*y4b@J2>O2cjL^ZC9wJ)hj% zSTv6UUuW`gS4MFt5!W+r-T{ldQ$%@IHqNUv%Ox;&9bO`%mUv=lOTAUMr!+GVBl@+x zv1i|iR9;pt5Ed3HXdcmJQLihS5B%{RN4C`K@=Vl?tdIr=lwul_X%Nr`9%pD1HzhOsjt@{EfV)di2ty8&u36Fz7R0yh42 zIzsG5Vcb(8=Vk(oPU?!rClW$841enmva9=16agJvI3bY#kG;1Fsw3*! z27?6+A&@`_9)i0&1P=uF;1b;34-O%?ySv-L9RdV*_u%e&kZF>9Q}cc`7jLTmi@BJp zE^g>LUA^}1z2te;vly_U$hgxiZqD?!<7jPZupfc2LZYeu%TrQJD#_lk3)K3S_L<>Q zvUqw2E0iZ%y!SZLg-|-?#=o}A17`jts)-MQxDW44WE9k3(*j`;0Bue^L7Rp#ED=$k zF{g37)18NuOgyVw8thFFSxsG7*b*vtuV@E&a5Z8U$pQ4DX|qW_?yy2~^oam56aQ-c zSkkevBrcKdRzy|PvD#$M^||vF4?3Y2Pw-!q^Bvv)Vp?(A;~>8{YC20z4eo}epRmm{ ziR@GCCQLuwRI@aV>CqRDi$(7nfFjMe?T9I}YdJiR;o723xUjoMH9zinPi%H`*m?vq zfmbgu+uXK!pR(hRNR&BTQ($NLD3GvntJu1gNtP0=r=FFI6{MNwcAAiAMzwBA%xS`2 z!`DP0(a=tNSN;6KhfYzUzs#^su?7bJau8;l~i0%vksrR;%2-wZT-b0U(W|;SrTno^#m~#)8PZ|47!Gm z&aicNebl!GV*TW+ga{aUyXg_D^3^i;2ae0Vy?yJ{q;@{ACFork)ej#bJ|JcZ=QmVJ z3Q5&|&m5%Ke|QOBm$lreA{FtzyWQ73o91{`7DOL$Sr#35hN19ClX8_Lu38=*;t1O= z7YqY19N){Nv&OJ9o}l=4m(SRS7oJJWdOuu)6z{UscWiRJ4D27(&Bo;Vi_9N2|BW@gbB!`DE| zYE)on*V{4Qm?+V>4LY*%1dAEx>LvS0x}lQOSmhqg(k;(18q3nOVN<=wg?zy%o$bme zY%h>~x!22{1Hruc>H(8OEoCBJ+@irWKEOKiE*Z_ZC1C*?_2MySHHpB zW7DS-1H{8dguleY9r^#n{5KBN37>_$(!ro#&hJg6+?t#$FqUae4aT?kpv?}J4S$CC zHS**8RUe65ikP~qXtxhZR{%i5Q_S9;OG9&rXLB0zoJNOF+W?_C*A?^x#+otp+n$hQBFWfiAf#64YhVWi(r_+tZW0 zpY4rE$P9Z}cPvIS@~-t8o1wKl%g%e-f)Z1dj-7YLLIq6mEP{y4`@1_6GxE-#{$eeU!Ebzhwwev%UDaM>2}5rb)q)0 
zP4)JHPXGi1TsHB~B&sGF^CR}30C%d3maG79U4vMSN-c%0MeW5t+1|9}#MO%7&-9kr zyAx={>sQ*XyzvFdT!oLr^I5qRKsRFBjD$PGjh&jTgE9c6r*p}Fomef zkT)Xo%WZQA(b;EcSFZozszK8KldIN0&+%yma^xt!MmpVBW1hCBf`rNaQHtw8f#V{Y zv5(hMXSQMu`AZ&Y8T~3;07Nk0JWeD|E7RlbC6E!PN@`2L;+>H{eWf&4YT}U6bUBwa zH{bgBP!z^uq7X$9RXH%>$1@DrVg9!rYU>$W7M|>|usWdf*AA&bAya;%)YZS|J?G93 zjBK-6`SobVoQ-K<#%UPe5+#*VaqObUW68Zjft;)e53@ulwF3Diiu5PKZ_gl-{B}-Z zPv-PSEJuB@Syj8#)jg%ew(tQC=}wx8^cj4zw5x8V0=brjWmkt=6tG$T=J1i9Ttc04 zJmo{R?XmKUyf-Ov9Xa$q6`)<;7M1DGz+`=0y7N07=6n2rk|}iZi_zfx-o=|LgKP=d zq%&Cv`xG7UuHwAjNYg_pe4la(v3^XhDYLey1G#&apS(}%1`qnr*Iv!5^8>Hdx|7ae z*?W`Eg0W$qc5C>@!&goQtLXKQ4-u8_J)yo4hgHM8W2?D18?mJ+=g_JGVNLwd_|W@o z&Ys6)zxOx4r@aw+`Q`gELH{K(d3;yK*tHJ7u*eAF!>GO(svo6Kg=oM6e-9rSRogXu zxMUk2n|IcAPxT2gVk)-@BCpzIXK76*oqDsy+d#AXb&Q*xJgq0tS8g*}PEO=tu1N8W zn`3>&2e12QI`DJA7u=I86-C!_EmzRMqrhozmNXreMDGr~HoAe#qNj6%^DuR!W$V3^ zlmb`jF!z~VPDq*E2yv4rG&Wf|n_CigzyR#}TQ6Yt(P#f6NocFQH0wn5x#LXSpdct< zA%2^}0?qRah8f89YVqZH(@0*LJFO_*mruipDTSN)aDP&n?dNSs(z*xaPI^e@zRJchjU)iy!Kg)y4?`UKL%8$*kS*T- zFS3Pm(v!*l;Ubj$`%?xEW1wVcDP!V+*=OocrT%p@3vABcuk+`n{+qx5!wn1hU)-<_ z^QTkrA1*KEzg+jkSuuz;5_?Vvy&;U%6Wcwis$PHlk7xMr|Nfz-?63*^CzcUc3mobH zU@TAAu@8S>5n}_ggC2^NXx5J87j3_ZE<>BK8Go1mzp>Qsd;WUWWrY6>GS!zz|H)WT z=%1qgSUx);1x%3VDL;o`%lhoM7vM-z^Z(7;`|pkQ_Z1UB#Q*>1`fsfE|MAWBNdYQv zvC**~;44I0w%ka8Zf^`ZPJa>u9Lmrb3~BQBK*_fC?rfN^(t3Q4lk4_2Q26|YS1#Q7M(i0)vQH9F~)DK{GmIb)zJ5`6I7R+XVG40EllW zMwMlFddQQv&qt3zYL>QI`=|Rs%S+str<# zzcflM_oR8lA@@Z9tydn8XJu}!wt06K(^{2}AV@!pvKC30R7&o>Y#W6?)jh`Vk8Nk` z8`0DG|1THpD=U(%Rm$Xa@Md>2(Cwf}YdZ5$JdPF{!$hD7&FXf2eE92BvY{R06BaX3 zr1n{g%C&Ip8XVU0n1^nm3fX;r;nDmtYCNpb!MOFc)!5sGJL{)&a*b3O#qVE@^6HOK z#G;258c?C~zHjvR-T;s=B0JGTU{oGuMoyZY-0o!a$03frV3o>Xe=>EIrgaYIy%a~i zEAo}sf)NjazxTzt?GLnnI#+?4|HIG2CVx-0%Ve%UJ!OZ=JK$;w$Ce%*6(iRu%w}#V z)={@HgCdJBNMf}2-@ntzyA*`VV^`R@|KlGtEv}jWZ3)7Gu={t%1L9{(spSCAaVU6P z!W1rWAcmF8&PGh7@!1~dZP7a|QW7^}$(sMs0{D&<^!Yo@iZ2ep{u9^KXqANk-4eKk z=b%5q2_2hT;=h{zZ`Hv!hb`|&unn z9>j3Sn9;O&^9G;&mD>rkmq7MpRwviG^7~mA$;V><`YiH4JS{TuMB6dx_fUBh 
z@_GG^%N}$AcrM?-mh+ry=sT_BLRxH1sJxdoZscdFUyAR6$OTo=h3|I=lpTk|EBQAK z{ck+M(>-V+f6lj#v83@U3`BMsjZ5#Q`$JdG+f7NFelfCCw7A!0p5uV3=GTqg>baM2 z$-iWBt~!6m5H9trd5?$oN10ici$)?vONUkB5u6{j3L#(fo{L^-3XX4&cFslVyE{EM?t? zPv_+O|5N8Afa;)W&w+1VI9uOPabm;cT6s>CXvm94laoFbf$LiL^3vAtuWEmu#wqtg z!MzM$_IT8Q+a8^%x3B;4knnu@XCs}6>6U6Dp<8+H)WZeE_5yA-I0tbq zIh3cS9Pkdyr;At1dwYTby`xyo^?AwpsT-IySO%17{K@`L8JiwBYcqWy z006hKc*PrT*$R;Y0kubtN*N-K!#cfemC>m6HW=i-Fup^e03p>x|0nQ_5cL=P$8YDD z;E#!K==2jf8eO;17z89XzeUH}+MeeQx!%towZ!$xr;aQYDio4$P3IR5Utx3FZMQB3 z3|h{#1?{5WuuKoWT1~LUb-mcZV)@R!GyMCrO7;CA^i*E>@2@s|p}$c|2&+UJ;?5Zo z(~X9YLmsf;yZaMGwqDU(TN3V4P-G!Q7kt0VNOHr7X&?S65`seiCh+GWnb{m<_ppfU z_rM&>N~fJOo3TLQY}H9|YoJvlHJ#jeY5}g&evfQU`YbmjXIqSH*A|JrOXu@Lz;9o> zZreuouf8^t=uaBco6SRZM+)rak+G~W$*2^*yYws?$IAp8{*rfq=YhJPM_|7{*-ER_ z?J}7?pToIzSB3X2iIDu9xr%&@*aG5FfK5;L3L!ON276ltt73Fz~e6Ch~OzK+&=ZU*=G>#83VY! zZ@*7S{W-B?k$Hvxao3^6{Q10uBps!0kO6oySqo?ZZK`wgGh03p)<&12{UShHe=WeV z;tBKX4mih9MNj|uhYx8my!`+8v~ZL^dt8k4fMbTn>^=_l|z=|7$Es_}&YM z)D@RNdm1;ZJp>zdKuR$#O+^J%spx)Zs*Ux`aRd;GnO5v86v}8eERIoJ zawSB)tTBs#@;MB3{d!%V@zfW#R=SZx!1s6>1U6qLiOnulq!gPTV(S8JT}5qaI*D;Q zG$=mYY!RfyK;0jO16^?`%T+NBx6{R6$UGkTq7-1Ll+J}XcCs<_!aT|*P+HE0Ah(VQ zawHq^dSQW!Ylw*JZ=ivSf7nxgK(-aH7aGZ!^LZ;K`zBYf2TSWGK;SAoO0n-YwwYNC zxVc>uM*nFHy-hcm3JFg3&}6AM-F`M(`_U^o)TRve@xXCO-n}p5w12o&{7920yW3gu z3@*$NlQ!bj%g_h_2qJVu-Jv5(aS~|xYwdit$%ekDWD+eL%Ki(1<1#*LgFHu* zV^=EM7wYsm;(i8f4d}GMOxdJ&l`@)+3=SDT{3LmE+J&{Y9cTd`wwXV0f9mrL4JnB5 zZ)vuSnkp$BJXrrq-BMmr+RrGpiS0V*O#AC@p$rLPaEiOnj1C5Mk^&>m`8L7@QDbd~ zc$q@NueVb_YFko5Tst5(F#&h#T8+Blah<)NFYHwCBH}ieF3(TKQd3)-?j$#R!1aIh z3b5t=6E+BGbl=^1vtHIZhGn(;yEF4L?f5ew34VK$Q8|0PSi9V%OQ-dKJL|aYuGbpH zV?173)dY4E-mkd7oRqg@LSbT}B!=Y=LHd)AWQHzz50^6^*CI&Pz4@3DKnO-bz~zM@@&qR^;q_8dC?RkL0Cf$ z1c7U4j*s`X3I$4{^NOM)fDi`}%~Clhg+)eKrUmv3_;SNkAyY73f@$idsl~I=3*N%BwjyeinBb(E04w-_or5FUDgZ zXen&ZREg+69T#GdRO%Hw)37DR#{^*pT%eyS4(O|T4N2MW4G|~Qt0!6U-Ev)8lANnH zj!}>SSdEPe#d7ji;KfRkv+RjplKs!lyLSNupy<<>p|G3+9Uo-l|K&-)flMz_^SuD@je 
zw04#vXDK7f0{Abk#1hh^i0LoszZ%8LoB>I0??xQUx#SQ~_x|b3LKh7LjDpKrVC~ps z7xASj&}LGEo88G0MC2#XFuMI(Fdjuf|E68TS|8bYZLro}j%WdvDDQ4R@|6|i4Yfjk zc<@`0W6NlR{@5Tf;`#?D+k zwb0$kN>-G*?^I<07~r>l;6}=|n}nP0kg{H8MS~Sp4UZD^OY)-O8!~%n=Mt?q_WPc= z0q#r%^1ZIoo0}cZ6RfmHvNmy<67<@$j#m$-YM*4c)Xr4hQ|AWCQc?Rq%Ru6% zHmjCCj?-QPUWX*e94I$tVut6S_J-5td%GKKG( zdQXHyW7fjBXdUSCeelns1EdePc|IBpCFaT3Ip2SG(?=>+O7X8WpWjkldFBmo7l;Vt zZ2(p_3qjD%V<2@|DYcvxxV@4rHiw@>12`F^TZ(0rlZA>)5bX_{P+8@{l-iHl;&0!* z3R4hZx@mvngr81~+bS-v3K{2vy#<&|Kn^vY@-IkWXR60N5~(7}S|68ajy#StFn588 zG*xc5vxc0FPkG0HGS~IzI9?7u!!%Ep{z12p~(j>RXAxitg z=+bUScSC8P6Uczq@>>uF-Xiq-1*EZaJNm)Kg) z*bU(j{1aN*kj9M)0&tL>686c?@66|oFRg}URM+RU5IFdYdYP;wEp-b7cCW6isO~=C zGZ5{TYdp-U5Pkt6LSmm~X0&~%MPGLYW^HE!kV@s_1H_Q@)eX6lgt)E^IxRL0o(|r5 zQz#)u)Q8e|p@X`{?{s_tKJCbo6B}D>Kf#S3!Uu#(`kk;b5!bPx0ei-Ok&lPBbd{fu z4|f7oXL^hC@Q3NX=yQq@iXzsbKYWY)dAE=k7TLORZp**;dA(}5Mr<_;xqmKZn5-I# znr%D*$J#H%UL)q@ZUDe4>SfWHa6}pP0?I8(t2nH2TrFsmt3oh(VPRt3YnLTJNFiq> zH#_DeQl=JD%15%#UeJ$B zwNaeiOM^oM8^Do2_0GYYS@z)8Ixf0Mg&4Dk#H99K@TEaQ(df}u5Z$`k1t^s3;Vtl2 z(pb*+>ack60CK4I(G-SKh5j0^2Yt=fyS(P;S|Gjh?8p0_yc92`%s4wjPN>b=hk=CF%^TH?Xw4@^g+z`PZ5I=Z}|3mii0CQTmWA zuIv)V9F>+k4&#k!2t8b~P=0%_8s%m3@@uO(+a(g)NM5h6q$cBGAw=OHABe+)Juz$o zTC}1JrhB=LTFRJk^l`%<^Xa=jgadFja2Dzg{pc`^w%ZWpQ^&nND)FNcsed}`IN*{W zsOVgGzx-ZP18LppB;+l#HS!fOnLF1F5#3F_#?VVQTdI)yFVX=U#{czdri6Nq&mT3B zz2sq5(sbI{sJjkT5+GEJo-M=YY3K_iOGa}XKI%E6qmBE*wAI@;X}S(bm4rEa5u(DH zHQ4VJ!|F(V*U+l42>~|{EqxH$&$XAkoM5axPDq%FIUWa!5uf_M$HjFgtcbs!8?0TR z1?n$n&3NE)FX3>q)MO$IQfWm2q2=z>0FE$uX$lgWQVU9DjoDAgb3ZId+1nejxh%Y? 
zH7?zf!vtUQ2$@hyDgb=*OGS#uCn8KT(pFb0)Fao?KHVCA>IZ3V20_rx#q><`lg3jK zcI(Baluab4H{ zUU510Mfape=X_3!3*%ifq1_|yyBbWH{~SdcqQ49c_v_FZ?nn3(z_5>o1cXJxA!AXE zlYB8)IhHE3SQ2WmKR{!0-@M|%15{GvY7gPYYOl;~e!L3k>Cy+C<5=ZMB&8}2h4CD| zb>7IynH|%=%eAos$mMsf$743L;1iY`)tD0?AST~Rb*+g88lAFXiHSyPzV6tn9`CD> zpg4^Ml6|7kzsymM5m$Xi6XW&^TVjMpJ^JE6S8lE@6mRaQP5@r62Y!|VwHD}$pe@z> zt9LlDJXe{NTWa8u*$8pMQi&Zo%Cs$CB(^y({*)5{haf&`x=$T@UMq=aS)of-z$W7p@Q&s|1~w~RaPG}5x4X~3v|2}*Ew>1Iu#qLp2n}&SP)fw;K7GRQ z8uT!Tpw+r;bkXmkbbj#L5dU_G`~+cdw7M5=zk96|r3t27 zOwAwlC0Vu-189ygD%ZIT$ExpIPAX%+cC9{W*k8ldY?m3^%}{ABkB{L?tMr7I)1zh6 zO>C)Fn4E#?=M;4TTBQrN)7`Dj0e6uWh{qXtT4P+j16IZD`+U+hAr@SL8z$3B7lkD}O2L#2EYsslWohiLj!Zx0d<2;J`TO$1l>T?{+B2P^M3 zVov>TQGwm3GI+6II}|J67g|p6l3O&TRVAT&{8$v%ZmY~lP6#}+jW%3%*3 z{l~NSG%Q0!yBGq$J{L3*_!?{#ZQe{yaM9pl z1!^T?OuAMX3`eDy$V1=Km#AsrV1?epY?=~*Un)zTl*qIsyK^1hjZ#Q&w`dSeD)Vbi z*4gi|_U#o~c$+Sri}UkM4BrD#-RToY++5Kz^TGFKA49QOYcqOGPRkvB=^<9@ZC$LC zdpNlS#HAA8zF+dQD7^!Om{1M0A2n|w)t%sBjSI^WEf4-`XW+z;nC9Qs(py8=wJ?fA zvgwT`v1ISP)My%BV_RCS8g9*=8-8iWhm<=mi)k{EU)t)}Y6x^i7rRQ}@{3yDa2;N+ zDcsvNB->BzM0AIfnnnBM6t>JSYLEb~^CgL9|rIKjIoA94mHZoQW|BhsvfV;q_Mfn9XnX*Cu?Z2SHl$gY$@Gcl2WvI_BK+7Tu;|RAJ(`h z1G@YHrr5BOd)8^;5>Jjq!KXEk?BOo9?ZGWzZ03iw{03Os(YI?qFoHe-iZxAPyUROn z>c*T!{f^$o2nPUte2Im!Ma3()0)`p6w)m)} z;_k|sc%$yhH z8t2MmJ6hM1aruo~gIXWrtFAZaEpv2lY~gI%tqKNe=iHhvSUQ-^6L_{(bZL>!d{mG- z)Ys!oaJ}k3kLUfkOV?V8NLA1%bcg$4$Yi7OTw`inb~bx1;MkPlfL-#-$sX~|g=_hy zzcW>F6)0;;vrw^!T9Hb*ucp+ux(p6E_D45jc`0kzQ<#2I}RQOp4DD zyaOw-IUu+_XOmV8dQV>v6Wl0kY^8db)sCoRw>zU`(VSzeZ56*=ELY-?{4DJL<9iHy zl~2uwx0k9LjR(uFq1=nE!)RKoAJ!ZPW)M%?e6j|mmx(dITPS+#9ZW(hQj|Z!Bdaz} zd@WM`*#pXz$q+bQU&EMKH@(wMI$`?N&cJAK_aojatp1SaeJKNRKo4f!pnI7o1||mI zomCO02m`38Cz9lQzx;UK&j=HpX$JseYKMXRWT8o%(*zm?t8L_alGc0=esSMDhMg5E zM~D@w&jnd%g5uk50k(ULe?ba}A7Sj~dO~!)3rj14-{(k5#Uebcmy(w6EMgjLM!YSA zQ)|*yrP>&KlX5r;XjS?=zsT-lVMBneZ$f?7hyNB7hG}%ZROO-f%ys$xC52aMUbPY| z1b>JKZR#La%BBQ;=9BOq_H$)rEu2S5r!`SqCB0K{Vwn%}4h<%97$w z!g4L#=~~jp&Kk&NSof}gkm=WvaOw;klz6sMk1od!9GGQsMbo__C;{VstpW7!l`vgj 
zJ3PGv^`cfUd6MJEHPvs=4~eiQ#HqE%^G_yKe|9!eHKGusAQ)PxwnFE*Kv8@T3 zgLWnd2=x*kx^guQNUOk*6oCLVl?%@3UVzO<>4JC zA)wpU6@-A5p-VK`08JPD)2y(){w&eOG%MLY?f@5DfgG#OL%jDutJA=O_-@@zHCwiF znpfq!%NO3&OO8g6s^Rhb&88F2qwtKY9X|vv{A9uMlLyElipJN4d{}mP z)D|jI-0c_eQOE%CQ})7x;fUsbK6N4f`lk_x6e2W>Dabw9)B3T6XX7^ZWSQ7k79W>` z)d000hn-osM!{PaWdAW}&*C-0o97(aT@QySTnLzDZX}=SC%7$}{$vtG>a$AE77NzG?WVf;W_8u*SA; zbZW4Rc8=GuJT+gd^$cpN@#tMXEd4MF3Tw)hkLQ7n^{|FdcCX?i;;Ypbwn6?2&0$R# zSOF<3pnN4h{S;L>ON1qXv9~p(!Vg5aV0X0WbIx>)q3?y&f%Vkbr-SWlwW=Wq zL0+;B|6I$9fHc2Qb@t96bBNm~gfKsE)xyLhYhj9KZ(*TlRPpioGXC6pU=)G$%oR1z zs*KHY+F~YPL&^jAq%(;^cdWA}jv*(L*xJRIZB)ih4lx{mO}lcGG|SV@J5Ah2Iw5ML^Gh>|x}>Sw(8iADE$3;5w#gpq4bZV(Qd_= zsG!{pcs3xM@DlFv5nsmge3wTC5!xgE;Fa0JJKlX-ns@@?Hj3JL_NW!RJzfl{T*y%Y z_>A;m2lpzw(sG)N_V&GOUBhl!_I&H{R9p&YwY;jMTKAy5Mm{TfC=Qc=Ayv$cQc{@~ zUmIB8V5q%IePZCt+1Hulfb-`ZN07F7m%GlFJ0YqDsLZn*x?BTx5F~_tgY`m`hT^<~ zyXPr+^8v>-VS$@2cekG|tG21)C~Pa4Eo?MuPMB{!VQv>QramX`6t8d$TzxDb-YYe8 zt4G=P2sm;Y4;{8_T5nJnjJF?{33Pct(xWfybo+I)r5qP9LAoh8=@NJ#cVl788&$H!I1mJ)8;Y0-Jvca0b(A|)W7v$J`a0aJO? zwqK6&P6opl?U^uenLOZbYyXh8LVRH7uD8Q1=?q z9=$F!J-4_J8lIgV)gDgZ8FaK}o^|wrlcSD)xAlgUak?{fB%NLv(S4?a*NIfzumcqb znja0foCTbnNJ{~HfSj+B?CMk z*q;MgGtG_K4+Cc2TqBgW{Uwe@{NVf`bJ78FQP+qGOD|&Cw+?w|!Kw0zu))WKr%gb; zp}@@peNQ55KN$P$2_^4m`GM)@7Yh+Lt;Zi=$a- zy7OPpF^|gU^F1kXMxfzsBdnat?r)b$eOXGUTAMxgp%h`NlVC31+r_?h@- z7*N>yA(D9nk8xM)&?;!_$(n*aPHKqp{>KWaExi$Z?1!py&moau>UWt|7So2q%$Lxv zl7ehv5|!5*EA4BF%-(Lc8%}gZ!36Mv+7uAGkP?AHNun$Ls*!qCu5zD-I7Zq*dcxjV zf0Zv+2A=-@HvInl?2zenvqBsnxXTD0gQnQPb)>cj;1%ZJSPRD5hvyu1!IJ;=B_+=K<#P5XZu|N3*XG<1c zdv;l_edw2Y3-@){QmxURrO|_;%Pt8T4}H^r{*+z2;Oed-mseRi%p_eg^Ag;bXaKq& z8Hm0}lxL-O9qJny++MGhFc9hSK_SPpI-L#H#;hDGpJp*CnkH*_px};hNkO7@w%B@g zJ(TnF9+bSjMAA{Y0nmxQTKm$vn0iIc^H}Bmfz!<3L`&zOw}w-9y96z>NP6n6LPCV! 
zv)8xpQ=5c>{62qz@>n@8Vzz*?U+HM+fSkI!BK zxZS}ia7g}@l{f2;SaB<;AxrdG!1)}=0D~@c-D!RVz$qYf94_70x@ChqJ~CnAUJn7f zH>GLkaC>1lH4L-(O0mZ(obFguswF9C=E>G7+yR??TA&9-(@G(pX92g)cXa`7EZ^%( zT_sdILLI4A`vV`#Dqum9a~=;jSc-h+LDh)cf}zUhjanhsMR(La9b5x*wWFg>e4awcw`-7 z)imIFdtaXi=|FUoWa|~L2C2?uMBMJ9HHDPTNy@onKy%2wv1!G7u`HSAte5>oW$wYr zp7L&6F&{rjo`Q*Xm-)j>91TDf3%%%UbUEN72KaSzUbm*v(dx+a^cS>%gxNh;0^cRq zWo*%c8H_wO*UV9O*Bx5jEcY?jBx9IiCoVWahhatq?--~5Jt%k-9<@nrdR4@#9mj!m zjy=0wcrn20RN#>(#sS}0dip?2fB6A4b`T`Ax?9-#E7L<~Ixe2L%MEdtoc+8DRe9}& z(OUu*oA{R4KDVO5X*Dfkneo_+R4WE7ebW~r*E2JL95e%hx|+z&-To6h;VSHCpkloA*CWNr1Y zFIPq^T@}n#0kI7CjawYf?FXdpq#itv;KOBpwFYZ8aEsLK`K}JAJJi9A-`HNZlmTo0 zc#Kq4kOtk;S#ZD>Kd-xouh3OwkLFN^_;q*BrJEw(Q*yhU`^go6}yB>Myy~d#qo_`3BXLM0~>43RP)L~TP7Tq@q`JI^o zDe%;|Y$kSrQ!0LNi21ylnG-=BLSEJ*&rYtRMF*ta&t2Kjz8g<$%hA9Bw3j?p$$|uS zwwJ?6OoG;wMINN+qyq4nQU{4PrHSb`I$L3?$Ue$vm`g8~BCKZuzlS0`KkDY!x2-BF zl`(LUir`Zu!^!P4;O60CMTGTOg6kDB7Fg15DM@CugsGAT8e%7rf05GSHbd*{Kuguy z?NK044C$|xqsi7<(Bv@eF{doOj0x|UAj>t?dB0}3lgZIA{%tvM45_^&DCT*VUMR0> zs=NVPs_bUdz*OK6@Meek| zQ9mG6{L~_kr%#=&X}_^ylb!g%Pp}~)qSsCD+JMzk1)FWwc+3rgcdvh$d~-{-8|iSM zYWk)~MWs6wCi;;`@F+yG3Emi&QRg`xgEbU|OWmM}>=iu=yTm$uf^!o3fLCXxYuKSi z^ojrrk8$8>*saOkwHU*AOdzbdS%&m}yeFY|$p##T7#^4XIG2DdU-Nl~fL0V4t@&p* z%TZOj8WPoo=a5sASGYd5(eoD+NV~Wumx@d?g{M#V|PH;mhqW7kL6MB9+DAD2Uq)<)#VTWCn3C| zTJk}rZv@`haW*}QmPfIm#hd*g57 z_~&-toMH7aQwtnx_x_rl_3he+V!4HBrPN6*?b|o;38J0Ig_m_WU<~G!?d(tNcKE*5 zSf69s!+tSt6gIld)B)b+OD9;hSCQxn)nhMv2A(ea9~b<1@<3!XonzamlHwQ0I)V7I zsNvO3yJM@%##xJjNWcq%f_tJDBk2yq8Zc`E=DKm*nCK&xu*wMc?amO4TU}p1QcE=Q z&JTTn&|)b|S^8tZvK9GDdM;M6gg5>^BVD$iw+zQ|cXsb*Hr^Y(rx0%r;Cy|-3M*2m zP(Us%m6Cs^{pgi$K3?>Qk=ey#wB;F#TE@hcARA|s}RHtT@ zXXz~)dCR zrUFGeYQ|K)4OFm`J`G&?GkQPNDrbf4HS}yoZsWSkHwm&&U^~9V%a?RYvO$`B1X>!# zb9+|EB65?|T+gR+DOcbPU;|Kg^WZd&aZsbiVPa~OnOOZrstV-04Hv-=bN0f~tUn$^ zh<*fLBCmy*C{p$?E6$mR7S#-ePOGsIa+UTmb$p-8tDU`>tU~CYbgeuKb2znR+4PVx zPtPxw5X%aC836LQ!f)0!XM;^*XgT4;T25yh5Ngc|Wl(qjlO1B@%(tYf9aP?j-uKm| z81I4yYROEL~T|IT6IkU&b13(P&+QK|E_FivPaEW1&= 
zCu^|K5SRyaFMhC60annVcCIza%HzJw9ztd2 zdWjwFH15kbyy+-Y-zS6 zz2Poqpm2!LL-d(OyXy-fr@D_kCR_yPp;67XWYO-8ykfO<=zeKI58sO({4{`8N+|h= zw|lh>52_r3C1L4w23H;jcf#gz<}IN6XXCN;@j;a|DcTaKFJ?#ZEvaz`J=O>kikVj> zz9rF#DYo@0YPB0gTVUI1NY5W}kJcCQ`Y!U`hVV-~RVinOb%XEv=~~H=u^o|phesWK z!Y{Jq@v+Q)dX3xudc33RDP&-~OR;vDav|*F+T_hRz_GQDPjhvcbz^ySkW;@rxb_~~ z%~Y>d#<5KGu&xg!fR+GiMFdG&UBbatu2=ZktP`!-_jXFQ1LeD`TbZER`h?bn2aEf< zV^x>G#Kq=?>_=v}0%Ugg_N?`_)44v@nuNo;d!67%Ux zBNOeCAAWp2V> zhY-_|<34u9UV02~;g40}mUkhz_vPH#1AK1{O|#q5YXM@^W-PB}F{UDc%kk1oW@>$) zn1h;A5&yjlw7e(~$YW&Zg!nvOn zI5vrQ)k?X@%4MKQ?m|pIuAhS@Iy3BQ2vF0&z>gtCXSO-2V^mEy2YxfTK3}inGC)oA zZUR|RrQG}MGZi!ON0m+okO8a??eQf(+x!w3Ny?5kgzIXdYSN;$VAHS^7tqWi%9zQ4 z4^ajUDRuJg?exSD_IQdh_p&k=VNV$^L?tJ|^D%favgg zqW)NpXV#C|*|9!pMV8l6?%k)6FaBp3k@*fF9-4BwCKU$fhuItJjd@=f9gKV_UQveK zT;I1Ny;lwSc#cQ5pjI;S5ZJeN9sI7(C4}aN9`Fef5TTF|Bm}I+_DW4$Qrd*DF`rGh z-d2{5holuH7PuxYkMLP4Ez`hq-r?@Po}jT>Io}_1uHbJxv3%}v9a8D5fg4cBexJ-! zu?mD@>+W&1JYXvfU(#IZr09`k&qd63b*$&9ngL>doLHKqeyDuvN!`qaUd<2QAlG@R zCJ%(51V07Hmqzz?AqX&1CJ-aHVwuJ_mau7kt6bXxGp54YQ!p@{nTWd zX}HZSR%7c%5~=ZSbHCEti|2GpS;@>cB+tw)u7#eeWqU|j+qHUo>mZ*e$mi;@@FZ)MG#>R8&NIAl0 zo|Y*FNuQILb-)va*vs(knV|cicRNRD&|yHM_N(bD24~@9W%XHsqhE~EV{ZU0l#ej) zV8MWRh%k+!z_!&-GG83Ct-qcJ(K=w#^;qw=F}}&~i!3;yhRJ{;qZ$hJ-P9a;`wvZV z*YpFB5{A(RRcaZQfKCV6fVrWxstcI|pQ7rZ1i^e*3Zh}ANq|TQewm;B^UJ+A7!7Ui zaTi{JbldTOAm?pxt|jLl#bl3c2UU0d?y~m4MD1frrO~Jy-nqOulWz${=Z^ql@Ja_R zQ*zoDy)*MQP;_Oq#WGyfC^`IynHK}{t`5S|O?{GOJr61MAxi+lBkdl3>vi*VIS!`# z$6!1eE$O#>jB0rPhJ@-4H*W?n-oA5oTdjw)j_Mkbn%?y<1`W%KpDjiDRk{1|T>J_` zH)44Qbw6j(So&=YfX2vwr53_!W5nRCcPXkF_Ux75sK**+bC{$p8Eh#JH@HbVJXCZ( z^!AW-A%ck(t@6p9l&tBPKYD^n$A0bG?|^)kj=e#m*m`w$xd)EuIo$9$iwpfF4zr-1 zB}R)e^o_TE_5C$yN%h@f9txJJ-c!^Q)gX&v^mshp_okqiXN(|Kb|zqUOwK@E;rPDr zl@NDZg?S_GDNq`wD4ArrP>*&j9SW;wt=Dva zPATtXTg_%Bj>|;(Ieh`LAa8O;3VRlEhSP6Q7-FknPJ5pR24Czi_dB+9B-N51Fs4=jvU4!|49c9gvib7#v5a$Xh`5o7RQaaY zE?Z29FT7_;6yTE8FW#7m?8H7LVkj~{1OFd>BFVKXt*c?@1IyzE_Po|EPkDiyq*C(=8d59+Ji 
zRBhX7uC0OvhMwpw1P#*#Cq=VPGccGZNw*LE0=m3_fKz<9I_AixpqTI7BCDI!Vj_ z>B<%Fmk*ks^^)H?pvGlKE5%XbCQ&7qi8qCSlX9G;co##Z96c`0#lnhknyZLxQ>;;% zvniQq^}`!s20K7_{z*OQV1{Mdn%Y&QE8d@que)YBncWc>5D>@E$~<@U!bjYQup(u& z*bi>TFmABl0Ds^zNSI5n1ASK^Ly6@LmVM;c+u#Hj*;m#d@#;&g+i>WfkD_IET6_ak zmy97>HEHtFW$8z&kQ6?)M$hn*pheG-;_#os2X$mFOD(91-S#musL5&H=Uk6)h{7b2 zGyOs=4qc+`_EK3WfQr&*>yCW#vzS87^wzR|T4OZB%_AQgwP=x6>eX6+0WDunAH+%= z!GwHPca#^{jq+ecu&IQe5Yln~sCUG<0xDd(mNzyfr}+9t{Ob`9BSz#K0fPj;d^3Sr zSA8nRzS!5;f%~K2+D~d{Ww&WZ&Rl$3%||J%u1h&vCjh;)@oM0R2jVt-wvCL#*>1de z8#3mLG4~nVOUTx4&>WTjLN7Udw!zsPw*5{=qx)2iiNX|hDs zY7s9ql}rhe*~<5XTYP2eD$32LL{l3RWsK9i->Bs!B({F!8 zF38RSTcP%v^~=_ICA9qz;rSe*m8ZXmw*LtN>xzAQZ^`!U-V71RqYl)toYe52eWTxr`YU4o71r@-MQ@L{A1_+k3^Al&lM=ZtqP5)JV#G z-W?yL6^9w6%3D~yfViLAtX7AwPaYESSOWO>VEzi|G<(!~pwpYWMmnBG;Rh|XKY!J1 zBb6Rz*UHPxq{gcG{4p$=668kC9aOSaEl|a0iS*c3U@j~%*t%QJ%Ak^`T*wr{MBXmO zrtusjuk9n&_!P(`B$E8W?%5q49kmYW*8R$8K2)cXc%6I>_L_YO?6Xx(&qt;Yw zg*^6ElqO$%(jhX#A&=E@_y~0BAVc5?y;70GK=PtXzJ#;5nRk?O0>f}<6+ zy_f}=&X%{u24x21<2v2X1^%*Oo#_CHIOj&ooK$`;%Z&H>h}ob4S0x&)?ZU;zW_Ch2 znh~_d`eG!u3<&!h%b6NFiqPfz%QGYxjA^Qj{k;RRnTl@&5}l#S5}|bGm*}QW?8iR! 
zO#~<7bi}xLT8-A8yjw_BnAPqODU*@>dI?Tu5&^x;D8l8i$znR@PW(8jOcM6?s=x~G zP}z9Kw^|2aj8G>F8nN4(98N1%BPQ2u@ev3GFwWT4gkxu(+DgTJkub_CX_x>U2op!5h- zw0vXW#^U2iks457HLy;3D-*x+n#Te0*SG>d)0@x&33wv*9Z~_b*2QO%(pX@~$Nn(% z{LuhT_ir&q&?SV#{x2LBp2_Q|l;t8y(WutuT|Anov=d_C5f5zI?Vk&LFfl3y04NP@ z{2aU4LL1M3Efa>W?+K;tVtf~Y=8OE0bIxNOYp=ET-hpxK^N4?LpFhkNr!A|l^XZja z18Zq?v&Hg@sPNxppZ3qoG3raCtyL}MTtG2DA~y1*OJU_pZdaXS=A(-Q9EI4F23 zqdhkgLq?P1sXLM4Qw=cbOl%NwYCJ=TQl4{+l6qwRN;mQfOm{MWRn5qTy#GNbvc~im zQys^?Rq5Md_f9nu2?rK8Zs^Gq#?n&|5;q>>?CZD&lZhElndBzAawx0CbeoC5p}Mss@7<{v(P&$Aa7@Ar&#?yDZ1do>T7?1 zt=7B0t7hSv=4cpMH1y}rmPU%@UxmO3PpH6^TfRT4sN@5|;anmF$*c$+$^qH6NzdqLbgzA62lJNRDH(^bK zw8=i5Sc;^Ab^Zq2+7*Ty$4D^lt(06!EP`_=sDRI(^rC) z^LO@YV3Cmc4e|ttEBNMQS(?JHmb0bjzx)5k}l1itYxqWGdXNC_yQVKrX&poJL z{TCIzE;+HVgc9~u1QqXFE09L{GFj)Z_ni2aOQCS5-vmtqIRX(Qqac<$!JqY6tY|o9 z#PS6XmitI%yG!D{b=M0q#}A8=5~bq&GRQ0I?0yfnKd+Pubd|5NwMJ*uWpF-aTs}5i z`H0@TZRxVB`Q7dJsOAE|{kZ`J#Q0BNi`-wS9R|UF;^HUplriP(Q1y4D-!O5Yxr|(iDWB zf8tw2TbM0(()6q1)|$R|zXzq0^%pf`)7r2?rx^g(6@4oaQTl8=VO z?k&d7zBvs^!Kgch4LiNV+fQ3vO6`%3uD-{;aMr(&8jNOg^@%vtlED$Yuen_oBpB37 zcwnxt-DL1EIa}dnoWq<1eyvd8S(x>B2X2> zQ?8@y4X2&sI?sSjvf~+wK0|73M39>k zm9}#avilJbMrg$#m7L?muH0a(3qIV%$Ey7+0@k`+Qk3~B( z7vIu#)#Y9)s;3~2r#q^mF z!}n*6(W6aBl&hQ$^AH^8vW1@lg4P%S;$^pGM>4#P#1A??-m8wwlQXRK$U0udKYV?C zN#tU=wnhjnT~dmfErCpzXy9yDij-aVav2`ytOhji^jv9Vy$Cc!&L!J=Y`jn@ML$VM z452;XqM0*kNF7uuiT%QY_2vp)K`$<2xtn7B5cW)|)VASlvT_m>zPP1?iF|Y4`Ern8 zxV8>jKxt%z32*#z!JMVT6g%Q^3=tX5iohs=L0f>;Y4#m+#+s=|YVQGoo7`7v31RaF zr;;?3I#siBn@`?pN36$w6-d;f)wHhQi+>E+QL3fY zxQGDXe2e5wfdCxvxXSiIf%e{T*_()ScwKOQ4dg9>wd}m@+dtczJ#a0uy?R^%Y#(tc zY)9a&ECst8t12V~5w2z{@%imAW$^lGq&BX!S_qeatV@O7?^~q2E;Sbxc%_rr8Lb!Z zW!YRW(I>xr=++_E!a09kbh}i+aLl88A2zl^7`10M7|mfpP@rFFK9Wp2^eP}aW;ype zr+^?+GhazF_xvnUm}dT))bm&E0MoOdOL>NPKWkR%D9qQa6xI;!mZVwKLmhZ9bdfhD z_RfEfl8R5G;kqpxr~ROMU!qy?VD?1fGySds@u5TD&3z}@>c{N3!qI-AZI>KD->U=6 zAW#o3`@TkQ?V!?M^ji>#vm?e}(J0Z8w{Deu&hcu7wl1hue6-Rm4@YHe*KBZ381n;9 
zkeZ%k&+f7wiidK)85GzE_^HOPP!Gfix&{drvf_-NgT2wJ(}t`eCd<7Z6a6w(YY#-ej>@OjQmTYF z*8IwG^SE!*Xl;s#XkBW%x0%>z+oatVElv<)3&trF zmxdnNiEXpcPwNJg83a-qhVL3Nq_6R^9tMIS=4Tw_sl_mJ22xd`S7MGTseHLnBswHN zX*6=v`aXg-tcGxyGS{Vsd2rm zP{x%$7*f7UYt!(0$jG1a|7oL=|NSas*?|RM=&8VE!1|#?ZQH34WSYb!5a?cUAcl*| zfxb_L-ZF^{yB$F+Bz(u@9;mLKSa4X%5ctn9H8o`)H z=&i`J6DmBb->Q}CP#1e)5)XU%W>uq9gVGPtxB{?>x-^Q{xPFQ?2Ywd zlqnCmJKrggMBAZr2&mwL^PCQstk_{z`w6Zx`u4Ml&bH|n58=rRYUj!&Vo-`_cD5h^7h%2Sp(_BnkzsUG|UVU zBLe*wqjle|?R1BHo?_Z8ts1J(E|P$ffYK6w8i@wpaSX288OyI)#2jz(IDpf)d6jRV z32EbCC{6@zL;s;}+zeeun{(E_pURgVc{82+bmg z;n@H!f015a1$NXSDn;2S9-dAHgCu8(m~ zzYUYIo5KzNuxHQZ?ORKq;@!Sd10VhNQQ)wC=sm)vK3gY~wWBx54b4SC7vJ@QI15H|j zjsM|SYEpq%>4S-tG+H%<-=Y_y-?TW62Q+gKO37o?WEa)jwS)I|^e!dMTWscY>075--`fvFtp$xx+P)3O&X>#;>mkM_8PtG%i2bdIlg^5= zPjlYjIzI@O1>4*Q7SO-y0D*#cs=4g=!=iFpEA~Ir6ut?3P~KOeetQB`wgH`S)Bf{j z!x;c@DEKZ*_u07xlZMgT+dS+J`K{voIEB!);BWpKJhVv$TaL%yga-wpsM8-jlebCK9{^)LztIY zeLP+M&fk%q!8|xcJ^a%MVJT+HZcJiExmngX7L?FdWkkSImC5H-#xjBTD2{aD>NJQ; zEbE#ZkNVKYlw!3Pt4XD-B8cWZ4T5Nl2t(HC-ZI>e`iV7XsjF!tpIKjg=2YPaFB#(8 zxkl7aG-kNd+VDWy-7fsx5s~@*!+#s=z2?%}g~j8kuYA#;zA33KWiNvwH|lUYdA%c? z3vRW&W2g>T&Rh_xfADyr$fSs`KBfAL}s|FV?{iSV!9-I~!kYV%<7h z7tLv{6(gcE+0*@(jff7`$fZGM_NTv*aM~=$v@yVol@Q(>m_`h!7EHz(eGBH@P=oQb zK=6%t*SQ<`CC13ry<%Ndzy(`InnNsD+)K>A(CPQU?CvkliU&&+g(P7I5r+VfWcg|lOtt3v%==_tb z1B`!UN^x2ZzE7^@ElH{(Cjg^6DtUoAPhrZc!C`}Y#Qg*(*8t-@>Ug3k6RG3l9mu)v zm$VbB+0UPEIDCEyc**n-VXWRIIM%9Ruu264xZz#_AiA&1j8g#D35w7l<%BpRwswN|Yy zAI>K5s6qXFN$TXd-(tj zn8lNoEy7NlmO~AVmPg?KCJn>o&<7GV(QQH=5Hm{W9%AJJ1)_ZX+sbw!X&!->vdoBu z-9-)9zi)d9;R`&;3p&{ntLYGoVXE;zspGj-kw(;b3eL%b)(|iF0kJ2WvSZ)?tWqlF zrmG${_fx0N8ol*T2VV!@pLFQs8SSFsF;^5$_*xL6;Kh;4<@3VOf+7A$miXQIcA|OM zYvwudXAmvtz8fXaJ{JsIC{*w!Pccw;mEC9O_1!yQ8H8ZGSY*X!UZg-c@Y&=wP3gt;T?Sa)Qmly zQ#0j@`*YQsPQcO(vf5UWD2Cd85uCU+L>$!N>|}ggnuE^tF@$H<4ivcY>V%%|2u-MH zRhZNx)7MYkN#eFOa6ZYH>0EBU*+uNAVxx-O%X@psB*VJ($Ju(FWmA2+LOJ3gF%nA~ zRC_**EA`XNTOOTcpy9r#i9NV-V*Un)cr)}Z=DFP&S$RI914+@#?jWzu?qUw#EiEFn zVQ>fOS7y2QC<0I=Giq9ei|AnZV(!-! 
z0%n6I@CV&=9m`btjQd|G#!v#fc;sZ%~ewx?A%N@fGD@%>tKi&pXi7}KLC6r9z zpy^2CPD$tyeuX<24rDc8SkU3ejX*_sD#1PI#7Y0OxUx*q_h$C+Y+Zl0d$RiJ{gmwc zeCvY;&x8oDnYNNJ$GQk5q*$fLYNs|#B~Imv*exG7qc#uMC$Z(dQl<=Y2fPGYX&ArY zBRC@QH*)!h#fDQXyB+fTz0LBkmie8Gy7jLW)7T+T@!M~O9@b!9Kp13_3>QfO7aUNOe`Op7kDa?{S-&m?yFE?ugg^L9f7@++N}UiI_k zmquCzv~TFaT8qXB&$sWD3BHwg+P~+P`KzH93zRY*bP83wH`(<;c#~FJ%oqW-aeBb% z=Cj_&Q(!~MqG!JZh)mZ33}IoS=d5}=>iX#w-})5t*fwbfX5SYoeJ$9_SlZy7fy<5} zydXUMh<}P(8uNbo`VFRr?9o~CDs(0FgtfggsOEGgQ>z-phhKf)@IYqTV(jgUUE4Y} zc{#$My&QUjW9-c;Rt_H%3jBu`R+IS&Hi!LL z`s8tF9rSDmD*$$72MNFT{s;p;llJZLj^ zx@$R&<7$N+0g3xhfRJm%j`=Vr95%TncQES% zBjzq);ijK{@1~B6_z1B@hcGDKUrpr8Urq;z*J@f8rf_)iiK3U{Bp<@&nPrTz(uE!# z7*T6BChJX!L!7^W_lZrx}7N(Vb5; zlzK}v{!RQ#hiD|)>=p%_r3CWMgVmd~dByxij>rQ>8$n+(yGd>4?oR`eF(-@Ia4oNV z*vucY7swe-0>dGW))Tj~2a^wyv*7G66(7cHCeUeM;2j;LW4HP97`d;3Izhpxzu$`4 z5Gc&5mw)^M>=`h>+IhIiZAkiXlXNe0GwNn7Pqxrwnc;{kvMQmrKfat=_8-)G{xvWa zl8M~l(~kCLim)iY^>WqZp}N*r(PDOW6ymQU zwTtZ@Hm2BJ^fBSAVK+t~2+C%Lq=O2%D837>Ufh@It?c5{O?Bp_wh-Ng2M}&2u+^11 zS$>{c!TA@?&4)Al!dAHqsXnc`yGf-1Cvq;UY{8d0rbkr$x%tz=ouBI`*D3pCk5fH` z&CUr+K2#`l7Y|&6af*hbWMOdwXCv;SMRsGqtx}wmNyvoF35`A))9z(u5j=5p%SCs5 zw;aNGTnL+;leBt;K#yiNChrQv?+*gwF|YyZ4(sw?b?0}L!;JNHk@^`#d zFOQoWG5Z&Z=1qBLzI79}KOFCxP;+d>01jVs9j%5Zpm&$8&p%xgS06B!qKSJ%Y?1wG=<>5$H(7m_>v$J#IwD6pLW0V2@<0MTr_tZyY; zQvzPQpN%sm&=M@1A{CVhUJt=T(HeAG2!u9T_@}mm%`4wDVx{9Wt?(t7K*=%SR0S|q z*Mo)>Jy%6RkU0lo@bwTFI4I8Q@NnFk=%hTXQf@Dx+K6eo(4K*E?}`c(rRB|gl^DkY zAS}buwv@tP32?6jB}O)>jKO*||A{TDiLF#`i)GUfqtjPg1FDzP9pY*&me`B%;bO1e z@s6Q>xyV6IXwaHX^1h6abgs!vmX(3w4m$1EsBcS=;Dn9T*L3=hr{1uz{8Y%~NF}E? 
zz^}~zft?=v2R>MCsE3H>`ulp+$k{`1af+?>X++rCYP)K|ue{mMk&yxyeAxAd>&DfD z*xxp=O*vyr!38LgrhBcR2so~n3-0hyEMDgXNfHPxO13aunvyX~6;(^L8r-5@Cgl>9 ztpo_M%q!UB_v6f%c+uqSg~#y+SNXyCF94Lu0{sOh4f}K%t~fgZeC|J5 z8pA^D0qbJbm$y#T^>g%3->g7oNkin#Y z0dH$;Vlt`d`rZW+#B&?+nc7kZR))KJF~ou?fnrywL?pjGl$dy$dl!fvicl z*GT90jP0zVq2&W?h9z`4w`zcoAg^o>jc`ViS}(1a)d+@#xM$gO3Xjy;#{*BI%!B>x zSV3C5mC-=@P4W*$4N050^IM*)qo;6=`LIZ@1WC(33_9dx_}0=v8`DPnURF610e{YUQTC(H-0o=qcLFW zbKOJMskdiY4eUo1E6qUEuC2X@vA8F^nYYl9Q*|3G2L$QheRob-M|R#eBH?v6Vm9k^ zg(giwFSD@^scaYM`&@5OAff$sNAT~vS^pqa;}mR*4`qo~V$jB-z5#zEpjTnt=qy)? z;hC_XdTGT$bAS~I8Pjh)C$sAOh$b}it`{enLbQS)h>lZ3I_=}fNOZ@a$i$z7KQ>In z^@nwF^3WmR<}KEj#c+;ZTQeF{%RQo~NGo2u*`K8{jrLapo*apLL3^C;bNOn&op|5D z%zlF|nd`a9&C)1omuZIk1C9Isg?D1?U%luk(H=-49H4oi9*13J+A_pVs#vb~n178E z`OeFcF0O!;9$#-5gnXPbMr=z9C|%Ua3?V`PVAs)v^-WnYA%D36h4@Zgh2279gOCeQ z_AMj7wF{FxXI0-Rzg>9hU9ai0o{6ip7SM>2!l1C-WjcQq;#wVz4hYmVQWO?7#}TGe zn>26wPN5H8F;vLje$?@P4cFbMpTnv{ITe~$?Kkn_HY2i+^D{MspV*gt)=5mCPwMf5 zyKZ6NviP}_)Hl>mcp@JjoU6cYI z2LNTmkZvC=Se(!&=@;`g%SuB@!ycw%C9*I1zQH=v_1c95=? z)3Fk8$%WQ)*$@fbm%|Z1heL5(A8lrt8pvL(@xzLOgLOh7Us)v`Ve<^hu`2L2W1&WY zQ=?k54LdBkgHB6_KfS{>Ta5M$nM7?of1N-wB*RhL=37OxLqhYi z15{#`V~h{0Y4AJrUOZ(clQ^TT20>(M?3H{T=hoHo&ncsMJ6?|e?C2``F=zWuwIpat zY<+KTI~WT8!W^YMPiz}6%{kdpe5qDzP4Iw}>+rqBFFJ+yiP^|C7F{Ovw2<`I=6u3b zmX-Tp8GbP19EeNeVvB_agl-vFonwxdpT8Z=uQG6M=tpsUx>LR|{u!Mj%+&%f&5C>c zMDU>>5`hIN>SQ9O868`Jzd*#IRitw?sFm;%TX8)1Oc>+KPdl2JM9WpbBECZL6g`3r z{!o0rta3c9tID|%)kbQ(JeG9m)^_}(ZCRrGj^ksF65~g@c1F=s?Fb7H?Id_nSepge z={#gq7zlXj(?pVpCNQd3T}4gK&LOJXEx^3K7l{P4)Z6D&mx%Qcs$dMC`P~JN<pRDyt3*}<`>(_Pe%$IDVQPOB%@;Jp@c0@9? zwN7_e4E*JFuy`{;%(>q`J<%5qtcCRh6Vc*A2zTeM>cI0R;-Kw1>x6KXiaM0|5N& z+5L;3Vgd!d-1i`9?yp#bRteU1`NeYG6hQrBe3WYw8l=(7+xb{8lqANNV7QK>gZ)7& zXZ&U}%Eq|yuZzdCTu}n~i9R9Z)B9Q)zVZa$jL=LwY$EUTs(ZKHHil-#Yx~0aDWK}? 
z>TJ~|t83z%pKa{1sS%*W1dKfWxs*qU{awSxNqvqZobP^*xQfRh>`L zKj3Jaq;WNjxl?d2ruRK^HV3y=pBFp*vf83uh(+FRg~4Eku~s%WTC~c3%XRM64sUfa z3J6f0rVEUIMTp1f_Iw1|tDd7dAzwh#&wC+KId2G3odZL48|C(du=X+7Jy3MAmN|G* z7%rgIEV^yP83JxP(Y!0kSKds{b>uH+5K1*F#d%(X{s$@L!#6!z1)X)9C)?HM9{xk` ze3yFPY5-l+ijjXqTz_@3%*Tob@BhMN%WJ$;(rOLV7vnxRYjpZZzUNfhP z(ReMa6Ps5Ifi!@+u3zY*YJw7UIxP&ayZ@A#d9@hEe_VzNk_A~32%fx8Y)FgBUn|#a zNI0JBI<)?FeZco_a?FZ9DCe|$TzvH(i7c*kPaGuXc&~hvub6iBdG1f61YiS-7y4-m zaw<4ZjE8Ql{4GHtueGbLaDvu?Ubxz^-J+x8bbWr6?K4@}r$nE;7DL9(n|qFfnUm`q z_o3qz`g>1*N@hBm&X_C*5^l&aF=GqKdh%;-fvY`hr;jO z^1Gct2xA#K1_Y`e-vBfa1x<&#%IGEBIx0yNV0|UFPgB@l3QwN!7QfUJlxKqBhMI%Z z>ETk0W`<=EH&St!UR~lvSj$~eXCCfQ;RLR^uC0lt6d)jDL@0i-Ft$rBat&#uK?0cT zpKmeK)RsR*$x>(rRn9gfH%!qO4gs-8j7J$YU|%E|D^lurRP_gVqOr3jKOXb z&O>P#DsTv}v|-^FeJk?8@OfE}`A3G6pyGNUk{R3uCRq`eg!+t%R`(4AhXZr=v5N_>Q{0I)gvswNAAd zZz_kA8mWaS4XHjF$=oaZD}}y(S>4#Ms=+LP7 z&tKKHIb|c_iv20HKoCn^P1N(?6$pJhutzYLdnKL4zq5O05p6Xz!G0T}cZl+^qg563 zGVHYN52SvGReX9gzhY2!O4Bm+(>^Gc9?YtKlMH5Locz4UD711O25!Dix_b2g(@X{Q zWJnsRx?$|2sAw$F6r!M+-x@|^sz_OSq_aEK+L68NMJMKpGc}8kyIIj=8VQt**@#)e zZ{aJVm@Ze9Z>?!9X`?VZ5Hy0v(P1lx@c;`_ZS$1e-|}>1v5S9puDX6Qs~M^L9I^^* zc${Xm@w7*z{SoY?z+@>VB5*R%7zoD2kBsdX;R6QBMpH$av=sM70os2kZl2DRPwA(n zP{mhReQ#QQ+-ZwOXL_do%}aS8~jT3 z4XRsdACY+9EQNlEJ^U~qrr%ITQbDtb(EOu(Q~@_#0k9=!knNcAbY~%m=(-wkpemaN z@`<1_Z-T_Nyrdz^KIT0F)@VL(?Xy^V?HH^!!WsH|%xJ%2cZg+da>V$Yi6T_EV38cJ zVsbk5dgfi4@{RpypA*K2q7s)ibC?;p-TuzfuIxToG0Qq;`irT0a$}mXN20(dV{{aA zbepLX+6RjtK0fPd&KGJPh5oq0exdr9vz2=EVX5ApGd`_CpZ5=!HaN92zd+kfyjhME zpn&|R%QoA$`rTZi;43w=&G6(TsU2Bu@9y;?=6tUXOXrl8pg(l*yY9ktbI?Ijpsz^+ zS92-Fu;Nv4(;fG4UqN^smEWCrn!tIvUyHd+uw+NT%6v!0@}C~y~^tfJ9fU*xLv zKi3K8cVLco;iO}IF6L5<>cZMd9v258s+AgEH0nRm1XUw;N5$G+l|q{nN`p=i)KedS zk-=sxxM6X{?3L$hZqvJKbRMS_S`rOQ-twdlP3H71wE&CNps*-8o*Xh%A^KZ-Pa}5` zmuT(e*mT1wlM+X2eD#^1ZO@-29J6EEKo)L&mgd)I<84D(&_D3V`K`zB5K+d~x3)Bi>8 znM>+?ZyQCDKV1JeO$)GPDXM15AVH|sbQf7N_d_y zm15_21Ftl(*W&HgJCW!-D$IFDhTb7cJZBYRGvIZ+5V zEl19*z}dtCibR_)(o}=XV#nYPbxjy|N2I+S2T?G*vVCcX$AFuoCqlrD>o%BS&mbn) 
zWzGqKx4_xSV)lJ__fEU!E&h(yrS{FUvn-)1v}#XBJAJU%)BsfbApt*df$7GqK(G0x zf5b+z07hcz88E|$^^c5wU+hZs;}${eqki=`^jqlZ&wxPZE)^xrC4lULNN8DuujA&b z)L`r{9&12RoP|Ij>YbQ4TJ6YQu0uA!b!C<0eU}cm{qo1k0tbpH&WK;@bUEt&gz4(} z5Dy-Hi_s{^l|+?2b_CYA6l&24h-@NOtJuZnw0c^MFKN*}lsh#&&2Y}-kaH5GS*8RF z&iS0kis#EGgl^+Ss9h0rgjr_5?U!p+cdAL$TFilkeD{h3oGL>#W&S*fr7bSMWugX8 z#QW!Y2i=B^pxKsm9Vsy=D=B3Pyc@vfX^aUITp&$W_&&-0GD`N=9;?_97G% zP{AqJb9{peqr!KJ|LFg8RjX!ZOZnl;h1 zJsB_f@B};ZLMIdQRZE=`ZG?TAyT}~V53V0$0016}&-J0w2DKA^K}$wcwBl0=m7eQ& zR3Hk)q7bj1q|+@DFW~Vk;0LaC>9~xu76v9JFmpopjcyxfX^-79i>QYXW!R4w>+s#) z2N-C88^DslcS1`(w;J1+rGF6q$bww30oo zZb*2A+o;G~*a3*1?}xiC+O=sCqmkf-MZ53T8A0L>J;3YK+>mq>Yc2j``kv4EGWq;% zUmDmMK{GuwF#CklRx8P|i6ZxA^gLixn8AT?G6y5@wSLY=BH4nU#a|8l9~7;tNVuVL zREl(^V=5NY0w#YgAG8E>VYECALRWM&&t3E9ngFns?J6|o5%WD6c#$t`5wN0#7&VKj zL8@~i<+{604B9n>c(|LZ{?E@L7^u2O;EIlqbciM0%FMlt&Z!uL|-#@)`aX_T6tTlJ@w!}mxViUSqRUi+yktA_N=#6Na#CYLQjj@ zq(j=DV(r#u)iSOx6w@aTE(LP#x52A(^yQu>1TLz6Z-##r^D^Fl88p_{t&w1pbFxEa zBVSNV<90~6z$DdI_6x%W4!H2ixDY3}{3Ep#Md;s!!iY~?`iJ5LgDqJOpX%%D$G~Ce#{GWU>_c}R`c9uNc1RpvlW<6H zkz$(6h#5nmkjU>x1*^We8b%-$Dj%jv#4!&kV-1qTUF4_P~ z2k0*H1Xfs#bG9yhz{1dpopto-XlD(zWORG{Mr;#6ru?_{lJL8z`W3_qZULGl=lUS@rbC~GgKWQ38eIX zsbo>AY7~eJ<)Ax%4qQJb)X@NgmL!<#4Yh)!iVWha;hD})v3@Y1WbrcNi*7haCY?HO z55F1KZll4^WY>cd_GH+Hi+MHZ=5_L0QXc+ZLc>OZ#h zu>QVsp!p<4Xd;cC(5-T%(EjXu_O=ddXUh>sBgvAs(wnf(nmaZP-%~@*R$??=NYKnV>3?r+NeYZ znuC6o_L;4<*xE@wm7I!l)b8b{?Z) zN1)+nJcvJ(bL2cHM%a4=e>jW2A5BTRKgoTCZABvOQ~f+cL0gPwGS(tjNSE$e35My6F~^Qg7ij3@cXmKmr)+U(e} zfGSA3hs7oKI_$EQzB4$`9L)J(DSR?<9x7Pj=MsigsJj_hy8 z3w0*A0mH>;8x^{h#hb}!TQj`h3qk`W6{*L&5j#L6{TKf^hQKVE$pzJ;RL1O^07D@-J3} zw9xjPHD^J|t!6ogB;`3Icw)Eoua_6%l1cTS4>|E0k0$PZO?mco|COV;|OWFQwiX#h~jhXV7_Z_=C7{6NouUc{Z zH+1J}^5nC&mh+{nih<=G&xC<)P~`3t4|e+Fj4q47a$9gF^Ac?xzu98jc0Fpyz*taqb#QH^B0e#ypI%OTEBY|b$m=*4iedhOY4#8 zvoY?{GDtRt#fj0~(zIqFF$G2UWEW4GPc~hVY+{XPmls)W()JYJ6sUBKZ%;}1hlrkU z(Z5I8B0*UQ_G=cd=Ufa@^=1`saNQr|N&EN#;bXBe5=>Sm{lFCm3!$(Dn8&BnnWsyT z9{(0AWDk!iq#ej>FUhe$q+k?i9<|rMAV)et4m)aVfM1f9Qzt4 
z^z~H`=WdjjiqF@Hjc@wKaKe4!m>(LLN9R*BluufK&Rp3m+Rbh&!}R_s?z-_mf+{&o zZb!e%3U_D(A$sLdt+-0>ANPWTJI%1ED$UDzi(l#a%|ZATA9*RhK4I75i#=kcuQA*H zMZQM`$QPP;Bi}-=3;Z4;${dVOMa$Bc?T~}ttu#}iU69`BET+MgmQZ)h?U#NGwS}UL zrqns_c7K2Qwk72V6Ta+Q_PNA+dWa4M%|{<+0ckx%bcw8U ztZ5PpW>;|`Z>vlTvcp{J1ZMk1tOfm6jC827npC%RoT}S+V|xjrE7faukEfEWwZOtf zqzy8$K63sT`Awu7(P)wnW0dYbq%e)^HOc;!?TG(dva|VXurGH<0s0`@L^Kov8hpp` zhRa?p3y_w@1%VNH=K~n2JZ7m=bEedMqKt?OB)42iZ?s}i3H(CyIN zi^X#KT<%FzlD~#r-WYyt+mST?t*;OzP(AN<&eT0m*O_UMbG|wb7>63;@v&8>?l;4 zR>x#Y9U%A^HjjLt@|W#Ff#%$Jul7|0ok9Zw8%~Ge3<{f*sJgL@o`|ukS^Y1EuS+8N zu+y;&#BQ7zVvH7r@E|Ir?LJo*GW;@5BRbu~RvE*5r2_YjG`s(#iY`tpfE1+3&Ek|N z_0eDKv>J)Xy3%0rRp&R7o~i5I^>~N+T=m@S+ryBvy%Q=R;FDFg_eIq*jak38>PTF) zA5f?NmIA2ygm1{Hp+ngN?m9ikZs`15PYoe zU8$3o=B2X}El9mi@C>*VWQX)VzXD6#cy}H1EFa8%@|3%)Juu+Nk}|@h8eRL? z+Ug3NfJf|Hf^s#YpGI6n3NA35JjBcyd2w!deKghR^1wsewIv@StQE`SeiY>EGd-;- zi;m_FsV)K)yxG#psaUkGkh$wHPqw$NblSXk4f#;MM`i8g+s-=c@|=cdEV|DLPGJvrN#J5}iL<_iU(Qc|*9 zM&u>d+u;Wm>5OVlSwdtcDuW;v+Yf8aW!m4P+iuT4O$`Ph`2Z0-1%zR*k@PF`ZVpnJ zkBVs=k==M4ig7ebxWqEXT=)}tvWn;jvtdDQGRciPbq6)(fzI11(Y64#!>s3-1DglFtdZ`+YxlN5$>p+xY`>l}?9E}!u#`OSh6o2(B zaEm`?wV<*S-;o+h9$%fy~c0v%DZe*)*f%N|v0Q)Iz<^laO@ zNR1MQQ$ETXLiRmnZOxgqZljr8(zshG+)`XnS5GhCgXM=LH+{v9|KuXM2(+dYxnzDp zbp3=*I%RbErTzK3lq2i&GwA~Erm9G95N}~xP-74>VhRzO`8ev}`FfYMY3mT(FbGEV zWeS+#*Jup4#hEgkA|_8gT#ATSD2bNt`?6!Vg|O7v*QZ^J=~8@YYLolU@6xd{$cL#n z6X$qm{<}d$T)xLxAa4yaUtTt=O!#qtTv`^RBMN^T+)Kds$)~bdrRCV~^feK?{1ZCc zJ%@xgnH&><==05Th}~3%Wk-DA8+s@FjT0MGbm0`=)YVeFd&l=f#}8bIUoNZZg;Tjg z*?-0qNs!S2;!G=HV9_LiOf>1Uo;cMOSIoDUaGHG@=bX`|@?aJpTy>#5bs7i^P}A&u zEzlb`k0jc6oE@#%_|u$j;hO(cfqLYZcVpku%Tnt7iIvSp@;$u7)tiC^j^yD^(ie6T zG;ATkVJ5`9YgR;%%+RNKNU$K?~ns5eU+x0P+;2H*cojv~@;5Lk`C*{m4NX#6?6x^CBr zR@hy9J=z5XtuF{T1b||$Ea9D}PuQ!bNUh2~q2^gAQSU2*&^tQZbQlyn!snk7GI8rn zN$L}}AtE8%KOebLFKB+sC6qIbAZU*`NRbuwQ18c{eyOq?kj_)Wvh4#Bb)aDX zX9Dcss_viUWAqXB%I_kmBuX{gHjC(o(1E_AxHJlA9||jcAf1S+>Oh{wEz{Wvm5UWC}$afqC z6XP?!BGwLiI^(0v|NHCyeZ~QKQ6gQ1I#^8XKbKD92iR;dWchp2`J{f5z?J#C`c=~c 
zN>LV{n`iGhLjUU#-MVj4#$ANxy=Ak-faDo{a)$&w&Tb0$j}Dudi$SRK|9sh?1cyRT z;3~my`>tSlDG1RF^Cw$uz?QhMu^VXg2SwmSBvs^5O22jOd{&^mH%yR_) z_sfy^ko7pJBs>n-)bTs!Mh&DiKh9BH3LIE`yWBq!y|V{SRtP?GxU#6u4@Wp+A=hkH z?k`*#jxw0Tnb$|l1_kKm|IB4c0!3)Soy9Ps!X?udYvtp2FtPW`v9!-65DRH@L&%j- zzsIt;o=O6CT>R;pxk!7a)6$*SPefW_x@waD`yT%1b^MA6kA#|3Kmon|@)BcD5{R-U z@5>1l@5ZykJ7ItF>Sj>4nH)Fi+ZyVPI+&DSZtiAN!g#*`h@JVo4lUHn9YJ@^jH_Ht zCj{JXk3qoqafG$zZ4)AQ?qmK>(`2{Q7(mx9qtbX1pum!;CQ5Y%>oZrA6jOe52!-8> z+}Jf@BanSAw%$f9>yXXG-iY~I;-q6~8&&@AtNoio+Hg`@w@CZKB5&C_HUj9_6S|4o z-`LU=`nKnLvwWl0Y;VHnbsqof<}*n}VFnoS>lzj-7BG|u<>d=hp->ce=eE|i|92KZuTw=j+o9bo>MfNM4euwxU&v6U0U3WjX?&vQjY{(qdkby(Ex);6p(A|(O}lF~>@Ns1sy zcXxM}#0=6M(v9TM-8D1<(%ncmk^@7%KfU*Up1r?)KkxCq&!3~m;f&X{uC>k;=egRS zWf20^D;OS)Ol;fHNKCS&(0x)%{j)#xjuc)H*4+h6&v|HSW>hI+Pa%cFjjf~K*KIX3 z|4B-rJ%ZU6fq!)#zgQ3uQLwU-tCz!9zw%zBbC_=6Fev8gZzT7po^20L$U7KX_mATv z@eRnvP&=7c&az9#sHH9bI(r~R7*uVM>I#g=0%+Sjrlhgiz;Z+35f^J(!ZH#Pj~0F3 zSk(8oDSz`)js4s2<)Git@gVYB(k}}=>$hVS2Hq8hf8j>Bx%3t*e@gU$&+W*;MZ3(J zo`}o%+i0*uYB3+UA8>akQIBR8uKIWcjaYlmSn*}Owx;A}b&()IOV%fU#7hJv{w(X2 za_PY9(`89}pw*0S2W+mpF4Im{R4&l}LE*)vu)WRL(D3J-uE!{Yd;Z0Reqlt&41T!L2-9JC2>0D z8}wTFRCz7erg#btFVtQjux6q8E)wd!iZO;Tvw=q?|Z>2-bj@?8Lr)ulGC z=cScF+X;c?Ox+ZIAf;K*e;(9d##Y9G^m|0o^=Ou9S}HP(vwq$AVoGfc@q-f_ajsgD zX%^_==vO11;lIF+4WxSCbg@!@UZ zqcrLb@?iIJyfkK=Cehboa?vBz%id&DWhw$O+B7^LzXc!l{+SeUZq(QNfB4FOi_c)( zKlU2A;@p?W$KpHDuLQBU@9bfR`(B?ZHdf_rw94AUFJ@NE7rSRR&w$sTTNKYI8Z(Fl zL3=*7l=OS>IE0z-AnboKl7Bzx|M=9-^4HOyq#j*4Zhy3gZ*|slSPk>Pyz|30&gmm9 zr95`=o%p@Zwo5!B2N0nT8izt}qB?kjMDM5tO%~YD&HcyX?ziol&La5=+^fad z>4E35mdCien$>lxvrizid%0j2=D=;g+U}k93_D)#-zUR$!8xB|j{fwdsU)I#6#jSS zp)LGRp_Qx)N*z)$+^d>B`BUay!8Bz@hRDwMGAA_4%I z!Jk8+sP0<+o5=J(Ji~uC4t|`!3{%{3{jQeGcVga!*kUH5m7PhB+u~E9(h7aQl*iid zhw-~tk_o=o5M2j|k7?3iAN-I0>)*fcKR&e!{lhdfPp=$ygI3Rb z5>d^2ieSbF0}0F$0Z*Q+#f%Oc$BT$Zj`M{j4)&?(xo~dwr^f=^-e})@u0Mc_Ve z;eW>Jzc~@|e^4sX6Fkn!?r_q;cZb|59PTK;V`!X4QfBc@`SrWz$%8o 
zq0i|HjGCuU_u`^sA+i%&QT1aXv9ua~v3Q@rnSajwB-PgSZ}WivA8!GNb&21xB(^~c6>yD=9jSGJg)15HK$?@K;^>i4k8Q?37{`}C>!hmWuZ zcfzqH?co4z(EKonIDi0BT&Niz>U=iD-ALnuE?f5NGnC`cm8BcHcKmOz4CNnJ)*prr z1qN*xhKtLZB$t|WlUFyMM8Tx~xSeHsQ^Nn9!(&A-_{;6cCbcB}8bqSSqw6(#@dsb5 zu@%4K_}Ap=|KS_Hy#LF=hukS7t;WsjwZ8`c`Jtr!n=^m@nFdPx-EnI8X|bR4=_I`* zsE_1ae)yI1naok0tPbKv;{WZt)Ho3OUgLh~FpF6vU_E7T zkbu1;376jR#vjVP;V*aYZ`k=vqi04)vVQ(`O1EZzVE|b}Z!{Vn>Py03!7G1Akm8Oa zP(k}&Zz&LXJPvq(!lJ5}O2vD@rk{cPugC6h&pvvbYzo$x zjesr3Zs{%W&y~Kqd7Z6@kT)hcKv*u8U@H^Az%R9)*@y>OUL#*b>BR4NE*42Drvw?w z7rMWCy_Rnn;K*UIR`A`fO+p=MalXu7rhjwx54mx|j{MH?M9t$muS~hfxN?x?dYBMF ztKJqO<$d>YM$elBpCM@_k<8WkrB7B@-{?YEqQY)WkHu)aVBA`Y6*F|B8R_HucoWE{ z%EqrWGM17~GdgjUAD_^g-SVTqUcHD&kI~KkjC*SJc8UIJ^Y-ry;d(01S6t{ z5+DR4o^}V|px;%*Yd)D2K6=kO>#uIYw#pfH&4;QUJToG~Emc~?NI*`BJzwcm?-dIa zUI${{#YQlKpA$TW8r@f%bk{!!c)})n!l1ui^dp?dpMsd?o}8ckn5%Z)`0lVvTW_9f zhi`KpPni!dXGD(5jgHFY!Ipc{z>{Y%bhE#+nQ^!4S$Utk0fhb>8?EWSg(kKc;(&AS z{gmf@=q+Liw}Y!4BH{%`5W=1I4v|XsYOh@c54t|Y%8Y9bA-Mwf4lAQdLCl9g&WU{N zpSOZh8wW~~xHn)Vf1d0pqeP9zD?zQ236UE5JaYY#R|sKM>;Xe{TNTD|V}zJZnN zWtp@fBXyn`6?7)?1|qsyeO3adpdU~?w8!;zj6N|qv_!2X{J+_xnkTNWxNqA@d~O3I z$D)MpMJQ;Wh)88D3 z0uoGO`J*5wk9XTJ3AD%1mD!Yya={ggv`G|0^RYF2%2YF%49ia-50@y>yZv(L*l)S6 zm0C+x$WIYev1Q~&1$Sn}T=Jpxw@krmu2z2S{jrb&>j3f1R8^*w@vtSng@!e81KeMH zTiC>MUN_|`HxllS8GWBBzqTp1*&S#j-*5t%)E-dv8lPz4&Q}6P0DY08{dH3JpfUCY zu{z^1YyA9$zQLSRYABf0*6@AB?GVlL`BgCdbq-A1F1$0@8UdJQN78*Q#nsctmBkZ0 zR;A(zzh3K6KX!;Vy=zjb%=j5}-+GYhg#;ML+FYJA#jR-6#uSDUDv(IqA-zIv*^JuJWhD-B=| zHI*R_o7jA}_O|IkdA7ecoDW1L6!ud*k`xjeZ}pvA9yOx`hNOa%lFEF4`*zdgilT|y zL=v)k?au#tb=oyK(xZah_XCMjlQT>{ahs0^m;$$8v}PDM+d&()-yk-VB4 zjp=6MtHg7A3%`831tU(da1-w3kZ@QZ=OATh0ZL75j-y4+g=@;yVo!19TW888u5b2s z!$&-6c#=hCj$1~w!~0os{nLVIg`5o`4M0_NC-rRQyUQf4B497 zj3&c3bmi2-a*>b&bRg(}`{}!b?@HBoE8`F?n$F()#j_=g>wziuBu5h|g~NGHI;pK6 z=YU^S%vfwRL_T)A3nau=iK47m4pERbYcFPcr}gROSpked2~ z8T39yGNJ5^pT&GG#l?}k#_M#>gWOwCsYOKj3ZP&VRI=ip+4v$>?-TkvY&r0bwrpjY zaPEgjb`F!%q(PRPmNP9unTVTgX&O%{Dx_zQX*juWPKh)q@q?qs?(bkL%7BVc7R42+ 
zulwnt7I#C+)Oj;FLw-NM-^J$5_eJszTGQh{5t@qbFVrP5`eQ+~7m1cwK4$!{sjZU; z8AobwZHq8GY5P!UOa|Z`@hHIrU3z52Fb}Xq9Z&NO*WPy&ubQyeOv&X3yONSub=3r>nF)5Zf z@D?xahnE6ixE*7R&xMS@<*aM_plF$J|HX#z@>@7&BDyU2Yp6pX5=8%VW8;>4CNVGD z@iRx#qx6*AeZ$?V_{qloL3W=}-o;=2gZbS#GPQr1vkmW@-Wqvy^g0sMgv}v(Rb7Vs zOV?X!NtGC36~Z7hu_&T7qBrH4&7JEvCee*P$M5GT{xaT2|1@5zzcpU9`86bq+&$S++b&LzhJlde`?{==V8z{1EWK{8qo!XCCaC z=Pl<^C441=BjDA>9rb?H&RP)pUWB7|EbsIO679ombEC0uEHUra^7YHJ)G#LXPr_S7 z=#369-w?K2F9=SNO-8EKcwgDdEdn! z$vP2k4kiW-Br?j-E9O>UZe3VZZ=6wg@=qq}k;K8S&$e-C_83GLg_df*=6Le}vx0nw zv$d(R>AXyfI*b*~QNYwi^b`CaWawR!L-%q*gez9qBtj0MZ8w*Ek}l#5xGQ((=2HXC zgQfL6Vtx_PQ&!UhFck)sZ%6();B z88dciD37r+y)X0He^TW&GIE*>#Q7Ad6|aT*a4wodQzuOOAekUxtvcJ-v2S2HiyLdH zri*E<%@He6;?dh($RwkYTERWY?c&Dy;0hgn*;yZ_;w9zPx7bMf#5fS+X9*blXIk#> zqH=oSac+30vCNcY)TmCjU}Jxs4WGsFy<8riikUnY0gEoh#6?g=?K1Ru(Af@tJ}x5< z^H`kbTE{XN0PtW*oRGJ!oQ;O9O(H4k(MXzrR4Nzf&B8H2-BcWi;|@EAiOstPEyS0nNtA0r3XsN!Sy?shrL(0AmyK zA;1uBz0|rS&_;cG^l;z=mpr$qUv|XElgmtCcv4-Q%x+q7Mw!_dK~|E%9L!}J&uZp& zJ}c4m`Gis+ZvF9?*>-k|p|ul`EA%mp6am;u-eI>P6UXNI6u)>E*h4tZuliUa5##xH zVha}M2Upk&#BaJ@G^Y`_-ay~vS@S_Bzp=^Kc3#645ucrcz}u73B}!L@L0NE}^_*Bl zYF$iuu}Yz+QQuP+M@eXK<=A}feos9gMca}`&GpN#tk9FHbum)%nhJp4wC*Hw8-z9DDKJKT|Y zUxxh5`#R7hj|qt;J$-E(sZFR*l!$d0Mu1}A)zMUmTsT@^CCA`mYA` zSUZ6o^95Ofr;B8Qj^Wdw2I1*)J@9pW)pBM)RY+S+9P5MolFN)nI^X^MdX!N2=L5vK zNN=WIqvW?{xh?Lb3F~Qb+$(PuG^}xiHEYc!;V-(_u@cgjYj!>Q>U17%C>zjd1hf_qheXaR`QTGe zVfl~}Ls_*Ja|7QHMZOSMX`_0F5VoV!&Eo;KBRy}OmnRirG+3?1K3gXu2ZD0;9xW_O910?XFkrbc!T<2$bzA!)I2Rarpdj zzD&S#vwA!)G^|m7k+XyKu55u{`&hhX8pp5Uqp5u4`9&|e6>8<9)9cqMjl5gTb}YIy zM)q2%&ilaJYgoP)?-X$fp?Afl$XIVi?7C=4^K%?f=E{X;E>jlNPq)6aKn>@+BS$)z$eZMu&(WSg?wHJR;+adbs*t zAhB)q4F&f0z{}6lmw<|-`OUD7V>-_I)VbksDz_Xn1JhQknvP4n8Lu;{N6PJy(+{u_ z>D@_RW|S?O-=ZiQtXdi#QXHhDNy>4!=v=GJm=6Y@9>a)Chu3MbT3;0j-mI}egWYkbO9b~$nH&i=Wjnpt2-(E_yH zEUvyg0E#b@>L-O{n*#~iGbk`OZG9K5w9)k^(wM!c79=cJ|dz^1{T|}4avOgK)u!GUi9L(`Vs$9)aIuJ(Bon#oPx7UL!B$Tf+9csByjGd_=^B-eoWfHT%#4XF*{D75 
zah^@KKItnRY7cy>+~5Xk^INBhB|V1MGTMYlCQ(IZvc?m%wu&-$7dA}cham!i<=dE{ z1Nx>c;gWIOy*9MoR*W~EehK?%1O&?PI6q?H#u&TQK+Z4J46{4DY`wT$=L5N{l5^Tj zi#;PN`RT*K1&gpKBmN468_uI|(5bD)_*H-Of!K)DW|W{h+kU!j#e8njW%%{4!|;qidzqudJ}(-r{UEt_2u(wL-tuMg#xUMTE*MBid|tY!|Z&#AM~$Pa&6XqQR2 ziQ;#~xykZ9J;6ydbh7*gzTl z0v6!MKI%TXI`->XlYg=`5-n(`J-)Dj+OA(dP9h zvHh8z=xm&}Wfc7%dbO4xEoTeffiABTKa)tYZjrLxsyM7>JCs1DBa*e#7m$^8jS)gv_q_1xqfn%mEioHEn zG6=H3P=291LsYvrXcuz1yB=|@Wa&`QC9@|qCv8h$Sk??)8c;{Yls<2k<)M`E5Hf_d zo;#yD5W60}kDA#R{Gm+}#6$)viBx-D^Q&|&MtOf%o+mipYu@0-apa!EyJMkFfy?5S z>SFgU`;hO>-~$-7ylK-d;>{13vir2&*p^9PI>>t}_0^^3riiL`pAVz%@#n?w&m%5evVj+Se_(7 zwMRTu#ofLb+x|t}vc^J?`Lt@i==?}G$)FcvaRg}82#)Auyt2y2`;m< z+A-+)CBEBdIAmRXK)>{;>&Tl@9?yt8e!Id7n`U|=I)T+iXA^mb)kGA#?~rt5vfCqH z>(ysSqR9Ciq=%wZ)rR+bmx(HrCwdalVJJii4EQGfF<)()PQ+fKv>^l2 z(=?&1C6I$DO5gG2Tm(g`r?+5$kL|!g^fT9Po0&y?9L-JrIaCtojs3VtKTsNDK)Ow=}3)*FpKl<3tAo!C9CZLYYISW z=TAoefL2p;d%gYOpOC)()t+@V6Sl?C7a<#K!Y^xRL%Mpdl3JI(@kqK=Hj%58-`!#7 zmqJ$muBfpHIu*-PkHhY6w~5M0blJgf`V&BechLOe)L&xEroP8~rs;D2x$h@jr)iOY9|!qV4WV)uYJYL=5PE83ypY>kgHAc+?kJoJIl=h?BnZY>)Z5*b7-_*MH0Nv4mzQAW;*{Y z?Y2RD{+vfyQ$YN3 z7bKRkptM9Ix3W()MiixzljZx|5K=2PB7|lphd#u@!xqZFdXs^tZg?VzPT>Cjb9Z2> ziu9C}Rm0e=Y8d_WM>Od=-RG&FaUdQcf~Fh6YKY=i**0RiNvoYdlJ?FP` z%rj7y3ZLH$C6=sDy?Y&=4cuIcD$?_)E%m6CJH5bDDER);{~*Vns7z;Jda_J}aCf*V zIXH-cIj*$MMoXdjQpcFs4~!}ro6{FL=utrF+s}zpeP9ff&w|^sjB<7LMNl?h5N%gH zE)8ZHmE>`P=N51>rog9N-75s07zS0{_PE<*fE8s__m&E(tzGz=IeAHOZimpT(vkbo;?9t1+6kkOVZcOEOsF~EK zCo}gxue+}8xHJDT+i&>wwdvC+-ayv|KXrX4N_BJLo+eCqgpqsQEgBz|n7s#7|B&!5 za+j$;^h{~<<(+BIlg*uYVYZasZy;Jfb8+zPPujvmTnty7H!p^gxV}0Hp6jPo{Aw3% zlc`DngdyuExO6+G4x$}U;m!I=a2H7=(qYqwHKGW?8d;rI?F~;A%|WJ@L+7euHyfan z%g(mxhP79t8;Q&3(6B+2OLHwYXO5>tb;btK$IlX9cpH{I7WQUg>Dq}W_2SuA*ew6MLYzW%5+Lhi|mTHf5ql=4cM9jl=ikA6GO+(?$TY5$vP zW&i0eybn$_vJJpWu{%r;YEx?)nc_n5vU9U>J@d*;qg%L zK$`^Qb>qD)2T#Ly*gKr9$nx9J?qg!w)8W{h2xY3N$0TM`MG0~L;K5e+?#(orL;DBx zPf_Gtd=fyQY;M9IBGQ;WF02>@SQtZ67c)r{D)^dRQ<~hp2yS^2OnbCr2{OmUtD7oO zh}hZm+YGwL3^%?>g 
zEZf6{3}QY#a^aBLMo@0{otLF+mcLrvs7a-yzpE-#lMI10y~hUur@l0Mz)uPE#S`yB z7W?)5pNwjnf8h?RS+;O{q^VawVq~avnZ}Y<-Bkn{ZZuss;PZl94Wv z>jRzzCmRZT<)yx%4AwQe)(kERmG#RI^q#x&!go=gZ`kZE92-2Q42vF&mEM(i>k>^D zRoVa7an=2Q%BH>U5rHrX zhbpD7$nULpSL(&zI1V2z-RvN>z#Ps0@|c55WVHW$|18FMculuI1$$Z{v5aL9 z8BhP_M4W8ab`rJH2^+tMT+D!t*ujM$rGY_EQ82p@oyHYl2Y!4l77^RuF1FTOynM-z zUOZl+=lv{wD~EIMV~t1q^8qK4g#j$tM#Mp=NFU+G2gHn_Ibkt6CtFbSL-c!+$D`am_A;bu?)1jdieECAD!CpWflY@#=Ef%3FbgI(dKjvcpxuE*m%#A!T>gMe-?)WB zJle|4jtFZql7R?W!f*w-8WPNYsR?&TRCvNQK#=A=WG7^|l5t0K!$ZmpW*(vUH}c)QeD`3S7R=`YRPpfo+9q z<&O2-7*O=C+b_G$MaraQ5vmk%=NvwoSe(y%RQwEckHZ&cqWLGaIylLy!QM;EpZw{?d-&gi9SQ zq9v9(jegi>dKg%BDDr+vL_NRTv~ii4wQhZq?bi@^o5WB$l`O?z#&}LAR-}$BKc(rm7*^fY457dzW;(l z)fYUo2QVypEe0}LQNM49_@Epq_(V|BT}Gy}Qs?eO@m72EbfQ_gc&t1%IAMX_Gdx!& z%`57}C+G5#*Sm0#Ie|5yFd5N;^S3(S4f-U6cRzO1$00&$?!ChPnE}biRsj|F0oy3( zYChB=9pChQnw|&?&}mJ-sWF^uK`VC3Kefp|?$-Pkm;^DZ#zyi1`FXemtW@~GSmtXp^nq^RwED5rd9OV2^xLl}fA#uzq zneFnGkufO5{>fI}`wHI^CF$6QTuPH}D_x$b*9+fD?ci7+%aU3^rt-)MDLIUS1iW1X zZyDm1^}*g6=PB{HwUfiID4MU{R#F6q!^poEdz>E-@~)bg;mb{-LN4`-mUiz%N~$zA zhFBue_CL~W#ChD8u%lSr+8SIqpj)AogecTu*ME*a-5R3aob^~K2;#1Mu&~53{&0y? 
zEREHWYPnCd7hFp^d&gC~Rk>~h%AXN=#o9~y&KPahPjm_KC;4f1k5oX5M?u`%_2ebN z5?$=ONv&8%>fGHAua!JV^RK9gS3WcHpy!2C(93!Q>f*8|H{9l<-<~%^eGpOeSMA!r zJkEmN-`zk~%k`jOdgz_QQOiBw-CRBWRTw8({uPo;U)r^lBD-n->xa<9vb57p?v=T% z&Gu%pT;o9vy5|HdUS~^DxCY1NWA6j$6#I zx@#$c({4aqTnGz;+ho3&J|Jv~ab+Y$KoFgm&V>#iX2NQiOUYiRgut4LpZPwW=L6M^yIN~m^EN>5Gen4D$vm{VY{e zGO^j*KVOj3nTC=GI(nf6eKn#9*u)0i$1cGKZV?gd!Ho*l(-eX`7#BSDWLBPX%I@iR z2ZPVU@d}8Oj=$ZOxGO*x1n!OLM$$9(NPzQCWur}L*0&IS(Zl$i)BGjh2^S~!bRY75 z!wx6JO)~YOap3I$vU*#qK^Rj)Qjp;f1)Bv;NXLwFo6LE7msmJP-bDzl=iC;$MQ_XV zJKEC#cY!TNUa_PF!^q;(2>!j}i<>@PL6RpyzvzVST30+{Z9bOMe!_zrL#}z}QwAUt zM5Bz@MjxSNpY@roFrl_xd|5D)<5G&_<8lQd%2#d03Xl-5Oh)gMV8^kWN8Q}(Q{2|> zU$A>$#OK@a!NmMUNaT;%Se#>U)9yDW;tcZ?XMVnOB$RmvK6OT7=_UliNK|w1&B3+xBi6op5GM0 zf-kZ|J?DzJMGVde-r!Tvva$^W8@#GwB^o0@3?qAcv?VJpnY(T8^Tu}PTQ=9L7FlU| zUDx7lntSV$Vso(GyZ)N0UkM6$%X!rFvmolV>t^2e2xE!FoDXUFL~DO7?;09qssW~VMtNk4b8J&zQp~i9|AO~eTr59oCMlDrG}fn5!b_#a zwJI)5SS%$bNoY8nX7{eP85SldS+qEQo=fzRzjbS=G{35OLcMVIrd9|b- zQ(8)n)R6Ox;}Mx%_oTI0YyK?6+;bmi5{f)-V~CQ!8TcaR9(j|7mWkOs4o(fJ*lTR^ z&1~wcH9JVSpAf?7*Afqd!9+qxp9-=Rws4n-eAO_P7Zw?re@Yk!IV?*D5PrD z{|XK}r02X*S)AT)yF)ExD|w9`;jNzKYeksuT&h}3Q&*N}=>~sd>#U#rc(QR~y|Y!E zFWqJKPT0VblvYES2R9;ij5li-=m#sS6AzRLVKoPF#_(cuB#fkKi(Cm%Bg@27kkrvvmbo&(9#Tj!=n3R#e$H>ey1c1dVpTo#*a))))M>(DVT!P5N5r`<%hg$$ARw z0y){l$U5kmt3N(b&qzej0zK!F$+I^U?UcTF9gf>SNi^P@9Q3c9JZh5?y}0SPKj=Hy6LJ*;`+>{lFuEI z)QcM5GT-YV?FZO*=~KrEEZTNX#pB7~)53o8zCfy{ns%U`zRV~%O?hXj{XpuChBvvcv zGfsH}Dqj;m8wl7frzMx&J$=(0nHfDF#e{F`>;m)jxM|mZ>cmb9mr!r_ouX3@M*3*g z(Qm9ZwvRquBUM8UX(}D*Z=Lp8H4(RObkN(>JXI-9z*acU(CPC1+0C~lQlYxI8zNpf zs3fpic#*y^6I3$3ikNf_g@_{Wid7_3$Q!t*6~i7^&|Pk&DDO)de7(CvJllX{DwXce zl~zxsJ)z~9Dbsl6nzA^6U4=9$hjSXpuSx$sO)TPMmCbVFOt~6ENxSxtSqXMLg%otv$s=T>cppQmIqFibROUm^h5Bv0NK=nGE(r zw=I9K8uraYh~Tj0jqY7cy7Yj}LG{*>oH}%0a02ARSvtLR8Lx7L1TjI(c%irpV%~MP#$g<;>R6ZeKrY_}C zDOC*VjQ~-+fr};}QgTteeyfw=kq%3Fpu~oM#!PgTwe825rfOr*<<=MocmtHhP|GU6 z)Bb8D6wa5=vm|->G=pI^hS=%@pYO%YB}qm$ppvLkLhRTNdWDbWm{>HmKbV;$K7?z# 
zWzf?f;yg2?YW13C+bvk=yY|zccf%GJ?x`vSQ7#(&Rq=TTXc>wLVYLaibHpb*{sK`^ zSY!!R!+;E$e~B(dcoDYiVTW<^#7&U8N%g0Cy;r!?Vbb5}FK#tVo=#sab?&V{GxqRw zwV`a7bEOpE@0=|iJhyAT6vCbEenqD`hd7Q~EIn+j#jPM7O)F$V|uT zTo}44AsItB81oq^v({?UW+)>w>v;p(o`Gn-kC;BA>Al8Ch^8qx4k0>475QblptoL`ZaYa$Y+5zIFi>Z{J8oI`6{X^UD|y*v0(ka`>^F}Z!4-~5<*KON!SY8S{Z7{?|Z&0qv= zA;n|C`}u9j;W5KEJ_ z{c>?`z1$Md8igSI73B$MXxs-Zuv{u{#y~t%sqUQa&jIYMdEqezU`Y|9hk(yJ*hRLG zE2VYtR=-=6Hp@celrwx7-l7;kKaL!*r2V3-Tj+KtSo!|))d^)M~ zPA%8PE-b!(Gb<5BurVxPBbBbR)?6>56Ego^5v{d2M68D4=2{=P7CG~~>Oo|z#Ke?n zhj#wLuBF#ir=!qgYJkJ^+|CrX8-v`lh^Gps-Zx({~) zBIe#zU&2!sTJ{j2o*|R47Ke3rea!$kL~&iiuW~!;JWKm`r78-uyTr}gzy><1UkL6b z2p?bQ;6f$VQ8+&9lkq1QJAzRIg|`gRj@4u#BMT)}pFw0f7O^NC=Iuh)BCmu${TD|4 zQ=AxKi3C@$Qa~op%;MS9Csqkm$r4v}aZyi}K4C}MR~~sc_2fa%QV@F>Jl|_q4s1x* zx!}2o_OVQRmUBjt-Fpa}^S(g`6LZ^SsYcZ#mMjRKRyflauZ`_zskL&_tNE67{EUh$ zGUm}`X*_@C>v(YeA^)6T`b2`uwq!N#;Ms4!)yKCzeXko0%2zPW6D|0%GaV)RnktX@9XZ6=njPc(Tvx3Wb$(mg)`$X(u<=;P)iU>BY?tXeUcfEn`UhB0w;+jC;ro!K>h2qWg z6k=8-aAyEMu%qjFp+k?2*VzQKk&N7E+sm-1fi)zu=cK|F6*C1t5tXw(XsB87dGKPl z-n1#N1vnC;r9J~@yPnU)L=teEA3K!qg_D4YZtI=x~_ z)KgB>o&NSXRNpC4+u4$H$Kj>=ZCS2Quq{#`#-y|#xhMTEw; zB(}l#tQeGcY&(6vntxJzvQT4yZ}KBrxaxZCj3xI%=vICjdyn(8JVT(Md_4iCH&~vo zscZ?JJ52bowT5&{n7kGHm!=Sf*YILM-dcjo(f!!Z*UB>LVTf5nZqoz0f;HEu2D>Lw zayW6BE1zvNMT5zmh&X$1UbUT~>1C={^Tf_$Ik=-ma*+&sPWgu+y%aKUQCgVctNRg+wqOT&2vH=q_Tth>XjdL$<-pPkHl^s#xQD??^VJH3&f=Ngym zGdfPzi|HmCPj^RkIV5RSLT1J2)jjlq3fZ!lbGw!!nYdcDSD(xGsxm9&d9AMh4|`wP z71z3@8-ZW}f+xWtxCM6zPH=Z81PH<1g9nEI!6_uT6cF6qDg@WU-LS?=A+E%&1lMsPN>PBxx5ERyWLk@E(ksA_(R&k(B`P;GE>9^ zu31_*h&a+eZo3A%DkDkBZd8$Phw#;>GE}PoPZyi-8c4m!-4}q~N=CM!ya27^8D>e! 
zyOE*~{U!*m#2ToxNIY~r)(e%9BsOOS3C-X|?m0Wb{ka+;iqA2;kz+4O+tmbyr)YrF zCJxTu8{GuL%O1*$A=s=8CSisuc>M)6xXHn zZv)SA-eE^SAcp&>5ReiKLC{#a{C26cvt!?KNAm3&Y77gHa>;5LVU$P@R&9Bn8Ipol z!6q<XRE4^S*41?%9ehGyh`pmK(c2i{Gk!D|+CQy=6^c)EX*{jkp|T>0vZfgn`T zM{?v1)QK@`!AU-Ufva4tsXujM`KG>OUC-|(10-UPJ7A!pr~ounDT>MQ3>E%yp0QM7 zhFyXZBh6-r`CE3f)0%noDRe?2Y#8ArN7v|;Hh28T?v++7I+adAUg@-mG<9$MuVh*L z5pmN#15xd~fyZGm1>vv$I5#efs@eV@R1f`TIri%P!pxQ!5XRo-2%k>Os7PCz9r<=t zYcemZplJB?AZKyW0`HApeRmY_87o!3g70st{UFZCg=mDK`1<+}56Ni#GJ4vmR)}5o zp&zwA=4-ycs=!Vir=m^kvdj}9uNz@75Oct-s2QT?plcinP?E=>$&Ic zuVV;u74LmJ7UB9?Mjo%0aQPH+q{R3@Se+r4l@t~0>H8{QgD&BMW9ccv;~covx}X=R zKjtsd!56P)H(PAYhf*Xj=2Urb7Z?oEN~|Xu1ObNbW|r+Zy4S4vZ?8syW5A6UgcCTP z=|<19O=HtM7gpf36StZG*;M^aG&8|qNE4%OBphxL7+IrV)ScN_zhJs3%xEEK-QxUIyfoxd z2R2<+%6TheyYfwVX)kCHazvq8M-&Y+AOd2SCEnN>3Uy7LwQvTWVD8 zR2I8u7gn_m;BL$73Nq`Pfal4N#*-g93l;RoSFaB7KxCTgJvO%qqz?;on^lt+?Gd^u z>KW-JBU-H!`hl>$yEpCPbrh;s8aDvL_v{!5gz;peE)i_L+Y!d6n-G|2SYGNnTq_)N zI`l4_Y|U1-W1H9TpTxmIRp6<2kG2y;T3;GSILHNfv@&($J#l4g1-1uZEiTZ(KytcM zah3PAQ{us1i5t@X(=^;P7;h~nhHz^%vCs=F#=f*3%ZO+rdXhRio%yL>#f zH3l~m*JP=mM8;zJPSmgL8&Y0Foe^nTC-V{r>x;{pzF{0F>Z5IMN4TUZI<>BGoo`>Y zH-pN3aMM;s2(-p_V7m2ZRSkBe=N%R)0%j>Uzxw?H3+<=Sd&yS6>S{HU@ER|l&(ZVS zp@qA`_1e00hy_>>9*RQ1LXAo6!_ZaB7hPHNZSi=Pqs8FZV2h0ne6zd$VH{Zwk%;ycSq;Bw35W;#JkRj+T6C=5JQjR*oO+z73}k!K(5{T*BjBC}0y!yH9c<3%6hPxa43_c!VZAvD*@G>U#C1TvxaV zZd#_bmu5zX3NMv!i$Ik{`>sL1ofqcR`=m8LX1o@$V$DWS7>Y!&fcwdViQM+=Zdt;i z9UFnalTrz)f=nio0p$CyGiRA6#vc%&E&O*c_wzB$s`EYcXYnP3Z84jZ*N4BrdPv=S% z%U8F#1r);8u~Ubp`x@3aQn%L&SNErRg&GCNtOLiC!{%0k&MKdE$UrX^@5uth$Pp$D zQSpDV=nF5TENg6^W3Zgv(PW@14D38w33Ps}_39}5oEVfnVWq-^t2MUq-Iu`-C^)yN znfogDT$7%FE|L3=Zh;z_TiBwVE3eNgNrD?5n+{|X-R-AaL$?7x4^{Ab;U|`2>+*&w z&kKWDK$f#VS07@u{v_PtMZEXNRxW+|?VzMELQaB;m5{>&e+cmi#=c(cmcV%Pfvyl1 zssg=%UNmZADS*`ByDumL;?Ts8ITP0>9jKEK)t*973;3AcDj(sp5TdZC12T=n`fhB; zN9vTGL||;MU%oVN5+IB;|D9E=Z;(Onv8WMR<@w=B);cKk+@_%k#``h01JUyL-mC9w zQg;GgYsl)Q8J~#z_@rR$3sMd+ACsbpGkec4a-PlN_c zs2C0RXR6l16!Wehy% 
zuZN`uWad_S_^VhypIj;TJ!kt((_rX8cwO>R;QA&U0WFuTcuzLyEC=Y0Ti$gaI!{1r z99sR|g-WzK=>1Y7h99^u?lJb_wQsG`QTYxDpfoJOg8@m%irVN4>K zBWvIVf4*~S?Y8yBdSXw%H_lO1@ZS~Zh%)so+>|3poHGCUH{n`$jhL&)CwU>Rsn9K+ zx_#f2l&9Or%R&Wv?QlUKq=f!tn_Z8T>_STTh06tCn(1;=It1ghqJh)|_qMNiA%;P2 z>32nhnn`pvi!_9&*JU&o2nKBbVwq@a*vYt{xOLeUWhbw!#?F;by;xE!)R7YU314`Q zc6ot`jC(@BNRDJEP^oI?PiL9NcQ*T?O}yAHmTi&y?|Vt?B~pbs0$M#8ox-}~R&E*YH`89# zlomLNw5so0Ds6y(5CH0doAiW*u9%&`jl#LAtf3114&!~BD0ge=1tz&VqY2-gEBuda z&>k}ga}zip7pzWR=3Jr2jdvQnlsNP~ioGE^AYZ0diS2hd4KNcR!0#0ndY@4AL&ut4 zwE+QOcfU7dA6DPY0eQKaW-IOi^Yv@vC;8GKWIhxhbuGPsKutnp1sgu67K00${GLNRNmmb*_Fj@2jx4uvM#YD~VZ@BuLSv1Df{ctK zZm}vZfou?@!+i|ZE+?uLG7`XH0$O~uRL;vOu@UMxF=b{f77a}io)v(nMEeCFbE zgDHK{ky87y?`$vOyT_&mWUw3;1O|2eKvB-VX#cnbsgnJ9HTObZ>iGKegW1pWAG>Vz zD~iW(qTf$3(Z}d6mUM_aaw_*wZvx$l|5Vz7?VWlCmd{00B*JdI&A4rwQp(vZ(JO>g zqF=7>s85+*HQb!;@Sm}=;d75mKVf{-34bJQEdU|KxTkLANCzT*sTL)3mI5*5YZ5Zg z#0Pg#@fuP$npwxl6BnYVO1x7^0BYrPvnC8PdCrA@>Q+rn}K09#Egp`&zv!mosep zB&zU?x$)ElMNZmE|eCM)1h_iorV8czD9?3x{S%BJVPm4Da%7M%~jon{i3H|m{ z)-73$v?a==VQJw-99m$WJLbhb)~KH;EDD{U>}!)U%TkDL(_uE zm%(3O@RWsfk|vwTX-0#QjcsgwTVvBNHo-q)iu%|&{K6zAwtjt4M-r|_Tj2GiiO+SL zxfHxaVoKXHY9YM|m?;SiIBQ~Pa>3heiPCz=T_ij*Owt$q%wy_bc3h{AH4H*wHE59e zw|p;{>L-xm_L-&cGr{~gmhJs0V!{qGs;4EHtKUe3CT|tv9LZ=HCmdO30#G+y&b~bq z9vjPJRjIj_Uj8^M5^&v-fo+`K%${?dRxPr%{_5`8uuWM}zLf1^#Xy2n3&H5$?F`d@ zYiBHq@fngmKH;X85Ke&|)E7qP(Z0l{`^WJmhg}RG^rEQqk9vQewLaP*W@x!!s}5<3 zFIG}lkgGtcFx`chH*KZ2sQ2acnoWi7;v7#f;H#YPxlWtC7Ycavj0QpE9t%}@O3%Bx zX_2yoo#XSjow6r344Ge-#BE!eGf2EketHoX`^VX|_jqK_(cM{qK!D{0y?wifR7+ER z;6&*Wx@G#I>!P>f3$B$w2LZga73JPDuPz5n-zMi>&IHDiCz^5V8#M(g+ZT=RRF?zv zMyNB(FV+bRWx9KX)F6=!tVM7}37P7Z4Y5UMu$mdV_s_*${xg!)XSw2@A?j%SXyi7c zM6}Z)MhG6316mEWUfSJUE2xPs7aAoZ!gsJZuS7XSnuIz!RrBG~P8EHEtLn1C7I{hS z>p}>m8+xT@Tg{uy^Q|cFZ0tAqiv_G+JE;$EYjB zUDbq7!GDz=j+gpQfHgxhd|kerIWD%2oDV+}5ktDj1tL}JeTo}-GzlllNT-`PW*(-a zYTg&c{x}LP`1Sc|PU^E|)(pJ8-6(y~pakeyM@f@{9SY`LW=W&vybQg5Sy+|A^xZKR 
z?87F8$MCTCuu$9k$Io{20m=uw#sf#@3q&43PoOVtBRk~V=|*wJ~vLf@(cCxPH@A8tRnWp*Zi3Xi|?@M+Md!wNvDZ zgAu33($B#;uk?y`Ex; zJ!Y6liErGKJ|=%Bs+0S`TwbRIn#V-@6%e!t|g{6Kl? zGmdTJv;Nky?p@C0DW*~YUfceT_6)Wwfyrg{gND)x6QZib?B^iu*LXU@XutD#fJA{q zJ&EmJgHd`QfleX_mEcH?Y~*Swo{}ptb7?JiIx}Aw7)hi-+k9o4GY5C$g=4S{-CdNA z`TSu)FXNl{ke_rmq9w$*iCAI`IEDfHdT6#xH^E_0(89thd z(^iD^X(h8m=?x6AN}Zk9cpi?x0M1-Co_O@cR_Qb(WdJAdBxF#yN}l$^hkghC1P0wO zhmA{`R^)^43RJM5)K2Tv#j52D_}1xAOQy;?eaM^-FC7nE@uw+6kb%uGA1ka#b>h=_ z4#34o(70I2`#q>w*}`g+e{~3S#PB)%5o(~?v}pMNd-=v=#p8)y{|N>H<#Ya@g_?e4 zdGL?OCQ~i-Q8nQ$OjPT>fvmqnspWl=gQH(k8j5K>%|b`M>(fL-!w1sXvOUA_9p-U; z>&{FAaRf8<0pbiw-&`P%og-4Q92uem|%Y8 zbKE9fN>YeN-q+pRu;9Y{G4gj#ov$n>`_;5}1)2|T$IVzscAA9}9aYaoTHSrCk+!Cf zqu28_5NUAs1+mtHx_0sP-}Z`wP;!O%{U@Zf9(|#Bt9NHJU&X~(qPh6FcWj6q$LzBM z$sQN8+u-)?8iBD|hJCdA+)e$@wP1Q5-S-5+^j7@z5rtl zziCr(t~9tqxGdjeI^xu&PGFWAi4hPsl4Ue2*5k(WR{91PCVTB z?GuEGb`?y4v+6~0oeLa`tWTUfkTvelN{LIK#-m^y%gyTNxh=1KXzE+x0s@4Oy;@g1(kHczd!rwiqoO)e7@g(YE|e8x5l<@RWNu!HN9 ziu9(DhmAdSdtnXO*msGw_uJk+=gVl+wv%z<-EshHJtz;xiht|ws>K}7zG<;JHh9j) z282>W0GO19Xo;D*Nyq3mlJ}m?Sex)fir;^(LNxa^LM~GAD8@#0x9mIvCRc~d?10%R zl`y}M(ng_jF}blkW~GiL8*l5=`wz4{RA9L3(&s981{J}c2rJ^wu`mM^tl18` z?qJm+*M(ShZ;GI*BlThh&}z{9Gw?!1QM9o?(Gw$vb}9o(a*i)cL^^xn_`mW5>JL%T z@?7B>QKwgdy<&OQX9@#+iTX8?k0nV zT-Sdw>$|pfL?t-+rKgJ~6*`QKoz4wE9?$e4wo>r|y$ih@-V7Ma7tXpa7ADi@=$KfO z&A!uovEL^1;)c^=T+e1f^bxv9l|mAkX0f&*eNa~Ekozs3E>8d3e8XY|u~(!WOi33h z)0w|Yf1coK1ta1M&%s|vtn>|C`jZC^V~3{)X^3s@S(DkFAw2zNb+ zx9pf$ws_^$-lSdQ%?-~*O07jXGeq9|u{6GJ>q?8fx&d!1224jQbW$pxk$!e4)#C&0 zmB_Xe6O`Qfy~QXosDAtTDc;chN%RF@JyZ4>NAD=}=5O6Mj1q$h2N=sFGK)tOV=K=8 z7%|UEeHu)LzX$~$lj*Qca5F5UjIshe8vm2b1hSYE6KE-9=ZcoRx;U##AmaWVrp zmWRI0WEHtc)=(6zWl@%V_P`UM^7ldyl0glhFVg4Pe{C9}AU#+y_y^%Ih_IGg)h>)W z$9b5m+p&!-;HVo+^5(@^T7DkS2DBy~avvKN!TE*jorPL-L@+P@$P4<zxZ0ja8~HWE@dzWsIXH!}t-E=1$w zvwiMFnPPpH#b@$j6p$(A=AGoi3gR1JSk*Et(c+4>slWP&r@vLyh!LbhHYwu)9qIjM zv1t~%mpD9mLO@FneRJ}-TddfDfQzkBXkPdCPrMhC_9}vhD~|K#4J=B>*Y(-p;BI%l zpJs!i02)FroXX?#Mx?(#N-nZ-sdNg9pXe+OUM?58-@848BznZ?*&V4ql 
zuwbF$lDT5vn4(uqnc+qY1ObCYdL6m{!0~jilKW3!MW>Su4yH^NZN$rYL6gKgC3JeQRa8K=51(; z^il~5j6M!?`HPk>Vp!Wn5y1Vx%a0u&wZF4<7+fWS2;0KALi1xvS#jP~F*}j+;)i@}%X3PwMFD+isefq)LC)O4$eRYrH{R(&)9>mYz`A z&p=Z#5`vz4B8s)q%Dq5~00o62?y|Z2;|%b`7E3EUjO+89s8(CS zV7~?vY4(C(;{a^?9;UEJ4|qvA>~uL((H{kg%8d1#(OFG0!V;9ofVh!NyJ9w>A1GF? z*aSA4@kh0C$=}o@kj-Qg=|#4gWod4z4su)+pA$T(gAm%ZT{- zKqMJ07ZAA{QM6$A9{GoDb{vi37$ANa34E`b>wwd-rD1ra8{1MBfYvuznmX2uH!xCK zyC7$X8g8Z=*zNZ|Tk(qvUtfQBoGvJHWfUyZnKNWDU01J0f=DVyX53#eEhzD0Wwj!Q z6yl?PhN2m&dShE=fL$@Nh0jvr-gZWWjJ6Ui>RuXOV0(4CfrTpHEA9!Eb=+ja_da~@ zJ}Py_pn$5-V22&{elV5uJN~N!{K-Wph>Fine9JM)6vIBLr#Nj6UgkZH>2of=L`HqO z`LFHRD1orCd<8OCxP}9ym9qp&F)Q9E-Y`@?j2dzVwL674iudjb>7EAO)H`=C2XFE|MDxEGn4|P%A17aVA%&l zF6@p^iUap5YjenstkqjxABp2T<5LyzLZ&9vv^~3X79PNWk|{iEv;!;SS(1UmGm-+P z>!6hjTs+58u*Jo}ZI#2Bj-|6Kh(Q}KO|I%fJb~aXFYEwGtySb_Ot#R*AiBhDUm7ot zi17nx z$diqk5fSyG4ifhD`LLQKI`~Z}Q{c9rq zg`Xy5MkkD0=IXZ`N4O8>8>0cA4@Uh!ikPukRMb}q#giy>uEs6b(z_vaAzPaFJQw*T zwX3~~-&Y?`QV*EeRpF<_;Hz3wSqQQgE+Nci4re2qj%B2$ilWx-@Rr#b0*SakhAAWJ zmAss@AMTSU&sT;w50dgVzy=a!q~W-9v=~@7*iRTtzd^(9~J5IVGcu`Qo8U}RKSYhXa~3GN&JgKsXbiAa{Wbfql;-p*Op{4S$q z`kvbg#>aU1wf3}6KO}hfhpd1Wok=@V_a0ygaQRZ8Jg!9fJLJR*EkfQ{V2Nwp;#WsL z6=))~@rmr$ywyaB%qzAz`rfud(H6fm?#YOO(&E&nk-)uo)Lp&8s#eb8xl>f!>+Gcc zsE8x(4gFO9QJa7U{_9-)bDDUpXu@XeZVF0v^#|M{C7o=QvxPJ0zg#m8af(`j?-hxJI$AK!&Ei#r&V4nI(c~LcSX(;%Vu~hqMrIU+yA}OS zETB?4Dp5qPeeOdQrlb&W6acP{;V#v#h_Q29ka>`f6%Q%va@fiM3-Ve?f^%I^?LM@6 z=G=%|=!o5RY|2c@$#-vL(nMb)zN{He4_DIa?!82+ogn+wty!u^cHYU>x3~k+^nN?> zz%#L_)K0J=zR+4v2js-kiJ|{!{cTMiE-XNQ5P*_QG|;7{%dIXILrwG0`iF`6mesy5 zL0PhYd~55u%!^og=$qddff4@no2p?3`T1N8MA2T_bHAt?%Tv)q&?%*O$9~n|u+=2? 
zVH80AdlZoSmr=mlP^>UF#7;dQ4Wr~SxmP-e@f+*;h5#eL94>z;a8v1r<#4%##b}s0 zpK+VdSppro>Yxi@{A5(MOX(-r)~fzmC}(=2PjqC6dB`~?`$XXaz++SIGmW(BIwIfs zS-zV$c&HQ-r7!j*qC9anda^V{HQGj>BCxRyFNNN2Zo7DKGD%WBgfn zT8^$+j?Su6;HCjO-$t+Pp(tN3l?G{*)GavwEeW+xZ`5mM0vEWWvu*a=4P(&_^RQY& zc5@U?7$#*9MJ8bu*xg_pQ;Xe!f-z(H4)vqtJaVc`4xwNTU#@}+_tIc zfn)*FR-;kaq>RC_=&ETM`)f91cZ&?K{U1vRvS_8yDjKbovHpfv z+xZIZvwfNXr?w~9QoWv%Ri>D<(2&oYL1QNyJo`a)m=c`f(ebbclRGA>Gp#1q$DjJ%B*jt4{r>ud&qNTt? z6YImJ*smv|0%XWUd;-Vun{JOKWe#b z=3!rK0f&o1(5Qanyq+@R5xcOevHh8fooG_%%2E5**wxRy%1xT_fP?94fT7zl5cU%) z*6R3L*t97OsG}&$*wq;-mZ%;Th{> z40D{Xu{M|H0z{~D+^K-%V1tl;pQml6K|bVda+*g71LHfFy(8LnhL}(NUDjiOrW61* z_7}zhP-AOAIu92Jx*y71txxpFIf(-t6}$jPg}i|=#dN6@Jj7Sv32y46v#<-%#8~%r zL*vL|6hxY%cD*9A-E+~<|luT znTZe5MnkrIHeOme^X^fT_^&6F0eJfnXz)UKyXAfY1jX%+a^OB7sYN!h$24 zDXsPRC2NxmJSV-%|CGYE2g+frn*iuV0i|>_VY1F?U3b>Pw)M#%r)+ood=|L`HJ)Va zN=%z$|=!HG~MHqyIAg}H)0X&DB_To_(@@@B{ zXYI6eHD8wS%j?RKjj51P%7;@0J$?F8;!vMb1&9V6t&b@whjn)uUAh^#)yL-d>z&fj=V%e5D4>Y=zS`%#3r-5#Y%YE=iDrz9iisvyOYrzDB$QlGC^u#`8pHAn1 zPp>J4rRo)Oz`K{W7zfautq$>L4wnR0izU;{`ZX%?yT68?m3l-~Eq5XB=mAzI1-)3B zuu$;t6eh)K)5m2vJJw}$yoFHJbZY_wI!U#sH>{!&Sl>{Uw(=?MONBN)||>E;IpjSn74-VzVaXs%HBM!DP-e;fWrY#i(`_ z4oAa{S@q}(BHCx|fa{vo6wO+wH1cN=fb;7{V&#bx4Ya5$d~`i%w7#ZMDB%Ts?brqo zeEFai&ramTNAzinE!sgBo}Ah})DNsa(HP!lU7#!`5L%4CuhVbS5w`^t<-?1;e*~XzoRVyZo}PB zXG0ca?ThHedt~c_o}wj$yNZYPc~_M`P6=B(A5P!k>mNR-xM8o$VS${fIEedCZveoO z8TnIwi-4-m-RQ3HLVKko+OlX%HILdOSzd!Rb6`gvaRDsiIjBqo*ArZ8FUk?7nq7To z>m6&IDk94jk#mvj#5A0Q;=wY-^AlYUd>cB~StX1wXEOS%uTO)n>A&aG@Co4?*Y~Gz z0p2s&a4MJ~&(=?CD8@F1erdq+0TST(;yh)YHhy|&KA#`LJwveNHGeR&8P8;>Zu5?E zj>7orb^d5`A0SmF=tU>w9xDb9W(juC^TS`ih#%BJ+dZ)k^v8);1pbxfMlCa>rR7Aq zk_jwL0o=59fa+*9qH8+eXNxLNRkkU7|Wav_56)AqNtE6B->ZN5!_@% zYY-F5g^w>wighXj%Q3*+y}`JND+I*$6^ZP=?GzsFv&9b42t&B@Zv;qi2~MVknj3jn zmTQH&W*_=LpASFjqkSfy0aFmC1~1iRFW(R-12 z_z9Yo3@OUk-SpCLa<0I11)j_0(0)iLPJUM)cQZ%i{yF-JkTaYko%fXP_GFi3RAOj9 zneDynTbo6RQw+kBq$GX!ZoGKaG>&%$aqM z0Z74ela;i{Sko0Q`-R@XCFMTYkJ1&}nFmJ>SfFfo=e?nB6Pf>@z1sDRc8>k-Xf=BD 
zv+$g$0mIw`Rk(>|qMfe`V0_;lXw7jYsei9m$JMqRrd2hW(zMJ1N8Xo*(Xod=#( zQ;Om(!nZK?1`(LP>-qrsU=%bcay_}QGKva)#3$SZes@he07nf%tHX`RV+qS`F0KN! zqc2F|cE7gY_BM(B0wcpO}_Ktl5{4?>$ws?m(_}E3$g|B+e|aWo@rlD6_stRXpvsZsg+TU$9(vf1}dfG9mmIJ{9OE1x?BF)Rk~`!*`^Ziqr8~Z zyAvolp2gK>mCdv(@vBSzCgc$CClM)ekO9mAPZ_(%JR&#m@(X!mP-dR)=vMazs-mIiu9!E{uQPg3q}SY2z7zX z{6h^d8HShkP5&X7t;izp((TPfll$P)8+TQ;G^M}88p#I99uZI_9Dv0(f2YR{$TWL6 z<>tx17XNWr_4fYO>B8Ism_$y9JD$HDrlBAb{_nArfB&Gr@<7{gfxpBOwLd%>lYu99 zp6#y}5o_|_Uc~7m-vwNtWBNOwQd$+cNx+&K-P=Z-RzBGVB3d_|iVdYs z3ABN@CAv}mBlC2W=*h!aOknU*%=~wTvmCVb?u^9)z!}D`LeHJHa}#-jSO0xZWYW7Y z7sB$HZuTcei1}5z2Y)GdRsKb}yE(Wke18)s*~Fuf$E~<9_h3-5DL>2mUk|Dm!rkR6 zrcTobUhkuJWB6qk3ZMH9PQx-B7B8md(7>>nu2ya&GLXovd4e-Q_ix^nU-*;93-?X5 zVZ6-AdbP$l`*Rh#CpJ?xcn{JN`_Pc&XHso{j#0a#(K#Bh&kN7DxA`**^sx6m|2N;o z|1}-9_W#{8jnI|K_}dHM z@6Y@{PUZieg#Z1z{y#jk|9ejVIq&*^R9pSRas3v3uG1CL1gIPz#LV!E)_d<*iuRe0 z|KkAhatfmd-XN(J9Q!sEFAY1=t?g3ofjRhu8Wh0(A2-97r%yKT;NhPpY@|tn0BmAt zZO-p>5?AE@=>PeZ#LR{8mt6ejUm*D_P9Slx>y@7>5KX?|jkNVmmN_ehvuG5!`N2HB z78$qoj+&3*|@efQj}uSt}Yb|hrwroA?w;7V2vHz)VQvlHQT zroqoQMT&Ss|FhrxuNmU+ZeCpO6oa>c=t~H)O{*Zz!zgt_Vfs7b-M1G;uPncP*|hlk z|MEM1{`jwHQs`f%NuPbcHh=>$n#>LwP?A6;h$oyS0qMnnFGzXBA4Z$oP2u~zYzfrd zWPud9DFTqMNC-d{a)+ez(*VTP=2$N-i@CIiP85A-Dr++s<7><5>M~`|u)l|*2`l}Wy%=oSB21*yA!qc`k@V9uf+atirZ+P{Au@C zQ|f7xD*RP(GpSB+9@q;Y@r_&2h~4^4!ykzTJ`x|TJE07;s(Dc6uFoDD@2xi$Js?a~ zW@E31jzUq5*z`{~Sn^1k;r%}p)7hfv<5_SVxxO>&0!dgq?=|)8c>zW+BXv-q+m9iJ z0Dw{>YPG0GfVCH798hSJo`N+r))DX!B6osE%N(bY?}GlC)k|7OT4RahO-|EOn$cmT z>vPgKuGt_4f<4p8qo znT7qd8%3KlYsMCv?VpFl{2XC)1rCJlVX8*zpo>0Ie0f2kUZ}?kOSOIJCr+*exYxj8 zKZj@mpN%TF<-Kx?F%u|?>=NS1)7?v{S*J`=UpO73lv3MFxsBO0`yCeXpXv~nSRxJ7 zqtXwr??Q-S*ER;e4)AU8U#)MZQUPVHUakA%@?pq@W*wX=A?;_uYl$ zr_pi}b=>c%KJa6yfm9Z5eaEv21IBMB3)Wd+Y}_B1N{escDO)G!1BOj+p*7Yt)t|;P zfH5Fi3A?O*z{#hc91~769m~%KHKFYYqEG{aad|WM-N;?-JbwxhAx4t|U-Z zEME;;{EkTR;ODi&s``8>w9hukU(_Y-tjxKGTlD^(ube|d-xAX_aPLOcMPklt>QVW6 ztku~4jpUAt3&9^|UeVoU*hs!)?!9{M9pmZ)xd9$b&geQlgqQ#FAHwHt0auJ~&oCg! 
z2qItb()=^=K;`L;PWfgG8nPQaw3ls9t7)yiGde{2y52P%hr%e|&3w>x1g zDSUqt9?_yb7Od84Ks=C2lc+{C+hC1!MtH=S&gZPt65Tzx8m4YlsJdx`5Vw-3=YUd; zho5D%%m5g&!@#~+#JnC68^83Wl_;J(%xG0c3ycF^p43;+1HM=)?MBf{yt58})b){$v0X zy#7Zeeb3eMFN3q?owES7uSlRv6H1DX#OMxnZNF2p25sTu7Wr)LA=$jU6qnA=Ps+cn z2c6(e*yBH4ef_JS09Ui@R@7(hHE*5!P2hdO10LR&?O*R}?-)aIG^N@WQjjV<9SA?$ z$BK=x4MaKDsV0=IeR_|bA`MCWjpoI7Ggt2jDE&FNf6e?GhSQ=5QF=PC&`@Sd(YJv=_^4Hg*+aF;CYG^n8@_4?f0$iLdajKOjvJMnaisqP znn=I->yds~*k3V0q~TW^9cKS?IdLG!tGy5HU1Rw=90eTz@WCGx%4&G+%alNED^z*{ z;keK&W>B5q4(;6v#fauxYr8j%+djqFy=sWvG|wXafq4|5F3|C&p{e=A==IMB=J^gM zp>((mBHBiN{ZGT#0V$X5=>}&cL71KH)y^IL#mHp^<(@yUkmp4+W`WCm&xv>PD3wL9 zlAQyERvf)r9Y^~0E=vQGspM>(Pp+!cY_%r~r_HQNi(+6ZpN5?SBkC1}H1T?cARgoc z^4Xm1afh2H>qPgdbJ+`d`NpJ^*XTk!slda;wQZp$@guv{UQu;D?%Fc2q%n()?D)D-+BZcnv|T=CW`3^XTj({{fBs zrj!3g2vn8lzm^b9v|4z@Vu>uUGzPeJ~ZmD!kO}xJklix1~Q9yuu5p&zW?p(tzGwqV@h5 z34L!-)}{#ZZnpISkD6?%kHSF9{lX4P%iRr<{XsNyh`Y`5qOVe+%Is6VwA1;RqUB7b zy3-Eq6?_-$s8;^YhyIOSomf1J-n*;Zhy5oIvby~%`KSNTy*M%C9pb@nSu;pHc3Do~ zAuU0rY@+HBxb-er0~rTSqmY(O>U$IF5bgj0v9RF1*bYXqjzguQl#0x^$I0va(Ke_K zNOyRt7+`Zw@a#f%i$iG5VZ5d2ye90@G8Ah8Qwb4E3yCa-adiyWum`b3c!YM*+DPQ-Yt|JG{*B|aia~tKV zIzPuVOzEptNt0VCO)cg@pqy;S*m}&U8x}1B6e6fZE>nC988dJa#7OHQX{{>4-YZMrEc>x zOMZDnuRFwfyH#|jM!Q<&>Jx96AuCsXtU>R%&ZE=b+iT2G5!NA}`+(>*A`00Jxeo8% z+xJ6O9H*=a_ip39-O&9o-cG<2F0AF!dxpUGbg;IP<##MQzdF_~OGLCr*vI{kiNdva z@1k+)%Dkl5!rr1Bsxgp)FS@ko(mh z<~?HEx+(DDn<=Rzq|Kp3sj}G0@EWAi&GJcS^OrfTzDY^0l4F|^W1yMb#ogqLRtQV0 z-bw8>0li?iOnvd6;Wu~X)%{qwS}+zYwmJtsy$bo&p$^7BsL>}qC-UB+&Gn_zjA7UXl&05;>A!4Esb<{=TJk}(2X<Z>L2S^WedWUH@j@YFH-Qe>775B=@CZIcWYAR5TF@pP|e4;bI8~@i(*;kJWkx4 z2>c^1;Xk?lduC5$?L9?6eoCGv5y{O@+v((L2{aqbXapv0nc9<#VWVjk-M=63+DOuNRVoHVKxu}~%4M->+uHPb? 
zyS$-r?phy@x!y)+?DJWeH!dEZ<{Lb9)h1mlU>3}}NqMEZt;;qYe@&y!kfXD^T$V0j zB+9P+Mdw%;1a~4)&$Adg{_1ram-WQYCYPPRG}Iya(&ed(VF5AfR~cUUUt%~ELGHTrf0<*q$A5d=L74zOd)GxmCv@ zO;AN9;4)=b_2&|A^(!W|*;4hX)y(}HzYam2CMPGeN6uN7w^8S!TBc4&sFoR1wi`R- z+2`5Vc(cHSZF+&MM|>MDW6%Dpird~uf~^LYiux!F+e$lJW?)O0*57ZF$VDxmL$V(` zvK`iy1tu#$N#S814!bLX%aSkp-dvqfAN>-qMSg8%#3JB*j{^+UBwcQ0YMtwho$tuj zSt!G|S7EMI`gonv9^)VhG=zdg9T$Rw#iq!LRz?+HS+utwa6uKz!*&<> zm{#{z@K%yqUq9CCE}>pqql*jm>v)5Xr0??@$kX3JBT^9Tb!MiO#ZoPuzOM95sbnsD3VtvZyBD)o`dyzanLSqhH4 z4jY+ZO%3XJIU*redX%c+O0lT)uSUHEO$O*jC2)`?g;u)(o4^FvAKjqbW5M)vhOVqc z>E?8+pa;!#5~xZh*ofd}?tRq*xz3tg#Fnl8!F-89V8T}!0GUGTdf7~?@l9*&3yMP{ zivqq;vS~@4L9WFdTJFxpyrcE@2{7Ex;k>h$L1HzdKI3?i4x@XqB|~Dh`CdD|lH>Kn zWm-~fD~oBH^#XHWky7)^3$KFab$-Q4qohjV&RSsT(E9GfxzI=oXWrpb(;V%l`lKnG zSW35(Zs8_LGI-~<^14ZttGm-dB& zol$nHYih+oks0U0hxB4M(^@ZGc6f4zccn+={n97S3M!d*n@uF2iK;6816n#FLxTEF zktlqxjbE!GID6w91diU*LAI@%cnj4uDkmIq9}XxP!1LB~!ofQk#K>f5o*EdMP!%7x`$yo@Mw~1%$GP2YYnyBrx0@LCR;~H00@1|nS1igS+t-pF zz_$9F+lvPc{BJMbVr8WXc|(*_+Vxos7%e`Ptq&$Ji01Z?p~d~iPNq&Z>od-i=T+kCXWStosh3|KV-s>2<(&q{8bJi}2Eacsm|$@ca+3 z6Q%K|0!){Vo@vGD9{)?{7HG~%2@MhojH&>WPgYzpD>{-{)v`93iRz#E7d!m+Wqk3F1a=KMXCv2)azB8`Lx-B5T(D#ICsDr91i#<%GBi_ zSu*-6-`l&&_CJIWgkKpXa6)Ft7n~!ZyZa$#(YK!Mh9^YO2z6}Z61fCDPXlTW+j-dn z;siXqk2lz4PUE;B{XoOpsSpG8pr5a~qymx3Y?d-nHc1ENHL4dGITsjl3;kpz(WG8s zAyt^(I$@`-%bbsiT)8*x2b8D>2raasS>sS8;4C?ujW|KA30`=^{zw?`poO`l(U$5} zf|dJ48r1PNK_BHob~H4gp?^0);w&(}_EhSv|87&?9iA|zGY7^hSwr0UI{kK=_7oXF z+AbQrRWxw94Ec7FHwaW-BFLz(>P_1sM$_-YB8mBAxo@!iApI>|y1!K^HV_XoGi*vh zVrZ1MOOPgp^V}W8*Q+HQwiB^(7pT~~9dCQvzrq7Py%Ck4aWg+v_akKI3;pH>?a37a z#%P=xlM6EPOW(PqmeDNm@WfYsPhJtz&7BbtNWIIC6jcHh8d6N4Yhm`Q{l?8V<){&6Ya*(uEU z40;8jR01x-1unc>c#D%Vp6N|Jxbe+i5htJu7ueDTSqRH9J3X8Umd<1edx=HNIDd8L zb2*B5IJ5&_R|D@en7_;anwOXTC8i+!i$b>WCyZP&h5>5!I2YEO{O|k7qnlj(o7Y#_cPQjEuqU^WB@}^~G79ZLiF`;XUndG#a&S?2gqv0bzRR39}`R zU{$biD;M0i<#h5A+u15nE$QzwCTWCDVHP9OX&VUbodWsUn*UxSxRNAAP^s>O#J30? 
z$*B3jez{SGjZ$|%gvMui(yvU&-diRE5}=1zr};h>ZdY`K#e4m^NAdf7n4H^N>&JB7 zy2Mc(_gzl>g*bWL };M{QU4U4Accj{UV6Z|KD{9yKf6SY}-)*Zgfa2W6LPW9mXk z%koKb=?lLMOzSqtx4Nq&7vM~E3PyBwOII#)?R`X)B;c~C*6$#=PzNUEE<(@8;_H;k z{&G~vrYmjg=2O2j;hDKUdo)Q^?XMalNHd)ba!r1G6U==z13i-Hlhr|rm9qQAwv4o3 zD}T?9Dw;&p=TZs27__>zak-6&L;f|Hm1;Yb#2pmFS!Q^DKWSq93DFvzJMPVTZ{#?t z-L2n6->69LH;mW;r9-s@vgL6YLR-9^4_rnAUu%0>FPa11DUmICYla54f&Q^nGGLJp z>5?jHvhXpUH;he5TPhL>t|ng4vqzP!SV(V-bDNJd#arta@wpwx;{U?%`m^_Bg3DvY zS@yAeJ?%IBX|Ddpzj6iT!IQV&S{%#k$+|GP%syg!qvnp-wyhQo)R!NmIx^`SCAaZe)e;P1F^tZRZX%mhbj0#Um}&4I+eTM6<6QLXFqDix(!M6}lm3V8=f* z^+Su$AWY@Z%k>tAH%1Zfw3;<(ZlKmS>RLNf`N8)&z5OeG6F*rI*MB3vM4z@k&0?px zarpp4@(5>g-gk=7?M6(tGu!T180pLOEZHp@<45wMzyJ9*6t<~|b!Xg@zI|(L_#3(G z=4-?}7})-z$u7jjNK0^a;OhHu5olq6gO>#HV_SIWXR@5;&S?F6o>HMEG0T{2G$=-W zixQE-ezg#2c9qlaUS#3pU3utL)>|4ax03ZC;*#ptz6Lpg2-Zha8k;qCKuod+NF^OPqH$El^jl z-K+SgvZ2L`s**U=HDZbUa>B^t%ds>gjCWpGU1EqdZDtVdpY< zoBsk5)KPaJzih;W{)OAe*(T;hZ^iMutL@367KC=jK_;IheyZnhKRkacF4hsB5lqK3 zV5no)Rlk-h&L+WlZQ`ILAwi)vTiN%@oU4mDYDS zor6qJgSYMWlX#~gDMkJw!Qh*l?LnZ!`NF|lK7DdKv5g)+^_`N^aGWfe?T)P2&WcAX zT#)e>JY>71JQU=IiK+TVpO|cTJ{_Sv;N0hts%t8AK*>`1D$Z5kdhJ%1YoOYguJ)Z( z?ionY;gnAck9YfTnL)m0ghk;GN(jqJknekLC+|un$Qt%1z z4aR^+5rpPg4Tt~o*(a@PwAh%eQR->U!~!o|L4h%evY zW0t;3rA8x=kLzmgvoJ}b;EI)%=T2cW&B*BJ_=)#JT8;le0*8FEo35Y3@I7Adic|EpUzqQ} z2mT&18k4VTKy2ox?5&b-9*#+D+MT02QhwX3YdDV0g=F+ZgUVa>kGh1^jlL(tsN3y{ zz52zKUUsl^Qo6}ktfn6PX$_m=J3}8+#0HC}aM`vky18b2M2~C68_py)bAw#maq>gF zSysb*zqKHCd+9$eyE~IdMrhri*G!x(2)fH-ZB7gg8Y+yJ^sokx%YWEo%4q1BAo_x&R-|B{6pn z_PA7iVbY~jxk-_N877L`)J0>AtJB1VIMIxS zlAt=*)}}&_0sps4)ODil3k7z+?jTg#8_=MhQIYFIP`SmUck2exbQ`l4a_2{-BSAzb z(&yD#*X)lW^W4LBD=$mquE8t%&ka{(TG{ZLJ7Q4rT*?&ia>ixZQRRg3(zNUNbmHC2 z2W!xo$*5dzSCD1|7Qp74(>H%fWmOX7^u6${s*EqH^T#%zk-aKDtkwc)Gwr4B(5JVe z2DEU$MTofL9HSgz}4y#8@(KRZ3 zdeu!W9}c{lSWS@9*EW*RhFd z_xc%E%Bhoec;EdGmO#|1fgk3T)l_6kf;t9v68 z7u|2rm&Toe45MmzUA#YT(4Fb&=N3zQfB&Wo?=!qdP5|8>My`_A32FBoqScoO^5UJp z3ubv1o-cT|ct&8IYSpKB0-WIU^!@ziH74AGKC&8`{m!v3hkCv6TRmfmr=@Mq2E2g5 
zrxn==P$H(+!|tH1(9qrv;1PUGDh_vT)H}2G=T)|V0xzyWJBO2{rgq?(s1BszCIc% zj@ix+T=%@#kGD|4cp0(cM|trN=EdIqVgp;u>1p_#@Rc0~>7l0??9~^?p4$35b_OO6 zT2{6eleUU!96?r8qq_!CtzuuPH7V0*wAk4Q7EtO66SrOL38^6(jnsa`eHm>s(x9iW zFmM!|IcDxsRXFwYZr)|^gdU98TmZe`j$5`c)@}Y7@7A!*>&r*G?3^107D%+16z?lAedjy3LOCbPEyeUNzepg2D%&wT!>YPAt4_1#T*i`67GMn?-};kH)9~)3!6aZ#Br2;ex1sW` zS`{qy`^I%}r>{g;p6ywf=AH*bN+y*^yhG^0{_{)FJ@VrLpEiSS`&FBv_BR=etBW|T z@G^Ec_7ZeoeyhG9cU){nz>E+QF^2>M>(z*o?ZVZjRKq|PNxk!%xEUfI7C%$oafOb0 zYcN99qSKw^$1;)}fDyQ3x^#7g${0>mq=qfmf{Z@rsa32+u1=-TAh_Y)qGfyxhjy@w zb=^dq7wrFy)HymBXIFMbVSilE71euox6v4_$mPd-zW#wAIo1aHg3iC{$W5qroTBx7NftWL;&Q~tzlW-xDuu@O@_tlJ$b{0%kjYc&DF+`B2-hm1~7y$o4tzWw1 zYl@W&*Krn73Dout2cJ&tCu*Cl{sHw{lut0WxM~_Ayh3zh*3YDOkJ8Ga>A7N2lY=FQ zw-=Xq=I?yWp9uZhAfogSa&L7af7Z9g&&xi)) zF|+Z%p5;j|@+*NQ2Lq|v9NyXTuX?_4OFt9jJX-B&dWOm~GTJ^@Q-ts+{ExX;47RXu z5=`K1Gg>$|mT4!?R*3PT9h84S&R4MtK@4vBTmNjWL?{_dBD)7uX|)a}WdE(9ZtZ=B zfqc8N3=?@NT^vXaFfg%l!;spY&hewWGV7VQvS3U1Tc z%AxPU3sVQZ-2&AuhbKqvLQQ9dlnx0v_{aCi*-R|+wH(=~G3;THQkNVSvy*6B%q|z_ zYi_;=Zt6kZE>lz`oOZ5t%rEf#!yQ$y2;B_Y8QWz7eitlbg~fU;9Tu*-PpwhLc_AV% zm5^esMV@F-i?yFmuo~Cxh$o-)d-+-$(Z_J>)UoI1m!q2SqG$#j!7Wawx{%W@HDP^#Fnes@NAiq zp4RuDAIFEk6_^i^dHZZFW)OKxTUTw>uf-Ldn11ht7Z4QH@gXTdGd@Ubbv`c*ta460 zu+mUKZ2Dnx+r`#;Frn`P)lIh=>Sp2Pq)OM8ay@2ljFfPJZr;k04Y4QKd)6-`wn^mH z*uVRmReQM>##Gu;lUb(OM$t7EOzM?KiOM)W3t}L`F}#mn`>FMK@wlvyIyop>+njZFBIO|B4t4ROXQDHQa)gK zGKYm}rTWE6Ab_>0RZi^$I?O6=eCRUNb+o7;A09-mSydKmrPuJ1SUaE87MDxX7kKSwYLQ0lEz5tw# zL{!NlrfX9@QD))OXk_jjBk6^$a^iH$!zFO)48WI%?#syjTKt$y%8?k$%-afh% zx%>pV_#cRac!m0RbJIyrmiSBX#PvPGnN77co56k44%6`mkQOmrxfIpJ0~WO#Ayxw) zSZ%yQZd?@0e0~Bv`0{Atd};r@Cmz|wE35}m{p;&3&_t7OHNsCY&21^2u0~9D()VpL z^&CxZAK9d$a6ye|<381L=nezfZ~Uqx1nhl-Q6YjOH9OiOm+gcSt*!~{iriLtJ&gok zzB&_*`=gC4~e7! 
zKac2p#z0-UZDON6oLgOh?|V=<+2QIfysdLa12J(<2UrZn$Rg4y8YPS&Ld%a1G@EIN+-4kDzoWAVjY!e=} zv2|)Lhy_QR+!692aI6jIUV~j+O1;Sjl~TP)t&gh&Nom0}tJKJ%@2I;wmYlcCCu;5e(T~jE`w(D> zrG4i+35YlO%N@XDb>XB^czEkF9_rZa?;FA;-4WO*1$Bu|)YGjf7Y0tA4E>h;9PK;3 zlk^%w>r#eSqf>E&Q!Q;o&}Wd)YKLV5$gS4$09{XSgCLb>PBDKpg2I#*{Ydjr%>B_) zuGvmdcaIHJu*$0Sr^iw&8PK%nE0G@Ied=o+=A7vc7}%?eB|Wj3tCI@(YbO{nDcZYe6LKxYcnjrmF*CVKryvDxEZpzEADg zydXvXPE>R1hcD0PT+r|Y@~NooFtl0(xvdQaExo{xp!NT_mS=aP=17xfc746IMV)?{Z`|fOJsdo+ABVLJciI&)Pxv~P$zUi*xxZd`m|J3OuDCEw0 zX^RVHC$Hyed2iC$2M{A7(?u?;m^j;tT@0%#=S`I*r*lv`KG-$XK`O&Xp~5VaYZ*Wm zoQpCqriv$QnX|w|#Wfg>#K!B?XOWKUx>P6T-OB%u6471$TAz;?#a7?*@4{=Iv*3yV zI8a~U&MEY(y7?}=p<%UcNiIyuEa-7tPn|ZgehBKv-5*MKPhp=}e?&hflsW68UW{k4 zfVZau0Lex5NUxBI$+olA!Bv_?Umy>4sTy}%iS9~BPwl(ZI$|)rH_(5pjlo}_6%I?a zel#gVn*~55Fp%hceIsQhVNVO;JeB+>UJ{ILPn(DWKmF-*D3?Lv(I@Z;h5hb#icX9SAqw4ZbW zSZWS2eJ%Q)ik-#!4ZN|tw7HEjv783X$>>s?f0;X`dFz^%-{;H}b9_x%LyJzZ z-%wr|xr?dRVS2%x^&N5Y4wmdGY}7T4nMrA@%10@t+k& zRgaVTOx=4Ww@P_k6MN(YN=dI=`~ANg<{Sn{a-8Sk0OrhhLq6=vEKJgjuEF-V!T5=v z#zyN&(fAMDbo9!v{Zi+h1|#I}1jdrQGnFema~k7M0J{&6gCA3*wuv|Uf+F<#dolF> zGT7;Qz!}^e9=?>*Kbsehea~6n(DG)Jt0Fh*I5y#Vdeix9>h%r=O=dNhC1X=BqRZ_Y zeBGD_#Iv%7&k|kByge6pm|-F@doWLUL>UaE(4Gr4$Mx_SJvEbc9;JmAPS>_rf2>5@ zbW<279CiLglV9%EXVsqKjJ(WMkJmk%8(fffj~2y(+Qt|;ovM|)%RF44VQHKVXu9bo z^Y-P91w{~EtOt`g`~nBz*Ouoqz;vH`%pPa|hBC~J89TO=Q*(EA3xOXIhd2f`5ml!k z)dY(Q-$=6S3F{`K9B%22{}@ar>g^1H3hWKS&Nj{`Ex&07))=pF%aBCSii9t=!Cww_ zMGa3BhT)GY*)v(&?7(W`<{#u{JWI+qJ|}Yxql)&5w8c_v#=CVp-=R9h{vR;;?OMmG z-px*(Ni2|Ff|b?XFv4hi0TLvpJ7o)YV@Y(`LMHX5z=-d8&yDWjdRABHzZN!-#gSPt zjjD$w#Ld$$wO>bJ9e5>lE&iV4hRQ~MBh>MdYgg?G!^%$*bO8~_%ii7G4Ph(nZkfIPEpGwQ;TPh-qj}MY0j5Cx7B)>x9Z}Ei%({cR_9_~ zRPIAk%PS#oB&x&%Sv6zblv8@zr3TY0#7xceq?9;%+$-@FFD+T^%;yL>x3T8C2K|!j zFyq#AO6~5+20xp#Z(dzm>s)AzjJ;y$up$^W=fgHYd%!&q3V2L}o+nw|Zd!RRb{^42 zR3VQX^eItkaM{Qkn4H3!bJLnYVtCJP&)mMJl8Jbr(U8U7?ZsCyM}7~4M~P%2!K?E1 z!eiDm>hC$ZrDQ7V1YI0G?!6M^~|6aa9cu4}7JpWfG|#63}q(d4p|< zI|Sh|Q#P^p@tA<(x(Eh8@fyZK&3*}9JcM>(Mr)l{1m=T7MOQ~ra%nK_Ats+6|Cf5m 
z{x9_~(q$GbUQuS&PM2_gN<9WB4U`H5v(!fZGQ6UH{*BZ*Z>q>Ul-=q_!ZPbeGa)Mv znAP5&ufY+@+ddn=hGo@Ldi8QX#}%?8)+wYT`1}AoX8nZbdwZ(89xB1$kG+4s5&vEl zN_0gPwvRC80|CeXjLcE-2=!HH1_JC3Cg1-JIsG7-5Bkw5`8hDu4X-)|(_xOTwPq)d z@TI>^o6NXya4OrLFi-ZH{Z_&i04YenO($HI4H(d1Ge8}YlGLk3Ov~cp3%{rdTJ}*;> zUCu%>6Qd5LHBdTx#7FU$J5mDatFLMvP}Z@+tSTS!V8{!J~exqb_sr3moDl z{mPM8Gzh_)C>auyu2Lz2(qBnR=`lK{D|D0!q=|BYuiCTCz<-LTHT%+`^JZiNoQ)`O zlKLqIh6ycM2;WA-Y@02REPq+-Ip4dzWVnP;&9Ph5V>r8vVui^V) zg@zT(kdXtX<~m^xv-6Ad56{$w-kJ|;t9u5iU~^lY;nmN*kVKU4{p`a>jiK;l&-`Kn z&+q=JN#tkoc_S-;9C!z;+E!&CDW{Q%>$#=gl*PXQ;oEii?%84s=M`Te=ENPv08+>J ztGal&BPFTPK8BHPE=3+o@gUA3BWL5-ha7WD1-;?^I2&KWrkr9Z36j z7FT)ChfKlXvkp0i$xYycG6$T)*=dOtj0x93ty{QxBcd8;JOCeA!DpOpdd)f!m#A<= zT5W~D{baZV_Zj`?WN-Y>$>!feS_hWQA$3hBE2|NV#HU$dzm&p@o!+G>TkFyHg>|Cu z6W7DiD%b5Nf0CW9h@5E2U2m@5;}{?8WZ$us$=D}chocA*y=30xY1ZXp5*k56Z*?00#!Cj zKu}M^gD|V!>8ub9J4!yhLZhZ>IQ1%zWEe%s4pO6MeJj};nfdVypMFB?&F!WEl*J0m z>A}q{f&KmpEyqR(KHPgcZ@Smh4&OWI66y-F17jX=lQ#v zQx>e<2Tem=G!NFI#T|8Ez-!#aQpT(h1geGK~zA5K3)LM==Ur`={PE6~Ce_b=oG zJ;`U}qJt@{k@#%vp~uP032o=0wT)%Df){j=b8c3DqaP-6Zrrx2#`QdmdEy$grs8Kl zNtEiq3gfXJA1;&j?lUQ5la6}^|2CE^KKxy8DR--z@g@1;Seirdw8WGE)}~iDTI>Lgeog?Lhh90B2U}}A81aoT?ke@mDIMLdV>{VI zr5ZLbDq1wxahsNr=NvVfb>il{>JbwWOpTNg6w@iPy zz8$mvWZ3@2bmDZDiS3YILhl(XkX?!8lOKB7R=V|ge*;Vm;B$Xn5Z3hC;s3t&#O|lu z3NyCvNqc-o7++9Vr{kEx0y-HKH z+39H};f6W^&#kT(ud?$3-AnaK7H@dFN6~cJyP{g1l-^9Z_LO~! 
z??RERuTXeV2-UR=Em~>VVCi$WGO;dBiP$)mX!eRc!(Z`L28;)MfoT*G#@M=Xev>_k z+7)MiBKZzK*tS>k+ZlHEv$Q{D?Pczs_ziUm>Mfw0T7z02gY{o-g9v@cbU&<;~=AIRUW_(9*I%2Gr1-;>g14H7Gf zWq9)25;7C}Mhk)C1M;8?fxkjq?{5ih2X01~{ygY&yxJ5_^8ez?Vj2bPAT+PYTAV3Z zIiJHuo1)!Z)=hn;#hRg7n2=Vq>xKTc3n8KE+zjjpKqc$qacfI)T$x&#CPO7Hog@>3 zhp6|>=Hsw+<3p3pL>1h4xsgZwR;R07XZddEH91y0IFPX>lqmKW2o-@21vjelL)7SK z&77%F;yJgC{zSq$V6@HyM(c#M^gPT05Vq>A)`io8vgQ1)x+$i{#ZrZs6Rld?b6NWb zLU8Msd_v&%oCtlbzuczu>m7!}cSmKiElVj3ToRf(^?j(ckI`wRFz!YCG3p)Caovs} zUgEp7DJ}%Ri*Nvc6#19>c&D(=KG+n)dym~wJA~dPEcjRB&Yv&AlYGbeu=QqugfQ#7#iF?fh1-jt* z>pPHw+fw)Z0|xNG)GGs0*Y9Xuz3v_6y1xCWYK8Ra=>6%wXwH=!F%ySH+S*mjQ47$n zGG5Ioyl{TJlhl0J!hY{aCZX4{`OaLKcWwFVGDXc#R{?Bn{a<>smhmqZ_r@Fp@cmfxcK*3S7kj`5?`DV^-`C z)HPu{Q?JCxAWMq!KVZN7*EJkLco0*F_o5a4?vYoW}YMvQ^)J*7BPb zgMOaLsjj71zcD_6xLI@*voj*|-su>#fsER7tF9J5nNANW9iAYY$R%tW<|8@iwA=b< z!?!`&c7~fN3kWb@Wa6TF$IfRe(OtBV-MZZ!tp|zE>q0ba>q3WDp_n?~2V(FqD9&~_ znfi4HdFA5v0Q;mB>Er$9>=&V*kT#jQ-60Yj=M<`GUu2iuW$HrifUy-dRR*87tem_} z8*5bA#9DKvp@i~r`{ug}XdVzyiA>sZ_ne@=J%&Ac(3@V`Zeq2Xj(6uT zI?0b*)qvV{6H+6sH&#pt_<7u~RcxUS!}jkVU6P3lSH5qg!t!p0ucK}mezp8M21Ez= zt=M?Nn8L}(rfv)vk|H-N(RwT6%v@xvaMa7=5rcxQVrKNtR4xvJ3fVw8L3=LCUtDm1 zJt@}|p%@K4A&X>$8fno!5`q=llR%xSAG(g-GhFMg^hOaD*GJT4N&zAxwoS=mW*Svh zzQD(MGt4F0w-0Ox=g_H~(295;N23hUzLe5R@Vq^hGPYF++P0cq^$jvJR6$;^mo|E`SxcIkqu8!D3faJ!D-re zPkuu;$3L~9+z>dw6?Gd_-)qY$;Vs3=8S_;Zhw$;NVmCsgW*e0r58@0Y7TQZZV{dbW z_#JKNJ|NQ|H(Kl!(l{Dmkj!YW+@9~!sA(@ReC0aEPiO$GpfWyT1!jhQ4@vbJ5qQ7V zYi{ZE(1X;hQk-xiY3fsX@cKsMJ?9(M6n7y;n+DsfNeCWz&SBYmTYzVKvE?DD9fN;2 z!`|WQSBteZwAO}3ox1L9ln)x^(dT7DL=iQdR6YhrWy>gxM#Mj&lmP680TZI($hLte z;&H50Iyzr{62^4yH&ws&VvVH&P#&mZrd-Q$rq4_CV0-!H)j03BhY@J$ zTfC3bRS3e~0G_D*>fBZAxrcV-6Bntlg6f`y!UZ9bvH#eJ_H^Jc+mzF{VT@JRZ_!K@ zC;=OmT(v?`%KP?ayBk#&Opm{r$Po2EdXoG2~P0GhS8N<`S`RTDk|*@+Oc| zIB7a9u{oV$8#AvQuU!z=%xb<#;QKx}g?!%n0jcmmoozF5mHXX5u}Cpq>EZ<< zzbP9iwiX@4Adar2<&Xg_3yIX@^a1>e2JSM|89YtxMKKS{LV9g|W~Sj)KH>Wn$f=d0 ziGjg&B~_hdXp$+# z#?ug1qgyX9_t87NqG)^t?`{qEQF|Cr;OcK 
z+is5-3+A6apzD|AEi$#GBT&oG*J80^difu8>;-%JOkCH;BTmqAC+n}DCZFJ6Xvw>= zl%$8KtW3+5m*O@XNMPb^f&-+0LQ1`!$OU2B!>%PynL3Nx;XIYmke#%$0R&Y`r$QY} z(ZyOUpnxh>I5!AU4?~kYTX4>lSaNr}a8*Pq{lFtJ5xT7n?gKkLU`f_LE?kEwZGxd^ zaZ?pd)#=M6qnAK_l0fMN?&)rD5m<8sNt z4fTz3{Hs5bPmXKF!vX;a{PeEb_lyp?(dr3CvhOE1((rg?Wm4JWnuD;Wf#l6(IE}pZ z4s)s=Zr;&TWQ%;IQx#lBaX3=H{QIfH(Mnmzewaw8hII$O8`JSljZQCEzUuq*ck!?= zJ+~}onP)%YgY7a0^(SE-l+j9}$tLS>ZW0<}VCrTI??^H*LNlTXdtDHu66U`g4TPFa`Xzg^)^tx{HxErw+gkR(#T z;8=qn)#5FjYekn!2EXutcLC{eI_u4N$o>B(8-rjVOzCbVcNCJ)Nc~RVZLIUFA)kh+ zvN=0bSKDt}{|3h>TX>X&ydw$sws)Qze^ZTf^_1Yr%a~h6`6q4)L2gQ&#UR*c>Fa;k z!SZLMA;K#MpV3=PSEEg_B*RlN?^FUxSMk=Yr>k7MjlyWW%G=+u?#5WXHtD9HohQes zINWX@d*2?jrW&|RG-YB2$KZrki+9zG#VC*;fk?-8=fD(fARp4kyy?Jfha6s{hv0p& zPvD`Sb;!{C-+>HK%*W4WZzmEa+F=;rqHrT=qHILXCMSuJlcZ|nmb;2&bLGB4wEZK0 zzymFlEKxl}UwRYhTykS!Nn0lN%0b@5Y_T@b^K#zZi_h$nqjC#L`dx#z2R!0dBZ)f$ zP?djQ%mqkXnIo}dKbaw08tSjOCCW28Jc>QsiCdeN7HfO}FGMAS#a?>CtzNBLREE>H zXC7nhEKb@FCr$ABnB07h+m`M(-pClPl|8`*UNLKlQld|sb2Y)7gyzeRDHeJ{vWIsR z-k&vIcYU`^N-aA5Q*XgqAX}c|5Th90X~DX#-EK<`cyhWtc!kntm8$4Uc9Mhi^nwq2 z`dWoHBGv}GPpUnn&-CwraJx_%l=xtXsag5G}>n{iNRw+6s?@_p!|EDvFGz6&; z$ha93!3SBk7k_6&d0hvUtk6&Q)->NDQOzFFl!f=uGFh>`LMm(!7Uz=%vH|K~2Jc;s z=|lB=oSp}CV+`N|cPfP|I%%;&BngM~`>D!A-)YP~<0TupH2vfAU7Oh*YvR336}8^* zHa2ohb2-31g`aAJD?dQhXBK=O;ompur0zS7)6^<7wV3C_UAZ4aG~9DObouk%I80KO zizDQJZTQgR3*yfweRUn%>yzXcJLHM?y2mpSm6oZuj?LiY8TO;yRrq^GpzFn^w>XSM zO^89jD_z%Gy2HB{Ez%XwI?N{A?gFo}d&pJX`b+NLGSJakZ{G8HF`O>gdK2kU5|E5? zaclfVBN2OcKd#E-Cx{0v8?@u%V+*E@J%&XeW>Mshk!N-?d<=?P3MmtVvD!a5y@uD# z#^W~QxvB2^?e+swi5-wiKA_!D25HbZWxs*nTRZ{%`IpfAwtL7Ldiy_8WR(mIv&^Ls zYteRgq9W{~Nax)Ww>463>y+WbhXvaGVoO~Lho01u$pr2t+ECMU+$YA`(m#(L+8Vx? 
zO0S^jn`#d3t-T1M*i%AmFK%g-tw~^NrDamN|xqb5cNq;}~G(Dbv6)n>0$+*U2tf3#E=wMcogcB*ouZb`R_;np5@ucNKF+!@I?WnUnjKs%U4RXB7RuS142}vtC84#*s0Ba-%0L zKqpzimkb%G%f0w*0uiK;BuG54R|+rW=l3d6=B+XeB6ak;%@STebB3YM-iL`qY~oIF z4C#6f@+TbOJIjvnn(nxe+uY)&Ie2kZ>>8)aaIWjSKmbEswM?A>H25Oe9^SVlScXqC zH@5!nrg3DGSfF&YA>lyXLga!(==#`5z$A>5gx)BKM@xdqZ2K4crmgxr;o180e;Vo^ ztTj+ujx@`#;;mIPer+(nl~7j>7Gx*F5y(C!*m#NAqyiF0U=;79nLVYWmewWzZzARY zE1Qvs1=+U4Wi5&ZJlR)50b^W7e*~`#;i@@GP&fBwgw}4SUshy}z;C^KP}ldZt}BW0 zZWL8_cE0+2tGNaRI)sjHe%>2zwOkhg;b832woJEO7Hi7cBdU?c)|h7du3Pu!GSGG| zfNZ8~lo=EK5I!@9(LhZF^i`*$8I2~V!Bp|9U`x4Pqp%NFxZl%CUEfbvu^Ir=8Aw#u zRceUoE}9I2#zM0d)1 z<7;nin_pdzfR!RmSEaX!{GZREelkPo1*p|#atlroN|zwqc&n`O_MD>*w}06}>UFz+ zqju6rVDrO*Wu8%Ri|}hG=}Uo%*0xH0zwU+)NK{GF0sKe##AsXekGjf4&Qk(-E}EvbIhE^jcv`$^;%7 z&9bKuG^U+_0nw7y*UDlm%?)KsDhQTpb^uOlvl*=X*!acw0u{p>IdB#aADr-%`od5Z zi=<;O*)LeuBA@F^SChDXHMcHC{Qb`L=#_BWNKX%j^sRBTjh?d_G$^H-$Hr2=2I0Lu zB;kE)#ZiVry@6CBI8kp?hP6{KdhMqhOxWM{NAJWaf6MYbo)PK6w+yy zea~TDaYj3+SX-I3a^PA-+T^seqtV#nHXh+{B02(Jewz+0-O2?sb?ZBIQR4qaX(f*5 zNccw5#AN3|OC8H@J@bXG9Rk}=#@a=Z;$yx1N>gD}`a9X8xio9wtNO9;CsOVWp17UA z>)+R7{(D?lf*xG)lCRHw02$0uTSp4 z^MI8~xN*1pVOPw1I-0w_2S+DMB_|f}i>TX=9wI{|Da$Nxa3LLK+jU+qpy&{pR9;yN zT4QPGyo1BeeIuKf$YR?dYynj9LTz46iHc>_p z{+KEIQ{e@eM>|m4Xd9+`V1Y#w>?y}CDbFkz7EL=4#5$cp$X6$=O>eaJLK#EKC)yc285XgAs`cbayRmvb?wOmaJ6T6rOo{n+ zDeLSwK+gY~ie7dV!md*RkD5hl&VqPS7Kxm zt?I~&OW~Y=ZuliYtTzyqKw<@Buf}p-Nw$x%kVKo!%D!aMct)2bU#ml=$hC@Ad{La* zDwaHes{%6v2yJ0C6D{{+@0&pkeDeXfaI^3Ddf%|KP|JW$v^Q>YE5vWwF5R$@h|6=Y zI)T_~b-p|HG=u+ztM4=~Io$pH0f7VV{GD4_@!dNrPULwD`fu}az`$JJy}m(|)f>?b z?beP-d_9x(ft;~qT8oqIG0>z2#n+xmBr0cz7nI^Cchol&;V|7l)jY>i=`5Di>Wlfl zF!W7Gpk{CFM6a`x)}Ug3SVcP`EV*^nY-<+O!QeOL|A}OoHs}$(0-Voy^RVV*@k>oR4d!BCN3e5xppH(h!%t#Q<{ZyYm{tP3iJp&K}y;8 z1`zG7No!GOA4p*W0Yp*hL_MA9QcJ%QL~L_#RN&1ZZk;FJgqrpb6So|&Xnpufs-!xi z1tjW5Wg;DQO$~g5F&}q{vC1>TFC-q66BFkHij5~^`PFx|O7S!UiI&?>mlK}SNIqt{ zl5>2BfvdNbkg-n%qj+OT!U6|ZxTzuvRZjKR)4qvwo!F8IsyN=YEUv412H@5DmJx-5 
zH4lHW0-rsR(k!}8uB%fKpmm_R#Hx{_Vxxm9pq$@?3gVut6v*_lUN~ilVr=gfw1_t7 zRB7vo%J1m!;6+j1`EHH6e8n=)dv->3NM}UyN$9<55*yStAyhj6Tjq4m{Db2=qIa*WTqX{a`z8w49*7m^HdUQyN4NS+-%1 zpEm3LKAbJ^vwRD)6x)lsf*_YlyWCnWV5}vT+cHzxMziXO9pT!T^^N2kcDBWahweQ< zm3N;9cJcgO+9~_W68zwO1y5_Vm4n}6RGr|Ev-5hcy+Kq~%(nt$zq8pTAw^R%8UXXt3o-_AM(Jes10UAC2%sCAAh~J1^?^sjGW{Lcz~z=+ zIm-A=dM#f@8F_J)BNtOrjAYa)-~q^=k(tPu)z55Zf1+;D*@|~EswZ|HMb#YQ+znSR zs}xJ$L@_Arf!+1rLClcQZp#SPz`kvI6p=(2KsMoJQB|T}`dfugUogn5-d5u?5$=J! zF{fR`&GmzxZGo1XQg2bBQJ(jq``Hg>$hm5cC}qlvgB~EVB2fe*`?C;ck#GB6#<8z| zzi+^SfHVpor4V(Lc1G!n8W~g^%0?v_k7mVDHM2B*Gj(bv%w4>1uv(;A_-4Umm`gP) zr*b-*_J8uAsFKgjjRw$8o#bBcEAs|_vL>0tAKl{;`4I}t0S?Pk$Ef%N4l$5cdS4laz`9E_dd)++ z!I35-#M1{XYn$Z|%I{hU49e<@{PQ%7xKWbOMH?J*}b?$6J3@N8GE6&uWIdKt&WPh8$NXoce%Qn_9 z%G0f5(UballV!>?0Aa*~=P-o#w?R;_YXNWwVr;{Z&}9$l56R zYvX7$`r*?c2~mx47$B7(29%qlKmpJ9xsW{UeaUB(Kg^c zJu~d?G0Jfh95Gt~9y;+NajoExJX6deP`M) zIb_N`-HzYS)665t!Nah!i}O0SpK{E8p~M)4z{2=~IGf2~_Bz=fUsmo@Vjh$0cCE~iuRU&ooM|KSGs`F49zaCk14q(tL{8gcnbjSGH zZhK$K``O(ib+pXQMcQ>{1|UN3JcD&K{&a*kE$n=|<9aY_96EColUW@vg*hA(R0JtC zh`o+OfnEx_pxbhhs}Hl%b5KaTBizT#NY_Hr_`LP`gls`c`I|7Z>dF`NNg|7&Iw@%S zE(JiLspajF=r>o~24MX3LJ3Rf6U}6HTF!2P^W$=iMI}ea_^;geXCx+$DC|rv1Pw1x zq}hG}w{8^-Q^VFiTeW!=^m(}~+F$w)0E-2BD!OQfyL23A-Bu5a%9ydTH5yp^A+W@D zMJLV&d(djHPXKF5^e8$ONcLM-Y7}w{Kbjgy>Gk$HE0G*cEOdR!gh7(Mwnv#wami1R z+68^vfNpuN%G|dyf^c%&Mqo=Tvb%*{0hT8v!C%(wt@DM zGsc#V)-IN4$U~x!4R?C4`>i(y8T^_<@Y{ZCJ6Cn7D=?EGvZFPxfye1a!m{?@DZT{N z%CXCSw|f`OHnFR6NxJ$AK^btarl4X&yOO$~bezc(e+QuV*Zg|*c~?W@`BQ9}FmQtN zM}v%SFww~}J^iZpA(vFJL$&}45!e;6o_+FG?HksE{TV<}e!h4&q3VQ_s*I8lnh5Ah z<)IZc$*h|)n30oolY9()Z1fjDum$v2$P#svt{kDsAr+l0e2nYV?1yFm;Ag3H;Hntu zlyesd1;|Hs;!REsLf*WZ1{@VA-`rhRlW*c7yT!TOw>a0k)uwauyXRo^9MqQn zh6Ej_MtQJ!K73g-xaB@p{7!e_eY5SUUP#!wvmNHcLV^ zT%v^u?mR_;f4c&CA1xSxY#}x(+z}wk*Nh{{H!DmS(NbE>!v`i-65P)M!W|)7k*;a}H zNYJkLs%sm(2ob7d0kP1V^AZuM=@Ab!z}pjf7KjK&gS$o)h-K>b9*I@tj<<{ z6)CP)x`+o4;qe6>qIY1-@om?znvA%tMIp+u4e~2(Wg_xav_YcunACFSDHC`W-d|-~ 
z8M0!u92kl&n$Ne~G%Zq7&P$6N94tq&-$gKRQ%&bhD}=KVNw5xXC2gr`KA)6`VgilT z-m_(ZVItn{Vgb4U_Av%Cz&h%50yEaTfq^4-u;r~3R5JW1gN<73Wyir+V@z!Y6&*M_ zz2CyxK%|PDDlmjBd(iU!9AAdzAjde-#mc}@RZRfhO7^XK!3xN6zuTUF-Pl2Js>;3} z-acWw3aDDV56U|Xg-kC-HI|#p?b66zMuyOVzozA8evOI-xHQ{RIZT2)Su-Y89LU@= zsu$)L1QziuE@c+AM^>}20ebw_;G`60&Qm(c_x^X3>(>}M!8$DTAq|h_$n1kVxH3QQ zrJF1>mJ*IXb2Q3N7dOewI$dU*HraEQHmvd5=BP}_cXoPEQz)Fuj&bBe1f&pHN1FY>j!-=Ci z?v@TX(ly_+Ma>DWKcm->8P*EEKKB6h161nad-r$2Yi_de18L=GN($K#sahrY2&tdT zT@&WR$YCtgesUF&#fe+0)M6mYq$YuSvhm|S9m2N^W|%7x?jpp2}eX zGy4Yjs0F?irhEf>;CKL;vXr@;Yt(r0@+!a1fTzn}CKsAjHbX=mU5DSa7aBagEQT?D z9=U$8(35(E=!(C4-(L~ZXw5P$&2ng^NWm#Cpp!1(?-{WhVphNLk0wB<>Ax{xoA0T> zx3+EjnhFR!PWZq?JzCkm&MA`D0cvMS;OS(~G&|V-`^MviXt=9Q`ui(&; zNb@j?M0?Jjs3Lc{-!QfbU_I?NaZ#KIp>I1DkdiKG2teWm=6Y*!vLHIG<2RU#&&{&+ z-u9IJ%qTuo_Puw-+P7TEUkkD4&jN#Tw*rV+6F(<8vTRy>W&5imG$~hQ<~u_#=T(tf zdZE5Qo>O3L$y}w|jNIJQ>4H~>9utZ8kOjs#X(T{w5=2|6Iz)7xlcJBgvi8gp9&vkx zKaKn{VEQJ28D~&cq!B~Nbo8M35x?NcH?K&NgC_hazE~-N`7YW@evjrn^HCc%@p}m2 z@xXR|Q!^NirciZy!AVm%5-bvw1qqUZL#~UZA3r!$v6{U+ z&$e^*f4e!6;V(awEo(Pbyh1L2_BkANB2Ok_gn5eGde<$?{-ug*J!_;-4)V}BNTt?P z*{slWr7G?~XOGmCTWWGUZ!HmzCV+39;_EYwE4#IUrees&G@KDR>aWdbnm3Lwd7bQ4 z6!+Fi2tx!}l;FJuMT-PB?gQR&oB5qieDqlA*75Ep=O4;i3UT+rWr{U&6~<<{{3tYC zYV1F9Gb@eP;{h?0@C;^0+C2(aPE+CF_9;&*ma#W?Tkdn3U&u2WM+3)rG3`Z51_}8en;*hdJ5$y z|50j8iyr><$2A;qH;Lyq!+SKK*WXmG+reUBO7S@FW~yPZfC8qOY+d3Pat}b3xWBn-ykb>Wc(Hnl*?Qc^V&tMxF0ZWdW9u=AQgqS z1((xV#fuhyL~fuim53LPJh_LS^5hk2x?(v-7yASg*FmlL-J|W~GVlACD|N>4<1Nc2 zed>lf=|01ZTC+$6&s5>7wo6f)2hD71w*@WuRxsQu{=*9AHER0fSu=l8&1*vj*xRop zcLBqOXC4&pCvspFWoWHZ!e!M~wbEBkbDWJZ72rt&T0qz%qHTIXPgH4?Wgev_A6_7J)5!lqzP#H7E>yqde z2F-2o>@T#$-$f5DBs|#f?Pl#W(`0{AkB|465_r3N-0CYtu!byqJN8q=@l0uve3I6v zL}y_9GA5bw`nBL7N2B81j8{?vvPIW&2r*0>sf!W5U1iX5t#SN>GKm98U7nK&j#o5b z^}zs-7sH>(R9^3M)3CSfIDkozEK-O-;j0h5iHv9zE(Mcr>y#k*6tN8LX<&B(KlCn0 zO?1HwDb0Fh_O2|F^jst6qa@!p75R;;1`-l* z(LtG&*1n`Z;;grHd&&;36f6LPMk*K#c%YHflXAIHF-Ec>(WcHp+}md3Q^t9M4tcIZ 
zU+Lg~^Hk8O^QhjTlTM^9o8+Agl=5#1lsw&Q&EN!+j9+xJ7JcYW-vdlBI=9^~z9ST~ z?TWrUvh>@?hs~y*+vC31I<>E{NQ=JJg_7NtCnN>SYf!U{k7p`HK(=duV(&GW0K)_oIARMm&Ik zE~*`^R}`E#D*%rf-dBJGfOyT1>-srSsvuoYSZv95oE1DXBslcaCzq=%F`mI1#K06f zb__zn@B&^B!^;U0tD3`OIwm$=V5wJtW=n|4onN_!u{u&(yiq=n_5IR8@0_;2y2!Oo zI}JFd*RSk+8xq=I(wHZvct(&0qtZ5CTLGlk<4H8!9-r&~7Lei9d-qk^t{VVGhhZ3m z9i4y}J0ih;+L2ER6!8`bPVNyRc&hlJ+euNhTBqp@h)=wDV~_5+>>qNmi4IdFxm7A7 zwCS|nD#RtCnWzkO`o-1wGvt!9-jnk2UN6}g9?G=b$b6#X2XU>q61&`sllc!I>w_@) zqu6An3PllcwsqDC9TViZa8*j zkMlqO>wkIqSHZuGVJ2w#5GS-g5u(e{POCSf>t`ft&D% z^fUU*jpqYxWwrFpRQJtr#s7Ka|HB_cpWdEzfJztC{W!Aj;`s2}K&-4D-CN5z9y@z$!`B!Fjc`*_n4bGrd zZB%Ci!_-Akjl~ofx%={vcLzhodw^-(^1dW$x{KJr?fZ!nJD<`1ofrRb--`X-R7UgL z6LoSB08G>XvyWJjR~N^8r@$pN3#^ASDW9Rr=ZdKdilC>eb)JKF&bJv^nKaW7*J+~Y zc-pz${c=C|Vte$e-&5gk1rzwjgR4jkiGu2%ei)(K9S-`TMX4o|ZCweKKcp^Yo^qSE zvl%NgpVr%7Y)x_tC$k@uo6xk(e08UTTfM<3sLeh%fdM|>Dh{#vINCDVar}RC<6Tkt zk+Tp6Gcy{5ynKfxSY5tRZJpRmn>?oPwh1vqO}8@W6dR;{umLA&1;R^x*yYcggzAcM zg7QyS^wIAX#Xt7kaqeqqE*X2WwvENNLGMTIR1nFcB!4}VcQoT(r-}&%J`rosqanGZlO+*HZVal_## zf#b<0je$BCDjfs2!8c3yU)S$9|I#N?>>s}0d;HsCgRHEPo19m_z;yZ|P#LJ}cQMtD z$CvkJEUwP&6$3slrggq63kTr+a3DD$l%D|A{Wgj3fIi&&XJ=(+oA!4R??x&MVUVZp zHH|pctlDCC)LCUaJzKZ1ImlAXqfQ^?84hIlzxP%^d*kz9nzVL1@}S|`3YqeTtdXsy zHEWh4WKWjh;`ZP-q{mYIPiO4klW=`q{NG8)%p1gwIE-n_aBgY0sJmi8+o-G4n1)3W zD=~08(qmlK-yrx|g)W?blrY6+XQXc--Ai@prlr- zcJlc^`JCF>L-&l?h41ty$T!&5Q|9@bKMd`^f6c?4>vs1M9Kkc>Q?nwOts^5D)Lz@M zR#si?AOI;C-*!8xMJ==YgT@QY@#Y{m^= zCSROvL(T>pq;>HO(6}>?TlWg=3x{EVPeTbNQEyjK5=8S)LGP!(H%0D!@-gk&t2xor z-Bt<%KQg;euVG~yVfz`)0D^V}N4C042Vc$JWs9~@D)&>?ulseE7&Qjf4s|+i-M0II zXkE|UoR2q`+sBBoZxSU;*!K(jKMP}~l7AOI8@^a1k2AXT31YkQ1rv2f_fL~OBPhfx zt#7`jO<}slKSfOs<*l^;_5vvRT@-O%<{=as+BxJ}_ijG&W4e%E|7^p5LUFl$t@*%d zBEeM8K_byamRJ9;Kji=4KN#!7na-H}aQhi&?ME*%=m6g-u?CdanZTVe7pruRI}M`x z4UOpw$swa{4QLe9QQ#c=RO)IMR zx19G*eOVXIw`*g(u=r)MSS@3r4zGnXm5M7B^R+k>dveRBK=`y%>D$|t>v?;9$Pq)< zt$t_AL6hj$ZUcF%1S)OAdJ-mNT-*U>se=I z?}$m*HO~1`v&<6nYrow|_wtG3&Uev0%`(;vH5+UmF;F6*U{0+3C7)z4|BpylzWI|( 
z!^8+cP|%j-ewmqnT$dSBQoHc$zF-CP0$>scYUhU&KbLGzIEP+SQh)Tb!%FjaLVchJ zVc6ip!LPlEkWw~pEb4&K`qW7_)sZ}P;Dd~{lyhRMPcB~f!izib5se|))5#hkm9wRCdG@O?{NXi`| z;Y;`xsN577l=47=7SkL49!Qu3YAYDNuZ|OI77JUK4e$BcrU0YTqHc&o6-U5r@r$z@ zoki_oFx1&ivRCrmVu@Zy(!wH@sr1P}V4#4SMfE96{-^O`!u($W7Wa}VHJVzMI{^o^ z9N{WuEQ|mPpz;qbKHsWpFE4zW@*&8WM3>cKMlI3ZEz^E;B{?`!7?b?4$6DY7to9*W zqZCF*yIFNazVOGq*M;QYQ)}AD01tWo;HUTYHAuwApK>X?I^Qg_5}9g2T9K$&b#sCyvH*8sm=32{r37EUHX3&c~w?;Kk0UC=h<@idh9e zHa|q-o8{or9&oLl6WewYua@x;cQc=iwJf0i{@lu3h7}_ok-kdLVPpfF#iAsE^>U&9%Zd5fp2;=ya++hdSvs2d4!0Zj`KVtsX@z-V#JyGsz}JE# z?8fEST>X8=3Mjw>5<9v4F@^E=H<7jd--s;pS~xJ*1p?-~7BmF`<1efAzB}?TDU$84 zJPy0fF{yxU0Xx%+DZos$;^fCtOW@^H^(irch@GxkfNQqBMIf=9Mz(lFZErD6`rXQ0 zb71FwufaQO3H*0LjNDW#SEs%12E|1?sEg&EHa@wtF965DfxTZ{+eoWEO1Y!j?vlRK zoEr7;^0Xk*Hck6!WAGs>(~p{~2EXe$E(74|!uIl*b#!2Ah^B4%598|P=HIz~+v~rD z)mpgiI!9FL`pD`!u;oIST-^$%IT@2$>sY&-H^f&h%_z2yu65gGGrVnly-Tw|?2BNn zourmsp;uoLyQV#gaU*u`Gbxyj?f7iK;O6p`ivIT*6-daRg5555>6(|=%?wsTy|D32 zZIy8_)McH7&gxFj1dEc2MyvsOu2OZx^9;GFL{q?rBOxF;nEy2qW8k~-O6Kpgk+kOS z0C09e43F+Lr{(MC*Bph*(@3@j^U)Tc3}v*VO7N^aC3_50(74|4BV+LooIgw zL@2#(0UX;Cm3wqROhs8S^1NZ5#qp1?(^IfOUVt)Fqcw+#VuX=Gt8l={%+)0w9aK@_ z+JtB`Up_T}Z9VL~#_%q1EZ;rX6o2?Ke5HE(i-RMMoYYTbpsy-|AJkd(~Q^j>3Vi_M+Yzt09>~vI_-Guy3MPv z3}ml~vMMQXu39&c)c$IwZ__B>F*?=nPAAdAl9vFF+N{!d$0yOTMCyL~#BMdDs#bCm z_XPs6Ix{Lwu};U5U;PdT~C!DChr(3&ziK&#W9KDJyb)=0Y!1@uD4xSaRz1kK` zus3qxXV|gw9KWQQ&e{UtP*TMse62Kl^NL)W8LqF0ssNfz6_Pz!_P+M!7E^u&T8^tE zj`}U8B+lFVK>d2Rmx3VBIO*M+>)~ihR>O5*Je%sV>Kpa)6(iO*o?>zWRavh$i3Nki zY0sj}+z@l;6e$DkrPT$bTsf>jOKysf3r&`7eaik*7xXaXmq!1Os!qhQbSSz07%U(Tq^6a;Uw47|#e~lNsRFw3t+S}IkwEZ%>#^! z+CTmER{&E;cLLR7FeqZq1hGttLgdS7aY-}pG}eAAP&~%0B6D<&EqcdsH0>Yewh@0> z07JtBT<^ewmj@BRd`79uK|h_{9zGdh`e%v>_D7+Y0S-_+)4QDv0>O=fln{r@78xi* z*_2z1J$Xv#!qmn2Umec#EmwLvCTl5$9ZqTwT)Du&;N->Ew+*Xlcit>C_s}ZvH7TvTd$``dBG*)H=gg`O4v(H`!O^?t72k7Ha8a2C_Vv+ne*FXwMnn2Ts2U z`^7A^zb8KIadtk8f$wyo!)k{Lt$l4czjG)P`yOnbJx|W7SRa_*>&m>`96|VA8E#9> z+S6ZhUouB+t;~pjf8!TPU9qjf!x+FKZ<8I$uEO)0N|D}hq05yH6gBx4P2yNcf`_-? 
z{pEhKNg`si`26wy1^EF9Yn@@p_;y#dp`D%mQSnSzT=8-Ryngmddt2I*iHC=a`RrW^Ul7O4d zaW{QOl`L09p*eGnS+efIdhpD_-kWaqb}WuuawKmaS0{nonB8T9#fpsj#hA)<9ZWSI z;e@36Or>=?gSC=4b56M!VBS9TR~A{K&4753GWsqpI5Df^Je#yt?X&Q#&DknXs#9r) zm04J$l?}}FT9nE>>mk;Pz7vjODPK*6iw%CT{qYGhR_&(9dTPxvKscMa#!CwnNp9du=~_(AN8S5`WA1|1+S6q~I2$?iU!) zO}T&|67~U(r^DIF(fOCp{+?z`&=fp-e`Ne#>%YF zs>o#2oD1Zj?+IzYj|1f&&y4?)^lW^RX&SVaxHcjw66Dq$ge=t6WrWYRi2pkJZW)*5 zyg_fG=188C%CiTF_<5jrB5vnq=~@E&IqU}go^e*c{OP!>4!6&@StIyJxva|^QB}5r z=`qibHeVwoJ>M&`AlE~};jO%Z-2}mpd_nP(te-9J=GV7Fe_-wQMfY=3p0RnU zh($haH7c?ihg)Kv|03k>PPp>rzYt^OVl;$0`c_}{kRoC0-J5oC6nG6hLoXDtYa%J7 zm*0Ks{kxWfZF63w&$^~QGS!og1LL?T%l7$+nu}?qCZx_4e@v{3(%w<$#a6c)kd8x2 zw;6pUb_RR&m6?xf{BR1f#qB6JnSTr7S~91jS}cApN=( z+n1tKD@xgl;lyZ%wxivs3z@$TOBc>5+#pUko+p&Y2UzusM zH72rExzA}or7?CfP9%`D!41X5Hq^y*Va`&G|{u$Q*Bsj>YPs|B^%zUeP4>6zvn#feEaO01# z4R3v}v?*;@L{Q?{CL*;{e}AdhHN+!B_cI2CM~n9?*=$D4&;hb7%El&`a#7S@)^gT@=5i{t4X;$QxNmp#v$8w?{~K5 zVIhL!8Pe|=ia4^TJ6|k9jVKbGFo;3SRmU6s@l;IChPxW<5)$bdfnxMMQ4ajvn`67# ziIsigPujeq@t4(Tel`t#1n+?53}K~E9_uWlWg{41p&2TuB}x;cE< znlUFrGwN51B=*~qO&_rzzE-xM3rQCC@RPhSq&sllvDnY60kexTo$1bAO-vsWm)XDQFE zR%y^`rW}Mt_m{yDfvlCUC=_acJ=p6mn^`pwyz|B6q`ndGaE?m<>a_JCA1(b`$)pz+ zlQ%l_@@$A2EkFojL~o3N~faHITSPwnHTy>vevbjQS3TE8ee!D zbSPOzWU8m9?ct0}X&$RTM68*-lUm-`cY?5?g0(ENrs+2gnS$FGJQNU4wu_l8 zWIrt`TW^-rcC<&+>aa95;fdjy@HzBk;vUb{#Xj5S{M|q2qA{S=W}lGgSi|0!PDMX! 
zk4{kh<-wh9TLmt1Ic4CAt1LBXv+g~DjC#ElCCtyK0rqHVa6Fd`xmpP%%^5n=INA}R z=(BE8S=GqT9jD81{0$jX2Z`?O`C<&2)r?n2$J+3EwV!BCQ-s2HqGqaToIEbPNm-UC z5}isWaepz_r??R-EPj24DmDx2&X26pj`awVU=)wR5cM*NOg5!S-6{Di+ST!z;;OB( zXO;tsM!@h}6=R#10xt@fup=fcJCA8)2^({p$x&t`2hlg>%bkCpY#}fH`a}GbPOEVe z;Eprm>pW#uR#>hf=dn#J{(C0yKZA)KZMyf8%Q~Il%5|@Q%|+5q$20C%%my7Q5u0%L z5g1tDkU*_?Pjl2fw0~z0Y z73!9DCGu&%JbCZqa=NIJr}Bgj%PmCkWmQ)^yHbxJYgq*kWSr@$oi2fTeWaS=Aq8Kj zQhCE6gY6vZ=|a%y;L~6pJQT3=@m5r4Ub%8T`FE>S3^rA1_lE?&_sLjll3b^Q!p*V4 zZ?s83HBQIY`^ddgWixhxRlz>2it1VNxq0u`gy`Px6A=d1qjTb!<=0)F#S>KINjG&^;DbhCPKA@lt+V@s#{(&I%K1p_dm~N4iq({@ zu>NH1+!ikpu&?ktEX@k{iu78sm^xO|mm>)&m_|DLFZl;9(Bs%GJgWGn$f3Hp+*HM8 z6SmRq`7+k~nmYlV24_RNTb0E)+4b%I+ZQbpT0pX)OpJe_Q{n=^?15QW3+|6vV0ck% z(D@OM_|6}vTCrDuqt&R_9txEvl+>>354xihLN`P<%tyaxOF>^WPaSC_ZONDQG=SzN z$r61kxi~@(NniG>7aQ=sdpTYpu@eX+sz*G=Is8*KWx;-e#~d~4?*w;atISz8;APsO zJ*`QmHe6ihHdQtoeG{745}iRd@2R3eYi=jIq5=plGLFqe?HzEca`}!Dn`2|Po1AM| z4V!7?!G>)gDOQ?JlZ|PAqo+^cf(IIfy}jT`ea*n$uIQvK(_;`)IvTl?22pik8#8=I zMJv?GK(cCmsd0p&OguHKbRk6W(uRwI1&MB<=Jg`rC#LOQ;kZ4MA2XCC*R9$5nh7ag zeg1mI(d5P(2N%wh8TYsfu8QI=%ERV5HEDnVG=)vax#nApvVdL;}N20 zeZUFkS$eoSvrUqzRce)Xa3*;5ynEqmfzYu^FBb*Wx z1yClU7(@(POEc<6QfLmGuR`T3O_l6TruiN9dve;HD*5c^J$!1;j{uL(vZ~i?ripyc zMH=CP?r(>l3eO%asar@EpMR|~;5&yF>ksWvgJ0wF*hr1v~QTy>5ZP>89Pebc8NGoSlK(#!)|ZF=CmFK3XRVTbQ%mw zMdcdWnNYl9jhto+cEjGl9j3*HGQ$M6`7&`Vxo|tH@Gv05%Zda&e*9c><~l@cCOV1G zO1Vr0siay7JFfb;Z89ZDvXW}`$P zCj6Elo6!UxV6jmO^_t-EZ2?i)2|wBkTgkKe1BOF&M>Vs3II4~|qUWA!MHzG|S6V`S zn#5;IOpeV#$=>ECwn+RDLiNkulpYa5M~`SBubuOq72XI4BuAu6vR0_3Nt4jElG#Ix z#jo;I3nlK-KiBOU1V^-zJFBu(Q(F9V%ll*AC`{~k>}5}@2TgI{gxBW!e&_^?*|BfB ziAcvC742#1-z`6{q`ndke`<(z>e@?AV|AiI>9FHFDANnF=!$3A+4|-E%mbevf4FWD zd z(1x;>tel{ZYMCHgHS%C!yCShCi-=meyf)1xxd+#~BZ{GIHs-AAyyB1duPDRsqO<$I z6`kEBqzZ0R{TXrfn3DW+C-cROA+M8B$Uk@C0MBz$Q$h@2*bzlrO%Im7$3yBRxuKEg?vRwUA*E0P{5TKNoPq}lj=-nhedjNz_akWJOv-6{*gePr?)QrmjY*h_su`)1hKWLcJK3=;;8{l5iKteeU>4poB8=*xOlHcD&d4 z`MF*zD_MfirHu 
zj?On|63u%CQ?^V}alCI5H-@AsU-}cxPGv1UP)Z$TmofS-UE%;-#DJK*rmq0@L^KE^ z|0N>HS)?NpM8Xjaf;0{}0Wd%45tk`iT1!)jKKK<+()J8o?E#_&sO&x7W6e!k_W(l~ zaTJza4>&CB()s!3zg)E!9TN+0*eknDRw@mTs%lbb<*S`W85S;|9fWe2i~yIy2teN) zD0;-Q_1%18 z*F5EyIWz5M4FfJ#QUe&3({ahPnk_}GA`D`I6<4N=Jil5td8fmzh3n-5YjYa(St*xBJs2;>5NT%q0Vq+n3cXt1J-ec6GAQ`-^W0aF}_-E`EBs z+@7{om}36BaZPOXhbN6Vr}kHMyc6woNz~F?wd}t_3-hIIP;KZktiHeWY6NuSjDne* zCx{UBMb=>~nU{yVFXGvccXcMdj2n-n_a0iUBNtO3PM4GRHthx5uq70%6py#2*a^^i zB^D^|-Pe84|BDx%pgm?Cy$jEK)2bT5O+7z;O-eit59N<*JW$1HDsi~b;aUBOFW`)w zSNqbSvcw$TKDDENPPeetVqdXY7ane7{@kljYo>6D)5-QHs7(^6w)g&*nj zPjZCuxn0kvj@fL_OizK^fY?lBX9*gZw+L^*bU9{dGg|fTlKMdH861rop=M11c^#z_nCqAT2zu^%GOxsWKe9Q=@J z-k_GhbB0bf(m$zz|f z_X|p(eS%y%R-{f#mmcP5IeDGU(?KdFyR|ruO2vz0{8Ni4Vl=^m0%!unkq0Wh8csc0(6_PktY8sBS_fY(e$A=dm6k~hfqew+DMyYtFK+SDFLzn7yqg?<@T$B5@@CVQi zx3sTIs$Y$akcqx7-BJS1BxkfJ$CN6M1hvhicZ#=wOoS!>&$#?I@#cmEa&go6Tu z^jPb1zPiN3l#r*Kp=5`H$NagEiSw<|iO2mJZWT1)}H$J*Yx9j`gX zrd@aIOb+^^#~`fF^9F~#_G$a&b@%=wCf4d_ij>Can631LTFc8sn{N`O|le z0mb~b6HKQX-ILyj(IEm0=JrSNNa_1l%i(v+CYeMjTqEPjFrXR){>)?7C!bn5AV<)6 zKCOFCu}o{P0gcfGU+`#5FI6fNZvbW{c@5ucEz5V?s0(I;g!iQK-S&n@wa!^Tb}W_n_qat4l$*tb{%=jrWX(e1M9_>tf^QZWHzG?2#xj37%E+QQygEx%N;+ni+9b^}O5d};{q zSw?b7vmUm=*KvQObn5J$oA28GYX|aAzbnpdECp!u#VRTYuYe&|1@%>PIA6@WmjdZf ztB&??W_8Ylqed>`&IxR;l|@Pq{3ny}1r{?&(U&`ZNOTb*NS!x*C3FH0zdxL#zKQ~9hedL&_z_tnQ-h;_Y^Kabm-bh-gj?#VjkK9R#Y%9 z_b=_j>yYhVhr;jq$9w8_(u|V)_8b3+4x~P4KO(iuOi74N&vQPeB?7d}JR1e!NE)CLxqLPA0i9<-2NGYix-Q6WQq?CZP zbeDj1cMOemcegYQ3Ujd*8M0wbnhqF`=X|dU68% zDw%`3@)U?mmDwMSpW=FxPVaw|Iu50L8M_&F?%% z;LnGArog7wJL(8>D=MsY8+?5=U-}vPo}uqh2(q^UKoiGH=;X=V-mkW|hgiEpv{_8k zz?ee3^qTg69zt(iV9$TbfJgtQaPrpXFS~E4Z|{n8T(jm&xQPB(d8qn}eKtI z-WOe-A=NeOb#q=+C?(H(uep}RL0%!=dd3HuWq*2uB2vL`|LKRO^3R8sbo8mKKmAs5 zGv9n$R6F_sFQto>Gm}z-zn1pTjHdeE2MHMRL~oFlE~Nv>kPqCpOSC}al!KLL#PT<+ zXQ9Wg|7lq3{rzq9ZEl!5PysaVbfvQmPRZB6>i6^3liO$tzR&NtH|ud$e`e#w%S$?E zS-WKIFGF+{T%-u)38K)w|LiIsyu>8@>(l+;ZLU!s1V#O=KI|p}#-lWLpF5?8Ko1<^ 
zQgv-Hv#f_nO9AZg){TdM@Aih=DW0SY8TM#;_V9Y`p6L_F0l%Z8HAVQ}Z$6MtY%B3U zd*%cTfB)?t$&~*No%b*P2aU_0jQswe*{^r)xhF4%{ChgtVw68+aUw2m>c78e`-}Sj zvuz9ezqV})|FO3E`9CeyOZl-trInc1?GsP`exKIn=KuZT`Zp2&*OmeMe;(EU>nr=8 zNA>@DLjPOE{XdWDe;(DJ-Z%1pa@Bvg=KTNvQLWLJxg6hMfOi8CU706hxB?*D@vDNNDIhZK1Q1)`9Z|>QYUc&t7kI@R2!~a%jx3y#YXtM z^ZlOpqn4%bXUkZuNP+sbd?50wgsL`+aJllrkD8-M#l8sVm<^2;Y3- zcR6_F?tJ^|V7~f0M~+Nq2p#^O{Q|;*o8Ve?06Uy8h zc#ZQTq4f?$MW%U+>OA4rxMR%lRR7odWl!rJrH@#7y1@6NX+_oOz_`u9bnB?C2;pZy zuly*gJQ48g^XUhL&3ui;uzmUcTtS##z}ZM}*+BDd2?@;!q1UX*rY}5nFdax@L0&;< zz(d2Vi)n|?x6|Xv)%9t7=zLp&jof7Qxh*SA6M#i0_DyWpqd>1Fo+#EhzTX*buB3=& z)y(C+4Zy!6T;@=}oR$#UE4uBazdWTb?-C|GLK#C5DC{0xV@)3Pu%2&_1fTek5IqxvcC$;2h>eXH&75IRFA4-6Z7^a#SIIV5u1gVn7giar=!9Wei&3C z`m)M(G$qJdtxd?PmdX>|SBi6@vA*PNcUIZ9?;Ao((2xwI-uqB}(P`Uytlp5EQ%38B zPe;<5oeJg6Kx*$87TiaX=1ScXF&NZr6GZ?a_k1m*1>oJ&yC5M|$A}~EU$LsD9-SX+ z<>kO73UIobi@-IZH~f00le`*D#+8YscXFr*g{VN)uLR%kZ}xZohi|CM78K<^kx@=U z^VrR{ms|x>RtclA-hzd44oA9>4M!VB2u<}iz;rb)Ytq<_*;v$!lrP<3hYSFPCZAn@ zg#;}l@GUlp9S}JL20Q>2NxjA+CmiZH&9K=?qHr9R_VA#eN8d)42_@NYNPkkOY4{3aE8GTrFJ?LW#r>1gxgbX?~ZQ`W+ z9L5i|8-J|CVr&3t=uO1;-Z=wPV-d1Fp}|#GNL+QbvsvEx%(}JF);^d&+SSKxT&_A+ zbY|wh>)1&G?%gNAh98aa57UEVwD0LjA z2LNR6`g^E@?_b%G@t&wNSWHSTMk%kGedHuE4jlCOxI&7-!Sk;JuMy7Q_P^rC2ud)3 z%-`GoFk^fdI1Y?#dej*2w9hc8@XCsV|B4!axWUhQN6F^*y0w7>2j>F;(bc#TW7Qdv2xvrghr0q!$ zWi}?pRz#De5zxIW3<+_kfoIrwR~3tvI3Uw>9m2w;;Y={l&-vwL0Y|iPK#g_b2ARhKtdB zDv*7s9jTV4kT;yb%BoR4eX^Q=R`V4r?-&2Ixsf)GtBo<`UI$+eo6q>FR*f1|-bcwq zS{+On$p;fN?a$;7H{sMJ<)!352;=)dUN=6sw0}RXF>NeT-ke6vw_~+;xCc(}p?{VD;{;Gz-7NPxX3jPr|D2^DdbZA{9v9XptME0gvp~?%W)Lr^Vz5rA{X~akxnvf z5&_lq(Et6~4d6|`bcoSB1Aul(=c6T7j8~9PfZi-at9Yz0?!fFo(PPP%r@GlfnHe}x zlTH>@QtTo1IIO}Tt~j!9-D&z^6wnB00x`wPqFNl4USk`dB~cQEMO~+=$+jL?K2z-w zZ*}pTME!QOr^0^Sgbx;@^xWmpA*Uiqn#nfpG<)M+XX&+96c2#g+d@uTmYmjb7~TO} zUkrKb)oPSXeVfG0~pQZGO#ec$2%-q?Ms+Q-%RXegKsMM|xu+1$`Atp0Q< z5(K%@@KUipHJJQb`pL;txefJW-nY}anN!3ObSpVTIr&DT1$i#udB-_dztUpVx?r|- zYz~hXt28l{6$=)XV#r`kb+hOub$X$>ACc$zOt_ERR=Dpx%u4CytxNaq^N6JIY`56G 
z&4J1x@tB`&8^2Qm-+AAVhZr5q5pDwQ6>-&xWeDk1Q@Dt*`zK9(ffo>AIp^!EWT1yh z1{#X+pQF=pB({CW-%D11(g?pl5hfrwQ$A-t_6K6^31zrzV zlPvG&Gwm|(7+=>njvfH!mpyfRUf1kg+ePXQ#_PGm#@m1qfblNBvbZ)b?xQBHaDhW! z_)J;AaIFVZ$da2T9M}E)`W0EBrTLA_t2hn|jY!x1iyXrmmhCHl*JP1-TFa@TyHfQ$ zL6UL!vOdS4t0M8ru5@s-Ulpj<fok+iRJUx;2jaij{{~I%w2oolk2#PGXz#b1`}@FXn9p=v7M`V9jdE zPP#7bI6XZUl0BTKq4^YA_9KCf+sM3M zNw1TIN_^#n5T`F#8`nMjtw*Iu=r+}o6(PP&J>S`SrueWxWf+4SDD~GV>I^T+aB%|d zG2J96+qV1lm%8cZlHXj4Rc6EtNwK<>!UoO4#p*lOfDs3bUm)gpr)|lHCw(O4>C=Mlfe@zC)IZ2Ef#RnZkrABNqND+_WufI(y;t9 zFT@(dWkOIHM3@6dxc^|WV%TzT)h{K3>6kwiWMWa<7HP`dHQ{#)4knc>-<1F~%&;B^ zH|?|eCBG)ZF{Me|Rqg}+9Wu40IwJGXr|jbL{3qf^B|rVdmV$`L^;*sMElH~1{c5B} zOY%!Q%Eru_>TMpRnrhU2!vBsr21G(UsnS9hDyhzn)snO{{*KD+XW|3|_G-52(g7aC zEFF0UbU2$`aH)qK6-o z$QDwW|MX%xQ`fbL!zhvzq%#4sn6S2G)mfYB00^DZNVh0jVg_ne=4T^SOzUSM;dzbl zDeGxVQdGvO%`!4cqJ}$qA8Xpl+?SDq&1Yzv7yCUpTElc1@&i6HR=E8oee#I)w-hBA&_)Ixn|=q z!%uEtt;;wM=r4LNaH2i?okJhEWOzF7&UC|5B;8&}r33~{;2vm_zXsSn9y>-oNTJP* z=*`JdP+`0i#)9?9%PR9}^$&1t2NO9J5E)&UV|ei$x%`wch`K+9iHR<-d1L?EML=1O zWU-qYd8b^nn{(Jq#KW>3o)!ApoAGm5L|LX7I{-D!m7jYE6}kYOYh$*%9J81Ug=m&T$}i`^DT+L+B9|Vj3kPjSj>MPnPiOAjC0vZU36y6Y5hCZ2 z3-<1Qb?AWv0!JY6h15nzmjK@-Bt~$PY01-r?QrVD8_%f1TnP+L((!q2zwQ|91CrNq zFV$rmoDXTpbXL3vImWfGxAtYJ30ogu#S}H=u@Cb=5VCn+xOaPBid^w5Fz{G-=!y&R z?TFeT<`VS3bD9#EDz!uOJrxP)+Ygs(<71e#Y1zVxMQox8Qq#V7*+TZ*)A0ocCy*8+ zLWR+9)Y5!Q6sAd8*WIlht=4OE8or7Z*)D}7BAWXF&fnBqBP*U#OVQzv`p9Q#IlhOh zgXcsC9&fz93mpw`r~Phrq#efnCD49_Pu=fFSWVQ_3dg;`&RKR4q6ZkUUX6xJk5s5d zrx_LkRZDbKotfKKfotB(xMTy7;|^PAa)@wAIpdu%ypy8k$0!8%EtkvF9D1jk)xTuy zDGS0|E=IDo5w9u$!gMaU8P8|k!PD)XGR$5rp|uzMHBfJR(Pc|&ref5D4zJ*0(BfRx z(aVs@8DHI2A*($Gs^}%ps}Q=SlchogrKyd(*NWZ~wvpTVk>G7gp5r~+Ig!g4*@TM* zhj*Pv+AgnS0M8#ut5eN#0`T%D)m-fF_r*{w4q4tzpQw)Cwca z7B=y#!gvTaT_B+|R+DCpMJa@%dm3uH+U4(-nPSC8{R1I9FLu~~5JsZn>wG?@U#wle zB|3TVbSXr#3HsetsQLn*0DSs2@EAem`suhUlVpewVa2BazSo9(S{ehV5Ug6q{SioI z_2P~SE_Le8SQ~50KrIR}nLRZB91jRA10H#;Jod2nOmfS#D~9f}+r6Q-?hZZTVT&MJ zOl>?LK_iiG$SWYbJa(*-Yd&87;ji@gx#Z7JnJIW)`Q9%+Myh)qWjqh}gNM%_HA#i| 
ze^3PxQZc%oJhWxQ;zjBR@?C!Ux!H!5+$Tm@JF)bZ%?|J5uR8z^!dxmS-%6H6WG#V< zhKV1y9lO$r^LS$@o+siaiM*!u(Yy}DjH(TUrNc1JCF4QQ*G+PJXQqhz!2Q^SbN??+ z=&p(U+khURumH>B3E?mU_9Wm*vDBP4boQOp34G)kA&xh-h9R2Pw4E<_6g?a(bL+XrVde@ydR9wkOh>OqbG#dm96E;r;HVM8Z z{BDSl0i8!PPpgIo`H~&wZleULK(~Z+&-ZV%kDcl`&4M`a!8>4Gg^%SE0nuh^S`Hb! zb0OTC+kw%5RfW>ZYh$QdeYL*fB9H#JPbtf0V!Dh94~Uj%TM;V5RN5PWJXwm%I*uf2 zg!@_CrTv})F=`uUjB4HN_zP^Wb^P$MLBTw|4${pgVRNfar-6a#PW$YQ@<-ON{x{fc zcH#RA^_nf0=bG@QHZmwlH-YNX$hFWxCSglsFB)nouKXL#zMQ6h8r`AGKu>_p@P^8B z!;3*f3u4mvR<}+U_8uUzO>vKqt)?uuAjx9kwhO1EZ%0+7@WXzGq=?HA(0y%Y96a=% zXr{`1b)#socd{-iS@t@@CsllRqT7kse165IakVmcFY4Iu3nR-m84=Eb1U@N%q*}t0 zxSa*|6abiBF}!X~V6}mMp;)z>ShS@%yU6iCKqW7=sNl8!?YBKXW!7Q#_0Z%eEtY4w zlx%iE9&jYQ;bk2}OavPApl8|1)Wk=rXnS_y()L|Q+`;1KHr_O}aK-CYzHuBt;866) z7ZGec;4pc_A06)g^wi_Daag?GYBE~g3!Cfc(_5T{>{T5q*Va9x zc+&tM?d&5pDGe)RWsqd8)4{X7`Cik}&GGsac4jm<%-V7Kpl|*-tfAEPqV+UomXJn{ z^QKZd#)tn6TdAej@W-I_Y3gowLfaZF$SKOs4DY|>cMnKG8|1!3H#oh4NHXfsG=I>`E#ti_=f2-v|V<^^-Rp2#h)m zNiGV2Ip?3&(0@+%Utdy(rmct(dR%<{vP~n%ke-2#&}A;2x%i1w!|ptiBcrgG4hHUg zVikxCae5@QGL3TY{qWP(%05&yQ1s{wymzt+_z;!O&F{>KeRpVXLe^-aQ{|C7WU7WE%s`Blc5&$a+a9bLoRYf2 z6LEE!(20*V%*Akz0fe z=WizlAg5mrNa|`rpN!2K|^GRK>C^X4_nNmmB{qzEa74N$TqRB zI~~Wy$DF$oyy!~ut?@aJL*13+HvPydhxFo?#p;Va1kACJNyZ(x!c7B#s$aUp4o8$8 z>X$wCV}1NXlx-b>KhS}|!(<>{iEAE@wx0aNjIrp}-5(c7ZJnd7MK^Scbl*EG+egp4 z_VW9l(2%uy;IapzKN&Omy@CSpT^&_#!{s2l4j-0|x}u{Ao31ZSk3@EjyXeQnjq7uTZAWg24(k3P^cnv+Yb6oo5dS{V`o(y6{sj{v;luLjx@yz*>No8LNHy?$E;MP*vNj6e>I3={B7_vcGx{s!BU~c>TJXs z_B`7|U7#`R`%ZsksH}j&?zTbHY<0s974zyg{tEHYm5;3kg?X<2>fkO$ESWi9Pd&+c35(l;yrsyJC60#F&1ilS|-X|C5EZ4L2QJnP&vuAd=DMmF!qc@@w$|%~_>U%cr$_ z$@!2B)u-23Prv>8b+?P;N(kLGgPhB)D8pMyB*+p+hXMg6YPZSRmIF=``;C|bX`fZxXZH#Fwym^dX zrtU4HR`#7)hZ{j_)vNW`U%ljP_p~d|S+4@H&sXVfk;%3baN@d>!%s^0n~{fzv5(D; zoid_L9?8%v%g+)%{Y|-Vrw5%e$~$VM6B0qTIy>3uiCYs1uJ!(2GsKTONH%z5UGmbM zA8_!s?&0fXb)3rfu5~#=LR`0CLn^6rb+6{u{^ouZm#XIJJlUSC^Nrb}*+s>y8K(wY zvDs(Bo(=9G)tB??;B5zVuB!~*n^rO7;)Yrady+W@oelkOTq(=gM9K0W!9E{IJ$dGc 
z=aj0L#tfQUx{@U9?h&G62e~;PWewX(jRfSsZge@y7P`1mW3_wCa!5Y*m` zt7}NbW{rmjJNO<0r!SUH@!(FbiNS8&e6wpOfjJcq4)~ETbJ5d;J;ToqMBb4R!$%yp zo6>p(vJ*>|onBDFUvFJ*xugf9&K59KUjN>PGd*+jrl$errb(n&&RyZBzw1{yShykI zCK}2SN8K%yEWRq4mJt-FvYb}by`^w>-7+r}RO2^r9gmJ8!J3{Wx0o6q{yKwM8lzBU zMfFV5M~WO<{rEa)WUV}KvWHdXehNXF{LU{Ew*`*M=+>%WUT zBL7i3{KyJDnfcMmM5SRK^hf*|{Sx&^bv3uyJvPwa*_xx1Y0Fu&A2_JU2(xKcO1511 z?YuLSS#9|gEs9=7x3n4hhNjAFRn|o}3wOxxb$HLT^@ZZ>F#WX@wRZvJtYal{x$48# zO60QMYxIZ8lNrzZzO?yIGlY%OGTXN3Z{KaWhGVwL87_ok=KE@+B}YotxjP%z&oy&G zo+g~OJa)E9I6DLKx>E+^>cLbTU&0(+ZSXlFkwgH93XW&0c%I$l_^Q4lrz z2LaXKu;V>^jh}hqkHfdIWT}Ig*a!8dMCJt0wY-+VG#dqgCxsAOSo{M^RY?3!SAjXt z97k@Bc#`XAokf>Gq7^TiyKA}67f>wnYU56T5tU_Jy$zD>4G;$AkGhhdIu~!$d^s`) zukb(6-DF2pJ9lw=jV}aD21LK`0x~dK*Akg^%`hRSu4sR5E6&L=?Pj?Yr+CfMdIRh? z9iZ?VOFw^I&xv63`u4FEo~=w7{&S%v!z|XogH0LwbiqaHMP<{^f`4tZuro>#5BH!H%V@NDb| zWEUbe0?Tzy}T`9L(C93O}m3Ek{7;>VlL z;pgEV@dcg1Y+Kls+?(uab9Xg@1w}Uqk7!5V3T{8K(qq;siN@HJ%DNKWghx1 zrN_LAr0gw(yHp{!3_C4%<>NTc)|uh)!p3yZ9RIe}Wb+{6xh$C=uq_YE)|lMe4DcD| zGo$rluYQ`V=J2)xumG4ZVtUtoF&VnEj*-^hjPf?BfrlsbSTb=E5U=BSvI(xNY8>Kk zO*9fEE_v&AdnS?$n|KOvcX*A%iVV-w1LseD`MhEWE*N&Zw~!UlprZ?_F(0+kil;J8 zB}dT=El;1_lb}EkJ(|d${{U9FRxDkQSXL`&jQ)2yWhCl94BU($`$QmPz0y*rp8X?E zc!GMoF!2_L(f6$`*Uva+1$ozx{$!Hak3aOZHauw%UX+o9B(aYi!edLH@WdH!i$1kS z!1w6y&}_HII-Tw7xSbd*@vPxNrY%M=5?F!~%PKB_Ig+!sI>=kY^bTdmx8NUaSe8ej zdNw@$U&%;_bT04*_w_%D}OnNLWJk>2l6{1upigs zVpXlL$UzcBfRp(ckKk*)y;yh#-&)9HYEdEgd?bT8VylnuD{ZfAhCVq~Hbr8(?8rUr zj|Vs+CUswV)_MDhN?Qf)_k~0Twc>!jAHT)%i@!h}KYPCQWBqz9WN@0$)ophy-}42C zd8nA_E>6tjVYhO=eGdO#Dk-QMivMujODi(>e)uI?cC3 zzCq@MgXcc^oU#$>OM+h}xg`33Ti%R4A35_>Ffvx_I2EG=#f;ejje7ZAsfQuQI$g zdjI%Q)Dp5|i-3C|i{soa|4-U1ZFsad8AQY2uVgoO1e2{p8;?N2Sz$43l0jiXy(WVcvA9_S2aNQ+_q; zH|d`$tR8wUhnR<(==V*2*@4g`EZ3Pw-(aJUW727(FQmsZYQOhhK(#dn@bmMB8+*TY zheuQ;rAwEPXS;x#Lv&9p*#m}|eFl9_ZBzzLWWg*|)3)E>>5$br_hF9Vfx_y^dA-+@ zVI{FxqAJCJ&+Vp+S$0yZBVCwA25fgMh(?{2;MO8z)W^&$H6a$w1tWP(rd4 zt63oHOf?s~>n<3Qv8w1C4$FDoA 
zPcAwAR8wLJe8xGrxY3Rzy-?so0U{WF?#+gu#}8kAwHI88AR3^Ng0L)vw>}eYdOq2rDG@b*U7d6zZ_>YxQv$u zVMinL9XjnM{4=J0r!-;OqfIz`fkJ@&%U-uv@BArk6KVoT9VsqUy}Lk5?OpK^SDH=j zpo=vqQ%gCf?M>1mV1gYKxC= z+bG2r@StU`+fnvdE82<;Yx56y`G$W~kc1Cxhsk^5Yckm9k~(-r1~NQW#MHlF0r}sr zWMiesRTK?UUGZ-_$?EFgY3mJn)%3vXq>%8J`+h0F{l7W2ue3?v6 z2z)Uo1Ex*xV8R(h*5LW&Dsa%yydZ8c!Bq$1{?(yb8|esGD7{##S~3`ud`}FS zTYWa?Z}xbb=B)^qvf@F~Wb;Q4<`5$p5lUkZRFSIw33#OkF!~XsS^UsEiCxD|cf1i+RlWpG-b=WunDrcl<=(|{mAdQeh7xhGr-X*2^G)%g^2LNL2=2!|ccN7j}b+E(IS=A#gG(SsstG;H7<@xZSW}r+ZXzv8Dswp zCQ;^}JF#rVq`9hJuxPefmc<`9QT%KUHVTf z1QA1$U4ODovU932bZ`931QPgJ^YT1*ua-)M`-oltWZL#Z4WidAgN^Pw+B%_UEFC{F zo&!F(EEd@8x%aCRhprlEkQvZLhIzcW4Q5RU`rLH|S6vyX%d4mz8cM^cOTS$_rSdC# z94c}AZTn1eIQ@=r$z)r`r{go8N6sS?rtxHnX{8)5>tQkq>}nA%@(aF<%*eyhsB~@~ zNaIv+K3vF&2Df3W@eEl!bZ0mh@+xeC*@k1=9{NNJF2d5pq zN;9QQcHg$Z169Olv^Bq+jA3VvE5Is(?UZfVSXRO}pLLaQD;w5NcZIu*=E41f|F?+k z(5GJSlK|mIoZN|2)lIsi?H#m&th5=!FLn0kLjAI?k+53r0_wrzx{2#m#OCPM6N5a) zQBG^_dF3MOxuzERbbeujZ+;ohzvUK8QeU`hSM@FmZb$FH2X1&1Ee8R|!A;D~_XH@& zD?|3Gs26bIA9;9k;*qv#@THbt^O$7>tIB|V84R}SgQr zTiUiF^7(0Lh&#P4)jHoWl}BB`fNtc+p-YFn_vh>3u%r+*Z$v8mhr=|L8#y}u7HQGR zkG;ooe`-8vu;WZ}ZHcpF_Mc!AV(#Tb` zI3T*q;t71BXJ35ZQH}Dh6mmuICvT_Dc1PFe(hOq*A%1+Wl;fs7d$rE(Ng;Rwba8Tu znQv#`lveV84)s=a@@yx~KiWo}=rClmINzYqE+uqPhtkO-D236&RYoMu2`sw(h#;3t00a~I4APP={jmJW4=&IW$br)H+(FJ8XkRV!)7=hpxXGo=A1;6<^u9rE@yOnlOL zVUYz?|Cb49wM4B3G`x)0(0+o=hdE8*s0!l8@BHr_`PbFFczUdOyB6{)2yrZqg=_}# zH{)R8q@*1Pe9(D%{S6+OFlG6vlolF!l)J- z|G>QaI&m*c?y%;nIp~Hr*mCf@OT6^@3wZK{q0Y_nt$h5Z%Fj3LU|4{M8ox;+i)u*~o;P1d<;NNOB_y#^bl0@>2L042p8eipLDOu0JTe%) zbh}w?e6Y_wDdTj|S*T0Ht4v;H#mH?ei340j+EAcJtIU{7DeD=;MAYHy0P%O-l0xG~ zyq}Q|Y-P%ldp=r9dnTGTg|QhDV< zol>D2;BfWg0_Evv=28VBi|{7`ZG*VEV_(TO&4#c0=%L5qWXv6?}=j|SXzT~WSn z3!;~@Ee=2T&wm{9rKb`onqI@VCP|{!&ZBECKg=sb33d2gtms@8!O7sCM;vSGy#7Ux zc)>ko*Bp=KR*WiJkYiyrtdzt2zhpk#ns0GE)cOtvO6J-V$o;3cRh|$ER$bnR(mH0t zYgDB$gTB!+J-3W=av6MqG{nDhli$%i`5RKtl^&7zD6Sa!8Qi!2Yc`?u$>A!?%=y73 zt#A^nY2&5A%btU-$8&YfuKV&Yu%|GV 
z$c+)ps$^9CZ0aI`)~xP|)?qcPtHt-REbC~V3(s==6ZuQzSm2CVchyOz0XoF6dA=QU zGu7ici(P>RQu1>Dl$G5(_BU`cB$k$l|je*wWPP+FKqc;KL z+{X8J>Sv1r|NCyuUd;`29pR0feeD3{k1k$2YBivTWh4})6}VGYCyLfpGaSJvF<3QA z%&RXQ9l=wCm#RYGJBW%=I;r&W(@R|y9V-40^RMeorJsMrFL#ZAaz=-X-pXq?^v_JU z=Y2ivw`Jcv*r`_SXx2>ZNjfNyKX~eE9^Z~CXjpB5{1J{R`p{ol9@`>Np5i*UK%S)e z)!c?n;W8&;-Kc~sm$H(;K>NmHyg@B{*VV?Gq~jB7xW4P$mt&Ar__)y|G?XlQNcg(@ z^zD-1P@`2%W3p`l-SJZVJGMj*OSzUUhj+A8DFebgsa+0t{F&U#gm^gteWe{&5ljTotonLjhsw6qoIY%3P9Mk^c+Je(9pvC zEdKfRn%Yt*`T{_rB$vO`!BWLir8v+ z$)9rYpu=BLUVVTNxGU%rJ$U7;)(K6%D@80nZ(pCLGQSEI2GQ7_gz931dGmWpyi_9R zm-Ri2&))1e`i-3FE8jz=@|u6E+KW8-7zb1%9^vVK3!T<~JkhP;zs;& zl6ar{d)#y=>JjTaM$&JDmO{P~OGmbpUMH_}(l}d z17G*c{`;iFNGJj-W4v{C91qb>+hr;Ze^Oex`e!`!+B~ztWlrXKHMJf_NT>?rz&Pc? zX$#xe0K0qi)ule}IA<8Sz+1M1kS8fnL(Q9OvfkgTemN0P3RFTKY*%7nBRKLw)Hj^s zn8E79Qm<46Dua|7wgU0v0l}M#6;SY?M;D7-`}9EVA~@oeQ>8n(O!8C&7Ew?OuI}^p z${~hyo3SW1X=-b{zef!xG1Oq>m&co-k;q zyGcr&ZVhfwaI;bN%OsXH@gi2)anhnGB3|ohRXTe%&y%ty8Zes9$!_;+9Fe;4x&m|y z>7q~K$Hts3=z)1nWo724CiyyahHgLSpH9W+HJV_H*=Y%GH|+Ed%}cebjJzp7I7Qp( zdO+brvi2&0NBdO+o9>E!F8rm#y+@w6o6^rQ54Tf1Yw+CZg-cn$F(ar-qMhVp?7wdT zJ9Z<$mYu&)sL|ykYR^owbIOmtAW^8R?<{6^b0>6;Q`d`uKR~%)@AcB(N+nb&BEO|! 
zD34TA4W3Er-fSH2&1}P9;L!DGLk_B1Kg~J1y zz(w^|=c}sP-~iV=8m@Ir)?}yXNuXY_{Di=_=67z(DdEHAt`ncFQZfZyQ|-HNQzly- z$DI^N99w2f@?7f}SnQr=3x-I)&~)O2f*t(7}2n=`8L2I)~zrqAL(FU<6K-g+{P~YUt7!;U8DnXob!~>w*%6 z-w}7dkDh`j0m&k+m#I81-_Eu6?;`F`d#JKF-=x4Xyco5cNIgD}g|Q&Ur_>|a$b;wh zwRX~kJ=fl{qnp3bg{MUfcgGhR<~eDx@I0BvyCj=Z9+e>(M%n#qAsvXpAJh}xNtZHQ z@vcBSSesE}yG7YAx}IKhbQgY2a8`iHpD4JhY|G7AoD5s%iA&nKXY=p25h4G$DydcZ z%YIymA*v}7`1J=Ip8XErpHyX&NYbr?r%8^L$l*e?-&;FSndxtQgmqP4zHJ{qE?X__ zi`VtcadC312z%YYvang3PRX#ShR!TXc(Em;0*QwMF5uC3CYTULO~|3&ht)7ny)S}@ zKU*tmlB1fk3VQNocR@_p6`|{>C*McFkhogFjWbQj%gV17-BqsjOtbd2*>iH?D{(37 z&+yc-Ol_9avby*VOx{$F&M~h65C@M}lXS>w3SL|GgBDcDpe~s3!dZv5$lE5=yubQ0 z6Ibi@d+eCsNN`wU7>nDiUG~IQCl3NphVSR1L2)@_?4T!T?e9y2eg#tlkZ;DhSzIw7 znSuhuVT#j_-U7~txp@Yn{wIckxP5E>Wmhh1t2}Y>946Npn}Cw+8KW2t zNzZi8A$91DfUP*iu1Dqqu$RTKzDde5K8rC+z(k?SZ)!V1#}IvL6^Nmfa&C5t_2aSw zt64umX=$=4v*1ywL}egI?pcu7@gCiv&$}a1bD;E@fzNDsFyuja9`XhG`Le$4DPY`< zsI4bUqf~XbI_8tKh8p)Nl!}Zb4)|!Im@0k8@aE#fMZHgR>Rj9aE0Y%u!~29Zs2wo| zqq(;BV`k2}4&c+`T37)e#(i$|YO!tCZ621r*ZU1-(T&Ak!}}4@^-~F+mbBxK`C$+D zjtnDe?@qPs{H`>S{SG)>K0p58iY}`8=I$EHo7da;|7URXAE7R}d_&XujVIx6SaPME z4m7zy(@Ia`1Q-_`7st_4ZENK4d#_~kJgeB-mx7d=&<@( zDd(%G;))$dbuv&FJ4)D_BwA^uk3F;Fm>1_jGGAa+VMRCCZmU@4o|6LR3T678e$OJ!h zM2|Buq`w4ql-m+1=FRWN12+z8mnY|u-HM!)t@!^oqUrr(*zzbOE|Tef%7gYeC#*lV zg)@wNf_!1*BI;Acnx%senH_UpTYVUA*naVN892L<@7xi_pQ?ssxoXq$slLFi?5NK- zzQ2m;$x?V#6z>hX&^z}*bfJ}G072DRGK{1~i@;8(!5$}*z)ct{F~~^*w6HdmT}PP< zDlOTYZ3_VO!ucw0J?uz`YL7@hhFMd>UHtgeg0%Sh&Y*mvzC2Ty!1)U1DQPiX6yD|k zb|%B4%Hx9Ggy69|hWtOyr@uRZ16I##mp)gfT@T-(ne+)b9+F zA3bPC(}b3aLd-4j_9s-hCzIo!euW$Vn08sBLqi~{`r}`PAIFfW+%~+yx=8@>Wi`jJuVb-7;5S&GRo6q!PbKl( zKhl_WsIJvcKt8TK?7;)9ZK8ssbEN?hdK4mwE6P1tsK_OPicefR_N9&FWPh&O??E0D z>6RuiM;ANKnd=r`Ta^oTf-v6F=)i{>52rRT*%VME=IDKz`O ztt^as4R2)0gV-E@-D@K)kY^K;?&NR&>R^=x*?D}%BjlB+Q}lpUY{q^IhG|d6nnDjg zO`(Rdd0=RjymdMN)0qxY^fmTq5~D@|APZO0q|;N!JCH>MV!Ef{$ItSy;k#G4Jz%D2 z3>d;M6_hpDw7d)3bi*sB5O=^P`pv_WEAGHyvKJt@p9qTC8cfS-R^#7s_5O(E`piF4 
z8z<}70Q*_$wDb?^VX%ptx`&FKribC823ynzY-t`~AuGC_eBgGU8AA4x2~%2p#pu3$ zfokzfKTP30Ia&is9mbmrBw;iD_#uD0&^V`o^`=l=I+T`z)VTmLX&DkfPQ_B%SQ4a{ zRCrtILVe6V?M=}mqV+l^G_LvK=nJNNqtaiD_n#Q&iK+){eP^U|_a6uqSAQb$BXlZV z`k91@%rmW<<>8}q#Nsj^ylC&H#GSD8Xua*S_>_Ejx!x4Hw0&Bg96uc0Ff@V93x+in zAgFgwRyLQlfA-WJ1~y7Sb>{lFl@^_4PFujR-HTT5=aW7Qh~x*Q3RlhViuv&+M7+&o z*CcX3Yg^q;iw_5LAAD04Q@9x{gz$len^ljZYitlh9HWNe8{pPOA!Q596i06s<$OAz z4piuP$Frt^RCOJy}a2!X2jOTR~hw1rzZhuwQxpuT8`Jv3^2gE8Q^8*S4 zevsJ*@njW{W|NuCh%S-slqvv{j>vQ5f)5vJ61i@azwZg@q= zaynhtw{A@WZ->w(cE=x1=^u1o?_e|(Oz!b5zC_36bvl|=1;iYd#h{60ho3$1OS2_j zKRVi8aqkIQAA+_}kx>4H%LrYW>)R-;7<&-4g_?JLp*9sIAGCI&P;>rjvqtAP_9uyV zjryY?`D#Vc>Vnv!KDu~aum8o~dxkZ&ZDFIECISK~0)iqc3er`2$AU;#I-#k6p@Vb? zp;$l#MQZ3xdhaEnih%Uq38A+@Kp+VrK<;Aiv(Gu-Y3}FVU-x;wKY7ARvY2a*Imdj* zJ6fFm^me#b^lb=4tp{a}VEC!GUorXKgK%D8HjDK_#d?GzF)zRP?#S1en;`wMOmuhb zov*8`cmnxbwTgWD!Xg}2!nhE5dhasZddQ1nlDvJ0b2TO)GG55XscOyVj%O2MO=(=( z{Zhxz1(NErnlLl8chL4Xk0DwIcA+7b0(eY)!EulEtt!PpU+uYM#3K9!7=O4_lsKC0 z>FHge!UbXa;P`+*6t45$g$>TmWsNSfsw*i&8CC^N}?klTtWxD*yK45|uYm%3LC+Z%&8%ll!-;$L-5zxJ*_=heeqDhCl?U zI2ul&LXeSh+y27bIl9uC&=V#&BhFm5fpJQD^NK!Hg4x=iw4jBaM)^$j$X{D|GUcuy zk=TV7ojabXlhsQ5j8d|u=a!hw;Lp`xxJK|JGL901RG1!hAU$s~n4(>L?r0HK2ayq5q)$Au^^xz0->&UK(;|MM4m)tcfj%1k2Ou>jTie5HPzi6tO_16$JO%? 
z1-23E1gd`+nW~~!0#!$@RQNTektsqOljI=ZR$f5->-Q6Occvj&d6h6N{%bZ$H;%f~ zqrV*bJ3qf<{i77rbD&vst&&1>(xF^`anJ)@C}EV=b%yQkI08Mr4uOHNS4w+JYQGQD?XLR^~>0wC68ImT8h}stl;Uv#R$=^{!0Ty()>;Sf`?+_gIB}2eG|uInX<2 zKAR|lH}!5XcCwptNVexSP5wlpf@*tk4^!Kaux}3nI6GhuJkBbPK^}ZSFBuJ4J%}5u zmSMHs3poM~<<4nux`&9cvixPrgD02~KO(0^>d(iYd!UU^x=sq!YWM0g4mQ2P^<)uUJSU#s!!aY<47#?h$%bikHxQq;Nh83w3as~$P|4a&>us4zW6 z$Lu_~H2s{m@GzbE*uLna|$2&hqiUC(Z~{M*F?s)LhWUh+emf&N2^ zs&5`?6b5uTEZ?8oV2eyrYPdauykfhZBQ}Gr%`RBK*V|1xC0c?v8M~5hEp`7EF7HP(CYBJRWmV%zDAmXJ7$$7cUtok;IyZ zkfhfd$s76sU(M!Uwm+(2I1r^@UtNF@85KZ75Ra|vVG>~qC=3-+Pq+f~Z9M$J z(vAR3CS>(kZUVzQ@l5FMgkV4G>RiJu+|E|ZOcuY?{%cX_-Z{u{?Z`^OBW~;LxhHbLyDPmVW+6#b-HO_m>!h>I>VcZ3bh2usQGeGx(?Lxq3IChmI2}_$1^8Gu|-^jVksZRPy_p zDQKfBwLL|7{lW0MhOsgH;Uu4Kjd9y&cJ|v%2&)j90Ey3x0cc29CO73QLwwMrqiuaQ z4IaO|5GL209Qmq$c_=F_6hD)6Zo8c$GFu(ia5QheGFfdWIJs1Ws2%%i ziuK!`xtwoji@z%^>vRkmTq2l_T5d}Fbj*8ykk|3S%I02DDGZ1-=*yqG!f1Acdqdfp?ttP3>@^gbRX5S?yH`Ma7WL5gR1)?UF=c=om+0Nj*Mk+7%pth}5QGf9@+ zNFpfP4%=UC-1tD+DB40NB=qS>#K4@xd*5ntHpN!4Sc-%8d{$%g3-91zq`D`Z8AmXBESW-taPXdi<|p-~ zB#tcgTIAlh5WnWiA(Qu`=IXZmpo|hx$LXencSWD6f7fB?gO*A_h1Gv{?n1Xx*Pl+) z2&=@r5i3-sQ=(;!IHG3w5zJXs24B<`cBqHQIqAukJ~}1}M=W^yH|Mf`Z%a6A7kcOwsb7c zF6OreZ50K(VU1MkOE9~h)&fW;$_i;fqQmbGT1}h3wu8w6wTq*P8rA~fUc1Fqf{d|U zUR$X6HA&)~QJ_(>cP}Nwk@1bsIOz4-TNc8XItq!bxp3dL!Rn4~8yeHXnz5fHweNv? 
z{|SjIlkY-2=FS!vnIDT;&r`~+%NrWAj~4S2c2}6|$AKu`@GQFfg>|W$xGBeQwwF_Z z$9`2#qybv^8c<*Z(j)eAYUG9ifU0~(W81@%A@q+Ykhf1AkjqW8wwfJ74Q1Gy{XLpg z9$0zC2wLb-^cVd$B|CAysIeh7cl$gKY3iWR%E};nkX8!-s_3DSw0hop=Yf7*`N7$g zs?KOkH4seyblHR7YkPrFrjSp}gT}9rvU1uas-F%Hp28FwKG=JaBMw!cetzGYty{Pq zEA<`ujevmBvpNr287mAt)h{|8hbIIj=IP~I9!)mrewZ&q9nw3_)F}mhy13xRGdcgW zJwv&AWjf)5_4tu*YM2fbI)Xo?!7dR+jUJAet}Tjw6U@-D9ySU>QcpI|J)2Vd_y&^- zAia<=B_yZtbiLn<8piI^6__~QXV2;DqfdO=xX*(w#Lep1)O8xyzyk;yP%#O&8{?56 z<o`Ftw1KfPGN;rkIy>+ zF=VT$_ek_;4awgOFrknKHRUZn(nbDLpEM)}spwd47dJM>VKN)LKqqXH9IB8ybOI*Vr_sk=0Gv$u zifyOF5jXEs(LD~vY5f4QC2qwD*M2{|Y7vZOPerz^L$F87<-w;O>d4GyoJtUfUK2E~ zuI?@TY&XD@ zJH8g1-TtI^w32Jr&61q(;w|*qZQvy5zLaC)Msx^k3Twp9kshF`u@~!o8!tOH%A3@J zlI?e+l=2lqZN^KIu}3-pr?OZ=n0eBodVq{R)m$NCK_s>*_C;e#%8Q)S)@Ump{zMnJ z?{faBv;ocC!TbmOjDW~?)GLr>FVbdhAN>J8FUXr{8EhSYxw87=Va+G^)%`>PGj?;N zGD7h5UdZti^lALPNhsA3)Pd zq2DW`{Uu6bdWZe3^+aAh&<&s*3Ja+AIoF_!Um@3@f4{horGo{pE3E6P57_d8#BTBm zPQ!M>H-hWy0gz*=na^0^I5beZ>Vz7D<~L`bdte@!;M6!3?^{L2Q@?w9xAhpkeBC-H z>sno4@gDP0Yp(y$6O9C%mhm+nk~hw)G+pUo3ngwr3P6xBPtVpDC=@AV%{fe%=m%<5 zLnNNw^L#1%wR$~UFO>Y9=eHz(ybqu)7tG|ROJdT-9M?BOD%qO`uj;3YclyTRC%^1F z))uI0WkIApnKKpf14Wl*+%RepQi z&ft(S++$U!5-2pTw4+pN;LnNkdx!YlG+7K*Ly3@$yzG5fqKNOYYj_5=o4>{TMZr9E zVpMd~qugRe=sD6of#RUi(!G|&&FWmYm(#$DHQW1{AgfNt14C5Zt54SZeU9P#Q(ceF zI}g&@_Ia#1){jo$={)jBGGACgc=9UF*?Z&NNGoroEA!P7JhY6P!FU(U^G)1S2dqI#WZ{gZvv8qb>F?v7f!|@etgv14j5y4ZcX|J0R$`xa58~XWz6}HHM z+!w6&QuL2QIVHN+8pQft%%~IHE^!tU9`4DNa0t;?m?n*uDd~7O>w3}dv1jl0VeK*< zckPdRMRO(&Iy9g61)sW7A@i`1auM~-K|*Y1=-~l=xz(H2^UzVam|XK9F+(qL@0(># zmcKHnR@4wS2-4Fw*QuCleWHOXXYi>sZ?cuc9o2ay54+dC5nqu8OinG#uI-I5YT;wd zb!aI1j?%&J7AIVOvq(3eE7*=*V2!Blnex$T4zr-3P+P2pgJ_y=HIYHjQw_KuXmOW( zMfOi9U}$wSyH6;r-g->AxG;d&d_*gz()DD}?NriNa%{f1jCQLG79EM`U+g5gWCpQz)2#3}dd-T>icDr{1>Uw3xBbgQv1t$|bl!>%8>+#qId0lh@m zgxS5$j7q!P+Z_c|j4=Hixk>?rt;-9UhUtGF>hu!*8{KundDZ>R!mN|bS}b@IpyRUF z+YJ3cnuE9R?-Y6wS#UJ)6{OHO4~o7UjJAv;r0y2e-X{9#{Lz5)m+PSq2kaj9Ce 
z@kwtu7otLRu4U{DQmw~>>SxWw58r5v)bW1N{CZK2eF_%!LP5p`H+He}ZE8?rg7c`y zijyuU*MVUvz^&$l-I&clk2Fdhz1xV&9qLiaa=>L37#AMs=RCQGCbTwoe(Zbw{1Lev zr!`esk?>vwXLm6H>5r$PyYPjl8&&5)OfRMm1ZiKM#^dJ`x$*7;GbxMk?BQKA7jwIf zg`u;Z`!qTQw9k)OB`Z|#i44g-S7S1vzwoSl?A%(R+0{hJHHTfe=u~cyCCY%L&neD) z4;1I{j)>6u5D(z{O#R;vJ|9$Jn$)?B9&Yv~G_KAPtCm>Hbf{rY}gkG~@_t83@kHC~8O zy#{7CqlgPo;BdwcZ?jK(Kfg>jA~Iy#A_0nPdW8x*kd>0ntr;J&3VkoHWd`P3wQg00 zGOj4eOpRq*b@kxcmZ`ZW_jW(%YtZB5t!i?YiEaJiqh$w|Q2;S%H-402;P2WYhsQv7 z=AjWqTK-Ld^<%t)+cT0^C>dD!?I>-^j)G{^cM}KRt2pmmCMZv|k1RO$D%`jey>d6U zJ@r&sCGCjda!M7r2W$%%$8aI2dFnNVqh6K#N$cI>)0bDF6_wAvkgn56u9?Qc3&V5D!}>z-UEvs511mdj*?L9hJcP62qb=+1o7?mS zaC1+9+sb>cLjRlJZlgoG!tEr{=TX*JIre`78)m1yqIb7#<;*VYxQyOw;+67QnQbtW z>#qa#Nl99YhkqmYxo7B~l(#9k8)%F;mgY>*&nC3`HdllD%+U8q*w&99_3zQ(GBE_} zvm3F;2fME-d57ydzDKjDAySO*Z89WN-PYCn@qA}88X#V6|H|<)8&TD+cKEf zY*(3)tu0h_V9!&b(-0Q083Wdu-zS>T3lgT*lWDUV8|D>t`s?U!OiGVywzsB~&qfZ` z?}7Q4nBVQG8G53(R*#Hww@M^pA7-Y9^a`+xDd6 zq4{0Q*C5LWW^%&r>QVzmp}qY}y9ufZN+B0M-fH5sN2Fz?bd6;L zj*SsiB5Y+Q1h3-Leei&S{`(r^i@n{TAfLWvci|{=+ittDIkxPBwJIlh{6;MB$+4m; z>J87zuD7;Eoir-kQ9?`s{SY1oPh=G`^D}$g*$@oEnKYP>I)-=xU4{HPkJ@Qz%>X+! 
z%|mdu?#(hHE5E3LZ&sH3`ah@)=PGiuC0SriPFx8#z;n$mMqAPrw*TQGN^7yKymH#Q zGVt+zJ~^Ly0TFOdX;sbQk@r3SPnc$*QrbVx-E{4%x=p-%jPH{k{oV5k9I6VJSzXfR+DEgofh=^2-#Nh}quDMxf`QUxNj1iUiy7&~)yvI|PI@vO-}aUjvh6+8+FMVH{R zC^dPKZ={$O%)3^r?_t*+vft_NKswWafT4HCr)T&7^v?xW@NQ#XEA80Bu8j6B^+v0m zVqE9RR71sBgC!g%uX7}?U!Sy!sY-LI;}gp_0NU6w{o&O6yL)f2>d(0Y{inGDRDAex z@fnY*WC<-tUb92#+9JcJzygom7%$ZsEimHAo}VwU0%DN`TY?!PHnJW{(f^I9_oX^5 zN$|NH^mo4Ni#TNkyAA@^_hW}SzkowEMZTVZ$$oF*X?mw4MWOhzw3}g-7I zo_O(?WF+FfklKU4FNfd9{9fp9-pESqX~|3++v##>@fB;pJK2r7E5l4RV_acq262ep zm_SIZnq{cLG$*TUc#~u*VX+q*#%XC=&zvl`KspdGkyOwFDX%|K-&GB|L*iEB-BGuv z5+}z+-@w;P$U_JSp-m#mC7`T?O#P~{HKTo|6RmZflNG_?_>B;Rhy8BT07Wb z1(CHQTrS~)NOWRRW6}F*mo7o=Jjg4<@l1jYS}=|XHXPsE#dLcVD{vB zSN~?SY8yuc=-8%^RsHcF|1O(97wx}1;O~Fl0|ZHx`TW$M+u?tG_mAIaRQ*PlJuFXQ z&-o9p&9C0QrakLStJ?Crcz*TWKj!1H1KIDhJ}&>)M@Gqfb@oEkzX|d`J@((OeVHWx zeP5J7|6D2m=X(76M*4@4&YtE|Y~sB|FY-^9{?pg~>DtvwV3L8={EYZNhVm~He^xgj zK<5ej%l}gs{;%IF+R~kMR^1e7!u_c}em&Dya{yVUigy3gV1F?Ifx|3UXQL*;0h?U^ zGUEUKr{WVpmiKI81OL*B{QVho1%XMf?E7s>`4{8)$1?_|0Ugme|5ts4%22t*3?K!L6<`l!`-eg#{h6)0JZQM}rAd zv9TA+2X^sWVF5!@KI)}xe?Bf_EydhY$Bz*x7eeE69Gl%8n@SvC`oR%dY|rd)HxDo&&;+F<$pYDH$q zr%&_;XFR!SwEUW5Dzm4m;Rd*p84vUTe>#WNwZ9_Gv!`z;VzU(a)E`_0QT7J&X@;Ll zWVj-G!$~9Qc~boO)?!%v2*0vh{Ao$0@89qLl_$y22&|FRsb6#JpX9c2&7R<=JnKyP zVlaVE^Ui>2YX>@D2=G;PDkz7k#B>Qq?s%ejDH9We=_vlny&tADb?Cgx;>lc{a>89C zqnxKw7j}=h^FUqK)r-TKUB<8M5a{ybGNNbDxu9{0$@$~DGPTTKz+%qLbcvGgpvRJ? 
z+l&;yr5iQfNW5#6`Z|($;k>hp-R;o7aBM0@0k>ddT~cnsa0Mu4h3W!6724C1wPF|l zVQ~HTrFgXy04PcX8cY1S+rT0E5Vt|l!yUu>d}Ip_tUvA6)D++MhUd77Ui|O3_{UPc zN_%p%ITZ`oxoxJavp04+;Pg8k2lRe-e|}wiVPVlnnP|bCEXCxepN^n39!+*ct#fQX zuiqT~YLM>!H(Tge2lsX0#uwt4)UJJr0sh$sd%_OTXyb6mV|(`6ikqqXNHp;L|F1^7 zNO#xc(2}swr=ZlL5hmfF-qZmLzVla>a=IFTCESvPaV&v~z#RJO{fmM0ca8CHh)z)# z0B#@GhuV9&p3$6j-ho~PUa-&3{Av;aMHiR@ps@=h*c zoWh zz0%#&h9|<|JPo|aUHlj8^FOZct9d8O8M|h&a;}6Dkg5FH{||xGG^u>ki{CPkO@`#<=bCasYxs7Ctisa6Pa|J#E3Px?U75_qobt}l2g z2g^?8wY+@wjTO+jzIyb#zF_=$=ba@fQk%FF0VHoysoy}FQ=Vvm!VU>zDcU)TejN!s z*%t&shU~-nLSrUSEWvYjW&b_5W->3Qfa#pQ`>GujNE%G+mHf4uR8*zOhz`&}`Mp`( z?W>jymRj!_yP=hA0{oVjzr=Y!+Z!@z59X4CEAUIi@XdwtSj_@Wfy0&KD?BjT#~7JR zJJ{!5FVJN#yPY#eb&C(cV`iL}`D3oP>CAMOR%tltyYKioqfN0rF;0=0HH4zQGP&040^zMTRfoe_2bG=ba1 z>pH7GlTru*Rd_!dEkv!l-d-EZhRLX+t`l~a!}*J9O#F_$!!{1qSSN5ukCde{t3JM6 z@MoA^aaxx&XYA=mz|IQze*IT^1E?ebSm|%YS`F4Y0XMD+hRY%&Q_<+dj8eQj|QIT9buGuKvQaU&9&4hB>%{KYx znK*}dwe~8(`ycK1?L(|bcm&Z_)oE{ts?pS#O64~*R{-J+w}BKMGjYu{amDI^IX(`i zP6fW9wn8;Lh~QEd?}*-IAIQ^DacB=C@e^^JUlfk_yC!)GA2Qnv_SFkQ_Pttig=o|Zhq5jl6dgds~nhsGFksvXs~P_{?`&&408 zGS@N34|m2AiffnoT#uw65yfn1?0kJACQ^a4uS3f!bjzsBBzf_uWGi*-TV-%2@X46! zZS6eWR~?B`<#NV#43eKXB#d5A-*9`1$3+bQd%tWHr65^}QW^89oQv#K zZ&e7q@mTtWX*G%XL%{_*V$fN+eBd7x8~dB_X$2MhJ(1s(!>&a75_TSc-k7*Q^C3Xz zaq@)}l_uWS4}D?uq3kkBK@CzfQn4~!GeVJVXI`~Epbt&jt~$deK5DesK)Yl)Tw;*; z3ZnSn38#S3wGbBnTjqY1ddIi5xIKVY6?=ltp0QL;0RP!FHrWcIUGg$g^w;CzXCyRZ%*c&|rPL{jh z)DbiLPXW^ZS!wlFDppV?9@NP`27GJF+rCGw>;&Hh7PiBK#{?9q!9A<}dj`zKGbWrT z#TrBDmKf(p!5$hf`O^Un!i6bd_Tuu%5`aDhkH0>0Zwaz@BR-h9c$2eeyB<@3z6ELD z-0_8JM(}6}rC_JHj@z*#F=1BCkPxf=goTG8(c=ztuh+B>57M<4;e)u0FV*XjQ328Z zO*WImcOp=fZw?xFf&`4Kv^8sKQ!WU6(MrZlkFK8=enJ+JaVin$A0h}8a0j|S%X+ui%dzi+ATqGi5}Pc~yP2Z=s*!SPMfEt_TzxE1M7%+-DPZ(Z6^bc$ zA^7rVEUWK3J)5f9%ADg8RBAEw$T!W;tGkoYxN0F>3Caouvr?)R9$`^JWIX=o+9+s; zy|(WF{xYrJI;)T@H6T_f@#hl?(xIzvr-5&O?r{|MqrlEudx$S)r{!}>#*q$kDI2Lk zRL64;r0P@)aV(wNrCdTy18FBhOTEy-0#r$d{~hpQS=ZgJ@pHwUh1F3mSp>$=yUkf? 
zT<2e#9`Cx#wq@Lt@wC2KXe>%2Iu$SObExZWv0szA>mDTkb6t8;VM2Bw5->i$xOM-^ z^b5Rj;r(d95EtWEB%8>}yD0Cq)}kG+-8M3qQ8`Y>=ExE(<+0(_M8G{Pe6cs0A@L?C z@lohy;WuWNo(o^u)-PtQcG#_9j@@Bw-lw+1@OcLNmN`C1$67b79iTrx9zb{9h-bBa zeJZo)_ShDe!!a3;dxDHpn%0+ge^Wg^6c2EQMXAT#C3Jrjopqc4PO~;#ix4o|N$Wd0 zQg%Dy>cNs!8K=<-U2G(vqhcnseZ`6poS$S9c&f|_aX*NWBlgxIiwYGFee;N2|McD& zYyJGUrIxSaO*-UJ(;m}lG}Blc zL*)1Q0Lahz_UnIElmF@HT6uC6M4=ge)@F6_=$(kh($^RAA$DUacLIYymgZCDc?$37JZ>9r*YIP2J)lWnu** zne5`WC%t0htj$ts4r;ty_w)6Ycp!c?0YgP8elb3bV$m&)M~*0^LDd=+&-Ve5t{`#- zVD^8UH)TS_A)fePITknEVA)fuUCUvT3mqI@a5X2RI{WBycBpo~N$G1K`jjRA?KFc9 z(q`s0<05fWCeiBTw5$`6OPi8OI(KWc`mzUX%|H)PFjYa|PqA9=z9@WC?MB@;6-cY& zQc>i(9-nI3eS#H2DyvW2@0y+bPmM9n1kwAB2<#&;k}~l!?Wtd(q1nrH4_%v7___27 zT^gf!T@y~uR(+Uzq`HI$1~Y6snq&XYXu5zS#pm9Pan1KQH`CJV`tGUPnIF48F!A(Y z(VSL0QE~DkAu+;9`~VR1zIpp4~mEIa$HgE1|FfTG0Q~i31u~S1LItof;_>mVj8ZU&_muj_fs}<^V z|H7~dn(zz6Fd@9E&HsD22Mi73Q>Z4l7-pr%M&~}_Xj%PxvXWv3?uJT zC5le#HqI?%HL2Z)1%fneyK}C6@Nq61^DlCuk$lI(Gkal@hj017vsm1w`)qBw{kKRn z7ls$bx$C-_`b-0hnN?~+4TdwmUmLye@0j|VOxxsIMSKP_AU(vZT{`RmWCH@56BHvLv zmGG6PYx^lvqROZBv2dUUjeFY>K6i{RDYJtMn@GSl#r|~aLT80(A&?L)DK@!7W{55T zzb~Bmm9^C*#qvHTAkyE!k=^m&*?@XVfB2w{Qfd%mwc7oiF?;;%5w`;Ou9i zMNYTUtsLZcF~2<(h%Ttg`noV~V?NZNaOO!Blvpn^gU@8mFd7O@dIg!)aP1+C+-uo< zn(IZ9yF1`cKcz665t!wNyaW*q{_75g)#^p?byYi1O+X|Iop-A0w@P<8fs)KFy6 z#)MG4ZZPt5o&&UNJiJ3vaX#{}qDr{u)S zJnKIlcZjnGo3E&I@_^p`$oBx?V^-cEtx<*7J6(8Vov6d$)!97d&v61RkGS7{;10WF ztLI(DieqG==a@Fgi+z~kPsF2`T*i*v7Xe$b^ySiza8r3BLQ(nns8h9?+>li=(((4q z!wOIo!X#M^s8g(?U3Fg&Y#JYAS~u%phZBzkYSxsztIrX2)HN{MIIa-ET!Ch~0As{}EJ8BO^EuOty0 zz>?D(Ng2x4GDg;|6;|zd!lJ9QLp@^L8NV}lA!!wDtTHaCYd!QB2rV>jjI=)2HSUvM^Sorp>if6Np8zPRvj6c3a`VNVy% zclOoG=Y;9z`=*rZkphOsE*ZS%cA=zV3W7Bn|57p>XDwnb|i)m&?7>l~-~dF`g@ zv+jgqRjS?B%mp^L)`}IvB+I*^K5K;@6oUywk(*8chv0HdG4!%G5=LD&r|x0Ud3|=V z~Xs3gwkY%jOZpykE5Nwa0FmnjDP*$n5~|4v)+f%ISYYDDj1fp)~^9Bej4z^ zGYVb~XWJ)@RdqPq`|oa%j`j}{3bo5UEWkVU%$SId$eD_^7OF%{(UX>JM2(aBEE=$> zTt}A_ZrkqN@Ln28>$NP*$YidpkhAs)nJio4w=?8dfz}chzMv0u5&I%(A`S*7;YuG9 
z(r-;3zIh3{nb$=pOxOT3M?SwuShlmtbBV0nT~;91ta>n)9-PZ>LZHiMk~Vsg4Xa}swERd%T6S#MgTo=l!Z{$Fe3L3~QNvRPUB47_X$e_N)A}Tlttha#VTP}A z9Vi9o*62p6kk+4jQLSQP;wvUZUuqM(X&>+3q=KM5TnO8@7{clmxfO@9$5p|B{+T>v zJk^ip0qgsNM!I9?hSXL^h;ZZbMzw#*kGSwqz@TST8%r)NJ0ALUuGs1cDOtNK`inbt zVp#%fjC(!{rKv$qfKFj>d2a*HO#$~MO8*t*bRLH2^3N$wuL4M9GQC~C$PDZ${lh`TW)O>^t#2^lEW>3 z6sTONe((GQuE)ntQnyKk?(4L-fgaf1-PR!Txzi2ZYL}Nar$isR?>aCi^6KSH3tK4D zmwwa3tc4eJ8GS%0rogB>zN-u!W^spBIZbK9J=S7xU;9LV&Y2c^|Fq;g+=q{~`SKth z2bucW4Lgw`32VR}OZ>UWU~TYfGZ`b~e+Zilb%XLUJLhTVe*j{0z;+TlOFdj&EE-{M zFy9zRst*^zs;^jnDB|v5)`-Pmt%V(02uRYq-d^^f5dA-2o6T%`chOn95x~ttg(<57 z0J>(2(4hmLVIObL+prW~MYYDu$qh~c7)%AWGGE-49UU+<{wngOhvNMv8(cSyi*nps z$0KqMaBq3r$T9V_X=~K9AWP$U1*1n zvNADo0o<&R1j_4;Wv_4UBB0jt!#YUVU&AZrHz0a9)$`NRX%&E7n9RF9G|gG)N>C-C7N6 zxH$m5uaN`3f0un@AV!~D^O!lY?X#b0o^FHi@bkx+e2h}2TT7kZg#{$b50$<-0g(qR zLh)x4^Y)tR@A?%uc?KR|LRx$bi{dx$iJSi0L6|7*-s$$#6Cq-XH^39^<4NA9D?S6( zZ^xljWTs`ueBjIMby0H7Gy;HmaUbGqdyG9}w0a_|kldl{0-n+Q4q-ucBYclQJwF_& z=oTYe>1L`P@ifXApE|?-yszxw*5*sqD!toQ8$$JqFTijH0mGVy(`4CEd9%ZW+L~WK@7&5{keMl{c7ea2C*vV3RAG zNyiCNcFD4+!eSAd?!y6h~f z*x`Br`-EzDp`098*6?kOYW)*(atIFCFWeU^#pN$rX`J3h0fmS&v5`qhQb17-F0BOr zNyqvl`FwU5!%7{Fo#2Ml=1_OpEuOtVbYUEDohifWl)YGL?>%t41f)a?PL!F>sjJaZ z04qI(3XlOb{Uzq_y448??6n-IIG5~KYI_|rMixFLy&?rl?V?~% zA|oi$;>Doa)Tl<`bJ7uq*amZL@qH;0LS{iBp~1MCRHM@wf8OtSPKaESf$5ii6X(xV zLc;+=Gv@e?Ao_&Y9#YGw=*hzB8S6Z}dTH{ zxGDmWBSvMpI{qwRxi zVnc!x{N78=4+>^Uo=!^#y`~>ogs$$s)aX1%=lEwWfYb4vn*)&4ZYUnw@#Wf^ktAdY zG9~=ma)&nHPY4eG74knT*-p+10<^fyR%h(1$faQ5EFR<(NaU56c7|SeFI0CsqW1(L zQ`frhFCSp?nCS|ZL@&FqnCc}FFYtp5R-^V&p;(vvpUpV7~pKeWrEb zd55jo*8oib9bg|Nb;i`2zSO+Lvo@INnl(W|$08J?spVd-oC#DZ8ywlYpD8%(y+3e4 zB=;%t96%}7xS~c2<5jg>kqU(;ICc6;tPb-aHXnP_8RS!<#%n6&V=9WeAo|B9hs z(+`{=ffwo7ZIrHV`gA+;XeNGPHTO|^*{yzS^=ak~^sdJq8h3r?;S|_%Z7{lbpCd~m z<-~0ERu5D0ka1lGY6{t()D^0bQz^RLDlzI`_VBK`Pp+7tZb@6MP5>E+RyCR*$#3z9 zUJ9LeKagA#V1u5&d-su5czGl6RHF~}%B;oL=+F8B2`g_uegpWHTW;p9(&>6-8(B!S zJGAKQVi&9Vlbb@AiWByAZ&Tv&-a$Pwd3`TKcBr;c&Q{M5zA_W9Dd8@?P|8ie)N0nJ 
zlBG^lukSEC=HfilLcI$5Fm);tz%XLgQo95~CADgNT_Yjmi%8xv(<_bhZ| zH&pM^eu35sOxPH-v((2$I)>a|ZmfRDz#+Xc-G1e9M?j>0UY=2PZldHl6l;#7h(%kd zOLZk4fc+5;e5T zzGJgO8bd65CbgE(B&c0ql_NqHsSgtO{~q_KsUz*#e=#@yfsuPlo8GZF52`9=cFAA$ zKXU&bKH90ujN#>J{m2}X;y=TM9`naA#STyb-D`yKD1({Wupn1H`6mX)Qa;0RSz{qv zH4EakrMfg|pP$N{(Cg-YSOstcgPZ@HjrY!nm%SCv&FtWNjVCC-BgxZ%iT*O78ePoK zr^ed2pdhvAl^PHUrQVFt$T#7*%@$CJ7XLBV(l}9GFl8jd{n*HL+Jg?z-AjiQARN>Q zf#n`h9GP$BXE4Db@$5(uBSzd1BQ`H^;(-IT~fha)A7ye$|>-1 zeb(I(OP#b%$MD2pz&{-#J+T>~CKLmZOUzi!%|}hVdu#w_1F&@~MaGR!rDL_7l}$58 zzH$7ZU+%D4EID@R88P4ix%H#JT7EMYIj_MeaqT9M;raarEfIiTajP+ zIQ~?E=~cK{F8TU%|6 z6+p2WyzEuYsaZ;1Q7E7?sql$J*c#kexyLEYDB&o0Nq%av;I2WDoz=v=lnY@ACN-pO z0i1N!?!Uti4-BJ>Uqo6=pjyz|B_4iMC98(p)Qp>V^@;I)02tp;KH&)w`MV?0W~=A; z8%aO2<#-1+-MQ1IzM^%qiu)6oddhjME3>nc(GGfUW!Y44LH{w1yZh0sO!e;pHb0~F zUor;%{hX6zB_CjS2G#mDSUsyuIj8yk=h{DyAsbK89-nK2ko91Nq22yQ z`5`11a9%KNN^e#g5Dl5*rgt~ydJ-BVJUHO{Zq+aQTv0Q|b3$ymw%_tYq=EG6XBs&R zE-0;FtdDQ?l_T;~kP7K=ecq211nR`)?qo}6Go3nfsPkzuXgASMqnev`a&^-c5 zwk`2!#qk0pHVxZ(M3LdSxZ937i#-WbGY^}LH));s-@kI<6B3&BP1;v%r5!{Vux9hi zLYiR*4SbO^@s44tCA-_f9jv{ z703L+fj|oC!a*m05|cTo_;DI}mMkjC1(s{QSoHA1dO$U)q{U6ST6eEwE11*s#8$eA zP%#`WG}Vh&n0&%OIu7=&*|7jKM|X#W1chvfTi_6y1Sxv`O6!>Z)`a>{DDgOgS$=6L z*`8qCe>AMBH}tZ*vZ}ssuQFGwB2$$G@Xt;QEos!kBW{vsD{MMURA*}@MNoyrbk+f0 zEmXrBm(}g(=y$m#rF`Z^p}M)c&Fjm3W;0O4!GgO!%nMiXQiFZp4GY#<`K7z~>^A5s zV8So=8sCypzr%p2v>y1LH)#=8EUqL}BBMVcw)TY zr`nzR+FR&RDekjjX;ghIG9ZLv;5;@3vi($ftz$-n#_|mEZzz4FWOypYe}}@S-fQMi zX{R?4Z@E=W?1nR_0a-;E*5UWlo3m{7IKRkxyVP#YpW#`mNt7YbTf=;GW|9RPk62Ld zCb*4>r8NL_8xFj+!}xish{A1lVLLzLvAug)%hGnT{4daH(xviGyeXvcE`V1t8{Rjtwj`E8ew0Bmz z_`~ET74!6SFTV16j9|DdWt$$Y(eG|EzV`WUwb2-fUk`5J5_iNnte=3PtoHp`DZ)Rz zV22*vvRvEI>n)RhlK&|B{S|(B&mHHtz+zHAQ3J6dzj6`+l!9VB%VU1BY|&5Od#`HDK=4{p$a}5^dn_&DjgAtUSRCByJ+(<)|YfqhWh8V0J~wcnSgG zf}YnyfRL-3@hsNlWZ5V2+ka*4r~>O$spG>7iF2o~Im=TfGF+Cuu_b6Cp#1srx&Mc~ z_lkvCaz=6!S|q1t zfBtixz2{tKt+D>G*Uh;&SAEfA^w(YezE3?>^;Fg20DEsNAbHWS)vSD>17uYcNS$4u 
z{w=}r$~DmU<&MtQkJ;-Y3p;&>0OmeE8`ALu6|F&aS=pcXN3HW{WsGJDJpv>;NdYRE zRfX1VKkwR%HwGH`3_P9!V-(%n;vS9v8-J@o2XKNoRuaeZas)AdFgg#W@m6N?tQNjf zaxPK)eDzrp-J0-F8u!N?1R#Xl_!KJ>wqQBY$MpJpWYZe_u@fKaHFJ*-LXriwI2Mm)$Ms`fqv09~{epA?gYwUKDkb0lfINm{A3ek*vY1HurOYQ4o(pF7q&awzmw>{8=%x#$f_4}7m5Z& z{hhWmQZYT{d_l*vXK*jjvo_rj7PQKO2sgWAcd6=ZHgi}niUS<4XsJ7 zd5yfWccH{M|7{NH&q>H!#&juCqAcS5!4L!#xh8702y{RfOS8$R=Q-9R>HM8T3-Xo-P9s^#m*#H zn9$Xb)W6YV@lNtSex|s;9JDtpCP>VZnoLp>hX>Ha9q+8QI7NEK1N9y;0V3g%G`K3T zgUyn`HaL_q3G?Cz7$L!TPV#pg@;wFSQS8j0O5ekQ%Vnd_+=rcErqr?HX9Elw;SP0` zJAk7YbG|1SjzF9Q{pum&MlGYl>%w)V5MU6KUow%U6i$(<@bKRP(n0Lv_4}5rQHyOW z*rP>*%BKS@NMz=eNyP)Ljb=01y#eUEo%Iq8AcZF4y8YeM&S;lP`LW&9^URA;h9E+~ z^v`1>#gPG&kWAZfO=%3PhC&} z?dSbx(4Fh`xyx*eSB_t^=_h(&dM3wbz<@&0*DcK+KKpl7R!8|Uikx2#|DCQ*W?LMk z?`#}UXhtrA7Gmv6xn*3%Vyi+V0z=N(>g%7P@NDbGy;=Bet_T9e zy~F$MqmEUIzGJ%@FuZ-$EdUcLP9OE(DMtPe6_ixOf73BQ{G8$9%1Ov|-RWX0=Tic^ zzIGFjyl0VF;%)u@)w#+UQ&CVN9DnUUvbO*FF{IV+L{`>`*=M^?D{k)wE34ER*xTEG zCl(k3<)KDQhUO6a`imI8uYuIT_6A(#Z$aok+=67%9}kc~(d+m(oZ~;fpz}kZ*Snc? 
zOt}B=^ez2moB#hQ|DUy(jB*D=Y%EhKF>Es1YPmTDVe_Xc#hvNzHKqLH-*v~&RPJCi zY<4GOGH9NendvDhxIH)^ek}X7PjYA;v8Qhs3pBd`-7E2yf2#%bAJn`MAl0?rMl(Qz z-~TH(`Y+)6PjA~>n*aJ9{YNS!nGO)kBr5=N{vKWW>&9*Y7E{?s+36ux!y{35ed6*qTK-rO!iW#30-X9 zbVsp4W>??QS+V?mXv=iN7{O>8hvy{TR=Y`_Rvg6n&Mb9GBKEw|4Q!0g<3!C^%ILK$ zVF`91IkgEl{x`VjxShX3-j+qwFXpwhh!nBMbuvZ@p-J2hG+MXh#wRfvgAr)S$8=#!FSV`E53{`W7;DUY1b z4j4`(ZqIH(3Q9@_uHsp}CSFBt`(7~=nQ}1?Dv%Q;c0(C61eihZn3?hM|1Vx7IpkK( z56x}oCfGOyI!h2`Sz6PMi*AS3apnJmPkl!eSZAkQrc5W;I)h#Gbz`t3R^nnp8&*y6 zH-GwnVx2&PsE)0i8-2qTpOfAvV2oP~EW8q{k@T&fDi3x42cL^U5qNWTR)TZ1Xi1Sr0W_GMUc zc~k#S>=y>?zrHtlU?{@h6h5Fh%KCN0(~8XAv#{l8F>wWZOi9KS*Zs=j$Z77dWq)>O zas*+2+Lh`V0*(rCDXCTKek>XG)>gAt*3BZzHSbqKWE?`C zWQc);PIhlU)H#n14>|pvi}9bFjz1Ub8zXkdt?t*i@yQDz_tT2k&)hd-ZbbGMVO!u% z0$S-+WlYUk*A2~->E=@4+_Y@y@8SHdd-$I|jyZ#L1|E>32Y0M!_B^pAuO0wBKWsVL z8D?qu>yb3TBY7ao*YmYlKT05pg{9j2bERgz5v8xIe-gY~xC=V{pBx==%oWr^o3A|J zUm#hcE-x8G-*$YQ>9KeWW6^?b74fn|PTY9&2Rg^e9ZND62h-|4|!B4hG=r#VU?Yq)IHi|uam{(_dx^-e#^ zOOV&L{r4q=OkFO1Q+lj#6KH+%(bCF7Tbns9HdnOYntq7M*NU_%MLcKV2|rL_@Z2oF zK#qTWUMh2FX1ljdCiI~MKx3AUGYPr;u5DIGY?C9RE9Uw9>hpPv>q&aLxZ9Wd zO2e{3f1XF$3SAW+`-3vm**v*f@HBJHXFC^1mIyN_P%oVa2__yZO98V9(BH6&p2m{~X}G`fVGxx|Q9SKWGNemYOL;LE zt#p)oRL7B}SyLGOY1A>Mmo?++hjeWB=K&eQ5F6FX802D^?{WBgK20#==4Sp2R?9!O zUGUk?!q%g@!B_XJkvM$>wp0_Sd7%dnZl(6fj<0ob;=~o6d4!cy+6{Zy8e!zdYPm)jY8_B}# zPoXWqGM6U_Rt>o-*cR|nub2sU;HO*HlV!${J)u5C)bcLRF7EGaP&-1q55fDN&Ka~8 z8c@BD!IYE)P&Gg5aB0$S1lXth5(?y*0=oqc4eP7UTO17P0ZbwVjz2O%V2?ot^3436 zArQFk2?4Y+47;dyHA^T11deyF$V^^I^29K}d?LX0JPy0)A!jx=Yl<1GE)#nHDTGkB zZ2vO8Cr+4=Tsi?0jRCu+y=>Y;69xz<0YJ_7l^!hO0>@tW4aD zJ?dmrm^EHUpRbqjf;w%Q(5LlHJCVX;_;cDdh-JWnUn%+}%=M)Ddzfzwp(`o#eLa~S z%z{NtuKW;ZVfw_C;~ya|&jQfbO+qbA!cOYbh+EOwI*AoQ&bxY;55YLHir4?{`wJo^ zg$r_WZM=I&97kDydd@Z72;8Kj_Lw{iU1|hxz{_qSDIhes7F~a4*S^{#96EBemaM~6tYrLHAlBb&O_Z&dnzrf zV0lxpMB(I%?d0oQS9F)``;1mE$7QYF2x={@XF7%GuPi0h5y{Rms08Q|_ZLzB%VD=Y z$$)SHsy?m*tHm}iRJL#cnx#a$wfsP1zT<63v+gIKIxaOkNZC#jr9bZ6=p?SVA+}^GIqx(S)@~JTTQH8gP^Pe+DMrd`H((rR6hU)-_&=ZUql#$2 
zC?-Bhm)Mp@RX*7<69m$etMVMK*i$>-z%Gwieyk&2WLTC!fRwa|p_B>Qx)eel67=d64ZbfHbSLVS{soiJ6O^xVJncz>xD-k0-C>-mlo<2}kf zr)Gy)THj`ez84~ABIm@+7?~0QQj2W?##)~bG2Jj|+}Z=txHrTZTqlHX6Kkx{om|Sn z6s^h9i?ZlV7+zqyfQ^D1U(2PzXzT^wkj+(Jp|PL-Foe^vNzv}zZYk*q&a3a4ZP2Xr zQq6&cLY?WemRiT}Zm@_g+>sv5Z|-jEbCX}z)UUl1H_{l(*dc8|~cB||TL9@hwJNKLF#Mg8$2l74!mcp6^c z&icIn(UJb=8sgytkBl_qR^v|>qMFvr&Hk_AUt(y*Z$_mGx=P?sR)pFZVjlg#Llu%{ zDz)D9t?f=beSdILdhq-JG1p=dZq|HTsozk)d)c`s;obH@*z?!4bghK5vY6ksLfxAw z%`v(-c?7%${=UWL`ucZj_KqL83<-z|&TOuMVNRLCUg*{XUv3pn$fsc+QB{SG!-x+){N1dZh6ErYll8&x5UBGzFx|4nWn6JuOH04x(xHrsEDH1&i+@& z*7^-U9jrT-I%B?7nb_ZGb*E0(Ju`YWUap@jB8alBwOTVzb4 zVodN7{spKan|d654H3pHX!9KtpDPY9If;-qDmGk=Rw^pm;gu?AQnCp9iCnB63+ICd z;01`4*W1s^^~7C4S^ZVRdDe@hM^S~Mtl+k}9ii|c!Vs*O$FBnNl^a9~J$^ETp0@dT zB0`EnEFW+(TR0Soq9mME#r%KsJ;t>kgfHY}*`BIX2q%ps{v4%J`)LqJe7(ddpjz>| zNuW9}%Ka``$jQJ#80?^N_T^ zoqg8|b3ATdVw=mum6zvWaX7AB$Vs|w6JEIm?xUdjj0^{uJ2r`oj%wFiX^T-S|?l;D?@qI)qO)-tlP z>hR42`>2t$hVQkC-d@RA9vk<9mAoDLJbl4!Qi$c8g2pL2rnMAAG^u%&K&1u+oXVe0KRHI%=w^T5T|8Qt$6Bvl0 z+u*QVQa>(u%5&~n&%MDxMmAkOUkuF#S8OAt74L&3DoTU}PNzJuzygNuNRV zoQ~|9dO!EX>;stszcv+;-Oi7?;-s04Dy~0QPUh+HobN;kjr{!ld=eFN{jON#%?e$3 zyZgNph=}K&Vz@99LsZ*BdzS5Dsdoh3i{azeJ_ZT6k>%}lK>OE12fh6ArxbLKBCw;+ zN>}Dk&P0Lt;9-~VLW1vGbi7-uq+cHaver1GO-_TC>s|fMP@ZjhToRw|6w!1W{Y{xq z_PpC(a}7?P01A=ACjj`RS0}!04bbs>TL2y&orfpFzVDaXr|}@O;-#AurdX@cm-DET z>v|B`S%1`n_!*_IxX!OH{moC&yLrpSv5O{I^5zt!WGBxTo7)v-5gWx)}&6b>7qNo2>51h8zn4E?4^Q|Ha? 
z#g*6fGCBpFpz9R#Gnu2QZ(eoVZ0-4e^$~q}*&(oa;^|pAPg8j%LmuQ_n8<2g0IA=y z&Jd{31XfBw^6hy#2k4JhrhDccMCJ5`k}KU8QBT$K8<9&DkstY-PIF#Yu9}V6>s4a7 z0nDYilB+OazOm0V=zN2l3vuDBJ?UYM7Wk3f_r=h6m<8#zcS6W;O=Z~5R=o8)lB!bG zdNf=4P$!C`pb*o6{Xd$TtSGfR-;e73xcZ7?o6Bk z2E^!>^=)X=>%g9zUYXavzd`l1S+V%aaR*w=0T>>^NW9+>(&w-n-!hL~0h=HS=eNG+ z%#V_{D&--_*{?ZMPmuTqwISpQEAEj$&4ZhdHt8zu2-xlWRqTt|4Qe%S_s)D&>LLp~ zR!)U}eduALcU}a2Mbv-?9-Me>=VCx)BuNjozZjLRH9km5f6p}2@m#Ypx1LiTufgBP zYZ`SPOmr~@zt+BWoWQpTa=c^!ztBBRgbA(%y`QB%KA1K9CWh}f z?WV-ev76LjeGF)Ae~~V+HmEi`WxslK8u3;l2z6!!7uO3S+zQXx6qJI8s1U0^B}8hi zzIpfsTi@zvgI77`3ePj-)_t|l3L+TV2@B%?AzN|EWWt6 zB=PRwlx1usbJUuZ*amel#5kp3Gsmc9`N;~y$bcu=Rz8kv#J;D$cePHL3dhFPQcsAe ztg`6DElkL>7d*Le8%I)xubJDo{I!3fX;yh0G{U;+@v_`B8ht_vt{e2Y{JNd{a;&blepY21 zd-i?-mb*e_*V+35`6q@`g`QCtIG0Zmxj8}UG79Zx!KWI5pqQ16<@^gArd;!p6_D8F zM*Lb2Z`d+GTho$+3`cb)g6C+$m8+|2o z7M0U$#xmfkn+FYiN!}%f$+`EQy%scs5|lJ@Z7)~pIEPJbaxn#f#lt7=iBLGrzm2LJ zH`8;HZYCm}@}-ei+hQIG=QJ{L06R-AC*2U&-0sbODtj{Fg8sg|{)ixRf4=Tv5aN|E z3;}mAKxF?609Y+e>OWX*nuOnTgVA@LpZO<-GC)lk^Q<7DHYvAK%OM?`o$nXnnTGZ5 zHYZ78+tW6LYwZuP>wlmKr;ok?*hDg%Mg77c{XF}%ow-v9m!Ydvfd6!(La>KA=cN^N zAdB8~;tES`nBp3Vy|s8c>~75b%@1=&PAs;1DIm@7USnJ%4>( zmL;m(q5G|&mb>%AZ#NA8r9KiUL4i5yPq*dN*HtUHygCZcQbCN+l3ZE(!kTAmTK2=a zgyfuGei+8W+j8<;W2Jr_7=z%*M$k z$8Nc-S;oZ#0;mC|{k6Y{cPV-{-ET_G&nG0<||!g-)nwxIn&{^?#M*@N`N z`yam;ir6(&1!#ZKD?$mYUTF5G%*Q{y)+2x@H#jZ-ur;f`}60jG2$1*E_R3%w_6`4cdS7D;Ci~hpn!}HF^nyjXYV^-P~-q8D3zizxv z*mW}`6gn^VhMa0OK7Amn7*{{eTOXXsv$$<{?@-xr<25?DW6G=>4SD@kM}_6})FX(l zJu{iv%?EZFfiMQ6iDIKHVw%h98rVzr+49+YZxXNB;>muOID%-)Ac=C}1J6XI7MERx z6>!3dWZs|k$JR4yhVE=#58|3JI)}2!`QHP{OmKfA1sBXKk{5aQ@3MZ2`y@xK5>IXh zqnN-smR9W#$D+{w5mb-KrY7QbFcn(itz~q=_>pWq#9c|#TYk=zizq>-kKqL89%e1% zxVK!G!bQ~QIJIgqt)reazN0V7M_(!;1tFuW-6M8!#Z>oKKFd#v0fY$TAQoMsBP2 zoobrY`^335XSM0hoE=Gm?Z7_1cVK}KqegD>lxF9kO~%e_AH!*AQOYv2<(R___uzf4 z44!_e`Gtux^o6PQ&lnix{^7}z7+-igi3lg6^Xzxd6lm^Eq!r)UeuD5>bSXPlOYyu? 
z;580pap}=yu94VeLII{`xhls7*EEUs`tvtpBA!tN?i~6t%zRD`6-IeNaT&S_>d&NF zG4t;9U~y`FtOsAWSEo@I)9sCKXjRNbY~7~bvK!)-uvbT7SDqb|Opc`4q#bF+X-b7~ zUvW*4DLiiTKZ_ZZKZ>n3mk0G(_N=AI^Hwnb#lQ79c$Z-)seYN7>+K>b1Do%;?aAZj zQ_)y>Fs6sxNzANrZ8r^2T?f)SmtN=y}b-UUr2v&(;Yg}g4Sn$<#@$Y_|jd9(QDz+@`$9yMoKj`o+l zNK8R~$c3ZdK6)S&^@Co*Tg>vO16}OHtkc2CvSy|r5NCN!#CI>#;C7Ou`{g$e(;V-U z2`ZbP=4?BuaZ1Tm$|qP$jk`VggXz3CR~G zjQIvi-+wLjx$63W~OHZ=| zCJtv@%=F7+1Wq#Q7s!ZcX|uA=bbDy{$LY4g=KQfzj6-hi9B4B zdn_C1-k!k|EKu$E=4$#4MV{p~u4RJ!Wwm`TvS;6kQ2 z2tpa;>*sjbO@sI?vapD{tVHAH_(Z60Bt;%`)#xQr9wQD z3xWTexQmua&fJ_1nXQENb38B@Zd-Qgv0DuDH-DzDB=Pq4J+WqLdC)R5IGP#44CTF- z@q5cW)N;nJce8Xv(h^Y!qzw`)#8&%Of@ELSiV)xW#Fv8Yw}puWl!~soG@L?AU5^j( zN*Yqg`d)0wQ}nfvXF3Njfxss_lSG|~gpP`wg80xU57`DD-TW%Wi(}^U9*mV)XC}~1 zI>WfRD%GRXN_o1*XS=1dhDU0yFv(oNTRmHYqE_DP#BHSBWL9!S3(383vxA#n3RQkJ zHTUC*f5;>TD<)CKO`I#Uoh)@tPRs_ITOt?i6&GO5@qG=@#mU`$n8b4KlY2slH*6drA{)E=Susg-tuIFA*Zttb+g+#j=^l%?^U#DIL{%S( zF5}3joaDQT5Yf-`m`y+lO!<&pDKRM$b6F`pAJV){(ig+%*h}g*msY^hLJsLg&g3bN zzeWK7DgpZgpv)smLzbKKD6#F-H_1#c0=AOsGB(7vWeuCnbsGJ|>2Iwzv0hgcUEFRw z*LicW@J(?lG(f>SUC26{c&-*gk^ab?`_m7UXvj-Z#jG--W?1C!G-R{F8{9jO(`GVUnX)oA%%hXw2n#S3o7A75GwbI*AQ&LK7*<*SGOt zXpTbWcwkvYBh`FnGXnn}Mt_MnWrIQmYfu1{4ItqgnNa8*ZQVsd&2g$WxWZt?h z()XezHIK02!hx-L+*TJs2ltqb;1@F5!{+vG0X2r!!l7$Ew)#V^h|+<+8wfx24- zTC}(u6-ARUZrpkJ;Kbn@BC#>adS>=gU?!66UvnI?G9C4JD_YQ^fGwvG{{3Mu#E1=Q z3)hQAzcDrP$~c5T#`)#_*E6B~j;2b!M~i7t8o|*FUcz~i9H|?jZp%rOz8BMW@hhrV zZIn4$Hj1r59XUi8VY}j9;+}~=%Jv!$r!t9oy3)l8(6$aiD-+Sgks1)%nd7bxV5K3d z5ltQ87XuS3^&A2(hv&bKg1rvr6622f+`sFyHMqMRkv~ILu^le8d`b*5U-RkPYnuB4 ztl7{ob{|ga>ZP zS)*LW)cc80ioQbow_Rhvupo%*7+I?=8|{-4S$*8|8q+naIyWz+AViD#5`+c3%6t%6 z`NVDqw^#x&doqhyzmggEH}V1)F)nsdyb}p`Rvj`5RoEvmm=}Sp8(ji*q}6PxwY2AQ zpRa#9gt!DQ^a8S_0y)Rw;#J7OW~_e!%<^RJWX^Wm#P$&tKWs+ToP}C|CojLx%DLk> zhx3Uot36K!a(6~zg70S^`!F4-v&5)PcrWSqF@TKp4OWIPmK&w)G~m5tMy+qd6;d;f zRp?)W3D{HVR;#)_C9}eUL1Nt12OFpR3hGMwo{FN~$8JWJ^ED}#6DM>Yt@Z7oc;>d1 zTrXrT?IkI{1VrJ=q^q%@n;t;L1w2%&o0dPp3A+K_4Hy5BNxOWSR}USo1}~E%DoSKw 
z7mp-vcOFtAlb8}~=_|eO5J$LFcS1@f zukF!C7lJ+vs#yhu#MKISi2}-Jc6M{?u^70HIZA;b185@YdHc7m4 zAjtF~r+BwH4KC%>CWT#%RCv;xik2e{r%6z9uv2khrPl&@BaJL@KnsiG522)_}N(h>d#;^2GH8j2dcYbHYhaE&7ws4(xx+&t! z6Oe&|IYw0j$mJ_0q!dG%8OQxXbtV-B72I+vGHw9REcrFR=f7E>xT+9w3DR(R2;P_w zjB_{k!`_VJVZ-RD?x{Sn-^WF>I-u?7+Cu+SlV(-by<2 zpmT0)lAmv|nN;_1DPFzW|E7kMaF1|epOBXCodBCcifRaM#&=`#olUZm`<-$tD}hcv zeKA;pV_|W>qxuRrIr+83=*bmDvFKu?Ts}lJrzYiI%WY5SRH87K;|gnBj>d#n=x&UP zqGv4w@6=R|Y?g)YpVpXkcmg zBR#%4qR&kLHD9Wxpcu;%QCXi)oxoSfdDkq(>Gpd9Fk2mGi5kymJUK&&E6{ah$@DpHbqvx6M5tlom_H_#EAtQS;1TRml_K!E| z{I%MI>l~IQoaAGE7??D9?#{R1`Nw{Mp#NYL6o>>?Hv|=C-Sh+KE&nYZCZ2Q`APB#% zV774>8soOH`xJ(6*vU*IH=8Wf#ypnmP^kB@6jZ&(KKAs+S#E{vWbgWYr0wOfkno6h zt3<|Fmwn^7$y`!5IJLf7L}(gW3&$DxY#=EUHr0c!pBOv4TzXs)8NgW} zW^o`r;uL~y^>9Q8Ek%~81AL^NRiy8cKBK)SY`0_Yb%Rvh3U%#iH+{W&ucCWANXslc z&C3A#^A&B??eQ7`8#e17m?G3#Rv;-PHEi_uTKHCQ5$gHt0*F=_wtt7D=s{&HFB_um ztXyF*Z$g<=rLFF;N66;Ui=kH*b+HmYoIAg9$IW>II?0t?`%x-k1)Bnl??hWzmx_XG z^%j`9vV`2ABk6Sd_oNs7beJG(Gsd@LTNEea(rM$L6DNq|+ayRgL^~^?T!pYP_fwpp zVb<$${>9+Ye=;$iP-$3W_S_WDlclu1e#yszr*&1vI!)@9377|MmG2KpPma7U@yvJU zYlE-L|D>^yD6OLgcNkrItN+PZ>O}P8~cVWs-yAoX>xeox}-nnO?Rnz2dhwc?9kSf!id+ zy@P!*N+Ph>a5nuD4lGKY5XmtTaQb)K+x&A>>N=U&to8y*XtX@L4NmspNA^$YZ4;?s z8g-(dRd?{Q&`pYF2soh8JdMU`h@oQR9WCBue_j4%xv>PI(~Vht5%$2^8um-1Uf1H5 z!#Q;QOL$^?QMF3R2N}KH=?3a@3S9E?6`0hnS_oh)x4^H{$g`{dUrCr(KxDJVw$;*^@~}a7{sLp!rG~YFBw62<#pZ%r3gduIzBI*HLfsi2sO zDQKm~fdY2N7HFk-C_<+FJvJ)~SSo$y4RuhW+i-G*nqF66K6yJ^poLIVKw7E8Bh%SFyp5%ZWTer)+K>;0wEaV;51r$M~hAV zC~pcM*Y@$^H$wSX2G)}j?jVgllh1*)%9*tmOkN41<0O@>Y0IavIkF*P5UcT zKjFHy`=f-t@=Ac%yVAs=&;y4xZ_Kq2S)Vfk&g@)G11~ z=B$;Fq?`Lr@CTFW^>2I$`_^l@wmB{ATrfCQ}gwk{UktbR+YwVtzO@(#TJj=nSsIP;n76d8 z(O7e1P_WxqMmF6Lo<8{%Z6baH)(T2wT2g%;z}K#$ntJ(uvvw)OLa!K`*fpF@;npVk zVN`$wTojiDR@vOM<2w?%%K8VAf}iyfpdK5)ToYXt3V2ACxot)=yex5}dRBKCx<&$8 z`>D>T6t!|}pq1IwwzubpjXZx4-jgH8u zcTFklI#5A)VcW&%VVdV=%ShJ#IiS^ZbW|axNL>jD8-IxH{l2;hcuR!3$BLTI$ z1Bpd!HZ?f-(C7zKTRE5EBSDb`K6f+7M@MRGK#MzFpI=?}G0UV~(T0`Ya>I7%OKkwC z2?^~Rs}dac=N^^medUg;Wi>wSJJ% 
z^7|WU%`-N2CUxcjSQZM9I9DZheK?9YXVpaeT`0U~SUlj=e7Z`ti9=^XDot{<)M!Bf zjr&#SZ>`idT})PRIv}kH?lOOYp0+mL5x!gf6QxRnxCWcY1g!_TE5$limQao4l%@21 zco1gC<>-)r!003h4d-H*3tUNh7H-=+xJ8~R>HJ$=4Ua&Q3#^KoFA9YT-kJno3w5$- zT_*2LS1?Lm-`-xGrOZ7Cl@a-r!D(Mqc#)>TZcYT>3%H*kwUo16Bk{3WGiqzNZZh>} ztHQ5T)a|e|A_oP)lCzTM?*@5j=z&pXlgjWl65^Zdc1Ra*o8RGN0T5O7IHTrKR^V3o zEUz9~T#fTH-YAhvqT}6GF6Vn*;eB}GTz1oiln~q9;x;Q;U9v;LL^JZt!4y^-QdEIr z@H-}j%HTnSyHYo<>d-VR8Q(uqqbeGnDWyOej9n@n8uKQ9HJVQFn8?c0OZ>-j#`r>4 z@)#`>%r_dVFaSW|bm7#8!3&@3=!NbEA$ayluvuA!1m5_BbqqPhecdWVgOLIog2X

K44c3Q00aF`?4*b$AIfN5145lzkQ z&9!mnby0?J3x?b7E${D~MUxTBWS_`b1e{*g6ucQsW|PM5vjtHK$Z5OM$>*NFp)H9P zmnrZ?yxR-$X8s-H0ik8-pHYevbp><6Q>SH`{uM0_pTXI)X5GxkqotfLcUhTcTH`DV zqye-OeQHnxGj`SRFwEA)(EZaGz+JcL8zhD%vWFEQvFK3g8qn39wUQ8;qg!vrAMFrYD@a;+&SLPVKspK(NJEU%X2k~mF=7yYRwtr; zm4+GKhw0s&IXYrQ!41$pEuu{AieMYi)$)U)S`P^3R@CqiBn{}0#l=H!TexA=L zgFLR@9T1&Xw6BOItgKr*F0__>cIwEX>m+b&^lx{#ay*%LlAkancS7;S#USd2%49U( zth@idA>w}c`b7a+q#(}o>x>pffYS|JBtrBSZ?iVX_ETfImD3t$`Z1q zTGIfDdG0M;5e@i&ruYl$uv@SA6$?hyweO6fXHaukgWy$>ymNEFZ6LZY?CLX6@IWms zW({DYqnp0Zon+j1{Py*!7cbX0gU0Y^x|)nN+Yet8fZas`ERm-Nvm?VXCyOAzN!A_G zxTxMI-vmwJZ($H@*5g-N4?EdFoovX~M-sXoCP8$oOxSTiu}dnB3KNlk@v2nbz*R>( ziPAdd&D?c!=B1uS4SKG$_``$Dd!N zsB4vKs{fHavjWAKr&8nmB;?)6V`;>H-RoO-;-st79f-wuO#-N6j~=fWycLJd*AW@} z$x#EgmaB7f)XniGt3&X&Fn#1#_D2<&Z~pfs4tBmCY0x-HB|I4r^KHeobI5&mdj0;D z%MR2W69QSS+C-q}YK#qnW*`=)pZZl^bE-*-{n(Y+>`U(+p)b5ZFZtuW8F5`|mcY&z z=<_KzRHERE#b4VHMAl(!aWA^Mod3hq|GB#i5Ov6jNY#@I>L<(9 zq|1zZ8qZirGws*ZGUB{e9dw_o9V6lmg-jkEM^cn8-+Mg@q zG%#$V(twmain{Ie`>a*AiJa6C1{GXa`Np9WZ+_DD!E=lmv3Rpc+|XUax8SJh#G@1j zY*x1yo(VY%zZ+eSwCBcVOR^r~;3w#qCj8_HQlIwum~`+no)d{>peqc7mN z7fxkC5zmDq6T@D=dY{BmQaXA`NcdOBEwCxBv|ojhwbje}gmUtWLs<`snX`y3Ce3Zr zu2LC4v1&DtDPylI4Z)l9$D6O#3lxlq=36fc|CK+frO+&?R%BCvmnb(_Blvg2&b#nN zR5jj|7SFd@X3Tx{Xo>Op)*)CoM_%b3Hx_WaW#@0AQ(`8|Z)@=toArxR7*-Fi+xSmP z&V`8k>N+7NtP= z+VVIMF~qYEJBs15zls8zGBKdQ(*LUk9Gfk@=EQS0ap+37j3iSZ>H_fz5!YOEL9bXkd^gelvqMK z<|JJsQ5Z}lp#9pm>&iT#Kn>gt4#>!O1SZ54*h$~(noX9d9AQD+0yC{AFvv-AIGw~R z3W@QgRRizXFoM_7D>p7S%LnqpMsy~=r{j4W>8W#RUkr2ac4kE3^3XY6bk5*IBX_Kp z9yV`~937;_UwSs86^VG*MD-rEu^VG*laBGG$!zqz01_ssqJ*O;BzG3RxipK@G`9zIie zjPV%Et@^TD69`b>%vk79XZ(42aS(aFF11C9TF^mg{A6H${iqE>Yy?%KrXnP&Ze%HN zK#YMC&*&2dh;b<*U=Cf(=T@>VvY#)F^9ID1Nkcg=FX;Q2uBD?7*a!h&Xgb8N7xhu* zp5^d;-iPrKO177<^7-Au{1Si=mpWyktyRwAy?Uw@`Vp3ON3r8 zUWcLwpIU|}%7#QkGltt_2;Jo>i}*Vx){B`HXwcxtMF5vN%G>&JR3 
zc+Sq^m@#XyXuk(gSu_ISb7sG44C(0%2#Jo|1s(hPP-R#nv(&jN#_)bki-=?qB9CXq1w3$8$E7ouQ%f}3{{MGmOWnq0ml@E}`C+S}1QCC{Mq#GW35Pu-bqC|re=cf0~_?z4bcB)%9gr>*2+jQ~~4YAtH<&0AL zJ1Fe1gS{|azuGAu-Dn^ChG4dzv(s+qjzBGbQ9Ohk*OG1$%-Ge9=`#ZG-G6M`dR>Wh8Amt;YeD$5|b2jb0jRxi3*4-cO&gTNI<}TXP(ignoT;yN7tv7oB<&Q zmb9aFmv8-PCvS?iZK$bB&kwiCw{eLW#%w3W5}Z5iOz|AV;cG!@P-g0k=amVPapz83 z9#(U+DqtLYW5o=V+NQ)m3Pi+szr zNWsxbyp%s8zT6!Q*HBqn9E1Me4VTQHRaSi(ntCNxV6Ou$m{%~?v$SL_?rxURdJa~- zLwG728tN(8<6CS;qjh_BVVyh975ct^IJ1Hmn=?3Fe(X{izX z;8-cfY}U@fZ|eJaL-vf&?q#U!!h2P~DaPOcU+>4_Gt!jTff|!5i5PtI{;GSWHO)U5 zH)gAC;NjJyd|U(dNYyJo62L#$2NAYzWwX4I*~T9V>#j^HonF=z-Bw6+6fHtWvy49S zO7$VQfojAYouwqySZXUlcAmLEQGztM9dUi?dHkL6_1h7P&w*EMu5q?9(zttPl1Sc) z*~1fcDHToEHQF%+)^_zYVQt?~h`~m~#gWM3C!O}WHY?$cU!!L)67BKY_bZxTHO^!& z3Jj!1z`6p|DKpgK!T|Q?6hzT%>-S#b`5FfVz4m4&2L~bug@I*d zTbd_f(H%sUoJ&nwh*wsvGN>hUt}YOuRB2H=7_fc6e1*u_kaVAp&V9^5G8&dwzMUa8 z$4G^(?DV09bzS~gl*)C^O394Rio?{{pO`oj7Uve?bHuH*#V89@6K!!|5?1drt9T%8F>MbE>+X!$5VD&sShpQ$R*5gUG;9vPCd)PPC@Y& zItEk|t{G8q`=drL{Jbo^bLRc=Sl@JgW%|>9LnBVBSuqf@S(n}YmGyP&vAv7NYN?G$ zdY(BVwSfqh*#YDNGK+L$WAr*?wev7N^AAwL(@{@+qW4HVA*lwSz<_1(Lnt$T|7=xG z|Cr!W5(Cyp+-%!zbj-UPiCsQp5lzz}G>Rz=B2sqY7uf%^wXXjJvt>e}Ww&6eN-(RJqa(dKk-?d4qEq~d1ZjN)dHh{+4`tsm}3_U;|zJrcer z_8f>?_8NL9vkmM2Z1PJl z&@1aQJS#}qr=JH-@j;NOMZAu9r~h*OA{G5oo#YH|B|dep!}?P9GY_bU>H<5bG*mcb zQbLRLeT+9Mi7V-`T^`8*%!wc@`Qn77M6A{x!R7JD+G&$n%Jl=p_X$eIF&%B45deC< z+id<2QqY5HkjYbOtg;$;MEwW$>mobQKQ~b1-G`M7rs&&w4JI-8(q=Y5;<6jZaiOz?Xfk-D7uhL0@mctz^9eh*LielBvv3{%b>^I zEFf%y#Cf8Voop`>$do)hBi~o0W{ET5e+W}Z^I9c!1d28s+fS^Ye^h`(i~n@B1&H@b zx(JQvT>|mRjE8o?lW+qt8iJZ=kqU^$eg(o4tT*q4E51zwNR)mm_XStQ)3H#CU8uE=~5yLrJ)uCip8K)7R}5z0$#u8 zr;hMexX7S*ZydUkW4qJT+g*dM^3(}tDXr-i%FFfh#l#;<nnyl^p zRR*NfM!NY7TO&%iH+24nCg{S7ZFa{9EN$cRN$x*Fyr0C`Ph_#SE&6TT{M{;Y{TFvx z(GL-V#;slFeiZ%Z>KCPYy$2!PzZywtv`~^)bXrlip#d*DI| z&0Kx-khWA zM7ssv+EWlxS1M|I;9tb|qX%dMr@8`@sISyG{j9Tjk#Y2&#lTt0dU$#VLhe*L$~^Gh z`3}g^@nUAK+Xzds(^^$|nB7oVp)^5-_~PxXTT_$o1_;O>?HnV 
zE|XQhN4mZCdo9_nZGLDarO>dU;#k&&Br5fk7T=8pR|c3@LKMZWFJod#PL#MJ)0^nq zsk7RpLGN63UuhUlmOE}If??2?b>?3r>CE~o00#gsIAls4RcaV6KYX&XNj>%qQoDY^ zIVm{)jyn^dW9EQ>=;tCNgjQ!j6lsBuke>x~+LFu`BBRlYOHlaAqQ8`}g!g$EboAT% zICL3(ya0KuJd(Y?HJDv{>>vuG~vBys- z0{(iJTzZI{zUZM}Z={xw=WYpXOYW0s>2@}FfWvGoo_`6-jKSdhx?3=Bz0_D#S*E`1 zFxB?=$7IUsLUNCtjBg?Y!erm%>7Lx3L#MNw3)RGh|_X*0hMPjA2L$njUpS1xp zW7z5VoqVY`42&+Jmbm+n=pAW^ttwU~$?yFar>^-%QwcIVgMf{>q?*K~FB?NA35m=p zVcNVy(OEH0P7Kz_=X-NF2cHk zUff)syzn9GmjOi#9c}-Bb4fy4xb^F_B)+(2sfytjfdoyKDeo4#${{+oy2rl&Q8az* zVZRdeKYL12|2N}PB)Q7&B}ISU>tWF;>q{4c`(|5dkxT<=)$1ca^faa-@H)U+3nQ@k zpRH|t1@0Iv0yM1>Y3f{Vw&&+k$dScXO`3rZWdUcZr3wwL(ke)fy2#-`^x#77b9#bC zwI>L*?>G*Z(;CRBKYxf6V3F|o`q!-H)o{6Xz%K%NS(-^v2U{m(V(dQt>V^zX_#CJ2 zNGfPJ)Lw^hL&DHX-&4cK9=OeQKdT(}0e;y->!A|%N{R-U5+RtoYhvG@ywEG3dv5|Y*lZ+mJ??B zK6Sp`jzzgKc#p70xlIQzu&%+IEKFR5{b1=O+4!{QX!fFsv<`3|zWv_AlrAp2kIMV2 zMlMRZ!8$vpB^a{;sZTq4{NV~m015boSd_?h-MYYEUEn963#2;!5cLv12=!_g&G^Rz zVCf{*BB|u*^`R(q%|DkvbSl;@%fGU?_Pf5k-TFp$k-SQ1ItW_VMxN3l>^z(M;Vvga z%+pUlX-^}(jO~-uzrg{k{P-h68anz1QIo?9!{lcN?k8NK+ztkSNf6Jo$Dst{l>*ZC zUEzD%Wa$g-3JS(=*m$tSxZ`X=s*K5>4}6>vi0aZy{p(3C*5^G=y36ZS@okGK#(2gOlEtEXMeS^m~k779_c$FQo`@COTs1pq&CN<{$oKc9Z5*b1^ zT`aqp6UVDIKV8Q&?`Gf`b@e3pLjNN%KR&{dUp=$>itT^Q&% zf8AeiOxC$8`9c-SuAoZ?#qGrRBqAVmVG^&=PAb)zpe8J*-m}~&Z}o_}X(%zNEoqV~ zX?LQ;DdX2c=@Y#KLbH+bs|ST1YtNH(T77DdxF;qGnC}qkXe7Od=^M;QYfZk)9`Gs* zR`N*3tWL30tus&sy2vpTaBYdTqcz*p1Hni#LP6-6lGWV`Zo|5oWIFs}s6)kBavTO# zP-(2dL?v)PL5Co<{^zd^PP373%%*tG=-6tSm(twIQR}Kl>`dpl4bBi;MxFA`nm(jm zKTc1}ZDndmI~vkUMm+}he(2oRvh{2BpovAM`Mn3^x3WQP(1#|c2>xFD9iRjCW^cmE zxPCWtd`!p@{qlrQ;|1Y9vNtNF_|NPo+in1a^B{g0Qy?5Ya{G+k`hg^y=i{$=6g~0( z={;F5gyM4?tow?2Tg*Q=BO;8qu??kNodW8Tah6q847?7MD}_&l)6Kq{Jp1$AQIJB$^Wy1?-~1syBaQ`o}^%w0R@gcAA8{&?%zUB_GTgY4>Onl|?qOX!Q2OLa?|eh!3(<+}@xT1W;pwy+1mJ5H2kstchWalOb@^ZK%L&-o~c7DGa!g0=v9knQn`KC?7l{SBPJ)mbg34BoAuP<}`w2o!;f)W)?!cC1j3<@DfXSBBD zZt;)-Q>PGbUL9zspl4cOn!gUaedwd1Bpsz#7N8f^{s*gq8!!7(fr%ne74P0!^usoZ z^In4j9>yMArIk)Gcabvxy%-q-yg}k^*|>N|cAyAAx?MwxK1p3fmo3W7o2+Oi0%a&h 
zKq`_Gvqv!fiYP7W_i2`E7WRnDP0ms_GY$+wb>uCW{lRg6oV*<7gMpO1UGmRdkC$lHMhdd=v6k<`7)%-h{=Q8AIqpwGD8=-;Szzc5e zFCz&xkP z=r0SCgOE#S^hejXmSfDb;?(?C_|w$g9h0$bLG;dVkQOcI+BBAy3*^#2eo^SJ{){h` zHlyt>4z+LwEfWEQV>}Kw3kFJ^JK@#y#n8bUC=NTc=$E-Qd z#bx?MG&_^c9NZ-!LxCVU_LMV+ zlC{U?T0A-Tzfr*-LCGP8K?OQpYaJ8;uF76OUwfcaYaffu7sQ1szgU|=y&Fa82b zG6wpyE-#xu3LAU`f@EV%cp|3RE@HrFg}agwPdKp!A;>>R+DU)46H$|0+^n zMPpO_M5ZkY8@MINT4EgWDSH2Vj4`7dEhn`$jrlq0McYRua~LEjZTIKP(${gmI=t4} zt!?sQcSSDLY8q%cT33GSaVM%s2%5daIS!+X_ z*p(z;GFzuPU2CoxVVn0~7}gAqW@;&MX{ta`!pHUc|!{u5z_w?*1i#Q4)n;VPnp+|;SkddFw%h^BAoQeM%r_G@*GVDft*!ZaQ#HSavpw|9K=7-I7b2*rj!>23=bgzmYE`b$a9dlkkk#<@g$ z9~4g((O7IsT_3v+HO<{|jx)PI!LWML@WIE>paSpH>svcCL93RZbFPZ$3H~$`)Nrbq zxHXN-hC^D9erz*+tIZMpJAyt|m8YM%r@QC;N`1@}n{ahA9Ez#=yq?g!8A3S{&eD>; z&utnTfUmWZGd(+;b1luCtI8k}9tAL?K6uME9mNb=O6H?!p({PUZ$>OGff2*;-#en$ z_bWO|`3H7nmeBN0W`z?c<3qYPal~pL+sHk!)x2%0zjzZLEFb&+K@&IC3HGS{+oG1@ z!gaEI031^9_S~P%%Um|Bb#gnPfg52WvDqV~8E^nUG8|;8PydZu^#N>FgGyWm6Z=jY z*mIwIrU;*6)2jGZ1$j?H0#*rgo7M=<|atYZBVmjY7WS>fiGO{0*LrWazomZSK`2; zhyuQ+=fhIoS0K)S#-`CK#0P0V z|8)?FMn+-q@5r2smXn&SKJU3b$YogHJ7iswA7aSaWIX@AfT0!=spaGd8xWB(a>!$a zJ!n%o_z3&EAmY&=L__8I=HH539LsvJ)!lQX_Oe}Me|I;`PgM1;)f&vTp(&Z-h))k% zS;qj!fcX3HZ!=ewUvi1k*7-| zXI%nH5&~B%Or9+9+-4*Rg7vXj)6W}>Xnv#=kT4zZx&sL?&0NWd%Dqz}J&~NlP|pR| zauB7I&!=)F@MZR}0a7ST#ptF&o#`f9Y3p^FBZI|mVCP#u~KV^v3GQJ7ejM`;5eHvWMLpBSV7S9d*3ML zcwfY_NNJ|y}bL*PRVUN1iL&=@5kM5JsAKRG`JVhUSw z#U6d15W^(*9MrR)PA#H&rDj(98r))00(nl-R)w$Vcmnk#@?Yc8b%U=c92t9<=Y>bH z0nk!tbuTIW*2(t`t%Wd#&tLh$Kk~9UWv`gezXM#ME50Ho&-CTD?gs)N>N3*jR8oE3 zOYW2c|0ho-79$hYUq5GhZC$-0p2yOyK)8C_oo|HuxuzkRz)e3RgFMnu@Xxcw+ir=) zY67;R<_C%P1tz6&SvKznLj_{~sw#=YX(IKDW_|MYurBn34@J} zh57fxyhGYGF=+F&y0D+2C@-2_dD;-2?m{nsjI2pgqG;KSE}PN{FdB;@0B zSi&B8bAEkAz@~D^$j*_)+R4eGks|nyUGuh*GF6RBS}j{t4k(gNy;73Mbfnqn<*H0n z1P;Ur8EKYHBi0b@J?bF{>a?!t|93j$- z!C-eTj+bU#VSC#u;eo!rpQt*CiRER$B%o(6Z^*(9=yjitzMVYd#xL^FNPzxV$cety z6F8RDEPdm!Q1V4@q0i|*gtQKMX$lV&!PS3|Il=Q$w+cl3&W$$N5n?*2XoTni;wQ4?LB)po@94<{6aTf=%FyG`@q^JM;%k=SlBk5~&wEp=>iI1+9 
z>-8~4>EOYqf75_>pUHGYf$tWQnXM3SFDBEr!>*+aybeO9Ht~sW0Y7jBHTiN`(~$oM DOcpoM From 00315c745647972497bf319e7aef1bb774c81a1e Mon Sep 17 00:00:00 2001 From: Beck <164545837+validbeck@users.noreply.github.com> Date: Mon, 9 Feb 2026 15:06:01 -0800 Subject: [PATCH 09/14] Updating references to images --- .../metrics/log_metrics_over_time.ipynb | 24 +++++++++---------- .../custom_tests/implement_custom_tests.ipynb | 18 +++++++------- .../integrate_external_test_providers.ipynb | 4 ++-- 3 files changed, 23 insertions(+), 23 deletions(-) diff --git a/notebooks/how_to/metrics/log_metrics_over_time.ipynb b/notebooks/how_to/metrics/log_metrics_over_time.ipynb index 3f843b5ee..4b5c66dc2 100644 --- a/notebooks/how_to/metrics/log_metrics_over_time.ipynb +++ b/notebooks/how_to/metrics/log_metrics_over_time.ipynb @@ -545,8 +545,8 @@ "- In this example, since we've only logged a single data point, the visualization shows just one measurement.\n", "- As you continue logging metrics, the graph will populate with more points, enabling you to track trends and patterns.\n", "\n", - "![Metric Over Time block](../images/add_metric_over_time_block.png)\n", - "![AUC Score](../images/log_metric_auc_1.png)" + "![Metric Over Time block](./add_metric_over_time_block.png)\n", + "![AUC Score](./log_metric_auc_1.png)" ] }, { @@ -589,7 +589,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "![AUC Score](../images/log_metric_auc_2.png)" + "![AUC Score](./log_metric_auc_2.png)" ] }, { @@ -614,7 +614,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "![AUC Score](../images/log_metric_auc_3.png)" + "![AUC Score](./log_metric_auc_3.png)" ] }, { @@ -717,11 +717,11 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "![AUC Score](../images/log_metric_auc_4.png)\n", - "![Accuracy Score](../images/log_metric_accuracy.png)\n", - "![Precision Score](../images/log_metric_precision.png)\n", - "![Recall Score](../images/log_metric_recall.png)\n", - "![F1 Score](../images/log_metric_f1.png)" 
+ "![AUC Score](./log_metric_auc_4.png)\n", + "![Accuracy Score](./log_metric_accuracy.png)\n", + "![Precision Score](./log_metric_precision.png)\n", + "![Recall Score](./log_metric_recall.png)\n", + "![F1 Score](./log_metric_f1.png)" ] }, { @@ -767,7 +767,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "![GINI Score](../images/log_metric_satisfactory.png)" + "![GINI Score](./log_metric_satisfactory.png)" ] }, { @@ -800,7 +800,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "![GINI Score](../images/log_metric_attention.png)" + "![GINI Score](./log_metric_attention.png)" ] }, { @@ -840,7 +840,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "![GINI Score](../images/log_metric_satisfactory_2.png)" + "![GINI Score](./log_metric_satisfactory_2.png)" ] }, { diff --git a/notebooks/how_to/tests/custom_tests/implement_custom_tests.ipynb b/notebooks/how_to/tests/custom_tests/implement_custom_tests.ipynb index b5b4952c7..bc3537190 100644 --- a/notebooks/how_to/tests/custom_tests/implement_custom_tests.ipynb +++ b/notebooks/how_to/tests/custom_tests/implement_custom_tests.ipynb @@ -421,7 +421,7 @@ "\n", "To do this, go to the documentation page of the model you registered above and navigate to the `Model Development` -> `Model Evaluation` section. Then hover between any existing content block to reveal the `+` button as shown in the screenshot below.\n", "\n", - "![screenshot showing insert button for test-driven blocks](../../images/insert-test-driven-block.png)" + "![screenshot showing insert button for test-driven blocks](./insert-test-driven-block.png)" ] }, { @@ -430,7 +430,7 @@ "source": [ "Now click on the `+` button and select the `Test-Driven Block` option. This will open a dialog where you can select `My Custom Tests Confusion Matrix` from the list of available tests. 
You can preview the result and then click `Insert Block` to add it to the documentation.\n", "\n", - "![screenshot showing how to insert a test-driven block](../../images/insert-test-driven-block-custom.png)\n", + "![screenshot showing how to insert a test-driven block](./insert-test-driven-block-custom.png)\n", "\n", "The test should match the result you see above. It is now part of your documentation and will now be run everytime you run `vm.run_documentation_tests()` for your model. Let's do that now." ] @@ -586,7 +586,7 @@ "source": [ "Since the test has been run and logged, you can add it to your documentation using the same process as above. It should look like this:\n", "\n", - "![screenshot showing hyperparameters test](../../images/hyperparameters-custom-metric.png)\n", + "![screenshot showing hyperparameters test](./hyperparameters-custom-metric.png)\n", "\n", "For our simple toy model, there are aren't really any proper hyperparameters but you can see how this could be useful for more complex models that have gone through hyperparameter tuning." ] @@ -666,7 +666,7 @@ "source": [ "Again, you can add this to your documentation to see how it looks:\n", "\n", - "![screenshot showing BTC price metric](../../images/external-data-custom-test.png)" + "![screenshot showing BTC price metric](./external-data-custom-test.png)" ] }, { @@ -756,7 +756,7 @@ "Play around with this and see how you can use parameters, default values and other features to make your custom tests more flexible and useful.\n", "\n", "Here's how this one looks in the documentation:\n", - "![screenshot showing parameterized test](../../images/parameterized-custom-metric.png)" + "![screenshot showing parameterized test](./parameterized-custom-metric.png)" ] }, { @@ -815,7 +815,7 @@ "source": [ "Notice how you can return the tables as a dictionary where the key is the title of the table and the value is the table itself. 
You could also just return the tables by themselves but this way you can give them a title to more easily identify them in the result.\n", "\n", - "![screenshot showing multiple tables and plots](../../images/multiple-tables-plots-custom-metric.png)" + "![screenshot showing multiple tables and plots](./multiple-tables-plots-custom-metric.png)" ] }, { @@ -868,7 +868,7 @@ "source": [ "Adding this custom test to your documentation will display the image:\n", "\n", - "![screenshot showing image custom test](../../images/image-in-custom-metric.png)" + "![screenshot showing image custom test](./image-in-custom-metric.png)" ] }, { @@ -896,7 +896,7 @@ " \n", "run_test(\n", " \"my_custom_tests.MyPNGCorrelationMatrix\",\n", - " params={\"path\": \"../../images/pearson-correlation-matrix.png\"},\n", + " params={\"path\": \"./pearson-correlation-matrix.png\"},\n", ").log()" ] }, @@ -906,7 +906,7 @@ "source": [ "The image is displayed in the test result:\n", "\n", - "![screenshot showing image from file](../../images/pearson-correlation-matrix-test-output.png)" + "![screenshot showing image from file](./pearson-correlation-matrix-test-output.png)" ] }, { diff --git a/notebooks/how_to/tests/custom_tests/integrate_external_test_providers.ipynb b/notebooks/how_to/tests/custom_tests/integrate_external_test_providers.ipynb index 62650af90..fd84b39ca 100644 --- a/notebooks/how_to/tests/custom_tests/integrate_external_test_providers.ipynb +++ b/notebooks/how_to/tests/custom_tests/integrate_external_test_providers.ipynb @@ -756,7 +756,7 @@ "\n", "Now that the result has been logged to the ValidMind Platform, you can add it to your model documentation. This will add the result where you specify but it also will add the test to the template so it gets run anytime you `run_documentation_tests()`. To do this, go to the documentation page of the model you connected to above and navigate to the `Model Development` -> `Model Evaluation` section. 
Then hover between any existing content block to reveal the `+` button as shown in the screenshot below.\n", "\n", - "![screenshot showing insert button for test-driven blocks](../../images/insert-test-driven-block.png)" + "![screenshot showing insert button for test-driven blocks](./insert-test-driven-block.png)" ] }, { @@ -765,7 +765,7 @@ "source": [ "Now click on the `+` button and select the `Test-Driven Block` option. This will open a dialog where you can select `My Test Provider Confusion Matrix` from the list of available tests. You can preview the result and then click `Insert Block` to add it to the documentation.\n", "\n", - "![screenshot showing how to insert a test-driven block](../../images/insert-test-driven-block-test-provider.png)\n", + "![screenshot showing how to insert a test-driven block](./insert-test-driven-block-test-provider.png)\n", "\n", "The test should match the result you see above." ] From 493867e1757270ccebde03557a74292638a673f2 Mon Sep 17 00:00:00 2001 From: Beck <164545837+validbeck@users.noreply.github.com> Date: Tue, 10 Feb 2026 10:28:28 -0800 Subject: [PATCH 10/14] Missed some links --- .../use_dataset_model_objects.ipynb | 2002 +++++++-------- .../custom_tests/implement_custom_tests.ipynb | 2182 ++++++++--------- 2 files changed, 2092 insertions(+), 2092 deletions(-) diff --git a/notebooks/how_to/data_and_datasets/use_dataset_model_objects.ipynb b/notebooks/how_to/data_and_datasets/use_dataset_model_objects.ipynb index 2b425daff..5394f131b 100644 --- a/notebooks/how_to/data_and_datasets/use_dataset_model_objects.ipynb +++ b/notebooks/how_to/data_and_datasets/use_dataset_model_objects.ipynb @@ -1,1003 +1,1003 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Introduction to ValidMind Dataset and Model Objects\n", - "\n", - "When writing custom tests, it is essential to be aware of the interfaces of the ValidMind Dataset and ValidMind Model, which are used as input arguments.\n", - "\n", - "As a 
model developer, writing custom tests is beneficial when the ValidMind library lacks a built-in test for your specific needs. For example, a model might require new tests to evaluate specific aspects of the model or dataset based on a particular use case.\n", - "\n", - "This interactive notebook offers a detailed understanding of ValidMind objects and their use in writing custom tests. It introduces various interfaces provided by these objects and demonstrates how they can be leveraged to implement tests effortlessly." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "::: {.content-hidden when-format=\"html\"}\n", - "## Contents \n", - "- [About ValidMind](#toc1__) \n", - " - [Before you begin](#toc1_1__) \n", - " - [New to ValidMind?](#toc1_2__) \n", - " - [Key concepts](#toc1_3__) \n", - "- [Setting up](#toc2__) \n", - " - [Install the ValidMind Library](#toc2_1__) \n", - " - [Initialize the ValidMind Library](#toc2_2__) \n", - " - [Register sample model](#toc2_2_1__) \n", - " - [Apply documentation template](#toc2_2_2__) \n", - " - [Get your code snippet](#toc2_2_3__) \n", - "- [Load the demo dataset](#toc3__) \n", - " - [Prepocess the raw dataset](#toc3_1__) \n", - "- [Train a model for testing](#toc4__) \n", - "- [Explore basic components of the ValidMind library](#toc5__) \n", - " - [VMDataset Object](#toc5_1__) \n", - " - [Initialize the ValidMind datasets](#toc5_1_1__) \n", - " - [ Interfaces of the dataset object](#toc5_1_2__) \n", - " - [Using VM Dataset object as arguments in custom tests](#toc5_2__) \n", - " - [Run the test](#toc5_2_1__) \n", - " - [Using VM Dataset object and parameters as arguments in custom tests](#toc5_3__) \n", - " - [VMModel Object](#toc5_4__) \n", - " - [Initialize ValidMind model object](#toc5_5__) \n", - " - [Assign predictions to the datasets](#toc5_6__) \n", - " - [Using VM Model and Dataset objects as arguments in Custom tests](#toc5_7__) \n", - " - [Log the test results](#toc5_8__) \n", - "- [Where 
to go from here](#toc6__) \n", - " - [Use cases](#toc6_1__) \n", - " - [More how-to guides and code samples](#toc6_2__) \n", - " - [Discover more learning resources](#toc6_3__) \n", - "- [Upgrade ValidMind](#toc7__) \n", - "\n", - ":::\n", - "\n", - "" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "## About ValidMind\n", - "\n", - "ValidMind is a suite of tools for managing model risk, including risk associated with AI and statistical models. You use the ValidMind Library to automate documentation and validation tests, and then use the ValidMind Platform to collaborate on model documentation. Together, these products simplify model risk management, facilitate compliance with regulations and institutional standards, and enhance collaboration between yourself and model validators.\n", - "\n", - "\n", - "\n", - "### Before you begin\n", - "\n", - "This notebook assumes you have basic familiarity with Python, including an understanding of how functions work. If you are new to Python, you can still run the notebook but we recommend further familiarizing yourself with the language.\n", - "\n", - "If you encounter errors due to missing modules in your Python environment, install the modules with `pip install`, and then re-run the notebook. For more help, refer to [Installing Python Modules](https://docs.python.org/3/installing/index.html).\n", - "\n", - "\n", - "\n", - "### New to ValidMind?\n", - "\n", - "If you haven't already seen our documentation on the [ValidMind Library](https://docs.validmind.ai/developer/validmind-library.html), we recommend you begin by exploring the available resources in this section. There, you can learn more about documenting models and running tests, as well as find code samples and our Python Library API reference.\n", - "\n", - "

For access to all features available in this notebook, you'll need access to a ValidMind account.\n", - "

\n", - "Register with ValidMind
\n", - "\n", - "\n", - "\n", - "### Key concepts\n", - "\n", - "Here, we will focus on ValidMind dataset, ValidMind model and tests to use these objects to generate artefacts for the documentation.\n", - "\n", - "**Tests**: A function contained in the ValidMind Library, designed to run a specific quantitative test on the dataset or model. Tests are the building blocks of ValidMind, used to evaluate and document models and datasets, and can be run individually or as part of a suite defined by your model documentation template.\n", - "\n", - "**Custom tests**: Custom tests are functions that you define to evaluate your model or dataset. These functions can be registered via the ValidMind Library to be used with the ValidMind Platform.\n", - "\n", - "**Inputs**: Objects to be evaluated and documented in the ValidMind Library. They can be any of the following:\n", - "\n", - "- **model**: A single ValidMind model object that has been initialized in ValidMind with [`vm.init_model()`](https://docs.validmind.ai/validmind/validmind.html#init_model).\n", - "- **dataset**: Single ValidMind dataset object that has been initialized in ValidMind with [`vm.init_dataset()`](https://docs.validmind.ai/validmind/validmind.html#init_dataset).\n", - "- **models**: A list of ValidMind models - usually this is used when you want to compare multiple models in your custom test.\n", - "- **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom test. See this [example](https://docs.validmind.ai/notebooks/how_to/tests/run_tests/configure_tests/run_tests_that_require_multiple_datasets.html) for more information.\n", - "\n", - "**Parameters**: Additional arguments that can be passed when running a ValidMind test, used to pass additional information to a test, customize its behavior, or provide additional context.\n", - "\n", - "**Outputs**: Tests can return elements like tables or plots. 
Tables may be a list of dictionaries (each representing a row) or a pandas DataFrame. Plots may be matplotlib or plotly figures.\n", - "\n", - "**Dataset based Test**\n", - "\n", - "![Dataset based test architecture](./dataset_image.png)\n", - "The dataset based tests take VM dataset object(s) as inputs, test configuration as test parameters to produce `Outputs` as mentioned above.\n", - "\n", - "**Model based Test**\n", - "\n", - "![Model based test architecture](./model_image.png)\n", - "Similar to datasest based tests, the model based tests as an additional input that is VM model object. It allows to identify prediction values of a specific model in the dataset object. " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "## Setting up" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Install the ValidMind Library\n", - "\n", - "Please note the following recommended Python versions to use:\n", - "\n", - "- Python 3.7 > x <= 3.11\n", - "\n", - "To install the library:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%pip install -q validmind" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Initialize the ValidMind Library" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "#### Register sample model\n", - "\n", - "Let's first register a sample model for use with this notebook:\n", - "\n", - "1. In a browser, [log in to ValidMind](https://docs.validmind.ai/guide/configuration/log-in-to-validmind.html).\n", - "\n", - "2. In the left sidebar, navigate to **Inventory** and click **+ Register Model**.\n", - "\n", - "3. Enter the model details and click **Next >** to continue to assignment of model stakeholders. ([Need more help?](https://docs.validmind.ai/guide/model-inventory/register-models-in-inventory.html))\n", - "\n", - "4. 
Select your own name under the **MODEL OWNER** drop-down.\n", - "\n", - "5. Click **Register Model** to add the model to your inventory." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "#### Apply documentation template\n", - "\n", - "Once you've registered your model, let's select a documentation template. A template predefines sections for your model documentation and provides a general outline to follow, making the documentation process much easier.\n", - "\n", - "1. In the left sidebar that appears for your model, click **Documents** and select **Documentation**.\n", - "\n", - "2. Under **TEMPLATE**, select `Binary classification`.\n", - "\n", - "3. Click **Use Template** to apply the template." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "#### Get your code snippet\n", - "\n", - "ValidMind generates a unique _code snippet_ for each registered model to connect with your developer environment. You initialize the ValidMind Library with this code snippet, which ensures that your documentation and tests are uploaded to the correct model when you run the notebook.\n", - "\n", - "1. On the left sidebar that appears for your model, select **Getting Started** and click **Copy snippet to clipboard**.\n", - "2. 
Next, [load your model identifier credentials from an `.env` file](https://docs.validmind.ai/developer/model-documentation/store-credentials-in-env-file.html) or replace the placeholder with your own code snippet:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "metadata": {} - }, - "outputs": [], - "source": [ - "# Load your model identifier credentials from an `.env` file\n", - "\n", - "%load_ext dotenv\n", - "%dotenv .env\n", - "\n", - "# Or replace with your code snippet\n", - "\n", - "import validmind as vm\n", - "\n", - "vm.init(\n", - " # api_host=\"...\",\n", - " # api_key=\"...\",\n", - " # api_secret=\"...\",\n", - " # model=\"...\",\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%matplotlib inline\n", - "\n", - "import xgboost as xgb" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "## Load the demo dataset" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from validmind.datasets.classification import customer_churn as demo_dataset\n", - "\n", - "raw_df = demo_dataset.load_data()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Prepocess the raw dataset" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "train_df, validation_df, test_df = demo_dataset.preprocess(raw_df)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "## Train a model for testing\n", - "\n", - "We train a simple customer churn model for our test." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "x_train = train_df.drop(demo_dataset.target_column, axis=1)\n", - "y_train = train_df[demo_dataset.target_column]\n", - "x_val = validation_df.drop(demo_dataset.target_column, axis=1)\n", - "y_val = validation_df[demo_dataset.target_column]\n", - "\n", - "model = xgb.XGBClassifier(early_stopping_rounds=10)\n", - "model.set_params(\n", - " eval_metric=[\"error\", \"logloss\", \"auc\"],\n", - ")\n", - "model.fit(\n", - " x_train,\n", - " y_train,\n", - " eval_set=[(x_val, y_val)],\n", - " verbose=False,\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "## Explore basic components of the ValidMind library\n", - "\n", - "In this section, you will learn about the basic objects of the ValidMind library that are necessary to implement both custom and built-in tests. As explained above, these objects are:\n", - "* VMDataset: [The high level APIs can be found here](https://docs.validmind.ai/validmind/validmind/vm_models.html#VMDataset)\n", - "* VMModel: [The high level APIs can be found here](https://docs.validmind.ai/validmind/validmind/vm_models.html#VMModel)\n", - "\n", - "Let's understand these objects and their interfaces step by step: " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### VMDataset Object" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "#### Initialize the ValidMind datasets\n", - "\n", - "You can initialize a ValidMind dataset object using the [`init_dataset`](https://docs.validmind.ai/validmind/validmind.html#init_dataset) function from the ValidMind (`vm`) module.\n", - "\n", - "The function wraps the dataset to create a ValidMind `Dataset` object so that you can write tests effectively using the common interface provided by the VM objects. 
This step is always necessary every time you want to connect a dataset to documentation and produce test results through ValidMind. You only need to do it one time per dataset.\n", - "\n", - "This function takes a number of arguments. Some of the arguments are:\n", - "\n", - "- `dataset` — the raw dataset that you want to provide as input to tests\n", - "- `input_id` - a unique identifier that allows tracking what inputs are used when running each individual test\n", - "- `target_column` — a required argument if tests require access to true values. This is the name of the target column in the dataset\n", - "\n", - "The detailed list of the arguments can be found [here](https://docs.validmind.ai/validmind/validmind.html#init_dataset) " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# vm_raw_dataset is now a VMDataset object that you can pass to any ValidMind test\n", - "vm_raw_dataset = vm.init_dataset(\n", - " dataset=raw_df,\n", - " input_id=\"raw_dataset\",\n", - " target_column=\"Exited\",\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Once you have a ValidMind dataset object (VMDataset), you can inspect its attributes and methods using the inspect_obj utility module. This module provides a list of available attributes and interfaces for use in tests. Understanding how to use VMDatasets is crucial for comprehending how a custom test functions." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from validmind.utils import inspect_obj\n", - "inspect_obj(vm_raw_dataset)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "#### Interfaces of the dataset object\n", - "\n", - "**DataFrame**" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "vm_raw_dataset.df" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**Feature columns**" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "vm_raw_dataset.feature_columns" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**Target column**" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "vm_raw_dataset.target_column" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**Features values**" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "vm_raw_dataset.x_df()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**Target value**" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "vm_raw_dataset.y_df()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**Numeric feature columns** " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "vm_raw_dataset.feature_columns_numeric" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**Categorical feature columns** " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "vm_raw_dataset.feature_columns_categorical" - ] - }, - { - "cell_type": "markdown", 
- "metadata": {}, - "source": [ - "Similarly, you can use all other interfaces of the [VMDataset objects](https://docs.validmind.ai/validmind/validmind/vm_models.html#VMDataset) " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Using VM Dataset object as arguments in custom tests\n", - "\n", - "A custom test is simply a Python function that takes two types of arguments: `inputs` and `params`. The `inputs` are ValidMind objects (`VMDataset`, `VMModel`), and the `params` are additional parameters required for the underlying computation of the test. We will discuss both types of arguments in the following sections.\n", - "\n", - "Let's start with a custom test that requires only a ValidMind dataset object. In this example, we will check the balance of classes in the target column of the dataset:\n", - "\n", - "- The custom test below requires a single argument of type `VMDataset` (dataset).\n", - "- The `my_custom_tests.ClassImbalance` is a unique test identifier that can be assigned using the `vm.test` decorator functionality. This unique test ID will be used in the platform to load test results in the documentation.\n", - "- The `dataset.target_column` and `dataset.df` attributes of the `VMDataset` object are used in the test.\n", - "\n", - "Other high-level APIs (attributes and methods) of the dataset object are listed [here](https://docs.validmind.ai/validmind/validmind/vm_models.html#VMDataset).\n", - "\n", - "If you've gone through the [Implement custom tests notebook](../tests/custom_tests/implement_custom_tests.ipynb), you should have a good understanding of how custom tests are implemented in details. If you haven't, we recommend going through that notebook first." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from validmind.vm_models.dataset.dataset import VMDataset\n", - "import pandas as pd\n", - "\n", - "@vm.test(\"my_custom_tests.ClassImbalance\")\n", - "def class_imbalance(dataset):\n", - " # Can only run this test if we have a Dataset object\n", - " if not isinstance(dataset, VMDataset):\n", - " raise ValueError(\"ClassImbalance requires a validmind Dataset object\")\n", - "\n", - " if dataset.target_column is None:\n", - " print(\"Skipping class_imbalance test because no target column is defined\")\n", - " return\n", - "\n", - " # VMDataset object provides target_column attribute\n", - " target_column = dataset.target_column\n", - " # we can access pandas DataFrame using df attribute\n", - " imbalance_percentages = dataset.df[target_column].value_counts(\n", - " normalize=True\n", - " )\n", - " classes = list(imbalance_percentages.index) \n", - " percentages = list(imbalance_percentages.values * 100)\n", - "\n", - " return pd.DataFrame({\"Classes\":classes, \"Percentage\": percentages})" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "#### Run the test\n", - "\n", - "Let's run the test using the `run_test` method, which is part of the `validmind.tests` module. Here, we pass the `dataset` through the `inputs`. Similarly, you can pass `datasets`, `model`, or `models` as inputs if your custom test requires them. In this example below, we run the custom test `my_custom_tests.ClassImbalance` by passing the `dataset` through the `inputs`. 
" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from validmind.tests import run_test\n", - "result = run_test(\n", - " test_id=\"my_custom_tests.ClassImbalance\",\n", - " inputs={\n", - " \"dataset\": vm_raw_dataset\n", - " }\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You can move custom tests into separate modules in a folder. It allows you to take one-off tests and move them into an organized structure that makes it easier to manage, maintain and share them. We have provided a seperate notebook with detailed explaination [here](../tests/custom_tests/integrate_external_test_providers.ipynb) " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Using VM Dataset object and parameters as arguments in custom tests\n", - "\n", - "Simlilar to `inputs`, you can pass `params` to a custom test by providing a dictionary of parameters to the `run_test()` function. The parameters will override any default parameters set in the custom test definition. Note that the `dataset` is still passed as `inputs`. \n", - "Let's modify the class imbalance test so that it provides flexibility to `normalize` the results." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from validmind.vm_models.dataset.dataset import VMDataset\n", - "import pandas as pd\n", - "\n", - "@vm.test(\"my_custom_tests.ClassImbalance\")\n", - "def class_imbalance(dataset, normalize=True):\n", - " # Can only run this test if we have a Dataset object\n", - " if not isinstance(dataset, VMDataset):\n", - " raise ValueError(\"ClassImbalance requires a validmind Dataset object\")\n", - "\n", - " if dataset.target_column is None:\n", - " print(\"Skipping class_imbalance test because no target column is defined\")\n", - " return\n", - "\n", - " # VMDataset object provides target_column attribute\n", - " target_column = dataset.target_column\n", - " # we can access pandas DataFrame using df attribute\n", - " imbalance_percentages = dataset.df[target_column].value_counts(\n", - " normalize=normalize\n", - " )\n", - " classes = list(imbalance_percentages.index) \n", - " if normalize: \n", - " result = pd.DataFrame({\"Classes\":classes, \"Percentage\": list(imbalance_percentages.values*100)})\n", - " else:\n", - " result = pd.DataFrame({\"Classes\":classes, \"Count\": list(imbalance_percentages.values)})\n", - " return result" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "In this example, the `normalize` parameter is set to `False`, so the class counts will not be normalized. You can change the value to `True` if you want the counts to be normalized. The results of the test will reflect this flexibility, allowing for different outputs based on the parameter passed.\n", - "\n", - "Here, we have passed the `dataset` through the `inputs` and the `normalize` parameter using the `params`." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from validmind.tests import run_test\n", - "result = run_test(\n", - " test_id = \"my_custom_tests.ClassImbalance\",\n", - " inputs={\"dataset\": vm_raw_dataset},\n", - " params={\"normalize\": True},\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### VMModel Object" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Initialize ValidMind model object\n", - "\n", - "Similar to ValidMind `Dataset` object, you can initialize a ValidMind Model object using the [`init_model`](https://docs.validmind.ai/validmind/validmind.html#init_model) function from the ValidMind (`vm`) module.\n", - "\n", - "This function takes a number of arguments. Some of the arguments are:\n", - "\n", - "- `model` — the raw model that you want evaluate\n", - "- `input_id` - a unique identifier that allows tracking what inputs are used when running each individual test\n", - "\n", - "The detailed list of the arguments can be found [here](https://docs.validmind.ai/validmind/validmind.html#init_model) " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "\n", - "vm_model = vm.init_model(\n", - " model=model,\n", - " input_id=\"xgb_model\",\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's inspect the methods and attributes of the model now:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "inspect_obj(vm_model)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Assign predictions to the datasets\n", - "\n", - "We can now use the `assign_predictions()` method from the `Dataset` object to link existing predictions to any model. 
If no prediction values are passed, the method will compute predictions automatically:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "vm_train_ds = vm.init_dataset(\n", - " input_id=\"train_dataset\",\n", - " dataset=train_df,\n", - " type=\"generic\",\n", - " target_column=demo_dataset.target_column,\n", - ")\n", - "\n", - "vm_train_ds.assign_predictions(model=vm_model)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You can see below, the extra prediction column (`xgb_model_prediction`) for the model (`xgb_model`) has been added in the dataset." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "print(vm_train_ds)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Using VM Model and Dataset objects as arguments in Custom tests\n", - "\n", - "We will now create a `@vm.test` wrapper that will allow you to create a reusable test. Note the following changes in the code below:\n", - "\n", - "- The function `confusion_matrix` takes two arguments `dataset` and `model`. This is a `VMDataset` and `VMModel` object respectively.\n", - " - `VMDataset` objects allow you to access the dataset's true (target) values by accessing the `.y` attribute.\n", - " - `VMDataset` objects allow you to access the predictions for a given model by accessing the `.y_pred()` method.\n", - "- The function docstring provides a description of what the test does. 
This will be displayed along with the result in this notebook as well as in the ValidMind Platform.\n", - "- The function body calculates the confusion matrix using the `sklearn.tests.confusion_matrix` function as we just did above.\n", - "- The function then returns the `ConfusionMatrixDisplay.figure_` object - this is important as the ValidMind Library expects the output of the custom test to be a plot or a table.\n", - "- The `@vm.test` decorator is doing the work of creating a wrapper around the function that will allow it to be run by the ValidMind Library. It also registers the test so it can be found by the ID `my_custom_tests.ConfusionMatrix` (see the section below on how test IDs work in ValidMind and why this format is important)\n", - "\n", - "Similarly, you can use the functinality provided by `VMDataset` and `VMModel` objects. You can refer our documentation page for all the avalialble APIs [here](https://docs.validmind.ai/validmind/validmind.html#init_dataset)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from sklearn import metrics\n", - "import matplotlib.pyplot as plt\n", - "@vm.test(\"my_custom_tests.ConfusionMatrix\")\n", - "def confusion_matrix(dataset, model):\n", - " \"\"\"The confusion matrix is a table that is often used to describe the performance of a classification model on a set of data for which the true values are known.\n", - "\n", - " The confusion matrix is a 2x2 table that contains 4 values:\n", - "\n", - " - True Positive (TP): the number of correct positive predictions\n", - " - True Negative (TN): the number of correct negative predictions\n", - " - False Positive (FP): the number of incorrect positive predictions\n", - " - False Negative (FN): the number of incorrect negative predictions\n", - "\n", - " The confusion matrix can be used to assess the holistic performance of a classification model by showing the accuracy, precision, recall, and F1 score of the 
model on a single figure.\n", - " \"\"\"\n", - " # we can retrieve traget value from dataset which is y attribute\n", - " y_true = dataset.y\n", - " # The prediction value of a specific model using y_pred method \n", - " y_pred = dataset.y_pred(model=model)\n", - "\n", - " confusion_matrix = metrics.confusion_matrix(y_true, y_pred)\n", - "\n", - " cm_display = metrics.ConfusionMatrixDisplay(\n", - " confusion_matrix=confusion_matrix, display_labels=[False, True]\n", - " )\n", - " cm_display.plot()\n", - " plt.close()\n", - "\n", - " return cm_display.figure_ # return the figure object itself" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Here, we run test using two inputs; `dataset` and `model`. " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from validmind.tests import run_test\n", - "result = run_test(\n", - " test_id = \"my_custom_tests.ConfusionMatrix\",\n", - " inputs={\n", - " \"dataset\": vm_train_ds,\n", - " \"model\": vm_model,\n", - " }\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Log the test results\n", - "\n", - "You can log any test result to the ValidMind Platform with the `.log()` method of the result object. This will allow you to add the result to the documentation.\n", - "\n", - "You can now do the same for the confusion matrix results." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "result.log()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "## Where to go from here\n", - "\n", - "In this notebook you have learned the end-to-end process to document a model with the ValidMind Library, running through some very common scenarios in a typical model development setting:\n", - "\n", - "- Running out-of-the-box tests\n", - "- Documenting your model by adding evidence to model documentation\n", - "- Extending the capabilities of the ValidMind Library by implementing custom tests\n", - "- Ensuring that the documentation is complete by running all tests in the documentation template\n", - "\n", - "As a next step, you can explore the following notebooks to get a deeper understanding on how the ValidMind Library allows you generate model documentation for any use case:\n", - "\n", - "\n", - "\n", - "### Use cases\n", - "\n", - "- [Document an application scorecard model](../use_cases/credit_risk/application_scorecard_full_suite.ipynb)\n", - "- [Linear regression documentation demo](../use_cases/regression/quickstart_regression_full_suite.ipynb)\n", - "- [LLM model documentation demo](../use_cases/nlp_and_llm/foundation_models_integration_demo.ipynb)\n", - "\n", - "\n", - "\n", - "### More how-to guides and code samples\n", - "\n", - "- [Explore available tests in detail](../tests/explore_tests/explore_tests.ipynb)\n", - "- [In-depth guide for implementing custom tests](../tests/custom_tests/implement_custom_tests.ipynb)\n", - "- [In-depth guide to external test providers](../tests/custom_tests/integrate_external_test_providers.ipynb)\n", - "- [Configuring dataset features](./dataset_inputs/configure_dataset_features.ipynb)\n", - "- [Introduction to unit and composite tests](../metrics/run_unit_metrics.ipynb)\n", - "\n", - "\n", - "\n", - "### Discover more learning resources\n", - "\n", - "All 
notebook samples can be found in the following directories of the ValidMind Library GitHub repository:\n", - "\n", - "- [Use cases](https://github.com/validmind/validmind-library/tree/main/notebooks/use_cases)\n", - "- [How-to guides](https://github.com/validmind/validmind-library/tree/main/notebooks/how_to)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "## Upgrade ValidMind\n", - "\n", - "
After installing ValidMind, you’ll want to periodically make sure you are on the latest version to access any new features and other enhancements.
\n", - "\n", - "Retrieve the information for the currently installed version of ValidMind:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%pip show validmind" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "If the version returned is lower than the version indicated in our [production open-source code](https://github.com/validmind/validmind-library/blob/prod/validmind/__version__.py), restart your notebook and run:\n", - "\n", - "```bash\n", - "%pip install --upgrade validmind\n", - "```" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You may need to restart your kernel after running the upgrade package for changes to be applied." - ] - }, - { - "cell_type": "markdown", - "id": "copyright-340a990e20194848af0efb0c965e219a", - "metadata": {}, - "source": [ - "\n", - "\n", - "\n", - "\n", - "***\n", - "\n", - "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", - "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", - "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.14" - } - }, - "nbformat": 4, - "nbformat_minor": 2 + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Introduction to ValidMind Dataset and Model Objects\n", + "\n", + "When writing custom tests, it is essential to be aware of the interfaces of the ValidMind Dataset and ValidMind Model, which are used as input arguments.\n", + "\n", + "As a model developer, writing custom tests is beneficial when the ValidMind library lacks a built-in test for your specific needs. For example, a model might require new tests to evaluate specific aspects of the model or dataset based on a particular use case.\n", + "\n", + "This interactive notebook offers a detailed understanding of ValidMind objects and their use in writing custom tests. It introduces various interfaces provided by these objects and demonstrates how they can be leveraged to implement tests effortlessly." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "::: {.content-hidden when-format=\"html\"}\n", + "## Contents \n", + "- [About ValidMind](#toc1__) \n", + " - [Before you begin](#toc1_1__) \n", + " - [New to ValidMind?](#toc1_2__) \n", + " - [Key concepts](#toc1_3__) \n", + "- [Setting up](#toc2__) \n", + " - [Install the ValidMind Library](#toc2_1__) \n", + " - [Initialize the ValidMind Library](#toc2_2__) \n", + " - [Register sample model](#toc2_2_1__) \n", + " - [Apply documentation template](#toc2_2_2__) \n", + " - [Get your code snippet](#toc2_2_3__) \n", + "- [Load the demo dataset](#toc3__) \n", + " - [Prepocess the raw dataset](#toc3_1__) \n", + "- [Train a model for testing](#toc4__) \n", + "- [Explore basic components of the ValidMind library](#toc5__) \n", + " - [VMDataset Object](#toc5_1__) \n", + " - [Initialize the ValidMind datasets](#toc5_1_1__) \n", + " - [ Interfaces of the dataset object](#toc5_1_2__) \n", + " - [Using VM Dataset object as arguments in custom tests](#toc5_2__) \n", + " - [Run the test](#toc5_2_1__) \n", + " - [Using VM Dataset object and parameters as arguments in custom tests](#toc5_3__) \n", + " - [VMModel Object](#toc5_4__) \n", + " - [Initialize ValidMind model object](#toc5_5__) \n", + " - [Assign predictions to the datasets](#toc5_6__) \n", + " - [Using VM Model and Dataset objects as arguments in Custom tests](#toc5_7__) \n", + " - [Log the test results](#toc5_8__) \n", + "- [Where to go from here](#toc6__) \n", + " - [Use cases](#toc6_1__) \n", + " - [More how-to guides and code samples](#toc6_2__) \n", + " - [Discover more learning resources](#toc6_3__) \n", + "- [Upgrade ValidMind](#toc7__) \n", + "\n", + ":::\n", + "\n", + "" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## About ValidMind\n", + "\n", + "ValidMind is a suite of tools for managing model risk, including risk associated with AI and statistical models. 
You use the ValidMind Library to automate documentation and validation tests, and then use the ValidMind Platform to collaborate on model documentation. Together, these products simplify model risk management, facilitate compliance with regulations and institutional standards, and enhance collaboration between yourself and model validators.\n", + "\n", + "\n", + "\n", + "### Before you begin\n", + "\n", + "This notebook assumes you have basic familiarity with Python, including an understanding of how functions work. If you are new to Python, you can still run the notebook but we recommend further familiarizing yourself with the language.\n", + "\n", + "If you encounter errors due to missing modules in your Python environment, install the modules with `pip install`, and then re-run the notebook. For more help, refer to [Installing Python Modules](https://docs.python.org/3/installing/index.html).\n", + "\n", + "\n", + "\n", + "### New to ValidMind?\n", + "\n", + "If you haven't already seen our documentation on the [ValidMind Library](https://docs.validmind.ai/developer/validmind-library.html), we recommend you begin by exploring the available resources in this section. There, you can learn more about documenting models and running tests, as well as find code samples and our Python Library API reference.\n", + "\n", + "
For access to all features available in this notebook, you'll need access to a ValidMind account.\n", + "

\n", + "Register with ValidMind
\n", + "\n", + "\n", + "\n", + "### Key concepts\n", + "\n", + "Here, we will focus on ValidMind dataset, ValidMind model and tests to use these objects to generate artefacts for the documentation.\n", + "\n", + "**Tests**: A function contained in the ValidMind Library, designed to run a specific quantitative test on the dataset or model. Tests are the building blocks of ValidMind, used to evaluate and document models and datasets, and can be run individually or as part of a suite defined by your model documentation template.\n", + "\n", + "**Custom tests**: Custom tests are functions that you define to evaluate your model or dataset. These functions can be registered via the ValidMind Library to be used with the ValidMind Platform.\n", + "\n", + "**Inputs**: Objects to be evaluated and documented in the ValidMind Library. They can be any of the following:\n", + "\n", + "- **model**: A single ValidMind model object that has been initialized in ValidMind with [`vm.init_model()`](https://docs.validmind.ai/validmind/validmind.html#init_model).\n", + "- **dataset**: Single ValidMind dataset object that has been initialized in ValidMind with [`vm.init_dataset()`](https://docs.validmind.ai/validmind/validmind.html#init_dataset).\n", + "- **models**: A list of ValidMind models - usually this is used when you want to compare multiple models in your custom test.\n", + "- **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom test. See this [example](https://docs.validmind.ai/notebooks/how_to/tests/run_tests/configure_tests/run_tests_that_require_multiple_datasets.html) for more information.\n", + "\n", + "**Parameters**: Additional arguments that can be passed when running a ValidMind test, used to pass additional information to a test, customize its behavior, or provide additional context.\n", + "\n", + "**Outputs**: Tests can return elements like tables or plots. 
Tables may be a list of dictionaries (each representing a row) or a pandas DataFrame. Plots may be matplotlib or plotly figures.\n", + "\n", + "**Dataset based Test**\n", + "\n", + "![Dataset based test architecture](./dataset_image.png)\n", + "The dataset based tests take VM dataset object(s) as inputs, test configuration as test parameters to produce `Outputs` as mentioned above.\n", + "\n", + "**Model based Test**\n", + "\n", + "![Model based test architecture](./model_image.png)\n", + "Similar to datasest based tests, the model based tests as an additional input that is VM model object. It allows to identify prediction values of a specific model in the dataset object. " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Setting up" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Install the ValidMind Library\n", + "\n", + "Please note the following recommended Python versions to use:\n", + "\n", + "- Python 3.7 > x <= 3.11\n", + "\n", + "To install the library:" + ] + }, + { + "cell_type": "code", + "metadata": {}, + "source": [ + "%pip install -q validmind" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Initialize the ValidMind Library" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Register sample model\n", + "\n", + "Let's first register a sample model for use with this notebook:\n", + "\n", + "1. In a browser, [log in to ValidMind](https://docs.validmind.ai/guide/configuration/log-in-to-validmind.html).\n", + "\n", + "2. In the left sidebar, navigate to **Inventory** and click **+ Register Model**.\n", + "\n", + "3. Enter the model details and click **Next >** to continue to assignment of model stakeholders. ([Need more help?](https://docs.validmind.ai/guide/model-inventory/register-models-in-inventory.html))\n", + "\n", + "4. 
Select your own name under the **MODEL OWNER** drop-down.\n", + "\n", + "5. Click **Register Model** to add the model to your inventory." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Apply documentation template\n", + "\n", + "Once you've registered your model, let's select a documentation template. A template predefines sections for your model documentation and provides a general outline to follow, making the documentation process much easier.\n", + "\n", + "1. In the left sidebar that appears for your model, click **Documents** and select **Documentation**.\n", + "\n", + "2. Under **TEMPLATE**, select `Binary classification`.\n", + "\n", + "3. Click **Use Template** to apply the template." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Get your code snippet\n", + "\n", + "ValidMind generates a unique _code snippet_ for each registered model to connect with your developer environment. You initialize the ValidMind Library with this code snippet, which ensures that your documentation and tests are uploaded to the correct model when you run the notebook.\n", + "\n", + "1. On the left sidebar that appears for your model, select **Getting Started** and click **Copy snippet to clipboard**.\n", + "2. 
Next, [load your model identifier credentials from an `.env` file](https://docs.validmind.ai/developer/model-documentation/store-credentials-in-env-file.html) or replace the placeholder with your own code snippet:" + ] + }, + { + "cell_type": "code", + "metadata": { + "metadata": {} + }, + "source": [ + "# Load your model identifier credentials from an `.env` file\n", + "\n", + "%load_ext dotenv\n", + "%dotenv .env\n", + "\n", + "# Or replace with your code snippet\n", + "\n", + "import validmind as vm\n", + "\n", + "vm.init(\n", + " # api_host=\"...\",\n", + " # api_key=\"...\",\n", + " # api_secret=\"...\",\n", + " # model=\"...\",\n", + ")" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": {}, + "source": [ + "%matplotlib inline\n", + "\n", + "import xgboost as xgb" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Load the demo dataset" + ] + }, + { + "cell_type": "code", + "metadata": {}, + "source": [ + "from validmind.datasets.classification import customer_churn as demo_dataset\n", + "\n", + "raw_df = demo_dataset.load_data()" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Prepocess the raw dataset" + ] + }, + { + "cell_type": "code", + "metadata": {}, + "source": [ + "train_df, validation_df, test_df = demo_dataset.preprocess(raw_df)" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Train a model for testing\n", + "\n", + "We train a simple customer churn model for our test." 
+ ] + }, + { + "cell_type": "code", + "metadata": {}, + "source": [ + "x_train = train_df.drop(demo_dataset.target_column, axis=1)\n", + "y_train = train_df[demo_dataset.target_column]\n", + "x_val = validation_df.drop(demo_dataset.target_column, axis=1)\n", + "y_val = validation_df[demo_dataset.target_column]\n", + "\n", + "model = xgb.XGBClassifier(early_stopping_rounds=10)\n", + "model.set_params(\n", + " eval_metric=[\"error\", \"logloss\", \"auc\"],\n", + ")\n", + "model.fit(\n", + " x_train,\n", + " y_train,\n", + " eval_set=[(x_val, y_val)],\n", + " verbose=False,\n", + ")" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Explore basic components of the ValidMind library\n", + "\n", + "In this section, you will learn about the basic objects of the ValidMind library that are necessary to implement both custom and built-in tests. As explained above, these objects are:\n", + "* VMDataset: [The high level APIs can be found here](https://docs.validmind.ai/validmind/validmind/vm_models.html#VMDataset)\n", + "* VMModel: [The high level APIs can be found here](https://docs.validmind.ai/validmind/validmind/vm_models.html#VMModel)\n", + "\n", + "Let's understand these objects and their interfaces step by step: " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### VMDataset Object" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Initialize the ValidMind datasets\n", + "\n", + "You can initialize a ValidMind dataset object using the [`init_dataset`](https://docs.validmind.ai/validmind/validmind.html#init_dataset) function from the ValidMind (`vm`) module.\n", + "\n", + "The function wraps the dataset to create a ValidMind `Dataset` object so that you can write tests effectively using the common interface provided by the VM objects. 
This step is always necessary every time you want to connect a dataset to documentation and produce test results through ValidMind. You only need to do it one time per dataset.\n", + "\n", + "This function takes a number of arguments. Some of the arguments are:\n", + "\n", + "- `dataset` — the raw dataset that you want to provide as input to tests\n", + "- `input_id` - a unique identifier that allows tracking what inputs are used when running each individual test\n", + "- `target_column` — a required argument if tests require access to true values. This is the name of the target column in the dataset\n", + "\n", + "The detailed list of the arguments can be found [here](https://docs.validmind.ai/validmind/validmind.html#init_dataset) " + ] + }, + { + "cell_type": "code", + "metadata": {}, + "source": [ + "# vm_raw_dataset is now a VMDataset object that you can pass to any ValidMind test\n", + "vm_raw_dataset = vm.init_dataset(\n", + " dataset=raw_df,\n", + " input_id=\"raw_dataset\",\n", + " target_column=\"Exited\",\n", + ")" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Once you have a ValidMind dataset object (VMDataset), you can inspect its attributes and methods using the inspect_obj utility module. This module provides a list of available attributes and interfaces for use in tests. Understanding how to use VMDatasets is crucial for comprehending how a custom test functions." 
+ ] + }, + { + "cell_type": "code", + "metadata": {}, + "source": [ + "from validmind.utils import inspect_obj\n", + "inspect_obj(vm_raw_dataset)" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Interfaces of the dataset object\n", + "\n", + "**DataFrame**" + ] + }, + { + "cell_type": "code", + "metadata": {}, + "source": [ + "vm_raw_dataset.df" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Feature columns**" + ] + }, + { + "cell_type": "code", + "metadata": {}, + "source": [ + "vm_raw_dataset.feature_columns" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Target column**" + ] + }, + { + "cell_type": "code", + "metadata": {}, + "source": [ + "vm_raw_dataset.target_column" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Features values**" + ] + }, + { + "cell_type": "code", + "metadata": {}, + "source": [ + "vm_raw_dataset.x_df()" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Target value**" + ] + }, + { + "cell_type": "code", + "metadata": {}, + "source": [ + "vm_raw_dataset.y_df()" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Numeric feature columns** " + ] + }, + { + "cell_type": "code", + "metadata": {}, + "source": [ + "vm_raw_dataset.feature_columns_numeric" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Categorical feature columns** " + ] + }, + { + "cell_type": "code", + "metadata": {}, + "source": [ + "vm_raw_dataset.feature_columns_categorical" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", 
+ "metadata": {}, + "source": [ + "Similarly, you can use all other interfaces of the [VMDataset objects](https://docs.validmind.ai/validmind/validmind/vm_models.html#VMDataset) " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Using VM Dataset object as arguments in custom tests\n", + "\n", + "A custom test is simply a Python function that takes two types of arguments: `inputs` and `params`. The `inputs` are ValidMind objects (`VMDataset`, `VMModel`), and the `params` are additional parameters required for the underlying computation of the test. We will discuss both types of arguments in the following sections.\n", + "\n", + "Let's start with a custom test that requires only a ValidMind dataset object. In this example, we will check the balance of classes in the target column of the dataset:\n", + "\n", + "- The custom test below requires a single argument of type `VMDataset` (dataset).\n", + "- The `my_custom_tests.ClassImbalance` is a unique test identifier that can be assigned using the `vm.test` decorator functionality. This unique test ID will be used in the platform to load test results in the documentation.\n", + "- The `dataset.target_column` and `dataset.df` attributes of the `VMDataset` object are used in the test.\n", + "\n", + "Other high-level APIs (attributes and methods) of the dataset object are listed [here](https://docs.validmind.ai/validmind/validmind/vm_models.html#VMDataset).\n", + "\n", + "If you've gone through the [Implement custom tests notebook](../tests/custom_tests/implement_custom_tests.ipynb), you should have a good understanding of how custom tests are implemented in details. If you haven't, we recommend going through that notebook first." 
+ ] + }, + { + "cell_type": "code", + "metadata": {}, + "source": [ + "from validmind.vm_models.dataset.dataset import VMDataset\n", + "import pandas as pd\n", + "\n", + "@vm.test(\"my_custom_tests.ClassImbalance\")\n", + "def class_imbalance(dataset):\n", + " # Can only run this test if we have a Dataset object\n", + " if not isinstance(dataset, VMDataset):\n", + " raise ValueError(\"ClassImbalance requires a validmind Dataset object\")\n", + "\n", + " if dataset.target_column is None:\n", + " print(\"Skipping class_imbalance test because no target column is defined\")\n", + " return\n", + "\n", + " # VMDataset object provides target_column attribute\n", + " target_column = dataset.target_column\n", + " # we can access pandas DataFrame using df attribute\n", + " imbalance_percentages = dataset.df[target_column].value_counts(\n", + " normalize=True\n", + " )\n", + " classes = list(imbalance_percentages.index) \n", + " percentages = list(imbalance_percentages.values * 100)\n", + "\n", + " return pd.DataFrame({\"Classes\":classes, \"Percentage\": percentages})" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Run the test\n", + "\n", + "Let's run the test using the `run_test` method, which is part of the `validmind.tests` module. Here, we pass the `dataset` through the `inputs`. Similarly, you can pass `datasets`, `model`, or `models` as inputs if your custom test requires them. In this example below, we run the custom test `my_custom_tests.ClassImbalance` by passing the `dataset` through the `inputs`. 
" + ] + }, + { + "cell_type": "code", + "metadata": {}, + "source": [ + "from validmind.tests import run_test\n", + "result = run_test(\n", + " test_id=\"my_custom_tests.ClassImbalance\",\n", + " inputs={\n", + " \"dataset\": vm_raw_dataset\n", + " }\n", + ")" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can move custom tests into separate modules in a folder. It allows you to take one-off tests and move them into an organized structure that makes it easier to manage, maintain and share them. We have provided a seperate notebook with detailed explaination [here](../tests/custom_tests/integrate_external_test_providers.ipynb) " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Using VM Dataset object and parameters as arguments in custom tests\n", + "\n", + "Simlilar to `inputs`, you can pass `params` to a custom test by providing a dictionary of parameters to the `run_test()` function. The parameters will override any default parameters set in the custom test definition. Note that the `dataset` is still passed as `inputs`. \n", + "Let's modify the class imbalance test so that it provides flexibility to `normalize` the results." 
+ ] + }, + { + "cell_type": "code", + "metadata": {}, + "source": [ + "from validmind.vm_models.dataset.dataset import VMDataset\n", + "import pandas as pd\n", + "\n", + "@vm.test(\"my_custom_tests.ClassImbalance\")\n", + "def class_imbalance(dataset, normalize=True):\n", + " # Can only run this test if we have a Dataset object\n", + " if not isinstance(dataset, VMDataset):\n", + " raise ValueError(\"ClassImbalance requires a validmind Dataset object\")\n", + "\n", + " if dataset.target_column is None:\n", + " print(\"Skipping class_imbalance test because no target column is defined\")\n", + " return\n", + "\n", + " # VMDataset object provides target_column attribute\n", + " target_column = dataset.target_column\n", + " # we can access pandas DataFrame using df attribute\n", + " imbalance_percentages = dataset.df[target_column].value_counts(\n", + " normalize=normalize\n", + " )\n", + " classes = list(imbalance_percentages.index) \n", + " if normalize: \n", + " result = pd.DataFrame({\"Classes\":classes, \"Percentage\": list(imbalance_percentages.values*100)})\n", + " else:\n", + " result = pd.DataFrame({\"Classes\":classes, \"Count\": list(imbalance_percentages.values)})\n", + " return result" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In this example, the `normalize` parameter is set to `False`, so the class counts will not be normalized. You can change the value to `True` if you want the counts to be normalized. The results of the test will reflect this flexibility, allowing for different outputs based on the parameter passed.\n", + "\n", + "Here, we have passed the `dataset` through the `inputs` and the `normalize` parameter using the `params`." 
+ ] + }, + { + "cell_type": "code", + "metadata": {}, + "source": [ + "from validmind.tests import run_test\n", + "result = run_test(\n", + " test_id = \"my_custom_tests.ClassImbalance\",\n", + " inputs={\"dataset\": vm_raw_dataset},\n", + " params={\"normalize\": True},\n", + ")" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### VMModel Object" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Initialize ValidMind model object\n", + "\n", + "Similar to ValidMind `Dataset` object, you can initialize a ValidMind Model object using the [`init_model`](https://docs.validmind.ai/validmind/validmind.html#init_model) function from the ValidMind (`vm`) module.\n", + "\n", + "This function takes a number of arguments. Some of the arguments are:\n", + "\n", + "- `model` — the raw model that you want evaluate\n", + "- `input_id` - a unique identifier that allows tracking what inputs are used when running each individual test\n", + "\n", + "The detailed list of the arguments can be found [here](https://docs.validmind.ai/validmind/validmind.html#init_model) " + ] + }, + { + "cell_type": "code", + "metadata": {}, + "source": [ + "\n", + "vm_model = vm.init_model(\n", + " model=model,\n", + " input_id=\"xgb_model\",\n", + ")" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's inspect the methods and attributes of the model now:" + ] + }, + { + "cell_type": "code", + "metadata": {}, + "source": [ + "inspect_obj(vm_model)" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Assign predictions to the datasets\n", + "\n", + "We can now use the `assign_predictions()` method from the `Dataset` object to link existing predictions to any model. 
If no prediction values are passed, the method will compute predictions automatically:" + ] + }, + { + "cell_type": "code", + "metadata": {}, + "source": [ + "vm_train_ds = vm.init_dataset(\n", + " input_id=\"train_dataset\",\n", + " dataset=train_df,\n", + " type=\"generic\",\n", + " target_column=demo_dataset.target_column,\n", + ")\n", + "\n", + "vm_train_ds.assign_predictions(model=vm_model)" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can see below, the extra prediction column (`xgb_model_prediction`) for the model (`xgb_model`) has been added in the dataset." + ] + }, + { + "cell_type": "code", + "metadata": {}, + "source": [ + "print(vm_train_ds)" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Using VM Model and Dataset objects as arguments in Custom tests\n", + "\n", + "We will now create a `@vm.test` wrapper that will allow you to create a reusable test. Note the following changes in the code below:\n", + "\n", + "- The function `confusion_matrix` takes two arguments `dataset` and `model`. This is a `VMDataset` and `VMModel` object respectively.\n", + " - `VMDataset` objects allow you to access the dataset's true (target) values by accessing the `.y` attribute.\n", + " - `VMDataset` objects allow you to access the predictions for a given model by accessing the `.y_pred()` method.\n", + "- The function docstring provides a description of what the test does. 
This will be displayed along with the result in this notebook as well as in the ValidMind Platform.\n", + "- The function body calculates the confusion matrix using the `sklearn.metrics.confusion_matrix` function as we just did above.\n", + "- The function then returns the `ConfusionMatrixDisplay.figure_` object - this is important as the ValidMind Library expects the output of the custom test to be a plot or a table.\n", + "- The `@vm.test` decorator is doing the work of creating a wrapper around the function that will allow it to be run by the ValidMind Library. It also registers the test so it can be found by the ID `my_custom_tests.ConfusionMatrix` (see the section below on how test IDs work in ValidMind and why this format is important)\n", + "\n", + "Similarly, you can use the functionality provided by `VMDataset` and `VMModel` objects. You can refer to our documentation page for all the available APIs [here](https://docs.validmind.ai/validmind/validmind.html#init_dataset)" + ] + }, + { + "cell_type": "code", + "metadata": {}, + "source": [ + "from sklearn import metrics\n", + "import matplotlib.pyplot as plt\n", + "@vm.test(\"my_custom_tests.ConfusionMatrix\")\n", + "def confusion_matrix(dataset, model):\n", + " \"\"\"The confusion matrix is a table that is often used to describe the performance of a classification model on a set of data for which the true values are known.\n", + "\n", + " The confusion matrix is a 2x2 table that contains 4 values:\n", + "\n", + " - True Positive (TP): the number of correct positive predictions\n", + " - True Negative (TN): the number of correct negative predictions\n", + " - False Positive (FP): the number of incorrect positive predictions\n", + " - False Negative (FN): the number of incorrect negative predictions\n", + "\n", + " The confusion matrix can be used to assess the holistic performance of a classification model by showing the accuracy, precision, recall, and F1 score of the model on a single figure.\n", + " \"\"\"\n", 
+ " # we can retrieve traget value from dataset which is y attribute\n", + " y_true = dataset.y\n", + " # The prediction value of a specific model using y_pred method \n", + " y_pred = dataset.y_pred(model=model)\n", + "\n", + " confusion_matrix = metrics.confusion_matrix(y_true, y_pred)\n", + "\n", + " cm_display = metrics.ConfusionMatrixDisplay(\n", + " confusion_matrix=confusion_matrix, display_labels=[False, True]\n", + " )\n", + " cm_display.plot()\n", + " plt.close()\n", + "\n", + " return cm_display.figure_ # return the figure object itself" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Here, we run test using two inputs; `dataset` and `model`. " + ] + }, + { + "cell_type": "code", + "metadata": {}, + "source": [ + "from validmind.tests import run_test\n", + "result = run_test(\n", + " test_id = \"my_custom_tests.ConfusionMatrix\",\n", + " inputs={\n", + " \"dataset\": vm_train_ds,\n", + " \"model\": vm_model,\n", + " }\n", + ")" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Log the test results\n", + "\n", + "You can log any test result to the ValidMind Platform with the `.log()` method of the result object. This will allow you to add the result to the documentation.\n", + "\n", + "You can now do the same for the confusion matrix results." 
+ ] + }, + { + "cell_type": "code", + "metadata": {}, + "source": [ + "result.log()" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Where to go from here\n", + "\n", + "In this notebook you have learned the end-to-end process to document a model with the ValidMind Library, running through some very common scenarios in a typical model development setting:\n", + "\n", + "- Running out-of-the-box tests\n", + "- Documenting your model by adding evidence to model documentation\n", + "- Extending the capabilities of the ValidMind Library by implementing custom tests\n", + "- Ensuring that the documentation is complete by running all tests in the documentation template\n", + "\n", + "As a next step, you can explore the following notebooks to get a deeper understanding on how the ValidMind Library allows you generate model documentation for any use case:\n", + "\n", + "\n", + "\n", + "### Use cases\n", + "\n", + "- [Document an application scorecard model](../../use_cases/credit_risk/application_scorecard_full_suite.ipynb)\n", + "- [Linear regression documentation demo](../../use_cases/regression/quickstart_regression_full_suite.ipynb)\n", + "- [LLM model documentation demo](../../use_cases/nlp_and_llm/foundation_models_integration_demo.ipynb)\n", + "\n", + "\n", + "\n", + "### More how-to guides and code samples\n", + "\n", + "- [Explore available tests in detail](../tests/explore_tests/explore_tests.ipynb)\n", + "- [In-depth guide for implementing custom tests](../tests/custom_tests/implement_custom_tests.ipynb)\n", + "- [In-depth guide to external test providers](../tests/custom_tests/integrate_external_test_providers.ipynb)\n", + "- [Configuring dataset features](./dataset_inputs/configure_dataset_features.ipynb)\n", + "- [Introduction to unit and composite tests](../metrics/run_unit_metrics.ipynb)\n", + "\n", + "\n", + "\n", + "### Discover more learning resources\n", + "\n", + 
"All notebook samples can be found in the following directories of the ValidMind Library GitHub repository:\n", + "\n", + "- [Use cases](https://github.com/validmind/validmind-library/tree/main/notebooks/use_cases)\n", + "- [How-to guides](https://github.com/validmind/validmind-library/tree/main/notebooks/how_to)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Upgrade ValidMind\n", + "\n", + "
After installing ValidMind, you’ll want to periodically make sure you are on the latest version to access any new features and other enhancements.
\n", + "\n", + "Retrieve the information for the currently installed version of ValidMind:" + ] + }, + { + "cell_type": "code", + "metadata": {}, + "source": [ + "%pip show validmind" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "If the version returned is lower than the version indicated in our [production open-source code](https://github.com/validmind/validmind-library/blob/prod/validmind/__version__.py), restart your notebook and run:\n", + "\n", + "```bash\n", + "%pip install --upgrade validmind\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You may need to restart your kernel after running the upgrade package for changes to be applied." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "\n", + "\n", + "***\n", + "\n", + "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", + "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", + "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" + ], + "id": "copyright-340a990e20194848af0efb0c965e219a" + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.14" + } + }, + "nbformat": 4, + "nbformat_minor": 2 } diff --git a/notebooks/how_to/tests/custom_tests/implement_custom_tests.ipynb b/notebooks/how_to/tests/custom_tests/implement_custom_tests.ipynb index bc3537190..0fbb88437 100644 --- a/notebooks/how_to/tests/custom_tests/implement_custom_tests.ipynb +++ b/notebooks/how_to/tests/custom_tests/implement_custom_tests.ipynb @@ -1,1093 +1,1093 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Implement custom tests\n", - "\n", - "Custom tests extend the functionality of ValidMind, allowing you to document any model or use case with added flexibility.\n", - "\n", - "ValidMind provides a comprehensive set of tests out-of-the-box to evaluate and document your models and datasets. We recognize there will be cases where the default tests do not support a model or dataset, or specific documentation is needed. In these cases, you can create and use your own custom code to accomplish what you need. To streamline custom code integration, we support the creation of custom test functions.\n", - "\n", - "This interactive notebook provides a step-by-step guide for implementing and registering custom tests with ValidMind, running them individually, viewing the results on the ValidMind Platform, and incorporating them into your model documentation template." 
- ] + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Implement custom tests\n", + "\n", + "Custom tests extend the functionality of ValidMind, allowing you to document any model or use case with added flexibility.\n", + "\n", + "ValidMind provides a comprehensive set of tests out-of-the-box to evaluate and document your models and datasets. We recognize there will be cases where the default tests do not support a model or dataset, or specific documentation is needed. In these cases, you can create and use your own custom code to accomplish what you need. To streamline custom code integration, we support the creation of custom test functions.\n", + "\n", + "This interactive notebook provides a step-by-step guide for implementing and registering custom tests with ValidMind, running them individually, viewing the results on the ValidMind Platform, and incorporating them into your model documentation template." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "::: {.content-hidden when-format=\"html\"}\n", + "## Contents \n", + "- [About ValidMind](#toc1__) \n", + " - [Before you begin](#toc1_1__) \n", + " - [New to ValidMind?](#toc1_2__) \n", + " - [Key concepts](#toc1_3__) \n", + "- [Setting up](#toc2__) \n", + " - [Install the ValidMind Library](#toc2_1__) \n", + " - [Initialize the ValidMind Library](#toc2_2__) \n", + " - [Register sample model](#toc2_2_1__) \n", + " - [Apply documentation template](#toc2_2_2__) \n", + " - [Get your code snippet](#toc2_2_3__) \n", + "- [Implement a Custom Test](#toc3__) \n", + "- [Run the Custom Test](#toc4__) \n", + " - [Setup the Model and Dataset](#toc4_1__) \n", + " - [Run the Custom Test](#toc4_2__) \n", + "- [Adding Custom Test to Model Documentation](#toc5__) \n", + "- [Some More Custom Tests](#toc6__) \n", + " - [Custom Test: Table of Model Hyperparameters](#toc6_1__) \n", + " - [Custom Test: External API Call](#toc6_2__) \n", + " - [Custom Test: Passing 
Parameters](#toc6_3__) \n", + " - [Custom Test: Multiple Tables and Plots in a Single Test](#toc6_4__) \n", + " - [Custom Test: Images](#toc6_5__) \n", + " - [Custom Test: Description](#toc6_6__) \n", + "- [Conclusion](#toc7__) \n", + "- [Next steps](#toc8__) \n", + " - [Work with your model documentation](#toc8_1__) \n", + " - [Discover more learning resources](#toc8_2__) \n", + "- [Upgrade ValidMind](#toc9__) \n", + "\n", + ":::\n", + "\n", + "" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## About ValidMind\n", + "\n", + "ValidMind is a suite of tools for managing model risk, including risk associated with AI and statistical models.\n", + "\n", + "You use the ValidMind Library to automate documentation and validation tests, and then use the ValidMind Platform to collaborate on model documentation. Together, these products simplify model risk management, facilitate compliance with regulations and institutional standards, and enhance collaboration between yourself and model validators.\n", + "\n", + "\n", + "\n", + "### Before you begin\n", + "\n", + "This notebook assumes you have basic familiarity with Python, including an understanding of how functions work. If you are new to Python, you can still run the notebook but we recommend further familiarizing yourself with the language. \n", + "\n", + "If you encounter errors due to missing modules in your Python environment, install the modules with `pip install`, and then re-run the notebook. For more help, refer to [Installing Python Modules](https://docs.python.org/3/installing/index.html).\n", + "\n", + "\n", + "\n", + "### New to ValidMind?\n", + "\n", + "If you haven't already seen our documentation on the [ValidMind Library](https://docs.validmind.ai/developer/validmind-library.html), we recommend you begin by exploring the available resources in this section. 
There, you can learn more about documenting models and running tests, as well as find code samples and our Python Library API reference.\n", + "\n", + "
For access to all features available in this notebook, you'll need access to a ValidMind account.\n", + "

\n", + "Register with ValidMind
\n", + "\n", + "\n", + "\n", + "### Key concepts\n", + "\n", + "**Model documentation**: A structured and detailed record pertaining to a model, encompassing key components such as its underlying assumptions, methodologies, data sources, inputs, performance metrics, evaluations, limitations, and intended uses. It serves to ensure transparency, adherence to regulatory requirements, and a clear understanding of potential risks associated with the model’s application.\n", + "\n", + "**Documentation template**: Functions as a test suite and lays out the structure of model documentation, segmented into various sections and sub-sections. Documentation templates define the structure of your model documentation, specifying the tests that should be run, and how the results should be displayed.\n", + "\n", + "**Tests**: A function contained in the ValidMind Library, designed to run a specific quantitative test on the dataset or model. Tests are the building blocks of ValidMind, used to evaluate and document models and datasets, and can be run individually or as part of a suite defined by your model documentation template.\n", + "\n", + "**Custom tests**: Custom tests are functions that you define to evaluate your model or dataset. These functions can be registered via the ValidMind Library to be used with the ValidMind Platform.\n", + "\n", + "**Inputs**: Objects to be evaluated and documented in the ValidMind Library. 
They can be any of the following:\n", + "\n", + " - **model**: A single model that has been initialized in ValidMind with [`vm.init_model()`](https://docs.validmind.ai/validmind/validmind.html#init_model).\n", + " - **dataset**: Single dataset that has been initialized in ValidMind with [`vm.init_dataset()`](https://docs.validmind.ai/validmind/validmind.html#init_dataset).\n", + " - **models**: A list of ValidMind models - usually this is used when you want to compare multiple models in your custom test.\n", + " - **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom test. See this [example](https://docs.validmind.ai/notebooks/how_to/tests/run_tests/configure_tests/run_tests_that_require_multiple_datasets.html) for more information.\n", + "\n", + "**Parameters**: Additional arguments that can be passed when running a ValidMind test, used to pass additional information to a test, customize its behavior, or provide additional context.\n", + "\n", + "**Outputs**: Custom test can return elements like tables or plots. Tables may be a list of dictionaries (each representing a row) or a pandas DataFrame. Plots may be matplotlib or plotly figures.\n", + "\n", + "**Test suites**: Collections of tests designed to run together to automate and generate model documentation end-to-end for specific use-cases.\n", + "\n", + "Example: the [`classifier_full_suite`](https://docs.validmind.ai/validmind/validmind/test_suites/classifier.html#ClassifierFullSuite) test suite runs tests from the [`tabular_dataset`](https://docs.validmind.ai/validmind/validmind/test_suites/tabular_datasets.html) and [`classifier`](https://docs.validmind.ai/validmind/validmind/test_suites/classifier.html) test suites to fully document the data and model sections for binary classification model use-cases." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Setting up" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Install the ValidMind Library\n", + "\n", + "To install the library:" + ] + }, + { + "cell_type": "code", + "metadata": {}, + "source": [ + "%pip install -q validmind" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Initialize the ValidMind Library" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Register sample model\n", + "\n", + "Let's first register a sample model for use with this notebook:\n", + "\n", + "1. In a browser, [log in to ValidMind](https://docs.validmind.ai/guide/configuration/log-in-to-validmind.html).\n", + "\n", + "2. In the left sidebar, navigate to **Inventory** and click **+ Register Model**.\n", + "\n", + "3. Enter the model details and click **Next >** to continue to assignment of model stakeholders. ([Need more help?](https://docs.validmind.ai/guide/model-inventory/register-models-in-inventory.html))\n", + "\n", + "4. Select your own name under the **MODEL OWNER** drop-down.\n", + "\n", + "5. Click **Register Model** to add the model to your inventory." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Apply documentation template\n", + "\n", + "Once you've registered your model, let's select a documentation template. A template predefines sections for your model documentation and provides a general outline to follow, making the documentation process much easier.\n", + "\n", + "1. In the left sidebar that appears for your model, click **Documents** and select **Documentation**.\n", + "\n", + "2. Under **TEMPLATE**, select `Binary classification`.\n", + "\n", + "3. Click **Use Template** to apply the template." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Get your code snippet\n", + "\n", + "ValidMind generates a unique _code snippet_ for each registered model to connect with your developer environment. You initialize the ValidMind Library with this code snippet, which ensures that your documentation and tests are uploaded to the correct model when you run the notebook.\n", + "\n", + "1. On the left sidebar that appears for your model, select **Getting Started** and click **Copy snippet to clipboard**.\n", + "2. Next, [load your model identifier credentials from an `.env` file](https://docs.validmind.ai/developer/model-documentation/store-credentials-in-env-file.html) or replace the placeholder with your own code snippet:" + ] + }, + { + "cell_type": "code", + "metadata": {}, + "source": [ + "# Load your model identifier credentials from an `.env` file\n", + "\n", + "%load_ext dotenv\n", + "%dotenv .env\n", + "\n", + "# Or replace with your code snippet\n", + "\n", + "import validmind as vm\n", + "\n", + "vm.init(\n", + " # api_host=\"...\",\n", + " # api_key=\"...\",\n", + " # api_secret=\"...\",\n", + " # model=\"...\",\n", + ")" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Implement a Custom Test\n", + "\n", + "Let's start off by creating a simple custom test that creates a Confusion Matrix for a binary classification model. We will use the `sklearn.metrics.confusion_matrix` function to calculate the confusion matrix and then display it as a heatmap using `plotly`. 
(This is already a built-in test in ValidMind, but we will use it as an example to demonstrate how to create custom tests.)" + ] + }, + { + "cell_type": "code", + "metadata": {}, + "source": [ + "import matplotlib.pyplot as plt\n", + "from sklearn import metrics\n", + "\n", + "\n", + "@vm.test(\"my_custom_tests.ConfusionMatrix\")\n", + "def confusion_matrix(dataset, model):\n", + " \"\"\"The confusion matrix is a table that is often used to describe the performance of a classification model on a set of data for which the true values are known.\n", + "\n", + " The confusion matrix is a 2x2 table that contains 4 values:\n", + "\n", + " - True Positive (TP): the number of correct positive predictions\n", + " - True Negative (TN): the number of correct negative predictions\n", + " - False Positive (FP): the number of incorrect positive predictions\n", + " - False Negative (FN): the number of incorrect negative predictions\n", + "\n", + " The confusion matrix can be used to assess the holistic performance of a classification model by showing the accuracy, precision, recall, and F1 score of the model on a single figure.\n", + " \"\"\"\n", + " y_true = dataset.y\n", + " y_pred = dataset.y_pred(model)\n", + "\n", + " confusion_matrix = metrics.confusion_matrix(y_true, y_pred)\n", + "\n", + " cm_display = metrics.ConfusionMatrixDisplay(\n", + " confusion_matrix=confusion_matrix, display_labels=[False, True]\n", + " )\n", + " cm_display.plot()\n", + "\n", + " plt.close() # close the plot to avoid displaying it\n", + "\n", + " return cm_display.figure_ # return the figure object itself" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Thats our custom test defined and ready to go... Let's take a look at whats going on here:\n", + "\n", + "- The function `confusion_matrix` takes two arguments `dataset` and `model`. 
This is a VMDataset and VMModel object respectively.\n", + "- The function docstring provides a description of what the test does. This will be displayed along with the result in this notebook as well as in the ValidMind Platform.\n", + "- The function body calculates the confusion matrix using the `sklearn.metrics.confusion_matrix` function and then plots it using `sklearn.metrics.ConfusionMatrixDisplay`.\n", + "- The function then returns the `ConfusionMatrixDisplay.figure_` object - this is important as the ValidMind Library expects the output of the custom test to be a plot or a table.\n", + "- The `@vm.test` decorator is doing the work of creating a wrapper around the function that will allow it to be run by the ValidMind Library. It also registers the test so it can be found by the ID `my_custom_tests.ConfusionMatrix` (see the section below on how test IDs work in ValidMind and why this format is important)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Run the Custom Test\n", + "\n", + "Now that we have defined and registered our custom test, let's see how we can run it and properly use it in the ValidMind Platform." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Setup the Model and Dataset\n", + "\n", + "First let's set up an example model and dataset to run our custom metric against. Since this is a Confusion Matrix, we will use the Customer Churn dataset that ValidMind provides and train a simple XGBoost model." 
+ ] + }, + { + "cell_type": "code", + "metadata": {}, + "source": [ + "import xgboost as xgb\n", + "from validmind.datasets.classification import customer_churn\n", + "\n", + "raw_df = customer_churn.load_data()\n", + "train_df, validation_df, test_df = customer_churn.preprocess(raw_df)\n", + "\n", + "x_train = train_df.drop(customer_churn.target_column, axis=1)\n", + "y_train = train_df[customer_churn.target_column]\n", + "x_val = validation_df.drop(customer_churn.target_column, axis=1)\n", + "y_val = validation_df[customer_churn.target_column]\n", + "\n", + "model = xgb.XGBClassifier(early_stopping_rounds=10)\n", + "model.set_params(\n", + " eval_metric=[\"error\", \"logloss\", \"auc\"],\n", + ")\n", + "model.fit(\n", + " x_train,\n", + " y_train,\n", + " eval_set=[(x_val, y_val)],\n", + " verbose=False,\n", + ")" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Easy enough! Now we have a model and dataset setup and trained. One last thing to do is bring the dataset and model into the ValidMind Library:" + ] + }, + { + "cell_type": "code", + "metadata": {}, + "source": [ + "# for now, we'll just use the test dataset\n", + "vm_test_ds = vm.init_dataset(\n", + " dataset=test_df,\n", + " target_column=customer_churn.target_column,\n", + " input_id=\"test_dataset\",\n", + ")\n", + "\n", + "vm_model = vm.init_model(model, input_id=\"model\")\n", + "\n", + "# link the model to the dataset\n", + "vm_test_ds.assign_predictions(model=vm_model)" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Run the Custom Test\n", + "\n", + "Now that we have our model and dataset setup, we have everything we need to run our custom test. 
We can do this by importing the `run_test` function from the `validmind.tests` module and passing in the test ID of our custom test along with the model and dataset we want to run it against.\n", + "\n", + ">Notice how the `inputs` dictionary is used to map an `input_id` which we set above to the `model` and `dataset` keys that are expected by our custom test function. This is how the ValidMind Library knows which inputs to pass to different tests and is key when using many different datasets and models." + ] + }, + { + "cell_type": "code", + "metadata": {}, + "source": [ + "from validmind.tests import run_test\n", + "\n", + "result = run_test(\n", + " \"my_custom_tests.ConfusionMatrix\",\n", + " inputs={\"model\": \"model\", \"dataset\": \"test_dataset\"},\n", + ")" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You'll notice that the docstring becomes a markdown description of the test. The figure is then displayed as the test result. What you see above is how it will look in the ValidMind Platform as well. Let's go ahead and log the result to see how that works." + ] + }, + { + "cell_type": "code", + "metadata": {}, + "source": [ + "result.log()" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Adding Custom Test to Model Documentation\n", + "\n", + "To do this, go to the documentation page of the model you registered above and navigate to the `Model Development` -> `Model Evaluation` section. Then hover between any existing content block to reveal the `+` button as shown in the screenshot below.\n", + "\n", + "![screenshot showing insert button for test-driven blocks](./insert-test-driven-block.png)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now click on the `+` button and select the `Test-Driven Block` option. 
This will open a dialog where you can select `My Custom Tests Confusion Matrix` from the list of available tests. You can preview the result and then click `Insert Block` to add it to the documentation.\n", + "\n", + "![screenshot showing how to insert a test-driven block](./insert-test-driven-block-custom.png)\n", + "\n", + "The test should match the result you see above. It is now part of your documentation and will now be run everytime you run `vm.run_documentation_tests()` for your model. Let's do that now." + ] + }, + { + "cell_type": "code", + "metadata": {}, + "source": [ + "vm.reload()" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "If you preview the template, it should show the custom test in the `Model Development`->`Model Evaluation` section:" + ] + }, + { + "cell_type": "code", + "metadata": {}, + "source": [ + "vm.preview_template()" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Just so we can run all of the tests in the template, let's initialize the train and raw dataset.\n", + "\n", + "(Refer to [**Quickstart for model documentation**](../../../quickstart/quickstart_model_documentation.ipynb) and the ValidMind docs for more information on what we are doing here)" + ] + }, + { + "cell_type": "code", + "metadata": {}, + "source": [ + "vm_raw_dataset = vm.init_dataset(\n", + " dataset=raw_df,\n", + " input_id=\"raw_dataset\",\n", + " target_column=customer_churn.target_column,\n", + " class_labels=customer_churn.class_labels,\n", + ")\n", + "\n", + "vm_train_ds = vm.init_dataset(\n", + " dataset=train_df,\n", + " input_id=\"train_dataset\",\n", + " target_column=customer_churn.target_column,\n", + ")\n", + "vm_train_ds.assign_predictions(model=vm_model)" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To run all the tests in the template, 
you can use the `vm.run_documentation_tests()` function and pass the inputs we initialized above and the demo config from our customer_churn module. We will have to add a section to the config for our new test to tell it which inputs it should receive. This is done by simply adding a new element in the config dictionary where the key is the ID of the test and the value is a dictionary with the following structure:\n", + "```python\n", + "{\n", + " \"inputs\": {\n", + " \"dataset\": \"test_dataset\",\n", + " \"model\": \"model\",\n", + " }\n", + "}\n", + "```" + ] + }, + { + "cell_type": "code", + "metadata": {}, + "source": [ + "from validmind.utils import preview_test_config\n", + "\n", + "test_config = customer_churn.get_demo_test_config()\n", + "test_config[\"my_custom_tests.ConfusionMatrix\"] = {\n", + " \"inputs\": {\n", + " \"dataset\": \"test_dataset\",\n", + " \"model\": \"model\",\n", + " }\n", + "}\n", + "preview_test_config(test_config)" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": {}, + "source": [ + "full_suite = vm.run_documentation_tests(config=test_config)" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Some More Custom Tests\n", + "\n", + "Now that you understand the entire process of creating custom tests and using them in your documentation, let's create a few more to see different ways you can utilize custom tests." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Custom Test: Table of Model Hyperparameters\n", + "\n", + "This custom test will display a table of the hyperparameters used in the model:" + ] + }, + { + "cell_type": "code", + "metadata": {}, + "source": [ + "@vm.test(\"my_custom_tests.Hyperparameters\")\n", + "def hyperparameters(model):\n", + " \"\"\"The hyperparameters of a machine learning model are the settings that control the learning process.\n", + " These settings are specified before the learning process begins and can have a significant impact on the\n", + " performance of the model.\n", + "\n", + " The hyperparameters of a model can be used to tune the model to achieve the best possible performance\n", + " on a given dataset. By examining the hyperparameters of a model, you can gain insight into how the model\n", + " was trained and how it might be improved.\n", + " \"\"\"\n", + " hyperparameters = model.model.get_xgb_params() # dictionary of hyperparameters\n", + "\n", + " # turn the dictionary into a table where each row contains a hyperparameter and its value\n", + " return [{\"Hyperparam\": k, \"Value\": v} for k, v in hyperparameters.items() if v]\n", + "\n", + "\n", + "result = run_test(\"my_custom_tests.Hyperparameters\", inputs={\"model\": \"model\"})\n", + "result.log()" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Since the test has been run and logged, you can add it to your documentation using the same process as above. It should look like this:\n", + "\n", + "![screenshot showing hyperparameters test](./hyperparameters-custom-metric.png)\n", + "\n", + "For our simple toy model, there aren't really any proper hyperparameters but you can see how this could be useful for more complex models that have gone through hyperparameter tuning." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Custom Test: External API Call\n", + "\n", + "This custom test will make an external API call to get a list of fake users and display them as a table. This demonstrates how you might integrate external data sources into your model documentation in a programmatic way. You could, for instance, set up a pipeline that runs a test like this every day to keep your model documentation in sync with an external system." + ] + }, + { + "cell_type": "code", + "metadata": {}, + "source": [ + "import requests\n", + "import random\n", + "\n", + "\n", + "@vm.test(\"my_custom_tests.ExternalAPI\")\n", + "def external_api():\n", + " \"\"\"This test calls an external API to get a list of fake users. It then creates\n", + " a table with the relevant data so it can be displayed in the documentation.\n", + "\n", + " The purpose of this test is to demonstrate how to call an external API and use the\n", + " data in a test. 
A test like this could even be set up to run in a scheduled\n", + " pipeline to keep your documentation in-sync with an external data source.\n", + " \"\"\"\n", + " url = \"https://jsonplaceholder.typicode.com/users\"\n", + " response = requests.get(url)\n", + " data = response.json()\n", + "\n", + " # build tables of model owners, developers, and validators from the user data\n", + " return {\n", + " \"Model Owners/Stakeholders\": [\n", + " {\n", + " \"Name\": user[\"name\"],\n", + " \"Role\": random.choice([\"Owner\", \"Stakeholder\"]),\n", + " \"Email\": user[\"email\"],\n", + " \"Phone\": user[\"phone\"],\n", + " \"Slack Handle\": f\"@{user['name'].lower().replace(' ', '.')}\",\n", + " }\n", + " for user in data[:3]\n", + " ],\n", + " \"Model Developers\": [\n", + " {\n", + " \"Name\": user[\"name\"],\n", + " \"Role\": \"Developer\",\n", + " \"Email\": user[\"email\"],\n", + " }\n", + " for user in data[3:7]\n", + " ],\n", + " \"Model Validators\": [\n", + " {\n", + " \"Name\": user[\"name\"],\n", + " \"Role\": \"Validator\",\n", + " \"Email\": user[\"email\"],\n", + " }\n", + " for user in data[7:]\n", + " ],\n", + " }\n", + "\n", + "\n", + "result = run_test(\"my_custom_tests.ExternalAPI\")\n", + "result.log()" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Again, you can add this to your documentation to see how it looks:\n", + "\n", + "![screenshot showing external API custom test](./external-data-custom-test.png)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Custom Test: Passing Parameters\n", + "\n", + "Custom test functions, as stated earlier, can take both inputs and params. When you define your function there is no need to distinguish between the two, the ValidMind Library will handle that for you. 
You simply need to add both to the function as arguments and the library will pass in the correct values.\n", + "\n", + "So for instance, if you wanted to parameterize the first custom test we created, the confusion matrix, you could do so like this:\n", + "\n", + "```python\n", + "def confusion_matrix(dataset: VMDataset, model: VMModel, my_param: str = \"Default Value\"):\n", + " pass\n", + "```\n", + "\n", + "And then when you run the test, you can pass in the parameter like this:\n", + "\n", + "```python\n", + "vm.run_test(\n", + " \"my_custom_tests.ConfusionMatrix\",\n", + " inputs={\"model\": \"model\", \"dataset\": \"test_dataset\"},\n", + " params={\"my_param\": \"My Value\"},\n", + ")\n", + "```\n", + "\n", + "Or if you are running the entire documentation template, you would update the config like this:\n", + "\n", + "```python\n", + "test_config[\"my_custom_tests.ConfusionMatrix\"] = {\n", + " \"inputs\": {\n", + " \"dataset\": \"test_dataset\",\n", + " \"model\": \"model\",\n", + " },\n", + " \"params\": {\n", + " \"my_param\": \"My Value\",\n", + " },\n", + "}\n", + "```\n", + "\n", + "Let's go ahead and create a toy test that takes a parameter and uses it in the result:" + ] + }, + { + "cell_type": "code", + "metadata": {}, + "source": [ + "import plotly.express as px\n", + "\n", + "\n", + "@vm.test(\"my_custom_tests.ParameterExample\")\n", + "def parameter_example(\n", + " plot_title=\"Default Plot Title\", x_col=\"sepal_width\", y_col=\"sepal_length\"\n", + "):\n", + " \"\"\"This test takes two parameters and creates a scatter plot based on them.\n", + "\n", + " The purpose of this test is to demonstrate how to create a test that takes\n", + " parameters and uses them to generate a plot. 
This can be useful for creating\n", + " tests that are more flexible and can be used in a variety of scenarios.\n", + " \"\"\"\n", + " # return px.scatter(px.data.iris(), x=x_col, y=y_col, color=\"species\")\n", + " return px.scatter(\n", + " px.data.iris(), x=x_col, y=y_col, color=\"species\", title=plot_title\n", + " )\n", + "\n", + "\n", + "result = run_test(\n", + " \"my_custom_tests.ParameterExample\",\n", + " params={\n", + " \"plot_title\": \"My Cool Plot\",\n", + " \"x_col\": \"sepal_width\",\n", + " \"y_col\": \"sepal_length\",\n", + " },\n", + ")\n", + "result.log()" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Play around with this and see how you can use parameters, default values and other features to make your custom tests more flexible and useful.\n", + "\n", + "Here's how this one looks in the documentation:\n", + "![screenshot showing parameterized test](./parameterized-custom-metric.png)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Custom Test: Multiple Tables and Plots in a Single Test\n", + "\n", + "Custom test functions, as stated earlier, can return more than just one table or plot. In fact, any number of tables and plots can be returned. 
Let's see an example of this:" + ] + }, + { + "cell_type": "code", + "metadata": {}, + "source": [ + "import numpy as np\n", + "import plotly.express as px\n", + "\n", + "\n", + "@vm.test(\"my_custom_tests.ComplexOutput\")\n", + "def complex_output():\n", + " \"\"\"This test demonstrates how to return many tables and figures in a single test\"\"\"\n", + " # create a couple tables\n", + " table = [{\"A\": 1, \"B\": 2}, {\"A\": 3, \"B\": 4}]\n", + " table2 = [{\"C\": 5, \"D\": 6}, {\"C\": 7, \"D\": 8}]\n", + "\n", + " # create a few figures showing some random data\n", + " fig1 = px.line(x=np.arange(10), y=np.random.rand(10), title=\"Random Line Plot\")\n", + " fig2 = px.bar(x=[\"A\", \"B\", \"C\"], y=np.random.rand(3), title=\"Random Bar Plot\")\n", + " fig3 = px.scatter(\n", + " x=np.random.rand(10), y=np.random.rand(10), title=\"Random Scatter Plot\"\n", + " )\n", + "\n", + " return (\n", + " {\n", + " \"My Cool Table\": table,\n", + " \"Another Table\": table2,\n", + " },\n", + " fig1,\n", + " fig2,\n", + " fig3,\n", + " )\n", + "\n", + "\n", + "result = run_test(\"my_custom_tests.ComplexOutput\")\n", + "result.log()" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Notice how you can return the tables as a dictionary where the key is the title of the table and the value is the table itself. You could also just return the tables by themselves but this way you can give them a title to more easily identify them in the result.\n", + "\n", + "![screenshot showing multiple tables and plots](./multiple-tables-plots-custom-metric.png)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Custom Test: Images\n", + "\n", + "If you are using a plotting library that isn't supported by ValidMind (i.e. not `matplotlib` or `plotly`), you can still return the image directly as a bytes-like object. 
This could also be used to bring any type of image into your documentation in a programmatic way. For instance, you may want to include a diagram of your model architecture or a screenshot of a dashboard that your model is integrated with. As long as you can produce the image with Python or open it from a file, you can include it in your documentation." + ] + }, + { + "cell_type": "code", + "metadata": {}, + "source": [ + "import io\n", + "import matplotlib.pyplot as plt\n", + "\n", + "\n", + "@vm.test(\"my_custom_tests.Image\")\n", + "def image():\n", + " \"\"\"This test demonstrates how to return an image in a test\"\"\"\n", + "\n", + " # create a simple plot\n", + " fig, ax = plt.subplots()\n", + " ax.plot([1, 2, 3, 4])\n", + " ax.set_title(\"Simple Line Plot\")\n", + "\n", + " # save the plot as a PNG image (in-memory buffer)\n", + " img_data = io.BytesIO()\n", + " fig.savefig(img_data, format=\"png\")\n", + " img_data.seek(0)\n", + "\n", + " plt.close() # close the plot to avoid displaying it\n", + "\n", + " return img_data.read()\n", + "\n", + "\n", + "result = run_test(\"my_custom_tests.Image\")\n", + "result.log()" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Adding this custom test to your documentation will display the image:\n", + "\n", + "![screenshot showing image custom test](./image-in-custom-metric.png)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "If you want to log an image as a test result, you can do so by passing the path to the image as a parameter to the custom test and then opening the file in the test function. 
Here's an example:" + ] + }, + { + "cell_type": "code", + "metadata": {}, + "source": [ + "@vm.test(\"my_custom_tests.MyPNGCorrelationMatrix\")\n", + "def Image(path: str):\n", + " \"\"\"Opens a png image file and logs it as a test result to ValidMind\"\"\"\n", + " if not path.endswith(\".png\"):\n", + " raise ValueError(\"Image must be a PNG file\")\n", + "\n", + " # return raw image bytes\n", + " with open(path, \"rb\") as f:\n", + " return f.read()\n", + " \n", + "run_test(\n", + " \"my_custom_tests.MyPNGCorrelationMatrix\",\n", + " params={\"path\": \"./pearson-correlation-matrix.png\"},\n", + ").log()" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The image is displayed in the test result:\n", + "\n", + "![screenshot showing image from file](./pearson-correlation-matrix-test-output.png)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Custom Test: Description\n", + "\n", + "If you want to write a custom test description for your custom test instead of having it generated by an LLM, you can do so by returning a string in your test." 
+ ] + }, + { + "cell_type": "code", + "metadata": {}, + "source": [ + "import pandas as pd\n", + "\n", + "@vm.test(\"my_custom_tests.MyCustomTest\")\n", + "def my_custom_test(dataset, model):\n", + " \"\"\"\n", + " This is a custom test that computes the confusion matrix for a binary classification model and returns a string as the test description.\n", + " \"\"\"\n", + " y_true = dataset.y\n", + " y_pred = dataset.y_pred(model)\n", + "\n", + " confusion_matrix = metrics.confusion_matrix(y_true, y_pred)\n", + "\n", + " cm_display = metrics.ConfusionMatrixDisplay(\n", + " confusion_matrix=confusion_matrix, display_labels=[False, True]\n", + " )\n", + " cm_display.plot()\n", + "\n", + " plt.close() # close the plot to avoid displaying it\n", + "\n", + " return cm_display.figure_, \"Test Description - Confusion Matrix\", pd.DataFrame({\"Value\": [1, 2, 3]}) # return the figure, a custom description string, and a table\n", + "\n" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can see that the test result description has been customized here. The same result description will be displayed in the UI." + ] + }, + { + "cell_type": "code", + "metadata": {}, + "source": [ + "result = run_test(\n", + " \"my_custom_tests.MyCustomTest\",\n", + " inputs={\"model\": \"model\", \"dataset\": \"test_dataset\"},\n", + ")\n", + "result.log()" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Conclusion\n", + "\n", + "In this notebook, we have demonstrated how to create custom tests in ValidMind. We have shown how to define custom test functions, register them with the ValidMind Library, run them against models and datasets, and add them to model documentation templates. We have also shown how to return tables and plots from custom tests and how to use them in the ValidMind Platform. 
We hope this tutorial has been helpful in understanding how to create and use custom tests in ValidMind." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Next steps\n", + "\n", + "You can look at the results of this test suite right in the notebook where you ran the code, as you would expect. But there is a better way — use the ValidMind Platform to work with your model documentation.\n", + "\n", + "\n", + "\n", + "### Work with your model documentation\n", + "\n", + "1. From the **Model Inventory** in the ValidMind Platform, go to the model you registered earlier. ([Need more help?](https://docs.validmind.ai/guide/model-inventory/working-with-model-inventory.html))\n", + "\n", + "2. Click and expand the **Model Development** section.\n", + "\n", + "What you see is the full draft of your model documentation in a more easily consumable version. From here, you can make qualitative edits to model documentation, view guidelines, collaborate with validators, and submit your model documentation for approval when it's ready. [Learn more ...](https://docs.validmind.ai/guide/model-documentation/working-with-model-documentation.html)\n", + "\n", + "\n", + "\n", + "### Discover more learning resources\n", + "\n", + "We offer many interactive notebooks to help you document models:\n", + "\n", + "- [Run tests & test suites](https://docs.validmind.ai/developer/model-testing/testing-overview.html)\n", + "- [Code samples](https://docs.validmind.ai/developer/samples-jupyter-notebooks.html)\n", + "\n", + "Or, visit our [documentation](https://docs.validmind.ai/) to learn more about ValidMind." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Upgrade ValidMind\n", + "\n", + "
After installing ValidMind, you’ll want to periodically make sure you are on the latest version to access any new features and other enhancements.
\n", + "\n", + "Retrieve the information for the currently installed version of ValidMind:" + ] + }, + { + "cell_type": "code", + "metadata": {}, + "source": [ + "%pip show validmind" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "If the version returned is lower than the version indicated in our [production open-source code](https://github.com/validmind/validmind-library/blob/prod/validmind/__version__.py), restart your notebook and run:\n", + "\n", + "```bash\n", + "%pip install --upgrade validmind\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You may need to restart your kernel after running the upgrade package for changes to be applied." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "\n", + "\n", + "***\n", + "\n", + "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", + "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", + "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" + ], + "id": "copyright-997b933948594ddd929ee9419957dfe3" + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.5" + } }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "::: {.content-hidden when-format=\"html\"}\n", - "## Contents \n", - "- [About ValidMind](#toc1__) \n", - " - [Before you begin](#toc1_1__) \n", - " - [New to ValidMind?](#toc1_2__) \n", - " - [Key concepts](#toc1_3__) \n", - "- [Setting up](#toc2__) \n", - " - [Install the ValidMind Library](#toc2_1__) \n", - " - [Initialize the ValidMind Library](#toc2_2__) \n", - " - [Register sample model](#toc2_2_1__) \n", - " - [Apply documentation template](#toc2_2_2__) \n", - " - [Get your code snippet](#toc2_2_3__) \n", - "- [Implement a Custom Test](#toc3__) \n", - "- [Run the Custom Test](#toc4__) \n", - " - [Setup the Model and Dataset](#toc4_1__) \n", - " - [Run the Custom Test](#toc4_2__) \n", - "- [Adding Custom Test to Model Documentation](#toc5__) \n", - "- [Some More Custom Tests](#toc6__) \n", - " - [Custom Test: Table of Model Hyperparameters](#toc6_1__) \n", - " - [Custom Test: External API Call](#toc6_2__) \n", - " - [Custom Test: Passing Parameters](#toc6_3__) \n", - " - [Custom Test: Multiple Tables and Plots in a Single Test](#toc6_4__) \n", - " - [Custom Test: Images](#toc6_5__) \n", - " - [Custom Test: Description](#toc6_6__) \n", - "- [Conclusion](#toc7__) \n", - "- [Next steps](#toc8__) \n", - " - [Work with your model documentation](#toc8_1__) \n", - " - [Discover more learning resources](#toc8_2__) \n", - "- [Upgrade ValidMind](#toc9__) \n", - "\n", - ":::\n", - "\n", - "" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ 
- "\n", - "\n", - "## About ValidMind\n", - "\n", - "ValidMind is a suite of tools for managing model risk, including risk associated with AI and statistical models.\n", - "\n", - "You use the ValidMind Library to automate documentation and validation tests, and then use the ValidMind Platform to collaborate on model documentation. Together, these products simplify model risk management, facilitate compliance with regulations and institutional standards, and enhance collaboration between yourself and model validators.\n", - "\n", - "\n", - "\n", - "### Before you begin\n", - "\n", - "This notebook assumes you have basic familiarity with Python, including an understanding of how functions work. If you are new to Python, you can still run the notebook but we recommend further familiarizing yourself with the language. \n", - "\n", - "If you encounter errors due to missing modules in your Python environment, install the modules with `pip install`, and then re-run the notebook. For more help, refer to [Installing Python Modules](https://docs.python.org/3/installing/index.html).\n", - "\n", - "\n", - "\n", - "### New to ValidMind?\n", - "\n", - "If you haven't already seen our documentation on the [ValidMind Library](https://docs.validmind.ai/developer/validmind-library.html), we recommend you begin by exploring the available resources in this section. There, you can learn more about documenting models and running tests, as well as find code samples and our Python Library API reference.\n", - "\n", - "
For access to all features available in this notebook, you'll need access to a ValidMind account.\n", - "

\n", - "Register with ValidMind
\n", - "\n", - "\n", - "\n", - "### Key concepts\n", - "\n", - "**Model documentation**: A structured and detailed record pertaining to a model, encompassing key components such as its underlying assumptions, methodologies, data sources, inputs, performance metrics, evaluations, limitations, and intended uses. It serves to ensure transparency, adherence to regulatory requirements, and a clear understanding of potential risks associated with the model’s application.\n", - "\n", - "**Documentation template**: Functions as a test suite and lays out the structure of model documentation, segmented into various sections and sub-sections. Documentation templates define the structure of your model documentation, specifying the tests that should be run, and how the results should be displayed.\n", - "\n", - "**Tests**: A function contained in the ValidMind Library, designed to run a specific quantitative test on the dataset or model. Tests are the building blocks of ValidMind, used to evaluate and document models and datasets, and can be run individually or as part of a suite defined by your model documentation template.\n", - "\n", - "**Custom tests**: Custom tests are functions that you define to evaluate your model or dataset. These functions can be registered via the ValidMind Library to be used with the ValidMind Platform.\n", - "\n", - "**Inputs**: Objects to be evaluated and documented in the ValidMind Library. 
They can be any of the following:\n", - "\n", - " - **model**: A single model that has been initialized in ValidMind with [`vm.init_model()`](https://docs.validmind.ai/validmind/validmind.html#init_model).\n", - " - **dataset**: Single dataset that has been initialized in ValidMind with [`vm.init_dataset()`](https://docs.validmind.ai/validmind/validmind.html#init_dataset).\n", - " - **models**: A list of ValidMind models - usually this is used when you want to compare multiple models in your custom test.\n", - " - **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom test. See this [example](https://docs.validmind.ai/notebooks/how_to/tests/run_tests/configure_tests/run_tests_that_require_multiple_datasets.html) for more information.\n", - "\n", - "**Parameters**: Additional arguments that can be passed when running a ValidMind test, used to pass additional information to a test, customize its behavior, or provide additional context.\n", - "\n", - "**Outputs**: Custom test can return elements like tables or plots. Tables may be a list of dictionaries (each representing a row) or a pandas DataFrame. Plots may be matplotlib or plotly figures.\n", - "\n", - "**Test suites**: Collections of tests designed to run together to automate and generate model documentation end-to-end for specific use-cases.\n", - "\n", - "Example: the [`classifier_full_suite`](https://docs.validmind.ai/validmind/validmind/test_suites/classifier.html#ClassifierFullSuite) test suite runs tests from the [`tabular_dataset`](https://docs.validmind.ai/validmind/validmind/test_suites/tabular_datasets.html) and [`classifier`](https://docs.validmind.ai/validmind/validmind/test_suites/classifier.html) test suites to fully document the data and model sections for binary classification model use-cases." 
- ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "## Setting up" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Install the ValidMind Library\n", - "\n", - "To install the library:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%pip install -q validmind" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Initialize the ValidMind Library" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "#### Register sample model\n", - "\n", - "Let's first register a sample model for use with this notebook:\n", - "\n", - "1. In a browser, [log in to ValidMind](https://docs.validmind.ai/guide/configuration/log-in-to-validmind.html).\n", - "\n", - "2. In the left sidebar, navigate to **Inventory** and click **+ Register Model**.\n", - "\n", - "3. Enter the model details and click **Next >** to continue to assignment of model stakeholders. ([Need more help?](https://docs.validmind.ai/guide/model-inventory/register-models-in-inventory.html))\n", - "\n", - "4. Select your own name under the **MODEL OWNER** drop-down.\n", - "\n", - "5. Click **Register Model** to add the model to your inventory." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "#### Apply documentation template\n", - "\n", - "Once you've registered your model, let's select a documentation template. A template predefines sections for your model documentation and provides a general outline to follow, making the documentation process much easier.\n", - "\n", - "1. In the left sidebar that appears for your model, click **Documents** and select **Documentation**.\n", - "\n", - "2. Under **TEMPLATE**, select `Binary classification`.\n", - "\n", - "3. Click **Use Template** to apply the template." 
- ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "#### Get your code snippet\n", - "\n", - "ValidMind generates a unique _code snippet_ for each registered model to connect with your developer environment. You initialize the ValidMind Library with this code snippet, which ensures that your documentation and tests are uploaded to the correct model when you run the notebook.\n", - "\n", - "1. On the left sidebar that appears for your model, select **Getting Started** and click **Copy snippet to clipboard**.\n", - "2. Next, [load your model identifier credentials from an `.env` file](https://docs.validmind.ai/developer/model-documentation/store-credentials-in-env-file.html) or replace the placeholder with your own code snippet:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Load your model identifier credentials from an `.env` file\n", - "\n", - "%load_ext dotenv\n", - "%dotenv .env\n", - "\n", - "# Or replace with your code snippet\n", - "\n", - "import validmind as vm\n", - "\n", - "vm.init(\n", - " # api_host=\"...\",\n", - " # api_key=\"...\",\n", - " # api_secret=\"...\",\n", - " # model=\"...\",\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "## Implement a Custom Test\n", - "\n", - "Let's start off by creating a simple custom test that creates a Confusion Matrix for a binary classification model. We will use the `sklearn.metrics.confusion_matrix` function to calculate the confusion matrix and then display it as a heatmap using `plotly`. 
(This is already a built-in test in ValidMind, but we will use it as an example to demonstrate how to create custom tests.)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import matplotlib.pyplot as plt\n", - "from sklearn import metrics\n", - "\n", - "\n", - "@vm.test(\"my_custom_tests.ConfusionMatrix\")\n", - "def confusion_matrix(dataset, model):\n", - " \"\"\"The confusion matrix is a table that is often used to describe the performance of a classification model on a set of data for which the true values are known.\n", - "\n", - " The confusion matrix is a 2x2 table that contains 4 values:\n", - "\n", - " - True Positive (TP): the number of correct positive predictions\n", - " - True Negative (TN): the number of correct negative predictions\n", - " - False Positive (FP): the number of incorrect positive predictions\n", - " - False Negative (FN): the number of incorrect negative predictions\n", - "\n", - " The confusion matrix can be used to assess the holistic performance of a classification model by showing the accuracy, precision, recall, and F1 score of the model on a single figure.\n", - " \"\"\"\n", - " y_true = dataset.y\n", - " y_pred = dataset.y_pred(model)\n", - "\n", - " confusion_matrix = metrics.confusion_matrix(y_true, y_pred)\n", - "\n", - " cm_display = metrics.ConfusionMatrixDisplay(\n", - " confusion_matrix=confusion_matrix, display_labels=[False, True]\n", - " )\n", - " cm_display.plot()\n", - "\n", - " plt.close() # close the plot to avoid displaying it\n", - "\n", - " return cm_display.figure_ # return the figure object itself" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Thats our custom test defined and ready to go... Let's take a look at whats going on here:\n", - "\n", - "- The function `confusion_matrix` takes two arguments `dataset` and `model`. 
This is a VMDataset and VMModel object respectively.\n", - "- The function docstring provides a description of what the test does. This will be displayed along with the result in this notebook as well as in the ValidMind Platform.\n", - "- The function body calculates the confusion matrix using the `sklearn.metrics.confusion_matrix` function and then plots it using `sklearn.metric.ConfusionMatrixDisplay`.\n", - "- The function then returns the `ConfusionMatrixDisplay.figure_` object - this is important as the ValidMind Library expects the output of the custom test to be a plot or a table.\n", - "- The `@vm.test` decorator is doing the work of creating a wrapper around the function that will allow it to be run by the ValidMind Library. It also registers the test so it can be found by the ID `my_custom_tests.ConfusionMatrix` (see the section below on how test IDs work in ValidMind and why this format is important)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "## Run the Custom Test\n", - "\n", - "Now that we have defined and registered our custom test, lets see how we can run it and properly use it in the ValidMind Platform." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Setup the Model and Dataset\n", - "\n", - "First let's setup a an example model and dataset to run our custom metic against. Since this is a Confusion Matrix, we will use the Customer Churn dataset that ValidMind provides and train a simple XGBoost model." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import xgboost as xgb\n", - "from validmind.datasets.classification import customer_churn\n", - "\n", - "raw_df = customer_churn.load_data()\n", - "train_df, validation_df, test_df = customer_churn.preprocess(raw_df)\n", - "\n", - "x_train = train_df.drop(customer_churn.target_column, axis=1)\n", - "y_train = train_df[customer_churn.target_column]\n", - "x_val = validation_df.drop(customer_churn.target_column, axis=1)\n", - "y_val = validation_df[customer_churn.target_column]\n", - "\n", - "model = xgb.XGBClassifier(early_stopping_rounds=10)\n", - "model.set_params(\n", - " eval_metric=[\"error\", \"logloss\", \"auc\"],\n", - ")\n", - "model.fit(\n", - " x_train,\n", - " y_train,\n", - " eval_set=[(x_val, y_val)],\n", - " verbose=False,\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Easy enough! Now we have a model and dataset setup and trained. One last thing to do is bring the dataset and model into the ValidMind Library:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# for now, we'll just use the test dataset\n", - "vm_test_ds = vm.init_dataset(\n", - " dataset=test_df,\n", - " target_column=customer_churn.target_column,\n", - " input_id=\"test_dataset\",\n", - ")\n", - "\n", - "vm_model = vm.init_model(model, input_id=\"model\")\n", - "\n", - "# link the model to the dataset\n", - "vm_test_ds.assign_predictions(model=vm_model)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Run the Custom Test\n", - "\n", - "Now that we have our model and dataset setup, we have everything we need to run our custom test. 
We can do this by importing the `run_test` function from the `validmind.tests` module and passing in the test ID of our custom test along with the model and dataset we want to run it against.\n", - "\n", - ">Notice how the `inputs` dictionary is used to map an `input_id` which we set above to the `model` and `dataset` keys that are expected by our custom test function. This is how the ValidMind Library knows which inputs to pass to different tests and is key when using many different datasets and models." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from validmind.tests import run_test\n", - "\n", - "result = run_test(\n", - " \"my_custom_tests.ConfusionMatrix\",\n", - " inputs={\"model\": \"model\", \"dataset\": \"test_dataset\"},\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You'll notice that the docstring becomes a markdown description of the test. The figure is then displayed as the test result. What you see above is how it will look in the ValidMind Platform as well. Let's go ahead and log the result to see how that works." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "result.log()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "## Adding Custom Test to Model Documentation\n", - "\n", - "To do this, go to the documentation page of the model you registered above and navigate to the `Model Development` -> `Model Evaluation` section. Then hover between any existing content block to reveal the `+` button as shown in the screenshot below.\n", - "\n", - "![screenshot showing insert button for test-driven blocks](./insert-test-driven-block.png)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now click on the `+` button and select the `Test-Driven Block` option. 
This will open a dialog where you can select `My Custom Tests Confusion Matrix` from the list of available tests. You can preview the result and then click `Insert Block` to add it to the documentation.\n", - "\n", - "![screenshot showing how to insert a test-driven block](./insert-test-driven-block-custom.png)\n", - "\n", - "The test should match the result you see above. It is now part of your documentation and will now be run everytime you run `vm.run_documentation_tests()` for your model. Let's do that now." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "vm.reload()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "If you preview the template, it should show the custom test in the `Model Development`->`Model Evaluation` section:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "vm.preview_template()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Just so we can run all of the tests in the template, let's initialize the train and raw dataset.\n", - "\n", - "(Refer to [**Quickstart for model documentation**](../../quickstart/quickstart_model_documentation.ipynb) and the ValidMind docs for more information on what we are doing here)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "vm_raw_dataset = vm.init_dataset(\n", - " dataset=raw_df,\n", - " input_id=\"raw_dataset\",\n", - " target_column=customer_churn.target_column,\n", - " class_labels=customer_churn.class_labels,\n", - ")\n", - "\n", - "vm_train_ds = vm.init_dataset(\n", - " dataset=train_df,\n", - " input_id=\"train_dataset\",\n", - " target_column=customer_churn.target_column,\n", - ")\n", - "vm_train_ds.assign_predictions(model=vm_model)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "To run all the tests in the template, you 
can use the `vm.run_documentation_tests()` and pass the inputs we initialized above and the demo config from our customer_churn module. We will have to add a section to the config for our new test to tell it which inputs it should receive. This is done by simply adding a new element in the config dictionary where the key is the ID of the test and the value is a dictionary with the following structure:\n", - "```python\n", - "{\n", - " \"inputs\": {\n", - " \"model\": \"test_dataset\",\n", - " \"dataset\": \"model\",\n", - " }\n", - "}\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from validmind.utils import preview_test_config\n", - "\n", - "test_config = customer_churn.get_demo_test_config()\n", - "test_config[\"my_custom_tests.ConfusionMatrix\"] = {\n", - " \"inputs\": {\n", - " \"dataset\": \"test_dataset\",\n", - " \"model\": \"model\",\n", - " }\n", - "}\n", - "preview_test_config(test_config)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "full_suite = vm.run_documentation_tests(config=test_config)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "## Some More Custom Tests\n", - "\n", - "Now that you understand the entire process of creating custom tests and using them in your documentation, let's create a few more to see different ways you can utilize custom tests." 
- ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Custom Test: Table of Model Hyperparameters\n", - "\n", - "This custom test will display a table of the hyperparameters used in the model:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "@vm.test(\"my_custom_tests.Hyperparameters\")\n", - "def hyperparameters(model):\n", - " \"\"\"The hyperparameters of a machine learning model are the settings that control the learning process.\n", - " These settings are specified before the learning process begins and can have a significant impact on the\n", - " performance of the model.\n", - "\n", - " The hyperparameters of a model can be used to tune the model to achieve the best possible performance\n", - " on a given dataset. By examining the hyperparameters of a model, you can gain insight into how the model\n", - " was trained and how it might be improved.\n", - " \"\"\"\n", - " hyperparameters = model.model.get_xgb_params() # dictionary of hyperparameters\n", - "\n", - " # turn the dictionary into a table where each row contains a hyperparameter and its value\n", - " return [{\"Hyperparam\": k, \"Value\": v} for k, v in hyperparameters.items() if v]\n", - "\n", - "\n", - "result = run_test(\"my_custom_tests.Hyperparameters\", inputs={\"model\": \"model\"})\n", - "result.log()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Since the test has been run and logged, you can add it to your documentation using the same process as above. It should look like this:\n", - "\n", - "![screenshot showing hyperparameters test](./hyperparameters-custom-metric.png)\n", - "\n", - "For our simple toy model, there are aren't really any proper hyperparameters but you can see how this could be useful for more complex models that have gone through hyperparameter tuning." 
- ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Custom Test: External API Call\n", - "\n", - "This custom test will make an external API call to get the current BTC price and display it as a table. This demonstrates how you might integrate external data sources into your model documentation in a programmatic way. You could, for instance, setup a pipeline that runs a test like this every day to keep your model documentation in sync with an external system." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import requests\n", - "import random\n", - "\n", - "\n", - "@vm.test(\"my_custom_tests.ExternalAPI\")\n", - "def external_api():\n", - " \"\"\"This test calls an external API to get a list of fake users. It then creates\n", - " a table with the relevant data so it can be displayed in the documentation.\n", - "\n", - " The purpose of this test is to demonstrate how to call an external API and use the\n", - " data in a test. 
A test like this could even be setup to run in a scheduled\n", - " pipeline to keep your documentation in-sync with an external data source.\n", - " \"\"\"\n", - " url = \"https://jsonplaceholder.typicode.com/users\"\n", - " response = requests.get(url)\n", - " data = response.json()\n", - "\n", - " # extract the time and the current BTC price in USD\n", - " return {\n", - " \"Model Owners/Stakeholders\": [\n", - " {\n", - " \"Name\": user[\"name\"],\n", - " \"Role\": random.choice([\"Owner\", \"Stakeholder\"]),\n", - " \"Email\": user[\"email\"],\n", - " \"Phone\": user[\"phone\"],\n", - " \"Slack Handle\": f\"@{user['name'].lower().replace(' ', '.')}\",\n", - " }\n", - " for user in data[:3]\n", - " ],\n", - " \"Model Developers\": [\n", - " {\n", - " \"Name\": user[\"name\"],\n", - " \"Role\": \"Developer\",\n", - " \"Email\": user[\"email\"],\n", - " }\n", - " for user in data[3:7]\n", - " ],\n", - " \"Model Validators\": [\n", - " {\n", - " \"Name\": user[\"name\"],\n", - " \"Role\": \"Validator\",\n", - " \"Email\": user[\"email\"],\n", - " }\n", - " for user in data[7:]\n", - " ],\n", - " }\n", - "\n", - "\n", - "result = run_test(\"my_custom_tests.ExternalAPI\")\n", - "result.log()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Again, you can add this to your documentation to see how it looks:\n", - "\n", - "![screenshot showing BTC price metric](./external-data-custom-test.png)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Custom Test: Passing Parameters\n", - "\n", - "Custom test functions, as stated earlier, can take both inputs and params. When you define your function there is no need to distinguish between the two, the ValidMind Library will handle that for you. 
You simply need to add both to the function as arguments and the library will pass in the correct values.\n", - "\n", - "So for instance, if you wanted to parameterize the first custom test we created, the confusion matrix, you could do so like this:\n", - "\n", - "```python\n", - "def confusion_matrix(dataset: VMDataset, model: VMModel, my_param: str = \"Default Value\"):\n", - " pass\n", - "```\n", - "\n", - "And then when you run the test, you can pass in the parameter like this:\n", - "\n", - "```python\n", - "vm.run_test(\n", - " \"my_custom_tests.ConfusionMatrix\",\n", - " inputs={\"model\": \"model\", \"dataset\": \"test_dataset\"},\n", - " params={\"my_param\": \"My Value\"},\n", - ")\n", - "```\n", - "\n", - "Or if you are running the entire documentation template, you would update the config like this:\n", - "\n", - "```python\n", - "test_config[\"my_custom_tests.ConfusionMatrix\"] = {\n", - " \"inputs\": {\n", - " \"dataset\": \"test_dataset\",\n", - " \"model\": \"model\",\n", - " },\n", - " \"params\": {\n", - " \"my_param\": \"My Value\",\n", - " },\n", - "}\n", - "```\n", - "\n", - "Let's go ahead and create a toy test that takes a parameter and uses it in the result:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import plotly.express as px\n", - "\n", - "\n", - "@vm.test(\"my_custom_tests.ParameterExample\")\n", - "def parameter_example(\n", - " plot_title=\"Default Plot Title\", x_col=\"sepal_width\", y_col=\"sepal_length\"\n", - "):\n", - " \"\"\"This test takes two parameters and creates a scatter plot based on them.\n", - "\n", - " The purpose of this test is to demonstrate how to create a test that takes\n", - " parameters and uses them to generate a plot. 
This can be useful for creating\n", - " tests that are more flexible and can be used in a variety of scenarios.\n", - " \"\"\"\n", - " # return px.scatter(px.data.iris(), x=x_col, y=y_col, color=\"species\")\n", - " return px.scatter(\n", - " px.data.iris(), x=x_col, y=y_col, color=\"species\", title=plot_title\n", - " )\n", - "\n", - "\n", - "result = run_test(\n", - " \"my_custom_tests.ParameterExample\",\n", - " params={\n", - " \"plot_title\": \"My Cool Plot\",\n", - " \"x_col\": \"sepal_width\",\n", - " \"y_col\": \"sepal_length\",\n", - " },\n", - ")\n", - "result.log()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Play around with this and see how you can use parameters, default values and other features to make your custom tests more flexible and useful.\n", - "\n", - "Here's how this one looks in the documentation:\n", - "![screenshot showing parameterized test](./parameterized-custom-metric.png)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Custom Test: Multiple Tables and Plots in a Single Test\n", - "\n", - "Custom test functions, as stated earlier, can return more than just one table or plot. In fact, any number of tables and plots can be returned. 
Let's see an example of this:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import numpy as np\n", - "import plotly.express as px\n", - "\n", - "\n", - "@vm.test(\"my_custom_tests.ComplexOutput\")\n", - "def complex_output():\n", - " \"\"\"This test demonstrates how to return many tables and figures in a single test\"\"\"\n", - " # create a couple tables\n", - " table = [{\"A\": 1, \"B\": 2}, {\"A\": 3, \"B\": 4}]\n", - " table2 = [{\"C\": 5, \"D\": 6}, {\"C\": 7, \"D\": 8}]\n", - "\n", - " # create a few figures showing some random data\n", - " fig1 = px.line(x=np.arange(10), y=np.random.rand(10), title=\"Random Line Plot\")\n", - " fig2 = px.bar(x=[\"A\", \"B\", \"C\"], y=np.random.rand(3), title=\"Random Bar Plot\")\n", - " fig3 = px.scatter(\n", - " x=np.random.rand(10), y=np.random.rand(10), title=\"Random Scatter Plot\"\n", - " )\n", - "\n", - " return (\n", - " {\n", - " \"My Cool Table\": table,\n", - " \"Another Table\": table2,\n", - " },\n", - " fig1,\n", - " fig2,\n", - " fig3,\n", - " )\n", - "\n", - "\n", - "result = run_test(\"my_custom_tests.ComplexOutput\")\n", - "result.log()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Notice how you can return the tables as a dictionary where the key is the title of the table and the value is the table itself. You could also just return the tables by themselves but this way you can give them a title to more easily identify them in the result.\n", - "\n", - "![screenshot showing multiple tables and plots](./multiple-tables-plots-custom-metric.png)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Custom Test: Images\n", - "\n", - "If you are using a plotting library that isn't supported by ValidMind (i.e. not `matplotlib` or `plotly`), you can still return the image directly as a bytes-like object. 
This could also be used to bring any type of image into your documentation in a programmatic way. For instance, you may want to include a diagram of your model architecture or a screenshot of a dashboard that your model is integrated with. As long as you can produce the image with Python or open it from a file, you can include it in your documentation." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import io\n", - "import matplotlib.pyplot as plt\n", - "\n", - "\n", - "@vm.test(\"my_custom_tests.Image\")\n", - "def image():\n", - " \"\"\"This test demonstrates how to return an image in a test\"\"\"\n", - "\n", - " # create a simple plot\n", - " fig, ax = plt.subplots()\n", - " ax.plot([1, 2, 3, 4])\n", - " ax.set_title(\"Simple Line Plot\")\n", - "\n", - " # save the plot as a PNG image (in-memory buffer)\n", - " img_data = io.BytesIO()\n", - " fig.savefig(img_data, format=\"png\")\n", - " img_data.seek(0)\n", - "\n", - " plt.close() # close the plot to avoid displaying it\n", - "\n", - " return img_data.read()\n", - "\n", - "\n", - "result = run_test(\"my_custom_tests.Image\")\n", - "result.log()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Adding this custom test to your documentation will display the image:\n", - "\n", - "![screenshot showing image custom test](./image-in-custom-metric.png)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "If you want to log an image as a test result, you can do so by passing the path to the image as a parameter to the custom test and then opening the file in the test function. 
Here's an example:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "@vm.test(\"my_custom_tests.MyPNGCorrelationMatrix\")\n", - "def Image(path: str):\n", - " \"\"\"Opens a png image file and logs it as a test result to ValidMind\"\"\"\n", - " if not path.endswith(\".png\"):\n", - " raise ValueError(\"Image must be a PNG file\")\n", - "\n", - " # return raw image bytes\n", - " with open(path, \"rb\") as f:\n", - " return f.read()\n", - " \n", - "run_test(\n", - " \"my_custom_tests.MyPNGCorrelationMatrix\",\n", - " params={\"path\": \"./pearson-correlation-matrix.png\"},\n", - ").log()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The image is displayed in the test result:\n", - "\n", - "![screenshot showing image from file](./pearson-correlation-matrix-test-output.png)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Custom Test: Description\n", - "\n", - "If you want to write a custom test description for your custom test instead of it is interpreted through llm, you can do so by returning string in your test." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import pandas as pd\n", - "\n", - "@vm.test(\"my_custom_tests.MyCustomTest\")\n", - "def my_custom_test(dataset, model):\n", - " \"\"\"\n", - " This is a custom computed test that computes confusion matrix for a binary classification model and return a string as a test description.\n", - " \"\"\"\n", - " y_true = dataset.y\n", - " y_pred = dataset.y_pred(model)\n", - "\n", - " confusion_matrix = metrics.confusion_matrix(y_true, y_pred)\n", - "\n", - " cm_display = metrics.ConfusionMatrixDisplay(\n", - " confusion_matrix=confusion_matrix, display_labels=[False, True]\n", - " )\n", - " cm_display.plot()\n", - "\n", - " plt.close() # close the plot to avoid displaying it\n", - "\n", - " return cm_display.figure_, \"Test Description - Confusion Matrix\", pd.DataFrame({\"Value\": [1, 2, 3]}) # return the figure object itself\n", - "\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You can see here test result description has been customized here. The same result description will be displayed in the UI." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "result = run_test(\n", - " \"my_custom_tests.MyCustomTest\",\n", - " inputs={\"model\": \"model\", \"dataset\": \"test_dataset\"},\n", - ")\n", - "result.log()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "## Conclusion\n", - "\n", - "In this notebook, we have demonstrated how to create custom tests in ValidMind. We have shown how to define custom test functions, register them with the ValidMind Library, run them against models and datasets, and add them to model documentation templates. We have also shown how to return tables and plots from custom tests and how to use them in the ValidMind Platform. 
We hope this tutorial has been helpful in understanding how to create and use custom tests in ValidMind." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "## Next steps\n", - "\n", - "You can look at the results of this test suite right in the notebook where you ran the code, as you would expect. But there is a better way — use the ValidMind Platform to work with your model documentation.\n", - "\n", - "\n", - "\n", - "### Work with your model documentation\n", - "\n", - "1. From the **Model Inventory** in the ValidMind Platform, go to the model you registered earlier. ([Need more help?](https://docs.validmind.ai/guide/model-inventory/working-with-model-inventory.html))\n", - "\n", - "2. Click and expand the **Model Development** section.\n", - "\n", - "What you see is the full draft of your model documentation in a more easily consumable version. From here, you can make qualitative edits to model documentation, view guidelines, collaborate with validators, and submit your model documentation for approval when it's ready. [Learn more ...](https://docs.validmind.ai/guide/model-documentation/working-with-model-documentation.html)\n", - "\n", - "\n", - "\n", - "### Discover more learning resources\n", - "\n", - "We offer many interactive notebooks to help you document models:\n", - "\n", - "- [Run tests & test suites](https://docs.validmind.ai/developer/model-testing/testing-overview.html)\n", - "- [Code samples](https://docs.validmind.ai/developer/samples-jupyter-notebooks.html)\n", - "\n", - "Or, visit our [documentation](https://docs.validmind.ai/) to learn more about ValidMind." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "## Upgrade ValidMind\n", - "\n", - "
After installing ValidMind, you’ll want to periodically make sure you are on the latest version to access any new features and other enhancements.
\n", - "\n", - "Retrieve the information for the currently installed version of ValidMind:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%pip show validmind" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "If the version returned is lower than the version indicated in our [production open-source code](https://github.com/validmind/validmind-library/blob/prod/validmind/__version__.py), restart your notebook and run:\n", - "\n", - "```bash\n", - "%pip install --upgrade validmind\n", - "```" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You may need to restart your kernel after running the upgrade package for changes to be applied." - ] - }, - { - "cell_type": "markdown", - "id": "copyright-997b933948594ddd929ee9419957dfe3", - "metadata": {}, - "source": [ - "\n", - "\n", - "\n", - "\n", - "***\n", - "\n", - "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", - "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", - "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.5" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} + "nbformat": 4, + "nbformat_minor": 4 +} \ No newline at end of file From 1628c2d446fea5dcfc155522753dd48728d2e6d5 Mon Sep 17 00:00:00 2001 From: Beck <164545837+validbeck@users.noreply.github.com> Date: Tue, 10 Feb 2026 10:31:43 -0800 Subject: [PATCH 11/14] Mucked up some missing end divs --- .../use_dataset_model_objects.ipynb | 166 +- .../custom_tests/implement_custom_tests.ipynb | 132 +- .../agents/document_agentic_ai.ipynb | 4389 +++++++++-------- ...start_option_pricing_models_quantlib.ipynb | 2685 +++++----- .../quickstart_code_explainer_demo.ipynb | 1743 +++---- .../nlp_and_llm/rag_benchmark_demo.ipynb | 3733 +++++++------- .../nlp_and_llm/rag_documentation_demo.ipynb | 3379 ++++++------- 7 files changed, 8116 insertions(+), 8111 deletions(-) diff --git a/notebooks/how_to/data_and_datasets/use_dataset_model_objects.ipynb b/notebooks/how_to/data_and_datasets/use_dataset_model_objects.ipynb index 5394f131b..7ea025faa 100644 --- a/notebooks/how_to/data_and_datasets/use_dataset_model_objects.ipynb +++ b/notebooks/how_to/data_and_datasets/use_dataset_model_objects.ipynb @@ -147,12 +147,12 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ "%pip install -q validmind" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -217,9 +217,11 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "metadata": {} }, + "outputs": [], "source": [ "# Load your model identifier credentials from an `.env` file\n", "\n", @@ -236,20 +238,18 @@ " # 
api_secret=\"...\",\n", " # model=\"...\",\n", ")" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ "%matplotlib inline\n", "\n", "import xgboost as xgb" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -262,14 +262,14 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ "from validmind.datasets.classification import customer_churn as demo_dataset\n", "\n", "raw_df = demo_dataset.load_data()" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -282,12 +282,12 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ "train_df, validation_df, test_df = demo_dataset.preprocess(raw_df)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -302,7 +302,9 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ "x_train = train_df.drop(demo_dataset.target_column, axis=1)\n", "y_train = train_df[demo_dataset.target_column]\n", @@ -319,9 +321,7 @@ " eval_set=[(x_val, y_val)],\n", " verbose=False,\n", ")" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -370,7 +370,9 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ "# vm_raw_dataset is now a VMDataset object that you can pass to any ValidMind test\n", "vm_raw_dataset = vm.init_dataset(\n", @@ -378,9 +380,7 @@ " input_id=\"raw_dataset\",\n", " target_column=\"Exited\",\n", ")" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -391,13 +391,13 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ "from validmind.utils import inspect_obj\n", "inspect_obj(vm_raw_dataset)" - ], - "execution_count": null, - "outputs": [] + ] }, { 
"cell_type": "markdown", @@ -412,12 +412,12 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ "vm_raw_dataset.df" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -428,12 +428,12 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ "vm_raw_dataset.feature_columns" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -444,12 +444,12 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ "vm_raw_dataset.target_column" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -460,12 +460,12 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ "vm_raw_dataset.x_df()" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -476,12 +476,12 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ "vm_raw_dataset.y_df()" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -492,12 +492,12 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ "vm_raw_dataset.feature_columns_numeric" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -508,12 +508,12 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ "vm_raw_dataset.feature_columns_categorical" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -545,7 +545,9 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ "from validmind.vm_models.dataset.dataset import VMDataset\n", "import pandas as pd\n", @@ -570,9 +572,7 @@ " percentages = list(imbalance_percentages.values * 100)\n", "\n", " return pd.DataFrame({\"Classes\":classes, 
\"Percentage\": percentages})" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -587,7 +587,9 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ "from validmind.tests import run_test\n", "result = run_test(\n", @@ -596,9 +598,7 @@ " \"dataset\": vm_raw_dataset\n", " }\n", ")" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -621,7 +621,9 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ "from validmind.vm_models.dataset.dataset import VMDataset\n", "import pandas as pd\n", @@ -648,9 +650,7 @@ " else:\n", " result = pd.DataFrame({\"Classes\":classes, \"Count\": list(imbalance_percentages.values)})\n", " return result" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -663,7 +663,9 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ "from validmind.tests import run_test\n", "result = run_test(\n", @@ -671,9 +673,7 @@ " inputs={\"dataset\": vm_raw_dataset},\n", " params={\"normalize\": True},\n", ")" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -704,16 +704,16 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ "\n", "vm_model = vm.init_model(\n", " model=model,\n", " input_id=\"xgb_model\",\n", ")" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -724,12 +724,12 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ "inspect_obj(vm_model)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -744,7 +744,9 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ "vm_train_ds = vm.init_dataset(\n", " input_id=\"train_dataset\",\n", @@ -754,9 +756,7 @@ ")\n", "\n", 
"vm_train_ds.assign_predictions(model=vm_model)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -767,12 +767,12 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ "print(vm_train_ds)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -797,7 +797,9 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ "from sklearn import metrics\n", "import matplotlib.pyplot as plt\n", @@ -828,9 +830,7 @@ " plt.close()\n", "\n", " return cm_display.figure_ # return the figure object itself" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -841,7 +841,9 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ "from validmind.tests import run_test\n", "result = run_test(\n", @@ -851,9 +853,7 @@ " \"model\": vm_model,\n", " }\n", ")" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -870,12 +870,12 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ "result.log()" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -937,12 +937,12 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ "%pip show validmind" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -964,6 +964,7 @@ }, { "cell_type": "markdown", + "id": "copyright-340a990e20194848af0efb0c965e219a", "metadata": {}, "source": [ "\n", @@ -975,8 +976,7 @@ "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial" - ], - "id": "copyright-340a990e20194848af0efb0c965e219a" + ] } ], "metadata": { diff --git a/notebooks/how_to/tests/custom_tests/implement_custom_tests.ipynb b/notebooks/how_to/tests/custom_tests/implement_custom_tests.ipynb index 0fbb88437..638033b4e 100644 --- a/notebooks/how_to/tests/custom_tests/implement_custom_tests.ipynb +++ b/notebooks/how_to/tests/custom_tests/implement_custom_tests.ipynb @@ -138,12 +138,12 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ "%pip install -q validmind" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -208,7 +208,9 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ "# Load your model identifier credentials from an `.env` file\n", "\n", @@ -225,9 +227,7 @@ " # api_secret=\"...\",\n", " # model=\"...\",\n", ")" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -242,7 +242,9 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ "import matplotlib.pyplot as plt\n", "from sklearn import metrics\n", @@ -274,9 +276,7 @@ " plt.close() # close the plot to avoid displaying it\n", "\n", " return cm_display.figure_ # return the figure object itself" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -315,7 +315,9 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ "import xgboost as xgb\n", "from validmind.datasets.classification import customer_churn\n", @@ -338,9 +340,7 @@ " eval_set=[(x_val, y_val)],\n", " verbose=False,\n", ")" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -351,7 +351,9 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ "# for now, we'll just use the test dataset\n", 
"vm_test_ds = vm.init_dataset(\n", @@ -364,9 +366,7 @@ "\n", "# link the model to the dataset\n", "vm_test_ds.assign_predictions(model=vm_model)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -383,7 +383,9 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ "from validmind.tests import run_test\n", "\n", @@ -391,9 +393,7 @@ " \"my_custom_tests.ConfusionMatrix\",\n", " inputs={\"model\": \"model\", \"dataset\": \"test_dataset\"},\n", ")" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -404,12 +404,12 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ "result.log()" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -437,12 +437,12 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ "vm.reload()" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -453,12 +453,12 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ "vm.preview_template()" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -471,7 +471,9 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ "vm_raw_dataset = vm.init_dataset(\n", " dataset=raw_df,\n", @@ -486,9 +488,7 @@ " target_column=customer_churn.target_column,\n", ")\n", "vm_train_ds.assign_predictions(model=vm_model)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -507,7 +507,9 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ "from validmind.utils import preview_test_config\n", "\n", @@ -519,18 +521,16 @@ " }\n", "}\n", "preview_test_config(test_config)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + 
"execution_count": null, "metadata": {}, + "outputs": [], "source": [ "full_suite = vm.run_documentation_tests(config=test_config)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -556,7 +556,9 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ "@vm.test(\"my_custom_tests.Hyperparameters\")\n", "def hyperparameters(model):\n", @@ -576,9 +578,7 @@ "\n", "result = run_test(\"my_custom_tests.Hyperparameters\", inputs={\"model\": \"model\"})\n", "result.log()" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -604,7 +604,9 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ "import requests\n", "import random\n", @@ -656,9 +658,7 @@ "\n", "result = run_test(\"my_custom_tests.ExternalAPI\")\n", "result.log()" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -715,7 +715,9 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ "import plotly.express as px\n", "\n", @@ -745,9 +747,7 @@ " },\n", ")\n", "result.log()" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -772,7 +772,9 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ "import numpy as np\n", "import plotly.express as px\n", @@ -805,9 +807,7 @@ "\n", "result = run_test(\"my_custom_tests.ComplexOutput\")\n", "result.log()" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -831,7 +831,9 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ "import io\n", "import matplotlib.pyplot as plt\n", @@ -858,9 +860,7 @@ "\n", "result = run_test(\"my_custom_tests.Image\")\n", "result.log()" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -880,7 +880,9 @@ }, { "cell_type": 
"code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ "@vm.test(\"my_custom_tests.MyPNGCorrelationMatrix\")\n", "def Image(path: str):\n", @@ -896,9 +898,7 @@ " \"my_custom_tests.MyPNGCorrelationMatrix\",\n", " params={\"path\": \"./pearson-correlation-matrix.png\"},\n", ").log()" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -922,7 +922,9 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ "import pandas as pd\n", "\n", @@ -945,9 +947,7 @@ "\n", " return cm_display.figure_, \"Test Description - Confusion Matrix\", pd.DataFrame({\"Value\": [1, 2, 3]}) # return the figure object itself\n", "\n" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -958,16 +958,16 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ "result = run_test(\n", " \"my_custom_tests.MyCustomTest\",\n", " inputs={\"model\": \"model\", \"dataset\": \"test_dataset\"},\n", ")\n", "result.log()" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -1027,12 +1027,12 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ "%pip show validmind" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -1054,6 +1054,7 @@ }, { "cell_type": "markdown", + "id": "copyright-997b933948594ddd929ee9419957dfe3", "metadata": {}, "source": [ "\n", @@ -1065,8 +1066,7 @@ "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial" - ], - "id": "copyright-997b933948594ddd929ee9419957dfe3" + ] } ], "metadata": { @@ -1090,4 +1090,4 @@ }, "nbformat": 4, "nbformat_minor": 4 -} \ No newline at end of file +} diff --git a/notebooks/use_cases/agents/document_agentic_ai.ipynb b/notebooks/use_cases/agents/document_agentic_ai.ipynb index 8042b5f60..4b1a6fb7a 100644 --- a/notebooks/use_cases/agents/document_agentic_ai.ipynb +++ b/notebooks/use_cases/agents/document_agentic_ai.ipynb @@ -1,2196 +1,2197 @@ { - "cells": [ - { - "cell_type": "markdown", - "id": "e7277c38", - "metadata": {}, - "source": [ - "# Document an agentic AI system\n", - "\n", - "Build and document an agentic AI system with the ValidMind Library. Construct a LangGraph-based banking agent, assign AI evaluation metric scores to your agent, and run accuracy, RAGAS, and safety tests, then log those test results to the ValidMind Platform.\n", - "\n", - "An _AI agent_ is an autonomous system that interprets inputs, selects from available tools or actions, and executes multi-step behaviors to achieve defined goals. In this notebook, the agent acts as a banking assistant that analyzes user requests and automatically selects and invokes the appropriate specialized banking tool to deliver accurate, compliant, and actionable responses.\n", - "\n", - "- This agent enables financial institutions to automate complex banking workflows where different customer requests require different specialized tools and knowledge bases.\n", - "- Effective validation of agentic AI systems reduces the risks of agents misinterpreting inputs, failing to extract required parameters, or producing incorrect assessments or actions — such as selecting the wrong tool.\n", - "\n", - "
For the LLM components in this notebook to function properly, you'll need access to OpenAI.\n", - "

\n", - "Before you continue, ensure that a valid OPENAI_API_KEY is set in your .env file.
" - ] - }, - { - "cell_type": "markdown", - "id": "a47dd942", - "metadata": {}, - "source": [ - "::: {.content-hidden when-format=\"html\"}\n", - "## Contents \n", - "- [About ValidMind](#toc1__) \n", - " - [Before you begin](#toc1_1__) \n", - " - [New to ValidMind?](#toc1_2__) \n", - " - [Key concepts](#toc1_3__) \n", - "- [Setting up](#toc2__) \n", - " - [Install the ValidMind Library](#toc2_1__) \n", - " - [Initialize the ValidMind Library](#toc2_2__) \n", - " - [Register sample model](#toc2_2_1__) \n", - " - [Apply documentation template](#toc2_2_2__) \n", - " - [Get your code snippet](#toc2_2_3__) \n", - " - [Preview the documentation template](#toc2_2_4__) \n", - " - [Verify OpenAI API access](#toc2_3__) \n", - " - [Initialize the Python environment](#toc2_4__) \n", - "- [Building the LangGraph agent](#toc3__) \n", - " - [Test available banking tools](#toc3_1__) \n", - " - [Create LangGraph banking agent](#toc3_2__) \n", - " - [Define system prompt](#toc3_2_1__) \n", - " - [Initialize the LLM](#toc3_2_2__) \n", - " - [Define agent state structure](#toc3_2_3__) \n", - " - [Create agent workflow function](#toc3_2_4__) \n", - " - [Instantiate the banking agent](#toc3_2_5__) \n", - " - [Integrate agent with ValidMind](#toc3_3__) \n", - " - [Import ValidMind components](#toc3_3_1__) \n", - " - [Create agent wrapper function](#toc3_3_2__) \n", - " - [Initialize the ValidMind model object](#toc3_3_3__) \n", - " - [Store the agent reference](#toc3_3_4__) \n", - " - [Verify integration](#toc3_3_5__) \n", - " - [Validate the system prompt](#toc3_4__) \n", - "- [Initialize the ValidMind datasets](#toc4__) \n", - " - [Assign predictions](#toc4_1__) \n", - "- [Running accuracy tests](#toc5__) \n", - " - [Response accuracy test](#toc5_1__) \n", - " - [Tool selection accuracy test](#toc5_2__) \n", - "- [Assigning AI evaluation metric scores](#toc6__) \n", - " - [Identify relevant DeepEval scorers](#toc6_1__) \n", - " - [Assign reasoning scores](#toc6_2__) \n", - " - [Plan 
quality score](#toc6_2_1__) \n", - " - [Plan adherence score](#toc6_2_2__) \n", - " - [Assign action scores](#toc6_3__) \n", - " - [Tool correctness score](#toc6_3_1__) \n", - " - [Argument correctness score](#toc6_3_2__) \n", - " - [Assign execution scores](#toc6_4__) \n", - " - [Task completion score](#toc6_4_1__) \n", - "- [Running RAGAS tests](#toc7__) \n", - " - [Identify relevant RAGAS tests](#toc7_1__) \n", - " - [Faithfulness](#toc7_1_1__) \n", - " - [Response Relevancy](#toc7_1_2__) \n", - " - [Context Recall](#toc7_1_3__) \n", - "- [Running safety tests](#toc8__) \n", - " - [AspectCritic](#toc8_1_1__) \n", - " - [Bias](#toc8_1_2__) \n", - "- [Next steps](#toc9__) \n", - " - [Work with your model documentation](#toc9_1__) \n", - " - [Customize the banking agent for your use case](#toc9_2__) \n", - " - [Discover more learning resources](#toc9_3__) \n", - "- [Upgrade ValidMind](#toc10__) \n", - "\n", - ":::\n", - "\n", - "" - ] - }, - { - "cell_type": "markdown", - "id": "ecaad35f", - "metadata": {}, - "source": [ - "\n", - "\n", - "## About ValidMind\n", - "\n", - "ValidMind is a suite of tools for managing model risk, including risk associated with AI and statistical models. \n", - "\n", - "You use the ValidMind Library to automate documentation and validation tests, and then use the ValidMind Platform to collaborate on model documentation. Together, these products simplify model risk management, facilitate compliance with regulations and institutional standards, and enhance collaboration between yourself and model validators." - ] - }, - { - "cell_type": "markdown", - "id": "6ff1f9ef", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Before you begin\n", - "\n", - "This notebook assumes you have basic familiarity with Python, including an understanding of how functions work. If you are new to Python, you can still run the notebook but we recommend further familiarizing yourself with the language. 
\n", - "\n", - "If you encounter errors due to missing modules in your Python environment, install the modules with `pip install`, and then re-run the notebook. For more help, refer to [Installing Python Modules](https://docs.python.org/3/installing/index.html)." - ] - }, - { - "cell_type": "markdown", - "id": "d7ad8d8c", - "metadata": {}, - "source": [ - "\n", - "\n", - "### New to ValidMind?\n", - "\n", - "If you haven't already seen our documentation on the [ValidMind Library](https://docs.validmind.ai/developer/validmind-library.html), we recommend you begin by exploring the available resources in this section. There, you can learn more about documenting models and running tests, as well as find code samples and our Python Library API reference.\n", - "\n", - "
For access to all features available in this notebook, you'll need access to a ValidMind account.\n", - "

\n", - "Register with ValidMind
" - ] - }, - { - "cell_type": "markdown", - "id": "323caa59", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Key concepts\n", - "\n", - "**Model documentation**: A structured and detailed record pertaining to a model, encompassing key components such as its underlying assumptions, methodologies, data sources, inputs, performance metrics, evaluations, limitations, and intended uses. It serves to ensure transparency, adherence to regulatory requirements, and a clear understanding of potential risks associated with the model’s application.\n", - "\n", - "**Documentation template**: Functions as a test suite and lays out the structure of model documentation, segmented into various sections and sub-sections. Documentation templates define the structure of your model documentation, specifying the tests that should be run, and how the results should be displayed.\n", - "\n", - "**Tests**: A function contained in the ValidMind Library, designed to run a specific quantitative test on the dataset or model. Tests are the building blocks of ValidMind, used to evaluate and document models and datasets, and can be run individually or as part of a suite defined by your model documentation template.\n", - "\n", - "**Metrics**: A subset of tests that do not have thresholds. In the context of this notebook, metrics and tests can be thought of as interchangeable concepts.\n", - "\n", - "**Custom metrics**: Custom metrics are functions that you define to evaluate your model or dataset. These functions can be registered with the ValidMind Library to be used in the ValidMind Platform.\n", - "\n", - "**Inputs**: Objects to be evaluated and documented in the ValidMind Library. 
They can be any of the following:\n", - "\n", - " - **model**: A single model that has been initialized in ValidMind with [`vm.init_model()`](https://docs.validmind.ai/validmind/validmind.html#init_model).\n", - " - **dataset**: Single dataset that has been initialized in ValidMind with [`vm.init_dataset()`](https://docs.validmind.ai/validmind/validmind.html#init_dataset).\n", - " - **models**: A list of ValidMind models - usually this is used when you want to compare multiple models in your custom metric.\n", - " - **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom metric. (Learn more: [Run tests with multiple datasets](https://docs.validmind.ai/notebooks/how_to/tests/run_tests/configure_tests/run_tests_that_require_multiple_datasets.html))\n", - "\n", - "**Parameters**: Additional arguments that can be passed when running a ValidMind test, used to pass additional information to a metric, customize its behavior, or provide additional context.\n", - "\n", - "**Outputs**: Custom metrics can return elements like tables or plots. Tables may be a list of dictionaries (each representing a row) or a pandas DataFrame. Plots may be matplotlib or plotly figures.\n", - "\n", - "**Test suites**: Collections of tests designed to run together to automate and generate model documentation end-to-end for specific use-cases.\n", - "\n", - "Example: the [`classifier_full_suite`](https://docs.validmind.ai/validmind/validmind/test_suites/classifier.html#ClassifierFullSuite) test suite runs tests from the [`tabular_dataset`](https://docs.validmind.ai/validmind/validmind/test_suites/tabular_datasets.html) and [`classifier`](https://docs.validmind.ai/validmind/validmind/test_suites/classifier.html) test suites to fully document the data and model sections for binary classification model use-cases." 
- ] - }, - { - "cell_type": "markdown", - "id": "ddba5169", - "metadata": {}, - "source": [ - "\n", - "\n", - "## Setting up" - ] - }, - { - "cell_type": "markdown", - "id": "b53da99c", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Install the ValidMind Library\n", - "\n", - "
Recommended Python versions\n", - "

\n", - "Python 3.8 <= x <= 3.11
\n", - "\n", - "Let's begin by installing the ValidMind Library with large language model (LLM) support:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1982a118", - "metadata": {}, - "outputs": [], - "source": [ - "%pip install -q \"validmind[llm]\" \"langgraph==0.3.21\"" - ] - }, - { - "cell_type": "markdown", - "id": "dc9dea3a", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Initialize the ValidMind Library" - ] - }, - { - "cell_type": "markdown", - "id": "5848461e", - "metadata": {}, - "source": [ - "\n", - "\n", - "#### Register sample model\n", - "\n", - "Let's first register a sample model for use with this notebook.\n", - "\n", - "1. In a browser, [log in to ValidMind](https://docs.validmind.ai/guide/configuration/log-in-to-validmind.html).\n", - "\n", - "2. In the left sidebar, navigate to **Inventory** and click **+ Register Model**.\n", - "\n", - "3. Enter the model details and click **Next >** to continue to assignment of model stakeholders. ([Need more help?](https://docs.validmind.ai/guide/model-inventory/register-models-in-inventory.html))\n", - "\n", - "4. Select your own name under the **MODEL OWNER** drop-down.\n", - "\n", - "5. Click **Register Model** to add the model to your inventory." - ] - }, - { - "cell_type": "markdown", - "id": "97d0b04b", - "metadata": {}, - "source": [ - "\n", - "\n", - "#### Apply documentation template\n", - "\n", - "Once you've registered your model, let's select a documentation template. A template predefines sections for your model documentation and provides a general outline to follow, making the documentation process much easier.\n", - "\n", - "1. In the left sidebar that appears for your model, click **Documents** and select **Documentation**.\n", - "\n", - "2. Under **TEMPLATE**, select `Agentic AI`.\n", - "\n", - "3. Click **Use Template** to apply the template." - ] - }, - { - "cell_type": "markdown", - "id": "b279d5fa", - "metadata": {}, - "source": [ - "
Can't select this template?\n", - "

\n", - "Your organization administrators may need to add it to your template library:\n", - "
" - ] - }, - { - "cell_type": "markdown", - "id": "3606cb8c", - "metadata": {}, - "source": [ - "\n", - "\n", - "#### Get your code snippet\n", - "\n", - "ValidMind generates a unique _code snippet_ for each registered model to connect with your developer environment. You initialize the ValidMind Library with this code snippet, which ensures that your documentation and tests are uploaded to the correct model when you run the notebook.\n", - "\n", - "1. On the left sidebar that appears for your model, select **Getting Started** and click **Copy snippet to clipboard**.\n", - "2. Next, [load your model identifier credentials from an `.env` file](https://docs.validmind.ai/developer/model-documentation/store-credentials-in-env-file.html) or replace the placeholder with your own code snippet:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d6ccbefc", - "metadata": {}, - "outputs": [], - "source": [ - "# Load your model identifier credentials from an `.env` file\n", - "\n", - "%load_ext dotenv\n", - "%dotenv .env\n", - "\n", - "# Or replace with your code snippet\n", - "\n", - "import validmind as vm\n", - "\n", - "vm.init(\n", - " # api_host=\"...\",\n", - " # api_key=\"...\",\n", - " # api_secret=\"...\",\n", - " # model=\"...\",\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "2ed79cf0", - "metadata": {}, - "source": [ - "\n", - "\n", - "#### Preview the documentation template\n", - "\n", - "Let's verify that you have connected the ValidMind Library to the ValidMind Platform and that the appropriate *template* is selected for your model.\n", - "\n", - "You will upload documentation and test results unique to your model based on this template later on. 
For now, **take a look at the default structure that the template provides with [the `vm.preview_template()` function](https://docs.validmind.ai/validmind/validmind.html#preview_template)** from the ValidMind library and note the empty sections:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "dffdaa6f", - "metadata": {}, - "outputs": [], - "source": [ - "vm.preview_template()" - ] - }, - { - "cell_type": "markdown", - "id": "b5c5ba68", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Verify OpenAI API access\n", - "\n", - "Verify that a valid `OPENAI_API_KEY` is set in your `.env` file:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "22cc39cb", - "metadata": {}, - "outputs": [], - "source": [ - "# Load environment variables if using .env file\n", - "try:\n", - " from dotenv import load_dotenv\n", - " load_dotenv()\n", - "except ImportError:\n", - " print(\"dotenv not installed. Make sure OPENAI_API_KEY is set in your environment.\")" - ] - }, - { - "cell_type": "markdown", - "id": "e4a9d3a9", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Initialize the Python environment\n", - "\n", - "Let's import all the necessary libraries to prepare for building our banking LangGraph agentic system:\n", - "\n", - "- **Standard libraries** for data handling and environment management.\n", - "- **pandas**, a Python library for data manipulation and analytics, as an alias. We'll also configure pandas to show all columns and all rows at full width for easier debugging and inspection.\n", - "- **LangChain** components for LLM integration and tool management.\n", - "- **LangGraph** for building stateful, multi-step agent workflows.\n", - "- **Banking tools** for specialized financial services as defined in [banking_tools.py](banking_tools.py)." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2058d1ac", - "metadata": {}, - "outputs": [], - "source": [ - "# STANDARD LIBRARY IMPORTS\n", - "\n", - "# TypedDict: Defines type-safe dictionaries for the agent's state structure\n", - "# Annotated: Adds metadata to type hints\n", - "# Sequence: Type hint for sequences used in the agent\n", - "from typing import TypedDict, Annotated, Sequence\n", - "\n", - "# THIRD PARTY IMPORTS\n", - "\n", - "import pandas as pd\n", - "# Configure pandas to show all columns and all rows at full width\n", - "pd.set_option('display.max_columns', None)\n", - "pd.set_option('display.max_colwidth', None)\n", - "pd.set_option('display.width', None)\n", - "pd.set_option('display.max_rows', None)\n", - "\n", - "# BaseMessage: Represents a base message in the LangChain message system\n", - "# HumanMessage: Represents a human message in the LangChain message system\n", - "# SystemMessage: Represents a system message in the LangChain message system\n", - "from langchain_core.messages import BaseMessage, HumanMessage, SystemMessage\n", - "\n", - "# ChatOpenAI: Represents an OpenAI chat model in the LangChain library\n", - "from langchain_openai import ChatOpenAI\n", - "\n", - "# MemorySaver: Represents a checkpoint for saving and restoring agent state\n", - "from langgraph.checkpoint.memory import MemorySaver\n", - "\n", - "# StateGraph: Represents a stateful graph in the LangGraph library\n", - "# END: Represents the end of a graph\n", - "# START: Represents the start of a graph\n", - "from langgraph.graph import StateGraph, END, START\n", - "\n", - "# add_messages: Adds messages to the state\n", - "from langgraph.graph.message import add_messages\n", - "\n", - "# ToolNode: Represents a tool node in the LangGraph library\n", - "from langgraph.prebuilt import ToolNode\n", - "\n", - "# LOCAL IMPORTS FROM banking_tools.py\n", - "\n", - "from banking_tools import AVAILABLE_TOOLS" - ] - }, - { - "cell_type": "markdown", - 
"id": "e109d075", - "metadata": {}, - "source": [ - "\n", - "\n", - "## Building the LangGraph agent" - ] - }, - { - "cell_type": "markdown", - "id": "15040411", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Test available banking tools\n", - "\n", - "We'll use the demo banking tools defined in `banking_tools.py` that provide use cases of financial services:\n", - "\n", - "- **Credit Risk Analyzer** - Loan applications and credit decisions\n", - "- **Customer Account Manager** - Account services and customer support\n", - "- **Fraud Detection System** - Security and fraud prevention" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1e0a120c", - "metadata": {}, - "outputs": [], - "source": [ - "print(f\"Available tools: {len(AVAILABLE_TOOLS)}\")\n", - "print(\"\\nTool Details:\")\n", - "for i, tool in enumerate(AVAILABLE_TOOLS, 1):\n", - " print(f\" - {tool.name}\")" - ] - }, - { - "cell_type": "markdown", - "id": "04d6785a", - "metadata": {}, - "source": [ - "Let's test each banking tool individually to ensure they're working correctly before integrating them into our agent:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "dc0caff2", - "metadata": {}, - "outputs": [], - "source": [ - "# Test 1: Credit Risk Analyzer\n", - "print(\"TEST 1: Credit Risk Analyzer\")\n", - "print(\"-\" * 40)\n", - "try:\n", - " # Access the underlying function using .func\n", - " credit_result = AVAILABLE_TOOLS[0].func(\n", - " customer_income=75000,\n", - " customer_debt=1200,\n", - " credit_score=720,\n", - " loan_amount=50000,\n", - " loan_type=\"personal\"\n", - " )\n", - " print(credit_result)\n", - " print(\"Credit Risk Analyzer test PASSED\")\n", - "except Exception as e:\n", - " print(f\"Credit Risk Analyzer test FAILED: {e}\")\n", - "\n", - "print(\"\" + \"=\" * 60)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b6b227db", - "metadata": {}, - "outputs": [], - "source": [ - "\n", - "# Test 2: 
Customer Account Manager\n", - "print(\"TEST 2: Customer Account Manager\")\n", - "print(\"-\" * 40)\n", - "try:\n", - " # Test checking balance\n", - " account_result = AVAILABLE_TOOLS[1].func(\n", - " account_type=\"checking\",\n", - " customer_id=\"12345\",\n", - " action=\"check_balance\"\n", - " )\n", - " print(account_result)\n", - "\n", - " # Test getting account info\n", - " info_result = AVAILABLE_TOOLS[1].func(\n", - " account_type=\"all\",\n", - " customer_id=\"12345\", \n", - " action=\"get_info\"\n", - " )\n", - " print(info_result)\n", - " print(\"Customer Account Manager test PASSED\")\n", - "except Exception as e:\n", - " print(f\"Customer Account Manager test FAILED: {e}\")\n", - "\n", - "print(\"\" + \"=\" * 60)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a983b30d", - "metadata": {}, - "outputs": [], - "source": [ - "\n", - "# Test 3: Fraud Detection System\n", - "print(\"TEST 3: Fraud Detection System\")\n", - "print(\"-\" * 40)\n", - "try:\n", - " fraud_result = AVAILABLE_TOOLS[2].func(\n", - " transaction_id=\"TX123\",\n", - " customer_id=\"12345\",\n", - " transaction_amount=500.00,\n", - " transaction_type=\"withdrawal\",\n", - " location=\"Miami, FL\",\n", - " device_id=\"DEVICE_001\"\n", - " )\n", - " print(fraud_result)\n", - " print(\"Fraud Detection System test PASSED\")\n", - "except Exception as e:\n", - " print(f\"Fraud Detection System test FAILED: {e}\")\n", - "\n", - "print(\"\" + \"=\" * 60)" - ] - }, - { - "cell_type": "markdown", - "id": "6bf04845", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Create LangGraph banking agent\n", - "\n", - "With our tools ready to go, we'll create our intelligent banking agent with LangGraph that automatically selects and uses the appropriate banking tool based on a user request." 
- ] - }, - { - "cell_type": "markdown", - "id": "31df57f0", - "metadata": {}, - "source": [ - "\n", - "\n", - "#### Define system prompt\n", - "\n", - "We'll begin by defining our system prompt, which provides the LLM with context about its role as a banking assistant and guidance on when to use each available tool:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7971c427", - "metadata": {}, - "outputs": [], - "source": [ - "\n", - "# Enhanced banking system prompt with tool selection guidance\n", - "system_context = \"\"\"You are a professional banking AI assistant with access to specialized banking tools.\n", - " Analyze the user's banking request and directly use the most appropriate tools to help them.\n", - " \n", - " AVAILABLE BANKING TOOLS:\n", - " \n", - " credit_risk_analyzer - Analyze credit risk for loan applications and credit decisions\n", - " - Use for: loan applications, credit assessments, risk analysis, mortgage eligibility\n", - " - Examples: \"Analyze credit risk for $50k personal loan\", \"Assess mortgage eligibility for $300k home purchase\"\n", - " - Parameters: customer_income, customer_debt, credit_score, loan_amount, loan_type\n", - "\n", - " customer_account_manager - Manage customer accounts and provide banking services\n", - " - Use for: account information, transaction processing, product recommendations, customer service\n", - " - Examples: \"Check balance for checking account 12345\", \"Recommend products for customer with high balance\"\n", - " - Parameters: account_type, customer_id, action, amount, account_details\n", - "\n", - " fraud_detection_system - Analyze transactions for potential fraud and security risks\n", - " - Use for: transaction monitoring, fraud prevention, risk assessment, security alerts\n", - " - Examples: \"Analyze fraud risk for $500 ATM withdrawal in Miami\", \"Check security for $2000 online purchase\"\n", - " - Parameters: transaction_id, customer_id, transaction_amount, 
transaction_type, location, device_id\n", - "\n", - " BANKING INSTRUCTIONS:\n", - " - Analyze the user's banking request carefully and identify the primary need\n", - " - If they need credit analysis → use credit_risk_analyzer\n", - " - If they need financial calculations → use financial_calculator\n", - " - If they need account services → use customer_account_manager\n", - " - If they need security analysis → use fraud_detection_system\n", - " - Extract relevant parameters from the user's request\n", - " - Provide helpful, accurate banking responses based on tool outputs\n", - " - Always consider banking regulations, risk management, and best practices\n", - " - Be professional and thorough in your analysis\n", - "\n", - " Choose and use tools wisely to provide the most helpful banking assistance.\n", - " Describe the response in user friendly manner with details describing the tool output. \n", - " Provide the response in at least 500 words.\n", - " Generate a concise execution plan for the banking request.\n", - " \"\"\"" - ] - }, - { - "cell_type": "markdown", - "id": "406835c8", - "metadata": {}, - "source": [ - "\n", - "\n", - "#### Initialize the LLM\n", - "\n", - "Let's initialize the LLM that will power our banking agent:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "866066e7", - "metadata": {}, - "outputs": [], - "source": [ - "# Initialize the main LLM for banking responses\n", - "main_llm = ChatOpenAI(\n", - " model=\"gpt-5-mini\",\n", - " reasoning={\n", - " \"effort\": \"low\",\n", - " \"summary\": \"auto\"\n", - " }\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "cce9685c", - "metadata": {}, - "source": [ - "Then bind the available banking tools to the LLM, enabling the model to automatically recognize and invoke each tool when appropriate based on request input and the system prompt we defined above:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "906d8132", - "metadata": {}, - 
"outputs": [], - "source": [ - "# Bind all banking tools to the main LLM\n", - "llm_with_tools = main_llm.bind_tools(AVAILABLE_TOOLS)" - ] - }, - { - "cell_type": "markdown", - "id": "2bad8799", - "metadata": {}, - "source": [ - "\n", - "\n", - "#### Define agent state structure\n", - "\n", - "The agent state defines the data structure that flows through the LangGraph workflow. It includes:\n", - "\n", - "- **messages** — The conversation history between the user and agent\n", - "- **user_input** — The current user request\n", - "- **session_id** — A unique identifier for the conversation session\n", - "- **context** — Additional context that can be passed between nodes\n", - "\n", - "Defining this state structure maintains the structure throughout the agent's execution and allows for multi-turn conversations with memory:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6b926ddf", - "metadata": {}, - "outputs": [], - "source": [ - "# Banking Agent State Definition\n", - "class BankingAgentState(TypedDict):\n", - " messages: Annotated[Sequence[BaseMessage], add_messages]\n", - " user_input: str\n", - " session_id: str\n", - " context: dict" - ] - }, - { - "cell_type": "markdown", - "id": "47ce81b7", - "metadata": {}, - "source": [ - "\n", - "\n", - "#### Create agent workflow function\n", - "\n", - "We'll build the LangGraph agent workflow with two main components:\n", - "\n", - "1. **LLM node** — Processes user requests, applies the system prompt, and decides whether to use tools.\n", - "2. **Tools node** — Executes the selected banking tools when the LLM determines they're needed.\n", - "\n", - "The workflow begins with the LLM analyzing the request, then uses tools if needed — or ends if the response is complete, and finally returns to the LLM to generate the final response." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2c9bf585", - "metadata": {}, - "outputs": [], - "source": [ - "def create_banking_langgraph_agent():\n", - " \"\"\"Create a comprehensive LangGraph banking agent with intelligent tool selection.\"\"\"\n", - " def llm_node(state: BankingAgentState) -> BankingAgentState:\n", - " \"\"\"Main LLM node that processes banking requests and selects appropriate tools.\"\"\"\n", - " messages = state[\"messages\"]\n", - " # Add system context to messages\n", - " enhanced_messages = [SystemMessage(content=system_context)] + list(messages)\n", - " # Get LLM response with tool selection\n", - " response = llm_with_tools.invoke(enhanced_messages)\n", - " return {\n", - " **state,\n", - " \"messages\": messages + [response]\n", - " }\n", - " \n", - " def should_continue(state: BankingAgentState) -> str:\n", - " \"\"\"Decide whether to use tools or end the conversation.\"\"\"\n", - " last_message = state[\"messages\"][-1]\n", - " # Check if the LLM wants to use tools\n", - " if hasattr(last_message, 'tool_calls') and last_message.tool_calls:\n", - " return \"tools\"\n", - " return END\n", - " \n", - " # Create the banking state graph\n", - " workflow = StateGraph(BankingAgentState)\n", - " # Add nodes\n", - " workflow.add_node(\"llm\", llm_node)\n", - " workflow.add_node(\"tools\", ToolNode(AVAILABLE_TOOLS))\n", - " # Simplified entry point - go directly to LLM\n", - " workflow.add_edge(START, \"llm\")\n", - " # From LLM, decide whether to use tools or end\n", - " workflow.add_conditional_edges(\n", - " \"llm\",\n", - " should_continue,\n", - " {\"tools\": \"tools\", END: END}\n", - " )\n", - " # Tool execution flows back to LLM for final response\n", - " workflow.add_edge(\"tools\", \"llm\")\n", - " # Set up memory\n", - " memory = MemorySaver()\n", - " # Compile the graph\n", - " agent = workflow.compile(checkpointer=memory)\n", - " return agent" - ] - }, - { - "cell_type": "markdown", - "id": "3eb40287", - 
"metadata": {}, - "source": [ - "\n", - "\n", - "#### Instantiate the banking agent\n", - "\n", - "Now, we'll create an instance of the banking agent by calling the workflow creation function.\n", - "\n", - "This compiled agent is ready to process banking requests and will automatically select and use the appropriate tools based on user queries:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "455b8ee4", - "metadata": {}, - "outputs": [], - "source": [ - "# Create the banking intelligent agent\n", - "banking_agent = create_banking_langgraph_agent()\n", - "\n", - "print(\"Banking LangGraph Agent Created Successfully!\")\n", - "print(\"\\nFeatures:\")\n", - "print(\" - Intelligent banking tool selection\")\n", - "print(\" - Comprehensive banking system prompt\")\n", - "print(\" - Streamlined workflow: LLM → Tools → Response\")\n", - "print(\" - Automatic tool parameter extraction\")\n", - "print(\" - Professional banking assistance\")" - ] - }, - { - "cell_type": "markdown", - "id": "12691528", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Integrate agent with ValidMind\n", - "\n", - "To integrate our LangGraph banking agent with ValidMind, we need to create a wrapper function that ValidMind can use to invoke the agent and extract the necessary information for testing and documentation, allowing ValidMind to run validation tests on the agent's behavior, tool usage, and responses." 
- ] - }, - { - "cell_type": "markdown", - "id": "7b78509b", - "metadata": {}, - "source": [ - "\n", - "\n", - "#### Import ValidMind components\n", - "\n", - "We'll start with importing the necessary ValidMind components for integrating our agent:\n", - "\n", - "- `Prompt` from `validmind.models` for handling prompt-based model inputs\n", - "- `extract_tool_calls_from_agent_output` and `_convert_to_tool_call_list` from `validmind.scorers.llm.deepeval` for extracting and converting tool calls from agent outputs" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9aeb8969", - "metadata": {}, - "outputs": [], - "source": [ - "from validmind.models import Prompt\n", - "from validmind.scorers.llm.deepeval import extract_tool_calls_from_agent_output, _convert_to_tool_call_list" - ] - }, - { - "cell_type": "markdown", - "id": "f67f2955", - "metadata": {}, - "source": [ - "\n", - "\n", - "#### Create agent wrapper function\n", - "\n", - "We'll then create a wrapper function that:\n", - "\n", - "- Accepts input in ValidMind's expected format (with `input` and `session_id` fields)\n", - "- Invokes the banking agent with the proper state initialization\n", - "- Captures tool outputs and tool calls for evaluation\n", - "- Returns a standardized response format that includes the prediction, full output, tool messages, and tool call information\n", - "- Handles errors gracefully with fallback responses" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0e4d5a82", - "metadata": {}, - "outputs": [], - "source": [ - "def banking_agent_fn(input):\n", - " \"\"\"\n", - " Invoke the banking agent with the given input.\n", - " \"\"\"\n", - " try:\n", - " # Initial state for banking agent\n", - " initial_state = {\n", - " \"user_input\": input[\"input\"],\n", - " \"messages\": [HumanMessage(content=input[\"input\"])],\n", - " \"session_id\": input[\"session_id\"],\n", - " \"context\": {}\n", - " }\n", - " session_config = {\"configurable\": 
{\"thread_id\": input[\"session_id\"]}}\n", - " result = banking_agent.invoke(initial_state, config=session_config)\n", - "\n", - " from utils import capture_tool_output_messages\n", - "\n", - " # Capture all tool outputs and metadata\n", - " captured_data = capture_tool_output_messages(result)\n", - " \n", - " # Access specific tool outputs, this will be used for RAGAS tests\n", - " tool_message = \"\"\n", - " for output in captured_data[\"tool_outputs\"]:\n", - " tool_message += output['content']\n", - " \n", - " tool_calls_found = []\n", - " messages = result['messages']\n", - " for message in messages:\n", - " if hasattr(message, 'tool_calls') and message.tool_calls:\n", - " for tool_call in message.tool_calls:\n", - " # Handle both dictionary and object formats\n", - " if isinstance(tool_call, dict):\n", - " tool_calls_found.append(tool_call['name'])\n", - " else:\n", - " # ToolCall object - use attribute access\n", - " tool_calls_found.append(tool_call.name)\n", - "\n", - "\n", - " return {\n", - " \"prediction\": result['messages'][-1].content[0]['text'],\n", - " \"output\": result,\n", - " \"tool_messages\": [tool_message],\n", - " # \"tool_calls\": tool_calls_found,\n", - " \"tool_called\": _convert_to_tool_call_list(extract_tool_calls_from_agent_output(result))\n", - " }\n", - " except Exception as e:\n", - " # Return a fallback response if the agent fails\n", - " error_message = f\"\"\"I apologize, but I encountered an error while processing your banking request: {str(e)}.\n", - " Please try rephrasing your question or contact support if the issue persists.\"\"\"\n", - " return {\n", - " \"prediction\": error_message, \n", - " \"output\": {\n", - " \"messages\": [HumanMessage(content=input[\"input\"]), SystemMessage(content=error_message)],\n", - " \"error\": str(e)\n", - " }\n", - " }" - ] - }, - { - "cell_type": "markdown", - "id": "4bdc90d6", - "metadata": {}, - "source": [ - "\n", - "\n", - "#### Initialize the ValidMind model object\n", - "\n", - 
"We'll also need to register the banking agent as a ValidMind model object (`vm_model`) that can be passed to other functions for analysis and tests on the data.\n", - "\n", - "You simply initialize this model object with [`vm.init_model()`](https://docs.validmind.ai/validmind/validmind.html#init_model) that:\n", - "\n", - "- Associates the wrapper function with the model for prediction\n", - "- Stores the system prompt template for documentation\n", - "- Provides a unique `input_id` for tracking and identification\n", - "- Enables the agent to be used with ValidMind's testing and documentation features" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "60a2ce7a", - "metadata": {}, - "outputs": [], - "source": [ - "# Initialize the agent as a model\n", - "vm_banking_model = vm.init_model(\n", - " input_id=\"banking_agent_model\",\n", - " predict_fn=banking_agent_fn,\n", - " prompt=Prompt(template=system_context)\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "33ed446a", - "metadata": {}, - "source": [ - "\n", - "\n", - "#### Store the agent reference\n", - "\n", - "We'll also store a reference to the original banking agent object in the ValidMind model. This allows us to access the full agent functionality directly if needed, while still maintaining the wrapper function interface for ValidMind's testing framework." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2c653471", - "metadata": {}, - "outputs": [], - "source": [ - "# Add the banking agent to the vm model\n", - "vm_banking_model.model = banking_agent" - ] - }, - { - "cell_type": "markdown", - "id": "bf44ea16", - "metadata": {}, - "source": [ - "\n", - "\n", - "#### Verify integration\n", - "\n", - "Let's confirm that the banking agent has been successfully integrated with ValidMind:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8e101b0f", - "metadata": {}, - "outputs": [], - "source": [ - "print(\"Banking Agent Successfully Integrated with ValidMind!\")\n", - "print(f\"Model ID: {vm_banking_model.input_id}\")" - ] - }, - { - "cell_type": "markdown", - "id": "0c80518d", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Validate the system prompt\n", - "\n", - "Let's get an initial sense of how well our defined system prompt meets a few best practices for prompt engineering by running a few tests — we'll run evaluation tests later on our agent's performance.\n", - "\n", - "You run individual tests by calling [the `run_test` function](https://docs.validmind.ai/validmind/validmind/tests.html#run_test) provided by the `validmind.tests` module. 
Passing in our agentic model as an input, the tests below rate the prompt on a scale of 1-10 against the following criteria:\n", - "\n", - "- **[Clarity](https://docs.validmind.ai/tests/prompt_validation/Clarity.html)** — How clearly the prompt states the task.\n", - "- **[Conciseness](https://docs.validmind.ai/tests/prompt_validation/Conciseness.html)** — How succinctly the prompt states the task.\n", - "- **[Delimitation](https://docs.validmind.ai/tests/prompt_validation/Delimitation.html)** — When using complex prompts containing examples, contextual information, or other elements, is the prompt formatted in such a way that each element is clearly separated?\n", - "- **[NegativeInstruction](https://docs.validmind.ai/tests/prompt_validation/NegativeInstruction.html)** — Whether the prompt contains negative instructions.\n", - "- **[Specificity](https://docs.validmind.ai/tests/prompt_validation/NegativeInstruction.html)** — How specific the prompt defines the task." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f52dceb1", - "metadata": {}, - "outputs": [], - "source": [ - "vm.tests.run_test(\n", - " \"validmind.prompt_validation.Clarity\",\n", - " inputs={\n", - " \"model\": vm_banking_model,\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "70d52333", - "metadata": {}, - "outputs": [], - "source": [ - "vm.tests.run_test(\n", - " \"validmind.prompt_validation.Conciseness\",\n", - " inputs={\n", - " \"model\": vm_banking_model,\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5aa89976", - "metadata": {}, - "outputs": [], - "source": [ - "vm.tests.run_test(\n", - " \"validmind.prompt_validation.Delimitation\",\n", - " inputs={\n", - " \"model\": vm_banking_model,\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8630197e", - "metadata": {}, - "outputs": [], - "source": [ - 
"vm.tests.run_test(\n", - " \"validmind.prompt_validation.NegativeInstruction\",\n", - " inputs={\n", - " \"model\": vm_banking_model,\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "bba99915", - "metadata": {}, - "outputs": [], - "source": [ - "vm.tests.run_test(\n", - " \"validmind.prompt_validation.Specificity\",\n", - " inputs={\n", - " \"model\": vm_banking_model,\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "markdown", - "id": "af4d6d77", - "metadata": {}, - "source": [ - "\n", - "\n", - "## Initialize the ValidMind datasets\n", - "\n", - "After validation our system prompt, let's import our sample dataset ([banking_test_dataset.py](banking_test_dataset.py)), which we'll use in the next section to evaluate our agent's performance across different banking scenarios:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0c70ca2c", - "metadata": {}, - "outputs": [], - "source": [ - "from banking_test_dataset import banking_test_dataset" - ] - }, - { - "cell_type": "markdown", - "id": "0268ce6e", - "metadata": {}, - "source": [ - "The next step is to connect your data with a ValidMind `Dataset` object. **This step is always necessary every time you want to connect a dataset to documentation and produce test results through ValidMind,** but you only need to do it once per dataset.\n", - "\n", - "Initialize a ValidMind dataset object using the [`init_dataset` function](https://docs.validmind.ai/validmind/validmind.html#init_dataset) from the ValidMind (`vm`) module. 
For this example, we'll pass in the following arguments:\n", - "\n", - "- **`input_id`** — A unique identifier that allows tracking what inputs are used when running each individual test.\n", - "- **`dataset`** — The raw dataset that you want to provide as input to tests.\n", - "- **`text_column`** — The name of the column containing the text input data.\n", - "- **`target_column`** — A required argument if tests require access to true values. This is the name of the target column in the dataset." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a7e9d158", - "metadata": {}, - "outputs": [], - "source": [ - "vm_test_dataset = vm.init_dataset(\n", - " input_id=\"banking_test_dataset\",\n", - " dataset=banking_test_dataset,\n", - " text_column=\"input\",\n", - " target_column=\"possible_outputs\",\n", - ")\n", - "\n", - "print(\"Banking Test Dataset Initialized in ValidMind!\")\n", - "print(f\"Dataset ID: {vm_test_dataset.input_id}\")\n", - "print(f\"Dataset columns: {vm_test_dataset._df.columns}\")\n", - "vm_test_dataset._df" - ] - }, - { - "cell_type": "markdown", - "id": "b9143fb6", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Assign predictions\n", - "\n", - "Now that both the model object and the datasets have been registered, we'll assign predictions to capture the banking agent's responses for evaluation:\n", - "\n", - "- The [`assign_predictions()` method](https://docs.validmind.ai/validmind/validmind/vm_models.html#assign_predictions) from the `Dataset` object can link existing predictions to any number of models.\n", - "- This method links the model's class prediction values and probabilities to our `vm_train_ds` and `vm_test_ds` datasets.\n", - "\n", - "If no prediction values are passed, the method will compute predictions automatically:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1d462663", - "metadata": {}, - "outputs": [], - "source": [ - 
"vm_test_dataset.assign_predictions(vm_banking_model)\n", - "\n", - "print(\"Banking Agent Predictions Generated Successfully!\")\n", - "print(f\"Predictions assigned to {len(vm_test_dataset._df)} test cases\")\n", - "vm_test_dataset._df.head()" - ] - }, - { - "cell_type": "markdown", - "id": "8e50467e", - "metadata": {}, - "source": [ - "\n", - "\n", - "## Running accuracy tests\n", - "\n", - "Using [`@vm.test`](https://docs.validmind.ai/validmind/validmind.html#test), let's implement some reusable custom *inline tests* to assess the accuracy of our banking agent:\n", - "\n", - "- An inline test refers to a test written and executed within the same environment as the code being tested — in this case, right in this Jupyter Notebook — without requiring a separate test file or framework.\n", - "- You'll note that the custom test functions are just regular Python functions that can include and require any Python library as you see fit." - ] - }, - { - "cell_type": "markdown", - "id": "6d8a9b90", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Response accuracy test\n", - "\n", - "We'll create a custom test that evaluates the banking agent's ability to provide accurate responses by:\n", - "\n", - "- Testing against a dataset of predefined banking questions and expected answers.\n", - "- Checking if responses contain expected keywords and banking terminology.\n", - "- Providing detailed test results including pass/fail status.\n", - "- Helping identify any gaps in the agent's banking knowledge or response quality." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "90232066", - "metadata": {}, - "outputs": [], - "source": [ - "\n", - "@vm.test(\"my_custom_tests.banking_accuracy_test\")\n", - "def banking_accuracy_test(model, dataset, list_of_columns):\n", - " \"\"\"\n", - " The Banking Accuracy Test evaluates whether the agent’s responses include \n", - " critical domain-specific keywords and phrases that indicate accurate, compliant,\n", - " and contextually appropriate banking information. This test ensures that the agent\n", - " provides responses containing the expected banking terminology, risk classifications,\n", - " account details, or other domain-relevant information required for regulatory compliance,\n", - " customer safety, and operational accuracy.\n", - " \"\"\"\n", - " df = dataset._df\n", - " \n", - " # Pre-compute responses for all tests\n", - " y_true = dataset.y.tolist()\n", - " y_pred = dataset.y_pred(model).tolist()\n", - "\n", - " # Vectorized test results\n", - " test_results = []\n", - " for response, keywords in zip(y_pred, y_true):\n", - " # Convert keywords to list if not already a list\n", - " if not isinstance(keywords, list):\n", - " keywords = [keywords]\n", - " test_results.append(any(str(keyword).lower() in str(response).lower() for keyword in keywords))\n", - " \n", - " results = pd.DataFrame()\n", - " column_names = [col + \"_details\" for col in list_of_columns]\n", - " results[column_names] = df[list_of_columns]\n", - " results[\"actual\"] = y_pred\n", - " results[\"expected\"] = y_true\n", - " results[\"passed\"] = test_results\n", - " results[\"error\"] = None if test_results else f'Response did not contain any expected keywords: {y_true}'\n", - " \n", - " return results" - ] - }, - { - "cell_type": "markdown", - "id": "7eed5265", - "metadata": {}, - "source": [ - "Now that we've defined our custom response accuracy test, we can run the test using the same `run_test()` function we used earlier to validate the system 
prompt using our sample dataset and agentic model as input, and log the test results to the ValidMind Platform with the [`log()` method](https://docs.validmind.ai/validmind/validmind/vm_models.html#log):" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e68884d5", - "metadata": {}, - "outputs": [], - "source": [ - "result = vm.tests.run_test(\n", - " \"my_custom_tests.banking_accuracy_test\",\n", - " inputs={\n", - " \"dataset\": vm_test_dataset,\n", - " \"model\": vm_banking_model\n", - " },\n", - " params={\n", - " \"list_of_columns\": [\"input\"]\n", - " }\n", - ")\n", - "result.log()" - ] - }, - { - "cell_type": "markdown", - "id": "4d758ddf", - "metadata": {}, - "source": [ - "Let's review the first five rows of the test dataset to inspect the results to see how well the banking agent performed. Each column in the output serves a specific purpose in evaluating agent performance:\n", - "\n", - "| Column header | Description | Importance |\n", - "|--------------|-------------|------------|\n", - "| **`input`** | Original user query or request | Essential for understanding the context of each test case and tracing which inputs led to specific agent behaviors. |\n", - "| **`expected_tools`** | Banking tools that should be invoked for this request | Enables validation of correct tool selection, which is critical for agentic AI systems where choosing the right tool is a key success metric. |\n", - "| **`expected_output`** | Expected output or keywords that should appear in the response | Defines the success criteria for each test case, enabling objective evaluation of whether the agent produced the correct result. |\n", - "| **`session_id`** | Unique identifier for each test session | Allows tracking and correlation of related test runs, debugging specific sessions, and maintaining audit trails. 
|\n", - "| **`category`** | Classification of the request type | Helps organize test results by domain and identify performance patterns across different banking use cases. |\n", - "| **`banking_agent_model_output`** | Complete agent response including all messages and reasoning | Allows you to examine the full output to assess response quality, completeness, and correctness beyond just keyword matching. |\n", - "| **`banking_agent_model_tool_messages`** | Messages exchanged with the banking tools | Critical for understanding how the agent interacted with tools, what parameters were passed, and what tool outputs were received. |\n", - "| **`banking_agent_model_tool_called`** | Specific tool that was invoked | Enables validation that the agent selected the correct tool for each request, which is fundamental to agentic AI validation. |\n", - "| **`possible_outputs`** | Alternative valid outputs or keywords that could appear in the response | Provides flexibility in evaluation by accounting for multiple acceptable response formats or variations. |" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "78f7edb1", - "metadata": {}, - "outputs": [], - "source": [ - "vm_test_dataset.df.head(5)" - ] - }, - { - "cell_type": "markdown", - "id": "6f233bef", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Tool selection accuracy test\n", - "\n", - "We'll also create a custom test that evaluates the banking agent's ability to select the correct tools for different requests by:\n", - "\n", - "- Testing against a dataset of predefined banking queries with expected tool selections.\n", - "- Comparing the tools actually invoked by the agent against the expected tools for each request.\n", - "- Providing quantitative accuracy scores that measure the proportion of expected tools correctly selected.\n", - "- Helping identify gaps in the agent's understanding of user needs and tool selection logic." 
- ] - }, - { - "cell_type": "markdown", - "id": "d0b46111", - "metadata": {}, - "source": [ - "First, we'll define a helper function that extracts tool calls from the agent's messages and compares them against the expected tools. This function handles different message formats (dictionary or object) and calculates accuracy scores:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e68798be", - "metadata": {}, - "outputs": [], - "source": [ - "def validate_tool_calls_simple(messages, expected_tools):\n", - " \"\"\"Simple validation of tool calls without RAGAS dependency issues.\"\"\"\n", - " \n", - " tool_calls_found = []\n", - " \n", - " for message in messages:\n", - " if hasattr(message, 'tool_calls') and message.tool_calls:\n", - " for tool_call in message.tool_calls:\n", - " # Handle both dictionary and object formats\n", - " if isinstance(tool_call, dict):\n", - " tool_calls_found.append(tool_call['name'])\n", - " else:\n", - " # ToolCall object - use attribute access\n", - " tool_calls_found.append(tool_call.name)\n", - " \n", - " # Check if expected tools were called\n", - " accuracy = 0.0\n", - " matches = 0\n", - " if expected_tools:\n", - " matches = sum(1 for tool in expected_tools if tool in tool_calls_found)\n", - " accuracy = matches / len(expected_tools)\n", - " \n", - " return {\n", - " 'expected_tools': expected_tools,\n", - " 'found_tools': tool_calls_found,\n", - " 'matches': matches,\n", - " 'total_expected': len(expected_tools) if expected_tools else 0,\n", - " 'accuracy': accuracy,\n", - " }" - ] - }, - { - "cell_type": "markdown", - "id": "1b45472c", - "metadata": {}, - "source": [ - "Now we'll define the main test function that uses the helper function to evaluate tool selection accuracy across all test cases in the dataset:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "604d7313", - "metadata": {}, - "outputs": [], - "source": [ - "@vm.test(\"my_custom_tests.BankingToolCallAccuracy\")\n", - 
"def BankingToolCallAccuracy(dataset, agent_output_column, expected_tools_column):\n", - " \"\"\"\n", - " Evaluates the tool selection accuracy of a LangGraph-powered banking agent.\n", - "\n", - " This test measures whether the agent correctly identifies and invokes the required banking tools\n", - " for each user query scenario.\n", - " For each case, the outputs generated by the agent (including its tool calls) are compared against an\n", - " expected set of tools. The test considers both coverage and exactness: it computes the proportion of\n", - " expected tools correctly called by the agent for each instance.\n", - "\n", - " Parameters:\n", - " dataset (VMDataset): The dataset containing user queries, agent outputs, and ground-truth tool expectations.\n", - " agent_output_column (str): Dataset column name containing agent outputs (should include tool call details in 'messages').\n", - " expected_tools_column (str): Dataset column specifying the true expected tools (as lists).\n", - "\n", - " Returns:\n", - " List[dict]: Per-row dictionaries with details: expected tools, found tools, match count, total expected, and accuracy score.\n", - "\n", - " Purpose:\n", - " Provides diagnostic evidence of the banking agent's core reasoning ability—specifically, its capacity to\n", - " interpret user needs and select the correct banking actions. Useful for diagnosing gaps in tool coverage,\n", - " misclassifications, or breakdowns in agent logic.\n", - "\n", - " Interpretation:\n", - " - An accuracy of 1.0 signals perfect tool selection for that example.\n", - " - Lower scores may indicate partial or complete failures to invoke required tools.\n", - " - Review 'found_tools' vs. 
'expected_tools' to understand the source of discrepancies.\n", - "\n", - " Strengths:\n", - " - Directly tests a core capability of compositional tool-use agents.\n", - " - Framework-agnostic; robust to tool call output format (object or dict).\n", - " - Supports batch validation and result logging for systematic documentation.\n", - "\n", - " Limitations:\n", - " - Does not penalize extra, unnecessary tool calls.\n", - " - Does not assess result quality—only correct invocation.\n", - "\n", - " \"\"\"\n", - " df = dataset._df\n", - " \n", - " results = []\n", - " for i, row in df.iterrows():\n", - " result = validate_tool_calls_simple(row[agent_output_column]['messages'], row[expected_tools_column])\n", - " results.append(result)\n", - " \n", - " return results" - ] - }, - { - "cell_type": "markdown", - "id": "d594c973", - "metadata": {}, - "source": [ - "Finally, we can call our function with `run_test()` and log the test results to the ValidMind Platform:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "dd14115e", - "metadata": {}, - "outputs": [], - "source": [ - "result = vm.tests.run_test(\n", - " \"my_custom_tests.BankingToolCallAccuracy\",\n", - " inputs={\n", - " \"dataset\": vm_test_dataset,\n", - " },\n", - " params={\n", - " \"agent_output_column\": \"banking_agent_model_output\",\n", - " \"expected_tools_column\": \"expected_tools\"\n", - " }\n", - ")\n", - "result.log()" - ] - }, - { - "cell_type": "markdown", - "id": "f78f4107", - "metadata": {}, - "source": [ - "\n", - "\n", - "## Assigning AI evaluation metric scores\n", - "\n", - "*AI agent evaluation metrics* are specialized measurements designed to assess how well autonomous LLM-based agents reason, plan, select and execute tools, and ultimately complete user tasks by analyzing the *full execution trace* — including reasoning steps, tool calls, intermediate decisions, and outcomes, rather than just single input–output pairs. 
These metrics are essential because agent failures often occur in ways traditional LLM metrics miss — for example, choosing the right tool with wrong arguments, creating a good plan but not following it, or completing a task inefficiently.\n", - "\n", - "In this section, we'll evaluate our banking agent's outputs and add scoring to our sample dataset against metrics defined in [DeepEval’s AI agent evaluation framework](https://deepeval.com/guides/guides-ai-agent-evaluation-metrics) which breaks down AI agent evaluation into three layers with corresponding subcategories: **reasoning**, **action**, and **execution**.\n", - "\n", - "Together, these three metrics enable granular diagnosis of agent behavior, help pinpoint where failures occur (reasoning, action, or execution), and support both development benchmarking and production monitoring." - ] - }, - { - "cell_type": "markdown", - "id": "3a9c853a", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Identify relevant DeepEval scorers\n", - "\n", - "*Scorers* are evaluation metrics that analyze model outputs and store their results in the dataset:\n", - "\n", - "- Each scorer adds a new column to the dataset with format: `{scorer_name}_{metric_name}`\n", - "- The column contains the numeric score (typically `0`-`1`) for each example\n", - "- Multiple scorers can be run on the same dataset, each adding their own column\n", - "- Scores are persisted in the dataset for later analysis and visualization\n", - "- Common scorer patterns include:\n", - " - Model performance metrics (accuracy, F1, etc.)\n", - " - Output quality metrics (relevance, faithfulness)\n", - " - Task-specific metrics (completion, correctness)\n", - "\n", - "Use `list_scorers()` from [`validmind.scorers`](https://docs.validmind.ai/validmind/validmind/tests.html#scorer) to discover all available scoring methods and their IDs that can be used with `assign_scores()`. 
We'll filter these results to return only DeepEval scorers for our desired three metrics in a formatted table with descriptions:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "730c70ec", - "metadata": {}, - "outputs": [], - "source": [ - "# Load all DeepEval scorers\n", - "llm_scorers_dict = vm.tests.load._load_tests([s for s in vm.scorer.list_scorers() if \"deepeval\" in s.lower()])\n", - "\n", - "# Categorize scorers by metric layer\n", - "reasoning_scorers = {}\n", - "action_scorers = {}\n", - "execution_scorers = {}\n", - "\n", - "for scorer_id, scorer_func in llm_scorers_dict.items():\n", - " tags = getattr(scorer_func, \"__tags__\", [])\n", - " scorer_name = scorer_id.split(\".\")[-1]\n", - "\n", - " if \"reasoning_layer\" in tags:\n", - " reasoning_scorers[scorer_id] = scorer_func\n", - " elif \"action_layer\" in tags:\n", - " action_scorers[scorer_id] = scorer_func\n", - " elif \"TaskCompletion\" in scorer_name:\n", - " execution_scorers[scorer_id] = scorer_func\n", - "\n", - "# Display scorers by category\n", - "print(\"=\" * 80)\n", - "print(\"REASONING LAYER\")\n", - "print(\"=\" * 80)\n", - "if reasoning_scorers:\n", - " reasoning_df = vm.tests.load._pretty_list_tests(reasoning_scorers, truncate=True)\n", - " display(reasoning_df)\n", - "else:\n", - " print(\"No reasoning layer scorers found.\")\n", - "\n", - "print(\"\\n\" + \"=\" * 80)\n", - "print(\"ACTION LAYER\")\n", - "print(\"=\" * 80)\n", - "if action_scorers:\n", - " action_df = vm.tests.load._pretty_list_tests(action_scorers, truncate=True)\n", - " display(action_df)\n", - "else:\n", - " print(\"No action layer scorers found.\")\n", - "\n", - "print(\"\\n\" + \"=\" * 80)\n", - "print(\"EXECUTION LAYER\")\n", - "print(\"=\" * 80)\n", - "if execution_scorers:\n", - " execution_df = vm.tests.load._pretty_list_tests(execution_scorers, truncate=True)\n", - " display(execution_df)\n", - "else:\n", - " print(\"No execution layer scorers found.\")" - ] - }, - { - "cell_type": 
"markdown", - "id": "4dd73d0d", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Assign reasoning scores\n", - "\n", - "*Reasoning* evaluates planning and strategy generation:\n", - "\n", - "- **Plan quality** – How logical, complete, and efficient the agent’s plan is.\n", - "- **Plan adherence** – Whether the agent follows its own plan during execution." - ] - }, - { - "cell_type": "markdown", - "id": "06ccae28", - "metadata": {}, - "source": [ - "\n", - "\n", - "#### Plan quality score\n", - "\n", - "Let's measure how well our banking agent generates a plan before acting. A high score means the plan is logical, complete, and efficient." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "52f362ba", - "metadata": {}, - "outputs": [], - "source": [ - "vm_test_dataset.assign_scores(\n", - " metrics = \"validmind.scorers.llm.deepeval.PlanQuality\",\n", - " input_column = \"input\",\n", - " actual_output_column = \"banking_agent_model_prediction\",\n", - " tools_called_column = \"banking_agent_model_tool_called\",\n", - " agent_output_column = \"banking_agent_model_output\",\n", - ")\n", - "vm_test_dataset._df.head()" - ] - }, - { - "cell_type": "markdown", - "id": "8dcdc88f", - "metadata": {}, - "source": [ - "\n", - "\n", - "#### Plan adherence score\n", - "\n", - "Let's check whether our banking agent follows the plan it created. Deviations lower this score and indicate gaps between reasoning and execution." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4124a7c2", - "metadata": {}, - "outputs": [], - "source": [ - "vm_test_dataset.assign_scores(\n", - " metrics = \"validmind.scorers.llm.deepeval.PlanAdherence\",\n", - " input_column = \"input\",\n", - " actual_output_column = \"banking_agent_model_prediction\",\n", - " expected_output_column = \"expected_output\",\n", - " tools_called_column = \"banking_agent_model_tool_called\",\n", - " agent_output_column = \"banking_agent_model_output\",\n", - "\n", - ")\n", - "vm_test_dataset._df.head()" - ] - }, - { - "cell_type": "markdown", - "id": "6da1ac95", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Assign action scores\n", - "\n", - "*Action* assesses tool usage and argument generation:\n", - "\n", - "- **Tool correctness** – Whether the agent selects and calls the right tools.\n", - "- **Argument correctness** – Whether the agent generates correct tool arguments." - ] - }, - { - "cell_type": "markdown", - "id": "d4db8270", - "metadata": {}, - "source": [ - "\n", - "\n", - "#### Tool correctness score\n", - "\n", - "Let's evaluate if our banking agent selects the appropriate tool for the task. Choosing the wrong tool reduces performance even if reasoning was correct." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8d2e8a25", - "metadata": {}, - "outputs": [], - "source": [ - "vm_test_dataset.assign_scores(\n", - " metrics = \"validmind.scorers.llm.deepeval.ToolCorrectness\",\n", - " input_column = \"input\",\n", - " actual_output_column = \"banking_agent_model_prediction\",\n", - " tools_called_column = \"banking_agent_model_tool_called\",\n", - " expected_tools_column = \"expected_tools\",\n", - " agent_output_column = \"banking_agent_model_output\",\n", - "\n", - ")\n", - "vm_test_dataset._df.head()" - ] - }, - { - "cell_type": "markdown", - "id": "9aa50b05", - "metadata": {}, - "source": [ - "\n", - "\n", - "#### Argument correctness score\n", - "\n", - "Let's assesses whether our banking agent provides correct inputs or arguments to the selected tool. Incorrect arguments can lead to failed or unexpected results." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "04f90489", - "metadata": {}, - "outputs": [], - "source": [ - "vm_test_dataset.assign_scores(\n", - " metrics = \"validmind.scorers.llm.deepeval.ArgumentCorrectness\",\n", - " input_column = \"input\",\n", - " actual_output_column = \"banking_agent_model_prediction\",\n", - " tools_called_column = \"banking_agent_model_tool_called\",\n", - " agent_output_column = \"banking_agent_model_output\",\n", - "\n", - ")\n", - "vm_test_dataset._df.head()" - ] - }, - { - "cell_type": "markdown", - "id": "c59e5595", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Assign execution score\n", - "\n", - "*Execution* measures end-to-end performance:\n", - "\n", - "- **Task completion** – Whether the agent successfully completes the intended task.\n" - ] - }, - { - "cell_type": "markdown", - "id": "d64600ca", - "metadata": {}, - "source": [ - "\n", - "\n", - "#### Task completion score\n", - "\n", - "Let's evaluate whether our banking agent successfully completes the requested tasks. 
Incomplete task execution can lead to user dissatisfaction and failed banking operations." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "05024f1f", - "metadata": {}, - "outputs": [], - "source": [ - "vm_test_dataset.assign_scores(\n", - " metrics = \"validmind.scorers.llm.deepeval.TaskCompletion\",\n", - " input_column = \"input\",\n", - " actual_output_column = \"banking_agent_model_prediction\",\n", - " agent_output_column = \"banking_agent_model_output\",\n", - " tools_called_column = \"banking_agent_model_tool_called\",\n", - "\n", - ")\n", - "vm_test_dataset._df.head()" - ] - }, - { - "cell_type": "markdown", - "id": "21aa9b0d", - "metadata": {}, - "source": [ - "As you recall from the beginning of this section, when we run scorers through `assign_scores()`, the return values are automatically processed and added as new columns with the format `{scorer_name}_{metric_name}`. Note that the task completion scorer has added a new column `TaskCompletion_score` to our dataset.\n", - "\n", - "We'll use this column to visualize the distribution of task completion scores across our test cases through the [BoxPlot test](https://docs.validmind.ai/validmind/validmind/tests/plots/BoxPlot.html#boxplot):" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7f6d08ca", - "metadata": {}, - "outputs": [], - "source": [ - "vm.tests.run_test(\n", - " \"validmind.plots.BoxPlot\",\n", - " inputs={\"dataset\": vm_test_dataset},\n", - " params={\n", - " \"columns\": \"TaskCompletion_score\",\n", - " \"title\": \"Distribution of Task Completion Scores\",\n", - " \"ylabel\": \"Score\",\n", - " \"figsize\": (8, 6)\n", - " }\n", - ").log()" - ] - }, - { - "cell_type": "markdown", - "id": "012bbcb8", - "metadata": {}, - "source": [ - "\n", - "\n", - "## Running RAGAS tests\n", - "\n", - "Next, let's run some out-of-the-box *Retrieval-Augmented Generation Assessment* (RAGAS) tests available in the ValidMind Library. 
RAGAS provides specialized metrics for evaluating retrieval-augmented generation systems and conversational AI agents. These metrics analyze different aspects of agent performance by assessing how well systems integrate retrieved information with generated responses.\n", - "\n", - "Our banking agent uses tools to retrieve information and generates responses based on that context, making it similar to a RAG system. RAGAS metrics help evaluate the quality of this integration by analyzing the relationship between retrieved tool outputs, user queries, and generated responses.\n", - "\n", - "These tests provide insights into how well our banking agent integrates tool usage with conversational abilities, ensuring it provides accurate, relevant, and helpful responses to banking users while maintaining fidelity to retrieved information." - ] - }, - { - "cell_type": "markdown", - "id": "2036afba", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Identify relevant RAGAS tests\n", - "\n", - "Let's explore some of ValidMind's available tests. Using ValidMind’s repository of tests streamlines your development testing, and helps you ensure that your models are being documented and evaluated appropriately.\n", - "\n", - "You can pass `tasks` and `tags` as parameters to the [`vm.tests.list_tests()` function](https://docs.validmind.ai/validmind/validmind/tests.html#list_tests) to filter the tests based on the tags and task types:\n", - "\n", - "- **`tasks`** represent the kind of modeling task associated with a test. Here we'll focus on `text_qa` tasks.\n", - "- **`tags`** are free-form descriptions providing more details about the test, for example, what category the test falls into. Here we'll focus on the `ragas` tag.\n", - "\n", - "We'll then run three of these tests returned as examples below." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0701f5a9", - "metadata": {}, - "outputs": [], - "source": [ - "vm.tests.list_tests(task=\"text_qa\", tags=[\"ragas\"])" - ] - }, - { - "cell_type": "markdown", - "id": "c1741ffc", - "metadata": {}, - "source": [ - "\n", - "\n", - "#### Faithfulness\n", - "\n", - "Let's evaluate whether the banking agent's responses accurately reflect the information retrieved from tools. Unfaithful responses can misreport credit analysis, financial calculations, and compliance results—undermining user trust in the banking agent." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "92044533", - "metadata": {}, - "outputs": [], - "source": [ - "vm.tests.run_test(\n", - " \"validmind.model_validation.ragas.Faithfulness\",\n", - " inputs={\"dataset\": vm_test_dataset},\n", - " param_grid={\n", - " \"user_input_column\": [\"input\"],\n", - " \"response_column\": [\"banking_agent_model_prediction\"],\n", - " \"retrieved_contexts_column\": [\"banking_agent_model_tool_messages\"],\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "markdown", - "id": "42b71ccc", - "metadata": {}, - "source": [ - "\n", - "\n", - "#### Response Relevancy\n", - "\n", - "Let's evaluate whether the banking agent's answers address the user's original question or request. Irrelevant or off-topic responses can frustrate users and fail to deliver the banking information they need." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d7483bc3", - "metadata": {}, - "outputs": [], - "source": [ - "vm.tests.run_test(\n", - " \"validmind.model_validation.ragas.ResponseRelevancy\",\n", - " inputs={\"dataset\": vm_test_dataset},\n", - " params={\n", - " \"user_input_column\": \"input\",\n", - " \"response_column\": \"banking_agent_model_prediction\",\n", - " \"retrieved_contexts_column\": \"banking_agent_model_tool_messages\",\n", - " }\n", - ").log()" - ] - }, - { - "cell_type": "markdown", - "id": "4f4d0569", - "metadata": {}, - "source": [ - "\n", - "\n", - "#### Context Recall\n", - "\n", - "Let's evaluate how well the banking agent uses the information retrieved from tools when generating its responses. Poor context recall can lead to incomplete or underinformed answers even when the right tools were selected." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e5dc00ce", - "metadata": {}, - "outputs": [], - "source": [ - "vm.tests.run_test(\n", - " \"validmind.model_validation.ragas.ContextRecall\",\n", - " inputs={\"dataset\": vm_test_dataset},\n", - " param_grid={\n", - " \"user_input_column\": [\"input\"],\n", - " \"retrieved_contexts_column\": [\"banking_agent_model_tool_messages\"],\n", - " \"reference_column\": [\"banking_agent_model_prediction\"],\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "markdown", - "id": "b987b00e", - "metadata": {}, - "source": [ - "\n", - "\n", - "## Running safety tests\n", - "\n", - "Finally, let's run some out-of-the-box *safety* tests available in the ValidMind Library. Safety tests provide specialized metrics for evaluating whether AI agents operate reliably and securely. 
These metrics analyze different aspects of agent behavior by assessing adherence to safety guidelines, consistency of outputs, and resistance to harmful or inappropriate requests.\n", - "\n", - "Our banking agent handles sensitive financial information and user requests, making safety and reliability essential. Safety tests help evaluate whether the agent maintains appropriate boundaries, responds consistently and correctly to inputs, and avoids generating harmful, biased, or unprofessional content.\n", - "\n", - "These tests provide insights into how well our banking agent upholds standards of fairness and professionalism, ensuring it operates reliably and securely for banking users." - ] - }, - { - "cell_type": "markdown", - "id": "a754cca3", - "metadata": {}, - "source": [ - "\n", - "\n", - "#### AspectCritic\n", - "\n", - "Let's evaluate our banking agent's responses across multiple quality dimensions — conciseness, coherence, correctness, harmfulness, and maliciousness. Weak performance on these dimensions can degrade user experience, fall short of professional banking standards, or introduce safety risks. \n", - "\n", - "We'll use the `AspectCritic` we identified earlier:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "148daa2b", - "metadata": {}, - "outputs": [], - "source": [ - "vm.tests.run_test(\n", - " \"validmind.model_validation.ragas.AspectCritic\",\n", - " inputs={\"dataset\": vm_test_dataset},\n", - " param_grid={\n", - " \"user_input_column\": [\"input\"],\n", - " \"response_column\": [\"banking_agent_model_prediction\"],\n", - " \"retrieved_contexts_column\": [\"banking_agent_model_tool_messages\"],\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "markdown", - "id": "92e5b1f6", - "metadata": {}, - "source": [ - "\n", - "\n", - "#### Bias\n", - "\n", - "Let's evaluate whether our banking agent's prompts contain unintended biases that could affect banking decisions. 
Biased prompts can lead to unfair or discriminatory outcomes — undermining customer trust and exposing the institution to compliance risk.\n", - "\n", - "We'll first use `list_tests()` again to filter for tests relating to `prompt_validation`:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "74eba86c", - "metadata": {}, - "outputs": [], - "source": [ - "vm.tests.list_tests(filter=\"prompt_validation\")" - ] - }, - { - "cell_type": "markdown", - "id": "bcc66b65", - "metadata": {}, - "source": [ - "And then run the identified `Bias` test:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "062cf8e7", - "metadata": {}, - "outputs": [], - "source": [ - "vm.tests.run_test(\n", - " \"validmind.prompt_validation.Bias\",\n", - " inputs={\n", - " \"model\": vm_banking_model,\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "markdown", - "id": "a2832750", - "metadata": {}, - "source": [ - "\n", - "\n", - "## Next steps\n", - "\n", - "You can look at the output produced by the ValidMind Library right in the notebook where you ran the code, as you would expect. But there is a better way — use the ValidMind Platform to work with your model documentation." - ] - }, - { - "cell_type": "markdown", - "id": "a8cb1a58", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Work with your model documentation\n", - "\n", - "1. From the **Inventory** in the ValidMind Platform, go to the model you registered earlier. ([Need more help?](https://docs.validmind.ai/guide/model-inventory/working-with-model-inventory.html))\n", - "\n", - "2. In the left sidebar that appears for your model, click **Documentation** under Documents.\n", - "\n", - " What you see is the full draft of your model documentation in a more easily consumable version. From here, you can make qualitative edits to model documentation, view guidelines, collaborate with validators, and submit your model documentation for approval when it's ready. 
[Learn more ...](https://docs.validmind.ai/guide/working-with-model-documentation.html)\n", - "\n", - "3. Click into any section related to the tests we ran in this notebook, for example: **4.3. Prompt Evaluation** to review the results of the tests we logged." - ] - }, - { - "cell_type": "markdown", - "id": "94ef26be", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Customize the banking agent for your use case\n", - "\n", - "You've now built an agentic AI system designed for banking use cases that supports compliance with supervisory guidance such as SR 11-7 and SS1/23, covering credit and fraud risk assessment for both retail and commercial banking. Extend this example agent to real-world banking scenarios and production deployment by:\n", - "\n", - "- Adapting the banking tools to your organization's specific requirements\n", - "- Adding more banking scenarios and edge cases to your test set\n", - "- Connecting the agent to your banking systems and databases\n", - "- Implementing additional banking-specific tools and workflows" - ] - }, - { - "cell_type": "markdown", - "id": "a681e49c", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Discover more learning resources\n", - "\n", - "Learn more about the ValidMind Library tools we used in this notebook:\n", - "\n", - "- [Custom prompts](https://docs.validmind.ai/notebooks/how_to/tests/run_tests/configure_tests/customize_test_result_descriptions.html)\n", - "- [Custom tests](https://docs.validmind.ai/notebooks/how_to/tests/custom_tests/implement_custom_tests.html)\n", - "- [ValidMind scorers](https://docs.validmind.ai/notebooks/how_to/scoring/assign_scores_complete_tutorial.html)\n", - "\n", - "We also offer many more interactive notebooks to help you document models:\n", - "\n", - "- [Run tests & test suites](https://docs.validmind.ai/guide/testing-overview.html)\n", - "- [Code samples](https://docs.validmind.ai/guide/samples-jupyter-notebooks.html)\n", - "\n", - "Or, visit our 
[documentation](https://docs.validmind.ai/) to learn more about ValidMind." - ] - }, - { - "cell_type": "markdown", - "id": "707c1b6e", - "metadata": {}, - "source": [ - "\n", - "\n", - "## Upgrade ValidMind\n", - "\n", - "
After installing ValidMind, you’ll want to periodically make sure you are on the latest version to access any new features and other enhancements.
\n", - "\n", - "Retrieve the information for the currently installed version of ValidMind:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9733adff", - "metadata": {}, - "outputs": [], - "source": [ - "%pip show validmind" - ] - }, - { - "cell_type": "markdown", - "id": "e4b0b646", - "metadata": {}, - "source": [ - "If the version returned is lower than the version indicated in our [production open-source code](https://github.com/validmind/validmind-library/blob/prod/validmind/__version__.py), restart your notebook and run:\n", - "\n", - "```bash\n", - "%pip install --upgrade validmind\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "387fa7f1", - "metadata": {}, - "source": [ - "You may need to restart your kernel after running the upgrade package for changes to be applied." - ] - }, - { - "cell_type": "markdown", - "id": "copyright-de4baf0f42ba4a37946d52586dff1049", - "metadata": {}, - "source": [ - "\n", - "\n", - "\n", - "\n", - "***\n", - "\n", - "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", - "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", - "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "validmind-1QuffXMV-py3.11", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.9" - } - }, - "nbformat": 4, - "nbformat_minor": 5 + "cells": [ + { + "cell_type": "markdown", + "id": "e7277c38", + "metadata": {}, + "source": [ + "# Document an agentic AI system\n", + "\n", + "Build and document an agentic AI system with the ValidMind Library. Construct a LangGraph-based banking agent, assign AI evaluation metric scores to your agent, and run accuracy, RAGAS, and safety tests, then log those test results to the ValidMind Platform.\n", + "\n", + "An _AI agent_ is an autonomous system that interprets inputs, selects from available tools or actions, and executes multi-step behaviors to achieve defined goals. In this notebook, the agent acts as a banking assistant that analyzes user requests and automatically selects and invokes the appropriate specialized banking tool to deliver accurate, compliant, and actionable responses.\n", + "\n", + "- This agent enables financial institutions to automate complex banking workflows where different customer requests require different specialized tools and knowledge bases.\n", + "- Effective validation of agentic AI systems reduces the risks of agents misinterpreting inputs, failing to extract required parameters, or producing incorrect assessments or actions — such as selecting the wrong tool.\n", + "\n", + "
For the LLM components in this notebook to function properly, you'll need access to OpenAI.\n", + "

\n", + "Before you continue, ensure that a valid OPENAI_API_KEY is set in your .env file.
" + ] + }, + { + "cell_type": "markdown", + "id": "a47dd942", + "metadata": {}, + "source": [ + "::: {.content-hidden when-format=\"html\"}\n", + "## Contents \n", + "- [About ValidMind](#toc1__) \n", + " - [Before you begin](#toc1_1__) \n", + " - [New to ValidMind?](#toc1_2__) \n", + " - [Key concepts](#toc1_3__) \n", + "- [Setting up](#toc2__) \n", + " - [Install the ValidMind Library](#toc2_1__) \n", + " - [Initialize the ValidMind Library](#toc2_2__) \n", + " - [Register sample model](#toc2_2_1__) \n", + " - [Apply documentation template](#toc2_2_2__) \n", + " - [Get your code snippet](#toc2_2_3__) \n", + " - [Preview the documentation template](#toc2_2_4__) \n", + " - [Verify OpenAI API access](#toc2_3__) \n", + " - [Initialize the Python environment](#toc2_4__) \n", + "- [Building the LangGraph agent](#toc3__) \n", + " - [Test available banking tools](#toc3_1__) \n", + " - [Create LangGraph banking agent](#toc3_2__) \n", + " - [Define system prompt](#toc3_2_1__) \n", + " - [Initialize the LLM](#toc3_2_2__) \n", + " - [Define agent state structure](#toc3_2_3__) \n", + " - [Create agent workflow function](#toc3_2_4__) \n", + " - [Instantiate the banking agent](#toc3_2_5__) \n", + " - [Integrate agent with ValidMind](#toc3_3__) \n", + " - [Import ValidMind components](#toc3_3_1__) \n", + " - [Create agent wrapper function](#toc3_3_2__) \n", + " - [Initialize the ValidMind model object](#toc3_3_3__) \n", + " - [Store the agent reference](#toc3_3_4__) \n", + " - [Verify integration](#toc3_3_5__) \n", + " - [Validate the system prompt](#toc3_4__) \n", + "- [Initialize the ValidMind datasets](#toc4__) \n", + " - [Assign predictions](#toc4_1__) \n", + "- [Running accuracy tests](#toc5__) \n", + " - [Response accuracy test](#toc5_1__) \n", + " - [Tool selection accuracy test](#toc5_2__) \n", + "- [Assigning AI evaluation metric scores](#toc6__) \n", + " - [Identify relevant DeepEval scorers](#toc6_1__) \n", + " - [Assign reasoning scores](#toc6_2__) \n", + " - [Plan 
quality score](#toc6_2_1__) \n", + " - [Plan adherence score](#toc6_2_2__) \n", + " - [Assign action scores](#toc6_3__) \n", + " - [Tool correctness score](#toc6_3_1__) \n", + " - [Argument correctness score](#toc6_3_2__) \n", + " - [Assign execution scores](#toc6_4__) \n", + " - [Task completion score](#toc6_4_1__) \n", + "- [Running RAGAS tests](#toc7__) \n", + " - [Identify relevant RAGAS tests](#toc7_1__) \n", + " - [Faithfulness](#toc7_1_1__) \n", + " - [Response Relevancy](#toc7_1_2__) \n", + " - [Context Recall](#toc7_1_3__) \n", + "- [Running safety tests](#toc8__) \n", + " - [AspectCritic](#toc8_1_1__) \n", + " - [Bias](#toc8_1_2__) \n", + "- [Next steps](#toc9__) \n", + " - [Work with your model documentation](#toc9_1__) \n", + " - [Customize the banking agent for your use case](#toc9_2__) \n", + " - [Discover more learning resources](#toc9_3__) \n", + "- [Upgrade ValidMind](#toc10__) \n", + "\n", + ":::\n", + "\n", + "" + ] + }, + { + "cell_type": "markdown", + "id": "ecaad35f", + "metadata": {}, + "source": [ + "\n", + "\n", + "## About ValidMind\n", + "\n", + "ValidMind is a suite of tools for managing model risk, including risk associated with AI and statistical models. \n", + "\n", + "You use the ValidMind Library to automate documentation and validation tests, and then use the ValidMind Platform to collaborate on model documentation. Together, these products simplify model risk management, facilitate compliance with regulations and institutional standards, and enhance collaboration between yourself and model validators." + ] + }, + { + "cell_type": "markdown", + "id": "6ff1f9ef", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Before you begin\n", + "\n", + "This notebook assumes you have basic familiarity with Python, including an understanding of how functions work. If you are new to Python, you can still run the notebook but we recommend further familiarizing yourself with the language. 
\n", + "\n", + "If you encounter errors due to missing modules in your Python environment, install the modules with `pip install`, and then re-run the notebook. For more help, refer to [Installing Python Modules](https://docs.python.org/3/installing/index.html)." + ] + }, + { + "cell_type": "markdown", + "id": "d7ad8d8c", + "metadata": {}, + "source": [ + "\n", + "\n", + "### New to ValidMind?\n", + "\n", + "If you haven't already seen our documentation on the [ValidMind Library](https://docs.validmind.ai/developer/validmind-library.html), we recommend you begin by exploring the available resources in this section. There, you can learn more about documenting models and running tests, as well as find code samples and our Python Library API reference.\n", + "\n", + "
For access to all features available in this notebook, you'll need access to a ValidMind account.\n", + "

\n", + "Register with ValidMind
" + ] + }, + { + "cell_type": "markdown", + "id": "323caa59", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Key concepts\n", + "\n", + "**Model documentation**: A structured and detailed record pertaining to a model, encompassing key components such as its underlying assumptions, methodologies, data sources, inputs, performance metrics, evaluations, limitations, and intended uses. It serves to ensure transparency, adherence to regulatory requirements, and a clear understanding of potential risks associated with the model’s application.\n", + "\n", + "**Documentation template**: Functions as a test suite and lays out the structure of model documentation, segmented into various sections and sub-sections. Documentation templates define the structure of your model documentation, specifying the tests that should be run, and how the results should be displayed.\n", + "\n", + "**Tests**: A function contained in the ValidMind Library, designed to run a specific quantitative test on the dataset or model. Tests are the building blocks of ValidMind, used to evaluate and document models and datasets, and can be run individually or as part of a suite defined by your model documentation template.\n", + "\n", + "**Metrics**: A subset of tests that do not have thresholds. In the context of this notebook, metrics and tests can be thought of as interchangeable concepts.\n", + "\n", + "**Custom metrics**: Custom metrics are functions that you define to evaluate your model or dataset. These functions can be registered with the ValidMind Library to be used in the ValidMind Platform.\n", + "\n", + "**Inputs**: Objects to be evaluated and documented in the ValidMind Library. 
They can be any of the following:\n", + "\n", + " - **model**: A single model that has been initialized in ValidMind with [`vm.init_model()`](https://docs.validmind.ai/validmind/validmind.html#init_model).\n", + " - **dataset**: Single dataset that has been initialized in ValidMind with [`vm.init_dataset()`](https://docs.validmind.ai/validmind/validmind.html#init_dataset).\n", + " - **models**: A list of ValidMind models - usually this is used when you want to compare multiple models in your custom metric.\n", + " - **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom metric. (Learn more: [Run tests with multiple datasets](https://docs.validmind.ai/notebooks/how_to/tests/run_tests/configure_tests/run_tests_that_require_multiple_datasets.html))\n", + "\n", + "**Parameters**: Additional arguments that can be passed when running a ValidMind test, used to pass additional information to a metric, customize its behavior, or provide additional context.\n", + "\n", + "**Outputs**: Custom metrics can return elements like tables or plots. Tables may be a list of dictionaries (each representing a row) or a pandas DataFrame. Plots may be matplotlib or plotly figures.\n", + "\n", + "**Test suites**: Collections of tests designed to run together to automate and generate model documentation end-to-end for specific use-cases.\n", + "\n", + "Example: the [`classifier_full_suite`](https://docs.validmind.ai/validmind/validmind/test_suites/classifier.html#ClassifierFullSuite) test suite runs tests from the [`tabular_dataset`](https://docs.validmind.ai/validmind/validmind/test_suites/tabular_datasets.html) and [`classifier`](https://docs.validmind.ai/validmind/validmind/test_suites/classifier.html) test suites to fully document the data and model sections for binary classification model use-cases." 
+ ] + }, + { + "cell_type": "markdown", + "id": "ddba5169", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Setting up" + ] + }, + { + "cell_type": "markdown", + "id": "b53da99c", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Install the ValidMind Library\n", + "\n", + "
Recommended Python versions\n", + "

\n", + "Python 3.8 <= x <= 3.11
\n", + "\n", + "Let's begin by installing the ValidMind Library with large language model (LLM) support:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1982a118", + "metadata": {}, + "outputs": [], + "source": [ + "%pip install -q \"validmind[llm]\" \"langgraph==0.3.21\"" + ] + }, + { + "cell_type": "markdown", + "id": "dc9dea3a", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Initialize the ValidMind Library" + ] + }, + { + "cell_type": "markdown", + "id": "5848461e", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Register sample model\n", + "\n", + "Let's first register a sample model for use with this notebook.\n", + "\n", + "1. In a browser, [log in to ValidMind](https://docs.validmind.ai/guide/configuration/log-in-to-validmind.html).\n", + "\n", + "2. In the left sidebar, navigate to **Inventory** and click **+ Register Model**.\n", + "\n", + "3. Enter the model details and click **Next >** to continue to assignment of model stakeholders. ([Need more help?](https://docs.validmind.ai/guide/model-inventory/register-models-in-inventory.html))\n", + "\n", + "4. Select your own name under the **MODEL OWNER** drop-down.\n", + "\n", + "5. Click **Register Model** to add the model to your inventory." + ] + }, + { + "cell_type": "markdown", + "id": "97d0b04b", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Apply documentation template\n", + "\n", + "Once you've registered your model, let's select a documentation template. A template predefines sections for your model documentation and provides a general outline to follow, making the documentation process much easier.\n", + "\n", + "1. In the left sidebar that appears for your model, click **Documents** and select **Documentation**.\n", + "\n", + "2. Under **TEMPLATE**, select `Agentic AI`.\n", + "\n", + "3. Click **Use Template** to apply the template." + ] + }, + { + "cell_type": "markdown", + "id": "b279d5fa", + "metadata": {}, + "source": [ + "
Can't select this template?\n", + "

\n", + "Your organization administrators may need to add it to your template library:\n", + "\n", + "
" + ] + }, + { + "cell_type": "markdown", + "id": "3606cb8c", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Get your code snippet\n", + "\n", + "ValidMind generates a unique _code snippet_ for each registered model to connect with your developer environment. You initialize the ValidMind Library with this code snippet, which ensures that your documentation and tests are uploaded to the correct model when you run the notebook.\n", + "\n", + "1. On the left sidebar that appears for your model, select **Getting Started** and click **Copy snippet to clipboard**.\n", + "2. Next, [load your model identifier credentials from an `.env` file](https://docs.validmind.ai/developer/model-documentation/store-credentials-in-env-file.html) or replace the placeholder with your own code snippet:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d6ccbefc", + "metadata": {}, + "outputs": [], + "source": [ + "# Load your model identifier credentials from an `.env` file\n", + "\n", + "%load_ext dotenv\n", + "%dotenv .env\n", + "\n", + "# Or replace with your code snippet\n", + "\n", + "import validmind as vm\n", + "\n", + "vm.init(\n", + " # api_host=\"...\",\n", + " # api_key=\"...\",\n", + " # api_secret=\"...\",\n", + " # model=\"...\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "2ed79cf0", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Preview the documentation template\n", + "\n", + "Let's verify that you have connected the ValidMind Library to the ValidMind Platform and that the appropriate *template* is selected for your model.\n", + "\n", + "You will upload documentation and test results unique to your model based on this template later on. 
For now, **take a look at the default structure that the template provides with [the `vm.preview_template()` function](https://docs.validmind.ai/validmind/validmind.html#preview_template)** from the ValidMind library and note the empty sections:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "dffdaa6f", + "metadata": {}, + "outputs": [], + "source": [ + "vm.preview_template()" + ] + }, + { + "cell_type": "markdown", + "id": "b5c5ba68", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Verify OpenAI API access\n", + "\n", + "Verify that a valid `OPENAI_API_KEY` is set in your `.env` file:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "22cc39cb", + "metadata": {}, + "outputs": [], + "source": [ + "# Load environment variables if using .env file\n", + "try:\n", + " from dotenv import load_dotenv\n", + " load_dotenv()\n", + "except ImportError:\n", + " print(\"dotenv not installed. Make sure OPENAI_API_KEY is set in your environment.\")" + ] + }, + { + "cell_type": "markdown", + "id": "e4a9d3a9", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Initialize the Python environment\n", + "\n", + "Let's import all the necessary libraries to prepare for building our banking LangGraph agentic system:\n", + "\n", + "- **Standard libraries** for data handling and environment management.\n", + "- **pandas**, a Python library for data manipulation and analytics, as an alias. We'll also configure pandas to show all columns and all rows at full width for easier debugging and inspection.\n", + "- **LangChain** components for LLM integration and tool management.\n", + "- **LangGraph** for building stateful, multi-step agent workflows.\n", + "- **Banking tools** for specialized financial services as defined in [banking_tools.py](banking_tools.py)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2058d1ac", + "metadata": {}, + "outputs": [], + "source": [ + "# STANDARD LIBRARY IMPORTS\n", + "\n", + "# TypedDict: Defines type-safe dictionaries for the agent's state structure\n", + "# Annotated: Adds metadata to type hints\n", + "# Sequence: Type hint for sequences used in the agent\n", + "from typing import TypedDict, Annotated, Sequence\n", + "\n", + "# THIRD PARTY IMPORTS\n", + "\n", + "import pandas as pd\n", + "# Configure pandas to show all columns and all rows at full width\n", + "pd.set_option('display.max_columns', None)\n", + "pd.set_option('display.max_colwidth', None)\n", + "pd.set_option('display.width', None)\n", + "pd.set_option('display.max_rows', None)\n", + "\n", + "# BaseMessage: Represents a base message in the LangChain message system\n", + "# HumanMessage: Represents a human message in the LangChain message system\n", + "# SystemMessage: Represents a system message in the LangChain message system\n", + "from langchain_core.messages import BaseMessage, HumanMessage, SystemMessage\n", + "\n", + "# ChatOpenAI: Represents an OpenAI chat model in the LangChain library\n", + "from langchain_openai import ChatOpenAI\n", + "\n", + "# MemorySaver: Represents a checkpoint for saving and restoring agent state\n", + "from langgraph.checkpoint.memory import MemorySaver\n", + "\n", + "# StateGraph: Represents a stateful graph in the LangGraph library\n", + "# END: Represents the end of a graph\n", + "# START: Represents the start of a graph\n", + "from langgraph.graph import StateGraph, END, START\n", + "\n", + "# add_messages: Adds messages to the state\n", + "from langgraph.graph.message import add_messages\n", + "\n", + "# ToolNode: Represents a tool node in the LangGraph library\n", + "from langgraph.prebuilt import ToolNode\n", + "\n", + "# LOCAL IMPORTS FROM banking_tools.py\n", + "\n", + "from banking_tools import AVAILABLE_TOOLS" + ] + }, + { + "cell_type": "markdown", + 
"id": "e109d075", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Building the LangGraph agent" + ] + }, + { + "cell_type": "markdown", + "id": "15040411", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Test available banking tools\n", + "\n", + "We'll use the demo banking tools defined in `banking_tools.py` that provide use cases of financial services:\n", + "\n", + "- **Credit Risk Analyzer** - Loan applications and credit decisions\n", + "- **Customer Account Manager** - Account services and customer support\n", + "- **Fraud Detection System** - Security and fraud prevention" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1e0a120c", + "metadata": {}, + "outputs": [], + "source": [ + "print(f\"Available tools: {len(AVAILABLE_TOOLS)}\")\n", + "print(\"\\nTool Details:\")\n", + "for i, tool in enumerate(AVAILABLE_TOOLS, 1):\n", + " print(f\" - {tool.name}\")" + ] + }, + { + "cell_type": "markdown", + "id": "04d6785a", + "metadata": {}, + "source": [ + "Let's test each banking tool individually to ensure they're working correctly before integrating them into our agent:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "dc0caff2", + "metadata": {}, + "outputs": [], + "source": [ + "# Test 1: Credit Risk Analyzer\n", + "print(\"TEST 1: Credit Risk Analyzer\")\n", + "print(\"-\" * 40)\n", + "try:\n", + " # Access the underlying function using .func\n", + " credit_result = AVAILABLE_TOOLS[0].func(\n", + " customer_income=75000,\n", + " customer_debt=1200,\n", + " credit_score=720,\n", + " loan_amount=50000,\n", + " loan_type=\"personal\"\n", + " )\n", + " print(credit_result)\n", + " print(\"Credit Risk Analyzer test PASSED\")\n", + "except Exception as e:\n", + " print(f\"Credit Risk Analyzer test FAILED: {e}\")\n", + "\n", + "print(\"\" + \"=\" * 60)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b6b227db", + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "# Test 2: 
Customer Account Manager\n", + "print(\"TEST 2: Customer Account Manager\")\n", + "print(\"-\" * 40)\n", + "try:\n", + " # Test checking balance\n", + " account_result = AVAILABLE_TOOLS[1].func(\n", + " account_type=\"checking\",\n", + " customer_id=\"12345\",\n", + " action=\"check_balance\"\n", + " )\n", + " print(account_result)\n", + "\n", + " # Test getting account info\n", + " info_result = AVAILABLE_TOOLS[1].func(\n", + " account_type=\"all\",\n", + " customer_id=\"12345\", \n", + " action=\"get_info\"\n", + " )\n", + " print(info_result)\n", + " print(\"Customer Account Manager test PASSED\")\n", + "except Exception as e:\n", + " print(f\"Customer Account Manager test FAILED: {e}\")\n", + "\n", + "print(\"\" + \"=\" * 60)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a983b30d", + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "# Test 3: Fraud Detection System\n", + "print(\"TEST 3: Fraud Detection System\")\n", + "print(\"-\" * 40)\n", + "try:\n", + " fraud_result = AVAILABLE_TOOLS[2].func(\n", + " transaction_id=\"TX123\",\n", + " customer_id=\"12345\",\n", + " transaction_amount=500.00,\n", + " transaction_type=\"withdrawal\",\n", + " location=\"Miami, FL\",\n", + " device_id=\"DEVICE_001\"\n", + " )\n", + " print(fraud_result)\n", + " print(\"Fraud Detection System test PASSED\")\n", + "except Exception as e:\n", + " print(f\"Fraud Detection System test FAILED: {e}\")\n", + "\n", + "print(\"\" + \"=\" * 60)" + ] + }, + { + "cell_type": "markdown", + "id": "6bf04845", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Create LangGraph banking agent\n", + "\n", + "With our tools ready to go, we'll create our intelligent banking agent with LangGraph that automatically selects and uses the appropriate banking tool based on a user request." 
+ ] + }, + { + "cell_type": "markdown", + "id": "31df57f0", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Define system prompt\n", + "\n", + "We'll begin by defining our system prompt, which provides the LLM with context about its role as a banking assistant and guidance on when to use each available tool:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7971c427", + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "# Enhanced banking system prompt with tool selection guidance\n", + "system_context = \"\"\"You are a professional banking AI assistant with access to specialized banking tools.\n", + " Analyze the user's banking request and directly use the most appropriate tools to help them.\n", + " \n", + " AVAILABLE BANKING TOOLS:\n", + " \n", + " credit_risk_analyzer - Analyze credit risk for loan applications and credit decisions\n", + " - Use for: loan applications, credit assessments, risk analysis, mortgage eligibility\n", + " - Examples: \"Analyze credit risk for $50k personal loan\", \"Assess mortgage eligibility for $300k home purchase\"\n", + " - Parameters: customer_income, customer_debt, credit_score, loan_amount, loan_type\n", + "\n", + " customer_account_manager - Manage customer accounts and provide banking services\n", + " - Use for: account information, transaction processing, product recommendations, customer service\n", + " - Examples: \"Check balance for checking account 12345\", \"Recommend products for customer with high balance\"\n", + " - Parameters: account_type, customer_id, action, amount, account_details\n", + "\n", + " fraud_detection_system - Analyze transactions for potential fraud and security risks\n", + " - Use for: transaction monitoring, fraud prevention, risk assessment, security alerts\n", + " - Examples: \"Analyze fraud risk for $500 ATM withdrawal in Miami\", \"Check security for $2000 online purchase\"\n", + " - Parameters: transaction_id, customer_id, transaction_amount, 
transaction_type, location, device_id\n", + "\n", + " BANKING INSTRUCTIONS:\n", + " - Analyze the user's banking request carefully and identify the primary need\n", + " - If they need credit analysis → use credit_risk_analyzer\n", + " - If they need financial calculations → use financial_calculator\n", + " - If they need account services → use customer_account_manager\n", + " - If they need security analysis → use fraud_detection_system\n", + " - Extract relevant parameters from the user's request\n", + " - Provide helpful, accurate banking responses based on tool outputs\n", + " - Always consider banking regulations, risk management, and best practices\n", + " - Be professional and thorough in your analysis\n", + "\n", + " Choose and use tools wisely to provide the most helpful banking assistance.\n", + " Describe the response in user friendly manner with details describing the tool output. \n", + " Provide the response in at least 500 words.\n", + " Generate a concise execution plan for the banking request.\n", + " \"\"\"" + ] + }, + { + "cell_type": "markdown", + "id": "406835c8", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Initialize the LLM\n", + "\n", + "Let's initialize the LLM that will power our banking agent:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "866066e7", + "metadata": {}, + "outputs": [], + "source": [ + "# Initialize the main LLM for banking responses\n", + "main_llm = ChatOpenAI(\n", + " model=\"gpt-5-mini\",\n", + " reasoning={\n", + " \"effort\": \"low\",\n", + " \"summary\": \"auto\"\n", + " }\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "cce9685c", + "metadata": {}, + "source": [ + "Then bind the available banking tools to the LLM, enabling the model to automatically recognize and invoke each tool when appropriate based on request input and the system prompt we defined above:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "906d8132", + "metadata": {}, + 
"outputs": [], + "source": [ + "# Bind all banking tools to the main LLM\n", + "llm_with_tools = main_llm.bind_tools(AVAILABLE_TOOLS)" + ] + }, + { + "cell_type": "markdown", + "id": "2bad8799", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Define agent state structure\n", + "\n", + "The agent state defines the data structure that flows through the LangGraph workflow. It includes:\n", + "\n", + "- **messages** — The conversation history between the user and agent\n", + "- **user_input** — The current user request\n", + "- **session_id** — A unique identifier for the conversation session\n", + "- **context** — Additional context that can be passed between nodes\n", + "\n", + "Defining this state structure maintains the structure throughout the agent's execution and allows for multi-turn conversations with memory:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6b926ddf", + "metadata": {}, + "outputs": [], + "source": [ + "# Banking Agent State Definition\n", + "class BankingAgentState(TypedDict):\n", + " messages: Annotated[Sequence[BaseMessage], add_messages]\n", + " user_input: str\n", + " session_id: str\n", + " context: dict" + ] + }, + { + "cell_type": "markdown", + "id": "47ce81b7", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Create agent workflow function\n", + "\n", + "We'll build the LangGraph agent workflow with two main components:\n", + "\n", + "1. **LLM node** — Processes user requests, applies the system prompt, and decides whether to use tools.\n", + "2. **Tools node** — Executes the selected banking tools when the LLM determines they're needed.\n", + "\n", + "The workflow begins with the LLM analyzing the request, then uses tools if needed — or ends if the response is complete, and finally returns to the LLM to generate the final response." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2c9bf585", + "metadata": {}, + "outputs": [], + "source": [ + "def create_banking_langgraph_agent():\n", + " \"\"\"Create a comprehensive LangGraph banking agent with intelligent tool selection.\"\"\"\n", + " def llm_node(state: BankingAgentState) -> BankingAgentState:\n", + " \"\"\"Main LLM node that processes banking requests and selects appropriate tools.\"\"\"\n", + " messages = state[\"messages\"]\n", + " # Add system context to messages\n", + " enhanced_messages = [SystemMessage(content=system_context)] + list(messages)\n", + " # Get LLM response with tool selection\n", + " response = llm_with_tools.invoke(enhanced_messages)\n", + " return {\n", + " **state,\n", + " \"messages\": messages + [response]\n", + " }\n", + " \n", + " def should_continue(state: BankingAgentState) -> str:\n", + " \"\"\"Decide whether to use tools or end the conversation.\"\"\"\n", + " last_message = state[\"messages\"][-1]\n", + " # Check if the LLM wants to use tools\n", + " if hasattr(last_message, 'tool_calls') and last_message.tool_calls:\n", + " return \"tools\"\n", + " return END\n", + " \n", + " # Create the banking state graph\n", + " workflow = StateGraph(BankingAgentState)\n", + " # Add nodes\n", + " workflow.add_node(\"llm\", llm_node)\n", + " workflow.add_node(\"tools\", ToolNode(AVAILABLE_TOOLS))\n", + " # Simplified entry point - go directly to LLM\n", + " workflow.add_edge(START, \"llm\")\n", + " # From LLM, decide whether to use tools or end\n", + " workflow.add_conditional_edges(\n", + " \"llm\",\n", + " should_continue,\n", + " {\"tools\": \"tools\", END: END}\n", + " )\n", + " # Tool execution flows back to LLM for final response\n", + " workflow.add_edge(\"tools\", \"llm\")\n", + " # Set up memory\n", + " memory = MemorySaver()\n", + " # Compile the graph\n", + " agent = workflow.compile(checkpointer=memory)\n", + " return agent" + ] + }, + { + "cell_type": "markdown", + "id": "3eb40287", + 
"metadata": {}, + "source": [ + "\n", + "\n", + "#### Instantiate the banking agent\n", + "\n", + "Now, we'll create an instance of the banking agent by calling the workflow creation function.\n", + "\n", + "This compiled agent is ready to process banking requests and will automatically select and use the appropriate tools based on user queries:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "455b8ee4", + "metadata": {}, + "outputs": [], + "source": [ + "# Create the banking intelligent agent\n", + "banking_agent = create_banking_langgraph_agent()\n", + "\n", + "print(\"Banking LangGraph Agent Created Successfully!\")\n", + "print(\"\\nFeatures:\")\n", + "print(\" - Intelligent banking tool selection\")\n", + "print(\" - Comprehensive banking system prompt\")\n", + "print(\" - Streamlined workflow: LLM → Tools → Response\")\n", + "print(\" - Automatic tool parameter extraction\")\n", + "print(\" - Professional banking assistance\")" + ] + }, + { + "cell_type": "markdown", + "id": "12691528", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Integrate agent with ValidMind\n", + "\n", + "To integrate our LangGraph banking agent with ValidMind, we need to create a wrapper function that ValidMind can use to invoke the agent and extract the necessary information for testing and documentation, allowing ValidMind to run validation tests on the agent's behavior, tool usage, and responses." 
+ ] + }, + { + "cell_type": "markdown", + "id": "7b78509b", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Import ValidMind components\n", + "\n", + "We'll start with importing the necessary ValidMind components for integrating our agent:\n", + "\n", + "- `Prompt` from `validmind.models` for handling prompt-based model inputs\n", + "- `extract_tool_calls_from_agent_output` and `_convert_to_tool_call_list` from `validmind.scorers.llm.deepeval` for extracting and converting tool calls from agent outputs" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9aeb8969", + "metadata": {}, + "outputs": [], + "source": [ + "from validmind.models import Prompt\n", + "from validmind.scorers.llm.deepeval import extract_tool_calls_from_agent_output, _convert_to_tool_call_list" + ] + }, + { + "cell_type": "markdown", + "id": "f67f2955", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Create agent wrapper function\n", + "\n", + "We'll then create a wrapper function that:\n", + "\n", + "- Accepts input in ValidMind's expected format (with `input` and `session_id` fields)\n", + "- Invokes the banking agent with the proper state initialization\n", + "- Captures tool outputs and tool calls for evaluation\n", + "- Returns a standardized response format that includes the prediction, full output, tool messages, and tool call information\n", + "- Handles errors gracefully with fallback responses" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0e4d5a82", + "metadata": {}, + "outputs": [], + "source": [ + "def banking_agent_fn(input):\n", + " \"\"\"\n", + " Invoke the banking agent with the given input.\n", + " \"\"\"\n", + " try:\n", + " # Initial state for banking agent\n", + " initial_state = {\n", + " \"user_input\": input[\"input\"],\n", + " \"messages\": [HumanMessage(content=input[\"input\"])],\n", + " \"session_id\": input[\"session_id\"],\n", + " \"context\": {}\n", + " }\n", + " session_config = {\"configurable\": 
{\"thread_id\": input[\"session_id\"]}}\n", + " result = banking_agent.invoke(initial_state, config=session_config)\n", + "\n", + " from utils import capture_tool_output_messages\n", + "\n", + " # Capture all tool outputs and metadata\n", + " captured_data = capture_tool_output_messages(result)\n", + " \n", + " # Access specific tool outputs, this will be used for RAGAS tests\n", + " tool_message = \"\"\n", + " for output in captured_data[\"tool_outputs\"]:\n", + " tool_message += output['content']\n", + " \n", + " tool_calls_found = []\n", + " messages = result['messages']\n", + " for message in messages:\n", + " if hasattr(message, 'tool_calls') and message.tool_calls:\n", + " for tool_call in message.tool_calls:\n", + " # Handle both dictionary and object formats\n", + " if isinstance(tool_call, dict):\n", + " tool_calls_found.append(tool_call['name'])\n", + " else:\n", + " # ToolCall object - use attribute access\n", + " tool_calls_found.append(tool_call.name)\n", + "\n", + "\n", + " return {\n", + " \"prediction\": result['messages'][-1].content[0]['text'],\n", + " \"output\": result,\n", + " \"tool_messages\": [tool_message],\n", + " # \"tool_calls\": tool_calls_found,\n", + " \"tool_called\": _convert_to_tool_call_list(extract_tool_calls_from_agent_output(result))\n", + " }\n", + " except Exception as e:\n", + " # Return a fallback response if the agent fails\n", + " error_message = f\"\"\"I apologize, but I encountered an error while processing your banking request: {str(e)}.\n", + " Please try rephrasing your question or contact support if the issue persists.\"\"\"\n", + " return {\n", + " \"prediction\": error_message, \n", + " \"output\": {\n", + " \"messages\": [HumanMessage(content=input[\"input\"]), SystemMessage(content=error_message)],\n", + " \"error\": str(e)\n", + " }\n", + " }" + ] + }, + { + "cell_type": "markdown", + "id": "4bdc90d6", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Initialize the ValidMind model object\n", + "\n", + 
"We'll also need to register the banking agent as a ValidMind model object (`vm_model`) that can be passed to other functions for analysis and tests on the data.\n", + "\n", + "You simply initialize this model object with [`vm.init_model()`](https://docs.validmind.ai/validmind/validmind.html#init_model) that:\n", + "\n", + "- Associates the wrapper function with the model for prediction\n", + "- Stores the system prompt template for documentation\n", + "- Provides a unique `input_id` for tracking and identification\n", + "- Enables the agent to be used with ValidMind's testing and documentation features" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "60a2ce7a", + "metadata": {}, + "outputs": [], + "source": [ + "# Initialize the agent as a model\n", + "vm_banking_model = vm.init_model(\n", + " input_id=\"banking_agent_model\",\n", + " predict_fn=banking_agent_fn,\n", + " prompt=Prompt(template=system_context)\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "33ed446a", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Store the agent reference\n", + "\n", + "We'll also store a reference to the original banking agent object in the ValidMind model. This allows us to access the full agent functionality directly if needed, while still maintaining the wrapper function interface for ValidMind's testing framework." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2c653471", + "metadata": {}, + "outputs": [], + "source": [ + "# Add the banking agent to the vm model\n", + "vm_banking_model.model = banking_agent" + ] + }, + { + "cell_type": "markdown", + "id": "bf44ea16", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Verify integration\n", + "\n", + "Let's confirm that the banking agent has been successfully integrated with ValidMind:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8e101b0f", + "metadata": {}, + "outputs": [], + "source": [ + "print(\"Banking Agent Successfully Integrated with ValidMind!\")\n", + "print(f\"Model ID: {vm_banking_model.input_id}\")" + ] + }, + { + "cell_type": "markdown", + "id": "0c80518d", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Validate the system prompt\n", + "\n", + "Let's get an initial sense of how well our defined system prompt meets a few best practices for prompt engineering by running a few tests — we'll run evaluation tests later on our agent's performance.\n", + "\n", + "You run individual tests by calling [the `run_test` function](https://docs.validmind.ai/validmind/validmind/tests.html#run_test) provided by the `validmind.tests` module. 
Passing in our agentic model as an input, the tests below rate the prompt on a scale of 1-10 against the following criteria:\n", + "\n", + "- **[Clarity](https://docs.validmind.ai/tests/prompt_validation/Clarity.html)** — How clearly the prompt states the task.\n", + "- **[Conciseness](https://docs.validmind.ai/tests/prompt_validation/Conciseness.html)** — How succinctly the prompt states the task.\n", + "- **[Delimitation](https://docs.validmind.ai/tests/prompt_validation/Delimitation.html)** — When using complex prompts containing examples, contextual information, or other elements, is the prompt formatted in such a way that each element is clearly separated?\n", + "- **[NegativeInstruction](https://docs.validmind.ai/tests/prompt_validation/NegativeInstruction.html)** — Whether the prompt contains negative instructions.\n", + "- **[Specificity](https://docs.validmind.ai/tests/prompt_validation/Specificity.html)** — How specifically the prompt defines the task." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f52dceb1", + "metadata": {}, + "outputs": [], + "source": [ + "vm.tests.run_test(\n", + " \"validmind.prompt_validation.Clarity\",\n", + " inputs={\n", + " \"model\": vm_banking_model,\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "70d52333", + "metadata": {}, + "outputs": [], + "source": [ + "vm.tests.run_test(\n", + " \"validmind.prompt_validation.Conciseness\",\n", + " inputs={\n", + " \"model\": vm_banking_model,\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5aa89976", + "metadata": {}, + "outputs": [], + "source": [ + "vm.tests.run_test(\n", + " \"validmind.prompt_validation.Delimitation\",\n", + " inputs={\n", + " \"model\": vm_banking_model,\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8630197e", + "metadata": {}, + "outputs": [], + "source": [ + 
"vm.tests.run_test(\n", + " \"validmind.prompt_validation.NegativeInstruction\",\n", + " inputs={\n", + " \"model\": vm_banking_model,\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "bba99915", + "metadata": {}, + "outputs": [], + "source": [ + "vm.tests.run_test(\n", + " \"validmind.prompt_validation.Specificity\",\n", + " inputs={\n", + " \"model\": vm_banking_model,\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "markdown", + "id": "af4d6d77", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Initialize the ValidMind datasets\n", + "\n", + "After validating our system prompt, let's import our sample dataset ([banking_test_dataset.py](banking_test_dataset.py)), which we'll use in the next section to evaluate our agent's performance across different banking scenarios:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0c70ca2c", + "metadata": {}, + "outputs": [], + "source": [ + "from banking_test_dataset import banking_test_dataset" + ] + }, + { + "cell_type": "markdown", + "id": "0268ce6e", + "metadata": {}, + "source": [ + "The next step is to connect your data with a ValidMind `Dataset` object. **This step is always necessary every time you want to connect a dataset to documentation and produce test results through ValidMind,** but you only need to do it once per dataset.\n", + "\n", + "Initialize a ValidMind dataset object using the [`init_dataset` function](https://docs.validmind.ai/validmind/validmind.html#init_dataset) from the ValidMind (`vm`) module. 
For this example, we'll pass in the following arguments:\n", + "\n", + "- **`input_id`** — A unique identifier that allows tracking what inputs are used when running each individual test.\n", + "- **`dataset`** — The raw dataset that you want to provide as input to tests.\n", + "- **`text_column`** — The name of the column containing the text input data.\n", + "- **`target_column`** — A required argument if tests require access to true values. This is the name of the target column in the dataset." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a7e9d158", + "metadata": {}, + "outputs": [], + "source": [ + "vm_test_dataset = vm.init_dataset(\n", + " input_id=\"banking_test_dataset\",\n", + " dataset=banking_test_dataset,\n", + " text_column=\"input\",\n", + " target_column=\"possible_outputs\",\n", + ")\n", + "\n", + "print(\"Banking Test Dataset Initialized in ValidMind!\")\n", + "print(f\"Dataset ID: {vm_test_dataset.input_id}\")\n", + "print(f\"Dataset columns: {vm_test_dataset._df.columns}\")\n", + "vm_test_dataset._df" + ] + }, + { + "cell_type": "markdown", + "id": "b9143fb6", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Assign predictions\n", + "\n", + "Now that both the model object and the datasets have been registered, we'll assign predictions to capture the banking agent's responses for evaluation:\n", + "\n", + "- The [`assign_predictions()` method](https://docs.validmind.ai/validmind/validmind/vm_models.html#assign_predictions) from the `Dataset` object can link existing predictions to any number of models.\n", + "- This method links the model's prediction values to our `vm_test_dataset` dataset.\n", + "\n", + "If no prediction values are passed, the method will compute predictions automatically:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1d462663", + "metadata": {}, + "outputs": [], + "source": [ + 
"vm_test_dataset.assign_predictions(vm_banking_model)\n", + "\n", + "print(\"Banking Agent Predictions Generated Successfully!\")\n", + "print(f\"Predictions assigned to {len(vm_test_dataset._df)} test cases\")\n", + "vm_test_dataset._df.head()" + ] + }, + { + "cell_type": "markdown", + "id": "8e50467e", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Running accuracy tests\n", + "\n", + "Using [`@vm.test`](https://docs.validmind.ai/validmind/validmind.html#test), let's implement some reusable custom *inline tests* to assess the accuracy of our banking agent:\n", + "\n", + "- An inline test refers to a test written and executed within the same environment as the code being tested — in this case, right in this Jupyter Notebook — without requiring a separate test file or framework.\n", + "- You'll note that the custom test functions are just regular Python functions that can include and require any Python library as you see fit." + ] + }, + { + "cell_type": "markdown", + "id": "6d8a9b90", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Response accuracy test\n", + "\n", + "We'll create a custom test that evaluates the banking agent's ability to provide accurate responses by:\n", + "\n", + "- Testing against a dataset of predefined banking questions and expected answers.\n", + "- Checking if responses contain expected keywords and banking terminology.\n", + "- Providing detailed test results including pass/fail status.\n", + "- Helping identify any gaps in the agent's banking knowledge or response quality." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "90232066", + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "@vm.test(\"my_custom_tests.banking_accuracy_test\")\n", + "def banking_accuracy_test(model, dataset, list_of_columns):\n", + " \"\"\"\n", + " The Banking Accuracy Test evaluates whether the agent’s responses include \n", + " critical domain-specific keywords and phrases that indicate accurate, compliant,\n", + " and contextually appropriate banking information. This test ensures that the agent\n", + " provides responses containing the expected banking terminology, risk classifications,\n", + " account details, or other domain-relevant information required for regulatory compliance,\n", + " customer safety, and operational accuracy.\n", + " \"\"\"\n", + " df = dataset._df\n", + " \n", + " # Pre-compute responses for all tests\n", + " y_true = dataset.y.tolist()\n", + " y_pred = dataset.y_pred(model).tolist()\n", + "\n", + " # Vectorized test results\n", + " test_results = []\n", + " for response, keywords in zip(y_pred, y_true):\n", + " # Convert keywords to list if not already a list\n", + " if not isinstance(keywords, list):\n", + " keywords = [keywords]\n", + " test_results.append(any(str(keyword).lower() in str(response).lower() for keyword in keywords))\n", + " \n", + " results = pd.DataFrame()\n", + " column_names = [col + \"_details\" for col in list_of_columns]\n", + " results[column_names] = df[list_of_columns]\n", + " results[\"actual\"] = y_pred\n", + " results[\"expected\"] = y_true\n", + " results[\"passed\"] = test_results\n", + " results[\"error\"] = None if test_results else f'Response did not contain any expected keywords: {y_true}'\n", + " \n", + " return results" + ] + }, + { + "cell_type": "markdown", + "id": "7eed5265", + "metadata": {}, + "source": [ + "Now that we've defined our custom response accuracy test, we can run the test using the same `run_test()` function we used earlier to validate the system 
prompt using our sample dataset and agentic model as input, and log the test results to the ValidMind Platform with the [`log()` method](https://docs.validmind.ai/validmind/validmind/vm_models.html#log):" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e68884d5", + "metadata": {}, + "outputs": [], + "source": [ + "result = vm.tests.run_test(\n", + " \"my_custom_tests.banking_accuracy_test\",\n", + " inputs={\n", + " \"dataset\": vm_test_dataset,\n", + " \"model\": vm_banking_model\n", + " },\n", + " params={\n", + " \"list_of_columns\": [\"input\"]\n", + " }\n", + ")\n", + "result.log()" + ] + }, + { + "cell_type": "markdown", + "id": "4d758ddf", + "metadata": {}, + "source": [ + "Let's review the first five rows of the test dataset to inspect the results to see how well the banking agent performed. Each column in the output serves a specific purpose in evaluating agent performance:\n", + "\n", + "| Column header | Description | Importance |\n", + "|--------------|-------------|------------|\n", + "| **`input`** | Original user query or request | Essential for understanding the context of each test case and tracing which inputs led to specific agent behaviors. |\n", + "| **`expected_tools`** | Banking tools that should be invoked for this request | Enables validation of correct tool selection, which is critical for agentic AI systems where choosing the right tool is a key success metric. |\n", + "| **`expected_output`** | Expected output or keywords that should appear in the response | Defines the success criteria for each test case, enabling objective evaluation of whether the agent produced the correct result. |\n", + "| **`session_id`** | Unique identifier for each test session | Allows tracking and correlation of related test runs, debugging specific sessions, and maintaining audit trails. 
|\n", + "| **`category`** | Classification of the request type | Helps organize test results by domain and identify performance patterns across different banking use cases. |\n", + "| **`banking_agent_model_output`** | Complete agent response including all messages and reasoning | Allows you to examine the full output to assess response quality, completeness, and correctness beyond just keyword matching. |\n", + "| **`banking_agent_model_tool_messages`** | Messages exchanged with the banking tools | Critical for understanding how the agent interacted with tools, what parameters were passed, and what tool outputs were received. |\n", + "| **`banking_agent_model_tool_called`** | Specific tool that was invoked | Enables validation that the agent selected the correct tool for each request, which is fundamental to agentic AI validation. |\n", + "| **`possible_outputs`** | Alternative valid outputs or keywords that could appear in the response | Provides flexibility in evaluation by accounting for multiple acceptable response formats or variations. |" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "78f7edb1", + "metadata": {}, + "outputs": [], + "source": [ + "vm_test_dataset.df.head(5)" + ] + }, + { + "cell_type": "markdown", + "id": "6f233bef", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Tool selection accuracy test\n", + "\n", + "We'll also create a custom test that evaluates the banking agent's ability to select the correct tools for different requests by:\n", + "\n", + "- Testing against a dataset of predefined banking queries with expected tool selections.\n", + "- Comparing the tools actually invoked by the agent against the expected tools for each request.\n", + "- Providing quantitative accuracy scores that measure the proportion of expected tools correctly selected.\n", + "- Helping identify gaps in the agent's understanding of user needs and tool selection logic." 
+ ] + }, + { + "cell_type": "markdown", + "id": "d0b46111", + "metadata": {}, + "source": [ + "First, we'll define a helper function that extracts tool calls from the agent's messages and compares them against the expected tools. This function handles different message formats (dictionary or object) and calculates accuracy scores:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e68798be", + "metadata": {}, + "outputs": [], + "source": [ + "def validate_tool_calls_simple(messages, expected_tools):\n", + " \"\"\"Simple validation of tool calls without RAGAS dependency issues.\"\"\"\n", + " \n", + " tool_calls_found = []\n", + " \n", + " for message in messages:\n", + " if hasattr(message, 'tool_calls') and message.tool_calls:\n", + " for tool_call in message.tool_calls:\n", + " # Handle both dictionary and object formats\n", + " if isinstance(tool_call, dict):\n", + " tool_calls_found.append(tool_call['name'])\n", + " else:\n", + " # ToolCall object - use attribute access\n", + " tool_calls_found.append(tool_call.name)\n", + " \n", + " # Check if expected tools were called\n", + " accuracy = 0.0\n", + " matches = 0\n", + " if expected_tools:\n", + " matches = sum(1 for tool in expected_tools if tool in tool_calls_found)\n", + " accuracy = matches / len(expected_tools)\n", + " \n", + " return {\n", + " 'expected_tools': expected_tools,\n", + " 'found_tools': tool_calls_found,\n", + " 'matches': matches,\n", + " 'total_expected': len(expected_tools) if expected_tools else 0,\n", + " 'accuracy': accuracy,\n", + " }" + ] + }, + { + "cell_type": "markdown", + "id": "1b45472c", + "metadata": {}, + "source": [ + "Now we'll define the main test function that uses the helper function to evaluate tool selection accuracy across all test cases in the dataset:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "604d7313", + "metadata": {}, + "outputs": [], + "source": [ + "@vm.test(\"my_custom_tests.BankingToolCallAccuracy\")\n", + 
"def BankingToolCallAccuracy(dataset, agent_output_column, expected_tools_column):\n", + " \"\"\"\n", + " Evaluates the tool selection accuracy of a LangGraph-powered banking agent.\n", + "\n", + " This test measures whether the agent correctly identifies and invokes the required banking tools\n", + " for each user query scenario.\n", + " For each case, the outputs generated by the agent (including its tool calls) are compared against an\n", + " expected set of tools. The test considers both coverage and exactness: it computes the proportion of\n", + " expected tools correctly called by the agent for each instance.\n", + "\n", + " Parameters:\n", + " dataset (VMDataset): The dataset containing user queries, agent outputs, and ground-truth tool expectations.\n", + " agent_output_column (str): Dataset column name containing agent outputs (should include tool call details in 'messages').\n", + " expected_tools_column (str): Dataset column specifying the true expected tools (as lists).\n", + "\n", + " Returns:\n", + " List[dict]: Per-row dictionaries with details: expected tools, found tools, match count, total expected, and accuracy score.\n", + "\n", + " Purpose:\n", + " Provides diagnostic evidence of the banking agent's core reasoning ability—specifically, its capacity to\n", + " interpret user needs and select the correct banking actions. Useful for diagnosing gaps in tool coverage,\n", + " misclassifications, or breakdowns in agent logic.\n", + "\n", + " Interpretation:\n", + " - An accuracy of 1.0 signals perfect tool selection for that example.\n", + " - Lower scores may indicate partial or complete failures to invoke required tools.\n", + " - Review 'found_tools' vs. 
'expected_tools' to understand the source of discrepancies.\n", + "\n", + " Strengths:\n", + " - Directly tests a core capability of compositional tool-use agents.\n", + " - Framework-agnostic; robust to tool call output format (object or dict).\n", + " - Supports batch validation and result logging for systematic documentation.\n", + "\n", + " Limitations:\n", + " - Does not penalize extra, unnecessary tool calls.\n", + " - Does not assess result quality—only correct invocation.\n", + "\n", + " \"\"\"\n", + " df = dataset._df\n", + " \n", + " results = []\n", + " for i, row in df.iterrows():\n", + " result = validate_tool_calls_simple(row[agent_output_column]['messages'], row[expected_tools_column])\n", + " results.append(result)\n", + " \n", + " return results" + ] + }, + { + "cell_type": "markdown", + "id": "d594c973", + "metadata": {}, + "source": [ + "Finally, we can call our function with `run_test()` and log the test results to the ValidMind Platform:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "dd14115e", + "metadata": {}, + "outputs": [], + "source": [ + "result = vm.tests.run_test(\n", + " \"my_custom_tests.BankingToolCallAccuracy\",\n", + " inputs={\n", + " \"dataset\": vm_test_dataset,\n", + " },\n", + " params={\n", + " \"agent_output_column\": \"banking_agent_model_output\",\n", + " \"expected_tools_column\": \"expected_tools\"\n", + " }\n", + ")\n", + "result.log()" + ] + }, + { + "cell_type": "markdown", + "id": "f78f4107", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Assigning AI evaluation metric scores\n", + "\n", + "*AI agent evaluation metrics* are specialized measurements designed to assess how well autonomous LLM-based agents reason, plan, select and execute tools, and ultimately complete user tasks by analyzing the *full execution trace* — including reasoning steps, tool calls, intermediate decisions, and outcomes, rather than just single input–output pairs. 
These metrics are essential because agent failures often occur in ways traditional LLM metrics miss — for example, choosing the right tool with wrong arguments, creating a good plan but not following it, or completing a task inefficiently.\n", + "\n", + "In this section, we'll evaluate our banking agent's outputs and add scoring to our sample dataset against metrics defined in [DeepEval’s AI agent evaluation framework](https://deepeval.com/guides/guides-ai-agent-evaluation-metrics) which breaks down AI agent evaluation into three layers with corresponding subcategories: **reasoning**, **action**, and **execution**.\n", + "\n", + "Together, these three metrics enable granular diagnosis of agent behavior, help pinpoint where failures occur (reasoning, action, or execution), and support both development benchmarking and production monitoring." + ] + }, + { + "cell_type": "markdown", + "id": "3a9c853a", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Identify relevant DeepEval scorers\n", + "\n", + "*Scorers* are evaluation metrics that analyze model outputs and store their results in the dataset:\n", + "\n", + "- Each scorer adds a new column to the dataset with format: `{scorer_name}_{metric_name}`\n", + "- The column contains the numeric score (typically `0`-`1`) for each example\n", + "- Multiple scorers can be run on the same dataset, each adding their own column\n", + "- Scores are persisted in the dataset for later analysis and visualization\n", + "- Common scorer patterns include:\n", + " - Model performance metrics (accuracy, F1, etc.)\n", + " - Output quality metrics (relevance, faithfulness)\n", + " - Task-specific metrics (completion, correctness)\n", + "\n", + "Use `list_scorers()` from [`validmind.scorers`](https://docs.validmind.ai/validmind/validmind/tests.html#scorer) to discover all available scoring methods and their IDs that can be used with `assign_scores()`. 
We'll filter these results to return only DeepEval scorers for our desired three metrics in a formatted table with descriptions:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "730c70ec", + "metadata": {}, + "outputs": [], + "source": [ + "# Load all DeepEval scorers\n", + "llm_scorers_dict = vm.tests.load._load_tests([s for s in vm.scorer.list_scorers() if \"deepeval\" in s.lower()])\n", + "\n", + "# Categorize scorers by metric layer\n", + "reasoning_scorers = {}\n", + "action_scorers = {}\n", + "execution_scorers = {}\n", + "\n", + "for scorer_id, scorer_func in llm_scorers_dict.items():\n", + " tags = getattr(scorer_func, \"__tags__\", [])\n", + " scorer_name = scorer_id.split(\".\")[-1]\n", + "\n", + " if \"reasoning_layer\" in tags:\n", + " reasoning_scorers[scorer_id] = scorer_func\n", + " elif \"action_layer\" in tags:\n", + " action_scorers[scorer_id] = scorer_func\n", + " elif \"TaskCompletion\" in scorer_name:\n", + " execution_scorers[scorer_id] = scorer_func\n", + "\n", + "# Display scorers by category\n", + "print(\"=\" * 80)\n", + "print(\"REASONING LAYER\")\n", + "print(\"=\" * 80)\n", + "if reasoning_scorers:\n", + " reasoning_df = vm.tests.load._pretty_list_tests(reasoning_scorers, truncate=True)\n", + " display(reasoning_df)\n", + "else:\n", + " print(\"No reasoning layer scorers found.\")\n", + "\n", + "print(\"\\n\" + \"=\" * 80)\n", + "print(\"ACTION LAYER\")\n", + "print(\"=\" * 80)\n", + "if action_scorers:\n", + " action_df = vm.tests.load._pretty_list_tests(action_scorers, truncate=True)\n", + " display(action_df)\n", + "else:\n", + " print(\"No action layer scorers found.\")\n", + "\n", + "print(\"\\n\" + \"=\" * 80)\n", + "print(\"EXECUTION LAYER\")\n", + "print(\"=\" * 80)\n", + "if execution_scorers:\n", + " execution_df = vm.tests.load._pretty_list_tests(execution_scorers, truncate=True)\n", + " display(execution_df)\n", + "else:\n", + " print(\"No execution layer scorers found.\")" + ] + }, + { + "cell_type": 
"markdown", + "id": "4dd73d0d", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Assign reasoning scores\n", + "\n", + "*Reasoning* evaluates planning and strategy generation:\n", + "\n", + "- **Plan quality** – How logical, complete, and efficient the agent’s plan is.\n", + "- **Plan adherence** – Whether the agent follows its own plan during execution." + ] + }, + { + "cell_type": "markdown", + "id": "06ccae28", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Plan quality score\n", + "\n", + "Let's measure how well our banking agent generates a plan before acting. A high score means the plan is logical, complete, and efficient." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "52f362ba", + "metadata": {}, + "outputs": [], + "source": [ + "vm_test_dataset.assign_scores(\n", + " metrics = \"validmind.scorers.llm.deepeval.PlanQuality\",\n", + " input_column = \"input\",\n", + " actual_output_column = \"banking_agent_model_prediction\",\n", + " tools_called_column = \"banking_agent_model_tool_called\",\n", + " agent_output_column = \"banking_agent_model_output\",\n", + ")\n", + "vm_test_dataset._df.head()" + ] + }, + { + "cell_type": "markdown", + "id": "8dcdc88f", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Plan adherence score\n", + "\n", + "Let's check whether our banking agent follows the plan it created. Deviations lower this score and indicate gaps between reasoning and execution." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4124a7c2", + "metadata": {}, + "outputs": [], + "source": [ + "vm_test_dataset.assign_scores(\n", + " metrics = \"validmind.scorers.llm.deepeval.PlanAdherence\",\n", + " input_column = \"input\",\n", + " actual_output_column = \"banking_agent_model_prediction\",\n", + " expected_output_column = \"expected_output\",\n", + " tools_called_column = \"banking_agent_model_tool_called\",\n", + " agent_output_column = \"banking_agent_model_output\",\n", + "\n", + ")\n", + "vm_test_dataset._df.head()" + ] + }, + { + "cell_type": "markdown", + "id": "6da1ac95", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Assign action scores\n", + "\n", + "*Action* assesses tool usage and argument generation:\n", + "\n", + "- **Tool correctness** – Whether the agent selects and calls the right tools.\n", + "- **Argument correctness** – Whether the agent generates correct tool arguments." + ] + }, + { + "cell_type": "markdown", + "id": "d4db8270", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Tool correctness score\n", + "\n", + "Let's evaluate if our banking agent selects the appropriate tool for the task. Choosing the wrong tool reduces performance even if reasoning was correct." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8d2e8a25", + "metadata": {}, + "outputs": [], + "source": [ + "vm_test_dataset.assign_scores(\n", + " metrics = \"validmind.scorers.llm.deepeval.ToolCorrectness\",\n", + " input_column = \"input\",\n", + " actual_output_column = \"banking_agent_model_prediction\",\n", + " tools_called_column = \"banking_agent_model_tool_called\",\n", + " expected_tools_column = \"expected_tools\",\n", + " agent_output_column = \"banking_agent_model_output\",\n", + "\n", + ")\n", + "vm_test_dataset._df.head()" + ] + }, + { + "cell_type": "markdown", + "id": "9aa50b05", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Argument correctness score\n", + "\n", + "Let's assess whether our banking agent provides correct inputs or arguments to the selected tool. Incorrect arguments can lead to failed or unexpected results." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "04f90489", + "metadata": {}, + "outputs": [], + "source": [ + "vm_test_dataset.assign_scores(\n", + " metrics = \"validmind.scorers.llm.deepeval.ArgumentCorrectness\",\n", + " input_column = \"input\",\n", + " actual_output_column = \"banking_agent_model_prediction\",\n", + " tools_called_column = \"banking_agent_model_tool_called\",\n", + " agent_output_column = \"banking_agent_model_output\",\n", + "\n", + ")\n", + "vm_test_dataset._df.head()" + ] + }, + { + "cell_type": "markdown", + "id": "c59e5595", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Assign execution score\n", + "\n", + "*Execution* measures end-to-end performance:\n", + "\n", + "- **Task completion** – Whether the agent successfully completes the intended task.\n" + ] + }, + { + "cell_type": "markdown", + "id": "d64600ca", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Task completion score\n", + "\n", + "Let's evaluate whether our banking agent successfully completes the requested tasks. 
Incomplete task execution can lead to user dissatisfaction and failed banking operations." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "05024f1f", + "metadata": {}, + "outputs": [], + "source": [ + "vm_test_dataset.assign_scores(\n", + " metrics = \"validmind.scorers.llm.deepeval.TaskCompletion\",\n", + " input_column = \"input\",\n", + " actual_output_column = \"banking_agent_model_prediction\",\n", + " agent_output_column = \"banking_agent_model_output\",\n", + " tools_called_column = \"banking_agent_model_tool_called\",\n", + "\n", + ")\n", + "vm_test_dataset._df.head()" + ] + }, + { + "cell_type": "markdown", + "id": "21aa9b0d", + "metadata": {}, + "source": [ + "As you recall from the beginning of this section, when we run scorers through `assign_scores()`, the return values are automatically processed and added as new columns with the format `{scorer_name}_{metric_name}`. Note that the task completion scorer has added a new column `TaskCompletion_score` to our dataset.\n", + "\n", + "We'll use this column to visualize the distribution of task completion scores across our test cases through the [BoxPlot test](https://docs.validmind.ai/validmind/validmind/tests/plots/BoxPlot.html#boxplot):" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7f6d08ca", + "metadata": {}, + "outputs": [], + "source": [ + "vm.tests.run_test(\n", + " \"validmind.plots.BoxPlot\",\n", + " inputs={\"dataset\": vm_test_dataset},\n", + " params={\n", + " \"columns\": \"TaskCompletion_score\",\n", + " \"title\": \"Distribution of Task Completion Scores\",\n", + " \"ylabel\": \"Score\",\n", + " \"figsize\": (8, 6)\n", + " }\n", + ").log()" + ] + }, + { + "cell_type": "markdown", + "id": "012bbcb8", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Running RAGAS tests\n", + "\n", + "Next, let's run some out-of-the-box *Retrieval-Augmented Generation Assessment* (RAGAS) tests available in the ValidMind Library. 
RAGAS provides specialized metrics for evaluating retrieval-augmented generation systems and conversational AI agents. These metrics analyze different aspects of agent performance by assessing how well systems integrate retrieved information with generated responses.\n", + "\n", + "Our banking agent uses tools to retrieve information and generates responses based on that context, making it similar to a RAG system. RAGAS metrics help evaluate the quality of this integration by analyzing the relationship between retrieved tool outputs, user queries, and generated responses.\n", + "\n", + "These tests provide insights into how well our banking agent integrates tool usage with conversational abilities, ensuring it provides accurate, relevant, and helpful responses to banking users while maintaining fidelity to retrieved information." + ] + }, + { + "cell_type": "markdown", + "id": "2036afba", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Identify relevant RAGAS tests\n", + "\n", + "Let's explore some of ValidMind's available tests. Using ValidMind’s repository of tests streamlines your development testing, and helps you ensure that your models are being documented and evaluated appropriately.\n", + "\n", + "You can pass `tasks` and `tags` as parameters to the [`vm.tests.list_tests()` function](https://docs.validmind.ai/validmind/validmind/tests.html#list_tests) to filter the tests based on the tags and task types:\n", + "\n", + "- **`tasks`** represent the kind of modeling task associated with a test. Here we'll focus on `text_qa` tasks.\n", + "- **`tags`** are free-form descriptions providing more details about the test, for example, what category the test falls into. Here we'll focus on the `ragas` tag.\n", + "\n", + "We'll then run three of these tests returned as examples below." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0701f5a9", + "metadata": {}, + "outputs": [], + "source": [ + "vm.tests.list_tests(task=\"text_qa\", tags=[\"ragas\"])" + ] + }, + { + "cell_type": "markdown", + "id": "c1741ffc", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Faithfulness\n", + "\n", + "Let's evaluate whether the banking agent's responses accurately reflect the information retrieved from tools. Unfaithful responses can misreport credit analysis, financial calculations, and compliance results—undermining user trust in the banking agent." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "92044533", + "metadata": {}, + "outputs": [], + "source": [ + "vm.tests.run_test(\n", + " \"validmind.model_validation.ragas.Faithfulness\",\n", + " inputs={\"dataset\": vm_test_dataset},\n", + " param_grid={\n", + " \"user_input_column\": [\"input\"],\n", + " \"response_column\": [\"banking_agent_model_prediction\"],\n", + " \"retrieved_contexts_column\": [\"banking_agent_model_tool_messages\"],\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "markdown", + "id": "42b71ccc", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Response Relevancy\n", + "\n", + "Let's evaluate whether the banking agent's answers address the user's original question or request. Irrelevant or off-topic responses can frustrate users and fail to deliver the banking information they need." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d7483bc3", + "metadata": {}, + "outputs": [], + "source": [ + "vm.tests.run_test(\n", + " \"validmind.model_validation.ragas.ResponseRelevancy\",\n", + " inputs={\"dataset\": vm_test_dataset},\n", + " params={\n", + " \"user_input_column\": \"input\",\n", + " \"response_column\": \"banking_agent_model_prediction\",\n", + " \"retrieved_contexts_column\": \"banking_agent_model_tool_messages\",\n", + " }\n", + ").log()" + ] + }, + { + "cell_type": "markdown", + "id": "4f4d0569", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Context Recall\n", + "\n", + "Let's evaluate how well the banking agent uses the information retrieved from tools when generating its responses. Poor context recall can lead to incomplete or underinformed answers even when the right tools were selected." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e5dc00ce", + "metadata": {}, + "outputs": [], + "source": [ + "vm.tests.run_test(\n", + " \"validmind.model_validation.ragas.ContextRecall\",\n", + " inputs={\"dataset\": vm_test_dataset},\n", + " param_grid={\n", + " \"user_input_column\": [\"input\"],\n", + " \"retrieved_contexts_column\": [\"banking_agent_model_tool_messages\"],\n", + " \"reference_column\": [\"banking_agent_model_prediction\"],\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "markdown", + "id": "b987b00e", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Running safety tests\n", + "\n", + "Finally, let's run some out-of-the-box *safety* tests available in the ValidMind Library. Safety tests provide specialized metrics for evaluating whether AI agents operate reliably and securely. 
These metrics analyze different aspects of agent behavior by assessing adherence to safety guidelines, consistency of outputs, and resistance to harmful or inappropriate requests.\n", + "\n", + "Our banking agent handles sensitive financial information and user requests, making safety and reliability essential. Safety tests help evaluate whether the agent maintains appropriate boundaries, responds consistently and correctly to inputs, and avoids generating harmful, biased, or unprofessional content.\n", + "\n", + "These tests provide insights into how well our banking agent upholds standards of fairness and professionalism, ensuring it operates reliably and securely for banking users." + ] + }, + { + "cell_type": "markdown", + "id": "a754cca3", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### AspectCritic\n", + "\n", + "Let's evaluate our banking agent's responses across multiple quality dimensions — conciseness, coherence, correctness, harmfulness, and maliciousness. Weak performance on these dimensions can degrade user experience, fall short of professional banking standards, or introduce safety risks. \n", + "\n", + "We'll use the `AspectCritic` we identified earlier:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "148daa2b", + "metadata": {}, + "outputs": [], + "source": [ + "vm.tests.run_test(\n", + " \"validmind.model_validation.ragas.AspectCritic\",\n", + " inputs={\"dataset\": vm_test_dataset},\n", + " param_grid={\n", + " \"user_input_column\": [\"input\"],\n", + " \"response_column\": [\"banking_agent_model_prediction\"],\n", + " \"retrieved_contexts_column\": [\"banking_agent_model_tool_messages\"],\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "markdown", + "id": "92e5b1f6", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Bias\n", + "\n", + "Let's evaluate whether our banking agent's prompts contain unintended biases that could affect banking decisions. 
Biased prompts can lead to unfair or discriminatory outcomes — undermining customer trust and exposing the institution to compliance risk.\n", + "\n", + "We'll first use `list_tests()` again to filter for tests relating to `prompt_validation`:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "74eba86c", + "metadata": {}, + "outputs": [], + "source": [ + "vm.tests.list_tests(filter=\"prompt_validation\")" + ] + }, + { + "cell_type": "markdown", + "id": "bcc66b65", + "metadata": {}, + "source": [ + "And then run the identified `Bias` test:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "062cf8e7", + "metadata": {}, + "outputs": [], + "source": [ + "vm.tests.run_test(\n", + " \"validmind.prompt_validation.Bias\",\n", + " inputs={\n", + " \"model\": vm_banking_model,\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "markdown", + "id": "a2832750", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Next steps\n", + "\n", + "You can look at the output produced by the ValidMind Library right in the notebook where you ran the code, as you would expect. But there is a better way — use the ValidMind Platform to work with your model documentation." + ] + }, + { + "cell_type": "markdown", + "id": "a8cb1a58", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Work with your model documentation\n", + "\n", + "1. From the **Inventory** in the ValidMind Platform, go to the model you registered earlier. ([Need more help?](https://docs.validmind.ai/guide/model-inventory/working-with-model-inventory.html))\n", + "\n", + "2. In the left sidebar that appears for your model, click **Documentation** under Documents.\n", + "\n", + " What you see is the full draft of your model documentation in a more easily consumable version. From here, you can make qualitative edits to model documentation, view guidelines, collaborate with validators, and submit your model documentation for approval when it's ready. 
[Learn more ...](https://docs.validmind.ai/guide/working-with-model-documentation.html)\n", + "\n", + "3. Click into any section related to the tests we ran in this notebook, for example: **4.3. Prompt Evaluation** to review the results of the tests we logged." + ] + }, + { + "cell_type": "markdown", + "id": "94ef26be", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Customize the banking agent for your use case\n", + "\n", + "You've now built an agentic AI system designed for banking use cases that supports compliance with supervisory guidance such as SR 11-7 and SS1/23, covering credit and fraud risk assessment for both retail and commercial banking. Extend this example agent to real-world banking scenarios and production deployment by:\n", + "\n", + "- Adapting the banking tools to your organization's specific requirements\n", + "- Adding more banking scenarios and edge cases to your test set\n", + "- Connecting the agent to your banking systems and databases\n", + "- Implementing additional banking-specific tools and workflows" + ] + }, + { + "cell_type": "markdown", + "id": "a681e49c", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Discover more learning resources\n", + "\n", + "Learn more about the ValidMind Library tools we used in this notebook:\n", + "\n", + "- [Custom prompts](https://docs.validmind.ai/notebooks/how_to/tests/run_tests/configure_tests/customize_test_result_descriptions.html)\n", + "- [Custom tests](https://docs.validmind.ai/notebooks/how_to/tests/custom_tests/implement_custom_tests.html)\n", + "- [ValidMind scorers](https://docs.validmind.ai/notebooks/how_to/scoring/assign_scores_complete_tutorial.html)\n", + "\n", + "We also offer many more interactive notebooks to help you document models:\n", + "\n", + "- [Run tests & test suites](https://docs.validmind.ai/guide/testing-overview.html)\n", + "- [Code samples](https://docs.validmind.ai/guide/samples-jupyter-notebooks.html)\n", + "\n", + "Or, visit our 
[documentation](https://docs.validmind.ai/) to learn more about ValidMind." + ] + }, + { + "cell_type": "markdown", + "id": "707c1b6e", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Upgrade ValidMind\n", + "\n", + "
After installing ValidMind, you’ll want to periodically make sure you are on the latest version to access any new features and other enhancements.
\n", + "\n", + "Retrieve the information for the currently installed version of ValidMind:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9733adff", + "metadata": {}, + "outputs": [], + "source": [ + "%pip show validmind" + ] + }, + { + "cell_type": "markdown", + "id": "e4b0b646", + "metadata": {}, + "source": [ + "If the version returned is lower than the version indicated in our [production open-source code](https://github.com/validmind/validmind-library/blob/prod/validmind/__version__.py), restart your notebook and run:\n", + "\n", + "```bash\n", + "%pip install --upgrade validmind\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "387fa7f1", + "metadata": {}, + "source": [ + "You may need to restart your kernel after running the upgrade package for changes to be applied." + ] + }, + { + "cell_type": "markdown", + "id": "copyright-de4baf0f42ba4a37946d52586dff1049", + "metadata": {}, + "source": [ + "\n", + "\n", + "\n", + "\n", + "***\n", + "\n", + "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", + "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", + "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "validmind-1QuffXMV-py3.11", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.9" + } + }, + "nbformat": 4, + "nbformat_minor": 5 } diff --git a/notebooks/use_cases/capital_markets/quickstart_option_pricing_models_quantlib.ipynb b/notebooks/use_cases/capital_markets/quickstart_option_pricing_models_quantlib.ipynb index 3d6b0d15c..de541b448 100644 --- a/notebooks/use_cases/capital_markets/quickstart_option_pricing_models_quantlib.ipynb +++ b/notebooks/use_cases/capital_markets/quickstart_option_pricing_models_quantlib.ipynb @@ -1,1344 +1,1345 @@ { - "cells": [ - { - "cell_type": "markdown", - "id": "1e2a4689", - "metadata": {}, - "source": [ - "# Quickstart for Heston option pricing model using QuantLib\n", - "\n", - "Welcome! Let's get you started with the basic process of documenting models with ValidMind.\n", - "\n", - "The Heston option pricing model is a popular stochastic volatility model used to price options. Developed by Steven Heston in 1993, the model assumes that the asset's volatility follows a mean-reverting square-root process, allowing it to capture the empirical observation of volatility \"clustering\" in financial markets. This model is particularly useful for assets where volatility is not constant, making it a favored approach in quantitative finance for pricing complex derivatives.\n", - "\n", - "Here’s an overview of the Heston model as implemented in QuantLib, a powerful library for quantitative finance:\n", - "\n", - "\n", - "\n", - "### Model Assumptions and Characteristics\n", - "1. 
**Stochastic Volatility**: The volatility is modeled as a stochastic process, following a mean-reverting square-root process (Cox-Ingersoll-Ross process).\n", - "2. **Correlated Asset and Volatility Processes**: The asset price and volatility are assumed to be correlated, allowing the model to capture the \"smile\" effect observed in implied volatilities.\n", - "3. **Risk-Neutral Dynamics**: The Heston model is typically calibrated under a risk-neutral measure, which allows for direct application to pricing.\n", - "\n", - "\n", - "\n", - "### Heston Model Parameters\n", - "The model is governed by a set of key parameters:\n", - "- **S0**: Initial stock price\n", - "- **v0**: Initial variance of the asset price\n", - "- **kappa**: Speed of mean reversion of the variance\n", - "- **theta**: Long-term mean level of variance\n", - "- **sigma**: Volatility of volatility (vol of vol)\n", - "- **rho**: Correlation between the asset price and variance processes\n", - "\n", - "The dynamics of the asset price \\( S \\) and the variance \\( v \\) under the Heston model are given by:\n", - "\n", - "$$\n", - "dS_t = r S_t \\, dt + \\sqrt{v_t} S_t \\, dW^S_t\n", - "$$\n", - "\n", - "$$\n", - "dv_t = \\kappa (\\theta - v_t) \\, dt + \\sigma \\sqrt{v_t} \\, dW^v_t\n", - "$$\n", - "\n", - "where \\( $dW^S$ \\) and \\( $dW^v$ \\) are Wiener processes with correlation \\( $\\rho$ \\).\n", - "\n", - "\n", - "\n", - "### Advantages and Limitations\n", - "- **Advantages**:\n", - " - Ability to capture volatility smiles and skews.\n", - " - More realistic pricing for options on assets with stochastic volatility.\n", - "- **Limitations**:\n", - " - Calibration can be complex due to the number of parameters.\n", - " - Computationally intensive compared to simpler models like Black-Scholes.\n", - "\n", - "This setup provides a robust framework for pricing and analyzing options with stochastic volatility dynamics. 
QuantLib’s implementation makes it easy to experiment with different parameter configurations and observe their effects on pricing.\n", - "\n", - "You will learn how to initialize the ValidMind Library, develop a option pricing model, and then write custom tests that can be used for sensitivity and stress testing to quickly generate documentation about model." - ] - }, - { - "cell_type": "markdown", - "id": "69ec219a", - "metadata": {}, - "source": [ - "::: {.content-hidden when-format=\"html\"}\n", - "## Contents \n", - " - [Model Assumptions and Characteristics](#toc1_1__) \n", - " - [Heston Model Parameters](#toc1_2__) \n", - " - [Advantages and Limitations](#toc1_3__) \n", - "- [About ValidMind](#toc2__) \n", - " - [Before you begin](#toc2_1__) \n", - " - [New to ValidMind?](#toc2_2__) \n", - " - [Key concepts](#toc2_3__) \n", - "- [Setting up](#toc3__) \n", - " - [Install the ValidMind Library](#toc3_1__) \n", - " - [Initialize the ValidMind Library](#toc3_2__) \n", - " - [Register sample model](#toc3_2_1__) \n", - " - [Apply documentation template](#toc3_2_2__) \n", - " - [Get your code snippet](#toc3_2_3__) \n", - " - [Initialize the Python environment](#toc3_3__) \n", - " - [Preview the documentation template](#toc3_4__) \n", - "- [Data Preparation](#toc4__) \n", - " - [Helper functions](#toc4_1_1__) \n", - " - [Market Data Quality and Availability](#toc4_2__) \n", - " - [Initialize the ValidMind datasets](#toc4_3__) \n", - " - [Data Quality](#toc4_4__) \n", - " - [Isolation Forest Outliers Test](#toc4_4_1__) \n", - " - [Model parameters](#toc4_4_2__) \n", - "- [Model development - Heston Option price](#toc5__) \n", - " - [Model Calibration](#toc5_1__) \n", - " - [Model Evaluation](#toc5_2__) \n", - " - [Benchmark Testing](#toc5_2_1__) \n", - " - [Sensitivity Testing](#toc5_2_2__) \n", - " - [Stress Testing](#toc5_2_3__) \n", - "- [Next steps](#toc6__) \n", - " - [Work with your model documentation](#toc6_1__) \n", - " - [Discover more learning 
resources](#toc6_2__) \n", - "\n", - ":::\n", - "\n", - "" - ] - }, - { - "cell_type": "markdown", - "id": "b9fb5d17", - "metadata": {}, - "source": [ - "\n", - "\n", - "## About ValidMind\n", - "\n", - "ValidMind is a suite of tools for managing model risk, including risk associated with AI and statistical models.\n", - "\n", - "You use the ValidMind Library to automate documentation and validation tests, and then use the ValidMind Platform to collaborate on model documentation. Together, these products simplify model risk management, facilitate compliance with regulations and institutional standards, and enhance collaboration between yourself and model validators.\n", - "\n", - "\n", - "\n", - "### Before you begin\n", - "\n", - "This notebook assumes you have basic familiarity with Python, including an understanding of how functions work. If you are new to Python, you can still run the notebook but we recommend further familiarizing yourself with the language. \n", - "\n", - "If you encounter errors due to missing modules in your Python environment, install the modules with `pip install`, and then re-run the notebook. For more help, refer to [Installing Python Modules](https://docs.python.org/3/installing/index.html).\n", - "\n", - "\n", - "\n", - "### New to ValidMind?\n", - "\n", - "If you haven't already seen our documentation on the [ValidMind Library](https://docs.validmind.ai/developer/validmind-library.html), we recommend you begin by exploring the available resources in this section. There, you can learn more about documenting models and running tests, as well as find code samples and our Python Library API reference.\n", - "\n", - "
For access to all features available in this notebook, you'll need access to a ValidMind account.\n", - "

\n", - "Register with ValidMind
\n", - "\n", - "\n", - "\n", - "### Key concepts\n", - "\n", - "**Model documentation**: A structured and detailed record pertaining to a model, encompassing key components such as its underlying assumptions, methodologies, data sources, inputs, performance metrics, evaluations, limitations, and intended uses. It serves to ensure transparency, adherence to regulatory requirements, and a clear understanding of potential risks associated with the model’s application.\n", - "\n", - "**Documentation template**: Functions as a test suite and lays out the structure of model documentation, segmented into various sections and sub-sections. Documentation templates define the structure of your model documentation, specifying the tests that should be run, and how the results should be displayed.\n", - "\n", - "**Tests**: A function contained in the ValidMind Library, designed to run a specific quantitative test on the dataset or model. Tests are the building blocks of ValidMind, used to evaluate and document models and datasets, and can be run individually or as part of a suite defined by your model documentation template.\n", - "\n", - "**Custom tests**: Custom tests are functions that you define to evaluate your model or dataset. These functions can be registered via the ValidMind Library to be used with the ValidMind Platform.\n", - "\n", - "**Inputs**: Objects to be evaluated and documented in the ValidMind Library. 
They can be any of the following:\n", - "\n", - " - **model**: A single model that has been initialized in ValidMind with [`vm.init_model()`](https://docs.validmind.ai/validmind/validmind.html#init_model).\n", - " - **dataset**: Single dataset that has been initialized in ValidMind with [`vm.init_dataset()`](https://docs.validmind.ai/validmind/validmind.html#init_dataset).\n", - " - **models**: A list of ValidMind models - usually this is used when you want to compare multiple models in your custom test.\n", - " - **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom test. See this [example](https://docs.validmind.ai/notebooks/how_to/tests/run_tests/configure_tests/run_tests_that_require_multiple_datasets.html) for more information.\n", - "\n", - "**Parameters**: Additional arguments that can be passed when running a ValidMind test, used to pass additional information to a test, customize its behavior, or provide additional context.\n", - "\n", - "**Outputs**: Custom tests can return elements like tables or plots. Tables may be a list of dictionaries (each representing a row) or a pandas DataFrame. Plots may be matplotlib or plotly figures.\n", - "\n", - "**Test suites**: Collections of tests designed to run together to automate and generate model documentation end-to-end for specific use-cases.\n", - "\n", - "Example: the [`classifier_full_suite`](https://docs.validmind.ai/validmind/validmind/test_suites/classifier.html#ClassifierFullSuite) test suite runs tests from the [`tabular_dataset`](https://docs.validmind.ai/validmind/validmind/test_suites/tabular_datasets.html) and [`classifier`](https://docs.validmind.ai/validmind/validmind/test_suites/classifier.html) test suites to fully document the data and model sections for binary classification model use-cases." 
- ] - }, - { - "cell_type": "markdown", - "id": "f2dccf35", - "metadata": {}, - "source": [ - "\n", - "\n", - "## Setting up" - ] - }, - { - "cell_type": "markdown", - "id": "5a5ce085", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Install the ValidMind Library\n", - "\n", - "To install the library:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "409352bf", - "metadata": {}, - "outputs": [], - "source": [ - "%pip install -q validmind" - ] - }, - { - "cell_type": "markdown", - "id": "65e870b2", - "metadata": {}, - "source": [ - "To install the QuantLib library:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3a34debf", - "metadata": {}, - "outputs": [], - "source": [ - "%pip install -q QuantLib" - ] - }, - { - "cell_type": "markdown", - "id": "fb30ae07", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Initialize the ValidMind Library" - ] - }, - { - "cell_type": "markdown", - "id": "c6f87017", - "metadata": {}, - "source": [ - "\n", - "\n", - "#### Register sample model\n", - "\n", - "Let's first register a sample model for use with this notebook:\n", - "\n", - "1. In a browser, [log in to ValidMind](https://docs.validmind.ai/guide/configuration/log-in-to-validmind.html).\n", - "\n", - "2. In the left sidebar, navigate to **Inventory** and click **+ Register Model**.\n", - "\n", - "3. Enter the model details and click **Next >** to continue to assignment of model stakeholders. ([Need more help?](https://docs.validmind.ai/guide/model-inventory/register-models-in-inventory.html))\n", - "\n", - "4. Select your own name under the **MODEL OWNER** drop-down.\n", - "\n", - "5. Click **Register Model** to add the model to your inventory." - ] - }, - { - "cell_type": "markdown", - "id": "cbb2e2c9", - "metadata": {}, - "source": [ - "\n", - "\n", - "#### Apply documentation template\n", - "\n", - "Once you've registered your model, let's select a documentation template. 
A template predefines sections for your model documentation and provides a general outline to follow, making the documentation process much easier.\n", - "\n", - "1. In the left sidebar that appears for your model, click **Documents** and select **Documentation**.\n", - "\n", - "2. Under **TEMPLATE**, select `Capital Markets`.\n", - "\n", - "3. Click **Use Template** to apply the template." - ] - }, - { - "cell_type": "markdown", - "id": "41c4edca", - "metadata": {}, - "source": [ - "
Can't select this template?\n", - "

\n", - "Your organization administrators may need to add it to your template library:\n", - "
" - ] - }, - { - "cell_type": "markdown", - "id": "2012eb82", - "metadata": {}, - "source": [ - "\n", - "\n", - "#### Get your code snippet\n", - "\n", - "ValidMind generates a unique _code snippet_ for each registered model to connect with your developer environment. You initialize the ValidMind Library with this code snippet, which ensures that your documentation and tests are uploaded to the correct model when you run the notebook.\n", - "\n", - "1. On the left sidebar that appears for your model, select **Getting Started** and click **Copy snippet to clipboard**.\n", - "2. Next, [load your model identifier credentials from an `.env` file](https://docs.validmind.ai/developer/model-documentation/store-credentials-in-env-file.html) or replace the placeholder with your own code snippet:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0cd3f67e", - "metadata": {}, - "outputs": [], - "source": [ - "# Load your model identifier credentials from an `.env` file\n", - "\n", - "%load_ext dotenv\n", - "%dotenv .env\n", - "\n", - "# Or replace with your code snippet\n", - "\n", - "import validmind as vm\n", - "\n", - "vm.init(\n", - " # api_host=\"...\",\n", - " # api_key=\"...\",\n", - " # api_secret=\"...\",\n", - " # model=\"...\",\n", - ")\n" - ] - }, - { - "cell_type": "markdown", - "id": "6d944cc9", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Initialize the Python environment\n", - "\n", - "Next, let's import the necessary libraries and set up your Python environment for data analysis:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f8cf2746", - "metadata": {}, - "outputs": [], - "source": [ - "%matplotlib inline\n", - "\n", - "import pandas as pd\n", - "import numpy as np\n", - "import matplotlib.pyplot as plt\n", - "from scipy.optimize import minimize\n", - "import yfinance as yf\n", - "import QuantLib as ql\n", - "from validmind.tests import run_test" - ] - }, - { - "cell_type": "markdown", - "id": 
"bc431ee0", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Preview the documentation template\n", - "\n", - "Let's verify that you have connected the ValidMind Library to the ValidMind Platform and that the appropriate *template* is selected for your model.\n", - "\n", - "You will upload documentation and test results unique to your model based on this template later on. For now, **take a look at the default structure that the template provides with [the `vm.preview_template()` function](https://docs.validmind.ai/validmind/validmind.html#preview_template)** from the ValidMind library and note the empty sections:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7e844028", - "metadata": {}, - "outputs": [], - "source": [ - "vm.preview_template()" - ] - }, - { - "cell_type": "markdown", - "id": "0c0ee8b9", - "metadata": {}, - "source": [ - "\n", - "\n", - "## Data Preparation" - ] - }, - { - "cell_type": "markdown", - "id": "5a4d2c36", - "metadata": {}, - "source": [ - "### Market Data Sources\n", - "\n", - "\n", - "\n", - "#### Helper functions\n", - "Let's define helper function retrieve to option data from Yahoo Finance." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b96a500f", - "metadata": {}, - "outputs": [], - "source": [ - "def get_market_data(ticker, expiration_date_str):\n", - " \"\"\"\n", - " Fetch option market data from Yahoo Finance for the given ticker and expiration date.\n", - " Returns a list of tuples: (strike, maturity, option_price).\n", - " \"\"\"\n", - " # Create a Ticker object for the specified stock\n", - " stock = yf.Ticker(ticker)\n", - "\n", - " # Get all available expiration dates for options\n", - " option_dates = stock.options\n", - "\n", - " # Check if the requested expiration date is available\n", - " if expiration_date_str not in option_dates:\n", - " raise ValueError(f\"Expiration date {expiration_date_str} not available for {ticker}. 
Available dates: {option_dates}\")\n", - "\n", - " # Get the option chain for the specified expiration date\n", - " option_chain = stock.option_chain(expiration_date_str)\n", - "\n", - " # Get call options (or you can use puts as well based on your requirement)\n", - " calls = option_chain.calls\n", - "\n", - " # Convert expiration_date_str to QuantLib Date\n", - " expiry_date_parts = list(map(int, expiration_date_str.split('-'))) # Split YYYY-MM-DD\n", - " maturity_date = ql.Date(expiry_date_parts[2], expiry_date_parts[1], expiry_date_parts[0]) # Convert to QuantLib Date\n", - "\n", - " # Create a list to store strike prices, maturity dates, and option prices\n", - " market_data = []\n", - " for index, row in calls.iterrows():\n", - " strike = row['strike']\n", - " option_price = row['lastPrice'] # You can also use 'bid', 'ask', 'mid', etc.\n", - " market_data.append((strike, maturity_date, option_price))\n", - " df = pd.DataFrame(market_data, columns = ['strike', 'maturity_date', 'option_price'])\n", - " return df" - ] - }, - { - "cell_type": "markdown", - "id": "c7769b73", - "metadata": {}, - "source": [ - "Let's define helper function retrieve to stock data from Yahoo Finance. This helper function to calculate spot price, dividend yield, volatility and risk free rate using the underline stock data." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "dc44c448", - "metadata": {}, - "outputs": [], - "source": [ - "def get_option_parameters(ticker):\n", - " # Fetch historical data for the stock\n", - " stock_data = yf.Ticker(ticker)\n", - " \n", - " # Get the current spot price\n", - " spot_price = stock_data.history(period=\"1d\")['Close'].iloc[-1]\n", - " \n", - " # Get dividend yield\n", - " dividend_rate = stock_data.dividends.mean() / spot_price if not stock_data.dividends.empty else 0.0\n", - " \n", - " # Estimate volatility (standard deviation of log returns)\n", - " hist_data = stock_data.history(period=\"1y\")['Close']\n", - " log_returns = np.log(hist_data / hist_data.shift(1)).dropna()\n", - " volatility = np.std(log_returns) * np.sqrt(252) # Annualized volatility\n", - " \n", - " # Assume a risk-free rate from some known data (can be fetched from market data, here we use 0.001)\n", - " risk_free_rate = 0.001\n", - " \n", - " # Return the calculated parameters\n", - " return {\n", - " \"spot_price\": spot_price,\n", - " \"volatility\": volatility,\n", - " \"dividend_rate\": dividend_rate,\n", - " \"risk_free_rate\": risk_free_rate\n", - " }" - ] - }, - { - "cell_type": "markdown", - "id": "c7b739d3", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Market Data Quality and Availability\n", - "Next, let's specify ticker and expiration date to get market data." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "50225fde", - "metadata": {}, - "outputs": [], - "source": [ - "ticker = \"MSFT\"\n", - "expiration_date = \"2024-12-13\" # Example expiration date in 'YYYY-MM-DD' form\n", - "\n", - "market_data = get_market_data(ticker=ticker, expiration_date_str=expiration_date)" - ] - }, - { - "cell_type": "markdown", - "id": "c539b95e", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Initialize the ValidMind datasets\n", - "\n", - "Before you can run tests, you must first initialize a ValidMind dataset object using the [`init_dataset`](https://docs.validmind.ai/validmind/validmind.html#init_dataset) function from the ValidMind (`vm`) module." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "113f9c17", - "metadata": {}, - "outputs": [], - "source": [ - "vm_market_data = vm.init_dataset(\n", - " dataset=market_data,\n", - " input_id=\"market_data\",\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "185beb24", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Data Quality\n", - "Let's check quality of the data using outliers and missing data tests." - ] - }, - { - "cell_type": "markdown", - "id": "7f14464c", - "metadata": {}, - "source": [ - "\n", - "\n", - "#### Isolation Forest Outliers Test\n", - "Let's detects anomalies in the dataset using the Isolation Forest algorithm, visualized through scatter plots." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "56c919ec", - "metadata": {}, - "outputs": [], - "source": [ - "result = run_test(\n", - " \"validmind.data_validation.IsolationForestOutliers\",\n", - " inputs={\n", - " \"dataset\": vm_market_data,\n", - " },\n", - " title=\"Outliers detection using Isolation Forest\",\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "e4d0e5ca", - "metadata": {}, - "source": [ - "##### Missing Values Test\n", - "Let's evaluates dataset quality by ensuring the missing value ratio across all features does not exceed a set threshold." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e95c825f", - "metadata": {}, - "outputs": [], - "source": [ - "result = run_test(\n", - " \"validmind.data_validation.MissingValues\",\n", - " inputs={\n", - " \"dataset\": vm_market_data,\n", - " },\n", - " title=\"Missing Values detection\",\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "829403a3", - "metadata": {}, - "source": [ - "\n", - "\n", - "#### Model parameters\n", - "Let's calculate the model parameters using from stock data " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "25936449", - "metadata": {}, - "outputs": [], - "source": [ - "option_params = get_option_parameters(ticker=ticker)" - ] - }, - { - "cell_type": "markdown", - "id": "0a0948b6", - "metadata": {}, - "source": [ - "\n", - "\n", - "## Model development - Heston Option price" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e15b8221", - "metadata": {}, - "outputs": [], - "source": [ - "class HestonModel:\n", - "\n", - " def __init__(self, ticker, expiration_date_str, calculation_date, spot_price, dividend_rate, risk_free_rate):\n", - " self.ticker = ticker\n", - " self.expiration_date_str = expiration_date_str,\n", - " self.calculation_date = calculation_date\n", - " self.spot_price = spot_price\n", - " self.dividend_rate = dividend_rate\n", - " self.risk_free_rate = 
risk_free_rate\n", - " \n", - " def predict_option_price(self, strike, maturity_date, spot_price, v0=None, theta=None, kappa=None, sigma=None, rho=None):\n", - " # Set the evaluation date\n", - " ql.Settings.instance().evaluationDate = self.calculation_date\n", - "\n", - " # Construct the European Option\n", - " payoff = ql.PlainVanillaPayoff(ql.Option.Call, strike)\n", - " exercise = ql.EuropeanExercise(maturity_date)\n", - " european_option = ql.VanillaOption(payoff, exercise)\n", - "\n", - " # Yield term structures for risk-free rate and dividend\n", - " riskFreeTS = ql.YieldTermStructureHandle(ql.FlatForward(calculation_date, self.risk_free_rate, ql.Actual365Fixed()))\n", - " dividendTS = ql.YieldTermStructureHandle(ql.FlatForward(calculation_date, self.dividend_rate, ql.Actual365Fixed()))\n", - "\n", - " # Initial stock price\n", - " initialValue = ql.QuoteHandle(ql.SimpleQuote(spot_price))\n", - "\n", - " # Heston process parameters\n", - " heston_process = ql.HestonProcess(riskFreeTS, dividendTS, initialValue, v0, kappa, theta, sigma, rho)\n", - " hestonModel = ql.HestonModel(heston_process)\n", - "\n", - " # Use the Heston analytic engine\n", - " engine = ql.AnalyticHestonEngine(hestonModel)\n", - " european_option.setPricingEngine(engine)\n", - "\n", - " # Calculate the Heston model price\n", - " h_price = european_option.NPV()\n", - "\n", - " return h_price\n", - "\n", - " def predict_american_option_price(self, strike, maturity_date, spot_price, v0=None, theta=None, kappa=None, sigma=None, rho=None):\n", - " # Set the evaluation date\n", - " ql.Settings.instance().evaluationDate = self.calculation_date\n", - "\n", - " # Construct the American Option\n", - " payoff = ql.PlainVanillaPayoff(ql.Option.Call, strike)\n", - " exercise = ql.AmericanExercise(self.calculation_date, maturity_date)\n", - " american_option = ql.VanillaOption(payoff, exercise)\n", - "\n", - " # Yield term structures for risk-free rate and dividend\n", - " riskFreeTS = 
ql.YieldTermStructureHandle(ql.FlatForward(self.calculation_date, self.risk_free_rate, ql.Actual365Fixed()))\n", - " dividendTS = ql.YieldTermStructureHandle(ql.FlatForward(self.calculation_date, self.dividend_rate, ql.Actual365Fixed()))\n", - "\n", - " # Initial stock price\n", - " initialValue = ql.QuoteHandle(ql.SimpleQuote(spot_price))\n", - "\n", - " # Heston process parameters\n", - " heston_process = ql.HestonProcess(riskFreeTS, dividendTS, initialValue, v0, kappa, theta, sigma, rho)\n", - " heston_model = ql.HestonModel(heston_process)\n", - "\n", - "\n", - " payoff = ql.PlainVanillaPayoff(ql.Option.Call, strike)\n", - " exercise = ql.AmericanExercise(self.calculation_date, maturity_date)\n", - " american_option = ql.VanillaOption(payoff, exercise)\n", - " heston_fd_engine = ql.FdHestonVanillaEngine(heston_model)\n", - " american_option.setPricingEngine(heston_fd_engine)\n", - " option_price = american_option.NPV()\n", - "\n", - " return option_price\n", - "\n", - " def objective_function(self, params, market_data, spot_price, dividend_rate, risk_free_rate):\n", - " v0, theta, kappa, sigma, rho = params\n", - "\n", - " # Sum of squared differences between market prices and model prices\n", - " error = 0.0\n", - " for i, row in market_data.iterrows():\n", - " model_price = self.predict_option_price(row['strike'], row['maturity_date'], spot_price, \n", - " v0, theta, kappa, sigma, rho)\n", - " error += (model_price - row['option_price']) ** 2\n", - " \n", - " return error\n", - "\n", - " def calibrate_model(self, ticker, expiration_date_str):\n", - " # Get the option market data dynamically from Yahoo Finance\n", - " market_data = get_market_data(ticker, expiration_date_str)\n", - "\n", - " # Initial guesses for Heston parameters\n", - " initial_params = [0.04, 0.04, 0.1, 0.1, -0.75]\n", - "\n", - " # Bounds for the parameters to ensure realistic values\n", - " bounds = [(0.0001, 1.0), # v0\n", - " (0.0001, 1.0), # theta\n", - " (0.001, 2.0), # kappa\n", - " 
(0.001, 1.0), # sigma\n", - " (-0.75, 0.0)] # rho\n", - "\n", - " # Optimize the parameters to minimize the error between model and market prices\n", - " result = minimize(self.objective_function, initial_params, args=(market_data, self.spot_price, self.dividend_rate, self.risk_free_rate),\n", - " bounds=bounds, method='L-BFGS-B')\n", - "\n", - " # Optimized Heston parameters\n", - " v0_opt, theta_opt, kappa_opt, sigma_opt, rho_opt = result.x\n", - "\n", - " return v0_opt, theta_opt, kappa_opt, sigma_opt, rho_opt\n" - ] - }, - { - "cell_type": "markdown", - "id": "a941aa32", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Model Calibration\n", - "* The calibration process aims to optimize the Heston model parameters (v0, theta, kappa, sigma, rho) by minimizing the difference between model-predicted option prices and observed market prices.\n", - "* In this implementation, the model is calibrated to current market data, specifically using option prices from the selected ticker and expiration date.\n", - "\n", - "Let's specify `calculation_date` and `strike_price` as input parameters for the model to verify its functionality and confirm it operates as expected." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1d61dfca", - "metadata": {}, - "outputs": [], - "source": [ - "calculation_date = ql.Date(26, 11, 2024)\n", - "# Convert expiration date string to QuantLib.Date\n", - "expiry_date_parts = list(map(int, expiration_date.split('-')))\n", - "maturity_date = ql.Date(expiry_date_parts[2], expiry_date_parts[1], expiry_date_parts[0])\n", - "strike_price = 460.0\n", - "\n", - "hm = HestonModel(\n", - " ticker=ticker,\n", - " expiration_date_str= expiration_date,\n", - " calculation_date= calculation_date,\n", - " spot_price= option_params['spot_price'],\n", - " dividend_rate = option_params['dividend_rate'],\n", - " risk_free_rate = option_params['risk_free_rate']\n", - ")\n", - "\n", - "# Let's calibrate model\n", - "v0_opt, theta_opt, kappa_opt, sigma_opt, rho_opt = hm.calibrate_model(ticker, expiration_date)\n", - "print(f\"Optimized Heston parameters: v0={v0_opt}, theta={theta_opt}, kappa={kappa_opt}, sigma={sigma_opt}, rho={rho_opt}\")\n", - "\n", - "\n", - "# option price\n", - "h_price = hm.predict_option_price(strike_price, maturity_date, option_params['spot_price'], v0_opt, theta_opt, kappa_opt, sigma_opt, rho_opt)\n", - "print(\"The Heston model price for the option is:\", h_price)" - ] - }, - { - "cell_type": "markdown", - "id": "75313272", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Model Evaluation" - ] - }, - { - "cell_type": "markdown", - "id": "2e6471ef", - "metadata": {}, - "source": [ - "\n", - "\n", - "#### Benchmark Testing\n", - "The benchmark testing framework provides a robust way to validate the Heston model implementation and understand the relationships between European and American option prices under stochastic volatility conditions.\n", - "Let's compares European and American option prices using the Heston model." 
- ] - }, - { - "cell_type": "code", - "execution_count": 15, - "id": "810cf887", - "metadata": {}, - "outputs": [], - "source": [ - "@vm.test(\"my_custom_tests.BenchmarkTest\")\n", - "def benchmark_test(hm_model, strikes, maturity_date, spot_price, v0=None, theta=None, kappa=None, sigma=None, rho=None):\n", - " \"\"\"\n", - " Compares European and American option prices using the Heston model.\n", - "\n", - " This test evaluates the price differences between European and American options\n", - " across multiple strike prices while keeping other parameters constant. The comparison\n", - " helps understand the early exercise premium of American options over their European\n", - " counterparts under stochastic volatility conditions.\n", - "\n", - " Args:\n", - " hm_model: HestonModel instance for option pricing calculations\n", - " strikes (list[float]): List of strike prices to test\n", - " maturity_date (ql.Date): Option expiration date in QuantLib format\n", - " spot_price (float): Current price of the underlying asset\n", - " v0 (float, optional): Initial variance. Defaults to None.\n", - " theta (float, optional): Long-term variance. Defaults to None.\n", - " kappa (float, optional): Mean reversion rate. Defaults to None.\n", - " sigma (float, optional): Volatility of variance. Defaults to None.\n", - " rho (float, optional): Correlation between asset and variance. 
Defaults to None.\n", - "\n", - " Returns:\n", - " dict: Contains a DataFrame with the following columns:\n", - " - Strike: Strike prices tested\n", - " - Maturity date: Expiration date for all options\n", - " - Spot price: Current underlying price\n", - " - european model price: Prices for European options\n", - " - american model price: Prices for American options\n", - "\"\"\"\n", - " american_derived_prices = []\n", - " european_derived_prices = []\n", - " for K in strikes:\n", - " european_derived_prices.append(hm_model.predict_option_price(K, maturity_date, spot_price, v0, theta, kappa, sigma, rho))\n", - " american_derived_prices.append(hm_model.predict_american_option_price(K, maturity_date, spot_price, v0, theta, kappa, sigma, rho))\n", - "\n", - " data = {\n", - " \"Strike\": strikes,\n", - " \"Maturity date\": [maturity_date] * len(strikes),\n", - " \"Spot price\": [spot_price] * len(strikes),\n", - " \"european model price\": european_derived_prices,\n", - " \"american model price\": american_derived_prices,\n", - "\n", - " }\n", - " df1 = pd.DataFrame(data)\n", - " return {\"strikes variation benchmarking\": df1}" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3fdd6705", - "metadata": {}, - "outputs": [], - "source": [ - "result = run_test(\n", - " \"my_custom_tests.BenchmarkTest\",\n", - " params={\n", - " \"hm_model\": hm,\n", - " \"strikes\": [400, 425, 460, 495, 520],\n", - " \"maturity_date\": maturity_date,\n", - " \"spot_price\": option_params['spot_price'],\n", - " \"v0\":v0_opt,\n", - " \"theta\": theta_opt,\n", - " \"kappa\":kappa_opt ,\n", - " \"sigma\": sigma_opt,\n", - " \"rho\":rho_opt\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "markdown", - "id": "e359b503", - "metadata": {}, - "source": [ - "\n", - "\n", - "#### Sensitivity Testing\n", - "The sensitivity testing framework provides a systematic approach to understanding how the Heston model responds to parameter changes, which is crucial for both 
model validation and practical application in trading and risk management." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "51922313", - "metadata": {}, - "outputs": [], - "source": [ - "@vm.test(\"my_test_provider.Sensitivity\")\n", - "def SensitivityTest(\n", - " model,\n", - " strike_price,\n", - " maturity_date,\n", - " spot_price,\n", - " v0_opt,\n", - " theta_opt,\n", - " kappa_opt,\n", - " sigma_opt,\n", - " rho_opt,\n", - "):\n", - " \"\"\"\n", - " Evaluates the sensitivity of American option prices to changes in model parameters.\n", - "\n", - " This test calculates option prices using the Heston model with optimized parameters.\n", - " It's designed to analyze how changes in various model inputs affect the option price,\n", - " which is crucial for understanding model behavior and risk management.\n", - "\n", - " Args:\n", - " model (HestonModel): Initialized Heston model instance wrapped in ValidMind model object\n", - " strike_price (float): Strike price of the option\n", - " maturity_date (ql.Date): Expiration date of the option in QuantLib format\n", - " spot_price (float): Current price of the underlying asset\n", - " v0_opt (float): Optimized initial variance parameter\n", - " theta_opt (float): Optimized long-term variance parameter\n", - " kappa_opt (float): Optimized mean reversion rate parameter\n", - " sigma_opt (float): Optimized volatility of variance parameter\n", - " rho_opt (float): Optimized correlation parameter between asset price and variance\n", - " \"\"\"\n", - " price = model.model.predict_american_option_price(\n", - " strike_price,\n", - " maturity_date,\n", - " spot_price,\n", - " v0_opt,\n", - " theta_opt,\n", - " kappa_opt,\n", - " sigma_opt,\n", - " rho_opt,\n", - " )\n", - "\n", - " return price\n" - ] - }, - { - "cell_type": "markdown", - "id": "408a05ef", - "metadata": {}, - "source": [ - "##### Common plot function" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "104ca6dd", - 
"metadata": {}, - "outputs": [], - "source": [ - "def plot_results(df, params: dict = None):\n", - " fig2 = plt.figure(figsize=(10, 6))\n", - " plt.plot(df[params[\"x\"]], df[params[\"y\"]], label=params[\"label\"])\n", - " plt.xlabel(params[\"xlabel\"])\n", - " plt.ylabel(params[\"ylabel\"])\n", - " \n", - " plt.title(params[\"title\"])\n", - " plt.legend()\n", - " plt.grid(True)\n", - " plt.show() # close the plot to avoid displaying it" - ] - }, - { - "cell_type": "markdown", - "id": "ca72b9e5", - "metadata": {}, - "source": [ - "Let's create ValidMind model object" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ae7093fa", - "metadata": {}, - "outputs": [], - "source": [ - "hm_model = vm.init_model(model=hm, input_id=\"HestonModel\")" - ] - }, - { - "cell_type": "markdown", - "id": "b2141640", - "metadata": {}, - "source": [ - "##### Strike sensitivity\n", - "Let's analyzes how option prices change as the strike price varies. We create a range of strike prices around the current strike (460) and observe the impact on option prices while keeping all other parameters constant." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ea7f1cbe", - "metadata": {}, - "outputs": [], - "source": [ - "result = run_test(\n", - " \"my_test_provider.Sensitivity:ToStrike\",\n", - " inputs = {\n", - " \"model\": hm_model\n", - " },\n", - " param_grid={\n", - " \"strike_price\": list(np.linspace(460-50, 460+50, 10)),\n", - " \"maturity_date\": [maturity_date],\n", - " \"spot_price\": [option_params[\"spot_price\"]],\n", - " \"v0_opt\": [v0_opt],\n", - " \"theta_opt\": [theta_opt],\n", - " \"kappa_opt\": [kappa_opt],\n", - " \"sigma_opt\": [sigma_opt],\n", - " \"rho_opt\":[rho_opt]\n", - " },\n", - ")\n", - "result.log()\n", - "# Visualize how option prices change with different strike prices\n", - "plot_results(\n", - " pd.DataFrame(result.tables[0].data),\n", - " params={\n", - " \"x\": \"strike_price\",\n", - " \"y\":\"Value\",\n", - " \"label\":\"Strike price\",\n", - " \"xlabel\":\"Strike price\",\n", - " \"ylabel\":\"option price\",\n", - " \"title\":\"Heston option - Strike price Sensitivity\",\n", - " }\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "be143012", - "metadata": {}, - "source": [ - "\n", - "\n", - "#### Stress Testing\n", - "This stress testing framework provides a comprehensive view of how the Heston model behaves under different market conditions and helps identify potential risks in option pricing." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f2f01a40", - "metadata": {}, - "outputs": [], - "source": [ - "@vm.test(\"my_custom_tests.Stressing\")\n", - "def StressTest(\n", - " model,\n", - " strike_price,\n", - " maturity_date,\n", - " spot_price,\n", - " v0_opt,\n", - " theta_opt,\n", - " kappa_opt,\n", - " sigma_opt,\n", - " rho_opt,\n", - "):\n", - " \"\"\"\n", - " Performs stress testing on Heston model parameters to evaluate option price sensitivity.\n", - "\n", - " This test evaluates how the American option price responds to stressed market conditions\n", - " by varying key model parameters. It's designed to:\n", - " 1. Identify potential model vulnerabilities\n", - " 2. Understand price behavior under extreme scenarios\n", - " 3. Support risk management decisions\n", - " 4. Validate model stability across parameter ranges\n", - "\n", - " Args:\n", - " model (HestonModel): Initialized Heston model instance wrapped in ValidMind model object\n", - " strike_price (float): Option strike price\n", - " maturity_date (ql.Date): Option expiration date in QuantLib format\n", - " spot_price (float): Current price of the underlying asset\n", - " v0_opt (float): Initial variance parameter under stress testing\n", - " theta_opt (float): Long-term variance parameter under stress testing\n", - " kappa_opt (float): Mean reversion rate parameter under stress testing\n", - " sigma_opt (float): Volatility of variance parameter under stress testing\n", - " rho_opt (float): Correlation parameter under stress testing\n", - " \"\"\"\n", - " price = model.model.predict_american_option_price(\n", - " strike_price,\n", - " maturity_date,\n", - " spot_price,\n", - " v0_opt,\n", - " theta_opt,\n", - " kappa_opt,\n", - " sigma_opt,\n", - " rho_opt,\n", - " )\n", - "\n", - " return price\n" - ] - }, - { - "cell_type": "markdown", - "id": "31fcbe9c", - "metadata": {}, - "source": [ - "##### Rho (correlation) and Theta (long term vol) stress test\n", - "Next, 
let's evaluates the sensitivity of a model's output to changes in the correlation parameter (rho) and the long-term variance parameter (theta) within a stochastic volatility framework." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6119b5d9", - "metadata": {}, - "outputs": [], - "source": [ - "result = run_test(\n", - " \"my_custom_tests.Stressing:TheRhoAndThetaParameters\",\n", - " inputs = {\n", - " \"model\": hm_model,\n", - " },\n", - " param_grid={\n", - " \"strike_price\": [460],\n", - " \"maturity_date\": [maturity_date],\n", - " \"spot_price\": [option_params[\"spot_price\"]],\n", - " \"v0_opt\": [v0_opt],\n", - " \"theta_opt\": list(np.linspace(0.1, theta_opt+0.4, 5)),\n", - " \"kappa_opt\": [kappa_opt],\n", - " \"sigma_opt\": [sigma_opt],\n", - " \"rho_opt\":list(np.linspace(rho_opt-0.2, rho_opt+0.2, 5))\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "markdown", - "id": "be39cb3a", - "metadata": {}, - "source": [ - "##### Sigma stress test\n", - "Let's evaluates the sensitivity of a model's output to changes in the volatility parameter, sigma. This test is crucial for understanding how variations in market volatility impact the model's valuation of financial instruments, particularly options." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0dc189b7", - "metadata": {}, - "outputs": [], - "source": [ - "result = run_test(\n", - " \"my_custom_tests.Stressing:TheSigmaParameter\",\n", - " inputs = {\n", - " \"model\": hm_model,\n", - " },\n", - " param_grid={\n", - " \"strike_price\": [460],\n", - " \"maturity_date\": [maturity_date],\n", - " \"spot_price\": [option_params[\"spot_price\"]],\n", - " \"v0_opt\": [v0_opt],\n", - " \"theta_opt\": [theta_opt],\n", - " \"kappa_opt\": [kappa_opt],\n", - " \"sigma_opt\": list(np.linspace(0.1, sigma_opt+0.6, 5)),\n", - " \"rho_opt\": [rho_opt]\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "markdown", - "id": "173a5294", - "metadata": {}, - "source": [ - "##### Stress kappa\n", - "Let's evaluates the sensitivity of a model's output to changes in the kappa parameter, which is a mean reversion rate in stochastic volatility models." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "dae9714f", - "metadata": {}, - "outputs": [], - "source": [ - "result = run_test(\n", - " \"my_custom_tests.Stressing:TheKappaParameter\",\n", - " inputs = {\n", - " \"model\": hm_model,\n", - " },\n", - " param_grid={\n", - " \"strike_price\": [460],\n", - " \"maturity_date\": [maturity_date],\n", - " \"spot_price\": [option_params[\"spot_price\"]],\n", - " \"v0_opt\": [v0_opt],\n", - " \"theta_opt\": [theta_opt],\n", - " \"kappa_opt\": list(np.linspace(kappa_opt, kappa_opt+0.2, 5)),\n", - " \"sigma_opt\": [sigma_opt],\n", - " \"rho_opt\": [rho_opt]\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "markdown", - "id": "b4d1d968", - "metadata": {}, - "source": [ - "##### Stress theta\n", - "Let's evaluates the sensitivity of a model's output to changes in the parameter theta, which represents the long-term variance in a stochastic volatility model." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e68df3db", - "metadata": {}, - "outputs": [], - "source": [ - "result = run_test(\n", - " \"my_custom_tests.Stressing:TheThetaParameter\",\n", - " inputs = {\n", - " \"model\": hm_model,\n", - " },\n", - " param_grid={\n", - " \"strike_price\": [460],\n", - " \"maturity_date\": [maturity_date],\n", - " \"spot_price\": [option_params[\"spot_price\"]],\n", - " \"v0_opt\": [v0_opt],\n", - " \"theta_opt\": list(np.linspace(0.1, theta_opt+0.9, 5)),\n", - " \"kappa_opt\": [kappa_opt],\n", - " \"sigma_opt\": [sigma_opt],\n", - " \"rho_opt\": [rho_opt]\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "markdown", - "id": "32e70456", - "metadata": {}, - "source": [ - "##### Stress rho\n", - "Let's evaluates the sensitivity of a model's output to changes in the correlation parameter, rho, within a stochastic volatility (SV) model framework. This test is crucial for understanding how variations in rho, which represents the correlation between the asset price and its volatility, impact the model's valuation output." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b5ca3fc2", - "metadata": {}, - "outputs": [], - "source": [ - "result = run_test(\n", - " \"my_custom_tests.Stressing:TheRhoParameter\",\n", - " inputs = {\n", - " \"model\": hm_model,\n", - " },\n", - " param_grid={\n", - " \"strike_price\": [460],\n", - " \"maturity_date\": [maturity_date],\n", - " \"spot_price\": [option_params[\"spot_price\"]],\n", - " \"v0_opt\": [v0_opt],\n", - " \"theta_opt\": [theta_opt],\n", - " \"kappa_opt\": [kappa_opt],\n", - " \"sigma_opt\": [sigma_opt],\n", - " \"rho_opt\": list(np.linspace(rho_opt-0.2, rho_opt+0.2, 5))\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "markdown", - "id": "892c5347", - "metadata": {}, - "source": [ - "\n", - "\n", - "## Next steps\n", - "\n", - "You can look at the results of this test suite right in the notebook where you ran the code, as you would expect. But there is a better way — use the ValidMind Platform to work with your model documentation.\n", - "\n", - "\n", - "\n", - "### Work with your model documentation\n", - "\n", - "1. From the **Model Inventory** in the ValidMind Platform, go to the model you registered earlier. ([Need more help?](https://docs.validmind.ai/guide/model-inventory/working-with-model-inventory.html))\n", - "\n", - "2. Click and expand the **Model Development** section.\n", - "\n", - "What you see is the full draft of your model documentation in a more easily consumable version. From here, you can make qualitative edits to model documentation, view guidelines, collaborate with validators, and submit your model documentation for approval when it's ready. 
[Learn more ...](https://docs.validmind.ai/guide/model-documentation/working-with-model-documentation.html)\n", - "\n", - "\n", - "\n", - "### Discover more learning resources\n", - "\n", - "We offer many interactive notebooks to help you document models:\n", - "\n", - "- [Run tests & test suites](https://docs.validmind.ai/developer/model-testing/testing-overview.html)\n", - "- [Code samples](https://docs.validmind.ai/developer/samples-jupyter-notebooks.html)\n", - "\n", - "Or, visit our [documentation](https://docs.validmind.ai/) to learn more about ValidMind." - ] - }, - { - "cell_type": "markdown", - "id": "copyright-de5d1e182b09403abddabc2850f2dd05", - "metadata": {}, - "source": [ - "\n", - "\n", - "\n", - "\n", - "***\n", - "\n", - "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", - "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", - "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "validmind-1QuffXMV-py3.10", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.14" - } - }, - "nbformat": 4, - "nbformat_minor": 5 + "cells": [ + { + "cell_type": "markdown", + "id": "1e2a4689", + "metadata": {}, + "source": [ + "# Quickstart for Heston option pricing model using QuantLib\n", + "\n", + "Welcome! Let's get you started with the basic process of documenting models with ValidMind.\n", + "\n", + "The Heston option pricing model is a popular stochastic volatility model used to price options. Developed by Steven Heston in 1993, the model assumes that the asset's volatility follows a mean-reverting square-root process, allowing it to capture the empirical observation of volatility \"clustering\" in financial markets. This model is particularly useful for assets where volatility is not constant, making it a favored approach in quantitative finance for pricing complex derivatives.\n", + "\n", + "Here’s an overview of the Heston model as implemented in QuantLib, a powerful library for quantitative finance:\n", + "\n", + "\n", + "\n", + "### Model Assumptions and Characteristics\n", + "1. **Stochastic Volatility**: The volatility is modeled as a stochastic process, following a mean-reverting square-root process (Cox-Ingersoll-Ross process).\n", + "2. **Correlated Asset and Volatility Processes**: The asset price and volatility are assumed to be correlated, allowing the model to capture the \"smile\" effect observed in implied volatilities.\n", + "3. 
**Risk-Neutral Dynamics**: The Heston model is typically calibrated under a risk-neutral measure, which allows for direct application to pricing.\n", + "\n", + "\n", + "\n", + "### Heston Model Parameters\n", + "The model is governed by a set of key parameters:\n", + "- **S0**: Initial stock price\n", + "- **v0**: Initial variance of the asset price\n", + "- **kappa**: Speed of mean reversion of the variance\n", + "- **theta**: Long-term mean level of variance\n", + "- **sigma**: Volatility of volatility (vol of vol)\n", + "- **rho**: Correlation between the asset price and variance processes\n", + "\n", + "The dynamics of the asset price \\( S \\) and the variance \\( v \\) under the Heston model are given by:\n", + "\n", + "$$\n", + "dS_t = r S_t \\, dt + \\sqrt{v_t} S_t \\, dW^S_t\n", + "$$\n", + "\n", + "$$\n", + "dv_t = \\kappa (\\theta - v_t) \\, dt + \\sigma \\sqrt{v_t} \\, dW^v_t\n", + "$$\n", + "\n", + "where \\( $dW^S$ \\) and \\( $dW^v$ \\) are Wiener processes with correlation \\( $\\rho$ \\).\n", + "\n", + "\n", + "\n", + "### Advantages and Limitations\n", + "- **Advantages**:\n", + " - Ability to capture volatility smiles and skews.\n", + " - More realistic pricing for options on assets with stochastic volatility.\n", + "- **Limitations**:\n", + " - Calibration can be complex due to the number of parameters.\n", + " - Computationally intensive compared to simpler models like Black-Scholes.\n", + "\n", + "This setup provides a robust framework for pricing and analyzing options with stochastic volatility dynamics. QuantLib’s implementation makes it easy to experiment with different parameter configurations and observe their effects on pricing.\n", + "\n", + "You will learn how to initialize the ValidMind Library, develop a option pricing model, and then write custom tests that can be used for sensitivity and stress testing to quickly generate documentation about model." 
+ ] + }, + { + "cell_type": "markdown", + "id": "69ec219a", + "metadata": {}, + "source": [ + "::: {.content-hidden when-format=\"html\"}\n", + "## Contents \n", + " - [Model Assumptions and Characteristics](#toc1_1__) \n", + " - [Heston Model Parameters](#toc1_2__) \n", + " - [Advantages and Limitations](#toc1_3__) \n", + "- [About ValidMind](#toc2__) \n", + " - [Before you begin](#toc2_1__) \n", + " - [New to ValidMind?](#toc2_2__) \n", + " - [Key concepts](#toc2_3__) \n", + "- [Setting up](#toc3__) \n", + " - [Install the ValidMind Library](#toc3_1__) \n", + " - [Initialize the ValidMind Library](#toc3_2__) \n", + " - [Register sample model](#toc3_2_1__) \n", + " - [Apply documentation template](#toc3_2_2__) \n", + " - [Get your code snippet](#toc3_2_3__) \n", + " - [Initialize the Python environment](#toc3_3__) \n", + " - [Preview the documentation template](#toc3_4__) \n", + "- [Data Preparation](#toc4__) \n", + " - [Helper functions](#toc4_1_1__) \n", + " - [Market Data Quality and Availability](#toc4_2__) \n", + " - [Initialize the ValidMind datasets](#toc4_3__) \n", + " - [Data Quality](#toc4_4__) \n", + " - [Isolation Forest Outliers Test](#toc4_4_1__) \n", + " - [Model parameters](#toc4_4_2__) \n", + "- [Model development - Heston Option price](#toc5__) \n", + " - [Model Calibration](#toc5_1__) \n", + " - [Model Evaluation](#toc5_2__) \n", + " - [Benchmark Testing](#toc5_2_1__) \n", + " - [Sensitivity Testing](#toc5_2_2__) \n", + " - [Stress Testing](#toc5_2_3__) \n", + "- [Next steps](#toc6__) \n", + " - [Work with your model documentation](#toc6_1__) \n", + " - [Discover more learning resources](#toc6_2__) \n", + "\n", + ":::\n", + "\n", + "" + ] + }, + { + "cell_type": "markdown", + "id": "b9fb5d17", + "metadata": {}, + "source": [ + "\n", + "\n", + "## About ValidMind\n", + "\n", + "ValidMind is a suite of tools for managing model risk, including risk associated with AI and statistical models.\n", + "\n", + "You use the ValidMind Library to automate 
documentation and validation tests, and then use the ValidMind Platform to collaborate on model documentation. Together, these products simplify model risk management, facilitate compliance with regulations and institutional standards, and enhance collaboration between yourself and model validators.\n", + "\n", + "\n", + "\n", + "### Before you begin\n", + "\n", + "This notebook assumes you have basic familiarity with Python, including an understanding of how functions work. If you are new to Python, you can still run the notebook but we recommend further familiarizing yourself with the language. \n", + "\n", + "If you encounter errors due to missing modules in your Python environment, install the modules with `pip install`, and then re-run the notebook. For more help, refer to [Installing Python Modules](https://docs.python.org/3/installing/index.html).\n", + "\n", + "\n", + "\n", + "### New to ValidMind?\n", + "\n", + "If you haven't already seen our documentation on the [ValidMind Library](https://docs.validmind.ai/developer/validmind-library.html), we recommend you begin by exploring the available resources in this section. There, you can learn more about documenting models and running tests, as well as find code samples and our Python Library API reference.\n", + "\n", + "
For access to all features available in this notebook, you'll need access to a ValidMind account.\n", + "

\n", + "Register with ValidMind
\n", + "\n", + "\n", + "\n", + "### Key concepts\n", + "\n", + "**Model documentation**: A structured and detailed record pertaining to a model, encompassing key components such as its underlying assumptions, methodologies, data sources, inputs, performance metrics, evaluations, limitations, and intended uses. It serves to ensure transparency, adherence to regulatory requirements, and a clear understanding of potential risks associated with the model’s application.\n", + "\n", + "**Documentation template**: Functions as a test suite and lays out the structure of model documentation, segmented into various sections and sub-sections. Documentation templates define the structure of your model documentation, specifying the tests that should be run, and how the results should be displayed.\n", + "\n", + "**Tests**: A function contained in the ValidMind Library, designed to run a specific quantitative test on the dataset or model. Tests are the building blocks of ValidMind, used to evaluate and document models and datasets, and can be run individually or as part of a suite defined by your model documentation template.\n", + "\n", + "**Custom tests**: Custom tests are functions that you define to evaluate your model or dataset. These functions can be registered via the ValidMind Library to be used with the ValidMind Platform.\n", + "\n", + "**Inputs**: Objects to be evaluated and documented in the ValidMind Library. 
They can be any of the following:\n", + "\n", + " - **model**: A single model that has been initialized in ValidMind with [`vm.init_model()`](https://docs.validmind.ai/validmind/validmind.html#init_model).\n", + " - **dataset**: Single dataset that has been initialized in ValidMind with [`vm.init_dataset()`](https://docs.validmind.ai/validmind/validmind.html#init_dataset).\n", + " - **models**: A list of ValidMind models - usually this is used when you want to compare multiple models in your custom test.\n", + " - **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom test. See this [example](https://docs.validmind.ai/notebooks/how_to/tests/run_tests/configure_tests/run_tests_that_require_multiple_datasets.html) for more information.\n", + "\n", + "**Parameters**: Additional arguments that can be passed when running a ValidMind test, used to pass additional information to a test, customize its behavior, or provide additional context.\n", + "\n", + "**Outputs**: Custom tests can return elements like tables or plots. Tables may be a list of dictionaries (each representing a row) or a pandas DataFrame. Plots may be matplotlib or plotly figures.\n", + "\n", + "**Test suites**: Collections of tests designed to run together to automate and generate model documentation end-to-end for specific use-cases.\n", + "\n", + "Example: the [`classifier_full_suite`](https://docs.validmind.ai/validmind/validmind/test_suites/classifier.html#ClassifierFullSuite) test suite runs tests from the [`tabular_dataset`](https://docs.validmind.ai/validmind/validmind/test_suites/tabular_datasets.html) and [`classifier`](https://docs.validmind.ai/validmind/validmind/test_suites/classifier.html) test suites to fully document the data and model sections for binary classification model use-cases." 
+ ] + }, + { + "cell_type": "markdown", + "id": "f2dccf35", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Setting up" + ] + }, + { + "cell_type": "markdown", + "id": "5a5ce085", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Install the ValidMind Library\n", + "\n", + "To install the library:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "409352bf", + "metadata": {}, + "outputs": [], + "source": [ + "%pip install -q validmind" + ] + }, + { + "cell_type": "markdown", + "id": "65e870b2", + "metadata": {}, + "source": [ + "To install the QuantLib library:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3a34debf", + "metadata": {}, + "outputs": [], + "source": [ + "%pip install -q QuantLib" + ] + }, + { + "cell_type": "markdown", + "id": "fb30ae07", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Initialize the ValidMind Library" + ] + }, + { + "cell_type": "markdown", + "id": "c6f87017", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Register sample model\n", + "\n", + "Let's first register a sample model for use with this notebook:\n", + "\n", + "1. In a browser, [log in to ValidMind](https://docs.validmind.ai/guide/configuration/log-in-to-validmind.html).\n", + "\n", + "2. In the left sidebar, navigate to **Inventory** and click **+ Register Model**.\n", + "\n", + "3. Enter the model details and click **Next >** to continue to assignment of model stakeholders. ([Need more help?](https://docs.validmind.ai/guide/model-inventory/register-models-in-inventory.html))\n", + "\n", + "4. Select your own name under the **MODEL OWNER** drop-down.\n", + "\n", + "5. Click **Register Model** to add the model to your inventory." + ] + }, + { + "cell_type": "markdown", + "id": "cbb2e2c9", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Apply documentation template\n", + "\n", + "Once you've registered your model, let's select a documentation template. 
A template predefines sections for your model documentation and provides a general outline to follow, making the documentation process much easier.\n", + "\n", + "1. In the left sidebar that appears for your model, click **Documents** and select **Documentation**.\n", + "\n", + "2. Under **TEMPLATE**, select `Capital Markets`.\n", + "\n", + "3. Click **Use Template** to apply the template." + ] + }, + { + "cell_type": "markdown", + "id": "41c4edca", + "metadata": {}, + "source": [ + "
Can't select this template?\n", + "

\n", + "Your organization administrators may need to add it to your template library:\n", + "\n", + "
" + ] + }, + { + "cell_type": "markdown", + "id": "2012eb82", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Get your code snippet\n", + "\n", + "ValidMind generates a unique _code snippet_ for each registered model to connect with your developer environment. You initialize the ValidMind Library with this code snippet, which ensures that your documentation and tests are uploaded to the correct model when you run the notebook.\n", + "\n", + "1. On the left sidebar that appears for your model, select **Getting Started** and click **Copy snippet to clipboard**.\n", + "2. Next, [load your model identifier credentials from an `.env` file](https://docs.validmind.ai/developer/model-documentation/store-credentials-in-env-file.html) or replace the placeholder with your own code snippet:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0cd3f67e", + "metadata": {}, + "outputs": [], + "source": [ + "# Load your model identifier credentials from an `.env` file\n", + "\n", + "%load_ext dotenv\n", + "%dotenv .env\n", + "\n", + "# Or replace with your code snippet\n", + "\n", + "import validmind as vm\n", + "\n", + "vm.init(\n", + " # api_host=\"...\",\n", + " # api_key=\"...\",\n", + " # api_secret=\"...\",\n", + " # model=\"...\",\n", + ")\n" + ] + }, + { + "cell_type": "markdown", + "id": "6d944cc9", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Initialize the Python environment\n", + "\n", + "Next, let's import the necessary libraries and set up your Python environment for data analysis:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f8cf2746", + "metadata": {}, + "outputs": [], + "source": [ + "%matplotlib inline\n", + "\n", + "import pandas as pd\n", + "import numpy as np\n", + "import matplotlib.pyplot as plt\n", + "from scipy.optimize import minimize\n", + "import yfinance as yf\n", + "import QuantLib as ql\n", + "from validmind.tests import run_test" + ] + }, + { + "cell_type": "markdown", + "id": 
"bc431ee0", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Preview the documentation template\n", + "\n", + "Let's verify that you have connected the ValidMind Library to the ValidMind Platform and that the appropriate *template* is selected for your model.\n", + "\n", + "You will upload documentation and test results unique to your model based on this template later on. For now, **take a look at the default structure that the template provides with [the `vm.preview_template()` function](https://docs.validmind.ai/validmind/validmind.html#preview_template)** from the ValidMind library and note the empty sections:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7e844028", + "metadata": {}, + "outputs": [], + "source": [ + "vm.preview_template()" + ] + }, + { + "cell_type": "markdown", + "id": "0c0ee8b9", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Data Preparation" + ] + }, + { + "cell_type": "markdown", + "id": "5a4d2c36", + "metadata": {}, + "source": [ + "### Market Data Sources\n", + "\n", + "\n", + "\n", + "#### Helper functions\n", + "Let's define helper function retrieve to option data from Yahoo Finance." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b96a500f", + "metadata": {}, + "outputs": [], + "source": [ + "def get_market_data(ticker, expiration_date_str):\n", + " \"\"\"\n", + " Fetch option market data from Yahoo Finance for the given ticker and expiration date.\n", + " Returns a list of tuples: (strike, maturity, option_price).\n", + " \"\"\"\n", + " # Create a Ticker object for the specified stock\n", + " stock = yf.Ticker(ticker)\n", + "\n", + " # Get all available expiration dates for options\n", + " option_dates = stock.options\n", + "\n", + " # Check if the requested expiration date is available\n", + " if expiration_date_str not in option_dates:\n", + " raise ValueError(f\"Expiration date {expiration_date_str} not available for {ticker}. 
Available dates: {option_dates}\")\n", + "\n", + " # Get the option chain for the specified expiration date\n", + " option_chain = stock.option_chain(expiration_date_str)\n", + "\n", + " # Get call options (or you can use puts as well based on your requirement)\n", + " calls = option_chain.calls\n", + "\n", + " # Convert expiration_date_str to QuantLib Date\n", + " expiry_date_parts = list(map(int, expiration_date_str.split('-'))) # Split YYYY-MM-DD\n", + " maturity_date = ql.Date(expiry_date_parts[2], expiry_date_parts[1], expiry_date_parts[0]) # Convert to QuantLib Date\n", + "\n", + " # Create a list to store strike prices, maturity dates, and option prices\n", + " market_data = []\n", + " for index, row in calls.iterrows():\n", + " strike = row['strike']\n", + " option_price = row['lastPrice'] # You can also use 'bid', 'ask', 'mid', etc.\n", + " market_data.append((strike, maturity_date, option_price))\n", + " df = pd.DataFrame(market_data, columns = ['strike', 'maturity_date', 'option_price'])\n", + " return df" + ] + }, + { + "cell_type": "markdown", + "id": "c7769b73", + "metadata": {}, + "source": [ + "Let's define a helper function to retrieve stock data from Yahoo Finance. This helper function calculates the spot price, dividend yield, volatility, and risk-free rate using the underlying stock data." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "dc44c448", + "metadata": {}, + "outputs": [], + "source": [ + "def get_option_parameters(ticker):\n", + " # Fetch historical data for the stock\n", + " stock_data = yf.Ticker(ticker)\n", + " \n", + " # Get the current spot price\n", + " spot_price = stock_data.history(period=\"1d\")['Close'].iloc[-1]\n", + " \n", + " # Get dividend yield\n", + " dividend_rate = stock_data.dividends.mean() / spot_price if not stock_data.dividends.empty else 0.0\n", + " \n", + " # Estimate volatility (standard deviation of log returns)\n", + " hist_data = stock_data.history(period=\"1y\")['Close']\n", + " log_returns = np.log(hist_data / hist_data.shift(1)).dropna()\n", + " volatility = np.std(log_returns) * np.sqrt(252) # Annualized volatility\n", + " \n", + " # Assume a risk-free rate from some known data (can be fetched from market data, here we use 0.001)\n", + " risk_free_rate = 0.001\n", + " \n", + " # Return the calculated parameters\n", + " return {\n", + " \"spot_price\": spot_price,\n", + " \"volatility\": volatility,\n", + " \"dividend_rate\": dividend_rate,\n", + " \"risk_free_rate\": risk_free_rate\n", + " }" + ] + }, + { + "cell_type": "markdown", + "id": "c7b739d3", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Market Data Quality and Availability\n", + "Next, let's specify ticker and expiration date to get market data." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "50225fde", + "metadata": {}, + "outputs": [], + "source": [ + "ticker = \"MSFT\"\n", + "expiration_date = \"2024-12-13\" # Example expiration date in 'YYYY-MM-DD' form\n", + "\n", + "market_data = get_market_data(ticker=ticker, expiration_date_str=expiration_date)" + ] + }, + { + "cell_type": "markdown", + "id": "c539b95e", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Initialize the ValidMind datasets\n", + "\n", + "Before you can run tests, you must first initialize a ValidMind dataset object using the [`init_dataset`](https://docs.validmind.ai/validmind/validmind.html#init_dataset) function from the ValidMind (`vm`) module." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "113f9c17", + "metadata": {}, + "outputs": [], + "source": [ + "vm_market_data = vm.init_dataset(\n", + " dataset=market_data,\n", + " input_id=\"market_data\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "185beb24", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Data Quality\n", + "Let's check the quality of the data using outlier and missing data tests." + ] + }, + { + "cell_type": "markdown", + "id": "7f14464c", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Isolation Forest Outliers Test\n", + "Let's detect anomalies in the dataset using the Isolation Forest algorithm, visualized through scatter plots." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "56c919ec", + "metadata": {}, + "outputs": [], + "source": [ + "result = run_test(\n", + " \"validmind.data_validation.IsolationForestOutliers\",\n", + " inputs={\n", + " \"dataset\": vm_market_data,\n", + " },\n", + " title=\"Outliers detection using Isolation Forest\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "e4d0e5ca", + "metadata": {}, + "source": [ + "##### Missing Values Test\n", + "Let's evaluates dataset quality by ensuring the missing value ratio across all features does not exceed a set threshold." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e95c825f", + "metadata": {}, + "outputs": [], + "source": [ + "result = run_test(\n", + " \"validmind.data_validation.MissingValues\",\n", + " inputs={\n", + " \"dataset\": vm_market_data,\n", + " },\n", + " title=\"Missing Values detection\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "829403a3", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Model parameters\n", + "Let's calculate the model parameters using from stock data " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "25936449", + "metadata": {}, + "outputs": [], + "source": [ + "option_params = get_option_parameters(ticker=ticker)" + ] + }, + { + "cell_type": "markdown", + "id": "0a0948b6", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Model development - Heston Option price" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e15b8221", + "metadata": {}, + "outputs": [], + "source": [ + "class HestonModel:\n", + "\n", + " def __init__(self, ticker, expiration_date_str, calculation_date, spot_price, dividend_rate, risk_free_rate):\n", + " self.ticker = ticker\n", + " self.expiration_date_str = expiration_date_str,\n", + " self.calculation_date = calculation_date\n", + " self.spot_price = spot_price\n", + " self.dividend_rate = dividend_rate\n", + " self.risk_free_rate = 
risk_free_rate\n", + " \n", + " def predict_option_price(self, strike, maturity_date, spot_price, v0=None, theta=None, kappa=None, sigma=None, rho=None):\n", + " # Set the evaluation date\n", + " ql.Settings.instance().evaluationDate = self.calculation_date\n", + "\n", + " # Construct the European Option\n", + " payoff = ql.PlainVanillaPayoff(ql.Option.Call, strike)\n", + " exercise = ql.EuropeanExercise(maturity_date)\n", + " european_option = ql.VanillaOption(payoff, exercise)\n", + "\n", + " # Yield term structures for risk-free rate and dividend\n", + " riskFreeTS = ql.YieldTermStructureHandle(ql.FlatForward(calculation_date, self.risk_free_rate, ql.Actual365Fixed()))\n", + " dividendTS = ql.YieldTermStructureHandle(ql.FlatForward(calculation_date, self.dividend_rate, ql.Actual365Fixed()))\n", + "\n", + " # Initial stock price\n", + " initialValue = ql.QuoteHandle(ql.SimpleQuote(spot_price))\n", + "\n", + " # Heston process parameters\n", + " heston_process = ql.HestonProcess(riskFreeTS, dividendTS, initialValue, v0, kappa, theta, sigma, rho)\n", + " hestonModel = ql.HestonModel(heston_process)\n", + "\n", + " # Use the Heston analytic engine\n", + " engine = ql.AnalyticHestonEngine(hestonModel)\n", + " european_option.setPricingEngine(engine)\n", + "\n", + " # Calculate the Heston model price\n", + " h_price = european_option.NPV()\n", + "\n", + " return h_price\n", + "\n", + " def predict_american_option_price(self, strike, maturity_date, spot_price, v0=None, theta=None, kappa=None, sigma=None, rho=None):\n", + " # Set the evaluation date\n", + " ql.Settings.instance().evaluationDate = self.calculation_date\n", + "\n", + " # Construct the American Option\n", + " payoff = ql.PlainVanillaPayoff(ql.Option.Call, strike)\n", + " exercise = ql.AmericanExercise(self.calculation_date, maturity_date)\n", + " american_option = ql.VanillaOption(payoff, exercise)\n", + "\n", + " # Yield term structures for risk-free rate and dividend\n", + " riskFreeTS = 
ql.YieldTermStructureHandle(ql.FlatForward(self.calculation_date, self.risk_free_rate, ql.Actual365Fixed()))\n", + " dividendTS = ql.YieldTermStructureHandle(ql.FlatForward(self.calculation_date, self.dividend_rate, ql.Actual365Fixed()))\n", + "\n", + " # Initial stock price\n", + " initialValue = ql.QuoteHandle(ql.SimpleQuote(spot_price))\n", + "\n", + " # Heston process parameters\n", + " heston_process = ql.HestonProcess(riskFreeTS, dividendTS, initialValue, v0, kappa, theta, sigma, rho)\n", + " heston_model = ql.HestonModel(heston_process)\n", + "\n", + "\n", + " payoff = ql.PlainVanillaPayoff(ql.Option.Call, strike)\n", + " exercise = ql.AmericanExercise(self.calculation_date, maturity_date)\n", + " american_option = ql.VanillaOption(payoff, exercise)\n", + " heston_fd_engine = ql.FdHestonVanillaEngine(heston_model)\n", + " american_option.setPricingEngine(heston_fd_engine)\n", + " option_price = american_option.NPV()\n", + "\n", + " return option_price\n", + "\n", + " def objective_function(self, params, market_data, spot_price, dividend_rate, risk_free_rate):\n", + " v0, theta, kappa, sigma, rho = params\n", + "\n", + " # Sum of squared differences between market prices and model prices\n", + " error = 0.0\n", + " for i, row in market_data.iterrows():\n", + " model_price = self.predict_option_price(row['strike'], row['maturity_date'], spot_price, \n", + " v0, theta, kappa, sigma, rho)\n", + " error += (model_price - row['option_price']) ** 2\n", + " \n", + " return error\n", + "\n", + " def calibrate_model(self, ticker, expiration_date_str):\n", + " # Get the option market data dynamically from Yahoo Finance\n", + " market_data = get_market_data(ticker, expiration_date_str)\n", + "\n", + " # Initial guesses for Heston parameters\n", + " initial_params = [0.04, 0.04, 0.1, 0.1, -0.75]\n", + "\n", + " # Bounds for the parameters to ensure realistic values\n", + " bounds = [(0.0001, 1.0), # v0\n", + " (0.0001, 1.0), # theta\n", + " (0.001, 2.0), # kappa\n", + " 
(0.001, 1.0), # sigma\n", + " (-0.75, 0.0)] # rho\n", + "\n", + " # Optimize the parameters to minimize the error between model and market prices\n", + " result = minimize(self.objective_function, initial_params, args=(market_data, self.spot_price, self.dividend_rate, self.risk_free_rate),\n", + " bounds=bounds, method='L-BFGS-B')\n", + "\n", + " # Optimized Heston parameters\n", + " v0_opt, theta_opt, kappa_opt, sigma_opt, rho_opt = result.x\n", + "\n", + " return v0_opt, theta_opt, kappa_opt, sigma_opt, rho_opt\n" + ] + }, + { + "cell_type": "markdown", + "id": "a941aa32", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Model Calibration\n", + "* The calibration process aims to optimize the Heston model parameters (v0, theta, kappa, sigma, rho) by minimizing the difference between model-predicted option prices and observed market prices.\n", + "* In this implementation, the model is calibrated to current market data, specifically using option prices from the selected ticker and expiration date.\n", + "\n", + "Let's specify `calculation_date` and `strike_price` as input parameters for the model to verify its functionality and confirm it operates as expected." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1d61dfca", + "metadata": {}, + "outputs": [], + "source": [ + "calculation_date = ql.Date(26, 11, 2024)\n", + "# Convert expiration date string to QuantLib.Date\n", + "expiry_date_parts = list(map(int, expiration_date.split('-')))\n", + "maturity_date = ql.Date(expiry_date_parts[2], expiry_date_parts[1], expiry_date_parts[0])\n", + "strike_price = 460.0\n", + "\n", + "hm = HestonModel(\n", + " ticker=ticker,\n", + " expiration_date_str= expiration_date,\n", + " calculation_date= calculation_date,\n", + " spot_price= option_params['spot_price'],\n", + " dividend_rate = option_params['dividend_rate'],\n", + " risk_free_rate = option_params['risk_free_rate']\n", + ")\n", + "\n", + "# Let's calibrate model\n", + "v0_opt, theta_opt, kappa_opt, sigma_opt, rho_opt = hm.calibrate_model(ticker, expiration_date)\n", + "print(f\"Optimized Heston parameters: v0={v0_opt}, theta={theta_opt}, kappa={kappa_opt}, sigma={sigma_opt}, rho={rho_opt}\")\n", + "\n", + "\n", + "# option price\n", + "h_price = hm.predict_option_price(strike_price, maturity_date, option_params['spot_price'], v0_opt, theta_opt, kappa_opt, sigma_opt, rho_opt)\n", + "print(\"The Heston model price for the option is:\", h_price)" + ] + }, + { + "cell_type": "markdown", + "id": "75313272", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Model Evaluation" + ] + }, + { + "cell_type": "markdown", + "id": "2e6471ef", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Benchmark Testing\n", + "The benchmark testing framework provides a robust way to validate the Heston model implementation and understand the relationships between European and American option prices under stochastic volatility conditions.\n", + "Let's compares European and American option prices using the Heston model." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "810cf887", + "metadata": {}, + "outputs": [], + "source": [ + "@vm.test(\"my_custom_tests.BenchmarkTest\")\n", + "def benchmark_test(hm_model, strikes, maturity_date, spot_price, v0=None, theta=None, kappa=None, sigma=None, rho=None):\n", + " \"\"\"\n", + " Compares European and American option prices using the Heston model.\n", + "\n", + " This test evaluates the price differences between European and American options\n", + " across multiple strike prices while keeping other parameters constant. The comparison\n", + " helps understand the early exercise premium of American options over their European\n", + " counterparts under stochastic volatility conditions.\n", + "\n", + " Args:\n", + " hm_model: HestonModel instance for option pricing calculations\n", + " strikes (list[float]): List of strike prices to test\n", + " maturity_date (ql.Date): Option expiration date in QuantLib format\n", + " spot_price (float): Current price of the underlying asset\n", + " v0 (float, optional): Initial variance. Defaults to None.\n", + " theta (float, optional): Long-term variance. Defaults to None.\n", + " kappa (float, optional): Mean reversion rate. Defaults to None.\n", + " sigma (float, optional): Volatility of variance. Defaults to None.\n", + " rho (float, optional): Correlation between asset and variance. 
Defaults to None.\n", + "\n", + " Returns:\n", + " dict: Contains a DataFrame with the following columns:\n", + " - Strike: Strike prices tested\n", + " - Maturity date: Expiration date for all options\n", + " - Spot price: Current underlying price\n", + " - european model price: Prices for European options\n", + " - american model price: Prices for American options\n", + "\"\"\"\n", + " american_derived_prices = []\n", + " european_derived_prices = []\n", + " for K in strikes:\n", + " european_derived_prices.append(hm_model.predict_option_price(K, maturity_date, spot_price, v0, theta, kappa, sigma, rho))\n", + " american_derived_prices.append(hm_model.predict_american_option_price(K, maturity_date, spot_price, v0, theta, kappa, sigma, rho))\n", + "\n", + " data = {\n", + " \"Strike\": strikes,\n", + " \"Maturity date\": [maturity_date] * len(strikes),\n", + " \"Spot price\": [spot_price] * len(strikes),\n", + " \"european model price\": european_derived_prices,\n", + " \"american model price\": american_derived_prices,\n", + "\n", + " }\n", + " df1 = pd.DataFrame(data)\n", + " return {\"strikes variation benchmarking\": df1}" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3fdd6705", + "metadata": {}, + "outputs": [], + "source": [ + "result = run_test(\n", + " \"my_custom_tests.BenchmarkTest\",\n", + " params={\n", + " \"hm_model\": hm,\n", + " \"strikes\": [400, 425, 460, 495, 520],\n", + " \"maturity_date\": maturity_date,\n", + " \"spot_price\": option_params['spot_price'],\n", + " \"v0\":v0_opt,\n", + " \"theta\": theta_opt,\n", + " \"kappa\":kappa_opt ,\n", + " \"sigma\": sigma_opt,\n", + " \"rho\":rho_opt\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "markdown", + "id": "e359b503", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Sensitivity Testing\n", + "The sensitivity testing framework provides a systematic approach to understanding how the Heston model responds to parameter changes, which is crucial for both 
model validation and practical application in trading and risk management." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "51922313", + "metadata": {}, + "outputs": [], + "source": [ + "@vm.test(\"my_test_provider.Sensitivity\")\n", + "def SensitivityTest(\n", + " model,\n", + " strike_price,\n", + " maturity_date,\n", + " spot_price,\n", + " v0_opt,\n", + " theta_opt,\n", + " kappa_opt,\n", + " sigma_opt,\n", + " rho_opt,\n", + "):\n", + " \"\"\"\n", + " Evaluates the sensitivity of American option prices to changes in model parameters.\n", + "\n", + " This test calculates option prices using the Heston model with optimized parameters.\n", + " It's designed to analyze how changes in various model inputs affect the option price,\n", + " which is crucial for understanding model behavior and risk management.\n", + "\n", + " Args:\n", + " model (HestonModel): Initialized Heston model instance wrapped in ValidMind model object\n", + " strike_price (float): Strike price of the option\n", + " maturity_date (ql.Date): Expiration date of the option in QuantLib format\n", + " spot_price (float): Current price of the underlying asset\n", + " v0_opt (float): Optimized initial variance parameter\n", + " theta_opt (float): Optimized long-term variance parameter\n", + " kappa_opt (float): Optimized mean reversion rate parameter\n", + " sigma_opt (float): Optimized volatility of variance parameter\n", + " rho_opt (float): Optimized correlation parameter between asset price and variance\n", + " \"\"\"\n", + " price = model.model.predict_american_option_price(\n", + " strike_price,\n", + " maturity_date,\n", + " spot_price,\n", + " v0_opt,\n", + " theta_opt,\n", + " kappa_opt,\n", + " sigma_opt,\n", + " rho_opt,\n", + " )\n", + "\n", + " return price\n" + ] + }, + { + "cell_type": "markdown", + "id": "408a05ef", + "metadata": {}, + "source": [ + "##### Common plot function" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "104ca6dd", + 
"metadata": {}, + "outputs": [], + "source": [ + "def plot_results(df, params: dict = None):\n", + " fig2 = plt.figure(figsize=(10, 6))\n", + " plt.plot(df[params[\"x\"]], df[params[\"y\"]], label=params[\"label\"])\n", + " plt.xlabel(params[\"xlabel\"])\n", + " plt.ylabel(params[\"ylabel\"])\n", + " \n", + " plt.title(params[\"title\"])\n", + " plt.legend()\n", + " plt.grid(True)\n", + " plt.show() # display the plot" + ] + }, + { + "cell_type": "markdown", + "id": "ca72b9e5", + "metadata": {}, + "source": [ + "Let's create a ValidMind model object" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ae7093fa", + "metadata": {}, + "outputs": [], + "source": [ + "hm_model = vm.init_model(model=hm, input_id=\"HestonModel\")" + ] + }, + { + "cell_type": "markdown", + "id": "b2141640", + "metadata": {}, + "source": [ + "##### Strike sensitivity\n", + "Let's analyze how option prices change as the strike price varies. We create a range of strike prices around the current strike (460) and observe the impact on option prices while keeping all other parameters constant." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ea7f1cbe", + "metadata": {}, + "outputs": [], + "source": [ + "result = run_test(\n", + " \"my_test_provider.Sensitivity:ToStrike\",\n", + " inputs = {\n", + " \"model\": hm_model\n", + " },\n", + " param_grid={\n", + " \"strike_price\": list(np.linspace(460-50, 460+50, 10)),\n", + " \"maturity_date\": [maturity_date],\n", + " \"spot_price\": [option_params[\"spot_price\"]],\n", + " \"v0_opt\": [v0_opt],\n", + " \"theta_opt\": [theta_opt],\n", + " \"kappa_opt\": [kappa_opt],\n", + " \"sigma_opt\": [sigma_opt],\n", + " \"rho_opt\":[rho_opt]\n", + " },\n", + ")\n", + "result.log()\n", + "# Visualize how option prices change with different strike prices\n", + "plot_results(\n", + " pd.DataFrame(result.tables[0].data),\n", + " params={\n", + " \"x\": \"strike_price\",\n", + " \"y\":\"Value\",\n", + " \"label\":\"Strike price\",\n", + " \"xlabel\":\"Strike price\",\n", + " \"ylabel\":\"option price\",\n", + " \"title\":\"Heston option - Strike price Sensitivity\",\n", + " }\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "be143012", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Stress Testing\n", + "This stress testing framework provides a comprehensive view of how the Heston model behaves under different market conditions and helps identify potential risks in option pricing." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f2f01a40", + "metadata": {}, + "outputs": [], + "source": [ + "@vm.test(\"my_custom_tests.Stressing\")\n", + "def StressTest(\n", + " model,\n", + " strike_price,\n", + " maturity_date,\n", + " spot_price,\n", + " v0_opt,\n", + " theta_opt,\n", + " kappa_opt,\n", + " sigma_opt,\n", + " rho_opt,\n", + "):\n", + " \"\"\"\n", + " Performs stress testing on Heston model parameters to evaluate option price sensitivity.\n", + "\n", + " This test evaluates how the American option price responds to stressed market conditions\n", + " by varying key model parameters. It's designed to:\n", + " 1. Identify potential model vulnerabilities\n", + " 2. Understand price behavior under extreme scenarios\n", + " 3. Support risk management decisions\n", + " 4. Validate model stability across parameter ranges\n", + "\n", + " Args:\n", + " model (HestonModel): Initialized Heston model instance wrapped in ValidMind model object\n", + " strike_price (float): Option strike price\n", + " maturity_date (ql.Date): Option expiration date in QuantLib format\n", + " spot_price (float): Current price of the underlying asset\n", + " v0_opt (float): Initial variance parameter under stress testing\n", + " theta_opt (float): Long-term variance parameter under stress testing\n", + " kappa_opt (float): Mean reversion rate parameter under stress testing\n", + " sigma_opt (float): Volatility of variance parameter under stress testing\n", + " rho_opt (float): Correlation parameter under stress testing\n", + " \"\"\"\n", + " price = model.model.predict_american_option_price(\n", + " strike_price,\n", + " maturity_date,\n", + " spot_price,\n", + " v0_opt,\n", + " theta_opt,\n", + " kappa_opt,\n", + " sigma_opt,\n", + " rho_opt,\n", + " )\n", + "\n", + " return price\n" + ] + }, + { + "cell_type": "markdown", + "id": "31fcbe9c", + "metadata": {}, + "source": [ + "##### Rho (correlation) and Theta (long term vol) stress test\n", + "Next, 
let's evaluate the sensitivity of a model's output to changes in the correlation parameter (rho) and the long-term variance parameter (theta) within a stochastic volatility framework." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6119b5d9", + "metadata": {}, + "outputs": [], + "source": [ + "result = run_test(\n", + " \"my_custom_tests.Stressing:TheRhoAndThetaParameters\",\n", + " inputs = {\n", + " \"model\": hm_model,\n", + " },\n", + " param_grid={\n", + " \"strike_price\": [460],\n", + " \"maturity_date\": [maturity_date],\n", + " \"spot_price\": [option_params[\"spot_price\"]],\n", + " \"v0_opt\": [v0_opt],\n", + " \"theta_opt\": list(np.linspace(0.1, theta_opt+0.4, 5)),\n", + " \"kappa_opt\": [kappa_opt],\n", + " \"sigma_opt\": [sigma_opt],\n", + " \"rho_opt\":list(np.linspace(rho_opt-0.2, rho_opt+0.2, 5))\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "markdown", + "id": "be39cb3a", + "metadata": {}, + "source": [ + "##### Sigma stress test\n", + "Let's evaluate the sensitivity of a model's output to changes in the volatility parameter, sigma. This test is crucial for understanding how variations in market volatility impact the model's valuation of financial instruments, particularly options." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0dc189b7", + "metadata": {}, + "outputs": [], + "source": [ + "result = run_test(\n", + " \"my_custom_tests.Stressing:TheSigmaParameter\",\n", + " inputs = {\n", + " \"model\": hm_model,\n", + " },\n", + " param_grid={\n", + " \"strike_price\": [460],\n", + " \"maturity_date\": [maturity_date],\n", + " \"spot_price\": [option_params[\"spot_price\"]],\n", + " \"v0_opt\": [v0_opt],\n", + " \"theta_opt\": [theta_opt],\n", + " \"kappa_opt\": [kappa_opt],\n", + " \"sigma_opt\": list(np.linspace(0.1, sigma_opt+0.6, 5)),\n", + " \"rho_opt\": [rho_opt]\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "markdown", + "id": "173a5294", + "metadata": {}, + "source": [ + "##### Stress kappa\n", + "Let's evaluate the sensitivity of a model's output to changes in the kappa parameter, which is a mean reversion rate in stochastic volatility models." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "dae9714f", + "metadata": {}, + "outputs": [], + "source": [ + "result = run_test(\n", + " \"my_custom_tests.Stressing:TheKappaParameter\",\n", + " inputs = {\n", + " \"model\": hm_model,\n", + " },\n", + " param_grid={\n", + " \"strike_price\": [460],\n", + " \"maturity_date\": [maturity_date],\n", + " \"spot_price\": [option_params[\"spot_price\"]],\n", + " \"v0_opt\": [v0_opt],\n", + " \"theta_opt\": [theta_opt],\n", + " \"kappa_opt\": list(np.linspace(kappa_opt, kappa_opt+0.2, 5)),\n", + " \"sigma_opt\": [sigma_opt],\n", + " \"rho_opt\": [rho_opt]\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "markdown", + "id": "b4d1d968", + "metadata": {}, + "source": [ + "##### Stress theta\n", + "Let's evaluate the sensitivity of a model's output to changes in the parameter theta, which represents the long-term variance in a stochastic volatility model." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e68df3db", + "metadata": {}, + "outputs": [], + "source": [ + "result = run_test(\n", + " \"my_custom_tests.Stressing:TheThetaParameter\",\n", + " inputs = {\n", + " \"model\": hm_model,\n", + " },\n", + " param_grid={\n", + " \"strike_price\": [460],\n", + " \"maturity_date\": [maturity_date],\n", + " \"spot_price\": [option_params[\"spot_price\"]],\n", + " \"v0_opt\": [v0_opt],\n", + " \"theta_opt\": list(np.linspace(0.1, theta_opt+0.9, 5)),\n", + " \"kappa_opt\": [kappa_opt],\n", + " \"sigma_opt\": [sigma_opt],\n", + " \"rho_opt\": [rho_opt]\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "markdown", + "id": "32e70456", + "metadata": {}, + "source": [ + "##### Stress rho\n", + "Let's evaluate the sensitivity of a model's output to changes in the correlation parameter, rho, within a stochastic volatility (SV) model framework. This test is crucial for understanding how variations in rho, which represents the correlation between the asset price and its volatility, impact the model's valuation output." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b5ca3fc2", + "metadata": {}, + "outputs": [], + "source": [ + "result = run_test(\n", + " \"my_custom_tests.Stressing:TheRhoParameter\",\n", + " inputs = {\n", + " \"model\": hm_model,\n", + " },\n", + " param_grid={\n", + " \"strike_price\": [460],\n", + " \"maturity_date\": [maturity_date],\n", + " \"spot_price\": [option_params[\"spot_price\"]],\n", + " \"v0_opt\": [v0_opt],\n", + " \"theta_opt\": [theta_opt],\n", + " \"kappa_opt\": [kappa_opt],\n", + " \"sigma_opt\": [sigma_opt],\n", + " \"rho_opt\": list(np.linspace(rho_opt-0.2, rho_opt+0.2, 5))\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "markdown", + "id": "892c5347", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Next steps\n", + "\n", + "You can look at the results of this test suite right in the notebook where you ran the code, as you would expect. But there is a better way — use the ValidMind Platform to work with your model documentation.\n", + "\n", + "\n", + "\n", + "### Work with your model documentation\n", + "\n", + "1. From the **Model Inventory** in the ValidMind Platform, go to the model you registered earlier. ([Need more help?](https://docs.validmind.ai/guide/model-inventory/working-with-model-inventory.html))\n", + "\n", + "2. Click and expand the **Model Development** section.\n", + "\n", + "What you see is the full draft of your model documentation in a more easily consumable version. From here, you can make qualitative edits to model documentation, view guidelines, collaborate with validators, and submit your model documentation for approval when it's ready. 
[Learn more ...](https://docs.validmind.ai/guide/model-documentation/working-with-model-documentation.html)\n", + "\n", + "\n", + "\n", + "### Discover more learning resources\n", + "\n", + "We offer many interactive notebooks to help you document models:\n", + "\n", + "- [Run tests & test suites](https://docs.validmind.ai/developer/model-testing/testing-overview.html)\n", + "- [Code samples](https://docs.validmind.ai/developer/samples-jupyter-notebooks.html)\n", + "\n", + "Or, visit our [documentation](https://docs.validmind.ai/) to learn more about ValidMind." + ] + }, + { + "cell_type": "markdown", + "id": "copyright-de5d1e182b09403abddabc2850f2dd05", + "metadata": {}, + "source": [ + "\n", + "\n", + "\n", + "\n", + "***\n", + "\n", + "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", + "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", + "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "validmind-1QuffXMV-py3.10", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.14" + } + }, + "nbformat": 4, + "nbformat_minor": 5 } diff --git a/notebooks/use_cases/code_explainer/quickstart_code_explainer_demo.ipynb b/notebooks/use_cases/code_explainer/quickstart_code_explainer_demo.ipynb index 91a44e0c7..99ee1abf6 100644 --- a/notebooks/use_cases/code_explainer/quickstart_code_explainer_demo.ipynb +++ b/notebooks/use_cases/code_explainer/quickstart_code_explainer_demo.ipynb @@ -1,873 +1,874 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Quickstart for model code documentation\n", - "\n", - "Welcome! This notebook demonstrates how to use the ValidMind code explainer to automatically generate comprehensive documentation for your codebase. The code explainer analyzes your source code and provides detailed explanations across various aspects of your implementation.\n", - "\n", - "\n", - "\n", - "## About Code Explainer\n", - "The ValidMind code explainer is a powerful tool that automatically analyzes your source code and generates comprehensive documentation. 
It helps you:\n", - "\n", - "- Understand the structure and organization of your codebase\n", - "- Document dependencies and environment setup\n", - "- Explain data processing and model implementation details\n", - "- Document training, evaluation, and inference pipelines\n", - "- Track configuration, testing, and security measures\n", - "\n", - "This tool is particularly useful for:\n", - "- Onboarding new team members\n", - "- Maintaining up-to-date documentation\n", - "- Ensuring code quality and best practices\n", - "- Facilitating code reviews and audits" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "::: {.content-hidden when-format=\"html\"}\n", - "## Contents \n", - "- [About Code Explainer](#toc1__) \n", - "- [About ValidMind](#toc2__) \n", - " - [Before you begin](#toc2_1__) \n", - " - [New to ValidMind?](#toc2_2__) \n", - " - [Key concepts](#toc2_3__) \n", - "- [Setting up](#toc3__) \n", - " - [Install the ValidMind Library](#toc3_1__) \n", - " - [Initialize the ValidMind Library](#toc3_2__) \n", - " - [Register sample model](#toc3_2_1__) \n", - " - [Apply documentation template](#toc3_2_2__) \n", - " - [Get your code snippet](#toc3_2_3__) \n", - " - [Preview the documentation template](#toc3_3__) \n", - "- [Common function](#toc4__) \n", - "- [Default Behavior](#toc5__) \n", - "- [Codebase Overview](#toc6__) \n", - "- [Environment and Dependencies ('environment_setup')](#toc7__) \n", - "- [Data Ingestion and Preprocessing](#toc8__) \n", - "- [Model Implementation Details](#toc9__) \n", - "- [Model Training Pipeline](#toc10__) \n", - "- [Evaluation and Validation Code](#toc11__) \n", - "- [Inference and Scoring Logic](#toc12__) \n", - "- [Configuration and Parameters](#toc13__) \n", - "- [Unit and Integration Testing](#toc14__) \n", - "- [Logging and Monitoring Hooks](#toc15__) \n", - "- [Code and Model Versioning](#toc16__) \n", - "- [Security and Access Control](#toc17__) \n", - "- [Example Runs and Scripts](#toc18__) \n", - 
"- [Known Issues and Future Improvements](#toc19__) \n", - "\n", - ":::\n", - "\n", - "" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "## About ValidMind\n", - "\n", - "ValidMind is a suite of tools for managing model risk, including risk associated with AI and statistical models.\n", - "\n", - "You use the ValidMind Library to automate documentation and validation tests, and then use the ValidMind Platform to collaborate on model documentation. Together, these products simplify model risk management, facilitate compliance with regulations and institutional standards, and enhance collaboration between yourself and model validators.\n", - "\n", - "\n", - "\n", - "### Before you begin\n", - "\n", - "This notebook assumes you have basic familiarity with Python, including an understanding of how functions work. If you are new to Python, you can still run the notebook but we recommend further familiarizing yourself with the language. \n", - "\n", - "If you encounter errors due to missing modules in your Python environment, install the modules with `pip install`, and then re-run the notebook. For more help, refer to [Installing Python Modules](https://docs.python.org/3/installing/index.html).\n", - "\n", - "\n", - "\n", - "### New to ValidMind?\n", - "\n", - "If you haven't already seen our documentation on the [ValidMind Library](https://docs.validmind.ai/developer/validmind-library.html), we recommend you begin by exploring the available resources in this section. There, you can learn more about documenting models and running tests, as well as find code samples and our Python Library API reference.\n", - "\n", - "
For access to all features available in this notebook, you'll need access to a ValidMind account.\n", - "

\n", - "Register with ValidMind
\n", - "\n", - "\n", - "\n", - "### Key concepts\n", - "\n", - "**Model documentation**: A structured and detailed record pertaining to a model, encompassing key components such as its underlying assumptions, methodologies, data sources, inputs, performance metrics, evaluations, limitations, and intended uses. It serves to ensure transparency, adherence to regulatory requirements, and a clear understanding of potential risks associated with the model’s application.\n", - "\n", - "**Documentation template**: Functions as a test suite and lays out the structure of model documentation, segmented into various sections and sub-sections. Documentation templates define the structure of your model documentation, specifying the tests that should be run, and how the results should be displayed.\n", - "\n", - "**Tests**: A function contained in the ValidMind Library, designed to run a specific quantitative test on the dataset or model. Tests are the building blocks of ValidMind, used to evaluate and document models and datasets, and can be run individually or as part of a suite defined by your model documentation template.\n", - "\n", - "**Custom tests**: Custom tests are functions that you define to evaluate your model or dataset. These functions can be registered via the ValidMind Library to be used with the ValidMind Platform.\n", - "\n", - "**Inputs**: Objects to be evaluated and documented in the ValidMind Library. 
They can be any of the following:\n", - "\n", - " - **model**: A single model that has been initialized in ValidMind with [`vm.init_model()`](https://docs.validmind.ai/validmind/validmind.html#init_model).\n", - " - **dataset**: Single dataset that has been initialized in ValidMind with [`vm.init_dataset()`](https://docs.validmind.ai/validmind/validmind.html#init_dataset).\n", - " - **models**: A list of ValidMind models - usually this is used when you want to compare multiple models in your custom test.\n", - " - **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom test. See this [example](https://docs.validmind.ai/notebooks/how_to/tests/run_tests/configure_tests/run_tests_that_require_multiple_datasets.html) for more information.\n", - "\n", - "**Parameters**: Additional arguments that can be passed when running a ValidMind test, used to pass additional information to a test, customize its behavior, or provide additional context.\n", - "\n", - "**Outputs**: Custom tests can return elements like tables or plots. Tables may be a list of dictionaries (each representing a row) or a pandas DataFrame. Plots may be matplotlib or plotly figures.\n", - "\n", - "**Test suites**: Collections of tests designed to run together to automate and generate model documentation end-to-end for specific use-cases.\n", - "\n", - "Example: the [`classifier_full_suite`](https://docs.validmind.ai/validmind/validmind/test_suites/classifier.html#ClassifierFullSuite) test suite runs tests from the [`tabular_dataset`](https://docs.validmind.ai/validmind/validmind/test_suites/tabular_datasets.html) and [`classifier`](https://docs.validmind.ai/validmind/validmind/test_suites/classifier.html) test suites to fully document the data and model sections for binary classification model use-cases." 
- ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "## Setting up" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Install the ValidMind Library\n", - "\n", - "To install the library:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%pip install -q validmind" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Initialize the ValidMind Library" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "#### Register sample model\n", - "\n", - "Let's first register a sample model for use with this notebook:\n", - "\n", - "1. In a browser, [log in to ValidMind](https://docs.validmind.ai/guide/configuration/log-in-to-validmind.html).\n", - "\n", - "2. In the left sidebar, navigate to **Inventory** and click **+ Register Model**.\n", - "\n", - "3. Enter the model details and click **Next >** to continue to assignment of model stakeholders. ([Need more help?](https://docs.validmind.ai/guide/model-inventory/register-models-in-inventory.html))\n", - "\n", - "4. Select your own name under the **MODEL OWNER** drop-down.\n", - "\n", - "5. Click **Register Model** to add the model to your inventory." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "#### Apply documentation template\n", - "\n", - "Once you've registered your model, let's select a documentation template. A template predefines sections for your model documentation and provides a general outline to follow, making the documentation process much easier.\n", - "\n", - "1. In the left sidebar that appears for your model, click **Documents** and select **Documentation**.\n", - "\n", - "2. Under **TEMPLATE**, select `Model Source Code Documentation`.\n", - "\n", - "3. Click **Use Template** to apply the template." 
- ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "
Can't select this template?\n", - "

\n", - "Your organization administrators may need to add it to your template library:\n", - "
" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "#### Get your code snippet\n", - "\n", - "ValidMind generates a unique _code snippet_ for each registered model to connect with your developer environment. You initialize the ValidMind Library with this code snippet, which ensures that your documentation and tests are uploaded to the correct model when you run the notebook.\n", - "\n", - "1. On the left sidebar that appears for your model, select **Getting Started** and click **Copy snippet to clipboard**.\n", - "2. Next, [load your model identifier credentials from an `.env` file](https://docs.validmind.ai/developer/model-documentation/store-credentials-in-env-file.html) or replace the placeholder with your own code snippet:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Load your model identifier credentials from an `.env` file\n", - "\n", - "%load_ext dotenv\n", - "%dotenv .env\n", - "\n", - "# Or replace with your code snippet\n", - "\n", - "import validmind as vm\n", - "\n", - "vm.init(\n", - " # api_host=\"...\",\n", - " # api_key=\"...\",\n", - " # api_secret=\"...\",\n", - " # model=\"...\",\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Preview the documentation template\n", - "\n", - "Let's verify that you have connected the ValidMind Library to the ValidMind Platform and that the appropriate *template* is selected for your model.\n", - "\n", - "You will upload documentation and test results unique to your model based on this template later on. 
For now, **take a look at the default structure that the template provides with [the `vm.preview_template()` function](https://docs.validmind.ai/validmind/validmind.html#preview_template)** from the ValidMind library and note the empty sections:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "vm.preview_template()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "## Common function\n", - "The code above defines two key functions:\n", - "1. A function to read source code from 'customer_churn_full_suite.py' file\n", - "2. An 'explain_code' function that uses ValidMind's experimental agents to analyze and explain code." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "source_code=\"\"\n", - "with open(\"customer_churn_full_suite.py\", \"r\") as f:\n", - " source_code = f.read()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The `vm.experimental.agents.run_task` function is used to execute AI agent tasks.\n", - "\n", - "It requires:\n", - "- task: The type of task to run (e.g. 
`code_explainer`)\n", - "- input: A dictionary containing task-specific parameters\n", - " - For `code_explainer`, this includes:\n", - " - **source_code** (str): The code to be analyzed\n", - " - **user_instructions** (str): Instructions for how to analyze the code" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def explain_code(content_id: str, user_instructions: str):\n", - " \"\"\"Run code explanation task and log the results.\n", - " By default, the code explainer includes sections for:\n", - " - Main Purpose and Overall Functionality\n", - " - Breakdown of Key Functions or Components\n", - " - Potential Risks or Failure Points \n", - " - Assumptions or Limitations\n", - " If you want default sections, specify user_instructions as an empty string.\n", - " \n", - " Args:\n", - " user_instructions (str): Instructions for how to analyze the code\n", - " content_id (str): ID to use when logging the results\n", - " \n", - " Returns:\n", - " The result object from running the code explanation task\n", - " \"\"\"\n", - " result = vm.experimental.agents.run_task(\n", - " task=\"code_explainer\",\n", - " input={\n", - " \"source_code\": source_code,\n", - " \"user_instructions\": user_instructions\n", - " }\n", - " )\n", - " result.log(content_id=content_id)\n", - " return result" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "\n", - "\n", - "## Default Behavior" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "By default, the code explainer includes sections for:\n", - "- Main Purpose and Overall Functionality\n", - "- Breakdown of Key Functions or Components\n", - "- Potential Risks or Failure Points \n", - "- Assumptions or Limitations\n", - "\n", - "If you want default sections, specify `user_instructions` as an empty string. 
For example:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "result = vm.experimental.agents.run_task(\n", - " task=\"code_explainer\",\n", - " input={\n", - " \"source_code\": source_code,\n", - " \"user_instructions\": \"\"\n", - " }\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "\n", - "\n", - "## Codebase Overview\n", - "\n", - "Let's analyze your codebase structure to understand the main modules, components, entry points and their relationships. We'll also examine the technology stack and frameworks that are being utilized in the implementation." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "result = explain_code(\n", - " user_instructions=\"\"\"\n", - " Please provide a summary of the following bullet points only.\n", - " - Describe the overall structure of the source code repository.\n", - " - Identify main modules, folders, and scripts.\n", - " - Highlight entry points for training, inference, and evaluation.\n", - " - State the main programming languages and frameworks used.\n", - " \"\"\",\n", - " content_id=\"code_structure_summary\"\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "result = explain_code(\n", - " user_instructions=\"\",\n", - " content_id=\"code_structure_summary\"\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "\n", - "\n", - "## Environment and Dependencies ('environment_setup')\n", - "Let's document the technical requirements and setup needed to run your code, including Python packages, system dependencies, and environment configuration files. Understanding these requirements is essential for proper development environment setup and consistent deployments across different environments." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "result = explain_code(\n", - " user_instructions=\"\"\"\n", - " Please provide a summary of the following bullet points only.\n", - " - List Python packages and system dependencies (OS, compilers, etc.).\n", - " - Reference environment files (requirements.txt, environment.yml, Dockerfile).\n", - " - Include setup instructions using Conda, virtualenv, or containers.\n", - " Please remove Potential Risks or Failure Points and Assumptions or Limitations sections. Please don't add any other sections.\n", - " \"\"\",\n", - " content_id=\"setup_instructions\"\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "\n", - "\n", - "## Data Ingestion and Preprocessing\n", - "Let's document how your code handles data, including data sources, validation procedures, and preprocessing steps. We'll examine the data pipeline architecture, covering everything from initial data loading through feature engineering and quality checks." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "result = explain_code(\n", - " user_instructions=\"\"\"\n", - " Please provide a summary of the following bullet points only.\n", - " - Specify data input formats and sources.\n", - " - Document ingestion, validation, and transformation logic.\n", - " - Explain how raw data is preprocessed and features are generated.\n", - " Please remove Potential Risks or Failure Points and Assumptions or Limitations sections. Please don't add any other sections. \"\"\",\n", - " content_id=\"data_handling_notes\"\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - " \n", - "\n", - "\n", - "\n", - "## Model Implementation Details\n", - "Let's document the core implementation details of your model, including its architecture, components, and key algorithms. 
Understanding the technical implementation is crucial for maintenance, debugging, and future improvements to the codebase. We'll examine how theoretical concepts are translated into working code." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "result = explain_code(\n", - " user_instructions=\"\"\"\n", - " Please provide a summary of the following bullet points only.\n", - " - Describe the core model code structure (classes, functions).\n", - " - Link code to theoretical models or equations when applicable.\n", - " - Note custom components like loss functions or feature selectors.\n", - " Please remove Potential Risks or Failure Points and Assumptions or Limitations sections. Please don't add any other sections.\n", - " \"\"\",\n", - " content_id=\"model_code_description\"\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "\n", - "\n", - "## Model Training Pipeline\n", - "\n", - "Let's document the training pipeline implementation, including how models are trained, optimized and evaluated. We'll examine the training process workflow, hyperparameter tuning approach, and model checkpointing mechanisms. This section provides insights into how the model learns from data and achieves optimal performance." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "result = explain_code(\n", - " user_instructions=\"\"\"\n", - " Please provide a summary of the following bullet points only.\n", - " - Explain the training process, optimization strategy, and hyperparameters.\n", - " - Describe logging, checkpointing, and early stopping mechanisms.\n", - " - Include references to training config files or tuning logic.\n", - " Please remove Potential Risks or Failure Points and Assumptions or Limitations sections. 
Please don't add any other sections.\n", - " \"\"\",\n", - " content_id=\"training_logic_details\"\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "\n", - "\n", - "## Evaluation and Validation Code\n", - "Let's examine how the model's validation and evaluation code is implemented, including the metrics calculation and validation processes. We'll explore the diagnostic tools and visualization methods used to assess model performance. This section will also cover how validation results are logged and stored for future reference." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "result = explain_code(\n", - " user_instructions=\"\"\"\n", - " Please provide a summary of the following bullet points only.\n", - " - Describe how validation is implemented and metrics are calculated.\n", - " - Include plots and diagnostic tools (e.g., ROC, SHAP, confusion matrix).\n", - " - State how outputs are logged and persisted.\n", - " Please remove Potential Risks or Failure Points and Assumptions or Limitations sections. Please don't add any other sections.\n", - " \"\"\",\n", - " content_id=\"evaluation_logic_notes\"\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "\n", - "\n", - "## Inference and Scoring Logic\n", - "Let's examine how the model performs inference and scoring on new data. This section will cover the implementation details of loading trained models, making predictions, and any required pre/post-processing steps. We'll also look at the APIs and interfaces available for both real-time serving and batch scoring scenarios." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "result = explain_code(\n", - " user_instructions=\"\"\"\n", - " Please provide a summary of the following bullet points only.\n", - " - Detail how the trained model is loaded and used for predictions.\n", - " - Explain I/O formats and APIs for serving or batch scoring.\n", - " - Include any preprocessing/postprocessing logic required.\n", - " Please remove Potential Risks or Failure Points and Assumptions or Limitations sections. Please don't add any other sections.\n", - " \"\"\",\n", - " content_id=\"inference_mechanism\"\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "\n", - "\n", - "## Configuration and Parameters\n", - "Let's explore how configuration and parameters are managed in the codebase. We'll examine the configuration files, command-line arguments, environment variables, and other mechanisms used to control model behavior. This section will also cover parameter versioning and how different configurations are tracked across model iterations." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "result = explain_code(\n", - " user_instructions=\"\"\"\n", - " Please provide a summary of the following bullet points only.\n", - " - Describe configuration management (files, CLI args, env vars).\n", - " - Highlight default parameters and override mechanisms.\n", - " - Reference versioning practices for config files.\n", - " Please remove Potential Risks or Failure Points and Assumptions or Limitations sections. Please don't add any other sections.\n", - " \"\"\",\n", - " content_id=\"config_control_notes\"\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "\n", - "\n", - "## Unit and Integration Testing\n", - "Let's examine the testing strategy and implementation in the codebase. 
We'll analyze the unit tests, integration tests, and testing frameworks used to ensure code quality and reliability. This section will also cover test coverage metrics and continuous integration practices." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "result = explain_code(\n", - " user_instructions=\"\"\"\n", - " Please provide a summary of the following bullet points only.\n", - " - List unit and integration tests and what they cover.\n", - " - Mention testing frameworks and coverage tools used.\n", - " - Explain testing strategy for production-readiness.\n", - " Please remove Potential Risks or Failure Points and Assumptions or Limitations sections. Please don't add any other sections.\n", - " \"\"\",\n", - " content_id=\"test_strategy_overview\"\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "\n", - "\n", - "## Logging and Monitoring Hooks\n", - "Let's analyze how logging and monitoring are implemented in the codebase. We'll examine the logging configuration, monitoring hooks, and key metrics being tracked. This section will also cover any real-time observability integrations and alerting mechanisms in place." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "result = explain_code(\n", - " user_instructions=\"\"\"\n", - " Please provide a summary of the following bullet points only.\n", - " - Describe logging configuration and structure.\n", - " - Highlight real-time monitoring or observability integrations.\n", - " - List key events, metrics, or alerts tracked.\n", - " Please remove Potential Risks or Failure Points and Assumptions or Limitations sections. 
Please don't add any other sections.\n", - " \"\"\",\n", - " content_id=\"logging_monitoring_notes\"\n", - ")\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "\n", - "\n", - "## Code and Model Versioning\n", - "Let's examine how code and model versioning is managed in the codebase. This section will cover version control practices, including Git workflows and model artifact versioning tools like DVC or MLflow. We'll also look at how versioning integrates with the CI/CD pipeline." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "result = explain_code(\n", - " user_instructions=\"\"\"\n", - " Please provide a summary of the following bullet points only.\n", - " - Describe Git usage, branching, tagging, and commit standards.\n", - " - Include model artifact versioning practices (e.g., DVC, MLflow).\n", - " - Reference any automation in CI/CD.\n", - " Please remove the following sections: \n", - " - Potential Risks or Failure Points\n", - " - Assumptions or Limitations\n", - " - Breakdown of Key Functions or Components\n", - " Please don't add any other sections.\n", - " \"\"\",\n", - " content_id=\"version_tracking_description\"\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "\n", - "\n", - "## Security and Access Control\n", - "Let's analyze the security and access control measures implemented in the codebase. We'll examine how sensitive data and code are protected through access controls, encryption, and compliance measures. Additionally, we'll review secure deployment practices and any specific handling of PII data." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "result = explain_code(\n", - " user_instructions=\"\"\"\n", - " Please provide a summary of the following bullet points only.\n", - " - Document access controls for source code and data.\n", - " - Include any encryption, PII handling, or compliance measures.\n", - " - Mention secure deployment practices.\n", - " Please remove the following sections: \n", - " - Potential Risks or Failure Points\n", - " - Assumptions or Limitations\n", - " - Breakdown of Key Functions or Components\n", - " Please don't add any other sections.\n", - " \"\"\",\n", - " content_id=\"security_policies_notes\"\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "\n", - "\n", - "## Example Runs and Scripts\n", - "Let's explore example runs and scripts that demonstrate how to use this codebase in practice. We'll look at working examples, command-line usage, and sample notebooks that showcase the core functionality. This section will also point to demo datasets and test scenarios that can help new users get started quickly." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "result = explain_code(\n", - " user_instructions=\"\"\"\n", - " Please provide a summary of the following bullet points only.\n", - " - Provide working script examples.\n", - " - Include CLI usage instructions or sample notebooks.\n", - " - Link to demo datasets or test scenarios.\n", - " Please remove the following sections: \n", - " - Potential Risks or Failure Points\n", - " - Assumptions or Limitations\n", - " - Breakdown of Key Functions or Components\n", - " Please don't add any other sections.\n", - " \"\"\",\n", - " content_id=\"runnable_examples\"\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "\n", - "\n", - "## Known Issues and Future Improvements\n", - "Let's examine the current limitations and areas for improvement in the codebase. This section will document known technical debt, bugs, and feature gaps that need to be addressed. We'll also outline proposed enhancements and reference any existing tickets or GitHub issues tracking these improvements." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "result = explain_code(\n", - " user_instructions=\"\"\"\n", - " Please provide a summary of the following bullet points only.\n", - " - List current limitations or technical debt.\n", - " - Outline proposed enhancements or refactors.\n", - " - Reference relevant tickets, GitHub issues, or roadmap items.\n", - " Please remove Potential Risks or Failure Points and Assumptions or Limitations sections. Please don't add any other sections.\n", - " \"\"\",\n", - " content_id=\"issues_and_improvements_log\"\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "copyright-ccbede139a26452183291a108b791513", - "metadata": {}, - "source": [ - "\n", - "\n", - "\n", - "\n", - "***\n", - "\n", - "Copyright © 2023-2026 ValidMind Inc. 
All rights reserved.
\n", - "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", - "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "validmind-1QuffXMV-py3.11", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.9" - } - }, - "nbformat": 4, - "nbformat_minor": 4 + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Quickstart for model code documentation\n", + "\n", + "Welcome! This notebook demonstrates how to use the ValidMind code explainer to automatically generate comprehensive documentation for your codebase. The code explainer analyzes your source code and provides detailed explanations across various aspects of your implementation.\n", + "\n", + "\n", + "\n", + "## About Code Explainer\n", + "The ValidMind code explainer is a powerful tool that automatically analyzes your source code and generates comprehensive documentation. 
It helps you:\n", + "\n", + "- Understand the structure and organization of your codebase\n", + "- Document dependencies and environment setup\n", + "- Explain data processing and model implementation details\n", + "- Document training, evaluation, and inference pipelines\n", + "- Track configuration, testing, and security measures\n", + "\n", + "This tool is particularly useful for:\n", + "- Onboarding new team members\n", + "- Maintaining up-to-date documentation\n", + "- Ensuring code quality and best practices\n", + "- Facilitating code reviews and audits" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "::: {.content-hidden when-format=\"html\"}\n", + "## Contents \n", + "- [About Code Explainer](#toc1__) \n", + "- [About ValidMind](#toc2__) \n", + " - [Before you begin](#toc2_1__) \n", + " - [New to ValidMind?](#toc2_2__) \n", + " - [Key concepts](#toc2_3__) \n", + "- [Setting up](#toc3__) \n", + " - [Install the ValidMind Library](#toc3_1__) \n", + " - [Initialize the ValidMind Library](#toc3_2__) \n", + " - [Register sample model](#toc3_2_1__) \n", + " - [Apply documentation template](#toc3_2_2__) \n", + " - [Get your code snippet](#toc3_2_3__) \n", + " - [Preview the documentation template](#toc3_3__) \n", + "- [Common function](#toc4__) \n", + "- [Default Behavior](#toc5__) \n", + "- [Codebase Overview](#toc6__) \n", + "- [Environment and Dependencies ('environment_setup')](#toc7__) \n", + "- [Data Ingestion and Preprocessing](#toc8__) \n", + "- [Model Implementation Details](#toc9__) \n", + "- [Model Training Pipeline](#toc10__) \n", + "- [Evaluation and Validation Code](#toc11__) \n", + "- [Inference and Scoring Logic](#toc12__) \n", + "- [Configuration and Parameters](#toc13__) \n", + "- [Unit and Integration Testing](#toc14__) \n", + "- [Logging and Monitoring Hooks](#toc15__) \n", + "- [Code and Model Versioning](#toc16__) \n", + "- [Security and Access Control](#toc17__) \n", + "- [Example Runs and Scripts](#toc18__) \n", + 
"- [Known Issues and Future Improvements](#toc19__) \n", + "\n", + ":::\n", + "\n", + "" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## About ValidMind\n", + "\n", + "ValidMind is a suite of tools for managing model risk, including risk associated with AI and statistical models.\n", + "\n", + "You use the ValidMind Library to automate documentation and validation tests, and then use the ValidMind Platform to collaborate on model documentation. Together, these products simplify model risk management, facilitate compliance with regulations and institutional standards, and enhance collaboration between yourself and model validators.\n", + "\n", + "\n", + "\n", + "### Before you begin\n", + "\n", + "This notebook assumes you have basic familiarity with Python, including an understanding of how functions work. If you are new to Python, you can still run the notebook but we recommend further familiarizing yourself with the language. \n", + "\n", + "If you encounter errors due to missing modules in your Python environment, install the modules with `pip install`, and then re-run the notebook. For more help, refer to [Installing Python Modules](https://docs.python.org/3/installing/index.html).\n", + "\n", + "\n", + "\n", + "### New to ValidMind?\n", + "\n", + "If you haven't already seen our documentation on the [ValidMind Library](https://docs.validmind.ai/developer/validmind-library.html), we recommend you begin by exploring the available resources in this section. There, you can learn more about documenting models and running tests, as well as find code samples and our Python Library API reference.\n", + "\n", + "
For access to all features available in this notebook, you'll need access to a ValidMind account.\n", + "

\n", + "Register with ValidMind
\n", + "\n", + "\n", + "\n", + "### Key concepts\n", + "\n", + "**Model documentation**: A structured and detailed record pertaining to a model, encompassing key components such as its underlying assumptions, methodologies, data sources, inputs, performance metrics, evaluations, limitations, and intended uses. It serves to ensure transparency, adherence to regulatory requirements, and a clear understanding of potential risks associated with the model’s application.\n", + "\n", + "**Documentation template**: Functions as a test suite and lays out the structure of model documentation, segmented into various sections and sub-sections. Documentation templates define the structure of your model documentation, specifying the tests that should be run, and how the results should be displayed.\n", + "\n", + "**Tests**: A function contained in the ValidMind Library, designed to run a specific quantitative test on the dataset or model. Tests are the building blocks of ValidMind, used to evaluate and document models and datasets, and can be run individually or as part of a suite defined by your model documentation template.\n", + "\n", + "**Custom tests**: Custom tests are functions that you define to evaluate your model or dataset. These functions can be registered via the ValidMind Library to be used with the ValidMind Platform.\n", + "\n", + "**Inputs**: Objects to be evaluated and documented in the ValidMind Library. 
They can be any of the following:\n", + "\n", + " - **model**: A single model that has been initialized in ValidMind with [`vm.init_model()`](https://docs.validmind.ai/validmind/validmind.html#init_model).\n", + " - **dataset**: Single dataset that has been initialized in ValidMind with [`vm.init_dataset()`](https://docs.validmind.ai/validmind/validmind.html#init_dataset).\n", + " - **models**: A list of ValidMind models - usually this is used when you want to compare multiple models in your custom test.\n", + " - **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom test. See this [example](https://docs.validmind.ai/notebooks/how_to/tests/run_tests/configure_tests/run_tests_that_require_multiple_datasets.html) for more information.\n", + "\n", + "**Parameters**: Additional arguments that can be passed when running a ValidMind test, used to pass additional information to a test, customize its behavior, or provide additional context.\n", + "\n", + "**Outputs**: Custom tests can return elements like tables or plots. Tables may be a list of dictionaries (each representing a row) or a pandas DataFrame. Plots may be matplotlib or plotly figures.\n", + "\n", + "**Test suites**: Collections of tests designed to run together to automate and generate model documentation end-to-end for specific use-cases.\n", + "\n", + "Example: the [`classifier_full_suite`](https://docs.validmind.ai/validmind/validmind/test_suites/classifier.html#ClassifierFullSuite) test suite runs tests from the [`tabular_dataset`](https://docs.validmind.ai/validmind/validmind/test_suites/tabular_datasets.html) and [`classifier`](https://docs.validmind.ai/validmind/validmind/test_suites/classifier.html) test suites to fully document the data and model sections for binary classification model use-cases." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Setting up" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Install the ValidMind Library\n", + "\n", + "To install the library:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%pip install -q validmind" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Initialize the ValidMind Library" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Register sample model\n", + "\n", + "Let's first register a sample model for use with this notebook:\n", + "\n", + "1. In a browser, [log in to ValidMind](https://docs.validmind.ai/guide/configuration/log-in-to-validmind.html).\n", + "\n", + "2. In the left sidebar, navigate to **Inventory** and click **+ Register Model**.\n", + "\n", + "3. Enter the model details and click **Next >** to continue to assignment of model stakeholders. ([Need more help?](https://docs.validmind.ai/guide/model-inventory/register-models-in-inventory.html))\n", + "\n", + "4. Select your own name under the **MODEL OWNER** drop-down.\n", + "\n", + "5. Click **Register Model** to add the model to your inventory." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Apply documentation template\n", + "\n", + "Once you've registered your model, let's select a documentation template. A template predefines sections for your model documentation and provides a general outline to follow, making the documentation process much easier.\n", + "\n", + "1. In the left sidebar that appears for your model, click **Documents** and select **Documentation**.\n", + "\n", + "2. Under **TEMPLATE**, select `Model Source Code Documentation`.\n", + "\n", + "3. Click **Use Template** to apply the template." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "
Can't select this template?\n", + "

\n", + "Your organization administrators may need to add it to your template library:\n", + "\n", + "
" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Get your code snippet\n", + "\n", + "ValidMind generates a unique _code snippet_ for each registered model to connect with your developer environment. You initialize the ValidMind Library with this code snippet, which ensures that your documentation and tests are uploaded to the correct model when you run the notebook.\n", + "\n", + "1. On the left sidebar that appears for your model, select **Getting Started** and click **Copy snippet to clipboard**.\n", + "2. Next, [load your model identifier credentials from an `.env` file](https://docs.validmind.ai/developer/model-documentation/store-credentials-in-env-file.html) or replace the placeholder with your own code snippet:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Load your model identifier credentials from an `.env` file\n", + "\n", + "%load_ext dotenv\n", + "%dotenv .env\n", + "\n", + "# Or replace with your code snippet\n", + "\n", + "import validmind as vm\n", + "\n", + "vm.init(\n", + " # api_host=\"...\",\n", + " # api_key=\"...\",\n", + " # api_secret=\"...\",\n", + " # model=\"...\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Preview the documentation template\n", + "\n", + "Let's verify that you have connected the ValidMind Library to the ValidMind Platform and that the appropriate *template* is selected for your model.\n", + "\n", + "You will upload documentation and test results unique to your model based on this template later on. 
For now, **take a look at the default structure that the template provides with [the `vm.preview_template()` function](https://docs.validmind.ai/validmind/validmind.html#preview_template)** from the ValidMind library and note the empty sections:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "vm.preview_template()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Common function\n", + "The code above defines two key functions:\n", + "1. A function to read source code from 'customer_churn_full_suite.py' file\n", + "2. An 'explain_code' function that uses ValidMind's experimental agents to analyze and explain code." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "source_code=\"\"\n", + "with open(\"customer_churn_full_suite.py\", \"r\") as f:\n", + " source_code = f.read()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The `vm.experimental.agents.run_task` function is used to execute AI agent tasks.\n", + "\n", + "It requires:\n", + "- task: The type of task to run (e.g. 
`code_explainer`)\n", + "- input: A dictionary containing task-specific parameters\n", + " - For `code_explainer`, this includes:\n", + " - **source_code** (str): The code to be analyzed\n", + " - **user_instructions** (str): Instructions for how to analyze the code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def explain_code(content_id: str, user_instructions: str):\n", + " \"\"\"Run code explanation task and log the results.\n", + " By default, the code explainer includes sections for:\n", + " - Main Purpose and Overall Functionality\n", + " - Breakdown of Key Functions or Components\n", + " - Potential Risks or Failure Points \n", + " - Assumptions or Limitations\n", + " If you want default sections, specify user_instructions as an empty string.\n", + " \n", + " Args:\n", + " user_instructions (str): Instructions for how to analyze the code\n", + " content_id (str): ID to use when logging the results\n", + " \n", + " Returns:\n", + " The result object from running the code explanation task\n", + " \"\"\"\n", + " result = vm.experimental.agents.run_task(\n", + " task=\"code_explainer\",\n", + " input={\n", + " \"source_code\": source_code,\n", + " \"user_instructions\": user_instructions\n", + " }\n", + " )\n", + " result.log(content_id=content_id)\n", + " return result" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "\n", + "\n", + "## Default Behavior" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "By default, the code explainer includes sections for:\n", + "- Main Purpose and Overall Functionality\n", + "- Breakdown of Key Functions or Components\n", + "- Potential Risks or Failure Points \n", + "- Assumptions or Limitations\n", + "\n", + "If you want default sections, specify `user_instructions` as an empty string. 
For example:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "result = vm.experimental.agents.run_task(\n", + " task=\"code_explainer\",\n", + " input={\n", + " \"source_code\": source_code,\n", + " \"user_instructions\": \"\"\n", + " }\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "\n", + "\n", + "## Codebase Overview\n", + "\n", + "Let's analyze your codebase structure to understand the main modules, components, entry points and their relationships. We'll also examine the technology stack and frameworks that are being utilized in the implementation." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "result = explain_code(\n", + " user_instructions=\"\"\"\n", + " Please provide a summary of the following bullet points only.\n", + " - Describe the overall structure of the source code repository.\n", + " - Identify main modules, folders, and scripts.\n", + " - Highlight entry points for training, inference, and evaluation.\n", + " - State the main programming languages and frameworks used.\n", + " \"\"\",\n", + " content_id=\"code_structure_summary\"\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "result = explain_code(\n", + " user_instructions=\"\",\n", + " content_id=\"code_structure_summary\"\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "\n", + "\n", + "## Environment and Dependencies ('environment_setup')\n", + "Let's document the technical requirements and setup needed to run your code, including Python packages, system dependencies, and environment configuration files. Understanding these requirements is essential for proper development environment setup and consistent deployments across different environments." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "result = explain_code(\n", + " user_instructions=\"\"\"\n", + " Please provide a summary of the following bullet points only.\n", + " - List Python packages and system dependencies (OS, compilers, etc.).\n", + " - Reference environment files (requirements.txt, environment.yml, Dockerfile).\n", + " - Include setup instructions using Conda, virtualenv, or containers.\n", + " Please remove Potential Risks or Failure Points and Assumptions or Limitations sections. Please don't add any other sections.\n", + " \"\"\",\n", + " content_id=\"setup_instructions\"\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "\n", + "\n", + "## Data Ingestion and Preprocessing\n", + "Let's document how your code handles data, including data sources, validation procedures, and preprocessing steps. We'll examine the data pipeline architecture, covering everything from initial data loading through feature engineering and quality checks." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "result = explain_code(\n", + " user_instructions=\"\"\"\n", + " Please provide a summary of the following bullet points only.\n", + " - Specify data input formats and sources.\n", + " - Document ingestion, validation, and transformation logic.\n", + " - Explain how raw data is preprocessed and features are generated.\n", + " Please remove Potential Risks or Failure Points and Assumptions or Limitations sections. Please don't add any other sections. \"\"\",\n", + " content_id=\"data_handling_notes\"\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + " \n", + "\n", + "\n", + "\n", + "## Model Implementation Details\n", + "Let's document the core implementation details of your model, including its architecture, components, and key algorithms. 
Understanding the technical implementation is crucial for maintenance, debugging, and future improvements to the codebase. We'll examine how theoretical concepts are translated into working code." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "result = explain_code(\n", + " user_instructions=\"\"\"\n", + " Please provide a summary of the following bullet points only.\n", + " - Describe the core model code structure (classes, functions).\n", + " - Link code to theoretical models or equations when applicable.\n", + " - Note custom components like loss functions or feature selectors.\n", + " Please remove Potential Risks or Failure Points and Assumptions or Limitations sections. Please don't add any other sections.\n", + " \"\"\",\n", + " content_id=\"model_code_description\"\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "\n", + "\n", + "## Model Training Pipeline\n", + "\n", + "Let's document the training pipeline implementation, including how models are trained, optimized and evaluated. We'll examine the training process workflow, hyperparameter tuning approach, and model checkpointing mechanisms. This section provides insights into how the model learns from data and achieves optimal performance." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "result = explain_code(\n", + " user_instructions=\"\"\"\n", + " Please provide a summary of the following bullet points only.\n", + " - Explain the training process, optimization strategy, and hyperparameters.\n", + " - Describe logging, checkpointing, and early stopping mechanisms.\n", + " - Include references to training config files or tuning logic.\n", + " Please remove Potential Risks or Failure Points and Assumptions or Limitations sections. 
Please don't add any other sections.\n", + " \"\"\",\n", + " content_id=\"training_logic_details\"\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "\n", + "\n", + "## Evaluation and Validation Code\n", + "Let's examine how the model's validation and evaluation code is implemented, including the metrics calculation and validation processes. We'll explore the diagnostic tools and visualization methods used to assess model performance. This section will also cover how validation results are logged and stored for future reference." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "result = explain_code(\n", + " user_instructions=\"\"\"\n", + " Please provide a summary of the following bullet points only.\n", + " - Describe how validation is implemented and metrics are calculated.\n", + " - Include plots and diagnostic tools (e.g., ROC, SHAP, confusion matrix).\n", + " - State how outputs are logged and persisted.\n", + " Please remove Potential Risks or Failure Points and Assumptions or Limitations sections. Please don't add any other sections.\n", + " \"\"\",\n", + " content_id=\"evaluation_logic_notes\"\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "\n", + "\n", + "## Inference and Scoring Logic\n", + "Let's examine how the model performs inference and scoring on new data. This section will cover the implementation details of loading trained models, making predictions, and any required pre/post-processing steps. We'll also look at the APIs and interfaces available for both real-time serving and batch scoring scenarios." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "result = explain_code(\n", + " user_instructions=\"\"\"\n", + " Please provide a summary of the following bullet points only.\n", + " - Detail how the trained model is loaded and used for predictions.\n", + " - Explain I/O formats and APIs for serving or batch scoring.\n", + " - Include any preprocessing/postprocessing logic required.\n", + " Please remove Potential Risks or Failure Points and Assumptions or Limitations sections. Please don't add any other sections.\n", + " \"\"\",\n", + " content_id=\"inference_mechanism\"\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "\n", + "\n", + "## Configuration and Parameters\n", + "Let's explore how configuration and parameters are managed in the codebase. We'll examine the configuration files, command-line arguments, environment variables, and other mechanisms used to control model behavior. This section will also cover parameter versioning and how different configurations are tracked across model iterations." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "result = explain_code(\n", + " user_instructions=\"\"\"\n", + " Please provide a summary of the following bullet points only.\n", + " - Describe configuration management (files, CLI args, env vars).\n", + " - Highlight default parameters and override mechanisms.\n", + " - Reference versioning practices for config files.\n", + " Please remove Potential Risks or Failure Points and Assumptions or Limitations sections. Please don't add any other sections.\n", + " \"\"\",\n", + " content_id=\"config_control_notes\"\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "\n", + "\n", + "## Unit and Integration Testing\n", + "Let's examine the testing strategy and implementation in the codebase. 
We'll analyze the unit tests, integration tests, and testing frameworks used to ensure code quality and reliability. This section will also cover test coverage metrics and continuous integration practices." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "result = explain_code(\n", + " user_instructions=\"\"\"\n", + " Please provide a summary of the following bullet points only.\n", + " - List unit and integration tests and what they cover.\n", + " - Mention testing frameworks and coverage tools used.\n", + " - Explain testing strategy for production-readiness.\n", + " Please remove Potential Risks or Failure Points and Assumptions or Limitations sections. Please don't add any other sections.\n", + " \"\"\",\n", + " content_id=\"test_strategy_overview\"\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "\n", + "\n", + "## Logging and Monitoring Hooks\n", + "Let's analyze how logging and monitoring are implemented in the codebase. We'll examine the logging configuration, monitoring hooks, and key metrics being tracked. This section will also cover any real-time observability integrations and alerting mechanisms in place." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "result = explain_code(\n", + " user_instructions=\"\"\"\n", + " Please provide a summary of the following bullet points only.\n", + " - Describe logging configuration and structure.\n", + " - Highlight real-time monitoring or observability integrations.\n", + " - List key events, metrics, or alerts tracked.\n", + " Please remove Potential Risks or Failure Points and Assumptions or Limitations sections. 
Please don't add any other sections.\n", + " \"\"\",\n", + " content_id=\"logging_monitoring_notes\"\n", + ")\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "\n", + "\n", + "## Code and Model Versioning\n", + "Let's examine how code and model versioning is managed in the codebase. This section will cover version control practices, including Git workflows and model artifact versioning tools like DVC or MLflow. We'll also look at how versioning integrates with the CI/CD pipeline." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "result = explain_code(\n", + " user_instructions=\"\"\"\n", + " Please provide a summary of the following bullet points only.\n", + " - Describe Git usage, branching, tagging, and commit standards.\n", + " - Include model artifact versioning practices (e.g., DVC, MLflow).\n", + " - Reference any automation in CI/CD.\n", + " Please remove the following sections: \n", + " - Potential Risks or Failure Points\n", + " - Assumptions or Limitations\n", + " - Breakdown of Key Functions or Components\n", + " Please don't add any other sections.\n", + " \"\"\",\n", + " content_id=\"version_tracking_description\"\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "\n", + "\n", + "## Security and Access Control\n", + "Let's analyze the security and access control measures implemented in the codebase. We'll examine how sensitive data and code are protected through access controls, encryption, and compliance measures. Additionally, we'll review secure deployment practices and any specific handling of PII data." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "result = explain_code(\n", + " user_instructions=\"\"\"\n", + " Please provide a summary of the following bullet points only.\n", + " - Document access controls for source code and data.\n", + " - Include any encryption, PII handling, or compliance measures.\n", + " - Mention secure deployment practices.\n", + " Please remove the following sections: \n", + " - Potential Risks or Failure Points\n", + " - Assumptions or Limitations\n", + " - Breakdown of Key Functions or Components\n", + " Please don't add any other sections.\n", + " \"\"\",\n", + " content_id=\"security_policies_notes\"\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "\n", + "\n", + "## Example Runs and Scripts\n", + "Let's explore example runs and scripts that demonstrate how to use this codebase in practice. We'll look at working examples, command-line usage, and sample notebooks that showcase the core functionality. This section will also point to demo datasets and test scenarios that can help new users get started quickly." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "result = explain_code(\n", + " user_instructions=\"\"\"\n", + " Please provide a summary of the following bullet points only.\n", + " - Provide working script examples.\n", + " - Include CLI usage instructions or sample notebooks.\n", + " - Link to demo datasets or test scenarios.\n", + " Please remove the following sections: \n", + " - Potential Risks or Failure Points\n", + " - Assumptions or Limitations\n", + " - Breakdown of Key Functions or Components\n", + " Please don't add any other sections.\n", + " \"\"\",\n", + " content_id=\"runnable_examples\"\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "\n", + "\n", + "## Known Issues and Future Improvements\n", + "Let's examine the current limitations and areas for improvement in the codebase. This section will document known technical debt, bugs, and feature gaps that need to be addressed. We'll also outline proposed enhancements and reference any existing tickets or GitHub issues tracking these improvements." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "result = explain_code(\n", + " user_instructions=\"\"\"\n", + " Please provide a summary of the following bullet points only.\n", + " - List current limitations or technical debt.\n", + " - Outline proposed enhancements or refactors.\n", + " - Reference relevant tickets, GitHub issues, or roadmap items.\n", + " Please remove Potential Risks or Failure Points and Assumptions or Limitations sections. Please don't add any other sections.\n", + " \"\"\",\n", + " content_id=\"issues_and_improvements_log\"\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "copyright-ccbede139a26452183291a108b791513", + "metadata": {}, + "source": [ + "\n", + "\n", + "\n", + "\n", + "***\n", + "\n", + "Copyright © 2023-2026 ValidMind Inc. 
All rights reserved.
\n", + "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", + "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "validmind-1QuffXMV-py3.11", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.9" + } + }, + "nbformat": 4, + "nbformat_minor": 4 } diff --git a/notebooks/use_cases/nlp_and_llm/rag_benchmark_demo.ipynb b/notebooks/use_cases/nlp_and_llm/rag_benchmark_demo.ipynb index 0296db81b..51515ad7b 100644 --- a/notebooks/use_cases/nlp_and_llm/rag_benchmark_demo.ipynb +++ b/notebooks/use_cases/nlp_and_llm/rag_benchmark_demo.ipynb @@ -1,1868 +1,1869 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# RAG Model Benchmarking Demo\n", - "\n", - "In this notebook, we are going to implement a simple RAG Model for automating the process of answering RFP questions using GenAI. We will see how we can initialize an embedding model, a retrieval model and a generator model with LangChain components and use them within the ValidMind Library to run tests against them. We'll demonstrate how to set up multiple models for benchmarking at each stage of the RAG pipeline - specifically two embedding models, two retrieval models with different parameters, and two LLM models (GPT-3.5 and GPT-4o) - allowing for comparison of performance across different configurations. Finally, we will see how we can put them together in a Pipeline and run that to get e2e results and run tests against that." 
- ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "::: {.content-hidden when-format=\"html\"}\n", - "## Contents \n", - "- [About ValidMind](#toc1__) \n", - " - [Before you begin](#toc1_1__) \n", - " - [New to ValidMind?](#toc1_2__) \n", - " - [Key concepts](#toc1_3__) \n", - "- [Setting up](#toc2__) \n", - " - [Install the ValidMind Library](#toc2_1__) \n", - " - [Initialize the ValidMind Library](#toc2_2__) \n", - " - [Register sample model](#toc2_2_1__) \n", - " - [Apply documentation template](#toc2_2_2__) \n", - " - [Get your code snippet](#toc2_2_3__) \n", - "- [Read Open AI API Key](#toc3__) \n", - "- [Dataset Loader](#toc4__) \n", - "- [Data validation](#toc5__) \n", - " - [Duplicates](#toc5_1__) \n", - " - [Stop Words](#toc5_2__) \n", - " - [Punctuations](#toc5_3__) \n", - " - [Common Words](#toc5_4__) \n", - " - [Language Detection](#toc5_5__) \n", - " - [Toxicity Score](#toc5_6__) \n", - " - [Polarity and Subjectivity](#toc5_7__) \n", - " - [Sentiment](#toc5_8__) \n", - " - [Assign Predictions](#toc5_9__) \n", - " - [Run tests](#toc5_10__) \n", - " - [Generate embeddings for the Train Set](#toc5_11__) \n", - " - [Insert embeddings and questions into Vector DB](#toc5_12__) \n", - "- [Prompt Evaluation](#toc6__) \n", - "- [RAGAS evaluation](#toc7__) \n", - " - [Semantic Similarity](#toc7_1__) \n", - " - [Context Entity Recall](#toc7_2__) \n", - " - [Context Precision](#toc7_3__) \n", - " - [Context Precision Without Reference](#toc7_4__) \n", - " - [Faithfulness](#toc7_5__) \n", - " - [Response Relevancy](#toc7_6__) \n", - " - [Context Recall](#toc7_7__) \n", - " - [Answer Correctness](#toc7_8__) \n", - " - [Aspect Critic](#toc7_9__) \n", - " - [Noise Sensitivity](#toc7_10__) \n", - "- [Generation quality](#toc8__) \n", - " - [Token Disparity](#toc8_1__) \n", - " - [ROUGE Score](#toc8_2__) \n", - " - [BLEU Score](#toc8_3__) \n", - " - [BERT Score](#toc8_4__) \n", - " - [METEOR Score](#toc8_5__) \n", - "- [Bias and Toxicity](#toc9__) 
\n", - " - [Toxicity Score](#toc9_1__) \n", - " - [Regard Score](#toc9_2__) \n", - "- [Upgrade ValidMind](#toc10__) \n", - "\n", - ":::\n", - "\n", - "" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "## About ValidMind\n", - "\n", - "ValidMind is a suite of tools for managing model risk, including risk associated with AI and statistical models.\n", - "\n", - "You use the ValidMind Library to automate documentation and validation tests, and then use the ValidMind Platform to collaborate on model documentation. Together, these products simplify model risk management, facilitate compliance with regulations and institutional standards, and enhance collaboration between yourself and model validators.\n", - "\n", - "\n", - "\n", - "### Before you begin\n", - "\n", - "This notebook assumes you have basic familiarity with Python, including an understanding of how functions work. If you are new to Python, you can still run the notebook but we recommend further familiarizing yourself with the language. \n", - "\n", - "If you encounter errors due to missing modules in your Python environment, install the modules with `pip install`, and then re-run the notebook. For more help, refer to [Installing Python Modules](https://docs.python.org/3/installing/index.html).\n", - "\n", - "\n", - "\n", - "### New to ValidMind?\n", - "\n", - "If you haven't already seen our documentation on the [ValidMind Library](https://docs.validmind.ai/developer/validmind-library.html), we recommend you begin by exploring the available resources in this section. There, you can learn more about documenting models and running tests, as well as find code samples and our Python Library API reference.\n", - "\n", - "
For access to all features available in this notebook, you'll need access to a ValidMind account.\n", - "

\n", - "Register with ValidMind
\n", - "\n", - "\n", - "\n", - "### Key concepts\n", - "\n", - "- **FunctionModels**: ValidMind offers support for creating `VMModel` instances from Python functions. This enables us to support any \"model\" by simply using the provided function as the model's `predict` method.\n", - "- **PipelineModels**: ValidMind models (`VMModel` instances) of any type can be piped together to create a model pipeline. This allows model components to be created and tested/documented independently, and then combined into a single model for end-to-end testing and documentation. We use the `|` operator to pipe models together.\n", - "- **RAG**: RAG stands for Retrieval Augmented Generation and refers to a wide range of GenAI applications where some form of retrieval is used to add context to the prompt so that the LLM that generates content can refer to it when creating its output. In this notebook, we are going to implement a simple RAG setup using LangChain components." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Prerequisites\n", - "\n", - "Let's go ahead and install the `validmind` library if its not already installed... Then we can install the `qdrant-client` library for our vector store and `langchain` for everything else:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%pip install -q \"validmind[llm]\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%pip install -q qdrant-client langchain langchain-openai sentencepiece" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "## Setting up" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Install the ValidMind Library\n", - "\n", - "
Recommended Python versions\n", - "

\n", - "Python 3.8 <= x <= 3.11
\n", - "\n", - "To install the library:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%pip install -q validmind" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Initialize the ValidMind Library" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "#### Register sample model\n", - "\n", - "Let's first register a sample model for use with this notebook:\n", - "\n", - "1. In a browser, [log in to ValidMind](https://docs.validmind.ai/guide/configuration/log-in-to-validmind.html).\n", - "\n", - "2. In the left sidebar, navigate to **Inventory** and click **+ Register Model**.\n", - "\n", - "3. Enter the model details and click **Next >** to continue to assignment of model stakeholders. ([Need more help?](https://docs.validmind.ai/guide/model-inventory/register-models-in-inventory.html))\n", - "\n", - "4. Select your own name under the **MODEL OWNER** drop-down.\n", - "\n", - "5. Click **Register Model** to add the model to your inventory." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "#### Apply documentation template\n", - "\n", - "Once you've registered your model, let's select a documentation template. A template predefines sections for your model documentation and provides a general outline to follow, making the documentation process much easier.\n", - "\n", - "1. In the left sidebar that appears for your model, click **Documents** and select **Documentation**.\n", - "\n", - "2. Under **TEMPLATE**, select `Gen AI RAG`.\n", - "\n", - "3. Click **Use Template** to apply the template." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "
Can't select this template?\n", - "

\n", - "Your organization administrators may need to add it to your template library:\n", - "
" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "#### Get your code snippet\n", - "\n", - "ValidMind generates a unique _code snippet_ for each registered model to connect with your developer environment. You initialize the ValidMind Library with this code snippet, which ensures that your documentation and tests are uploaded to the correct model when you run the notebook.\n", - "\n", - "1. On the left sidebar that appears for your model, select **Getting Started** and click **Copy snippet to clipboard**.\n", - "2. Next, [load your model identifier credentials from an `.env` file](https://docs.validmind.ai/developer/model-documentation/store-credentials-in-env-file.html) or replace the placeholder with your own code snippet:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Load your model identifier credentials from an `.env` file\n", - "\n", - "%load_ext dotenv\n", - "%dotenv .env\n", - "\n", - "# Or replace with your code snippet\n", - "\n", - "import validmind as vm\n", - "\n", - "vm.init(\n", - " api_host = \"https://api.prod.validmind.ai/api/v1/tracking\",\n", - " api_key = \"...\",\n", - " api_secret = \"...\",\n", - " model = \"...\"\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "## Read Open AI API Key\n", - "\n", - "We will need to have an OpenAI API key to be able to use their `text-embedding-3-small` and `text-embedding-3-large` models for our embeddings, `gpt-3.5-turbo` and `gpt-4o` models for our generator and `gpt-4o` model for our LLM-as-Judge tests. If you don't have an OpenAI API key, you can get one by signing up at [OpenAI](https://platform.openai.com/signup). Then you can create a `.env` file in the root of your project and the following cell will load it from there. Alternatively, you can just uncomment the line below to directly set the key (not recommended for security reasons)." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# load openai api key\n", - "import os\n", - "\n", - "import dotenv\n", - "import nltk\n", - "\n", - "dotenv.load_dotenv()\n", - "nltk.download('stopwords')\n", - "nltk.download('punkt_tab')\n", - "\n", - "# os.environ[\"OPENAI_API_KEY\"] = \"sk-...\"\n", - "\n", - "if not \"OPENAI_API_KEY\" in os.environ:\n", - " raise ValueError(\"OPENAI_API_KEY is not set\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "## Dataset Loader\n", - "\n", - "Great, now that we have all of our dependencies installed, the ValidMind Library initialized and connected to our model and our OpenAI API key setup, we can go ahead and load our datasets. We will use the synthetic `RFP` dataset included with ValidMind for this notebook. This dataset contains a variety of RFP questions and ground truth answers that we can use both as the source where our Retriever will search for similar question-answer pairs as well as our test set for evaluating the performance of our RAG model. To do this, we just have to load it and call the preprocess function to get a split of the data into train and test sets." 
- ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [], - "source": [ - "# Import the sample dataset from the library\n", - "from validmind.datasets.llm.rag import rfp\n", - "\n", - "raw_df = rfp.load_data()\n", - "train_df, test_df = rfp.preprocess(raw_df)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "vm_train_ds = vm.init_dataset(\n", - " train_df,\n", - " text_column=\"question\",\n", - " target_column=\"ground_truth\",\n", - ")\n", - "\n", - "vm_test_ds = vm.init_dataset(\n", - " test_df,\n", - " text_column=\"question\",\n", - " target_column=\"ground_truth\",\n", - ")\n", - "\n", - "vm_test_ds.df.head()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "## Data validation\n", - "\n", - "Now that we have loaded our dataset, we can go ahead and run some data validation tests right away to start assessing and documenting the quality of our data. Since we are using a text dataset, we can use ValidMind's built-in array of text data quality tests to check that things like number of duplicates, missing values, and other common text data issues are not present in our dataset. We can also run some tests to check the sentiment and toxicity of our data." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Duplicates\n", - "\n", - "First, let's check for duplicates in our dataset. 
We can use the `validmind.data_validation.Duplicates` test and pass our dataset:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from validmind.tests import run_test\n", - "\n", - "run_test(\n", - " test_id=\"validmind.data_validation.Duplicates\",\n", - " inputs={\"dataset\": vm_train_ds},\n", - ").log()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Stop Words\n", - "\n", - "Next, let's check for stop words in our dataset. We can use the `validmind.data_validation.StopWords` test and pass our dataset:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " test_id=\"validmind.data_validation.nlp.StopWords\",\n", - " inputs={\n", - " \"dataset\": vm_train_ds,\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Punctuations\n", - "\n", - "Next, let's check for punctuations in our dataset. We can use the `validmind.data_validation.Punctuations` test:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " test_id=\"validmind.data_validation.nlp.Punctuations\",\n", - " inputs={\n", - " \"dataset\": vm_train_ds,\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Common Words\n", - "\n", - "Next, let's check for common words in our dataset. 
We can use the `validmind.data_validation.CommonWord` test:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " test_id=\"validmind.data_validation.nlp.CommonWords\",\n", - " inputs={\n", - " \"dataset\": vm_train_ds,\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Language Detection\n", - "\n", - "For documentation purposes, we can detect and log the languages used in the dataset with the `validmind.data_validation.LanguageDetection` test:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " test_id=\"validmind.data_validation.nlp.LanguageDetection\",\n", - " inputs={\n", - " \"dataset\": vm_train_ds,\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Toxicity Score\n", - "\n", - "Now, let's go ahead and run the `validmind.data_validation.nlp.Toxicity` test to compute a toxicity score for our dataset:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " \"validmind.data_validation.nlp.Toxicity\",\n", - " inputs={\n", - " \"dataset\": vm_train_ds,\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Polarity and Subjectivity\n", - "\n", - "We can also run the `validmind.data_validation.nlp.PolarityAndSubjectivity` test to compute the polarity and subjectivity of our dataset:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " \"validmind.data_validation.nlp.PolarityAndSubjectivity\",\n", - " inputs={\n", - " \"dataset\": vm_train_ds,\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - 
"\n", - "\n", - "### Sentiment\n", - "\n", - "Finally, we can run the `validmind.data_validation.nlp.Sentiment` test to plot the sentiment of our dataset:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " \"validmind.data_validation.nlp.Sentiment\",\n", - " inputs={\n", - " \"dataset\": vm_train_ds,\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Embedding Model\n", - "\n", - "Now that we have our dataset loaded and have run some data validation tests to assess and document the quality of our data, we can go ahead and initialize our embedding model. We will use `text-embedding-3-small` and `text-embedding-3-large` models from OpenAI for this purpose wrapped in the `OpenAIEmbeddings` class from LangChain. This model will be used to \"embed\" our questions both for inserting the question-answer pairs from the \"train\" set into the vector store and for embedding the question from inputs when making predictions with our RAG model." 
- ] - }, - { - "cell_type": "code", - "execution_count": 15, - "metadata": {}, - "outputs": [], - "source": [ - "from langchain_openai import OpenAIEmbeddings\n", - "\n", - "embedding_small_client = OpenAIEmbeddings(model=\"text-embedding-3-small\")\n", - "\n", - "\n", - "def embed_small(input):\n", - " \"\"\"Returns a text embedding for the given text\"\"\"\n", - " return embedding_small_client.embed_query(input[\"question\"])\n", - "\n", - "\n", - "vm_embedder_small = vm.init_model(input_id=\"embedding_small_model\", predict_fn=embed_small)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "embedding_large_client = OpenAIEmbeddings(model=\"text-embedding-3-large\")\n", - "\n", - "\n", - "def embed_large(input):\n", - " \"\"\"Returns a text embedding for the given text\"\"\"\n", - " return embedding_large_client.embed_query(input[\"question\"])\n", - "\n", - "\n", - "vm_embedder_large = vm.init_model(input_id=\"embedding_large_model\", predict_fn=embed_large)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "What we have done here is to initialize the `OpenAIEmbeddings` class so it uses OpenAI's `text-embedding-3-small` and `text-embedding-3-large` models. We then created an `embed` function that takes in an `input` dictionary and uses the `embed_query` method of the embedding client to compute the embeddings of the `question`. We use an `embed` function since that is how ValidMind supports any custom model. We will use this strategy for the retrieval and generator models as well but you could also use, say, a HuggingFace model directly. See the documentation for more information on which model types are directly supported - [ValidMind Documentation](https://docs.validmind.ai/validmind/validmind.html)... Finally, we use the `init_model` function from the ValidMind Library to create a `VMModel` object that can be used in ValidMind tests. 
This also logs the model to our model documentation and any test that uses the model will be linked to the logged model and its metadata." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Assign Predictions\n", - "\n", - "To precompute the embeddings for our test set, we can call the `assign_predictions` method of our `vm_test_ds` object we created above. This will compute the embeddings for each question in the test set and store them in the a special prediction column of the test set thats linked to our `vm_embedder` model. This will allow us to use these embeddings later when we run tests against our embedding model." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "vm_test_ds.assign_predictions(vm_embedder_small)\n", - "vm_test_ds.assign_predictions(vm_embedder_large)\n", - "print(vm_test_ds)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Run tests\n", - "\n", - "Now that everything is setup for the embedding model, we can go ahead and run some tests to assess and document the quality of our embeddings. We will use the `validmind.model_validation.embeddings.*` tests to compute a variety of metrics against our model." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " \"validmind.model_validation.embeddings.StabilityAnalysisRandomNoise\",\n", - " input_grid={\n", - " \"model\": [vm_embedder_small, vm_embedder_large],\n", - " \"dataset\": [vm_test_ds],\n", - " },\n", - " params={\n", - " \"probability\": 0.3,\n", - " \"mean_similarity_threshold\": 0.7,\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " \"validmind.model_validation.embeddings.StabilityAnalysisSynonyms\",\n", - " input_grid={\n", - " \"model\": [vm_embedder_small, vm_embedder_large],\n", - " \"dataset\": [vm_test_ds],\n", - " },\n", - " params={\n", - " \"probability\": 0.3,\n", - " \"mean_similarity_threshold\": 0.7,\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " \"validmind.model_validation.embeddings.StabilityAnalysisTranslation\",\n", - " input_grid={\n", - " \"model\": [vm_embedder_small, vm_embedder_large],\n", - " \"dataset\": [vm_test_ds],\n", - " },\n", - " params={\n", - " \"source_lang\": \"en\",\n", - " \"target_lang\": \"fr\",\n", - " \"mean_similarity_threshold\": 0.7,\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " \"validmind.model_validation.embeddings.CosineSimilarityHeatmap\",\n", - " input_grid={\n", - " \"model\": [vm_embedder_small, vm_embedder_large],\n", - " \"dataset\": [vm_test_ds],\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " \"validmind.model_validation.embeddings.CosineSimilarityDistribution\",\n", - " input_grid={\n", - " \"model\": [vm_embedder_small, 
vm_embedder_large],\n", - " \"dataset\": [vm_test_ds],\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " \"validmind.model_validation.embeddings.PCAComponentsPairwisePlots\",\n", - " input_grid={\n", - " \"model\": [vm_embedder_small, vm_embedder_large],\n", - " \"dataset\": [vm_test_ds],\n", - " },\n", - " params={\n", - " \"n_components\": 3,\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Setup Vector Store\n", - "\n", - "Great, so now that we have assessed our embedding model and verified that it is performing well, we can go ahead and use it to compute embeddings for our question-answer pairs in the \"train\" set. We will then use these embeddings to insert the question-answer pairs into a vector store. We will use an in-memory `qdrant` vector database for demo purposes but any option would work just as well here. We will use the `QdrantClient` class from LangChain to interact with the vector store. This class will allow us to insert and search for embeddings in the vector store." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Generate embeddings for the Train Set\n", - "\n", - "We can use the same `assign_predictions` method from earlier except this time we will use the `vm_train_ds` object to compute the embeddings for the question-answer pairs in the \"train\" set." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "vm_train_ds.assign_predictions(vm_embedder_small)\n", - "print(vm_train_ds)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Insert embeddings and questions into Vector DB\n", - "\n", - "Now that we have computed the embeddings for our question-answer pairs in the \"train\" set, we can go ahead and insert them into the vector store:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from langchain_community.vectorstores import Qdrant\n", - "from langchain_community.document_loaders import DataFrameLoader\n", - "\n", - "# load documents from dataframe\n", - "loader = DataFrameLoader(train_df, page_content_column=\"question\")\n", - "docs = loader.load()\n", - "\n", - "# setup vector datastore\n", - "qdrant = Qdrant.from_documents(\n", - " docs,\n", - " embedding_small_client,\n", - " location=\":memory:\", # Local mode with in-memory storage only\n", - " collection_name=\"rfp_rag_collection\",\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Retrieval Model\n", - "\n", - "Now that we have an embedding model and a vector database setup and loaded with our data, we need a Retrieval model that can search for similar question-answer pairs for a given input question. Once created, we can initialize this as a ValidMind model and `assign_predictions` to it just like our embedding model. In this example, we'll create two retrieval models with different `k` parameters (the number of documents retrieved) to benchmark and compare their performance. This approach allows us to evaluate how retrieval depth affects the overall system quality." 
- ] - }, - { - "cell_type": "code", - "execution_count": 26, - "metadata": {}, - "outputs": [], - "source": [ - "def retrieve(input):\n", - " contexts = []\n", - "\n", - " for result in qdrant.similarity_search_with_score(input[\"question\"], k=5):\n", - " document, score = result\n", - " context = f\"Q: {document.page_content}\\n\"\n", - " context += f\"A: {document.metadata['ground_truth']}\\n\"\n", - "\n", - " contexts.append(context)\n", - "\n", - " return contexts\n", - "\n", - "\n", - "vm_retriever_k5 = vm.init_model(input_id=\"retrieval_k5_model\", predict_fn=retrieve)" - ] - }, - { - "cell_type": "code", - "execution_count": 27, - "metadata": {}, - "outputs": [], - "source": [ - "def retrieve(input):\n", - " contexts = []\n", - "\n", - " for result in qdrant.similarity_search_with_score(input[\"question\"], k=10):\n", - " document, score = result\n", - " context = f\"Q: {document.page_content}\\n\"\n", - " context += f\"A: {document.metadata['ground_truth']}\\n\"\n", - "\n", - " contexts.append(context)\n", - "\n", - " return contexts\n", - "\n", - "\n", - "vm_retriever_k10 = vm.init_model(input_id=\"retrieval_k10_model\", predict_fn=retrieve)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "vm_test_ds.assign_predictions(model=vm_retriever_k5)\n", - "vm_test_ds.assign_predictions(model=vm_retriever_k10)\n", - "print(vm_test_ds)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "vm_test_ds._df.head()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Generation Model\n", - "\n", - "As the final piece of this simple RAG pipeline, we can create and initialize a generation model that will use the retrieved context to generate an answer to the input question. We will use the `gpt-3.5-turbo` and `gpt-4o` models from OpenAI. 
Since we have two retrieval models (with different `k` values) and want to test two different LLMs, we'll create a total of four generator models - pairing each retrieval configuration with each LLM to comprehensively evaluate how both retrieval depth and model capability affect response quality." - ] - }, - { - "cell_type": "code", - "execution_count": 30, - "metadata": {}, - "outputs": [], - "source": [ - "from openai import OpenAI\n", - "\n", - "from validmind.models import Prompt\n", - "\n", - "\n", - "system_prompt = \"\"\"\n", - "You are an expert RFP AI assistant.\n", - "You are tasked with answering new RFP questions based on existing RFP questions and answers.\n", - "You will be provided with the existing RFP questions and answer pairs that are the most relevant to the new RFP question.\n", - "After that you will be provided with a new RFP question.\n", - "You will generate an answer and respond only with the answer.\n", - "Ignore your pre-existing knowledge and answer the question based on the provided context.\n", - "\"\"\".strip()\n", - "\n", - "openai_client = OpenAI()" - ] - }, - { - "cell_type": "code", - "execution_count": 31, - "metadata": {}, - "outputs": [], - "source": [ - "def generate(input):\n", - " \n", - " response = openai_client.chat.completions.create(\n", - " model=\"gpt-3.5-turbo\",\n", - " messages=[\n", - " {\"role\": \"system\", \"content\": system_prompt},\n", - " {\"role\": \"user\", \"content\": \"\\n\\n\".join(input[\"retrieval_k5_model\"])},\n", - " {\"role\": \"user\", \"content\": input[\"question\"]},\n", - " ],\n", - " )\n", - " \n", - " return response.choices[0].message.content\n", - "\n", - "\n", - "vm_generator_k5_gpt35 = vm.init_model(\n", - " input_id=\"generation_k5_gpt35_model\",\n", - " predict_fn=generate,\n", - " prompt=Prompt(template=system_prompt),\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": 32, - "metadata": {}, - "outputs": [], - "source": [ - "def generate(input):\n", - " response = 
openai_client.chat.completions.create(\n", - " model=\"gpt-3.5-turbo\",\n", - " messages=[\n", - " {\"role\": \"system\", \"content\": system_prompt},\n", - " {\"role\": \"user\", \"content\": \"\\n\\n\".join(input[\"retrieval_k10_model\"])},\n", - " {\"role\": \"user\", \"content\": input[\"question\"]},\n", - " ],\n", - " )\n", - "\n", - " return response.choices[0].message.content\n", - "\n", - "\n", - "vm_generator_k10_gpt35 = vm.init_model(\n", - " input_id=\"generation_k10_gpt35_model\",\n", - " predict_fn=generate,\n", - " prompt=Prompt(template=system_prompt),\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": 33, - "metadata": {}, - "outputs": [], - "source": [ - "def generate(input):\n", - " \n", - " response = openai_client.chat.completions.create(\n", - " model=\"gpt-4o\",\n", - " messages=[\n", - " {\"role\": \"system\", \"content\": system_prompt},\n", - " {\"role\": \"user\", \"content\": \"\\n\\n\".join(input[\"retrieval_k5_model\"])},\n", - " {\"role\": \"user\", \"content\": input[\"question\"]},\n", - " ],\n", - " )\n", - " \n", - " return response.choices[0].message.content\n", - "\n", - "\n", - "vm_generator_k5_gpt4o = vm.init_model(\n", - " input_id=\"generation_k5_gpt4o_model\",\n", - " predict_fn=generate,\n", - " prompt=Prompt(template=system_prompt),\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def generate(input):\n", - " response = openai_client.chat.completions.create(\n", - " model=\"gpt-4o\",\n", - " messages=[\n", - " {\"role\": \"system\", \"content\": system_prompt},\n", - " {\"role\": \"user\", \"content\": \"\\n\\n\".join(input[\"retrieval_k10_model\"])},\n", - " {\"role\": \"user\", \"content\": input[\"question\"]},\n", - " ],\n", - " )\n", - "\n", - " return response.choices[0].message.content\n", - "\n", - "\n", - "vm_generator_k10_gpt4o = vm.init_model(\n", - " input_id=\"generation_k10_gpt4o_model\",\n", - " 
predict_fn=generate,\n", - " prompt=Prompt(template=system_prompt),\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's test it out real quick:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import pandas as pd\n", - "\n", - "vm_generator_k5_gpt35.predict(\n", - " pd.DataFrame(\n", - " {\"retrieval_k5_model\": [[\"My name is anil\"]], \"question\": [\"what is my name\"]}\n", - " )\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "vm_generator_k5_gpt4o.predict(\n", - " pd.DataFrame(\n", - " {\"retrieval_k5_model\": [[\"My name is anil\"]], \"question\": [\"what is my name\"]}\n", - " )\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "## Prompt Evaluation\n", - "\n", - "Now that we have our generator model initialized, we can run some LLM-as-Judge tests to evaluate the system prompt. This will allow us to get an initial sense of how well the prompt meets a few best practices for prompt engineering. These tests use an LLM to rate the prompt on a scale of 1-10 against the following criteria:\n", - "\n", - "- **Examplar Bias**: When using multi-shot prompting, does the prompt contain an unbiased distribution of examples?\n", - "- **Delimitation**: When using complex prompts containing examples, contextual information, or other elements, is the prompt formatted in such a way that each element is clearly separated?\n", - "- **Clarity**: How clearly the prompt states the task.\n", - "- **Conciseness**: How succinctly the prompt states the task.\n", - "- **Instruction Framing**: Whether the prompt contains negative instructions.\n", - "- **Specificity**: How specific the prompt defines the task." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " \"validmind.prompt_validation.Bias\",\n", - " inputs={\n", - " \"model\": vm_generator_k5_gpt4o,\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " \"validmind.prompt_validation.Clarity\",\n", - " inputs={\n", - " \"model\": vm_generator_k5_gpt4o,\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " \"validmind.prompt_validation.Conciseness\",\n", - " inputs={\n", - " \"model\": vm_generator_k5_gpt4o,\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " \"validmind.prompt_validation.Delimitation\",\n", - " inputs={\n", - " \"model\": vm_generator_k5_gpt4o,\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " \"validmind.prompt_validation.NegativeInstruction\",\n", - " inputs={\n", - " \"model\": vm_generator_k5_gpt4o,\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " \"validmind.prompt_validation.Specificity\",\n", - " inputs={\n", - " \"model\": vm_generator_k5_gpt4o,\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Setup RAG Pipeline Model\n", - "\n", - "Now that we have all of our individual \"component\" models setup and initialized we need some way to put them all together in a single \"pipeline\". We can use the `PipelineModel` class to do this. 
This ValidMind model type simply wraps any number of other ValidMind models and runs them in sequence. We can use a pipe(`|`) operator - in Python this is normally an `or` operator but we have overloaded it for easy pipeline creation - to chain together our models. We can then initialize this pipeline model and assign predictions to it just like any other model." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "vm_rag_k5_gpt35_model = vm.init_model(vm_retriever_k5 | vm_generator_k5_gpt35, input_id=\"rag_k5_gpt35_model\")\n", - "vm_rag_k10_gpt35_model = vm.init_model(vm_retriever_k10 | vm_generator_k10_gpt35, input_id=\"rag_k10_gpt35_model\")\n", - "vm_rag_k5_gpt4o_model = vm.init_model(vm_retriever_k5 | vm_generator_k5_gpt4o, input_id=\"rag_k5_gpt4o_model\")\n", - "vm_rag_k10_gpt4o_model = vm.init_model(vm_retriever_k10 | vm_generator_k10_gpt4o, input_id=\"rag_k10_gpt4o_model\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We can `assign_predictions` to the pipeline model just like we did with the individual models. This will run the pipeline on the test set and store the results in the test set for later use." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "vm_test_ds.assign_predictions(model=vm_rag_k5_gpt35_model)\n", - "vm_test_ds.assign_predictions(model=vm_rag_k10_gpt35_model)\n", - "vm_test_ds.assign_predictions(model=vm_rag_k5_gpt4o_model)\n", - "vm_test_ds.assign_predictions(model=vm_rag_k10_gpt4o_model)\n", - "print(vm_test_ds)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "vm_test_ds._df.head(5)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Run tests" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "## RAGAS evaluation\n", - "\n", - "Let's go ahead and run some of our new RAG tests against our model...\n", - "\n", - "> Note: these tests are still being developed and are not yet in a stable state. We are using advanced tests here that use LLM-as-Judge and other strategies to assess things like the relevancy of the retrieved context to the input question and the correctness of the generated answer when compared to the ground truth. There is more to come in this area so stay tuned!" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import warnings\n", - "\n", - "warnings.filterwarnings(\"ignore\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Semantic Similarity\n", - "\n", - "The concept of Answer Semantic Similarity pertains to the assessment of the semantic resemblance between the generated answer and the ground truth. This evaluation is based on the ground truth and the answer, with values falling within the range of 0 to 1. 
A higher score signifies a better alignment between the generated answer and the ground truth.\n", - "\n", - "Measuring the semantic similarity between answers can offer valuable insights into the quality of the generated response. This evaluation utilizes a cross-encoder model to calculate the semantic similarity score." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " \"validmind.model_validation.ragas.SemanticSimilarity\",\n", - " inputs={\"dataset\": vm_test_ds},\n", - " param_grid={\n", - " \"response_column\": [\"rag_k5_gpt35_model_prediction\", \"rag_k5_gpt4o_model_prediction\"],\n", - " \"reference_column\": [\"ground_truth\"],\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Context Entity Recall\n", - "\n", - "This test gives the measure of recall of the retrieved context, based on the number of entities present in both ground_truths and contexts relative to the number of entities present in the ground_truths alone. Simply put, it is a measure of what fraction of entities are recalled from ground_truths. This test is useful in fact-based use cases like tourism help desk, historical QA, etc. This test can help evaluate the retrieval mechanism for entities, based on comparison with entities present in ground_truths, because in cases where entities matter, we need the contexts which cover them." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " \"validmind.model_validation.ragas.ContextEntityRecall\",\n", - " inputs={\"dataset\": vm_test_ds},\n", - " param_grid={\n", - " \"reference_column\": [\"ground_truth\"],\n", - " \"retrieved_contexts_column\": [\"retrieval_k5_model_prediction\", \"retrieval_k10_model_prediction\"],\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Context Precision\n", - "\n", - "Context Precision is a test that evaluates whether all of the ground-truth relevant items present in the contexts are ranked higher or not. Ideally all the relevant chunks must appear at the top ranks. This test is computed using the question, ground_truth and the contexts, with values ranging between 0 and 1, where higher scores indicate better precision." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " \"validmind.model_validation.ragas.ContextPrecision\",\n", - " inputs={\"dataset\": vm_test_ds},\n", - " param_grid={\n", - " \"user_input_column\": [\"question\"],\n", - " \"retrieved_contexts_column\": [\"retrieval_k5_model_prediction\", \"retrieval_k10_model_prediction\"],\n", - " \"reference_column\": [\"ground_truth\"],\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Context Precision Without Reference\n", - "\n", - "This test evaluates whether retrieved contexts align well with the expected response for a given user input, without requiring a ground-truth reference. This test assesses the relevance of each retrieved context chunk by comparing it directly to the response." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " \"validmind.model_validation.ragas.ContextPrecisionWithoutReference\",\n", - " inputs={\"dataset\": vm_test_ds},\n", - " param_grid=[\n", - " {\"user_input_column\": \"question\",\n", - " \"retrieved_contexts_column\": \"retrieval_k5_model_prediction\",\n", - " \"response_column\": \"rag_k5_gpt4o_model_prediction\"\n", - " },\n", - " {\"user_input_column\": \"question\",\n", - " \"retrieved_contexts_column\": \"retrieval_k10_model_prediction\",\n", - " \"response_column\": \"rag_k10_gpt4o_model_prediction\"\n", - " },\n", - " ],\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " \"validmind.model_validation.ragas.ContextPrecisionWithoutReference\",\n", - " inputs={\"dataset\": vm_test_ds},\n", - " param_grid={\n", - " \"user_input_column\": [\"question\"],\n", - " \"retrieved_contexts_column\": [\"retrieval_k5_model_prediction\"],\n", - " \"response_column\": [\"rag_k5_gpt35_model_prediction\", \"rag_k5_gpt4o_model_prediction\"],\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Faithfulness\n", - "\n", - "This measures the factual consistency of the generated answer against the given context. It is calculated from answer and retrieved context. The answer is scaled to (0,1) range. Higher the better.\n", - "\n", - "The generated answer is regarded as faithful if all the claims that are made in the answer can be inferred from the given context. To calculate this a set of claims from the generated answer is first identified. Then each one of these claims are cross checked with given context to determine if it can be inferred from given context or not." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " \"validmind.model_validation.ragas.Faithfulness\",\n", - " inputs={\"dataset\": vm_test_ds},\n", - " param_grid={\n", - " \"user_input_column\": [\"question\"],\n", - " \"response_column\": [\"rag_k5_gpt35_model_prediction\", \"rag_k5_gpt4o_model_prediction\"],\n", - " \"retrieved_contexts_column\": [\"retrieval_k5_model_prediction\"],\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Response Relevancy\n", - "\n", - "The Response Relevancy test, focuses on assessing how pertinent the generated answer is to the given prompt. A lower score is assigned to answers that are incomplete or contain redundant information and higher scores indicate better relevancy. This test is computed using the question, the context and the answer.\n", - "\n", - "The Response Relevancy is defined as the mean cosine similartiy of the original question to a number of artifical questions, which where generated (reverse engineered) based on the answer.\n", - "\n", - "Please note, that eventhough in practice the score will range between 0 and 1 most of the time, this is not mathematically guranteed, due to the nature of the cosine similarity ranging from -1 to 1.\n", - "\n", - "> Note: This is a reference free test. If you’re looking to compare ground truth answer with generated answer refer to Answer Correctness.\n", - "\n", - "An answer is deemed relevant when it directly and appropriately addresses the original question. Importantly, our assessment of answer relevance does not consider factuality but instead penalizes cases where the answer lacks completeness or contains redundant details. 
To calculate this score, the LLM is prompted to generate an appropriate question for the generated answer multiple times, and the mean cosine similarity between these generated questions and the original question is measured. The underlying idea is that if the generated answer accurately addresses the initial question, the LLM should be able to generate questions from the answer that align with the original question." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " \"validmind.model_validation.ragas.ResponseRelevancy\",\n", - " inputs={\"dataset\": vm_test_ds},\n", - " param_grid={\n", - " \"user_input_column\": [\"question\"],\n", - " \"response_column\": [\"rag_k5_gpt35_model_prediction\", \"rag_k5_gpt4o_model_prediction\"],\n", - " \"retrieved_contexts_column\": [\"retrieval_k5_model_prediction\"],\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Context Recall\n", - "\n", - "Context recall measures the extent to which the retrieved context aligns with the annotated answer, treated as the ground truth. It is computed based on the ground truth and the retrieved context, and the values range between 0 and 1, with higher values indicating better performance.\n", - "\n", - "To estimate context recall from the ground truth answer, each sentence in the ground truth answer is analyzed to determine whether it can be attributed to the retrieved context or not. In an ideal scenario, all sentences in the ground truth answer should be attributable to the retrieved context." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " \"validmind.model_validation.ragas.ContextRecall\",\n", - " inputs={\"dataset\": vm_test_ds},\n", - " param_grid={\n", - " \"user_input_column\": [\"question\"],\n", - " \"retrieved_contexts_column\": [\"retrieval_k5_model_prediction\", \"retrieval_k10_model_prediction\"],\n", - " \"reference_column\": [\"ground_truth\"],\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Answer Correctness\n", - "\n", - "The assessment of Answer Correctness involves gauging the accuracy of the generated answer when compared to the ground truth. This evaluation relies on the ground truth and the answer, with scores ranging from 0 to 1. A higher score indicates a closer alignment between the generated answer and the ground truth, signifying better correctness.\n", - "\n", - "Answer correctness encompasses two critical aspects: semantic similarity between the generated answer and the ground truth, as well as factual similarity. These aspects are combined using a weighted scheme to formulate the answer correctness score.\n", - "\n", - "Factual correctness quantifies the factual overlap between the generated answer and the ground truth answer. This is done using the concepts of:\n", - "\n", - "- TP (True Positive): Facts or statements that are present in both the ground truth and the generated answer.\n", - "- FP (False Positive): Facts or statements that are present in the generated answer but not in the ground truth.\n", - "- FN (False Negative): Facts or statements that are present in the ground truth but not in the generated answer." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " \"validmind.model_validation.ragas.AnswerCorrectness\",\n", - " inputs={\"dataset\": vm_test_ds},\n", - " param_grid={\n", - " \"user_input_column\": [\"question\"],\n", - " \"response_column\": [\"rag_k5_gpt35_model_prediction\", \"rag_k5_gpt4o_model_prediction\"],\n", - " \"reference_column\": [\"ground_truth\"],\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Aspect Critic\n", - "\n", - "This is designed to assess submissions based on predefined aspects such as harmlessness and correctness. Additionally, users have the flexibility to define their own aspects for evaluating submissions according to their specific criteria. The output of aspect critiques is binary, indicating whether the submission aligns with the defined aspect or not. This evaluation is performed using the ‘answer’ as input.\n", - "\n", - "Critiques within the LLM evaluators evaluate submissions based on the provided aspect. Ragas Critiques offers a range of predefined aspects like correctness, harmfulness, etc. Users can also define their own aspects for evaluating submissions based on their specific criteria. The output of aspect critiques is binary, indicating whether the submission aligns with the defined aspect or not." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " \"validmind.model_validation.ragas.AspectCritic\",\n", - " inputs={\"dataset\": vm_test_ds},\n", - " param_grid={\n", - " \"user_input_column\": [\"question\"],\n", - " \"response_column\": [\"rag_k5_gpt35_model_prediction\", \"rag_k5_gpt4o_model_prediction\"],\n", - " \"retrieved_contexts_column\": [\"retrieval_k5_model_prediction\"],\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Noise Sensitivity\n", - "\n", - "This test is designed to evaluate the robustness of the RAG pipeline model against noise in the retrieved context. It works by checking how well the \"claims\" in the generated answer match up with the \"claims\" in the ground truth answer. If the generated answer contains \"claims\" from the contexts that the ground truth answer does not contain, those claims are considered incorrect. The score for each answer is the number of incorrect claims divided by the total number of claims. This *can* be interpreted as a measure of how sensitive the LLM is to \"noise\" in the context where \"noise\" is information that is relevant but should not be included in the answer since the ground truth answer does not contain it." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " \"validmind.model_validation.ragas.NoiseSensitivity\",\n", - " inputs={\"dataset\": vm_test_ds},\n", - " param_grid={\n", - " \"user_input_column\": [\"question\"],\n", - " \"response_column\": [\"rag_k5_gpt35_model_prediction\", \"rag_k5_gpt4o_model_prediction\"],\n", - " \"reference_column\": [\"ground_truth\"],\n", - " \"retrieved_contexts_column\": [\"retrieval_k5_model_prediction\"],\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "## Generation quality\n", - "\n", - "In this section, we evaluate the alignment and relevance of generated responses to reference outputs within our retrieval-augmented generation (RAG) application. We use metrics that assess various quality dimensions of the generated responses, including semantic similarity, structural alignment, and phrasing overlap. Semantic similarity metrics compare embeddings of generated and reference text to capture deeper contextual alignment, while overlap and alignment measures quantify how well the phrasing and structure of generated responses match the intended outputs." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Token Disparity\n", - "\n", - "This test assesses the difference in token counts between the reference texts (ground truth) and the answers generated by the RAG model. It helps evaluate how well the model's outputs align with the expected length and level of detail in the reference texts. A significant disparity in token counts could signal issues with generation quality, such as excessive verbosity or insufficient detail. Consistently low token counts in generated answers compared to references might suggest that the model’s outputs are incomplete or overly concise, missing important contextual information." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " \"validmind.model_validation.TokenDisparity\",\n", - " input_grid={\n", - " \"dataset\": [vm_test_ds],\n", - " \"model\": [vm_rag_k5_gpt35_model, vm_rag_k5_gpt4o_model],\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### ROUGE Score\n", - "\n", - "This test evaluates the quality of answers generated by the RAG model by measuring overlaps in n-grams, word sequences, and word pairs between the model output and the reference (ground truth) text. ROUGE, short for Recall-Oriented Understudy for Gisting Evaluation, assesses both precision and recall, providing a balanced view of how well the generated response captures the reference content. ROUGE precision measures the proportion of n-grams in the generated text that match the reference, highlighting relevance and conciseness, while ROUGE recall assesses the proportion of reference n-grams present in the generated text, indicating completeness and thoroughness. \n", - "\n", - "Low precision scores might reveal that the generated text includes redundant or irrelevant information, while low recall scores suggest omissions of essential details from the reference. Consistently low ROUGE scores could indicate poor overall alignment with the ground truth, suggesting the model may be missing key content or failing to capture the intended meaning." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " \"validmind.model_validation.RougeScore\",\n", - " input_grid={\n", - " \"dataset\": [vm_test_ds],\n", - " \"model\": [vm_rag_k5_gpt35_model, vm_rag_k5_gpt4o_model],\n", - " },\n", - " params={\n", - " \"metric\": \"rouge-1\",\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### BLEU Score\n", - "\n", - "The BLEU Score test evaluates the quality of answers generated by the RAG model by measuring n-gram overlap between the generated text and the reference (ground truth) text, with a specific focus on exact precision in phrasing. While ROUGE precision also assesses overlap, BLEU differs in two main ways: first, it applies a geometric average across multiple n-gram levels, capturing precise phrase alignment, and second, it includes a brevity penalty to prevent overly short outputs from inflating scores artificially. This added precision focus is valuable in RAG applications where strict adherence to reference language is essential, as BLEU emphasizes the match to exact phrasing. In contrast, ROUGE precision evaluates general content overlap without penalizing brevity, offering a broader sense of content alignment." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " \"validmind.model_validation.BleuScore\",\n", - " input_grid={\n", - " \"dataset\": [vm_test_ds],\n", - " \"model\": [vm_rag_k5_gpt35_model, vm_rag_k5_gpt4o_model],\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### BERT Score\n", - "\n", - "This test evaluates the quality of the RAG generated answers using BERT embeddings to measure precision, recall, and F1 scores based on semantic similarity, rather than exact n-gram matches as in BLEU and ROUGE. 
This approach captures contextual meaning, making it valuable when wording differs but the intended message closely aligns with the reference. In RAG applications, the BERT score is especially useful for ensuring that generated answers convey the reference text’s meaning, even if phrasing varies. Consistently low scores indicate a lack of semantic alignment, suggesting the model may miss or misrepresent key content. Low precision may reflect irrelevant or redundant details, while low recall can indicate omissions." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " \"validmind.model_validation.BertScore\",\n", - " input_grid={\n", - " \"dataset\": [vm_test_ds],\n", - " \"model\": [vm_rag_k5_gpt35_model, vm_rag_k5_gpt4o_model],\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### METEOR Score\n", - "\n", - "This test evaluates the quality of the generated answers by measuring alignment with the ground truth, emphasizing both accuracy and fluency. Unlike BLEU and ROUGE, which focus on n-gram matches, METEOR combines precision, recall, synonym matching, and word order, focusing at how well the generated text conveys meaning and reads naturally. This metric is especially useful for RAG applications where sentence structure and natural flow are crucial for clear communication. Lower scores may suggest alignment issues, indicating that the answers may lack fluency or key content. Discrepancies in word order or high fragmentation penalties can reveal problems with how the model constructs sentences, potentially affecting readability." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " \"validmind.model_validation.MeteorScore\",\n", - " input_grid={\n", - " \"dataset\": [vm_test_ds],\n", - " \"model\": [vm_rag_k5_gpt35_model, vm_rag_k5_gpt4o_model],\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "## Bias and Toxicity\n", - "\n", - "In this section, we use metrics like Toxicity Score and Regard Score to evaluate both the generated responses and the ground truth. These tests helps us detect any harmful, offensive, or inappropriate language and evaluate the level of bias and neutrality enabling us to assess and mitigate potential biases in both the model's responses and the original dataset." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Toxicity Score\n", - "\n", - "This test measures the level of harmful or offensive content in the generated answers. The test uses a preloaded toxicity detection tool from Hugging Face, which identifies language that may be inappropriate, aggressive, or derogatory. High toxicity scores indicate potentially toxic content, while consistently elevated scores across multiple outputs may signal underlying issues in the model’s generation process that require attention to prevent the spread of harmful language." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " \"validmind.model_validation.ToxicityScore\",\n", - " input_grid={\n", - " \"dataset\": [vm_test_ds],\n", - " \"model\": [vm_rag_k5_gpt35_model, vm_rag_k5_gpt4o_model],\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Regard Score\n", - "\n", - "This test evaluates the sentiment and perceived regard—categorized as positive, negative, neutral, or other—in answers generated by the RAG model. This is important for identifying any biases or sentiment tendencies in responses, ensuring that generated answers are balanced and appropriate for the context. The uses a preloaded regard evaluation tool from Hugging Face to compute scores for each response. High skewness in regard scores, especially if the generated responses consistently diverge from expected sentiments in the reference texts, may reveal biases in the model’s generation, such as overly positive or negative tones where neutrality is expected." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " \"validmind.model_validation.RegardScore\",\n", - " input_grid={\n", - " \"dataset\": [vm_test_ds],\n", - " \"model\": [vm_rag_k5_gpt35_model, vm_rag_k5_gpt4o_model],\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Conclusion\n", - "\n", - "In this notebook, we have seen how we can use LangChain and ValidMind together to build, evaluate and document a simple RAG Model as its developed. This is a great example of the interactive development experience that ValidMind is designed to support. We can quickly iterate on our model and document as we go... 
We have seen how ValidMind supports non-traditional \"models\" using a functional interface and how we can build pipelines of many models to support complex GenAI workflows.\n", - "\n", - "This is still a work in progress and we are actively developing new tests to support more advanced GenAI workflows. We are also keeping an eye on the most popular GenAI models and libraries to explore direct integrations. Stay tuned for more updates and new features in this area!" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "## Upgrade ValidMind\n", - "\n", - "
After installing ValidMind, you’ll want to periodically make sure you are on the latest version to access any new features and other enhancements.
\n", - "\n", - "Retrieve the information for the currently installed version of ValidMind:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%pip show validmind" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "If the version returned is lower than the version indicated in our [production open-source code](https://github.com/validmind/validmind-library/blob/prod/validmind/__version__.py), restart your notebook and run:\n", - "\n", - "```bash\n", - "%pip install --upgrade validmind\n", - "```" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You may need to restart your kernel after running the upgrade package for changes to be applied." - ] - }, - { - "cell_type": "markdown", - "id": "copyright-09e315440ca84258abe1aaefaca3a3d0", - "metadata": {}, - "source": [ - "\n", - "\n", - "\n", - "\n", - "***\n", - "\n", - "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", - "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", - "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "ValidMind Library", - "language": "python", - "name": "validmind" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.15" - } - }, - "nbformat": 4, - "nbformat_minor": 2 + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# RAG Model Benchmarking Demo\n", + "\n", + "In this notebook, we are going to implement a simple RAG Model for automating the process of answering RFP questions using GenAI. We will see how we can initialize an embedding model, a retrieval model and a generator model with LangChain components and use them within the ValidMind Library to run tests against them. We'll demonstrate how to set up multiple models for benchmarking at each stage of the RAG pipeline - specifically two embedding models, two retrieval models with different parameters, and two LLM models (GPT-3.5 and GPT-4o) - allowing for comparison of performance across different configurations. Finally, we will see how we can put them together in a Pipeline and run that to get e2e results and run tests against that." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "::: {.content-hidden when-format=\"html\"}\n", + "## Contents \n", + "- [About ValidMind](#toc1__) \n", + " - [Before you begin](#toc1_1__) \n", + " - [New to ValidMind?](#toc1_2__) \n", + " - [Key concepts](#toc1_3__) \n", + "- [Setting up](#toc2__) \n", + " - [Install the ValidMind Library](#toc2_1__) \n", + " - [Initialize the ValidMind Library](#toc2_2__) \n", + " - [Register sample model](#toc2_2_1__) \n", + " - [Apply documentation template](#toc2_2_2__) \n", + " - [Get your code snippet](#toc2_2_3__) \n", + "- [Read Open AI API Key](#toc3__) \n", + "- [Dataset Loader](#toc4__) \n", + "- [Data validation](#toc5__) \n", + " - [Duplicates](#toc5_1__) \n", + " - [Stop Words](#toc5_2__) \n", + " - [Punctuations](#toc5_3__) \n", + " - [Common Words](#toc5_4__) \n", + " - [Language Detection](#toc5_5__) \n", + " - [Toxicity Score](#toc5_6__) \n", + " - [Polarity and Subjectivity](#toc5_7__) \n", + " - [Sentiment](#toc5_8__) \n", + " - [Assign Predictions](#toc5_9__) \n", + " - [Run tests](#toc5_10__) \n", + " - [Generate embeddings for the Train Set](#toc5_11__) \n", + " - [Insert embeddings and questions into Vector DB](#toc5_12__) \n", + "- [Prompt Evaluation](#toc6__) \n", + "- [RAGAS evaluation](#toc7__) \n", + " - [Semantic Similarity](#toc7_1__) \n", + " - [Context Entity Recall](#toc7_2__) \n", + " - [Context Precision](#toc7_3__) \n", + " - [Context Precision Without Reference](#toc7_4__) \n", + " - [Faithfulness](#toc7_5__) \n", + " - [Response Relevancy](#toc7_6__) \n", + " - [Context Recall](#toc7_7__) \n", + " - [Answer Correctness](#toc7_8__) \n", + " - [Aspect Critic](#toc7_9__) \n", + " - [Noise Sensitivity](#toc7_10__) \n", + "- [Generation quality](#toc8__) \n", + " - [Token Disparity](#toc8_1__) \n", + " - [ROUGE Score](#toc8_2__) \n", + " - [BLEU Score](#toc8_3__) \n", + " - [BERT Score](#toc8_4__) \n", + " - [METEOR Score](#toc8_5__) \n", + "- [Bias and Toxicity](#toc9__) 
\n", + " - [Toxicity Score](#toc9_1__) \n", + " - [Regard Score](#toc9_2__) \n", + "- [Upgrade ValidMind](#toc10__) \n", + "\n", + ":::\n", + "\n", + "" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## About ValidMind\n", + "\n", + "ValidMind is a suite of tools for managing model risk, including risk associated with AI and statistical models.\n", + "\n", + "You use the ValidMind Library to automate documentation and validation tests, and then use the ValidMind Platform to collaborate on model documentation. Together, these products simplify model risk management, facilitate compliance with regulations and institutional standards, and enhance collaboration between yourself and model validators.\n", + "\n", + "\n", + "\n", + "### Before you begin\n", + "\n", + "This notebook assumes you have basic familiarity with Python, including an understanding of how functions work. If you are new to Python, you can still run the notebook but we recommend further familiarizing yourself with the language. \n", + "\n", + "If you encounter errors due to missing modules in your Python environment, install the modules with `pip install`, and then re-run the notebook. For more help, refer to [Installing Python Modules](https://docs.python.org/3/installing/index.html).\n", + "\n", + "\n", + "\n", + "### New to ValidMind?\n", + "\n", + "If you haven't already seen our documentation on the [ValidMind Library](https://docs.validmind.ai/developer/validmind-library.html), we recommend you begin by exploring the available resources in this section. There, you can learn more about documenting models and running tests, as well as find code samples and our Python Library API reference.\n", + "\n", + "
To access all features available in this notebook, you'll need a ValidMind account.\n", + "

\n", + "Register with ValidMind
\n", + "\n", + "\n", + "\n", + "### Key concepts\n", + "\n", + "- **FunctionModels**: ValidMind offers support for creating `VMModel` instances from Python functions. This enables us to support any \"model\" by simply using the provided function as the model's `predict` method.\n", + "- **PipelineModels**: ValidMind models (`VMModel` instances) of any type can be piped together to create a model pipeline. This allows model components to be created and tested/documented independently, and then combined into a single model for end-to-end testing and documentation. We use the `|` operator to pipe models together.\n", + "- **RAG**: RAG stands for Retrieval Augmented Generation and refers to a wide range of GenAI applications where some form of retrieval is used to add context to the prompt so that the LLM that generates content can refer to it when creating its output. In this notebook, we are going to implement a simple RAG setup using LangChain components." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Prerequisites\n", + "\n", + "Let's go ahead and install the `validmind` library if its not already installed... Then we can install the `qdrant-client` library for our vector store and `langchain` for everything else:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%pip install -q \"validmind[llm]\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%pip install -q qdrant-client langchain langchain-openai sentencepiece" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Setting up" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Install the ValidMind Library\n", + "\n", + "
Recommended Python versions\n", + "

\n", + "Python 3.8 <= x <= 3.11
\n", + "\n", + "To install the library:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%pip install -q validmind" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Initialize the ValidMind Library" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Register sample model\n", + "\n", + "Let's first register a sample model for use with this notebook:\n", + "\n", + "1. In a browser, [log in to ValidMind](https://docs.validmind.ai/guide/configuration/log-in-to-validmind.html).\n", + "\n", + "2. In the left sidebar, navigate to **Inventory** and click **+ Register Model**.\n", + "\n", + "3. Enter the model details and click **Next >** to continue to assignment of model stakeholders. ([Need more help?](https://docs.validmind.ai/guide/model-inventory/register-models-in-inventory.html))\n", + "\n", + "4. Select your own name under the **MODEL OWNER** drop-down.\n", + "\n", + "5. Click **Register Model** to add the model to your inventory." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Apply documentation template\n", + "\n", + "Once you've registered your model, let's select a documentation template. A template predefines sections for your model documentation and provides a general outline to follow, making the documentation process much easier.\n", + "\n", + "1. In the left sidebar that appears for your model, click **Documents** and select **Documentation**.\n", + "\n", + "2. Under **TEMPLATE**, select `Gen AI RAG`.\n", + "\n", + "3. Click **Use Template** to apply the template." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "
Can't select this template?\n", + "

\n", + "Your organization administrators may need to add it to your template library:\n", + "\n", + "
" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Get your code snippet\n", + "\n", + "ValidMind generates a unique _code snippet_ for each registered model to connect with your developer environment. You initialize the ValidMind Library with this code snippet, which ensures that your documentation and tests are uploaded to the correct model when you run the notebook.\n", + "\n", + "1. On the left sidebar that appears for your model, select **Getting Started** and click **Copy snippet to clipboard**.\n", + "2. Next, [load your model identifier credentials from an `.env` file](https://docs.validmind.ai/developer/model-documentation/store-credentials-in-env-file.html) or replace the placeholder with your own code snippet:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Load your model identifier credentials from an `.env` file\n", + "\n", + "%load_ext dotenv\n", + "%dotenv .env\n", + "\n", + "# Or replace with your code snippet\n", + "\n", + "import validmind as vm\n", + "\n", + "vm.init(\n", + " api_host = \"https://api.prod.validmind.ai/api/v1/tracking\",\n", + " api_key = \"...\",\n", + " api_secret = \"...\",\n", + " model = \"...\"\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Read Open AI API Key\n", + "\n", + "We will need to have an OpenAI API key to be able to use their `text-embedding-3-small` and `text-embedding-3-large` models for our embeddings, `gpt-3.5-turbo` and `gpt-4o` models for our generator and `gpt-4o` model for our LLM-as-Judge tests. If you don't have an OpenAI API key, you can get one by signing up at [OpenAI](https://platform.openai.com/signup). Then you can create a `.env` file in the root of your project and the following cell will load it from there. Alternatively, you can just uncomment the line below to directly set the key (not recommended for security reasons)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# load openai api key\n", + "import os\n", + "\n", + "import dotenv\n", + "import nltk\n", + "\n", + "dotenv.load_dotenv()\n", + "nltk.download('stopwords')\n", + "nltk.download('punkt_tab')\n", + "\n", + "# os.environ[\"OPENAI_API_KEY\"] = \"sk-...\"\n", + "\n", + "if not \"OPENAI_API_KEY\" in os.environ:\n", + " raise ValueError(\"OPENAI_API_KEY is not set\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Dataset Loader\n", + "\n", + "Great, now that we have all of our dependencies installed, the ValidMind Library initialized and connected to our model and our OpenAI API key setup, we can go ahead and load our datasets. We will use the synthetic `RFP` dataset included with ValidMind for this notebook. This dataset contains a variety of RFP questions and ground truth answers that we can use both as the source where our Retriever will search for similar question-answer pairs as well as our test set for evaluating the performance of our RAG model. To do this, we just have to load it and call the preprocess function to get a split of the data into train and test sets." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "# Import the sample dataset from the library\n", + "from validmind.datasets.llm.rag import rfp\n", + "\n", + "raw_df = rfp.load_data()\n", + "train_df, test_df = rfp.preprocess(raw_df)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "vm_train_ds = vm.init_dataset(\n", + " train_df,\n", + " text_column=\"question\",\n", + " target_column=\"ground_truth\",\n", + ")\n", + "\n", + "vm_test_ds = vm.init_dataset(\n", + " test_df,\n", + " text_column=\"question\",\n", + " target_column=\"ground_truth\",\n", + ")\n", + "\n", + "vm_test_ds.df.head()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Data validation\n", + "\n", + "Now that we have loaded our dataset, we can go ahead and run some data validation tests right away to start assessing and documenting the quality of our data. Since we are using a text dataset, we can use ValidMind's built-in array of text data quality tests to check that things like number of duplicates, missing values, and other common text data issues are not present in our dataset. We can also run some tests to check the sentiment and toxicity of our data." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Duplicates\n", + "\n", + "First, let's check for duplicates in our dataset. 
We can use the `validmind.data_validation.Duplicates` test and pass our dataset:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from validmind.tests import run_test\n", + "\n", + "run_test(\n", + " test_id=\"validmind.data_validation.Duplicates\",\n", + " inputs={\"dataset\": vm_train_ds},\n", + ").log()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Stop Words\n", + "\n", + "Next, let's check for stop words in our dataset. We can use the `validmind.data_validation.StopWords` test and pass our dataset:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " test_id=\"validmind.data_validation.nlp.StopWords\",\n", + " inputs={\n", + " \"dataset\": vm_train_ds,\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Punctuations\n", + "\n", + "Next, let's check for punctuations in our dataset. We can use the `validmind.data_validation.Punctuations` test:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " test_id=\"validmind.data_validation.nlp.Punctuations\",\n", + " inputs={\n", + " \"dataset\": vm_train_ds,\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Common Words\n", + "\n", + "Next, let's check for common words in our dataset. 
We can use the `validmind.data_validation.CommonWord` test:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " test_id=\"validmind.data_validation.nlp.CommonWords\",\n", + " inputs={\n", + " \"dataset\": vm_train_ds,\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Language Detection\n", + "\n", + "For documentation purposes, we can detect and log the languages used in the dataset with the `validmind.data_validation.LanguageDetection` test:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " test_id=\"validmind.data_validation.nlp.LanguageDetection\",\n", + " inputs={\n", + " \"dataset\": vm_train_ds,\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Toxicity Score\n", + "\n", + "Now, let's go ahead and run the `validmind.data_validation.nlp.Toxicity` test to compute a toxicity score for our dataset:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " \"validmind.data_validation.nlp.Toxicity\",\n", + " inputs={\n", + " \"dataset\": vm_train_ds,\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Polarity and Subjectivity\n", + "\n", + "We can also run the `validmind.data_validation.nlp.PolarityAndSubjectivity` test to compute the polarity and subjectivity of our dataset:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " \"validmind.data_validation.nlp.PolarityAndSubjectivity\",\n", + " inputs={\n", + " \"dataset\": vm_train_ds,\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + 
"\n", + "\n", + "### Sentiment\n", + "\n", + "Finally, we can run the `validmind.data_validation.nlp.Sentiment` test to plot the sentiment of our dataset:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " \"validmind.data_validation.nlp.Sentiment\",\n", + " inputs={\n", + " \"dataset\": vm_train_ds,\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Embedding Model\n", + "\n", + "Now that we have our dataset loaded and have run some data validation tests to assess and document the quality of our data, we can go ahead and initialize our embedding model. We will use `text-embedding-3-small` and `text-embedding-3-large` models from OpenAI for this purpose wrapped in the `OpenAIEmbeddings` class from LangChain. This model will be used to \"embed\" our questions both for inserting the question-answer pairs from the \"train\" set into the vector store and for embedding the question from inputs when making predictions with our RAG model." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [], + "source": [ + "from langchain_openai import OpenAIEmbeddings\n", + "\n", + "embedding_small_client = OpenAIEmbeddings(model=\"text-embedding-3-small\")\n", + "\n", + "\n", + "def embed_small(input):\n", + " \"\"\"Returns a text embedding for the given text\"\"\"\n", + " return embedding_small_client.embed_query(input[\"question\"])\n", + "\n", + "\n", + "vm_embedder_small = vm.init_model(input_id=\"embedding_small_model\", predict_fn=embed_small)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "embedding_large_client = OpenAIEmbeddings(model=\"text-embedding-3-large\")\n", + "\n", + "\n", + "def embed_large(input):\n", + " \"\"\"Returns a text embedding for the given text\"\"\"\n", + " return embedding_large_client.embed_query(input[\"question\"])\n", + "\n", + "\n", + "vm_embedder_large = vm.init_model(input_id=\"embedding_large_model\", predict_fn=embed_large)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "What we have done here is to initialize the `OpenAIEmbeddings` class so it uses OpenAI's `text-embedding-3-small` and `text-embedding-3-large` models. We then created an `embed` function that takes in an `input` dictionary and uses the `embed_query` method of the embedding client to compute the embeddings of the `question`. We use an `embed` function since that is how ValidMind supports any custom model. We will use this strategy for the retrieval and generator models as well but you could also use, say, a HuggingFace model directly. See the documentation for more information on which model types are directly supported - [ValidMind Documentation](https://docs.validmind.ai/validmind/validmind.html)... Finally, we use the `init_model` function from the ValidMind Library to create a `VMModel` object that can be used in ValidMind tests. 
This also logs the model to our model documentation and any test that uses the model will be linked to the logged model and its metadata." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Assign Predictions\n", + "\n", + "To precompute the embeddings for our test set, we can call the `assign_predictions` method of our `vm_test_ds` object we created above. This will compute the embeddings for each question in the test set and store them in the a special prediction column of the test set thats linked to our `vm_embedder` model. This will allow us to use these embeddings later when we run tests against our embedding model." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "vm_test_ds.assign_predictions(vm_embedder_small)\n", + "vm_test_ds.assign_predictions(vm_embedder_large)\n", + "print(vm_test_ds)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Run tests\n", + "\n", + "Now that everything is setup for the embedding model, we can go ahead and run some tests to assess and document the quality of our embeddings. We will use the `validmind.model_validation.embeddings.*` tests to compute a variety of metrics against our model." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " \"validmind.model_validation.embeddings.StabilityAnalysisRandomNoise\",\n", + " input_grid={\n", + " \"model\": [vm_embedder_small, vm_embedder_large],\n", + " \"dataset\": [vm_test_ds],\n", + " },\n", + " params={\n", + " \"probability\": 0.3,\n", + " \"mean_similarity_threshold\": 0.7,\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " \"validmind.model_validation.embeddings.StabilityAnalysisSynonyms\",\n", + " input_grid={\n", + " \"model\": [vm_embedder_small, vm_embedder_large],\n", + " \"dataset\": [vm_test_ds],\n", + " },\n", + " params={\n", + " \"probability\": 0.3,\n", + " \"mean_similarity_threshold\": 0.7,\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " \"validmind.model_validation.embeddings.StabilityAnalysisTranslation\",\n", + " input_grid={\n", + " \"model\": [vm_embedder_small, vm_embedder_large],\n", + " \"dataset\": [vm_test_ds],\n", + " },\n", + " params={\n", + " \"source_lang\": \"en\",\n", + " \"target_lang\": \"fr\",\n", + " \"mean_similarity_threshold\": 0.7,\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " \"validmind.model_validation.embeddings.CosineSimilarityHeatmap\",\n", + " input_grid={\n", + " \"model\": [vm_embedder_small, vm_embedder_large],\n", + " \"dataset\": [vm_test_ds],\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " \"validmind.model_validation.embeddings.CosineSimilarityDistribution\",\n", + " input_grid={\n", + " \"model\": [vm_embedder_small, 
vm_embedder_large],\n", + " \"dataset\": [vm_test_ds],\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " \"validmind.model_validation.embeddings.PCAComponentsPairwisePlots\",\n", + " input_grid={\n", + " \"model\": [vm_embedder_small, vm_embedder_large],\n", + " \"dataset\": [vm_test_ds],\n", + " },\n", + " params={\n", + " \"n_components\": 3,\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Setup Vector Store\n", + "\n", + "Great, so now that we have assessed our embedding model and verified that it is performing well, we can go ahead and use it to compute embeddings for our question-answer pairs in the \"train\" set. We will then use these embeddings to insert the question-answer pairs into a vector store. We will use an in-memory `qdrant` vector database for demo purposes but any option would work just as well here. We will use the `QdrantClient` class from LangChain to interact with the vector store. This class will allow us to insert and search for embeddings in the vector store." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Generate embeddings for the Train Set\n", + "\n", + "We can use the same `assign_predictions` method from earlier except this time we will use the `vm_train_ds` object to compute the embeddings for the question-answer pairs in the \"train\" set." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "vm_train_ds.assign_predictions(vm_embedder_small)\n", + "print(vm_train_ds)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Insert embeddings and questions into Vector DB\n", + "\n", + "Now that we have computed the embeddings for our question-answer pairs in the \"train\" set, we can go ahead and insert them into the vector store:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from langchain_community.vectorstores import Qdrant\n", + "from langchain_community.document_loaders import DataFrameLoader\n", + "\n", + "# load documents from dataframe\n", + "loader = DataFrameLoader(train_df, page_content_column=\"question\")\n", + "docs = loader.load()\n", + "\n", + "# setup vector datastore\n", + "qdrant = Qdrant.from_documents(\n", + " docs,\n", + " embedding_small_client,\n", + " location=\":memory:\", # Local mode with in-memory storage only\n", + " collection_name=\"rfp_rag_collection\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Retrieval Model\n", + "\n", + "Now that we have an embedding model and a vector database setup and loaded with our data, we need a Retrieval model that can search for similar question-answer pairs for a given input question. Once created, we can initialize this as a ValidMind model and `assign_predictions` to it just like our embedding model. In this example, we'll create two retrieval models with different `k` parameters (the number of documents retrieved) to benchmark and compare their performance. This approach allows us to evaluate how retrieval depth affects the overall system quality." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 26, + "metadata": {}, + "outputs": [], + "source": [ + "def retrieve(input):\n", + " contexts = []\n", + "\n", + " for result in qdrant.similarity_search_with_score(input[\"question\"], k=5):\n", + " document, score = result\n", + " context = f\"Q: {document.page_content}\\n\"\n", + " context += f\"A: {document.metadata['ground_truth']}\\n\"\n", + "\n", + " contexts.append(context)\n", + "\n", + " return contexts\n", + "\n", + "\n", + "vm_retriever_k5 = vm.init_model(input_id=\"retrieval_k5_model\", predict_fn=retrieve)" + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "metadata": {}, + "outputs": [], + "source": [ + "def retrieve(input):\n", + " contexts = []\n", + "\n", + " for result in qdrant.similarity_search_with_score(input[\"question\"], k=10):\n", + " document, score = result\n", + " context = f\"Q: {document.page_content}\\n\"\n", + " context += f\"A: {document.metadata['ground_truth']}\\n\"\n", + "\n", + " contexts.append(context)\n", + "\n", + " return contexts\n", + "\n", + "\n", + "vm_retriever_k10 = vm.init_model(input_id=\"retrieval_k10_model\", predict_fn=retrieve)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "vm_test_ds.assign_predictions(model=vm_retriever_k5)\n", + "vm_test_ds.assign_predictions(model=vm_retriever_k10)\n", + "print(vm_test_ds)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "vm_test_ds._df.head()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Generation Model\n", + "\n", + "As the final piece of this simple RAG pipeline, we can create and initialize a generation model that will use the retrieved context to generate an answer to the input question. We will use the `gpt-3.5-turbo` and `gpt-4o` models from OpenAI. 
Since we have two retrieval models (with different `k` values) and want to test two different LLMs, we'll create a total of four generator models - pairing each retrieval configuration with each LLM to comprehensively evaluate how both retrieval depth and model capability affect response quality." + ] + }, + { + "cell_type": "code", + "execution_count": 30, + "metadata": {}, + "outputs": [], + "source": [ + "from openai import OpenAI\n", + "\n", + "from validmind.models import Prompt\n", + "\n", + "\n", + "system_prompt = \"\"\"\n", + "You are an expert RFP AI assistant.\n", + "You are tasked with answering new RFP questions based on existing RFP questions and answers.\n", + "You will be provided with the existing RFP questions and answer pairs that are the most relevant to the new RFP question.\n", + "After that you will be provided with a new RFP question.\n", + "You will generate an answer and respond only with the answer.\n", + "Ignore your pre-existing knowledge and answer the question based on the provided context.\n", + "\"\"\".strip()\n", + "\n", + "openai_client = OpenAI()" + ] + }, + { + "cell_type": "code", + "execution_count": 31, + "metadata": {}, + "outputs": [], + "source": [ + "def generate(input):\n", + " \n", + " response = openai_client.chat.completions.create(\n", + " model=\"gpt-3.5-turbo\",\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": \"\\n\\n\".join(input[\"retrieval_k5_model\"])},\n", + " {\"role\": \"user\", \"content\": input[\"question\"]},\n", + " ],\n", + " )\n", + " \n", + " return response.choices[0].message.content\n", + "\n", + "\n", + "vm_generator_k5_gpt35 = vm.init_model(\n", + " input_id=\"generation_k5_gpt35_model\",\n", + " predict_fn=generate,\n", + " prompt=Prompt(template=system_prompt),\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 32, + "metadata": {}, + "outputs": [], + "source": [ + "def generate(input):\n", + " response = 
openai_client.chat.completions.create(\n", + " model=\"gpt-3.5-turbo\",\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": \"\\n\\n\".join(input[\"retrieval_k10_model\"])},\n", + " {\"role\": \"user\", \"content\": input[\"question\"]},\n", + " ],\n", + " )\n", + "\n", + " return response.choices[0].message.content\n", + "\n", + "\n", + "vm_generator_k10_gpt35 = vm.init_model(\n", + " input_id=\"generation_k10_gpt35_model\",\n", + " predict_fn=generate,\n", + " prompt=Prompt(template=system_prompt),\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 33, + "metadata": {}, + "outputs": [], + "source": [ + "def generate(input):\n", + " \n", + " response = openai_client.chat.completions.create(\n", + " model=\"gpt-4o\",\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": \"\\n\\n\".join(input[\"retrieval_k5_model\"])},\n", + " {\"role\": \"user\", \"content\": input[\"question\"]},\n", + " ],\n", + " )\n", + " \n", + " return response.choices[0].message.content\n", + "\n", + "\n", + "vm_generator_k5_gpt4o = vm.init_model(\n", + " input_id=\"generation_k5_gpt4o_model\",\n", + " predict_fn=generate,\n", + " prompt=Prompt(template=system_prompt),\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def generate(input):\n", + " response = openai_client.chat.completions.create(\n", + " model=\"gpt-4o\",\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": \"\\n\\n\".join(input[\"retrieval_k10_model\"])},\n", + " {\"role\": \"user\", \"content\": input[\"question\"]},\n", + " ],\n", + " )\n", + "\n", + " return response.choices[0].message.content\n", + "\n", + "\n", + "vm_generator_k10_gpt4o = vm.init_model(\n", + " input_id=\"generation_k10_gpt4o_model\",\n", + " 
predict_fn=generate,\n", + " prompt=Prompt(template=system_prompt),\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's test it out real quick:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import pandas as pd\n", + "\n", + "vm_generator_k5_gpt35.predict(\n", + " pd.DataFrame(\n", + " {\"retrieval_k5_model\": [[\"My name is anil\"]], \"question\": [\"what is my name\"]}\n", + " )\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "vm_generator_k5_gpt4o.predict(\n", + " pd.DataFrame(\n", + " {\"retrieval_k5_model\": [[\"My name is anil\"]], \"question\": [\"what is my name\"]}\n", + " )\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Prompt Evaluation\n", + "\n", + "Now that we have our generator model initialized, we can run some LLM-as-Judge tests to evaluate the system prompt. This will allow us to get an initial sense of how well the prompt meets a few best practices for prompt engineering. These tests use an LLM to rate the prompt on a scale of 1-10 against the following criteria:\n", + "\n", + "- **Examplar Bias**: When using multi-shot prompting, does the prompt contain an unbiased distribution of examples?\n", + "- **Delimitation**: When using complex prompts containing examples, contextual information, or other elements, is the prompt formatted in such a way that each element is clearly separated?\n", + "- **Clarity**: How clearly the prompt states the task.\n", + "- **Conciseness**: How succinctly the prompt states the task.\n", + "- **Instruction Framing**: Whether the prompt contains negative instructions.\n", + "- **Specificity**: How specific the prompt defines the task." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " \"validmind.prompt_validation.Bias\",\n", + " inputs={\n", + " \"model\": vm_generator_k5_gpt4o,\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " \"validmind.prompt_validation.Clarity\",\n", + " inputs={\n", + " \"model\": vm_generator_k5_gpt4o,\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " \"validmind.prompt_validation.Conciseness\",\n", + " inputs={\n", + " \"model\": vm_generator_k5_gpt4o,\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " \"validmind.prompt_validation.Delimitation\",\n", + " inputs={\n", + " \"model\": vm_generator_k5_gpt4o,\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " \"validmind.prompt_validation.NegativeInstruction\",\n", + " inputs={\n", + " \"model\": vm_generator_k5_gpt4o,\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " \"validmind.prompt_validation.Specificity\",\n", + " inputs={\n", + " \"model\": vm_generator_k5_gpt4o,\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Setup RAG Pipeline Model\n", + "\n", + "Now that we have all of our individual \"component\" models setup and initialized we need some way to put them all together in a single \"pipeline\". We can use the `PipelineModel` class to do this. 
This ValidMind model type simply wraps any number of other ValidMind models and runs them in sequence. We can use a pipe(`|`) operator - in Python this is normally an `or` operator but we have overloaded it for easy pipeline creation - to chain together our models. We can then initialize this pipeline model and assign predictions to it just like any other model." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "vm_rag_k5_gpt35_model = vm.init_model(vm_retriever_k5 | vm_generator_k5_gpt35, input_id=\"rag_k5_gpt35_model\")\n", + "vm_rag_k10_gpt35_model = vm.init_model(vm_retriever_k10 | vm_generator_k10_gpt35, input_id=\"rag_k10_gpt35_model\")\n", + "vm_rag_k5_gpt4o_model = vm.init_model(vm_retriever_k5 | vm_generator_k5_gpt4o, input_id=\"rag_k5_gpt4o_model\")\n", + "vm_rag_k10_gpt4o_model = vm.init_model(vm_retriever_k10 | vm_generator_k10_gpt4o, input_id=\"rag_k10_gpt4o_model\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can `assign_predictions` to the pipeline model just like we did with the individual models. This will run the pipeline on the test set and store the results in the test set for later use." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "vm_test_ds.assign_predictions(model=vm_rag_k5_gpt35_model)\n", + "vm_test_ds.assign_predictions(model=vm_rag_k10_gpt35_model)\n", + "vm_test_ds.assign_predictions(model=vm_rag_k5_gpt4o_model)\n", + "vm_test_ds.assign_predictions(model=vm_rag_k10_gpt4o_model)\n", + "print(vm_test_ds)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "vm_test_ds._df.head(5)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Run tests" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## RAGAS evaluation\n", + "\n", + "Let's go ahead and run some of our new RAG tests against our model...\n", + "\n", + "> Note: these tests are still being developed and are not yet in a stable state. We are using advanced tests here that use LLM-as-Judge and other strategies to assess things like the relevancy of the retrieved context to the input question and the correctness of the generated answer when compared to the ground truth. There is more to come in this area so stay tuned!" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import warnings\n", + "\n", + "warnings.filterwarnings(\"ignore\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Semantic Similarity\n", + "\n", + "The concept of Answer Semantic Similarity pertains to the assessment of the semantic resemblance between the generated answer and the ground truth. This evaluation is based on the ground truth and the answer, with values falling within the range of 0 to 1. 
A higher score signifies a better alignment between the generated answer and the ground truth.\n", + "\n", + "Measuring the semantic similarity between answers can offer valuable insights into the quality of the generated response. This evaluation utilizes a cross-encoder model to calculate the semantic similarity score." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " \"validmind.model_validation.ragas.SemanticSimilarity\",\n", + " inputs={\"dataset\": vm_test_ds},\n", + " param_grid={\n", + " \"response_column\": [\"rag_k5_gpt35_model_prediction\", \"rag_k5_gpt4o_model_prediction\"],\n", + " \"reference_column\": [\"ground_truth\"],\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Context Entity Recall\n", + "\n", + "This test gives the measure of recall of the retrieved context, based on the number of entities present in both ground_truths and contexts relative to the number of entities present in the ground_truths alone. Simply put, it is a measure of what fraction of entities are recalled from ground_truths. This test is useful in fact-based use cases like tourism help desk, historical QA, etc. This test can help evaluate the retrieval mechanism for entities, based on comparison with entities present in ground_truths, because in cases where entities matter, we need the contexts which cover them." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " \"validmind.model_validation.ragas.ContextEntityRecall\",\n", + " inputs={\"dataset\": vm_test_ds},\n", + " param_grid={\n", + " \"reference_column\": [\"ground_truth\"],\n", + " \"retrieved_contexts_column\": [\"retrieval_k5_model_prediction\", \"retrieval_k10_model_prediction\"],\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Context Precision\n", + "\n", + "Context Precision is a test that evaluates whether all of the ground-truth relevant items present in the contexts are ranked higher or not. Ideally all the relevant chunks must appear at the top ranks. This test is computed using the question, ground_truth and the contexts, with values ranging between 0 and 1, where higher scores indicate better precision." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " \"validmind.model_validation.ragas.ContextPrecision\",\n", + " inputs={\"dataset\": vm_test_ds},\n", + " param_grid={\n", + " \"user_input_column\": [\"question\"],\n", + " \"retrieved_contexts_column\": [\"retrieval_k5_model_prediction\", \"retrieval_k10_model_prediction\"],\n", + " \"reference_column\": [\"ground_truth\"],\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Context Precision Without Reference\n", + "\n", + "This test evaluates whether retrieved contexts align well with the expected response for a given user input, without requiring a ground-truth reference. This test assesses the relevance of each retrieved context chunk by comparing it directly to the response." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " \"validmind.model_validation.ragas.ContextPrecisionWithoutReference\",\n", + " inputs={\"dataset\": vm_test_ds},\n", + " param_grid=[\n", + " {\"user_input_column\": \"question\",\n", + " \"retrieved_contexts_column\": \"retrieval_k5_model_prediction\",\n", + " \"response_column\": \"rag_k5_gpt4o_model_prediction\"\n", + " },\n", + " {\"user_input_column\": \"question\",\n", + " \"retrieved_contexts_column\": \"retrieval_k10_model_prediction\",\n", + " \"response_column\": \"rag_k10_gpt4o_model_prediction\"\n", + " },\n", + " ],\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " \"validmind.model_validation.ragas.ContextPrecisionWithoutReference\",\n", + " inputs={\"dataset\": vm_test_ds},\n", + " param_grid={\n", + " \"user_input_column\": [\"question\"],\n", + " \"retrieved_contexts_column\": [\"retrieval_k5_model_prediction\"],\n", + " \"response_column\": [\"rag_k5_gpt35_model_prediction\", \"rag_k5_gpt4o_model_prediction\"],\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Faithfulness\n", + "\n", + "This measures the factual consistency of the generated answer against the given context. It is calculated from answer and retrieved context. The answer is scaled to (0,1) range. Higher the better.\n", + "\n", + "The generated answer is regarded as faithful if all the claims that are made in the answer can be inferred from the given context. To calculate this a set of claims from the generated answer is first identified. Then each one of these claims are cross checked with given context to determine if it can be inferred from given context or not." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " \"validmind.model_validation.ragas.Faithfulness\",\n", + " inputs={\"dataset\": vm_test_ds},\n", + " param_grid={\n", + " \"user_input_column\": [\"question\"],\n", + " \"response_column\": [\"rag_k5_gpt35_model_prediction\", \"rag_k5_gpt4o_model_prediction\"],\n", + " \"retrieved_contexts_column\": [\"retrieval_k5_model_prediction\"],\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Response Relevancy\n", + "\n", + "The Response Relevancy test focuses on assessing how pertinent the generated answer is to the given prompt. A lower score is assigned to answers that are incomplete or contain redundant information and higher scores indicate better relevancy. This test is computed using the question, the context and the answer.\n", + "\n", + "The Response Relevancy is defined as the mean cosine similarity of the original question to a number of artificial questions, which were generated (reverse engineered) based on the answer.\n", + "\n", + "Please note that even though in practice the score will range between 0 and 1 most of the time, this is not mathematically guaranteed, due to the nature of the cosine similarity ranging from -1 to 1.\n", + "\n", + "> Note: This is a reference-free test. If you’re looking to compare ground truth answer with generated answer refer to Answer Correctness.\n", + "\n", + "An answer is deemed relevant when it directly and appropriately addresses the original question. Importantly, our assessment of answer relevance does not consider factuality but instead penalizes cases where the answer lacks completeness or contains redundant details. 
To calculate this score, the LLM is prompted to generate an appropriate question for the generated answer multiple times, and the mean cosine similarity between these generated questions and the original question is measured. The underlying idea is that if the generated answer accurately addresses the initial question, the LLM should be able to generate questions from the answer that align with the original question." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " \"validmind.model_validation.ragas.ResponseRelevancy\",\n", + " inputs={\"dataset\": vm_test_ds},\n", + " param_grid={\n", + " \"user_input_column\": [\"question\"],\n", + " \"response_column\": [\"rag_k5_gpt35_model_prediction\", \"rag_k5_gpt4o_model_prediction\"],\n", + " \"retrieved_contexts_column\": [\"retrieval_k5_model_prediction\"],\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Context Recall\n", + "\n", + "Context recall measures the extent to which the retrieved context aligns with the annotated answer, treated as the ground truth. It is computed based on the ground truth and the retrieved context, and the values range between 0 and 1, with higher values indicating better performance.\n", + "\n", + "To estimate context recall from the ground truth answer, each sentence in the ground truth answer is analyzed to determine whether it can be attributed to the retrieved context or not. In an ideal scenario, all sentences in the ground truth answer should be attributable to the retrieved context." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " \"validmind.model_validation.ragas.ContextRecall\",\n", + " inputs={\"dataset\": vm_test_ds},\n", + " param_grid={\n", + " \"user_input_column\": [\"question\"],\n", + " \"retrieved_contexts_column\": [\"retrieval_k5_model_prediction\", \"retrieval_k10_model_prediction\"],\n", + " \"reference_column\": [\"ground_truth\"],\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Answer Correctness\n", + "\n", + "The assessment of Answer Correctness involves gauging the accuracy of the generated answer when compared to the ground truth. This evaluation relies on the ground truth and the answer, with scores ranging from 0 to 1. A higher score indicates a closer alignment between the generated answer and the ground truth, signifying better correctness.\n", + "\n", + "Answer correctness encompasses two critical aspects: semantic similarity between the generated answer and the ground truth, as well as factual similarity. These aspects are combined using a weighted scheme to formulate the answer correctness score.\n", + "\n", + "Factual correctness quantifies the factual overlap between the generated answer and the ground truth answer. This is done using the concepts of:\n", + "\n", + "- TP (True Positive): Facts or statements that are present in both the ground truth and the generated answer.\n", + "- FP (False Positive): Facts or statements that are present in the generated answer but not in the ground truth.\n", + "- FN (False Negative): Facts or statements that are present in the ground truth but not in the generated answer." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " \"validmind.model_validation.ragas.AnswerCorrectness\",\n", + " inputs={\"dataset\": vm_test_ds},\n", + " param_grid={\n", + " \"user_input_column\": [\"question\"],\n", + " \"response_column\": [\"rag_k5_gpt35_model_prediction\", \"rag_k5_gpt4o_model_prediction\"],\n", + " \"reference_column\": [\"ground_truth\"],\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Aspect Critic\n", + "\n", + "This is designed to assess submissions based on predefined aspects such as harmlessness and correctness. Additionally, users have the flexibility to define their own aspects for evaluating submissions according to their specific criteria. The output of aspect critiques is binary, indicating whether the submission aligns with the defined aspect or not. This evaluation is performed using the ‘answer’ as input.\n", + "\n", + "Critiques within the LLM evaluators evaluate submissions based on the provided aspect. Ragas Critiques offers a range of predefined aspects like correctness, harmfulness, etc. Users can also define their own aspects for evaluating submissions based on their specific criteria. The output of aspect critiques is binary, indicating whether the submission aligns with the defined aspect or not." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " \"validmind.model_validation.ragas.AspectCritic\",\n", + " inputs={\"dataset\": vm_test_ds},\n", + " param_grid={\n", + " \"user_input_column\": [\"question\"],\n", + " \"response_column\": [\"rag_k5_gpt35_model_prediction\", \"rag_k5_gpt4o_model_prediction\"],\n", + " \"retrieved_contexts_column\": [\"retrieval_k5_model_prediction\"],\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Noise Sensitivity\n", + "\n", + "This test is designed to evaluate the robustness of the RAG pipeline model against noise in the retrieved context. It works by checking how well the \"claims\" in the generated answer match up with the \"claims\" in the ground truth answer. If the generated answer contains \"claims\" from the contexts that the ground truth answer does not contain, those claims are considered incorrect. The score for each answer is the number of incorrect claims divided by the total number of claims. This *can* be interpreted as a measure of how sensitive the LLM is to \"noise\" in the context where \"noise\" is information that is relevant but should not be included in the answer since the ground truth answer does not contain it." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " \"validmind.model_validation.ragas.NoiseSensitivity\",\n", + " inputs={\"dataset\": vm_test_ds},\n", + " param_grid={\n", + " \"user_input_column\": [\"question\"],\n", + " \"response_column\": [\"rag_k5_gpt35_model_prediction\", \"rag_k5_gpt4o_model_prediction\"],\n", + " \"reference_column\": [\"ground_truth\"],\n", + " \"retrieved_contexts_column\": [\"retrieval_k5_model_prediction\"],\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Generation quality\n", + "\n", + "In this section, we evaluate the alignment and relevance of generated responses to reference outputs within our retrieval-augmented generation (RAG) application. We use metrics that assess various quality dimensions of the generated responses, including semantic similarity, structural alignment, and phrasing overlap. Semantic similarity metrics compare embeddings of generated and reference text to capture deeper contextual alignment, while overlap and alignment measures quantify how well the phrasing and structure of generated responses match the intended outputs." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Token Disparity\n", + "\n", + "This test assesses the difference in token counts between the reference texts (ground truth) and the answers generated by the RAG model. It helps evaluate how well the model's outputs align with the expected length and level of detail in the reference texts. A significant disparity in token counts could signal issues with generation quality, such as excessive verbosity or insufficient detail. Consistently low token counts in generated answers compared to references might suggest that the model’s outputs are incomplete or overly concise, missing important contextual information." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " \"validmind.model_validation.TokenDisparity\",\n", + " input_grid={\n", + " \"dataset\": [vm_test_ds],\n", + " \"model\": [vm_rag_k5_gpt35_model, vm_rag_k5_gpt4o_model],\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### ROUGE Score\n", + "\n", + "This test evaluates the quality of answers generated by the RAG model by measuring overlaps in n-grams, word sequences, and word pairs between the model output and the reference (ground truth) text. ROUGE, short for Recall-Oriented Understudy for Gisting Evaluation, assesses both precision and recall, providing a balanced view of how well the generated response captures the reference content. ROUGE precision measures the proportion of n-grams in the generated text that match the reference, highlighting relevance and conciseness, while ROUGE recall assesses the proportion of reference n-grams present in the generated text, indicating completeness and thoroughness. \n", + "\n", + "Low precision scores might reveal that the generated text includes redundant or irrelevant information, while low recall scores suggest omissions of essential details from the reference. Consistently low ROUGE scores could indicate poor overall alignment with the ground truth, suggesting the model may be missing key content or failing to capture the intended meaning." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " \"validmind.model_validation.RougeScore\",\n", + " input_grid={\n", + " \"dataset\": [vm_test_ds],\n", + " \"model\": [vm_rag_k5_gpt35_model, vm_rag_k5_gpt4o_model],\n", + " },\n", + " params={\n", + " \"metric\": \"rouge-1\",\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### BLEU Score\n", + "\n", + "The BLEU Score test evaluates the quality of answers generated by the RAG model by measuring n-gram overlap between the generated text and the reference (ground truth) text, with a specific focus on exact precision in phrasing. While ROUGE precision also assesses overlap, BLEU differs in two main ways: first, it applies a geometric average across multiple n-gram levels, capturing precise phrase alignment, and second, it includes a brevity penalty to prevent overly short outputs from inflating scores artificially. This added precision focus is valuable in RAG applications where strict adherence to reference language is essential, as BLEU emphasizes the match to exact phrasing. In contrast, ROUGE precision evaluates general content overlap without penalizing brevity, offering a broader sense of content alignment." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " \"validmind.model_validation.BleuScore\",\n", + " input_grid={\n", + " \"dataset\": [vm_test_ds],\n", + " \"model\": [vm_rag_k5_gpt35_model, vm_rag_k5_gpt4o_model],\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### BERT Score\n", + "\n", + "This test evaluates the quality of the RAG generated answers using BERT embeddings to measure precision, recall, and F1 scores based on semantic similarity, rather than exact n-gram matches as in BLEU and ROUGE. 
This approach captures contextual meaning, making it valuable when wording differs but the intended message closely aligns with the reference. In RAG applications, the BERT score is especially useful for ensuring that generated answers convey the reference text’s meaning, even if phrasing varies. Consistently low scores indicate a lack of semantic alignment, suggesting the model may miss or misrepresent key content. Low precision may reflect irrelevant or redundant details, while low recall can indicate omissions." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " \"validmind.model_validation.BertScore\",\n", + " input_grid={\n", + " \"dataset\": [vm_test_ds],\n", + " \"model\": [vm_rag_k5_gpt35_model, vm_rag_k5_gpt4o_model],\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### METEOR Score\n", + "\n", + "This test evaluates the quality of the generated answers by measuring alignment with the ground truth, emphasizing both accuracy and fluency. Unlike BLEU and ROUGE, which focus on n-gram matches, METEOR combines precision, recall, synonym matching, and word order, focusing on how well the generated text conveys meaning and reads naturally. This metric is especially useful for RAG applications where sentence structure and natural flow are crucial for clear communication. Lower scores may suggest alignment issues, indicating that the answers may lack fluency or key content. Discrepancies in word order or high fragmentation penalties can reveal problems with how the model constructs sentences, potentially affecting readability." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " \"validmind.model_validation.MeteorScore\",\n", + " input_grid={\n", + " \"dataset\": [vm_test_ds],\n", + " \"model\": [vm_rag_k5_gpt35_model, vm_rag_k5_gpt4o_model],\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Bias and Toxicity\n", + "\n", + "In this section, we use metrics like Toxicity Score and Regard Score to evaluate both the generated responses and the ground truth. These tests help us detect any harmful, offensive, or inappropriate language and evaluate the level of bias and neutrality, enabling us to assess and mitigate potential biases in both the model's responses and the original dataset." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Toxicity Score\n", + "\n", + "This test measures the level of harmful or offensive content in the generated answers. The test uses a preloaded toxicity detection tool from Hugging Face, which identifies language that may be inappropriate, aggressive, or derogatory. High toxicity scores indicate potentially toxic content, while consistently elevated scores across multiple outputs may signal underlying issues in the model’s generation process that require attention to prevent the spread of harmful language." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " \"validmind.model_validation.ToxicityScore\",\n", + " input_grid={\n", + " \"dataset\": [vm_test_ds],\n", + " \"model\": [vm_rag_k5_gpt35_model, vm_rag_k5_gpt4o_model],\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Regard Score\n", + "\n", + "This test evaluates the sentiment and perceived regard—categorized as positive, negative, neutral, or other—in answers generated by the RAG model. This is important for identifying any biases or sentiment tendencies in responses, ensuring that generated answers are balanced and appropriate for the context. The test uses a preloaded regard evaluation tool from Hugging Face to compute scores for each response. High skewness in regard scores, especially if the generated responses consistently diverge from expected sentiments in the reference texts, may reveal biases in the model’s generation, such as overly positive or negative tones where neutrality is expected." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " \"validmind.model_validation.RegardScore\",\n", + " input_grid={\n", + " \"dataset\": [vm_test_ds],\n", + " \"model\": [vm_rag_k5_gpt35_model, vm_rag_k5_gpt4o_model],\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Conclusion\n", + "\n", + "In this notebook, we have seen how we can use LangChain and ValidMind together to build, evaluate and document a simple RAG Model as it's developed. This is a great example of the interactive development experience that ValidMind is designed to support. We can quickly iterate on our model and document as we go... 
We have seen how ValidMind supports non-traditional \"models\" using a functional interface and how we can build pipelines of many models to support complex GenAI workflows.\n", + "\n", + "This is still a work in progress and we are actively developing new tests to support more advanced GenAI workflows. We are also keeping an eye on the most popular GenAI models and libraries to explore direct integrations. Stay tuned for more updates and new features in this area!" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Upgrade ValidMind\n", + "\n", + "
After installing ValidMind, you’ll want to periodically make sure you are on the latest version to access any new features and other enhancements.
\n", + "\n", + "Retrieve the information for the currently installed version of ValidMind:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%pip show validmind" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "If the version returned is lower than the version indicated in our [production open-source code](https://github.com/validmind/validmind-library/blob/prod/validmind/__version__.py), restart your notebook and run:\n", + "\n", + "```bash\n", + "%pip install --upgrade validmind\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You may need to restart your kernel after running the upgrade package for changes to be applied." + ] + }, + { + "cell_type": "markdown", + "id": "copyright-09e315440ca84258abe1aaefaca3a3d0", + "metadata": {}, + "source": [ + "\n", + "\n", + "\n", + "\n", + "***\n", + "\n", + "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", + "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", + "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "ValidMind Library", + "language": "python", + "name": "validmind" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.15" + } + }, + "nbformat": 4, + "nbformat_minor": 2 } diff --git a/notebooks/use_cases/nlp_and_llm/rag_documentation_demo.ipynb b/notebooks/use_cases/nlp_and_llm/rag_documentation_demo.ipynb index ca868c641..aac1876d5 100644 --- a/notebooks/use_cases/nlp_and_llm/rag_documentation_demo.ipynb +++ b/notebooks/use_cases/nlp_and_llm/rag_documentation_demo.ipynb @@ -1,1691 +1,1692 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# RAG Model Documentation Demo\n", - "\n", - "In this notebook, we are going to implement a simple RAG Model for automating the process of answering RFP questions using GenAI. We will see how we can initialize an embedding model, a retrieval model and a generator model with LangChain components and use them within the ValidMind Library to run tests against them. Finally, we will see how we can put them together in a Pipeline and run that to get e2e results and run tests against that." 
- ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "::: {.content-hidden when-format=\"html\"}\n", - "## Contents \n", - "- [About ValidMind](#toc1__) \n", - " - [Before you begin](#toc1_1__) \n", - " - [New to ValidMind?](#toc1_2__) \n", - " - [Key concepts](#toc1_3__) \n", - "- [Setting up](#toc2__) \n", - " - [Initialize the ValidMind Library](#toc2_1__) \n", - " - [Register sample model](#toc2_1_1__) \n", - " - [Apply documentation template](#toc2_1_2__) \n", - " - [Get your code snippet](#toc2_1_3__) \n", - "- [Read Open AI API Key](#toc3__) \n", - "- [Dataset Loader](#toc4__) \n", - "- [Data validation](#toc5__) \n", - " - [Duplicates](#toc5_1__) \n", - " - [Stop Words](#toc5_2__) \n", - " - [Punctuations](#toc5_3__) \n", - " - [Common Words](#toc5_4__) \n", - " - [Language Detection](#toc5_5__) \n", - " - [Toxicity Score](#toc5_6__) \n", - " - [Polarity and Subjectivity](#toc5_7__) \n", - " - [Sentiment](#toc5_8__) \n", - " - [Assign Predictions](#toc5_9__) \n", - " - [Run tests](#toc5_10__) \n", - " - [Generate embeddings for the Train Set](#toc5_11__) \n", - " - [Insert embeddings and questions into Vector DB](#toc5_12__) \n", - "- [Prompt Evaluation](#toc6__) \n", - "- [RAGAS evaluation](#toc7__) \n", - " - [Semantic Similarity](#toc7_1__) \n", - " - [Context Entity Recall](#toc7_2__) \n", - " - [Context Precision](#toc7_3__) \n", - " - [Context Precision Without Reference](#toc7_4__) \n", - " - [Faithfulness](#toc7_5__) \n", - " - [Response Relevancy](#toc7_6__) \n", - " - [Context Recall](#toc7_7__) \n", - " - [Answer Correctness](#toc7_8__) \n", - " - [Aspect Critic](#toc7_9__) \n", - " - [Noise Sensitivity](#toc7_10__) \n", - "- [Generation quality](#toc8__) \n", - " - [Token Disparity](#toc8_1__) \n", - " - [ROUGE Score](#toc8_2__) \n", - " - [BLEU Score](#toc8_3__) \n", - " - [BERT Score](#toc8_4__) \n", - " - [METEOR Score](#toc8_5__) \n", - "- [Bias and Toxicity](#toc9__) \n", - " - [Toxicity Score](#toc9_1__) \n", - " - 
[Regard Score](#toc9_2__) \n", - "- [Upgrade ValidMind](#toc10__) \n", - "\n", - ":::\n", - "\n", - "" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "## About ValidMind\n", - "\n", - "ValidMind is a suite of tools for managing model risk, including risk associated with AI and statistical models.\n", - "\n", - "You use the ValidMind Library to automate documentation and validation tests, and then use the ValidMind Platform to collaborate on model documentation. Together, these products simplify model risk management, facilitate compliance with regulations and institutional standards, and enhance collaboration between yourself and model validators.\n", - "\n", - "\n", - "\n", - "### Before you begin\n", - "\n", - "This notebook assumes you have basic familiarity with Python, including an understanding of how functions work. If you are new to Python, you can still run the notebook but we recommend further familiarizing yourself with the language. \n", - "\n", - "If you encounter errors due to missing modules in your Python environment, install the modules with `pip install`, and then re-run the notebook. For more help, refer to [Installing Python Modules](https://docs.python.org/3/installing/index.html).\n", - "\n", - "\n", - "\n", - "### New to ValidMind?\n", - "\n", - "If you haven't already seen our documentation on the [ValidMind Library](https://docs.validmind.ai/developer/validmind-library.html), we recommend you begin by exploring the available resources in this section. There, you can learn more about documenting models and running tests, as well as find code samples and our Python Library API reference.\n", - "\n", - "
For access to all features available in this notebook, you'll need access to a ValidMind account.\n", - "

\n", - "Register with ValidMind
\n", - "\n", - "\n", - "\n", - "### Key concepts\n", - "\n", - "- **FunctionModels**: ValidMind offers support for creating `VMModel` instances from Python functions. This enables us to support any \"model\" by simply using the provided function as the model's `predict` method.\n", - "- **PipelineModels**: ValidMind models (`VMModel` instances) of any type can be piped together to create a model pipeline. This allows model components to be created and tested/documented independently, and then combined into a single model for end-to-end testing and documentation. We use the `|` operator to pipe models together.\n", - "- **RAG**: RAG stands for Retrieval Augmented Generation and refers to a wide range of GenAI applications where some form of retrieval is used to add context to the prompt so that the LLM that generates content can refer to it when creating its output. In this notebook, we are going to implement a simple RAG setup using LangChain components." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Prerequisites\n", - "\n", - "Let's go ahead and install the `validmind` library if its not already installed... 
Then we can install the `qdrant-client` library for our vector store and `langchain` for everything else:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%pip install -q validmind" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%pip install -q qdrant-client langchain langchain-openai sentencepiece" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "## Setting up" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Initialize the ValidMind Library" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "#### Register sample model\n", - "\n", - "Let's first register a sample model for use with this notebook:\n", - "\n", - "1. In a browser, [log in to ValidMind](https://docs.validmind.ai/guide/configuration/log-in-to-validmind.html).\n", - "\n", - "2. In the left sidebar, navigate to **Inventory** and click **+ Register Model**.\n", - "\n", - "3. Enter the model details and click **Next >** to continue to assignment of model stakeholders. ([Need more help?](https://docs.validmind.ai/guide/model-inventory/register-models-in-inventory.html))\n", - "4. Select your own name under the **MODEL OWNER** drop-down.\n", - "\n", - "5. Click **Register Model** to add the model to your inventory." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "#### Apply documentation template\n", - "\n", - "Once you've registered your model, let's select a documentation template. A template predefines sections for your model documentation and provides a general outline to follow, making the documentation process much easier.\n", - "\n", - "1. In the left sidebar that appears for your model, click **Documents** and select **Documentation**.\n", - "\n", - "2. 
Under **TEMPLATE**, select `Gen AI RAG`.\n", - "\n", - "3. Click **Use Template** to apply the template." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "
Can't select this template?\n", - "

\n", - "Your organization administrators may need to add it to your template library:\n", - "
" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "#### Get your code snippet\n", - "\n", - "ValidMind generates a unique _code snippet_ for each registered model to connect with your developer environment. You initialize the ValidMind Library with this code snippet, which ensures that your documentation and tests are uploaded to the correct model when you run the notebook.\n", - "\n", - "1. On the left sidebar that appears for your model, select **Getting Started** and click **Copy snippet to clipboard**.\n", - "2. Next, [load your model identifier credentials from an `.env` file](https://docs.validmind.ai/developer/model-documentation/store-credentials-in-env-file.html) or replace the placeholder with your own code snippet:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Load your model identifier credentials from an `.env` file\n", - "\n", - "%load_ext dotenv\n", - "%dotenv .env\n", - "\n", - "# Or replace with your code snippet\n", - "\n", - "import validmind as vm\n", - "\n", - "vm.init(\n", - " # api_host=\"...\",\n", - " # api_key=\"...\",\n", - " # api_secret=\"...\",\n", - " # model=\"...\",\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "## Read Open AI API Key\n", - "\n", - "We will need to have an OpenAI API key to be able to use their `text-embedding-3-small` model for our embeddings, `gpt-3.5-turbo` model for our generator and `gpt-4o` model for our LLM-as-Judge tests. If you don't have an OpenAI API key, you can get one by signing up at [OpenAI](https://platform.openai.com/signup). Then you can create a `.env` file in the root of your project and the following cell will load it from there. Alternatively, you can just uncomment the line below to directly set the key (not recommended for security reasons)." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# load openai api key\n", - "import os\n", - "\n", - "import dotenv\n", - "import nltk\n", - "\n", - "dotenv.load_dotenv()\n", - "nltk.download('stopwords')\n", - "nltk.download('punkt_tab')\n", - "\n", - "# os.environ[\"OPENAI_API_KEY\"] = \"sk-...\"\n", - "\n", - "if not \"OPENAI_API_KEY\" in os.environ:\n", - " raise ValueError(\"OPENAI_API_KEY is not set\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "## Dataset Loader\n", - "\n", - "Great, now that we have all of our dependencies installed, the ValidMind Library initialized and connected to our model and our OpenAI API key setup, we can go ahead and load our datasets. We will use the synthetic `RFP` dataset included with ValidMind for this notebook. This dataset contains a variety of RFP questions and ground truth answers that we can use both as the source where our Retriever will search for similar question-answer pairs as well as our test set for evaluating the performance of our RAG model. To do this, we just have to load it and call the preprocess function to get a split of the data into train and test sets." 
- ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [], - "source": [ - "# Import the sample dataset from the library\n", - "from validmind.datasets.llm.rag import rfp\n", - "\n", - "raw_df = rfp.load_data()\n", - "train_df, test_df = rfp.preprocess(raw_df)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "vm_train_ds = vm.init_dataset(\n", - " train_df,\n", - " text_column=\"question\",\n", - " target_column=\"ground_truth\",\n", - ")\n", - "\n", - "vm_test_ds = vm.init_dataset(\n", - " test_df,\n", - " text_column=\"question\",\n", - " target_column=\"ground_truth\",\n", - ")\n", - "\n", - "vm_test_ds.df.head()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "## Data validation\n", - "\n", - "Now that we have loaded our dataset, we can go ahead and run some data validation tests right away to start assessing and documenting the quality of our data. Since we are using a text dataset, we can use ValidMind's built-in array of text data quality tests to check that things like number of duplicates, missing values, and other common text data issues are not present in our dataset. We can also run some tests to check the sentiment and toxicity of our data." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Duplicates\n", - "\n", - "First, let's check for duplicates in our dataset. 
We can use the `validmind.data_validation.Duplicates` test and pass our dataset:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from validmind.tests import run_test\n", - "\n", - "run_test(\n", - " test_id=\"validmind.data_validation.Duplicates\",\n", - " inputs={\"dataset\": vm_train_ds},\n", - ").log()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Stop Words\n", - "\n", - "Next, let's check for stop words in our dataset. We can use the `validmind.data_validation.StopWords` test and pass our dataset:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " test_id=\"validmind.data_validation.nlp.StopWords\",\n", - " inputs={\n", - " \"dataset\": vm_train_ds,\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Punctuations\n", - "\n", - "Next, let's check for punctuations in our dataset. We can use the `validmind.data_validation.Punctuations` test:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " test_id=\"validmind.data_validation.nlp.Punctuations\",\n", - " inputs={\n", - " \"dataset\": vm_train_ds,\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Common Words\n", - "\n", - "Next, let's check for common words in our dataset. 
We can use the `validmind.data_validation.CommonWord` test:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " test_id=\"validmind.data_validation.nlp.CommonWords\",\n", - " inputs={\n", - " \"dataset\": vm_train_ds,\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Language Detection\n", - "\n", - "For documentation purposes, we can detect and log the languages used in the dataset with the `validmind.data_validation.LanguageDetection` test:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " test_id=\"validmind.data_validation.nlp.LanguageDetection\",\n", - " inputs={\n", - " \"dataset\": vm_train_ds,\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Toxicity Score\n", - "\n", - "Now, let's go ahead and run the `validmind.data_validation.nlp.Toxicity` test to compute a toxicity score for our dataset:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " \"validmind.data_validation.nlp.Toxicity\",\n", - " inputs={\n", - " \"dataset\": vm_train_ds,\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Polarity and Subjectivity\n", - "\n", - "We can also run the `validmind.data_validation.nlp.PolarityAndSubjectivity` test to compute the polarity and subjectivity of our dataset:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " \"validmind.data_validation.nlp.PolarityAndSubjectivity\",\n", - " inputs={\n", - " \"dataset\": vm_train_ds,\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - 
"\n", - "\n", - "### Sentiment\n", - "\n", - "Finally, we can run the `validmind.data_validation.nlp.Sentiment` test to plot the sentiment of our dataset:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " \"validmind.data_validation.nlp.Sentiment\",\n", - " inputs={\n", - " \"dataset\": vm_train_ds,\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Embedding Model\n", - "\n", - "Now that we have our dataset loaded and have run some data validation tests to assess and document the quality of our data, we can go ahead and initialize our embedding model. We will use the `text-embedding-3-small` model from OpenAI for this purpose wrapped in the `OpenAIEmbeddings` class from LangChain. This model will be used to \"embed\" our questions both for inserting the question-answer pairs from the \"train\" set into the vector store and for embedding the question from inputs when making predictions with our RAG model." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from langchain_openai import OpenAIEmbeddings\n", - "\n", - "embedding_client = OpenAIEmbeddings(model=\"text-embedding-3-small\")\n", - "\n", - "\n", - "def embed(input):\n", - " \"\"\"Returns a text embedding for the given text\"\"\"\n", - " return embedding_client.embed_query(input[\"question\"])\n", - "\n", - "\n", - "vm_embedder = vm.init_model(input_id=\"embedding_model\", predict_fn=embed)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "What we have done here is to initialize the `OpenAIEmbeddings` class so it uses OpenAI's `text-embedding-3-small` model. We then created an `embed` function that takes in an `input` dictionary and uses the `embed_query` method of the embedding client to compute the embeddings of the `question`. 
We use an `embed` function since that is how ValidMind supports any custom model. We will use this strategy for the retrieval and generator models as well but you could also use, say, a HuggingFace model directly. See the documentation for more information on which model types are directly supported - [ValidMind Documentation](https://docs.validmind.ai/validmind/validmind.html)... Finally, we use the `init_model` function from the ValidMind Library to create a `VMModel` object that can be used in ValidMind tests. This also logs the model to our model documentation and any test that uses the model will be linked to the logged model and its metadata." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Assign Predictions\n", - "\n", - "To precompute the embeddings for our test set, we can call the `assign_predictions` method of our `vm_test_ds` object we created above. This will compute the embeddings for each question in the test set and store them in the a special prediction column of the test set thats linked to our `vm_embedder` model. This will allow us to use these embeddings later when we run tests against our embedding model." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "vm_test_ds.assign_predictions(vm_embedder)\n", - "print(vm_test_ds)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Run tests\n", - "\n", - "Now that everything is setup for the embedding model, we can go ahead and run some tests to assess and document the quality of our embeddings. We will use the `validmind.model_validation.embeddings.*` tests to compute a variety of metrics against our model." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " \"validmind.model_validation.embeddings.StabilityAnalysisRandomNoise\",\n", - " inputs={\n", - " \"model\": vm_embedder,\n", - " \"dataset\": vm_test_ds,\n", - " },\n", - " params={\"probability\": 0.3},\n", - ").log()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " \"validmind.model_validation.embeddings.StabilityAnalysisSynonyms\",\n", - " inputs={\n", - " \"model\": vm_embedder,\n", - " \"dataset\": vm_test_ds,\n", - " },\n", - " params={\"probability\": 0.3},\n", - ").log()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " \"validmind.model_validation.embeddings.StabilityAnalysisTranslation\",\n", - " inputs={\n", - " \"model\": vm_embedder,\n", - " \"dataset\": vm_test_ds,\n", - " },\n", - " params={\n", - " \"source_lang\": \"en\",\n", - " \"target_lang\": \"fr\",\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " \"validmind.model_validation.embeddings.CosineSimilarityHeatmap\",\n", - " inputs={\n", - " \"model\": vm_embedder,\n", - " \"dataset\": vm_test_ds,\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "\n", - "run_test(\n", - " \"validmind.model_validation.embeddings.CosineSimilarityDistribution\",\n", - " inputs={\n", - " \"model\": vm_embedder,\n", - " \"dataset\": vm_test_ds,\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " \"validmind.model_validation.embeddings.EuclideanDistanceHeatmap\",\n", - " inputs={\n", - " \"model\": vm_embedder,\n", - " 
\"dataset\": vm_test_ds,\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " \"validmind.model_validation.embeddings.PCAComponentsPairwisePlots\",\n", - " inputs={\n", - " \"model\": vm_embedder,\n", - " \"dataset\": vm_test_ds,\n", - " },\n", - " params={\"n_components\": 3},\n", - ").log()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " \"validmind.model_validation.embeddings.TSNEComponentsPairwisePlots\",\n", - " inputs={\n", - " \"model\": vm_embedder,\n", - " \"dataset\": vm_test_ds,\n", - " },\n", - " params={\"n_components\": 3, \"perplexity\": 20},\n", - ").log()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Setup Vector Store\n", - "\n", - "Great, so now that we have assessed our embedding model and verified that it is performing well, we can go ahead and use it to compute embeddings for our question-answer pairs in the \"train\" set. We will then use these embeddings to insert the question-answer pairs into a vector store. We will use an in-memory `qdrant` vector database for demo purposes but any option would work just as well here. We will use the `QdrantClient` class from LangChain to interact with the vector store. This class will allow us to insert and search for embeddings in the vector store." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Generate embeddings for the Train Set\n", - "\n", - "We can use the same `assign_predictions` method from earlier except this time we will use the `vm_train_ds` object to compute the embeddings for the question-answer pairs in the \"train\" set." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "vm_train_ds.assign_predictions(vm_embedder)\n", - "print(vm_train_ds)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Insert embeddings and questions into Vector DB\n", - "\n", - "Now that we have computed the embeddings for our question-answer pairs in the \"train\" set, we can go ahead and insert them into the vector store:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from langchain_community.vectorstores import Qdrant\n", - "from langchain_openai import OpenAIEmbeddings\n", - "from langchain_community.document_loaders import DataFrameLoader\n", - "\n", - "# load documents from dataframe\n", - "loader = DataFrameLoader(train_df, page_content_column=\"question\")\n", - "docs = loader.load()\n", - "# choose model using embedding client\n", - "embedding_client = OpenAIEmbeddings(model=\"text-embedding-3-small\")\n", - "\n", - "# setup vector datastore\n", - "qdrant = Qdrant.from_documents(\n", - " docs,\n", - " embedding_client,\n", - " location=\":memory:\", # Local mode with in-memory storage only\n", - " collection_name=\"rfp_rag_collection\",\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Retrieval Model\n", - "\n", - "Now that we have an embedding model and a vector database setup and loaded with our data, we need a Retrieval model that can search for similar question-answer pairs for a given input question. Once created, we can initialize this as a ValidMind model and `assign_predictions` to it just like our embedding model." 
- ] - }, - { - "cell_type": "code", - "execution_count": 27, - "metadata": {}, - "outputs": [], - "source": [ - "def retrieve(input):\n", - " contexts = []\n", - "\n", - " for result in qdrant.similarity_search_with_score(input[\"question\"]):\n", - " document, score = result\n", - " context = f\"Q: {document.page_content}\\n\"\n", - " context += f\"A: {document.metadata['ground_truth']}\\n\"\n", - "\n", - " contexts.append(context)\n", - "\n", - " return contexts\n", - "\n", - "\n", - "vm_retriever = vm.init_model(input_id=\"retrieval_model\", predict_fn=retrieve)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "vm_test_ds.assign_predictions(model=vm_retriever)\n", - "print(vm_test_ds)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Generation Model\n", - "\n", - "As the final piece of this simple RAG pipeline, we can create and initialize a generation model that will use the retrieved context to generate an answer to the input question. We will use the `gpt-3.5-turbo` model from OpenAI." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from openai import OpenAI\n", - "\n", - "from validmind.models import Prompt\n", - "\n", - "\n", - "system_prompt = \"\"\"\n", - "You are an expert RFP AI assistant.\n", - "You are tasked with answering new RFP questions based on existing RFP questions and answers.\n", - "You will be provided with the existing RFP questions and answer pairs that are the most relevant to the new RFP question.\n", - "After that you will be provided with a new RFP question.\n", - "You will generate an answer and respond only with the answer.\n", - "Ignore your pre-existing knowledge and answer the question based on the provided context.\n", - "\"\"\".strip()\n", - "\n", - "openai_client = OpenAI()\n", - "\n", - "\n", - "def generate(input):\n", - " response = openai_client.chat.completions.create(\n", - " model=\"gpt-3.5-turbo\",\n", - " messages=[\n", - " {\"role\": \"system\", \"content\": system_prompt},\n", - " {\"role\": \"user\", \"content\": \"\\n\\n\".join(input[\"retrieval_model\"])},\n", - " {\"role\": \"user\", \"content\": input[\"question\"]},\n", - " ],\n", - " )\n", - "\n", - " return response.choices[0].message.content\n", - "\n", - "\n", - "vm_generator = vm.init_model(\n", - " input_id=\"generation_model\",\n", - " predict_fn=generate,\n", - " prompt=Prompt(template=system_prompt),\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's test it out real quick:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import pandas as pd\n", - "\n", - "vm_generator.predict(\n", - " pd.DataFrame(\n", - " {\"retrieval_model\": [[\"My name is anil\"]], \"question\": [\"what is my name\"]}\n", - " )\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "## Prompt Evaluation\n", - "\n", - "Now that we have our generator 
model initialized, we can run some LLM-as-Judge tests to evaluate the system prompt. This will allow us to get an initial sense of how well the prompt meets a few best practices for prompt engineering. These tests use an LLM to rate the prompt on a scale of 1-10 against the following criteria:\n", - "\n", - "- **Examplar Bias**: When using multi-shot prompting, does the prompt contain an unbiased distribution of examples?\n", - "- **Delimitation**: When using complex prompts containing examples, contextual information, or other elements, is the prompt formatted in such a way that each element is clearly separated?\n", - "- **Clarity**: How clearly the prompt states the task.\n", - "- **Conciseness**: How succinctly the prompt states the task.\n", - "- **Instruction Framing**: Whether the prompt contains negative instructions.\n", - "- **Specificity**: How specific the prompt defines the task." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " \"validmind.prompt_validation.Bias\",\n", - " inputs={\n", - " \"model\": vm_generator,\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " \"validmind.prompt_validation.Clarity\",\n", - " inputs={\n", - " \"model\": vm_generator,\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " \"validmind.prompt_validation.Conciseness\",\n", - " inputs={\n", - " \"model\": vm_generator,\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " \"validmind.prompt_validation.Delimitation\",\n", - " inputs={\n", - " \"model\": vm_generator,\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - 
"outputs": [], - "source": [ - "run_test(\n", - " \"validmind.prompt_validation.NegativeInstruction\",\n", - " inputs={\n", - " \"model\": vm_generator,\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " \"validmind.prompt_validation.Specificity\",\n", - " inputs={\n", - " \"model\": vm_generator,\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Setup RAG Pipeline Model\n", - "\n", - "Now that we have all of our individual \"component\" models setup and initialized we need some way to put them all together in a single \"pipeline\". We can use the `PipelineModel` class to do this. This ValidMind model type simply wraps any number of other ValidMind models and runs them in sequence. We can use a pipe(`|`) operator - in Python this is normally an `or` operator but we have overloaded it for easy pipeline creation - to chain together our models. We can then initialize this pipeline model and assign predictions to it just like any other model." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "vm_rag_model = vm.init_model(vm_retriever | vm_generator, input_id=\"rag_model\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We can `assign_predictions` to the pipeline model just like we did with the individual models. This will run the pipeline on the test set and store the results in the test set for later use." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "vm_test_ds.assign_predictions(model=vm_rag_model)\n", - "print(vm_test_ds)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "vm_test_ds._df.head(5)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Run tests" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "## RAGAS evaluation\n", - "\n", - "Let's go ahead and run some of our new RAG tests against our model...\n", - "\n", - "> Note: these tests are still being developed and are not yet in a stable state. We are using advanced tests here that use LLM-as-Judge and other strategies to assess things like the relevancy of the retrieved context to the input question and the correctness of the generated answer when compared to the ground truth. There is more to come in this area so stay tuned!" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import warnings\n", - "\n", - "warnings.filterwarnings(\"ignore\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Semantic Similarity\n", - "\n", - "The concept of Answer Semantic Similarity pertains to the assessment of the semantic resemblance between the generated answer and the ground truth. This evaluation is based on the ground truth and the answer, with values falling within the range of 0 to 1. A higher score signifies a better alignment between the generated answer and the ground truth.\n", - "\n", - "Measuring the semantic similarity between answers can offer valuable insights into the quality of the generated response. This evaluation utilizes a cross-encoder model to calculate the semantic similarity score." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " \"validmind.model_validation.ragas.SemanticSimilarity\",\n", - " inputs={\"dataset\": vm_test_ds},\n", - " params={\n", - " \"response_column\": \"rag_model_prediction\",\n", - " \"reference_column\": \"ground_truth\",\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Context Entity Recall\n", - "\n", - "This test gives the measure of recall of the retrieved context, based on the number of entities present in both ground_truths and contexts relative to the number of entities present in the ground_truths alone. Simply put, it is a measure of what fraction of entities are recalled from ground_truths. This test is useful in fact-based use cases like tourism help desk, historical QA, etc. This test can help evaluate the retrieval mechanism for entities, based on comparison with entities present in ground_truths, because in cases where entities matter, we need the contexts which cover them." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " \"validmind.model_validation.ragas.ContextEntityRecall\",\n", - " inputs={\"dataset\": vm_test_ds},\n", - " params={\n", - " \"reference_column\": \"ground_truth\",\n", - " \"retrieved_contexts_column\": \"retrieval_model_prediction\",\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Context Precision\n", - "\n", - "Context Precision is a test that evaluates whether all of the ground-truth relevant items present in the contexts are ranked higher or not. Ideally all the relevant chunks must appear at the top ranks. This test is computed using the question, ground_truth and the contexts, with values ranging between 0 and 1, where higher scores indicate better precision." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " \"validmind.model_validation.ragas.ContextPrecision\",\n", - " inputs={\"dataset\": vm_test_ds},\n", - " params={\n", - " \"user_input_column\": \"question\",\n", - " \"retrieved_contexts_column\": \"retrieval_model_prediction\",\n", - " \"reference_column\": \"ground_truth\",\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Context Precision Without Reference\n", - "\n", - "This test evaluates whether retrieved contexts align well with the expected response for a given user input, without requiring a ground-truth reference. This test assesses the relevance of each retrieved context chunk by comparing it directly to the response." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " \"validmind.model_validation.ragas.ContextPrecisionWithoutReference\",\n", - " inputs={\"dataset\": vm_test_ds},\n", - " params={\n", - " \"user_input_column\": \"question\",\n", - " \"retrieved_contexts_column\": \"retrieval_model_prediction\",\n", - " \"response_column\": \"rag_model_prediction\",\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Faithfulness\n", - "\n", - "This measures the factual consistency of the generated answer against the given context. It is calculated from answer and retrieved context. The answer is scaled to (0,1) range. Higher the better.\n", - "\n", - "The generated answer is regarded as faithful if all the claims that are made in the answer can be inferred from the given context. To calculate this a set of claims from the generated answer is first identified. Then each one of these claims are cross checked with given context to determine if it can be inferred from given context or not." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " \"validmind.model_validation.ragas.Faithfulness\",\n", - " inputs={\"dataset\": vm_test_ds},\n", - " params={\n", - " \"user_input_column\": \"question\",\n", - " \"response_column\": \"rag_model_prediction\",\n", - " \"retrieved_contexts_column\": \"retrieval_model_prediction\",\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Response Relevancy\n", - "\n", - "The Response Relevancy test, focuses on assessing how pertinent the generated answer is to the given prompt. A lower score is assigned to answers that are incomplete or contain redundant information and higher scores indicate better relevancy. This test is computed using the question, the context and the answer.\n", - "\n", - "The Response Relevancy is defined as the mean cosine similartiy of the original question to a number of artifical questions, which where generated (reverse engineered) based on the answer.\n", - "\n", - "Please note, that eventhough in practice the score will range between 0 and 1 most of the time, this is not mathematically guranteed, due to the nature of the cosine similarity ranging from -1 to 1.\n", - "\n", - "> Note: This is a reference free test. If you’re looking to compare ground truth answer with generated answer refer to Answer Correctness.\n", - "\n", - "An answer is deemed relevant when it directly and appropriately addresses the original question. Importantly, our assessment of answer relevance does not consider factuality but instead penalizes cases where the answer lacks completeness or contains redundant details. To calculate this score, the LLM is prompted to generate an appropriate question for the generated answer multiple times, and the mean cosine similarity between these generated questions and the original question is measured. 
The underlying idea is that if the generated answer accurately addresses the initial question, the LLM should be able to generate questions from the answer that align with the original question." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " \"validmind.model_validation.ragas.ResponseRelevancy\",\n", - " inputs={\"dataset\": vm_test_ds},\n", - " params={\n", - " \"user_input_column\": \"question\",\n", - " \"response_column\": \"rag_model_prediction\",\n", - " \"retrieved_contexts_column\": \"retrieval_model_prediction\",\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Context Recall\n", - "\n", - "Context recall measures the extent to which the retrieved context aligns with the annotated answer, treated as the ground truth. It is computed based on the ground truth and the retrieved context, and the values range between 0 and 1, with higher values indicating better performance.\n", - "\n", - "To estimate context recall from the ground truth answer, each sentence in the ground truth answer is analyzed to determine whether it can be attributed to the retrieved context or not. In an ideal scenario, all sentences in the ground truth answer should be attributable to the retrieved context." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " \"validmind.model_validation.ragas.ContextRecall\",\n", - " inputs={\"dataset\": vm_test_ds},\n", - " params={\n", - " \"user_input_column\": \"question\",\n", - " \"retrieved_contexts_column\": \"retrieval_model_prediction\",\n", - " \"reference_column\": \"ground_truth\",\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Answer Correctness\n", - "\n", - "The assessment of Answer Correctness involves gauging the accuracy of the generated answer when compared to the ground truth. This evaluation relies on the ground truth and the answer, with scores ranging from 0 to 1. A higher score indicates a closer alignment between the generated answer and the ground truth, signifying better correctness.\n", - "\n", - "Answer correctness encompasses two critical aspects: semantic similarity between the generated answer and the ground truth, as well as factual similarity. These aspects are combined using a weighted scheme to formulate the answer correctness score.\n", - "\n", - "Factual correctness quantifies the factual overlap between the generated answer and the ground truth answer. This is done using the concepts of:\n", - "\n", - "- TP (True Positive): Facts or statements that are present in both the ground truth and the generated answer.\n", - "- FP (False Positive): Facts or statements that are present in the generated answer but not in the ground truth.\n", - "- FN (False Negative): Facts or statements that are present in the ground truth but not in the generated answer." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " \"validmind.model_validation.ragas.AnswerCorrectness\",\n", - " inputs={\"dataset\": vm_test_ds},\n", - " params={\n", - " \"user_input_column\": \"question\",\n", - " \"response_column\": \"rag_model_prediction\",\n", - " \"reference_column\": \"ground_truth\",\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Aspect Critic\n", - "\n", - "This is designed to assess submissions based on predefined aspects such as harmlessness and correctness. Additionally, users have the flexibility to define their own aspects for evaluating submissions according to their specific criteria. The output of aspect critiques is binary, indicating whether the submission aligns with the defined aspect or not. This evaluation is performed using the ‘answer’ as input.\n", - "\n", - "Critiques within the LLM evaluators evaluate submissions based on the provided aspect. Ragas Critiques offers a range of predefined aspects like correctness, harmfulness, etc. Users can also define their own aspects for evaluating submissions based on their specific criteria. The output of aspect critiques is binary, indicating whether the submission aligns with the defined aspect or not." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " \"validmind.model_validation.ragas.AspectCritic\",\n", - " inputs={\"dataset\": vm_test_ds},\n", - " params={\n", - " \"user_input_column\": \"question\",\n", - " \"response_column\": \"rag_model_prediction\",\n", - " \"retrieved_contexts_column\": \"retrieval_model_prediction\",\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Noise Sensitivity\n", - "\n", - "This test is designed to evaluate the robustness of the RAG pipeline model against noise in the retrieved context. It works by checking how well the \"claims\" in the generated answer match up with the \"claims\" in the ground truth answer. If the generated answer contains \"claims\" from the contexts that the ground truth answer does not contain, those claims are considered incorrect. The score for each answer is the number of incorrect claims divided by the total number of claims. This *can* be interpreted as a measure of how sensitive the LLM is to \"noise\" in the context where \"noise\" is information that is relevant but should not be included in the answer since the ground truth answer does not contain it." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " \"validmind.model_validation.ragas.NoiseSensitivity\",\n", - " inputs={\"dataset\": vm_test_ds},\n", - " params={\n", - " \"user_input_column\": \"question\",\n", - " \"response_column\": \"rag_model_prediction\",\n", - " \"reference_column\": \"ground_truth\",\n", - " \"retrieved_contexts_column\": \"retrieval_model_prediction\",\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "## Generation quality\n", - "\n", - "In this section, we evaluate the alignment and relevance of generated responses to reference outputs within our retrieval-augmented generation (RAG) application. We use metrics that assess various quality dimensions of the generated responses, including semantic similarity, structural alignment, and phrasing overlap. Semantic similarity metrics compare embeddings of generated and reference text to capture deeper contextual alignment, while overlap and alignment measures quantify how well the phrasing and structure of generated responses match the intended outputs." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Token Disparity\n", - "\n", - "This test assesses the difference in token counts between the reference texts (ground truth) and the answers generated by the RAG model. It helps evaluate how well the model's outputs align with the expected length and level of detail in the reference texts. A significant disparity in token counts could signal issues with generation quality, such as excessive verbosity or insufficient detail. Consistently low token counts in generated answers compared to references might suggest that the model’s outputs are incomplete or overly concise, missing important contextual information." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " \"validmind.model_validation.TokenDisparity\",\n", - " inputs={\n", - " \"dataset\": vm_test_ds,\n", - " \"model\": vm_rag_model,\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### ROUGE Score\n", - "\n", - "This test evaluates the quality of answers generated by the RAG model by measuring overlaps in n-grams, word sequences, and word pairs between the model output and the reference (ground truth) text. ROUGE, short for Recall-Oriented Understudy for Gisting Evaluation, assesses both precision and recall, providing a balanced view of how well the generated response captures the reference content. ROUGE precision measures the proportion of n-grams in the generated text that match the reference, highlighting relevance and conciseness, while ROUGE recall assesses the proportion of reference n-grams present in the generated text, indicating completeness and thoroughness. \n", - "\n", - "Low precision scores might reveal that the generated text includes redundant or irrelevant information, while low recall scores suggest omissions of essential details from the reference. Consistently low ROUGE scores could indicate poor overall alignment with the ground truth, suggesting the model may be missing key content or failing to capture the intended meaning." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " \"validmind.model_validation.RougeScore\",\n", - " inputs={\n", - " \"dataset\": vm_test_ds,\n", - " \"model\": vm_rag_model,\n", - " },\n", - " params={\n", - " \"metric\": \"rouge-1\",\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### BLEU Score\n", - "\n", - "The BLEU Score test evaluates the quality of answers generated by the RAG model by measuring n-gram overlap between the generated text and the reference (ground truth) text, with a specific focus on exact precision in phrasing. While ROUGE precision also assesses overlap, BLEU differs in two main ways: first, it applies a geometric average across multiple n-gram levels, capturing precise phrase alignment, and second, it includes a brevity penalty to prevent overly short outputs from inflating scores artificially. This added precision focus is valuable in RAG applications where strict adherence to reference language is essential, as BLEU emphasizes the match to exact phrasing. In contrast, ROUGE precision evaluates general content overlap without penalizing brevity, offering a broader sense of content alignment." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " \"validmind.model_validation.BleuScore\",\n", - " inputs={\n", - " \"dataset\": vm_test_ds,\n", - " \"model\": vm_rag_model,\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### BERT Score\n", - "\n", - "This test evaluates the quality of the RAG generated answers using BERT embeddings to measure precision, recall, and F1 scores based on semantic similarity, rather than exact n-gram matches as in BLEU and ROUGE. 
This approach captures contextual meaning, making it valuable when wording differs but the intended message closely aligns with the reference. In RAG applications, the BERT score is especially useful for ensuring that generated answers convey the reference text’s meaning, even if phrasing varies. Consistently low scores indicate a lack of semantic alignment, suggesting the model may miss or misrepresent key content. Low precision may reflect irrelevant or redundant details, while low recall can indicate omissions." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " \"validmind.model_validation.BertScore\",\n", - " inputs={\n", - " \"dataset\": vm_test_ds,\n", - " \"model\": vm_rag_model,\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### METEOR Score\n", - "\n", - "This test evaluates the quality of the generated answers by measuring alignment with the ground truth, emphasizing both accuracy and fluency. Unlike BLEU and ROUGE, which focus on n-gram matches, METEOR combines precision, recall, synonym matching, and word order, focusing at how well the generated text conveys meaning and reads naturally. This metric is especially useful for RAG applications where sentence structure and natural flow are crucial for clear communication. Lower scores may suggest alignment issues, indicating that the answers may lack fluency or key content. Discrepancies in word order or high fragmentation penalties can reveal problems with how the model constructs sentences, potentially affecting readability." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " \"validmind.model_validation.MeteorScore\",\n", - " inputs={\n", - " \"dataset\": vm_test_ds,\n", - " \"model\": vm_rag_model,\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "## Bias and Toxicity\n", - "\n", - "In this section, we use metrics like Toxicity Score and Regard Score to evaluate both the generated responses and the ground truth. These tests helps us detect any harmful, offensive, or inappropriate language and evaluate the level of bias and neutrality enabling us to assess and mitigate potential biases in both the model's responses and the original dataset." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Toxicity Score\n", - "\n", - "This test measures the level of harmful or offensive content in the generated answers. The test uses a preloaded toxicity detection tool from Hugging Face, which identifies language that may be inappropriate, aggressive, or derogatory. High toxicity scores indicate potentially toxic content, while consistently elevated scores across multiple outputs may signal underlying issues in the model’s generation process that require attention to prevent the spread of harmful language." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " \"validmind.model_validation.ToxicityScore\",\n", - " inputs={\n", - " \"dataset\": vm_test_ds,\n", - " \"model\": vm_rag_model,\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Regard Score\n", - "\n", - "This test evaluates the sentiment and perceived regard—categorized as positive, negative, neutral, or other—in answers generated by the RAG model. 
This is important for identifying any biases or sentiment tendencies in responses, ensuring that generated answers are balanced and appropriate for the context. The uses a preloaded regard evaluation tool from Hugging Face to compute scores for each response. High skewness in regard scores, especially if the generated responses consistently diverge from expected sentiments in the reference texts, may reveal biases in the model’s generation, such as overly positive or negative tones where neutrality is expected." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run_test(\n", - " \"validmind.model_validation.RegardScore\",\n", - " inputs={\n", - " \"dataset\": vm_test_ds,\n", - " \"model\": vm_rag_model,\n", - " },\n", - ").log()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Conclusion\n", - "\n", - "In this notebook, we have seen how we can use LangChain and ValidMind together to build, evaluate and document a simple RAG Model as its developed. This is a great example of the interactive development experience that ValidMind is designed to support. We can quickly iterate on our model and document as we go... We have seen how ValidMind supports non-traditional \"models\" using a functional interface and how we can build pipelines of many models to support complex GenAI workflows.\n", - "\n", - "This is still a work in progress and we are actively developing new tests to support more advanced GenAI workflows. We are also keeping an eye on the most popular GenAI models and libraries to explore direct integrations. Stay tuned for more updates and new features in this area!" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "## Upgrade ValidMind\n", - "\n", - "
After installing ValidMind, you’ll want to periodically make sure you are on the latest version to access any new features and other enhancements.
\n", - "\n", - "Retrieve the information for the currently installed version of ValidMind:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%pip show validmind" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "If the version returned is lower than the version indicated in our [production open-source code](https://github.com/validmind/validmind-library/blob/prod/validmind/__version__.py), restart your notebook and run:\n", - "\n", - "```bash\n", - "%pip install --upgrade validmind\n", - "```" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You may need to restart your kernel after running the upgrade package for changes to be applied." - ] - }, - { - "cell_type": "markdown", - "id": "copyright-397fa35a68a34dc38f5d84d797fb5331", - "metadata": {}, - "source": [ - "\n", - "\n", - "\n", - "\n", - "***\n", - "\n", - "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", - "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", - "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "validmind-py3.10", - "language": "python", - "name": "validmind-py3.10" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.13" - } - }, - "nbformat": 4, - "nbformat_minor": 2 + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# RAG Model Documentation Demo\n", + "\n", + "In this notebook, we are going to implement a simple RAG Model for automating the process of answering RFP questions using GenAI. We will see how we can initialize an embedding model, a retrieval model and a generator model with LangChain components and use them within the ValidMind Library to run tests against them. Finally, we will see how we can put them together in a Pipeline and run that to get e2e results and run tests against that." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "::: {.content-hidden when-format=\"html\"}\n", + "## Contents \n", + "- [About ValidMind](#toc1__) \n", + " - [Before you begin](#toc1_1__) \n", + " - [New to ValidMind?](#toc1_2__) \n", + " - [Key concepts](#toc1_3__) \n", + "- [Setting up](#toc2__) \n", + " - [Initialize the ValidMind Library](#toc2_1__) \n", + " - [Register sample model](#toc2_1_1__) \n", + " - [Apply documentation template](#toc2_1_2__) \n", + " - [Get your code snippet](#toc2_1_3__) \n", + "- [Read Open AI API Key](#toc3__) \n", + "- [Dataset Loader](#toc4__) \n", + "- [Data validation](#toc5__) \n", + " - [Duplicates](#toc5_1__) \n", + " - [Stop Words](#toc5_2__) \n", + " - [Punctuations](#toc5_3__) \n", + " - [Common Words](#toc5_4__) \n", + " - [Language Detection](#toc5_5__) \n", + " - [Toxicity Score](#toc5_6__) \n", + " - [Polarity and Subjectivity](#toc5_7__) \n", + " - [Sentiment](#toc5_8__) \n", + " - [Assign Predictions](#toc5_9__) \n", + " - [Run tests](#toc5_10__) \n", + " - [Generate embeddings for the Train Set](#toc5_11__) \n", + " - [Insert embeddings and questions into Vector DB](#toc5_12__) \n", + "- [Prompt Evaluation](#toc6__) \n", + "- [RAGAS evaluation](#toc7__) \n", + " - [Semantic Similarity](#toc7_1__) \n", + " - [Context Entity Recall](#toc7_2__) \n", + " - [Context Precision](#toc7_3__) \n", + " - [Context Precision Without Reference](#toc7_4__) \n", + " - [Faithfulness](#toc7_5__) \n", + " - [Response Relevancy](#toc7_6__) \n", + " - [Context Recall](#toc7_7__) \n", + " - [Answer Correctness](#toc7_8__) \n", + " - [Aspect Critic](#toc7_9__) \n", + " - [Noise Sensitivity](#toc7_10__) \n", + "- [Generation quality](#toc8__) \n", + " - [Token Disparity](#toc8_1__) \n", + " - [ROUGE Score](#toc8_2__) \n", + " - [BLEU Score](#toc8_3__) \n", + " - [BERT Score](#toc8_4__) \n", + " - [METEOR Score](#toc8_5__) \n", + "- [Bias and Toxicity](#toc9__) \n", + " - [Toxicity Score](#toc9_1__) \n", + " - 
[Regard Score](#toc9_2__) \n", + "- [Upgrade ValidMind](#toc10__) \n", + "\n", + ":::\n", + "\n", + "" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## About ValidMind\n", + "\n", + "ValidMind is a suite of tools for managing model risk, including risk associated with AI and statistical models.\n", + "\n", + "You use the ValidMind Library to automate documentation and validation tests, and then use the ValidMind Platform to collaborate on model documentation. Together, these products simplify model risk management, facilitate compliance with regulations and institutional standards, and enhance collaboration between yourself and model validators.\n", + "\n", + "\n", + "\n", + "### Before you begin\n", + "\n", + "This notebook assumes you have basic familiarity with Python, including an understanding of how functions work. If you are new to Python, you can still run the notebook but we recommend further familiarizing yourself with the language. \n", + "\n", + "If you encounter errors due to missing modules in your Python environment, install the modules with `pip install`, and then re-run the notebook. For more help, refer to [Installing Python Modules](https://docs.python.org/3/installing/index.html).\n", + "\n", + "\n", + "\n", + "### New to ValidMind?\n", + "\n", + "If you haven't already seen our documentation on the [ValidMind Library](https://docs.validmind.ai/developer/validmind-library.html), we recommend you begin by exploring the available resources in this section. There, you can learn more about documenting models and running tests, as well as find code samples and our Python Library API reference.\n", + "\n", + "
For access to all features available in this notebook, you'll need access to a ValidMind account.\n", + "

\n", + "Register with ValidMind
\n", + "\n", + "\n", + "\n", + "### Key concepts\n", + "\n", + "- **FunctionModels**: ValidMind offers support for creating `VMModel` instances from Python functions. This enables us to support any \"model\" by simply using the provided function as the model's `predict` method.\n", + "- **PipelineModels**: ValidMind models (`VMModel` instances) of any type can be piped together to create a model pipeline. This allows model components to be created and tested/documented independently, and then combined into a single model for end-to-end testing and documentation. We use the `|` operator to pipe models together.\n", + "- **RAG**: RAG stands for Retrieval Augmented Generation and refers to a wide range of GenAI applications where some form of retrieval is used to add context to the prompt so that the LLM that generates content can refer to it when creating its output. In this notebook, we are going to implement a simple RAG setup using LangChain components." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Prerequisites\n", + "\n", + "Let's go ahead and install the `validmind` library if its not already installed... 
Then we can install the `qdrant-client` library for our vector store and `langchain` for everything else:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%pip install -q validmind" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%pip install -q qdrant-client langchain langchain-openai sentencepiece" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Setting up" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Initialize the ValidMind Library" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Register sample model\n", + "\n", + "Let's first register a sample model for use with this notebook:\n", + "\n", + "1. In a browser, [log in to ValidMind](https://docs.validmind.ai/guide/configuration/log-in-to-validmind.html).\n", + "\n", + "2. In the left sidebar, navigate to **Inventory** and click **+ Register Model**.\n", + "\n", + "3. Enter the model details and click **Next >** to continue to assignment of model stakeholders. ([Need more help?](https://docs.validmind.ai/guide/model-inventory/register-models-in-inventory.html))\n", + "4. Select your own name under the **MODEL OWNER** drop-down.\n", + "\n", + "5. Click **Register Model** to add the model to your inventory." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Apply documentation template\n", + "\n", + "Once you've registered your model, let's select a documentation template. A template predefines sections for your model documentation and provides a general outline to follow, making the documentation process much easier.\n", + "\n", + "1. In the left sidebar that appears for your model, click **Documents** and select **Documentation**.\n", + "\n", + "2. 
Under **TEMPLATE**, select `Gen AI RAG`.\n", + "\n", + "3. Click **Use Template** to apply the template." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "
Can't select this template?\n", + "

\n", + "Your organization administrators may need to add it to your template library:\n", + "\n", + "
" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Get your code snippet\n", + "\n", + "ValidMind generates a unique _code snippet_ for each registered model to connect with your developer environment. You initialize the ValidMind Library with this code snippet, which ensures that your documentation and tests are uploaded to the correct model when you run the notebook.\n", + "\n", + "1. On the left sidebar that appears for your model, select **Getting Started** and click **Copy snippet to clipboard**.\n", + "2. Next, [load your model identifier credentials from an `.env` file](https://docs.validmind.ai/developer/model-documentation/store-credentials-in-env-file.html) or replace the placeholder with your own code snippet:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Load your model identifier credentials from an `.env` file\n", + "\n", + "%load_ext dotenv\n", + "%dotenv .env\n", + "\n", + "# Or replace with your code snippet\n", + "\n", + "import validmind as vm\n", + "\n", + "vm.init(\n", + " # api_host=\"...\",\n", + " # api_key=\"...\",\n", + " # api_secret=\"...\",\n", + " # model=\"...\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Read Open AI API Key\n", + "\n", + "We will need to have an OpenAI API key to be able to use their `text-embedding-3-small` model for our embeddings, `gpt-3.5-turbo` model for our generator and `gpt-4o` model for our LLM-as-Judge tests. If you don't have an OpenAI API key, you can get one by signing up at [OpenAI](https://platform.openai.com/signup). Then you can create a `.env` file in the root of your project and the following cell will load it from there. Alternatively, you can just uncomment the line below to directly set the key (not recommended for security reasons)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# load openai api key\n", + "import os\n", + "\n", + "import dotenv\n", + "import nltk\n", + "\n", + "dotenv.load_dotenv()\n", + "nltk.download('stopwords')\n", + "nltk.download('punkt_tab')\n", + "\n", + "# os.environ[\"OPENAI_API_KEY\"] = \"sk-...\"\n", + "\n", + "if not \"OPENAI_API_KEY\" in os.environ:\n", + " raise ValueError(\"OPENAI_API_KEY is not set\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Dataset Loader\n", + "\n", + "Great, now that we have all of our dependencies installed, the ValidMind Library initialized and connected to our model and our OpenAI API key setup, we can go ahead and load our datasets. We will use the synthetic `RFP` dataset included with ValidMind for this notebook. This dataset contains a variety of RFP questions and ground truth answers that we can use both as the source where our Retriever will search for similar question-answer pairs as well as our test set for evaluating the performance of our RAG model. To do this, we just have to load it and call the preprocess function to get a split of the data into train and test sets." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "# Import the sample dataset from the library\n", + "from validmind.datasets.llm.rag import rfp\n", + "\n", + "raw_df = rfp.load_data()\n", + "train_df, test_df = rfp.preprocess(raw_df)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "vm_train_ds = vm.init_dataset(\n", + " train_df,\n", + " text_column=\"question\",\n", + " target_column=\"ground_truth\",\n", + ")\n", + "\n", + "vm_test_ds = vm.init_dataset(\n", + " test_df,\n", + " text_column=\"question\",\n", + " target_column=\"ground_truth\",\n", + ")\n", + "\n", + "vm_test_ds.df.head()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Data validation\n", + "\n", + "Now that we have loaded our dataset, we can go ahead and run some data validation tests right away to start assessing and documenting the quality of our data. Since we are using a text dataset, we can use ValidMind's built-in array of text data quality tests to check that things like number of duplicates, missing values, and other common text data issues are not present in our dataset. We can also run some tests to check the sentiment and toxicity of our data." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Duplicates\n", + "\n", + "First, let's check for duplicates in our dataset. 
We can use the `validmind.data_validation.Duplicates` test and pass our dataset:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from validmind.tests import run_test\n", + "\n", + "run_test(\n", + " test_id=\"validmind.data_validation.Duplicates\",\n", + " inputs={\"dataset\": vm_train_ds},\n", + ").log()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Stop Words\n", + "\n", + "Next, let's check for stop words in our dataset. We can use the `validmind.data_validation.StopWords` test and pass our dataset:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " test_id=\"validmind.data_validation.nlp.StopWords\",\n", + " inputs={\n", + " \"dataset\": vm_train_ds,\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Punctuations\n", + "\n", + "Next, let's check for punctuations in our dataset. We can use the `validmind.data_validation.Punctuations` test:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " test_id=\"validmind.data_validation.nlp.Punctuations\",\n", + " inputs={\n", + " \"dataset\": vm_train_ds,\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Common Words\n", + "\n", + "Next, let's check for common words in our dataset. 
We can use the `validmind.data_validation.nlp.CommonWords` test:
"\n", + "\n", + "### Sentiment\n", + "\n", + "Finally, we can run the `validmind.data_validation.nlp.Sentiment` test to plot the sentiment of our dataset:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " \"validmind.data_validation.nlp.Sentiment\",\n", + " inputs={\n", + " \"dataset\": vm_train_ds,\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Embedding Model\n", + "\n", + "Now that we have our dataset loaded and have run some data validation tests to assess and document the quality of our data, we can go ahead and initialize our embedding model. We will use the `text-embedding-3-small` model from OpenAI for this purpose wrapped in the `OpenAIEmbeddings` class from LangChain. This model will be used to \"embed\" our questions both for inserting the question-answer pairs from the \"train\" set into the vector store and for embedding the question from inputs when making predictions with our RAG model." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from langchain_openai import OpenAIEmbeddings\n", + "\n", + "embedding_client = OpenAIEmbeddings(model=\"text-embedding-3-small\")\n", + "\n", + "\n", + "def embed(input):\n", + " \"\"\"Returns a text embedding for the given text\"\"\"\n", + " return embedding_client.embed_query(input[\"question\"])\n", + "\n", + "\n", + "vm_embedder = vm.init_model(input_id=\"embedding_model\", predict_fn=embed)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "What we have done here is to initialize the `OpenAIEmbeddings` class so it uses OpenAI's `text-embedding-3-small` model. We then created an `embed` function that takes in an `input` dictionary and uses the `embed_query` method of the embedding client to compute the embeddings of the `question`. 
We use an `embed` function since that is how ValidMind supports any custom model. We will use this strategy for the retrieval and generator models as well but you could also use, say, a HuggingFace model directly. See the documentation for more information on which model types are directly supported - [ValidMind Documentation](https://docs.validmind.ai/validmind/validmind.html)... Finally, we use the `init_model` function from the ValidMind Library to create a `VMModel` object that can be used in ValidMind tests. This also logs the model to our model documentation and any test that uses the model will be linked to the logged model and its metadata." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Assign Predictions\n", + "\n", + "To precompute the embeddings for our test set, we can call the `assign_predictions` method of our `vm_test_ds` object we created above. This will compute the embeddings for each question in the test set and store them in the a special prediction column of the test set thats linked to our `vm_embedder` model. This will allow us to use these embeddings later when we run tests against our embedding model." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "vm_test_ds.assign_predictions(vm_embedder)\n", + "print(vm_test_ds)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Run tests\n", + "\n", + "Now that everything is setup for the embedding model, we can go ahead and run some tests to assess and document the quality of our embeddings. We will use the `validmind.model_validation.embeddings.*` tests to compute a variety of metrics against our model." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " \"validmind.model_validation.embeddings.StabilityAnalysisRandomNoise\",\n", + " inputs={\n", + " \"model\": vm_embedder,\n", + " \"dataset\": vm_test_ds,\n", + " },\n", + " params={\"probability\": 0.3},\n", + ").log()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " \"validmind.model_validation.embeddings.StabilityAnalysisSynonyms\",\n", + " inputs={\n", + " \"model\": vm_embedder,\n", + " \"dataset\": vm_test_ds,\n", + " },\n", + " params={\"probability\": 0.3},\n", + ").log()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " \"validmind.model_validation.embeddings.StabilityAnalysisTranslation\",\n", + " inputs={\n", + " \"model\": vm_embedder,\n", + " \"dataset\": vm_test_ds,\n", + " },\n", + " params={\n", + " \"source_lang\": \"en\",\n", + " \"target_lang\": \"fr\",\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " \"validmind.model_validation.embeddings.CosineSimilarityHeatmap\",\n", + " inputs={\n", + " \"model\": vm_embedder,\n", + " \"dataset\": vm_test_ds,\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "run_test(\n", + " \"validmind.model_validation.embeddings.CosineSimilarityDistribution\",\n", + " inputs={\n", + " \"model\": vm_embedder,\n", + " \"dataset\": vm_test_ds,\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " \"validmind.model_validation.embeddings.EuclideanDistanceHeatmap\",\n", + " inputs={\n", + " \"model\": vm_embedder,\n", + " 
\"dataset\": vm_test_ds,\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " \"validmind.model_validation.embeddings.PCAComponentsPairwisePlots\",\n", + " inputs={\n", + " \"model\": vm_embedder,\n", + " \"dataset\": vm_test_ds,\n", + " },\n", + " params={\"n_components\": 3},\n", + ").log()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " \"validmind.model_validation.embeddings.TSNEComponentsPairwisePlots\",\n", + " inputs={\n", + " \"model\": vm_embedder,\n", + " \"dataset\": vm_test_ds,\n", + " },\n", + " params={\"n_components\": 3, \"perplexity\": 20},\n", + ").log()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Setup Vector Store\n", + "\n", + "Great, so now that we have assessed our embedding model and verified that it is performing well, we can go ahead and use it to compute embeddings for our question-answer pairs in the \"train\" set. We will then use these embeddings to insert the question-answer pairs into a vector store. We will use an in-memory `qdrant` vector database for demo purposes but any option would work just as well here. We will use the `QdrantClient` class from LangChain to interact with the vector store. This class will allow us to insert and search for embeddings in the vector store." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Generate embeddings for the Train Set\n", + "\n", + "We can use the same `assign_predictions` method from earlier except this time we will use the `vm_train_ds` object to compute the embeddings for the question-answer pairs in the \"train\" set." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "vm_train_ds.assign_predictions(vm_embedder)\n", + "print(vm_train_ds)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Insert embeddings and questions into Vector DB\n", + "\n", + "Now that we have computed the embeddings for our question-answer pairs in the \"train\" set, we can go ahead and insert them into the vector store:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from langchain_community.vectorstores import Qdrant\n", + "from langchain_openai import OpenAIEmbeddings\n", + "from langchain_community.document_loaders import DataFrameLoader\n", + "\n", + "# load documents from dataframe\n", + "loader = DataFrameLoader(train_df, page_content_column=\"question\")\n", + "docs = loader.load()\n", + "# choose model using embedding client\n", + "embedding_client = OpenAIEmbeddings(model=\"text-embedding-3-small\")\n", + "\n", + "# setup vector datastore\n", + "qdrant = Qdrant.from_documents(\n", + " docs,\n", + " embedding_client,\n", + " location=\":memory:\", # Local mode with in-memory storage only\n", + " collection_name=\"rfp_rag_collection\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Retrieval Model\n", + "\n", + "Now that we have an embedding model and a vector database setup and loaded with our data, we need a Retrieval model that can search for similar question-answer pairs for a given input question. Once created, we can initialize this as a ValidMind model and `assign_predictions` to it just like our embedding model." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 27, + "metadata": {}, + "outputs": [], + "source": [ + "def retrieve(input):\n", + " contexts = []\n", + "\n", + " for result in qdrant.similarity_search_with_score(input[\"question\"]):\n", + " document, score = result\n", + " context = f\"Q: {document.page_content}\\n\"\n", + " context += f\"A: {document.metadata['ground_truth']}\\n\"\n", + "\n", + " contexts.append(context)\n", + "\n", + " return contexts\n", + "\n", + "\n", + "vm_retriever = vm.init_model(input_id=\"retrieval_model\", predict_fn=retrieve)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "vm_test_ds.assign_predictions(model=vm_retriever)\n", + "print(vm_test_ds)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Generation Model\n", + "\n", + "As the final piece of this simple RAG pipeline, we can create and initialize a generation model that will use the retrieved context to generate an answer to the input question. We will use the `gpt-3.5-turbo` model from OpenAI." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from openai import OpenAI\n", + "\n", + "from validmind.models import Prompt\n", + "\n", + "\n", + "system_prompt = \"\"\"\n", + "You are an expert RFP AI assistant.\n", + "You are tasked with answering new RFP questions based on existing RFP questions and answers.\n", + "You will be provided with the existing RFP questions and answer pairs that are the most relevant to the new RFP question.\n", + "After that you will be provided with a new RFP question.\n", + "You will generate an answer and respond only with the answer.\n", + "Ignore your pre-existing knowledge and answer the question based on the provided context.\n", + "\"\"\".strip()\n", + "\n", + "openai_client = OpenAI()\n", + "\n", + "\n", + "def generate(input):\n", + " response = openai_client.chat.completions.create(\n", + " model=\"gpt-3.5-turbo\",\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": \"\\n\\n\".join(input[\"retrieval_model\"])},\n", + " {\"role\": \"user\", \"content\": input[\"question\"]},\n", + " ],\n", + " )\n", + "\n", + " return response.choices[0].message.content\n", + "\n", + "\n", + "vm_generator = vm.init_model(\n", + " input_id=\"generation_model\",\n", + " predict_fn=generate,\n", + " prompt=Prompt(template=system_prompt),\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's test it out real quick:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import pandas as pd\n", + "\n", + "vm_generator.predict(\n", + " pd.DataFrame(\n", + " {\"retrieval_model\": [[\"My name is anil\"]], \"question\": [\"what is my name\"]}\n", + " )\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Prompt Evaluation\n", + "\n", + "Now that we have our generator 
model initialized, we can run some LLM-as-Judge tests to evaluate the system prompt. This will allow us to get an initial sense of how well the prompt meets a few best practices for prompt engineering. These tests use an LLM to rate the prompt on a scale of 1-10 against the following criteria:\n", + "\n", + "- **Examplar Bias**: When using multi-shot prompting, does the prompt contain an unbiased distribution of examples?\n", + "- **Delimitation**: When using complex prompts containing examples, contextual information, or other elements, is the prompt formatted in such a way that each element is clearly separated?\n", + "- **Clarity**: How clearly the prompt states the task.\n", + "- **Conciseness**: How succinctly the prompt states the task.\n", + "- **Instruction Framing**: Whether the prompt contains negative instructions.\n", + "- **Specificity**: How specific the prompt defines the task." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " \"validmind.prompt_validation.Bias\",\n", + " inputs={\n", + " \"model\": vm_generator,\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " \"validmind.prompt_validation.Clarity\",\n", + " inputs={\n", + " \"model\": vm_generator,\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " \"validmind.prompt_validation.Conciseness\",\n", + " inputs={\n", + " \"model\": vm_generator,\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " \"validmind.prompt_validation.Delimitation\",\n", + " inputs={\n", + " \"model\": vm_generator,\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + 
"outputs": [], + "source": [ + "run_test(\n", + " \"validmind.prompt_validation.NegativeInstruction\",\n", + " inputs={\n", + " \"model\": vm_generator,\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " \"validmind.prompt_validation.Specificity\",\n", + " inputs={\n", + " \"model\": vm_generator,\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Setup RAG Pipeline Model\n", + "\n", + "Now that we have all of our individual \"component\" models setup and initialized we need some way to put them all together in a single \"pipeline\". We can use the `PipelineModel` class to do this. This ValidMind model type simply wraps any number of other ValidMind models and runs them in sequence. We can use a pipe(`|`) operator - in Python this is normally an `or` operator but we have overloaded it for easy pipeline creation - to chain together our models. We can then initialize this pipeline model and assign predictions to it just like any other model." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "vm_rag_model = vm.init_model(vm_retriever | vm_generator, input_id=\"rag_model\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can `assign_predictions` to the pipeline model just like we did with the individual models. This will run the pipeline on the test set and store the results in the test set for later use." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "vm_test_ds.assign_predictions(model=vm_rag_model)\n", + "print(vm_test_ds)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "vm_test_ds._df.head(5)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Run tests" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## RAGAS evaluation\n", + "\n", + "Let's go ahead and run some of our new RAG tests against our model...\n", + "\n", + "> Note: these tests are still being developed and are not yet in a stable state. We are using advanced tests here that use LLM-as-Judge and other strategies to assess things like the relevancy of the retrieved context to the input question and the correctness of the generated answer when compared to the ground truth. There is more to come in this area so stay tuned!" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import warnings\n", + "\n", + "warnings.filterwarnings(\"ignore\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Semantic Similarity\n", + "\n", + "The concept of Answer Semantic Similarity pertains to the assessment of the semantic resemblance between the generated answer and the ground truth. This evaluation is based on the ground truth and the answer, with values falling within the range of 0 to 1. A higher score signifies a better alignment between the generated answer and the ground truth.\n", + "\n", + "Measuring the semantic similarity between answers can offer valuable insights into the quality of the generated response. This evaluation utilizes a cross-encoder model to calculate the semantic similarity score." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " \"validmind.model_validation.ragas.SemanticSimilarity\",\n", + " inputs={\"dataset\": vm_test_ds},\n", + " params={\n", + " \"response_column\": \"rag_model_prediction\",\n", + " \"reference_column\": \"ground_truth\",\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Context Entity Recall\n", + "\n", + "This test gives the measure of recall of the retrieved context, based on the number of entities present in both ground_truths and contexts relative to the number of entities present in the ground_truths alone. Simply put, it is a measure of what fraction of entities are recalled from ground_truths. This test is useful in fact-based use cases like tourism help desk, historical QA, etc. This test can help evaluate the retrieval mechanism for entities, based on comparison with entities present in ground_truths, because in cases where entities matter, we need the contexts which cover them." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " \"validmind.model_validation.ragas.ContextEntityRecall\",\n", + " inputs={\"dataset\": vm_test_ds},\n", + " params={\n", + " \"reference_column\": \"ground_truth\",\n", + " \"retrieved_contexts_column\": \"retrieval_model_prediction\",\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Context Precision\n", + "\n", + "Context Precision is a test that evaluates whether all of the ground-truth relevant items present in the contexts are ranked higher or not. Ideally all the relevant chunks must appear at the top ranks. This test is computed using the question, ground_truth and the contexts, with values ranging between 0 and 1, where higher scores indicate better precision." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " \"validmind.model_validation.ragas.ContextPrecision\",\n", + " inputs={\"dataset\": vm_test_ds},\n", + " params={\n", + " \"user_input_column\": \"question\",\n", + " \"retrieved_contexts_column\": \"retrieval_model_prediction\",\n", + " \"reference_column\": \"ground_truth\",\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Context Precision Without Reference\n", + "\n", + "This test evaluates whether retrieved contexts align well with the expected response for a given user input, without requiring a ground-truth reference. This test assesses the relevance of each retrieved context chunk by comparing it directly to the response." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " \"validmind.model_validation.ragas.ContextPrecisionWithoutReference\",\n", + " inputs={\"dataset\": vm_test_ds},\n", + " params={\n", + " \"user_input_column\": \"question\",\n", + " \"retrieved_contexts_column\": \"retrieval_model_prediction\",\n", + " \"response_column\": \"rag_model_prediction\",\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Faithfulness\n", + "\n", + "This measures the factual consistency of the generated answer against the given context. It is calculated from answer and retrieved context. The answer is scaled to (0,1) range. Higher the better.\n", + "\n", + "The generated answer is regarded as faithful if all the claims that are made in the answer can be inferred from the given context. To calculate this a set of claims from the generated answer is first identified. Then each one of these claims are cross checked with given context to determine if it can be inferred from given context or not." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " \"validmind.model_validation.ragas.Faithfulness\",\n", + " inputs={\"dataset\": vm_test_ds},\n", + " params={\n", + " \"user_input_column\": \"question\",\n", + " \"response_column\": \"rag_model_prediction\",\n", + " \"retrieved_contexts_column\": \"retrieval_model_prediction\",\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Response Relevancy\n", + "\n", + "The Response Relevancy test, focuses on assessing how pertinent the generated answer is to the given prompt. A lower score is assigned to answers that are incomplete or contain redundant information and higher scores indicate better relevancy. This test is computed using the question, the context and the answer.\n", + "\n", + "The Response Relevancy is defined as the mean cosine similartiy of the original question to a number of artifical questions, which where generated (reverse engineered) based on the answer.\n", + "\n", + "Please note, that eventhough in practice the score will range between 0 and 1 most of the time, this is not mathematically guranteed, due to the nature of the cosine similarity ranging from -1 to 1.\n", + "\n", + "> Note: This is a reference free test. If you’re looking to compare ground truth answer with generated answer refer to Answer Correctness.\n", + "\n", + "An answer is deemed relevant when it directly and appropriately addresses the original question. Importantly, our assessment of answer relevance does not consider factuality but instead penalizes cases where the answer lacks completeness or contains redundant details. To calculate this score, the LLM is prompted to generate an appropriate question for the generated answer multiple times, and the mean cosine similarity between these generated questions and the original question is measured. 
The underlying idea is that if the generated answer accurately addresses the initial question, the LLM should be able to generate questions from the answer that align with the original question." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " \"validmind.model_validation.ragas.ResponseRelevancy\",\n", + " inputs={\"dataset\": vm_test_ds},\n", + " params={\n", + " \"user_input_column\": \"question\",\n", + " \"response_column\": \"rag_model_prediction\",\n", + " \"retrieved_contexts_column\": \"retrieval_model_prediction\",\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Context Recall\n", + "\n", + "Context recall measures the extent to which the retrieved context aligns with the annotated answer, treated as the ground truth. It is computed based on the ground truth and the retrieved context, and the values range between 0 and 1, with higher values indicating better performance.\n", + "\n", + "To estimate context recall from the ground truth answer, each sentence in the ground truth answer is analyzed to determine whether it can be attributed to the retrieved context or not. In an ideal scenario, all sentences in the ground truth answer should be attributable to the retrieved context." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " \"validmind.model_validation.ragas.ContextRecall\",\n", + " inputs={\"dataset\": vm_test_ds},\n", + " params={\n", + " \"user_input_column\": \"question\",\n", + " \"retrieved_contexts_column\": \"retrieval_model_prediction\",\n", + " \"reference_column\": \"ground_truth\",\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Answer Correctness\n", + "\n", + "The assessment of Answer Correctness involves gauging the accuracy of the generated answer when compared to the ground truth. This evaluation relies on the ground truth and the answer, with scores ranging from 0 to 1. A higher score indicates a closer alignment between the generated answer and the ground truth, signifying better correctness.\n", + "\n", + "Answer correctness encompasses two critical aspects: semantic similarity between the generated answer and the ground truth, as well as factual similarity. These aspects are combined using a weighted scheme to formulate the answer correctness score.\n", + "\n", + "Factual correctness quantifies the factual overlap between the generated answer and the ground truth answer. This is done using the concepts of:\n", + "\n", + "- TP (True Positive): Facts or statements that are present in both the ground truth and the generated answer.\n", + "- FP (False Positive): Facts or statements that are present in the generated answer but not in the ground truth.\n", + "- FN (False Negative): Facts or statements that are present in the ground truth but not in the generated answer." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " \"validmind.model_validation.ragas.AnswerCorrectness\",\n", + " inputs={\"dataset\": vm_test_ds},\n", + " params={\n", + " \"user_input_column\": \"question\",\n", + " \"response_column\": \"rag_model_prediction\",\n", + " \"reference_column\": \"ground_truth\",\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Aspect Critic\n", + "\n", + "This is designed to assess submissions based on predefined aspects such as harmlessness and correctness. Additionally, users have the flexibility to define their own aspects for evaluating submissions according to their specific criteria. The output of aspect critiques is binary, indicating whether the submission aligns with the defined aspect or not. This evaluation is performed using the ‘answer’ as input.\n", + "\n", + "Critiques within the LLM evaluators evaluate submissions based on the provided aspect. Ragas Critiques offers a range of predefined aspects like correctness, harmfulness, etc. Users can also define their own aspects for evaluating submissions based on their specific criteria. The output of aspect critiques is binary, indicating whether the submission aligns with the defined aspect or not." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " \"validmind.model_validation.ragas.AspectCritic\",\n", + " inputs={\"dataset\": vm_test_ds},\n", + " params={\n", + " \"user_input_column\": \"question\",\n", + " \"response_column\": \"rag_model_prediction\",\n", + " \"retrieved_contexts_column\": \"retrieval_model_prediction\",\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Noise Sensitivity\n", + "\n", + "This test is designed to evaluate the robustness of the RAG pipeline model against noise in the retrieved context. It works by checking how well the \"claims\" in the generated answer match up with the \"claims\" in the ground truth answer. If the generated answer contains \"claims\" from the contexts that the ground truth answer does not contain, those claims are considered incorrect. The score for each answer is the number of incorrect claims divided by the total number of claims. This *can* be interpreted as a measure of how sensitive the LLM is to \"noise\" in the context where \"noise\" is information that is relevant but should not be included in the answer since the ground truth answer does not contain it." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " \"validmind.model_validation.ragas.NoiseSensitivity\",\n", + " inputs={\"dataset\": vm_test_ds},\n", + " params={\n", + " \"user_input_column\": \"question\",\n", + " \"response_column\": \"rag_model_prediction\",\n", + " \"reference_column\": \"ground_truth\",\n", + " \"retrieved_contexts_column\": \"retrieval_model_prediction\",\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Generation quality\n", + "\n", + "In this section, we evaluate the alignment and relevance of generated responses to reference outputs within our retrieval-augmented generation (RAG) application. We use metrics that assess various quality dimensions of the generated responses, including semantic similarity, structural alignment, and phrasing overlap. Semantic similarity metrics compare embeddings of generated and reference text to capture deeper contextual alignment, while overlap and alignment measures quantify how well the phrasing and structure of generated responses match the intended outputs." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Token Disparity\n", + "\n", + "This test assesses the difference in token counts between the reference texts (ground truth) and the answers generated by the RAG model. It helps evaluate how well the model's outputs align with the expected length and level of detail in the reference texts. A significant disparity in token counts could signal issues with generation quality, such as excessive verbosity or insufficient detail. Consistently low token counts in generated answers compared to references might suggest that the model’s outputs are incomplete or overly concise, missing important contextual information." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " \"validmind.model_validation.TokenDisparity\",\n", + " inputs={\n", + " \"dataset\": vm_test_ds,\n", + " \"model\": vm_rag_model,\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### ROUGE Score\n", + "\n", + "This test evaluates the quality of answers generated by the RAG model by measuring overlaps in n-grams, word sequences, and word pairs between the model output and the reference (ground truth) text. ROUGE, short for Recall-Oriented Understudy for Gisting Evaluation, assesses both precision and recall, providing a balanced view of how well the generated response captures the reference content. ROUGE precision measures the proportion of n-grams in the generated text that match the reference, highlighting relevance and conciseness, while ROUGE recall assesses the proportion of reference n-grams present in the generated text, indicating completeness and thoroughness. \n", + "\n", + "Low precision scores might reveal that the generated text includes redundant or irrelevant information, while low recall scores suggest omissions of essential details from the reference. Consistently low ROUGE scores could indicate poor overall alignment with the ground truth, suggesting the model may be missing key content or failing to capture the intended meaning." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " \"validmind.model_validation.RougeScore\",\n", + " inputs={\n", + " \"dataset\": vm_test_ds,\n", + " \"model\": vm_rag_model,\n", + " },\n", + " params={\n", + " \"metric\": \"rouge-1\",\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### BLEU Score\n", + "\n", + "The BLEU Score test evaluates the quality of answers generated by the RAG model by measuring n-gram overlap between the generated text and the reference (ground truth) text, with a specific focus on exact precision in phrasing. While ROUGE precision also assesses overlap, BLEU differs in two main ways: first, it applies a geometric average across multiple n-gram levels, capturing precise phrase alignment, and second, it includes a brevity penalty to prevent overly short outputs from inflating scores artificially. This added precision focus is valuable in RAG applications where strict adherence to reference language is essential, as BLEU emphasizes the match to exact phrasing. In contrast, ROUGE precision evaluates general content overlap without penalizing brevity, offering a broader sense of content alignment." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " \"validmind.model_validation.BleuScore\",\n", + " inputs={\n", + " \"dataset\": vm_test_ds,\n", + " \"model\": vm_rag_model,\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### BERT Score\n", + "\n", + "This test evaluates the quality of the RAG generated answers using BERT embeddings to measure precision, recall, and F1 scores based on semantic similarity, rather than exact n-gram matches as in BLEU and ROUGE. 
This approach captures contextual meaning, making it valuable when wording differs but the intended message closely aligns with the reference. In RAG applications, the BERT score is especially useful for ensuring that generated answers convey the reference text’s meaning, even if phrasing varies. Consistently low scores indicate a lack of semantic alignment, suggesting the model may miss or misrepresent key content. Low precision may reflect irrelevant or redundant details, while low recall can indicate omissions." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " \"validmind.model_validation.BertScore\",\n", + " inputs={\n", + " \"dataset\": vm_test_ds,\n", + " \"model\": vm_rag_model,\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### METEOR Score\n", + "\n", + "This test evaluates the quality of the generated answers by measuring alignment with the ground truth, emphasizing both accuracy and fluency. Unlike BLEU and ROUGE, which focus on n-gram matches, METEOR combines precision, recall, synonym matching, and word order, focusing at how well the generated text conveys meaning and reads naturally. This metric is especially useful for RAG applications where sentence structure and natural flow are crucial for clear communication. Lower scores may suggest alignment issues, indicating that the answers may lack fluency or key content. Discrepancies in word order or high fragmentation penalties can reveal problems with how the model constructs sentences, potentially affecting readability." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " \"validmind.model_validation.MeteorScore\",\n", + " inputs={\n", + " \"dataset\": vm_test_ds,\n", + " \"model\": vm_rag_model,\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Bias and Toxicity\n", + "\n", + "In this section, we use metrics like Toxicity Score and Regard Score to evaluate both the generated responses and the ground truth. These tests helps us detect any harmful, offensive, or inappropriate language and evaluate the level of bias and neutrality enabling us to assess and mitigate potential biases in both the model's responses and the original dataset." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Toxicity Score\n", + "\n", + "This test measures the level of harmful or offensive content in the generated answers. The test uses a preloaded toxicity detection tool from Hugging Face, which identifies language that may be inappropriate, aggressive, or derogatory. High toxicity scores indicate potentially toxic content, while consistently elevated scores across multiple outputs may signal underlying issues in the model’s generation process that require attention to prevent the spread of harmful language." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " \"validmind.model_validation.ToxicityScore\",\n", + " inputs={\n", + " \"dataset\": vm_test_ds,\n", + " \"model\": vm_rag_model,\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Regard Score\n", + "\n", + "This test evaluates the sentiment and perceived regard—categorized as positive, negative, neutral, or other—in answers generated by the RAG model. 
This is important for identifying any biases or sentiment tendencies in responses, ensuring that generated answers are balanced and appropriate for the context. The uses a preloaded regard evaluation tool from Hugging Face to compute scores for each response. High skewness in regard scores, especially if the generated responses consistently diverge from expected sentiments in the reference texts, may reveal biases in the model’s generation, such as overly positive or negative tones where neutrality is expected." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_test(\n", + " \"validmind.model_validation.RegardScore\",\n", + " inputs={\n", + " \"dataset\": vm_test_ds,\n", + " \"model\": vm_rag_model,\n", + " },\n", + ").log()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Conclusion\n", + "\n", + "In this notebook, we have seen how we can use LangChain and ValidMind together to build, evaluate and document a simple RAG Model as its developed. This is a great example of the interactive development experience that ValidMind is designed to support. We can quickly iterate on our model and document as we go... We have seen how ValidMind supports non-traditional \"models\" using a functional interface and how we can build pipelines of many models to support complex GenAI workflows.\n", + "\n", + "This is still a work in progress and we are actively developing new tests to support more advanced GenAI workflows. We are also keeping an eye on the most popular GenAI models and libraries to explore direct integrations. Stay tuned for more updates and new features in this area!" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Upgrade ValidMind\n", + "\n", + "
After installing ValidMind, you’ll want to periodically make sure you are on the latest version to access any new features and other enhancements.
\n", + "\n", + "Retrieve the information for the currently installed version of ValidMind:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%pip show validmind" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "If the version returned is lower than the version indicated in our [production open-source code](https://github.com/validmind/validmind-library/blob/prod/validmind/__version__.py), restart your notebook and run:\n", + "\n", + "```bash\n", + "%pip install --upgrade validmind\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You may need to restart your kernel after running the upgrade package for changes to be applied." + ] + }, + { + "cell_type": "markdown", + "id": "copyright-397fa35a68a34dc38f5d84d797fb5331", + "metadata": {}, + "source": [ + "\n", + "\n", + "\n", + "\n", + "***\n", + "\n", + "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", + "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", + "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "validmind-py3.10", + "language": "python", + "name": "validmind-py3.10" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.13" + } + }, + "nbformat": 4, + "nbformat_minor": 2 } From 5e490ab4432263eda362bba5739017d5a629d0cd Mon Sep 17 00:00:00 2001 From: Beck <164545837+validbeck@users.noreply.github.com> Date: Tue, 10 Feb 2026 13:52:35 -0800 Subject: [PATCH 12/14] Edit --- .../run_tests/configure_tests}/understand_utilize_rawdata.ipynb | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename notebooks/how_to/{data_and_datasets => tests/run_tests/configure_tests}/understand_utilize_rawdata.ipynb (100%) diff --git a/notebooks/how_to/data_and_datasets/understand_utilize_rawdata.ipynb b/notebooks/how_to/tests/run_tests/configure_tests/understand_utilize_rawdata.ipynb similarity index 100% rename from notebooks/how_to/data_and_datasets/understand_utilize_rawdata.ipynb rename to notebooks/how_to/tests/run_tests/configure_tests/understand_utilize_rawdata.ipynb From 8d8660ccab117432b3b55f067664bdf9b91926cb Mon Sep 17 00:00:00 2001 From: Beck <164545837+validbeck@users.noreply.github.com> Date: Tue, 10 Feb 2026 14:07:50 -0800 Subject: [PATCH 13/14] Edit --- .../run_tests/configure_tests}/enable_pii_detection.ipynb | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename notebooks/how_to/{security => tests/run_tests/configure_tests}/enable_pii_detection.ipynb (100%) diff --git a/notebooks/how_to/security/enable_pii_detection.ipynb b/notebooks/how_to/tests/run_tests/configure_tests/enable_pii_detection.ipynb similarity index 100% rename from notebooks/how_to/security/enable_pii_detection.ipynb rename to notebooks/how_to/tests/run_tests/configure_tests/enable_pii_detection.ipynb From 
e6af96979410a7276c75cd193191f85f43808c82 Mon Sep 17 00:00:00 2001 From: Beck <164545837+validbeck@users.noreply.github.com> Date: Tue, 10 Feb 2026 15:48:51 -0800 Subject: [PATCH 14/14] YAML template copyright --- notebooks/use_cases/agents/agentic_ai_template.yaml | 4 ++++ .../use_cases/capital_markets/capital_markets_template.yaml | 4 ++++ .../model_source_code_documentation_template.yaml | 4 ++++ notebooks/use_cases/nlp_and_llm/gen_ai_rag_template.yaml | 4 ++++ 4 files changed, 16 insertions(+) diff --git a/notebooks/use_cases/agents/agentic_ai_template.yaml b/notebooks/use_cases/agents/agentic_ai_template.yaml index 469df8ce2..06ef71c9c 100644 --- a/notebooks/use_cases/agents/agentic_ai_template.yaml +++ b/notebooks/use_cases/agents/agentic_ai_template.yaml @@ -1,3 +1,7 @@ +# Copyright © 2023-2026 ValidMind Inc. All rights reserved. +# Refer to the LICENSE file in the root of this repository for details. +# SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial + - id: executive_summary title: Executive Summary guidelines: diff --git a/notebooks/use_cases/capital_markets/capital_markets_template.yaml b/notebooks/use_cases/capital_markets/capital_markets_template.yaml index c891a1c43..9cb561dc2 100644 --- a/notebooks/use_cases/capital_markets/capital_markets_template.yaml +++ b/notebooks/use_cases/capital_markets/capital_markets_template.yaml @@ -1,3 +1,7 @@ +# Copyright © 2023-2026 ValidMind Inc. All rights reserved. +# Refer to the LICENSE file in the root of this repository for details. 
+# SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial + - id: model_metadata title: Model Metadata and Stakeholders index_only: true diff --git a/notebooks/use_cases/code_explainer/model_source_code_documentation_template.yaml b/notebooks/use_cases/code_explainer/model_source_code_documentation_template.yaml index 92141e751..7065b35a7 100644 --- a/notebooks/use_cases/code_explainer/model_source_code_documentation_template.yaml +++ b/notebooks/use_cases/code_explainer/model_source_code_documentation_template.yaml @@ -1,3 +1,7 @@ +# Copyright © 2023-2026 ValidMind Inc. All rights reserved. +# Refer to the LICENSE file in the root of this repository for details. +# SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial + - id: code_overview title: Codebase Overview guidelines: diff --git a/notebooks/use_cases/nlp_and_llm/gen_ai_rag_template.yaml b/notebooks/use_cases/nlp_and_llm/gen_ai_rag_template.yaml index 5ff6064e9..4f074f6a3 100644 --- a/notebooks/use_cases/nlp_and_llm/gen_ai_rag_template.yaml +++ b/notebooks/use_cases/nlp_and_llm/gen_ai_rag_template.yaml @@ -1,3 +1,7 @@ +# Copyright © 2023-2026 ValidMind Inc. All rights reserved. +# Refer to the LICENSE file in the root of this repository for details. +# SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial + - id: conceptual_soundness title: Conceptual Soundness index_only: true