From 93792e35b67bd94fcac86d74140f981cb1331ecc Mon Sep 17 00:00:00 2001
From: MohammedTaherMcW
Date: Thu, 31 Jul 2025 04:47:24 +0000
Subject: [PATCH 1/4] Add Performance and Accuracy pipeline support for gemma-3-1b-it

---
 models/tt_transformers/PERF.md                |  2 +
 .../tt_transformers/demo/simple_text_demo.py  | 48 ++++++++++++-------
 models/tt_transformers/tests/test_accuracy.py | 21 +++++++-
 3 files changed, 51 insertions(+), 20 deletions(-)

diff --git a/models/tt_transformers/PERF.md b/models/tt_transformers/PERF.md
index 737ef759fdbb..e301d5b3a7b3 100644
--- a/models/tt_transformers/PERF.md
+++ b/models/tt_transformers/PERF.md
@@ -45,6 +45,7 @@ This configuration uses bfp4 MLP and bfp8 attention weights for all models excep
 | Mistral-7B | N150 | 95 | 99 | 29.75 | 100.24 |
 | Mistral-7B | N300 | 95 | 99 | 47.01 | 65.95 |
 | Mistral-7B | T3K | 95 | 99 | 67.82 | 53.93 |
+| gemma-3-1b | N150 | 30 | 40 | 40.00 | 40.00 |

 ## Accuracy

@@ -82,6 +83,7 @@ Llama 3 models test as insensitive to attention precision and so we use bfp8 att
 | Mistral-7B | N150 | 95 | 99 | 29.75 | 100.24 |
 | Mistral-7B | N300 | 95 | 99 | 47.01 | 65.95 |
 | Mistral-7B | T3K | 95 | 99 | 67.82 | 53.93 |
+| gemma-3-1b | N150 | 30 | 40 | 40.00 | 40.00 |

 ## Long-context (64K Tokens)

diff --git a/models/tt_transformers/demo/simple_text_demo.py b/models/tt_transformers/demo/simple_text_demo.py
index c58504228ef4..c0c19c12dcb7 100644
--- a/models/tt_transformers/demo/simple_text_demo.py
+++ b/models/tt_transformers/demo/simple_text_demo.py
@@ -482,30 +482,30 @@ def prepare_generator_args(
     ],
     ids=[
         "batch-1",  # latency
-        "batch-32",  # throughput
-        "long-context-64k",  # 64k context, max_seq_len=128k
-        "long-context-32k",  # 32k context, max_seq_len=32k
-        "long-context-16k",  # 16k context, max_seq_len=32k
-        "reasoning-1",  # reasoning
-        "ci-1",  # CI batch 1
-        "ci-32",  # CI batch 32
-        "DP-4-b1",  # DP 4 latency
-        "DP-8-b1",  # DP 8 latency
-        "DP-4-b32",  # DP 4 throughput
-        "ci-b1-DP-4",  # CI DP 4 batch 1
-        "ci-b1-DP-8",  # CI DP 8 batch 1
-        "ci-b1-DP-16",  # CI DP 16 batch 1
-        "ci-b1-DP-32",  # CI DP 32 batch 1
-        "ci-stress-1",  # CI Stress test batch-1
+        # "batch-32",  # throughput
+        # "long-context-64k",  # 64k context, max_seq_len=128k
+        # "long-context-32k",  # 32k context, max_seq_len=32k
+        # "long-context-16k",  # 16k context, max_seq_len=32k
+        # "reasoning-1",  # reasoning
+        # "ci-1",  # CI batch 1
+        # "ci-32",  # CI batch 32
+        # "DP-4-b1",  # DP 4 latency
+        # "DP-8-b1",  # DP 8 latency
+        # "DP-4-b32",  # DP 4 throughput
+        # "ci-b1-DP-4",  # CI DP 4 batch 1
+        # "ci-b1-DP-8",  # CI DP 8 batch 1
+        # "ci-b1-DP-16",  # CI DP 16 batch 1
+        # "ci-b1-DP-32",  # CI DP 32 batch 1
+        # "ci-stress-1",  # CI Stress test batch-1
     ],
 )
 @pytest.mark.parametrize(
     "optimizations",
     [
-        lambda model_args: DecodersPrecision.performance(model_args.n_layers, model_args.model_name),
+        # lambda model_args: DecodersPrecision.performance(model_args.n_layers, model_args.model_name),
         lambda model_args: DecodersPrecision.accuracy(model_args.n_layers, model_args.model_name),
     ],
-    ids=["performance", "accuracy"],
+    ids=["accuracy"],
 )
 @pytest.mark.parametrize("device_params", [{"trace_region_size": 30000000, "num_command_queues": 1}], indirect=True)
 @pytest.mark.parametrize(
@@ -945,7 +945,15 @@ def test_demo_text(
     )

     # Benchmark targets
-    supported_models = ["Llama-3.2-1B", "Llama-3.2-3B", "Llama-3.1-8B", "Llama-3.2-11B", "Llama-3.1-70B", "Mistral-7B"]
+    supported_models = [
+        "Llama-3.2-1B",
+        "Llama-3.2-3B",
+        "Llama-3.1-8B",
+        "Llama-3.2-11B",
+        "Llama-3.1-70B",
+        "Mistral-7B",
"gemma-3-1b", + ] supported_devices = ["N150", "P100", "P150", "P300", "N300", "P150x4", "T3K", "TG"] tt_device_name = determine_device_name(mesh_device) # submesh device should not decide performance target @@ -994,6 +1002,8 @@ def test_demo_text( "N300_Mistral-7B": 38, # TODO Update target "T3K_Mistral-7B": 45, # TODO Update target "TG_Mistral-7B": 45, # TODO Update target + # + "N150_gemma-3-1b": 25, } if model_device_key in dict_target_decode_tok_s_u: target_decode_tok_s_u = dict_target_decode_tok_s_u[model_device_key] @@ -1075,6 +1085,7 @@ def test_demo_text( # "T3K_Qwen2.5-Coder-32B": 180, # too much variability in CI (https://github.com/tenstorrent/tt-metal/issues/24754) # "T3K_Qwen2.5-72B": 211, # too much variability in CI (https://github.com/tenstorrent/tt-metal/issues/24754) # "T3K_Qwen3-32B": 250, # too much variability in CI (https://github.com/tenstorrent/tt-metal/issues/24754) + "N150_gemma-3-1b": 100, } ci_target_decode_tok_s_u = { # N150 targets - higher is better @@ -1082,6 +1093,7 @@ def test_demo_text( "N150_Llama-3.2-3B": 35, "N150_Llama-3.1-8B": 21, "N150_Mistral-7B": 23, + "N150_gemma-3-1b": 25, # N300 targets "N300_Qwen2.5-7B": 20, # T3K targets diff --git a/models/tt_transformers/tests/test_accuracy.py b/models/tt_transformers/tests/test_accuracy.py index 78de89940988..2515dab97864 100644 --- a/models/tt_transformers/tests/test_accuracy.py +++ b/models/tt_transformers/tests/test_accuracy.py @@ -245,6 +245,19 @@ def test_tt_model_acc( theta=model_args.rope_theta, rope_scaling=model_args.rope_scaling, ) + + if model_args.rope_local_theta is not None: + rope_setup_prefill_local = get_prefill_rot_mat( + model_args.head_dim, + mesh_device, + prefill_lens[0], + model_args.rope_local_theta, + model_args.rope_scaling_factor, + model_args.orig_context_len, + ) + else: + rope_setup_prefill_local = None + prefill_input = model_args.prepare_residual_tensor_prefill( pt_prefill_input[batch_id], ) @@ -252,7 +265,7 @@ def test_tt_model_acc( tt_out = tt_model( prefill_input, current_pos=None, - rot_mats=rot_mats_prefill, + rot_mats=[rot_mats_prefill, rope_setup_prefill_local], user_id=batch_id, mode="prefill", page_table=page_table_tt, @@ -280,6 +293,7 @@ def test_tt_model_acc( # Get cos/sin matrices for the current position of each user rot_mats = tt_model.rope_setup.get_rot_mats(current_pos) + rot_mats_local = None if tt_model.rope_setup_local is None else tt_model.rope_setup_local.get_rot_mats(current_pos) # Print table header if use_reference_file: @@ -310,7 +324,7 @@ def test_tt_model_acc( tt_out = tt_model( decode_input, current_pos_tensor, - rot_mats=rot_mats, + rot_mats=[rot_mats, rot_mats_local], mode="decode", page_table=page_table_tt, ) @@ -351,6 +365,9 @@ def test_tt_model_acc( # Update rot_mats for next iteration current_pos += 1 rot_mats = tt_model.rope_setup.get_rot_mats(current_pos) + rot_mats_local = ( + None if tt_model.rope_setup_local is None else tt_model.rope_setup_local.get_rot_mats(current_pos) + ) # Modify the accuracy checking section when using reference text if not use_reference_file: From 1b9c7887825bac0fc26171051f8e984eae6761dc Mon Sep 17 00:00:00 2001 From: Mohammed Taher Rasheed Date: Tue, 5 Aug 2025 16:45:15 +0000 Subject: [PATCH 2/4] Add CI test support for gemma-3-1b-it --- .github/workflows/single-card-demo-tests-impl.yaml | 1 + tests/scripts/single_card/run_single_card_demo_tests.sh | 7 +++++++ 2 files changed, 8 insertions(+) diff --git a/.github/workflows/single-card-demo-tests-impl.yaml b/.github/workflows/single-card-demo-tests-impl.yaml 
From 1b9c7887825bac0fc26171051f8e984eae6761dc Mon Sep 17 00:00:00 2001
From: Mohammed Taher Rasheed
Date: Tue, 5 Aug 2025 16:45:15 +0000
Subject: [PATCH 2/4] Add CI test support for gemma-3-1b-it

---
 .github/workflows/single-card-demo-tests-impl.yaml      | 1 +
 tests/scripts/single_card/run_single_card_demo_tests.sh | 7 +++++++
 2 files changed, 8 insertions(+)

diff --git a/.github/workflows/single-card-demo-tests-impl.yaml b/.github/workflows/single-card-demo-tests-impl.yaml
index 92c115ab9c96..c5782e2947b7 100644
--- a/.github/workflows/single-card-demo-tests-impl.yaml
+++ b/.github/workflows/single-card-demo-tests-impl.yaml
@@ -88,6 +88,7 @@ jobs:
           # # Moved to t3k tests until OOM on single card runners resolved
           # { name: "qwen7b", runner-label: "N300", performance: false, cmd: run_qwen7b_func, owner_id: U03PUAKE719}, # Mark O'Connor
           { name: "qwen25_vl", runner-label: "N300", performance: false, cmd: run_qwen25_vl_func, owner_id: U07RY6B5FLJ}, #Gongyu Wang
+          # { name: "gemma3_1b", runner-label: "N150", performance: false, cmd: run_gemma3_1b_func, owner_id:}, # TODO Owner ID needs to be updated
         ]
     name: ${{ matrix.test-group.name }}-${{ matrix.test-group.runner-label }}-${{ (matrix.test-group.performance && 'perf') || 'func' }}
     env:
diff --git a/tests/scripts/single_card/run_single_card_demo_tests.sh b/tests/scripts/single_card/run_single_card_demo_tests.sh
index a058614bb532..e75b77457c2f 100755
--- a/tests/scripts/single_card/run_single_card_demo_tests.sh
+++ b/tests/scripts/single_card/run_single_card_demo_tests.sh
@@ -49,6 +49,13 @@ run_qwen25_vl_func() {
   fi
 }

+
+run_gemma3_1b_func(){
+
+  MESH_DEVICE=N150 HF_MODEL=google/gemma-3-1b-it WH_ARCH_YAML=wormhole_b0_80_arch_eth_dispatch.yaml pytest -n auto models/tt_transformers/demo/simple_text_demo.py -k performance-ci-1 --timeout 1800
+
+}
+
 run_segformer_func() {
   #Segformer Segmentation Demo
   WH_ARCH_YAML=wormhole_b0_80_arch_eth_dispatch.yaml pytest --disable-warnings models/demos/segformer/demo/demo_for_semantic_segmentation.py --timeout 600; fail+=$?
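
[PATCH 2/4] wires the demo into CI in two places: a workflow-matrix entry that is still commented out pending an owner id, and the run_gemma3_1b_func helper, which invokes pytest with -k performance-ci-1, i.e. the "performance" optimization id combined with the "ci-1" input id. Note that this selection only matches once the "performance" id removed in PATCH 1 is restored by PATCH 3 below. A rough sketch of reproducing the same invocation from Python follows, assuming a local tt-metal checkout, an attached N150 device, and the pytest-timeout plugin used by the CI command (the CI script also uses pytest-xdist via -n auto, which is omitted here):

# Rough local-reproduction sketch of the run_gemma3_1b_func CI hook above.
# Assumes a tt-metal checkout as the working directory and pytest-timeout installed;
# this is not the CI script itself.
import os
import pytest

os.environ.setdefault("MESH_DEVICE", "N150")
os.environ.setdefault("HF_MODEL", "google/gemma-3-1b-it")
os.environ.setdefault("WH_ARCH_YAML", "wormhole_b0_80_arch_eth_dispatch.yaml")

# Same test selection as the shell hook: performance optimizations with the ci-1 inputs.
exit_code = pytest.main(
    [
        "models/tt_transformers/demo/simple_text_demo.py",
        "-k", "performance-ci-1",
        "--timeout", "1800",
    ]
)
print(f"pytest exited with {exit_code}")
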
From d6cbc2500ad636d1613bd05267b0e1846a0bea2d Mon Sep 17 00:00:00 2001
From: MohammedTaherMcW
Date: Thu, 31 Jul 2025 05:48:57 +0000
Subject: [PATCH 3/4] Fix support for multiple test cases

---
 .../tt_transformers/demo/simple_text_demo.py | 34 +++++++++----------
 1 file changed, 17 insertions(+), 17 deletions(-)

diff --git a/models/tt_transformers/demo/simple_text_demo.py b/models/tt_transformers/demo/simple_text_demo.py
index c0c19c12dcb7..b8326b2861c9 100644
--- a/models/tt_transformers/demo/simple_text_demo.py
+++ b/models/tt_transformers/demo/simple_text_demo.py
@@ -482,30 +482,30 @@ def prepare_generator_args(
     ],
     ids=[
         "batch-1",  # latency
-        # "batch-32",  # throughput
-        # "long-context-64k",  # 64k context, max_seq_len=128k
-        # "long-context-32k",  # 32k context, max_seq_len=32k
-        # "long-context-16k",  # 16k context, max_seq_len=32k
-        # "reasoning-1",  # reasoning
-        # "ci-1",  # CI batch 1
-        # "ci-32",  # CI batch 32
-        # "DP-4-b1",  # DP 4 latency
-        # "DP-8-b1",  # DP 8 latency
-        # "DP-4-b32",  # DP 4 throughput
-        # "ci-b1-DP-4",  # CI DP 4 batch 1
-        # "ci-b1-DP-8",  # CI DP 8 batch 1
-        # "ci-b1-DP-16",  # CI DP 16 batch 1
-        # "ci-b1-DP-32",  # CI DP 32 batch 1
-        # "ci-stress-1",  # CI Stress test batch-1
+        "batch-32",  # throughput
+        "long-context-64k",  # 64k context, max_seq_len=128k
+        "long-context-32k",  # 32k context, max_seq_len=32k
+        "long-context-16k",  # 16k context, max_seq_len=32k
+        "reasoning-1",  # reasoning
+        "ci-1",  # CI batch 1
+        "ci-32",  # CI batch 32
+        "DP-4-b1",  # DP 4 latency
+        "DP-8-b1",  # DP 8 latency
+        "DP-4-b32",  # DP 4 throughput
+        "ci-b1-DP-4",  # CI DP 4 batch 1
+        "ci-b1-DP-8",  # CI DP 8 batch 1
+        "ci-b1-DP-16",  # CI DP 16 batch 1
+        "ci-b1-DP-32",  # CI DP 32 batch 1
+        "ci-stress-1",  # CI Stress test batch-1
     ],
 )
 @pytest.mark.parametrize(
     "optimizations",
     [
-        # lambda model_args: DecodersPrecision.performance(model_args.n_layers, model_args.model_name),
+        lambda model_args: DecodersPrecision.performance(model_args.n_layers, model_args.model_name),
         lambda model_args: DecodersPrecision.accuracy(model_args.n_layers, model_args.model_name),
     ],
-    ids=["accuracy"],
+    ids=["performance", "accuracy"],
 )
 @pytest.mark.parametrize("device_params", [{"trace_region_size": 30000000, "num_command_queues": 1}], indirect=True)
 @pytest.mark.parametrize(

From 7aa6affafb6f5e9e1fc41815f69749ce9b2ab082 Mon Sep 17 00:00:00 2001
From: MohammedTaherMcW
Date: Thu, 31 Jul 2025 05:49:33 +0000
Subject: [PATCH 4/4] Add refpt file for gemma-3-1b-it

---
 .../tests/reference_outputs/gemma-3-1b-it.refpt | Bin 0 -> 50620 bytes
 1 file changed, 0 insertions(+), 0 deletions(-)
 create mode 100644 models/tt_transformers/tests/reference_outputs/gemma-3-1b-it.refpt

diff --git a/models/tt_transformers/tests/reference_outputs/gemma-3-1b-it.refpt b/models/tt_transformers/tests/reference_outputs/gemma-3-1b-it.refpt
new file mode 100644
index 0000000000000000000000000000000000000000..d57fab334f4b88e136867f2ac13e7fc94f0b47b3
GIT binary patch
literal 50620
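
[PATCH 4/4] checks in the binary reference-output file that test_accuracy.py consumes when it is run against a reference file. The .refpt layout is not recoverable from a git binary patch, so the sketch below is only a hypothetical illustration of capturing reference top-k tokens from the Hugging Face model with torch.save; the prompt, dictionary keys, and tensor shapes are assumptions, not the repository's actual format.

# Hypothetical generator for a reference-output file; the real .refpt layout used by
# models/tt_transformers/tests/test_accuracy.py is not shown in this patch.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL = "google/gemma-3-1b-it"  # from the HF_MODEL used by the CI hook above

tokenizer = AutoTokenizer.from_pretrained(MODEL)
model = AutoModelForCausalLM.from_pretrained(MODEL, torch_dtype=torch.float32)
model.eval()

prompt = "The capital of France is"  # assumed prompt, not the one used upstream
input_ids = tokenizer(prompt, return_tensors="pt").input_ids

with torch.no_grad():
    logits = model(input_ids=input_ids).logits  # [1, seq_len, vocab]

# Reference top-5 token ids per position, a plausible accuracy-checking target.
top5 = torch.topk(logits[0], k=5, dim=-1).indices

# Assumed container format: a dict of tensors serialized with torch.save.
torch.save({"reference_tokens": input_ids[0], "top5_tokens": top5}, "gemma-3-1b-it.refpt")
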