From f8fb17ed17717724dcf9b3b9ac89d20f0ab97ae1 Mon Sep 17 00:00:00 2001 From: asfiyab-nvidia <117682710+asfiyab-nvidia@users.noreply.github.com> Date: Tue, 13 Dec 2022 19:16:40 -0800 Subject: [PATCH 01/16] Add ONNX export support for TE modules (#1) * Add TorchScript Operators * Add symbolic methods to ONNX exporter * Add tests for the ONNX export Signed-off-by: Asfiya Baig --- .gitignore | 1 + libcustom_ort_fp8_qdq_ops.so | Bin 0 -> 675736 bytes qa/L0_unittest/test.sh | 3 +- setup.py | 3 +- tests/test_onnx_export.py | 870 ++++++++++++++++++ transformer_engine/pytorch/__init__.py | 2 + transformer_engine/pytorch/cpp_extensions.py | 76 +- transformer_engine/pytorch/csrc/common.h | 2 + transformer_engine/pytorch/csrc/extensions.cu | 27 + transformer_engine/pytorch/csrc/extensions.h | 14 + transformer_engine/pytorch/csrc/ts_fp8_op.cpp | 178 ++++ transformer_engine/pytorch/module.py | 505 ++++++---- transformer_engine/pytorch/softmax.py | 77 +- .../pytorch/te_onnx_extensions.py | 170 ++++ 14 files changed, 1727 insertions(+), 201 deletions(-) create mode 100755 libcustom_ort_fp8_qdq_ops.so create mode 100644 tests/test_onnx_export.py create mode 100755 transformer_engine/pytorch/csrc/ts_fp8_op.cpp create mode 100755 transformer_engine/pytorch/te_onnx_extensions.py diff --git a/.gitignore b/.gitignore index 4ece24d518..68bba92666 100644 --- a/.gitignore +++ b/.gitignore @@ -8,6 +8,7 @@ *.nsys-rep *.ncu-rep *.sqlite +*.onnx .eggs build/ *.so diff --git a/libcustom_ort_fp8_qdq_ops.so b/libcustom_ort_fp8_qdq_ops.so new file mode 100755 index 0000000000000000000000000000000000000000..ba6ad6d85e10d25c3e98b54b6cba4678dc0bb0e6 GIT binary patch literal 675736 zcmeFa3tUvy*FJvE48s{Xz>MaFEENqDyaI}fS5VL)QBl;?R739lm0_XFXrA_^Pxs+`jT-Z@@`^=1ANoQN=WEoM&mW?tE4X~=>gv=bDv>}SI)t4j zToTJA^0`DljaCWQY1B%2agaJXSCN7mXc*&4l16>=?H2ibX}HLzQ7VIu^dAc*{8!3L z6ZxX==6ZbTa#25xYUK?^KFw7;=aoN&JEb9_yxQWBZ{~V5DvD>mBpuXd`VAZ=!95jZ zi-0qEJ&`8)xB+jTiiq5puqS82)Out8d~oB<&*efaMc{0Oll&ckQ?mL&%|eQIh1EJl 
zntzXdWm=HMUWWjFe)5a;B)e?4%iC`%Hc3g#=dUsy@smDGNN5%=RY?6`V!B988=URv z0!MpJSd)&$bw`|?aK;OMSJ0j~6L2Qtq@y>^zBv2iOvO0>=Rll;aMF>6Go4*Y(ojK% zf!>011kRB-N8ubzY0uX?Q{0C>96ME>Fx@rayO|48Z+O1zn?HPWL-4*=md=^D=*hVB zSC+M#clVNxVFNbz|NiCWzwHh>TzADwmwt&`H@v84;IC63d3lWAo}%9lPJU2|Ec9D& zdx1Q1SKAxHyPsR|R>s&h=U%w!o1>nLa})9p-%|e2BM&XHtsDQjZF=+%CtLnL=7tyC zCzmYV(Au58&`)p9ndY06}e;DnnrgAp~*US&BMbF$??3ox+bA1bI!QWkr{vps4R)gPG z`qylyy%v6>TI&0JEquRP{2yJ5ouRexzeIbeQ-l4Xs8>z=xwZK1KrM0kv6lAsMlJk? zO3i+Ys3k5x)Z!l+3u}tY_FDAB)WXlL#c!|GVrOD4exq@^CVSe{!ar7vKcB$>-&_ij zmi9wN91S^c}`?Hap&Ceh!hUdhqm8af5mRlr#ZOd7@_|~p?{yyf4$%b;>JmMyWng7+$8EH z2|m=a;{psIdCCZiF z;QVm{KOF6f`8l66h6sE)Yq#hG;?9&S`0F)4h;p^Dy*c=#XN`mF(fmJ9wDV#g|Cph= z873oZ=5xF@MhzAIOcQvmJ*jqFSid>X&#*i%ctm?-?VPxy`IOLQC(%2Icq8oSB`{WpH6;ZUMB$_s^b@n>}?$#I1BXpb z>mL(?92!13I-_Go-(drj(>tLe|6X0pe^y=We^lMHodT*l1+=BRQ?XD_-KCmRN^q=2Ni;_EQ?V%&J z1oHfUxh?oA^EEP>2d_ilv@S6nF$2h(F)edyY|hj?%oB1jE6JQWGaECKoXp8nk;3#O zYf47eq$z@d${}OtzWH5}I}YfOnVvkPe|j|8jERRqkX4j}V)FB5k%R;+IdgKB zhE5rpF*G`ZrW|Sg(SBg8j}0-=IWzL64NJ)_=n$KH8^kdkvht?o7Zzk^%*f8kn4Z=@ zCoQH!Mn-N?Moxa0jO>onI!uED{tMHWqO=(5=)8OP$;&Oso?VdK$s1Cd7NLq$vvc85 zny_U~otl@G2}AT2g>r|c_=E}$V(5ZNGq@wu3px{Lc1%o6EcXSpPR^ZJlgH4j;I{aT z0U25FZ9#VOkZIGTY1z}V^5>u|cr!jDgDK9Knl}+%&7LtMZ$?^wnhvHH#767F?A(b3 zlMK>~e3V#_Lrp@;7?wM0a_)qTA=xtvr_t;)rlZlr6X0SW2dCxK?&7Nra(~Cm)kX?2 zoW*p86W{~#xz@yP9?~BJU2@Stcwu60A#*Bx%R58HWDHB0G$5wsq{q<6ls{-_203q9 zUJ+H3wdmNGiP^b?%SJOTgpUenuAY%Ro_P?CC@dgodLG3BQ^>{fF1zyg*Yr-yckSaoR&nwe5ohPM2{rFSCAV(aZ4Gv{Ef zHjVd=$tf!mptPYW=>^_<62g^_Mw5*eGbMYD==GAunucL-c1B??`oI()CSy&V zJbq@ugsj%B+s({t*GWQhntnAgD=T9rrDBMm#rggE^vUSZE?VlBn$o*ZMr^y-cz!uz z1lS$h#l}=i#!G!hj7aRAk`dFcL%Zt8m>7f1m`PXNQNN1uUdFbI)7dBxt7;RT_4wh> z0&2mZAD+vZ8Bd|j1$J%4Go3)eCxjU(%3;CtO4>6F#ueqE!z{`FBY&(gR?;f$?P&Y9 zX)Wf!q#&sRJUYmhCk=Av{SRU1MU{5za(`)wkkgjb^m7kmXCqiTAb8GsQAh_$--+~` z{jWp6RjLx{OosQDE{XJk*Qa0y$|42XxJ?C74`9c|EHy-$4jF!1FR+xZH+eF4z5=A1 z1b#WQPm$V+bYY{(*dg(gx@h{Yr!*d!34Pl0RusYxyVx&$MC_Yr{3GJI^GYB7zRo<~ 
zB(XoJ;kC53KTF#_w8y6HFRc-Jh<`Iz!9F({_$>_l-3ETNfgk-kPar7zqiW*pGvI}Z z47k*QH}Ve|@U)k!9Y+j&dRC|%hhF1XRBnHb33|l9k2UaJ2L5mZzskVp?Vk(J8TbwZ zUNYEEy-z!=27W7z32Ha+#TFuyh8p;-4frqvzng&{Y2ddr@VglJT5L(2VBm|bIhM;| z;KvyBq#5{PE1OA282F~Y;A>+j$?^?+4BYxL+rZcM$Vpgi;NwQ4ABzn9Af1c%ZVY_fNc7_o1Amatm86vh zejfvWm4P2(;IA?8Z!z#U8u&H?zrw&zFz|O9_;n2YeFnbvY=^{$41DoOjO99F;MX(g zaT)ma4g4wtzkz{&&cIJJ@FmfnDYoqme5--q(7?AF_>BzwPy_!413%2bztO;tH1Hc6 z_|XP_Qv<(?fgfhzCm8t64Sa`zf0KcqX5fb#_#+Jb2m^ndfuC&PU%R~KfomSP=7DP- zxaNUt9=PU#|G#_Sw5`dXj*{b6N13H^9+n>qDhf=VeU6gt)@{r+p1AM8KIMt{9>msE zBK#zhoN^!acsxs4nifu{-23%3EtpQZEA=#OIGl2?)zh>PI^}*^Pt!u@l>2@?O$(e; z?)iF}Cb_5F)AckhTu!+&^)xM5PPqr`XuK6>K>hVJZFr&ndYUHqsK1`3i9PDCr)ffu`s-<$$fN#xnij~Yzn-QE zJ?ei+tA7|vAJx;%SbD#nrU^ajucv7ukNWFrn!uy}dYUHgsK1_W!P4{fbW4_=uBT~( z0QJ|?w9rTWwKQ65{khaylO)z&k#2iu^>l92j#pZTOe^hxjVNx$cle#S!U@%SA>Y8ewl{LWPUjUR-B^CvidiH=tYw)pKu6S z@f5!#D+>4}S#g+Ok`x2c zS-B;UNc_2i`mnJl<-x5sz;ndbw3s}*O;F&EvIxX|K_u!ghqKC2`D-sn*EkE6li+eneyi#H7VUCg>fv^dF{t?ZjtVa>}uET$NJgy07BReg3 zA}xw&Wh->LH zkEbg!?366-rb-pvC>hG6-7k2WcBf>RV`C>Nq5BQeIF^ib?Y-df6OwvDLBL>Wr{&q}j!>I+}kFs-|66JBnaShKO199C>?iq&M zOKnXPSp6N&bB_441s2=-28$YQ%gJe{buKX|gt z$q^-|@)z@ITdREz=Y^8*Z1AA{v~6VlDGo_0{iHDcv~5KFEL@+m*@L@rY9R_Q4-F4; z6~hSB5|_o@q#Rr1ZYZrdZ5y@qlx<|?Dch)uIg0HT%gxAuirH^P7!xGh`cHt`Lz;sj zDEUP>Woxzf*z=mn&}VU>n#T5$cuR`YlQ=xJJdVE2kT@*0yzO6fF*L>b`>^+{RH1i@ zX#~PN@fe~+_tpK!O1__0azZXhNG!cEJk|N@+Z3i$rzgdhe8$xRJ}kaaTwp1_U@iQV ziQwhsPzuEUV=jNldH%u*>MW_wbGMF39Gy5Oacp9S(Zg(h={)7EirG{7i@nRI&QrGK zm6gAQ#_unXx1}M9%dxrlb_vRc{ua=WC~gAF_2xMm14aB6d}1plOv*d35l33D#9NYM zDiVjMY~~CZwN81DUsugT#oy)EcP0!=cf5nD(=m_f>rKrraT8|42-8@EF(udb~v%}sFE6gcvfiNMM3^4Xv(SaTMJhks-7C} zw%v0Wx>Mti7O9CPd+s{+Df2h!`-Jp;ma>J6@pzW>N~!!M#!-2qW2$p#__@TA@1Xj8 zQ9H*bE|;b9HYXCG%F%3hQov`a@l{3ROLoYK0q$e-y-j%2d7u+Z_F4xx&qleE(R`dA zCLZ{+e1M#C;OA85`IIftfWwY9=9W_f&H)F7PB)3;&lNRIEZHh22kdvZf*$mT!l$Uy zom(XrE<3iI(%&bZ*(w(vKK7*;r%c|d>$R_F^HU%l}D}4-L?&T_PcW;q~b*9o}Bqdi_JNS^N&j1lgsay z!XzmtdBGlA>9c?bzybSg8!DWYiO6{*xY8Y%Sn0M!NJr*LB}bchqA}yN1rnq=5ZN0ef{ 
z?$f<(b%GX(@Y)u{!!zh^{C-?gnV%G}yYgtmfJ(w2`&f)?Ib`cdAd||20&*(d0fh}% z{gdTAsMIf#UryrLO3AO_Bx*vn^q8T>9f=Iwj|(aa>y!D3^Y>6gWHoT-QA{G=cTXhB z?=$4@#9I=FCk{(={y{zbB3gLKag36NAw3d9`Yr`E#|^(9^?#zY=#jQae&}2c&v-m3 zWo^P!%7%tpQ=GdTQNK{smR!V)#IfLnZNXo-NGaby-~rC=;V}sQ$%+Baxr8nVx4V}? z6}KBVrK9=Ol}1&##zF1d$Vq+qw%#Dn?A+lx{0D}f@`CW@G!r^b3Fi(+8Se23uGF$O zEmg=cV8Iz%=}KfxEWWFYWGnp!yhPjPqCS)*;CITh%T)e$DQ4;i>U|*-H14AkkJwNh#Uo$Lf%fLmUHgzTrObabEqCFKXh%28usqmN>JKOYo_)5>RqpfD*YsPKqkL#MhU=eQGoi^H z3m_^I@hi;Pl@wl2h9*#2A9sTbCWPB<>%nI6+mi|-Z>l_EUX&EBI?7Rbc6WC_2(Uqa zS&urffyHwv?9(dzSb|<*ibtyR427l%)!h)ycSSTkwo>{6Tw?J&ism_#fN0KQ(d>4L zNAol&k0x$*t=?xbY=}Gq^w8SYPYqY;es>OK2Yj7Sd}&?*-F{QUgIMrzJ))8aIwk(B zt+W!wTps9~yn$YdK<|bw+r16oH`@X`J`w2q!9t)L)ZVQW0 z#W0#4C(yiN4oY|32>TaQ*h+JuHs%btQ;56sB(;PdcuN_;`gXJg3}J&w@@^F*)IAp}UWhmwgZ_|w)z+7Ada|ej)A~d-U0;oCZH#l*rw|}wmd+0ES^AenfXc$Iq za#y&ZO{$ccDpeC*u?2wkrljG51;ViB4f_zpp4^ z<6X%8HkuL|)=%i7w)Vl5u*=qWE4qC{*N4ASMKP)Owu9ktQY0pCa1-W*w#cnep9=Lu zsOC_xzRER=mjo^izL+cTEygq*{$G?V9b3cl(FoX(%$(<%j53Zr=K2Jga&SMz{YtAZ zN;TBi8dE`aRH&&xh&BDhLzn@iF`H{B34=QhvMeH27Y79t#FsjPnlu|jPN&XNFqgm;i_qCb~KR8Ge9u54Is&i_1sI9b|q!^(?L7{x7-QgTt zmFmnr=WxdDA?%el3G%6&FHcYe;}*jR0$V-H4v{JtLl+8n*Ne*?y@4`;kZe$}3?r&kAw==SFfW;r>+cuLA-0!gxFs>NwC;No_ zi5lf)mclEQ@8IRT#xd`m+#HRP#<+)oE5-vpKc-RVkC-#*Si3vsA9n#{7=B8_soNm( zWeN>K38+u1^E-?NXltou9jHH*hG9CAg4s>^=XfGZgz2{4Ja^$iydg(Bw{1RIg+Jh{`)Btaim&nKNb z5%~yQgHYZt)Zy%huqVNk_(Qe@ALGFVHeFJ~$Jy!(!fqJU zI`e2N3}1twLC8jgj72s(4Lc*CoNgxLB-epo*nJiCrV-H*^NH&>aL6>OCh2leVjR4J z>4zGT}wab-&%jzy=gDZ1s8~6$?9VRVW&Xsbv&c<}g>sF7CTWw4sxwZZT z*m1dA^O;*y?98oPv8~Q6%(Jt(XK$p8WpQt1QC&xe=WCvwjfzP-OPMW_s|eoW_X{>K z$Ke4M;_f!%zO3zodHp^|v>fQt{Lb|-*Z=Ceo~+hfpYN_uEn^u z2sKxXgq7)}NQ7jPuFE1phOkI%!;A`%crKjO+v-eYks$p%5;n?S7Po2~2qM9@_}x2pFi+92{j2M1vRrGh z6d(5&!siLh;k4UNH?@acaI`jH(EnDc=Jd;(spfPu_H4-M4}RzC^a|bSk1?kwON)3@ zsAdk@DRnt&EwvBZBbah(uVcrexGT8-O>}U$|4rfkSw8O1pzLLF=f;rHS81>7;5n_m zjd=XV54P#-|?3+9IIskTW^v zggX}<6{|yA>1ozuHN}HS@eyKajcAwaj;kr|N{T=G7MUo%2xutvxiT}&+^}K|9pF5Y 
z>ii|47_LE(%)hx1C=qFQJp*j0gHOhM|q3e6U zVmjS>@k|lA-S^`u0@1GLIlY7JwSMdV6?@Ihh#l38xPa_#_U?tZPqn<75kEzt;Q7dz ziatfn0j-m!9sjl7GZ8tj)(9E_!yCcx|GE)4z9i33?-_W@l^X&5H#jr`K6%n+(sZ*G zy@%y5i~AYhG`wo>d4}R7+~vUSg@z)0K@>i_sao$@kNS~yV#=22p$#U{HMN_JHtyU7 zYz>~v$4v%n(v*_FaFdAv23NSrsQ<)sL)l(eqH|}WXL-1HK2?~}pzByMCtVjnNy)KILY4a# z)KF8qPh(cWN(OVY&&8J&_bP@f61j6RcVn1!mtj0?=;E~~hC%>B-LGRGmGE?%hWD`L z@U3mZ7$|Ym2iJjp7lw_(HmtxN!;>BU*yTOagq+v|Le85gCw(v*zSV44sTEhjw()&D z87H3NxG%zKl+o=G-djrfNbmb#xamXFSfOdW9j|7vJbAjkh9Ko4a!fj0Xk;@c(m01S zvX25>UH&}#e|Ep_`_HIq|DpZ5^-LL!$nOq@|9nT4tM1qBq+Dfj8ANK?ue<3WUyopZ z#rw7^5ge*P8oL+x?AQH37V(A0f7^cD%H|k2x}hLiCt(q|^iyA#{>%Nk45pCWnw?go zTd%%fHRpV_Z#aKP8||9-#j zyH7|bIo~y~2IrUl?fp78WiN}nn+WCsOZhm0{pcF@>w3U<{COhndt7oO=i_CHd2Bv`iE z_UqQcb=v;Nf9rnTsC~X3``7z*EtyJkSX-*9?{I9`Ot{K^T|3HN7PqVa-*DL7h|-n! z>z1-O);zTn4Yn;=t~FSS&wtZ?-H#vnI{iPmU$=@WC-=X0^OfCymHoQcDSKI*gUHnu z_hxubYp=EJ*V$RKsMY_p_46v3Ud{hvzwXT&aIYIg(zh||q2urD*S&@e|2OyRwtqkk zh+@3gapf3aWxwuy%3c;XfymWj+?4!(`F`Cw*y6ii7f#SC?bm&OJpy_ca$aG-?kQ62 zvtRfAhgVU2FDZ^7_h63xe`LSz^SxAQiuPB1uN>{G?bosVWpRb1=4#=N7Eb(MykGak z9?fyjCSTccSKF^+`OD(ElbWkJ?kvJp!+za97~{KNcacT5<}s1&YrlgpGrERTHOhN@ zOV?lQrYe2=U)`_kPe%Cc*Uh?`5uK>u{V4c<<$m2GyR=5|WS=WHf~)P_!O&Vvr;DZGf6;`}2uYAc(=eL?)q7x`p*o};`Y zmatW}h0DOkcOM-1P81rZU%2bGZPe2J!s*R;yuONu-#hV^za6jbrPlWw5OsV|dG}`Y zUUYptGXG>=OLW_L;|Ji4vR)2n>s03*P%_g_?++XHoUu+wb?(G-$y6uaX=bIoiBgc2 z3QEOORqMPaso2r_W-DH0-oW1PS#YMXcXG^`m=nk5Q<|^WQk@^O+(p1qR+L+?=-3YE zPuz@W_!9eq25y``;f*HO)0OBr@gF&CeLqTZew0%AlgWJ@YVUoQi}X@cH+a=~+Oe5? z$fM8;!=}L{l_e#(Ckm z1#hIL#$PDh%U+mr9ztDG%aas4z0Df!K-#%gd!N`*hLXyn*&4oND;^}xsB$=O#`ig0 zg+Dl=4yDKy7*-4FqQ#_QE7b0411G0q0f+h^9ALo&YCG(L*m&99&=T-|ji<03y&B1? 
zmblSQ72M?)P8DTea})d2iCakG-a?z&@n;dkvnkHADV0C_xmOdbya)a8k8AT5k4HRA zp#RBHqWu>R(su2*=7DP-xaNUt9=PU#YaY1ffomSP=7DP-xaNU>=YgR^hGyVLWVByH zNu8XVojF4iT<;H~2tNL)5Plt~OGZH$@^tN5{pY13*^f-&%}6?`;l$5Lb?xf?#imFC zbcoF;hziTf%bi)kb4mCerMwxL6SLcmpHq-M^VXO#VLf_;Rp-Ul!0W(x(Z1zg_ER~U ziTFXNc9W!>%&GVpoEZFWdsx>nDU1K2PR8U3v%`A!2#b~q@QXN-@pqWIcBP+NB5P+9 zW)+0x7EYZSmYY`)R)k+N!`Hglk(HTCEdHZRuzW(-uKd^6!lq`y$)M({Zatq{osPS4Ggqzatu z=jK6*dtpUSX?%H^ehIe<8^O0HajgDXtY6_m;B}UKgSp#s>;lAqQ}wFHLoYC|T;uT^ z0$l@2-xf$)=kbKnd)J_IL2n0r4(sDJpznfKz2))D#`Z?ldXHxXsC@(UfbIi51e%X8 z?T6v@<29fiLDTTa)CF4c4)Fd+S9m_Jq3?gQ-({R!Btq_=U?KyL&c z2O15U51Is83_1dIDd<$tm7v9-Ye1KSR)DSo-3R(E=n+u*N@5l051BG3ZLrRiG81Ux9{Ui!v1T9tYYHv>4O@x)O9W z=n>H6sQ*&D?s*Y3;a86*4SraOHvx~M-gMOCM3QeGBFVGu@(n?Xbtz&I4*rceyR3$f z$@UO?-;mIoZR#v*vDB+k*H#_En^RWGpM-q5L-`QRmdaX*^7!=Dl5~UCR=Pk&?d2=v83XEf(od55hS(RI5<@~4niE69O8o8$ z3<*sNvG)$KrmEm3vP)pemQrdT@DGg*Qfi2Op=lhmlWd{3L-E#ZA>NzdQTx$&q+>UD z-NEaLYp!D&(-DRw^}YPH9?u1mF}qSi(o9Kfw8VT=@}DZr~b1NyaMpp@0X9% zb*GZ@QM2BHryG@(S(qgor3!U&ARP}XNHy=6r&L^djeZ2ImMkB6@P-UAZ{vlI7& zA5g}x#PIgnerTsobXnd{`>G7v2G~`&r>6U2qvz^SY6Rp@x*x4ZZ0PsY{f42uTH6)# z=ircW=483X0QFX>Ka@h>Vw9ar`s#9hYGNE4=LF<|zGvnM^#haTJCpRai6{M{K(8IwJaEke z*F12|1J^uo%>&mwaLohPJaEke*F12|1J^uo%>)0JJfQt9y!Jcr+V8&q+dA?cAW2$+ z*DUGK(og!NSNWvZi?sG^fNk<2e=yS`Ny}&Q3vCZu`<;7y-H#o#A52FhP9*Jj?X|sl ztKk3f7yWZ<>_77@JxSa27$FM6mK{4*V*iW|?b$*a-VCOL_H5~(JuW)xHYdt`KI&mP$G17h4nA71{XCOoa#`G_cAd*-8+hnHBH{gWDVJf0h}w17)^m6)Yh ziHe{1k@H;FKL4jZ;96aCiP5x~jUz$OG(pD+nlEUvpi2c^Dd-wOD+Jvq=n+Ay1Z5AB zQI4HGG{W;gL8Apt5HwBDaf0RxS}f>NL01a8M$igD_X&DL&?-TtSXNERE@+sb(Sjxj znkMKtLGuMI7IdkgD+OI6XoaAY*79<+&whRSbPbCfHoh>opfD`HUB`COZDR^qD(0@( z=yuT^+r>n2zC{13mUmy?3B$T@B?HCKR2;|CBZTh!zWhS#qQxSub@inptqo}_edKFI zS{q_2eA4@T(noyKRU)mGC-vj@Xlc7hYvqUe$fHGCD?dS`?V^5ZKJsxs>3oq^1isiO zy;P*NA#aUOdY?#ZLwc1+(<4zj(j1&>EoiAoYxp%j>3t&29%&-0J%#6|M`(0Z(DOkY zS~@?Ur`h8$T<)9B)AR_24(-;+9$|1+wa?O_W{D5G)<5Ou0kY(`sDM7bN^1i2vNtrh7O>ESham@CIFl%%``50%R1c6rzw7;!8N@a1g6yV 
zdMEzbpR`T3o7O08z$m|=wHWiMG_PmD3Pkjdj6odR0h1WhQDxd4l2jj+&WE^&Al%5mc0l(}N{9Ekk@rFJq^@8Ed4d=6 zYBVn0N)L<@Z$Q3L!$=WlJ3@bzvI!mS18@&&5}q;Y<@w0yFDnba!y`P%E>@G9GCzWA zqFU?%sHv8%3?#Xa~mK)2r10W#T2hz65agEKVL`X-*>#)jg$xvW$+ zm!xN4_Xq&SYHEbIg{;iI8=-`F7DRWFn7JYP&1tAaS6Pu=cnFCIJOt4TnwZtZl*TMo z(5+2Lvhr=BB)tQ`zUm;#%aPH;(s4OJR@P0FnMya1=PyS~(f)q{I80WC4wG4g>S7pZ zjFXwhl?^T*0?b%hS%^1BdO~J;=3{)2N!-nY?<`04C(6nO^#6>S0If(vzO4L&URtaJ zn7u3pgEycFm`dP+p0f0`4q#XGA;q2Q4#4=NNO)W3=zC=4qlaYaV;#+|=tGKY7zTqT zSSPL5(TioJ`$kE+q@!6K=tJV_mdeU+9c8HrX3>WFF~2wY8vjMp^q{OH<2!CNlJq1H zvz|VrsO=jde@0eXHjXcV#8&CdhsO|D-14BP!3lT~<0r z0ATUQV1(0PF$e6Il?uF=Ah&?9TQvZ$JR6-k^oXp?LI~x-faTNxWq#t&Dp~m=8PK_a zJyZ+SXr{@eL~a3oHL#UBUT=LyXdRPs_+wd;4+8dcb*RDCFq3lfZ7941L|sLzH;V){ zMx(+uCgt?2P)M%;bkL!1Qc!ti`k9n<_-B4{3WOtdz$0y{tLbl2j?V!q4~Tg>irR*; zut6r}tGPfu2*mR`iV72`pD5%Hnv~*npxyyur-tJ9w}4))0eHfsJTekL$b)?H5de;9 zAQNkh9JJk}?0rsvELOxHCyzgC@f`HANqG;qG^VK)0C75qby^NOVp3juNq`0cFiHn8 zH*ru?v+_EI0H&z`fO$HITqOm(9|FUBnw4Pma_aB$qX0amfrPxTS$PZhNb2G8CdlX~ zPxuHrFgtgGWEyBz-o#{tUFhL=V+#&$uGEU|oDeeaXXU9G01y3?78!%fSjGT^-kpyOAd%!aXp=`QY@!;;t zl+A=}4$0Y_n9FZRVQ6_Q|1!uP_2%D8o+&Xa*(ZhN>mb|Yl~2T!&s1hs<{?n5W(OcU z?3FL4$}Bf4y}JqfPD6IyD}S2od%~>j{X&#~9p=#2;}rJGbbolktfbw?ZzOUD$l|^7 z4``CI+N_+OB;@Ikjr7WYe*^NhX65)S=ly#+)?HI&q-0@GE~CuU{tj{tlQ;a3_!3V4%dFh80V`yaxDGmu>*ISWyv z_)DnbC9`rTCed;;Ka(%?Q!XwN`R75lz?*-= zX5?S)r?h-T3>8m7_Og~=3||cNgr8DgUx2m)@Uaf!!xsa+;HUJ)Qko4qZUBDQL45dP zpmlyq-Vy-{2}aY$$$iU5rf2SjO?&*5iHpS;*8zZ@I*1Rc4D^Mc^7KKWiMHUTRRbBO z3jqxNlb@1#oq*p1=)*c(zqLsmdD>5T|2KhL3)H*SkOJgqQNDaknD{vWKj|R-#wL&< z7Nu7mp-#f+qT)2vxKUGzVv0q1_#HIaW)QZ~0o-r9HlP)cuqcsOtTE^I1z@la;x1vJ zNfzZamdp$^1%M(Q#P1CZG|!@x_7R|^06edQ*bS1IPajeMgDoIF%I=2A&u?{!* zjUk^x>HP%K4b<<|kV4%Wi;~<_fI@7N)C8xuZ}H)W7yN-mxpBTg($B^8(UH8R)T2qT z$zR!nA0}Y;pfLc<&_Ogf^G3kE$lz)I%A}d%mbVzt71iMe#Z&y1W*><7t_NzTj${tv zPG>sr_E#Fr5q2H{>d(uO#>U9oFs2Wn>6gQ$ z0EXP|uLNRg$f7k3sJp8pne7IDeD1G!(u6-&0J>U-)0J3?)u+jQeiP=n&IZnGVVjBUuwu`2p}u$RYhsuR^;)ki%}1!A>Pbult&b?Ls6E#DE!+2&;d2T 
z4WVR398{DUxF<5RCIUZK&%;8=K6tBT@MDVdWpe?42+%b(z5wGufy=`jXeviv4b+R23eIY?+Hh>1-@SmdAuu09{5pK<(GM)92(;9 zs38xliowI#R%OE(QO<+Fzg|Ng8dOa5A?lNFRo3kmqoZGgdB&=& z%0%z{39#RErPN%cfTNh>n4|#ZBIZFb3p;hE61_RN%Gc^#dzg$ zwByq>06$2=H;m-|kPY_A&DcsawGU9vWAr7vupZQM;VDKIR$}_2A8vPic zFY9pqPAHIL0+d1MkWBG*pgz@+2A46NxdBS{2LkykP=D!28g@nLCNlPp0Ojdmfegj6 zKN2T*uAy{>d?G;EjhzfudT*d^(UE%TM#ZlLDBobek-?_}I{$LG6u=a}8=$;~xg|qB z4%90;(qQai+^$V~1C;551#$;apXo@0u?%@4Kv{tyn&~_VlpiJ}yjs4-GWdl6d)ZIGLU@Su>1}bNU37so| zTB{>{jb-pbfl7y6Lh<{6et9`u3Sh{rKt;jNMlvt_0hA?F^Mb+Hm$9R7njWa6tQRIU z1**M{G#JZ}%LA1ltv99sHC9LZ8q46%1S(H`C=|~IbkXH-DS#p03{)onE{uH!s5f+^ z!PpKDz}RhpO5M{!=LbN2r6Uc-GUQKzis!7*c?Kx;dd&g8#xnTtfy$y10gnJQ?sB*k zz>xl`a;8X_FbJp&9ceK3d74ub_4LW4${?$`2~ZoRFxM?1pF+Z4H{_#*DRSKTu z`<`+%GIi54lIMgmdsXGjj|6BK0JrHNuXY3cM^(9367W($pR5JW?TARj%B{gi%Gvv{ z?G5BUoc4`l`M}KY;4Fyj!-*|;DdOM`c=T+5X`CdT0NQi|a`azVbB^1ZAx9LIVN&IQ zo#~Pk26*%pvPkxY7)B|+V-~+FOJNbXc%ekU`C{ z*hh|_<_pzP`-wW$IgBn=mor+fzRc)-Y9*sf)I*HkubMs~{BpG^qYtT_7=2V7!suga zE~AgDD;a%4tz>ki`U9iSs^=JeUiCjfdR|Z)G5Vs~jnP%=t&F~;-ofZA>I00fRyQ(Q z)u0En`;P`yYbgJj1_v1bT!WL0o^R0iQ{w;Gz@O0z4O%dIu|Xzie&+a@Qsar{`RD?9 z@I&E^@H1Tq$uRS3NLB;6gd}t!B!kQyASnj)C6dsEkn}g7`$m>pA-#-QN%9Lz`8)(yb8Xjl3O;pLdg$x zk*VYt7vuRLGM&Wv2kCBUEV)4|nMs48={lTTdU?sRWPJmf8{9e$1`lk@tk{JoTCDyC zD{g@Uwfc8~`(n3{FM8Jb9=&5QaD_PUpmN$8t@sG0l5`o)M|CN)f|RiO(*@NZ;;>~6 z(9KP9BjQ=#0B5_ravI8cFCcikJP{8*!qvdw<`0dBeJxT0{{o;?rc{g6)~`N~pq-^M z9AtrHJ%sB8a@=l62gMN-k$p1|phmX_cENO`RHjs;u^}PYWtC&VF{ksKb#YzPemfoq zjBnHmgwT)5y|GQ%vSV;Z3@rVVaBXclNNs^c5|lPc+lNF@VFh^-MS=p(@x|W5ghy^)S-|g zko-i_3^KPZD{!20=vUO@0@!kxC{c*UE4RG{ZUeBJX(Cg$H*2uh%1GS za-f0w%Z+W&GPD69ab;kHAMTYdRF^UO zkh+%9ht&g&KBE4@=%ea6Mjum~V#yTL`f)Xx(G}`=MxRg@G5VzX9HT4McNl#}{f5zJ zRVjq>J*VEt=nHBeMqg4VF}hm4kI~oEb&Rf2KW6j|^(3R~R6BMVgId3-wr2D#wJ)RV z)l5b=sB;0?}G?mf)>J&lmW%LvEB|$#~?b%wD8~nyDg5(Clc;eQxb+Fu^ z9lN+e4xzgAY~4f-A!qk&-AoQ4FZXO6E{BkNd$x|0LwNQmIfQ3#C5KS(59A5z%s-(y!LTy93QLn{d%le4Ewb=%8IyDfak znM*kc%yT>@Pc4w@c90b-S})BPR#cZaq3L_wZnE;{y?n-8N%C7MKl`A2pVWk&!_`fb zm1iSli8xfVQUDR4Jr 
z%37X4jt6ALoWdCv1eAmINxjrEM4@m$CI|f$sfGJlSqZ>@bBQPTH1f+#W-SZ%3c1;G zOtGybn*j3-={ixoszB{I2ETYd8v<6x61@ zx)<#us7EsebjA?CaIq?+E>kIxPIzfMjh%RMpM+~jP_UUjHaqL zGCDwwWOSg~k_zjCBfxKs)BG9*jEfd4o7(`F0Cs;(#NSeRK&GjldVk$X z;dn&@Ib=Bpi0Rc)`0o{)WJ!HQX@y=QmuN!qo~D%+(m`$G)E&ql)V7nVHXzzrZ9%l% zc=a2)o43nS6B`oERyQ)5qkh2XMD;sHC#mNdovhliTN~8wHnjz#Q`8=ePE|7)ou!d2y*M#pq}%nwQvw-s@1#$x4$dSt1)!36xH|Q%yWaA(Bl> z{X1l4=tNxw78v>tW}~9^76nYb)LX z4DQs3@N(nXXkNix>poAmvsZG1<3g#ja^nW~LDKEIEs$6v9y=~e!9DAAy&oGvU|hm! zt{}(Jm5DI?ZNcEqTyI;Jp?96GxarDGQECKEmP2Ys@4^K4Q$F%Xy^vAv2~iS>cT&CE z(MwFh{grP0cro%Ah_ZC?o%D`U@E|1>4x_T=xe(p0i|b@0vkfK5O3@#>6t~!+U;lxP zgcC8?gB{e2HkG!@uX|whla1KEFT59R0p zMO#T*TT{?JWNq@+9;k~k1zi6s?mQ@Xhh`=v?86_&VjKCnIdmoUn}$gR-EpycmzJTf zF|mPXkf~IrR7`BdIutwPHYoO%dLKYlQ0xei9x3Q3MqiiT=qF1ZqScnu z(7omG*DOJwK|xs1cx-pPZmCa=gss5XGp>n^adQi`+%2oWb)$DyaZxr{T)eJb+^Jp2 zX+refAaM~3&WI5Mq0`hP0Y0~^#uV&9K56L|4Q>@i8;OhFl9fIfE6MxANz?{CQF>gS zgZ^&WBr9>)0V7!_8r3W-hWq|r?HtE}X_f(FtD{DGRxG?)OLXgIdKC^y=airiL`@;n!00dLFtG8EMqEJTEt516d1U^$9R)+=Pm$bH5StXoLv7Hss~rm-K0Df21FYOyXyc_ z!OM><``31n% zUk(-a<(ic6yWpS?fcQ#B1rO1j+9-lz`K(D9at5eVKwQ#Le7eE*ovz<$iTF;IMt4Mc z4LV3tGo0Ks<}CIat7WT6d3>_~bpxQU4r2b|po1o5=VAdG2f!2^L^er{Xu@py#iUGp z23Xp4lb9m!b-;PO6Z z<&jk~D@1lcHc-#siC(9*JSRt6(y26hpVpFURvyE6PawGnA9D%Kv~{BQX)Vvnae-d+ zFthUW^&BKW2jt6yX11{xW-VjQN{}0vC=0Cy+&(!O748KH%oIrB^f>-q`Vs+2Pg{}DEwfh zn+sl@z3DNq+z|d=9YC+dS`z(~Q%huCZg6MZE^!)6Wten7Wz<|zPz>bV3F9q@y%uX3 z>!&))zcy=`?5Cuxlex7;kj>SUNCEU(tfjzD*?2_ce+aS_-uz5H&rj)$ zc{Qu;ddS}O=BHOx@Q*!}Kc>o34-Cul=a7Bn&Clfb`ze#B$x>&MpMmV6o}az&X<6x~ z+^_?ltc$5sW1QRxysgk1p_Vm%N^?X=?f_v=9Wa?SCVCgnveQrbsRG)ELO4nTNbTq~ z!0Yd}G{)kF4NNm3D$&Fy>S4S^viAioffnT;CRGgi1W>P5N3!BL@&=1C7vnfX?gHvy zbtD@kIkKZgSvXiAe*?-tUaKA}-8<;AR|GA~EDCKZGR0wlwyp-}uLxRRvnbEKEkMZt z466oW)#5K9S$0^Id3dBCPXVGxN6~$TzgTWLXi@spQ^ds(KA-`lMtf$#>7D!)M?H9G z75E!9nfK-|HF~5cPznCZ`guTo1jONLD14C*o&S1Sw#2>(z)1)_I)LrZ@CXM{kS@uV zJ01Y4K4#>taPlTVelV8!?9YH^nJgW!p<+q^tiKK=Pf7vrVc@;~b(5t5`W4OjOxXZT 
z(Lhqbby(V5KUB6foX%rnS^(LDnw+-?9_I-TRPF#?k=-khuLHHECgfaWy$zP$Fl&L%T{cv(O(cDpL*8#Kf>tC@%?BGqqG!F79I)+>+FdHqFPX(qF^5xYq zCI*aJHXNnZQ>8kq6Xj;%HDVn$qm7+**u!~0qKw8W16f~NwUWf98|5H(2>A?r zxA7pHNb+Hj-AZz1-Y)jUF`!7cC*#h~mZwD!&eZ_{nJ_%yPFWd@t$SwjLy)Z?IV+&i z4KG4ohCOa9h~+oHe@~N{2+qfufQMz}87!mZ0}y_r1IVe;ZkdJiNm-fkG*tWnDgAeU z9?qKxv4^c_1D=wvYk5dS;szjER7bIPL|Zp?GLrHoa*<>`gZ4J@?p0!;dC$$zObgA^ zqs2wTG2-G!jWhXH!MRzxcnlXKo*xOTybH}S+xh7698B<8Xg=GAi?)!+XQA0`1()dy z&2ghpV|}5SnStFZWDDtqtaM@cI1NYmEHvX`n3b^lLi3r=@GWcP8&sQu>I=;u@8adn zsRiy`Xg-Jf%JNd+9>QrpPf^hqnkySZxElNon#|JVOx~|l_(7X%VFLE3Z$vUbK4V>GjFErQO1K->VM2?24zR>&y z4fsY_EYE}dUJYYn&e9i}WmtbO&~pI1u7jAr^o8c$AMmfsNz)zxKGi{FlXs!H7&mwM z7YP5<0eV!u3(e3KFxrlB_y(NZ0Zfg)&>U0=fWFX7z@(Fqavyx8Xj!}q&7od27Migq zIY^!k5uO zj;aPQ(Wv2FXkPpfYVv_7(op=HnY7T%FA^p!h3p}cv+%MnSJFbWHJ$>nJLek6*6VU@ zp&8yE2D}gU=Opr4s4p}FS!2`|nwwBzLdt*PBOtkY7Hy%~z?%gNO>`%Qlw$zzPFcu6 z;RkJ@N&o!cGz7vi)dAW<^G`G+R&EjG^9kcMRaqv0 zyBf;7(Co7afDsT*(gAFt`Tsck68IXfxBoeFa&sri6^S88NUq!xdu*{2yAUNLE^6PG zqSn?fl-kwWqITL^S`@Xk6jh3d!acH41G0jn-!IE(phL;DIj|X{RjxBY?8Ck zZ1`AavVIrX-!?95ge+%!q50-Hif?0XrMa;Nx-0z{C_NaG{xzThl6lP(P3)W>ckVcfsPh zlv_{iD|?8ZAPvG5*}i!{VPP$^i8>GYqKc-GVpU2rSk0)8#Kp%~f+a}o5YH5J>jiVa zi`nP*2!G0V=OxdTFTV#PV57Ow5bOEp@x=GVNV>N&`nOu-o&|7`B8bE}xipEHKVtk9`J@>$kdV~SDTo@3 zR~n&TYWZ|K<)v-BGMV1rUWKlkr0-h_%QjxwG9J8Va5msgK>A6-mGR2iZP-eK3u8Vo zN(Qo&@yd@cK}`)Cw2fD2gC*evdUj#VK`&61F$ZnWDr1g{n5MRe=c%~#9}t+n7*CEd z2k9{ge6cad)-BTeu>kKniT}ZvqubBu{}h{omLV!*j(ZppDGqrNXZ<>5h%)BbHBv^j zoPv}w2kk5=`wfb;u-_2dO!1im8D2p*^%%|2u#0q~ZGx8J;Z3kp(%?M0idRDEMX;8? 
zS5646g<#~*gH^YyjSwOVk{?$)J+>p$K1R@+aPh|!Gl_K8SItHHhwBL&(Vq!;XeK=N z(z~K+-vIPL!JKw&CbD%ifDh1DW9wB%9E4ImgfPwM3UZWqRqYQcvjZ_dwU=T(GMwH# zCySIN6GtmZfGblU){tq9fHfmc*-42+#_O(WSO&tizm4d*9C)hU67MF*1Dc`W7zduA zPeW2x0$Q)&0t8QFBHTIpH+W^aI{?##e=SP9E6>Q)8L>Ag)}_8c*XoMIyYDYiJE7zW z9O<1uFF_h5oQt{R(ce-GqXBsdE=#a5Fmi>nC{ZWLy%c=_>&naAGms7Bkcz7*F6p6D z+Y*5D!@%g_93q=&40dOa!S5HZI?U z)6ZZ$R4k!IZ3{c?)u7%gbp;l5WOSurV^SQv2BRzS;>b3-ibiXGvC&mIAj;?}2D9`| za2SA#52pN}C!ILT=t}6djjqaEM_J7QyukLMjIO>6!vYkpY!hzV2_8K}BYYBnPc3o95r%&E4t%qm2)APd z|0c4fJ?8fz@8w7c#dZq%5OxdDJ%U9{N(y*kjvp|upxFX5kj`*i(zp1CCS-}bQ3w?K z;9Qa*@~Qtx+U2W)4`Y7zBi1k46sC6w@e{t4)3|2M?aA7($c=q5AB3`Dq;LZ1vZrwq zF`=Fg*ZDT0Pb1i##vSg3sH_9FUE#AGd`#pz^uz23u=5I^@2H>PUhPIU0(z$4e^2A; zSJSj5@RS=7*54=J_B8I7F6gA-P?``Cu04&Tdf-gsG%<}6ZbmvniZ6@NCn9#c=OA(_ zndU26OL8~3)V$YycQX@VP9e=}oiO*i*S{{qe4mg{2#G5724)Yt5=oCi&WWzY7@-fk z>+QgP7Z}GJB?39@?)E;@b^`fXDJdxCbVnjz#en)ZT!RoZzL(U`WCA)7S#~RC0B|m1 zL!v0GX|>>d7kLS$_^dhz4HQYl@vxi`Ue38G5})rwZ71M;0y#1YKe{9LU>gL`R0SWT zc*yx+RpuV`04ZM%@MA>~Ii%);I5kzw2PwntNns15DPd^=)P|Q_hKFjXw2lilavvq_ z-A_m3omcEg9=5v$N`vSc>yUv3N$hDJIUh?!#-e}}z{M}Zhc3KH##tVDY!UdZBGB53 zahep1J=xe#gWQ8l!~PL2tStyV6zMxe#Ukwra+i6>B8>%Mnj)RVlU&Zn-V1VXoe3=~ z;IvK=Nb!r3O$klNg^)$EuYj+TEn=6@=2yZ#b9rc?C9%(4QOk2$Uzt%irlpodJ*Ua^ zd~jMG_=2*=o=j&@151Q^5-#3}7I5}tdg@R>4dL9}hV(v!tI71Z{@D103uFDj7#_${ zlWAi(7}IRfo=j786DDKW7n5mvNu(yzw2D-dX#;bKkH9^TOCJhEzl|qnGEI660$)6t zF87P{o*w}?-gOfH!(@8RLyQxOO+m{L)nqz#1=A=F-H0@rGDJqPisx;tJ^eZaI$aQ(tY^z9BjRo{1pX(s@Eui#w{ zJVTE=!L$c}o+@~s1JBVfZAKO$N?u$dSe;;9s2F|{)t_THEMbFI{j}%ViF=SzN|2go zx0{8ps-d7W9O;!OKTd(Ed3G8iLy;c~Xd=Pi5v=CfqPmj1nrHvARuVVCal7)M=Ghn4 zqxBqx`)R^NSZba<68+=PaDHe*`cD*?nrD9ozoBg~@5d#)$_b>zG0%Sb4-7(pRSv+V z5;e~*I2LU*4UE=`b;k(>^XwGVp?<)ID_ms1nrDB3S@H~EOBGK3vqly3>_V5A_9+Pa z6p50e%aH7OcH4fMb^!!>DQ7JBV@_B-=YR!@n8kcAI5&;F|kK0pM*VgMB& zI2j7nJbMx5VWgZ@22?}A_B^}cK_=l=fZ8jV=Gl}1nnAne8C3`2)~PDY^zRf#tu3y25$n=HxsVLssYt-5EWb)%Lhh6AWMx^Gw=qN zRkA^QtfEvT^zIIbzT%nO0qxoTiif^+P-E3CB(x 
zKir{6Hyxi7-}hW$`a@U;Wx|74A`vacgB4XjxKv$1+GVQ6_aH(RErT>@@xg^?tEH)@ zIp!j2kgL(r8RY8XSq*X|s6mbdHOL*oM$``o@H#Gjjsmp@xg|T9_9t9}Ko^MqqkxBI zib3vYY*Xd|&3G63>_P4@#tJpa#bUjfEK-(C95u)l_>gI% zpk*9sdMLuT2D!QyHEkYTSJ;TIVUx)6I91=e3a_I9eW~DJ2cDq^W6$IapeqXYIPe_( z@86mB5RePe62a;O>q3PXB|8HDR68|zVc9DYLH9C zt96QecR+mz_7SWGIZ@BaT@7;H0Wg7u!#rGzl?OG*E&Lk|cQf2SBTR&)2DvgDU=RZ5 z3pS*eq`=f5Hv)d|0sBkgftyJU@QyiqXH0!?3Dxp*S)oJ?a%EehaVLRML$S&^pgWWO5ZHezNz7}$7)lmDzy#US?}H`5k?utt$67P<_{9^@K)gs;#*IAoKa z8|0dHL~T=>Nu^)Mn_9R$0Q;%!!y4x8L2f#>E+WzD3gD7CKt7%uR~9@9WF z2iT1$JZBqr7JL2#T_or^+pyFJnBo9awqcY!$2KhSF185ZtteW6P7hgbl_~!NKSH8* zU=UZ?hTTCd$=|F0NkO1(*rPX?pwImebjLOfz&6YvK-y>FOQgz6ss6*IVW7vRls__9 z+dz!!0yBuG48-p#88d0QV~bsgPkS z1F@o*=w1Zjz9NwpL3++W>@3XgT-^}>E}0PyGtfZnPz8is3bY#kraA^<`F{k<4@Nh| zx`lc#eg@&8B2kJeCC5dT0@i_o4sZAM(-Ei125F0s9l2}PF%Mn||O&N&Q zL>;0aSz}-sN}DneyMNm9-xt^*$G>eL7FA4U5StEcj)U6^^5w~~dX$0K+?_J1>@o;9Y(Cq2?c3M#>CmjQ^B8Mykv3npqv`LPE*z3#X;G$>8{$?C5 z+3rM*kmYO}h+W5mMnt0$SPfppv<<{ct&=^L-WjZc#8e%^a|U8VMqA7|V9E4}nUpdR zt3F#!)btHR7$(ZgHV}KQ2C`{C+>cWbqPw6es4@_1j0Kg5@Xx>>Dh@d*1F_#QgA|-_ z%nD+0Nj<_z5S4*g7&g?(f>2wL#B3?hKy2tf{Qol$dIXZ>Y^pIfeDbbg?w8geDbv7M zgiCA=kRUe)vIH+S2cj#WQo0HS>pjN9(+=>C;}Qo_P$wek&ao2WfH?71vM3TMjLXFO z+js>TlRD`>ykgJoI}Gvmo}1{3>^-+f(EZ{)w+MJqdu}J!W2FLz#<+MU_4V|m6G!d2 z3BC57+ZpWmv3`JGw|%HRw?Dkt*@7#38@IUxzeR;?zuP|dj|_&b1GHJe_MTfy>_t)7 z>|5MUDwy`%C<8QuI-ra}&G2r!ZV#Mt7EvT9Y!UQVk0=@xHWLq?a#h1VK8iY4u3Ff3 z@moD?Z+3F85w=y}HN(CVzqP`C5WlsL>xZ?+?|jxA z-D0UK*+vh*Nhf-He;F+FdDIBHb%N7~Qs448aYjk}lUW!HaG2@uaJG+8eex}~RA@DU zV^nGLu*{O`;xN<3(U>8Ew*%LgxcFd%rH)Z;hN(577jRuxsHhI4L>wbZ4|q3C(TXqb z!ZSi1Fz~;Ge9|RC?)?RJJHRWCt12!&JZs2zhhpaht{J#qQK$;}TSh0iQqbC=1ywZT zPlTTEXaqt}axu>r@X)UB;67TChZXqh3|tqoE?xU-N!g`He(K+Vp`3{OBDSv{rgFBf zgkmL;eWb>Psop}CRS$V=msJy6#B4o6sjUg;ul#tyQd1Xd0%H@X!imX76%+WsjrfUZ zOoIQ#0nvyE57e+kMIs_RtB&}##br&PTz9OBXAF5h4+47<^5f*fB2{~Zu7s4uCyaQqM#pHCSyL|~0w1zrcX1I}ODkiLZQaXOtn=WSv(?!&bA;i7*Z zNYIRqa7ZnS5DR$5+-$EKjiFmd5)Wr7i4mhx(z{dr0N`zw_?(O8-(+T)| 
zU`uRVD_QXh!td?+7}Z8BbGHHdLct{#7KG1%u3VVlu#<2;ub`ygkvOkE?n?@EAN#u_ zh{If=SpN}+Jyk@_xQl#<-%jg%@0ie%$eD4BB=4B`pI?RN1i0`6lo-u0@Ere36S#WE zg|-q@lDy-56>q??8Bvc&YFVN-#(I=_Cx(s_RFb?C_Zt2>!WNgI^YVE_2{?p+y6Lb zp&dWOo`3U9qR0Ed>4aLpU+E4V1ls zwBxM!6$I5sk6DY<9A`gzm{zf`o|Ec}W|RQAd~!7qrnBe^BKJI%nuWQsb>yj(9`*!U zJ(V)#X~~t^gyj|PUBk+W-yUI2#Ba~AuJ|4L%NUs5V&amt6BEnG`iaG95>xV(YY=+7 z^M1*2$bydVU*Xyf7as{ppNN|#IhWnjKF3F(#2F2Ivf>n_+*gONs{gFe`O3=pV;rmpe$*l~;VRP#P$9hDq6E_lRLju}Kc4}6ee*KN)pM-JkC5-8 z-z<=v;}(8Mj;cwDH~Pj9w*mTu0=3LK_<2OwLel%Gj|%$rHFz#i+k78CIYvQV@)U9% za{+#YJnaDFy5>aukUT{{ieaQc!@N}MJXK!~n@QCNzn_ZVjIi_Kw?)|R;Igpdrkay3gc+5o~oU~eE3cMJqWpoLoKR~(bS<9 zAI(8EP={JnJ%cv%1Q87$jWYNdTX%XCnz?hR#q3W&$OkaNCdkGrw$lf)y#V~GLoL3!!?e>2QDdfnFq25Z(5nu$h@T5c9coeX ztmLq5Abz39>QIZ0CKEc?IbfF*p0sf%Lfo3I3sTAqa#x31{2h(2%#n5$nu%P)C9=ag z)B;l?EuQeAz)BG=qURiH(fOgZ^O_2T=+)aeV z_Ah$D&k;`EF`8BcSKHlKUr~0++CeeWzg-6(?@q{aLlzpa#4-r9PYr)tO$WrDg zf6W8qfDOvJin%cy5|TP0a56`!{w79F+Z=_;K$)Xd{~rG>8{C{@k;;+?OfQ5d#~g+9 z7zDo99OdX%>Ahuucb&xlV2<)pCyZ)}O+m{Ll{t#B4BO5iEF)4m$`ECal2TblbcceJ zISQ47GDo3EH{E+3hsxe%dJ3jCPvAkAqYzCdQ`E$!o0fnCo0TJ%@OA^4q)1|0>Ls{T zB`;b0$aXGXbW<=|ypC-svRMh40KqoQ{2e}R!^}VNtZY_DP&O+hD4Uf-sCBd9FdrAM zN9ne0RyOa@w2$Gs%|`U50v?(vY*rrR!U`DBSp~NsSlO(c?~bqF6xNn7%}9lVZL<=2 z2r;(}Gf#}e2q#jOOdMsiGCm*nS%6i<#XF1eEt{3*AD|nAYYQ9E`#SJceF9!}zY6Ge z1rKuI8G8FcOq&d7u7Za-@Em<@D9%3uv^@aUg$iM_GTe8UD>QuOvYFTheo)XDG$nK zWls|{K-#(NOPB~t*{p;aI13HV(``s!NP#JvmC=}+tN^x7;ej?Q{r|!=5ZK88Tq;pE zE57CE9BzT}yJ9VOLV?Z7nWg9e-$Jv(B_gPc>{m7`{#e-J0js2N@}D)Tuvz*04{TC` z&`yykDY^{Fwpm$LglPjo7-f@WVW|(GTqqL9W+fDZHZ_m;!QNo|c+O_UI|T;=13Rs7 z^6{L_iuX%6-3H^4VmUS|)V7_bY*07Zy*L&Q!e(XmY|9>HDtrpV6pDy=pfrdFszc)8 zN5oz{l!Z%6QlvTO8I@vkoE(HM;yIg@Lf8qUS^C=qJ!i90ZM^iRY*r|Fj?KzDtIZ!4_R)jDE|X(R;V2q#8oycMt-RvZeoC6p-LzSv{}(ANP2_+KzD3bvT?+dbpWp$ zF1`+tacowSkTq-s+$Y$WzMTllW`$ZAGYC~SD>VGG4RG15{Lqu-G0U0ys<&NQDer*{mGdiW&gIN<|_qg7lot$}nWkcCe1TfEj4BauhX-T?6g+e^VWs z70+ID5TTP1V_Y(c^HA?){FKc~c`VvkDKM(oEZKN%o0Uza@LmQnW*NZS+Z>r$%4Vff 
zutgdQ!Z<||`K4@D_HDOFi$PeeNE9c>X65J;L}wSkqlzG^1C^>{vr;Vs5xNe>eZ>-L zl+8+~Lr|k^RtoNxOqOQ~+6XRLZ`J9!%dw^GoSxgf8zxa8v3j^QBKBDdT-mJ9=ZX|A z>jtbh;i45No0Xj_EdOr-o2vXPo0UaKuw%3GR-z=a&0v0}v?-gFHRK!RA72C ztqeE??1JOpKGZ1Vuoa2lf&Jz9w{2EBzlYBxu`FUaF=&RESUF&o z2^YnwY*xyB3M>uot!=+nU9oLe3ekRPe}E$t;VSfL;y=mL>A7YLq;3|#c{V}WtW2Md zvpc9O`2^4&8`i0X$xxKdN^n1mc@eDJflLvcW3zI`>YiLPF#o_MOHSw%ousl^$-UCj znFv;$K&I2_Dw~zQ8!dV}Ps(a9-;R{~%) zMG!VCvNy18R`R1(>FHqfP%J96tRdPqD{JRN;V951DXQpy)akh+QPrt;&{u%4+9o+R zE4OdU($n_>J89#xM#yruZC1j6kU6d20qfa|n6}Nz$z8CiMlY!6o`szQTuz7ZoXyIJ zUu55=r+_8XCuUO0W~DH8_Jpb~M7Sx+D+FP?aC+{r@<`S&xR0kGM0Y{+O=Yvvy^EE! z#lTl94rQgXS&43BCF)DyCjvOSAS#=cs0?gRfbdw6#JnodX63gX*g(Vpnh%$Zks!(0 zlpi}#WV2H98In>HjHbB6<^TzDb3l3$r7gA+OBBUEc-}tlIR+Z-V9+vcKO|)JBj{pV ziDsX|TtzIsjd0{iMm^YGW`xG!cJMs*Hoqx^(b_vdY%G>WOWB2HO!F>|I8g^-9D>@@ zxOl#X$lV0ow4{~GfG>=q7dxzkATM(l-U;C`h;FEG$U41z(Mp^rjVs+J-bRbc2SS2P zl5@QwLdl2jzp6t?RbcgPTrWW(%GX@0%# zun%V_gs;ZLE{@!&!{H3VqdJ^nSq%KJr|_ZAMJC*(sN07#bVNVFa>JQ+UInDnlRBJX zKYXdf8S>V`mqe7#6i^!59@OCswNs@U)*VnEg8!kQXTIJh5oMFT+L}WLdzp;Jlp|pu@AI>oEJu1+Dnqi9bj=Ke3IIu?`1|!cR z)aE9oTqI8~OkQ8Kxk&{P|Kt`52q$vYeHWWS)`?t`N@9};+Unxsl_*{IQ4S4?;SCsE zyV!_M$xuf*bSr^~4+S<(;SC*pOyo|CHf%nyl?qRD)K74qMl;z4=zxO%eU!uKFib|^ z=_)vS8}e-*<*)#|y%e3NgotqMqZ~vXDOdq>mDq9SL#&AkA+@D(MQp|DiBOSDnN{ix z0(o+m@N2|EXTrX3D6epG*Sqtu3qVeqb8^=yY}in|hC!{~iQ=VB?h49{u@x?J3F<+R zI=L(75~i(z^9EeJFG1?$u7V9<{{iP?HYD>}o!rG6A+v7)e4q$Iw{vpWC^USENf@FV zg-ZmGH3B%ftLV>~Rt#8qg%71zsFS-sEsQuf0@y+kMB*Ba{Rb&*!_0%7nKl?$CN3vC zDJjoqr*))*$WZ+MOvF0=!)x*&k-b<)+qG|39vp)WWqWb)(G(fG_Pv62^jWxGwGn+X z!S+ESr?#PXJqD&joA9a6aPTpaCp#j;as!K3_&bjJ3GT*TWCNgj3jTNP`?4>-p@pXo z;ONWAw_W?LWM~>iXBZ(OT)XyBpQRN%reR4q&OCx0x0Rr*$0f3hw8_ZQ)AI+82*1Wc zBr1M$XZXN@BX6MfCsXceEy>+EaD)zMBOOmE2CGP!jN5+q1H9y*xJ4}hvW^h@z!B18 zkh6W@$YwugV_?)Is#IbfIP&`{tm}ZZR!ZyxN1D`O+G}tfjEnCk_3FTpkfI0~&I@cv z6q(g)SO<>0$5C$ULHJCOL>$$DBW+6I-(*AWN#K_PIrf1g!3NWQ1@uh8-%vc%fg`aa zQ6Ej@Tr4gTN13>pY%{vY*mLH?T3G1;YmG~lOUge{7@6TxRa9E(kFxl=s;ya7n@USk 
zHRAwEAwdg6(&)4j>ax|b9mgqZ-Mf~-SVyOoe1La^Es2gn)jqA{(W{WySMYTd7yq8Z z6yDTnC20x7xB>KmVw@$#VzY%#EBSc|*2i#RVF(}!msB$ubmz2^_k9+r6bMxmX*-_e zJe^J}If(fRYYL|}ia?6-+BF%OcYvG4&x0@y;2c~SpG%91h=($=cNOt>GVTT0MH!TE zZ-A9uUqg+`E|IE)0T`$lopSPv!f?t-1WHehZCb*GWFkFvdmV6D6&;UBEg6itxCE6DgUG+n{x>;1ub(dX-QFpd5BIvuqlK3^Bd10@_!qEtZt zJ6ZM9|8Erf=^tlm@kKv|{=(fnpMo>m`rq6=Jl4PE*5Wg-OZe1_;hw)!>~?yxFScX1 zC(Fn6UlvO{V#jW8u~l~L$Wcq!bd_R9y-X=tsh)!|T5jmQamiPMNbw1&_?9<|Trs5mZZ9KguGV%Sw`SKl~GN z(NQKGSZ9%*WhK!uqe7E79XBEWT1suG;mM1}AX6J=@s0zAJU2GdTX2h5~k~_k4US(k6<$$p;me$I zqq=RE+wDCVCg3dyxL#S6o2dSFx$OlWe+PC&EV!$u0f?Wlaw{>1?io@8UB37Lf#RvF z8?w3ICD17chk!5Y66GLO-RE+!`36KYp-EBDsZGL<=V&6 z-O7_6lGV{fwomV;9WG#nL2=L$a_fk9;ik;f7`$W(NJd!3U?IkOR=5W9j0CD8T#I=I z+FG;HxvWCGSWld#zyyfW+Bv(t=y4Xr(Iq;GxAQ=OpGU2uM6a%EPGf))+q_UN*H7h-S zWeJ(ec|e_1j<>m1Dk**+2!St7Zg8dkJ*Hbo`A|(H* zs`w5@bh7p9UX@!&TK%}@@`*+0KjVMMBazzB1#9re7qxi;qo>@-uEduSR5rNqj?Fk2 zOVlX~Vy-p$WlU$n zzZ0Rz_6-_{4*(v}f1vzzyQsGxsHOOaKr_eE(CsgVg8)e{LpJZfY9NzT>dVf97hj?RJCskM4RI%gfD& zVf^v|l6KcEeW`!2D@vW3wez2L^)}Qk#im|F{(iw-o=!4rhLZpPYySFz5sBl>)i_4x z-3BQ6UsR7pDYANmh05rjqOhr0L?Y>?#Xi{!PCR!dZjgtO=fsFDDjdQ8bNbpJsfg+| z+STqR3~Aw2)flI;Yf;~03SNCcSgxg-W)La%QFlysOS)XsbHL;XF9^rN(dqe9=)A%a zaH{I80_2oEc1}1v#=BfkOc=vpg0m~Yqc~WS{G>#nD7KRZvD%OeHE;gh_#&izNLc>G zh{AzEXtxWw>v%Z+TN;?gDy!`lMXMJ3V)Wqrqi6XSF_ zQNa?s)j?-C+12tiD42O>5QU1J#!j(MEH{f2yAZFF>)t%c8$-OGc5Bb^bkQ9pjv0kB z$R@jfxg+tFcARN6*J*aDC5|RaqATI34C?EDQ<~;40b4Gw9$5i({sDzY|4E!iQ=LXq zc@sOoiC5d;zkYUh<$TPvGQDu<-D)e36i;ja*4Oa+jVoVXrqu(i%3t+^PDPNto?RPS zEJIFSypCI8mn#?gi95ZSHU%nFMPQ;LjE7U!iXfItv9q$nV}6(G@g})FxGTV;s0dW_ zCm7WPYZGbZ8WcI2;ucI~sE~pni!vcWL|i;jcTL|yzOd{QbD=3_AL#mZuq^t@whrol zl#YLO)SK@k=rrB+flIcs-sH{xXW^=qIh}`==*4~xink&rerE$0c_5a*KO;$6VrYMu<|eup*29a0X-wM!O2k(W@a}R1;!d{b+R#SmWNLq; zX(i2Qyt|m>{t=GXhhrE&2l6Fcydm+D$h19QqwOGnQLunSrMm^Zt5HYDv?NP zQR;-$0GFi`N-S`LQ3tbq8Pg79LGx)=9gYtzF^+hhTmum>@vai@Cq&#)DxO3K6T1X& zq_WS|U27Z5K_dQRu>Vz0Ma-TX?ZhO!z5fR>8pNYY*0a4*qjK7k)_&4KoWwEO^~2XV 
zjKt=s0ZER!i9aHIlzj7=SeIbSVH$WMP*EknorU5`oasmHZ|F)v`e`#_gH53dq_0I? z))HrR6_oAR3!#>ki>#~$v#8kZT@)kt&1=POb zXNdVXu21fxJ0u=u45iK~DJQLBq-Z}E108&HZHm2d+Xtofxf~S97de>N_z%Rnv#SVF zr(wN10loy8_BwR2;gR^~ccND=hjnwqzjSSlO`&zVE*9&F2k;gVTE?Ok6nUa+WR)$n zh?XN3{c1G)Yaw##XP@Ag7>%wE{AV_wHW75OlMq`O6QRf!x@*u898rRtDvB~zUu!&< zQ?Zwj;K(H1_3cziPA76eZd$`%(qjyzmt5PQ;8iJRXT!l05v@q|o?OIDOFUOpWJ}I} z@E$iG*B0A6>Yl7VY9b0gVhMBQ?u&N>7?qCM9Qv$DjOg^BtDBa1syNJaSM%vfu!X=* z);)O8=|SAI*q$|@u(QtZp}x^u=1?p~b!w1W;^fanetpmy@66MNj!Lr;^AZh;4w;s(}|U- ztI_c=qjK1qjd<<0`VppGY3}0hWBu3#x_jZ`Q)pKSv!g+PKf3#0w(u*fHELl81%RrPGs^M8{_uHKS=@ zWl4L{Wg0c2X{u)V?%-Zt3A?-GI~-BZi%a^ZCoPFghm2a$Gy`KLAE4G6-P}h$w}MLs zr3rbUr0MjeH#$zf$Q;wC7EKw%O3d-X*BkDOWg+wjXN>YeaTeaRl4Q7LjB{tl5JP&{ z0^F7oky0Y7m)MXqYQax@Wz=U@yPD?_7`66bXyN{ zjChVMj_1bWIimj+?Bsx;#WxCqnmT1d3vWh2;`dT9`MbpWjX*0R_tN-H9tTG)aX(sX zosMR7V@;#Y+S;3tg6$N=_HF9|=@NOSCezxHtJAGDEb=m-?lE&u%huMg7ka`vy<$GN zEtlmDZ^xir_la@8WZ^y2A@_?3LJv-@v*D#g@CU{;*d}FhDngygN!<}GV;#a08A`_zK zPX;ig<`{SuO!Dcdd=UeBAwOFXZ_UxW8#*@}&Rr6u+2!yi9AXDVX>^Dy9^7AK1?XldT2) z6Y^4z)Ro_fe2K}=%QO*e`met+<+yt$`ZFT2^SJ$>*dZmp0A7*1Q|Dr*7XI0v!2Tgz zL{8$#-2L%vEX3i=@_vf%v~dYsbgWv)9T$=y3%wgSHh!VMxhdP5daHnrjI#@XrL)gT)*lwugJteQTSYgkHy~KeJPTZG-z>8`H-i0-CBne~L^SM5#didAo*WvfH3PATQTQ zovJ@~1lz7Z9Sh-zaFAHut=JgECE2R}bX$Xk1pKo~z>*0UkyG_2bv4s^kpEV|y4d)0 z_2=<-mXc8*jI&8j{h1G3C|L|_wT(Ylf3nxL(zO?aBSfNf>Gb5(A6lNX5WD_Vxo3s@ z2qcF57m=4$=q$_?3x=2=gq30f`wZ18bPJM~&?8P9kTn++z(8bhV zA>Fsm#Pdq=EINKv3#bXtC)JeS{{N|_v`}-ityDAe|5r_>7=~`63#ZZSD^l}6jAnEy zPAzLRqi2yQvFqN0tTW~QecjT=E$E8dpN6U)*S;;($Bn*?>Y*7F#x(u$$C#8QIoYOCG+4E}fp#%!LmARx=mz|AOSar-OS4b{?05$`0UmQu(GQ zHFN1fzU`R{*+NjX*q^wCAiW}KIz9cDnM=>%P*Dt=D#`~b5#H3yrQ=+DMFpH?;Pwg; zDJ60gos7AyX3&49i{FhNhNc{E?vPW9wJ*!x-q!E;)-U~^w&bR`sVT=F=t=3{Z*75Y z`QPg7DaWP7_+Wxu=|9$qn`Tcr9=&EwF{~-aQOvBxl){>FTv%q|)|4ami@+&I(IyhN zryS)l?=cD>?)H?UJ)&q&Ep8)tdus6j&+&hEp~SusK9oFIQ;V9=Af^@~Q^eGQQl)jj zh6S0JT6}@=&Y4DiHq+?C*%wbpJ7a8C*gj{#`MEPP(#B)8ducR zB44~@vf!O416=%sVB1rR8G>yO4L)oT5s8(x3Ri 
zeW08s50o^Wp8iW;R`wxOJO;;&9FjgL&cd7O%hn&38krZQLPVsL$Yv|X8iW2hQ2g4( zh%WlFCVONb_D4(kyWRRN+DgjwZ*r0s!%_8RRXWOT`LWO~|5=dTmyKP7{X24XHsbBR zEb_g;zAV?tz`pFrVoSFAvJbEgmv#Ac6%z=O?Y?XbqGjAcSax4V|HQ}c%f=(Yc3(CH z&++ZrQ)1r;uZzUmec5zq5Pg}*6tx4bWvov_g+lD_FlRY=-yR26bY2_}Q{gVK#WP||dI`Y#od ze}f<h6B6vi&1;KX#$XoUdd`{3;@)n+=znil;lk$d#p)%rbS{XWKTazoryZ60|`zE(-w zi+GEFCt}y;VISeccXFjYQ*l!x^PuR!+B^mu3t30zc1tbXs?A%HEZN!&IFA88Yi%xp zEHXYvSaxlufAeA2=EDfyuFWU#9RHA3QK-!(5^LAy%Fy6sim1)v;LPrGj#wje%l%nL zX3EMeBlF0+P}vmvTHxaM(Q`W^^ME_R`oMjNjfpqb&dA(~x)L=q58BQIlPv}P1MwIT*M&yF45V`@FaW{@loX zZJ?#3EC`isk~1=w1TK`M18ZmF&yCD;c39~e0>Ya_qIBu>24MI~H{C{$8x`xyD#)so-BZ}NFKszGEvrlK|BFc)FTLxAi-?u*|C2LTy zmUl^)0|$}GuLVwtPOJ@_5*0(E$~uV9sXB&+xZ8usPl%B{h*U=K_8?LN&+(H6Q)1r; zZ%H1k<=sJO5VfCjIm^Ks-6#HP4I+y$xpbCyq6nPj-GHGegm<8CAugT=y`wXTq`d=d zBiwh|nC>HjTHa+YMSRpC(lA*v*>%uwlb0fbZ7=Tz3AR0moWaHtk(i13NM2l$tp<_x zXz%6VpOpnxiEt4)HHdgHaTA9LqyuYbx zkqfo0bnOJ;0FfwNIz2gqNDq-Xdk~qI*9!MONKeQMMP7PQgGezfqD0`iP(WT>S@fzw z0{j;5Eyj^K2oZ?Cg6D)i%6->$7=GV zXsM!3xLBW9D(VW!(9#UovQh}2u&6iTA*G({kfq^eUI-aUFGQt=#wI;$mWC!m5*ijl zLsSB|DQPYbC~3O;;YEDrYf!!$w1fL8qk9?)9?$?TH2S77xB`vE6J(?ARJqV8AWo1UXuz6bZh`5Mr(nH$eVa#AK3TgjoxH7F<= zMR^@nBpuJi^NRE}dx+-}F;pH=d3WKtTn>8MA5~-z6eO7w=(+1@JeN0T({m%#^9B`i zuBGSU=o%VSG==_m-h|&uIS-Kg_Z{$D**rzheP6+Im7JI9xqUGBDLF*AgWtq+0~zkP z?cmIl@fp?;&+}shryus%8Z4C0b5K(nER81j8M$5+D8S-5eOv6*~& zvf(`XJZ@2I^Y8x=Lf!}}kD&aOdqRj7h>?F)R|xq8;>q7H9)edO5&1*!;A7hO5Gw4P z`Hy0e-`FRFsQi!4KzJ!<83dCD_18U;+7$)blj_wmJoKd!=GsiXFupgN+YHag@yS^N zwsD%&%~u_S!T&tNJL`IeaPOaq=lUMXZK=WpTc}9TqTYq?CMle>xNsOGyxQv!?_>__JEWN@-?4+lp>mFj#fwVf^;QbB!%+_O1VOE~f zQqMuZM46)~Rr_NE$5VTmF?)Xn&uQW zNI#J^nmYAQ@>URZ?Ia_Uh?EQ-{sn+I;!+55?5VIm3EGG0;Z*&~6DH&GDVDb>ex6-J z%$kuig)VMdh2dDwrYzth-F77h#)}K)nrV}J?q*CH8gU)YF3vxZFids$2 z73hDAq%4x!_PoH$RJH{zFr_-rc8ySKG0&%Pj((Q%`ZYb1?!w+h9W1^MP(rS7s;X= z(<*GH!g+SQM)%d^V5a&SHnMXqEu`Sa8<#8L*ihF^!v8;yW0BQYyEz2{XWL;N!GE`KrJOjX7m$^ zgQx=J|3^rk^a>toe4g}*9%^o$^hzFTU!L^J9%@jY^s1iw8{}_fe)&sH%9CE*L(RpL 
zUc*C8!;@ayLv6v6UdKcA-jiO}vsC=n_fXCCq^EeO0(#OLdV*0bp7c}?RX0z1BTp6a z`?81Xkte;ehw6(bJyPIa(yvC(uSoZf#Oa!=5#nHM zJA(k{QvUbC8=pnDb^wzsEiR=yW|472A7SDacPSUXtd1+( z5aUx^DeRcJ;vzetO~ifL9b4pawYorW92<|9g>j#jg<)J=auPn3kDJp31rpcEuW8xh z;t%2rhPW!9z=|xc&)b-|YEcxh|6_g{5i%&;9(Qi3m~04NNHMu^5sf6SJ#@0Tw&AFZ zalg-@n7kE+qsZf4pNvzT;--cmCR!AE_HX0uNl|4iYOEyf2tP@R3Sqd_;vO+dQkgQO zC?*<-(V~dz-@{jtx5JoFTi(734m}4C75b33*;o^^xP)8etxf}kul4x)n4%sHuR_#m zHz=y>+fWGZ!<1=fZ(?AL`+S%PhdlUCbJE)51oF`e=WO_L@*E*N_d$ACT-CYcc~2=y zXQ`!>;|FU~jvu0Q#yu{CfaB_;<{EK*3*%*KTvR2>@$r}`hiOry+JBALr(kwmP{G^? zzfHj``;wAF3sDxA1M@6Bt^=HT+*gQcP+TbHFRr+9tzogMMUiL!UA_)@RF01k_rI8N z9)UiFLtS!+8iL8N{|SpF*KD}eC5NaEQ00Hh%8{#&T*<*8F)IJZfS$1?1XU-99HOqE zg8BbuUCA|rT*)Eo1g1Lvf7ozx?M1HSV36poc^hi*z4p6=jVFv;yoU$E2V-mrzKaFSyH{UfZSN(C!L%pl!TiP0QFUvg=kl_o*5aTbvqGm&i@Xeq?j%Ot{ zL&~EWx$%>W<|J?6K^He`5GTZCbjH07`IHzj;rb>Va;?IRE`1$taIA#laTydHmoXoA zFGbPsW1KV@pOH(eE^sNx@|_o?A8@b3zk>!bDWVoF$7L~*Q8z79+ zy2GvV?M=-$G(2~61C`e|7-ixQ56|BVQiLxrCIbFQDdq8{qTBK33@>G~J!5<~DQoc#IA?21>MPHuIkfOt*)`MBY7d;(POnB6RdzdNt`j3Sa8(#h( zuryy+MAjcCrIzTbAZD@xSV77EY~gRr!Y?8PvswQmOdB0navWvnFQ^BI5Yhb27~cd! zCOiC<^3;Nb=eb0lGWX(tk`WE3EvmVU1rIgdxfx4tQ zQp)4&LehK|LN(v(J9QcU7O{vwp|dcWPb>UlqXO;UE8?|;adI;L!7bqey3tbp_gTc+ z9}sVn{2wyj<~{5J!l5!QPKXunkL`Gu9*i(HO38}%wt#r=mY5aqgHp2MeMCxDyiW$i z`>Yf1NQCPZF*?sU3C1V5C7eLN+{6C^i#URG%Iv%>6>2hDu1G0JX3JHn-tgtfizxpn zu?Qc%0`^~%QchovZosa~1my8WQcb!cF|UvQ1GE1pDZS(~2LZduvQvjr*ypGAaZ6&Q zd~`6b|7R(c^Zn2b%-d3`=qqvt(l1h~>O0j5(j6()@P(mh{CB0!6rcAeVD}`J<|~Rm z$$wvBEqz^R9Dg9O4w{#;!vB!*y_7kW9SNUgV8wtjJtFTFJ9C~&y5>tmgTv@8>Qj(! 
zKrQ(FTS^8-OL)X^%`ZcXL7o#@VC_`H@6sbqLMlR%->vfpm_0RyLk2ERh~|5}4ZLR8 zMV4s3ef=Os=#ez#HDvKbN-V;+7gHR64qepmJic)qq0*F0uWvkBv_Ge07WR3TK+2_y z8ePiQ9G?a8NPMZA@$MoFN74~Yjy}h4%hkDkn;F)L_?aW z^ZSszzO?rt&C+?;1F8{ysR=KTQYqh_RlwHiye6=UzUt|~KG%6KNHtEgW`p}`{$n~n z1~J7KdIQv3$Y4lmzBva${S|6{#c?>kT0Mb1)pC=<$gGNc~9gE=9YTrm6j zy43?ShVu+yLwqMwfEATermr&jD<`FizHz8C{_0Yi;oCx8bUi7}^O;mmO{C7Hz6!0t ze1-GA(7DRD1zn)Ovy?XYj?nnnhx3WRw)xJYXZ4Si*lypmJdmbH>45J}dr0#*UkBzf z-=sP4w}SH{kk0x3DFSID=Qkl;_B}>d>;IDTY`@`q4d496z)o^r7Ses+Jqqu#l>RXD zRs-|4l<-F5$RL0ZId2E5!OVtfK>v|q1T!B}R~r%}&^*k1a~;H-L3|>}US>952XdSg z3o|pvD-erHu@p0RPz9_I#8-n{k(rx3Kx+l@y%1|Kvq>V*R4Jw~vmkcx{r({SJfC?^5A^9mJ17oQG%(g?K!O-+{Q4 znX#|J?{`7m{Sbx9%!Qdi9|ZAI5H~P0dKS=sf_NK<+n70l`ic-29|3VUGry(W$mQaT zARb_5)kOG>b@8nbk3sWzpoLxh48(KHoJ3<&Sr>l<@iH^_)`Xbs;t`KfxXjc^RRb4K zf_R^qyO%=DaPbU?e=xIbVTj!&87Ee~L@jiXi}wX;=;r-%Ku5axM2Hc(`AJovZ@Kt# zhLCVuWo)uo!<&X17cy_totj_4KAMRF@|m3eDx^M?Jk}Ov7&A^#RAU1 z)5RM=tf8Bm>OkD%;yodz=w@fCSNmLi62vs!Y(s7Qu#2yP*itvI=LUMj#lM8uK{x;W z4CpZzzX`F2ZZ4%Zalys;A1Ghl%sCq9Z5NM+I7BxWL;;Ns=JgSoLYpb5dEj3(;l z1V7M2 z<^g&!n7<6MFgNQ^1-=u^dqXS*zgQgjp9b?;5G!(XP9(%IH{S-a1~-@d0CIjeKLIg? zn@y?a6ms)lAf|D12!;-SlA8xVLHTmiyB}f|H_s2T12^?XASb(dIfy;DdF~U4P24;k zVn1$n$_BBen-7IJgqw{wKxmvnP!z{iQgOn+-=o^9U)<;O1xPKqp9X9s=qO zagLjR3BOCZ`GoqM#cuu`#8uqvFb?8UH@^dM12=!_0CAI>=YERvFxWt7q15Osr31A3F2 zpHt@+X7CRo-sk3DRAZwJz8~Tr+zg>;6g2qH5VasPxFf_wgNHmr`39NGwm?-)gBOMv z5oAVf0h(s;`VjL3nG34`ZEo`Hz5hHSk~7P`*Lt)j~iA7<>i9Qb8u2FyJ3% z@XZh_BA`tW-!k|~h&6)D%ry{aNiikJoPgN)R~Y@IN8W3o?JAF{Qi|mj;;+v99#jmEx)(v)EOL ztwMO7e^942GItvgFNN>| zK>r9bKOxQcLU?(In#*iZ7OI|v@KlI~%iM+m&!0P#zXmbFWw!YM}%iQ+?&_6??{({)gWqvmgqAN^Jr(EV>D&OcZo)fho(`7De3%`ZJcu9y8U1nwt zph;nJ{B@a2R|2gcCI;(yF0)1$(AE$^Ug|Q>=Y%*Qj4y_`%4OE?3UN{x{|@2?m&uz! 
z`~U%=9JV1=G^yMk#>+z7?J^r-Eb)I6#@j$V;4+Jzfp{^DXF@#YGDBa2crA=Cg?P?o z=KTcXoiH(IUUr$?t|L~D!uW2WH$jev$UOW9i1%IQ%qbvyJe&uid|l?J4T0wOh!knT z=9Csdi+gx}phmDcdn3dO9$pP%M6g-nBE)(g-UecxU~_17_)Yil*CBd?&7IWswej#7 z5DN#J;hRD3>fvi3mI^k{V;|Aq-^0IxSTWe#6b14y55EMlMzHw>MdM8m{{v!5u(^@4 zZn}qObD?~L%^f*`&XZ!xU^81ih#Ne-1ket_=E}nmcX@bYh&_VMzotPv;o$=y_6s(D z+XhwFJ$xp_A;D%z>do$WqLxF<3^toz2Kv;)zl1n3*v!)&V$N**2E-Y`=2mK<@!5C` z>gK#)b1vm%Vm4kJ;?iKV{}`aPvhnvIt_n6!ECAXj8$S(kL$LYwD-e4@6~t}G$+jR* z%f_?2QNF=uodZA@WaCK?4p?sgY!;(w{F-bD9U-0zHk;Cjvo#wZ1MzaO z*^NexFSGIG5N`&X18LkpnvL&=ct6;D&jayFHhvxAA4oplHu`_g7WD|C=6*-4kNkJC z@naKVX@Ut8e+od?(df}(Kf9u7UOK?sAFIbT18UU3tuDDF%pZm-;D1Rs#=^-rd=W^+ z^tg8+Xuf_0AQjj1uOKg4u9^6uOBcI~F5?fVbqRm(iv2p5u?VP_H2O>GF)hf~YI4b+ z2A3*&1nnES46?nca~?Ob-KQn&Es32Oe>FYo5!`&ss5w>BOA8}NtOw!d?MS_1HNCEJ zpZpWeyJ{&^*3k>Z2&V$L=XwJ7|Hs*v!0A-Qf1h*jeaxLP^UN5+;K59cvCd!^Ornr| z$rh5GkS$A;go+Sl$ySOeNl28^hO{rDXrUBoS83nt{eFMvp8L$i|Np+9cRrtU=A7U6 z`}>{co_o%@=bU?=W(b&7#HSE52~@q3EvKWC)>gQk;kWKX9u?GiTubF+q{GbsDF@OZ zejWw0iO_>E<9+%Q32_a9&zBZjM4<;uPf-|O!DJLV`|vL!%lFukwNyTXIcU4>nxM!!H)fwUAjbm1rZwJR!_NL?W`bAMr~eEScm}tQa(B0=Q=76jg0& ziR($E2}eSk7}`cpR3s{)J&+-prIKWC)J?>H5~7HI4J7`f5_~~??kD&rH~|95&Zh`C z^q}yyu(%Zv@!zooK1w@8?B)5@B7CeiSr7C}K_Eu?h;4MZ*b8g5Ey4$EK_F~ng?vIb zblGx^`28%x=VOC#y%{=_EwY}7EKf58WE1|O2$K2REbC>mh&T)Z{{dzq{-YNEf{+xB zBp`<^l7LazAd(29fVol_@xQgC8YGEV*WiOBd0v)?Um7B*jqV{!t{0<#``}+h_G_?e zZ>cvHr%E(9Ou)7Kh(F9S?|;m#hb_VfY=f|9(0s)r zzY>ww#t;x*kBI+?#moI+TjMDe;I1mr={tFUrY`NEjT!w&iPdDNZ zvG@|=^dMY`e-YWz!zf@0TuN|=INVzXzQH2zgh*JIfS1;JSoSmlhO|3N6fq2~(Lt;rdyGYZL|v?wZLK;c|sR zg1WlRBCipVo0eM|()u0lPdEgSt3&vQ<|$Rrqw$hMu5UQPFG~H;!d&kN8YNpy(7^t( zFxNi}9NZTbArQM<3Nc8a!_WQ*GBiXOLXNoE67joQ{H73R2ncs@BK~-bbKS(;R?_%_ zmVLX0xsGCBHc%*Tv&aWT#5;))ke8%q;Vxptf7!DBCW{_~ym`^T4^%aBkDh43kf_|I8BSAB}`*}wt+U}3HT6@ldhR&aZi0%Wc~ zIqm5M$T3N+k;TV_xD<_1Bw+^aUY2w>N#Y49yn}6(aSJozUk6reE+;vh@j&n(1zb>a zIL(6KNeZ~69h7&aCmmz4)6)=YhBjuC$CmApG7liQ%wM2sna$iaK zN6V~3COxP(z`ux`?1=a=gj;ik%%K$G3-M2EkEJkP9*G7SVvf-hS|-=b9KnM=^jeE> 
z`OG2P&T*Dmgez&L6OvmdXjxk=%$2nwuyBHY@^#DMa+{eONHvm!T%EBnm*0x?g5&^B zg;a{i@6c zt+|rtPzv#b_$PVcdY%YPFUj5o@RJtif?g4r`33yGg)3sA&5`Vd4bKr-P^tWf=TS>< zQ=Bf*fXR+K;@1G7HP;1gJ}*UB^2y~uQ{(Y2E*RH_Sk78<*uj}ml~LH7BCT<*C2{4@ zJY(_r2=Iq2@;(u{*BAnF_z3+MEY4L$X{xqGeQFUdAr`sbK_⁣Y$dg>w!{$;HDWW z*{lqzly^jEk|AUx`6K$>Ej~BI83OXG;1eyrF~k`H{+TQaEVVe7D(!u@!wSEmP#U}p z&vB0cZ1DI4<^+he=3=J9I0U~~{fZRIBNIbFj(to&0TC$_7d36=)v!V?ZQ5DW1y;xf z&LZO{2VXl%UTQg9^(?}XIcCtBrdZD7}2D zHJ5Qk9fI#HpR2d#&Lba)fmi8Hp6cr$q>fw6x ze_*F5v8g3;CE5_f!Z}qANnH=IoIA;pDK0}O!zSb?;?K4ClOfI!5Y}|W-)M2JSR4J1 zSd`#_>qQH51-l5$Tg?d!eQG&e$2N1?pmR=yKuTH=YBPjndDSryvN+ec%^1X0Ol!E6 zg}LT!ntjyFV24w z-I0`v-lx+bZOsLDQHSM~mj5(`k|~COWS^kF$>LmxH@I{Q;gTXaFI!G37TC>&k{eY9 z2_B?&2!P0Mf#cyo3UeAtt+{;fPzrG-{z>ttQ5b*Tk}gtA$!uRxiQYh7d?1q0?Z#F= zY26DDiTHmYIsaw}E*GCib}`*VSdx1M0n;ueBn1;&ZxUb#>`z4JHJORr3wdkqB{;MP z@EHDyDefqkI+9FFMHTHtPg`P55~YX?!9*_pMdYk0Y6;=i+A*gY4I^Cwo1x7a+H^NZxwZY zAabO+D*}#qvU|a*f=QmIjrfg8Vg>WmtdJnQoI-Z9JT7ZAgAR295r%nm*3uUs+?pq3 z%|bq(l3`>msmvYE#}+g47zC@Fs%dT4>a>focdQb%dPi@(5^v_N(2HPKEiIu*H~hmn z!0WhQo^}MAWD);8Kz-dRWTlJWPyFmmRx$m2pOUEf5q~%&t965!O4Vvo@(v`lQ`c%% z{skOiQFl47{e~UG%hWp%uh+B2Yjv5Dchc7D^ndV~6^VH3^))#1aG7FH=dahJ+Y(^} z@1fjT6I;7w%2DZF?e~r9wfEs;@kv_k*IwUJZfS(3YV0pWfn&ZAYo8N9Nu+>ICEA{mz}jOg{{o3LiCyI)(p(}fWBW*JDUo)u^luQkNFv>0_n;;B zzgQxDVs3LFt;_z74c0+17aNQHFDb*{^&1&`WCSAZB-ayScd-cVB{DfSekpW1lpKa4 zOp9%)2BdR3-%6ep8*(WkT~hfr)Qz!2d5CoNBjxZ4c`OA7U;1B~%9nBH#U{}}-;@b3 zusD`81(D0kRRyvt_URHt`e)QeWL@m>9f({Z+1V6JosGxa_GQ_VB1JA3nuOkarB*-Nx=#IV30T*h`R;97+Y5L8AX*WRpj+07pd5Yl!B>gF z>;GeL`Zj1iC2ebEajk-!Yfz4hUBR9S)#BAf8R~yf8h%E^nLJiC8I&NA*q>jZBTEeW z+Ok`5WGhJ`UhIKJh?JCwAA5-XU9v=yWBiKkpcILu#m+p3NK_(t2_K#4ATtz{kL^M; z9F!(Vr5G1k29=UX)mWF~uvSK1r_YJKg05%Kd2;wYPx5{<29NqoL|fFA5HKFEKK&}oPun&mRVX}$mb7Rn3itZVoPGXwR{JFzjYDEg0{G5 zHHxqouj*l<*!3KQsNbPF=eU=_j|9Gvtimb7sNb{({Y&6ua1`H2Zpop9zwqGm7bT+p z<{cE!kIMH?qVipth|eD*@V5!&c5LiLMUb_>J-VF14gHen)nX05(Q2@nQ>s|a6Ns2O zr5~GD6_E>N;+GbCwuw@MTT7&T>}rgHgWE`?YAlxqE|Ex1tU3*}l}KLfW*TTGktVT! 
zFtr%mULq}HHE^#C?jVtNu?LXt!5z&fZ(E)|`o~_SwXqVpLKXHriP&}WsgG<_q+D{dlDa`ZaS_yJlx+w(*3?n| zqu7_WM^wI=qE)||82yK=jW&g-U*oUw9jJBEaretv4Eep%z4fb2&PKy&Le4hBsYTAi z!g*J(9RncU^h;$w!eCORK?Xn1QnvrAS`Dq5!X6=O+9o{b3`_y_nF}cDJ`UR5&VhQEgV&C>ne;sy*H4h4jbe%>ACckAT*^Z1YhZ1v zV_qHcD?;ApY0j>z**zrPD1Ss;>r zjdvh?s|ok6uhobOQU#-Uyz36A+~cTz)2_xxPb*fKM{B*vscfogD&7gHCe7pGPV*r?W^DAeVuv9Vot05|7>ecp)od&?NstkkS1`!($`!Eii&V#*#AQ*2 zo+@~58&oTq(R(j3KHHl84H>;nnYaH41AmEukBb>F2^37nAT@l8$)e5(ifKMq!)#y} z+z-OXh7b!045192QHFv)Fz*@uh$GFjnPsSguB?;KOr2aBiZ|$&u2<^HT=_D~Lf!gR zkP_UKi6lt53b(xi(Nd}5d2#8(yOE7MM7qIBjlx2`6;Im3cR4AK6Odvr)blEUpQlp} zTA5U>DtPa1$n#CEekq2T`&owG4f;DwcZP43YAb9w3Mx;@eV+lVfuUOtf-%$7OCyTo zSFPWCEaHr;D;oUSwz2>xoY=U)WLcaSosk6S%c57AC&MJQgcNa$g^;2f?54 zuxgo$;COgZS8z3!8e*a1bShT2bX&i2Mf`}+HuRyoODfm1);c}8K=Wu@ zJS_iRlAo;4ic~_z^uPYih@9W}dG4X2ki20JRsaEyfOBcz>;}KDAKwLn ztNkh6ux3E8DnVJK#y1rpP&Fo=p!xoee*)cljg-&P`t@)p>34ufwXa7yB@ZCDOFz08 zrbaoXxqiFOnKXH#(xaU+ay;q$7Z(5<;gsPngF3%rI$~Ek$-L(N`Ol#D(^okO8(}3) z`K$1+Mn0Nnnx5b!)vSavy+Q4(t#%f*CkEQe{|EdQU)DlSE04pHho0x8wFjRyGZM;0 zc30%?jPG-B#ZOi4-3rz=J89)UGde4a>vYJ63GswplL!3uV~+m@`tL2EJ`Lyc(Qi)Lsf+Qut*8Y&iU-Uh<={&W_*jC!@hhb!gK%4zrfi#aW3fZSswQ88(4|~| zAa0lqpuY6K(TghbbqQU?^`FPg^#dsVat=kwmh6w^Mc&30tcCZlYU3vaaeY!NV^tN- z<*#h0ys~Tv_1y}ae#Y;BoUc_qK>2Yv0aGX4%&oAZ9e#WkaA!b$EbMUiNmU+(8&!93 z)1K>Z>Tw!yhd<~co~qpa2IQcln|AF6Q+vyZ_Xn;z@kY=++~m)&$h8ULop9zl^~i*2 zgZFfkHP)A22KIJ{^CYMmBKo~t_kP^Q`b&U6g@hqY;9jQ%Iw;-SO`3+rC_gfiT1Bf0 za3&dH*{aZes0sP3=4kXpesZT<`k#J*3r-5T~GPk^C1KOl2pKx+;$^kZOFoE%!RDJ_9uy zu8vhvc~@TsYwPj)VHb1V+W_hqhxx9X-s~n_jX{(4h5;IHVe?E`lP|#OM_vEcLt1e* zy}B4~MtA8IYx4D6z039AN4K>NtY^uT9w5G5^S&-I81^1_{ll2X9Sj*Vi;yBIpSNa> zQk^h6)0aB_qnYT%vU1qWDAV1YAAZRQJx~weeaWf7MwWNee3dz)8g={jVRt_PWe}MYjrhx)g^#d zhVXLz)t6B3>m>ia5i2~nqqc^a@_A{pW?#V~z3f-P4S3mFTrmBbd-O2=9Ru?O+3k2Q zR~P7Y_s%{_{S2wf3J|A@zz4fQ<)FKICBC@`sEUQn7o;1^;BEYtyYNMPnGyT~z|Ad1 zH5k7V%5S?hF;4dc+6&0#MElWngTXzZ`%Z~7)A4}>2qpo!Hq@Oa`TEe^{tYH|w?Vwx zLe3y+-yrws@x(^mF`7c)>;V2uaZX*a@sV5OF5vu{(uW~FhBoSojgS4IYk>a%?C;|E 
zje5dBr9UkpHQ|)~Apxa%CGpkw@gd`$ivRFsO;lNp8=&UvdX9eq2EG;)sZNyXTqn63 zx`EEP1byJ#iOi^JJ%Z;s$>lLU84GM$i1QRTelFy8R89jYb`1 zZkTWAB&};>`iJd+cEw@7G_D&tNt=$FTkTCiM?$zJUn19y9slh#{9G5re^{vKaL8B7 zb%Em#9*W<`MZv1TN$O6xHm#QH2HD7GQ>XkCJkei>1@uOih00;ypH+`LWoU;FRFAjA z;der5D43(jzJVpO?eH@+nHiAIvxsxEz(G6w;!-qsKo3|rXopkV!{S@+4&2P-?*aUR zrKkpN5JWp%4$=akA0qk*(Zw`v+u?VcjqaZ$BeN!7wjDN{j7wJ*;!F!UE2wST;TO0d zYano%0dG^BW82|_cp`B60UI9T>uJNb!|k5}zYf@(;<#;xQWIf2jfhnjOTu!tu*V9g* zW?MPQUtVDP{IVo$W44vQ1qxa1CYTF6uSWa=w_1%_^GA7r5q|SR#USltr8; z1r9Fo&$E@90cf6ugA44ImJ6Kxl~&}h1H92vRD*VCX}G{8zQyajK%WKjJkb|v+FoGw zk`mnyfSe3Yo?_0IYrfpC zyEw^DCI{C)#FW1X+^m}D;dp~zk3q~1G&`pwQ?0@5K=xO(WUqhnc&s5pI?5u>8G(cA z->)(5e?apr99;kTc;3Mco_Gwe&4a%V@J35f4Nm?B^li7>So}6I&}V@>PxLREw%7jv zCbQK20LaNu*IxgvjZkI3LhPU%l4Zx?Ky9!8Gs96#2%L(*vx{@=^)HRZ5vLH?B_STA z4SW5s-;VZiIj|AMaeMuxCLCOIe!>se9J$GwxQx|%#^)TN1$$Ig{k@SW?f34-xED4- zx|7D*6d>u2@B;V;x56o| z^f28XQLU;%S{u&2jOkucG+ieFd%=J@gz(U!>E3-9_dlRf77o*uM6y1`fX?#rQL+Ua zSeWkh*z#Wm=>}?!V!Gptrh9seQjY_AE`+BPP4}VOaohvY2@8kmN+MYqSYu=}ZiUH; zZN}{vqN+-w*_48FtMXFXW_$v)>X7DH#K{&oXvW{S!Qm7@T`e3m<2yNAzU9`zvorZu z0v>BAs=?HK(v9`qhG{j>IY4eD+JL5QGuBwYpza1B4~Dw78TYsfP3&2SU$KzWgxafRCwM;0FAFk2WyviFei{gCfcHno0965oV z$8X)}>RwW2rkMim1paKu;VW$_kKfYK*BlaAdAj!%NCS*T83jKY^F1 zYx5&^%!Db&^LzAFsv@wwVtCP66F=soYkGbqj8|>I>K8KQ?Y#K2d>+3Bp!2-)H=-PO zU_E9Mm^XxMQ^uPx?bZ#v@_aPs>}nY^uiU%umP2?>m^90B*-RQ<%Qq3EUu^f;Z z79*-We(y#XcqwCsVG;&ooq&%BIp=trN#*fF3A%}w*!)SXa$|xp6Z8e4isZ58Pq>?P zGh73u)mmViLtI{R%H!t+^aWmYF~&=tN8Atm!;mA7e8rD6s+#-+fxgJ|Hw;$l2T=bG z>2jbk@9Nnw*~&}$ViQI}m@J!%$2d4sE&qS#8&yqy+CX>p{CTLuk+8Nfv?vw*KOSMM z$&VoD-kv`&7mE~7+7c?tDaJg020{1n(idc5yD$Nj5By-rkpp-&`E3N<&-4Gmvh1h8 z&V;y}RJ>Ko8s`;WDY+5dyv$@QU+nNorXEAriBYyHZr|E)xVvki<(Qg~b(Or@%~0`s zyix<}f(Lj)X3w{Yx)qHabF5nQ)z2F)w13~Y7ICo?xb}<$8CD8>O?e?Y$xo+L0ov#Do$Ia9H$T+>H+VYCvQz-F-`k00OEbG)3Iy$q)j@Rr3n zrFwRqpl*Q0w2%3KfR0s34_M>%Jwa}DgR^qG^l{V%?A$_IcXTeA+n0X8kwWBl9iWYb z*9vTNI}DQ;=5`ODS1tUX+%|v1a6SS4ZE=p|b~7wWZuzAq<@W()y))x3RGH54e?TYm z7jw*j>P&Gqo)2>&Y6jj_Q;bN^u1G+ARM^T?& 
zfKUyb6mV*%?H+;jAy^8|D)PqD!UpH%*0?}*QXd*)DnvVJP6@!f;r4{I1^RSb&~j7n ztc#+!3v@Ru_36Ac8q9I;bFODTnm|O=GgM;%%vg;twm|TRX1{Y@a>|X%FzE57Fz-}i z-+;^V5n1iWOHdf;{A`RMQUCiwZYrFpN|1|7J{P4TvQiL{?=MIEH^Gj`%1kuyxoD6d za5(>p$XWt6$?y<|l+dL`{58POj0f)0EymgmNpHBvOBTe{a#OK3V)HgoS z{D8x`543VbOMFf}BmPeDIi-)v#6BvMQD_N=3ru)d6?u-x=pK>JR7d<-qQx0hROV4p zLG#6r`rJc^`rNOGO2tHd{zz+n5H=Oo4!S-k0@2_*Cks0$gt?^>^>6J+x{ZXuZ5P_1AN8YM z=`)OrCgBg^=Sw)>KO_?R__Tv?{O)3>8#$Cj{rmHXvba&de|`D~;OC1ZA}uE0-z)MR z!c2h$yk^sbO!Hxn9z7Y}^d|l8=x_3UBjJW=Gzeb?dDMTUH~sr1{0MyPVGG}&doj}H z`(J?*5kDG)QzflA@FRW~NsAJ^{e{+?FMd>fe6j-bkf@)C$IYl;R(wkE3c<6u1^7#| zQ9o}l1^mvx&mUnz(}loqBN(L-pTAHZ6+hya=mT(wfU6OViXiI0FX7L`{|5SKI%0N5 zU(lC;p6@dl^)K&7pW%o<24R1j*lKe*b523N-xOh-E`cBM@2CDDP$K?>D-irZ*vo}Y z4A;8_d7N+gzYF*R1W~`BCH-5_AoB%`ijVX6koZ~XCG!26La8YFQ(zol5gtIF>%S4d zByQ~dAk4?Ph`$UsZN5C(MSMPJ;WH<=A@hT9-+eI5A196a{FT^<_&$H&G%CLkg%7<* zHPXk+&q&FCP{1|=nNtd){u~J}6`#KX8ued-kGDn!f&QN4n!%{waxi^{BmSib`#+n6 zhOoR8m|yYOpi_RrPEWhEt++i3OQ+!YNEbPu_Yq7u{5d4?RN)-#BM#}FO3BZJ=pi{d zl~}PVu#SS2YRY~e9x!qX%M-g*cD3sglk=8^6)3$;q^&5W2aOk~asE5Nrj}*`14FFv zC{q~=`kpDb`;r;qSdS0G1*yM|B#LaML}q5O14h7;mkMiq{tt-DH5dwM9v z_Wt!s-S2d{0k3MfFYf>mf4T48q}7<7n*TVRe~kS2yk4GC45y25_%!i8ISdpz;!rsJ zH0&&v{QqRrG}$S5qZHqx> z@|8Q|KH$eR|50HCISd%n20kJ%jm`;8NXnIuT?p+Qt@)o?_lN3|y1}yyPW=}`eZg~= z*MR;nn*S&`6{?GQ1D_F?+AdalP1#K9pVftSEBssp{!{&Y;tZJTsw_C(VC8@6w+i(o z_4o88-I}Q6@+IrG#ETBP(nn^nU$!X0weBcYM9% z^AmnY&W`C34I8NBY9-l&OqudLxYbj7A#2CUt>nth zu?vzi4tiBoB}^P~=mDgTkAI7Ba$W*|sBfKSEV*Iyam=#yB~Z8&&K(Z>&O<1OB{Yt1 zAmK`Y6GH-XWC=~9uaPhh;Ifb)IjhHAtK$-Z4VL`X}IjkaNA5m{9RQvP!Fu1J??~>LbY0gVdxrqM8;5V-iWbu;Y4Xk> zXJ-KY5{Fkk2e?a8ZDdIbmx@b!9-Jxrs4@MxE{hT*cQ5lznLx71 zxf|nAa?dgc(s6QzoLl14!+OUxW(Ck z8=hVGPVJ?ZVh{6ycAT**v+9<_wg(t@=xJ)~sxo}dieKQq4$X4xYWGecj!%4%ejfgZ z`ZW7`XY9@jV=l(81M7hUq4tz#Yy-*;lJ`2P|iTG6e8gQkEl9)XHJ%;huis~N8Y z{ufXZ5Ce05r5doi82ft0ZKxx*k1W7Bq?nYY0^Z0x)z(x%OCTLX1Ja(3YvCNsJQ8pQ zg}5YuOY|o4?9*GM`D+V!P)0cF^U6hoFfSXX`S9_i136|%BAE)GO=8@PtQBaSuOP}M 
zG46)2N&JW#aokLID-g#gzDU0d|3kffCM3?d)fHx9Nx?LUad+7!L2%sK$gZEzB+hGv z--(5D7lWv(3v_?ROeB{JX)}vBD+o)I7`HLU?}K$82)YBl;#`ym>OYG#)`Kzy@GMd` zAyU-JruywLPLZ+-@P?4G4HDJhD;Dg*jPI6U(H{kR3h+Ks9*v`$GyeF{pzj0ui0G3< zO&PXjO!*P#e<1yb2ul|<2`S>k8F$anDjm`ooPpy_V%#GcbuTtUL?gh3q?nX>xyS9u zxn?z9-+{CT99}aiYTq=8agXH;3^hsD)gD@`}KG*?2 z>VE(^K8Pri1@Oz(9*ILn!{mo!{k;&3OvB|e?=g>`9z6p|m4-dgoU43!8eOO=OL+XP z?jZm^(UDJK1`zORvr50WQFB%LViN>>!sA!C8h}r9t8=Ivnu;!+<5g(h4tws> z#gwk+m1pW|2+`7wyhvB9R_YN@7z5{W4HVN^Vr{(ak1+DjhIpZc9Nq^3-o?wFbw1XA zfo%zKR!V?h=4D^>4O#%O*F#)xKx2QPmtAd+QYV0&4soe%gJ0!EnlNvdp!0{5tU1!H z8hnzM-3RtFfz=6ddHdMl*L#r}(7y;+rx54+AVL1-c-fi22Ll@u;`~Y10AJ)~UyA(A z2DUK7Wg=?qFZZ%XK>uD~TSB}s^@n|euBr4|FMFz|6l?4S;IEOx)o3-09a!m&UiJ_S zb*zw6fWC^u58y5=z0J$6QB^6{r;DLD5iZdF_&vb8yzISOv|@E-0m_NPpI~ZU`Z-*? z2Ft2C+2}q;Df^sj*1DqYn{&F*Z;T75y096g)ZO(m|@s!SsWOu@t!`V&a5W_E6 zqk-WoaPWP$@QBYl!spd!wx|t1a9@T`2wCqui^+<2p8qU>C7Lm>Z1T!&TFs>Z&fO2s z;jwE~ohPNe41dv`cnpGcJK+O_W1hEz?ewiB?&JPZ-*^XoiX(pI6#$zG$Xbv3ZB{Z9 z6k}0VBqrPeax6Tpq7uXYI4EHe2tj=40%CurSgC_KPt@O!p(0_@4MUb_ej0~3IUl9<) zvh@2@XRC<%$7j<28h*Z%D649Jyd>|{@^8RKPYW&}s_GPZwcQua$N5J{hr_vq9WHgt z2Y1x;D&YK$<-&;;40a$QemzLU+7o*CbX=N(1l~>BpAUIfGc+yV>*oK8NOeZMOWi@w z;{HecWjL3B*-@`vc_^~c%P;*C?te%>4`KOk#sXcc7VrsWm?^~>Ik&3BaMo|j5b`GZ zS9C%ujChmX7cRns3*zhgQ&dw| zyOQ!Mo4V=`c!;G0b@h0xMXE7y9gsN|4eE+T`|K92mQo%oBXz}XN_^oJ5}(ESNbds4 z+Z~}i(Ovfl>Jxu>)fAT(KbiS7!>!O?U<|-55)}!S=P{b#W64iYV#2ZKaUssYV$}cq zH-dd$gg|3kFzOS-hRj-uhw$brFXA&YX55V@c)z9yh@qpsm%N-u=?_IZ(ICuiMW6eO z`Le@^cT^$B_qj!hJu8CT;_UTPDV!j|>{9hwawqmQ5z@U5HiHb+sCu2uRqAOp2;J+< z9=zs&P?g>KkNdPbjNrfj{yXzYJ>z>k=OH!=f75#P7>@04lrwvU{_1`V;k(gk=@t6G zD7=k^aO={&zP=TQE)dkcZorzr3WQob>`Z(T&n5`MB>!MG6Vu*pkkG9B&Fs|&lb)1{ zXmMTNUVzohNeFaYs?;(Bdc9W`6S;}7x*M+!;baYD>eV|7yIyexCa7dS1n@Wm*FKJ! 
z#AF0!?Nut1fv!`O>cPMS3`;{9IPoiHLXdVnRRSCYR_Klwzz3EV?b-r^e{Z7en_#}j z!2b3Cu0i1a{a6b@pjYovS}g~3LD!ecDfJq$BdFW=5m=#&k_24wsX!k} zg(d?xrC?gg!2Pe{u@QmY`eQVRZV0u!I8}9Rf_80Y5_(;QTk8NqGkdk1f{{SbKOROqehIH0tk46|jhu%d zYyZN#arYrSYt{|hoz=B*g&V;>{;osy^G~lS-$$!5i!RsdQ~*J`3H`|1dh2BvJ5~X4 zgbMO~h~yxW4Oiy(O0J4F4j`3Boy@$?x&e;smlJXv!-;`&0OBpcH^Zfj#)Q+_iCY9Be~}wY?_tu@z&Z-advHk?qlsD-FANz4 zrAp4Prn`aRqbf>Bbz~P7Nuz`s#Jt{ETDOO^F$noVouyPl|Jyz+xed(?45J6`*b7;y z3)V2G41r5}vx^g|=r9zjtnl@RU@Qd{haA+z&x+73DPeS1M~w`VjfYISdYD&bK@W6l zvp`b`7hf7D$3wR4dbn5NagzIhrV<8rchs2Bay%4RzS66GisUJvsf02;95pp0$3p>m zlvk}(PmpVYrV`$Pe`83FhXV3wugZlapBD1co{m}+lH;L(JjSbj70H`GQwcqLIcil% zj)wyBSg-0Tl1GE45-Rj|)P|584+Z4$Uey;!&I3&)l>F6E+d^_Y6p$x-RevBk6Eu}@ z-fxcD9g^drfIQ8sSp|*3s|}jUI0paukQ@&MA?pF(7;rW-lHJvWEutHOo=}no9T` z!@}nwIUWkgXkg{h5qYT_NiO?#uWp?k%5O;uCdYaZ+2s;>p^8HbUa8JBv*;V`sy4HiA| z-&(>mj{NC09G^4|R7K`1uv)5Lg*P#S6jhOs(THqxDy;=wHw0}Q_+wb;gl9%$u{u<; zq54KgMecV}SM)<&eh-@d6y7puErHYTBk}|SyWl?7i5Qgst6bths+R+~R|($7x(<5& zvncaRdL1FxE#Q3!OUK}nYGHgxYZqi843!Q9CB~H2&B1Uui-k*i=MN_&#T7&PkOX}S zj2fU;hnx6I5xOPi{0dIUhBNetFau9(7jI2Y%DFGhb34KwJ@G0Dm=5L47 z>`PSVBe-?y??4m6jE)PV^=n@rhShZ+;sF8&HJ{@w?+84(H~Zj$B7Se)Cnlav!i%RM z91POAFi1tOSv3GAXdo9EsR37pNgfU>qAI#PBr)C6mV$B{T*`XSRiA}48%no^bNta% z!4L4N*Fk?7E}=JuElZ4ttctV3Nqi3v3VIl5NeQSKxRjOa<4h;i_Jd@cuS@)f^%Q+S zSnWV>4VP4R3D$bUoY@dAwEQ_IE9gQnZU*&6I4G<^^v6)ahH#bS&pGYsonSl!@`G@C zGa`o&coQz^t_gA7cqr8UK1<&Z#&4khLft)xR6*`zaC!$K?Gb1Tm+{W1xVjT&#~PB> zbPAMl;Etj4?}#i%U>PIdAhHL6XW-iZ*3M+yHl~cDxCmB=yaaGiJZwHW<@DE}oPzR6 zxTKSAobQ~?zcZ@OfRUPr1wy!_b@#@N#ucsklB;UyZD0%qbub+NE>i7H07H*+4T4YW zXdpRb1|;`K-3vpJP{h3)?o|0{I}Ew9VfT_ytbdrqzgB33Y?ESq0nN zF*s?PmmtuZ5w^_<2#kZ1wrLGHVRo#cux%~}cNvx0HlIe|Nk-TEK3QHr%q zE7Zz{Obz1Hk!_nLu}YB$CvDTRoU{2s+iVPK9_ehGmhOfjYu>ia1)$D@tQUiNVko~0chtUs5r|PnNOQC!<6R9PU{3o&eGgTMx4T@HueF)kt zUFPeWFr8WEKG@l2?QmVW)WBX76a#5nFiu}ITdDE~PvL!>`EF(IPL$t&ii3+=TOw+s zL|WvuN2i?Gjm8o&G_-AV(rm}TTKo3$-bAT^UE9km944Rhb-8#x=j&?me9qT-lFzh@ zk 
z@U%NI|D-G}=P=$sXCN#nt1xwwd%l#7RftHTL|BEX54zFV)1-KFcZPF3a&w01SDN%n-m?B#5+)wrI7m6kvy=TEFgmLIVU zO{j*hCuVS3w=vd%UL$wE-K6zfLYGtbMQIJsx=ke_tznOQo~U+$ zJ&@iP*LN=ryT{8-5?9IvZ?_a#t3Etv2$63} z)ck_Y?78(=JpQ3X>YmEDho91e!R9MToz4dalmVH(DE z+E@S^(lB@#!u!iy#=|7MD~aGn{@*TRmYWv8j9G4#_+`v;b9ot6X3T>zKK0;Q4^z$n zU5+O=5t+nDEpE$7D?p@a9VnomdH@=Ypz)uJJF^O}fw{Od<>ILHw^7NRKy`<}3%j$T z*J6V5SEN!=Wj=fh3~vNc+F;~4U75d-EBz-Txf0>!Mx=#Ac)26{b@@%WD$W_{M1PtM z8?V6zGL3upNnN!bKq&ztx!Jh2Rr;odSmtG}K7l8h`H3wJZGzUigCO0&YNFz`abSHCa`z7pZor0}FK{wXQCvm3Fud9+{!g zE)6%&P^uTv%!(9l6V1O?LvtQbv{+btG0TVtQM}fOhp5UMG8YT`b*A$(#Nj9;)m;na zmFi4g<7TjZNp&J3jjg=gJ$Jh$rFVG}y?3h@KS3Y5)-82hs?*;Dt7S(9ZBg`XA1aMC zebsX)F>tpYJgLg1G6uA+2gUJJ2tFg3XK> zG`z|h&P98*H}4Uk=H|^@#+2Sb>Cx$MMIMH5xhf-;-Ru=(9b{GGG$+_HkF_uqlJ7hv~acyj%gKo2%>Urw#C`4MEK zD=64$prHn%k&(5RvGFFXsDm?}g&`RO|yq$su8B z&q+hm7*^?3`eEx6uzNi`$-zp)nWiG6bJh#z2$Ea?*tyqo-i2Il@ko}g^*KqksQ(Y@ zW(WRrHOQlew+tTotOt>aUl9!ZEXj?`O=M(xtw`tfnsi={3?iBDpvQ2zA{F*trf2z9 zR|tBR2`b@REd!fM|Kw(+xGv>3hbM(u*q((Pjmy}P@?KV+iwAL)S!Euq-vOEF@($>V z@h-%)-8+G53{ZVOr|-wZSHipSh0jHvW~7GUpZ3{r@~lLB#dPo@8jovlig zceZkNp(f?`OH|&Aav?HFMcyxw%a+<|3^ocQ$4jIi0HV^@yoz6dfTlCis#ivRKx34u zc)nZfd{#~^nl5n&uL|J*ZEzknykYQE`d=?&oe?4T2)yvpv`Zdg#24_&n*0W1KHBr* zQXFSiYJYLeDcccNRC<@?N_|2z-@udNMF`-_!)P#>-PvktVl5`@@wN2$6(KsS$IppW z{-OhPGjF9`j2*&(tP46&{6M)>W z0jtbrOwk=5^Z@4=Cb~I!@nNEy)1nEqT1h&+9FtHw-%ct86K+L#QtXrLc{0DR3)@Yt zZD!C3J(WHQ)zOw(L#$R%S<=Ps+XpslOr@X1pf-WTTP-md@`RYjA&=vp$~>pI7 zm;M7g*l#vu*qNV&3eSBCf_Ibaz>v!PY8_I@f)!lxiK_Y1`pvpBfzcQ(@*s6uz!SS1 zOPm!Ph-#7k3c84H_lp2{468Z}Q8A1`Y`qL(w-e`D+JErevIbA9 z1N;r(ro@MJR%a9l5p>Omb?(yIa?7DL9M%^eLPTZmpv6C-@t@Hw$Eg_~&2pS3W{A2F zLsa6`xG=ThNx|Yn)Wob!sNH6K9Ex01mhYab^c5{|`~_5}o8|utj$|%^p~B-WP>Z$} zbk=Gx{*Qt4sNr1|9hISo7?#=Mx4ee2<5}y6|J)VPCBOUxb2R8rontN}-e3IQC zVblHHk8OUl8?z@_sfXb<>W5I2jE8jAnFaXHND9vJ!=2QSH2Mj8pJ?`rFGBV-oEpwf zJ7cFV$4>jAs8Y@0hCfpd8w8K(RE|f7Ab5kqe-gmlWrQVDF)x4{{%17^A2;f4Ah?La z{?|hIgkdg%U_ONxPla%|5gvfxMYwVta!v#VMQ-i}1$pT)2~D^Gp!_tv-UO$sBXSP{ 
zs~F*+^b-PK!BwPAP76pzmck?hE`g5*zl1hzD2a85zsRu?rF`1w)4^Tf-wG#@zUf$> zf>Y&oadi_jAM@7|DPUNu+n8EM0F}g6Oah$u3L>h~(n*HbZ#yPU+=yE|+p4N^9Qa!X z1XoR90Z}nO(G*AZt}xRPPCtamd<5peRU|EyPf0DpOsr!jF4LLbc8I-a;HC0`+7i%Q zKuCB>SM~TftdGDeR|cy>aNaA3RB9cV`pc}ZIx|fk%e{|Ab!C`L+C9kNK3(bXG#L34 zP;(e4gwqx0;UE_RgBbAi@{z^XQJeFTF42Aqrx{~IkrZOt3lBukW zc1Ukha3$j*+wPf1JsFf%pkD;1MHx5A~cimW9p_lB(_$n47M{KSgX z&<@bI!>OcSkbBGY!cds{%Idn#e4NSz_XwP>g2-P8{J}_BM6%1F2ZxjEV+}<@Lndi# z48G#Fc+#fc5&ZUWD&fq6cci>&i1b0AH(as0NjW>6D|GfdHlZ2dPlr>ftBNF)swOZY5cLxfuLofroPHRQ zmk@XXE~)6{Pdhsme6W%?$7xW%f@4{JL}knfnu)8{nX=r&vXqA5QW?kvoK8Zd5P_y} zNxWoM#|uMZ4_;-Y!mBj|)Io6nQHZOC3P-a=VIjy1L_v^pd>XUKlu!qewTw4()|Yty z!aQrF^GJCz)?m962^q;UqmWW@pjE6E`}m+>2lsO_QZ4PT}y8ayxTQb zM1}E!uHfB=h_{m70(vvx-L5-eC7O)X=iN%)3Pim5^sa}e$HU7(lGWfO$(#+43_)xF zTq<`U=Ed*EwGi!cD|qW5*301C1=4CbJr|L^2s{f{QGqAjyj8_kIaL?C6{g&Pl-bQo z_x>r^pHQ9Mds0Q5$AOdX-D+YZ9-8dlJ$CQXt>=PN7f!nMo(Ob@%VxK3wIZq|v}{*? zE#Rr-vn#&`fi-Z_m0Ny^P}5#qcIDfzLxSwe_k+I=&fE@G$v>A;hfk@**7^k$KBH1O zcI8$nNlie`*friX9Yl8DDU~o`hO0gUPV6paLZ2) zLm}Vx;Y-0?49CAzcH%3;PJCUEJL$L|2j?+3>9{{Y;3y;PxOEJz6p=FQgsrxxKBo4{ z>KEZ6OXr&nS~WQ7d^;l0o)LDwQxKR0SB_nxHIS&Pt}`9((HXEHT_HHDs3ct>0?#la zT_FOW!%4ShEhMW!v~V;#jRx4QMJuEGgOhHpAp-T`%CU#CI;mG=FE15pbmoj zj{@Eijduy6g9Gp}g2toN%3u#|l@h{`BmqyxM|RQ2 zKz?76DRxbqvoz2aC51>;Jek4CUn-aEYza!Jw0luOOWg8a5_{ir@M?jX1DDGE2un;5 zV!|18MBIDOz2J2PsWV(Q4_R5FFGPQpCkLx01D*(Xi5#p#QgWE;E=X3w$zdw1m>8N+ z!5`#G8is}>S8z<3?E0}F}R9UFoPjO|NCGljQ@2oB;$V?3{N7tqi~X>42G9rsiGyE z42Bk+%6@%d&bBYdzDd*!lz{z0P9)UJ+ zG8kG-M>LBMhI2rgO)Wm->_gxgI2jDBRz&SZdUi1U8}RSsb1-aLjh|Exnv~_2P&Q<* zAP2)|W(Q4b68KlcnZeLf{c|aG_<>5i#aBULg(z__v`R@~$P1Ftz^~A@sa# zEDyrT5ZV=i&WvyfosPg%IO%GwawIfk6J)P>olWsE@OQ$Q=NGG#a89L%sl@K~C=}kK zQYr^)tCXm^A)DbdONkm-)q|6Px+MY^GQxp+3<4wJid9TfXeZ3)(d^8dgNoS*`h9R_ zaJMNYt65C(X!bYcad3YFoLAsva8Jy^6^AQUA1P<2gMog5O{f+47r~il7MoD2+RlXR zGs|cYM!?BzU?T$S;bi2o3TbDj63hlZ2K6``{!%!){TMbY8xjm2%9V-!Kx#Zv6XO+} zO5$^lW#a5upzQ@p9Cta$=Ym@oP6qkD2wVmygM3`WhQjoML4FpfH^AAy3=WUKhS{_s 
z3x`9c$DuNVL!_lRVaSrh;nL%9DQA%_**%MlA=BeoWI2OuY1UE`op5|6L(?BWe3*W7fot9%%q0#cA0u8%g<++kY>6ZgPnaNEnW;vvhA)O2$- z_rP8NcR!r$iUG}hcQabzKGaXRwP{w-bIYpssOa=gw_=G6u*+3FSyYy-yv#pugdUfm zif^Ow$9_@^MuakUK-x^I)UFN;i54Tcy=b9g0N&BIG;@FYWR9 zco2r&cK?>eiwlsLf=S{pLSFyRjp&r#%OPi_uKZpTM-o=GnA&(;#}DzJ-BWQ(lp0GO zje2};6LpV|bI8Rls1q*BI91_=x{~Z1B!!w9sVd)GgUPB|v_JkHY!!jqrYPT|36FiB zO22FsK5K!HGs!Z-TAbVg6(6WGGomB{VHGD?F<>d=IWDswn* z8@`4ZZaT$p4{SPpJ{!-|+z{oalR0*D<4ZX50R^WYJjoz8ZdENBDa7;aRrr53oJW#M zul}M^GZAv-$3#&0+IcEZa2z)KEKZu?&ni6+|Or3rFR&q)M13& z&H)DsUb(yr1Ez^v9pWEEqC( z1U%_U&^MVKDT*)Hz)denFuwb_xD@kkHsl3YnSUbx?q(>M0}(g__6n)mcry4-0H-|t`UT+{?r>X`lB z+57O|1snJU$RH)QS!XUhRg1!{gkhC_!4;UYgX+$J_n*r;OjngNOzCU;;qeW)bFU>m z3T4}Nc1wn+U#_82JMjM*IFAb69q?4`9)M5|WmwQ=M2fXEqT<&WZo zgOsD>!mQm0t6EgydAaNGe-4~S1+PCmmDyk|%E7ljpdu$P(!0_u-(Td5NA6yDlAN6A z`1fx#=`)q;fTGg*D*y+``OtFi?~IJdM@-WB3j$vg{No%Te|^BmT7a7lPpnf@rSn%1 zs#2b7<=1u!E%Fx>niK4F4)A?|{Rj>#3gT25F4IKH@3QjX-~`SBmPx2mmUkh~`O4|@ zNZEu$`La&>rOWW8M`-9b;K?Bf#)_N)<3x9*&KOrQn5yDL?>f}xXf{)>aul60PgWS> zX-K7CIt%R|&l)b@iIjq$!GPVJ=SmW>c(G(A>-7wLuHU(Q;t5GAJr}cHKH<1rwHLX} zdeh&7oQn|7YH%xMS;a z4;M3p<#(WTiQAz2T2<%DesIAhs&&50lA=kYlNIy6{I6*}&*RkJk3i<;?YMQ(7 z8BAbGf$UVX8l1qyUni_ZOT8Oop$h-`!3B?Ol|B=u_@xD>2RtchCtMI!ixjRV4#R(b z1HvOorT3bRpFT#&xe=bwV?rlo$uUff@qaa(N0RzfkDP#42oaos|NQF?DgX4Ead!pn zfv}l^m_sp~^!$Q*@e8>4p_mIWIl8MhRtDkBp_s(wJQTyZNu@fN3&X_^#jwvl1c$JapM zf86h{!((`~g>yv{X(TPlTBm z#7yu|%%c!{`{1SWETkpigQ_6Np%|WI)Vc5$Ho%?;oOeDVl|Bnh*+VhrabA_1i$?Wx zfMK_m8QiBU-O3DZ0`vq7Y=_e?BND@S#4$-fhR8Ss#=xa=?rbeMVnGhYaGrb*@D)@T zh=`l7)ZdJBL8K=l-Qi&HXBbPq+SDQ1`t8U8wyl3$g&bs~R&ECCdN`ed$ma-r!bmbA znT_x^ADsRd7Op^mKc!QV22wd~w>B}FK*Xhcy%OSwL3)5%3lK@i?afaJ=oyG~LZBTZ z6A+n)0KWo~%5$mKlxs|3jbHx*@loJ!QQ><;nipV;3r?RxWGVua7~_b~DlBF7PU4^B#%^Eo7Snxg%~Rq7N} zq2JT2*Ia{Do1X4eRl?M)spnE*J?F8Wu@lnZ&Y+$r!pLYispq#5I0Waho~3@>X2>8U z6{#roYi(htylDOQ1hEsG)bC>m?1b}Jzf!NCBK8rS)T=e&8WXl&8yBL_fRlP%iNJC= zkM%0``4VFL;G{mS3EW)Fn61z97vK>RPU`bo1g64ytWT-O2NBxx0@;1ghwZTXCb0Ujf>#1f&wB!pJ^=4GnEU}w??Yr@bNmJYTt(q> 
zBHS-jx1nfYUY7AzgQ9oB+YGHuaC##m?;>ynE`vvMt)3Hx{J?@N>9qw#_lNfv6#k%v zZiv*p5DS5D?S};Ehi|0viQD#0@sKTY1{d@Ac-;lco!~O~$ZYxeR!?Bg@?|lP&(D*< zy&6uQsDY-h!ccTQYRYAABV(XEr)RbughBmH$cjH=v*R)2n|T~Kp)1872e4SjXNF+0 z&OB&G{WF+KW^UgKbL>uJjzGxH5txR`ylOab_Q>J;2^@^f<2c?=kcd2v2#{%X?z!0 zJ{jq0Tk!rD`3ZLi**%U}{l`|Lv$(tbr_*_1?ncjF8L-qJjyr)9zgQKY@n!Du`kF%ZZPIzkrmEp5yK|c%`wKZg zNpsTO&RJJ+?%j)KVsm>?lvCE~gT1-_C0W?m-ER-4(eMmObB`5A?j2Yw+=0Klfuab- z9(-l1X18%TFThNqH_dPa(Kx3}{m!vR6AIxegdVisG>8@IXsn=WJ;WDhojRC`h2`5h z?BT+8%i)bKY_EBOYcZS)e>g+USaxBnNY(o@@?FjB7bwa*Yc+>oV<(b@?tEDZ-nIJ5 z<(Kss;rP2bD9R~o)o!mW8;DhL1YhPJXD~v+hV9X)VC(IcWvAd=xYQX3a4h(Z4p-xj zwd0~-r`$;&$lrTGQH0{Td}Xb+-Zsl#z^eF&FY7Y&jIU+gg_5@axg`T>&#Dznim$qk z4#i5-;-SzsI~1b$p(yG1IlX1a8Ix^?qE;YQtjAZXS=Oq}o`^Lz74|RR&Y|Fh>z>z1 zR*>Lq@1FlFJjJB|ThBuP@b^qm6ur2;zINw}hjJw4Z^aBzn8?y;8=^3gWjEKiYWX

7_IjkE&@AtynSu6trh-Ix60p_L{M7b>RXBk!%jqCy2-aR@08Z1| zeac=t%7tm&!>oXEXiIjJ`K$L96h$tM($}GUWv$M>6On;cc!DnrsHZQ=sw~#>Q+Wo7 zzi)z~yt7t2!TSWOu!OI~%eu;C;ROtc*P;|aQBGN_-UV(PCt1;rFLT#a17$rpE4;l5 z-7h|pWxeIIQ@PLDTW?D)57h;q6^XN#d=6{ivoQls;q{e*&k@8qNj`edOL<;An0cSI zQPyfV;>?iG?d}t1TCSuZkFu%oo^LO;?{#JEdtK-8lt1e#`lXlff-3%I*HK-Oi_hq5 zzBa8}eF7m@-A_M})!A>qjK^u=Fkp^oDLZSmDHpu!vb$mvzEY91Rxh)2du`WiZpyY_ zMe+C3_JbR4zZ?JKv|TTMGyhPnG0eH>ZEuAGdDoY1zibEIppkbT^gM@(7X_|5Wd{B1 zckSuuccK5s^(BRw%~xgh)iUiB>V5R~?1?{xf2FZRoArjc%IeQC&CO!>Wn25$TeLw>a1KjI#Ia_NiHmhGQ_;89%*8u3{^`Qkd#RM9y|u+=o{o zaT4l{y?1QQG2+uM{6VkxRUY}yeHA?)+*fh)fh|;lN24z*jCiY_LsjZ5|Ap_Vn;@D% zz5+SU!gx%HCL|VPd;(g0)VdMlU7)V$WU}LqY8Y zZ7Z@TQwiMSP}`??(@MwC==qI!os9d5;!h)S1I7$cQ#GK(=l}A?NQr;M^7+bmynyR} z#BD?38;q|3mmcmPg=oHk<=T0KE?s%sWBqN{$0NS>*4(f}NuB@|_wrR)t+S!*h%YLJ zdCU*BS+Du4tbT`YbF)L$o><|Ck1VdEqmlKwc?>f&Ou&pLrC0t{tjt$(u6_f4IIbIR z7lOBp^=oeBq&|LJJE&l-Q#+Qd;HbI`RmrmabH3a5<)#wQVGf7FearcYH9CI_mrd~9 z80ZrlXaC}wBW{R|Gn5|tMN0mzsxY6g%4#)SS>rtVNoJG0e`&K`Ygk$RE2gpsAHJ(Kal2hMUgO*D-dtU|2hjdV3w}?)>K?Rp`oID;>azs%C{KlS}^i0;wmIp%RY-{uw$DXGwuV~TxB$A7|ZJddg4FWwTq zl5_Qqq(bXet`JVH0kJv$_wgTF?DT;bdj25{03@~n6&v%#@t0XD^iy5KQ8`eX^)gQW zH3Q$~7LFpRQ0TG64OHlG{KpozDjokl#>L~WAb0VVnALle3N_gDw~*7Y|5bx{{AvA> zr*cg4am7yWZC3~*Xh9b;H#63Qib}rtZ@$XRDsn@vA@rCT{Ldfl)>}d=tGn`TK30}P zNwq)qb$%b?irqBgwQtU4IEiiSE%<$I;B373Rwme}pk5zZaTH&PS^W=E+00x+=0M*6 zG>dC#eQsPK{N+cen2eK(mhaL|#at4dvTVtAY@{}iP@ADDVwP{ock6yx_Ge!b8<1FG zvn>084K4m;=yNJT0(EfShCytX&H43!yg7-Tx8SD@>g3X8RkobHC1)t;vL=b2F+NeE zH;I?G;u;)u*__0OwEr`p%T6SEatU7px(p@Jj7WbWQ@ZR=;%X9?DD+_@D!1pqkSTEj zi4#d22U;)QzEsqTtnZFYnjEv?vApsh2?ohZFl zVt2)qX~(x4wNph_g)R%9_u1FxwPK6C74Il^&4)2lAx;HaeHghI7h zZlgOmONw3as?lY+I~0=T<>$rM;W|BQJ%V}n?kUpyWKNum?jp_^?8%Se>)+&kF31ld zVgG4(5LaVT8cw9t6u;1_u*Sb{gDItvOLH8AUROTp8kIuV;-Jl)5zN$?%JQ$KPb>h-e^-owN+|#9F{XhQ|KW^q zb}^eL_K3Vq{uP^lVE@-fAh88y+yvyWlDH6K5~#0_`T9io30c`hcx#F0{czKVUYIk# z?c&0lYTA|A^DcW9f?T4yl0X%pR^r+@g=AHRAX#By;WqK5vDd}D3&=Mk@eIaOAX{Ax 
zdSJ@5Cz%%BX;W1DS-X9S)8}%`T1$UdnW^M36>mY4TKb7x(!1S0^cx^Qo5aQ#8-lD> z!rn=RWclSm+l5VbQ~>Gw;rQ>P0Ha8pj&Z62^!_WBZ36oDP<+F?KDGYB*+}c?TWw>?@(;->w z8Hg_()$mc@)@J$26(68F&#~urN`JGe^9YI0Fcv9sH;Hb;vh1Irb_|W|0eEgYg$e=l zHTKyHai1T9*96=T2U+d!1h_nTmMrHCoFYyOPP)tSzf_#nhcg_wC%p&-C3b!kTc2kC z6M*5LQFJ&4sM4&3`T_V-?+PQi zzXe$L>j9di08Kr>!or&3tFjy6-WynDM_>#GS&h2ikJ7PqtI`9Mcg1HxI76hYN^^iG zjtfpDE0VIpzY0yJDqqbD-Uj0qv8vRrPU12-(uz0Z!@R^vLcSKQ9Ksq4Rt+q>KKBmX*K z<}D#N2Dqjb{;q0m>Q{`e%Es99Y?0GQ;N zfpLuzl05HWEC6+)M9DKXNS^J}h(DfdR7sw76ijYx#V$&LL=9u zB+ua~(qu9x&H;~?OP(HMvTQYAlILNJJAp|aCst$~x#X5|IP2kZ$PF8V3KE*LntgT$#V+EvA{x@HZikBFQuKFz(a^CV5`Ln4^Rw z&-#Zm;XbFC*8M$>%t6y=qs zxr*Xlrt(VDyovFu64Ep)9z$yZ(=;A{$No~NkQcgR?O6nqrWuL*2oN;Qx5=|)IcMNV zainR^#{W!lOw+h$Av=+Rx~92@{dWMG^-uQOcF3n<3v}cTHt8s#+)pgph>Ev}Q{fN;td#Li#G@C-{BbsTN z>hyCgSr*bX&5a^S)9epnBrr`g3F90kq-pNLxB~=DqbZB)cLHaU zRGSBAlB_aKlZ&s)cE!C5u*wd?7yyE%>9bGBkXxIgRqZ7x*EB~#m>^QnGy~%WIO`bE ziX_uCW0bEn&6O}N7t1t_yH+Gpxu&^IB(?F=5M~2w<1aA&s|coPc23#4)jsB1a80xP ziHwB6y6O!u{ss7$rWu>~ZmnQD?XvRjrzwCm&E6+w*`B~OjXM@Hx15qR&FT1_3QW_u zS4Hwqzm=$IZo~B!xl7X=nwqa<0>{cmWR{xL)v?DDnIG!;{(9#4qYr~gvOo`}JiMc}tn zoY-s2S7_Dg>?xd-Wxt=ufw`Q-P9(MiE$%AgR2Zkttl}p5JT{-h{?kDIEQvQUUIldu z+v^>+R7{y1gwLFqWtMf^g9CX_v7(#ul<-;iWNz>SCVaj>h082p!pBMc#zk`BbIYkr zssIx{`<}*JHZb8c3%4kImIUGR7rTA}CVbwAh2UQ0RGiSP2$n0JEf+rSP-x`3l<@f_ zMd~}$&d}j6lnbAcr*qv2O!zFscomrNaUzRn5TzeZey&{jY|JESGRgwOP|I1zve zpX#&82h_>mgpY^dzJG;q;WHkm{eTId_UABb22A)IhjBQt5GH&)1ZSW^lnbAaaePxu z37>P%rFQ`)e3oD=1{S8F@Trfr3hN&TpMTDwMQp8S5^Fsmto2|#rG%~ZoX5BTO!#;J zChJrn7d{uU|6CB(dN7tKVQW3-^ZNiMe7dFV-KwF{^pWs6=RcIi)_U-N0ED$3jCVlD zzuj9Q7Z97Zo~P8bw$?*{g^FlvJr|(bfDO#fDP(R{haeeiJ(r2E zf%#v!cL6pqzl)*!GlTHylQPvJM+4zgc_A4A`drNw*2D_X8Y z9fC1PEL-bwQX$c66_5|YXG1y~6Fx`df20CP_*{i?g#y(7HB63N5pZFZ#)4%FpDl)& zXU6R_(F*BI!slLfXi50ELtz=N4TR6|6loq6M)-XCGUb)G+R>x%@F@Hr3nb3qV3lJynMnBTj{mM82p@N?Tqfyfk)$S0f^dRJ zCVZS!m5rxl5zZrIc37_pT zwgo19JX%fTXw&PjQwdbm!*D+YgfYoE&6mLm!skkSF9kLxEx>pK*qG$p+D2R#J{>Nh 
zMSuyPei)kqJ|=vQ9A-8ruUqLyjPMz(%1ihh0A-YDCVb9}BouJ6LKrRPTq}}<&twSa z0TVv=VcesHgwFzuw?Gg+7pEX4D~3bQyrFOsK82}l6ioQIYayGa!;53>JRH^KbjPU+ zurB97jQx$Jj^v@3RYf*WEM&{(VyP#YjMI5yX))&kjQhm00o++7K6~t=2uCS`POEny zEHILUkCQ5s)!Io2pPu?XtADM$l%5V)|Jn^>4PgE2L&Ji9lN6x22UuA6)hjy~_kqAF zdo0G$APAplVgT0Iqm_tTh&*kYY^C^IY&lxaI115ajv5?44Ny6uLd~XFNeB8g3S!&NHeBQwIHMvXp6vxK` zlv!bm3!lGm{R8OJ{i}FwIE%8n@Y!aV?aN**+cqR_TU^@Kd+)eS!Y4x-xbS%;ZvPv? z=ed}@sG1n%riRJKEBDr)BSp5012Cf@e#PT^O?ZzMZOTj2dR@*f1b{h|LN*^`E@-hs z8HXalG0}4no7-N&qiZ1Fm&CmocY->(^g486FG4 zFq(=(oABYRn(`_KIo;sq4uwXpONpLSQl!blEY2=(5hvZ?cIz}I*no+i7S}K*4ovho zkp?A%w8N3F!Av)}?SbQNz(mgp7!!es9%mLax3c(g(Q_NF*NQ9Av(dHOF$rvdaYkh$ zZUfPCH2y~dKGtVCEpJ3q!Ed49)@NP?&`v0ZepU zj&UI{(Q|Lg-mP-cbK)D6MWW{y{C`pa-Qc$JjT|Nr@}E2+6rYF9_9cH8JzL|@4_N;H z#+a;xM9(6O_kf8WXY)@o#CFlM?oCXL1G}_11!JNTy0n;&@dBuSCFc9TMbG20Zk#a0 zM)W+Wrq!Ul(#;$TV1x1&7@Gkbl$}$^+^UX;2hnrB___i)4EO!T*9~r!w{U1c5IxVP zOsA<1=#w!r8MO!T-vzmt?($U~y% z0c9f5b0Yr7gCKg`mC2@MkiHa2N@6C2>qRor0VaApT213<)9a_I1S;xfxK9OfA}-dd z`7$^`^gM;{EMQ|&;SQbwAQ2|wBDb~?*F{fX+&2LxdJe$Q4VipQ^yK`W^r2kQN-$V}TOtnF@E}F4!GxtE3<$ zD~3bMyr6IrJ!?SdERu;HcjW~vI=na*-oa5_&S0GMSgdt9lQGUSmPF4MF{=uGDzK0& z8;GTzREpG)V!P$870ezxK)=gZok9tAD)~V;TseXF#khOWM&&$F{t$eGbAi zA_dX&+PIJlk9b8Zq^TRCXAhNIqUTE(pNnOp$6YHDsTf%zT_KX%xXs<@9AIs{F~)`< zh@Q7nwr;KF5q!00IL`Zst*-h^jMK$NFB}zr153!q^}_tPt@vJP+q-dF`_i`Fi{m!w zg`RP{7E1PtP4+kR!k`#}to8aC6AWht!LUX>H(SL>OdoU36;E3G@D&Q2FzmU5^4|iA zeM#i&^YuHaPr!}&ZlwB-5?$_PFcf<4Sc=&^8!!rf_r9NeP7@DdPxJfG z6d>P@!~l%VK%KmL4QESwXy)u#D%x*FCp#qquxG)xiE4Wf|?3kh{7QN3;DrpLL;8j;k zxh|zC?oGSjR~eVv==nv&Nt$9lc|QqEQ=IxRkAMNw6i%es4&Y?x(tOSbFq5YE0mtuv zX^Ise;c+lvn!=gI%q^<}U#=;(!?h2v{(CXT$KvXt8D~@`%ef6SMaM_8Y-Pa5x+JHS z0ys?t=k;fyOWG33=AxOVcq}!|WYrK&(MOKb6k~8Y0MtJdeOyyafNXb=&5b*l3<>0* zDei)FEig@S=3`lQ957AM_HhIOsOxwjA@tCUhbUy)lr+V~IGqMeQyeslA2~2h@f*h1 zz(Sa&@DLRtM7gGz^aR(wz%<2(Cz&+`rYXL|_!L-}hMMBdeY5zPOp(wO2Y*b9NK;Il zon=P=(-c2oe5r&MQg(lec_v_*!UHgCraHN%c$EG30=uSP^=UdeVAu4gVN3+3DLzTr 
zyHyj5qlJ`LKcXzs6d&RLfdWWVR6oN!JP7%>92p2))hK`06l>$q4OspsVjQD{G{s98 z&jZsG&Mw$pqDfP%{48fTu#tN##{NoZK2Se6=Ife5cfF>!N7)@@OJdzvc^hq- zqLv?`jd@ki@!}p}W8PXA-GPmH&M9PWReO6ZEPSLxqXB+*+y??1^B%;w9|TSDbINoW znKsZA@8k5Y9D}CNt>57@S;bq>WHS{{qg2iF^nbucsU8?>f}km!RIoe8MXwDpw2*R& z0_YC1A^6)19W+oKP2xxe=-o5sIDyu_0pBoGHGF91km&P&KQ!}Ks-;`86p|b(eL}C5 zB!@c`>?sC!)8C~eNBs!jIg9o{a_pq>K$7EHN`DnF$*~aQO(nF>(*8eO0s@m99)MSi zrBERxvfu&=Ws+kA?!!Tl99@%V$#TxXXT*`@I1~TV#WBg@o`tOCL6jptG;=%qZw4kg zKEqf9Omet?Mdnt>fzt!Y(d|<*k>vR29PX9?L2|fjC@>YA@W^24To@PV;4Og5=l(-(7$WSyM4C0ybnhx3&@2CC5Bm=ZdQv%znW5 z4)8I_u}(bZdFEEN?WDz!91p9!k{tCf@=vNkkQ|$*6QyJYmeyHnK7%C5(GS9=z&ft| zF-9t(j_X{Evq6v?15&t>6~mz1M6spVeBoIjqlFl4%AfCSU@7YuLwG=PJ?iY zkt8|XwK7@dl4I(BAxU!F1Ysty?sp!>TwvX=2WXN4G!L0(NREBQS7m>|{X6j`IaYj$ zp#lWSaZ)TSE0EDj$F^K@^o6jANI`OJJTBzIBZ|=qX_^c5@S^gS;|*G;{t(|{%9@l5nOV-j`OR)B*z~ZzX3koZ;Cze z2=Y-sK9Ow|vC2o5ZEF#?om1M@yF=V2$#GrWF3E9T-2OKt#~CsEkTpX{cyD9tAv_oV zZ4AR0$_ZCobN%IO&>Q2}v-Ui0O9lBT5@=VI%0Zpndi}tbBrX1m1>gqNv4ogWZ1JH&NLqXY;|*ZaqU)*92hFqI$n@EP0_rv_hKL%Wc&6 z%W`Sq4uwXpOG%5aDbj$0El&Gy%caG~&E?zRGT;)`@ zNed6ba)m;aON)nayhTjiqjkiBEE@+*T0D*MIIu7crNs`ZN#+p`Y4Q9LT13*K{X%Y; z046Q2$C##sq{UAdUjvgC9)L@G709KTq(FerbIu|x@t&^_O0CJfZ;O^y2eZ)vfAs@pVTvs|&~H&jPs+p+I+ zG6NgireRzKTI=R%=g^ue&%(kw;%Gd35trHGXgurt0YeiAa^jenQO*o6r1CLrBu*pb z8019fSZLmGh9`1jio$CIIv=M=z($~ZFzyn|mMEN5$lR(`Kt9Nc`3fL8@fQAXD1hX| zpBTR@K=1mPqvXVo_@=R-bZ=JuX3^*WmJ`RPTDoOr;0rl1Mtxk86YfyR+$y;?Si!hF z9im~h2Xf-7pQw)HMCT8=&;}+ahGXodgyh7P7?%K(6CQv^%u}e47nl4WP$-iVi*f%1 z1UYeI@+?`-8QA7KIFb{!A5k$77G9=;2J4O1&Eq9nkEDd*NU;<}vJ8P~zU6~ahP>@Jez#4`|{0wyQE!1%8c>a3Rkgl-N5Iq^~oQnF$= zRKj@*CpobJgnx--a>8Ai7uMm$@hLf~N7@Icp}=~iOEE4nmR2ynk6Cg5yjX~VU&T^K zGYhB3#L^1JB8-p3vN7FRC4MMgh|uFlBIvZL`IMOt5LPgpRGBhu9a(x_;i95wtWskxADDl<(UX5`j2y!AD88XZfa=}W+w!ABT8p3Rm zf}EJZ6-fA$tdN#6s8>HEIq_c@i^VcI;jV?utzrx!(%vGejaw|HZou03Ul{9xASaHA z*;1ouE$BH;Cqt!NS&=uc2X@<* zI~E#I(rsJw@SO|nwk`KBWmX-t@MrLU#`Q$a_3VmX)yNLKLo+qOD>#Z!nm4U`2~Dkiuc8!O0R`> zXEagF$jedrNt1RhMAUnti+W>pjsBsNyA_k=1YPm#pF7#Vc_RGpg;LNajl30pKZUPR 
zljvp39=R%6DKd91*xQ%0`U#`M#`KRt2At$x?et=3y(>c>E+`LU2AIkZ`khSlV(472 zh<=5Nzoh2UOQWmw522ha?rFWe4Zk1?e8bpou3pu;#cg}at4 zC)vMGnTSh%0bkQ0TnqBkN&E-nIZ%IX%+mjj3P0-b8KoD~&y0Ip#*LMK@PAhIiu&-i z0Bh9sRHc#M=1u-lT*QvzoBEo=*Qm!2ZtU0Ml0x_5Vs;c4@U_7Ye2tn)@s^I)V)sJp z;uGvB-ow|Zvr#6ET>e#PTYR1!#YgmYJ71$_k7(?-T$hSfiiQ1f`GLLpV!pnnA-)6~ zv|#h0^?WTIe1nzP=$~5sxE@I7x?8@RYwB5l5I+63+@X+2C0GA#wY#V+W@UCFXGEK) zi!Fa7P<}1DI(^R62xySVTAaMu<*&_MO&QV|U zLPuaTJ6~gb3T$R)8g30|b_Oot2|{2qJ0GU{xaG~oIJx3qv}{^5c85admb)%Bvr`=9 zJC>-7%Wd>|MIkFK8edP|mjRpE+3*|gsRuT*<3tvXkKkfTYp)s1v}oMqTYl}pW_G$^ ztO0Ch$C*WX1J6AC^_iXhaNSW{&Ft*?U6$G|Te|gbzis#o|tp#Hy&8Wd&Xcq&F;f z`ko#I*o@9Zj3brM8y4Tfcox*n!{26fJOnrRD1^`G-1q}8DFHU4Q~Z%Tp_I@KsKZF? z2rPun=y(VoaZrfz8J(U#G1&lYM&}%i)0EH+sGnlI1}sd&8J)FLQ*@kQGdhPRpm#58J%2Z(TvVB_&=opS}gt?<1-NQ-**2%;5JX??=w2Beq%lgSpM5%Y@>u` zl+MIB8Q6@Dvr)=o`;5+O?0?w^oGmSWM_U6EN!wv;3F?Q(eE;{1&UUG8OVqTB3v2NM zGRe><0vv)c(2Z&TI zBaX!B2swrs9o@SUJ}2VIjLry!mk789r>lX9fafuu70YIHoW!+Zq}RqE%;-#40L|!p zh5r`{ARW;5Pcj5q?;|2bGdj=X%atsT9-Po{h4}L5^nXw3?3=3TRxF05bT%#0MWrbn zcPM0Tm0TN4>70;`(H%4hQ#yyJ^VgKlrWAi8U{g9rV;rG`rgR?0xEI)zjtAfZC4~xk zG3{~>g|aD~CV$aof-t2sDS4JG=L}pdj;3_F;lGABwnFTlG~-c}Bd!qd%>IGErgSdG zm;!7{$NhQnW6CY$p(&jmD#=7sIxpk@63FbeitbvuOw!3BX`bX42tSErQ#wwn$_7v} z-IRK>9HqTi<+6Ackl6~cJ2qWb^!_uOK+=@Xwh*=gHl=em#_39EO6MtzCxK1rc(j_v z(Wck0sbDJV=eU0c!uaHz=F8xODV;Vs@74x3J`Kj$3E24L+}cK5pVFCv>-pkph4?{? 
z`+=HPuOQJo0F-#h&9sF-uk~wyh9GH^gS<B~Eyz+nY^%vFp19aJl#QK$a)){orKLB=WFN z=2G|H;rk6J&5Jmxsu4;3k>*8Ks>-uE5a&m-s%~ZFx7lIA+xwOO)K%~s1>+dN0E|A~D$giC?VcQ}cc z-!7dK^BpRoS|ryD(bI5d1Dh#wZ(e-66wl{9a{WW_ukrs9@Z;6|hx@ijRx0A@ZFBuY zxenF*Zh=0ggPpQ`BMKKHb(2fJ&lWw^QyuKI0!sTi#rIaao-Dy&~i&{jGwP5IiK z!t?_htW2UoCE7G*PY9J5i~m7Zi3ZuVYfNZgRk*%Ec9%fAc$wL?Z_JL_J%Rt@mR*OZ zkn-LPD<&&7GQWy^J}#f*{TZmze|vNPx@0B)>kIYGmg85X|LxYMDgSu@WL5fSa!y() zS&1~G&|;8Cf8jO|LLX3UMufZ_U)^f>>4U7po@HfSrE!>%2vluemTkwz z{@dq;=asGg!``L%Ukr--lgRhrYXPaZ0RN_i`2{b1r(~tl@5o!@r{R(}%d@{A)Mzrs z{Z~s?@}Ia|t`o2u?rQ*>>~!~kG;-&j$y}42JL5VS#7}Mcaa+9+i+gZBZOKWgL)YMt zb#CNZ$sxN>9j^{q!$Y-3V`mQ4A2{2gnnH0Vg3_TnusN&Vpy8qF);J7@s;EQN;1G3h z?8hM*Lp|&eT}a?5pmc~HCH1gnSAK}rY8-|`RMa7AaAgCwR!dk=yyZx34O>UUhTNDhb>BvUHEY? zcH3fjD;i`XUcVFKSJ2_AG71y%`?_=ZTgm@%&2PbF_2u|~Y@ov>T1bdq%iSK2s^e)Q@9n@vHe^s)QKfee2 zEleQz;lO?iL)Bf`r}a6!#Mw_O7JRt4FP^gQ{i{%|7uj*-e+{zxILfk^qd` zfX&T1sUjODlF!Yq-HJ{g*xc+)jA_8;W@q8naBjBw3RE1}-0VxS5Zua;iW3@xIo11G z)32quS$8Nja$Ra}_L~%`8StoKs7evj?UeN|- znw!0$b)L-tHaGhM#(#j#%{sG~x#cIO&&^h};o?YK&CM<%@c}UH>x{}q+y-;AtF`6A z7VxpTS*LO7n(7-eRl#4T;3ns`fwGloHaGiga&@asEU4yYo3w?bx!G|L4id@cW?LK( ze9$dw7N48F8HX9b=4P9;!yDM#>@bW0pl%8NHaF`bxW!ZEvy?T2Nu^7pyf!8q(BhUSJR zVVtFe@_!y7ZT@=jh{evyK1z%?7D% zEswLg+5f0%HBayc0bWx?&CULf@hh;oS?AE2aG0B&CXVI?y01cy4s34VW{ev_n48@$ zotb*GZG*Ylc{t6LW0;%WDHfVrCsm{cP5x4Nsrg@V`Wcv-@6?fz1BAI*C-FwgNH3e4 z?MtU(bF*9Hza=n1KMrH80`%S@QZzR^65nOc&5n*v|M%SNPN|x1*#Y1SNwh$nTbi47 z2a22yLU3N{>7B7@_dFVex!GEE{+gSeO7SmJee}YB`51GR(A;e6Rr!y|z~*K>0QF6w zLQc$|4p@<3dSSpmxbFqR-0ZQ*vt&7E;HlzhZuV6CPZr1KX5F)pb(~n5o4t+wHvyZQ z{R-p1z~*M%zan!hRs92v-A}oBasmJtZ_ZyV7cOVZi2Q zJz7oUXw&PT_o8lr`(_}FPtIw+3{IGvJs987z{aOrFlGW9pPXCUi0gB+|HXB&xSE@7 z^$*@w0{Gb6>?!e>(?v(CI@VrK?;NelYi_nTl=Vfkx!H@-iBhrxOLMc&iKMyN;Slx# z)?1x{ahek9t!}}%352=XYf`w96~i&*`iH`4ZuS)jFNXQDz z=@(#K(uV(JvH*m+*=J)`EFp@;%<6$+sizr^(>}m@nzJy@5X%O0XXTk&v5hRkRf?d~ z>NW_s7)f)p?pm3w^10bvR)(ZL_jL%bg3#w;{0eMt)&n$20bKXc|8^8#mHo%+^uNF= zyFJFXAk59a5-ZD{IMJ&1V_QBqdnknQB89oxR_UVOtyEd2SHaU+^oBD 
zy>RA7Ps{;ZRg<1pFrgKGvPMGsE0cG!^_y3U1xWbST$~X4;@v za&@a*8+7UfN!nlzgl9!EZLm!wvACvYacxkwCbbdtpE38v*i#AJxA6$ZEugOBiBTJP zh(e}KNgGU9i%tWWHh39hjuO%a-FiS5j1#dJ?B%9h5c+(*_=ZDyUBWB+f{D z_5pTnehJ)DWUxTxh{*zz_fw0=|4__?b_f__8$yvw7msmrV<)$7h`+~>hF#D z{%>t?NUB?3VsqcdCu&-aQ@z&XeivZl)NUBN0MiD}p*2;Wg@s4N(b#l4F2{?bb;GYQ zz63!VoSM$eX(E-6N3H+GWeSMe;MG`YUKWNY+Mtd;!A6r!aM~EyXfg_8Ke6n-4JQ>c zx5~7^o(iD*HqOESECtYF-`yB@DnRe&V~)}WC*iwHZSZw;`oFconW>s?Q7!wR4SrJR zmb8I86f(C;t_|+nxIP`D>}1mhYpC;=HdsjU-&B314ce|xGXm2FyI~9hrVTs*|8cbx z$}&S6j9Hyv(gxSzehmoP;GX1JvO@WInbukUb05EOf{`_m_lv~I{+Mu#CnMfNPjQLTtjoF1|0(gyC>G+BO7L>krwlC;665IzE?4LbMc(LG?=U^vD;z_fu!t7#l< zdi^3V>Y2Eo4#N24oaW2m1Z{8^zPAAzpO#>J0c?D7Zfzs3YlF2mM8N~o2D@Mk0enmw zJQ9xr4HB*DSbJ%Mi&c4PgNaa%70tB4i|NpntiX~s_*f)qgBcL61J+y3!FXN?^;SzT zz5+oTyq&_8tQd|<{tjypPTHW|My!Z|pbgxW*%KXJ9CwnVx}<(M^#Rr;9glIGvGgcT z7JtzdJcADlZE%`cx^A6;({*C$QJfbs=7?p3xwA_A*hUuNAw|$>^(}-YMv^vg5>wAH z({gQaXje$;b64D$hf9I=xqUG<0j3R{)+7Ze(*_%hugV^P`zZ0%|4zm@4+L${Jm$;0 zC!&>(ZFygOAB1~E3fiFkxKKHY6|GcRw80I^SK43!jJL!xZQ!oFUnG)>k)=m*J`+i8 zT+xSu3Tor77+pZn2K6ahw>o+R*9JS`yo1>4$|qnPE;c%#T|9yM&`G*BI5%z^Shnrb zxNT%diPd2xMOL+|~% zmf~Ng`slr1)tfLU59;LA@V#I8L9r#WY3$Q`zwTBDNr5e3YywOQT#PXZm=tglcPNSE zQs5W%enhs0~)W$-POvMSs;u8Ly?&VUz9SV(Hmy!ZK zQlw#IPMq7;E|&s-koR}Mq`+}~84-X<0VlE&2vJwmv)3$_0_*fcI0BObyJGANObR%& zn7QR`>r&u6T#pe~QegaM<~$$Por5}3jAwxS_7CAn1XSR5|RR+V=Mr5 zeet(t0uNEhv?)n}8Cx(h222WMTcSUekQCU1#J0dfm=y336(K~q6llE_)0e=cz&MNp zmC!Q5^B4~T3)4^v?3tQkGDSiP?A?>vOA4H_H6s%+DewWt0wp8`*4c*s1(+1@0Q8Tl zlS_e%*nb|dOYx5|-cdrA;_LQj!~`Y=`a@#A|62-7NOc=VY^1<9YFZ6T`x0Onuwm&8jMIQg0q0O%m1kk$S#dNJ-HXeu z;%F#Zd3#C#f)qF_otd*lDj$9}$Elwj!!p6-SZLldpdu}3vO0Z&4IX20ItbX{aWTe) zVwn_h60iA*^xDwmdMJN?1&|bY2>%BZK;y@U81E}U@6#ehQs7*Cmnj8qj!yr#6quB% z=~gU;mI(^$luH43C}eJxTpLJ%o6|Asc!o)Vb=CPx3N+h+QxupK*b<|k5?UrW1LGuM zQosWsJX5HU7nkctQYe!G^KqXCf)sc#d6q2a47^z!Nr9j6|3Ms+0`AHCJSeD3fmL^8 z@*J2H7>=u zvnZLa>JOHquGzlC>2r%NDd3JxljR3Rq=VLlBq`8#CuA2eDX->XeI^br$bk=;z2|Td@Yisz^f4E1Cs*3V*IRx 
zdaG3ia(;jy1>Q^HN>&WV75+cgBb=nbRuHxj$(9M+m8ESRUL5z3qq?N=IE?|;CEbc~ zv#}%vD#wJt61rdS8h z-Ye{+%9Lrj6qxugNa}O@L)aQvpF0j?EHEkH0h*)$Wl~^k@m1MNaKBi5^}ml}JPLvo zSUy&kks(^?*p~Ojiy(X?Qjh}Y#E0U!Vu)6%ELZh+DPKu}n!(HugCGUmwIY$qrNH+h zsg2ix&=Xi2?~bvnBG@v)`6*ks8Z8qXkMnV2t1F*@ah=#mf%!lSPE2djjw-$t`CzXoN~o8dbQa~hn_vJ zQ2ggrpEe|R9Kt0PsFPQ(Zfr?+46PR<@M0zQwYjv|;})FJQs7}6?gb_V+U~(R zF)%4G9%C4&)Bi{`Dc~U(&lSS&80tEd1xa91;53XAl#mp7591|ZAxsK*2yW+6h;k`# z>7HD`0h0pXV|=BAq`;1Qp=p7IX($E$otk16@sI)sZcObZ1*-SXb!8o-z$lDiN=ORa zk8wLNDc}K^l~;jW3Ut_q8?Av|iXVWnuM(014`SR2ObSd-*}GL0PniOD4861wWswwU zH;joYU{YXXj157^-!CI+_;bkLrN9Ur_7Ou;;7W{3l~Dd~VY~uN3OJkoLu{7F@CHO8=3j7|O{%B!kT&CPY9+Coeo05s7z=r%F zHUL2ixNGGyNdrWZ6xbKSFp*3OIH@YrziQQ0{n2t%ud6puoN3V|1>CV|vO<9ZX+mE} zk^*-_(0eIN3Vet0wGxs7YmQ=_6qpq7Xf=(aO|SpWi#i1NK_HAz&S}03PLKk}<9iIS z@#zta2Y`)F&aG|4bt&*Gu0M;bJBB*#pJyEbACm%q#G}B9Xtb(h?X@a!vnnqs&>zay zqL~y}F8y3fR$ysW;8&3(1rC8Q4p?t>3C6`rsJD6q;~@~FKwSz~vSK){@O$(loTR`< z5Izveq=35?vW}BDQaFy3qq?M8#)786x}M0PD&(!{`h6*bBNIOMJIl#d33rd7Lht^@6SgV2lD)yXbgDN+DTJ(F?l%y9J#6 zaGQ+(d7xHLKRc%)S*cAwZ&D+Qel8E-ejl(Gbv+Za$+D6aismosdLP$!0mtnMuCC@J zf?JibDupkK`Q3O_AHQol&w9_nS`n3_Nzhb%?l+lQ&#MpX_ z$DQ-r@Njo+@4idy_a<|WxQ=`wZkt}V?d`bjfzr0#AIELdk@Mnq=}4c+{Tn)Rofx8# zj;z(wQhBT}=T)@)bSs|O`2cB3Ju!6=Rlfk(6H^ak+z>wbnNDy}p^}n&<+H zXEnP+VfJ9p53s%i_1z+KJ*oveUz@w&npT{2lDA{+sCGQJ5}kObakOgVe*TM2ul~e5 z60g<9GDQOH2_$#S8&kSg+Y?AT;JO{KCy?B|kleihdID+dt;s@9ARUSS5gFtVCz75(x(>qCp!7hVKQk8|MDj3AQW&pYdJt&=^tXULnCJdgjs1BL>FRCBR?kjW z9?a4xC_R|hgVdU!!xQH-)0NuPEkE3q`{XO+b8s1g?;u53L?VBauj5G_rwIFwD@7<- z9>IRL<8iqT-)j`%G!psIe7#6&4yfNg=C7Ztehb`H%g3JT^j$oqkO~w(*E(O&U$iQ5!h3m?o`ppT|ZhqLbw&KTY&JfQ_#DE%G5Dz6L)h8Ub0<0 z1y$#Rm5&~Rb4rtBB}zO6FT-^zuv5_8tCD-nAk!)MAD!ho1)swINf1s!CpB$M;uKtd z+jt6o1>p-&It5pt6J8#KQ_#aSOJTgI=@fK=or3P)Jo%?nmQ%1!&7o7U4;gF(N~hrd zq(%Ze1z$@?%Pl{F(kXZ@zGo{!ItA||b&n$G6m(w6@(Ae^T!8OeijYph%0u{Jfd224 zor1KroqN8G>F~IBqj(r}?v)-_^}U70e&vs=x?4pfH>t4nxN27KKk5(Y$gCG#99~=* zp27*5y1&1Xv>%;i{i0aDZ9~tgdzbmYc>6xY4=q2u*y*k93)v3ewP(?G72b`-5oy7O 
ziVD6etKTD8VgJwKhW!5f_&=-5(=llG7Ph+!-{#Zo)?InGHL3QezE0S~i`_1IiA?&O za~)SgxA9`Yud{3q@ebrGIahZgmCei-a~DG0smuVzbE!jY$oz`F3X|{LK}EX9&$MLu zwX13|RK?2jfAQU&L)Ha6lFyEmCDwHe$v3c?w`7Vr&yOm0yLN|I^M@yA+0QVWvZ3M+ zjLK@YN~n1sRebnMT;No*swGmzSwMMx2)vel}l~)v94xEgro*)MAM?x27bO)t_UUo0X*(+d?glE^bz; z#nHt9rCO{$FVvzdz7@UsO3dm-1PirzpSe8M;>7=NEwZksb3Lp7sCx{tP6x${*swUi zlJBgIf~d_3Dow#kQCTgoHm|*&W%n$-XPWltFhB2msN+M}tsd*iFI5|Ra_74h`QYtQ zI(g?o&(oawe#KSa9uQB5otdwz+KH9`70vmotX>SWtReHWxz~?Vv$s4(o5#9*o14Wx zi&SXH{fg_W=>Hf#{y;9>i`!4(r_g?jbIEhR;;02b>)fbWi+^%hpJh9%!4&a)zEYU# zwMd0l9dJ*`c5-ahcHtD^y^DTsD0)7)uj1wdTc`q8M&ID7dJa{o(Pn(_$IoP6knc(2 zT#U1o*onkF7uxUermi%bM$aF>Ye(F-2l?$J4#PME)KoEteVpLY^Y$!Y87jZkl zfVo-LO&*)^Raw0{-{xkAYB;HI#77p_(Gk!3+{|x&1j_=WN$n=y{(L3p>W-wsaqTph z$I1vkd8=@gnlF7^JE&k6#l5`UE?M!YQkS7B8531}e@N}$2OS>ZP`EE=PGi>S{25$o z4&%oP^ofnLe{s!Q2F1ov+_oe-gNi!7Dy!9SWsUPopU^nl$YUS#*qm>3v-%!SDm2di z#m!5N(~&7yHO>*@oyu2ouI@uBG)}&9Xq;)W_*J3KrIS7t`oQAq!w1JgPay0ZVk}hX zd3;q?t3qXk?sPZrXC>Aj+T0pmW%bXP=4R#S#kNrB1B)AnLLa?DmK}5li)C~NUD;jH zo3G?t{Vn`Z=(k^Eu8`ns6BqtK!E9$j3u>yTLJIjl6ENW>`$eHvet)v8cgp|c)b z!O-#DrOmIK$E|#uo0a1SQlZeJL&p=wq0z-HLl1s?6;u43L(lnzg8ar;VpiWkD%4;O zi%rUD*8i$O8i(R3Gp5-2J+H{m4|3H+Vh>QUBVU!(s$^M3KDjY8(6QQlfq6{e+uW=; zcajPf8B^?2YW=M?34Qxa@$Tm348obEmkXrx!|5bxh>yP9f#GFa(PS^h!E7IvpUNA|hHzqO8viEwilIB4(bc zU{8E2HsvcZt3M}LysBIALYA8PhjahN6~bS?svF!_(eho|shEAE6X!p_Z`J0MA9*i_ zikRii3BMKnQ%{ip#ilI}XLtcE=C|cGzer{54AjB-XKX(62;RI5^7l!cF@ZHtP$!oz zKd|MIBT)ySOU`i9^(c-M=u%6f;%M&6QKB7**2geU&4wEaYE}q6r1!~5B!Df) zJBf$%MDpeM!%yaBZeYvt*(sbIBy2f8YnGPd?>N58FT^{7-Q$2Q$B#(O?^Za>#9nTr zBX{F+F1^6R9SWIS?z+@+{BfmsoyfcfLTV6I_j{gqVMdE5X{=(CE8zr#i4rf$0;x<^0 ze;NOm03VydcN#M#slFjo75oAUZZr5lLit`a`?p!ACD*>HAusKCNsd~AU+Hv)d{93r z`uM-i;=#PIEPrv_fw~CfruLHM;gALcTbA#526vbNTb92JV=}0dzb(sq2$tm)!k6V& zJd?#)V9WAvVZ5Y-mgNVZ#ZUk&ge}W^2$tm)qI_BY6&#-iwk&@E7ujb5Tb6Hr4wwJH z!Zci#zdAL^65_EepY2JDXj%Sr2*(4vL-$XNAC=Iu{J!V1C;@C)-UBdGqyqV}{4?x- z4A`=G*Ga5X0b3S7594%T%ksCT?A@Y|v5%JJM-8PcT9*F;|Nkn0Ub4LWd1MVj{=>Ho 
z{=9awmi&ELenTAA1(yF=7^f(qWqG{qD80!IBB61EIBC>sw zo+bJt#Mcs${-41f;_Gg)pD}&}VTu01l<6#0sC-#y6;7y?0L9Zybi2k1aJR0Cx1h?-{y1$7Y*FW6j4@)_-MUUHBs%?|eORJzLq}sv^cUhkMFF%#{{Y5)3ebC}nBxS- zrQY~zLQ?<8cu3j5(0VQU{NF3|vr4s$RxE{9=xb&9halu zT)y&yeW{LC=s%$J@2EOjp|87u?h@Dv{azTm0$ZW?063>mAum?Z|FJj0^xWP|+^+{= zh5qy8S+blna5r(ZLjNNEbHuR~diSIkp`gA({}cPa1GYlHK9>%?fUVHGKQA&&xrIEm zLjR~T(F*-x_#Xno3cb5lE|c`FNLr!49Kxj{*$TarxLKEyX@$OS9~`xmIvb}aEV@?c z-LYx1{Gf=`ei$UJ(0>78F|ZZ-?iX=Y0BnVR6vlqQR_HxiP2*_O>#tM^RMbhhp98{> z<(%fr;DiLwnG11JPI7IXr+5{8On*G*$RE0eyt@d9z<5?uMkNq^fy7639RFqhcQT)rfbZqBJ0=! z7VGPM#8O9d6HYV5(#^W_G3JS7V8+bgpLqc_q!#==D@mNr!`3dntFhRh3~zxV{t!7e09GUV_XQr3VpAbFF(I% zrDI#(6+aB&L6O1={k)+e7aAa1F;`om-$&)v3jO;q-WAJM=-rj;;Ycc9p}$5XwQ==S zR+fRaaSx0&L0F-GHD&A8YF=A^Zf^+AgTz)(ek{h(V$*r)UugZ5_-^^+GgqD0kDvfr zp}z*k)xiFRmOB;_*(vE?Xg!bbv%vm^miw17OYQl26Z@HbgX`CF*T2yEJvCp+3RUuz z`3{%TeFJ^EA0B^y4^h^*I@bNExNUCPws~>ehox=3-;Ud~Qv7S&u9f0G@woqumEs{W zL>dRfi%IMn!X(dWF%~xs^3y5qwqd(NXjDm_?Wn@0z$DKb7_TTH$+P@roXen2lqh+o z9&i6o=871Osv|-rd43sBJV~B$Ft!FJdHP<#9VoygkCQ60mLj?2`H20m0h2tZUdb)K zz$DK!+~Rvl<^{>q{3=dCV3KEMECj#&QgQ5XaMFw(SuT0pq0q>6DarFxiqw&NOo-EC zbh+diPTo5MlRRHyd<0DLIFTnUAxb$67*#HL_P&}sGJ#2+Q!q{fCV8A$%-pg%@a2-{ zK3uOCSCZ#I))Yqq8$X;;*@)Xf^4x&`48X^_M5nPjnd%!dRl!fA;MOI+0OdcTndEsh zHTM$L5XtkT93^@564I|h{rl0!C65+~?M~#yiQJJJY0{m@>v6}$8o(saI~cDip@$Ot z(MxDJsFS}*9uHB-EQCv*8*#c4nB*Bq-_{?P!$+7kglmVFJxENy+Fv*i|7Y-ViZqcfV8zgylS6L)^zQg|;1(4)v zKNDRELjDUQkDg!oyX5JI!$!dJpNw&~5|TXcVY~%Q@;IB(f=Q($dDgm-%PU}44ihns zR6OYP7y5tEjxG$GH&7ud>O~gj>TyP-O(U{xrCT^?-Hs)@Eu`#eQ*E!6B zMRw}q7Zwf>UuV=oxDOX!WA3LIi$IV(EmNk$RH5>5cDb8T)*worm!c<&1uEWxCeJFo zM#&9u`WLWKaxaXbVwvP|5^IBzj$E=2lBb%E#w5?l_@Af%8YpkTn4tjm&qj(QPwM7N zawi6=)lE_DQZ?L)d!=bkQHPc^jXUsP4pWVS^HO&mubXzyqQYpJd&f~;X_|Qy?*)}t znx^I!I%r^;W_ygSfN2^JfS699LQd$8pH+EjnoDrM7z9nTS@JAd&KcPKAUM)AkKq51 zIHqabvyio9VcIp#C+z+!u7*qHP&#{0m=Bvl zj>n56O>-%POMvxEk7GQlgnFirF+K!A(~L~vN>&VqRGFc0(lpKP;AUbFG>yCR_^}Qz zjtk_dE@wTQ)&|z)9EvgCSkg47#;kaWfmmpoU5PPW~lBRhH 
z#*1Q^rg2wPL!_5$nj=I~8~+612ViZy@?8u~AZVHyDOsn3U|scSj041u znOcZ-hGK3rBFqswJ!n#nNE1Ey)*v5>jtl%#1M!1q32n#R3YZI79y-%8Xp@8kNe z+@)!rj80jZ6-wlqruuHWZ=g^2N8;7Zd6ZQn4)fRD>*8^{wrpGHxb42uw%%*UZPGMz z<92D9%i{LGp=mCS*{8uy_nHo@m@-v2SGE3}+een)-U^yAi@JZso{ueCpG4+r7%v>YXTe!yunDDLll zN(rAKegpToVscr#*VFQ8E8;uLu9{6u|EFJWtjj&i{;~;@!^8FXbPEPe&Zug7{t!##Kt#y&jBr zfC-<#PP=(xC--_@Kq&&dycmZuN(sBygYg_F9_{n}zlG1FX=0yK3h+z9cp`jOD!hi$!8i>BHk3}rm?)MBpFrYC6i=@ZH@{xTjOb`g_$eET4^p&!y_n65$gZc%Cacm}2K$gwG?%=0Q{#;q&mNlvl#% z4~n;4<(2ShvzjkE0TVvwV~hkQd_n+TAx=WYIU!EAsqzv&D{+4o#KPyf#4}?BPM<~> z!jbU#3jZ&}G2s(D`IT1+8iY@!mm^mmnD99UqZcsY6Z~^rutH-=_*|<@Bz&&J{~8br zpWvFiThenPNliQk;SrHc_yiI+WO=em!e_G_b%nYfr;jbVgimlRn^;*z>haY@kR*K4 zuMioS@acxpSqZ&vc{#?Vz=ThTR<3lkdy9uFn2LHI?)QLrObVQW-Ed;z^A^5qfQ?BN zUPS=_8Xd- zuUiIJzHO)7i{pE8RF`uqPW^#(Ik#clVk`-ta{jdD7i`2r_>{W@mU@!saat*sgwJ;v zo5ivLJg_qSxOj1dLli-WRi!l?10eqLN+9JXnQ~ocf4Z6Tn@AEq?I7$AtbZMVaWb&} zH3TS=1SkjrHl$XGugXrs{VMU*zb?X90Ak@&!&jDzLT@GeHhYrkT?lWB6bqlqr^LC? zTYJmrYBzIQs2`H>`5nd%u}t^`S7zBgDO>oA5J_!Z_jTR~2iC@iV;lxz;ZrTiHdu*H z48rFeoQI38uKGrd8De|kb7aB~7FU$^$7SUhT`o)bJOyJJFyRv%c_YY^10@kYpW*u{ zFyRxtcqPdDC++ElPuVx<=70&Ge%{Hsj1`wC2%pxt?hEwSqLV)huBNO(_{{O^Zp&JC zuV1$~v#!I#ew~ER%YMCtPXoW-|AX+UN9p}Z-YiJlFK>sr+i(X&L35M}ir`iWxje;Wio9GE4nC(`GAbQTgX#gHm_QX9YoJCwhsb!Y4JG5VkLBGk^Urd|Bxtd&^yle{}w%! 
zeBCJNo+f(6Uqy8^C?7+Rqks*{<1oeo8 zAQn9}lT2HYX$jGDDo&@!F%~`LeFdBwq~fhF^SHum7@dOCWMISSB8&xMndk|mlnYh` zzXK6HI}||Qpm_)XwF;n-^f!#{3Q+8Pj{iaQ?3q*}SUwwxp7Ye9C88%dq+GBvt|gve zJJ84E_7)XJ^xSg|<(25E`zbpVnCR(`al8`x#>`xdn}La*5P&{22^HssF!`E7ndtcj z_pd-KdU_x6{*L%$&e&^nnP#;O!SX2r;n#cE?CJ) zjOeL!HRYA)Sq0@e(MNq323XHj=?fYNSkKfJV?Pj!o{>qo zjOD|jWo}eBiJp@ooFtNop5U5tgS2~bd{vI718}+$SeNrW#!6#J^vv>Eaor#mqUT?+ z)RTOR(>G#C^i=thP8Y-jcwlAtaq;2^ZKn`Hht&ZP+8RlBdjcspv9d+a4R;c(cn8FyXI`>zgO%*tY|-;4gx^Jq zMNcDttnkoiK`eTLYfeJS7Conlq&7Yr!ePMLcm&2dieTTE z**nQLSf!p|yAkIZVymlOj`5_}=!IAOg~2lN33{Qvul$;zoT5zd?mym~y#FR&DQM?$7&3OdoU1r3bV=@L8JUNVa@V`8R@e zXA)667A?Q#4IJ=LbfTDbc@yjDlxuntiT>Ykoeu1C_9140+fVBvMynj$!)O1bW$DRTWq zmUE-X98EEOdbXxW|Hx}3rH@Rlqbw2oSlt1V}W+7PfyQg0AV0{f%i~VAu4^Fct&T6bB^P2P<1s+;#(H zk*28rJ72~GrYSmNvU{ciH(`wA? z@F!9L*qAp6<5XZ{Uf`q>b~0pbNX-{tW8QVRUnRcAyzeo-1+k_$BFS{CDwI9W75>FM z9-w%-6u8b;z`44OO;ePmpRiHtAe=e^8>I$g3>3>WMIfaTdX}c>sQ}UxlkuOZ02-(k zU_7J%9me|{r71?>8xK__Uz%Cveg5}LGsh&g3|4#)I7yB#)!!wOBRKFwxXG~=JMVIz z<(y=5Z`uRNQA6W_B*%M{eyysb`z#gzrsD!8IeKDr1132_07msBls`829(d9Wg6TfX zO}O6(V##rF;+e4mr_c4`NOCO4|4DJ|K1=ZAvz8PzNRH3g{s}P2QM8jj9hl?@{y8pK zp|PYMepV)u94Fv^9Ec@HaLwH5Z^iA;lJYyy2IFe(6 zDlf@#G?XJnGs$sda-d|axXpB*>1?qiIc8!^7t6-?z+(O>DXK3Z5gt$k9ahUAJZ>cQxWP3yv9cw{z?&ec``rLx z9kA{<6_F*d?l%M|lLW|;9D9qe$~M8h5wObg`6|~F#FFD}Us$dgy_M|S?5=n;gi#{J zl4I)0an&LhtT;_`AvtC#UrCPHFm4jd?z05foP?D2ERjACNp1WLgyq26_zR5B6u~6N z^d#G0RS6NoeU|(b-88VCoTJn21Ndw4r@vlpMkg8HC*X&0eU&?9tsCgq_06p7aF$;u z$uZoom*m*!*Z&8}@uAN?&YB^_|LSc&UMM-#PhIg5K@Pa|KJ7kQN(Dx;R4T|K|{!-S|d?SmU=Xy6~7W0qCQW+~A`03niX;C9T zj};bQ@hYq@cOrqmAamHpsW?mq{UT< z>v+`=X)#cak`{m9v>g;*=Y2{^iyQq0=KNxEEG-T$mvZ|8lNP^Y{H%m-S9C9*a)*K< z*W62s5Q6fsDv=h~;&dr6X>nKqU#kQrEpEb?1}ub0ix46w4v{S_e!y{~n4gVA6jVsL zyMRfH-!QfT3sX{B+@3VaAd1B8is`pOlC5R6km@rgd)|65utOX}9Dg-MGBoQO8IEh4}IMbu~V-@#Z5>@)d+LuIq^s0nXv+=&jxWMCl0~?U~x=N z1W!)MeKK-l7~6*clN0kW<^q!w!Jm#T$t})9a-z!}a3m)-;J*&Uaw53q?v^x8B*}@W zM#}vwlF5lc%5&2xndHQsa#ZKr7{`Xd*&U4FSax^Od(6EPlH|lO5PAWV6W3r&QbKny 
zUcgudOiqMoBRNxfsh7-$)Dm4iYY?wL*qZhDYDsU^5;s!Z! zHLjDy)mQQtV$28pnVg8cVy5-Hm0WUi!S=SwD>?Bll($7QIZ^0coy%B$Cn6{6&VwX5 z@jHYaz~sc9wdk9Gbyi(5x`0?t?3IMeSUwyo;W&kpoEQ$_Op#1Z1Xt#TwR>^APLAr4 zX5e%^upVg*#;eBCv_&_c70=I$g&0^RmO7fBaoQ@D16iiNO#C0_$?G#+U?5PJ{quk^luhTe{y1#aCq);yzz| zb-!<8yai%8(c4#+IY)0L`!>5P{td!*kzzTqx__u##){WS8GNC9B`1pOq+A^k%ZcEc za>2@$6OGh)sg1ipI0RT5pNTO{5ll|J=Ceh&d#g&QZIBb!<9wainiyJ&@u=8zUU~=r zj^qwVu);G~TxmAb6|^4PS_k7JV2^DD$5cWNl;p9kR1rACpUmHmT$<<>qH0Q(HGa=Kz}Vp`u?yJWsT>yT#I-7y1rTKHu`nLGV3~Q_UojH zF8Ay6(L@9Me*X`e=tv)etV{j?a9&&9$qcF}omg>A7@N>IweDq*I~2EaU(Mq|ejkaq z3%wP5B?$DgHK%a5f4R6wj?cO+<3Md5B+!{`icV&6IZbmRNcSaCa|*=3;35M??%&bA zhu_EcJJ}S?WU&VCmqFh7cV?Wq6XY`r<;eZIOwA8e=Bc=)^*VdGU#4XU=~4t;@t2#9 zZ^VK_{%?J6+Xv-)+q((hT;(dKCRvnKZa%iXi*M$;%*CsZkIUD8iM=9QHmCQMp};lw zC&QlwC6RX-B@K*JVugUY`$W}R-;eJs)#^GH71U&b zz*kv!DQE8F8u{`s*2pBf;S|SIt447@3|*L_0sp^ z5l{(M%Q}KzI*ClS%xI@h2$$*=Om$9jlkn;=f@p zQr+ME*2;cm?#`%e`W-gJi}hH;?x!lH{JNHDnEr|lX??>hy@tiG=SGzFt28^+BE6Xn z>GdqyZ)Y)V4#nFwUZs;#)zYijkY37Sn0`d4l*<9B8tGTrkbXuB{anzn7fvhfSFvf% zUg^|OT(+|{+RUQf9x1mMXkUrdql;PW8od1;tPFn@baLhQjgx$~#am%B*VI?5mq^a7 z;J_4ODcA7L^P01=V&=Na`_kRKl&-vjK+)Z7>PPXqff5N<>00Jx_DV|9*|VbOSlwz* zE+IkmD2W#_mMd`|iG%k_xi%o0OX3-frNBNrvUgIxl?pXKU1`};{4}%Exo*m}05-GJ zx*qSl0h`$gBsv_C!pzQmw$B4Lvs14=2P?3domQ=Oo!PmHO_u?i+1W3tPq0GEGERQ> z5G|X0X)!pYT(E-cE;Bp*l8TKdbK*SuFmaMEEq+AauLGOeIi~@y(Eyv-2}EuhL(~Mx z%kyC-Gdt}YrrZI*W_AW(oD6JcCot14SfOm;rt!_Vju%%qjc;g_a@PX8WDbnnQe5>~ z9Y%57>Qec2{9gn7+00I$@kFh zj7~QkIs?o9a*T_W(2UY@jHSS4bOIY^vBQi`UQ0fG3rr++!8lk6-E+AN;{s6ph|l-` zp3%82soQvBbF+B+B7#U>yh4E0il`Z#A2GfMHlq_bw5H0lA+<&vNs6L6AwI+Orrp^Jpc$RIt#|?ym<~7sqdRaNUhklyNOFrSn#@xibyIlumnf{+iM`kK&)J`e;gL0mcJL zXiDctjBkKV>4X4$D9lW6mkHu2olg?aj1@S27K@`PodNitERId- z1W(R93L5SZU(NQbfKBPVjPU}nDV^ZYkB%g{#d&B-r`i%Yn$pSH4+F$gI>9w}x1=5- zX`ZAxguO+wDV;#dbDJocrgTQjQQG@hocdUF-60N+WfLpx6p=Qo?7EEQ zcOp|dzlfwMosJON1M95@VVtUjdaFqoSAlp+C&!;=&Sfkgj+vUKk3!Ov&O!+DMY1WK z;Hq-5dvQEfj_Q&=!08=eT~gKk`49q#?+`cjS#c317E?O2#8OYw5vTURdYT~^gT%7I 
zJh1eE?YLlJsR*wsf)1;zAxtuorgVa9ZenFm>0J33B=xzAAuIyc=f02eF0d(`5THyF zAZtpem-wpePTckG@HvffJeZFXPW3qlW(;whc+$=pb=lFBlr z^Mvx%l+Gv^BgC>h#KDzMe0zHKl+N!Wsg37AxEWX*KZmhG5o}6lVv=pJcsI-bQejHx zOPoI!TT?pa+c8rQ_-oO{ACE^a&CG^8;nxkyTKBwPH#W1b!|Q&XW<#d>^_mR{_f-DF zY)E4tB6~I@{>oRZ;40Molc6*@@`4rDPPdH@ZnT_Hq9tBYBNnZx*1n+Wfh5ktI0@7q zI+L^42T3PZ2$K79DwVzjmuNJLqV^C#`|C;MopWA>$(Lz;jD%Suuk3G+ll8^89Sf3-cb4TB2S@lrP}q)%-{B zt&~_v5hvG(H2={H=N?AT_>C|Plq!i8C|rn4Qvnkl99#=`;~I2u-e03K#i&-wF}G%3 z>K_Fy)P5<;;(Q7-2DIOkM2SjNFU=koSK?0mZ?{U6$gXB-Lan^ixld+i_bRlPcbi?U z(rlmIFZge>>}u<0@Io`xNvzOfxttaDQ;d5Y$Vd(BckqILQE7jE2k#w<7wLEK4#)p6 z;PSr6I*s-yP2zX(YCi=@KgD7E$z>PMAMb6eg|(2{x5^f@8DH9nAf;K{G)RrO#RYf!Vey{!CAjlZa;V!{2P?^ z_dj?z0N0b{uF1~e-Y~Jk9$>Q5e(>;WTqgnlS0Q`9H7>>C8JvforZ(@;93^&HlTxm^ z?6S+2MTBF?s}UZ6OOL1tIo)+yy~0VQ`;^U`71RcY<261%8HX+L&R z8|q=Z=p_Qb05ZF13#lJ1yJ^e^g*I%NSfQ~>{w`qf+qbk|_U{4)w^pUxGV{-5P)vV(NZZ{|QyMKSPmpGh-p{^MT#e2#(C)??PcNEYeMl4{&`C_|87K@l%<*5OOC} zwa(sk;o%b_K+!3%G`U_1R<5r-EoD7dGbl}god&^)IR;M*ZKcy-s#;d3K|lQag7|a@ zuIbVwPKVbaovfZK>pZv|?xo__J|(M+WfFcUWAf9Tfl#|EfkJ+HiIw4?VQ%)cZgld%=4RJnyaQ}*w)g(K&dqi@47m<$ZgwwU2;P=RiW3)$In~Ra zqluEa+2Fth#xAbA%*}R5HqRq-;vD@Vagw=N{SNf4z~*KfbmzndHa8oHJkWIjx{4{z z(^kSv=4L;}@k3y9v;ScH4Qy^UFw-trp>4z5?EZ)IpcXLgTk{BV1*UxigWr}<3Ldjd z%*`H$|FM8SlXJnDTg;nVYzJ&^_EZf0;AEWtS;xmm3A2Ok$Un@@PQu|zF*G;) z9LCd1DF44Owga1+4Q$#+>@YWbL~m{j0h{fZfN{AJ_B=eslc0FK&o|7?#y_BYXLLBo@$?N%$i)O-t^ngLVu{V@89WplHElybq! znw!m|Q?a?(i}1fd0VL?}!njibI?VDpjw8_m-`&p5F85CVdv5m5q?*Bs8{27a_HK1< z$=qykpcKhYh@E$NduK(mc@Pc4-0XX=QU%S;zDDs^t3H~WO&?3~fz8c!$LI`fZZ-tq zLmEk_I49;$YrX_UZ|~?A8K;4GZgx%LnXv+=PcLyaH~R$sOU1Fd+2F}LfD|;$&2D1* z24Hiub^CEx0-Ku+{y8pKp|LbKyF{62ZuS)X^(%|<+-z{o-7V=Wku*0u5yAwKY;HD? 
zIB`30^fAp^aW#&bL|cf{e2cER+2B|p_He##;Ha-Pzg;LxwH(Pu> z4@Lo-o9%{iDB#cLX5aPuoVy_2O6KpFo9(R1Yi{;zC})XgkK28j94HwpZZo~TGfgDT z&E5!M2C&}hDU4-GsJGgPu>r($v)htz8Ow)b%JoHs)7)(C3EY?h@!V{1<&L3tFOGl8 zQC-r$I5h{>C7pvY+*o?tuI6!ZU~VCb#ms7}S8!BMb0bbO#M0w-Ph&hKmJQ~CmEp%x z;s~cJf)1-q5H=b~bF+bzn^@I6OJ7ObL+7*l-24;id_dgiVzdA@HyZ+!Ndg4jL;w4Y zGF4@d$GxBU>VGfCxD>>5vkiP@BNwb>-)7Iv-V5Pwk>a`8A<0F5u#(C$H``a`*4*r? zFkTkR=4OK{zb5JF*>khAL{b}XgRliy8`nB1uI zd-1QuXnMA9ddBbiA^Q{k3q6ni!R3dK%N5pIFO%4T@iQor zSDQ~*lW2p(eFW;>kcVTX@j=?If8&`gE<(}foX$4;*A`Uf;RY@?Z1lf9_&+3 z=A{`BzmbDm$v1KqvFQO2zmbz1T_UeDGB`w!q57+~6-c_Syu z+-i}$kuw9=%f!`l8)Hr-S73c_VDLuHF5D8@;1T>61OBW#3C_HcvkOfHUrNEPJNW?0 zd!m^(xGHfCR`s};_IA# z5fm-K-?Tvp!2quiK^r_ch)x5THpm&AayylfHW)zSC}1H>8-x(N&cpzyCcaUj=I&VReHkf=G5(bzy_!48I64D0y4@GGM(*_{` z!kz$$Hki%!8Nja1zsA_4gs#ooozC47VA|mEB>P}xYlHD`!jU$(3I7{`X@lh$Pl7oA zpZs+t&p|8ypbb95;S({W4T^^8AuXT(2^f8VX@kJ#+!i}%gL~ON&j=b|G1e)e(YDSR zT*ZUp+!NwM=Kt0PtCG5{Rnu-xEmG5JoI0HVrvV$MuEm%FOdA9at$8rz*^s(Q9F0v+ z;<7{>jZO8=WNHA!+Tgw9$ZRE2_IT7Cr*3kLwLwE)Xg(H(C)(h1h1X~@3a1gkMw2-h zH;ZN3AdrxXo}Q%*8qlejHdulG(+VJM@D;`v3ecgpr;H6*zs)5J%eJPyBxu$FxE4iql8#I%nF51g*BFAPOpj{B1!9-jiIU^kpt8+?iHCSc=J{d0H}3E21)xD`rqgEkn1 z>#4xB!4!VjAsG)ux|EFX?b{!I!eZO{Y4;Ud{{8^M)hOS>1x8t>t#E@=!- zBY|~EOE4Zbmb5`{pA{by6ANu{q*&@{KE&yLv7`s94d#KB;m5^`BV4HnI;5T-VEVukz#Fd=E-p}`S`uJlFFhDMkrrtg9;LB->!s_4y{xZCr`-6~Mak2Qls!8y#?l@8Z&2 zAqQ=+!LO^8weDNLu0>{DhhO|UX@hQly|h7?o%;{kpqmdt)+OI!vG0C$^MeR`hC?oW z+7YuprwaP+*LxKIb=9XAiH@U~lLtleD*4^7K0X37XNNEt@ZGPA6+%*A9E?%Gq`=D< z&jFJHft2Gqixi|l^YeJn0+1|+AV808{pTMNRR1E!0goQ9E z5JGUvQz5dYK=%t%t}`$xa2LjHN=OR)h4DSGFeRnH!lWtcb}=c?XgwrJfyXaGbODnB zRWGJ4z@)%Xj8lL~fe?UcdIBU;U^UxUil9sJ>X+aQObQIcI2D)_Se|4btZXT8-8wju z0x#nKyaMQlQ@+Lc2E_T#Kgro2SF_4LNP)_i^6d#=`S--=u7sq>WQ_5^q(ESEnJspZ z0`Ic@Eh8|VR=A9Z*nkbLJute0;#EH1|62;IPU^N*O}jbu3>UsPm*c|E8g=?Qs7l$XQV)IKn^B5A$HzH3jCF9UP6N)1^)Vs zDo6?(Oz}GalL8lDj8Q^T;3&led12}stKRA}%UGyINeF{lZ z;0OrafJuQV7!#F{6nGwEB`_%vqLnKh?cU;Lp{U>D{tbx7r@$%L4JVcYRVUCf02`nB 
zU>pT(d{tx&v63^eM(C#*!2`&S%Apl~_oD6U0(aQ)VKi1=iEF#ApHH z!91`s{J3~=gsF<4!|GTFeT*b25Jfe^DG&mbNdja^ zfr{d*vajO)viRzMw_$7nu@o5WD;v3BCHpqJFRnQWjSHyk2x_sue_XkYl~fifaFOzr z6gUV*N3rZSK_KNM^lT~cv`A{>p%8`uYvb!Mu2BS&0-q+?2CI(GH<1EMaDG^9b>$yo zye~FV;2ht@Rr)+51wQlZ8f2~e$**gdS=ZtB;D;1A#;=zY7~vc5KS+V2eTdRhAct%G zF+LE(6o*`TXqydNsluLY$(@WkR(-0I7*FC-P$aK5%~_M&6`15BIM=%ygTXE7O3y2V zq`-0*PXLnwe`0I}CIteC8)6~_DKO@0?)L$c0^ebL4onKH#jT_iIC=`3fJuRylY=Q( z;jrKme(@KaJIND6!GU?Zq~x*lE>hsBWOLoaC}YI~$8RQ1^2E?m@}3V&3beTf4FgOH z1R~7_QCHMAeF-ypV(42OzX2u%DqhPa7ceOhm}wWRP>mo34#TxIu&%brb>s@Hs|}3Y zQrr?!;B@>?1N@m32sAFKlKRG(s^G6taO*v;g)&7nyDRW~;u@@MDe$jIk^+xFSR|53 zfe$^2FR80pf)w}>hi`yMfg`4JpBR`FxEn(sPcCxZy%Y!`2+68M3Y>C14?zNx0?RNS zQ9^eGs@#A9EQCpc5F#fIku3$*;P{dV`aW0VX(?A1m=qX~F$!3il2YL7q$zq+B%T;* z^$jFRfsWJp1yf*BU=GGiC3IKdTa3?uNr4c6>r>S!JTcU31`k>TyA;0_W0n%S6#pLM z3t&><*ChL3WlMpZzlI|z&|@YORluac2#j+;oPW5CWQJ4u2Ptq94l~4%6nP6{jS|Yg z{EaD>3rq?GHvNa#K?ICLSCEUJA7Kh31E2RiyQ0eph%69tYvn5!m1{6k~{3CIteCGuP9zq(BEc6_Wzj z;D5COX#98tW3d8sXzVGH0;l4;TPbj)clzH_pk`9dV8xAXQs6yeXQV)IpqZ1M5IgT8 z1==N>H_;$Sfigc)1xbN*6yLs|sV9c2-ApqAlLE(M^adscLICb|C86S+kODovg(4|% z7w&h0SPC4LcxJ4?>2sMlk^-yoe^DHh0>P69H;%|ifvs%+0hknMHJi&SU{WCX=eS^n z#*!5HT$xA;oP+;x5KDpJn!8(4&F>&d3e18qQzVlDft2UQQ!+_`&T`Z>+cP*Vx9GYn z5FE=UR#uUE9Q-{bNrA5*d^q(F;XxCjR(1^Qw11^k&5ILhyHen-e#$>kza zV6-YPDR2pti$pUiaB6a(WURQ&Bn4K9Bq?wYgu8(CRz67sOH^xH4Iy-HT(3A8=HcbO=rd0_&14#kkm5k^(b)R(ugcETq5? 
zvDDMtgVSANX~udD#;ao4U>;Z*eq6jb!fZv*VYL;)Pezgy2&CM^%9aAh{0K>XZmrvR zZU)4CF2+H?q(BHzCJB%w1!{}0$_~SQsQBuCZ@`!eVkvODuWaOkmF(N>zIZ8wM@5RI zz#qQTi(Igh$|40ODql%~k70Z$mPvu&%6IoXJzEN_7D;U!&E*q%kgScHl4uNKDX=ri zHds}BzKIk#3g=$Hy7I9Yqs6B4(g(UKCU+HrRp|L#Vs_mMOCRXE6~-KpcRW=Hj;X{7 z6n&uUku7kB;`TiLD?tT)A3Ja|876J|eVMn_N$Yg^4)@K#KGansxw(?D;-ZD$S1i1p zZXB@R=5lpaln{ayUTR4Fkd7Y1qa)7kK{TJlAdFK%x!aKXfswHi?zZ~TeRxd4b+Wk6 zkywK9uo91wScmZusQm$#Ss@TtXTCTgP~Q7|FzHKN{>68v$bXP%atBlRAo`I+ABhEuJ0YxOUNNbuZFRJJ&FFhv^wGt!pxo!YDHFU>i4}Wu`rTSF(k=Hp zaGfjnc}#T&_Y5((Gss2{;}P7ozQz6iQtt$g5v~I4akFB7=A50iu9;tVMP^-xwtk&% zDb4olr6a>k?tjpc6MXiibYum+E!D@L70fYsD;#d=)0^zOjpjZWuhmrjMPP4C{fO}` z=yH;0gveZL9YNn4R&&VpK$;FKe}TgEXG@bi*@d9^63^V9YQc_L^Aud!oC#NkcC0Y0 zW{#WYom^st%P)RK=g(h=c>!L>5${-FZy*H!7L7$(@ypR{(nTuaU3Sr75VM0-p z7m+H=i{%(7TiE@ZTR~Ud$UzY9Oe+Y8qECNj-xVc6aW;sA{wZ zm&y1}RD`ccL?5wOLh4~f7&0;wA!CIIcG`}@<$ZkLRfI_-qKjDkOKK-5p6m11>8f+# zGHdSIdSx(diV`av4SHoTnuklnySOV3?5)n=&xx5-%I`e?R_D>U9w~Rd)ft>J8HMmV zX`jYJpQIcI;y*hC!{eONZc$U;QDt1OHz^dKR&ZpChC)lHFb~hMba*Gmc{$~ ze0#<{rv?+}nhaS^OGjhV_2>P@B>pe8oV)m0=U(Cf{Q%^=#v(WWewmNFf&T;1vL~Dy z`iOJCYF&9qx%pc#&8Pe#W*(cduGYj42s<|2eAZi#4!-Ul=Z4+FCz-Z5SC7p(tym<^ z`QO5K4@R30=85`0`GS!@mTIej{6%v+D@=4;Z53%&IMNa;T)V0kC#zU4+R5@%c3FS$ zOmvT@Y_+bVU({Yvn^f+rnqQc1e%9~4=58BrN>Q@4D&mVQa`V+Ham^1_#dqM5pU=Fu zR#%6YoBso*xmkVMvaZ(PN)+M3beCiBtkq&aOB~Z(&3EvCb9lh9IuFbo&6@-G=5%C{ zF!R4Am^)q#(fI>sIc;T*MSpqmKQ)HJM#ptlz^I>d-$ZduE=sqs{nzOV=Oz*LW`)0& zMQ*-om{p6qgW_5&)9Ur+@hD4kv$Fh2Dz3#v=|eNMxG3E_Q;XH7#I@L_Anp%e3o-K- zvWROjn4hFmdpTwb^!I<)!Zmw~CYd?Vxh}*y3Zy?_#pdX1mafpXirf%Z=1gW$rJv(- z|8cX8ZA%-!$*!sXX!?+swsF^QMN4j^Kb9`@+3)xiHivkdhCMLZ=>wKbV(-Hc5zPMMak*O!bpM7au723LoKebPV82nW7{BHN2aa{+ zPOB{26`nlJxyu#!rVw~(j}UlCde6^ExWcdK8SY_wYI$(lN)5>Ud{;J#_Nvhfr)CQ9 z@rNi){(k_{Vx$+caD`k#@dxC z0nVdb*&nLUpnTXvUD=;1zClRWOfk!M;VvBiAI;{b6)rtei%VEgkhvr>2I!ML0aCfaOsWC?MX7#mPNv&tmnGA zcQk3D*v;g=aXj-r{O=9YBBYzJaD}H%W;zfnbsh`jPi2>8F9g&z*5 zzhHZGLADbOHNw^IT2q0BckDZWDd8T~>X7I-YG3T01wRfY2UjtFf0{lwzXi+u%LtI0 
ze*??vigi0k$#8{-vUvek&MFpGxvMZ*58sCy2+G-5vk_ij&dr}V3#{GwMQ>8nkm4HmAt~q|2IHd^z;lZ?qI>ed!BRav2wm*VP&3b zS&d`;FIvB|&AcOW?|DCBCzkd*M=sayM-eHShugdOzYSd8iKISETr-v@aqT`= zq_MdD31J6_#*=7!FSFkuno43U#%SPbt5hKfb7ekol{lKSYxp&|%*FXuP_8Dg4 z&ozAXBrY%F`#gx=B=Hl*cS@`x(dj^dlvXSrc zCo=2Ge|-VkKGWk0-|gz$-6V4svM^p#WmDLwJKew5ONZKeooBA_urAK&<3p*hve#$j zBmX_m<^2x+{|(aec7>NPbEMC6rS@Q9@y;w+KKYJ4&AFCr*LP{`V_vSX4m^E;CUs;s z&oz_FqZOSS!2i>LeRAIwt}5f)c_dRev9N5W*74nKs!FQ9x?AfWNbLVqH7e9h*7bv(aBlfsT2wtGKb+%A@TnV+eLb*{<+B`f5$_rE-`S zwCtCXNmlD9&gLpS=EwFr!0xuW!piXUa6)RoY@TZ-ms`1_@5=ukz@A`mg&oQ{r<)8p zgIQQMv#F&kEX;FmB&$-B!+O=eRxve5wMaEsDyL@R{Qb9cv-z*N=(HH=3t70r`v{0UM1x12 z#mbZ9QT_o&cyL3sZaM*0)Ohe`_TP*LuQ=k8(fAVE%oy-ES|7w{R^S04fv~9s?pNhg&h` zfC^7$5p$Ceq3~t5)U2LQkTrO@#z)*3B_4UM@_o*g-BO$$%i8En7S9uJr7{>u;v0;w zl))oer2K@QWH7tYIA!o6Uas+uDVYq)xz5U9cB6EQYsg?P7L^td4@5Ocbiil_^0G51 zUn+w=U(Gq>qa=sCt=DF9sIXh-3T0{Goa=bZkKhA=t8^~eg~qMqMo}z9O)pZp=CZgB zuWLYbBZulE9VbWCgD7)+#U#=_eI+F^k-VT;_su*{xWkbks?W;*xSxP)+#YVR1b& zR#v@^q1@Jj`w+M};NzP#OyKaPFVD{PBV0@;;3KA8T@TLN2teK9$PP;*{ z^Jk_b&2${a=3XGu+{XnNV?bUl&avR1n^>VC%ci1sWeX^KW89YE|2T+`wm{0;9jRw0 zk#zok3gHtY{X#1RQrRpk&WoISju$C{l=lc@AZS{S#J(8&09Q#}%`?7fBR8G8i501@ z>4!sKV13S|7#Ay{KIa~cJAs{gfz2FhvWtBFvm5;!%KQcHpMkjV37%=UQaR3U6shki z|0oR(tPg5~(OL=hK?5;P26j#ca&D;%)msd$;9KcV{BH-&`l3L}+b!u9k<=Hx4B;h_ ztS<_rvTiHIRP;!F(KZ~m0PB-#KgP3A!1|=aFuH&e4O6akmg^dqujq?83HPgjv%V?h zSm5eDV13hv@>Acm2%iPO`lt^v-UTJvqeAI?=z_+EjA|}rf&)0~t3pPVTyJHBW2C<7 z5F8E$)@PlCaXPR*D-bG|&V`;qhvh@KJ^=W$zU#&0unJauSXjNv>+DsFzYXIpv8?Y3 zp1h>w*=bJ-eb-<3{sFA-YVF56o#54aMbQd>4ZB$t0XB?&EWrcplH}>(X($zc}GW zjIOrusi0ZVw~|H+R(OJz>x|A-D2=8@HxPLp7_Ax7T^*Zh#x(r|vk@S#6?-OlWPC$; zH3xgMdh1TOwZOj_hj%6-VzgIq* zPu+&s7SOoekvWMQ?_S_m_r9{xVXQ4$#!dx|&*JJbL`u8qav8TrbSZ0lu=#KhUx5an zGKqJn;$G!)>U7!e3u8Mua|P;m@BIB^XQuDHm9=~K(j~ff?*(LbKCs=ptE27SJK1_W zu-zLxc<$T>4JFdk-FO`dSZE)e6xmmmROE!NF;$0hj$|T;QdbVrV`ht|&wLFB=+VA%D>_fIk z2fG#>{3*XBUZq>9Jx=!P>|X0fK3?{{*4+Fj(4x8ddw#6t{G+%twg>X_;z`UUq#w-h 
zfSu|pNvm5L4KQ4@rY!ek%DEr^`-8NcT;UKHXJX}Cz{1?`WlhTN%@uaToTQD@vNyWI zdaS=)>mSQr-&(gP(@R>%+#GyX$!ATr4|k0y?8jRFMTzx4L@U^!d$T*Vu}bx<(lj@b z{!qhx+m3-cmvyefO6E8I?oGqiqQ_IgkHkc|&(S3TaNlC*qu3W$*S*J% zcYcX|iP(<4-s*-MZjvRUZSeKqy zpu3$N&%B8Blk3jjjvf7jeRYLjC@@Vw8(Lu{N-&SOsVB0_6c>JMk@l2pHj_;i-*fI| z{=Wm#BBUQ;;R;_N$R@0uK|DQWb=t|AxK77q)#-$+I-Qu*>7=+GR;T`1bsCUWr-4~@ z8eCD?#C19?t4>2hovtDCi%(~fZkDg-xl-4LVohXa)?Abx3A>;>s)PS`fwa6`;X`;W z#!9`!BB@T?9DRIq$bEY^=RV;7XCN&?`bie<$Ef&ynz(zJlNNIkaS+YS-&wjO;4hl1 z2Q#Kex*47B-cWlpI&OMoJFa_c?X0NZPtnmXeR%!8x{u#ydUW}jL_55>?*BxCYn(Cy z;t}PQD(cP6xiL(6^{ArLw8m51X97hlS>18rX|Q@$=~Rm!Mc=^)dY>-{VydUM6h-Mh zbKEfe2Z5SLkq9n4XpBqM2hz>!QP${eHob_?N)Qbqky_4|jY$;At^I|pX*!9;09GFh zy3Zg{w--}BGck6sW-DlVCyB97^N2iXx{yT6XPD&yO`js+R`BzXB>1cJ3hD2Bano)T zkyMuSdR7-H)&6U&rrTJY32O*wx|75*j7O9xw-;ZSdzM(BX-yJqFjj!}4M~jNm&KlZ z4oH7=a`xA>6)T57$1Nh@|9*^WGPR%XQ0A|E4)2cf+FO(TxHpq#em{1or!z6xj~!a+ z0QLK^W4*8+?EpWpj`m#Gj|av3aV1y0+K-)VKi)(Ut_MYfa-BQd_TwQ{%GBXm3ReCC z8ds#Yotb`(|F+_m?ou^s#M=HVx!eWmViJ`Op;Ez*Z*`TSAx}&#ib7@l7G`NteH#dX3+f#61H|c#&@jQ4BAg6(R4P8O3x#MK&AUg^!DW| z<5nu)+^A8siq!|Ry&Z_&ATbT&8YPyI_yl7;D3X8Ek6BaY1@tRu{}qX*+gRLAYBp%P zlf=&$KPXYQZdRf?iEb~ZTqn@99*LVVrYo@ziQh1`Dbaz%i7#;-3z{BA;wg;Bl;}gE z&T6J0K+^#vuEH3p#91W9yv&nmpy~M}e!}=#i7QFm@Cx641rJ5nld7BN+}}kM`$iIb zzRLGcf$KhxRBtvly$hp1tNMYai%2|xagThGzrrr`L+v=qQfIKRen|aS^O2+J*qb*! 
zq6J;79&kxFKUhP>@Q&T4l4xN)A|1Al&j+|^a3TLSJ)BmkbZ0G&WbtiuBFpwCv2gB* z)cNmos5h(-y-T{aO_?|BtSR%>hc4HK<27C0v0?@6MteQL0}CTZjUCNAX#SeZp|{Q7 z>vnFDT(a1?oIeO)w@6%%%##|jU15(YT!_*YrK+&7`shIv*G!aSzUKh|RyPG{Il02V zlbkyME7h5Wx$C(GE`CUUdMpoY^Zx{pmXo{qsfmndmsLEIMCuY2#?X@qSslLIXFndf zsr;7=Nz2<6&Nzbc4=YD=#KwD%HE}c4ZmeeDo(^j?ADmhjd|s80HBXH;Y953XoBBS) zgg*aw0GmKgYf9M_u3?-}kDH^D*YY?PzAIeFb4NN^Q?0}LBUs;Xn5%k1y7M@?b=ORc zA!qX|()?FHZ6|kHr@LDaeY2CZ>y0RSqBA{^dd`LyD!t$9-MHm5ZA#>-z8Q6BK_-1i zpFYM_eJiRg&%Wo6c2z%!YW3ij&G4?RX5jW|RIF6jo?Poy-Q*JwKS=b?ed6ivB)*7h zo~!#jWc}q}|;s-JZF zn@z2)?i<^w>Lab{pVks(c*lKz!)a>`g&R2x?#@c8K;NO5>8QpfK9?3lsocOgm!~;D zmCL|J+8XCFuyZDtLG3fS46dr?i8C2eymT#g-_3+ND_(zBBQhTz+uz@YeYWj`j!RG| zRnLk2Z&ZhQZvCgloiz>ahhJ6Jz5w(G2A{ixU;`55c3iT!O?PPOXADBUO==*>Z-If&_IIscUgf%WT-5jlu6>&5QHZGE!l=&8^>WyqUTtE>fvQoc2Qd5sY9p~e7mzIl}f^(ymb%`I9|A#>xpN~QG4}PgN z{K5-~;{Kj+`4T7f96z5NwdV3q;fDIH%-k#7$f%};>Il0%uux}W3&kSS;ySk~TF(Qtr$o{16s>cIc5<$*R82 z^b6=a&J~;%RaTc%vsy2XB_E6T8W_Owu31E^s6LzS#(f@Wd`gxdKaL{5qZ>uTS^EZ? zUjhV+y5zXql3*N(u3$)1cn-nR&Da#}!J_3G><>`Xo6W`Zy!WyFGzl@d{I9L^eT6#n zzjim`uSfz0%X_F2)uX<+98bKyKy9>)Hab2kw~9>Xb&Eay&lu50`!xz}bT(}?8_G0r zf7JGS4x{6v2I@)wWX%rHVvVPlfuvo0sY;mpOV0Kx&0L2M{6hjnP#& zy6+nMUQ5ocZ>fjpMtinyOLQQcI$p+Oss)?M$&OF4@==vP4q`w%tId9{ph8qpXW#s2 z)x)?|jB34QsdbEm1(iZ-`T|mw<5XWel?r3p@UHjKofjP7H=XVZ+IjO_SI|B-yZ*?$ zPC=*mEbrRuBoc@EQ{Fw3>h=QSpYba|5(DL^!p#5)+P!LQLAQg6d-e+NeU`C2T~ z;zcdq(PEPpKe6~V`itd7a{gQE%2Jy4)wI}4ixwrj))IgL3=ZpkKUWS{{MxoFosF!H+^AJi4_-)idVDgB&Hdf-O}}mEQ)oT#e5p^V`{zkKiVLb?3@&->2o5$BDiW zpZTENR1(20Vq+Pn z$MryC6)14ZxIyB4+1d)zgUyEnD@-#Vmh0Wbii=ZlT;;S1;|t-+sVPoMN)fE2?Ax1K zlUzdNivY#eVK^lrOji*$My+)8>43Wjr#nGfhhldfbn^HQ`?1;%sutq%jHlv$zh@od zT8+Y%+{7m-;t36`c{h>kM&%qgZ}Dtj#{DRRdcqluMXtvkey#O|+oR7kn%}DPXJ`vL zcMVUg>*YC^ zT90VRL8}+Bs_FIt=wb>yOQGF9o0Q)UG|R z6$np9Q6JY~l=nZ2EWZr?nJj1d&7;}efNZTq<&p~$u1epg~oT$KHQls zn6|I3lh@Lat3mzp4S3d}U`Fvd`}iyOC!xLta#pa&&A)?X>J=7k&5TEx4Wxet=9Bsw zt785SEMu2=lyL+s1I#BLWvY_gGpve7nLYF~xdk(7{-}o(SETfARTp{iK9%AffNjIg 
zK^U#?e}*Gsh7C8!vw|7>Yjk=xrD_@9on7;Jd6ZM3yT8XnaNuk^JLc8uKa{Is($|8H z+mL7l_n2JmSfdU0y;XE|*Kyyv*bl^ZxhJq6H2dtgp2VJS{)O-JW)nEO{LQReVC!a( zz8cJlwod2y_%@t3jPMWJWpm{ZcxM|Fji14+AH`l$?HTsV6jn_DH$+yeM@*FF%vgFw zwdPdy5FTic|0>UPAb z2;)1&QJ9!koH=zKZAqL~Y(z^8-j#=DEp)r*37uZbmS_Qs!Gt>* zG}bE_!Iv*r;9KRq3ehL5UBc%1pweS=dBDbp;{kUjs;Zorsv5n<>YZ%=14OGx9Q-j? z9H3IO>$$0unM2Z zZ(2brb7YSD5ua~G+KJ!Z>*CJYUPE5o)3CF=6&N<&bx+2`7YTp z!D6c0{u zb`a)QU}r(`Nbje!poF;k{R)3pT$x0gZjqWd(0Qq|qV5LXlm>QI^u{<66i@d4|NE@y zu+$fQ6sr7CBF4q1X?1?P;v0PwZ7#WqXAX2_S2HPAu&mWs6fCJXPes%P)f7rJSw+<4 z$0CdcfX`rYLhX?RSgU$HS|@7G+E3X2z6g3%tNSPPr=aHK+xSG9_ecKU#5C-E={@_> z=|vB+>2iE71<@-c?!mZAiN{H-!FUx^+G}biLSzx_S-rzpy^ZZ#L>Ne-#zrnTKy);T zP8bIP=9EVeOt1XtFJr}NO0yuHUZK)lirY{aLqP3~*JV68>5}rtEPJSDJ1*DYdo@V^ zK!Ss6r+2(%mOa#CnwF<|mR=zq&yiF6Z#vN3qwSVe(xuiDkRJv`YuThx;Hj!wz2~RA zNeha4-yDwuPuJ2YFp5=Y0zV2oQ(L1@G72>95|@sS5>~;p)lX|rwKNKR0_S}YMuErT zQQ){uJg^9g=ytCVIx`9kA>1jzI@I9Hbql_sLw$tJ z3xJIR!G!~4PSz;!58M9))|qztoUhsg8wF-1`FDnpi~`Hp{ut;n{7`3qB}ReEl07mC z5~IMxcG-pA2JuH=g$};#WtGT}0?ohRE5*P@f#AZ0tuJ&7;@K#08a{)875sXPt3^_W ze!DL?sxU5i>mLt*q*34}e7+M&qd>DSDG0DpV4g2HH$J@8Dz08;6c~c{SOC!m(_lKoGvj6O!b?b3syCsSJKLh2)zLGxYhe+jRH^l`Z*V@>``EZ z8c3tSJJ8oE#0u{pj{=YTROwNmv(Jhi=N9d|#&0T=&Vt_vvmL~vz=_`P{~iT~Cy_c^ zq~?3;ywq7y?<;FPD?7GLaxlo$l!4?I*; z7hWbQ)OZzY#hwq)FIhK!9c8#$g;F>ED#lB|y75Y$!#M~?-FR)0)Qv}9^CG55>c$(B zXb7wuKMJE4ux>m=;LIdK(v6Q{`*|X$8=r$=pQBVa{v3%Fz<1+olTrtZE%v8we4Fd2!Gp4(Od9|t>NM+XJH6F_4p!VNc zlngmg?1$dV9m51HzW2yqF9C+Y>BLI>^+WG^E$}sbUjY^LGHvkBO)Os`$B(dY>`0OH zhV2&Ie+1=jx}AGFzU*ZZD^U1MZgdYG)i(3FdO)*PyE8+`PY6kwCRB~;U|yoT`#D3VvD|Ht090LE3-dEc8mcbc}erA?nu zpdcc&^f75#+8T;!JB6fe5}Onvx?CrdNjor^2{V(VMJZAg#Mf6;d|RLEiWYn;g6m^f z{ZQ8xU#z;ix{9t(bkX&JeD3%A{m(h~F*8XzY0&*Xi%xSN=bU>U|MP$U|L0u*NK-K; z72T`sbL|h}@nb0TE7tlHZm#<&k`v=M@3eeS!IcU#?zdOiXW;Sc@%|3{p1Bq`m{t6H z+Rref_$}Y8NBcOm4cOHVj$@K3UHD&s?WBGQ1si+b)pX=i^!x&Jb`$D0<7Y3!&26~2 znK!55=4-fkkT?I022TGu@(AO1ChN}|M1M__NFIPk?pI(=c{LvIMxmYf*)PJ)>v3@> zZ*IiRk8$xZZ-#Mm{x2G=75L42|4`5r{218OXY4cVKjHDS@&0Q3?El2gdvI|VZyv_Y 
zR#}X@AgEOq!baX=l0peLODXeI7si>$v$xT>L$69>C4%zr?`s zv+u{vZd_b}pN#Ut|G<-5ad!m2<$c=}fHp#7;?pC=|MSHKj9PxWKg&ls%g>rGfy$pp z^-tp`fyzsM)c`{=ZWySX!o}a%wge{YWZtmQ{5zZMmkV@;x!KaPu! z@rKdLKf}dO@RMj|Qy(rVrsujPKU(?p|3v&9eiE&`0T=6e!)WDxTs#}U<#N14w{L5h@!h8Z#2Af!8(Mk+eiDstN(Qw}lZ;0HSME?oqaR24 zcj2d^(VwrIhk?+)Qq^2Uqp!RSl^BiwWmNbQem0}ge~XLX@P^Uo%O8ce41Ova-86y> zV!F8nrr+;um|-)zx($Ud!*2%H)s#XwuP#5!lt(oBZjO@?>4PXgfS)Y%i*a!~Z@AF+ z;^M>j$r|7855Zg+-oszx@1X3zvN9vm8-9&`<0osp78k4WlQlN=AyVizO-ue7_oD1x zR%S%{AzX~{hHLx|T)Z8>_Lu3pIv{=7Dv$&73_;!ctuX;gma|qA`2y;G4!>E9GB>3l zWmJ|&mNSN({|ZGPVexNcK1`vYqG_I4&VGs#i++QBf}i}?Uf8BCsJmSLD*c79a`N0` z?}1E{Y}0;1l!oSe`-UxEAS(Nyb^4p^Ir`=hZx-`r#jJh^mv6D#cKmO&|DX0EnD16> zQlI*x`UAb@x1f<1mH2h%Q*vrWuQuVg<>4zpRE9+7r}W!DMZuZQy635yud$zp?mvPq zQNR5h(Qn^?dS(1(F&oFH^_SoSq_6ZRtpH-hAO)w5y05()QUVib*i5C7K*_cE*-WnSEL>cT-;x=itES>Scp3uP78@GQ-w6P2 z!K+(Q=J~Aj6x`g6i+A(pY}|Ye7hl2eOxC$@C!YKXcaP(@goM$wxx{*qZLW&PH*80n zq>ShP7GfKILK!=8u^YcLSxhLSskv0uG?cN7LPgdhWjuz9H{d6f(NtI_6@1G0X}tdj z{6duR5#0V9zvZ{;snV3Op>c&w^o&ouH0b&1{us<6a1@v9z$hLsf9CsFegi*>FZm&-VJ(UcpnN}mu?uiB>t`Oo=>sVGS=PN0H^0Ee&v?^; zn}vVC`rvoLlQTi(n5vvP(|#tNJ{7Mw<7ao`=GnNohBs}vxeXUDz;9kdI;iYQ<+c}&@tb!~DySV-wePjhu^++Xb034qhM)cKxY>z| z9lZG(ZiaAi9e#QS{)k8Ryk+>;%q0T1F#~g7X`gNH#N$_@?8{ksGj2YLi;wW;V%&TW z7ypLencXs%J$Pi0s({FhUhG-}7BT)!((| z$}jku8EsdfO%h!}1*QNb9bG9XYEZ!(vUA_?+cZ~(oj^Zd3gRZ*+-eML6@J3anSw!O z(>A%eyU-8e=B`5deVh=q6Z<_zAaWNgwW~AXaBGR$_HN&uSz$_B(t3 zWwzz~&fdpA=Wgb|^qEWCe>nu=-Y3!cY%TchR@jk{XBl+t0Dd<)`I!afqnze9I9cZ)hC0@f`?68PHSutGs9U6E<+)zU!8?e z$W|0O;4F|r=RQc5m=UXzRA|#(XMF1hi`V_urn@fYwYA{mhb)WHs}2*cNIUP_%qe<4 zo-^BuL$4zLdBO8C$f+`$th06QFWLNy@m}%QX~=7=LD!f3zB}z8YKl z7#n9|aOqso_*Y}S?SA85jb+)mYT|3LCP}#hC98HCVu&2r1tz=tcNje zR{1as{$uQ{K^A;bq#&ar@apO-QE>Z;6}b6yZ0S}?|K&Jq-4&bj2G;swb*;N%ovfus zd{->J_P4mX_7iC0uGpwVMwY*SF`9T=Y~FX-1W(Zl1}PtYTWlYjP~&=gY?i9^I@Wr1 zY#z_kayD1jdUfn?f?BUJwMJR%j@XlUAg@!1)RIJ}cf_8_X;a^NdF*VNngxe0v#c9Y zxZyTaLsEFHWCNf1Agm+!dknuA3n9w!nU-}NR}CM=KQb6*5VKaYNY^pM|KTrHn=ux$ 
z7TlSEe*>->eu#ghIDNmZl`L}Aqmblw<0@&^K8B+hdmXY5e^>eleD|&b8qw;>>^_ zuEgXlI_*M+N+zWF(>Ag7Yquok16to@ROB$efAqHe>*_{Z&xxI-qAuC^TkeIw0{&isUyL$dznSU}#NfZhQ{@sn zf|;@R&0?mlvMvS-a}{*N2u8zeqrpdC{3! zUOyYdu`gOD!&%8M-qwKxsQCL^{9>#e<9RL6Tj#FkduIHv$g=j?P?ojNsw`{2xBu>K*p6A7D9&NLcjCK;p?l7*cqZ`f%{Bs5 zUh-rdq1ouD5==R@boiYXpIrQ9e(l}12Q+xjPa9ML=9*Fke%i273Vg!;&U4Vh@74Ry z#z_L9^>^x!IWv*ww&0m9Am%~RihHfwz9 z$*8e#uOQ-*wV>NRhlQ+j*YNpfJYRZM6IxnjFY97U&r&z*_OZ|>8s71J+dAX!b8mk! zJeSY`Cxr?PY#5U`j5lq33LmXUF)@vNx!{ku;-N_m&&NNqjQeq6t<2%yivi8sO|VoB_r6ahtT$-gn&ocmD0Q{$SIs_O)%WNp4Xk zTm1_3%d;7F@>{^?qxjhm;buKJ;kEc}Vw}k)MrkZz6Ccynkun^5bws7iJZ7p7qjU~G zseU&u?ut~mmVaBj;9BkQ%yIaq1)q!Y9Uy6*HG4K+#9wp-y3M6` zY+DMk;4C}QwAa3On>*iLFr#nwGu*3Y+bGj`fwVDmjeXfNXYQ8SPjxrX&d+UcYM$M? zY@mrHW>waleHC0%XS;}rQ=dJp(L%l1n`bXY={Yq@vz580>rTz{3+!8>x&Z~@T4@X# zMO-wp>kHP{mp2VGt&l!8&B1Hcwf3e>EXR^`zpwzVW$S`U0F z2Vfe7yyF)N<_CZfot9uy&YW%U!wM|F{fLdkC!F=ObSLGbn=nIXN9(pXwJu{X&$$ra zU5@uFbT2XC?B%%}u^R7rO|~<4_G~q&tn;ME1fcHu=zL?)`6v6I!q@Y2&B|Tik4dIl z*6k^N+xRZqzOW?#sEhn|1PV+$E7xFz`MC-lR_Rac$-P+309#t^*Op!cleH%LA`Dz> zD_^NeTIWC3;Apy%(5t4u-t3o?PjZe`FW1Yc0QW81G~8_n14T3DaASD>`QQ@2Fb0(weRZjN#~syzuBr9iDjNnL;fA?vISLY9p2lt5 zfG)SGZy|MR7^E&+)`~5@U4L472=DJjHanDTLWlTz=WMJL0rT>8c+%cPIJ@FDz^F5q z<=WA*rNFggjioodKXaC~tDyq$cKbtdI>E{1wtd>tS8u~C zT;(?{V{2XZVtcmgW1pIAuJ$upEcWQ?@LH-lekRc<+kckdj*OlFZN~pM3S*s(uU&&x z^83sdme0=NvFBITo#6PM?LYKe^!t`tgUJu3vp-riG>9~pu60>{uHY}Uj@f_#1O`F`~8JSzu3_OF?43@Y|@bc4P|YOV6UAG#^$>4%;|=6LDwv6C}vl- zkUtkNz7`!W=!Hh_iTsA8|C~jcEQ2eruiOBf*dk$^orecrxE0-nGV!QFjb^`%gqyLf z(V*i5Q?=-0l{3iI=1d&)Ur5*Z_(iq``9qvGRqO`+Srjs#eU7S`*NbXd&)q^oi5cd; zzp--20lqvO{iMQ|{zq?$PAm8{JF-}M1#h;2N~|NchEAF2O|uF4x7fi1aHbW!KHrXh zl@KJbX<09*tT2n~a4SCRudn{-Z555EFfLQR(SB6gG1IMmG-n`da7 z7?NFoNze|Ss+oVOy+Kgz%ObrbBK==*2`4%0gUyu9L#S_FTy zzQPtVJ_js)#SB~4ok3M}k|3sr`zmyhXmB|=m{;2`1Xfl9hA?lFoglQm*4_+|QqOt+ zI@MMPtExf2-uCgg{|)&4o4&}D2`-9rsoscK86gL8%Q{wF#$}l|nKJ0zn}ZR^v~U>> zK6wjNu!c9u3srfmStwa1%X(YTH(5>+rnjR-#Y6abn#22BSrMXdy?ftbmPkgRAO4;0 z4tJ8-T<;7$^P6N;pQ3wmDKJg3)(tPk0j2snS( 
z%&PR&vOW@-RYPClU1^6In-|*n+D8qbVY%-!*8tV~Z6y#`I7CVD7l12Z}3Ih3_kbNESlM%v|LsZZh2w{!9cj0JwEzJ z#)*g6-A{)m7(!9SY#H{^vOcS(B(gc6>I%x^6K0|e_&!kamUHiS`g0X8D=_cpt6qod zej#}2?-?}guj@ZmyeC%hKm0{P3ppLE*guiK1j*Ayx&r zTGm(9C-wXoqWZM}kOZ^{tzVBI1#+VgMOp?}@!`LOx?>3EH@Kb}cDR_|48|$buW;YD z%o5`nOZ}_)pg@sjeOs^!K4bO}3w;LRE9aNZmXveb_;hz40 z=fwMwPCSNzKY4Bw#DFvB?pre7J{o+Fg=2%}g>XkmS}v^3kp|#G zUup=m)6D|#w+0pPDO1764!1SQ4OBLPk-}-Z28AoG;N=Y$U~9ah zVG;g)WuzT{G44c+ANIG4J+C|Qsz^anQaz=wMmrP~UV|zIA;>&(QD55-7Uh90z&ABK zRZ!sT)h7h4u;?3j-)MTuCvP;BF*D$f8qP(3-_$@o6!@S9%{}8&{Q6rex<`odm+-Bj zPi>xkmT02h7F0mh2d)T9ak;1h-hpWi7!h{&?;6PL4=wXK zZ?wzj@2vU~_YB|MB|s7e3BCb)#KHMI_hrjuk6{YlUDXmx9~bZ-(q}KhEa}lI+^m=H zo}j8M4(Er27aSC-3-60f{9crzq#MBr@593|gZQd#()su6vWn06K!eY=fQ!Pb03qx0 zAHu^xuh9F0DPdX`{EXWpyr_A310~6Ky11V)g#~NR7`|6`8o71DXDj{kmJ({<^+1SwtQ$9iPfw^K7X({k}qe6GG6-NK@WDkH3u-KR)4CLO?#zsF`M6yCzxZI zn~XP@N@GUUqx49rTp02S!(J}iUrZGbtx2a@xm3=1qnUKMP*ml3pDyIl+DN*r9;I?< z&np*GSS9r&pBeMyX-d_Dug6F}d;LhpL()D~WT-Hj302nx(Kn>4!E^kV&1cKmR4#i% zrieb3%77Q?O)9s)fFbGH><`-Z%MPeU(?hB90oEv>r{AN$kdiGV;Df@U^+8g&KB~jC#CX0w$QAa(i5o* zVwTL9vgJ%Mm;%*mwxDMdEqJMZeN!G8&Sh9_fB8Uh^+0AYHIm~xfP-eap^Vq_1ROmf@@Ca4tn6TxKqAfMyx~+0In|#7_!V;5^da>$&7HyzVCJ*xONUD3%#fEU77NldnM{_zREC;E*Ewvy z&{ModuPDl)pL~&4z2+tR65bpm$&SPI@;W;snzD9Yh>1zDA4Nwx$nn?|;R_n7m)C499n8XKM6dhJ&BV`D^ zs|Rz16x&A`)t}YxVzv_ZqY80Vsl!sIUYD1`EoL_YXR564F z9xD{NDX>>@Lgz4GG&K1IRkC!bM0h6t!~->PS-!(1p67KjVoOy)!S-s?I}}q-baRG? 
zVB$3POl(|pK%q~0&_7u5K;d&>*HlS^E$Z@nAkR4sMzr(JY%pFV&MKI--F5MfSc5ym z`PzKk#cX${b3AXm?Hvv9`LJV-I|DV0S>L(8kyU2Kceyj-^W8;jrI5QEMUL*|`hnSa zxY6#2#oT$$hYzysxzgzJxbyiLc0(hYbXTMFn@#B-p}`LL#Nzn{c>Z4V{Kt)UEarUh zU`Nc3JO6~LaTYljMLubY{DehZ*ZB|&;XBS3OtGh+*n_6n&!nXftCrXaY3V!^`JO5A zb7?7w9VB$^Vx&8>o&P_A?)W-^ZW-g&u7K4u6@$Mf11%3Z$-~% zIv-FyKMN4!#GQ}cg!j%r&#>K#<8~`X@)^r+fGdZBfcI9l_gUTEeSUkNiL}?j=H6Z1 z9KeU*{h(>^tu8<|?mY6`4)kN5^Ge5##a%bt+YeIH9X?~LI%RCHIj|EpT~En0ZC>e?*BH&X45TaeU^Y zxO1A_7n|!YagH`fKb>FQcwcL59--?K0GNL`40P`d+bH5Xk3dJ2v7H`vFVCKw=FWGX zkAAZLZ4HBEHKbSJiC)Un*v@&jyTo?q12SE!A=VIo$X&v}9dbizazMa#W}iDJezgGK z9WzwxXFIhjkguJfPh9 z<&Avp+|dBc^mTWQ^Ya^5@cuOo0MS{_Bbec{YJHCwn+MOe%`Qc@}(zT=yxVP81_GlkE)2lk;E{Qv@NAG5o!X3oW+2!SryY}tz z+w`0)w`J~PP9pC7)H>oWcd!95Ilaf4)PD<|CH5i$Enjtj zG>j3}cQ0PFFy0q;TI_`+m0LLPF`$9_4g)v4fXy>HFm>0U-P7XEL)P)v#E+KUbAYbg zjlCda=k(zwbIjei(>mql~)kGStt@q(} zG2js7@M`y*rvulZC+9S~OLpSLx`pnA9D4jRAhU(;Ij!!(jNAMscVWBxl*`-|oABmo z?o(Jo&G5ma=C}i>S)OrEe=abmGyfQ#+FV1d5VxXp7uUOK zvAa|TbSXfy$vtbeyK1X@?o-{fp2|UDM`01%_@!8{JL6agJX#!oIof-ZJHN?Ygqri$ zxo5Pyi&kSU_G3<(+&L^__jRl}j*3fsVEfk_IWMmB&ISb&Kx;Rn181`dg2n(qBYxDi z7o!xm(r@geM6j)`tq-&JcL9#)zZ^J~bH2-64ML5tk4W`J@k>=RXk#_r6XBfEI*4j* zar-zMyB{AqFK!jW|zcmC}#5Orf2U;VW+PY49gHMgCQqL@!vNrDaGC5D-6yKCc3 zje`nD`~#@SGUqMWA!O)Y4s2XaH~V7Cur2<^2Dv=Xc@T0G=)CjQLw&7^B7GZQEc2!7 zJnq=eHSFRdcgCgQ$I#>Fpeyl3N72Nk&MoLythgIrUgmt!T7zdX=lwTzJcIaZSsaf@ zM6Nv!G&aL|05_}Ri;wpKVLk*LyvljkjSmYI`m#)X6%u44Yn*?3F7YSUr1w$u=V#z- zm!q+t3RVT@`f4x+%iXi_pp|Ro{NlMsP;&_t8-3y=;JS#%tI z!!a|0Qa|}zk~pCAGaKK61$m=8*SUqXS@7&k=TGP~S<{*E{W71AfERCwFN~o-pSa;D zmf=0Z$(}*x|7=tv(T+QRm|-79^H|@PfDM=rT)imnTr=p-Y;I(FX? 
zPSh;t#TdxETin@B`3UZ21G&Vr?k*s%rYqeu)?l5y4tJOHB#uAk{AxzI@d&D6d;DTd zmha?OA56qFuRfON3t0U#TCpZ?##kWXKf~R1Ga38e9|ma`5SZiKP7+O?=_n+X)p6%n zShy>Q;|8G-kUwMyW5*PKqTqxNoF5QYcy*%ig6VoSrg;&`RL^unOFi=kXgNL+v^+cx zur!hl68s-`?!-tm`{2&yJndI>#TgysU7vbM0xfSZRrgO-ce~VGyKZd1da5|ELCuyq zqqZxQNC;OPjH~U;u^&3tx(1l5$@z}kcU*PzzUL4JR&duxAO%0k`FaC(>~bKa?>twS zGuwH&;FAT;BZvD4_&~{b+V{nQ@+tkekfw2QPt(HKS+3*!3E0nVLa7Vm4m{lOwPxqD zZW-^+aqc}-miq_d523&r&V4tu%JV-R?jUmuB!L+}lpB z@hJb^$At)S=n%6TR)GLFezPy$cpz>AWa7UewK*MweT@kNq4aAr8!So+W)e8%(ma{lhrK z3+LMqik*37H8a1)fb1jgd0@ovZp5O-+-u?xejCYYKlx+sHCOh5DEyin2mapSJ{f-# z>)a>bi@UADojQZ4>NMGnM^=!0Tz@=vPMjJTv~(WS8D*l+?;S=fO|caiGyoVVa0eus zg|Tx`@m3cNFT}-5fW(}I3xS!Sm~rFJ06z)U#tI_)$8NBpHGq}`S~PhW7zLFgX_vm7+eoJMCjd|CeE8t2k3DozpuyCLd{s z)ELHQk!}(|E7WIw?DB-_v-81EswJns&4n7uov}#3AK zs=!Wf*ZQ_cph8^)Rth?tNAdmhoY&p7=6Jku4O(b)euQOp7vlX==kZ2#qe(IF^FW#7 z0MJIFT?dF^j`InWbil0NAER32kD@UV6;uPTIn;(g+KZ4G_ZL`RK6wQuRozo#he^C8 z*6O^EAi@4#BZ?{~=d;rdKm(ryhq-`7|Nih%w_zz*Mf*r>F}BXlfW!shbN|}{pNx`m z=R-FEmD+eY!?{WtoXxMI>PPHjIFA;eSm-W!lWMI4A_^7UKt8|~QtBR5f3{$}K2?UC z#mpvj7^v|F5K0yR552LWwS!}g6Ey+gMbWs3n}>>1F52Q5ItOGfop;y| zJ&P@2wxPbj=i<)!W_FH-XNOa$YW@&2v>5a7d8`i??pY7Z?fH78p9!FeJ1gXi$2fl< zg;uZ=h--nf0ApLYkU03byY1L9^aJ?u`xx;owheFxaR%E;aKyoy0*;gc6f6$nDguju z=?7+V%I_VeG;>EKWD@>94%kuf*b06+3ZWhPz8(k#gZTR!!@##ZzVu-=O^*YJrV(sG zoi*5&{|dqOf$0R>BUotH0oWc_`%kut0>(HvXTtJN1aD0U%MVNh%Mard0W1S1ujzxd zcHR-Hh|v5k0@d>!nCu|Ql?ccSh0S=)O_eA%?)=?M+xg~+fiy=JM!E*1xiPWx{&Gln zBIrF5Mk^S=|D#Cv$oNRN2J|Wz43O@v<00Mq!m~r9>qGCc6NX;B{9#Ny5%fNA6ss`H z`4kcCRqm`iG%EgE4YWTz1+?*|Q$Pz&VG2O2K{0}I7YMlhBfxE*#*>2@Prj2}CYS{g z&OZt^X%hV_xFf-@sIdrKCn1$QESLttt{<(!uC6;Nn}C*xQrU6xvHxRGYQV=fIgoLL7`RS*4GMdJP^LZL>s;*-xgM&krl7wK=DpFo{7 zp2vx*_tH5~`l9q_%aEs>Z-Oz009@_V!XmfAUIxY50^?fs10?lnbbL3JM+f0&(%?`L zGb3KaE}RqJ1&fr((|1BQ?F;qTsB~bXM);p$s%nO)j4kNAs)2U7X6L6j9)Zpm=4EJF zpvr<8#1NRbp2VIsf~g@ek1AX=C7yi^^f@&&BhVQGzcgH}TAoNba%vyOo*1TGD;Gli z56KJsfDTi;f^G5&xGO>-;%+FoG^OZ*1c>yfmvz=dyl1(&=jtZ@D5;l?BAO{J3S>n!NJ`@}Q?_+~AIz+VA!x%4lthMf|Tr!W0Xs$r)fT+z_w 
zdel~(P@wGye*pqE%lY>i#!zvOpz$X;_f&{!HFORzRNN{!{IOFw+^5$y zO=*9vIQ*6f4u4QUYo7Cg0JIby|3J040oZ!U%p>3rej~6|FD^GWvHzd>gJ&o+t%0pu z{$gg@zeZdxb||<;`~|T#c%;w5BQW5Rz99Q+4g(J<%!4(js}5+*8}MSO3m-=F7K>>nZ?dY!~_-=Kqi48Z6S2fy=M+eue zS)mBumkuI^E6z|U81lvRe~w(btM3yhmHi_2)O_7BSpiB(ABV(M5sO7xq`|>w>WIXW z$g-y^Rmzkn1xnHyvvOpx<48wexo>cg@en_xr^Gn(aTMw^ z_dVl%|7c%%O~;YRnvgDz+s348dQ?b$r0rvg(89d{n~9>U;&$MY z^vP$Ph)dG#fMM;D^i56w7*lY{CFzt)5-{Ii!X-)B!IwJU2-vYxE=h_i{WW>g)zA+s zf4IP@%2@^Em`3FGtDVLTFT zluOc7wdS==<1|&*a!L9paY=fYj)pkplQfQR{VAU$`1(Hrud%3a{S=Hkr+kwB!hs^E zd?B77Ux?a>_mof4DW9a8MCH^ze1i7jDW4=n4F(>Ir+ku}kBVHi!1*>+T&H}Jrp@u# z*c|_w{70&NlHMZFHCwq#obpLJNoV%IMwmL~lf7IPWdwd*@3d%qczJdEYGY zu0p0ch6=(Z?0?6PAq6%PUW00uTanXpMjKP8Bbn6fWA3_L2uehn&sUU?rA;!xKhO{f zQ-lxU3`TLd&9cjVNNEdyP$d70W5%DiLC<(XoCc9z%(zlU)9H8cBlI|% zB%OXu*FhwpWBxtI`K5IP8M786Z$5@DNxwe)=S=z~$607ldl2Vr@xL&rH8LWw64u1o zGXiS{(jLZq)CM#IXAfZU{BYR5M&^!P?tBT0CIGjhDv#>qiQJs)m?Z7}h|SxA@N2;S zk8I@P!@4|?iQSmd?WuOFPNY>kd7S{q^N~Yr9@h6in2^uuJd*ev# z#`^~kO+2HjPKVknKM~|yV`kM{y#W)4ly66LBIkD~oG0gDy!CJ&(IzJJYtlw=&Va(p zziqI0UA5bF-_)@}*M3jKkxH!EaN&aEjcb5GUoaaOl=u?~0D(TgXVB+YY{`6oF7o94 z?r?`fm9KLeE|@fh3L07$ceWF|u64FHb~HZ9C(btEAO8zym4NT$c{*6^rjfggND00l zV_hC6hl#-Y)$!fx;)3`+>f+q^-Rj~g=E-^H$$7z(_o*_g;_p=#7wa;|@pX9Czvm#c z=Q$3JuaOUz^mp;a|NcttEaqC9Sql=>q)ielF97B?TOUIle}%87Ns;d{&EA!A=AWE;*50`u4Q#kFl^e-OjRa~W*5fqA;PkcRD66tuBC#hm+>0Z^ z+Vg`2wA-EF=R?n|JkDDi&3Fe=`2ifs#$&!vx2Mbo+jkGGOLlbEH;ClkPE5jjIT_Ex zz=ksUa(iE|m+UETkcaIZJBEALq0`A0el5AT=MvA_kuC8^xb94`CsQinxV~OFp{&Gl zpt+6y`Gxu%wRX%^I+@(0pR<*9o-7keCqicnqEt(|FnlQ4o$lV1HVe7gKh&>1U0or0 zNpV2V6w~v#vwKsbWgwF;6o*o|W;ypPIcRyDR@FZPF@xyUU^2O}*q_;-%}Z^)49x}z zw&4)I_DxS8N#SV78#0@BWs3Pst}CD6qK_s!_vN~GB@!VBNhZ7_Dd2CC4S|nKvVwt}l-A5Vmpwv)A6`^^XjCnfw69wo8C@@>yzbz8r9w zA08=h$Jv4@9LZP2VQSel_MtSKi%yhY5X|imcGNoAQJXyv$B6P!&cr32rxw*eB(c3a zvb}s<(9`U3cQzAjhuV`l3*G3&8Jjrka(~-!*1+$c%usi#j85fo2weD-zg8S_w`GeD z#jS+UtrxG5KQjKrdh}(K=U;l6mZA0O(TyJVWNxr>=`~W?fD@k{wTn?HCM>* zUxCLLuSl#j|81RQ9%NbqgjrV?R$u~8uD${mKmmavk!V&}0f2;6t%VpA(z&FV8P4Ib 
z*5nX@s>K`9cxPJoN~$N>)q~BsquXom-05v=@9l|#wg!Yv-X0t&cp|W%KtVOKK~C9h z-TnTn_}7rgLy|c$lZ0yP4uvnZ-Y3PVhs4i>%G{0wA$<%a&16E<+^H=BpEhP^T#VT%Q)l&-V9u8}ejNxMGn6_8rU4R|=dGg~^K zrm2GRP8Sl?#2x+0c%{@(MzFyOImdV9N`(~69rh0i;Ix8PqB%1p0F0SSU)SCtJV$bL z$8cA#AV>@|%5i8=3!Y|CUr{9wNB!bNU~HUBs}M%raoz94224558kQqqS*txQ@U?Lu zQ%+@biS=zftr(|w4(u#uGT;bGW2s@#^7Wufot=GN?=!lSURQT-dsk=MZY)r3_-HL1 zC=|;?JzH@;W+^><$m-hL>+Rjw+1tJ+=_UKNC#4A=7ZvgzPdu(f$_9aLUBT|lG=#?2 z-knSKdcda|HtHN2pmmQopgBEF&nqA}k(_993~U2>u=5mO6kVrjfzRkFV=r|9#dVQ_ z;FFpt;pyI8=)6!Uub3Lc%oY!ntex3hu4gz^EM+~G}5st9b>f2f@Csiy3hfSgZ&4Rgs& zo;Q%)j|0Aw*4~l4X4pXBakIOy|H@3Uzfi)V#=A44nH-K6FI+b=yj=|ytgNsCm*;S5 zcdi}y75rZ3$dF;T!PWr%Cwb=bc5(;a)ioK|#JU}z2@xK)cdVd!*j-gj>`-#Ommclj z*}Q?fTERwAi3U4TcFVQ|Yn8ZHYoXN`}+}XkQzEZKLOn@UyX3Jw)oD;5Vutsd%+e_ppz{WhK$5|f|C$gE-|`WK7dcK9Hupjy9|vb2Mh0k(L~YbkvINec7sRh6m5^dWiGadsbndE> z{!(T;1N^6`hS|kUR*xQL7Zou+Y+*zpHPK2l-bBJ^G9dOQ)`RGTfd>e_Sgs9d-Mb8A zO%+0n{w83DwRpdIc%*bdnAv0(Gz(Zza26+{BGJ$RE>u9;Y-0VnmI2T#tPfY70&68q z3OWfoS&QXKP;46nw}c&)EoZ?mfFGeW2^J>c7=)p|ctwb>zF=#xK>dY6&gwaoPah~2 z@~FS9Fp?h-)kw)o3T}r+2j74!Tg{l2in5?!2QJw;yeZkTXFWKVY<`DUatZW>WV4!@ zl9QU^V17tMuwE@xtqcz*vzABIPOqB!dSPXFH}Ns+2CP(KeLgd0h)SUd4W00>nq9;w zqLk?5hJ}$(g^LN(I$NS7j}U{;kR2R8BzMFG|c^?-Gb3u1aw4#@!1_m%~B}}Wr0Ocwk(lC%()K*oWsRh(ID&CUm(Oeat1rsg{A2M?cCI^$v zmv}wiaIr9)+7D%khh-2%qQP*K%2*H2N+^>_X9hCV-q#e|>aaXeXbVga^| z=TV09h6~bvOOODzMNuPyiB*-;y;?0)437hfwa}$vA)28kGz>(&=)zFcA@XiDF;{q= zwb?XKIYHwz3NA$Jq)89!J);P2iwfk-G^ZKg=Y zLyy?p$b%@oj1ffIN>^u!1?wOTqwAZwQ3iwyN#$D0#gPnbCE!EKiqjJ30>gWulUueZ zBg2+05sbd|f=DfLZ~07VBz*uEUO!CAnG&QdH6@kQJj8(lz^7n39-!9QyE;Ec%@z<( zegq<*-t*FRH6Y=dt0p?pvK`Vxh_eKuR|g7HSCRxTB_hG?uwUmfUwGlK;nOobvNjx=M*=FIhw zuf~k|i6fXggQ&2Nc5))DkEfv|O;w0x)S~%fPaDj7Jw0u>=-H(@Y7{iJXcIV(N?8+} z$3*;wk=!A%X`^P9NG7|2W#|FbA_EYihFYu_-m3fjPcf-Sv}r1nUH<1wNo~D=^P!$%#;}%YkVA)g%NXQKHchHU8-R`%KA=f z$ybwA>7+M-Av_&6ghvct9`Le~!-Ngh{i#?_C^b(6!j(uf5rsm>yC-6ELEO4vv!E^;UXKTzd_WE=zKrD3mq znhk=2|8qL3F}#s{4N$KT%c|H|m8cQotmvvG;s>5VyA!?*m~>@=#dxF6XB)hpZ2mg% 
ze-tmFtf2NXuv|}h|G>+@Xc&}kftFL%nt`QSqz3LXr3~CYCglaFrl)y>`g*LybqTVr z;(kccO$mWo{!?ED>_D(k?h5Qjl~A*JFZfO{T3y4*ZW<4}hNIm*DFQ&d61zJNv6%q1 zWGg)mfsVal3}zT!J6>rRVj{+$Efvz6H+zF)rF1G^X?NJT4!)I8^J=%1?sBnOJ3hWc z!^Hx5A#&p3ed}^hO=0NX;;-Lp#*LQgK&-1ZkgI;oX za!<0e*W1z7+oqgHckb?L>rHH&c*yArP7!e;xoo+d%OoKsrNq~x2C2|agpnN>o;+e7 z`ZofAdgyo@D8F?SL~g)mnNG}sI?D5bh{0|G6LGnCaIiQuXfx<}9+K;HIyckSE&^(R>O%@@Q8%rH3d30UWrZ?`9K;v3Q zL`S(qJ#cs(p%SJERF+#a@}fZYK|g|g{HJdrPyz9GQ~nM zTdDkO2NHl*m<)~YHpdT440I~0Ynxlj=)k?$de5Fs-YTQHA>fEd>f6hvsy_{#&t!7 z!D@+0IVuJm%1MpGU*zo{tQ8HA~2w8P)JGZo)plvw?k>Bc9Oe$M(7&+8}Teavc zRWB=75zA0x>f!sL0ap4V|+0?Ab#P2E1J@> zz}b})TBC=kfbU0V1-@cH22zy7UkM#roB`qsWMPI5Rd{hjcd&;oZtBD|9ZHn1rL zK_RzQEaZ;7Hs-Z~9ZhW?6rDEQ zya|ge;a)B6Sbo`|WcV?k%3fwHWs_SzP7Z?vA#ox1QEgrgQRejshe#egX!*eo+HQ>g z)-<49pajwoX|F{#1_L~(c?<2iDtuwJnsFkFJ8LABmE%e{sZoE^$Ibpa0B5#vhxAYzAi#E6ekDZ;|DfinkFC#=}>&H z9{>%L$9{yqGXMaRKSn?lbh2)}Af!~cbpxnLOfgC2!iXp}bl5VGqCi4$t9ADEu9`tN zLI&uUJygK%d6 zhT+qyPDn_hSajbOtes4%n%I!mF|8^RD3q53sshPl6B1$fjFidGQxB(?BI5W1sTx`c zFH;xVu8M^5RHi0z_2_r#DO z1(J}ViNT(hZCx=h8ggeDet9_u4G6jECCZPG5R$9X+dlnJXj~5kBB5l zkpL*62MXAzHXAAv`YCG8E)+IUQM-tNjQWMRaRv5?)E6n`-2aNc!K#2>*L%am5YP>UqzSVZLgNW9R00io; zpc8|=Yrq3z11;NrV3$gTcEA>Bo4Z!w8n=NeKUVVsuB=AETQ^LesP9OcHVrX!_aGKV zdr|{Rp_J@pTAgqji2wTUKZf=~k>(?r*47mY{FTa6YOQ!TFagz!$#+X~EAAJOCR=&fP|VJLm!?{6hG3}e#-UV~+8s2`jyJTrv4 z)Vp3_g3-%r)eTZDROD)^c31Je%;EZyFK=w&#`QX{^g6qG+Ir2@DOK@KFvV@+(+=v9 z%MN9Q^VLdQM&E^Pu{HN?zzo-nq^3`rj-*z;m14$)p>}H10y-H=*3%Bo9*S4MCDP@{ zG#g%RT2{D*Pdcz;P1QXVHSi&lK8SCh%Jb4GY_K*!`N=yVylWM>(QU#l4QBPQNu7dL zm1e#MoL1ykQvR#ZPtvL_vhE3IYz&gXY=^!hlCn`|)@RnmETUYd#GWZ{Bb-EyeomlT z1*C)ef)OgfSt>0_SiJ&ZM$)hb)-%rS6ligxS%S)PNKb;8GG-mnR2h(@CAG0)YPh|7 z5Lv^73m_vfIW^nZqBoL8G?Cy1N{eMc6PQ8+65+E}^_Gp8cV(~RCW z4Py~>fFWt1WC|lC45y{Bs@!=m@7}k)Fa(E%OpkaANG>6@H*52_AlYGaf3>`Y007k?unL$l>_BW617UUcG4zA7Y$Rf^D(BEdL4tllqA3tMtgLSZucmmSTpQJp zra7{9lhqG)MY zDG-4+&G?w~Rg?h{Hi#T=u=QKlBh#L6(7{GE8ih0(_l5RK9si{M?+StPW6)WF@hl92 z_be1sCe@RHF8#IehMSO>WlIXZU=UzzbqY-5NiAqk(qL9R!7PgLAA7#1+(P?LJB>m0 
zf(0xSq=D5YBv=fT1*xlw=o_iII+SdJv?W5et2p$K&foWomF%>;<5OW?{)I1W}Gt%x1pPj}!sdCnjZuwarH~p9kiJ zYIb1aFkLkpaN=lu)nL7lpcPvXrwWIZHlrq2WkMW7^HOVXZh%&|eCP?*SW(Yjzj3je zy40DH0?F-?kUfieri}FS50xh2W|SIqv@|x1e9GlPqiFMGPHcaKKu5zfEWl@tWDkkS zGlK(ulocYRm#AL1O6SM0|_v9neaZ+_i@I!z_tEKAkgFuv8W;lzSK9ZC( zfH6()pAa9zNYo^=v(RlFmpb56N3Pd1mNo}#J3epB%U;m8gK z;9}*YTUtfqMxO}Q7)jMlVHT2-o5Remn)bwVdsMsjnxRra>Jf;-ymvi$dDrs%(SXIBClN!*@z1mK0ws zkzhevYk8?KPnJ4KKszsj>lMrC0c2{=7_YUcy4PSb|0y*VhK8>XshHPyraBeL2tQRk zFXh3u;{K9l@}Ch4l^FFUVE~p%5x}-Z2^IJQhm5H;+=4^Md~;Q{>=ICyN)Z)kiT5In z=Q^)>9pGBcpi)J4Lz6Ou(zzipt3wvdGjIdex^(s#s2&GNNES%oGGZS;1-qy^Wur_K zq8>u#725*qtx4^~=s-~EdKLOydwD~;EuZDAg;>xMS;HWVI>@n&poK|sG&CwiGJh_2 z2+<^lwUbQqNG)k{!yZ~HLiwhQxj<#zLJ%QS*s6#YqJ>C=tnWaJ+3+uK0y0*oFc9LrD3XV;wCPktL(sg=*9?XBQ+q{n z82EZ1VkppV>b#S{7&wAN@JcY6W>};vP7e?XpC^4}l8izjP-PAdBO4HGIqQ-*7-c`5 zXDd*>R(WubAqO|S?_qOG7Y|7yGU&Pc;fq{85Y5Won<*mC8XcpRTUy(;-AE$X*3-V- zQba6`h*BN05!L``OeU*C8=|NwYiRCsArBj!7e0a0Ed=Bzgdx>vFh@1DUQ;ugDj-!K zAkM1_0-_`Y8%9F4RZ`$<2g zR#e}H=)Z)j%Vg#Nz9x|b!Y_%` za&U~6k5L{}j5x_2W(~tsjw8Mbo%|dSnSWm+Oh}Xx0gq}RmP%Zmdi{iwUZ^l;Frb(i zgx$%^2CZ`UO_?h)7XJj2zxuv0u#wA=NHo*eh#ym)`)W;m(`j(vn$9^97*Nf+O-n~X z3wSCSu)DIWOaZ`#nBkqg3E-lxW7kety`UUTFy3mD51kw$vTQBjthFt4Qm7EWtqGOy zXj>Uk2jNd2Gq`0-)!y?p1{JZEMmIXKqYhkkf&?>Qlmnv`blH;C%@4WefiKDyrOy#C zc&KJbfZB74YlD+cB-Bg2qhO49hBVFwN{_-5B@53zi9{X6Ry>e`k)4N$mSA;C>oe4R z7!8^C%|SlFHqx^x@P+SdpUjg}omocdLv)P5MxJM+6@`WiQP^b@Hn6YO9>Cy+4?%%D zNXz{?4?ZA>=uk#5eefUCG!05ZSjA*Vak}APDo?VBQXovHIG=|gN+YOEMvp?$1cwEL zVw#XXrIc(hDVw?4MwM1QdY4X2;3Mh!)jmcq>fMfsje(tp?&0u(;PI+Z*^Zer^dkH= zm2;uQF(C<1sop2j(<$FHbaIk&TLY%sNAbp5p(iW4RE8w@PeoT$rQOiQHiWFHHI~iK z5g|Mm(NCo|lZhox3Jv3=1)XJ5d5VpScD7e!!(cDz?)v8z2Rf%s26PnHW5U;hLr2N@ ziCRkVNEpoO1rLL*skmVTjfMq0f$Izpz1AvP8(1-7uC$8){7rfvd@wRHUE4EjDut%o$ob!E)4~v4P$Zx&*CyoL2DymR5{Z z4r?JQnm9?_1TIdRO9I9uzq`BZa)i8H?(I%qncVH|L=K$Y7B%M3Zpd6k#KJ^L13>^d zCk2~^muX3>u+QLNx_wCN0XH9`1Oc=$OM)!?6wnUQDr710NIHj0pkI)alMQaCXzq(Y z{tll$r2?3?>VRv0QRfs9ZF1H8p*cl3S$DHPLPuIl#mA 
z#ItAsPcV5u?6)Og^h$@gaI@S1^iBrYVIx)Zo_wj*A`_=DG)!R4EQbC|(8MZ~R83Ta z5YUJ+`9#t!Vi1!jqgPDsK&F3$A&WI#6MW=8`C;cn$<6+eBcWYg8R4Q>QDSiKgs8wI z5iGShs5-TPVSAacm5JgUO_hbiB^S;7j~IX)6)F<9IT^~*9udZz%&iO;GHfC^K7zYf zRCzm3_(PPeaG(aSObY@>4;9h2yg{6hgcL12s9k-iZZ1HdP*Zb5@NhrzXLv$#KFUOE zQ5B!BLFEHUdIDJURWl664Gdqk^*HiPicdVs9rBTKBIM}_AbJ+Gf;cdpX|6^o;KOwf z#|ai-NXUy5;ncj4Yvt|-zN(dADQs=JcmOH6^l z3BMB-3myc+wfK#VY))1f_smXX5wCZ%C8)U#a=!>G(!Dp?+xraW zk=V{dv=Lk%T`VoE8rR;5ZK>P3xE7GbkyOCDom;LK@mf&V>_=z|KOaahn!x%9yF68s zUf9bzGCl*T)F4Ms0Z^wjYPEY+mqa}-DrZd&MVVa1z3D8u)8!4USU;W1Lv08oML@p? zA}M{vG0}HL#u1_!=yfs!>L>%1odi(PqXe$N$A>E)4rxd z4#A7s8D)CF8Ac#P+?e!O`+^)FmCW3vPVO~WH=TC_ha=M4RXDHEq4rnOY$w?uBLOP_ z%^qYw8!2@w3`1;K+ikPb#ix4|UK$hoZX#@!?oKB0EdU=>){iq1nU!8gm8waZySaYI$!kUaS_s_MHere)r9*&)JgZF4pU++ECbt9Wm4#zRR#j^m0Dc?8eR`tIan`kSK0wGYJTSW zaadY}9i&J5Mf1^9&LNpC7&?j&TAYAuLak(~X+%H3up5jPX0tLG|C-J%Yp&O)zoAD zX*k)<}E1sxElbtn_&ME#Wirgxxi1x{?iq)-FddX=djE!Xz(=8;nm<;R{zI&EL zIn_ooOvVP!EakLY3$q);#bhGFF69?c6Bct~X0CDSsBvaquk7>iVusypP^UHvuuFLV z`slW9WT9Ow2(greB0^7vPcN2I;q=(DkQIVTdQ=+|f*}}QnFbi2 zqt`+MtkaO0z|0_j3lt%U^Drh)HcrBsXzt~SC|!9;UIbZAv#}D{q1dd70Reqg`L|3> zm8^q!Az1_zZW^ryRc2u`VnY#Wt!}Sx-y&Gp&lM@_Q$JeuwIdp}kqM}mB$%x;B&WDe zwt=R+I+}&5Se=@CrrJ9V$g@ju_f!T5un!+-qk;yp&Bf5&KB?Uudh6iZhSRClZH0P#^#3hmlo*sM&Qs_a>~FF=!Vy4CY2k2ZT2yc#kXPn~f=9qd?shuXOfG z!Qv+}_2XE_v^p`$g;BojPYv`dVN3B|l4_r-NzHo&{PaZO??(C$>31qn9mYL%hbXwG(b8?pq&T+CdrNRD9z@LR4NpPaN07CNx*5^h!j;v zS3@}mEdV^^C3zg@O%o22Fc&*Qn4;>(gIs#%Zxp2)fi-Z^L8J>j1>qy8<_#)YI@imp zm|b;Jt87f!XdX)za(6qaR%Q1q0V#mx+UX2^co^UG&%97I*hbDm;7*aCKZUQ9F&R;# zd#`Pr1J1Ya+tJ3>LOZF3Yb>KWk%p{?F3-cM{q#|o>HJ6p?4IlmvOLXXzXiPMSP&Y9%XhjNT7ofl;YXj4ifaDkWc&BwoqBZf{$ATaR^EWft%b zuv1hrBL0dZ#155oJqlV{&uDP;IG`yDp}{)vuTgS2{PXb*r5xhvwq{{4+u@9nS3<$& zc?VLtL1-Jt*x;4BbVAAWX!w;v9-)P*zz!z`wjq}z=Ia&K3CilVW%`nmUp)AHca>lQfI)0qRuK0jw~Q%ZW1SBZGYnu zNRAJE?tFj2q~xf8V`Bz3G?@t4C$BgNaiRm_n=BJ^{wm}t>H$h?Hwsl|AQ8wx}Mv`EaVGUe!`iUB6{!??6hQ-CSeMrls$ zD`i5Fq*@mU49%d64eEsG?h2cL;fK~&YJo5pA-=5!A{`&|CUpm)fT2R8v&-AlwL={_ 
z6x!E`mS{5nsy$i*wg;1gNt{VkflLL;4bMAu) z-JfTk9I zp%viJ*Ef5e+q^w(oxSZnJ#Apsdwba5QUOeHegKY3%*5N%etBp6&i3tXojX(sunF4> zg}_v(Fhewf(0FjAxw0^RuRx$DwQ`{-cx6B^`LHgBgBZ?LDqD3V?6rnc`Es^YGCBLB z5H|jX4H&Bky4+-Z@k|biz`=`__6=UPTMUE~q!`L}@9N$a zCbP53VPiC8)xjSJ!J`6~YT0*t_de4V#iK>Ul*t{4L&sWqUa2S%Y%{s*5q+PX9FB6l)yAn3~o@# zjWzw4R`55GL1dXGK5t0Ug5Z2vgi8}NLVGgE+&Il{OC};xM~6;BT143X7$)dt-uvm}VGql?QZFbVcQzU`l#D5>C8%Fq)8BM3Xn&5k=_;&5l# z*zni}6|e1DRtIf?I|g{sLcRb>Pi<}vG9mNM1NL?K-tC2 zcj8%aFq0l(NyAVDJ_Z#?L=y!is-m)=kBO=6kOzitsMz1xehIwE!-theYyz;`6D$*)05Fi~y{B9ppK_7lWu_z4tzcFCw4#c?GU!X~DddZEKf zxygly(ExiMaSWO>H-ZKW>CKxxa{x_TEhA_vvAVs8k#Y){(-tVDkd0)$A79t zNy_yG=U}_lt4Z*^3FPR!ntHzav`k0h{yEta6YR5A5fQ-n_=`nfoOb8e7`S3@Oq z9fR5ZNvLiQ`MR zf(h!~C%^#elouQ@n<|!otUyMDl2m##RRj|W)2$ggS(R~RKP~;G+QZl>h#Loep{3U_ zH73t?sYhiJp?0yW+iUCX-RmLUE%Yzdn074L!ju`w?y>G&W0j76;jbE9o1(YkRI8O$ zNhCd_m_cngJ>raQcOuM+4DRCpN41^is>)A{+b8%h!cmn20+a@@G18lDBqv*|N(a4u zeMB?qVYVyJv7ox8N`bF+V1tDHF%R>RaV|IqXX0V~MDB&;!ZHIKd<&89Oxl2y7E4tg@Q zbaGSS6BZyC&I^Ufq@cCI;q}enzyWzkU#K^vPrs+pQ*j*UIbW$>GldZGI(P?+J>ubn zZt@X!D&rGC(ZBqrNN~M){XY3 z7Q;1Osv59dhmfj`lM(C#=qMt-MYQ6E8x`*e$mF$-I)!6~32zs!^di+qnt=flWRh#5 zx%y@A~s7w*$8qrKTa>-EPFgO^dKE!VU#fwT<96mxPMm79kvEcw^COKkEM^x6i!fML3BbtrGze)B`{;5_%R^5 zz%H!9Ruh$Jcb*6xj32gwGYqt&i+@DK$vM<$fjG#`TIazlnnF}8QKA`0Ly;lKn(zm? 
z)~azl3+qOrIIuq0X4dN}rwBd#tNTBG75GM)*>pz1xV)!<2JXi_(0lh1g z5<}F+OIER+|ChaY0n#kV%ERveS9NvI%+9bZ(t3zpuVz_`$LeoCx4D@vWJzFz9 zT{T_ZwFBDef2-@!T|4!fdi4xqW*6DQmSK?qBg4=Nh4Yu9igtRYZQ zj0)?%A!6w{O?IyKP+2?f`B>2)IC3PNer$bf>y`cO3lL_*x>6KHK3wjw$X1f7Q{y?j zSP9$ac{TG#mL4Eb+KLvxF(BZ8zmm4cGdJO04=b%zrffC0=nn zq%Mn~wjHYxn3IfA5N1g^z?{ihqHZOqGeIdMlZg6ZhUzP|`Zn~5MHbUeAv6N{;DOjM zp?EB5mA)iO%`xmKI5k zYY#Xdd8=r2QPZ}7hp~7#S@*+Ft}oD8s(*Wb>!#TH_ZP?FXvbq{v(6p?tl>7YCKt$> zo+8|E;*Zg_5T?2IP;^roDo-s@p0ZF>2q*$EomSfHSn31|d(NiuacDu+mDZ7_=DjW4 zCU+qFo67fK-Q?n!KtTA^&8@BV_dg;%&;~-tj0>m0AbDx+rLCPii#y9+b~X|hbQXqH zU?~ry;V`}x*}rVVLUU^dd*|jM*j(9MS`lVcuhnsv@xfHfv7y>Aa+*~3i|j_(6BUDq z09LJ~9DS8GoX3@raqpFdm|=SPI<#G@{pDAsH#KX{$JchR@In<)8mdneB2u4sVno<2 zErZG}qN^i$8ACp@zjsHBv_Q(z@nqJ#>!4QnZ;(q%Bi_u;%G{V|uFPErt-F3rlQ{{W zhHHXg$ ztB}+^+=d3;vcCK`h<9ek1kIYZz<2BM~#I;{v?B`IHJpB%;<9R61x*$Z^e2 zgO@<_uH)xnzIm{wA$eVvYz6Y=C=kG*+zus?@Ud=;>cx%V+k2@^tPjeWpftshHKYVG zIhn%+#8&49c5N$_u7YrlV^iQF8@a%?Uq9FDU%&GFmDvwo@oJPuGzRk{v7ld|cgVqN zG$FjkkOkbFW5oqXp%Il9R@1mO$FckV#4#;#0bDVG3?l^O3=35f?|XkO)h7Xxt4we&37vc*0XDQD8r(f=9S{qEmB-@CR2Wu#$UQca}6HUXbVO z=1z?Z>7OyB-lD_BskyMd4++6}kRx+%8+KEGPm#D}In$0nctv<{3fnn!s_I3%v%h!S z8Q}MOdK0Le9S)I|ijnxzYlvt|pDfiN0%K|%Z!pyTfi`%zf z0^$^T$W|3;A>{KJ8d*~+zJcp8aY{k}^v%Wo(y9xaU0;^%T#Vx(!jWfQjsiABlkxrqg4f2{T5xuS9)i0RA@Uc3%KF6|JLCCzxfZJ6B{u=5`#=@~{cj0!afT3r;Vpz7ANs;O@`oJ)V_nihg)1PwsNE>31X9$c}`cWD8I&8yWuy}SBvK{$q3m=F?3cxhFdFpG6oc(VVFf8 zF64O)ky<}LFEbXTBGcw59s5Ix4X>04LhZSOT3nj>$NDXP)Aa_l$Jjun@%oKk3%#q) z%+B|O;VFg@Ig*V7@wuIMjzCjmZ|efxoddI)m{>(n=?R4u&N4CsQB|aJq(sEh2hJFL zzP}IWI0{Vt9Ls!hD&HG&Ys@+#V(bXBu^WC@*vqleiz6 zx(~-sr?SWH-$UF2xq+?Eixz}~4k)68U|iN%IaNo>-082TYgQSRf zHNhrF@8-AHgttd56Jwm6a22rzozJ2Rx&S^$Ff+1AfQT%zNf6tGGk2EYFf2C!P>Vqf zuzkaGd+Ki4Uw2agegfAGK}3jH6zkBVmn=5CXjax++rkHAbm2=cgnongc=O{sV6|rE z))6mX*$^c?J_uKlOc+Ukr#*aO{WTehVI=MvXbyIur;4y7%IBnbf#8&Oa@oZc5YdJh z=!jNL*I-1Bo`5~XdQ6NE{354EjvAdhSz6R6y>2U8SYJ`*2Qm(l^!p4;8YB zBGy?v+yh{#jb=g4FZQ(kD+_(xZ+~g#$_y^J$7R0jdk`~xjt~M 
zSR3qbbH6WcGG1;G%-eO>SI@hK7(J9pMsr!x&J|2?fOU3!aGh~vpv^|keF2tCS#i6% z7qM%CulMuj>`i_+P?yvU4gxSayHE~H9HNLTQ%0-~pp1}+S9OWO;Gn>bt=o_H&b)9M zlZMkB>>oEwmJ_LEg$Xu3%E>*Re6l9@cp6+*su4W=8x=U^jH9<_-&n(iXWO@5Lv$P5 z#ejpSIl7Oq$DUid2|f9ygL)u;O69>Uxh>F14RFjARhR%YHjgPmbi9QhXj|l>6Ke~PB2NVqt`p5bd~GB(2g zDuE>$IUTpwJaaQb%jSc*voH6guWE+ z1_!|wKju-STZL;X?3z$SJHf$v)2uIL(PJp*_u#sE6V_zscXk$EcRtHHn?g034(-zl zgdlsu&|!YNgc~|zm#xPxI3+kjFXt|sS|>v)|$8=AYNN!HN4&np*POjie6oJJ zx{fe>yE`)o;fA2_u8V|&fjJTts8`yN2)=zlITOhQWAdH}HZYL+3(9YgK-t0ti1*?M zamYo*gkK3Y5@UT9t*spJwv7qvO8MAfN~q3ar}qNLkp+ z(tt8ZCp{~Dqw7P@Qp2B8>h_#+J)B#b=KVCkcN#&^W;Q@mA^cGDA|Unf&4|;k8Tfd= z#ya^vDv*n(t#2aXst%D1`ORdrR^+|Lp~=fE%oZ8EMT0ux0~ zJCW|~Y{GQUVjju-EWVb`Ph>21Bw(f0uz4}7!bQ|zPHZ(AAqHMY{%NAL31J|wZC!NH z2h1Y~8W3s3AGd+l&`qp znk0R2*Z0?RZ^N5iqRFl!oR;`;N4kPd7m77CgYS9pq%U~?* z@Cte`XA=5K;N#UnqPA=a$P2x%_;G8vNwXgXMUBHsu!}aIHO6zjrp}n1Mc_++S?W{r zEIL)Run4Q7+Z!ve)C$(^f)BFRhm@Mjogt92l605G796snbPYb)7Z3z(=NZ^sSeW2l zF-v&75G6atQ;^U+y25y~<&SaTFl6fFL;&N=h3;b+LB9+8Co&D+*yP} zLsB8fi#mC`7y}`|5H3L#^+^9QlcNqIiXCf6;S`*J4|};>S5eUPe*nZn?(+PumIP(SoZu8UVC(pk;qg2A_+h8RUd6t-+bLZ;+0Ol2S< zVTjSCpcPd~XbjPI4nj=^fYV}b(qDx&Z6t6hSm(g*f;Xos{UUiZc7yJvF(EXQ8Yl1% zTVRp2)DSjMipZbJ8UEL3RN@HlDa~RRS9pK1Ss8Dt)9Jd{gDgrd*Ae# zN1piVGhco3kw-W7_BU6)Mhz<0mhifWOH97zwa3prdhX04kKTIZ(b-dvJi0nJa{>P@ z?!Lb1qDt@YKI*JxAJy2$yZ8|jxusVg-QL;SgQwiq&ZA)Qwl?v-{jjvYxORyoI}z#am0}*-ls*-OTWEhK-uTof>EyrSq;muY23oO`YGpy z=hlwWPQ6AgXUd&<79ms*%H1c<4jI48fo~)M&;-*I5`Yrpom+?mU(zsquivrZZ&{ImIE8S)#acibcVV)BmZ?sxW1FX z8l4|pzHE0)ga1LxxZ>mrPj+dXPT7RrIU{nPm_CcI@UHT3A%}tv2-}2Zh{)D=z|SJ4 z`|{dWwIduQ?taNp`swuL*t}rn1R+dh{&O8s!eB!XB85eJ#>^eK z8AKtLAUV5J8HJL5AvVQ2t?AL);Ih~YrXC+n|I0`zXy6b|VYK=gVY$p1xSQ2S(TLE-_OM=?LIi@kni^&-=WZAzJM%iR za>gMxiK-d|5n@5+Wf5R2hFuHONq7L~FQ=C3j?C62P;8jG#QrxdBk&{^5%_J{qKy|; zr;gFKT@i@aLi-=)e&NMs5ppHm<7zMRP+8?oJagVRMMk=a>m3-rqq(LfQ(ovw9&LaL zs|Nd8uoVD1D1@*K!x-u4nhmGN8lw#kLF{j9d}S>b=o2`3n2Yw!a4gE>(T&1CWm0^^ z=<{yOK$ayT?n@-@wXJ-(^SfNk zubuB-6TeVIHsd$>*4O9L5}N6gQk1btnXZ 
zF?u;BrvZwg?1e^4m)M5hdfV}x&SypC;o>smueOnXHB3#AF7~f$yr-R&oA86?Rfr<9 zI4v$L5$ZSr#_*eucxsykt}=xOUGTVh z8Id2q5oZmVq5~KoKHgtBvmy482sEs|D{vQIF)@rzSbR_mN*CQ{gUL1<4h6tYzn~Pu zYbhNHuw;oqBNpp05^g84b7HtS59A!H^TaD1XWx68rIofMvYvTOnDPrKM?(`O;lNe7B-fuTZ|&po57C7No{+G0-Y&sWmH|ZgXO~2mgH?>Vbw{*Vm>{Ao z*iL7WiE8dAb?_C5O%6+7Nj>hXvk{5bSHet>h1xMAj#Kh;_+Ku?46OL|ZHORO3Zjh$ zqfXh5&Yf|j4;OL?dTH>e{-W-*?ouxBSU@(krKlDcI>jJUI+td08ASLEyl*pak~6;3 z5#zfA8Aln{#{w$pg)R%wP_^^1wGn6r*K_UO&XW)`Jcb2Rmr_~K1C7aoJH)T`s~riW z0FOKxsy>Cib8{bV@2@@iWXAW5b@CNq{N6IW@Ko+QR#jG$-;BqqOybE|OgR;TdQeo4 zt(%e_2dWqR9lU~=a=)Fww-~PCvsZ-2)O(BZYk#}HwAE4LSwLv65gM}po1pWzs(3YT&KH_P`>Dp9XPko%3O<9Bbv zqU04Z23lQ{1Ew&d>ip_Rvzf7q3{~xk>lNAltI~wJC%UxSy2HEzPMh1BYf9RP&6fGA zxp&$LrDjiZYTEB1@z;>)`y-6&@UUk)ZzPr=Qhry1zZ+%#aTx7+#hg>GvAC z9VxEV#U3h2wM~`b19q`Ebj{R2SA!8eiS=Fpz&P3t3wi@}FvJFf#k{q>cuOLQVAZM* z%0Xf9sZ%;2(0}EW`7F6^B1b5MoHBXZN)XvIANl~^V`+Z}JKOsHMsf?Gp|y{+q2P|j z^Vwpby_}}TQno9yIC5ha$4HVk%S4Yv#hnrREQ2N^)W zq`G#Yb*ImY(!)HF_e5<1dWwtYVeyX2g|yYS)WPBjp$fFHk4*T9^+ zZu<+nA(o*7rf!d1ih7PCZ=oB4Z;$l4KG0!BDdV35m9pRPG`^k85E zxv80?l8l%55t>UEc?%I&dt&*gUv_JYwb@FAVG9-xgzpHgWf!ZUuKKx07Uak?Nmg3T zClGOlV0U`#YHMfDb4La7*-Z_J>_Vk$NT^%;hzcv3MY*dKOmyAWHC>Pty-JX^4IC(y zkO$&b(pbwqb%!abMa7FAAmA`IB{?8P;oSnDa!r0gw@8ayZz~G3s}S}IHjLA!Yp*Kc zTV95BuM^ToURtV*c03iL=WZU@yV0=KcrV|76(Mc6ARWUnszY$;1 zm>ABKOUye!eKhL0x>gI%{0dfcO~ihAV+5;0wNlXrV;B!z{bdZGzcW+DXBg37c(AQ!^i;RF( z9JuJ@&8udMqf$WCGYG>`TB4CZz^sNoY9=~rm+Dv&?T9*v1Dti1gpvh8z$G9w&WDD` zP-2Z>M<>0G`cycPq6?!_mL4RfJ)DP9zCm~|Y#P}0F0QyEKbJ#&3Po+_!}P)J1|cCa zZ7uOwq?%gD!RZa-7c5%f(m@<5P=AB9kFK+ku1Oi5-j5*qz^-t|LJ*eWBryiuao-&l z4uw&MSS9u(;GWuq8c~A#5e&`Uou$(!#SZN1?1dK!HekbpCcm^?tXUpIpaGa+_i=M* zS;2mB_WCnIl4RfncMmz zrdBOip|~Aa_WCD0_v`Y)4nyMVmOJsm^i(xoG(-S6QmzkXR2Idczld7}C9t_yys;sp zQ)Cdu$SKlDj^e!Y4wu($+?2?KQgqa%4DFnkrmCJKKSo4%mK1^+mdrjjg!6D$ zd|h>C)sg~Ed;NsBFUO*S$wBZabkQ^$w8?RxJg63kaJHrbPmJ^F2ITDh)lJ8xj$jIP zX358_AD)(ho8t7G^O7=?h{(ma(2zWC`4WQfWIvoVIHs|-UUE!e4Cj?%tD!T97v^0k zrQZ+rK%HNI_fgK=0EFegM$eZ)oL)kd=UiKXe#dl~-L4r&OHJiYR& 
z3aw29*Du&%ma(*brpn|#uOto;_Jt!4$HkA3OJ8t2$?BtNn`pDVzUD`Z!BjCh~ zEF#?r!Wu8}kf812;SP?e>2v~D9;~Lic$CsXMW<|o8*7$5T@-^~Cv}o|S%QUq_{~+( z9d~kR^42OMJHlPe9lH-SLXi@>A=0@|R&@J5zf|p;%4NC~3zI%Idn*p1~I8)-Wxca#F5f{~S1)G5? z?vtaad>lcqaClaVF6vw=aU~eJGRxpGVS@&Op$uH+=0BX)D02Jfg+(}9TZBw(&ahH- z5i&u6XE!%r6S8{=H#}2n?RqgZb&{)ImKG5i1Gl1tW0Sk$tsKmJ`V^M>dAa3O)?6)+ z`5{rvBYFShA6VQ~$1}HcG})I}ka;y_1$?(8`tuSvE`|yPAy{>H+QCoojt2D%Yld=` z5lw2z!C5sdx)OBGU%xx|v_k-PsWFI#wY#Oc+HY+6MhEZKKbGt-5ct-I^1`t#0k8H@aFhpb_-P zudnJ&e{81XQM5oyt#kE}v(zCj@Zr3pvEmqW^$o$0JWQoP2+$#+q1Oz#Sy!PzUp6ImXreKki`V;=J zRAK~imvS#R~*+~_QlceSouKz++k6o)vIs5_&h^rytZe-=?>7iO-^ zCh}27cp*mfH$AzVe5+>`cUMG#&aL_BDmn{;MGe7+guFBbokjH*H>*t_@g+!dMVT{G zA|j!P340GJ?z1>V*hLO*0-uAGr$0J?2T%xsmrP>;$vDP6K94l3B|;E64aVC0K$-zF zRk{Ha&C2H5J~%YJtROdOcyIf$B{>*FyKmw!9)LN&b*eJnZe*bx1`h%NqK*?Ij*Cw? znb7c3E_dORN|hS85>U3b{T+U#z@Xc>_6I^R(LE-nk52El>jDP|86S540k`3g=LjTK zhqqC2w`zsx*BJU_b&oYUVhI5-IZKHVctG#Lv+~+D{LdIEa2#Gr&x;{!LLk2ahEclc zZaEe9G+Y6xbqT3*$AVL`$CS;HQ#sIB;N~emR=|1|CXhO%hCCm8C5;e3bKpiNpbDnS z1+E}4`UFDB(ZQ6!SVFiDTGY|v*C2|0xw8)D-oZcuuq-kfw15|Kuik?n+>&thkjJRa zE+R-eY)$%0)}z*6x1roS^mV!<=yAbT;@JA8>{G6H-0``Vgp67w0gStB+oryUiOkup z^w^RSGFLT8W(pZItmc-yiPrMg2BI)J-^Hqbpm9?eRC6>GPGcCvgpK1Qx|{))gUw#2 z@eagXDo+V)e=+QaJ91K!rtR~I+I9P44j zA<(cC-AlZnU#Z2a<6RwjG@H>HQk<_vlL7I9pX$=inz}zDkUt#l%>G0vMWo{#)8xhW za_mfrcpUAFZjV8-$isS(Fo90?M6J(@Llf2&upFMx&N;H!n+=OK1D`E3<;vX7jDwB( zKssz6m1BWEOwF4#3NP*YN7(IBQXyt;#$61H(2sA!-c;^FvNPcsF(QM@oGJ+q4{qzM z|AU;P#Y}rwqsK13)~i;xcJ9EUIA?5YqIYi!sK!*`)@YUX9RU`!#nh+u!aZ_p@UeuH z@0o$HUPO}EyrUcYhUYecuEjOABm~+`lxg;FDjCIPZ9==%W#pjGJkUrB)01rd@uF~t z%xvA*Hf;RM@N-l+5#z>RoQO9ptW3l5_o#AsP{EDf>EOz$+@c7C*Kyyq93uC=5$Xcr zEo!%D*(F+_1S=ADVN@d2aATN{n~yP8eJ`MvZDRz|5Z#yn(dyRH6HoLHR`2XCEp94< zpdy#Yc>o-MBOc^Nzu1=GRO&%tI1&0~htXNsWMhQFWcdE5Pe^)71z|#&!d)*HS5_DI z*M(0V9@DZtVWam%j2F43_o!&k2j`C; zkp^1rnRgcvpHA5)lUlED-Gb5zyE~`?-N6}=5Bxb~Ah2IhsEEtJ?6%U8QV2)Y536id zJ`o8H>f2a+Wd-t*z5c=0+H&Pfu{0aIAG+g9M^YBrSv#5~7Qb6G?D`wK`jT2Yb% 
zDcKd!*KdfOxpHZ?X_lLpeojnej)j^G7yVKh1h@OUb2L_|3(BHl^@V+?#d3AgcH@Yy zUap2KpSN~o@x`vNxKw6y$L=bst^N#T2LDnh31!t%FJYzf64@crA}4BKBkBVOiE~#2 z%?d3kF1qIF$X49p`|qVY%YJXvS>Q1UkQtv3^`R=9J%R}%L^Y{bY{>=MZ9$J+NDMi| z0~Wvm_KlMSrHr_i@c0h*G8~fRxGrb-rn4!60M~0fl9AHi2=r$@k3%&4R5q+EyWx0`-EMMhj|_-*ujOhd8x`19!selxS0F)v4_u+@uCT1raK&Pk4?Yq>P@PDks$Fx8(E|F3RB5dFFy&)IkkFWa zP9&N!jC+rnqQgW9B7#ED*2Q&^Mkt@a^VbpRX(C!i0sb7G_7o{idRwA zbLF&TCIXg8)c%IB1`ySXDS|jHPaCBV)CY7KMm0)TywOI`kGXCTMVElobd%P*G3AK*B5~?Vgp&5 z7+6Im69Y;uH>$w20J0cac%s}{S+-rbzBUknfnoiYt|A6QyCaC+N*`V9RgP9b`K7nh z^vemt*Rc2kBFd z1O`0^9fR5X$*NXsOyLyA@;zZF)8W-_PB3(Kku*+SQo#y}Gzb(2w*bj6Cv8Y+pil6m zpmjdbCzMYT!xC4c$>L|DZU8*naggU7h2-5?%JKL)=P3xGVz%~msQ>ZfOu7zV8GEx z=1aV&7uHXof(Hep5vm;7Q&83P2$#3EaPhs<`7lyF6edb-(cSo9)y}ml_4RH%Sh3V2 zHA*86y|~2JCN6A7KN>#zD(f(}AUk*QtfR}5zr`3QY6uBdT(j|KL90Xsn32iPh1pp| z(xs)3tb4G~;Q&}y3b3w28dVaPIVxbfnj+?x58Ojq7>W2(;*!fHl(8K?c(^F6E@&r5 z!Q-rIF0eL)1%#biZw@p7XAX+IAcc#q&Xvl8V6E=1th`d;6Nx=Ul`^a5nw4!!m6?I* z7rXt{>dwZ>hWfNST3ZiL)DrVAvg&XF<1pu|VjiVuECGCpof6OkGaMC6`bb6qGoM@S zt@gZgT&_}uyWA~*+oDhuA9C>C9F0%O(YO@AtF|DLbv~TMx2zyJ?+q+8LTNSi;k`k+ zeqwbcG2yhFGkj#g-rVX2B7DO!9+%IUJ^#V>zFq~TvQ}`3P7Mm=lMpb1n@;^K9+e0q zu(t@qYrQ{8ei0mv>`dKetd^$kMm&x0p@(^g1H-akqgb1k@M)=6*Jh^zabdoT!namh zkjdY|Wx&eHC;OD267Z3APnB5MWu*qU`f~C^Bi$wL3BiZNiSyv5E>(G z)$%mIZxezwGc^TCO0WA-vw|iUpcW6HFNd9-ehb=`-|>SJn+oWGipw~7NQ{sEF0@BZ zPEWfL;o2fpkItbP6AZd!2Q&D%35bMJdEJQa46%!~0dZI?RjXJLpWrBj%b*}*L98J; zyPLqs6kyu8k;x~}j}!^brp)N;z2R~^=VVr)dMR&WG)7fc3rdAZ{o>H*_7Y#UGG(Q$ z8t^vfu^QEGRdAQil<7|5qBQs7jQ~@a$DVX6<*ID5(Lq&17$KV7bG-}KW@fMSpF6*B z9>42hb8E+uG%aoiT~iu)JjRJv_h!)wgtMp~Woj%9Mo*qz8I^iigC;4z05@!KEudQR zwg5UEgi&c8yzybJq_HKgo8j&l3CjfCnIc0>4W}Oio3CuC;ERjpaXDi(z&qqEu-Y1^h!W!077+tv1G@+urD5ds15w@7HHq= z)hZ<5G7rrYIu`Nr$FUZ5O$}s0MWmR96XT9_SQg>1ED-QrSP75r7)YX286kq196B{t zvbqv@L59k7^`AI)g>2af;p|StiBT4MryI}k<1*@V)r<)!j!CX<-|HpgD@a(aY|xL^hD`kfRXl5|7Ls zLTQkrY$m9PM;UN~!5mL8d?CS*cC)=X~#C=cL{bOxs*gKM?c#QXCrulotQ 
zxxco)C)V;U@|?i}PCF5TbJ}r7eWd`?Cso?1rr_c~s~>L+Gxz*i(3aV43FPUW<**6P!QNx7K7kSweXI>BEY+B)8X>0o zh5q~{IkSnQxF~-2u$4MPzR?a7=Yj*i3ko8J;}DEtG2!A`U4)_Uh9PHHHntHLLHWU4 zn0s#NkW0Gbqf=0UDAU%v0zs18Zt2Zd$-C8*4AQO8tMMkr#ElGcUKD+K&S?+};Ir(! zNWtM>>URO`lrRK#v;~Ws z;joj!YuCw1Uwds@L%hlb=3?5U_XEn&xNear*P7NuuS3S17xNU+#z;IBNcRkzz5hQp zz87$%Gj6|Ciy(*4LPRXkhlX(9+A?Qw3E}QQAl(Y?%@F=35xqe{XJuyqrP4FMIo-x{ z?N`zc&L@M4Yt)2WYB>Ui60v3^V#{-2cYBC91L$1bo`qAryJF|cGjh33bQKR4T)b@c zG>GBN=LUbK-b}X-Z;7C+Sy;R=7cPfQnJ(xq66;1}3*7`)js9Z%1;x0FD_@Cufk>xz zaA(k-N*-;_2G=~&=*=mibyF#6(qw}jiRQvT!yIDb<4=i2r~xM&%K-vF3KQ4sZEpi> zWg-w!O}J%YirmSq#(yMA@3AOm6x`1O`wdAma|6#dV_m0Dgdg|(1;=Y$IzMwcy6=8| z?)=sHo{OOBraIwg2392T2m&-iMSu~Oxt`!9#*td91ME;v9)RJ~Q=(gnqD*G=!zJ7$ z?Bb=#Hi|-|7h3qy+-9xr$DUw0&V`YRk@OQWIA!1j}(t zh{_&R&2m?w!^Q$99oH{x&z$i5f+@fY0N8~VikvinO2HkY9-;yPFeNOzNLE%%KF3km zF_p??9IaJ`6iBZXsQc*2?sCZXrUDM^6m|_Eg8A^wOeH^Lu!sxcSbMwu)PPa5-+`Ij zDY%on1tc8C)#sJ(9>KVZ4(CVD7!FK6sE594ht~k;c)tA}SGqyX?I)Z-;}IVBfym&h z)EX`N8MP9eK35^?TmJ=^l;}ly{n=9$^fKyMragJgyqRa@MyL32-HU~n#WQaZZnBf5 zu{mB^B(I{7R2B%kzRINB4a083s2f8A@rGqBfj+3;TWdNLW5*1;l^63T>L!V7O=2oO z6u7UVa!&XSM!KG-iw-~~STNG=<_fMn6A7w|_hC`S9p%7nGa0)yjus>{pd}{Bfl-6y z8mVA7C4@p0Hm4y_8LXL1^r#!-6-bF|LL>y97v?2MmBh&tY{>=4e94bo+j34|%A2G| zPGMjqTE64g^6-5@1*Q5_%HQPToLjITxVD{|5X4g75??i1M*)4Mn+%No&5H zMsGG5k};CA?_6$h+B?S+C0JnS-fF=J$`MDNMmQ|zcyP%#0!py2sc+ZY93Bb?-{NLxMu+8NaNv@*$p(dSuiPw zY!Rf2NAiIpTXH-YtKa?%DtbwV)VITy3O$Ybz=6vs*dg4os0*X;Idb5egEyG0CpDpK zyfQ(Y%-yw+y}7=M2HaFPN{E8zgsSf4`e}IaAh?p8yAZ5xx1!R(EzMz>Ql2?+vkfNl zy!r7-U15&f)aJU)iM4}@_+1p(SaNtI^A^n0hu^w$@r`=E2mjod`^uJoyhZ6uCHy)L zpY!52QkP#32s>r=0XqO*V$3GTQs+w<5b;PLspQNPW#!u2*urxsvnca{Shk2%TzH$c zO%uW=Km*Q*HDRud{NT)$7cL8XZ80I-sim!hc01qG)$YpPfrKr&<`}&?0+-`|0sQC% z;GdmlWF0%5d;q_(b+IxDGg23n*})}@Z};}vstQ&{VBccV9F`Z16&GQPa(iQCZ*9rU z6=jrRJ|KrS0bnrSS?{g)8m-{5CrijzM>q=XPm9iH%=_ zW~?qz;WZ|_xf;UQc5!D0;rc-J`v;5b`zx@U!r4KCWQb3OTrQ`AFmGfumdBWxLXBvt z=gz1B8)`GX>X5l%(>v^7@w{Ga#p6J4+TExuG(qF*#f~WmNGQVd%z4^ou#SvJLNAoO 
zo7Jtx&)N0ibHJ_99K8imTCCJD^`@zd49_4}tDG~DDDf~bv0{bFSf3uJ8-tqJsq9ZI z?<8InMcTlj{Bm!RbvgA2)?rB-V{VE|2TexNM+Z!D?S=K$F4fSfkWoMwv$~3-lgzqZ z9)#cYCd@|r^nVmDM0YjD9GHoGhCs1zGx z>*1^J`Ze8aUVZ95qIly(ieay4*}KQzU$+oMc8SdPH-WD73(j`a|kA*l5EJt&)RA_A5M&=Do?xig}1fo&FypJkC%{p;?M z`+#{if`M_z=>V}1@C9Rp1pBA{%q;lQ#bp;w3!oB~-uuv~F|&w0!V3R(cy7IJY$HyN4V$%i=sy-#W1+=D!gR4Wiuh z^eNW|AzlFi)8#`KOiYox#Mc25;+!y|^CsFZLN4~p$5kuYKiwy1|hnMYY+5tFjBxVE<|*75NF z&vh|y)T~ea(SinxE{ccWdPSq|(cv6lJ_bdG?;x+sAR}9wi|dRtqenzBYl7GW0fv^} z|A;#rExU+z2$=_(k_opJWIXkikQtkjV=-R2GB3!C z7g_FHkYkJ7KYXw;=OVpEXjnJ{Ve(>YnjuAAB8<{gesUL}!wOFkRn=p3(&+>)h@pid zjgR}+-p|z@0_4<#dAU2P*4LNU4l0DLbk|hnI)Y`8Z(P1EE;`SwVG1|Z=__n2%`<>y zjQK;HFo)%*fT}nl`MqS>OUpL<(=(TELTo<$IE#eeSC*i3U)#O1DAvct<=T33Qi)*$5jP~P8&0?ZB*#W7q6xWapl0Ta355u6c^}7Bo6{mz548!Mszv-Y%rUzae}|+a zzseYZcGIdw3DmG?e-@|dK`cB?A%yEn>U}!zESOU7l)kvcD2FL#N7zrrG7ah)>;Y6M zjZUJK+*&t!o6BB*SVt)GM!}k~X;YWOt8MmnA#jCV(8(Zjp0Js=Iv>CzUz>wXkcd3w zMi~j;Ml~x0oe0U>e<|{B10poms_!UsSduA+Y`%%hYa1XK`?zPtI&=O22SmGF?LgFiyOppcAc`i*Oh|~` ze5JHO4KQx6tZyUgncVf&Omm;Zyy5JN8V@$`buSQz|S z|MuegDhyXNDB=1RE(3HMgSUewCzTFB?gby{9Bx+u@fc31qQ23WDtDOZHW-iqcb3oz zilRe|r>bO2VZ)F!tJCHJUBG%^RrGj;k2_T9u}y=|gvhfv76yho( zdR6blfyR!}fHS(*xEv#uq2NZiJ{~KqB`%RW0XVso98q9~qVn$9ZL!3gY2p1uisE7s z)+zjvV5ahYXPxE&6*lT+rRivD3&#$Bi8w69^!80LeFf3e03%PGLpVY;x?l4io$j4+nwc$}ZWa*)5?uo0y7jt~ ziZjH`(?0x_sQTZFh*vd4Y2iE9W4UWsg|)S5*4=}0Y1pSgT-`0?p13KAQfK-4Nl%3T zPsyztl6L#YP-5_gCItU z-9Y=Qlb}JY06C5dl15w~;A;{Kc{?-tYniGI+EE8|LbpesSHG!>{fm$@Ub!_3UK2H! 
zsioX8c5`ic4d!-Ew&X*9K-Pb&vY5n9tc1madWuq&g3?NfH3-?pO>D~8k*lTs<;_je z5*$GCa%&6nheb$0D?1zj^Ubwg*vA2qW12)bBjBR8#s<#f!^)xyxb3uPfZA0k7ByiJGJsEN{YLXVlGB_ z%vnFwiQJn)cn3m;GLEGsNp$0|I&B4m z#$XsFosQF`recR@cNVvA$M>P>G9pAus8sW!!8LW8;sMm(fgOgP(&cV(HR!@6DKLxo z7a+1q>nDw$Td*AA>jbyG$&qUC9>Me~-n|#Af0(=Rgh()G65Fwh$~zOP#1e1W*)>yRE~&HuSX>aR}j^5--QesevpxSYJB$$uAGs;)2HDr zEr)d3Rq98Rw!^>a&BJ8@o{uzX6@g~!pcrS@1WqFurq@<)EW%V1P>4AY%X4pkH-e=b zVbUwz9S6Ap^n%=;_zYw$%Q_(gSlbUCJEg~A+d9T>YTx*~|} zHI@{}8#MA}u39`9#5`ZgA0*Kh!XjQ6QYR@;4VAk~-Ngs7P$Xl}DCzpGl|3ha*jL8y zurt+Y%2z#h37lwZAutDlA6Al_#Qh7(eqw29TT+yD98UY;TI?E2te42^ALq#o5_|PS zfRF)~lfhnF!#M~oU>|J#4tkz)<`&fjoRlJw5J+0#LN)IaQaK z>}6)>%ngm~)T3iDCgh|Hoa6a$niuCGC_fra;!*dI$`4;ppf66|EUs+zd-Xne{_2&P zE6!i^QN|!*qYQwc7Vz7M+OZa1# zwjb}EdEqqXOpRHSyxY>7B<~}Vmnfa_xmm@!sU_(rdv-cq&qp9BJV_5&xW8g|fQJqu zOD}r~Ty=AO>t=s>e`CA9y|{;gWMoeny<~Z6UPkIP*U>pgkcY)O?`E)}W(*rQEz*;a zsMrnndAs=4N$evz8WpQ@<++N+hG(ZrJ~{};9ei{I>CBlg^*2+Y2!+|h4j#w>#>9~X zi$ZW^_Ua43l%pBMNLOsawQR*5JrVT|vMX>GJCK9#J-fYsQLV1vqXu|DX)CvlIeqP5 zTKHLn7g?COBKLShs=Xu~ zDp7HCN8DI}m*554jzd5W$~Hv=B&Hu5Yu~7Gq-uB(2(fC?@#=(smqg~EYV&AoEr>nU zjYy((ZtrYuFCsL=3WE$$0*oo@Us-@h#|x+3EO30`{5p%XAch1Bx27S9SN2fAL8B|{ z-iNz|xkIEQf2*RQ2|HMtfZ@-#a_OevLIg(;qs$`q9xN+=f4U=Cj!YinvS|xySaPYR z6hI#5m~i!4;<0#scV2~Xm=#%-Dmv67Hi8@IAqi4?+|TSH9$ed!Q!*6sxKa#&i{Mtk zKhdSaHZj3Jbyu-l>10Gjn(dYIhX+IKor?BtF=Es$dUyZkEm>UjY8ag?>axaGg;*D_ ztn6&U%tnk`z#HtZXyq|AJS%H7GJ3t}uj0}@R>+*EN5V$UG*c&82CnRb=bhtnpKFiM=U?bnuL1KM6NJ&S5!Y;>W4XsV{K)HgB;#GcbYJMeR#+N@3?gAiQn^dZ3G~ zO-`NkB>shcy*OEjkN7&wA#}92YzE@(tcW)gn1RLd)`S6M%$e~*`ms8rBBL4;VoQ`L z8*f!DD--T~;3p;Sv6@VY3JnCUVdr$d{)f=e=SLHs(0=Tm)h?{xBJX<|2bHvMzBY+6d3;K(zQTw+x_O@1?fCBS- z&fT>>bI>~j8zE>B^`pM?wYFsKkNf3&Ry(-vtx zsiMZ<&UpesXK{2IRP70`>X=!5PHt9%(})Ju^Ff9WA;xZBvi#`ty4RB4Z- z9#l2qqe0FaY;VW~Xt%facNaI8aVzwS+U!RnpSJeh1>cZ&-P-Jnrw`)dxZWhoX)KU= zY->1Si+DG>4sUy0TUAp5h)KoVn?Ya`sFr3X6!C(KQy^a|?HKaNR1(yk zjRkc9Y)3P{LQT~D#*TR+U>E%$=1W`x8qR#Jy4!pVIQP-jGsGQKOA=$&hvjZnuN?{T 
zAj5K6ooElvPG8E8xzoqjC-U*^@Ei%t;YH<6cBdq|XhCU!4NGHgM_49mC^>PCF#8P= z9~rmHT<(C>u|=9;weG4z!!i@l%Yeo1b{tj%{Ib)s)CH$NEQS~<2D>j4)o;KX_h=CG36qF4e+6A_ED zUL)AQ1Si^2ScDPGHvl82z!+R`hOJ?)MN$j^Eh8*rwK}_pJ3tkPcAJ#$DF7ef61WQ$ zL()>Bfic6>!odng5_M}37wFAZ*oKTz-1LYY|YT z^4$aFM6_p6@Cq1U&xjNVaD*7=JVyWo*=87hP~oadm#o|gry9}LtpaA)C*xb4 z+@*6u7DwNgmkw?^wp)m+pOQ#u3$Clys!D6o@{l?~SRK~wioq=%rAv`Jo;XYKL(HnL z&s@3oxP&CIvzlDHa%&M29|c74#?aXkjqIIee>0heU`-&mX?P6{K+9Ux#5>Xa+p}FMDl!@nc>AWdRsDN)evYnQ4M%fIFdag;gi+A zL)>ZWZ&j!I)w7lUU4P7_-D>)-dkX*Nr6#NUKl7QaTva`E&V6;!()+84zxa1uR#v(* zRO#+er6-QDCrsOVGC5S~V?*_%s%8upKObOHHN#|>(pAMeIeQr8U;E((cz1^??JD`y zvo1aU0he~F>wG-vvp&Sf7c6VC`rXd;Y`&|`?^$h3QJbqa{`WIhh?RD#`S@q|%tGu; zr&>r0bt)DLHD!gWiZ#5)4<{?s8Klqz+Gkx~VRcPZ2Pf|Gg}p%LYZ+|16a1EGVMJBu zORZw2g+iSP{>W5&kwWKK#h+USKH&>}nTJkc#yGO}>7S?uEm!%HUuVreBnb7Mw@%^T zsds+eJMNyhe72bWo{qMQ47ScXC#24_BF#*{?o?;3Jr*w`cqqp<4(nItYvvcl4s?C7 z_6~${tnRQ5|iUrKa`(|7rpd=q3^jX*W_>g6l0Vp;6@6AnAzP@aU z;={>*ntVA??Vq^I?tR*REG~u)J|F#SAXlgLYxbeocGcm}OpC=j)$iH}e&5n=b?~qN ztp3MRt>f+b$_6aPHmA_$)O$J^`O14&Anm{`>w0Ueq1a)qhGDQ|q19@VtuZaaSIBo& zu?R6(i}Hmyta!y2BJ`9}s1q4vCI)MslQ?G(`iP|%5FfwEXD#sYV?OITAHUhM8W-5) zs+C~9Ivl>dW-{S!lJI(Oix3aBC|`()idTFgS~f2e*2%Qk zJeLX6UwG9$ebtUjF)%(pzRRo+@$qXd%dHK*Erx=8-1S-4`S>k9>qC6}xX-%I$M3YP z#?^;DvCsd+`rfIo@$sy$ah|imUxF38@ z`_rjzq>UhprQ?o@nc8<@wx z_ECP|(__cqtVTImf3xe# zU7`U89{gsgEck~wDf5r>k}6C z|FfUcF+eUaSwPQYQ*4uIp<`1g_#@M%Ho+(yK7l+SK&1 z0#A1*UEx(;s&IrT70Q_7lLKWq!3P=BdV zR#WE~oBQ78no5PTn!1D3R4SC!bZn5CN`zszQ{+`F;Tr5Dhy-G4nnB#7^`Eaz^n2x zREPc*I^(}(`(X0;7`{bU(r<-OkJ@hhEgKuOU74m)+l{{+wp|ezj@oYg?Xc}m9cQad z3n!7|_wh%jg+llF{{N~+yX=rrRs1pF+|_jM9~h*jw+vE9YG#8(*}<`wUlLI}Ro2zG zfnh=4onQk@3s4mW4}TtZ1QY&CjsSbkkNw#a4-~aB>@bc^v42bp!J}a7fK*U;J&MbK%;Gb)qESRvEqrb)l3sJXBDrngQx zEfb)d{IM{#IR%whxB8+)3)EU83%ArbF-(o2`IZ{1E(PjL3nJ#uAcgAi+1_^VpR^Q= z7$4tfS{JZMd^~GePPfCi?+pd{_+R_1>wNs7+P8fBXtWCwAZmeFt9T8Fy%@=Rq@Prp4;y-~eZz4OdF?xr<_U-*5R{=bG1@AtF2z(?Yvni2AaZR0GHVnOkE z!}i$DTS7a0{9?-z%@m*CZh0rFgX-1l%ic42mmRnqmHq~M4xK7u&7O_%mn@C7)kIoC 
zF8+;ukJGd+(NCv!wCSwV??JElITs7)6(31^P>PT2Xp?&S?1Dor6aUFkjEKyo1ytaC2g;ajG$ooa9BoS* z8Sd+N6!P4y{)xeWtG=XkR;dO`vkdp>L$_-J2?Yr!!Wdb3XC#M>aErs<#79==Hlnmf zmz!1^DkKqu#hwtVOc>#;>_Yi_G)mXZ5s$L#!FJdvIhQPVk47oez$Um;VAnZo+rVW@ z8#bc+OV4tJFB_Pt{M(n<>hr$Um-xu)L_wDR5g}OPJpz@?2%8uVROK%R0hPmoywy=b zC1yDC#~SYusANXi#BiW0e>n)K92VrQjtVMgG#JKlk3c0el6QKL1z7%a5KuWR$Xgu; zRIKeCwswz!c7uIRm$YRI4#t0i=kh<1wr05147PKH26ApV?oN1Shgcq(K{icR z8Cl%ozTFe9jOYn^%KxnJ9*wZ;8(@zEzaK{Bp1L)Vv;4>-6s(f}@9_vHd;{!p^9Uzu zAZPiJM<^sg{=df~ob(N_$IT<0tbv^6M;@VDhpdKc`MzrRtyAteOr&2lOpksd@{@Cn zege!2e`zx~*QeDZE3LM0`tMfhcdc!wg$p%K3{zv?e4)l_f~1ORiL)N0P^bDI%nA5s zEiE5h%jI6o`02{m_xG&6QqO#$_C~wKa(Sbnb1jWl)lXZGhAcFfVq0gBno5NVHT^N` z(>q(C->n8K*QqXAJ#($~Rav=$7^0gMYC?^)(L!Y@4Bh*u%s~6!w;c{AgZG(H+-*yd z^|wrF^y`))>&&97V9!!yy?WDS{fMQ=`n4sO_1pU|Cb;BJcqyThdusHMK&vW>{?1w-9E4SUCzGTg%_%Mga{BId>9)87_de^#pe9=;T z_=g)V>%mQ5DhFjID1fM@@JA5-JBvZ(VG|M@{vTn`02oA|owc>K=hxWFTx0*rud$c8 z#{MI}#$M(c`_KFu%Q$nk5z?QsDu32jJ;%p?@L^X>TS_Zzm5e#=tHtBfcSE|T|?miOzHO5PvZ^V>`$sNaU(w>&*>fO*jaXpWCB+aTcD z_RroR)Yx;){FY=Gn)yAJ`AHjxX8xMJa6p>LQN3ur*UV4Y%lE~ZKV~l+yO#VV%jDoS zlMq-lNN8ru__Lolb>HIAV~&rUNr5hBlkku{j(jzmK|a44pz$kj@P#1K3atRqK=OV+ zftuu{K$E-_W|EhJO!9~_gqY+JU{@m4S+-{aQOc!%_gB^1-=1b>?}R2nHwE?6cb)jI zw-S8Zq?pflZcE3im!&vg32hc9#Mcc_R-EuXsAu!_TP>Tl@cnPri~oSV{zN^SsLYA7 zHaS;+P%q8tI%mjgrTI$KXIIjhWPUoE%ui>O`JC0Su$i=G`O3*=@pQtOpUr2K@oUCR z{H~=?Y4Pz(jRauved-O)(ff>d(9CZz?%{7+s+s9LXy(HPI6_P_qi)n5f6p5JQ=WC{ zlXbyeUit^61X-7Dn<`|b7=x@@#kH6BSXI-$%IkdOxU`oX9Pv&wQ>@a=6sH;o*|9Ef z`mW!|yKXyT=(9+f+#gvD?w+DEdd`NrXsP6JWBN-5P|5q4%>;2&@<^qLpOVM6m`dwx zx4_}CD+R_o{<6Rvg)ax>Gufj!J#M>yq5hHC_rO}r^Dz*>-}$Tm80;Vk;QwUYITV(f z&-cN&_COEa*CVs;L{Tn5c8BDPrz7TbH3s_SIzZeYUa&mzgBv*WbNc1#$qALq&k2>w zk7qP}<_y2bAJFFb$b%ZBW9ceDR=Nm~m97C~ai|<3veF)e^#>Dk`;}dQg&iM1>%C!L zLc=$5Fx<$&a3cr9jT{Ua8FFBG-2m{>FdcmSO`o;E7XC{kItA(wp!|OO?kWHM93QuR zR-d(p(f0Gv_Vdv;?gVT$w6T!4v5>d1khhV^9J*7iooPt~S16R#1o89(VzKJ}SQ}>n zaOLAC3{;Z{W%1@+zWoc!)$lYU50BINoi?3X8~emmddX(llIu+Rxy$8uOpgr$j!u^# 
zVA_CMXKmobBrH@u_5xC|?Q+V3%l5%k_0eh@1`K>B=1BfGA)llEw@G=v6Lnesbz5V9 z&eDnMzRzU)IU7x}j``)TOD?<5Zh|Xf57`rBrJWESAV>OP7FMa{($Y#>c4(2eJHs;s{K?gU0yp6fMjk&b{9U~}t8%GDL<~EKJRxc-5 zR-ClFPzH6kPLB-0P1TKvclGMVMzKj}50Wgzk+G?%x)BFkuWszY!ZO%!gwz*enD2ix zT@rb*0t1P8=yay0^jp?SK09K$f7}{$j0@j>*>aUh<_kHDIz!C!w#!Y=U@kWuK-Z6R z%{NpWCx0e5reMG&+-q84XX?0N2$w|xB2|Zlm==7?CI+bq;l-2Y8s@30K5hg2XO^O0 z=~1H8RSYowvJQ40W6A(_@L>)Axo>-pkD)u)^X_Cm?wu~HVp@P))#1sBX}NC@J^3>h zyD`kzO5eN4rFOC-Xo@GiiAna3X}On^Q~aH2x!lwV{?4@6EOZrq(7$QZG0dRFL5L09 zweQi;!*}`eywdK5rB_z-)?w4s+- zxng$^w88-Q{k2&M(Bsn&+wVVaX}9{ij0{y(TB;y2$qVU3vK^+KiY?W_#NU}tfX1^V zDYXujFYTX)W>_Ai>_@XLXR_kY?bUTBS&nH50$u*dv^1MLzwdA z{%=3kxk{m`_$&KX7-+Xj*F?E%lUcbUvWDvZV=t4;_~mZJA6w~Ql@)^u<$xgWF!cqg0AV56PpF>8I?-Bzgbf!>*2hZmHx^ z@kH&R|hWTi_0St;hZ zBOr4>@4Ltj27ukj0d^w?*o_=uH*$d8$N^?^MF27V1z5W@5vtVMCE` z5Um!QB__Gfq@TN7&KJ{TgTScMB~+Leb~ve#Q%;-%$>QT*4(P|WODT)}xD}s5U<8p0 z@SVsc`J&SzbPo*VEYEk+K$cI@jpb9EV}7ZRlE;3sKhVpwD{K;3X=}(z8!9OD*=F9x z5iTG$5{-p0=GghpF%pIxC}GU?`}p{v0KqH@9(gFefhk4WlQ7FX8=dMmLTcXPepu{L&_om97wErHcbu ztTRp1v0*D@8-K}$2Q^UM#(dsJ;q>8FM&8C8+aTy5xX;^|%iEYs`%e{3-p0|vs=1A$ zgw+oxSXP|0yif*pw=#|lz)jVSha!PG!3Otv)QQ2)450LT9E?TqCrPsx`u|44aAnSbp1@%*IWe#3b9Gl$)*?)j+o zjTliFdDUSdrUj+N#1wyQsi{+?g&O*gxRf@)o~Nn)z(y9m)KyTs^vhuwbVn>%!*5t0 zFkC){?p#kDLU@J$(7sVUE+;JK?`%p>h`=U;|9c zT@i&1OVrM+1HnK3dK5rB_z>G>+R)3aTyfAh;`6JUZ z&_ko76J>P|YhkH!ZN~>3!jv!fH8$m?R;!A?vem*syH&a-%3Yhx$`z3{RCnFW`ZB(_ zTk%Iylww^~R;;Y&Re=pa0b$R;at+`cK6UFr1Hnq~lQv`MJ^Y zR2Ap%H*Frlj`5MFX{ceSyrjoDvgklf-)UsgVT+#AqQ9}lNmnamaisKcMiyOm>DVbB zCV%eG7c##v`^{bl(B8;Fdm{(!jU2Q$a?sw$L3<+yt#Ky-lze*O!|rL`QlJ7KLw^_8 z>~X(wEac5*Y7VwhRivZ$g;w}@(PzD!L*?ZhDlg|yX%tei+0e#9-o`@S#zNjkHn{E- zYiC;8!3%}5n(FRMtc??c;~gKj3{=?6KK15ZzWt)*>OMX4@Y3ll?)swmVY9>z*O~Nl zmy6=Hzhi@dqthh_m^PenyB~Iy6O(|te0-bzRM@2B4zbu-D^39|--$PpFNz;R_unSv z`A$m7@+oGq{uI5KANMVOW@(C91CI0vGbi{Bn&^>FlS%c zPxj|1Vcy)v(ZQU!^v(~vC&HKT{`VGL{!!y>Q@em)+9a~l6@sjEaUhFzrfE7h?0RhD z-8MX&Tka!TbM& zddZqbT7b@YvYKS*9nPGQZjN^Ej4wjv{1wRRn?*m@aHT=zfzvO)YU5gvJLDgUif3%w?FKD 
z#Bli-x^q46PC-$`s+blaS9N#X@^{~V*e%Ljm6{aI8wi16m3LqYQh;1`%=w()}*qy2(Og?HuDOXoj|Ec{> z!dX@nrKJk8hrBBEtfVMnOLZ_gfQNrYQN)s@-#S#jw13_yeEc1sE+~oytFAlAa!gAQ z=<-LVWuQ-1{GD{7q_W9Nm1{fhYiozdlrQ%)Hsz&OtBSv}Z-s$&eF#m;SIb?S%*qv! zHB|RMdwJhnzy$Zyt@tC`EY?+J#mb7JK{xcl%1v}wZgSgow+xV~<9}~0*R747iX4U& z#d(L1Jd;R#NeamsF;eoV_+YR!$)n_)!3!jhp%~-Psf05lj7holPddu(6;v_oN&$x) zpUwJM2gmh!G(_nryEohrWk)~k@4E4Kg_^0n0`U0A>7XthJ=q?gwQB)f;chAN|zh5($$8nbg>~TMV)xXd_MJteFECmVUE%keU8IKzIMjh)BMc|zOa1W6Ukw3*eV2jY>*WrE3LCA{wr5EDoXqAvWJ8Z z$HRB_oa1Nr2_Kf{JBP*c>DXA_cfcMTU&h|B)sf-VRNaX13L6A_NceC(d}q%&{(A*4 z*CY0Zt&R+@rs_t77iafx*(xM-2rWY8uP3YSf3f#bv8but2rg_AS!rv?O3@WrtTRp1 zv0+n4hlMP*@ryPxu-|zbnIOM|#sr1)HsHR#!s%~VwlSP{MZbTI8>Q!ucbt7U^Q*|Q_wyC-i2U}I7K%|rn z;`g*zflI+Gc=ZX3lXHuhxIba+nq!*ycF}T`6XQ#r>q`0RVR*5wALq~7nrIj&e4kgVs^uMZp!v^@0K@|N;`PEWav4qkuhXF7? zk81cG>jQ?%$IzYYd3OqG5_X1Z0diG`>^ajiBi=#JSc2UcW^ASJ6;IQF6ps-TlWd!5 zxtEhu{GDmJ+|&vF&a~L9s7Y9$zK+sfmzxna3BUh0Hl1kb;k$h4c>KzC&=dYk7Akk~ z0XD$2+!axDute>!myV>^O8oUGfOzmBw#~Gmmsz=DcR(Z!aQ9hSuCA)yX1|j}menL_ zslpLFuSzEpP_6xSDz;PyQ>_qQL`}kyq|`c8zO;WHnqhg6vcLS%w3W$r`;2HLICHBs)`WLB<-tf9Jp z*rq1oj}+S%>#C}>SV>LN1s+g@&nvv-XkdV$RJy>)%9r^c>mok~arLu52!hTd!S~&A z-!MPFw91t)+_j?h(30bA1|QZ_xNAkrGxjTky0JVjBBAD&`2;DIJP}Zt~i$Jr{3^aS>BE^d41HuN21IWYWg`r#!I3!GabKXri0f^ zI#Un=nt9T6TNJx%W;(*RAcOcogc=OD|g-cjT~S%a)8~)0d^w? z*o_=u4H}~pMo1qD1eK4WYYTa!nUV#4`WEOHTEaYHl={I{Ey4Pp-Lc{&M;eOt5KW{iwTz03}Akz}FR4A0ybS%1uDFFVx zen$Iz{J3Q)^E24ko-#sCA87q_v;m80X zAYXj^xqz8cvh9yWKV_dxRRl?*Z}_8u#Z9cPE4CCa|ZSs?8iilx0mR@$NRDS|yDj6-kk?TWN^yOy$fPAQ?y zcXo@=CZsr}EYEjBkmdPKFy6Zv=WWcV!vfpEHm-V}>w4ZsrgM|Snylo!jk&yyIkrKlLDQMHk;!SB z+wdFH*syAD<0xVEPZKOFPFh|lgSuPMM+V@g>PE&pS>t){Mno~EOU$bq5u2K-8*#8r z)r~kG={d=J}y7J9%_{k3Vfa zZ|E_9=D?bDmj9uVAd-v1*s2Z-F)e7`CZ_mfOHG|BE!5Ees(PnE>(6^YF61YdA{oxV z9OTU7ri;_R0JEyY^BL0u^o3gT=g@~ihE)1ekuUAPW;y_b zeE{V@FBL!wA8t*P_Y+PML0a^ftJyss=8o5 zlDL&Mk!h)d#3io^-6}b3v84z!>0F8$lEp}1b&z~%`4-F5hQz`kO(h*9e{QL(JIO*! 
zO9SunN2Y~Io2>XFmwxF4pO-3Bb=+6g((@@_=$j21rS__dzp___@pY>d>kFNl%nB8e zGSu~TZAwS}SQDP5NK#d4u_mRXKf#+!F95D8fErN<_`129f-?W)>A; zgX_Gg*`bO->rBUr(*pkEAIHEBQ!BwUKKso4wA;*2d(Ql{^UP0mcFa$QzSc#{$ca*F=KBOJioo_DtRWJoOCo=l3o5?L$@MyL%-OoCdjfE9AVlK=BIsOe%c-8 z6GBX7$1neV#+Gwa52?6g&3qqBkw?Vl*S}>N--lmuy#aZ6*SdRr(Nbjn;fBk4aMPD+ z0ss}nkxpF|XZH%_T!-Xe0`6VQ?BpCD2}{tmbV8Ap4h321G!_Wrgm3wC)G(V3gS(Lr z?nXYi8~NZGRA5#0pBOAZ9$?DH(Ck9q#zNjk@dV&t_+G31$mzgTHJH_vqmbTMEp{yoqrdZDp_;J3>$G3QbxPRhq zp>rwJi~F&tmRMn!v6s_t3u@zIgAAk7waMZQ&^6SA_743=?a!+}j^qSTQQu}Ns?rs#``v0u{`ZaHT>S zuh;dSRjl#nt!C^YeEdDjQZ9}!?HpUqV`oiw{2;#3H(3}2f8As})sj5%d@J@I=ZRu1J_khp&mxFM}?rVPgHv(|^7&>x2 zXSQ>a++|fv3oxrXZdYXzB!i zWLj)cln^Y?zD_vPOhpO7FaMjc>>j?$mmdz($#0mR@L#e}xpNP&0j7n4iW-4G*A9CF z0X+VCxbZ*u5L;zh>ReW+IMAvh9R7O`w2TT>)lb`xq;_Q`L|UqFxX!CWw@OL~wiJPe zbI4Sb5G+QDtApfA%eS4vmqjJS0CjaIS%_(A;9dU6v@mIt6@TQ?FC8KCQiZCH`>I-? zFy#wXMt4i?0p9j2dsP@;r%JKD(5Z>6P!TCZUEk;hme~%}t@z`&TA^ZHRaUH|gs3V? z_vtuLcHQa@SZ<=j`WChwx|Zv!I{ck!LpiO4!tKBAH_1632A}gMh&d_~_DwWg*olPI zVr73mR5rku(hq*o-VN<>^w05;zr1hcFRSW1ZFEszqdw_l*BAOxD_mLx9bc-Ys;c^a ztAbTE06*@-_u14D`Vj4_*Vy;i{Bd0|pXmNK4ego#1NQzCN&c_dJ6-r${`W2a50dhf z{L@N?gt|imvH8V6= zf8w|~NX(yVcG8hDKW&luX`{@qx8|e;XSvt|Ke$0Oov5)2bn`PisY1 zT49O(%LmQ6xh4S_K5`@u@@$c75(W9l1?dX%jcXDG`N%H1f_&qeL_t0hD6Zgu%M*j^ zRvZsIPjIvQY?S$YXN$#Ya0d~h`>f@h#X5EDS7w$ATRU5~1q|;EYdh}_K`uOLb z+3hdjA3Hmb$(5bL=bxK^3}aobY4Dwt0w)zh^0XeL-OveG({Rj($zrs?oqEI=}qOK_Dbp622u4jEoMV;LAHQ$jq0&9ApSW>O-qz4*>!GZ9Yk53u2nQ zklRxQobeyEgRJtuv#MQbzJ1bim6zkoZ`X>FZ=>X}++4@^{f2#A>^pzt0Gl;}A4@1# zx!J12a!d=&PE`DnWUyRW$Co3$fZ(F67yPIV@?ZGD`lB7$p8WGeeMh|)JbXgVC0`RI1*?Ferz6rr&33g+cv6a48tV{<|Ja$Y> zvTdg2UQSN&cc$fXQz!U4(_*utEM$T5s%f84Q5o|4@384aLl58O%Xf!4M$y{e3I8Pv zmAm)=8(>-$WcqX6Q;huAjLhF)gnii55y!sM?S0L#@OXte!Kf>_od zrlksUg1jo7$l8?QrBkt`I+)UOqC{j#(q$bgU)nzp&9FR3S;?7+Ket!con$$tB?xr+ zBhxa_CoBF=I#D{==B3KD9rv}h4`Ire`&FCrQma+PU)i_9K=Gl8K-yRLWLB<-tf9Iy zR&9X{cPswLHj8ytS+SB@vHK@^I%W#Nb}=O2WP+D0Po)qT@Vlu(Nb*u;kmRL`AjwOW 
zK$4d#fFzF+aVmc#FID_VUeeuaa^R6pC7-4vC4|z^GCv(L^V3l?KOH&q)6p|Og#hzY zC@?=6PBnFcBVyWA^7z0dPrQ|NF-4u+izJEta*& zB=eg<8BTi2@U^DO6^hH}b<2Zl@>P4JMj2V@YLYM0l_YuTDw4c(1xa4IdL%DhIg*#I zns~_=qziZ$bc=vmcjosQK?`A)lmd5q&f&28HflK}e1DAa7|8MYT zq8b!SjtyM${f}~HnU-ew45VW*iD6l;PhhL6*}0!}g1Lp}^F8v-Ocy1qNEasa)5XdB zbTKkNU69OA7v<@Q+|WuzO$=BuO+|`!2NU-*RxeYuQ)Ye!E_rO=l2a$xD$}Ocj}Kh( z#K5gr7468@jMZ#vy*t5@Oq)s`8@SFB1D7P?a&~_$M3w&Z8;mMVOvWsK`VCHb6hIo8 zHb@3R)RHMSHMNCZC~XN@X*-gYw1TWb+mMBuHX?axE1H)yQ(E-hiV*u5FH6sa^Dlfo~&v*CZ_J3hxp4gi9;6sE{ZK`L1ZL<?8{SH^&&M^NHOEJifP%?UXxKfm`P06wm-xu$oq&No_>;c5m-u+rvRpyFT?hsF z$S%8rd}FVn3+5vSP?l|Ur`T(zg+*B^lo>;m3{+Sr!GLb@@y{5LT(|i4k1bav51&IY zrD#`_goq&uJ5DTi!%06c!~R$S_8C|^T>_CQfS6*Qy#MQF3-I(oLE6DCSGth zna_7lWf8l}RgVttzy?T{zc9g#ZsYG={~vqj11DKg-TUgEp59@XT|pEO5D`=ing|L? zRAg0D6bQ2{yDYdn3;qQ}1W`y(*d>A?QJyG&o(6GHABq}%Xi!1XfEv)4$i^7OKPV<@ zR3fg5BK`@lZr|Vc-tJSUdhYbhxbl+ue0G0T=bk$M>QvpjRkyn}+b}K(E8%98L0_wz z8)1iDO0)l4>3tDLa3oookz@r?PX(^TQf7&GOQiUi>Q1~W)sY^5MkVbeAn}9gXsAtI zn~FqAH6IacswYVk`DA(Ga!1;{8qfI)vU6C@mFOZBDwqih~Et^EB$dS4&IWTfM z6t97#hBWDUzhx5_imAKSI%gL`*kAq0Z0ejOrA(Q-VfFEHguY$EOMYrMi`rX-Q86nn zeJqBD^HM;wby%zN=@2P$ec~dH=U*&2+{|dc_n`$J=%^hFTx5Fmg|#X^=8?D);lEh& zQaSnc%Gst*S+?bDf?N!x5RmYSO7|qi+MHFk+uRpR5CuBH3oVxCU@my|LLM__<6AM2 zqJj3kT(yq4j}c=X+Je57oi5iwpRYT?bwe8r0;a57u>T%|I+d94QiIF)%XmT-BvxON9V z(2GEui=ZF$PT>026@2eWHRbZ9EvlpqccEMH{mT%oA#fGj97o&1jtSqiPQvFVYb|bj zO&ZQL+zwo4+OHN#@%)Ok9;5$VNfIA+ zDBf9-CMnjvk$pPF0MwsU=~_b(_2odug7;SH)=>PINF^h+&(@3-F}Y-bDXcon5q_#AR+KzIxH<4|@Fp+kDSbdV;nGB1)Weg`} zOLuH@KlbrEP?OjvVU6E`Ix|y|fsYd#86PKkiU;eG`&eJJg@H6>dxst%y`WPkx7^*g zLU1ZN+F+&j4L}5XckEJL{Xa&NQGLmUA7$Lp#~6!@6d7P8BQ?fYWTc44B_lP)SY)J# z2Y#0q_pA28gxeQAdbkUD{1xT-h@t_u5`287d?8o?cOp*-nrOXoXmPqlM>Q1~W z)sfj-PBpAWLv8ZfR9v#sDfgCBan+1ki)#`A_LftD^BI8`nsjixB4u*-mPZR)u|J%) zY!aa&OX{}dz{u%P>=37hH0gQ2WfK>Qsk_!XXBR>^RsG3q>O4%QOqsi(_LdR(HHj{{ zsfGSEAr*hU#j~QXu2tE7t`xbpa}fvgd&plhGncP6wBQ3BwPS&cOoP6#R>jAB5uc0w zJ>+-E$p=(ZR`oQ+vNoP_)e~V|vMozfnk{NeDM)yK9cYTRIjgKhcMlm+pcA~%Vi^wR 
zf>$rtF=IBq6%!^JXy1EMv5vU=Fk>Csg1(iVF4sYyuRFnY&{ey*&uD+tkPW_?iN5yN zsym~i9jB>WU-xm@`St%3Rk&dCO>mI9tAGae`bfpj$5~H9KvG|HzdcPt*~(vjoRbQ{>mP;zqD_IFhWp zL$-?-UI!X#lc!RXr&5zM+r|G|!MMB*3dLH(+Ekpoz$qjB7>RmfEt28|6=^+2Pb;zF z^9#lIR-{Raq@VkgLkAD4Kef`eh9c_AF^dH+s?@Eac)Ca>BeipDMv9nRGE&3pVnW+aO7=Cq?P6pShG-VW>lCEYENTy~W`MXoy9L8U+FhYfPr38nlGyDS zPRf?<*yeuh$DPn0IAue+86`kCP0=!>WYLY?{D8nzCa<50GBasgqmo zhFWQs6&=TsmD)D|5$NA02i-<88P%6u_)*3keT=clNRa_nGE!rVMMjEvTryH)j73I@ zc;I(=xu=Wem`PYM6!FlhBijwPFM9NF7xMTk${2lL3U|4ui{g&o#Y!tE4?q`2#zExGm-|y)U{OLN-SlTh_^(Fr&M?1 zRjH25c5$j{BL$T?b8q%ca{gzE!D5maO>zrK(;dJ#Uv#Imkm@;MVhT1Mhzb2&OuebP(0$*MHoewGaOQCB!7jZDZU3{ULxqP*u1s~|B9SdA!8uW#=Dn90m z>~`_{@KdWhJ`X#fSo(;Dr{;a4;9V zdclqvv+=E%FwsE!-jjiK#C=>C>(CbTt?YEU4*GoE39f^#+QofF`=f?z@YNLC#VGiM zx-%-;ahl4Fbsv|VU;jT*wS>txaR~}Q-;||&rVKnOHj00L>rn!+bQ#ux87mV$*Xa<5 z$>$V6Ut9=Z)O99Af4kUnInVK_s9s0b{Z*D)0ZoHpvV6sMF`UfHJy)p=Z(gf`KTc)d zE5}?OF0S2y5A-6?<|60^y%V_3IQO1NQ!ZcHqDtCu7rGVSwwkj&ts!s~+Z;#R!Hx;v zv`)h3CTlJ34b}HMP1<~D2QD(BxUxdRHf*~%1omt9sqSN|D6*L9Z3FXk%s1Thi1s^d zdPMsLHa%kMO|5!GrF!LWcmSy4kgQaT+r)lq;k4}g<{hiEuG4;1rFoJfPRGq)T&|1? 
ztF6P4%ep&c7r8bZZSi(XaUm7&CqHW)Yg6aerp~QRol72@i0T)m{IlJfcxCT0;@_+;ytK+U1MI7)ap>`LmL2pKLn39c9Zxb4sgdv(m@#hLsX%@Ak zONR2TT87X35$w-Yc78xlL62HsKonkD1Rf(W8gEkjGz9#_0P} zxXYblET7kK`=Un=x7~!s==)N*%XGO^vkjJ$#bh_54Emj#^T~4PB;O!f>3tDLa3ooo zku)f#uB8H3Vkxskyd_e+vAPqlN_Av5p;HZ0XsAt|O2s8BopKX86<5uuwYVk`U=unO zIG+*tKPDaAu1J}ZrG2p-JX+ZNmQ5m5WJ#@+92hwriml|)T<}{qaiN&HYprv3A%v^d zpUkG-reMO9xf@pBhk?+YHM~lGYESK76;f%k;<8TIx{uSgw1mkwaS3do@A1+;QwE+O z8^yoB^(cW@x(w^UjFkzW>vRajWLyK(7Z<`ZU1yr~H>WL^^L(F*>UE^;wN^mWV3;gl zu{jOrQ)Oe%RVu@q*J|L8Q<=9p&E?_Z+8y{nF9K~Yf_~6Df$NNO?@2Y~@}(`Rqz!kW zTk##HaC%zfdqTR3ZH}YuV8?`SS|{OileHH2{%TV;xplS!7x}?@S5`>ayv^y*LC7X) z^c`r2W_)8U_--01FSNK0`g&X8^OAFm(DkC?n!$?>(q`bRVTGMuLruI2^ z*Rh6b#~Q0`C!CJj(7{X4kMUSjzQBRRkd6OhfehP(x-8m8aVwEB2Nhg?l-L%Bv2)HT zW1fUQbok|hL&cr@0{B*A&h{8G!JN#vKU@7C!th@Tu_-g&*qVt-!%V;6v8duC7S4CP zw>k$YP%!133&CD zc&AJ`aZ3|%InJ#UPDgPo{HtOzQ*1)Q>4kbvSKeFQbn0^Q2CSl}S z``q&C^MsL95}N*x`^Q)Auf+Yot=wOU`;V>MUy1wISMIOG{W}$gnB2Jk{L1~6xPL(9 z{>s$-mAJoG<^IaOwcY!CVHBT6WaWWJm98Hzj9e${{O;|-NILYGl53qXlG0EnTcYI# zt_>|A{QowCe76Y$?*C5)HOL@>cAm!8XR0x_jxqM^YK*O8jQvYB#?~>$zE+K~p z9O@DA>h1lUcc^lAKXI}A!uMYDhl_~wJt7gfP8WN`VF9q$=(;ag=`l%>o-86xZxWA! 
zhUXvAlvvOx-zZwN=k~Lx5Tc$y`AZrUv}cJ%)C!snT0yfhD`+-k1&x#;Vg-$Wy{#{G zu2GpF7JT+^ar`pS?JY5>6HP+bjlXMk(ZogD5I$~F(5Z7<8V~CVcB(`hb|QYgqAPad zd($A9>eod^3iZ1N+3!(xnR-FONo6`pWcuomL2kP1wS#1;q&}wDo`i0D6T0nD==ACt z>PZ<%mClCUb~yB;Kl=##X%X@CYB8~j;t84pVzO^Oi#~dl)($=5dyg(9{aV-~+J5K} zk5}M`SdZ9e#z65PEm<~HOPBR5U9PK^F6&vk+@KDQ=essz+#_F9ohDJ|R3cYV#IK%} zI7i;~h&HQwM4MAtCs)YiPgbT+rKTsR@b#M-EH$`$7Q3>4t(Ex)g$0cp)A!Vq1#Odh zf*dVq%+lm%K_g6Hp&h->3J+79g?E2TXK_@wY`D(EqJBI|%XUZp$jkS7X|a-GJpumo z1%DMc@R$I%)9O4vpZDn0o2}aG`LJv~^3t6YWe8#p(-%)i&>4;O{8bu2Zjcx7lONo` zp{Mg#DJPw%N_sj`mGu4|wPkwvwCVwEk|GakanHk!f=IH1Ad>7Dh$LLa#YnOi)Z;IN z-zRIZr4GYX8iuJf3{zL|ZjqF!H% zqr-#n*NO&9sI7IZPIatKb*xTxXqv6%x4TFO;{$emxh#21iKM}| zoRLN^#JEztL!pW%7+0S?P3_6*limT?$nMdd|EBJAkJE~`lTnMN(iq#kq}p(dJ26Yb z(P|R{Fq2y6<&)}?I;pG;gLN3MQG?<|!BgT}%GAl6g8u4A$Ll{)(BY>}>camy@%?Mn zNn72U+|LnWvkv;8$^A9V$IQ4Y!a^*GBx^*m21t{BHUle!RI@j z*J}NA6tSXCMCb^@LPx<*op8Z#E$_V_Z4F0A(~c`lA2c5`7rNtyX7^D%RwJ8qcfh3>dwoE3l5njGmOy6iYjYIf|*b(oY|Q@t_PR%Ikv zZINWhVSZ zU##+e%xA3LZ?eqow3I1{dO1$#Ex$=4?*fh|Ug*o3pd=j9WRv z>>IaXycd!!o3nt7{ZPFr4m<1Yz9sh(%>m`Yh1y=#YA$}S9Tjd=g9~Dww@a#h1}mv{ zPHI)>}N8Bg8u@1UH-^xyx>!8oqo!~m?s$JaYv_Fo9cj!c4`yW?hCcZdMvnee$zhd@kzsQ~)o;$*4;%oORr zIBvO|Et6DKv~0&@&$<=RG#IA3U&~VQ;y9el)IC?J3~yemfj>@V*Pd;)bOM)$i)(k_ z1HA~exd{3}uaz=Sc$@nvmoIHmLl?Rg-{aKfp4Rw#SGtOAj-%~h$AoWMd@oVEQp^Hw zvex3hRP9M7>*aRf;!s&3Ve?F+m4@hWn9rXFiM}sn1Ig4RqqaF5{ zW><w#HkAf!Dh*iXSZX(& zem_DO*Vq)9zv85UxKC8KfaHLI$QQ`VX|;2cIQOkOXDIGasa}`*yDs&2UFvUg^jKSy z+Ois3YQ?Kl#j8`rt5d~E7}{MFg5E9KVM-(w9|H+Xio>Du4|#P^D@W+W*i#grrl7`K z*rv0msl7$i-kpAApUH-gC>z{$Lt!{-$)wU4+q|UWCOFu z{C<^@L$W9Y*p?`S-Eda#Op7|nRM3kvEuwpq;it|V2)|7?|>y3h>)dK<_iHS7arKo!B#< zG0*FmznJ&mJMo1W<2!YHB!<81m}4t;V$XcWJg;N^VoomoRMQXfCEnlAyfNQ7o6{TP zXH`a$9U+lq2S+3!)1vh)-U`w2J-HV*{!$$)QytFJM>#T59h2xFbnzG{)iIgsNFHlB zqDghkU##*v<}+3;g_|sMJ1u2OqF#=Sc?)n(+=zKMC~nRvRvxs>o3lA_BRV@MZqCle zwWe}}88vgfVA-4%SL_EJ1HsI)v~Pv_La8g4f7H$uwYR3E;;$ds5AoNJ?aH5RDv|E7 
z7MF25f82A61d$Pr=b;4?=p0&%b-6f1QY%=dOn-#1uk5-+811raY|qp7Jxmo3ko4M{m#9cZ^mWQJ@pK(BhE-^gF-Unt}r}X5*XJzL$~K5% z^i3%aY~UDpW^EMz{??-eV(Bt;gBdFmKG*3Gh{@p!pf4_jHM-7Z=pXl3E@y)w71irV z+iR_Wrok{-zT&tCPUhsEt5k+JuhqaGr?Oj};~p*#7uW8<2YL}`a}o4|-U(c1p70J{ zQZ8TGqDtCu7rGVS1?qB7YY1G$HpkI+uw%kEt&{M%$y$rMvfArQ*2V3>MP?LNR!G?V z;~sVuXS$&`;~Q(i_o@20C+>mSqz)~vgMQ%3#|+Lv`1oslZlUOW?v{_2I?L=r9KxLA zkGf@M?M+*>k8DV>SVq|F3~6@ov$H-k?8tUL`p>~m%y5eTbH>w4*)4O%69WBrK>gWo ziabrnV;BxE`NVl7@d0W+I~_@UV2e*qN7AGQCm*$nBs}F4&XL4NcKKjuB-wFXKJ?Yp z`=;GkU59om4eeAK+Nm_OQ)y_Y($G$&q0Jl-#V?;x3cRyU1&X!3t5JJYb>mo_s!fhL z>W*U`J-XIH@#2cKE{)2%G%D-TsAM;(P+RL*o$6Sf>R6rXNSteTkq&xyaEB?8H285Q zq|q_)@s8r(DOB-hw)yO7YP%m+#<_Q&9@#zHo$g(KNz{Z|vZFM{HZQ4l0+glkCgEtc z2?3aS!aYLaAd})lCB^5}No8&5ufukp*x8_^PTsH~PIUi7A%~wjGbQ{sv*5Ss1-;+4 z{xWuAPh5Lo3MwPXYKtVR$XT?YC)F{JV+AD<(L;>!ojN`eLmVZ>@KYxM@Y^WBZ$kmS z&$M2VLmBMEp81SDb9rG7+a_J<^C&ZU{ zzgY9eeCKRVZ;YQ+8A*18M3NmGk%UZ(*0*@qL&vFdFFv=VI##ASlE)wpOi~?_=pc0Q z;3w5Fnd(R$aPvCmFIIUS^BJpGnJjZVEoDlgUXF};3vf={htQLJKC)Iqn$ia&d^HRrn!+bQ#ux87mV$*PIZD$t?MFK5j<&0>&}4trH?5QKxyf3Kdw{-d z&zXkXfs5#LWrc*zKPYN?LvO}6)`IUydEf^mw73rXd_1t1gMxz`zE(8e$tdznQhdK8 zo8)|BWI^NLgD*=fXzZNxQv&)H;fg0_OCD$^PHm?OmT9kTxTN>8Tj>k z`Bs| z`g-9%tiNWMVr|LlRLAO6M{-0$$obOif3G{8;;xn2b*Z=OQg1UGTxh7ptxmPt2&>QJeguW>HRWUtCZbNmg4V*>sI0WLmVo#hZe4E0WN$yF81_?^H*!kzcHD zixf_EOr|;}(LvPWiAt)&IX=nhsOG3S$13l~e8y^Ab3Iw+c3R4mM7{i?!+8sEPTa_O zR}9kBx)D>X_zi4N+=$tf6E~u>IdLO88v?n(Oqtn!w`|TTCH6vfd;&Wwx4uQ(%LW#4 z)GqE{#8DYsb=h_GY}R7!%Fi7tMegTmxJ)?m$0SQMaWKg^5eY4rKxda}tjonAlHzv` z8FiU{3gJm|@G4=n%dS^FQ)ABCma_?PY~Cl~X_f9tinTebQgfVR5{v2e+P`MMKa2VJ$rF$oNVL_x)t9Q>T*wO2wcTB$I*7MW5PGBlkmC8 zT8sOGVaFs~WZT}A6@q2+j!BBeJzMzvgjexspun&z?W|94u z&v+HNE^P?8f5Sy!Dt@D_)-}UY{ynpDIocF56ucg5F}uVM-*8 z_kD?72uM4#it>7jw~5qS{513J^_s}}40NyQXbu;LRajEz)_SYKIpZB~5`|V9&p}60 zk)6vF1=2C5JjGqBm<`F3GQ(kAt|r}ZdhhG%sFOtnz4*F1!o5Lh4L^0#9)9YiUhkKD zZ1N$*b_haj8uo;g_WS?i71qa*hT$00H$<_mcowjxMK)_yBw2&}rwD9Ity$O+DZMPy zQBRi=ZR!MxXcH;A6n^SN5Ps@}apAhS@W^_F^;@Eghuc$iG69MMnIf@@q%`A3oQYXF 
zfdBvWd*PAwYD@O}aC?5&{mwM=NKN64YsM|(`BXDyzgcy1lJRU45;^FxX!Z7uL{vtS z)fP!sQ6yQ})KeWRty^)~fsS`nYpzwPj^xaZ6T(!-WU6Bl9Yigjbf!A=SdaDbxF!~h z*&@|Z-I(SatGtf+jMd{!mbsmlG9^(jNA$b}I45r8yer0dYTbw_R-PrM;zrD-oVXF4 z&50Y)*$|k4n4{w!k@rJUmd#n{j_I5e)+?%&?RtuTFH&zwPvx66U;5COJbA?Ze(@h% z33DyxGLGfXEMKQ7i0Q)dEwo?);}X7}GRo3emy1IrwSr~JL_CC(o#cavt+J+`Bc`bX%XYKD1lMd~m z6?Ba2bh!vROx+1Cg08~icoK%bVPTDR6ch)Ov^={S81X!d%AeQWY2BQtTEgTTw*&>C zV@GkW$mM}&f9r#*qQCX*{9n2Zt)ORI!sJ>J0&)1ODrm_(6v9Qi$mHgqL|QIq$0ZdN z&GJqn(bN|j+n2>LBy3EuJtwIQY1?vIx@V9LWVJgmfu7@SE`m;{bpjU|`rZ?K%H@b! zREZiAg>J?4&+1@LUkF^qD#yQeuvzaI)k&D#q^xCKSbY~$atCS$E)Hy-*(3=GTkg=Y zdW4T6IKbibE6(N=SuOPrn0Q{kL+i*MkrPDL-#wyx5Z3(6i<2FW7MHbkEf z7|f}JwHdlK8@ja|x-}lUJ+p&uU4U-A==Kq2+Yg(xyy=DKsp^zB^>>c-rpJlNPuC%$ z8{OJ2tja|eEAY@70^QmI-I@cvFEG7gamV-n4%cwWS;)a1lBqX4$Rn|7ec(~06~_zX z^l>h=%g%qQ`15icWrw{G(sv;G}p_ z<@%5mDP+L-J~dz$kve_Jt|Ku!OxXcH^>uyp-a(Y=FX^M75)Y`TlWoYSPQna1L&9`S zj9H0wNa;&w1o|twIuNzAM#)>mP=D?FW-!ask@*W|PF@wxx^vc1q;s4^uWt#(-XUg^ zo}?djdXID>AxKa7ssB#WwLFKwbW9vQU32nAj4=5dzZh1N0MR5`)IZcWNfd*7w$VLT zlK|aLFVzp{HrkQztH*!V(Rsw3U;lA?AJ#5gP4 z_{GDBRPiK=Yv0^DAe_h<=6eQB5KBEGR+|LH<&vWUbe(K3N6x zL#ornbrcWkU+qyDzG6$)*oo>^<-|Y8_Bg~by+$tLY4#8zoMEO#=PnppFoBNFu_lsQ z6%&(7e7g6JeL}cHUahGj>phj!9@nL`Ws}u$*`BsPt%FOkHe^-0*iNV9Hljc$W}(GO z9(1B#Op08tZD`_>*RuCaVeNMxG{!n;1s&r$T`qzSQ+I-kpsTPrgn(iAIH~)(EER_k zwEV4lE~?mZno6rmqGYU0R4rlhjaz~O&~dalM&RmswAe+Xf@oQLaFRIlf37q)v9cWD*v%L^^8 zgBiYFVzRmYlWM3=@-+L`)f2=dhY5eLb*^(KDwbLZ`)Vz$LCeApS`0U%XK~+ui$-p_ z)yOSF_&0giXKSBLy<)p!nmD4o<_&^Y#Wh4!2)~p*L}h~CZ^PZ{I-(!9_IsZuyHowg zxPnf)-=(yNevj(Q>Q5;3K!2Zk0kG)KYf8JY>C(!8H59R_Tr6PLyDA&jQ2dBUB_p-Z)QlAIqhzFpE3L4GB3_k@ z*0GXW$F(fAyZ8b+3rLSiC)l;T{_iTRsYoR}(OinRDR`w})Rrp%y%po{UHHh|OR85Q z;sIjqZCNS~262bHFP*f_{XQ9Y`XmW=t4(OZ7+PydQD3JpGic(nhvLht^v&cv3?wx> z_4EtL2%S2aION#QijEuhR%#?!f!^{jbsE`ZT3^!PNf~)GGA1JIxBkgZYbpOSC)}|-|vRoIieHFWDabn88I8v*Dx3easNx)XQ=<~SEM z=A76DSzwfT#E1KLLuAI{#+*wUb8fv1tzb_3(#D+28f)Kfd$fYNcs|hM{Om`aC~f~B zWPv%4cJw%(aIGWrLEZ>(`)KD~&3~$g8a52wQzFxF= zGT5F 
zXUx{#F+2CO6&i%lsrP+0(+)~l?ZAX?2Pbqp7@^w%3Ed9L$MsD&yrt56#Q?#)D&oF( zFt(WQ>II`7fAQ0pbG$KUcLJ?oPWz(9oD+?;hroMeGkq6ydkju6XXkihnG=mUNnCn& zKkZ$_4*lg>e5*8>+#}NEeP1B`%d>1k^*C)d<#ffnkku4PR!>3Fs-3S|jHISIih*mD z6f~=)M>A@?LEjEUZ2Kci8=PIJ!eQ8)7g&h3^Wal%Y8QeR_yw5KcJF@_|6%3dB*ndz zB++FX(@3%*El4J!POR!x&K6RP+S<9P)VZnDxvA8-%!?}_yhpM8zjaJ0)@s+LI@YE- z6j86|o+{~w)>?fza+^HRudVk12T>!h1mlnS>#lMe*|rQfU^o z$5gX$eDPxohRLR#yAx8ph=So{ap{h2?#F)IiG32*R+}J#F^D$viu!XDP%=L5@K9V| zCFFXqWe6~krrofi_XWrF0O5qT@KSQu_uV0=+wrH66)hR9|x8M;UkY zF~%YzMFv>ONR2TT87bm%$w-Yc78xnxf#2oDjj_Ej;r2z39&Q#bMc9k-eKFj{y>+az zoU9;=$qRZDI(53z&2C@x=;5xH0fou;&2UGvQ1~W)sY^5MkVbeAn}9gXsAtIn~FqAH z6IacswYVk`DA(Ga!1;{8t4%t%U6C^B1chEIa4d`#Hos+)2n}i_2S!eZ;x&-bT<}{q zaiN&HYprv3A%yeQpUkGtNm9y`xf@m=FGuK4B)sIOwnP7_kcwGx>7zV6oRk}7oJpW?Jw3*R-??Ved&`~=UxXASA3u{$;%p-9p!hf;k6LRuV)x&Jlr!3oY zHbE|iQV2-+OdV*7wK=P7x4AEtAPRJX7g{XO!Cdg_g*;}=#QhKP5LjEST5)J zJ{8sL$bl}4l~zF0V3;gl@nQ*_%+@_usSIyktARgGW!{S=Tplj2-GLAEBGBd{=m)(M zxXw8Do>WsVU)rKd+He=T72h73vpuaLa24AeN87=U3E#9%!sjMyE$&5{G@NO;9k|H; zwJR$mZ0?IC$~N|ppI+U?R#9Y$71L((S$FDQe2~5-v`1u>z`GtjqD|vs<%E{!S6Wu` zkxK+RL1q*CS!(69p7pnuTI;Bz<3*K)lN5PuuS)7x7{CSizXQS8X~(kyDni`tufwqTe@yR-Dj;!cLe-ZF-hvZXt=xgY!Z9jHm{ zld#6`K%JSX$iUZ#jf{_zJjFNblKWU+w1t5*WqXGnAibbdC%4?)w?c3#I@(~RMv@ii z-#bf*{EyLOR9|x8M;UkYF~%YzMFv>ONR2TT87X35$w-Yc78xlL62Hrf`&D~k!tIM5 zJ=}#n{)+N_G2G<_FqV@QWHEU`Z$hU|SGw8liyl4Pc5fM@?@QTTrpvoE+h93iCEScM z==nrOXoXmPoNv-HBJFI^=jQyW9*;HLOKL zZSvYwT(Z*1iK}MRT3nL|luO`tNbQ@?2z=P2gWDA;le@P(TG)!c<+Np!2o*h9wpwbnVi5WPi9l+VKQaP+zqw2jL5wP)`Zk1``3h2{Ph;k zioUv5W&4-pgKIk%aWKD!{4g_f`D#N8KG0D+7P!bX=nHFAe9RZ|x!B)B{)L=;f#MgB zwCuVxlIqE-F4>l)DYmh1U?~L&zpVpJu{LLwmFVsvBMNka7g{XCD-3VEaKm)%QCTV` zJUB38Hokf7drt<|5%+Ontb^_fT`N1?>iWQVu-XA0@u{e2nfDuOXc`QYO~7Ig8BXTqo~u-bH*YzO-96-H;@Tbf zKraGqE`om2cLLX6px}E?q$!s#ZBZp{ctpAt-`XKsL*Od5IgYl29TUE3orKR#)>_<2 z+7WT4;dbC6Gm0xK{-&p7z&30T`5@h6533j66i?G;NpENPy+@bn`D+X>4;)U0NhT7z*)a0qu zdP^U1s|!@t)Y06NF^h+FV&0`F}Y-tWQt5^IoxCjNzng>5gsg$9~+2eG=AI 
zn;?QQh=X}o*3Qo-|K(LeW;RV=AWhk^p$AAW=+wzAcSEf-%ZiTU$V%-SfC%(%&emxp zlTm%ig&$?y(Z?8zj1(DQB_lP)SY)J#$0Z{*##m&ehzEX`mwUQcj+ulNLlF<1I!E1;r2z39&Wo`jM4X{aF^-wH=1p*oGd208D-Faq#G4k z4xQv1L@T{7;s}l;D>IS?#niP_;7TlImWa1RiZ@hu;#H{*?Tys3i}hqXq#D+up*DGK zDlS>+if!y0SPYpEkkGDlPg1PSS!E@<+r@|io#2HQ%W%-Qt(frOz>L}W=C$uV8CXZ$ z$Az&Dx|Nhpa1Y+qjbb}cy6F%4J5Qxcx6hL2G2)pY#6QaLeY`L80 z_*7K1%-b$T(_okex;9pd?P55YmwT>K8Q#3*Ge*Co2<3CZ&ly#lpGGV0~eW5Tv;Ju z8@62>0{gY+C?@ePL6OB&ZyT7WW4__0N3`Ez(<9n1u;~$Pb{7jLRKLAaz4B1`j}|BP z#ckp|wQySYee;ghS=VWQWuFFS8E%qdt#&HaF_r4b>}eO8u9GL9se6v%4wd5dIJdkS zPU|TiQjsPp*8Q7(jKxXR|GHAYh9c_AnE(qGRqEDI+(D$0k=ny+Mv9nRGE&3pxGP2x z2mDE>-NkCq8C4#We8l$BPO0%fFN7UZzwFSduGtr$*i%n<@CyPsW zY;!;M<4)|8u(sL+5sX2sqh6OjB`O&oXAg>B(Ixl!zmNa}XkpO(Impr*2CQjGPX|R&r`clb-imHgTbtx@)a-b|FgB5{=o^+Z0TgGIzu3`!Ept zl7UqrwKw*!3aK<%aoMK_-;6j8q+mOt>k}7oJij@;kD1YYwV?$c=%^hFTx5Fmg|#X^ z=8^bh?Z3z9dy+eo6|5f8A~=9ip^=- zKU>`y740}p<*>Ss)3&sP$v1HcY@qLPvAqn(z!PMn`1iLSB@j!Op&QItnee$zhd@kL zDS*DX5T^sm7&G^Pz@NGX-UTAS0^!2vF&XV&G zq3cD(HG>x&q|Lxr$wO;5oEEx!w5ILgOzm^-u44_=jx|=>PB&Miu|S4xO_ju}C>||R=AeShuM*qhF!tx?lrc|2A3FSU!CS?h`vUk@W6t&%GQph8 zxIbI{9>VZn3b83O-q@OnO2bUQ;IXLUBo@whytg_RD^M`yoD0EnI^x=m#kHD<3mvQi zbJn#RbB;CU>;%ThBQ?+Dw09eGE}B3ln3K6Uk?F)cWy*yQ@xkRFdxtPihVNAIJvGS=#G=0GSta!=!q}4H4@ByOpS4z{ zvrp?wx(+%saPcIAxZ7(D-&PR~4O@;EhU_t6TZ>%!QUBknG^56Mq$yvgp}gV;*{si7 z_blJ0`^vD(zB^A_2L`X!9+ahGC)di00kcBf7=A;6hB+4j_~@hI7KLE>9p&wcX}I#5 zubJ{zB^Q22`9KVxQP#E6f+bNzbQpxz<{Yr*gg?^xA zmQ?IrX+M%;ztAJ6J)uXY15r|!OWZRmv6B>EP?1*R5t`zf-l}ny`GtySO2{FVut|iy zz9z|-K3JqcJ+z8pvZ9Y~GwI^~!f(S4vktMXgp>tyex*#o7B=Xi0UfOLeSE zb*xKc>Hg9T@-LAOahZ4Qhf4RRk@`G&(0{FDssAq)n^so5nOx9~lZKT8y>4BWiuuLL z=+|lG^k1{Giuwie3E$#I8X}IX`X@<&@yZ6;57X1`ARTm)wiCGc{wh_H-bk6^Gs4yB z_Y-uR2uqoKVOu4_V(08V(SK&A$flgW#I2X%E0~kGtg-Ht)7QOiCGjf7+2?yeL%z^P z4w&<$9pIpcn{!4RuT8tTGmb7f6JoW zo|Rp-7l`&#vv?$!n_Zo%{cugiLVRwT#=Eo!eV@7$TnBx=iOQbqhxXm#ijRzNpZgFp zJPaXNK37=HMvBZbTgFfW`Y{+w%xTIOXX5{}HG{TDQ2iAobdF~Gc$fB|Z&P=I>tN2N 
ziOQnkos;tACC=5$#Y?an^u1hC+5VZKSz5fWh$JWOG#)a;EL}!>(6?w=r3axozPVbo z&9+zq`kF!;OTb*ewF4YvLUYcg_2k^$Vx?5B%!v+dZe-xFTTIL2<}Q&~5dxFc zcZR0RSjC48F|^JStUZQ2Fw+ICuSy^2^R>pf4*Gh>1J`Hr1HONkes==`AwL&W?3&(V z8M7AH4wf@^P4zGc7ucBHi)FcuD zES6OvTq!wIV(f-jhChujUQANE;$jx>Er=$&83rhTnH zq1JIw+ z88R4c#i&onQPTpW!1y|hKu=w7n0|Hsm;?=*(oelwsJm4iCm`s!88gv7VoN zaqT$kwaS^Qqb3ilq#Qx^yCLi}4e9Wj8wXCW zNvWyHK57Uken!w>e^wCc$v6#FmpID{F8j7ciRIpJ#vu5+676S5KB^`_6 z4e7BDdu7=D4I&`Dx?n|D7p#KfGh!humz*<%t?)c6I+Co=NV4MEuDHTkE6)o+L2D{; zA4lRoNl4papN0}gcDcB#rxBcs%UgNI!YU|U84pPxjEChDk4^(5vK20}746kuZn8YC)tE%yUJb&dDZW#riSU{* zap1$B@UUqpch$@ke~~alIk*~*M^j{^){>#r1NCV9d}ZC9BDdVSz4f+SVp)IhSI)8b z?0x7%BaZNRRk=(8B7v@qbeoN zTCb2r2zRO&kD!Q*_}U6_#6ZW8Lf8`|q;5Hxw#*n3w3>XZFz&rjyr?3rr+9HiYAo(Z z#9dIeT}u&h#fvxTJ2FL#<7eK~3^DP8xErURDB=Oa;zj3)`0RD2>Sq~x5v9SuRpK~W z7|%@x8OmD9wu%%qWHFoq;LA~H-r6)5HRS!Dua%r_&7<3 zX?iGSY`|CQ7*g<3#*$Vzjv_|Ue88qyX?MOLJHXi3GR0S2#q_J%Xv-AFxd?g>qGJh{ z`zX+EZKGHahq&4P*geM*rJ>b^bFaSWS-x(HcWh+Oo?7tt7u(MaDevRXsf__UJtUSaGsikn1==B}muu&UdhPw{aT={Sn7 z6sc%Q`J#&L`4_AB=9=^t6|WQ)EicvS2uD2y%{oxrSBpPv{;q89eZ>Wp|EheO9G85I zkNfJWHOYFQT`EbmlsAks4?+>r<8}zeTG4T-qGWvZ6!o6Z2{@vQEs?5R#cf4fMbwRj zQ#VXgsdcYNb+3?56%^@>h{8 z&$ruamt>g4BS5iYgxBA-qRrY}-2W*hEiQT}9xOreH4}=5iBzu6sFD1Uaifxrq;p4q5q8A}Du{1|7r#)~PweNTabS%E#G71<+}3j6;rr$EFmm1*%VchVom#5_>-?-_b~c`1WH!%wvF z1A4#4{xW)BSB302if^bw_Qq>AV8@BFBi?5yo-9(#ziT%1%Pz`=cTIKJwjsPjCcIl1 z=dA8ICEIk$UGu-I@@A4EBAjnLEG2zZZ9Z1FrFct4T5UVk5m$ETyooE)9bp*K4o{r6 zmJ~D_lX9rk?Qv*voPbUV-Gg`O?mi9elX7xVI6a(le7pbwrQPB}d=~)4r)j7_x5|t9 zO%jTXR+yqW85&7m%S)A2ZxO~jJ;lGRNXh!Ti2Z0S+f_Tn=L2dr^oJ88FPD*3I3avU z7lwt6aD7YRD@?&GW&+Q=B9OLijIV9#hTJmw@-3Gu>Poo?v&-~D`F5u;cCgKZRyazM z&Z&Y{JmTN5!E%NG$7{to(^LWfdE$@iDI&rf+a0EQ(D~d*D`ASW@X|kuv$_2%r;7N~ zeKKNSVSJ0}nhn$R^v?&V)P7SLzJg+Vd8wdm?kTg1;`@XLmpQ!`arzBzS6sxzQJFINDr@_PsSIO;0`ycSOk>gd>3-A8ayj+xw!Ey}^VAaK zJ@WrlYqUSs1R7J1BDeD%ANO)*%pS3nWB*6bGnbcFipLGgPp{Ohv=W`XJzm5lw;i*h^uYplgKj^`lDOWGO2 znPxTkPZxhoEsBV6X0=ksd1`Zh-In6z6=^jgW(DtvYxU&Ko46u`2*Z%#=437LZdgUz 
z;*a(?)PKi@_WI>L)==HCA)u5yHi}Qsz=B>jAN>Df2}MTSPN#Vt!S#b}%Gz1VtBqC4 zO;Y^pisUYUMr+v)v_pJ8pm1VPe>kC?Dj3D@@HjV$mJ_f2~-tt>aDBwsr2it#Ci1wGoqRMW|#wKr;OmNCS=S zO4L9w`h70Swe`a^hA{#GIzF}&CVv4k#=RPJIx{3p@rJqGzRp6mUG1DbrdqDAvR;lg zVd^wt8ehal9T-n*th#O!rbSKE6?X~zVqd9v@whe30)sibq*_bLy9^T80gNV7L}=_! zE79p<#6Lr!KBfx!B*il;(s~3QrMpoa#}u*JnSMGbsT{}FKC{`kj5bn4Thv049n8pO zH!vON#wvxl=f|{szpAiuNL;aMJ`LwJc+N2?#9uWhrKl(P?d=Nyy45aY(kF)74h z8^W5##B19uCQ)fP*xZuF z+QwrQuNVMgQ(=cY{Qugoq&au^_83f{!_*4rO2P$|q-2GcGI@7{nyWtiO=8jNpl^uf zoyw>ol4>R_?sl??Bom-U)w+V9|wa4HDbDZM=j{26LtXGTm0&Jt+-^Uvp zU);#&s%=pvVTfDB$1&%sXrhuZMA71TP_$JOMWI7xfVqBeH|88`%-L$cLf_p8W`2Jn ze)d71-T6xO0iRVAzadio^BFYWR5d=5Vt-@*(@xiqs<}Ahp)u;>%e@qbYrf=Jv330W zYvJX>D~09N`u`7${xRVvgnysm-}~UQ?S8_?3;$U7WZ}WWTGL>=E!F-@vg_-@Z(--N zt)*rC5t;xWC1jH2W_g+DEP$+(g=+s=>3x$Bd#wH(AN(`fZ`;qrpOeed5Q3?GvJy-O8yrLch(2$ za(Jt*e^+?rVP*RqKJQP;>n{_2Z3evP@RI)u;o;i5%i;a?@vDaj|3IJ0&*3k0eN3M! z*-oGE&f#}-{U^d_=~*L(57t)wUcwbemHy;#_ci79#|Y1x0jG{G`S}?=eqo58Rk|+O za4-4iE1%9&K7CGjtMCft&$YsD%Kz^PH$AtsW1Eb9GvzhcZGLZDAbXy#arV2`_fqM7 zh49aWuNGb>yk2;NaJk|zYCOu`ssG3Sne4%?x@B29Pj+1(e3S4Jq23d6 z-+vU|V$kyMtLt|Dc&X^G5?&%?KKMjtetud;-idRRhg|SEl4t$l`2>9%#dmIoAO9?0 zjz2e#=AMI~+vg!W?_Qw!_fp|y!mIThbffSKnwP&UWS#tQt(OlKJ}=IN9;IS@i!B?kYKU zKCtrsE+g+pbCic%@Mg)g`oAkW`ns}vvC#74pXJN(=jPGez7ur2|77Rgn>7Faqi~b( zcI`jyr~N19W%f1fylnkLe-5uwTypl!RsRPUSpGGV{~6(Dg`0%`BHTgo+)21=!}!{G zHr0n-+x{D}=R3k52>(-P`Ez{p<p6wzmW^5$=k~wGdBuvGW~1&(~g&!@|NrN{D}OWtNwr2xLhdt-;?}b2!ADP z>wYjUJXm;$@NnTw{@Hjo)rVf&ewp-cFWgzUtI+c2_~y%>QRRPF`3UCn)B5vB`MF$3 z{7;p&pZAG=o$#x|ndXBtGxDyQ zqdeq-e=d2}pDRR1-;JvOeTEe>J?fsbh zZ|;7xQ2yZ8=aqk(gin>-ZxUL2-=_Ow&fZ*nEj<_Sx$0k|`{qK)f3D=eMEEk{8NxG# ze<%Eu@C!ogpB*1f<)g>8e~t9KPI#g4BBABa@r`Q#sPbN`d;`fpYwzXq<3mE?`5d)B zOt?;Xu90iyZRz$Ol^?lyTYKRL$*+IN^e=qKhmU-;@=sE{ULicgpyj_qdhK}IOY_P8 z!ZkuWpICYKWaRBRM|sEvTast>^F9>%mW%J83_t!^z8rsU-ks3bLr4%a316FX>bB^q} z)cF5SwSSlJBf{H^Tq}?GZK?K`%@AK}FZ>|+@Bzum@smIBkw2FI?-j3q5^geR`EQY4 zJ06y)-7dl>3uhY7w`JrlnWH@9g5S-=i}z*G_b};wq|o}0f0i%D&%R&oJzVJaecb)@ 
zKJE$HPvSnoIXQ8)_8~8a+v|N_t8ecS^*v;s5+kyli}$ z%14iFzkZJTo3{Ue`W>YItvx5pp4S-vU#|AA6rLyi8za}sBYs<|{rNM**V+p|NIqO9 zIXQmv2R`!0@?WfYy;V4E(DHvmdhPi7t>{ZM&K@P4X*_>EBk!I$%0n*rFOp~d;qw9L z+fH`xAhi7WXZdpc%yYTV9YD9AKOj%^jL-?aVX6n`-1&%0&M^~V3Z zl}Eo4E|$D~j9lv<@!L}EZ=4~%)?WBQ^5IL8ljA3U;3I!5|AQ2-hYBBN(DMI6dhPgn zJmXsU0^v;KyCXg;@3C`~hg@)L$+Q0O`6%=qD!$1K|3RW#zAV4unfu%obo=?M)AaeP z_v&+2zZP=te2>nbtv|@i;gPb(>N|Nvee32~-m#K*yznI9tA!hcUl9JY@HU~1myJ(T z`RK9jFQ23SrtN>M{s!rPYtP@vo*Rw-f2;N%5#A*Hk&$cV5x*_f{-zn?Ywd*}Bp<#i zIXQmv2R`!0^8c&ib(`>ZgO>l-(rd@plQh1bEj&Ru(|F!Wd{*8Q<|q%j;I@)y{o(VV z=vyJaRT=(gi*EUH{LFK+{k$o8%D7me`H*>Wx6J1UX41p;3*^UzLi>4FtN+l3`Y)Yl zd2f-tw+r7T{D|;6;e_&biEsxY@xy-G|4rqi$F~2<9Q8MCzg+PLbN+l!_WT-D96D;h zNVtRWDMqgKkN9n=_CfaN`q$bEKS(}oCpkHO%!=vv4@x3v_f1c=;FUNnh;t3w7`pbo<3tuI) z_MQ9Wl5(ZNrTX>NZG{gM?j)Qv_+r&h5&l4!)AxGmdxP*z!Yhm&*fm>wj+rNW&~I_J z_7Jbx#s|GWR9w!NU!QFlU)p~}?LRJTD$n*0?dIyg^&^)ro7R>Y^hM!j;i7dVJ%=CE z^^Xe=ohtcrII+IG{y^d9XTbB1E%`4It~#!)=kWf|E3ZF9_*XOF)1P1RPYUmy0YCME zlK&RrS6^7xbNH?omDm4P_~{py^&DP&e0lxt!n$P$|C+G%lCqw|FY5ZYg!`RT z^5<~bOUvs!3lBZHte-AC;gqs&|DUgX+E4lPXTtM^lggXph5zfs(t=Bc8(&t|ZxCAh za`|lOHox~?AbYmc`1xJyd$9CAO!x=FM+;97o+La)xLG)AJZyj9|4jB^mu^VUA zG~qLZmOsb$yYlyN+4Tb9w#qBwY4h9KztH^Hm5Yb92R_i|i{-ypes|>eB7>Iy0`c4N z@oCZjS@?Y+i|=b0e*Ck1IezBN+)Crule<`hkA2@xY!O9-f(ZXR`O|d9rWK7S{h<=|5h0 zlJHdFD}=T`ZxH>{!kdNMH=D-C_E%GTu*xkUJ8q2x6>Tu zAs76W^jLp*K0)8!(z~zF^5dW7%kk&t(b+x^*?IQ>Js&+&c!=;=JqMjCJX`bfYlW-(AP{DR_SpWm##`!A4vU)jR?zb5@Z6y7QPh43EX0mqh^v|M@xW|C{Q^j=E)8Y9CwTV&VOS_ZOB|>;H3n3+3M~vSUBt4$7lkylP9!(n9lNSI&QH z4}74_2g`r&u_a}Z{9bI(@*gLDJ07nP{R6@;285{ef7MA~XZdpcxp{Q9 z_h0giyw07AL$~MSmzn!9^R~SobN|iVZx+fQ{Cbu0Z=>)hI^Tbq?6>y*iSCOzduNI> z*X{cU#Cxv#C-gqULdjnu`8x|ABHUBBm+)o6mkZApTL0|$Xeu8)w*BLz=Sjll!lw%@ ze~xcd`$v`cc;y>N{#knulON9$63<^LZ+|OXDtY@FxmMnmZvSHWk&Cyr7k-fZIy2M1 z@F5>Q^3lrQMe%x=a8HAl|1#;dy3<@j^+?zM^!dH)>6o##33AJ*QSyuXyZncBm5RYt$nH(P%E`ncqOT6l}l z%Fpp}pUt7Y|1z$LBllzOx7^39{+xVEpD%s?rZ~)(yzffhkAy!Fwlptxge!%sh0hb( zc-iz{s`oh~JiKf5;5+wf4df 
zk`GgoljA3U;3I!5|0@-*vxFNBTK)~vYsbSUMgK?Pe+p+B&lhCmeSD7ckPBWSdDb7^ zmqp)~#CL0kAO9?0j-P$M+6{V?`y(u3hxyDM7Uh`K3#Zt#(o>0rt;Ba+uu)mM%CZ6{gnD0r2nlwyUL!&8UJ@z z`-cnn6RtIKtvvE=OSRvBhWJ{0;RnfwLnJ51PyfM3{#gEl6t6>tha0r~FO^<9zTPkT z$Aw=P&NQB1m67+JIm$yW_!`Nx{_yz#^nF@D$yFcL{gY z`3d`M)}NX5aQ$BS(bnf3?B^e>{vS8gKQYhpmPp=q!X1Qr3ilHJnef%Z^M%9@`)$0M z%14iFfAAdjH*Np#ia(h1=P228lJWoDYX4s0$AsTDa;<;FZ%eg5b%yv_d*KJkhjS$- z$4~yiNB&s;&naI2Ec}u|%l|Ivwd3o1qTeNafaJ_HzOTv1`?opDLoRr|sf<@iO8 z|BE^5Z`yvd;t%HhxfH)awYx_6QQ^(PJB7%#{t>?|)&A-k;%n`NA0!_>Ejc-U@&`Wh z$MSz!@%pCl+XgNFe@L$#Uwf+Ee!^8k#@8n^Z*W%V0`=D>D>-!52&G5swz2Q4Xb>i^9GVN)% zv#!&QIMR-Ov+>z{KuOv7G~s?FUMjTwk9kT-d9m;%!dD6plbja{bM60F{7(};LumOA z$ndQY9eryu`rtc8{P5-Mn=OAX4)2gYaEAj+&%j5h4xXbr*j`@pzl?qX)h`<}{j<^N zMgNaQUux{w()Ju^{Ko&yqL1qTh4OdW#G{OA{eO;+xPkAMoVsOM`l#r)2xn_A?ZJ=D zQQl2+Y>yx0%cbfE`osDU|F@(Ue()``Y=7+x{6~=&!n1|sjkWJ(89w;I*NV@!f3WC` zQ*d9=iA#IAZCTdgxA>@xzQvMrf8lPzmmB<{>N&jMQ$z0$JW+M$Cf`sKoz`U`&W{8_fYc$WOwMgO0rI1uZEB7Siv=ypZFAAAFhQ*!EA5U*t(weB?R!V9|*a`EKRGZ}9;c{j4Lee0nK~ zb>w?9>y`Iv-T|*w{U+f*2{#F?eNEeaS?#|myj}Qx;bPfq<$qAutzDlJ9lOAB$pNiA z^npK-oV$dU{|l05@mHefaGQflN#I_pgO5{vQh2n%^HsO@d_{V{Cj6H0d%`<~*k#*8 z&!OeVev3QFzTJfQW!vM&O!$_JzcaP7`o68_usei57C!dnCFk*{mG~y1ZGS)U?QHm8 zo8d?9l=zp64?R{s{0E7Dg^>0+{sYAaHf{f6wX=AZ=vLopqMt52Q#fDzIk`Fc(9f40 z;6JHu?dQ7156+VRGVxCf-#J73*X#Nlg#SCkpW|z)@AtJm{>~Jyoc&GP!$-X67trPd zeAcd9K9E7pWiaXYfIp z{t3@0GvefDmdHBj*35cnAMqa`TrNCBX!Gk7(a#XRI)j&p4*sp`;CEEd;m>t_x%^ly ze6G;?_jmFyhdKXe>rYNUaUL~Z7sy}Rp7??96yN*rACUy9gTbN`>L;cg4I;kmpy(E z_fY*Hp=~u={@wO3B``m+Z;t(^X#V;$;YQ(K2rm@ARd|K)-NLJd9~0gn{0HGC;n#$> z3x6owEWBG7o>khrNVtt~7vXNgJ%xJcKkML^Y=Y*St%bs1f-$rW^J$|)ru=aHNA zyv6g??aIGnGWl1w&i_9aTH&g8dVMUk!)IOkSlH~+JK>6sCucmYS?bZd;i_Fc`l4|B zgFX60IDS`;zEuc&c=W{~?CsH)gxy^F)?w3QJ^rQPc9*^^FxL88jfMM#i=DO7SlA|n zjs2Wuc`R%j_WpB^e*f^<*Ld{p!q%_z=nn`N{*6a}V7S7iZy&C5={tn)yYwBytsn5@ zKPZH&J^D`J_z!yYox?_#?j%EOJ{Fv0=+T{I=+PgND(%sC%g`U1q3@odr^z4hBDj(;TnyF_O$IkPZ4{_PmCu=BH$BXZa?7Ce6+8|j7oW6fN&ujm&V`m;pe 
zWa#T6z0gaZ#A@&Fq#ixZ=Zhp~8{G@V9;SQz>A1+*|H&Tz{lkPazC3^bG4cmz{C=w^ zC(Zx=6#W$CKm9%t=hL<`Z!GK4(|POxk=_dFIDK%BzEk*9GrsQE$jV#Ms)6#QB&{lHqlvAEE4_EV};&jp?Y@g#W94ZM0z2gd!&wB z!;zwMZ$u+o1Ms;lqI$j=hNAwyruzGYlB=ii!vrgU zlb*cRWqrP*;O#S##!#d`oWbWsh0kkv=5sqHep$ah0XX&RCAEIINAbTe#m9IxC7`3= zn}Cy^yr%x5g8yI!|Cdw#w`*4(mWX#K{NK;OufMNcPbY=LKb1WnS9~4@ob-7~jU(8m z<+lo-zx~~l)|%`$meenGWvJyum(Ks23 z#OivEFuS@PdOcvBJ|_{41uqOQjzv{tK>w%cKl0MCiEEob1yi4!$a7Mr73u|B=Fxw5<_ zL_F?_Rz`s5b|Wj=({1WUCs|t*!VTTPO*A`7;$+$cW&BE?oDzOcP8aAHkBIWh3U zuH}hd5=ODG#=Dvw`XkRtoSwc|nK#OjL>~f#6-CybaDpV-)B2I+JEAxC{XLLm-UO+V zLlxrG*7CY3%#EfHKt8>3R+wiL@Jw@yM8=^QT0zfqwC4SK4}@Fgx>50-}=niDSd?t$xR9^-Z%`H795&6}9;JyqwLz znJLG}$T{RrB$>LhWbWF|C~?D}S$UUYC*}f6s;51oNx5)pv}o4XYgWhHTJ7N9dmJ@c zF&_4=MPrG9eaJZ&k*a<|i<8K6f;?GBS5XmqKcLY=(ARIYS6h`gUL%x)T5w=sqn93N8t*9rgE+Ryp zd1@fp8abW=*DJJJaqJ-M^exvb+N#u>j7=Q(hbCx_w4G&(+rdQMBf|7LvxbTxV#1%qZkr6EV0`6Q*e_ zh;?X@PI%-NnpmyRr7h-U10y#G#+Z-0Kodb2`AD8F$SsQFZJ>2dfJbw*xt&J7)lPE4 zPWDDl%QhF0Zg6Au1*>y43IS0u>-EI{Ev`HyTdBa6#PuS2daWEi8Eo(Ix zt=}$~4Mr0W^t|@=A&>4Ph#_O1P1A)NoWMY0#*DSP z?!=%Z9lKZpBV`JjnCL(9Sk`y`9}Xc`cCwRogn%?~RQq0tq(B~6nx&7k=F6`wOW7T7 zcAM3M0n#}7q}v!#mWfIwUb6$&lNfBl%U(Gsi*6VT_`OqaVOxQv(;+V?%qlq(%guXC zdt_JD6f(jkOd*vYdNd`&ZIKIfm~}{4emE(tX=jY~@zn9nLI^zm0OX^2ZoJF3TeGJK zZ(n58qs4qWRdWQRa3_=J5S?4w>*8Qriai@`O1EhPuZgq4jvMsEmJ^SCGG3#BT^ROz z6sVDQDf=2mRmMLAOE(y_N}?nZIzlCll1!N9mY>f=dX@IKq_0|e+n2^}R9IV+%%$m~ zpK@AmkY~-^&hdJdy`L6CY9oEK`*&AOB>5B$Hr3QZd~))^Nh*`#47e%;}B?ptphn z4ye>CC1`%Oy4}c`@SBGdGk%6m`4Rbv9~GzUspY$W#>uyKGk*AdnHIhu!}XkkbA`_~ zxuWAOOV9Ucxc+z!o4%#z%Q9a7b@gh<^7+0E*H_dzF4Gm|uYw+*CZ_uDD@3kePz3m_ zhwsRm#`OFhioVa_)8~|)@9%Kk#(8$OD0QZw2k~?@qd(vC;reF^llA9*ay>-*e>3!a zABgKzY2c9l#Dn$6V_J6bm*7)=>Z%CeANsBm%<`F@`~R?_UrD*)&t2+4)o+n;P>7zs zAr$Gq4j9?wCj8P;$=Bpx*}{4->}z<)((`>KuCH*%6ak;FbN$T%J>P5Mn(^}_p4IZ-eeBX;}{{GAO1$DGP zqXD1H(m6~xd@t;=C+SV2n`pVh^11#yT4{vzyVIx#)pGkZqS zpJYN+GaZ9(0x``Ursw-;Zz%e=REKH)Q(=1U*S7)7%IAA)fB0UCuHB>P<=X*0^SKS} zS^DYywtY!4Jc3@{n>x^+@4*e9lJu-+Q9i$a1pG-C?mypy+Ew)Jg8VrqRP|kGCzMJ7 
z&h+;d;LMKePbvBWn)dWl5>lK$Ovm{5&7l9?(-PxRy3ms;MS8jyR)i7ygKtZ^|9mJz zjK?DG*%|crzhBPOTP|Ij-cs~M`-v}0`ZwRsOXN4MeEB+ZQM}pxlk$GJfp$oc~BleuMx(**7Hp Zj|v}ai}LaLx0HUpDe1pjASk?Q{{x7i*D3%2 literal 0 HcmV?d00001 diff --git a/qa/L0_unittest/test.sh b/qa/L0_unittest/test.sh index 121555a3d2..7e2020d4c4 100644 --- a/qa/L0_unittest/test.sh +++ b/qa/L0_unittest/test.sh @@ -6,5 +6,6 @@ set -e : ${TE_PATH:=/opt/transformerengine} -pip install pytest==6.2.5 +pip install pytest==6.2.5 onnxruntime pytest -v -s $TE_PATH/tests/test_transformerengine.py +pytest -v -s $TE_PATH/tests/test_onnx_export.py diff --git a/setup.py b/setup.py index 2145025789..5f824c63e2 100644 --- a/setup.py +++ b/setup.py @@ -14,7 +14,7 @@ from setuptools.command.build_ext import build_ext from distutils.version import LooseVersion from distutils.file_util import copy_file -from torch.utils.cpp_extension import BuildExtension, CUDAExtension, CUDA_HOME +from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension, CUDA_HOME path = os.path.dirname(os.path.realpath(__file__)) @@ -85,6 +85,7 @@ def make_abs_path(l): pytorch_sources = [ "transformer_engine/pytorch/csrc/extensions.cu", "transformer_engine/pytorch/csrc/common.cu", + "transformer_engine/pytorch/csrc/ts_fp8_op.cpp", ] pytorch_sources = make_abs_path(pytorch_sources) diff --git a/tests/test_onnx_export.py b/tests/test_onnx_export.py new file mode 100644 index 0000000000..15c309ccdb --- /dev/null +++ b/tests/test_onnx_export.py @@ -0,0 +1,870 @@ +# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# See LICENSE for license information. + +""" +This file contains tests for exporting TransformerEngine models to ONNX. 
+""" + +import os +import pytest +import warnings +import numpy as np +import math +import onnxruntime as ort +import torch +from torch import nn as nn +from typing import Union, Tuple +import transformer_engine.pytorch as te +from transformer_engine.common import recipe +import transformer_engine_extensions as tex +from transformer_engine.pytorch.cpp_extensions import * +from transformer_engine.pytorch.module import get_workspace +import transformer_engine.pytorch.cpp_extensions as texcpp +import transformer_engine.pytorch.softmax as softmax_defs +from transformer_engine.pytorch.utils import get_default_init_method + + +# Directory where generated ONNX test models are stored. +ONNX_FILES_DIR = "./gen_onnx_models" + +# Shared library implementing custom FP8 Q/DQ operators for ONNX Runtime (ORT). +ORT_CUSTOM_OPS_LIB = "./libcustom_ort_fp8_qdq_ops.so" + +# ScaledUpperTriangMaskedSoftmax is exported via ONNX::Trilu which was introduced in opset 14. +TRILU_OPSET = 14 +# Opset used in the ONNX files generated by the tests. 
+OPSET = 15 +assert OPSET >= TRILU_OPSET + + +def create_fp8_recipe(): + return recipe.DelayedScaling(margin=0, interval=1, fp8_format=recipe.Format.E4M3) + + +def do_export( + model: torch.nn.Module, + inp: torch.Tensor, + fname: str, + use_fp8: bool=True, + opset: int=OPSET, + input_names: list=["input"], + output_names: list=["output"], +): + """Export to ONNX""" + fp8_recipe = create_fp8_recipe() + + with torch.inference_mode(), te.fp8_autocast(enabled=use_fp8, fp8_recipe=fp8_recipe), warnings.catch_warnings(): + warnings.filterwarnings( + action='ignore', + category=torch.jit.TracerWarning, + module=r'.*' + ) + + model.cuda().eval() + os.makedirs(ONNX_FILES_DIR, exist_ok=True) + fname = os.path.join(ONNX_FILES_DIR, fname) + torch.onnx.export(model, + inp if isinstance(inp, list) or isinstance(inp, tuple) else (inp,), + fname, + verbose=False, + opset_version=opset, + input_names=input_names, + output_names=output_names, + do_constant_folding=True, + operator_export_type=torch.onnx.OperatorExportTypes.ONNX_FALLTHROUGH) + + +def to_numpy(tensor): + return tensor.cpu().numpy() + + +def set_layer_scale(module: torch.nn.Module, scale: float): + module.fp8_init() + module.fp8_meta["scaling_fwd"].scale = torch.ones( + 2, dtype=torch.float32, device="cuda") / scale + module.fp8_meta["scaling_fwd"].scale_inv = torch.ones( + 2, dtype=torch.float32, device="cuda") * scale + + +def te_infer(model: torch.nn.Module, inps: Union[Tuple[torch.tensor], torch.tensor], is_fp8: bool): + """Transformer Engine forward prpoagtation. + + Return results after copying to the CPU and converting to numpy. 
+ """ + fp8_recipe = create_fp8_recipe() + with torch.inference_mode(), te.fp8_autocast(enabled=is_fp8, fp8_recipe=fp8_recipe), warnings.catch_warnings(): + te_outputs = model(*inps if isinstance(inps, tuple) else (inps,)) + if not isinstance(te_outputs, tuple): + te_outputs = (te_outputs,) + te_outputs_np = [to_numpy(te_output) for te_output in te_outputs] + return te_outputs_np + + +def validate_result( + fname: str, + inps: Union[Tuple[torch.Tensor], torch.Tensor], + model: torch.nn.Module, + atol: float=1.e-8, # np.isclose default atol + rtol: float=1.e-5, # np.isclose default rtol + max_errors_printed: int=10, + is_fp8: bool=False, +): + """Validate the outputs of an ONNX model vs. ONNX Runtime.""" + + def create_ort_session(fname: str, is_fp8: bool): + def load_custom_ops(session_opts: ort.SessionOptions): + """For FP8 validation with ORT we need to load our custom FP8 Q/DQ extension.""" + if not os.path.exists(ORT_CUSTOM_OPS_LIB): + raise FileNotFoundError(f"Unable to find {ORT_CUSTOM_OPS_LIB}") + session_opts.register_custom_ops_library(ORT_CUSTOM_OPS_LIB) + print("registered custom FP8 Q/DQ ops!") + + """Create an ONNX Runtime session for validation.""" + if is_fp8: + sess_options = ort.SessionOptions() + load_custom_ops(sess_options) + # Model loading successfully indicates that the custom op node could be resolved successfully + s = ort.InferenceSession(fname, sess_options=sess_options) + else: + s = ort.InferenceSession(fname) + return s + + def create_ort_input_dict(session, inps): + inp_dict = {} + if isinstance(inps, tuple) or isinstance(inps, list): + for idx, inp in enumerate(inps): + if inp is None: + continue + inp_dict[session.get_inputs()[idx].name] = to_numpy(inp) + else: + inp_dict[session.get_inputs()[0].name] = to_numpy(inps) + return inp_dict + + # Run ORT session and TE model. 
+ fname = os.path.join(ONNX_FILES_DIR, fname) + ort_s = create_ort_session(fname, is_fp8) + onnx_outputs = ort_s.run(None, input_feed=create_ort_input_dict(ort_s, inps)) + te_outputs = te_infer(model, inps, is_fp8) + + # Compare ORT and TE outputs. + assert len(onnx_outputs) == len(te_outputs) + for onnx_output, te_output in zip(onnx_outputs, te_outputs): + + # Compare ORT and PyTorch outputs. + # np.isclose: abs(a - b) <= (atol + rtol * abs(b)) + ac = ~np.isclose(onnx_output, te_output, atol=atol, rtol=rtol) + + mismatches = ac.nonzero() + mismatched_ids = [loc for loc in zip(*mismatches)] + if mismatched_ids: + # Log some information in case of error. + print("*" * 100) + print(onnx_output.shape) + nb_vals = min(len(mismatched_ids), max_errors_printed) + print(f"Detected {len(mismatched_ids)} diverging values.\nShowing first {nb_vals} errors (ONNX -- TE):") + abs_err = abs(onnx_output - te_output) + for loc in mismatched_ids[:nb_vals]: + ref = te_output[loc] + print(f"{onnx_output[loc]} -- {te_output[loc]} err={abs_err[loc]} > {atol + rtol * abs(ref)}") + raise ValueError(f"Output validation of {fname} failed with {len(mismatched_ids)} errors") + + +def create_meta(scale_factor: float, size: int=1): + meta = tex.FP8TensorMeta() + meta.amax_history = torch.zeros(1, size, dtype=torch.float32, device="cuda") + meta.scale_inv = torch.ones(size, dtype=torch.float32, device="cuda") / scale_factor + meta.scale = torch.ones(size, dtype=torch.float32, device="cuda") * scale_factor + return meta + + +def dtype2str(dtype: torch.dtype): + return { + torch.float32: "_fp32", + torch.float16: "_fp16", + torch.bfloat16: "_bf16", + }[dtype] + + +def as_te_type(dtype: torch.dtype): + return { + torch.float32: tex.DType.kFloat32, + torch.float16: tex.DType.kFloat16, + torch.bfloat16: tex.DType.kBFloat16, + }[dtype] + + +@pytest.mark.parametrize("scale_factor, atol", [ + (1, 1e-7), + (224, 1e-7) +]) +@pytest.mark.parametrize("precision", [torch.float32, torch.float16]) +def 
test_export_cast_ops(scale_factor: float, atol: float, precision: torch.dtype): + class TestFP8_QDQ(nn.Module): + def __init__(self): + super().__init__() + self.fp8_tensor = 0 + self.meta = create_meta(scale_factor) + self.highprec_type = as_te_type(precision) + self.fp8_type = tex.DType.kFloat8E4M3 + + def forward(self, inp): + ret = cast_to_fp8( + inp, + self.meta, + self.fp8_tensor, + self.fp8_type) + + ret = cast_from_fp8( + ret, + self.meta, + self.fp8_tensor, + self.fp8_type, + self.highprec_type) + return ret + + # Set dimensions (these are arbitrary). + in_features = 64 + hidden_size = 256 + inp = torch.randn(hidden_size, in_features, device="cuda", dtype=precision) + high_prec_str = dtype2str(precision) + fname = f"te.cast_fp8.s_{scale_factor}{high_prec_str}.onnx" + model = TestFP8_QDQ() + do_export(model, inp, fname) + validate_result(fname, inp, model, atol=atol, is_fp8=True) + + +@pytest.mark.parametrize("scale_factor", [448]) +@pytest.mark.parametrize("precision, atol", [ + [torch.float32, 1e-7], [torch.float16, 2e-3]] +) +def test_export_gelu_fp8(scale_factor: float, precision: torch.dtype, atol: float): + class TestFP8_Gelu(nn.Module): + def __init__(self): + super().__init__() + self.fp8_tensor = 0 + self.meta = create_meta(scale_factor) + self.highprec_type = as_te_type(precision) + self.fp8_type = tex.DType.kFloat8E4M3 + + def forward(self, inp): + ret = fp8_gelu( + inp, + self.meta, + self.fp8_tensor, + self.fp8_type) + ret = cast_from_fp8( + ret, + self.meta, + self.fp8_tensor, + self.fp8_type, + self.highprec_type) + return ret + + # Set dimensions (these are arbitrary). 
+ in_features = 64 + hidden_size = 256 + inp = torch.randn(hidden_size, in_features, device="cuda", dtype=precision) + high_prec_str = dtype2str(precision) + fname = f"te.gelu_fp8{high_prec_str}.onnx" + model = TestFP8_Gelu() + do_export(model, inp, fname) + validate_result(fname, inp, model, rtol=1e-1, atol=atol, is_fp8=True) + + +@pytest.mark.parametrize("scale_factors", [(224, 224,), ]) +@pytest.mark.parametrize( + "precision, use_fp8, use_bias, use_gelu", [ + (torch.float32, False, False, False), + (torch.float16, False, False, False), + (torch.float32, False, True, False), + (torch.float16, False, True, False), + (torch.float32, False, True, True), + (torch.float16, False, True, True), + + # For FP8 GEMM GeLU is not used. + (torch.float32, True, False, False), + (torch.float16, True, False, False), + # When enabling bias we must use float16 or bfloat16 (because of kernel limitations) + (torch.float16, True, True, False), + (torch.bfloat16, True, True, False), +]) +def test_export_gemm( + precision, # Precision of inputs, weights, output and bias + use_fp8, + use_bias, + use_gelu, + scale_factors +): + class TestFP8_GEMM(nn.Module): + def __init__(self, precision, use_bias, gelu, scale_factors): + super().__init__() + self.use_bias = use_bias + self.gelu = gelu + self.precision = precision + + self.fp8_tensor_inp = tex.FP8FwdTensors.GEMM1_INPUT + self.fp8_tensor_weight = tex.FP8FwdTensors.GEMM1_WEIGHT + nb_inp_scales, nb_weight_scales = 1, out_features + act_scale_factor, weight_scale_factor = scale_factors + self.meta_inp = create_meta(act_scale_factor, nb_inp_scales) + self.meta_weight = create_meta(weight_scale_factor, nb_weight_scales) + + bias_size = nb_weight_scales + self.bias = torch.randn(bias_size, dtype=precision, device="cuda") + self.gelu_input = torch.randn(hidden_size, out_features, dtype=precision, device="cuda") + + self.inp_type = tex.DType.kFloat8E4M3 + self.weights_type = tex.DType.kFloat8E4M3 + self.outp_type = precision + + def 
forward(self, inp, weight): + inp_fp8 = cast_to_fp8( + inp, + self.meta_inp, + self.fp8_tensor_inp, + self.inp_type) + + weight_fp8 = cast_to_fp8( + weight, + self.meta_weight, + self.fp8_tensor_weight, + self.weights_type) + + ret = fp8_gemm( + weight_fp8, + self.meta_weight.scale_inv, + self.fp8_tensor_weight, + self.inp_type, + inp_fp8, + self.meta_inp.scale_inv, + self.fp8_tensor_inp, + self.weights_type, + self.outp_type, + get_workspace(), + bias=self.bias, + use_bias=self.use_bias, + fp32_output=(self.precision==torch.float32), + use_split_accumulator=False) + return ret + + class Test_GEMM(nn.Module): + def __init__(self, precision, use_bias=False, gelu=False): + super().__init__() + self.use_bias = use_bias + self.gelu = gelu + self.precision = precision + bias_size = out_features + self.bias = torch.randn(bias_size, dtype=precision, device="cuda") + self.gelu_input = torch.randn(hidden_size, out_features, dtype=precision, device="cuda") + + def forward(self, inp, weight): + outp_type = self.precision + + # note: due to logic in lines 104:116 and L129 in cpp_extensions.py + # it appears either bias OR gelu can be activated, not both + ret, _, _ = gemm( + weight, + inp, + outp_type, + get_workspace(), + + # test bias + bias=self.bias, + use_bias=self.use_bias, + + # test gelu + gelu=self.gelu, + gelu_input=self.gelu_input, + grad=False # only True for backward pass + ) + return ret + + # If gelu is applied then bias must be added, as defined by TE kernel. + if use_gelu: assert use_bias + # Set dimensions (these are arbitrary). 
+ out_features = 128 + hidden_size = 256 + in_features = 64 + inp = torch.randn(hidden_size, in_features, dtype=precision, device="cuda") + weight = torch.randn(out_features, in_features, dtype=precision, device="cuda") + fp8_str = "_fp8" if use_fp8 else "" + bias_str = "_bias" if use_bias else "" + gelu_str = "_gelu" if use_gelu else "" + high_prec_str = dtype2str(precision) + fname = f"te.gemm{fp8_str}{bias_str}{gelu_str}{high_prec_str}.onnx" + if use_fp8: + model = TestFP8_GEMM(precision, use_bias, use_gelu, scale_factors) + do_export(model, (inp, weight), fname, use_fp8) + if precision not in (torch.bfloat16, torch.float16): + validate_result(fname, (inp, weight), model, rtol=1e-2, atol=1e-2, is_fp8=True) + else: + model = Test_GEMM(precision, use_bias, use_gelu) + do_export(model, (inp, weight), fname, use_fp8) + validate_result(fname, (inp, weight), model, rtol=1e-2, atol=1e-2) + + +@pytest.mark.parametrize("use_fp8", [False, True]) +@pytest.mark.parametrize("scale_factor", [448, 112]) +@pytest.mark.parametrize("precision", [torch.float32, torch.float16]) +def test_export_layernorm( + use_fp8: bool, + scale_factor: float, + precision: torch.dtype +): + # Set dimensions (these are arbitrary). 
+ inp_shape = [64, 32] + + class Test_Layernorm(nn.Module): + def __init__(self) -> None: + super().__init__() + normalized_shape = torch.Size(inp.shape[1:]) + self.weight = torch.randn(*normalized_shape, dtype=precision, device="cuda") + self.bias = torch.zeros(*normalized_shape, dtype=precision, device="cuda") + self.eps = 1e-6 # An arbitrary small value + + def forward(self, inp): + ret = texcpp.layernorm_fwd_inf( + inp, + self.weight, + self.bias, + self.eps) + return ret + + class TestFP8_Layernorm(nn.Module): + def __init__(self) -> None: + super().__init__() + normalized_shape = torch.Size(inp.shape[1:]) + self.weight = torch.randn(*normalized_shape, dtype=precision, device="cuda") + self.bias = torch.zeros(*normalized_shape, dtype=precision, device="cuda") + self.eps = 1e-6 # An arbitrary small value + + self.fp8_tensor = tex.FP8FwdTensors.GEMM1_INPUT + self.meta = create_meta(scale_factor) + self.fp8_type = tex.DType.kFloat8E4M3 + + def forward(self, inp): + ret = texcpp.layernorm_fwd_fp8_inf( + inp, + self.weight, + self.bias, + self.eps, + self.meta, + self.fp8_type) + + ret = cast_from_fp8( + ret, + self.meta, + self.fp8_tensor, + self.fp8_type, + tex.DType.kFloat32 if precision == torch.float32 else tex.DType.kFloat16) + return ret + + inp = torch.randn(*inp_shape, device="cuda", dtype=precision) + high_prec_str = dtype2str(precision) + if use_fp8: + fname = f"te.layernorm_fwd_fp8{high_prec_str}.onnx" + model = TestFP8_Layernorm() + else: + fname = f"te.layernorm_fwd{high_prec_str}.onnx" + model = Test_Layernorm() + + do_export(model, inp, fname) + if precision not in (torch.bfloat16, ): + validate_result(fname, inp, model, atol=1e-5, is_fp8=use_fp8) + + +@pytest.mark.parametrize("softmax_def", [ + softmax_defs.ScaledUpperTriangMaskedSoftmax, + softmax_defs.ScaledMaskedSoftmax, + softmax_defs.ScaledSoftmax, +]) +# Softmax kernel only supports FP16 or BF16! 
+@pytest.mark.parametrize("precision", [torch.float16, torch.bfloat16]) +def test_export_softmax(softmax_def, precision): + class Test_Softmax(nn.Module): + def __init__(self, softmax_function, mask_inp=False): + super().__init__() + self.softmax_fn = softmax_function + self.mask_inp = mask_inp + + def forward(self, inp, mask): + scale_factor = 8 # arbitrary value + if self.mask_inp: + ret = self.softmax_fn.apply(inp, mask, scale_factor) + else: + ret = self.softmax_fn.apply(inp, scale_factor) + return ret + + # Set dimensions (these are arbitrary). + in_features = 64 + hidden_size = 256 + mask = None + input_names = ["input"] + inp_shape = [hidden_size, in_features, in_features, in_features] + if softmax_def == softmax_defs.ScaledUpperTriangMaskedSoftmax: + inp_shape = [hidden_size, in_features, in_features] + kernel_str = "te.ScaledUpperTriangMaskedSoftmax" + model = Test_Softmax(softmax_def) + elif softmax_def == softmax_defs.ScaledMaskedSoftmax: + # Generate a random mask with 50% probability for 0 or 1. 
+ probs = 0.5 * torch.ones(hidden_size, 1, in_features, in_features, device="cuda", dtype=precision) + mask = torch.bernoulli(probs).to("cuda", dtype=torch.bool) + input_names.append("mask") + kernel_str = "te.ScaledMaskedSoftmax" + model = Test_Softmax(softmax_def, mask_inp=True) + elif softmax_def == softmax_defs.ScaledSoftmax: + kernel_str = "te.ScaledSoftmax" + model = Test_Softmax(softmax_def) + input_tensor = torch.randn(*inp_shape, device="cuda") + input_tensor = input_tensor.to(torch.bfloat16) if precision == torch.bfloat16 else input_tensor.half() + high_prec_str = dtype2str(precision) + fname = f"{kernel_str}{high_prec_str}.onnx" + inp = (input_tensor, mask) + do_export(model, inp, fname, input_names=input_names) + if precision != torch.bfloat16: + validate_result(fname, inp, model, atol=1e-3) + + +@pytest.mark.parametrize("scale_factor", [1]) +@pytest.mark.parametrize("use_fp8", [False, True]) +@pytest.mark.parametrize("return_bias", [False, True]) +@pytest.mark.parametrize( + "precision, use_bias",[ + (torch.float32, False), + (torch.float32, True), + # Todo: cannot configure FP16/BF16 when bias is disabled - + # AssertionError: Data type for activations and buffers must match when outside of autocasted region + # (torch.float16, False), + (torch.float16, True), + #(torch.bfloat16, False), + # Todo: cannot configure BF16 when bias is enabled (ORT issue?) + # (torch.bfloat16, True), +]) +def test_export_linear( + scale_factor: float, + use_fp8: bool, + use_bias: bool, + return_bias: bool, + precision: torch.dtype +): + # Set dimensions (these are arbitrary). 
+ in_features = 64 + out_features = 256 + hidden_size = 256 + + inp = torch.randn(hidden_size, in_features, device="cuda", dtype=precision) + fp8_str = "_fp8" if use_fp8 else "" + bias_str = "_bias" if use_bias else "" + high_prec_str = dtype2str(precision) + fname = f"te.linear{fp8_str}{bias_str}{high_prec_str}.onnx" + with te.fp8_autocast(enabled=use_fp8): + model = te.Linear( + in_features, + out_features, + bias=use_bias, + return_bias=return_bias, + params_dtype=precision + ).to(device='cuda') + if use_fp8: + set_layer_scale(model, scale_factor) + do_export(model, inp, fname, use_fp8) + + if not use_fp8: + validate_result(fname, inp, model, atol=1e-3) + elif precision not in (torch.bfloat16, ): + validate_result(fname, inp, model, atol=1e-3, is_fp8=use_fp8) + + +@pytest.mark.parametrize("scale_factor", [112]) +@pytest.mark.parametrize("use_fp8", [False, True]) +# Todo: handle case of True +@pytest.mark.parametrize("return_bias", [False]) +@pytest.mark.parametrize("return_layernorm_output", [False]) +@pytest.mark.parametrize( + "precision, use_bias",[ + (torch.float32, False), + (torch.float32, True), + (torch.float16, True), + # Todo: cannot configure FP16 when bias is disabled + #(torch.float16, False), +]) +def test_export_layernorm_linear( + scale_factor: float, + use_fp8: bool, + use_bias: bool, + return_bias: bool, + return_layernorm_output: bool, + precision: torch.dtype +): + # Set dimensions (these are arbitrary). 
+ in_features = 64 + out_features = 256 + hidden_size = 256 + + inp = torch.randn(in_features, out_features, device="cuda", dtype=precision) + fp8_str = "_fp8" if use_fp8 else "" + bias_str = "_bias" if use_bias else "" + high_prec_str = dtype2str(precision) + fname = f"te.layernorm_linear{fp8_str}{bias_str}{high_prec_str}.onnx" + with te.fp8_autocast(enabled=use_fp8): + model = te.LayerNormLinear( + hidden_size, + 3 * hidden_size, + bias=use_bias, + return_bias=return_bias, + return_layernorm_output=return_layernorm_output, + params_dtype=precision, + ).to(device='cuda') + if use_fp8: + set_layer_scale(model, scale_factor) + do_export(model, inp, fname, use_fp8) + if not use_fp8: + validate_result(fname, inp, model, atol=1e-3) + elif precision not in (torch.bfloat16,): + validate_result(fname, inp, model, atol=1e-3, is_fp8=use_fp8) + + +@pytest.mark.parametrize("scale_factor", [112]) +@pytest.mark.parametrize("use_fp8", [False, True]) +# Todo: handle case of True +@pytest.mark.parametrize("return_bias", [False]) +@pytest.mark.parametrize("return_layernorm_output", [False]) +# Todo: cannot handle FP16 for some reason +@pytest.mark.parametrize( + "precision, use_bias",[ + (torch.float32, False), + (torch.float32, True), + (torch.float16, True), + # Todo: cannot configure FP16 when bias is disabled + #(torch.float16, False), +]) +def test_export_layernorm_mlp( + scale_factor: float, + use_fp8: bool, + use_bias: bool, + return_bias: bool, + return_layernorm_output: bool, + precision: torch.dtype +): + # Set dimensions (these are arbitrary). 
+ in_features = 64 + out_features = 256 + hidden_size = 256 + ffn_hidden_size = 256 + + inp = torch.randn(in_features, out_features, device="cuda", dtype=precision) + fp8_str = "_fp8" if use_fp8 else "" + bias_str = "_bias" if use_bias else "" + high_prec_str = dtype2str(precision) + fname = f"te.layernorm_mlp{fp8_str}{bias_str}{high_prec_str}.onnx" + with te.fp8_autocast(enabled=use_fp8): + model = te.LayerNormMLP( + hidden_size, + ffn_hidden_size, + bias=use_bias, + return_bias=return_bias, + return_layernorm_output=return_layernorm_output, + params_dtype=precision, + ).to(device='cuda') + if use_fp8: + set_layer_scale(model, scale_factor) + do_export(model, inp, fname, use_fp8) + if not use_fp8: + validate_result(fname, inp, model, atol=1e-3) + else: + validate_result(fname, inp, model, atol=1e-2, is_fp8=use_fp8) + + +@pytest.mark.parametrize( + "precision, use_mask, attn_mask_type", [ + (torch.float32, False, None), # calls forward_torch_softmax + (torch.float32, True, None), # calls forward_torch_softmax + (torch.float16, False, "causal"), # calls ScaledUpperTriangMaskedSoftmax + (torch.float16, True, "padding"), # calls ScaledMaskedSoftmax + (torch.float16, False, "padding"), # calls ScaledSoftmax +]) +@pytest.mark.parametrize("attention_softmax_in_fp32", [True, False]) +@pytest.mark.parametrize("apply_query_key_layer_scaling", [True, False]) +def test_export_core_attention( + precision: torch.dtype, + use_mask: bool, + attn_mask_type: str, + attention_softmax_in_fp32: bool, + apply_query_key_layer_scaling: bool, +): + if attn_mask_type is None: + attn_mask_type = 'causal' + + # Set dimensions (these are arbitrary). 
+ kv_channels = 64 + num_attention_heads = 1 + qkv_size = (2048, 4, num_attention_heads, kv_channels) + + query_layer = torch.randn(qkv_size, dtype=precision, device="cuda") + key_layer = torch.randn(qkv_size, dtype=precision, device="cuda") + value_layer = torch.randn(qkv_size, dtype=precision, device="cuda") + input_names = ["query", "key", "value"] + attention_mask = None + if use_mask: + # Generate a random mask with 50% probability for 0 or 1. + probs = 0.5 * torch.ones(qkv_size[1], qkv_size[2], qkv_size[0], qkv_size[0], device="cuda", dtype=precision) + attention_mask = torch.bernoulli(probs).to("cuda", dtype=torch.bool) + input_names.append("attention_mask") + inp = (query_layer, key_layer, value_layer, attention_mask) + + sm_prec_str = "_fp32" if attention_softmax_in_fp32 else "_fp16" + qk_scaling_str = "_qk_scaling" if apply_query_key_layer_scaling else "" + mask_str = "_masked" if use_mask else \ + "_upper_trian_masked" if attn_mask_type=="causal" and precision == torch.float16 else \ + "" + high_prec_str = dtype2str(precision) + fname = f"te.core_attention{mask_str}{qk_scaling_str}{sm_prec_str}{high_prec_str}.onnx" + + model = te.transformer.CoreAttention( + num_attention_heads=num_attention_heads, + kv_channels=kv_channels, + attention_dropout=0.5, + attn_mask_type=attn_mask_type, + attention_softmax_in_fp32=attention_softmax_in_fp32, + apply_query_key_layer_scaling=apply_query_key_layer_scaling, + ).to(device='cuda') + do_export(model, + inp, + fname, + input_names=input_names, + use_fp8=True) + validate_result(fname, inp, model, atol=1e-2) + + +test_configs_multihead_attention = [ + (False, "causal"), # calls ScaledUpperTriangMaskedSoftmax + (True, "padding"), # calls ScaledMaskedSoftmax + (False, "padding"), # calls ScaledSoftmax +] +@pytest.mark.parametrize("use_fp8", [False, True]) +@pytest.mark.parametrize("use_mask, attn_mask_type", test_configs_multihead_attention) +@pytest.mark.parametrize("precision", [torch.float32, torch.float16]) 
+@pytest.mark.parametrize("input_layernorm", [True, False]) +@pytest.mark.parametrize("return_layernorm_output", [False]) +@pytest.mark.parametrize("attention_type", [ + "self", + #"cross" # TODO: handle this ORT error +]) +@pytest.mark.parametrize("fuse_qkv_params", [False, True]) +def test_export_multihead_attention( + use_fp8: bool, + use_mask: bool, + attn_mask_type: str, + precision: torch.dtype, + return_layernorm_output: bool, + input_layernorm: bool, + attention_type: str, + fuse_qkv_params: bool +): + hidden_size = 256 + sequence_length = 128 + batch_size = 4 + num_attention_heads = 32 + kv_channels = 8 + attention_dropout = 0.1 + layernorm_epsilon = 1e-5 + init_method = output_layer_init_method = get_default_init_method() + attention_args = ( + hidden_size, + num_attention_heads, + kv_channels, + attention_dropout, + layernorm_epsilon, + init_method, + output_layer_init_method, + ) + hidden_states = torch.randn(sequence_length, batch_size, hidden_size, dtype=precision, device="cuda") + input_names = ["hidden_states"] + attention_mask = None + if use_mask and attn_mask_type != "causal": + # Generate a random mask with 50% probability for 0 or 1. 
+ probs = 0.5 * torch.ones(batch_size, 1, sequence_length, sequence_length, device="cuda", dtype=precision) + attention_mask = torch.bernoulli(probs).to("cuda", dtype=torch.bool) + input_names.append("attention_mask") + inp = (hidden_states, attention_mask) + + fp8_str = "_fp8" if use_fp8 else "" + dtype_str = dtype2str(precision) + attn_type_str = "_self_attention" if attention_type == "self" else "_cross_attention" + fuse_qkv_str = "_fused" if fuse_qkv_params else "" + attn_mask_type_str = f"_{attn_mask_type}" if (use_mask and attn_mask_type != "") else "" + fname = f"te.multihead_attention{fp8_str}{attn_mask_type_str}{attn_type_str}{fuse_qkv_str}{dtype_str}.onnx" + + model = te.transformer.MultiHeadAttention( + *attention_args, + attn_mask_type=attn_mask_type, + params_dtype=precision, + return_layernorm_output=return_layernorm_output, + input_layernorm=input_layernorm, + attention_type=attention_type, + fuse_qkv_params=fuse_qkv_params, + ).to(device='cuda') + do_export(model, inp, fname, use_fp8, input_names=input_names) + if not use_fp8: + validate_result(fname, inp, model, atol=1e-3) + elif precision != torch.float16: + validate_result(fname, inp, model, atol=1e-2, is_fp8=use_fp8) + + +@pytest.mark.parametrize("use_fp8", [False, True]) +@pytest.mark.parametrize("use_mask, attn_mask_type", test_configs_multihead_attention) +@pytest.mark.parametrize("output_layernorm", [ + #True, # TO DO: handle this + False +]) +@pytest.mark.parametrize("precision", [torch.float32, torch.float16]) +@pytest.mark.parametrize("fuse_qkv_params", [False, True]) +@pytest.mark.parametrize("apply_query_key_layer_scaling", [True, False]) +def test_export_transformer_layer( + use_fp8: bool, + use_mask: bool, + attn_mask_type: str, + output_layernorm: bool, + precision: torch.dtype, + fuse_qkv_params: bool, + apply_query_key_layer_scaling: bool +): + # Layer configuration + hidden_size = 64 + sequence_length = 128 + batch_size = 1 + ffn_hidden_size = 256 + num_attention_heads = 4 + + 
input_tensor = torch.rand(sequence_length, batch_size, hidden_size, dtype=precision, device="cuda") + input_names = ["input"] + attention_mask = None + if use_mask and attn_mask_type != "causal": + # Generate a random mask with 50% probability for 0 or 1. + probs = 0.5 * torch.ones(batch_size, 1, sequence_length, sequence_length, device="cuda", dtype=precision) + attention_mask = torch.bernoulli(probs).to("cuda", dtype=torch.bool) + input_names.append("attention_mask") + inp = (input_tensor, attention_mask) + + fp8_str = "_fp8" if use_fp8 else "" + fuse_qkv_params_str = "_fuse-qkv" if fuse_qkv_params else "" + qk_scaling_str = "_qk-scaling" if apply_query_key_layer_scaling else "" + high_prec_str = dtype2str(precision) + attn_mask_type_str = f"_{attn_mask_type}" if (use_mask and attn_mask_type != "") else "" + fname = f"te.transformer_layer{fp8_str}{attn_mask_type_str}{fuse_qkv_params_str}{qk_scaling_str}{high_prec_str}.onnx" + + model = te.TransformerLayer( + hidden_size, + ffn_hidden_size, + num_attention_heads, + self_attn_mask_type=attn_mask_type, + output_layernorm=output_layernorm, + params_dtype=precision, + fuse_qkv_params=fuse_qkv_params, + apply_query_key_layer_scaling=apply_query_key_layer_scaling).to(device='cuda') + do_export(model, inp, fname, use_fp8) + if not use_fp8: + validate_result(fname, inp, model, atol=1e-3) + elif precision != torch.float16: + validate_result(fname, inp, model, atol=5e-1, is_fp8=use_fp8) diff --git a/transformer_engine/pytorch/__init__.py b/transformer_engine/pytorch/__init__.py index 1c5ddd5c09..b941896d49 100644 --- a/transformer_engine/pytorch/__init__.py +++ b/transformer_engine/pytorch/__init__.py @@ -10,3 +10,5 @@ from .transformer import TransformerLayer from .fp8 import fp8_autocast from .distributed import checkpoint +# Register custom op symbolic ONNX functions +from .te_onnx_extensions import * diff --git a/transformer_engine/pytorch/cpp_extensions.py b/transformer_engine/pytorch/cpp_extensions.py index 
0db38d25ad..6d3ffc0839 100644 --- a/transformer_engine/pytorch/cpp_extensions.py +++ b/transformer_engine/pytorch/cpp_extensions.py @@ -12,9 +12,11 @@ def fp8_gemm( A: torch.Tensor, A_scale_inv: torch.Tensor, + A_fp8_tensor: Union[tex.FP8FwdTensors, tex.FP8BwdTensors], A_dtype: tex.DType, B: torch.Tensor, B_scale_inv: torch.Tensor, + B_fp8_tensor: Union[tex.FP8FwdTensors, tex.FP8BwdTensors], B_dtype: tex.DType, out_dtype: torch.dtype, workspace: torch.Tensor, @@ -41,19 +43,21 @@ def fp8_gemm( out_dtype = tex.DType.kFloat32 if fp32_output else TE_DType[out_dtype] - tex.te_gemm( + _ = torch.ops.tex_ts.te_gemm_ts( A, A_scale_inv, + A_fp8_tensor, A_dtype, True, # transa B, B_scale_inv, + B_fp8_tensor, B_dtype, False, # transb out, out_dtype, bias if use_bias else empty_tensor, - empty_tensor, + empty_tensor, # this is pre_gelu_out False, # grad workspace, workspace.shape[0], @@ -87,6 +91,7 @@ def gemm( transa = layout[0] == "T" transb = layout[1] == "T" empty_tensor = torch.Tensor() + fp8_index = -1 # dummy index input_dtype = TE_DType[dtype] output_dtype = tex.DType.kFloat32 if fp32_output else input_dtype @@ -115,13 +120,15 @@ def gemm( bias = bias if use_bias else empty_tensor - tex.te_gemm( + _ = torch.ops.tex_ts.te_gemm_ts( A, empty_tensor, + fp8_index, input_dtype, transa, B, empty_tensor, + fp8_index, input_dtype, transb, out, @@ -214,11 +221,12 @@ def fp8_gelu( otype: tex.DType, ) -> torch.Tensor: """GeLU with FP8 output""" - return tex.fp8_gelu( + return torch.ops.tex_ts.fp8_gelu_ts( inp, - fp8_meta_tensor.scale[fp8_tensor], - fp8_meta_tensor.amax_history[0][fp8_tensor], - fp8_meta_tensor.scale_inv[fp8_tensor], + fp8_meta_tensor.scale, + fp8_meta_tensor.amax_history, + fp8_meta_tensor.scale_inv, + fp8_tensor, otype, ) @@ -245,6 +253,46 @@ def layernorm_fwd_fp8( ) +def layernorm_fwd_fp8_inf( + inp: torch.Tensor, + weight: torch.Tensor, + bias: torch.Tensor, + eps: float, + fp8_meta_tensor: tex.FP8TensorMeta, + otype: tex.DType, +) -> torch.Tensor: + 
"""LayerNorm with FP8 output. + + This version of layernorm_fwd_fp8 is specialized for inference, and returns + only the normalized output. + """ + ret = torch.ops.tex_ts.layernorm_fwd_fp8_inf_ts( + inp, + weight, + bias, + eps, + fp8_meta_tensor.scale, + fp8_meta_tensor.amax_history, + fp8_meta_tensor.scale_inv, + otype) + return ret + + +def layernorm_fwd_inf( + inp: torch.Tensor, + weight: torch.Tensor, + bias: torch.Tensor, + eps: float, +) -> torch.Tensor: + """LayerNorm with FP8 output""" + return torch.ops.tex_ts.layernorm_fwd_inf_ts( + inp, + weight, + bias, + eps, + ) + + def cast_to_fp8( inp: torch.Tensor, fp8_meta_tensor: tex.FP8TensorMeta, @@ -252,11 +300,12 @@ def cast_to_fp8( otype: tex.DType, ) -> torch.Tensor: """Cast input to FP8""" - return tex.cast_to_fp8( + return torch.ops.tex_ts.cast_to_fp8_ts( inp, - fp8_meta_tensor.scale[fp8_tensor], - fp8_meta_tensor.amax_history[0][fp8_tensor], - fp8_meta_tensor.scale_inv[fp8_tensor], + fp8_meta_tensor.scale, + fp8_meta_tensor.amax_history, + fp8_meta_tensor.scale_inv, + fp8_tensor, otype, ) @@ -269,9 +318,10 @@ def cast_from_fp8( otype: tex.DType, ) -> torch.Tensor: """Cast input from FP8""" - return tex.cast_from_fp8( + return torch.ops.tex_ts.cast_from_fp8_ts( inp, - fp8_meta_tensor.scale_inv[fp8_tensor], + fp8_meta_tensor.scale_inv, + fp8_tensor, itype, otype, ) diff --git a/transformer_engine/pytorch/csrc/common.h b/transformer_engine/pytorch/csrc/common.h index 43389294dd..a4e8fb8a7a 100644 --- a/transformer_engine/pytorch/csrc/common.h +++ b/transformer_engine/pytorch/csrc/common.h @@ -94,6 +94,8 @@ inline transformer_engine::DType GetTransformerEngineDType(at::ScalarType t) { return transformer_engine::DType::kFloat32; case at::kBFloat16: return transformer_engine::DType::kBFloat16; + case at::kBool: + return transformer_engine::DType::kByte; default: NVTE_ERROR("Invalid type"); } diff --git a/transformer_engine/pytorch/csrc/extensions.cu b/transformer_engine/pytorch/csrc/extensions.cu index 
4dc18d3fae..ebc5c6d7fa 100644 --- a/transformer_engine/pytorch/csrc/extensions.cu +++ b/transformer_engine/pytorch/csrc/extensions.cu @@ -397,6 +397,23 @@ std::vector layernorm_fwd_fp8(const at::Tensor &input, } +at::Tensor layernorm_fwd_fp8_inf(const at::Tensor &input, + const at::Tensor &weight, + const at::Tensor &bias, + float eps, + at::Tensor scale, + at::Tensor amax, + at::Tensor scale_inv, + transformer_engine::DType otype +) { + // This is a specialized version of layernorm_fwd_fp8, optimized for inference, + // which only returns the normalized output. + std::vector out = layernorm_fwd_fp8( + input, weight, bias, eps, scale, amax, scale_inv, otype); + return out[0]; +} + + std::vector layernorm_fwd(const at::Tensor &input, const at::Tensor &weight, const at::Tensor &bias, @@ -428,6 +445,16 @@ std::vector layernorm_fwd(const at::Tensor &input, return {ln_out, mu, rsigma}; } +at::Tensor layernorm_fwd_inf(const at::Tensor &input, + const at::Tensor &weight, + const at::Tensor &bias, + float eps +) { + // This is a specialized version of layernorm_fwd, optimized for inference, + // which only returns the normalized output. 
+ std::vector out = layernorm_fwd(input, weight, bias, eps); + return out[0]; +} at::Tensor cast_to_fp8(const at::Tensor &input, const at::Tensor &scale, diff --git a/transformer_engine/pytorch/csrc/extensions.h b/transformer_engine/pytorch/csrc/extensions.h index e2717203d5..434eacb8eb 100644 --- a/transformer_engine/pytorch/csrc/extensions.h +++ b/transformer_engine/pytorch/csrc/extensions.h @@ -95,6 +95,15 @@ std::vector layernorm_fwd_fp8(const at::Tensor &input, transformer_engine::DType otype ); +at::Tensor layernorm_fwd_fp8_inf(const at::Tensor &input, + const at::Tensor &weight, + const at::Tensor &bias, + float eps, + at::Tensor scale, + at::Tensor amax, + at::Tensor scale_inv, + transformer_engine::DType otype +); std::vector layernorm_fwd(const at::Tensor &input, const at::Tensor &weight, @@ -102,6 +111,11 @@ std::vector layernorm_fwd(const at::Tensor &input, float eps ); +at::Tensor layernorm_fwd_inf(const at::Tensor &input, + const at::Tensor &weight, + const at::Tensor &bias, + float eps +); at::Tensor cast_to_fp8(const at::Tensor &input, const at::Tensor &scale, diff --git a/transformer_engine/pytorch/csrc/ts_fp8_op.cpp b/transformer_engine/pytorch/csrc/ts_fp8_op.cpp new file mode 100755 index 0000000000..94e46f0dce --- /dev/null +++ b/transformer_engine/pytorch/csrc/ts_fp8_op.cpp @@ -0,0 +1,178 @@ +/************************************************************************* + * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * + * See LICENSE for license information. 
+ ************************************************************************/ + +#include +#include "extensions.h" + +transformer_engine::DType reverse_map_dtype(int64_t dtype) { + switch (dtype) { + case static_cast(transformer_engine::DType::kByte): + return transformer_engine::DType::kByte; + case static_cast(transformer_engine::DType::kInt32): + return transformer_engine::DType::kInt32; + case static_cast(transformer_engine::DType::kFloat32): + return transformer_engine::DType::kFloat32; + case static_cast(transformer_engine::DType::kFloat16): + return transformer_engine::DType::kFloat16; + case static_cast(transformer_engine::DType::kBFloat16): + return transformer_engine::DType::kBFloat16; + case static_cast(transformer_engine::DType::kFloat8E4M3): + return transformer_engine::DType::kFloat8E4M3; + case static_cast(transformer_engine::DType::kFloat8E5M2): + return transformer_engine::DType::kFloat8E5M2; + default: + NVTE_ERROR("Type not supported."); + } +} + + +at::Tensor cast_to_fp8_ts(const at::Tensor &input, + const at::Tensor &scale, + const at::Tensor &amax, + const at::Tensor &scale_inv, + int64_t fp8_tensor, + int64_t otype) { + transformer_engine::DType otype_arg = reverse_map_dtype(otype); + at::Tensor output = cast_to_fp8(input, + scale[fp8_tensor], + amax[0][fp8_tensor], + scale_inv[fp8_tensor], + otype_arg); + return output.clone(); +} + +at::Tensor cast_from_fp8_ts(const at::Tensor &input, + const at::Tensor &scale_inv, + int64_t fp8_tensor, + int64_t itype, + int64_t otype) { + transformer_engine::DType itype_arg = reverse_map_dtype(itype); + transformer_engine::DType otype_arg = reverse_map_dtype(otype); + at::Tensor output = cast_from_fp8(input, + scale_inv[fp8_tensor], + itype_arg, + otype_arg); + return output.clone(); +} + +at::Tensor fp8_gelu_ts(at::Tensor input, + at::Tensor scale, + at::Tensor amax, + at::Tensor scale_inv, + int64_t fp8_tensor, + int64_t otype) { + transformer_engine::DType otype_arg = reverse_map_dtype(otype); + 
at::Tensor output = fp8_gelu(input, + scale[fp8_tensor], + amax[0][fp8_tensor], + scale_inv[fp8_tensor], + otype_arg); + return output.clone(); +} + +at::Tensor te_gemm_ts(at::Tensor A, + at::Tensor A_scale_inverse, + int64_t A_fp8_tensor, + int64_t A_type, + int64_t transa, + at::Tensor B, + at::Tensor B_scale_inverse, + int64_t B_fp8_tensor, + int64_t B_type, + int64_t transb, + at::Tensor D, + int64_t D_type, + at::Tensor bias, + at::Tensor pre_gelu_out, + int64_t grad, + at::Tensor workspace, + int64_t workspaceSize, + int64_t accumulate, + int64_t use_split_accumulator) { + // cast inputs to types accepted by te_gemm + transformer_engine::DType A_type_arg = reverse_map_dtype(A_type); + bool transa_arg = static_cast(transa); + transformer_engine::DType B_type_arg = reverse_map_dtype(B_type); + bool transb_arg = static_cast(transb); + transformer_engine::DType D_type_arg = reverse_map_dtype(D_type); + bool grad_arg = static_cast(grad); + size_t workspaceSize_arg = static_cast(workspaceSize); + bool accumulate_arg = static_cast(accumulate); + bool use_split_accumulator_arg = static_cast(use_split_accumulator); + + at::Tensor A_scale_inverse_arg = A_scale_inverse.clone(); + if (A_scale_inverse.numel()) + A_scale_inverse_arg = A_scale_inverse[A_fp8_tensor]; + + at::Tensor B_scale_inverse_arg = B_scale_inverse.clone(); + if (B_scale_inverse.numel()) + B_scale_inverse_arg = B_scale_inverse[B_fp8_tensor]; + + te_gemm(A, + A_scale_inverse_arg, + A_type_arg, + transa_arg, + B, + B_scale_inverse_arg, + B_type_arg, + transb_arg, + D, + D_type_arg, + bias, + pre_gelu_out, + grad_arg, + workspace, + workspaceSize_arg, + accumulate_arg, + use_split_accumulator_arg); + return D; +} + +at::Tensor layernorm_fwd_fp8_inf_ts(const at::Tensor &input, + const at::Tensor &weight, + const at::Tensor &bias, + double eps, + at::Tensor scale, + at::Tensor amax, + at::Tensor scale_inv, + int64_t otype) { + transformer_engine::DType otype_arg = reverse_map_dtype(otype); + float eps_float = 
static_cast(eps); + + at::Tensor output = layernorm_fwd_fp8_inf(input, + weight, + bias, + eps_float, + scale, + amax, + scale_inv, + otype_arg); + + return output.clone(); +} + +at::Tensor layernorm_fwd_inf_ts(const at::Tensor &input, + const at::Tensor &weight, + const at::Tensor &bias, + double eps) { + float eps_float = static_cast(eps); + + at::Tensor output = layernorm_fwd_inf(input, + weight, + bias, + eps_float); + + return output.clone(); +} + +TORCH_LIBRARY(tex_ts, m) { + m.def("cast_to_fp8_ts", &cast_to_fp8_ts); + m.def("cast_from_fp8_ts", &cast_from_fp8_ts); + m.def("fp8_gelu_ts", &fp8_gelu_ts); + m.def("te_gemm_ts", &te_gemm_ts); + m.def("layernorm_fwd_fp8_inf_ts", &layernorm_fwd_fp8_inf_ts); + m.def("layernorm_fwd_inf_ts", &layernorm_fwd_inf_ts); +} diff --git a/transformer_engine/pytorch/module.py b/transformer_engine/pytorch/module.py index dfbfadbb36..d0b66923e6 100644 --- a/transformer_engine/pytorch/module.py +++ b/transformer_engine/pytorch/module.py @@ -4,11 +4,12 @@ """Top level Transformer Engine PyTorch modules""" import os +import pickle import warnings from abc import ABC, abstractmethod -from typing import Union, Optional, Callable, Tuple, Dict, List, Any, Mapping +from typing import Union, Optional, Callable, Tuple, Dict, Any from functools import partial - +import numpy as np import torch from torch.nn.parameter import Parameter from torch.nn import init @@ -69,6 +70,8 @@ fp8_gelu, fp8_cast_transpose_bgrad_dgelu_fused, layernorm_fwd_fp8, + layernorm_fwd_fp8_inf, + layernorm_fwd_inf, cast_to_fp8, cast_from_fp8, ) @@ -144,8 +147,9 @@ def init_fp8_meta_tensors(self) -> None: self.set_meta_tensor(True) self.set_meta_tensor(False) - def get_extra_state(self) -> Union[List[Any], None]: + def get_extra_state(self) -> torch.Tensor: """Save before checkpointing.""" + state = None if self.fp8: state = {} state["scale_fwd"] = self.fp8_meta["scaling_fwd"].scale @@ -162,10 +166,12 @@ def get_extra_state(self) -> Union[List[Any], None]: extra[k] = v 
state["extra_fp8_variables"] = extra - return state - return None + state_serialized = pickle.dumps(state) + state_tensor = torch.tensor(np.frombuffer(state_serialized, dtype=np.uint8), device='cuda') + + return state_tensor - def set_extra_state(self, state: Union[List[Any], None]) -> None: + def set_extra_state(self, state: torch.Tensor) -> None: """Load previous state.""" if state is None: return @@ -204,6 +210,11 @@ def set_extra_state(self, state: Union[List[Any], None]) -> None: self.fp8_meta["autocast_id_bwd"] = state[9] return + if isinstance(state, torch.Tensor): + state = pickle.loads(state.cpu().detach().numpy().tobytes()) + if state is None: + return + # Restore global FP8 buffer states. set_global_fp8_buffer(state["global_fp8_buffer"]) set_global_fp8_recompute_buffer(state["global_fp8_recompute_buffer"]) @@ -521,13 +532,13 @@ def grad_output_preprocess( fp8_dtype_backward, ) else: + grad_output_t = None grad_output_c = cast_to_fp8( grad_output_mat, ctx.fp8_meta["scaling_bwd"], tex.FP8BwdTensors.GRAD_OUTPUT1, fp8_dtype_backward, ) - grad_output_t = None grad_bias = None return grad_output_mat, grad_output_c, grad_output_t, grad_bias @@ -537,6 +548,7 @@ def forward(self): """Needs override.""" + class _LayerNormLinear(torch.autograd.Function): """LayerNormLinear semi-top level module Calls custom cuda extensions. 
@@ -564,6 +576,7 @@ def forward( activation_dtype: torch.dtype, parallel_mode: Union[str, None], return_layernorm_output: bool, + is_training: bool ) -> Union[Tuple[torch.Tensor, ...], torch.Tensor]: # Make sure input dimensions are compatible in_features = ln_weight.numel() @@ -584,19 +597,36 @@ def forward( fp8_dtype_forward = get_fp8_te_dtype(fp8_meta["recipe"], fprop_tensor=True) if not return_layernorm_output: - ln_out, mu, rsigma = layernorm_fwd_fp8( - inputmat, - ln_weight, - ln_bias, - eps, - fp8_meta["scaling_fwd"], - tex.FP8FwdTensors.GEMM1_INPUT, - fp8_dtype_forward, - ) + if is_training: + ln_out, mu, rsigma = layernorm_fwd_fp8( + inputmat, + ln_weight, + ln_bias, + eps, + fp8_meta["scaling_fwd"], + tex.FP8FwdTensors.GEMM1_INPUT, + fp8_dtype_forward, + ) + else: + mu = rsigma = None + ln_out = layernorm_fwd_fp8_inf( + inputmat, + ln_weight, + ln_bias, + eps, + fp8_meta["scaling_fwd"], + fp8_dtype_forward, + ) else: - ln_out_return, mu, rsigma = tex.layernorm_fwd( - inputmat, ln_weight, ln_bias, eps - ) + if is_training: + ln_out_return, mu, rsigma = tex.layernorm_fwd( + inputmat, ln_weight, ln_bias, eps + ) + else: + ln_out_return, mu, rsigma = layernorm_fwd_inf( + inputmat, ln_weight, ln_bias, eps + ), None, None + ln_out = cast_to_fp8( ln_out_return, fp8_meta["scaling_fwd"], @@ -604,7 +634,12 @@ def forward( fp8_dtype_forward, ) else: - ln_out, mu, rsigma = tex.layernorm_fwd(inputmat, ln_weight, ln_bias, eps) + if is_training: + ln_out, mu, rsigma = tex.layernorm_fwd(inputmat, ln_weight, ln_bias, eps) + else: + ln_out, mu, rsigma = layernorm_fwd_inf( + inputmat, ln_weight, ln_bias, eps + ), None, None ln_out_return = ln_out # Column Parallel Linear @@ -622,21 +657,31 @@ def forward( bias = cast_if_needed(bias, bias_dtype) if use_bias else bias if update_fp8_weights: - fp8_cast_transpose_fused( - weight, - fp8_meta["scaling_fwd"], - tex.FP8FwdTensors.GEMM1_WEIGHT, - fp8_dtype_forward, - cast_out=weight_fp8, - transpose_out=weight_t_fp8, - ) + if 
is_training: + fp8_cast_transpose_fused( + weight, + fp8_meta["scaling_fwd"], + tex.FP8FwdTensors.GEMM1_WEIGHT, + fp8_dtype_forward, + cast_out=weight_fp8, + transpose_out=weight_t_fp8, + ) + else: + weight_t_fp8 = None + weight_fp8 = cast_to_fp8( + weight, + fp8_meta["scaling_fwd"], + tex.FP8FwdTensors.GEMM1_WEIGHT, + fp8_dtype_forward) out = fp8_gemm( weight_fp8, - fp8_meta["scaling_fwd"].scale_inv[tex.FP8FwdTensors.GEMM1_WEIGHT], + fp8_meta["scaling_fwd"].scale_inv, + tex.FP8FwdTensors.GEMM1_WEIGHT, fp8_dtype_forward, ln_out_total, - fp8_meta["scaling_fwd"].scale_inv[tex.FP8FwdTensors.GEMM1_INPUT], + fp8_meta["scaling_fwd"].scale_inv, + tex.FP8FwdTensors.GEMM1_INPUT, fp8_dtype_forward, activation_dtype, get_workspace(), @@ -658,29 +703,30 @@ def forward( use_bias=use_bias, ) - ctx.save_for_backward( - inputmat, - ln_weight, - mu, - rsigma, - weight, - weight_t_fp8, - ln_out, - fp8_meta["scaling_fwd"].scale_inv.clone() if fp8 else None, - ) + if is_training: + ctx.save_for_backward( + inputmat, + ln_weight, + mu, + rsigma, + weight, + weight_t_fp8, + ln_out, + fp8_meta["scaling_fwd"].scale_inv.clone() if fp8 else None, + ) - ctx.activation_dtype = activation_dtype - ctx.fp8 = fp8 - ctx.fp8_meta = fp8_meta - ctx.fuse_wgrad_accumulation = fuse_wgrad_accumulation - ctx.is_first_microbatch = is_first_microbatch - ctx.use_bias = use_bias - ctx.sequence_parallel = sequence_parallel - ctx.tensor_parallel = tensor_parallel - ctx.inp_shape = inp.shape - ctx.parallel_mode = parallel_mode - ctx.tp_group = tp_group - ctx.return_layernorm_output = return_layernorm_output + ctx.activation_dtype = activation_dtype + ctx.fp8 = fp8 + ctx.fp8_meta = fp8_meta + ctx.fuse_wgrad_accumulation = fuse_wgrad_accumulation + ctx.is_first_microbatch = is_first_microbatch + ctx.use_bias = use_bias + ctx.sequence_parallel = sequence_parallel + ctx.tensor_parallel = tensor_parallel + ctx.inp_shape = inp.shape + ctx.parallel_mode = parallel_mode + ctx.tp_group = tp_group + 
ctx.return_layernorm_output = return_layernorm_output # Row Parallel Linear if parallel_mode == "row" and sequence_parallel: @@ -695,6 +741,7 @@ def forward( return out, ln_out_return.view_as(inp) return out + @staticmethod def backward( ctx, *grad_outputs: Tuple[torch.Tensor, ...] @@ -748,10 +795,12 @@ def backward( # DGRAD: Evaluated unconditionally to feed into Linear backward dgrad = fp8_gemm( weight_t_fp8, - fwd_scale_inverses[tex.FP8FwdTensors.GEMM1_WEIGHT], + fwd_scale_inverses, + tex.FP8FwdTensors.GEMM1_WEIGHT, fp8_dtype_forward, grad_output_c, - ctx.fp8_meta["scaling_bwd"].scale_inv[tex.FP8BwdTensors.GRAD_OUTPUT1], + ctx.fp8_meta["scaling_bwd"].scale_inv, + tex.FP8BwdTensors.GRAD_OUTPUT1, fp8_dtype_backward, ctx.activation_dtype, get_workspace(), @@ -784,12 +833,12 @@ def backward( ln_out_total_t = tex.fp8_transpose(ln_out_total, fp8_dtype_forward) wgrad = fp8_gemm( ln_out_total_t, - fwd_scale_inverses[tex.FP8FwdTensors.GEMM1_INPUT], + fwd_scale_inverses, + tex.FP8FwdTensors.GEMM1_INPUT, fp8_dtype_forward, grad_output_t, - ctx.fp8_meta["scaling_bwd"].scale_inv[ - tex.FP8BwdTensors.GRAD_OUTPUT1 - ], + ctx.fp8_meta["scaling_bwd"].scale_inv, + tex.FP8BwdTensors.GRAD_OUTPUT1, fp8_dtype_backward, ctx.activation_dtype, get_workspace(), @@ -874,6 +923,7 @@ def backward( None, None, None, + None, ) @@ -1094,8 +1144,13 @@ def forward( inp = self.pre_forward(inp) bias_tensor = bias if bias is not None else self.bias - - out = _LayerNormLinear.apply( + if self.training: + fwd_fn = _LayerNormLinear.apply + args = [] + else: + fwd_fn = _LayerNormLinear.forward + args = [None] + args += ( inp, self.layer_norm_weight, self.layer_norm_bias, @@ -1115,7 +1170,9 @@ def forward( self.activation_dtype, self.parallel_mode, self.return_layernorm_output, + self.training, ) + out = fwd_fn(*args) self.post_forward() @@ -1133,7 +1190,6 @@ def forward( return out, ln_out return out - class _Linear(torch.autograd.Function): """Linear semi-top level module Calls custom cuda extensions. 
@@ -1157,6 +1213,7 @@ def forward( tensor_parallel: bool, activation_dtype: torch.dtype, parallel_mode: Union[str, None], + is_training: bool, ) -> torch.Tensor: # Make sure input dimensions are compatible in_features = weight.shape[-1] @@ -1173,19 +1230,27 @@ def forward( fp8_dtype_forward = get_fp8_te_dtype(fp8_meta["recipe"], fprop_tensor=True) if not fp8_meta["recipe"].override_linear_precision.wgrad: - inputmat, inputmat_t = fp8_cast_transpose_fused( - inputmat, - fp8_meta["scaling_fwd"], - tex.FP8FwdTensors.GEMM1_INPUT, - fp8_dtype_forward, - ) + if is_training: + inputmat, inputmat_t = fp8_cast_transpose_fused( + inputmat, + fp8_meta["scaling_fwd"], + tex.FP8FwdTensors.GEMM1_INPUT, + fp8_dtype_forward, + ) + else: + inputmat = cast_to_fp8( + inputmat, + fp8_meta["scaling_fwd"], + tex.FP8FwdTensors.GEMM1_INPUT, + fp8_dtype_forward, + ) else: - inputmat = cast_to_fp8( + inputmat, inputmat_t = cast_to_fp8( inputmat, fp8_meta["scaling_fwd"], tex.FP8FwdTensors.GEMM1_INPUT, fp8_dtype_forward, - ) + ), None # Column Parallel Linear if parallel_mode == "column" and sequence_parallel: @@ -1202,21 +1267,32 @@ def forward( bias = cast_if_needed(bias, bias_dtype) if use_bias else bias if update_fp8_weights: - fp8_cast_transpose_fused( - weight, - fp8_meta["scaling_fwd"], - tex.FP8FwdTensors.GEMM1_WEIGHT, - fp8_dtype_forward, - cast_out=weight_fp8, - transpose_out=weight_t_fp8, - ) + if is_training: + fp8_cast_transpose_fused( + weight, + fp8_meta["scaling_fwd"], + tex.FP8FwdTensors.GEMM1_WEIGHT, + fp8_dtype_forward, + cast_out=weight_fp8, + transpose_out=weight_t_fp8, + ) + else: + weight_t_fp8 = None + weight_fp8 = cast_to_fp8( + weight, + fp8_meta["scaling_fwd"], + tex.FP8FwdTensors.GEMM1_WEIGHT, + fp8_dtype_forward, + ) out = fp8_gemm( weight_fp8, - fp8_meta["scaling_fwd"].scale_inv[tex.FP8FwdTensors.GEMM1_WEIGHT], + fp8_meta["scaling_fwd"].scale_inv, + tex.FP8FwdTensors.GEMM1_WEIGHT, fp8_dtype_forward, inputmat, - 
fp8_meta["scaling_fwd"].scale_inv[tex.FP8FwdTensors.GEMM1_INPUT], + fp8_meta["scaling_fwd"].scale_inv, + tex.FP8FwdTensors.GEMM1_INPUT, fp8_dtype_forward, activation_dtype, get_workspace(), @@ -1238,28 +1314,29 @@ def forward( use_bias=use_bias, ) - ctx.save_for_backward( - inputmat_no_fp8 - if not fp8 or fp8_meta["recipe"].override_linear_precision.wgrad - else None, - inputmat_t - if fp8 and not fp8_meta["recipe"].override_linear_precision.wgrad - else None, - weight, - weight_t_fp8, - fp8_meta["scaling_fwd"].scale_inv.clone() if fp8 else None, - ) - ctx.activation_dtype = activation_dtype - ctx.fp8 = fp8 - ctx.fp8_meta = fp8_meta - ctx.fuse_wgrad_accumulation = fuse_wgrad_accumulation - ctx.is_first_microbatch = is_first_microbatch - ctx.use_bias = use_bias - ctx.sequence_parallel = sequence_parallel - ctx.tensor_parallel = tensor_parallel - ctx.inp_shape = inp.shape - ctx.parallel_mode = parallel_mode - ctx.tp_group = tp_group + if is_training: + ctx.save_for_backward( + inputmat_no_fp8 + if not fp8 or fp8_meta["recipe"].override_linear_precision.wgrad + else None, + inputmat_t + if fp8 and not fp8_meta["recipe"].override_linear_precision.wgrad + else None, + weight, + weight_t_fp8, + fp8_meta["scaling_fwd"].scale_inv.clone() if fp8 else None, + ) + ctx.activation_dtype = activation_dtype + ctx.fp8 = fp8 + ctx.fp8_meta = fp8_meta + ctx.fuse_wgrad_accumulation = fuse_wgrad_accumulation + ctx.is_first_microbatch = is_first_microbatch + ctx.use_bias = use_bias + ctx.sequence_parallel = sequence_parallel + ctx.tensor_parallel = tensor_parallel + ctx.inp_shape = inp.shape + ctx.parallel_mode = parallel_mode + ctx.tp_group = tp_group # Row Parallel Linear if parallel_mode == "row" and sequence_parallel: @@ -1270,6 +1347,7 @@ def forward( # [*, in_features] -> [*, out_features] except first dimension changes for SP return out.view(-1, *inp.shape[1:-1], out.shape[-1]) + @staticmethod def backward( ctx, grad_output: torch.Tensor @@ -1326,10 +1404,12 @@ def backward( # 
DGRAD dgrad = fp8_gemm( weight_t_fp8, - fwd_scale_inverses[tex.FP8FwdTensors.GEMM1_WEIGHT], + fwd_scale_inverses, + tex.FP8FwdTensors.GEMM1_WEIGHT, fp8_dtype_forward, grad_output_c, - ctx.fp8_meta["scaling_bwd"].scale_inv[tex.FP8BwdTensors.GRAD_OUTPUT1], + ctx.fp8_meta["scaling_bwd"].scale_inv, + tex.FP8BwdTensors.GRAD_OUTPUT1, fp8_dtype_backward, ctx.activation_dtype, get_workspace(), @@ -1361,12 +1441,12 @@ def backward( if not ctx.fp8_meta["recipe"].override_linear_precision.wgrad: wgrad = fp8_gemm( inputmat_t_total, - fwd_scale_inverses[tex.FP8FwdTensors.GEMM1_INPUT], + fwd_scale_inverses, + tex.FP8FwdTensors.GEMM1_INPUT, fp8_dtype_forward, grad_output_t, - ctx.fp8_meta["scaling_bwd"].scale_inv[ - tex.FP8BwdTensors.GRAD_OUTPUT1 - ], + ctx.fp8_meta["scaling_bwd"].scale_inv, + tex.FP8BwdTensors.GRAD_OUTPUT1, fp8_dtype_backward, ctx.activation_dtype, get_workspace(), @@ -1429,6 +1509,7 @@ def backward( None, None, None, + None, ) @@ -1620,8 +1701,13 @@ def forward( inp = self.pre_forward(inp) bias_tensor = bias if bias is not None else self.bias - - out = _Linear.apply( + if self.training: + linear_fn = _Linear.apply + args = [] + else: + linear_fn = _Linear.forward + args = [None] + args += ( weight if weight is not None else self.weight, self.weight1_fp8 if self.fp8 else None, self.weight1_t_fp8 if self.fp8 else None, @@ -1637,8 +1723,9 @@ def forward( self.tp_size > 1, self.activation_dtype, self.parallel_mode, + self.training, ) - + out = linear_fn(*args) self.post_forward() if self.gemm_bias_unfused_add: @@ -1681,6 +1768,7 @@ def forward( return_layernorm_output: bool, bias_gelu_nvfusion: bool, set_parallel_mode: bool, + is_training: bool ) -> Union[Tuple[torch.Tensor, ...], torch.Tensor]: # Make sure input dimensions are compatible in_features = ln_weight.numel() @@ -1700,15 +1788,25 @@ def forward( if fp8: fp8_dtype_forward = get_fp8_te_dtype(fp8_meta["recipe"], fprop_tensor=True) if not return_layernorm_output: - ln_out, mu, rsigma = layernorm_fwd_fp8( - 
inputmat, - ln_weight, - ln_bias, - eps, - fp8_meta["scaling_fwd"], - tex.FP8FwdTensors.GEMM1_INPUT, - fp8_dtype_forward, - ) + if is_training: + ln_out, mu, rsigma = layernorm_fwd_fp8( + inputmat, + ln_weight, + ln_bias, + eps, + fp8_meta["scaling_fwd"], + tex.FP8FwdTensors.GEMM1_INPUT, + fp8_dtype_forward, + ) + else: + ln_out = layernorm_fwd_fp8_inf( + inputmat, + ln_weight, + ln_bias, + eps, + fp8_meta["scaling_fwd"], + fp8_dtype_forward, + ) else: ln_out_return, mu, rsigma = tex.layernorm_fwd( inputmat, ln_weight, ln_bias, eps @@ -1720,9 +1818,14 @@ def forward( fp8_dtype_forward, ) else: - ln_out, mu, rsigma = tex.layernorm_fwd(inputmat, ln_weight, ln_bias, eps) - ln_out_return = ln_out + if is_training: + ln_out, mu, rsigma = tex.layernorm_fwd(inputmat, ln_weight, ln_bias, eps) + else: + ln_out, mu, rsigma = layernorm_fwd_inf( + inputmat, ln_weight, ln_bias, eps + ), None, None + ln_out_return = ln_out # Column Parallel Linear if set_parallel_mode and sequence_parallel: ln_out_total, _ = gather_along_first_dim(ln_out, tp_group) @@ -1739,30 +1842,48 @@ def forward( fc2_bias = cast_if_needed(fc2_bias, bias_dtype) if use_bias else fc2_bias if update_fp8_weights: - fp8_cast_transpose_fused( - fc1_weight, - fp8_meta["scaling_fwd"], - tex.FP8FwdTensors.GEMM1_WEIGHT, - fp8_dtype_forward, - cast_out=fc1_weight_fp8, - transpose_out=fc1_weight_t_fp8, - ) + if is_training: + fp8_cast_transpose_fused( + fc1_weight, + fp8_meta["scaling_fwd"], + tex.FP8FwdTensors.GEMM1_WEIGHT, + fp8_dtype_forward, + cast_out=fc1_weight_fp8, + transpose_out=fc1_weight_t_fp8, + ) - fp8_cast_transpose_fused( - fc2_weight, - fp8_meta["scaling_fwd"], - tex.FP8FwdTensors.GEMM2_WEIGHT, - fp8_dtype_forward, - cast_out=fc2_weight_fp8, - transpose_out=fc2_weight_t_fp8, - ) + fp8_cast_transpose_fused( + fc2_weight, + fp8_meta["scaling_fwd"], + tex.FP8FwdTensors.GEMM2_WEIGHT, + fp8_dtype_forward, + cast_out=fc2_weight_fp8, + transpose_out=fc2_weight_t_fp8, + ) + else: + fc1_weight_t_fp8 = None + 
fc1_weight_fp8 = cast_to_fp8( + fc1_weight, + fp8_meta["scaling_fwd"], + tex.FP8FwdTensors.GEMM1_WEIGHT, + fp8_dtype_forward, + ) + fc2_weight_t_fp8 = None + fc2_weight_fp8 = cast_to_fp8( + fc2_weight, + fp8_meta["scaling_fwd"], + tex.FP8FwdTensors.GEMM2_WEIGHT, + fp8_dtype_forward, + ) fc1_out = fp8_gemm( fc1_weight_fp8, - fp8_meta["scaling_fwd"].scale_inv[tex.FP8FwdTensors.GEMM1_WEIGHT], + fp8_meta["scaling_fwd"].scale_inv, + tex.FP8FwdTensors.GEMM1_WEIGHT, fp8_dtype_forward, ln_out_total, - fp8_meta["scaling_fwd"].scale_inv[tex.FP8FwdTensors.GEMM1_INPUT], + fp8_meta["scaling_fwd"].scale_inv, + tex.FP8FwdTensors.GEMM1_INPUT, fp8_dtype_forward, activation_dtype, get_workspace(), @@ -1780,10 +1901,12 @@ def forward( fc2_out = fp8_gemm( fc2_weight_fp8, - fp8_meta["scaling_fwd"].scale_inv[tex.FP8FwdTensors.GEMM2_WEIGHT], + fp8_meta["scaling_fwd"].scale_inv, + tex.FP8FwdTensors.GEMM2_WEIGHT, fp8_dtype_forward, gelu_out, - fp8_meta["scaling_fwd"].scale_inv[tex.FP8FwdTensors.GEMM2_INPUT], + fp8_meta["scaling_fwd"].scale_inv, + tex.FP8FwdTensors.GEMM2_INPUT, fp8_dtype_forward, activation_dtype, get_workspace(), @@ -1810,7 +1933,7 @@ def forward( gelu=not bias_gelu_nvfusion, ) - if bias_gelu_nvfusion: + if bias_gelu_nvfusion and is_training: fc1_out, _, _ = fc1_outputs gelu_out = bias_gelu_fused(fc1_out, fc1_bias) else: @@ -1824,35 +1947,35 @@ def forward( bias=fc2_bias, use_bias=use_bias, ) - - ctx.save_for_backward( - inputmat, - ln_weight, - mu, - rsigma, - ln_out, - fc1_out, - gelu_out, - fc1_weight, - fc1_weight_t_fp8, - fc2_weight, - fc2_weight_t_fp8, - fc1_bias, - fp8_meta["scaling_fwd"].scale_inv.clone() if fp8 else None, - ) - ctx.activation_dtype = activation_dtype - ctx.fp8 = fp8 - ctx.fp8_meta = fp8_meta - ctx.fuse_wgrad_accumulation = fuse_wgrad_accumulation - ctx.is_first_microbatch = is_first_microbatch - ctx.use_bias = use_bias - ctx.sequence_parallel = sequence_parallel - ctx.tensor_parallel = tensor_parallel - ctx.inp_shape = inp.shape - ctx.tp_group = 
tp_group - ctx.bias_gelu_nvfusion = bias_gelu_nvfusion - ctx.return_layernorm_output = return_layernorm_output - ctx.set_parallel_mode = set_parallel_mode + if is_training: + ctx.save_for_backward( + inputmat, + ln_weight, + mu, + rsigma, + ln_out, + fc1_out, + gelu_out, + fc1_weight, + fc1_weight_t_fp8, + fc2_weight, + fc2_weight_t_fp8, + fc1_bias, + fp8_meta["scaling_fwd"].scale_inv.clone() if fp8 else None, + ) + ctx.activation_dtype = activation_dtype + ctx.fp8 = fp8 + ctx.fp8_meta = fp8_meta + ctx.fuse_wgrad_accumulation = fuse_wgrad_accumulation + ctx.is_first_microbatch = is_first_microbatch + ctx.use_bias = use_bias + ctx.sequence_parallel = sequence_parallel + ctx.tensor_parallel = tensor_parallel + ctx.inp_shape = inp.shape + ctx.tp_group = tp_group + ctx.bias_gelu_nvfusion = bias_gelu_nvfusion + ctx.return_layernorm_output = return_layernorm_output + ctx.set_parallel_mode = set_parallel_mode # Row Parallel Linear if set_parallel_mode and sequence_parallel: @@ -1867,6 +1990,7 @@ def forward( return fc2_out, ln_out_return.view_as(inp) return fc2_out + @staticmethod def backward( ctx, *grad_outputs: Tuple[torch.Tensor, ...] 
@@ -1925,10 +2049,12 @@ def backward( # FC2 DGRAD; Unconditional fc2_dgrad = fp8_gemm( fc2_weight_t_fp8, - fwd_scale_inverses[tex.FP8FwdTensors.GEMM2_WEIGHT], + fwd_scale_inverses, + tex.FP8FwdTensors.GEMM2_WEIGHT, fp8_dtype_forward, grad_output_c, - ctx.fp8_meta["scaling_bwd"].scale_inv[tex.FP8BwdTensors.GRAD_OUTPUT1], + ctx.fp8_meta["scaling_bwd"].scale_inv, + tex.FP8BwdTensors.GRAD_OUTPUT1, fp8_dtype_backward, ctx.activation_dtype, get_workspace(), @@ -1941,12 +2067,12 @@ def backward( gelu_out_t = tex.fp8_transpose(gelu_out, fp8_dtype_forward) fc2_wgrad = fp8_gemm( gelu_out_t, - fwd_scale_inverses[tex.FP8FwdTensors.GEMM2_INPUT], + fwd_scale_inverses, + tex.FP8FwdTensors.GEMM2_INPUT, fp8_dtype_forward, grad_output_t, - ctx.fp8_meta["scaling_bwd"].scale_inv[ - tex.FP8BwdTensors.GRAD_OUTPUT1 - ], + ctx.fp8_meta["scaling_bwd"].scale_inv, + tex.FP8BwdTensors.GRAD_OUTPUT1, fp8_dtype_backward, ctx.activation_dtype, get_workspace(), @@ -2004,10 +2130,12 @@ def backward( # FC1 DGRAD: Unconditional fc1_dgrad = fp8_gemm( fc1_weight_t_fp8, - fwd_scale_inverses[tex.FP8FwdTensors.GEMM1_WEIGHT], + fwd_scale_inverses, + tex.FP8FwdTensors.GEMM1_WEIGHT, fp8_dtype_forward, dgelu, - ctx.fp8_meta["scaling_bwd"].scale_inv[tex.FP8BwdTensors.GRAD_OUTPUT2], + ctx.fp8_meta["scaling_bwd"].scale_inv, + tex.FP8BwdTensors.GRAD_OUTPUT2, fp8_dtype_backward, ctx.activation_dtype, get_workspace(), @@ -2072,12 +2200,12 @@ def backward( ln_out_total_t = tex.fp8_transpose(ln_out_total, fp8_dtype_forward) fc1_wgrad = fp8_gemm( ln_out_total_t, - fwd_scale_inverses[tex.FP8FwdTensors.GEMM1_INPUT], + fwd_scale_inverses, + tex.FP8FwdTensors.GEMM1_INPUT, fp8_dtype_forward, dgelu_t, - ctx.fp8_meta["scaling_bwd"].scale_inv[ - tex.FP8BwdTensors.GRAD_OUTPUT2 - ], + ctx.fp8_meta["scaling_bwd"].scale_inv, + tex.FP8BwdTensors.GRAD_OUTPUT2, fp8_dtype_backward, ctx.activation_dtype, get_workspace(), @@ -2176,6 +2304,7 @@ def backward( None, None, None, + None, ) @@ -2422,7 +2551,13 @@ def forward( inp = 
self.pre_forward(inp, num_gemms=2) - out = _LayerNormMLP.apply( + if self.training: + fwd_fn = _LayerNormMLP.apply + args = [] + else: + fwd_fn = _LayerNormMLP.forward + args = [None] + args += ( inp, self.layer_norm_weight, self.layer_norm_bias, @@ -2447,7 +2582,9 @@ def forward( self.return_layernorm_output, self.bias_gelu_nvfusion, self.set_parallel_mode, + self.training, ) + out = fwd_fn(*args) self.post_forward() diff --git a/transformer_engine/pytorch/softmax.py b/transformer_engine/pytorch/softmax.py index 8a34615d7f..a33cf5a85f 100644 --- a/transformer_engine/pytorch/softmax.py +++ b/transformer_engine/pytorch/softmax.py @@ -5,9 +5,10 @@ """Fused scaled masked softmax functions""" import os from typing import Callable, Tuple, Union - import torch from torch import nn +import torch._C._onnx as _C_onnx +from torch.onnx import _type_utils import transformer_engine_extensions as tex @@ -46,6 +47,36 @@ def backward( return input_grads, None + @staticmethod + def symbolic(g: torch.Graph, inputs: torch._C.Value, scale: float) -> torch._C.Value: + """ScaledUpperTriangMaskedSoftmax symbolic method""" + def triangular_mask(): + dtype = _type_utils.JitScalarType.INT64 + ones = torch.onnx.symbolic_opset9.ones_like(g, inputs, dtype) + k = g.op("Constant", value_t=torch.tensor(1, dtype=torch.int64)) + mask = g.op("Trilu", ones, k, upper_i=1) + mask = g.op("Cast", mask, to_i=_C_onnx.TensorProtoDataType.BOOL) + return mask + + # Captures the logic of function scaled_upper_triang_masked_softmax_warp_forward + if inputs.type().scalarType() == "BFloat16": + inputs = g.op("Cast", inputs, to_i=_C_onnx.TensorProtoDataType.FLOAT16) + mask = triangular_mask() + one = g.op("Constant", value_t=torch.tensor(1, dtype=torch.int64)) + inv_mask = g.op("Sub", one, mask) + + neg_tenK = g.op("Constant", value_t=torch.tensor(-10000., dtype=torch.float16)) + softmax_mask = g.op("Mul", mask, neg_tenK) + + scale_input = g.op("Constant", value_t=torch.tensor(scale, dtype=torch.float16)) + scaled 
= g.op("Mul", inputs, scale_input) + masked_scaled = g.op("Mul", inv_mask, scaled) + masked = g.op("Add", masked_scaled, softmax_mask) + out = g.op("Softmax", masked) + if inputs.type().scalarType() == "BFloat16": + out = g.op("Cast", out, to_i=_C_onnx.TensorProtoDataType.FLOAT16) + return out + class ScaledMaskedSoftmax(torch.autograd.Function): """ @@ -78,6 +109,35 @@ def backward( ) return input_grads, None, None + @staticmethod + def symbolic( + g: torch.Graph, + inputs: torch._C.Value, + mask: torch._C.Value, + scale: float) -> torch._C.Value: + """ScaledMaskedSoftmax symbolic method""" + # Captures the logic of function scaled_masked_softmax_warp_forward. + # output = softmax(mask(input*scale) + # Computed as: + # masked_scaled = (1 - mask)*(input*scale) + # softmax_mask = mask * -10000 + # output = softmax(masked_scaled + softmax_mask) + if inputs.type().scalarType() == "BFloat16": + inputs = g.op("Cast", inputs, to_i=_C_onnx.TensorProtoDataType.FLOAT16) + scale_input = g.op("Constant", value_t=torch.tensor(scale, dtype=torch.float16)) + scaled = g.op("Mul", inputs, scale_input) + one = g.op("Constant", value_t=torch.tensor(1, dtype=torch.int64)) + inv_mask = g.op("Sub", one, mask) + # Todo: type is hard coded because softmax uses FP16 or BF16 + neg_tenK = g.op("Constant", value_t=torch.tensor(-10000., dtype=torch.float16)) + softmax_mask = g.op("Mul", mask, neg_tenK) + masked_scaled = g.op("Mul", inv_mask, scaled) + masked = g.op("Add", masked_scaled, softmax_mask) + out = g.op("Softmax", masked) + if inputs.type().scalarType() == "BFloat16": + out = g.op("Cast", out, to_i=_C_onnx.TensorProtoDataType.FLOAT16) + return out + class ScaledSoftmax(torch.autograd.Function): """ @@ -107,6 +167,19 @@ def backward( ) return input_grads, None, None + @staticmethod + def symbolic(g: torch.Graph, inputs: torch._C.Value, scale: float) -> torch._C.Value: + """ScaledSoftmax symbolic method""" + if inputs.type().scalarType() == "BFloat16": + inputs = g.op("Cast", inputs, 
to_i=_C_onnx.TensorProtoDataType.FLOAT16) + scale_input = g.op("Constant", value_t=torch.tensor(scale, dtype=torch.float16)) + scaled = g.op("Mul", inputs, scale_input) + out = g.op("Softmax", scaled) + if inputs.type().scalarType() == "BFloat16": + out = g.op("Cast", out, to_i=_C_onnx.TensorProtoDataType.FLOAT16) + return out + + class FusedScaleMaskSoftmax(nn.Module): """ @@ -163,7 +236,7 @@ def is_kernel_available(self, b: int, np: int, sq: int, sk: int) -> bool: and attn_batches % 4 == 0 # np * b must be divisor of 4 ): if 0 <= sk <= 4096: - batch_per_block = self.get_batch_per_block(sk) + batch_per_block = self.get_batch_per_block(int(sk)) if self.attn_mask_type == "causal": if attn_batches % batch_per_block == 0: diff --git a/transformer_engine/pytorch/te_onnx_extensions.py b/transformer_engine/pytorch/te_onnx_extensions.py new file mode 100755 index 0000000000..1315150bec --- /dev/null +++ b/transformer_engine/pytorch/te_onnx_extensions.py @@ -0,0 +1,170 @@ +# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# See LICENSE for license information. + +""" +ONNX symbolic functions for Transformer Engine + +Warnings of the type pasted below are a known Pytorch issue +(https://github.com/pytorch/pytorch/issues/81693): + +tests/test_onnx_export.py::test_export_cast_ops[112] + /opt/conda/lib/python3.8/site-packages/torch/onnx/utils.py:649: + UserWarning: The shape inference of trt::TRT_FP8DequantizeLinear type is missing, + so it may result in wrong shape inference for the exported graph. + Please consider adding it in symbolic function. (Triggered internally at + /opt/pytorch/pytorch/torch/csrc/jit/passes/onnx/shape_type_inference.cpp:1880.) 
+ _C._jit_pass_onnx_graph_shape_type_inference( + +""" + +import torch +from torch.onnx import symbolic_helper, register_custom_op_symbolic +import torch._C._onnx as _C_onnx +import transformer_engine_extensions as tex + +# This file registers custom op symbolic ONNX functions and does not export any symbols. +__all__ = [] + + +# Custom ops spec version +VER = 1 + + +def make_op_name(op_name: str) -> str: + """custom op name""" + return "trt::" + op_name + + +@symbolic_helper.parse_args("v", "v", "v", "v", "i", "i") +def onnx_cast_to_fp8(g, inputs, scale, amax, scale_inv, fp8_tensor, otype): + """ONNX graph for cast_to_fp8""" + output_shape = torch.onnx.symbolic_helper._get_tensor_sizes(inputs) + if inputs.type().scalarType() == "Half": + # Q inputs are currently constrained to FP32 due to a similar limitation in ORT custom ops. + inputs = g.op("Cast", inputs, to_i=_C_onnx.TensorProtoDataType.FLOAT) + return g.op(make_op_name("TRT_FP8QuantizeLinear"), inputs, scale_inv).setType( + inputs.type().with_dtype(torch.uint8).with_sizes(output_shape)) + + +@symbolic_helper.parse_args("v", "v", "i", "i", "i") +def onnx_cast_from_fp8(g, inputs, scale_inv, fp8_tensor, itype, otype): + """ONNX graph for cast_from_fp8""" + output_shape = torch.onnx.symbolic_helper._get_tensor_sizes(inputs) + out = g.op(make_op_name("TRT_FP8DequantizeLinear"), inputs, scale_inv).setType( + inputs.type().with_dtype(torch.float32).with_sizes(output_shape)) + if otype == int(tex.DType.kFloat16): + # DQ outputs are currently constrained to FP32 due to a similar limitation in ORT + # custom ops, so cast the output. 
+ out = g.op("Cast", out, to_i=_C_onnx.TensorProtoDataType.FLOAT16) + return out + + +@symbolic_helper.parse_args("v", "v", "v", "v", "i", "i") +def onnx_fp8_gelu(g, inputs, scale, amax, scale_inv, fp8_tensor, otype): + """ONNX graph for fp8_gelu""" + output_shape = torch.onnx.symbolic_helper._get_tensor_sizes(inputs) + gelu = torch.onnx.symbolic_opset9.gelu(g, inputs, "tanh") + if inputs.type().scalarType() == "Half": + gelu = g.op("Cast", gelu, to_i=_C_onnx.TensorProtoDataType.FLOAT) + out = g.op(make_op_name("TRT_FP8QuantizeLinear"), gelu, scale_inv).setType( + inputs.type().with_dtype(torch.uint8).with_sizes(output_shape)) + return out + + +@symbolic_helper.parse_args("v", "v", "i", "i", "i", + "v", "v", "i", "i", "i", + "v", "i", "v", "v", "i", + "v", "i", "i", "i") +def onnx_te_gemm( + g, + weight, + weight_scale_inverse, + weight_fp8_tensor, + weight_type, + trans_weight, + inputs, + input_scale_inverse, + input_fp8_tensor, + input_type, + trans_input, + out, + out_type, + bias, + pre_gelu_out, + grad, + workspace, + workspaceSize, + accumulate, + use_split_accumulator): + """ONNX graph for te_gemm""" + is_fp16 = bias.type().scalarType() == "Half" + if input_type == int(tex.DType.kFloat8E4M3): + inputs = g.op(make_op_name("TRT_FP8DequantizeLinear"), inputs, input_scale_inverse) + + if weight_type == int(tex.DType.kFloat8E4M3): + weight = g.op(make_op_name("TRT_FP8DequantizeLinear"), weight, weight_scale_inverse) + + output = g.op("Gemm", inputs, weight, transA_i=trans_input, transB_i=trans_weight) + + empty_tensor_size = [0] + bias_empty = torch.onnx.symbolic_helper._get_tensor_sizes(bias) == empty_tensor_size + pre_gelu_out_empty = torch.onnx.symbolic_helper._get_tensor_sizes(pre_gelu_out) \ + == empty_tensor_size + if not bias_empty: + if pre_gelu_out_empty: + if is_fp16: + output = g.op("Cast", output, to_i=_C_onnx.TensorProtoDataType.FLOAT16) + output = g.op('Add', output, bias) + else: + if is_fp16: + output = g.op("Cast", output, 
to_i=_C_onnx.TensorProtoDataType.FLOAT16) + output = g.op('Add', output, bias) + output = torch.onnx.symbolic_opset9.gelu(g, output) + else: + if is_fp16: + output = g.op("Cast", output, to_i=_C_onnx.TensorProtoDataType.FLOAT16) + return output + + +@symbolic_helper.parse_args("v", "v", "v", "f", "v", "v", "v", "i") +def onnx_layernorm_fwd_fp8(g, inputs, weight, bias, eps, scale, amax, scale_inv, otype): + """ONNX graph for layernorm_fwd_fp8""" + ln = onnx_layernorm_fwd(g, inputs, weight, bias, eps) + output_shape = torch.onnx.symbolic_helper._get_tensor_sizes(inputs) + if inputs.type().scalarType() == "Half": + ln = g.op("Cast", ln, to_i=_C_onnx.TensorProtoDataType.FLOAT) + fp8_ln = g.op(make_op_name("TRT_FP8QuantizeLinear"), ln, scale_inv).setType( + inputs.type().with_dtype(torch.uint8).with_sizes(output_shape)) + return fp8_ln + + +@symbolic_helper.parse_args("v", "v", "v", "f") +def onnx_layernorm_fwd(g, inputs, weight, bias, eps): + """ONNX graph for layernorm_fwd""" + normalized_shape = torch.onnx.symbolic_helper._get_tensor_sizes(inputs) + if normalized_shape is None: + ndim = torch.onnx.symbolic_helper._get_tensor_rank(inputs) + assert ndim is not None + normalized_shape = list(range(0, ndim)) + # Normalization axis = 0, so normalized_shape uses all dims except dim = 0 + normalized_shape = normalized_shape[1:] + + ln = torch.onnx.symbolic_opset9.layer_norm( + g, + inputs, + normalized_shape, + weight, + bias, + eps, + False # cudnn_enable (not relevant) + ) + return ln + + +register_custom_op_symbolic('tex_ts::cast_to_fp8_ts', onnx_cast_to_fp8, VER) +register_custom_op_symbolic('tex_ts::cast_from_fp8_ts', onnx_cast_from_fp8, VER) +register_custom_op_symbolic('tex_ts::fp8_gelu_ts', onnx_fp8_gelu, VER) +register_custom_op_symbolic('tex_ts::te_gemm_ts', onnx_te_gemm, VER) +register_custom_op_symbolic('tex_ts::layernorm_fwd_fp8_inf_ts', onnx_layernorm_fwd_fp8, VER) +register_custom_op_symbolic('tex_ts::layernorm_fwd_inf_ts', onnx_layernorm_fwd, VER) From 
b2568676ac8fe0978bd1fd5f451b30366bec25e1 Mon Sep 17 00:00:00 2001 From: Asfiya Baig Date: Wed, 14 Dec 2022 18:49:08 +0000 Subject: [PATCH 02/16] fixes for pylint tests Signed-off-by: Asfiya Baig --- transformer_engine/pytorch/te_onnx_extensions.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/transformer_engine/pytorch/te_onnx_extensions.py b/transformer_engine/pytorch/te_onnx_extensions.py index 1315150bec..2424c62a64 100755 --- a/transformer_engine/pytorch/te_onnx_extensions.py +++ b/transformer_engine/pytorch/te_onnx_extensions.py @@ -39,6 +39,7 @@ def make_op_name(op_name: str) -> str: @symbolic_helper.parse_args("v", "v", "v", "v", "i", "i") def onnx_cast_to_fp8(g, inputs, scale, amax, scale_inv, fp8_tensor, otype): """ONNX graph for cast_to_fp8""" + # pylint: disable=unused-argument output_shape = torch.onnx.symbolic_helper._get_tensor_sizes(inputs) if inputs.type().scalarType() == "Half": # Q inputs are currently constrained to FP32 due to a similar limitation in ORT custom ops. 
@@ -50,6 +51,7 @@ def onnx_cast_to_fp8(g, inputs, scale, amax, scale_inv, fp8_tensor, otype): @symbolic_helper.parse_args("v", "v", "i", "i", "i") def onnx_cast_from_fp8(g, inputs, scale_inv, fp8_tensor, itype, otype): """ONNX graph for cast_from_fp8""" + # pylint: disable=unused-argument output_shape = torch.onnx.symbolic_helper._get_tensor_sizes(inputs) out = g.op(make_op_name("TRT_FP8DequantizeLinear"), inputs, scale_inv).setType( inputs.type().with_dtype(torch.float32).with_sizes(output_shape)) @@ -63,6 +65,7 @@ def onnx_cast_from_fp8(g, inputs, scale_inv, fp8_tensor, itype, otype): @symbolic_helper.parse_args("v", "v", "v", "v", "i", "i") def onnx_fp8_gelu(g, inputs, scale, amax, scale_inv, fp8_tensor, otype): """ONNX graph for fp8_gelu""" + # pylint: disable=unused-argument output_shape = torch.onnx.symbolic_helper._get_tensor_sizes(inputs) gelu = torch.onnx.symbolic_opset9.gelu(g, inputs, "tanh") if inputs.type().scalarType() == "Half": @@ -98,6 +101,7 @@ def onnx_te_gemm( accumulate, use_split_accumulator): """ONNX graph for te_gemm""" + # pylint: disable=unused-argument is_fp16 = bias.type().scalarType() == "Half" if input_type == int(tex.DType.kFloat8E4M3): inputs = g.op(make_op_name("TRT_FP8DequantizeLinear"), inputs, input_scale_inverse) @@ -130,6 +134,7 @@ def onnx_te_gemm( @symbolic_helper.parse_args("v", "v", "v", "f", "v", "v", "v", "i") def onnx_layernorm_fwd_fp8(g, inputs, weight, bias, eps, scale, amax, scale_inv, otype): """ONNX graph for layernorm_fwd_fp8""" + # pylint: disable=unused-argument ln = onnx_layernorm_fwd(g, inputs, weight, bias, eps) output_shape = torch.onnx.symbolic_helper._get_tensor_sizes(inputs) if inputs.type().scalarType() == "Half": @@ -142,6 +147,7 @@ def onnx_layernorm_fwd_fp8(g, inputs, weight, bias, eps, scale, amax, scale_inv, @symbolic_helper.parse_args("v", "v", "v", "f") def onnx_layernorm_fwd(g, inputs, weight, bias, eps): """ONNX graph for layernorm_fwd""" + # pylint: disable=unused-argument normalized_shape = 
torch.onnx.symbolic_helper._get_tensor_sizes(inputs) if normalized_shape is None: ndim = torch.onnx.symbolic_helper._get_tensor_rank(inputs) From 65231f43ffca5f739d7660082250ee11a09795a5 Mon Sep 17 00:00:00 2001 From: Asfiya Baig Date: Wed, 14 Dec 2022 20:32:46 +0000 Subject: [PATCH 03/16] fix pylint warning in softmax.py Signed-off-by: Asfiya Baig --- transformer_engine/pytorch/softmax.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/transformer_engine/pytorch/softmax.py b/transformer_engine/pytorch/softmax.py index a33cf5a85f..2e9cf61388 100644 --- a/transformer_engine/pytorch/softmax.py +++ b/transformer_engine/pytorch/softmax.py @@ -128,7 +128,7 @@ def symbolic( scaled = g.op("Mul", inputs, scale_input) one = g.op("Constant", value_t=torch.tensor(1, dtype=torch.int64)) inv_mask = g.op("Sub", one, mask) - # Todo: type is hard coded because softmax uses FP16 or BF16 + # Note: type is hard coded because softmax uses FP16 or BF16 neg_tenK = g.op("Constant", value_t=torch.tensor(-10000., dtype=torch.float16)) softmax_mask = g.op("Mul", mask, neg_tenK) masked_scaled = g.op("Mul", inv_mask, scaled) From 291638875063462b08d6cb5ed9bb2b57e72aa037 Mon Sep 17 00:00:00 2001 From: Asfiya Baig Date: Wed, 14 Dec 2022 20:37:49 +0000 Subject: [PATCH 04/16] move FP8 ORT lib inside tests/ Signed-off-by: Asfiya Baig --- .../libcustom_ort_fp8_qdq_ops.so | Bin tests/test_onnx_export.py | 2 +- 2 files changed, 1 insertion(+), 1 deletion(-) rename libcustom_ort_fp8_qdq_ops.so => tests/libcustom_ort_fp8_qdq_ops.so (100%) diff --git a/libcustom_ort_fp8_qdq_ops.so b/tests/libcustom_ort_fp8_qdq_ops.so similarity index 100% rename from libcustom_ort_fp8_qdq_ops.so rename to tests/libcustom_ort_fp8_qdq_ops.so diff --git a/tests/test_onnx_export.py b/tests/test_onnx_export.py index 15c309ccdb..8213e0f752 100644 --- a/tests/test_onnx_export.py +++ b/tests/test_onnx_export.py @@ -29,7 +29,7 @@ ONNX_FILES_DIR = "./gen_onnx_models" # Shared library implementing custom FP8 Q/DQ 
operators for ONNX Runtime (ORT). -ORT_CUSTOM_OPS_LIB = "./libcustom_ort_fp8_qdq_ops.so" +ORT_CUSTOM_OPS_LIB = "./tests/libcustom_ort_fp8_qdq_ops.so" # ScaledUpperTriangMaskedSoftmax is exported via ONNX::Trilu which was introduced in opset 14. TRILU_OPSET = 14 From 91f6c39749808cb7d49c22d51ad7680df5c7c742 Mon Sep 17 00:00:00 2001 From: Asfiya Baig Date: Thu, 15 Dec 2022 01:46:43 +0000 Subject: [PATCH 05/16] enable cross attention tests Signed-off-by: Asfiya Baig --- tests/test_onnx_export.py | 33 +++++++++++++++++++++++---------- 1 file changed, 23 insertions(+), 10 deletions(-) diff --git a/tests/test_onnx_export.py b/tests/test_onnx_export.py index 8213e0f752..c0e3997355 100644 --- a/tests/test_onnx_export.py +++ b/tests/test_onnx_export.py @@ -133,10 +133,12 @@ def load_custom_ops(session_opts: ort.SessionOptions): def create_ort_input_dict(session, inps): inp_dict = {} if isinstance(inps, tuple) or isinstance(inps, list): + nonetype_inputs = 0 for idx, inp in enumerate(inps): if inp is None: + nonetype_inputs += 1 continue - inp_dict[session.get_inputs()[idx].name] = to_numpy(inp) + inp_dict[session.get_inputs()[idx - nonetype_inputs].name] = to_numpy(inp) else: inp_dict[session.get_inputs()[0].name] = to_numpy(inps) return inp_dict @@ -742,16 +744,23 @@ def test_export_core_attention( (True, "padding"), # calls ScaledMaskedSoftmax (False, "padding"), # calls ScaledSoftmax ] +test_configs_attention_type = [ + (True, "self", True), + (False, "self", True), + (True, "self", False), + (False, "self", False), + # disabled because query_bias (reqd for cross attention) is defined when fuse_qkv_params is False + # ("cross", True), + (True, "cross", False), + # disabled because TypeError: cannot assign 'transformer_engine.pytorch.module.Linear' + # as parameter 'query' (torch.nn.Parameter or None expected) + # (False, "cross", False), +] @pytest.mark.parametrize("use_fp8", [False, True]) @pytest.mark.parametrize("use_mask, attn_mask_type", 
test_configs_multihead_attention) @pytest.mark.parametrize("precision", [torch.float32, torch.float16]) -@pytest.mark.parametrize("input_layernorm", [True, False]) @pytest.mark.parametrize("return_layernorm_output", [False]) -@pytest.mark.parametrize("attention_type", [ - "self", - #"cross" # TODO: handle this ORT error -]) -@pytest.mark.parametrize("fuse_qkv_params", [False, True]) +@pytest.mark.parametrize("input_layernorm, attention_type, fuse_qkv_params", test_configs_attention_type) def test_export_multihead_attention( use_fp8: bool, use_mask: bool, @@ -780,14 +789,18 @@ def test_export_multihead_attention( output_layer_init_method, ) hidden_states = torch.randn(sequence_length, batch_size, hidden_size, dtype=precision, device="cuda") - input_names = ["hidden_states"] + attention_mask = None if use_mask and attn_mask_type != "causal": # Generate a random mask with 50% probability for 0 or 1. probs = 0.5 * torch.ones(batch_size, 1, sequence_length, sequence_length, device="cuda", dtype=precision) attention_mask = torch.bernoulli(probs).to("cuda", dtype=torch.bool) - input_names.append("attention_mask") - inp = (hidden_states, attention_mask) + + encoder_output = None + if attention_type == "cross": + encoder_output = torch.randn(sequence_length, batch_size, hidden_size, dtype=precision, device="cuda") + inp = (hidden_states, attention_mask, encoder_output) + input_names = ["hidden_states", "attention_mask", "encoder_output"] fp8_str = "_fp8" if use_fp8 else "" dtype_str = dtype2str(precision) From 35f897d843cfea2b7d0d91d2aba30015cc487f53 Mon Sep 17 00:00:00 2001 From: Asfiya Baig Date: Thu, 15 Dec 2022 18:27:50 +0000 Subject: [PATCH 06/16] refactor code by @nzmora * Increase layernorm FP16 threshold * Normalize onnx file names: _ separates configs; - separates words in a single config * Add get_attn_mask_str and fix mask string * Add missing ONNX files * Moved generated ONNX files to tests/gen_onnx_models/ Signed-off-by: Asfiya Baig --- 
tests/test_onnx_export.py | 111 +++++++++++++++++++++----------------- 1 file changed, 63 insertions(+), 48 deletions(-) diff --git a/tests/test_onnx_export.py b/tests/test_onnx_export.py index c0e3997355..d17f7af523 100644 --- a/tests/test_onnx_export.py +++ b/tests/test_onnx_export.py @@ -26,7 +26,8 @@ # Directory where generated ONNX test models are stored. -ONNX_FILES_DIR = "./gen_onnx_models" +TESTS_DIR = os.path.dirname(os.path.abspath(__file__)) +ONNX_FILES_DIR = os.path.join(TESTS_DIR, "./gen_onnx_models") # Shared library implementing custom FP8 Q/DQ operators for ONNX Runtime (ORT). ORT_CUSTOM_OPS_LIB = "./tests/libcustom_ort_fp8_qdq_ops.so" @@ -196,6 +197,16 @@ def as_te_type(dtype: torch.dtype): }[dtype] +def get_attn_mask_str(use_mask, attn_mask_type): + # See FusedScaleMaskSoftmax::forward_fused_softmax for logic behind names. + if attn_mask_type is None: + return "_mask" if use_mask else "_no-mask" + attn_mask_str = "_padding-no-mask" + attn_mask_str = "_causal-mask" if attn_mask_type == "causal" else attn_mask_str + attn_mask_str = "_padding-mask" if use_mask and attn_mask_type == "padding" else attn_mask_str + return attn_mask_str + + @pytest.mark.parametrize("scale_factor, atol", [ (1, 1e-7), (224, 1e-7) @@ -230,16 +241,18 @@ def forward(self, inp): hidden_size = 256 inp = torch.randn(hidden_size, in_features, device="cuda", dtype=precision) high_prec_str = dtype2str(precision) - fname = f"te.cast_fp8.s_{scale_factor}{high_prec_str}.onnx" + fname = f"te.cast_fp8_{scale_factor}{high_prec_str}.onnx" model = TestFP8_QDQ() do_export(model, inp, fname) validate_result(fname, inp, model, atol=atol, is_fp8=True) @pytest.mark.parametrize("scale_factor", [448]) -@pytest.mark.parametrize("precision, atol", [ - [torch.float32, 1e-7], [torch.float16, 2e-3]] -) +@pytest.mark.parametrize( + "precision, atol", [ + [torch.float32, 1e-7], + [torch.float16, 2e-3] +]) def test_export_gelu_fp8(scale_factor: float, precision: torch.dtype, atol: float): class 
TestFP8_Gelu(nn.Module): def __init__(self): @@ -268,13 +281,15 @@ def forward(self, inp): hidden_size = 256 inp = torch.randn(hidden_size, in_features, device="cuda", dtype=precision) high_prec_str = dtype2str(precision) - fname = f"te.gelu_fp8{high_prec_str}.onnx" + fname = f"te.gelu_fp8_{scale_factor}{high_prec_str}.onnx" model = TestFP8_Gelu() do_export(model, inp, fname) validate_result(fname, inp, model, rtol=1e-1, atol=atol, is_fp8=True) -@pytest.mark.parametrize("scale_factors", [(224, 224,), ]) +@pytest.mark.parametrize("scale_factors", + [(224, 224,), +]) @pytest.mark.parametrize( "precision, use_fp8, use_bias, use_gelu", [ (torch.float32, False, False, False), @@ -463,17 +478,13 @@ def forward(self, inp): return ret inp = torch.randn(*inp_shape, device="cuda", dtype=precision) + model = TestFP8_Layernorm() if use_fp8 else Test_Layernorm() high_prec_str = dtype2str(precision) - if use_fp8: - fname = f"te.layernorm_fwd_fp8{high_prec_str}.onnx" - model = TestFP8_Layernorm() - else: - fname = f"te.layernorm_fwd{high_prec_str}.onnx" - model = Test_Layernorm() - + fp8_str = "_fp8" if use_fp8 else "" + fname = f"te.layernorm{fp8_str}_{scale_factor}{high_prec_str}.onnx" do_export(model, inp, fname) if precision not in (torch.bfloat16, ): - validate_result(fname, inp, model, atol=1e-5, is_fp8=use_fp8) + validate_result(fname, inp, model, atol=5e-4, is_fp8=use_fp8) @pytest.mark.parametrize("softmax_def", [ @@ -506,17 +517,17 @@ def forward(self, inp, mask): inp_shape = [hidden_size, in_features, in_features, in_features] if softmax_def == softmax_defs.ScaledUpperTriangMaskedSoftmax: inp_shape = [hidden_size, in_features, in_features] - kernel_str = "te.ScaledUpperTriangMaskedSoftmax" + kernel_str = "ScaledUpperTriangMaskedSoftmax" model = Test_Softmax(softmax_def) elif softmax_def == softmax_defs.ScaledMaskedSoftmax: # Generate a random mask with 50% probability for 0 or 1. 
probs = 0.5 * torch.ones(hidden_size, 1, in_features, in_features, device="cuda", dtype=precision) mask = torch.bernoulli(probs).to("cuda", dtype=torch.bool) input_names.append("mask") - kernel_str = "te.ScaledMaskedSoftmax" + kernel_str = "ScaledMaskedSoftmax" model = Test_Softmax(softmax_def, mask_inp=True) elif softmax_def == softmax_defs.ScaledSoftmax: - kernel_str = "te.ScaledSoftmax" + kernel_str = "ScaledSoftmax" model = Test_Softmax(softmax_def) input_tensor = torch.randn(*inp_shape, device="cuda") input_tensor = input_tensor.to(torch.bfloat16) if precision == torch.bfloat16 else input_tensor.half() @@ -530,7 +541,8 @@ def forward(self, inp, mask): @pytest.mark.parametrize("scale_factor", [1]) @pytest.mark.parametrize("use_fp8", [False, True]) -@pytest.mark.parametrize("return_bias", [False, True]) +# Returning the bias is a TE fusion optimization we don't care about. +@pytest.mark.parametrize("return_bias", [False]) @pytest.mark.parametrize( "precision, use_bias",[ (torch.float32, False), @@ -580,7 +592,7 @@ def test_export_linear( @pytest.mark.parametrize("scale_factor", [112]) @pytest.mark.parametrize("use_fp8", [False, True]) -# Todo: handle case of True +# Returning the bias is a TE fusion optimization we don't care about. @pytest.mark.parametrize("return_bias", [False]) @pytest.mark.parametrize("return_layernorm_output", [False]) @pytest.mark.parametrize( @@ -629,7 +641,7 @@ def test_export_layernorm_linear( @pytest.mark.parametrize("scale_factor", [112]) @pytest.mark.parametrize("use_fp8", [False, True]) -# Todo: handle case of True +# Returning the bias is a TE fusion optimization we don't care about. 
@pytest.mark.parametrize("return_bias", [False]) @pytest.mark.parametrize("return_layernorm_output", [False]) # Todo: cannot handle FP16 for some reason @@ -686,8 +698,10 @@ def test_export_layernorm_mlp( (torch.float16, True, "padding"), # calls ScaledMaskedSoftmax (torch.float16, False, "padding"), # calls ScaledSoftmax ]) -@pytest.mark.parametrize("attention_softmax_in_fp32", [True, False]) -@pytest.mark.parametrize("apply_query_key_layer_scaling", [True, False]) +@pytest.mark.parametrize("attention_softmax_in_fp32", + [True, False]) +@pytest.mark.parametrize("apply_query_key_layer_scaling", + [True, False]) def test_export_core_attention( precision: torch.dtype, use_mask: bool, @@ -695,9 +709,6 @@ def test_export_core_attention( attention_softmax_in_fp32: bool, apply_query_key_layer_scaling: bool, ): - if attn_mask_type is None: - attn_mask_type = 'causal' - # Set dimensions (these are arbitrary). kv_channels = 64 num_attention_heads = 1 @@ -715,14 +726,14 @@ def test_export_core_attention( input_names.append("attention_mask") inp = (query_layer, key_layer, value_layer, attention_mask) - sm_prec_str = "_fp32" if attention_softmax_in_fp32 else "_fp16" - qk_scaling_str = "_qk_scaling" if apply_query_key_layer_scaling else "" - mask_str = "_masked" if use_mask else \ - "_upper_trian_masked" if attn_mask_type=="causal" and precision == torch.float16 else \ - "" + sm_prec_str = "_sm-fp32" if attention_softmax_in_fp32 else "_sm-fp16" + qk_scaling_str = "_qk-scaling" if apply_query_key_layer_scaling else "" + mask_str = get_attn_mask_str(use_mask, attn_mask_type) high_prec_str = dtype2str(precision) fname = f"te.core_attention{mask_str}{qk_scaling_str}{sm_prec_str}{high_prec_str}.onnx" + if attn_mask_type is None: + attn_mask_type = 'causal' model = te.transformer.CoreAttention( num_attention_heads=num_attention_heads, kv_channels=kv_channels, @@ -740,21 +751,24 @@ def test_export_core_attention( test_configs_multihead_attention = [ - (False, "causal"), # calls 
ScaledUpperTriangMaskedSoftmax - (True, "padding"), # calls ScaledMaskedSoftmax - (False, "padding"), # calls ScaledSoftmax + #"use_mask, attn_mask_type" + (False, "causal"), # calls ScaledUpperTriangMaskedSoftmax + (True, "padding"), # calls ScaledMaskedSoftmax + (False, "padding"), # calls ScaledSoftmax ] test_configs_attention_type = [ - (True, "self", True), - (False, "self", True), - (True, "self", False), - (False, "self", False), + #"input_layernorm, attention_type, fuse_qkv_params" + (True, "self", True), + (False, "self", True), + (True, "self", False), + (False, "self", False), # disabled because query_bias (reqd for cross attention) is defined when fuse_qkv_params is False - # ("cross", True), - (True, "cross", False), + # (True, "cross", True), + # (False, "cross", True), + (True, "cross", False), # disabled because TypeError: cannot assign 'transformer_engine.pytorch.module.Linear' # as parameter 'query' (torch.nn.Parameter or None expected) - # (False, "cross", False), + # (False, "cross", False), ] @pytest.mark.parametrize("use_fp8", [False, True]) @pytest.mark.parametrize("use_mask, attn_mask_type", test_configs_multihead_attention) @@ -804,10 +818,11 @@ def test_export_multihead_attention( fp8_str = "_fp8" if use_fp8 else "" dtype_str = dtype2str(precision) - attn_type_str = "_self_attention" if attention_type == "self" else "_cross_attention" - fuse_qkv_str = "_fused" if fuse_qkv_params else "" - attn_mask_type_str = f"_{attn_mask_type}" if (use_mask and attn_mask_type != "") else "" - fname = f"te.multihead_attention{fp8_str}{attn_mask_type_str}{attn_type_str}{fuse_qkv_str}{dtype_str}.onnx" + attn_type_str = "_self-attention" if attention_type == "self" else "_cross-attention" + fuse_qkv_str = "_fused-qkv" if fuse_qkv_params else "" + attn_mask_str = get_attn_mask_str(use_mask, attn_mask_type) + input_ln_str = "_input-ln" if input_layernorm else "" + fname = 
f"te.multihead_attention{fp8_str}{attn_mask_str}{attn_type_str}{input_ln_str}{fuse_qkv_str}{dtype_str}.onnx" model = te.transformer.MultiHeadAttention( *attention_args, @@ -861,11 +876,11 @@ def test_export_transformer_layer( inp = (input_tensor, attention_mask) fp8_str = "_fp8" if use_fp8 else "" - fuse_qkv_params_str = "_fuse-qkv" if fuse_qkv_params else "" + fuse_qkv_params_str = "_fused-qkv" if fuse_qkv_params else "" qk_scaling_str = "_qk-scaling" if apply_query_key_layer_scaling else "" high_prec_str = dtype2str(precision) - attn_mask_type_str = f"_{attn_mask_type}" if (use_mask and attn_mask_type != "") else "" - fname = f"te.transformer_layer{fp8_str}{attn_mask_type_str}{fuse_qkv_params_str}{qk_scaling_str}{high_prec_str}.onnx" + attn_mask_str = get_attn_mask_str(use_mask, attn_mask_type) + fname = f"te.transformer_layer{fp8_str}{attn_mask_str}{fuse_qkv_params_str}{qk_scaling_str}{high_prec_str}.onnx" model = te.TransformerLayer( hidden_size, From 697f7e1c791d8a8df6306b7e588a4b04d5df5e94 Mon Sep 17 00:00:00 2001 From: Asfiya Baig Date: Tue, 27 Dec 2022 19:25:36 +0000 Subject: [PATCH 07/16] fix merge conflict changes Signed-off-by: Asfiya Baig --- transformer_engine/pytorch/module.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/transformer_engine/pytorch/module.py b/transformer_engine/pytorch/module.py index d0b66923e6..5141358272 100644 --- a/transformer_engine/pytorch/module.py +++ b/transformer_engine/pytorch/module.py @@ -7,7 +7,7 @@ import pickle import warnings from abc import ABC, abstractmethod -from typing import Union, Optional, Callable, Tuple, Dict, Any +from typing import Union, Optional, Callable, Tuple, Dict, List, Any, Mapping from functools import partial import numpy as np import torch From ebf781a2164d4125b48027df35c81ddbbf75a214 Mon Sep 17 00:00:00 2001 From: Asfiya Baig Date: Tue, 27 Dec 2022 19:56:06 +0000 Subject: [PATCH 08/16] fix Q/DQ scale input Signed-off-by: Asfiya Baig --- tests/test_onnx_export.py | 44 
++++++++--- transformer_engine/pytorch/cpp_extensions.py | 2 + transformer_engine/pytorch/csrc/ts_fp8_op.cpp | 1 + transformer_engine/pytorch/module.py | 2 + .../pytorch/te_onnx_extensions.py | 78 ++++++++++++------- 5 files changed, 87 insertions(+), 40 deletions(-) diff --git a/tests/test_onnx_export.py b/tests/test_onnx_export.py index d17f7af523..5371d6fb79 100644 --- a/tests/test_onnx_export.py +++ b/tests/test_onnx_export.py @@ -418,7 +418,7 @@ def forward(self, inp, weight): else: model = Test_GEMM(precision, use_bias, use_gelu) do_export(model, (inp, weight), fname, use_fp8) - validate_result(fname, (inp, weight), model, rtol=1e-2, atol=1e-2) + validate_result(fname, (inp, weight), model, rtol=1e-2, atol=2e-2) @pytest.mark.parametrize("use_fp8", [False, True]) @@ -467,6 +467,7 @@ def forward(self, inp): self.bias, self.eps, self.meta, + self.fp8_tensor, self.fp8_type) ret = cast_from_fp8( @@ -480,11 +481,12 @@ def forward(self, inp): inp = torch.randn(*inp_shape, device="cuda", dtype=precision) model = TestFP8_Layernorm() if use_fp8 else Test_Layernorm() high_prec_str = dtype2str(precision) - fp8_str = "_fp8" if use_fp8 else "" - fname = f"te.layernorm{fp8_str}_{scale_factor}{high_prec_str}.onnx" + fp8_str = f"_fp8-{scale_factor}" if use_fp8 else "" + fname = f"te.layernorm{fp8_str}{high_prec_str}.onnx" do_export(model, inp, fname) if precision not in (torch.bfloat16, ): - validate_result(fname, inp, model, atol=5e-4, is_fp8=use_fp8) + # TODO: FP32 has a small threshold (1e-5) + validate_result(fname, inp, model, atol=1e-3, is_fp8=use_fp8) @pytest.mark.parametrize("softmax_def", [ @@ -567,21 +569,43 @@ def test_export_linear( out_features = 256 hidden_size = 256 + class Test_Linear(nn.Module): + def __init__(self, + in_features, + out_features, + use_bias, + return_bias, + precision + ): + super().__init__() + self.linear = te.Linear( + in_features, + out_features, + bias=use_bias, + return_bias=return_bias, + params_dtype=precision + ) + + def 
forward(self, inp): + ret = self.linear(inp) + return ret + + inp = torch.randn(hidden_size, in_features, device="cuda", dtype=precision) fp8_str = "_fp8" if use_fp8 else "" bias_str = "_bias" if use_bias else "" high_prec_str = dtype2str(precision) fname = f"te.linear{fp8_str}{bias_str}{high_prec_str}.onnx" with te.fp8_autocast(enabled=use_fp8): - model = te.Linear( + model = Test_Linear( in_features, out_features, - bias=use_bias, - return_bias=return_bias, - params_dtype=precision + use_bias, + return_bias, + precision ).to(device='cuda') if use_fp8: - set_layer_scale(model, scale_factor) + set_layer_scale(model.linear, scale_factor) do_export(model, inp, fname, use_fp8) if not use_fp8: @@ -687,7 +711,7 @@ def test_export_layernorm_mlp( if not use_fp8: validate_result(fname, inp, model, atol=1e-3) else: - validate_result(fname, inp, model, atol=1e-2, is_fp8=use_fp8) + validate_result(fname, inp, model, atol=2e-2, is_fp8=use_fp8) @pytest.mark.parametrize( diff --git a/transformer_engine/pytorch/cpp_extensions.py b/transformer_engine/pytorch/cpp_extensions.py index 6d3ffc0839..babd1ec7f7 100644 --- a/transformer_engine/pytorch/cpp_extensions.py +++ b/transformer_engine/pytorch/cpp_extensions.py @@ -259,6 +259,7 @@ def layernorm_fwd_fp8_inf( bias: torch.Tensor, eps: float, fp8_meta_tensor: tex.FP8TensorMeta, + fp8_tensor: Union[tex.FP8FwdTensors, tex.FP8BwdTensors], otype: tex.DType, ) -> torch.Tensor: """LayerNorm with FP8 output. 
@@ -274,6 +275,7 @@ def layernorm_fwd_fp8_inf( fp8_meta_tensor.scale, fp8_meta_tensor.amax_history, fp8_meta_tensor.scale_inv, + fp8_tensor, otype) return ret diff --git a/transformer_engine/pytorch/csrc/ts_fp8_op.cpp b/transformer_engine/pytorch/csrc/ts_fp8_op.cpp index 94e46f0dce..a50242dc0d 100755 --- a/transformer_engine/pytorch/csrc/ts_fp8_op.cpp +++ b/transformer_engine/pytorch/csrc/ts_fp8_op.cpp @@ -138,6 +138,7 @@ at::Tensor layernorm_fwd_fp8_inf_ts(const at::Tensor &input, at::Tensor scale, at::Tensor amax, at::Tensor scale_inv, + int64_t fp8_tensor, int64_t otype) { transformer_engine::DType otype_arg = reverse_map_dtype(otype); float eps_float = static_cast(eps); diff --git a/transformer_engine/pytorch/module.py b/transformer_engine/pytorch/module.py index 5141358272..f3b9f0df54 100644 --- a/transformer_engine/pytorch/module.py +++ b/transformer_engine/pytorch/module.py @@ -615,6 +615,7 @@ def forward( ln_bias, eps, fp8_meta["scaling_fwd"], + tex.FP8FwdTensors.GEMM1_INPUT, fp8_dtype_forward, ) else: @@ -1805,6 +1806,7 @@ def forward( ln_bias, eps, fp8_meta["scaling_fwd"], + tex.FP8FwdTensors.GEMM1_INPUT, fp8_dtype_forward, ) else: diff --git a/transformer_engine/pytorch/te_onnx_extensions.py b/transformer_engine/pytorch/te_onnx_extensions.py index 2424c62a64..bbdefdc26b 100755 --- a/transformer_engine/pytorch/te_onnx_extensions.py +++ b/transformer_engine/pytorch/te_onnx_extensions.py @@ -16,6 +16,11 @@ /opt/pytorch/pytorch/torch/csrc/jit/passes/onnx/shape_type_inference.cpp:1880.) _C._jit_pass_onnx_graph_shape_type_inference( + +Scale tensors are treated as lists ("fs") instead of tensors ("v") because we need to access +specific entries using the index passed as `fp8_tensor`. If you fail to do this you will get +the following error when accessing a specific scale element (e.g. 
`scale_inv[fp8_tensor]`): + TypeError: 'torch._C.Value' object is not subscriptable """ import torch @@ -30,55 +35,71 @@ # Custom ops spec version VER = 1 +UNSPECIFIED_TYPE = -1 + def make_op_name(op_name: str) -> str: """custom op name""" return "trt::" + op_name -@symbolic_helper.parse_args("v", "v", "v", "v", "i", "i") -def onnx_cast_to_fp8(g, inputs, scale, amax, scale_inv, fp8_tensor, otype): - """ONNX graph for cast_to_fp8""" - # pylint: disable=unused-argument +def quantize(g, inputs, scale_inv, fp8_tensor): output_shape = torch.onnx.symbolic_helper._get_tensor_sizes(inputs) + + # Q inputs are currently constrained to FP32 due to a similar limitation in ORT + # custom ops, so cast the input if needed. if inputs.type().scalarType() == "Half": - # Q inputs are currently constrained to FP32 due to a similar limitation in ORT custom ops. inputs = g.op("Cast", inputs, to_i=_C_onnx.TensorProtoDataType.FLOAT) - return g.op(make_op_name("TRT_FP8QuantizeLinear"), inputs, scale_inv).setType( + + scale = g.op("Constant", value_t=torch.tensor(scale_inv[fp8_tensor])) + q_op = g.op( + make_op_name("TRT_FP8QuantizeLinear"), inputs, scale).setType( inputs.type().with_dtype(torch.uint8).with_sizes(output_shape)) + return q_op -@symbolic_helper.parse_args("v", "v", "i", "i", "i") -def onnx_cast_from_fp8(g, inputs, scale_inv, fp8_tensor, itype, otype): - """ONNX graph for cast_from_fp8""" - # pylint: disable=unused-argument +def dequantize(g, inputs, scale_inv, fp8_tensor, otype): output_shape = torch.onnx.symbolic_helper._get_tensor_sizes(inputs) - out = g.op(make_op_name("TRT_FP8DequantizeLinear"), inputs, scale_inv).setType( + + scale = g.op("Constant", value_t=torch.tensor(scale_inv[fp8_tensor])) + out = g.op(make_op_name("TRT_FP8DequantizeLinear"), inputs, scale).setType( inputs.type().with_dtype(torch.float32).with_sizes(output_shape)) + + # DQ outputs are currently constrained to FP32 due to a similar limitation in ORT + # custom ops, so cast the output if needed. 
if otype == int(tex.DType.kFloat16): - # DQ outputs are currently constrained to FP32 due to a similar limitation in ORT - # custom ops, so cast the output. out = g.op("Cast", out, to_i=_C_onnx.TensorProtoDataType.FLOAT16) return out -@symbolic_helper.parse_args("v", "v", "v", "v", "i", "i") +@symbolic_helper.parse_args("v", "v", "v", "fs", "i", "i") +def onnx_cast_to_fp8(g, inputs, scale, amax, scale_inv, fp8_tensor, otype): + """ONNX graph for cast_to_fp8""" + # pylint: disable=unused-argument + return quantize(g, inputs, scale_inv, fp8_tensor) + + +@symbolic_helper.parse_args("v", "fs", "i", "i", "i") +def onnx_cast_from_fp8(g, inputs, scale_inv, fp8_tensor, itype, otype): + """ONNX graph for cast_from_fp8""" + # pylint: disable=unused-argument + return dequantize(g, inputs, scale_inv, fp8_tensor, otype) + + +@symbolic_helper.parse_args("v", "v", "v", "fs", "i", "i") def onnx_fp8_gelu(g, inputs, scale, amax, scale_inv, fp8_tensor, otype): """ONNX graph for fp8_gelu""" # pylint: disable=unused-argument output_shape = torch.onnx.symbolic_helper._get_tensor_sizes(inputs) gelu = torch.onnx.symbolic_opset9.gelu(g, inputs, "tanh") - if inputs.type().scalarType() == "Half": - gelu = g.op("Cast", gelu, to_i=_C_onnx.TensorProtoDataType.FLOAT) - out = g.op(make_op_name("TRT_FP8QuantizeLinear"), gelu, scale_inv).setType( - inputs.type().with_dtype(torch.uint8).with_sizes(output_shape)) + out = quantize(g, gelu, scale_inv, fp8_tensor) return out -@symbolic_helper.parse_args("v", "v", "i", "i", "i", - "v", "v", "i", "i", "i", - "v", "i", "v", "v", "i", - "v", "i", "i", "i") +@symbolic_helper.parse_args("v", "fs", "i", "i", "i", + "v", "fs", "i", "i", "i", + "v", "i", "v", "v", "i", + "v", "i", "i", "i") def onnx_te_gemm( g, weight, @@ -104,10 +125,10 @@ def onnx_te_gemm( # pylint: disable=unused-argument is_fp16 = bias.type().scalarType() == "Half" if input_type == int(tex.DType.kFloat8E4M3): - inputs = g.op(make_op_name("TRT_FP8DequantizeLinear"), inputs, 
input_scale_inverse) + inputs = dequantize(g, inputs, input_scale_inverse, input_fp8_tensor, UNSPECIFIED_TYPE) if weight_type == int(tex.DType.kFloat8E4M3): - weight = g.op(make_op_name("TRT_FP8DequantizeLinear"), weight, weight_scale_inverse) + weight = dequantize(g, weight, weight_scale_inverse, weight_fp8_tensor, UNSPECIFIED_TYPE) output = g.op("Gemm", inputs, weight, transA_i=trans_input, transB_i=trans_weight) @@ -131,16 +152,13 @@ def onnx_te_gemm( return output -@symbolic_helper.parse_args("v", "v", "v", "f", "v", "v", "v", "i") -def onnx_layernorm_fwd_fp8(g, inputs, weight, bias, eps, scale, amax, scale_inv, otype): +@symbolic_helper.parse_args("v", "v", "v", "f", "v", "v", "fs", "i", "i") +def onnx_layernorm_fwd_fp8(g, inputs, weight, bias, eps, scale, amax, scale_inv, fp8_tensor, otype): """ONNX graph for layernorm_fwd_fp8""" # pylint: disable=unused-argument ln = onnx_layernorm_fwd(g, inputs, weight, bias, eps) output_shape = torch.onnx.symbolic_helper._get_tensor_sizes(inputs) - if inputs.type().scalarType() == "Half": - ln = g.op("Cast", ln, to_i=_C_onnx.TensorProtoDataType.FLOAT) - fp8_ln = g.op(make_op_name("TRT_FP8QuantizeLinear"), ln, scale_inv).setType( - inputs.type().with_dtype(torch.uint8).with_sizes(output_shape)) + fp8_ln = quantize(g, ln, scale_inv, fp8_tensor) return fp8_ln From b9b5477337ef83b7f5765f46da9acca3cde4bd1e Mon Sep 17 00:00:00 2001 From: Asfiya Baig Date: Tue, 3 Jan 2023 20:15:54 +0000 Subject: [PATCH 09/16] enable FP16 config when bias is disabled Signed-off-by: Asfiya Baig --- tests/test_onnx_export.py | 18 ++++++++---------- transformer_engine/pytorch/module.py | 6 +++--- 2 files changed, 11 insertions(+), 13 deletions(-) diff --git a/tests/test_onnx_export.py b/tests/test_onnx_export.py index 5371d6fb79..3bbe224055 100644 --- a/tests/test_onnx_export.py +++ b/tests/test_onnx_export.py @@ -549,11 +549,10 @@ def forward(self, inp, mask): "precision, use_bias",[ (torch.float32, False), (torch.float32, True), - # Todo: cannot 
configure FP16/BF16 when bias is disabled - - # AssertionError: Data type for activations and buffers must match when outside of autocasted region - # (torch.float16, False), + (torch.float16, False), (torch.float16, True), - #(torch.bfloat16, False), + # Todo: cannot configure BF16 when bias is disabled (ORT issue?) + (torch.bfloat16, False), # Todo: cannot configure BF16 when bias is enabled (ORT issue?) # (torch.bfloat16, True), ]) @@ -608,9 +607,11 @@ def forward(self, inp): set_layer_scale(model.linear, scale_factor) do_export(model, inp, fname, use_fp8) + if precision in (torch.bfloat16, ): + return if not use_fp8: validate_result(fname, inp, model, atol=1e-3) - elif precision not in (torch.bfloat16, ): + else: validate_result(fname, inp, model, atol=1e-3, is_fp8=use_fp8) @@ -624,8 +625,7 @@ def forward(self, inp): (torch.float32, False), (torch.float32, True), (torch.float16, True), - # Todo: cannot configure FP16 when bias is disabled - #(torch.float16, False), + (torch.float16, False), ]) def test_export_layernorm_linear( scale_factor: float, @@ -668,14 +668,12 @@ def test_export_layernorm_linear( # Returning the bias is a TE fusion optimization we don't care about. 
@pytest.mark.parametrize("return_bias", [False]) @pytest.mark.parametrize("return_layernorm_output", [False]) -# Todo: cannot handle FP16 for some reason @pytest.mark.parametrize( "precision, use_bias",[ (torch.float32, False), (torch.float32, True), (torch.float16, True), - # Todo: cannot configure FP16 when bias is disabled - #(torch.float16, False), + (torch.float16, False), ]) def test_export_layernorm_mlp( scale_factor: float, diff --git a/transformer_engine/pytorch/module.py b/transformer_engine/pytorch/module.py index f3b9f0df54..6ab26d3fdf 100644 --- a/transformer_engine/pytorch/module.py +++ b/transformer_engine/pytorch/module.py @@ -1086,7 +1086,7 @@ def __init__( if self.parallel_mode == "column": set_tensor_model_parallel_attributes(self.bias, True, 0, 1) else: - self.register_buffer("bias", torch.Tensor(), persistent=False) + self.register_buffer("bias", torch.Tensor().type(params_dtype), persistent=False) with torch.no_grad(): self.bias.zero_() @@ -1648,7 +1648,7 @@ def __init__( if self.parallel_mode == "column": set_tensor_model_parallel_attributes(self.bias, True, 0, 1) else: - self.register_buffer("bias", torch.Tensor(), persistent=False) + self.register_buffer("bias", torch.Tensor().type(params_dtype), persistent=False) with torch.no_grad(): self.bias.zero_() @@ -2501,7 +2501,7 @@ def __init__( ) ) else: - self.register_buffer("fc2_bias", torch.Tensor(), persistent=False) + self.register_buffer("fc2_bias", torch.Tensor().type(params_dtype), persistent=False) # For RPL, bias has to be added after TP collectives # So it cannot be fused with the GEMM From 4812408c78e630205b35239a07adb42ba15e2677 Mon Sep 17 00:00:00 2001 From: Asfiya Baig Date: Wed, 4 Jan 2023 21:39:37 +0000 Subject: [PATCH 10/16] fix pylint check errors Signed-off-by: Asfiya Baig --- transformer_engine/pytorch/te_onnx_extensions.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/transformer_engine/pytorch/te_onnx_extensions.py 
b/transformer_engine/pytorch/te_onnx_extensions.py index bbdefdc26b..765e30e7fc 100755 --- a/transformer_engine/pytorch/te_onnx_extensions.py +++ b/transformer_engine/pytorch/te_onnx_extensions.py @@ -44,6 +44,7 @@ def make_op_name(op_name: str) -> str: def quantize(g, inputs, scale_inv, fp8_tensor): + """Helper Function for Quantization""" output_shape = torch.onnx.symbolic_helper._get_tensor_sizes(inputs) # Q inputs are currently constrained to FP32 due to a similar limitation in ORT @@ -59,6 +60,7 @@ def quantize(g, inputs, scale_inv, fp8_tensor): def dequantize(g, inputs, scale_inv, fp8_tensor, otype): + """Helper Function for Dequantization""" output_shape = torch.onnx.symbolic_helper._get_tensor_sizes(inputs) scale = g.op("Constant", value_t=torch.tensor(scale_inv[fp8_tensor])) @@ -90,7 +92,6 @@ def onnx_cast_from_fp8(g, inputs, scale_inv, fp8_tensor, itype, otype): def onnx_fp8_gelu(g, inputs, scale, amax, scale_inv, fp8_tensor, otype): """ONNX graph for fp8_gelu""" # pylint: disable=unused-argument - output_shape = torch.onnx.symbolic_helper._get_tensor_sizes(inputs) gelu = torch.onnx.symbolic_opset9.gelu(g, inputs, "tanh") out = quantize(g, gelu, scale_inv, fp8_tensor) return out @@ -157,7 +158,6 @@ def onnx_layernorm_fwd_fp8(g, inputs, weight, bias, eps, scale, amax, scale_inv, """ONNX graph for layernorm_fwd_fp8""" # pylint: disable=unused-argument ln = onnx_layernorm_fwd(g, inputs, weight, bias, eps) - output_shape = torch.onnx.symbolic_helper._get_tensor_sizes(inputs) fp8_ln = quantize(g, ln, scale_inv, fp8_tensor) return fp8_ln From 9a7198fdbd9f32fd908aa755a4564108b9b7920a Mon Sep 17 00:00:00 2001 From: Asfiya Baig Date: Fri, 6 Jan 2023 01:03:37 +0000 Subject: [PATCH 11/16] updates 1. remove List import for pylint failure 2. address comments: remove state tensors from GPU 3. 
address comments: Update reverse_map_dtype function and add to namespace Signed-off-by: Asfiya Baig --- transformer_engine/pytorch/csrc/ts_fp8_op.cpp | 25 ++++++------------- transformer_engine/pytorch/module.py | 6 ++--- 2 files changed, 10 insertions(+), 21 deletions(-) diff --git a/transformer_engine/pytorch/csrc/ts_fp8_op.cpp b/transformer_engine/pytorch/csrc/ts_fp8_op.cpp index a50242dc0d..29a093d27d 100755 --- a/transformer_engine/pytorch/csrc/ts_fp8_op.cpp +++ b/transformer_engine/pytorch/csrc/ts_fp8_op.cpp @@ -7,24 +7,13 @@ #include #include "extensions.h" -transformer_engine::DType reverse_map_dtype(int64_t dtype) { - switch (dtype) { - case static_cast(transformer_engine::DType::kByte): - return transformer_engine::DType::kByte; - case static_cast(transformer_engine::DType::kInt32): - return transformer_engine::DType::kInt32; - case static_cast(transformer_engine::DType::kFloat32): - return transformer_engine::DType::kFloat32; - case static_cast(transformer_engine::DType::kFloat16): - return transformer_engine::DType::kFloat16; - case static_cast(transformer_engine::DType::kBFloat16): - return transformer_engine::DType::kBFloat16; - case static_cast(transformer_engine::DType::kFloat8E4M3): - return transformer_engine::DType::kFloat8E4M3; - case static_cast(transformer_engine::DType::kFloat8E5M2): - return transformer_engine::DType::kFloat8E5M2; - default: - NVTE_ERROR("Type not supported."); +namespace { + transformer_engine::DType reverse_map_dtype(int64_t dtype) { + if (dtype >= 0 && dtype < static_cast(transformer_engine::DType::kNumTypes)) { + return static_cast(dtype); + } else { + NVTE_ERROR("Type not supported."); + } } } diff --git a/transformer_engine/pytorch/module.py b/transformer_engine/pytorch/module.py index 6ab26d3fdf..96b89bad11 100644 --- a/transformer_engine/pytorch/module.py +++ b/transformer_engine/pytorch/module.py @@ -7,7 +7,7 @@ import pickle import warnings from abc import ABC, abstractmethod -from typing import Union, Optional, 
Callable, Tuple, Dict, List, Any, Mapping +from typing import Union, Optional, Callable, Tuple, Dict, Any, Mapping from functools import partial import numpy as np import torch @@ -167,7 +167,7 @@ def get_extra_state(self) -> torch.Tensor: state["extra_fp8_variables"] = extra state_serialized = pickle.dumps(state) - state_tensor = torch.tensor(np.frombuffer(state_serialized, dtype=np.uint8), device='cuda') + state_tensor = torch.tensor(np.frombuffer(state_serialized, dtype=np.uint8)) return state_tensor @@ -211,7 +211,7 @@ def set_extra_state(self, state: torch.Tensor) -> None: return if isinstance(state, torch.Tensor): - state = pickle.loads(state.cpu().detach().numpy().tobytes()) + state = pickle.loads(state.detach().numpy().tobytes()) if state is None: return From 882c462035eacb5c188262bdef9c3a04d8ee98f1 Mon Sep 17 00:00:00 2001 From: Asfiya Baig Date: Fri, 6 Jan 2023 01:09:20 +0000 Subject: [PATCH 12/16] minor fix: coding guidelines Signed-off-by: Asfiya Baig --- transformer_engine/pytorch/csrc/ts_fp8_op.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/transformer_engine/pytorch/csrc/ts_fp8_op.cpp b/transformer_engine/pytorch/csrc/ts_fp8_op.cpp index 29a093d27d..b4e6b6900a 100755 --- a/transformer_engine/pytorch/csrc/ts_fp8_op.cpp +++ b/transformer_engine/pytorch/csrc/ts_fp8_op.cpp @@ -15,7 +15,7 @@ namespace { NVTE_ERROR("Type not supported."); } } -} +} //namespace at::Tensor cast_to_fp8_ts(const at::Tensor &input, From 2ef1fa4058570e9160e6122b06f3f0c108c5f5ae Mon Sep 17 00:00:00 2001 From: Asfiya Baig Date: Wed, 4 Jan 2023 21:30:56 +0000 Subject: [PATCH 13/16] fix scale re-init issues Signed-off-by: Asfiya Baig --- tests/test_onnx_export.py | 41 ++++++++++++++++++++++----------------- 1 file changed, 23 insertions(+), 18 deletions(-) diff --git a/tests/test_onnx_export.py b/tests/test_onnx_export.py index 3bbe224055..a9079da0e1 100644 --- a/tests/test_onnx_export.py +++ b/tests/test_onnx_export.py @@ -80,12 +80,17 @@ def 
to_numpy(tensor): return tensor.cpu().numpy() -def set_layer_scale(module: torch.nn.Module, scale: float): - module.fp8_init() - module.fp8_meta["scaling_fwd"].scale = torch.ones( - 2, dtype=torch.float32, device="cuda") / scale - module.fp8_meta["scaling_fwd"].scale_inv = torch.ones( - 2, dtype=torch.float32, device="cuda") * scale +def set_layer_scale(module: torch.nn.Module, scales: float, num_gemms: int=1): + module.fp8_init(num_gemms=num_gemms) + assert len(scales) == num_gemms * 2, "Each gemm should be accompanied by 2 scales" + num_fp8_tensors = len(scales) + scale = torch.ones(num_fp8_tensors, dtype=torch.float32, device="cuda") + scale_inv = torch.ones(num_fp8_tensors, dtype=torch.float32, device="cuda") + for i, s in enumerate(scales): + scale[i] *= s + scale_inv[i] /= s + module.fp8_meta["scaling_fwd"].scale = scale + module.fp8_meta["scaling_fwd"].scale_inv = scale_inv def te_infer(model: torch.nn.Module, inps: Union[Tuple[torch.tensor], torch.tensor], is_fp8: bool): @@ -541,7 +546,7 @@ def forward(self, inp, mask): validate_result(fname, inp, model, atol=1e-3) -@pytest.mark.parametrize("scale_factor", [1]) +@pytest.mark.parametrize("scale_factor", [[448, 448]]) @pytest.mark.parametrize("use_fp8", [False, True]) # Returning the bias is a TE fusion optimization we don't care about. 
@pytest.mark.parametrize("return_bias", [False]) @@ -557,7 +562,7 @@ def forward(self, inp, mask): # (torch.bfloat16, True), ]) def test_export_linear( - scale_factor: float, + scale_factor: list, use_fp8: bool, use_bias: bool, return_bias: bool, @@ -595,7 +600,7 @@ def forward(self, inp): bias_str = "_bias" if use_bias else "" high_prec_str = dtype2str(precision) fname = f"te.linear{fp8_str}{bias_str}{high_prec_str}.onnx" - with te.fp8_autocast(enabled=use_fp8): + with te.fp8_autocast(enabled=use_fp8, fp8_recipe=create_fp8_recipe()): model = Test_Linear( in_features, out_features, @@ -610,12 +615,12 @@ def forward(self, inp): if precision in (torch.bfloat16, ): return if not use_fp8: - validate_result(fname, inp, model, atol=1e-3) + validate_result(fname, inp, model, atol=5e-4) else: - validate_result(fname, inp, model, atol=1e-3, is_fp8=use_fp8) + validate_result(fname, inp, model, atol=5e-4, is_fp8=use_fp8) -@pytest.mark.parametrize("scale_factor", [112]) +@pytest.mark.parametrize("scale_factor", [[448, 448]]) @pytest.mark.parametrize("use_fp8", [False, True]) # Returning the bias is a TE fusion optimization we don't care about. @pytest.mark.parametrize("return_bias", [False]) @@ -645,7 +650,7 @@ def test_export_layernorm_linear( bias_str = "_bias" if use_bias else "" high_prec_str = dtype2str(precision) fname = f"te.layernorm_linear{fp8_str}{bias_str}{high_prec_str}.onnx" - with te.fp8_autocast(enabled=use_fp8): + with te.fp8_autocast(enabled=use_fp8, fp8_recipe=create_fp8_recipe()): model = te.LayerNormLinear( hidden_size, 3 * hidden_size, @@ -663,7 +668,7 @@ def test_export_layernorm_linear( validate_result(fname, inp, model, atol=1e-3, is_fp8=use_fp8) -@pytest.mark.parametrize("scale_factor", [112]) +@pytest.mark.parametrize("scale_factor", [[224, 224, 448, 448]]) @pytest.mark.parametrize("use_fp8", [False, True]) # Returning the bias is a TE fusion optimization we don't care about. 
@pytest.mark.parametrize("return_bias", [False]) @@ -694,7 +699,7 @@ def test_export_layernorm_mlp( bias_str = "_bias" if use_bias else "" high_prec_str = dtype2str(precision) fname = f"te.layernorm_mlp{fp8_str}{bias_str}{high_prec_str}.onnx" - with te.fp8_autocast(enabled=use_fp8): + with te.fp8_autocast(enabled=use_fp8, fp8_recipe=create_fp8_recipe()): model = te.LayerNormMLP( hidden_size, ffn_hidden_size, @@ -704,12 +709,12 @@ def test_export_layernorm_mlp( params_dtype=precision, ).to(device='cuda') if use_fp8: - set_layer_scale(model, scale_factor) + set_layer_scale(model, scale_factor, num_gemms=2) do_export(model, inp, fname, use_fp8) if not use_fp8: - validate_result(fname, inp, model, atol=1e-3) + validate_result(fname, inp, model, atol=5e-4) else: - validate_result(fname, inp, model, atol=2e-2, is_fp8=use_fp8) + validate_result(fname, inp, model, atol=7e-3, is_fp8=use_fp8) @pytest.mark.parametrize( From 58920bbd0a21c1cdd33fad887eaa0b395a042f52 Mon Sep 17 00:00:00 2001 From: Asfiya Baig Date: Wed, 4 Jan 2023 21:41:55 +0000 Subject: [PATCH 14/16] update scale arg type in definition Signed-off-by: Asfiya Baig --- tests/test_onnx_export.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/test_onnx_export.py b/tests/test_onnx_export.py index a9079da0e1..2cb09c80c2 100644 --- a/tests/test_onnx_export.py +++ b/tests/test_onnx_export.py @@ -633,7 +633,7 @@ def forward(self, inp): (torch.float16, False), ]) def test_export_layernorm_linear( - scale_factor: float, + scale_factor: list, use_fp8: bool, use_bias: bool, return_bias: bool, @@ -681,7 +681,7 @@ def test_export_layernorm_linear( (torch.float16, False), ]) def test_export_layernorm_mlp( - scale_factor: float, + scale_factor: list, use_fp8: bool, use_bias: bool, return_bias: bool, From 486db6bd588f2cd0f967cb9752f6d6ef6f4b4109 Mon Sep 17 00:00:00 2001 From: Asfiya Baig Date: Thu, 5 Jan 2023 04:41:51 +0000 Subject: [PATCH 15/16] add scales to mha and transformer layer submodules 
Signed-off-by: Asfiya Baig --- tests/test_onnx_export.py | 136 +++++++++++++++++++++++++++++--------- 1 file changed, 106 insertions(+), 30 deletions(-) diff --git a/tests/test_onnx_export.py b/tests/test_onnx_export.py index 2cb09c80c2..d2d5fc2e47 100644 --- a/tests/test_onnx_export.py +++ b/tests/test_onnx_export.py @@ -777,6 +777,33 @@ def test_export_core_attention( validate_result(fname, inp, model, atol=1e-2) +def set_mha_scales(module, + scale_factor_qkv: list=[448, 448], + scale_factor_query: list=[112, 112], + scale_factor_kv: list=[224, 224], + scale_factor_proj: list=[448, 448] +): + if module.attention_type == "self": + if module.input_layernorm: + # LayernormLinear layer scale init + set_layer_scale(module.layernorm_qkv, scale_factor_qkv) + else: + # Linear layer scale init + set_layer_scale(module.qkv, scale_factor_qkv) + else: + if module.input_layernorm: + # LayernormLinear layer scale init + set_layer_scale(module.layernorm_query, scale_factor_query) + else: + # Linear layer scale init + set_layer_scale(module.query_layer, scale_factor_query) + + # Linear layer scale init + set_layer_scale(module.key_value, scale_factor_kv) + + # Linear layer scale init + set_layer_scale(module.proj, scale_factor_proj) + test_configs_multihead_attention = [ #"use_mask, attn_mask_type" (False, "causal"), # calls ScaledUpperTriangMaskedSoftmax @@ -802,6 +829,10 @@ def test_export_core_attention( @pytest.mark.parametrize("precision", [torch.float32, torch.float16]) @pytest.mark.parametrize("return_layernorm_output", [False]) @pytest.mark.parametrize("input_layernorm, attention_type, fuse_qkv_params", test_configs_attention_type) +@pytest.mark.parametrize("scale_factor_qkv", [[448, 448]]) +@pytest.mark.parametrize("scale_factor_query", [[112, 112]]) +@pytest.mark.parametrize("scale_factor_kv", [[224, 224]]) +@pytest.mark.parametrize("scale_factor_proj", [[448, 448]]) def test_export_multihead_attention( use_fp8: bool, use_mask: bool, @@ -810,7 +841,11 @@ def 
test_export_multihead_attention( return_layernorm_output: bool, input_layernorm: bool, attention_type: str, - fuse_qkv_params: bool + fuse_qkv_params: bool, + scale_factor_qkv: list, + scale_factor_query: list, + scale_factor_kv: list, + scale_factor_proj: list, ): hidden_size = 256 sequence_length = 128 @@ -851,21 +886,39 @@ def test_export_multihead_attention( input_ln_str = "_input-ln" if input_layernorm else "" fname = f"te.multihead_attention{fp8_str}{attn_mask_str}{attn_type_str}{input_ln_str}{fuse_qkv_str}{dtype_str}.onnx" - model = te.transformer.MultiHeadAttention( - *attention_args, - attn_mask_type=attn_mask_type, - params_dtype=precision, - return_layernorm_output=return_layernorm_output, - input_layernorm=input_layernorm, - attention_type=attention_type, - fuse_qkv_params=fuse_qkv_params, - ).to(device='cuda') - do_export(model, inp, fname, use_fp8, input_names=input_names) - if not use_fp8: - validate_result(fname, inp, model, atol=1e-3) - elif precision != torch.float16: - validate_result(fname, inp, model, atol=1e-2, is_fp8=use_fp8) + with te.fp8_autocast(enabled=use_fp8, fp8_recipe=create_fp8_recipe()): + model = te.transformer.MultiHeadAttention( + *attention_args, + attn_mask_type=attn_mask_type, + params_dtype=precision, + return_layernorm_output=return_layernorm_output, + input_layernorm=input_layernorm, + attention_type=attention_type, + fuse_qkv_params=fuse_qkv_params, + ).to(device='cuda') + if use_fp8: + set_mha_scales(model, + scale_factor_qkv, + scale_factor_query, + scale_factor_kv, + scale_factor_proj) + do_export(model, inp, fname, use_fp8, input_names=input_names) + if not use_fp8: + validate_result(fname, inp, model, atol=1e-3) + elif precision != torch.float16: + validate_result(fname, inp, model, atol=5e-3, is_fp8=use_fp8) + +def set_transformer_layer_scales(module, + scales_self_attn: list, + scales_inter_attn: list, + scales_layernorm_mlp: list=[224, 224, 448, 448]): + # set mha scales + set_mha_scales(module.self_attention, 
*scales_self_attn) + if module.layer_type == "decoder": + set_mha_scales(module.inter_attention, *scales_inter_attn) + # set layernorm mlp scales + set_layer_scale(module.layernorm_mlp, scales_layernorm_mlp, num_gemms=2) @pytest.mark.parametrize("use_fp8", [False, True]) @pytest.mark.parametrize("use_mask, attn_mask_type", test_configs_multihead_attention) @@ -876,6 +929,11 @@ def test_export_multihead_attention( @pytest.mark.parametrize("precision", [torch.float32, torch.float16]) @pytest.mark.parametrize("fuse_qkv_params", [False, True]) @pytest.mark.parametrize("apply_query_key_layer_scaling", [True, False]) +@pytest.mark.parametrize("scale_factor_qkv", [[448, 448]]) +@pytest.mark.parametrize("scale_factor_query", [[112, 112]]) +@pytest.mark.parametrize("scale_factor_kv", [[224, 224]]) +@pytest.mark.parametrize("scale_factor_proj", [[448, 448]]) +@pytest.mark.parametrize("scale_factor_layernorm_mlp", [[224, 224, 448, 448]]) def test_export_transformer_layer( use_fp8: bool, use_mask: bool, @@ -883,7 +941,12 @@ def test_export_transformer_layer( output_layernorm: bool, precision: torch.dtype, fuse_qkv_params: bool, - apply_query_key_layer_scaling: bool + apply_query_key_layer_scaling: bool, + scale_factor_qkv: list, + scale_factor_query: list, + scale_factor_kv: list, + scale_factor_proj: list, + scale_factor_layernorm_mlp: list, ): # Layer configuration hidden_size = 64 @@ -909,17 +972,30 @@ def test_export_transformer_layer( attn_mask_str = get_attn_mask_str(use_mask, attn_mask_type) fname = f"te.transformer_layer{fp8_str}{attn_mask_str}{fuse_qkv_params_str}{qk_scaling_str}{high_prec_str}.onnx" - model = te.TransformerLayer( - hidden_size, - ffn_hidden_size, - num_attention_heads, - self_attn_mask_type=attn_mask_type, - output_layernorm=output_layernorm, - params_dtype=precision, - fuse_qkv_params=fuse_qkv_params, - apply_query_key_layer_scaling=apply_query_key_layer_scaling).to(device='cuda') - do_export(model, inp, fname, use_fp8) - if not use_fp8: - 
validate_result(fname, inp, model, atol=1e-3) - elif precision != torch.float16: - validate_result(fname, inp, model, atol=5e-1, is_fp8=use_fp8) + with te.fp8_autocast(enabled=use_fp8, fp8_recipe=create_fp8_recipe()): + model = te.TransformerLayer( + hidden_size, + ffn_hidden_size, + num_attention_heads, + self_attn_mask_type=attn_mask_type, + output_layernorm=output_layernorm, + params_dtype=precision, + fuse_qkv_params=fuse_qkv_params, + apply_query_key_layer_scaling=apply_query_key_layer_scaling).to(device='cuda') + if use_fp8: + mha_scales = [ + scale_factor_qkv, + scale_factor_query, + scale_factor_kv, + scale_factor_proj + ] + set_transformer_layer_scales(model, + scales_self_attn=mha_scales, + scales_inter_attn=mha_scales, + scales_layernorm_mlp=scale_factor_layernorm_mlp) + + do_export(model, inp, fname, use_fp8) + if not use_fp8: + validate_result(fname, inp, model, atol=1e-3) + elif precision != torch.float16: + validate_result(fname, inp, model, atol=1e-2, is_fp8=use_fp8) From e83246e25701aa4d7e082e55d410c88364f34625 Mon Sep 17 00:00:00 2001 From: Asfiya Baig Date: Mon, 9 Jan 2023 20:10:41 +0000 Subject: [PATCH 16/16] address comments 1. replace variable scale_factor with scale_factors 2. Update type hints for scale_factors to be List[float] 3. 
Remove use of num_gemms param and add amax_history assignment Signed-off-by: Asfiya Baig --- tests/test_onnx_export.py | 56 ++++++++++++++++++++------------------- 1 file changed, 29 insertions(+), 27 deletions(-) diff --git a/tests/test_onnx_export.py b/tests/test_onnx_export.py index d2d5fc2e47..789eccf09f 100644 --- a/tests/test_onnx_export.py +++ b/tests/test_onnx_export.py @@ -14,7 +14,7 @@ import onnxruntime as ort import torch from torch import nn as nn -from typing import Union, Tuple +from typing import Union, Tuple, List import transformer_engine.pytorch as te from transformer_engine.common import recipe import transformer_engine_extensions as tex @@ -80,17 +80,19 @@ def to_numpy(tensor): return tensor.cpu().numpy() -def set_layer_scale(module: torch.nn.Module, scales: float, num_gemms: int=1): - module.fp8_init(num_gemms=num_gemms) - assert len(scales) == num_gemms * 2, "Each gemm should be accompanied by 2 scales" +def set_layer_scale(module: torch.nn.Module, scales: List[float]): + module.fp8_init() num_fp8_tensors = len(scales) scale = torch.ones(num_fp8_tensors, dtype=torch.float32, device="cuda") scale_inv = torch.ones(num_fp8_tensors, dtype=torch.float32, device="cuda") + amax_history_len = module.fp8_meta["recipe"].amax_history_len + amax_history = torch.zeros(amax_history_len, num_fp8_tensors, dtype=torch.float32, device="cuda") for i, s in enumerate(scales): scale[i] *= s scale_inv[i] /= s module.fp8_meta["scaling_fwd"].scale = scale module.fp8_meta["scaling_fwd"].scale_inv = scale_inv + module.fp8_meta["scaling_fwd"].amax_history = amax_history def te_infer(model: torch.nn.Module, inps: Union[Tuple[torch.tensor], torch.tensor], is_fp8: bool): @@ -546,7 +548,7 @@ def forward(self, inp, mask): validate_result(fname, inp, model, atol=1e-3) -@pytest.mark.parametrize("scale_factor", [[448, 448]]) +@pytest.mark.parametrize("scale_factors", [[448, 448]]) @pytest.mark.parametrize("use_fp8", [False, True]) # Returning the bias is a TE fusion 
optimization we don't care about. @pytest.mark.parametrize("return_bias", [False]) @@ -562,7 +564,7 @@ def forward(self, inp, mask): # (torch.bfloat16, True), ]) def test_export_linear( - scale_factor: list, + scale_factors: List[float], use_fp8: bool, use_bias: bool, return_bias: bool, @@ -609,7 +611,7 @@ def forward(self, inp): precision ).to(device='cuda') if use_fp8: - set_layer_scale(model.linear, scale_factor) + set_layer_scale(model.linear, scale_factors) do_export(model, inp, fname, use_fp8) if precision in (torch.bfloat16, ): @@ -620,7 +622,7 @@ def forward(self, inp): validate_result(fname, inp, model, atol=5e-4, is_fp8=use_fp8) -@pytest.mark.parametrize("scale_factor", [[448, 448]]) +@pytest.mark.parametrize("scale_factors", [[448, 448]]) @pytest.mark.parametrize("use_fp8", [False, True]) # Returning the bias is a TE fusion optimization we don't care about. @pytest.mark.parametrize("return_bias", [False]) @@ -633,7 +635,7 @@ def forward(self, inp): (torch.float16, False), ]) def test_export_layernorm_linear( - scale_factor: list, + scale_factors: List[float], use_fp8: bool, use_bias: bool, return_bias: bool, @@ -660,7 +662,7 @@ def test_export_layernorm_linear( params_dtype=precision, ).to(device='cuda') if use_fp8: - set_layer_scale(model, scale_factor) + set_layer_scale(model, scale_factors) do_export(model, inp, fname, use_fp8) if not use_fp8: validate_result(fname, inp, model, atol=1e-3) @@ -668,7 +670,7 @@ def test_export_layernorm_linear( validate_result(fname, inp, model, atol=1e-3, is_fp8=use_fp8) -@pytest.mark.parametrize("scale_factor", [[224, 224, 448, 448]]) +@pytest.mark.parametrize("scale_factors", [[224, 224, 448, 448]]) @pytest.mark.parametrize("use_fp8", [False, True]) # Returning the bias is a TE fusion optimization we don't care about. 
@pytest.mark.parametrize("return_bias", [False]) @@ -681,7 +683,7 @@ def test_export_layernorm_linear( (torch.float16, False), ]) def test_export_layernorm_mlp( - scale_factor: list, + scale_factors: List[float], use_fp8: bool, use_bias: bool, return_bias: bool, @@ -709,7 +711,7 @@ def test_export_layernorm_mlp( params_dtype=precision, ).to(device='cuda') if use_fp8: - set_layer_scale(model, scale_factor, num_gemms=2) + set_layer_scale(model, scale_factors) do_export(model, inp, fname, use_fp8) if not use_fp8: validate_result(fname, inp, model, atol=5e-4) @@ -778,10 +780,10 @@ def test_export_core_attention( def set_mha_scales(module, - scale_factor_qkv: list=[448, 448], - scale_factor_query: list=[112, 112], - scale_factor_kv: list=[224, 224], - scale_factor_proj: list=[448, 448] + scale_factor_qkv: List[float]=[448, 448], + scale_factor_query: List[float]=[112, 112], + scale_factor_kv: List[float]=[224, 224], + scale_factor_proj: List[float]=[448, 448] ): if module.attention_type == "self": if module.input_layernorm: @@ -842,10 +844,10 @@ def test_export_multihead_attention( input_layernorm: bool, attention_type: str, fuse_qkv_params: bool, - scale_factor_qkv: list, - scale_factor_query: list, - scale_factor_kv: list, - scale_factor_proj: list, + scale_factor_qkv: List[float], + scale_factor_query: List[float], + scale_factor_kv: List[float], + scale_factor_proj: List[float], ): hidden_size = 256 sequence_length = 128 @@ -918,7 +920,7 @@ def set_transformer_layer_scales(module, if module.layer_type == "decoder": set_mha_scales(module.inter_attention, *scales_inter_attn) # set layernorm mlp scales - set_layer_scale(module.layernorm_mlp, scales_layernorm_mlp, num_gemms=2) + set_layer_scale(module.layernorm_mlp, scales_layernorm_mlp) @pytest.mark.parametrize("use_fp8", [False, True]) @pytest.mark.parametrize("use_mask, attn_mask_type", test_configs_multihead_attention) @@ -942,11 +944,11 @@ def test_export_transformer_layer( precision: torch.dtype, fuse_qkv_params: 
bool, apply_query_key_layer_scaling: bool, - scale_factor_qkv: list, - scale_factor_query: list, - scale_factor_kv: list, - scale_factor_proj: list, - scale_factor_layernorm_mlp: list, + scale_factor_qkv: List[float], + scale_factor_query: List[float], + scale_factor_kv: List[float], + scale_factor_proj: List[float], + scale_factor_layernorm_mlp: List[float], ): # Layer configuration hidden_size = 64