From c8495f8c4df9fa397fff192c13a687593c9b77a8 Mon Sep 17 00:00:00 2001 From: Yibing Liu Date: Thu, 8 Jun 2017 11:57:41 +0800 Subject: [PATCH 001/106] update ctc_beam_search_decoder design doc --- doc/design/speech/README.MD | 13 ++++++++++++- doc/design/speech/image/beam_search.png | Bin 0 -> 474749 bytes 2 files changed, 12 insertions(+), 1 deletion(-) create mode 100644 doc/design/speech/image/beam_search.png diff --git a/doc/design/speech/README.MD b/doc/design/speech/README.MD index 7304650e62..cc03aac7b4 100644 --- a/doc/design/speech/README.MD +++ b/doc/design/speech/README.MD @@ -140,7 +140,17 @@ TODO by Assignees ### Beam Search with CTC and LM -TODO by Assignees +
+
+Figure 2. Algorithm for Beam Search Decoder. +
+ +- The **Beam Search Decoder** for DS2 CTC-trained network follows the similar approach in \[[3](#references)\] with a modification for the ambiguous part, as shown in Figure 2. +- An **external defined scorer** would be passed into the decoder to evaluate a candidate prefix during decoding whenever a space character appended. +- Such scorer is a unified class, may consisting of language model, word count or any customed evaluators. +- The **language model** is built from Task 5, with a parameter should be carefully tuned to achieve minimum WER/CER (c.f. Task 7) +- This decoder needs to perform with **high efficiency** for the convenience of parameters tuning and speech recognition in reality. + ## Future Work @@ -153,3 +163,4 @@ TODO by Assignees 1. Dario Amodei, etc., [Deep Speech 2 : End-to-End Speech Recognition in English and Mandarin](http://proceedings.mlr.press/v48/amodei16.pdf). ICML 2016. 2. Dario Amodei, etc., [Deep Speech 2 : End-to-End Speech Recognition in English and Mandarin](https://arxiv.org/abs/1512.02595). arXiv:1512.02595. +3. Awni Y. Hannun, etc. [First-Pass Large Vocabulary Continuous Speech Recognition using Bi-Directional Recurrent DNNs](https://arxiv.org/abs/1408.2873). arXiv:1408.2873 diff --git a/doc/design/speech/image/beam_search.png b/doc/design/speech/image/beam_search.png new file mode 100644 index 0000000000000000000000000000000000000000..7f7e35f34223162d0f7f0ed97375909c43b830ae GIT binary patch literal 474749 zcmaI61C*pevMAh`p0;h^fl!Yg`?_# zDcI5a(NGb&p!IE|3o9o&*Zmec$|3QqeouMWaXg*^TyJ`wbO5G0nL+(JX{888`B8yJ z4^NLjgJLzf5wwKt&Rh`>n>$?A;M=L)Kw)B@OwvV(&|&rD-5cC=s&CzR}5! 
z?--CpjTqG_4%e}u|B3Z>;|PFR?)+}R`PO1$7B+M|?H83n{V5#g7~)(ZIRk}eIErh2 zOdcE}Q2FBy3{T8}e%Mt>1bD9C8T1mzR-L&Q=+@CuowpBRKr42w?~{EKhn47Ap18pXQ@G z13|*%z*c5*9wxU%fWA+;>uG9~JPP4)CH&onQJ!AV;!mZiNz4i8hTjb&j%9+EaT!>s z1W!ZztsM4*hB)ln_$tFMMVwFZg@Hue=_#cpn=!cQ-?h>MH1n}oQk(NbrjOd%$IL~7!Q?v?)&@djV^6o0`VhYD07e@( z^`MynS#A^e*aW<3hZ!Zx@m+NS{{*?=i#JE1E3+(fiz1@%j{bnh=E)G{=Eh>s-}^Qu zc@^ps!Uc~jN@p;=?q~0ytbUX_my2NJh+4nlm?TC9tzbVb?(kmJn)AcXIoqMjHwm@!{$eeJj6 z2^-+vG7KuE?lW}{ARJ#1pwBI~tZmEIu4NFr`~+I0V+rTh7Xcs?0(jRDtXU9`UW{WS z*h_!tTcBVRJDvf1dOj z&qCVxdD>Y&`lJ6w=mZ^r2#7_%5)@lRupR_r6sklN8-%?R5sZZ(!IKbHMBpV7L?f7p zMJhyD3%C?4kJF1g7&Sj6nh)L-@QC#}{B;jGBu`ZyX^|gtYP5*>Bpf(L;Y5}mU{dgE zM%a!;D@HvBYR2P%)fKtT-!X@Nitv*`Bhvru_i-xmaa6EAq&gGXAe#P=I;QHMB?FmT zExm#EI^auU*}*e*WVX-?W}ukiH8<972)$t2zKUCqPS~Chz;57+8c0;G00S{QG6XBzZC739=eu3*to>?I4{2y?vN4QCZxi zcy%EIQbeK@6_GU=E;gyx{+l)K8OD#IeP5G==&J?nA(`!*Z>n0(@&;p zCVeKHG}I)&OoaC>f-ByX89ZWomn#$p>%Ls|70mGnTq19Jf1@D*n$a- zQLu6QL$nE%(WMEUBu`3uVq1b6N98Run*K%U@%NDAuwC8$f<{uEoVk|WOt}f@^*=|GOHq- zvaAxF0&nSN9(QSep+N;w0h3yh;8Bny8>k@$69^_|s(QVV)YxosK`k;i^$LK8yNGM0hM5>-Cd?adjia-Mj@f?^qGD`#xCm1cHfn`W(M>9!GM zv1gHBTQiwA4^LlDD^6EU)nyc#x}0(|*_do6LnPyn*_SKNbehPtxHiu=)ivBU?5l;S zqo_5jb>qRUs(rmopOLWZ7(8b6(Q`BmrW0 z9(Xu0U3hXlGTaN?@NV6Y^^Zs%xDOsTSC?J09Yf#`cL2)gvs;z>sOPQy_gT3W`k|U( z+u5CI-)>(iAVLE6-#Wcj{8Ricznyn~#|q8%=QjVgurs<%-W4M(ChU(bE<0V!ExRb$ zhW`a|6v`UpU)x*zqz~U8)!#7Sn<1Ks##?2uo&Zr128MV-d=}S03=r#%xDVGvcBWrv z*F@REBF1MXF6KP;u9UMfw{qP)Yz?+gIaoSyzl(w_3`z~^f)rZtF0swrW*e{(or}z1 zjxw3fa<@-@tBlkjH<_3*nc|HX7mt+L%y~&e9WFP5B?Tk1mccUeY+$R?v*TvRX8+Vy zv!Mi-*IPQ6-#4JFBtB7omR*+_mw|HIZli5iH2O8-n%^u8%ma#Ij0Q~>jpLb-nPVQ3 zUd}$6X{svQG;~0i1>$nKHaI*DpK}@1TiL+J($W@ED_~+cuZRrii*7u(fs?f3S(Nx!7Z zlNFPT=|8)A>vJ1-uj*HgWrEX#rJy{0@4w56#jC~Doh;iX?s2JaQ9Ds_sjXFQbYGiN zmAuLZ>kBInAE=(orTDZywZ9%l&kvVyEiYHB^rW;W^gJE2-rLI+hvq-#x1W?&R&0OU zP;B4299%}BPgqU3Smj$GS)bFvQ`&A;_PAdi&uA&OY?j`Zmu{5TIUg+pHrO|+Z2+sf z9YT&`W6^S4?p(Gv_d6F8LeElfCL^!&*|^|7pW)Rr!G>@p38%qi|X)Fiwc+0 
z>g}33Ul~|U;_@>-h6DZ+ft=9P`&Cg)68UeCSEoi^qZo|&BXU5~ui{D+}~6dd|r+RZ)MJ2opm zm#!T0RwTd;;Hj`&EbB*I+x{SW#O@bE9?5626sgA}pv`yN{ApIl0giFEVesZ-e`yGt1kRcj7c+Vvyum zQVyu!wRvM>K%Lv*K(u(-xVt>jMC4t*546D;WGgfe zm;f=NKJOp6CW7-@TMciN9+GUROBpKc(T3dW8y!ZG0Tq$~bwh}WhO|UQdaZ$uZ3E|$ zYI)osLnCJ2xu%CHh;e~^P<(@Q`B}+F@cRz@O%)*R#5Ej&fY3<)@dlPqB)J6w0?sj4 zR(DdDk>)hCwWiZIvNbTKbF;Sliwy+C?Z)|cXl?AIPvB;4W#h={#zXW^49>sfe@xR8 z5&RRy$&!aiT}GZj$kxG_;1?YW9Rm?B6afJNw}X)hr=qavf06(F#zSQ0AI_+L!^%}3bS(a^!%&dJ=?hTtE3 z^$l#Dop^|d{vqhUU;o;tv77n-m1N`iUuykTkp3Sl^o(>2^#9HKFDdswW;x}}-Hff& zh0U#vZ5;o~;ALcDX5#)Q!T)dR|BCz{q#FN&l$quKL;63K{)?2G{vRs*50(CPUjLl^ zdtkg!-1Pr__`Fd2;uV;G?SW%1ET{Yzg8aice?iXQ4~l=mzhh7Z{-}2YBp@JuAPHds zWjEluWdxA&4Jg0%0B5 z-(R)>B6QM_$DKBxYODCJuC$xZw5`{v3_xTrJrcbDKLWy!AHe?`7)|zzLzt~UBJ!bx zfB*^bBOv&HgV7&Beq=BL|Cv{?AHuAIKk*mI|C`6Z`2)+W{;zWWi4zMAOqhEFBK9Hi zALSu{q}Tqhiv3e*0(1~T{t2+?mxTXKuYbzB9sj`sRJddz@uubxk@6o=<<&ukhldm2 z=q{Ta-ae?2XAKMu$98sd@(T(iN*0ck@$NkTx1;{oj%I#jW@eTO216a5o{E!^k-2|q z`^b73gRDnkvz>0(_7pjuOpj%oy;2sktNlmqN2C2RGO%T`xzh@LtE#FRxQb?bJpb}} zx>z}@sOipUHl2|1K^Nr>Ur7{_;-#);!1O$Gv+1wH_csxj2 z?7`O1|G_CxApI?t$`#(9ugWBosEllEbV*hwz2el4{mI<3D3c`jyp!9&250_vuYv#p zBl~S(A&G{DM)LVyuUjL{@2P=`u!8y@JQm@HF4zdioi{(Xfluk=4T zTkix3!H>oi2{mo4|8Uy7dUCr{sI$4mO_CxOQPI*IfdOf0l$b}@Y!C!KQs3j?Z-&Ig zOpL6Ah6f*dj5{K3pnSOt7UC64NRiT`i-n~ehl87`(uFr~wnV<5RlDCM;=6T%1*&5b zlFC%DXRS5Z8L4r>MWh;&A7BOd*Evk5$)?0r?y4Uvc(f8&h8RPfTFv$?~u2&4I3#bwX#FT z-Bjk}B<*Pb=H+)`PoR@mjA~gTcXio|Pn99{VM<(gcD;3adf^(OlZB+LV*K=KVr3=G zXwb&dQ_uDy=V=bOvDPtMjbZrk=E-*l0J87k34p2q@-IjC_eJ(+Q|3Q_l#&K=-|QCJ z_J^X<>i}M=(aOT9nw-bOcmUDJz!A#R-$OwhNWh;4DOahGm_oLWO5;(fRR1%c;@~EP z$MT?UYO>n8Ofl3yYS|z`JsOrmbl9|~Uft)e*Qtlh< zh2oM!y!VOi<>+M5ET3ZI%l^Y-zaN-f zWYt(Y`H0C+WYYnqA^R@~vn8nT2neE_P~AiOnTTw$kp#u5{ff3FsnH@qYm5DLKdK~qBIpD*0siqEp7WXGIR2``I zfuzd{xWao$==pwA*m~H)Qz?Za{SVj>gKk z1!$N4m`rAgEQGYiXQhonBYb{;Xq1HKMsRiK;rnb$t9d0A`ZF$>^Tig=q{?n-DU(V) zSB^dpReZ*eq{DS8Qp7>6oK2=d9Ndt}#6L zUObX%@dm#XMT-m%u2O{mks?siB6o1vAJdb)ip{xE5SyLTTbXa?1I!i#2|s7nkp$02 
z8yH57&LLhdrO|k@iCd?wqx}XR8Se8BPbm{rv1H9PnaYqa@MtPCI72eZkh|mL6QLb# zn=Pjx8x5t)T9w0g!7F-~D;O;|qP18ZOu|b3RnTjBa=O{HVYJf7C6mUQ)&8qSg7vtj zuEvt+bn?x)(PshgJ~DzB`8l~xac%rc;JGp{?LkBxNVN3q6$c&BS53bWs}>$q&G4!)NyI_s^@CE2mI&airA;XzuO~ z*bl#Axg1#=m53fBGEQfvSI3Ue`e!yyw=X=7l~S2bD&ZM!9tsVeq7aU z3FiaC_o!QXESsJW-&9S{*PFnSnx_O9X!|*p4F#z2V?0BaM)%Z(VxnzwcYMDCqdkJ; zrHNwNtOka(;0w(cis$TF%4-04aYAK!+cgg(Lg$_{pPgr4p!kv`TIISWa4Hq*A(b!P zZ?+c}^f6_v_5X@5dVT$Su_<%S4e(}dbE*sJS1$8Q{#Ybb7`I z?Qrn(eLX^dI=}S+=VULTOPpQ%&+jB&h1%( z#fIg%YaYiZsh{#iCeRi-_hiPt_~>#aHgXy-A=5H3r!X`DWX3O)1OO zb7TAG$j1e2M(n8RMjm$N^i{f_(70XKVoe)Q#Q@PRv8r=b zBCX>x+31p9a#ypB=`N?sNUkAKG3Ix62W;-NV|sOd>+&yfFqY<>cQv~CGo_N#mtbh8 zrx0|JAyTE1IcD=dYQy}ZFz}PJksIG!J%DbJB(2AT8%JiOd*0q?^joh4H#KMoCvAI#d^XU{HZW(MG7gomZR$8 z5(?_6*h$M(p%A%jf561ZCag$(>SdzucjG2_um!c5Q>p#2ri4j*K<4gG$ejYV z9-Z=Ka8;*nh0BVNNdfJx3vk%Y3nZe0>I3F&b$Nk@x!GpPawj$E-s>6B{#m3%OiA~1 zkB|@o3T6=5mR_5~8HkTIC!CJN1Kt)##7%KmsOS*RZ$o3yk~ba(!z;!Y z;S(!j5yf3@UEz#J7%nwwA!c&V)s2?E#Usx>T(lgUlyihjO%B-VXVptXiP9BMre;0w zVPATpT6Y@L;e|$d%J{h6PD8`|J$DDt;b$`lOK2tewibq?j88WtlxX$9=dhRbzi1;Orr=iFlIv+{WT#?8bj>^<|a8k+k+>6sR zc83SZ{dkqLgB)4tCMwLmua>)q0Q*w4am->-(NiW9G+Fmh!B@>IiYC08H@#*(W2Sw> zHJ6yWtL}o9EwwS>61ai9n&Sd%3DZ@zi^w+1XhMd24%tfG=8NH3uMJR3nXH2kCXug*OIo<-!Y~=_=<+eKp@T3O(&8_f{oy{1Hz)kslVaQHqFQR6=uCP#E9|$NoQ8zdC z{(eEb?@2Uiw-!?0<;Wb+@w}V&3cK@Z2aP$QkXDk-C{isCVHGA)6Lh?()P)(dMeQ!$J;>uqw+2EwqQ}8jfTAmD+ zza3Y{p9N1@ZQ!PPAZHAqUmJYvoBy4O-rf)sSl4(K=xQ1>Jk#d{B)h0~l2MIJC5g~W zt$e9>UyvC*yv7z7_Q@b#MuQ=H2DS*Cf;E1ZrXrs?L*T=igFOQ+IR5T7Syo}NS;N1L zRxFewU{();^G{(l+~jhWZ&@L+>OTDzBos8r#&N=ZAcr_clGN#E&fL6bHi`1Y2ladk zu-xLLm`ZRJzNjuFXA=1@Rfmb!GkV96e|{XMVT={#Agwxu)0 zb7gv-Yme(3gkXd0;289DLNc3-BmDPnf%p^m+V9w#ky ztv>wSO8g%?XZzobBR^DGs~DWmY`Fi3dr>~eH=%r!_sk9L&W9A8YE)q0q(ArPTcaap z$MU!`0B*rgY7Yam?d)rf?2mhBa*JAgPp6zVypurEsXpj;ZcvO{{_hT*T5 zj1tf3Sp9qls5gH=TXA`S3D|RNP0U~14&&VIO9lbJXpZWdjTk*KG}iN9{r;>vEQ7|~ zwJimLO&!{4Z`6SZX(feZrPcZSCHwVSaxr{vEua47d)amzwMt@~{N53t6Ok-;1J8vm 
z<%3rmJ=d^upzkI)d>1riq~gHM9bMVynPr(b6V#?5a+snRn({Y&6RdR|f@$3Kw9ERg z^-ErczwaeJ-?OjGTm(%TPZqA*CuMY79%((q9s+DPR)m56yD+iKC$I~PJnIy8obebs zupW3|RP=|t9z9(H6(*+#4=@nlzzxw|T9imglPiN{5Y5VgG19Fbrn^+0>vg6`p;L^b z`J59nWr|0nkTjqa%L;eGHYK*QzFfMIpjJ;WfoHZR^T6C3@H=~22-N|4$D0KXR-L^< zq3h@0#4nmEiMn^J4-F@SY-(Ds*bmLY8=dUQVF}vI{jPVB%KoMofU#Q6EiKa;d93Q@ zx=h}Rh>>F-kfb}M=7Qqj%@yGvPi_nwnbiXsQ{e89y0@_e@yH%{gD%}4E0A!zb-ZvJ zGi}Y3zBf>F7&@zUNys!SL!YZ+1Z)ipZN>q~q4>uM>xb@IG^KY&0J2Z*RBFN1>>9fh zTWps#)ACo6@~+++Y5Nb~k2WWwbI#RVk?*lp_XG24T`x}Ozj3oyz}J*y?h1HALpb=i zB3v+KbKX_AB(Myo2Q;+_?BQP%xOq-mWx8XKdG?6wvPfL2pkQN%4jPfh$kBK6p%#CM zylSX4!hh|74Vci$GB7g*>NI@4oaRVDjHz%-(fY%cr;@L>8!%~0an{%B5)XmW{scVx zh5$HSh9;A^_OOSt2g>jnUshsENOFrjYK&by)k3z1O&fk1_7=Xf9}nUA{3P$nUKc36hibvWi-%lI5SD$TAi^ghlW5r0!n2E zFJfngTTqa<%Rx^lp^H(k%mDE?Aq{kIkVv}0pC)%J@kisO7@v2ZyTZr?#&OpQ;*D%J z{STH91GA1ADSpqSo%i`jwF*jNZj^tWYKOh>RJ7xCIZ@t&GVWM@U;TC>g7dXNFpt{Bt~TOulrtRusD`g`JB0n~8soT>g)MCRI~{)5M>X!
dwp6P?%1w^+@aoKazO!Ie2&;0MHAuwU5~u|7T=9RnoZPq zt@ZjThfrmuZ+Zcdaj(kYV1PI!-jlSjSkGZnxf$B~g44WtI4el6#=jsivVp@cMf#o5 zG6H$VJ>UvAGivZ5s(EYG3oObk%1pKXS1X=5k=_*^a-B@He+nBHJwX+^56xCaG+1LyGZ6uA zh2L|3MCUOwj7V4~lvzCaK{%!dJdOl^#q0C?YA?vTFQbc`K!)5Tf0EKHXzlxDQ=!wHNvKm|%;oM8;qI+liDV(0yI z2RB9*b1#@4Jt`!k0oJyMB+;7d#}0mLRBGWRSWTvtDE3X@lhklQv7u08#suSRKaqV> z3#D8dFR0}VkDwRa#`Obfu7Ljqc8dP`;@Qh*rE9v`_xyRU{U%K2rbD?$pKlI%U+Hej z_lo!Zx1ZQL^HUF8H%0A#nzZ(vugN77v$ErdEBu$&%`M0d}E z+4Pe1jvkhSG@6}ea7{HDnUnQWm}Pv71K%onY-VfWNsm`=#D$9HVeG_*2GbB6In09V z_M)RS^waU9zzLL$gmL@G@!~^g+eO|k#U{r_`sO04%dCU`t`d?LxX8F7jEr|AsB??c ze$1-cw ziNTSQk;N$vm0fT+bMaZ(L3yH@OVf;P_x;JNd@?`FJGdxMOhmyK#Vcu^ov~bA$g>1a zEL~JcqBAs`o;+JDu{UQ*xSw$JkTGPYrt5&!Fp)IQW_}Oaf z7a_AFbXrA~mrg^X3$VSPx1^*rfv!Z-1c5QsuLGvl)wKlNl@TH-OqdC!mjfEFm-tJ6 z(FpYE60{@SaQ1f$h0=E$&AYpj#;;La#iWb4j?WTh`r|%(&F*_-&4vdi#}y>-gOZ{M zbj81=9izcdm4ZFi*?Qc%DKnPNiG}(2%F4+#w4}t$N`h?-aWa$1o%Q%hnT9jd&5A-7 zap~EXK*cGSUhm0yNn54m~^UtPY!cmc{1s$N$t5t&zH~i zW)EwLx_3(E z2L-WTyDDm7C2+)0vbr=ISz6Mzm@bzGHd-NDG<9b^UKx{Y1Q&s|#Bf(rQE22DRdc&A zm@+rROWBwzHN*2roMBlgl8{drCN+8(7+8oNqa|;WPcf|uiSoL?OQ7Ef`BrL*=zTOs zeW2tv-$du9WlB_g7nB?2F#R-Xizf?k`TSts3Tw=>Ar@1L7#|uH#QJ9H)Md0BV@+$? 
z1Ye7Fwkol>4m6n@TVKyi_;vnH5XzDGo|}XU=)tZIP%Xaf5H`gD3 zrr5%M&7N?c=r4q5JJuDz1=HOYtx)wA$dGlr`&6g8p6kL zm>Bf|(4`J2I6pX;-#vJS8ZjthdKA%QEq$WL-l*JmlB<(;zLH+wAFKz`=wqK91>D8G zV@I&ewNKu=ob;A%9aACsv^4Hp^L^n?9Pb&QPi(zLwQah|Q`7Fe1K;WSO6**U9V`_jVE9gmwQ*=>sS5y1al`sF8t>|_KdC}vM-S` zrJoy%xJJRi9YB`pX5>mK3~$0puuM2!Y*p1z{OLaXoCSV5e?`tsXtY+Ij_u80_*3R; z2w6V@1u+bN6!nii%H@p=8=YRRp=Zo&6L@!GFEf&Pe6^BgncErF;%F65lf_bII!mck z(s&XH7aGw#8Ev{*yTc;^E{A(kfICwr4!T&J!rdcHQ1GwNy0+Y5zG2R=zefIW>UDG7Jcu7Ql%;i)-Go1;$s zo_E7Igm1g=N>()PszhjXW-b?SUE<(=+9MOKPDXk=Yq5v1wZ`tA1T-JRnu@IqkH=H) z@-Hcbk#R}OFq+AV$KoNWlZD0WBg)BlI30FR|LW`xS$OYk#Z*NJ7gP(y2e_?L&lK6k zas?TxBZ{J0xvuOGFFL%v`^@}zeaOUKK?6la7E*3+QC3)wxzjS_QXzNbdr!LzI_*~e zmDl#|duQuXx8U=`^?tHapBs z&ZqooTi1M(r;50Uz$5|8`EpefaE+H+l^q&j8L4ja@7$yuF99w3x43kg(hYEEv%5~r z#YqvN?if6#7XI+4h@#b1b*LDYV$Q0~)~j*()D!D=hlzx2G3v20FJ}kKm2|9?QgNIP zY8f1mh!#jh!pG4AvMesgxasZN!dx?X$drBMJWW&w>!s-R_BO?%`Dr84B8nxl1!g^b z4=fbPK7iapv5eN~s*L=Kz<}9lZ1bUkF;#^Ad=OiTTuou3fSTit8dwip^ND+bsxc+|f$l01ZM-IUww!&mNjW{g@jyi`#wyhrOtvunvY886Z~c{5 zeiS~DF#H(PAfG2*I9nojy8J}#wDk$%&ih=jYX|K>)ps>tOxk3#5|6c1CP){}iiysc zOVBg2{n$GZnxDT$x1Ud`sxR`S*&e5LAXyj=QTXn9JrBBbf->t!F;z7K5p|p0vdV)SLv`(B`Qkz9Eo=M$#aQWE-m_vJVRi`nJR@m=Tp)D;iyV zl9?v96K36-R{czOZ5wJV%5qPAH9PF+U2s zZMj_fIRt>ZADmz4=5c)@vr?m5IHATf8u89zTAB-mO0AS+(NwCRV6s?i<4kpAltOln zv0jZ0ZRTh?lbfClxtYS#Y7I&mvVkUgtX$~?R=!h9xKuW5ZE31eTc=!-%g@Jm?~NvE z@4s?-W36QdprkKf$b*N)W=cFyu0LS~;kb2Gsn+J`;u{BSsY5-wi?dbRZB1*F-uMxO zFL5fRL8**2ITKr~*F!EfJNhY*= z09(rXqD2?J%i>kXxj>zs1us2*0y@S9omspY^1#nPyeVncGIP`{6gAFxlid}f`7-$a z)?gnV8DB?AN3*ZWKw>zGMd|C{5`HS6>S9ddhq4gRWY(1;VM>0<;l-Zt*h0a?5W_+b z6emY3=XvN@X!o$YTYV(?yy$}tHDM$c_^IA@(H+u+HV*MCH z+!&-RS{)HMuuMf{i5Q#gd<@^WDERbd;foROh-^!qXmz!66-mU;ZSJ}UePF2}>QBX3 z*6z5B`$5eT@qG5@FU53C`ET=x*2{)ceM+>n+~Sv6pmql%@Xuy2_jpY@Bx)k;vhsunL)v7s8kN9w=TWC4be%2fI(TnBTt$EHd0tUGcdkdm&Gi zO1C)}4P^W9W&=j`s2UEp)QVA~vUmfFqfYsauM~$MNw>D!(|vS1{HI?YBnvLhM~+N^ zYu>mH%$HwW$Ika<*E?@;z;2-&)1bJ=R7e}#c17g&kt9qBs}fmD0L9XUb6q;8Z^h1y 
zPq)TU#xu!^M-y?h5yiX!?lW^Qq&QMfDiw2?gW{P&^S}*-GaQ}(hx+EuPhM#JuhqY8 zIO8R5xv&#`pb7XNNsvB`u1ltmPcd3&E&<7v6;An>5gI_a>mfJ?*YoW=D~vZ4C}YnU z16O&o%6YuRZwkav#g^#!^Fq!vBEqjqOk7^9*IfR`lj)*57s-iW=(Q!1r8~Im-vjiG zueYQgHkwK6E|nbF{-x{3uA1GUozY0uvLgo@FiObxl>W#2YM0#qpo#JD5yh3)DcxjoKbmT_|rvwQxg-}35c$)bL~&q z<~vFJbcQgVL1fI{Uzug7YaC96XANX?t8vvvc;2DQAD!vTR$NkycTl{cT_RQ1YlaKS z>5rHKhN4mV$*5UI@Tr|m9&Z{UM8bk;(JY>QUg><3QQ-{$`Ss1Ti#&Z*$T)wer`?*y z=M}!QMm26s4KBr)z5}!SLy5&olQNoZ(s*{v;~Rm{l@G}bKu!q|l^pH5OK$X4MJ(&f z-|)pfd=nq}*C~r1nKYh6)rAUCM&b}e7Ns zN>-Och5?SHd<@~u!r>QgnXLPWlP!%5yY+gjKdaTcj6NY@eQiQvxo{x4Y;Jay_30&{ zu2pM>a1%C5;d&Cwq5A=oPTQ4K@sZa2C?v8uzajbkYI{^OewVO>ilvg!%6j45L+*rW zO}uR+<*ysY_+nXXk*f3Lc#$Qi^jD5~`QD_h$E|XD^M|_*l%mC0Oi#_V_2d%GChS

fJoD6N#bAx%Ts$yaoQ!%W|ck%4;NZwf??q4@tDrzMo7#f%lE*~S!KN~Ld%2;bvq<{aOK2Q7XPhJq6^OwOkKREt;q@jw;1_xIV7sgpb zrIsj?N+HvFW{P{W>ADs&e>7{33ZFKzDJ&WcGj%$eWz}rC0^0Jnkd9K1z1eI6Yg4y4 z+e=e&I^N{CXQh^~_KeAe3~W~zprV@`3StR9dh3TeljG0lv;KDiT@lmNlgUMo$NCP?i&D@lh zk8+fV{>3d&yWKH2W)q&I;~FS?1GuLC8!I0UTC4Avvwv|fYo^QhtX}CYcjKJX@r(?2 z^-V5U-wb6u*gHv4@;Xe~xbGe2D45$E!u+sXrX?MwHc>9dR-h>x9v_VhI@@qMR6__`h&ED)iN1dY+ z9&1&rZ&e(MR}XXw+eL6<_({Ha0Aljpoz?{ICK?GRRoBBSnd#{%wG}1>)Bq8E9~$5Y zez(E)Fjpo5dkf0&oi#ItBCm^!4|B7TVt+gcTb$6V;6}jPpdYz>2`l0pRoG_Vmm#2#mM48Vq?h zy{3b$A!bp|w(uMxSYGBf-cmZ~tRNAjVCp;gY*say+A z=l#yv_0IV-hxnt7&4oTQXgPZM?IIp*oXsMXj&>toWX<7})d-0duQP~TE)e1Q5wF>c z9Zd$evKO1(4L=Vo9VRu9hb?#Lo1X-HWo5)T!Jy$z ztBhY!5U0U202$jfUyfN!T|>PuPZ-(jc8<)AJLcpEh%`TD1iv1=HhVWZwOCig`g5~Y zOmeNsJH1{r^08MVCYP^Xuxo}|GPB!yBSwe{I8Pjfcj~)FA%K)7odJjrZkl4b5+BVj zS$by{;}*@BWH7kjdx!0Bn97-Xh0AEREUEnv6GFv-2F$B#*JF?C*^?2O{ZuGSj5_B! zIkM{dj8v`L5kRe02>7LaZ~*G-9p!U%G0;D#F6&b_MBrJ;#2T5vx!FGks>N{E#Y=H* zRWlQ!*^c>>em#<_TVd1Jy`Yu}t0m?mz{hsU#Ci%2j_zsJ^aZDK!v}#4|CN|ZB5COQ z!2&g_VF=s2XMXJ|5~uoh7Tv0pg6s2onmJ-3wgl%jl6;n zM)(JCV*E#R#H%1%dtm&GSiaf~62^3^#(hPb!BJ2HVN8BZ%cx=$BPhu?&nX(O%7=Zm8W_}x(r z=k3v-2jX%}^GpL3?I;ysx<)4Z`z8R0Xj5r4$jAt|`-Cmdzre*-m(C)Fu;~bBNTn+C zt~50_a;1YMPnk_jp8qB<5oiM@1|u@Q%`1y{+%>eMStTM?W(0VfU4aoJ&A&HeWp`_T z{I_!?-tPB)L=pJ#ek$-g;RHLYNuSvm)h&gDJ7^69Sw`*!)G~L1sa>T8F zJmc0*Zh5uGKk_LbeNRp7!7$T+z9U;CbSzsO%jDl1wZ&Md;22M&3SRauGL4$JJz)&x zDM7&eLP-JC3@-{)Da0|S+&>E~o^t=ZkaIVUB-I%0)=B*$UGvhnT8)*MyPYLsB>Dlr zIkOuhCt4t+gSR3R6-)S~FEH|jGXjnqFJyBtF;t+iUBsSg;WE^C1K7*g4 znMr~fiMMD8M1M4@(%kAcrwBv2Ummkh-&QWloZrk`dAvQKmmXuay_^Q-wpg_FImd4a zn{B@Wp4{N@-LdX$6jt~)N~;QH;7*TM_+%WeHkK*8-fzKXmIbHtYy#dQWG$kR22OZy zd{7@6 zz%TOM8&9wtX?OJK!s~iAnfVaTVZYBB1Os`6&yG61~;<1$=<&q>5Vu;OS4c5qodv7G4dd3 zQ6ny2>Ceoyu(~WuM{Mi~Yb(tH^kkdvaCI~YMLkWp{+dKGKHihtJ@skbG(|BbXsJ~W zG@+I`N4wdr-S!UzEVnzkNZ%4!O*9EZX2;c4bfKQR2mAE__&e^j#EEI+Tb?H_Q=y7< z6H}94+e7tJ*O=q6n#uVuY@j)6w7F^R2B$&zcDsujnLMs=rZe|FAH3cl3}0)H%qFm> 
z(PT%G`O<0grL*PqCZBFV0MMdIyD}xN?b}g-84^zq2L~QeR|i9zX#!|2xW;azmXCY{ zyC?if>}|LtYE}LSMCe<(gD|Jnd~X0}RGgbM|gTl@u?( z5V+;)@>$f8R`octZ!}=Axor5MLW{A^)Z1odJ+;=2*~=h4*9chtE#qs-oAjL5CzS)` zXlhEH%kf#AvJ4aeUPJ`h2T!lRR|rgTJnG*mShi}hul$|Zv+Dh;b=diarv@PL$Zz8R=e~1#?%xntrm;OXS#}f zyolrj)zqx#o*YfDCowl2<;G-w`XK+M&M$Nu(ku<>vWU5dl>uUkVSpQTl$*gAc zrdZ8-V-%5gG<0?_A}^VmfsnQx{ps#fSxdh9r`~P9kn}bfg!}F8%%50gHy>}VVtBLh zgC28H;3({TenVf`gFrgO78jpz|0&7d`Fxf)=`XC5mo}nb{t=>>;f9&0&5n#(>RPOW z|A()4j`F10)&{$5b=kIUblJ9T+cvvw+qU^DtIKwE+1Av#_q=Cj?w$Gm&6R6qW<>1B z9eY1`7SXz4-~cB4C3h$?7Y=Ka(=R!M@Sny(K2(=4P>fp5<<%Ebkd~@GLN!|23#Hkr zwD?R#Dxy6ee#`~5*JFib_KNtHfvwQd-fuDS95O|Xp-5{`7L&g+Xg99|WHR0rV~KaP z*E31G^l3^Xayw%%px*Msn@UhBit_QSuzB?W{5Vs2MgM?;CM2z zP2K%IS<$sFP{r4`Jk_*^ap3~X9NJ`}N@>%VX=&w*6~QmNwNGQXnw+nYv68}^x{!c! zxlO}1_TX-UXKoy&cxag~affEU+F?6A3XjDd^0%tAQ<`=HpunxYLXPDlnP@VT~P4WAz4=M3DC@&?)5E`yO-zAczO4;B8q{)QEEc2>toOIS1ZZ9RK93 z+AV-Jvv%}Q?bLoI8d&p$F708k_nQa?a{4<~9YSP9$GiTTH_2E*q~VNG>Uou%iLp>; z9DkOu{|Oc?&JWvl>1Y+wJmh4ZG2BZv?NddWO*@Hao^s-;`j_pkdR9LO?FWiS0QJnj z_8XAx-Y3Z@9@&i)x@yLk%%J!4NkQ|qCkp$g+(V4{mDnp4Q}^Q!501AqW)dlw4j%Dh z%W#X*sQ)16@*ttQT!CbepFq#hqebDgv=&aep!^!bltQ7J$$-UFr+#6ZbtYB2dtUvB zqSC4V8|T!}x*TPj^1aubgEsA8t25NIbG#MkUKgyTJM5D3WrBXidaY7W=HAajBS0?2B22G1nNUw^N$>?5H5H{R0JVNSHkE zJH66@J{Z5^PrR{SsI@goHB#gbffcWAvA-%D2)S?AA9miGo&F%$z?l0xBYb`y1Sm0U zL8kQ_w&gH|!4@l4Yc_a~68E^zI-{;{us6F;_8M?a+=>EdZhgK zl3OMxcCZLH37BYqK12rP}eGg#38O}Yb zs9~9%YKH0)KkH25rf+dY_wH=oZU7sU?xl)uTO+LqaY|iw^EG;-L9oDBcH0>rZx+EL z_44TfK{*kcw>WSeaXJ$v`9A4CyF#Y0Vi&oL22wkBF$Z)N%15eVhHh%n3yh$|(wMjD z3eEONyiQ2U7t(7cvkcpMbylhGle%J*q9ALTpN(G;S<1AqH@9-*c$a;Q z<20HzDZTp0MqZ50=F80u0Y+MM1K|t4%n&U}c!R+EELFcrS=?Y3bUkmQ`&r;FW z6GW6=n5vf?SXQz8qXak-G>bQKoK1ad7+ZzxuhH7EI>%klI3EQ;242cnlQv!n>nS34 zu=Im1)z|PxRh-l??&+bMu=pNx2pXU)EgI9N*`JI$G<0_Ap}MS5^~MR&({>aOj6FdWgJ$?Lsg ztYQrsJmN3@P5K?)WM0gUU(S~`68S9by7~7S>g97~r+jLT7aTJ6n z&{Wrpym!}E4qs1_bL_e0Orj2$UE@K7(~*s-he7^VMFHAu#qNiQ-=2hZ2fMap@=7IL zB1M|aTTxDC@q5yp%b}A7ip=Jv#6}4++85lHHE8O45@9n<$P4>_$2<%&noWQnjh)B= 
ztyE}1Op0F$nW2YIvD5x1N64FzzIVoK@(Wgnv7dDoJ2-25oCMpJ$qI)xw=dB0=H{;L zbB_ZII{atslP7C3FE4j=&IAGg$wE-!)eSAipG<~NrK|b=8o+FFGZxuO&Sj5NrAT^a zuyCI1FgRlmV|=8EdTurt0jV+>)YqqL1QpPFgDKY`#d%Y-9^6XIelnr8KYjNo6++{9 ziXvCy!w&Wkb;(U!#quZMkay2}N&0s5Fwe@Kj+A>&D&wk~k+olhtTP@%W zYWH>b5Te(|49W-7ml%fE=6Qnxf4Lci>-Q0e0O2gkinb-)kojD+!%(G{nXlwfwD*-9w{nTIbl}VWCyqB*RC?25s=Gr_I$6 zxg`4U03NL?&i)MKk;$6Tqnp^hu3i<>#6Ma9=3{n5Rzh@1GU~%qXQ`Y&4Pwv0!x*Z{7&g(NP{$RC;<*c!xbB|qT(?9pi zUyn`0JC7efzwMWs53SeBltVvl>fH#erKIQQ!}SQG4kX@Vn)mCvnsy;3dU)A;^4|3= z?lgFCDT5lm-BQ{gUZuQJsqE2aS;?WTNXeB-96SdFKte|(wT>Sb!-n#}5asQ{l?iF` zdQ5CD%HZeQ;XdM-blUP8V#T_j0-BnfVBulUax?^}7dSkZu}hcesqDX~n6Q^fP%sfd ztUXX>=S5n8;}A~e&tIHNwm_zuDEZXukbC0Gw|W&LlG+-xd1tjRJTCPk-A3vlGThP- zXJN6WaQ;nA?_vS2RT218PV7F<1yjv=nFa+wrd}^2^^Cy_Lm-px3vPNsi5>d`-ds&P zAGmr3MkkowLVSJ4h?b^{n8gvL9-l2hT+QX%jCjGJ-V7y|60Xe={s^7b^QDc?F1mI* z2=plT$%dxR=yKv>=Z-6$)Ye z>1lhIe;!&|UYc%Lg-IlWDS5WdL#hpP zNJVP@YzYVMY&rSmF|*~n9*88eMlc9;lxTQ!exrl4{>hzE2VrK`%4NPWggTYk8{^x} zRZwVU5!*Q0|Kvp3YVh$5a*I(yXiVS0fHqfIfw^jBwg@CwXqi@5@5MmEV}o5}8QA;E?IvWM#aUG1fsQR%f||P5g;> zARuVMz-WSf#bQY&%L6G9MVmBO%JjZg$9ucb(!L@Q3e&p-Av8zK%`GpsHE4zpFIWG? 
z|=;a#G=_>nY(`L|$=SY5r)V-$Dq+2P$&J^VsD-q?H# zO`s2QABd+PvSdjGdsazE9FKo+oGYv+BJ?Wm8UbAHhh!<(s)GLn03>h-$u$-j3+|rx zjA4HnUDp%<&2^fNLa&&rVzDYDBYlhY`RSVxvUkC74r*zV@xv-hPv*>r6FbzvyP$`dlG{ ze5LYnL%_KtO;?}M8p00WYH}Sg588z8kv9gs2I?`Pv1XP5JCM-r_FEs3m`aCd7RLM~ zLW=BW3>)TU65V?D0ICyZfu9S@0SX73>JO7&%9R9C2Q;?cCp)v+xBSOKCetTh zcBby+xGaIpM=(=T>q5!!cBMa|QcpkFjM(+v-NXYf4ic%CL5U@BdM`6~X2Pq!ZmWGs zid?DeP)zoAp532Yn%~&{P10J?Uq?UiByRE*dwCuX1V61f#zq_cM;zszH@nk#gIh&4 zPk2qV9&@94FXiGViE^3F*;FU-NbI;tr~pRGW*i=WV0!3xuu<^Rd&GYu23v(K>sx=h zQp`XRjH^~e=L~KGIyM2jyBBdg$4}SjaW*om6H_$k219kes}ZOM9aZOGyeRY@VU`>KC zh2N_O0mdBcLQlkWFf#h1i-8BWz4$4j3(B0+x7J{L!qxkC4uIZhb*RUF@BEJmNBqS2 z6#32@56)ctZku0#TMew`%vS%j`RgUhyNl{tXbxjdwL$xE{Xd+Pf2CQu{v|RaQx{pZ zmGQXNR2>EjdwTXeLzG!uE@Wlv zw1C?qotHEQIGciBbI|PHBK1JT*-?9Ge+R^&?hRh1_nl~*pZ+}n9^>q?@?#89T z`h>=*=|1Ud|60y%vT$<%!5Z?7YWdNU9VMWIZVly zJ^Wh|(_(E=e z9T}|Hpd+e61~ap!Q>dC&YrlHK8y0|UoNPM7Aph3GngeO9(;s|V&S!Q{dAyQ{Ms-Hel@tBB-bz20mHo`DH%eiLz<;t+B$m(0|Lxm3H8NWr`CujTzt zNYD z2&Vp6382tu--U>09%8eOKF9vvng2I_^U0Ek=Vo{VDQibCl3Eq6@W>cniT)ykCn7ab zeN>A>Mgu2%2G1OanN~zarnQsDUZ&H03n3}~>E>bYNxlOkHFdoJ*zEPg;fi*M z&xM-Y8YJYr*9v=L_-;<_WPdl)wL^7CUS4JBx=#n!$d7jSln8QJ1lTqKUy`IPM>n^WLtstlmtYxP=yowXh8{PCeG{6`>5T za?g*#`fx~$q4qk*HB2rqlD-;Eh;_{l2R4>VkizCRh9XZE0i>mB&G6bxd)#$>mne5U z&atCU)Y0J27i}#SFhR_!MqGlbEj`+!$xb*QeVI^&Ugi2~#dz<|BaB8H^MJ8*op9%$ z>vZ&tI(=L7w`#QDG6H>C6%#FrvXfo&V2u#wQS62%fw7>OW65rahL!@K=&KvTt<|-@ z571?Y)mNoYnOPz&us8IL4+0X1t?>Ttx19^RO~#`j&S!%^ zx*PYtq0c1K)?b0xjk2*16+XKj;3}wL|Eb}}g6bxIAEx^_A1TfhQ}@vq1|TLrhm5vD z)8Ay|!zgNr)40=kfhO-0#WaMOk7yjTHgB@*oHc*nOg2vusTy}?D@Kt6e0R?!P`@kC z+@>}h;JL?ykO;?6rsj}>f`N!QIMK7$kiq~m0*(9xUZ#d#JFA=b+wHXZZ1RrsRVEoL z{e=OU>)3HyuMji^ggE`zIyMHmYHT~Ji<$adTX542(6Il*P~% zi*(_MX6%UVSpHze$8e>c5xu*)QZ@*^oGu2T6ycLU8fnL30u%MpFP)bYR6)wU13lk) z{x)6S;vt4xRx?{BDBMZag1h@YGpyCwB-F2H`PK<{Yu>X=);8~P2o|1nj_Z@KG8Zqn5}{eb`WI1+C>4o9_VAr`4~r>sOqQ6jEj^ zSE=>)3mnR(3W6eM++?^bC3eU9D7a|n3f@>7#uUjMT*^t5tXn<+;Z)kq9POa9 z+x6~m5h=lTy&^ce=^lw_1tD<%u 
z+Q^L~{(T`A2};pDf0mA3^Ez|z^mVGvfBs@kwlR(pmR?U;mGL<8UVV&+lbLli^^M7Q z!6L=eTZyCu*&cWX%iR+dFkVAj*x)1oplzl<@R+cINB7Ql5vNap&v#p?fjGP9P;Lx( z_B@t(JW?voHMB2+-MzRxhofEW_Dp3|_mjLDuH-Lf@Oh)m+1k(4gJ508!5HM@}y1`Q)3VhRmm_W4rb%so*pfa4D)AY!G*85%HGFIdbr zSa(zx*42^CX;frjafn((^0~2hW8cguHDf!5EHQBeQlHSuH*^c#o~0l^O;YyN)TAPT zM$eULh%LzcE%W3}-e zGO!2RSsU0-u0iUdc}d^Xa-1s7bc{eFE3KrbV)rVEhhgRp9gI)G4MKsR8>}289K(^QZg7`q z8=8@*6eyY$B2xJx&>@OAk=(%;pkV@nf*UUER>%Bq#KMG7zdo7Xcz#bEZM8FBO=fl9 zZ89Ghq2xXuXQ~TJ(kMRaS;)=*zFOR5X~@>Gd{pXYy0G|cp++Ixw;~Olk7!0N*Klw; z<+MQJEQR)wSid>=Y)yMnt4(k8XEXh@^+Y0-Ld9d+L-Blr1z`mOrV4$s~WZ_!X`suR}4;`EZUN*3dX8mCaBHNrTy->|R#Xlp>op+Je7 zcu((>dvKd!0guq-^LxZa0+T%}#HoLarZK*RlT^$OB)eVv!u`NK`0mNUbcV%(fKM7k_)krcV%QI3BSzCk){0 z){!!nN*Muu2kNG%8sV?WU`@}Hd+{yO z)iL1(Z^v%)D|8R#d2FT$NE$0o#Uc=PCF{BsJ2uw*bT|^EHqZ(A4nB@q?U|R)VkR}r zcX0OnvdrKvL;y^wzoBjfn|ytE#g9i>i?9oU%uYF70GfGlsC}CnBn0|d9ss=Z@AN!J zuZ;Q&BCwt|iKy(7KS_3eg@AlY;I*?yiGZS+8`lbF6A3t@_(Mp*zNK|Po>P{v02MU} z40!)#?Zmhy#~9~}@?2j70qZ$q*OpF2OD4u}}tEIr978>k7;T^`ADd(I--mBiw`gd^Tl2 zE_OGz^Pgn@XalBMoU!8KkMlJh$ZY%$%IqeZ3QscWjnfH;WSX~LRgs2tg_PdRMdHp| zipGMj<}+S|%7hirdZ=4V#(J{|?9g18*A!d~Y2u!S7dnx=Xwn^+@{pW3u`OQJ?dNf( z;Feq22r5C+92Q{AG#PyulAGv;9CTLqo3W#o%{C}lxB}i7-(r)Pj==faMPbM|6MU9$ z?qRm+#RC-{&5CLT2B^(6IqJE+R3#brD{fPkQ5-lv9PBYy5j7;(tpmP*JQx> z;sQoU)MVkgz#g+U$Jow$Gpq}>Hn>bnv;w4=k41K^_$8IC8-ItXA8AGh;AE09glfBT zXS2DdoBZY=Ep-(6H`!z{l1g=rQvZxmw~kINCSUU0=I{I!RvhQZ%!)(beZ4{GxO@pTR9Bb$fIiL~7k`{oAJX`UL~l8uCFLrzvzEk# zkjV=|*BL!4#(#l$PMnDfb9SAPJpI!U$;KJL=aqv>`$;OJ=Np|!OaegtL!x+1jemP@ zU+CvnU6eCLeUp+%g3y#O&`L4#@sPEvh3{lLvD-bMmdx`xLkiBupdoi`;;DuF zO4}*PiNmwE3ufn@9iM+uLqf$At-ENnuBNJL3Jn}2(9j3_y@f^}uE9^&wW*dCL?bh~?70TvajY$@M>CP`Ix!IN_{iRMAq5Ra>K$*8fl^wL&-4Q9untd+>zt^~zwA z!4Zdrf}C`Avr`cL5`{{y8Cz{9t17mVd#7X}5v(`Qs%RQ0h5QVa({fA6I;@ zB4hP9z(SB%p*P=#kC3p=%nu!vWV`0U?RpwKvoZ_YYxPE)Q&31m8Q3bb#o3-5+FU!9 zV`!boq0!%#?DxlLn;>$h2q)g+Q^cNEuiIPD?5VbxF#9wq9PPe2fOKr=~pBa>%u$G*noM8j&s^t@OMl|KP$5MC2yZ2dV`NlBL0z9$ 
zlg*=e#n_m^eI!c(Qq*I@anz5<$mAO~hn%|x_sDU1)oj_p)ynChcOjy7nifvZOaG!z zan#wByP>|7Xw63)(cauPSw0@NpG6m?lpmB4DNu~}&m%w?qq?*$OCjaUNp>gW%(?`n zWlH$*4+nN`vpRZ@Ly$0}Py1S9H&PZftBL@^sJ~6?FwoFmgIz~gCD)~D7AHx3G%OBP zy$O#j&bzW5A1vX66co*Zy-W(C@wIrfTQ5rGIzgXJ_WQjD$XLJPF<*(rd@9xALS^2v zFL|Dhd#1AN$p)n*zA6_G#`bqOpN5NirkV=Egw({=T&81m!NL@krE$hk$DHCrc2jA( zX0RrKMxrzonwLKQmIkSf#vzMRtb9uWIiTXp%-A}%g$CF(1Jqmbq@xhH8{MgS<&DT!LPc}27t{X8A>iywJNtZXtiw4>53Vp#8~HQ!rGyr)oq zwT&lh31vPTNu+Pg;m$|U&`P)3KRI2z6i;l;E+F^)Y4y345G{~%+|ratM1OZi12#lL zKIjQx%rYFH=yksqlJyWY)!Hb7@cOOSQ`>xY!-P5Z>`PqO?Th|~c<2)Hjo}+*1b*S$ zsNrKhnM(!dhA+++f|&0M@G+ky5X0hkfv?`E%Z|ob$Enb9vhV)Q{5FHDzsuwWmq$U#M!+fZ)L?_iX8MwDeB|eq@!w5SUn9NW>6fZ>!n)0_geav1yn%Bo ziK^%0oqyv=Pf8mY8bZcMf~}i^Fb-NkYhrHqzdrEq&ot(Q$IAFi*MLHt9o}byGcd9S z$M+zu`~g-fjSljGQYzceF4lIRrY4PYVEHK0NErofB3W3J45a(=GH^VmH2QOqfq_ko zG|cty6$kXD*3vHrYGivh0Rhx#pEJLKR~d+FCN?yvThLKdOIl*yh+HUqKr`)pRd37Y z$Awft>u7PM^Ycfd`rUp9EKOQ|N(d6{!-Ub!!ATlZIkzO~9k~ih)|Y0O3%2iIyVv(2 zNljq8>F4QkC8#3U&T5o^vJE%JpAu=|untm#G6){tR$Ts?^=KuxKtyD0z=fpgG+&S? zSqTHvywC(tV$lCm3dLQDi?;ebs|#PR-qQd!w~#o)8iD;?}39?H}$1yQIgK_g@Tkk>I}(m%dYq zW$Lw18EjVJw<6JmaILM~%5(G%7vm}$_O635ne02~H)(((tdXN+gY7b;(@W*Zo5w!1_6FVlD=RzCqny9! z4CJB}cb>w2iur0is^r=5&Si5HhvzD$+ZC^XvNNoETAnDk*kmp{hz_=jKcSoRq!hw? 
zy&)#srw7)SV+*)7{p93Cp=>sn`AQ4wn&0Zr$HmAS4w;NAyMvO*-OsjIEjl+O47?x5 zTI}|J9)Jq=4+wOGl&jZKXfxS=l^1M$S#bR#j0&u*gt>I*JE$)Z2C5Ni2dAC$nuWYK zAM=jN_{BeUqlin=L%%VtKe43e@OKY)NK^F};L?ZTdIFrnR`mzc=ZyW$iiCC-UQov- zxZy$DW0~e8EXoVw8=LJ=Nw(NE1rK~Omdk*?m>5f?3bgwVbq?AWnca;BNn?wpz5AFp}j`#HittjZM!1cN*v4O+Bi`e z+}RQxY?c~5s3U84M+WQW%|ghVzVT%0I^P-8znB;USr_r+6+~Dqm*G7&SOt!h*lAd2 zK`8a^CmyJsZdZdE==93gOQ^_X-ypp=Du#V0#hRyjb@tZ=5X)rM-PI4Twqcs=P5g#x zrGe2Z;8SiE;p~n>{o}s7jAQ-Pq^o!qkR)c&oyCudl?D^)$J~yXzo1qu7qh4!;7x*u zhDYL%UJ;pLar+y^`pPF8_=95XL2J3SE*Ib%H6<5uIQP{RKgLxmLA+|z2GMM0D^!q` zo7j%8T3ArWFKK5o-&$+-$CJ;#8BieX?w`-YV|iBAkr@xz1NpwRbnAFBSnMJ(4I=t| zA&zW1Jl#H7fG;}^j}ONC@dcq{-H+MT9x56a)N8MU=#Q}2y#|2O%7SGGVWDWDS=1VZ zsV%Hb6~65aPq#yqjn;SVbve4#XBrq~zz2Wtpmx3fX_k@)HZD}Klm)BxA~?rKbD&00 z0)@#;>zixAh!pTGc*YW&+YTD{%w)c81u6Pu9SY56$}p-AqgYcJc{Au@`qyzg^{VqA z9R7AVyVJJqX#s)}>K%9JWgtdoN}*j8ccB{ageOAt!aWPTc;UgvRuMo%_Uf+X)_uiR z)O0AruaO;AdkluRtaZJt;Wo6kB>2P!!cZncyOMT7uy=@y!{ZEVR^!pfPk=Z#=dgAa zmY{N0AaTd`cjw?kG@7UEh<|6pCm{!r|r89cBNr*ttorNY2 z5dmGh*OPb=?QLY-13pD)GZb#_IjY2bxdTrc`F+=Q5Yx83`9ot3LaA&|Z+9p1>SjN5 z`zGxwb5$wnit(j0F#W2f`A=W9tx)L{K5SEk}AV-j2S#2QuWexfXv*aPM1ndK>?|v z^*HogZzRvR1%}OTJMiMw&%jDOzHFQyE|(muL=lLHWDt?U3otrQtCb&5L(hJyFMVO#zFvOs*H&5UU8D<|M4R%?Yu%Y5eRlX$3-nu81BBEv~4 zSbMv!-t26)d?PSgsFP8WM2L4%HOi8mFpFp55&I(rO4WuE1M6p!#OGVRnxPgN7#MZ> zNok`rL}?*#T<}@p^@ZyNX%^$a3WHC-^WAs6QN{e$IYe+Eusu#FM)iQW1 z^RrZ|z?-h|DeZkmz&)GX8Mucs_Og6r4Qcl;M+kJsf-u^ii>r5W1j*18Y`}^KCv!f< zE$H{y$@p3O@wsw9w_OdD&~j3`p+I+c1;=Ev?PsWQz~E^-c}3wV_PPlN0L0v*t%qst zSR+xAM^t~_FB=s>YCi26iM5JhPBi%>fM2d}Q3*LDglx!!(Gx_A2wPonNS}01l9%dY zA2wtor_YQ(OVDV?TK7NVLhRz2!8%O{Qslk8hQ>Q-*X#d`7%k&SV?Pa1AP&nHvdNU* zCo?LazlxlNSQ+1&%qe`*2xjn4PS(B6S_mN%q+$j>n({PcNnPEdJ7Zex;(&Va3A8Hx z9O>Cly`ka20|)n%h+>)VD@g$9xW^2Rr9JIy$%A6qwWzhI=`L}7zlC#FHU@^JW*bgJ z8l27ZCwINvh{&8N$g{H@dte4OBi+5zC^D&U08d>Vm4rXuN54@vg}w{y;DJau7v06G zX7qhmcP>){2j}4&ztmDc#M#EaxOvz%O!cW<#H$3SVkvvHAdS%OG2#Wi&c$SO!M>Ng=CWG zO{N564O#-}?5+f#D@)sMt_UdhNdRWUb31Rd>J* 
zgy%o(`hD7+uR37_R6%E=HU$F3xqwA|oT~tcD#1G7aU=Qk)c4N=_-|70klDygDT+!F zh(W;k)gyN56Msl`+G21uu}5~~4)<(ATqmJRlbwu58ZyqoW{1HJG5}&JOf0f!!<75R z?A&1(eGkGIOFw94{q*UT0}wv>+)>VdO}eHX2YVo)&{_h?IAaV1602J?h;%RMG`rSB z=xCTu`(zsvDMo!_RE0Uy2Wd^d))uVwhNKbtnf?8H{i%hQJCDO+vw|rUh9Cma zBsHL^K4tFW@I@;C*6VwDmw5O13(KPy+F-N%$Cv&l21r_JQS|NdW45pwB+4FId-Z`k zZrcxWRMXM+O?AqZF!F6_qlbFg0)uO5qC*z5Vw6a9yi3AQ2lo;bjz4XuO`Jlb6#^#= zUZ}C6rK?ZYvNZBSs%@KjgU1O2LQZF7zfb*QCG{4eTt#w}Rx;lpv&Q~>9^8d0*}wfB zG44$T^Am&H?Y!@D(5)HfuFHOEAr!~A4^Qa#uyEg7KDO+|YMc_f&Q9$Cgvq;4WuX!x z+Y(rNk$R3)eN{rG{Av%WCp=ChJ=RJ-MU>TAn9ZuTl;twK?Q#zB@(TRzQ-BH~3e`}# zc3X0}oc*)*i7p8fRRaFykbsc_a(CKUT@IYVM4;jkQqqgzRU=$_ZOQ{VmI=`SNSFQH zw%>E-ArT(Lkbs<8gO4{43_YnOn;5xo+Rot!fhwC5`p zcA&9zX7?@03Y0@rmP$Dc{(#=jv^wl&KWbcn_W}RByqGJI415 z5O+Q>&{w$!UY2x%#YBasVPYL^#0++q?-=SVG%gaamtCYgEaZ|VR_^HV9H7pBja;hY z_{G@ixiqoc9vvMqBzFCEMcF9O?2A|0mjuO5obrDL{VJR>GQXXo|UY zqTRB+Wt)?36Z=ialDdfN8p9w=2v2LwWyg|aF(B8L2_2qj=D~yx6Tyu7e)CHY8f@h)leyK9ifT^ds$o~0y-r*TD9{4V(E z@TNoJR0_^ZzJX%8GZNXvF=fCxe}a8({&0X zHI{Ys__UE@!fA;ltbi()JSy>(7)Y`OwcQ7Grv=0Q0~W5>Z{t&%jmRnDryiJB<%qcV zpkV*{aat{Wv^31X+7h@3G`eI1J7uUam0)2gQT#eCF4&fAWOx}yb!%l$JQ%dbx_|3% ze_*!)OC+dpBL?0Sn~vBv3Rv~C6SGi`tiu|c2uq8?eCn2+zM;dG9B1k_rCvK`gLz+X z-6?C8o>L{mTQ4mq!_RH8Uu!Qgvn-}idXzFmE~vfoLCN8LC6qIW#OO zJEE9$@c@vbMby1F>Cs^VD>|nQRzU978?NZ_$M^%V*67 zC4zUGv^FBcTd{Ub0Fw%>|^@ty%;M1dzAQh#omFsN7x}7hea0eot8d%q{BY*U5eJy`&&mvN=Ck;JlJA5* zh@Z0!U5}-A`AirPsq}-ltm_V`=)o>raeb2-%yad&78j3IC=LY81M@@~c8>c*-VUZ0 zRPS9r{2Lx`(Xzv~>em;*ljnfT^YQ}4Q;l*vl76ZaaN$?+pTN#dd%CAUxu@7Cp_DHp z5@Ec5or^-QN;TCKYQOM-dGI-Gp|+#Rp`;{$y&3?0UvkuZn;A@c7Wsv!6UCa6TeV&O zvz~<}YG((-3gh0(IfDh+_lQJ?$qnHN%2Dho$&Lf1*NNTd2VKc}gMr=o0loBRn zGf6b^_JIhr)D|RYvx~>Hui)?g1Q5U?lIx#(8fGe2upim;LtmY_U!^vQpeD(#(zikp z%5S3sEM7mQ_7(Zr_iu?^*2L_DRDE)--$)cF$H;DdVAokMgvOentwl}kD5&@I(|u=7 zH#R0~-FW$n1vQcr)$k}yj_%|lpGKxxsjojeg31H=r6FEHr(p4%M^enI{zIv#sY z|A@r@+ct2=i6jrUJBk&j^fs(I=P!pP^wDH;*BFQC-kBMDdY$=!@3Yger1#hU6~&?% zEPyjJitI278i*E|@+RQF`(^^Zh~Gwi!V1U%jaI5gs})mi)BRo#QY-=c!IWtXd8Fg( 
zBsRoe-9B=W0c1A3Q9`rK=&_}ZXxs=nLRH%MD%>&s>@X#TZi9;Hdm0B~W)V#P$nN~x z`Y+&%Bmk8ET%#47dif&6BdC^PZVU<+TjlWCEPb$3{33O40gSIZpqamF_)5$+fAg9$ z?Yp^PiAtZ=ad^i+Ccz^4-{ZGai1huQXx-onGuidAAl+9S_GpChS%(vKIA-T(lbCfJ zR@%dRbxh22rmqT4JauW|1Uxrw(Y>F1flllkQ3Qao2AT!fK{9TGfi_hNllZ|las2A_ z;SjL@w?$!qT=)+_^n(j;te)?x{|7o=UX>8UG&nCTt)*H(vx=i3{iA4N%jSep0n+s( zNbg4vGQ?LmSYw3^gYPZLG=~m^DyPKd9TcmUvdF*R`z!3*b}`{iQj#05KRvSskG~J| zuk64kwQy#LlpHk0wBAw(4QE?bF{#*Dtdl;Th2X!}2c|7^s;Es0YE$aJ`n>;q+98LODcXwT4f#sp8w64Z7doXlYmk zY;SCMqV3}I#}*V*Qq4LvNLbuJY78f}O^LC3u3)7w?!Tw)llt39GNpvP809>o0aV!D zl}yi$-ktYh4`uZe5;E!lRyeC!ZoCJe0UX1bm}pbZM4?yCRH0H6&O~aXuQD6R97Spd zE3H)GPW_J*;D1>-{>!$KlQ>V6l!%WCmQOcs$Kx1&qqcpo{)t-Hi3pU^D>?KEP%V`t zlr5^)x7CAJVbwT~hVrz_IY=G*^S|LR?hyYniv-R|B2}uE93L+^$W5?MNg$Nx(jNa= z2Ebgqzvq;~{`zB-+Q>e{KD7tENBloxMF4R$LEyH#O)%eIn@p%6keQ;CQ(kWShp6ZX z0|U!9+FS9+)`2_+Jno-&zs~s%*$dtP)Zza*oE|sQ=H)|M~2H zU%ZO=UItsp>Izl=^sE0rtUUYtg8CqkXIZe|ia5X(c^Gg0{q6tdP5Hl_LT`ioYierP zM7!_re7gD4{L!$mBE6n22?+>(2;cXt5s&|$j2fS#Ay-yb`lhGh=jC{LyPxK75y|3`TIKZ51?9de;o4?hS3A@u&w=;+_>AJ6*08PQ7eVId)e zgoMA`^SV=OP(uFyhwl0XiGh*v*X}@wfwgrsFc@?f`VV|*fkaDsn@yB;_H8Nt|KsCn zp#J{-EAaZnr-!dB;FSx9Wb%LV!h{6A3i{UsK&U8`{2wPE^!u()W*tf_qW`sk|L^I& z=KG#=T3VHP0ik!Gt4!MKoTurM?ke|OLH|67(;x7!PrgrdrP>PbD4*)u<|JooZEs`9ALqLfbenE&-*(hubh|kXroq_x$Fu~F7h7OcA3O5|K z(Kj_Fc-;ZHMNb6|#(jP+6leee0_qo+$Lw2M!a5=Y-wPN4T%jrahYx5A4|2OrxDN#NYIfpkKrz`r@4(bxbC5O<$@~+x4ti zWy+R_&;n|Z%~zvLC-?8oAl6D<-AMVfVmFG#3M;~T;V11Uq%9zPc<6N|6SZw9#sQZg zVT1LByw)_){9dub5(pLx?|Gps{})+b6&6>wEgK+6AVBco1b1!Ro!~Bw1$S?(u>ir{ z-5ml1cL@Xy?(Xhx4fJK7d-gu({`YY`uJx@kM$Hu<;mrhO((2r5 z^S5_c`|V}ECn}9P@NY!r%sT`~XL1I|e2(tl{rQdTeYBoyoGGHgicqj2mu-)rnI}(l zcBuWRO?Q0Xpg`)Lo<1Z&Aay2|=Fb;K#`mKBUqU0J1Jebwq7;~v|McT>aY-6knSb-4i&8p^hgB>us@7)AItOY0NpF=Higwdei9+)Wa^Wf9fsY0!s86Ey z-pm*_@+U*SX9&r`+9t|E5(^p@T*tVY=}7Nqtdf7W=$0=RkI0q;@FWIHK}qW`@41P+ zct+F0elt~cYYSIrD-w9*tVWvgpXGxx@*D}KJ|*|q&~uxxjn)wSd0Oi^!dc&(7r*?> z>WW*eU>||G7o0QOql{ckGPp+l#YiPI;^Uv~Wvz?xAr8k*C^lQo2TOi13GPT%pi~i4 
zkVu00&Cb!O*Dp90mecO?EPL9iRLLAsg>QD$jb!`fN~-8{WM@eXbJdR|%V-k0*P%m> z=~E*YC!7gheaeUf7JL zOaWyEnAA@>DTAqerhf?kF(5YtF(2gN0RaL#XquIJ{MTg%J;??3G+2_={ze!7lO%^3*p_hFRV6WAW$6(TtRy);$8ze z>7{_#0kQ4k9m5@o%Ueclww1c1GC0v5_<7skDPby&=-vcnl^JV+#JSpfIuOBE z$7uhT(#y{`l4hy+dm4%3x)rZ-7ID-hXVShs!{^-FW|P&dR{ek*OFfLeoDXUZ!{Q_0c`XH-_r3UqFZo-bgk#dbC|d=d_xaLM9%UPaDx$ zSP`3TzdC?U4M|McDr6?&_Ubwn8)Ka}|FSJ0C((SYn{-5XzG*k-`lq ziOK2VXT?P@hc~iQ`}7vjV!X$H~#dn#4)oy!634SLPEw!A_+7GNBn8=X1rrEx^p z!>}gWYf(R5V=A-FM?F?&i-h{Qq6k-Mhbcf_CXIHm*R0pEG;G(@yoZ#A(MmVZ-9FlA z{p^XJetqb1dOHq~M1zY^iXzHNqQTL^b3ePwqYz`2PZ{Jh1W|gIXMYsMt@*hshq`_E z2dX3u>b9WEEu$bc{_8IH%X-;rhLj7%#X?E6V{7lc9+mm751#SSp2f6Cw-!;vFfm~( z=L6z*cY>M1=s%Ade`{=S-6-opf7%~8E;Uv$PZm?gzkLl#eN*QRSbZ}nMEc3N_8aRm z3z)fS8eeNyo;&B**#S#*Eu)1R^AhpP_7RvF`@Wv-4eyCCYISv@^MBfRfv?bnhJ})& zeHPARDGz_JXgNw?MAwwwoOTVQkA#U%D^o?LJ8D%&z@LTw~3D@H?XugK3#r&M`nz9|q zUL(~A_f^!@uOs5!68}e~kQsjP4u&Q8GV$^*nOP@#)xDC7LIYn6-86bqYAC*VwY@-+glE;1?kEPwpKjGEZQu9{sytrk!FriCpp zVGviZlrbo#mDcnpbTX7qOt2=n_v7=gTO@fK_ui8h7qwdY)~Lss_wDkZ(*4#$Dw`v7-eK@isW+JJipg!<}=I0TVD9?jaWxjg@1>iXu z6?PbMmCZ+Hmz9DJ_Z>?`E+cBvNriYil%e&Bp&bWn9C>mOncOenIm9 zcW^nCqLMgPw73C|Z-SfkM_i35V&So@4^>FyX=Qwr0!$;Elr6oKAm8z4rp4rR%QOP+uv#zs5-J+usJIJS_!Mwn5Cum>*(LhY@8 z6X^eLg(A#I-i|N|R1w`T%Xm(YcY;3#Z;W(XkqJ2MLyycV{eL*NdpM($plA08^B~X@ zV_{b)!{4DE<*I%qT&ul4GZ3Y`@5?TVsCh1VDc#_KyWUgW|E!A3i}y>LM=r7NivR%0 zcWv#$6X)G}%DCg+LRT}PLv%6Ia&Cu@t9k_xXZLU74_70X&*s40vNuo^ipOF=4uGLm zt^GKhTpxt*g=@;vTy&~ofbBvI+fMp>_602)0xzCN|2?2@I8PA{aptEUCPMqm{_j&8 zM<=iH)pSes30#~Lf?VHKYg&UDO6dEc`dKjPd6GX*vn@t)^&)&lr&>#w`DR3T&{g#- z5#ry|4P;z(ZZSJy5*HpF(QW4(>*rP_aS_OZ3y*NHj_(3Y(A$Pjg!S1F+`BwZ+9@>d z^g5&CA%UOoTgNi7mPc(Z zw~w}nJDFv(`*bUUXWf6fKd}d)AulFNj)GAa&=P|&BsapA6`GrcrDW=ngK_6*4J(C? 
zYKUpAOvcceoxN+GyR+}v6GeHRdS!n;8_`7aeH@5vshccLw|k|+nH+T553N@KQM_4C z_RJhkPqrfuR6{Jk%Bg#}`SNT2)Q)=5>QKq=Q}$ipzhUNbh{Pp@}bWDmgfgMaQL~t)inh$P+ z`8-!myweo3J=n#Kq1%qj@*4hSr-snJRZy%U*@w;Q%Cx>z2eUuzPw9$lzzUQrnHdAe);v`hpf+n*F5s@VCq-BUIm|dc1o~jnauYydR1QlP%^oM2wbHmyDR% zDd@(KGjT_>8i`e4<{`_@hfIfNF^wWBqL8})g>HBvAut;JFn@RQw%2N>CzcZE=o=5q z#}aw#Kx6YKUhHG%0d8na@^7s{Gh?)uL$!|&uuCPZA=f669SOwBRL&y;!YeIK2>j@oKaLaLeNHTUC#M5|6)?qQx{^HV2|9(&){ASQ?V%->5MR`v}k57}6>q?^43}2xem5&V1awS@>awq5JurDdQ?_K z?MHLnLL;n=uVC~-kKwQHTzD=a08t~gH^3+D5c*HD5QYin={t*OEFqH{f&3}4#q@8R zn1iF7bv5*mEmLcIQOWS6JY*ccgz4uF5af~pIU+Q-gd1&Q!9HqmxRwz$y&+EE*(40E zyvx@LP^eN723MKDw@PY28h@$(YgrhOr|ZMcYNe=_oR$CmJyj7C`}QSW7lT>NP0#5^ zCW5sH>m(*Qv-F$XufngAO(b5^qdvkD-Pr|)$5H!3^H8dm?8mJB8~d+)c6OzIOBX?R zRis0S)zv)_#7}Nw?(7^B397>JzP&K@I}3v0`+}MdUS&8np#2+jg4GgU)Kox7^jf=; zs1XMC-4=Bv$!g#*n_LCCWSMCh0swqx?%a8<$e$IgW(Vi9wqqru-_Met(aBZ_*3W9v zzoLNZD~z+)QVh)D43=8s0h^Dh`)Jj9kLsE<^x{5!<-Qv)vcH>CZ`+nw^4aCRpI(M+ z*zI<9b>wI$G&Sz7b|Y>P2Q%ckk+$Y`=IT^F`_QRGyo;(8{wb1q^kv%VLXW* zJ@j&Mwh>id!f{G%3zz|SkX6ubC*CSwbGFnCsoeEkR=$PM+K#U;ivqzy^l&1rR8}zG zq*Ka1HiHsD+weqNy)oF2-qt+P6z=Fg{}EA-Rr13PPUM*4EovHm-%J)gCIjzs!k1w>Dckv zEI_+H&Q$eE)E?9~BHx9qpVo{G1nnUOlv}9P)->il=}S-TsWs7&@nt6t%kb6NQllPl zCN&!RiiXMJ&|$UM?Ke(OG|ragO5zZ3opZP1Vdv1lmYL*CmIoD-r8eNCrRpeeCKp)U z4uvFMXkJQ*$Hk0Kt*Gc9GT`n-w{%P}m)58IvxlOMlSZl_dl!>{uP0P%j zjcMwvTHXw;b!cj%@t3_53L%hADSboUY)Y2i5vzpr58g{owW|hC3>8h6wggco;;Z(aB^-2*;$a`2i&hk)lM!;5OmP5;d=(=G zXSoAvIe4v$0(k5RsATUhD+Sh7O&a0ohtMehlq|tWlAMVAf?zlwO{b2G5KK2e`Xwk8 zo)E*6H^S9gwz}WBp9ZF@QUT+qx5RG*PEhYInG{C5gc$}oqQ2ktb<=^44BidZd0xdS zig1*QQoJb=N;W^-_!Y$Lk@Bd-|AS?`SVQvWd2>dTq)AaI>C_*XuHMg%)58$O^a$%&3>3$IOyu$vjvzXkBa7yc zYHYl<+#6oqAxvox9l=crSh@2PU$x1IW%J_jXyBxt(`i##i4OI!2<4lHkcQW=F zQi@9YO4GyDCCX5Y1j|R#k^?6hz4Gp4yIW2tkk0_@(PR}3Q=$N?D(5@rn4rp&u0 zK38$@_1>Aiofgi7?+qw|45BW1t= zw)(7&&@D<9@s&3;otDVkUMA^uzu#`WFg^n{0M-_ukc>7E({$XRMMkXQdYfr=b9&iL6zT|n5F z{2&a%$>+Ai*s|s)S^M)u5}dpETD%qV;ft>U2r}FSdTB6m6rQ*~2B`BFSt+MMOT12$%eI$BDUO)3?ID|wdS 
zUSLpqOS$B!a1kD{IE1g~I{EE!r=r~G#*y`1b0WK+$-vsD-qP#dpM7x z)PT|~A5!F-Tg+qlFF_>@&Q2xUUIX8BlUJ3Bc6yM-%rw>xz!oXWus))lTt{85>|F#Z zUK5{6#ub98>3R+>@nvB%;EW_*T1sA>GAKw|RlZ%oV$C32WU0=YOChx;a!e5U>+MWJ zc?A-vqjo6#_Rhbq7tt6pIoNCv2iQBJ*H-AHbe>FQ1iKwd>Yp5|%?4*# z?riDrZ1thc5>@$Py*Ny+uo=eriz*N_Z75wC8L`6Gy2i?+j7y5Qb%NuM^zg{ z<3hxDbXnbg5h&$mi$>&>ZPpFzlmC4CZ~$ib8s}D*5`;qWYUn+?i>tgh|V-I({rGXwajMiS~otT$Y(c{`Uq(an|=f~I4GX;b6r67!v`mdP2nPoh(9 z-aHq%QCQtY;It3A#Hm%x5BffCe+#co`ukfIStfgB;Q~VQ%W}wl;VFjW=r%o|I?aNg z|9GbHQhSramEG(C#m^B&w+R|tz}=B*Mc(sl_=Sv?sIsVW?7tZIxC#lM!XAbclLw>G z7@mj0kAlIGe@e4aRYufF*?iq=Jdbftgm+`OqOsWQWR+%qvH93MhJVxYZu9ssXMaBs zx_Hgzps1BNY?kX6AGFn92B+O*hF8_RCjBPZ!{l|bvI}`C)#*iRCFn0OOME#P`fHdd zOC>w=mRBO!S|HjIz&n1IzT5X!1tu}rmfYrVi-3$?9aEP@X`SS(K=f-Tw0-G(gech{ z3AF4^(4q5l^NT>MLVC>Ob#cBF2HI;jJ6o;ooK18~4cvP5EBt=P?8DUL2N!e2An_LR z%n`BHScJ9xIpf*$GukB(HZ__(f=%7d#wm87jo;6Wje9~z#l??U=A*qUzoV3I^aI*# zFfX=h@Qi1JX*FsDkO_`U5ZYoqK*51S)qnN^=an0JWkzyJ#KmG(I8 z-e*1d+^Y!4#)@Wt3VH{9_X7wRsOKe$T&r<93!JPYM!MjMx)rYM^;OcZKi&>i>Bz{h zFT&V*KHqL4UamR+flhEsFgEn@DVHS1=!M&z0Q(EcNF3a*!?jeklcFsm*f|%vLM2M3 z^VvQJ5`wjO(jTPj{t1IPLZS%m|9iY$*erlexm?R-3zQPY!9aTtf(!nx$ zVq`a@ZJ46miZ%Lp%O^>Vs4_>1F!35>Qr1`o!+AIk(<&G^?_Nx*>>WyAfFYyH&tL@vYKd<4_W z@sAUXsL*`7`03VwM=hShIE7wX_PVqUnUmmT~K&0C`dkFii%1{8bhX z=tAZgB6_nb>`u#tn5`2g$_#M##=8@#EBUbg+^g%lzGs!17x!F2e}!+S$D3uXQ7xjm zM_pJ_&>NmFgwx0@s&Q(qJetl~CKW~S_&Po|7Tqcjl%hF9aC&YcYRNXO@;;)e9$=~N z5*EYsuirjj-FLLEyLy%q|84K*q#CtsPQtilxKXI1s68(jRs{Ha+`Pcp6e~Q9gM5j7zZC z2%GjfRXN7VFriMVnQITYRKVV$HIQ%)$Ia*d{64D~Y?M)6WFBXoZ;SW*zRLz;sjyyC0TqgzxP1fgU3L5FsxQS%`#;oky{f!r4$;#Pl zmUpf=B7pFZrNq|IoPTc# z{?jVSFdk@O)A{!B?PQ|-`g;~c-R~r1w-@_xrb~Mc|6tNs8;_9_ek?WTrdjfXXw-6J zp>WzmtlD=xRTJ^T|GLF5_lE3v4kVnd`ic4aZxY#1S4dhrUt9oBP*v;jn3m4(q|i&d zmm$0m6DBl+Y9+%cLm6&^{zB>xu^UBygfj*&$Ft?cVsG+pjBEBThfo2(bntPZ@};}E zo}fEXMPm-Ctn6iIkpu#vIoghg^9kL2uCUL*VQ{#@J$$e^?mJM9qwDx6x zHwyFVAey|xJr&vKx(_IR1tC`3Iab&=Hz!;Kla1_NKhjjEg{FC2s#u1mLLIF5?z7fq 
z!#+0p2nNst(__D`X5WmyE`NRdRI+;yhLVW!vWkxh?%O^z@b>CuzQL3dDVrSeXiwb%54m^m^ehN#$qW{=iVOY z$+4;bcA`ZDryxsyp}Y1TV3+sRjg0yNKWl+Cq;-}_vJaViui41!FxrrEz;L?&NT%W&1xS``3CeGvyKWU(}i&i)Pet9Wy%dm(x1nZalnj5o4fk z!k&_Y35`^>`(X@T*&vf~EI}Xf^7TS4*Q2%Mbby`wtuDHaDr7d~qS{$*h|j z1L|wOz@gTu{I&b4w+Zf;$h;4r(MI9Two=Woqya>8;5y-`Idrf%R0R0xUm(HpLXAN+~B_7|PD+#~OGdZ50Og4Po?4J6n1Eu`epU7Q)&->RUy4gZAZ4O9sv4= zd7g8HXkc%jN@Z9m&)=1zW9`QP}5qE5faVwtZl@&ax^-?R8IZ=3(YxZTg^7<-3 z*L>5zddTkNUad!(v8Xp<1ivN7_OdLgl}JUE#rf!4&*ZT%EqG^=JPnwCnb?~}(^iSO z{5!;}-}sGFP`E7}%lu=m16NK-?o4GCf$T>=5q`Xn1?ie6Uo3uyDJBz9<%HIg7@g1! z1XRu;?3i}B4u6wtgkx`n3u~tm>O+$d6coP>ZlXrPGO?bBI4zHGXgv)$@(||hFr%BSpQZUM}vJ-f_&c>f9#jl^j*XH5;LFoab`zK#aS` z&L|(KjZL^7)iE62kZ(merph|NFg%7uadoMa&U=BvZiHU1xQ#pEO2=R(>)9T3`hGz8 zID`2$Eg1WQ5y*!Wh2t*S)W~QE<%|k+8ipN0d#W*$_=#A?0A29l&dwctMwq$Oc;cn3 zaQT~M=w*wJ4|IUE+IpUp*q}~P(iVcNTK!~RsMVeRqzFknd+&1BF!Q&JZbApBcS}`b z<~I>h%I)E>t1O}x z60Be1bhEF!HSsl|@O+jSP^)jozNhVs;Q%c8)+iy-$A6ixTJb4T6cJp z^&ONg=}CJdm=li&b9$@o;(AF|o#GG7BX{-KDeHf$0M21nz7B6L*F*LI^U8IRz^6tU z7deJ;3A<+iAavgEhOs#)A}q#V*~|c)ehzM}^bciAdWf$HDXV#{Bk=?VPO5 z2a7=7*TrH#(o0;yp@ihq?~Iw5=st2U8F;&P;mI?eJKlWf_}$k6LG9_LY`<@|E{UCu z2sd_n7io(=Mme0KpN%UEw|eb(BIP@mF@`>({!&XOt?pX9G&LIvaPqsDqU-1SLHTIyawY=|4hNQ||#( ztLqTL+n;e+wu~dCnoW82_nJ2k4?-KAP67?gMv`-V8B?VV7Q$mr#oppjMYlY9##|gT z_t{~I6%Cb+>4}OuLLPzk_c|^f4--@a2X^5*52^{|e*)3QoeL0niAyzg@s^xkt==5d zZ3PTZ6R2;f?9!KVWQ*!@M~Zy&$_Qina$=(QU;D=-ceJU{)6#&dF8fX(A-b0H&Lx>0 z2{DnkTftw*-Y(F;Nh-uP2FNaM;Q^~FU%|Yk3CHZM+hyC~#_r19+Z=#acPw944jP*_ zAi()f2zt{<#p;#Pyx zSO8Vs)b7Ghw?uWxB=8464Eod5IQg6AT=D|)8*$a!Mcsk!TjH4esm~Tn>-)kc+~U9R z^p_f9`Cj!xr%IX%j!yLt&N)a!RxROw^j1izBsilbCiP{w1@bPwV&!;bvXk-^qFM>E zPDl9;Whb#0D=D3Z)L`wIed4DjnI>qeQvcHvdJ~rbj?(!W<0?jk^Sjz|fXD4==18he zD5>61dPTNARAd1KDCk3*G@cRSoO~q1F=s&LXNOdEMQlg9o->?AsP<|ImfTN`p!jf* zS%A$y(U|S`7zerNEp0gO8S+{EY;CZR={$2JCWp2P84Sbrf{s5H7I#ZqSjyic5Dn{O z1&=Qf#;9GmEXkevPz?HX5OT8gpb0aRPn4cp@>SrtZj;TOO8#s}rsCi{Ig&!GYJVbo zcX*aLb8qN@8qy&kxeac}J$u7NMave0qb*cvj#VOZ{QO4(-mqC-2fxX4;?<3m4rAh0 
zY0>c-ERO);V;vLt6>~wY(fXy5jDu6H?_FJ95>`k%3Y6`byDqfiTAqhfwN(>3u}o0b zCr#wXd~)C8!P36ENIzY7yI#dGYvyJ^PrCKd#7(8OC4O=1FE;%Eb1{qRurI1hxws_!OH@rui7$a9# zI70X|$Hj1y^{|cKvK0HBrhDU9O-hP=1l>K;!*PNcw8B%ZL9d zoVP0SCr#?s!kYkHyB_|I9R{3p%UiLzR5o5Z3wklj zRcwyC`yG`}2e!H9zb4F2 z!JWUm@_h+KtK34|$_ZCZW?U>Yx;MHx_axC0@U+Cv*0Su$#Wn4l6>Kq}Ps6bqLT^Z+ zxbCJZ3@Y_*AwS6FG940w%RPmt^SxXUuL`s$Kh>9t;s0bD{JdCpd_e^o4By)?&^0A+{b2;8!ahF_j?ae(tPA94xq#J1^`zopfk~Pj zme)fWah?a>J}$qe)@HjG$Vw%*y9+z3b(#j&<`rxz#OSFH`s@z7|MuJisYZ`}3g~@!!XB4}Eo8 zw$Rs>i%lL?&*>^QxKhmhtBMKpPp8dKPZV)ny!ZZ_r}S!#Bc5fF;~zi|$owMWy+h}d zFwV#Gflo@>qTB-QLy%D{zE)oZ*HX3gAU4GE`7m)hr4@3&N?lnC9lCahi4lzG#I^p& zoBI3j0YLX%E8IpkA=kJf_lpTo>Tulv|jn&ceZ}t zgI*xDxRv9lRsWr$CN>*aNnEn@Hhh=SaR=4cl&%paAeR$hM$&9S@Ko)kQc?W20)=eMm -7->U zD`&9;KBQ_QwTaQLtJZuU3t5Az-1qO&DvjCBK=V_d(u5d&FC5J!e&@psiojizUw+W}ciiOsmjB#HG?CYbb_j5B=_?o|}dSD@4-duC^_B>w&P`VorIh1#SQI;DQ zH2|yOkHFV^jMnj4OW_g32%__OV4^Y{c3!6mG~YsR=;o!~$NawYh5+r}N6g)REuW^^ z`(Q6RNDoBLuYrRH~zLB+Q&rWQ|QH4}UdVlr5e-X@%QVPY#aUzz78Hx=`ltN_0>QW$_r9MRBe=ZE3DRvT)KIPYP2&@2=r>q>yJJy!&^rzHyxDGIBqB@ z(nX)dL9M5iI3v%`Pz`gPr*Reb#B9>zXI?F zCfAe8WzP?W$2#>gxxTOO=>nDP4hT1Ut#cP#OC9>fKrbefu5|W_+{U^tN#S6HoQ)4p zvLl7YA#r~+DemM^l+!RyRlP<;-)gZG8KLP)-0KBs>=E@Al&BjR=pj6FdIH!pf zhYpxNoW#`o#j~pZa!>>8dM@zdpsiGHkpDi3PB&XeGuPSdNUaM4TEO9MAho9@`|C87 z^li*3eXwK;`QsavE2k%sTh zfKb1)voI%|u>7-N2PgTV7YY#<-j15OCmAJ~k3ojcWbC~bRU&2l%6-BgE$*%o1=$Vg z5lsBV)Oct?+qbZmWLj#-bd(BC4fEc4XZw*?b>_H)RvC#A>198qlgm!M?u*D}3)< zupQHoWUw90?0N4`!>u%V^|hnSb+uOA{~Ye|+O!yi&!I}fY^>br?MbiS^ud)FLWn2P ztVpt^mlm%?RIOcvR@7F~G__HcRch(4YMN7qikbt;Z09KySbH&P9l!S6slRxpR zmSOglS)TUK^n3EOpt|OBMcJr0;o2TIob_J&#i`+Ye~M4=MOak(-$$;0A;-c`53R82 zT<%lkTIGk|WD$ySEH`F|Z$=3+SvOA$~IL5_$vg%0J#E zM)#UXk6^FKUKHK3^KLr8Av$ix){afYBLAL75z@Bu^&ja`x-e**e5{@gp9*?ke=mGJ#Q8hMYIcHIJ$6-8p_#RJasNX|l68d-i6L9S(Oy#?`E8cvX&8DC zB_{E}{%a{3P0T>k!AXnF%?!o&fr91jaXUt;`+YIQFK-#kV}Sx|?Rne=20Oc>Mge?K z0YSk-ROsI3rs%o%^A?7z$=nh@_;%x?!V!OBzamgcY0*EK5r&+qYSU6ewn`O$m$^Py 
z6OLxm4BY@_@?aKOX-UxOlSV^rVb3i@D)^zp^Xj|(TB}m^bI$?iJ|D;$n9eBInD;ef zm7+JNAYEhfZ-Py!R!5Is$9ynf>u9mAlb$CVM`AnVdw>|M_u%%UiL-EL5ZK*TrUB?$ z(5HJhSya)~^7%WWC&a~G3^y8UzalF|`D(kx8|2B~(yUnWAwnrGMX_oi)wkbgb`8;c ztuuFdo_RVA%bS$PCGAYmN5P+wwVW5kzi`ha*y0n#oXIOW@F=w5yA9KEi5B_sSZ;V& zvGsdD8bByV?6)y(n}=|HWp107xpGR*)FmmMwutU)&G+IDp0b4fcX^n4p@m=Y%z41; z42{#;783+$)dHhQ?1XsZB9ceet43^n1RyuruqJO(kCz*?EMdPWk}lIpP7DiY*~~MS zC1nO)y@hKWeSWXdxk_z`=@NzFODvknpO12fIWHv-Q2(YA(9R>2^+V2d`^mqw{QsgN z{u|=`5Wz(9)__sS@bHVXL3C#2q@0egv>JX3JoxD|5$TO&!bVMvO zPVuwlPeI|Q+|71%m>9Il*t3pO=pW9T?vPb;V9HlxpV!i)RF;{Wn+x)z)!DS-s?G$+}np@D^o7_r9#bmC@aNU9fd9lY$ zo}V%qaHm&gSfHaRJMb4UgD783ust-1IOF$>-1@jAteX4eeW`K$1CoGnMK3WLu1K-0 zMcLL)6b@as2X8{ZC>7m-M8_2;9y+^XD}DH^IZQEIY~U;2V-Nh|J_C$4pqdRQ<}9J> z4{l~G9pw+TZua9J4v@}TccRliLa|@rMAMf&#*)I3kWoM6e}Ct9b$wm(z55&TH13KF z6^T`}KS<{P?atofu!b5U*Ina}<9wFaIrz#$$(eePTIsF4=u3YU$b>-^wbpDSnJYZ-|a#&43b(CG-kcnk6FzGJ!SXqM$mSX-tkUtX*X=cBODR}=7T$<5uaRd=D?4I^1u z9t#oB%@O0)@kAU;E0YTPgJf*`*emlKFkJen(f$-+I+$bJwP$|oB(gk)HZHWo{h z#r04Geu4N-ZhR9IT7iu49#PFw+k=riIJm;GDr-w@x7MOS-mC?AzhCa0SY#>OF@xr& zXbOKyZFP*o3VO?Z-uG2Qf>nQkJvcqNj>5|gY3!Sm_;G85y9xJTk{_OOgzO`&;c0gL zNvxaD^oF=ydzMn5VQsg!$vEuMN*<|#zAN+MF_&c3ri_4W>i|46pW`dX^%RrU*4$sd z%}xTa^9lM9kcJyNmq=~TC#ke@KN(x4PzG5rJS5;B_O?GS$~!GreLc!L@N2V|;M1fK zdpW!s^W4#95;l|7%Fd*=FK!Fl8-dh9y6X!3RH0Rh0N+D4aK`(6ZSM*(U8>uSpBOW3 zfhd(iU}1J)9dAg{ojdS16S+X7SODDN+HZ{+NXnL}K`soc-mjc&uO}558CppROa0$$ zJxRd^E;wTFq3g=1_>X@F_a`@afhOtIl<0qd6l~e=q4&u41Fxu9tzoUSAacsv*&`9aMo>9Twp8tzz^X=L@rFA=PSchip?AyzeN z?&X`YA?1Kmalf;!;m3GP53&fMI4K+%JT9f&y&?{kBy+~SET1tkrIB7E@(!c<;W#?+ zGMB%i^#S6@!aKIIJEAZ@Fvnx2`%sAu5WhiCLkdeLE{SPy!EI>O{|m&nYUJ%T=RO7+C$gR zPe=9yle-qf=?w_3_|1{g`t4uuNmokbq`A~c2c8m{ym*heo#0l@ohV)(=am=Q7yy$+ zIBkllRl(wzbK8sTaqt1P*sI5)vH!L-HJ1z3^HeJm&~4M+1PEto?6rcHm!OoI{MMG$GGsqqT zR^Cvq8JitY>qR64!i3u*B`Gw1KAi^FW;vZs1t>S+pwFTc)X~8$T2cs}SfQ%0+%8@P zWi!M)b&Ycl++X`#m+9WxX4;JXum%xHLF}z7gry7%SN4_(5iRx3r4s+<@$ug2OCTy5 z7J1#DPwo~hTYj6TPUNug?7XkcWvGV5qDAVyhZ#QxxR^>iJC=&9u4+z4&9~yDL9g)| 
z-~F#S_<#9%2F!`+Y?QL*I6k6Iq0Wn2us@a@G~^8|YylnL`<@BCdwIH(`WiAKlQTww zOC3D%e;9k`;L5&kZ8Wx%9ox3;q+@lG4m#?NZQHhOtK)Rcj&0jE?|#pB&b{CJJN;JO zs`byRTDA7tbIdvPjPVRvLm@Rdp*8+a^wGtS0NI!h0~kb%ykn(uV=~8+4UnJk#vw&% z(^M;D;;!pGC#}EgY}d)&<{XHF+t|gvd$1i*&7jIpkm~ z>zHs!9LeIp-)hhc;)a&%LUosYq0z*{J36~|+JNHc_XE&r7#)_q!fRc0ZrP*SRk1U5(G7c)i?S=f!egCa!T>@hr%0-vW2ush!Tmdfcv@#f&v zZ>xbdM;Dj0`sunEgOto9NScdq-+g@VaP>M;LOo+locTI0Y#gDeOGK3ZCCUQZoaijA z3*=L~6S8&nJ}5zz2;-0G^~Z|plVE@Jrq570LCMxnQ-~2=q;WoHiOII2*2}>uD!`lG zTK~i+!_cM_V_!rKfeKwLqYDkY={zi=c~U??2V|sBt8u&=5sY_u65GL+p)gc~!$pia zB&+wsi&}8!U|1vk)ewndqWL_YS9+3|JK?H^-oCj{=C>{+i^a?C^IYQ8RL#DXLCj!x zh)-V-h@gaT=hol`2dAMLJW!L z;8T^Q(@f1Mkj{EIT5X3m#~g#{QxrTK977YqK@A2GvU!4nAQ3HsR%N4GT3+p0nZX~t z=8qh}$2mebU*jdAOm7$VuUJle1UX#5?cX zJG92-=V`?LeBIhKw9%F!*T(oA9Xftrh`(XIC_rg*}8~vWP}#?nLm(VHy(R`t7&(2R>o8AdE6v zB!g8PPdhk%y|I5*(YHiwtP&e7)V=ZZf>Z$y&<{7W7{LJDsV)(mr3~DEX@ANnU#Q7R zv&9uw2;)m%LaM@CFWbk(q3_DtTzTHKzE?5jK2z%pH^AJQgR+H(8%SeQr9Og4HD zL1hMpMj;3|Gd&}6hwi&xq5*H)RqIsjVD6H|we^hwSydck@~%SbkT@oCb%K<;f+#m{ z?8cMJ2wivknr{|SVDuke8}}7GzZ~dlEsu=@_pJwdyU1j?mpm7K_v7|qc})>#2L0OF zQlfs^sGJ0A%Ola7Ec6S%1jTRqQ-#;pI|7&NX~P~0Rnegy@YE8^j5zj0Xkhm9WtyX^ z0~zA=b?ZI?%k}&?NLrl6_suP$_P&mX_s+rJ zVHa-rhZ)+b;H^YoazN0(`SSm|$q^x*hh%rUlGWk%0_7s=4Co9)jf{*UYcElwMI|JR zc65`;RT+RAPGYn>)c9?*6C!{pw1z6Aa(pe8)wytRmzE)k+8zjNmGR^I^*q#Xz z#rgTfZGpN)5H}+GnZsNJ#u363D8FBwIfm_XqdK<~6afjlS%*xcMJgojdE3+q`fitV*t*P$*VD_wMS?FWW+U<@ zq$`m?2X(X2jO2qs&147i(>h$sFNjP>{&o?d}?NPM2cn zlLI*cUJwp-$EJl2DmeL_kK=`EJuhYFtT$l2Ru&~UqJ<(|8pB3Nr@)ZC|Z47s}}{Gos&-TcqpCG7#Bq9-itVH8~=1@Tp!`nym|l!+hIaC!~O z&C#Aw_#wzMEdWO83Qwo?8s)?Nfm(%}iPhd!!~QN??I@l7Ift0|(XWAd9ODL)%UU2f zW)Kb%?m&W4`_=|yB^}dLn&{s{Q(qKmItX#$*)-T4TQNq`p*%yVKMufbJ%vqzh;IY} zbrSwT(a;-DzbpdPAARZ&PZrQtWKWh}CX20iFJfA%f;H$)gc5v)e+IlGC@7!|Ke=V- z{@)n*e;pzrYEaZE@2RjPgMW(qe~^4(m_h{LGb+Ia4W$40f&cGqR{>}!py$w8OGGxB z5Ib*&sQiE4;YSqE{YaIooR|wL zAdsy5<8>M~Rx&7bN(tiSwRdq7*^gvf=cos6nE*dlpn^l=Sgz`;FwC`QkVef$w_X`E 
zn**KZ>Pq+EATH*CF*E1f=k)aG(?1va|2RQ@reK>XzQxL<-LvT5Ngz(|j^j2~)Mzo4{h<0Ymc9)v1gsO$S5R)h(GzRCViZr+!Pc$)-2mhefFa0T27ZW@C!6se0 zQUdq*P~Bb5kzsq_2IKF47sp}(`leQcWIkUSyaS1^Z)Ht-dI|mAhn4-`GvEKcx-1Z# zWS~(f9|utC6B#i=lc#-lzHBee^sw+`;CA@~V+Y80<;uZ38_VmdU8Xe7SzaIl@A#$b zxinhM6;YzC_Qa}C9!r`yREpjCw)`G<*dOL>+rKX|0@6y_Jruf1J=wT^ahRpVm5jp z#fK@4BrzJ&|FJky%Mq>k7HU&^v;HXNyXEppd@zY%m_T zu7WQdHg@Uql7QCiqA;{))@Aq#0N_3lKyo$rP-l$8N$L@24s^q~yJMi2Sc{5B{lUC7@~=6QzHNpIuEqUDuNX!v`O~4G1>@vHnF=2dR3WDfd97wV40<@F^ri#b= z7$tNgr{KOu15^i(i(tEzo#U!H%Z24rNRHGvs3H*s&xImRDipeUzX{_Fy`K<+Y4zSx zcl}?bKL3+n^F?`Lfhn{DZs%<$#8H!pfqDk2D^6+@KR*$e!;`S6EJ@(xP<%kWHdSiZ z$xv!O1|g)fT7Ww&`Mb>aEJaQ09C|r>PfYEPUPqm^0fEuWnTKe#c2jVIoxOUzhOOa8p+mQkol>0S8Wm*4_+199>HWr&Xi`UhI~Bj>>v2~|9oB{B$%11@GSN9-q~g< ziwO%q+kEio7U)x1E{C;c7TVXeYiDX!jZ^ zG!dNKs3P%?!$(?$s6JB^w97s?Ap>bKCrEq!w|DO`nQ{aNereQP?>vj)uQ`D%id8B# z8WYSh0li;%fM|B+avV$6SG0vVg|5O-;qn?V9>2ftmj+Q{L^x9&?(SR4SpHJfN&Sz5WCJ!i~ZoYGodq$h^l8qiL=C%NjLax?grVeAY- z|3T%LB@3scPe}v(WmkGZBR+!W6&0}AbIHPgHbqu8JRjU(%4PB+Ry?fxvr2hI;&MTe z@u~SY!eECnGXG2!V!TWb`qow5|Le|**)x$&>+1yMEc@wNgzxJM>>1}9q6ev2MzY7+=mT~pZ<7L@QU0N{7pna z1z!wX$O0-4{^?Od3J;x$^e4JH1e19!m_u0I z0hM`2?~t@ti5uLtrliH6FH{q0pZ;-Y z?D)%_^p%1nxn;cpwx4TAVl7JXazwa05%?NouM8$y#g**`WPx+F%Jgl5jbD-O{Wv z-R!MGv=adKS656hMmQa9YHr)cR!N&%e?T|!NC*Wy>y)Iu=#NJp)!F_xxzv7xPzz*{ zJbK?wqd}v&VfA5yBy}-VAsbc|dz+b36vDHhjG@n-{DBI)CrR!TC;J0KglOt*{eT8@ zK~;yR{nZR;&AIe|B))Y6%^p;vAtb3!UPyy%>5?x5ejz3UyP9cX0n z1X}zyAVDUCK=)nPtStvAvQ99Po88>}1T2gD7YCSXAzxo+kq_NHr?2PzVZ)ZZO zJXVU4n6P#7uh_Wcch`=NuP(d)$xhbuN}-=>KGmRcWGj9SI9{UVX!(a}@^*>=tc z(X0|i9jP`0{o9*bt{D3jNdC2mu-Q*h@oM)oR*Q%=&Ov31U@%+HXK}f#R&0}*#=zC3 zY3zksYI-!3Q?mATk)lgzZ zEaC|t1bd39l6X*nd{8}_z%sHgcO?|A!ck;TCc=PCCq^cK#*VX zs}zfxYITiF5!$lMqC^GexH4liLwkFC`IOn#{Sy*nmQX;%;p5Iz8hnfMCJ|2fTcC3X zR8O6<6UMN4pQw4S7ljkCLRH2e(xp$^vou`!mo|gDS`q4`@$f9?6>rlk(@jS?Z7+15 z(^h@k!|>WD8q8e4)W;9dsEPzauF4qHOpVF$Ivvs=8qM6hhwf!?ZI+BIuM(TiK(i*p 
zQnf~0^G`LL#tL=eI{vhR$qd_^eXPGa)uLOkz+e^FPO}a2=wo7VacN1BbZdgbt&?FZZpY@fn=6Zn-2K7xlpi>Lfikz0G);IY_&%tY2=H8v!`6tZiho*m# zh0D(TC>IO@Qb0ID9HWusk70eR$;7Yab4tIpi5K-mH!kF5(;*csuj|^j5%ww&iO=4*w5lFQWmbMY$ zwJc%PrDd?)J;&&EFXeDfx;oQk<`dFb-+$O75^4&$p%qW+Mo}&EY_0^dV6SkpfJV`! zLwW4 z1w~dgSh#EDKFGN^U3Vb8L3F6+hCsD3zKC!)s$|rJi`{2a$95SAMVj;!e1!}_2E#2wIXi{d z>jDX^P*Y04sXVVVutxC{2DXx$7qJ_n%57qEIzQAlH~GRIR)()RMkA`y^j{1{G+P8j?pjaJr$s)lL+b9EhDQo;o$*2kl-L^NoODV;)s&T3q1 zA~Jwd2q)!hLo7j*$X)A2n zAI;(PXu0bGl%Hwww>hmy))6`@Zzd~5nwGzP^l>N^a{{Uke=9Y1zz;?eMy3a`lB4*4 zxoT}hW;0RfBl>LV>$H?bz~RIuFvkk%2r6qKZj}3p{x?f|1&;RA1cnQb&BXK(k@gF- zhs=uu0hbj-2r%{}g76CC3Q=vH-t9kRv*ab^hu`QS&KrzhvH!eU(QMdY(J`E}KH zo(qO#+fC^(rJOo_8&Iv=5tTxDoQnYO*VT)#U(O}mY<#cODb2W)`m65?f!EncRm;w4 zNUR)Ss_m!lp2k}5bO5j<^(S$CqG>cJW585l8>bv{ToU7@o}gcsx^-XKsbcPqGs4{* z%>Aucw!PC-Q4P9zG6M|R=j+nVkUGAz4}zBm8D@*<^-C6qd9$NgQfj2jGMH4cbB& zlct!unmb}2zjUUbCoM^DbL^-o57)K%jA*_0e|yv*mg{$?PTSg5hshnl)+z6-44S2~ zzH7XBa1m&=noiEAEhc}J7S=@>En7dYO|OouNP&0z7-k^(m9Guju_Yw-d^GmuIBgj0$x>BjtHXyvHPP?27&#>+g_9hA_4C8-*VaVALV$Nf50#KL|q7&c(eWdpl)$6U{}# zEt}+44xFgOgjF2%b}SYuSqMg6{@N^~A$_7a&*4us|2ZMo$QPaazW7}=@I-dy?M%V* z0Mx+IY#hd1Ybq7tmOYQ9VuFn=YC+@ScOL_ifv zbu}5>bRdk1-Qz7B_&MQZ;HaeFMZ>>~9`6A^Di+y@a z(07RIwy;^EXX?LOk2g(-2DhHhk78w2%_+8(t{=qnbn1b;h`Qn$0{%y{>CwU8vo?i0 z@Bn0FVqOzkn}L!wfwNd_07g46;X8W9@~&XbYGQ+1o`CqJUA$g$i(WYXZ;7Q1*MQ{E=st>&)X1V$D&y~4jN~nOI`mld|8*k?28oNqTAP2q zvj4QjGC$^zQ{cZgF_4$v?)mO$wkUe?eGb=zObV{*{J{Z%R{`jb7GE(yEY;@N#%jD7M!zTH|+zxV3^7Xcfx6 zqqaoMTQ9X%Nq8n(V{b|{J)xwN{jZ9#eB20`*JFnEkp{#05To(Z@0WIQy|NGy_TGZo zcPCX`khR+K1Rudo3i!fBRpZneMb3At$+vGARBj#~Qtuv*{>~FF#V2lKrFLC*j=U>? 
zqU~L0c|z%=v&!K-L+F?HO*{;I)cIdmTq&kI-Oh9Qldim;LkXmYI%+g^BWfRxjx_Bz z|EF!^=*$5#p=C#8Wz8|LoE*lqP^%0^BjhZOTW>YWd69ubzS2h z9m*P<4~en8?uA@yc~4BZuHX?}z~+fal9)f)kL8nMdAn0}Qlm{HyKx<8AJs{-V?C&H z4JRa%1jc&{wzYHPRs-kR@9^j)W8LXuaE^p5!H;G!{F(aS??0A8j*iL&ngniEuq6)X z!r{igKCku9g(X~{))P!=Vo1D0-76JYV%q*N+e3o73V022;P%)iyji&P>tO`HcF4QS z%*>wjFu*I8&kA#r3g9tG!CMP<7T&&l0cJxmjhTK&X5K>^>VIlUPkveGB&P7mgfT?q zuQ}VgRv4m@_!f!JgJxB6;BN;O_PmS#mLx<=>aZ@K#);hCdt$WL!h*eAhhJ$JF3>-V z7sZOQh*UEK3{-7*4)78yH?XbvxO*2Y_>!$UgMjk|@+BxNbe;}v@VT9EzMrg>&X$9_ z-&MvK^00N&vodM%BM!X)CA2;LeiG~2Wk|sIzmSyw&dFWI|Epw1O_00oOtDg@Mt;c} z0^PyGibZZ?gM>JiPH|;HB*(jBy2?OVA+<)^N;!;AhX%-idCz@M9T~$0oZ`BqEX(#v^3N{dNpz+MN;MV=%LKtm(8xgM07LccdJk#2W#a z%O>h?zF09@*CLamzeZH5ALex1!5GtS#rj6eDca7-pv>7$0soyPJ_VPy2oA0Ymy-uB zPy9N6*c$z-e1aHKo!x@t*xNf3^+-csDNEmUQz4v%$z%`g}_jCihA0@X= zVNI3D=$T=9dh(P`PsQ|dWHlw*8t0}=*{6;&dPjgbFwxf#a_U}x6by_=7{C+vd7biS z16aBiffGN15z(iJum|S3u*H2G`1$;^d}=>ZLnHLpBW_E2$rww#ec@TuDR*+>PBSnf z{XhL*Qt!K}Y)#jR$RrWO&OIN_m}dbUEth#~Hc_DTl~x7@Xu4k#)LcCc$tdMB!mJJ> zr++RfH8A0Yj?*dhwW;tSI9i8Vst;ZkwP0wgrNjL8et?-2_&OX<#${itSz1%Fx{1Lz zi^ZAfnI}`ALQGE%;4n(? 
z`Ta$d*uLdI=w(D$PYKZT(`ev~q6dhq;K(0X-|+V8enM+Ha&bfBDpY3&A3G%P{5CPW zBjQ35gPIeeR23@)uF!(+;C2qM)K_RX+-5Cw1-kuktiOTBXG^kL3tP|}Bbc2L8c$Hr zc$PZA)@>70=ayYEjaOcYH-!NqAAnc1DKM7Cg5!{K;VZVBqof&=o6^axg@6-U49M?Q zGFtiaM4ho81+&kMdG5%fpHv>T)ZCYNujv2a$Vzl8-!+7ZMGddKqWt7fB z@z$D~bfco` zXNpF1O}Bf8``zXY+1aCLF&?9BwwZf`?5g8Nvb#^33}JrS0q< z3()=xghJe{RgYMDL$i`Y_bmWITlZfn%-awoOUijvbM-~J8LQHKX=V?Na>~Cu+6CNX zN?kYvc8Gg02>tJJ#Q(}rnZOkfInSfhPgWI-83d zl}T;XQ!NGX!F-MUZS+kX@GS2UJ z@wlHZ;m!mPqPajR;}|&%skARtIHr2**vymZLLD*BXd~J|*h#p%+ypThbfMdGUUT&} zJ9VOFcRGZzTpm9s4q-5-iVa2(s(aml;RX_Z(6qI#5`ThQ{IaP(pp92zL{V_7K@cJhjDI5XX=e=oxLNtF8HZPfii-iH& zDxzpl6-<_qt`gTYMFD#QM;Vqt!=Ocslpiy&GC|hHtHw6ZjCBY{K|Km;{_f4(AQuyP zd@fW#y0@rDuXh;<5i|O!KXG@g+3|>3InvmTveFHXj*YG@tk>}UqV-0=q(FUR&!yoviaRQrb0q}0I2N=?)*SZn; zTZXnfM(V%*tjOb$O#1S8L4|%jb1`7(EeQ}rDrUclJ4BLD zYz2JAR^^l1>jRExUWO=Bw;3e@v!>_TL}XaK9&^R%BvK0iM@FVG)36wxV@wH)332t0 ziW}S1{X?sjlNu5jgsY&gF60Z{l@Jb#=AHh=MV+fgQeI^I;pm=|VY7F=NsUK0wVyVB z%>{0f`4K4!&$bQwgKs}xApp@f#L*@}$zO!;+pI0%;fKS+`4M*h`J<2AenQqc%lCkt z0O%A&gVXXT?j3Jrku!XD=^P}U@I}!_&MM_DL&4O+7U*5yDe68HjPk(FIvo1(NI`}l zgFM**4%c@x29~cTh)BBLFibVJfg#%YO3zxRt7QOlb?WfQrnBJJLeUrHzMG)`U^KE& zpROU;8;K?^HvFvUxVGocLtFv*foVY$G7F}D8^AQ9!t)yQ7h%WeiGvHFDP4aV93VLw z*w*el5~0n;R#gm9@BK`kYuyfYybxLB(tZkW2Gn=Oi><+`RadM+X!upf;p{%pZ}mR{ zs~RcfT1hSBSiN>YRLZ}K?DZ} z#~y~f?~p6H`)7j}C~%X{0AXMHT)Tm>D`to%*WGaoWLMYU{d_sXfi$p##S6okY*=s@ zVL?x8G*%nIt^{kosbvFay z$S9@D#TK^@?54_hc<4#pkgiUaF*!t3LQMZyCCEEOMCA@QMIbT=k_2xOT<6Ds=kqSn z2c|M*!SDG5_pZ;c)wLU)yeB zh=^8sZ!|9a( zVlmaFrIk7ldbav~d*R|pN~K`_fq>P=tG#a^82BERK+yYOoaDX?q&>*4} z-M&8Pxl~JhJZnIG%-?pr9~0G;6$E>D_qeVDv&9O(rOTsO=?ccr49B5b190V@Ey{z5 zw19n&YT=&eDG-MS7lM#!Yewqy?|AMH`vD(K45imEGu~qvT`*Fjx7}{tAdcnA%uZ|4 zdhAMwA&f6O2h;MYUA8O_?)xF5!^*Z+-$2y|Ka*^=5VNy3eE+DL?BqmTDwTbQZ?M?} z-4EZBaK~eF&OH_*xWl$Ep6t1#bKXko1##l#F$$FxN*p(U7aJ@*`0XtMoJ!+(Y)G%& zOyK=<#qcgw`ACzDNKU2CZ;&Lj$J_hr)-8+;4@-YKC&Tum5zQP)h+$5NE;mZ_G#rUE z*K4|w1RDNd$L-(YxNe`Q?q<}f#F{#{6LPtncDQEI#3^hI{gh?wNc(rlrj0`ulLNK4 
z&TVQi(K%*LCg1M`gGNWWO~YnC@801}X_?M4c?f0?==-WHh5D?3DOR(ul)$C<^KBge zrbI=8xc?EcnsCFHVsz2)GRS0DxtVIP{%NK>v(JYzF?p_YsmqbJcw7%T<79maa+xRz z&N!%*rKZo6g6rt`i(D2vWS0$pFms3D@8}_eIpAcFFDeyRJ5T0v>4JDJtnB`)p{S;6 ztPUq(nsEc*7%la7q{U;YAp(!wShyC`K!^9xKi25q|L=d7=h@cDR!^14M}2(da_T0a zPiMyYmGTzF1qYzEQH?YvF(VIY{6-3b_4pP&;A1@)6au$Lvk1|Vm3bw*@5!u)I5eR_ zz>|B^8B~T&wYw6eUdO5z38IPXWq%|*W`O6s5wLi+(9xRn6rUv0**VR+${C~_R<{GC zJyAsb`d2?URCX%^#&o3lBDJ4%X6>s7>I4|BJy%aEEPbvMHMA_5!f!&%KoBG`cx4p1 zF)MuwBg9$FUWqjWPoW$y>x+T@KeM%#A$;aeL?iig%)pk1lo3N2w5L)}TT9pwoa$vi zkIcL8075XmiOqd06oeoHyE_MCllfTV;c6#P%DmP~A1uW0*~_6{*f5ES^;8jD2m@iDiz zAWDQq=IrkK%tvu=*10UR+lu9z*C>W=$S-P+N&$QAU#T}$?KQCd_o`I34=YC4KgI$) zaeN(4)%6DsI$X1AKqaI8#cslV{q95liBtA&8`{TpX!$#2WIedI$iZpJAxLxGY$lL@ zu>4N~v%|KYD_V8T;Jef>VqS=!zXqI+QM2*CNB?1qCe}Tlz~*fctk>+frrTe%PW0tW zBQR}GI2L{ttf@<+qG9-}DF2ly)!0CQ7m+r_?)|RYbEbD__~)QNiF#f*8_Je|7XmyT z5D*k6C_RadiHTVos+y(J*IG#05(Px$Ac6vkN$ZC}xQUWL<;GooWMxyeQ~SpHQ?o?d zRG<~v%}@w&oxGDI`-nI)Ph}B{WQZ^qICT(b@o^=n{lWZEFnipVHECJcIRlCs0grv; zX$BDxQCf(N>60b87Pvn6sVs=cBVOF=%5IsH)% zi(41ou#f&?-%5AJnYm<==mJy*W_tR4`=I6FcTyzfr)t$fQ|&g`1gf3@XrWw_7rQsh zs@%0kM2u@*{`$^YW)f705w^#__C(5PIh0!MArl#tFoBNiYz*dOl2) zg13V|o9a6ZAQT8yZ?FNeA!}yGF1!um5m9lS&v(iElJAFj<2j1h1neF2Z6+x7PmYQ2F4x^ zGggr|nd^OY56?hL%py$qhH>~{H5lKRVfXT4bTm$HkWs=6*WN~)5dn_jm*;AC$@!gH zZK?-spn_AhWaEA7dj-8cujY_^%p8>A``c!n>$5sPETUM094O#x$nhP;w39HG+Qb9O zYV-L(7PY|K9JdfHC8mG5o24af?xAgZFIO$7{c?#dKuH(oL+b`hy=qk~Nmr{hzZVq) zB{Wo1z>?KOs#@n;8I6E$?Ke#J%W3%awidFI^V1UwgL*d?;8ZF>9h>47N)`&v9y`Q1 z?yXbAoK4K#$~G8x9F1iB2ZnngGDNsUN3|%ET!o)#aK#&=?SYvuIr0kXpKJwHA|?oR z5MvDFv7YwQ=DAy3*v%6J`SZ$)hM&h3N~H*~TFc#0{;j=1x~PB`gvlKKkW5iSH-vlC zLK*oa&Wj7mR=5%o_-|UCLput6pQHbj0#Gh`S=}ZGQwX6zR)zAq@5JOsgz8n_qpF}8 zr!ZvFdtvF+Y-S;|Ryd?cpy3k7vSMRs>u;2cqIsVc+9P!;{y<<939L z4#p~p_m}DgMV5IyM1rQLC85_FkA~An*N6^~Bb7yT21qmv_eBP2qpDm{xynB^z2s2z z#BW&z>|9rTn4E6j3E_#oPtyKg{G{)(E_)HHfawl%`o-cit%cu?2ivL+b|}g*eVjsg zw9vr2#=1JgT2tq0_Xw8Eq!-4}u1HF+BB{!^i@74tFU+O{;zeiGX;jw6yO9Q|>HE*C 
zQy;Y4_QjH^LxGMRH%bXY2czJX6QEMo|D^YDr>b*@`%9faG!{Ndi(;-JN}&de*Qu1d z#kR0CW%G-svIRTbig*=gK>r0@OrU5LAef765k87lH1l;;mc49?oE*~_ESWQ3e7Do^ zJnWTJy#j0QR?f5$H&10S+$V++UEl-@Jvn~Nag1Kn+>C)0P|YOC$V;Bh&r4N|V9J=e z87WV@_WDUXArynECX%?5^1Y_u);vg^fWB!GpP1Gp1+Ky$abRh`L_AfLqLejCa1r(- zA$e=bK%+SfR^T)OZ6G|HR3f~vN6|pJ(wd0HW%Vn`qEg|FpT=CryP;+XZtu#BD2B@= z-9nBJZRzG3^uyDGV7Qy3Xyl2O;!-SX;ZF&@ONN~?9kQb@vMDJdB^W?bFcbpwLo8<% z_s(Mvql}vBtJE&VH#|zLbgs!5kt22)RM*Fb)E8N!xJh-c6kVyv zKI#}}>b;w$6WXU4b-DDjfS<3{qDJMu3Hi!3%2zix!uxCPnVQT*@E+*8;+u7+1GCuUtL3Z#DmpqW3N2W5=nc`nW&TiV ztsKVlIH+0TkVzy5oli!lRB=+~&*dkQq_j;0l+HTva!O9DChaVlWGID z3FRc+nV}hrc~L@i22xVenIkG4amgVVie<0bWH};|2gL^z4uZ;{poYjJKD%GXou2xo zU-bGHU&2Hv72a56=0=Uk=uNFccP`$`WG>x!)sJ!d6O2a1ABo`MWg|2u>EXhx3pBz! z;>_co3d&HuPX#1y<7c=wa~^4@X0G`j8aLcK8UOcM*{J%ySa^MC)Pu3>=02H->M8fA z(?*`cDfLpvZt6zrx_$2_N6m$m&t2M^mr87mhbup2Jx}P6G^x}7JZ~|}tYN%g2a(q* z>qz~58?1y~;*P5al`bd)nk@4(Q=z=bcPAiDbxn}yO|Pn@(W=u_d*iEgWq5?%U=jPczZ+`9%Cr>eMpIC@oR2pE&9=ZO#h6st^|pNtouZz zY4~NKV)jnPZ&}qaI*Y+j;{mJ9Km*|~(;jP^J9YGBW(6lP(5A;v1hN}W*XsY<_S44# z`;pc_7e-Pc0j`z1Trl6@+b>QU!URrZBy*LeQO4&QS zR+Q>?7kl#SvpDXBEXW6|_{m7Srj){8bG+Urd@eprM3u0c7!5dZ3ORy?62VEiIlY;> zpNteGeUpbDDXKb+uBCdreu2Q({^Jss#CpmHZy3kOO^P7O^{NCL$AG!}g58#6LpEqr zGWc9mF?tjY{FV-`Muw=%a?ZQpRPNly+L9nxIL;#q^=wb^+4uNVj4$&xxLVR8Bq{K2 z>F?Bm;k3ts-=#|cj}J-(FXn1ACL&2+y{N)(6@{#>1KBCx{Esg|xmNtqkGCnU@RSOj0uuxp$ z%sQ|%|8CeBG*NS@1TC$$sF>~8g9_2XUGf%TtddPhqaA@erUglo{ikI70X0i@V6f}lr#L|Xwf#UI0Hl-#H2VXJjjz&`eCqV= zg_O5nFU5~%ie?W=##{`0{e=eDcs%4=J*P zO@G!WTzUSib5iWeh5yk%^WjW?$yx3{^?9K0sJJT)*{G(;*9DlAy90-%SYiP>elLVe zeR-Ji=rTX=kQsG+i>BBgy8P=Yz<9E!n=17_+ixAe%3?*NL;fD(Pdh+N<6liSWH>g= zNuBuTC99koob-~QU%dQI>-RH0yOw~yRZQ~xkp5V$A9cp4A8qH(&BUaZpE1nP2Mzl6 z?JAnigfep9P|U#9L(;Y-&DGnOy4<@J2|L9aOMd~8%+Kb4RzWDm3!(15k{+c~#Ns5B z9m%ZlPmX4hjou*TQeQet`knGCG(N4n_kgQxc1U=x9Epz{_m1+)kz2nJT2OgQ1kvwM zB;;D4t8(As=uDism!#3l?Afg8#H3NI@F+r(9Mt!Ut+Fc3Rw074|Fw3I|7Y#w4$eKo zhp1TGW`2z6`5?&rQaQ3HpRtI)`08FyRl>4v0_Uyg0l`8`pxa8}eZjG!Hm>G(~||H_W3GP;;# 
zI3Rw>Vll8wT{NX)!@SEmnDYHW&GLbeymNAgd~7^2i3?4+*ubUkU`d`t)qF!ePx)(t z7AiRsJt3_(y&)4dS-w?Okm`K2;R>S=>U+82xTas>;;DG|vwXhduDvKN3)k0M467xn zN}hXZ>N3UD=%#1anp%ufv+v=E*+=6CoEiun zeZ>q@K%@&_YqbxT8HWx-q#{bMzFb$uakSrVc42*+~l>$g8*cGp|H(^V4k1~hK(M30c-w|d3l0wog>bQ+goSUrPV!5`o` zqFKXIhXeg2=MvRp3cc~7>_BG!=~^S7y8R`J>#Ji2)GJnDem6&Ld@U~dhHBl!Vw-e$ zSqqOij>XS@=1tw5O_(Ax^FroQiK_^zowiSao&GrcbiT2Hc_I;3ZtH?6&2No!%tIzU zv=5h@(5=)bKXF={1{UGle2^$ivC?&?KyFtqwSLPSyW~9SgBCahasWh+I|5%MS0}`6_(8FD#UpH71mL^=+_N-qw zoQbdxPp@bCs2XUCVkIQPq*>v(a0NO@77z=j5~>&dM>g%%kI9X#JL$pv`@~%#cm{^e z>VlEwpV>P|g4R6s!Z~3ms|HpfZoK#f$A1;7@^QVS^ zgbhw`CQs+!sT?MRocJx_rfFa@Ne}aP0rvINDWH9%%-R> zd$>&>b=arPL_K|})TO4*#pE7>7-OeU8dx;n`F(<^9xueo7R1bVeS4Em3UpJlP1Rrd z_U!9|`nfygu_C(;CkNgnGOpDq#VTnrF`QG2asMB-zB#(J+P8vHm zwr!(v(%430n~iPTHum;?cYn|BHvipwpXZ!=&dhiGnHh;P%fppgd}-4^3c2_}?(kwQ zVWzcNKEzJeC%WU=qS z7CcGyub{gVh0hAAPT*_T4*Hw?g0;zW$9&I6xa?si)EQB4#;taPNjq}LEp8+@P%Q;1 z)1g1pUEyn5&Np0+kGMWfc69N&WtB;9k1%0oiw~pvq`ceq_}4H;F~tQWA9}xvAeuou*H$>Pq)F~^imD|2ROMay@=}fr#x559QC@A zFEYFEE#8PpuU5oFOYml{XgRq#D5dBHpGGy!z`t(a)5&_mW1so@n!e&o_T^tzd?df* za2|6Y^e20QbXo5Y{C2z{Z^C@EW##31M(6{i0kCF{$bOdX;?FX#6Qz2l3vI9}5ya{i zN>~KzZK2mD(FWr`ryE~6vA(brX8HYQN=Ib9{`@GRD$+wyE?+4oismp1`Sqysmi=6Q zWB9C2tPcMcqamGO~LaXKE!J)7y=GM ze|wX*DqdY!sMH#RN(U3a`9|Q>4q{XH)ogcCDUUmos{nl%0ZV8?aSUyfFNI+?_O)To(SYb3D-#|Uy9VhY&UJY8 zaE-6;y;JDNHg|y-taX@C^-fi`#H4Okf$9hd5cPI}VAT@X2jKy&v+6nXjH{O&%v_(j z+qjHzUi7mPGz_{ATWrt^qX`GjdQ5B>iQqp=N~%pJb!ANx$zF!pSkL06fRF1A%%wLX z7x{Oo`HR_B7q4r+5l(vRdf!;Z0~-NY8~OM?{o{Irz>Qv{}`gP&NgAtZdd;ijLVA=js_jTumYNKu#U!af!6@xsw1pxFZ28{Aq+LEw|mQ0G4&zGdn}hfsR)heR0C}MDJP#LfPyr zoe3l&6uSRR18<*Jv$;qup!!Q2=hcPwL8cxq(ohmlI#m ztG`U}*5_|8D%d=*Z+JLD6c|t#eBIvrj~C*>2ESbm3p5ry zBsB1pucUd6tgX`_tfM$EfTt@?F!2vOPT3rIQ2jq1!GAMT4>W(@VP=_X3k)GSMLwdt zvQkR`%Rz3SpM-)23`PEDh60an$vj5k1lHNTf@taV*kK%Ua^l% zzQDybRHMTY=bhzZw0X3SkcsSOLM>$T%}J09zGS!d6SW|Q^6$gjFVH21HC5lXu%|Z9 z){NAu&A`Walm#eQ#Oqr{-e1RhCTpi?+)xdG+tJzn={ zQuUxh0r3+&UI4q}pRT^S*vt;bciSzL-~aH}P!1?53NG95>evhxX;H-;uTkKW%1jO` 
zY7uAZ=wnm8-O*?SX&mp}pFaH!IwRJNbvFxGzU&nHW`GB0TVIW9nW)OY+uptm zc^8w=zaY*%$n;0>H*9^+=*Qyqf)A==fD$k9+rj*RkyQdqa5g@PV2bI?AkPJ|Ju^+T zHcaZEyb?M0$(I*6t6;A9Tmq7yMv#k2{Pm#Dt<9a~9Q7jg6njFE! z1He>J)(F|qthG7W#GF>qfFuT?^+)1PjlKaw z-=q(T@Wxw*Fzw;h6O=ojHGvBlw*c@v@btUfiiFa&#t7I^vy3_6Jm7+}W-cz|7YEDt zXO(#F))wC|i)>RV|17_m&$^4rK2YLi%)=n84Hh6sCKc#q=`caKPpJG<#(%^!u8n7dPoj*=O!2cc^uF zcnrw5=WraDHP_hjSupX03Wm>Y;93I9j~lj{Rl*6~-So?nC`fEA@*T&cq;G|6&3W=+;O5s7c&R)%4dNh1BoHl4X78!>sgskpI(w zixod!Wx8ka;wlwd0REKg9H&tAy5;7QuPTQ=|32cJPiQxP8bq9~&>Uv&7I}R7y1D+L z30i}-@wuBLnOt`m7C3pf#Mz_o2)=p^(!KC0ae0bn^e7nX>3YUNvzUNF?RU#|MCSn7 zJanlOY2TvfV5UWPSi~UAu8_&xiFPr;Zyk-F+gagD0zkRbF>vZ2airm8MlzyqRh5g@ zoI$u}Xy7TontuZznp}lVOxb9Lt`c}fe^8E5Bk~1O1BP?$u_vo9w?>1>v0o^_@Et{% z2QtT4UR-mXhs(;SU!!_ZJ{OK6;#w$)TU7V94?^Hvb3Ey)P%xBMqd@;^CU8RaBzQ76 zINR(k{s!b(6EhZ45FNtAEFW((ThQh0fhnWaCDcTgaATtays^MW;C9Io*x(L>#;5`h zan($sVH|A8BYxT67Y^-F5$rqO^N&TmK=>=CJEMhx;6hfqhd0;{)3RBkSzTBU*Y`H{ zU}q;j|0kHIDudQeG&SjDW| z{}`(1c*+F32+mTn9&-ScLlC`SJEozkET#z__~CsmvvX|XgEs@cL4Kqa>M#w>hCsnl!aI;dquOrX9IW(`H0pVyg=2w zMjGlae1O(ysL8o;pKepMh~y9DBvOobSCGIC&S>2&Vog}K;Ebx?0C|DKkCFRT$(Ixg z>pP&`Q=>GZdsl0k$#c(+XJ`tCv@IyxXx5|ZjimH`rt$ZkY6*LqKkrdPsn(KWWqmY+ zGIs*rxOHaF(fWIDy@4undMVzDMuT`^5IS*%RiB^qy|*BdK!XyS$_gP~aT1;-P$!n+$#!iu+A z!Gp*5IqOh~dF5*ZzCAV1vf~r6W+|&!rI+VKp89vgy4j5d2UwlgI^ZHjXl`YD|0Y3y zs6uw#r`jJ2w(`-&;9>?3G4)hGN7+LYxauyJcOr-SK~9Ys9I%)$48$68&FUF#$*1!& zq2B^N;3abyP;5e1sM~0ep(5d+=vw@lU!n9Se4cFT8r)E?s4_GAD&qjKTSvkZfbgvG z1sI?;`D}%#v&Gsg!tkZIp-3l5+ZtqKL@(nc=2<)X?DFRB5&^{BlR4;6YZID29mMV_{^lvnE1C6$%lO0y_3tqaNLY4jgHOox) zcX$>&Zy*q6wY^`W1#K$D6ge#Od`|d0D4#lYv_OP|+_T)YDUsQ3enY9T_2Eu?uY-lV z@iJMBP}P=fz!r<@D+6l7_KOpe`CM`4@IAx8Zd>$vi#7o&`5-6dT)m#pJ3!a_Uyn3~ z9jMLQj=#Y{@$j|)?N)z|Mmm~KUA%mI+R-GRK4Rd%QB zX^k*SmP-iWkd9(a9zUpva_sYt$j+ljbiZ^?Eg|0B-C37FuCE`xI^U=x%$T7oDN$oS zT)?t3Xoq#&X9(wsqSRt<_zJ%M$_n`tT+#{;Y;lTMFTl-=Y@5I=3f9ZzWh(k1+PKiK zyeP`Q5FXm@Eu3D-r<_tJ=tXI|!a|y3>v1R|l~Rvv!rb7q5A2MbEFwFJg+6wuX0*{= 
z;~CLuB}g)2FIp&~_c~mJO$gh~M(4HEGq;5J6RkAd#@KoOuOS5fLPi0MOTf@Ur61_a zQrOX1-d>;WAnho3PgU#YCZZTGJFyBCVwNPkfmM<5yb<#eXe7}#A*6M*O&tzBRxJRW?%p;LR z0|Q6nJhc|eJjP>ws6NZhWHY!7U zc~nbxM6u^g^XzX7l&}hM1ceg=8#Ae$S!D~L))CV2j!NcWH3 zCc6kb<@}!r(rJ4hZ}mZe^2G={0Wg}{j)lY$^}OX-R(aa_wA7|HyQd>)#>Fvdl!KM=g)BrWO(oXX}3sr>>M zS7(1JO%&CREBQb}kJF_2}+^}~X>R)vQ zN)TE?i&+uu=VHCH=mo+N<2|J+e>eUpJpGmeRAJVwfK4B$!Y`}N7&79!Lh~7`@+~4xtsTfDw@}kTy6flm zK=8fsO6Y8k$8pz8%`hs0x;D~oB%O${_dy_n=5ONy zu`BL?;26iVgIil5$Me&VTf7ndkS`LhtldIF$QUT#`Ewv}lGG<@ zETryt7B;q3*)g(6t|FiG(P*G0C9zDfk8Z&faKCbLPubFs7O7L?Oxkf%dP;EM<3wUx zoZxR5TKgssBCqhZC6x3f>h4pgx-Hc3Ki#~+a9I`+x6L3jFfa@nzGtuLTtdFARf4&t zLxxQha=vjW=h8pps)E7g?E2_o@l^4_8o${n&$|ap?%@t0P@wYS1t#FseSTM9Z2i$O z+P_YDMmis@#WZHmxuo(3iPm8LT|`?jiCu5XkdSWO8S4F5R5>(*RBmXttM;3Z?e+ZY zc|+N1?o} zTELR7=)X7wKykmQ;V?MOVK>4~g>*4~#HuS`0)u4@74}Pp&Pu51w70lGA`TXh1CbfV zC9&*Lvi)L1psEdUISQ zTAV;qsHlzpYpPV(l02VL9CGuQi1ydzEVsp%DqtPIQU@{}HhcVRqjwEq;rBTmLExdT zXy$RT0?kx5h?m6@#H+@PST&wm+KV}LJwaOtmAFZAaa~A6gy>}I*}+sX`f%gY##;N>PodEqk2H?^k+^Iq%bksd+W3dJ#K(ur-mKW1*ZxEg~HHzkHu9vaR zE$jF_0^xK$>hu;X?v;_ADk~9c^JEVqnco>AlMS(KOa_yT1?oyLOQpUjwH88b%3|8rQn2-sLxwN9|jFduE6N zv|5GrwpqnOU5eXPcT;%;@PVkV7|^<(tL)U6^c5BNU54u^3$Bzb>WEAkr7RN0b&nfp z(}wG`YcU`AS^tesW)`%t?cmG{$9rSLabWo%4FsmcZ0ZXVz+*j3Z4C+;TMI|Jo%pfk zs<8Xz8kt+An_Q&V9if}|qjSH`={*Leb9(Y3Cps|<{@iwFHCu!)S6B6GJ7Gv<(2SrD zuFP?|*TdRmf?ikbafRESk;)c6SgrY6=b9sI3{YVEl_i-pR-{JvHRmj?J5%zlJL8(0 z^_Ulp0Ng9Ubwl_elQ)Vt7;$qV#ayYJVjwy_sMGxBn-c9#gm?6tD7rmM^}7n)RZ>M1 z4%^Z8zF7aoD17uef9JOQe3j8L#tD}#ZUSz-fc^c`prr|7n=(5ZqcToDtVwWuL1e+1 zXo6N)5+}PmMgijsuCZf_?#wr&8R`UoH(yT=9P^{GS8DfC8`s-PG^No#ZTef*-2S)R zuPQ#mtuZWb^0{aDLDBbUbSn-Rb?#9)u(k3?03ri|D zuCL@*6W#u@K2A5JqGel7U~NqxU&xksYm4yx^c?4*k|WBT-HDgZPg@&xgRK=?Q}OHW z-6_li#|I*%>-zvU<9)R3iG3qP?M`yXh(mUiL_^Gt@UHa$H5f2h?^3WcZpyJs0)TdDfgw2*pmA4sS*qEnXs^%>ZGO$+pD>3y*Y0y=|g6x39Xd zN2JY9K|aUSyZY|MKHJsNF8$@F8h>^))%FgF1nU8DB4InRzCIf;MU`^b(&*X>P-pr@ zb*IQ*ee@uVQmPcH<{IJRcI$(63&bui4ykzDciidU3i6}i+$rIyGs&@*XDHSlu}GYB zxX1>G4hKq$BpGKU0z;zUF@0sN 
zLRTdr!}=4_0?RenCQX(oZ1+Wz<%&KdIn?%NQFAK<1VV<^;F^RcDqq2cLO=jwuW}mZ z-E4vD;6z0Fpp9N$*+BJcRITO5Bxt-W_?7S)QRrr@KXbzh&GkXB=HuL>ACK{PqGw@? zRIM8`HU8kzBORh#LQJI{1X7p%%K)w3X*dx zSATK0`h_)95Hu*0EtSr`8aTewl@s^uoRd4CX1lY;g7WLzLJmwb8P79YHb{B=!B?v` z7aGYGB548)t$5C6m-wPz{5623b5+|rY6uB5crVvV7eQraFjl>ZFq3`uP-1%8(6S$} zA?&XnebiTpv}`0>Zv>l@<#7k+ZR#sX;TU?1gbGDm-)l>LDC~?J(%?7}5<6vdFd9l& zGGlE;mSRD;oAcwHkZ=36!hDcp`kUQvZv*-f*k@_H{)&a;n_G+5jV^tJ<2gohFmO9G zayYYZ^BC!0*+oy!uiB^S)_i<$O-wtcFT5|1F&Q;GZ#AM(O6L>>Gtpcn6U1-T(PP%j z(ah$`0%?p^x9;X4^p`79H(AWd?Hma~Wo2cRY*}QcS?55flz5Kv*%wXN33kqXnY6V5 z%8QN2z62caDPX`{>Vns-&lVeQmf)({%*;AicVlKC1e)rrO=87%^KU9IY(6yb=NU1T zR422Sg{;WF^zC0?%=_t1njb!?yH);nS3Npq|LiEyLUB+dMwp_Vy78yhFOS2R(LI8I zbYNmeRy}ZH(^f)}AXhwE?AOsF0LR3vFvRP~C3flj^mQ@+ zwerH1Xsj8VYR zb*|MP#Dhyo@c7|h=%e4wyQ%2^tN7=D=HG%2)dZTF2Zo+M(hH>o|~bK zA|AU8xV~1A5oG-kz(+->>%yC>)h|>Ai=KD)TIy&XbWdh6VNk)F&)xU&I#|u2qqjm{ zf1Zx|Rqx&hR}zbzC4bEgn6=~SaEE|uDC>Qjp1)Ag3ksP45up*Ft=J=f85vtT>FaBuCY;^qi2RjeUWyaE8oI(I5gt1BeH3{H&+4?v* zI2bU94#bg;7$N4G+a892N1rvO`rSpY?28*aZ z@iqrQc+>SL*0d_EE)cBmk3$qnjoH{a1^Ab<(V03<$7?+nM$<;>K79Eg2XY||N_<3dpiHVVJV;cSrFfb_QFh|mAR46B{X1N3lVtiwCuV|ZI4 zq~A3ImD|rJ8}+z2CvW((l{a_W?33#N*iN6gbM23x2EDhDRTx(T%uB!kybJz9wVCpq zrMz31Ii^AY*!|w2*ojRb(Wkk=JHB21|GZH~hdvG_k38 zrE6!fwpuP>v*^!^MNhrOu|gA`ZVqUup6o*D4HP_(bwYgN=5pse9&XxL)|x{V5t8K) z=i?t(ll2N<zQoZl9;m?@k&tTJ)MYQ1$6YLzue#=j?NO? 
zhXWVEgk}b@DE#nA{F2U}e#1Me5!e?86C@91jff3ys~eNWvD61rtBu={;o!A1@>gpg zFh2$!nAnZ=y)pfTg?#sbQIWRd+c9T3uzBq)Uu5Um$N`)*bEG2HC52YD((DH=sj9dC zw%wSS3y;I@74M8!57 z%_CJ@Y$e>U#Id7$*_r+C*zYxi{vLA?(w%xqUw>j8aZgHv{0@w|;;>4^+g8&mH9y!S zmB}sCcKd2sRl?i>#j5#glG_-`?^Q`C=TOlfwd1_u0x32%sue6gkf^_SrOZjF59cn9 z`7I|0g_Js=fY&|C6`P*DQajLp|E#Nj3`HG|OTUOi<2KVhMOfz3R-3z5thKkU zM!ec%+e)+j-jAjqa71{kIc+o?Oo&q_U~<1il`0)AOq*!Xmk}59W4SxEyUOM`7BF8- z*vvtP;2!!})tRxgJJQ`oG7;v^*sciB_32q*^ir~+!iF&}4rc)u`Sk{}{=FLd7)ev0 zNC_6kLcUl~!{K}q!|2*5=P-AuvPFVpZrn|m4FbTI%;U)Jv^o@l;othDzXndiq#@tS zatJp>@D4eg;;CCF9Z4$~NwBVU=A9wI$b);A#o`xpsM|_;*Q_SO2A3Y5SVOo*AWKS* z#M*`{?MHq!Xxv|^dmsWNFx9I0;^7cyo5s#$N)`HNcr`dW_@pZdFvJOp(tcL^MpheJ zxK@koTSrf?t1J6Dq)o{>BP>}eKUqH+5*8s?CLs)muh!k)g`J|6wh1s7jeJE2$u}5E zFq;Ua(#w*CnWk)b7db={RSkl-X;$M)xuOF=g=stQwGC7B*bP= zUl}>^&IVXMNJ&3Ys1h>d_Vc z@pizeTxIDqT#msLR4c|uc}OYi&vvIc-P^yh)^wW8W>3IQ=n7Jblg{L|^5y0|o2a}Z zb=dH%=J^Wd1LH(w01d1SF`c1CDhOby!mu%5N#Hk*P_=%qRHls-jP#W;6w^8z1>{dg z4~SqxGWCJan)lc`je5Ma+n`7B|0H%^P-1nCQQ0c5Z#StX33oM$?Gfupo@+k*)$F+aZP{Um2uk(d{}pDUlp zvLjZbWMZ7oQdVHg#Mo27?NtW%x-bm&DC*gk46Zg>1`c_D0m;s$DN?$lQmIBB zN{}G=#iHEf`G?ws#Owv*16L?7A9#pNi-&-N_#le*(^MiC4gdkc5Trz3kOen?%B2{I z9V+C@=h(2TDdcNEwZcPtI^fP*uL)I+z&nklY#t>-aoHRD*W}uwE5oQ}Z2ko~>717D!hHzl0p8?)9^>*91{zea)r^J3YVZAY)w5mypMUkWmI9`wKXGVw z3eeJ-)!`gCK&s!yT%oAnPAJgiCt#3&c`r%i%>x<40-KEP(#`TV4HSoiQ_I^TL2yeq z4V2I`28F@@!-GB`VWZJr%MY(E5*$FJM7)tq4)>c=-P>rq-y7V@14FRdaK#=t`7}e; zja`s7y5MT@#kwM7{NLW-i#i}njGN;i0m>30!NJ5AMj&8d8Kj&%kP_uSGfOC3S9`)g za{@O~*-*>q>6steI0Ufqqb@F>d#PsS%xC2K`h_ev!5Tase{%d(TC=OJ8?WCykqQF?rSIL!Fmqe$wAXY$<^%mYdjo& zj*u|QXB^1(61=xXu_!KM^T9O1-N zFQM?=qi_RNw!3a8{PyTSjX$gX_#sfPBvd@romMh#)^HFCtYhZbO`gQ48}7V^ByDa5 z@mD=mB5+=lLBi(4ax+)aEno=;BZ=zVf3B)ym7Uxss@>$QamCnstA;drSDwBck^e1& z3y;l=#ji{UV*Y(NDk&$hZL!g;_JrS|KO|dtqCJ+Dj=nv^gj!joh@s=m%#7Eq~#X zq5t6r+~v918;2P(2Nqb$D$sVtWpn+4g2(5M#N7ru_M;RyqS^Lfy_C;x>4))#87ahB z8c#&-fb90o_a`oA2+(SEwD%^${>WP)NmyzH9OQmJ{@}%FN`ti{Vd$hosymPq<^)me 
zaa*jZ2I?zLH7{~SJcN*DZybB?>%Q(tapVH}K;WW}9&-Td&hO_Xnp3{xcO=jyDw05(_a+em>2_rKD>iuzCw&YIoT)&s}n zJ}hOyef?S9jap*fG7{7uDl7Gs^o7YAQRU>)KjF10F2<$gBzJR_#v{5-wxya(Y58 zv9=fS`?T7l2=v^Pk)!C6Ba3U2HkEA1ZgM!?qM7(m>0{=ZNP|ln#67O zydXDc&tL*lo=vtKnBQV)0}+2$&h)_a_8t+}4Mh{=QcNiQMjSjR%dg+Vej$%~;siXCbG1zlN5OkF1Sk%@$Xezm&_Ut(svOY9her99 z63m}y74&XEzcTz1pbZX{GlWFGNi=$Ki5qQq-PbEH|c%`ka z{-}78&VuPpw;uEhmi%T9WAN3J^ahs|+R~d2LHvj^sagjt^2FC0>xhCp0`BakCE3VYlCV&L+m{=P+Rmsc|L z5N=K4sjpBjIBcWA$tN?|^=vRaI*1(<#>*{H5Rx6pqr(L7ii)P6LvP1LtO}5ty))lu z8Z36b8re@GZ4HXve*pja%-h&)1pv`^upe#Hb53*i>60+6qexfm?uAw{$>34UWm|xZ z@A31V!Q9HKKz7L>${UEg^I0E$IpVFGtAfv7=#gtW;C3qB^U>wF_Y*5x*6T2+x<2a* z!M|F-o8lr83XsI4w`Dl;qaD-Cz$bAX-@q#pZUT>fyQDyBdbkKFE+vKQwbeWGGlRZa z;sN{l`K?X|DYDC^HE?4PIxQkkr)&Z8!SqvRThr%MuR@h#z^np#Knl5qg{3a}D-O=_ zG;n3anBLk*3MWPyuyE!O@Hpg{%wfV+LBmQltN*1x`cfqB+|I_kf&)p|%qtXbqO};z z@}5#9%V^}>&}_s-=U)-H!FYfKXH|Dq%oizBfKc`!EErAp4aAKS5}p;Int<*Rhws_g zGtxErzAFM$)yqrWbKH9VE_xG!K$R^s`hZ?70RSY(Z2w6sV2~)c&^0%K!o@X*jiD>1 zIb5BI^rKi(Eml(WrZgC*LQyS{u+H%Q1YH4SQr!wucXea6%Ok5#3a}8B|Q)w?<9Q#ApE)| z^=YS~z$daoOEP^Eq2To93!v^x+){Vf5KKMm>~z-j#3fot0&RAIyFN159&qbIbxm*^ zexO}qCP(~qsW7mH%9UKxODM{H*B>KTccK5x`XHs{llyKacR^|3?=D-rhfad(IkV;- z=ckfB=MrroJwE!&ovu2d4#9KYg1qrI8tHMYYi)Tbvpc2ve8)X^hvNeP8If-%2UeEp zTqT5a-}X!zP;NLgE}&5iQ)B(4)h3zAXH~C13L0Yz>y*(? zTQ2IEmK?A>>HV<+m+KP&HnOllMWx*yU|H=3m@Y>x*{kln4sNhHTrqvf><9~%6-Jh6 zJ3O&R?{uC$&O%)Mb=3z)5}qr)^eL1uj+p_^#)#4QwH_oo4}HS=fM|w334()NwxAEw z*x;O=+-~+H3}(A+;xV{8Cbi<)0P0nRA9KPqJ_H`Fd)vKX!}*e=BK>|KE(XvmL8rSP ziw+T5LM=+Vw<40L}a1f2ra2u5mk}pA<()* zTt8LCEiihoLGF-NyV=)SaJypaCN;>ClRwDLJz{Fe{9*<#9c-yVU^}g+(^AWZpsZib@g6J8FXhEDJ6@j`zIsL1*;$^H;#plO4GyPoWO6!P0eW7Zv?_D%f3?k zVAAg?W>K=3osm}uh}Afo>ISv1y0BpNI+`T(PgTx+1_XU=dH1=GU#~i~BN_>3?ZVmy zgD-C{YtPZ%j$}5p{%?>$1$u-&QU@6JX0B1f)|0V}R`y(eNJ^`*Nf#}ou? 
z_#g=y22s?3&J*e|olJEzPeM*q-Gg%a`1fPuzueM^{3}t1D0b)ER%>oPJ{#WEM#m~A zyk6+sZ5dW2Xy`Z%BKG&0G2%B8SXWKX`B*^pL7dhIG`L&j>X=?mkP{)6SSK zA)bD95-@F`tIGG%aWjIq0xkuZJ!aoXUPnDB*Nn*(qrEwu$6Y;+7c0CRl>q0k}Ki40xcj zJxYM9<$&eZt0ZTe#dBr`ZO-8Imiz4&$7U<=K7vPpAfy3*Q&c|$c0^Y!fp5c$ZQnZ5 zd__jjSsRq?p>~$(vY+yMuzFw`^5X{04j0XacceyEzq9Cu1%`i(H=0(}swY!qx?m2>pSlk{9z=2n*-;c-c(coIB+qdzQ=DtK-TsjHRSE zM_4>9?l8FTYZ1KS3`ERg1ghu~su?am#y-qDUP~U7EVT?s8UF;C8#T~!J*U=Z`O`(B zWtzk!gqbL$ZcT8-@xy;2@)@bfnj_UItRO|huu`N30l4a#0tc*{TRSw?lVyg8dL}aY zrZHg>ku9MJ0~Mj)0@dX*D;kpoX?IY@dwn|~!=H@sAOh+A_6kD7PQ}8?_X%Oh+1_(9 zOoYmnLJmT{*m@3tO;)QA>zV!xjY^STSmq9;3-m_wG=3wsTHzz7xHo5DZo~eS4_NW` zG;iPHuayr>K(Ne;&JnDWfQ ziR)@ZYDV@M`YSjY^#{*yC396^emLYox3&X!i#}}svb4lY6RF4J66v{X>jiF}56_}% znoxkHgvYtGS^qqlUobg%fT*YVyXoTXS;23+ozH9OwG?v_X&mO9k6xs_fVVfye_k{~ z;(m`0N^6boiS1QpB3>Hwcv!#c-0D&x+wVnW28quLLr% z&fM|xPkdzeyQ$iWbGJjvO`DuZ*w=mM@X6C7SXErO>H2NO|MIv-jN@SnKfbNbm=5+c7r-PzwiW37Bur z!49Ovy!!DZ-H;(+3JPF;z_sK;LewyVF%^?c>{!)fHR*ka&+cmwbcH9cc zq}9Z{qtMFP6kjBpIl3$pjdmn8mSWd?(t)G&R{L5iuhb(Zwx{eq!LeK^SNmkd8UKpH zye{RXQ9D#H9OX%79Q!I@#Q6fpcs`zxV4qxsa`mUiFS>RF%1OVb*s-V`t<6lp@ zO2Ew5nOi_HoM?boI_4TQGX3zotf=T*c-o&V62|fKB5}v(yf9|!Ug~AvmHAg;c@mf5 zlYG^c!reG1IhZCoF4ix({r&~hn=RSj6O@O(Lg=iKg4IUH(xl{Vr_NF|aJe72>iZOu z_r*au?A>eCdY!5-wLf#NULuULIt-Q}{P4s$_YQ5hI~A&DRxDW>jpSkEqjaPY6jOON zxpjs7qxuPeT^|^+6qzho=s`O5@NIusS4mK*-cm|qF;V%K-EC75$xX-ASLwsGkmehw z+%^*e1rZ1MzW#oSj_q%o&ankiW|??d%6Xg998*11C>HIFrgn~d?;A3ZZeGT4ktcEq zh6;5V4$--I2yb@ut8lWo6dfn6Vm#JoGc~Eu6M3*>BVe^)9uzwD{v97Aj(n4V2|pvu ztV1EddxOZKRa{3e;YW?DEVX(XNlaiPU*RiQCawsFuG=)Fm?9Di?a5!B4kXj20 zpvdGFrn*xHRp-LlYnKU-BT@Q3p8yZKL>u+XYF_gNKqS2bzQpd1HtX;^JCXNk5;*eB z^fe30m?zENv?;`SZ#9$%d+C zXzKc!kG|+}Jdb^kP2kTL2ll4z!$?>L0aPGFx?Nzs={5&L3u?L>!Fz(=WPGy8{&0Q& zu=^04)!N0m#859)O>d#<-H_Xbg_Drx)Ah9(W{?4SpdPOR!hpz-6YJ^xG+K(8d{8a@ z=no$ixpBj#`U}~A6)as084y_P1e!__;^Y>QomvcDdqVMh{!>gLR`(CB5#L1Sfa%&x zqE!0_hQ9B#wiF#x!NL&uS@|jNWx>hsouD!9~?C4|1D}HhlBSzax7m*C4B$q7njcLU&B( 
z3}(ptMR}}KE`WNSskO@Ut6904T=5#obcLhk;D^$e*QUXAZbt&?aHJI}Mg03_B*7q% zQ}iB97K?Bx6@FU6Stg^A*juM`$KbYfyb${#2J?>kM2(1xm>Iv8AQ zR6sjxQ$=jRWGJidT3vw!_PFeNu9Wcgb7=0CXot3JVi=U+^9J3!LpVK zHHqt%gFmq;KzAd59UzsoM8+WvySIiJ4sF4EEKU>2ZhMdg(0^-VC9b{4C^zB*ja^nKD4viuZUn$7>sNx@q7iSMr|` zf$=fRJQVzO%+i!Fb2=5H^y*6zQF8}IzXq)LWe32s9nO3^jPmErpOLw6%EgO6gPKP3 z5n2!@F8Q4%wl-gj9h+d4c3)Lk50B{`j*ErTeo;L=YH?S&!bAczGKWJw@xw4M;c#km zQ4A=i2k=wl5{ePna5e;kt%74{_|h0oGi6Cz=5mnCoPnN(cg}kByYT)y-#7j!e1TC~ zl`I+3mBS5me)-UXEO(RWiBxM!x${J};`lgH^x~=bps{tChD>ov#78p!*b2gz1jzbw z$^1Wwh4rhDlZ@_f7<%R^=mwSTiZ@4cPxiB{W?I?MDdc=eSy$5h*|Sg;iV;}sFIVKs zlYNuh=H65Bvdb}Xb=278s@5uJtU>JI`$8poac6Lc3G3rKf1ECyKe8qA8|m;9H%Dr) zSKbmFGN(E6o*E&@Wcl0$B)8aYAyHbNd)Ci*b2dVDN^sj!!K`0Jr^TEKw?r)lmCD)y zraEa)QTByB`__}wK157p3;{2h&r>Pus*@pOb%R|~$%e*>J91^zO=C+k9N-4AG*<99 zu$Vm$1S@UA-sfsUo*J!;CCRa1walZ>lOE#+JC4T`tL|h$V@4V<373AxkD=`GcVkYu z&53q2>uU*f8CS&Tak!>x`^~JnOCDFS!_t92L*Qp6^d8A_jj-ik$^24)OC_hTH=q^5 zb)`@VML}gfhmI?yS#wOlH#0ORgURkmF)OD@-*v;#g$)w)1b6oBgQKjIg?Nv{O1QT3 zva7!Fodq5@98`P9EU3V8*PJ*~bRZKK()h;fa~i4nAf;cViGph+kUjrK0F1QaJMa_v zZRKZC#PK@fp!ZSdQqwS}Rh($ac>Yq7YIfQ~0IbZn9O&6#QS0k}C%;PyQW6t~oW4Yl zD>e}2`Vn6w`dauZ@xF3LDOwgb17nMJWFN^cg(u_~#;L%xFT?8o2m^~u`;+1nS43P@ zDi2X#N3jW_j*ute4efrn;+&^Q@qaOQmQis&+nNsqr*U@)7Tn!6I0Oyu?(Xi5I|O%^ z1a~LF-QC??r*rPiy=R^LXRY}-U+6EaUj0_RRkdsH-+rFL`-5qQ{b|TfI}ZnU2HDpF z{$})YsaMK(nSJjbDjm2hCu+D>@Ax8wGeLTCi>Vz*9#KYo`O8`+1DJh+r9J+LBP^8O zcuS{zb4>;X(ToxW>64%AwH8)eu`#8R)rxh8zl`A`mE-r4L^N2sJuc&-(dFqU%YJ_p z`(Q0$vb7lZjpyqiiqhe|V5_SUc27f@VXm(;2Mcl(O}2xMeIOg~ZS`Hb}DHIk3^PsC?AHxgnhh|M@D z!Nk_EA!bf)EQw2lIYt+=pxk_Mef37%nq-{boT$L{Rp3Ic2ZE1)2c~?I6n5HjSFVAI zGQBPodBiUi^a~K5y(;#l30xyJS8LSdV9gWe$;m1}%J_r?i)Bm%D@0(fy8nqT^n#Ze z(5+7rrd)<5Z)>t2wABv`zsBjV^+AfnVFO>aXc441O_|3c-xx|3OBHyRSfQH(`y_D} z_*nUA!TvKGQB0=aWjkK)+6rss~K4ch$3~=FD8KoWBnUunkeldtp+<6 zhzjnf153`D_E&o>n}k6%C2rV~#^Od$p2Q%V_x0Q_v1xU9AaE2^V~uZi_%fnH(I}TG z5>;~F39S>JPS$9A{xyw1MSc4{&$;!9O6}<5 
zOtl(OeR(4Cyk^Yta_4}nW6DOu^#>W}`>u_>Bt3F0Y3VNvQ=~gjC?gmVpxMsn*-j6F*CnCs1I$a2K%4fKOv1=>j?f03d zGT9TCq1Oj7mD)WYIK0#x!b!2?g@)u&0vf&q zqP=sLw1)Z@w7<$ZAK;CU-6%iSUGRV)rG9zNBa9D@(h{j@ze!Sx7tC^0LrF)<2txpP zF}2#3JyMARk_>_DV4LHF+iRzIq<+nQXr>I?`hLPEmQXB8pU6s3b5zY0o?bP~7IWb> z*v!K$&ea4Jd1z3=$cVBNn`&G?s#d*yTvOy$=Ni(dkjrxLh!X$E*`a z9<;zwe5i26#r!H9=I(-!B~TT#|GSkDy*60KJUspMxyxBgeumcgd--+HI)A*8J0D|G zA=Fm^R$NGQI8!GAX4vh42BDeK>{^Fe+a^paIB84$O!f?UNr%nS%w{e{HJ|7!S7GjDuGGTUUbI~+W zf=PAAeOzo}m<+`Y>_W?GyzD0ndJpX0AE2yO+6eZ&k_-e4Q-?-StJ`mu4;I`|?vO{M z+4^HQMHoxk>pJPV1U2EZuRbyM_I~unO_W0m!`*tVc{{eP)eEL9nN=7`-}M)Y8VbLl zGhgR&Oxg0d6uL;=^w+1Oj(DamMVIF;COIO+^kRVq`Ye77^tN_7ox~VDY9co8@PSFd z*5dI5r<8Zkm4Q~1d9-3)YCBJL%XF3!aHCHJ1`E7G$-es7Wj~Tbv3ny?427!KnvskJ zVGfpDCQNLKq!etZaqAQ66ey7}Z+t#mX$o+}8r$AkEO!HWP}Nt`^}U!)4A5*6_Y7hZ z@Z@|sP{HT%0&oc5GLj8Ei-!x*4AplB{YGP?@67(qdjoVe{%b3A`}GAgQ{XE)$DcOG~#xBw>xO!5P^t1KPiF$?BLNJNst>7#6hp@IU1D7h0u zKcN}h-=nm8=Nn-L{h6D3rU)#xFM3NUVD^f+TQ6~8SQzX@dAOfJ(tdd1?wzP2q)L{a ziFWq>?S;IbdB1qZYQ2i)p;Em(Dm^^#3hOD;%2a4w_{@T&%m%>rC>TEI951A4N`;Wh zp<<|NR08hAg7bj{tcw0Ha1nxSYsyWO_nH~1M?aNeN@p$ft#y^Hjsa-(;6naFVUuBO z*fKkk^Ta@Fx?Kvc>I^jN2sju~Jv(ftqC5gp2(-&IH%rveB+@6Gxnta(lCbUSa z#C)E$l)kK4v2zQ%sMPIcHt*rE{>3)(=z<86#)veiU&@s~&nnTJyDJPnu+;LxH!z&F z^Qxveny(Ixh|U|5fy3)R_j+3jJKv}8S<_U-es~0Uadog9+_})cTtb{2-47ZyMpGHn zWW{8JBrL9RZ7oWMg_2UE>g(r~De&^(X2Q@xc80Iu-$jdUYUOc88VYZm6MYCC`|^+; zagEn2%G#q2Q94!snWQQz9KEEd?dnCQ2tVu}^L2aR=SF_XTw*r-k{7 z-&4Z`?xJ))of!|R66L*k&u96J3nqzs`)!bE>o@hVoS%X^TkeuxE(JF- zZIP%VCk6ZBG8QQhJg8CfFY-^+yT;)Hls0b>zH2?ER~YS^x_@8y(Gfv;CAdN74b>M) zNF6A%`yoJZYYFLdf#SE+PMAWFe4HDUz(`uCO6ypz6a&XdIuj&885UMAZ9P|s4t)Q) zz{ucAzvbmYVD!UOFTpm?Oi*RoA-wEV|JL4ms2ziF2(Ii>UOIN!b6(y^tlu&8TPW9c z@|LKZd7ZTiWXJ|~V(B~v8pX|kCn6?8qqn{2TgX3Ti`M?7_|R@S#`iW40s4`?hj{wG z`btx0s>z3dBwb6zxuKK8M7~v>E}jri8g->nIN^qZrQAiGGFHxE1qdC`X=P z<-XrS6qc^!*@wnIuz*K+mWIba; zwaGgwmot_)z<6`Bd#8{)(TQ30#Se_p7O52!WN<1|Ljz%xk@rJi%AF`i!HT5Ag60+F zD_)F0(F=qRNLYL3nBHfVY#Dqsz7WCZc0=}k!pR3kXm-a-Fk*DZ<{?-K^iZaa41M?T 
zYMVQT5;tiZ(Kt#_c^a2Dj$OMf2t?8UG!3MncV#UP*e~n7{AU^@>gp$&)}X1(QK7{m zbu^|hhH8ix5-S4sGNE#`flnpM15EZ~wPDYLHO|NEFQ>Nvs832{RQ-7X(tUQiN$hL@ zj^7(Wd51cVs>TN)47BLbM*8hv-Ibh<*G#AziRyesYJ)79IL7Z#1|6)1|8#lxx|gp(Kig{0iR&n6OE~ z50bR`G+B`tuI|l@X@CHonx58Mn+h+LXeWeXDXplITvyG`jt^!4IduTRx8uMPtG>x% zpCq4F*Ha9iky$Yf=uNzvDn&!apjU-OKt!SGr3l-`>w~Mtw4>I6E0k*P8XMvrejNEy z-e*Xp%d3`_tMzzAr4+Q!FYS%-==m4-0fxEpA?h3qlZx@rd(jIze?@3XCyI4ov4Lew zwRSY9YE~H1IeVssDRlvbI@MT%l-PJc4E=sD^KcqRFGosFa;2l7k zZGU86Gy*$K{k>2b%~TYq^X=@Py=XNrX$X@KqF~Vkdi|1BHjPXn(_c+bz5CBOK^6sf8c`#bo-6TDl~-g4wEFg z^RS%%leiyk?E0T6X`~{GfeaEHnbzd=J|jJY>h>{EWLBuZlbD&;vN_7AX zcm4A}%<4t*A38Q!i^^tinVh)N4<{cT2#FobiG&l*^&8+3FtUQGYS8_G3u)79Dm{2~rOLSqv08C!Z}FznbZ9$sy~NOoSz31%im zjk|=fg!Wv}{Jwkr`jqT$=lFNZ3hob<8@x4zof8cdzHsz~2$m4BwFoz$FI4DRgiSUn zA}ty^5t}GQ%TX33p(TS5>h_0;`6^t9hBo_sF-N_0ib}TwkU;?K8d27OWM*Las4NAU zhYr_Q#yVlarqE9*XlM``zr{0(i4gAb+Hihs>0#RaE z!&pCyUzr%iOe}b#`}Uf%##2B}w>b)?J+6e4C4id*5f(RKWd&T88=)@@MdWhQQ;cL> zkqs0~)HXHy!Z%p2$I!e@hGA@`sVcXoC0irSGgUp*d6TOb*&3>bdVCkaI`aq=Qr^G` zFKZ1bZSy?!+@!lD)26L@uSJE&s_OEeSW0EJgjxw@4ai&}19m;ooly{fL}fFaA58T(K?8y+duSeXU&q#%4{6 zt@}fN@6U(JPWlbZq;ck@wuqu9Gflh#{U?y8=*xRK* z+x?OLB@vF?Nt}43y#Rlu&K|)1X2}*YB-?MI1gJIN%7LkKEW`O`L8=>v8MA4uZOSMK z!{b5qdIKDG{3|K`XgVm_OoW(2o02WF%PZ{aRjrSxiBf$7)cwu#dWSLP_8TFnL@}3L zGBKY^G>4$h1${y;mY*-Lt1ayZ6tJp3iRVb1#>ehD4(cB3xSbxWbGl5F2iP0zejU+m zK{OT4%PFSD<`Zd=_sp%Z)(KeanG~>r{CidU%7YMdVLRDRAQ`_l+x@M1?-Z1BS-1(A z(P8~xDL)xce+rctWm5}+bUeP;3L;H(-7i(_`#eDG(1$K4u4A=A&vhOEYv^vzUWE(0i54eVbcV!4EcXaJ)1u zA6R&KCyIp*y^tn!_I2FsmDn8nl@aq`kH;b-U` zZIoF$*is_#{s!=4sX)t0zX|0_D2O5jb$LUbK?e!aG>Y|G)GL;I7^BxqYR2UwT=v|r zk9Eg8hawkGf?>26?46{g^4HK?83ir;>gscEAvHuWAY?O5q*g(_n=_AzTc)Rk&>DEL z?bou#i^-W8%xAI;Nqw)d`R%Ir#!_ow^gu_eeL;}?dXB`&h8JAjw_*#4lC#Nn7QCV} z$Wc#pxMg=dj-h_FEws@tZWn#gz4;nyuUjsnspg83)0{8)jVE=7@7P;Od*}M^O3;hM zpB;}DOSR&nM71=Qc+H3`Ft`wMWCw>*c|Js||HM`__U9Yp%vV3v4jL1$O!S<^p?K=q zFZjSdDF|`#fvrqk0x}5gC4bwF?=c7x-V;nr7jB+=0-s=a`iBqLwEFT8U{RV}9wmGHgE7s^D0R-ReUKZJPGqM@29wtdG4}gUgyEFR!6;6I 
zvitO)YLljXO}c%r>LX7Yx2Cwi5944B&=TVCatCm;p>Cn&OdW~Y82IV5AsA#)WrpKe zRe+?;N%rVjP)VYx!7gE`OTgP|^d?Cw)_hQL6`KR$GB5gPKBvxHi>Ut=J&ca~JVHmO zZ=N-tPz!tqi?7$rMdIy=sThrmnaqJB8@XLFazT8q~f;FgsP;1x4tk~^JkOUnyuoFJI z`Th((lG%w$No1~2n~yHll5Yz(LX(nkcggX`lnJXxi@cIoi&nh z5_u~xUW09Lls$!y?azSTVp*ErA!VdaY08WFQt2koUGA|go1 zT<;w~b1k8VJ7k^NGG--iQm7bzbntNJ;Hmm1WVU1z4V>u@urW!OgFXF1w0wdkSe6gi?lb5mcF zp!uy8VI7nRd(+TV*Z#g=Zmgq$0>Sm4V>dFnI;|I=3$x7-+x|rs?#0=dB-T z8dES4-H0&})k9|Y^C#_t@F?!gXfvYh-Z!X69^ar(+HoPahgKOyG!pY-nryEX`tOH| zE*SofM%9V`@I1Sy^qnq1gtnM+9X0#MDaaE6HwY|SK9pWgF;-PMT14VeYxXZ1_#a?y zJA!#!y^)cxU|R1_x;K?La?q0G{qUVzZ^Xx|2o%ROl+UoS#Ab1c@-#4*Qj}Zt!<%g# zea3u?Bqmmmn_O1mIc&AMx_bxh`gy2o$1(8gMXv8pkarzF){Z$(R!K9kR;u8LMH<-o zoK`W~Ja@uD^>ExIPZLdotJt~5I%#eD3w zvwycaS@s3UqxC6#^{1j)MsI(<>l!4bWx)^tz5p7G*9Jpp%kv`J#SA{A;2meI!h<&d z*pOy(_qpd-enJkU>b|Mn*4%0?Y%~f4vGysK*oYDE`2Cq>ryKe@W9}{y4S=Db$U+Bv zd5ekcz(3S|s3O4)9GrdHg}~ZXW$lN|;5y#1WC@r2cbL&iYa&Jc#927B3b3Q-Yn{2H z(zgt^sfNz+Wt~r@9GcCiT-;(pWHlvR1M;cFOUVis#A)dT4aRC-JaM)Tul5#a3M1&! 
z(r0sO*H7{&nQ*7}uK>995s8zE=uXFsvbZx(V%c<7Cnc#rS-g3>l3X&Hq`D*2ry^M6 z0JvAL*S@R;i=JdGD8?>5gLxYA=|8uJ;E75HL}%@Lx0-Lh{iTn%Je^dx-R28_T>@y3Yi{dRu02QEhHoM8i^8wz~4`dduL|hFj4SkAjvh?sXMW%G9NcrU9?Bc-Qeu?j{F%3LPnhSjR#4j>I}Vr8)4;>aH8bgloxczE$3G|;ztG+-BThgpt#ATh83P1p=X3$H zSX&$s;?vTV=Iz%f`c0V~>Ri^d_m9;C8EZfKqS<#NvO0g)AGTZIMkFVe~$G} z2}M~-Jd4GABD!y_X$>}uQksnv?RR*i1XWd{t|UxgQwy7K`y36rGy&7P54>D#a0F zb7UM+>GL4^kU4Mk=8J>=-KIRalq!SN)aqb*3 zTQJiF!c%d+&|r#%MTJusYYCU-W*^EYUuUy48QC*@kr=75)&V%K^{H8X2A!c^{vD3T zVstHtMZtJ)Ilg0A`HuTjo#WHN)6iyX7iY@9dsu71YxLiLJ6)LAbzpbo`9r?Yd<P$)*_55JBnz-5T`l_<8^HLi<7kI$B?8Mi#TlM3Cdf%;NQ1K{3T*Hrq4Z)QIn# z$cV4YBUgWaf4p!mv2k0fD;-krv`#U@B9vJ=@zNnvh(!_N1T!sbY*lCahbwdu&T=5v z#McnJbap~fN5u6)PIL0HJ~a>ynR{+2Lm5Q&idj3$lVKat4KHBRYlI`bD4=!kpv^!SsQ}c)sgMJ0;8GY-o7ksFcC=-KNJLe5)2)B+`-JF?-%7iyFn}%bZG33|2 z8-K>v!O{BBghh4_Snk;J`8CJbh{bFyP-`dHRUgP&ugi19u18JKautcH!}U!ETzi#0 zs)ce{)rupYJ?o167I?vY*aelDPTDJS5S~rM=W!tYCZvy^5-Z5GDRi^&%)i?19eTEU zP0?v8R1l_OOsIZXpO_O_@IgSg#e17uyWJzIYQ0Hz&F9UaU#-+Z0YskXHM91WY)#$-VzIekxLIf!^VHcHtB@bhVn%K?9GrOWh_|@OR zd`oowF|*Aln=stv`4L*FfHQ`k^0RIPrSIRhRVvnh&4Y9tXdVn6^g~W{P*QiAEkSco z{@qlmk-EXi;*BKg*#wIZ2v;P4)vQ!JmT!LP|GkvDUgzDadO8KLDGxVNihbr5U)!h!bqe9#-dgnlM#BGnL{e=Z7C8W(|tSF1E zQm5kVThXq9#&e3R!$dx+_;l$qw8PQfp3r_)1XtUY`b=UsxrnZ=hUvl`#=Xd02t&;C z)B2$##@~!&zd;(I!ap#QvoDU4Jm#eB=DiW$W;#?HEfk9B5nf*KmfR1JE1$;#L!3_K zR$HA5qpdX*V!#Z|`?DN(u+r{4@ain!JY7-{Z89Vb1x|$sc$|yqwOhaLN1v9szfZC{ zzA||rV~gO*a%Qg91(-w<3ZWIt<%TCvOvaMrA1r6POq_lwYznQrIXKbL;PvE_;lK7K2DcCDU%Pxg_`)Z{4mF(&q_Zy`V`xmtX5*(E22$6JOdCPUIPdW9@l z>U$}K@2|h}u}%NV$IeUOsusbuiJlp@gfUV!<4OSpGHQ3*9s)H#z1hi288NTQK~NZ) z{J|ef+E7%Krr00vrh+EcY*$L6)fcDQ#2KDU_SqsX?(@!9J(Pxh63pk5_Ww-pe#Mj> z&*Z)zFmF6l_l~QXA*Qh7pr3s!cFJYyqSFsPi?v9}IsQ9EsNtbkfi+tklKHTVTG^(a z$bi9@oVp)|+Ds=mjn`}FGy%XAW{;`9(5ccW!kOBTK$w97$&+29Yel3dm-%lIVe;*j z{x5H9g1t-16yF>eP49Fl)7v-kKcm(aOp8uT?xK~7eYQN}+O%xB;pLVG(vy)cfXX~ZbHx0t}|*htBef0Y~m9v{D_2dvKGfE_mUx$Mw;5b zU~F-{gs|4S!SG%xr&FQQjJE;>*1%&K?rTP^l?3i9uI6ZQ*VovgWCn@bWvAjvUD{l4 
zN}KrgZRneM|8=w7p0t_LT?r%l#sUpG?u6%ipxNUU(cvIl>%ok3XP*j}l5%##pb2o`qAMV`ebG-Af| zhLqdokbAY`J}lsq0ZVDakTGR_Zk=lb-fVrM(&JrK-CZJ*d-lqgUherqu<5-7HHEXW z!tj_pzv7Oa+j$Dy$)ElQjt*4GD=GgX3Ym83Q~~YEA&_y|;{@Ct`f#@uG}N1sg?ch= z3EuQtonkSsfqf06yffvWFXnVvt&Lo%b^1bN`j4WM@A^y6IbFHE-Z1$PU{FQQg@3&_ zGh{w`V?-Tj0r6?0Buz{t5&P5Qk1I}{l$1#etBHXgb^$A)b)=fW9ZN&i8%39Si9+7#@gVl<9%J`*h7I5v}u@pD9;ttC#JM6X$d@-`z zjr3*y;#>-KF!}_$7fCb;L zKRAzapYS-Nf%%ib#3+O3VTT&AQ|)O1q8l(KEt_aO*y?-yt-oJ9!!kfX6k1WTE#XIx z^((fevPBZ$>K~LSV$W4VRTvv+ytH*tfvGjn_Pjg_Y}wQzVsrf<8nGqlj+Rz@a65sB zJYU7W+bd%9sJzDBaCL#6t5}Xm;K&Z|fFPlzCKTTy7B#V>(!rODW;a`MX%ClHlLOwSR6S=gTiJ|l#N6pozE&xcLZ^=ROx0Imtr)WL9O=%@ zPD4c?Ii-78lW7~bb!zW7Yw_Si+)4dRmrIP3hS6Hwk$o#z% zI(M`rJoRdkvP5Hs`Y;RQRhgeR91KUFP0_xhzXKsR3o^vYO;WEt;?RO;4=$|#=8!E06Te^@b?_71xfQYd z!M>{Il0PV0^)=wy-vYhX!B0zSY3Hz!fVP1Em`-km8VL7q?Bd3Mvx`j;j%m0gLP;O$ zr2?nU=Y@w6wI|9XMU*`|!6TNwg})iif$OhymL0E>5r_gETFw2yVpF;bhuIE1Nd)r3|?6ydTnL0X{N+T5dl5PFX6dEgYg zxSO3G$(~myjAzxMV!1Vk(`yrs7OiX_PTz1J#ARdkW?wj3Q(g&`aP#L>-Xe1RQ5n?g zTy}bJ9f?N$MUX+jE2C81BzDqqV}EhJfJm|U>}D)IZvtW|Ygik%y*N)zZkiArT5 z4TFFv?38}`6_>%90i45-2Axh7oc>PLyv*Qh!_cVOR5|k7%V#eyLJ~%5ObS(cw@#3C z2G#b5>z$#Tc^-NadeRU6LMUPL-y`^?Uz665y(fVD2?swh=kCNdNvTIRzK_w}Kitn<1CS6h1j*O;j%=kBSAub@xSkwKM8VnMP zE$N`zmQ?k={(fO&4M06EtKjOAF$rewm#6jz4w5SrA59ACWuL|9WfTq8V;)RhIai6{ zqe6Xh@cNtTk&mqK2iu0ZlOLI-X~NuJbk>Rwk-7x_HVH?U`opp98h;{6at^`}y&y}-{iaGDxHXB(b5j2SLr2$+(5PNEw;9Gh zX&~mMfqGSQJsm!N1wZ45&$_7FP*Ed&J_pSE%tZq2WW_4oSuwbI|Gw0em06nPoJ;_%qY=22(#meV@X5 zA}MQ_N8f^%k0+`%dt?^+J#~f6!eeUQWe00o<2LsPQpRgR=m!g{wN|j?_EyqPt3X9(X1a z!owW6wutk+vB9e-p9iO*lm`b3+r*zg{o;m*LX+tS;xRNgpLJ*=AFSZCcxJ>UHQ{R? 
z{0NVl^t9HRaV?QT@?8Yf0OyJyO$-(s4idgEG}#k0tj*=UQhvH{zkjk*crX}`0oHmf z>%!2iLIhGqc3;pVC=7H%?KzZED*<4mT7cvRDE?vsy5h$qHmU61RAuVC{X;}PeU9IZ zo8;_7ocBq;jnt>8QT@|n=70ia8me#n^q|M=fWcN)baOjrkA7>wZ1oWc!Yf0Xs8^E* zoJ>Lysn9{gc}6VnzxT9cdfMG&ARX18N9Y6!h%I}tDV7${C3ei$SU*Zx@ty@AOx;f8 zo4VduUGZ0ZhC-?)?Ai>J(7Vw=0^qR$=%0{lnGe=EU5;!sI2{pwlxZ`qt`Y=(Z--{t z=0s^29SrCf012jkj!?Gp8_VR*F&w5S(XK?5;t5Z(b0*=eaez#wT@46>lV!A<5u8cX z?Oj{9=`P$PuB{O^O+@e38CaaY3XVYfTNzzJ@XrMwnU)AdSrPj}+%Uz|8V_Sy@q0l* zF$Z9TV+bI4FlDQA0Nx5%4sLwhJP7-(YXyU9zkxd$H29QFp!S9WMBhU&^yF}Z_&mdg zi{E-w?IR^}Ywb`;l1objfeDLw8n1f?^UZt~Dm6XL1X&AS2V}y9V!T@-C~5_WP{1VX zQ0Ec~)drQ#vA9{#DrKk&))1t}d}c(sKEimdVS)0wg5~y6hr=xL2y*w-Umx!@xKHEx zQu8ygIrVF$*C_kJ`=KCfnIc@WK^WCPFm!A5f5^wG8zV5Hc6%fEh6U1q)_wu#Mw_e>Gf7QRPD{aB0i?FEMOBNFA@1%v3)KCl!F2S)|rL z4#XmdQ$^|0UYx^x8UIPjjIMv_SJ{phv6$X13bFk%6zE4!*iEvPHaEO^umeK_&&7#1 z9!Poq?{Lh;*q_Q)r+y7M(?>!VI2XiJucp}q)#AziRQxhTDLyoEML z2D$Rg$xeBqn{q|eWae!ErIbezLil*SGSlyIC#XCuB5VV%+G$5TIlhV9$3eb{27MLu zdNwSO4^sj|>;vAv4GtKl8^RyF;Ukd4`x?App7^5M{iVeO&BgP0axX!iDOXcTncCno zuSK{zTtZoy44uh-0@Qm#-r=7|E}iRSZkk#QyR_z&4JQU)C$+-u_@UEkAsqVY3Y*gi z+oQ20gcMUNoI;!EGssx%hVekm?UN5Xs(wW{j3HO#uVs)|16=@-PR|p9X3en;IIiAT7~kN@N)>Zjh(OHg?%j6`W8L1UxU-4 z%%)5Q@OFNjNd8LxpoDt#yu$T|)u*)Veb-|TN8A5dX2hL;08$1?{;^JCJwaeLL)3q3 z0sPc_aG!Ok;K7Mj&5rB}BC%Fj{EhiT*%w*-F{^v8a@p=+#G6ViEIDVkNADz3r^;}H zyN*Um-GM4`v45)Xv)%QMT!qGQIODSiHlGV%$y*sE`6@C=Fzxg``z7*5fwGT;nG|1N zA0;eY)=!7b%p|$NKwV3blWVbeX$qeirReh584?h7-5GfM{)}T84V~lD=x>KNML4T< zqMuB^smR;mcgn+Se{rKND*ibxXHL*o3$Pjz4YILYRU8-tr>ORPa@F&+_F}bp{|Vh$ zwI2Q9`UsobfsC8gjfnmH2>G;c|J##*Iw>+PZZx(rufX_umt&W<(SP^d_`iTIXLOxp zkDQS7Inppa>jKqXNae0QEvI@iY0M}HYmNcDt3=C_@>N?0;oF>^dvIe}?LnG8LX6b2 za7w1*Ju~v(dxgekslRE{%APplYqr7&SU;ilYqT2VEmO@<+h*IiS*d!y5AtKpDo&Ef z+ue>Zop3o|uDvOS3#Em_Sl${eiq{s2J%0|Ix5N7xem_cR&J7p;F+!* z*QZ}+ud1VA`E_!DsJrioJq_K0kp%k@9!bDnnU=Wq!W6eGL*eYrr$@8}&l|v*0D^o4 z@$UYkb_;A%_5V~1V>v)e!mJ*o>pjEy&q`iT zkF1e+vw$&GgSe2M^#?Dj+>&UxrqImJHzd3_PFUmGGPBiOw55m$Ue9Js|9kq)jX1QX 
zJ1T(l@@;=uT(4qdJp8&QS3CHYyN}Z?QiWC%pjcR&(QV5Vd#;opzp!7BnJYwx0Li$* ztteY-UhIW^vKiX{ z9Yv*qhe9?pYz$;pEu}2m0ngA*0ob2^jWCv;+sZIDz9Bdbxol6*7p^6J;q8Ry_;CMk zS}Rx5f-*(J4470a;7<@O&-+L-o#yt`(>V-u>#mu3DjujbmK|G#d3c^3g*whBzm#>7 zL)V)V@+-k*d76aA~!-Zv^)B=l7UY>OO9U>OBM&<=f zvhF67Hf~>8>rFrX;GmJz9-u37^ieF3g$5ZROAE;3$s|Q^D(udEI!wuLq~aF(Ud+;o z#&fK4v+>}7yVL+sF`G!hwCNPouT95a0k>#r_a#BF(2As`m71o^t}ejyXDXm8dA<^G zW&)t^dIp%pSjiVkv*+2bpAerbeFDHsmeCD1c_o&<$PC>F!jX|>Dwq0?--skaIqRG zfKzmbKa(VW>9&}zS-GyI|HKPg&lZp=PVSdd*v?8YzZVmTm`C@^5|S7SQ}B3W4I@Q| z-E#lcT~8+{7DyiuUHuTTlt?d?JSJ;CmG|*RrjABr{L!7htGp}pe;f||-$Ov9@b{?W z2colJe_@-m{NL@z@!E%b@22OpUHL5yRBf?Tu-$x3tx9cQ;+mc)WJf-9>mY2R$+!O8r|$dC)XO*^U?4DZ(*-yHAS>)TzHh4$+@(X&d$GY}My6s_Yx@Q9iob+8-(Do`skH;tJ7+FV%iy-oZzG zIm*r6@kG&K*J7=ras1Wsn;d6k7->a{SV_76n)2HR=dWUdC<+khVE_7f6M;zdyCc#V ziOG(LPQO5gdU&21oR9JE&Yfxd>^RBv6>V%I=fl&})6_hJUw7_i@|?1BT_Zv zb#z`w2~)-y+)o1Up$Q)BdWYvvcCV6}A>Q_2vJ%zLpL<@&D=vq@6{aun zdp^(d(Qmo9qcQ;XIz?NXhkd5No=@AkX`hIFF!!um>dl_Q^6sK{vj zy|ce=OYXTl<+lCwCFD^Z9f)6dPWhpR2Nl8i{1zHbI2X!UvW4 zQORG#mX7r1JvHd0{ssB<^@izmxQjU4eDoMFcfbZS^DZae??f!}PRDfW^4(FwMQ=~0 zaKZUSW6c#o<}y<99c!P`trbctsFm>Ewl!E7ZZn zu8g31UN-dZv5^P`Ld#8wNtMz_CEkRoG>agv_D8_c0_6?GN9{~cvN+JLldUDHqVt)| zr_^UgQc%plQbO_K&w-l8=2i+h&Ax+a56JcJ*sm&*C7^5(7KxO^K;si+ZW4fGq2i$3 zjp&P}CKQ>Fx%s@~R%tHx&{1Lh*rD~i!WhaAGgd7K zhw+;prgVGQ;nG_9nB&j%V~@^{xllS!@T`K7E!I1;F6ywtuOv^KIe zW~1@o6|SE>Bfp4$JI=da-rD*2L!(Z+j1j`jf57}kr`RU*4V@+ye&8lFbCgbf-50Ui z?+bRivCtZ+u|kWo{?4!TGrlNfgP7ZE0n;G(I|kG@>4r}t*FHaV2e+2<77JiS@WY7s z8MQgBqAPn|AZa$WYxu;YaiToP9euhfkTiG&S``hPP+T%RqgR|gzHGzNIF+fu6kW0^ zJgi*4Z05|y{l_a8;Dkv=1~u{iq?RH5QI5s^W`L=VGqiKcqhhIX>i#zJwG z-#jQSzi6QH>v3o49X=yDbv<7yxd**8}y~=)tTMg>pLOXMTzxK%>UwyKgG^xX& zkUc?-LB>4(;4T#fjX3g*A|N?f|BQ2NFfvn$J>QPWj8ho?DUxXl-gLeKq_qou!N`jW zdF0yH1+om!5p5t_fdIN;Vt!b*l&Ez)C(&|c63h*7*E@OzzdsicT8ej`EHz{?vTqIg zBq2vas%dX@bu-Z3dcR@x6-7G~AiS@FfznauU$)aVLEHHCZuFXh_0* zYM!L$l|ES#Yn1%Baa5oR%`Z0Fee1=@w4K2?-Y=IISqT4&tG57(qwDgAL-1h19fAjU*93xV 
zaCdii3-0bRxCD21*TLOwaCe)JXaBq3?(@Fqnm7z1$73UAWCU#i&9V^kkd62>`Ri5P;sW??v6!q2Aqd;#o0A%NtOdWWUAMP18rUwrbA_7 z3w4(HU&gaLpkOE0MDUxo&jF5-9b>X7Sd!CxCAzJK~-!+|y2j^ej zw*e=nb;|5gCL`Gizs8h|1{~;@e20I(J0D;X3;Ir;_}Ur(ur`@oE74B*-PvuqA0@a= zYyHVR{A3Y#8Z;^rB_&nY9?P_L2`-$$I77~*Qmi+41Wd-nXI?wZjxmlRw-HP+FIr5` zZ||mf#P|$^n@QXQ7m{pr=}KqaM=!RsRPe@uJTgqyjAsXXKj@c@yT4X0yM`|`0-yF4 z>?e=b!v|_XCOvr{8c8MRdmOfikazBZyZo1$ziHG+dV>dVrkA~rjKNg$=lH<-&Th>( zFCP=g7j~M#NaKE3>s@ybnBqv3^w)sT7ew0R>&ggqYfb!5V?AvoVRv!lkwL6s?uNcIbiVqqq80u%BFMJaGuQ z328LMLOq%JQpbRPFwMg}tx#+%;+FbY_{*z0MiDUq#rMF$nh}ytV}+YrI<+QOCDy=#rzi@hKs8&jz{lzJCklX|cW(%a zmD$Z&_W8wG9u4K0s*AmNnS-?Iqi6K2hYFM`XFz^=aWVBKC>p&wM?evZgwEPz{H9E zF^c;G5%xDDu(wXFW5&C!%uS)r+o20T@(FXBxruBJJ@Vzlp0`uh4c?Q=Wejbfk0 zvjqUb@;iRIamfe?>#YnyJf#682ZM~-H)7q_fZbM~vEC;OyRE&mdi#6um)8eUH#w;K z$ttM&^sL~UDQyr``X8@JH`&OdXX*b<%|8Nf%aS4os=mtESOC=Q^v>5}_L-NRcz82f zz(~BA7+96oEeb82wlJJ1^DMW$JVb;TappyQ|0ja--T;dX67Rr#oZ6CcO02&k23>U{ z+X?F8JY%2{b9S~_L`rtkhn@!V_{?^|y#ir(U7K}TB^|kdeRzxreQ|n$m+qaxH||Hf zqYrUK@PkPNGC#NeJaJX6(*`Soxs{#wkd=$sOgg#`w`8$euRp_h*ed0{sf2e?n!fsd z)`zuf5|ijN|86pI$W2CXkXomY7{jKkzs^;d0%_(^2J$11AgIAt_PW;@zZF(;aryy~ z8acpC$BZ2n5eZ(Rak-_b0q3~sMHKOF%wcP8-K&&XiVq|=quDAR4AXjZK0;6MF6@mp ztQmd=%W&`nTieu`UY_;g_mu#V;IM6`L+k=&K zi+fSy&Zx%;hK*)AHlZmi_hHpQu9=Ke-m$O zd5OcB5(L-n&NN7M>JAbl$GPHp#^R$p8Zywj)FBShshBA&?~3G8VO@$Y8jshgI@|eL zgBu?#V#Z)!{KVHbBii2ebk>l1Lyr+QR+?4+y8AW#oJ4*WmdNaTIro=Q%#CUvX;tDD z^C)Lm>JNFJgX?yLX^h*=I3SZMPbaIku<+IfwTc(2|LMH##tq2JgT$Y`wtofr;(l-- ztS*JhW2ntrlP^rj=xESI$;S45!e7zFE7gxOPfyM6leX^4NdFM)u3XS^xe_F7sALnm&f7C?}EDH(WjS z3A6YP4l#OuG$Quow%GZcHGjI%GZ%6_n# zgH28~xMR>Jr^0k?Gp|3|`zEd5$xZyz`YG}CV>0gPjW_i{eC-{c%!}i0?wHrX;WNA; zk6*3?14RsNj`CLsj|b&8s0VWbh%?2(8~oVguzv?_SD2p#>17%}sB?789MT?ip#8|g zFPTCQt@uEj%4^A?Vg-hM%0TX(8%da|bRc4miLoDCfZ^*&$E~a0TI9Ez!|cnUo@F-I zvfN`%uyLj47=c?`=7y{*%s5DRHF&3^ychIJ;M}kYtzyJIx5q~)IO?9c6N}9?QI$?Y z4qxx2k0*z`S_HXW=x!UM4~!8Ej3)lXQVifM+U`V&*&R}MtoGQ%OtfH!nx)jtb#KY6 z8k81)#=1TGq}VZ!JpC|2ak|t1OJ%!ELdy0vS}wbHvIo-{RzpAB$>o)@Y5*4Bo^>4B 
zw?ZRKmy_tnhRYk1k-To=SQ>)_XtsH?|2D1n#+B+ejY&P7tNxF%!y?x>J|~c9Ah^QQ z{`{DotHpzqiH)9S{%_b{Wtwg-=VOdDR_3zeX|vc4BYLmR0@P8pqLj%BiRCnA&-1B# zI1F>Vb&(wH`NuXpQMa`oFLTmrqjeIfz!X%+7l~@TMGSY=&^WJgd4cz|)0P+4^FG=c zi~*@6zcipVUDC;W`yEv6{5g%utIA`p2T1{IWXoTMOI(gneQ>WbuReA<3jfgP(qNPD z5ATP?7y<<&5uBfEb36^%?821?oM`o+q#=c?u z8KfE)eO$+#{IJXV!E5)K68*CuI7zlqS1gO!NK(CPT-HCaCEs6qAL1_WqS=1r##6*j ztUbzjJ@N}gx%XZz@`Dtr^{Yx4qASYw5+?he!zjPbYUpaMwqbid0J>St@3$^a!cBo^ zTNYZ7`INGO%D@;)w%iL752rJ{@(nvUb$sWa;2!TF??G}lK?t^5Q_!z7b>4b>wl6B^ ztGV(f37j`FXzfhIYaMO`8I53n)|9G`)i{G7k_E{@%jBS zrzVIAG4k~blD=I@BA%p4PefhDQkg#Ul_0B0M33OL1s|?_&pkF*sT^54UIB@N@6c5l za9We4W;! z;`!_BTS8auOVd-{QUkHT$D2PPnIG%$HMJP!{NQ<_tr-j?%v>piM!+pFuKSVTHlia5 zx0&%X;nM0SL6x0GSCO=>R_uCV!Cmg+>aOhLl3f#iJ>J@%;9zAgqZNn|mJhdz^o0xV zMf&XSYYGHrVIK5wWLg>n4HIBeiZQK{9`sX{*V8riyKala{*uL`7Ev1ricD;&WSTkSf|bf{?IC5=#)DO=f11GJ|G}HA|->NwvzUmGQiJn2kYabBsyrcmzn0#Z41hPK__6I`hQrC|H7wOfUyC>9(f>r&Uf$U1#7iC(AcQg zA`N|crGVt_T@-vDh{8!GZ##Wm5#zY*%-Vi*oxltI_QlXUIpx@2nwRL+kz$<@VCtMC z%wBSmqI!jwy$MUoZ?IBno&k?#G#{O|ticodK>fSxPcT@*cf&50uD(E;i|6F30aha} zIP9Vghz+b!72#L?pAzzaZIB+DzRKg8fhikq93zwbxw?Fa{;8d?GC)uH&akOPnIlO# zj?{WMdK2nouZO2O9vHqXQwd#hq_TK_8EmAcO@lSRHSVeEECtqkIL4A&esx%-wiPke z8+f8ySTx8MHHDmrN30in@(?x7ajwjkW7#$kMZ3{>^1fAKT-Y=4(eoz>-NyHp#IC-DGswc%ivvU#Q30jeBvGEwhI{zr1kOz z@?jxRsLmq7Yh+x=(CE8XCgJ7q^Oxw0jgUSnhONRX)W7U!q37#eVsV)!3bVq z18hZq~LRQNdYnN-6Niz&)F4 z^?D==n`3`|j9peaQ6;v}@@^xw*6SB8a$p!~16XT<|veOFX$!Am8Tb zx!8==F0tPU79+aObt6WBc($@Ky1K)akP5sMLpB85Z861DGF!sZZetg|;=}q1@tnXX z%?k1}CXzCWqw9H~N*c|6ag3|hh!nhdB$d-15y~J>i)r{Wkl}y-Yv(CJ{x_BKzr&dv z>YZK^fCmDRSe%<$7NEbYB{{e2wfHd=VH|T*{n6RRBh=GXk7K~hFSQW!{ z784Kog?)wLY4wuINT79ujX{G;h!+gyHog3gYfUV$*C!TZ4%+&u`(_$O=^%2sP-$+Z zy^wf-$!j;+*@h@dej-F=viV}te&$;gc76Q8^~zZr_xT;V(p}tJ$%~MRH&tNeIl18} zHi&GW&go}8Vq=LJnHf=RFHEOO_b+qg3sUXg^PAi!z+#7^T}3@ zr_*aBg)#f`a&+2CX*{!qreXUOiOT%tlzV;uMdBzO?{C~u6w=zyEoT`zRR0N9 zA@aV+!NV>Izxd90os>Iuv6h*Nj;psg+k+@B@)Zh9F{H)sp~2J1*A(4>j}?9_^wNpQ 
z7Sk5OMfsPv0HRLiq&o=KW||ScQ-?V4a{fo_R>~UryD3H9D=>Z9q7VA2+hfdE{$z+% zP|kFBEzLZ2)bAJG9q8FNn@QY2T{7^f=mDWw3Lw;aQECyhG-z}gXes4Kdq9FT#S0+kVD`$B3 zD(t(UMX?#)O>YdJ0QA!@fFRMc7oo7D0#E%8=NNwYW;vG%aoJC$u2tPtUx>_o!2c@QY!N8RbE1e zw~{omru6`pF)OQFkV)qAs{TfkwX&bF27)xbN|^IPVy?*VE2Jk_YcDXeEYgp=xWzS-LmT&cMZ0=6 zVJxNkkNP}6gm+`C$M?^+aEH+7w5Zazg=rL;;Uh1~L#~p2y=%6PlUp3)W-SXx4yigGshr1whg2OXiWc%zPx=wHXg~kqz|A@>P&5(Y>g(;z3j`cym=L}JvbP>>oCC8+Xq|8- z-dy{=1d<E81Vv^|torP8CvQuy)XIX@!|CB4$31qNYkK)qiFVgiex8udFPIUZ6)ASD> zOyY7I^x=V`>Rj&t^CpA<7;m0dlbLYvdup`kMK<}{8Jv| z1l>XbBWI>JO|P1kI_vmsdc=^Ei7ay`CmTaAgW`6k_ArF?Qjv@lM}qfkyQ1#orhU8|U%%0NMHJfqAjjNY?xmyUo3-d| z4Q3N66Y15;O+O40!JNcAPeaS#yw6uhN1MwJZFy|;%ZY{yS?;#jlXfv}T1>5Eby7#R z8^>8K*Nm1hfl$-;uOFGHC6(=pdq1tHueh|Bh0Z`doM2T@wI2v<=K&TSH?!!it$=YH zN@~s$Z6IObYw|{LwGkLKJ+I+$?Q95^!2EbT*Z3YTjHXi9)7$7NmxqO>3I;J8OJPGC z42GL7)_Df44kNGQMi#?}%QtmD?`)TwB@|`VTca0beJ<$tZ<}9I68n9GS?j`2S?yr~ zUPADunMV@RFKsK=v2b_vqgY0{c-mZ^K#JBvwHCX~GK!a9v2VX7apvegj4&ho8f7Iz z?tVIW#T4{AdJUC6{C5%ezYp(%aNi-$z=l}~TvFC_VAc^IJiY+c)6MJ4_(?|;xk~+F zkDxRaz8T56lc9+jLRPgv`psffF3UNTQ&tOdklfjjXc7X8?sRMUiwpvX)oj*>G!T@V zFw|n)1LvdK0(a9$FrYgrAjO70diN}gY^4w!+=6d+T2*}X;#W?EaGvKp>&0`O~YgFN5i<=7E#-G=$mUur$h z+pSi7DS>=;zEV~az2*Ybs`dWB7N6fJSRAZ+l@`{`r&aifp|%^(?1HW7>&F;HALgM( z2(sG)pjyyIsLW0o=H9Om5Q;lE8%dX|B?9LKi_o)sW+yHO_S7UN8Ztfxj)T zBRv^ylR*)yVm-j}!yEu$GeJ3_010=ejVrLt1gS>Nc)Laf(d1jbjm!-91 zUN-%!oii@$kxi#dq$o0sw#}6x1;W2f+8Z7OH0x}Ho96_0j;QWftKjj#56_wdg(DCA z9b2IF&5%R?qdMQ)lrj&fGr8>+c8IBqiurA3gbZF?9c@;C!OQ85PflEV)5A#2*PElb zyy|RIYUyS*lZ|OKFMF{VvTI_qCA=7eJw^IPbknpPphhmo zVKV;CPsPYbNG}rPG4s5JYNPc=i!F;k5v(jVGt7lu#(p|jdXsFI)_;561-uDzJ>AS) z+>Lxw6$8=oa+NEq7ctJI=BsAsS#F8BKt=I*lLzowyX!s6FVM<|MtRB{Y&H?*^W+S6 zKBu@yN)(dJQfsV$;Z(W_p6Vzj*XO*k@Lz~#nO&%a4kYcY$hx(5B0jH7Lt z;Sv#J;rWlJ`|B}YfKw@7DaoJB9=E#XPSv>4k$#UY4ZRJ*#;b1|dj8q9|Grphl|DflfK^10&Sx;brJS#Ll$= zMGDXXdSXlCV!SEeCX3&h4d(-!ub0|utX6V(LFcWaU5`*{i;i-6enkoRea>~F)mokH zk13#N~vx7ZQ#-MPQV{v7(ut)-sKC?rZgdn5Mf|7Z^#j&?IXlUZHNe8 
zy*pVNpuWt|F7Ov=Wyn&S`M01rA4%qU?h?w@z0~gZ1#pU_yY3!%!U4D;&4cn30ixdI0 z?+$qUuoYeOs?le?e3S=P{8m_1+!CT|_`euAO2t=*-qfM|*LJjx2Z_+JMm3aE=-&~C^ zmAql4<8>^t8sW*+F06eZZYn9PR=XHD?{!MH@q0yl7Oomue*V zR{>|t1kq&rk&If$EDlkvi_D)xEO6M9Mjr+zd zbNBWaQ^XXO{o5islFuW9vnV(vjI7@50@g-c+T{6mULcS0WM(pa7viz}rn6nRNIU?n z7DpkzqcKZvYSt{DBB897U83y!@6H#0^l6wA{uUS!cZb5bH`SIWp>wW>(030;=8naj z7{VBaw0=?O!hS|!iN=erK+|$cw9E zcG}ObPWPFQ-4X}e-6j;NomvR8iAz`E^f_VRD|Gm z&X|(vkyse6s;gGbNSwJ$da~))A^Q3~&CbTwxOZUu zCz!7EGad!P=+g*4(P~qQCvsNtgw#dzejg8NfXjsb%e8uebO&pSxI~$+1K-0Q@h4gO z^llh+dbN(TDc1$6^O6LxB=+_IdKp#C4^*i}T&^ta+Y|g3PU@#(X@4>&M5oRk_uaw* zZYd8e%c-y~!VVeYj;*vvH(7uG62hlHrntj@K5Rvmp^X%t&NSi(?STc0E61 z)q64}BT2fbsC?d-FJ`HEU`-^f#L@l+gM->;@MqME{aXvjVK*%_^BboZm1?x{Iv-{E ziVqjcnuDx-|5d%fIJ~wcvv6y<6|WgjOy=5Qj-m#+gcs>aPi`}uHA@! z-f|YeD;rwVIzdRZ*_znL<2_UvFmwqVLn$O+7&N5bI3ZUSN(g93hYN;*4O@0|)4Gxt zm<h>x=x|+Iy2X*qS zg3R(RquE4T?oeLFj!;^xe%E1B54mFa?EuHN-UD!!7 zrcq#Ni=%5)f(Pq(52yY#3gn4H#JK^5ra!IvrWaTB(V6>`@-g%yQ0n*ct32 zwOH@!OW;+tb9yK`uI?isCF*sN~M1F`U6`9mim8)t^aRU6~h(G01p!C zAC#!xQc$P12L!te4>H}C>;<6icwYT%BR&X-Z?JY>WKjC%L}Hupr5%%nI!xB53Urh) zCNoe6%@w=Si;>geketMWVzEt`EElw1pHmn;qO&3;3|B}1K_s`EbI_}XC+%seT?W|K z^~6CVDuxPmInXl&vQcx_0HZ%0cT>IRx1zXY zrq+S3@%tK%3ZZk@640-~S7(+YFJC>lhp1E%uie2ukGng-?uc0kc93-k2HK}aoq<_`F0uD@!+k2G`o_OO+!K4mq43F~?*zcA%5E** zY1+*%AYlcK<~W5J4BHYQwhhJTS>F&8wMgYFKXjVS=it$?rj6na(4nzIG&|mZ^3iO< z1lf+gUynw*K!zBu1x9~QRqxC=wLjAH{m_F?X!==$A9F0~7D`lUFbS-QdzgnYp5+nm z?L+JrNK0nxS<~4{qVt|=yZbNicoYrTNO?rh81=UQOVonDf=GZ=RwI~Ww&0}TDUH1o zx-JFM-@Yaso@+)KRF4u~H(1sq$DSH0={keQ?c;m(p!~cZ9OFOVZBi|5OMtn#eePQA zwc$ng^!7Ep={P0~y<)0b1eyDu9h?56qi(eO4fc+o=8s!qcB1?hRpDf(;H&epVR^Z* zX+pIQ9~7eI6Fq&KNG0`%T#41n4-Ny1LjkA1(L;xOKNA=MWWyh9Dh_{3%t3%Isw8G| z^H>EDW}Xq@ySq5wwY$genQA;9Nry`T%PD5hW-*WD>~M4mJSXIG8o;U_{-n5DKLu6n zZ#?qXY`K)eLiu~J^ikwj%cIRm&>eaVZwQlab9volq<{WU@87maDu3=S0m{yBf@;0}XqsY8`z`OPcI1= zwIRR43m3392}ScI4_l_;<4)w;E^T(iV*%fFBGetR^tybZBLOTQ{bW=k(F%T#RJ1+x z&8BjCGAt0d{CtT^&}VPwMht-bB#f{(nglIca}lhY#n*onPQVIFd(ZcytOuRR+({K< 
z%m5EG$o*W#6Zq`8wO?oIw!IhpNe0iTf$mTI00PO2Cg;lZCROj}j_sWtBv3J^#sokX zu4Em|?2V2(wbvc0C&<-V--iH;{RkUOlH?EXu*Vpg7j#3CABU_XdtY7W2@mws3p|?X z9w+7ff&_F@V8yq%0TrQV}!l~*oJsOgs$W`k>^ z$P-2Ew3*<(`DxVL(peI*N-c5IOARe|=$Cn73rKrskijxa*Oq!RM=F>5=hg*A3@N6l zmp2q2Ox(yJl#8B)9|1opB=g+-$4Q|&()wWm9EHepfiT@8J;;qHR+o=K=w3!04J+a} zIK^V;UO@`AM(`NaiP&kgLzuw4Dmhd+@9mxAFN_bSOHJE|zd@ybI%F&P@&{%$H0rx^ z(A(F^1F21V(6XK55taFa`bJ%&Q|??_g%QN0PUa-)=EHHkD0K8U-x7kT5qFHK^KMHg zQW;4XbYe(x*gQTDzFund&-8$h>*0xXD3vHS^S2%j&Ad;UD# z)LhwfAjUFBzU9)?rXCMa6Rz2u#_(KR6ldosR4I^-M~MXS;pX>uEIK0DclsM6fObJy z93`9HIOU=bLPuIy_MJD+ZF%>kY~AR{K)_B=9q()3ZxY4xKi?I~nH)^M9$buHusbq_ z9Z4-c7V|r#n1AmwR*qYygMg4>z(PGmF%7|XiW8**udyL(lT4fpERA6q%(Kh{oH zxYy$Aqd6miRKEq?S0JU6DZ~fBPc!|2U?oll?8p>-$dbkFm;wTXaVQC)_6HfREXBhX zN}qPs855|-#ML*V%kG(t-LIEhkY@W`snI9O>bFlJ7tMVypg^*zphztbrKg9AnE5n{gDTo8+l$Ffbn8eV@=VPesX z_=h2|A?Rbv#EkXe=)Bc&vgo${Os(y0$ms>Q9W^n#l-JCS@UfR^{SIb5>V3S;OUyyu zo>R_voTIW)3`&KF&9!kYP#4jp790mg@2XrkI_g~?e0?zr6IlFpRx=eVFG@c2feASl zU4|Hp6JrqwF`}sQ@GRZzOL_9NG)s{Mf2F3vvaKQ4Vwk(*F^wS_Vycxj#)}KYxdXNA&n~fl;93K_&e#%8q8qSKwz|=fkFGAPJZPMxrNd@#V75W%<`oIK~AYO-BYybrXs?--^8=q`Xq?) 
zsb3P@JkGQ+H(m2LXJ?LPD5DAZ(pd{V%f(1xz*WF2o+4r00jIUM5>L$8>C6QD+4H9N zokQe#bCq)FAqk06)1SL>9S0ZZA{iN+ZoB7(JSS}yo$nt#M6T~ETTZ_fHvH-bTNOS6 zpvBXQ0qe>PZ}u3)@9o(rYiReVh&Rncg(}*qjHV=wgq@)&XYHHRmcA1^yYI*9K_eG{ zekaoRWMbE-7%mNBAc_C&d#KxcXjz%?f2HZBU2a#u9S$Mn2EgTQ8(=ylsYU|Q z-W1`TtP`|mw}Q=G)lT&yD{28F)ic9ZjpILsmU{&{bmXEhn|B$#=qTCOx3=iZm4>t&G7pQPi#TDt&6jsa&(Cri{}R{hH8 zh4g3ino~GHU9BV(Y9prE#)%UvMHE`*MP|!f(-me^#@j4arz9e+fnhxDC0WjNb`(>+ z@`|bQRT1x7J5k$UfbS&1D9ynKPsTd3+Ql%Zi4bKn>syvkoBcwgl{yzPa?f?C!dl_04{dMB9z=9y*E*v%5m(-V$Cs$N%vH z=vhT^^LTS|Ia-Nvbk$8Z`HXt&?Ub2N$ZAvSq21yImU#}x!9d0PUi6I5UG8ptF!o!P z?`GjLo^332-JjKDH15{JDP8e}qo~lB9yy~W1<0qkpeu)cH1>h$Zu?b|HNU&nU+9hB z2?Fal|7eMS1!Ql~WwDM;*PAIutJ!BFXZ!5fW6^x})w0*dJ|7L}nlLUqR#wlQ7GhZT zCL7e1*&QIe=2hm6!0kHYN`$rTC;%)<5oz*8Rp2{=>Nn-4xN|!wnBNX>Meog&vV$yK z_ImkM(Xg%rP>7r{|F6jLA11Y~m{5}4yK6kWpcm!b&eO0{+cpP?pNG|@m++`;Q+-1~+7M-@bX zJatTGByIKzSy~!HFD%HFzW4Q)CaO*z?Vv1y|HUZikAg!~&qi*6aQE1l+Vw08m%38V zR-CXriK+)Rk|!tvaEqkf%$O}0$HMN(+1#eKyG87~Gj6Q8_gW;N(P$GPnVc3!l0gxU z!4ST-14z=~`t{7;UCUipec1E{H@xz7*HpS^$!0$8#|6pYr%c`YuY&Yy&}Xh$Q@%o~ zsY_Xd#Jsko+DR^ltOfL=APHg=TWbFspHR#QkpBEaNXe>ofg7Mf?1u{}2FkN{BR`H5LYC+iGc3`QkBUSlby^a1EdO*tvQSk|fC7c@fsteQ z!<>Y-;=#)e^(e{;J7Hx`3hSdMLazAD(J+OFeIfM+F{b?L>l zwH~h$Ln`;iX1^V=X1RI|PFbT`zQc(R4u2lmD5ya^r=PPlL1|O*UlbF0sQ0e_f*~zC zcE^)f;2xizdNzlkQx>Xv|2p&9^?kH=dXDa%>IsmaWWi_oaO*+8CVzD4;HhhxK&8{} z{M>eWV~~^Di}K!Cp4!`Wy*v}N^)BdHV=&LL=D7H+qZA8zbV2CF##^CIngkVd!y54` zs~+JW0$YQ(ouJ3JSWdiq;xN>J^lzJDczTvUwe9UszLY<&;$p6#bjLPse#>|JJixd+ z5J_1GfbMMmW6kTbGh9dohuY`ttq9#G9@?eBRS<~KdRhNnp^2Tx^_bj1Xz=?5l{t54 zou|(rqor#frzScg+Qx(>mcN9OAuN6SxpwzpQrFfnKNXJ2BvjFe-|u<6T*Vj4@2XrU zWfuckVX}N9=Y+ap%TkNI+`^8LxU63${)ShRSe6itP<_Qc@Lz>p)SF-79sXTR3p0{U ze>++(NNqYhRO0K`)n7teX{8Y`f6}-c9wK<`gzwiwil|HzD~5P6up|JnS@u}DeRIg= z^R$m`R`ks`Rbm4UH`nf)sx+a*B~Ts^P#)jr6G2G?fc)6p4H7aqpAjo=Bpv1 z)A!x)M9)vc*lAzPXYs=GHSq#1Npq#jf_|i$FxupK=Ts3^{(1XT9AixCGL=W&+p)$96JM8ue65PgZgS)Ie;NTr zH%O6sqe-{#5?3H2FLD~5G4gJu6tjDXi;Ddk84aBQ>SXp<;eJ@>{5KM* 
zs(`XP1}m)_e4V0-sC$0ce3j)jCU^SpRNIMhKEP_vMTS#e5BN$(LUPfLuXKO@{#qay z&L-oPT(An3&Em-`3Z;5xNRx0k``Laua}KT5C#|2xNK99l`$PJGYCwJyTlW6m&gI6R zG6|w~e}u~9sYQ=+XZfV#mrN?o32)EFG^PN_jNZ2jR2HMjV6E$paV(Sgv!uY9;SC@A zT^yCyb}ZEZ$9yR;#Nn;Ax)F1NYzA@rwv%0J2Rgp0ZHVW=#Z;*)rxG>!ghYH=VG2fc zm`@1aqr?rID(9PrdEA1~&hnJ}9xGulMK&2J*?>R~; z496&zeUchUvq0*A_8X53T)P$lMNy%IWGY)7(^%wMvO7f)Yd2T6cVUcqmFZ%&$Nrbo z>M)_3^8luD8A3Hl$lyiGv_rnRkbm=+KF%(KKP1K*IgemZG~7!xkZjff5dzwMLgtoO}o_hRI;B`pUF`r+Ab0bhj9FWSAVG|t~BZQECmBN;U9)l>`&q# zrEgS(+>c-^uXtu@!@iP2651V|LThzBplO6*LP7?Vkm(MYEs$a=6w4A~q}1->pq_eD zrSOxGW?I1)$bNgzYi&E;RWtY?jUVzF9rJcuP-N%E7%yu+&+P2+kqUe3dwbw7SUo{z zA7DdIGuYzOzn=tH^s}VU3umRq zmnd~zA}kD?(5cJel8n8@Voa*85_~pDL&emzGz#vz{kto`eU+Oebo181mG)BY1%?xa zMviL?LrF5DKDfSrFdrT%_;I(ddkIQ(hY;%`%FKZ3F~+M1!6WR${!4!y5F_7cLVJ6* z09A-!^!l2{3r5YZD$a#R@MkNGg`!6v`5Hg7Wn{y2O!}1o2Lm(zIY6<4$6r9ynN+Kg znJqec+V^ZDseDd|b4}n4w()}Zqm>xaRD&k=TrAoy$^>47NhSKhAaVce-OAf4m}Lo~ z&Bdy(_3c5SApTL>#XS7_SFMP#(EBQ=Xt3&EFlr8*7yLg%fHpVJE*S5pBXZN5(8hVi z!bTXLSQ=`;osdBB5SIc*A*w`h>fN_|*upT0zQ!EO5xRdI;{yjMswzJ3K#^xdevl;I zt=r*Pe!ipa%hRRctofeTjxkP>7{lfuIP^5&KWu-sikvZ)%Gaxmym}@oI#r%u{x@F< z!ynA>@g9D2UAz|*tQ)?RA34u8O2>KAHH|48{wc*_Aa2IxWR9qBV0Ak!dQQzk=^PKH z{T#QgZ%|#8RL ztolm``!w3K>0+zLkyB)JSRf*_Xr8pfT_CYc4T9o}$wH*zJdHA50Xvh>O1*gwTQpP;#iM_Au}X0gP?1aqPyZ3~ z=bW3`ZWBON?``7-E}pKwB;=!`;KaDT$s@$FexkQX)@iP|87Uha5|VJBD0&F$K5yx) z(oaBKA5*n2y%9$;a;SahmATGgRapvLK6rSu1oR9}x|7IyF4F?Y3IEJe{+BlNf9~7z zpQlh|6BPOp7f{bny|e3tl{O5Q*GM92(N^ws3OPbLd&8L!qJP&I!pe$;-KrVa z2(q@0_ff1h6phBxU|SrF{7a`ns1fm8@=X1xzVzy$En`hvjkZFGcHw!OdBP!$X4)LE zGvV!tiDG}1N=1S{*OMj(1;g|$0wEcqk(-@|U|jB_;ZP8S<%Pce6p@ceKAOottNzeF zuw(2y77F8Wzt0NP!(HA7H^o$)T1Q1H_8Xu+^^)~M%+giY*NY${7@~#?-f;<>(akUa zd++~0oIU)1Vv@Zg-{?!wQ)}P-q%pzs?zhSHW)xd%nh`FkAUo6_Kgr;P4bkUbI}x65 zkHW%(>Uu9}$Mc@kV~>Y=;fU9`T9Hpa%np#+zEVd?N9?d`=Gks;JT`g>;P}e?Bxk?$ zaD$Lart4V|ob2Xg!eA@&W9S>yTic@ip1G{lUr~U%;44FQq&>ZCx($bb6x7v4?A5Wh z{kTIiN%)SBUA568r7H*(BAO!}Adt#qjOJ6KeK{LZr@=^|TZh$U`-b2Wk6WC8AvOBG 
z7{jU9(1^pAL%gCij1*8H%V91)*YqD#{=dcN{vVd?Q1vLi_teOwY(Ju#t6k_9Y?osk z8MC$+lU#mvnnwmFIdmT!n`gGZs?|C<4aeT$BWCeo3O{rHoOFT#6t0~sjz2nPvG!km zNoGim0yJd?y6PDzA*;4V?lW~;(1;>iKub9WRQ)ASPS;z~T8g2FNHzecdBP!biY_?N zGdb*t-`c9ftIy*4^rxRl1(2cQ5x_BGd-65EqcmP{`=Mi3poY)ju(^L3M6`ZAdPo|C z^rhDhRz7j*8yrILf%pM8D{8ukX~XlCRb=?&QAXz@pRD|*u&D4KndX1pzIfut97Y?s z!4))uA_6>PN`dGGv!xg=XYul+1CVie48B{WE^nt`j#Xa`TZ@Ps8_6F{JFhuOr*lD3 zuw-~C?*L87llP?D*ET}Z>JALvP)}4}Z$l15QtW37jA}R~jb>85P(C>U_9N2uI5XE9 zaUxQzyIYEV;5e&=_Dw!4_r~SDbPK=Ec!v_NtdgZf1N47%x?NquxKwKYf4sd_P@GZI zH5xp4aF;+ra2?#;g1aZUySoJU;1FO2cXxLP?(XjH`cK~P{HN+m-a413>baSVnVM(s z-n&L)J1)O?%r( zejJbiYs-xbp=;{rzPA32s{iw@_hf$zTC}3yK_Qa+mf;!AA<5FW)&{x;c4xxm7ZydN zMQ911TBYeLVac)^3dBcE+*C|~O!z4#YeyW+3dV@brm($;!%{V9Ph#ReIhH;B5x=AN zllw3dq@{M=*SP#x7-CMMu@QQiz7)Ml@15)VRH=5f+b$S*ZczPWMcHchIq|cDZpCwj zQWQ%()$-v3rvk3jsqw!1G#PfUD{L|5*qJ30cnVDUs}09pitANL>fl;ir-gD}C2h^e zz2Cq2{u%f=Fyk@me^pfeKkbP+_Wn~1EXf^faR6y~aTEY_Yl9>4)+c_@SKUaO{xAYeV z$)v+J(s)OAb_Zu0yf91H#P1&c%A{OT!v?lpg<%Mk`~s=ct@)6y5$67?RciY3T6#_H!8aUA z`@NUmyzSo(qgVWL*ISRL4k)W{>kcKHbP$>sEoixFWTcRPJ?KEJIFAlooUpf6cEn6@ zm(;HT@UB^K#%;V9Q@WPJ2?Eeg_JA^-aG|P2NS923cseauKH}a=`=VsuMVElD63y;t z{leXLT+}FHxj58w)*-M?MW7s$3-HcPq172t8T(wa&K`3{f32pGsy&FD%aiF#j=GWW zs@6;_m%|rvKTu_%>g^g z6a`GSL3))fT~T2oHt%M3R;;AdJsfP8@WJ&v&nn0o4Q;==TY8%D?KU6VyR4c|e=l!z zG5F7-I&Hh{$)scBm!dQk$a`KNj65)oAjTa~tV| z4OT;{rGJX)*LY4&eD}mnjYW=D({w|+6RG2<)VgMqGSS&lm zWyo=xO~uwu;Zpz7rJgvcHzznz$8q-R!YTE^tP=OTy<+E7H-T;$=M<(J?KNRVWoWEg zRsDPvOLy0coqSX0)DnR;&&-rQ^Z5faNWlWMXIEuwdQc$WIkN&qd`dV-bt+ETkL8FK z%;a-xa_{`$E@EuDeyBwBB6jt{~$ zxfJ?=erCK%hNi|8b0ECep&{23@`TGtC>T3o-5fcNBbm{3CZxGf2_u@YXEbsN&7BWD zMLTu3=S=IX@jXnucxfNqg<&>qfmSv3AnZdo?-yT|{Ev?PY{&CNVGUnd!G&yM=9&~) zThc(Kc0SkF#zP6NM$|XU#lW~EeNZjsx*mu^)6o_GBz0C)!>67M$?81?%F*P{O zo>84P|7~SDwJ^@^W@WtTA|;#|nnb^Tov&(N!;yOHPPozB*+#J$i#P`qOvK1L>**hF z38Frh7I&^J;E$$!a5DwR7|Y?+p@cDZ1%(qiWGr0x)~sCLJI*^z6-MFJ$YnRj+g9E$ zkwk?IxPH2DcOuTs6bMiy8u#lz`kk7vakis*4Uw%891b`8{q|U{(sJs%p(){p(jgVJ 
zoCorjcFLlT@ko%0tx?4xA!*q^xJ(k60b*Xx3WD1y`*dV6Vm*B7{FK0!hd#4OE5aSeWds9D)IYWAaqYp??;Vp zB_t~^#m+B4Oi0KUkRxe~5=(@;8KD@m1vILOKLr&6Xld^I0*Q~o+=Rn`{L#C$NlKi3 zC?66RQ~kIY+ZzPM4d}pP?!%h>2=9QPET!sH%7ZfhY$|N9;t1yo`z>%NR8nG0Ubq`e zMN`KKDV-7Wgu3_pRw^ivCvM@T3Jqr0mXwEK2xP&2m_3MA|A z5RN9e@T9Vi({n_Dg`$ErAj%*C!{+*{yy1O)Pr*d?M5`Ne!&OZew_8u}Yg8lu^N)l# zT_Hs&E@mgf$>auiuxD#w3dkSed;3(CH?SMgtA8Bvb%&7&3cpN(#V$tm;9$a)wSnK0 z-li+e>5Ys`0hQql-F9>QVsoK7WF`%Cc@eyZQOaaEj2m%0D%VllytBnQqT12%(RXlZkIOnfRlqT20) z!@PQ(&zaBIF;&3BYu*0d)-;pX=LqmyM7;aU}vb$^BU&*GScDD_TH;|kz(n77Z zfbEEPB|h?ghQCUD2@O!o0y!yzgv3|DeG@ds1T9GH>ByM*ijo+PtnYZ1aG2A2J*))#<9)eEt~2ipQDf~|v7ukj{9 z)_7Zq{#7^r^ZMSnW8@=c4b;I_)iyTEl)gV(`!a1kd*1^uN{b7N}1m z8m|&=45kR*pUm-BSRX;;@WI#64{(E;t*1x|$ zhg`mN4S}Qx$N(IEm1?XB&Wkz_l=_^Mq%mYK9~jimlquVjC+{u>Yjn=k()ca~N%Hz^ zta`r@JVW_(AlFP@aq6_&?BO;${%7GA-8h<~?@T|r9uTY^53mlk$HH9`$u3$r&6&@g zG#lT@%p%1m%~+`DIAfz;UXYQ^wE{6^`8?a+UD{qzycH3dEg$nA+!yLxzuvEIJGAN{ z9KKF2)ts5*r%px2iL-zkWvMT@6xlBBmI(ShJeB}{COFX zQVwfG1>)yOizIG)R;iKhgToTp^HW-;lNIzi${a=ys>gyXIH&oJY$I;C*Ia`V(x7C6 zqk{xNqH>a>!vh%zKfmi?M1%dg(qJk!6&W$vjRAdN2r~cVIGZZ>k-D$8#a*68JMo~y z>aQ!D-*VP08$GHBco(z}?oH_ZM%z1jN#GWL@v=PyXKr6lYooE_qGYY~1h$%H$n@C7 zwcfM`>#`Kyo+7)*Fdo`iOxV9uco`$4ueK-VA$x`mHgPRP)#BUxinSkyehOGYHm0w7 z;>&;WXjOb@`5PRCbZmX{?LW{DzpBOGL{S>J&v1s<J)z;1o5sO?GVE!j|HCLLI@K z&G2-9gcFs)XLW!V{bW)24e$*VUrn%ID_HuQm7Bw!P9d1Wv*K6uTr4Y86`ww8De!_9 zKW0*+_hSrehmt~nr_K1Tk{w}H?|3Te8Ze(GF(0UyOIhHkl=|#5bRbmJ7IRxQ>Z0`q zAlP3n+W~;ope`L8K&eD}KoWMsJj_U@BlpP4n#(Q8=}+TnJS|e=F)`8mrk1-md~2!% z&TtxD`AbjO=)fI9>k!_a{>9)FAh2)cAa2cBK|g5JnrHrW9RZ7c# z-8&!VYIoui>uo$V!MK84+DTl-#3IyC-OiGFZ`=nd#+4FoEQz}gZ(@s zfYG&pyZ1Ao>=_{!Q^&c%`6>?Q(Hdec4cZhYD$qZJm@~jxs*y0@HLM(g^2+~a zswClS9F{$E)+#c>lh9M_Y^1`oLR~Q-K5-dSsjQa!>1Z{5);G(s83ee?WSPsK+ClZh zym^}Ppy`sZ8YnZXPHsS#kuftXjE^m(`#`qBgb7EM&*u3RN-JrzFd7)PoJqo-nKyhi z^;BX~6T`+ETBgkX1#pMKM+~e(D*y z=yh?*^qi2{X)$c0D`T#k5y%#q z{y?L%$;j5OGh29Fk1~Eb<>fOa9n1c&D;4RG6@UbAM+84q33LPqK>`c_rN5XG@fQNW 
z*CSb4X5uC7v4o|z9;ELCm&>sXlR&C?8WYmZd!<|ler=>dAP+yqr%PiEGEb7XEy&NZ z?mU}Z9Z+W88K$BO6T3{$_C1sFFN5sARFBv)?-+8+n=)pNSvyVKiY?c>yDrv{Q$s1q zQs7N<=uN_CbYt5*sP&mND$}IpDQU`|wchF`45C8@?V0bW#tBY@QM9)E>D%Y+Vsi{x zfoDShfF%zp`h$!r}U;K!{`uR2!gKAdB!`Wc6S*7BmWA8#QROR-DYMJ5<{ z0;^HV{SsLMry6%iQ)^$hZ>ZuPgn>^)KqK8%86|P!laNAicf{3)lYtxah9?-^jyna^ zao41oY3bno?n2>0p}?x?+lOzOm3x@EYm5a=dR4c3GgIGn$4biGV4RX|9Q{$v4cbrSeflF8Cr2 zv&1={j!=HN$T_mKEqRVfe0lZGgazDGm0Be^wX5VaO(jkA0#<8l$J?+B<@xnogF>(}NGIy& z*t}}ZkT5d`V&!8?BC%Ad&2^0`;q`BAfj`Hr^UQ>oVw0qAUtrk}v3SB!MDpz3X`X*I z7RI;AyrbsTp{qA975KA>PyVg3{AmBauU{AV-lJLG@p~-{913!rfTJ|jc0oz7r|xy# zc*GmpSEN?cwaZPu!9D7Z@r+6wCKPt^;cZZ(lCtut7_Tx9zz6+`gW3-r&cUl$sc5o`4!(vFJ zG_4}j*OLvf(AT@%ENWQf?Vq}QQA2<>#jcK>wnM7{>$|S?gDI{bzpPEA^fV)eBHZj0 zma`-t=PIYO#t%t4)3TZ9OnVcDTf_8iW;xMvbXF5v@q(MW4R+Ob!5a`Ip{rpW3W>4%|*@kQG)4=b%R5_&;A4c}=3`OWy`FDoH=g_^dTI)*ccQ!$ z&N!bL{=HmZvmsGtk2c3eh*2dw^an&dh0r(2P}Y|?9nP;Zo4*568rc&jY%Cf3q7HwM z$}z;#svvy!THcU=?fJDF$k6@>%U`}6lmlRm{|q^9wf;T=_ma$9jS?OwkUZiMNt>$* z9?|C!-p7hR2&o#8rbCugE@4zvDlB&+&;O|KsjGg(v%+_;ceE4+p8xLmd<& zpDUngnhFNTg&!I^{zrK*I-xD*?|=x&`R9c%_up<&-!rUX$E_s?N%E$3%*nv34==mW z{WM_s_B0lL-d}VxK4%&HaqG+G0(R|sp?zDnw>r|H~UQpl6GsvFWA2!U; zaYQ1V$KOcLGw>{_q`NWRQGMC#V%it&Juxe$OmTuAcQh#FY>ej1-M5RZUiCI-9DR(b z*eiytjyzr_^I_9#b5+D0X}GK$8?@CszYIz?WzkK#FaMqy*AIUJ&i6ZMHL>omde7%m zp3jY}m@Ue)`kkTM!UpPW9t#9-D6S%9@^gJ99mAEW+2ixAyk3_)+Y0l)# zeF)x0YArQOsOOhTKv=IMT%Wt3#8#lw8k* zS-W`Km5dfJ-XQf3sJ6AZz-T$&f(7niExf=r!t`yM!_ib5k&n2D8zoC|%zEr{#ST+p zg0qdsNn<0_x8vQtn~GQ_ngOG^2+qEc+PBr`+4jH@dnB?hG%^@DEs~XgpPe3CNxspBTQ+5eOZw}Ww=6s?c<<&u6ee1x^ zqT=yX2s`sy3nO9*3RvFYJ;GY?(lfj}U+EYy`HM>ce*-p_Ha(JKubfpIjoUQ<8_hE!7OMX?r3t0Z%QuQxfV$ z-UC=M;(j0@LCx%2VKVEI-k$&he99#B@t1NJggUl7IH7+9TY+%CUt9$T7yAd0B3|RO zNMWc)9ArwXs0`~mwq8$OWq;Cw%W7sy?I0mQqIYP&FUXlYuG%c$~jIB zx7mI6Bd;8_-KwBBp{QBZY#fnvH{a=L z$#|I5Hdpio(a~Ob$JYATMvH~baVu$G2(3;m-uIA zlk+=p83ip2gLyq}DcE?i7$<~$uP_m8Tu1zz{5;Z+RA^M%2ife&_bk{^AVQ#WV^@Dt z{z_3?s0}KeNg<2NNiJ^7DkV*}DV}AQ^0IpkWdOHF26y;i7dNw+^Wd=I6Awq5LwO67 z~1e;LOUjN*OQ5Z 
zE#|Uf%fGK*}3*YVBf9qu{7F@;9Oy( zAs=wm8PT`By3)UL>JqO|$oA1-WQyJVB(|leCJl#AO=eNQ9ob8+ra|NFm+9Mvu-e$N zH#r(viT`;fx%6TtGj_l64Kb|xRA!Z`aLBj5@OT-j7j4{k=5&f;Wp)qb8e`T!R(Hz;&$7HZB5R( zOISDj2G2jN2X8g?n#^TD?S2`rlRI^zDDf0M@ZSJ3c)~f@K#zm8>F;AdtWzbxx3C;7 z+q89c&3GtezX<5w9UIk@su7>I#P;y=HXsaJvI*r&iYtq7B+_dHt-Yy77sFe-T3;d! zCv)MlzMMHd?(*Kq^U?knCc9S(yaW2Igp+t^-yn3iq%j;2hQMzC&P4@E2aLM94btqd zwqUpDxMl}(YlM$-IsF_nNvE*bz3DGWBQ!M5Bd0n=h&p*9uwZ`SJFW=D_u?p74j2Za)0(cL@JXdL#}(Db@({?j?8JzpqF{JuQN z_N1}oXKB3vJr)}_Zauz;|2NJf`y(mDVm6cPtuhc07tzLFMu(;i2!En(nZGXRa|}4J_gO;hqSpvFxJqA>Pxr zl)d50@PDWQ-}gM1e?0!BFeT2p9y(+0uU`MN_8!zBwvQiIuGi^Hh2UBM2SzQ^)gN2p znZ2$$JEf)nJtfip;Aknji&Bz=gaBePe`7N+JS0LAb}ns0Hj%qo>(eGRt7~ExhMckx zj1MXYZ;04gUG*jBe+j!>-~ahD(2iz`(AKma;jd|~ojw)?z0EK@A+$r-Ku(UX-@hgE ztXEyXZ4>?Tb-#Xfu%-07p6^Wli%|c^8CuhU7q&EV`~5#({-5aK|JR=&ZYHNSUWJ*| z-#r>#O_{iGzVqKE`t(pu5eIMlYF26W=>Xa!5OQ?1s(Z_;G1j$S7LmV{+f0-F)X~v# zd+#Jlt&LcRUv8F@$kjwv{7XMv6^3bB#`0_V@*Y|9>yph)M$|Aq4Kb}@WQrAzvTB_s z_vL8u<-e|m(i^2_XFkvmAl^%gUqd@QUv#TCwp8 zsU$+eg}+(?(3O~hENGb2a?tT~!BPo>-jUePiiY$u%p$O^EbLw&x&a^|DNUIyhKYJ`@9)$0njfO2P zdQHHKk}bv)DR7qtlkuPl_`H>wD_wCsR~sTB6D(%4jvyiw{z}f|JS_ouf4x3f;F;+* z(2jj@vvyQ_?$TjdX$wQD7Q0ddIsT)X07rkLEBF5HXDXEOE0WKsKHUtGc_b)X{JY_u zJLN`EgQU7wd3wTF_&*fiawPZwx@6yyyhQ9C@_D{qJl&gL@JkFbUO=;StmO0c1+|1q zcDBloD#bELe8inmt32u_VY0cPK5p?uHWLGyU(xSh82x@UEv93p(+#JrZ zQh%B=FIj4Y1LcpB3mXW`tsf+Fdf}%gT?HvIA&!Ih*Jkbjwm$_R+2BlvAPK7m4Qq#$ z5>4|9L547r<6+Jv`D!J4-jfUM;`f2P#ZBEoJ>?o!xh$&SKnylG*BJlq1a|ha|L;ra zyyA~PQoro2Sh*ihaJ89*ORhhqMA9aTyLsOguC&$_5rbB6aP*U1fE;Sn`T-^L&4$f; z4I@&y8r>=LFdDx6tAKQLc=FW^j(P(<;7MB-7jR?sHS-qT{IPMDSB|NM@>GYcn%F#} zEhRBEVoNJ&{iyM(@^cHcn2ZnNlrV?DRYP3N&u`;~qzU=h8H=rudLs|S7I!)#drTke z-j!-FA&rOKKAu_C#K~H9JiA?03acS9pb$s#MbdQ094HS+!l%G1G`QbT)Boc0?Uhda zJ`!cSk;YMn61vv1byt)+ zGz#b3h*V@Ma=R@iv1_N29YS*`wE0XcM?bq7vHyp5M~?R?u&xhH4xpiI5QkFa;-0MT zoHCO$q)s`#HLPFS=Ot3Bne*Axsg%F=Tif4RjV!RfW4hi-WXYLLv-ZT#DF01hCBIu6 za6Q}8wtaB=^G1heEg^4sMG)TnMJIYCk|4q5??WvBeX1frJx*-ztMMz9y@HJpLT 
zq?7vBz;xH~{txEJjNbbsy}m1=qb})$`l7y{-mq(}+=wJ&8h(_zo*C4R!UIb-WQMjF zuE`h`S7YUjCaWaxn+-)C+;y0YH$|>3)d(^Mmol`~f4PQP>20EvcX^!o_&yvt;c}kH zlwBT~q*;5gSJQ8y7-d=rb8tmws9MsDAH!u+mao0G)ZZ*-b#Cyt_XLypgkHV!GP)fC zkSui`V6jXaO#l=>wz^UIdrbIC#hNtX3I&F0WB*}B<^Mo^nyBo{iboNO9G%vXEzkxB z5f)tbBw~UEyGQ`njQk|a+O^DSHWNMOn}-cHUs^j#N=qDR#=c*Q%~($j4nen)p!^r{ zrP&}URV`OK*42qhOT2qmOrEjsrsX%O=0?z*0X|R%_2u;C@$5KbU57PJcC(p*uWN6u z+}|VH&z1bsb{!-@*b|rUaR47!8vE{KbbQD3A8VuhkhcJlvH)eIg7zLj?R{eP>1Xk1hC0$8+{ zm)CdgzaTEIfmq=jDY9dscz7db+pbpQMy0T{wd3VEc!`xnub@p^3=Xf&yc9HhXB4>D zm@47nC}d?3C{?5=u||2K*rVj^$DFRHei<@#cH9Z(iH&?O_H)s6`XO`x>4N%4S0u18 z>}Tc&`jk9kOw51*Zsq}oY?3^ zj|;qTH&|EcZ}>upuhVXA0o;Bk&i6J#i{O~~=8MA^uweLujRkM-w!E*)jbDEU+=oq$ z1tj$JN5shd$X5JL?dW9;RovvWM}uIm-{sBj!?5f&Uh2*DKzoB9=bZBC4Bvsn|g z$+$#xdTMRE!BoSaR0{HZt4My}2F;L?>x>xW7|20}YbiW%A3(^-$u%@<^oKqf-D$+> zBGUQPBlEoDG%#dJSqIt%1}EVxtBmq|6+6{4c7FTnGQ;0Yv6K7v2J)c)F)H+K$2aa2 zY`ie5B^R=5OTxE6EkxEg7~OQdNa(eu{UsZYpqesR7Zg#~O$vIV`7k8ZdeQ0d2MQf^=M4qKz={r0r{VZVIx3VTveB*O z9U2MQ8GH3@q%%b$`C?fe<#Kgc;<7uYmgeRtoQHgSs875>?R>~_0ZU!z7=BZG40aXS zwS5%wP2jQD2rBa6zC!Wv=R5LoP!mG#KRwC_U`&t8=15-`KNF&I`&VSc+1_5VexhjZ zsyBq0wphiLNo5xr8Js<6DzmMVGm(fM2;W1qqenM0Z)<0sjIp*AJ?WE~C{X=S13zbb zyKSc3fF6!yL3&j1!{!_3bdd_83Uz$Y-pvPgn|_C0Wx71>j%l2ZTY~Ji| z#aq^}w)#xe}tqdaJ;`+G4VfrP6cnIK0PU(ob`M)F`t&!Vj)a0mfMH)xN3K1= zH8J!1QtC#s(@VV5HQgTzk2oGCFU|C%41YU@^$Bz^+MB>Hf0|voA75j-3(l;#T$7fN zk`&LYEgHxPZ2KQMkkNnSKt_jl*Y%C}-mA>VZ85&d4IH1Y^lGZ}LJHS0D z6QciLF;ou$-1ds^`YL@7WB>&onmCwOoy3|a@`j+)!nz@t7IfPZC0r&6LwZZZWpxKM zv6n;K{i?f?Git7v-F~X6?V@^$CRXNN7+HXmSb@70tQMe?4I~OI)W@1G(Sa(espfFL z#vSd$mCtjgxpsdQbu`z7^UEl!*yXv6UUtAD25Lm4aJYP9*`-tWZ~cN?u#y9xVX2HS zA=9zi3TGG5%|xw5!V8PkSz(e2QuO5xHeHBJMz1iuK&O4PB2q-hi2a!g8(E1C8$}Xe zlGdpIv{*auLW_}Lp(@@hKqUKvxa%TB#C6UzvNKyOTW-ny+=+@Kouhv20SW*mOaFj( zwa3tB-Ijmj2|}3-V=N1EbSV$pgGKJ^c(q#J`&o}Lv`YSV6hB2!VR?XFGH;KBL$599 z8HNvQ;co2J5HBE5_%8lhv62SPN!w^|E+^@P?|7<#NA9Ra(pPndDgy6?OtX-z!eNR2 zSL9UJOgG}F_5qgLDPdME1*Oz|SGm3e5+M^l`BSaf0=ip&bb~&CoDUtYp0@j}cvgRm 
z38kK68O#xVs}`hFmhUCsyr$BQV19>y8eckv&YMZ%-_oOs@En~aheKoUg_R)~Il>5i&0satifd0|} zk5VvL>jnkeop~g|Ij1YJ z(_6I>Dr?pq~P2dEr}b6UI~iU|bkh4eZTPbrS4IO+el7r?G7**M&2 zo})HzD+wYtUGHP738-iqyE~v?Upd`Fc88}fWbok(NaiW%KfFie4`ZoNb zh511RP)PVLP&9fU5v0!NLZ4X1{Vv^vB_i#axi7*re3E#ugiRv+YHxIN_`-nWam+@& z(V07{SGj9yj7_-kC!sB8l4$-Y??jgMXV+$}50y4pZ7;iDEcr4bi~}gXFXnu!Dc{rs zUQ++Z9ua>&G~tXqzFR9@jxfy#+SdS4s7cm|22+fTw}@g)7b>7wyT}unNi;?%eQA(7 z)g(kIdqeaoU;TlI$Cb<>tQ2n%mEq;SkphwYGfN^m=~Tu?piI$TwWqHD2tC>?H_#foPIyW8ZEjjnrTF_#28Gd~IfA zak(!(M;xsEgi?V)zi%eO#||^tc&fDS=I5||q(-7Zq-HgXo&_g-+B_l6*SJjAPGp~4 zO!TNYHUH+O!qUK?r`Jk^SkZ@;0?JX_?5Ewy2fpWhrQsJN#Bz&1khr}y2<6)6Lcn}{V}PI${+7dTz5USo^qt=CNA zegvr{dQDo!=5THi1Xk>C^qN=*7V<@Bk_d3uvF=ute`FUx2zE$t#szqz>z*#5%s{q+ z2I-kt%%|*b*WEo|dz0=Bo;c&JR!D5Vy5YTO&HWIU4J6JSd(}=CldFDUrP)i@DKzdW zbgE`L@~N&olxOzTMClu5H~Hq+e`vdP5Q?3Rz32QI^yYQV>Ai*)x;xhH&u-G#TCIHL?MK%i5sV2Ry#FV**Q_6bPa*4!SWFO{F~9-VW7-b!RVR zfUbw^Z;OV7K$BUPJ@|1#PJyl9!Tq3JokxT1c{|r44I=3H^L4W%3hmWfM80QNw!~8& zB>a_g{IlB!^S8WnharS7BahI>sc%qsdvKP`gy3=~a`U>A(2F<PEBDj@NOXHV2f!&#ad;jQ^YEK%uo$2nt z;td|Rm)an!X8rSl%KiE4(MOYJfrsFKAY9iU2-g+h!rx>a!SC;~x-+i-`dA`g67!BO zU+1XO=BhGtk1#y-z4w*X64~tz&uRCf!E@5nm5179@dC-#oX;pJ(9Ndh@e%XdMvY{( zjBq|>v*Pdy^G=$)V-!YUO2J6CFw^ta#WM?tQ&>H{`PjG6>j_ zZCZG+Zw6f#yReSzZuh8hfd^=>M{gT1Z%>fLoB;cx3qs=-UX1cvvt*t0fzB9K<=Rz@ ztJ!xqaO)%W9M|r7-}bR;P0&_%z8&q+p$YuXOXi3Sn2SwsK#)pk)qrv7!;Qo`YYRr@ z)B0gx)J)c+3D3!BLS0!~uMxe>X98pThc+pp`S@^1RkB-{=kx4_DQLx|zh1y+su@?j zw0yic!1b!CRI~t9et8OFxHB8x8+u&D-|D_!mvnYqv(u#Y|@^W}qO-?GvBcK}2)h=n5qB%*dApYt4R@jU4s|nD>ve z-mXjGuVtfMkljpMJtHl8e2)CeN#Wd&mO^DeMuxvo3wuGIqFlJbn0hycH;L~Co&pv8 z(wKpqA?rb=bTVp`w@+SdwpdZ7{17TNZMRQwz1KOE!;(5+2#Ar*|19$elX~|kQB%z!%)HY5?iP)GNk5iS zYFh)7D_*lV=UzVB^>5j)k&q1xE%(41kE6993h$<${M41u6MOQL%O?G!3k{N+-rxR+7>u4{Wr@jRXyi#Tnom+C!~9Z_CBY z?|#Q31nIglu(R0XwDM)KTuh59y4K~IWm@OC$~X@U1g{=yi^)*yaCt=Bo-W7)M`h%M z$HqExZUO>{7G<>9ta56Lu_v!z<)$zY_Lh)Z2@6kRsA?XRA>nu*6x+E`#?2@8*}Zd4 zxYeevFJ212Qt7!@^-EtLWzgiXZ6oeoRxS71h!O!4@^QY5feb3~42S#WNd;)eAQrWo 
z-;LnPS@5|hXg+$x_}|f2z?cqF030{vYOdrti1_XNW#@V2GWev@cH1Sr(h?|e0$FW~ zg}U>H^I70i=DH{SAr^VB&*&X@$p2<8<(I+CC5<)tWz3TjDJBgCM5?GuWa0?EaILhh z5r*vV-nARN?g zER(gXO9)(`2+$M;pcb8rc$vRVPfa&E;CMT?y&iWN%z$R%l&kGEsIfi>e3oX*3O%FE za?l=0QEF6Y6le~9-O&zt#{5gQKKekwi}ZsBVz@je?ov+$$b(X^tGa4Mt4 zXPsI^Z)>aX6xo7u_>n9O&N$$Rt5MC8L~Nx9lJ4w&HmA~tK41Dc zdFm-GGrYTcSe@%~Ovv5H6}3pMonEC}H!y>UF@#U=oZD)}EhZ|+3dG?5r07JPzm(>j zqQt5;P%{NK8N%O5P#{?^xbb9=PN5l=oMKY~5F3hOBJ*if!T6(+qyyI3^-L)gV3pQy z`rrVpiv7I$n^_Jj5Js820pYkr2TOev&iaeMQyNqa_ANweSL7nkQkt>XBd6ER4g2-Y zoZi~`!_FL{X?K}He8C8^Vt9=YCk`3^-x;iRH0&4|=&-daN7P#JpoVVOJzj9cvA}Jy zYKY+n*`EdLZ=%VLwjXStA=(`3OJR??nsUR}rdtK0oQ_lOV*#6YUFPF;@u&rgWx@jY z*ywBwpI}rksa_5{-2l>t;@6sIX@2(MNSPuA#}3V0yYyKkQT~W;d#$nt$DJjSB-obz ze)!+Iv~0dVPMat!MzVij7q)Sx4`(9UNOOamKHL3%lfkh+#MbHC8NhhFpjx0%h{P~@ z1{wGnrM>lv`1R~6kOOTRW~vnNt$q9h_0eylUu92T{ik_;d>JUZ1E4jc#Ua^aQ$Zudx6fuUwwV=J-`{(hx20qp%E!w2$&93Nl}z9I80%K;#rSI2>&L+0?Pn^g_iSR^!8NslEDl zy3J2qVjT_TE$8S?kklp+T@I%CH=qTwf3=@=9KGu;3*IYj9y`5!+6jqn*(>(?p~$_= zLH%n^wQ91NBhP2L;>-SI$6wyRcoQs6*(_^7I`G{x%HRi&a7`x%PR{} zV`^9U+medfLH2(Afk8o=6NcZh#~0QIgg{~*0Z@K<44X&BZsbzs(HW5%P$fIIoS}j) z$cD}AV6v0n43eSk>||G@Eq76Az=VJj+}^AjuG#qXG}kX`cS!^&wYLHASxPyUfktwC z9%C?FR$?`qpLQ{u3xu9rl3}NxL|~BSI!S+{zbV-jr{{4>e7h31HIu|x)fHA#awX~@ z&;_P2ZyM}({dREtOhGk9fg-xtPEf~S^H;@cNJlm~DVAxp{QLuHlP0jJL=knkMis49 zz2Y*zqw=m2lJ4pfrRD1M>3%mUy^e^oIhu2PmQ$@*-GuqxFuz;~AN~Ftxf|fF@mMM0EfRF-)Muf`+#d2HGh1K)#|$ z{!qpI-Ny?Kl~mIq_9vPB(=lr2o@sw-*!BvRcjU;UURcv;x*H(rXkj$TL6%224gR#y z-CQ&N!Gr$iYYo)L*<(N}X8gFkAfVIE&tq+9EM2TBnsHbYHDKqkiuuv`LU87{)zpCX zbd%EG?2i#?{$?KoCMsH7Kq@X2R1Xw2uF~hJzb2!*;J_9 z=aM&Fa@z2u$BZdXw-|(^8%Nz*kcpR)vvtU_G{aZmeEjXCxH0v{D^!)JNhlSvSvFNN z$v!G^gisRMQ~UQyH8D(kYs4sp@$Nz);C3AW3^zfglV>UGcr{PGo2^bs`Us>8U_$SP9yo>>GV>NQD|_PDTE(|mc_s zjt=Kdz`X$_z3{8eZe6PEaokV(tuT{7wN_c*d1sUHM34AV#N-lCNZG|Luu^|M<6nc= zajf!bBoBGE44JfO07?h#=0q@8rV1ZXC88J3EOkw>P6Dj{SYO1?o}RYk8$3*h6Xw9P`*g888*u~^6oC>5&4)U`Zud?N`!PT#Ql?5qnB`!`)6%fh zKq2KZ@+F>)>{sDdT)#b0l#O!;=Nm(NjLHJ?Bz;)a%*)K>xXfR zhButN4lAexH;Yn5t>zZ 
zQ&?0ar{GI2`DB0>1va)liQS3llFN#NP*QVg7fxeC-`yNsY<3NWe59O?DH}C?8>qH_ z7LY_qr6S|L>bo(V_m=(9%1fem3Yw;lKw_;79OBew^d|=p$N5cP;F!I5e4K7obh~`7 zY=6HPrTbz}CrYRzS042~Px6c*`zq8dKD9hubxbZt1&)j|cBv9J);K$ZdFCLV(8b?7 zndgT&r5jg)G@|=KdV(V#MJ%(cdD(<1X}Xw<8I>evn=yUs`0C;w_4VdW5r+b+gV`sqW5?th6_HJkheaJJbgj~4Eywc|uV=zB>kK#}eJY_Eo2%k( zstZJTgS`Zb-*`p#%0-m8*6I&?IKm=0B(-pY$2(MCw~K}|GBoNwBYU}^o$%m_;glH@ zS!|dM6`}$4$O&_i@K?HN*p(bb`i@a|!n68;3BHVg%qXJV&p->}Rg+jW>8( z9faEF7KomO)>uAiSWk(-$~eE(*>P}GSfSmX(Z}u>`~%IuLK`BO*iplIhImQxHWTqb z14w*MIl>C|!J2cE^Q<#yMOQLX#&_Pj2@@tgxDoglNZQ%Ao8m{6TEb{paSkQ5SM=wI zw1J8c#J1N~e&4gj4P3{X&MVS3UG9=h#U_92+;3hnFxePTn%ABLGLgfA>=NDyl=szE zf1j^#qq#li4ytlWHRu?y+aGVh_T*6>(8YN+k> zy*^%mNPuklHrYe&IaRJ+X^$*aZYEzZOcH^Q6H$j;)H#%pU!OW<=l%vk$blPH;l*9y zH3y+BG1IluPOMpP)jvjII#tI>-K^JLRo>o(+HTpYk_{=tb<=obev8)wr!)6j&0kvZQHh8vDvY0`_G(feecHm{cER=+N-<9^W3_|Ij*rq zd%lhv9>@&)e##>DhVW~)RUN<4d=t${D&yvA3-;!wMXL5m7>zFV3N9xexOA)h*I&ou zU|LVhjh_PY2RWDSxCYisWV%m;4)i9M`seJ(5z0-_*`H3|im_HHB?Le&JH+OqI`#Ti zn0vXH^V`V|xy=V1gH}D3fqSkQ~ z#gfkpSfMUVe6q2Z^CcCHfbvgx%!9qayfx^{N}Ovwh)py<1gR#sAN=@{-plDEyE6+o zX9S8inWsY0?_Ue03fMn|aA-4oLa9{?;;NT&cM~q?P50Ftt!9CMwIwxqu1$V_A?Wlj znqCsxNcO(Lp~-w*_CK=*3n2VAuI~p){Ef4L>d}5ikrHf${!4WC=kNRKUiUlWk89>t41s#l#P6xv!`jB$FIs#3+2if1hlJMMe1|4KA#}(otEfR858oY z?E?yB`*tqAq7^{_4nXdRxPAg3>b;&9%%2<2nEy19K!z55eueBEKec4|F;Ikt(|qX% zjBi2AaZX3`{(_h(!8o9D1D5V`5~4SdPHesQljh6omwwen>wcrbj`O{*64gB}2S`_X z+uoItRNd)}8$xHIZNnu>8GWxGDB+nNaMhc>?GuHa+ZSi0rVk!@1gdhiCeSdK4vuSy zT-NW9Tuv(n{S~A}x{BfaoNmAsE@I#Au^Crzj9~Dvzb}N`UAm>vjdZ~6^0IG7g90lF z69c1a4p4PzgV*xh+?;&R_#G4h8-%y2i|!{PpYG8CTx-Xx?Z^=nfHBD+1ZGqBR}orTTm0&8kOq3xV62CWGwS)^1bmh|F&kQQsS zn1^dBPZWZ7L4TU|-1^QlblW%nx2}su_zy)qt;W5Ojm4yd^o@8ZMi3yaSo68^r_kgNt^vc_XV&v#Tq!Y+kC)<07qX^V)p{xY{eA=sghT71r!%lG}};u zo);WV??u@uwYCo~CcUTwa_0oDZ*6}$&faGdd@g&ZyK?8}&ut$cM)mR|9?!?yUfdd0 z6NL-87b9SNo3^+m^OrFi$v1z~Ep8o0cMLyxe!|Q$K+kY-36+iE_%y6q!a7BYe!JPx z2J{jkm_2LDmyrM9{E5Q=9VR|L61b@gKfuk=OY;w~SkPF53ugx1isJ?jU6zLT`kX8M 
z0z-*<4B?~+r{9`B=2_s}8dKz%iA;zZ@C?VZ6(Ol%YuJJ5v=loEMR5fL_Ua)3uo@&Y zYlv3(bESISm9l7J@`OT(j|OH_1kK>7Ls4XA3E<#VzcLx)jM0Ez``ERBjk$cRt_^O% z7T^6OCnYD*DhZjj^egDc_xAisp&kG(;G&zbc|$;A9`*cSkdV*7+uq+FoL=|Vcr#+g z+0pLL0Wid}6^e^lTEgYb!YB(UWx{oJT(@|68wDqi+?Rta%hTfN%?*gt3hs9+XM z+XogQS{5&w-C<<=J}0Nzt;4YM88d|PeV;yro!QR*P?Exg0wFD&a*`Bwf*dtO1aC~O z6`aC^x5o+2z5?D2-P3!TlXoeW@@d-U35TFG{q#=2Gf@8JGN zA-{zc_f6Ij_Qw3r!2bKoKkWWF{r>+VW&Y6wJ)R@_OeJ0oVncEj5QdeA^> z9|*wp+QV@5=+Lso#PJ=d?b&~r-B|*@lMhg|+Eh4kiAbT&DPUf2GKDcAsvm!Oo)f50 z>%M1s+&881Y3rb1n@AobrqKJkAubaw=KtMU;YQ3i1x6N+aa;&YcaDfPy64E4?EU>? z`?7ox|3j-7sbB2E+hgHX1FG$Pb`tDzfXBPxaK9@bHTKGQ_76q}Q(BX_w6wqaRVWHC z@3o_g-~7cp;K-U!^&F^B==<3}NqtL_Z)DU*A9B!Yk(|Ogh(qs2KDulwgCm%b`8riV zl(+8y_M>`@ zmB}rDs2N}CRpYnNWMuWTqGFUty^lb%`tcFHoRsgj_z_hid^vDCtHRyKGYC;RT70b8 zj-y3B>lFk?zR{-mQEKO7;EJHnB72K#$kkf4_s2PGqp(U5H@vt$@9bj4(Q&gC)}qBm zR(p$uGd^(GLXOGPtz`343nN!WQi@>(S63_%;UUF*abajG!u)!2^iq8hlDUb!pekm)o}%!< z+55{IqrVQvaBZZjMYDV4GqDIx;yO|XVUJbZqT+*S+mHST1e~HKS-WM6zR1i5{E&>) zMWS_aacIccBG6%Bq!?uK`Y0YmNz`(~q7a4JnSCrGU$GNTBJ#q*q(c#dq`U3AA`;5^ zjX%<`QcB`+hI+<=BI&J#MQhi`X8iKoUvX-r!he*MESr7BG3_JSNC${T7v*K0Dq-UT z3y#N50So2Dd~(0U3=pa%cUA2zN~cri6G;B=Xbvt+IdD;oS}S zIBnqRM9LzNN3eHommDBEr$|-c+Z2_~=F-VV7JdA9|MG{C>N#H+ z{JFU{+*jkDS>a&3ze8mL-xtT@46;--G zqibC%sYsOR^?n(%n3IV`ihj%BM@MoMsgj#YMOG z4^CML^5-UEoX^WSjaSWdELVK##*t^TPGh+8vy_OO4yR~YUH74a{Uq4Jd!IyQ;vaxN z@kK_6<%KEGyhp3Wg;4Ick7!xkUTK>C1MGN0!V(fF=DfBvGU@Db>B`I_yU^(-QD0-= z63Ca1>VG(&RY^bX*^mJu@dG3le?6|zUd)$@Zr<138|jPUUtrdzRyUb%BL=?;)i?_KNY3RjTd*8OMnkno|l)auat;~ zUX2rDerdJaCV#fw1;2xkwCaRYwuw`na=W7S0D@!F?99^Kwx7x>Y&ukSpH1z(?6RWq zxFo8fMVWQ?T~C+ukduCurJW6H2#kBJ{Z*8xne&wd4L9cp&!sA-T6FqK!4VICDtYs{ z%+>j17vj_c%=v>il!-(!7G@jueb*JFTW@yBu*T@bMNPuGKh5TZD<2C-{H`e!IlL%K zY#$3Cq5zeQyvfseYniDgSstyuGfd|(Kw+tTb@C_XXxjO@@l+H9T0P$q()eN*8B#~g z|KnhyV8LWCXM9z^Pif=s`h>92{u!&?=`PT0t&N7RlQv9On_u*sq|DgNY7_XkyoESX z%Ms$q>(IF4LmqsWvwcx07P3aQpzRa zOO!KVn_&Ad(fP)cgt9+X_Jw)d4dauWpJ&aZ9X{UsQLlzlE`;kvfEexDw>^$kw`Y{d 
zzjHLrS0Z~{N^S^Ns#a7Z^S8@R)wQgjPEChzNusme*n3CPta6w01tHVNHwNA@ZSG3` z0zx=!4!pmz*F(&c=E=}&KOWY?*m+~K|6G;yAgC)TKF}KSn*D6DQA_%6IOXFG{_E+% zLYOS?pJfS69Tab^=2gjzzX;e3M_e{=Eg>OwsRR175jz?mQ-_#Qd*a@B^=lBC0Sfjp zi~`~i<)(bf%ab?jU4TZBU@S1SunBfkGH69SU&cB^R&EpYh*LfdaQ zpw9Z;N!Gh3yoeUeBY%j=TY(Q%!kA+He~*hkMl)M8Yo> z=Q>L5_D^sU__oF;9EHF16wsu{RxbJkg-7cjhW$}>eThN_byY2VDffb-32V1( z(`d^3Y8+3A5U1yN-P{r6)DV*AiLHgjauMT1VYyA1b{+L|`%B@RRM?dVuvAnqlk2Q1 z-NNbTvZ;r3w5kfDe)**UC}*u;zWyo#_N)B;Q1Z`QSY;*>5DZ22*5XA#k37@g@9(O8 zh#EE6p%bXhVlhUrhI`mOJ?7qodD0Xj`klwH+L-YmY_E@XiLdQn-6S((yn>?Xxfc-V zgN-Etbu)PP#&&aj;SmVKUuKRU|G;46i5b)lsCH74?~(+C%jyE1<{pTpT%IDk_ArQA z70Sp9EXJn6;&+&2^G&wX0PF4wbsJC@6jQpWfSh40`dy8{c=qua9gjM2bG&bUZ>0%t z1u;34MsXR6Om?t}G<5N6y8JT<7KXeA><2%CbE3NS!88fX!$!+=ez<>y;5*iN-eot?1_R??ivd^ZOB%LsNH zv|EhER&9DW*DTqtcxQVfyaDHuKn5>r-P5TC7H9FI#9C-?1-g z91bu-7PIBu+=O;c-RJm+XZXqZNwNTjY^@+)(Oifv~Al5 z*RV@fp!qKdy}L3KZe*E>I-Bpv1-=Y`%jY(u05MlvU@bq$0GsM=%eiqrH&=yY7F@d2 zrN8|VN%)=GeaNgCaJ!N^QquKt^h#CRNzAVKHf%i&B0It?%Jdd*WORw8$xHcG;eX zp!-Rz6Nc|?_*~3i{K{md*_7S8bQ9+A?r}RQgxISn_n@2Mp>$B?J8piSX5Z`Zfs|Hx z#Fq|pEZR=+2la5S@k#tK(}mcIYw*#D1i1uU;6wX~X?_XB!pir^1AJ zya;e>qcyGiNs>s$j+EWd6dhy@`OxY3l?pK3w7z-|1QxvoxMe4aGLSj~jC8mopza|- zzQ&X4zFsq&>k8EV0Ye%%Mv=ef%K=D6f^%f2&XcKxEKH#Fb6nApnOW50QSj~UMQo7l zuyZn2$K$OAT|w9F4_!+0gxHxq=S=wu@}2W$?-AwqOXl;z4Qv1P{?}FD@`o1r^(0?s z-3$n53Egm34SS`Q`r+4ZE5DZ#UU-L3mC#kX=nRWBLc~MK&N(48y7!|FcVGX8Qao0; zmlMDuWBJ@q_)_L;7K`s-c}>cRk*)a@0z-pb(CJ>r z7SB|y5Jb3WiSb)b5wM;JhJ#$VV!G0QE5b})t6+zG4k|Y}Mbu(>U;3_Fe8A&V54!Ru zL`wude%C(8zHqK)Q70cb21KBya{ok0E^;6dzoxcCS0k}(4#B~@3+T%Bux@O}e5VnH zU#S|wUg3f5Rtd$EAjc>sx|mX!8v`7WnR=_co6Ek@4lX*?zh^RxKG;5_$4s@q(9q`#$+o&~MaCVW8iX8AR()Jz#> zSmur4Yn253>yIA6fR8#W^k+?t-q3M%sb&vFu}vFPOYgDf5nb6XM&r8AU$w=_f6rI2 z|Lo^p0te;gw51TFNK9@P%7*Oy{Uk{ExQKR&uP9pnq;*hr3<^+)h+?k7X!?61;)=#2 z{-g*sa$ljq4-eNuF=6cQGg&;Ab5}V^8CZ@U<3JO+m{jbQ9aUJ!#fuEq>^f3YN1DO3jF|9IA9zA{>-?-C zVxtHMq&l2%*m_6-3G&n<_V80=WVooNpnmoTdqZ_6#|-JH#CP$iti_ 
z=z|R_+O#8YQpNo|-HnJVZu9c=s-uUi>JKspNTGww{Q!JOLURiGy%vMz(Yn-myK{c6 z25LS}2YORQ9`vtdVs3=ILlkxE=(|}Sy1>RvDlpWQU6m)&LwK{1krR>sBtGcEMlM_7 zQu@sL^qCI#&#Lv9G*qq{nT1I3@q_oUJY)!xQ1~q{{_$n5zG$BW(QN~dhMcZ?tejmA zql_b;4-qenyl?yzslC!BZ`+B3{kv2&3YRTE9bFQYOn8PuPSxK2f#_@7RPtxOjHpVv z(ucdTa*-$YoZ>jouO?!B!F?$c)~X_zHM!WE(3eKbRY7y5Ro`&*$*RWC^Kb7ZC)}1< zGE6U=E?cr+I)<_=Qf9H14A4U%;BhfAbV0)blGrM_Ff=XAqIN&yH6E~g{08&YU~X@S zAjK$Nk8!1U3%{*ql+(j?*br}FBZ(Qz6jvox z%CtBBDlZn&m{rtS6_*hb`?S}3p?-8;U}4{9i^HbS<|8zX#fj32q8oaod2ik`U3B0{ z2$@V2was%SsHY;fwRtG&3ezwEP|ZxwlwKqk%S5Bf4fWO19UB+QOkZV+aMJ%QQ=7Ax zWBmE^M`7*MNP=hlgTn(S=-DlkOSoXDuT4Cc*rqnEASh>Sk~+{^iB=5h}FE;%M(opf^2J zLAr{xcuOF}nzXaVWFnV;@TBI5eN5pgm6w8GTn^u?$rpe4UR=KE>;fXukL#7+OQqb{ z*r)byfeOO?13FM%k1x_c)@vTdu@fX->I)0d!T+Z_(J(bljLKq)7p=J+s9*EptIGILx}QDD~>x z*{4`Zq<@k;`B-CDAFuAj6>wEW&*lM`HH=nfOPxXXfWiLtqn=CEpg5i`%f44*;aD`lfi`*lXrX^R{@S` zgqS4b5vQpd~Qu)m(aMsIdG6!_MH{VcZPCs(mh zy5MXo^^c>C$*O0PIcZk`q~Y-E6K$-wWHeiI2unn{&o&N4l&U7+6jsD_e8Q;H-c6O0 zBtaBR8HHl0VP*g6s{flMm z3u%pmVQ6HrxurshQ!tm07uv?}=j5WwRV(=a<`Ite^+km*U-FZP-RCg^%N?ICawVER zgC@Zs6uCHG9XI?oz+p1Lk;$A%hHGL3Mqd5x3BH9SHH}zV@{r*i4?r|9d#N{Ku2(5y z2HF`PD?tN_dLrpwj}}G_@^P0i6Xg*$r)|*;=^I6MeTcl z-srwd1Zk2fg#k53&e0r=dsD7j@-ppzhF&O_6`A`j*R(gt&-HgocV+40alvU9Pu<*T zD#A01Cc^Wi1GR|m@=+@~SN7vgu>yw31>i!;SSH+Lo)P!El7fS)lSFmPm66#>z$+vw z-@587AH_xgK0d&kvsT&4x4 zQm&m)jztlF#-(9zi%5EL`pb;;%v>BWY&b%s?(TGwK9a~q?VBuqHJzDH0vSD5kSiaK z`q&U&7FXQNdSa?t!b>7L8@hu(Qd$UsuD5E5K$s62Qgx!G%&lU{7fLSMSa|s$Ew1D@ zq!dpXzOKDh*s!E}oW@qFe*ja6dK0$E)us@K^ULgdhSw19CfRzURj7`;8z)dC{*FuR zR-r;jh+O7}RK%mQfE<&?+T2=d%~xoFDxi60I>V>r2_0glb4bcex+7iLA0Tzo^S+Y7 zc%i8t{El}drhVf61EH82g#P1!5QYl4kHva~{jGNEp#0bBe0yp+|?~`j~ZWMLF z$;sY$3eZe#TTDwAXeP)O-}c`Y6WTuL-O`Nds-xdWU=1!Fn@=XYQXcPg z7BGm3gkb}SMva)EVhh<&^dofQFypL1NVxUeeXhrc@%oeEHUQ<7mC6R~Tsh3H22CDcNn!V7E z7w`j}O0MS;-;plUM zc$S{7r#~OCX7Y*05~*X8ti^}1L;Q|84|c2Mm$hk-xy z@ZzyhLZ51^^)|lC8_X<03VwF!H66fk3+*vp4e0S+T0gehf3kb0b0AIrObJw1P;$KF zML66G(cdpzecl^{U*TvQ0IcC7R{0DZ(>m$u%0u8&`~n=f@Y=f}G966e^i%DeH_+8O 
z^b@6;nF(rWKn54f%uL%8G+%N=Q8-_Q?jlJF!0*zFS!!@Y9Hfy%EKAIo^TJh3_lT%< zf{>2vsJe#Gok9(5P}5_@7e4*LLvO0jgX%rk4IVeX z(ABlO%|8bt%Kv45^!eghq|?zhsN%xwop%~9idt1*(Y#6i+q)(b%O+?(rDk{J6#Q6y z6X|bZlv9cZ1|8fYB;HZ za_xRv#Q;|5UBF&tQiD*RE|4%1nzzwj`v024;8avF$rnc#dzI|n8@L3%t?nMB3CRcd zweMEu%u2EL@ly9vek_oUildRy>!@#PD$flidYwP*;uSwyK{;hPr}mt1f?yMkc-Sks zQo+FHh7Is-k4!BdZ*;-_ZLz5tl;dh7q$li{>K0Jb+#IDbWQl{zp#ac(9Aizq+%Ri` zl3aft64oATqW?V=$+Mb6*-IhpyLF{I}!+lYx zD$?ikXn%2DO<5cnXuFdky1=4`gby(d-CWP>&J|P=viagMUd4>*WqfkN^HNCg|I(og@?}E}fdwIKAVHC-Z3x0IajNQ8 z6F$Zs*mq+4lRl3uYy6CNl;otO;x{J}EjMf2w3ZpS&2|UJWL};MyuQWj-YP3gFndPA zYDk^DjL*||oB$Wu>qq$Cp#`j8SG3>qN%uG|t=nA=8NhXCCfcY}se-Xl>Uw3}5b2Q6 z6X>e8mHUtuWze&EXcL}^Vcq_x^QRZ)W}iYCu2E*v(`7W_Y==KGsF69n1s7b;*9Tfo zC!VeD0L_ANt*n*|Di6g(pA{rsw$LFx`%S0pL}o$%_a~?tf-3ss?0d%n>}s(tU`>sw z3^FoOU2o!C$n^+6I&*$n?m+})Fzwxd-V3|441de2zcG3E!gY&#uLsMDv$VSR=wbJ ziZC4F1E<-$9U0AGvyD$580;3YTSLf*sQU56mM28VaTaH5@1Wj|Kd^q>_EPFG_DkUQ zd+}WFZd!4BpM>_BE-4WZk6L7(b3p8aF+Q@NkRsKqj=W)*`+zqDH|`t4Juo;OV7o%s zk95e79x(zpCS?XC%r6|>6^|-tw>leg8UKkKV0Unc#07jMT{Ex|f=6)R+P)@}nhfR zEy%R%?nJL$$H;DP4cWHd!-}@hM#LL;)dn_DK#bnt#JBPJNa(CX1F2W+Fxh=p#jJ1G z+j$~cUugJSH$5l__Aoq$a$-^>PMjfMgbghKDarvIxV@SlOdhyWXCdK_l_;em2>_ zYg{k_DmSO2d5&-*hE$$NOs%dZW`SP#(2+`enguR9e8*RJ*`O&q+`vK9SxL+Q-U>8HDcEHEi)Hr*^_XqVFpQkv#7 zwp=a)&KN<=#e zntyB2wf)LAT`uRqVLOdh(U99QAV|w~%cbq@pa!mHd(t&-!FjKy7u|wByN->@1djb5 zF95dmT_eac<(K%q?bkzA)|&Kyyn_j}np9N8i-5lcG4t;vMq|zE&3g_sU=zv5S~CzD*1P`$ zF;6`5wpZu(o#l9)Fcd0n5XTCIf^-h?{DQQ-r^db5IKgRLI@?ng|A?rJtI0cS+Dil5mZH zzt}>#8;e1|cM%p&mbdTc)#jlDeqh8v+Lz^4JAIKCWj!c(k{^ATzKQ zEOdHkLoTfP#^pL;%XD3nNr%9DFK?VO`Fs$kvh!d<2#4Rehvp=B++3Pyi>ulH zumeRkBn{gymB()Df*n0VL;DXrd|)j%(Y{Kn2c9`=LqUyINhA4l(9j1n^%i@O{P+_% zvJK?wfK?dM!Nnc;CHc}iFu#VGcg~V{H4$OS2H8^GI~as(I}k_l6-Q`iIH|nAN(uaH z9zm`3iW8LNPp|f)Dfeiuq!+NVLItQ)g@G+Iy@bT0G;Y`y?HU!>v5~)+ z`LT5y(9=4I(K*|^{>s|Uki-Pm2!VC_kV4s{_<6e}ode>(3(7gXxeI7T$vps(rJ`W( z*zG*(WSGG5TxB%dh!}V#>NT{W!Z?=~G;qiPv};Akd~*f^$+xHc0>D^04P?CJsHN|t 
zsSY5}1o8b{MuGA90@T+AV(NrN!wF^bCGY9}g_n^mBeKWI!{B|IfydVWUkCVT3KcP|5@} z9>XgXT07d?jQ2VU>O7sZmy`wjE?D3&GL{~Qgb2Cbc8_@3&Fv!d=Dbg| zqV!<5HpWF`k4IatMl0nBkxJYu?zG~DR0y1ZzYO1#9?EO$Y6;wW-Z*f5gCc)2^XJ8I zXp8CSXN%pE<48`pyBw2%D|)QRw9bEi*>C0V^sj;wIoeL5@J;^e zrn(>R@KDWjfhRdmwt|UJq1ZkL;978mW?&IcYFBJ-@3UV+Ly1(zcCm+8Fv&&;*_|EN zsL}gyL~k`qVSCK`poX;V)I2aL43wfVn>!BO<1f2`QX~Tog`;V&gZC;@T&>Om<<3=T zg|!O5zK@P1{wFZJZtZ=KqwweIEQns}ECZ!!Qg)NcLOkdK;d9gpcYIX5iGBSct@bAf zn~dUE8CxRjDj4((mZ3jTTXw*07O3)*zv-h2ukx*bvomY+|Ijaw z*{e^~c8X}5ygq>))8|Mdod&bnF}f^yxZgxop7YlU`^&phbC8Q-A+_@~Zh+*umr9|l zmh{&R4R0Mlv0gaxIhCH9qaN%7H<3S#e4y^LGfQ(C^G{GQh-m;8f5p7X&*(MRQPkp?@{a7 z=Sid4N)MR!C#wwMz0e!%RZMc9q}Sh-p3tc!Iy%Fg=<{#t*_$)K+%%dZsHa%V|GLIq zo9s}PvyHhB1scom%+{Z5^rolG1D?-LW`FLDqF6{vuo0LuL_kLqWVc9O5UtxauGMr$ zv!>b<2y+Dh{iTGMIep0V{czE2wcMO$wwa+;;;ZAGt~D3+fly)2E1pjmYNP9;AfCUI zh-)8&V|4Se9c(C(!+N(13`t=N7^JQB_&{^9*%(}e4XHDCv`J+=Qez0>uTh?ElWG<$ zHA(hPv;nGOoj5ultl~PCV0D2j|4lDkpq*g*oQ)fjc5Cfj{4?>pPvZ6j>-F=h#+qpQVY(IgeD*HsK&A@Q2+AVDMH>QGk(ns|n z$nk{=x}|^dt~ax21`;6x*v6q55=pW5;ysrsbDnxb&vzAe=L4j#4z$)B=Y1_4m{}#v z$3`l>#IhB@5y8|Od^R9cstQI3>?y3Ifiow!H*SZFHTxCPbMrM}!iK>2`EGX2)Tsz5 z(=W(e$aP$egU8*Gc&1dxUMIvcXM><3E(LpDj>S_3V3z# zm-wTlTsDs-G%PpN`6B7=Le-`-%3MpNA}6f6RT6;&BkgF)fL@t?rP$Ff{v0IpUs?hFxFt%xVO)&WYmRkKoLR#*KsxGrf<|_g0$uvHQD#D7@@AdZd2YEtg|*el9q1wc|%Nfsm#nT z$!l2Nt*X)AG??wKZuK~UdnDwDy8Fr;9Gm@+&l2e6h0Eo+p(XVt59^C4pq^cp#zar8 zFnd~jf(Z!EhXbm@Hm+>%B?tnCEDeO7u8wk;t^b3Huy+s}g2x+$!^R(FPEJ`?1+l?+ z@NH%1#caMnd31)$B3wYLbZzl>U(2s8rC(0H4b%8v& zRj08X3mSiA6P!LY5pL2|AV<$eppGPz$suu}Hytcq>)BYx`55?&Fi0t*9FPh#$7t{$ zNzs#sX++a`4l&k9jS~DXSySXlfs z42Y=~#RLE!lOx;V1!CrYrced<9(NtIcrh|>h}+R!n!RiPn#WUe=eN@3yj<0H8J}B5?gM!Y5n%efOV_QG6?PB$vD2kqaoJ1#Giujzs-@sW zlPr*27QmoL$@LH6Yx~5{@qg7YaO~8tksBC{C^K06F7njQcT(;ZSf;1q1F_9}XT~pV zimEied=?K*^ZyItQ1251diev2b z(p6OHl2ZScB#jFDNXCGr+Dya0X3uHL7%T8)GMDQkt8&=#P0)@L{^GnXPN%&alkFW9 zn+lyc^~;>UK;zBHJVl>0Xt?&CdN=5Q#gumB%jX!YFL6vkp0~TG3IW0G<=2d5%id}W z{AMzD&bKHyu&3CWe_8lW2~qU6*8<@vg|Ta|21(#oitb` 
zxDMB!rcSGetKTq*t%61U#H5C4;+!yXJhU=@Ce}&8vJ81(;$SLHMhKqLiD~ifY~4$0 z<+iDuFlQ2Cm+_Dr-T`F8(T{Ah+^O@9A%3Mv3y^u~ZMH&Ape`r5?S`AN_MbZ~T_0#k ziU4I9k|#$UAjO52Vj-a444YMqnvU#9dL;qnO6+#pITn#kNSfybyeO`F5?0ceQr`N6 z70tieLz#F@-Miut!tVHyOGNs$bTDPFa1R%Vo%f{-9Ni=UT3vO2K{S^|HwOR=uC~~2 zp6lR!>}gHz`hf{Qh_Q5KT4I#zwfzu((z0UINxnn%n&r2~5=c&%-0fehL$F8*2{j@fsbcHz>OD&m>*-3Kh{{UDX9EnA?+>I0pu3_;p_=Ow%0*G{g) zI!;oTI`mfpnvrJkp8e6863=ke9*ERJw6eh$EusWbV7K>5&r%hZEhMUcrvtD%`*c|8 z>Wa{E5G&~BkF6o{CFj#jG@4@bErtkSG@32@B9dml3R;>FDJd$5F%Br+TL6U=9;RM$ zxtnVf?773TaByXLt%t*z5Mp(EfrbcUkEjz23W^}TJ`mYF_mjy}En_;gKA0I8q1owe zTOiY)-e959H4}=|ab*V}O}+ocg|RyFpCM+~`WsnS#R2L#AfRSM+jVJdbrLIq)2rS z3KK(S_F-?IVgId-Nx+s=8f^M{8g$_sXej$@hY`~2zgl-5IFtXRuahz^_is%#6~67H zWgW(3OBJhvyeoB51^Hf{p8!c()H8ME4C)M}uKV3g#QX96R8q)g50o7;VLzguy)FzN z`7I{!OxJES1rQqjla;Djm7B_#bLV=ObAi!)A6dA;li!Bn#Ot zK~j&7kqN^10XvI36i3QQB3u(H#tR-;K#qHf+k!ZfrAbs;00)yXHiZ7t1wccUt(Co_ zcW4sTTL3vU1y?aOl&wTnPuI&T(UKbux6%Re4NXv{(r04Khx|~)6D>bKKbGi;U^QK1 z8+CM+P)+WzbSzs*{UJpEgF|CgUg9u5Y)?d1*--j2+fh2$x+{~(?I%z!G`4KPz;|0B z!a!ScZcIk5BKHqrl9ex}&Q8zm$5WWY=$#q*rdup_Gf?X;yCA?3e;^ZIVkBy0W-`Fj zGcX9Ro!T~6Mh}5`+OoGSQ?M;lGA)&Y><3kf9OKpJYZrHj*$0k(?*w}7%SbO4=x?{A zxMnd4eK6im4?pTJ&E74aud0RV7b(QlOs-t{Y^r!QIy~b~rUtan2HslE=Twf0dLY(O zmLgpqnOw*7_lQu!h;~2IyAhPcFFip`|C(-Eks@e$LbbRc))~}sc+)TJNhcaH zI4-GkqllL`1cw=yiB0D;kaxqPAC-C|AF6si+0-18ON(_%N(+{8sBe88fO2b;z#DO%1 zs_*%ZIu%)#7s0y0+3&e44u>^E$kTz)8!6JWTfL2vLo}5?N;=hf4>{bp7c1W2o!PpL zP3-VzkQ9vmYqgR2v%z`7$2s9;8pUA8^9oHin-g5)ik`z);b)6HiU+L`(u_$Q@!PWU zRdFELqwubT!>O`}{|I7Bt*fO6f}V<4f!MT1jqmaiJ83iW0!s#c$%fluBeg4(i8IfkXL0Qx_949~_Id*jcjvRnW~;xs zQ7C*?Ytw^MUeid)=+|Ry9IM{Q7!GOgCa7s&=R&NX4S~8I@9ny1V+}6J?Zz^Z+O?QA ze0IS#Xf1_?o~<50s2z*`&Q??GZCBpi4Yr5JqeO7G87o;rW{Y{hpqhl5`vt*7+A;f zZ_72;ORLo2z8J5nmrs(Y>{~$TRKpXWpa|WSN_O0xTbg|!cT;#EWeA`Htf@V5Oy%Iv ze;6IKBgI|&GPmkT!kD~^Bk>irBNYgqYTfN!uuLuTK7kHXJ;^hS7&)d85#{iP%1D;C z2~yMBlu+KVml3T1gFgahI>c0&tF*w2p8uSrRQQWlapsFnYpc~+5uWpj6|PFNGEJ}} z8@j{^VQL79l`!0$XBlR9RsP5XZ)oXXYuoOc(58i584r3B80F;lejp61`+#7S)0cz8 
z;))8pwds9npfN!%hU@M7150CM2K0{kn1U+m z8{!$?CussRL+g9u+j=j%OQ+G}_7LcINZ!$^6l)nlsf>UQ{Y`w6xoy5#AE$n`Xs-+? zrsjYxNdyWyQymR`a2ih2S=PsMsCGV`!9>M;54Vup+)myc@^GFmGTzExz*Qh#-pd)j2tX`fFvvXT{q*RBQSnI9DYji zFG?oL@()Tj3;E;~kwr7)GyEfH-e-{Mil$RLlWgkcQ9JTn8FVO(WU_Le6i@H=^B&G2 zevto>F)f*?ekeyr%1N+!npF!%4kz!4Z%UY^}{u}56?ih!0-_z_m?@#ln0vsjFDFpfMMIOr35d(Z za-fnxOTNL?4r|#e0y<(@|1J7r#qJs;o5K!5LHqsSox*Q=ZyVyT=g3lp1Cbm}2K#8G-IEX(Qq=sBojNC*tTs6^NnsRt0Juj>}$yh{#Im){t8o4J9HhNu~3!EG@dh zKNk^loPNuI(6SZU4yM#)-K9Jm19xdADdKhet6@<@;G)`+=mdSrvfrAfSw69HYOq2o zD?>UK{Zg}`Fpqplz8X|9{?KlXwLBgM)mSB2_;Z*sO>tgK9BDqY`Q-W?sJcka_3O)m z#_KE9B}JKGRNEF!RsJlPmX>at`A#Lg45EyS3?yO#O5e@yn`e}jHbJ`;nPNjfCwaVI z@OQ(T1Y0&@cl_VmiJ>f;H_JQsT%en1G6J5yx9g7slJ(uXy$P?Mhd0Q)JWrZi%MmyD zV=PG8mVYzbVSn<6uzYI1K+1}bbYyDq0Z}3s;WR~*!C41j`agD5@mB9(d^On!bu*U^ z9Sn>ckGU~uIBW+Wwp_Qzqa~h3^7GnwNFCBP8xqsl^7wyMJodb0yKOT-Cl$-Y`*|sc zWV3w6znjux4eHTrt%52O5WlckxLzM?Y7oBTWAuCOh7yJtEG*(ap-pbaD@{@P-AaSF zG5>w!xpfFTWskVa@$D$MGfp?OWAdYuGo>9IAn@UgE9u9n^vUuf4f0;np1D5<#tE7X zEDZb!5>xjcydKKYS4&ts5>U$HLL-`}S~4lk_SzQmj_ptCeYwD@;@+vLAlSV)`n^lZ zifi;Di6A@rM+&JkTrB)AeKyUt=X+ne6kZMvs$(j}&Db7cNol&kJdYu?^{CYi)P%Xm#q~r=X{P zSt=IV7yMXG!aD$l!=(Hv(=<(D-gkSwy>qyW+*KnZSs#XeWXS2u*#H>sT=!ZS<-sJL zf`GSEVw=@=be@OYUi?F~NJ^VSUCTCS9<^FIF7@8@c3TGHg{KEf`&Dd=vyYHS!1QA0 zKe4Yho~nR~e@;CurT?6I==htibj=P;f!hK!MuAN5AO6Y;?dI4VIlUPcS4&Tpn+{1V z(}oF0YUA%N{K@-VwlPF-js5M|<9@|@5M~#lMt3V-AUKH~b@^=XhDr8Z73qshsKFjk zJ#2K>5Mv?-TV}{*7k`pF?XBRu@JLUhPIlzZuN{gg3oSS+G0jOazrOG;%03-C!n6`0=u~DK;=!Rr%TwT9hfWq5+;_Rrdj5WTUc{87P=?9)tFH#R{ zsMHAh&20K{n>!H91uvDE_?ws8=kb!qu=5J_oMlaSf0^fq%7m7~=H!Erw7_mX-6(xe zu*qJgdU!mp-9m;-D6lFiKse{KhK6^cmtryIcMQ}^&JN!%S}AiPQuv1!doNp#%d#O+ z-j>_i4_Y{j5wJ_pNR7~~{RTSZJw1lp=Pum!no<_%6pi?7xaw(fy6mLP*vNLVb*uFZ z(y8-;@fm!zaeOClyj+fbozH1^d*Fw=($JBGR|_r>TVrA`dX`Qs{;u);LfR*%f>lm# zw9C%1{YET&hqKz4Plv>h*g}`9wHoe%5nl3vt<#TBnIhI}wVO>cDdKLCWUViwkTW;+ zIjJa)P&uCs_{uO%LJj26jHb_KysvSPSn0mTh(MqET%kcVIYrQ6E1afzyA3l`!^u_a zEY~bgjO(F~MhV>b=qixJE4_$8ZSW?bwaXUz%L7&{n*@R5;Z`GikqJkPu)7B3<`#b# 
zRo7X8$sl4{-3{KR->rm$1tsyan)5-lY`h$lg!Kcdr5Z-^D&lYoynVV1#autJ?wj{o zrqX#5ezvG$Xdl_md6_YwC1MAPKte9f5F~gq11^2%J9D;>l)o;Kt z|EpwMtf}p?=60r#iCs5vucqMEjp5Omz3Q^$KCSQEwD z*B4HP>#bVr01k`UoQ;A>3bF^@K%HtQx)Yn^HzUSz|5q1oQlf>VC=rlH=Gc}6IcTFJ z-|L+#oq8V$ZV`O04jWK&bJkgx#%NAw#|MZ!xD&s@s)5E2N?4CA-pi$M{c6VjRk0D~ zp(-m%3r}y$Z5D#p75gG4#U0|(%iR3@?uO9>eOEwrq)7-qtR~h>)v7Y zfUS0n!wB7;DYJRDKnI~SC!{B8!zvbw8@e5j_pV0}q1{^@#D+DFaEFBlLhUjbE>#>Y z6%iCWjBjr%II2O@9o+hgvF68H(uAZ?trW_X-ijATo=ib|sl_UcGlyvk zQFdINk<45xVlt{;44(5}pYRA$&ott+)Iq))ZTKnXfB9;z|K+P~m_Kev=iu$-jGhg? znTtuG(xqGPyg5%LMfoL|Fd2z#IG&V1Ti6?o!)JlIU^-*wv^%g1m0`N)Rk4{dex|1& z-ceR@_U;JHU>hnD$*VJ3yUUa&TLib^R`=xU?iEqY0^{kIfuv37jnYQjl`if#>0MjQ zrzvV)C|;d!F*CNZoZ$h8Lv$N`{=$qo_}bC=c()CQ*9A#Yd9yAQA)Woj!6NBt&72Y6 z^HuhCp6)w+s#lT|h_kq8ZAnAR%bDuh$-VZgLMK8ET9>m1&kgYnRD^L)Rgh6r&I7Em zLWRgE>QaTV@GkLsvWPAi4f%ieEThWfgvHs3jn?&n;mlb}_ztZNFFBqju@L}Vgxpht zyW<8O7xp)&7s|ifk-cJxj+6ajt@t68-`efVzItqxzc2#bYIDWF@tevTl!?p0>R*gN z)UALItb^EENJ8tbFtQPmdzC-Xy?+dgG8RmHRnbYKsJ7J&xm@5R{Rv8AXeeex4 zEr@U4+Al?VicA!lpJox%fB50CpTievzDN#IdVjG@sBPE3Vnn*_0ll`OuCtQ(@lS$= zDVB^>#-tf%mqeD3o!u1=LXOd<)kjn?LQBIOTG#o0m-(DJW6bPY@R?9g^1BKdXy70r z1o;zQ+V&@1ic^;9K_#yu45^)!d^PgLAYvuw3LlQyj%|IvvqE610{0;S7qkvK$@~nt z%e2;D_hCP*vx{M-*@&b?*N4zY%^8Q z)yG0f^*1v%@FDJ1-c5_zqaC<)$G51*p9}`^9vmw346xre3$gf)IF?@>C5b~rDQYQ# z=iUo6c+24#+?Q%GRWefJ{_>I5jia}Li{PdR9k>?MCW|1E9Do_jikZZCC~r@ukRA6K zZdQGn8d$;93mg08H`dAucqm5W!R*vwZ%CEV)SYbPA$YCfA*OxDkH?F^#l^@cS#uf_ znyUw93wUr>=n8Hyue`ZN4@y87AMMmaZ=$(8+YthPy57W=kBM=h?HRsLWav|B%_+no zc1h#6JK3KhFC(rGg2*Knw#+ z#c&e-?d_Bm#tI#r!3dOJe#b*I@Eki6IjvkV6EiuS1A9^Fj@XCSsuCSUEC%m?`Lgst zkr(PB23p;$tk{BQ0(Re=jqA;+L$@M9fi4f@IMyYI{)172Pe*Oqm+3<2(3#6;bk}=c z>s@b1le(S5-o58SsiysG{MY!dHO->;w`P@z_pMW#b+bQ=+PM=@iIMludOLr~L!YL` z$lG1tAQ@g+t$28NzDEQGVlSEVNyZE$`eZT|hQMk)82q?S>5_G~SsY{O#%$CPhc1Ww zp9xagjbx*D;3@LQ5h&wlMY>9a$=0kpwI%wFkt`BzTwMcgw(x0dm0!wJ?4unU9(sm= z0;c)7D{oMp#y^wt$Nkju2Pw~6kK>S6ftS`#f6qC|AF98xMTda$$L+l%fVY1R!>=x~ 
z;b-y#F*1;{qv7qnouM29FKP^cY0c9NiWos9RYMDFiyG{<-@p9S4?nxYdW+Y@cbMg$ zL}+*1`HFRoXpKz$rF@9jIyATBy3%x%!7B;xBqTRg`wyBkJ9_>q7Ml-6wGMye0gHIS zRu~ahf0p*Qd+CC?JfQ69sL4`HbE`*GKc$3gf7;18_`|$r%rFR|h(iLC`>Ot(x5~(9 zcmMlxeQIwaYTA4QeIqB7>xms=z=5WkeBubLlHmCE0pXo^L4+rCR8|YBD>G4IoXj|r?HcrBMG?XY82R3^5&|V3nUA}#yAI*c{XBh*nqG$^r!jgLX=v$gH#K~| zK}ZBS4leur6FVdYJa7IP$L3r1 zGVQTm@QPk8PrsQ~>z*JWxM4V7KlkS8CN|RjdizVvc|w=ERW8n{Sqo@k8m` zKxA~}aGl2m#9zy;FcDj6LFEQkoTih=HZ@$ws6?n%&(@uljmRhGvoEsSwE!z}Q?7GK zLUOrw!~MR6<@PJQoUXgWZ{YUpJaw=>t7$@^&m*j?(mm6y)a{7ak!&6EPRn;CNrQ=L z4O6xU&DhB^67j^mt_~x|-iQJd*NBM)AN!O@Z_RdJMp;tjo{cOs4FOCAtB4=_z(Tqp}9n)B_2XE?c5EVI6|v>&a#8>jV4W^~6G8_LNz zSf@-za1_J+r5r>%t8V)Q1J2E4?wNFQzl5%dB{OZV&==nZFh1Ft(R<4`C8_ zNl-6y(Jmt(wGaCmr9IH-wD3%32{1B0fY-ays7i|hhjoVb*{O-W>$j28JwMcnkQ=wtk8tD|~}(u&D^;P3Q} zv#nwt16Ihm$p2TYtIt2l--6`=ps-h*Z<5D*U675g{p8%wlrh}jaOLskP=Oo0;rzpA z37(1yvKC;LsT>9JK0``V@)Kp^WRSD8daMo5!2DmPcd?B(sXi)=j|f`b;}M?o?1j67 zP`M1qUHo?}RiBo)x{)^Wck0!o6^^z*!2+zC8cRRmzQ^PDv1=CHh|-_%RVg#)LeWy{ zx-S{?GKsrE$14l{e}}sPK)Oy&>|@LC$yIXi;!y=my12?GC{{}Mw-TO-(l2$Ss}b-2X_3MW7Y^Ip-(&L_$0L zmCauL8jh%=UO+XLGV`Y`8>rCH8ija}O82(Xqkd$0@e;kS--b!2*V$@t!O)4NpZ5jU zahch6wJ|+r3%=mc=ZjJax7f(=0pIh0MdGxJQ9p!KgN>ZkmNnY_`D}!hD!qu-so23W zUwS7&PM(~=r!Xc&`OLmYq$Mb9Ri*fMdzQ6wCd%cD|HsoFPH;-O2&Csip(G4$yW=Oi z<1H+;T2jjynNh^fE9O#-EM3Qa34!jK(#C1Bq`FMDlB^htSWPTpf$?w^kc>Vc!pcfn z(Zxj?4YTO!H6s@O7Q~P z1?!c}5Z7{%I(m+ir@Dsyo~2UE8sP~ZB=S2)I;!147jCyp=I86K2da`&Mwl*?=Bmu)VVsJ3e*cU*_ucfX}F2i|5|Yeq<$J- z@%I#zwljkHQDy3Vm6D{)cRL}@Q-MZSj988%)R-97v!(<%^r4_%WyLkEr!+Sv17ih+eJ$fBn~PPD%Va4&kb9*4FFAZfmpuh z3Ot&IYak)_-}q)ZtQRy0-r2R{ZvtVlW)nl5Q+}csb|Q?vZ&1HV_mCuBqb3hay8MaA zl=zX9-R~>IUILBk&iUS5HVL9k$ghK-wPIclRSu0)>E;0K^?XtCA9M7J&-J% zo@Cn__z%npTMBzUekRT=M)FrN8qH%hmLI~0di%K_HOKG>RGXyHk$(M#n;eFpJ3f4l z#4n@~RPC-DlY6AS=&!xn77MHX@vi@JG>>eLkTjLj6JWb@M4k(I%VlUC(YpxU!1(%3 zHr;d@G{bzQ91XEu$ON#qaI|=rxNT!a$|NS}p)n>mc=LC?c1wJG$K9xZTOQ}6SNEyI zkf=W0Io^fx$}P9pc;@=+S&v@+@LFJKBqYI*tO)oWxamR+bu&)Wb%U%-o5HSsBI|bqU}z8Mi#*z?#(sDTfCkdKz(r)yYN?P 
zy}`)w@v)4B=hlyb-%CWx6J+{s09J)%Ghnm;tqk#@_d#zqsrOqz*vhyR3b2Va{avc} z$bpHK;0tHZ!UpOGI)nmK9&hLR93+?~0x8ztjrkLI`^}|NZVOs+d93JS^!2$wq2l?9 zr>)A9wD%dzR)t4R*$ie_dtOCM`gBN#`r3V(4o2%k9*x#R*7h!TMAWVGXf6I*)0Y%y zbX28XVuzb^O;Fd{5cg;13tR-0U}VIpJa{#HrEU*~;S zDAH0@p}m5{lz@j&c!tZPN{b>NQFW}XYZ!BaC{YT$x%Zl5vhHoUzG6}eObxJ5?}rab zQ_zW#4tBN_NV;=GAJ+bnzb`X$X@4}@tRL0DKV|S%=y?3D(Nx(xLyNoGj?@>lXqW*t z-AC;*v@8glr%EdbHq$OwKRGKK{`5H3md0jIBuV>=+-szi z^(nQ+-`^jO^5P_64|6nXh*bYuU+C4;P@sM6PF*(%rqWB*L*ZM7~O_JkZ+*kPghv;K?-Vq8|2^62fTJQ2OGczleWOT$n=cd-!n_iUyOg^1^+2o=ur+_cqZr-H(+2 z<<3dGt88Fh63P|`&nmsi<7#v;BtxkW2^*35%>=^8;J{8lA)q#$yYZuPFU6 z>|>2-wD=n7*jE(v=Ff=X!?PF?3U%ewuGtCDv=oTtZG~+xpBfrfHocC(qcPUA_(F); zPy=g-VvcEEQSjJAN+f98s zUmjD><;$feWd8I~nT3UshiwvFllF8bb)`PTR znTNA?We^$>#UE(MA?Ev%>|FTbDk0ZLpT>|Z9iI#*ZN_t6 z$=-6kppEy}cX!G+Zwt!t(pMF1o*Xa^$Mpl|l6edZp_at?7QXArYx;3EzAeiN(ASJ| zEs1f}M8*@ctdFUPR%Ng$Y!2&gUa|P`r+blKEdd?BA|*Z^It$$zk7#nHSjQ0g5C|>Q zw?R#t(ECV3^i@NRdEKFl4wLp-oPp)uCM$GlnLf2!$9=~RjJ(~%bEQKBei=C`M1r7J zO>x#2aL@2V8?_u}TYV)2UM9P%!>^u)GGD69rvwz1~Br32-LaS5}Ii9==YlowCXH8SUP^s=i#R zgy=N#f;Em8KnuiF=pi7PKU%;e`1edl(|^6c!)rQW!< zWIL%4P)SqCpV*X>S<5Vnb$U+Xn$rK%=~t5B{Ar?ZaTq>#iFwN(SH*WojBPo{XXT<8 z;H2p&pKLMFfYo?&8ljShxQbsWuN%i3b~wh#lHwBjsybv;dA~Hvf6&|N^w*|94~t3cJzYC_?~Rsmu5ppOdOR|U=dQ%l+JNHVKvVx)TJ5fQFUFlUY^rPd zquvEE7b6XjYXlGY>Z11k0EDr6Yvr=q@NJ9IjRCmKNt728+^YiOK@D_@bv-DKTnr`r zlCAb)Wn7kK%kQSctzM;^?^m@MsHU#!EGMVRlBdc@?;I^ZqS`M4EL99)7AwjE_U4m< zwdXb4pzasd(!4TH$a94br*<2iIqnV;6VA>Ltw`NUxKhoeUp#cn7jLId8riUj=bOcp zRZHM}%lDii=ef&Lu>Dxd)+fXz<$HgZD!ANEgEw7m`<*JeXU&?uL`{nkDCf%JSC*$F zzL`VLf^z1I1}aE0Qz}}9yNfVt)~bY!?0pzI+aPE*8ybIdRVsO1Bx&(`rIo7}G<(*6 znlv566)KFh@(YBXu;fr5dt=|-F+fu)yg{WwRz2Cyy|zo-I0dqG*KmENXP2+Jq-k~W zaLS;QM{=8oDG5yAktN@pmiii}lp7;US;BVhF%YWdN|oKcfFG!A#Q4nD)4h^%XcVJ9 z(ZX|-juy7V5Ssfx%~fT-6!%O3a|AXqUs0Mr^}| zFH~BeAcl_*WtSvKC?3r>@?v|Lv!!Mw64G66k%~Us(!9rE6M1)}rXKb$VwUzrEsOdq zoXK78G^S^@QygRobY!V{j_L5qgdWIQDdW##)|_|~bxPG4lh_Gbqiw0xm*-@7o(f63 z%jGK9%KlP@?w14X;9j=HwNX&~v{hIB!j}&Lp;d-F2T(UBNp$*_w(jbf^+O+mSxMx* 
z&Y<42=n>cIdGYUC$g@^Ywmj`~3Hz9rx-}uk!X?N7nEQyNWF zuRzfly}_4@x%+IcQDJbiDJ*aDR_p?psaN-=WKJ~N_%$s061;Ayu}PIP3S3**-_N}G zP>Z8%oGGEq-w3FcBmd-3P4>o-_8ls+DnT;?>JTvkF1omUKCaMT_>)8Q*ulgBU4P^&0CZN4hQuw74A%OXBAz$M6Nn1P>N%x?gN=06A+Cd-TV-n(XBBtdu9!o6a8Uwca0@sLB&Rj1!(E zlA;8&rEv3`{TdYRJvy^J72rx_b(?je&8hvrJD(!mUtX7}tZ3P)UbO(!LW4+Du5Sbu zUXAtD7?21ny0;va1`qEf8s9uXo6b5PHRF%P9;IbuEu>tQ+vtB6E7f=`B zW>-aW!!5+9{8YwY2Ppe+IVO45o7pa$-{@IgW&rH9+gD4wia(x=0zLoec`RH$JDv{v zsNdoRo3(W)4(dKhbnz92qDiC`1=5dMl;8nuwYmoaWi4f+)kf2kF{ZkI3f@~OiZ{mxoiaK^yzGrnD0(}Z z`>H3xxG3Z_ahK*hf0lfFoWZcU{1Uhq-P*AVhoUWTuADblDpMSue?|4Ih&aHl=w`OM zb_j!}V23{WMR0d~fVmL2J$5RcmX&tI?oA&19Jw#!B%A%|Q>0Q{Ns|0=KEDSkUUpH^ z=lr~D?|~GxYEhQKrRw5U8)3C1k}z?_=@u%)HMZ3BchUR*s|BE2U&31cB-1hPRRbve z5}Tgys-XbAaH?1A-sIeiuprl+)O5up=+k7{vD&zcPlY$D+Qls%(Vi7Ee*YwuEchN+ z7NinQEw>ROa(`g)xR z7xqoH1e3h1NWWT9z-F}?5w9by3@cnOd!=3Sn+@iVE3uez-CV(@gVHFpQORu|nAZhc zAZ@zfL;l<2$s#o7FG7HeREmJHq?|&2YR!nb`mfcmSL+WlW2x>w(li9>(&bnm&okQP z%9;$SEpuS^D_14;DxRJ7=FvC{U3KMi&7q!Wa}4u+WybS+42Y^Mi*H4JsN({9r-h@p zOID((5-MZW?+Y+n@8y9H7b+DcF{&G<3(6B~fIKsw`%G>^kA_`O`S&ib;bJK=J-$2) z0GrA;d^XMZPJjbHYV<0n%gH>^{qW+%=^DE8!Zs#>a`LEqnbyQw`Oek2y1oxU@BLwk zz~$wZ4f`U)ch@q`h^68%Mg>VFX*BnMKv87#T`wCk7H~r%Y2*;6#n~2HzPQhHzy1`v zfTYb{{>&m?cKIG!uBrd|hrTfVsL(e12yV9I+40fLN$O$-d^3`-&CRU}4@~9tY!F zlllx}9)fi0ZBpSO#{O+PeZJh+-pwlOI;RcStXTFQU6MhVJ~A%X_rv4ON6W^f{pIc) zt^>+3Sxe#F10M~3b*IfY_iy+iQ3JR&uEIrliMr-F3|Fr&NebsrRz&W3MTXvvhuJJAL%tT`$@;n;tm81fhkj=?$SP@|{X>e7=$JIh&dqhX|q+Na2gukx<`sig7@x^)Bu zS05-j?ycaXX^D>JGVF%Tl6nzUk&MnC=B-+%mQe(Ja6j295SAMEdr}@ll{e8C36&jh z9Gzr?jh4!bH0sZB&m+P_aHVYCR;_Dxda2P#(>qC{r^tRTOpqP8{vb^dn9j+qN!0r#=ubp5o4<=^loVB-Nqhc2mLi#iovdmr3)R^%;o#t5quuISbb5l!O)zW^AL-x3q{M#}K@ zaG6jx2+HD9UTI$&TbfCZ-mBVMNKjBKJ9BZHzVdVt;-+|q+rG+^h&e$BZx+~WjP3L` z>rOx4u)F7#=b~A+HN|qQWjlLSfyzIH`}APN<~3Pg1DXW(L~XM6`RW6@x=7JNv(Xk9 zdra(q4^<=PBoGTSJa~WYJ`}yawfcJkxH+N7HM(DvY&@CK-;8X}x8rgHx|nQ2!Zqt} zumbwMpih+-T{c(vb6hlopO34(eX$B|&UObiI!)N5-7bUk`$O`jie@x@q*%cIXim`<)6g;u}nkFIY!5KohEpai#(M6dphj)y;pVwGJYnOP}p 
zFQwA~gMB#=2olTjNY%r@Z=b$FV!5=vL{!~(MxTc6-9E-A`kT4g(Af-k?H;)xi&IoQoHd-Xlh~hLB1joFAxT`1*d7E;U!EET4skR(Nb34p2I` zkhS@Fv(m%p3tOll{BjDsIwlR5p9mivOAe_ZP3?M5W!KE{zRp=|l1eC{@AWm7Lm;@p z(X-K2r-stv^gARk+Tb8^zruX7xv?5hJB~>W{VRkjrIuS2(o5&O5lw*rWNM`bu8#fYQ=|KxOkT;Dy4Y^40mH=n|sQ3}LA9C(Fp z2l_e@S-@k^KOuA^jutG%sZTfPLOx0My=Em0V_M*6Q6>7Tl<{2CsnI`w+1ZROIP&Lr zFU^R>^LqS{WI9PiMt8!-WRTfOQh{W(+oZIo?^7sN(NgU^r(-5OQZ#=w0R9Qq@R1EI zdF_rso@x~GCmo&o$cZcK*StEc1@JA6K(Dde4}77yI+D(%9z?I+o;^?ym0?q(8jeuf zG*y_?+3%a93nksv%*=S841L&qzEKo?056iTu_#4*U#4Av!xEQ8nfzPa$Rt zp@h+JBvDc1B7L0bS5JUS{$^4_iwPq;%Ec;m`&#DW%O|+=8v6mSe3OGbdvmD`a+%dE zl6`oM4TIkvL9!NRuW~J>(~1U%2>??m$NM34oGE}OjRP|<;x$SJsy38e89FqElw6@A ztjW+KD`q<`miEW-z==_qq@ZY&WP6(w+F~ECz+2P4b<`tW?21B7fE~Rr<}>U{%{TbY zB8)=+G+OPR=1@=bo!(Ag#EO@=uYdoI7F;QW{Gyc=vn?LwdTHuR*5TnAqoMxVi4C)a ziO}a{u(q$iukdRWZfB82P)jJPIuIF7?y=x^Yz~wfZnf>drz>pOP6v3 zA8GqoX6*0IC*Qk2ys5g^Q3FDAPJ<1n+(hxY?B$TwkJc{(CsXfcpJ+kHwsj~hK^i;l zzx?O1L?6T-s}B@`F}eZK1>(Ly`I;eeU{`!bMg(&cVPApmT3&H(rYVI)Is&P(jMf5= zbCIp{0skFk#oHzV)l%(eYVy;5WI2^t;%>Ga}Vf>ZJhp3)?H;xJB zE+Qf(F4rMyCiD2LQTdRNeucFi3+`qt=E}VFDa-U0KYn~^cMAn(o0lNWwTV0S@i6jn( zvSCr4^$)%|dbS!Up<}f=YmHNgYj|5DPWE08|HZEJ4*~11de`c->osuQ5NA`CgFfpo z(PN(|SBpoX&z*ww3|)@A8>`HBOcY0mw>d^MBqir|P!9F-mi#p`6^B-O^Y?e{Z@%jq zkIO1y+-_hJ^3eU8HS7eXs_1%l9rUYjzef;P*VHXp8R z-FnUl{79|?JxlF>JZ$~2yV8c;0s<%9rg%osK@LqYSr3?&A<(&t18O`Ko|K&QhuEmz z+GwcLkX&`5{A*ybqfRROFzZ~}Yfs|)-li>1r z(?`+fmP;7CqJBn@>1i9G({1xRJ8J0HuUZFE9Ya?DRHe35ds43N6Tui@3#Mk2Zu)Gn zWcrT~G^pa%Vd+v9796*}{MfED2ywbW10hbJUSJQSrx1rfw9w=US3@JIs!J6CuvWPj0krBXgBH+Lq}2i@yAhhtXNJL@x$t#QX;qb@jq)VO{E z!<`64-7aOWUMpsb2fm-faz|{FQp{T`AgPXd^m5hcf7-8gz1sS9Y^{2&ed~d(ht=>S zoN9*P@05YyWxqnPiPxx1m;r_&5)QXna}_4tmA^zWn5$+{wf>N<8F0bSvjttL}I zLo+iz;5&n!mshaKg%k0>CXnC#bT3m;Yc(1>l z<<~pkh>(Qwu)&D`W_V#X;+Rf+lqQbr#mU7rf==S0)12*=(V%q zoP7WPbN=7y1B~cE0#moyd5-tLpZi~9da~gd{as4}vjj5z1C^A%tiB1frGFN0wFFeD z^wc#q0onNoL1OnGj}G|r&%866{Exb4QDrx+u5>svuvCm#QH-bex9Ug*L-+@ys)Cw? 
z9Lr2$u3NV1>}mfohe850NRI=xKk5JSy$q<)>OOF>U-BY#^+K64_nHtyAFja#>EQzb*1!xe4mk8yn3AGwY~)P>ucx_y_dZ!||JVM3aLT)GOK zxRnB?RZ|S2<<7-$@xRd0p#nsymW=Ck9+bF0;7%)M(#QO=xi^UXTKDk3xFHoZAuJA& zExND;J4o~v{3ZOq*fg})i|NV`&Ed{!FinRM``#c$Z= z+J?Cv;2?Gk4z5KJox?97s+w7cnf3Nb9$WjE>}?SC{U2|vAPBPjdv6P25ka)uAc)+yyR?Fp3pUT<)#hz4UViOm`4;@Of0n z340Fr>#*lYtC4=6aDC}xJJYLcz^@$8J`s>_)T%-Ua5y^L&T#ZDgv0*e@6&&rsFqjs z^z)NfGtmQL|Ci+cAF9cL$Y7_H^yD>KDB=AicK%{Y)~yYI?uZrJ)=EHZ=2x95yn!q= z@I$AF&F%^Q4tbunjy#>m=K0qXn~$6KxBo1?W}pQKL5ynGz+375GA5uOon1nl=X@)2 zpW!lHYX;rATD)sCErciFgGfZ>_%*gY#2+!}X0Z^~z1z|Q`rf01H?Hqa_l zw9(I~qF2AZ-2jQb8LwF0&{4pv>Lt1!rgoxd3Z}bUL_i>OulGWsVro>RmtWud4rW$>^~Q)p@M|y^KR#wYq84<+#H2ZvrIwj{xn`(6i8p;%!8= zv26C=MtdO}GeSHhPh0;Lm4!PG;^wcm>}`nJ#8DQmecl6wSnN=rf1B%V@1aZUKzXA?LC#ArsFQSCI&a+=YORc$Q6 zx!oQP7Sn9P-I7FLt@BNw{z`qRZU+H@#q@Vo>J*8#`UAy$Bj~60wI59E7!Gtn5$KTH zCx!j|G%Xy?SN|Bk{}f;o3Tn$Kt&WkK8?jK*Yl*Y&^oDZR8pwg)MT2 zmMI+mh9;KX*{v;un-ZGqv3N?@)`h-lpuO?#?<2(-IzFV;3yC~z$DOMhM)O6y%8xx( zltxSI;DQF4^>x1O-F+yEI{{2TR+|}&pKfQrLo??=@eDk8QOIic_EH;v&pG}@p|$*@ zdLP6G{os5k7v(d_!5dXM|8x-u2Jw{7m&S}H`{V<01cIV@7s*74NLlfil!}zgq>o?C zeu#I^K%(og)uPvdqyx-WhtXHR3oN%Poj` zmP!|xGBHZp+uSafnXfswdR@y2lR+zr9rxV@nmG445iFx=)ys$MP2|7z#(%pacF3Te!AL-P`pu89 zHI+sWUWU$@;N%0P_3VnpNB_M*&jwK)HsZij8BW}kBvr4yXJHT)yRLaHRxlARGn^?b zM*es25aLjh#&0P7f%FD*hn-Q?-@4tuQ7Pni)|0;v3=NWKt`@NpxSKtZ$`ah;P10vZ zuH02EZR)%ZWB!Z6rMv<0ck+|2Aeaj8hbA-p)%XV%KtIJl<(XN!gdOYdK|77H9KSPr z4jyt{+Emy`l(UZ*b!UGslDRv_p;nHt>L1x7C~5HxF{NP_xl^$3T))Crxo-?&=Yj>6 zF~iZ3Vb{&XbGq&8zg=~(J(&(04p5v;SNr`Sk0t!Fb|J&``aY;;eO2YN$MP>6uJA+G zADs0Pme@d4V_JMXgA1Of8Bf$m3wzS)n9C^;?Tb4s(o@?emS#%2OfFuat&Bi_`BluH z-Z4lfgC11MvjiFRNork2k8+j!^98g^93IUmL>NFeNKerB4N5Z@uMgB6Z~kGx7tibtNMp97wHkVhe%z|l^J007eEc`3QqT-!vY4%twqHZ` zX_k5I2@dHsg7c0!Es`O<3C?2_jU|j6j)txsos)iOy&N-}G7G0>_$%|U8|=Wq*IL%_ ze38wGnyGOU#Kx{7SB1`lhwp1VVPWzcWSBnrCOA>TbW$aS_1FnDjYm3SRxq5ufFkH+ z&)*!pI2c(I(+j#H7OHYmGM&3TBr6y#91a#xHy-M^GQQWxoPJJ*P2;_hW4(^(XCTxn z>GCjlr|IQ^?vVda#uUr|nT&PhYIv;gUY-Ko&lbojRqwDRfA-?bse(f;a(nZ9yKbI> 
z*mf><5O7UJm2{xwFVQrNvG5*?#A4M8;w=3}NEB z3fj{{Db#%n>3qQX;_3LMA&8LOY_6Zo@xIgI6H6^q-ESp1tG%E7wrck|!}ZlI@o!GHQQ{tzCpjQ%O9@={(C zY19EVBu^(5WE$^#u6RgN{sf5Ee{Y4NUy$`LxYt9mTtZ|cJe-{ z{<6_S+N?$l1lylew79;V2d=SJiB*1UTSH#`pnYpl^F2ZA%L5y$r{1$2*2zV0< zaFx?~G5j=^JY26b|3Qio0EiOL95jp0lm&=&$f=GGfP&=~dqP*ctpMW^2I9dP>`8ih zf2q)YQW%xW4JiCvyI6L*%zW1cxa(i8abwH@r3CVtxGmf+Y8J9W z1rayzI~IZU9ix@{O=x6L6&HD`YscIJ##uD*-wt2~=pb3B?9*7s_+v0#L`+1FF{7v$ z79K8ORqvqCj3B2>9fl-kW`O2Q1gy9bQR9?$tvEj)kgX2Qav!~;Ea$u$$R@mXUCu5TX# za&sT)&PFv#qU>oLyzJ?N9{)w()R>ZhW}f*R_q-E{k0ZeYRtdtE+(x$=rLB!>FD~OFaH0kuBip`g$yCi zPXFd8LFAMS2vBV|2`&@-HL(BsJp%%3pt+eMbNs)K4d@9n9rey;8lQg+TeOB|zXgjp zHuNu<8YsG9vVa~(FW#Cu{M(v?L02;Nf@P(Nw6&5`0x@IXLCSK25|)OLDmgT3f^5;h zm$KLIcOL53P_{Jv3{R`~elkxe0ZITxwOkpli15^KfK=Nbk5BL~QOo0pUft1kGXWyzRPt8sk{u=nEJ4p6Fe7oL-Q z8dw4MlfEQ`{Mxb$T8NF@;NEUK#T5~H>-FL$Jn%-9WJo5*Z@8(cGQ3qr9t7Ime*-aE zCD1I>N+T{k>t;RvOUND%k14=l+ns28qPmKEJ zn-+BV(#8JLt5%rb%M58+T>ylDSBl^KV|xPZafgTjMKdWiq7ax{$ys<}KJLN3E>2%RzpX=C1KVIr^MuY5=TO?s zlocBs$AbgBgZB*ELHzSud$jRGRQOr|j7f<<2lGGn8l&aV$lK|UY~IO3k{$m^w1!QE zm9Cb>I@LYY>sjmM(11!|#pB-WTn*QhQu|Z3n4|29XO4gea+KqTJ^Hk*J<~ih^k0xJ zVG8Hwdebc)3h$Lxd6-x|#>MXTFuYc^8f22tcXewm;mGOT`x!pGE3E7B`jW@B*lYH; zCBtigTuA@T&y`=OijT0i8G=aX6FTvQMJvYFm@cmk>a?dBxo?boCs6KZOL2ub#)j+$ zlbK$aSyfE2Q5qM=3kGOr8pH`gE7;(<$X1Z^l;-X`=5qvFT00_*);Fw2k8Sa7^W*Mh|pzBu`5xx-6!CNt!{DIoXO>WucMc`Ca8W+?bqgPBIDfib+Xe6tpY zuUB#xw(~!|UmK1Vf8~KwcVUl?j!sLXGOEa)3@gVj#v4vPxj0FvkWhppN$%7$x9t98 z|EbvE;u>C2fj~HWRsJL6rMufjK)}YG`MsrJF$jy~$|3;MQjL)oICty+bYeiu-+raB zMFGzKv*@76XaM&wLEqP%8J(*K4S1eCq}APvQ3^~9%4|&@mll;bPDPRm7l6EPTKb0Lw$GloM_K| z`*ytWY~l6Id#A_;bbNZKHJ_f4cOjaG>qs$Rp0Ua(JSX7PERuQF14=es;HDsW~)$+zR016c;N_PVS^x+?l(9elbq>B6OGR4Cn?TKZRF}<&l7E7oS08l z19&|@*^I7m5v>dmn1^U<^xQp!ivL^_9~w|d=xW%{V#HR2-RJ?IwfOkjkcaT;?OSJg zBUlz6OjgJ)paUV@5#LzeXnssh^Vh7sWgK;kk4FbwTC8=$rG326%TvC_&vwW8rFHvH zvNbv*%(D;+__ZSof5ZxI7d$d)hPD{#3hQL+}95j4ui@Y@|Ag>qS* z#uZ@Dw3-p9ySeao3~V7#;(maR1%&%kIRhbDK=M0v7xjHvM<_k!>s$x5b-sBOEFQG5 
zTIm|h(Vj*=hbCWd!LYjQGv>Hk1}Ztk4}MT*CO3@k3C!y@h+zsK5~-K-*Z#y)rp&*T zZXe0~!w0BRapX8TMN7}Hy!WHOvSu9-=+>3Vy5p@Kh`83I%=R|oJ; zy5B*iGGh{$D~$O3Qr|9>VTk=SBNi00QqUO7IW{#EJG|tg*-%ZfVd( zwikuIfT~-z7aTJK+oW^R&3%=LW zCmtZ!q28Xr(JqM19=LgsYi&Q5WYlcMC5S(sDbNW(JYU6?8>a^op#cP zMk?jhD;_7IlG*A(DSGP^cd~GO_pgO0tTr?0y!e^XlHPIsyHSNR-S(M`YTP%Yb+B-7 zBBsfrhwNrb3zE$(x{umxEgsz-sFVL!viNU(gGL8A34m?R;TasQ7|0f^I5vi^CPt>) z?<{3AF(Vh|Wn4IC>}IMgEFy)1ie!Wa770866+HX=@Ds!8WMtg%TSjQLPIO2C!oscm zOMN*bOh2=CJ$~6m1=S#myxAjdx@SMz*U#9u6zVz^nLUGmWURTkCoTI)LUn%-p4f6X}9lXYs=Ku zshluyY%b#z*tX=#OMKIwkT~u|1GE`%p8mKQ&+}JO(AxzP=&8Pxiy$_fWs&oK&ToRm zWi((x^eRs->DeOT-DZEh94{vFoYyWKC+x)-2f$<|x{@`C>=9SJ`WfQwiS8q_3+FDL z<=FX=?Fq419w>=_DhhYbEc`6qfb;3ckBa`XfgpzxjIdSc5?~KyG_d)N;a>}_CdZ1%H=ug~x>n3K z4V5eqrq0a4PNiar1h6L{olzY3rX&(VHhL()8@=u?EzyZnz*bD*fP%`SDiV7?+9_Qw z7^&|;c&7(qF}i8GQ%bE>+tlN@{vPpfGj9Nyc_x=@SYr4+ z)Ss513G#eGJ18_zlFfN;yi{$a>|-n-<(MFU);ho8>Y$-9&d>w~MpBaZ>p_F=I~}uC z^)L&MI|xg7ja%Ok5A(@>936%2r1X*EIDn1bJRxAb+<2~p{OHQ5Qm-3c_=G3_*JjYc zMi-j27Oyu80rkJojPDND`D@SR_)^e>%~n=L0%Lq6-Wkm>?9tN79*q@~JY!dI>4WWF&ve#BFP*oG&vBI-<2f@?K6NNK5$vVq6sM)7Ou9f>w9i(LjfgP-LCN0@V zG&fTYA6Cm~A) zq2~h=+xwr4_(Hdck2l9r5^Cds!t0Oi*uf?~{(ZLr?pvMBgjvmp@i^`8qhZP)d+A!l ztlI7`t^OPb5MU1)$R72;PXzJaMD!rNY>llHGaDy<(Am|w@qrF{u3ndB*JsmE>(%U6 zrHb0>*BaWI?jBr$)F^b)lF+M>Zj7^rP0agg5|@=Bo6N z{D~*UDeknAexQe$;1D+=&zg6~)JlJ5<5rA@Rc%y93(B4jpA*wXXbT?Ks3v8kCLYy> zw6!JL+&+VI2oO$XI7Zn}CU$hLwFtbSCOH2S`Uj?gAa(;fC60+6xEbe1XMZ9@Y6446 z}e#4U>bY_s_hT8yN^85GaouIdmuSV4T8(#ztpXvybrSv^=@7dpJ z_3u!+`>XAR#*sn9I2pAsOu!ji_2{)UK;}#$amM0JI-gdCSXh~?A_)_;8X}({^m$h z%pw1nEo~6U-qL#G{CixNO*#85*`-}-42~=>o8sa3y^x$UzbL(CQG$+EHG%m}L*Fj6 znktKonq^qMnQl?HIoL3aH#IcT3gv@J<#UG|7kR~K!|`xpG10joZ}MGU>%y-x5nnfP zJ`3?GTLNr(dOP3?Gpio6?FdY`+qDQU=$0am%H{)@j1ax@zp^0iuSRXiCW9@$`e212v`3I?Fo+G&YF>s z$XNztCx>Ew4h~7-&~`ry3xa&gmvS@U)kHlRJ0o$qCGmZ@;aPf~3iy~vsvWJ5l2Rq? 
zjffXb_RGqrsHRX`AAW-NJ;-+8r*ds3 z#Ywk&`9VD`%%2>D5I^_1`jyuoVBKR=%i#v{7fpbt$#O6d$=f|a{o2Tk;S%_OYU2uc zxt%h(Cm6VlT=inqzb8|-7jpE|B0eCx=cbM)3FlvbKq1F%l=|I3a`K>;!4m-fscIvr zzUs+nY&Ys)w%B^IaEG01-j|0W&|TUY2px|Hso$T8MSFBW?c46k_sYgKlo$j#)8*u4 zS*dGAw(+F7K)sHW5DV%f=(;xP6b`_9Z6yS+Dtq^skQ)hykt^v&g0uglgQc0AKCCGT69NzcDG}5>i6tiDkzu-(J<>HXFLum#2U!2 z;b9HJy*R=zF<(_bpCtHu`rmuP0g*%@%bv5kYt|izOnm;Rqft3h_XQKTsNd{?`t#+=am~)q$kD*S4Dbbe8 zkKf(hoqQo!(+`KIf|CN;?0uro0mEUzB<47j&T6$S_I~kw?-6+8^O?zAdbRF%LO*yL zA+1=2o>#n*4-`p>8MGABCzpf38FT=`;DB1^mf@a~-(5PI&L8X+X~1ukH5H?>fdFm> zSlj!>>%EEk+JTSU6#`Q(IE@>3G#^^y(>!DGo!IXlypzN4-M(Kpo)oCps|kU+M-Xqv zhbyc@f{cxylBq=9wLr?b?4eLJ?CQd~!;SBgyWAl~tN153u;u^pjG|)UTO}Wl9kwxcrqY5ndpWV!6pge{wv)f*rGxRR1F1fKmN}mEvb#%)A=xdQTZ3POL+euj|>GTV>>gF ztK9ze zk_i#?eQmuItTD;~%h-Rwt2+r0c=6f!)}%>m$ZnZLPGP1@75GJ#8h0;G>WUBVgmCL@ z9}dN0ROb_RYEqz#HGHy)a+3EAn0dejxX{C`$H6tIA@DOBwIk=cXEC@gmvd%uoM4Ww z?&fytTX}X1w+`7ha922mP%rPzlQZJkB2cPE+}WAEabIYy9SpaIs4wU4=XPwdkOU$@ z{IHo`J+^qF*Ehbo6T;71M@JBnPrslwrppFt6UDLsw zQ6R=SB_&PqM;|0sDX?$qJFpg@8;jbcr$A6;@y1YBA_1SP6w-I8<-yA zvU}poL}Egw)Lx0nDdDiDxV*E7sdKbK9rHL2F`^*;aNR6^Z#XIX?9C32MNAYJg+T+f zsIYcSimvPYD_1ZBS+UmsVA)v`V66!9{w1NuaZ}Y(o0VD#LMcj$ERs==WJ+j}#7}2F z)J7X1m4M*WY_gi?s*v47|0+)^z8ddb`n4gYWnXywy_x;pgg8{O^kj2NRyZyJZlrlKA2Wi=@>Y-n3Z4pf58;@?y$C*a)s^Q(x;^XM?cAgOGcLk+d{nvgn5`)he#s!&L&N3SNrmZK;#h zml|B+!Q~jEE;q^aj#=Y#nVZY0^>fJ{W2Ehl#e!PVj zKk!B4?Ig;1NThdoL~3;y!*a>v>;}H5hv{b0q{jhy#-mpbwL7|$q5}hHpBdAAikoK9 z^v#|Xi(_dBZFkY7JN9uoZE4@#*pP=op4raHX}>mTf&7uzkF2+E=+vKeo%z+6!Q~Ku zad2Y4z@~B_RX;H@E?Tksb`Kh)8>11V2b$u^nI!_W(?(!+CN%OQ7Kf@dJ02Wz;=cC} zG+>@k&OAS;l%N)K_0NpMtgGqdpC^*xF6|iIckr23y`%5T`eLKuw3#q*zyCyXF|{?D zTq=Y`tA`B!s(*3mEbhF-C8m!o!FU36au@H45ZG>8XDR&fi~qtcEf|5F{&=NM?tVL_ zruOV^r`3}TH>qxP+Cul;(Ft@w5BtT9qpVJgmso*9%+HIJX|jV{D^=}ksJ70;H@}m1 zBVXecsv+;T%2wr7AzYzr@%xCk5R#VXu#_z+$27F$Tl9-A*GEoKWyI-JdZcsEW&xL8 zLqy+dEc(mzp^%zc^< zaIv|pXoUv0$PVeYtDHRyyVw6nWWb=M6G0J$%F<*5MNba#0p$t#WMEyeqxP`Gp!Vsj zcw4MWzj0a-Yam~ZmV=8Cm<#H+$Ve4Q$CGv 
zev1aG5Xm8JOT`*ydT+iB=j95j+kLC6h6FS2=e=~hB_Dby{t!4d zV(SQXF26+2OI&oB#Sa}Nxgr(RZi*{Ha8KR3QF{he;)<>iuDAA<_t$_}6knnrS?b+3 z>2mZY@bNFrm{Q^KTwZ(jST!7LmK7o; zYNJt7^!xqtSiE0ioE>Ww^04RC-kkC(OpMs*g~&vxzH%mKOMj34aX9g%|F%k{#?4QW zZ%LqZK2Wcl6P$KRF(5H3pNnGpk=a41nJTRQ}xrDHnY zc|>fZRzhq02kSV|o_*gwvloGcBMSwrrNmX{y`4~#!8zGjWv*J}ovaIZ?0#;WU9XXB zp6)F+`wB>qyUXa1+N*B7*es-6Q@b*3Z?hqDE3=g`Z_jcLSFIT^ajvW7LTp&dbS8TIdte6cSUbokJip~@< zQ!L{^OBwuAkt2pry7+(20!Vm{icU&mPwDIl&CcZdl(7|?U-flu?&J8-(ps`D{EBi# zZiv-gTgNUIp$Oqq+z16t!nlCdZh~5*N$k+4s_9(w^G@-gsAb>fGTRq*rTZtzMpI)0 z>t!=oJN}!<;KJR!pJbbda9L{|;F;Nu(%L)xoYw0~-X!QBmIpJch-0#$!9@V5s8CS< z@p0dRh6=ILV>jHL^je>WJ%K+0y>N^pYAxZm687YK^x3hm1I)#!Ssm*gQnD6 zB3;0ba;>Cp5jK2Y7lt+{nJ6=$ERi<2o$DEb<>lw(21A}0=nkkLG}Tgh7;WzZY-p)3 zd12UcUOf?#R<0xXjHpM}LnNuk0GHc!8A@Oo-O>&E1o2$XRn`HTj}bZ|{r6fvpM%d5 zsZkEszYMs-+H(u(Ua}IUa&WDU^}>dk_c7;)ZmT3xV|L-5ceKc6lY+abO3Fzzd-K~NuN)U>)!wU<+A z(I4jmm!m@f67)=!5UZ_Bw=Q=arYUHmEAfo|VxHK$G0(?(8=Flm6By-UrC$tP&B-a` zJXWrdDpE~WE;Bw~=ah+#2@WJBg@uLq&kxh=K-B5_I7QV&0;uh(MxhuXD_G0Xu>y#= z+=jgO6wc}Zm2uyLrC=r1)$fUjc0Du^HyX4bv%JQ++cq$=c&cG;ayt{x{1B(y$SC+o zARnC$Y};R%d84KaG9`N=Y>gJ{)Pn=nqnX~^sq3Fj&}OXOYUYZhbEiHT)2?R`Hg9}U z)CzVp+~&%M_xWH$eVgAO{;n>VAQrSkEYo#?k5uW2y*eGbaV#J&@KdY|zYSwqpgDl) zQ6c|Vjju7&Av)61#vvtwWgI$GBf6dCQSmV*Zsu2h7NrMUKQja&hY}9X3J;f~IXg{@O2VBB+u4D8DAg;!E}AK0QX>S_{h0)rIU1MIW=M zj`bxyfkx*Tu*Ueus=-3E$pc_5+u}7j6R{vNv3NL{lk$d|br-733MUJ*$bx20-lsr zl^90WNzp{jQub8hTXFVT5Ty+3+mrL~D^?F5F#%7jMQH? 
z`IQSo6fn-!Sg?J9`jPt4mu{D`{Jqw)F52|mZcS9cLK`-oDaC7M^XeEb{M98jNsnRl zKCfY$9d8PHo|0|;9T_yvqAN5fw9eK9u~ZX!lo8!fi43Qp*ouz?Roe*ASmSZ8cSo!` z|BV~GiV-Qd{j;vJW7u;C6lW8ARt`&*dHU*@>Pj`OTA6-G7N?E;L^h4DcB7@p1io5f zy?Eu#{Hk9RRT6)}Wv%q^#N~}w&jtOS!CZmyUz3|g zM}^4Jgf*ZFGA)=AW&-XhJ~sqFxU+w60};s zEWxBpNXYt>8(2&YO(Px|3ri!VDoegN#X>NCe{QP#UCTsDk9XXevF?;^82!lyiW5uR z*HcWU#Bn=t^2KsypThNKpQzpC=lGOfyUz|M6VZGOy-pUY1xFG84~f~C2ol;yj;Gyk z$*mu*P@1I0>0e7zbU*qVJmbl=E!~@5vW+<_Q|~vCBIQSn_~+-w;m>4f1$^N}x%5K| z_!p=874EkH;i3!T3273woG4oKc;}UyJ0JJP?=5mf*!hFW$;m%v(F$0`4}3Kb8`U(I z1l_&yJzuM^)Y}-GNvj};g`e@R_~oQuE6fBq)Zuu@{Fd2c_uq;<~bGa)z6AvUr#>FoDLa)1$=WypxucHo2gX{OiUsWLvjW zu|`eK=+JzK@ACj{__>^tj0znsHG8V>`yZnZLmb{$yKsDhej?0gBy_Cr>6ul^k_!fc zM>j?yo}XXnNJ#^I{a$DzOkyHYa5z=N*au=tj>4C#*j z&akD)mV`ZkhB33|a0*Y8=OCiZ#UeidLBMlOD9Yj$FeFi>G{KDG>QYc=d&g}Bow>F1 zcEF4M+zz`sB?xKJBRi>w6^#%dQ){_Qy@r$!EIq4Qr=Ib^F*Ol5T+2NtGpvKRA4}E4 zI9M+?f>xa=BP88O66fVYINg&=gm|N=9zv*q@*Er%qrQDjz{e?A{&pEi(nSxyiyN7LJuX#b zay?!~5X{yH#=4Uu{Lg6o-?4cYH^@Pm8#Cnf2#&G=S$aT@Rco1(v9g}~V(@Y*_TWhb z*>#>OayJ>}dgS?%)Be%HbzCrX3GATaJ^KJnCV~a~8T?gidyGV5saUah7VtU;v$qr- z>B)^3=os)+5 zddpCsB$9EnQ7cye@ibdbBzd^gCBqJF0JAlAvOsG4vDa{m+R--N^L-hPjhj7 zQFKyK5myr)XB>0S1$CLIh`mP3?AQ4DQqNIz)fzJkd_Fhm7q2}&osqWbJrzmIAIW@k z)m}{Uv4+xISxqz%8=q#fxGJn}76x2j$|ATKEQ}$xJO08{un@~sq`JHfGKyR{ zt<>Xlrzub;^igj0^?(rAG?j<(}HH?$-Ta|LyRjew=&xU(rL%{RO> zD@oyoX$X9)nn}PXQ1E55jI_GA=tyH?Z>L ztY!pVm+6+`U#1Y}>94#zL#f>>KI{)W*|fOYWYzD{KiKFZ?08z(*{hwHAAV>-VWxbd zb}Re&^=)RJ$DdM8qfKqL1O~=Zck#%y_z(lZTZLPX=I!esL{`==n-b>rD|prC>4>q+ z%*0GBIeb=kgdctnQRD??lyUH15D1EA68658dH+OfJZ=`~2pTRQF1KXi?=nZ`Vp577 z<2sZWN7kn}H=I-)vyrBU0}?##E0=f=m6}I5od_L?l5L@3qHoGg)sPfLL9QwXb?&R~ zM65UNoKRe$TovfRSfsq%QI+cD88z+dv+&$5F2%d^j)dFV99*z#XR?CH3e5|F7JraX3Cy>O6D zwez%wql}$=CpQA5&j>l_lXtYFApAxLPO*uvTu>Yi<(!+Fo{c;n}NSPYoQgTF{i z2(d8F#u|Mt12#b)9v>uHUD3n}?bMV}KXDiqZ|gF;afrSInNok2W}L;45jsmG4hk4R?V= zzny1o8h#7Y*Yj?+q`i{z7uT<`8u<4@P4-$gCsFBa!>+)a-s_+#Rujb47!{2FV} z=}hwI(g#)UU?hRGid;Crvymjm@l(6n+1uR{e0i}RSZ)_F-ORw4F+8M%;O`f|k8CON 
zAQ!C^k>68Kwd?~Ai=3Z``WhNOJu(t7v)esEXRtTPX)`VdAB%CG`W@EW%S|}$Y&|u4 zby=xH#Kda;$cr;S0OLr&8j+~`OiTpbs{W@2@kJ`O?AcJTxf?#e9ZVNev~+VWdo89I z+Ft+{6^dOJFbEiDv(Qvhl1GJg=?b6FY1x@Np4vLyNdTh4^@hus8U{f$LnltvbG9Sl z1sQ!<=^uPzZnhlLcM5KC@YRxIiA|bnCWXgljj&%2$K~|pedkdQ?*b`TSyf3S!ZK;N ztSo8!Fpjl>f*WG=qhjZ_uFG-0EEdgVINxk9nDxFK0$9dZJTGGVqZ>yfFThl&8x}Dx z9C-whf?aey7&=Kf4?4@0C>U+v9pLTN|GCbF#lF?6e~JayXy%))iuyoTBGZ))Zh^?p zAYFIbavb)bt!hdaj@=0%1kKwu2rXN5Nq~_h2rn58(YE9v#ibS0?sbyO*CvkVkbYnw znzX7z^yCki0+^9LdL2fT5g{4;URkP}ksH5?I`aYIHFvmhN^R(h;Q5(WC$$4Qq3vY^ z9?Z&l=oA_`z=%YVVL@Kg!?$~U8>F&XCEFO2gPordz*^e<5+lpV_uSuDd!X^%K!*x_ zc-MQfM|bk00cLT%bzqeR)=0_Pk(S5Y5}5$WLB?dx#$n1VqQL#~J)REB5p8F{D`o-- z-_~Kuh^OltbB^!>sj5}1a@rA4tD^c~ri}|Ftqsv}%>)M|0I7+G(eT1#Je05h{c|6HX8Anr;M9WrPe>R+-OE zE=9dE@kPx`PJB5C*S&*Q$h8j2_{HIY#}D&_cXo8h8wnC&#db5@6CSkB0c!kZqKOC4_qm8GEI9{wzgmZ_-f{*y-9UnLqvUw0+l#~A zH?j8{B}%NTTfRvzVoK`isF`F#;BagYDyj-07>!+Pg9FL!*&D){5}V>-7GRWu241vL z7P;OksP99Bv0;aGExSA+r$JQ9;+@e{E@bPd@t5RZc}Isf3t%-mLgcN??($(uTkmSm zH%MMU4Vz7D9^6TR=0E&G)d4ojE7U~ba$z`<>Bg7aSiF}j5OJ#>;_1jYiBpu!ePA_I zMU?O}O6Y?Jdy?}jZb~Jes1HR%A%ZieQF!b8+0^aUVoLmHEayx(i>F)xzv$iTM!~xS zQ|bfFA=F$ccXO8ed*={O!gKbxuEbroOa3K@+O7^nY?8wIGC^S~IDq>0$AvqQ|_0$7V)cx+-CX$EpvPm(_$f+mS42ix5ZF<5B99EK)jPIFnnA zKJF$B88J>ak;Xv?lh!h(Q+=OR+L$Wsd*ocAgg9x&Q$KTHVl7ySYDNCIy6%Hr3{Vq| zKr_Q!@BM6J#wC|Iv{xyL@2ruf`Dc>m=4Mt!vDzDAj?{AC6ygmh`7RFP&bW2GhQF0s z8amQN7K+KSh(WVu%CFtSLlBM|0Ta1l7DsUvnj>b6XMk?N2>yU1frG8rg0;Xw_iiy$f zgCYa!gdZEHiOkS@>^!Cm&Sim@b)mt>8v>`@HM`Jv5u%H&l>A8e2}%_0`O>4EtdF9$ zvRVMhnBjo_H0U#UrWK&5(fE{|?^(z_`MSPgBo0!t_fRQx(Qi_9Pdg} zL9@+QA07&Ma*8BwX_Q2$W;{5K7(qCy==0Rj>_dMUQsa#kr9V;CeR`AkcPj~9dD%ed z#uO*!9Ix+9fb1dWoT8;L6%>)0N#nikp(Wp8=(xwYk#A)?iZxWs zmCmK7Rqqq^c-!(An!81K+_!S}3=_VqqZ8^h5h{o>&KTWW&M{;PXI^~rEdAHqB9n|1 zs`1-2y4inKbnX+-6&rXfBVQvaWI$bTpo^DJ`3X}>6hQmFp!wDbf$GDXheqAazFhjt z?tQINvoj@De40+EoS?hM!ZsF-=U4PRRa%7}WAj3j?2ZdO_1zcejTnXwz}^crV+klc z0ezpqai^Q88E;ZypYPG1?oRs8ERrnO+JvdytzcKb-rc)Qz}qf);03wI%>+EtA|5J| 
z3|(pWE=>nQYq`TAxedc(pM0|OjXZ~mABxQkQJfjXIQ~>UOcUaa%jVBmzNB5UP5(^t z8NkN)7ee`}_Pac#)>+{EUyojqpyptI`A29ZFdHrAQt-OX!@0Zp1Jci4rpvwbQpZXl zN$wU~>s@H#U)+$HA1VseN~&?EFD2(Hm5I5{`6E5w5F>bO`zL>)psLp5uehCrR?9Jv zC+Kso<$WG_nEjd0_(<`HNEEyMxaE1d`Hoq?XSnVcF=SCd?`|0f+vZbrEo)@oz=s&V zq&^AVtLX^0!$-@FYOWPdhuEyN@qT7u8W^R=ow@cV%{ph4U>PonjV(W=9~W@(Bwj(H zNRLKyrMI-kL)G(d@2(3H{!02UYDht14H)8#l866Ypij((QoZ*)M@2sQ5odFzTzV~Q z#JHpBg^dL|KElAYv^++JCVul7w3D4K2p2NmWu&`FZsqnv-uf-AjS0A46K0?O`NhG4 z8X>O_LUs4nR}=eh)#>zNF5HV(P56aA9qOfe3-AdFghT5p>03-{0HO&vw94`Vi=G+W zwdQv;9Ek%D&+nQ60`WB9)FqZU8Z^&sP8{*;-eI^5 z47d$>j-y9RGd=(V&~~%bhGh5$MjnmWQk_3}oCR1|@Kc>^CpP{65^6FU>0szXe?kiK zN8Vp$LL+xOn3ahYNJ;4_h?uq0B8fdDqU6T+_!h^zy1*Bil?A@n~!jBLW-T;|jF?MZb`Kk*vXYS?z|g&)i{#6>{Odm)|+z zP=iHLhu~PRBU7jxby!D7;8@`MhJ#Zu9N%*`iH_ZP4veqjXoKzN zC~-14-6b2L(flVR_1^?lBn2rHXC3B$KXJ|gqeAogG4=l_hMmq!LfbZnf=#RMVzgE;zwMhXSdNbI6g zsc4|mXQDP-Q8ch(91rWpnWnIy_cwd;1`Z*nSeB~zK_fdz%~awAQ0v_)V&AUf1+3!PSf4&HmaiZ>?GD|ha_LRJ9O03$nQ zi&-{+`3#3LeKVv3rhgRCohq6vAqsp@dFm)O>V>>wchK*DCi9#A+T{c%1MYDuDdmw~ zpr9%;?7<>kNd~IIcwH?6fo(>}F_!S1yw?KUr&oX>(CuobhFlp0M=y{RFDRU4vEg|E zq+jSi?2hN5m{W&~>fWQs9`{(mX1$OzQi27|1e)bO(;8Yq31Y9i94SKe3^z0BTxx3( z`aL26KBIdX(PB1MZecx%FSJH={HOfBLjU%3(4_^^BAe%wK=qGXJNu!hPw|`FX}#0k zw+BjPV2H_`wdj#d@{J_T{@*prt#sMkma#ZYY9^d{PamLr&Rjp-2+gv(De7tBf}$eB zugZAOh!0P9TkSFE8%?LdB>1YqaYbw~mU!7wm5p>~Xi(d5rD^QZCdlYT%5aWhS&2JIB0x>|f+U>S-#Nqh7(BdLH!%~_Bk7q*D{hB2f z_75_tSpQM&CiVColsz7zSD=G*sQ~nRzPe||zZUd(kIF@5K%r!#3~o$2+qc zA3wM@lJ=_^`^P4d-r|o+OUujsAm;Fsw#S~QHz*{(FamFK;_3X3K(FLdJ^ZX8(f@G9 zXS?bK%VGjwnC%yEZK;IxFu_Hx?alLcS7Q@O(4-CB6g@;hTrIErO^}S$Jm5^KDsW>J z=KgoLvwOVAz+@L&vkQmnn8Ul5)}fN*5mLt3I|M08Q{~KC3<}6A9QxRTPJ@ht<06Lk zamd{M8IYf%HGyuV28ri!7o<&U?#4AAvEo|)!y?Z%N|yF%bxe!MQnNR*5PL$sKR$14S^kxIC1r4;bl_+Oqx z5kV8B`Kf|N6rawMVDyUwX46g=IwTzF!|~&ukx@=k+t<-tUval2_!pijEW^mQVPj!ey|hWr z1h=om$b@SNvphLy==377vdC%n;I85^M|@Uo1fVAB~LiO7tyJFWU&L#x?g4 zk@s}}hrt4HKt}YU_eESF7=VnSIDEgoGTfxuzJ>ZN;2?ocg&OFe`8;Zj6c*qai=e^0 zbz3Fb>h}b$?|Cm;Vu>L8(MogS 
zJZ8p30%+eB%=(C}_hX5jo_v!ccfBik2giOZ&LVN4NG(Kkh&iGdTBguHhTC$s5`1hK zE7U1`!S++hzOYxJ@=?UaJw^OXqH>F@g~X%)sjk%iQ^(UpuOP6_|#taed_N*u*TiWOP5eP8lq5mm+tNRvJS>ln4_oROM|d*Vdp zV2sXkv<}|%=x_rrAwFO6{?2sMPjb{mOo&>oAJ_>yD*=mW7;NslceOIR{@I^b-URQ; z(N1@wcLHoj;(M4WmfAVB8?7Tqj#EDne}MMY7w$d$rExJRi0bBl5(PRQ9Vovnil^K1 zMhxG!qO#7iMv~XtPF3M32t<8U$O!E_JtE7aUjB-MRAD-kD0bz4xka{6^+Kc*uP>Rq zBFGj3-6+Du(A)pu4HH7!nR;26;uZmLg0*ZQS@7=vK-yPi&?1;cra(6!Z`>pDBQt)V2}Dzh`s zgkBGdG<-Uh|w-$!-n{8anm!tli6xc6wImj>ri@t+NLf}p0h#v9P{onUG z(6l=_{Ij^hPmfarmw8gRRH;w~f1GC5Sr2iqu`xyDQj8euwwx=L2Xg7Cl#``}ZV!Qq z5*_vw_MJ_D#hDZ>OWTNgXkPntO^ZVoc4jgZr@n=oirS+Dn9^k@++z8&gq5MHo{yZ# zt@NT76|*a1VA-#(qE*a=Tbryz8yfXtN;#YIU!+(Q?{h=XqkAnhx4%k3VP@f4V$8I1 z>I@e*@(rD08Mk~iOkZem@asYK5!bHxpo{Up4l<}G1QaJ3hN?M^2Y(GtAf_a$W736UQv-wBe}vgS!vzCLSp@s*sb47}}$SMFRo2s|Kjc_;M6 z;tkAX=q1IW5AmnKbPtn^*cLwF#F*F<+qP}n>e#mJOstM=b7ET)+qRQ0 z=Y8+KbN_Vz?cRIW-c_qst@ZH+FYuxC;?7IHMGrS|_h*QVzfuNH6f}keN%`%D_6pF9F8D-Fhy){hPzXz$ECWI)|2 zBL+qZ`Z*P{7l-W&30bx!2{f6iN<&kNbEDO8 z0)>G{=0vh;O}vn|Ftu8!Mo4vjbg^7$9a|yx+bjMhZHKG5gO5jlP3UkQ^urW z7&*A0q_4ByhVo`Bnx{CudAwH4RLOC6#P2~7m|ZvI@QpumA#9A{H99nhHkPFXnqu{y zZ4x$zYXxRwY|PB~^5B}82|&G)|9DbuoUC4(GOx3)BmQhH2yD_5Kx+RacbRBfId2fi!-OCky>)7t?3UlvuIN7&!nalWw-XiaNo2XNF#MuCI zjg*l`tc%U5?i~u$i?tM*@bEb7BES38vUFM7LbT>02WvDyw+_Eo8K&;@Kf1=Y)xVWt zjUl=SLAMg<0M2OOw)%vsyK){kuGzFuE?H4xYX_-aCerge_%Qz)LB}Ys@SblHquY|Rw>q0XQ3Wfj z$?UhxVuH}`XWAzxQ>sp$X7KSiuRP0C2h6o5t@?AH3`|RdCt4I`=Cq8Vj!@rf{~n}; z_j*}u3`Mij%s^@N-8o7h@}@`+>NM^s?{h%;@SWkGb>kxv*KSlEeX7gI>T14eXuo^q z*u@m3kFN#J^jwrE-1Gd_Wr&L~`M`cuFO_XfeaW z&bztT4aFM@snQw~psPduZU$?O^usHT{yuw5ltcco_OH^~u4|O!s5Wd!%x%^+-ynTP z)Xgr-bk8A{`VDb)$@DaTP07G?57Xrf`Q%+q+mw|oyRF|dc^g?H?O+s2Mfw^Q54+tU z<0I}&O&b}qzn)M6YBxj|*SJv;F&lwja7MQHZRdo*_*e+vFuyYOL3l*0*;&K>QhCPd z0-#@tbNNlE~QhYd-bPs;)T5VB(wk`hBLjN!D zdXoMPJh|2UDnB%k-$XE*SNi=28|1;G_X#BehRL+bFd=u^px(4I%diA|9>g3p*i?AN z`%ScgbEHu~0Xci2+yE?#&Tv`)eoF?wy4$-#vh~V1fQ?>0$3)zWA zQ;tKCV?IL!GkJp0F^IoZB>nV}QarrZu)Z5tp{KkhhL*Xjt0bV5O)0mWtow?{P%uzJ 
z!ML>}NdzzHNyxAWSiCAQlzoBTJ3BWZzJi7drOt*4Ex)9asX~Y#?gaGlM(++H-L8C) zDM>LiwECB-QKgb8;H;Vt$DTEqR|~SJ#bP<^E z3}|B7T?XzYF1nk4@cL%3K-gHJ2RX=fM3|M^S-Nko3=2&voww}=_Dw<#e9Js2qYn1z z&>J&GMdQS-hx&%ZP5{V;TuXaz8s;JoewQJTQdX*o^dAMm>a6o3V;8_yc!wxtKst#$Z&(QlOn8MJViEOTfeL?JsXY!ID#axCEhCY4R;7 zee2t+*5XUB+56Fj*jMnIVf!QBAR%$}gJ@cPdov48G9P7Y)Az>Z3N4~o%=^DmE#Qme z-xPkBTmz0i3S0s@m%YXQ{200OHdag|88=Y^!m{by|B=>G_~a9l-HzYCWE~UTY$zP+ zTGXKMCu?T2U@F3e0uYd+;~=R7-ZOCe0=v?53ZfO88?k~QtVqexF&B?~z9|%`tyo?_0V4e%~qe4nk zP<7=aaU5+Sx}@xS;^}h6j22@8DwKs0nW@wTqXtEhtpX3@4)2P;BH0LVsq?tHv3$Uk{Pk8%v5Qm^j_iUgX9 zt7b3X+GlP&Be>lz`-!!3X-j2(K|hKkQTMB2H;a(_eo~{gNQxIqH67q~<-vKxz<*!d zvMsX)+wK!-thkG;mx&e=+?2u&rUk^qMQh<5^-`F9YF5l*%>f&!4kvTRWjx!Iumw@b z)Zfx_Npo!JC_$>o?k+pHayi|gsZf+K%b)n!T+PDSiC7ilMd1NYYs0Oc2UM3`@BzRG zXgHEysBIrY`Jzz3d^@FezK(rVjn|+(&7T>G=T~r{lp4s@r}NmytYlLDnu^B9MV8Rq zx76;ok6r3bYDd3jQP}^LgrWVLgsHP&i@-nJ>&rc6sssMRbt`?&8{(Ct_FKXQil5QbNrK+L#sHEv4QENQ48(mgnWnb z9vVY#f_cgCqoKG-7`dS_=z&^j%OG3KdCeK_^t*8Jx}<@#Q>9FYHCw%JAkQo7)2&T- zt99;Ibgvs%(LrAvCXE){N$It~lXNomGurn*(9h zF)OArqE6Y|KqytKb_(Q-iZu#4M?Tiz;?|}09taNk&H^pmv1}ah%Z;XmX-X{MNPLBB z%z;ymCpAxE9H<3SB~N9Wev>~DSyh5$xPh63t0921jtcP&dn-%v?fkuzRYn%5ZBl$R2h&3ZIzVxYpf3z5 zrr%IR-FjTp*WA#gAj{w#$C5c*V=78W)FNA6-XY@7~Aq1DE7TaN|=suHqTE@DP@EZZOd zao`Zu%%A^jfvazaS#I99OV{9({&Bmt>U;BWiMEm}tr+xQ(fwWlb?uiNCHxI&CL$>$ zZ6y)2RGZUdGurU(_F79M&Pyywg%&gzzZ zx2zUVnSRIe-V(JbIgyehvih`l#D%t0$v}pz&D=kHvth~?nsYs=En$2;V#)3;WkMlC z7iN56vORXBBW0ds#pK@bO0B{!S^SoLBIBu*?U+KSBjeTY&+dJbhsUzuVxmVYXMbrq zzoLm!a_nklgk|J=>&+~Q8ka`-5NLH@(Re^gN(=%PXLB0;UKPrX00OuHkz=gKqCEs& zBEFmS78e72Cc{S$_c5hM`yuxO+CpUr>0q=DG}MXI$*Z38=W^A$DoNA*#wU8V*b zxjl?rvKo8JB60GrONs-={fASss;s3L%ilFpT|+fRYtJQ+?7oSmS`2OE-%|)EG-orVTd|4a0GVbi(yOuz(3}rZ5$mT z`pw(M3hC+RV}WTWhLk}sUa+Oa^1jEPW)-TbzU#Q;GDKCkzTSC`uj`e<`3R36!V_!b zSY7xUYV{yaa=!)(8KIW3V!7o!!Sb80HdzxroG+*1x_ydXy$eZ>;)HS?qTqda&@9$5 z5XVMK6rDNmeC)F^`-o|C0eniqarj(Ny4;cCBMTJ=@5lv zI=(NoH0yg;d5Ol#SC%UL2dj5Gg5qkZ|I6Zx5BBGZytMsHe{IH*szBgM5J4A2bp4XS 
zqtZ&~1|lzBFvTKldCC7o_v7mk80E33>PMoQ$d9pDk!!S{27^g0BDW>AU(N_u8>rQeFhiLSn$!ofj>p5>v z4RB4`tV;PYA5wr*D!;s>(sP2iV727Q^)(h}Qr1~WwoY5LY`!$>FNX(M$nUx*`=!P+ zub28ZLl~y0qD!22l1lPcrhOb@M=~0;H3J!Ds8SbC`@3>8`q1tzs*|)nxGl0KD}*^V zNhG1Sd;fKpvdj5H060q%?whml9GL!5fxN#@l5`{8OvT$q1iZ|{I09JaUX835lEG(o zr>+MUP&zT03h8E92kbK<-|xJzYH?dpEY>Lf&S=gQd7I)?2FS?OjEh{Dom$PM?xui| z&lW9zbN@uGBQQ)GK{GVru^GR9wS|e4P(zuPOK0?*0z6yq{8$?A7_^$-=paNxXZM7^ z3h-HB;_DKS7p?gNUG=?$fpZ!+I@&$wC^qT*DN^+u#%k@flben~phU_VPU)A?k?(r? zML}mrv(cj{fctPm7Sk2B_WQB0!js5pDaW?jSV-0u;ybAei`V8%)IWXNtK-^o)#l5y z+gFiEWey;xY_^oV_i(bRGEz1iH%`A*L1bwWffeMe-~ z)RqDz)y$d7EyUTk?R~F8F^(kZf|2R@m>kY7BE%Y`&s-8LziyP*wEN6vy>yy+(9f18 zeeWVK(YZ3zp8qM+K?JvP{w*Ty3=#*Cm?Z^H29o|xc#GRH=KTufh&cf1fiz9tMYc3A*JD0}*Ojt68SPBC zX>7QFyr=UUOg4vGI)0nGl6(7-(>jVN!$*@R7d$RU!sCX$G?Sj_h?Ul35-*y;VJIp0 zOcwv8BGj_vt-Yty8PSDSOKdY9kM#8B4-({mIIut@`-DcemZR47){BCyzQha8z2?>z zxlcmmO-@rRDT5?k)ktB9;;v@l6Xsp9rTx6p5Tc*(x-oJyLJT849T=6{B5hToD9!D}G^(9C^m&EpGZOeacjAlk zgRP1FA%`Qbx_hz{0=puO1o~A({!*_fsZ*cF`O&~3v+6RD*G4}}A#^4^#>wJud_?2j zcV=h&lV&LySlwtlWKeG`&D_6RWpNs6^}VS}2gv)LS5y~ktvSsX{GxGNuVh+5REeUJ z&mJ(pt`KeSx5pf`xZH>y@R{P8KVOmukMF@p>0eXNOCtMxc#cm#I_{$(yeHpHRQ`N5 zKStF!*GRDC?gsy_r+yjw+t}}mDolHJr6vY8qSL?+GKW}6G2=Y5Uj~ycBpzN0hFE-a@8m{;1Oe3fcmjtAy)OsKL=jAC@qAUuR4VdQcK1(I&J3BAqm%e_1^G=l^R^i-}> zut$gWT2ia0KGJebNXKoTFf3l>$5|#L0G0_4emcie1OUE!-;Lk7kDEG$y& z41St_bg3p?=-b~{%)EmRw&~z_cN}hC5-P1nW zuKi%Aimc8WD9}oXHoulB>xRqSYwH+xI>NGv%#g))!sWV~wo<(PL?6a<9%%nikMD8N z=a_I5=ou>7b4OaBVrB1t6V3O*vxMGUZw*@g3SDA(;dUg zsGly$7@I}>2aAKak0@_}(Z8(-D-fUHTRG*cKNi+*#(TzjBApdpR?6{lM5H}>;fOGW z<{7QRHAlQvlvw7)gAP}$S-=;eGkY<$VjoNjX=YstVG#zvd%9Lcn1?KsHEsBBf5rP9 z+43pP|GU3Zpjs3-Go4ITa1w!y+;c3cG_Z$KkcZl*K3FAEUVKx<^&L=^m^_ zYzBa6e5J`V#g_L@A_=)Glg*NKBCQr%=y#WcWN22G)yTviNR@c73l!Loz%O70)xt;~ zOLhJ8L{F{kbp6(YaM_nZ`*w5Q^dsNML?Hno5|utQUlgWpCYjr*SgA~*Qjq*x!wZSF z0yWe@%L1R@O)CJyK@yyWeo!x`A7G4MpCeJ-aqDl8yQ4L8j`Y(4WOHIeFNHghi*G%gzsP=GzH`hC?C86S!t#K zBFzE?T9^gMPeYAD$m3ZjnFAl#aT~}z1TW#21?2sPTKvhQb}3U_vPgLUQe{QZXOxU_ 
zzZD;z_omYAjl~Q@u`vX4 z^?|k2=nsQQqRFJOfgym^r$VUV--F_K=&4(G#I%2q8yrp~l$Ocr#aEJ#%6%WiLG^ZZ zl1wxQq5qQ9&$It?3Udt^yU&sHFnQK{Kw&Xzh9KCb;nMgJ8%iETDJurB*r2-+^v{5RKF=KKoPnp=;LbHjGe~r$6R3q>Fq=pa68%0c=)m z+Ig%L=vEHD=VA)~WGY+L+0Q!SdVQPL8M2v8W5$j_bRSwLQ^J#kIHAV8dEtFqIybc* z+Xfjj9~mvFc>T3FBn{b(kf$^zfiewHk)nymmbAAmBTlJaf#@KdXNaPmsVB}$SSKj) zHJ2_#34S_h=m9{$|OxTx`vQW4x}cTud49||NR&OS`t z6OV4=|kk-hyC%Yv9?6+yF&rOd)}6w3`AnckHV(;A=qM-$1%(HRdGP~q^} zMA#meIkmeeph5%40q8;b%8@rYwS3&8NL*-H(Y6$?EerFZ%f0B=uTG>}@O@F9!$W_l z?rt1Ru7{a&yq3X;$>juCeBcb2Dazh;gU{}S$o~6xS}e5CzbPraEt*!y?$3sBt8jbP z$=?v-ka~6uQ-D&&mV1x8w+a08tYwx9W7vl~dQWi!)abNu#xw@3W5Ui3R9v#q^Z+=c zt$MBJAdI!2VbE++x_mP@i_mry^D1lC1tf5*wJeRX885O<)YQ`}&1}v&{`7ye04leZ zd~fLgQqSr5@8IaA9>PG<#S99$khPOg`#rk6_#^p>bY?v%A;p6?|l!f;%tlpB|MAG*f-Q6NgO@`Tv(YQiDzJ4=mV*!}g7=IL4uL7JGlt(wE{*eb{v)IQlIg`e zeN+ie^6AUIkPZzdsV)Zu&4Md_Da)fQrl&{~2PpWte{mq|8$gI!ruieo;GXd(4D>xQ zz3(Hx&uwG}*@>EDGh~mf*&)HD2Stb1sEP4Z-qVtqbDzQ`lH>| ztZ0@qLFjlsxS1rxii?unR2-ix86r2+=oY)VH+;Qkd}L&7|-g*JjV*k47fv!}^FwR{g{ahhNuL>AOF> zhzuVG+5e*`Jkz5fp##3KC{^G{*r5PA%y=uT%xs@4+pW*$q=^Ik%E4_$%)|WJ%{LU@ zhEWIwf2B-Me=RgKhcVrwT-x)Lb5H9c)36HMjJeF_d&p{xcso2@3U|J2|7g;JE>j_J zMY9CDoE1~(R|-h!8UcEJN_cR+0bNi>O zL;Yrf7WBYsQT(GFEujQp>b=~_nW5Ss&HSk{a&~7OQ;&)Bm6&(*_gmOwKE@beIu*h^ z+DVze^YDr zAA@3l?={x=d=qpZyUSb5i%8?MfjUiP5dO0k*6O-Px9JWkI9yL*$!Zz7T2zVr{z-_> z;{}%c3j)=V^S&x7&vWl;w$ZgnBXvO}sQ~U9=)}dbTY2q|SGNr(z+n-=LSfBcggMqi z6Z_pRJKL%zin{ykFG>5Qg4+^LJBCi*pWrWr$!tny_GmcXq(bwU4E$byHplmdx$kc- zRk6YjlyzZL3YBUysSn-iHHZ1cmzU@*`n(v^;t0P|L99KW+@o3K3OBuAkEHHSSF=3* zg6C&*?9_Rr&i0GoU%yGmma9So)o|76XK@E=68%d`8m;Wl?z}Ns({pE4Xl4p~bKD(| z*znuxO-&y>0SkED47{KK1)FjVNfFCYf zc6mc+C7)8UiVDJ~)z3fFwX)YVx>O?uzwz~27$(Bd3b!aYhhS5tzzdP+tyj2AGRRY< z+|d}D!|LO2gMvtOgFz^GfITwQWJCHayDG67#28HTpWMmut3Dx#ZSIUbb7-6Pa}! 
zRz4g>btD^BH(E>?P}$fBxx#nMj`iSMHqer3V_8D9>8=P0`&Eh*50TyX$DcUA#1O<0 zcNd!Zh$`f5k-uK|CVkJfNsKLJ-Kh5f*n;>M_wSxLJ{Ai2(y?%q?>?1}->LNzeQlE} zc8{s%!Kzj?BZ<-y3?a(_Py(2ACcdNGl#l2?8+aba2II5UdHc>BqE z$6UyzBeuX8kke+^GFFW4L+*e-jGu&ia>#8PM=n13b~U-j4cJ^5?Idegtb!RF{lh?@ zTDq9{3XdmhJQQ7^${janwH2=NhP2i2Z(Ox?5#{7PQ~$@13=}wou4N79_?Krb3i=II z);_6Q>Zj{6yWA<~j#iu$`8nsOs-TJf=q$SEiOH98T{5v$i+Gh?k&+`=fsfZu+3&3Z zYy^LP-`3V#-WRyC*efA$@DHP7L&;i?=Te!mIplHwxZXhf%1V>3CRG7JzTxpyd@t#r z5rUFl=!cH6#rGC6Ln%#w`}~6aGBk(8l!%X_!WMIN<({DsO_O?{UXt1@^Oi*9vwhfM z%r?ery4ACrN`76TiuhFae0W5SEp8quLIL3KjA5hYu%aVuxJ-wSYmCp$vfity0>SBK zRfn36Hx8y zxm{921$CF#S3;l=@w_W3DMVY2rgTu|sv^*n_qauvrtdJ#D?gb}$C$9l2CQ78ikZ{4 zqYO>^la7*PQne7SB(uY>*j4p5!DVNT<+df}ew99}d%7KN*y8d-R*+YI#>G<`pB+5Tvywl%QE3}i^I?lXV)kC zg%$@8CdMgokMS>wu0!iQW%*6Tco8AH8f#PbB^VQ}E@s&^m#{$hO_amX<-wON3;VlW z6AN2$ct2c`OvId`CmOOJFFYmi%8*K;cS;LZ6ONu~g?4qqFn(U?W{4J&DKD0@Gi_cc zHufw<=f5fn-BQ`+uCJsM%Yg7MSY2tCeo(;LI7Gl~Fp$OZ6~^;V56VyuQ+*a!raPZF z9FEv{%V%y^gMY=X=iYPNC6b5=c!zeS;hA;zRN6*e+58DjbV%mg+SKNHW!t1?=0;6u;TYu-R$&8WMz}t~xiA zN!-=kcDEC;t*Yms>IS6WV_rvp>Kzt33rVrgxD{*aq>n8bqD)TWRU-jhUu!(48$iS` z#Y<=0qhX61~2}HMf19Fn7DLt5l{K6Jev%?Fk`!>xLQLQagwS z?@ZxKlKP}^8NU6(Eesz#OxasX&JWW==9#xse}A@Xmr?MA_Pa7hLi*<4LqP?o6aE5S ztx!mE+Jpsd=h5COK3JI(2YIdCT6TVZEnt~OpE6Av+r%<|={+mJMjM%{2WNc)VB2u- z$U~px(9g1e5j_QT0Zy&aqik`DzP*tX>dH7(Z5LwIthvXC#8!jq$!_RAxE%iZ&BRs| zC*~`Q4}#TI##EpdtY73|+^$HdTt&f_+bNV236-v0empeQwop(F1`BwkVWI_UBhNb9 z90{GiIUqXfcgF?hlGB)8p6mN54XqcW=|2_yd=wiXEKq4*5twpw$xL)~eAq@_g#K*w z_V#-`X4>US6g#(gq|JVeD-^g2x?l8XQ<+##hT9Pe+VCUn0oYVCvr;PkJnlZC%b2a9 zbs^vgan$l^T`qBXLTFUti1`Y8z_rG2S9E&q;FNAkk-5Ud7dS*OWs zA38mg4VqpbD-Ue|7r$_%&?3`NF!T!ZO;=~n_2re)nHCy~ih0Rq zFQuO-1*vTTHDAF$;_R7d>Wjm|%twtW<< z)yy+GK3RBoyQ=4}7ZMu3!k`xxRui%Pm?E3?5oE?Y(hS?Bs>1op=-V_ffN}hplK7mF z22|}$aS-c6Q#5%i%HL4$-0v{pewQ03{Kaf08SQ58%S>W!_yShwNNUS;wfU%6m&IU? z6>6rTJ`Z;-*A0GE#wX?Prd{4YPf=jDN!hd5+HvFGfOzZ)GNEO21XDSUh@jonMxGJTdJv?3HUz3&t-K=uL{we zgSsknINErk^suDXj(p@?VA7Qe`1Sx-4>P!(LLBchINghbziDJ`%HEFcPX31MHQFl? 
zobnjZ*zh=|Ch8kw<}ytrnMGd=LElccf*0}uO~fGF2+xFfRazA}64w{YS zMaHs*^44Jh>j6xp5hy)jiX36NWH-W(%JhUbSJ*He?b50Ho9*E^F=;RC8&P9j+j(-G z@_x7delFDHryS4RTnUr|G~#FayRbHY&#`#TbTo!ZtI6=I;b!bA?dqxe1l}lA8>s3~puzViI`8C0zNnyvG*549^ zL8Nze+&cBkXW}DPuQ#%Is`YrX_r)A%7R@KE!mwR@upkpl>#l+*o{kb znjlnd_@jNfUC1kr%i&6RrO6eDF{~{A`hb5pod_c2H-!fBw#*M~+M1Y~if4z(1HnoV z=MztY%GUnSAGZ|HSn?5L@;5W``||6`Tj|Mn>!aEU>wkJ@^*Ktc)-(xD5`A>mgeC-K zKjNC7^0=d4a5<;>=CCGsde7zXRVqh~X`mIe@pk9gl+?@x{csySdv?_t@qR-*mL6v( z$oo|#k@nIz#nSb?u>*z$5$EaH*w~Ufy-9Bq>t$rD!LBsVfn9*AGS}3E`(;jqRG(P; z)KRDlovrydQN4BQ98=2NkpFpXKnSmyzzf6wNSj6{4P7CE^a;noKf4~%yt^`BE@HEZ zxtRZARRtR-Ixw&*9O%zXMNTw{R;tdvRxEJ^D3p*`%fLHJRy1WTM2e6Uml_)64IMPW zGB{l_J*QrRyUBfSwODATVVycr3ylm`JwLvE?X);8VY0eVks;WH1{Q~At>>Z`m`j>e z4NPWJUvD7Z*^lKhlF6k90&S@J=OotD57ng#2_D}cm{Xi~twVGzROn2D;yLoPqHHm5 zZ^G<%3ioO%{Qb!|G2fai`*(BQcFXmD{ggR1&UzD(`L^ppiiT&x$@z>xo#nWn&V6=x z!Pe<65<1XgRp^C)l3oxdZ7D7_&kpR*WZzRF0je~eQWzQgm;Ny*#6}YclGB~7PREJ7 z!EIogV9>FRf->PCU291Oy~^e{emoE1C@K$|9+vOBmSc72$F%&C0I5QL<-`^W@9`5$ z-S-|mgmQkhrXh>{f+9ViY>+j{xp;2lNP9nKLjQOs{=8-C{j)>N*lO%TNA^b*X){pW zao;D4rxw2Fy%`MzT8u)!hf;?+Rah|z6@0TTAZd7tNXHPQ#TIP#ZI7+=&}cY>E$7k$ z;bve5&(Gjw4F0+k&HuTT=$O$GmrU-mHv}_nPkK55$=iaOd^{WnR7E%Zt;WknW#s7gVos^l2y)t#q|h#+qDtQP5~{}^ zTeJxGgjnEc_#nuFl?-t@D7U9F2>dHfD8W`Mw9%YB+bs&#irTo($tQhyvhJuYvVDTJ z(qk-dM6-xqqTz!6@ESX`FWIj!J_zaEGE?3&BTerc|FLrQ^s&3ONnLMFCH}txES?*b zDDNOPsnK%_N`Z;)oe^hiQY#wlbCAKl4;=1H2`)gLe(yENy>tYs_z>NmQX8V_Y(bEc zoV$7={Ua!qYzKeIiFIN{n!Uiihn~S@OoWBTqQnc6)P;vft*({LoUYGWRt^f2yF9{P(tCj zz(7--#b!5@O6P*sX|VfU&w$kkf1`I$y2U<;disK^OrYPDm*KM^3xcJp6_dd8dwY58 z50-pLNQwgCWLiB-sj=G@g(bDoY;}3!oKicHf98Ud{;*BgMi*UNeNbp|_2AH8PQs8q zGX-Zs?_w*NRbUZs%1a^xo zd(0%qqdq|8q)zqk<&$5A4hwJ1pr&}jpaWGI>Zrma7p?j)g4!weOc3{Iq+%iK2tQlB zUtr$feH^@V>9cgS4W3&sDQuqJEkjqZr9*5Jw5nR1p$x z6Q$G%G@JC3vYhinq6|L-BvBd|n1q75E`@hBYm?_yJvR5x)a^0Wxq($vRG`JPgx?>Q zSJ6y-bdJMLzSS|X8{%4Gm>b?;fly3+fggU$ADVjgU(d$VZ}ZhtkD(g@DaHqZVk$GT zI^Su6m2VtUF6iIi)d6BL>k3V+KE-4G=|^bv*Kc=3$E59aO 
z>0F_hOwrmIT)jT3u(k01U2?jP-LGd(VWf!PVnM>~)pznmCnTioH;hzt6mX>0J~BMI zs4y9Bk*2Aw)Lk~8sL#dfXX;4XOg+)KhtybDqsys%xrLDo9$%)AIsU+*eRsysbi+S* z{I?w#A_)$wtlnJlfp@vI`|swWO)t}87I(R#%2Jk~FZe4W(!M&9+#DviD_ItCF-imL zKvw23B{U3k-H<LfP*VFY(=n4}JL-U`U`iH?p{xCI;2eDu!5n$1BuxI~m-M)w+37oB zjJ|7Hlo-j~1Hsq+YuwJ6{!PBi9Lda!Ds!1rx{yk-D8H8HHrJp-EX#&;l7zY&ZGj&A zSfXsmA#Q$aob6jy0l)7{$dVnuEFtbI#%Wv3kdIQT6x*5fovRKIa|)!`r%Z|XWIcnR zRQh1oM7fyHr_LF6BhDKez1k}IGcMz(-es1fT)PvB^_(f?^N>e>^)#8YE4geGb=Zqn z#;a%hjB<}}`XuX}yBeraL(AfOu99&xexh^GIA4mY!^@JgEa}w~zVilgl;PdE-1HPx z=pGrn|8m32Ll+R0XyOz%X~74~;RfhDA&%60i(`MyJ5Ů))QCM}GSL~cj+x7fWX zB854aRYPS;Ew|9;@!>X~^}S3V9^d8`DG80pt~S1k)BUUqdiS7pT5WOck@%7Q<0%qO zn#}HhU;Wge-;V6+X@#?IV!tT;?{eXH-p$2ARXPt8UVle{6?Q63F|eu&=!get+kZAF zRgHcu@1fol_GN_P=ez}lx|B|Au@*W=yWN1@dt@-zvp?YR2F0%Lh+M7kty3Ovmhqp= zyRw#Qw6Wb^NKHNbPOEd=TI*UQDdWjtv4z*eeAE>tK~a;zk% z%4G1T%EVOcFibjW)d%OdFS&Iao7gQPN}RIQ7f%X<2ZCf7_!O{_C#%g!va=p831;xr za%yt5&DMT>OmbfCQVe|Y(5C-Q^@PvS?WYCG%Oja?)zQDDzJ%GUUUb0W1_MQBkt8hFo>v z0QSY3M)JMAu*dUH`%B&Vs<-(Ic_w|zSV>?{`X@o9mL+G$gLCBcs!`HVj#fPD%1SUfUcCOUIs(U}3)hsbIfRpluhF zc%-SPp=AZK3@gw=!R-_Hw3~kGO8(uTp^stXRa7??D$|m2?c_7xMtDkObG3=yC#RHc zwUBE2+|$?qcPs~&f^fVFbK}j#4~hk6iX%_#P{grSbLcHkO6|KY#<0y`y}_|`r=2RwFp#~WJVNHl zpE0S^u7G4{D350(Lc`f`gpO~W?W*DCt|P@tQ;hqQd5r3*=GsPD@REEv3V=;HY=P47 z%Wi601EbrE8Be`qKc<@-!k|?$zTA(2XZ+s27pK}?xEYsgdpHY4R6g=2gXJOt6{}6O z53VMW8&FN*fyT2|(@7#4Ee~*r4Ns%t z6K3$G?0Em!J}GnF&BNFAyPB^1{_4l__NooZx<^;0I!%^wnyVZ|66lm6au|Sa)gG)W zEV_3e8ceZ{;ppdHR7N7`^;=+DT9-RLmFr~`UpCd=6mar-mO4xhj8FKid}=1{eyCg& zw$9Uz0o(N?$aiDIARRYYc?^oNH<74Ovf8E(^?-1Y3QZm0{vCHcfxXlB1CWH;zMH z%S4C77H=a29_Q=ZKp`J6|A2CTaXz-wx@1qG1TLD5A9hpj@N{g6FuPy7ktA5UY|L_% z3k^`3Vw`k!fs3Wn@^5oe7uYDI4voW(G#X{Ev)b?l*IE#0nf3&l#uzcMkG0z!_$KI| z(R{%CUkK&<0RD0V7ZDi8Be!_qLz*d#PaBB%+9P(jc{0uF^kt3xF$BE0re}1W0H_?T zORJ%y)05kl64#ke>CotWa$9$JM>-e}ZwDnd%l}jz-sFvHy2IEemI(O&*Ond*iwWQYnN7|AX(!b0Wv zksZxL!#0@(kxl_Jl2=Vff$CsvM5=KV{!y2GSUf6*@!= z|7vLcNehWtm*P<5`%w#+HL84K5x?3gU$(DE6XqQY?80$Mi0PzNNl4Wen-m4H#eGMKV$Tun^(5G9;Jf%9 
zkGXT4pz*Otw7-mo z>{zl&EtSS-v0CO!U5EizILJHGyO60j2&yAwy_GGkU%I2pMo zwG`AFnK^MfJXUo*8?V%~hH^cdJR^B)3?dKFrx_pe-5x4C2J_SDbw+1C5d#@nLz>X| z!Er38X@&^-`0z(Z1$OEew@o%C6M2DEEV_r(mca;lHhpXq}1F1$%!20IL1zUG;hm zjT3_;m->fin`dNw$vP8#(N}0GDGYP-zX%EOZ@e3RIg0$&YI2X`Xh=&TtESo$c38P% z>hwWT%r|0PC|8wIf4RRYs*?KuD0|2F$d|1RbYe|x&mEm4O&g# z2eN9FE(?T+@E;c1We9Yb-+f5WXpJ2qh{_nSf!>7+A+vRGj^NHfKMq4M-xOIUSH35; zGoJT+7KTN~I?F9?&lCeE5k>M`Xrl*OG(@A{ak0@47$nIj!otDsCr<}tW(-M$-wwZz z^BLU{$YC6E8rok}+0z_=*Dt)~qym%pF;42Id-&}3BT%e~TKOXVz=|2u6f3zbvbByq zJ7=qLtI{c^T^Ht{EhF@A`D-2PSI8jjP2R++m1ePx5#yIjCAQ|esS7fKY)tVU;SV_6 zZB3X^a=Ly7SET0i0C*AW@jACF?rI^G3O-kxjqt$YDkl-OsvwILQemrVUy^EzspIG? z1t6HbW_-EdzDFi|MZyI>&h(De3i--Qh-l?Dt7O%=9Gx@;LpZgun zS!@FDv?Ry8F|YNoR<3$}F2PBM3-wgTs?Jm4=dH+Fx&UtipK*f*wfpVMqZ4|n86X7V zY*1j8qXhxNRZ;u4yun%5_NsZuLc@oh{uXt+NZiX{x-JE_-hZO^NT70m2jTKPRf_7L7xKrn!F`p)ImaaHcD5xQ>klrBO5(`|+DaYe&!Qh(U^>g~Ed$>>u zMv2)p_HJ+*O~ZJ=x1%k{K!F1ci1dq|+{Lf;Z{yZq+`&sz7_is*LixPyV9oBUOtt%} zVW3I%FEc;n`d3dbe~gHCbmwsl&}u+1S#d3Dc90h^oOCp_UiAeEA!DZx9BZ&HbSL8D z;p4jyXf46wYqWd8#UgDJc{=lN;2ISc0i`tiK#zTl8Pp%iT__QD735@lxT(Y}jeOHbtnJMA0OsolX zpzT-OIB)Irod5l{y{4s<`s3dTMqtc$g9Mmj^!iAP!Af!DO}&g zJS?dwE@{XpMCk)I>*8NiC==oGrM^^s8xPETi2Omm+aVy(ul>Xe2+H=)xP(4IoTWUCV%o#iT4dNXtS+lA`oMp(EizN>W#T{eb9;AXAfj?#T`Cw)lF&A|W~ z)+a0{{owMX+N$;_jtqA8DBNPE*wDNjmY!|aca5}j>T3HuyFUh>%bueUT(kMi&lEkf zXO^4`5+~?Mx34ggn)_LM_&rc;(_t>VUvCa*;@LUFo7jK-`>_7&7_Z|4_gp8J)zie- zx7TIY*B;R2YA)^{3A8!bSRDD214k*ZCcerwo}!P6Vg?^|c31r!iI;8#ewy9PZP96d zrKp~{ZmaIVW)@VcrYsS`h%~2MbkUD+J)`O5K!qm1-%Nt5T!EK-+{l8bmEMo8Lx>%w zm|M#>pNr%&!&DPtdoMdjw8WrWTOjI9$A=7psYGKl{d}TFd-~Wz@%&D0i&V>zFneCS zfez2d&KHz;MHkTJM`uK4&nx~HL_JKyw*OXXc4>cR>H%T=N(~<0U%gZ9ep6%kru#@n zm9s?|05QtiS8UWh&=ZT>*URm6SV($>pz6&8R-bI!=WTPCW@IJ(?14jy=n!af?QN~a zhd8=$>UTkm9vZ;fTf8%rt5*D6!^9F7*F=50aSK9Xd!`j};gb?ti3y`4CTH~38JZH4 z(YJd*RgOf{q48cyKnC{Wrw2cEpYnSNM*JBo*XZN_ z>@%i6D%&jS8b2+J6dUAVa{KTHY6}JqOkyqsu(~`dX&fg{PyrXqIA$Y=@fCL8>ji(m zdO27<7?%7ExyZX8R8vk(9lH$K;{lXbyCdMmz;dx&Sv0e)Q6VA{T3n^C*?zJlug1~{{bjoQK6^IP-e(+Mg 
znL%%LdkbhKw7Gi=`E=rc3*dqSctqB3;4kN8_3{P1q?z*!4O?N0IZjW{^lg6UrY~Vt zK(_|jnc7eJv%KU0V9iOm49iscPjqnSMJ>MC6)2oUKp!6A8Y6Gk9VsGoeg!QvC!{uE za9*GE71~+GS1OhRM)nfd^Iy&IB1*-3GU3u0?A}pcT{cNGaP9)zU$n?`92UtJ9^9^{ zN1J+je^&2B|Mw6DH~D0MSJ}tyb*JZ$)vP2&Z-RX3thV=0LVyb&wa*cD-|Se1&?12+ zQ7s@x`AgzT5v1of)C79t?L(kN_>5H3@Bjtsl}R&B4S+Ojfyu8n941ObdQ{o-3*BYm zL6%~vvV7?bEWsfzRv!Qm=6ieN)6r4$RCyMN&=JM=RN%<+vSa$=OcBeJ^Ley zIip`19zt#1>(B{O%SS8ZZbi0-jvak4{l5S4N-t0AExrv&_KYQW*mG=g5nOcgwD!O~ z2c3Q{RPsl}EE+^YhhAg5O1oOLw6uYx;h1;l<1+wuO7fhTg9C!16jrBD4#-b|P|rGl zZ)7>-eYMW^i`vDiS|wQVx6^Ea-U1HNBqrn#JqueX45k>sN7a5KB0;$+w)#V@=Av?K z*)Zk5*YCp}s59OUk}ucN_zg=4fsKwKTmlT@TTn*@v&!%I{#c{&=8G*XnyL-~rRQ7P z!Est@o9=fQ=Xwu)6PaB0yU#o&%&&_&IT)N?vmj?UZ9T%-ISiBmmW`HqkZKIfKBTWV zb4P_RZZNe|&~QFMIW_bHq(%I(!MsLJxa=6NA(2i7i<)UY^pitgE><77W|Uj?Tr?j` z8(9}>?@T6wXaRxeTW!fuF&7&lYs0j?7Y5uHW&t*XA?6EJJUEHp{b1lsHFJMA$Xh5^ zD}e1~S~G=5YJfLv+t9Cjqja6>qNCkFH+$RmUW5>W3iWZUYQQMH5Rt3Y;#ew|2RM>s z=o{H)c$+gt8js8h3y1Oqd{m~hUmU@Igd%fpjq%GBqISRn!Q)0)j&n5kuI(ulkuZux zGn!?oG@KFvTnk%g-e5pNK=JJp&cqkV=%^rtsWS6R12|<0%ZL=fr>2XLiLRVY@`1X| zB&(NqTwLu6wEwz>Hup(aIrFWGbIZ!a5{Ed+jkIiG^+%w#mhQ(0iSFohT}Fr)64b_PpWpW(5C-g(OV z8F>nB^lFV51b(0e`mHS3KOR!JtzC*m)pEdM(nc7XK10KP7Bd{ssybh-79AG8}KB(B*;Ze7+2Emagm{D6)UtpjlnN+d}`S%Nx}%7PEai zsCC&j?wLU?UzY%f7oB%?Dewu-QPtj=BC^Z{C)Z@9$y~dg2+u}($&|$Q{0X)D;~i6W zEt+3hn<&y8Mm#)5z@V7`i^CC#GLBF-lY9JrP0Y(0Wd-x6)r(_mXF=+JpA5lGJ!B-1 zGrg^!KX20bqdu^KOul#q z_OG)P+8z6o?h_6EdLiU$-N-mT@=!5F73_zJf~fb77Sko)q#M6xe(Z}_77PV#9(VCP znhhhIwqt;<#H&oT7{Sd}Y9ffh$J=|nz*fASfG5mB8IH{UI8Z`)YRcA@nrau$3UE`&ljWK}9zlk;h?s z5k~tOeh5C6iT%U@rdI;iK92Y^9%Wx;F79y>9k>wze}7+^Oe?4Pd;^7LYmIn@ft(o% zK;0`u2gXajktjkDJg+xEEgDh2ToZyFt6=+h!e-Iu65I==Tl>`-QJ~JbS+l>v$+x z44{%rpp^I0;o;!|hne+{4h#qW%RZ`@1W79V#P^^^ayS?n z@j}%G=dVwX?)|w9c;@zfWO`Ap57H_iE*&`hTExc)>Z|n+N|V=ylvC(TrL0)+BrOlu#ViQZc;!b{KIJh)Vgj zb1WX$&mL^Fm7_<+=x0&VH2M}6xu}zj5w5A=%!VM*dKi+Z$|Y-9na}Wo!>Te06Rk*N zx9Ba6M!=GRCuA+2i?14m{x;FE9IG?<`MhN?T1>>5SS1RBTgx#r>Wp^Su5RnX4T@j~ zWwwVrUTDv*w&9K78*pZ&W$Sy6pX(}l=x6Rw}MT23$ 
zLsPUms~Jp_=Y~kG2R^;!s++T&3LFGY!%m64(}JNFn-jtNRaB5bqHPMRH{+0x@?CIl zei@eDYvVne_0o5c- ztL9|Zk0oX4OlGu1xKt;C%8UtjrQtc~PIr{y7`)-ac7;=xlmNy%%^aH**An@~W4-+j zrE7xpV2yfA{dP6H_(c9C)xeoDdsgys5%8=Ffm8{8rD7O!hzu*Eo-hZjsd^qs%sL;% zVAe4wDAVz@V3l^BA8~zPiI%~1YsEdC&A1n4_o~KvzZRhmYD3_hsb9SL2)-2xR$llu z1&Z~1PutJZ4H7skr%c}_6Fnm|Riy+&P_b@n+N0sL_Ewhn4KY_fhEk429Ci0ch)8qKpM+Mp%owz3HLb`0&eS#TQL6V)=}B5)%%_nN!fI(e^8U zXl=(=f4#48(LpI(vhu~?^m5%?rH~#DeiD;c96R(=Hz(y2NFG%IS&u~t<0H1 zo-#HXopnB{obG|T2F=J;7$wS!^V6hHuSg=5s7!W@M0R69{?blaIY-8WD1sHzkteKY zvt4gmrI`Iz8sRDsC)fCX-BLLO_00?^QFomi1JGm%(eSkB!;;0*ug?EskF;9cl5Zwd z>1TSmO3MUe&gnn$sj(fw48#^?dCuNnBgB4^zFH?`e0#CFowf+ZoTPR0Q|!+A3)tcb zc!TnUPE)*%Mp*YGR9(vONRk4jG635{<{#X@r{$^w(WD`%Z zjU=1abdNVkp{!v&KAof#@5^cf#uKI#M=Euq#+4%hTL?DVoi|j7|C@g!tjrlp#Mgbb z7%h=gDKvcR;qY;t=60q{#`!RhP=u6{I`7z#AVuTl@*-zr-UsvXX6*IXey%!DDV+YaBE!Zx-hdKzkVjbc?-wI#>JU};^{8nSqPALsvazP z-A@xvet%7CQ>o315!z7wLw)Q;ZOBuvrbnJb0W6aJt|!l9CB--$vDlW~r(CZ~hV%Y} z$$UOacQhKsw=+C-xqen^TWDx(W+gN~Kd1gnSE`vfi!A32?&aFkz0mi+{rw<*NKll% z)NTZy=@%-LUnogW8F=Oz%lq!oWLs0M(J5DNA2sKjGI35+(yx}BLl0H4prnxQX!a8F z<#Vt3GJZ)5bUavCIQEbGJM`0kEWLffj1TJts$ISvX5gN(QokD)fjy3WYRn9K^)qz5eOwX|l-Sy&kzz9>5P#n5J)T z4VoX7!9kcw!~y|xb=5{$0bBK@)J`896%I~aOQUynmR*OO$h_PH$PMb$D9sXSBU-aL zl2|l^^NnR1uIAU=CMSWVYZC==D%nCfKX=L;3ZYt~Dph{1 z;8BJn&`?(%o==~U>?MQ+l+(P?KR-Kz^PWvn^T;h5iBG=Hho?i_h1G-+C#h~HB9cQ| zp`R#L5nk0*-}u&$j>Y8^-}=Q2_nx3VL^)9tlP+0klFnOwvILgXq6-Y3&& zZ%Ex_7M}Xe6>`6F6|!s@Ua31RBKK^BXnTm`aTqCsYEF~_ib^LJnAJlfyn}A;nAkoo zJb{i9FW-g6m{VpQl+X8JYN*gc2IgkX z8F7}*)?d(DjK#)a#|@}*K=xn4j)ZQ&`z3K-oKsu{TMXHZ@an(e^8#0zxRP(S;Zt&? 
z{YmtwQ1V7OM{#)bk?w}xb3%@t04rL#(y72kDJpf0z=k+2uO?aoGPWyJ(MSU&`f+bYUi;eLs{F(llPJWX_Wg8tq!5w0UQ2!So+!Mj zQ`f~8b61G=y8z^jmQ6Sjf>4KJ>?DC3Rl+=r{CIjGNKLgvMKROZ5&qZXAzrVSHAMBN zNs<=VH@ul*S>aK}pv-RS-a&IM=R>~Fgb58h@*?otfp0aQFB^+ZW`u?u!Zluvoa8D4 zkl_Nn>|RHtC{dIKfEGi~ZNdFgt_L`$+l}D!!kyTjwtxF*_W$LX@aJabaiLOBo z(?WP$yZ$-CO3s8FuwWiuLGcjNWo4FwZNL0*7wfIa5D0+KF^Sk2qsoG*L=ZXQu|t7k zy)EBQP`vC`1q*4jjfm8W$0Cs?tXB3NNu;X!j?nYV`!qTv2s5EHyG_vz4DyB&qn9Z& z;o=n8$`@9;QA#45pvgSjTKN#Rc5}v|$XE^5Pu(AP7U2g9ErZvt5AU9Yn+{NwDJ>!T zt%>yJBFwdy+A&`cH$~5wRFWiB*Xv6mHzvtdE);f#LMhX1-=do zibJcFisR+K$~P(>TrUlk*0vsKsMMeWRI@^pO*Cp-ug=Ta&U~!F${9eT>P0O*C*n2s zO&Baa`3ou-k0paKBYr1p<;ly^`z$4d2-M5eef9xh(|_jZ;4BO6r_3I}Y?8P!emkG| z2nc2b0SUQ(G}*stA0Z{~u6el$G+IsaCJE%H&N?GYl+^2FqKK0Dri)dUB@rLs$XFa-k2}8@pi#_HMtC0z`Hta8yGUXdlX{y1 zcrTFF9PSD7wang&B-YY@5@Y(&Gth|HwE4)bAb2XNQ^%s7i=A|x$<4vyDs-+JSsXMX zDIQY_nUiyDBiN`j23x993MJw;<=go#3F>|)RPb_S925ly)Z&+0h^Ip;&klrXB94?3 zfm*gX?W*BS*HNe9o>-w){ZJXaza4K=LhGGlm@8A~Y&3;+Dbbh843Q}?@-dL3^gXT$ zj+0_GvTpvY!ano!HgxD@zp&G>;fxuF1w!vRbjY>xr2*EgbpCd4#0U+idW~u|WTi;> zar$D0uW%Uw;+L?Tmc~rP2rIC+($!N@U7N9#Q9F_v?oQC?Bf@H517RxyiIcVScXvcduHNB$Zn7==XPJDX7V z&cQ#M6Pfj|QQFXvWfoj%3%Up&zC7%`kh4=nJDsi>F&pv2Inp~8sDhaRa!>rxa53dz zU|?QR#omkOr{nHbes!55rqvHdq7c9(Q~2K|zaW+M396Yi`!v{ABMwr{9$6(zVlK4L zh@nT78ir0DJ}@DMIhECp6y&T>8{gUJCQ0L$O>bg%|Bw^cZfi+u`gR-y)!@Etro1u| zu)Lq@CyM62Loc)EzN~?R%H@V!L8(|x=UWung-CID&!PEff>@^Bzzv^LlTgJQK|*q2 zqhd)U5*!-$v8TgqD{ocD%qG!pD<9DYpDiZ_%t;!kqmeND+-)(3g+s`JTiFNr?bFHy zaiZBt_wQSi%4Kt*;Z@m;Mln;M*_$GIKS*#<+L=|N#u;b@q}^r<;v(qeyu|jAld{Ha z8yb;kyxelO{(NU^!fZ{V6IH>vFE-KQreKs17#Y6TSryoUY%+d{x*6*F{(2mD1oI!$ z;VrA>Ls}|3g=qLwny=;ZEIYiD*W;lk?e*!J zP}81o1U*HnZtsG@Yr34p_VTRcSNx>;%<-Y>s11I;25dPXD0UcOBTyAN&ifuaVXoLs zT_MxLX-pg9#+Qy1KxexbH^8CZzCdUb8a?>SM`o+>PA%iJ3;*Pn5ujApA*C+H$OH7KjJgiLj)(T3~17V}?6lf43WEi(GcZ z)*0oA<1Zc!G8J;yqcY{H0GkDaw##8(!oJ^$%tjXm4{d|$_a~%o>(PRO5nm(JtfO4d zaCzEwFo;jBudY$ELf_11@;{j1tkMu^XL|ZJ>wtjCzLXYj@0bAItsYIT3mKA zL^9e7*&-e`a9&8O@gn`cypz=WmWhHI^b@LSXL1hLfo~m&cCNht_c}_*>9>Tm0g0KN 
zyc{zHB$IL$;hm6Z`9f!8b*ymbd!jCiA3lpW3zOoV3l&-u;Mu$R${G(tz7hf_ zqa({$319)+_^|ux`lQ)kc_wDQQHF3Db(g~1-2z)k#18&7_iNa+ljzL7%~YERx;m~y zV(4d3%_#v?`d~QzS*Ll`Z*y5Gfqq3Wy{46iP4@VTTA%xFEtCL(AuSDCSQ^6aqjzH7K;lLZ@tuq7ra#UqsQ!T-PSHT3IXeLHW^5 zX-7sq)M#WwU)T>pUcFzDkkA5B5|r)Tn3<*5dw$`F=#1n@-bs=lX@|iPa98oZw4Hid z7r$csuUvTk$R(@v_HhCg0Cdd+6b%{K^)PO4qxB6E(uy|E)*WF>=c?1%PG{|lX zO78ScZ9(9as#a43I^3OoP<;iqBE&*|>@h45l%-$0lSs(S7W&lvoLxcIEpAXJy*0Jq^kiLh4G z`P1w5;bC3zv&_C`q_(0osOTh8varu#M>c9sGWLIVnpl0l^CmKP-p-h(MX25Pzk}Ck zF?!LJx4*wjB`5yx>*x#kbsW%Rc86AKLjhJWe?Wf=)f?7x+Si||UdgH0KqnrO<40_+ zt6^gc;gw=9n>AnMZUM1s=Ho9-%GT#^Uapz2yn`gNn=L7i zMq$D8ID9(z{%9~k^q7d#vmw(*b|=-7AS4thYd4g^adXwKM#Onf7=G37k(XD`BfWGI ztt=@Fq0?LKbg+}W;hO85yoELV#!91ywT~zGq;5RQhcjLOKRzlybcgJp!Ux%9#w6f} z)v|u^4j;Qd6`C!0>2@N$xjtIAPc1ZQjwzKzA zvw(*-SN6vz`hHP13T0h;&NVvOC*Lv;FR1yF!P$Y^{nm=kR`rytzt(iftxxaKR1#P> zA;QkA2rJg{EI69_zb0aH?4NRnjkGWk;Agx1eu{>hPVZ><7fX>ky8hakb0Bk)96dq6 z6eKzV7?qgUVilDpb48?JQiz+I1KIo1E19b&kDzf!gwxDYZ*2oE=lv^|j8wUU^CZIQ zhBs_4u;rkESxf**tl?r6Ds#CxFtU;XMd;Q3`4{i$Vkf)+j0d@P9VD!SZcW1Bw*gYp zuiIIO1nXUa_M-vhicJ{L4=2$R_(10sJv4it9*Tn+{C|TlA3UHRn4dm<`uM;+PoMn@ zhwwBLN?TbBa}ZJdB4AOW_94J5kXO4G}lj_?sJG^{ZzU*Gx4!}{3k#n>U&L`J#5Yk(UJC=j(>ULKkp1^M^|a)K7| za&-m~CR{O26!}m+L1-@me=)%&oQ$8aBw(6VY>+Zh zK7r_JeiYS!F+dcVcvdmVPAPJ9A;eT*i{khnB#Zwb)ldM%0TQ~B6FdB&Ox+a&{T3=s z8LV8SUn-5t$+--er;ImaQTb&O=jiSTAulWyIcn%HqXE^L0l_U}ZfThZ{b6oSZo+nj z@*n=7fAe?!c_EhuN2zT4#lKDEcG=pgh@9J0T`%K!Df{(XOVHG?VA@@W2Eh<^{}A1|%4 zz^7Vclvn>??k0eX_bxGnvr`o^3Kjm6rtk>EUTyHU2sw29)9cW9H# z9#d_V9-kY-&diQEvpZAz7S#B-1Ql}r`ayYeV644TK1hLC5a|~INc<=gD!#usjJ~lk ztP+?_(v;&3N&hHZsQqpNV#Ufm{{yXxK~80AI>hVua}qgWVPUjy5k&N?4E|X$sszwk zANU!1b%i4in7F9nzl3{weqj|oF9TkyBl3bP^dc91e*Z`uHo>gxN*it%IQvUL{(>@p zR*n93*i&8jh( zXR&|#8!&6>4?w+j4@xmCC`6W@V2FHnF}uSqy{oP#%qYehy^%Z{Go8`3F2B+*6l7f7 zz{N*!aS}!GZzz{^DVpkibt%{lbBea#TDr4=j0glJ3mYIzP%HtpO-KO8Xx95A|6Uap6^5lWQ@3LV&{#Iy?7?o=d&yNhJZN~^dLx9CWN^5;*az|hF$OqH8IlhU%up!F{;ZH`u`cG=Y+pTuf< 
z%U^8fn)P^}2g-OGDSTQ{(ZxS|M|LA#3=I)K=&KjwlSUl}&d-hZ#Wl0|dQp3~rXAS) z4$jpp$6UesCV;zFpJA`xLhkQ%{J!BG>zNu~+k^J|4=KXmZx-1FBHTEh|F^Go?=)fsp3 z?$AxEo$&bx6C8vpwW}FEr7_Vr;Rj(O^v^>2}2(YUEU?Sfqqi zZb~QG!DZ*4=xRU3{KF6dEZ6$qCotIoILmsBPFbXyvR>*e`RoS*lI4% zE8=QiuA3rm(#fn)7_hW{K|Uq{DfdRSbF+n=V!foDUp#^v6dOhakt~f7WYrl8UuYRY ze<(1V#wL|9&q2Y%!w`IQ!dm(I##pX(_p@UFq8b_He=|24SR2Bk5__X5P;m%6*S%|r zxrxjKqN>-rVaK$m>h}z!rqH*rjHF#*)O^0&he(HxwOaJuqz4_x%~QMJEm4^kwk95j zW{%~uI}lQ@Glr1mrHk46X|~mFw{$VGdsBrqR$}>dv6y_Zuw)pYzc{k#;6MduKI(9( z3d$K>7rJv=TcS6w9F=X|`!P+1S=04g=n#JWx9%7Hqx-3I@;CzcH9;fT311tp#9rHM z`Jy!C239%h0aZmN^N2Qe^cgzcNZr&M5uUJG($Bg_zViemw;NB{6nXX+s~p-Lj+nh2 znRIVbAvu`M4hOnO)MYH9Lb$pnKV-MQu0xvpo1Ssyd28XK&5cU}r1biH@4~q6X<{n0 zWA;vD`#Waj7bme}t1T0P*3C5M&~`mOM`F?qr2|Xr+?S~rRqA%7GLvNJiOqRJCQIG74|` zPQa;@wtSOd_Psn?zTC}u!6_(9|D}M*bN*O1{9t5dG|(kte$9p++5?K~5v_NI?2t>7 zf&Gu`HH`DW9S7s7U$N)uUSUR?@q+UVe8U_eU3gSC;zCanEvEa5h>96iKcNZ&kr=|vCIn5`DhkvUyxr)h@n0f#VupGpJ-lE zJAQs3DD~8AAs=K6zG3Ea{PbCli@^EAj#{GwJZn(X@Y>n4!5BnF4BFfx2PIEeuODZ> z;mP__TXZ%^D9oFDY06jdGUk$X-`BdgXKxnXXHv7;M3Ii3CX&cnubr<0bBCWjhg(m5 zRplPDy%WCsaOdXS&b|JUot5~{GA#aChMFBR%j{zCu<_}_IPn7cILHWdDH`)1llN<}>Z_S8_ChvF4Jyah*s zd65RUF{fnQM zIsP80?Q>CxMkkO+6H(qF_vTq&;1#o*JzHo+&Bs&uK{ujdO`N5-`(6W{jzIA?xfrUr z=WCki|DmxJW+Hopk#I75Rt{0flb7|kg_}bFRNJM)5*c@d-@YUd^*!7vwRGQ#3g=-& zV9-TVCQ{`&GoUs>M~c*RBXW0oBjX@LwT2LkNhJx8oH&x(+Ky2`QC34rJ@jPPRVi{CT}d7Jy3Vo?f7B-&nBMCSNr1nHWi8`?MQ37=24Vr1dsHQ+L2Qwr1WS!gl?gd}E$)so|2$B%om_(!sDiOrSeIj&w zdi?s0JBh>?KP8bI{seYUe!35wlOkBn0(!!B|FL2bBs!_zJ-rgFt4c+4+@?vgRx)6* z1Uk#;brTgUC@cjWqfn!Pk>20rs*c=D1!bS~`C`anS{()+M#wk-RMd2zTy++$jq@0; z*U^HANC<3m?+B)Q)BZ9~u}cmnp}q>bJf2&X90Mea?&`RakZ8wP8<8o2+^7{qX-t)f|s2Kmy>=JINDcIbq1U=zWVFAYLc(c%3zI8yJ=Ek`TK+o`PWA8=Lh| zPp(JWKJ&rEq}2{UwIgPsXADv`MgkSeQ>f6Tay~PLs=o|v@p?xAH5LURga^dxAr+8V zPg%+PzIU;XC4dSqi#Qy!eyN@`LTpQ4NjA8ehQF+w%dU6e%+o`)!aCgTv`^Op#i2-+ z+8c}vOv!}U0V_9=Irv*Cd6OW~ZyJ6j zPwau#+_-XmUbB^%po=cU;c41r)dv|8-`m?RJ98j8%Pi?g3klFnZZdOap z4f*cVE8b8NTlMymXLZmYRq{`YsYaE-_8-}#p< 
zH3N)F0l~YGv3GNV)6!F)|FTy3k;MPFbfxZLashr3e#|3qK$~v3mdmGPqYghRAk#tr zLje3(Ghg6M6MzsCs5_-tveApsxU@m__*>=mYS_nZD7i1LGzM$E8-FC#p{Wy(=eHR~ zPyvmmP#MtX3V=L|5 zmK-Wz_s99-Sl>pSjM!%X%FvirY)=5h4gGt$B;e)OF>gFs-@y?)9#0FDtX(0wRxwN3 z!q!P&*4AOnhQpHzHKpU{ag@AVE8AdI9nCNM+Zv}!-#^FdUW4;n^SY^T<5^D6I{kK-VQ3!WXkzo__MhJHY7}SZQ!V8 z*Vd<(^p=N;if;m0&nEwboXVxF59Yn~2M zJa{HnDuEo;T{8?c!Tb1{{2nK;2_k}UYblmy5vzx zoD7{Or^jUIH^x1-*2{P)3R2F10MOUplf&s9>m7xgHRSOle6GF_?iP{htKZ8 z!aq?Gi6$cNIim8GIhO}wT$CG+*c!GDd0;PH@?B;if1nf6^jWH@=y#;t4>@B{@2Ckfw8!^p{)L~+`Myra zZ)?_Vx7=|kOb~bP@yd3_Q@pp6wVBK-K9Dl0j3k)8q%#TbxQC002WW|L%(P1A8GBF= z5IpR%_d-xJ;-Qt!lT1Nb87tDnD;lWP$HZ%fb4Movd^5RDa^&ok3Uk@p0R&TN*nuS- zxg2vBoT>M^z9O~Z63CY>oWegnSK@M2fb}2O+)tF%PvTGETfELr7m#Uy)euVT#DabN zq3Ndl=3i{os&kWqdh2;KP9E}|jvfUnom5K~Ewa6};R69|GZyN-flq459mej>lzeR3S^T~jJ~H8F|Ls;ozTJ8B3lX*SNA1_z9UK+Q*~sB( z_-mh+loR@RN?hzXjM&61)Pt$cD&XH^KCY_#Y&qAY<(8D0J``ukThIzC85)*G_w`dku zfCBTI{eCHZy#BbnRh7qq_;20#%M3i>7j}>MxOjKpsRBkV-4P=MBQNwj6iUXsbXJ>h-S6z;W~P8>LDZ_D4TeAf;F_UsAD?hxBNKQ1WR9CC zgFmM#3xJwO-fO1XV_e$hQ$l9g}q zbgqSlf+{cpxv6Cuws{ZoCJia;JuzX5$CtlV}ZjS)05h z(1o7~Q{DT;uNLZja+`bc(INA7Rkrn86m9mRY=8HWq>NT$u@8dPVL>_Dpy?Mt0co80 z$Ct}+1`ZMzk`Cd<`M45D?5pW3|kKfTf0vC5>DWGI)j3uAD#{;i3?6nk{hXIeZGxQ%9a>CtWt9e$;&6w8)Ebw_tdz{JoPF*&W5Q)+uRdC z)Fzz4&8BYzpG`0^URG1e`UXTI|I0bx&n5cOWdDA!@CEkM5%jOX%a>{?hu;g(U}SQF zP$$B8y6>nM)lXn_W0pqSr%(1+%kYI!F1&6uBpybi9~g#L$9!8l1q7sY>Ys)!bmSwv zBkP)fQ0v%yIjA%_c;B+o2}Rr*I%~H_tQ#tL^74Jj^7^P~I+fTQ5q;--tAmv#_1Q zKk9B}DV!Q(uYSpM+TAFkFs|Oh#j$lBco3(lDwsbMy*OEh^`b(OSP(}-M9YeB1#=pV zc^de#@G&@-UPW&OK=6Kk5XzKW?J9~B-ab27xI|nKKDtI{@Aa36ur5A&5xV}tUY)K! 
zw$eBM{)FM(o!+bQ2ASh1%j9rdX6izjN;MFMs67J$CN3V#kV@^NNAfD5*NLM3cB2MWY=RKVX1o4&nPC};B%2*8j}kn>pUgsCw9eP z;M?9HH2jMwm3+N6sd611F!I(^2I)@^_Nk9%sNpNoe%l%K2U`_bvUUlV#QWs_$&Hre z+vI+JXfElr{ZI{|jDkR0B5v)>-H#p2J(=-5p90ZR6P=H)=k*XUDulytw3_WVp#pOY zWs2d)U9XCU3f6g_s*x^?q(!*?q=;jOyAi^t`p!RQKTR0Mwh*069}h7a#12A&3q8mO z`!5DmJy@nC(AgY()w@Ak46XDJXv8MIs-Y_<9)3}T+{w_OW{wci(ZNC9I)uVJO|jkU zru!M+7t9$kM6aVxJGIYQ&A={KYyXL;fn0k&`Q!6(TtI0yHhDLlib86-rncB65bhP$ z@}C_B=ms+U)3(tckXX2LLfQczzc-`*E2oK~*JKfdR0m&oQIb<sKA>5tRk)I}rc)ek-bry5Ftm*9PaUZ&LWBH^gK`G`fd~i3gQ}Y*lBv&#E16 z6{WG=@WNC}D7|{Zd%w(u{|jwqV?5?>XPXmauEr}(0=9U6j{fJj&%j-kum9O&f9LBx zL$Q<$hB5KGiK4f$yU!Q=hr=r^Rm)|}PaCVh4C~J$ohE5M{dFBvb@#H&BFNEMO?L(_ zoBh&_!>AwEB_b>CH&|odJZRAD(ZJX#GGnP@L|`=!3a?`pmc>55qi;PTrBZ78UfbXe z`+#pwqLHuoe8NQkl;}4X5MNMfIqHp^A|n&-Ez00*wBVy}PRtZ-vz@GWp+(@Qh0;=D z(ZRM)^J+!Z-L*ISNTlDpoTLp9akQ(lb(;N%vBJ`U)jr6pId13#}gVhr_#?q7W20NZez#F;Qc zo5zGd_B!(ds;#IProU7vc|p{kYzOC9v>YaR5K!0QkMB(L^zJ1YBpqaapJ=tVw(pen zpd3~#2FXl@*S;iL*KHiFZF z^NHIm@?2N%P%X9lk%K!Pye{(AJJ@QudKYgRGA{4y)pTo|Ft2bNW!8PjrrmWD(~e`j zs%M#ront#Bq?53CDjRq>Zc+u3@mGxR*&Nel#n~3YNeq|n1y52j(y8suR7hnq+~O6h|I_BfTPQvD zdkzopi9vO#+?zRXq zm$DH)7cOA@Ht*)s0lR5a&6Um~k#5IK_oS4?Qs(ukFZA(jEu5>etXv3ZbY>hee62FA z>cIRya)pJx6E&4e2mNRLc$oJiNtx{Eg^*z`^(T7)Um6BWu|mVsY(#9P-ELxVPcGlm zL_xHTroz4HWU0sf5$5*_OJ2FkctTz`T$*Nbe)=P}!hGeF-Bw__TaK_5{8mxzg>A(| z<*tWA=DUotd#dFG`H1ns@$bmuCo92V&$k87P#!z{r;B~kNJwh?VU|4;b78QQqNt) z?h^+hLP{I=IvFZPq>I`_B`qz_b8b9m@r3X2t~iVl)vfnWr_7gf4kgZZ$bazX($g-e z%?(Ao$>@m5YasAqPG!e$_=`U|)r4 zS$3zV8oo8Owk)esioP}GKAm2^xkF6~kAXQq)+N)K+tS-`vaVE9LejZhzRIUY7`9P*UsXW>dPW`u01>z4R%I}_wTI&~f=3@$U z(XronY<8wO9`HF`X>st(HA>9AnnKY=XH7Oyyu07NXci}v5UzYJ!*}5%yZb&^ykY$} zGfuUDv-;zT*z{rUrliqwjh|}pOMJ8E-G31O5V-lS{eaW>;r)-FYQZ@Bn)klUQho+e z3!souK90^i)a}~hMxRSZh2Js8?9+RFQ1+0bSfDxAN&$6upVz|ckAig<4)@8BhShMb zlA?_;gA(@B+3=9rQgjYdV*g3Y;e&XmBhsfrk`?$e<_b&H{K3}k4upv;5+o@`wh!4OtYtKe_YaU& z+Gj6t*;W?xPF=|ILasp?HM4@}nk8j!X^!>MUwblhabkVil++uiAz!>0WaauY(?FcOb<07Jve0G*eSM$P z+Wuga|6GX!*=ocCpUJv;Y5i!7o|;ik}S 
z3lt_ScqwIoIsix1`{sv=iB!Qv9bbv$r6NlF_^pw8`$UHSMMo14djn!lq0V`i~N zFck@?3`cz#AoIBQFT3CKeAc(q#c4nJ%5|NcB$zbVSsV`6t4j)=Ow^AqgNO|Ka&rh&KI3XKl#yd%FXin%ah+<|TStR3n|K{|t|_KP$;J=HBnHtR zo>f|Oa#kF>gSX!- z;wbjOAy&g`;dD6%k2pUR9eu=s#B8XsQc&oql89UuYOg9&$s3r0hA0?`W8C(w-ITm} zL(qakN1ZETO#?HoyNy=nX|E44Jo1>( zn5g2=8}V=n#+;+2&J=}5M^74#f?9>eD-NU?K*^q?Dx+2QKYcX>kUR39yZ)*OPpjda zK8!$+K5lgf!^YnYh}R#}RE2kZ`$;e>V_#-~$szuHlKJcI;H)qB(ZQ;su|Zz@N3RjB zuQwcm$c+l!$+T;Ywm67=|{cS`o4Y$dEYoO2&it@O2%zdAi{EXf=Zi;-VOj-#t?M zO}~cHQeUO|SH7iJ;Q#>~(2LN@ec-DIh$VNsp$gDfX+WgX#OirQd6$>n>e{zR5b-G@ zO%XM1*0z9Bdt(|^@i9#o3FkjGD{d?ZyYKcd0@R1s0L0zZxXm+x;=#rNLsIr~6xXYZv}io-TB)S`8IZkP=$3au7&m1fJ3noHHM z$4Nz2&WlsN3#BHEj2N*3zKkiZsjL1kt+x@tpXISx(j9R1U3E8nM`Ic?I&VhQ|74~R z%6-Nno5LNZl!EU!*rF{(luM&1)B(o%eBzIHv0lPFJ3UIx_!reG)hhm5;~P+nUsQa} z8O`UeO?dAOhXvmur3{DbFX=T{5@)V5?)4Vv{qoh~=HaoB+s3{VlP55bn`yLkzw7Cay;HPu2Vm0pG6V+5I6uHIX-VGA4@ z{`-Dwv*FMlLosh@zEKEinMfOs6{Q83NLwSn)rX*=0aiX=!^b1)=&pT$L8}q{XUehh zWCm;UOf7&$6v%>z{JvBX-L)#CAlOCXggODg^IH7QdLa38x)0F(w=w;0kOVwc&jrB= zYPC+NgnZ>iuu}EDhpuOBM@GG0$(Tc;n;tRPu?T0NDH&|`*B&5aOIDFbGUxe)b*z0` z2UDFX6BKMd%N1oe){)<<6YflXKIWE}S;Nbr%QZxVmelR>su_h!2bbwoMwG#(jeapM ze2dY0?du7x@Cmwq%)8MPMOjpGRYh+k6JPKUMsA>r3CeQ#-$CE%U71uevw)1SU0~0A z=P{U^|B*lZj#f!1Hec!_Ex+UV`Fy?8rpIjgj05#oS-E`vZOR8&})B!b2-FAQ@+I%*Cg!UCF1Jii}t=Qtu(ont~WS)dFFZDu)f z|8cSE#bMU*q6aMqZvSY5u_=6@ z>E2JEjTm+xUWZWCoNdH(3h5P1^E2fT9jrF9#i-afX33Ag#*rX9SrVAc=?0^f;0@O8 zh@ds$%yqH`r;SbghdZA?6V&USIlf-r2`9>(|0;)QV9Zu9hToC+c_O4Fh`{gDo>Iu ztG6FRGDN@}H6qQaVvIq@mg?1Kh+aq5YBGI=HOV>`dU@%v(S(lP2kFqWX&_eBd{tBKU0Y-jevv5W!J{C=}xHvdG zqisOW33dD=p2-haRF}(+OS<1mi82Y^6rJ>lQHt(6o-(l3ZGZ!}1I_#SP29gQS};4% zPSKambvV|U;xRq?#Ww1sAq(0V^SpSmETK*Kuj=wOlmAN_gWQr7;s6*3AW>kBn9aqs z8lh!~Uh%xxl#+E2pf?}Rzg3>S77iBna{WHp*Pm0Ooyd*meR_5gp!efwv zC;gY_=*eIj3v;W81?y@{p9YpwO`|mBu_mdaAx9uB&g%)`1yKmRfsKU!73KNTFzvn zE9m0*-Zd$+Zjkc(N2kwWJ-9n$w2Gs0!9+zDYVYmWe&2!dczk8*cKb*X?lNlFk&;sK z@O4Ftt~lKJ4}F@?awU9iMDQQENp3wYymzc}NaYixD8kqoJU%}j(mHLmz3rdlOUE%v 
z#`Hycc^p-FKEuX%Bz6#EVi!PmCS0FYYzWp?>v>iiowL@DfH-%Fu zVic_K&HH&$BI9*Z$44#`24CoeZ7Q#2MLai4BlM@ZrXb|vJ6RhyjU+SkOI>!`6?;O9 zNiIRIJ|>qro5=nAC}Wv^^mG6XYqgCRGpV-7prK_h>Rs+6XjtlQZ$+5gxRLgkiSok9 zavK7%1^au>q`@~6F>-zRVrKAneb5xQC!qXKpObm`)>0#Yn(iYs4!0@Q@~Qf{GInwp zG|60C#e#XFu2=&ZaT;xgA=2rRuU8zI?*K2@^nKDPrY<8W19eaCy&_-jX#lCn_fr(BX4fSuS{wEm$ z_Y9Q%jqu`qvfmh+l|j5+GFiCddmi7+;IPkiWU)yi#-&`#i$T5E=b5pwvj5t{OZHd$ zQ@CNG=z@0BP09PH!vH~gOW4QIO z%}Szg^(L8`3G|Foqz=`Y&o3zLVfsj03zLVVEfS2m+4$#VfJm{6wIsf5jBIs2VHbGp z{!jxIUn30t zW#?|tr$ZI;cdlG=(7|r`^3vC+&B9Z z6&j6-o8S^?Kkz@jl9xU{h(Bwc(X%?<$+F^p8-`+6)qMlJbhT_|RGs}{gbNe+f2Ye7 z93n{GQ5CNBYlIvK6G_KYALOCrTgvR-MOI z6ATmk;NO3f4uQ;UdRwh-SJ0)UDHC)|!8z}2FolZ~!$>6UyylSvG`G?Vk=F;30bUruuTn=qi7}3Cu?iiONU00;KQ4}Io;DQG}ySEah#BI zdVVHRDTE~suHp7X=baV7gsO$aH*K~w0@~c+6GYIp+)uDJIegjW822pPs+^IIg%NU} zBj>j!ngfEqB1*(_rga_+(B`&36}Y!+`2gy(8u0I#J;pBcozPY1RAk+O!pmCgtM9;u zF*pU&B%6URyZ7t*A4R&V8Y)hUfPI2kaC3}qJ_UisE>o^JT{nL6=9Im>Z9)X=^#soW zVHmrJTH%!zrw=Fvy04Ad)3cnJ&eK{eD#k8Z9-t`3Xo>$c7 z<`Z5-MR5I9a@9FpGW^0<%=FGW(}#~XH)V8uDkFZz=d_UZd=SQnf_EXdW!E(_P7Tht z<%2S&>pEu41;Du<*W&<2_&zL7jG+$yd)m+4LHb9C9hFt;frlJ=-zLKsc_~=T$yr!o zC!TcHRK!9393Rm^{@U#=upCoaB!O#PRxU#O)?Px4+qxr~=YX%OF5KNM?t8fG0|2 z3V~h@?hG^J(x%1MwHw8qH`(xm$#{Rx>@=Lv=zx4o4W-;?kCw=Bjc#5PpIkw=H-b_udC~5<9z|ebObSjUI7#w ztfH~Xv^U19`Cq7GMm!eu&;*Z>Ziu{#EA0g!to<2*y8zf zvp5|NWXsiWF3zKiq_!98D3X+h4s-KkQ{`q!9^KRS)G00=GCNrsB#giL$-_qNPtSCk zU!Ue@vn}+n7(JnI4I^lMMmieRi6VDJ(Ly8jrH~fpH|mtzVWzfik^*Is-%8**Q$UY* zphlx}+8$A;k62BqU)}a`%`~pLKewHWo|;*eS6fM3uo$Ag+HYe)BbS}O&m1%dHK;>i z@%t*ar{ZE1KP2W46#=_r!Hmc(=~0=hqOv~t?BQH4CN5D|dxgJXEvaOVrDL}5^1ImZ z(Dxfd=Cha)6N@!>*6Ar;Y;Ez6s|uciFsahG`w*TZtOo_Tb?bULGv=##JM zm-PF0_h6bs!r+NAt)jPh_P8TaZrQxrd2&P7B_pO)gQx)?ZQ#A zU95pG^K-PjrVPYQYnr3UtcYw_Ng=62v4JlM@}-?)J=L9;b<5Pd&)@9PHqcVttg8y( zv3sYZGUXs3%MpzMz0&`1j6C&zL`89TuZhbVIg>1p|dvOm=_`}W;o;m`tU80x+IXpK}KG6p8K+` zt>g0J#k}OIKLQL{DJh4hn_r;7FFLx8O;pp$Vk6@F6utF!`}B?I{qps`VfOPSpqTrBn+plK4`fR zI`mL7dFVcshwzk5g#&-bE7doME#mMx^E&uYs<)Gu<(YBRb=1`x-O`*AQ_fc5ns(2V 
zEHEuq)5ZQ(h7(Lm)hn&$?{^H3fS?3&c;+Iqe3dm@?_iSZ)CZ|cns!tq{ZXfLXyYof zhOj+p|F^aJQDMO^L8*{K>6MXj4xNoYyYEW}d>4^SEG^wdzPOdLk!59>o);=kVb{+` z;R(emI+<20U-s}d-3@^Nsi+4NsP|uSfF`Bmp69|0XXE8_*r z6z*#DWKWJrqd#hSgRd#Yv>3~Ur@rttThN7qBQHgqS_%wRzGm+kQUmBB{!@dK9 zdxq80y<_=U_q+T65-pdjXbIef%m^g+ue)IB?D6_*|6*Ca$x_9=)c;tQ2p~|Lfdu)f zPY_War?G)hVmiE%cS;7W9IjL}B2!tQjouT3o`ul4pTdJ98}rQcLmgEt3*xG1sgfDL z^?zj-i-xDNtxT@crqtfLzUIxVgX{}~Tu;16;6<&kYiRF>w22LJ)wi-@181O!=Ue32DrUZibVr-6-Y&%A&#TQGPM$G~ov{;Z6a-6yIaj7Tyl6>|txJ(kgHnIdx3 zll_kArbl;T97G?wd)WN;WCMiE_b7I^4ijk6mcy^uNS@BxEh{)&=m4#Cp zLRbTJ?#XId6M8?hk+0Z02njgh6t_A}^_8()uvi54MHlr);YTsUV9T(Qvj8bs-%1O3 z=K#3`jEskq1c9urdO#7io8YW7#KrN1XbYH$0tq7$p#qXQc6#4NY6G_EKqk{~UXB*z zh?MHVCpPp4g>*g5{3sM&;idsDtJemZ4BJKJ8Pt-~2CG`Xdn z1tzD59A;djcP-+BS7PqlNa6fW2zOGdN~%R+p>@$xV-rXov|goP7L&nzU7h~$qKLX( zX#~$mQ>L#l?^XUBkrHV-@kppXXmm4f_dU3r8}k7vV}3-N8%cKcMA{gvrt`W|o1_JL z#jpw}VX)4Ch;b)8YB&2gPaLe~tEkicjK4%Czv2^!7a{m zr60k}XFD0GGOTtEKfPM__rShGF-WMG^X$}-ny^;-mYO*w(=m++u#f`qNFSqcmzf#d z=V&IfS3p(C(nDcj1@ePgr6U{_;j=pxRruO;0k5Ks&^Q_dUx#m6Yfr8ltN{#dCyNM2nX@b=D+@7II0n3O4$n`x+;=xZ%;dt1HOly| zu9FHSog(GdQxX_=QZxyF?ylTik9fKRpSk{^lRIXzC`}uzral|f;DiJQ5(O|7P#rGA zyi2EB7QdC2MdYI$G0rCf#qo(0apNq*aN$0Gr60e}3(xIir<{EEu^# zfP#T6DrGMs^wLF8ea;5RTeqYV$T;Vwxw7VLJjPDw9LoX?3y3I~DN+Rmi7Dv>-gcRb zVo-+}Xh_si`%6~>rMY)`wd2ol?Mygt=b0@{4TW_?r$+jO^6bdAkFmm?g;^0p@|TXP z7dnr8oF%`q*1?uEGH-OpYR{|1TikU-=8O<6&H{En-^^am@2UWE2tx}@L&6D?y9?V!*Kl|&-) z@D^RNSD^EgjU=+Acd$aEH~{DxpX~kg{$$vOjY2%Bk;o)!u1C$@Frf8B+&0ItQus#Q zKyT>4wj77rVHO1sE1p@k6;I<;0+DoXf1ldIetu3l8W7pbKctvEJF7EX+nYEcp2Xg(e|w`?Z}|SVwW*{Gm~4^IGrK8Z zhG0is64{Uv$~3sZ7)R1^W}=h8TPa1%?^E^SvZS?pJiGp+A%)Oytw_gR^Z&2_Ja?}I zcsrw*cC*M!@1k>lT^E%pRR4x?<`H0eVoXXA@UpG~ zcZ1lPPu$P8RoAp8qn|4R6~F++>m1PsjRn z9HO2Uxfh@}AJ*hlrg9msoTuRRB+50eE1l-iCC)y-t&6o?#G6#L7KUbPQd6@sQSV?{ zLySs}I)6txL@{0*iIVUP_0s5?5!*=UjQKa#=lx1b zzJ_t4cwt$CsU%0k@$DwF*@ZdQ)X^?@qK_EO0jU)-;??o$KqoC`l7+01FV9%Ey8_D1 zSlAK=OrC&|Y2C3l84%Vxw8tioB=TD$K}9>R#aDBK1ljQ@+msTV1RK0lkK{NGSzP}f 
zuJbk*b{IUb&?awDs5|}>Uq;~r-#l>zD}Wu=MvlX@O1_~esM`WR-^eRdD%b?a>~_g_ zkTt~og>0LBrFf{`)?tMxgTa}$^!?~Z#2)t7%zL!&v@d;ip}G-~<(wxQZu^J?9P#Ht zqZ=*kDNCqn6;CPB%rBG!K*_QViC6<7OLvdo746aDWnt3#0)G@!*#rguOrnI1z7URe zairY<`_18952q$kBSmR$VtvKuR=qsEJoDLjHRJiE{bpJA;rPsh*>i+RpYBm z-*fy|D>G8^4-!ERn7I5A6!J!|cKhp{JnBk4qOpm&Y?A`KeWIfGmwM>Utt6G<$xtxp z!t7XLP^&`Y>lk#nuP-sE$5%(h&~KffN=+Z3h%au55H{NUE8yU{XWvY+Z;V6BSanC9 z)^sq|a3GP(wwnkaWqIqr-kK~P2vM0YeC_4hanG-4nq78n+DJ0pb{nIEr>r`h zpIkEd4=iSC%!R>O9!Zlq!>4CaTF&2L;GltnT`~SN;SHhn8t$M>z;nBc(haP-g?=a% z*nOMya{-Kc2_jK778`f{2M3-BK6*?sZ*Z8Bd{^AEH6BH>qJn>ZMSCqzkgQ`pmfoKX zI!{gNHTCU#oAC8#42=Bnw121L;Bgl*$TYZx4kUTj8a3KFeiBIENWd?4X)0aZDRT~1 zb0#ycQ&h^56ff{Tc_~gaR_mf7uTd92#(yiu_&(bZ*mOccL4OHlox+kZBAY6?A8VIl zO8T+TW$`p+^z(8q{B@zOm80pJFBwk9C0{EO$KcBECv#u|3Qo^?+9?!w)0<26AVoAD z-nuZc=E~eMaLH^n^z+GJM2WMf5X(>;ZSAJ$e{F|Zdao2^*K;_g?Qg7bRgWV^; ztBp?Ko^S3f&IKwlQPOn9p0J9Omscz2_a|@9Q_S4A;q*w+SWR!13aZfS|F|@M3gq%5 z1Hu-lFM74PfGwi@2m+>-fYr(dE#g@Vew=Ef9h)#f~35~wBIfPEqYfpO6 zn2>|#>95MQ7xIa>$l=ISrP+zKi>}w^ISP;E@FeyltZ2o>aUr6`K#fgSj7J$WW@@{s zZX^E09PBBaH)Qadp1@=c%tW?%znXI`5`cT#VjJ&dt`}}`JiTFBN-`z8@iBpT=8}e$ zPA3}Kmh6PhgKP-K(m!65v#is5PWO?NZ=8ejZ^3*=tz!>z^jB4L*N8T10%)HZC% z7jIMr9X(7~4hYR@QocvxM?j_3+4uxo|4EKlhVuSK;Ea==$ws``s+!{nMp)p$J% zbfB^5_wssMoIlL^WSuh_0>Ai~vHM!X!|A$&^p%oKmuW%(tZpQbrnS3)3_4ssLFtDP zCOZnrNJ3i!tiA(Cq44hEzEjz~NnZ2_NZ8d&ox_y0JJ+5_08QYq|9GDHtQ&K&EH+HW z9e0kyjuYTjN%A#%jq2d;d5bfZ-4T9lKnU>+3uGoC+S&Ib_+r{jV+cdE9rYHK9RFma z2rt|rAU4sr+^4a|#}RZ8_TS}5(1Q%9*7?N5mTsF76U5vO1>L9IT;r;BwmYF$+fF0W zq%^S4L9jkz=eiPe{oZVbwM=H*4f&hsTQ!v*{(=5<++w@ed~c+_WEDUbWn&PxM}G(< zLBgGRwNrhr1VS>Bt5VFF-fZ3nkCQy({askbod5K9v%~Fnf(b#=&cP-NsZ_J%r!}@} za8a4(Dq~XG^HuiRpTi1x!u(ELGFjZ9=*O`?Y8%G0e)Ny1*zp}Xz1grmyNK4eldZGn zdL1CnTTHLNAH(52g1nx!Zx)zPpm=yNFfn`8wXmT)VDbxAFL<)`HR{l;t*zy^h&Y(? 
zPy}kUch1g&%{QS+*Z+c{N|m4t8fVt-VqS4<_TRVU#o7Y`oyu8mosEz(MH;UGFy%q$ zoiEf=eBaYJy+X~oy7X%^mu2!EOo*P}=y5?#3+brQZ->J%)DN$9R6OpZYd+6l{xFn7 zOE&&LU}KEiNSAAk1_BA7XA>LWe2OA_j8~pl2&y$aem};8F?bwE+L_B?5}1b`@Y`h2 z^pFL)>304uUAE%uL2rsh@`t>sGK>(++oG|JCe;|%5xO5a!aw76@tb#oh(wO(H_I3? zm+NFFXxp0tvfa}8ntL))M>ILz-qlsi>8%%HfqcK@Xi}=>%9vo-irhlQ9c3f!Lm%sV zIT~a^K>NzEH(|Lq_UxpbIBpNFh&DCmWr*yndQ&cVeqErMTnwo0zQN z?rGos-De3z{1`hW1hgr4j+GS!0y1*ciMwpE0BP&=+RaOgA>-vA`)XOe&j+Y*uKy`~ z`TPS<`M@Jo=$RJfN4o|3XZ}G@##LsQ0&10@ImA$!KhuCC+GXHcjjpgytIO%+VqpW*15)lF%6Zvm@w>LCo~NSD9W! zAs1O-cOPF!v;MG!8rnm<(L#&&Y^^2ciLZaiW#rn+ZYt0g9l8D08&m4iS*R=Rd0uI^ zYV-R0uFvazR=J)11uB*%jvK9ck%migIm>g*No-52;|E}J&k?E>%bI>)p^v#%#C~$t zV(cq&FM3rH%^m6&uMg634c}2^W#)q-wS=^4vo$!r%mmZlc}F|s;&pvT@^ z-Q|kM8J%>wZ&V)ed%i=IBh@WuhY>pu1L$x3%-%ulm^{5U*lx?&AIX{COjYVhU+vtS zf>5YjPhE!DV;nI-4Ut%7rae>R7-dJw(G8BWll9daxnr9vM-A$xvsZGjl2snpN>C%0 zR*M%EZ90{$IscYP4)_3Nl5t-~M3y%}5R^G{kT);_8mgaIG_s`7=1?MYz6 zKaIKqXbvb4PVZw|;II0^v~+9VED?gA&Vw~cqkj2}dz(#~ZLm$y5Kw+e6{8et# zBrgcLp;zshR%0Lq*`4Dfzgf?wj>TT+ZRvlj&sB#c^IS!j=hLf#7L|aqfC=~nM#RD# zhLQv`+FHROl}wbcLK~L{QS3+yq|js2ez9Vc-EfcEi}Es%-@YofGI@%tT0TShYedQo z5_ju(l9t}T;5XO+#Gnc==I79hp4CpBWNt*-^hTP9DnrdPy1ZK&ST>kP{Cnrd(g2e! 
zrW2wVsK21JUkm*!_wZxH@vUe-6s=~97%;WcR=sj)hPGdZD5IUjQx0|Oc@e)67Z-Dl z(PThA!ZehwIBMvPxBt2t-Cj{8i1p=KUvWU&7*owo)8V|8cI!XTsFLT<53i8EMF5S) z?0;x91OWD&ra)+XX#qhjI0#aHpK^t&bJT?U;+U^nkQ!d@=l`Qd*dS^p3Gl@kF_=J29hC`AAJ@BF((|9?0bzds3JrvhQJDp>rR z93%XPL-XG&PKyApm=n=RA%-yG1_B{GCTa0u#-x2@Doz1O*6EU`!ATU?@`whP_6yH-TGQS3j$yAhs$RwhsKo@@ZWKOw&(reQuY0_D%hl?m-6 z;oQFqy!fih*`cAK6yhi%`qqX(Jh-%kl1KdvWlE%nE1^+g!p21U`o0l{WiO%`CGu*&hRlPlo`0wyGbiffC4qB6W-AQ7Vua?i$ZawSXk7G zN-2*MPKv5$TNpRV zZKaN0@0TqfH1~)&*fY4u6jJwIa~_LU7NX13kuS1&SH38<=g)y#J$>&dIMr9rC5z7& ztfRJRp!&@veKHR@Ij-Kve|(NIBdlElp#r-v^gqNcxqpaT7QKQL1NSAo!||!eiuRuq zyy2A0Y%u%>UQ|U=KwS3}`dV>K*%}l{u;>25?&ga70bQh3bKlA?&rH6g1jf1dWI#yh zd7(nC$L|3U?*7B^6#MgCYGKhKB3{P07T1hCjS#w%wKk6MG>pCTM5CU>SK)z=lsTn; zi4cE%1ppi78`MRinBWEo+U2E0wVqx@Tp^!*g;{(Nz@uq3mMRkvOZ_x&a%K_$FyE{TG zCVRKZmjWf(h0(Q{*fi=21e=L4*PE;nM5`+r)9--CcN@)N3CxA~iz zu;0OruPdS!+GlveG(=Z1hrV--YAx%m965q`e!vd)4*0g2?8{|~4!ipSMVU zt{3lRe0KXQI;e#UC~z^c$SZkhv=2UlfpaN>8`R;Clo_7<=>KjL-+$1A--zxCl>|4G zkm3b^8DztjG~2)72DM=R>`x$?Yv%2EZ8to)5DK^F4EEup8zI|SRBT)QI+}Lj&#(;A zh()xN_SD6X9@+o6eZERk?>>T&ws|*5?}u1XqYehjk#|oN1@E07her~kPHd!vV~Rt4 z5CSiHC5d?Io!!K)vmDn@UwImOumC8+ED%sWK286kHyVeLrP=7fwzrq{G zwFrEV$JI2pB1VTL?Z`_b@Crb)u0&$j$CbxNvlB<`3dvJ8J*15WM1`J4_3(G+J^+5z z4}f1CsNuM=2<6<)16XFziH$o|S~=ogkvFZupWtx&RVLof=Uc#w5$Rzzg_tflF|hb4 zLh9|#|FEwj36aeVwSAw;e!VwRgE5t_9lKrh#GK1bk*wbm{uv&uqRFKl?$v@cXZsC? zV-_Wt6@O%i<8voFA>prQFmVi$(yIEOp#1{%PaL2=F`gZ$$?zkZLie@#Ku3vM_E@Q3fJS}SS?13e_a8(`CoD@kYw)E?Qk^OaK@Ljf=xOE zQW?Eqy2~_Cf^*DOlx9AS0JJX1Efm|ZFeTHz?oJl7_z&T;EHXdd1SgcS#enq`{nd6J zz}$hzQzp|~o6C3#0#!(p6`^kzZ8P;UZtm2}m)M~Fz2+kI_TNmlgDU;CX@E<+)-s}h znR%?}csQQd&>r1`qYp4l;?Q=e8|B%6m4rkQf zKcuX8X-T2civ3@Xm5<#-7GKEcbUEaA!-F3a2a*5qwwL;VW|-8RB?ea(&vE-gP*I#N z=X9I2ZEQim0KBZhX?)QQ%>oQ^$rL{m+RQ_cUve;@-uTE;X&{?14VFKjYGIGD4t)rX zDU>Vu=j?O&o3(mdZ9n1nh|lj4B3=FUIa2lRTZFyx=3$s&4nQqj3qYib_Zhzw>@H57 zwsGC_`OgO8|04b;w_h&F7ikmqTM5;H3KNC`wGa@KgZFH2pYdpcm&=P4ph^-XQ~Dv< z;0jt0#ix52tm!! 
zzj$Bib?(3p)d}&a$4-elj6`~kMEVYI)fJ{B%Ios2JLc4+I8!L9r55Lkp}LseQ}X< z%Ei7a9GRnDzsr5{wg1h`UY!34$*1ZO@!bqSjutc@x_p1$rSOGG&hqtl_e$0CaKh1l z0l&2D{LpiMdm&^df^(!5Qb(ipzcZ1_t1Y6ed3u$O%r6pPZwqcQtbV~Ax+FJ@E&$e4 zr79%z@T*TKw14Ls{k}NycKI@c5;rVBKtc(ifYjHlMSyZxhgGkfc_2+8BOoIAVYlT@ z#6SY1*v2YN8<8vB=18Fsn1DlJ*b1M$e|Y|5fCS+oibj5+7-uYz5)OKbAYB%DwsA+! z9&gZNcUmt+|8#AiV%-SFY&Qd>!%;QIm{i>dZ3bnI{l%-j|1$}_Z@5_wYO>_C|dq4?couP@!3x5~bw&I~t zDCSpMR-zcov1Mr^J+XwHYo8a$F~~B}5&;5VKQx+Y(wA9C=E}FVu;`Z=M+~@07hg*1 zNTLDnt3X2PG`S4wE&gxD5YMyQJp|t&z!Z~ zvz~CliM9-aDn-o>{ha<%2)C8|W~!R_oEFCm8}`lVyX~RxC)N#~i(1RqN`Wg?8xt%D zap8a_mE zfVTPec(iTxSCEqbw=DFRA{@F1immN8s#LO#pWfqEX4E`l7gkbGoX=K7MH^{uoMW4^ zZfEO!{i4l4@fN-9?tq@y?Fcnc+$=YpQBkSMHX3esQ&4gZwe6O3Uk1Al1~w|c^NChq zZM#;11hw<7gGN>3qnE!l+7nEMd(cieE!&Lyjj(PO2S7MNEwjZpr*C*mOo!B6lf84d zh~0X9=FeaQtd33#hE1#k^72YZGKT=Gaj0b2GKV4^v0C#DW$|aD?wY7fECEVp#w{3T{@jfGj z9sEe)3eJ<24sy=snjpcUHWGGUnrf%I7~VN#k2fdR{XhQ&My359Fp32HHrM*a3>Dd9 z4LPTGk`L`ZM*#KT!ydp(H~-JNDA}cveC>k((c?9S3eg-| zB*OiF=fEbpm_;TA8&(>wF|>=8UVPTHb!FIK7OVJy^h7uFOJX0!}xX&DW7RVYpkY=U}oe4NcL&|zE8{BRqm zR59M(y8e*e2t&Y(08SWUWCMLwkf!b?p(v;z#M?dArz?&hG2&7WSUj*o@69rI>_%}k zcdsV8q>U)pF`{|Xfv5J)Cz18Zpt-J}BoRVkG!TxQMtF`p@jT45sd8vC;)7N8iUokMVRRt(~unFCj&2akb*(n>Q^sFzTNO*23|!@?&SY!&wv}d zevFR1dKyQ6iuK6&P1+>9pz@#O3IW7X-0%C3Vkh8WPk{Op6m*MA8rjoVuHW>UWZ6&L z)6xrGq!EpHX7TuZ9Dm9`>yq_ zwfFvh~u-e z&Io04HxSS50*ZBJ~wAF1^c9-F*N`7Qy^ttGzk}xx!=h z6CQXaPuPJ=e&E)NiXnyb#7Kbb2YV{>UmB%R#i6|UthBM&XEPwunD z%$b{+8PIRqmkGi$qD#Gc3zt=}&o*7DVi=@)8zq=Ub)(ghaX8UbfQl1W7+lbp*?~s-ln{#t6;KojG_(tkuf;?3lzeSPI=q~gQLOQ%sJLK}9TXn+~ijEk=z%LpCrjhsBpWRJ96V*I>?RzdH ziotkbKX{zM!G!HJsk_TkhYY*8ktj2rEP^MqHlihAp%E=! 
zEqnn6xnzoO_e}(DG9TRM%u^)dx34E;Z0;(!xR`<8nrbG(6+7gCyQWb}5Dj09AVQMf zobQJY=-X0!iZC(aaRXD*;`45oqU_Vw`H5+mj$I{%!!Gi zS7;-jsF^-Cl}nIeF}lRFft5fWSK=3cIn3PtTwI1aVK=O>j=6kbEdVDjv}zKw4OOgB zN)cD~3QxP@k@}L2!ferTB}mE07Z19|Qvi9N9$5h35LaIrSdRXP}Tt7xr`E3tpD>O%- zm!Qw&-`g!YoZ$KOi!+^cNG%rz=5Tks3w5`IXYRIG$%{#WHQ~JYxy({Z`9nY`HEdHX zx%Y;3DZoTqjxPR;)DAFW@^>y3Lq-uNiZkTso1P~> z6azk}m$=8OA131`^g~STGD98&G}V|)Iqj}+rg?DxjC#r8Co?eS<%>2W&DuAPWt2+} zg@aX$fbN*Wt+=q;$^$<6WzvwI>j`l>N+9b|P7M3ttpS9r+`3tivH9nSi=T~%$M_s{ zq&>#V*bK3*la+*0iT|#+K!HB`doFY(b)&g$UAc(~;Q6it)cqwr$PT!zL6>5sdp39Z zbqACAFS@cJ`o@zH0|e9LMwH%tA{4U`>tC!0<4fOA__*pAtvf+EWDY&`PJ&|Vq|FYU zhn$y}ngCA23!7gWt)CHAe1`P8+}htm%iRF>%OIqU9q)h#(@QFjlv~g3m8Q?D6AnFJ?HZ`>+htmrzU(g6r#3{0#G1ec0Ex+*4q6zq{;t(UOh-JvfSG0< zEEbDj9x8qd6;w?m;K=&=NErAL4+~;W9ZtT~2^E`ynP{#8w;~m6%DScHtA|wuT-lLd ze?8BdgMB^7bUI{8-S|c06PjB@C+{9~-5f;Pnr42Qv39`u1Iu{(*av2Dv}bS`k~9dp zav^KzF0s9KhuF6PaqJHYRa4)8a2YevSelpo2ZiFAm}mFoGpA;cRK&C^*W3vH3|1V}yR%2qG5nZyxzBN8^@TTbx?2u^VS3X$dnA_NeO?iu{vWGw8g1 zM|W?sR#Q~{lLQrTcUzTzGWX9_v?^be#qpAjI`oVP0t=6kP!Xv z>b>T0-udLJ!qrJz3Fbfkv*#p!Sjpa5x?FLvM(LluqHRR`hSqJwr8n1z zP+7$wHH|)`%|kIfc$+Ba^3+R6M~7$4Y_X(~PG*>Z?wuLhCKGOyS1UrT1t-vuSu0}}aOSt(C^xv`A44**j^x9Mum z;2yrN3jWtn)|NLf;Sj<^egFalD=n-KD&l)^;`~C>g(!_y2fo!#9D~am=!o?J2zE>= zj}u8|j_w9+@Yf#0g8ZQs4DCX#ozpMo>Sj z%Z32)eAd{fAE@XI$L;WvWQMGUNIt4hg{|wgb<;c=HG|jFAqm#IeY_97!kzDGa-FbSdqZG@pP|` z|GsNgF=0ERgg)Vt?ZyVqA}j=@sdcs)m4bns_{B&9(sX#^^#cAqD_8R4<4lpJH&B4u-_Ye0-nMvc*YuZdWjr}a$!h#9K+*OV;{v)f zb?HZJ49+{PiVMM3cCyi`(iet_rmO8;H}Jl}_^40(Or1@67C;$23^z5dPd@kvDk>hZmnP*(oyCPzQg$K-gxUj~%MgbbrBsjYl_ z3=3RC1a40|af0rTd8GCGL$9Or-e0lC`R}Qwn$$Km5ur2BlX(>?&BP><0!juHcI2@@QzDN!8&1Gt5we`Yn1p+X4YKjkdFkFf zkBMSFV}=%kSuwrGKe<3&XPJGL{K~EAbzn#-9nTsJi!gbLyNHc1izRWIoY&#Xhd`*0)UV{M_NkVmu^ZrqK5J zF!osL+JV!w(q38mBkE;OiF{#(B@&eikmp?69a(ckX2o!Opu9YFf3nuFVq!KG>oz{i z!5~~*@1leU2m(rzBrU(Ok++aZ4~&h`ZSy~iC>1G~GSOwq?#=ZUYNSETsI5LagbkEm zgK4;4Aov;Df7=bn-+LtwPbs`$m~E7b90Zy?pQX=c14H3qPfME6I)3aYN!<@|$~lxQLjkXs4Vt;}88e_mpqA-7L?j|8Y&j;){EJX&xjW 
zQfnGhM9ORVXmM4vS?loN^PM)%Sx&HMM|Eat;tZmSHLX%G84NjQ(M=J_=hnMv@s zpWmrDU4<{4lpK2l)|tYNHBWO8cXtnBSiI>h)1}wrnDeM`{U_&2#wkUlb;#r5>K??( zbsCqQ@A#kN8!WJmjsWp)KkC&+!OgK8`ik|Fkw7?b^hzc2hb*+7G1bIy1s)@V?ah z#Ul1@n{-TL2T#kHin+N)pvx%3nB1BijQjzfX71GY!s7bDW2QTdO{V>CYh7-SM&9Sj z^D~U$3#=Xp8;zz_Sgg;8G@2wvABS8mc_rr0Ty0yTg-INsW5gG)X&DjUYzV79h9uJk zc;so{2FGOSVq!7q6O%uSr6>srg4~DK49q18v&rzuSD}n9S^G%8#%UWPyWGeKl&t-L zvTQlXS5dm`aP=j-HxVFn!(W{$GwCDCiikyL z^DR4?6&6n3Cn=NH8Yq*{YI3YY!lzOyokshx)>U#E$E@+&O5R%@8FO^6J>Hb2l}vkV zmku~$hZ2(%pOFyk{9?>VC|;>&Wzy9{B67D_ulW@fXH#EeR}(jyn7Md--Q3+M2}okK z*ZE>eq*RY$VL>&;udfn=&Y3TmD=LF_O4~)2;Xpc=tzI~B}qS$gb9e)>5 zu(#!%qF+tldvPFkCzEt$SzWjyuA}ZIN{Oditi=)jM&~ToYU3O%ByuUQd&BlC;sta~ z!O5U>C{~?JI=3)T?&|)U)CwQ}YInVhXpx4$o0Jv?#*tf6@yXg~#2d||2QlSf6T7`H z;QX-Stodtm0|3vyMBTGA<)$DGe>jp7I*YkOCO6;jR(!o0TKb!FGU!$=iF9Z|V`HNx zV~omtP_UDg5wB41lBa4o?M9qF5~XS|#ZK(NZD5F`oFSo-V}-z&2WL6(LsU#CmwD!I zz!h9F0^8{E#2KxF&Uq~-;4XEI7W7~(& z^dm(jnz{ZUbg2W0{61FGfzY%abe!k*P}Bm`7x^)2qPQesQnR;Q8aGN+W9iLVb*+Am z2|r^H)Rc=V^H{PgN-W|q!VdPc9k*jLtqu=f)%PD9ykk|IO$swSwRXs})D}|yY-OfT zF6gyWMw7Aqk<(*&Gi5L#7F;e$T^Rg{6FnXiPP<6Cd$LL`yy ze*fYugTRp61Yeba8Ta}FD#FT9hAx_h&1}WJ$syZDo2%BqyZ9A+=%>GyHsuZ5tVEWh zJ}KMgmno24O;i-&tjUIUWmx-^M1TVz>M6%XA39gal@NmC1~JD=zKn=l(h+*Iem9R#(lK1sY$^;&CZ|1-I`U zvmgENko(B&&WX7-I@f8@`xSze)cAt6Xht1)_*nKMfFu;u>`IcJ%vdkSaHh*5SFUNw z%&KNgqN-!t!@t75t{{OJ)E%Z(1)B2nIW)<`dgl~=%SX!g$)4I(Q={y`k7D4KK<5S&OVg%#FxXPdXvR$Iw_QoKab~d8=WD9vLXoy z1u@u^kFqSkxhl~qbA-tzp}u?=AN&AJE+W>dJ-oujnxzVR2m}VAHB+cJv1!zr`BS;2 zfuzI%oCYsXZn$1*4^(l&ECLs=_w?@W;hAnhWBjY$ld5rn`;N&cfO>&LfC>_!Y}#)N zO0f)NNIj+fo?4PFpg;(HD=uDBBWclwG~?+^>u{Vz^)box_}~u(6%};d%wlv4oS)bPin4N`Xac3+0icH7?r8l*7 zzY|95BRgPsa~WBXz!Udij`sG=V9dkw)tAbLcyn>ESW?qNW#MN!tqH%j+gP%vMQ2io zwSKI<4=Gcev}SLw~Tnga2}Afgft8{wCv!Y50ADX;DkT@%fg1B!~9ZmvfPEcjyExRd40S)R~<0okm?5l^rKHUMSuZoP1g8?Hq1)(Wqw3^7J3<_Yuw(0){AI|b74H(rtkb6XroIrlt)^P< zF3~nR`w0w}k{m5A=6G1%>Of}_e0?df49svEEqW-IEg(U1D!R>v=(VK(|$Lq}lDk!cy+XNbw 
zTF(!3$)>fv8I7M#Bm8wfd&20(V#GGp)1NK>9B&6Yir(t`@X^OWTHSz7eb+3_&RE2u z!FySkc`1RI?&Ch~(r;A=qG$d<0h&ZAg9G$d;xpmFp>*owJ`tX?jfm{n+u_wOaBEVk z>>sZ#g=(QSbQ;Rt4VFvU(%m)|L>bOZULW>-UM|{EJ6ceCA_ox?dH24dvTwT-l>aCP zNP2v~p_vH2bMnn2Bz|!&%ds}_NMj(VrSZ%=rW08l>9WS+K%5eCyzeu#QZ~-C)*GSK#!JndMTO>t4 z;XjO21dQH;5_(%eoq&8y=#xlrKj5`^XuwTMC@GJg94M7T+$cZzBE$F)2>JK>GMVeP?8#f!Pu@+2n0$} z{ik$s6t(*b49W0`U8fm=`Ma@Qnbohp2kbI`ASoL2oO2-Yt<(d?1r5?@eTGA~;uiX= zfyM&}NzKnQQt0F$9L*PH0wJj)c}euk<;-lV!>_?2d5;g5jboL;DQ@Lxf5Ql7R*vXh+I(C}Aw0V*RR80R@ zqWUo|pEzP`oKvkbAqh27$013Sat$z>BfH&owhK&AmX&ud78&-~uSU+c;38O_Of5Iu6S`_dW=W*@?m+u}R=^G#Z89L-pG zoBH}*!}U#iE|C5LsKo7Mnxmasj<>g64**}DtM~5L;pO?u*IwAok78qpi#1Qi@QQK3{d5lEk=d zMNZZon^LJRy~fR0EX3h=L2nRBMuup&9(0eJL)#wcESKfZwycX?EN@RrrO1=lQ*CscZ*|P7_ePnS( z_rAAw%fuYY$YBE*7^ZYqr_43QqTT!FSr}PKq)w63DaYKV76C&BdYNBFA!V|dhkU`O zZf+BXNl=j1+Wo06+cN zyq|7H1T|P}7;eudLp+vh1Jc2M>cu=X*!}2cd8#jkJ~U6r^iZ6iB)$VkmWN9r%ZveN z3f|HnF6Kt2cT!G>DZ&b%)|wypZY6aYk>}F+vI?U6KAZ?~4|9{LGBRDMt=02AAku*AJ|IKP~uqhv76-^O(Xq z3Um-xr5pTIuOH;9qb7%8Q%rXn3$|n#@O|GCcbVxx z79Cb|#pDnL_gR8H#Wec%e;U07JV<{dBkfw8=5IQ%yDSY)*OWg14@dMMfKznqji4~w4(W*I=gx1omU^ngBV4Y z%jdom^*i`cHS2KvImKrVagOOV8$)!VFL@O8-gscu2QhC=8RpbE1>}G{KKL?qff_v! 
zb6o7Tfa&d+5|i)ud^?Sor9+6cyLnTA7`2B1UP^ru5w&#!yWbFYE&S;z=)wXMs)=`i zmeqD>r&4gQ5bc3{zx97CD{Ee$WmTIKE#jXWqQJ5Ow5-&9{<5r`-%(#V3VCnwxB_H& zZuU>TU8IpBSbN$^ZLBBlhC%%@G48D|WI^)i(m0Wkf}uEX#jQB+!9O{9 zT%k1hH5Vw~_0tmq-=)UT_*86 z^EOF^sD;uS{}6JaVMF{5GRn#jybpHQzg@L;dx5XPRE>R$%N2rP+ZB>n3r)YU@_raJ z%Q6w}w+eCcUjL2|BARR%r))h*P;R>-TFt;!Z3qIN$`x?8<{^RMFq$XBDm_^HmDP;d z7B|iQI)_79MFE&K_22`{DiO(N<&NqOH%}!YYSM8+@bK_5LNtt;DMhx&y`|Oyj9uTi zSmf*ec#rJfKsQimwF2J0v{`E;4?%>;SyAP@hOY1zxuSL98Qz zy{9JwxnGGx^KbD+6)TeWE4+i_&){18)$wGtRe2IBvxhC^;dUF|)P8;LYHdi5PeiYE zu83Ms_h`Hwjf!^EG~z9E|^!ktg1r4v^03~VSy8#*iyscJ$|uh zb6(%QQU%Vq|4RCLj*~6Udt{q02z$h9N~qOj8>cdgC9vu*mG_ZdNAaZva(w4FSMdI@ zzo>12pyLB~rYXvhLw-{SxBSktRQRB=hf_j83lPNe85miJte})`>o3c`R;`%m%P5c= zrz27L{I>;=ep;mb%B3_`$YB4OBV=sNemkIe-tK%#{>i@Y0~Ui5f#vJ*kn84Ux7IXZ zN=bltY!1Cd>gs|mHe0DATVYL7(CQpF$9nZh5#%_Z<;UeEjUiQRzsr<1x`0_qeCAj; zAlsbYyQUqP#?Zd{g7eXfksrq7^3Bn7&;Xq^ENs9N{IHreWxAHM<=`$xb?10jkUhdG z+cYg%p{{GTNVy-g_W9hkI8i)dR2fUEV1AtBS;1(g^qzxjZ|e`0mFsHkIGK4TU1-p| z)QV*fo#yBv#FzTAZAlQKfhT*gRHRkwwgz=HRYQJ@S@Rz%Yg-;jW%->LhJPGv&crk| zy;P%I4bQjwi^_7WOa(vMg%yda%e%tDK4No&PHX;6Wm#wP0e8OV>Te6ZP36tzjbUEx zw~;~+D-Jtnw`+r$Wf1U~B2_!mh;{NzVnkHsNFb(y;uM|g3NTp@7}INpT+C9HRn)&o zZ*ldzlSE{iV?<1+D`Ffp-7_x@mAFx(vE^V{229=!k8?B)v^a+gCkON=0O_^f!F*?a z4UenM780bPWQoIGKX{f_Wm78cG8EdCH!G?&5XA7wopbEyDr+q)Mpt_0TrID}xQFkQ ztQ&9EHV0Os@%iyHmNz(Dj@v|KDg3D}@M%H-O^2Mq`)h~0Ex}&C2t6U^^BvWzl}knt zX;w#(fReqjP?cCSEaORS8KAe-%d)J?C7r@XdkmNch|vkQ3+ra!EQ`hV>V`!wC6_uA8o4>fB&OgiMs7fJ;-lui&7=%x3=Z#faqoY zM4{S2-E4R2nO|?H`{SWgM~W`U(29Z6&Tgl%!JHKRwHc&h$kC73H45HBKB_G=Rz}TN z;7(6tfX@)Xk)+$T()9E5Dze`KwnPg2KwaSL;reV1@ZCn|T4`97!S;<q8PI#u`X>Sy!z^OfKto6kuBYGJf5mbQJoAxpS$!O$Vv%&v6*-;<=qm*4C!j-^3cGcLb<(t@?zuy!qCKA4nHD{;HiH_qsUxP4YJT1 z+Jv7{S=CTaJ$+^$1X|y*4J0?z?X$EI*Nr#M{!zAAf{s@X zi0`68g8ZZoRWP1DQ7@eTIOTJ+``ibOgX5p+KNS#JAS(ga1I*W|vY4eaVaN5xtTE2k z8r@=;+1(*Q^(*NBX2Dj6L6y%!P8)M(Lyk{^-^5*)qsoq3sNBBFBVxm)v0;bnFJ~K2 z0lH}VL>J6cxvOzg0Q?~)#MnymWZWChWyS6oaHne!yPP1B)c^yBEx|<$XEYSbXHMYF z@Z2l@Dmawi9LqSW!fJCQsiyIgyY4p`BV 
zRi(BT_@~kEiV_4^(U}fM8yzA93GPqhmAsseuH(2bCh;Fj4U*@T=-Qv;9UJ)!k9pWR zG4MH$YEIzew{Nl3x=C4Cjn(LXLtWKHj)#JZ{|D-7?e|*-LS3-#*k~fzQm`WE@qa;G z!$7DDlL^=MVX5z>vc&BZ@qeJM4$J-+WG%)3Ak=lLi9+^rk$8o6xkcomUJI2nCYt?@ zDmsEW4{taE&*UT*MLe^NCc|#keXC5MN3ukrRK=&^rXPTnnkCCV>cc@G>iaIaVsG8g zJa*uCqywF7f8c|u&x|n<>#ck9O_pQ5JuKiXJupZT?C|qX|M92F26NwPbf(62d;gda zYS5f*g56Pb5wc$S8bM+f3pvcW7tfTag~;l*p>cJj38!^@)KVN1>t_WRy`Ch6h%$q_ zk~hyz$8AP)bpF(N3b?c$G2gG^nZ_0fl8R$L62oCt1&|^N_8s&FtLqw(vW@nIAZGGIA8H{Stg9XT<6lHs>>2ZPG4Q0xZow+Bb4cG>chV|)O^)%!mU0T@&<4k6h9Hl1VY<*!p#qMU8Pr6&scJi6p1*011_w5@JGF=XyF zkv<`H_dP99eAh&sJzfG5T`<8KpAG*IUF5xe;pQ5j8Fa~v!p=WnGh2T3_a~5-MBD@C zEP|9}C@Rj-qzB&mRG-B_)>9zW7_|J`&V(7fV|Yh!uHb`PCBcxk-7&B^#QU4w=}g$1 zm~isp1)#qeCZGxk);OhV_YLB!oOm1FMa$#Iu)vw6QVBeH9?73ZeBfcdjVJ4qH*kV#gWt3avzDm2R~jAM2=NBh zx|{O748OUS*;o1&$Hbv-FGd%4F*gVW0}qoQIUyD-8R@~He5l`>+AZX*oSLJD!gOi* z{8M~M*2^(`Vqwf)yhBeld3;$Shj~$Ds0561VLT?i#W_+(j>Gn`a#FVI&A6lTJtt zfQOx)uVeSrVEUOQM;m2tDsErU?3tT>XuQOgYRm0#f?oX1`1gKXpMMDIC*1LErl;4& z&<$%!`R(q6>K``k>rt%Jg}R4&Go}4XTtn%)RY^TV0mrDnaW6(>AnpakUXlC6k+xWA zVj95tIi#5L$MWzSRx>jX5IwZ;&oZ83yirhkghg$hWA#bB`(a$k$s)ve)q+CuFD4>$ ztGaIG_Q8c?#bm=PazR>mt5AWvREm!oqzY0S%v_E7td_h#y|w*-2GSbvrlWaEEDXnK zrSo-_3N;_53}W&L7*ZSDcA`WtOvB4n=he`lN^(uLeigq^bJH~%bZ4t!0%?7NEBldPuj{VP!nGA%>R1$)C`0wd44MxtU)d(7d4*4aIr9vSx~A#lf)BAwqS z_L_WL>nO(1fEO>;a(p#WtSjkdyy0GUqIOMsAlJq?x$2P$+_-vFjV>%F@`KfH8LYh( zoNqJj-gwI02Hkkjez43DxWJNkPVR9NnGe$cXMGjP4-JJ_`?6S(t;|XCX&;zBMhwhH zkxh^Qut$4r?|c~Cn^teBOXaC}8RO$oE)r;}{|f>$0zzOit|m$aM=#%)lk4A5`9E+? 
zS6{xGZsg)0P7O0eODuF-Y?4&7mJ4>{I#gBh^Aijkpkllv%ao>l88wg1VS?AFVP8_e za~?X;)$CFSyPrx#urD;obEy4E{T$e96d%THUZ$8{wlR|2P(MbUA|$L>4ltF75K+pv!A`EZ!@m;Oy-P3qR@@}Usd9Fr@l_vEI`SnqyA;|V^r z^J`E%HLoYbXME9Av3ZLn^wDlU=;ObBb5_v6LC$rDG846W=*myqujd&o*_QTG)3f<1 z-M9x#6%fPUy}J5CHqcPn*=REB+4g99;0s1owdG!(u_*_x*R@!#T7nIxPh2%__Qknc zO_A)+m;?7-t&>6ZVyUI|4&ORwXBq@8%~k;rr!9AZ+HGzk7!75aUeZ`{qfhHoGqaEf zQL-F^(fHa-g>fW9DF_vKvhc(4IWpZIS>|V*NhBx(8+vsCsHoTvo+{EOqTPM(5#>~> zqwdXqDwJkq1)s-u?zS%7n@9InxdDj+v{dNC-CYhdfFTGj+g&bBPTt)4HNjLkuNt!A z^4O?;{$mrl8st>x*czPhRLOa5zm9Wb!#l+QuR8;UB6w*{$7sApuE}%rJ%w>EpM(Ci z{zKM^nL((g=-V~c>A`w4OEZ2w=rmh+xw0`a`9+@h#Lt*wh*s-_`vDacq0BUI{MqXA&Yxtmv@p)5LNLY;uEtndT7^-HySEm74O<3x zS;zg%$jVwnBE|{ul(MJ`7A6F)e!7dvu+vYT^ULP3%}PdzNA=B^zff^`ykV`jzj|uB zKor02i(Rce9!;qL!7?0U+}VP7A0yzH$dL1y7SBGx7z?iS(fF`T%GA1iPNFOziG%20 z?c#}^7Xrf#DYnAh)lxw6!2$>ST{K|pECYMtzyq@U>1)>O!C@koGqPDirFT&kBb?IS zJJy;OUh&nYdflb0+RbMlQ$_L|arKb(l?VDA z^A}h@t#mkHV*3AFZ-9c25Nz%GOcozcp*aQD=Ylp-tSDN0F86USbw?nCcje^_r}s+v z_~(NLOFK3Xl48Nlmohyvs&fj{9rM(c7I18YRX>l(t3nI-uYI;>6Hk_tZLbZ5x{9A% z2Fo|jsf4e-I<0Ys0$WD69lmqI;q!yPa?J(QL8mMjt65uLvXQ~vBUzlN>gaSVMPqakmm>{Wizld`nRtIfZ^e{85g7MKzTTTEUT3D>D=E;c=UV?Jy}nU zQQrB{1<6Sc?&RV@KZn=oCluDKf8vQiF=|+dTVHgPgsqc%(e3~NbhCSC$DDyp97MHS zdLO#0AIe=3Yt1y!R8FQ7>Sgf5j(Lu97#eKVYK&+d*4c&LCs7!Z_Dj2XItu%ud5{}q z&pMZGMUhI4{S@YGek;D^*90DDdsddfW`LMStk)r;F8sZcq~fXUQvabA*}sWd!iHePDr81v&-KPP=xH_gWdmYILHD zT<~%gGFmex-y_c3ErZNRM;<^F)70X!Y2FYY|4?MZC~EURd_~|bb@N<4h;xY&2Kffm zpq#A${C|OA+DR%2Zs&(R2=-t>oM-68?akmd+U_H)@T?0Rek(7~0gE%6^Rs+~sbrH) z!(|peEaIbLZd`qIMU1cD^9`K3`^@jmu{1{quRR)b#53AaDPQ}Hd9rwcjn|F79)agI z#~Li~?%u}Qp%^}HFjjHC;x|mp?rTHF-VJKRz{(1;Z~RrsjwO3r{ZYtzroJqq*S!HvHrU#~``ueSJj(-*JrdY+KNLs(aDV6SbkXQ`u>fyHst{W& zpYW`}NtcqPJRDDBQXQC@^!zTGEX0Fwp26)qYmP`T(aa|jitmpAPxXr?tmsAbN6wzH zP+&he&O$RT;#Nt{^+cA5Vn2ikra`x|ZH(rlI65|HgR7(UJ&=6tlg!lDKxRE+{py32 z$2kaY{4T8+3~W_Vbw|a&LY8UZC=#y)KU!k*wQS1G{UoleL`LG=D~^74@o*d9ixM7N zvFI5SHls=^CpJ~TO9OSuNk{k76~z-Q2(TQ;jYot$1O)NJnqm^ZmvX54vIT 
z+*VUfDwU5V(^zSrdUECP;=DyYWs{t=9^a$pCEY*QQXLFn#ll_dl~$v~-;-twJRFxZ z9F~2RlR1~o&MvxIeMtRlj2KB}(K^g(i-WM1GP)EzI18ODVT{nw{luPyyNfB4L2>aH zn}J-n@X*YX0z_VI@t8@;&Jvzn8k|bL9z-uUymiTI4U$zx!W*4tWSQ^)i4{=(>z|ptD~W15LNN>=s`TSO6q1D0G?r;VCc34A@pStABw5J%~Ha&0D4^}VI?WIcZ1#6*H1#IV$f)T+k@H=ok$ zQaqxJ6@@<0z1o6yoHl_r$xGkhuf%}&UNJC_G!HxqgAQ~{Q0Fp2A#Bh#MOl}67%|<}X;Et`RCMH&hEy=BD zwVt2G+gr<(2)fR}f9Ny;cSDIP>)z3Cht!1lQ@(xaXuT^auL?Y#;fbY&7Iw=ujP7U; zNgUUCrPTQEB-#$!@A4g+yfwez1}tIZC$xMGB>nGtoz!~mYqR=IQ(9AMzP`d=%D;U8 zre)NtnM6 zFu@-kCE{1`@;~`Yf5~M3`m=-h8}h<`rO^0S`q@8E=6}&=k)l8wBVP1-ujM~t^uG#{ zf8BL}ot}Snn*Ad$fc*V-{`>C(#J^vWxN@uh|Gdngv%FbbnJFPFCubP2G|*TZdPWou4f+@% zaP?WrYYooR_vcD>Yq0~5b*nhA#y9TK+B13e?mhGTX3S>p$Qx`WE$92 zps)-5ZmJ9wZ{$x!c>0mrn}rbqsgn>YEOhr8PF~JA-H#IP#*QyF{tV@^El(^kNYLOb zhfuc=5*kXtS4=_GbtrVdQu$gnT~roE|MYM{58>;_|Mfk4(f`hZY`?(+hBHJ8#O~v!a=AmWYknk9=BvCO zV#u2iMz5P_5@olmuJ}N#b$UM^O@l?BD!V>19wGuVdy6O#G?JU+I&_a^FB&gN}B@kQ~qGaYO z$M=+Q@;6H;B4o6@Z=5V%I~`y^2BoSxU8%)Vg0mcR9Zcjgb1lXN1bLOKN9L_l@7rhmS=;3}yX(WV%fU@tEI!;2*7CnjPE{`bC|Cy zc2Y~%>TU&BN* z>?N%FKH0hJ^%KI$qiP8O;&A~XJt8J0k; zOdC`h14+iQSy%l|CdbAItB_zLe_-lHpfnH0f7`zPI%mAt!-2A=3d%WqPdGOl6%2V-ju%8A_#uQ|x=tI1{*b~>|-wu`xBcLRB zgc?e&3)7Zl-=dZ(0^iq~8|b0#y!FN}5T|{P9Mz1s1BC{dP-Wv) zb=m#3TJC*l6Ma}%6X=G!ZAtVAHrxwAhjh<;&1oaN^GpX39WE)_TWQN{%Udi%xH;;;D$`yJRK$%p70bc5i z;5{BrDTEAQOgLbTdo^$wEZ0D#uvYNmq+kQI&8p9~6LvH7b{n`qk@@8=-giYuEY_T^ zIxr;o>8@LuU%bYDgbG%%rZ6FwO5w-Pfvocit4fDbruNwuOH38@wAlch7?*0+;Wdo5 z=%00wex6-J%O6LcxN=Y?TQk_S1fP3z`+%9mDI|d6Bk{j3f1kjHP=VL%xS})UzW8_` z2?jgNzgsuThVPp12@wXj$zHy7k_6j`mH-ylj%mhn9Ehj2loS31`~WR{-Iu8ou|zHL z+w(ACu6S4K@vqj2iM`4RRbslCk9g@GP$8<_^)BwU7PFwN96_D~Y-&q*sJrVpps7V{ z+aPZb)}(VD&yXy)B6^bw_sFm&!A?4*noXd`Y`9xebwUEJwol4x|5}azb*~`(y{L(n zLpEH41I2+ME8|UXITR?ENH%yp$MB%3@+3RWmnS4<1s^OjP4$~bWvgeaWZ9n>?NT<= zq13zfZK8GVN*(^-Mo?D9mJd#I9gXrSAW}jc%+BsM{nJKpkwPmeRBI;~2b^M5&@&!) 
zmC-5t!*LI;(kTPp*_lyKXQ9ci3hg6|_ZR77U}W^FirQ2JFOG&X!Ky=+KMfjAFoiXBv+Kz1IaZpJs;rZ)v4&r^U*%(B~a}~>Ea=BccAW-oKeb2)aPRxmI+qRPl zCbrYD?POw3Y}?jE9ox2TJHI@i_0@Wx_uuZ-z3x4Id!K!(YFFhd-C%A>gyqveW_R`} z%3roxE+8)!?%3zX9s&k~HS=M;y}>ZC96xcSosW{--~fX>tGNnx-bw&QSOFy+$Tf1X zDw)qx=ly7iFtKQD;I0t2m{X$>aEj6u_oOp4w9on%3o3-!Mr4~O#NQ_Gkj)33?hZ_@ z4DOi0o*-Dpil_+o-@mwRo$l@$cXbUloz?_K!NyloJY|SNcqMZ2CT9Q(`)zPfxXBZ? zVDE?75}GdBA+kyNCfx^sS1#WwM)stGaP8n3&f(1hM@`_^ycfcL)+aA}fql;4?$F#1 z@eD#CkIJn!H22*zv9nQuEVuui!20h3T=$O|tV#XmNY}jAZdHOAF&90|<0bf3+obhx zaf(~HMp8hHejki~WgrbUyJb}Hy#;W31D)K{?pnx_&tL+Dox+Ey@v<4@d(#3#)&+EtXftX{ch`2C5<*V;|XRU`P z-bQPRsVtOlDmVR`oTmUy9{st5IsVIC4PPDleBLYNV7a%}+N=fp&bsV&&9 zM9(G-=N66{LReOk(N13aa!Y+QvZ+R>uEPk{IG&MT1qxzAG(o7?m>_c> z1CX2;B46454E|xFR;df=5Mkr|J12Nw_E5vh7T^P9>L;$}<-xl~HfZsrfckjsZNbA% z(NfP*Li22ya|qP3##quw?NvZ=m8qp-iAP3#{`S$iX!r5NkIR1;2tKD3N7wVTu_@ygUw+M+FYhe`+$}-m4!5w;bD~fib9LIfs}w9we{D(1;Z|8HM84b zS{idz)gU-(sz4EJ&ZR0GrbynPB$KAD#Wx?h$7maJ5{*bPEG}SZjLcsH{&a-CWV3(_ zd)`&#o*K=-ly)2~&O3wbu2aBf6G2c^n!WD%EqSz_P+mH~UqqyRz5@rF6O)9EKI0sA zY6Sx;K9UuEP1;J2$}J-AUw!b^!B^1d~MbnL6}J(6eUV(L&NOeyk6tFqScm-3oK-$%WKS^=t@E=epXR>}j~I!H9-;a+TFA8I}JObW_Ha z&0H}U+VThdT2wrBFXXG*Q+c$2bIVH--nRdovY)##2H)TeH=DOX5(F2(n?KQn@VHt%APf#5o##7)v#NzZ9B@8*!(BKAb*xQ1qT+f}3MMnp_5A zi%sh>F+$+ba`Uu)Ii1gAbZ9OH1~y6=Y@Z@ap+}gQyKPMhp97JtQF4l87CzZkL2;sAxI>pu~oLI)rs60SS z&yT?MGCx(MMR0eyiq%-YY|&pO5#loue-}H-=>;Ho!0P}f=;8^L95MB0vL#Jb5oW`` zRUuf;^S6%Y#}u$9hGZ>=j3*9*_>Y-WDd%j<2C^_@idcCU(lq2yMPg5QdFyDdlsY!7Cx% zGiE?=!7iy)2L;pd%_2K7*|`e+J3qhDn-&u%!nrXCx>q+i;+; znEe#TYYUOVV5s*t7?K<{eN~dN+H|}MkNI~u1c&@()s1yh@flnSt|5#$L9iCs_3;6J z=naaLYy!1tiZ@lcxq7s;2JiA>?uO}vI*5?b*Gkhl#8?>_j3=SLa0iXKR7y|#Jixd4 zSV#)0X)%O`E8vb|)>=ig$cywusEadlZ#?i54PTch>MK;i-ep`eqss|8t6A7BlZ7fr zhM;lPN$|Ca&cJp|VzCt1Bg5fzvUQ{0=W&c+Tuq|^bKyw<+*tRld3E|0iF=ZJ+fC$F zUW!9){9?3=m~OYDN=`muKX@k9tGFI6f)%#tA|o#W>8ab{fGjtOiJpY6iAZy0&;04X zdvsKS?0pN~m&7?tKS>eqRZ` zS*~U`yo-LJ+dd==>&ZPVtC$|dBte3+#G@iH>vOJ9Ep9?chW(-Tb9btA)-Cp{X1Vk- 
zxx7){T`GKUDvIgAtv7tDHG7?0s&QwyD{JciO*K2+*hu5qX|h#ysdDzb%aCyu6`(SH zk-~0591IVD^n^v<4vz#w^W@p+@B-)Y{X~7Oe(GJSW%5A8j*hNO%S($wW;XJ^dK?&U zzgXf^Z^nv-xGmCGAJnX2oe84lhZ{t=7fE2RLu38#=5i2;($gu8eWM z(kw#5!Gn1dyI51Bn(HF-1(sorm(|B=?-2%pv!a(8>_#@hpZ55p`~gL8PiqjX7k5^W zl8;XR{%9Z&@x=-(D`4^D%$8uObK5ev&6O){dF`|kGvOx3;CDykb!}H1(uo$I0$c5Q zoGw_9GBp=OO!8$#*Sn(PLw;_bkr5PNujE~5i7+{jxWQel_`UwZ*j}96xjSb0^6o*e za(^OZBv!;h(aF~lfCC&tia}*xmWpfJYOyjTddvG$Z=nC{jK=(*7NKr+qeNUMbic=Oi2oy70olFpvZx9@4fuV0<8MF;nU*6LvsSmfyFY~8U(fy154=7I6kuy zl>QoBh3;Q;fp>RmS9n(C{?JxfrpNc~5`ziLqT`d^xiL^vTpP71%VQIdpOvx_SNrcz z@5%5>&P-5fYi-+j2h1f}Jjzwy*1FnZv%PB(cqU)+FB43j^p}-bt4%?S#cGo0>@zw} zK9@X?OyJxb7)I@r?x*VIZJjK4!9_IY1RQWtGvP9x&Cxp58^N01i+jC~m06t=m@^J= ziiD&K7<@i=V7*+iFk6kO&||D%z;sXPX-LpOLYfWVo4dA$&LxkqRV4;DcA1POpq%y_ z+o@CRHfxbmaPj>}Ru{JiD?PW-ECW>oH-J6MyVD(EsI-yy&w>`bEq64kRq9cjlfgZu zr<}Cg5ceUCr4TIWcJp<{Zg?MHmYpBS5`4d=4en@l?R;TY7OTK}2ZQTQ3Pe*TL9E_Q z@tO!aA>PxhnamGxBQF*qqMHs@8qezTd9U%Ro0|^<^_kl^N+xt;X0VONwzuKmnr!HNmV7|x0N)NA7RG9IPV+&Vw~?#7 ztor%N_INky-J)7$-OgShaXObnBXUz?>jO*|EjUdl(*v3CbQ(`Tq%3o=Fw=o+YqB(1 zXq8>DeqRLXwP_Kna3(q)6X4Al?r0d_A=-s;T7~ec8@)pP9?j*I*H5<4UnL%hdvP4C z0hZn~(kE#7S3EIV>$}TVI@{*9-y~iEC7Yeq5`t0DL{CezXHHWf8=M;+rhlhOn{Q?#L@zC|c(uQKzvf9OR2HD7h`A?y!~z?dQqXhg zBz~aINA&IZc?>h3&-^T(V83w(j1(fHF5jvfg-H_gPP?Ao{E?Nr#df0CTcoI%@JaQu z(@~||yAqm_=2FPej9)lpjAdS1f^xL=(mK~v#+90qHeC{=7b~v}d9A5ZAPZfvxxkPQ z!q=wQY}4xdm7`PqCUD&Gb|^*=;iG!x+-#FsdKlV&ce=f*c>obJ%U3-SuwU4wx92>) z8_S&L;>DPVjkP?Tmv@7(-2%fG(TjL`v_WDS$ES!^M z*G~b>wtEyZW4?>eicp-p;R>L!wW;jGx%li#U^69~ga*KCDE@M`De+Q~gyI)7Yn533 z!G|FK_OMsvptV%Hx8K@eGl`|65i_He;IHI&abQ?#0;X50YA1_dSvt1jJfydIzbyt8 zy@+m!kCIv3WR)$z4OWf0YAKeSepB4vK6S-ju}!gT>%nK_ym9a0#=Y-QsWR(w>YyH& za1Z`>$1-W~d8+s}1l6pGEX5A*%}nPn#Q5t8B22gxos-X?`(?3vujW@Yw9tjS)|b`H z(=%?r*+H|sNU6ah;JraeIdHX_B6GDZv1HOSdpc-KJx&I!tD9WDQ^MEl8iCD`k<2@b z26rVg$NANpQtY8cAi-+W?D;|J{yMvnX0t7JJU@PI*Xx2#0lg5Ya zA=|)%Z{TF7_9Q-;ed~sjsx{N&Cx6cj7N7p0E=v%}WZib{Ig3~A4i0WtHkb+o{p+-Z zUG_BULmUM%7Im+XkL^MACMQf>ItM=En^W&+350c7-%z|RuOO98D+we| 
z(GEUukTr~Gq201Gce2)&(pfwAMVGg?S6$JgC4;M$Va(|+*t2msAP>pgmlgkB(EL8x zEf4jdVQy~hBka|lR&hDbTG)&#?p*?|=qz%4&(0_CPp!WY!_oKCwomGJ@$JbOxC+2A z(e&q;Sd90W`=tmxGZMwh@o#X)nmFX{L_zjhp4Z2RdWN@50#4qGcm#FDx;TS|J=iLx zEj9NtUbP1j8C}^ynmQEw9;wbH(2fJxJ|-OOHG?}B<&OsQB?6}7sey`_r&nfe8F-2H zY9k~y-uD&{!mH6puM;VUop3S|&B9{3GIH4IaTr>2QC&3w9ZOAFEnu_Qfi-riJVkKU zpr^bHG^NTe4TvG(9|3YaHiWYcMC6lR7$o24B$hrZ)f-=Ma654PO?x@}roE{=>_YP! zaz1zEzC-(j)H=*lzd0PSb4O+NX4yGThx1iSC$9;It^QTh?;mMg@?{C^=<(BBr|8W* z$D7|!Ne{ZcW{U7t9Rj2Kbt5<{5|R6BIg9LQwcsWH>3c``>6%}! z>ujvigvl-MbA!Qzc9Z%mWWD$3QfEjqH`8~Rv)ni${e~IPDb=E(S;Vsnt)hWn$B-BpUTp8uuT=D5%-RR!$PB)!y z;yBhjyyLxWH*%+1?u2O#`5tP%YqC^+*W5fNm`V&mDc`{^ECN)Yvsx>7TWg@cRy7!} zONIZOoc3@Zjw3$03G2A)b={n?^^A$z>|hKJi!PY*bRC`?jNq-p&25|&R{&!{ovGeP zM}Kqc^!uLZ)NXb2bZUe>vlO6?Q zB8n(j&z>ksN-{O>&Qe+*l+Eu*_CD4HQ1ZE5ZC&fBb3W@W3Dt|c5VD|(obcKp;626^ zW$fT+@Ee)gIFQ6*1LXH2$lO zo4={vi=U(*Sf`KvVQ97e6b3ZE-ff(ki{Z@%PR0^Pxk?jjzD!LbzA1A$-`2LF;t|A3avZ`GWg27w|pf}m?uAQKW!jX zt3Hqy4Oyn`-*~H5@y;!mfhAkq<}u^$ce0u2p#;k!miLc5$ca=Em8O-`MRGJt_UJ1; zhoSUy%l0k%hu=RvrvWp*C=jBjr?J3da3 z%GeZ;Dr1_Frsr0>u}XNh9B(v9OvK4#Y>F>eOyV zx22=)HO;x!P++8fgG>NySa%Lm7Nt1*p7M)rCn3^jE zQ6D9qNF#V8*4T}7T^H!76U*Py_DB7}E0o*90cFvc_US-2yd&NIhT1eMA925@J)1InVSuD2ph zl0&uIUh_6PSz~j{MEQe_QIX~;Qj|_hw_64#3jS`OG~Uc^d6obfQ6$I6CS)aQwY#y5 z1=q@P%lC%Rkt-zoroe`2vbmu2;FUan}mPSXCun4iH=4Z3QO-)uz*wFe!7W(;zUns_JOLX|F(IKQ232H|s^(m!$mt!l zALEI}f5jmaA0&;4p)a?bhsV#mZz?}lBQ^VGa$lj-c~UG2?&3dg|0v*CCS ztM}EpSF2JZr%W5Us7;Eqn|KlC0r#%aB8Dr@kG;y-22vJJ=g1tz4&Z!oxKT=*BZUu; zNzN8H}XTQa&q%bk6!c!j_R{UaiwKu#!ZpZeGEw-?bh)oU>Y!s%Zj&*lxK$~}8NKmHL z5$=l>h45MN1Z%>h*!tvu`4+#r=x6Uk0vioJBUq{++zT4z4-!Jh%;>!0eYW}+QDSmE zK)cYV5Hu}X2n-JUf&JyN6fD1D%6E^mND?@V_jfDXAZ!Bcd!9X-;}Jj^mpwJAB2u|e z46^%ih3l>M7K+{jGH<%3^(nN!T=$;ZTtrufJ_kAOjqE{h=g@mGx9K8uS!TV~&B9E1 z2xeeYpt^6d>#Voq_XvQ;iFqTN1ziV6<8KSs5kYAebOr)h-Dp>fl|2ccn&H?=A71AHj)`BK2 z=K!ep)tP>oq+d-bc=LfCeWlWho4m~JZDAHL#uLXwaV$q}#aJX#GA z;k@l@q5x-226?i34pHYWg79oWd)V?Qt*d7G%|dqso>cGmuv=wWvg*tIaT#cWxiq&u 
z9ra0qPn$)>yo2QQif>%xQlipaq&hZj%-fgx%ghc)5(<%^hBHu_uZODR=^}B6K+hN} z_C_+uRD@}z^Fh_Lu`pO72Fr9!mn@8hONL*!2Zeh8mv-v&1fP0Oam#IXfw@qZmZV_- zLdOAXW;xNLG3OXDr*XA%58tUioJwy?0qneon&@eTiuYI$yNNo_n6_tY%7zV+%k)nf z?HMl+l3Tt#wue-W3o;5J&9PpJ{&tFxGPQ0kzt`qJ5XR^ zEy%}cqGuQ%@HI?Bmk`9#r)Ei@u66E_1`{S^$Shb-0@<byOWLiH@K$l-wvDv5`n5pJYky;T? zMd!!0rUMure<>#^rIRVS_R zAKLaO?bBw=a)JTWH4#deh646)A3dif#4H#$ERw-Cj1$b{K2WGc?9AE5%WAGsh7V6YC!9Da(QunjO5=4=Re!Mk5v}85K{k z9+vn1NL6*y-g8i$evjXrd5VtCSikXJ*~NMe)d13FWDblUR3s=S{9=VCy&Vg^E4+6M ziHk4f$4YIP+QK?LcCf=8R&lsly`_q>_>=ypZah?@VUr?H@c71J zQbmS=JtPDhRcvQJ}II% z0bn|-gls0FZnKM*F@>cp0(teWc~tCIm@yIf%EKcGR^_h6g?f0!Q=}8r&u%3Ld0R~d z{C7Fd_2RRCSp*7ZT@7^!VT;o)vX@0KA4o&kHgML#a&jWvp!tCRq2? zMq+_Qv_+1OJE^lqA`OSK6&TIlm>Ov~hglIB(2j8l&@A+U2{T;13)1@qx7-7k4C6Q} zC-&$Q*c|}ifa8y3qNv8TI<7FGhWC8)xx6?wtYgVkHo6kkWU9M$1A=h*WVDfX>6KRE zh#s$)>6_Y)q4_elj?XXLC@rIMyyuzwY&3}B2x|AvwMgQsZwpX3US`mYD?=$j;r_UOOPYNU92UhPQdsR$baoY zBUmVnv{u&UI@z2qvi>!*JXx+M@UGwe(QkT?nhx zR8NXbm)tB~N$hS)tMl|>bfUKyw&CN;rQ;KuhsIv$^5e%8?poTE#O8-MaGHwtsibjy zum6gQhAHyr=M&bw#g=SQk?CCfs_}}~Pz*{vX%RIHZ*3a6YgJ!f5yJdAR}y<>yA)Mi zY#+Jg;G|RpNwxLn%y0In;kr2FEi*%@@u#k{I}3#P+&7(lr#kw^D8!mjoa~gk z&0q8lIqH}=ND*R4)#nPQVSR`vb11{%I)9#GL>9Cm*s$79H#@O#N8PYVCGtkZp&C;4 zy&{cguSJZ>McsfmeEI92XmX!ACg<(MQ)%TAYk0u)5B`oH>{MZ`wY--w{8H)7B&hHV zA6|HeOT~^zpO5BV(MTNCBYO{z55#dgQUOO&x#iTweY0+IYL48b1Gqa*6C_3tnW+Cqtbg(Wm8i^^OP2H{{sxs=z^`g{Nl4M znZiX`Y;9iwmtQJ*|Anwa>%bl;TF%!p zW*&&#&f;3v(d?^Gmcj1jTPAxujtPZS99<2@->5IafVn)H;OO+otW2#Q;0jcD1LspF ze*oU9%eJ%~CvIBv;F9^Jk$&@oU5WnKGMWG0D@E<|Y4!E!#c3! 
z#KCDk?`wqO%Ful1ULpdU>d>HD!aOD<;erHNWZ1dFmp4{3_h(`W&evj*n2^-u^lByT zD3|o?HmJq9S}(%8f|e6EjpoldaS`40!P77G3H{2NAet|LE}F;+tbI>E463(4@=dlD za6wtL-3V%~cA_5;?AI{xhq2ptY%_R20W7z2ygZyk>`u{S%-?}mJ+J%4Y+C>BqvE=T zz+kHme~Ef(cP1pE&xRnjx&e9wh)JsiiA zAEd;}#CT*gxd`e**n^&7q0qz>0D5KhyY~Bc23D`f9N-kl8Q2`bMr5!PNQ4p=R^V}U z&I-e=2;43g0S}ew`QdV0Xp^MaBbaZE4rrbHcCZW1nTk?WA>xy8iRCLjzZXB1_KgXt zIe%!pr~)=@F*h#ezp_KAYpxRiR(J`4j92b!0W?Rk+P!dPt~>cy$BiN#upP+0cs~F8gaovoaRCv7Oc79LlWv^AKR!r_C8lrjXXT z7la;q9hVvp8F%ImYM2^BNFsjrQbY+hC(Sn!r_7ORIFRyIXZ2niQK)CY`;Uwv+~aWf zF3docRj`th59#ZcB74A}L?BpO0#`CnMJ0LaNYur>FnQ^-fzars^bmdBIe8@5E!^ER zg5Q_1q$Gc;07r7UJm8O_nl)Q{MwA6?^{gQZpX9@yJUQ%Mu7q&8-JpH^5o0Tu{pG}H ztnttv*rt5PFyYP@(QH&I8lZ8$p@GWV^aR6q?ey?rIlFu`$ zh_$WqHzQ*s!$uXdO`fM?^E;5gePYpl1VtinQ6&F?JRoaKKE5RRyR}!i$hNT8obif@(q@lrNztaZjc3=0Nl@ttr0qKm3rg6fbut$e9jP?7hxL8`S}ku$Txa0{{9AjKKk zSb}86JpgMN5Pc);8JWKhsj#5*76LeoeVwtZviZJ3Bj8V1DX2~o;WpDhS{@l8&Aq&$+l-6ZjZ(N4glCf~r`n77PQ8IwH&?=T$}naU zjS_*TZ$6G@>b2@>HL6j(s7wah1fIGO0+He#stB+W*!YL!C3{L3bXv`Zh|MF6Mw9h< zSmY{oPNG2@!0~jDmcboGaTkPa3U1y9ImJd(Z&urNlt#SshE%PH3$3r&0U|nn7$83f z@*5#8bX~?;Mb1l7$;Cn}pXh0Q%_uY$zPqk@EYc1}JqtoBeW;rqHc@G-v=X4I0xQL}Z6Y z{2mgcKWQY36KL4y+6K@eCv4L>_Ixdj-&r1%$M&z}m({pO@*4gvpLYL8K7DW>R#T!5 z3Lf@+k_oA6(4FwSc~eX%PuE0&-K+v3{t22x{~nQ*B+z=|S}-XhxLA z%dx|WXEz1kNsNA-f4sz?r5Vyqca|!OWX1Qsj%WzRgEJVVq+$1l@>P<)ORTu*u%buI zeTk7h^{!uAZ82U27)C;Hv0$5N(QoUl-0xHE2n4FCm++qD%=NIENhD}kPiyqamJRdx zy zfdbu+`v=T0X$}{M1`QA-0>wrPaWfT4N(`H#@ijVM6wv>iXKcXYhrc}-7t&fBsbS>^ zEVZ{Onh<^s$N2yK!cX@vl=*cM5d=gKiSiSC4hxyn{RNJG^GaeZaP?}ZqRQa9-OYO( z60lG~RrZ%%q8UFUvo?$*^1e@4W8YUa6#ZZ9s2`yM_dm!r9)qIh*8+?Xf~ZI{x?J*w z{!$C%9QLEXQzwm&qX)SCac@+D*7+-bBmf}vXma?H^<$ms8Pxsj!Gr?+3Y12g(&aEv z?D~51?G&6f9IkJmkAzAGfp221Y!C~~$!(k^4xBX+rY#t~#8?lS9i2Re+3e#~92V?f ze91qw$p+@v1$r8_31j}d$^G9|x+4FNQgVr=hx}h;-2VQHPkrXWSe_s5*m-Y0Y09oIErfDJk7sUv9 zh`>RJh8ZjO;5d@Qr!Y9?t6>{U)R3NBp#jEFuq}b2twd$5^TC;|f7r1GyDSqp+x|am zb~X8rjiErV2Cf(qWivR64PJMVK!8H2y=^3xDuHQm4{YU{OA}zpH{FjKd9cZ<|D2V@ 
z37-o^u3V#65bEIboQX)2;s4-h-M*n;xA-BFNtgDrNuHxvo}*man49#1Yw=tbP;JQP z(_gH&DZI~~&*TP?5%Wn0I>VK}b7hRj$JajM zc+l7!!pb|KtUualbe0&TNo{mJIFfiJUDutHT0gzeh!V+}e}2n8bG4SQ97sWMP@-p8 z%{EiwE{DCX}Tuv%69`sThP#?^H#tCzBkvsq_`{Z#f^FFR)T zIo;`VAV`Ty5XYY1nzsAu>>jp!(d1%wfN~ZU7DYt?n(Cxy-P-~VB5rt z;22x7|Is>O(Y9^!xOpatVb|?Ld}3DKDHvt*C*wuyK7N@P~zE4Jt zO2K_Y-=V^W1Dq32>?(DkgH43aNZA}_P_MRZD4F3a6gyntl|58)lc>MyL&a|nSb%N4 z-$F11YcC2EAiRNc4L({hDGOfPlCx)FrbYBNrXjoQlg}7y8CdyKPMMX3w)W`(5ixlO z@lnZJ@#UZOImD5t)h$tp1B_vuy%qh;+Ip@Ie`Dxp>@qRAy*7ER*DoLK^Lik-I1TVt zVb7*Pek8=)W-@22*2bGJTLP{veceXga@vh?qb8Mv!iCmf-hk=zf^6`WPJ9!8<9c_amzdZY_qh!6yP zF~Of~cLcoPeCa&(hmutiB2~#DpfC^WMDUw}*9`a;UmHA5deV`MDiomboB|8<=Vf=N zNO7*6Rfw+lORo8N86z?VYazt*mn&l8a%j6Jo&*9x|9PbJ(e$PDorTIToU95}DgtP@ znRTI$caT4%Tvyq9boWE`vHg}0IdQxKjjkxq`>hqq*l#7h!yE-SF&%vlkHhctie;HG zM0|k9;d@HYG7j8LzmYf+``sr+TrM9RG_A+()n3m$Q!n!}IjRte18ew8Y#9J$+v+9+xO^1R{_BKoRYxhI65hrY%1zV%I3B|K|a? z?L#E{9EwZI2u(RsCoL=x0p`HTR_g0DVwcqo*&6h)GZjj71=p*Y^4}2C`8qRX_g>B8 z9gSBU2ffxyI%ctDqfxzFc6j~N6ZrIcew4+KNP^ni+L-UmxRWr{@D2&(8xzg=yeZa*Xd7Udb$pJ33T&{Z~{kK-cd0rzIRIj|l}X*|OG zCksDBl*G*GXbzPRTiKJefv-~RYdgSBE$*lmHXL%sUHO1V(2EdlbjfG**h6BqclN6V z-u5jArv0oxVlPEl&qMLgazK-UaCa13%vtcGHx9(QTS`@yI`9L-Ik z7Ih=L84AyW9|23ngluc01Bo|MW_0}9m~cTfg86cp`kr!GltO<$8k^qH3%OyMFxDo`@p#v6UcTXbv}v!P%hOn^MYJx)l`N zXh59+)VGzM(2Ct9m_Q#DTpxuo&w_{!VDov zSWtJX{}!uLu+YRJ8RKM!BjPLsG2ze*gCM<@qo|-SfWDW!jNOjf5*BC`|{) z{-{Ssec?2as5d0v*&E|Qlm$t6%<;M71++ksn7{9Ph^zIWPYl!&+i_5>$ZLM1)_i+o zc%oz^R6pKNxF7i0zZ)W1Nl8ib^JGMvPFC<5@vLMTM0?AdwdYdg1?gmhHydY{h8CS% z^J%9%=M>-wX0Yj94q|*|#~8+93hDIp)vg;7zWQ`kY5lwwLIoEnKrB8miy*zLcc2$o zGVe%xAn%PcUbc#!=Ej2WvGch3+2`-bA1V_nBCN#9oZ)G-orhRc<4clqEf+_MwC8E@ z!`e4vQPry{OoUS_x~Cm_Twhm~GjH$w&*inE)92@bWoHd^XJ}XFil4{^G=SfMnmigi zQd0%SZRw2z_%>R(KuAK3KQpwg0lV6Z9l=DGG(=&Za-Wl> z)oFvG%dlDsUUSTpf2QD2;dQye1}H(9bzxoB`;mu*i}^y2^DRbrP*77k_<{3%75VQ1P z@O*sWyiU3iQ?myju45W=x&X&oWoiNXj!QLz7z!`$c}^K*+r*H%g9wI;Ro+lT?7w9K ztB%&1kjqkvfuB1dZ|(>54kvKk0p`2YmBO3|_)x+2_vo0cCH$VrlI)#Xm85WblYcfV 
zG-VdE#jJfNyi^;Wy6)OYy>;qt_w_9NIIZwO$l`i=Yc7kkYd)2rEbcJFYKwGQ zejtbzlD}VHYy~qWYey9NuJ8aJptf=BZ(-PLn#vT$#$7tmK7kT)>=)SrznxDzgG0uk zcg@BJk2d5KrEbtY&N-h3G$x2aBe-6I#d}l)Vw$-rKo(tXdsi+W2&Z$wOefLSPAyI{7L=~uTMzl|t<0(m%*sb4*gW;!U^uKPTxv4FYjP*eeNRbE;VB~sv8lt*1~N*? zArN19^bQWfCP&xtQ7{lw_JZUe(gwZm)Y|Rpu{r<1l9ZDZ^I&J#&>OP#>)1*N<9yjg zypm@VX})g169{h<~vz0jw zP|fE=rgx1og9gmJC~S-!ADpMJCFOHE198F}*iM=+BJuhD0!!myvDs{+O~eF{7EEYw%_h~h7^%b8c)Ns_}YN2F5VX$^p_T&p_Q~$k&oAX$KGAH{ANfg zdwxadJh{KVb2I>EA?dVO&HQHyuEeaMUx_KWKsWgLuUdVuLY6;ZH-IH;K5$7be~r8b zSG?lm4;6nIXQjk)$O9GwN558u45vf$cXMV_2+aXfwuvZFc7tEnl7jb)#{7H@YN6wX7KT=Yc!L3tp}86 z31p+K@9Mef^mNi+Ki@F03gOm$1Brq|w9Np@Z~BDXGJR!E`#=uBd%@0IyJPwH=zWK9 z@t7{gRjP`;MS;dXxj@}FH@Wtu$;?CyzV$osH0JNX$xH+Y%zy4U|?w@ln%U-pf!VE5#Dm(%$HlEHYG zA0I-L^(R~}J`YHX4-bgx{7GJFddI@P)=R*I&c4)(-uL^}N6|?e9mNjN>c3YkRGBVj9&uj=`x}5B1(n<@>a367@c-8xx|Csz;$hm2*7uRZ9-F>-x9!tWP&< zRwRQcKT)6T_dRgonFn&PZm%wCqU|V;fcHbj!Ucy22n^am)`M@kn|4U(^v;77{80wu zx?J)I_FOjSIzedKk06_@>a6(DVi%)~g?b|DXf(n|4u)Pe4)Y#ocRz)@3{V*_&hblr z$-$Jm-Gr+qVuk(keG7FK$oO6KtUs2?fy{TR;!XsCqvg~d?An23&SC~UktT(hgNxKR>9DZO zNaMTCUss961$xF&41avAZlZ$D!5W`&Y+U$;ff6iOMt4yyr|-ItjmIZ+rF)DCvlsqUp%>OG*7l3V*Mj zzm6w)#u@v={!1VC(iNZ2xY{qBf&CH1c{+5Soyg}p^vP!ve3`*3lW1kt2RaR3bPKuCgL+}lk9-E{#YHk6y^m?g&_F(+uLi68 z`5ou_z6eZ#+hs7p{4$5Yh{kbNDht&7-eQ5i4TG z9(Vg{wRzL9%Zz;`$_wK%`XVT8CdQ-RYdVe?-^}Md%Xe%&w4AHa-AB)l?9%D*=Ahq) zAsHTOe_UGkWq-o`?QMN=>9kO25&75k#Zr~s{!q|PvIvd!^Uc8NeX|435y}!gQ$`d&{t-dz#*wP!GX^h&JoLIhKdfB^DY3 zbw_*Z8JA{^@9p4hx{NSmPPgKQ7bWN21H6yTb@L^hcLP+u1(&WRF(& zQgfZE&suEl#4ONrU2NC=<^^#5-?fZt0YFcH-s(lEZhTmCbPZt>ioj(TNT%kyI+Eky zS#x?Vp=zXGkpam06^A-7V995alhY}+PdJQcpnYHSj3Km7c)o*$r^W||BCTW=k7WQ+ z6jcJDn)U)b$tI!0)u4BT&Fi9$WBE+X%~q6WNrQJiUJ?H=@KU`&W0E)yxfWQ|k)KT=jFTJvg@)Nop67kkdL0GCwMom>X$EhJXv9h-YGNvb7tS9EY#Ecm;x93uSzS#Zz$di_S` zSe@ZUN&ei@GuaU7iYw0*Tb-Xvw=)mUVUx+e6%adu{U}p}R8Mg1KD`3pbX0S0hs;yh zYm;5V!gp*RA}aTuZOh{Ak7MYTJZQliJ>Umhgsc_k+pb(^ZfU4|$>x^dde5r#YCb}| zr^<&H5$lWdBz_$Z>W+)0RY<~h+iu!+w-LCKl$&D6>eYZ>-dTl@@D4_tj}r-Ug>n9M 
zC0X$8faRKRGaOkfJ?b56(h^HEYfvPKD9FtInOme9Y`iy<{c*YY6}hUmf1XROp~_7*FsNi@wcW+( zGim!tjm=}lO!s`k86+zHOZE@*^t63KT$YsdeLbyPu>JlUiL)x_`}?w$h3Tm*-ylPA zJjirHzSnrg52l98`qsI&VItPkAF2&EA1cgHl zxIT}^TH-@wFIyd*#s(ImCDZZfZ(a{nxNM~IFu9wHW4YvPR$(miaVo2dcU}Jvl|X90 z1Ade$Qx?NrP3hs@^n?3&8a;VF{dFmr(jT3RPujyb)^zOKdDPj}n!1mjM@Ri4c>Fxe zat+0L@1?oJTa&r53H6+`n9c^qk(|?6N}EQRL+L3=6c<*H!c6+G(}6Siz(0ESu}rm zI~qJ=37rXgY-6FC)oFC@XLsu5YC#-sLOmug;Ii?Qlh>YtWi{#?dT?zU&3n5sRWjCR zY2Sv1&t6I1QK?kiDW_JZli!Z-XwuM8H23SjDKIXbRL?+L+P=`_MAQCNbEu7-8JRfu zqQyU)qqvOX&&ATu6VK-y=e>{S4QoZ!+Kiz8{p~?1*~)^rNTbT2%NswS0W~dnoY$e@ z@Bc*CLlQ|*4BJ9GzQl{tGJJ_cm9br)-=B{gRQmbs@xK4uv(ol;Y0j5_t`*`)#Su_v zN7LbN{*Ssk*QKt5=h4n9L6nx0e@$839xw2oGJ#Hh`w8`IH;mq2y^rq2XFT&e`vs0r z26xd=Qs8;|eC8l>Y}AD&fAJ^XiB6-ZPuUtu@IOhPOzBCLtu3kjkoRcsb?KVp8DDf| zkV^S5Sw*q84$<;=x=_6a-Dv)odnhQHuStrzcFJvUY0H-nO8-g8-ljfc#kzr7+J-5fvdpgGYV^u@FR zWNV;LHTq7aEmt1!J|+7Cjq}<@Gp8=1&wloxjAz{FKke8r{l1pJI=7#Sbj0Jndy_h- zXp7|(bYLS*YEzq>+773cyFDp6n+1rX#a`uWWego;0j*v0q2#{#JSAimf7AXd5BXZ_ zlE#4c94&^Lc6{WXt53B-l}^_-|BnXMaik`Lrqi#N?^EV;#xi?x(WmoKLS@{PyS zFT(PV#E+^t`up>Fv}oSPbRjt5x${baJqtZmB;TcHA zG-^r;y-Hur8cAa&FQ&a74=79V>^ZTteUTIEO>5^4CkHcQa_KRNemHaY8FOgq=ZWWW zPP%=RmX7U6ZXHL{+MO(*N#mE#7qv=BIsBtZ+yg)Q;giYK(9x9YwH-%4?e(LG_#{gG zfA+otEUK<+ds(P}En=V|V7HIh-JOVuii$0Yfas%-tzcjWc06`>cNb!UAcAxw-EjZw z3=GT+!!U#Re&6@M%yq$>b7r4&_Fik}z1O-|bj}+hS$(dQ%0(xl-$d;(`)}eas>)D& zpX7H|9{%9iEGox;cHGHLpP4eqLw}Yfv&^St9w?)?%)}}jsH$t#+KUPqkxiPR+f+GY zN|r3RZpq$Pa4sQ%BzW9AiFtimqE71pypeksUg2^1E3=dk+~2VreLIZ69Q#Xf4~fp- z=W}VY+C_G4OV$O6g8$DRrpCbO>`qK*RUa*SkHwa2j=Zs#lyjca65)JfAEtGyhw_>= zG0e;kuf7N3kKkvbc9lLC6J#g(k(8zMFOsPWj*}yQ;MBUgXsJ^N14b{x-H+bjKvOvi zM%DB=KB(Y_Y!WB{(6Wi7;6i}}G9cHF`kli4X=y1) zjAP5VAb{ zA5V@HWF##KZVz^0QZGH|wH=9#XC3j2t-lm~e#?r3>&-C+UM=P>!Q-4P!W6w+{F?ys zA6|_94a&lBuo;d&`+-EG!MR({Mb?&5HB;zwVPMOi)qPktl`Y!(8+!zFDRIEI;%nez7Xw zD(hOg_L85v_2RihKu`$Rp9G$pux{gH5FQdJ8f$!f{SXoskujd+i*{9!-v5cTu|Dsx zX~9@j*KdoF^BwTsGc4C}Ri%RcNdQggPq;0I1@QdR%ZEKHLJ=J+)Pq!{@SGz$A{>GK 
zJiqkufnQ)Sds)Pa<{kgjH6c056DQVL;4hu}7-+Ht_dfe51X$7%gK^hkDf-mbMroan zFk8!(e1Wo-eCd67Y;+W&c=JSP|D=Qm<( zo9bxQdn9&Vdyh~>0I90!v%j(6N)8ez;6VF7;%Zxv&HuCnIJ!?vAicYOn;yUZN#Mp1t5^#0{*a`lg+iuGG?RJ+EcQ#N=0FZL~>=rBL}{OTdS`QSo< zF=-r)DVfx?^r?N@{f+Agv1o8TF;Pr^zKEC_2ly{$>#1qd5%~w zI>bCq>$Y8`dPdWklWHcl(k-7O|JHBkEhUhyG7ps9Wnbzva}HUKYD*Q=C89Ga@d6%G zlt`s~lJuyp%BL5lsB4$jRHZ~BxqW;=PG8+9I4YS^ zl9Q=imD*(3xf``;q(|jTa{bQWnF{@SL&Rsc|8XH2Z_Zp$Q+qLAp9B|_oNT4VxYGfa z;ri_-$s@WNnOJ6UX;sML(n?DTr#Bo%e(Ti_RH30hCA+<)3m5NGm|9Dk#X$!a&g?-| zB-&YWv=3c4x`)m@4Wf=i#?$Bltwm9@a%_i5p>%x@*Bkr&L~SO}`~?fBPZP!UTOR+J z3G<(6%Hb=^R&1^}$GOaoVWnsY*PAx_(q#A9(&u;~jl7>9V)Fd;bTOblO`o@b<_tHa z@-hJ!l;S|6H1c?OK=iB=k3Fbz=LzZjavdc2<&Z|Pmec!$@}#d5$$k4py2sp& zEr$!l{R^pk!zx)kY-vIemJ76pxkiG@b)@k#MpJ78AzEmTnjlhD7czWK#vIIG5%ZrJ zWaB@}5~Aw;L@shpVXnQG59z|mi{um>N2RM)rHULJAvHdl+eePh{>sO;h ze^+|Nejvm=~XFD#l`%0s>(1!G-*6ONB;^Uk8d2%#_==# z3$;UF z((MDidH0cihDK0KLK0P|*?>BB?LjRX=u;UL$4@yD`Ty<`;{E9So*lG)(^)Flb}-Gc zF{iGLYmi1UDT2qmmphd?p&WM7##3L(VCYzyJH9*VasT=IPY0#Og^}}*Ni&1ZAo=&R3i1_3g+8+FRfB{ z{&IgrJ2@Kn?pqN|G;c;WGY3(r0PVk7`vqmsSppn+?fa86w9oz$`4%cqC5yS! 
z2UlMzQNI^;uCGO@0bj{Gt~hDetWBlULg?x14-}qUii{>qrcp!sk$y#u%*A=mFZmO{ zq|leE_Ox>K26|YyC)xb{H_hzZkkrKs6gjU%f*+k*zk+Nzik)-0UbJB8VzL@zNTroo zQ1kP$0JKpIp#7yhfQA@yJb9RQ?L0)!{bHzWm(et5_7ob?yqaR*h$9R|2YJx7t5@jq zl~+_)vpnh5t4i@cAL+#xKPp$B7faeD$j8x@;wx3AvTEPx`Qs-PRJ0zMm@lF!V}?_$ za`HhsF({NLFh`DTmA=G*8XoOiM|SIXP*~%U41_GCA%?nK?-VT9m43LKguelxAvK9& zVq%Cb1~URm@UZs=6(%=dUn-_if%GbsrC+YE==1j=D)HA48gDX+2DIe1E1Kx0lJl+YWM{j9 z?k2URS@UgZ_JC%hU;y&pnsGHbfG%+m0o#oS$tkK5jkI7uY~dKHD=gB5B5*wobbU+* zHgUUBYeXYPHK+F%4$uycO!g~ApC(%`;`BqQR{4?~>6VjopaIV}XUKl*HHxp^oo37& zPR(j?-~a{kNDX(P?Hna>gTq5=HEtH!*i56pav;x7&$ST6?h=EA>*&DMPt_(#(hgni?NSk4_$>qjx;0QUBr8ywFQJv|~G6d>=vjJ*Th?7SXWQ+}_lL z_6AA{^Q1F}IAZF%IO;xf294<5EE~9&F3+t0s_G&=5-Nk(_6J0fP-kYsLy!3c+i4JYONS=urq*m-FePuwqs6l6H&(UBDg}9P$OcAP3oq-1W zMX%m{qL74=G;p*DjUGCX>Ijj1v&V%&WE1Wdj4X0@{CeujnZ#5K+dEB03 zLVnV%8`tUb<(HJArb)HxR;8pMXL|F+ht%~AsdZ&d3jXX)ab+uT-|~arym(6fREtJT zvm&!`##EDAX9T2GVa`nDrgkTp91|U01b)c?RbYy z?Y=-?f%{>!BoaxfZBXr>8ZSG$SQ!ir^nlxc7HK@)(ma>v%*C}sY#fAIQqx*Nb&wNHv zNrg$Lc2%OtpY-9YXWrXYUh8A_9R819$!p&rL`rwqzKJ%U{YmYInv%^tV`?nS6?3R^ zs;2)R70Ummm$L#3J_QmekbsN?k|Vxj@6t&aGGrKo3AUDo)00y*u&!y7PQz(@?4u^E@{e??GuIDWo43$vkZ zP*$r7bhPTCUq4HnzWo{(H_t-125KmxQ51t_EyF9H?4+A%?4FSj`%7e!9~>4Xmyby- zVP(Ek`FSbqLa4l%dlH$)wkk}0Fu{6X6IAj0nDHu6=1`c2O#IBg~0t`qCB+lm*{$aAZmym?URxOOEuwku@_hVz?Rho_&t{|Jq_y zTOE`vqm3%nT4M0d4o4X!?c>{kCB$2ZPI8-voQ(`7VvUjLX`*Hx8aYEle59B*Qi zjR{&d(npi_y|HNXQ9S?RiSX!fe7V09P;8a@Hm|`9w1PLMU#nUx+Rh%R;RX zk2|eKV#$T~qC6{JD%%N9YW zQB!gGojamsT*ietMPj@_F%f{^&|irET{m9A6w# z#BPdAQ=%-jNBYx~;r{3l%m+3=tycYDz4tD>^OFbd54murMEl_Uj=wQ}

Z^cnfz< zY{RrZ4N$&9C7xR}L!bT=vH8Fme029laKH~-;y^3Cn^!^Ib_22I_!IcCyT7vSIVUZM zcqrrJ5EB!PC}IAul&d-On_k|82_34TX^-L9c;6GHIz@&Qa5XNp0SR^!0ASMc-;hEHDG)o<6w?2-`L zsc5b)Gxx}zGXJjpZr_j`;f_OV&C#H8E%Y5@h1;LJdCi$~jZ-y!{+0Cq?07a^$kyNU&5(lhq2q?IBviEh6tXd<=jp36L|u8)owOg zS5QOu3D&sxEi@N?7is>pfm4^@rNDot@FmhY5B+=vcDDA|d-XG7WG0c)DP_)5Wxr z+J7G-I68nq4l8ug(}tE#V@zLq7~jPFV!U9CiH<-BM-uh<@f8m*?83?g);MyWqf#mX zUdWUt^E53rk$HeT;quJ`0pSd2C^ayr!S~fE%pTkf8ubUjaxWA6%kpDoz9AnDegi;@ z_IZoFw(~KlZ(sE8-8&!tdZ1_T@z}8cCVZk4N4}IQIWL57?68E%n5o!v{WF5S-oTCt z*()(2d->Xim}+;JIah)hKu(9`(xTmQVDV(sVF1f;$aI`}{WF~>D+1}3=!oJ*s)ws^ktl1TFxZm;PXqJkat~CEyACCIS zZA6uNmdp#(-5asV&H;{I0$3<7SIS*WOYp_1WwX$%lm_~7G0kr5J z4>%%fPmUC+CKC}gjU&1Lz|oDfU_8bYdoR90C zpPe27bq7a49ceNbCmy&ey(yrYE~NRKdQY&zBZ)|;;__$zR=uB4M$Xyaqso;ugmXQ# zo?r^IzxU%KFOc4z-7_js?EnBk07*naQ~+Z`4pLE48!fxf!6BA!aCA}zhg>RtJY;~f zR~@KVYK`fu&*3L8s)cS+B^h%6?eF{;o2*8lVNEUQHSdBYJI}$*KZC!M7uUCUTXNs8 z$pBh;7|*rCTLwduQsVIG>S0Xo*#<@dm2afIp8>NEPmmjAj!D|AyP@# z+%hF22j|#c((+fg2>~2+c<}^Ot5gQ%^;=`!#?x@|^}>Vw8@YbhhmKA?jGVa+PhI>G zFHD+vF&-yGEDsHaucs?s+&qd+D;8t#Q6{(#k{{_;v9`s(rXeXN96lay@bLCSl)x=0 z{^<{VrzFNP$5$wWLq7QC^c+VWHet!8y}0zo9T75-dUJby;D>8iGO;ty$$DY(I+gRnU2`364E>=1AW;_iIx8XVp6~|Jh~cKa0s$w-Q2Jaem!= z^lD%L{RW1ZwblV2d_r>xG?0d09I1Tsd{Y>iSmD4O9s_vn6?Cs!yJF6{e5{X>l1t;w zweUb6xVd@2FIeD6{)4S5ANz*1#6VozvIJeKX`_eHBo1)&U18KzRntq#sNgtf2^0Wm zIm@Qtm;YJ`B*W$SHkb~XjkU+$vGtcP&a9b+<_t;|uhoqs3`#em!k01tYEfCd3S6z1~(AEK?6+v=P*2Z5w6NGk`U&F4BrY|{!pS;*plnq5q zBQeY!=k~AX$=61leDDp?yf9bhBL)jJ>kh>H-H%iQHZotp6YYc;4sa35z+f;!LP9eC zf+f!bUGVRsX)tU$2y?ezVapId1WEiZq?Jq`&ScwSBJAYYP<;RN4v+6Y#KVXA@pm8h zA3lfk4?mIRrm}KRiEzfQrDm9Jy#i0Z2f_Q@Ma(r8tdL5g&4@X;^xjJv2#fc|$)z*U zw6q3lbQ+7TH$EdOfhU(q0Ss)-MQaAO`sV=TWQtenKqmU5i8%UWYPvPqN;8ZcH4}#! 
zSQc56B}rPWsiWVNC3y0KC!uUr5a#hRBif@C&b{$a8i)>ZyqFFIHBb(8mA2wAWveGw zC9Iw#3Y{7c?=K(m$;nx?v#qeLO57&*kS+MGtel5-Ecbq<=|ui{1<;a$962JY5lYEK zL`_YJg)>`|O&L83GwqJzvtPP3nWV*n7#EnN8ghU zz#wcE%n~*WO6X#=`F4Etj}|RBm6sudPvIUovBeU9)zL-?t@<#w-i5aw5*tRmnRIve z5)5ou6OCH-#+rjS;FB@F#DsX^@y+wtz3U(@-+jw~7cWc|4SGE8yg$7aCL_jU$^Kg+ zpjuonLOxxC<(Lkley80ib6ol0!`4Qz@O3oJbrfWvNT7i7x@L)} z&|dOWx8dI&V%5z4s9e4@TJ)cQqmRC@9%JF_^b%*bZ^8Pt3@Y9Fh`@{hBPp>#cya#% z)~#I$8_PKu)4wh1R58G)scZ4toh^2yDnPE9&Pu=m7rY#AVCO&Pu-m>L?|xaef(v2to(RBqS@lWZ^H zdyp(ay_}nP_La<_NfZYJ36+?`NX}3w0tqKDNLW@E z1IMq#6PEx6RI`3p{U{keU!G#W?Q9qgXotql2Eu&VDSQ>I<`vbU!YMDk|Mlt^W;vLFIyTXdN#%RH*N|6G{LY!rSz&6?!PpB0f6>js`!FpE0O?%S2r(W@6MCB&mc^U-&?Gn zJ{T3)vPrMcR2+QfDPtwYQQ$7x!l)&KP=%{t%>11k`SN$8DDtA(#|78-S))%I9aOA8 z9P5s~%iS6x*&pZso{AO>pbeP52+zF~Ta%^5GdQ@y49&|5;NU>~bLcf9m2qEFBtBJt zW3y~g2y12TON+&)>*uh2_Zd8Ll~^nZu1%NLW0YYf1_Eosbn!m8{YnSp1XtKt!L=kd zUdm!7Inoo?PVI%wg1@npCyJxHv__R84KZW!33x^2GC9aGuMFgUyaJ0+?NF=JIBYoY zgh;k#Q|6PoFm5;)^Y=Z;8-T`u=#Mv-u-n!O78d3(7u+M|{LQg8NB^#EQMpo6bQoxY zIrHamI&=AH=gh(SJtyGAi_;`kOcRwU^lzj|iTHu*XZEw}CtG|({(|FK1|(Xt)k_I| zm{>dDD=$bTvzr|Dg{@V_qAI(1mTqH=m6tQzw#3$W3V*_4U?$S%`D^v9Uka5$dY z4wGS{Fn8-|eB%X!XmOs+Vl5psijDTiJt5lT$kED0dz8pbW}T@s5S9$ODz;jbvEneb z<~Eew>PzDBfA&y9?gJ*z$HVX0^CB+a&>0IvkuqfgHgN{!M?{UqFE2;zVE4X}lPqxZ zsT-m*`Zg(xwVFai)Q7xqh=YG-1XNqLZuQO8om~8W;ai+nO(k^kJC=Z%?@_j&P(sd< zYq<=Q5U>%eWbSY;T^ zwCAX|Y#GLQOJ><}lOZq81lO?1eHufZ$2z?ZV>ucbyMl_W2*o8xfd5yvj5@l9t)gzS zRg?faNPs#jjXQDpdiydQ_ME}}ue>c%!q>{uP@C2R25za5_g(yd3+c-)B- zqSnR4$?Z4O5*a|YX8^gn0mjbT3`gHcsqsl%ifrGx=~LqOoPg={XXcOI61cx}dv^zeOVxxM6*Zap3y#=L+4=;3~vHD-h4F zbak=uV08!;_mQv2xjRELma$(}6uur%WS^nrx3Z?(D)VPADUb+)mTj+WP+f z73kNnF^ncJ!-KDEc`rG*<@NxTe&H1k5mPu4 z_Cd_Q`X`BGr0Lq0Nlj$;1qZ~ zI-)|m%HR5!mG?A+@p!*-dOtMiJOYakJcJ)dQCGHfS+#ZNV6xoE!4){<(L)awp6VLE0jahv-?bFup5q9PiD)##+a!PKw~Gblw>3)OY@rvoM*{NNz4-ziw_qzU`(5; z%#AXW-2~ayhj;2y#pVj){L1;xTu?L~0?PwW5yE_6it;b#5qv2MZ!WIFxc&`Mr*Q)` zXxI=9<^1W^uZJ=f8SpJy4izdHpl-bea?=U->gwvE_XJB^d&d@CiUw)<8TxZqlj9J^ 
zmU5xt%vl#7fJ^Jy>P1_ydg%?zT@Mk!JE@ZL`{i~IHl{tG$!>nyoyTF*%`fb>$J@k8 zFF0k3!mel!_GWTv=`okqUIq)izdpsr86z=zk~J>A^<>v9ac3cIvDRVKY+Pk4x$keq z{9Kzw{9K7ror~-{rOz|#Om2XKaGNau*@8Xz#uM^CwhoidYW6b;xeuIdiAMrQluSDj zmoGN}Ej5}sD>M1!YGb0+0XX}6Xd975R*&&kxaTaxm6sZUd)sZ$qq-*43hQF*+}#X1rGwp>Wy`HTvkQv| zd}6ojVnwys+S(2;JlIWIrv3<}5Y~H2ulaH;Hd~eQoFI(H9f2P)^LbpB2Rm%gi_4;3 zy$dY1-GV>6p-c8VQjcW5FpdVy;{7i%g+kt~I7EJcjR~+}0%XEazBhUGu%a(_ZAfTIN#Dx_x1ZTmsmNM&@p*)070ZnhJrxA9MW$xpN);8$J166NLbpFsI=8gD|I%tiM8C z;kkyWt^|F--sKG9)a{0G%ylVjW+iZX<-rW8T;_U`z;37^0saUK4uQv;D_AnACtCM6 z`8@!wS$|AsngREan4H^3X3k>p>CSe{9A$_`&FZ7RoH>jzH_>arZk5`VP)w~H%2lWZ zgE~3SdGz%3(AAiEfggO%)eSYCsXaouKZ}e>K;W0VSU$Cn2xMwEbPCSBl(F_njlzqg zb}-_4QmTj!22HTVGv?V8fY#spFvshQ z1pcvP-rNx8>dv?JVkUbIbsjbjd+#_QnEP&laz&g~aY^#!J5PN>4sVvwbDt5DaY@9% zs36y>rqAKE1(&5IPynDw%cJ0|KmyVd5L_IY|7>uhbQec6c5(C&+(0B!C55|Sllf>= zRV#`j#ad(T%4_f!ai9qVtU@4wIK*=R0D&l2_=pH31+wPYI1yoUGEbi5c74fVxVdQ# z+E=ZF;dAVmn=Fj2sM4P$@QcX5#~ zVM^q8Y_%{!HML@>*^PmW2X1UapQWWVE|0=8Wde_i0ko93U+{Hz#-}ge;m;GnbeC1; zQ3&wD7bi!&d;I{rtxeEcyCm8VGl9e9dwBQt2XmnbE<$oOmMK!vg2K2RG#dBajyl!sLRvhvMU6nAkQFk&%)7%~lq_^~cX)lH9liVUnd-MlyxXJWUAtkZJW& zt0!B%+;@?-)?^nLx7)j5+EtH< zIuF*C%IiX&B_LZzF$b2yJ}N`nD)$pg$ocmaDRDPjjyRpmvIHG(y$8-RmY_+$+^}nb zDfBee8NmMw)BipTPr=2LAET@cQ+Ui30#FFnq{Xz_VD|dU@DkkUB(n0KI82T9fWzNr z&?}|RZsB9FjordUpnS&5WyhW5m@s&`eSxFnJ3P3!3nl{%p|0H-CiC~<-cv_>_hv3M z0T@w~`P}rGm;UTG^%miF7YiqKhfd?+Smy8;eqoVJdLEC&q~sj?bY*qTGbU;NGm+)2 z^6f>DC>8xC==KEjJ~gkZ1s%hISb5?p{3Tqx(iq@MZ?8FMD|6A>8silJXesfL2>9uW zFJIi?6Bv=n!;~Bs0bdUnI61w;qpJrnYfLv(tlj}brf}e-J1^ntDR7}l027J|^Bcd3 zEy&so_)P$d=f(4j*(ef18Uy+as$RI;ZKK&D#^GIdV}nTDG2Jp6m06v!laG90tDK(i_p znLeZu#?07<56l6W#N%pGVglmhGunYL#`63|SQ8}hwK!fEB=DR^6cj?9eo`Xv{McFy zXZP8G<1BFLwHrbsWBGXrBCu8NREg`{J%JNW3D-U;{<9j)f3}YK&)8aCSX&75fgEbJ zyqv0ZEj5OzG&U^AbGuTuKFV)b$%qK{h07-=o~u8?p%t^xL%$;0_8Efp$1mZE(N)a5L;c_Dd*xviQB0NT_=cqRk{kbSVYj57RZ)0Q4#{xi1f3G{^z@1RBrJMH4M z|Es=vaOc^B}QqTVu$$iLl;y5D&hv z>qjn=w!rrnFn2^NXx3v6G>5!$poz1pB6C~w=l}i~if93_2li2D-9la)6?f7q%2Bu2voc 
zrme!$?*jiA*9ML(c`1Vft?xt&+;dT$Unb;3M)XH@j{a!KE-Fj^#*~p`U}~`gPqP6+ z(iUrVyBT5eDr=aGGs3i0hn0@@m|1K-2;y>E7`Ns+(~o)c9w^jfmKBFO^!rSP!(%rF zK*QkAq|^aHQ9N;$+cG3ym%qnc2QRD;f+`3>1lXNbPE=2^l}c_FXkcEbtca*p(P!jb zEV7(|QKP0~+vyh!6!Jn-BKN6LY_YaT91*nvjM(kQdY%c4M^EFZ%O5yEOfFoL63-GJ zDy<8-mVl863-aeaDnR7&ol}=Byi4VNLJ2vG%kaD30$FjEB`AZmyM-VXlH(M+xPQJ0 zi!oiGQL;G78w|$EeJ^;kAU%z;GE5A5&zyH7P@3IDD;OHX_Ixf;G&9DF->c!-$DJsT zC-{46KMr1@jxM8_1M#Cj_w(Ynup1UzIQ#hrB9iNqpwpj9=X^~8nSrr7>0717~rX~w$! z#a2ow^3aUTMXMqJjrpCN@8OW$EKC}2jU5Lb!AIa-tfEYb|V->XMVN(d^TqwEhz+d_b~riZGrzx9?3qt0lGfMN{)nGxqK;qk6&_yD(W-NoalZ{g+@h!|lmFQdnl zCs}$V_~s&7p}kkp)~DE9t4tpi&A}qf~O}3 zEDDYaB#=!43G8Me=07v&KLdwf zcu7qLQca!*)upr0hTYUUjESx#Y;@C=0W)dNLlfXR^%`TjNzP?OW9htsc zD_9rZ*~Za4tFtM8HB7MC#X%ghNUKvBJh^pzFLoa~59ge?>68M{m}l+UdQ9$G4VB7w zgvIhp@D?nLQe)x%_6bh!b-+rCacJLA8#S7=!}z&Nv1!XT9KLiPpM8Fb+;e2hlXcGE z%H!Rb(nlAinhu8b;pgnaDY@{>N|`T5Vk?HfhGQX5+EsBEjr#cpJ9y`=Tj!2w-@bi5 z{Ix-w_M@?C=S`NUv~{OaWo7G$4`QpA;s%&twFfQ&?^tGC;>e0j(!b7Z2y}R%Td_`8 zEOuZ*apt=a0ToQJdV4KLYAvUU4n{L@L2y;g1?&{(E9%DsEE1wWYH{?(>N=>-B--7E za8%F>?->-&4#G%UtZCIW;Kfh_3>ZHH#~$+HOi33$@wyMg6w zg~K4@dk#1tfNdo!qR*ric;ps>z|Z$^(0(sYUuJh!UgZBLKdDhauxrs2=$0yl0cO@5 zP$BOwoYE0d)lpTq4(hk)iOEa$?WR&CwynmVmzkaPQn9I2=ABiq1#Miq+m50#3*$}qsof@Q+x{=^K67U9RvPS;%~!7a%tL?nTY5_l$Gx2k&_lNrbn7)nzhN_AWw{)4 z%;#eC_&Hd&?<%`f3;b@G9kQrM%Ica!_Nmlf@>93-2GxaDnW`8taVZnSvlVMb6-!GD z$BP?haNy7>ymkwa&v&KNB00SdSq`a(n%b=~dG29+;kkm)6UTFKrZ*2S;lM6iOc~k( zy5(x2WqTu7FSo;%?MHC^z7qn3sEi_M3P44=MVw$2r`jLiD_hAV&Xof!ByNy8CFXQ)oBq1+y*JMJ}BA2B3*$++hUf*mHzWwjNSc7OAW$agrG7lF5Hovx5AqT2Q9`NZzGtddYhWj&qhk0f3gX zYzlt)ua^M3IKC6}pA|;;Nel4cyAWwILohr*1iL@QstJ9eQ$iipnB!snady3nWy`Jj zK%CfW4wH#y*tq^5jOyC})eIVG)-j-Th9B;Eq`>nqtQ4lWhGZX@!;q zmz(w&-M$L%Q0t@XfN9uv^fd0=IgKrr{n4UjLyWgOz(nanlsJhg0}l{y_ASCdJ$2OU zJsC$IyXVMhmJ<3AcC&}00=u*s3}kTdl@}90zrr#G^`_Wt!G|E_Bhclos-z}OFTLa(Yz~rdhdG7m_7}*`_ABtAMe1%1>)+48E9LXIp`{O z#Qd#y5G*W|ggcqk+t zQ+<@ysD@sXR^bl2^krED#^e3b6&TT=92)d6!QR_nn2$<1*(Y%H$7MqFN39Z|k_5HN 
zjhHLv5J!7tS716PCc)v68$x{Ez}8|4rY_isS6+hqPqrLqyiWCgLK!*BmL+fJl9r&w z(0SZk-oTNt$P)TuPX@B54P}?dQmEFr7gip)1HTCF^Tnl8vOXt=eTJ>o7*t>v&a&;t z!1h{3L|vJTrzR(IFdk=|Jg`GFUc6$SG$HD3gwHqJK6wz!*KEc9t1t0WaIw!xNFRFx7Gm2hQ+jw_7=tN2x_}D#E_M!0GKvFn``k96tL3{=azRQlcIwF^IFD zy&xLa?+(*N97$M+04ejj)I4C-W6~JO5Ll(iCD&e1VCew{jXyFGj$jant&6}c%w^9ZpQT> z+~+-Z+nA!Mwic?^?Fg$)XW_~`dBMD~wRZ7Lm@Qb2g%+l0U#|`t)HCGBm?oI9a1)N4 zxX$z5otQSbBU<)jKGut}!5Na_`TSqZAKnyIE7pWz=P_8d^#HEkIE_6kP0*urGZ-yc ziw6SmCRG_Y^5w;CFl$>AO*#$1y3@}&YHETUK{CK}{~*l!ao~ee^)Y~}xEZ%t!KSN?O$IY~)DEx4GgyUOmv2C*h9=`d8h+H^&<3fJm=4l65Tg<`oEe9AR z^g~Qc2%a2XiJ?s@=eb=eS08g}SAuKqU3Sgw(-7U+g6n7wym)`CU5V>$9s#sO=0x1W z5$LN`uFc^7KX@#F`;wi8jK)<>FLQgrX$}%70MK#}Ou^*>2?(wOnfzyk3GH}+b>)3} zcy_^ZDmu^&M>kocS%aF;)NO^aD-OfaCtTz*nG*5=tE?Aef$eEHJwJ!pMy;W)p^q*D zSK$1E9~_Z}K{h77zs1D&9V+Q!;)309mav+FnE$Mb2HJF=jAOjRp6*t}5r1xM#>{@`{}Bu ztE*JO7gnGM#tXkC2e?wUWt{crX}K0nnPZ$Sjnk(%DgHjtAlxSfZ|m+c0m&ZtrXDp6 zbNxrNsAFwC5{f8XxAvYA_#+Q2?vG!caU1W50P=@vaE*G4F}o{NWao84WE`n7T~1(9mPfN8xZ4l&Fd-^W5ORpu71x==KMZ1wl*t1Y`|s;n*+B%~)?Bi7G|3qLZQ zV}^#h&{#T{uWJwkyU6;8N4q|kcpH#m$Yql?#8kkxXURhuxg``N?3H)ePMo&9=ZyDlF$ft$@2c|#|vK1cJjR^CM zV0GMUPjG2bql)p^mr( zivDtkB&_#uRT2qe_Z;_J5U?&v@bWy&a9X$YO+Xfep&FF@G{NWWeK9{UG{yH=k`;4QU$FFYc2 zYVHWMTo~Dtar#ry#)6hQ!0MdO%B||YL1aSXFmi;$neXm?9DOUes~AP%3(JBMTjiNR z(&XV=AGDqIEGp2#2{3!;Y14euBLCDWT0_#si1XFxN{TXlNh z7Hy$3Wq@-=$0;scG^+X3e5%<0m+pKRy{AQ&I*YWuU)5O~0kOV;MOM=&6mHR56a-%= zsd6}zIA}(p_9O`{EUQp9rQV*f#Eou|H_ev?x#ZBrQ=NT*AARw?r&Fc+igA_mu;S%I6c_z-k=6UGPcYA4-jwtaZb;P|Ec1N=?8+8x^Soi4!}^;7M7XUbv?4CXAV_c|5Av!!Ej z*@ z%{QWLb~Tl4ENd=?7jxrukRIcHpwNH+0X&Z#+E2C-@=I;MlML!omiDWHo&x!Vx1rna zF}v$NxMM{%|10`^mU1?=1i&&PBn+W@c;2s>bYp93LK%bJnOT&E%R49|n^7mXPb02| zKlUfxA^m7c7T1Ag6Q!+~lp##8GdxCg5)6aAZ6BL?ZL;@?!5xG70(jd8#o)Qr1lO(m zu@g`ocPD$NRa{ku99_r`iT$*05gPouD~OY zZWI!MfeWRZ813QM@%7FSBdp$O^McU0^-VuQNKt0YBjc5)5y)7Jil9k*MUtfxxYMLn z|D5hwLz6Q;Z*|ymrsYLUv+fSZ#3a1bNde5e?Y6R@P9$oRixA z3iOq5^TQPmLd8TR4$;UTaM%e_0{PJS z(#)EJ_P>kNlI{_;(>~ps8h*FUjI!YZdlKNuu*ejKvSO%BHJ8sLc_!Ng#e* 
z+dK4#IMQHEWs}rlHHh$ymNy+dF})r{#xSCuL|x8ORbg$QthB&Qx!vfcx!vJqjQ#kI zxCIeqg@3iR=GmZ->J8KzPCsHwRnhhm4Igv8v;X3XZC|E}ZW;jXWBM4h-)#i_>i{#2 z$12Ds5G`A+4pAL}p})f}v3k@JsHMRY_B?c7PN9`#pz8FgbaHe1;tl=tqPkx$*9N2c zH-e=H%pYiDqMbWr4a%t0AS#(QiosnW!dSnr9FNT4&;UV+25jZOd~7Nrg*%y4>rt3@ zzsFc9c_e8n&Ij5mdQz6_v0>7056B9011iUGWMvW(hjInCVXJl1_@F>XMc9l6ASW%) z0jI=AcBkJk0})L9kf${4uM>W&rAoy$>cdvFF(L>tb#a&sP-6HsA~YK9WF)`m;=`W} zVKpo_cxr)46X|*c3n1hlCIbaB&c!BshPm#dzD}Xe3cUI(X|ayy#5QfhY+iG9$_vMa zQbN5E_1>dc;v9@`@}_@L(JU>}+P&CDw3q_k zG24$X92uIegWUyM-MaJC%*E57?V@!3ERL~KR`|tN9(GVLVov<4KKj-1&x3+Vg^F+D zND!n&y1@*yXV-uwW5f{YZ?{oh?m9wsM@-1rwCy5QeI7mU*FZ8$FOSZ1W%J-fhvx%s zy0ppVlHxvxwH{S?mfj=a77T{dp1*i&u>mNex+(MM+zt~^b>sW%ZAlZ&f?Jhd`%H~eDx@K+x7YFn1{C#Co@^yMk3+<`p# zaNUABZF{%duVRs&bRi zM|Augi-0QM#LZ%&9wtE?6f5=OJ4OWB$gSi^P~w}WYD3z>QfE%X z0*r}I1d{S1wdf48{&gX-ytz3wNgi53%*jb!1%O&eG;1qLWoTg{vI1Lybusz{4w6GBQ&`B+bVyF=_pJ%C5Z*LQms6GLr)ES5T zQ{~yiG0~%Vc8udUM+uFxoXRLbYaag%zXK%XZo3D*rgl|WgS|7@!jAQj(0dBG*BKHhP1esir1byxdfofW^jb zqF7bj$9~w8X!){9djBQzhg}%dT(PBr0u`mU-S7|mVb5Lg70)}WdZrY?+rx;QgqHng zt)YdwG%a#Bo`|=Nv@C~))8ia%8pc#JX{Mltm|V#VtG0}p<@eR2E*3h4O8LZ+5-_Cr zBI);mi9BA-6ZvFhkx%^>F$Ku=_0jvu4@5K*w&(-8svJu=Dn%R2W-u;I60V9uJ}Sqo zEB(#Lpl zj_3m`9o2Q(Wc(~x4B_!{d2OA=qQDEL%GUs?VnUVD@3I zSRcYPkF2ed9!(-FSGJLEYT?}N_@R{!wPHHMaH5eAd$vJIUQr7_Nz@15a`xI{_4o;w zjMB2pV762u`Z}OUPnl;%IV!u6j!)iLox*gf2;Jl=>63{#Ljt>w?2G3s($77~8{Snj z z1ffx*>6IdJpDuc9Q4p;wjd$q0gV=qGoqc}M5znu0I7ybO=20w~jEuamAhSqWpXk0| zQLYejcf9_UXN5}Q#lciTtzP&BA&iJ6-H_Q@NBd-9&3nYGNIXtF=RGfQ1|%rGZu$b< z}O=g?NAhG6gMd!VXF-g>(Q~desY+dK9}s|Lx?)NQ_+@f?P$@YiR|n46dNuIQu(Pgcmbq~N#n%^>AF`fVg?lyHQ94nhtdR6 zm)h2}jGU_$)q%o}nC_`+W+!*2nbIPPMjYr&qQk+wDl2>YL-d+Wk3c<1m26Je_+p&V z;6*i)%1Y(?Vx5(QEC!`Ih+eL$$Ew}B5AOHyQ7elJnU z0!#yr1u5!SO5`jl8yP&p8mt$2ksC1&HWkKv6vVT^628Wq-?vxnu zH$zO7lny(nQnc_k#Zwoo$SzMbO=1GzFrA7fFKJTLfQhfxEcq6>UT5>BO}~w+(+9P% zZX4Ncl0Ao9dr2-vofr5eb|@$$rznXqr6dU@al$d=f3&MBPWu0`vfmxUTXA}W z=4b&`$lcI4j*N%9H_j2upUcZk{fK0Sc5sn6e7q>oSJ9T!%g0h$&Eg~}K+jb`bNJb) 
z#Mta^J)!tFkm6vif?s#@!Y;j`(5@oiWavqa*}__q5@}c5@*_I0B44)Nf%0ap12UiF zGux4*qOti7F6IRp-*1s+Qq~4{sVR~0&NU`EfaTswIY=V$&t{2|G@Ykxx6{SH$e2WV zfzVG0DDUo+TUfpFd_T6d;>EsY=HVuy{HNw=r%Vh92^t9!mb#%VT}4xirQj04Z#k)n zGa`;VA`7+Z7*=iQ+-cY`)zo_e3%oOE2Isr&6I%6pN3v!m%)YJ_YSRq#pYnO)FXJhQ z9hwF4r3+JGdgsq<*o-fdLEo2!L$WhX4sVp}?uVF6%?zbss0$TlDPbeUBXxtx3S(D| z_gBX6)C%dAsecLYu3*cFCj%+Tnf*#)Yj=50+wk}hn1+3dFPi^KmvO-#*n%b!yY-cG z`iUOj^>!gzi>K|U3T~Jj+ivl1mT7v3ZxQ4yylEYsE?=1#yht{WR3)d-QxH?WOO-2aC@<22qoJhDh%-co?z`Z9gV6gf!`Y~Zwrt@d zeNO%TPy9W7$pp$SA@x#EfXg&V3k2%%ee%Uxvk;vSbFoL`#awBC*1!&Vz%NUNKl-gc z?xhHY&FFx+W>>8BW@le5(8Rn9kO)kIB0-Y9BX8%KOM4r(<%(5-2{hL~&G94=7pkdb zq&EuV3 zN>sD?JAHh4ufToJTRsOj)Vjo6xINy!s}OYif|r31@4YPM6lsXcB1t`ZYFj@`l)j<8|Rj>YP8SQ`g+DRc?P?H z*EBw|t&1^J$Po^#y;Uqmo`V)hUps~{2t6;?>PWKmeeQ0OxW`lRPX#Q?u(EtvnQeOr zcH>uow(QR|U?13UT|?af-v<);Y+L0xr>a8;ZNM5aIW6T7%miDy&GQ&9PD{4vyNuA) z_#ama@~Nh@qebgZ8{EaOs11Xk7S-`lX^haMx>8WPHJ-}I|ulGYHZsuHg zN3+&YJ(mXQA`EGp!cB>uM8@VU-*zGnk(HV8GE~FV?9g|WoE^gT!1jx1S^hjm_-wt`yp+pxEIA`*Ai;l*Z$L{GF%!)<(asSP>qqY1U+!~Krxdm)?7I(S7=w4Q zi&h-DCX!4&WpB1Iujg~DeS3lTYcb8*2beX3T~F8aW{yU$vw;dM)nNmjhA)X(MUH!r zCq}>Bx64U?-He{1GgZ)$L!;fe1TpvXDYqq6SabOZ?F$T|hkF!r?GDf|6TGMp3=L>YMILKt0-}s?pkAca_0|N2UZjqTO!247amC zQlrGnMB30X*q;S(lFm9=PGwlcZEBjUwLnTPYzpT;fY9sS5X@~k_yKrGjk2v= z&+|vr%0ZdYjc<^E1qq;h{68k8Bxe)(%yW#|`WZ<%V+8#;a9G4Hp$}=qD4HUiV=-SV zn`#r^Gv^ERBKQjXgD&2UIx1j2q)I*C(j?HRdo}m~Q)q`_b-xFgUmUj4U2#yQKb#O| zmbC7Rbw8o%4ig5Dn5?dY?}!dH&+;sm3>5c_(DMxr9!gEFc}ZL=>0l#4C{N zk48Dpy!>z7_0KdaFJ z;bb{If9qy4bC~YTWx;U5YD{^~;}K%Hu`{(>3A{wMx=OAL?dtv;4$e#=p)8E0&;};b zddja6-bUX!b@Fm9v+>y;hduNmty#EmN&HNQ)#1~00X?#3Aks?_(VKK;w&WA8K%$>g z;ST$NLGuEy~tO+$(hpVWirR*@B z>aL&os3LNa{fj>UFHliG1ogEHCk?0 z@hiMsFlqeskM%?Cqh+h$2-xonI#Xrl=8x7G@WzUt`=>W;P3gGdc6sy`FYAynk^SLN@`-yjfh|g{ZqW? 
zj$41;G)42M3O?eyT!V|BeuZTPK{#8aDVK@5Ke%N(;SbzktiGZ?4{{KC+3pql2ISZ$ zUj?`?A;7@~MHZyo;FV`)6}_fZv?Fpd*-jAelX7teY=3aa7XN2105`!V9v_?#+L*?b z4%~|qcD0UVyQdBm+|qVFvy#&5RCYUCuMzE5rU>`wS=^bO*P zhs{uaZI=_~Oamp$5&}+EGe(w1tBDFy$D4&mNCo;ZZ=;F2v)0Mt0)39iDz4bb&0877 zx_yfs z2jHAG7G!sp=l2>_POXbw?4jgcFD3v(SZa)!IjxNF>YgT+%B%<3*uS(MKi*>oNvwXd zpS*?acgh77cpRmIy$+?ndmJ>H-BM6nWWrfbnP*;Rc`nmjpsIwngUa1z;#-A zvc3XwmuL87#e2Xyl~Ue?U*Rm&g99eMxuCe#qSH*>$9<7{I_=%=j8Gfk*W|~kVGUoO z*2y-boQdQt`Xw>jLua8hs4iW_ zY>&zNaJB;O3CA%)%Z=);nSzG23s2WHORqD{d&=TW%FpEDa`8BvcbnO@VrIu4SWeqn zp827uYi^f`xjp=z$?x1oqF3nmjh=oib_Er?5}zMQD@~#M=lP>1%vN;gORans?9RS2 z7TRL^LE$;kd=&+^HX0Z8)e>_;cYITBmQxD;l!0r@5F5@9#hUq+2rn-4CO420XBv#t z$}a`7_SHCfBK*`-btP!~dxbLIdTrHq50gpF);zy%8~WouJ%U2s6&u25Cc?^N;Ascw zI>&>Ijz5xE9gtT5RelVqYUFQL+l|^@-ETbE7|f=S>U+064R&}%Yt?ie7aQLI-A)W{ z59V)ngWui*!}+5E`w?%hs2?mhf@|>xisnzL!uKo^&|F%h8XCP(QG#<R2celuFD583Gof2f~oaN}YwQzEu(RhR)-D*-oUZuZ!| zfT;LV>FqUH(P!zmp(!RvNZex?eX9GVC;Dy8d3E;oAM_B7*R2rJk6 zHI+4qA(hGs1t(e9HcnUXcom9I=2Y3U_Kc0F)&@7=$rUNR>mmsNr~?-Oex!Tpj}8rf z>fQUgs5+5il?R6RH)UsD1JFzR5{fww6XT(vrhTHWA0--iHM~IfxjaLlP+qF*lga^M zDjw8RH8l$@hv_UeQ(hTUQMDK6vl)!pBt>ChR*mVLxUt{^lMyeI`f9!OCgbxAZT*(# z=4VZ40WBVbSAe3LA%lduP~u}{Og?~^>-Nz^#FPFZn%sq;wN5E@d^VVAgJsxc3Y)iw zR;loU?KQ~>ovh|TZTKh+_@g6jkFNn2qFONwINVv}%tCb2+a-|s$000gbE(deO7C>F z!8|IpU9IQ>zC?puK+pag=2xX0qzhgj=j~mHP6GTTRrTDTKipLJCG=`0-XDvOB0 zIC?&+^pT71%Nz2-L~?|5q0p3VA80t{4W`6_^pPL&IQVZDUg6srcunl zHma_i>QHZ$Y(@EabqLS1cw~G*+5RJ7L236BhDn>P(c(l z@*8wMA#&qmd0(50r1e!Krt2+whbt+oul@rtn5u1U?YT=n z=-ePjK@%-zBcXP?odiXnfn1eU35pl(Q5R@tbVGT(oO75 znH2vIxKV13z%^R4d-PlB^7qKA7MvU+pZGK*el@G5I0-Ic zNt0`-qS-Wy~E5ePyHTElFRLEAo^L$67{kP+Qi|V73Spivg1Tj~uh7DUd6I41bz!>JWfM_i75j?Y!*va3nqoJ~v;nmxWc!ur`Y_wc3-#|ZCzj+vTZ~NGP z-8-iut3Iw2az3c!mj|>_0J|@ZhXP3~SoK3MC0J9cgrj{}y^7yJ`@3xpT+EMFeQfE+ zS+0L43cL@#?KVoF4WO!Q*7LQtyWD(w-9kaNoWp!~ojY_O;+rI<n~(X;$@ zw;9h$OKItLHlI%v{9m^>?n|u=KYw>Kqzw?$;ZW5eB&+Z18kL^YI3WM1_C4CD_M$i1 zQ0E@fWV*<1ae8N59+@FOxQ98@lPo$nhnjC0B!E4VgJC^`RU~T@Pf4YLceyT-qs&sm 
z>{?pFAf=$ifC`1w{qKL0dnGhfp^2Ao_`bhL3{j@cwMXg>1?<6&sl|g#X2UI1 z;hssn}FtAG&qx-4-T1m#11URSn<=eJG|~E@ z_g*HEHM&1M*VU>|&Tb$(+Jt~bUF6D?z~<~rAYXe}-L+-)8}tIkV@C(8OpjfEK~F}> zfCOcEQ5CYo2nw~oM)_u{uC>kU(70?{O^o zdn_GC?DMd&U*va%X)HBcg0W`01=?D?=-Pi;76H=yVvOa3Pf|a4Sax%%L{W|L(|_g* zVE#aSdjfW>j=j_;yXfX42Svh}9v$IkGq{fVtl(RG}~Sp=n_+SX>^s{^EW;` ze!I_iJRrzJdSydH9GBSva1v9NUZeDuEcyK%8;R|@(t%qxDxUeYsbDnb;kXQQuoAV; z(hxs}tCfwflEg7|bQ`Lxw?h6tKcc8mq!IJ>iWN%22zwAzA}@PJykrPw8W>+5FGA2+ zMs4NL#yJ%q-MgmwtZ-F>ddcu}~Xmj*5i#EZ>2GC4q*0UNrWlh$#Kl$D!y zn0reYLQfnQwI?LeVx#aN91G%z$c#;3egb#B2KgokJG+z$9*>V)T$fxFZRPQ^K+Uwoad ze%(E?Z|r~37V&UUZWO#-J_%T8E1IKbbPf*`Q_ph0$0{10mS;jy5K*MJdUv+J24$sQhg;OauKU#c##!$aszQ2Ftu=ARN-oG_ zv(p7nz3vDF9&ox9mc2ebGyG#>g|XiBd6U<>+3s*xq(cn&Q$|v|1vU_8$I)Rplhus<}xu*E-0u!LgaR5-vx)WdXr$h*UnTyCjSnW zX)ePl?$eIlj+uvORj+6y;fGb@k(uoK_Jm;qckG1GdJ4e*>!6Gq1Wk>o@s41WSnUvX z`EPtshbT~H(BZP!EM=MvoIrBH`;_0ioAS?Y2~w^G63!!%ilu5abYh5VC4Y;hx zaDHc13@I3nEL#V%IEoY%H|G1ZC4vgY-(4`j*hf5fKH8x`UIUL7%9&pGXz&`PRSzi@ zQQI!o{bYlI!A1f6*>9})Z~18x4wKtZ@!P!YlNas)3$7qUn1{i|N-AD6xeOZ4-9~RR zi;RxrSS`B278FFVl#4Rre8iz_&^gF!*Wg+*_->9NbZPa5!>Pr9dey%Dse}C{Q?c#t z<@Tq?H{7H|6JK;`tgdd_DDuggc%0?NsK&1>(cIywo(n~*b#N(weF7Zya^YI#egTFb z)~$|sK?s{Z1_3R`tHlgw7E9liIUO*Pr#T~JliMMd^`<7Fr8QW^{7gw)s9aVw>V-13 z`zOv{x6L#$aa$ev!5ZYG8yQKNuyTy>Fk=HgZiQVrPM6CbQ@HhEH_eop+N;K5jV6*N zc@5NzTn}9?F-3QW>v!2b9ysFyO0wm1Ny*J7>9}oukC)wy?s;K|{a^Wd>+Q&_d;R1h zX^vCo?SFs9kzUp!D*!m5`arpLCAVjrt_34Z@3C^C6){nRYYFBuvOA9J4+)JNeh@%2 zYm$p&{kTFx42Hk?H(%wuOpaYB%(*;T&LWRkEER{asGRr#=p#q{j_;V#x$=MOv9$V zul&-6qfBCBMwvxre8ys8ggV9< zL5qxM3j(OA%c|;^B?aNoKAE3p+@o8s^NxgE!w@x+^VBwxaNgowNTAowc( zj)?eGO&1ap=)XVs$cB28Pw`UNNa_dt;|qV^{{OR?7X)GNtnd(lGC?3Tq%6`8S+M$S zsIhSx9ZOSKKLjNv3c&v*>wgIT)C3g_e7>7-IXKJd0E)-8={w5920AEA#7c>-EB_HZ zu^%idy^^DwPe{*%jJ7gJWNi}ikE%Np;zu=J7DHEvor5HQtMXb0H{WQ%m-X{S#KQ6! 
zI$8{-Sb^S@%J8w_DIrP>kvHWSvig7Y{~vw&`5j0;D<3BH*WCtGk)ZYc97P(pBdnNA zW_SH+6jCVwgoJCm72inb#9Muc2Drx118xQ&k@cPhqFNjl+Q*r<=JC(%l4&c58K3=f1G>nJzu`f0~E|> zFKc=hpg3EhIEys2G}Olj2o@Gj_qQ>qP%kQ>>B-m8A>ymJ;&o=q=T7K0BP<;3?hRQz zG$`$4(&+B(FyC>R{!RE^_&>=4^y81;*I3g)z|a{1(z6u$2pI`seSki$9uanoQ_e=i z;|&K23aYB=L^s@@|12(m0}cx4SsA6)q;S!UP4j`Le~Dh55`cn15HS-%OXOpLiw@8? zj=-7O5%mfC3z}pgJdlLuX4O+LL$^K2f}bWe4cHQc{-dmW*1$RZquan^^7CY9dObp0 z&wIQw89YI!PQQ?Fu)r`M~mfP7B5`EEL~nDGdTn_NmRX3 zCVlc;zw5UPmtSr6AW_;PSwBzdu6x6eG2}Yg69tkL2c-x}m;Wak{nun#_WMGmnjjqB z7xf~2U>mzj$Fh`G90<$ip3L$I$_29!fv>Pek6TW1J+n~yOCxqCXQ~B@6`#>Z?d}4l zucUf!xT1GvK7=+J)R>Cu(9EHI<(+_?#m(>A-kB_%2v|Ngy)8L4qCSR(pvvat-0KxO zn=5=(*O{|fExp(8PP5as6+Gc_fBP7vl`_Ns-vRT#x|oUnwPZciZ$9|Tm5I9UVG}-I zumR`XFjgBfCaP`V9Tl0(822_N}Sr z;7~}z#SXCo5O*9|?M|M2$R(@)%{VV0B7&(eSjYcDL6!^JnQ7^Na#+A;zn;UVgl{of zG^aOoXM+<;W$)=>7s&b++7%Gbx7)ZZX|qCw-kkQLGw|8bTl5gGn=hVN06dP+>ZJ>| z@>bu3ViJjrR?<@pkX9;@1CNXp`9o*riArt^VKz_2-QotRfT!E!Uhx^7}WuhP4`tu_&*_4Dzr_zH{a+$ z*li7~S(DfekOv-%Lw>c8PpR^J-*bdsRiW)^;ZfN-`0n#FvZq+dWX|!DH{tnQzB&bk z^ZqE}0(Ujro)*9CWq)z3c(+w=FZG{s?aP)y`~q=;zO#+5!~$+fpSyHPt)>XT`tdf< zb%JOoSvI+Wnn;NW#t0|m>S`dWr_@@7JNdH>v>KZ<{Gworl= zVm>uTTkn>RPyb+N+&V0Lvq$Zsc01|q9Mrx@4h{7(b5c7o%1KfigRSVl8NKemqvscps9!Q606OvU%=KaQz?kNO2TyXjZEwth%48@t z=q(ya&n8|Wpg0)Vb^gGz=7AZepqwsOUxzEjTwp4WW(=QBFSJWk?p4mF!3|>LdI~UK zb;Bc3clsiN(zkW=p_R><33~*Qwl3iogC4Dqk2cxu@2K&d`?J=Ej*}Q1m~*0N`o}nu zX2p|d|*VT?=;A2_m51H z@Xd=|VqXbN82m<bfyJj z&UOT}Pl}JF$5wNyo4nNxi~X~TpNb&A_V`?6qX&y{#x)2sM~ki=gH!A43p-)a)GYH| zOALk=7<&^XATT?Po?w<=p-GTW?biL<>#0>Hi%@-WNB+Z#q&ZfzFUYSMd~>O)Z@$} zG`@tCs0gx^y=x*=HctsZUOaAbNxhgL^QDBWZJk6&-m?-R6D~NfxEKeY1t6 zXB*ESrK^ACatN@lQ{H-s?qzq}A*R*rh}6KrAW$t2TW@w=TxT34%gShkOy7EZ|#2Wu~~E30yk^W!M+`DJlZIrv`xh# z6ml4|ig)Grm&ztj{x`ekW$G80Zm?a;ws5obyk+d3cdjaGM@fLpJ)74$_p+&8LnimER^+} zD_c6&mE6<}$ghbf1G7lq9XzwnKmYpwsQbSy1YXfE>*y6DKV^J14$IC?&G`woQlCc0!W>p#AEZMD zN?8?SXa;`Yj3ZkRii*446>~AI{ZDHiQ79U{?_pTs)fGHHbs?CWAQR(+-&JKU#5+KK4jo`)Nznut>7VSCtuod_9t+lL+APU^IQw 
z0n8QXVgM!kOd${~%NI|}xKo}%84oS-F-tzRGl7kL{7V!t$58tu&iju#@aJFna_ZF= zm=cix`M>|}pSJX0FtM+*Ek6J6owb_3J8DfvbsPcczia3}tzy{{+JFDf{a@_756=hv zN9*UW3_dtgU#$LzwSj*aS~2B6f3^TkDlzoFewYMH!Zw$0A!VSgb6ylY^Qd! zL`$L?n^=eWEE1_WKZy}oUC&g}#o-@}QEO15%@_}hy)#{Zo%#zzixp3?;5|uWQsQhmwOj^ZKQggT&o%w@gt;d6nxt*^Iuakwu+bELU)#4;DiO;9Rfr`kA_$n}s) z<$UK%i=I>sbYSi5Bvn(YV`w74LxE}F_=i0sd)~bo^(TG2kjyr}S@4~DW95hS2tq6j z*TkTx3Gp)-`f}UcAL-(7x_*B?Z4h15vXE=>M$^bPkd9uaSj5Mo9k%&tg08SuKw?LLVKs8I&S5`ZYy_0He(%ZcLBcu- z`*s^+yp{*2u0ji5%ghX(!#~^jN{+PU2?oJR*X=W!QDrst{r$yW%wdp_p6wa5j_GB` z_43$d%)g^29)zpl5;+lw_5B?XwVLO*4BdyLR|7Bm zQ&qTtWn>yOB7fl+LS^bUe9dk?ukzlHRRQwVGP1yX;~-O#TO%SE=7_R(n|*>&*ZYpo z-|LYE>7Nm0`n~R3uAjVIop@@b z{&$no+{*~Kr+wLt9<^3FQrHqGGn!Q-IOocu16f`xDDtgCM|h3zv;#h=AJ!*K$bPSm zLdiWYb+j^@S|p!_JV5n~s8KSPJvAa;J6cNefQ$%{s0Ih!#MjxSj^~6b!ZCzH_|J&` zQTU!tISY3VN5qx|T0icC%Y(>9li1Hv(WFJ8vy~>`QUq@PX;B{fhcL?`#TEDi6Y{;- z!*~ZH`)G%zK^pVT1~@lat)Rpxw#lds$Xct&>D@v-1tYS(divoOXo(3@% zJSHIAfeudC#>n?xK*g(ddSj^D>vQiaAME%`CrS0jV1>0<7rGezbT%>M=B%Q1gB?B~##dP7wuP-t zo>YBkB&q$!f2fYclQRb}fBqs|izw{3naWG?z<}5A6Is6#Jr`u5KyEsMo$=l4)sdK`A1Cs%4At(-3c9K9UbI> zo6gQ~zH$Kv4wYgn$qb0@;>;DA2O4Xry97;2i;F*h?HU^FaO2dw5eDa7FjKi57y$e3g)d=w~k=J+_^aX&=!5a`W8cb*GFL< z$0-_!kP=?^j$y^D1vvc399_PefZ;uxphz~e;L}gm1YhUIE{HdLso@1Y4ys>}rOw5|U4tFdYVf2(~m^82f?5s@_zo^q* znw)(v1lZ-ogELsYbP;ynP@_ffkr+Op1sn_IhIV&o`lf#`jgUfwfDjM@ADnf9NIthMO!|&dH%$qqI`|jjHpE2KJL_g+~NBb6u1Q%xPtI{9GRxHN4 zQvql=@CyuTUtPzoc^NA+KD6WPW}STDzm@76qJf8RzmLFuo!ceAA()O0hsjd7id(rVD?hb>MLuW zA8+qmhFPo>-2cD=y~a+Cvy>^DUo2&^w)g3myS{x@zuXg~miaRQjCWu49s4o9&REJy z=>=;Jz>Ix?PM$UupEoKak-Xm_%@*G%Ism73ufl>w{~)?p3yc^)5}$D}m^|!9SKCM5 z^xE=}b3#A}2!W4|0Q*Cdj}Q<7*_ePj(wp|r9YytuSyAhuQ|XkeA1PJp6l<)G@}wO< zkDwYhrex;Ok^Z;-GKIt_-=tSjR75BR`1z2Jj}Li!d&m80zWegITYG2IfSyBX%EF`M z?)mD!-!#8#{$9QErPrb1q)4Uw6f5=#{Gf_>N!xxIOEvSAq&5S8q~rHrQM4juS!D{6 zSZ~_%%Xq3=y*o|)^ECN|$50lRm?_oH%1rp-n~wZ3ftoqmQmX+I=ECOMS z=+r}Bicx;*cOorlKlmvn`M4aQABQ!kij{d@|7{n!y$Yq2MWt{058gnjfDjM@LLkct 
zXq<`5M+kf(1k|Kf^B+Gg@;^yFZ+_GKp6s_IN#ErC+ppp;N)25ZPQs~@QGt#WAo3 zwPv1$Cn*+>PAtbSzsgP%11&yGAqn_*M%-Smm5 zTRgW;j?;awugA};ISOYT-rhC1re51GNToy!JF{0P2}bP9-ptfQr!qD4h@F5dq9YN` za)^vpa4QU1Mpnp`%NiCO2~-gsfw0gpM8>FKU}OwSYin3rS!PRz`QgtlU=D9(`QuO^ zdX4%9(?)kf$-Ie!J!EQ+QTpHjmp5ylCz^~Ni-~>fBcEjs^*h7kG&VAv$7vX%lz^$Z z1+00TT9}!Dl(C59{t*!s1%*1nI5p>Sn$YJPDe(Br8s4He4`;$^WAwLEFuY?G*qG}O zVUf#M#Ks~jDiTqVQHWKl**75fMXraXReU{+-pmP_{vI0>6PL!w#1y7xrn=7;+}~rP zqu%uQ*!cc#u9=f^@tkA`6_16`P#z00N*Ed&!`w2yznhw*ux(qP4y6jkg}>%t)|_SV zE7J=TCQsvtoF!q&eQJ!%?-UjrNFonIX4?h3ooVZ&_>n zpY6rWS+lYBfD0P*jrV`nuykVoXE|23x^l}Y>2&O?8EQ@z&Yx54`NqYwR=pc>ia751PL=&i~oh zZ~UJXNaX*FgEjEn8pCs@+Q5kCOEZ{i+Nz`QRLnAdofa7vFvpO2T3Y8~p5`#(ZQN1e zVaz#_*Kmd~HZ$k**03};)p-q~V>LdGWa@C-+`9@3=Pt(MJPj~->{Rq=R+%HZr*Mzd zH~q&!Nm?NUgn$r8k$@=BQbZ#^d^`kr;TGKe(OHG zLwL^7^!NM@sKafoQz|#SZ(qWtoA=-u9D!*5&8|!hv}n}^)ymkjqAR13BGq_#aXmZH zpMyQ#6)yDL_kHSue@~u;%VSSO z#hSpL?~Bcw)kR_R7`WZJgj@IB;1{ZZQlWrt;WDV*v;}HbE`_}O^6o>vyVHH_$G2RY z|FePY|I9kUTt$j7xZk^lYtHu(Zj={QYu7=Qvc+MU=1T#M;w>EB?)Px^!gbt#`ikd& z1<#qLC|S7<^Q?nBN^e}bbP4yKcp^;mBGl3jH5;}>>-tquj3bqQ@E4(4JIIGTsm80D zyRdZOTx@%o7o7)>!-W3LP$HjYqI`IhDZnZzoU%ibWMK1IlJRE(y zwufVWs}HuARR-L{{GWco+&?d%#aC=iHf>&q5sPP$v55x>#gx&h0Vt;a>LaM(2Khp}IOfnGI=rmi?* zOPny@r#N_EKlbgv0z>P(C{?})qCD>4(mgNOR$!%tojE*iK0;K%La;G?h>K^>!PmGv z29BGKFZ%a@gKgSMy2P!_;v){!aOTh1Y~iyMfmQmj0w>LF8Vl7R<6Qn7_Z zoK;s0tBEup?pPU6ro}1Z3`}6fmH}2)dRlPuIDL9)FV-*KgJ&^TY@P5N_xZuoxO7uA zv=8xN^e<{?Qib)NdgAb?bz(nn+^&)Ts(`#UC?gV(!Y*sPp+4 zOr82AoQi8Kr&1U++;UZH6ukLi_r$TIIDP&GJi-iM=THU}O4z|L>^UyqeuzM21q>QI z5Iws%p@3y9Ty}58>SgP2_C*k^8}z`JUkyhew&b!(ZLD#B5BGnLqlXV+-@eOGnp(r5 zToJ_jKESnm9^x0ySxT{@3fa6qpwCt<|ry-~Jct`x_=uKg*@ z{S0Qaa_o--uhDxnTSE5q5QTCX!p+kYCRX`Ss-O*CKe~duk9}e8)D?pU_C?29ycRLj z-8w6^dg%X~9%DZ)^Q2atUvpI3Nl0qd8R>l+Th^_{+8w75thPqC;iEBpU>`W-wMsDW zB(JSn5rJp7kK)k2y}0~L1?xg3;}noLE?$F2s1=&DsEb_igS&4u^5wIJyUSIazsoab zLk?sxb_9Ahazw5V#xi+(vdfu>mshd!j|Eux&vP{FHWX7Q^hTB9d0~>mZBpO#*`=LQ z4DW5k+gpQ4LZH)SL#}$02y1`QPE;`s7LS7 
zw074qa)0Gd0k0m>hTn!!4SRDc+j20iIr)epGQ;_}T1C8Niz33qDJ(RU!onjdMu&VO z*q77fVQtCOq8g3--!^)tarm92eF+Z}`adhf{?9h(;r~pNkI(fD^!2ET^y}KI6q<&= zqlD#>(w`3eJf7-VTapnB$+W>h`oF^u*sn%vwdIYQ93bKJhPl+SV;|}=Y9Z}Cd7pfP zg6R3Re`rLvdSp|eFcm9Mj@q}MNZXEGr9BJBP>YJDWMpMboyYu0S3FX7e*4zvQlfgk zmxEY(aA*+?YFvy8*Xv2Y?YKs-v(gV*27LHL`nuBoEq~CI$=}lW3FG;10!_%!pC*sr z=;E4Lk_ zM;<=p@BNsz%o|PhO6R7s&HB-@LwD$Plp@a8T%^Yp`rqg-lq+|2_QkV-T>TK;Od*%%3S2rJ&~*H`16vy=lY`f6~Qk|I)AHdy}2D1r;n@gle?zO2dC&Ne6E} zBA-BidVFCEjp6%U?qcPr?+?rAwr6mr1nN;KVktT@g2KbYC^R&TBBEk=E=)@Rx{v+1 z_N$}mwhsQ!Ub`Np?*=s^8%t9%G&ZGPNpgjB6dZoY`r!#Zth=n@$4R&IJ_NMTbhwknf5eg?K$=dnt~6~ zboR&8YWpF6dpkFzCDo*L)2xfv|2~c?7IL7^`cI}~E-!hln{qDtrcYX)918&Gukh~+}AIvjCNf{YH4{w~Oz1z3ZhCf%(mOUrP)iZ=t+MIVM zC@6rVC4JqqE}0f>Nt2iUO99cT)kRIOu5G6;J5?hSV>9YB@_V}cEI3u=L4u;Kjq$U-zbD)MCfpV9#XBxo^)Z~Bf5KSAN~H?V~2oE8FPnF4f7ZLPwgonjQORWn|0@ zJOGAyT&LADzopKfeMW8Dw#|gUR@A!fAey`82nV~+Ct{V&BMHMee89(13L9mUkK!UCVk$N ztV=Yfu{?ggIC5l)l2a+@)q}INU{WutRH6XO^R9XT06+jqL_t)Qs?m(TU$&E@lJU6F z^hH(-A6qez`V{>d;;CdvT3sao~!A zJihamsZAqiZ6N1ozC4e_-(%HbPiXCo;Z!!4CDraaigsLmPLcZDf3;RbML`~Jbm_!? 
z+Wz-OTCr*i9l3a)0^hu7(0YmHq|ReMK8M6nZQF5FTfSFox!*N9WVpOV1 z1DdvMD?JHH{pE%_CV-Bw{)u{Z=|VGB>?C*Ju=u{iWArL3_`9)!-`uPKH6QRJ9lPyG z-Y$pehcDZ5@EAL)&}LYifsk*gxy=OW7I&}(d z+;y7#(t2QcM=;qpwJMtYJf6~nhtJ4|6*O^5m1L=spYV0wPv7)vK-T3t(YGtkXnmka zepS*lYPMPor;wlk^7r>o`On9d{+d3DYFFz_iiiMm zbGc4uPM)IEr%z|X-$^=o`XW7W_lmRJOilcWTdSZ)|E!`>y~fe6>o1aDkO%FV#}BNffsZ*sARJm4L`fc4|@_5s)!~I{-*(1AX z#qxEu@5FUhcxn1a;#MU(;{NWueK8H}HHf}nb2Lt|_jY{vyBws6{Tq?Bg#|U}JC63> z_TUGi2zqwq6m6b6hi3l1mX4f#M8S~>=PFI^+=eiJdj9w!J@fRT2wfkd6uum}bpkcW zZOsa_Z|LMB-^2>ESZ~_HkzMQZ?;W%$*#2u+=EyBN=ouZ~yqJb|=}5IJSEMc@XV8J0 z8V0EW)koGERJvKpXqxsxWy^C}M0f}V`1_IP(+6~J-)fpUV;ZgBah$yLQcC|Pq&HvE z*;kTgqvJ6LexcCU0j&JAN>D*&qdU<0HjqhDQZhY74HJ0`|zeuc*j(SO3fB1^3<+h^YO$N~7 z!*?k(IyGf{!qQUu9!t+|9HS-w8%iD9)TWA6y3u#@dHxDcV@;s**pDwau~p|#w(5M8 z+^SQPR1r=uuAZe`D_79Et^4V^+p9D!)HQi8nKFet){hP?_=Xx6 zp8`8K%2g&1gG)3_|a{9n#nmuzSt=Vye9(ujAp(#OHX{ct-9ZESy385Czt+ApA{=a zZF&x+UDw=FRiNpeUiVf>N+BQwgg}lXAU@EtWMiykntcc;;C}QFPCbo=V~dukoBIV8 z{rZ1cxb_h8v>b--zn_i)^@_8fuDAZF6mZ!)AHQ`EF$K)B)F?B#K zpS5NGXWz5`Gbf$>pJ~fe?{n-wE6$e|JCRSpmBza)DH3;&9KqR_YE)^_f_=Ys+mIbkBE^{boM&r(_$Xntph=TC1O!|{_B;N}+#6-S=b##C)eobEbq-j77- zx>Zr5j13Gq(rlbRvn1v4b`RKBu8-EO+n|!h-^n|tS4mR7??VNi{d*i|o`k`^QB%~m z^}(8Xv$0^!KA6_)iz(BlVqoKhPN|i+e~5i_aiq;%SJmh+Vlw;P(L{-4?`U`4u)mWJ zUw*^v2Xa)>gg&m`HjQH7er_u}%3px3ryrqE?OvETZ3_Cgs)#)Q`OJxd_XB6#BpR5aw}#50r2_vVxuZ&&7@#2I%^g z#&^6ABRDO7U@42(8a6Imj;*_| zqGJEialtq0Iplj+r-b!?^ZmbZmX@qJePaJ-jq&xA>6kpIUdrS01Ibnu@d{_poW+G3 z4-pv}h^H=>*ztN8S`C|s$y5KIy|Vy_D(m9-Uj&g-v0K0vJFvS0yW4eD>~43h&(*bc z?T%H~?rvQR0RsdDMNpCMo;u%ogM$MM2#Txg-q}Uo4Da3e-TUsmciuVwGXSOXq^%jH z15?6BpF3AlEikP=DBIvz50{%B$|o!dxpzbZzDce88mEI5Bbd! 
z^iGvwFR+nyE|+dN1k~)t-w!h|s7X22I@9Cqrm>uOMFoQonZOhB*<|8;Ox7QH_#0d~ ze;OB_M5A)O7O-t>jeMN6DOJA7j?c(jWlR9<@84maUKfM}2jcB>2RJwdqPp!+Oq}); zs#@how)9r448pZd>#=;vCaCJRgWdF*Xi?b;rm1PQW_16IbWH3k2nYg#z`u>azs}?* z8Y~E8h=7V>!h*@)KZqjP6Y2fK!!)8}HFbz%+hKF)Z%-*yORGR-D4kzEjvC}=ovkb- zX~^_l}dBLuTO+frq$Yi;%^FyW)FoR4nIf|uEJw*k-d)^@faAEXhAc#-k@N98lM&` zdH8EO^Vb%dK5Z8L`s;M+)1)>P%4$s`XYVKPh);9Y-CHT->j;hPT%C$F8bm8jx==XJ zq4c~GJHCgNUu_#}Ta^_{PLw$3~ zlfM*|EmuM3e$ufP78cZc&}2H}z#b48@dDA^z)bv<&(rV_3J3_I2-b^x?{tDaHtMK7 zHd+jvLRVaqc}OY4=*l0{sYNkU%BE*Q{l;%3XWvM5xN;)va?~109iWyqd(8=5BMPS+?Z)vsov*$`$g{Jy91-)^)Ks%Sura8a< zLNmwor-qe^k#*alv|Z8_O+`;nXK`9{QRiIMd64wsH(WpEF|5-SKtUlaArR|JCzi4| zZ2`%fwlz)OaGiW3lYSqBx?iE`Lt3zQS2ilpxIe8t&)+p)?13gRkdepLjT~xTifVVY zqd(beJNi=#gck3#&wVbQz1|-kT28&HbEtjxk~C=AW_lhFl_A*(sgojv{Ycex7rY8L+&;{rB?46d@ikXS?F-c=p6zq1D7TC3;3XOZv?Csad zCp0!)Yj=`j()mUaEy;6Ym89K|m1rfe45Y&L3znMOQvSKnihW{j*LL6G9w^ zafqX5vJgjEh#RdO*M|yc)2FNkwQ2l<)AU}gfyO#6EXW%h8$+>iEX2!tL()sFL&3=b zVq#(_CN_@43{x}os3_F!9F6Z&pYm7ePK$Tlr=W-^J}#ExIqXX7+Av8c{Os9)V_S`~ zm1YAPA0ufd>3${CKePLV?s>Cbl3JI8wUN@eF4A4zxBFBP?m=rN4xl3045&o2A++V1 zJH={QIaSPF$v@hCQgL`^XpkSh<1n_M0C(CubtqLatVE;IGN5q*N`;){N%YP5Gu9T7 z%VZ?u5IV<$|D&!|^H78S6V#e+5wWbnmD*rwAyePj_v8@H3LRMk?Plsm9IazgZpM?} z%X@V8^d8!EU_b3YaDWcz_}Q^_H4SUul!_OwN-bE6YxTxGv~T}@-TUs?L5I&@r&oUA z>KhUjhl@!f(-EB5B0M}?{af1p{2!@|NQ#M*w8}nn(s8KWqa%D%Rx{zIY~a*QR_-tARGzFjQl8}C37WOvC zSBJ(dJwe`K31w*N`6qrWBRy%;tYKuu`bWhZ^`*5OzNLNhtYY1&B=cR;v69Pqe#$7? z*O8V@=tYH#H=|L%9VgGgNVTrgC&_EwB)9*GHPGCXX`u1nPjU!nT{f_N@+MjXNeYqt zf^iFTzf3cJYEDHfb)&hPZjujcenqR-M|rAqDZOLOD|J%+vmsOI28XT2No#MiwM5gM z%=n$*vE@otideN?m=CXIfxMO-{AD!Rm{q0WX&TU^wT#!TBx{*eSSHMs&jO*M*vK&Q zXB|pEPLlEJ&R;aPs|{7{F#L-QXgcR=s_m23&$}Pj&(zk$FIYcQty=33asH2Kxpa=2 zW+#i5gK0e{u1J!gMyB!jN7BK22^wh9^@WsNBQ{oZoxo={udAAC z5$XCNE?$W`IQNt2pINc~ z+2)(%9~q~;D;ZQu}^FFbocC&J1XKh zEIN?h`}k3S|4V*&GK*RksZ9gE(16xj(m;Edwg#G{2dXi+rEC9Bc1G+V2nYg#z`vJ( zFrfW=$4<07g9N0Dy+iX^|11}KHd%F~Uv@sA@FY*dRpieJ2S#?S!yYO6l&4I0TDsR! 
zy^lf`MqX|XbnW7Kx_av#xxewFh$t4S_i`iqzpv9p{`}IlTlDC;7X|ASNtTh@`4!Zs zej&1M*@u?yJWbawU!n6CuhAXH=kz`}if?>VUUK<493s1ptbta#1I^lXN8Of`doc)& z{#k_t2o#EwEC3Rrbe@-YB!qB-g>$OoKrhIjW z(oA@fT8nrpvuD^xj}5*l+bD@rYkLv0MxOI|n%K7qnHU*TZuZVxu=Oq*DHHbbe{_N- z_N>k9+p+%KwN!b;`#aJ62|XwW-`M7<)sq$<{X`bc5A#&#oJ+ks$^MdZzDIc8qFKY+ zC3sWxVQ-2j+TIjO3TMNx&wD=#XZ@Bg$=lt9936Q*Vy(XcwzbH(Kw}y(X)9g1 z?MNQHHbsBXxJy>~$G1FZj?EuOHmtd%Hjud|^YD}L7<|w`Ydd%%U2$V$laBT2V}V~f zZfr!jvYviwZb6k>bf<+|j?(3em(}az-wv+yE|9f_bkv{TvDWP=6HdYY?|CipReP2u zx!>0ASh}~rd~8!xm1yX!mZkws5fe)9Ub@m#7Y}+L7?r5umnm~4MWZtQfyPs?n(b#8 z&^|R+Gv1t}M|{EhnY18X@7Qq5>rsL?tueJ4Je95`OI)Ff3!w9>CQ=jjKsC&kn>vhG zNVh$LSW8Pz;Q`+C=>845c=;OLb8=^mt#I{&q8Cpc>Bf~Sbm9DEx^>5qJbi-Lqd4Vv zygcj$t)DT3EDBVmZXd_dL1IbD2)ey#0kyNrPsaSgO*#KukO;$>5}GKC<3ec#to>aD7JZuivu%q=uE8uR-3C+tWwn@ZbjRoIjdcl*~(Y z+jOR-`%ci^Cog!tk{)E}t~*IXzT{1!L8~?m{2T+CJb*5)olC8X7NC}_fp+Xu8fY2a z|4aHNjuiw10YTt9Cm=M?{%tY0qP?F@Al}aj%jYjp>z|po8iqNuXP}Qwu|$m>l_DH> zcdo&r-`C;VJ3`~W6IiQc1R9me34hnqICkuByz~i0v@!;{OV>lqlG)+!;exl3CNML} z!a6TL2+2|w?YnnF`vzsyS`vwJ6p9dB_+uUxE!&K@xhkM?#RjNgkq^=F!3Yc`*feX0 zj_n)5GH+6Sn_!nyY7Ml_*9|dx<}A#xYk_=5`YH7_;@{)Q@AL87s$Fo+%lc=(%)#Vt z7Px=v1UzMV(4<{E)?E2A9k)+`XKyCNvtAQBX(1cyH5p~khUAh>+GP;$9~~Kii0C*N z7@HxdnJJHj3LaNBV*dYF59*!@eJ9PvtZ73~$~-~)L=_*1%STV(HTS7;yS6B+OPfyN zi(_+{^@@9_Glg~Irguj%v*a3RO87XQ#Ni{yVgJSg?R)fK{?;(f8ij|aPvhLByYPt) z#tR1rJbUGfJl54v$+{d$R&Ic{ZQ7z<+59k6>sM*_NVBbi=dCTQY4tk}`c{G6#A%q; zqZaZQCvSsh|1bQl(X2W74vj{Q#pK?s`DL6fRVj;DrSg6~1)-_v#jh9szForeIMz=xHoIHP$H9!{lNUc-1 zV(TgB*X+UCJu_g}q%3RPrPLT?w;)7?g(60&2NScLFp+fd_%lhLj6;W@Z)gZ(L!R$? 
ziD!JW+E^`!=X*?4qMlY)jEK z^#@-xZV1QKElaRu*+#s~Zh;CFczv`ifOvT*g2Gg&+OQRD+c!cf)`c_Bw2^B~?^q+= z52w~F#*&pg@YtjQ#<1Scl%8eaeDwmeH9_5$tx>+<$FkX->zj!b?kypwpCzmWPm7dr!Sr>x<@->pK7$~Zp2l@ z7}kXB-Jm3lKb+5>D(RDR^-~3W>hBuHpRs;^s%RG8!;@p{)H;i&oQ=^_t$)@RrSqA7 z7(*2zU)!^u*YEga-zCHqt&I_rroygQQ{>kV#>I1|aNF?(f#JtX^w$?${pIMRGx3>q|G>;|i}WOR)abYgm-6fChESp@4}DVSz!2 z$XgXH+uNdT)l$fjsQJh}O82?X`e%<$s>|PY`V|`V8Ou79qfjpX34ecj!lYC^v}suu zIrw@d;iZBiKAts9M4Z9^COLAz)WiV9Yu66e z7hSe)KVqwQQR_1fZB|a*2VOA|^85x)oqq+Bl6BF#C4Zh+|HnGgGapqkzPNGu9FCp8 z3!~!I(4li@)GA#V##v+V?ACQ$yl@j9{yup1%muD5-oT)!6)IG;M%gO$(V|srG^kh{ zIlf54?qhp(ZY64xokzabJuznHKvc0}?YY#yf|3-YihhH?Hmt+a)q9bzfi1?f&S=w$ zmM}?011+QbC#g)l5d;JQLEuLt@SPj@MDqlJOd`M`jt(0AGd*hl(=@u~A!VBUP&|jS z1-!gX3kS8OqS=k9NY(cIG?7E^qWP(AtRL-PGmZuf9!ATT{zARmR-huKtf^6BTN*ZE z0&UoLl&)PrMJpzDqKcMPXypG6u=khdX=OtB@(@=J4ed>Zj7`a^MpLqzzLpLhyF>?f zucG#i>QRNJBWT+tSBl~WX+O7&efNN74Qo%v24!jJj9v6n%Alpa!-wW8f}YTlG2JK+ zhk%r_9Z9?HdsEoE`!sjt0Q!0QYI^J+rSrHpJ%S+K54whlJz289%wRhKDL1R*UL+}b2X(IOD>SVC)}z6#7}&PqjTZ45)Gc z@pMjG-$t4PukX?CBf4ec{~Nbb)^H_*Qw{}ebz~f=Ww~Ox6kR~fgLn& z(Hh!z`W|^no{2inO-;Q~zHW4E=SmtodL%8{w4WY%hfrL!KV4Wmk(!oZEw{XNY4q=> zK4!^d4~U=_H|Uq){b|_rmGr=y=N+F=F##{>_Ju>VX)WvHwW&`9jEhtKmILY6B`ay| zxwwGI&vBwn);`D0|9GV^5h}=44s5A^kjQ1?@Y0fe!9jOI=zvp^8n1(z;Vl6q0OC zrFX0q!7j9VN28$`#csB+k_W;xqhRu)gB}Dw&vW< zk(LLVoLtW=rJ+rWQ2sm(Y2v&S^d>wZ6ggvag+1s#gvTd@z$JtZr)yjC&P>txj~}GV zLalv$d&ob&-r6lDrDMGD& zTBy!aDrHFyczlSajT=J4W-g}LM(UIz-*s$aYG^vp+jhzo65I-1ItE>5*N4W%8|p0URsOK5O5W$&l< zbYj~O&ZFna;gvt@x+chNNFH?QI)`+ed)l7)+U-|w zf1w9jyg#proGG?Q!P3-W@Go@JjkDvX@uiW`{Zq78d>{x20)oH~L_maa{y>8%x{?L~ zZT&MnP`#dG=#&#*c(I_aQX!*AKX=-*aw=7=P=ZV?Z1}=*7dgHSQEOQ!0w2@Dsk3R? 
z;)C?)&Jh~Zy*3$}l%yuM^XcfdR}>b@Mm83{zre!x4f0#i;AtDlHJEQ;67MKjXx8-{ zjq6>XOv==up}+r0uJ1mS9p*->#CrvlMO|DF{`sr=`x@x$nWVz+Wfz9G-~V; zI(F$93+QUKRhln{Sud|zerh&gDqVUOknAQS4Px>jI=gBzH7t^o>UJAVN9?`6u3-O{ z-T+D$_-qhTWY&*#BcO9&(qN|fleYeuz8=|h9!7`mvA8iCASJ!JDAs*Bux<|3u3DOM zSk$DRvp3V-my&+h$Fbul<+OX=NUC67fI9v>Hz6EKhh<9{%b~uDr;ttl++<#*1I^ie zgZwz`S!%obribf`jS8bTuJ>risyWoWZFd^*>khjAnvE^;2sV;#pdt0EQ$9ZD%Xgnf zJ8nxFGO5f-QqapA+v(@lWy#nuD^>0|g7)2Zr`J!f(!3$6K_j6E<8WKS@J} z*QD@(S9F|3p4)XBOfz>}q<6{m8sk}8X$5O56|-o@YsV>ilL7s<;AaRDUMaq+q=7HbjR{kuxdw|xBE5)rDd4?!f^|IahawKZcgUq>rnp%+v(A3f3=ZX z5&nWUNFiv28qmlEhv_v7bblOaX?;xJSj(hv-SIuh%p^NC88Cs)KYc?kS2xp$etl@_ z@}u;cjh~6-@>=$-$5wMXd`#w$mH|yk{?D$mVPZ0k9rqg@Jo}h$^pmdl>6t5vxG)Oz z^&-z#ujo~}e_qndmoLf3KZwQYQ=9veF5xNGC9FrE6T!m(!!epaxD5>&Ig1ku z_;6x@#IridD5YmUD*0jF-3x!xoT-y&-kRNX=Y?;Q{;<{nxnAB%BidL~a}I&rbIqB< zJhk|%YgAGsCysgJ^*RIR)GPHl^^!wu1A;gdG?hMl)Fk`75Y8}VA6*uNsv)S_fLAXcqa%50)oJgML-zPeys5ny-9^Y68$qnL$avUgr=?7M~*LE zlh+F;Ik?JT0%Xl7ot)%d(oOB zk0_WQ*-4jw%19QzpVXgkgsaoYxrfM0x=>AQ3+tbqX8p4|MajC$7}|UPP2vr|Jj9un zj_X1BvsWg&U;ZMm$fP$uTrfXTokp!n8dL2aW9Yb(k4|k3MX)O^oYG&d=hSS#csh3d z2Ce#aFxd_mL%UgoKRUIGu*5cg;f5kWt(jDdh0r+wj!>=(&ED_8Ne@0!NLqu8oDQ&; zRwXmy%=1*grycD%b)GgXnn?Alm!rJ7bCFTO+BA6fE_(FBlU_Zur(GNVM;$u#q26=1 z({<0F)D2qcGNAF8#Yd9wn^)xS`IbUi_*nZoUYak57qMO*8%G9C+d$6(5)3Oz`=I-s zJnT8Goj!<)@l*e$oVc9Yi78AmTRd?veb=X2)n?zz;Fy{Bq-8Ab>0rj~h5MsIJQ zqAA^4QUQw!)OzS_I&#B_Ub7~apPvtTK7T^jj&5TwwL#ROOE>B@c{QE7|0=;q#!vsx zE}c#d^X8&#S+WpHx2MTF{$}sLWan=p4H-Q-x|sUc%1fsDS-4$8Xxq_Kv|-6KYFfJz z<`ipxIo~8|841~R#CnY6O0aQM6f^eR}( z$ds(a)W_{*vT>`UW87r);^KN5(xfCC7KhVT)=`T3;F%(M?yj9SfQsd)N%ZO@|>2<`I++P&OwcO+0kDYuh80sBdE*3QMC0u-}q`99x`ccH-2(#`=v6r zT5vVzcA6e&(pnZpes4X=!}ARVN3fAGi3K}-b0xy-DjnN4lcrCZKocgU|7ZO8@wAq; zpIjwR&s3KC^cXC(dabpYzPYM(K*l#Uj5FN>P2-ix2~CWRC|hIa=O0F;LX$5;vmdIV6It^bzw-faW z736hmFO6thiE4HpPP_TM(avtF@#?fPXXDfK9pui2yM*Tma&kMjmIgK~MwM)Trfuwv zsePOTFn#B~f?ix&M#GxtryN%7RsZ`Tx^ip{4e#2CvtMter<@U3dp-oaoTV898&WP4 
z1L7z{YCCK)-F(HFBXvC7klGL*$X@j``cpo0Q_51L4h>njolYIuNYi>WrHX~}lR0}1 z*KFIDHl4Z4^WK9VUfWB*Pwqnf`uCy@N2N^0+7B_M5=bC^0s-@#=0VP8}bWXb9qNG$t_U4KHa-@j0YN|z_@M|04YVIE zCRg-FCxLiBN7g@EsMbHr$$D1hi|2$f>k#EBTnZ+t2)H@ABgP;Ps(0#&PVJk+#;O3a zYqUg^@ra65APcch&$>nUZS^S>={XTIrVT^&B4(_6M2PUXin$9GVT*lEOqw|Zo%se;}$XB!!tc=5P=hkC{>E%Y{DwfDD4}oW39L&o#M!z2IQNMg47-dbaYW1Of zePeKCTi0#}9ot67$&S;pZQFLzv2EKO+fK)}?R1=uZQtGRSGT^m>Yj6d?W+A_uCeAE zYs|stfjpk$muhdWN4zzeOs9l1eJ>dm`uQ!k3&o)b#zO;MpU9&52oQj@>k)oGoua?q zS&21(qpjp)8Pb=N_qDjeO^47&2A)vo-g|Jm(YXO(E+TsK<=TM_}=8@QfoZ+B_S zeH|lg3u~p0F$V8~5(wbTym#%38)TseXe<{i^h1=IlP#8n0?M(tJKf-{K2`!E5$K>8 z%VHH0pEd{-G2LaHkEhGFAD_NGRK0_}((wcsP+j~~F$fwq71-=ktz|Gh@AQ;IE@TR9 zDirFs3Q^MsI3$(FX6vly84Ma)!nX`?h}pY&S@N7BRte~r*`_hLGilaCWM&KYQuAx* z>zntSO6_h+h(;M#l29H;SFMCs-a{D8HByM#Ts6ApIue;jo0JuXt{FFIotOA~&@ zm0FSjcARS#`OLOZ_1hcG8v=AakW6jF^ylu7OI5z5YMuh3mKVvq=xjQWLc5H}4PK6Z zQDzHI(|H@o)Z1-z*rGL9%t#;y%)d4+VQ~$Rmql!WF3`Z^* zc$RZ8Cu<-*t#H>nF58cd0ij3pS{Y3-fi}Kf8bkF3^RQx>jH}0}aLso5=j`LzOi7=p zJKj!5mF2o)VDQS`@O?)8^NjH{hgPE=r9>Au&+Fi3Byk*ksQ3@1Mi2O`55d({v?yJq zr1@4J7;wZdI=@vWo&nzdJv#3u)(;NO2 z!Zt*p;G+W~N+=qoHGb0Xe(-6Hr0*6YhbyvfnUO;D%D%vJ(XX|zzijt34Zho8bWSF$ z@p$_1(`iMO28M+8yy7nF)$8}3H*RQi)!+m3CIg)=MsUM3*s4^4PPc;jd?B7gJw%!* zdc?hV07?6@{_RJkoi~02XQ$Y81`|F!K%-e|I3qvmemEJ`cQ0m=Zs9WGOaf61`ap#M;1 zuYM$Jo*~}I5VwmTEV`j#cS`TH&fys&NTX?>He6|Tvl!0mSJA>}(#iPIN4O0dZh#*V z5!72Qco4`rB)&ZN|D8U_gMG+}DMZk>NiL^BS66jY_BTU>_f}Bmi;O?qWt-Kg(b=+u6 zYCho}G%}5q_Aiki1O^JPTdExSo64-zGN)mN#em9+sKEwDuZ`WNmLhXMs)k|%w^vDq z`L?*^^4Le`fwfQCfrtV+b2bMY)2QQE#nxy(ZF661ed#OQm92I#$V0U-<3K%^acc98 z77$B~eVK0^@7G}yza!v%U|&NKT72KZWw}g68nXKtP3)?a;mj2ug|-cR2_LhvcXW(d z_8Iu|=2T{Deel)O26}cjb-4Gtga-$OrhAZrB~=4tey>rgaTCIQXKQb3i!NF-2x zaAFMVIU=b37B!elBCeex39aDagPNI+zFCwc!K{(tY>P~LMAT^Xf?hhm<&QMjV{mu) zL95^>bNR%EtD8-*I^pO}hBt|8c(!kWwNMyFP0 zsa8<;Ob?!Y>lpnX+UKf;t&sO?%LU8!J;4-v#+vnFXJF|5&<0<&=kx7EdI8FMK5IPceya&q} zaJD7Sug0U)ZkVTG*Gk2uz?QM0>K4C}l`+S?*x%DLQHMjNUEh(>P{0b_FiKV1zI@Z3 
zDvh3ILsMZ$L1nuuw;2q_y+a~Ai?H$fs9CiKOMZzONE&r1)#%IYo^G5fzsr6tSMP9k z?&{rWPhI1aus9e*cd(zG|K}hAO@043-d@k>9$`C zFe(#oJd&LeuJo_I!+(3uTx6bzrEWSckfoIx==Ppi=8CZniu$5=apfu`pj@ho%Gxwm zpkJM|EEYa^Bl_~E0On>zowvrYSHp1OFj-)?A4l-&wnDwz^lMDVi2oFrn2~Tv#t6Z| z*T+W#hAB0aqR2lcn;J-jxIl{+1w@5&sxEvjDw$p;D`sG1UC7ELpY?O`PlUSbNz=#_ z=4TSMgg3Hu9)w^w+G<-=Z=XkAI)5?fTktO(Iwg`r%&LjHsA)jD&I1}kC{R8Wf5NdG zStn7X84-T1PkX*iWjAVh=81I?S}>}^;#`o z6~}1;7DidvRqmwYYof}>99}?E`~}zVyru@TnLd@*5t1O0Pjmq%(@phqtz5=vJ+F*S zk-@}(;88Fh!ntyZ-(`k%iVGHZ)NS`n0?VW#n8w6T-s{Si3QJLz&c6zT9y=8mALQ#) z)un~8g26)-mEo4N@@s3}K~5{QHD!fD5K4ky34xtK34Rqx$qn*t0J@C6K0zu{K*EY! zd`qk%`8`>tU|=8>fIDG#9w3Esq7s++N=UdT^IH5VoeWPC|GTpDj)^5BHo3ti(Bn1Y}l0WNfeEJ$)>^m)PX#2_&=13pt4Q|FD6sV5P^R$F`zv$C)u zcC)D50)ud0^rtVj@RA6P{1&ut92jhO!?LDfPS;q-M69gckrmj_Y!w?o@`IIr!^CNcIgn0(sCC?iY(}gL)bLTujs!(e8k)f1(3tMh8 z{HDgkd~FOl1@0|Dx2~_DLCCoEbBr!9Hysa<(RdbOx2{_fBtf@e6v+vPo^ZjAz5)H; zC_{uGzvuPOwc_Gp3*-?KpF9%`JF+&V7eM(8m54%Af5b!c+O=NjpI8+F3`{wmqsueX`s>T*scH>%0vwxjED zQ34G%zji7$EQ2kGnJQgWJeVr{3}Q_~CB=iDgz)WE93y^Id@8V{W}hCdjhFKvaAx!{VTF>@5u(Q>pZ zwUSvDSS-F{XH`hVPW80cH2PSc=4kxF6U43Q$`#jDGA!WD+(VvwRIIqoM-byWdZ1t{K#q-Dfxel&*U!h3j#SOFbTt=K3rp1Y0 z@j~e(*2`&E2`6+IfIKccQ*->#SA|+@J;i6;J$S<+hF(Kst0p(orZImUJL!28UP4&Imk~31d@za667&6=)*Y5`dkrs{uig*N+e4g; zUv}8|FFzBH3TK4o?~bH=C`e!AN8D}n;inxd4&}%5o65H0Oq?DEJrM>EngrTnW?7Wf zRL0yqoXrSxVw=ho&uv+JZ=F4G6O^@>Mh&ok^NsfIM*olLAP33we=v%vI7-F<=I$HL zRDa!_PI;c-!c_tL`$n-#Wa&#bonaILKN63l8t6y))HjSlWHV>0nSpbeq6Ht#p)uCl z(!XD$^4(u;72=Zg{dSM8yWAH_Y{^b|d>D0HJY+BE9?jPsmgLJ-LUEdLrC#ZIifY}W zK`7iE+4~P}fzye0B=yw4)5xnhWj6C9zPBe3URQ0>?7bbol{2y&Q7q}U-&3dC&Z}9Y z!@fW~yTzX-irsAk2UzE}Qtpo;>W5~XIPKr*ICve%E*}Ll7x)utWo4)3s!F<=q~Z+X zF<5N&oMDW?r23rA@J<`vC3Q4%CLc_fAe?Z8Nm9=WC@%Y)a8#8e*l$|8 zvO<2tzX;;cmj98-waH&Ksgf`8u49AmpbgO@WSl4s&N+^}~?`&cT6bO&b4wIm;MfHC>< zd12*F|D?&7eSh1!RsdvQ9E-*h^5iyXnJLwXrYJZ1K;kt2LHgbC7^lsxtCY)W5n5cd z=4|3sZAqAwcV1KPV|h1yV)skj$nv`zwT|aSPWh&q<+(~w+brQcM7#*2Za)S-SA@XUW z+t`Vhn!R_vm#m;vHQr0PyPs&yHxNG2izoFeVfHo>*IXHv=47UU{+!b>y&8W*zzA}E 
z_NrCu=$B@L;&R8w#E=z&l08$=7s4(dFG{qQ)^%4k)KH9xl!(cBw8s3w4ssd=(OQ@A*=BXibil}iwMq<2Z}&+3eV4I zrB~PON+s}6^(4PmU3T49kv>X25vP+4kjWF3-I+D$#yi;(G0&k}agzQ$m~3^n$FksuHRB$%f$~MQqVoqdY@)~k?PgQEbIzU} zh_NAuua8jBw_PIC5Zbf*b3+7gu5{1up8%oe;KK|=R#a+ zXS|r}{`LaH_=SGBM7&S3AL0<*YSnI-<-%PkMGDF=0p3>-;wAvuDxt}-cgwhFuz#I3@l5v4iA(D z$v)dzwgPoGdsC9zcg!@uzKg>1!^0v3zd){37<$=#T+zMR-|7f8}`l{cG5 z|C&50P`6D)+tH^MA>F91W!)&Bv{O(X9I@Y6?xtbQkz*WX}!=DnZ-h=~5h-2p|IiY; z%{qtsVcd1($V*woWA{x@tIlWL1S`oVJV*3B>dzv8*UPsQ>FJF8;m4+51%02sZY8+Y zvBQclh!xAl3=DjQRuR_e=rJhnPH(VO`mJ7?Eq%AwR}!KDLf|Vj*!3DRFf|BH zhD^?s%Q&MM@bv9(IZ=zS=x%a^JD;-c3FsR*5pu)4b1v@2sp)DtvoI9Ao>B^Wk^ZM@ zg(&g2YBgejo|OPK5ZV=bc$(_w^$hyKZ3n)b(=G5klr__ChPY6z2~s4bnMa9u!1{l& z0El1}OD+vnp}Wmo?WZ?kO-*d{m6Yb1=8jNCw@)9wJQ8yEof3V$gGpAka$c;RSvpA| z85xhh=g!=|K!y8yOzgUoP^w)}thqPB^8FgRDlZDbu6GAss_r}rx7-;!Tp_T%z$p)p z(>bdV;dPjGPpH%WZW_oWPY+Jp4lmsO@cr44ueIk@%`%1>#`Xbm=kAH4FL^bdCzR^R zmxw9i(+nS#+K-y`yF#ZXBbM(BS41+)o_fQ%P84?eQfUm+paTUCAKI*dUHn}_CJz!h zNg5_?=!uutDvJ9e8=l0aFVu~>FE@J|WCa_(nEZ}tE$$=p+H&1X+nSjk^Z}oawO1Np zM~Ck2=;AoL-4#@In_FMGt=8SyESR&EvLN;`_oT_4hG=gZKU?O?FO+pz9+6hnKn5>6 zdS%Kf!Sx(h$TaY1z!|M9!A5YP?`xh<`{vPd9_!6bbVW68JSn8<7%qq4ru+5|7$mnZ zDij%oDy2#tx#bdNg=z9eh?peDo8IqXf~C&YL!>B(PBBC6%LmG2Hs`$a0n_@Sg3~5c z^-c%KY-5^80g})HwLlmK17gcdX9k6l5E<{T%P(Z}s-8-$xgM!t#D_Y-X_y;Oxf_Aa!bAB>&D^P8$^kfMrZX%jK* zpSMyT^219;Hn9)g{mZ&-tR*C7hES7L$x?;Hug|xIdje9_hd#DOtTSZqDAMgDC*5OL z57*FVn%+o0ttSPKBOjUI0Q?gXc-qz=g~7^$lT^8Y)L{ zFr$yeb#aq2bNkw0EEj46TGI^@F2CIifn18Q*Bg;MYS8KWRci&(6}*;| zJR1@M%QQ&fK4_qQeh&pvg-2pApaulKdq;HzEpDq{{fT$N{!Dm~uR8W@aAQ{DG>SD> zXK}0-ySM$AP@(2PljM&b)VWvM@VlK?X$|1nR30g>L6opoU6yL2G&slf@ySGg>ChW< zaKbNomPQ~fiCk&(`flraD7u~_S|DbIi^j7mR1-3r`P^Vg%={4W zOr08!L!|e}=D9Mm$}HK-b~~qzO^S*mSDHD@!wW~?DyI)=#_x@$3H7VrU3+a&K%jwz z+qBDe{8ObU)N^xqG;w%E>wwC$Ea;o0+*ec*=KZljHqU#_JjcVSQk@KVP|j!H@(PD1 z-p&Y}ijN+6L=u)`E!d#y? 
z;iOpmYJGV0v)*~Cb-5=`t^P!>r&hao->c?(PS`dCg zn6vjm_2sjlgDi65=}l*I_9T#6Cign;TPF87u}X|hXH;5By+PGE^x-+(weBz>Q4sCg z~=&vdcnAjE2BTiE!Yb_NK zf&yP46D>S@9Y}WVBaFa<7nyuAPt)8)Qo1O|XEopG)x|Lx-r6q$U(eG#)nS99%`T40 z75;)((?45_?Y{XU!2{pdl#`+H8F(F$&Xb5*ghn$U&vz z+QYPHswF+%@$%>Wq0r#y^n2HD8-)gAA7iN!5M3R(hHFDsfBP58vGnNMLzboV^Ea)> zd$CGA4glPQ1u2>NM_}fTL-q~hKmEepg1}4rgNB6m z<+&e;=)ujQ-4o1giZ|H3Nz{C~3q!^-}ozW6&XD_2t%ilr@dim6&LX+LNjwjU85 zDggD8Ye%??N)_NVTX#)%pphSyC7W&VP;z7+1Nx8k3G>&(q-#Bb)9Iai9{fai$9vC{ z#n_n?-{ICzlDH2X?=VD<(d@qEo$V^YWX}89Tv3bt#3-{eZ5H5wb4V&yBWxZMUzAn&7c8i<} zXK8{b*+Y5JXECp8jBwLSqkM;JdUyt5L!TwSf`=bxKSStvF7UP$vM1v?E`8kXydSWb5 z#s&b|VcmqM$p4!K0gB5wta2y$H^};1L;r^#qC_YtFimoQv_aqu`QH-MKmNR>{jEz4 zdS}0%-{*@9Qlt%$E)5YZ$z)ZQySpW69BlA# zmWiy0fKj*+o0ER`zvhdvarfMCo#5<%Kd7RX;1pe*NQ4KUo>B~~h35M}AC zgkWH046chOhP%5@z$e>;`Ik-V;S>1#aOko87+*#ZvbWzkeaGRjn<4}J#76z<#sJWU zeX-~e_}O|>s4t)_fL44lF z)q`E&xVKn~$MZMkYGa`+6pi{nqO0Z3GaVxMNmTKOA$?)Ruz63?|8%gxj*0TWhgPwR z9{l+VJ+kd!i0N|n4%^%rfP{@4vy$c(ln;QA%?Q9ISs zP35~mrwT;qCpQ?#%`Vktp{OT!RtuEGj(w!Zv%fgQ*6y}Jbj3t`8gJ;A#||weA_O0k zk?)Wexx?W=3G{FlYz;6S<=*^w4-)yG_Kp8X_53fhfCub-NkY2z7!5sg{ijz&yVC*L zY+7#^-_wlMPGTqdfWh#gCXJ(jE8-^(Db5Ti0evi>J5uH*THFumtxlj7V>N(`=g*W>Sp9&|u>5)#Fb)N)Crc$>zEzlO<`V zN<}0Hjqe|}TUgPhy8p6&K&W1Q*qwp|Q|@Fd(ZZTPiR%%K^pVQ>Bc`^^>p zK4q|M|B%sABLRc`Y9LuRZU0WFf0)`yUgJ6CL>3P`j}?Z5>d@hX6yAS&_x|at1k(Ay znIDspJ-o)E#_i4ex2m_8f0b>Qfd1-)JCnc3-4kO%Q~NyQ{N#WFa)d70;9tP#?xnYa zCWp~i2Gd`3aC_BVglo?h6z%Y=Ww5rvk;d4Fuz!JBYx8V*)_D{~BtpP)#EgUx#lIITFSP4zwN-qA?jbw=7No68q|EcY6t>Pl*lmFGD|J zv5E5dUZroE20$(8Lk#^hHJpFIwcV$|t5Rgm_`1BBw%JZDt-Ry*u?<4NYcEzhhIe4K z(o-x-wa@fY+WcT-hu-=BDAE5IwJzVk0=&o0BCZJE z>L$UuC{+x{HQ>ANC_YcWLyew@?myn7I%AcKCCMz3BJ1cI6(6LMBTgkL zcZ7bTx!o*l1D>Mc&8y4YbFUfa?5VLsy+nzaG!eXwl=epqBF-}XZ;eoS&xRoy`)~j3 zHKTK_`jMM~X+loCm|zs&mzPbgqz7xo&0t;>bn)ITc%yv&qj?4X*~2IE*ML~;em=iI zNw&f<&c;JQ!wBo^L*?)7^DkA7SjNE*k()A1XW_O^I`)#b6W@K$Fvv`1Vqrlu*qM!^ zNdn#lapMOJ6Um5QVgH0$sG=L@e?vk+iMj$#D_rU?(=}NB$0^XMxe-WD!GN 
z)Y9&@hn%ACK*l7T9%A?~5Ykc=GE!Hm3{phEC#Zr+x5UQLe*=W5*BIv3c(P;#%A>f3@2YTeEs1O`k z1&31a3hg{UVN7N7hDI(|@61LvhbgMp5?B0<{$ch2b#AX$#G6l${cPV*F1KEZMe%tv z=KL_8?pRW))Ps4}V1UDsh7~WG?u{K+&CwPM#5~vAkvSfFst4?bllTf*Vp+l8AdZYC` zELeLZ|D}J1ifi z`_?`Li;6WAw{FM4&t`gi=dGL--!rDox?vpj(m>w#*L>H=dWNxlKY=O1T51&tk=wLV zp5>2qX7Rg!yW~Q^)md~p!2N5a`6c|T)hv8+OqExNXzEZgOlWnZMJw6GYd{Oaqou=Z zu9^`oChfGSlXY_L;sS9VehOjcJSV>O%a#K%>-f$o%FWIBi?tCTMHC+rnk^k+_PG@1*1l zK)Po&4qed-W-QT0DMxO9ylmyVC=c-S<#8vGw6DI7atS`GHkNFp#!TS|EW$U~>$QF);9ERoL!uEYc>$!Yq5sW)W1}E4VMe1D z*gi#(51G%Zf-RL9^%@|KvMf;aCl53~Cu5632~5r8uCXZ= zn3PNqSOOPPQ;l3=F?zLuiBT$Kh(KQUj61E$XLKnt;J^qPJ@Q%Yj~vro??4!lR0prp^T?Ln#25#PTXVX2t@U}!2q_tes0Jr^5C z-1=swrslBtV(+;lShBpT@Ds+Ge?GCJer>*z$CVE!SrKbSayAazbA$(w&6~D@f|F+0 zZA@TPPTAJr&eYT-&afbnZTR))yYOj^yL{!4ddcrax#W_jA8qjFqvOV4Yu2o}(1R<@X-RjZt#@fNN)&|B-y~&_1@(W7Vf4#Z1{>C#uBtH2Ro0%5A()^+8 zsh=E11{l?OUAXq%u$r&r@=Df$$k0%kN`5OYnl05pWhqyHTWzR!rfr>-2u@SQZP&ISsm>F{ae>H%jB{xU%{_Kq`krx4da%HMPp$GklVVofZ;Q0iYciWe%36m)Bt{cXESKJF5Th}Pvv%f!qi zk6Yn@)|>}T6BFmjk2d;!h%r1sQUpc=uhX$rusixv0)8zj_kNh5ELxNL_%btY8_KM@ zGR1s^dUEyqW1Jc|1+x4l(fL3P+H3O8`^UnXoCxtW{aj=3BxR^QtMgBN-PjO%PKj*d zy22T1sj@hQxMm(&tfgca94sWRzUvQk^QTI%JI!*oyrBVK@AKHWESshQK&Fk@L}7Pl zUt&_(CWkw&luPbv`=x?#M+_i0yeJa>-y-m*66nA1RK3wxdNYtBNrZHSzk^jyUAJQz zFb#L%qj{9OP-<4g%}!XXnKLOLTt0@Wig;DvpvGX>Az5wcl-bS%2XBlR^&5zEEq z)i7Wl$*EEc!8AlWi9b0aE!BtJ-I#F6c0H1-YdTL+0(*XJ`{!)p_WlCyq7x$kcd=@(K26`zCDr*r1XGt36;5O8Mt)Rom(cjhT!hHccW#v#8e_c&udnqX_^f(eyQgF@Ul-inQB$;l@a10qdg{68Vq{CP~ zOxmIHPE4e#DljD-oui|pQh~99*)V_Fmnc)YMv#Y=1|zDJA6!Ah;cE=D5?r!$sX}zI z0^i2+AF&>UaW^r@;2QG=9n`gJ39I$G{9sdm?8KbfJV6fh)f!W&Yo!P72F4XK370t} z$6fUy>gpoK=RWXD{S>0D0!|@$ zRF(S@LI!5U{j1TDwbq_Xyppt%!$SiDx!Si~X`JpTYsxeTHlyF={#}_8k%7uYA@q5m zt6n57sJtS@?S2$3CI^ayoC@wEW8@s`+Sf@4@CtndemL1`s(pU`_0eu4begyvlk${9#1ygh*+bL5p&(A3n?oX+D zbZmt-Oh|ek$$`W*6ud`_m@cmy4ptNv0-}n8#T4k*R=%PZJ{OaCq&uuG-dJLi01VGy zbY;CQyv376p{%uoyq{QbuCA`!2vH%`*~qY7Ynev!ZPf`p%f^(d)zp?;3jsyQS2zCa zTHeZtiLcm%goy9E2lB(cAP14Kh%EP{k-qyA;D+J`XAK3 
ze@Gf^cS{17|6EK*-$VIlFX}&HjbH-RbsRG)ANk)*E^uxCPihe2+;=rI)u?~Dd;TK; zksgp>hgOdvLiC^C_SxQ#F50 zldrSsV9WpSkvBlH)R~*F1?KzR5`MYjGQkOrxm6W`0h*ehlLiqA(4ZPn;x6=N?U0Sm zD;Ka(?b88bKwOXo@BvQbkpIpW_oRvf_f&5!D)RbIwx47O#p4BuqJ5!h{z|oM|TTO5~45Mc{Tz@RoGX=)T z&+wAk&pv#TfYix;5Sq!{Y5bRO;Q#myxQCB@6tW+3sBw@+r?HN9Vy5A|N+tQ(D#YfP zPja#9aI#2W_KBaYKD=7QGIGaT!p-HYDer&sthLCK$` zu<|9#{Iv6g{R7r!mgvH~?9r48q0O_yEkHGOP6hkG&a;4YYCkeZa_(g{FP*=X<;FQy zGok7KT%PLIHi5yI`7;>`32UyS!S84cBBKUn&LN9l_Vhub7Vv}rq!kIllVkVgh|nkP z60%PAhByB0o#=OiGQU8VnFS`AZ~PdSkbTJirt-Zc|4Nl~H`sLR|3x4@BkBtC;|ft$ z5c#y8@`p`tw&62T9yG|g-h>>C?vux!h%ih>59Uy^tX?6A@n zh4>7k&~b!`0!z0;GBgOS*xjmk8V)Y_mKc4U`V8K|m}Y;UGN12-z^ab)**Gdl5jQll zzuYxDJF+R6WP!GY1kx1bj%Z~f_oUETFBO!>Q6%P0I0L;7!BUDyZp}|Q->VnQPUMpR zPLjo`0i_?OVYGiU6XB4$#|K4)%Q6P(C6%FdPJwiw;btg>=Ys~jM7MO>t(D$E)-{2VZCj5zFJ;Q&BJsR8& zk%Tdohy56W6MC(&?CTSMRB4jt5X|QGV_Wx@S7urFu3Wp2TII-BsaUFIWHY=@v&k;Z zvF0p->IWv&$b;=%u(3$f|vv*hi~}+Gw*wt60JtjC8ZQBUr@z&9zzp?wKEc zv^-JByZP(x9qH`~;^CteU5vXxzDz?7?tB*GyA7#lN|I@vEbpdup`L_)*f5*D)3797 zJ2F&7i1$phtLsWC4b{g;LX7x-rH=fkGl2Gw_w*P!YhIc^xCQgE&hyr?lIsBpeGRT> zUqMHqy-`GTkY5FFBj$Wy>bKbTH7S7mO)2J z>!TanhQ||dXD6REXQ1Nz~q0gW};N^AioqT`Q6&-NIxbH zG$4<4Uw{f^oO~Wnu{3V5i*u!Z+J_yfvs&`NPL{1*_j%0taLn%hD$SK#{UAf?CBt|t zqV)lWWVh`waeQpI-Gh&uJ$kIPO{tkOyIYfQS{+miQ2$Kgs(ESX;dL* zv)#o8$HrB(aQU|eT%2{*>8zSl+MuGNyns#~04#RB9pIj`|Ds8nra+`Z)m&iP|5A&|a&Va_-46tZN2pMwSJ^~N09 ziIPU!Pi05z)ff?yzF{vTI>@Q0|K$%6(f>UeOwHYu>A)|e*L}C_svjL#B{>;+TaCw9 zOHIU|hg_caA&LRDKR)m#w0`09{;^>^5B&kmndqSX2`zg%tbRItBXfHa5sD|vj{d~0 z3G2J3QB&t<7Sb8^ptQ)RDViL0lOQlph?{Bqn!%}lAZK_Iwe)$}j0R0_Z@kVuv1Nyl zGAY_OXn}N+JPEvtefj!gxxR=95>0(HVMr}y`j@+~vC9xdv8qV*@N>P2$x1>G#iLL7 zj$*jsunx6gGw*QM5=S&3Wn#JEV$y2|z4nV2ij!UIcB_*r4fNa(IR_PD^>@kQt7Ec` z>tZmJg%#_47n|oa(J!5%xNq~P_X_N!zsww%6N$9j^~ZjR%8JPA#K{dpqs!hh z77rP|P_yS_Krgad5K_TapjC-ZLxZ~Ejc&S4-5n6}cEaYu?(epUNJ#HwVV(?Dr%2cL z8juk&gp&Pe!h&VX7R_6GXE52-XDJ_eX8OJ{&1<+kQ1sv4+RR@+7-u#Nkyi~g;_1uT zS}hdpQf7$Ey26zgr^BBhq8voy*~Ky4Obkat4mjOPZ{0<{5@YDLvjrbT?Y~uj`#1Q?Pkvxp{F4M! 
z5~kBTZOElUP83~henRGuO1pTQ^Gk*E_U|E`TS6Pn!>)#VaqaM6AJoM$qi#eR~*1N*1+v+YA!uajLh>SFVyEFCVr zyt<$y!ecMJiDVMmANPRb2OCjS70ubCEjjtz0o}c;LC)E*KO|nc(r~%mhq9!Qu(+1Y zS7MxwpJZFh1DNMx{Pc9h|62(DOz=|ZwmlR)o zV}F%s&sdL?y1nLQ&0Ie<=A`>rRmos02Gd9_moWNcOZFKSlzFa@EbaOTDBeUM8|Ci? zz-M>G5Cjb#dda@r@AuwOz%nakb`?k=I#RV2<@}WEE_&_xZ1`Ow`aBn+5m;(yfP2DW zO5DmDnZNL+I^&idbH)9=@^_%AZI}2?_%k5a8>q$xTgSmkVo1%41TPXq(D33$G0r5_ z9wc@|p*KQJ!@|z}a_E;hILtUqI<;Q>72jLP)C63}yOaBlK8O4}=HvihS6Ghk%hw7y z3OlSr>j}Jd0^K&DFv@E zLw!7iGjdN`!tDU5sT-OJze~Q6k$t7E3Yalz_jQD~I%Dz8-S-qmVUChZB33C9^bhc` zY)`#F{mK!glG0-pz?WFDa&Zs0a={U$tjA`B9GppaRmZC|w6m5I$?#*xGDzhpe01I{ z-%1B}3bTZ2`$5ks>hPFydj757-+b(rCAzaOF=T?HA&bJ{sRr$V z?svsK{PTyAF?i==)2;U1sZJ=RA)&$T+p~Jd0}395!{RD-ze0hsfiskY%i%|NO(HmU z4Jz3osq(bK+GAOxY=T>sxRtoT*W9Y%oh7fF7r-*KoMhj8h5Cp~$B7%MM@)J^`W|QR zP-gexrhCDdt>_?-t>C|c2f*LpLA@*g-U*cYYry-(_FZe_l@Cgkax1~u%A&|P#=X2W z1u`#zn%!#`Up|UT+$~s3unHObFY{nc73&2-fwj*f=$*$H{i*40!*OSlsGRSx%%Y9_ z3h$NQv|h-HwtoEKd%aNkVE#R^$jO`&=l;qA&Z1(8_jc*Ny<|f;au?B;*F{pUQXA0A zRWlAMEXf`4)@iqML;J)F8yvApR&tWBJsDztufl^x#xtBulgXXQF_w@(%tr0+F}apo zW#lZYI>k}9DOEKJN96lU1zN85Fhd$>X6`UPmXyN~m^^SpYc-#7LlQec?w`tti6Wg}g*tRjT z)7Z9c+qSKS6X&0E?>+DN-TQuxFt=OGT|HZjh!ide zG>`cpH}Aub7r9U5x%SpGo5=3t>tT&=<4GJQlyqt^PTp#E0_D;C>~y0VIc~RS|3<RsHv@1#)?eT*xuXK@0yeRE@{u`~*LvkL zd^N$Y@+9d&Ny5!?*(_SdQ-jS9&Q&~P)>?rmO&!$DZ^1O=MA;M7ILX`=xTi_16B$Ec zT-a+ZkG@6Y-#W-;MF=c-=#oB)WD)F#8t*M?j6RGozcu4-VUr^eO`|QQ-*J;~h>*&5 z8%7(l+aJ?g^DOzRQ)(v6%l0dTUoh)dv4$u=p26OdXt~C1`|OHj#^J z`HVCXI@V<8tO6{*W_pIrAJ${>_<*s+!+ahM|MdkKzxn-Zun6S!_!$rQUj0nFvY}e4 z>CR3#pDA8So@GsMh?_0E&U?;u)y8LZ@xAcS>cPrcjjx7U{1^sxnEip|!)7d3z2oCE zASID;6szoTqA%A8OU|a-wYfP$mJlQdqGyZAP#DUwmI;JNUXOm@jA@>n#9^+^F@gBXWp3QP9-a)go(v1*T zhX>qHH2&Z{xqUKv6F+Q#QnR*$(V&RwYA-4oOy8ay&iF*+_3{{oYKOo6T5|YLDQv&Y z$g!Sec(-=peYDk%eYY09cdl{{+yI@uLW9Hp$?Bc3!WPz-rt^nL0wOU!4;#)YYr@Ur z=eEsKHLk=un2K#DVwyyMj@I*mvcB`p)Ey~l${h!DjKe9-z`7Dddhb$rGRJyReF*8y z)K4%Z7zDcnt7O;QZ0EPC$<*pjMBBQP`>@CyLzNVg^dD#o20RPrU){_Y?I!CS37Eju 
zQH$7B2?+I9ZqrjTvB1}90xxSK1X{~Wsx88CPkZE!$$jcfXKQwgdgqy=RD5uHjEF~c2M_-d77c@m%P zyc1k`B`TRF=+o@f7uMsY-TOKq*1vD}n}K0WKGhe)xn64KcHIM^Lzm1YOR$N3r*jnYj7@slg(l{-TF5V* z3fR>K>rh+;(@GJBQ)LLtzNxB@AYnY2?+b-2!OryqygpXlrxT+5WX2Q&ItoaZeH*LH zq680_At3JX14$JFpeA!C;bCA(-g8b^%C{*Zp8mceIur~vL@RjBdB@^#36-Gii~th@ zg>9OL->JX@A0AQAOgzA{g`5hVVV=6bdShz?#0m7I9Y7_5xZ4fOs4E1CJ`uOvJu3?B z-W6K?4P_Cj>+F!*psIi~X{6X%#<|^Q#;k#!PVHFw9FE*!vq8|#73CO@^q&8C80@Lq zvS;apUaC&}MZW1;87O%6ZhEUMsD?9UXFOy;t=WO+$Ec0HwwmJ=&3~*s1q1!A2E1>h zC2moYl*$00&AqVOKN=KP>t*Uu;xqrC#bhkN1b)Z#5B1y6czVutThj|TLN|_c4+n>| z$K;yB`&0L~MT9`c74sfS#uwgWaelXDk=nB`D`jkpNQlpnv#T-w;`Ons(V^{I;nfLZ zv4G>YA(!jlFLb?ms2EifIw23mM{=mf09QN+@#Yo>!nHShF3LAVGKYmN9>g)bWy``B zedBtAjAF^N!!r*3&eJPsvy#6YBNw26t5B9xl>0h+QK@Z|dpl}ROoN@ZtJvT0C$u+6 z5xZZH%+#NiucJ*T=HHbxRDq`;``kR2fVpqBzle{IUtyKD1d=q6oC_V0iS5geizVRl zxp^b=HG-VPry9|`y6(va4in^_C-f>WK5v&2utjKPCh|vBoU_PjQPbhYB($a7849Gc_cW99o&L0DXXr(Xf-R{9 zHrPR~goQW(Y4X~R+=%B*#f^ek(p;h5XE`RuL+k^@qviD zk#+%XT!0Ur8y8}MlCXx~SLJqIt4!{6s#I)+jY>n)s|pizqW9#|0ZMO%AnLt&#bHj<<*msp?QlaRt!6I!%(w|FRoki9Q(qg(*6kZD-q8wgqG5%KsMOUH?q-SvhCH7}IcQ1Cb zVbXufDnv5V#q4HK(J^2BJ=f`G2M=yd7i(!82Y(%Bq;D~|MS!RzLXnG1ypfO1^k7J9 zFAn>8-?)_RzrVfhxz@5VP?3T~16>}g%&ybY{jE@2RKHq zoO*SYN72Jz+(nQHd2(Wk``4A*4j0f}Z*5Yi2Qni{9rMlbn!g7ZQ@mUn5l8)J3g{;* z!1p-}D8r}39gFg19843O#_Ua^GJNd6roga=;{9Mm)|7e7_RbQ2w0>Z&FIGBlFoCj* zsv^*$(A&J>{b9lNWy_llFCgttKql2!5AXDVtv}X+DLIqLfHBB>dziBE^pElKdl=6T z8V{2Bro(kATy6c@v-hsGBC&2JRkr5Oh}+F|D5U$-Y>AZI>9HP2GQcuo89=_hs1z<( zHZ^4MV0?}Xu&@7_K!)V4-3|+iz|)^CzoPfjb4VuP9cma$+HvQ_{-b=FYi20AjLCA? 
z_5AC4_7;Cqw>=7|KN8~pgsob+>0fRj>N&I%|Ax1}-2jT3DHP)A_2c%iTht{YXmt`C zf?}N>gPf9Uf_MydShDigLo}HbJesXSl49~0J#f_q5JO~bpUfqNsEYk4j?)EiV{adl zBvs|#Y9|>@Y~p8_jJ=~RVjT%)qt=G1`i*x-z=F$KvsCkYM*u0dJ=~l?w3>z^&&sJR z;_u2n)?-@7;33@y#LR;>Lu5yOFmt_7aTjzWoo0YGv~`Yom))r+Z&V{D;P3@DzL%k; zLYilQ-kovHi&qBICLf^gdDt!kh4^B?G4 z5@>VAu-@dSr&C3X#AD$DNRFI|2ccX^HanhBc?O`NUf)nwZrKWYru<1hr)`l}y{|ngnhyk+@aSl&5;HBy-1Y@7!ceK6kE$)`;O< zpM3LU(RAK1c6FjnXCT;nUNmf*>iH#3=r4GF&E|7$jDEr(G0`U9LX5k#sCgE+bo>Pg z^1b+MKML6Z{+gpi8``D5KdOx;Hod4X8`bl~^JlZFjc~mhLV0g{Jc| zBv>*{Ho6I!fdL>hk~88uS_2G%~xdtTWWE9y7lRchSO-;ZXD^9 zaN_Fa-Y!X-tn}@ylCCa4Sd;9`;=tCd3$Lea9T`G4*+Rug<(uupq^DS@+cnPwGoi8s z1%Wza3a&;{(R+MStf(HfN53%#-<)wPFu8=m37;pC_VHjALOM|>S{r$0)oqg5Xp|&% ze_u#ufC9!?8jDLFp zgx1scaXslQ6#ZCXCEyEx*ve7sJUTeMeS=Zvj{|El&&&Pur^PWqi zicFlT5djp<}JPYfw?tZ8JV3bE1NHdUM*E ziZPtLVyr^R_sR%~l9JHn3lF0pT{tZ$W?A(2&n=Rk58tI>oNd;v|6}9Uq)N+w)D#|N( zA_R3J^dzDy%LVF17P3>){)LS7VUoO6L z!$yDJO%LbMScathWr}v@j&pU{3-%2vPzGYpda_Dw3AfZ(a!!Cp`K^#u@kyF|JsXMW6Fd3fyqtN{LKDNsTIfO)~R$3CvT z_)9s9#MF88^`(PAk3^YkU#=J&5U60hbS3*4H%5NcxL2f$skbBzfRaj!RG);9udMdg z5O72~*#(MUW*#yW0A+RN?E9s#usr+p%Wagql_*gN2qThmX7E?l@JYk4f^u5naDlR& z2m-dIByk5d%?%C^tRFQ*gH}+6$1(iXO*JAV-$>=>l}c$-CxqI;u}d^+qq=I9nWP4(?lsriQSnr+%J3)4+QYE!nv~|OpVm1=V&`P&5OntDlhK1GQHMpMHOK0FlB|7m&9G$6 zfEgEB&^mx(`Ni((AM-Z@fs(xJ`Ga6H231ItE4M^)$xHNiDmiQn!XAxpL zGqm86(KqqnXM$7&+!SgzfZs`|;aUrY_07}+qi2E{8Q|!2p?(tc;M9OsF1H%}dT7JV z75xCiUY`In8ch%w`6kTHtn9!}G8_NuAeOVjRwV#8W{yNT&X*N7J=C8vG^@eg=1($U@=6`BOfQ3MCOj~9@$ z&bOOoJubH!bp30pWDT{PdSyFoOfi9EM>^Q>Ll4-V>3Mt=RwKDwQ~l!(RF%>YuRuin zs20;Rg89o$c(1m3!5wf3^l26m`7_39DtYw6XB+e&y-m@4TZK>%r-1PQhntm%YnD?k zBp_Gt(c>ox_((~O?OGGY+WRu}g~*oP_I6LP;^v<&B8h74(<4)dQ=Y$`Hh11m(Pu-# z{6a7$gCwCe|G}^yEkC(*7j-FNbU8l*QKH|HnLun#?{%zw#NcKiO56JeM9_blT)4q42bWy(&s5YD#j=F5w zzt@S-@8Jv19+mE^1qPGn`ud%eF?77$rQ?A~r(g&6y@0l-DOD4!r@^?}T=*NlX*HnQ z*>bz1lTT`i${COlwzloP@9C*6F9;yEjZ)}-GH$&$2%`>Y?91D+&KpHP11Zh~HQ607 
zCfg7AB!zv8IM)GLT54h6G$M{K%!|Me2ZeT0Du-|Bb!l6!xsgM^n!2IFg+S9LH}lZln28OPs%>VWGB}up-7rHLRm~CFJdz>n-OQf+(%GSLr@9JX`EcAaqWZE! zKXGtKuZwU#%(SmGUW#hTcy>i#qpwXtG)CV{_ybM!K_JlGnU@jejQ=8MpSDUhOHg$y7@NLpgdGP7+ZCG(xsWfabDp7u&0)t@(HRP8; z^dbRAPKTFUvb}f3(jqnTz|PYxeKaKV?Pa>dWRRQcc6)rv>gND?QJe#IUxIJ0UdFM0 zccqO8vbN zsfa1ULmj%xMM`ATd3ac+N2@H%CJIEQsx$uOEv3h{M=kXR2UM%x``}MdLuLrPQfHC= zgc+J55UEMu-%`1cUp5Hc;CGY;W+N-c%zl?0&KJw)c|O@WI_768VosYyvfi9=Q#E6UZkA&Jw)RvhIQbM^`z`?Oqk8@WT228yhBW-mEKM%tO=!FTtV z%pu#%YRk4Ew+w5d?2!)kj}(4XbGCDGBoJZ|N|i&s(FoM@3kopSo@E93WtU#BVMA12 zN(%pTApDC7`LH5f@@ae*(63qhLf^0;y7fjhlt6|rTM&1EDdHMU!}MZ{kVvEuiF}qWfKX6Ys$8dlzb*PzR960^_UUdP@B_R*f^hmL9fw%Ez99{ zYNNqYhX>$LDSCJCDO&TJQNQ}oDtNlHnVerAPsa6O`hz64Q@WzEldw3#A9?m0v=iMV zvzwPUzlGeQOHN05yZ{n>=1~8LT^#AX3Ra*T%T-^5r9>Oqx?sH6td2UAA>@lZ4^uqB zMKp_{Gq`Pgz4Z zIq7%%Y%hgZ-Ngar5jwLeS+l1pIP}kJw5oQYm<(Lz1Ch~WbUZ%4D%+ccu4*GOM4!UE z{<%PFzgximiCcgYqx}YZI7cE&{-_mjjwtCBAnxcKU2T7}${X#@p%xQmEscBIr{j)^ zC&{+-A;!(|=|oX-Vr(%DA&qIkrHKtinhJNk%akEu# z`=Ex)?f!#Y36$eH8IIu&qKG`3@pWu~SJ?AYJV}`Pr5~;-M-nvqv@ki}wui0JG90pZq~pwVcG>Q3Z7^V;AiXHCB3Xj-mXEqng%m!`_>bbJKovR-~^d#>DySjg(VyH;> zo!*%;h-pk`dztRy9L@^z6i7s8CwMXCL8%AB^Tp^5I1fx2*KoV*+7e-hTFy+t^(1vv zQOtUm#;v!^N^g24Azoq@#AWe z{SBZt0*WQ!CQG>}hsk-muNuaFjU)Qmg+QA3V@E>%Rqy$S?r4IkyX`HyRSGZg!kbAO z=+@`LT)c*@(b|`h%MwA!2A-;bDR6{rRcMArK23M_FrDFxO^Xu4lO{x&<6;JT z#9P;B#xDaOE$1t8&3M-QnF>b7eLNJp6%!hYc%RXpz@71aK*TFQRcfr% zu)w!!yPEcmU371au11EDX(ZZ7Aa2_|@o(W3K**w#Gb zkF>)#(~%mFl>QrP=rFc_o=Tqf|DvDoEB-xj{GJkT_rX{QuYPPA|3{rEwfZj!g3LcafJ=8Ly74 zj<^IaYhj*uG3TTuRYrKK@Pfdb!DppxTR~g^==&3poZe$3c<&^1P5-&m1)fT`83n+6 zCT;=JEs|?s)2HI*dgDa2(up#6BoxM@)4OZ;;u zQ<}0YVdZA3?jJsA;M}GcAi?3DR+zO80%_gxJ$Y4%iA}YAyZ8F1CRC-Cz#ka%4WF#8 z%UmG;)bVR@mYOY%usbBYTv?Bd<04yfN9gk96VLVh+xIu-d;&y5&Ttub$kKt4bN9^8 zyI*S&iBQmVyr?CpC)PZ=AR@C=t(!+E-h(asX5wWoF2IF?EJ+!%;9Y2wS9%VbwMc*z zZ|-pmX}XRp#AYS(sKSd#j_+m3S*GOfd>7CK9nJ}-A+VB5qAF}e z*}tWFyu`f-wf@9l z@iw0-hOW{k?>eI_rohW2weAg0*!V1wd{Nw3)BVG#cfWv_n%XWGN~@$XYfjrVvAKp6+sY%7{Oy0BA~)8jc- 
z!_&Eh(&;`i{@-fh?>~jE*I8zIKNTzEmQ&OJ5EX_)C(Xtn5tzM3uc>Db2Rz0z_;OhV zdE5lJRJHytR^^j07s(*^lxzauV+?vWvZ7@>fHa(NR zoy+`JMU;|GsCj(`t1T-HE6$dl8{v^9&iHek^3d3yN)I56%3YO|9bcwxTHye_4>j?h zosLid_xAgzsZjYvQ1Jn?sy9B*371-{vR#1te0Sy17{f87`nxK_)38W5aWkd< z>7WJL`NDd{du;CLN^bohcZuLBIyG|%+|z@v?!K9wl=T#QC(jQ=>L{#v&l@7Mm1of` zF#7mZ9>%DVmg*5wW8T7shgUguFRIhUm)fT1O?W3d-#z#aw3)`|(uLfKic$hnt2<6@ zPd(&bV&{(I&|8c7>7@}t`Ap+gMPtWzx>Up93Q#nJ~qlc1$RJ@cn zrPo5eFK9v99(y-sfw?>gG0|c<9P`4VNUTyUc@76~NAeQQlkEFI=sfvTN?an%QsGFS>G<`nyB;p>oQCX2-z6BT134D5b9=({RBR@HdjS$qpxZThb6$b|P)SNUI#+l*b!& z!yWVKc{8~s%Ruv=Q-Uc!Ukc*Co9g>Q5G}-flz$&RV7=e1D5JWV1!;h#6qe>+1b;dx zQR@dT^AT|&h>OJ9BOT8cplPs{hJOn}*es7}FZpv_n_`+8TAH7|NHSlebMN zxJRM9>~lF+Y6njg3FxV8A{Ep_{Fe zs}V{{FJmqUDs`jfA1CLCH-Q~b*P@o!>L+fxyF-p!n070(oBY-didPa#Bqf&r69JyS zzcxHfuPFaThQgb(;<2wJIaSNao-DYYI{fh{^DnHlupJ(!5>>1`4Dnm5Sr=@q&QObO zf7SA?bp$MvNuQgoBKpi!j=TOM6i=3mWnU(=ii6Q`0sGDT7v)mbU`W*oW@c7JEMhSj zx368>Lg=)L5Coh~LS*U`C9D=?A0f z@T%LM{@jC7PJ_v`4SLqu@v-;>R9`n%Lm_Xmw)N!HBOFts>^4sAj3(*>pK*L)?53*p zt_CrdJ-NOF7yQb}w}v5KtqOU77|iv$rJ~5sqC9aQ41H=$L_ZJx%mUZztDO7mhKo4F zv$s8-sEfB|tm_pgNMU#RH%qPIovvB*v@Z%?F=5}_TLbE_x~7ibKkL^g&&)qmS~-Dc z5EPnyBu1qNJ|xqza+#JL5u_x~_;Lx{0*pGa5MjmJ7x#`>lC~v{C}bD>+K5JAEUhiH zRUTT<`l>eon{dH^q?iHhofJSKS`v*hOUsj}fTOU?jy$K8y|GQ#rc)N&@b^aq8k{1S z)K}MyGsL<-zFw=7;TR@ zFX>;{wnkg~)t`2wx3%UkUiH{4sC&-~{9mzL7$}SbG%&(`PQE0!o}LQ}OH1vamUNsY z9gG%3#(X;okWM%C9{3Ypyy-%nUKGin!Dy^b1*fR+-I-R$7jg_ahZ^gftuqeLy%Dnt z-ZS4xwmg1mt{?BJL#Da<69rX;JA9d&iMhc(V@B*-QxP!_LG}!Y*R$5fNtD2xR8vi% zn>-SEN|(n%S$53(4keomb)-)tH_Hs%#aExiviYa-;Ia8r{PAGt=2N4Zm!Nm9`G^J& zqq(D4N<$Vw6iimI|M&J@kc+q7{0t{o>?EvMxrp%GlBUHV4azSl&emi}V*Jetr@ef& z&*+dE!Bk1-P45q6r9$u#$Y$r+37HEi6j+-LX3lImy0Cs~`l;|acm;Xoi3xQ>ur;@A zb?;;h$yM7+MxiKjT_y&55iVTy zO^l9L+#ZY_s2?`Ha~rCzRX4|~jg`X6I+^U9?~}h6&Hzdp2r1|KO3PH3r_X?c^wB}U zmNzkdkmhZw(;`OJLjyrr@@=Qn1JXP5gY&hbt=DML@y?kV(M!5ibg(3A1G6A_cNngu zbeGIEm96Kmyx?`B6&M%DBVsv_1buoz6txVl_(Trc`9Pv4VSuW^l`K*SBa$inaeZq; z-f7uYDr26oK-x)-{bBGj^3&?wMm50_fF7S7oNdO6ai$>1RTDL(TYJQsijeG!g9e>f 
zYV^bS9g0l(o6ci(#ky_Z-ZuL%{BfT7MzHC=8rFb%wf9CAo6J~KQ~RY3*G;!obQ=3J z$c|!+xqo{^ak|>_0US4g-?Exl%ciCGO#aI*nxZ^k6}I`J&DWvUMlm-#tV%Yn?J2`p zN;UY;3I2kHlU$WGh8m_!(^SN!Nux)J`$=z5xb2S?jzO99u+B&|_M_5BNVw{aE^z)E zY}X7xB-4>iPqVS$r}+_7^moR=CL1CrhDPT$q9|OZ5}6E2!^l7N480!Q859#=Q=3XsiCcV7wmD zFt}Y4Pv5MGriSS*XqxkTeXWV`7Lz&%`00n1&I%Ebrm9>ua93I4W1>O+PFLWR3*F`( z$@9)(cjQ#d`LP&ER284^(;;K7x86^AoM)Die&BR7wRZOP^=;e3oFzOn9L014LPjxt zW?UnP3EE}XJsC{H<#b@z4)(at^?6Z}v@auoy57D)nD^1Ba)Vik$34`;0 zRWUV;Cz7>oF7k&L_y|6Da^^?*`BI+QS%ZmI4qedi>l$>7fyCimG)k7FYa@o}$#{5j zLpDl9pa|W5{+WELeYkicJsM^3T6C23eDbdo!P%s5{dB2}fg-X`LF*1MJ5pBx#4L&5 z_6#UE7{hkI#jz(G+>@Wy^oxip{#HV$IWA_vSK{~y;7QNqe#qf$vYDxAm@6sE!$`E8 zk4#@8$Un0t+`srpCuaZ9az{@ke&%`ODPT*SS;|uhU@t%AKTJ0tar9V1p6_Udb;4y$ ziQ2L)x?NL|+p-QYd)?)}`%Wz{h@BA@*9QQt-)$;&s*&RE*T%LBY*EO>@U(vfY^B@^ ztTYlzSbVq*-*)eK!rGxvuwtSmiBYZ`?ki5gxLssS0jBYbL62F-kJ5DR$6n!$G#^nK ztmabEYtaQ2qU#F_M>?`@#$+Wkjt9|KC5F=QNCyNGK3bl9*IK?mUy_to7-J03NS7j zFJaiatIQVk%2dClbnjDy$S;lel_U|{ByS5Hj8uO+)KheWBISOrcHEH*6@vGz2 za0_zFi&7BG)!eyP19T}I4Z}bQxg}iJ4nu5P@QtU^csw4v_bIdWy3fcDMcKcM)NnnJ zH1g$YunDx~@OZ#O)2&7ta}BIr6n2{8-PgZMsRL>)PjI7+M1CVOf`Ge{>Sv}APTJvn zm_=G`KTXtU3e5t1xWN&9y|a)g?zKFA49R1Yy;Y%4H$)eGyu(Rd$mo2I7GSglMB7=mY2iR*lSGiqFRLc z*4A;2A<byhrtbxr9p>ZTk*1=zPzYabxr&8Gh9oHX@MZpBvh+7UzDfsT z*~8Y?ls~>Sz=hJ~yr~>z&3c;|n8i=u+i{E^fLUbDC7SNj3hUx^J7zN3%g>}$z&w2# zORhe4%UVUmU;#l|HOLtK)bR>Hcn>FngzO#X*XGit--p5AmH8^K%` zF=*wd;te~(1A7F2cnLl-MfTJ`VJ7>`?p;5k6Py8>=Cl60j>wcwyV)6Ky zt~~B>L?&QWR~Acn-Txx;+I)g(V>Y?D9a3Lmv;e4ZEeM1js(PEb80-g%*n;UMb`<(_ z%gH$WKtb4BT92CKzvmpRbXvZ=wEejU!z0+d`P`P{>Yl@n_p)u`awNdrn?;soXELb! 
z7v!Pr$pzk2R}%R9pt2XMN2q=QSy`gO!a{TW>+5SV2?>E2dltE(D2UN~pcz#>itQH0 zr3<+JZ%s5Lq(r%X_C0=)SZpwxr8Ha6ji3}&&v+^0slA$B!K`q0QU-nZkNj(Xokzn1 z8#SMEhEq^lL`hz2j>62j5Kxv=5unT67kovi0~~m*b{SVh?iiE~y1nZxmTRYnZ?f!b zVKn%pBNFy(T(A;$!ibhCz#EfV%6F((DK*l-#=9H@#SisdJR(sW>wj-H{TX408$NkI zX4icC5=I6<8(u4l+W_u8>*D<#Dxp_8!uNt`Tl8NMb*GSl^Y|LQ*0gg=jl0_FPS3!w z(=sCLdhVq|aH$p}99UywJBCQDEdmdtDQ2 z_~jZ=b(e-DAk(S_Cr>0C7G>1!PqtiGZ?p!HUyD>9#T-hIvU}bp`cp+C2LWGb)*syc z^OYbHs?_nE&Q|T%jiD3)TQ;8D>yOyHLZj^ENw?gqDy31Y__aC;86essI|+k(nsc>Q z^tH*LT>dTwY_@u|QLH1t)JeT%v1haPM}ZUZgbRtMYv(#Q5pmocGCHj?YQx!e`&vG1 zhK>Hw6);eb=v<=-<_!5I*F9;*7Iy_=K|);r#D?H)r;xv%`ak+le?CIkPh*?t?*1LO z@7+(MIy`P+L&#iq_Mgs_F8ko%yJgv9zMaPlWjv)S)W~a$>1t_2(LBGFJn+ z!f88-%V37&k-}3@7&ko-KauywuBVl_K+EBg;qM|!BaPTxCA_BzGN9wp>;5TF-$#uL zk*%^yBvH4I)}>Lvck2PL%Z`bZ!*Rb<50g~0#WW_nggpF|5~D_D#jy_1rpHfzcAB_;drD{me2OYSMR$)`-{k1X#Gra&+b#qU%BL80 zBUsIa7(`pY5FGZL2v5BcgpZ7hrCxcfZVD&nU8das8$54~u&=7z63lq$5$+9pj^ zSETF5u9&D{-%p?1+0#Z9tjIs%E7ryRtkla}XR_MCBucQl{sHB+jByZtIJr;VteR6k zuo0eC?K&wD$i5^R-P0joq-1Pz(h>H+{lOjKp7GSuRIMaXqQK3Nj|J6%P%oo*^4g;2 zOa_P8irn+j+QdJ?Wq(yi{#MH6y*m738zcoE7@V9hxH@t77aB=yb@5>EU{ieUCZw~aOFtXeuLcKZ zF4HT0KbqjRZrrn-ijb!_6*I{m^C2J@%b72vtvpd4Dm5coERgzI1U@~cir~$YXh>u& zJs^ztho$$TU+=jaGeZOxDg|d+Ytn;m25Ejs)Qz`Z?fcxB=i1%>)u4X7merDQ7EPK0 z<~WSu4&R0u|1i?l{Vg5M-DgKRf?~fI5fBcae*5Q>8%VIv^on%Dsm9HqHZuFb2Smy8 zV$4nj2D~?#uHPCuK4lYz9XJd>lls%m+8W}^P`{wAMM36Swt=~JiBGD<8Nmtf3B1EU^-r#R#%=_ zxR2}J5=Q>DtD-Gd%_&Z=p)^_SyhOR^xf+pw^4R_K-;@0=yD@KJ zu;b(T6Z7qroVFibsEuE6rr@5FV*4@w;Ee8rx*F&5_hlc@E*VZy5i?BGkQ)a+u5)tmAmTGu)sDGe9BU)`C{hG;T*XKe?)XlaXT#psYMO^pfx%2+lk=X_woG)Fp0`6oHsMXT_+s7Wk%| zSjo+|IQc5_S+s5M~fx=%bO3dzq@16S?>sc75}r;iT_$^g?Oh9C@_fLO2UEDgO6>` zFlY{^vmlj)ux+eI92-YK_)()V~~sK$8UW>ektnt`{RR55#O_z@7fW zd$q<>F1W5WsEwCK#w)N#{4)MJtYM4GNRIPw?;5S(ZFCScoX!G6g^96CJOKsaRV^8S zs|OLG$E@yW%i6L6Pn{eu33%&8X=xMV`X)x!GtCX-AObwKuryw_U-1 zNhHy!c4DmsAE?{Fez||omK55)|I*)sB)lG;Dhv26^YrCt;k_af*58MZJeq~u{^eyoNWML0y*Sfv!onS;9;VfeUGxl)HujN;|S 
z_Q$3JcIhT^CdUY8xwAh90T39mk^fmx|9+GIe#n*d1)~0F*FYbx-3|Ol(R=T{I+lF4 zHF5;D2Aa&`9~+&YE5!fb2Ko0>yEUmlzYPsx`}rppH}f5I3yy9dZ$c|Pz77M-hy`0E zN|MWVMnhd7*A_2mYzHiAO)OT(3yI(8&o9wy(q)dozm;9Kbt2Mt}>sav`14a%1Q1P6p# zuU+q#KrR%%5+(k`(79dgZ0^I9P@*9D$l@41A#%-LQXlVT4h^R2i9o#Y|FhJ?C7)Z~ z{&cv@?~S!EP;cpZVcD4q^Ip?MD2)tbn~_GbwHebi5_E?AEw&8|+7eNFAAB@eW{)t} zghVP71e0Q#BW%0ui9Y;d(oRl zg$H`=RY1P=LN9{oC`jfoh5sWeAhXOb*wxi$*;ktLVRya(zWLY#@y9jk^+d+_bM+T z!FLpIZ&G90d%;dzB0aGsDn^(%zqLMr!fk0V3r)`D6^*_dQblHM1!`i$xSaZ_pmqDu zi?Kxz_JGfOUd&ivU)5!Ax{a~k&T4$fy?3+gTsrmoex25%)IY|0yrIZ6}z| zjzn!nP+waq+uUq7}W$Yey)6kH|GY$UsRcRMMRI1Xzal6>JY!jt& zEk~VtWH@~0w_NdnZjo3Lz8xeyG%A~F-bZ{^<3n?RPQ#S$DSz_W^URjH}@ zVz825{t=#8$5uRK3HN_H%J0a}j#6&-n}&{5k8gRPBAL;O?2jckIS6xZRId-?g7?1n zu_i#SqgwFKgd-eh8(e&m)Ib&|!lP*f!f~bvltwF$u`;)r^67Sy{A739Gxwq{bM6w% zN+TU~u97t#1w!m{jREPRGPujJ z?WY(a@wK&LI9+JfI{G_}1c(AUx}a*}yasP7E<*nwUEdgA z=eKR$*tXT!Mx%yJ8ryanHnz>iR%6??ZQHiroqNxD@45Z=e#nR9m;LO8`OG=ym}AXF z)`fGOC(&KFqv!PAy2a6EgcNVl3|ae=7WNh!q5LbY%);bRv<$`1w76%_)r=rR8_(6P zP%sl!#qk*R($K5U@jTKJ&mTTnUkThu@A+^RCP3J({3eWBiO1f3#XL0xIM=Y zve+?qn0R1R)QWTrE&MUhM-#73S!jlOU~5W>$eBS%6~7@tFb=By|QW2Obv15I+MmC;Cd+ZWjZ^<11j4_5fsWko;VL%YHJRs+D2 z6d|D?`FK&kLBhfdW>fOV9AnYONl|EIqiXzWxjb$msI*G}2T8AiP*a+{uBqd_N?LjU z;S2atJ9;yA7a4jT7eVj~3|)Drc~u(`Efag*Y=@nwfs%#1xbIEU2b3)y!Erf$;yt-3 zPBu%^+gZsW%`x#$lVe3STie9aoC9H~HzKt+jtYQVne~W(cF&(vS+2B&KKKbWg3dz5 z#c?Ce@FIk`#tJlEoei8HeLk*q^TrA=1($x4;>UBx0noT1Xs+9r>27^b4y6%MJXcuE zr|gbw$d;pgokE=3<@;ZRvzV+1?{}6Sp#JoBI}!lY#rbn(JxUAa^E!`z@cM{%xv+2; zJ|}9pcoX5?6uR5>sRGTyX9U!VxXyn4zCp>gFIf%MqcMW$ST3e9de+xloVaW41g`aI z{1vbc8r;wwoqfPiNM;MW^dVc1wH!a42qIjTH?}KnuOIpmkOqoe(nTK%pEG!xvui$; zHm7MumJNQ^YQnZ0XD!pNOx zxm{lTNM3RJKV1v_hief8e+oazo7fmS6HesvWWQoL3rF(hmhJfp_IahpYTeWKe6O@; zRR!+o7T0Jkr%3hDO@>ETJf4UY+oS((V_cO_f4cA!N1SB$h;%=t|71S&e5wj$eCK()?+KEiGOv3n}!_E$Be*S3H#`%hE#E@C2cc&1E5<(>6W6 z$obtTCuPge0t-lqBR3A!qfE!Ap;nr;G8oOs<%oc~TA}BBnZ+nmlFBxC<=x63-4`|( z%GuSoOha4uoYO*eRDy;HtMUanvIdg$+*(`J#KWVy3f@mUWh#U9ElWI 
zCJOZm5|XcRzmWrZKCAb#FsL_HbWN;P?0MlZ_g0F4bliu4K3iJqm#yH+xOqNJOw{d) zB`nuauCg;LsbE)&-aG7w{_~xaIvXDcM&Gu86@o=^vsasED--o>4e%Z*DAw-JeQ2D@|upc*b8pmyz9 zNSE}hhUK7<5xBvB3%r1l9%9g$Bwa%)HiAW*g)Z`&%FV{)w#bDHl7#s_#g&g_oZu9E zXISDVcX|Gaw)jY_S=*>>8hRbBT6jJg%wH}}+wFi=S|d_?up&dci1u<#xbo+`md%Y^ z#RRwX)!lDsW*2P?nz?YCSnNzREAqZqdqC?^)J!b-p4@Q0P`cVxfAJ*c8%$2+hXI>u zW5lb^;P`0To-=Yga+JckcfP$t)7}DUvYlZ|*J1Zz0jM_jJ7$+Q%F>f&1F)6z>?*$t zSOxHDbl=3`IjPCqmt;9qDR+?|<9ef@%zM*7HPsTI!k*T{BH@ThImJGc<$qg;^;!=T zsY3hzY1@Cpg(`nQ*!aO1W~ECGJGk{Sl$5PMdoQ#;?t$tmoq>VT(&}ITc$|H~*(w=8 zot~Wu%dyj@lCAelq_YU1Wh; zS>c1hl$^~?s#_bKM#K9qEgkMEg~+`nsfMAGF|^5Zr0VA~(c%p11~;eX`P*!V^t$tD=+4HSEY-4=Ud^$klZwX)Eo zT-n|<&aOU%HBjE`h_ z(iwp=hKz1pc7)>1Ttjv3S4I~Be^{5TAHceB8Aq(t$ipGq8}MWD>3S|PMQ3P|5auYw zzOjJeBBUIFk_Q@qYH){a=UWkqBoN>Qx(s%M@(RQWz(<0D0R_G@LYNH0YxsWO21?}6 zr2%K70H|O@LPPFvZxHf*0#K%Q+ zHz)8&dV;yrsuIX;ii_r}2Qq6t^KiP;;SOs!b;s6muYWcrXj#r zAj%NU@k+g>CoW1uV5H1YG%1g^Sf*GfsGL_uTt zdQ~&|43dI=l&fi|i|zOmXxd%={B>h`=6;tG4xBdI61U5dnVnzxH$3la3(CsRz6C#g~*n5ulbD{~H9hHmXxCa1*GF!6f8mr|W4?EFV0d^I$fe+)3)Szlq9#(m)bx zw!w8|k2b`s@KAR*xLgXJwNv52dP|9nDwW@~D@ZTGvlUeB+4^@Lf2SwpF#sOEP6R~U zTZB74YQm?;H2Sjs&!IgJDoA)Z_vIc&ZENqDzcsoVmCn4CM(u*URzPT9k;i&uv~8^W ziEQ=+3cf+|lJvn{qO#?+_KMdbZO7w}JC(D@qttfl-7b91E~?e&GL@BteROGUh)KM( z9Ur+Jpdilx3zobN41zzeM_i{_p!G>_C{J=jj}J$~8(JQCy&yDmBp|DM@PpQDl7|g@ zLPbJD|NK&}iPqLO-;6D%>orgp!gPU?JwUw}J4H9+oQtbX*Kh|(d$mv@YBs7!zD$2c2w~&NJ6}ON8K78hP zB%8_2t@K0pM|*Y(388+{px=HbHv3mll?Hu?lbL$;|rrd4{e=`%ik#KsqT= zCU)9>moEih;jH_#?!gT5{eIP(%nHwqSBY->WE~mobeF09x!OsO&Z;wZQ2uiHsoL6f z217fXAn+}mNe!IRIFmNvJ4e1@iyw3G(O^v=9rCi#uwisH`bLSNe%Ol{u!yG$RJ}BT zXvG|U+|kU}NewpJIr%*EmviVNfeI12%~Up5MmOj|($u`%r27QxfYc|D7-Q!g{?)bz zZ7WflG)}zr_bPCrJ9`F+yTRk_`>6X$!}`1c>=;Y^&EJC$XZgDgIYj|k+V1}Jd{+KG z4v}<|qd)Tfh`{O*%KK{wl!?c3C`CNx5NbnmGi2l2$naj~M*Ct5t1I60KCh_3cJ*B9 zPgYwlO=$~u@!&3aehQ3mB_USwu2Q7RaRLGZ2015U?DhKHBlH^+aKgTrD0!ybSML!w zE7q43P~!3%Ak^S?1kaaxB;=+8n{$|(C}#DlMJbxG@N%&)%84p&O%-;)CzsCX9IXH~ 
zVNo*)f0W750%0jp#-4*>x7is$O1+MZEl%F8lK;X!Q=*9`9QT!@<<07))dNB5Ij}oN z1cb%89_eaPy)(;1n596QKvBG*Yv}3{EAjAo{QQrQBD2v5%7j%@i3wIE#81bAQXff& z{`!Rt1%&*StSm_?)lGbKDd@Z2-Fs880GpC+wIlhYSqJ-7&)9*e5wvNJTW@5yYY=hW zTvxUb45M4;SETsgXpz!gj7KQFm;NNeDRPJKNZ+D%&(`?>^s2Zc{^J1RHu;{%9^(8EQj-V+8vNTi4dxfBGcfs@NFNnA@2fnAo~>O*IC6E1`>6eoy<%@PrC zXFo=WG%q$889g`Y)@otLY{fX#PlI9*64jl1b-&e*G?Vq}& z#8Te(hHd`FgI5Zd+JR zp;BI-*uGu$P0BHx?krxV)gb=jhny;^jRO)|@eF$D7f1A05h>K`gcn*P34Ag$Wm^ox zlsokH)2Ms-6r*u0f3By9h1L_-$-PywZ1GV9zxf+~y?gIgy~Rj*q5P#dp>49XUILtm z&1{srw6$Vl@rI@lCbZ+wh*g*1HuvX{&2?JD=NEw;*pd8KB}1jS-9Krj z4R0^gNW3t&rUgf5QTFTBXlseIj0FHk$` z+tkr{ph@1dxzJ1+j9zB}UA8TLt_+2EsJGU4KTo?V=f^b89{_mqwweUHJ_(wEtd&uN zHfP(Yzm3q2>RKKU>J{S|D(zwy^L2m<36nly%g|}p+-Vw<8vczzj%C=Gga(Ect$Y6z zMK@SYm!ZJ_{77{xCp{xHshBPZ)D-{JQ%2%^T^u#e+o(5Swbu$~JoKVr#)d6Cf$~H= z0uH@7K?-BkEDn|JaYgw4^1A2I)n?ogIy%)+pQ!@O`|9@|QOV+INLEhrsoh2OWqC=$2lnGOOMc$O^h0-R3u-N8jxEm10$1^Zb#iST%l1u;!h2 z<3aa80?PU>V&H`7bH3VZ-)Qr!{a(`9rcPCJvIGLsT*qI^1fV(ejMp+>bzcQS7Y4ow z2?=dnwJAQhZ_1rC<3jsVgx|UI^>_iDced($MIA_hXvE#QYB)$yX|w~{d#Qw07AC4W zzeVDG&cqr*bMPM$2wa#@=cshYdQ)k_7Jc!}nnmL;rUSXS?8lRt&7rv3ABFouJxo(f z&Rk4qkHq_Mw`_IU?IUS2aSaoOyM!ppsB)cjzMv1;`{O095c@-5H-Myh&i>MG{@KB-}pxyqCrA|klHR75!;c2bR zQlN!9X6u~C#)#dPf47mpj-j8S{h1rvvAckXV9VEWMfVGAJaB-(-UJ0e3w> zc$j@#Mds@b76YQ-Gn=nyG_ZA*>y41~G}HLO0ELjS@aEcK+Jj*y@(-SE1ot!X16FH9 zaC=KHpEBo-qa&C1r5ayyVrtlq0{Yg8Y+Iy-#6Hqp_&JG;h{NTsNxg%2h9XzZ2DM(D z7&oiF627dmB}`2h2jSZ-=iSCC^orPG29k1u-lN%L?~wF|m(b0E^K@mpS{5E!vlZ?( ze9u>IjA8nM==D{>H9)rJ+WB&ZEz)k?a{lFX9jwSp0&=M9{6v z<+rhEh1{O?M{UKM1`)umfCZIo9Rdd|q%5@Re_=aYK*%s>M?{Z?Op`+JO`{%~Jr5e^ zJM&kn-Ay+<>_!o!kvd-rPIJBMKJPMofB+?#>$1Bd(#fd*Bpy!}Dz}Th`KmNJky!+b zu~Q56)kCG;fppGtDcE&Mqu@pgg{4FR_bvo=<&rZzbpD)>_=xUI2!fbn36i{D4$zN* zOwHlMZ*DXt(*R%n+Cv`zxu+FBMI(oxefQ`t$%yZ$GJ&fp(Z)|G*Yed&%EO#&|5AkN zAmK>7Q*HKq&L<6j-8)1I!*E_M30Y4Q5O1NL<>L-2<*F@uDYD8?p=^w`^&49N#lIS7 zPOtYUoZ|omX#bbuM1!Fu>Y#=1?~9a$x>F8I;Uk%%!{>v^Y8Y;Fr;bbQ{5;-P=}T{n z3deJ)lch$4wKioWR4YHmUTk6UIN|Md_la6}Y6WU4X%@-);8dbb_`B0JUl36#l8$AF 
z1gnAlA-i9B6Wz5crm^J;NW123p?i`knP#%nZ>C%tDn$3ADO@E?(7^C z*(_OgyQ3Q*D%3Gjkp5@W1R@}e`tgL#Ww#%kXut_ar;I;tFx?iisceT|NAX8{m>?%K z6efO~5YZ@9R4VdJo#%hQ`k&JNzo4R1JRqSmhPu2?K%eMe?FoQOskq+4is;_Q)Ttkz&zp}{tHvbs5bE`J)h zkF))GH+1ft?Q5(aJEWfsmaw#1Qnat?bm{0`;qmHn!fAswh|Qy5c$D*fUdlu{_aMi6 ztD4G%Gy|pw{7GlKPjbQYiRqUd3xgVkY>^a<-CHJ4(eR=|p5zTdn;*DtJ^gk^-YJL6 z7=kXP*+A>Wj>m(ziYkWaXu#;kp(+GESVqm{dv`L^3)$Ts)Q^=KFi|?v&aqA)DXA(I zH|{MIWF75Oo|*)GwdRE!pk&@Rhofqz2jY_xiTQ!iyV4R}uu``?VkFXX+KYi&Q=sIh zw5&ZW6FBa(aqMP-OvwbGrRIAO7Gr%xj}%Uj_4gXEU7ly1izwa#l?fo*tE5LmqWl0X z7PF2apvLQ6-x%1Y5rZyB3prRfgp!W)@4K+0DD0UDf#57>Z%E*GKYWky!E}nNBrvjh zASc+m8W&~U7>giQEV6Cx>=?ovz+`IGbQe_f={?(lUPAwo(S7p*T7s#(%x2f@D3B+S z9lH~g8^+>@gX8^*L5Yeg2P-p@llBZ-H91KdxS2D8$ zS1BqnBuLXSHh*QPkWhYtdr}&6Vp-Gf;TP9N}}VuHQlYrgGLM8jUMKTn;JotJF>P()1cyd zC0Jm=r2(k{qA;|9sy0gZX7rSV#Z!ZDPD^JZvx;I$`9qwrVF7TbcWhzE%ijQ$jx7*T z_uNCnx6}t?aF5h?a+cXAor#G}6Q#xm4a*`?jR(V*>uv6rx+~5UP(XEw~91LYtETJt*W!|C+Yb|eMx9dJ=5WHj125PzO58Qa#VXBfv^-O`s>~FWVsRr2O+gbb)BJQ zej6nWml0AigoEg02Ka^H7@~v+AI$hZq(HVAjjPT)Jivaqvv&@;bAL{6lD@j~Iv#{- z#sZez;*;g!)#j>{`{ZHTFYB6LDP@u+$#o0g)vNv06sxPd_e_owH};fOi!}X zqMZs%cNTZ4b?jiGQOaVwJP;!p`J;QTl-x!+$7d`tNN@Hi%l>$dB5fQ95Kk)Ym)Rax zLj2;wfr5j_3g$N4ADtz4;;M&Hn>aLhE78;)a65Kg2PdG|S$^z7jn}ctGR&v6akDk^ z(4HXtFHS#d6$~@e@E*zNor2$coX60Iucqq4`6d;;^ozTw|&C4c;f{Ki&J31HxVh-3asewPLc z7)bu4f%c!N$-f9-4S*;mKBh|GZy)COKKT%OdFIg1vVMwu{S88E{U?_+3*+UqN7{NxIKanqK&J>`ilfT%ih-Ll3V}NKrH6QM?n}*nP7FRqp;m=U<&5ed z+Nu~plpNKWM8*jeo%OZh*b~IDI~%EdvYSGX)nAbgl~M~kj>?&^y_h5xcCzYNzt7_j z#t5*n=@bA3S1`-Vh9uI2@RRQD6q-u=F)Z=+Hd02cUr!Wb1N&*Xu6}=}VeX4*XZ*y* zUFE9E)3U@%>piGtXm-JJ4ZR4$nXl2Uc!yLW|I?#cSAnR83L#x8X9P(g{ASNtcLORa zLikq*Nj)0m@J882OJ>*?JwmR!f=Gd}!|o<_;h@tG^ZH|jHri1E?pFJ#>Wl_yqF?1c#rrgkJjD8BtlW>pJ8=pjj^$JS?wS6v`Fvl@m(bs3Nh z_ggdo5BrtKb(aJvnTrhZnVqKZo{zpWI@EEugw0XGYyy>3{;Xra2gpg3RIqGM==7}n zVwQuxrYMS@wq*F%S|##7cI^|`njOFtt)3t&-4R___4Kn5t#vW>UT&zB&YC`@t$9`5 zgVc;2g0NUnC`l&y6joz&(hHN^^E037E8z(mc(EBNsG~Z6=4U3{xL8koIdxQV*x<73 
zb91Uc)(OK|CIGW4?+iO!*FQ~SFbRits$ulE*E*1G{-&nVU->7(2iO#$C_q#+jMSaV z!&Ml`5g8oQ)`RvOXOfCS0^I_$K;ikr)noWu7YQV zM!`(CYohZ~crvRz<2mFGN8J^DEh|5f=mvMw`=(AA-i|LZr)$Yhb|mU@aM<*Q*W@9Y zQTbo*c{Ug8*r2&od8$Bf>{Bk@{%~#|LwdOfIFUEkZT5Tkt5~sVu^0i5LB1%L*j)dY z?;eBGWYbo48B0*|i_hyk4pC_gtvdD+{qZ}=omoi7Z{VhYja z@tQ7vj>At#%nw~rLRAKZg!qUo5Y`t{r|ycSg+N%uW~4lqz;qUg#W1?x={+O^vqaYI zeL5XJW8KWG^7Cg9AeVk+p`yu>E7W$|_2&uRv%XI!rxv=oazlv6Vze=xDV?9vlzd}jXPyss#(|+J z6z3{x>jSAeF{bmRx>NZa1&W7Fpz?vR7u!&4v0dbGc6m)*R3T#PtcP|%A#maf#k^$6 zpHqSDr+9umT9O&PSK)f7Va3M%@ZQW3-SAAG)uER@uBoc@O-rLykhZ*u;p)EslePpn z`vH*;M?a}!ltR^68_@FOU84>&r%f_Z8fA-RNoES=5E&f}(h|yv2|3WAm||ld z<4UH%#~Qz9%GF{Q$Q5GReo=X+7DBPjOVEn(D~KX|&}@T!PpL=3!lC!&!pcykGK?~W z>30*}+(Qiwr+)9d3Mbzksepw?Vj$ZXI-KTkA48C(G&5o4YMhaWi7tl4Zk@-xrAG6L zg9Qx_?&NSjqM>_w+B%slU9jI^@sZTkOusT&YeGv_qzA|G2X_9h^z}4(R+cz&m85!} zX?uj}FXn4N@Db#UX)+d3#>}IGBqx$Um@+>M0i?X z*)sUG;@~Q(fv(*2pMKGuzzWIq2-U*myc1+8Ru-R&V=IPZ;{G0i&|iCEPA&>^WMQu$-_4cvu+L5H6ygdq{ZvC2zrgsf`IF zf|Xz7%Gkn4fIfQ~;EJ|N$&=Wl%e4a6d6F_cj7VRUUc&r6t@=tC!0$X>Xgl~1@%!Z{iZrc3R>Sk5PK(k;|7-E<*7gFfn>m!&P#cwo4d zLl8=h@iOmfzC%yIUS4oE5va3Sb&st=Z2o*iM>d!u6JO=S%~Y5C>1Ff{va2vBfAi-q zQ^e^~MB1}7k+@U>iWJ&pk*0Y~i864V09^4)u57h~rUJzlY_BgkU)~VG-`+JKU|VMU zHd`+*Sqyr*ABp6=1@dwfXy{CyazWYlE+p`sU%j_vfK<2m_n9$*Qy5JRB}fIw2W|1DhMWgF%*C2IhNEyl&yBpO zD9}^*(qYXd3A8rxhz41-9SYRh>7%$9<+fPm2Hv~tdF8>xcPMwf=uztaG?+Rw~9>nn?gl_W8mIv#ejIFLySK`ks3Vwcmzdif| zf;XrkdM|)1{NrGkC74^)5b&uZ!1_WUP+IX3CPLWWWM~##B@3p<4Pwe76I{73(}mD~}AjoA2ye>4W7UE}-(5ZPt}^bNy6!*)c{ z6R;c=kTLB^-iCozgOab3jVihW>`A7HscuTk}7(N1bp5nH^+}r;VJ771ytJyR{ zECGZA)716|HT!h;7r-8ZfAKL=0PGo0BBB}j{n&UT1yDhp`~uJQQzahf%uqZ>VX&ow zEmu$4Ww4~wEL7^Fd(Y5*HM`YY#vaKuPY1G#hmY3S&D$byoeqfyoC#=lyLs)iE za1&{4zL>akDgy@0a|Qd zB13@n`dS=VD{tAbHwdR5*TqeJqT0FO88p(Qy|bJz7KeVAsjK) zlJDG@F*ax}@mfkZl{>jvRU=Dt_%m1UPGNWcLqz=n47`E4g$M&-o9M>0U)T->H>3NN z2GCM$b&bAGZgn5(z0b;A@4eqwGgb#onjkjCn%LMJ6El{`8#A6PxA*u*eRdAwH!GZ& zHaCn?Jl*sL&D}jre#eDDwB@T2Wg(zf7KvAnZ_^0j2K%`uBU_zWDw8MWgA(qFfYSUT 
z;3tv}n|#Lv-AX5`vCJcVW4T4A5Rbms4JUD%bVhAlo{Prno)3p?zDt=pi=W!1nq7+7 z{@@27xy*Uxt}boQs#bo%4)Lgc@%-lj2f(QoC~K(w<8vt&o2F?=vrwOTKM`rIH6mQo4Pyuu*DCuW=)aojJip7#cNIw=vB+i@f;V1 zZK=M4oTdM=NWKJ#KWhnrT+Ab$UF#-)$B9TU#mV%lZhf2p_GQD;tsmDro?29C&|`P= zxnUTBEbVc1XB5Y`oXF9oG zbvH+n{X*QIq zSj6M4zuNBDuRw-)$H_CsQcWVJYc^*KE*A})gKB7fA(%FP3*xEY)-~0K`2PL-fSAFR zA^$HfDAME`|AbVeyNF%Nk<>>XCHw{`|2RoAPI*c!BT`b*&Q?=xPJN5d+{GA-{Y6DA;tiwzL)O>bnU2uu9!NgwoHK9&Fc}1EBT4kSq08lzskfe)qvk_77H{> zk1#JUDOPmEFHSRF=g&42Dt7cq)r~XVwgr4<#ig$A#?Tkp?j|c+c$|hEd2vUx%p0)H z^GJjqn)hW>GUMr3N!W|K`Z}m;f>8Ph_Yc~Dko<$q49=kgDfcVT(YWRfbr+S|yc%`2 zRZ8frAxKZP$n`R`WLI$A*Q_zsz`^&|%xWSa>|ML->yu2eW+cRR7^ADGstbry0#-8t zehQ^3@5gn`SCHmHEODKOvqr=Gpq^J+5=D_5Sy_Rgj5IU zEs+HM(ei5zSm(iYOKA_Z~e@EB6~-Ky^#n4|mNY8`gYSybm)zNxxO%d6OL z5Nvwab3Y;3mO44>(F{;_-R&oY`-sMtK+fx|0_VrZ&Ja zK!T3pw++zS{xg1h0doohIL|RmvC}8auGt3$8CC}q_d8>SZy#)Jc5vwuu2)sS`{cr2b3oz~R{Wy~E zFs;~?wyK65{+fx`)ATBh!|G?!M*NGx5qA@i)&?6;`=i%wE;jIL1yMFm~aFxl0))e7Fqct94nf&@7_;|ruO%U zt#1VSt7lCfRkQGaZ?XS`Lhk9Cp^O`#WXvCfAgceOLeDai;bjQp*@e(fBg${Wje9lU zyFuTtW8F?RUmmJs3UJb16LAg%t2}@ zu8$cXQwkq`7T0tC?g*+A1n9agV9n4fg&*Q_7Akb9rL^D#y+hz8+kDB*5{3|Fzn?eX z88!Svk^ZrwEsGz{WB*}5o;>{O`vCnTgj{4?O_b*#Jr=xSjc{Az$txS*%bLR4&FzId zz^-bj2wq=QHQ8pLH-=}(^JwTc=?%Ezt0gkh=7}{OZ8^sVT=N)rAYVbrNT~_ zVc7WP)-Yh(*)A3!#Z$$tSdCukVvL-!480|rGqze-mM%&YEx-Jp7i@mMXHSUPl`oI3 z1)Thm@W?wFsgo`|7Q5eg{>$rcfB;yaa(!GwCjANXtax$>vj64<84|CUv0y3VlqxD~ z1ATpYepr;~m=-!=hIJN*+KMyHP^w`Xm>4i%W+rHht3&MA+WOUw#MvdIs%rmQp_d?G zU&XK;XCPHzW2>mI4@|2?i>+!4(`Fipu|M1iPN2NP9~`8d7|bw)lPN0Er7l+{eb76a zF-?5lmuehgGB?URl#{SfBzbsI8hPT_y?&lk}PFLRA>75Cvz0mui&Fmf%zcI0l)a6dId>pgm;aoy%yVE4#7bi)I%zZ=%g_5|e|rRoc+^{#%;{+&V{`}7&~ z`FYz&2hldJB*e(#Dcd}M&PfzpG;c!$!vp_k-dLyfcLzc$F>nnRtf;{R(a^Ss23MG! 
zVBW~~`cxt75-)WRQ4@XOjdM>NT^p_j#e&FmL6Pirh*24;_U>*5u`)#=TQ`ofG`?`( zW=Fl=c+HEub_ipv#JCGC}hx5fP-IAvw!g&~jQ@UsKn2YFbm8Q@1+c|^BU;E0oTZ&vEMjl^+4 z8(d0wM@Pp7j)jg)F&3P)_y7mt;T+xKv4>1|9F_t_c;~bq2JO4Hlh@`97Gn}Li<^PJ zIZj_)G~7g=m7q}PAk$IUcOMy=LP-Y10_$U8dJsg=Y9BbkBfEf2xAxxd)1H?M2eGf0|Z znMWb&f>pob7S@^J8cGsbKi`X z6e0=Re{#5sf$Q}n=F>cxC>soPLtPOU_*8sW}6kCas{IBK+d@mMfE(WL9V zQzuV+S%$!dzv9CK5xKcSWTnxHM-9^1MWIbb@8^#_gsdn_9SN@-Ie=y3jkLF0JI3m| zQC!}q`|bFuqSY$l(6Wl~FKElE-fKl7tz4NwP{qUgE_l`rn%ia%VEh1!LG|)Xv}x@y#+i_ROi02xsHP~Km~$&cg`*~rqFfFd>m91u4PWoz zM!1WQNwto3HO*djYr%&yTX7IrpzI;`b{7fGRT6#KUC^JbR3IAFS|RLgT~4LYc9E*UJD5a1 z?bR(b=!`B#&k7Nqe@Yw^>xfV20ox0N4I_)}!3{VO0%*g#A!?=aeTXg(&ah5Ds##;O zR*naTM>M4=Q8b+jfDitrP+!LcA!bXJLNU4LskeE#Oe{Qidyxwhy$haCt*qq6+)~l`(ez1@w#@QP^Tr92Um(_mONM62Q@(&1vKG1Q9ErrG-F02&ZnQKQ z^X`}z>CW?R447`H1ZEo{+)GDCM<2t%v3_BpY_-&{vUzW*(xIfdtf{HYdsTDc8%6eS zJAe}Qd}IPL2>Q(9EN#6Cmf33M*>haO^PG8~o19IRdxugCIDN{8vQ;dlJXj76Mu`p=&I>!cr7&*uzd zKb%-4>uJ#|x&fO_B{n`E(6CpkBv9s1KYAqzmswF| z4*DF7WEal(2@6`X6=7S2ZeDW~(*%JH+#DK<`Nn~@{wB!3%>ky|!ZglO9a)mNd#fxI zFd&98VLJq6xi#CXc^J?S`8DuB$OL`sEPY?DnaP}B`71~CQ)mo9202#v1L^-cQ%0ao z@4g|~H$K#hmfJ7!blTx0j;oze@@_*@KdjgBG`Z$|ve5HANuBq9B4Z85NZw+6DP*aq`Uiyao*Su<#`5O@wc$pwjPkL0O_=>a~}jw8&vBTl49*0$}G% zG`Pg9hiNhu&spPAh!=EpwwbgSP>H%BdkFqI4Zt5SS`i;2s*m4<<>jOLOVZW@V?-M! z_7=Mf6vRQmENdoGa8S6fBE)YMhc&vA5=aXR=lQvjK>xF)@qgUp{q73f1#*g`*>%CL zY&upk_q#Z4=2Y&`n@x99);3Z?;&x|<0~M?{@G<&&n%v-hDW+?y0me9_=oU(s=9VL4 zfo`ooB3|l9%->FqdqSc?xAQIx4n__kkMNzm-_-V;g`mC#HI*s!C&6RepwHn+QPNCJ zg=Gbirr4Osc*_ljRq`=Oh!Q8W-q7wn!>b%*)@R`G(XGNFzyzjLURsobq{X7cqH@a z5RPh9W@uGCw$ccHjd_@YI_py9tA#OEw1l@#j&7i3!N8B~%RY%Oi`spwk(H-*twU6h&0JopCEgDK@-m~9N zmXsMO2q=G#6E;*RB^_O}3rpn&rlex?Mn*xQWF(5n?gAdz-Gox($mCI^kckgaw8JXfP=TyS<WAC-Dj+MPG5~)Co8tV*3{e^WpAqRlUv9T` zP$3j=D^sHc%n(t}_h6p(v)*uaOUQU}*}f9erkjIEHn`%P*h*dlo6d_{m%x5y`i(w! 
zc9Yl>Z>%{$XLD^l)Y!nzH-DbgHIB{bQ!I^!JH)q@T=_zy+JiY9o+bo=;=OR$`Z}l= zJiHB)cTjez2WTdt;ur9C}q>D{F{B%CQP-V;RMZRbpc#3gI%p^9Udac%pd`D##D;bR45Y!Dah$MA{ z0=CgR&&$qpo%0PZ@_Va}yAfw@yDJEDje^OdFC(rF!n;gpvc+2NiHfwrz}U7AZQR~` zcV~SB7-^emm{6@(x08*oo;N0{IAp@N{L#7>wr!C)`~Q!xcMh)f+q#9jJ9Y;hvtxH` z8#}gb+qQOW8=dUfwv7%tHam9mrBB`a-gAESp6{>JPVK5^?X}jNb39{A&~4=fV!)y? z3M66kFxXs~!83y*dpWVip*?^YAl%ERTk6ajo=8ii%!f=^Ocm?k_5I&7`5FlGWvWQd z6Qd4nG7a@r#mkd|?L_}IcryKk2|E>1FnMn7PiQjPEm0-BI(D}=0^-fsqbgMqqs76< z7~2u|om z1tzO{Z$V_$s~zYkuCEYHZqA2fw&W>e%B!TMjhcS$bK~-&x^NNA{J*kz!@Fd4dmUX} z-5$XC_00m(oyX;hbFn|_&xyawBNV}s5OxpYbmqZO&Co$;SFR`M8L|*`oTPiX+>dUm zmQ*_|XyCT(K{{J&FL)_2N#a}nl;MfWFpPFb<;)lU3_!0deAu)@B7f%%&H&zO$Yn>k!cq!OjOUz$iT>vl+(*3uap4J6cWJcoSXUmr>S)j!MqizXr@U&1!t~YTLMSsG9_?q zN<-!7*I5XEIQ*KVUog)kreROD(cX&LawC(*Vns}R@NJsqidsH*_TMD0>ia0a zY#)6y%WIAVne=`Jk`u>bJOOUh|K@<-Q9kbI>zr!FZmdKh4LT_dzr}I-^vq01vWbGp zG9LpsD~usg_y8;V?I&k^AYy$zb59m{EJaeaTQQ8y6_&sH-+OTXb7h^De^*~c4@%AaZ_fDR@!d-(U%#v@p`bm({a^I>zbw#e>F?^b zb#)jx{{OZDL`n!=4J`mbHB`n&qJ&UOQb|K&;lvm?4v{Lp5)Qh#*o_ymYl z{oJ^ZkFn(xpO@fIb$Pyihd_fZ9_*f9SRkt2JJ>^Ecp`NYiSmJIux*glkgm9Ft)N4X z)e4ZE0%Wzo*tLL@v;OBDdw}}827;g*(-IS8@#L?8gkAh37TDdt3fbZ7)rH6EP`$dK z;s=6z*a+bnX@wGLbih4U(gnT7fn&4=H<4wd+r3*Dn2=>5pCd4+zj$lY63u%tOmMHs zW6gy#K;I#W26%i%qm{>naPEJKh)czoErHpnP%QcQ`kAQc56x2 z$kvZemi7J7ZtWnRH(pOLTfvB2!;+7>1`rJ_`gBlp(pVid1XNbo45%B6U=w=vnr-Vt zL{qU|%{70$q9_DqJw3if2t{glrG64X$3bjLacTD1!7aP*SlPgQ*Wlq2Vo-S$Y z=)PTJsoLAM+eBsnGxg;PzG<-82EdZS3UNWKW9VYk6#V0=#rl3+oMd0CuAVrUs|V<2 z-qr{noco=Y9Q>!r%$PhdjRGTq%3XM7tnXi5H+OLS!kXDhNcxEw8L>r1RRYz|Rwzad zW_%xYWU#mVyWZD;(OhS8?G~oe8GO){IJIHLq@#M*WPTqhq*^t3J|6uTu z!rGIiLmy88C2B(dPq(Fq*!?NyBj4ta=MC2E5)A6Cho-FxM3T+qI zfz7=IMRxb6@25Pk$e6g$vKNH)_?)P4*82sW;h#5#_vSvm%OYlA9)}ckU33@!o|0=U zfY?_@6ogYWJPeD^2XzK~7wy8%JN^{^11Xce9wWnbwWnF#c0AM6pH*2(5E!g;`OpY1 z9_ROIX!Tc{4ICY3f5BMu5hl-1u2GzmqRJvPxFO90yOp~y*&XkGew&1D?oj!Za3U%y z*E5A)$Y$?L7&;`~JC_ot+V9k-f>AS;FVbpGwqOapN9;bUQq+@V|8RM`;vZJavHghA 
zHS)&A;6M3bc`a$hb>Y`4w6FKpOjN4g>o@Udj?&=DjD4+sU>Q#Ja&yM<7gwiht zfbP!Co%1jIeJrXp+1^w8KKUl;t*A7`2u$`34&XHgX!9-5)a~dn!0(|KlX(J`RRvXk zOxXSRTaWvTrVMRl|21l8waY6jc%pf@gd@#Nfx0m!WykItts&aZKfQ4LUq|!KjD{Aj z@8;ydree(vJ0;oiP;3QDbDXsQGsNnA)Trkp;R8mcY9rG0`0U5?kVtjg+nir-c+2S(l;(nbJjYX+V1#fo%F`Q-IAGPGt86rb7f$Xs;|h_=^1J)-(v%E08w>j)n=`o5d|dl&wXxJOU&VF|Y{j8;u^I)KN&hk9o{*s+x<(ixtQ zu92QE@vr38*1GV92t@Laa%V7qRYAi4ay;wG`c}yQ@Qm}T(*gl;L%!Q52zqYTLx%2pL)TMW=(t zhux#-HMU6|zE)~8=SdltFB*NaX15R{Vn9*Yb&i#MIhvnUpD&c*t6lL<_+p6iew_mr zOcIZeTb;LVB0AsxZp8UEowZGRWd%ySik@CdF?$_(Ll-&!NBvP6i8pfpdKQ6 z(QT)UbSmP38;g|5%H;@I^@FT+w$+r z{TrRDm+jY_*;a{trB*rj^kyTB4y4GE%nKHhT_+D>b!J8)KqU(EiV1Z~h>t}MeNY(SY>=!+N_Zp%((ol&o+v7R_kzP5+?$Sgbp6` zCp=g03qjo}^~Q}5$}}o*gTJi4o4yar+NI29T(6odQ7g{QQ3~{#fQX^^*2zsL&fv)7 z>l;lP&Z!m_e4ksT(8TDGY;t=R#^i1hiOp6?#Ui3oe2Cy`GX5hF_z}kPfeXVNuuUM6 z43mb30cC@tCR$h;8uAJ%zyRliq7EE|C@>@_l)Kzy2mo5M>t~I0Y%8Oj&R7w|3n-|M z_L$h2_n^o}tq6`HT^_Xg(H?m*>;O2Ngx9J06@zuzNWGCa(_p5D97;gnUhE}O4Hu9s zM^%U?)X-#G6n5_cH7=DRI4Zg^X>ey{g>!mlzm}dgA%%pDFz%puGanZLkllnh1qggy znw6!UV|b6_Tdq6YPBvC+aY3r*1Oy_aXgQbbzxW=%Ct_UQk*X5;8YHsE>@pVBcq@3B z+r4(tz@PzY{462XD|-D-r1cvTMh^n9Q!Wo zihBJw9MxD$5Jd=e^uUR^gvdacf1u))zj0lQf3yOC79RJ<>lPMq(gu1Dr~iuxK(10o zTZzgztp6)Pb94JpXt$l+5$rrHn&_kCf_$5WI~r|i#v+6)w*Seq8oXu`&Sf;2{%1ID zo(aiP;pWr9v~AzUnAfS@Q{TItZ3xVbR+NvaVlYM|K(T2k`sNRiAYVa9rBoipIM7n3 zMR#70b_FgB{?z?=tLgRxDfB%-D6cA%G07rV2EdC1=LX8k+S)=50#`v?K!_-?sQefM z(*hp_`?BhlWMtP-izTI6HDK*-sz$2^YOZ3*OG)5Q!+Y?3a8_<#UhUgz@N9Lf=({vY zq+9RJou`t^m2+rM+Hs?fMYq*u43%bRjWw(vQXTrBn7E;JY|j56N%`rUE2gF=i*9LQ zANtxd9J@G@M{=Clu{2^8Pc#e`W;;LBC5BFbUrF&BA~RDxfZ^(J^qvWQjXZ0mknd!) 
zSSlY6=!AdEw!K{M!_l8Xp%6z99T>~Cn9wMdYsI`tGck z|3F252#Eh;LFXQci0-8(JG+I8=4-OA499QKlG7qtHhnW~66*b}yuHyS7hj8}{j0uX z+j{5gX%!(40`dH9DzWoZKf7A*w26rX!8N|M!&|y)izdLRs5R?xVka^V&m8j3AK@m` zXomBY=;kgAx2V;3!&*~)n~YUX!rX^l?Sr!0yf_J)rwk_AAvX+j7eiBAPdu<&lJr8w zIKBPiN|)0Uo-qj>v*Z^i`bBHwp*zg3@T9e?zb@`@m%bUzp#||HP zx!;J7%#Rd>#fp9Skie-F%p&1tou+{|9w@s5Z4yam>;RaZkbEJQ9ocm~UmyeamP2rz zvx@!@U~(~y0f{xCHD6lC*g`)U|Q2k*qt}fj4H@uzktC>PnNk->59P8BR|Y5lqVCf!R6U~9-A?3AS=ymbONVY z!nP4u&6c9Rs}-$Hj$h!z_g33C2#aTt%1^FcoJ+EXaPZC<6gUL>E%# zTFoD$ZcrFY*uRQ@56bE*!-@F=B*QebOHonPJxneS6$tdqEnNd^!f)s2cVmW2xY)$5 z@-OZf>am#p;Z&~3DRxP}DzSHf6Y)c9xfQ!&@jT!ssi}&_YVLEFedgl`KkmOzApR_{ zsSEgk)fap#Xk_0l?`>CtM02raeZ}_foYgiu8NDAfJx50?fx9 z3fGlY=R@3xCcG)*Ah_uc9rH;^IiUa`o{vr|dyK(s#hJ4Ed#nnj95`GmXu*{=9E5Gq-dKgpHRLQg3!TR|4^eaU?RH`1?|zj_Da=duGP!3~a{5$M>>>6q~D#U93E zKq_A@ErLR!juP_jEb!cf6#40f+2_?6!^%7QhY@UK5V2=(eW_23F1nXP{EPZie(IfP zF$xVEzMV)8Or5?V5BNgl0W&(57Tsx2`NE0)wmI&TOd1kSMR=+F_Z^l9yOt;xNurwz zu`n4Wy>2CHHBHStn=(NfcvChUK_@>rIs$dzTvL{OeGmW5*<9oj1=i&0MpUKelu)@L zc$UF{97x+Y>R1JfO$N8yW*=9x9BhAu#_u+^SSvn5dGI@k<)dvF%6x41j0+Rf>L;!< z#8ie2$eq^t03u&^BK&`_G~HbR{BLR%;RPmSUvJMLCtmfTqpMd$@7}hId@YC(6w;q8 zvE@nY^y|;NiLWB#$y{3#;;&nrnVZcSW@+ZK_b-|Sh^$I~GKV4hItH5{SOZo>G-onz zF(`k~8Rv|fO228MNFS)m7VHnM6e|COTej*D%7Y7w*p4uxYjwwGg^Um12=xUmn+ z8(qYSc&U0yJ1GJdNgHE7hqWJ-M&CU%k%X z%HS*L^k;B0q`&ZJ9StN5IlbQjJZ{#)UwXHBM;LVtl+Yzy?|@9Nno~9LymTM9nU&JAX9_@8!G^#c+7Kx(Yn7WaygRY z?%t2HV)6=>#4=r=B2+x5?n&TtaiTUx%7C_c8*`e&n+N4;GgDw1vx|x%R9hHKjpr97 zORQxZf15hVJ*mQMJV1FBvDv_|Lx%MpA2OW;xGwcJ2b-l`%r*ga^qxEqMPugk5X=@I zy_S{R>r-izqX0B&qBFlZ*f8hD4ooNP;Ft?vATm2YB6{q7OJ0-b$j-)l4u_nl7H?gF zDDwfO-7Iyu-fTUZ*FNSUU}3Np#z#ZVly~7Pn&Gi2O~RHNIy`e^lEJF+RsDHHL&>~Y7ZSpW1n663n*sgl~{ z51*nnB|_jUrDkS}13{kgNxmn6V|}G2z6XcfrFdUvb;6Jv(@*BX@vJ@_D>4b=ysIBt zzgxknf0azgC_pJ%`2^h-;{S+p>i$qoHm``;5-r!h1w!741YQLzV*y~(&=lU{Mo7XC z5rXOpP+!V^9J88*rJo{~Tn3x-*7joxiw=HTdHTW3{9x3Q69Hkmk@@6KsbcehS2Q?o 
zn8RVoOlz5VGL9h;#FowD6VtT5CAX7EIJqI&imChZQ9u(&WSYyu&r=ui^kws`at_3YxKi)fMPw_X!~wQo5V_ZplKq4D;f-Y)yEwTVc|l&(f+P^;HNz zDDq^<@RftFKARbf35r!9?#nV8vK%YI%Koo5`+U^vR+g zhN*7S#g8wGTHEZo<|21E(v#vz58EVD_6s3kVP^BXEJJbD6+M0#IbKIX=tseADRkT8 zMNoFPSHk*rlbF07G@K4)xi)u{y3k%e8cF_l8B#Ddgx$+VeGWF|vAe#F>912V->Z5) zsa@TsP3K|HKIfF}6C#A}JYKWAKGJ<@No+6#>6oc@p};y=o4KeeV84kDT&C;l!)G%h zbYLeQI7plHW}C9zJ4oBX>~ zxr;l8yTlk1dRWchT&-FJ8vojGyeL*Vd+Pr<;x@W384F({FCQjBa)^Z%#5*K2+GV|b zn&-iBxW}PP?B9VXl6K9l2DCynd_CQvD=`ee|EK+MlgB2Nlj)3EehJ^b8N%1FYKV*B zxh(!g2(`lA7a+~vSLXK2P=6ajZK_f@Wg0t^A(dfnCN(KH_ajf=k%O9uf)a;jb3`J2 zw(YaI>bX$l-KspADeUeIv^}@f926_5HGuY*docaH9d6Gm^7LNL1cAcW;X3Tjjh{

kLp-v>iE|govC!|?AP>;?baj4?GpK3hn;6S*z1jy-xesu zwTI~Q+HAHs27wOgId}z#ImiVgmuYR#q@xv>NkgIWJCV;$R~KD}o7of0wnm=i%H`jq z526%O^d|L2&-{2Jz_1|LH;N3o*rsz1Y#mG#wH(k7#BfZ!h)9LTr~N!)vV_>Q!5mZk zi(}<-SD9Mv?_@c>9*~}PfQ30ip@>DisvRyyNE7(88B`gK`95>Wv)O!ia|EHYjk3$K zkI$PuflgcdLRYX6lMD$`*vHboR=R>XcX-9FL|eN~`drZ#ivBlFlGCxvC&TOXhWkm} zy}>Sakm;1BrQghL1gNLl3+*QRU(-XmSAeIJ1i1{_8j2El#K+9DAyu4EU)k73F01|N zCucJSXoqtxY}1<(m*cb4FpTO@r-~J(1U4?$~#0eLBLzR;=>V~@d^T1Cu6OMN3mR+pLtQxLy z=38BSLYFr}%05CICpJ@g=2&LPVLdBHEk=BBk;g_PK`N9(E+amCbR$|TmmFb{lsg00 z7-#7wI9Vi?^H~TRN0NZMeV8v6qIf=oF<%Yn$?WTdF}paMzu0#$`|Y4R>iYQ*SHYp9 z6X=sYo$+*YBGtjkQX)wvs#I`b^mOfyA;D|!_&l?vpc5N1TNKSyYJ9ZKY_#s-f%P?m zD~~wKZ_#w+yT{GewIsWu%=^^qWSd*Ukoz=m#f(w|zjb=^!;cmbvf}k>Q?{-P22oOd z+r#OYqWLaczT>$P@`h67;0UwDT>I<0nbl3LL1mS0BYh|9P*UfgYUwm4KXb)&wi2l; zi9VjFzWVKX(&qlZU)lPxqU3HV&n#C3uc?&{2VLyCIP zR5R;<+j@yef~Uhs6(Uvxwj}jr*OSF(z=#0;g-EDRy0l5~@$9e1t~f_M_`O`~vgwbE z#uk+6!kc~W$iPZz6u+wJ%>K*jMi z)z22ZX4qNo=UGRQY2#4Iv(H?iW_$H-^B`Y>kBj z6uR#$ocFQ}O_rYtZ!phvJD)7SqKANHrZ=LLOWk9uTjXUk;QB$d zSx(*Y$^F?IHXJ}0t~83ftV)-m=mTUZj}?~pYr^Spzs_#&$UOWLFf!k)$CLg*`I(j! 
z|Izh0Fo)XvQHfzxU46AuG_Fj7I{oEZ_a=LQy=<0^%6Z2syAamwDbs%t->mA?KUt=K z(uCfJBW9hCLndQss$e-1eL5f*&RidzkEuAiiW#o;GaY{1l@8#xOppLnfW=IpdoxPt>B8Q*Kz$&9X7$RjX?l+5Jzz6KA?o+kr6W zc($v(qYKkS#PbFO$x&`F_No6$2k2F=#%t5dgQB)(8mu5>^aL4Kv6L)eeIbIoIt1!MTsPtA0JHSV3CUKz;`$@2;M}hn zWEo3_pDWpDw+F#ElI$b}b!G;Sz)I0l6p~MSjX7M`6}7bBMS?`^D5px8!W}}%<|aD7 zC*z-K%J5WRCM4VAq&GdnLXffX%i+rsMHdX)?L8*gP}FgXNkS@SWr8!Kml+iF4sQ&F z2J{uMTaq80ODBI7BJ%It-P@CpaeXhef6a(fX|Y*D+rsT|x!$)fP;8nwUMpWB@p1C% z5iTG4Vgee0JM>o-K4)BtN1_OaA;JhPaXQ%tv^t-t;J>Wp;xwHeMvD;+97vK#u+@Pg z*s$E!)&T`v%7x`wqO<#57x4)Ns2uE_WNa6oB6d$>f8NAj@QNx9R6KFOnc)_EH2B}W zIFS<2kJBn<5Y)NBabMPiT%#{_@l}coNHfnfw$xfLK?(CGj7Hd#VVsFG}jilpz zFrlzl5+^>K#J4<_4+cCm+?4Gg!Np6`w)F6OIbpiDw8SP{<2by36-H*b)XK*c5$M$E z$Jg22+?1|*QAY~j9Ipj_NepgzhWb_7Nw#pxoVbYFDRT$H9ho^oh!lTAn=IN>4?h*4 zpPa^xG%=FPnmBV^&(Fn`gd@$9srP<^rsG*CKsghed7_>~>lu8BD|c3HIo}iKU9vS{ zhX=LEe$X|aO^QnSYtWgk7t-LP5z%JdA`~jyB&q%2J&UvHMcT_ZJPq!?Xs|K>ND6)p z%UwNh0(9^voeXZ3EOd2pI1zHhht4}lTsQP|=t-8dxs1R1(_5K0#yi%^rJ)`qdiUG@ zhqCm+foURt1%O|zDbpQ*QB4#z(W-~sw#f>ud!YGf)%$&PCQ$^uN+1JOOI!>G=yB{P zDZiL>kQ>HLNZ(k=X&8USe>p}=wnO#b$VMOh2?3|iREz+7VrF8_j(6;YD3x5M5Vy*8_rfh%2~?^?3uk zgQlDsg9(?1Z?@r!6D#>%`gp=*{X&2xOn_#S$hGK#{5WBOfSb<%MM-t_atplH3>*T0 zwQ@-hkwYiJD?;9e(cWgkg7r^Vbef%!A45`k^xaSBhL3}D*H)bwU85=jS8w`{x)<;y9JlA%;C54)Iw{8 zRdal5Ds-6^-b(xVu^rgwi2EMhayRx*Iq5tMp&97XXC@)Nt?5+R_MI{9=g)!_Ml8%W zrQGjB?Yx3;c9ii^eaOJCY z9P2F>a9J!_wo`3_TZ=^;%;NL9q1yV!-4PI5lm`nrlB@A&m2NqPFAcMO{I)+tvHws! zVspSdlN87&6NCx-j|6Mq(>NQRd*Z8Ff(jo4FPolc1Z3(BSM+Q{<4UCwTYrPsr;6{o}x3$Zl}9sV1UtOXTFI> zLLo433y$7+I(EpiL7bzvFhm<2aYIqW-2DcsIDZr5a+M}p1We8V(dgQ|4O#B|TsQR? 
zQU{o~sSy(eg4r3?V*Tr-1k{v;y`b-oRznZ%+k+5eX5&Z&X8mEv1h4P}yxe`3El-}i z-&cIEV1kW?RmQ#cf$x)m_A_Y$Tg|y`0REY1+d)izL~E;3z953dh9f8Kt1$+xI(Ndc zGDv9RS$(FAT*vD-lSzHQn@PlsmG_*2!90m#RMqHr(Qjxt+fpF>^ zFTCZ`2rq4XZ>m_ieDg#te#kM$X)HRTri%mC_A|JrI+(+^XuV{KHi4udJw!_N-&<~$ zi0L+VZ>eTqA!*auk|Coq1c^hO3SbAyA z9hFdajL2<7`J}c1*TIdK*abx|s!j`Q&Jy2DmCb%-^6VZ?A)v}w=ht|XIj}OM2$YQJ zC=PEAZ%o9UXLE_Dg4T=fdGk0@dPY&*vU~(F-k!4VP8*7z*GNdnteQOA`Bka2zemA^(B19itgH|s61PkhHisLij>rV~Rcad*y#`T}vXTfU^*CRUS zqe{(|spiWqb+LI3NG8=SGNST|PntR-bo#Z6;+Ai`J{?1tyjAu!^ui+{Om8~Z*{lgaW+?pc8Ug+ z&CRnoy*l$l5=r4d#;{J}2#`7h%>MiY7+vIixK(Z*9y7rHLW4w|9C_`^u(8-ywv*eN z@CV1p`a0MvX27{c%sZThXm4(AIftUuB_Ouqbi^YTN}Np|*=d%E6mLz-2HTz=EY>qa4N^J@$md~@>GzF-%ysyNE{5%l&4=hNL{24F z4ttwcmyF*`qRnJpN?e)F8y1mhA$#4Zr7yrpob#b0iEwl1atqIS*5{$@9VqFTI;bhQ%k1ud&SY5*+E*UlS{@o=Sl-%(oxkS zc{tW-p_q^0gT3PpM3+4lNa6w(M<3|OJ8{~wC>sVybPR_L)Zx#mOaq`~&%ci6Ec`HC zG2)1qOuFEoTcQxzWfqW?P{!XAN9fFMXmD{Z zSP%m@$O$-~&lDLZmTPbJVcF!&bmKG-(*=hJjD0(A4pewdkKzR+?iE2nL^f*#|o=kj)%31E_ifByLQLqwPrs*277pZ3|Ye2 zz1fsaH<-!rsa})3^O?Z{Puo;&WH!_@oU~FTpL8qiWI~@)+&HxA=t2R@ncF^k6_-_w zl&dEKe3XgcFJ~3$iaM8Ihd?UvYbLC~XNi{Nw8)FA{c!K>9Q*Q{9ouR>M&-pNk(Yw$ zfKi&}47dhuHj2|AN1*C)*Bz6&@hmT0JRTx9d6UtTB@@YpFK8*dY}#xN;8IkXGTQYq zWP6>?`~IzBE=T3{HtPHA?>{CDLtS?be_x1Nek(bY3LCcpa`BorN=$&m`RlWdEq$e# zGI;6j4;1bulV&48%p{1Jqa0H?Lmutze8zy~Pa0k?5V}jJ`C%0QGJ>Zdzv8HwAAD>c zmxl}-9oKmjlg-izZsEP_TBK5UpQ(S32PD6mi{-7A{dj#|9!iShzBn%#;jvICYxO=n z0K2FBLC@5mNG|g)8z_4-9knb|Px6S-5K77JD%q_5@wEAr81pkr0PiHJfU4nmHnYL= zFv(TzDg`?HX8(fN$!iJOnn%u>8n&I?WT_nKpY&Ip$)4{k1PShOz0w4Tj%9Rt6Wk zT*_6*{GtKXvIDFBBA!^$uGbsJdAEAdO@&i&nkL6wh+&&MzxKh z0NSEJ#tAl_+3~48hBa=}OYlxevU$U`DW_Eyp!X|dj@%3Ff1or%XfLvl0@~!9+nThP zk5LPfDz)&^5oiJCw;pEe#iS8b0d@!D&g?#6f+I2B3=X?rQ`wk<1>z~?T8T&goIkPB z=+>aSmhsRmLs?6gV2|J%9)@;Ea*aRKv2YY zO%j*M;U2zlZ9dw8ANdyD^vs{L1~v7!zI^CU2@UrtG@}ToP* zpm#4Atue0py9q&cg(N{Wf*tiDUfuqr7zolhuToTkAw8ihgB#^OiC4$c^?UxJ-FQR2 zAn@swyfn$+Cg~Q7?_?p7W4{GTgY%u~nH`N<8AW6)wfCowV*O7Q_^k{4?T{?6-i3&K 
z%`PI$wJxOcB&x)hN8)}lQNi9Eee-E1E44aw-!=K6QgcTJZ!4Ts%wztn?LVi#5Pxda zLD)M(87#K9m}xfzjgm_r%^5fOvZz$P;+gT_JJOiu8t;*8G^>H2yl7SQ$HMjCvqvv^ zS8csGzrTE*y#3|{|c5xp98&D#Ys6@7q*bz81|ks!2(BL+yzTYtrdJwzQ| z_-r<~PJ7H@$N#PjGd{ShN9WF9)ovJ1yLdccFG`2Pv=l-mWEoLGsKLj`8(y$}i09`= zBH`HqF+(EAf1w_qVyZLQ9dkiFN28tUAGWjmw>Z=RhKimQ&bf2A_>H>yV4kTSn;Q~c zyPUB9kra1rhfrV0jP`81bg33>d}6i0V>y3${jmQkda!kGtY*)GB4n~LWTKy!qK^$o zAi-Tr!M?^FtY})b)!>V3bgW^v;7AC0a~56e<(RrPD2)9%)U%P@a|^ag$1`Tu8p^OZ z>05fD{>*Lx7`6HE=7mA1V#(xYULg54sRkX`K*bFEIt2aA4u=0$B9im?-UoNoN z#k~IXgV-I{%;(NBZ0_tMm6-@6R>`42FfZ`#57fm6vFl+(3T52!h_+!ZRyX2CmW!k*C&OQTr@Fep z)cs~#%m4B^hw@T8g9tcS{n7VEF~!zv0ZYk!j@v(*bYe%Bi@r}&+Y{xFARiVzKF4=O zELyl7v1E_xA8u7`%fmiK_GMI< zqa_iCzgmF1qH%hE5cinUp9HRNfv2L{qfZ|5J~gQ2x+Kp(9Q_M7SX^l_RKgW$ZjQ{R z%#=!RwD_GPYGAbIMZw77QK~HmeuH$BTZeepf-0{p`m{=o-y$o01~kP%N7g}y4F>)Z z^I)|+da3@f<kDyx6$W zWsPqFEJgR1eCA`GH>KKTXSmxT>As&M`_n3)I739_J4?Yama6pky?zhM4ry7!DX}c> zILpC;p2b>fx*;>K%Kcy*_YFT-SB#~($!gJlcb;C5rOMV)q|YIXKA9ESZK%0?AaF@c z$2IFByDzOyb*o!9OuRWypw7m78U*B^LLaNomL%SKIpm(yZ6CLPg&#;yHN0wnG*f42 z3;D_j-pu=vN_t>_G2q$XFXi@|Hdy8N)76Rwl6}mQ-TC;p_N#EwFQHk*GUm)@(+K+= zngt3YB%isX4F^qo=v(B!)Qy#e^T#kughtx66r+3xw$u7cFdZ0ZiS zd^vh4t(UAWdq-rwmCb$lB@PSwhgA3d6D%-hN1;e7lW;vp24qT*sQ^*h7q;v)Kq*IT88hA(#AKW}8{`%N4MW05Kmdyf zB;)nE5|X@R;AZ3FySltAsV@DMc`#^VDM`TIqBTgW<~y^yep~8tLFsgu4K+|F(U>`w z&G=AbKL<8tdVe%d0ChANXo`lLmHO1l{la3M%@QOX(U6;VG8r%H#0}xOmP;F%JiExl zHcZbY%M_f*eL+yj@Oyl?@QPoea=28oP%5RJ&~f+oG9*1^AU7oT zb>(~-Q?oG9h}S@Tj_Ww}Vsbkw(()c{!c0n7$~t5Ad12P@_`@KwpWn_3GEnOIOriSp zNcuEeR>V|_m0~B8TEd(NRKe<^-kAEciFw=TD|~=NP<-N=BgZeC-ju|>O?y_44=66u znBCnQYN@A758%0&o-Cp^ADbMbngY!O6`leeqC8gZDP6iml^QMhG#an*At*aw=WG)& zriwlrwRy2Nm@&tq@9U%b-@KU)tVZSDVq{S;k|pOikFNrkO1oq2B6g-cW3J9n8Qba~6HwPrrSxPCBK#M8KwrwAbG(iEW6x-Y+b&MCjFK3ejT?5rj zcJaGg{900{eR=9#E~?i4uSO3n3x?r%?_O>S){5E9hfkbc#`w>|*z#>XiihJ&AYAU& z-SP&W=?@&`_aj2(34H$trTb6v6ndGWv^w8>&x^#)R1T~0CDn9H**GGB-Kp__-h7Je zj@$UVP3M$%0Lzkg$}7L)2(+MBX5Ml& zZ#oN0xADRp2;Bd!^UsQ*t2Dnq{9~rwWudYcV#@7I9|>@PyqvFf3iq#vFp<)-cXREX 
zETvvq`1MBbc(8Bq2`@mzl&64z{9k^a>ksnGK3j+$c$qGJIjs`BJaJ_Ga%RytM8IZ1w*&$j~~1}4z@&m1oF@l&dHSU&m^B)(ga!XtdDsYBWyW^6f>Ynp7@Fe z6rW>tIy#Nq|Dd8y`#KpD3?1|?2s$9^sqdFb^ zF{|%7kS|!YI<^?iBSAL*)WE}N#;x42R90JTJl`_L=X5UV#>SuW~*?U^jNKsm|s0o)^$5(!TAAF~i zei!Wd284y5OF*Ax&)WjXFGrKoZ&SeOUZUGlhYp*1y#r;|7gbg^@zw3IY)XoiXJk*w zm=YLP%|lXGrB@YwT0m4@b?m3B%e%so#*D|Yz;vauK0OVGL&y%Fi;)u>Xc20V_Z6;V zg!y-yTss3S1aq~p@uA5=sVSX=>$5p#?{Q4*!Ryv{I`$K_y{Y7PDa~}2LKfxHnT+J~ zW{EcrHzOY`!v znUkdCdC9ElhuZFat5j>aWQJdb|7hhNq{Npq#UcG<{jJvQSI$~srEjO0^EJT_dtpPg z*^za6n%7R7n2I4ELjy(H-6PH1EI;7s}2zk(FCQbW^Ya-}j7``Ug7ma_+TkqOvVv3sc~d!jzppGX%O?R% z__i}Q99` zSV5Q$8WSH=sSGU!WaNpO@39#4a`e~N8m+u8B3o&9VbP_3n`u`@9e!lm&Q`<56I08z z%_#pdD{+xjiu+XnWh*ceYH@Q3Y)P*9Ol>@!6>@^w`0R7a09wD)KPgtWzqK7+qO_6G z{C47B*9bleeYLx`cEQN!&v?K5f84!elw{k|F5G3?w%ujhwrv|-wv8^^UAApkb=mB) z-Ni5WdGEpdo!uUE)(p6`~InveDnM7Yi)P(QOsAk-V;W0};JU61t8ypMGz7 zF(c9tL+0GEIwg5cdu=_r%iT}(Tz$(uTX|lmeXF~Jrhdc>U;^J z_osGY=li=J>W^+CR%>*eP0H1&Ep8=2DpDKJUCN7#Qx(#|oPOu70u(|FgB28N$dcVUq+i%A>v*Sm)0^=!G3DPuLYs*)vn!-Bd zyb-Z&6-FSn?2w^neLBB_wYK0y?}B(0DlsA1n5;d)L7DVu{v_t(f=)05;^?%)`q2DM z;N34>c;+8~$#JZl>_i{<7W=Ipx8P*&z1DV}O($|X$ZSeGU(QmIl+YH4!}Z?he5DT0 zD^5?WehqpNyNRue>O_rjQ-$oqjw2{L_?V&hpS1wEA9pt{AON|)z}0AT0+nrQHCC4M zz@H5J(HPeGRT>-+6*HuHl5zRZKeU}uF|0F9?cun1jezBiF0xEpD_z7(Yaj|NhYC2g z5%<8<>9_PX!e3ujQV=O=D`owzq>7as<;6*eh=7JF*n~2hZ54(%YbdPT5Wy)uPhcWY6mR`dj^3dr{=dUM%0VOKFtGH^6U|Awa%K9*U#Ej9ksG zEy^idJkLB;K7QtwE(As1mE62Lf4)pK0Z6eyKHp!+@IyniYh(?hPee+GGS=>0eA0g< zhO$Ks^RDOPnAkUh^}YXjrTS`Mg59y_#8qpzEENB18jSRB8Zt0W_c-#G+~LWg3J38? 
zJ{+bz3O+|K-~5=TT`jEFGG?@X1z}?J8FfhQ{qylsHpe*X{Q*a#BWhZmHWVUJ?48W~ z_t`2fC=GM!t?Pxlwq2Z14;?-iL@)PqG{GrArRi9Ixr;AHiNy*(yfs;@=742!)Wtqz zq8|Bs&O#es6{(?_c?M)q+h@xbjQEmqp`F1@W}X?K!5!r*<}WP^J8p7(L*Ja+^ZGXj zaOyNBK|ATF;;Qw1qL$!z%!{qHP$3JY$Gvxxu z-D9;$%{?$bA1YXsUCrg;Uj|PLHWBVWyVUv0Z+!38x)m!bidi5$P`3 zy&QY6%x}7u-XF(#ks)|o;~tTp$|h*G3!y%?uG>+8wNs@m6vv>JW_+gUxw9v`MvhaM zT3w7N4jJzH@k|}>KYt*!v!gI?{=`R{>f)6@ONX58jA?js(-%ujVa3H2oh`y()*GTN zE%VS;6s%`mYp#lYV~eDq0XaGhuq<%!obaUmuw_sQ`H{c;>6xc^kyy@LTAwo*(c`|q zOJW0@G^tqasN9OmsHW_b0`A28GwOxhYzs_xaN6(CaN?$ea>;`5<>4|`%BUpsMp1D~ z!yoM57a=>ega3D&nsJ)Pv3T6j{Y-WP0JYk|ei5y!$0D|%x&G-Wbxs}EUhc9Q_Mz`* z&+<+$9vIitX+Uhu_(0zq+Q3NG`7Fd5`gY-JPwfu}ecDfVyvOY?S}v!7aMswSrnY;4xnAabs-0&r?N#aF7B_|K{FQ+mPv>n6kl#? zg%p?f9BL6icLOODOm}}ouztDP7W5!i7+pmJy9;mP#-fL@>x6-+{qoPNw*z(O%Z2Xg zU7O~Aj^=-~ow~k2ShRg0J3{Vcvry0wFu< zOOWZDUTx~a@G<}M?f(6mBjkXu(|7E(Cu0VKiVK+=#s!Al4oHy;HR6s*DS==HK_0;p zhY%qxV8J4%jhVzxOvNuMr^TWO#@69|Z8nB|Ei#KA`^A`K)|65@2M69oMd-P}jy#3^ zN{(STH~;EqvrQBPJ(o`uF3A)GNXKw!WIaN>Fz$fz==@(3|BvywlJN&=bQk-qL^(rp zGiCT#uf9KQDrcd%33X^hj&1+n4-B|tuLpQ$ zcKfVf%<*jCVINtM>CUM;zBkpF?2g1Q>@+aeOfVNs>^`*9p0WTne5ZISjj|N8utS(llpZyhUlh@ z9{r>qzMGQ4+QTCc^&687?h6#XBNH&|kGO+v>#2^v)@*~xTaB*=ub2$ImuVp^pal7d zgV#fYD5iG-r?s|T5I{eD54w3Tf;!fkkOZ|@Juc%zQb({KdenV&Je?GMe%=ymBh7B(e-!wK9zpFXp-siCz77)5ZPFqSg zne<44qbmREu`e>y0bgWse9)pqDB0al*p5#h`bg&vQ*+zN}z)+Y7U1X6^7wC;64P(ncVf*AI;v0!S?&hZARjCb7mzbJa*xZv?&CGmvi3ke#} zh3YthI8*#vSN==7?*yUme4%64(No&};h6ATyyalE07`#DaXq7Ny@OI1Dt=_o_^p6q zO`RH8)NKg^Mfy0&5zVJg^5>6lW)uR2_hN&>K_zA){>_|p0?J1c{v|^XRZ)z#hqyLv zM!iRc6vlfif)383(=Xn1GQUUqU%5*R+BDzn4|gf~o8j6Irw#ebetJb2rs!eC z2!Fl@44pCL$t1y3;<&=oq*nFbV8Ugb7Q$qyrekzVq$Hm$WW%Mc?>Tw4_tRqokqwsc?1I;-5^F3(IkS&Y11JF+R0WBG9adW| zvj}kxR!*cm%feb~y_XkrI7Y2XKQ^^Wzr$;(xN2lrqe?ARm z{_69-Xw3=7<#zDl=oeu4Zoi!o&AZyWP-s@q=>7w!{kcw^51Z-fiPP+{*Sddcy#Hum z{xvj5PXR!4%TGID>C#B5MMz?C$f^{oe9x|juy5^$h(-$`H>)iuOmhtgJkXjEUyjM< z^F($E=c$n}kib8+-H}j{>G8o9(}7viH{eBeHw<$Rg80!tBK@AqnXWml^AZFhOpS1b 
z2%h(={Sf<&kww$UfmxWb&Tlb-GNb4^Q|X57dujr@PkNX2K;2j)Y!>ko=JS;*>Az@sp z2gQST)?(IMDe&`UC1e8E1mZIR#j2_Kvo@s)Y&bDcT^fVh&Sh15Nz07h>P&CqG~5~S zW5A<_F*bh?-}eH`uuo6l#Re)j^re0M2JE>=pVRfD^y)3=2>#!|=wIX4`$`6osU6*V zFD6;q+z5ASfU{7S^D&pMlsO_-b-6L*FyaV4;I16dHnhDXB8U}E)z*Ix1te;)ODEw* zy{gmiJ?{>1w4T8VB|bPf(17B=Kp~T;)>3QWl0jQJ>^>7^-PiS|qC}(Nb@Y#^yj$?xr>ftP0dqLmrYGkSbazkK9$K(0@N^;X-U++VJP;H|5P8)`%K+h+$Y z4>xK2az$(bCdp$?9*&pX=EurQz-Q`Pc5t|JqSdNH`=D~O*yzUa^j?Bu9Y+ly}k;GumgGg_F5>_P<^~r zMYDlNC%RJ{LMzXdOrJRc`wWFVylD|VuPkI z%r`hDYkl*S=;uhehTQZD+t&(&r%>VHn`UmU+kUj+($B|sjr)D(4b5vHvS2yQ@K|qo z2=!L<#cn_aiSeW9p}?cEg_S%)TxZ0+0!X`U)fs!DcpIIWR8l7-I>N85F>~X8yB7Xu zygh^jXp^Vcxlv{nuYa?$6d5~XGdeQq#>o(M5j8Q7Ur(^SX40#S3dtcK6gCGP&7Sys z{4DPJR7r)ig1%XmGwxU*6Az(lTYk^-U3W_;Napf)uUPx{U!^~ z9s;XPZsm?WRRkbrZJ-ETh$|Q5$T%2a9u(@l&(sAY&`(-W83B>i*DWmWv z`ZisX6SG9dsK$?UqHuZ8*oXdLz;IF;RI-e&b0afa7@f=#43s+uRo~A8Do>_V633`X zYwd?oC&EwXO2kBR#HSsI44wJO`JH#kNoL$j{2G(9P71UbkOwK3GUOb?<*^uzJEJ1J zRQz&hVbA)l=DrBN7>)RC95>JMWOo&grA$^eal+(a?$CP8VGT2y*GJCvnDZ8Q<2x$! 
zsx^c~Mi~u@d~k@Uh;|4zLXl|LRie`CJtslT?ys8uY5S7=3nh@oC1LW}^4?bSFg9?q z-*&npxr;7n#?$h=;tY8eNMkb_>{QpJKKyxLzY-<@p)jJx4{yTqdK_#s>_EYqA_#N# zpQ)Q`$`kqgbA8rCMJnZzr~!;VMZuo)3pLyc*^I!{KWE1>DJc~*k-W{U=-qwG<4T=N z5#|!^QYqE$Jb^4PaS5Ly|HE$oA1tyXj0P}6@$@uJSI9DWi%?i?=Hm|Sd1O-Q^-E~f zy+=nz!m9PUq+We9<+7Z(2@2 z(j-?d@^(wFS+nYDok?bo^OFe7kIa35k(3D{g+N}Mm%WB_x{%$k8YXg`1Al|JO(g-+ zO1^S?Ic(4}YEH|_cp#7;xfGjc{@P(4{pHKKhD<#jb z)xKn+)wmDrSFkpmCs!+}{qKhhw2VwJ^tWtPX)9sWskTDGzR#(0x}74aL+|k~M3)gs z+Lq@>bYqIa?#QhQpEsYg*=gq6Ff;1kTCR?hE@zA8ge_JY^~V@)Gy@DRPf(M%$W<5`GiZd2h>dM2WulT5KK9HyL0fX84$h~WjNT&{5HfseD8v& zmWAjV#WqeBag)aW%qJcWMY2Reh#545qZHHcbR40eb%6g`F?CQqDFzIaD{2ok4w{i?)pMzv zSeAz#P)H~Du%`c{16rj<5&W{)CVIE|1O)Lc3*c<}*s%1_Z$pTg2G9OvmsBx($4-IK zHdirYy|8C2!Y4t;N|Fl6vk=|9T{9RR$`uSw@=6X$tA@Y{z~iPw=ZOBR`M=d z2RaW_z`yq)(YTzhx_3`v zax=hNi8@Uk)*xqX9do&*_5S_#MOKAcwLBl?t`+t0kWoZu5EUtJO1m2ZyHu?_>h8%~ z&mWx{m}PERUoHGg6)t$x7rFe|?b6KOf1nd3c zm%Gp5`htr;cs4hQ3GEpxzKQ%ueDHNhU4d+vh>Z34wInLu6=Bu!>oL znmtT5$9puD9ibAn05jyKwlPLzv_y@Zh>I9FVa}1bmQ2F*uy*1s9QgNL-;xw)1~@D7 z9!h-C&x}B(IsqJ$?`(e^&Q`2LJe}(wTts?2=!!B=I=|Hbv=9*v*%=|M{n?l3*QNNw zlmHg*Cy;2>L41bx0G~X*BAHrneB1*?m;PJ&A9kNp&|eO(18T%XU^#>F`)vntMKoh^ zI6+^PRXU^Sqy}g`cevMu&6e4oxjAQ(KbT+}DHD%KptS*LWH% znEvHS6h<2F$c(9`a%6gldBvbR_DmJ$rTXdv`ns6!Pc=cje8PYDA7C2Kxahv&JR}MGiM=bE_xaFj?=wWc}ZUny8>Sjc~ct=y%-P!}H z$nnKXO@8DS?@6(09uxL>6*9y&XECSM&bq{CE$*)F?mM(!9n{y@qrQan0+95w!QUac2oqy$oc`d8#6mH2g4p@EVZg~EE2$^)ar)MG*{7m0As zaOtC&-HmVhgQS)QhPFU4N?_%p_$uYgUvd{J1A%Wrbx;j2L##g}maKv@(3lw2+nxTNjmJWThgW$J3>vUo@u(7h zayTMnA|Vn?!eLf!uc6Japh+G@cM z6h927KfYTXH6U8&an51vr~a%us%$8R=a$bA*7JjF6hF&dRcZ)OK3VC)jJ)fS=vcZc zDl`X{OrIK>?Pqus7hYFqQ{DA02=^xs{x0_%84CZIBNy2ZJf4;bsElAwZB$Mu$yW!y zRx_gyBAa_4EwY0TRi(dP+@5h*1X`l;LWv< zL9*oQb=%Izr6Rn{_Aavd;{e5aFF;4jSCmHXnP2U`#FIs#b z(zmtr62;@o`OUiR^XI>Cj^$KfH#wVQeX-W#*8ENd<-XyT$qczMch29fx#9@-WQ@3z9=Sr*Z)ZncGXErsx-HV*Q3y)6(UKW zqLA~m%JQ(Mlv3@ti9=yTw-}7rW0BE!Vhd+Wxx(*w#n&cqSYLlJf$Jh?e^|3LQT^6H z3J|Hw9TN~`cb_VqIYTF5a*oNk#}S3F7v(|!StJ-C7K275j$mO4tvKHJSGECGg{v9Y 
zsWjnDtAIEC1;>oj!gcEvP4B0-XEEg*)&JRkNW&-DJsEjEpBv$dae@+ zNG(e&7c+(T0y;s-sAR?A2DabEfxOd)oXNKY1RWr*5MoiHio^U-E+ zT@lmSt5W=M$AVSXk9oPD8=_Andy=n%7x8+X1;jpgn_AE^5yuENHGV64N7<08AY$F;X z+Z*BO-4RaDA5D@XW}VSyJWd5G>0-H9bFT@;ULW6~u+QLI!n+(q#aU=;tZfwu_>GBS zZ!xsHF$roq(I6i6pPOl@NWLesJ`(V4GBCUy30{IQPCwQn%_@0h5Qbh)0umi@Pm3tq zJB_J)mTHm5rixVBhf*7Fk9nmOEk%28MXYX-b;)!!ICIugkqmRX%B(oAAVmkkXHS{* z6(?rIX7!IiBr7)`64`Rz-S*xKS=7=rAsa${#O6NEc8kzeB~_7@EiTzXj?d~EPK+#w zu`=Vjypqix>?MappN|!LAl};^kVV6-N{3esz?txkS5lBMgkC?hMP;p9lSF~kPnj%< zbUiV1*`m%?eLPM(ESHrl)RbJVZPywdV12Y2sNtUNkTzlEk&nDiA#dOuHwS;aYSD#c zt&0c``j9L&k#MvW+)TtOt3)1`o~#vfnW~6SEo-QtZhmTrDuRIJJg0&;QcB}>r;C(E z@8;hi+EUu4F{V*dp{kTSQUD4)ajmuZFEDw{JIU#S>)pEB(+ zng*9)bJ@eHQfo$!FTMqHwb;wxX}gtzRmdr6B(~<}VvjQev(zPxJ)LCda%J@N{lO}` zQu1FG@S;JO#*W9uhzYMX;JNRKG1i-?b38&yOR)~v@`1<~?ZS4TQv0%0z}UM_^W8FO z9}x{LqU;(+W6XiHldt=O@xTIIPJ24|+{(>n6?7LjXP5(Se`gRGC>cUs739F9FvH?ZAC(f~31wq}~j z=qMjk$9XCgM7nBojaq{0oUIVQCA*RGo&IBH2gJ|^Tj&;}5CaPICBHX(!Y*VcMF0&Y zc6i{b%ndqrDSt)@xS%`G++xs@o)lxVZRy-P-*g{0JH*O(bmel^$zhfrY1G zh{BN?CbwH|G;5j+D1BJC!8mV;5QZH*>9g2+&r5t|hs13xF z%8do1Ww_tJ7vz0WM;eVD)^GTiJMf>417slh*{$)A*}l&WzVo#@xG*`rfv!BpLF*=f zJDGAv2OSUGH%KpAtH5N70tRI8^_%fkcNd4-FYM~=)*uK3V!ww{9=U~ zQ5qIo4UC~^nD?jyGNxxPw$B#$PuuM%gBV`wCA;VsG^yS-nTTGRqg`ii!~`aUYeI5F zA|~*a)$by|#uw)blrb9ZwjeKK3lLsFb@)4>&X(+kx8?x{kE%h%tb0Q*zo{vRg6+Lo z5`YGa?Fm{S8U+dEQ*fi+mPMb!t>;OPELo^jfYLUg(jxWyEE$fywQ2ZD1z5>uMP~4}58%GwVDl;PQI` z?)!;k92fSMp3x*u^Io_;DXvuH&|~JZ0FyXOQSkQ2v~KARMtOi&lG^cN2hN(hbCB&z zo#wfU(2glCPbFcwM6DY^^$h%hfD&3Nu|vjXZ3B{yT)j!5|pu zvrb=(7g0DGU~zhKaHR_UhQ_Bb87YK#2cK@Z0V*GTrHjXA35N6& zh6W%|rXgqJT?k-GHMq7;~16_6e2yQHAPf8@l1LHAC7ZR!mW ztPz_WsCaf?B7SRm#ROguh@h6eS!@ZK`=KYQtg4xC!q;33VbP&bbm!stVMj>~HMGi1 z6n{t+YPA^I%Om|XX)Jpy{U;Un58e-&1yWx2b$>IS7m`73n2(;ggou9$8o|xY4JAEr z{G+Wlz`~+7c8jaic6^#KIBUk^KuU)7@`B*Vh%y*>4izIYD-k8)VnX_b(}*NMLkUIe=B8`FaA66#_k=>B=#e(O$6808G58BBV<(f;G{ zO815Fg#{9p2aI_0MgI*9*;WN(}SxpJPxP`d@WS!Re1cU0jBgAheOdU|?U=dl9X 
z1-HAvI=Ef!xK(SBbnoJTcCG0;&<3PzfE=}W7n`mq;xfD(Jn0C%K0j_W7$4W@!s$IP*?5y(xLSqkf zZx9H!_ZZ#Mf%us!Tv*4JTKjF&gC~m+Lz1w(Zx>1qhz+lkgYPL^buvNHXC($NlMkco zX1K$#XAtI#oQhP7IOz1QOim)Q2TbgbNq-7ikNN@c)|)46vV?YQ3>oUIL09R3_>eimw0?JREwL@5_ko7@Y60$K$`h0EA?;h6 zQ%l;S#uw9y1%W7h-MJD;-DD)|f^W*uSbH6l%MSj~rV#yXqaeY)e&&Ux= zYmO+_H@#%5Lu#8#*#9QOhy%zlYvOKw-&zTN_`^RTM2rNxUy8=3^H$nILd`Kzs+NMR zEieQ{g*_Tvr4X(h)rD1g*sySsGn~3ugt6K37?@`fegAkqb-Q^!EzS%_Z+M7~dpdNj zi9EdE&J!eKSD2-;SqBi%0#$13*$4%9W5H$D@?G&TD3L*Sn_O|Gb)fue6;HOv}W7fyHYk z7yx6BN#6&b`@aV3fAoE?^j|SlDx|CbAG7ISAOD~GiTBFb>@PP6q=}XIy?+iX*Y<{y z0M?P06Wv%|m|P76QUt=Bu9CI{pnLmFz_-%+&^iGMHdGr@F z<-u2pVouLt+;g5khEo6&%}E1bYfXE0=P$uZKzfDuLLeX&E(p{t3sCS;^um)#rG|<} z0M{~3qzg=7E|e=HBjMpk=XE^YF!F%kSmD7Et1u_6=ch?id3dxI<`u%3;1+b{pG{5G z{}a6ZXPEbZ9025;<`+He^c!32%<-0NuKQQ>C`l z5NiHlPCb9I`985D@R5@v9K$2-N4m-GZbr6o95}^j`YXR;5(76v5eqmY1879NgXQAA zH>%Hu!!sJr2k?!dZ_kbE3Njh91R?LA1M&@r2J;~f7O+aKO;~|f6yFNWr;Or{W(y0i zhD>+l{y5g&$$!1Gda7;|AiMz3gZhB*f(FD?d=O9u5tKNFY+rOP#;!$~hWS)8)UZr8 z?2KV9;g}bI~dJ0uNQXU_5!1#N4jps$?Fn~%^PkTKFVIcyJ32S7?plJHlN7Oq)gjlb{*W^3tzOSYB z@Y(%4bGJPsYwDay$Q7QLaRvKgctUG>C!{Ueva2%BaF+Y zn-u*~q)H>2GHFs&wlI4F3BYQA>MXU*=ojfUgv?$aN}t1w@-_29#x^8WU{Xzxl_I^Q z36Xr7;d}rx%Y-$#@7p&Zx2@59aSk}GIejvfF@(DPqt&z)54vjW!Ec+H#s@2k&&@t) z8URu9rdC^DAFN{q+d4@8QoMX7qOTmEmKH8Dr(adZXaI{pW#Xivp*bMtt)W4ghgkCS zet$3@JR5r)m5I;L4$;>$dk(3#u^}ish79)9gf!gR$XlP9IV`0q2Gi@bNrJnh9Oez; z&yv3rE+CFUfEk=$i_C~5(D8lERj2104$-b6aV4^0st!2wjiI3>+z4?s<=HM!T;lX- z>WEb316c?WC+yKo?ue+qUtP~I3yE~0+CaVwAbzV36H?^Z>L{x+)`{Vt_AFAiK%AVE z7GW@oPhZTV&?IHbf#HTIsC|eiJGqB!s3IlAg8!mB60`n~UjLIsJrEUhDV57Lv zhPmh6Z1nJ>i35)`67rxI+ak0`A&c7&h_7H9!~Jq_Yv2fpScq<19e+xzMPU91nE_RQ z&D&*)LZPQ>>p{cUDKX{JK@!?ye1JI`pzVu*h?H#rJzF&V5s78R<_7OTdjP8b^pjHk zCm7h>nh<>;hw+#1$ZpD6cRb$jg?VaNAi^<_j8zJCdb%o97ybhJ1kXA>VQHXbMd*84 z9I0YW8LWz9U63}YMr<4F&tp5z|B`$Et09olD~_Irz$?|0-4u&U%0|7w%p$mC5hW`>ojtp&)5Le=fpb z#>Q^yb#DTHDuLk;S?z)&DxM=ySy0=;2*ly3n!kU)$$?rQqboFa7$ZBgz(f!yEgfG~ 
zrVAQe7n(T8ZvSnpaHYYR_c8mvZ-4`WgR`T5aS;hR0()Nna?qzs7~x{F@kI;&YTfoc*?CXOvja## zG@+m2J&fRJW_X-;IRTf!?B@yWUhw5HUoD1J%7VdB^+2oJJi(7v3)0AP%j6oCHCW6D zN#XW8xc zu*qPwiC-?sY}Q-B&)jkQH^b^E^}0aBJmdtf)D{gYV1~x3#Oq#nkg{(0;J39uX2S_5 z#tPdZ<6srYD+rw}L)GXq3JuXHNViu*L}5}5UnT2uC5j`LFJhpQDDvdz>q|z6WdU)4 zaA=M&yb9luJ-?^DdOsf0M>$IUIy6r=+c%56`xfMpk2puB6WCB ziO(U1Nw03k+X>F$Y@upy_<$pIS+y2i1IWtrC^euLt(QVGUBAkmZ~3oA zee~5uxf41%h=rhM--x=;Elb(erdlGoeImPkvmNBB0q~VE!K_;{j`@!LFqaz0#?dBV zOlujM@mB*t#N~JQ90@jV<514Z&_vsQ{vL|vMVr*2E)?T~v&&wIa}G!-sasn41F81o-U(Oy=7_V6RY23!idTOz9aRN~SJD*`gKD0X_m`S!}?mm%=U zLmVct9d6#0m#4Hq7wIG3VrZ!1>f`^h%bds&u{++zWr_KiYer9X`; zCTp177Ccc3pKZ?|H;etkk&41O1wJ?F%3{f`v%6xM63x=6{3Z)jU!&=+y2^yE+}@bn z4Q@F}iY0H>2cK^gvdHP9g|cG&3_SK?8$RcF^gHrv0W4g4<9A>b7OV^2wNEtt6CumH zK_V5^e8$y06Nrq?Qywxa7c4ledny;>BmIR!JI=&R$)Vp>qfNg)k84n#s6=V~AD_{^ zK1pS-mJ^|AOiH>B8Ps!qHM%o}P!^JhCB}d#ENwo~i{g*aY*!<}7EDwMY`ijBURX9C zx8z7X7f5huU=pMFepSsSnR)$IFeMDyo zbT-I>iPD+&CpGgQMGL&>q?YqS<M{Boy-#y}18PA` zxW-|~s49oxCt|R_W%T8}ZaEA;G*n4LW2G?e+JNhJ8Pe)3AJH%q(oIKuelVq!AVg9g z%z~_JrnTq9ESj1QQ@X=K{v!3;um*JYt2u2GUAi0oTq)1P>(33JmYj?^Q>&Zei>~3R z#5P-`JXvHQt$t%5t)dw>?4>p}u@#7p7Z#^WK4|%GEQObf2rLI3->`kuSP{lzLWk=X z&jp>`bfz#fwLmR$;=n9^{=Bff&2Y9nT2Q5mCuS;N?UrNo`1CV<{v}RwMGrnk@52~^3wE@KYxqVHvwd?aLZ(T zh1&O0m&a?sG@ZR+1z>C1N-l40l?~Lu;z7RcD4+J?tGj2;him?JTz;8PDC@Z2>C`U$ zRdnRoPgj~CZvBcVmoZfQG6S7GPXdK=L^Bk*nM<6n?V6T)N9T@Gf8HP0a$tNJ05ZHQ z7{7dWXxl`yFwb`jp&8{tv|{NLAwlk;!r3#;)PZj5_WNg8-Ha4T-8X*cmHW=2H~r>Z za?~@?GIr-Jrwv9hMobOK!^!KF|C}6Kb0TT7wxFFxSQS!2es|#m+lK z*V1S&aa(9=+(XD7)%egcND^YI^;pR7rknTr0d#P6^%;%8-|lxszu870g5)S!5H7H0 z@s1{~l!dF!trAN!B@2Uux6SN_EZKSLbzk-v07p zZZGrsqleolVBm6}5NYXK9p6BsA>*7AmbmGP(seJt(yfQcsCVV%tfXZ05U8cB84p^P zJCZD7o}#8w;9TBEak#s`TZ|RJ1T)wc?G|gi?x2fw2I+NWU0xFmmcJbNm5MPSMn&vr zixtYYUE3XYp}~TO64<)DO%7|RI)Jdr=0jMO%Y44QE;KEP_^Y+*>A(2QOhvo)<6k8T zqR8NgA_a^eo)G0}DBIST^{1zSsB$pJjr}@doh_(|7*E@@N(d-}6V4VYCXEK;Bz4bT0?y`C z3_Wt;3uTYEbVl! 
zq=A*xSc{C(bMcZDRJ|IycQ@r4@)zn_=PieI@35$zH~iR3#n_q|r6EconX}j1@Jbd1 zFAylg-mr2xrKk?lf1v$#CyYOr}~$u-67X%frL zn=@4WObP0gj0G#!Vp1{%jUBqRP7h+%YPG<~#baR@HvM9eCb9D*p$I1TmrROI}xJ-ttl2{$idU!EQXZps*{&nHI=_DUqGQ@SM&{nX2RBFHio z7oTQh(&mkc>0~FupZBzh?oJ^UaeQ}<&VBa0MuQR6$C?Y|6G8aUbyb#Pi>9=TohF89 zc2w`UOk7!bSnHV=Jogjs;HCzZ*8Mv{sP`O*>TS8>;t@5#WC@+uZZTR8n1=)$C=+us zLqWHm?|L=^aJ$N?+)@VDTC4A#IRinoco>?m`GVIAcd6OVm8jBqE0uClt2&@WT}dN( z>EEB^4r#Ot-cxaauX+EPt8~ScMssGmP!CJiyCz7~?uZqngoH>(`A1H0OG6BVe^uf{Oiswea6V)r?j7qPF zT=fbs)RUvD*wzZdGUwZRupgDlUSn{!WXs`ji!E{-&QpAvuN$`XT%>}fQmF%mHsnET zny8L5b2mJ;ZT@HNZv*Y^0)(9>-q?fCY_l5JB|bwkdqs~QDk^BD4NJDUX~>|wyVcbZ zzDTXGQANsUxrD#j?k2+85*R4-5=NlkwD25RGf}AIT+?Rf0mR0tA3BS8^NT% zG`LFE!1Oc&*IJX@Pi)KIrDwk;Z@Oj=EQVkmXSC#mgm{AV?TD7`{fWShpbNBz+ZQrs zsZe6|woIWL6VVk9sPD#KmWZ-Y_=b=Y^v41y-9^3nAfdeDmZGhQSG_t+mdozS*DLW! zy;~D~<&h$BC#SE}jrfo=b;h&k#j~XUtOZah1T;E$m$udl!Ew+8rpJx2E2cpBMi5NA zv;jX9g(i$dgNF|aPDCme^#+!`qgH^M(R>k3kB8k?6^~c6_sn=D`|P-cYs0e(Q9jvk zk~W@pnv|u3tsgCeC!e5>C!Yc7_cy>Q3;7umq)~;diwlY9qh;6U1#$ZKM(=<{Od6Qz zUV<7_5%Cn2+7D=K7W?>L6hQKI;H}H+2!#KXnjDwfl;??ss$zq_QNeV@ZV}IvpXirMZbUtcNN003VtE^^ ztW<`G8#sFO8MJUg)D0A|0;hPQe!ido5#B?w#qWxAV|18#BT%kF9ZMG|hR0Q)yPh#> zepA-fXr})bq2zEjxK#RL#WT9HT~DoH94wNfJ^7$EwWev*ABt5Z0V00Lq zdXrizI-vfGogI9e&ZFJSfVt_PHwP{t3z`mssg3H-aMB`xXRB_>d_DX6H zLj09MDJWz2-yHk@!?Yhj0Wj?cI!btm|L$G?FB**cNJ;SH2XFyuv`-15u~WxRf`4e1 zUI;P)5&39w;$1%y9-!N9AH%n&jy?}rviOXfp6%_HiIMkzPjo;FAc0HkdlA%WutjjN zn6ObMMhNs_Pzcs%e==EnOFc>-VZUrw-xuT_$b@In(dUl%1sU)*9dIiBA1zL3d}&GH z4giEUV9=f`u^~UavjO`x9oNkM#*BClVxQNRaciV>ny* zJ)wjl`7rT*-cE|0=KeMvcs3x95S=;(n=8AP6sdI-jgh^@KST(;A#Qd7l|1sx&HKBx z+S@BH6S$$_93hPtD(x{O%nkG7N9<9Go%Pm#(vWx10IOgE7~K-Z;cf-rX3ZDbZ80-n zdE~o8y=rGaL!BlE)K#Jn&`&~gQcwip{)G5~U}`e38z?SR2bvgY{L4{edS9%)?a=zG z*91JvSyEya4p5?mIt40K1&R>?D|O6jvWY?sU+vK?sc;EA6f6fo@o>%i+0DhZQC|F$rpEQ z+vwQI7u&XN+crBkJ2p;c?wot(%=riJv!8dbwO3WGDol*q7*a20h)$fQ^#dYi<=*~& z5XZGQPR|JEuvVwDADzuPe(r4^<|}#R+IUrcUr`q(f|&0D|L2k6<@F!D!bN%nHg282 
zS*`IpfKJyNDG4%aL~Q1!bk~^l6Op5Yg>m1L_TTxw{gxFfhs_4is&6+GDoRMd6kF4W z8xM8(NgP`e7J;|Hi#U?CdJwp(M}ph~=Nr`0%5lWd$zeDAD2j=MjvO``+Yy^&9r9Cj zB+qQ0rItNJP2+L$Dw^jxXEVlTqj~@Sk|+o$zUxD#&|)PR`O(h#J!=N?(F$ijuU&7o z!D}@W_}Q6L^%qdH5qMz~k73}$HJs6+&X}7V4)2n)0;@goK%K%R%hsVx>F6O-3tVSvVs>f-y-kRZa z%O2&1!`6JRjhE+-c|cNc$3u)$aIPJYSWTs5VgwJ5e+8(EZ?Vl2u-g`(&I^Mw~+;anJcu-k+R$ZO({ zS5IqS8ViOJJ`i)4ecB>AzI0&lK)sk zl4&lO^UWz7HEv4Hed6Hc2hY-x6y^O*VFioLel8e~u!%tVP@#)u!3F2=VM1l2!zpaSIyPNZboq#giOH5)~P%m&h zCUZ|390Udo4T=g$fhbo<$iKG-^Jg%mCNq!XCp;DDR?76HeXb6WAy!SBgV{ntLdQ4X zLNa~f_p;^J@>L12d3ie~BYNe(uZwxA8WRl@`v z7w(pV0RseIhKL#JDOMM!4cQ3Vo-$P-vI!oejqG!8!$_>m?%MW5z=w=6<}!HMG&}jn z3;##8iCg%SFZ%Q&u^viDNXT`M6AJd`x3oIFUs;nC7#@^|6mRo&fp6wtW4wtHM{JgR zXgIw2@Tbt+EMFKX?=28N!yj6IQN|DyS=FHa%HIkIiN=-^g;4(#yy*rtq3|k{@#I{{ z4m2BMXNjsGayMReK%+PQH>rq!uEaf^Rpv(hU+3rt^7DTmY^pAk3;$N0{i}?VM#Sg1xNeTt#;^REO$(xF4EOJl6@iBEBU=uNL+AzHBOjC-ntrc{D2`$;jqJ0aSW z^9sjb#4oCdjWdxC8O9&!1OmR*_w`<%9=i)lYr>z}@i{=fe#WB@lgCe|+(6EGi=9Px zKrTGF@@9Pwd6LQgSF`(viBz}i(S{3YWYrJ7JTu{~nHo^Nj(<76)%pOv{;!!H(x=gx zr@>rGXZr<#_7v3!kNZkV|EKBMy1N)EofiJ70}_x&qxD%t-@1}hUvX9OlK+0i|MS=M zr2&BfxkTj>&R_i1?(Pi#;X@yrj+icK)Nwky6?JY73%oHtZ_L;DR*Af|v8uetl;3!> z^nT4jx})u1jBZJtpSDR`)$1|kK(Nn!Z3Ib@&;}tmwQjy{OiTt3(NDw z*R_M=f{7hmdBi*a(9mp$rp>xA7=4SPK@kZ8 zg9dz6mauSLcjFGiO?Z$K`#)%%zAU}uTIHDyG2G_`2HV<{P=roo(r#4D@M)xsy+fTM z?uqr4B3Dc%$@ro(FX+a{$Bp?N1o8As%BQiCm&o9oA^VI@Q1M4m%>kJ+&3Zz2KsFM5 zbP@x){#tPUau^R&9UqODfG((~8B=`In~x!-PgxC>tbcmR4^)5kOex718NIdQg|tSU zYQPg#;D`>T>y!;IjExlN!*mRQMdY)_&Gh5Db@i9Vu2xx%Cn+iFW{cfzzP{!*LGD%%K{NqsfNz3%#)pRj!tA*59W5=KLny}o2MCT&2kmr%d)gcPbVmv!2Hdr!=yARPW zLo)S?7+~H~mz;XX0n^6kCdddm9dEvm*q9K~QAhnkxTn`DgqE{bG*QjV1qq%lbIG7; z)Z&}r6!f(v;tB1_F8GhdZ!@;Vt(yM-t?vIvL){?^@e2^5%~LoqQH)(6*X-DbBTsY9 zYf~OkShz)Q40N)xS@#V?5gWc=h(ByJ-_P*)HGKQ~wwo(Q!gTi(oo zy~11qL9tX8;6Xqx0i1*E`2iClw)QMMc=NxfnejzP%Q^y?ae4A3)Z(6qcO;*!D}aWPGYkfq@JoBMAz4F*rK(ys zj@Ta!k6>glKN3rMo=zSjr1wu<1dBIE-TTB6xzM<_})^-Ml*mMBZb==cH>YKF8ifPe3sO(ld5Ve2aip!ZQqWZ}D)3{I{qbiwXRNB$HAAmo>C^{`(qcc{8}NgMiLL 
zYnez;!*t6Yi;ZR2DvJt5fA7Iy^fRKQq|KCK&03uCKs2&s4$FU2 z8ZBu4E~EQmLPqaUfz1 zsBmH`840&7zvt2`dA#8v3nHjA16~tbsz~>U04O zwnh@#YF+9{t#~`q*x^poBNY}hSQ*&zBJzA9gbz5F2nMp*q(&M=`fMOP0A8vkbC@&M zU9X`-e#U&4=C5~1{=L>hV^~3HkuC`SYNJ2LWtULg={NI>a13cIKhU1k^vN;P zB2TX9mH+)>|Gz5Eb&t@0J~pQ_67SXJdAip989YnlHUrw&@;$xhlf@R&k}_c}Hjr

h)RX@)4MF6c;BHcEihJE#zpHNzR{_FrXz&Z{ekjg%NKZpd*$qEk+kd zU&&w`vYd7;c(PXDZ2qF|dPFXt2b%CHnj>dD`)t-E_P{YSsH6cHLhj=KXj?+$5XQZ zc8W>X^c0`ak(aBE`9V{YKdwsU>Qf-`@qbX&YjbM$>UzLRE)su@tejpuNP6sF$r2mv zqNT)39m>*`<%xrq6T|yY75@##SOh|Yb+~?~*Dbbv4c_lwY4F%zGi`dEO6|^_CT3TN zSGE5sMvyI6{DB%?cPjlm>|b0KlH7N2*YCO6@b-rjsm+dNa$053d5i%5Q1k37^A zDmJ$G_<5(X&Jie_zg6fazFtyx+S&8K-e2L73k9a4#S8UA`=y>AG@UO~1@}Qvjy?~e zw%oeKE8Wb6+IcuCWx3)sWTuWRL3{OVqoeikYmZlQm+74O5K9K!~4>TlDz+1cWdDUuW zUq-SM%eAASm38KoFqH9P*!gq<&zf<56@)w$Axz136Kiy)zGpHl#BC!kG?mz*p`?kN z8q!;?xAD7}n+h590a%KDsbip{kc-caq&m-YnD@FplATvymP&1&{mzCqmC9$t=fCB} z=W~q%3GAn0mKAg1LCp7GwxMQROxtR!#|H~ibgN|0c*q(h8E9_6P?qDf7SB{=8dq>uLU>X$08h|0b$?I|k zpe&S0+c?$N*6z5(@DqkfoeYA7M}!rXUfYL9%NUC^VSI3@5$Q?Tr>57@0lhHwPMeq*MqGc4SF;wz3oFxy9u^y%B0gG(K*^ zmh9^u6Uai$PmIU;SR=MFoNUf&30~bU?%!qiDUg3$w11fAURzcLv2+X1Z1VvgL`Fe! zJw$Jf&x!ZJt6{Y30coAeja3D&l2@sSENW^OtOZC0^c2OBae@--+Bv+;x&lrXaF!~l zM8`(a;MW_4XR?=5J8UVlJ;A^2uVL)Il^ngQ!wSmFq8YZJH%2KxPd1#HmM{1qW4@L0 zVvu|5s-DhFWiTe>%8{u~KeFpi`7p4H-Qxj49h-g0H#&2<;XmnkwVuV-U-!!;%+rnwTt=EPN zR-i%wKY{Ye5LNFSZK3e@efw5sbr!==Y&9AYhfBiP=qgxgoP!7GS~ojgg84=T*SkyX zl{x%y4^ibphmcOr8Qq{yd?c}B4z6w3b=MwmNF%Q6M)n!)rKQ>sDp;OQ5gaFM&J3z{ z>HY@%R>CsM_?A@?2AAye#t}|&Nm$Vxlr6bd-^oQaMq$vju6x0DWc7~Zv7s6{kvFev zfr>RD4C&5|eV?>3$XV$+#;AM0w6*zuMm;PlNJwJSs6@@#U(;E4J)`|p4ErS;9s*n- zrh6+xKRdI^6|P`db$*8k*q?_v22yR|_DtbNtsk=GZTnzxY}$(&CS&DCg0*VD!7Eo) zDab0L!*3vt?BZVi+H)dDj&|q{UklVM$PA$|DgiMXwtZ#Px>@D_5mr_fityO`ZzDOc zDG8hHo_@|`9OJJ&w1lQywYDBfYmQ#9fox-*#a~dTCjk*{54qhJJ_nrl9G&5Qfh*hh-Bhs*CxZJ?XenI_r&eJvCLN z3U0%{9tZARQ~y2xq4i-L7nxb{+S>_L3nMy2MaR2TtqLfI%DMOInMSu4$>nA56PeJ% zFc2ySpIcH4!Y^vH_^m9$hnVUQe_&;rujZgJw-Ty)L89aRinbAiJZ1K=e|P!Gqk)}o ziVq9W?#5>n5F-*z3=Bfz#8c-4l|94Cb#0JSpLzX`dzA#lBI@$B2Jd$y_ciP2*qf6W z%|llB^7_j}tL>hggj%gs_uc$eq{FgcBKna6gmY@U7rKV=YPj?#$9v=|DE2MuCU^Mud!C&4p0nr*y^&8NlJ@gzr^VQmTmLFNYZ zhMcVa-(Cn@AK%DSxRVwRYVIUpq&nk!w>cD@ z^O9o4)(>mmPXuUxBl2o7>{L+Z}3Po!zg!8X01G+y75r z7(jj0Zt@Q4d_PEqv|Ts{b)e}&5ISWGLPmzBhVFVeN3|c#P!%i~r*WUPC#QA*jC;l+wz%FI?FM0XF4w((+ 
z(kSY2)$(Wo*5&bP8S7MYVEW-=8eV3I*3~Yccl?5E`hn&M!Q?hxO77gj zY4-pVm}$sCHVoY0WhyTC$+mQcMmh^qhg8&iH%$yUUjthjFBhGu2gSJR2>-EcfPlLC zDObW=7DHc|4nI_`qqd8gL}FI5U6N6o8pGA)MT29-IkcMAGeAlr{>rznZSyiLnB;bstj=A0?#QW zc}pUD(|_t`j`Y>cAzdQDvK=y z!Ln}%1_QLq(# z0l7)kz{i5*y=wVNJjFimu!2=53lyAVRK@|!t01$+9RpQoLdAPey7a%8nJ0J@T@SFc1}`lPh-=1FryvD{ia z!>vc*tjG^@_=vXKeXMGNgJz-M%t**401BJAJ4q{gaQS>|lpiR;?&%l`yqtizdmgdEigiw=>36v5^EMs6F{XAlQ94$YYiO$5lcDC6O|dp zo2|%VH_-oc6hTQqpszf~LnKBDCxK(2_P9JFQ*uKMitQ1@mAX(H4hx_ zBfp5bl3bmO$Tigm!Di=;>##rhzCp1vXjO;oaQB`3E-IS;bD?penK22kWRUqvgnGw2 z<^|V7`f((lhGu_t_xmF)hl*RAHZD3o8*V)krcQtOxwc%+k|x}6$i#H;Gp6euA^!Cp zKR24%AK9T^g05Q28F;Ya5>lL1rvmN*i3mYxoa`Dald;EvBIfsHDBHRD3C3|3P4ZNCx z!G2dzO%0RFHY0}Ms9gz>t-yBw`i<0gYtU!^>c*4pK1GjIM5GmwK$qX2y+I;Z>XK$h z+KuUQ5mKg^p!d{h#Nqup=jqSIDzk-~zPT5K9o`{UvC;&+@ai2Y)^-Cjs$BX7xmoZu zy=Jb=o&T!aHFJNVacHHUgDV7zp%GF}<}jIfnVmv@9-qHyKn^CMxPCa)P&=(~C*6Sq z8M+7WX!}H%5A54mvw~5;NF;Ql@(Y-zt}9kFJzk8*MVG2X3BIXacVOx}(j*F~u&x9V z!ZabMa-L08wR-J#*jO4Bmu+@fvdRR@S_LqK{?*{h5%S{GMo=!RBT|x4=caF;{c6)Q zl2aEsQgm+gP^$Wi!hhNG7Uj{_2r+S{l$UY~{=5XykH9mC`jlQ9l8AoicLcRO4ZdYBK9FsQvfeIR4Iw^B_Ag2XAoIwphVrzM3_`6Y=AA zjNhGqxq0ZX=u9`#S@H;n8;Y_GecsUi(YqGcgsy!WNC%m8$nid{eXU+J`-a>uIZ#I; z1W3|51DS1G?dImeE8*M_M4|UY9w3oDSVu5Hmcf`{ofAjSNDD@(CKqDjPG5S$))%BV zO&Tl8WP+1S$|CIsE5#D%V82GTXb{6-QXa9Z{AWM-&yW6oQY2)>Ys@hHr;Jq2Jj!uq z>?IpGV4JsOsSdM7w-6SLD}(g);NuIaf{s>#%hXn4PbPi5c(lFZl5O;u6$3F@T9=fJ z9MUJ*{hBbKxopZqF~lNech%*8L8#T1=gv{ZJw%b^tc5K9YeHBBK${q zKJq>Rh7%*3i4)!cXl$-!I*L^gZEPY494qkXt#*>ETy^>B1EQ!a68djBs)UolPR}p8 ziKbR9i2;YLX=qZsqV8^G+kWqYoX=|)j__%v^=pS&*OMtv#nAf|Y<$M&+9ZR&yaBfMG$KAyg?Ol(txDy$5*~j_t!7CZpgoa{?PpCdHbYzlhr;Mhf!EnM=@6mpU3IuzVRMr89nH$>}H4ydFwb>(uS2y#k+u4enj&KtUW z=YrSN#(OxCdAEk=3n~mm8?b#E6Kq(OX^Y{P^~6L4-5QGZM;wT^CQpa+Fi1nNdEw~B z_X?=Sns}#pENxv@Y+08eufQZ`>uChH!6V>$GKd;BhoKv{ZEvh;QW_d5g5(E_0cB&% z(d^RLOJ0R32f(y@^a&!dM~|Lj_L142B89V30nsb=bq+dW_|Gw)s^zTBur3=WY^V9{ zswe|&7<1mWn>%8PJi*{JJ%W0>Ju(KpdI0tSM6N>=4Z!t#fk5aDC5#kbMi++QvR*Rg 
zY+XTZe#o!vf*eD2hnzyvIFV>p%%S?(?CUs;&s|%ZASEO=UY9g%;9<&XHu+AeEICm;+n$*^&a+|_ zUgKyY^Qj8_*jt(WM5}T5L_?-F`*5w3TaQ6ttKNj3;_=j9Vu>c#F`>IwuwU2mg+QYS z+fFf~2GzWOcD|qAL4S%&J@w=!hPi)T6Z>PhrY|D~PxUoT{OrvI^=A)eNv$IQc6vrF z8hSn7MX1ElrSVJv>rVBupPdp@i~?$K(`@y^P~GQhD#fO}v`HJ6V)npk5WShYNdUHn z>f@*2wle9agZ-mM}o*>Dhh1S;uuGX7)cL&qm3v@bq~+7ak1n+Oq>0GOrXh z-BaLWNwYCG=)dZ|$R*a=wNudJW;!VO59NRPCxHJ!3DijL=;;M3$0rHN9V5=x923Nb z`|#ISDh88ruMPI_JMjNR9TaXs9jQ_>d)4D*pQYP8u zs7HqRpgC?qO&=R367*m=qxjpiGd$0XF{XBbXQCYXM%(Uq$ivO=D@CAPM$bl()<~bO zSdm;c+R3LDMoV)^$yxr77a%KJH13j-P*nxbUa5GK02Ac9NL$~3x*D$1=rf`iUacK* zmbOB!lLUAB^iX8|V7b_QPK3j=8oka@A$!Ok6rK^_W88(YcO%J3?W!xrP@*<_)}(cs z9>W}^*O-YxkN1ncQ6<9pw?dZ#teRcraux=3GfO_T1#5=#HOf$D0eXc`UKP+b&SB-I z(QAwZXEh&!_=>+ueJqEtTRb0!-y%$b+DZM!3ig%gIVoH0NGWD4 z!VM3$G|eA@S|G)5l2RpHWt&!Dhb zNZzndwF*-V9evi(x#vyp~w@M6`Hr~qp$<9BaO?`its)lYH2dRBgqj zDAF1ghuZ>zUY#p-((=UdVR;WTtXqw<%R`AiOB=?`J8uL~9HXH}@iGzQK_V9uNtIZ=ht{3+JpL%bYmHqt_- zsfJ;+ote?v)nMQgOmu^%8<dB9E^YAhz&AP=I!^@Mur{JL>b_j z9z&>cG6so}qmi>8+g-P19+Ocs;Q3@%yWPFA>-Mwa0i#T^QslTZ0B*Z%vR>2nnklg(IXbm+`hsgu>kSDyP66y=6JsG_X#D~t+% zOgoQNWz^(>Uk)>qt(yu`lbUb#d-x5D{mvvMFz06eSL;@vH8~BiseN9BElg}}MskuX z_OYD|zW%Lx^1 za_uxuPR^$U_WipPjIYM~da}Lg>6ep1_pzYxtRdsLC-w3d3-P&p-tZj{2lfyST0~iN zkrKrQ`w`mLhL`=qm0HXgqLf&|svVW|8;qtGQbac09@c~%mp7#0H8#nj+Y6nN{K+Lj zUZefG45-~sZE~C6_g(E{htl1jKF`<0siMCJcHZwXqA#|>!C6a;4q`t~=I8O)@7Y#G z8)!TtwPGZ@wej)}reKAh_j>f2%kpIo))R+8FDqW@&YIXH-LEz?i;J8R`9|g3<3TH_@q(uur=4?=qw%c;*rjDoidGgB7XRgTS-Am~2ow zX979y{r$-7kF!_&1)tzs!%G_hE#C@hKQaLU1%Jx;%P&+Tk?XTpWCRskUE=N79u1j^Npc27_v=x#c{h&Z};9wC2T_$%vLxHq~z%P`y}h0>@pc@*ikl zQg3u-#;DScDZsqyrDZF=j_{(<`}1((x;m2<9j0t*biU=yb&HEikl@pw%8a^ee*(it zx=@&5;b6)FqLXRwK?w9)n;Cc)pQyFI5&5=Cg0gzd8d*R@p0Az%m7~Vx>&ClWqX%57 zP&IgH2W`Fki)ex8IF5#r&zG|^S#w@zJEh5G%@gD~<9$0bCPT+7cNkj`h-*4fo=uGN z2Paw-934W?=2h^ykmxH7`?m^huAthgCnoVmkg0g*z!|h_=^*suxug0Pv_&*CbUaA* z?pYbE+bvBHXCyf%PZ9ENk4$b@8_8yK8)S!(k$QhcJk0|IBfq87A+Z~vY+F7wU#Z+J 
z9b=)UBjeQb4X_hDvEjikOH^(=V&)X-4K=hOV_eai9e18reC7`53mIgp{PmZpw1a=P9TrRvPm8+fUr-CHmof(wVjsRrYA6FDg<0u4^u4PpYhi+1^4u^LwO{K#eW<3Ctb}TA z2*&pekJ_G|o2iMd{zwp$FeBjT_&hV?=^z2pA0Uc^Jiw97nELM4gC?N0cytbyE|wWG zo3c`8c`f8@vxWWfx|%Bmii*&n8dqH<@}NpArtagsU6C>9=9{_Njv=#*H#pHdnBR|9 z*)v)AtFx<#5LSX6XraQM`hXQ7D%ZUo$M!YBp<)c;X!EHALXW5sCd}32O#^X{6)Fpv zi8OW4!`CcsbWk?U{nYGulVk}Xp^c-KnBs=2StSu+<)jCZTah;y+H+53b0BE`@_p8Eei zoO~-JGmxJDpqK+_4ou1V*KX9{&Ng5R)Gh-(jV+0>A2q8rMfU=Vm?}SWm0}5@~zB%YFzJ zmkW2awzuD#T8zP(nAW1r)=yD2{9sn!wdm_sApeA9L0ucMCl^&;t{kH^*CSq?Qa~(c z(o}#uuG}?=CR-Wo9u~ za|>o+Sx-b=V2woTVIX^rv`{QkyvKs;jU%9<%yjQz7aVKJ2CMnwDzfokdA(|;P{t_t zY`}uRl)tON4VXIg*j10vy%#kk&aKjo;TO&-mYP4#aRCmGH>GwO6zzKK;Xdub)!Q>v z!?5#yr=4Z#vNM^h&ZCgwD(z|m|KdO1pTL|KOq3>Z&9RWsD$X(TF$d6W=XJ2xtBg0OX}4q~Vx9aWFQw^1 zvMT5*k#C0k|7v1KU%U#xg=Bd{1O|GKLZI`aIY#uJK`I6!GCeO$7xvC?mEGhd)wylH z6zwaj`{AX0HOE&0TEDwfOzPe=jE1)$FZ`b@(B;Co(Cy@S^@WVQDQ53y_bDmH1qO-u z*Nd21Qk#S6lXJXuJE4Boi0(3C-w8|=4MDa?WzB@WbU6gk0vNJ8Kx#lt-0Qweii$Iy z0BFl!QjHq&ReItT*U~Jn4pfh&gp620g8_xxv_5ZGzYN>rxM+6QAhD z3DXxJwQYA+syZBwl6jR@JJj8Vc=U!YgsbkLBS71%|9eLXwl9YWOznete$pwDuf zoen-h0$NETIkzd^FW+k(7pKRr`#&V$uL2b({_M&ksabsBY-Hd^v(lF25yuDzmmBUR z(z_8-AQ{wmgenI(yr8L-H`xZHC(-oPD>~idaQQ`$RQ6sKjMVuo(Xll!1!$ziJx5(q zCP6bBgd9oLBfTCma6~o;zZJvJ`RYLcoO=+1eD-&^g$Ht=sDAStdN|A#uGVgmn;Rm_A$@dOzpqs^93_j0&%*Og2 z%paP0Se=0+&1r=qhhpv)azV>^n&rak zmDsJLTOsoK)okdzA2ETla^fY1tQZfv@AuvtlvddI=?d2D>QxQIWygh8Rsz6!{tRCH ztWqes#JXs`qKAK(l!EE~Y`W;ntMUKV4DRO3xcQtY5zLEb8%+ffGKUVR+Ia?%$}kw0 z1;yaxX#H<5fahR4)OP~?my>CxrpAG@f|*z%%x=xq6Gnm237CO6@cp7fQ*efho!(%4 z7!h)(d~P(ycsjKDiPRUjXJQ14o9X;Y!|`7kk~bocotA)`tm3Us1uQH!NI7W ztcryJJe!m|s|7!dZ;NGWi^RSp?XfmM+1Z!oayJ#;k4_@ht0`!La$%lE_dSij{kyN2 zA)ReZVWMb;vD%F>Gt*{A!#zkVt}Nz3P&sn?N3gcQFg(_8o*mPKyax%?mR)AOuC2Ab zMg|RiPE<8}Nhb+6gE{ZUPq-`*uIg7*5rE)kBOZo#jSi_SzNA}{(7HByl4nk4htOC(_~8dY=zZ7R#+beCqz`EJdAep zjw7v_e|HDikMw&+pk6=kXx^8q1-{h}&Vs+~;-N)Y6HjXNDx|LL!HKCV zconUC3u0A*;Y&;vq9qfV$S~^tyXkf~6BJXc3Wat36$J9L?qaDmDh$~v%du3Yg%b~f 
zPXcWsIg1cO)qkMBycs`0RxK-T6r3LBicvad$#=dB>IzC&$9bzJLK*dwEYQMrJB?DIBY2iAc??J?BL0(ZVj-Mk8^;pEYpnm0z@F z3-0>%ISB(-H}798MLGG`2^V<`o2BDWKq+8b|M;(P>4elOUbsfwBs78nI{7cmwsQ&C({B-D!KI#S6Gu!gDsH6Sg6RlOeS^@1zw#Pu%t{VO2V-7?{5D2a@GS?s(UVP31A#F(^uY$*h(4`N zwY|l7(1MSf#4!+Eni*|*!amdNwmj{3&T{I*xwlwx?eW11RE<1jQjb7HTvj&J7iF~i zd<|~8M$FLBxoxLh;&UA7*K7WC2jL0HpmBrtG0DvX}FbZK4J7UVMIuK$cca{wrv-p7!0Nd za`pysHD|54m}F`TZdep1OnCGU>>GcsHZyWfjU|7Lh-vlDb16R0qfq(;$${!6g~~Mt zY(kRonJzqCD^&+5%Y^O(hm*c1H&h=8g(1oRXV}aky8cHz%S9+H?ZpPtsm*<`OJ(|a z21WYlXtDH<60iM%Ad~8gHA5KT4iF}3?#HOTy*2Pk_%;C0(a~I*tzW^qXnThv*!me7 z&UigYbt`wq>q4y?gh7cLwWf7_HcgU!GJ@j~6lGcK7gb&sl)GV;j&RdleqUKc^g^g4J7 z2xC_Kpd2}!!Nh-dnCKZ4;YYY(ZaBzE{|8NLSHu(_UB2o0r`7z)7&*dd~UkaPqmKP zf>@TdRqwMn`^f~uu$Y8mgOm+&2}tN?h$%oyCR-t1izEPO#i#nqxh$6_I5CCPs$cQI z<%Iyi((-3u{qV_OPl0GX-w2}Vj)cU_pw#Ohe67#8uj+ThTfJ!Tl5~9zj~o*_vYv8_ zTC1>aug_bmI6P}O{SbpTth9=`ci4N--h3<3*)Oc z76bPjh=Z=VsyY*`C}+k&XdF>Qpp`MhM1uCz8y0skFPIRme6jce0MmwF-z`CxyxHsG zeGedw8w^Sk3K{L0AK0wTp(W5<=J0b!kW1~kB-?V7D_tEVBO@Oq>dg|(PhQJ&*>q&8 z+D4Lb#Rm#{{x7=TG03tlSQqWGZQFKr8C|w*+qP}nHoI)wwrzCz)jsFm`}RRctUoJa z#GE;Dj?DZ<5)NezB&0f9hYKNU%v_+egVg-@xxWyn%-&=OJaJ(mA1_EZ2Gdh*E+d>_GBP3pR_v+gX+5s#b#WBO z!q(b}st|+w+kI75H}c{sjWCl7KhlGua3@r*Om%y2((3YZv+$|rCw99J82_I>^5*HW z9t`l(%ihEA?a`&Oo5(P)Q@vg57g6M{y`sz9Q=12uk@M%Ic~lAV>1~hnwnM@j?z

ioaY|Yarr3=ucBvJrtBOX|CpqS%p-BVrrz+YpzDN3 zz?yc354rhav%%Gh0tC9QH?xOxfrHWShPV%pc@i*e!lrMy>@JyGmKStMPU?-FcoE5y1FNXOu?vX^|i zt&%%b8Hx9(6+3aVzJ4{OcqS@M0lDYXK_` z=%*E7*I^z4*5ANZ812xhi*~2SO1G`Zo3p*Z(WYZUBcAVXdQ8VKA|!GF55vpB!*vKXXN5yU$twjU%If zTFO(ptc!o~%p)9`iDu;dE|R&{2|S63-rJ!D_0V(~*~JZJaG&=EE)^f-j3!F(om~zX z1tTf$J{6 zvOQ7QWMk*$s$)<1QabEMTEwW#bsvCEWoef`_qNAd z-!t}|+*B`W=H0QMI;Dw5wA8D^fw2tYQl~elXYv&xEg<_3r zl~7G)aHhDkl-`Lgwp`Ph++M1Xx@s0RoiEMjVd)t%W#4AAWmA_Y?Vp)$s8};{bK8Y3 z*=&k1C}GOR&`y%~P6hy~Ug4EQ<$X>kw&+uROTPxIa4Y7_aL z(?u*p=kr~kLRP056M43f_lmQYkR|1EwopoHc0fAUK8|+speAB}7g`YBge5O>Zmg)K z62NExc=8%`jEqi3I8V!M_9wL0a5=C7w0_FLK&w^YgoDF#JmG;dsi2%ivP^D%X%5nd z(rvXqJa!6?yhzjG^|mB#q1yn5QbpB@A%SZ;rx?h?c^2hbwS2Xn?D0Y~Mjx+}gf|WW zCgT|i#?(e2J?K)RhUtYevyCN6?b~XwRLDrJtID%nG9tgtrd4ma3h$ax8m`rm!C>(O zUZgJ*;IK6BFVMT&$1t>ig=@FD%ZVByQyiMfO9>(|R_G)|V!7a-TWgi-RmCf5@`v$X zpY*jdIlcrV4f4leHe1U~o?*te5k~=T>6D;V_Y%*s7U6z`%*T2=8{P%z^1dX&JM5pl z9yvAM{t-l6WXns(T;-Z>94+-$Fqap}VZNI?-EYTwxaj0HD#wyXYwu-l>Bd>C&<;}s zULPATgj=SaA1xR@t*;z%m9doZD~eb=t_TmECOI){Ryqv2BoNpG$vmYd z#v>M`E##8CV%;5$*@tXa`FUFG_h8F~!X}VY71^Rde6}{Xx1S(n2#eFUnHHjw}rW(q5{>JN;P_tPLme#V*AbuMHXoFj7Q}KB$mu169Ltl91v78 z%&Ch;8y<2s)m5S_V?md9JlsCJ(mVhNkruG0CyTipPFS*F8QC4ENhIG5;YV+GsqQ|} zrGFAK9G0FcOv@#hT@COX=G90U)f1gQ?M^lx&w$*C?m$L}Q~pZw*)fqII@@Z3<4stL zS5vZ*pL}E8T%bgf)c9>#EV!&Gno$w0Y!FHP(keF{yKIST;+V_Xx#NT{R)7G5#T^Oi z$a-wvA&|5u-_fP7VEo7_f9>s6s^LKd>He5_b?g3elEt~M#r&m8W;_erZXsZ0WjWgB z1_SBrChTI(o9BYNg^ZoR2X7YJ6-SY##!6~2RBItZRbE&o>|<(d*={X^JNu#Ga{8%Z zat8~(Z{CmS-GxP;jAObI%D*t)#&Bv^G)sulN?5*b;QnVOOuHQ<9xH}`m`6^%HGWL_ z;BhpQ-3p-TdM=2=slIT`C~1VLyWGM!A;)Zq%fsBNubGs~7Lqbd8=~Y%p?H2HPjff< z`n#-^xyogQ?^show40x)+PXppC5fbv_)lw5k-WM(Vvz+Keg*)Lsjrh4CEdiTCrY76 z+gy#NmGkqg1#+5P(1myU=r)CLoKlr|j^YkY>5@B33!_vTYeaA}3+dtp_E;QznW=ef zo49w=jeM!f?63dfz#0EGEa1$3vaJ`Q%YVk*oz-aOmS*Gf?3F-4jY|BMO32LkCEfZv-kFq{;ZvOTIIZ3M1VJTL_2(Y zSfz9Mk)@(FRA$>~L`pj*B}p5P6hb&UR=?|ft>e9YJkq;$@fWF8-Xpws0{R=xC*9l9 z^raJuWr`4R&f|r9`GVvhiSsI{3A4hJPD3cy3aLo|h>`Cu=&RJ){iFjI^2L~pW$~(h 
zaQJYCN?Hcbtq@F>2{5>ITax!-*zJLFc)c=nhrRiM-k^P2KUUnVK!M&qT6|%dC5J~U90$|!68&}wrGv*vLZ!NwJ%bVptA*x@^yk7J{hoba36?b)YEnZ5H)D_s+pRJ97f9t^UkTii=8#y6KDibQ| zyB9|-%u;^wWm|lf78GNda$YR3kT7}HPlxQx0;STjA_5`z?xNpQ-UE6e9e#(CiRGA! z&E_JVuMc@c#-N2t{s8>agsr>|#3H1ao_&mNK*=JM=EjDbHu#TCRwD<}v!K@+j+8RF zPS`)=f}bsg8Ln(bR_T)Kh2lNapbHsS*P8Uwfc4di2@HkifwJ4#a6R$GRv{y~_sfan z^>UJhlQNu1G}gBkYKM+TcQPB%@4PR6v0ZZ$)#&be>yH^+La8n=+K6kpugXC|A(fX> z@IHf_){kEI8nvAz2sy1k=dvcvolH%hJ3f~&6&0GGKIQ4Ghlep>nXk9ND$x#Sc)9iK zBhpL^2SH4~Jq*pqtP~`&OQwvKmL_<28ZR`&;ILH41!r&Qx4T_PVpNjdCvTR*jT>kS zri8JRAd=AQxqonwm}AN>PgRkxe-U(dOH6%IRH`KZGKYILTr5XgU1`qaU~yH@$z9$+ zb{}2V?*2%fQyP;4Dcck$XOpX4S7bCbkYerhNWiGUVC)!7gg+9Aso5?jtI?uhqzPbm zK|U8rTCh-;7a`rS)JRoO!g<`YPo6_RfM_)S5SgXVP)^)Cm!85Sq+lXr+lJ+#gR0ke z)m%Ou^0FC|qY*&?;UrIxXoaALs@X@ua>~`Fj!25Lj(fM?M=Tjm=pL77d)%RDeGQfs zc82v!6iYhR?L5L+3AP;VYoVOxCJBGLNoDBb&qnyEc=ffy3FliHT@BOyu*J{MUqp_2 z(fJITJW$OrgeVaiCM?8Ey8T?V&JxC;gv8Rk;t|oA8wvt~oV4W;a(SQA4i!4&{}|)W z%D=SP>M+gaaHVy;$#kr`KHFpsh!VCo$Gu%=Z1U&TDZp(41AF6(ZSMfpaXaY>%%x@N zVg-q&1KXD62AFXEke;C-*G*;K1;?#)sg%Xj>qAzeO=f()(SXB=2b&Q}qODr$#DEMc z%@LRAck|8tc|da$W!w)!F;SEptj;}P%gr{JUIP19XA++(H5}AYSC1d|J6B$>vN?p%shzoLMK z3v^A*irRI`l}6&PQrv`1H|OEJUK@iNP+rzLSvTIE+btP`LA<)oXH9f@Hu3K6DrdV= z=pGlLrRN^ioGT&g{2Z-fYjmU;1IvOQUZi=91oo6iu@4SgXX?DfqZb8L!+e!j4zwgm z0IIS|5~L?65oWpw0ICeqe2qg_5`6Zs5NDiAS`7}`k#3xSyVnvft%-~tH%k_SQDJei z|5Tetljk@mC&Q~xsMhP>%c*mFmP1@tDqYJ(8tP)h^%7NdOa{KTXG@m{urgtWFvXJ` zym7u7X+?}&@$?&m!OBbTe9^ibhrdB0?Ey~xfusxg9oeP#8z#JY1viZeIuygA5+@u@ z&MjR;M(hKe(S8k1M>R|=0biZe%1zndd;OAgOd-yl z?34WJEwwh%B=kSE9PM-q4Xm}P-BU!^%I1P;5Pr_{LDn!`pAp7KF5dpN^FpjDzaTnq zY|Q}>46Ae~WN`RfN3ZuTpmeSU#k>^6(xPc-=G0(j?PDu6OC(Aoj7!}WjT}bL$O3MB z7=9af@gAv@PLX_KQYagG4J2VvYZjyp7sLxHM>MM>Y;j&Ajnp|{oL_*0)8j8MQKvLltctaM- zk?;svzCJ9a-M~1Cus;Kdl=NqTn(jITDttvIKMWAr#^|E2IebQS3STJjyORUuxDofM zt15N%XkYpp?5$yt2ZM-tkOhRFFfr52K87X|13yaGMce?YCPU$xdQsZ3>^z^DbWq7IFU2<;KzN!cv3JAaO<5J#7Glz;xlIOSY zoAUF73&4;@ZK|qpFMm(3J)|`AhQk()Ojc+d>1}bndfITn;!S~*Bh8fR(=|_L#JsHU 
z&AxO}q<$)Eu&c>BnjSDvYqtF0#UAg&y6yn`V))oqS{3&2lE_&X!iWQJiv>q|`@VMt z62?Ro`cR4Pt-@fhe}X+_M~5V7(43$M%l$eqQuS~e3WOugb!l(2ZDcL-KIsyat`O~( zjI-X}`JQQ>`tZpdd43jjgA!p&DikoZza_GsE{$yWVek4ZaZYH-{(p`*0O^f7ye`0H zn)E#mz-613N@T_Z0jRSzD42vGa^djqLUq$0qe)!i2-H->8vZDn z6cgp$T)e{;M`8iAcTlOa$5dy~Aehcn^A63T?l@0&$_rvo$bFa3aEpiHM7wXeWU-{dW4 z!ka}NPuCD=8!wq&+~K7)!G&NqZia5+(BFkkTGmo?$r+GMq_KyZrQQ%Ee=bRyv;0aZ z?%X+E$Fb5_@>goD3Jwa>Td6nWX0_)10YnDJY_pwaPn<@;O99!tD_8q;D(RL0nrqh7 zPBgCgldaBvl}+_^T+j7Ix{vzn`2@@Ct=`IK8X3T42zU%3mwIVUGk^l^_a4-m|NEO? z#R=CCo&(_fYe_PU zuQ)c)3$U+p3ButZ;kbN$uqRi>O;L8F|C8Y!qlZ9t#By z=rhTiH0R2CqcLE*(i&CmjpgAdrR%nLr~b43VEM&JR%qVg;Cpw}e}4WyDe3{%HUXs0 z<(^;dCdv(Bf98ARXwpl{iiA#rOnd;hcFK_ND(W&-o5o=!S;uoAl+hcW+5`_v=p9-T z&mbbB8b{cCeJtuQhZ$c>N~f4E@RUw0_1>OkN-UrRN+e5bYZgg><(}!55pm^MmyHYX z=r&U(yc4hbNz!6ySLAQ*7D&o#dmt{OvA3p@%ORjuuK8 zYT7x{#Asf7w|m}d&kh6FfIxDk1eB)?p3J}L$rWgi-@u`fO|N*j7V07V0Sj< z6<{hOkHdg4SY1sdtHC%Ny%*{~gIyEg4DWE<0O$E0&_n@eV9D{_GEAFagV}T*<>S>+ zfQFJw9^FMd2X0C;IxvpG3a02j!;QI^Zq0L6 zJR?L3uk}{X@!c95s!dwMDaqDsCfeTcNyNe?Wq7^#Q?c-m?!T;g#~QFIpNI0j1jbrf zDi>{afu`|&a$jzRqLc%vCw62mLDG0nY&aZxhOGRJiGx5>IO2+eG#SbvGgG0DESAFF zeu#5bF{EwcxM)ngA65z}gEwojEuG{J0PuonhdbsQoV>tm?tP0L6(~k4a-6Xe$Q4WH zOqYbs!}TUo)TbQxJ;3JeI2{E5OML&d9LWq2yLa~cjxq9_y`ro}OKw{V*qVIb$7iQU zUR@&HUG`ZT-Kp=YC8@0rAm-~va*CIAhUq;H&wN%A@?lxxIDZ*JQxIX~bvdQCPQo9d ze{J&{wBYvut{L>Wt12{gfVZlRCXQPYbbYz8NODN`5L1$EcQr#|9B={;5o@DcXzsgI!mIz(WOoEy;RAOC&*X5Dpf*i+Rg!Q37{cT(-_x*|l<-QK7*W0< zmP&LZ-h`C#3Uwz;lHut|KjeQ(eKZ!%DbY&8iGDn1)89`90`J828))PT zhXmsCBHQJ4IM#KQU_-~?+GLehbEb~A{sMwZsTnGZbyfZ8k?Y-FeM1@Dzu05Y60Zil zHW`N6U%WdXV^W~fDK0~^reynP;&wIVzV2#8ikwCIg#LhgB8v%>b~c0IIwNSso&W_hiihiic-ERV z__wW>`E0d+|FQA^Llgf#RNu$V)#@KvN>!*4?+*O>yoRFww1PW;Tx>v^VFqFEdhh;Y z|7njcqs(Wuh{E*s3}M{84GPlhv)Aq>QTyo%eYcQo%3;cA{3at1(W44rzCSli*BKsqlV=zC7K;xaeSN~|&}au)*n==Pz3N~291-e@ zGNn?<+q%CwUvOXtDmF2#y*vqa^U&<*TSbv3i$DBXBUgO3i@r$+&BeZvOr}rXR+RnHJguJ)r*2(iLbOD zahCD!aj&w)S_5A^UwgCwQez@d%o^-X`?914NDvP9$?tx$Vtn@(0j)XS&|i9!x@Lwy 
zxbyttDSgZzU~~-)fdlN9EG`#JO&dvc9hb~sPE!~umbKp$F?B{Q*6vfdmgnb}rTJG^ zik6FRQ|QUfhPwYI`!{Rwf4-1rM0?~akYh)~V`Y?RprDM_fM47}LFt*9gUixDsA*_) zjm@EffDoO;i+>4SMudmw$y($tmFCHnf_Pil;?V8loce7t2}<=FELk zQ%~qx8iLOq;@;rzN>v5R6NuQ=U2%!Ow)^rxM@i}$>j4zJ%X;2FvZ=P+{XiDqM4c)@ zoM9Hn^k6Uyf>`O)B}lGTjiCpDFw*9}nVpEa;qM-*rC$iCEBiSyIjO5S<;uUWLV-d~ zIU%C$4G^UPKmcLvPF#M8+o#!ypZBdJC^kY0RLk2Z$tNQ{m~&0{HX)XEcSsEi=Y6thqhNOAMN-qa3V4Q%U2*P z17UoXjcZ%a80^0lGojc&@yqYK=hVQ!vA?sFdBlIPTCzRTrd!MJyM`X0g$73Lr`wt+ z*NH~Y)+_+*(-A$sarI@0{#~Kijl}_=mk|IW5$*oMEYM259ov*pZ#2wR#!&|| z3hd7Mywcea1EcfH@Iok5EPW_a8Ex~*{9@e=8xN^+o6crI_nkZKo}dCofgT z%8#>Q8Y!sRzv;y{G#xgN$itf?alY;?!ERSM6xgx*&s+BYG+I0Sz}lt0q>3-#s|}bs zQ(v&{w7&#|UYg3Au*1_FObirj+-r+9b=E}eI6gl=z&x8C&@!XN{k%H8QyENPhq_lI z^K;1ac*IofO1=h;bte1Qh(=}OdkRW)pr6~kQ`wzBg<&mWO|X!}J?Y@x3^AHAVxmgs z$RS}&87wh*(vW<$A1URu>DQ*nkwx1YQ`V>lu03c?-|8!rv53gt4DL$#OVw86n0C0oi& zUvc2)VlqKOV8(8~apt0*(ca-{Ie$sS$vD` zVb*daSF=5BEM^Bpjt@Z=o218<32E+Z^O)7w8?=rB$`qA6$`q#8?X8a} zpGO5oWf^426@kdW)#9J`bD@<=6nqe?c2Be%S(F)sON*XDMPF(&E!%UO_ZW2AZO3%< zc=wYfjQOL9!3#N@9Yfl~M1`F@P{f%oZU4<>F}{M621>2%-r;YsDXC!+XtU|dV(}ZF zof{4`xryHNLKPZo0av;ss3BA2M&W8rfDTQ*-Ofw+qL5gb8WFLt`U*1-bg5GL`w7ERwlMlG4;4U*&Y34;33hBY_< zZ#xkCU9a~O_mN6ML-iVE=3)1FYxoblE~$Ep8qlCxXk@NGB(jys)rDVM6Sv&xP$c0S zu7{)N{}kZ=<3Q6^2c(nbo5w#|p$x~&h;bq-Fy$;td8H-UK^>8&%Pn=r8<7~ zwklkY(U?joB@>h9NqvBTFa@ybne01>E>)S^EqJBkzBc%(_Wic?(L-ZjUePbx)gsVJ zQ;D{ZZZ_4*s+L#gk1|-eY68J zD=o2jO8c+m=eY0{N0AfpONLKCfs-Q)h>K|5X~cy-61PkiSmo`K_-kyB6352*E_IX> zw`^khJ=xygWN*FH{O@q$`}#ox-oXbTg@5!1?@uJ+R}HUsJ27Atfw0UQd;*!Xe?pid z=1vYAzn+;nVuWPnw@>D6NEhl1iHCi-|#op z@QPn45T<7x;M7GlX;3~ZkrFVg2MT${HxnyFtnw9+*ZQXfg5sR>XdC+1 zEgAA9i|+vCJ|_zC_zl!Zy#hQ++K(b362*$$@`^q&j@f^Rr^Q{APIkqG8YwpVZbcWZ zIfis3FJG%`sn+X1*HvmsB2Q~MD_mK%Rx`f;lsod9b6D|awNrzA^G&~6F(@G%d}Guv z*>4l7dte#Lhu^kYG6Se;)EBOFhyTYr=1&Eq$#}|Ql9I%v@$2H;cWZZ0R`iEY?1{8T zp0jq*BxW_fik%BvroSi-mj6(}N?SDV%@qk~0004pa-&L+BFn-J z&7hNz-HPLm3EoFw-#>?Wa^vWPqp$=WYq= 
zKOgY5w`aMCwf7cX8YXspz15UK9#fY0sX?Oz^*Xt~5ry+*1W9ihDx{H<AUXEI`WYo$OQC|JV45iU-C{Y%r|Cz9vyPu4UhiBH z3s33?LviFOtO-3MM~_QP3tN^#xTJdMYCSOlOoanSbsNNME{tsa%eul z1rBPR8yeQ^7C-1T#N;h22ID>CNRI-ha;5q_$aG>92$lmOH_#Bmv(96Z z6PXOCPFB!)%2aWVc3==<5w%CQ%FVW;R}FPL^J~Cj`UBJ zTH#4pu)F5@kcsKvO?ZX%KvCGxx4K9xCnZ1YqV8sKiE@d1CDo8as&sq;gvOT%BAZRj zH$3!>eR+#vtU)=M6wtB0g8;PRu)SwFN4sy^E<}v2slT{n92arZ#Sb+j+GLr5h2L1EMIR11BJ%%TYBs?T`%R4-?t&Oz0O(jxMA;$KSX{MJ}wN2 z@=$mHsR=&Kr6v86cxjJDkN;|`f7Oe>FuJa<3Y z?51;XzU7Nb=P}*L<uiIrKX0$6-O14Q2J35Mf!D&G%Ab%D zv{Ku!(&B+Pr5zPiRt06g^fcHKR=`l|Be;S=0q=B*SRJf0+;q=b=P|9PDW#l%ZyM#? z`I_f4&04DPn+KD{+_&60=4W*NAbS|98MjHc{1D5C-Dk4(PPbZwN^jjzM;7L_}H#EGuTYcBI>>O~Q#Ygn? z^glr2W(A`J)Ce{>V+4K(1vW9V#{@y4>FqNh?nFvCT^1^vo2#gT$5a=P>sxWWve+$J zzbK*=?QJ|&t<>-TpcNJ(;2;3Ov_M-R*&SOHhtorlOC*wlQ<^Xo6$=$4wBlbBGo5So zbN3=MCt81Hb!@eOD!bYYc`p+mY$n;(-KEQbSF40mFyne4;ug26y5^MTEWUcA>Xp?X z=L`E&QBvzP?u^apgHrc9KQ{B*KW?F<=+xxL$b)+u78bkC(Yv^b^r+#;|K%4jQx-;M zDQTKf$q1s+^@&Rq5q^d!uDApC0nkdB zh?fZhfdI|!VZkj>A@4_!I1B->klJZ~&lXIShEt%;@X<}>*^h#F$b_G>Dzy&Fw1ESL zy&FrgL!_ki%CW17(^N!cKuPAmg>-mz6z64ns&-$-E7(Kaq#q?jsBNWp()$>N~7pVp#l^mLE21R&QbZcHMb@ooHm|gr$a7 zfb+N`rt}lVi7$#T4#a@V_x=E4_jt**8n!l_uVu|NlQ34(%_IfuT|dn<5s0<0hUHu< zr?6VC9QoN4^#|^MH(I_B9m2q$cp#1Ti4@0a;(3LcMjQa&gHhl~Nr&he7libaEMEGu zAz=<>iRz@p@^N!zo@Be2i3F{#=zkt6&(B!A63d$n^2irMm#+9~rwKgvO;B^jsgIcz z$0qEML74jb#nIZNCn84S{Y@e10?J75Y!l>iw|;mM|IIi5{k>hQ|8av>yS6&~Z71}8 zDOvvnDBhs!KmPu<>7;+6!YFhuQ-6Ud-yYX`r0O#64J17L-j%NvH+QFsu42tC zd?2WK6myCD4BJcz?@RB0Wh1WSH?3d|yDp!Pz`)GrW&nkLxk0htzedzKHu}K0ID!W^ zmWP_0UY{gC;^yd*k}Dg%TJT7~KAnQ}!~r zHXi1M5O{O&lYZYJ{Skp)gVroZo7qxDS<0kXiUZlQ&?5Za!4Y&)(qeiR*1%qi5k)G} zc;7#FTgD&5D8b4soUpT3OhP|IGncz<<6205i`427GKFfLLWL zYJTIN1)q3P*gQUEc|^p7`B)W2Xf-(m`+LRG12rg~#|~Wsfd~R^818^kckZ?s^1Haq z)WCyC?2&MzSTtk)?KO~Wmh;}l(YsO4hq_ra+1Rx3g9BtkG-R{O5!$5y1s0cuN5_Du zGQB}h^)^=d__)N(=#D*oe0vnQh1**J7Br7k%ud|B2;v*c1A7`UZXkoy^z^=wo5P4L#B$FK zt~WO43m%vR4y)M`9hnwR)gE77CR``Io40=n%XeJkWcK%&%e9!Bm8@xQ>}Z}WIC^?| 
z&oA}&8{W*EuL}t|9&?lfp_dU+8NodI00Scvc6Cgoui?xePO!8WE5TLma0Jv0C}zZk zgGK1M$1QLOJG_Wnu*ES5b@%TfpVvGA$aX@!hK1R9CHcQ{k6tylBtYP{Tyxzj$;wm1@hNdknQm|HernmE^xv&D&y|M9B08b) zn@I03oHDftBjqu;1O`;0NIn?0;9!R6_m7?YyrY5kXrMZ4UF$z#fhRTk8P?+dg4jox zpr?BsJtT$MzdngOL^r=eLC8eLgI<9~JrLdmw##jZXbcRT+tdye6;Kno{FHy}_b-{> zK>*g)0gzG%%;-h4BcTRc#AN*y09sA&0nshwGw}Xtv6bBXW~$f-QcezM7A6PY?v9c| zt0fdH2=wRiJfwO{n!YlWUe^0pI(DGtc97N^-2T8|L<=fz7`6hQWa%g+Z|V;r>cc{lTvaP*3oy=9tZv*{=0m^IyS-Y2V_Ix>dq@N{I-+-D>N|^vUp-`a z{XVJ=stF^X?|760A(?$$+;4Vw0`#;&nu3z@v>cJ|@D0Y30-SVY;kXq&fI3+rGhT1C zCf9nv!JtIM@1*gU?R$b}{a;new)NloFwcP^Q8WU!FM2y!cTvYA*5cF;*9NvX8gB*I`AE2?g}#jcy|uem_fw#f8yhx&lAE5T2QML{ z$+ur(UjeYZ&k=k56$#bn3rjkvICnAjxoKI2?0SsejX>ZC8Dww4qVEv(wX*{PABF8(>wKX6uX11mY`BQbjJ=aKzk_V8^yMI z;OQGWn=Up%ab-BM%b?*SjP{N(6%7_7p_|BVF!|D)uZc`)9?c~xQ{+b>vj4RTeJ_Av0u|(Nh zXksv&gO2p6VB?}vGp~VCT0{A66az1za#?h_{|FD&0r7u(0Wjzl^BPZ`_?5FFKfZn$ z+(B_v0P1m~8O-io*HRI~F*;dDX8r_XDt6&Vu}DbSyH_wh5N+}N0fk1}pWb;aN3M{s zeO*VK3$hc1%5pLupyC0l6I*a&_Aje+BaY~51;T7J1@nIQlA`+VK<+-B8m0CLv}A`h0b^Bcku4KlEGM+MIZf=v4i) zR;}pGNWVkzx5{o~B#3m!2CLU^vn|)`h$`>1Wzi05j`5Voq@S9PP~KWT;4h^SaY&@Q zS-Vy{hDPt(~q?=2I!k>wkzl+%8N+8xe3();uRzIVJ(x_2-I0XII&b%)akjV5E4(()}d z=>luFfCfczsyAAK*o=L*xM%lT#1lvh`BS>yf{_5LFnspyGn4MigLu&K&$AeLvIzO? 
zn{)C&NKcLSA3z@SCT-Tq0 zB-UxF1l1A8wX!C(CQfKx?}i-~b*Rhp%ID0*u->;`p>vOwIPsbZ7s%z|bAx)Qct>4( zuZNC9;G#?XYyf-*>sjL7Qub7aGS~4ERzGT00`-&c%K1x$elM+HO32MaH?X9Eetx$5hVVdY}EiyAY{$ zYnaam{NX*nH+w^(G#t$AD;MhCyY@djS#5yLkGAZ~(U8vV$>Zk9rO9mAvJM!dlcN27 zED*-|U#`Zk-~ik~iotgv+{~0e!z03FD`oi$loeQn$6|^=CBL(ht;|59I^f;hf*Oki zK)eJEQqwg0=K7$A8f`l*s>CAzo5TUqD#2e~UdSmE`Dq}8zC-+l`;wBAdS+$-ObQ0* zvcG&CD~WH&kq1Y_BnE=|Gj98BeD?kvnwGLCG3cK`d$I=sGmHPILYj%xwbp}eh_}gC za?H-p*C!v*jtaIc?qXJ~n;DrvTw+zGrwQX|@$e+M9HAFf(Bf=}zX@tz=OzV1{1Fiz zfls9zi-X9^k?oC@s@^k{Ufw^8sY5|0tfxm1eh68r2>1#bOwY{V7Ze0CAr+ZpmKdna zt}qgpljXM(YOC3458y`zxA*W!+%tv4a?T=hT+O|+TnACE)ar2^N6F()kDP{AkpP`Nr!?ta^e+8 zR8U@+I;&+qO)M>;iHL}*w&j4VKG{IBbi}MoNl%;vIke3UzymnVFL{GYxasVYP|ylQ zh3`dqY;<*hbyrd{i58y&$$i-p zb&45lzh`#*%U|pROZE$q?Uc)ffxufHPu+QrMdlJF^5nJn=YqodtL$9IOvFQaShAQ~ zVG%WM3V|r!IzLZQ6raPX^v@aN{Q`a%U4f)GVh!R3l49W@;k8!QLbvYjRAInwMLT8E z0*mA;JV%`Zy~K_6lTU#~pCW_tb^&rnprqZ zlhx7B+h3U7;YYIdzdP{Iz+N4B3#aMzD>{GZNqXaJWwg;5Ivyno500I5scBM&gkNnw=;d{iN}6dFGshHp zmo%lY7wT+<3`U^s#{jvV@ZJJR=J0^yIwzvVwk?FqAI#6$Tps&5#+2Nrr=`uX6o}MV zG(JO}GUa#Zh+aO9*g(TLIaC+lisq0QN@B2;MRoCL*(@AQ)y43#wY??#^Vpm^_}Y7l z=`f2eIz^ps>FI-~>(sBVKqnu4Pm#f{ohY9pbA%CJCVHt`$O*0ni<;S-mW(3tXv%4H z`Av{4upN37bd;(5bYrskQxzMW|4 z>zyXKY9l~1K{%D(02PqbQABC%@Cgxko|}_-1L@z^lP?IOLl&Tl{1DXNpAMgyBLFpN zZcj}8Pild82CM0TRmjQ=d|=?FuyYsmE9p^f%~cfXWJZBs2^4jaPwsqeVGzHaH*0); zKRm8C4qJgjY?=mmRO1ZUZr@C7AR!fJi^sAsHy)m%gY!RWE(dA1YnOq3WM9SN9&fg^ zT-m5}jj{P64b@#Ivqemq6J|yP+z(o_jY3S!W{N?G&QOJc+I%;I5}{Khb%Cg!kR&vH zikR$6R({f#>%HyYRrS5aINKm*o@(lTeCyG`P$6qHGj#@>3)Pl+T9sTdU=Btc{zYe= zd9&(%l8ESqAYocn#V0e(Ae86Fd+w&_*1|*hRBU+mAT%?fQDsRq8x+^7`@%3`VK@+0 zG7Z0SOF`B7C{Y*>Op1t}+|dPS!S=sP8^9wagWOo)7AzMKvx-6+SYdXLL|+=S=hsTv zZ>hgCwJbclpU}K+E5kV4btM`J0r7(N@2dn!8vZ*;C z^78URBV|h5$Yz64&#s)X^0lkXV~d6e2q{rdTR|G>3xf!0+OaDZ_-Jz6ZJp`?!t3Gj z+<@X3!bWI`8N}pQA135RCy{s_&HDyNUwM*5NbFRLo4BdD0OMwFN5<)VH4!bPCUo_# z0?l!?OBG5$DSxWY>#MqGiCR-xNlg_ngy_JPyu=7eS0la(n_ z_=|pdq`v8bD1WbMIV-c8`wi1ct~;9ZmoPG+wGbgmJ31y1nCpT?Xz-)Cti9)&chU0! 
zG)c4(;2PsvABB=?CM!gLMw$XbB(dR996IH<&tGS9X%>T}qht3>9%2c#_<4dv>6L0~ z8FdQ09YbtiN{F9em2$G6j^d(RR?#?%rfs4mglRmAvVOA{pHH#hIyX0mCVWUz}}M8T|gya z>EnfLr?l6?vz-WunFK*=1wMdGcz6^9Tz}NMoM@2;VP3%tk=*bG>SCw4!rT8x)jKw4 z+C|;kcan~sT(QxyZQHhO8&_=Gw$ZVTj%}x7+j+C=eX91e>-@0JKQL#lHRl+|$aEm< zCE0>+A8!bcj1hhqPFnp5BS+Ao66gd5F_=*N9&cULMI9y$MPJaN0=V!KCTLeM1c(QX zSgbGNxRr`2pqUUy`mnKf`ils5%vfE<4_TqnP`3|k{o%TpVwH%A5c4s@29Typ?sKP5 zmePkjoZu_LefJ}jsy-6KXTqu+& zGB;VObnm0NvoewD_r~7fBuFHoEp*2RGZXx-{sp0I|aas$rOF~e+es0B=D6{dsTHli+s5M@3~-rdS)Li+N=`2{qTo@d#fBf8A-fUb#JETg;0II#+f!6#^b{;;p|_PPg3RiqWbDXz z!|`OV9rZ~bh)nJ*;OZ`~mA)}3pc{Os-HId{7}0Dl?DB{gA!z?3HX*j8;B$bK`u#%f zhT9!J`2c5v7N1{`qvgB0$~~~X9XrHy3jU_HmCriT%^y)I%odQ+fStzf6K3!Cf*)pQ zMog|BnLZ0`j}Jt>8Nc`s&i1HMR-+DYCs0(wnJ~LBRKlbvq~+n}M0dVcwB1d9tf0V> zC6)dKVPXYeM9r|4?M>1<_KJkjPQsnTOm~E5k8>v-Keo`|2^OF=wu#Id+s(HrC7q)=INI;2EAG|1?7F^op@9&l$%_)EA` z6QYF3+9*tk#B_T@{a_k@_%Tmmz!t03pwR(ZV2-9Cc`(%#umgOD z@2%U3^+0iLgjFeDaOQw;wFUP)3(PzjMAs~`F& zMKTK{1(}+I+~R5p=`@5vLHT1~U?lycqYP8BD^M_om=Vj7q__%gnZhZmEMnb)bQx$O z#4)1F9f^&>sn2R*w5Gw`wUS@Kr9E6Qwj*rlL&BfpGTz1coU2eZ-fDJ|CsptSnd-LG zZnEAFnOsycLE*-6RxXcna^U`hbTplNve`r3;bs*yqNi#gOFW%Zi~l=6!9y)Qro8Id z(PGBVh8HW82TU48=F-qGUj$+;Q^6oos|Ld0u>Q+pXY+tDahTfi{6((%)tL1uyC<^k zP^SY(Gu!0M?P8&ZcFX7}F`>Mm0aq|7*>8J?T2=W-VbqM#;c6R3)?7IdTIj!|$=={L zMWFF`%qB8QN=U6AkeLH%MEKfE_QgaO8c!hdn~-q|B<3xq>@wAOw$l*pEm0umBH7|s z4o9%9b?dH{555gA@2-V0h$;}mZd1a)!r2xMlNIIDdlq+rm+|QWh?0azbYes?P#Z!_ zR`s8po~wk(eLxYGD4$Tk#RW}qSt$`GBXY&5CTY3aXn7pzttp4?eUpIVKI_GL52E-? zC~+$XVu;cZ8nmRSlQPRC>#w36Pq4?F6pr^5tLpFb{|eCn!=^&WKtKdIFS*OfQW{?? 
zmlA0*qsW}`bW8>j%!1v*)2r_KOuYqpK9L^>IxTv7-J%F@7q9r0W-?{*qhSdVZB zB2|Z}VoC(FwYu?Am!qCwN@rw^RT)IpM2{}m^8B0~6EESa&*Mz~rKHLleMkMQ)x2&vKrOa>p_X9V(;LaFwHlmR6oT%$EMzcKmIEJ%!zrcy z(k%_D)90|rA~W*3izK3lC^#G7%i;2c-YBt)QZJZ=-*d87#EM%2#UiM!a-#h>BAZnt zsf+Q}lAE}w&mD6>j<|c6;mqMm2AHGtY;pxxxm6^OhNnOtX*$?whI~iB>o1v5ufU7B z7QD=jA8cz;Yiqj(AmDPwI%CKau@FPRMA|sjtN`$*u_L47`|HKnvE|uaYz{*QwBPIZ z!%|kAL)Q~-j+f0iK#zXy*3>o|DjEtFXWu9O7mtJzOyX_X(Cy4W{TooIG{t4H)WX~0 zejOqw+x4SX`^cq8cN*oNC`$*V(v<@>DI#-= zdw}jAjNA2QL_Ui&T>1-Rgbk|VOhln1RlNk2TwEDUp%Py!*lLS6K_GFGVv(FK2X{yR zKn0x-h7g$33$T99 z4lqsHyD9U;+^jK+_>W999*u$Az#8d6A` zYOL%6A6=@$&kV6FHd2!XuuTelD8FB=Kc&a*!3+}Hf-tzr54dq+AZZYQ20O1T*ImF_ zl8jDiSyH_a)ZR6+I}lrC)!`t19oN*68VhBh5M#Yd$qm{b@#phZSXlyXCuO?Y>_R?w z!ylY7f^@-V$vtu)T=!59Z6HQvr|h_$=R^@=zwS%8Z#6tTl1rdUQEa!9zGMAeORy8e zORkh}!vU4|>%~~>`D`qb!h zH1)Bn{AOBKucv}c3qLlRZ8c{2o{?nBj2IqNDr4qWrshb%zAB zp;oIQWc+r)%+7=#3VYD&oo+T1PH6lGorW5b>8p11B(og;ks zvVl6d1qG!rQ~l(?#mcgT!}0k0lA|c7bkcvAPxgs-`x?X(gcWV5@4ct^5jD=G_Ix0II4!0SRjkc)~#;BQA>zQ+=i)liAaEs4^5D&(?=x-k{n>5I>fFL1aS&`;}h z#~$in$?$L+JtUvL7)Xe#kSBbGus@?8I* z=4@|3h;ldI{&{nmsFhwrytFZGEGmwfJQ94leI#1KAuZ$ALG!+wTfx;QT>XG-XqC$MSyYwq7HFeqoJt1_lGRECwm10*_YuSG9H|;?Xt0(Ni=I^BU}1d?v_p1Vn`yX`ll+B^lwVkN?Lmr}^JPRMh^zW_{Wuv22 zy~95cLNk4eO}M-CUtI3Whk&#Qr?%F z){g@$?bT-gwRfl(7!$(y+PpIqH#}ZWBlvp%ciS(sJsMGmob2t@;=T4zPh3X-B@0S^ ztSib-YRHoDdXTR$G$~@%-HLSk?Hb#WwI>@sgwaU1k3(^1D7;qFcW>P-T%+T-P$Ci@ zo!^&J)x~B*%?0zDpyB9&HUZ!LbsQ@-ww*%lgY{i^ZZ-G{*^I96>ZaXDR-t$-DP_8O zs8nE+uT^Xke^ErGnE?fU0}-wF3JSfVoh5Hke!cnAXt|tjU}QiAbMcDWtd@&1(OLS6 zF{EPuL_RMRHQ z4o2BNia!k^oNQY)L@@=zXsoO^a<^PX5V`*My*dS*?T<)*eMl<3{TV<&`>joCijl1I z?v#LqN0Hj~Tzb>QfT7C!7Fp-VC3&6JiGf}cY1|8>^jdL4Bh$+nI=ps|N`Lt1JgOu9 zNTtQLp(V?pyN^VsqQnJrOmHvc;w#R@q+&v_LnE6uG$!`Fp#+ERs z*?i5s7q+K_tmMbVGDCfTHs6)7`J8orWdB<__MrKR-fe6fIPU}r|z689rT z2qTXORN`*BS0)vDra`uMuO^<1wvKe?dlL;-T)oy^ku?l$YkbCFY*$C!GqNMjK66L< zqHc+T${Y;*fOGu3$uknt=btNIL%Pi~-4ONA8OiYu*&jb5-xlei)F|I1z&p`yD zr5Q7e1U&I6oFTRopS02s@6wSBZxX~N--mGXJ)XQyVjO>esKD76{FeFNQ!e>DjO^p* 
zN49S&VRQ2uSg)x_|M;inNP89g{dc}D!v1rI?CWUY&j$DYXb_A2zbomumZAq8#8+B+ zl<@-ZVRK_hm|o;R!=cK^#AWpl+iBF(A72qC`lDLu^Sqr~^^``(zuzuszON%7eeiiu zH`=bv@301)Z;`&u35SeWsZlDQF~3M!_S7QQu+Jxx}b{^~=HKb_nQ1&n&}$ zGihy}0S}3JAN*e&G{kFG1>c{BZceF1Bq}U~qUv2iuIG)qvi;f2YuZvYDNN!Fko}(M zQNUb;1_ysNMYedOAeh1)+(r3B5KW50?@;a^C$);`Wt0m$YXiiM`hMkL=G%$~8iJ2{ z?|3GgM^V3jBjv? z)Ml)2=Z3D6GddBE8ZH^~MnC+0JRZwK$A`5gyo!khlh=bf)bWS($|N`fjae-GjEt_( zwhnRVuWi}zV>w4cW-d{qv$Lq6U_v;o7CB$WXN5)!k%UPqEiqbQ*H$^9}UJqkMfFEyBJ6`A=bsC(On#TpIQ>v(See06sO;c`nm zucD%yL^ork_jhx<_dOiiB&MfP(sBb0ffY!4Up_zHxbGQp5iL$V(*s&5RB_4Vi_0q! zq4ZA7k8O8i4lY^lp6GC6qd6Cppv}sNGgr$W--?idRCpGEYx1~fJ1_i ze`2TD=o}2HCEv1c?Vxt&|uWPM^??Qx-9S_BC?dmcI=7$7Stc8V6pI{&UiARjd zJU3o`e+o3Uv;f4sp7y`jy%F0gYwf(f6_SZ%BO?n5!^6W@6$C_wK_k1ZFS>^`6z<8c zyLUdXvhdNQ3mJ#ErEMryxzTAPkkQaUs71Yw`@Z6Q|KlX2Dj%2K=jz2=r?cj@zVw~O z_MYT?mwnf#y!m3LWTuw&!o(uRL%d0o0}qjo%PW(nWW*rd$@u&&4U;YwyFKq`&qK>g z_I-hIje4Pd#C%#`7q{4)n`?5#Bk<;R{F}1iMW^!elFhBflsp-6cIpkAC=HaSieUsA)7Uj!4h~?anV50iD;rvQfthC}SmZ|7? zD{dBSf#Cev44H%*;6}GoHbu>r|98k%OhWDmxx_eSg_4(8>BG&L+$sO}BY)y`slcga zWTeJh<}EdMZe4~leQZHvOliQP859ABj;Dg#r0U2**QLSf`IaF4OB%2KxNl*lzH>=* zFoorHOz!-5t5GlDl|@tP_;Hk&`y-7^cjL2!r7(w*0HQ)t)+;t?IX66BX}dN2 zIZ#q{gTBR&|6afWfIEzfie@$d zO-)ofl}Oq`q`&;;BXfv-*qy}Y`pl-_IF*3M(~{6#Z%N_FhLqY-xbcXm=roB$@6tS_ zoc}W#S6l`y3837*+N?A--7-#WdRp9KvO~{EIr})_0|vP4x6qW9=hjvGmvSMrpCT5! z!jehIo|G!HpAb%cT_J&YptxtAG2Rt#CQF=}=glR?bk&HeTYkxpQcMp?_4=_(Td9ky zSC-n5vD$CQXTIy}OHHVMk=m}eXmbItDgpSo6yU$I<9~$K@xm41i=Q8;>p#T7uJ(X71f08BT7yS2?Z^pkBi*kDuaZmZ3)% zQJq;C(|)tz#q&mkws3*w<@lqRd38y#WYC%6C%rC(p<_k1W=1B#E_ad8qw}Ia|4p|| zn$rmskBo>-yb=kPWUtjtiM7)sjyXA^ICbb%hi0iHmV0ghtl242lz^NqEC! 
z6O4?czPCDo0-5aN((^?>aZoJ7#r0{!#p*lMkw| zTgl&lnw#jU0;O4f%`MNquM)4wXJspy;v&}*NJMkKG&9$$kVi#@8*ee`XxFB^=er?I z-_TH<UBB^wKqFU zRqDmv6Z}bUO$=rJ)nu$EEJ|9AH=)VNxsPT1Dn10CXrmsFI!HFZaQ^@5{hu=S(03M~ zO(O;y&gXwj!SI9se!Zin(RUZoESQ;fCFL{cE3eSa7_A=Y(g_LY{DZF#Y=+vH<~Nu( zYae%=3y>V`bhU;FvWi}!`MWYRCZ`5ECOdR%JnDF*RL+#(*ZM<%-!Z3K*TSry?PI=p zpTT%htF2F)fHABeO!oW4fjveH4&kGI=?)e7Sn_?jJGXDRl7LEjc9yTBe0lCgFC>N3 z8m~>+sy)M%n(nKMy!&+MOP*Pc1NSu~QYWTadnmc2+Sy4I?s}^VzF0JeLcA01n`4IR z03y0y{T>W%zurfVZh zjvT(-#S3E3mm4R-mR&gXekG;C1Gwhuvv}{P&c|caXMl-fcd8H^4D_ zP~QoG`PJ2qMCR?n8GjTfa+X))c>(5vXV?ftW|0=-G@jZIJ6)^(t`ui;GrgimCbVl(>~nfFK2CtM}_ zB`m>FQ0C!MwB$Uih6*Ew<#>b#3fj0;C8Gx*E&Sz$<;YD%{HTb#4aoz4Pab8D&kMb7 zBV@AMM2qZO8MMj)`29&-&Fg}887f=nfb&m4!jhvivppNVW+a1rY0kP5(V0_L$k|y~ zEi1)2pHcCxu!JN2BC_H>z3zGoGu+-|RhUwTW4-xRp$$=;4R(rBuf_ia|MVU0nT-J40GF{o^K9eIp zn?lG_dmeksd}Q=QXnr(M9XcdcPk*Tl10kCg1YW&MaV$XHy)BKCa!U%z&Hfdfg7dWR z=0XezAv<9g#DA9jVM2ITP&s^Q$4$vBN^(S9_A>75-9qum7^Teazx(${>ay6L!lHkBgt8^)JD$jRDmUquD@TkaAy6dLGHNC*W5 z`aj3_f1rNQAXBv~EHEgr4Dl&zm8wl|{-AsB=!zb@#0;cpuTl-wWY}zW(PC_+b+Rwi zj8VPH&jFY`9{*iTStaX6s}tr+!l3_1(#zF_S=&1brO+f*8F%B5X16B=O-Cx|?x;^j zqe5g%Ta`HA7Qxf&`9t7wW@Z9$zEGRym60+=t>aHK-!+AqIT<(Sp4gT3|0NvR2&3R zI{C(#o_{15E^B5e_6s2^!-F}4uq2=F$ zKp(f&_lEkx_^)zv^!0b3H&J_q7nGIZ)*sby<*U=v!-xVxqhMlF3C3XjYJK$Qk=oSI zd({q9SWr9TR8OM-`c!S0N&?0M&nS%b7n75tG5dT3FeUzGmWYr~HL1C7A+~C6Q-z#d zZO?(~($;Ev%L1nB+8t9_;4JMO_XpPRt3Sbl0HnZ4TM7QmT5GUV69#~#}2XtZa4g${3yxM zf296>CA(o`%%{ex4n_5=`(BBeoiUST$kb=W@K&@CsmFYST zhegMam4E8%5p-RzLYSB6nLAPUYsvRB|FEihc))>YTU}7S!bsAYEWj+P;$)FplYj9eC zRuj$%2fybN!x<#~b+H!vxbZ|nOohlxHdzG4jBI;`BTz#Kdy3_$P*)4ft6M{rr|w!! 
zbs#`ZEt2AL$qK49;OavOYjKW15=KW|0%lvzyp@8+F?*M{=RUo6ooX+jkqbuVCkMHn zO&6cbE83Dt2M51+%fSR7DrluN5j)WQQ09s14v|p9h?ZvzI2Ivr!irj!a?i#FZ|WYN z{;QB|$*v1*t<@72>OsI}w~t-FD{PEBDDj#~=5%{K061Nwjw3YfIQ(lzfS4=MoiZQEWbwwB^64SrCT(hSrA0i&URuNuJ}YU#O077H|K zHq&5O?2%?+n33873KtSpInYWVW+d-Dx_-prqf6F!7SM(o#Og4*=V1U}j7Z45R5V1X zQrp{=fdSAGs2Vc*vxzwUcH4<>jn^L11CA}19r`bDjJ(Q0>qaSk6+DYNG@U!-4tqxM z;gXxqgvge`NL(PE=B1`;zR#T4t8bwO+#I3bt7^tms@**qh^&yH8x`@)^CluIllJc4Y4s!9*vSy)bWu8gx6%L-5-AY>xkip^y?Rhpa9bU912OO*WYWH zCXD;AkpvSf7$FYpp0XpyP87v|hNb2ArPouPj(_!h9a(=g9-}zj!Ms>|%e*`=(>$EU zLA;@boJx*K#tsIRi;vYQ!amZTqjB1`aeRG7(cgUD$wE=1&ysyh^Gy?mI!HmS-GcYL z+Hn@gI-elqgNgl4mZfCzxzCAYP*d#u&C}kL6*%;~>T*}g5YT%n~P80~^tq>xJuo_9o@LqEkR!3h}LXWWGgb2^A5 zx1=DUDL|d6?*w)H5>j%Y8t(;xUw;`edY zhAh^=DRUDsNX-4s-)n%?2^AMM$OK&xR_RetPwYBJj~g^Py?MB8=uh->E+_%r{?^4* zzf_r&NaIUmctM^uqHdW#TaeHQ&EmM%-{EF-?A19u4Pn?B;X~U)q-kr1g0U8V;;aiL z!q#(*BvOe4LTeQg=@~HYV2&9l?G( zhFi0F&f0^&$-*G`>`vg8FFq$TJe<()Vyy?^T`pK)!II->O5dM|Zuw9tK;io@vhQOy z6&_@Y?TG+RA>1C#XxaS1>)%k78^~8*e)KQxRHKzSk0v|@cmjC0fQk>Sg1{QIab%5Q z>&fRd-|&g@iT7+=E3RKLvWM^F!cBRbLm|$`NB}p22&@nWwpRa)9xf+l=aWg=U(|o* zYic2Jyv3WWE`viKWeTK0c#_`1b9uw03!dZCOF(eAL#Nay1}?MQk@E^5vfOm%7OG$~ znI^S(`SIeeLiWy<2t{}#L|Rnv)~#R;ss7`Y7H_)VjYMZV8jFQj4aM2<%|uXp17|tU zFUi8uSznzut%(ih7KDQ93Vw%fkFQ(`Rn|6dvRz2`QrNMSbZ- zo$bl@Ckv~Z^IX#Sxd-3KAW&xJZ^>)MV8dUVviI|CGOMQ^010AhCdR=HQh!IA&^D#*oR2!hR;P1vP*o_r)hY}X)y@1UKCY#4w6*}_&(H1MRG zt8xJc?`Zzs3I3&2{zDkB@HiAKV+c)Y8&hQiuUJrDpX@%zohV;>aT(`gRNHdsP!llh zQi`RKxO#B5U&1|iqJh)*(C!ZgTIx!%Dy*mJt zvOPl+hLENr4(~k}yX_8Gcwy=`m$S$xx|+R3Aw8GHz@DcGp*d=8A#;^kvDk&e6pq|k zKoYu7=<7naV%ilEE*M&9dgr=aDvUWL;3Z^Qo1ioyN3dLR2`3G^G&zsBIFFlwiI{JrKV!E_VEyVzC^? 
z575rx-ZG1Zk!Tw$_{Tg2IZ2$_94OGOV+n*7Vq- zfEaJ(xq2kAq{`tzjZZ@t<^D)E^v@hB&9iQx=_DX;$koFYuKo);nn(cSR{fNbW2B>jhhl zUMpHvJ66ze9(uv|$Pgu@q>zCHjfiBaD1QNp|U`y`qRd0POY zG1R4cjtDNiQ4MQW{v?X7L*YiF)e#G-0oA$@JhDm>?Pr0b zK%cew3uWQMuW(4n_y+Stqa8uA;=}nahrnlc{;r zqrx+~?PR0Hz0}_?%TKhMKtv|{jyb4vnfyt4N`GzA=In$f?4CxXGKxwtqv_vY3%{c$ zK+RvKmn~tsttfz_LzBNY#sQjccq0&&&TU?hFd zp^~1PabhNZpX483Kb@ys&1!h~4>l@?EAAC+bZ|}wT`6F3wd&@X-=DV^a(6vOTVj?a zq&WwyqrFx9e#G^bg$wr>Ax}srmrwNS7L$7ncIrHf=y}}r7w%?QjyJ-A)|jmY=8C8= zw>KssSi|$R;8p>FnJ-hACAAd^=Ld@0#~TUgi)VjR2?kGR=U}52C>{Pr9PdifYRq0b zb14B$qy7Ha*QG}5GeF3Gbf6q)E@LP+=qS9U5LUQ7=*(R&gmBQ46vY+8WpG%Ie6o}E z9H1T^R%cH6MfHD26F(CQ1>^vJRe>a!yM(knL{G>2OLv*B?tj1h)cPvVG3f}8?*@y1 zyI`%W=j@M65V$)dyP9$T9|;oo%vS$>w$BNx+i?0gGeeKP`64^Be`;5KGE8y-f;gMmx5o!zYADn~lTEcpc4U0zE%;s0I>)<`kTm;Hpz zXUrB)xe@v9kMcu2Em2;AXMJC^=!hK{0GxoIRamUr5jOUEZNc4UNF)vEh|VZFo!s&j z*H09StlWy(l4tUrM$nc``eH5-+T`ieGVbBmzkfvh=ZpwG&v-f58^{S+Gxv-AC+)C| z?oUuGCgJJm7)IcKFY%4^GcKzcQt6U8|3J+>*XhzHc$Syb|=c!wLfC zi4_%!=tRR#F}aJ$*Bujzs47)hG^K&$|9DF42S3dDZN##D30~^|8)msSlM+|b`*8Is z0sKZBbUh)L!qBi@AhjtHaG~779i;f>$--Fm(0-WC%4shvx2fv$xWxWqc;Kkv6+LVY z2!v>=kOyX04Q~uF0)ED_7{&*;3@-S`!tA>|q5rTbw(Oj~G6x25H&a~)r_gN<7O1J! zT5+(9At&6osFPWD$e9)*R2!`y9z0_5ub6xUk0M~kyr$_ir_|2pLprNtxW>|s8|CE3 z2Ra_k4YbF4e?1%!^cOyxt&TOD@a`Cso@d0xwT&exU50&|aS>fE80?+^EOjN%jm#A$ z3VOGyssev3*#QJIkAmORZY~>uDQ?bVc`+rpj}@OFf|Uy~4KZ7fn4FN;1wNrFh|n&? z`b$qhRPPT;=X4jk$e-={Psk0{dSZK}2V9`ylybV$_azqFaiQjT2I%_s&vU=L1wK^f zGd=oUnPp!{$4&Y#kve$01Q0XYR@$THnkS%|>ndGj&k0TxyXXSy#YwRz+t5&JDkFV! 
z6!M^hB_u(PRVk6MlX>RH*S|yHH==n|?u7OCHj^3K@qLAuco?a>bV{HzwDd2-3`&}o z?dWzon>9X!yL&Md=}*3RAW;e@mLwEc7yN{J-QZg1ngan7v~yXkAoo$I$xw&Wbja0X zi9`!1-Ljr^vM5MhJi`$q^hPWd1jH#BukfO* zRD*w+cx?$L!PA3f=+DCNg?NbJ;nRl5~yyYFbnP7M*oax^}wSMxj}`HIVh&QRE|E*@!65-^}`LE=N$n(w3%6?;KW zrnBOFYH8nPU##HhJM~6g8CN=64UlImldKoD^ytJ3TzQYPN!N?yy6^pz)lM|mj$7s9_V(U!Sg)lN78@=^iF_c9qcN3yvv^eqaEJ00K z4}W@mcBqN}1VDvTSOnwqcJJAwLiCnIosRIwtapnUE>AK#G}H`323kpk^91Sp`tCi~ zVm%8g%(tg$)Sr5wo3+&RSUR_#q9R^MWp8!zS_mc!X5 z%O5wo$*rPmtNbs1D54`}ipU%%Grv}<6Eh9BkO#vM`}@qA^)FE3J!U$1|umhtrzXU#9SbfddAcjTmGym~nizS^0W+WWY41`0N+B=NbH=8EbZ8 zLKbw{cFar$AP*N?g9u(^XFe)@0%v&bg>DQhjyv&51Y?(&Y~PDxx8Y7c{{0>%^8(C_WW|tFa6>uY18F|L=k`L3lG+L~;1->A| zBS@xaBrYp;waXKJfTZ$!J}rhj#H4O|#s@&~#|fYwnx_{&9U8Mi=)+_S_oi&KoX}48n~3O& z@9Db<8>bbLaJdbBtN4JiwiQ(-TGr6a1`e$K%tq3QjS|(+eq2F4l&Ck|*p9yHm*4vV z`sNK6(!2WJB^0QjIfYtKt2Xu}89baBfkl;beV(z4;v2EO@)+ zDHF~2qr1qGMK~XUY~N%7?`&{kQYMX9h_2FflJ6)TaS&$=wkv*g0|{?}7v+EB9d4Lh zuGm^D>|$CTifGd@;)5A}6DB3@4Dh}#*5DSP;3`_0%W$;Vw*Be)UxTmKwW-}DS7$RsnmtGR|hz%}{R ziA|kST5hA0Hpv?Y+Yqi$E&x4qyaPM#=!#9>n)cGH<(Uf{!F*_KUFB5BFHYRY_hF4= zKX}bo^pg`-zybMVkTx~-w5_X2S$&TAXM@`TLUv~sL=>B^-ovneiiq-RT5uZ$OpSHD z<2jMYDW}0~c?0H78SB|1Q%FSvG+c_1^C{evWLuOk_cArL{&t~rt?~6<`r-;$O|`YK zx^SUKs(*dHvBtGK`Wageqwj+PN2ZdBjHJi|YfH#V(9Q! 
zWz4iwl;_Y|39Cq8YfPUw=e(!WyLpefw{@A-J;Ho_wZRQ zF$FYiBE*1hZb}nHU)5fyp(fYzVpua#15wB2Bm3%H{OPp+AjUeivE>v_r;cpKf%7E4 z*9asIT1;kN2GO~Zxl|nA>Xn1g^3lk^mrW$6xUJ;O2!`l@8{l?p0hU&Mu3pdj(R2Op7n3d*$`(p{=9Zq-0}TS3?a2{ zE-fd+bTxwDe@}>!epSls%A?Kh*It_r*y>*(`26Q8%SCu{wCm^7aHk?j#>9+c49GK^ z`79=%{AaR_<|1z7S`!cUUcYipQOg@Uyp=xcky{7>i=(}&u_U6lue<4&7<~o-KRZ^* z+XV?Pwv#)hIFawtpO%Lm!^_=jIPvt^Hq65^j@4dYU;G{;-|)w2NuhI;LI#f=w>yj3 zTyeZm%MKRf82Lk>)Ak+#x7QCRZeKZHa{*#1lQX{-NHmZ^{ySzoOIs6bUvkoS)8i%> ztxM`rlKSXS>x3B+59ycwPKo`nV1f&373KbC+X3nP5=(xOi3R{rWD*{V$r^?5=hhaf zX(X+1j84R~O1RUb6SVy=hUPIvt9JOB@;@Hs5*sRkuhEal0$E8%M~S}+{tX#ec#*m4R3CA0!&TQ-Qn#}QHKX3 zntQT6Z7d_pUJbIPMRvA5h~3N08cYOYZ)ncA`pFIT+4vy7AQf?eOU+`ljdHWgs~}6< zoMm@knU@}V1@WRkRu{F_XGy%D@gt%8`> zEtG&Vz3REHzm@nUb4pWf+;0_EqnnKO#d7N7?M449fc=WFMhu6JMm#^SllI{d^Y=e$ zSoUWJ#qKxqm#s+gPq*BgVrP=C3)RhuKLfLz2eZ9Fti!2^mr?FjLl~+cQ*A)ury`vPUa)&M%7CO-`5pHrVZl17@?vSR0Qp zy$WTMIuvHY2V*7*JkqnFX=#tnIxr&2;qX=)jY_*X+^!hmNWvKENJbehmlj1zT}S$ef@aHdc%%PydAjrlXLAM} zjLPk9S*R|vGTyj zizE@JCngICEU6OD=mC`S_m?b`IbJN9>U21w&dKM_bE@RXU>7omAe^IL&J7pAW@9s9 z0oyq<-zab)pD#zg{F$NTO|B@#&0U<$hPThaB|I=A&f!8P$~Pc{LlH!-Q{m%1uVLhJ z4qd%YQHY6)Zdqgl-KN&zHmsx7?DTH#EFh=M*&V)yD0R93Bj=yc?MF=X+2Gi$ZUUpZ zxm_5igm?3fFF6PUHSck!nA75RuFf8@#S=F30vxkT)w%Zoadx`lne%62o(MS#vZ63a z+SPULf=+S!?bb~L^{jna!=;fCC0+<>4)f*rFoC#d6=x=^<-g#v*y#n>8(7bTa2WO? z#G+N4p+FV@7}o}dIwg?tUH@~+`Y2gf+5O>c15%e;gNTa)i98l>aP8dC$1k?XkUmd0 z=~$P#?eVa?HU`{C!{TUl?!Ik0keBgRGXAc~(9RT$dldcT=pQtqJNe>b1&6(6287d9 zXfA)0GL4q=ofd=LIHx=s_-v&nMvE*3L>%U?x>{NTDhx2M1`_JR(A+^wJpy$h0>Wh5 zaNpeO>T+mZl2w)#apfcY^Vt$uF3PIp%5J3DMy=REnb7Xk@i6rpTyekrz7`YsO2(aS za+eTS!~d_a>kNmh>)J6$7{WvwiP1%LLX?rHLqsP!AzH)`b(DzFTL=l!dv6)hdrKHS zhzUZph)#&!$M_`I_dd@v?~mu*f6lf3oU`^?_geexweE9vQB8tPxPR?syV6*$<^g{l zsRpeF$}o=WA$!_(%A1ih2bfy$e8>5{=F^y&GM0wwPM>=BacG(?rccJ@8m(SdVHH?D-HJ0;LZ2 zev?^Mc`Jy&p-fLid~a<&iY{s(+Z03XtdBs_^eXlo&)2`;1#T+?8&`nz^6ih>QlJ{TUIs92bn{? 
z;sgX#=+E6h6m*)kZ#MaLZ`+Sz)Xhc?-2=1Jcv1DemBTLQt!EO*1V?b_$;D9{urdO| zq3(1t5K!diq+UT-v36oSLTt-|j{Itrc}{)^QaEm!Y}Mvq<>y8SC%1v~!j^W&!dry8 z6_DM{u*Yiq9;uDT^w4Az*+^-Iynh<6(irHz+b3K2?KEf%LYkYg*3=u_R0!syPbqw~ z@5^)_}tq?b96+e_jv0AAPHtTo9Q7Y_G|Hxgt zbg!ti?JT8Qj&jCo!mwx_8*>Q)^(0J&+&Zl_g&MqQn`uvyPrI@6mMiJxlwG--iYa}P zb$JGkLqggPAI79V7G_?{QEbxK+VKc^AJUg&L}5Wr;(v}o^o*1cz!uZ23@zT?TmGUo zR>RP|5>X?HZxc4bYqN5J~9 zJ2|kSSM8CpUzGPNZKE{{XIi3FC0_7=SYs*_N2$H?_2%6tIvUuUy}i)cti+VUn#~9b z+=9oh3)3EXWZYP#d`w<*g8H+bhQcdumI8A}F|jWPhC>2kko<|RHLK%{gJNAIO-KrD zwWZR0oghlSBj3(r;}2eUpTF>GOXwGMrH=Cfk~fAx^i@j+Ys+B|=A>Z5?eU~b-fg1^ zda(FF^TqD_{HM1MHYU5nRH-1TBTDUOT8}7OgCd`CrZ1~z)Ll(&$O@|yH=Y#FI<4Pp zJ#O6A!EMwaRHz3UF;6yD{a31PIoG#)T^aW{2sThsow3)LTlscns3iL{@EhbafcR8H z$uNa;!hJK`Ov>~+l`&EVX^QTvE|zrL-;Z}3iHs!A%U{j)p%i37(V~%hof5>Bg1Zx2 zG7-^*DML04dyCL~#U}VReO~_PkJgrmLp_*61tC95ufo3z^z*GH(?hd+0AI76<97!v2w){z77Xl4m#ylIgla@j4e8jNa#yY1plWp^H4NmajDpX4#Y41}#S;Pbzw`=^Xul;<)ZuLc9hMx-K-f=y3 z`ksGSS16qMm~L7#nI@Iy5!SAX`DEzH;+I54@=)R1VQ<#0i?a=a!VgzdR8$42$T&o8 zU1CbhK2JSggh(zjq-Ms3MDCI2Zxy_M`f=CA<-C$p+qcdDSHhF!o?0FaCG;7ZN@8_P zUhT0s+WUFB*N4`}QR-P9rJ#lVtmwK~rFa>R7mDAT1W_XOUb?#%piku)7}-#CYXAr z_F0}{5@nLwZGZ$KmZyRJHrJhz8TU)Sh67DoFN-ad~ z9{7l#Ir-e9Q@Cisy-oEEzE0k`t{`4426Mj&^ee6yGc32{_MLPmEX^&n8cdsGBeH8C znQRIMBr``gs&__0`(th`=j%3j6;8_p<*WKsoTyQqyiN&huv*!QLw7Vpr4{o3^rAr_ zqyrLM2+-1iHj`-fT~^F}0)2A!JszI=VYPZ|fb{V_2}FX?gPvQS)| z7$e$9`OV6n_0iqk9jSi%&~1@Ii;k?XLSOooZ0rC4PwdM~KPmED4fJgfVZ^#o3i>6W zaa}0>`?j^`A}*bw;@Rl^tXjs<$c&Fu@$Zvd0|l1$6oQF=J((F;c6MeC)($fHxT4rd zOq@elS1IvQur{;tja^CWUH2KJ8q?za#G-eTzsfWXs+7#y)VM`E3(^OSu|?||GGv?? 
zk%!T?@kiO0+wD3K5oZK`-&9I^xvO&-aM*Sp+eA|WBzkdnn_QD}c8`e|=u>QKdOnfy2so<4(9Bp9f6kWowNiAd0J z4naHMqq;eP+&H-HPD-0bvkOpRTg z$iDiDoc@Wlk}xcAQOgK1xwJBsdn?T-zlL)YUWtsS^D#l!hNn zOC`^bl6F0Dt-MY4%$SX>EEA0p0Xy@GX(uy3G)0339yJQd6x@CGlNQXyJ)DeGcr5uc z=n5=rbA{lu*@hNM*lWHI0>l1;asIolOAC;fMFIIM_^mWtLEg)pj?=H@?HqsHU6j+Y z!a=8vzuK;?T;dhc$LRK6W!E}SC#9KHJ|c!Zx!JmFP&PZ*LVr(_S0OtCib|Zw@>ynD zna;hNl312;?D?zG)|;3JxV5eidd(~oiZGC%{3R_b&x6og`zlA68tS~!17MJP@v--0 zaV?n^qJ~ug9qp3#G)Nmob3FS@#b;ItI3A=hgrA2F<{*- zURqj|XI<9SqceIph%GLIy0Y$ms3f@(tDIx32b6DKik{QNUz^G}vl5b%j|G@=4493u ziWe~?tp%W>Vki9J$#toY8fd{{z^NxW5b-W?%>-K2X!T=J}>D`PgYYX!)Xu!5mnI%RBy=c>i+;s!t?cE zw{3R1ojr>*!N~`oM*Yq;(y1M2|B<<5K}Es@tXX>3lgH)$4r|;hk#Ocr+v0BbY~49W zX~=X)Z3sVP`m}writG`DFH3bS@i{5pmIf(76zqTSBeG_t_;32N?pyz~Pkbyam&BQd z<=|S>YrVY>-q{dbSu!yofwt$v3dvq)SHJH9*cB|oAIBb;5+?YvF=55DLB(?0*Y;BFI* zd>I}-kAv8TgoJ2u(L{fUNc?-c_)q>LZAp_9p-`}kalGjRDa*0K!U4uAD!^agKfLMa zPljr7rTU#z55QvjQ zb2PHyr5yYy+wxP)GODs!87`zu5pw`KIBdw|UAKe7;eHW}eB9imbxz;!%gYCn%wu7p z4K*w`F&Lx*sD1RH1uX^a(KH}1dn2N2YZjEu;11{`Sm z-#o!r4PV;&aSa5R2oqn*>Be^uzNE*(LR8K3kt+4yT#tPI2a-_@D;59ZBUxIKqWrwP zmk?XMOPpf(q4AgF|Cr&Z{5O=)H=Hmg|2A75DiFY!HXwZ9-;gBD2e9ky?PUwzMc+XD dkNbg+{MPE$5MC@vnwKtyvZ97Usl0jM{{bn&eSrV~ literal 0 HcmV?d00001 From ca636eedb9813cbd99400d5be60e86bae709dc56 Mon Sep 17 00:00:00 2001 From: Luo Tao Date: Tue, 23 Jan 2018 11:45:42 +0800 Subject: [PATCH 002/106] remove libwarpctc.so in core.so and libpaddle_fluid.so --- cmake/generic.cmake | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/cmake/generic.cmake b/cmake/generic.cmake index 585db019d5..147de8b242 100644 --- a/cmake/generic.cmake +++ b/cmake/generic.cmake @@ -186,6 +186,11 @@ function(cc_library TARGET_NAME) add_library(${TARGET_NAME} STATIC ${cc_library_SRCS}) endif() if (cc_library_DEPS) + # Don't need link libwarpctc.so + if ("${cc_library_DEPS};" MATCHES "warpctc;") + list(REMOVE_ITEM cc_library_DEPS warpctc) + add_dependencies(${TARGET_NAME} warpctc) + endif() add_dependencies(${TARGET_NAME} 
${cc_library_DEPS}) target_link_libraries(${TARGET_NAME} ${cc_library_DEPS}) endif() From bcc67401113f04cbd52438d9a861f03725ad9a1a Mon Sep 17 00:00:00 2001 From: typhoonzero Date: Tue, 23 Jan 2018 19:32:44 +0800 Subject: [PATCH 003/106] WIP python binding of send recv --- paddle/operators/recv_op.cc | 18 ++-- python/paddle/v2/fluid/layers/io.py | 101 +++++++++++++++++++ python/paddle/v2/fluid/tests/test_recv_op.py | 45 +++++++++ 3 files changed, 155 insertions(+), 9 deletions(-) create mode 100644 python/paddle/v2/fluid/tests/test_recv_op.py diff --git a/paddle/operators/recv_op.cc b/paddle/operators/recv_op.cc index 593c35879a..e3c86966b8 100644 --- a/paddle/operators/recv_op.cc +++ b/paddle/operators/recv_op.cc @@ -49,7 +49,7 @@ static void CreateTensorFromMessageType(framework::Variable *var, var->GetMutable(); } else { PADDLE_THROW( - "VariableMessage type %d is not in " + "VraibleMessage type %d is not in " "[LoDTensor, SelectedRows]", var_type); } @@ -121,17 +121,17 @@ class RecvOp : public framework::OperatorBase { if (it != grad_list.end()) { param_var_name = param_list[it - grad_list.begin()]; } else { - LOG(ERROR) << "grad has no paired param:" << grad_var_name; + LOG(ERROR) << "grad have no paired param:" << grad_var_name; } - VLOG(3) << "received grad: " << grad_var_name + VLOG(3) << "recved grad: " << grad_var_name << " updating param: " << param_var_name; if (fan_in > 1) { grad_var_name = this->GetGradVarNameForTrainer(grad_var_name); } auto *var = recv_scope.FindVar(grad_var_name); if (var == nullptr) { - LOG(ERROR) << "Can not find server side var: " << grad_var_name; - PADDLE_THROW("Can not find server side var"); + LOG(ERROR) << "can not find server side var: " << grad_var_name; + PADDLE_THROW("can not find server side var"); } detail::DeserializeFromMessage(v.second, dev_ctx, var); } @@ -161,11 +161,11 @@ class RecvOpMaker : public framework::OpProtoAndCheckerMaker { public: RecvOpMaker(OpProto *proto, OpAttrChecker *op_checker) : 
OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("RX", "(Tensor) Input tensor to be optimized").AsDuplicable(); + // AddInput("RX", "(Tensor) Input tensor to be optimized").AsDuplicable(); AddComment(R"DOC( Recv operator -This operator will recieve tensor from send_op +This operator will recv tensor from send_op )DOC"); AddAttr("endpoint", "(string, default 127.0.0.1:6164)" @@ -176,11 +176,11 @@ This operator will recieve tensor from send_op kOptimizeBlock, "Serialized ProgramDesc string for recv to run."); AddAttr>( "ParamList", "type list of string", - "grad->param name mapping to find which parameters to optimize.") + "grad->param name mapping to find which param to optimize.") .SetDefault({}); AddAttr>( "GradList", "type list of string", - "grad->param name mapping to find which parameters to optimize.") + "grad->param name mapping to find which param to optimize.") .SetDefault({}); AddAttr("Fanin", "type int", "Number of trainers in the current cluster job") diff --git a/python/paddle/v2/fluid/layers/io.py b/python/paddle/v2/fluid/layers/io.py index 9af00e7de5..6a6c561641 100644 --- a/python/paddle/v2/fluid/layers/io.py +++ b/python/paddle/v2/fluid/layers/io.py @@ -74,3 +74,104 @@ def data(name, type=type, stop_gradient=stop_gradient, lod_level=lod_level) + + +class BlockGuardServ(BlockGuard): + """ + BlockGuardServ class. + + BlockGuardServ class is used to create an op with a block in a program. + """ + + def __init__(self, server): + if not (isinstance(server, ListenAndServ)): + raise TypeError("BlockGuardServ takes a ListenAndServ") + super(BlockGuardServ, self).__init__(server.helper.main_program) + self.server = server + + def __exit__(self, exc_type, exc_val, exc_tb): + if exc_type is not None: + return False + + self.server.complete_op() + return super(BlockGuardServ, self).__exit__(exc_type, exc_val, exc_tb) + + +class ListenAndServ(object): + """ + ListenAndServ class. 
+ + ListenAndServ class is used to wrap listen_and_serv op to create a server + which can receive variables from clients and run a block. + """ + + def __init__(self, endpoint, fan_in=1): + self.helper = LayerHelper("recv", name=name) + self.inputs = [] + self.outputs = [] + self.endpoint = endpoint + self.fan_in = fan_in + + def do(self): + return BlockGuardServ(self) + + def get_params_and_grads(self): + main_program = self.helper.main_program + current_block = main_program.current_block() + parent_block = self.parent_block() + # params and grads in the same order. + params = list() + grads = list() + for op in current_block.ops: + # FIXME(typhoonzero): op.inputs is None if it's cloned. + if "Grad" in op.inputs and "Param" in op.inputs: + params.append(op.inputs["Param"].name) + grads.append(op.inputs["Grad"].name) + + return params, grads + + def complete_op(self): + main_program = self.helper.main_program + current_block = main_program.current_block() + parent_block = self.parent_block() + + params, grads = self.get_params_and_grads() + parent_block.append_op( + type='recv', + inputs={}, + outputs={}, + attrs={ + 'endpoint': self.endpoint, + 'Fanin': self.fan_in, + 'ParamList': params, + 'GradList': grads, + 'OptimizeBlock': current_block + }) + + +def Send(endpoints, send_vars, get_vars): + """ + Send layer + + Args: + endpoints: comma seperated IP:PORT pairs in the order + of send_vars to send + send_vars: vars to send + get_vars: vars to get from server after send completes. + + Send variables to the server side, and get vars from server + side when server have finished running server side program. 
+ """ + assert (type(send_vars) == list) + assert (type(get_vars) == list) + + epmap = endpoints.split(",") + endpoints = set(epmap) + + helper = LayerHelper("Send", **locals()) + helper.append_op( + type="send", + inputs={"X": send_vars}, + outputs={"Out": get_vars}, + attrs={"endpoints": endpoints, + "epmap": epmap}) diff --git a/python/paddle/v2/fluid/tests/test_recv_op.py b/python/paddle/v2/fluid/tests/test_recv_op.py new file mode 100644 index 0000000000..fbd182a716 --- /dev/null +++ b/python/paddle/v2/fluid/tests/test_recv_op.py @@ -0,0 +1,45 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import unittest + +import paddle.v2.fluid as fluid +import paddle.v2.fluid.layers as layers +import numpy + + +class TestRecvOp(unittest.TestCase): + def run_test(self): + # Run init_serv in a thread + pass + + def init_serv(self, place): + main = fluid.Program() + with fluid.program_guard(main): + x = layers.data(shape=[32, 32], dtype='float32', name='X') + serv = fluid.ListenAndServ("127.0.0.1:6174") + with serv.do(): + layers.scale(input=x, scale=10) + exe = fluid.Executor(place) + exe.run(main) + + def init_client(self, place): + main = fluid.Program() + with fluid.program_guard(main): + x = layers.data(shape=[32, 32], dtype='float32', name='X') + i = fluid.initializer.Constant(x=1.0) + i(x, main.global_block()) + layers.Send("127.0.0.1:6174", [x], [x]) + exe = fluid.Executor(place) + exe.run(main) From 0e850c7417084675dcc997768c7f854333625bfe Mon Sep 17 00:00:00 2001 From: typhoonzero Date: Tue, 23 Jan 2018 20:26:00 +0800 Subject: [PATCH 004/106] WIP --- python/paddle/v2/fluid/layers/io.py | 23 +++++++++++++++----- python/paddle/v2/fluid/tests/test_recv_op.py | 21 +++++++++++++----- 2 files changed, 33 insertions(+), 11 deletions(-) diff --git a/python/paddle/v2/fluid/layers/io.py b/python/paddle/v2/fluid/layers/io.py index 6a6c561641..be581531d1 100644 --- a/python/paddle/v2/fluid/layers/io.py +++ b/python/paddle/v2/fluid/layers/io.py @@ -14,8 +14,10 @@ from .. import core from ..layer_helper import LayerHelper +from control_flow import BlockGuard +from ..layer_helper import LayerHelper -__all__ = ['data'] +__all__ = ['data', 'BlockGuardServ', 'ListenAndServ', 'Send'] def data(name, @@ -105,12 +107,14 @@ class ListenAndServ(object): which can receive variables from clients and run a block. 
""" - def __init__(self, endpoint, fan_in=1): - self.helper = LayerHelper("recv", name=name) + def __init__(self, endpoint, fan_in=1, optimizer_mode=True): + self.helper = LayerHelper("recv") self.inputs = [] self.outputs = [] self.endpoint = endpoint self.fan_in = fan_in + # FIXME(typhoonzero): Add this switch is stupid + self.optimizer_mode = optimizer_mode def do(self): return BlockGuardServ(self) @@ -124,9 +128,16 @@ class ListenAndServ(object): grads = list() for op in current_block.ops: # FIXME(typhoonzero): op.inputs is None if it's cloned. - if "Grad" in op.inputs and "Param" in op.inputs: - params.append(op.inputs["Param"].name) - grads.append(op.inputs["Grad"].name) + if self.optimizer_mode: + if "Grad" in op.inputs and "Param" in op.inputs: + params.append(op.inputs["Param"].name) + grads.append(op.inputs["Grad"].name) + else: + # simple recv mode, recv operators inputs. + for iname in op.input_names: + for in_var_name in op.input(iname): + params.append(parent_block.var(name)) + grads.append(parent_block.var(name)) return params, grads diff --git a/python/paddle/v2/fluid/tests/test_recv_op.py b/python/paddle/v2/fluid/tests/test_recv_op.py index fbd182a716..e06f468648 100644 --- a/python/paddle/v2/fluid/tests/test_recv_op.py +++ b/python/paddle/v2/fluid/tests/test_recv_op.py @@ -17,20 +17,27 @@ import unittest import paddle.v2.fluid as fluid import paddle.v2.fluid.layers as layers import numpy +import threading class TestRecvOp(unittest.TestCase): - def run_test(self): + def test_send(self): # Run init_serv in a thread - pass + place = fluid.CPUPlace() + t = threading.Thread(target=self.init_serv, args=(place, )) + t.start() + self.init_client(place) + t.join() def init_serv(self, place): main = fluid.Program() with fluid.program_guard(main): x = layers.data(shape=[32, 32], dtype='float32', name='X') - serv = fluid.ListenAndServ("127.0.0.1:6174") + i = fluid.initializer.Constant(value=1.0) + y = i(x, main.global_block()) + serv = 
layers.ListenAndServ("127.0.0.1:6174") with serv.do(): - layers.scale(input=x, scale=10) + layers.scale(input=y, scale=10.0) exe = fluid.Executor(place) exe.run(main) @@ -38,8 +45,12 @@ class TestRecvOp(unittest.TestCase): main = fluid.Program() with fluid.program_guard(main): x = layers.data(shape=[32, 32], dtype='float32', name='X') - i = fluid.initializer.Constant(x=1.0) + i = fluid.initializer.Constant(value=1.0) i(x, main.global_block()) layers.Send("127.0.0.1:6174", [x], [x]) exe = fluid.Executor(place) exe.run(main) + + +if __name__ == "__main__": + unittest.main() From ca0177190f75a4f39482b8fe1c8e929ab8e1a381 Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Mon, 22 Jan 2018 15:18:47 +0800 Subject: [PATCH 005/106] add layer_norm --- paddle/operators/layer_norm_op.cc | 283 ++++++++++++++++++ paddle/operators/layer_norm_op.h | 35 +++ .../v2/fluid/tests/test_layer_norm_op.py | 81 +++++ 3 files changed, 399 insertions(+) create mode 100644 paddle/operators/layer_norm_op.cc create mode 100644 paddle/operators/layer_norm_op.h create mode 100644 python/paddle/v2/fluid/tests/test_layer_norm_op.py diff --git a/paddle/operators/layer_norm_op.cc b/paddle/operators/layer_norm_op.cc new file mode 100644 index 0000000000..f1ddcd8210 --- /dev/null +++ b/paddle/operators/layer_norm_op.cc @@ -0,0 +1,283 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "paddle/operators/layer_norm_op.h" + +namespace paddle { +namespace operators { + +using Tensor = framework::Tensor; +using LoDTensor = framework::LoDTensor; +using DataLayout = framework::DataLayout; + +template +using EigenMatrixMapRowMajor = Eigen::Map< + Eigen::Matrix>; +template +using ConstEigenMatrixMapRowMajor = Eigen::Map< + const Eigen::Matrix>; + +class LayerNormOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + void InferShape(framework::InferShapeContext *ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("X"), ""); + PADDLE_ENFORCE(ctx->HasInput("Scale"), ""); + PADDLE_ENFORCE(ctx->HasInput("Bias"), ""); + PADDLE_ENFORCE(ctx->HasOutput("Y"), ""); + + PADDLE_ENFORCE_EQ(ctx->GetInputDim("Scale").size(), 1UL); + PADDLE_ENFORCE_EQ(ctx->GetInputDim("Scale")[0], 1); + PADDLE_ENFORCE_EQ(ctx->GetInputDim("Bias").size(), 1UL); + PADDLE_ENFORCE_EQ(ctx->GetInputDim("Bias")[0], 1); + + ctx->SetOutputDim("Y", ctx->GetInputDim("X")); + ctx->SetOutputDim("Mean", {ctx->GetInputDim("X")[0]}); + ctx->SetOutputDim("Variance", {ctx->GetInputDim("X")[0]}); + + ctx->ShareLoD("X", "Y"); + } +}; + +class LayerNormOpMaker : public framework::OpProtoAndCheckerMaker { + public: + LayerNormOpMaker(OpProto *proto, OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", "The input tensor"); + AddInput("Scale", + "Scale is a 1-dimensional tensor of size 1 " + "that is applied to the output"); + AddInput("Bias", + "Bias is a 1-dimensional tensor of size 1 " + "that is applied to the output"); + AddOutput("Y", "result after normalization"); + AddOutput("Mean", "Mean of the current mini batch."); + AddOutput("Variance", "Variance of the current mini batch."); + + AddAttr("epsilon", "") + .SetDefault(1e-5) + .AddCustomChecker([](const float &epsilon) { + PADDLE_ENFORCE(epsilon >= 0.0f && epsilon <= 0.001f, + "'epsilon' should be between 0.0 and 0.001."); + }); + 
AddAttr>("axis", + "(vector default:{1, 1, 1}), the " + "axis to normalize.") + .SetDefault({1, 2, 3}); // todo(zcd) : who to set axis + + AddComment(R"DOC( +Layer Normalization. + +Layer Norm has been implemented as discussed in the paper: +https://arxiv.org/abs/1607.06450 +... +)DOC"); + } +}; + +template +class LayerNormKernel + : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext &ctx) const override { + const float epsilon = ctx.Attr("epsilon"); + const auto *scale = ctx.Input("Scale"); + const auto *bias = ctx.Input("Bias"); + const auto *x = ctx.Input("X"); + const auto &x_dims = x->dims(); + + const int N = x_dims[0]; + const int sample_size = x->numel() / N; + + auto scale_data = scale->data()[0]; + auto bias_data = bias->data()[0]; + + auto *output = ctx.Output("Y"); + auto *mean = ctx.Output("Mean"); + auto *var = ctx.Output("Variance"); + output->mutable_data(ctx.GetPlace()); + mean->mutable_data(ctx.GetPlace()); + var->mutable_data(ctx.GetPlace()); + + int left = N, right = sample_size; + auto input_map = ConstEigenMatrixMapRowMajor(x->data(), left, right); + auto mean_map = EigenMatrixMapRowMajor(mean->data(), left, 1); + auto var_map = EigenMatrixMapRowMajor(var->data(), left, 1); + auto output_map = EigenMatrixMapRowMajor(output->data(), left, right); + + auto squre = [](T ele) { return ele * ele; }; + auto add_epslion = [epsilon](T ele) { return ele + epsilon; }; + + mean_map = input_map.rowwise().mean(); + var_map = (input_map - mean_map.replicate(1, right)) + .unaryExpr(squre) + .rowwise() + .mean() + .unaryExpr(add_epslion); + + auto scale_inv_std = [scale_data](T ele) { + return std::sqrt(1 / ele) * scale_data; + }; + auto sub_bias = [bias_data](T ele) { return bias_data - ele; }; + + output_map = (var_map.unaryExpr(scale_inv_std).replicate(1, right)) + .cwiseProduct(input_map) + + var_map.unaryExpr(scale_inv_std) + .cwiseProduct(mean_map) + .unaryExpr(sub_bias) + .replicate(1, right); + } +}; + +class 
LayerNormGradOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + void InferShape(framework::InferShapeContext *ctx) const override { + // check input + PADDLE_ENFORCE(ctx->HasInput("X")); + PADDLE_ENFORCE(ctx->HasInput("Scale"), ""); + PADDLE_ENFORCE(ctx->HasInput("Mean"), ""); + PADDLE_ENFORCE(ctx->HasInput("Variance"), ""); + PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Y")), ""); + + const auto x_dims = ctx->GetInputDim("X"); + + // check output + if (ctx->HasOutput(framework::GradVarName("X"))) { + ctx->SetOutputDim(framework::GradVarName("X"), x_dims); + } + if (ctx->HasOutput(framework::GradVarName("Scale"))) { + ctx->SetOutputDim(framework::GradVarName("Scale"), {1}); + } + if (ctx->HasOutput(framework::GradVarName("Bias"))) { + ctx->SetOutputDim(framework::GradVarName("Bias"), {1}); + } + } + + protected: + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext &ctx) const override { + const auto *var = ctx.InputVar(framework::GradVarName("Y")); + if (var == nullptr) { + PADDLE_THROW("can't find Y@GRAD"); + } + const Tensor *t = nullptr; + if (var->IsType()) { + t = &var->Get(); + } else if (var->IsType()) { + t = &var->Get(); + } + if (t == nullptr) { + PADDLE_THROW("can't find Y@GRAD"); + } + return framework::OpKernelType(framework::ToDataType(t->type()), + ctx.GetPlace()); + } +}; + +template +class LayerNormGradKernel + : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext &ctx) const override { + const auto *x = ctx.Input("X"); + const auto *mean = ctx.Input("Mean"); + const auto *var = ctx.Input("Variance"); + const auto *scale = ctx.Input("Scale"); + const auto *d_y = ctx.Input(framework::GradVarName("Y")); + + const auto &x_dims = x->dims(); + const int N = x_dims[0]; + const int sample_size = x->numel() / N; + int left = N, right = sample_size; + + auto scale_data = scale->data()[0]; + + // init output + auto 
*d_x = ctx.Output(framework::GradVarName("X")); + auto *d_scale = ctx.Output(framework::GradVarName("Scale")); + auto *d_bias = ctx.Output(framework::GradVarName("Bias")); + + auto x_map = ConstEigenMatrixMapRowMajor(x->data(), left, right); + auto d_y_map = ConstEigenMatrixMapRowMajor(d_y->data(), left, right); + auto mean_map = ConstEigenMatrixMapRowMajor(mean->data(), left, 1); + auto var_map = ConstEigenMatrixMapRowMajor(var->data(), left, 1); + + if (d_bias) { + d_bias->mutable_data(ctx.GetPlace()); + d_bias->data()[0] = d_y_map.sum(); + } + if (d_scale) { + d_scale->mutable_data(ctx.GetPlace()); + auto inv_std = [](T ele) { return std::sqrt(1 / ele); }; + d_scale->data()[0] = + ((x_map - mean_map.replicate(1, right)) + .cwiseProduct(var_map.unaryExpr(inv_std).replicate(1, right)) + .cwiseProduct(d_y_map)) + .sum(); // also can use `y` to get d_scale_map + } + + if (d_x) { + d_x->mutable_data(ctx.GetPlace()); + auto d_x_map = EigenMatrixMapRowMajor(d_x->data(), left, right); + auto triple_product = [](T ele) { return ele * ele * ele; }; + auto neg_inv_std = [](T ele) { return T(-1.0) * std::sqrt(1 / ele); }; + auto inv_std_scale_func = [scale_data](T ele) { + return std::sqrt(1 / ele) * scale_data; + }; + auto neg_inv_std_scale_func = [scale_data](T ele) { + return T(-1.0) * std::sqrt(1 / ele) * scale_data; + }; + // dy_dx + auto dx_end = var_map.unaryExpr(inv_std_scale_func) + .replicate(1, right) + .cwiseProduct(d_y_map); + // dy_dmean_dx + auto dmean_end = var_map.unaryExpr(neg_inv_std_scale_func) + .replicate(1, right) + .cwiseProduct(d_y_map) + .rowwise() + .sum(); + auto dx_mean = (T(1.0) / right) * dmean_end.replicate(1, right); + // dy_var_dx + auto dvar_end_0 = (x_map - mean_map.replicate(1, right)) + .cwiseProduct(d_y_map) + .rowwise() + .sum(); + auto dvar_end = var_map.unaryExpr(neg_inv_std) + .unaryExpr(triple_product) + .cwiseProduct(dvar_end_0); + auto dx_var = (1.0f / right) * + (x_map - mean_map.replicate(1, right)) + 
.cwiseProduct(dvar_end.replicate(1, right)); + + d_x_map = dx_end + dx_mean + dx_var; + } + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OP(layer_norm, ops::LayerNormOp, ops::LayerNormOpMaker, + layer_norm_grad, ops::LayerNormGradOp); +REGISTER_OP_CPU_KERNEL( + layer_norm, + ops::LayerNormKernel); +REGISTER_OP_CPU_KERNEL( + layer_norm_grad, + ops::LayerNormGradKernel); diff --git a/paddle/operators/layer_norm_op.h b/paddle/operators/layer_norm_op.h new file mode 100644 index 0000000000..bca35b91e6 --- /dev/null +++ b/paddle/operators/layer_norm_op.h @@ -0,0 +1,35 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#pragma once +#include "paddle/framework/eigen.h" +#include "paddle/framework/op_registry.h" + +namespace paddle { +namespace operators { + +template +class LayerNormKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override; +}; + +template +class LayerNormGradKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override; +}; + +} // namespace operators +} // namespace paddle diff --git a/python/paddle/v2/fluid/tests/test_layer_norm_op.py b/python/paddle/v2/fluid/tests/test_layer_norm_op.py new file mode 100644 index 0000000000..73450c599d --- /dev/null +++ b/python/paddle/v2/fluid/tests/test_layer_norm_op.py @@ -0,0 +1,81 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import unittest +import numpy as np + +from op_test import OpTest + + +def layer_norm_naive(x, scale, beta, epsilon): + n, c, h, w = x.shape + mean = np.mean(x, axis=(1, 2, 3)) + var = np.var(x, axis=(1, 2, 3)) + epsilon + output = scale * np.divide((x - mean.reshape([n, 1, 1, 1])), + (np.sqrt(var)).reshape([n, 1, 1, 1])) + beta + return output, mean, var + + +class TestLayerNormdOp(OpTest): + def setUp(self): + self.init_test_case() + + input = np.random.random(self.input_size).astype("float32") + self.inputs = { + 'X': input, + 'Scale': np.array([self.scale]).astype("float32"), + 'Bias': np.array([self.bias]).astype("float32") + } + output, mean, var = layer_norm_naive(input, self.scale, self.bias, + self.epsilon) + self.outputs = {'Y': output, 'Mean': mean, 'Variance': var} + + def test_check_output(self): + self.check_output() + + # def test_check_grad(self): + # self.check_grad( + # ['Scale', 'Bias', 'X'], ['Y', 'Mean', 'Variance'], + # max_relative_error=0.02) + + def test_check_grad_no_x(self): + self.check_grad( + ['Scale', 'Bias'], ['Y', 'Mean', 'Variance'], + max_relative_error=0.02, + no_grad_set=set(['X'])) + + # def test_check_grad_no_scale(self): + # self.check_grad( + # ['Bias','X'], + # 'Y', + # max_relative_error=0.02, + # no_grad_set=set(['Scale'])) + # + # def test_check_grad_no_bias(self): + # self.check_grad( + # ['Scale','X'], + # 'Y', + # max_relative_error=0.02, + # no_grad_set=set(['Bias'])) + + def init_test_case(self): + self.op_type = "layer_norm" + self.input_size = [2, 3, 4, 5] + self.scale = 0.21 + self.bias = 0.1 + self.epsilon = 0.00001 + + +if __name__ == '__main__': + unittest.main() From 9a8517fd8b909baddac7945f403763ec1e7bd0c7 Mon Sep 17 00:00:00 2001 From: typhoonzero Date: Wed, 24 Jan 2018 14:43:56 +0800 Subject: [PATCH 006/106] daemonize the server process --- python/paddle/v2/fluid/layers/io.py | 19 ++++++--- python/paddle/v2/fluid/tests/test_recv_op.py | 44 ++++++++++++++------ 2 files changed, 46 insertions(+), 17 
deletions(-) diff --git a/python/paddle/v2/fluid/layers/io.py b/python/paddle/v2/fluid/layers/io.py index be581531d1..bc804a4043 100644 --- a/python/paddle/v2/fluid/layers/io.py +++ b/python/paddle/v2/fluid/layers/io.py @@ -136,17 +136,26 @@ class ListenAndServ(object): # simple recv mode, recv operators inputs. for iname in op.input_names: for in_var_name in op.input(iname): - params.append(parent_block.var(name)) - grads.append(parent_block.var(name)) + params.append(parent_block.var(in_var_name)) + grads.append(parent_block.var(in_var_name)) return params, grads + def parent_block(self): + prog = self.helper.main_program + parent_idx = prog.current_block().parent_idx + assert parent_idx >= 0 + parent_block = prog.block(parent_idx) + return parent_block + def complete_op(self): main_program = self.helper.main_program current_block = main_program.current_block() parent_block = self.parent_block() params, grads = self.get_params_and_grads() + param_names = [p.name for p in params] + grad_names = [g.name for g in grads] parent_block.append_op( type='recv', inputs={}, @@ -154,8 +163,8 @@ class ListenAndServ(object): attrs={ 'endpoint': self.endpoint, 'Fanin': self.fan_in, - 'ParamList': params, - 'GradList': grads, + 'ParamList': param_names, + 'GradList': grad_names, 'OptimizeBlock': current_block }) @@ -177,7 +186,7 @@ def Send(endpoints, send_vars, get_vars): assert (type(get_vars) == list) epmap = endpoints.split(",") - endpoints = set(epmap) + endpoints = list(set(epmap)) helper = LayerHelper("Send", **locals()) helper.append_op( diff --git a/python/paddle/v2/fluid/tests/test_recv_op.py b/python/paddle/v2/fluid/tests/test_recv_op.py index e06f468648..6ebb58ed33 100644 --- a/python/paddle/v2/fluid/tests/test_recv_op.py +++ b/python/paddle/v2/fluid/tests/test_recv_op.py @@ -17,40 +17,60 @@ import unittest import paddle.v2.fluid as fluid import paddle.v2.fluid.layers as layers import numpy -import threading +from multiprocessing import Process +import os, sys class 
TestRecvOp(unittest.TestCase): def test_send(self): # Run init_serv in a thread place = fluid.CPUPlace() - t = threading.Thread(target=self.init_serv, args=(place, )) - t.start() + p = Process(target=self.init_serv, args=(place, )) + p.daemon = True + p.start() self.init_client(place) - t.join() + # FIXME(typhoonzero): find a way to gracefully shutdown the server. + os.system("kill -9 %d" % p.pid) + p.join() def init_serv(self, place): main = fluid.Program() with fluid.program_guard(main): - x = layers.data(shape=[32, 32], dtype='float32', name='X') - i = fluid.initializer.Constant(value=1.0) - y = i(x, main.global_block()) - serv = layers.ListenAndServ("127.0.0.1:6174") + x = layers.data( + shape=[32, 32], + dtype='float32', + name="X", + append_batch_size=False) + fluid.initializer.Constant(value=1.0)(x, main.global_block()) + serv = layers.ListenAndServ("127.0.0.1:6174", optimizer_mode=False) with serv.do(): - layers.scale(input=y, scale=10.0) + o = layers.scale(x=x, scale=10.0) + main.global_block().create_var( + name=o.name, psersistable=False, dtype=o.dtype, shape=o.shape) + print main exe = fluid.Executor(place) exe.run(main) def init_client(self, place): main = fluid.Program() with fluid.program_guard(main): - x = layers.data(shape=[32, 32], dtype='float32', name='X') - i = fluid.initializer.Constant(value=1.0) - i(x, main.global_block()) + x = layers.data( + shape=[32, 32], + dtype='float32', + name='X', + append_batch_size=False) + fluid.initializer.Constant(value=1.0)(x, main.global_block()) layers.Send("127.0.0.1:6174", [x], [x]) + print main exe = fluid.Executor(place) exe.run(main) if __name__ == "__main__": unittest.main() + # test = TestRecvOp() + # place = fluid.CPUPlace() + # if sys.argv[1] == "server": + # test.init_serv(place) + # else: + # test.init_client(place) From b9d9b11c804c5f5efe645acd10fde7bb6641a317 Mon Sep 17 00:00:00 2001 From: typhoonzero Date: Wed, 24 Jan 2018 15:00:46 +0800 Subject: [PATCH 007/106] remove recv_op input --- 
paddle/operators/recv_op.cc | 1 - 1 file changed, 1 deletion(-) diff --git a/paddle/operators/recv_op.cc b/paddle/operators/recv_op.cc index e3c86966b8..381890d30b 100644 --- a/paddle/operators/recv_op.cc +++ b/paddle/operators/recv_op.cc @@ -161,7 +161,6 @@ class RecvOpMaker : public framework::OpProtoAndCheckerMaker { public: RecvOpMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - // AddInput("RX", "(Tensor) Input tensor to be optimized").AsDuplicable(); AddComment(R"DOC( Recv operator From e206c636fd154435973f9eeb5a8800e500bea0fa Mon Sep 17 00:00:00 2001 From: typhoonzero Date: Wed, 24 Jan 2018 16:39:35 +0800 Subject: [PATCH 008/106] update by comment, add WITH_DISTRIBUTE switch --- python/paddle/v2/fluid/tests/CMakeLists.txt | 5 +++++ python/paddle/v2/fluid/tests/test_recv_op.py | 8 -------- 2 files changed, 5 insertions(+), 8 deletions(-) diff --git a/python/paddle/v2/fluid/tests/CMakeLists.txt b/python/paddle/v2/fluid/tests/CMakeLists.txt index 8305316082..628ce60b40 100644 --- a/python/paddle/v2/fluid/tests/CMakeLists.txt +++ b/python/paddle/v2/fluid/tests/CMakeLists.txt @@ -1,5 +1,10 @@ file(GLOB TEST_OPS RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "test_*.py") string(REPLACE ".py" "" TEST_OPS "${TEST_OPS}") + +if(NOT WITH_DISTRIBUTE) + list(REMOVE_ITEM TEST_OPS test_recv_op) +endif(NOT WITH_DISTRIBUTE) + foreach(src ${TEST_OPS}) py_test(${src} SRCS ${src}.py) endforeach() diff --git a/python/paddle/v2/fluid/tests/test_recv_op.py b/python/paddle/v2/fluid/tests/test_recv_op.py index 6ebb58ed33..5c4cec028d 100644 --- a/python/paddle/v2/fluid/tests/test_recv_op.py +++ b/python/paddle/v2/fluid/tests/test_recv_op.py @@ -47,7 +47,6 @@ class TestRecvOp(unittest.TestCase): o = layers.scale(x=x, scale=10.0) main.global_block().create_var( name=o.name, psersistable=False, dtype=o.dtype, shape=o.shape) - print main exe = fluid.Executor(place) exe.run(main) @@ -61,16 +60,9 @@ class TestRecvOp(unittest.TestCase): 
append_batch_size=False) fluid.initializer.Constant(value=1.0)(x, main.global_block()) layers.Send("127.0.0.1:6174", [x], [x]) - print main exe = fluid.Executor(place) exe.run(main) if __name__ == "__main__": unittest.main() - # test = TestRecvOp() - # place = fluid.CPUPlace() - # if sys.argv[1] == "server": - # test.init_serv(place) - # else: - # test.init_client(place) From 35b4d42ab69ee598df0c973c8884a053c42adb21 Mon Sep 17 00:00:00 2001 From: typhoonzero Date: Wed, 24 Jan 2018 16:54:35 +0800 Subject: [PATCH 009/106] merge doc fixes --- paddle/operators/recv_op.cc | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/paddle/operators/recv_op.cc b/paddle/operators/recv_op.cc index 381890d30b..5d1df566af 100644 --- a/paddle/operators/recv_op.cc +++ b/paddle/operators/recv_op.cc @@ -49,7 +49,7 @@ static void CreateTensorFromMessageType(framework::Variable *var, var->GetMutable(); } else { PADDLE_THROW( - "VraibleMessage type %d is not in " + "VariableMessage type %d is not in " "[LoDTensor, SelectedRows]", var_type); } @@ -121,17 +121,17 @@ class RecvOp : public framework::OperatorBase { if (it != grad_list.end()) { param_var_name = param_list[it - grad_list.begin()]; } else { - LOG(ERROR) << "grad have no paired param:" << grad_var_name; + LOG(ERROR) << "grad has no paired param:" << grad_var_name; } - VLOG(3) << "recved grad: " << grad_var_name + VLOG(3) << "received grad: " << grad_var_name << " updating param: " << param_var_name; if (fan_in > 1) { grad_var_name = this->GetGradVarNameForTrainer(grad_var_name); } auto *var = recv_scope.FindVar(grad_var_name); if (var == nullptr) { - LOG(ERROR) << "can not find server side var: " << grad_var_name; - PADDLE_THROW("can not find server side var"); + LOG(ERROR) << "Can not find server side var: " << grad_var_name; + PADDLE_THROW("Can not find server side var"); } detail::DeserializeFromMessage(v.second, dev_ctx, var); } @@ -164,7 +164,7 @@ class RecvOpMaker : public 
framework::OpProtoAndCheckerMaker { AddComment(R"DOC( Recv operator -This operator will recv tensor from send_op +This operator will recieve tensor from send_op )DOC"); AddAttr("endpoint", "(string, default 127.0.0.1:6164)" @@ -175,11 +175,11 @@ This operator will recv tensor from send_op kOptimizeBlock, "Serialized ProgramDesc string for recv to run."); AddAttr>( "ParamList", "type list of string", - "grad->param name mapping to find which param to optimize.") + "grad->param name mapping to find which parameters to optimize.") .SetDefault({}); AddAttr>( "GradList", "type list of string", - "grad->param name mapping to find which param to optimize.") + "grad->param name mapping to find which parameters to optimize.") .SetDefault({}); AddAttr("Fanin", "type int", "Number of trainers in the current cluster job") From dc7073ded23126bde7a1e7d514b9174cce23a19c Mon Sep 17 00:00:00 2001 From: typhoonzero Date: Thu, 25 Jan 2018 10:52:25 +0800 Subject: [PATCH 010/106] update by comment. --- python/paddle/v2/fluid/layers/io.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/python/paddle/v2/fluid/layers/io.py b/python/paddle/v2/fluid/layers/io.py index bc804a4043..b7b2cf2296 100644 --- a/python/paddle/v2/fluid/layers/io.py +++ b/python/paddle/v2/fluid/layers/io.py @@ -113,7 +113,8 @@ class ListenAndServ(object): self.outputs = [] self.endpoint = endpoint self.fan_in = fan_in - # FIXME(typhoonzero): Add this switch is stupid + # FIXME(typhoonzero): add optimizer_mode is stupid, should make it more + # general. self.optimizer_mode = optimizer_mode def do(self): From 250206d1cfeafa74c353ed167b6b5852f8ccec3e Mon Sep 17 00:00:00 2001 From: Liu Yiqun Date: Tue, 23 Jan 2018 10:44:28 +0000 Subject: [PATCH 011/106] Change the example of inference to a unittest. 
--- paddle/inference/CMakeLists.txt | 17 ++-------- paddle/inference/tests/book/CMakeLists.txt | 13 +++++++ .../test_inference_recognize_digits_mlp.cc} | 34 +++++++++---------- 3 files changed, 31 insertions(+), 33 deletions(-) create mode 100644 paddle/inference/tests/book/CMakeLists.txt rename paddle/inference/{example.cc => tests/book/test_inference_recognize_digits_mlp.cc} (72%) diff --git a/paddle/inference/CMakeLists.txt b/paddle/inference/CMakeLists.txt index ae4d3fd2f5..fedf9e4cb8 100644 --- a/paddle/inference/CMakeLists.txt +++ b/paddle/inference/CMakeLists.txt @@ -24,19 +24,6 @@ if(NOT WITH_C_API AND WITH_FLUID) install(TARGETS paddle_fluid_shared DESTINATION lib) endif() -add_executable(example example.cc) -if(APPLE) - set(OPTIONAL_LINK_FLAGS) - if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang" OR "${CMAKE_CXX_COMPILER_ID}" STREQUAL "AppleClang") - set(OPTIONAL_LINK_FLAGS "-undefined dynamic_lookup") - endif() - target_link_libraries(example - -Wl,-force_load paddle_fluid - ${OPTIONAL_LINK_FLAGS} - ${PTOOLS_LIB}) -else() - target_link_libraries(example - -Wl,--start-group -Wl,--whole-archive paddle_fluid - -Wl,--no-whole-archive -Wl,--end-group - ${PTOOLS_LIB}) +if(WITH_TESTING) + add_subdirectory(tests/book) endif() diff --git a/paddle/inference/tests/book/CMakeLists.txt b/paddle/inference/tests/book/CMakeLists.txt new file mode 100644 index 0000000000..31e6796fdb --- /dev/null +++ b/paddle/inference/tests/book/CMakeLists.txt @@ -0,0 +1,13 @@ +add_executable(test_inference_recognize_digits_mlp test_inference_recognize_digits_mlp.cc) +target_circle_link_libraries( + test_inference_recognize_digits_mlp + ARCHIVE_START + paddle_fluid + ARCHIVE_END + gtest + gflags) +add_test( + NAME test_inference_recognize_digits_mlp + COMMAND test_inference_recognize_digits_mlp + --dirname=${PADDLE_SOURCE_DIR}/python/paddle/v2/fluid/tests/book/recognize_digits_mlp.inference.model + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}) diff --git a/paddle/inference/example.cc 
b/paddle/inference/tests/book/test_inference_recognize_digits_mlp.cc similarity index 72% rename from paddle/inference/example.cc rename to paddle/inference/tests/book/test_inference_recognize_digits_mlp.cc index 0c18b45624..e96af21344 100644 --- a/paddle/inference/example.cc +++ b/paddle/inference/tests/book/test_inference_recognize_digits_mlp.cc @@ -12,20 +12,17 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#include #include -#include +#include #include "gflags/gflags.h" #include "paddle/inference/inference.h" DEFINE_string(dirname, "", "Directory of the inference model."); -int main(int argc, char** argv) { - google::ParseCommandLineFlags(&argc, &argv, true); +TEST(inference, recognize_digits_mlp) { if (FLAGS_dirname.empty()) { - // Example: - // ./example --dirname=recognize_digits_mlp.inference.model - std::cout << "Usage: ./example --dirname=path/to/your/model" << std::endl; - exit(1); + LOG(FATAL) << "Usage: ./example --dirname=path/to/your/model"; } std::cout << "FLAGS_dirname: " << FLAGS_dirname << std::endl; @@ -48,20 +45,21 @@ int main(int argc, char** argv) { engine->Execute(feeds, fetchs); for (size_t i = 0; i < fetchs.size(); ++i) { - auto dims_i = fetchs[i].dims(); - std::cout << "dims_i:"; - for (int j = 0; j < dims_i.size(); ++j) { - std::cout << " " << dims_i[j]; - } - std::cout << std::endl; - std::cout << "result:"; + LOG(INFO) << fetchs[i].dims(); + std::stringstream ss; + ss << "result:"; float* output_ptr = fetchs[i].data(); - for (int j = 0; j < paddle::framework::product(dims_i); ++j) { - std::cout << " " << output_ptr[j]; + for (int j = 0; j < fetchs[i].numel(); ++j) { + ss << " " << output_ptr[j]; } - std::cout << std::endl; + LOG(INFO) << ss.str(); } delete engine; - return 0; +} + +int main(int argc, char** argv) { + google::ParseCommandLineFlags(&argc, &argv, false); + testing::InitGoogleTest(&argc, 
argv); + return RUN_ALL_TESTS(); } From 5594456a4bd6df3771f2d6d736a80adda83dd431 Mon Sep 17 00:00:00 2001 From: Yang Yu Date: Thu, 25 Jan 2018 15:13:00 +0800 Subject: [PATCH 012/106] Add build_type to build.sh --- paddle/scripts/docker/build.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/paddle/scripts/docker/build.sh b/paddle/scripts/docker/build.sh index e70d04d901..fbae37b2ca 100644 --- a/paddle/scripts/docker/build.sh +++ b/paddle/scripts/docker/build.sh @@ -32,7 +32,7 @@ function cmake_gen() { cat < Date: Thu, 25 Jan 2018 09:47:41 +0000 Subject: [PATCH 013/106] Enable the dependency. --- paddle/inference/inference.cc | 2 +- paddle/inference/tests/book/CMakeLists.txt | 11 ++-- ....cc => test_inference_recognize_digits.cc} | 2 +- .../fluid/tests/book/test_recognize_digits.py | 53 ++++++++++++++++--- 4 files changed, 54 insertions(+), 14 deletions(-) rename paddle/inference/tests/book/{test_inference_recognize_digits_mlp.cc => test_inference_recognize_digits.cc} (98%) diff --git a/paddle/inference/inference.cc b/paddle/inference/inference.cc index 09268ffb3a..37d9776cd0 100644 --- a/paddle/inference/inference.cc +++ b/paddle/inference/inference.cc @@ -75,7 +75,7 @@ void InferenceEngine::GenerateLoadProgram(const std::string& dirname) { framework::BlockDesc* load_block = load_program_->MutableBlock(0); for (auto* var : global_block->AllVars()) { if (IsParameter(var)) { - LOG(INFO) << "parameter's name: " << var->Name(); + VLOG(3) << "parameter's name: " << var->Name(); framework::VarDesc* new_var = load_block->Var(var->Name()); new_var->SetShape(var->Shape()); diff --git a/paddle/inference/tests/book/CMakeLists.txt b/paddle/inference/tests/book/CMakeLists.txt index 31e6796fdb..78083cc218 100644 --- a/paddle/inference/tests/book/CMakeLists.txt +++ b/paddle/inference/tests/book/CMakeLists.txt @@ -1,6 +1,7 @@ -add_executable(test_inference_recognize_digits_mlp test_inference_recognize_digits_mlp.cc) +set(PYTHON_TESTS_DIR 
${PADDLE_SOURCE_DIR}/python/paddle/v2/fluid/tests) +add_executable(test_inference_recognize_digits test_inference_recognize_digits.cc) target_circle_link_libraries( - test_inference_recognize_digits_mlp + test_inference_recognize_digits ARCHIVE_START paddle_fluid ARCHIVE_END @@ -8,6 +9,8 @@ target_circle_link_libraries( gflags) add_test( NAME test_inference_recognize_digits_mlp - COMMAND test_inference_recognize_digits_mlp - --dirname=${PADDLE_SOURCE_DIR}/python/paddle/v2/fluid/tests/book/recognize_digits_mlp.inference.model + COMMAND test_inference_recognize_digits + --dirname=${PYTHON_TESTS_DIR}/book/recognize_digits_mlp.inference.model WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}) +set_tests_properties(test_inference_recognize_digits_mlp + PROPERTIES DEPENDS test_recognize_digits_mlp_cpu) diff --git a/paddle/inference/tests/book/test_inference_recognize_digits_mlp.cc b/paddle/inference/tests/book/test_inference_recognize_digits.cc similarity index 98% rename from paddle/inference/tests/book/test_inference_recognize_digits_mlp.cc rename to paddle/inference/tests/book/test_inference_recognize_digits.cc index e96af21344..d0e811914c 100644 --- a/paddle/inference/tests/book/test_inference_recognize_digits_mlp.cc +++ b/paddle/inference/tests/book/test_inference_recognize_digits.cc @@ -20,7 +20,7 @@ limitations under the License. 
*/ DEFINE_string(dirname, "", "Directory of the inference model."); -TEST(inference, recognize_digits_mlp) { +TEST(inference, recognize_digits) { if (FLAGS_dirname.empty()) { LOG(FATAL) << "Usage: ./example --dirname=path/to/your/model"; } diff --git a/python/paddle/v2/fluid/tests/book/test_recognize_digits.py b/python/paddle/v2/fluid/tests/book/test_recognize_digits.py index ac7ef4046f..d6e4675a24 100644 --- a/python/paddle/v2/fluid/tests/book/test_recognize_digits.py +++ b/python/paddle/v2/fluid/tests/book/test_recognize_digits.py @@ -45,8 +45,9 @@ BATCH_SIZE = 64 def loss_net(hidden, label): prediction = fluid.layers.fc(input=hidden, size=10, act='softmax') loss = fluid.layers.cross_entropy(input=prediction, label=label) - return fluid.layers.mean(x=loss), fluid.layers.accuracy( - input=prediction, label=label) + avg_loss = fluid.layers.mean(x=loss) + acc = fluid.layers.accuracy(input=prediction, label=label) + return prediction, avg_loss, acc def mlp(img, label): @@ -73,8 +74,7 @@ def conv_net(img, label): return loss_net(conv_pool_2, label) -def main(): - args = parse_arg() +def train(args, save_dirname=None): print("recognize digits with args: {0}".format(" ".join(sys.argv[1:]))) img = fluid.layers.data(name='img', shape=[1, 28, 28], dtype='float32') @@ -91,7 +91,8 @@ def main(): with pd.do(): img_ = pd.read_input(img) label_ = pd.read_input(label) - for o in net_conf(img_, label_): + prediction, avg_loss, acc = net_conf(img_, label_) + for o in [avg_loss, acc]: pd.write_output(o) avg_loss, acc = pd() @@ -99,7 +100,7 @@ def main(): avg_loss = fluid.layers.mean(x=avg_loss) acc = fluid.layers.mean(x=acc) else: - avg_loss, acc = net_conf(img, label) + prediction, avg_loss, acc = net_conf(img, label) test_program = fluid.default_main_program().clone() @@ -137,7 +138,10 @@ def main(): acc_val = numpy.array(acc_set).mean() avg_loss_val = numpy.array(avg_loss_set).mean() if float(acc_val) > 0.85: # test acc > 85% - exit(0) + if save_dirname is not None: + 
fluid.io.save_inference_model(save_dirname, ["img"], + [prediction], exe) + return else: print( 'PassID {0:1}, BatchID {1:04}, Test Loss {2:2.2}, Acc {3:2.2}'. @@ -145,5 +149,38 @@ def main(): float(avg_loss_val), float(acc_val))) +def infer(args, save_dirname=None): + if save_dirname is None: + return + + place = fluid.CUDAPlace(0) if args.use_cuda else fluid.CPUPlace() + exe = fluid.Executor(place) + + # Use fluid.io.load_inference_model to obtain the inference program desc, + # the feed_target_names (the names of variables that will be feeded + # data using feed operators), and the fetch_targets (variables that + # we want to obtain data from using fetch operators). + [inference_program, feed_target_names, + fetch_targets] = fluid.io.load_inference_model(save_dirname, exe) + + if args.nn_type == 'mlp': + tensor_img = numpy.random.rand(1, 28, 28).astype("float32") + else: + tensor_img = numpy.random.rand(1, 1, 28, 28).astype("float32") + + # Construct feed as a dictionary of {feed_target_name: feed_target_data} + # and results will contain a list of data corresponding to fetch_targets. 
+ results = exe.run(inference_program, + feed={feed_target_names[0]: tensor_img}, + fetch_list=fetch_targets) + print("infer results: ", results[0]) + + if __name__ == '__main__': - main() + args = parse_arg() + if not args.use_cuda and not args.parallel: + save_dirname = "recognize_digits_" + args.nn_type + ".inference.model" + else: + save_dirname = None + train(args, save_dirname) + infer(args, save_dirname) From 020bee03a914d1a904bad02e99981125fa4deb03 Mon Sep 17 00:00:00 2001 From: peterzhang2029 Date: Thu, 25 Jan 2018 20:28:20 +0800 Subject: [PATCH 014/106] fix the decoder_boot --- .../v2/fluid/tests/book/test_rnn_encoder_decoder.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/python/paddle/v2/fluid/tests/book/test_rnn_encoder_decoder.py b/python/paddle/v2/fluid/tests/book/test_rnn_encoder_decoder.py index 3fd3dbaf77..fdc6086176 100644 --- a/python/paddle/v2/fluid/tests/book/test_rnn_encoder_decoder.py +++ b/python/paddle/v2/fluid/tests/book/test_rnn_encoder_decoder.py @@ -49,7 +49,11 @@ def bi_lstm_encoder(input_seq, hidden_size): size=hidden_size * 4, is_reverse=True, use_peepholes=USE_PEEPHOLES) - return forward, backward + + forward_last = fluid.layers.sequence_last_step(input=forward) + backward_first = fluid.layers.sequence_first_step(input=backward) + + return forward_last, backward_first # FIXME(peterzhang2029): Replace this function with the lstm_unit_op. 
@@ -115,16 +119,13 @@ def seq_to_seq_net(): size=[source_dict_dim, embedding_dim], dtype='float32') - src_forward, src_backward = bi_lstm_encoder( + src_forward_last, src_backward_first = bi_lstm_encoder( input_seq=src_embedding, hidden_size=encoder_size) - src_forward_last = fluid.layers.sequence_last_step(input=src_forward) - src_backward_first = fluid.layers.sequence_first_step(input=src_backward) - encoded_vector = fluid.layers.concat( input=[src_forward_last, src_backward_first], axis=1) - decoder_boot = fluid.layers.fc(input=encoded_vector, + decoder_boot = fluid.layers.fc(input=src_backward_first, size=decoder_size, bias_attr=False, act='tanh') From a8c46f33d8ede25d92bda492acca5358d8b01ebe Mon Sep 17 00:00:00 2001 From: "yi.wu" Date: Thu, 25 Jan 2018 21:17:43 +0800 Subject: [PATCH 015/106] downgrade boost to fit manylinux --- cmake/external/boost.cmake | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmake/external/boost.cmake b/cmake/external/boost.cmake index 137f11da7f..a2c979b2e8 100644 --- a/cmake/external/boost.cmake +++ b/cmake/external/boost.cmake @@ -15,8 +15,8 @@ include(ExternalProject) set(BOOST_PROJECT "extern_boost") -set(BOOST_VER "1.66.0") -set(BOOST_TAR "boost_1_66_0") +set(BOOST_VER "1.41.0") +set(BOOST_TAR "boost_1_41_0") set(BOOST_URL "https://dl.bintray.com/boostorg/release/${BOOST_VER}/source/${BOOST_TAR}.tar.gz") set(BOOST_SOURCES_DIR ${THIRD_PARTY_PATH}/boost) set(BOOST_DOWNLOAD_DIR "${BOOST_SOURCES_DIR}/src/${BOOST_PROJECT}") From 4ca19d4e18c73cd84e1787a79704f2a1da3c8c0a Mon Sep 17 00:00:00 2001 From: Yibing Liu Date: Thu, 25 Jan 2018 05:26:50 -0800 Subject: [PATCH 016/106] Add python api for lstmp operator --- doc/api/v2/fluid/layers.rst | 7 +- python/paddle/v2/fluid/layers/nn.py | 178 +++++++++++++++++++++++++++- 2 files changed, 183 insertions(+), 2 deletions(-) diff --git a/doc/api/v2/fluid/layers.rst b/doc/api/v2/fluid/layers.rst index f738bf1564..6c6af106db 100644 --- a/doc/api/v2/fluid/layers.rst +++ 
b/doc/api/v2/fluid/layers.rst @@ -18,7 +18,12 @@ dynamic_lstm .. autofunction:: paddle.v2.fluid.layers.dynamic_lstm :noindex: -dynamic_gru +dynamic_lstmp +------------ +.. autofunction:: paddle.v2.fluid.layers.dynamic_lstmp + :noindex: + + dynamic_gru ----------- .. autofunction:: paddle.v2.fluid.layers.dynamic_gru :noindex: diff --git a/python/paddle/v2/fluid/layers/nn.py b/python/paddle/v2/fluid/layers/nn.py index bae33f6a15..fa03a64291 100644 --- a/python/paddle/v2/fluid/layers/nn.py +++ b/python/paddle/v2/fluid/layers/nn.py @@ -26,6 +26,7 @@ __all__ = [ 'fc', 'embedding', 'dynamic_lstm', + 'dynamic_lstmp', 'dynamic_gru', 'gru_unit', 'linear_chain_crf', @@ -282,7 +283,7 @@ def dynamic_lstm(input, W_{fc}, W_{oc}` are diagonal weight matrices for peephole connections. In our implementation, we use vectors to reprenset these diagonal weight matrices. The :math:`b` terms denote bias vectors (:math:`b_i` is the input - gate bias vector), :math:`\sigma` is the non-line activations, such as + gate bias vector), :math:`\sigma` is the non-linear activations, such as logistic sigmoid function, and :math:`i, f, o` and :math:`c` are the input gate, forget gate, output gate, and cell activation vectors, respectively, all of which have the same size as the cell output activation vector :math:`h`. 
@@ -389,6 +390,181 @@ def dynamic_lstm(input, return hidden, cell +def dynamic_lstmp(input, + size, + proj_size, + param_attr=None, + bias_attr=None, + use_peepholes=True, + is_reverse=False, + gate_activation='sigmoid', + cell_activation='tanh', + candidate_activation='tanh', + proj_activation='tanh', + dtype='float32'): + """ + **Dynamic LSTMP Layer** + + LSTMP (LSTM with recurrent projection) layer has a separate projection + layer after the LSTM layer, projecting the original hidden state to a + lower-dimensional one, which is proposed to reduce the number of total + parameters and furthermore computational complexity for the LSTM, + espeacially for the case that the size of output units is relative + large (https://research.google.com/pubs/archive/43905.pdf). + + The formula is as follows: + + .. math:: + + i_t = \sigma(W_{ix}x_{t} + W_{ir}r_{t-1} + W_{ic}c_{t-1} + b_i) \\ + + f_t = \sigma(W_{fx}x_{t} + W_{fr}r_{t-1} + W_{fc}c_{t-1} + b_f) \\ + + \tilde{c_t} = act_g(W_{cx}x_t + W_{cr}r_{t-1} + b_c) \\ + + o_t = \sigma(W_{ox}x_{t} + W_{or}r_{t-1} + W_{oc}c_t + b_o) \\ + + c_t = f_t \odot c_{t-1} + i_t \odot \tilde{c_t} \\ + + h_t = o_t \odot act_h(c_t) \\ + + r_t = \overline{act_h}(W_{rh}h_t) + + where the :math:`W` terms denote weight matrices (e.g. :math:`W_{xi}` is + the matrix of weights from the input gate to the input), :math:`W_{ic}`, + :math:`W_{fc}`, :math:`W_{oc}` are diagonal weight matrices for peephole + connections. In our implementation, we use vectors to reprenset these + diagonal weight matrices. The :math:`b` terms denote bias vectors + (:math:`b_i` is the input gate bias vector), :math:`\sigma` is the + activation, such as logistic sigmoid function, and :math:`i, f, o` and + :math:`c` are the input gate, forget gate, output gate, and cell activation + vectors, respectively, all of which have the same size as the cell output + activation vector :math:`h`. 
Here :math:`h` is usually called the hidden + state and :math:`r` denotes its recurrent projection. And + :math:`\tilde{c_t}` is also called the candidate hidden state, whose + computation is based on the current input and previous hidden state. + + The :math:`\odot` is the element-wise product of the vectors. :math:`act_g` + and :math:`act_h` are the cell input and cell output activation functions + and `tanh` is usually used for them. :math:`\overline{act_h}` is the + activation function for the projection output, usually using `identity` or + same as :math:`act_h`. + + Set `use_peepholes` to `False` to disable peephole connection. The formula + is omitted here, please refer to the paper + http://www.bioinf.jku.at/publications/older/2604.pdf for details. + + Note that these :math:`W_{xi}x_{t}, W_{xf}x_{t}, W_{xc}x_{t}, W_{xo}x_{t}` + operations on the input :math:`x_{t}` are NOT included in this operator. + Users can choose to use fully-connected layer before LSTMP layer. + + Args: + input(Variable): The input of dynamic_lstmp layer, which supports + variable-time length input sequence. The underlying + tensor in this Variable is a matrix with shape + (T X 4D), where T is the total time steps in this + mini-batch, D is the hidden size. + size(int): 4 * hidden size. + proj_size(int): The size of projection output. + param_attr(ParamAttr): The parameter attribute for the learnable + hidden-hidden weight and projection weight. + + - The shape of hidden-hidden weight is (P x 4D), + where P is the projection size and D the hidden + size. + - The shape of projection weight is (D x P). + - Hidden-hidden weight = {:math:`W_{ch}, W_{ih}, \ + W_{fh}, W_{oh}`}. + - Projection weight = {:math:`W_{rh}`}. + bias_attr(ParamAttr): The bias attribute for the learnable bias + weights, which contains two parts, input-hidden + bias weights and peephole connections weights if + setting `use_peepholes` to `True`. + + 1. `use_peepholes = False` + - The shape is (1 x 4D). 
+ - Biases = {:math:`b_c, b_i, b_f, b_o`}. + 2. `use_peepholes = True` + - The shape is (1 x 7D). + - Biases = { :math:`b_c, b_i, b_f, b_o, W_{ic}, \ + W_{fc}, W_{oc}`}. + use_peepholes(bool): Whether to enable diagonal/peephole connections, + default `True`. + is_reverse(bool): Whether to compute reversed LSTM, default `False`. + gate_activation(str): The activation for input gate, forget gate and + output gate. Choices = ["sigmoid", "tanh", "relu", + "identity"], default "sigmoid". + cell_activation(str): The activation for cell output. Choices = ["sigmoid", + "tanh", "relu", "identity"], default "tanh". + candidate_activation(str): The activation for candidate hidden state. + Choices = ["sigmoid", "tanh", "relu", "identity"], + default "tanh". + proj_activation(str): The activation for projection output. + Choices = ["sigmoid", "tanh", "relu", "identity"], + default "tanh". + dtype(str): Data type. Choices = ["float32", "float64"], default "float32". + + Returns: + tuple: The projection of hidden state, and cell state of LSTMP. The + shape of projection is (T x P), for the cell state which is + (T x D), and both LoD is the same with the `input`. + + Examples: + .. 
code-block:: python + + hidden_dim = 512 + proj_dim = 256 + fc_out = fluid.layers.fc(input=input_seq, size=hidden_dim * 4, + act=None, bias_attr=None) + proj_out, _ = fluid.layers.dynamic_lstmp(input=fc_out, + size=hidden_dim * 4, proj_size=proj_dim, use_peepholes=False) + """ + helper = LayerHelper('lstmp', **locals()) + size = size / 4 + weight = helper.create_parameter( + attr=helper.param_attr, shape=[proj_size, 4 * size], dtype=dtype) + proj_weight = helper.create_parameter( + attr=helper.param_attr, shape=[size, proj_size], dtype=dtype) + bias_size = [1, 7 * size] + if not use_peepholes: + bias_size[1] = 4 * size + bias = helper.create_parameter( + attr=helper.bias_attr, shape=bias_size, dtype=dtype, is_bias=True) + + projection = helper.create_tmp_variable(dtype) + cell = helper.create_tmp_variable(dtype) + ordered_proj0 = helper.create_tmp_variable(dtype) + batch_hidden = helper.create_tmp_variable(dtype) + batch_gate = helper.create_tmp_variable(dtype) + batch_cell_pre_act = helper.create_tmp_variable(dtype) + + helper.append_op( + type='lstmp', + inputs={ + 'Input': input, + 'Weight': weight, + 'ProjWeight': proj_weight, + 'Bias': bias + }, + outputs={ + 'Projection': projection, + 'Cell': cell, + 'OrderedP0': ordered_proj0, + 'BatchHidden': batch_hidden, + 'BatchGate': batch_gate, + 'BatchCellPreAct': batch_cell_pre_act + }, + attrs={ + 'use_peepholes': use_peepholes, + 'is_reverse': is_reverse, + 'gate_activation': gate_activation, + 'cell_activation': cell_activation, + 'candidate_activation': candidate_activation, + 'proj_activation': proj_activation + }) + return projection, cell + + def dynamic_gru(input, size, param_attr=None, From ae0ea5415902f3187c7883016c3798ee5ec64fab Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Wed, 24 Jan 2018 14:14:42 +0800 Subject: [PATCH 017/106] fix unit test --- paddle/operators/layer_norm_op.cc | 11 +- .../v2/fluid/tests/test_layer_norm_op.py | 261 ++++++++++++++---- 2 files changed, 215 insertions(+), 57 
deletions(-) diff --git a/paddle/operators/layer_norm_op.cc b/paddle/operators/layer_norm_op.cc index f1ddcd8210..0808192565 100644 --- a/paddle/operators/layer_norm_op.cc +++ b/paddle/operators/layer_norm_op.cc @@ -233,13 +233,13 @@ class LayerNormGradKernel if (d_x) { d_x->mutable_data(ctx.GetPlace()); auto d_x_map = EigenMatrixMapRowMajor(d_x->data(), left, right); - auto triple_product = [](T ele) { return ele * ele * ele; }; - auto neg_inv_std = [](T ele) { return T(-1.0) * std::sqrt(1 / ele); }; + auto triple_product = [](T ele) { return ele * ele; }; + auto neg_inv_std = [](T ele) { return -std::sqrt(1 / ele); }; auto inv_std_scale_func = [scale_data](T ele) { return std::sqrt(1 / ele) * scale_data; }; auto neg_inv_std_scale_func = [scale_data](T ele) { - return T(-1.0) * std::sqrt(1 / ele) * scale_data; + return -std::sqrt(1 / ele) * scale_data; }; // dy_dx auto dx_end = var_map.unaryExpr(inv_std_scale_func) @@ -260,10 +260,13 @@ class LayerNormGradKernel auto dvar_end = var_map.unaryExpr(neg_inv_std) .unaryExpr(triple_product) .cwiseProduct(dvar_end_0); - auto dx_var = (1.0f / right) * + auto dx_var = (T(1.0) / right) * (x_map - mean_map.replicate(1, right)) .cwiseProduct(dvar_end.replicate(1, right)); + // d_x = (1. 
/ N) * scale * inv_var * (N * d_y - np.sum(d_y, axis=0) + // - (X - mean) * inv_var * inv_var * np.sum(d_y * (X - mean), axis=0)) + d_x_map = dx_end + dx_mean + dx_var; } } diff --git a/python/paddle/v2/fluid/tests/test_layer_norm_op.py b/python/paddle/v2/fluid/tests/test_layer_norm_op.py index 73450c599d..4ca9754f32 100644 --- a/python/paddle/v2/fluid/tests/test_layer_norm_op.py +++ b/python/paddle/v2/fluid/tests/test_layer_norm_op.py @@ -15,66 +15,221 @@ import unittest import numpy as np +from operator import mul from op_test import OpTest +import paddle.v2.fluid.core as core +from paddle.v2.fluid.op import Operator +from paddle.v2.fluid.framework import grad_var_name -def layer_norm_naive(x, scale, beta, epsilon): - n, c, h, w = x.shape - mean = np.mean(x, axis=(1, 2, 3)) - var = np.var(x, axis=(1, 2, 3)) + epsilon - output = scale * np.divide((x - mean.reshape([n, 1, 1, 1])), - (np.sqrt(var)).reshape([n, 1, 1, 1])) + beta +def get_backward_op(scope, op, no_grad_set): + backward_op = core.Operator.backward(op, no_grad_set) + for input in backward_op.input_vars(): + var = scope.var(input) + var.get_tensor() + for output in backward_op.output_vars(): + var = scope.var(output) + var.get_tensor() + return backward_op + + +def _reference_layer_norm_naive(x, scale, beta, epsilon): + old_shape = x.shape + N = x.shape[0] + D = reduce(mul, old_shape, 1) / N + x.shape = [N, D] + mean = np.mean(x, axis=1) + var = np.var(x, axis=1) + epsilon + output = scale * np.divide((x - mean.reshape([N, 1])), + (np.sqrt(var)).reshape([N, 1])) + beta + output.shape = old_shape return output, mean, var +def _reference_layer_norm_grad(x, grad_y, scale, mean, var, epsilon): + x_shape = x.shape + N = x_shape[0] + D = reduce(mul, x_shape, 1) / N + grad_y.shape = [N, D] + x.shape = [N, D] + grad_offset = np.sum(grad_y) + mean.shape = [N, 1] + var.shape = [N, 1] + grad_scale = np.sum(((x - mean) * np.sqrt(1 / var)) * grad_y) + + dx_end = np.sqrt(1.0 / var) * grad_y + + d_mean_0 = 
np.sum(-np.sqrt(1.0 / var) * grad_y, axis=1).reshape([N, 1]) + d_mean_1 = np.sum(-1.0 / var * (x - mean) * grad_y, axis=1).reshape( + [N, 1]) * (-1.0 / D * np.sqrt(1.0 / var) * + np.sum(x - mean, axis=1).reshape([N, 1])).reshape([N, 1]) + d_mean = 1.0 / D * (d_mean_0 + d_mean_1) + + d_std = np.sum(-1.0 / var * (x - mean) * grad_y, axis=1).reshape([N, 1]) * ( + 1.0 / D * np.sqrt(1.0 / var).reshape([N, 1]) * (x - mean)) + + grad_x = scale * (dx_end + d_mean + d_std) + + grad_y.shape = x_shape + x.shape = x_shape + + return grad_x, grad_scale, grad_offset + + +def create_or_get_tensor(scope, var_name, var, place): + tensor = scope.var(var_name).get_tensor() + if var is not None: + assert isinstance(var, np.ndarray) + tensor.set_lod([[]]) + tensor.set_dims(var.shape) + tensor.set(var, place) + return tensor + + +def set_output_grad(scope, outputs, place, feed_dict=None): + def __set_tensor__(name, data=None): + out_tensor = scope.find_var(name).get_tensor() + grad_tensor = scope.var(grad_var_name(name)).get_tensor() + out_dtype = out_tensor.dtype() + if data is None: + if out_dtype == core.DataType.FP64: + data = np.ones(out_tensor.shape(), dtype=np.float64) + elif out_dtype == core.DataType.FP32: + data = np.ones(out_tensor.shape(), dtype=np.float32) + else: + raise ValueError("Not supported data type " + str(out_dtype)) + grad_tensor.set(data, place) + + for output in outputs: + data = None + if output in feed_dict: + data = feed_dict[output] + __set_tensor__(output, data) + + class TestLayerNormdOp(OpTest): - def setUp(self): - self.init_test_case() - - input = np.random.random(self.input_size).astype("float32") - self.inputs = { - 'X': input, - 'Scale': np.array([self.scale]).astype("float32"), - 'Bias': np.array([self.bias]).astype("float32") - } - output, mean, var = layer_norm_naive(input, self.scale, self.bias, - self.epsilon) - self.outputs = {'Y': output, 'Mean': mean, 'Variance': var} - - def test_check_output(self): - self.check_output() - - # def 
test_check_grad(self): - # self.check_grad( - # ['Scale', 'Bias', 'X'], ['Y', 'Mean', 'Variance'], - # max_relative_error=0.02) - - def test_check_grad_no_x(self): - self.check_grad( - ['Scale', 'Bias'], ['Y', 'Mean', 'Variance'], - max_relative_error=0.02, - no_grad_set=set(['X'])) - - # def test_check_grad_no_scale(self): - # self.check_grad( - # ['Bias','X'], - # 'Y', - # max_relative_error=0.02, - # no_grad_set=set(['Scale'])) - # - # def test_check_grad_no_bias(self): - # self.check_grad( - # ['Scale','X'], - # 'Y', - # max_relative_error=0.02, - # no_grad_set=set(['Bias'])) - - def init_test_case(self): - self.op_type = "layer_norm" - self.input_size = [2, 3, 4, 5] - self.scale = 0.21 - self.bias = 0.1 - self.epsilon = 0.00001 + def __assert_close(self, tensor, np_array, msg, atol=1e-4): + self.assertTrue( + np.allclose( + np.array(tensor).reshape(np_array.shape), np_array, atol=atol), + msg) + + def __assert_grad_close(self, + tensor, + np_array, + name, + place, + max_relative_error=0.02): + a = np.array(tensor).reshape(np_array.shape) + b = np_array + abs_a = np.abs(a) + abs_a[abs_a < 1e-5] = 1 + + diff_mat = np.abs(a - b) / abs_a + max_diff = np.max(diff_mat) + + def err_msg(): + offset = np.argmax(diff_mat > max_relative_error) + return ("%s Variable %s max gradient diff %f over limit %f, " + "the first error element is %d, %f, %f") % ( + "Gradient Check On %s" % str(place), name, max_diff, + max_relative_error, offset, a.flatten()[offset], + b.flatten()[offset]) + + self.assertLessEqual(max_diff, max_relative_error, err_msg()) + + def test_forward_backward(self): + def test_with_place(place, shape): + # attr + epsilon = 0.00001 + x_shape = shape + scale_shape = [1] + + x_val = np.random.random_sample(x_shape).astype(np.float32) + scale_val = np.random.random_sample(scale_shape).astype(np.float32) + bias_val = np.random.random_sample(scale_shape).astype(np.float32) + + # run forward + y_out, saved_mean, var_ref = _reference_layer_norm_naive( + x_val, 
scale_val, bias_val, epsilon) + + # for gradient test + # y_grad = np.ones(x_shape).astype(np.float32) * 0.00277778 + y_grad = np.random.random_sample(x_shape).astype(np.float32) + + x_grad_ref, scale_grad_ref, bias_grad_ref = _reference_layer_norm_grad( + x_val, y_grad, scale_val, saved_mean, var_ref, epsilon) + + scope = core.Scope() + + # create input + x_tensor = create_or_get_tensor(scope, "X", x_val, place) + scale_tensor = create_or_get_tensor(scope, "Scale", scale_val, + place) + bias_tensor = create_or_get_tensor(scope, "Bias", bias_val, place) + + # create output + y_tensor = create_or_get_tensor(scope, "Y", None, place) + mean_tensor = create_or_get_tensor(scope, "Mean", None, place) + variance_tensor = create_or_get_tensor(scope, "Variance", None, + place) + + layer_norm_op = Operator( + "layer_norm", + # inputs + X="X", + Scale="Scale", + Bias="Bias", + # outputs + Y="Y", + Mean="Mean", + Variance="Variance", + # attrs + epsilon=epsilon) + + layer_norm_op.run(scope, place) + + # check forward result + if isinstance(place, core.CUDAPlace): + atol = 5e-2 + else: + atol = 1e-4 + self.__assert_close(y_tensor, y_out, "Y", atol) + self.__assert_close(mean_tensor, saved_mean, "Mean", atol) + self.__assert_close(variance_tensor, var_ref, "Variance", atol) + + # run backward + layer_norm_op_grad = get_backward_op(scope, layer_norm_op, set()) + set_output_grad( + scope, ["Y", "Mean", "Variance"], + place, + feed_dict={"Y": y_grad}) + layer_norm_op_grad.run(scope, place) + + x_grad_tensor = create_or_get_tensor(scope, + grad_var_name("X"), None, + place) + scale_grad_tensor = create_or_get_tensor(scope, + grad_var_name("Scale"), + None, place) + bias_grad_tensor = create_or_get_tensor(scope, + grad_var_name("Bias"), None, + place) + + # check gradient output + self.__assert_grad_close(x_grad_tensor, x_grad_ref, "x_grad", place) + self.__assert_grad_close(scale_grad_tensor, scale_grad_ref, + "scale_grad", place) + self.__assert_grad_close(bias_grad_tensor, 
bias_grad_ref, + "bias_grad", place) + + places = [core.CPUPlace()] + if core.is_compile_gpu() and core.op_support_gpu("layer_norm"): + places.append(core.CUDAPlace(0)) + + for place in places: + test_with_place(place, [2, 3, 4, 5]) + test_with_place(place, [2, 3]) if __name__ == '__main__': From 0e1109cdf303b2da69f416733f4c08159e43c491 Mon Sep 17 00:00:00 2001 From: Helin Wang Date: Thu, 25 Jan 2018 14:54:06 -0800 Subject: [PATCH 018/106] Fix send op data race std::vector is not safe for concurrent write, even to difference indices. More discussion: https://stackoverflow.com/questions/48452611/is-stdfuturewait-a-memory-barrier-i-can-not-explain-this-data-race --- paddle/operators/detail/grpc_client.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/operators/detail/grpc_client.cc b/paddle/operators/detail/grpc_client.cc index d699dabf2f..90e2b29659 100644 --- a/paddle/operators/detail/grpc_client.cc +++ b/paddle/operators/detail/grpc_client.cc @@ -102,7 +102,7 @@ bool RPCClient::Wait() { return true; } - std::vector a(req_count_); + bool a[req_count_]; std::vector> waits(req_count_); for (int i = 0; i < req_count_; i++) { From 6592ea156810ab053ff8926f629815b113647814 Mon Sep 17 00:00:00 2001 From: Kexin Zhao Date: Thu, 25 Jan 2018 15:02:42 -0800 Subject: [PATCH 019/106] initial commit --- paddle/framework/prune.cc | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/paddle/framework/prune.cc b/paddle/framework/prune.cc index 25eb813ffb..59947470e7 100644 --- a/paddle/framework/prune.cc +++ b/paddle/framework/prune.cc @@ -102,6 +102,34 @@ void prune_impl(const proto::ProgramDesc& input, proto::ProgramDesc* output, *op_field->Add() = input.blocks(block_id).ops(i); } } + + // remove the vars in ProgramDesc that are not referenced in + // the pruned ops + std::unordered_map var_map; + auto* var_field = output->mutable_blocks(block_id)->mutable_vars(); + for (auto* var : *var_field) { + var_map[var->name()] = *var; + 
} + + // for (size_t i = 0; i < var_field->size(); ++i) { + // auto* var = (*var_field)[i]; + // var_map[var->name()] = *var; + // } + + // var_field->Clear(); + // for (size_t i = 0; i < op_field->size(); ++i) { + // auto* op = (*op_field)[i]; + + // auto* input_field = op->mutable_inputs(); + // for (size_t j = 0; j < input_field->size(); ++j) { + // auto* input_names = (*input_field)[j]->arguments(); + // for () + // *var_field->Add() = var_map[] + // } + // auto* ouput_field = op->mutable_outputs(); + // for (size_t k = 0; k < output_field->size(); ++k) { + // } + // } } // TODO(fengjiayi): Prune() could be inplaced to avoid unnecessary copies From a6718797599943788cd2f10d9adc05dd2c2ef781 Mon Sep 17 00:00:00 2001 From: Kexin Zhao Date: Thu, 25 Jan 2018 15:50:32 -0800 Subject: [PATCH 020/106] remove unreferenced vars --- paddle/framework/prune.cc | 47 +++++++++++++++++++-------------------- 1 file changed, 23 insertions(+), 24 deletions(-) diff --git a/paddle/framework/prune.cc b/paddle/framework/prune.cc index 59947470e7..6117cbb79f 100644 --- a/paddle/framework/prune.cc +++ b/paddle/framework/prune.cc @@ -17,6 +17,7 @@ limitations under the License. 
*/ #include #include #include +#include #include #include @@ -103,33 +104,31 @@ void prune_impl(const proto::ProgramDesc& input, proto::ProgramDesc* output, } } - // remove the vars in ProgramDesc that are not referenced in - // the pruned ops - std::unordered_map var_map; + // remove the VarDescs in BlockDesc that are not referenced in + // the pruned OpDescs + std::unordered_map var_map; auto* var_field = output->mutable_blocks(block_id)->mutable_vars(); - for (auto* var : *var_field) { - var_map[var->name()] = *var; + for (const auto& var : *var_field) { + var_map[var.name()] = var; } - // for (size_t i = 0; i < var_field->size(); ++i) { - // auto* var = (*var_field)[i]; - // var_map[var->name()] = *var; - // } - - // var_field->Clear(); - // for (size_t i = 0; i < op_field->size(); ++i) { - // auto* op = (*op_field)[i]; - - // auto* input_field = op->mutable_inputs(); - // for (size_t j = 0; j < input_field->size(); ++j) { - // auto* input_names = (*input_field)[j]->arguments(); - // for () - // *var_field->Add() = var_map[] - // } - // auto* ouput_field = op->mutable_outputs(); - // for (size_t k = 0; k < output_field->size(); ++k) { - // } - // } + var_field->Clear(); + for (const auto& op : *op_field) { + // add VarDescs of all input arguments for each OpDesc + auto& input_field = op.inputs(); + for (auto& input : input_field) { + for (auto& arg : input.arguments()) { + *var_field->Add() = var_map[arg]; + } + } + // add VarDescs of all output arguments for each OpDesc + auto& output_field = op.outputs(); + for (auto& output : output_field) { + for (auto& arg : output.arguments()) { + *var_field->Add() = var_map[arg]; + } + } + } } // TODO(fengjiayi): Prune() could be inplaced to avoid unnecessary copies From c3cebc0f04ddc3b51fa1f36d3691fd820e03dd20 Mon Sep 17 00:00:00 2001 From: Xi Chen Date: Thu, 25 Jan 2018 16:11:56 -0800 Subject: [PATCH 021/106] fix #5902 --- python/paddle/trainer/config_parser.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 
deletions(-) diff --git a/python/paddle/trainer/config_parser.py b/python/paddle/trainer/config_parser.py index 4fdf409021..24a7e94e3f 100644 --- a/python/paddle/trainer/config_parser.py +++ b/python/paddle/trainer/config_parser.py @@ -140,8 +140,13 @@ def init_config_environment( g_submodel_stack=[], g_add_submodel_suffix=False, ): - for k, v in locals().iteritems(): - globals()[k] = copy.deepcopy(v) + # directly iterate through locals().iteritems() will change + # the size of locals() due to introducting k, v into scope + # which will break the process in some env + + local_vars = copy.deepcopy(locals()) + for k, v in local_vars.iteritems(): + globals()[k] = v # Because type is widely used as a variable name in this code. From ce54d5e834e85cc46c2ac020dbbfe651d7005000 Mon Sep 17 00:00:00 2001 From: Xi Chen Date: Thu, 25 Jan 2018 16:13:59 -0800 Subject: [PATCH 022/106] fix typo --- python/paddle/trainer/config_parser.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/paddle/trainer/config_parser.py b/python/paddle/trainer/config_parser.py index 24a7e94e3f..186b91c226 100644 --- a/python/paddle/trainer/config_parser.py +++ b/python/paddle/trainer/config_parser.py @@ -141,7 +141,7 @@ def init_config_environment( g_add_submodel_suffix=False, ): # directly iterate through locals().iteritems() will change - # the size of locals() due to introducting k, v into scope + # the size of locals() due to introducing k, v into scope # which will break the process in some env local_vars = copy.deepcopy(locals()) From 788f5c6d439f2795d9882697be1e257eedfc5c5a Mon Sep 17 00:00:00 2001 From: kexinzhao Date: Thu, 25 Jan 2018 18:20:12 -0800 Subject: [PATCH 023/106] New Run() method for framework::Executor (#7807) * initial commit * add new executor run function * fix bug * fix multiple definition of feed_fetch_method issue * fix cmake * fix tensor copy error * refine executor code * add comments * temporary modification * address comments * fix bug --- 
paddle/framework/CMakeLists.txt | 4 +- paddle/framework/executor.cc | 164 ++++++++++++++++++++++++++ paddle/framework/executor.h | 6 + paddle/framework/feed_fetch_method.cc | 56 +++++++++ paddle/framework/feed_fetch_method.h | 34 +----- paddle/inference/inference.cc | 14 ++- paddle/pybind/CMakeLists.txt | 2 +- paddle/pybind/pybind.cc | 4 +- 8 files changed, 244 insertions(+), 40 deletions(-) create mode 100644 paddle/framework/feed_fetch_method.cc diff --git a/paddle/framework/CMakeLists.txt b/paddle/framework/CMakeLists.txt index 8d9260811a..2804969842 100644 --- a/paddle/framework/CMakeLists.txt +++ b/paddle/framework/CMakeLists.txt @@ -74,8 +74,10 @@ cc_library(backward SRCS backward.cc DEPS net_op) cc_test(backward_test SRCS backward_test.cc DEPS backward recurrent_op device_context fill_constant_op) cc_library(lod_rank_table SRCS lod_rank_table.cc DEPS lod_tensor) +cc_library(feed_fetch_method SRCS feed_fetch_method.cc DEPS lod_tensor scope glog) + cc_library(executor SRCS executor.cc DEPS op_registry device_context scope -framework_proto backward glog lod_rank_table profiler) +framework_proto backward glog lod_rank_table profiler feed_fetch_method) cc_library(prune SRCS prune.cc DEPS framework_proto) cc_test(prune_test SRCS prune_test.cc DEPS op_info prune recurrent_op device_context) diff --git a/paddle/framework/executor.cc b/paddle/framework/executor.cc index c28ffefdd0..50a70d723e 100644 --- a/paddle/framework/executor.cc +++ b/paddle/framework/executor.cc @@ -17,6 +17,7 @@ limitations under the License. */ #include #include "gflags/gflags.h" +#include "paddle/framework/feed_fetch_method.h" #include "paddle/framework/feed_fetch_type.h" #include "paddle/framework/lod_rank_table.h" #include "paddle/framework/lod_tensor_array.h" @@ -149,5 +150,168 @@ void Executor::Run(const ProgramDesc& pdesc, Scope* scope, int block_id, } } +// Check whether the block already has feed operators and feed_holder. 
+// Return false if the block does not have any feed operators. +// If some feed operators have been prepended to the block, check that +// the info contained in these feed operators matches the feed_targets +// and feed_holder_name. Raise exception when any mismatch is found. +// Return true if the block has feed operators and holder of matching info. +static bool has_feed_operators( + BlockDesc* block, std::map& feed_targets, + const std::string& feed_holder_name) { + size_t feed_count = 0; + for (auto* op : block->AllOps()) { + if (op->Type() == kFeedOpType) { + feed_count++; + PADDLE_ENFORCE_EQ(op->Input("X")[0], feed_holder_name, + "Input to feed op should be '%s'", feed_holder_name); + std::string feed_target_name = op->Output("Out")[0]; + PADDLE_ENFORCE( + feed_targets.find(feed_target_name) != feed_targets.end(), + "Feed operator output name '%s' cannot be found in 'feed_targets'", + feed_target_name); + } else { + break; + } + } + + if (feed_count > 0) { + PADDLE_ENFORCE_EQ( + feed_count, feed_targets.size(), + "The number of feed operators should match 'feed_targets'"); + + // When feed operator are present, so should be feed_holder + auto var = block->FindVar(feed_holder_name); + PADDLE_ENFORCE_NOT_NULL(var, "Block should already have a '%s' variable", + feed_holder_name); + PADDLE_ENFORCE_EQ(var->GetType(), proto::VarDesc::FEED_MINIBATCH, + "'%s' variable should be 'FEED_MINIBATCH' type", + feed_holder_name); + } + + return feed_count > 0; +} + +// Check whether the block already has fetch operators and fetch_holder. +// Return false if the block does not have any fetch operators. +// If some fetch operators have been appended to the block, check that +// the info contained in these fetch operators matches the fetch_targets +// and fetch_holder_name. Raise exception when any mismatch is found. +// Return true if the block has fetch operators and holder of matching info. 
+static bool has_fetch_operators( + BlockDesc* block, std::map& fetch_targets, + const std::string& fetch_holder_name) { + size_t fetch_count = 0; + for (auto* op : block->AllOps()) { + if (op->Type() == kFetchOpType) { + fetch_count++; + PADDLE_ENFORCE_EQ(op->Output("Out")[0], fetch_holder_name, + "Output of fetch op should be '%s'", fetch_holder_name); + std::string fetch_target_name = op->Input("X")[0]; + PADDLE_ENFORCE( + fetch_targets.find(fetch_target_name) != fetch_targets.end(), + "Fetch operator input name '%s' cannot be found in 'fetch_targets'", + fetch_target_name); + } + } + + if (fetch_count > 0) { + PADDLE_ENFORCE_EQ( + fetch_count, fetch_targets.size(), + "The number of fetch operators should match 'fetch_targets'"); + + // When fetch operator are present, so should be fetch_holder + auto var = block->FindVar(fetch_holder_name); + PADDLE_ENFORCE_NOT_NULL(var, "Block should already have a '%s' variable", + fetch_holder_name); + PADDLE_ENFORCE_EQ(var->GetType(), proto::VarDesc::FETCH_LIST, + "'%s' variable should be 'FETCH_LIST' type", + fetch_holder_name); + } + + return fetch_count > 0; +} + +void Executor::Run(const ProgramDesc& program, Scope* scope, + std::map& feed_targets, + std::map& fetch_targets, + const std::string& feed_holder_name, + const std::string& fetch_holder_name) { + auto* copy_program = new ProgramDesc(program); + auto* global_block = copy_program->MutableBlock(0); + + if (!has_feed_operators(global_block, feed_targets, feed_holder_name)) { + // create feed_holder variable + auto* feed_holder = global_block->Var(feed_holder_name); + feed_holder->SetType(proto::VarDesc::FEED_MINIBATCH); + feed_holder->SetPersistable(true); + + int i = 0; + for (auto& feed_target : feed_targets) { + std::string var_name = feed_target.first; + VLOG(3) << "feed target's name: " << var_name; + + // prepend feed op + auto* op = global_block->PrependOp(); + op->SetType(kFeedOpType); + op->SetInput("X", {feed_holder_name}); + op->SetOutput("Out", 
{var_name}); + op->SetAttr("col", {static_cast(i)}); + op->CheckAttrs(); + + i++; + } + } + + // map the data of feed_targets to feed_holder + for (auto* op : global_block->AllOps()) { + if (op->Type() == kFeedOpType) { + std::string feed_target_name = op->Output("Out")[0]; + int idx = boost::get(op->GetAttr("col")); + SetFeedVariable(scope, *feed_targets[feed_target_name], feed_holder_name, + idx); + } else { + break; + } + } + + if (!has_fetch_operators(global_block, fetch_targets, fetch_holder_name)) { + // create fetch_holder variable + auto* fetch_holder = global_block->Var(fetch_holder_name); + fetch_holder->SetType(proto::VarDesc::FETCH_LIST); + fetch_holder->SetPersistable(true); + + int i = 0; + for (auto& fetch_target : fetch_targets) { + std::string var_name = fetch_target.first; + VLOG(3) << "fetch target's name: " << var_name; + + // append fetch op + auto* op = global_block->AppendOp(); + op->SetType(kFetchOpType); + op->SetInput("X", {var_name}); + op->SetOutput("Out", {fetch_holder_name}); + op->SetAttr("col", {static_cast(i)}); + op->CheckAttrs(); + + i++; + } + } + + Run(*copy_program, scope, 0, true, true); + + // obtain the data of fetch_targets from fetch_holder + for (auto* op : global_block->AllOps()) { + if (op->Type() == kFetchOpType) { + std::string fetch_target_name = op->Input("X")[0]; + int idx = boost::get(op->GetAttr("col")); + *fetch_targets[fetch_target_name] = + GetFetchVariable(*scope, fetch_holder_name, idx); + } + } + + delete copy_program; +} + } // namespace framework } // namespace paddle diff --git a/paddle/framework/executor.h b/paddle/framework/executor.h index d869e18901..035ff48a52 100644 --- a/paddle/framework/executor.h +++ b/paddle/framework/executor.h @@ -41,6 +41,12 @@ class Executor { void Run(const ProgramDesc&, Scope*, int, bool create_local_scope = true, bool create_vars = true); + void Run(const ProgramDesc& program, Scope* scope, + std::map& feed_targets, + std::map& fetch_targets, + const std::string& 
feed_holder_name = "feed", + const std::string& fetch_holder_name = "fetch"); + private: const platform::Place place_; }; diff --git a/paddle/framework/feed_fetch_method.cc b/paddle/framework/feed_fetch_method.cc new file mode 100644 index 0000000000..21201b6755 --- /dev/null +++ b/paddle/framework/feed_fetch_method.cc @@ -0,0 +1,56 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/framework/feed_fetch_method.h" +#include "glog/logging.h" +#include "paddle/framework/variable.h" + +namespace paddle { +namespace framework { + +void SetFeedVariable(Scope* scope, const LoDTensor& input, + const std::string& var_name, size_t index) { + // If var_name Variable is not found in GlobalScope, a new variable will + // be created. + VLOG(3) << "SetFeedVariable name=" << var_name << " index=" << index; + Variable* g_feed_value = scope->Var(var_name); + auto& feed_inputs = + *(g_feed_value->GetMutable>()); + if (index >= feed_inputs.size()) { + feed_inputs.resize(index + 1); + } + // shared data with input tensor + feed_inputs[index].ShareDataWith(input); + // set lod + feed_inputs[index].set_lod(input.lod()); +} + +LoDTensor& GetFetchVariable(const Scope& scope, const std::string& var_name, + size_t index) { + // Since we want to fetch LodTensor from a variable, the variable must + // be created alreadly. 
+ Variable* g_fetch_value = scope.FindVar(var_name); + PADDLE_ENFORCE(g_fetch_value->IsType(), + "Only %s can be invoked by GetFetchVariable", + typeid(FeedFetchList).name()); + auto& fetch_outputs = *g_fetch_value->GetMutable(); + auto& tensor = fetch_outputs[index]; + VLOG(3) << "Fetch " << var_name << " with index " << index + << " shape= " << tensor.dims(); + PADDLE_ENFORCE_LT(index, fetch_outputs.size()); + return tensor; +} + +} // namespace framework +} // namespace paddle diff --git a/paddle/framework/feed_fetch_method.h b/paddle/framework/feed_fetch_method.h index 7feacb1e24..b71945fcc8 100644 --- a/paddle/framework/feed_fetch_method.h +++ b/paddle/framework/feed_fetch_method.h @@ -13,46 +13,18 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "glog/logging.h" + #include "paddle/framework/feed_fetch_type.h" #include "paddle/framework/scope.h" -#include "paddle/framework/variable.h" namespace paddle { namespace framework { void SetFeedVariable(Scope* scope, const LoDTensor& input, - const std::string& var_name, size_t index) { - // If var_name Variable is not found in GlobalScope, a new variable will - // be created. - VLOG(3) << "SetFeedVariable name=" << var_name << " index=" << index; - Variable* g_feed_value = scope->Var(var_name); - auto& feed_inputs = - *(g_feed_value->GetMutable>()); - if (index >= feed_inputs.size()) { - feed_inputs.resize(index + 1); - } - // shared data with input tensor - feed_inputs[index].ShareDataWith(input); - // set lod - feed_inputs[index].set_lod(input.lod()); -} + const std::string& var_name, size_t index); LoDTensor& GetFetchVariable(const Scope& scope, const std::string& var_name, - size_t index) { - // Since we want to fetch LodTensor from a variable, the variable must - // be created alreadly. 
- Variable* g_fetch_value = scope.FindVar(var_name); - PADDLE_ENFORCE(g_fetch_value->IsType(), - "Only %s can be invoked by GetFetchVariable", - typeid(FeedFetchList).name()); - auto& fetch_outputs = *g_fetch_value->GetMutable(); - auto& tensor = fetch_outputs[index]; - VLOG(3) << "Fetch " << var_name << " with index " << index - << " shape= " << tensor.dims(); - PADDLE_ENFORCE_LT(index, fetch_outputs.size()); - return tensor; -} + size_t index); } // namespace framework } // namespace paddle diff --git a/paddle/inference/inference.cc b/paddle/inference/inference.cc index 09268ffb3a..b43c359ed1 100644 --- a/paddle/inference/inference.cc +++ b/paddle/inference/inference.cc @@ -15,7 +15,6 @@ limitations under the License. */ #include "inference.h" #include #include "paddle/framework/executor.h" -#include "paddle/framework/feed_fetch_method.h" #include "paddle/framework/init.h" #include "paddle/framework/scope.h" @@ -154,7 +153,7 @@ void InferenceEngine::Execute(const std::vector& feeds, LOG(FATAL) << "Please initialize the program_ and load_program_ first."; } - if (feeds.size() < feed_var_names_.size()) { + if (feeds.size() != feed_var_names_.size()) { LOG(FATAL) << "Please feed " << feed_var_names_.size() << " input Tensors."; } @@ -165,19 +164,22 @@ void InferenceEngine::Execute(const std::vector& feeds, executor->Run(*load_program_, scope, 0, true, true); + std::map feed_targets; + std::map fetch_targets; + // set_feed_variable for (size_t i = 0; i < feed_var_names_.size(); ++i) { - framework::SetFeedVariable(scope, feeds[i], "feed", i); + feed_targets[feed_var_names_[i]] = &feeds[i]; } - executor->Run(*program_, scope, 0, true, true); - // get_fetch_variable fetchs.resize(fetch_var_names_.size()); for (size_t i = 0; i < fetch_var_names_.size(); ++i) { - fetchs[i] = framework::GetFetchVariable(*scope, "fetch", i); + fetch_targets[fetch_var_names_[i]] = &fetchs[i]; } + executor->Run(*program_, scope, feed_targets, fetch_targets); + delete place; delete scope; 
delete executor; diff --git a/paddle/pybind/CMakeLists.txt b/paddle/pybind/CMakeLists.txt index e78673e0ba..de53fea0dd 100644 --- a/paddle/pybind/CMakeLists.txt +++ b/paddle/pybind/CMakeLists.txt @@ -1,7 +1,7 @@ if(WITH_PYTHON) cc_library(paddle_pybind SHARED SRCS pybind.cc exception.cc protobuf.cc const_value.cc - DEPS pybind python backward proto_desc paddle_memory executor prune init profiler + DEPS pybind python backward proto_desc paddle_memory executor prune init profiler feed_fetch_method ${GLOB_OP_LIB}) if(NOT APPLE AND NOT ANDROID) target_link_libraries(paddle_pybind rt) diff --git a/paddle/pybind/pybind.cc b/paddle/pybind/pybind.cc index b4fd2a8989..490397afdd 100644 --- a/paddle/pybind/pybind.cc +++ b/paddle/pybind/pybind.cc @@ -424,7 +424,9 @@ All parameter, weight, gradient are variables in Paddle. py::class_(m, "Executor") .def(py::init()) - .def("run", &Executor::Run); + .def("run", + (void (Executor::*)(const ProgramDesc &, Scope *, int, bool, bool)) & + Executor::Run); m.def("unique_integer", UniqueIntegerGenerator); m.def("init_gflags", framework::InitGflags); From 438aad24a5a82d5e5302543a7f56bfd8f414aaf6 Mon Sep 17 00:00:00 2001 From: Liu Yiqun Date: Fri, 26 Jan 2018 04:07:02 +0000 Subject: [PATCH 024/106] Update the inference unittest using the new Executor.Run(). --- paddle/inference/inference.cc | 103 ++---------------- paddle/inference/inference.h | 18 ++- .../book/test_inference_recognize_digits.cc | 56 +++++++--- 3 files changed, 59 insertions(+), 118 deletions(-) diff --git a/paddle/inference/inference.cc b/paddle/inference/inference.cc index 2c4d717a13..51d43a63ee 100644 --- a/paddle/inference/inference.cc +++ b/paddle/inference/inference.cc @@ -14,13 +14,13 @@ limitations under the License. 
*/ #include "inference.h" #include -#include "paddle/framework/executor.h" -#include "paddle/framework/init.h" -#include "paddle/framework/scope.h" namespace paddle { -void InferenceEngine::LoadInferenceModel(const std::string& dirname) { +framework::ProgramDesc* InferenceEngine::LoadInferenceModel( + framework::Executor& exe, + framework::Scope* scope, + const std::string& dirname) { std::string model_filename = dirname + "/__model__"; LOG(INFO) << "loading model from " << model_filename; std::ifstream inputfs(model_filename, std::ios::in | std::ios::binary); @@ -34,6 +34,7 @@ void InferenceEngine::LoadInferenceModel(const std::string& dirname) { program_ = new framework::ProgramDesc(program_desc_str); GenerateLoadProgram(dirname); + exe.Run(*load_program_, scope, 0, true, true); framework::BlockDesc* global_block = program_->MutableBlock(0); feed_var_names_.clear(); @@ -45,6 +46,8 @@ void InferenceEngine::LoadInferenceModel(const std::string& dirname) { fetch_var_names_.push_back(op->Input("X")[0]); } } + + return program_; } bool InferenceEngine::IsParameter(const framework::VarDesc* var) { @@ -92,96 +95,4 @@ void InferenceEngine::GenerateLoadProgram(const std::string& dirname) { } } } - -void InferenceEngine::PrependFeedOp() { - if (!program_) { - LOG(FATAL) << "Please initialize the program_ first."; - } - - framework::BlockDesc* global_block = program_->MutableBlock(0); - - // create_var - framework::VarDesc* feed_var = global_block->Var("feed"); - feed_var->SetType(framework::proto::VarDesc::FEED_MINIBATCH); - feed_var->SetPersistable(true); - - // prepend feed_op - for (size_t i = 0; i < feed_var_names_.size(); ++i) { - std::string var_name = feed_var_names_[i]; - LOG(INFO) << "feed var's name: " << var_name; - - // prepend_op - framework::OpDesc* op = global_block->PrependOp(); - op->SetType("feed"); - op->SetInput("X", {"feed"}); - op->SetOutput("Out", {var_name}); - op->SetAttr("col", {static_cast(i)}); - op->CheckAttrs(); - } -} - -void 
InferenceEngine::AppendFetchOp() { - if (!program_) { - LOG(FATAL) << "Please initialize the program_ first."; - } - - framework::BlockDesc* global_block = program_->MutableBlock(0); - - // create_var - framework::VarDesc* fetch_var = global_block->Var("fetch"); - fetch_var->SetType(framework::proto::VarDesc::FETCH_LIST); - fetch_var->SetPersistable(true); - - // append fetch_op - for (size_t i = 0; i < fetch_var_names_.size(); ++i) { - std::string var_name = fetch_var_names_[i]; - LOG(INFO) << "fetch var's name: " << var_name; - - // append_op - framework::OpDesc* op = global_block->AppendOp(); - op->SetType("fetch"); - op->SetInput("X", {var_name}); - op->SetOutput("Out", {"fetch"}); - op->SetAttr("col", {static_cast(i)}); - op->CheckAttrs(); - } -} - -void InferenceEngine::Execute(const std::vector& feeds, - std::vector& fetchs) { - if (!program_ || !load_program_) { - LOG(FATAL) << "Please initialize the program_ and load_program_ first."; - } - - if (feeds.size() != feed_var_names_.size()) { - LOG(FATAL) << "Please feed " << feed_var_names_.size() << " input Tensors."; - } - - auto* place = new platform::CPUPlace(); - framework::InitDevices(); - framework::Executor* executor = new framework::Executor(*place); - framework::Scope* scope = new framework::Scope(); - - executor->Run(*load_program_, scope, 0, true, true); - - std::map feed_targets; - std::map fetch_targets; - - // set_feed_variable - for (size_t i = 0; i < feed_var_names_.size(); ++i) { - feed_targets[feed_var_names_[i]] = &feeds[i]; - } - - // get_fetch_variable - fetchs.resize(fetch_var_names_.size()); - for (size_t i = 0; i < fetch_var_names_.size(); ++i) { - fetch_targets[fetch_var_names_[i]] = &fetchs[i]; - } - - executor->Run(*program_, scope, feed_targets, fetch_targets); - - delete place; - delete scope; - delete executor; -} } // namespace paddle diff --git a/paddle/inference/inference.h b/paddle/inference/inference.h index 26f259824b..60caa41c70 100644 --- a/paddle/inference/inference.h 
+++ b/paddle/inference/inference.h @@ -15,8 +15,10 @@ limitations under the License. */ #pragma once #include "paddle/framework/block_desc.h" +#include "paddle/framework/executor.h" #include "paddle/framework/lod_tensor.h" #include "paddle/framework/program_desc.h" +#include "paddle/framework/scope.h" namespace paddle { @@ -28,15 +30,21 @@ public: delete load_program_; } - void LoadInferenceModel(const std::string& dirname); - void Execute(const std::vector& feeds, - std::vector& fetchs); + framework::ProgramDesc* LoadInferenceModel(framework::Executor& exe, + framework::Scope* scope, + const std::string& dirname); + + const std::vector& GetFeedVarNames() const { + return feed_var_names_; + } + + const std::vector& GetFetchVarNames() const { + return fetch_var_names_; + } private: bool IsParameter(const framework::VarDesc* var); void GenerateLoadProgram(const std::string& dirname); - void PrependFeedOp(); - void AppendFetchOp(); private: framework::ProgramDesc* program_; diff --git a/paddle/inference/tests/book/test_inference_recognize_digits.cc b/paddle/inference/tests/book/test_inference_recognize_digits.cc index d0e811914c..0dfaf9a0ee 100644 --- a/paddle/inference/tests/book/test_inference_recognize_digits.cc +++ b/paddle/inference/tests/book/test_inference_recognize_digits.cc @@ -16,11 +16,12 @@ limitations under the License. */ #include #include #include "gflags/gflags.h" +#include "paddle/framework/init.h" #include "paddle/inference/inference.h" DEFINE_string(dirname, "", "Directory of the inference model."); -TEST(inference, recognize_digits) { +TEST(recognize_digits, CPU) { if (FLAGS_dirname.empty()) { LOG(FATAL) << "Usage: ./example --dirname=path/to/your/model"; } @@ -28,33 +29,54 @@ TEST(inference, recognize_digits) { std::cout << "FLAGS_dirname: " << FLAGS_dirname << std::endl; std::string dirname = FLAGS_dirname; + // 0. Initialize all the devices + paddle::framework::InitDevices(); + + // 1. 
Define place, executor and scope + auto place = paddle::platform::CPUPlace(); + auto executor = paddle::framework::Executor(place); + auto* scope = new paddle::framework::Scope(); + + // 2. Initialize the inference_program and load all parameters from file paddle::InferenceEngine* engine = new paddle::InferenceEngine(); - engine->LoadInferenceModel(dirname); + paddle::framework::ProgramDesc* inference_program = + engine->LoadInferenceModel(executor, scope, dirname); + + // 3. Get the feed_var_names and fetch_var_names + const std::vector& feed_target_names = engine->GetFeedVarNames(); + const std::vector& fetch_target_names = + engine->GetFetchVarNames(); + // 4. Prepare inputs + std::map feed_targets; paddle::framework::LoDTensor input; srand(time(0)); float* input_ptr = - input.mutable_data({1, 784}, paddle::platform::CPUPlace()); + input.mutable_data({1, 28, 28}, paddle::platform::CPUPlace()); for (int i = 0; i < 784; ++i) { input_ptr[i] = rand() / (static_cast(RAND_MAX)); } + feed_targets[feed_target_names[0]] = &input; + + // 5. Define Tensor to get the outputs + std::map fetch_targets; + paddle::framework::LoDTensor output; + fetch_targets[fetch_target_names[0]] = &output; + + // 6. Run the inference program + executor.Run(*inference_program, scope, feed_targets, fetch_targets); - std::vector feeds; - feeds.push_back(input); - std::vector fetchs; - engine->Execute(feeds, fetchs); - - for (size_t i = 0; i < fetchs.size(); ++i) { - LOG(INFO) << fetchs[i].dims(); - std::stringstream ss; - ss << "result:"; - float* output_ptr = fetchs[i].data(); - for (int j = 0; j < fetchs[i].numel(); ++j) { - ss << " " << output_ptr[j]; - } - LOG(INFO) << ss.str(); + // 7. Use the output as your expect. 
+ LOG(INFO) << output.dims(); + std::stringstream ss; + ss << "result:"; + float* output_ptr = output.data(); + for (int j = 0; j < output.numel(); ++j) { + ss << " " << output_ptr[j]; } + LOG(INFO) << ss.str(); + delete scope; delete engine; } From 84c12c6edc2bc0b6b410c4385e928c8d061ea18e Mon Sep 17 00:00:00 2001 From: Yang yaming Date: Fri, 26 Jan 2018 15:35:23 +0800 Subject: [PATCH 025/106] Add one_hot operator. (#7819) * Add one_hot operator. * Add more unit tests. --- paddle/operators/one_hot_op.cc | 95 +++++++++++++++ paddle/operators/one_hot_op.cu | 80 +++++++++++++ paddle/operators/one_hot_op.h | 68 +++++++++++ .../paddle/v2/fluid/tests/test_one_hot_op.py | 110 ++++++++++++++++++ 4 files changed, 353 insertions(+) create mode 100644 paddle/operators/one_hot_op.cc create mode 100644 paddle/operators/one_hot_op.cu create mode 100644 paddle/operators/one_hot_op.h create mode 100644 python/paddle/v2/fluid/tests/test_one_hot_op.py diff --git a/paddle/operators/one_hot_op.cc b/paddle/operators/one_hot_op.cc new file mode 100644 index 0000000000..e78b7468de --- /dev/null +++ b/paddle/operators/one_hot_op.cc @@ -0,0 +1,95 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "paddle/operators/one_hot_op.h" +#include "paddle/framework/framework.pb.h" + +namespace paddle { +namespace operators { + +class OneHotOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + void InferShape(framework::InferShapeContext* ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("X"), + "Input(X) of OneHotOp should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("Out"), + "Output(Out) of OneHotOp should not be null."); + + auto x_dims = ctx->GetInputDim("X"); + PADDLE_ENFORCE_GE(x_dims.size(), 2, + "Rank of Input(X) should be at least 2."); + PADDLE_ENFORCE_GE(x_dims[x_dims.size() - 1], 1U, + "Last dimension of Input(X) should be 1."); + + int depth = ctx->Attrs().Get("depth"); + + PADDLE_ENFORCE_GT(depth, 0, "Should provide a positive depth (%d).", depth); + + framework::DDim out_dims(x_dims); + out_dims[out_dims.size() - 1] = depth; + ctx->SetOutputDim("Out", out_dims); + ctx->ShareLoD("X", /* --> */ "Out"); + } +}; + +class OneHotOpMaker : public framework::OpProtoAndCheckerMaker { + public: + OneHotOpMaker(OpProto* proto, OpAttrChecker* op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", + "(LoDTensor, LoDTensor) Input variable with rank at least 2. " + "The last dimension of X should be 1. Each value of X is an index " + "to indicate the position."); + AddOutput("Out", + "(Tensor, Tensor) Output tensor with same rank as X. " + "The tensor consists of one-hot representations of values in X."); + AddAttr("depth", + "A positive integer to specify the length of one-hot vector."); + AddAttr("dtype", + "An integer to specify the data type of one-hot " + "vector. The default value is FP32.") + .SetDefault(paddle::framework::proto::DataType::FP32); + AddComment(R"DOC( +One Hot Operator. This operator creates the one-hot representations for input +index values. 
The following example will help to explain the function of this +operator: + +X is a LoDTensor: + X.lod = [[0, 1, 4]] + X.shape = [4, 1] + X.data = [[1], [1], [3], [0]] + +set depth = 4 + +Out is a LoDTensor: + Out.lod = [[0, 1, 4]] + Out.shape = [4, 4] + Out.data = [[0., 1., 0., 0.], + [0., 1., 0., 0.], + [0., 0., 0., 1.], + [1., 0., 0., 0.]] +)DOC"); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OPERATOR(one_hot, ops::OneHotOp, ops::OneHotOpMaker, + paddle::framework::EmptyGradOpMaker); +REGISTER_OP_CPU_KERNEL( + one_hot, ops::OneHotKernel, + ops::OneHotKernel); diff --git a/paddle/operators/one_hot_op.cu b/paddle/operators/one_hot_op.cu new file mode 100644 index 0000000000..16f6d9433e --- /dev/null +++ b/paddle/operators/one_hot_op.cu @@ -0,0 +1,80 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "paddle/operators/one_hot_op.h" +#include "paddle/platform/cuda_helper.h" +#include "paddle/platform/gpu_info.h" + +namespace paddle { +namespace operators { +using platform::PADDLE_CUDA_NUM_THREADS; + +template +__global__ void FillOutputKernel(const InT* p_in_data, OutT* p_out_data, + const int64_t numel, const int depth) { + int idx = blockIdx.x * blockDim.x + threadIdx.x; + if (idx < numel) { + *(p_out_data + (idx * depth) + p_in_data[idx]) = 1.0; + } +} + +template +struct OneHotOpCUDAFunctor { + const framework::LoDTensor* in_; + framework::LoDTensor* out_; + const DeviceContext& ctx_; + int depth_; + + OneHotOpCUDAFunctor(const framework::LoDTensor* in, framework::LoDTensor* out, + int depth, const DeviceContext& ctx) + : in_(in), out_(out), depth_(depth), ctx_(ctx) {} + + template + void operator()() const { + auto* p_in_data = in_->data(); + auto numel = in_->numel(); + auto* p_out_data = out_->mutable_data(ctx_.GetPlace()); + auto stream = ctx_.stream(); + math::set_constant(ctx_, out_, 0.0); + + FillOutputKernel<<<(numel + PADDLE_CUDA_NUM_THREADS - 1) / + PADDLE_CUDA_NUM_THREADS, + PADDLE_CUDA_NUM_THREADS, 0, stream>>>( + p_in_data, p_out_data, numel, depth_); + } +}; + +using LoDTensor = framework::LoDTensor; +template +class OneHotCUDAKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& context) const override { + auto* in = context.Input("X"); + auto* out = context.Output("Out"); + int depth = context.Attr("depth"); + + framework::VisitDataType( + static_cast(context.Attr("dtype")), + OneHotOpCUDAFunctor( + in, out, depth, context.template device_context())); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OP_CUDA_KERNEL( + one_hot, ops::OneHotCUDAKernel, + ops::OneHotCUDAKernel); diff --git a/paddle/operators/one_hot_op.h b/paddle/operators/one_hot_op.h new file mode 100644 index 0000000000..12031ede2c --- /dev/null +++ 
b/paddle/operators/one_hot_op.h @@ -0,0 +1,68 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once +#include "paddle/framework/op_registry.h" +#include "paddle/operators/math/math_function.h" + +namespace paddle { +namespace operators { + +template +struct OneHotOpFunctor { + const framework::LoDTensor* in_; + framework::LoDTensor* out_; + int depth_; + const DeviceContext& ctx_; + + OneHotOpFunctor(const framework::LoDTensor* in, framework::LoDTensor* out, + int depth, const DeviceContext& ctx) + : in_(in), out_(out), depth_(depth), ctx_(ctx) {} + + template + void operator()() const { + auto* p_in_data = in_->data(); + auto numel = in_->numel(); + auto* p_out_data = out_->mutable_data(ctx_.GetPlace()); + math::set_constant(ctx_, out_, 0.0); + + for (int i = 0; i < numel; ++i) { + PADDLE_ENFORCE_GE(p_in_data[i], 0, + "Illegal index value, should be at least 0."); + PADDLE_ENFORCE_LT(p_in_data[i], depth_, + "Illegal index value, should be less than depth (%d).", + depth_); + *(p_out_data + i * depth_ + p_in_data[i]) = 1.0; + } + } +}; + +using LoDTensor = framework::LoDTensor; +template +class OneHotKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& context) const override { + auto* in = context.Input("X"); + auto* out = context.Output("Out"); + int depth = context.Attr("depth"); + + framework::VisitDataType( + 
static_cast(context.Attr("dtype")), + OneHotOpFunctor( + in, out, depth, context.template device_context())); + } +}; + +} // namespace operators +} // namespace paddle diff --git a/python/paddle/v2/fluid/tests/test_one_hot_op.py b/python/paddle/v2/fluid/tests/test_one_hot_op.py new file mode 100644 index 0000000000..e51ea27d14 --- /dev/null +++ b/python/paddle/v2/fluid/tests/test_one_hot_op.py @@ -0,0 +1,110 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import unittest +import numpy as np +import math +from op_test import OpTest +import paddle.v2.fluid as fluid +import paddle.v2.fluid.core as core +import paddle.v2.fluid.framework as framework +from paddle.v2.fluid.framework import Program, program_guard + + +class TestOneHotOp(OpTest): + def setUp(self): + self.op_type = 'one_hot' + depth = 10 + dimension = 12 + x_lod = [[0, 4, 5, 8, 11]] + x = [np.random.randint(0, depth - 1) for i in xrange(x_lod[0][-1])] + x = np.array(x).astype('int').reshape([x_lod[0][-1], 1]) + + out = np.zeros(shape=(np.product(x.shape[:-1]), + depth)).astype('float32') + + for i in xrange(np.product(x.shape)): + out[i, x[i]] = 1.0 + + self.inputs = {'X': (x, x_lod)} + self.attrs = {'depth': depth, 'dtype': int(core.DataType.FP32)} + self.outputs = {'Out': (out, x_lod)} + + def test_check_output(self): + self.check_output() + + +class TestOneHotOp_default_dtype(OpTest): + def setUp(self): + self.op_type = 'one_hot' + depth = 10 + dimension = 12 + x_lod = [[0, 4, 5, 8, 11]] + x = [np.random.randint(0, depth - 1) for i in xrange(x_lod[0][-1])] + x = np.array(x).astype('int').reshape([x_lod[0][-1], 1]) + + out = np.zeros(shape=(np.product(x.shape[:-1]), + depth)).astype('float32') + + for i in xrange(np.product(x.shape)): + out[i, x[i]] = 1.0 + + self.inputs = {'X': (x, x_lod)} + self.attrs = {'depth': depth} + self.outputs = {'Out': (out, x_lod)} + + def test_check_output(self): + self.check_output() + + +class TestOneHotOp_exception(OpTest): + def setUp(self): + self.op_type = 'one_hot' + self.depth = 10 + self.place = core.CPUPlace() + self.dimension = 12 + self.x = core.LoDTensor() + x_lod = [[0, 4, 5, 8, 11]] + data = [np.random.randint(11, 20) for i in xrange(x_lod[0][-1])] + data = np.array(data).astype('int').reshape([x_lod[0][-1], 1]) + self.x.set(data, self.place) + self.x.set_lod(x_lod) + + def test_check_output(self): + program = Program() + with program_guard(program): + x = fluid.layers.data( + name='x', 
shape=[self.dimension], dtype='float32', lod_level=1) + block = program.current_block() + one_hot_out = block.create_var( + name="one_hot_out", + type=core.VarDesc.VarType.LOD_TENSOR, + dtype='float32') + block.append_op( + type='one_hot', + inputs={'X': x}, + attrs={'depth': self.depth}, + outputs={'Out': one_hot_out}) + exe = fluid.Executor(self.place) + + def run(): + exe.run(feed={'x': self.x}, + fetch_list=[one_hot_out], + return_numpy=False) + + self.assertRaises(core.EnforceNotMet, run) + + +if __name__ == '__main__': + unittest.main() From eca58a62000b76d4aa218c6a12a42cefeb547a23 Mon Sep 17 00:00:00 2001 From: Liu Yiqun Date: Fri, 26 Jan 2018 08:58:12 +0000 Subject: [PATCH 026/106] Add unittest for GPU. --- .../book/test_inference_recognize_digits.cc | 97 +++++++++++++------ 1 file changed, 65 insertions(+), 32 deletions(-) diff --git a/paddle/inference/tests/book/test_inference_recognize_digits.cc b/paddle/inference/tests/book/test_inference_recognize_digits.cc index 0dfaf9a0ee..de15167ac3 100644 --- a/paddle/inference/tests/book/test_inference_recognize_digits.cc +++ b/paddle/inference/tests/book/test_inference_recognize_digits.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,19 +21,12 @@ limitations under the License. */ DEFINE_string(dirname, "", "Directory of the inference model."); -TEST(recognize_digits, CPU) { - if (FLAGS_dirname.empty()) { - LOG(FATAL) << "Usage: ./example --dirname=path/to/your/model"; - } - - std::cout << "FLAGS_dirname: " << FLAGS_dirname << std::endl; - std::string dirname = FLAGS_dirname; - - // 0. Initialize all the devices - paddle::framework::InitDevices(); - +template +void TestInference(const std::string& dirname, + const std::vector& cpu_feeds, + std::vector& cpu_fetchs) { // 1. 
Define place, executor and scope - auto place = paddle::platform::CPUPlace(); + auto place = Place(); auto executor = paddle::framework::Executor(place); auto* scope = new paddle::framework::Scope(); @@ -49,37 +42,77 @@ TEST(recognize_digits, CPU) { // 4. Prepare inputs std::map feed_targets; - paddle::framework::LoDTensor input; - srand(time(0)); - float* input_ptr = - input.mutable_data({1, 28, 28}, paddle::platform::CPUPlace()); - for (int i = 0; i < 784; ++i) { - input_ptr[i] = rand() / (static_cast(RAND_MAX)); + for (size_t i = 0; i < feed_target_names.size(); ++i) { + // Please make sure that cpu_feeds[i] is right for feed_target_names[i] + feed_targets[feed_target_names[i]] = cpu_feeds[i]; } - feed_targets[feed_target_names[0]] = &input; // 5. Define Tensor to get the outputs std::map fetch_targets; - paddle::framework::LoDTensor output; - fetch_targets[fetch_target_names[0]] = &output; + for (size_t i = 0; i < fetch_target_names.size(); ++i) { + fetch_targets[fetch_target_names[i]] = cpu_fetchs[i]; + } // 6. Run the inference program executor.Run(*inference_program, scope, feed_targets, fetch_targets); - // 7. Use the output as your expect. - LOG(INFO) << output.dims(); - std::stringstream ss; - ss << "result:"; - float* output_ptr = output.data(); - for (int j = 0; j < output.numel(); ++j) { - ss << " " << output_ptr[j]; - } - LOG(INFO) << ss.str(); - delete scope; delete engine; } +TEST(inference, recognize_digits) { + if (FLAGS_dirname.empty()) { + LOG(FATAL) << "Usage: ./example --dirname=path/to/your/model"; + } + + LOG(INFO) << "FLAGS_dirname: " << FLAGS_dirname << std::endl; + + // 0. 
Initialize all the devices + paddle::framework::InitDevices(); + + paddle::framework::LoDTensor input; + srand(time(0)); + float* input_ptr = + input.mutable_data({1, 28, 28}, paddle::platform::CPUPlace()); + for (int i = 0; i < 784; ++i) { + input_ptr[i] = rand() / (static_cast(RAND_MAX)); + } + std::vector cpu_feeds; + cpu_feeds.push_back(&input); + + paddle::framework::LoDTensor output1; + std::vector cpu_fetchs1; + cpu_fetchs1.push_back(&output1); + + // Run inference on CPU + TestInference( + FLAGS_dirname, cpu_feeds, cpu_fetchs1); + LOG(INFO) << output1.dims(); + +#ifdef PADDLE_WITH_CUDA + paddle::framework::LoDTensor output2; + std::vector cpu_fetchs2; + cpu_fetchs2.push_back(&output2); + + // Run inference on CUDA GPU + TestInference( + FLAGS_dirname, cpu_feeds, cpu_fetchs2); + LOG(INFO) << output2.dims(); + + EXPECT_EQ(output1.dims(), output2.dims()); + EXPECT_EQ(output1.numel(), output2.numel()); + + float err = 1E-3; + int count = 0; + for (int64_t i = 0; i < output1.numel(); ++i) { + if (fabs(output1.data()[i] - output2.data()[i]) > err) { + count++; + } + } + EXPECT_EQ(count, 0) << "There are " << count << " different elements."; +#endif +} + int main(int argc, char** argv) { google::ParseCommandLineFlags(&argc, &argv, false); testing::InitGoogleTest(&argc, argv); From 9beec1212bb895eabf4604f6231db52920fd61d0 Mon Sep 17 00:00:00 2001 From: chengduo Date: Sat, 27 Jan 2018 02:23:02 +0800 Subject: [PATCH 027/106] Add Channel (#7442) * add Channle * refine Channel --- paddle/operators/detail/channel.h | 89 +++++++++++++++++++++++++++++++ 1 file changed, 89 insertions(+) create mode 100644 paddle/operators/detail/channel.h diff --git a/paddle/operators/detail/channel.h b/paddle/operators/detail/channel.h new file mode 100644 index 0000000000..cbfdf80040 --- /dev/null +++ b/paddle/operators/detail/channel.h @@ -0,0 +1,89 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once +#include +#include +#include + +namespace paddle { +namespace operators { +namespace detail { + +template +class Channel { + public: + explicit Channel(std::size_t capacity) : capacity_(capacity) {} + + void Send(T* channel_element) { + std::unique_lock lock(mu_); + + if (IsBounded()) { + full_cond_var_.wait(lock, [this]() { + bool capacity_valid = capacity_ > 0 ? !IsCapacityFull() : true; + return capacity_valid; + }); + } + channel_.push_back(std::move(*channel_element)); + + lock.unlock(); + empty_cond_var_.notify_all(); + } + + T* Receive() { + std::unique_lock lock(mu_); + empty_cond_var_.wait(lock, [this]() { return !channel_.empty(); }); + + T* channel_element = std::move(channel_.front()); + channel_.pop_front(); + + NotifyAllSenders(&lock); + return channel_element; + } + + size_t Size() { + std::unique_lock lock(mu_); + return channel_.size(); + } + + void Clear() { + std::unique_lock lock(mu_); + channel_.clear(); + + NotifyAllSenders(&lock); + } + + private: + std::size_t capacity_; + std::mutex mu_; + std::condition_variable empty_cond_var_; + std::condition_variable full_cond_var_; + std::deque channel_; + + private: + void NotifyAllSenders(std::unique_lock* lock) { + if (IsBounded()) { + lock->unlock(); + full_cond_var_.notify_all(); + } + } + + bool IsBounded() const { return capacity_ > 0; } + + bool IsCapacityFull() const { return channel_.size() >= capacity_; } +}; + +} // namespace detail +} // 
namespace operator +} // namespace paddle From 2f9cf7ccae6101fa5f14ac05a5364e2e22507993 Mon Sep 17 00:00:00 2001 From: Kexin Zhao Date: Fri, 26 Jan 2018 12:21:17 -0800 Subject: [PATCH 028/106] address comments --- paddle/framework/prune.cc | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/paddle/framework/prune.cc b/paddle/framework/prune.cc index 6117cbb79f..bff8e0bcea 100644 --- a/paddle/framework/prune.cc +++ b/paddle/framework/prune.cc @@ -116,15 +116,15 @@ void prune_impl(const proto::ProgramDesc& input, proto::ProgramDesc* output, for (const auto& op : *op_field) { // add VarDescs of all input arguments for each OpDesc auto& input_field = op.inputs(); - for (auto& input : input_field) { - for (auto& arg : input.arguments()) { + for (auto& input_var : input_field) { + for (auto& arg : input_var.arguments()) { *var_field->Add() = var_map[arg]; } } // add VarDescs of all output arguments for each OpDesc auto& output_field = op.outputs(); - for (auto& output : output_field) { - for (auto& arg : output.arguments()) { + for (auto& output_var : output_field) { + for (auto& arg : output_var.arguments()) { *var_field->Add() = var_map[arg]; } } From 06e226378fb70d02d50b023c29f28dedb560fc5f Mon Sep 17 00:00:00 2001 From: kavyasrinet Date: Fri, 26 Jan 2018 14:14:32 -0800 Subject: [PATCH 029/106] Fix Latex (#7901) --- paddle/operators/gru_op.cc | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/paddle/operators/gru_op.cc b/paddle/operators/gru_op.cc index 76f2adefed..fb901b6394 100644 --- a/paddle/operators/gru_op.cc +++ b/paddle/operators/gru_op.cc @@ -135,14 +135,14 @@ class GRUOpMaker : public framework::OpProtoAndCheckerMaker { AddComment(R"DOC( GRU Operator implements part calculations of the complete GRU as following: -\f[ -update \ gate: u_t = actGate(xu_t + W_u * h_{t-1} + b_u) \\ -reset \ gate: r_t = actGate(xr_t + W_r * h_{t-1} + b_r) \\ -output \ candidate: {h}_t = actNode(xc_t + W_c * dot(r_t, h_{t-1}) + b_c) 
\\ +$$ +update\_gate: u_t = actGate(xu_t + W_u * h_{t-1} + b_u) \\ +reset\_gate: r_t = actGate(xr_t + W_r * h_{t-1} + b_r) \\ +output\_candidate: {h}_t = actNode(xc_t + W_c * dot(r_t, h_{t-1}) + b_c) \\ output: h_t = dot((1 - u_t), h_{t-1}) + dot(u_t, {h}_t) -\f] +$$ -@note To implement the complete GRU, fully-connected operator must be used +@note To implement the complete GRU, fully-connected operator must be used before to feed xu, xr and xc as the Input of GRU operator. )DOC"); } From 1f3caaa8a492e926b168586efaf89b3ef3228cc2 Mon Sep 17 00:00:00 2001 From: kavyasrinet Date: Fri, 26 Jan 2018 14:14:47 -0800 Subject: [PATCH 030/106] Make notest_dist_image_classification consistent with distributed implementation in others. (#7899) * Make this file consistent with others * fixed style --- .../notest_dist_image_classification.py | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/python/paddle/v2/fluid/tests/book_distribute/notest_dist_image_classification.py b/python/paddle/v2/fluid/tests/book_distribute/notest_dist_image_classification.py index 218dea31e1..0c51ccf306 100644 --- a/python/paddle/v2/fluid/tests/book_distribute/notest_dist_image_classification.py +++ b/python/paddle/v2/fluid/tests/book_distribute/notest_dist_image_classification.py @@ -14,8 +14,6 @@ from __future__ import print_function -import sys - import paddle.v2 as paddle import paddle.v2.fluid as fluid import os @@ -106,10 +104,10 @@ if len(sys.argv) >= 2: net_type = sys.argv[1] if net_type == "vgg": - print("train vgg net") + print("training vgg net") net = vgg16_bn_drop(images) elif net_type == "resnet": - print("train resnet") + print("training resnet") net = resnet_cifar10(images, 32) else: raise ValueError("%s network is not supported" % net_type) @@ -129,6 +127,7 @@ train_reader = paddle.batch( batch_size=BATCH_SIZE) place = fluid.CPUPlace() +feeder = fluid.DataFeeder(place=place, feed_list=[images, label]) exe = fluid.Executor(place) t = 
fluid.DistributeTranspiler() @@ -146,17 +145,14 @@ if training_role == "PSERVER": if not current_endpoint: print("need env SERVER_ENDPOINT") exit(1) - print("start pserver at:", current_endpoint) pserver_prog = t.get_pserver_program(current_endpoint) pserver_startup = t.get_startup_program(current_endpoint, pserver_prog) exe.run(pserver_startup) exe.run(pserver_prog) - print("pserver run end") elif training_role == "TRAINER": - print("start trainer") trainer_prog = t.get_trainer_program() - feeder = fluid.DataFeeder(place=place, feed_list=[images, label]) exe.run(fluid.default_startup_program()) + for pass_id in range(PASS_NUM): accuracy.reset(exe) for data in train_reader(): @@ -164,9 +160,10 @@ elif training_role == "TRAINER": feed=feeder.feed(data), fetch_list=[avg_cost] + accuracy.metrics) pass_acc = accuracy.eval(exe) - print("loss:" + str(loss) + " acc:" + str(acc) + " pass_acc:" + str( - pass_acc)) - # this model is slow, so if we can train two mini batch, we think it works properly. + print("pass_id:" + str(pass_id) + "loss:" + str(loss) + " pass_acc:" + + str(pass_acc)) + # this model is slow, so if we can train two mini batches, + # we think it works properly. 
print("trainer run end") else: print("environment var TRAINER_ROLE should be TRAINER os PSERVER") From 9b6387e7aeca545260562fdee4021e040f5294e4 Mon Sep 17 00:00:00 2001 From: kexinzhao Date: Fri, 26 Jan 2018 14:25:24 -0800 Subject: [PATCH 031/106] address comments (#7900) --- paddle/framework/executor.cc | 4 ---- 1 file changed, 4 deletions(-) diff --git a/paddle/framework/executor.cc b/paddle/framework/executor.cc index 50a70d723e..cbf3ec7526 100644 --- a/paddle/framework/executor.cc +++ b/paddle/framework/executor.cc @@ -170,8 +170,6 @@ static bool has_feed_operators( feed_targets.find(feed_target_name) != feed_targets.end(), "Feed operator output name '%s' cannot be found in 'feed_targets'", feed_target_name); - } else { - break; } } @@ -270,8 +268,6 @@ void Executor::Run(const ProgramDesc& program, Scope* scope, int idx = boost::get(op->GetAttr("col")); SetFeedVariable(scope, *feed_targets[feed_target_name], feed_holder_name, idx); - } else { - break; } } From 0531edf9da3a089ab1667cd1fb8d9760c7b3503a Mon Sep 17 00:00:00 2001 From: kavyasrinet Date: Fri, 26 Jan 2018 16:28:31 -0800 Subject: [PATCH 032/106] Adding distributed training for dynamic_lstm (#7903) --- ...otest_understand_sentiment_dynamic_lstm.py | 135 ++++++++++++++++++ 1 file changed, 135 insertions(+) create mode 100644 python/paddle/v2/fluid/tests/book_distribute/notest_understand_sentiment_dynamic_lstm.py diff --git a/python/paddle/v2/fluid/tests/book_distribute/notest_understand_sentiment_dynamic_lstm.py b/python/paddle/v2/fluid/tests/book_distribute/notest_understand_sentiment_dynamic_lstm.py new file mode 100644 index 0000000000..bff376a0e2 --- /dev/null +++ b/python/paddle/v2/fluid/tests/book_distribute/notest_understand_sentiment_dynamic_lstm.py @@ -0,0 +1,135 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import numpy as np +import os +import paddle.v2 as paddle +import paddle.v2.fluid as fluid + + +def stacked_lstm_net(data, + label, + input_dim, + class_dim=2, + emb_dim=128, + hid_dim=512, + stacked_num=3): + assert stacked_num % 2 == 1 + + emb = fluid.layers.embedding(input=data, size=[input_dim, emb_dim]) + # add bias attr + + # TODO(qijun) linear act + fc1 = fluid.layers.fc(input=emb, size=hid_dim) + lstm1, cell1 = fluid.layers.dynamic_lstm(input=fc1, size=hid_dim) + + inputs = [fc1, lstm1] + + for i in range(2, stacked_num + 1): + fc = fluid.layers.fc(input=inputs, size=hid_dim) + lstm, cell = fluid.layers.dynamic_lstm( + input=fc, size=hid_dim, is_reverse=(i % 2) == 0) + inputs = [fc, lstm] + + fc_last = fluid.layers.sequence_pool(input=inputs[0], pool_type='max') + lstm_last = fluid.layers.sequence_pool(input=inputs[1], pool_type='max') + + prediction = fluid.layers.fc(input=[fc_last, lstm_last], + size=class_dim, + act='softmax') + cost = fluid.layers.cross_entropy(input=prediction, label=label) + avg_cost = fluid.layers.mean(x=cost) + adam_optimizer = fluid.optimizer.Adam(learning_rate=0.002) + optimize_ops, params_grads = adam_optimizer.minimize(avg_cost) + accuracy = fluid.evaluator.Accuracy(input=prediction, label=label) + return avg_cost, accuracy, accuracy.metrics[0], optimize_ops, params_grads + + +def to_lodtensor(data, place): + seq_lens = [len(seq) for seq in data] + cur_len = 0 + lod = [cur_len] + for l in seq_lens: + cur_len += l + lod.append(cur_len) + flattened_data = np.concatenate(data, axis=0).astype("int64") + flattened_data = 
flattened_data.reshape([len(flattened_data), 1]) + res = fluid.LoDTensor() + res.set(flattened_data, place) + res.set_lod([lod]) + return res + + +def main(): + BATCH_SIZE = 100 + PASS_NUM = 5 + + word_dict = paddle.dataset.imdb.word_dict() + print "loaded word dict successfully" + dict_dim = len(word_dict) + class_dim = 2 + + data = fluid.layers.data( + name="words", shape=[1], dtype="int64", lod_level=1) + label = fluid.layers.data(name="label", shape=[1], dtype="int64") + cost, accuracy, acc_out, optimize_ops, params_grads = stacked_lstm_net( + data, label, input_dim=dict_dim, class_dim=class_dim) + + train_data = paddle.batch( + paddle.reader.shuffle( + paddle.dataset.imdb.train(word_dict), buf_size=1000), + batch_size=BATCH_SIZE) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + feeder = fluid.DataFeeder(feed_list=[data, label], place=place) + + t = fluid.DistributeTranspiler() + # all parameter server endpoints list for spliting parameters + pserver_endpoints = os.getenv("PSERVERS") + # server endpoint for current node + current_endpoint = os.getenv("SERVER_ENDPOINT") + # run as trainer or parameter server + training_role = os.getenv( + "TRAINING_ROLE", "TRAINER") # get the training role: trainer/pserver + t.transpile( + optimize_ops, params_grads, pservers=pserver_endpoints, trainers=2) + + if training_role == "PSERVER": + if not current_endpoint: + print("need env SERVER_ENDPOINT") + exit(1) + pserver_prog = t.get_pserver_program(current_endpoint) + pserver_startup = t.get_startup_program(current_endpoint, pserver_prog) + exe.run(pserver_startup) + exe.run(pserver_prog) + elif training_role == "TRAINER": + exe.run(fluid.default_startup_program()) + trainer_prog = t.get_trainer_program() + for pass_id in xrange(PASS_NUM): + accuracy.reset(exe) + for data in train_data(): + cost_val, acc_val = exe.run(trainer_prog, + feed=feeder.feed(data), + fetch_list=[cost, acc_out]) + pass_acc = accuracy.eval(exe) + print("cost=" + str(cost_val) + " acc=" + 
str(acc_val) + + " pass_acc=" + str(pass_acc)) + if cost_val < 1.0 and acc_val > 0.8: + exit(0) + else: + print("environment var TRAINER_ROLE should be TRAINER os PSERVER") + + +if __name__ == '__main__': + main() From 90a5fd26a9688dcbb09d2f18659296174dcbe31b Mon Sep 17 00:00:00 2001 From: "yi.wu" Date: Sat, 27 Jan 2018 10:26:02 +0800 Subject: [PATCH 033/106] fix boost down link --- cmake/external/boost.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/external/boost.cmake b/cmake/external/boost.cmake index a2c979b2e8..c70d83b3f4 100644 --- a/cmake/external/boost.cmake +++ b/cmake/external/boost.cmake @@ -17,7 +17,7 @@ include(ExternalProject) set(BOOST_PROJECT "extern_boost") set(BOOST_VER "1.41.0") set(BOOST_TAR "boost_1_41_0") -set(BOOST_URL "https://dl.bintray.com/boostorg/release/${BOOST_VER}/source/${BOOST_TAR}.tar.gz") +set(BOOST_URL "http://sourceforge.net/projects/boost/files/boost/${BOOST_VER}/${BOOST_TAR}.tar.gz") set(BOOST_SOURCES_DIR ${THIRD_PARTY_PATH}/boost) set(BOOST_DOWNLOAD_DIR "${BOOST_SOURCES_DIR}/src/${BOOST_PROJECT}") set(BOOST_INCLUDE_DIR "${BOOST_DOWNLOAD_DIR}/${BOOST_TAR}" CACHE PATH "boost include directory." 
FORCE) From 4ce397964b788f1e9bbbe08a8e5f4d4ce21dd2f8 Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Sat, 27 Jan 2018 12:31:30 +0800 Subject: [PATCH 034/106] fix unit test and c++ code --- paddle/operators/layer_norm_op.cc | 44 +++++++++---------- .../v2/fluid/tests/test_layer_norm_op.py | 19 ++++---- 2 files changed, 30 insertions(+), 33 deletions(-) diff --git a/paddle/operators/layer_norm_op.cc b/paddle/operators/layer_norm_op.cc index 0808192565..0b0c760e57 100644 --- a/paddle/operators/layer_norm_op.cc +++ b/paddle/operators/layer_norm_op.cc @@ -233,39 +233,37 @@ class LayerNormGradKernel if (d_x) { d_x->mutable_data(ctx.GetPlace()); auto d_x_map = EigenMatrixMapRowMajor(d_x->data(), left, right); - auto triple_product = [](T ele) { return ele * ele; }; - auto neg_inv_std = [](T ele) { return -std::sqrt(1 / ele); }; + auto triple_product_func = [](T ele) { return ele * ele * ele; }; + auto scale_func = [scale_data](T ele) { return ele * scale_data; }; + auto inv_std_func = [](T ele) { return std::sqrt(1 / ele); }; auto inv_std_scale_func = [scale_data](T ele) { return std::sqrt(1 / ele) * scale_data; }; - auto neg_inv_std_scale_func = [scale_data](T ele) { - return -std::sqrt(1 / ele) * scale_data; - }; // dy_dx auto dx_end = var_map.unaryExpr(inv_std_scale_func) .replicate(1, right) .cwiseProduct(d_y_map); // dy_dmean_dx - auto dmean_end = var_map.unaryExpr(neg_inv_std_scale_func) - .replicate(1, right) - .cwiseProduct(d_y_map) - .rowwise() - .sum(); - auto dx_mean = (T(1.0) / right) * dmean_end.replicate(1, right); + auto dx_mean = (T(-1.0) / right) * + var_map.unaryExpr(inv_std_scale_func) + .replicate(1, right) + .cwiseProduct(d_y_map) + .rowwise() + .sum() + .replicate(1, right); // dy_var_dx - auto dvar_end_0 = (x_map - mean_map.replicate(1, right)) - .cwiseProduct(d_y_map) - .rowwise() - .sum(); - auto dvar_end = var_map.unaryExpr(neg_inv_std) - .unaryExpr(triple_product) - .cwiseProduct(dvar_end_0); - auto dx_var = (T(1.0) / right) * + auto 
dvar_end_part = (x_map - mean_map.replicate(1, right)) + .cwiseProduct(d_y_map) + .rowwise() + .sum(); + auto dvar_end = var_map.unaryExpr(inv_std_func) + .unaryExpr(triple_product_func) + .cwiseProduct(dvar_end_part) + .replicate(1, right); + auto dx_var = (T(-1.0) / right) * (x_map - mean_map.replicate(1, right)) - .cwiseProduct(dvar_end.replicate(1, right)); - - // d_x = (1. / N) * scale * inv_var * (N * d_y - np.sum(d_y, axis=0) - // - (X - mean) * inv_var * inv_var * np.sum(d_y * (X - mean), axis=0)) + .cwiseProduct(dvar_end) + .unaryExpr(scale_func); d_x_map = dx_end + dx_mean + dx_var; } diff --git a/python/paddle/v2/fluid/tests/test_layer_norm_op.py b/python/paddle/v2/fluid/tests/test_layer_norm_op.py index 4ca9754f32..caa3b944eb 100644 --- a/python/paddle/v2/fluid/tests/test_layer_norm_op.py +++ b/python/paddle/v2/fluid/tests/test_layer_norm_op.py @@ -52,18 +52,19 @@ def _reference_layer_norm_grad(x, grad_y, scale, mean, var, epsilon): D = reduce(mul, x_shape, 1) / N grad_y.shape = [N, D] x.shape = [N, D] - grad_offset = np.sum(grad_y) mean.shape = [N, 1] var.shape = [N, 1] - grad_scale = np.sum(((x - mean) * np.sqrt(1 / var)) * grad_y) + + d_scale = np.sum(grad_y).reshape([1, ]) + d_bias = np.sum(((x - mean) * np.sqrt(1 / var)) * grad_y).reshape([1, ]) dx_end = np.sqrt(1.0 / var) * grad_y d_mean_0 = np.sum(-np.sqrt(1.0 / var) * grad_y, axis=1).reshape([N, 1]) - d_mean_1 = np.sum(-1.0 / var * (x - mean) * grad_y, axis=1).reshape( - [N, 1]) * (-1.0 / D * np.sqrt(1.0 / var) * - np.sum(x - mean, axis=1).reshape([N, 1])).reshape([N, 1]) - d_mean = 1.0 / D * (d_mean_0 + d_mean_1) + # d_mean_1 = np.sum(-1.0 / var * (x - mean) * grad_y, axis=1).reshape( + # [N, 1]) * (-1.0 / D * np.sqrt(1.0 / var) * + # np.sum(x - mean, axis=1).reshape([N, 1])).reshape([N, 1]) + d_mean = 1.0 / D * (d_mean_0) d_std = np.sum(-1.0 / var * (x - mean) * grad_y, axis=1).reshape([N, 1]) * ( 1.0 / D * np.sqrt(1.0 / var).reshape([N, 1]) * (x - mean)) @@ -73,7 +74,7 @@ def 
_reference_layer_norm_grad(x, grad_y, scale, mean, var, epsilon): grad_y.shape = x_shape x.shape = x_shape - return grad_x, grad_scale, grad_offset + return grad_x, d_bias, d_scale def create_or_get_tensor(scope, var_name, var, place): @@ -144,7 +145,7 @@ class TestLayerNormdOp(OpTest): epsilon = 0.00001 x_shape = shape scale_shape = [1] - + np.random.random(123) x_val = np.random.random_sample(x_shape).astype(np.float32) scale_val = np.random.random_sample(scale_shape).astype(np.float32) bias_val = np.random.random_sample(scale_shape).astype(np.float32) @@ -154,7 +155,6 @@ class TestLayerNormdOp(OpTest): x_val, scale_val, bias_val, epsilon) # for gradient test - # y_grad = np.ones(x_shape).astype(np.float32) * 0.00277778 y_grad = np.random.random_sample(x_shape).astype(np.float32) x_grad_ref, scale_grad_ref, bias_grad_ref = _reference_layer_norm_grad( @@ -229,7 +229,6 @@ class TestLayerNormdOp(OpTest): for place in places: test_with_place(place, [2, 3, 4, 5]) - test_with_place(place, [2, 3]) if __name__ == '__main__': From a026f52b73b1d9ec80b95346de8671b77effa95c Mon Sep 17 00:00:00 2001 From: chengduo Date: Sun, 28 Jan 2018 03:59:44 +0800 Subject: [PATCH 035/106] refine channel (#7910) --- paddle/{operators/detail => framework}/channel.h | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) rename paddle/{operators/detail => framework}/channel.h (94%) diff --git a/paddle/operators/detail/channel.h b/paddle/framework/channel.h similarity index 94% rename from paddle/operators/detail/channel.h rename to paddle/framework/channel.h index cbfdf80040..9ba0fc5c55 100644 --- a/paddle/operators/detail/channel.h +++ b/paddle/framework/channel.h @@ -18,8 +18,7 @@ limitations under the License. 
*/ #include namespace paddle { -namespace operators { -namespace detail { +namespace framework { template class Channel { @@ -38,7 +37,7 @@ class Channel { channel_.push_back(std::move(*channel_element)); lock.unlock(); - empty_cond_var_.notify_all(); + empty_cond_var_.notify_one(); } T* Receive() { @@ -75,7 +74,7 @@ class Channel { void NotifyAllSenders(std::unique_lock* lock) { if (IsBounded()) { lock->unlock(); - full_cond_var_.notify_all(); + full_cond_var_.notify_one(); } } @@ -84,6 +83,5 @@ class Channel { bool IsCapacityFull() const { return channel_.size() >= capacity_; } }; -} // namespace detail } // namespace operator } // namespace paddle From 0f0ce4e5ec66d7661fac57c1ab6aa4cd71d7e2c3 Mon Sep 17 00:00:00 2001 From: Yancey Date: Sun, 28 Jan 2018 10:30:44 +0800 Subject: [PATCH 036/106] Fix cpplint (#7914) --- paddle/operators/detail/grpc_client.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/paddle/operators/detail/grpc_client.cc b/paddle/operators/detail/grpc_client.cc index 90e2b29659..c433945542 100644 --- a/paddle/operators/detail/grpc_client.cc +++ b/paddle/operators/detail/grpc_client.cc @@ -101,8 +101,8 @@ bool RPCClient::Wait() { if (req_count_ <= 0) { return true; } - - bool a[req_count_]; + const size_t kReqCnt = req_count_; + bool a[kReqCnt]; std::vector> waits(req_count_); for (int i = 0; i < req_count_; i++) { From 3646be7bfca135fc20eb291dcbe14e13c93d8429 Mon Sep 17 00:00:00 2001 From: kexinzhao Date: Sat, 27 Jan 2018 23:24:05 -0800 Subject: [PATCH 037/106] Fix some typos in support new device doc (#7895) * fix some typos * fix --- doc/design/support_new_device.md | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/doc/design/support_new_device.md b/doc/design/support_new_device.md index 4c5f10e2ec..b154f01d32 100644 --- a/doc/design/support_new_device.md +++ b/doc/design/support_new_device.md @@ -2,9 +2,9 @@ ## Background -Deep learning has a high demand for computing resources. 
New high-performance devices and computing libraries are appearing very frequently. Deep learning frameworks have to integrate these high-performance devices and computing libraries flexibly and efficiently. +Deep learning has a high demand for computing resources. New high-performance devices and computing libraries are appearing very frequently. Deep learning frameworks have to integrate these high-performance devices and computing libraries in a flexible and efficient manner. -On one hand, hardware and computing libraries usually do not have a one-to-one correspondence. For example,Intel CPUs support Eigen and MKL computing libraries while Nvidia GPUs support Eigen and cuDNN computing libraries. We have to implement operator specific kernels for each computing library. +On one hand, hardware and computing libraries usually do not have a one-to-one correspondence. For example, Intel CPUs support Eigen and MKL computing libraries while Nvidia GPUs support Eigen and cuDNN computing libraries. We have to implement operator specific kernels for each computing library. On the other hand, users usually do not want to care about the low-level hardware and computing libraries when writing a neural network configuration. In Fluid, `Layer` is exposed in `Python`, and `Operator` is exposed in `C++`. Both `Layer` and `Operator` are hardware independent. @@ -17,7 +17,7 @@ For a general overview of fluid, please refer to the [overview doc](https://gith There are mainly three parts that we have to consider while integrating a new device/library: -- Place and DeviceContext: indicates the device id and manages hardware resources +- Place and DeviceContext: indicate the device id and manage hardware resources - Memory and Tensor: malloc/free data on certain device @@ -25,10 +25,10 @@ There are mainly three parts that we have to consider while integrating a new de ### Place and DeviceContext -Please remind that device and computing library are not one-to-one corresponding. 
A device can have a lot of computing libraries and a computing library can also support several devices. +Please note that device and computing library are not one-to-one corresponding. A device can have a lot of computing libraries and a computing library can also support several devices. #### Place -Fluid uses class [Place](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/platform/place.h#L55) to represent the device memory where data is located. If we add another device, we have to add corresponding `DevicePlace`. +Fluid uses class [Place](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/platform/place.h#L55) to represent the device memory where data is located. If we add another device, we have to add the corresponding `DevicePlace`. ``` | CPUPlace @@ -144,7 +144,7 @@ class Tensor { }; ``` -`Placeholder` is used to delay memory allocation; that is, we can first define a tensor, using `Resize` to configure its shape, and then call `mutuable_data` to allocate the actual memory. +`Placeholder` is used to delay memory allocation; that is, we can first define a tensor, using `Resize` to configurate its shape, and then call `mutuable_data` to allocate the actual memory. ```cpp paddle::framework::Tensor t; @@ -163,7 +163,7 @@ Fluid implements computing units based on different DeviceContexts. Some computi Let's take [MaxOutFunctor](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/math/maxouting.h#L27) as an example: -The interface is defined in header file. +The interface is defined in the header file. ``` template @@ -203,7 +203,7 @@ class MaxOutFunctor { ``` -We get computing handle from a concrete DeviceContext, and make compution on tensors. +We first obtain the computing handle from a concrete DeviceContext, and then compute on tensors. The implemention of `OpKernel` is similar to math functors, the extra thing we need to do is to register the OpKernel in a global map. 
@@ -231,7 +231,7 @@ REGISTER_OP_CUDA_KERNEL( ## Advanced topics: How to switch between different Device/Library -Generally, we will impelement OpKernel for all Device/Library of an Operator. We can easily train a Convolutional Neural Network in GPU. However, some OpKernel is not sutibale on a specific Device. For example, crf operator can only run on CPU, whereas most other operators can run at GPU. To achieve high performance in such circumstance, we have to switch between different Device/Library. +Generally, we will implement OpKernel for all Device/Library of an Operator. We can easily train a Convolutional Neural Network in GPU. However, some OpKernel is not suitable on a specific Device. For example, crf operator can only run on CPU, whereas most other operators can run on GPU. To achieve high performance in such circumstance, we have to switch between different Device/Library. For more details, please refer to following docs: From 634faab1c06f9700297444789aa4336738a8100d Mon Sep 17 00:00:00 2001 From: Yibing Liu Date: Sun, 28 Jan 2018 00:53:15 -0800 Subject: [PATCH 038/106] Format doc & add unit test for dynamic_lstmp api --- doc/api/v2/fluid/layers.rst | 4 +- paddle/operators/CMakeLists.txt | 1 + python/paddle/v2/fluid/layers/nn.py | 58 ++++++++++++--------- python/paddle/v2/fluid/tests/test_layers.py | 12 +++++ 4 files changed, 48 insertions(+), 27 deletions(-) diff --git a/doc/api/v2/fluid/layers.rst b/doc/api/v2/fluid/layers.rst index 6c6af106db..231ec2d4ba 100644 --- a/doc/api/v2/fluid/layers.rst +++ b/doc/api/v2/fluid/layers.rst @@ -19,11 +19,11 @@ dynamic_lstm :noindex: dynamic_lstmp ------------- +------------- .. autofunction:: paddle.v2.fluid.layers.dynamic_lstmp :noindex: - dynamic_gru +dynamic_gru ----------- .. 
autofunction:: paddle.v2.fluid.layers.dynamic_gru :noindex: diff --git a/paddle/operators/CMakeLists.txt b/paddle/operators/CMakeLists.txt index 15f7cb6b56..48cf5816cc 100644 --- a/paddle/operators/CMakeLists.txt +++ b/paddle/operators/CMakeLists.txt @@ -147,6 +147,7 @@ op_library(max_sequence_len_op DEPS lod_rank_table) op_library(sequence_conv_op DEPS context_project) op_library(sequence_pool_op DEPS sequence_pooling) op_library(lstm_op DEPS sequence2batch lstm_compute) +op_library(lstmp_op DEPS sequence2batch lstm_compute) op_library(gru_op DEPS sequence2batch gru_compute) op_library(recurrent_op DEPS executor) op_library(warpctc_op DEPS dynload_warpctc sequence_padding sequence_scale math_function) diff --git a/python/paddle/v2/fluid/layers/nn.py b/python/paddle/v2/fluid/layers/nn.py index fa03a64291..c23cd733f1 100644 --- a/python/paddle/v2/fluid/layers/nn.py +++ b/python/paddle/v2/fluid/layers/nn.py @@ -257,7 +257,8 @@ def dynamic_lstm(input, gate_activation='sigmoid', cell_activation='tanh', candidate_activation='tanh', - dtype='float32'): + dtype='float32', + name=None): """ **Dynamic LSTM Layer** @@ -309,25 +310,25 @@ def dynamic_lstm(input, (T X 4D), where T is the total time steps in this mini-batch, D is the hidden size. size(int): 4 * hidden size. - param_attr(ParamAttr): The parameter attribute for the learnable + param_attr(ParamAttr|None): The parameter attribute for the learnable hidden-hidden weights. - - The shape is (D x 4D), where D is the hidden - size. - Weights = {:math:`W_{ch}, W_{ih}, \ W_{fh}, W_{oh}`} - bias_attr(ParamAttr): The bias attribute for the learnable bias + - The shape is (D x 4D), where D is the hidden + size. + bias_attr(ParamAttr|None): The bias attribute for the learnable bias weights, which contains two parts, input-hidden bias weights and peephole connections weights if setting `use_peepholes` to `True`. 1. `use_peepholes = False` - - The shape is (1 x 4D). - Biases = {:math:`b_c, b_i, b_f, b_o`}. 
+ - The shape is (1 x 4D). 2. `use_peepholes = True` - - The shape is (1 x 7D). - Biases = { :math:`b_c, b_i, b_f, b_o, W_{ic}, \ W_{fc}, W_{oc}`}. + - The shape is (1 x 7D). use_peepholes(bool): Whether to enable diagonal/peephole connections, default `True`. is_reverse(bool): Whether to compute reversed LSTM, default `False`. @@ -340,6 +341,8 @@ def dynamic_lstm(input, Choices = ["sigmoid", "tanh", "relu", "identity"], default "tanh". dtype(str): Data type. Choices = ["float32", "float64"], default "float32". + name(str|None): A name for this layer(optional). If set None, the layer + will be named automatically. Returns: tuple: The hidden state, and cell state of LSTM. The shape of both \ @@ -354,6 +357,7 @@ def dynamic_lstm(input, forward, _ = fluid.layers.dynamic_lstm( input=forward_proj, size=hidden_dim * 4, use_peepholes=False) """ + helper = LayerHelper('lstm', **locals()) size = size / 4 weight = helper.create_parameter( @@ -401,7 +405,8 @@ def dynamic_lstmp(input, cell_activation='tanh', candidate_activation='tanh', proj_activation='tanh', - dtype='float32'): + dtype='float32', + name=None): """ **Dynamic LSTMP Layer** @@ -416,19 +421,19 @@ def dynamic_lstmp(input, .. 
math:: - i_t = \sigma(W_{ix}x_{t} + W_{ir}r_{t-1} + W_{ic}c_{t-1} + b_i) \\ + i_t & = \sigma(W_{ix}x_{t} + W_{ir}r_{t-1} + W_{ic}c_{t-1} + b_i) - f_t = \sigma(W_{fx}x_{t} + W_{fr}r_{t-1} + W_{fc}c_{t-1} + b_f) \\ + f_t & = \sigma(W_{fx}x_{t} + W_{fr}r_{t-1} + W_{fc}c_{t-1} + b_f) - \tilde{c_t} = act_g(W_{cx}x_t + W_{cr}r_{t-1} + b_c) \\ + \\tilde{c_t} & = act_g(W_{cx}x_t + W_{cr}r_{t-1} + b_c) - o_t = \sigma(W_{ox}x_{t} + W_{or}r_{t-1} + W_{oc}c_t + b_o) \\ + o_t & = \sigma(W_{ox}x_{t} + W_{or}r_{t-1} + W_{oc}c_t + b_o) - c_t = f_t \odot c_{t-1} + i_t \odot \tilde{c_t} \\ + c_t & = f_t \odot c_{t-1} + i_t \odot \\tilde{c_t} - h_t = o_t \odot act_h(c_t) \\ + h_t & = o_t \odot act_h(c_t) - r_t = \overline{act_h}(W_{rh}h_t) + r_t & = \overline{act_h}(W_{rh}h_t) where the :math:`W` terms denote weight matrices (e.g. :math:`W_{xi}` is the matrix of weights from the input gate to the input), :math:`W_{ic}`, @@ -441,7 +446,7 @@ def dynamic_lstmp(input, vectors, respectively, all of which have the same size as the cell output activation vector :math:`h`. Here :math:`h` is usually called the hidden state and :math:`r` denotes its recurrent projection. And - :math:`\tilde{c_t}` is also called the candidate hidden state, whose + :math:`\\tilde{c_t}` is also called the candidate hidden state, whose computation is based on the current input and previous hidden state. The :math:`\odot` is the element-wise product of the vectors. :math:`act_g` @@ -466,28 +471,28 @@ def dynamic_lstmp(input, mini-batch, D is the hidden size. size(int): 4 * hidden size. proj_size(int): The size of projection output. - param_attr(ParamAttr): The parameter attribute for the learnable + param_attr(ParamAttr|None): The parameter attribute for the learnable hidden-hidden weight and projection weight. + - Hidden-hidden weight = {:math:`W_{ch}, W_{ih}, \ + W_{fh}, W_{oh}`}. - The shape of hidden-hidden weight is (P x 4D), where P is the projection size and D the hidden size. 
- - The shape of projection weight is (D x P). - - Hidden-hidden weight = {:math:`W_{ch}, W_{ih}, \ - W_{fh}, W_{oh}`}. - Projection weight = {:math:`W_{rh}`}. - bias_attr(ParamAttr): The bias attribute for the learnable bias + - The shape of projection weight is (D x P). + bias_attr(ParamAttr|None): The bias attribute for the learnable bias weights, which contains two parts, input-hidden bias weights and peephole connections weights if setting `use_peepholes` to `True`. 1. `use_peepholes = False` - - The shape is (1 x 4D). - Biases = {:math:`b_c, b_i, b_f, b_o`}. + - The shape is (1 x 4D). 2. `use_peepholes = True` - - The shape is (1 x 7D). - Biases = { :math:`b_c, b_i, b_f, b_o, W_{ic}, \ W_{fc}, W_{oc}`}. + - The shape is (1 x 7D). use_peepholes(bool): Whether to enable diagonal/peephole connections, default `True`. is_reverse(bool): Whether to compute reversed LSTM, default `False`. @@ -503,10 +508,12 @@ def dynamic_lstmp(input, Choices = ["sigmoid", "tanh", "relu", "identity"], default "tanh". dtype(str): Data type. Choices = ["float32", "float64"], default "float32". + name(str|None): A name for this layer(optional). If set None, the layer + will be named automatically. Returns: - tuple: The projection of hidden state, and cell state of LSTMP. The - shape of projection is (T x P), for the cell state which is + tuple: The projection of hidden state, and cell state of LSTMP. The \ + shape of projection is (T x P), for the cell state which is \ (T x D), and both LoD is the same with the `input`. 
Examples: @@ -519,6 +526,7 @@ def dynamic_lstmp(input, proj_out, _ = fluid.layers.dynamic_lstmp(input=fc_out, size=hidden_dim * 4, proj_size=proj_dim, use_peepholes=False) """ + helper = LayerHelper('lstmp', **locals()) size = size / 4 weight = helper.create_parameter( diff --git a/python/paddle/v2/fluid/tests/test_layers.py b/python/paddle/v2/fluid/tests/test_layers.py index 4e86362542..3f54e28def 100644 --- a/python/paddle/v2/fluid/tests/test_layers.py +++ b/python/paddle/v2/fluid/tests/test_layers.py @@ -202,6 +202,18 @@ class TestBook(unittest.TestCase): x_t=x_t, hidden_t_prev=prev_hidden, cell_t_prev=prev_cell)) print(str(program)) + def test_dynamic_lstmp(self): + program = Program() + with program_guard(program): + hidden_dim, proj_dim = 16, 8 + seq_data = layers.data( + name='seq_data', shape=[10, 10], dtype='float32', lod_level=1) + fc_out = layers.fc(input=seq_data, size=4 * hidden_dim) + self.assertIsNotNone( + layers.dynamic_lstmp( + input=fc_out, size=4 * hidden_dim, proj_size=proj_dim)) + print(str(program)) + def test_sequence_softmax(self): program = Program() with program_guard(program): From a0669e387b1b4f5564ae51fdc561689b1d86cd04 Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Sun, 28 Jan 2018 17:43:09 +0800 Subject: [PATCH 039/106] dtype2str --- paddle/framework/data_type.h | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/paddle/framework/data_type.h b/paddle/framework/data_type.h index 6a372ac32e..98eb3e857d 100644 --- a/paddle/framework/data_type.h +++ b/paddle/framework/data_type.h @@ -79,5 +79,33 @@ inline void VisitDataType(proto::DataType type, Visitor visitor) { } } +inline std::string DataTypeToString(const proto::DataType type) { + using namespace paddle::framework::proto; + switch (type) { + case DataType::FP16: + return "float16"; + case DataType::FP32: + return "float32"; + case DataType::FP64: + return "float64"; + case DataType::INT16: + return "int16"; + case DataType::INT32: + return "int32"; + 
case DataType::INT64: + return "int64"; + case DataType::BOOL: + return "bool"; + default: + PADDLE_THROW("Not support type %d", type); + } +} + +inline std::ostream& operator<<(std::ostream& out, + const proto::DataType& type) { + out << DataTypeToString(type); + return out; +} + } // namespace framework } // namespace paddle From 0f47703dd5db01a7510031e810f963e09a8c9c13 Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Sun, 28 Jan 2018 16:41:18 +0800 Subject: [PATCH 040/106] add begin_norm_axis --- paddle/operators/layer_norm_op.cc | 47 ++++++++++++------- .../v2/fluid/tests/test_layer_norm_op.py | 28 ++++++----- 2 files changed, 47 insertions(+), 28 deletions(-) diff --git a/paddle/operators/layer_norm_op.cc b/paddle/operators/layer_norm_op.cc index 0b0c760e57..9e618d10d2 100644 --- a/paddle/operators/layer_norm_op.cc +++ b/paddle/operators/layer_norm_op.cc @@ -42,10 +42,17 @@ class LayerNormOp : public framework::OperatorWithKernel { PADDLE_ENFORCE_EQ(ctx->GetInputDim("Scale")[0], 1); PADDLE_ENFORCE_EQ(ctx->GetInputDim("Bias").size(), 1UL); PADDLE_ENFORCE_EQ(ctx->GetInputDim("Bias")[0], 1); + auto x_dim = ctx->GetInputDim("X"); + auto begin_norm_axis = ctx->Attrs().Get("begin_norm_axis"); + PADDLE_ENFORCE_LT(begin_norm_axis, x_dim.size(), + "'begin_norm_axis' must be less than the rank of X"); + + auto matrix_dim = framework::flatten_to_2d(x_dim, begin_norm_axis); + int left = static_cast(matrix_dim[0]); ctx->SetOutputDim("Y", ctx->GetInputDim("X")); - ctx->SetOutputDim("Mean", {ctx->GetInputDim("X")[0]}); - ctx->SetOutputDim("Variance", {ctx->GetInputDim("X")[0]}); + ctx->SetOutputDim("Mean", {left}); + ctx->SetOutputDim("Variance", {left}); ctx->ShareLoD("X", "Y"); } @@ -72,10 +79,14 @@ class LayerNormOpMaker : public framework::OpProtoAndCheckerMaker { PADDLE_ENFORCE(epsilon >= 0.0f && epsilon <= 0.001f, "'epsilon' should be between 0.0 and 0.001."); }); - AddAttr>("axis", - "(vector default:{1, 1, 1}), the " - "axis to normalize.") - .SetDefault({1, 2, 3}); 
// todo(zcd) : who to set axis + AddAttr("begin_norm_axis", + "(int default:1), the " + "axis of `begin_norm_axis ... Rank(X) - 1` will be normalized") + .SetDefault(1) + .AddCustomChecker([](const int &begin_norm_axis) { + PADDLE_ENFORCE_GT(begin_norm_axis, 0, + "'begin_norm_axis' should be greater than zero."); + }); AddComment(R"DOC( Layer Normalization. @@ -97,9 +108,7 @@ class LayerNormKernel const auto *bias = ctx.Input("Bias"); const auto *x = ctx.Input("X"); const auto &x_dims = x->dims(); - - const int N = x_dims[0]; - const int sample_size = x->numel() / N; + const auto begin_norm_axis = ctx.Attr("begin_norm_axis"); auto scale_data = scale->data()[0]; auto bias_data = bias->data()[0]; @@ -111,7 +120,9 @@ class LayerNormKernel mean->mutable_data(ctx.GetPlace()); var->mutable_data(ctx.GetPlace()); - int left = N, right = sample_size; + auto matrix_dim = framework::flatten_to_2d(x_dims, begin_norm_axis); + int left = static_cast(matrix_dim[0]); + int right = static_cast(matrix_dim[1]); auto input_map = ConstEigenMatrixMapRowMajor(x->data(), left, right); auto mean_map = EigenMatrixMapRowMajor(mean->data(), left, 1); auto var_map = EigenMatrixMapRowMajor(var->data(), left, 1); @@ -131,7 +142,8 @@ class LayerNormKernel return std::sqrt(1 / ele) * scale_data; }; auto sub_bias = [bias_data](T ele) { return bias_data - ele; }; - + // TODO(zcd): Some thinking about output_map, is it appropriate that + // `output_map` and `input_map` point to the same memory. 
output_map = (var_map.unaryExpr(scale_inv_std).replicate(1, right)) .cwiseProduct(input_map) + var_map.unaryExpr(scale_inv_std) @@ -198,13 +210,14 @@ class LayerNormGradKernel const auto *var = ctx.Input("Variance"); const auto *scale = ctx.Input("Scale"); const auto *d_y = ctx.Input(framework::GradVarName("Y")); + auto scale_data = scale->data()[0]; const auto &x_dims = x->dims(); - const int N = x_dims[0]; - const int sample_size = x->numel() / N; - int left = N, right = sample_size; - auto scale_data = scale->data()[0]; + const auto begin_norm_axis = ctx.Attr("begin_norm_axis"); + auto matrix_dim = framework::flatten_to_2d(x_dims, begin_norm_axis); + int left = static_cast(matrix_dim[0]), + right = static_cast(matrix_dim[1]); // init output auto *d_x = ctx.Output(framework::GradVarName("X")); @@ -223,11 +236,13 @@ class LayerNormGradKernel if (d_scale) { d_scale->mutable_data(ctx.GetPlace()); auto inv_std = [](T ele) { return std::sqrt(1 / ele); }; + // There are two equation to compute d_scale. One uses "Y" and the other + // does not use "Y" d_scale->data()[0] = ((x_map - mean_map.replicate(1, right)) .cwiseProduct(var_map.unaryExpr(inv_std).replicate(1, right)) .cwiseProduct(d_y_map)) - .sum(); // also can use `y` to get d_scale_map + .sum(); } if (d_x) { diff --git a/python/paddle/v2/fluid/tests/test_layer_norm_op.py b/python/paddle/v2/fluid/tests/test_layer_norm_op.py index caa3b944eb..8ce327436f 100644 --- a/python/paddle/v2/fluid/tests/test_layer_norm_op.py +++ b/python/paddle/v2/fluid/tests/test_layer_norm_op.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- import unittest import numpy as np @@ -33,23 +32,24 @@ def get_backward_op(scope, op, no_grad_set): return backward_op -def _reference_layer_norm_naive(x, scale, beta, epsilon): +def _reference_layer_norm_naive(x, scale, beta, epsilon, begin_norm_axis=1): old_shape = x.shape - N = x.shape[0] - D = reduce(mul, old_shape, 1) / N + N = reduce(mul, old_shape[0:begin_norm_axis], 1) + D = reduce(mul, old_shape[begin_norm_axis:len(old_shape)], 1) x.shape = [N, D] mean = np.mean(x, axis=1) var = np.var(x, axis=1) + epsilon output = scale * np.divide((x - mean.reshape([N, 1])), (np.sqrt(var)).reshape([N, 1])) + beta output.shape = old_shape + x.shape = old_shape return output, mean, var -def _reference_layer_norm_grad(x, grad_y, scale, mean, var, epsilon): +def _reference_layer_norm_grad(x, grad_y, scale, mean, var, begin_norm_axis=1): x_shape = x.shape - N = x_shape[0] - D = reduce(mul, x_shape, 1) / N + N = reduce(mul, x_shape[0:begin_norm_axis], 1) + D = reduce(mul, x_shape[begin_norm_axis:len(x_shape)], 1) grad_y.shape = [N, D] x.shape = [N, D] mean.shape = [N, 1] @@ -140,7 +140,9 @@ class TestLayerNormdOp(OpTest): self.assertLessEqual(max_diff, max_relative_error, err_msg()) def test_forward_backward(self): - def test_with_place(place, shape): + def test_with_place(place, shape, begin_norm_axis=1): + assert begin_norm_axis > 0 and begin_norm_axis < len( + shape), 'begin_norm_axis must be between 0 and len(shape)-1.' 
# attr epsilon = 0.00001 x_shape = shape @@ -152,13 +154,13 @@ class TestLayerNormdOp(OpTest): # run forward y_out, saved_mean, var_ref = _reference_layer_norm_naive( - x_val, scale_val, bias_val, epsilon) + x_val, scale_val, bias_val, epsilon, begin_norm_axis) # for gradient test y_grad = np.random.random_sample(x_shape).astype(np.float32) x_grad_ref, scale_grad_ref, bias_grad_ref = _reference_layer_norm_grad( - x_val, y_grad, scale_val, saved_mean, var_ref, epsilon) + x_val, y_grad, scale_val, saved_mean, var_ref, begin_norm_axis) scope = core.Scope() @@ -185,7 +187,8 @@ class TestLayerNormdOp(OpTest): Mean="Mean", Variance="Variance", # attrs - epsilon=epsilon) + epsilon=epsilon, + begin_norm_axis=begin_norm_axis) layer_norm_op.run(scope, place) @@ -228,7 +231,8 @@ class TestLayerNormdOp(OpTest): places.append(core.CUDAPlace(0)) for place in places: - test_with_place(place, [2, 3, 4, 5]) + test_with_place(place, [2, 3, 4, 5], begin_norm_axis=1) + test_with_place(place, [2, 3, 4, 5], begin_norm_axis=3) if __name__ == '__main__': From e3952b9fae456c88aee758976d494467258cfea1 Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Sun, 28 Jan 2018 23:31:06 +0800 Subject: [PATCH 041/106] fix unit test --- paddle/framework/op_kernel_type_test.cc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/paddle/framework/op_kernel_type_test.cc b/paddle/framework/op_kernel_type_test.cc index 649afeee8a..cb23bbde01 100644 --- a/paddle/framework/op_kernel_type_test.cc +++ b/paddle/framework/op_kernel_type_test.cc @@ -26,9 +26,9 @@ TEST(OpKernelType, ToString) { OpKernelType op_kernel_type(DataType::FP32, CPUPlace(), DataLayout::kNCHW, LibraryType::kCUDNN); - ASSERT_EQ( - paddle::framework::KernelTypeToString(op_kernel_type), - "data_type[5]:data_layout[NCHW]:place[CPUPlace]:library_type[CUDNN]"); + ASSERT_EQ(paddle::framework::KernelTypeToString(op_kernel_type), + "data_type[float32]:data_layout[NCHW]:place[CPUPlace]:library_type[" + "CUDNN]"); } TEST(OpKernelType, 
Hash) { From 4fb3c676a8f229684f0369d5e2e0d549c97565a8 Mon Sep 17 00:00:00 2001 From: Yi Wang Date: Sun, 28 Jan 2018 08:50:55 -0800 Subject: [PATCH 042/106] Polish threadpool (#7918) * Polish threadpool * Add #include * Rename variables * Rename variables * clang-format --- paddle/framework/threadpool.cc | 91 ++++++++++++++++--- paddle/framework/threadpool.h | 134 ++++++++-------------------- paddle/framework/threadpool_test.cc | 6 +- 3 files changed, 118 insertions(+), 113 deletions(-) diff --git a/paddle/framework/threadpool.cc b/paddle/framework/threadpool.cc index 109a7e7dc4..b2f5ae4a96 100644 --- a/paddle/framework/threadpool.cc +++ b/paddle/framework/threadpool.cc @@ -1,24 +1,93 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
*/ #include "paddle/framework/threadpool.h" namespace paddle { namespace framework { -std::unique_ptr ThreadPool::threadpool(nullptr); -std::once_flag ThreadPool::init_flag; +std::unique_ptr ThreadPool::threadpool_(nullptr); +std::once_flag ThreadPool::init_flag_; + +ThreadPool* ThreadPool::GetInstance() { + std::call_once(init_flag_, &ThreadPool::Init); + return threadpool_.get(); +} + +void ThreadPool::Init() { + if (threadpool_.get() == nullptr) { + // TODO(Yancey1989): specify the max threads number + int num_threads = std::thread::hardware_concurrency(); + PADDLE_ENFORCE_GT(num_threads, 0); + threadpool_.reset(new ThreadPool(num_threads)); + } +} + +ThreadPool::ThreadPool(int num_threads) + : total_threads_(num_threads), idle_threads_(num_threads), running_(true) { + threads_.resize(num_threads); + for (auto& thread : threads_) { + // TODO(Yancey1989): binding the thread on the specify CPU number + thread.reset(new std::thread(std::bind(&ThreadPool::TaskLoop, this))); + } +} + +ThreadPool::~ThreadPool() { + { + // notify all threads to stop running + running_ = false; + scheduled_.notify_all(); + } + + for (auto& t : threads_) { + t->join(); + t.reset(nullptr); + } +} + +void ThreadPool::Wait() { + std::unique_lock lock(mutex_); + completed_.wait(lock, [=] { return Done() == true; }); +} + +void ThreadPool::TaskLoop() { + while (running_) { + std::unique_lock lock(mutex_); + scheduled_.wait(lock, [=] { return !tasks_.empty() || !running_; }); + + if (!running_) { + break; + } + // pop a task from the task queue + auto task = std::move(tasks_.front()); + tasks_.pop(); + + --idle_threads_; + lock.unlock(); + + // run the task + task(); + + { + std::unique_lock lock(mutex_); + ++idle_threads_; + if (Done()) { + completed_.notify_all(); + } + } + } +} } // namespace framework } // namespace paddle diff --git a/paddle/framework/threadpool.h b/paddle/framework/threadpool.h index 3ac345851c..8912b1a43a 100644 --- a/paddle/framework/threadpool.h +++ 
b/paddle/framework/threadpool.h @@ -20,52 +20,36 @@ limitations under the License. */ #include #include #include +#include #include "paddle/platform/enforce.h" namespace paddle { namespace framework { +// ThreadPool maintains a queue of tasks, and runs them using a fixed +// number of threads. class ThreadPool { public: typedef std::packaged_task Task; - /** - * @brief Get a instance of threadpool, the thread number will - * be specified as the number of hardware thread contexts - */ - static ThreadPool* GetInstance() { - std::call_once(init_flag, &ThreadPool::Init); - return threadpool.get(); - } + // Returns the singleton of ThreadPool. + static ThreadPool* GetInstance(); - ~ThreadPool() { - { - // notify all threads to stop running - running_ = false; - scheduled_.notify_all(); - } - - for (auto& t : threads_) { - t->join(); - t.reset(nullptr); - } - } + ~ThreadPool(); - int GetNumThreads() const { return num_threads_; } + // Returns the number of threads created by the constructor. + size_t Threads() const { return total_threads_; } - int GetAvailable() { + // Returns the number of currently idle threads. + size_t IdleThreads() { std::unique_lock lock(mutex_); - return available_; + return idle_threads_; } - /** - * @brief Push a function to the queue, and will be scheduled and - * executed if a thread is available. - * @param[in] Task, will be pushed to the task queue. - * @return std::future, we could wait for the task finished by - * f.wait(). - */ + // Run pushes a function to the task queue and returns a std::future + // object. To wait for the completion of the task, call + // std::future::wait(). template std::future Run(Callback fn) { std::unique_lock lock(mutex_); @@ -77,84 +61,40 @@ class ThreadPool { return f; } - /** - * @brief Wait until all the tasks are completed. - */ - void Wait() { - std::unique_lock lock(mutex_); - completed_.wait(lock, [=] { return Done() == true; }); - } + // Wait until all the tasks are completed. 
+ void Wait(); private: DISABLE_COPY_AND_ASSIGN(ThreadPool); - explicit ThreadPool(int num_threads) - : num_threads_(num_threads), available_(num_threads), running_(true) { - threads_.resize(num_threads); - for (auto& thread : threads_) { - // TODO(Yancey1989): binding the thread on the specify CPU number - thread.reset(new std::thread(std::bind(&ThreadPool::TaskLoop, this))); - } - } + explicit ThreadPool(int num_threads); - /** - * @brief If the task queue is empty and avaialbe - * is equal to the number of threads, means that - * all tasks are completed. - * - * Note: this function is not thread-safe. - * - * @return true if all tasks are completed. - */ - bool Done() { return tasks_.empty() && available_ == num_threads_; } - - void TaskLoop() { - while (running_) { - std::unique_lock lock(mutex_); - scheduled_.wait(lock, [=] { return !tasks_.empty() || !running_; }); - - if (!running_) { - break; - } - // pop a task from the task queue - auto task = std::move(tasks_.front()); - tasks_.pop(); - - --available_; - lock.unlock(); - - // run the task - task(); - - { - std::unique_lock lock(mutex_); - ++available_; - if (Done()) { - completed_.notify_all(); - } - } - } - } + // If the task queue is empty and avaialbe is equal to the number of + // threads, means that all tasks are completed. Note: this function + // is not thread-safe. Returns true if all tasks are completed. + // Note: don't delete the data member total_threads_ and use + // threads_.size() instead; because you'd need to lock the mutex + // before accessing threads_. 
+ bool Done() { return tasks_.empty() && idle_threads_ == total_threads_; } - static void Init() { - if (threadpool.get() == nullptr) { - // TODO(Yancey1989): specify the max threads number - int num_threads = std::thread::hardware_concurrency(); - PADDLE_ENFORCE_GT(num_threads, 0); - threadpool.reset(new ThreadPool(num_threads)); - } - } + // The constructor starts threads to run TaskLoop, which retrieves + // and runs tasks from the queue. + void TaskLoop(); + + // Init is called by GetInstance. + static void Init(); private: - static std::unique_ptr threadpool; - static std::once_flag init_flag; + static std::unique_ptr threadpool_; + static std::once_flag init_flag_; - int num_threads_; - int available_; - bool running_; - std::queue tasks_; std::vector> threads_; + const size_t total_threads_; + size_t idle_threads_; + + std::queue tasks_; std::mutex mutex_; + bool running_; std::condition_variable scheduled_; std::condition_variable completed_; }; diff --git a/paddle/framework/threadpool_test.cc b/paddle/framework/threadpool_test.cc index 50b6238cd8..3fbfe7efc8 100644 --- a/paddle/framework/threadpool_test.cc +++ b/paddle/framework/threadpool_test.cc @@ -22,11 +22,7 @@ namespace framework = paddle::framework; void do_sum(framework::ThreadPool* pool, std::atomic& sum, int cnt) { std::vector> fs; for (int i = 0; i < cnt; ++i) { - auto f = pool->Run([&sum]() { sum.fetch_add(1); }); - fs.push_back(std::move(f)); - } - for (auto& f : fs) { - f.wait(); + fs.push_back(framework::Async([&sum]() { sum.fetch_add(1); })); } } From 59357f4fb94d4589b9b51a33d1f3febb00653779 Mon Sep 17 00:00:00 2001 From: Qiao Longfei Date: Mon, 29 Jan 2018 07:15:44 +0800 Subject: [PATCH 043/106] fix floor_op (#7926) --- paddle/operators/activation_op.h | 2 +- python/paddle/v2/fluid/tests/test_activation_op.py | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/paddle/operators/activation_op.h b/paddle/operators/activation_op.h index 88c3d1c597..c0809abc05 100644 --- 
a/paddle/operators/activation_op.h +++ b/paddle/operators/activation_op.h @@ -323,7 +323,7 @@ template struct FloorFunctor : public BaseActivationFunctor { template void operator()(Device d, X x, Out out) const { - out.device(d) = x.ceil(); + out.device(d) = x.floor(); } }; diff --git a/python/paddle/v2/fluid/tests/test_activation_op.py b/python/paddle/v2/fluid/tests/test_activation_op.py index 18605e6065..1de5d446b8 100644 --- a/python/paddle/v2/fluid/tests/test_activation_op.py +++ b/python/paddle/v2/fluid/tests/test_activation_op.py @@ -186,8 +186,7 @@ class TestFloor(OpTest): self.op_type = "floor" x = np.random.uniform(-1, 1, [4, 4]).astype("float32") self.inputs = {'X': x} - # numpy floor need +1 - self.outputs = {'Out': np.floor(self.inputs['X']) + 1.0} + self.outputs = {'Out': np.floor(self.inputs['X'])} def test_check_output(self): self.check_output() From e27a030072d23701aeda91cc6c1b76c84ca361d2 Mon Sep 17 00:00:00 2001 From: guosheng Date: Wed, 24 Jan 2018 19:17:49 +0800 Subject: [PATCH 044/106] Add weight normalization --- python/paddle/v2/fluid/layer_helper.py | 186 +++++++++++++++++- python/paddle/v2/fluid/param_attr.py | 17 ++ .../fluid/tests/test_weight_normalization.py | 122 ++++++++++++ 3 files changed, 321 insertions(+), 4 deletions(-) create mode 100644 python/paddle/v2/fluid/tests/test_weight_normalization.py diff --git a/python/paddle/v2/fluid/layer_helper.py b/python/paddle/v2/fluid/layer_helper.py index 0b0064ade9..368cf8ed2e 100644 --- a/python/paddle/v2/fluid/layer_helper.py +++ b/python/paddle/v2/fluid/layer_helper.py @@ -18,7 +18,7 @@ import itertools from framework import Variable, Parameter, default_main_program, default_startup_program, \ unique_name, dtype_is_floating from paddle.v2.fluid.initializer import Constant, Xavier -from param_attr import ParamAttr +from param_attr import ParamAttr, WeightNormParamAttr class LayerHelper(object): @@ -103,6 +103,177 @@ class LayerHelper(object): raise ValueError("Data Type mismatch") return 
dtype + def _create_weight_normalize(self, attr, shape, dtype): + from .layers import elementwise_mul, elementwise_div, reshape + + # Remove these ops when LayerHelper and layers support indicating + # program and block. + def __norm_op(x, + out=None, + p=2, + dim=None, + keep_dim=False, + block=self.startup_program.global_block()): + if out is None: + out = block.create_var( + name=unique_name(".".join([self.name, 'weight_norm_norm'])), + dtype=dtype, + persistable=False) + abs_out = block.create_var( + name=unique_name(".".join([self.name, 'weight_norm_abs'])), + dtype=dtype, + persistable=False) + block.append_op( + type='abs', inputs={'X': x}, outputs={'Out': abs_out}) + pow_out = block.create_var( + name=unique_name(".".join([self.name, 'weight_norm_pow'])), + dtype=dtype, + persistable=False) + block.append_op( + type='pow', + inputs={'X': abs_out}, + outputs={'Out': pow_out}, + attrs={'factor': float(p)}) + sum_out = block.create_var( + name=unique_name(".".join([self.name, 'weight_norm_sum'])), + dtype=dtype, + persistable=False) + block.append_op( + type='reduce_sum', + inputs={'X': pow_out}, + outputs={'Out': sum_out}, + attrs={ + 'dim': dim, + 'keep_dim': keep_dim, + 'reduce_all': True if dim is None else False + }) + block.append_op( + type='pow', + inputs={'X': sum_out}, + outputs={'Out': out}, + attrs={'factor': 1. 
/ p}) + return out + + def __reshape_op(x, + shape, + out=None, + block=self.startup_program.global_block()): + if out is None: + out = block.create_var( + name=unique_name(".".join( + [self.name, 'weight_norm_reshape'])), + dtype=dtype, + persistable=False) + block.append_op( + type='reshape', + inputs={'X': x}, + outputs={'Out': out}, + attrs={'shape': shape}) + return out + + def __transpose_op(x, + axis, + out=None, + block=self.startup_program.global_block()): + if out is None: + out = block.create_var( + name=unique_name(".".join( + [self.name, 'weight_norm_transpose'])), + dtype=dtype, + persistable=False) + block.append_op( + type='transpose', + inputs={'X': x}, + outputs={'Out': out}, + attrs={'axis': axis}) + return out + + def __norm_except_dim(x, + out=None, + dim=None, + block=self.startup_program.global_block()): + """Computes the norm over all dimensions except dim""" + if out is None: + out = block.create_var( + name=unique_name(".".join([self.name, 'weight_norm_norm'])), + dtype=dtype, + persistable=False) + if dim is None: + __norm_op(x, out, dim=dim, block=block) + elif dim == 0: + out_shape = [x.shape[0]] + [1] * (len(x.shape) - 1) + reshape = __reshape_op(x, shape=[x.shape[0], -1], block=block) + norm = __norm_op(reshape, dim=1, block=block) + __reshape_op(norm, out=out, shape=out_shape, block=block) + elif dim == len(x.shape) - 1: + out_shape = [1] * (len(x.shape) - 1) + [x.shape[-1]] + reshape = __reshape_op(x, shape=[-1, x.shape[-1]], block=block) + norm = __norm_op(reshape, dim=0, block=block) + __reshape_op(norm, out=out, shape=out_shape, block=block) + else: + perm = range(len(x.shape)) + perm[0], perm[dim] = dim, 0 + transpose = __transpose_op(x, perm, block=block) + norm = __norm_op(transpose, dim=0, block=block) + __transpose_op(norm, perm, out=out, block=block) + return out + + def __weight_normalize(g, v, dim): + """Calculations for weight normalization""" + norm = __norm_except_dim( + v, dim=dim, 
block=self.main_program.current_block()) + scale = elementwise_div( + x=g, y=norm) # The shapes of g and norm are the same. + # Currently, elementwise_mul only support broadcast when the shape + # of y is a subset of x. Thus, we should reshape y to squeeze to + # achive it. + w = elementwise_mul( + x=v, + y=scale if dim is None else reshape( + x=scale, shape=[v.shape[dim]]), + axis=-1 if dim is None else dim) + # To serialize the original parameter for inference, maybe a + # parameter rather than a variable should be returned. + return w + + g_param_attr = copy.deepcopy(attr) + g_param_attr.name = attr.name + '_g' + g_param_shape = [1] * len(shape) + if attr.dim is not None: + g_param_shape[attr.dim] = shape[attr.dim] + v_param_attr = copy.deepcopy(attr) + v_param_attr.name = attr.name + '_v' + v_param_shape = shape + + # Add to startup_program to initialize g and v. + # Try to reconstruct the initializer of w by initializing g and v. + # Set the initializers of g and v as below, then the distribution + # of w is the same as initializing w with the given initializer. + # For Data-Dependent Initialization, please compute the init-values + # of g and v in external and then feed the values to g and v by + # executing an extra program. 
+ g_param = self.startup_program.global_block().create_parameter( + dtype=dtype, + shape=g_param_shape, + **g_param_attr.to_kwargs(with_initializer=False)) + v_param = self.startup_program.global_block().create_parameter( + dtype=dtype, + shape=v_param_shape, + **v_param_attr.to_kwargs(with_initializer=True)) + __norm_except_dim( + x=v_param, + out=g_param, + dim=attr.dim, + block=self.startup_program.global_block()) + + # Add weight normalization to main_program + g_param = self.main_program.global_block().create_parameter( + dtype=dtype, shape=g_param_shape, **g_param_attr.to_kwargs()) + v_param = self.main_program.global_block().create_parameter( + dtype=dtype, shape=v_param_shape, **v_param_attr.to_kwargs()) + w_param = __weight_normalize(g_param, v_param, dim=attr.dim) + return w_param + def create_parameter(self, attr, shape, @@ -112,16 +283,23 @@ class LayerHelper(object): # Deepcopy the attr so that parameters can be shared in program assert isinstance(attr, ParamAttr) suffix = 'b' if is_bias else 'w' + if attr.name is None: + attr.name = unique_name(".".join([self.name, suffix])) - if default_initializer is None: + if default_initializer is None and attr.initializer is None: if is_bias: attr.set_default_bias_initializer() else: attr.set_default_param_initializer() else: attr.set_default_initializer(default_initializer) - if attr.name is None: - attr.name = unique_name(".".join([self.name, suffix])) + + # If weight normalization is set, insert extra parameters and ops. 
+ # Refer to https://arxiv.org/pdf/1602.07868.pdf + if isinstance(attr, WeightNormParamAttr): + param = self._create_weight_normalize(attr, shape, dtype) + WeightNormParamAttr.params_with_weight_norm.append(param) + return param self.startup_program.global_block().create_parameter( dtype=dtype, shape=shape, **attr.to_kwargs(with_initializer=True)) diff --git a/python/paddle/v2/fluid/param_attr.py b/python/paddle/v2/fluid/param_attr.py index dcca8b6c54..1218e71ca1 100644 --- a/python/paddle/v2/fluid/param_attr.py +++ b/python/paddle/v2/fluid/param_attr.py @@ -82,3 +82,20 @@ class ParamAttr(object): if with_initializer: kwargs['initializer'] = self.initializer return kwargs + + +class WeightNormParamAttr(ParamAttr): + """ + Used for weight normalization. Any field in ParamAttr can also be set here. + Besides, an extra field dim can be set to indicate the dimension except + which to normalize. + """ + # List to record the parameters reparameterized by weight normalization. + # If these parameters are treated as Variable rather than Parameter, + # it can be used to discriminate these parameters and help to serialize + # these paramters for inference. + params_with_weight_norm = [] + + def __init__(self, dim=None, **kwargs): + super(WeightNormParamAttr, self).__init__(**kwargs) + self.dim = dim diff --git a/python/paddle/v2/fluid/tests/test_weight_normalization.py b/python/paddle/v2/fluid/tests/test_weight_normalization.py new file mode 100644 index 0000000000..200b5b9dc0 --- /dev/null +++ b/python/paddle/v2/fluid/tests/test_weight_normalization.py @@ -0,0 +1,122 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest +import numpy +import collections +import paddle.v2.fluid as fluid +import paddle.v2.fluid.core as core +from paddle.v2.fluid.initializer import ConstantInitializer +from paddle.v2.fluid.param_attr import WeightNormParamAttr + + +class TestWeightNormalization(unittest.TestCase): + batch_size = 3 + hidden_size = 5 + data_desc = (['x', [10], 0], ) + + @classmethod + def setUpClass(cls): + cls.set_program() + + @classmethod + def set_program(cls): + data = fluid.layers.data( + name=cls.data_desc[0][0], shape=cls.data_desc[0][1]) + out = fluid.layers.fc(input=data, + size=cls.hidden_size, + param_attr=WeightNormParamAttr( + dim=None, + name='weight_norm_param', + initializer=ConstantInitializer(1.0)), + bias_attr=False, + act=None) + loss = fluid.layers.reduce_sum(out) + fluid.backward.append_backward(loss=loss) + cls.fetch_list = [ + 'weight_norm_param_g', 'weight_norm_param_v', + 'weight_norm_param_g@GRAD' + ] + + def run_program(self): + outputs = [] + places = [core.CPUPlace()] + if core.is_compile_gpu(): + places.append(core.CUDAPlace(0)) + for place in places: + self.set_inputs(place) + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + output = exe.run(fluid.default_main_program(), + feed=self.inputs, + fetch_list=self.fetch_list, + return_numpy=False) + outputs.append(output) + self.actual_outputs = outputs + + def set_data(self): + self.data = collections.OrderedDict() + for desc in self.data_desc: + data_name = desc[0] + data_shape = desc[1] + data_lod_level = desc[2] + data_lod = [] + for i in 
range(data_lod_level): + lod_level_i = numpy.random.randint( + low=1, + high=5, + size=self.batch_size if i == 0 else lod_level_i[-1]) + lod_level_i = [0] + numpy.cumsum(lod_level_i).tolist() + data_lod.append(lod_level_i) + data_value = numpy.random.random( + size=[data_lod[-1][-1] if data_lod else self.batch_size + ] + data_shape).astype('float32') + self.data[data_name] = (data_value, data_lod) + + def set_inputs(self, place): + self.inputs = {} + for desc in self.data_desc: + tensor = fluid.Tensor() + tensor.set(self.data[desc[0]][0], place) + if self.data[desc[0]][1]: + tensor.set_lod(self.data[desc[0]][1]) + self.inputs[desc[0]] = tensor + + def weight_normalize(self): + v = numpy.ones((self.data[self.data_desc[0][0]][0].shape[-1], + self.hidden_size)) + g = numpy.linalg.norm(v, axis=None, keepdims=True) + w = g * v / numpy.linalg.norm(v, axis=None, keepdims=True) + x = self.data[self.data_desc[0][0]][0] + out = numpy.dot(x, w) + g_grad = (numpy.dot(x.T, numpy.ones_like(out)) * (v / numpy.linalg.norm( + v, axis=None, keepdims=True))).sum(axis=None, keepdims=True) + return g, v, g_grad + + def test_weight_normalization(self): + self.set_data() + self.run_program() + expect_output = self.weight_normalize() + for actual_output in self.actual_outputs: + [ + self.assertTrue( + numpy.allclose( + numpy.array(actual_output), expect_output, atol=0.001)) + for expect_output, actual_output in zip(expect_output, + actual_output) + ] + + +if __name__ == '__main__': + unittest.main() From 0065548cd312c643d42f2bfae59348cd5f471811 Mon Sep 17 00:00:00 2001 From: kavyasrinet Date: Sun, 28 Jan 2018 18:29:44 -0800 Subject: [PATCH 045/106] Update copyright for notest_dist_image_classification (#7898) Update copyright for notest_dist_image_classification --- .../notest_dist_image_classification.py | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/python/paddle/v2/fluid/tests/book_distribute/notest_dist_image_classification.py 
b/python/paddle/v2/fluid/tests/book_distribute/notest_dist_image_classification.py index 0c51ccf306..298ecfc386 100644 --- a/python/paddle/v2/fluid/tests/book_distribute/notest_dist_image_classification.py +++ b/python/paddle/v2/fluid/tests/book_distribute/notest_dist_image_classification.py @@ -1,16 +1,16 @@ -#Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # -#Licensed under the Apache License, Version 2.0 (the "License"); -#you may not use this file except in compliance with the License. -#You may obtain a copy of the License at +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -#Unless required by applicable law or agreed to in writing, software -#distributed under the License is distributed on an "AS IS" BASIS, -#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -#See the License for the specific language governing permissions and -#limitations under the License. +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
from __future__ import print_function From 6b9f1d343eac40d71b5275071a9e47a04546a356 Mon Sep 17 00:00:00 2001 From: guosheng Date: Mon, 29 Jan 2018 10:33:41 +0800 Subject: [PATCH 046/106] Make weight normalization adapt to the up-to-date code --- python/paddle/v2/fluid/layer_helper.py | 4 ++-- python/paddle/v2/fluid/param_attr.py | 7 +++++-- python/paddle/v2/fluid/tests/test_weight_normalization.py | 2 +- 3 files changed, 8 insertions(+), 5 deletions(-) diff --git a/python/paddle/v2/fluid/layer_helper.py b/python/paddle/v2/fluid/layer_helper.py index cc4822735a..2119ca12c8 100644 --- a/python/paddle/v2/fluid/layer_helper.py +++ b/python/paddle/v2/fluid/layer_helper.py @@ -226,8 +226,8 @@ class LayerHelper(object): scale = elementwise_div( x=g, y=norm) # The shapes of g and norm are the same. # Currently, elementwise_mul only support broadcast when the shape - # of y is a subset of x. Thus, we should reshape y to squeeze to - # achive it. + # of y is a subset of the shape of x. Thus, we reshape y to squeeze + # to achive the subset. w = elementwise_mul( x=v, y=scale if dim is None else reshape( diff --git a/python/paddle/v2/fluid/param_attr.py b/python/paddle/v2/fluid/param_attr.py index 1218e71ca1..fc566b8a24 100644 --- a/python/paddle/v2/fluid/param_attr.py +++ b/python/paddle/v2/fluid/param_attr.py @@ -15,7 +15,10 @@ from initializer import Initializer, Xavier, Constant from regularizer import WeightDecayRegularizer -__all__ = ['ParamAttr'] +__all__ = [ + 'ParamAttr', + 'WeightNormParamAttr', +] class ParamAttr(object): @@ -92,7 +95,7 @@ class WeightNormParamAttr(ParamAttr): """ # List to record the parameters reparameterized by weight normalization. # If these parameters are treated as Variable rather than Parameter, - # it can be used to discriminate these parameters and help to serialize + # it can be used to discriminate these parameters and help to serialize # these paramters for inference. 
params_with_weight_norm = [] diff --git a/python/paddle/v2/fluid/tests/test_weight_normalization.py b/python/paddle/v2/fluid/tests/test_weight_normalization.py index 200b5b9dc0..1ac54ea305 100644 --- a/python/paddle/v2/fluid/tests/test_weight_normalization.py +++ b/python/paddle/v2/fluid/tests/test_weight_normalization.py @@ -52,7 +52,7 @@ class TestWeightNormalization(unittest.TestCase): def run_program(self): outputs = [] places = [core.CPUPlace()] - if core.is_compile_gpu(): + if core.is_compiled_with_cuda(): places.append(core.CUDAPlace(0)) for place in places: self.set_inputs(place) From 0fbfd2dc7024badf2bbfc3ebc23bf13c50dca56a Mon Sep 17 00:00:00 2001 From: Yibing Liu Date: Sun, 28 Jan 2018 20:03:10 -0800 Subject: [PATCH 047/106] Simplify the symbol description --- python/paddle/v2/fluid/layers/nn.py | 51 ++++++++++++++++------------- 1 file changed, 29 insertions(+), 22 deletions(-) diff --git a/python/paddle/v2/fluid/layers/nn.py b/python/paddle/v2/fluid/layers/nn.py index c23cd733f1..d11dccfd22 100644 --- a/python/paddle/v2/fluid/layers/nn.py +++ b/python/paddle/v2/fluid/layers/nn.py @@ -435,25 +435,28 @@ def dynamic_lstmp(input, r_t & = \overline{act_h}(W_{rh}h_t) - where the :math:`W` terms denote weight matrices (e.g. :math:`W_{xi}` is - the matrix of weights from the input gate to the input), :math:`W_{ic}`, - :math:`W_{fc}`, :math:`W_{oc}` are diagonal weight matrices for peephole - connections. In our implementation, we use vectors to reprenset these - diagonal weight matrices. The :math:`b` terms denote bias vectors - (:math:`b_i` is the input gate bias vector), :math:`\sigma` is the - activation, such as logistic sigmoid function, and :math:`i, f, o` and - :math:`c` are the input gate, forget gate, output gate, and cell activation - vectors, respectively, all of which have the same size as the cell output - activation vector :math:`h`. Here :math:`h` is usually called the hidden - state and :math:`r` denotes its recurrent projection. 
And - :math:`\\tilde{c_t}` is also called the candidate hidden state, whose - computation is based on the current input and previous hidden state. - - The :math:`\odot` is the element-wise product of the vectors. :math:`act_g` - and :math:`act_h` are the cell input and cell output activation functions - and `tanh` is usually used for them. :math:`\overline{act_h}` is the - activation function for the projection output, usually using `identity` or - same as :math:`act_h`. + In the above formula: + + * :math:`W`: Denotes weight matrices (e.g. :math:`W_{xi}` is \ + the matrix of weights from the input gate to the input). + * :math:`W_{ic}`, :math:`W_{fc}`, :math:`W_{oc}`: Diagonal weight \ + matrices for peephole connections. In our implementation, \ + we use vectors to reprenset these diagonal weight matrices. + * :math:`b`: Denotes bias vectors (e.g. :math:`b_i` is the input gate \ + bias vector). + * :math:`\sigma`: The activation, such as logistic sigmoid function. + * :math:`i, f, o` and :math:`c`: The input gate, forget gate, output \ + gate, and cell activation vectors, respectively, all of which have \ + the same size as the cell output activation vector :math:`h`. + * :math:`h`: The hidden state. + * :math:`r`: The recurrent projection of the hidden state. + * :math:`\\tilde{c_t}`: The candidate hidden state, whose \ + computation is based on the current input and previous hidden state. + * :math:`\odot`: The element-wise product of the vectors. + * :math:`act_g` and :math:`act_h`: The cell input and cell output \ + activation functions and `tanh` is usually used for them. + * :math:`\overline{act_h}`: The activation function for the projection \ + output, usually using `identity` or same as :math:`act_h`. Set `use_peepholes` to `False` to disable peephole connection. The formula is omitted here, please refer to the paper @@ -519,12 +522,16 @@ def dynamic_lstmp(input, Examples: .. 
code-block:: python - hidden_dim = 512 - proj_dim = 256 + hidden_dim, proj_dim = 512, 256 fc_out = fluid.layers.fc(input=input_seq, size=hidden_dim * 4, act=None, bias_attr=None) proj_out, _ = fluid.layers.dynamic_lstmp(input=fc_out, - size=hidden_dim * 4, proj_size=proj_dim, use_peepholes=False) + size=hidden_dim * 4, + proj_size=proj_dim, + use_peepholes=False, + is_reverse=True, + cell_activation="tanh", + proj_activation="tanh") """ helper = LayerHelper('lstmp', **locals()) From cce18c9fe6ae763c3cd93038d09a390a5a0ac99e Mon Sep 17 00:00:00 2001 From: gongweibao Date: Mon, 29 Jan 2018 12:50:56 +0800 Subject: [PATCH 048/106] Fix typo (#7933) * fix typo * fix typo --- doc/design/support_new_device.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/design/support_new_device.md b/doc/design/support_new_device.md index b154f01d32..8983df9004 100644 --- a/doc/design/support_new_device.md +++ b/doc/design/support_new_device.md @@ -174,7 +174,7 @@ class MaxOutFunctor { }; ``` -CPU implemention is in .cc file +CPU implementation is in .cc file ``` template @@ -188,7 +188,7 @@ class MaxOutFunctor { }; ``` -CUDA implemention is in .cu file +CUDA implementation is in .cu file ``` template @@ -203,9 +203,9 @@ class MaxOutFunctor { ``` -We first obtain the computing handle from a concrete DeviceContext, and then compute on tensors. +We first obtain the computing handle from a concrete DeviceContext and then compute on tensors. -The implemention of `OpKernel` is similar to math functors, the extra thing we need to do is to register the OpKernel in a global map. +The implementation of `OpKernel` is similar to math functors, the extra thing we need to do is to register the OpKernel in a global map. 
Fluid provides different register interfaces in op_registry.h From 52e17bf50c2e89e3a29136e4679d312d9c1aa995 Mon Sep 17 00:00:00 2001 From: guosheng Date: Mon, 29 Jan 2018 13:56:36 +0800 Subject: [PATCH 049/106] Fix the unit test of weight normalization --- paddle/operators/reduce_op.cc | 9 ++++++--- .../paddle/v2/fluid/tests/test_weight_normalization.py | 5 ++--- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/paddle/operators/reduce_op.cc b/paddle/operators/reduce_op.cc index 4a06babeda..84f24a9095 100644 --- a/paddle/operators/reduce_op.cc +++ b/paddle/operators/reduce_op.cc @@ -13,7 +13,6 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/operators/reduce_op.h" -#include "paddle/operators/net_op.h" namespace paddle { namespace operators { @@ -38,10 +37,14 @@ class ReduceOp : public framework::OperatorWithKernel { dim, x_rank, "The dim should be in the range [-rank(input), rank(input))."); bool reduce_all = ctx->Attrs().Get("reduce_all"); + bool keep_dim = ctx->Attrs().Get("keep_dim"); if (reduce_all) { - ctx->SetOutputDim("Out", {1}); + if (keep_dim) + ctx->SetOutputDim( + "Out", framework::make_ddim(std::vector(x_rank, 1))); + else + ctx->SetOutputDim("Out", {1}); } else { - bool keep_dim = ctx->Attrs().Get("keep_dim"); auto dims_vector = vectorize(x_dims); if (keep_dim || x_rank == 1) { dims_vector[dim] = 1; diff --git a/python/paddle/v2/fluid/tests/test_weight_normalization.py b/python/paddle/v2/fluid/tests/test_weight_normalization.py index 1ac54ea305..80ad8285d8 100644 --- a/python/paddle/v2/fluid/tests/test_weight_normalization.py +++ b/python/paddle/v2/fluid/tests/test_weight_normalization.py @@ -112,9 +112,8 @@ class TestWeightNormalization(unittest.TestCase): [ self.assertTrue( numpy.allclose( - numpy.array(actual_output), expect_output, atol=0.001)) - for expect_output, actual_output in zip(expect_output, - actual_output) + numpy.array(actual), expect, atol=0.001)) + 
for expect, actual in zip(expect_output, actual_output) ] From 82d924984f75cecce2626ecb7376f2424b50aaae Mon Sep 17 00:00:00 2001 From: typhoonzero Date: Mon, 29 Jan 2018 14:45:10 +0800 Subject: [PATCH 050/106] update dist transpiler --- python/paddle/v2/fluid/distribute_transpiler.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/python/paddle/v2/fluid/distribute_transpiler.py b/python/paddle/v2/fluid/distribute_transpiler.py index abcad899bf..4e54ab806b 100644 --- a/python/paddle/v2/fluid/distribute_transpiler.py +++ b/python/paddle/v2/fluid/distribute_transpiler.py @@ -221,7 +221,7 @@ class DistributeTranspiler: if len(splited_vars) <= 1: continue orig_var = program.global_block().vars[varname] - if orig_var == core.VarDesc.VarType.SELECTED_ROWS: + if orig_var.type == core.VarDesc.VarType.SELECTED_ROWS: height_sections = [] for v in splited_vars: height_sections.append(v.shape[0]) @@ -230,7 +230,7 @@ class DistributeTranspiler: inputs={"X": orig_var}, outputs={"Out": splited_vars}, attrs={"height_sections": height_sections}) - elif orig_var == core.VarDesc.VarType.LOD_TENSOR: + elif orig_var.type == core.VarDesc.VarType.LOD_TENSOR: sections = [] for v in splited_vars: sections.append(v.shape[0]) @@ -470,8 +470,7 @@ class DistributeTranspiler: # Append the recv op pserver_program.global_block().append_op( type="recv", - inputs={"RX": self.param_grad_ep_mapping[endpoint]["grads"] - }, # grads to recv + inputs={}, outputs={}, attrs={ "OptimizeBlock": optimize_sub_program.global_block(), From d082f3a911721231ac321ccbafa571ae8a44af7f Mon Sep 17 00:00:00 2001 From: Yi Wang Date: Sun, 28 Jan 2018 23:29:53 -0800 Subject: [PATCH 051/106] Rewrite class Channel to implement buffered and unbuffered channels (#7915) * Remove IsBounded as buffered channels have to be bounded * Add derived classes Buffered and UnBuffered" * Implement buffered and unbuffered channels * Correct the syntax of Channel::Receive * clang-format * clang-format 3.8 * clang 3.8 
--- paddle/framework/CMakeLists.txt | 2 + paddle/framework/channel.h | 91 +++++++------------ paddle/framework/channel_test.cc | 26 ++++++ paddle/framework/details/buffered_channel.h | 82 +++++++++++++++++ paddle/framework/details/unbuffered_channel.h | 52 +++++++++++ 5 files changed, 196 insertions(+), 57 deletions(-) create mode 100644 paddle/framework/channel_test.cc create mode 100644 paddle/framework/details/buffered_channel.h create mode 100644 paddle/framework/details/unbuffered_channel.h diff --git a/paddle/framework/CMakeLists.txt b/paddle/framework/CMakeLists.txt index 2804969842..318661af8b 100644 --- a/paddle/framework/CMakeLists.txt +++ b/paddle/framework/CMakeLists.txt @@ -98,3 +98,5 @@ if(NOT WITH_C_API AND WITH_FLUID) install(FILES ${CMAKE_CURRENT_BINARY_DIR}/framework.pb.h DESTINATION include/paddle/framework) install(FILES details/cow_ptr.h details/op_registry.h DESTINATION include/paddle/framework/details) endif() + +cc_test(channel_test SRCS channel_test.cc) diff --git a/paddle/framework/channel.h b/paddle/framework/channel.h index 9ba0fc5c55..70ecccc1a1 100644 --- a/paddle/framework/channel.h +++ b/paddle/framework/channel.h @@ -13,75 +13,52 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include -#include -#include + +#include // for size_t namespace paddle { namespace framework { +// Channel is the abstract class of buffered and un-buffered channels. template class Channel { public: - explicit Channel(std::size_t capacity) : capacity_(capacity) {} - - void Send(T* channel_element) { - std::unique_lock lock(mu_); - - if (IsBounded()) { - full_cond_var_.wait(lock, [this]() { - bool capacity_valid = capacity_ > 0 ? 
!IsCapacityFull() : true; - return capacity_valid; - }); - } - channel_.push_back(std::move(*channel_element)); - - lock.unlock(); - empty_cond_var_.notify_one(); - } + virtual void Send(T*) = 0; + virtual void Receive(T*) = 0; + virtual size_t Cap() = 0; - T* Receive() { - std::unique_lock lock(mu_); - empty_cond_var_.wait(lock, [this]() { return !channel_.empty(); }); - - T* channel_element = std::move(channel_.front()); - channel_.pop_front(); - - NotifyAllSenders(&lock); - return channel_element; - } - - size_t Size() { - std::unique_lock lock(mu_); - return channel_.size(); - } + // Don't delete channels; instead, call Channel::Close. + protected: + virtual ~Channel() {} +}; - void Clear() { - std::unique_lock lock(mu_); - channel_.clear(); +// Forward declaration of channel implementations. +namespace details { +template +class Buffered; +template +class UnBuffered; +} // namespace details - NotifyAllSenders(&lock); +template +Channel* MakeChannel(size_t buffer_size) { + if (buffer_size > 0) { + return new details::Buffered(buffer_size); } + return new details::UnBuffered(); +} - private: - std::size_t capacity_; - std::mutex mu_; - std::condition_variable empty_cond_var_; - std::condition_variable full_cond_var_; - std::deque channel_; - - private: - void NotifyAllSenders(std::unique_lock* lock) { - if (IsBounded()) { - lock->unlock(); - full_cond_var_.notify_one(); - } +template +void CloseChannel(Channel* ch) { + if (ch->Cap() > 0) { + delete dynamic_cast*>(ch); + } else { + delete dynamic_cast*>(ch); } +} - bool IsBounded() const { return capacity_ > 0; } - - bool IsCapacityFull() const { return channel_.size() >= capacity_; } -}; - -} // namespace operator +} // namespace framework } // namespace paddle + +#include "paddle/framework/details/buffered_channel.h" +#include "paddle/framework/details/unbuffered_channel.h" diff --git a/paddle/framework/channel_test.cc b/paddle/framework/channel_test.cc new file mode 100644 index 0000000000..9efc017265 --- 
/dev/null +++ b/paddle/framework/channel_test.cc @@ -0,0 +1,26 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/framework/channel.h" + +#include "gtest/gtest.h" + +TEST(Channel, MakeAndClose) { + using paddle::framework::Channel; + using paddle::framework::MakeChannel; + using paddle::framework::CloseChannel; + + Channel* ch = MakeChannel(10); + CloseChannel(ch); +} diff --git a/paddle/framework/details/buffered_channel.h b/paddle/framework/details/buffered_channel.h new file mode 100644 index 0000000000..572e29d44a --- /dev/null +++ b/paddle/framework/details/buffered_channel.h @@ -0,0 +1,82 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#pragma once +#include +#include +#include + +#include "paddle/framework/channel.h" + +namespace paddle { +namespace framework { +namespace details { + +template +class Buffered : public paddle::framework::Channel { + friend Channel* paddle::framework::MakeChannel(size_t); + friend void paddle::framework::CloseChannel(Channel*); + + public: + virtual void Send(T*); + virtual void Receive(T*); + virtual size_t Cap() { return cap_; } + + private: + size_t cap_; + std::mutex mu_; + std::condition_variable empty_cond_var_; + std::condition_variable full_cond_var_; + std::deque channel_; + + Buffered(size_t cap) : cap_(cap) {} + virtual ~Buffered(); + + void NotifyAllSenders(std::unique_lock*); +}; + +template +void Buffered::Send(T* item) { + std::unique_lock lock(mu_); + full_cond_var_.wait(lock, [this]() { return channel_.size() < cap_; }); + channel_.push_back(std::move(*item)); + lock.unlock(); + empty_cond_var_.notify_one(); +} + +template +void Buffered::Receive(T* item) { + std::unique_lock lock(mu_); + empty_cond_var_.wait(lock, [this]() { return !channel_.empty(); }); + *item = std::move(channel_.front()); + channel_.pop_front(); + NotifyAllSenders(&lock); +} + +template +Buffered::~Buffered() { + std::unique_lock lock(mu_); + channel_.clear(); + NotifyAllSenders(&lock); +} + +template +void Buffered::NotifyAllSenders(std::unique_lock* lock) { + lock->unlock(); + full_cond_var_.notify_one(); +} + +} // namespace details +} // namespace framework +} // namespace paddle diff --git a/paddle/framework/details/unbuffered_channel.h b/paddle/framework/details/unbuffered_channel.h new file mode 100644 index 0000000000..7ecced1fba --- /dev/null +++ b/paddle/framework/details/unbuffered_channel.h @@ -0,0 +1,52 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once +#include +#include +#include + +#include "paddle/framework/channel.h" + +namespace paddle { +namespace framework { +namespace details { + +template +class UnBuffered : public paddle::framework::Channel { + friend Channel* paddle::framework::MakeChannel(size_t); + friend void paddle::framework::CloseChannel(Channel*); + + public: + virtual void Send(T*); + virtual void Receive(T*); + virtual size_t Cap() { return 0; } + + private: + UnBuffered() {} + virtual ~UnBuffered(); +}; + +template +void UnBuffered::Send(T* channel_element) {} + +template +void UnBuffered::Receive(T*) {} + +template +UnBuffered::~UnBuffered() {} + +} // namespace details +} // namespace framework +} // namespace paddle From 5a3b06bef7d02366c5fc1055c5d5b6fe1a241c75 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Mon, 29 Jan 2018 16:05:25 +0800 Subject: [PATCH 052/106] update debug string --- python/paddle/v2/fluid/clip.py | 24 +++++++++ python/paddle/v2/fluid/framework.py | 71 ++++++++++++++++++++++++--- python/paddle/v2/fluid/regularizer.py | 11 +++++ 3 files changed, 98 insertions(+), 8 deletions(-) diff --git a/python/paddle/v2/fluid/clip.py b/python/paddle/v2/fluid/clip.py index 3028029e60..fcf070fd3d 100644 --- a/python/paddle/v2/fluid/clip.py +++ b/python/paddle/v2/fluid/clip.py @@ -30,6 +30,9 @@ __all__ = [ class BaseErrorClipAttr(object): + def __str__(self): + raise NotImplementedError() + def append_clip_op(self, block, grad_name): raise NotImplementedError() @@ -44,6 +47,9 @@ class ErrorClipByValue(BaseErrorClipAttr): self.max = max self.min = min + def __str__(self): + 
return "ByValue, min=%f, max=%f" % (self.min, self.max) + def append_clip_op(self, block, grad_name): clip_op_desc = block.desc.append_op() clip_op_desc.set_type("clip") @@ -71,6 +77,9 @@ def error_clip_callback(block, context): class BaseGradientClipAttr(object): + def __str__(self): + raise NotImplementedError() + def process_context(self, context, param, grad): raise NotImplementedError() @@ -79,6 +88,9 @@ class BaseGradientClipAttr(object): class NullGradientClipAttr(BaseGradientClipAttr): + def __str__(self): + return "Null" + def process_context(self, context, param, grad): pass @@ -96,6 +108,9 @@ class GradientClipByValue(BaseGradientClipAttr): self.max = max self.min = min + def __str__(self): + return "ByValue, min=%f, max=%f" % (self.min, self.max) + def process_context(self, context, param, grad): pass @@ -108,6 +123,9 @@ class GradientClipByNorm(BaseGradientClipAttr): def __init__(self, clip_norm): self.clip_norm = clip_norm + def __str__(self): + return "ByNorm, clip_norm=%f" % self.clip_norm + def process_context(self, context, param, grad): pass @@ -124,6 +142,10 @@ class GradientClipByGlobalNorm(BaseGradientClipAttr): self.clip_norm = clip_norm self.group_name = group_name + def __str__(self): + return "ByGlobalNorm, group_name=%s, clip_norm=%f" % (self.group_name, + self.clip_norm) + def process_context(self, context, param, grad): if self.group_name not in context: context[self.group_name] = [] @@ -199,3 +221,5 @@ def append_gradient_clip_ops(param_grad): ClipByValue = GradientClipByValue +ClipByNorm = GradientClipByNorm +ClipByGlobalNorm = GradientClipByGlobalNorm diff --git a/python/paddle/v2/fluid/framework.py b/python/paddle/v2/fluid/framework.py index 4d8343e7de..ff9bc7239c 100644 --- a/python/paddle/v2/fluid/framework.py +++ b/python/paddle/v2/fluid/framework.py @@ -629,10 +629,34 @@ class Block(object): def __str__(self): return self.to_string(True) - def to_string(self, throw_on_error): - protostr = self.desc.serialize_to_string() - proto 
= framework_pb2.BlockDesc.FromString(str(protostr)) - return _debug_string_(proto, throw_on_error) + def to_string(self, throw_on_error, with_details=False): + """ + To debug string. + Args: + throw_on_error(bool): raise exception when self is not initialized + when throw_on_error is True + with_details(bool): more details about paramters(e.g. trainable, optimize_attr, ...) will be printed when with_details is True + + Returns(str): The debug string. + + """ + assert isinstance(throw_on_error, bool) and isinstance(with_details, + bool) + if with_details: + res_str = "blocks {\n idx: %d\n parent_idx: %d" % ( + self.idx, self.parent_idx) + for var in self.vars.itervalues(): + res_str += "\n vars {\n %s }" % var.to_string( + throw_on_error).replace("\n", "\n ") + for op in self.ops: + res_str += "\n ops {\n %s }" % op.to_string( + throw_on_error).replace("\n", "\n ") + res_str += "\n}" + else: + protostr = self.desc.serialize_to_string() + proto = framework_pb2.BlockDesc.FromString(str(protostr)) + res_str = _debug_string_(proto, throw_on_error) + return res_str __repr__ = __str__ @@ -796,10 +820,28 @@ class Program(object): def __str__(self): return self.to_string(True) - def to_string(self, throw_on_error): - protostr = self.desc.serialize_to_string() - proto = framework_pb2.ProgramDesc.FromString(str(protostr)) - return _debug_string_(proto, throw_on_error) + def to_string(self, throw_on_error, with_details=False): + """ + To debug string. + Args: + throw_on_error(bool): raise exception when self is not initialized + when throw_on_error is True + with_details(bool): more details about paramters(e.g. trainable, optimize_attr, ...) will be printed when with_details is True + + Returns(str): The debug string. 
+ + """ + assert isinstance(throw_on_error, bool) and isinstance(with_details, + bool) + if with_details: + res_str = "" + for block in self.blocks: + res_str += block.to_string(throw_on_error, with_details) + else: + protostr = self.desc.serialize_to_string() + proto = framework_pb2.ProgramDesc.FromString(str(protostr)) + res_str = _debug_string_(proto, throw_on_error) + return res_str def get_desc(self): return self.desc @@ -950,6 +992,19 @@ class Parameter(Variable): self.gradient_clip_attr = kwargs.get('gradient_clip_attr', None) + def __str__(self): + return self.to_string(True) + + def to_string(self, throw_on_error): + res_str = Variable.to_string(self, throw_on_error) + additional_attr = ("trainable", "optimize_attr", "regularizer", + "gradient_clip_attr") + for attr_name in additional_attr: + res_str += "%s: %s\n" % (attr_name, str(getattr(self, attr_name))) + return res_str + + __repr__ = __str__ + # program is a global instance. _main_program_ = Program() diff --git a/python/paddle/v2/fluid/regularizer.py b/python/paddle/v2/fluid/regularizer.py index c2f28eecfd..0273da647a 100644 --- a/python/paddle/v2/fluid/regularizer.py +++ b/python/paddle/v2/fluid/regularizer.py @@ -87,6 +87,11 @@ class WeightDecayRegularizer(object): """ raise NotImplementedError() + def __str__(self): + """Debug string + """ + raise NotImplementedError() + class L2DecayRegularizer(WeightDecayRegularizer): """Implements the L2 Weight Decay Regularization @@ -123,6 +128,9 @@ class L2DecayRegularizer(WeightDecayRegularizer): return decay + def __str__(self): + return "L2Decay, regularization_coeff=%f" % self._regularization_coeff + class L1DecayRegularizer(WeightDecayRegularizer): """Implements the L1 Weight Decay Regularization @@ -163,6 +171,9 @@ class L1DecayRegularizer(WeightDecayRegularizer): return decay + def __str__(self): + return "L1Decay, regularization_coeff=%f" % self._regularization_coeff + # We short the class name, since users will use the regulaizer with the package # 
name. The sample code: From e3b499605ad55c088326b9649c6ca23885320c0c Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Mon, 29 Jan 2018 16:42:50 +0800 Subject: [PATCH 053/106] add docstring --- python/paddle/v2/fluid/clip.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/python/paddle/v2/fluid/clip.py b/python/paddle/v2/fluid/clip.py index 3028029e60..92081a47d4 100644 --- a/python/paddle/v2/fluid/clip.py +++ b/python/paddle/v2/fluid/clip.py @@ -160,6 +160,17 @@ class GradientClipByGlobalNorm(BaseGradientClipAttr): def set_gradient_clip(clip, param_list=None, program=None): + """ + To specify parameters that require gradient clip. + Args: + clip(BaseGradientClipAttr): An instance of some derived class of BaseGradientClipAttr, + which describes the type and detailed attributes of required gradient clip. + param_list(list, None by default): Parameters that require gradient clip. + It can be a list of parameter or a list of parameter's name. + When it's None, all parameters in the program will be included. + program(Program, None by default): The program where parameters are. + Will be the default main program when assigned with None. 
+ """ if not isinstance(clip, BaseGradientClipAttr): raise TypeError( "'clip' should be an instance of BaseGradientClipAttr's derived class" From a585b585dd3f16c45d57a88bf2f6f0bb101d0398 Mon Sep 17 00:00:00 2001 From: Yancey Date: Mon, 29 Jan 2018 16:48:05 +0800 Subject: [PATCH 054/106] Batch barrier in send/recv op (#7847) * initialize batch barrier * add some comments * update * fix batch barrier * use sendvariable rpc interface to send batch barrier * fix comment * fix method * fix by comment * fix by comment --- paddle/operators/detail/grpc_client.cc | 15 ++++++ paddle/operators/detail/grpc_client.h | 24 ++++++++++ paddle/operators/detail/grpc_server.cc | 13 ++--- paddle/operators/detail/grpc_server.h | 3 +- paddle/operators/detail/sendrecvop_utils.h | 3 ++ paddle/operators/recv_op.cc | 55 +++++++++++++--------- paddle/operators/send_op.cc | 12 ++++- 7 files changed, 92 insertions(+), 33 deletions(-) diff --git a/paddle/operators/detail/grpc_client.cc b/paddle/operators/detail/grpc_client.cc index c433945542..9b5f7afc6a 100644 --- a/paddle/operators/detail/grpc_client.cc +++ b/paddle/operators/detail/grpc_client.cc @@ -97,6 +97,21 @@ bool RPCClient::AsyncGetVariable(const std::string& ep, return true; } +bool RPCClient::AsyncSendBatchBarrier(const std::string& ep, int64_t time_out) { + const auto ch = GetChannel(ep); + + BatchBarrierProcessor* s = new BatchBarrierProcessor(ch); + s->Prepare(time_out); + + sendrecv::VariableMessage req; + req.set_varname(BATCH_BARRIER_MESSAGE); + auto rpc = s->stub_->AsyncSendVariable(s->context_.get(), req, &cq_); + rpc->Finish(&s->reply_, &s->status_, (void*)s); + req_count_++; + + return true; +} + bool RPCClient::Wait() { if (req_count_ <= 0) { return true; diff --git a/paddle/operators/detail/grpc_client.h b/paddle/operators/detail/grpc_client.h index a62e70a253..f9499f6dc7 100644 --- a/paddle/operators/detail/grpc_client.h +++ b/paddle/operators/detail/grpc_client.h @@ -71,6 +71,15 @@ class ClientBase { 
context_->set_deadline(deadline); } + virtual void Prepare(int64_t time_out) { + context_.reset(new grpc::ClientContext()); + + std::chrono::system_clock::time_point deadline = + std::chrono::system_clock::now() + std::chrono::milliseconds(time_out); + + context_->set_deadline(deadline); + } + virtual void Process() = 0; std::unique_ptr stub_; @@ -117,6 +126,17 @@ class GetProcessor : public ClientBase { RequestGetCallBack response_call_back_ = ProcGetResponse; }; +class BatchBarrierProcessor : public ClientBase { + public: + explicit BatchBarrierProcessor(std::shared_ptr ch) + : ClientBase(ch) {} + + virtual ~BatchBarrierProcessor() {} + + virtual void Process() {} + sendrecv::VoidMessage reply_; +}; + class RPCClient { public: bool AsyncSendVariable(const std::string& ep, @@ -130,6 +150,10 @@ class RPCClient { const framework::Scope& scope, const std::string& var_name, int64_t time_out = 600 * 1000); + + bool AsyncSendBatchBarrier(const std::string& ep, + int64_t time_out = 600 * 1000); + bool Wait(); private: diff --git a/paddle/operators/detail/grpc_server.cc b/paddle/operators/detail/grpc_server.cc index 3ddcd839bd..4f94e1315f 100644 --- a/paddle/operators/detail/grpc_server.cc +++ b/paddle/operators/detail/grpc_server.cc @@ -132,6 +132,7 @@ void AsyncGRPCServer::RunSyncUpdate() { cq_send_ = builder.AddCompletionQueue(); cq_get_ = builder.AddCompletionQueue(); + server_ = builder.BuildAndStart(); LOG(INFO) << "Server listening on " << address_ << std::endl; @@ -141,11 +142,11 @@ void AsyncGRPCServer::RunSyncUpdate() { std::bind(&AsyncGRPCServer::TryToRegisterNewGetOne, this); t_send_.reset( - new std::thread(std::bind(&AsyncGRPCServer::HandleRequest, this, false, + new std::thread(std::bind(&AsyncGRPCServer::HandleRequest, this, cq_send_.get(), "cq_send", send_register))); t_get_.reset( - new std::thread(std::bind(&AsyncGRPCServer::HandleRequest, this, true, + new std::thread(std::bind(&AsyncGRPCServer::HandleRequest, this, cq_get_.get(), "cq_get", 
get_register))); // wait server @@ -174,7 +175,7 @@ void AsyncGRPCServer::TryToRegisterNewSendOne() { } RequestSend* send = new RequestSend(&service_, cq_send_.get(), &var_recv_queue_); - VLOG(4) << "create RequestSend status:" << send->Status(); + VLOG(4) << "Create RequestSend status:" << send->Status(); } void AsyncGRPCServer::TryToRegisterNewGetOne() { @@ -184,11 +185,11 @@ void AsyncGRPCServer::TryToRegisterNewGetOne() { } RequestGet* get = new RequestGet(&service_, cq_get_.get(), scope_, dev_ctx_, &var_get_queue_); - VLOG(4) << "create Requestget status:" << get->Status(); + VLOG(4) << "Create RequestGet status:" << get->Status(); } -// FIXME(typhoonzero): remove wait argument and change cq_name to enum. -void AsyncGRPCServer::HandleRequest(bool wait, grpc::ServerCompletionQueue* cq, +// FIXME(typhoonzero): change cq_name to enum. +void AsyncGRPCServer::HandleRequest(grpc::ServerCompletionQueue* cq, std::string cq_name, std::function TryToRegisterNewOne) { TryToRegisterNewOne(); diff --git a/paddle/operators/detail/grpc_server.h b/paddle/operators/detail/grpc_server.h index 1ca9086c74..3f8b9d9317 100644 --- a/paddle/operators/detail/grpc_server.h +++ b/paddle/operators/detail/grpc_server.h @@ -57,8 +57,7 @@ class AsyncGRPCServer final : public sendrecv::SendRecvService::Service { void ShutDown(); protected: - void HandleRequest(bool wait, grpc::ServerCompletionQueue *cq, - std::string cq_name, + void HandleRequest(grpc::ServerCompletionQueue *cq, std::string cq_name, std::function TryToRegisterNewOne); void TryToRegisterNewSendOne(); void TryToRegisterNewGetOne(); diff --git a/paddle/operators/detail/sendrecvop_utils.h b/paddle/operators/detail/sendrecvop_utils.h index bc6581afab..8e66f7299c 100644 --- a/paddle/operators/detail/sendrecvop_utils.h +++ b/paddle/operators/detail/sendrecvop_utils.h @@ -30,6 +30,9 @@ namespace paddle { namespace operators { namespace detail { +#define LISTEN_TERMINATE_MESSAGE "TERMINATE@RECV" +#define BATCH_BARRIER_MESSAGE 
"BATCH_BARRIER@RECV" + void SerializeToMessage(const std::string& name, const framework::Variable* var, const platform::DeviceContext& ctx, sendrecv::VariableMessage* msg); diff --git a/paddle/operators/recv_op.cc b/paddle/operators/recv_op.cc index 5d1df566af..49e1eb3402 100644 --- a/paddle/operators/recv_op.cc +++ b/paddle/operators/recv_op.cc @@ -29,8 +29,6 @@ limitations under the License. */ #include "paddle/operators/detail/simple_block_queue.h" #include "paddle/string/printf.h" -#define LISTEN_TERMINATE_MESSAGE "TERMINATE@RECV" - namespace paddle { namespace operators { @@ -95,7 +93,6 @@ class RecvOp : public framework::OperatorBase { auto param_list = Attr>("ParamList"); auto grad_list = Attr>("GradList"); auto fan_in = Attr("Fanin"); - size_t param_count = param_list.size(); auto *block = Attr(kOptimizeBlock); auto *program = block->Program(); @@ -103,38 +100,50 @@ class RecvOp : public framework::OperatorBase { // TODO(typhoonzero): change this to a while_op for every cluster-batch. bool exit_flag = false; - size_t barrier_size = param_count * fan_in; while (!exit_flag) { // Get from multiple trainers, we don't care about the order in which // the gradients arrives, just add suffix 0~n and merge the gradient. 
rpc_service_->SetCond(0); - for (size_t i = 0; i < barrier_size; ++i) { + size_t recv_var_cnt = 0; + int batch_barrier = 0; + while (batch_barrier != fan_in) { const detail::MessageWithName &v = rpc_service_->Get(); auto grad_var_name = v.first; if (grad_var_name == LISTEN_TERMINATE_MESSAGE) { LOG(INFO) << "received terminate message and exit"; exit_flag = true; break; - } - auto it = std::find(grad_list.begin(), grad_list.end(), grad_var_name); - std::string param_var_name; - if (it != grad_list.end()) { - param_var_name = param_list[it - grad_list.begin()]; + } else if (grad_var_name == BATCH_BARRIER_MESSAGE) { + VLOG(3) << "recv batch barrier message"; + batch_barrier++; + continue; } else { - LOG(ERROR) << "grad has no paired param:" << grad_var_name; - } - VLOG(3) << "received grad: " << grad_var_name - << " updating param: " << param_var_name; - if (fan_in > 1) { - grad_var_name = this->GetGradVarNameForTrainer(grad_var_name); - } - auto *var = recv_scope.FindVar(grad_var_name); - if (var == nullptr) { - LOG(ERROR) << "Can not find server side var: " << grad_var_name; - PADDLE_THROW("Can not find server side var"); + // receive a variable + recv_var_cnt++; + auto it = + std::find(grad_list.begin(), grad_list.end(), grad_var_name); + std::string param_var_name; + if (it != grad_list.end()) { + param_var_name = param_list[it - grad_list.begin()]; + } else { + LOG(ERROR) << "grad has no paired param:" << grad_var_name; + } + VLOG(3) << "received grad: " << grad_var_name + << " updating param: " << param_var_name; + + if (fan_in > 1) { + grad_var_name = this->GetGradVarNameForTrainer(grad_var_name); + } + auto *var = recv_scope.FindVar(grad_var_name); + if (var == nullptr) { + LOG(ERROR) << "Can not find server side var: " << grad_var_name; + PADDLE_THROW("Can not find server side var"); + } + detail::DeserializeFromMessage(v.second, dev_ctx, var); } - detail::DeserializeFromMessage(v.second, dev_ctx, var); } + VLOG(3) << "recv " << recv_var_cnt << " parmeters 
for one barrier."; + // TODO(Yancey1989): merge SelectedRows variables here if (exit_flag) { break; } @@ -146,7 +155,7 @@ class RecvOp : public framework::OperatorBase { LOG(ERROR) << "run sub program error " << e.what(); } rpc_service_->SetCond(1); - rpc_service_->WaitClientGet(barrier_size); + rpc_service_->WaitClientGet(recv_var_cnt); grads_counter_.clear(); } // while(true) } diff --git a/paddle/operators/send_op.cc b/paddle/operators/send_op.cc index 5aa66c20ea..bb719dc2a8 100644 --- a/paddle/operators/send_op.cc +++ b/paddle/operators/send_op.cc @@ -37,17 +37,25 @@ class SendOp : public framework::OperatorBase { auto ins = Inputs("X"); auto outs = Outputs("Out"); std::vector epmap = Attr>("epmap"); + std::vector endpoints = + Attr>("endpoints"); platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance(); auto& ctx = *pool.Get(place); for (size_t i = 0; i < ins.size(); i++) { - VLOG(3) << "sending " << ins[i]; + VLOG(3) << "sending " << ins[i] << " to " << epmap[i]; client_.AsyncSendVariable(epmap[i], ctx, scope, ins[i]); } PADDLE_ENFORCE(client_.Wait()); + for (auto& ep : endpoints) { + VLOG(3) << "batch barrier, ep: " << ep; + client_.AsyncSendBatchBarrier(ep); + } + PADDLE_ENFORCE(client_.Wait()); + for (size_t i = 0; i < outs.size(); i++) { - VLOG(3) << "getting " << outs[i]; + VLOG(3) << "getting " << outs[i] << " from " << epmap[i]; client_.AsyncGetVariable(epmap[i], ctx, scope, outs[i]); } From 1b8bc3c5b3d3884dad4310d58eb949c0cfbf0d60 Mon Sep 17 00:00:00 2001 From: typhoonzero Date: Mon, 29 Jan 2018 16:58:01 +0800 Subject: [PATCH 055/106] rename rpc ops --- paddle/operators/recv_op.cc | 188 ++++++-------------------- paddle/operators/send_op.cc | 13 +- paddle/operators/send_recv_op_test.cc | 19 +-- 3 files changed, 58 insertions(+), 162 deletions(-) diff --git a/paddle/operators/recv_op.cc b/paddle/operators/recv_op.cc index 593c35879a..f8af4a290b 100644 --- a/paddle/operators/recv_op.cc +++ b/paddle/operators/recv_op.cc @@ -12,179 
+12,67 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include -#include #include -#include -#include - -#include "paddle/framework/executor.h" +#include "paddle/framework/data_type.h" #include "paddle/framework/framework.pb.h" #include "paddle/framework/lod_tensor.h" #include "paddle/framework/op_registry.h" -#include "paddle/framework/proto_desc.h" -#include "paddle/operators/detail/grpc_server.h" -#include "paddle/operators/detail/sendrecvop_utils.h" -#include "paddle/operators/detail/simple_block_queue.h" -#include "paddle/string/printf.h" -#define LISTEN_TERMINATE_MESSAGE "TERMINATE@RECV" +#include +#include "paddle/operators/detail/grpc_client.h" namespace paddle { namespace operators { -constexpr char kOptimizeBlock[] = "OptimizeBlock"; - -void RunServer(std::shared_ptr service) { - service->RunSyncUpdate(); - VLOG(4) << "RunServer thread end"; -} - -static void CreateTensorFromMessageType(framework::Variable *var, - sendrecv::VarType var_type) { - if (var_type == sendrecv::VarType::LOD_TENSOR) { - var->GetMutable(); - } else if (var_type == sendrecv::VarType::SELECTED_ROWS) { - var->GetMutable(); - } else { - PADDLE_THROW( - "VariableMessage type %d is not in " - "[LoDTensor, SelectedRows]", - var_type); - } -} - -class RecvOp : public framework::OperatorBase { +class SendOp : public framework::OperatorBase { public: - RecvOp(const std::string &type, const framework::VariableNameMap &inputs, - const framework::VariableNameMap &outputs, - const framework::AttributeMap &attrs) - : OperatorBase(type, inputs, outputs, attrs) { - if (!rpc_service_) { - std::string endpoint = Attr("endpoint"); - rpc_service_.reset(new detail::AsyncGRPCServer(endpoint)); - server_thread_.reset(new std::thread(RunServer, rpc_service_)); + SendOp(const std::string& type, const framework::VariableNameMap& inputs, + const framework::VariableNameMap& 
outputs, + const framework::AttributeMap& attrs) + : OperatorBase(type, inputs, outputs, attrs) {} + + void Run(const framework::Scope& scope, + const platform::Place& place) const override { + auto outs = Outputs("Out"); + std::vector epmap = Attr>("epmap"); + + platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance(); + auto& ctx = *pool.Get(place); + + for (size_t i = 0; i < outs.size(); i++) { + VLOG(3) << "getting " << outs[i]; + client_.AsyncGetVariable(epmap[i], ctx, scope, outs[i]); } - } - - void Stop() override { - detail::MessageWithName term_msg; - term_msg.first = LISTEN_TERMINATE_MESSAGE; - rpc_service_->Push(term_msg); - rpc_service_->ShutDown(); - server_thread_->join(); - } - - std::string GetGradVarNameForTrainer(const std::string &varname) const { - if (grads_counter_.find(varname) == grads_counter_.end()) { - grads_counter_[varname] = 0; - } - return string::Sprintf("%s.trainer_%d", varname, grads_counter_[varname]++); - } - void Run(const framework::Scope &scope, - const platform::Place &dev_place) const override { - platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); - auto &dev_ctx = *pool.Get(dev_place); - framework::Scope &recv_scope = scope.NewScope(); - - // FIXME(Yancey1989): initialize rpc server with laze mode. - rpc_service_->SetScope(&recv_scope); - rpc_service_->SetDevCtx(&dev_ctx); - auto param_list = Attr>("ParamList"); - auto grad_list = Attr>("GradList"); - auto fan_in = Attr("Fanin"); - size_t param_count = param_list.size(); - - auto *block = Attr(kOptimizeBlock); - auto *program = block->Program(); - framework::Executor executor(dev_place); - - // TODO(typhoonzero): change this to a while_op for every cluster-batch. - bool exit_flag = false; - size_t barrier_size = param_count * fan_in; - while (!exit_flag) { - // Get from multiple trainers, we don't care about the order in which - // the gradients arrives, just add suffix 0~n and merge the gradient. 
- rpc_service_->SetCond(0); - for (size_t i = 0; i < barrier_size; ++i) { - const detail::MessageWithName &v = rpc_service_->Get(); - auto grad_var_name = v.first; - if (grad_var_name == LISTEN_TERMINATE_MESSAGE) { - LOG(INFO) << "received terminate message and exit"; - exit_flag = true; - break; - } - auto it = std::find(grad_list.begin(), grad_list.end(), grad_var_name); - std::string param_var_name; - if (it != grad_list.end()) { - param_var_name = param_list[it - grad_list.begin()]; - } else { - LOG(ERROR) << "grad has no paired param:" << grad_var_name; - } - VLOG(3) << "received grad: " << grad_var_name - << " updating param: " << param_var_name; - if (fan_in > 1) { - grad_var_name = this->GetGradVarNameForTrainer(grad_var_name); - } - auto *var = recv_scope.FindVar(grad_var_name); - if (var == nullptr) { - LOG(ERROR) << "Can not find server side var: " << grad_var_name; - PADDLE_THROW("Can not find server side var"); - } - detail::DeserializeFromMessage(v.second, dev_ctx, var); - } - if (exit_flag) { - break; - } - - try { - executor.Run(*program, &recv_scope, block->ID(), /*global_block*/ - false /*create_local_scope*/, false /*create_vars*/); - } catch (std::exception &e) { - LOG(ERROR) << "run sub program error " << e.what(); - } - rpc_service_->SetCond(1); - rpc_service_->WaitClientGet(barrier_size); - grads_counter_.clear(); - } // while(true) + PADDLE_ENFORCE(client_.Wait()); } - protected: - std::shared_ptr rpc_service_; - std::shared_ptr server_thread_; - mutable std::unordered_map grads_counter_; + private: + mutable detail::RPCClient client_; }; -class RecvOpMaker : public framework::OpProtoAndCheckerMaker { +class SendOpMaker : public framework::OpProtoAndCheckerMaker { public: - RecvOpMaker(OpProto *proto, OpAttrChecker *op_checker) + SendOpMaker(OpProto* proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("RX", "(Tensor) Input tensor to be optimized").AsDuplicable(); + AddInput("X", "(Tensor) Input tensor 
to be sent").AsDuplicable(); + AddOutput("Out", "(Tensor) Output tensor to be received from server") + .AsDuplicable(); AddComment(R"DOC( -Recv operator +Send operator -This operator will recieve tensor from send_op +This operator will send tensor to recv_op at the parameter server. )DOC"); - AddAttr("endpoint", - "(string, default 127.0.0.1:6164)" - "IP address to listen on.") - .SetDefault("127.0.0.1:6164") - .AddCustomChecker([](const std::string &ip) { return !ip.empty(); }); - AddAttr( - kOptimizeBlock, "Serialized ProgramDesc string for recv to run."); - AddAttr>( - "ParamList", "type list of string", - "grad->param name mapping to find which parameters to optimize.") + AddAttr>("endpoints", + "(string vector, default 127.0.0.1:6164)" + "Server endpoints to send variables to.") .SetDefault({}); - AddAttr>( - "GradList", "type list of string", - "grad->param name mapping to find which parameters to optimize.") + AddAttr>("epmap", + "(string vector, default 127.0.0.1:6164)" + "Server endpoints in the order of input " + "variables for mapping") .SetDefault({}); - AddAttr("Fanin", "type int", - "Number of trainers in the current cluster job") - .SetDefault(1); } }; @@ -193,4 +81,4 @@ This operator will recieve tensor from send_op namespace ops = paddle::operators; -REGISTER_OPERATOR(recv, ops::RecvOp, ops::RecvOpMaker); +REGISTER_OPERATOR(send, ops::SendOp, ops::SendOpMaker); diff --git a/paddle/operators/send_op.cc b/paddle/operators/send_op.cc index 5aa66c20ea..c90e4d8ef0 100644 --- a/paddle/operators/send_op.cc +++ b/paddle/operators/send_op.cc @@ -37,6 +37,7 @@ class SendOp : public framework::OperatorBase { auto ins = Inputs("X"); auto outs = Outputs("Out"); std::vector epmap = Attr>("epmap"); + bool do_get = Attr("DoGet"); platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance(); auto& ctx = *pool.Get(place); @@ -46,9 +47,11 @@ class SendOp : public framework::OperatorBase { } PADDLE_ENFORCE(client_.Wait()); - for (size_t i = 0; i < 
outs.size(); i++) { - VLOG(3) << "getting " << outs[i]; - client_.AsyncGetVariable(epmap[i], ctx, scope, outs[i]); + if (do_get) { + for (size_t i = 0; i < outs.size(); i++) { + VLOG(3) << "getting " << outs[i]; + client_.AsyncGetVariable(epmap[i], ctx, scope, outs[i]); + } } PADDLE_ENFORCE(client_.Wait()); @@ -79,6 +82,10 @@ This operator will send tensor to recv_op at the parameter server. "Server endpoints in the order of input " "variables for mapping") .SetDefault({}); + AddAttr("DoGet", + "(bool, default true)" + "Whether do GetVariable call after send") + .SetDefault(true); } }; diff --git a/paddle/operators/send_recv_op_test.cc b/paddle/operators/send_recv_op_test.cc index 045a0f5434..874eac9711 100644 --- a/paddle/operators/send_recv_op_test.cc +++ b/paddle/operators/send_recv_op_test.cc @@ -25,7 +25,7 @@ limitations under the License. */ #include "paddle/string/printf.h" USE_NO_KERNEL_OP(send); -USE_NO_KERNEL_OP(recv); +USE_NO_KERNEL_OP(listen_and_serv); USE_OP(sum); namespace f = paddle::framework; @@ -33,7 +33,7 @@ namespace p = paddle::platform; namespace m = paddle::operators::math; // global for simplicity. -std::unique_ptr recv_op; +std::unique_ptr listen_and_serv_op; void InitTensorsInScope(f::Scope &scope, p::CPUPlace &place) { p::CPUDeviceContext ctx(place); @@ -120,7 +120,7 @@ void StartServerNet(bool is_sparse) { InitTensorsInScope(scope, place); } - // sub program run in recv_op, for simple test we use sum + // sub program run in listen_and_serv_op, for simple test we use sum f::ProgramDesc program; f::BlockDesc *block = program.MutableBlock(0); // X for server side tensors, RX for received tensers, must be of same shape. 
@@ -131,8 +131,9 @@ void StartServerNet(bool is_sparse) { attrs.insert({"ParamList", std::vector({"Out"})}); attrs.insert({"GradList", std::vector({"x1"})}); attrs.insert({"OptimizeBlock", block}); - recv_op = f::OpRegistry::CreateOp("recv", {{"RX", {"x1"}}}, {}, attrs); - recv_op->Run(scope, place); + listen_and_serv_op = + f::OpRegistry::CreateOp("listen_and_serv", {{"RX", {"x1"}}}, {}, attrs); + listen_and_serv_op->Run(scope, place); } TEST(SendRecvOp, CPUDense) { @@ -161,9 +162,9 @@ TEST(SendRecvOp, CPUDense) { for (int64_t i = 0; i < target->numel(); ++i) { EXPECT_EQ(expected[i] * 2, actual[i]); } - recv_op->Stop(); + listen_and_serv_op->Stop(); server_thread.join(); - recv_op.reset(nullptr); + listen_and_serv_op.reset(nullptr); } TEST(SendRecvOp, CPUSparse) { @@ -200,7 +201,7 @@ TEST(SendRecvOp, CPUSparse) { EXPECT_EQ(expect_value->mutable_data(place)[i], actual->mutable_data(place)[i]); } - recv_op->Stop(); + listen_and_serv_op->Stop(); server_thread.join(); - recv_op.reset(); + listen_and_serv_op.reset(); } From 369e2ba0d7f0045f710982df182ab947079dfc27 Mon Sep 17 00:00:00 2001 From: yangyaming Date: Mon, 29 Jan 2018 17:22:56 +0800 Subject: [PATCH 056/106] Bug fix for sequence_reshape operator. 
--- paddle/operators/sequence_reshape_op.cc | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/paddle/operators/sequence_reshape_op.cc b/paddle/operators/sequence_reshape_op.cc index 57cca13105..d89a46a712 100644 --- a/paddle/operators/sequence_reshape_op.cc +++ b/paddle/operators/sequence_reshape_op.cc @@ -30,8 +30,13 @@ class SequenceReshapeOp : public framework::OperatorWithKernel { auto x_numel = product(x_dims); PADDLE_ENFORCE_EQ(x_dims.size(), 2U, "Rank of Input(X) should be 2."); int new_dim = ctx->Attrs().Get("new_dim"); - ctx->SetOutputDim("Out", - {x_numel / new_dim, static_cast(new_dim)}); + if (ctx->IsRuntime()) { + ctx->SetOutputDim("Out", + {x_numel / new_dim, static_cast(new_dim)}); + } else { + // when compiling, the batch size is undetermined, just set to -1 + ctx->SetOutputDim("Out", {-1, static_cast(new_dim)}); + } } }; From 33ae13313142d23774367a1925cc45684b44d00d Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Mon, 29 Jan 2018 17:42:41 +0800 Subject: [PATCH 057/106] refine indent --- python/paddle/v2/fluid/framework.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/python/paddle/v2/fluid/framework.py b/python/paddle/v2/fluid/framework.py index ff9bc7239c..cd4179fb12 100644 --- a/python/paddle/v2/fluid/framework.py +++ b/python/paddle/v2/fluid/framework.py @@ -14,6 +14,7 @@ import collections import contextlib +import re import numpy as np @@ -643,14 +644,15 @@ class Block(object): assert isinstance(throw_on_error, bool) and isinstance(with_details, bool) if with_details: + re_add_indent = re.compile(r"\n(.)") res_str = "blocks {\n idx: %d\n parent_idx: %d" % ( self.idx, self.parent_idx) for var in self.vars.itervalues(): - res_str += "\n vars {\n %s }" % var.to_string( - throw_on_error).replace("\n", "\n ") + res_str += "\n vars {\n %s }" % re_add_indent.sub( + r"\n \1", var.to_string(throw_on_error)) for op in self.ops: - res_str += "\n ops {\n %s }" % op.to_string( - 
throw_on_error).replace("\n", "\n ") + res_str += "\n ops {\n %s }" % re_add_indent.sub( + r"\n \1", op.to_string(throw_on_error)) res_str += "\n}" else: protostr = self.desc.serialize_to_string() From 361bd29670c4f3a1618b8395c8aafaee5484ef5d Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Mon, 29 Jan 2018 18:13:06 +0800 Subject: [PATCH 058/106] update --- python/paddle/v2/fluid/framework.py | 51 ++++++++++++++++++++++------- 1 file changed, 40 insertions(+), 11 deletions(-) diff --git a/python/paddle/v2/fluid/framework.py b/python/paddle/v2/fluid/framework.py index cd4179fb12..8bf545e2ec 100644 --- a/python/paddle/v2/fluid/framework.py +++ b/python/paddle/v2/fluid/framework.py @@ -240,20 +240,30 @@ class Variable(object): def __str__(self): return self.to_string(True) - def to_string(self, throw_on_error): + def to_string(self, throw_on_error, with_details=False): """ Get debug string. Args: throw_on_error(bool): True if raise an exception when self is not intialized. + with_details(bool): more details about variables and parameters + (e.g. trainable, optimize_attr, ...) will be printed when with_details is True Returns(str): The debug string. """ + assert isinstance(throw_on_error, bool) and isinstance(with_details, + bool) protostr = self.desc.serialize_to_string() proto = framework_pb2.VarDesc.FromString(str(protostr)) - return _debug_string_(proto, throw_on_error) + res_str = _debug_string_(proto, throw_on_error) + if with_details: + additional_attr = ("error_clip", "stop_gradient") + for attr_name in additional_attr: + res_str += "%s: %s\n" % (attr_name, + str(getattr(self, attr_name))) + return res_str __repr__ = __str__ @@ -636,7 +646,8 @@ class Block(object): Args: throw_on_error(bool): raise exception when self is not initialized when throw_on_error is True - with_details(bool): more details about paramters(e.g. trainable, optimize_attr, ...) will be printed when with_details is True + with_details(bool): more details about variables and parameters + (e.g. 
trainable, optimize_attr, ...) will be printed when with_details is True Returns(str): The debug string. @@ -649,7 +660,7 @@ class Block(object): self.idx, self.parent_idx) for var in self.vars.itervalues(): res_str += "\n vars {\n %s }" % re_add_indent.sub( - r"\n \1", var.to_string(throw_on_error)) + r"\n \1", var.to_string(throw_on_error, with_details)) for op in self.ops: res_str += "\n ops {\n %s }" % re_add_indent.sub( r"\n \1", op.to_string(throw_on_error)) @@ -828,7 +839,8 @@ class Program(object): Args: throw_on_error(bool): raise exception when self is not initialized when throw_on_error is True - with_details(bool): more details about paramters(e.g. trainable, optimize_attr, ...) will be printed when with_details is True + with_details(bool): more details about variables and parameters + (e.g. trainable, optimize_attr, ...) will be printed when with_details is True Returns(str): The debug string. @@ -997,12 +1009,29 @@ class Parameter(Variable): def __str__(self): return self.to_string(True) - def to_string(self, throw_on_error): - res_str = Variable.to_string(self, throw_on_error) - additional_attr = ("trainable", "optimize_attr", "regularizer", - "gradient_clip_attr") - for attr_name in additional_attr: - res_str += "%s: %s\n" % (attr_name, str(getattr(self, attr_name))) + def to_string(self, throw_on_error, with_details=False): + """ + To debug string. + Args: + throw_on_error(bool): raise exception when self is not initialized + when throw_on_error is True + with_details(bool): more details about variables and parameters + (e.g. trainable, optimize_attr, ...) will be printed when with_details is True + + Returns(str): The debug string. 
+ + """ + assert isinstance(throw_on_error, bool) and isinstance(with_details, + bool) + if with_details: + res_str = Variable.to_string(self, throw_on_error, True) + additional_attr = ("trainable", "optimize_attr", "regularizer", + "gradient_clip_attr") + for attr_name in additional_attr: + res_str += "%s: %s\n" % (attr_name, + str(getattr(self, attr_name))) + else: + res_str = Variable.to_string(self, throw_on_error, False) return res_str __repr__ = __str__ From 970147505a393d5739e44bd6913e64b07b1ec1da Mon Sep 17 00:00:00 2001 From: Yi Wang Date: Mon, 29 Jan 2018 18:04:44 -0800 Subject: [PATCH 059/106] Correct deps of threadpool (#7955) * refine channel test * follow comments * Add dependency enforce to threadpool * Revert changes to channel_test.cc * Revert changes to channel_test.cc * Add #include "paddle/framework/macros.h" --- paddle/framework/CMakeLists.txt | 2 +- paddle/framework/threadpool.cc | 2 ++ paddle/framework/threadpool.h | 2 +- 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/paddle/framework/CMakeLists.txt b/paddle/framework/CMakeLists.txt index 318661af8b..8c28709a68 100644 --- a/paddle/framework/CMakeLists.txt +++ b/paddle/framework/CMakeLists.txt @@ -26,7 +26,7 @@ nv_test(lod_tensor_gpu_test SRCS lod_tensor_test.cu DEPS lod_tensor) cc_test(variable_test SRCS variable_test.cc) -cc_library(threadpool SRCS threadpool.cc) +cc_library(threadpool SRCS threadpool.cc DEPS enforce) cc_test(threadpool_test SRCS threadpool_test.cc DEPS threadpool) cc_library(scope SRCS scope.cc DEPS glog threadpool) diff --git a/paddle/framework/threadpool.cc b/paddle/framework/threadpool.cc index b2f5ae4a96..b7d7c00bcf 100644 --- a/paddle/framework/threadpool.cc +++ b/paddle/framework/threadpool.cc @@ -14,6 +14,8 @@ #include "paddle/framework/threadpool.h" +#include "paddle/platform/enforce.h" + namespace paddle { namespace framework { diff --git a/paddle/framework/threadpool.h b/paddle/framework/threadpool.h index 8912b1a43a..4e9b58679d 100644 --- 
a/paddle/framework/threadpool.h +++ b/paddle/framework/threadpool.h @@ -22,7 +22,7 @@ limitations under the License. */ #include #include -#include "paddle/platform/enforce.h" +#include "paddle/platform/macros.h" // for DISABLE_COPY_AND_ASSIGN namespace paddle { namespace framework { From 6ac2e079b37d0b3fd7165362f1749437aea8df5a Mon Sep 17 00:00:00 2001 From: Liu Yiqun Date: Mon, 29 Jan 2018 09:53:19 +0000 Subject: [PATCH 060/106] Enable whole-archive flag in cc_test and use cc_test to rewrite the CMakeLists.txt of inference unittest. --- cmake/generic.cmake | 14 ++++++++++---- paddle/inference/tests/book/CMakeLists.txt | 17 ++++------------- .../book/test_inference_recognize_digits.cc | 15 +++++---------- paddle/testing/paddle_gtest_main.cc | 4 +++- .../fluid/tests/book/test_recognize_digits.py | 6 ++---- 5 files changed, 24 insertions(+), 32 deletions(-) diff --git a/cmake/generic.cmake b/cmake/generic.cmake index 585db019d5..18770fe286 100644 --- a/cmake/generic.cmake +++ b/cmake/generic.cmake @@ -224,12 +224,18 @@ function(cc_test TARGET_NAME) if(WITH_TESTING) set(options "") set(oneValueArgs "") - set(multiValueArgs SRCS DEPS) + set(multiValueArgs SRCS DEPS ARGS) cmake_parse_arguments(cc_test "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) add_executable(${TARGET_NAME} ${cc_test_SRCS}) - target_link_libraries(${TARGET_NAME} ${cc_test_DEPS} paddle_gtest_main paddle_memory gtest gflags) + # Support linking flags: --whole-archive (Linux) / -force_load (MacOS) + target_circle_link_libraries(${TARGET_NAME} ${cc_test_DEPS} paddle_gtest_main paddle_memory gtest gflags) + if("${cc_test_DEPS}" MATCHES "ARCHIVE_START") + list(REMOVE_ITEM cc_test_DEPS ARCHIVE_START ARCHIVE_END) + endif() add_dependencies(${TARGET_NAME} ${cc_test_DEPS} paddle_gtest_main paddle_memory gtest gflags) - add_test(NAME ${TARGET_NAME} COMMAND ${TARGET_NAME} WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}) + add_test(NAME ${TARGET_NAME} + COMMAND ${TARGET_NAME} ${cc_test_ARGS} + 
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}) endif() endfunction(cc_test) @@ -457,7 +463,7 @@ endfunction() function(py_test TARGET_NAME) if(WITH_TESTING) - set(options STATIC static SHARED shared) + set(options "") set(oneValueArgs "") set(multiValueArgs SRCS DEPS ARGS) cmake_parse_arguments(py_test "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) diff --git a/paddle/inference/tests/book/CMakeLists.txt b/paddle/inference/tests/book/CMakeLists.txt index 78083cc218..d3798fb8fd 100644 --- a/paddle/inference/tests/book/CMakeLists.txt +++ b/paddle/inference/tests/book/CMakeLists.txt @@ -1,16 +1,7 @@ set(PYTHON_TESTS_DIR ${PADDLE_SOURCE_DIR}/python/paddle/v2/fluid/tests) -add_executable(test_inference_recognize_digits test_inference_recognize_digits.cc) -target_circle_link_libraries( - test_inference_recognize_digits - ARCHIVE_START - paddle_fluid - ARCHIVE_END - gtest - gflags) -add_test( - NAME test_inference_recognize_digits_mlp - COMMAND test_inference_recognize_digits - --dirname=${PYTHON_TESTS_DIR}/book/recognize_digits_mlp.inference.model - WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}) +cc_test(test_inference_recognize_digits_mlp + SRCS test_inference_recognize_digits.cc + DEPS ARCHIVE_START paddle_fluid ARCHIVE_END + ARGS --dirname=${PYTHON_TESTS_DIR}/book/recognize_digits_mlp.inference.model) set_tests_properties(test_inference_recognize_digits_mlp PROPERTIES DEPENDS test_recognize_digits_mlp_cpu) diff --git a/paddle/inference/tests/book/test_inference_recognize_digits.cc b/paddle/inference/tests/book/test_inference_recognize_digits.cc index de15167ac3..45fbfe27a7 100644 --- a/paddle/inference/tests/book/test_inference_recognize_digits.cc +++ b/paddle/inference/tests/book/test_inference_recognize_digits.cc @@ -66,9 +66,10 @@ TEST(inference, recognize_digits) { } LOG(INFO) << "FLAGS_dirname: " << FLAGS_dirname << std::endl; + std::string dirname = FLAGS_dirname; - // 0. Initialize all the devices - paddle::framework::InitDevices(); + // 0. 
Call `paddle::framework::InitDevices()` initialize all the devices + // In unittests, this is done in paddle/testing/paddle_gtest_main.cc paddle::framework::LoDTensor input; srand(time(0)); @@ -86,7 +87,7 @@ TEST(inference, recognize_digits) { // Run inference on CPU TestInference( - FLAGS_dirname, cpu_feeds, cpu_fetchs1); + dirname, cpu_feeds, cpu_fetchs1); LOG(INFO) << output1.dims(); #ifdef PADDLE_WITH_CUDA @@ -96,7 +97,7 @@ TEST(inference, recognize_digits) { // Run inference on CUDA GPU TestInference( - FLAGS_dirname, cpu_feeds, cpu_fetchs2); + dirname, cpu_feeds, cpu_fetchs2); LOG(INFO) << output2.dims(); EXPECT_EQ(output1.dims(), output2.dims()); @@ -112,9 +113,3 @@ TEST(inference, recognize_digits) { EXPECT_EQ(count, 0) << "There are " << count << " different elements."; #endif } - -int main(int argc, char** argv) { - google::ParseCommandLineFlags(&argc, &argv, false); - testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} diff --git a/paddle/testing/paddle_gtest_main.cc b/paddle/testing/paddle_gtest_main.cc index a7fb50ee41..a2f21e37e4 100644 --- a/paddle/testing/paddle_gtest_main.cc +++ b/paddle/testing/paddle_gtest_main.cc @@ -22,7 +22,9 @@ limitations under the License. 
*/ int main(int argc, char** argv) { std::vector new_argv; std::string gflags_env; - new_argv.push_back(argv[0]); + for (int i = 0; i < argc; ++i) { + new_argv.push_back(argv[i]); + } #ifdef PADDLE_WITH_CUDA new_argv.push_back( strdup("--tryfromenv=fraction_of_gpu_memory_to_use,use_pinned_memory")); diff --git a/python/paddle/v2/fluid/tests/book/test_recognize_digits.py b/python/paddle/v2/fluid/tests/book/test_recognize_digits.py index d6e4675a24..b4b6020f58 100644 --- a/python/paddle/v2/fluid/tests/book/test_recognize_digits.py +++ b/python/paddle/v2/fluid/tests/book/test_recognize_digits.py @@ -163,10 +163,8 @@ def infer(args, save_dirname=None): [inference_program, feed_target_names, fetch_targets] = fluid.io.load_inference_model(save_dirname, exe) - if args.nn_type == 'mlp': - tensor_img = numpy.random.rand(1, 28, 28).astype("float32") - else: - tensor_img = numpy.random.rand(1, 1, 28, 28).astype("float32") + # The input's dimension of conv should be 4-D or 5-D. + tensor_img = numpy.random.rand(1, 1, 28, 28).astype("float32") # Construct feed as a dictionary of {feed_target_name: feed_target_data} # and results will contain a list of data corresponding to fetch_targets. 
From fcff9758ed7f15bc832fa505557bbf94974f687c Mon Sep 17 00:00:00 2001 From: Yibing Liu Date: Mon, 29 Jan 2018 18:12:49 -0800 Subject: [PATCH 061/106] Add label smooth operator --- paddle/operators/label_smooth_op.cc | 85 +++++++++++++++++++ paddle/operators/label_smooth_op.cu | 26 ++++++ paddle/operators/label_smooth_op.h | 58 +++++++++++++ .../v2/fluid/tests/test_label_smooth_op.py | 41 +++++++++ 4 files changed, 210 insertions(+) create mode 100644 paddle/operators/label_smooth_op.cc create mode 100644 paddle/operators/label_smooth_op.cu create mode 100644 paddle/operators/label_smooth_op.h create mode 100644 python/paddle/v2/fluid/tests/test_label_smooth_op.py diff --git a/paddle/operators/label_smooth_op.cc b/paddle/operators/label_smooth_op.cc new file mode 100644 index 0000000000..99a0a005a1 --- /dev/null +++ b/paddle/operators/label_smooth_op.cc @@ -0,0 +1,85 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "paddle/operators/label_smooth_op.h" + +namespace paddle { +namespace operators { + +class LabelSmoothOp : public framework::OperatorWithKernel { + public: + LabelSmoothOp(const std::string &type, + const framework::VariableNameMap &inputs, + const framework::VariableNameMap &outputs, + const framework::AttributeMap &attrs) + : OperatorWithKernel(type, inputs, outputs, attrs) {} + + void InferShape(framework::InferShapeContext *ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("X"), + "Input(X) of LabelSmoothOp should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("Out"), + "Output(Out) of LabelSmoothOp should not be null."); + auto in_dims = ctx->GetInputDim("X"); + ctx->ShareLoD("X", /*->*/ "Out"); + ctx->SetOutputDim("Out", in_dims); + } +}; + +class LabelSmoothOpMaker : public framework::OpProtoAndCheckerMaker { + public: + LabelSmoothOpMaker(OpProto *proto, OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", "The input label of LabelSmooth operator."); + AddOutput("Out", "The smoothed label of LabelSmooth operator."); + AddAttr("epsilon", + "(float, default 0.0f)" + "The smoothing parameter of LabelSmooth operator.") + .SetDefault(0.0f); + AddComment(R"DOC( +LabelSmooth Operator. 
+ +)DOC"); + } +}; + +class LabelSmoothGradOp : public framework::OperatorWithKernel { + public: + LabelSmoothGradOp(const std::string &type, + const framework::VariableNameMap &inputs, + const framework::VariableNameMap &outputs, + const framework::AttributeMap &attrs) + : OperatorWithKernel(type, inputs, outputs, attrs) {} + + void InferShape(framework::InferShapeContext *ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) shouldn't be null."); + PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")), + "Input(Out@GRAD) shouldn't be null."); + ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X")); + } +}; + +} // namespace operators +} // namespace paddle +namespace ops = paddle::operators; + +REGISTER_OP(label_smooth, ops::LabelSmoothOp, ops::LabelSmoothOpMaker, + label_smooth_grad, ops::LabelSmoothGradOp); +REGISTER_OP_CPU_KERNEL( + label_smooth, + ops::LabelSmoothKernel, + ops::LabelSmoothKernel); +REGISTER_OP_CPU_KERNEL( + label_smooth_grad, + ops::LabelSmoothGradKernel, + ops::LabelSmoothGradKernel); diff --git a/paddle/operators/label_smooth_op.cu b/paddle/operators/label_smooth_op.cu new file mode 100644 index 0000000000..5a0cec12bc --- /dev/null +++ b/paddle/operators/label_smooth_op.cu @@ -0,0 +1,26 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "paddle/operators/label_smooth_op.h" + +namespace ops = paddle::operators; + +REGISTER_OP_CUDA_KERNEL( + label_smooth, + ops::LabelSmoothKernel, + ops::LabelSmoothKernel); +REGISTER_OP_CUDA_KERNEL( + label_smooth_grad, + ops::LabelSmoothGradKernel, + ops::LabelSmoothGradKernel); diff --git a/paddle/operators/label_smooth_op.h b/paddle/operators/label_smooth_op.h new file mode 100644 index 0000000000..d94ff43d5a --- /dev/null +++ b/paddle/operators/label_smooth_op.h @@ -0,0 +1,58 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#pragma once + +#include "paddle/framework/eigen.h" +#include "paddle/framework/op_registry.h" + +namespace paddle { +namespace operators { + +template +class LabelSmoothKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const { + auto* out_t = ctx.Output("Out"); + auto* in_t = ctx.Input("X"); + auto label_dim = in_t->dims()[1]; + out_t->mutable_data(ctx.GetPlace()); + + auto epsilon = ctx.Attr("epsilon"); + auto out = framework::EigenVector::Flatten(*out_t); + auto in = framework::EigenVector::Flatten(*in_t); + auto& dev = *ctx.template device_context().eigen_device(); + out.device(dev) = + static_cast(1 - epsilon) * in + static_cast(epsilon / label_dim); + } +}; + +template +class LabelSmoothGradKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const { + auto* d_out_t = ctx.Input(framework::GradVarName("Out")); + auto* d_in_t = ctx.Output(framework::GradVarName("X")); + d_in_t->mutable_data(ctx.GetPlace()); + + auto d_out = framework::EigenVector::Flatten(*d_out_t); + auto d_in = framework::EigenVector::Flatten(*d_in_t); + + auto epsilon = ctx.Attr("epsilon"); + auto& dev = *ctx.template device_context().eigen_device(); + d_in.device(dev) = static_cast(1 - epsilon) * d_out; + } +}; +} // namespace operators +} // namespace paddle diff --git a/python/paddle/v2/fluid/tests/test_label_smooth_op.py b/python/paddle/v2/fluid/tests/test_label_smooth_op.py new file mode 100644 index 0000000000..d156e2c35f --- /dev/null +++ b/python/paddle/v2/fluid/tests/test_label_smooth_op.py @@ -0,0 +1,41 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest +import numpy as np +from op_test import OpTest + + +class TestLabelSmoothOp(OpTest): + def setUp(self): + self.op_type = "label_smooth" + epsilon = 0.1 + batch_size, label_dim = 5, 10 + label = np.zeros((batch_size, label_dim)).astype("float64") + nonzero_index = np.random.randint(label_dim, size=(batch_size)) + label[np.arange(batch_size), nonzero_index] = 1 + smoothed_label = (1 - epsilon) * label + epsilon / label_dim + self.inputs = {'X': label} + self.attrs = {'epsilon': epsilon} + self.outputs = {'Out': smoothed_label} + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(["X"], "Out") + + +if __name__ == '__main__': + unittest.main() From 87b5559cd15a28d515b16f3ad04ca9919c7edd32 Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Mon, 29 Jan 2018 20:41:08 +0800 Subject: [PATCH 062/106] fix scale and bias dim --- paddle/operators/layer_norm_op.cc | 84 +++++++++---------- .../v2/fluid/tests/test_layer_norm_op.py | 16 ++-- 2 files changed, 52 insertions(+), 48 deletions(-) diff --git a/paddle/operators/layer_norm_op.cc b/paddle/operators/layer_norm_op.cc index 9e618d10d2..07ca8ac222 100644 --- a/paddle/operators/layer_norm_op.cc +++ b/paddle/operators/layer_norm_op.cc @@ -38,10 +38,6 @@ class LayerNormOp : public framework::OperatorWithKernel { PADDLE_ENFORCE(ctx->HasInput("Bias"), ""); PADDLE_ENFORCE(ctx->HasOutput("Y"), ""); - PADDLE_ENFORCE_EQ(ctx->GetInputDim("Scale").size(), 1UL); - PADDLE_ENFORCE_EQ(ctx->GetInputDim("Scale")[0], 1); - 
PADDLE_ENFORCE_EQ(ctx->GetInputDim("Bias").size(), 1UL); - PADDLE_ENFORCE_EQ(ctx->GetInputDim("Bias")[0], 1); auto x_dim = ctx->GetInputDim("X"); auto begin_norm_axis = ctx->Attrs().Get("begin_norm_axis"); PADDLE_ENFORCE_LT(begin_norm_axis, x_dim.size(), @@ -50,6 +46,11 @@ class LayerNormOp : public framework::OperatorWithKernel { auto matrix_dim = framework::flatten_to_2d(x_dim, begin_norm_axis); int left = static_cast(matrix_dim[0]); + PADDLE_ENFORCE_EQ(ctx->GetInputDim("Scale").size(), 1UL); + PADDLE_ENFORCE_EQ(ctx->GetInputDim("Scale")[0], left); + PADDLE_ENFORCE_EQ(ctx->GetInputDim("Bias").size(), 1UL); + PADDLE_ENFORCE_EQ(ctx->GetInputDim("Bias")[0], left); + ctx->SetOutputDim("Y", ctx->GetInputDim("X")); ctx->SetOutputDim("Mean", {left}); ctx->SetOutputDim("Variance", {left}); @@ -64,10 +65,10 @@ class LayerNormOpMaker : public framework::OpProtoAndCheckerMaker { : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "The input tensor"); AddInput("Scale", - "Scale is a 1-dimensional tensor of size 1 " + "Scale is a 1-dimensional tensor of size H " "that is applied to the output"); AddInput("Bias", - "Bias is a 1-dimensional tensor of size 1 " + "Bias is a 1-dimensional tensor of size H " "that is applied to the output"); AddOutput("Y", "result after normalization"); AddOutput("Mean", "Mean of the current mini batch."); @@ -110,9 +111,6 @@ class LayerNormKernel const auto &x_dims = x->dims(); const auto begin_norm_axis = ctx.Attr("begin_norm_axis"); - auto scale_data = scale->data()[0]; - auto bias_data = bias->data()[0]; - auto *output = ctx.Output("Y"); auto *mean = ctx.Output("Mean"); auto *var = ctx.Output("Variance"); @@ -123,7 +121,10 @@ class LayerNormKernel auto matrix_dim = framework::flatten_to_2d(x_dims, begin_norm_axis); int left = static_cast(matrix_dim[0]); int right = static_cast(matrix_dim[1]); + auto input_map = ConstEigenMatrixMapRowMajor(x->data(), left, right); + auto scale_map = ConstEigenMatrixMapRowMajor(scale->data(), left, 1); + 
auto bias_map = ConstEigenMatrixMapRowMajor(bias->data(), left, 1); auto mean_map = EigenMatrixMapRowMajor(mean->data(), left, 1); auto var_map = EigenMatrixMapRowMajor(var->data(), left, 1); auto output_map = EigenMatrixMapRowMajor(output->data(), left, right); @@ -138,18 +139,15 @@ class LayerNormKernel .mean() .unaryExpr(add_epslion); - auto scale_inv_std = [scale_data](T ele) { - return std::sqrt(1 / ele) * scale_data; - }; - auto sub_bias = [bias_data](T ele) { return bias_data - ele; }; + auto inv_std_func = [](T ele) { return std::sqrt(1 / ele); }; + // TODO(zcd): Some thinking about output_map, is it appropriate that // `output_map` and `input_map` point to the same memory. - output_map = (var_map.unaryExpr(scale_inv_std).replicate(1, right)) - .cwiseProduct(input_map) + - var_map.unaryExpr(scale_inv_std) - .cwiseProduct(mean_map) - .unaryExpr(sub_bias) - .replicate(1, right); + auto inv_std_scale = + var_map.unaryExpr(inv_std_func).cwiseProduct(scale_map); + output_map = + inv_std_scale.replicate(1, right).cwiseProduct(input_map) + + (bias_map - inv_std_scale.cwiseProduct(mean_map)).replicate(1, right); } }; @@ -165,17 +163,17 @@ class LayerNormGradOp : public framework::OperatorWithKernel { PADDLE_ENFORCE(ctx->HasInput("Variance"), ""); PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Y")), ""); - const auto x_dims = ctx->GetInputDim("X"); - // check output if (ctx->HasOutput(framework::GradVarName("X"))) { - ctx->SetOutputDim(framework::GradVarName("X"), x_dims); + ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X")); } if (ctx->HasOutput(framework::GradVarName("Scale"))) { - ctx->SetOutputDim(framework::GradVarName("Scale"), {1}); + ctx->SetOutputDim(framework::GradVarName("Scale"), + ctx->GetInputDim("Scale")); } if (ctx->HasOutput(framework::GradVarName("Bias"))) { - ctx->SetOutputDim(framework::GradVarName("Bias"), {1}); + ctx->SetOutputDim(framework::GradVarName("Bias"), + ctx->GetInputDim("Bias")); } } @@ -210,20 +208,20 @@ 
class LayerNormGradKernel const auto *var = ctx.Input("Variance"); const auto *scale = ctx.Input("Scale"); const auto *d_y = ctx.Input(framework::GradVarName("Y")); - auto scale_data = scale->data()[0]; const auto &x_dims = x->dims(); const auto begin_norm_axis = ctx.Attr("begin_norm_axis"); auto matrix_dim = framework::flatten_to_2d(x_dims, begin_norm_axis); - int left = static_cast(matrix_dim[0]), - right = static_cast(matrix_dim[1]); + int left = static_cast(matrix_dim[0]); + int right = static_cast(matrix_dim[1]); // init output auto *d_x = ctx.Output(framework::GradVarName("X")); auto *d_scale = ctx.Output(framework::GradVarName("Scale")); auto *d_bias = ctx.Output(framework::GradVarName("Bias")); + auto scale_map = ConstEigenMatrixMapRowMajor(scale->data(), left, 1); auto x_map = ConstEigenMatrixMapRowMajor(x->data(), left, right); auto d_y_map = ConstEigenMatrixMapRowMajor(d_y->data(), left, right); auto mean_map = ConstEigenMatrixMapRowMajor(mean->data(), left, 1); @@ -231,36 +229,38 @@ class LayerNormGradKernel if (d_bias) { d_bias->mutable_data(ctx.GetPlace()); - d_bias->data()[0] = d_y_map.sum(); + auto d_bias_map = EigenMatrixMapRowMajor(d_bias->data(), left, 1); + d_bias_map = d_y_map.colwise().mean(); } if (d_scale) { d_scale->mutable_data(ctx.GetPlace()); - auto inv_std = [](T ele) { return std::sqrt(1 / ele); }; + auto d_scale_map = EigenMatrixMapRowMajor(d_scale->data(), left, 1); + auto inv_std_func = [](T ele) { return std::sqrt(1 / ele); }; // There are two equation to compute d_scale. 
One uses "Y" and the other // does not use "Y" - d_scale->data()[0] = + d_scale_map = ((x_map - mean_map.replicate(1, right)) - .cwiseProduct(var_map.unaryExpr(inv_std).replicate(1, right)) + .cwiseProduct( + var_map.unaryExpr(inv_std_func).replicate(1, right)) .cwiseProduct(d_y_map)) - .sum(); + .colwise() + .mean(); } if (d_x) { d_x->mutable_data(ctx.GetPlace()); auto d_x_map = EigenMatrixMapRowMajor(d_x->data(), left, right); auto triple_product_func = [](T ele) { return ele * ele * ele; }; - auto scale_func = [scale_data](T ele) { return ele * scale_data; }; auto inv_std_func = [](T ele) { return std::sqrt(1 / ele); }; - auto inv_std_scale_func = [scale_data](T ele) { - return std::sqrt(1 / ele) * scale_data; - }; // dy_dx - auto dx_end = var_map.unaryExpr(inv_std_scale_func) + auto dx_end = var_map.unaryExpr(inv_std_func) + .cwiseProduct(scale_map) .replicate(1, right) .cwiseProduct(d_y_map); // dy_dmean_dx auto dx_mean = (T(-1.0) / right) * - var_map.unaryExpr(inv_std_scale_func) + var_map.unaryExpr(inv_std_func) + .cwiseProduct(scale_map) .replicate(1, right) .cwiseProduct(d_y_map) .rowwise() @@ -274,11 +274,11 @@ class LayerNormGradKernel auto dvar_end = var_map.unaryExpr(inv_std_func) .unaryExpr(triple_product_func) .cwiseProduct(dvar_end_part) + .cwiseProduct(scale_map) .replicate(1, right); - auto dx_var = (T(-1.0) / right) * - (x_map - mean_map.replicate(1, right)) - .cwiseProduct(dvar_end) - .unaryExpr(scale_func); + auto dx_var = + (T(-1.0) / right) * + (x_map - mean_map.replicate(1, right)).cwiseProduct(dvar_end); d_x_map = dx_end + dx_mean + dx_var; } diff --git a/python/paddle/v2/fluid/tests/test_layer_norm_op.py b/python/paddle/v2/fluid/tests/test_layer_norm_op.py index 8ce327436f..9264cf4b79 100644 --- a/python/paddle/v2/fluid/tests/test_layer_norm_op.py +++ b/python/paddle/v2/fluid/tests/test_layer_norm_op.py @@ -39,8 +39,9 @@ def _reference_layer_norm_naive(x, scale, beta, epsilon, begin_norm_axis=1): x.shape = [N, D] mean = np.mean(x, axis=1) 
var = np.var(x, axis=1) + epsilon - output = scale * np.divide((x - mean.reshape([N, 1])), - (np.sqrt(var)).reshape([N, 1])) + beta + output = scale.reshape([1, D]) * np.divide( + (x - mean.reshape([N, 1])), + (np.sqrt(var)).reshape([N, 1])) + beta.reshape([1, D]) output.shape = old_shape x.shape = old_shape return output, mean, var @@ -55,8 +56,10 @@ def _reference_layer_norm_grad(x, grad_y, scale, mean, var, begin_norm_axis=1): mean.shape = [N, 1] var.shape = [N, 1] - d_scale = np.sum(grad_y).reshape([1, ]) - d_bias = np.sum(((x - mean) * np.sqrt(1 / var)) * grad_y).reshape([1, ]) + d_scale = np.sum(grad_y, axis=1).reshape([1, D]) + d_bias = scale.reshape([1, D]) * np.sum(( + (x - mean) * np.sqrt(1 / var)) * grad_y, + axis=1).reshape([1, D]) dx_end = np.sqrt(1.0 / var) * grad_y @@ -69,7 +72,7 @@ def _reference_layer_norm_grad(x, grad_y, scale, mean, var, begin_norm_axis=1): d_std = np.sum(-1.0 / var * (x - mean) * grad_y, axis=1).reshape([N, 1]) * ( 1.0 / D * np.sqrt(1.0 / var).reshape([N, 1]) * (x - mean)) - grad_x = scale * (dx_end + d_mean + d_std) + grad_x = scale.reshape([1, D]) * (dx_end + d_mean + d_std) grad_y.shape = x_shape x.shape = x_shape @@ -146,7 +149,8 @@ class TestLayerNormdOp(OpTest): # attr epsilon = 0.00001 x_shape = shape - scale_shape = [1] + D = reduce(mul, x_shape[begin_norm_axis:len(x_shape)], 1) + scale_shape = [D] np.random.random(123) x_val = np.random.random_sample(x_shape).astype(np.float32) scale_val = np.random.random_sample(scale_shape).astype(np.float32) From 7e0d21de6d3352c1238d35d2586f40e48b6da27f Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Tue, 30 Jan 2018 11:11:04 +0800 Subject: [PATCH 063/106] fix scale and bias dim --- paddle/operators/layer_norm_op.cc | 31 ++++++++++++++++--------------- 1 file changed, 16 insertions(+), 15 deletions(-) diff --git a/paddle/operators/layer_norm_op.cc b/paddle/operators/layer_norm_op.cc index 07ca8ac222..125ac9f53f 100644 --- a/paddle/operators/layer_norm_op.cc +++ 
b/paddle/operators/layer_norm_op.cc @@ -123,8 +123,8 @@ class LayerNormKernel int right = static_cast(matrix_dim[1]); auto input_map = ConstEigenMatrixMapRowMajor(x->data(), left, right); - auto scale_map = ConstEigenMatrixMapRowMajor(scale->data(), left, 1); - auto bias_map = ConstEigenMatrixMapRowMajor(bias->data(), left, 1); + auto scale_map = ConstEigenMatrixMapRowMajor(scale->data(), 1, right); + auto bias_map = ConstEigenMatrixMapRowMajor(bias->data(), 1, right); auto mean_map = EigenMatrixMapRowMajor(mean->data(), left, 1); auto var_map = EigenMatrixMapRowMajor(var->data(), left, 1); auto output_map = EigenMatrixMapRowMajor(output->data(), left, right); @@ -143,11 +143,11 @@ class LayerNormKernel // TODO(zcd): Some thinking about output_map, is it appropriate that // `output_map` and `input_map` point to the same memory. - auto inv_std_scale = - var_map.unaryExpr(inv_std_func).cwiseProduct(scale_map); - output_map = - inv_std_scale.replicate(1, right).cwiseProduct(input_map) + - (bias_map - inv_std_scale.cwiseProduct(mean_map)).replicate(1, right); + auto inv_std_scale = var_map.unaryExpr(inv_std_func); + output_map = (input_map - mean_map.replicate(1, right)) + .cwiseProduct(inv_std_scale.replicate(1, right)) + .cwiseProduct(scale_map.replicate(left, 1)) - + bias_map.replicate(left, 1); } }; @@ -221,7 +221,7 @@ class LayerNormGradKernel auto *d_scale = ctx.Output(framework::GradVarName("Scale")); auto *d_bias = ctx.Output(framework::GradVarName("Bias")); - auto scale_map = ConstEigenMatrixMapRowMajor(scale->data(), left, 1); + auto scale_map = ConstEigenMatrixMapRowMajor(scale->data(), 1, right); auto x_map = ConstEigenMatrixMapRowMajor(x->data(), left, right); auto d_y_map = ConstEigenMatrixMapRowMajor(d_y->data(), left, right); auto mean_map = ConstEigenMatrixMapRowMajor(mean->data(), left, 1); @@ -229,12 +229,13 @@ class LayerNormGradKernel if (d_bias) { d_bias->mutable_data(ctx.GetPlace()); - auto d_bias_map = EigenMatrixMapRowMajor(d_bias->data(), 
left, 1); + auto d_bias_map = EigenMatrixMapRowMajor(d_bias->data(), 1, right); d_bias_map = d_y_map.colwise().mean(); } if (d_scale) { d_scale->mutable_data(ctx.GetPlace()); - auto d_scale_map = EigenMatrixMapRowMajor(d_scale->data(), left, 1); + auto d_scale_map = + EigenMatrixMapRowMajor(d_scale->data(), 1, right); auto inv_std_func = [](T ele) { return std::sqrt(1 / ele); }; // There are two equation to compute d_scale. One uses "Y" and the other // does not use "Y" @@ -254,15 +255,15 @@ class LayerNormGradKernel auto inv_std_func = [](T ele) { return std::sqrt(1 / ele); }; // dy_dx auto dx_end = var_map.unaryExpr(inv_std_func) - .cwiseProduct(scale_map) .replicate(1, right) - .cwiseProduct(d_y_map); + .cwiseProduct(d_y_map) + .cwiseProduct(scale_map.replicate(left, 1)); // dy_dmean_dx auto dx_mean = (T(-1.0) / right) * var_map.unaryExpr(inv_std_func) - .cwiseProduct(scale_map) .replicate(1, right) .cwiseProduct(d_y_map) + .cwiseProduct(scale_map.replicate(left, 1)) .rowwise() .sum() .replicate(1, right); @@ -274,8 +275,8 @@ class LayerNormGradKernel auto dvar_end = var_map.unaryExpr(inv_std_func) .unaryExpr(triple_product_func) .cwiseProduct(dvar_end_part) - .cwiseProduct(scale_map) - .replicate(1, right); + .replicate(1, right) + .cwiseProduct(scale_map.replicate(left, 1)); auto dx_var = (T(-1.0) / right) * (x_map - mean_map.replicate(1, right)).cwiseProduct(dvar_end); From ccefde203adb1a5af99fb3ce30ba553f0aec1680 Mon Sep 17 00:00:00 2001 From: typhoonzero Date: Tue, 30 Jan 2018 11:30:31 +0800 Subject: [PATCH 064/106] follow comments --- paddle/operators/listen_and_serv_op.cc | 207 +++++++++++++++++++ paddle/operators/recv_op.cc | 9 +- paddle/operators/send_op.cc | 12 +- python/paddle/v2/fluid/tests/test_recv_op.py | 2 +- 4 files changed, 214 insertions(+), 16 deletions(-) create mode 100644 paddle/operators/listen_and_serv_op.cc diff --git a/paddle/operators/listen_and_serv_op.cc b/paddle/operators/listen_and_serv_op.cc new file mode 100644 index 
0000000000..5745938ed9 --- /dev/null +++ b/paddle/operators/listen_and_serv_op.cc @@ -0,0 +1,207 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include +#include +#include +#include + +#include + +#include "paddle/framework/executor.h" +#include "paddle/framework/framework.pb.h" +#include "paddle/framework/lod_tensor.h" +#include "paddle/framework/op_registry.h" +#include "paddle/framework/proto_desc.h" +#include "paddle/operators/detail/grpc_server.h" +#include "paddle/operators/detail/sendrecvop_utils.h" +#include "paddle/operators/detail/simple_block_queue.h" +#include "paddle/string/printf.h" + +namespace paddle { +namespace operators { + +constexpr char kOptimizeBlock[] = "OptimizeBlock"; + +void RunServer(std::shared_ptr service) { + service->RunSyncUpdate(); + VLOG(4) << "RunServer thread end"; +} + +static void CreateTensorFromMessageType(framework::Variable *var, + sendrecv::VarType var_type) { + if (var_type == sendrecv::VarType::LOD_TENSOR) { + var->GetMutable(); + } else if (var_type == sendrecv::VarType::SELECTED_ROWS) { + var->GetMutable(); + } else { + PADDLE_THROW( + "VariableMessage type %d is not in " + "[LoDTensor, SelectedRows]", + var_type); + } +} + +class ListenAndServOp : public framework::OperatorBase { + public: + ListenAndServOp(const std::string &type, + const framework::VariableNameMap &inputs, + const framework::VariableNameMap &outputs, + const framework::AttributeMap &attrs) + : 
OperatorBase(type, inputs, outputs, attrs) { + if (!rpc_service_) { + std::string endpoint = Attr("endpoint"); + rpc_service_.reset(new detail::AsyncGRPCServer(endpoint)); + server_thread_.reset(new std::thread(RunServer, rpc_service_)); + } + } + + void Stop() override { + detail::MessageWithName term_msg; + term_msg.first = LISTEN_TERMINATE_MESSAGE; + rpc_service_->Push(term_msg); + rpc_service_->ShutDown(); + server_thread_->join(); + } + + std::string GetGradVarNameForTrainer(const std::string &varname) const { + if (grads_counter_.find(varname) == grads_counter_.end()) { + grads_counter_[varname] = 0; + } + return string::Sprintf("%s.trainer_%d", varname, grads_counter_[varname]++); + } + + void Run(const framework::Scope &scope, + const platform::Place &dev_place) const override { + platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); + auto &dev_ctx = *pool.Get(dev_place); + framework::Scope &recv_scope = scope.NewScope(); + + // FIXME(Yancey1989): initialize rpc server with lazy mode. + rpc_service_->SetScope(&recv_scope); + rpc_service_->SetDevCtx(&dev_ctx); + auto param_list = Attr>("ParamList"); + auto grad_list = Attr>("GradList"); + auto fan_in = Attr("Fanin"); + + auto *block = Attr(kOptimizeBlock); + auto *program = block->Program(); + framework::Executor executor(dev_place); + + // TODO(typhoonzero): change this to a while_op for every cluster-batch. + bool exit_flag = false; + while (!exit_flag) { + // Get from multiple trainers, we don't care about the order in which + // the gradients arrives, just add suffix 0~n and merge the gradient. 
+ rpc_service_->SetCond(0); + size_t recv_var_cnt = 0; + int batch_barrier = 0; + while (batch_barrier != fan_in) { + const detail::MessageWithName &v = rpc_service_->Get(); + auto grad_var_name = v.first; + if (grad_var_name == LISTEN_TERMINATE_MESSAGE) { + LOG(INFO) << "received terminate message and exit"; + exit_flag = true; + break; + } else if (grad_var_name == BATCH_BARRIER_MESSAGE) { + VLOG(3) << "recv batch barrier message"; + batch_barrier++; + continue; + } else { + // receive a variable + recv_var_cnt++; + auto it = + std::find(grad_list.begin(), grad_list.end(), grad_var_name); + std::string param_var_name; + if (it != grad_list.end()) { + param_var_name = param_list[it - grad_list.begin()]; + } else { + LOG(ERROR) << "grad has no paired param:" << grad_var_name; + } + VLOG(3) << "received grad: " << grad_var_name + << " updating param: " << param_var_name; + + if (fan_in > 1) { + grad_var_name = this->GetGradVarNameForTrainer(grad_var_name); + } + auto *var = recv_scope.FindVar(grad_var_name); + if (var == nullptr) { + LOG(ERROR) << "Can not find server side var: " << grad_var_name; + PADDLE_THROW("Can not find server side var"); + } + detail::DeserializeFromMessage(v.second, dev_ctx, var); + } + } + VLOG(3) << "recv " << recv_var_cnt << " parmeters for one barrier."; + // TODO(Yancey1989): merge SelectedRows variables here + if (exit_flag) { + rpc_service_->ShutDown(); + } + + try { + executor.Run(*program, &recv_scope, block->ID(), /*global_block*/ + false /*create_local_scope*/, false /*create_vars*/); + } catch (std::exception &e) { + LOG(ERROR) << "run sub program error " << e.what(); + } + rpc_service_->SetCond(1); + rpc_service_->WaitClientGet(recv_var_cnt); + grads_counter_.clear(); + } // while(true) + } + + protected: + std::shared_ptr rpc_service_; + std::shared_ptr server_thread_; + mutable std::unordered_map grads_counter_; +}; + +class ListenAndServOpMaker : public framework::OpProtoAndCheckerMaker { + public: + 
ListenAndServOpMaker(OpProto *proto, OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddComment(R"DOC( +ListenAndServ operator + +This operator will start a RPC server which can receive variables +from send_op and send back variables to recv_op. +)DOC"); + AddAttr("endpoint", + "(string, default 127.0.0.1:6164)" + "IP address to listen on.") + .SetDefault("127.0.0.1:6164") + .AddCustomChecker([](const std::string &ip) { return !ip.empty(); }); + AddAttr(kOptimizeBlock, + "BlockID to run on server side."); + AddAttr>( + "ParamList", "type list of string", + "grad->param name mapping to find which parameters to optimize.") + .SetDefault({}); + AddAttr>( + "GradList", "type list of string", + "grad->param name mapping to find which parameters to optimize.") + .SetDefault({}); + AddAttr("Fanin", "type int", + "Number of trainers in the current cluster job") + .SetDefault(1); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; + +REGISTER_OPERATOR(listen_and_serv, ops::ListenAndServOp, + ops::ListenAndServOpMaker); \ No newline at end of file diff --git a/paddle/operators/recv_op.cc b/paddle/operators/recv_op.cc index 1e64d5c65c..ba71094219 100644 --- a/paddle/operators/recv_op.cc +++ b/paddle/operators/recv_op.cc @@ -55,19 +55,12 @@ class RecvOpMaker : public framework::OpProtoAndCheckerMaker { public: RecvOpMaker(OpProto* proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "(Tensor) Input tensor to be sent").AsDuplicable(); - AddOutput("Out", "(Tensor) Output tensor to be received from server") - .AsDuplicable(); + AddOutput("Out", "(Tensor) Variables to get from server.").AsDuplicable(); AddComment(R"DOC( Recv operator This operator can get variables from server side. 
)DOC"); - AddAttr>("endpoints", - "(string vector, default 127.0.0.1:6164)" - "Server endpoints to recv variables" - "from.") - .SetDefault({}); AddAttr>("epmap", "(string vector, default 127.0.0.1:6164)" "Server endpoints in the order of input " diff --git a/paddle/operators/send_op.cc b/paddle/operators/send_op.cc index 9c180a7244..51e6b9de58 100644 --- a/paddle/operators/send_op.cc +++ b/paddle/operators/send_op.cc @@ -37,7 +37,6 @@ class SendOp : public framework::OperatorBase { auto ins = Inputs("X"); auto outs = Outputs("Out"); std::vector epmap = Attr>("epmap"); - bool do_get = Attr("DoGet"); std::vector endpoints = Attr>("endpoints"); @@ -55,7 +54,7 @@ class SendOp : public framework::OperatorBase { } PADDLE_ENFORCE(client_.Wait()); - if (do_get) { + if (outs.size() > 0) { for (size_t i = 0; i < outs.size(); i++) { VLOG(3) << "getting " << outs[i] << " from " << epmap[i]; client_.AsyncGetVariable(epmap[i], ctx, scope, outs[i]); @@ -65,7 +64,8 @@ class SendOp : public framework::OperatorBase { } private: - // TODO(typhoonzero): put RPCClient in a Variable. + // TODO(typhoonzero): put RPCClient in a Variable, so that + // send and recv can use the same connection. mutable detail::RPCClient client_; }; @@ -81,6 +81,8 @@ Send operator This operator will send tensor to recv_op at the parameter server. )DOC"); + // TODO(typhoonzero): remove this attr generate de-duplicated vector from + // epmap when initializing. AddAttr>("endpoints", "(string vector, default 127.0.0.1:6164)" "Server endpoints to send variables to.") @@ -90,10 +92,6 @@ This operator will send tensor to recv_op at the parameter server. 
"Server endpoints in the order of input " "variables for mapping") .SetDefault({}); - AddAttr("DoGet", - "(bool, default true)" - "Whether do GetVariable call after send") - .SetDefault(true); } }; diff --git a/python/paddle/v2/fluid/tests/test_recv_op.py b/python/paddle/v2/fluid/tests/test_recv_op.py index def5ca9442..3a02b88241 100644 --- a/python/paddle/v2/fluid/tests/test_recv_op.py +++ b/python/paddle/v2/fluid/tests/test_recv_op.py @@ -29,7 +29,7 @@ class TestRecvOp(unittest.TestCase): p = Process(target=self.init_serv, args=(place, )) p.daemon = True p.start() - time.sleep(5) + time.sleep(1) self.init_client(place) # FIXME(typhoonzero): find a way to gracefully shutdown the server. os.system("kill -9 %d" % p.pid) From a96ac4f54d37fe225f3ca5b9075a114d22dbe1d6 Mon Sep 17 00:00:00 2001 From: Yang Yu Date: Tue, 30 Jan 2018 12:57:29 +0800 Subject: [PATCH 065/106] Refine code --- .../v2/fluid/tests/book/test_word2vec.py | 168 ++++++++++-------- 1 file changed, 98 insertions(+), 70 deletions(-) diff --git a/python/paddle/v2/fluid/tests/book/test_word2vec.py b/python/paddle/v2/fluid/tests/book/test_word2vec.py index 8cf54846fe..cdfa910fcd 100644 --- a/python/paddle/v2/fluid/tests/book/test_word2vec.py +++ b/python/paddle/v2/fluid/tests/book/test_word2vec.py @@ -12,76 +12,104 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import numpy as np import paddle.v2 as paddle import paddle.v2.fluid as fluid +import unittest -PASS_NUM = 100 -EMBED_SIZE = 32 -HIDDEN_SIZE = 256 -N = 5 -BATCH_SIZE = 32 -IS_SPARSE = True - -word_dict = paddle.dataset.imikolov.build_dict() -dict_size = len(word_dict) - -first_word = fluid.layers.data(name='firstw', shape=[1], dtype='int64') -second_word = fluid.layers.data(name='secondw', shape=[1], dtype='int64') -third_word = fluid.layers.data(name='thirdw', shape=[1], dtype='int64') -forth_word = fluid.layers.data(name='forthw', shape=[1], dtype='int64') -next_word = fluid.layers.data(name='nextw', shape=[1], dtype='int64') - -embed_first = fluid.layers.embedding( - input=first_word, - size=[dict_size, EMBED_SIZE], - dtype='float32', - is_sparse=IS_SPARSE, - param_attr='shared_w') -embed_second = fluid.layers.embedding( - input=second_word, - size=[dict_size, EMBED_SIZE], - dtype='float32', - is_sparse=IS_SPARSE, - param_attr='shared_w') -embed_third = fluid.layers.embedding( - input=third_word, - size=[dict_size, EMBED_SIZE], - dtype='float32', - is_sparse=IS_SPARSE, - param_attr='shared_w') -embed_forth = fluid.layers.embedding( - input=forth_word, - size=[dict_size, EMBED_SIZE], - dtype='float32', - is_sparse=IS_SPARSE, - param_attr='shared_w') - -concat_embed = fluid.layers.concat( - input=[embed_first, embed_second, embed_third, embed_forth], axis=1) -hidden1 = fluid.layers.fc(input=concat_embed, size=HIDDEN_SIZE, act='sigmoid') -predict_word = fluid.layers.fc(input=hidden1, size=dict_size, act='softmax') -cost = fluid.layers.cross_entropy(input=predict_word, label=next_word) -avg_cost = fluid.layers.mean(x=cost) -sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001) -sgd_optimizer.minimize(avg_cost) - -train_reader = paddle.batch( - paddle.dataset.imikolov.train(word_dict, N), BATCH_SIZE) - -place = fluid.CPUPlace() -exe = fluid.Executor(place) -feeder = fluid.DataFeeder( - feed_list=[first_word, second_word, third_word, forth_word, next_word], - 
place=place) - -exe.run(fluid.default_startup_program()) - -for pass_id in range(PASS_NUM): - for data in train_reader(): - avg_cost_np = exe.run(fluid.default_main_program(), - feed=feeder.feed(data), - fetch_list=[avg_cost]) - if avg_cost_np[0] < 5.0: - exit(0) # if avg cost less than 10.0, we think our code is good. -exit(1) + +def main_impl(use_cuda): + if use_cuda and not fluid.core.is_compiled_with_cuda(): + return + + PASS_NUM = 100 + EMBED_SIZE = 32 + HIDDEN_SIZE = 256 + N = 5 + BATCH_SIZE = 32 + IS_SPARSE = True + + word_dict = paddle.dataset.imikolov.build_dict() + dict_size = len(word_dict) + + first_word = fluid.layers.data(name='firstw', shape=[1], dtype='int64') + second_word = fluid.layers.data(name='secondw', shape=[1], dtype='int64') + third_word = fluid.layers.data(name='thirdw', shape=[1], dtype='int64') + forth_word = fluid.layers.data(name='forthw', shape=[1], dtype='int64') + next_word = fluid.layers.data(name='nextw', shape=[1], dtype='int64') + + embed_first = fluid.layers.embedding( + input=first_word, + size=[dict_size, EMBED_SIZE], + dtype='float32', + is_sparse=IS_SPARSE, + param_attr='shared_w') + embed_second = fluid.layers.embedding( + input=second_word, + size=[dict_size, EMBED_SIZE], + dtype='float32', + is_sparse=IS_SPARSE, + param_attr='shared_w') + embed_third = fluid.layers.embedding( + input=third_word, + size=[dict_size, EMBED_SIZE], + dtype='float32', + is_sparse=IS_SPARSE, + param_attr='shared_w') + embed_forth = fluid.layers.embedding( + input=forth_word, + size=[dict_size, EMBED_SIZE], + dtype='float32', + is_sparse=IS_SPARSE, + param_attr='shared_w') + + concat_embed = fluid.layers.concat( + input=[embed_first, embed_second, embed_third, embed_forth], axis=1) + hidden1 = fluid.layers.fc(input=concat_embed, + size=HIDDEN_SIZE, + act='sigmoid') + predict_word = fluid.layers.fc(input=hidden1, size=dict_size, act='softmax') + cost = fluid.layers.cross_entropy(input=predict_word, label=next_word) + avg_cost = 
fluid.layers.mean(x=cost) + sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001) + sgd_optimizer.minimize(avg_cost) + + train_reader = paddle.batch( + paddle.dataset.imikolov.train(word_dict, N), BATCH_SIZE) + + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + exe = fluid.Executor(place) + feeder = fluid.DataFeeder( + feed_list=[first_word, second_word, third_word, forth_word, next_word], + place=place) + + exe.run(fluid.default_startup_program()) + + for pass_id in range(PASS_NUM): + for data in train_reader(): + avg_cost_np = exe.run(fluid.default_main_program(), + feed=feeder.feed(data), + fetch_list=[avg_cost]) + if avg_cost_np[0] < 5.0: + return + raise AssertionError("Cost is too large {0:2.2}".format(avg_cost_np[0])) + + +def main(*args, **kwargs): + prog = fluid.Program() + startup_prog = fluid.Program() + scope = fluid.core.Scope() + with fluid.scope_guard(scope): + with fluid.program_guard(prog, startup_prog): + main_impl(*args, **kwargs) + + +class W2VTest(unittest.TestCase): + def test_cpu_normal(self): + main(use_cuda=False) + + def test_gpu_normal(self): + main(use_cuda=True) + + +if __name__ == '__main__': + unittest.main() From 7c0cc113d92dfebb11bf689924a47f7ae520e739 Mon Sep 17 00:00:00 2001 From: Yang Yu Date: Tue, 30 Jan 2018 14:56:01 +0800 Subject: [PATCH 066/106] Test word2vec for parallel.do * Polish sum_op support SelectedRows in_place --- paddle/operators/sum_op.h | 49 +++++-- .../v2/fluid/tests/book/test_word2vec.py | 133 ++++++++++++------ 2 files changed, 125 insertions(+), 57 deletions(-) diff --git a/paddle/operators/sum_op.h b/paddle/operators/sum_op.h index 48201b344d..3d8102c3ae 100644 --- a/paddle/operators/sum_op.h +++ b/paddle/operators/sum_op.h @@ -68,7 +68,32 @@ class SumKernel : public framework::OpKernel { } } } else if (out_var->IsType()) { - PADDLE_ENFORCE(!in_place, "SelectedRows not support inplace sum now"); + std::unique_ptr in0; + if (in_place) { + // If is in_place, we store the input[0] to in0 + auto 
&in_sel0 = in_vars[0]->Get(); + auto &rows = in_sel0.rows(); +#ifdef PADDLE_WITH_CUDA + std::vector rows_in_cpu; + rows_in_cpu.reserve(rows.size()); + for (auto item : rows) { + rows_in_cpu.push_back(item); + } + in0.reset(new framework::SelectedRows(rows_in_cpu, in_sel0.height())); +#else + in0.reset(new framework::SelectedRows(rows, in_sel0.height())); +#endif + in0->mutable_value()->ShareDataWith(in_sel0.value()); + } + + auto get_selected_row = [&](size_t i) -> const SelectedRows & { + if (i == 0 && in0) { + return *in0.get(); + } else { + return in_vars[i]->Get(); + } + }; + auto *out = context.Output("Out"); out->mutable_rows()->clear(); auto *out_value = out->mutable_value(); @@ -76,24 +101,26 @@ class SumKernel : public framework::OpKernel { // Runtime InferShape size_t first_dim = 0; for (int i = 0; i < N; i++) { - first_dim += in_vars[i]->Get().rows().size(); + auto &sel_row = get_selected_row(i); + first_dim += sel_row.rows().size(); } - auto in_dim = in_vars[0]->Get().value().dims(); - auto in_dim_vec = framework::vectorize(in_dim); - in_dim_vec[0] = static_cast(first_dim); + auto in_dim = + framework::vectorize(get_selected_row(N - 1).value().dims()); + in_dim[0] = static_cast(first_dim); - out_value->Resize(framework::make_ddim(in_dim_vec)); + out_value->Resize(framework::make_ddim(in_dim)); out_value->mutable_data(context.GetPlace()); math::SelectedRowsAddTo functor; int64_t offset = 0; for (int i = 0; i < N; i++) { - PADDLE_ENFORCE_EQ(out->height(), - in_vars[i]->Get().height()); - functor(context.template device_context(), - in_vars[i]->Get(), offset, out); - offset += in_vars[i]->Get().value().numel(); + auto &sel_row = get_selected_row(i); + + PADDLE_ENFORCE_EQ(out->height(), sel_row.height()); + functor(context.template device_context(), sel_row, + offset, out); + offset += sel_row.value().numel(); } } else if (out_var->IsType()) { auto &out_array = *out_var->GetMutable(); diff --git a/python/paddle/v2/fluid/tests/book/test_word2vec.py 
b/python/paddle/v2/fluid/tests/book/test_word2vec.py index cdfa910fcd..cfa8d9580d 100644 --- a/python/paddle/v2/fluid/tests/book/test_word2vec.py +++ b/python/paddle/v2/fluid/tests/book/test_word2vec.py @@ -15,9 +15,10 @@ import paddle.v2 as paddle import paddle.v2.fluid as fluid import unittest +import os -def main_impl(use_cuda): +def main(use_cuda, is_sparse, parallel): if use_cuda and not fluid.core.is_compiled_with_cuda(): return @@ -26,7 +27,45 @@ def main_impl(use_cuda): HIDDEN_SIZE = 256 N = 5 BATCH_SIZE = 32 - IS_SPARSE = True + IS_SPARSE = is_sparse + + def __network__(words): + embed_first = fluid.layers.embedding( + input=words[0], + size=[dict_size, EMBED_SIZE], + dtype='float32', + is_sparse=IS_SPARSE, + param_attr='shared_w') + embed_second = fluid.layers.embedding( + input=words[1], + size=[dict_size, EMBED_SIZE], + dtype='float32', + is_sparse=IS_SPARSE, + param_attr='shared_w') + embed_third = fluid.layers.embedding( + input=words[2], + size=[dict_size, EMBED_SIZE], + dtype='float32', + is_sparse=IS_SPARSE, + param_attr='shared_w') + embed_forth = fluid.layers.embedding( + input=words[3], + size=[dict_size, EMBED_SIZE], + dtype='float32', + is_sparse=IS_SPARSE, + param_attr='shared_w') + + concat_embed = fluid.layers.concat( + input=[embed_first, embed_second, embed_third, embed_forth], axis=1) + hidden1 = fluid.layers.fc(input=concat_embed, + size=HIDDEN_SIZE, + act='sigmoid') + predict_word = fluid.layers.fc(input=hidden1, + size=dict_size, + act='softmax') + cost = fluid.layers.cross_entropy(input=predict_word, label=words[4]) + avg_cost = fluid.layers.mean(x=cost) + return avg_cost word_dict = paddle.dataset.imikolov.build_dict() dict_size = len(word_dict) @@ -37,39 +76,21 @@ def main_impl(use_cuda): forth_word = fluid.layers.data(name='forthw', shape=[1], dtype='int64') next_word = fluid.layers.data(name='nextw', shape=[1], dtype='int64') - embed_first = fluid.layers.embedding( - input=first_word, - size=[dict_size, EMBED_SIZE], - 
dtype='float32', - is_sparse=IS_SPARSE, - param_attr='shared_w') - embed_second = fluid.layers.embedding( - input=second_word, - size=[dict_size, EMBED_SIZE], - dtype='float32', - is_sparse=IS_SPARSE, - param_attr='shared_w') - embed_third = fluid.layers.embedding( - input=third_word, - size=[dict_size, EMBED_SIZE], - dtype='float32', - is_sparse=IS_SPARSE, - param_attr='shared_w') - embed_forth = fluid.layers.embedding( - input=forth_word, - size=[dict_size, EMBED_SIZE], - dtype='float32', - is_sparse=IS_SPARSE, - param_attr='shared_w') - - concat_embed = fluid.layers.concat( - input=[embed_first, embed_second, embed_third, embed_forth], axis=1) - hidden1 = fluid.layers.fc(input=concat_embed, - size=HIDDEN_SIZE, - act='sigmoid') - predict_word = fluid.layers.fc(input=hidden1, size=dict_size, act='softmax') - cost = fluid.layers.cross_entropy(input=predict_word, label=next_word) - avg_cost = fluid.layers.mean(x=cost) + if not parallel: + avg_cost = __network__( + [first_word, second_word, third_word, forth_word, next_word]) + else: + places = fluid.layers.get_places() + pd = fluid.layers.ParallelDo(places) + with pd.do(): + avg_cost = __network__( + map(pd.read_input, [ + first_word, second_word, third_word, forth_word, next_word + ])) + pd.write_output(avg_cost) + + avg_cost = fluid.layers.mean(x=pd()) + sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001) sgd_optimizer.minimize(avg_cost) @@ -94,22 +115,42 @@ def main_impl(use_cuda): raise AssertionError("Cost is too large {0:2.2}".format(avg_cost_np[0])) -def main(*args, **kwargs): - prog = fluid.Program() - startup_prog = fluid.Program() - scope = fluid.core.Scope() - with fluid.scope_guard(scope): - with fluid.program_guard(prog, startup_prog): - main_impl(*args, **kwargs) +FULL_TEST = os.getenv('FULL_TEST', + '1').lower() in ['true', '1', 't', 'y', 'yes', 'on'] +SKIP_REASON = "Only run minimum number of tests in CI server, to make CI faster" class W2VTest(unittest.TestCase): - def test_cpu_normal(self): - 
main(use_cuda=False) + pass + + +def inject_test_method(use_cuda, is_sparse, parallel): + fn_name = "test_{0}_{1}_{2}".format("cuda" if use_cuda else "cpu", "sparse" + if is_sparse else "dense", "parallel" + if parallel else "normal") + + def __impl__(*args, **kwargs): + prog = fluid.Program() + startup_prog = fluid.Program() + scope = fluid.core.Scope() + with fluid.scope_guard(scope): + with fluid.program_guard(prog, startup_prog): + main(use_cuda=use_cuda, is_sparse=is_sparse, parallel=parallel) + + if use_cuda and is_sparse and parallel: + fn = __impl__ + else: + # skip the other test when on CI server + fn = unittest.skipUnless( + condition=FULL_TEST, reason=SKIP_REASON)(__impl__) + + setattr(W2VTest, fn_name, fn) - def test_gpu_normal(self): - main(use_cuda=True) +for use_cuda in (False, True): + for is_sparse in (False, True): + for parallel in (False, True): + inject_test_method(use_cuda, is_sparse, parallel) if __name__ == '__main__': unittest.main() From 311334e083dca3117bbc41bd11a3649ec7580ef0 Mon Sep 17 00:00:00 2001 From: Siddharth Goyal Date: Mon, 29 Jan 2018 23:27:20 -0800 Subject: [PATCH 067/106] Implement basic `Load()` and modify example based on updated inference design (#7690) * Initial commit * Remove resolution bug * Modify IsParam * Remove small bugs * First commit unifying Run and Load * Fix bugs * Fix Cmake * Modify Cmake and dir structure * Add io.* files to inference dir * Fix include in example * Address review comments: part 1 * Address review comments: round 2 * Address review comments: round 3 * Address review comments: round 4 --- paddle/framework/program_desc.cc | 25 +++++ paddle/framework/program_desc.h | 4 + paddle/inference/CMakeLists.txt | 6 +- paddle/inference/example.cc | 50 ++++++++- paddle/inference/inference.cc | 187 ------------------------------- paddle/inference/inference.h | 48 -------- paddle/inference/io.cc | 97 ++++++++++++++++ paddle/inference/io.h | 41 +++++++ 8 files changed, 214 insertions(+), 244 deletions(-) 
delete mode 100644 paddle/inference/inference.cc delete mode 100644 paddle/inference/inference.h create mode 100644 paddle/inference/io.cc create mode 100644 paddle/inference/io.h diff --git a/paddle/framework/program_desc.cc b/paddle/framework/program_desc.cc index b5d9e5e385..e59e392dfd 100644 --- a/paddle/framework/program_desc.cc +++ b/paddle/framework/program_desc.cc @@ -18,6 +18,9 @@ limitations under the License. */ namespace paddle { namespace framework { +const std::string kFeedOpType = "feed"; +const std::string kFetchOpType = "fetch"; + BlockDesc *ProgramDesc::AppendBlock(const BlockDesc &parent) { auto *b = desc_.add_blocks(); b->set_parent_idx(parent.ID()); @@ -64,5 +67,27 @@ ProgramDesc::ProgramDesc(const std::string &binary_str) { } } +const std::vector ProgramDesc::GetFeedVarNames() { + BlockDesc *global_block = blocks_[0].get(); + std::vector feed_var_names; + for (auto *op : global_block->AllOps()) { + if (op->Type() == "feed") { + feed_var_names.insert(feed_var_names.begin(), op->Output("Out")[0]); + } + } + return feed_var_names; +} + +const std::vector ProgramDesc::GetFetchVarNames() { + BlockDesc *global_block = blocks_[0].get(); + std::vector fetch_var_names; + for (auto *op : global_block->AllOps()) { + if (op->Type() == "fetch") { + fetch_var_names.push_back(op->Input("X")[0]); + } + } + return fetch_var_names; +} + } // namespace framework } // namespace paddle diff --git a/paddle/framework/program_desc.h b/paddle/framework/program_desc.h index 15a962bb69..2c3883275a 100644 --- a/paddle/framework/program_desc.h +++ b/paddle/framework/program_desc.h @@ -45,6 +45,10 @@ class ProgramDesc { proto::ProgramDesc *Proto(); + const std::vector GetFeedVarNames(); + + const std::vector GetFetchVarNames(); + private: proto::ProgramDesc desc_; diff --git a/paddle/inference/CMakeLists.txt b/paddle/inference/CMakeLists.txt index ae4d3fd2f5..683aaee42a 100644 --- a/paddle/inference/CMakeLists.txt +++ b/paddle/inference/CMakeLists.txt @@ -1,14 +1,14 @@ 
set(FLUID_CORE_MODULES proto_desc paddle_memory executor prune init) cc_library(paddle_fluid_api - SRCS inference.cc + SRCS io.cc DEPS ${FLUID_CORE_MODULES} ${GLOB_OP_LIB}) # Merge all modules into a single static library cc_library(paddle_fluid DEPS paddle_fluid_api ${FLUID_CORE_MODULES} ${GLOB_OP_LIB}) # Create shared library -add_library(paddle_fluid_shared SHARED inference.cc) +add_library(paddle_fluid_shared SHARED io.cc) target_circle_link_libraries(paddle_fluid_shared ARCHIVE_START @@ -20,7 +20,7 @@ SET_TARGET_PROPERTIES(paddle_fluid_shared PROPERTIES OUTPUT_NAME paddle_fluid) # install library & headers if(NOT WITH_C_API AND WITH_FLUID) - install(FILES inference.h DESTINATION include/paddle/inference) + install(FILES io.h DESTINATION include/paddle/inference) install(TARGETS paddle_fluid_shared DESTINATION lib) endif() diff --git a/paddle/inference/example.cc b/paddle/inference/example.cc index 0c18b45624..5173779c62 100644 --- a/paddle/inference/example.cc +++ b/paddle/inference/example.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -15,7 +15,9 @@ limitations under the License. */ #include #include #include "gflags/gflags.h" -#include "paddle/inference/inference.h" +#include "paddle/framework/init.h" +#include "paddle/framework/lod_tensor.h" +#include "paddle/inference/io.h" DEFINE_string(dirname, "", "Directory of the inference model."); @@ -28,12 +30,27 @@ int main(int argc, char** argv) { exit(1); } + // 1. 
Define place, executor, scope + auto place = paddle::platform::CPUPlace(); + paddle::framework::InitDevices(); + auto* executor = new paddle::framework::Executor(place); + auto* scope = new paddle::framework::Scope(); + std::cout << "FLAGS_dirname: " << FLAGS_dirname << std::endl; std::string dirname = FLAGS_dirname; - paddle::InferenceEngine* engine = new paddle::InferenceEngine(); - engine->LoadInferenceModel(dirname); + // 2. Initialize the inference program + auto* inference_program = paddle::inference::Load(*executor, *scope, dirname); + + // 3. Optional: perform optimization on the inference_program + + // 4. Get the feed_var_names and fetch_var_names + const std::vector& feed_var_names = + inference_program->GetFeedVarNames(); + const std::vector& fetch_var_names = + inference_program->GetFetchVarNames(); + // 5. Generate input paddle::framework::LoDTensor input; srand(time(0)); float* input_ptr = @@ -45,8 +62,26 @@ int main(int argc, char** argv) { std::vector feeds; feeds.push_back(input); std::vector fetchs; - engine->Execute(feeds, fetchs); + // Set up maps for feed and fetch targets + std::map feed_targets; + std::map fetch_targets; + + // set_feed_variable + for (size_t i = 0; i < feed_var_names.size(); ++i) { + feed_targets[feed_var_names[i]] = &feeds[i]; + } + + // get_fetch_variable + fetchs.resize(fetch_var_names.size()); + for (size_t i = 0; i < fetch_var_names.size(); ++i) { + fetch_targets[fetch_var_names[i]] = &fetchs[i]; + } + + // Run the inference program + executor->Run(*inference_program, scope, feed_targets, fetch_targets); + + // Get outputs for (size_t i = 0; i < fetchs.size(); ++i) { auto dims_i = fetchs[i].dims(); std::cout << "dims_i:"; @@ -62,6 +97,9 @@ int main(int argc, char** argv) { std::cout << std::endl; } - delete engine; + delete inference_program; + delete scope; + delete executor; + return 0; } diff --git a/paddle/inference/inference.cc b/paddle/inference/inference.cc deleted file mode 100644 index b43c359ed1..0000000000 
--- a/paddle/inference/inference.cc +++ /dev/null @@ -1,187 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#include "inference.h" -#include -#include "paddle/framework/executor.h" -#include "paddle/framework/init.h" -#include "paddle/framework/scope.h" - -namespace paddle { - -void InferenceEngine::LoadInferenceModel(const std::string& dirname) { - std::string model_filename = dirname + "/__model__"; - LOG(INFO) << "loading model from " << model_filename; - std::ifstream inputfs(model_filename, std::ios::in | std::ios::binary); - std::string program_desc_str; - inputfs.seekg(0, std::ios::end); - program_desc_str.resize(inputfs.tellg()); - inputfs.seekg(0, std::ios::beg); - LOG(INFO) << "program_desc_str's size: " << program_desc_str.size(); - inputfs.read(&program_desc_str[0], program_desc_str.size()); - inputfs.close(); - - program_ = new framework::ProgramDesc(program_desc_str); - GenerateLoadProgram(dirname); - - framework::BlockDesc* global_block = program_->MutableBlock(0); - feed_var_names_.clear(); - fetch_var_names_.clear(); - for (auto* op : global_block->AllOps()) { - if (op->Type() == "feed") { - feed_var_names_.insert(feed_var_names_.begin(), op->Output("Out")[0]); - } else if (op->Type() == "fetch") { - fetch_var_names_.push_back(op->Input("X")[0]); - } - } -} - -bool InferenceEngine::IsParameter(const framework::VarDesc* var) { - if (var->Persistable()) { - // There are many unreachable variables 
in the program - for (size_t i = 0; i < program_->Size(); ++i) { - const framework::BlockDesc& block = program_->Block(i); - for (auto* op : block.AllOps()) { - if (op->Type() == "feed") { - continue; - } - for (auto input_argument_name : op->InputArgumentNames()) { - if (input_argument_name == var->Name()) { - return true; - } - } - } - } - } - return false; -} - -void InferenceEngine::GenerateLoadProgram(const std::string& dirname) { - framework::BlockDesc* global_block = program_->MutableBlock(0); - - load_program_ = new framework::ProgramDesc(); - framework::BlockDesc* load_block = load_program_->MutableBlock(0); - for (auto* var : global_block->AllVars()) { - if (IsParameter(var)) { - LOG(INFO) << "parameter's name: " << var->Name(); - - framework::VarDesc* new_var = load_block->Var(var->Name()); - new_var->SetShape(var->Shape()); - new_var->SetDataType(var->GetDataType()); - new_var->SetType(var->GetType()); - new_var->SetLoDLevel(var->GetLoDLevel()); - new_var->SetPersistable(true); - - // append_op - framework::OpDesc* op = load_block->AppendOp(); - op->SetType("load"); - op->SetOutput("Out", {new_var->Name()}); - op->SetAttr("file_path", {dirname + "/" + new_var->Name()}); - op->CheckAttrs(); - } - } -} - -void InferenceEngine::PrependFeedOp() { - if (!program_) { - LOG(FATAL) << "Please initialize the program_ first."; - } - - framework::BlockDesc* global_block = program_->MutableBlock(0); - - // create_var - framework::VarDesc* feed_var = global_block->Var("feed"); - feed_var->SetType(framework::proto::VarDesc::FEED_MINIBATCH); - feed_var->SetPersistable(true); - - // prepend feed_op - for (size_t i = 0; i < feed_var_names_.size(); ++i) { - std::string var_name = feed_var_names_[i]; - LOG(INFO) << "feed var's name: " << var_name; - - // prepend_op - framework::OpDesc* op = global_block->PrependOp(); - op->SetType("feed"); - op->SetInput("X", {"feed"}); - op->SetOutput("Out", {var_name}); - op->SetAttr("col", {static_cast(i)}); - op->CheckAttrs(); - } -} 
- -void InferenceEngine::AppendFetchOp() { - if (!program_) { - LOG(FATAL) << "Please initialize the program_ first."; - } - - framework::BlockDesc* global_block = program_->MutableBlock(0); - - // create_var - framework::VarDesc* fetch_var = global_block->Var("fetch"); - fetch_var->SetType(framework::proto::VarDesc::FETCH_LIST); - fetch_var->SetPersistable(true); - - // append fetch_op - for (size_t i = 0; i < fetch_var_names_.size(); ++i) { - std::string var_name = fetch_var_names_[i]; - LOG(INFO) << "fetch var's name: " << var_name; - - // append_op - framework::OpDesc* op = global_block->AppendOp(); - op->SetType("fetch"); - op->SetInput("X", {var_name}); - op->SetOutput("Out", {"fetch"}); - op->SetAttr("col", {static_cast(i)}); - op->CheckAttrs(); - } -} - -void InferenceEngine::Execute(const std::vector& feeds, - std::vector& fetchs) { - if (!program_ || !load_program_) { - LOG(FATAL) << "Please initialize the program_ and load_program_ first."; - } - - if (feeds.size() != feed_var_names_.size()) { - LOG(FATAL) << "Please feed " << feed_var_names_.size() << " input Tensors."; - } - - auto* place = new platform::CPUPlace(); - framework::InitDevices(); - framework::Executor* executor = new framework::Executor(*place); - framework::Scope* scope = new framework::Scope(); - - executor->Run(*load_program_, scope, 0, true, true); - - std::map feed_targets; - std::map fetch_targets; - - // set_feed_variable - for (size_t i = 0; i < feed_var_names_.size(); ++i) { - feed_targets[feed_var_names_[i]] = &feeds[i]; - } - - // get_fetch_variable - fetchs.resize(fetch_var_names_.size()); - for (size_t i = 0; i < fetch_var_names_.size(); ++i) { - fetch_targets[fetch_var_names_[i]] = &fetchs[i]; - } - - executor->Run(*program_, scope, feed_targets, fetch_targets); - - delete place; - delete scope; - delete executor; -} -} // namespace paddle diff --git a/paddle/inference/inference.h b/paddle/inference/inference.h deleted file mode 100644 index 26f259824b..0000000000 --- 
a/paddle/inference/inference.h +++ /dev/null @@ -1,48 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#pragma once - -#include "paddle/framework/block_desc.h" -#include "paddle/framework/lod_tensor.h" -#include "paddle/framework/program_desc.h" - -namespace paddle { - -class InferenceEngine { -public: - InferenceEngine() : program_(nullptr), load_program_(nullptr) {} - ~InferenceEngine() { - delete program_; - delete load_program_; - } - - void LoadInferenceModel(const std::string& dirname); - void Execute(const std::vector& feeds, - std::vector& fetchs); - -private: - bool IsParameter(const framework::VarDesc* var); - void GenerateLoadProgram(const std::string& dirname); - void PrependFeedOp(); - void AppendFetchOp(); - -private: - framework::ProgramDesc* program_; - framework::ProgramDesc* load_program_; - std::vector feed_var_names_; - std::vector fetch_var_names_; -}; - -} // namespace paddle diff --git a/paddle/inference/io.cc b/paddle/inference/io.cc new file mode 100644 index 0000000000..98b33d656d --- /dev/null +++ b/paddle/inference/io.cc @@ -0,0 +1,97 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/inference/io.h" +#include + +namespace paddle { +namespace inference { + +const std::string kFeedOpType = "feed"; + +bool IsParameter(const framework::VarDesc* var, + const framework::ProgramDesc* main_program) { + if (var->Persistable()) { + // There are many unreachable variables in the program + for (size_t i = 0; i < main_program->Size(); ++i) { + const framework::BlockDesc& block = main_program->Block(i); + for (auto* op : block.AllOps()) { + if (op->Type() == kFeedOpType) { + continue; + } + for (auto input_argument_name : op->InputArgumentNames()) { + if (input_argument_name == var->Name()) { + return true; + } + } + } + } + } + return false; +} + +void LoadPersistables(framework::Executor& executor, + framework::Scope& scope, + const std::string& dirname, + framework::ProgramDesc* main_program) { + framework::BlockDesc* global_block = main_program->MutableBlock(0); + + framework::ProgramDesc* load_program = new framework::ProgramDesc(); + framework::BlockDesc* load_block = load_program->MutableBlock(0); + for (auto* var : global_block->AllVars()) { + if (IsParameter(var, main_program)) { + LOG(INFO) << "parameter's name: " << var->Name(); + + framework::VarDesc* new_var = load_block->Var(var->Name()); + new_var->SetShape(var->Shape()); + new_var->SetDataType(var->GetDataType()); + new_var->SetType(var->GetType()); + new_var->SetLoDLevel(var->GetLoDLevel()); + new_var->SetPersistable(true); + + // append_op + framework::OpDesc* op = load_block->AppendOp(); + op->SetType("load"); + op->SetOutput("Out", {new_var->Name()}); + 
op->SetAttr("file_path", {dirname + "/" + new_var->Name()}); + op->CheckAttrs(); + } + } + executor.Run(*load_program, &scope, 0, true, true); + delete load_program; +} + +framework::ProgramDesc* Load(framework::Executor& executor, + framework::Scope& scope, + const std::string& dirname) { + std::string model_filename = dirname + "/__model__"; + LOG(INFO) << "loading model from " << model_filename; + std::ifstream inputfs(model_filename, std::ios::in | std::ios::binary); + std::string program_desc_str; + inputfs.seekg(0, std::ios::end); + program_desc_str.resize(inputfs.tellg()); + inputfs.seekg(0, std::ios::beg); + LOG(INFO) << "program_desc_str's size: " << program_desc_str.size(); + inputfs.read(&program_desc_str[0], program_desc_str.size()); + inputfs.close(); + + framework::ProgramDesc* main_program = + new framework::ProgramDesc(program_desc_str); + + LoadPersistables(executor, scope, dirname, main_program); + return main_program; +} + +} // namespace inference +} // namespace paddle diff --git a/paddle/inference/io.h b/paddle/inference/io.h new file mode 100644 index 0000000000..400f5af8c5 --- /dev/null +++ b/paddle/inference/io.h @@ -0,0 +1,41 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#pragma once + +#include +#include +#include "paddle/framework/block_desc.h" +#include "paddle/framework/executor.h" +#include "paddle/framework/program_desc.h" +#include "paddle/framework/scope.h" +#include "paddle/framework/var_desc.h" + +namespace paddle { +namespace inference { + +bool IsParameter(const framework::VarDesc* var, + const framework::ProgramDesc* main_program); + +void LoadPersistables(framework::Executor& executor, + framework::Scope& scope, + const std::string& dirname, + framework::ProgramDesc* main_program); + +framework::ProgramDesc* Load(framework::Executor& executor, + framework::Scope& scope, + const std::string& dirname); + +} // namespace inference +} // namespace paddle From f1e32e241971b3603dbe9e1d094656f89c177497 Mon Sep 17 00:00:00 2001 From: Yang Yu Date: Tue, 30 Jan 2018 16:12:49 +0800 Subject: [PATCH 068/106] Skip tests of word2vec on CI --- python/paddle/v2/fluid/tests/book/test_word2vec.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/paddle/v2/fluid/tests/book/test_word2vec.py b/python/paddle/v2/fluid/tests/book/test_word2vec.py index cfa8d9580d..766ba9681d 100644 --- a/python/paddle/v2/fluid/tests/book/test_word2vec.py +++ b/python/paddle/v2/fluid/tests/book/test_word2vec.py @@ -116,7 +116,7 @@ def main(use_cuda, is_sparse, parallel): FULL_TEST = os.getenv('FULL_TEST', - '1').lower() in ['true', '1', 't', 'y', 'yes', 'on'] + '0').lower() in ['true', '1', 't', 'y', 'yes', 'on'] SKIP_REASON = "Only run minimum number of tests in CI server, to make CI faster" From 4fee15e86003b38973a1fdd943e4a6ef96bd9bbe Mon Sep 17 00:00:00 2001 From: Yang Yu Date: Tue, 30 Jan 2018 16:59:54 +0800 Subject: [PATCH 069/106] Merge test_understand_sentiment together Into one unit test file --- ...c_lstm.py => test_understand_sentiment.py} | 115 +++++++++---- .../book/test_understand_sentiment_conv.py | 101 ----------- .../book/test_understand_sentiment_lstm.py | 160 ------------------ 3 files changed, 78 insertions(+), 
298 deletions(-) rename python/paddle/v2/fluid/tests/book/{test_understand_sentiment_dynamic_lstm.py => test_understand_sentiment.py} (52%) delete mode 100644 python/paddle/v2/fluid/tests/book/test_understand_sentiment_conv.py delete mode 100644 python/paddle/v2/fluid/tests/book/test_understand_sentiment_lstm.py diff --git a/python/paddle/v2/fluid/tests/book/test_understand_sentiment_dynamic_lstm.py b/python/paddle/v2/fluid/tests/book/test_understand_sentiment.py similarity index 52% rename from python/paddle/v2/fluid/tests/book/test_understand_sentiment_dynamic_lstm.py rename to python/paddle/v2/fluid/tests/book/test_understand_sentiment.py index 529223eba8..2ba9077a26 100644 --- a/python/paddle/v2/fluid/tests/book/test_understand_sentiment_dynamic_lstm.py +++ b/python/paddle/v2/fluid/tests/book/test_understand_sentiment.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,9 +12,36 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import numpy as np -import paddle.v2 as paddle +import unittest import paddle.v2.fluid as fluid +import paddle.v2 as paddle +import contextlib + + +def convolution_net(data, label, input_dim, class_dim=2, emb_dim=32, + hid_dim=32): + emb = fluid.layers.embedding(input=data, size=[input_dim, emb_dim]) + conv_3 = fluid.nets.sequence_conv_pool( + input=emb, + num_filters=hid_dim, + filter_size=3, + act="tanh", + pool_type="sqrt") + conv_4 = fluid.nets.sequence_conv_pool( + input=emb, + num_filters=hid_dim, + filter_size=4, + act="tanh", + pool_type="sqrt") + prediction = fluid.layers.fc(input=[conv_3, conv_4], + size=class_dim, + act="softmax") + cost = fluid.layers.cross_entropy(input=prediction, label=label) + avg_cost = fluid.layers.mean(x=cost) + adam_optimizer = fluid.optimizer.Adam(learning_rate=0.002) + adam_optimizer.minimize(avg_cost) + accuracy = fluid.layers.accuracy(input=prediction, label=label) + return avg_cost, accuracy def stacked_lstm_net(data, @@ -51,63 +78,77 @@ def stacked_lstm_net(data, avg_cost = fluid.layers.mean(x=cost) adam_optimizer = fluid.optimizer.Adam(learning_rate=0.002) adam_optimizer.minimize(avg_cost) - accuracy = fluid.evaluator.Accuracy(input=prediction, label=label) - return avg_cost, accuracy, accuracy.metrics[0] - - -def to_lodtensor(data, place): - seq_lens = [len(seq) for seq in data] - cur_len = 0 - lod = [cur_len] - for l in seq_lens: - cur_len += l - lod.append(cur_len) - flattened_data = np.concatenate(data, axis=0).astype("int64") - flattened_data = flattened_data.reshape([len(flattened_data), 1]) - res = fluid.LoDTensor() - res.set(flattened_data, place) - res.set_lod([lod]) - return res - - -def main(): - BATCH_SIZE = 100 - PASS_NUM = 5 + accuracy = fluid.layers.accuracy(input=prediction, label=label) + return avg_cost, accuracy - word_dict = paddle.dataset.imdb.word_dict() - print "load word dict successfully" + +def main(word_dict, net_method, use_cuda): + if use_cuda and not fluid.core.is_compiled_with_cuda(): + 
return + + BATCH_SIZE = 128 + PASS_NUM = 5 dict_dim = len(word_dict) class_dim = 2 data = fluid.layers.data( name="words", shape=[1], dtype="int64", lod_level=1) label = fluid.layers.data(name="label", shape=[1], dtype="int64") - cost, accuracy, acc_out = stacked_lstm_net( + cost, acc_out = net_method( data, label, input_dim=dict_dim, class_dim=class_dim) train_data = paddle.batch( paddle.reader.shuffle( paddle.dataset.imdb.train(word_dict), buf_size=1000), batch_size=BATCH_SIZE) - place = fluid.CPUPlace() + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() exe = fluid.Executor(place) feeder = fluid.DataFeeder(feed_list=[data, label], place=place) exe.run(fluid.default_startup_program()) for pass_id in xrange(PASS_NUM): - accuracy.reset(exe) for data in train_data(): cost_val, acc_val = exe.run(fluid.default_main_program(), feed=feeder.feed(data), fetch_list=[cost, acc_out]) - pass_acc = accuracy.eval(exe) - print("cost=" + str(cost_val) + " acc=" + str(acc_val) + - " pass_acc=" + str(pass_acc)) - if cost_val < 1.0 and acc_val > 0.8: - exit(0) - exit(1) + print("cost=" + str(cost_val) + " acc=" + str(acc_val)) + if cost_val < 0.4 and acc_val > 0.8: + return + raise AssertionError("Cost is too large for {0}".format( + net_method.__name__)) + + +class TestUnderstandSentiment(unittest.TestCase): + @classmethod + def setUpClass(cls): + cls.word_dict = paddle.dataset.imdb.word_dict() + + @contextlib.contextmanager + def new_program_scope(self): + prog = fluid.Program() + startup_prog = fluid.Program() + scope = fluid.core.Scope() + with fluid.scope_guard(scope): + with fluid.program_guard(prog, startup_prog): + yield + + def test_conv_cpu(self): + with self.new_program_scope(): + main(self.word_dict, net_method=convolution_net, use_cuda=False) + + def test_stacked_lstm_cpu(self): + with self.new_program_scope(): + main(self.word_dict, net_method=stacked_lstm_net, use_cuda=False) + + def test_conv_gpu(self): + with self.new_program_scope(): + 
main(self.word_dict, net_method=convolution_net, use_cuda=True) + + def test_stacked_lstm_gpu(self): + with self.new_program_scope(): + main(self.word_dict, net_method=stacked_lstm_net, use_cuda=True) if __name__ == '__main__': - main() + unittest.main() diff --git a/python/paddle/v2/fluid/tests/book/test_understand_sentiment_conv.py b/python/paddle/v2/fluid/tests/book/test_understand_sentiment_conv.py deleted file mode 100644 index df27399dd2..0000000000 --- a/python/paddle/v2/fluid/tests/book/test_understand_sentiment_conv.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import print_function -import numpy as np -import paddle.v2 as paddle -import paddle.v2.fluid as fluid - - -def convolution_net(data, label, input_dim, class_dim=2, emb_dim=32, - hid_dim=32): - emb = fluid.layers.embedding(input=data, size=[input_dim, emb_dim]) - conv_3 = fluid.nets.sequence_conv_pool( - input=emb, - num_filters=hid_dim, - filter_size=3, - act="tanh", - pool_type="sqrt") - conv_4 = fluid.nets.sequence_conv_pool( - input=emb, - num_filters=hid_dim, - filter_size=4, - act="tanh", - pool_type="sqrt") - prediction = fluid.layers.fc(input=[conv_3, conv_4], - size=class_dim, - act="softmax") - cost = fluid.layers.cross_entropy(input=prediction, label=label) - avg_cost = fluid.layers.mean(x=cost) - adam_optimizer = fluid.optimizer.Adam(learning_rate=0.002) - adam_optimizer.minimize(avg_cost) - accuracy = fluid.evaluator.Accuracy(input=prediction, label=label) - return avg_cost, accuracy, accuracy.metrics[0] - - -def to_lodtensor(data, place): - seq_lens = [len(seq) for seq in data] - cur_len = 0 - lod = [cur_len] - for l in seq_lens: - cur_len += l - lod.append(cur_len) - flattened_data = np.concatenate(data, axis=0).astype("int64") - flattened_data = flattened_data.reshape([len(flattened_data), 1]) - res = fluid.LoDTensor() - res.set(flattened_data, place) - res.set_lod([lod]) - return res - - -def main(): - BATCH_SIZE = 100 - PASS_NUM = 5 - - word_dict = paddle.dataset.imdb.word_dict() - dict_dim = len(word_dict) - class_dim = 2 - - data = fluid.layers.data( - name="words", shape=[1], dtype="int64", lod_level=1) - label = fluid.layers.data(name="label", shape=[1], dtype="int64") - cost, accuracy, acc_out = convolution_net( - data, label, input_dim=dict_dim, class_dim=class_dim) - - train_data = paddle.batch( - paddle.reader.shuffle( - paddle.dataset.imdb.train(word_dict), buf_size=1000), - batch_size=BATCH_SIZE) - place = fluid.CPUPlace() - exe = fluid.Executor(place) - feeder = fluid.DataFeeder(feed_list=[data, label], place=place) - 
- exe.run(fluid.default_startup_program()) - - for pass_id in xrange(PASS_NUM): - accuracy.reset(exe) - for data in train_data(): - cost_val, acc_val = exe.run(fluid.default_main_program(), - feed=feeder.feed(data), - fetch_list=[cost, acc_out]) - pass_acc = accuracy.eval(exe) - print("cost=" + str(cost_val) + " acc=" + str(acc_val) + - " pass_acc=" + str(pass_acc)) - if cost_val < 1.0 and pass_acc > 0.8: - exit(0) - exit(1) - - -if __name__ == '__main__': - main() diff --git a/python/paddle/v2/fluid/tests/book/test_understand_sentiment_lstm.py b/python/paddle/v2/fluid/tests/book/test_understand_sentiment_lstm.py deleted file mode 100644 index 117f74c59a..0000000000 --- a/python/paddle/v2/fluid/tests/book/test_understand_sentiment_lstm.py +++ /dev/null @@ -1,160 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import numpy as np -import paddle.v2 as paddle -import paddle.v2.fluid as fluid -from paddle.v2.fluid.layer_helper import LayerHelper - - -def lstm(x, c_pre_init, hidden_dim, forget_bias=None): - """ - This function helps create an operator for the LSTM (Long Short Term - Memory) cell that can be used inside an RNN. 
- """ - helper = LayerHelper('lstm_unit', **locals()) - rnn = fluid.layers.StaticRNN() - with rnn.step(): - c_pre = rnn.memory(init=c_pre_init) - x_t = rnn.step_input(x) - - before_fc = fluid.layers.concat(input=[x_t, c_pre], axis=1) - after_fc = fluid.layers.fc(input=before_fc, size=hidden_dim * 4) - - dtype = x.dtype - c = helper.create_tmp_variable(dtype) - h = helper.create_tmp_variable(dtype) - - helper.append_op( - type='lstm_unit', - inputs={"X": after_fc, - "C_prev": c_pre}, - outputs={"C": c, - "H": h}, - attrs={"forget_bias": forget_bias}) - - rnn.update_memory(c_pre, c) - rnn.output(h) - - return rnn() - - -def lstm_net(dict_dim, class_dim=2, emb_dim=32, seq_len=80, batch_size=50): - data = fluid.layers.data( - name="words", - shape=[seq_len * batch_size, 1], - append_batch_size=False, - dtype="int64", - lod_level=1) - label = fluid.layers.data( - name="label", - shape=[batch_size, 1], - append_batch_size=False, - dtype="int64") - - emb = fluid.layers.embedding(input=data, size=[dict_dim, emb_dim]) - emb = fluid.layers.reshape(x=emb, shape=[batch_size, seq_len, emb_dim]) - emb = fluid.layers.transpose(x=emb, perm=[1, 0, 2]) - - c_pre_init = fluid.layers.fill_constant( - dtype=emb.dtype, shape=[batch_size, emb_dim], value=0.0) - c_pre_init.stop_gradient = False - layer_1_out = lstm(emb, c_pre_init=c_pre_init, hidden_dim=emb_dim) - layer_1_out = fluid.layers.transpose(x=layer_1_out, perm=[1, 0, 2]) - - prediction = fluid.layers.fc(input=layer_1_out, - size=class_dim, - act="softmax") - cost = fluid.layers.cross_entropy(input=prediction, label=label) - - avg_cost = fluid.layers.mean(x=cost) - adam_optimizer = fluid.optimizer.Adam(learning_rate=0.002) - adam_optimizer.minimize(avg_cost) - acc = fluid.layers.accuracy(input=prediction, label=label) - - return avg_cost, acc - - -def to_lodtensor(data, place): - seq_lens = [len(seq) for seq in data] - cur_len = 0 - lod = [cur_len] - for l in seq_lens: - cur_len += l - lod.append(cur_len) - flattened_data = 
np.concatenate(data, axis=0).astype("int64") - flattened_data = flattened_data.reshape([len(flattened_data), 1]) - res = fluid.LoDTensor() - res.set(flattened_data, place) - res.set_lod([lod]) - return res - - -def chop_data(data, chop_len=80, batch_size=50): - data = [(x[0][:chop_len], x[1]) for x in data if len(x[0]) >= chop_len] - - return data[:batch_size] - - -def prepare_feed_data(data, place): - tensor_words = to_lodtensor(map(lambda x: x[0], data), place) - - label = np.array(map(lambda x: x[1], data)).astype("int64") - label = label.reshape([len(label), 1]) - tensor_label = fluid.LoDTensor() - tensor_label.set(label, place) - - return tensor_words, tensor_label - - -def main(): - BATCH_SIZE = 100 - PASS_NUM = 5 - - word_dict = paddle.dataset.imdb.word_dict() - print "load word dict successfully" - dict_dim = len(word_dict) - class_dim = 2 - - cost, acc = lstm_net(dict_dim=dict_dim, class_dim=class_dim) - - train_data = paddle.batch( - paddle.reader.shuffle( - paddle.dataset.imdb.train(word_dict), buf_size=BATCH_SIZE * 10), - batch_size=BATCH_SIZE) - place = fluid.CPUPlace() - exe = fluid.Executor(place) - - exe.run(fluid.default_startup_program()) - - for pass_id in xrange(PASS_NUM): - for data in train_data(): - chopped_data = chop_data(data) - tensor_words, tensor_label = prepare_feed_data(chopped_data, place) - - outs = exe.run(fluid.default_main_program(), - feed={"words": tensor_words, - "label": tensor_label}, - fetch_list=[cost, acc]) - cost_val = np.array(outs[0]) - acc_val = np.array(outs[1]) - - print("cost=" + str(cost_val) + " acc=" + str(acc_val)) - if acc_val > 0.7: - exit(0) - exit(1) - - -if __name__ == '__main__': - main() From f5d9336825e8f27fd02c260b5687c78ded61ed67 Mon Sep 17 00:00:00 2001 From: Liu Yiqun Date: Tue, 30 Jan 2018 10:55:07 +0000 Subject: [PATCH 070/106] Unify the definition of kFeedOpType and kFetchOpType. 
--- paddle/framework/executor.cc | 3 --- paddle/framework/feed_fetch_type.h | 4 ++++ paddle/framework/program_desc.cc | 24 +++++++++---------- paddle/framework/program_desc.h | 5 ++-- paddle/framework/prune.cc | 3 +-- paddle/inference/io.cc | 7 +++--- paddle/inference/io.h | 5 ---- .../book/test_inference_recognize_digits.cc | 6 ++--- 8 files changed, 25 insertions(+), 32 deletions(-) diff --git a/paddle/framework/executor.cc b/paddle/framework/executor.cc index cbf3ec7526..4f87cf8b95 100644 --- a/paddle/framework/executor.cc +++ b/paddle/framework/executor.cc @@ -33,9 +33,6 @@ DEFINE_bool(check_nan_inf, false, namespace paddle { namespace framework { -const std::string kFeedOpType = "feed"; -const std::string kFetchOpType = "fetch"; - Executor::Executor(const platform::Place& place) : place_(place) {} static void CreateTensor(Variable* var, proto::VarDesc::VarType var_type) { diff --git a/paddle/framework/feed_fetch_type.h b/paddle/framework/feed_fetch_type.h index 9bc4a90c44..168f456675 100644 --- a/paddle/framework/feed_fetch_type.h +++ b/paddle/framework/feed_fetch_type.h @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once +#include #include #include "paddle/framework/lod_tensor.h" @@ -20,5 +21,8 @@ namespace paddle { namespace framework { using FeedFetchType = LoDTensor; using FeedFetchList = std::vector; + +static const std::string kFeedOpType = "feed"; +static const std::string kFetchOpType = "fetch"; } // namespace framework } // namespace paddle diff --git a/paddle/framework/program_desc.cc b/paddle/framework/program_desc.cc index e59e392dfd..15ea4035c6 100644 --- a/paddle/framework/program_desc.cc +++ b/paddle/framework/program_desc.cc @@ -14,13 +14,11 @@ limitations under the License. 
*/ #include "paddle/framework/program_desc.h" #include "paddle/framework/block_desc.h" +#include "paddle/framework/feed_fetch_type.h" namespace paddle { namespace framework { -const std::string kFeedOpType = "feed"; -const std::string kFetchOpType = "fetch"; - BlockDesc *ProgramDesc::AppendBlock(const BlockDesc &parent) { auto *b = desc_.add_blocks(); b->set_parent_idx(parent.ID()); @@ -67,26 +65,26 @@ ProgramDesc::ProgramDesc(const std::string &binary_str) { } } -const std::vector ProgramDesc::GetFeedVarNames() { +const std::vector ProgramDesc::GetFeedTargetNames() { BlockDesc *global_block = blocks_[0].get(); - std::vector feed_var_names; + std::vector feed_target_names; for (auto *op : global_block->AllOps()) { - if (op->Type() == "feed") { - feed_var_names.insert(feed_var_names.begin(), op->Output("Out")[0]); + if (op->Type() == kFeedOpType) { + feed_target_names.insert(feed_target_names.begin(), op->Output("Out")[0]); } } - return feed_var_names; + return feed_target_names; } -const std::vector ProgramDesc::GetFetchVarNames() { +const std::vector ProgramDesc::GetFetchTargetNames() { BlockDesc *global_block = blocks_[0].get(); - std::vector fetch_var_names; + std::vector fetch_target_names; for (auto *op : global_block->AllOps()) { - if (op->Type() == "fetch") { - fetch_var_names.push_back(op->Input("X")[0]); + if (op->Type() == kFetchOpType) { + fetch_target_names.push_back(op->Input("X")[0]); } } - return fetch_var_names; + return fetch_target_names; } } // namespace framework diff --git a/paddle/framework/program_desc.h b/paddle/framework/program_desc.h index 2c3883275a..b9741b3139 100644 --- a/paddle/framework/program_desc.h +++ b/paddle/framework/program_desc.h @@ -45,9 +45,8 @@ class ProgramDesc { proto::ProgramDesc *Proto(); - const std::vector GetFeedVarNames(); - - const std::vector GetFetchVarNames(); + const std::vector GetFeedTargetNames(); + const std::vector GetFetchTargetNames(); private: proto::ProgramDesc desc_; diff --git 
a/paddle/framework/prune.cc b/paddle/framework/prune.cc index bff8e0bcea..db63cd12c9 100644 --- a/paddle/framework/prune.cc +++ b/paddle/framework/prune.cc @@ -21,12 +21,11 @@ limitations under the License. */ #include #include +#include "paddle/framework/feed_fetch_type.h" namespace paddle { namespace framework { -const std::string kFeedOpType = "feed"; -const std::string kFetchOpType = "fetch"; const std::string kDropOutOpType = "dropout"; const std::string kBatchNormOpType = "batch_norm"; diff --git a/paddle/inference/io.cc b/paddle/inference/io.cc index d1842ec938..556f235d16 100644 --- a/paddle/inference/io.cc +++ b/paddle/inference/io.cc @@ -13,13 +13,14 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/inference/io.h" + #include +#include "paddle/framework/block_desc.h" +#include "paddle/framework/feed_fetch_type.h" namespace paddle { namespace inference { -const std::string kFeedOpType = "feed"; - bool IsParameter(const framework::VarDesc* var, const framework::ProgramDesc* main_program) { if (var->Persistable()) { @@ -27,7 +28,7 @@ bool IsParameter(const framework::VarDesc* var, for (size_t i = 0; i < main_program->Size(); ++i) { const framework::BlockDesc& block = main_program->Block(i); for (auto* op : block.AllOps()) { - if (op->Type() == kFeedOpType) { + if (op->Type() == framework::kFeedOpType) { continue; } for (auto input_argument_name : op->InputArgumentNames()) { diff --git a/paddle/inference/io.h b/paddle/inference/io.h index 400f5af8c5..fa9a620764 100644 --- a/paddle/inference/io.h +++ b/paddle/inference/io.h @@ -16,18 +16,13 @@ limitations under the License. 
*/ #include #include -#include "paddle/framework/block_desc.h" #include "paddle/framework/executor.h" #include "paddle/framework/program_desc.h" #include "paddle/framework/scope.h" -#include "paddle/framework/var_desc.h" namespace paddle { namespace inference { -bool IsParameter(const framework::VarDesc* var, - const framework::ProgramDesc* main_program); - void LoadPersistables(framework::Executor& executor, framework::Scope& scope, const std::string& dirname, diff --git a/paddle/inference/tests/book/test_inference_recognize_digits.cc b/paddle/inference/tests/book/test_inference_recognize_digits.cc index d8e4c4d7ee..a2cdd60752 100644 --- a/paddle/inference/tests/book/test_inference_recognize_digits.cc +++ b/paddle/inference/tests/book/test_inference_recognize_digits.cc @@ -33,11 +33,11 @@ void TestInference(const std::string& dirname, // 2. Initialize the inference_program and load all parameters from file auto* inference_program = paddle::inference::Load(executor, *scope, dirname); - // 3. Get the feed_var_names and fetch_var_names + // 3. Get the feed_target_names and fetch_target_names const std::vector& feed_target_names = - inference_program->GetFeedVarNames(); + inference_program->GetFeedTargetNames(); const std::vector& fetch_target_names = - inference_program->GetFetchVarNames(); + inference_program->GetFetchTargetNames(); // 4. Prepare inputs std::map feed_targets; From d93959f0fb6580af95aea2645f99b163d69c82f5 Mon Sep 17 00:00:00 2001 From: typhoonzero Date: Tue, 30 Jan 2018 19:07:40 +0800 Subject: [PATCH 071/106] perf enhance reuse connection --- paddle/operators/send_op.cc | 27 +++++++++++++------ .../paddle/v2/fluid/distribute_transpiler.py | 9 ++++++- 2 files changed, 27 insertions(+), 9 deletions(-) diff --git a/paddle/operators/send_op.cc b/paddle/operators/send_op.cc index bb719dc2a8..0be3b37859 100644 --- a/paddle/operators/send_op.cc +++ b/paddle/operators/send_op.cc @@ -19,6 +19,7 @@ limitations under the License. 
*/ #include "paddle/framework/lod_tensor.h" #include "paddle/framework/op_registry.h" +#include #include #include "paddle/operators/detail/grpc_client.h" @@ -42,28 +43,35 @@ class SendOp : public framework::OperatorBase { platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance(); auto& ctx = *pool.Get(place); + + auto client_var_name = Output("RPCClient"); + PADDLE_ENFORCE_NOT_NULL(scope.FindVar(client_var_name), + "Can not find variable '%s' in the scope.", + client_var_name); + auto* client_var = scope.FindVar(client_var_name); + detail::RPCClient* rpc_client = client_var->GetMutable(); + for (size_t i = 0; i < ins.size(); i++) { VLOG(3) << "sending " << ins[i] << " to " << epmap[i]; - client_.AsyncSendVariable(epmap[i], ctx, scope, ins[i]); + rpc_client->AsyncSendVariable(epmap[i], ctx, scope, ins[i]); } - PADDLE_ENFORCE(client_.Wait()); + PADDLE_ENFORCE(rpc_client->Wait()); for (auto& ep : endpoints) { VLOG(3) << "batch barrier, ep: " << ep; - client_.AsyncSendBatchBarrier(ep); + rpc_client->AsyncSendBatchBarrier(ep); } - PADDLE_ENFORCE(client_.Wait()); + PADDLE_ENFORCE(rpc_client->Wait()); for (size_t i = 0; i < outs.size(); i++) { VLOG(3) << "getting " << outs[i] << " from " << epmap[i]; - client_.AsyncGetVariable(epmap[i], ctx, scope, outs[i]); + rpc_client->AsyncGetVariable(epmap[i], ctx, scope, outs[i]); } - - PADDLE_ENFORCE(client_.Wait()); + PADDLE_ENFORCE(rpc_client->Wait()); } private: - mutable detail::RPCClient client_; + // mutable detail::RPCClient client_; }; class SendOpMaker : public framework::OpProtoAndCheckerMaker { @@ -73,6 +81,9 @@ class SendOpMaker : public framework::OpProtoAndCheckerMaker { AddInput("X", "(Tensor) Input tensor to be sent").AsDuplicable(); AddOutput("Out", "(Tensor) Output tensor to be received from server") .AsDuplicable(); + AddOutput("RPCClient", + "(RPCClient) The RPC client object which is" + "initialized at most once."); AddComment(R"DOC( Send operator diff --git 
a/python/paddle/v2/fluid/distribute_transpiler.py b/python/paddle/v2/fluid/distribute_transpiler.py index 77f80442e0..a4464a281a 100644 --- a/python/paddle/v2/fluid/distribute_transpiler.py +++ b/python/paddle/v2/fluid/distribute_transpiler.py @@ -153,11 +153,18 @@ class DistributeTranspiler: self.param_grad_ep_mapping[ep]["params"].append(param) self.param_grad_ep_mapping[ep]["grads"].append(grad) + rpc_client_var = program.global_block().create_var( + name="RPC_CLIENT_VAR", + psersistable=True, + dtype='float32', # dtype and shape is not used in fact + shape=[0]) + # create send_op send_op = program.global_block().append_op( type="send", inputs={"X": send_inputs}, - outputs={"Out": send_outputs}, + outputs={"Out": send_outputs, + "RPCClient": rpc_client_var}, attrs={"endpoints": pserver_endpoints, "epmap": eplist}) # step4 From 683c5a3eb58ad3c75644a822b2e159e6b37b5b49 Mon Sep 17 00:00:00 2001 From: typhoonzero Date: Tue, 30 Jan 2018 19:09:19 +0800 Subject: [PATCH 072/106] clean up code --- paddle/operators/send_op.cc | 4 ---- 1 file changed, 4 deletions(-) diff --git a/paddle/operators/send_op.cc b/paddle/operators/send_op.cc index 0be3b37859..be41b527f2 100644 --- a/paddle/operators/send_op.cc +++ b/paddle/operators/send_op.cc @@ -19,7 +19,6 @@ limitations under the License. */ #include "paddle/framework/lod_tensor.h" #include "paddle/framework/op_registry.h" -#include #include #include "paddle/operators/detail/grpc_client.h" @@ -69,9 +68,6 @@ class SendOp : public framework::OperatorBase { } PADDLE_ENFORCE(rpc_client->Wait()); } - - private: - // mutable detail::RPCClient client_; }; class SendOpMaker : public framework::OpProtoAndCheckerMaker { From 7d303bdc69232846adc0a0ae3a2b27b168bf6367 Mon Sep 17 00:00:00 2001 From: caoying03 Date: Tue, 30 Jan 2018 18:12:52 +0800 Subject: [PATCH 073/106] fix the bug that dropout always use a fixed seed. 
--- paddle/operators/dropout_op.cc | 7 ++++ paddle/operators/dropout_op.cu | 6 ++- paddle/operators/dropout_op.h | 8 +++- python/paddle/v2/fluid/layers/nn.py | 39 +++++++++++++++++-- .../paddle/v2/fluid/tests/test_dropout_op.py | 8 ++-- 5 files changed, 58 insertions(+), 10 deletions(-) diff --git a/paddle/operators/dropout_op.cc b/paddle/operators/dropout_op.cc index 35cb18797f..5274aa204e 100644 --- a/paddle/operators/dropout_op.cc +++ b/paddle/operators/dropout_op.cc @@ -51,6 +51,13 @@ class DropoutOpMaker : public framework::OpProtoAndCheckerMaker { "'dropout_prob' must be between 0.0 and 1.0."); }); AddAttr("is_test", "True if in test phase.").SetDefault(false); + AddAttr("fix_seed", + "A flag indicating whether to use a fixed seed to generate " + "random mask. NOTE: DO NOT set this flag to true in " + "training. Setting this flag to true is only useful in " + "unittest or for debug that always the same output units " + "will be dropped.") + .SetDefault(false); AddAttr("seed", "Dropout random seed.").SetDefault(0); AddComment(R"DOC( diff --git a/paddle/operators/dropout_op.cu b/paddle/operators/dropout_op.cu index c56930336e..84d78445a4 100644 --- a/paddle/operators/dropout_op.cu +++ b/paddle/operators/dropout_op.cu @@ -62,7 +62,11 @@ class GPUDropoutKernel : public framework::OpKernel { auto* mask = context.Output("Mask"); auto* mask_data = mask->mutable_data(context.GetPlace()); int size = framework::product(mask->dims()); - int seed = context.Attr("seed"); + + std::random_device rnd; + int seed = + context.Attr("fix_seed") ? 
context.Attr("seed") : rnd(); + thrust::counting_iterator index_sequence_begin(0); thrust::transform(index_sequence_begin, index_sequence_begin + size, thrust::device_ptr(mask_data), diff --git a/paddle/operators/dropout_op.h b/paddle/operators/dropout_op.h index c90b8d277e..46e5dbc64f 100644 --- a/paddle/operators/dropout_op.h +++ b/paddle/operators/dropout_op.h @@ -38,9 +38,15 @@ class CPUDropoutKernel : public framework::OpKernel { if (!context.Attr("is_test")) { auto* mask = context.Output("Mask"); auto* mask_data = mask->mutable_data(context.GetPlace()); - int seed = context.Attr("seed"); + + // NOTE: fixed seed should only be used in unittest or for debug. + // Guarantee to use random seed in training. + std::random_device rnd; std::minstd_rand engine; + int seed = + context.Attr("fix_seed") ? context.Attr("seed") : rnd(); engine.seed(seed); + std::uniform_real_distribution dist(0, 1); size_t size = framework::product(mask->dims()); for (size_t i = 0; i < size; ++i) { diff --git a/python/paddle/v2/fluid/layers/nn.py b/python/paddle/v2/fluid/layers/nn.py index d11dccfd22..c38e21087d 100644 --- a/python/paddle/v2/fluid/layers/nn.py +++ b/python/paddle/v2/fluid/layers/nn.py @@ -847,7 +847,35 @@ def cos_sim(X, Y, **kwargs): return out -def dropout(x, dropout_prob, is_test=False, seed=0, **kwargs): +def dropout(x, dropout_prob, is_test=False, seed=None, **kwargs): + """ + Computes dropout. + + Drop or keep each element of `x` independently. Dropout is a regularization + technique for reducing overfitting by preventing neuron co-adaption during + training. The dropout operator randomly set (according to the given dropout + probability) the outputs of some units to zero, while others are remain + unchanged. + + Args: + x(variable): The input tensor. + dropout_prob(float): Probability of setting units to zero. + is_test(bool): A flag indicating whether it is in test phrase or not. + seed(int): A Python integer used to create random seeds. 
If this + parameter is set to None, a random seed is used. + NOTE: If an integer seed is given, always the same output + units will be dropped. DO NOT use a fixed seed in training. + + Returns: + Variable: A tensor variable. + + Examples: + .. code-block:: python + + x = fluid.layers.data(name="data", shape=[32, 32], dtype="float32") + droped = fluid.layers.dropout(input=x, dropout_rate=0.5) + """ + helper = LayerHelper('dropout', **kwargs) out = helper.create_tmp_variable(dtype=x.dtype) mask = helper.create_tmp_variable(dtype=x.dtype, stop_gradient=True) @@ -856,9 +884,12 @@ def dropout(x, dropout_prob, is_test=False, seed=0, **kwargs): inputs={'X': [x]}, outputs={'Out': [out], 'Mask': [mask]}, - attrs={'dropout_prob': dropout_prob, - 'is_test': is_test, - 'seed': seed}) + attrs={ + 'dropout_prob': dropout_prob, + 'is_test': is_test, + 'fix_seed': seed is not None, + 'seed': seed if seed is not None else 0 + }) return out diff --git a/python/paddle/v2/fluid/tests/test_dropout_op.py b/python/paddle/v2/fluid/tests/test_dropout_op.py index 107b9567dc..b0c55df9f5 100644 --- a/python/paddle/v2/fluid/tests/test_dropout_op.py +++ b/python/paddle/v2/fluid/tests/test_dropout_op.py @@ -21,7 +21,7 @@ class TestDropoutOp(OpTest): def setUp(self): self.op_type = "dropout" self.inputs = {'X': np.random.random((32, 64)).astype("float32")} - self.attrs = {'dropout_prob': 0.0, 'is_test': False} + self.attrs = {'dropout_prob': 0.0, 'fix_seed': True, 'is_test': False} self.outputs = { 'Out': self.inputs['X'], 'Mask': np.ones((32, 64)).astype('float32') @@ -38,7 +38,7 @@ class TestDropoutOp2(TestDropoutOp): def setUp(self): self.op_type = "dropout" self.inputs = {'X': np.random.random((32, 64)).astype("float32")} - self.attrs = {'dropout_prob': 1.0, 'is_test': False} + self.attrs = {'dropout_prob': 1.0, 'fix_seed': True, 'is_test': False} self.outputs = { 'Out': np.zeros((32, 64)).astype('float32'), 'Mask': np.zeros((32, 64)).astype('float32') @@ -49,7 +49,7 @@ class 
TestDropoutOp3(TestDropoutOp): def setUp(self): self.op_type = "dropout" self.inputs = {'X': np.random.random((32, 64, 2)).astype("float32")} - self.attrs = {'dropout_prob': 0.0, 'is_test': False} + self.attrs = {'dropout_prob': 0.0, 'fix_seed': True, 'is_test': False} self.outputs = { 'Out': self.inputs['X'], 'Mask': np.ones((32, 64, 2)).astype('float32') @@ -60,7 +60,7 @@ class TestDropoutOp4(OpTest): def setUp(self): self.op_type = "dropout" self.inputs = {'X': np.random.random((32, 64)).astype("float32")} - self.attrs = {'dropout_prob': 0.35, 'is_test': True} + self.attrs = {'dropout_prob': 0.35, 'fix_seed': True, 'is_test': True} self.outputs = { 'Out': self.inputs['X'] * (1.0 - self.attrs['dropout_prob']) } From 09570b48dd40a52009b66e93af6108cb308e361d Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Tue, 30 Jan 2018 15:22:52 +0800 Subject: [PATCH 074/106] layer norm -> scale + bias --- paddle/operators/layer_norm_op.cc | 19 ++++++------- .../v2/fluid/tests/test_layer_norm_op.py | 27 ++++++++++--------- 2 files changed, 25 insertions(+), 21 deletions(-) diff --git a/paddle/operators/layer_norm_op.cc b/paddle/operators/layer_norm_op.cc index 125ac9f53f..5821afe9f6 100644 --- a/paddle/operators/layer_norm_op.cc +++ b/paddle/operators/layer_norm_op.cc @@ -45,11 +45,12 @@ class LayerNormOp : public framework::OperatorWithKernel { auto matrix_dim = framework::flatten_to_2d(x_dim, begin_norm_axis); int left = static_cast(matrix_dim[0]); + int right = static_cast(matrix_dim[1]); PADDLE_ENFORCE_EQ(ctx->GetInputDim("Scale").size(), 1UL); - PADDLE_ENFORCE_EQ(ctx->GetInputDim("Scale")[0], left); + PADDLE_ENFORCE_EQ(ctx->GetInputDim("Scale")[0], right); PADDLE_ENFORCE_EQ(ctx->GetInputDim("Bias").size(), 1UL); - PADDLE_ENFORCE_EQ(ctx->GetInputDim("Bias")[0], left); + PADDLE_ENFORCE_EQ(ctx->GetInputDim("Bias")[0], right); ctx->SetOutputDim("Y", ctx->GetInputDim("X")); ctx->SetOutputDim("Mean", {left}); @@ -143,10 +144,10 @@ class LayerNormKernel // TODO(zcd): Some thinking 
about output_map, is it appropriate that // `output_map` and `input_map` point to the same memory. - auto inv_std_scale = var_map.unaryExpr(inv_std_func); + auto inv_std = var_map.unaryExpr(inv_std_func); output_map = (input_map - mean_map.replicate(1, right)) - .cwiseProduct(inv_std_scale.replicate(1, right)) - .cwiseProduct(scale_map.replicate(left, 1)) - + .cwiseProduct(inv_std.replicate(1, right)) + .cwiseProduct(scale_map.replicate(left, 1)) + bias_map.replicate(left, 1); } }; @@ -230,7 +231,7 @@ class LayerNormGradKernel if (d_bias) { d_bias->mutable_data(ctx.GetPlace()); auto d_bias_map = EigenMatrixMapRowMajor(d_bias->data(), 1, right); - d_bias_map = d_y_map.colwise().mean(); + d_bias_map = d_y_map.colwise().sum(); } if (d_scale) { d_scale->mutable_data(ctx.GetPlace()); @@ -245,7 +246,7 @@ class LayerNormGradKernel var_map.unaryExpr(inv_std_func).replicate(1, right)) .cwiseProduct(d_y_map)) .colwise() - .mean(); + .sum(); } if (d_x) { @@ -269,14 +270,14 @@ class LayerNormGradKernel .replicate(1, right); // dy_var_dx auto dvar_end_part = (x_map - mean_map.replicate(1, right)) + .cwiseProduct(scale_map.replicate(left, 1)) .cwiseProduct(d_y_map) .rowwise() .sum(); auto dvar_end = var_map.unaryExpr(inv_std_func) .unaryExpr(triple_product_func) .cwiseProduct(dvar_end_part) - .replicate(1, right) - .cwiseProduct(scale_map.replicate(left, 1)); + .replicate(1, right); auto dx_var = (T(-1.0) / right) * (x_map - mean_map.replicate(1, right)).cwiseProduct(dvar_end); diff --git a/python/paddle/v2/fluid/tests/test_layer_norm_op.py b/python/paddle/v2/fluid/tests/test_layer_norm_op.py index 9264cf4b79..d27d1d8138 100644 --- a/python/paddle/v2/fluid/tests/test_layer_norm_op.py +++ b/python/paddle/v2/fluid/tests/test_layer_norm_op.py @@ -49,35 +49,38 @@ def _reference_layer_norm_naive(x, scale, beta, epsilon, begin_norm_axis=1): def _reference_layer_norm_grad(x, grad_y, scale, mean, var, begin_norm_axis=1): x_shape = x.shape + scale_shape = scale.shape N = reduce(mul, 
x_shape[0:begin_norm_axis], 1) D = reduce(mul, x_shape[begin_norm_axis:len(x_shape)], 1) grad_y.shape = [N, D] x.shape = [N, D] mean.shape = [N, 1] var.shape = [N, 1] + scale.shape = [1, D] - d_scale = np.sum(grad_y, axis=1).reshape([1, D]) - d_bias = scale.reshape([1, D]) * np.sum(( - (x - mean) * np.sqrt(1 / var)) * grad_y, - axis=1).reshape([1, D]) + d_bias = np.sum(grad_y, axis=0).reshape([1, D]) + d_scale = np.sum(((x - mean) * np.sqrt(1 / var)) * grad_y, + axis=0).reshape([1, D]) - dx_end = np.sqrt(1.0 / var) * grad_y + dx_end = scale * np.sqrt(1.0 / var) * grad_y - d_mean_0 = np.sum(-np.sqrt(1.0 / var) * grad_y, axis=1).reshape([N, 1]) + d_mean_0 = np.sum(-np.sqrt(1.0 / var) * grad_y * scale, axis=1).reshape( + [N, 1]) # d_mean_1 = np.sum(-1.0 / var * (x - mean) * grad_y, axis=1).reshape( # [N, 1]) * (-1.0 / D * np.sqrt(1.0 / var) * # np.sum(x - mean, axis=1).reshape([N, 1])).reshape([N, 1]) - d_mean = 1.0 / D * (d_mean_0) + d_mean = 1.0 / D * d_mean_0 - d_std = np.sum(-1.0 / var * (x - mean) * grad_y, axis=1).reshape([N, 1]) * ( - 1.0 / D * np.sqrt(1.0 / var).reshape([N, 1]) * (x - mean)) + d_std = np.sum( + -1.0 / var * (x - mean) * grad_y * scale, axis=1).reshape([N, 1]) * ( + 1.0 / D * np.sqrt(1.0 / var).reshape([N, 1]) * (x - mean)) - grad_x = scale.reshape([1, D]) * (dx_end + d_mean + d_std) + grad_x = dx_end + d_mean + d_std grad_y.shape = x_shape x.shape = x_shape - - return grad_x, d_bias, d_scale + scale.shape = scale_shape + return grad_x, d_scale, d_bias def create_or_get_tensor(scope, var_name, var, place): From db8da0259abaf72ff8b1c42390ce03c091bbf17a Mon Sep 17 00:00:00 2001 From: kexinzhao Date: Tue, 30 Jan 2018 04:05:57 -0800 Subject: [PATCH 075/106] fix comments (#7978) --- paddle/framework/program_desc.cc | 20 ++++++++++---------- paddle/framework/program_desc.h | 5 ++--- paddle/inference/example.cc | 23 +++++++++++------------ paddle/inference/io.cc | 24 ++++++++++++------------ paddle/inference/io.h | 11 ++++++----- 5 files changed, 41 
insertions(+), 42 deletions(-) diff --git a/paddle/framework/program_desc.cc b/paddle/framework/program_desc.cc index e59e392dfd..b2368e3a27 100644 --- a/paddle/framework/program_desc.cc +++ b/paddle/framework/program_desc.cc @@ -67,26 +67,26 @@ ProgramDesc::ProgramDesc(const std::string &binary_str) { } } -const std::vector ProgramDesc::GetFeedVarNames() { +const std::vector ProgramDesc::GetFeedTargetNames() { BlockDesc *global_block = blocks_[0].get(); - std::vector feed_var_names; + std::vector feed_target_names; for (auto *op : global_block->AllOps()) { - if (op->Type() == "feed") { - feed_var_names.insert(feed_var_names.begin(), op->Output("Out")[0]); + if (op->Type() == kFeedOpType) { + feed_target_names.insert(feed_target_names.begin(), op->Output("Out")[0]); } } - return feed_var_names; + return feed_target_names; } -const std::vector ProgramDesc::GetFetchVarNames() { +const std::vector ProgramDesc::GetFetchTargetNames() { BlockDesc *global_block = blocks_[0].get(); - std::vector fetch_var_names; + std::vector fetch_target_names; for (auto *op : global_block->AllOps()) { - if (op->Type() == "fetch") { - fetch_var_names.push_back(op->Input("X")[0]); + if (op->Type() == kFetchOpType) { + fetch_target_names.push_back(op->Input("X")[0]); } } - return fetch_var_names; + return fetch_target_names; } } // namespace framework diff --git a/paddle/framework/program_desc.h b/paddle/framework/program_desc.h index 2c3883275a..b9741b3139 100644 --- a/paddle/framework/program_desc.h +++ b/paddle/framework/program_desc.h @@ -45,9 +45,8 @@ class ProgramDesc { proto::ProgramDesc *Proto(); - const std::vector GetFeedVarNames(); - - const std::vector GetFetchVarNames(); + const std::vector GetFeedTargetNames(); + const std::vector GetFetchTargetNames(); private: proto::ProgramDesc desc_; diff --git a/paddle/inference/example.cc b/paddle/inference/example.cc index 5173779c62..ac2aedd88b 100644 --- a/paddle/inference/example.cc +++ b/paddle/inference/example.cc @@ -40,15 +40,15 
@@ int main(int argc, char** argv) { std::string dirname = FLAGS_dirname; // 2. Initialize the inference program - auto* inference_program = paddle::inference::Load(*executor, *scope, dirname); + auto inference_program = paddle::inference::Load(*executor, *scope, dirname); // 3. Optional: perform optimization on the inference_program - // 4. Get the feed_var_names and fetch_var_names - const std::vector& feed_var_names = - inference_program->GetFeedVarNames(); - const std::vector& fetch_var_names = - inference_program->GetFetchVarNames(); + // 4. Get the feed_target_names and fetch_target_names + const std::vector& feed_target_names = + inference_program->GetFeedTargetNames(); + const std::vector& fetch_target_names = + inference_program->GetFetchTargetNames(); // 5. Generate input paddle::framework::LoDTensor input; @@ -68,14 +68,14 @@ int main(int argc, char** argv) { std::map fetch_targets; // set_feed_variable - for (size_t i = 0; i < feed_var_names.size(); ++i) { - feed_targets[feed_var_names[i]] = &feeds[i]; + for (size_t i = 0; i < feed_target_names.size(); ++i) { + feed_targets[feed_target_names[i]] = &feeds[i]; } // get_fetch_variable - fetchs.resize(fetch_var_names.size()); - for (size_t i = 0; i < fetch_var_names.size(); ++i) { - fetch_targets[fetch_var_names[i]] = &fetchs[i]; + fetchs.resize(fetch_target_names.size()); + for (size_t i = 0; i < fetch_target_names.size(); ++i) { + fetch_targets[fetch_target_names[i]] = &fetchs[i]; } // Run the inference program @@ -97,7 +97,6 @@ int main(int argc, char** argv) { std::cout << std::endl; } - delete inference_program; delete scope; delete executor; diff --git a/paddle/inference/io.cc b/paddle/inference/io.cc index 98b33d656d..f6d901381e 100644 --- a/paddle/inference/io.cc +++ b/paddle/inference/io.cc @@ -21,11 +21,11 @@ namespace inference { const std::string kFeedOpType = "feed"; bool IsParameter(const framework::VarDesc* var, - const framework::ProgramDesc* main_program) { + const framework::ProgramDesc& 
main_program) { if (var->Persistable()) { // There are many unreachable variables in the program - for (size_t i = 0; i < main_program->Size(); ++i) { - const framework::BlockDesc& block = main_program->Block(i); + for (size_t i = 0; i < main_program.Size(); ++i) { + const framework::BlockDesc& block = main_program.Block(i); for (auto* op : block.AllOps()) { if (op->Type() == kFeedOpType) { continue; @@ -44,12 +44,12 @@ bool IsParameter(const framework::VarDesc* var, void LoadPersistables(framework::Executor& executor, framework::Scope& scope, const std::string& dirname, - framework::ProgramDesc* main_program) { - framework::BlockDesc* global_block = main_program->MutableBlock(0); + const framework::ProgramDesc& main_program) { + const framework::BlockDesc& global_block = main_program.Block(0); framework::ProgramDesc* load_program = new framework::ProgramDesc(); framework::BlockDesc* load_block = load_program->MutableBlock(0); - for (auto* var : global_block->AllVars()) { + for (auto* var : global_block.AllVars()) { if (IsParameter(var, main_program)) { LOG(INFO) << "parameter's name: " << var->Name(); @@ -72,9 +72,9 @@ void LoadPersistables(framework::Executor& executor, delete load_program; } -framework::ProgramDesc* Load(framework::Executor& executor, - framework::Scope& scope, - const std::string& dirname) { +std::unique_ptr Load(framework::Executor& executor, + framework::Scope& scope, + const std::string& dirname) { std::string model_filename = dirname + "/__model__"; LOG(INFO) << "loading model from " << model_filename; std::ifstream inputfs(model_filename, std::ios::in | std::ios::binary); @@ -86,10 +86,10 @@ framework::ProgramDesc* Load(framework::Executor& executor, inputfs.read(&program_desc_str[0], program_desc_str.size()); inputfs.close(); - framework::ProgramDesc* main_program = - new framework::ProgramDesc(program_desc_str); + std::unique_ptr main_program( + new framework::ProgramDesc(program_desc_str)); - LoadPersistables(executor, scope, dirname, 
main_program); + LoadPersistables(executor, scope, dirname, *main_program); return main_program; } diff --git a/paddle/inference/io.h b/paddle/inference/io.h index 400f5af8c5..dccb700e95 100644 --- a/paddle/inference/io.h +++ b/paddle/inference/io.h @@ -14,6 +14,7 @@ limitations under the License. */ #pragma once +#include #include #include #include "paddle/framework/block_desc.h" @@ -26,16 +27,16 @@ namespace paddle { namespace inference { bool IsParameter(const framework::VarDesc* var, - const framework::ProgramDesc* main_program); + const framework::ProgramDesc& main_program); void LoadPersistables(framework::Executor& executor, framework::Scope& scope, const std::string& dirname, - framework::ProgramDesc* main_program); + const framework::ProgramDesc& main_program); -framework::ProgramDesc* Load(framework::Executor& executor, - framework::Scope& scope, - const std::string& dirname); +std::unique_ptr Load(framework::Executor& executor, + framework::Scope& scope, + const std::string& dirname); } // namespace inference } // namespace paddle From 263e01970d4f1923a5ee92e8d9b615a529bfb29e Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Tue, 30 Jan 2018 19:43:26 +0800 Subject: [PATCH 076/106] follow comments --- paddle/operators/layer_norm_op.cc | 197 ++++++++++++++++++++---------- 1 file changed, 133 insertions(+), 64 deletions(-) diff --git a/paddle/operators/layer_norm_op.cc b/paddle/operators/layer_norm_op.cc index 5821afe9f6..1c6d2ae4d0 100644 --- a/paddle/operators/layer_norm_op.cc +++ b/paddle/operators/layer_norm_op.cc @@ -33,29 +33,35 @@ class LayerNormOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext *ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("X"), ""); - PADDLE_ENFORCE(ctx->HasInput("Scale"), ""); - PADDLE_ENFORCE(ctx->HasInput("Bias"), ""); - PADDLE_ENFORCE(ctx->HasOutput("Y"), ""); + PADDLE_ENFORCE(ctx->HasInput("X"), + "Input(X) of LayerNormOp 
should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("Y"), + "Output(Y) of LayerNormOp should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("Mean"), + "Output(Mean) of LayerNormOp should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("Variance"), + "Output(Variance) of LayerNormOp should not be null."); auto x_dim = ctx->GetInputDim("X"); auto begin_norm_axis = ctx->Attrs().Get("begin_norm_axis"); PADDLE_ENFORCE_LT(begin_norm_axis, x_dim.size(), - "'begin_norm_axis' must be less than the rank of X"); + "'begin_norm_axis' must be less than the rank of X."); auto matrix_dim = framework::flatten_to_2d(x_dim, begin_norm_axis); int left = static_cast(matrix_dim[0]); int right = static_cast(matrix_dim[1]); - - PADDLE_ENFORCE_EQ(ctx->GetInputDim("Scale").size(), 1UL); - PADDLE_ENFORCE_EQ(ctx->GetInputDim("Scale")[0], right); - PADDLE_ENFORCE_EQ(ctx->GetInputDim("Bias").size(), 1UL); - PADDLE_ENFORCE_EQ(ctx->GetInputDim("Bias")[0], right); + if (ctx->HasInput("Scale")) { + PADDLE_ENFORCE_EQ(ctx->GetInputDim("Scale").size(), 1UL); + PADDLE_ENFORCE_EQ(ctx->GetInputDim("Scale")[0], right); + } + if (ctx->HasInput("Bias")) { + PADDLE_ENFORCE_EQ(ctx->GetInputDim("Bias").size(), 1UL); + PADDLE_ENFORCE_EQ(ctx->GetInputDim("Bias")[0], right); + } ctx->SetOutputDim("Y", ctx->GetInputDim("X")); ctx->SetOutputDim("Mean", {left}); ctx->SetOutputDim("Variance", {left}); - ctx->ShareLoD("X", "Y"); } }; @@ -64,18 +70,26 @@ class LayerNormOpMaker : public framework::OpProtoAndCheckerMaker { public: LayerNormOpMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "The input tensor"); + AddInput("X", "(LoDTensor) The input tensor."); AddInput("Scale", - "Scale is a 1-dimensional tensor of size H " - "that is applied to the output"); + "(Tensor, optional) Scale is a 1-dimensional tensor of size " + "H(`begin_norm_axis` splits the tensor(`X`) to a matrix [N,H])." 
+ "It is applied to the output.") + .AsDispensable(); AddInput("Bias", - "Bias is a 1-dimensional tensor of size H " - "that is applied to the output"); - AddOutput("Y", "result after normalization"); - AddOutput("Mean", "Mean of the current mini batch."); - AddOutput("Variance", "Variance of the current mini batch."); - - AddAttr("epsilon", "") + "(Tensor, optional) Bias is a 1-dimensional tensor of size " + "H(`begin_norm_axis` splits the tensor(`X`) to a matrix [N,H])." + "It is applied to the output.") + .AsDispensable(); + AddOutput("Y", "(LoDTensor) Result after normalization."); + AddOutput("Mean", "(Tensor) Mean of the current mini batch.") + .AsIntermediate(); + AddOutput("Variance", "(Tensor) Variance of the current mini batch.") + .AsIntermediate(); + + AddAttr("epsilon", + "(float, default 1e-5) Constant for " + "numerical stability") .SetDefault(1e-5) .AddCustomChecker([](const float &epsilon) { PADDLE_ENFORCE(epsilon >= 0.0f && epsilon <= 0.001f, @@ -83,7 +97,9 @@ class LayerNormOpMaker : public framework::OpProtoAndCheckerMaker { }); AddAttr("begin_norm_axis", "(int default:1), the " - "axis of `begin_norm_axis ... Rank(X) - 1` will be normalized") + "axis of `begin_norm_axis ... Rank(X) - 1` will be " + "normalized. 
`begin_norm_axis` splits the tensor(`X`) to a " + "matrix [N,H].") .SetDefault(1) .AddCustomChecker([](const int &begin_norm_axis) { PADDLE_ENFORCE_GT(begin_norm_axis, 0, @@ -124,8 +140,7 @@ class LayerNormKernel int right = static_cast(matrix_dim[1]); auto input_map = ConstEigenMatrixMapRowMajor(x->data(), left, right); - auto scale_map = ConstEigenMatrixMapRowMajor(scale->data(), 1, right); - auto bias_map = ConstEigenMatrixMapRowMajor(bias->data(), 1, right); + auto mean_map = EigenMatrixMapRowMajor(mean->data(), left, 1); auto var_map = EigenMatrixMapRowMajor(var->data(), left, 1); auto output_map = EigenMatrixMapRowMajor(output->data(), left, right); @@ -141,14 +156,32 @@ class LayerNormKernel .unaryExpr(add_epslion); auto inv_std_func = [](T ele) { return std::sqrt(1 / ele); }; - // TODO(zcd): Some thinking about output_map, is it appropriate that // `output_map` and `input_map` point to the same memory. auto inv_std = var_map.unaryExpr(inv_std_func); - output_map = (input_map - mean_map.replicate(1, right)) - .cwiseProduct(inv_std.replicate(1, right)) - .cwiseProduct(scale_map.replicate(left, 1)) + - bias_map.replicate(left, 1); + if (scale && bias) { + auto scale_map = + ConstEigenMatrixMapRowMajor(scale->data(), 1, right); + auto bias_map = ConstEigenMatrixMapRowMajor(bias->data(), 1, right); + output_map = (input_map - mean_map.replicate(1, right)) + .cwiseProduct(inv_std.replicate(1, right)) + .cwiseProduct(scale_map.replicate(left, 1)) + + bias_map.replicate(left, 1); + } else if (scale) { + auto scale_map = + ConstEigenMatrixMapRowMajor(scale->data(), 1, right); + output_map = (input_map - mean_map.replicate(1, right)) + .cwiseProduct(inv_std.replicate(1, right)) + .cwiseProduct(scale_map.replicate(left, 1)); + } else if (bias) { + auto bias_map = ConstEigenMatrixMapRowMajor(bias->data(), 1, right); + output_map = (input_map - mean_map.replicate(1, right)) + .cwiseProduct(inv_std.replicate(1, right)) + + bias_map.replicate(left, 1); + } else { + 
output_map = (input_map - mean_map.replicate(1, right)) + .cwiseProduct(inv_std.replicate(1, right)); + } } }; @@ -158,11 +191,16 @@ class LayerNormGradOp : public framework::OperatorWithKernel { void InferShape(framework::InferShapeContext *ctx) const override { // check input - PADDLE_ENFORCE(ctx->HasInput("X")); - PADDLE_ENFORCE(ctx->HasInput("Scale"), ""); - PADDLE_ENFORCE(ctx->HasInput("Mean"), ""); - PADDLE_ENFORCE(ctx->HasInput("Variance"), ""); - PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Y")), ""); + PADDLE_ENFORCE(ctx->HasInput("X"), + "Input(X) of LayerNormOp should not be null."); + PADDLE_ENFORCE(ctx->HasInput("Scale"), + "Input(Scale) of LayerNormOp should not be null."); + PADDLE_ENFORCE(ctx->HasInput("Mean"), + "Input(Mean) of LayerNormOp should not be null."); + PADDLE_ENFORCE(ctx->HasInput("Variance"), + "Input(Variance) of LayerNormOp should not be null."); + PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Y")), + "Input(Y@GRAD) of LayerNormOp should not be null."); // check output if (ctx->HasOutput(framework::GradVarName("X"))) { @@ -222,7 +260,6 @@ class LayerNormGradKernel auto *d_scale = ctx.Output(framework::GradVarName("Scale")); auto *d_bias = ctx.Output(framework::GradVarName("Bias")); - auto scale_map = ConstEigenMatrixMapRowMajor(scale->data(), 1, right); auto x_map = ConstEigenMatrixMapRowMajor(x->data(), left, right); auto d_y_map = ConstEigenMatrixMapRowMajor(d_y->data(), left, right); auto mean_map = ConstEigenMatrixMapRowMajor(mean->data(), left, 1); @@ -254,35 +291,67 @@ class LayerNormGradKernel auto d_x_map = EigenMatrixMapRowMajor(d_x->data(), left, right); auto triple_product_func = [](T ele) { return ele * ele * ele; }; auto inv_std_func = [](T ele) { return std::sqrt(1 / ele); }; - // dy_dx - auto dx_end = var_map.unaryExpr(inv_std_func) - .replicate(1, right) - .cwiseProduct(d_y_map) - .cwiseProduct(scale_map.replicate(left, 1)); - // dy_dmean_dx - auto dx_mean = (T(-1.0) / right) * - 
var_map.unaryExpr(inv_std_func) - .replicate(1, right) - .cwiseProduct(d_y_map) - .cwiseProduct(scale_map.replicate(left, 1)) - .rowwise() - .sum() - .replicate(1, right); - // dy_var_dx - auto dvar_end_part = (x_map - mean_map.replicate(1, right)) - .cwiseProduct(scale_map.replicate(left, 1)) - .cwiseProduct(d_y_map) - .rowwise() - .sum(); - auto dvar_end = var_map.unaryExpr(inv_std_func) - .unaryExpr(triple_product_func) - .cwiseProduct(dvar_end_part) - .replicate(1, right); - auto dx_var = - (T(-1.0) / right) * - (x_map - mean_map.replicate(1, right)).cwiseProduct(dvar_end); - - d_x_map = dx_end + dx_mean + dx_var; + // TODO(zcd): these code can be refined + if (d_scale) { + auto scale_map = + ConstEigenMatrixMapRowMajor(scale->data(), 1, right); + // dy_dx + auto dx_end = var_map.unaryExpr(inv_std_func) + .replicate(1, right) + .cwiseProduct(d_y_map) + .cwiseProduct(scale_map.replicate(left, 1)); + // dy_dmean_dx + auto dx_mean = (T(-1.0) / right) * + var_map.unaryExpr(inv_std_func) + .replicate(1, right) + .cwiseProduct(d_y_map) + .cwiseProduct(scale_map.replicate(left, 1)) + .rowwise() + .sum() + .replicate(1, right); + // dy_var_dx + auto dvar_end_part = (x_map - mean_map.replicate(1, right)) + .cwiseProduct(scale_map.replicate(left, 1)) + .cwiseProduct(d_y_map) + .rowwise() + .sum(); + auto dvar_end = var_map.unaryExpr(inv_std_func) + .unaryExpr(triple_product_func) + .cwiseProduct(dvar_end_part) + .replicate(1, right); + auto dx_var = + (T(-1.0) / right) * + (x_map - mean_map.replicate(1, right)).cwiseProduct(dvar_end); + + d_x_map = dx_end + dx_mean + dx_var; + } else { + // dy_dx + auto dx_end = var_map.unaryExpr(inv_std_func) + .replicate(1, right) + .cwiseProduct(d_y_map); + // dy_dmean_dx + auto dx_mean = (T(-1.0) / right) * + var_map.unaryExpr(inv_std_func) + .replicate(1, right) + .cwiseProduct(d_y_map) + .rowwise() + .sum() + .replicate(1, right); + // dy_var_dx + auto dvar_end_part = (x_map - mean_map.replicate(1, right)) + .cwiseProduct(d_y_map) 
+ .rowwise() + .sum(); + auto dvar_end = var_map.unaryExpr(inv_std_func) + .unaryExpr(triple_product_func) + .cwiseProduct(dvar_end_part) + .replicate(1, right); + auto dx_var = + (T(-1.0) / right) * + (x_map - mean_map.replicate(1, right)).cwiseProduct(dvar_end); + + d_x_map = dx_end + dx_mean + dx_var; + } } } }; From 5ed07ef1d1d1b91b158f7b3fe622eeaac00b5ad5 Mon Sep 17 00:00:00 2001 From: Yibing Liu Date: Tue, 30 Jan 2018 04:36:00 -0800 Subject: [PATCH 077/106] Add more comments and enable the distribution's outside setting --- paddle/operators/label_smooth_op.cc | 48 ++++++++++++++++++- paddle/operators/label_smooth_op.h | 12 ++++- .../v2/fluid/tests/test_label_smooth_op.py | 32 +++++++++---- 3 files changed, 79 insertions(+), 13 deletions(-) diff --git a/paddle/operators/label_smooth_op.cc b/paddle/operators/label_smooth_op.cc index 99a0a005a1..432d4c7d01 100644 --- a/paddle/operators/label_smooth_op.cc +++ b/paddle/operators/label_smooth_op.cc @@ -31,6 +31,14 @@ class LabelSmoothOp : public framework::OperatorWithKernel { PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) of LabelSmoothOp should not be null."); auto in_dims = ctx->GetInputDim("X"); + if (ctx->HasInput("PriorDist")) { + auto noise_dims = ctx->GetInputDim("PriorDist"); + auto noise_numel = paddle::framework::product(noise_dims); + PADDLE_ENFORCE( + in_dims[1] == noise_numel, + "The number of elements in Input(PriorDist) must be equal to the " + "dimension of each label."); + } ctx->ShareLoD("X", /*->*/ "Out"); ctx->SetOutputDim("Out", in_dims); } @@ -40,8 +48,22 @@ class LabelSmoothOpMaker : public framework::OpProtoAndCheckerMaker { public: LabelSmoothOpMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "The input label of LabelSmooth operator."); - AddOutput("Out", "The smoothed label of LabelSmooth operator."); + AddInput("X", + "(LoDTensor) The input labels of LabelSmooth operator. 
This " + "input can be batched labels in one-hot encoding or output from " + "softmax, with shape [N x K], where N is the batch size and K is " + "the number of classes"); + AddInput("PriorDist", + "(Tensor, optional)" + "The prior distribution to be added to the smoothed label. It is " + "fixed during training and the number of elements should be equal " + "to the dimension K of each label. Default is uniform " + "distribution and each element will be set to 1/K if not provided " + "in input.") + .AsDispensable(); + AddOutput("Out", + "(loDTensor) The smoothed label of LabelSmooth operator. It has" + "the same shape and LoD with the Input(LoDTensor)."); AddAttr("epsilon", "(float, default 0.0f)" "The smoothing parameter of LabelSmooth operator.") @@ -49,6 +71,28 @@ class LabelSmoothOpMaker : public framework::OpProtoAndCheckerMaker { AddComment(R"DOC( LabelSmooth Operator. +Label smoothing is a mechanism to regularize the classifier layer. In machine +learning, optimizing the log-likelihood of the correct label directly may +cause two problems. First, it may result in overfitting: if the model learns +to assign full probability to the ground-truth label for each training example, +it is not guaranteed to generalize. Second, it encourages the differences +between the largest logit and all others to become large, reducing the ability +of the model to adapt. Label smoothing is proposed to encourage the model to +be less confident, which replaces the ground-truth label $y$ with the weighted +sum of itselft and some fixed distribution $\mu$, +i.e. + +$$ + \tilde{y} = (1 - \epsilon) * y + \epsilon * \mu, +$$ + +where $(1 - \epsilon)$ and $\epsilon$ are the weights respectively, and +$\tilde{y}$ is the smoothed label. Usually uniform distribution is used for +$\mu$. This change in the ground-truth label is called label-smoothing +regularization or LSR. + +See more details about label smoothing in https://arxiv.org/abs/1512.00567. 
+ )DOC"); } }; diff --git a/paddle/operators/label_smooth_op.h b/paddle/operators/label_smooth_op.h index d94ff43d5a..87bc9f793e 100644 --- a/paddle/operators/label_smooth_op.h +++ b/paddle/operators/label_smooth_op.h @@ -26,6 +26,7 @@ class LabelSmoothKernel : public framework::OpKernel { void Compute(const framework::ExecutionContext& ctx) const { auto* out_t = ctx.Output("Out"); auto* in_t = ctx.Input("X"); + auto* dist_t = ctx.Input("PriorDist"); auto label_dim = in_t->dims()[1]; out_t->mutable_data(ctx.GetPlace()); @@ -33,8 +34,15 @@ class LabelSmoothKernel : public framework::OpKernel { auto out = framework::EigenVector::Flatten(*out_t); auto in = framework::EigenVector::Flatten(*in_t); auto& dev = *ctx.template device_context().eigen_device(); - out.device(dev) = - static_cast(1 - epsilon) * in + static_cast(epsilon / label_dim); + if (dist_t) { + auto dist = framework::EigenVector::Flatten(*dist_t); + out.device(dev) = + static_cast(1 - epsilon) * in + + epsilon * dist.broadcast(Eigen::DSizes(in_t->numel())); + } else { + out.device(dev) = static_cast(1 - epsilon) * in + + static_cast(epsilon / label_dim); + } } }; diff --git a/python/paddle/v2/fluid/tests/test_label_smooth_op.py b/python/paddle/v2/fluid/tests/test_label_smooth_op.py index d156e2c35f..19a4df5744 100644 --- a/python/paddle/v2/fluid/tests/test_label_smooth_op.py +++ b/python/paddle/v2/fluid/tests/test_label_smooth_op.py @@ -18,16 +18,20 @@ from op_test import OpTest class TestLabelSmoothOp(OpTest): - def setUp(self): + def config(self): self.op_type = "label_smooth" - epsilon = 0.1 - batch_size, label_dim = 5, 10 - label = np.zeros((batch_size, label_dim)).astype("float64") - nonzero_index = np.random.randint(label_dim, size=(batch_size)) - label[np.arange(batch_size), nonzero_index] = 1 - smoothed_label = (1 - epsilon) * label + epsilon / label_dim - self.inputs = {'X': label} - self.attrs = {'epsilon': epsilon} + self.epsilon = 0.1 + batch_size, self.label_dim = 5, 10 + self.label = 
np.zeros((batch_size, self.label_dim)).astype("float64") + nonzero_index = np.random.randint(self.label_dim, size=(batch_size)) + self.label[np.arange(batch_size), nonzero_index] = 1 + + def setUp(self): + self.config() + smoothed_label = (1 - self.epsilon + ) * self.label + self.epsilon / self.label_dim + self.inputs = {'X': self.label} + self.attrs = {'epsilon': self.epsilon} self.outputs = {'Out': smoothed_label} def test_check_output(self): @@ -37,5 +41,15 @@ class TestLabelSmoothOp(OpTest): self.check_grad(["X"], "Out") +class TestLabelSmoothOpWithPriorDist(TestLabelSmoothOp): + def setUp(self): + self.config() + dist = np.random.random((1, self.label_dim)) + smoothed_label = (1 - self.epsilon) * self.label + self.epsilon * dist + self.inputs = {'X': self.label, 'PriorDist': dist} + self.attrs = {'epsilon': self.epsilon} + self.outputs = {'Out': smoothed_label} + + if __name__ == '__main__': unittest.main() From a10caf7c2360cb18c788c19dc73ec7a1e055866a Mon Sep 17 00:00:00 2001 From: Yibing Liu Date: Tue, 30 Jan 2018 04:41:50 -0800 Subject: [PATCH 078/106] Fix typos in label_smooth_op --- paddle/operators/label_smooth_op.cc | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/paddle/operators/label_smooth_op.cc b/paddle/operators/label_smooth_op.cc index 432d4c7d01..c89082f44b 100644 --- a/paddle/operators/label_smooth_op.cc +++ b/paddle/operators/label_smooth_op.cc @@ -79,8 +79,7 @@ it is not guaranteed to generalize. Second, it encourages the differences between the largest logit and all others to become large, reducing the ability of the model to adapt. Label smoothing is proposed to encourage the model to be less confident, which replaces the ground-truth label $y$ with the weighted -sum of itselft and some fixed distribution $\mu$, -i.e. +sum of itself and some fixed distribution $\mu$, i.e. 
$$ \tilde{y} = (1 - \epsilon) * y + \epsilon * \mu, From acb907878a0e3a66f56371aa82088233f6dc9aaf Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Tue, 30 Jan 2018 20:33:54 +0800 Subject: [PATCH 079/106] refine unit test --- .../v2/fluid/tests/test_layer_norm_op.py | 118 +++++++++--------- 1 file changed, 59 insertions(+), 59 deletions(-) diff --git a/python/paddle/v2/fluid/tests/test_layer_norm_op.py b/python/paddle/v2/fluid/tests/test_layer_norm_op.py index d27d1d8138..ac94dfb92a 100644 --- a/python/paddle/v2/fluid/tests/test_layer_norm_op.py +++ b/python/paddle/v2/fluid/tests/test_layer_norm_op.py @@ -21,29 +21,19 @@ from paddle.v2.fluid.op import Operator from paddle.v2.fluid.framework import grad_var_name -def get_backward_op(scope, op, no_grad_set): - backward_op = core.Operator.backward(op, no_grad_set) - for input in backward_op.input_vars(): - var = scope.var(input) - var.get_tensor() - for output in backward_op.output_vars(): - var = scope.var(output) - var.get_tensor() - return backward_op - - def _reference_layer_norm_naive(x, scale, beta, epsilon, begin_norm_axis=1): - old_shape = x.shape - N = reduce(mul, old_shape[0:begin_norm_axis], 1) - D = reduce(mul, old_shape[begin_norm_axis:len(old_shape)], 1) + x_shape = x.shape + N = reduce(mul, x_shape[0:begin_norm_axis], 1) + D = reduce(mul, x_shape[begin_norm_axis:len(x_shape)], 1) x.shape = [N, D] + mean = np.mean(x, axis=1) var = np.var(x, axis=1) + epsilon output = scale.reshape([1, D]) * np.divide( (x - mean.reshape([N, 1])), (np.sqrt(var)).reshape([N, 1])) + beta.reshape([1, D]) - output.shape = old_shape - x.shape = old_shape + + x.shape, output.shape = x_shape, x_shape return output, mean, var @@ -52,27 +42,25 @@ def _reference_layer_norm_grad(x, grad_y, scale, mean, var, begin_norm_axis=1): scale_shape = scale.shape N = reduce(mul, x_shape[0:begin_norm_axis], 1) D = reduce(mul, x_shape[begin_norm_axis:len(x_shape)], 1) - grad_y.shape = [N, D] - x.shape = [N, D] - mean.shape = [N, 1] - var.shape = 
[N, 1] + x.shape, grad_y.shape = [N, D], [N, D] + var.shape, mean.shape = [N, 1], [N, 1] scale.shape = [1, D] + # d_bias d_bias = np.sum(grad_y, axis=0).reshape([1, D]) + # d_scale d_scale = np.sum(((x - mean) * np.sqrt(1 / var)) * grad_y, axis=0).reshape([1, D]) - + # dx dx_end = scale * np.sqrt(1.0 / var) * grad_y - d_mean_0 = np.sum(-np.sqrt(1.0 / var) * grad_y * scale, axis=1).reshape( [N, 1]) # d_mean_1 = np.sum(-1.0 / var * (x - mean) * grad_y, axis=1).reshape( # [N, 1]) * (-1.0 / D * np.sqrt(1.0 / var) * # np.sum(x - mean, axis=1).reshape([N, 1])).reshape([N, 1]) d_mean = 1.0 / D * d_mean_0 - d_std = np.sum( - -1.0 / var * (x - mean) * grad_y * scale, axis=1).reshape([N, 1]) * ( + -(1.0 / var) * (x - mean) * grad_y * scale, axis=1).reshape([N, 1]) * ( 1.0 / D * np.sqrt(1.0 / var).reshape([N, 1]) * (x - mean)) grad_x = dx_end + d_mean + d_std @@ -83,6 +71,17 @@ def _reference_layer_norm_grad(x, grad_y, scale, mean, var, begin_norm_axis=1): return grad_x, d_scale, d_bias +def get_backward_op(scope, op, no_grad_set): + backward_op = core.Operator.backward(op, no_grad_set) + for input in backward_op.input_vars(): + var = scope.var(input) + var.get_tensor() + for output in backward_op.output_vars(): + var = scope.var(output) + var.get_tensor() + return backward_op + + def create_or_get_tensor(scope, var_name, var, place): tensor = scope.var(var_name).get_tensor() if var is not None: @@ -145,8 +144,9 @@ class TestLayerNormdOp(OpTest): self.assertLessEqual(max_diff, max_relative_error, err_msg()) - def test_forward_backward(self): + def check_forward_backward(self, shape, begin_norm_axis): def test_with_place(place, shape, begin_norm_axis=1): + # setUp assert begin_norm_axis > 0 and begin_norm_axis < len( shape), 'begin_norm_axis must be between 0 and len(shape)-1.' 
# attr @@ -158,30 +158,35 @@ class TestLayerNormdOp(OpTest): x_val = np.random.random_sample(x_shape).astype(np.float32) scale_val = np.random.random_sample(scale_shape).astype(np.float32) bias_val = np.random.random_sample(scale_shape).astype(np.float32) + y_grad = np.random.random_sample(x_shape).astype(np.float32) # run forward y_out, saved_mean, var_ref = _reference_layer_norm_naive( x_val, scale_val, bias_val, epsilon, begin_norm_axis) + naive_fw = {"Y": y_out, "Mean": saved_mean, "Variance": var_ref} - # for gradient test - y_grad = np.random.random_sample(x_shape).astype(np.float32) - + # get gradient x_grad_ref, scale_grad_ref, bias_grad_ref = _reference_layer_norm_grad( x_val, y_grad, scale_val, saved_mean, var_ref, begin_norm_axis) + naive_grad = { + "X": x_grad_ref, + "Scale": scale_grad_ref, + "Bias": bias_grad_ref + } scope = core.Scope() # create input - x_tensor = create_or_get_tensor(scope, "X", x_val, place) - scale_tensor = create_or_get_tensor(scope, "Scale", scale_val, - place) - bias_tensor = create_or_get_tensor(scope, "Bias", bias_val, place) + input_map = {"X": x_val, "Scale": scale_val, "Bias": bias_val} + for i_name in input_map: + create_or_get_tensor(scope, i_name, input_map[i_name], place) # create output - y_tensor = create_or_get_tensor(scope, "Y", None, place) - mean_tensor = create_or_get_tensor(scope, "Mean", None, place) - variance_tensor = create_or_get_tensor(scope, "Variance", None, - place) + output_map = {"Y": None, "Mean": None, "Variance": None} + output_tensor = {} + for o_name in output_map: + output_tensor[o_name] = create_or_get_tensor( + scope, o_name, output_map[o_name], place) layer_norm_op = Operator( "layer_norm", @@ -200,13 +205,10 @@ class TestLayerNormdOp(OpTest): layer_norm_op.run(scope, place) # check forward result - if isinstance(place, core.CUDAPlace): - atol = 5e-2 - else: - atol = 1e-4 - self.__assert_close(y_tensor, y_out, "Y", atol) - self.__assert_close(mean_tensor, saved_mean, "Mean", atol) - 
self.__assert_close(variance_tensor, var_ref, "Variance", atol) + atol = 5e-2 if isinstance(place, core.CUDAPlace) else 1e-4 + for o_tensor in output_tensor: + self.__assert_close(output_tensor[o_tensor], naive_fw[o_tensor], + o_tensor, atol) # run backward layer_norm_op_grad = get_backward_op(scope, layer_norm_op, set()) @@ -216,30 +218,28 @@ class TestLayerNormdOp(OpTest): feed_dict={"Y": y_grad}) layer_norm_op_grad.run(scope, place) - x_grad_tensor = create_or_get_tensor(scope, - grad_var_name("X"), None, - place) - scale_grad_tensor = create_or_get_tensor(scope, - grad_var_name("Scale"), - None, place) - bias_grad_tensor = create_or_get_tensor(scope, - grad_var_name("Bias"), None, - place) + # get output + grad_tensor = {} + for o_name in naive_grad: + grad_tensor[o_name] = x_ = create_or_get_tensor( + scope, grad_var_name(o_name), None, place) # check gradient output - self.__assert_grad_close(x_grad_tensor, x_grad_ref, "x_grad", place) - self.__assert_grad_close(scale_grad_tensor, scale_grad_ref, - "scale_grad", place) - self.__assert_grad_close(bias_grad_tensor, bias_grad_ref, - "bias_grad", place) + for o_grad in naive_grad: + self.__assert_grad_close(grad_tensor[o_grad], + naive_grad[o_grad], o_grad + "@GRAD", + place) places = [core.CPUPlace()] if core.is_compile_gpu() and core.op_support_gpu("layer_norm"): places.append(core.CUDAPlace(0)) for place in places: - test_with_place(place, [2, 3, 4, 5], begin_norm_axis=1) - test_with_place(place, [2, 3, 4, 5], begin_norm_axis=3) + test_with_place(place, shape, begin_norm_axis) + + def test_check_forward_backward(self): + self.check_forward_backward(shape=[2, 3, 4, 5], begin_norm_axis=1) + self.check_forward_backward(shape=[2, 3, 4, 5], begin_norm_axis=3) if __name__ == '__main__': From e5058ed1f14b8fe16be8055bb819e0a101cf2ade Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Tue, 30 Jan 2018 22:13:06 +0800 Subject: [PATCH 080/106] Add unit test for with_scale and with_bias --- 
.../paddle/v2/fluid/tests/test_layer_norm_op.py | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/python/paddle/v2/fluid/tests/test_layer_norm_op.py b/python/paddle/v2/fluid/tests/test_layer_norm_op.py index ac94dfb92a..7d5dc7d1a6 100644 --- a/python/paddle/v2/fluid/tests/test_layer_norm_op.py +++ b/python/paddle/v2/fluid/tests/test_layer_norm_op.py @@ -54,10 +54,7 @@ def _reference_layer_norm_grad(x, grad_y, scale, mean, var, begin_norm_axis=1): # dx dx_end = scale * np.sqrt(1.0 / var) * grad_y d_mean_0 = np.sum(-np.sqrt(1.0 / var) * grad_y * scale, axis=1).reshape( - [N, 1]) - # d_mean_1 = np.sum(-1.0 / var * (x - mean) * grad_y, axis=1).reshape( - # [N, 1]) * (-1.0 / D * np.sqrt(1.0 / var) * - # np.sum(x - mean, axis=1).reshape([N, 1])).reshape([N, 1]) + [N, 1]) # the second part equals to zero. d_mean = 1.0 / D * d_mean_0 d_std = np.sum( -(1.0 / var) * (x - mean) * grad_y * scale, axis=1).reshape([N, 1]) * ( @@ -237,10 +234,19 @@ class TestLayerNormdOp(OpTest): for place in places: test_with_place(place, shape, begin_norm_axis) - def test_check_forward_backward(self): + def test_check_forward_backward_with_scale_and_bias(self): self.check_forward_backward(shape=[2, 3, 4, 5], begin_norm_axis=1) self.check_forward_backward(shape=[2, 3, 4, 5], begin_norm_axis=3) + def test_check_forward_backward_with_scale(self): + pass # TODO(zcd) + + def test_check_forward_backward_with_bias(self): + pass # TODO(zcd) + + def test_check_forward_backward(self): + pass # TODO(zcd) + if __name__ == '__main__': unittest.main() From 2e907c3613abfd68ebe8bf4c9d7b2bc42816105a Mon Sep 17 00:00:00 2001 From: Siddharth Goyal Date: Tue, 30 Jan 2018 15:20:40 -0800 Subject: [PATCH 081/106] Add variant of new load and save ops for storing model params in a single file (#7909) * Add save_combine_op * Add load_combine_op and test * Add unit-test * Add a delete to free buffer memory * Add new variant of load/save * Fix unit-test * Add another unit test for 
compatibility with original save/load * Address review comments and simplify logic * Address review comments and simplify code - part 2 * Fix naming issues and CMake problems * Address review comments * Fix LoD information in tests * Address review comments: round 2 --- paddle/operators/CMakeLists.txt | 3 + paddle/operators/load_combine_op.cc | 108 +++++++++++ paddle/operators/save_combine_op.cc | 141 ++++++++++++++ paddle/operators/save_load_combine_op_test.cc | 180 ++++++++++++++++++ paddle/operators/save_load_op_test.cc | 2 +- 5 files changed, 433 insertions(+), 1 deletion(-) create mode 100644 paddle/operators/load_combine_op.cc create mode 100644 paddle/operators/save_combine_op.cc create mode 100644 paddle/operators/save_load_combine_op_test.cc diff --git a/paddle/operators/CMakeLists.txt b/paddle/operators/CMakeLists.txt index 48cf5816cc..b2e73b6f23 100644 --- a/paddle/operators/CMakeLists.txt +++ b/paddle/operators/CMakeLists.txt @@ -173,6 +173,8 @@ endif() # FIXME(typhoonzero): save/load depends lodtensor serialization functions op_library(save_op DEPS lod_tensor) op_library(load_op DEPS lod_tensor) +op_library(save_combine_op DEPS lod_tensor) +op_library(load_combine_op DEPS lod_tensor) list(REMOVE_ITEM GENERAL_OPS ${DEPS_OPS}) foreach(src ${GENERAL_OPS}) @@ -192,3 +194,4 @@ if(WITH_GPU) cc_test(nccl_op_test SRCS nccl_op_test.cu.cc DEPS nccl_op gpu_info device_context) endif() cc_test(save_load_op_test SRCS save_load_op_test.cc DEPS save_op load_op) +cc_test(save_load_combine_op_test SRCS save_load_combine_op_test.cc DEPS save_combine_op load_combine_op) diff --git a/paddle/operators/load_combine_op.cc b/paddle/operators/load_combine_op.cc new file mode 100644 index 0000000000..f4be793d7b --- /dev/null +++ b/paddle/operators/load_combine_op.cc @@ -0,0 +1,108 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ +#include + +#include "paddle/framework/op_registry.h" +#include "paddle/platform/device_context.h" + +namespace paddle { +namespace operators { + +class LoadCombineOp : public framework::OperatorBase { + public: + LoadCombineOp(const std::string &type, + const framework::VariableNameMap &inputs, + const framework::VariableNameMap &outputs, + const framework::AttributeMap &attrs) + : OperatorBase(type, inputs, outputs, attrs) {} + void Run(const framework::Scope &scope, + const platform::Place &place) const override { + auto filename = Attr("file_path"); + + std::ifstream fin(filename); + PADDLE_ENFORCE(static_cast(fin), + "Cannot open file %s for load_combine op", filename); + + auto out_var_names = Outputs("Out"); + PADDLE_ENFORCE_GT( + static_cast(out_var_names.size()), 0, + "The number of output variables should be greater than 0."); + + platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); + auto &dev_ctx = *pool.Get(place); + + for (size_t i = 0; i < out_var_names.size(); i++) { + auto *out_var = scope.FindVar(out_var_names[i]); + + PADDLE_ENFORCE(out_var != nullptr, "Output variable %s cannot be found", + out_var_names[i]); + + auto *tensor = out_var->GetMutable(); + + // Error checking + PADDLE_ENFORCE(static_cast(fin), "Cannot read more from file %s", + filename); + + // Get data from fin to tensor + DeserializeFromStream(fin, tensor, dev_ctx); + + if (platform::is_gpu_place(place)) { + // copy CPU to GPU + framework::LoDTensor cpu_tensor; + cpu_tensor.ShareDataWith(*tensor); + cpu_tensor.set_lod(tensor->lod()); + + // reset tensor + 
out_var->Clear(); + tensor = out_var->GetMutable(); + tensor->set_lod(cpu_tensor.lod()); + Copy(cpu_tensor, place, dev_ctx, tensor); + } + } + } +}; + +class LoadCombineOpProtoMaker : public framework::OpProtoAndCheckerMaker { + public: + LoadCombineOpProtoMaker(OpProto *proto, OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddOutput( + "Out", + "(vector) The output LoDTensors that will be read from the input file.") + .AsDuplicable(); + AddAttr("file_path", + "(string) " + "LoDTensors will be loaded from \"file_path\".") + .AddCustomChecker( + [](const std::string &path) { return !path.empty(); }); + AddComment(R"DOC( +LoadCombine Operator. + +LoadCombine operator loads LoDTensor variables from a file. The file should +contain one or more LoDTensors serialized using the SaveCombine operator. The +LoadCombine operator applies a deserialization strategy to appropriately load +the LodTensors, and this strategy complements the serialization strategy used +in the SaveCombine operator. Hence, the LoadCombine operator is tightly coupled +with the SaveCombine operator, and can only deserialize one or more LoDTensors +that were saved using the SaveCombine operator. + +)DOC"); + } +}; +} // namespace operators +} // namespace paddle +namespace ops = paddle::operators; + +REGISTER_OPERATOR(load_combine, ops::LoadCombineOp, + ops::LoadCombineOpProtoMaker); diff --git a/paddle/operators/save_combine_op.cc b/paddle/operators/save_combine_op.cc new file mode 100644 index 0000000000..bffa2908bc --- /dev/null +++ b/paddle/operators/save_combine_op.cc @@ -0,0 +1,141 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include +#include +#include +#include +#include +#include "paddle/framework/data_type.h" +#include "paddle/framework/framework.pb.h" +#include "paddle/framework/lod_tensor.h" +#include "paddle/framework/op_registry.h" +#include "paddle/platform/device_context.h" + +namespace paddle { +namespace operators { + +// TODO(sidgoyal78): These function are needed by other files (save_op), move +// them to paddle::filesystem namespace. (as noted by yuyang18 in save_op). +constexpr char kSEP = '/'; +static bool FileExists(const std::string &filepath) { + struct stat buffer; + return (stat(filepath.c_str(), &buffer) == 0); +} + +static std::string DirName(const std::string &filepath) { + auto pos = filepath.rfind(kSEP); + if (pos == std::string::npos) { + return ""; + } + return filepath.substr(0, pos); +} + +static void MkDir(const char *path) { + if (mkdir(path, 0755)) { + PADDLE_ENFORCE_EQ(errno, EEXIST, "%s mkdir failed!", path); + } +} + +static void MkDirRecursively(const char *fullpath) { + if (*fullpath == '\0') return; // empty string + if (FileExists(fullpath)) return; + + MkDirRecursively(DirName(fullpath).c_str()); + MkDir(fullpath); +} + +class SaveCombineOp : public framework::OperatorBase { + public: + SaveCombineOp(const std::string &type, + const framework::VariableNameMap &inputs, + const framework::VariableNameMap &outputs, + const framework::AttributeMap &attrs) + : OperatorBase(type, inputs, outputs, attrs) {} + void Run(const framework::Scope &scope, + const platform::Place &place) const override { + auto filename = Attr("file_path"); + auto overwrite = 
Attr("overwrite"); + + bool is_present = FileExists(filename); + if (is_present && !overwrite) { + PADDLE_THROW("%s exists!, cannot save_combine to it when overwrite=false", + filename, overwrite); + } + + MkDirRecursively(DirName(filename).c_str()); + std::ofstream fout(filename); + PADDLE_ENFORCE(static_cast(fout), "Cannot open %s to write", + filename); + + auto inp_var_names = Inputs("X"); + PADDLE_ENFORCE_GT(static_cast(inp_var_names.size()), 0, + "The number of input variables should be greater than 0"); + + // get device context from pool + platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); + auto &dev_ctx = *pool.Get(place); + + for (size_t i = 0; i < inp_var_names.size(); i++) { + auto *var = scope.FindVar(inp_var_names[i]); + + PADDLE_ENFORCE(var != nullptr, + "Cannot find variable %s for save_combine_op", + inp_var_names[i]); + PADDLE_ENFORCE(var->IsType(), + "SaveCombineOp only supports LoDTensor, %s has wrong type", + inp_var_names[i]); + + auto &tensor = var->Get(); + // Serialize tensor + framework::SerializeToStream(fout, tensor, dev_ctx); + } + fout.close(); + } +}; + +class SaveCombineOpProtoMaker : public framework::OpProtoAndCheckerMaker { + public: + SaveCombineOpProtoMaker(OpProto *proto, OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput( + "X", + "(vector) Input LoDTensors that need to be saved together in a file.") + .AsDuplicable(); + AddComment(R"DOC( +SaveCombine operator + +This operator will serialize and write a list of input LoDTensor variables +to a file on disk. 
+)DOC"); + AddAttr("overwrite", + "(boolean, default true)" + "Overwrite the output file if it exists.") + .SetDefault(true); + AddAttr( + "file_path", + "(string)" + "The \"file_path\" where the LoDTensor variables will be saved.") + .AddCustomChecker( + [](const std::string &path) { return !path.empty(); }); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; + +REGISTER_OPERATOR(save_combine, ops::SaveCombineOp, + ops::SaveCombineOpProtoMaker); diff --git a/paddle/operators/save_load_combine_op_test.cc b/paddle/operators/save_load_combine_op_test.cc new file mode 100644 index 0000000000..f3ddc4a6c5 --- /dev/null +++ b/paddle/operators/save_load_combine_op_test.cc @@ -0,0 +1,180 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include +#include +#include +#include "gtest/gtest.h" +#include "paddle/framework/op_registry.h" + +USE_NO_KERNEL_OP(save_combine); +USE_NO_KERNEL_OP(load_combine); + +int* CreateForSaveCombineOp(int x, int y, const std::vector& lod_info, + std::string var_name, + paddle::platform::CPUPlace& place, + paddle::framework::Scope& scope, + paddle::framework::LoD& expect_lod) { + auto var = scope.Var(var_name); + auto tensor = var->GetMutable(); + tensor->Resize({x, y}); + expect_lod.resize(1); + for (size_t i = 0; i < lod_info.size(); i++) { + expect_lod[0].push_back(lod_info[i]); + } + tensor->set_lod(expect_lod); + int* expect = tensor->mutable_data(place); + for (int64_t i = 0; i < tensor->numel(); ++i) { + expect[i] = static_cast(i); + } + return expect; +} + +paddle::framework::LoDTensor* GeneratePlaceholderBeforeLoad( + const std::string out_var_name, paddle::framework::Scope& scope) { + auto load_var = scope.Var(out_var_name); + auto target = load_var->GetMutable(); + return target; +} + +int* GetValuesAfterLoadCombineOp(paddle::framework::LoDTensor* target, + paddle::framework::Scope& scope, + paddle::framework::LoD& actual_lod) { + int* actual = target->data(); + actual_lod = target->lod(); + return actual; +} + +void CheckValues(int* expect, int* actual, paddle::framework::LoD expect_lod, + paddle::framework::LoD actual_lod, const int& numel) { + for (int64_t i = 0; i < numel; ++i) { + EXPECT_EQ(expect[i], actual[i]); + } + EXPECT_EQ(expect_lod.size(), actual_lod.size()); + for (size_t i = 0; i < expect_lod.size(); ++i) { + for (size_t j = 0; j < expect_lod[i].size(); ++j) { + EXPECT_EQ(expect_lod[i][j], actual_lod[i][j]); + } + } +} + +// Here, we create 4 LoDTensors and use save_combine_op to first save these +// in a single file. 
Then, we use load_combine_op to load these sequentially +TEST(SaveLoadCombineOp, CPU) { + paddle::framework::Scope scope; + paddle::platform::CPUPlace place; + + std::vector lod1 = {0, 1, 2, 3, 10}; + int numel1 = 100; + paddle::framework::LoD expect_lod1; + int* expect1 = CreateForSaveCombineOp(10, 10, lod1, "test_var1", place, scope, + expect_lod1); + + std::vector lod2 = {0, 2, 5, 10}; + int numel2 = 200; + paddle::framework::LoD expect_lod2; + int* expect2 = CreateForSaveCombineOp(10, 20, lod2, "test_var2", place, scope, + expect_lod2); + + std::vector lod3 = {0, 2, 3, 20}; + int numel3 = 4000; + paddle::framework::LoD expect_lod3; + int* expect3 = CreateForSaveCombineOp(20, 200, lod3, "test_var3", place, + scope, expect_lod3); + + std::vector lod4 = {0, 1, 20}; + int numel4 = 1000; + paddle::framework::LoD expect_lod4; + int* expect4 = CreateForSaveCombineOp(20, 50, lod4, "test_var4", place, scope, + expect_lod4); + + // Set attributes + std::string filename = "check_tensor.ls"; + paddle::framework::AttributeMap attrs; + attrs.insert({"file_path", std::string(filename)}); + + // Run the save_combine_op + auto save_combine_op = paddle::framework::OpRegistry::CreateOp( + "save_combine", + {{"X", {"test_var1", "test_var2", "test_var3", "test_var4"}}}, {}, attrs); + save_combine_op->Run(scope, place); + + // Set up output vars + auto target1 = GeneratePlaceholderBeforeLoad("out_var1", scope); + auto target2 = GeneratePlaceholderBeforeLoad("out_var2", scope); + auto target3 = GeneratePlaceholderBeforeLoad("out_var3", scope); + auto target4 = GeneratePlaceholderBeforeLoad("out_var4", scope); + + // Run the load_combine_op + auto load_combine_op = paddle::framework::OpRegistry::CreateOp( + "load_combine", {}, + {{"Out", {"out_var1", "out_var2", "out_var3", "out_var4"}}}, attrs); + load_combine_op->Run(scope, place); + + paddle::framework::LoD actual_lod1, actual_lod2, actual_lod3, actual_lod4; + int* actual1 = GetValuesAfterLoadCombineOp(target1, scope, actual_lod1); 
+ int* actual2 = GetValuesAfterLoadCombineOp(target2, scope, actual_lod2); + int* actual3 = GetValuesAfterLoadCombineOp(target3, scope, actual_lod3); + int* actual4 = GetValuesAfterLoadCombineOp(target4, scope, actual_lod4); + + CheckValues(expect1, actual1, expect_lod1, actual_lod1, numel1); + CheckValues(expect2, actual2, expect_lod2, actual_lod2, numel2); + CheckValues(expect3, actual3, expect_lod3, actual_lod3, numel3); + CheckValues(expect4, actual4, expect_lod4, actual_lod4, numel4); +} + +// Test with original SaveLoadTest +TEST(SaveLoadTestWithCombineOp, CPU) { + paddle::framework::Scope scope; + paddle::platform::CPUPlace place; + + auto var = scope.Var("test_var"); + auto tensor = var->GetMutable(); + tensor->Resize({3, 10}); + paddle::framework::LoD expect_lod; + expect_lod.resize(1); + expect_lod[0].push_back(0); + expect_lod[0].push_back(1); + expect_lod[0].push_back(2); + expect_lod[0].push_back(3); + + tensor->set_lod(expect_lod); + int* expect = tensor->mutable_data(place); + for (int64_t i = 0; i < tensor->numel(); ++i) { + expect[i] = static_cast(i); + } + paddle::framework::AttributeMap attrs; + attrs.insert({"file_path", std::string("check_t.save")}); + + auto save_op = paddle::framework::OpRegistry::CreateOp( + "save_combine", {{"X", {"test_var"}}}, {}, attrs); + save_op->Run(scope, place); + + auto load_var = scope.Var("out_var"); + auto target = load_var->GetMutable(); + auto load_op = paddle::framework::OpRegistry::CreateOp( + "load_combine", {}, {{"Out", {"out_var"}}}, attrs); + load_op->Run(scope, place); + int* actual = target->data(); + for (int64_t i = 0; i < tensor->numel(); ++i) { + EXPECT_EQ(expect[i], actual[i]); + } + auto& actual_lod = target->lod(); + EXPECT_EQ(expect_lod.size(), actual_lod.size()); + for (size_t i = 0; i < expect_lod.size(); ++i) { + for (size_t j = 0; j < expect_lod[i].size(); ++j) { + EXPECT_EQ(expect_lod[i][j], actual_lod[i][j]); + } + } +} diff --git a/paddle/operators/save_load_op_test.cc 
b/paddle/operators/save_load_op_test.cc index 40103d864f..d829d5da17 100644 --- a/paddle/operators/save_load_op_test.cc +++ b/paddle/operators/save_load_op_test.cc @@ -24,7 +24,7 @@ TEST(SaveLoadOp, CPU) { auto var = scope.Var("test_var"); auto tensor = var->GetMutable(); - tensor->Resize({10, 10}); + tensor->Resize({3, 10}); paddle::framework::LoD expect_lod; expect_lod.resize(1); expect_lod[0].push_back(0); From 80eff2662b4889ee169ee1efb0b2533d764b22d2 Mon Sep 17 00:00:00 2001 From: dzhwinter Date: Wed, 31 Jan 2018 10:11:38 +0800 Subject: [PATCH 082/106] "unify flags" (#7973) * "unify flags" * "fix init" --- paddle/framework/executor.cc | 6 +++--- paddle/framework/operator.cc | 6 ++---- paddle/framework/scope.cc | 8 +++++--- python/paddle/v2/fluid/__init__.py | 6 ++---- 4 files changed, 12 insertions(+), 14 deletions(-) diff --git a/paddle/framework/executor.cc b/paddle/framework/executor.cc index cbf3ec7526..fe1ca27eb3 100644 --- a/paddle/framework/executor.cc +++ b/paddle/framework/executor.cc @@ -25,7 +25,7 @@ limitations under the License. */ #include "paddle/platform/place.h" #include "paddle/platform/profiler.h" -DECLARE_bool(do_memory_benchmark); +DECLARE_bool(benchmark); DEFINE_bool(check_nan_inf, false, "Checking whether operator produce NAN/INF or not. 
It will be " "extremely slow so please use this flag wisely."); @@ -125,7 +125,7 @@ void Executor::Run(const ProgramDesc& pdesc, Scope* scope, int block_id, op->Run(*local_scope, place_); VLOG(3) << op->DebugStringEx(local_scope); - if (FLAGS_do_memory_benchmark) { + if (FLAGS_benchmark) { VLOG(2) << "Memory used after operator " + op->Type() + " running: " << memory::memory_usage(place_); } @@ -142,7 +142,7 @@ void Executor::Run(const ProgramDesc& pdesc, Scope* scope, int block_id, if (create_vars && create_local_scope) { scope->DeleteScope(local_scope); } - if (FLAGS_do_memory_benchmark) { + if (FLAGS_benchmark) { VLOG(2) << "-------------------------------------------------------"; VLOG(2) << "Memory used after deleting local scope: " << memory::memory_usage(place_); diff --git a/paddle/framework/operator.cc b/paddle/framework/operator.cc index 831b1e2a1e..4e854f54dd 100644 --- a/paddle/framework/operator.cc +++ b/paddle/framework/operator.cc @@ -22,9 +22,7 @@ limitations under the License. */ #include "paddle/framework/shape_inference.h" #include "paddle/framework/var_type.h" -DEFINE_bool(op_sync, false, - "Default cuda is asynchronous device, set to True will" - "force op run in synchronous mode."); +DECLARE_bool(benchmark); namespace paddle { namespace framework { @@ -531,7 +529,7 @@ void OperatorWithKernel::Run(const Scope& scope, ExecutionContext(*this, new_scope, *new_dev_ctx)); /*For profiling/benchmark only*/ - if (FLAGS_op_sync) { + if (FLAGS_benchmark) { new_dev_ctx->Wait(); } } diff --git a/paddle/framework/scope.cc b/paddle/framework/scope.cc index a67ff91009..af08b2ab81 100644 --- a/paddle/framework/scope.cc +++ b/paddle/framework/scope.cc @@ -20,9 +20,11 @@ limitations under the License. */ #include "paddle/framework/threadpool.h" #include "paddle/string/printf.h" -DEFINE_bool(do_memory_benchmark, false, +DEFINE_bool(benchmark, false, "Doing memory benchmark. 
It will make deleting scope synchronized, " - "and add some memory usage logs"); + "and add some memory usage logs." + "Default cuda is asynchronous device, set to True will" + "force op run in synchronous mode."); namespace paddle { namespace framework { @@ -93,7 +95,7 @@ void Scope::DeleteScope(Scope* scope) { PADDLE_ENFORCE(it != this->kids_.end(), "Cannot find %p as kid scope", scope); this->kids_.erase(it); // When making memory benchmark on Fluid, we have to delete scope sync. - if (FLAGS_do_memory_benchmark) { + if (FLAGS_benchmark) { delete scope; } else { Async([scope] { delete scope; }); diff --git a/python/paddle/v2/fluid/__init__.py b/python/paddle/v2/fluid/__init__.py index 787416aed1..a542e3dbab 100644 --- a/python/paddle/v2/fluid/__init__.py +++ b/python/paddle/v2/fluid/__init__.py @@ -86,11 +86,9 @@ def __bootstrap__(): os.environ['OMP_NUM_THREADS'] = str(num_threads) - read_env_flags = [ - 'use_pinned_memory', 'check_nan_inf', 'do_memory_benchmark' - ] + read_env_flags = ['use_pinned_memory', 'check_nan_inf', 'benchmark'] if core.is_compiled_with_cuda(): - read_env_flags += ['fraction_of_gpu_memory_to_use', 'op_sync'] + read_env_flags += ['fraction_of_gpu_memory_to_use'] core.init_gflags([sys.argv[0]] + ["--tryfromenv=" + ",".join(read_env_flags)]) core.init_glog(sys.argv[0]) From e100b37e30d23e11c9091cc7161eaa030985bfdb Mon Sep 17 00:00:00 2001 From: Luo Tao Date: Wed, 31 Jan 2018 10:25:05 +0800 Subject: [PATCH 083/106] change the default option of `WITH_TESTING` to OFF --- CMakeLists.txt | 2 +- doc/getstarted/build_and_install/build_from_source_cn.rst | 2 +- doc/getstarted/build_and_install/build_from_source_en.rst | 2 +- paddle/scripts/docker/README.md | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 7c7eb260ae..e8ea828dd2 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -39,7 +39,7 @@ option(WITH_GPU "Compile PaddlePaddle with NVIDIA GPU" ${CUDA_F option(WITH_AVX "Compile PaddlePaddle 
with AVX intrinsics" ${AVX_FOUND}) option(WITH_MKL "Compile PaddlePaddle with MKL support." ${AVX_FOUND}) option(WITH_DSO "Compile PaddlePaddle with dynamic linked CUDA" ON) -option(WITH_TESTING "Compile PaddlePaddle with unit testing" ON) +option(WITH_TESTING "Compile PaddlePaddle with unit testing" OFF) option(WITH_SWIG_PY "Compile PaddlePaddle with inference api" ON) option(WITH_STYLE_CHECK "Compile PaddlePaddle with style check" ON) option(WITH_PYTHON "Compile PaddlePaddle with python interpreter" ON) diff --git a/doc/getstarted/build_and_install/build_from_source_cn.rst b/doc/getstarted/build_and_install/build_from_source_cn.rst index 71904dc41e..ff904b1022 100644 --- a/doc/getstarted/build_and_install/build_from_source_cn.rst +++ b/doc/getstarted/build_and_install/build_from_source_cn.rst @@ -115,7 +115,7 @@ PaddlePaddle的编译选项,包括生成CPU/GPU二进制文件、链接何种B "WITH_AVX", "是否编译含有AVX指令集的PaddlePaddle二进制文件", "ON" "WITH_PYTHON", "是否内嵌PYTHON解释器", "ON" "WITH_STYLE_CHECK", "是否编译时进行代码风格检查", "ON" - "WITH_TESTING", "是否开启单元测试", "ON" + "WITH_TESTING", "是否开启单元测试", "OFF" "WITH_DOC", "是否编译中英文文档", "OFF" "WITH_SWIG_PY", "是否编译PYTHON的SWIG接口,该接口可用于预测和定制化训练", "Auto" "WITH_GOLANG", "是否编译go语言的可容错parameter server", "ON" diff --git a/doc/getstarted/build_and_install/build_from_source_en.rst b/doc/getstarted/build_and_install/build_from_source_en.rst index 27f73b2e2c..718fb869c2 100644 --- a/doc/getstarted/build_and_install/build_from_source_en.rst +++ b/doc/getstarted/build_and_install/build_from_source_en.rst @@ -126,7 +126,7 @@ You can add :code:`-D` argument to pass such options, like: "WITH_AVX", "Build with AVX support", "ON" "WITH_PYTHON", "Build with integrated Python interpreter", "ON" "WITH_STYLE_CHECK", "Check code style when building", "ON" - "WITH_TESTING", "Build unit tests", "ON" + "WITH_TESTING", "Build unit tests", "OFF" "WITH_DOC", "Build documentations", "OFF" "WITH_SWIG_PY", "Build Python SWIG interface for V2 API", "Auto" "WITH_GOLANG", "Build fault-tolerant parameter server 
written in go", "ON" diff --git a/paddle/scripts/docker/README.md b/paddle/scripts/docker/README.md index f0620498cf..65c4674555 100644 --- a/paddle/scripts/docker/README.md +++ b/paddle/scripts/docker/README.md @@ -56,7 +56,7 @@ Users can specify the following Docker build arguments with either "ON" or "OFF" | ------ | -------- | ----------- | | `WITH_GPU` | OFF | Generates NVIDIA CUDA GPU code and relies on CUDA libraries. | | `WITH_AVX` | OFF | Set to "ON" to enable AVX support. | -| `WITH_TESTING` | ON | Build unit tests binaries. | +| `WITH_TESTING` | OFF | Build unit tests binaries. | | `WITH_MKL` | ON | Build with [Intel® MKL](https://software.intel.com/en-us/mkl) and [Intel® MKL-DNN](https://github.com/01org/mkl-dnn) support. | | `WITH_GOLANG` | ON | Build fault-tolerant parameter server written in go. | | `WITH_SWIG_PY` | ON | Build with SWIG python API support. | From be801d6c056c3435922e345d9d2ea105120b812d Mon Sep 17 00:00:00 2001 From: Qiao Longfei Date: Wed, 31 Jan 2018 10:37:09 +0800 Subject: [PATCH 084/106] Add learning rate decay (#7892) * add basic interface for learning rate decay * add exponential_decay * add natural_exp_decay * add inverse_time_decay --- paddle/operators/elementwise_pow_op.cc | 37 ++++++ paddle/operators/elementwise_pow_op.cu | 20 +++ paddle/operators/elementwise_pow_op.h | 37 ++++++ python/paddle/v2/fluid/__init__.py | 2 + .../paddle/v2/fluid/layers/math_op_patch.py | 4 +- python/paddle/v2/fluid/layers/ops.py | 1 + python/paddle/v2/fluid/layers/tensor.py | 13 +- python/paddle/v2/fluid/learning_rate_decay.py | 125 ++++++++++++++++++ python/paddle/v2/fluid/optimizer.py | 66 +++++---- .../tests/book/test_label_semantic_roles.py | 2 +- .../v2/fluid/tests/test_elementwise_pow_op.py | 43 ++++++ .../fluid/tests/test_learning_rate_decay.py | 110 +++++++++++++++ 12 files changed, 432 insertions(+), 28 deletions(-) create mode 100644 paddle/operators/elementwise_pow_op.cc create mode 100644 paddle/operators/elementwise_pow_op.cu create 
mode 100644 paddle/operators/elementwise_pow_op.h create mode 100644 python/paddle/v2/fluid/learning_rate_decay.py create mode 100644 python/paddle/v2/fluid/tests/test_elementwise_pow_op.py create mode 100644 python/paddle/v2/fluid/tests/test_learning_rate_decay.py diff --git a/paddle/operators/elementwise_pow_op.cc b/paddle/operators/elementwise_pow_op.cc new file mode 100644 index 0000000000..5293cc7dd3 --- /dev/null +++ b/paddle/operators/elementwise_pow_op.cc @@ -0,0 +1,37 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "paddle/operators/elementwise_pow_op.h" +#include "paddle/operators/elementwise_op.h" + +namespace paddle { +namespace operators { +class ElementwisePowOpMaker : public ElementwiseOpMaker { + public: + ElementwisePowOpMaker(OpProto* proto, OpAttrChecker* op_checker) + : ElementwiseOpMaker(proto, op_checker) { + SetComment("Pow", "Out = X ^ Y"); + AddComment(comment_); + } +}; +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OP_WITHOUT_GRADIENT(elementwise_pow, ops::ElementwiseOp, + ops::ElementwisePowOpMaker); +REGISTER_OP_CPU_KERNEL( + elementwise_pow, + ops::ElementwisePowKernel, + ops::ElementwisePowKernel); diff --git a/paddle/operators/elementwise_pow_op.cu b/paddle/operators/elementwise_pow_op.cu new file mode 100644 index 0000000000..643c978e63 --- /dev/null +++ b/paddle/operators/elementwise_pow_op.cu @@ -0,0 +1,20 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#define EIGEN_USE_GPU +#include "paddle/operators/elementwise_pow_op.h" + +namespace ops = paddle::operators; + +REGISTER_OP_CUDA_KERNEL( + elementwise_pow, + ops::ElementwisePowKernel, + ops::ElementwisePowKernel); diff --git a/paddle/operators/elementwise_pow_op.h b/paddle/operators/elementwise_pow_op.h new file mode 100644 index 0000000000..6019e709e0 --- /dev/null +++ b/paddle/operators/elementwise_pow_op.h @@ -0,0 +1,37 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include +#include "paddle/operators/elementwise_op_function.h" + +namespace paddle { +namespace operators { + +template +struct PowFunctor { + inline HOSTDEVICE T operator()(T a, T b) const { return std::pow(a, b); } +}; + +template +class ElementwisePowKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + ElementwiseComputeEx, DeviceContext, T>(ctx); + } +}; + +} // namespace operators +} // namespace paddle diff --git a/python/paddle/v2/fluid/__init__.py b/python/paddle/v2/fluid/__init__.py index a542e3dbab..18c8343d09 100644 --- a/python/paddle/v2/fluid/__init__.py +++ b/python/paddle/v2/fluid/__init__.py @@ -26,6 +26,7 @@ import initializer import layers import nets import optimizer +import learning_rate_decay import backward import regularizer from param_attr import ParamAttr @@ -44,6 +45,7 @@ __all__ = framework.__all__ + executor.__all__ + [ 'layers', 'nets', 'optimizer', + 'learning_rate_decay', 'backward', 'regularizer', 'LoDTensor', diff --git a/python/paddle/v2/fluid/layers/math_op_patch.py b/python/paddle/v2/fluid/layers/math_op_patch.py index f359e70126..79a130a3eb 100644 --- a/python/paddle/v2/fluid/layers/math_op_patch.py +++ b/python/paddle/v2/fluid/layers/math_op_patch.py @@ -145,7 +145,9 @@ def monkey_patch_variable(): # a*b == b*a. 
Do not need to reverse explicitly ("__rmul__", "elementwise_mul", False), ("__div__", "elementwise_div", False), - ("__rdiv__", "elementwise_div", True)): + ("__rdiv__", "elementwise_div", True), + ("__pow__", "elementwise_pow", False), + ("__rpow__", "elementwise_pow", True)): setattr(Variable, method_name, _elemwise_method_creator_(method_name, op_type, reverse)) diff --git a/python/paddle/v2/fluid/layers/ops.py b/python/paddle/v2/fluid/layers/ops.py index 022a94cad4..ee3172c7b8 100644 --- a/python/paddle/v2/fluid/layers/ops.py +++ b/python/paddle/v2/fluid/layers/ops.py @@ -56,6 +56,7 @@ __all__ = [ 'elementwise_mul', 'elementwise_max', 'elementwise_min', + 'elementwise_pow', 'clip', 'clip_by_norm', 'sequence_softmax', diff --git a/python/paddle/v2/fluid/layers/tensor.py b/python/paddle/v2/fluid/layers/tensor.py index 6e7d09459c..c435c5206d 100644 --- a/python/paddle/v2/fluid/layers/tensor.py +++ b/python/paddle/v2/fluid/layers/tensor.py @@ -16,12 +16,14 @@ from ..layer_helper import LayerHelper from ..param_attr import ParamAttr from ..framework import convert_np_dtype_to_dtype_ from ..framework import Variable +from ..initializer import Constant from ..core import DataType import numpy __all__ = [ 'create_tensor', 'create_parameter', + 'create_global_var', 'cast', 'concat', 'sums', @@ -58,13 +60,22 @@ def create_parameter(shape, Returns: Parameter: the created parameter """ - helper = LayerHelper("create_parameter") + helper = LayerHelper("create_parameter", **locals()) if attr is None: attr = ParamAttr() return helper.create_parameter(attr, shape, dtype, is_bias, default_initializer) +def create_global_var(shape, value, dtype, persistable=False, name=None): + helper = LayerHelper("global_var", **locals()) + var = helper.create_global_variable( + dtype=dtype, shape=shape, persistable=persistable, name=name) + helper.set_variable_initializer( + var, initializer=Constant(value=float(value))) + return var + + def cast(x, dtype): """ This function takes in the 
input with input_dtype diff --git a/python/paddle/v2/fluid/learning_rate_decay.py b/python/paddle/v2/fluid/learning_rate_decay.py new file mode 100644 index 0000000000..96b3e9a0d7 --- /dev/null +++ b/python/paddle/v2/fluid/learning_rate_decay.py @@ -0,0 +1,125 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import layers +from framework import Variable + +__all__ = ['exponential_decay', 'natural_exp_decay', 'inverse_time_decay'] +""" +When training a model, it's often useful to decay the +learning rate during training process, this is called +learning_rate_decay. There are many strategies to do +this, this module will provide some classical method. +User can also implement their own learning_rate_decay +strategy according to this module. +""" + + +def exponential_decay(learning_rate, + global_step, + decay_steps, + decay_rate, + staircase=False): + """Applies exponential decay to the learning rate. + + ```python + decayed_learning_rate = learning_rate * + decay_rate ^ (global_step / decay_steps) + ``` + Args: + learning_rate: A scalar float32 value or a Variable. This + will be the initial learning rate during training + global_step: A Variable that record the training step. + decay_steps: A Python `int32` number. + decay_rate: A Python `float` number. + staircase: Boolean. If set true, decay the learning rate every decay_steps. 
+ + Returns: + The decayed learning rate + """ + if not isinstance(global_step, Variable): + raise ValueError("global_step is required for exponential_decay.") + + # update learning_rate + div_res = global_step / decay_steps + if staircase: + div_res = layers.floor(x=div_res) + return learning_rate * (decay_rate**div_res) + + +def natural_exp_decay(learning_rate, + global_step, + decay_steps, + decay_rate, + staircase=False): + """Applies natural exponential decay to the initial learning rate. + + ```python + if not staircase: + decayed_learning_rate = learning_rate * exp(- decay_rate * (global_step / decay_steps)) + else: + decayed_learning_rate = learning_rate * exp(- decay_rate * (global_step / decay_steps)) + ``` + Args: + learning_rate: A scalar float32 value or a Variable. This + will be the initial learning rate during training + global_step: A Variable that record the training step. + decay_steps: A Python `int32` number. + decay_rate: A Python `float` number. + staircase: Boolean. If set true, decay the learning rate every decay_steps. + + Returns: + The decayed learning rate + """ + if not isinstance(global_step, Variable): + raise ValueError("global_step is required for natural_exp_decay.") + + div_res = global_step / decay_steps + if staircase: + div_res = layers.floor(x=div_res) + return learning_rate * layers.exp(x=(-1 * decay_rate * div_res)) + + +def inverse_time_decay(learning_rate, + global_step, + decay_steps, + decay_rate, + staircase=False): + """Applies inverse time decay to the initial learning rate. + + ```python + if staircase: + decayed_learning_rate = learning_rate / (1 + decay_rate * floor(global_step / decay_step)) + else + decayed_learning_rate = learning_rate / (1 + decay_rate * global_step / decay_step) + ``` + Args: + learning_rate: A scalar float32 value or a Variable. This + will be the initial learning rate during training + global_step: A Variable that record the training step. + decay_steps: A Python `int32` number. 
+ decay_rate: A Python `float` number. + staircase: Boolean. If set true, decay the learning rate every decay_steps. + + Returns: + The decayed learning rate + """ + if not isinstance(global_step, Variable): + raise ValueError("global_step is required for inverse_time_decay.") + + div_res = global_step / decay_steps + if staircase: + div_res = layers.floor(x=div_res) + + return learning_rate / (1 + decay_rate * div_res) diff --git a/python/paddle/v2/fluid/optimizer.py b/python/paddle/v2/fluid/optimizer.py index 0c3533b892..7844a4e2df 100644 --- a/python/paddle/v2/fluid/optimizer.py +++ b/python/paddle/v2/fluid/optimizer.py @@ -15,6 +15,7 @@ from collections import defaultdict import framework +import layers from backward import append_backward from framework import unique_name, program_guard from initializer import Constant @@ -33,9 +34,11 @@ class Optimizer(object): but need to use one of it's implementation. """ - def __init__(self, global_step=None, regularization=None): + def __init__(self, learning_rate, global_step=None, regularization=None): + assert learning_rate is not None self._global_step = global_step self.regularization = regularization + self._global_learning_rate = learning_rate # Dictionary of accumulators. Some optimizer subclasses need to # allocate and manage extra variables associated with the parameters # to train. These variables are called accumulators. 
@@ -43,6 +46,28 @@ class Optimizer(object): self._accumulators = defaultdict(lambda: dict()) self.helper = None + def _create_global_learning_rate(self): + if isinstance(self._global_learning_rate, float): + self._global_learning_rate = layers.create_global_var( + name=unique_name("learning_rate"), + shape=[1], + value=float(self._global_learning_rate), + dtype='float32', + persistable=True) + + if not isinstance(self._global_learning_rate, framework.Variable): + raise ValueError("learning rate should be a Variable, " + "actual type is %s", + type(self._global_learning_rate)) + + @property + def global_learning_rate(self): + """ + get global decayed learning rate + :return: + """ + return self._global_learning_rate + def _append_optimize_op(self, block, param_and_grad): """ append optimize operator to block and return all the added optimize_op """ @@ -52,17 +77,7 @@ class Optimizer(object): # create learning rate variable for every parameter param = param_and_grad[0] param_lr = param.optimize_attr['learning_rate'] - param_lr_shape = [1] - param_lr_var = self.helper.create_global_variable( - name=unique_name("learning_rate"), - dtype='float32', - shape=param_lr_shape, - lod_level=1, - persistable=True) - param_lr = param_lr * self._learning_rate - self.helper.set_variable_initializer( - var=param_lr_var, initializer=Constant(param_lr)) - return param_lr_var + return self._global_learning_rate * param_lr def _create_accumulators(self, block, parameters): """Create all accumulators needed by the parameters @@ -163,7 +178,7 @@ class Optimizer(object): optimization. This will include parameter update ops, global step update ops and any other custom ops required by subclasses to manage their internal state. - :param startup_program: + :param startup_program: """ # This is a default implementation of create_optimization_pass that # can be shared by most optimizers. 
This implementation assumes that @@ -178,6 +193,7 @@ class Optimizer(object): self.helper = LayerHelper(self.__class__.__name__) self._create_accumulators(loss.block, [p[0] for p in parameters_and_grads]) + self._create_global_learning_rate() optimize_ops = [] for param_and_grad in parameters_and_grads: @@ -231,9 +247,9 @@ class SGDOptimizer(Optimizer): def __init__(self, learning_rate, **kwargs): assert learning_rate is not None - super(SGDOptimizer, self).__init__(**kwargs) + super(SGDOptimizer, self).__init__( + learning_rate=learning_rate, **kwargs) self.type = "sgd" - self._learning_rate = learning_rate def _append_optimize_op(self, block, param_and_grad): assert isinstance(block, framework.Block) @@ -259,9 +275,9 @@ class MomentumOptimizer(Optimizer): def __init__(self, learning_rate, momentum, use_nesterov=False, **kwargs): assert learning_rate is not None assert momentum is not None - super(MomentumOptimizer, self).__init__(**kwargs) + super(MomentumOptimizer, self).__init__( + learning_rate=learning_rate, **kwargs) self.type = "momentum" - self._learning_rate = learning_rate self._momentum = momentum self._use_nesterov = bool(use_nesterov) @@ -303,9 +319,9 @@ class AdagradOptimizer(Optimizer): def __init__(self, learning_rate, epsilon=1.0e-6, **kwargs): assert learning_rate is not None assert epsilon is not None - super(AdagradOptimizer, self).__init__(**kwargs) + super(AdagradOptimizer, self).__init__( + learning_rate=learning_rate, **kwargs) self.type = "adagrad" - self._learning_rate = learning_rate self._epsilon = epsilon def _create_accumulators(self, block, parameters): @@ -352,9 +368,9 @@ class AdamOptimizer(Optimizer): assert beta1 is not None assert beta2 is not None assert epsilon is not None - super(AdamOptimizer, self).__init__(**kwargs) + super(AdamOptimizer, self).__init__( + learning_rate=learning_rate, **kwargs) self.type = "adam" - self._learning_rate = learning_rate self._beta1 = beta1 self._beta2 = beta2 self._epsilon = epsilon @@ -457,9 
+473,9 @@ class AdamaxOptimizer(Optimizer): assert beta1 is not None assert beta2 is not None assert epsilon is not None - super(AdamaxOptimizer, self).__init__(**kwargs) + super(AdamaxOptimizer, self).__init__( + learning_rate=learning_rate, **kwargs) self.type = "adamax" - self._learning_rate = learning_rate self._beta1 = beta1 self._beta2 = beta2 self._epsilon = epsilon @@ -535,9 +551,9 @@ class DecayedAdagradOptimizer(Optimizer): assert decay is not None assert epsilon is not None - super(DecayedAdagradOptimizer, self).__init__(**kwargs) + super(DecayedAdagradOptimizer, self).__init__( + learning_rate=learning_rate, **kwargs) self.type = "decayed_adagrad" - self._learning_rate = learning_rate self._decay = decay self._epsilon = epsilon diff --git a/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py b/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py index 1a342bf1fb..f85768de99 100644 --- a/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py +++ b/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py @@ -175,7 +175,7 @@ def main(): paddle.reader.shuffle( paddle.dataset.conll05.test(), buf_size=8192), batch_size=BATCH_SIZE) - #place = fluid.CPUPlace() + # place = fluid.CPUPlace() place = fluid.CUDAPlace(0) feeder = fluid.DataFeeder( feed_list=[ diff --git a/python/paddle/v2/fluid/tests/test_elementwise_pow_op.py b/python/paddle/v2/fluid/tests/test_elementwise_pow_op.py new file mode 100644 index 0000000000..e31749df9b --- /dev/null +++ b/python/paddle/v2/fluid/tests/test_elementwise_pow_op.py @@ -0,0 +1,43 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import unittest +import numpy as np +from op_test import OpTest + + +class TestElementwisePowOp(OpTest): + def setUp(self): + self.op_type = "elementwise_pow" + self.inputs = { + 'X': np.random.uniform(0.1, 1, [13, 17]).astype("float32"), + 'Y': np.random.uniform(0.1, 1, [13, 17]).astype("float32") + } + self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])} + + def test_check_output(self): + self.check_output() + + +class TestElementwisePowOp_scalar(TestElementwisePowOp): + def setUp(self): + self.op_type = "elementwise_pow" + self.inputs = { + 'X': np.random.rand(2, 3, 4).astype('float32'), + 'Y': np.random.rand(1).astype('float32') + } + self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])} + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/v2/fluid/tests/test_learning_rate_decay.py b/python/paddle/v2/fluid/tests/test_learning_rate_decay.py new file mode 100644 index 0000000000..dc348cf2d2 --- /dev/null +++ b/python/paddle/v2/fluid/tests/test_learning_rate_decay.py @@ -0,0 +1,110 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +import math +import paddle.v2.fluid.framework as framework +import paddle.v2.fluid as fluid +import paddle.v2.fluid.layers as layers +import paddle.v2.fluid.learning_rate_decay as lr_decay + + +def exponential_decay(learning_rate, + global_step, + decay_steps, + decay_rate, + staircase=False): + exponent = float(global_step) / float(decay_steps) + if staircase: + exponent = math.floor(exponent) + return learning_rate * decay_rate**exponent + + +def natural_exp_decay(learning_rate, + global_step, + decay_steps, + decay_rate, + staircase=False): + exponent = float(global_step) / float(decay_steps) + if staircase: + exponent = math.floor(exponent) + return learning_rate * math.exp(-1 * decay_rate * exponent) + + +def inverse_time_decay(learning_rate, + global_step, + decay_steps, + decay_rate, + staircase=False): + temp = float(global_step) / float(decay_steps) + if staircase: + temp = math.floor(temp) + return learning_rate / (1 + decay_rate * temp) + + +class TestLearningRateDecay(unittest.TestCase): + def check_decay(self, python_decay_fn, fluid_decay_fn, staircase): + init_lr = 1.0 + decay_steps = 5 + decay_rate = 0.5 + + global_step = layers.create_global_var( + shape=[1], value=0.0, dtype='float32', persistable=True) + + decayed_lr = fluid_decay_fn( + learning_rate=init_lr, + global_step=global_step, + decay_steps=decay_steps, + decay_rate=decay_rate, + staircase=staircase) + layers.increment(global_step, 1.0) + + place = fluid.CPUPlace() + exe = fluid.Executor(place) + + exe.run(fluid.default_startup_program()) + for step in range(10): + step_val, lr_val = exe.run(fluid.default_main_program(), + feed=[], + fetch_list=[global_step, decayed_lr]) + python_decayed_lr = python_decay_fn( + learning_rate=init_lr, + global_step=step, + decay_steps=decay_steps, + decay_rate=decay_rate, + staircase=staircase) + 
self.assertAlmostEqual(python_decayed_lr, lr_val[0])
+
+    def test_decay(self):
+        decay_fns = [
+            (exponential_decay, lr_decay.exponential_decay, True),
+            (exponential_decay, lr_decay.exponential_decay, False),
+            (natural_exp_decay, lr_decay.natural_exp_decay, True),
+            (natural_exp_decay, lr_decay.natural_exp_decay, False),
+            (inverse_time_decay, lr_decay.inverse_time_decay, True),
+            (inverse_time_decay, lr_decay.inverse_time_decay, False),
+        ]
+
+        for py_decay_fn, fluid_decay_fn, staircase in decay_fns:
+            print("decay_fn=" + str(py_decay_fn) + " staircase=" + str(
+                staircase))
+            main_program = framework.Program()
+            startup_program = framework.Program()
+            with framework.program_guard(main_program, startup_program):
+                self.check_decay(py_decay_fn, fluid_decay_fn, staircase)
+
+
+if __name__ == '__main__':
+    unittest.main()

From e1611eb48485e4b62f0186277ac4d67218a2d77a Mon Sep 17 00:00:00 2001
From: Yancey
Date: Wed, 31 Jan 2018 10:56:28 +0800
Subject: [PATCH 085/106] Add mirror registry server for book image (#7988)

---
 doc/getstarted/build_and_install/docker_install_cn.rst | 6 ++++++
 doc/getstarted/build_and_install/docker_install_en.rst | 6 ++++++
 2 files changed, 12 insertions(+)

diff --git a/doc/getstarted/build_and_install/docker_install_cn.rst b/doc/getstarted/build_and_install/docker_install_cn.rst
index 98fada7bdb..79d214635a 100644
--- a/doc/getstarted/build_and_install/docker_install_cn.rst
+++ b/doc/getstarted/build_and_install/docker_install_cn.rst
@@ -95,6 +95,12 @@ PaddlePaddle Book是为用户和开发者制作的一个交互式的Jupyter Note

     docker run -p 8888:8888 paddlepaddle/book

+国内用户可以使用下面的镜像源来加速访问:
+
+  .. code-block:: bash
+
+    docker run -p 8888:8888 docker.paddlepaddlehub.com/book
+
 然后在浏览器中输入以下网址:

 ..
code-block:: text

diff --git a/doc/getstarted/build_and_install/docker_install_en.rst b/doc/getstarted/build_and_install/docker_install_en.rst
index b1d0890b4c..e0e0559fb8 100644
--- a/doc/getstarted/build_and_install/docker_install_en.rst
+++ b/doc/getstarted/build_and_install/docker_install_en.rst
@@ -102,6 +102,12 @@ We provide a packaged book image, simply issue the command:

     docker run -p 8888:8888 paddlepaddle/book

+For users in China, we provide a faster mirror:
+
+  .. code-block:: bash
+
+    docker run -p 8888:8888 docker.paddlepaddlehub.com/book
+
 Then, you would back and paste the address into the local browser:

 .. code-block:: text

From 6f7eb0d5f3201dc107181aed570e66c502dbfc02 Mon Sep 17 00:00:00 2001
From: dzhwinter
Date: Wed, 31 Jan 2018 13:11:01 +0800
Subject: [PATCH 086/106] "fix gpu init" (#7528)

* "fix gpu init"

* "set env variable default value for share gpu"

* "fix ci"

* "removed CUDA_VISIBLE_DEVICES default"

* "removed"

---
 paddle/framework/init.cc      | 15 +++++++++++----
 paddle/framework/init_test.cc | 16 +++++++++++++++-
 2 files changed, 26 insertions(+), 5 deletions(-)

diff --git a/paddle/framework/init.cc b/paddle/framework/init.cc
index 4ef82a541e..3f6ea121b3 100644
--- a/paddle/framework/init.cc
+++ b/paddle/framework/init.cc
@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/ #include // for strdup #include +#include #include #include "paddle/framework/init.h" @@ -46,17 +47,23 @@ void InitDevices() { std::vector places; places.emplace_back(platform::CPUPlace()); + int count = 0; #ifdef PADDLE_WITH_CUDA - int count = platform::GetCUDADeviceCount(); - for (int i = 0; i < count; ++i) { - places.emplace_back(platform::CUDAPlace(i)); + try { + count = platform::GetCUDADeviceCount(); + } catch (const std::exception &exp) { + LOG(WARNING) << "Compiled with WITH_GPU, but no GPU found in runtime."; } #else LOG(WARNING) - << "'GPU' is not supported, Please re-compile with WITH_GPU option"; + << "'CUDA' is not supported, Please re-compile with WITH_GPU option"; #endif + for (int i = 0; i < count; ++i) { + places.emplace_back(platform::CUDAPlace(i)); + } + platform::DeviceContextPool::Init(places); } diff --git a/paddle/framework/init_test.cc b/paddle/framework/init_test.cc index f837a965d3..01e076dd8e 100644 --- a/paddle/framework/init_test.cc +++ b/paddle/framework/init_test.cc @@ -20,7 +20,21 @@ TEST(InitDevices, CPU) { using paddle::framework::InitDevices; using paddle::platform::DeviceContextPool; +#ifndef PADDLE_WITH_CUDA InitDevices(); DeviceContextPool& pool = DeviceContextPool::Instance(); - ASSERT_GE(pool.size(), 1U); + ASSERT_EQ(pool.size(), 1U); +#endif +} + +TEST(InitDevices, CUDA) { + using paddle::framework::InitDevices; + using paddle::platform::DeviceContextPool; + +#ifdef PADDLE_WITH_CUDA + int count = paddle::platform::GetCUDADeviceCount(); + InitDevices(); + DeviceContextPool& pool = DeviceContextPool::Instance(); + ASSERT_EQ(pool.size(), 1U + static_cast(count)); +#endif } From adf14b0c708e20a39cb2f22cedd47be093129cae Mon Sep 17 00:00:00 2001 From: chengduo Date: Wed, 31 Jan 2018 14:01:40 +0800 Subject: [PATCH 087/106] Refine channel test (#7946) * refine channel test * follow comments * Add dependency enforce to threadpool * Revert changes to channel_test.cc * Revert changes to channel_test.cc * Add #include 
"paddle/framework/macros.h" * Add unit tests * fix code format * refine close channel * follow comments * use delete to destroy channel --- paddle/framework/channel.h | 10 +-- paddle/framework/channel_test.cc | 62 +++++++++++++++++-- paddle/framework/details/buffered_channel.h | 40 +++++++++--- paddle/framework/details/unbuffered_channel.h | 6 +- 4 files changed, 95 insertions(+), 23 deletions(-) diff --git a/paddle/framework/channel.h b/paddle/framework/channel.h index 70ecccc1a1..0570980c5a 100644 --- a/paddle/framework/channel.h +++ b/paddle/framework/channel.h @@ -26,9 +26,7 @@ class Channel { virtual void Send(T*) = 0; virtual void Receive(T*) = 0; virtual size_t Cap() = 0; - - // Don't delete channels; instead, call Channel::Close. - protected: + virtual void Close() = 0; virtual ~Channel() {} }; @@ -50,11 +48,7 @@ Channel* MakeChannel(size_t buffer_size) { template void CloseChannel(Channel* ch) { - if (ch->Cap() > 0) { - delete dynamic_cast*>(ch); - } else { - delete dynamic_cast*>(ch); - } + ch->Close(); } } // namespace framework diff --git a/paddle/framework/channel_test.cc b/paddle/framework/channel_test.cc index 9efc017265..1510fb8abf 100644 --- a/paddle/framework/channel_test.cc +++ b/paddle/framework/channel_test.cc @@ -14,13 +14,67 @@ limitations under the License. */ #include "paddle/framework/channel.h" +#include +#include + #include "gtest/gtest.h" +using paddle::framework::Channel; +using paddle::framework::MakeChannel; +using paddle::framework::CloseChannel; + TEST(Channel, MakeAndClose) { - using paddle::framework::Channel; - using paddle::framework::MakeChannel; - using paddle::framework::CloseChannel; + using paddle::framework::details::Buffered; + using paddle::framework::details::UnBuffered; + { + // MakeChannel should return a buffered channel is buffer_size > 0. 
+    auto ch = MakeChannel(10);
+    EXPECT_NE(dynamic_cast*>(ch), nullptr);
+    EXPECT_EQ(dynamic_cast*>(ch), nullptr);
+    CloseChannel(ch);
+    delete ch;
+  }
+  {
+    // MakeChannel should return an un-buffered channel if buffer_size = 0.
+    auto ch = MakeChannel(0);
+    EXPECT_EQ(dynamic_cast*>(ch), nullptr);
+    EXPECT_NE(dynamic_cast*>(ch), nullptr);
+    CloseChannel(ch);
+    delete ch;
+  }
+}
+
+TEST(Channel, SufficientBufferSizeDoesntBlock) {
+  const size_t buffer_size = 10;
+  auto ch = MakeChannel(buffer_size);
+  for (size_t i = 0; i < buffer_size; ++i) {
+    ch->Send(&i);  // should not block
+  }
+
+  size_t out;
+  for (size_t i = 0; i < buffer_size; ++i) {
+    ch->Receive(&out);  // should not block
+    EXPECT_EQ(out, i);
+  }
+  CloseChannel(ch);
+  delete ch;
+}
+
+TEST(Channel, ConcurrentSendNonConcurrentReceiveWithSufficientBufferSize) {
+  const size_t buffer_size = 10;
+  auto ch = MakeChannel(buffer_size);
+  size_t sum = 0;
+  std::thread t([&]() {
+    // Try to write more than buffer size.
+    for (size_t i = 0; i < 2 * buffer_size; ++i) {
+      ch->Send(&i);  // should not block
+      sum += i;
+    }
+  });
+  std::this_thread::sleep_for(std::chrono::milliseconds(100));  // wait 0.1 sec
+  EXPECT_EQ(sum, 45U);

-  Channel* ch = MakeChannel(10);
   CloseChannel(ch);
+  t.join();
+  delete ch;
 }
diff --git a/paddle/framework/details/buffered_channel.h b/paddle/framework/details/buffered_channel.h
index 572e29d44a..b093e15892 100644
--- a/paddle/framework/details/buffered_channel.h
+++ b/paddle/framework/details/buffered_channel.h
@@ -18,6 +18,7 @@ limitations under the License.
*/ #include #include "paddle/framework/channel.h" +#include "paddle/platform/enforce.h" namespace paddle { namespace framework { @@ -32,6 +33,8 @@ class Buffered : public paddle::framework::Channel { virtual void Send(T*); virtual void Receive(T*); virtual size_t Cap() { return cap_; } + virtual void Close(); + virtual ~Buffered(); private: size_t cap_; @@ -39,9 +42,11 @@ class Buffered : public paddle::framework::Channel { std::condition_variable empty_cond_var_; std::condition_variable full_cond_var_; std::deque channel_; + bool closed_; - Buffered(size_t cap) : cap_(cap) {} - virtual ~Buffered(); + Buffered(size_t cap) : cap_(cap), closed_(false) { + PADDLE_ENFORCE_GT(cap, 0); + } void NotifyAllSenders(std::unique_lock*); }; @@ -49,24 +54,39 @@ class Buffered : public paddle::framework::Channel { template void Buffered::Send(T* item) { std::unique_lock lock(mu_); - full_cond_var_.wait(lock, [this]() { return channel_.size() < cap_; }); - channel_.push_back(std::move(*item)); - lock.unlock(); - empty_cond_var_.notify_one(); + full_cond_var_.wait(lock, + [this]() { return channel_.size() < cap_ || closed_; }); + if (!closed_) { + channel_.push_back(std::move(*item)); + lock.unlock(); + empty_cond_var_.notify_one(); + } } template void Buffered::Receive(T* item) { std::unique_lock lock(mu_); - empty_cond_var_.wait(lock, [this]() { return !channel_.empty(); }); - *item = std::move(channel_.front()); - channel_.pop_front(); + empty_cond_var_.wait(lock, [this]() { return !channel_.empty() || closed_; }); + if (!closed_) { + *item = std::move(channel_.front()); + channel_.pop_front(); + NotifyAllSenders(&lock); + } else { + item = nullptr; + } +} + +template +void Buffered::Close() { + std::unique_lock lock(mu_); + closed_ = true; NotifyAllSenders(&lock); } template Buffered::~Buffered() { std::unique_lock lock(mu_); + closed_ = true; channel_.clear(); NotifyAllSenders(&lock); } @@ -74,7 +94,7 @@ Buffered::~Buffered() { template void 
Buffered::NotifyAllSenders(std::unique_lock* lock) { lock->unlock(); - full_cond_var_.notify_one(); + full_cond_var_.notify_all(); } } // namespace details diff --git a/paddle/framework/details/unbuffered_channel.h b/paddle/framework/details/unbuffered_channel.h index 7ecced1fba..cc2d2e587e 100644 --- a/paddle/framework/details/unbuffered_channel.h +++ b/paddle/framework/details/unbuffered_channel.h @@ -32,10 +32,11 @@ class UnBuffered : public paddle::framework::Channel { virtual void Send(T*); virtual void Receive(T*); virtual size_t Cap() { return 0; } + virtual void Close(); + virtual ~UnBuffered(); private: UnBuffered() {} - virtual ~UnBuffered(); }; template @@ -44,6 +45,9 @@ void UnBuffered::Send(T* channel_element) {} template void UnBuffered::Receive(T*) {} +template +void UnBuffered::Close() {} + template UnBuffered::~UnBuffered() {} From 7c53d72719c6f964e81363fb502f757501c99446 Mon Sep 17 00:00:00 2001 From: Yibing Liu Date: Tue, 30 Jan 2018 22:12:57 -0800 Subject: [PATCH 088/106] Refine the design doc for ctc_beam_search_decoder --- doc/design/speech/README.MD | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/doc/design/speech/README.MD b/doc/design/speech/README.MD index cc03aac7b4..4509d6453d 100644 --- a/doc/design/speech/README.MD +++ b/doc/design/speech/README.MD @@ -142,13 +142,15 @@ TODO by Assignees

-Figure 2. Algorithm for Beam Search Decoder. +Figure 2. Algorithm for CTC Beam Search Decoder.
-- The **Beam Search Decoder** for DS2 CTC-trained network follows the similar approach in \[[3](#references)\] with a modification for the ambiguous part, as shown in Figure 2. -- An **external defined scorer** would be passed into the decoder to evaluate a candidate prefix during decoding whenever a space character appended. -- Such scorer is a unified class, may consisting of language model, word count or any customed evaluators. -- The **language model** is built from Task 5, with a parameter should be carefully tuned to achieve minimum WER/CER (c.f. Task 7) +- The **Beam Search Decoder** for DS2 CTC-trained network follows the similar approach in \[[3](#references)\] as shown in Figure 2, with two important modifications for the ambiguous parts: + - 1) in the iterative computation of probabilities, the assignment operation is changed to accumulation for one prefix may comes from different paths; + - 2) the if condition ```if l^+ not in A_prev then``` after probabilities' computation is deprecated for it is hard to understand and seems unnecessary. +- An **external scorer** would be passed into the decoder to evaluate a candidate prefix during decoding whenever a white space appended in English decoding and any character appended in Mandarin decoding. +- Such external scorer consists of language model, word count or any other customed scorers. +- The **language model** is built from Task 5, with parameters should be carefully tuned to achieve minimum WER/CER (c.f. Task 7) - This decoder needs to perform with **high efficiency** for the convenience of parameters tuning and speech recognition in reality. 
From 535f6bdf7bec8903829cd2020ad80bb948f471bf Mon Sep 17 00:00:00 2001 From: Yibing Liu Date: Tue, 30 Jan 2018 22:19:01 -0800 Subject: [PATCH 089/106] Rename the DS2' design doc --- doc/design/speech/{README.MD => deep_speech_2.md} | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) rename doc/design/speech/{README.MD => deep_speech_2.md} (99%) diff --git a/doc/design/speech/README.MD b/doc/design/speech/deep_speech_2.md similarity index 99% rename from doc/design/speech/README.MD rename to doc/design/speech/deep_speech_2.md index 4509d6453d..5e72c7e201 100644 --- a/doc/design/speech/README.MD +++ b/doc/design/speech/deep_speech_2.md @@ -149,7 +149,7 @@ Figure 2. Algorithm for CTC Beam Search Decoder. - 1) in the iterative computation of probabilities, the assignment operation is changed to accumulation for one prefix may comes from different paths; - 2) the if condition ```if l^+ not in A_prev then``` after probabilities' computation is deprecated for it is hard to understand and seems unnecessary. - An **external scorer** would be passed into the decoder to evaluate a candidate prefix during decoding whenever a white space appended in English decoding and any character appended in Mandarin decoding. -- Such external scorer consists of language model, word count or any other customed scorers. +- Such external scorer consists of language model, word count or any other custom scorers. - The **language model** is built from Task 5, with parameters should be carefully tuned to achieve minimum WER/CER (c.f. Task 7) - This decoder needs to perform with **high efficiency** for the convenience of parameters tuning and speech recognition in reality. 
From de1a70147ed97055a1ad595208a696e86ea6f183 Mon Sep 17 00:00:00 2001 From: Yibing Liu Date: Tue, 30 Jan 2018 22:25:29 -0800 Subject: [PATCH 090/106] Adjust the width of figure 2 --- doc/design/speech/deep_speech_2.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/design/speech/deep_speech_2.md b/doc/design/speech/deep_speech_2.md index 5e72c7e201..cfdc4d6df0 100644 --- a/doc/design/speech/deep_speech_2.md +++ b/doc/design/speech/deep_speech_2.md @@ -141,7 +141,7 @@ TODO by Assignees ### Beam Search with CTC and LM
-
+
Figure 2. Algorithm for CTC Beam Search Decoder.
From e3b0af43a05ac04159aa9d2d9777c0f5a2486d49 Mon Sep 17 00:00:00 2001 From: Yang Yu Date: Wed, 31 Jan 2018 14:42:20 +0800 Subject: [PATCH 091/106] Fix CI Error --- python/paddle/v2/fluid/tests/test_layer_norm_op.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/paddle/v2/fluid/tests/test_layer_norm_op.py b/python/paddle/v2/fluid/tests/test_layer_norm_op.py index 7d5dc7d1a6..68cf8673cd 100644 --- a/python/paddle/v2/fluid/tests/test_layer_norm_op.py +++ b/python/paddle/v2/fluid/tests/test_layer_norm_op.py @@ -228,7 +228,7 @@ class TestLayerNormdOp(OpTest): place) places = [core.CPUPlace()] - if core.is_compile_gpu() and core.op_support_gpu("layer_norm"): + if core.is_compiled_with_cuda() and core.op_support_gpu("layer_norm"): places.append(core.CUDAPlace(0)) for place in places: From ae7d1c1f8c7175d60420dcfefba37c1a2518a536 Mon Sep 17 00:00:00 2001 From: dzhwinter Date: Wed, 31 Jan 2018 15:46:59 +0800 Subject: [PATCH 092/106] Fix/lod (#7714) * "Need to re-design LoD " * "add lod design" * "fix lod gpu ptr pointer" * "removed commented code" * "fix CI" * "remove set lod in pybind" * "fix style check" * "fix CI" * "fix long type template error" * "pybind reorder to use Place" * "fix ci" * "fix ci" * fix ci * "sperate as a new file" * "fix CI" * "fix ci" * small fix * "add test" * "fix adam op" * "fix lstmp op" * "fix adam op" * "follow comments" * "fix ci" --- paddle/framework/CMakeLists.txt | 2 +- paddle/framework/lod_tensor.cc | 2 - paddle/framework/lod_tensor.h | 26 +-- paddle/framework/lod_tensor_test.cc | 11 ++ paddle/framework/lod_tensor_test.cu | 46 +++++- paddle/framework/mixed_vector.h | 154 ++++++++++++++++++ paddle/framework/tensor.h | 7 + paddle/inference/CMakeLists.txt | 2 +- paddle/operators/adagrad_op.cu | 6 +- paddle/operators/adam_op.h | 7 +- paddle/operators/ctc_align_op.cu | 5 +- paddle/operators/gru_op.h | 13 +- paddle/operators/lookup_table_op.cu | 4 +- paddle/operators/lstm_op.h | 12 +- paddle/operators/lstmp_op.h | 11 
+- .../operators/math/selected_rows_functor.cu | 24 +-- paddle/operators/math/sequence2batch.cc | 6 +- paddle/operators/math/sequence2batch.cu | 6 +- paddle/operators/math/sequence2batch.h | 9 +- paddle/operators/math/sequence_padding.cu | 20 ++- paddle/operators/math/sequence_pooling.cu | 2 +- paddle/operators/math/sequence_scale.cu | 2 +- paddle/operators/row_conv_op.cu | 4 +- paddle/operators/sequence_erase_op.cu | 3 +- paddle/operators/sgd_op.cu | 4 +- paddle/pybind/pybind.cc | 37 +---- python/paddle/v2/fluid/tests/test_tensor.py | 24 ++- 27 files changed, 346 insertions(+), 103 deletions(-) create mode 100644 paddle/framework/mixed_vector.h diff --git a/paddle/framework/CMakeLists.txt b/paddle/framework/CMakeLists.txt index 8c28709a68..8b71f73c36 100644 --- a/paddle/framework/CMakeLists.txt +++ b/paddle/framework/CMakeLists.txt @@ -22,7 +22,7 @@ cc_test(eigen_test SRCS eigen_test.cc DEPS tensor) cc_library(lod_tensor SRCS lod_tensor.cc DEPS ddim place tensor framework_proto) cc_test(lod_tensor_test SRCS lod_tensor_test.cc DEPS lod_tensor paddle_memory) -nv_test(lod_tensor_gpu_test SRCS lod_tensor_test.cu DEPS lod_tensor) +nv_test(lod_tensor_gpu_test SRCS lod_tensor_test.cu DEPS lod_tensor init) cc_test(variable_test SRCS variable_test.cc) diff --git a/paddle/framework/lod_tensor.cc b/paddle/framework/lod_tensor.cc index 53b0d0fe08..cb27de6991 100644 --- a/paddle/framework/lod_tensor.cc +++ b/paddle/framework/lod_tensor.cc @@ -24,8 +24,6 @@ limitations under the License. */ #include #include -#include - namespace paddle { namespace framework { diff --git a/paddle/framework/lod_tensor.h b/paddle/framework/lod_tensor.h index 9d1294fdeb..d0ab640485 100644 --- a/paddle/framework/lod_tensor.h +++ b/paddle/framework/lod_tensor.h @@ -18,11 +18,11 @@ limitations under the License. 
*/ #ifdef PADDLE_WITH_CUDA #include #include -#include #endif #include #include "paddle/framework/ddim.h" +#include "paddle/framework/mixed_vector.h" #include "paddle/framework/tensor.h" #include "paddle/framework/tensor_util.h" #include "paddle/platform/enforce.h" @@ -31,15 +31,6 @@ limitations under the License. */ namespace paddle { namespace framework { -#ifndef PADDLE_WITH_CUDA -template -using Vector = std::vector; -#else -template -using Vector = thrust::host_vector< - T, thrust::system::cuda::experimental::pinned_allocator>; -#endif - /* * LoD is short for Level of Details. * @@ -55,7 +46,15 @@ using Vector = thrust::host_vector< * 0 2 4 7 * 0 2 5 7 10 12 15 20 */ -using LoD = std::vector>; +struct LoD : public std::vector> { + using std::vector>::vector; + + void CopyFromCUDA() { + for (auto it = this->begin(); it != this->end(); ++it) { + it->CopyFromCUDA(); + } + } +}; std::ostream& operator<<(std::ostream& os, const LoD& lod); std::ostream& operator<<(std::ostream& os, const LoDTensor& t); @@ -109,7 +108,10 @@ bool CheckAbsLoD(const LoD& in, int tensor_height = -1); */ class LoDTensor : public Tensor { public: - LoDTensor() {} + LoDTensor() : Tensor() {} + + /* Constructor with place should only be used in pybind */ + explicit LoDTensor(const platform::Place& place) : Tensor(place) {} explicit LoDTensor(const LoD& lod) : lod_(lod) {} diff --git a/paddle/framework/lod_tensor_test.cc b/paddle/framework/lod_tensor_test.cc index 4d172c43c7..3b63020e68 100644 --- a/paddle/framework/lod_tensor_test.cc +++ b/paddle/framework/lod_tensor_test.cc @@ -23,6 +23,17 @@ namespace paddle { namespace framework { +TEST(LoD, data) { + LoD lod{{0, 1, 2}}; + lod.push_back({0, 2, 4, 5}); + lod.push_back(std::vector({0, 1, 6, 8, 10, 11})); + + auto& v = lod[0]; + for (size_t i = 0; i < v.size(); ++i) { + EXPECT_EQ(v[i], i); + } +} + TEST(LodExpand, test) { LoD lod{{0, 2}}; LoDTensor tensor; diff --git a/paddle/framework/lod_tensor_test.cu b/paddle/framework/lod_tensor_test.cu 
index 1e253a2f6f..d4c9f00bd9 100644 --- a/paddle/framework/lod_tensor_test.cu +++ b/paddle/framework/lod_tensor_test.cu @@ -14,6 +14,8 @@ #include #include +#include +#include "paddle/framework/init.h" #include "paddle/framework/lod_tensor.h" #include "paddle/platform/assert.h" @@ -26,7 +28,48 @@ __global__ void test(size_t* a, int size) { } } +TEST(Vector, Normal) { + using namespace paddle::framework; + using namespace paddle::platform; + using namespace paddle::memory; + + paddle::framework::InitDevices(); + + paddle::framework::Vector vec({1, 2, 3}); + size_t* ptr = vec.data(); + for (size_t i = 0; i < vec.size(); ++i) { + EXPECT_EQ(vec[i], *(ptr + i)); + } + + vec.clear(); + vec.CopyFromCUDA(); + + std::vector v = {1, 2, 3}; + for (size_t i = 0; i < v.size(); ++i) { + EXPECT_EQ(v[i], vec[i]); + } +} + +TEST(LoD, data) { + paddle::framework::InitDevices(); + + paddle::framework::LoD lod{{0, 1, 2}}; + lod.push_back({0, 2, 4, 5}); + lod.push_back(std::vector({0, 1, 6, 8, 10, 11})); + + auto& v = lod[0]; + test<<<1, 1>>>(v.cuda_data(), v.size()); + cudaDeviceSynchronize(); + + v.CopyFromCUDA(); + for (size_t i = 0; i < v.size(); ++i) { + EXPECT_EQ(v[i], i * 2); + } +} + TEST(LoDTensor, LoDInGPU) { + paddle::framework::InitDevices(); + paddle::framework::LoDTensor lod_tensor; paddle::platform::CUDAPlace place(0); @@ -42,8 +85,9 @@ TEST(LoDTensor, LoDInGPU) { auto lod = lod_tensor.lod(); - test<<<1, 8>>>(lod[0].data(), lod[0].size()); + test<<<1, 8>>>(lod[0].cuda_data(), lod[0].size()); cudaDeviceSynchronize(); + lod.CopyFromCUDA(); for (size_t i = 0; i < src_lod[0].size(); ++i) { EXPECT_EQ(lod[0].data()[i], src_lod[0].data()[i] * 2); diff --git a/paddle/framework/mixed_vector.h b/paddle/framework/mixed_vector.h new file mode 100644 index 0000000000..0e0e239586 --- /dev/null +++ b/paddle/framework/mixed_vector.h @@ -0,0 +1,154 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#pragma once + +#include +#include + +#include "paddle/memory/memcpy.h" +#include "paddle/memory/memory.h" +#include "paddle/platform/device_context.h" +#include "paddle/platform/enforce.h" +#include "paddle/platform/place.h" + +namespace paddle { +namespace framework { + +/** + * @brief Vector support both cpu and gpu. + * host vector lifetime is same with Vector + * device vector is lazily malloc and modified. + */ + +template +class Vector : public std::vector { + public: + /* NOTE(dzhwinter): + * Data always store and modified on Host. + * If the data is modified when use cuda_data interface, + * You need to call the CopyFromCUDA explicitly to synchronize data. 
+ * + */ + enum class kDataPosition { + kDataOnHost = 0, + kDataOnDevice = 1, + }; + + public: + using std::vector::vector; + + Vector() {} + Vector(const std::vector &v) : std::vector(v) {} // NOLINT + + virtual ~Vector() { +#ifdef PADDLE_WITH_CUDA + if (cuda_ptr_ != nullptr) { + memory::Free(place_, static_cast(cuda_ptr_)); + } +#endif + } + + T *cuda_data() { + CopyToCUDA(); + PADDLE_ENFORCE_NOT_NULL( + cuda_ptr_, "No data or Insufficient CUDA memory to allocation"); + return static_cast(cuda_ptr_); + } + + T *data() { return std::vector::data(); } + + const T *data() const { return std::vector::data(); } + + void CopyToCUDA(); + + void CopyFromCUDA(); + + void CopyToPeer(platform::Place); + + private: + void *cuda_ptr_ = nullptr; + size_t cuda_size_ = 0; + /*The DataPosition is unused now, + if we want support random access from cpu and cuda, + we need to overload all the vector method */ + + kDataPosition position_ = kDataPosition::kDataOnHost; + platform::CUDAPlace place_; +}; + +template +void Vector::CopyToCUDA() { +#ifdef PADDLE_WITH_CUDA + if (cuda_ptr_ == nullptr) { + cuda_ptr_ = + memory::Alloc(place_, this->size() * sizeof(T)); + } + platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); + auto *cuda_ctx = pool.GetByPlace(place_); + + memory::Copy(place_, static_cast(cuda_ptr_), platform::CPUPlace(), + static_cast(this->data()), + this->size() * sizeof(T), cuda_ctx->stream()); + cuda_ctx->Wait(); + + cuda_size_ = this->size(); +#endif +} + +template +void Vector::CopyFromCUDA() { +#ifdef PADDLE_WITH_CUDA + platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); + auto *cuda_ctx = pool.GetByPlace(place_); + if (cuda_ptr_ == nullptr) { + LOG(WARNING) << "No uncommited cuda data."; + return; + } + this->resize(cuda_size_); + memory::Copy(platform::CPUPlace(), static_cast(this->data()), place_, + static_cast(cuda_ptr_), this->size() * sizeof(T), + cuda_ctx->stream()); + cuda_ctx->Wait(); + +#endif +} + +template 
+void Vector::CopyToPeer(platform::Place peer_place) { + if (platform::is_cpu_place(peer_place)) { + return; + } +#ifdef PADDLE_WITH_CUDA + auto *cuda_ctx = platform::DeviceContextPool::Instance().GetByPlace(place_); + void *peer_cuda_ptr_ = memory::Alloc( + boost::get(peer_place), this->size() * sizeof(T)); + memory::Copy(boost::get(peer_place), + static_cast(peer_cuda_ptr_), place_, + static_cast(cuda_ptr_), this->size() * sizeof(T), + cuda_ctx->stream()); + cuda_ctx->Wait(); + memory::Free(place_, static_cast(cuda_ptr_)); + place_ = boost::get(peer_place); + cuda_ptr_ = peer_cuda_ptr_; +#endif +} + +template class Vector; +template class Vector; +template class Vector; +template class Vector; + +} // namespace framework +} // namespace paddle diff --git a/paddle/framework/tensor.h b/paddle/framework/tensor.h index 4aaa29d794..f0ea709a5c 100644 --- a/paddle/framework/tensor.h +++ b/paddle/framework/tensor.h @@ -47,6 +47,11 @@ class Tensor { public: Tensor() : offset_(0) {} + /*! Constructor with place should only be used in pybind. */ + explicit Tensor(const platform::Place& place) : offset_(0) { + holder_->set_place(place); + } + /*! Return a pointer to mutable memory block. */ template inline T* data(); @@ -137,6 +142,7 @@ class Tensor { virtual std::type_index type() const = 0; virtual platform::Place place() const = 0; virtual void set_type(std::type_index type) = 0; + virtual void set_place(platform::Place place) = 0; }; template @@ -156,6 +162,7 @@ class Tensor { virtual void* ptr() const { return static_cast(ptr_.get()); } virtual std::type_index type() const { return type_; } virtual void set_type(std::type_index type) { type_ = type; } + virtual void set_place(platform::Place place) { place_ = place; } /*! the pointer of memory block. 
*/ std::unique_ptr> ptr_; diff --git a/paddle/inference/CMakeLists.txt b/paddle/inference/CMakeLists.txt index 0288266c08..2289ddc139 100644 --- a/paddle/inference/CMakeLists.txt +++ b/paddle/inference/CMakeLists.txt @@ -1,4 +1,4 @@ -set(FLUID_CORE_MODULES proto_desc paddle_memory executor prune init) +set(FLUID_CORE_MODULES proto_desc paddle_memory lod_tensor executor prune init) cc_library(paddle_fluid_api SRCS io.cc diff --git a/paddle/operators/adagrad_op.cu b/paddle/operators/adagrad_op.cu index 4e57938792..00cb6e9caf 100644 --- a/paddle/operators/adagrad_op.cu +++ b/paddle/operators/adagrad_op.cu @@ -82,7 +82,7 @@ struct SparseAdagradFunctor { math::scatter::MergeAdd merge_func; auto grad_merge = merge_func(context, grad); auto* grad_merge_data = grad_merge.mutable_value()->template data(); - auto& merge_rows = grad_merge.rows(); + framework::Vector merge_rows(grad_merge.rows()); // 2. m += g_m * g_m math::scatter::Mul sqare_func; auto grad_square = sqare_func(context, grad_merge, grad_merge); @@ -101,8 +101,8 @@ struct SparseAdagradFunctor { SparseAdagradFunctorKernel< T, 256><<(context) - .stream()>>>(grad_merge_data, grad_merge.rows().data(), - lr, param_data, moment_data, grad_width, + .stream()>>>(grad_merge_data, merge_rows.cuda_data(), lr, + param_data, moment_data, grad_width, epsilon); } }; diff --git a/paddle/operators/adam_op.h b/paddle/operators/adam_op.h index 9cc34bdded..bf536687d3 100644 --- a/paddle/operators/adam_op.h +++ b/paddle/operators/adam_op.h @@ -199,7 +199,12 @@ class AdamOpKernel : public framework::OpKernel { merge_func(ctx.template device_context(), grad); auto& grad_tensor = grad_merge.value(); const T* grad_data = grad_tensor.template data(); - auto* rows = grad_merge.rows().data(); + int64_t* rows = nullptr; + if (platform::is_gpu_place(ctx.GetPlace())) { + rows = grad_merge.mutable_rows()->cuda_data(); + } else { + rows = grad_merge.mutable_rows()->data(); + } auto row_numel = grad_tensor.numel() / grad_merge.rows().size(); 
SparseAdamFunctor functor( diff --git a/paddle/operators/ctc_align_op.cu b/paddle/operators/ctc_align_op.cu index 45635f1674..2a970cd9fa 100644 --- a/paddle/operators/ctc_align_op.cu +++ b/paddle/operators/ctc_align_op.cu @@ -69,12 +69,11 @@ class CTCAlignOpCUDAKernel : public framework::OpKernel { auto stream = ctx.cuda_device_context().stream(); MergeAndDelCudaKernel<<<1, 1, 0, stream>>>( - num_tokens, tokens, num_seq, input_lod[level].data(), blank, + num_tokens, tokens, num_seq, input_lod[level].cuda_data(), blank, merge_repeated, dev_out_lod0_ptr, output_data); // set output lod - thrust::host_vector host_out_lod0(dev_out_lod0.begin(), - dev_out_lod0.end()); + std::vector host_out_lod0(dev_out_lod0.begin(), dev_out_lod0.end()); framework::LoD out_lod; out_lod.push_back(host_out_lod0); output->set_lod(out_lod); diff --git a/paddle/operators/gru_op.h b/paddle/operators/gru_op.h index b1957fb9ce..a08bd4233b 100644 --- a/paddle/operators/gru_op.h +++ b/paddle/operators/gru_op.h @@ -30,11 +30,12 @@ using Tensor = framework::Tensor; template inline void ReorderInitState(const DeviceContext& ctx, - const framework::Tensor& src, const size_t* index, + const framework::Tensor& src, + framework::Vector index_lod, framework::Tensor* dst, bool indexed_src) { math::CopyMatrixRowsFunctor row_shuffle; dst->mutable_data(src.dims(), ctx.GetPlace()); - row_shuffle(ctx, src, index, *dst, indexed_src); + row_shuffle(ctx, src, index_lod, *dst, indexed_src); } template @@ -76,7 +77,9 @@ class GRUKernel : public framework::OpKernel { gru_value.state_weight = const_cast(weight_data + 2 * frame_size * frame_size); Tensor ordered_h0; - const size_t* order = batch_gate->lod()[2].data(); + + framework::Vector order(batch_gate->lod()[2]); + if (h0) { // Since the batch computing for GRU reorders the input sequences // according to their length. 
The initialized cell state also needs @@ -159,7 +162,9 @@ class GRUGradKernel : public framework::OpKernel { zero(dev_ctx, &batch_reset_hidden_prev_grad, static_cast(0.0)); Tensor ordered_h0, ordered_h0_grad; - const size_t* order = batch_gate->lod()[2].data(); + + framework::Vector order(batch_gate->lod()[2]); + if (h0) { ReorderInitState(dev_ctx, *h0, order, &ordered_h0, true); diff --git a/paddle/operators/lookup_table_op.cu b/paddle/operators/lookup_table_op.cu index d97390fa1c..07372808bb 100644 --- a/paddle/operators/lookup_table_op.cu +++ b/paddle/operators/lookup_table_op.cu @@ -125,8 +125,8 @@ class LookupTableGradCUDAKernel : public framework::OpKernel { new_rows.resize(ids_dim[0]); auto gpu_place = boost::get(context.GetPlace()); - memory::Copy(platform::CPUPlace(), new_rows.data(), gpu_place, ids_data, - ids_dim[0] * sizeof(int64_t), stream); + memory::Copy(platform::CPUPlace(), new_rows.cuda_data(), gpu_place, + ids_data, ids_dim[0] * sizeof(int64_t), stream); d_table->set_rows(new_rows); diff --git a/paddle/operators/lstm_op.h b/paddle/operators/lstm_op.h index c57ee414dc..72e95b75e2 100644 --- a/paddle/operators/lstm_op.h +++ b/paddle/operators/lstm_op.h @@ -27,11 +27,12 @@ using Tensor = framework::Tensor; template inline void ReorderInitState(const DeviceContext& ctx, - const framework::Tensor& src, const size_t* index, + const framework::Tensor& src, + framework::Vector index_lod, framework::Tensor* dst, bool indexed_src) { math::CopyMatrixRowsFunctor row_shuffle; dst->mutable_data(src.dims(), ctx.GetPlace()); - row_shuffle(ctx, src, index, *dst, indexed_src); + row_shuffle(ctx, src, index_lod, *dst, indexed_src); } template @@ -84,7 +85,9 @@ class LSTMKernel : public framework::OpKernel { } lstm_value.prev_state_value = nullptr; Tensor ordered_c0; - const size_t* order = batch_gate->lod()[2].data(); + + framework::Vector order(batch_gate->lod()[2]); + if (cell_t0) { // Since the batch computing for LSTM reorders the input sequence // according to 
their length. The initialized cell state also needs @@ -202,7 +205,8 @@ class LSTMGradKernel : public framework::OpKernel { // ordered_h0_g/c0_g is the reordered gradient of hidden/cell // initialization. Tensor ordered_h0, ordered_c0, ordered_h0_g, ordered_c0_g; - const size_t* order = batch_gate->lod()[2].data(); + framework::Vector order(batch_gate->lod()[2]); + if (c0) { ReorderInitState(device_ctx, *c0, order, &ordered_c0, true); diff --git a/paddle/operators/lstmp_op.h b/paddle/operators/lstmp_op.h index ee82d5c10a..e064a155df 100644 --- a/paddle/operators/lstmp_op.h +++ b/paddle/operators/lstmp_op.h @@ -34,7 +34,8 @@ using EigenMatrix = framework::EigenMatrix; template inline void ReorderInitState(const DeviceContext& ctx, - const framework::Tensor& src, const size_t* index, + const framework::Tensor& src, + framework::Vector index, framework::Tensor* dst, bool indexed_src) { math::CopyMatrixRowsFunctor row_shuffle; dst->mutable_data(src.dims(), ctx.GetPlace()); @@ -109,7 +110,9 @@ class LSTMPKernel : public framework::OpKernel { } lstmp_value.prev_state_value = nullptr; Tensor ordered_c0; - const size_t* order = batch_gate->lod()[2].data(); + + framework::Vector order(batch_gate->lod()[2]); + if (cell_t0) { // Since the batch computing for LSTMP reorders the input sequence // according to their length. The initialized cell state also needs @@ -275,7 +278,9 @@ class LSTMPGradKernel : public framework::OpKernel { // ordered_h0_g/c0_g is the reordered gradient of hidden/cell // initialization. 
Tensor ordered_h0, ordered_c0, ordered_h0_g, ordered_c0_g; - const size_t* order = batch_gate->lod()[2].data(); + + framework::Vector order(batch_gate->lod()[2]); + if (c0) { ReorderInitState(device_ctx, *c0, order, &ordered_c0, true); diff --git a/paddle/operators/math/selected_rows_functor.cu b/paddle/operators/math/selected_rows_functor.cu index 0ee456f9bc..acdd87cb35 100644 --- a/paddle/operators/math/selected_rows_functor.cu +++ b/paddle/operators/math/selected_rows_functor.cu @@ -31,7 +31,7 @@ struct SelectedRowsAdd { PADDLE_ENFORCE_EQ(in1_height, input2.height()); output->set_height(in1_height); - auto& in1_rows = input1.rows(); + framework::Vector in1_rows(input1.rows()); auto& in2_rows = input2.rows(); std::vector out_rows; out_rows.reserve(in1_rows.size() + in2_rows.size()); @@ -108,7 +108,7 @@ struct SelectedRowsAddTensor { PADDLE_ENFORCE_EQ(in1_height, out_dims[0]); auto& in1_value = input1.value(); - auto& in1_rows = input1.rows(); + framework::Vector in1_rows(input1.rows()); int64_t in1_row_numel = in1_value.numel() / in1_rows.size(); PADDLE_ENFORCE_EQ(in1_row_numel, input2.numel() / in1_height); @@ -126,7 +126,7 @@ struct SelectedRowsAddTensor { dim3 grid(1, in1_rows.size()); SelectedRowsAddTensorKernel< T, block_size><<>>( - in1_data, in1_rows.data(), out_data, in1_row_numel); + in1_data, in1_rows.cuda_data(), out_data, in1_row_numel); auto out_eigen = framework::EigenVector::Flatten(*output); auto in2_eigen = framework::EigenVector::Flatten(input2); @@ -146,7 +146,7 @@ struct SelectedRowsAddTo { auto in1_height = input1.height(); PADDLE_ENFORCE_EQ(in1_height, input2->height()); - auto& in1_rows = input1.rows(); + framework::Vector in1_rows(input1.rows()); auto& in2_rows = *(input2->mutable_rows()); auto& in1_value = input1.value(); @@ -204,7 +204,7 @@ struct SelectedRowsAddToTensor { PADDLE_ENFORCE_EQ(in1_height, in2_dims[0]); auto& in1_value = input1.value(); - auto& in1_rows = input1.rows(); + framework::Vector in1_rows(input1.rows()); int64_t 
in1_row_numel = in1_value.numel() / in1_rows.size(); PADDLE_ENFORCE_EQ(in1_row_numel, input2->numel() / in1_height); @@ -216,7 +216,7 @@ struct SelectedRowsAddToTensor { dim3 grid(1, in1_rows.size()); SelectedRowsAddToTensorKernel< T, block_size><<>>( - in1_data, in1_rows.data(), in2_data, in1_row_numel); + in1_data, in1_rows.cuda_data(), in2_data, in1_row_numel); } }; @@ -257,7 +257,7 @@ struct MergeAdd { framework::SelectedRows operator()(const platform::CUDADeviceContext& context, const framework::SelectedRows& input) { framework::SelectedRows out; - auto input_rows = input.rows(); + framework::Vector input_rows(input.rows()); std::set row_set(input_rows.begin(), input_rows.end()); std::vector merge_rows(row_set.begin(), row_set.end()); @@ -283,9 +283,9 @@ struct MergeAdd { MergeAddKernel< T, 256><<(context) - .stream()>>>(input_data, input.rows().data(), out_data, - out.rows().data(), out.rows().size(), - input_width); + .stream()>>>(input_data, input_rows.cuda_data(), out_data, + out.mutable_rows()->cuda_data(), + out.rows().size(), input_width); return out; } }; @@ -370,8 +370,8 @@ struct UpdateToTensor { dim3 threads(platform::PADDLE_CUDA_NUM_THREADS, 1); dim3 grid(1, in1_rows.size()); UpdateToTensorKernel<<< - grid, threads, 0, context.stream()>>>(in1_data, in1_rows.data(), op, - in2_data, in1_row_numel); + grid, threads, 0, context.stream()>>>(in1_data, in1_rows.cuda_data(), + op, in2_data, in1_row_numel); } }; } // namespace scatter diff --git a/paddle/operators/math/sequence2batch.cc b/paddle/operators/math/sequence2batch.cc index e459a42ca2..17abce1c2f 100644 --- a/paddle/operators/math/sequence2batch.cc +++ b/paddle/operators/math/sequence2batch.cc @@ -23,8 +23,10 @@ template class CopyMatrixRowsFunctor { public: void operator()(const platform::CPUDeviceContext& context, - const framework::Tensor& src, const size_t* index, - framework::Tensor& dst, bool is_src_index) { + const framework::Tensor& src, + framework::Vector index_lod, framework::Tensor& 
dst, + bool is_src_index) { + size_t* index = index_lod.data(); auto src_dims = src.dims(); auto dst_dims = dst.dims(); PADDLE_ENFORCE_EQ(src_dims.size(), 2UL, diff --git a/paddle/operators/math/sequence2batch.cu b/paddle/operators/math/sequence2batch.cu index 452ae89510..f27631271a 100644 --- a/paddle/operators/math/sequence2batch.cu +++ b/paddle/operators/math/sequence2batch.cu @@ -42,8 +42,10 @@ template class CopyMatrixRowsFunctor { public: void operator()(const platform::CUDADeviceContext& context, - const framework::Tensor& src, const size_t* index, - framework::Tensor& dst, bool is_src_index) { + const framework::Tensor& src, + framework::Vector index_lod, framework::Tensor& dst, + bool is_src_index) { + size_t* index = index_lod.cuda_data(); auto src_dims = src.dims(); auto dst_dims = dst.dims(); PADDLE_ENFORCE_EQ(src_dims.size(), 2, diff --git a/paddle/operators/math/sequence2batch.h b/paddle/operators/math/sequence2batch.h index a5c43a2c7d..6db0427b41 100644 --- a/paddle/operators/math/sequence2batch.h +++ b/paddle/operators/math/sequence2batch.h @@ -35,7 +35,7 @@ class CopyMatrixRowsFunctor { // copy the input src to the indexed rows of output dst. // The indexed rows are based on the input index. 
void operator()(const DeviceContext& context, const framework::Tensor& src, - const size_t* index, framework::Tensor& dst, + framework::Vector index_lod, framework::Tensor& dst, bool is_src_index); }; @@ -66,7 +66,7 @@ class LoDTensor2BatchFunctor { PADDLE_ENFORCE_EQ(lods[1].size(), static_cast(lod_tensor.dims()[0])); CopyMatrixRowsFunctor to_batch; - to_batch(context, lod_tensor, lods[1].data(), batch, true); + to_batch(context, lod_tensor, lods[1], batch, true); return; } @@ -144,7 +144,7 @@ class LoDTensor2BatchFunctor { batch.set_lod(batch_lods); CopyMatrixRowsFunctor to_batch; - to_batch(context, lod_tensor, seq2batch_idx, batch, true); + to_batch(context, lod_tensor, batch_lods[1], batch, true); } }; @@ -159,8 +159,7 @@ class Batch2LoDTensorFunctor { PADDLE_ENFORCE_EQ(in_lod[1].size(), static_cast(lod_tensor.dims()[0])); CopyMatrixRowsFunctor to_seq; - size_t* index = in_lod[1].data(); - to_seq(context, batch, index, lod_tensor, false); + to_seq(context, batch, in_lod[1], lod_tensor, false); } }; diff --git a/paddle/operators/math/sequence_padding.cu b/paddle/operators/math/sequence_padding.cu index a38df26f59..65c9cfe4a0 100644 --- a/paddle/operators/math/sequence_padding.cu +++ b/paddle/operators/math/sequence_padding.cu @@ -120,12 +120,14 @@ class PaddingLoDTensorFunctor { T* padding_data = padding.data(); if (norm_by_times) { SequencePaddingKernel<<>>( - padding_data, const_cast(seq_data), abs_offset_lod[level].data(), - sequence_width, max_sequence_length, num_sequences); + padding_data, const_cast(seq_data), + abs_offset_lod[level].cuda_data(), sequence_width, + max_sequence_length, num_sequences); } else { SequencePaddingKernel<<>>( - padding_data, const_cast(seq_data), abs_offset_lod[level].data(), - sequence_width, max_sequence_length, num_sequences); + padding_data, const_cast(seq_data), + abs_offset_lod[level].cuda_data(), sequence_width, + max_sequence_length, num_sequences); } } }; @@ -193,12 +195,14 @@ class UnpaddingLoDTensorFunctor { T* 
seq_data = seq.data(); if (norm_by_times) { SequencePaddingKernel<<>>( - const_cast(padding_data), seq_data, abs_offset_lod[level].data(), - sequence_width, max_sequence_length, num_sequences); + const_cast(padding_data), seq_data, + abs_offset_lod[level].cuda_data(), sequence_width, + max_sequence_length, num_sequences); } else { SequencePaddingKernel<<>>( - const_cast(padding_data), seq_data, abs_offset_lod[level].data(), - sequence_width, max_sequence_length, num_sequences); + const_cast(padding_data), seq_data, + abs_offset_lod[level].cuda_data(), sequence_width, + max_sequence_length, num_sequences); } } }; diff --git a/paddle/operators/math/sequence_pooling.cu b/paddle/operators/math/sequence_pooling.cu index 4c9e6b375c..f66534a681 100644 --- a/paddle/operators/math/sequence_pooling.cu +++ b/paddle/operators/math/sequence_pooling.cu @@ -73,7 +73,7 @@ class MaxSeqPoolFunctor { dim3 grid(num_seq, 1); auto stream = context.stream(); KeMaxSequencePool<<>>( - in_data, starts.data(), out_data, max_index, num_seq, dim); + in_data, starts.cuda_data(), out_data, max_index, num_seq, dim); } }; diff --git a/paddle/operators/math/sequence_scale.cu b/paddle/operators/math/sequence_scale.cu index ceaabd8e0f..fd4e28f611 100644 --- a/paddle/operators/math/sequence_scale.cu +++ b/paddle/operators/math/sequence_scale.cu @@ -46,7 +46,7 @@ class ScaleLoDTensorFunctor { SequenceScaleKernel<<< num_seq, PADDLE_CUDA_NUM_THREADS, 0, context.stream()>>>( - seq_data, abs_offset_lod[level].data(), scales, seq_width); + seq_data, abs_offset_lod[level].cuda_data(), scales, seq_width); } }; diff --git a/paddle/operators/row_conv_op.cu b/paddle/operators/row_conv_op.cu index 41f2c5b9de..b3825212e1 100644 --- a/paddle/operators/row_conv_op.cu +++ b/paddle/operators/row_conv_op.cu @@ -307,7 +307,7 @@ class RowConvKernel int input_dim = X->dims()[1]; int num_sequence = batch_indices.size() - 1; int future_context = Filter->dims()[0]; - size_t *idx = batch_indices.data(); + size_t *idx = 
batch_indices.cuda_data(); auto stream = context.cuda_device_context().stream(); if (future_context <= 32) { @@ -345,7 +345,7 @@ class RowConvGradKernel int input_dim = X->dims()[1]; int num_sequence = batch_indices.size() - 1; int future_context = Filter->dims()[0]; - size_t *idx = batch_indices.data(); + size_t *idx = batch_indices.cuda_data(); auto &device_ctx = context.cuda_device_context(); math::SetConstant zero; diff --git a/paddle/operators/sequence_erase_op.cu b/paddle/operators/sequence_erase_op.cu index f1e3b96acd..a5311f15f0 100644 --- a/paddle/operators/sequence_erase_op.cu +++ b/paddle/operators/sequence_erase_op.cu @@ -96,9 +96,8 @@ class SequenceEraseOpCUDAKernel : public framework::OpKernel { GetOutLod<<<(lod_len - 1) / PADDLE_CUDA_NUM_THREADS + 1, PADDLE_CUDA_NUM_THREADS, 0, stream>>>( num_erased_ptr, dev_in_lod_ptr, lod_len, dev_out_lod_ptr); - // Set LoD for output - thrust::host_vector out_lod0 = dev_out_lod; + std::vector out_lod0(dev_out_lod.begin(), dev_out_lod.end()); framework::LoD out_lod; out_lod.push_back(out_lod0); out->set_lod(out_lod); diff --git a/paddle/operators/sgd_op.cu b/paddle/operators/sgd_op.cu index 42f8f8b2f0..29f5aa3542 100644 --- a/paddle/operators/sgd_op.cu +++ b/paddle/operators/sgd_op.cu @@ -89,7 +89,7 @@ class SGDOpCUDAKernel : public framework::OpKernel { PADDLE_ENFORCE_EQ(in_height, out_dims[0]); auto& in_value = grad->value(); - auto& in_rows = grad->rows(); + framework::Vector in_rows(grad->rows()); int64_t in_row_numel = in_value.numel() / in_rows.size(); PADDLE_ENFORCE_EQ(in_row_numel, param_out->numel() / in_height); @@ -102,7 +102,7 @@ class SGDOpCUDAKernel : public framework::OpKernel { dim3 grid(1, in_rows.size()); SparseSGDFunctorKernel< T, 256><<>>( - in_data, in_rows.data(), learning_rate->data(), out_data, + in_data, in_rows.cuda_data(), learning_rate->data(), out_data, in_row_numel); } else { diff --git a/paddle/pybind/pybind.cc b/paddle/pybind/pybind.cc index 490397afdd..a880d9bdbc 100644 --- 
a/paddle/pybind/pybind.cc +++ b/paddle/pybind/pybind.cc @@ -124,44 +124,25 @@ PYBIND11_PLUGIN(core) { .def( "__init__", [](LoDTensor &instance, const std::vector> &lod) { -#ifndef PADDLE_WITH_CUDA - new (&instance) LoDTensor(lod); -#else - LoD new_lod; - new_lod.reserve(lod.size()); - std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod)); - new (&instance) LoDTensor(new_lod); -#endif + LoD new_lod; + new_lod.reserve(lod.size()); + std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod)); + new (&instance) LoDTensor(new_lod); }) .def("__init__", [](LoDTensor &instance) { new (&instance) LoDTensor(); }) .def("set_lod", [](LoDTensor &self, const std::vector> &lod) { -#ifndef PADDLE_WITH_CUDA - self.set_lod(lod); -#else LoD new_lod; new_lod.reserve(lod.size()); std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod)); self.set_lod(new_lod); -#endif }) .def("lod", [](LoDTensor &self) -> std::vector> { -#ifndef PADDLE_WITH_CUDA - return self.lod(); -#else - auto lod = self.lod(); - std::vector> new_lod; - new_lod.reserve(lod.size()); - std::transform(lod.begin(), lod.end(), std::back_inserter(new_lod), - [](Vector item) -> - std::vector { - std::vector v; - v.reserve(item.size()); - std::copy(item.begin(), item.end(), std::back_inserter(v)); - return v; - }); - return new_lod; -#endif + auto lod = self.lod(); + std::vector> new_lod; + new_lod.reserve(lod.size()); + std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod)); + return new_lod; }); py::class_(m, "SelectedRows") diff --git a/python/paddle/v2/fluid/tests/test_tensor.py b/python/paddle/v2/fluid/tests/test_tensor.py index d5cc235f58..0219bef42b 100644 --- a/python/paddle/v2/fluid/tests/test_tensor.py +++ b/python/paddle/v2/fluid/tests/test_tensor.py @@ -108,9 +108,31 @@ class TestTensor(unittest.TestCase): scope = core.Scope() place = core.CPUPlace() lod_py = [[0, 2, 5], [0, 2, 4, 5]] - lod_tensor = core.LoDTensor(lod_py) + lod_tensor = core.LoDTensor() lod_tensor.set_dims([5, 2, 3, 4]) 
+ lod_tensor.set_lod(lod_py) + lod_tensor.alloc_float(place) + tensor_array = numpy.array(lod_tensor) + tensor_array[0, 0, 0, 0] = 1.0 + tensor_array[0, 0, 0, 1] = 2.0 + lod_tensor.set(tensor_array, place) + + lod_v = numpy.array(lod_tensor) + self.assertAlmostEqual(1.0, lod_v[0, 0, 0, 0]) + self.assertAlmostEqual(2.0, lod_v[0, 0, 0, 1]) + self.assertListEqual(lod_py, lod_tensor.lod()) + + def test_lod_tensor_gpu_init(self): + if not core.is_compiled_with_cuda(): + return + scope = core.Scope() + place = core.CUDAPlace(0) + lod_py = [[0, 2, 5], [0, 2, 4, 5]] + lod_tensor = core.LoDTensor() + + lod_tensor.set_dims([5, 2, 3, 4]) + lod_tensor.set_lod(lod_py) lod_tensor.alloc_float(place) tensor_array = numpy.array(lod_tensor) tensor_array[0, 0, 0, 0] = 1.0 From ee97604d882d2a071d91967c0eb879c67822a985 Mon Sep 17 00:00:00 2001 From: Yang Yu Date: Wed, 31 Jan 2018 16:00:39 +0800 Subject: [PATCH 093/106] Add documentation generate script --- doc/api/v2/fluid/data_feeder.rst | 13 +- doc/api/v2/fluid/evaluator.rst | 28 +- doc/api/v2/fluid/executor.rst | 33 +- doc/api/v2/fluid/gen_doc.py | 109 +++++ doc/api/v2/fluid/gen_doc.sh | 7 + doc/api/v2/fluid/initializer.rst | 55 +-- doc/api/v2/fluid/io.rst | 61 ++- doc/api/v2/fluid/layers.rst | 653 ++++++++++++++++++++--------- doc/api/v2/fluid/nets.rst | 22 +- doc/api/v2/fluid/optimizer.rst | 65 ++- doc/api/v2/fluid/param_attr.rst | 22 +- doc/api/v2/fluid/profiler.rst | 25 +- doc/api/v2/fluid/regularizer.rst | 32 +- python/paddle/v2/fluid/__init__.py | 22 +- python/paddle/v2/fluid/profiler.py | 4 +- 15 files changed, 802 insertions(+), 349 deletions(-) create mode 100644 doc/api/v2/fluid/gen_doc.py create mode 100755 doc/api/v2/fluid/gen_doc.sh diff --git a/doc/api/v2/fluid/data_feeder.rst b/doc/api/v2/fluid/data_feeder.rst index 0fa78f7dfb..a591c7334f 100644 --- a/doc/api/v2/fluid/data_feeder.rst +++ b/doc/api/v2/fluid/data_feeder.rst @@ -1,9 +1,14 @@ +.. 
THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + =========== -DataFeeder +data_feeder =========== DataFeeder ------------ -.. automodule:: paddle.v2.fluid.data_feeder - :members: DataFeeder +---------- + +.. autoclass:: paddle.v2.fluid.data_feeder.DataFeeder + :members: :noindex: + diff --git a/doc/api/v2/fluid/evaluator.rst b/doc/api/v2/fluid/evaluator.rst index a23f3301d0..00dcecfd62 100644 --- a/doc/api/v2/fluid/evaluator.rst +++ b/doc/api/v2/fluid/evaluator.rst @@ -1,9 +1,21 @@ -=========== -Evaluator -=========== - -Evaluator ------------ -.. automodule:: paddle.v2.fluid.evaluator - :members: Evaluator +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +========= +evaluator +========= + +Accuracy +-------- + +.. autoclass:: paddle.v2.fluid.evaluator.Accuracy + :members: :noindex: + +ChunkEvaluator +-------------- + +.. autoclass:: paddle.v2.fluid.evaluator.ChunkEvaluator + :members: + :noindex: + diff --git a/doc/api/v2/fluid/executor.rst b/doc/api/v2/fluid/executor.rst index 3a283538c1..a028f6283f 100644 --- a/doc/api/v2/fluid/executor.rst +++ b/doc/api/v2/fluid/executor.rst @@ -1,9 +1,32 @@ -=========== -Executor -=========== +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +======== +executor +======== Executor +-------- + +.. autoclass:: paddle.v2.fluid.executor.Executor + :members: + :noindex: + +global_scope +------------ + +.. autofunction:: paddle.v2.fluid.executor.global_scope + :noindex: + +scope_guard ----------- -.. automodule:: paddle.v2.fluid.executor - :members: Executor + +.. autofunction:: paddle.v2.fluid.executor.scope_guard + :noindex: + +switch_scope +------------ + +.. 
autofunction:: paddle.v2.fluid.executor.switch_scope :noindex: + diff --git a/doc/api/v2/fluid/gen_doc.py b/doc/api/v2/fluid/gen_doc.py new file mode 100644 index 0000000000..a2147fd3f7 --- /dev/null +++ b/doc/api/v2/fluid/gen_doc.py @@ -0,0 +1,109 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function +import argparse +import sys +import types + +import paddle.v2.fluid as fluid + + +def parse_arg(): + parser = argparse.ArgumentParser() + parser.add_argument('--submodules', nargs="*") + parser.add_argument( + 'module', type=str, help='Generate the documentation of which module') + return parser.parse_args() + + +class DocGenerator(object): + def __init__(self, module_name, stream=sys.stdout): + self.stream = stream + self.module_name = module_name + if not hasattr(fluid, module_name): + raise ValueError("Cannot find fluid.{0}".format(module_name)) + else: + self.module = getattr(fluid, module_name) + self.stream.write('''.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! 
+ +''') + + self._print_header_(module_name, dot='=', is_title=True) + + def print_submodule(self, submodule_name): + submodule = getattr(self.module, submodule_name) + if submodule is None: + raise ValueError("Cannot find submodule {0}".format(submodule_name)) + self.print_section(submodule_name) + + for item in submodule.__all__: + self.print_item(item) + + def print_current_module(self): + for item in self.module.__all__: + self.print_item(item) + + def print_section(self, name): + self._print_header_(name, dot='=', is_title=False) + + def print_item(self, name): + item = getattr(self.module, name) + if isinstance(item, types.TypeType): + self.print_class(name) + elif isinstance(item, types.FunctionType): + self.print_method(name) + else: + raise RuntimeError("Unsupported item {0}".format(name)) + + def print_class(self, name): + self._print_header_(name, dot='-', is_title=False) + self.stream.write('''.. autoclass:: paddle.v2.fluid.{0}.{1} + :members: + :noindex: + +'''.format(self.module_name, name)) + + def print_method(self, name): + self._print_header_(name, dot='-', is_title=False) + self.stream.write('''.. 
autofunction:: paddle.v2.fluid.{0}.{1} + :noindex: + +'''.format(self.module_name, name)) + + def _print_header_(self, name, dot, is_title): + dot_line = dot * len(name) + if is_title: + self.stream.write(dot_line) + self.stream.write('\n') + self.stream.write(name) + self.stream.write('\n') + self.stream.write(dot_line) + self.stream.write('\n') + self.stream.write('\n') + + +def main(): + args = parse_arg() + gen = DocGenerator(args.module) + if args.submodules is None: + gen.print_current_module() + else: + for submodule_name in args.submodules: + gen.print_submodule(submodule_name) + + +if __name__ == '__main__': + main() diff --git a/doc/api/v2/fluid/gen_doc.sh b/doc/api/v2/fluid/gen_doc.sh new file mode 100755 index 0000000000..ba7b7ba8e5 --- /dev/null +++ b/doc/api/v2/fluid/gen_doc.sh @@ -0,0 +1,7 @@ +#!/bin/bash +python gen_doc.py layers --submodules control_flow device io nn ops tensor > layers.rst + +for module in io data_feeder evaluator executor initializer io nets optimizer param_attr profiler regularizer +do + python gen_doc.py ${module} > ${module}.rst +done diff --git a/doc/api/v2/fluid/initializer.rst b/doc/api/v2/fluid/initializer.rst index 8f587837e9..c38be033ff 100644 --- a/doc/api/v2/fluid/initializer.rst +++ b/doc/api/v2/fluid/initializer.rst @@ -1,50 +1,35 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + =========== -Initializer +initializer =========== +Constant +-------- - -Initializer ------------ -.. automodule:: paddle.v2.fluid.initializer - :members: Initializer - :noindex: - - - -ConstantInitializer -------------------- -.. automodule:: paddle.v2.fluid.initializer - :members: ConstantInitializer +.. autoclass:: paddle.v2.fluid.initializer.Constant + :members: :noindex: +Uniform +------- - -UniformInitializer ------------------- -.. automodule:: paddle.v2.fluid.initializer - :members: UniformInitializer - :noindex: - - - -NormalInitializer ------------------ -.. 
automodule:: paddle.v2.fluid.initializer - :members: NormalInitializer +.. autoclass:: paddle.v2.fluid.initializer.Uniform + :members: :noindex: +Normal +------ -XavierInitializer ------------------ -.. automodule:: paddle.v2.fluid.initializer - :members: XavierInitializer +.. autoclass:: paddle.v2.fluid.initializer.Normal + :members: :noindex: +Xavier +------ -MSRAInitializer ---------------- -.. automodule:: paddle.v2.fluid.initializer - :members: MSRAInitializer +.. autoclass:: paddle.v2.fluid.initializer.Xavier + :members: :noindex: diff --git a/doc/api/v2/fluid/io.rst b/doc/api/v2/fluid/io.rst index 67f68c4e9e..37c9c273e3 100644 --- a/doc/api/v2/fluid/io.rst +++ b/doc/api/v2/fluid/io.rst @@ -1,10 +1,61 @@ -=========== -IO -=========== +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! +== +io +== +save_vars +--------- -is_parameter +.. autofunction:: paddle.v2.fluid.io.save_vars + :noindex: + +save_params ----------- -.. autofunction:: paddle.v2.fluid.io.is_parameter + +.. autofunction:: paddle.v2.fluid.io.save_params + :noindex: + +save_persistables +----------------- + +.. autofunction:: paddle.v2.fluid.io.save_persistables + :noindex: + +load_vars +--------- + +.. autofunction:: paddle.v2.fluid.io.load_vars + :noindex: + +load_params +----------- + +.. autofunction:: paddle.v2.fluid.io.load_params :noindex: + +load_persistables +----------------- + +.. autofunction:: paddle.v2.fluid.io.load_persistables + :noindex: + +save_inference_model +-------------------- + +.. autofunction:: paddle.v2.fluid.io.save_inference_model + :noindex: + +load_inference_model +-------------------- + +.. autofunction:: paddle.v2.fluid.io.load_inference_model + :noindex: + +get_inference_program +--------------------- + +.. 
autofunction:: paddle.v2.fluid.io.get_inference_program + :noindex: + diff --git a/doc/api/v2/fluid/layers.rst b/doc/api/v2/fluid/layers.rst index 231ec2d4ba..e24613b94b 100644 --- a/doc/api/v2/fluid/layers.rst +++ b/doc/api/v2/fluid/layers.rst @@ -1,546 +1,799 @@ -========== -Layers -========== +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! +====== +layers +====== -fc ---- -.. autofunction:: paddle.v2.fluid.layers.fc +control_flow +============ + +split_lod_tensor +---------------- + +.. autofunction:: paddle.v2.fluid.layers.split_lod_tensor :noindex: -embedding ---------- -.. autofunction:: paddle.v2.fluid.layers.embedding +merge_lod_tensor +---------------- + +.. autofunction:: paddle.v2.fluid.layers.merge_lod_tensor :noindex: -dynamic_lstm ------------- -.. autofunction:: paddle.v2.fluid.layers.dynamic_lstm +BlockGuard +---------- + +.. autoclass:: paddle.v2.fluid.layers.BlockGuard + :members: :noindex: -dynamic_lstmp -------------- -.. autofunction:: paddle.v2.fluid.layers.dynamic_lstmp +BlockGuardWithCompletion +------------------------ + +.. autoclass:: paddle.v2.fluid.layers.BlockGuardWithCompletion + :members: :noindex: -dynamic_gru ------------ -.. autofunction:: paddle.v2.fluid.layers.dynamic_gru +StaticRNNMemoryLink +------------------- + +.. autoclass:: paddle.v2.fluid.layers.StaticRNNMemoryLink + :members: :noindex: -data ----- -.. autofunction:: paddle.v2.fluid.layers.data +WhileGuard +---------- + +.. autoclass:: paddle.v2.fluid.layers.WhileGuard + :members: :noindex: -mean ----- -.. autofunction:: paddle.v2.fluid.layers.mean +While +----- + +.. autoclass:: paddle.v2.fluid.layers.While + :members: :noindex: -mul ---- -.. autofunction:: paddle.v2.fluid.layers.mul +lod_rank_table +-------------- + +.. autofunction:: paddle.v2.fluid.layers.lod_rank_table :noindex: -elementwise_add ---------------- -.. autofunction:: paddle.v2.fluid.layers.elementwise_add +max_sequence_len +---------------- + +.. 
autofunction:: paddle.v2.fluid.layers.max_sequence_len :noindex: -elementwise_sub ---------------- -.. autofunction:: paddle.v2.fluid.layers.elementwise_sub +topk +---- + +.. autofunction:: paddle.v2.fluid.layers.topk :noindex: -elementwise_mul ---------------- -.. autofunction:: paddle.v2.fluid.layers.elementwise_mul +lod_tensor_to_array +------------------- + +.. autofunction:: paddle.v2.fluid.layers.lod_tensor_to_array :noindex: -elementwise_div ---------------- -.. autofunction:: paddle.v2.fluid.layers.elementwise_div +array_to_lod_tensor +------------------- + +.. autofunction:: paddle.v2.fluid.layers.array_to_lod_tensor :noindex: +increment +--------- -dropout -------- -.. autofunction:: paddle.v2.fluid.layers.dropout +.. autofunction:: paddle.v2.fluid.layers.increment :noindex: +array_write +----------- -reshape --------- -.. autofunction:: paddle.v2.fluid.layers.reshape +.. autofunction:: paddle.v2.fluid.layers.array_write :noindex: +create_array +------------ -sigmoid +.. autofunction:: paddle.v2.fluid.layers.create_array + :noindex: + +less_than --------- -.. autofunction:: paddle.v2.fluid.layers.sigmoid + +.. autofunction:: paddle.v2.fluid.layers.less_than :noindex: +array_read +---------- -scale ---------- -.. autofunction:: paddle.v2.fluid.layers.scale +.. autofunction:: paddle.v2.fluid.layers.array_read + :noindex: + +shrink_memory +------------- + +.. autofunction:: paddle.v2.fluid.layers.shrink_memory :noindex: +array_length +------------ -transpose +.. autofunction:: paddle.v2.fluid.layers.array_length + :noindex: + +IfElse +------ + +.. autoclass:: paddle.v2.fluid.layers.IfElse + :members: + :noindex: + +DynamicRNN +---------- + +.. autoclass:: paddle.v2.fluid.layers.DynamicRNN + :members: + :noindex: + +ConditionalBlock +---------------- + +.. autoclass:: paddle.v2.fluid.layers.ConditionalBlock + :members: + :noindex: + +StaticRNN --------- -.. autofunction:: paddle.v2.fluid.layers.transpose + +.. 
autoclass:: paddle.v2.fluid.layers.StaticRNN + :members: :noindex: +reorder_lod_tensor_by_rank +-------------------------- -sigmoid_cross_entropy_with_logits ---------------------------------- -.. autofunction:: paddle.v2.fluid.layers.esigmoid_cross_entropy_with_logits +.. autofunction:: paddle.v2.fluid.layers.reorder_lod_tensor_by_rank :noindex: +ParallelDo +---------- -cast +.. autoclass:: paddle.v2.fluid.layers.ParallelDo + :members: + :noindex: + +Print +----- + +.. autofunction:: paddle.v2.fluid.layers.Print + :noindex: + +device +====== + +get_places +---------- + +.. autofunction:: paddle.v2.fluid.layers.get_places + :noindex: + +io +== + +data ---- -.. autofunction:: paddle.v2.fluid.layers.cast + +.. autofunction:: paddle.v2.fluid.layers.data :noindex: +BlockGuardServ +-------------- -concat -------- -.. autofunction:: paddle.v2.fluid.layers.concat +.. autoclass:: paddle.v2.fluid.layers.BlockGuardServ + :members: :noindex: +ListenAndServ +------------- -sums +.. autoclass:: paddle.v2.fluid.layers.ListenAndServ + :members: + :noindex: + +Send ---- -.. autofunction:: paddle.v2.fluid.layers.sums + +.. autofunction:: paddle.v2.fluid.layers.Send :noindex: +nn +== -linear_chain_crf ----------------- -.. autofunction:: paddle.v2.fluid.layers.linear_chain_crf +fc +-- + +.. autofunction:: paddle.v2.fluid.layers.fc :noindex: +embedding +--------- -assign -------- .. autofunction:: paddle.v2.fluid.layers.embedding :noindex: +dynamic_lstm +------------ -split_lod_tensor ----------------- -.. autofunction:: paddle.v2.fluid.layers.split_lod_tensor +.. autofunction:: paddle.v2.fluid.layers.dynamic_lstm :noindex: +dynamic_lstmp +------------- -merge_lod_tensor +.. autofunction:: paddle.v2.fluid.layers.dynamic_lstmp + :noindex: + +dynamic_gru +----------- + +.. autofunction:: paddle.v2.fluid.layers.dynamic_gru + :noindex: + +gru_unit +-------- + +.. autofunction:: paddle.v2.fluid.layers.gru_unit + :noindex: + +linear_chain_crf ---------------- -.. 
autofunction:: paddle.v2.fluid.layers.merge_lod_tensor + +.. autofunction:: paddle.v2.fluid.layers.linear_chain_crf + :noindex: + +crf_decoding +------------ + +.. autofunction:: paddle.v2.fluid.layers.crf_decoding :noindex: cos_sim --------- +------- + .. autofunction:: paddle.v2.fluid.layers.cos_sim :noindex: - cross_entropy ------------- + .. autofunction:: paddle.v2.fluid.layers.cross_entropy :noindex: - - square_error_cost ----------------- + .. autofunction:: paddle.v2.fluid.layers.square_error_cost :noindex: - accuracy ---------- +-------- + .. autofunction:: paddle.v2.fluid.layers.accuracy :noindex: +chunk_eval +---------- + +.. autofunction:: paddle.v2.fluid.layers.chunk_eval + :noindex: sequence_conv ------------- + .. autofunction:: paddle.v2.fluid.layers.sequence_conv :noindex: - conv2d ------ + .. autofunction:: paddle.v2.fluid.layers.conv2d :noindex: - sequence_pool ------------- + .. autofunction:: paddle.v2.fluid.layers.sequence_pool :noindex: +pool2d +------ -sequence_first_step -------------------- -.. autofunction:: paddle.v2.fluid.layers.sequence_first_step +.. autofunction:: paddle.v2.fluid.layers.pool2d :noindex: +batch_norm +---------- + +.. autofunction:: paddle.v2.fluid.layers.batch_norm + :noindex: -sequence_last_step +beam_search_decode ------------------ -.. autofunction:: paddle.v2.fluid.layers.sequence_last_step + +.. autofunction:: paddle.v2.fluid.layers.beam_search_decode :noindex: +conv2d_transpose +---------------- -pool2d ------- -.. autofunction:: paddle.v2.fluid.layers.pool2d +.. autofunction:: paddle.v2.fluid.layers.conv2d_transpose :noindex: +sequence_expand +--------------- -batch_norm +.. autofunction:: paddle.v2.fluid.layers.sequence_expand + :noindex: + +lstm_unit +--------- + +.. autofunction:: paddle.v2.fluid.layers.lstm_unit + :noindex: + +reduce_sum ---------- -.. autofunction:: paddle.v2.fluid.layers.batch_norm + +.. autofunction:: paddle.v2.fluid.layers.reduce_sum + :noindex: + +reduce_mean +----------- + +.. 
autofunction:: paddle.v2.fluid.layers.reduce_mean :noindex: +reduce_max +---------- + +.. autofunction:: paddle.v2.fluid.layers.reduce_max + :noindex: -beam_search_decode +reduce_min +---------- + +.. autofunction:: paddle.v2.fluid.layers.reduce_min + :noindex: + +sequence_first_step +------------------- + +.. autofunction:: paddle.v2.fluid.layers.sequence_first_step + :noindex: + +sequence_last_step ------------------ -.. autofunction:: paddle.v2.fluid.layers.beam_search_decode + +.. autofunction:: paddle.v2.fluid.layers.sequence_last_step + :noindex: + +dropout +------- + +.. autofunction:: paddle.v2.fluid.layers.dropout :noindex: +split +----- -lod_rank_table --------------- -.. autofunction:: paddle.v2.fluid.layers.lod_rank_table +.. autofunction:: paddle.v2.fluid.layers.split :noindex: +ctc_greedy_decoder +------------------ -max_sequence_len ----------------- -.. autofunction:: paddle.v2.fluid.layers.max_sequence_len +.. autofunction:: paddle.v2.fluid.layers.ctc_greedy_decoder :noindex: +edit_distance +------------- -topk ------ -.. autofunction:: paddle.v2.fluid.layers.topk +.. autofunction:: paddle.v2.fluid.layers.edit_distance :noindex: +l2_normalize +------------ -lod_tensor_to_array -------------------- -.. autofunction:: paddle.v2.fluid.layers.lod_tensor_to_array +.. autofunction:: paddle.v2.fluid.layers.l2_normalize :noindex: +matmul +------ - -array_to_lod_tensor -------------------- -.. autofunction:: paddle.v2.fluid.layers.array_to_lod_tensor +.. autofunction:: paddle.v2.fluid.layers.matmul :noindex: +warpctc +------- +.. autofunction:: paddle.v2.fluid.layers.warpctc + :noindex: +sequence_reshape +---------------- -fill_constant -------------- -.. autofunction:: paddle.v2.fluid.layers.fill_constant +.. autofunction:: paddle.v2.fluid.layers.sequence_reshape :noindex: +transpose +--------- +.. autofunction:: paddle.v2.fluid.layers.transpose + :noindex: -fill_constant_batch_size_like ------------------------------ -.. 
autofunction:: paddle.v2.fluid.layers.fill_constant_batch_size_like +im2sequence +----------- + +.. autofunction:: paddle.v2.fluid.layers.im2sequence :noindex: +nce +--- -ones ----- -.. autofunction:: paddle.v2.fluid.layers.ones +.. autofunction:: paddle.v2.fluid.layers.nce :noindex: +beam_search +----------- -zeros ------ -.. autofunction:: paddle.v2.fluid.layers.zeros +.. autofunction:: paddle.v2.fluid.layers.beam_search :noindex: +row_conv +-------- -increment ---------- -.. autofunction:: paddle.v2.fluid.layers.increment +.. autofunction:: paddle.v2.fluid.layers.row_conv :noindex: +multiplex +--------- -array_write ------------ -.. autofunction:: paddle.v2.fluid.layers.array_write +.. autofunction:: paddle.v2.fluid.layers.multiplex :noindex: +ops +=== +mean +---- -create_array ------------- -.. autofunction:: paddle.v2.fluid.layers.create_array +.. autofunction:: paddle.v2.fluid.layers.mean :noindex: +mul +--- -less_than ---------- -.. autofunction:: paddle.v2.fluid.layers.less_than +.. autofunction:: paddle.v2.fluid.layers.mul :noindex: +reshape +------- -array_read ----------- -.. autofunction:: paddle.v2.fluid.layers.array_read +.. autofunction:: paddle.v2.fluid.layers.reshape :noindex: +scale +----- -shrink_memory --------------- -.. autofunction:: paddle.v2.fluid.layers.shrink_memory +.. autofunction:: paddle.v2.fluid.layers.scale :noindex: +sigmoid_cross_entropy_with_logits +--------------------------------- -array_length -------------- -.. autofunction:: paddle.v2.fluid.layers.array_length +.. autofunction:: paddle.v2.fluid.layers.sigmoid_cross_entropy_with_logits :noindex: +elementwise_add +--------------- -conv2d_transpose ----------------- -.. autofunction:: paddle.v2.fluid.layers.conv2d_transpose +.. autofunction:: paddle.v2.fluid.layers.elementwise_add :noindex: - -sequence_expand +elementwise_div --------------- -.. autofunction:: paddle.v2.fluid.layers.sequence_expand + +.. 
autofunction:: paddle.v2.fluid.layers.elementwise_div :noindex: +elementwise_sub +--------------- -gru_unit --------- -.. autofunction:: paddle.v2.fluid.layers.gru_unit +.. autofunction:: paddle.v2.fluid.layers.elementwise_sub :noindex: +elementwise_mul +--------------- -lstm_unit ---------- -.. autofunction:: paddle.v2.fluid.layers.lstm_unit +.. autofunction:: paddle.v2.fluid.layers.elementwise_mul :noindex: +elementwise_max +--------------- -sequence_softmax ----------------- -.. autofunction:: paddle.v2.fluid.layers.sequence_softmax +.. autofunction:: paddle.v2.fluid.layers.elementwise_max :noindex: +elementwise_min +--------------- -reduce_sum ----------- -.. autofunction:: paddle.v2.fluid.layers.reduce_sum +.. autofunction:: paddle.v2.fluid.layers.elementwise_min :noindex: +elementwise_pow +--------------- -reduce_mean ------------ -.. autofunction:: paddle.v2.fluid.layers.reduce_mean +.. autofunction:: paddle.v2.fluid.layers.elementwise_pow :noindex: +clip +---- -reduce_max ----------- -.. autofunction:: paddle.v2.fluid.layers.reduce_max +.. autofunction:: paddle.v2.fluid.layers.clip :noindex: +clip_by_norm +------------ -reduce_min ----------- -.. autofunction:: paddle.v2.fluid.layers.reduce_min +.. autofunction:: paddle.v2.fluid.layers.clip_by_norm :noindex: +sequence_softmax +---------------- -split ------ -.. autofunction:: paddle.v2.fluid.layers.split +.. autofunction:: paddle.v2.fluid.layers.sequence_softmax :noindex: +sigmoid +------- -matmul ------- -.. autofunction:: paddle.v2.fluid.layers.matmul +.. autofunction:: paddle.v2.fluid.layers.sigmoid :noindex: logsigmoid ---------- + .. autofunction:: paddle.v2.fluid.layers.logsigmoid :noindex: exp --- + .. autofunction:: paddle.v2.fluid.layers.exp :noindex: relu ---- + .. autofunction:: paddle.v2.fluid.layers.relu :noindex: tanh ---- + .. autofunction:: paddle.v2.fluid.layers.tanh :noindex: tanh_shrink ----------- + .. autofunction:: paddle.v2.fluid.layers.tanh_shrink :noindex: softshrink ---------- + .. 
autofunction:: paddle.v2.fluid.layers.softshrink :noindex: sqrt ---- + .. autofunction:: paddle.v2.fluid.layers.sqrt :noindex: abs ----- +--- + .. autofunction:: paddle.v2.fluid.layers.abs :noindex: ceil ---- + .. autofunction:: paddle.v2.fluid.layers.ceil :noindex: floor ----- + .. autofunction:: paddle.v2.fluid.layers.floor :noindex: round ----- + .. autofunction:: paddle.v2.fluid.layers.round :noindex: reciprocal ---------- + .. autofunction:: paddle.v2.fluid.layers.reciprocal :noindex: log --- + .. autofunction:: paddle.v2.fluid.layers.log :noindex: square ------ + .. autofunction:: paddle.v2.fluid.layers.square :noindex: softplus -------- + .. autofunction:: paddle.v2.fluid.layers.softplus :noindex: softsign ---------- +-------- + .. autofunction:: paddle.v2.fluid.layers.softsign :noindex: brelu ----- + .. autofunction:: paddle.v2.fluid.layers.brelu :noindex: leaky_relu ---------- + .. autofunction:: paddle.v2.fluid.layers.leaky_relu :noindex: soft_relu --------- + .. autofunction:: paddle.v2.fluid.layers.soft_relu :noindex: elu ----- +--- + .. autofunction:: paddle.v2.fluid.layers.elu :noindex: relu6 ----- + .. autofunction:: paddle.v2.fluid.layers.relu6 :noindex: pow ----- +--- + .. autofunction:: paddle.v2.fluid.layers.pow :noindex: +stanh +----- + +.. autofunction:: paddle.v2.fluid.layers.stanh + :noindex: + hard_shrink ----------- + .. autofunction:: paddle.v2.fluid.layers.hard_shrink :noindex: thresholded_relu ---------------- + .. autofunction:: paddle.v2.fluid.layers.thresholded_relu :noindex: hard_sigmoid -------------- +------------ + .. autofunction:: paddle.v2.fluid.layers.hard_sigmoid :noindex: swish ------- +----- + .. autofunction:: paddle.v2.fluid.layers.swish :noindex: -im2sequence +tensor +====== + +create_tensor +------------- + +.. autofunction:: paddle.v2.fluid.layers.create_tensor + :noindex: + +create_parameter +---------------- + +.. 
autofunction:: paddle.v2.fluid.layers.create_parameter + :noindex: + +create_global_var +----------------- + +.. autofunction:: paddle.v2.fluid.layers.create_global_var + :noindex: + +cast +---- + +.. autofunction:: paddle.v2.fluid.layers.cast + :noindex: + +concat ------ -.. autofunction:: paddle.v2.fluid.layers.im2sequence + +.. autofunction:: paddle.v2.fluid.layers.concat :noindex: -edit_distance ---------------- -.. autofunction:: paddle.v2.fluid.layers.edit_distance_error +sums +---- + +.. autofunction:: paddle.v2.fluid.layers.sums :noindex: -ctc_greedy_decoder ---------------- -.. autofunction:: paddle.v2.fluid.layers.ctc_greedy_decoder +assign +------ + +.. autofunction:: paddle.v2.fluid.layers.assign :noindex: -l2_normalize ------------- -.. autofunction:: paddle.v2.fluid.layers.l2_normalize +fill_constant_batch_size_like +----------------------------- + +.. autofunction:: paddle.v2.fluid.layers.fill_constant_batch_size_like :noindex: -sequence_reshape ----------------- -.. autofunction:: paddle.v2.fluid.layers.sequence_reshape +fill_constant +------------- + +.. autofunction:: paddle.v2.fluid.layers.fill_constant :noindex: -row_conv --------- -.. autofunction:: paddle.v2.fluid.layers.row_conv +ones +---- + +.. autofunction:: paddle.v2.fluid.layers.ones :noindex: -multiplex ---------- -.. autofunction:: paddle.v2.fluid.layers.multiplex +zeros +----- + +.. autofunction:: paddle.v2.fluid.layers.zeros :noindex: + diff --git a/doc/api/v2/fluid/nets.rst b/doc/api/v2/fluid/nets.rst index 500019bc50..015581b766 100644 --- a/doc/api/v2/fluid/nets.rst +++ b/doc/api/v2/fluid/nets.rst @@ -1,33 +1,31 @@ -=========== -Nets -=========== +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +==== +nets +==== simple_img_conv_pool -------------------- -.. autofunction:: paddle.v2.fluid.nets.simple_img_conv_pool - :noindex: - -img_conv_group ---------------- -.. autofunction:: paddle.v2.fluid.nets.img_conv_group +.. 
autofunction:: paddle.v2.fluid.nets.simple_img_conv_pool :noindex: - sequence_conv_pool ------------------ + .. autofunction:: paddle.v2.fluid.nets.sequence_conv_pool :noindex: - glu --- + .. autofunction:: paddle.v2.fluid.nets.glu :noindex: - scaled_dot_product_attention ---------------------------- + .. autofunction:: paddle.v2.fluid.nets.scaled_dot_product_attention :noindex: diff --git a/doc/api/v2/fluid/optimizer.rst b/doc/api/v2/fluid/optimizer.rst index 19b4940f08..1691ebb9a7 100644 --- a/doc/api/v2/fluid/optimizer.rst +++ b/doc/api/v2/fluid/optimizer.rst @@ -1,54 +1,49 @@ -=========== -Optimizer -=========== - -Optimizer ------------ -.. automodule:: paddle.v2.fluid.optimizer - :members: Optimizer - :noindex: +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! +========= +optimizer +========= -SGDOptimizer ------------ -.. automodule:: paddle.v2.fluid.optimizer - :members: SGDOptimizer - :noindex: +SGD +--- +.. autoclass:: paddle.v2.fluid.optimizer.SGD + :members: + :noindex: +Momentum +-------- -MomentumOptimizer ------------------ -.. automodule:: paddle.v2.fluid.optimizer - :members: MomentumOptimizer +.. autoclass:: paddle.v2.fluid.optimizer.Momentum + :members: :noindex: +Adagrad +------- - -AdagradOptimizer ----------------- -.. automodule:: paddle.v2.fluid.optimizer - :members: AdagradOptimizer +.. autoclass:: paddle.v2.fluid.optimizer.Adagrad + :members: :noindex: +Adam +---- -AdamOptimizer -------------- -.. automodule:: paddle.v2.fluid.optimizer - :members: AdamOptimizer +.. autoclass:: paddle.v2.fluid.optimizer.Adam + :members: :noindex: +Adamax +------ -AdamaxOptimizer ------------ -.. automodule:: paddle.v2.fluid.optimizer - :members: AdamaxOptimizer +.. autoclass:: paddle.v2.fluid.optimizer.Adamax + :members: :noindex: +DecayedAdagrad +-------------- -DecayedAdagradOptimizer ------------------------ -.. automodule:: paddle.v2.fluid.optimizer - :members: DecayedAdagradOptimizer +.. 
autoclass:: paddle.v2.fluid.optimizer.DecayedAdagrad + :members: :noindex: diff --git a/doc/api/v2/fluid/param_attr.rst b/doc/api/v2/fluid/param_attr.rst index ca0c8af9e8..8083d0d858 100644 --- a/doc/api/v2/fluid/param_attr.rst +++ b/doc/api/v2/fluid/param_attr.rst @@ -1,11 +1,21 @@ -=========== +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +========== +param_attr +========== + ParamAttr -=========== +--------- +.. autoclass:: paddle.v2.fluid.param_attr.ParamAttr + :members: + :noindex: +WeightNormParamAttr +------------------- -ParamAttr ------------ -.. automodule:: paddle.v2.fluid.param_attr - :members: ParamAttr +.. autoclass:: paddle.v2.fluid.param_attr.WeightNormParamAttr + :members: :noindex: + diff --git a/doc/api/v2/fluid/profiler.rst b/doc/api/v2/fluid/profiler.rst index 7d4042d1f4..4a1ff7cb69 100644 --- a/doc/api/v2/fluid/profiler.rst +++ b/doc/api/v2/fluid/profiler.rst @@ -1,10 +1,25 @@ -=========== -Profiler -=========== +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! +======== +profiler +======== +cuda_profiler +------------- -Profiler ------------ .. autofunction:: paddle.v2.fluid.profiler.cuda_profiler :noindex: + +reset_profiler +-------------- + +.. autofunction:: paddle.v2.fluid.profiler.reset_profiler + :noindex: + +profiler +-------- + +.. autofunction:: paddle.v2.fluid.profiler.profiler + :noindex: + diff --git a/doc/api/v2/fluid/regularizer.rst b/doc/api/v2/fluid/regularizer.rst index 868e225ed3..2c17d15599 100644 --- a/doc/api/v2/fluid/regularizer.rst +++ b/doc/api/v2/fluid/regularizer.rst @@ -1,25 +1,27 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + =========== -Regularizer +regularizer =========== -WeightDecayRegularizer ----------------------- -.. 
automodule:: paddle.v2.fluid.regularizer - :members: WeightDecayRegularizer - :noindex: - +append_regularization_ops +------------------------- -L2DecayRegularizer ------------------- -.. automodule:: paddle.v2.fluid.regularizer - :members: L2DecayRegularizer +.. autofunction:: paddle.v2.fluid.regularizer.append_regularization_ops :noindex: +L1Decay +------- +.. autoclass:: paddle.v2.fluid.regularizer.L1Decay + :members: + :noindex: -L1DecayRegularizer -------------------- -.. automodule:: paddle.v2.fluid.regularizer - :members: L1DecayRegularizer +L2Decay +------- +.. autoclass:: paddle.v2.fluid.regularizer.L2Decay + :members: + :noindex: diff --git a/python/paddle/v2/fluid/__init__.py b/python/paddle/v2/fluid/__init__.py index 18c8343d09..f52346c3b5 100644 --- a/python/paddle/v2/fluid/__init__.py +++ b/python/paddle/v2/fluid/__init__.py @@ -36,28 +36,16 @@ from distribute_transpiler import DistributeTranspiler from distribute_transpiler_simple import SimpleDistributeTranspiler import clip from memory_optimization_transpiler import memory_optimize +import profiler Tensor = LoDTensor __all__ = framework.__all__ + executor.__all__ + [ - 'io', - 'initializer', - 'layers', - 'nets', - 'optimizer', - 'learning_rate_decay', - 'backward', - 'regularizer', - 'LoDTensor', - 'CPUPlace', - 'CUDAPlace', - 'Tensor', + 'io', 'initializer', 'layers', 'nets', 'optimizer', 'learning_rate_decay', + 'backward', 'regularizer', 'LoDTensor', 'CPUPlace', 'CUDAPlace', 'Tensor', 'ParamAttr' - 'DataFeeder', - 'clip', - 'SimpleDistributeTranspiler', - 'DistributeTranspiler', - 'memory_optimize', + 'DataFeeder', 'clip', 'SimpleDistributeTranspiler', 'DistributeTranspiler', + 'memory_optimize', 'profiler' ] diff --git a/python/paddle/v2/fluid/profiler.py b/python/paddle/v2/fluid/profiler.py index 51c1c8aa70..d4a2cd7eea 100644 --- a/python/paddle/v2/fluid/profiler.py +++ b/python/paddle/v2/fluid/profiler.py @@ -12,11 +12,11 @@ # See the License for the specific language governing permissions 
and # limitations under the License. -import paddle.v2.fluid.core as core +import core from contextlib import contextmanager import os -__all__ = ['CudaProfiler'] +__all__ = ['cuda_profiler', 'reset_profiler', 'profiler'] NVPROF_CONFIG = [ "gpustarttimestamp", From b148f065a8d88c944c354eaea0e31a3da5fde99c Mon Sep 17 00:00:00 2001 From: Yang Yu Date: Wed, 31 Jan 2018 13:31:41 +0800 Subject: [PATCH 094/106] Make Fit a line a normal unittest --- .../v2/fluid/tests/book/test_fit_a_line.py | 88 +++++++++++++------ 1 file changed, 59 insertions(+), 29 deletions(-) diff --git a/python/paddle/v2/fluid/tests/book/test_fit_a_line.py b/python/paddle/v2/fluid/tests/book/test_fit_a_line.py index 0b954c60b6..27f34b1733 100644 --- a/python/paddle/v2/fluid/tests/book/test_fit_a_line.py +++ b/python/paddle/v2/fluid/tests/book/test_fit_a_line.py @@ -12,44 +12,74 @@ # See the License for the specific language governing permissions and # limitations under the License. -import numpy as np import paddle.v2 as paddle import paddle.v2.fluid as fluid +import contextlib +import unittest -x = fluid.layers.data(name='x', shape=[13], dtype='float32') -y_predict = fluid.layers.fc(input=x, size=1, act=None) +def main(use_cuda): + if use_cuda and not fluid.core.is_compiled_with_cuda(): + return -y = fluid.layers.data(name='y', shape=[1], dtype='float32') + x = fluid.layers.data(name='x', shape=[13], dtype='float32') -cost = fluid.layers.square_error_cost(input=y_predict, label=y) -avg_cost = fluid.layers.mean(x=cost) + y_predict = fluid.layers.fc(input=x, size=1, act=None) -sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001) -sgd_optimizer.minimize(avg_cost) + y = fluid.layers.data(name='y', shape=[1], dtype='float32') -BATCH_SIZE = 20 + cost = fluid.layers.square_error_cost(input=y_predict, label=y) + avg_cost = fluid.layers.mean(x=cost) -train_reader = paddle.batch( - paddle.reader.shuffle( - paddle.dataset.uci_housing.train(), buf_size=500), - batch_size=BATCH_SIZE) + sgd_optimizer = 
fluid.optimizer.SGD(learning_rate=0.001) + sgd_optimizer.minimize(avg_cost) -place = fluid.CPUPlace() -feeder = fluid.DataFeeder(place=place, feed_list=[x, y]) -exe = fluid.Executor(place) + BATCH_SIZE = 20 -exe.run(fluid.default_startup_program()) + train_reader = paddle.batch( + paddle.reader.shuffle( + paddle.dataset.uci_housing.train(), buf_size=500), + batch_size=BATCH_SIZE) -PASS_NUM = 100 -for pass_id in range(PASS_NUM): - fluid.io.save_persistables(exe, "./fit_a_line.model/") - fluid.io.load_persistables(exe, "./fit_a_line.model/") - for data in train_reader(): - avg_loss_value, = exe.run(fluid.default_main_program(), - feed=feeder.feed(data), - fetch_list=[avg_cost]) - print(avg_loss_value) - if avg_loss_value[0] < 10.0: - exit(0) # if avg cost less than 10.0, we think our code is good. -exit(1) + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + feeder = fluid.DataFeeder(place=place, feed_list=[x, y]) + exe = fluid.Executor(place) + + exe.run(fluid.default_startup_program()) + + PASS_NUM = 100 + for pass_id in range(PASS_NUM): + fluid.io.save_persistables(exe, "./fit_a_line.model/") + fluid.io.load_persistables(exe, "./fit_a_line.model/") + for data in train_reader(): + avg_loss_value, = exe.run(fluid.default_main_program(), + feed=feeder.feed(data), + fetch_list=[avg_cost]) + print(avg_loss_value) + if avg_loss_value[0] < 10.0: + return + raise AssertionError("Fit a line cost is too large, {0:2.2}".format( + avg_loss_value[0])) + + +class TestFitALine(unittest.TestCase): + def test_cpu(self): + with self.program_scope_guard(): + main(use_cuda=False) + + def test_cuda(self): + with self.program_scope_guard(): + main(use_cuda=True) + + @contextlib.contextmanager + def program_scope_guard(self): + prog = fluid.Program() + startup_prog = fluid.Program() + scope = fluid.core.Scope() + with fluid.scope_guard(scope): + with fluid.program_guard(prog, startup_prog): + yield + + +if __name__ == '__main__': + unittest.main() From 
1b1f305babc3c91d0761814306df0004620be309 Mon Sep 17 00:00:00 2001 From: Yang Yu Date: Wed, 31 Jan 2018 13:07:19 +0800 Subject: [PATCH 095/106] Make image_classification as a normal python unittest --- .../paddle/v2/fluid/tests/book/CMakeLists.txt | 4 +- .../book/test_image_classification_train.py | 143 +++++++++++------- 2 files changed, 89 insertions(+), 58 deletions(-) diff --git a/python/paddle/v2/fluid/tests/book/CMakeLists.txt b/python/paddle/v2/fluid/tests/book/CMakeLists.txt index dda02c03fd..a870478db8 100644 --- a/python/paddle/v2/fluid/tests/book/CMakeLists.txt +++ b/python/paddle/v2/fluid/tests/book/CMakeLists.txt @@ -1,9 +1,7 @@ file(GLOB TEST_OPS RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "test_*.py") string(REPLACE ".py" "" TEST_OPS "${TEST_OPS}") -list(REMOVE_ITEM TEST_OPS test_image_classification_train test_recognize_digits) -py_test(test_image_classification_train_resnet SRCS test_image_classification_train.py ARGS resnet) -py_test(test_image_classification_train_vgg SRCS test_image_classification_train.py ARGS vgg) +list(REMOVE_ITEM TEST_OPS test_recognize_digits) py_test(test_recognize_digits_mlp_cpu SRCS test_recognize_digits.py ARGS mlp) diff --git a/python/paddle/v2/fluid/tests/book/test_image_classification_train.py b/python/paddle/v2/fluid/tests/book/test_image_classification_train.py index 30582a21d0..a4168d16db 100644 --- a/python/paddle/v2/fluid/tests/book/test_image_classification_train.py +++ b/python/paddle/v2/fluid/tests/book/test_image_classification_train.py @@ -14,10 +14,10 @@ from __future__ import print_function -import sys - import paddle.v2 as paddle import paddle.v2.fluid as fluid +import unittest +import contextlib def resnet_cifar10(input, depth=32): @@ -89,56 +89,89 @@ def vgg16_bn_drop(input): return fc2 -classdim = 10 -data_shape = [3, 32, 32] - -images = fluid.layers.data(name='pixel', shape=data_shape, dtype='float32') -label = fluid.layers.data(name='label', shape=[1], dtype='int64') - -net_type = "vgg" -if len(sys.argv) 
>= 2: - net_type = sys.argv[1] - -if net_type == "vgg": - print("train vgg net") - net = vgg16_bn_drop(images) -elif net_type == "resnet": - print("train resnet") - net = resnet_cifar10(images, 32) -else: - raise ValueError("%s network is not supported" % net_type) - -predict = fluid.layers.fc(input=net, size=classdim, act='softmax') -cost = fluid.layers.cross_entropy(input=predict, label=label) -avg_cost = fluid.layers.mean(x=cost) - -optimizer = fluid.optimizer.Adam(learning_rate=0.001) -opts = optimizer.minimize(avg_cost) - -accuracy = fluid.evaluator.Accuracy(input=predict, label=label) - -BATCH_SIZE = 128 -PASS_NUM = 1 - -train_reader = paddle.batch( - paddle.reader.shuffle( - paddle.dataset.cifar.train10(), buf_size=128 * 10), - batch_size=BATCH_SIZE) - -place = fluid.CPUPlace() -exe = fluid.Executor(place) -feeder = fluid.DataFeeder(place=place, feed_list=[images, label]) -exe.run(fluid.default_startup_program()) - -for pass_id in range(PASS_NUM): - accuracy.reset(exe) - for data in train_reader(): - loss, acc = exe.run(fluid.default_main_program(), - feed=feeder.feed(data), - fetch_list=[avg_cost] + accuracy.metrics) - pass_acc = accuracy.eval(exe) - print("loss:" + str(loss) + " acc:" + str(acc) + " pass_acc:" + str( - pass_acc)) - # this model is slow, so if we can train two mini batch, we think it works properly. 
- exit(0) -exit(1) +def main(net_type, use_cuda): + if use_cuda and not fluid.core.is_compiled_with_cuda(): + return + + classdim = 10 + data_shape = [3, 32, 32] + + images = fluid.layers.data(name='pixel', shape=data_shape, dtype='float32') + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + + if net_type == "vgg": + print("train vgg net") + net = vgg16_bn_drop(images) + elif net_type == "resnet": + print("train resnet") + net = resnet_cifar10(images, 32) + else: + raise ValueError("%s network is not supported" % net_type) + + predict = fluid.layers.fc(input=net, size=classdim, act='softmax') + cost = fluid.layers.cross_entropy(input=predict, label=label) + avg_cost = fluid.layers.mean(x=cost) + + optimizer = fluid.optimizer.Adam(learning_rate=0.001) + optimizer.minimize(avg_cost) + + accuracy = fluid.evaluator.Accuracy(input=predict, label=label) + + BATCH_SIZE = 128 + PASS_NUM = 1 + + train_reader = paddle.batch( + paddle.reader.shuffle( + paddle.dataset.cifar.train10(), buf_size=128 * 10), + batch_size=BATCH_SIZE) + + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + exe = fluid.Executor(place) + feeder = fluid.DataFeeder(place=place, feed_list=[images, label]) + exe.run(fluid.default_startup_program()) + + loss = 0.0 + for pass_id in range(PASS_NUM): + accuracy.reset(exe) + for data in train_reader(): + loss, acc = exe.run(fluid.default_main_program(), + feed=feeder.feed(data), + fetch_list=[avg_cost] + accuracy.metrics) + pass_acc = accuracy.eval(exe) + print("loss:" + str(loss) + " acc:" + str(acc) + " pass_acc:" + str( + pass_acc)) + return + + raise AssertionError( + "Image classification loss is too large, {0:2.2}".format(loss)) + + +class TestImageClassification(unittest.TestCase): + def test_vgg_cuda(self): + with self.scope_prog_guard(): + main('vgg', use_cuda=True) + + def test_resnet_cuda(self): + with self.scope_prog_guard(): + main('resnet', use_cuda=True) + + def test_vgg_cpu(self): + with self.scope_prog_guard(): + 
main('vgg', use_cuda=False) + + def test_resnet_cpu(self): + with self.scope_prog_guard(): + main('resnet', use_cuda=False) + + @contextlib.contextmanager + def scope_prog_guard(self): + prog = fluid.Program() + startup_prog = fluid.Program() + scope = fluid.core.Scope() + with fluid.scope_guard(scope): + with fluid.program_guard(prog, startup_prog): + yield + + +if __name__ == '__main__': + unittest.main() From e49b8b9c556016c43f64d73045208b62470df92a Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Wed, 31 Jan 2018 19:46:24 +0800 Subject: [PATCH 096/106] refine feed_op --- paddle/operators/feed_op.cc | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/paddle/operators/feed_op.cc b/paddle/operators/feed_op.cc index d738e1850c..789d01e002 100644 --- a/paddle/operators/feed_op.cc +++ b/paddle/operators/feed_op.cc @@ -52,7 +52,11 @@ class FeedOp : public framework::OperatorBase { platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); auto &dev_ctx = *pool.Get(place); - framework::Copy(feed_item, place, dev_ctx, out_item); + if (platform::is_same_place(feed_item.place(), place)) { + out_item->ShareDataWith(feed_item); + } else { + framework::Copy(feed_item, place, dev_ctx, out_item); + } out_item->set_lod(feed_item.lod()); } }; From 0d550ea15618885df4153d733d525b901e7af05d Mon Sep 17 00:00:00 2001 From: qingqing01 Date: Wed, 31 Jan 2018 19:27:44 +0800 Subject: [PATCH 097/106] Make parallel tests bind to different GPU. 
--- paddle/scripts/docker/test.sh | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) create mode 100755 paddle/scripts/docker/test.sh diff --git a/paddle/scripts/docker/test.sh b/paddle/scripts/docker/test.sh new file mode 100755 index 0000000000..8180737a8f --- /dev/null +++ b/paddle/scripts/docker/test.sh @@ -0,0 +1,30 @@ +#!/bin/bash +set -e + +# the number of process to run tests +NUM_PROC=6 + +# calculate and set the memory usage for each process +MEM_USAGE=$(printf "%.2f" `echo "scale=5; 1.0 / $NUM_PROC" | bc`) +export FLAGS_fraction_of_gpu_memory_to_use=$MEM_USAGE + +# get the CUDA device count +CUDA_DEVICE_COUNT=$(nvidia-smi -L | wc -l) + +for (( i = 0; i < $NUM_PROC; i++ )); do + cuda_list=() + for (( j = 0; j < $CUDA_DEVICE_COUNT; j++ )); do + s=$[i+j] + n=$[s%CUDA_DEVICE_COUNT] + if [ $j -eq 0 ]; then + cuda_list=("$n") + else + cuda_list="$cuda_list,$n" + fi + done + echo $cuda_list + # CUDA_VISIBLE_DEVICES http://acceleware.com/blog/cudavisibledevices-masking-gpus + # ctest -I https://cmake.org/cmake/help/v3.0/manual/ctest.1.html?highlight=ctest + env CUDA_VISIBLE_DEVICES=$cuda_list ctest -I $i,,$NUM_PROC --output-on-failure & +done +wait From b3ecdafe7e281579065b54635095b45b01dec6df Mon Sep 17 00:00:00 2001 From: helinwang Date: Tue, 30 Jan 2018 18:27:13 -0800 Subject: [PATCH 098/106] CI build: take -DCUDA_ARCH_NAME from environment variable --- paddle/scripts/docker/build.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/paddle/scripts/docker/build.sh b/paddle/scripts/docker/build.sh index fbae37b2ca..8369ded8cb 100644 --- a/paddle/scripts/docker/build.sh +++ b/paddle/scripts/docker/build.sh @@ -40,6 +40,7 @@ function cmake_gen() { -DWITH_MKL=${WITH_MKL:-ON} -DWITH_AVX=${WITH_AVX:-OFF} -DWITH_GOLANG=${WITH_GOLANG:-ON} + -DCUDA_ARCH_NAME=${CUDA_ARCH_NAME:-All} -DWITH_SWIG_PY=ON -DWITH_C_API=${WITH_C_API:-OFF} -DWITH_PYTHON=${WITH_PYTHON:-ON} @@ -62,6 +63,7 @@ EOF -DWITH_MKL=${WITH_MKL:-ON} \ -DWITH_AVX=${WITH_AVX:-OFF} \ 
-DWITH_GOLANG=${WITH_GOLANG:-ON} \ + -DCUDA_ARCH_NAME=${CUDA_ARCH_NAME:-All} \ -DWITH_SWIG_PY=${WITH_SWIG_PY:-ON} \ -DWITH_C_API=${WITH_C_API:-OFF} \ -DWITH_PYTHON=${WITH_PYTHON:-ON} \ From 3b5588a814bda4bff940f4cee425455a0581ad23 Mon Sep 17 00:00:00 2001 From: Yancey1989 Date: Thu, 1 Feb 2018 09:46:06 +0800 Subject: [PATCH 099/106] Fix default build_type env --- paddle/scripts/docker/build.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/paddle/scripts/docker/build.sh b/paddle/scripts/docker/build.sh index fbae37b2ca..48c8e26c93 100644 --- a/paddle/scripts/docker/build.sh +++ b/paddle/scripts/docker/build.sh @@ -32,7 +32,7 @@ function cmake_gen() { cat < Date: Thu, 1 Feb 2018 12:05:01 +0800 Subject: [PATCH 100/106] update by comment --- paddle/scripts/docker/build.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/paddle/scripts/docker/build.sh b/paddle/scripts/docker/build.sh index 48c8e26c93..2ed237b5b0 100644 --- a/paddle/scripts/docker/build.sh +++ b/paddle/scripts/docker/build.sh @@ -32,7 +32,7 @@ function cmake_gen() { cat < Date: Thu, 1 Feb 2018 12:57:46 +0800 Subject: [PATCH 101/106] Fix compilie error --- paddle/operators/listen_and_serv_op.cc | 2 +- paddle/operators/send_op.cc | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/paddle/operators/listen_and_serv_op.cc b/paddle/operators/listen_and_serv_op.cc index 5745938ed9..099f6b2373 100644 --- a/paddle/operators/listen_and_serv_op.cc +++ b/paddle/operators/listen_and_serv_op.cc @@ -204,4 +204,4 @@ from send_op and send back variables to recv_op. 
namespace ops = paddle::operators; REGISTER_OPERATOR(listen_and_serv, ops::ListenAndServOp, - ops::ListenAndServOpMaker); \ No newline at end of file + ops::ListenAndServOpMaker); diff --git a/paddle/operators/send_op.cc b/paddle/operators/send_op.cc index 291d19ba8b..ee0f268b0e 100644 --- a/paddle/operators/send_op.cc +++ b/paddle/operators/send_op.cc @@ -65,9 +65,9 @@ class SendOp : public framework::OperatorBase { if (outs.size() > 0) { for (size_t i = 0; i < outs.size(); i++) { VLOG(3) << "getting " << outs[i] << " from " << epmap[i]; - client_.AsyncGetVariable(epmap[i], ctx, scope, outs[i]); + rpc_client->AsyncGetVariable(epmap[i], ctx, scope, outs[i]); } - PADDLE_ENFORCE(client_.Wait()); + PADDLE_ENFORCE(rpc_client->Wait()); } } }; From 1830e2a01da528dc03ebba334bad9f418074b770 Mon Sep 17 00:00:00 2001 From: gongweibao Date: Thu, 1 Feb 2018 17:43:03 +0800 Subject: [PATCH 102/106] fix bugs --- doc/howto/usage/cluster/cluster_train_cn.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/howto/usage/cluster/cluster_train_cn.md b/doc/howto/usage/cluster/cluster_train_cn.md index c2fc86687d..0f3db59607 100644 --- a/doc/howto/usage/cluster/cluster_train_cn.md +++ b/doc/howto/usage/cluster/cluster_train_cn.md @@ -92,11 +92,11 @@ paddle.init( 参数说明 - use_gpu: **可选,默认False**,是否启用GPU训练 -- trainer_count:**必选,默认1**,当前训练任务trainer总个数 +- trainer_count:**必选,默认1**,当前trainer的线程数目 - port:**必选,默认7164**,连接到pserver的端口 - ports_num:**必选,默认1**,连接到pserver的端口个数 - ports_num_for_sparse:**必选,默认0**,和pserver之间用于稀疏类型参数通信的端口个数 -- num_gradient_servers:**必选,默认1**,当前训练任务pserver总数 +- num_gradient_servers:**必选,默认1**,当前训练任务trainer总数 - trainer_id:**必选,默认0**,每个trainer的唯一ID,从0开始的整数 - pservers:**必选,默认127.0.0.1**,当前训练任务启动的pserver的IP列表,多个IP使用“,”隔开 From 2fb280c9f2648d43499839d448e589e71b2b20b0 Mon Sep 17 00:00:00 2001 From: kexinzhao Date: Thu, 1 Feb 2018 01:52:51 -0800 Subject: [PATCH 103/106] Revise python save load api using new load/save op (#7995) * initial commit * add 
get_parameters method * add get_parameters method * small fix * address comments * address comments * address comments * fix --- python/paddle/v2/fluid/framework.py | 3 +- python/paddle/v2/fluid/io.py | 141 +++++++++++++++++++--------- 2 files changed, 97 insertions(+), 47 deletions(-) diff --git a/python/paddle/v2/fluid/framework.py b/python/paddle/v2/fluid/framework.py index ae98e299a4..7f5187d299 100644 --- a/python/paddle/v2/fluid/framework.py +++ b/python/paddle/v2/fluid/framework.py @@ -489,7 +489,8 @@ class Operator(object): no_kernel_op_set = { 'feed', 'fetch', 'save', 'load', 'recurrent', 'rnn_memory_helper_grad', 'conditional_block', 'while', 'send', - 'recv', 'listen_and_serv', 'parallel_do' + 'recv', 'listen_and_serv', 'parallel_do', 'save_combine', + 'load_combine' } if type not in no_kernel_op_set: self.desc.infer_var_type(self.block.desc) diff --git a/python/paddle/v2/fluid/io.py b/python/paddle/v2/fluid/io.py index d56ec45c53..613dc20b6e 100644 --- a/python/paddle/v2/fluid/io.py +++ b/python/paddle/v2/fluid/io.py @@ -46,6 +46,9 @@ def is_parameter(var): def is_persistable(var): + if var.desc.type() == core.VarDesc.VarType.FEED_MINIBATCH or \ + var.desc.type() == core.VarDesc.VarType.FETCH_LIST: + return False return var.persistable @@ -60,7 +63,12 @@ def _clone_var_in_block_(block, var): persistable=True) -def save_vars(executor, dirname, main_program=None, vars=None, predicate=None): +def save_vars(executor, + dirname, + main_program=None, + vars=None, + predicate=None, + save_file_name=None): """ Save variables to directory by executor. @@ -69,9 +77,12 @@ def save_vars(executor, dirname, main_program=None, vars=None, predicate=None): :param main_program: program. If vars is None, then filter all variables in this program which fit `predicate`. Default default_main_program. :param predicate: The Predicate describes a callable that returns a variable - as a bool. If it returns true, the variables will be saved. 
- :param vars: variables need to be saved. If specify vars, program & predicate + as a bool. If it returns true, the corresponding input variable will be saved. + :param vars: variables need to be saved. If vars is specified, program & predicate will be ignored + :param save_file_name: The name of a single file that all vars are saved to. + If it is None, save variables to separate files. + :return: None """ if vars is None: @@ -83,21 +94,39 @@ def save_vars(executor, dirname, main_program=None, vars=None, predicate=None): save_vars( executor, dirname=dirname, - vars=filter(predicate, main_program.list_vars())) + vars=filter(predicate, main_program.list_vars()), + save_file_name=save_file_name) else: save_program = Program() save_block = save_program.global_block() + + save_var_map = {} for each_var in vars: new_var = _clone_var_in_block_(save_block, each_var) + if save_file_name is None: + save_block.append_op( + type='save', + inputs={'X': [new_var]}, + outputs={}, + attrs={'file_path': os.path.join(dirname, new_var.name)}) + else: + save_var_map[new_var.name] = new_var + + if save_file_name is not None: + save_var_list = [] + for name in sorted(save_var_map.keys()): + save_var_list.append(save_var_map[name]) + save_block.append_op( - type='save', - inputs={'X': [new_var]}, + type='save_combine', + inputs={'X': save_var_list}, outputs={}, - attrs={'file_path': os.path.join(dirname, new_var.name)}) + attrs={'file_path': os.path.join(dirname, save_file_name)}) + executor.run(save_program) -def save_params(executor, dirname, main_program=None): +def save_params(executor, dirname, main_program=None, save_file_name=None): """ Save all parameters to directory with executor. 
""" @@ -106,10 +135,12 @@ def save_params(executor, dirname, main_program=None): dirname=dirname, main_program=main_program, vars=None, - predicate=is_parameter) + predicate=is_parameter, + save_file_name=save_file_name) -def save_persistables(executor, dirname, main_program=None): +def save_persistables(executor, dirname, main_program=None, + save_file_name=None): """ Save all persistables to directory with executor. """ @@ -118,21 +149,30 @@ def save_persistables(executor, dirname, main_program=None): dirname=dirname, main_program=main_program, vars=None, - predicate=is_persistable) + predicate=is_persistable, + save_file_name=save_file_name) -def load_vars(executor, dirname, main_program=None, vars=None, predicate=None): +def load_vars(executor, + dirname, + main_program=None, + vars=None, + predicate=None, + load_file_name=None): """ Load variables from directory by executor. - :param executor: executor that save variable + :param executor: executor that load variable :param dirname: directory path :param main_program: program. If vars is None, then filter all variables in this program which fit `predicate`. Default default_main_program(). :param predicate: The Predicate describes a callable that returns a variable - as a bool. If it returns true, the variables will be loaded. - :param vars: variables need to be loaded. If specify vars, program & + as a bool. If it returns true, the corresponding input variable will be loaded. + :param vars: variables need to be loaded. If vars is specified, program & predicate will be ignored + :param load_file_name: The name of the single file that all vars are loaded from. + If it is None, load variables from separate files. 
+ :return: None """ if vars is None: @@ -144,23 +184,40 @@ def load_vars(executor, dirname, main_program=None, vars=None, predicate=None): load_vars( executor, dirname=dirname, - vars=filter(predicate, main_program.list_vars())) + vars=filter(predicate, main_program.list_vars()), + load_file_name=load_file_name) else: load_prog = Program() load_block = load_prog.global_block() + + load_var_map = {} for each_var in vars: assert isinstance(each_var, Variable) new_var = _clone_var_in_block_(load_block, each_var) + if load_file_name is None: + load_block.append_op( + type='load', + inputs={}, + outputs={'Out': [new_var]}, + attrs={'file_path': os.path.join(dirname, new_var.name)}) + else: + load_var_map[new_var.name] = new_var + + if load_file_name is not None: + load_var_list = [] + for name in sorted(load_var_map.keys()): + load_var_list.append(load_var_map[name]) + load_block.append_op( - type='load', + type='load_combine', inputs={}, - outputs={"Out": [new_var]}, - attrs={'file_path': os.path.join(dirname, new_var.name)}) + outputs={"Out": load_var_list}, + attrs={'file_path': os.path.join(dirname, load_file_name)}) executor.run(load_prog) -def load_params(executor, dirname, main_program=None): +def load_params(executor, dirname, main_program=None, load_file_name=None): """ load all parameters from directory by executor. """ @@ -168,10 +225,12 @@ def load_params(executor, dirname, main_program=None): executor, dirname=dirname, main_program=main_program, - predicate=is_parameter) + predicate=is_parameter, + load_file_name=load_file_name) -def load_persistables(executor, dirname, main_program=None): +def load_persistables(executor, dirname, main_program=None, + load_file_name=None): """ load all persistables from directory by executor. 
""" @@ -179,7 +238,8 @@ def load_persistables(executor, dirname, main_program=None): executor, dirname=dirname, main_program=main_program, - predicate=is_persistable) + predicate=is_persistable, + load_file_name=load_file_name) def get_inference_program(target_vars, main_program=None): @@ -238,7 +298,8 @@ def save_inference_model(dirname, feeded_var_names, target_vars, executor, - main_program=None): + main_program=None, + save_file_name=None): """ Build a model especially for inference, and save it to directory by the executor. @@ -249,6 +310,8 @@ def save_inference_model(dirname, :param executor: executor that save inference model :param main_program: original program, which will be pruned to build the inference model. Default default_main_program(). + :param save_file_name: The name of a single file that all parameters are saved to. + If it is None, save parameters to separate files. :return: None """ @@ -283,25 +346,7 @@ def save_inference_model(dirname, with open(model_file_name, "wb") as f: f.write(inference_program.desc.serialize_to_string()) - save_params(executor, dirname, main_program) - - -def load_persistables_if_exist(executor, dirname, main_program=None): - filenames = next(os.walk(dirname))[2] - filenames = set(filenames) - - def _is_presistable_and_exist_(var): - if not is_persistable(var): - return False - else: - return var.name in filenames - - load_vars( - executor, - dirname, - main_program=main_program, - vars=None, - predicate=_is_presistable_and_exist_) + save_persistables(executor, dirname, inference_program, save_file_name) def get_feed_targets_names(program): @@ -322,13 +367,15 @@ def get_fetch_targets_names(program): return fetch_targets_names -def load_inference_model(dirname, executor): +def load_inference_model(dirname, executor, load_file_name=None): """ Load inference model from a directory :param dirname: directory path :param executor: executor that load inference model - + :param load_file_name: The name of the single file that 
all parameters are loaded from. + If it is None, load parameters from separate files. + :return: [program, feed_target_names, fetch_targets] program: program especially for inference. feed_target_names: Names of variables that need to feed data @@ -342,7 +389,7 @@ def load_inference_model(dirname, executor): program_desc_str = f.read() program = Program.parse_from_string(program_desc_str) - load_persistables_if_exist(executor, dirname, program) + load_persistables(executor, dirname, program, load_file_name) feed_target_names = get_feed_targets_names(program) fetch_target_names = get_fetch_targets_names(program) @@ -359,6 +406,7 @@ def get_parameter_value(para, executor): :param executor: executor for retrieving the value :param para: the given parameter + :return: the LoDTensor for the parameter """ assert is_parameter(para) @@ -377,6 +425,7 @@ def get_parameter_value_by_name(name, executor, program=None): :param name: the name of the parameter :param program: the program where the variable is found Default default_main_program(). 
+ :return: the LoDTensor for the variable """ if program is None: From d2caf777ae0260d10ce3dfe9249d3ccf53a50641 Mon Sep 17 00:00:00 2001 From: Luo Tao Date: Thu, 1 Feb 2018 18:15:18 +0800 Subject: [PATCH 104/106] set FLAGS_warpctc_dir to pass the test_warpctc_op unit test --- cmake/generic.cmake | 4 ++-- paddle/testing/paddle_gtest_main.cc | 5 +++-- python/paddle/v2/fluid/__init__.py | 4 +++- python/paddle/v2/fluid/tests/CMakeLists.txt | 2 ++ 4 files changed, 10 insertions(+), 5 deletions(-) diff --git a/cmake/generic.cmake b/cmake/generic.cmake index e10c0ecf68..33ef6860e1 100644 --- a/cmake/generic.cmake +++ b/cmake/generic.cmake @@ -470,10 +470,10 @@ function(py_test TARGET_NAME) if(WITH_TESTING) set(options "") set(oneValueArgs "") - set(multiValueArgs SRCS DEPS ARGS) + set(multiValueArgs SRCS DEPS ARGS ENVS) cmake_parse_arguments(py_test "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) add_test(NAME ${TARGET_NAME} - COMMAND env PYTHONPATH=${PADDLE_PYTHON_BUILD_DIR}/lib-python + COMMAND env PYTHONPATH=${PADDLE_PYTHON_BUILD_DIR}/lib-python ${py_test_ENVS} ${PYTHON_EXECUTABLE} -u ${py_test_SRCS} ${py_test_ARGS} WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}) endif() diff --git a/paddle/testing/paddle_gtest_main.cc b/paddle/testing/paddle_gtest_main.cc index a2f21e37e4..fd8c4a69da 100644 --- a/paddle/testing/paddle_gtest_main.cc +++ b/paddle/testing/paddle_gtest_main.cc @@ -27,9 +27,10 @@ int main(int argc, char** argv) { } #ifdef PADDLE_WITH_CUDA new_argv.push_back( - strdup("--tryfromenv=fraction_of_gpu_memory_to_use,use_pinned_memory")); + strdup("--tryfromenv=fraction_of_gpu_memory_to_use,use_pinned_memory," + "warpctc_dir")); #else - new_argv.push_back(strdup("--tryfromenv=use_pinned_memory")); + new_argv.push_back(strdup("--tryfromenv=use_pinned_memory,warpctc_dir")); #endif int new_argc = static_cast(new_argv.size()); char** new_argv_address = new_argv.data(); diff --git a/python/paddle/v2/fluid/__init__.py b/python/paddle/v2/fluid/__init__.py 
index f52346c3b5..3ee58393c7 100644 --- a/python/paddle/v2/fluid/__init__.py +++ b/python/paddle/v2/fluid/__init__.py @@ -76,7 +76,9 @@ def __bootstrap__(): os.environ['OMP_NUM_THREADS'] = str(num_threads) - read_env_flags = ['use_pinned_memory', 'check_nan_inf', 'benchmark'] + read_env_flags = [ + 'use_pinned_memory', 'check_nan_inf', 'benchmark', 'warpctc_dir' + ] if core.is_compiled_with_cuda(): read_env_flags += ['fraction_of_gpu_memory_to_use'] core.init_gflags([sys.argv[0]] + diff --git a/python/paddle/v2/fluid/tests/CMakeLists.txt b/python/paddle/v2/fluid/tests/CMakeLists.txt index 628ce60b40..26a80abcb5 100644 --- a/python/paddle/v2/fluid/tests/CMakeLists.txt +++ b/python/paddle/v2/fluid/tests/CMakeLists.txt @@ -5,9 +5,11 @@ if(NOT WITH_DISTRIBUTE) list(REMOVE_ITEM TEST_OPS test_recv_op) endif(NOT WITH_DISTRIBUTE) +list(REMOVE_ITEM TEST_OPS test_warpctc_op) foreach(src ${TEST_OPS}) py_test(${src} SRCS ${src}.py) endforeach() +py_test(test_warpctc_op SRCS test_warpctc_op.py ENVS FLAGS_warpctc_dir=${WARPCTC_LIB_DIR}) add_subdirectory(book) add_subdirectory(book_distribute) From b26a5b5d044c0bfc7bdfbc803ea604449d6e575a Mon Sep 17 00:00:00 2001 From: gongweibao Date: Thu, 1 Feb 2018 19:37:12 +0800 Subject: [PATCH 105/106] fix en doc --- doc/howto/usage/cluster/cluster_train_en.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/howto/usage/cluster/cluster_train_en.md b/doc/howto/usage/cluster/cluster_train_en.md index 28cd1fa790..f9424f8f1a 100644 --- a/doc/howto/usage/cluster/cluster_train_en.md +++ b/doc/howto/usage/cluster/cluster_train_en.md @@ -95,11 +95,11 @@ paddle.init( Parameter Description - use_gpu: **optional, default False**, set to "True" to enable GPU training. -- trainer_count: **required, default 1**, total count of trainers in the training job. +- trainer_count: **required, default 1**, number of threads in current trainer. - port: **required, default 7164**, port to connect to parameter server. 
- ports_num: **required, default 1**, number of ports for communication. - ports_num_for_sparse: **required, default 0**, number of ports for sparse type caculation. -- num_gradient_servers: **required, default 1**, total number of gradient server. +- num_gradient_servers: **required, default 1**, number of trainers in current job. - trainer_id: **required, default 0**, ID for every trainer, start from 0. - pservers: **required, default 127.0.0.1**, list of IPs of parameter servers, separated by ",". From 47ebe435a79ab836649ba11c635129c8a6664ea1 Mon Sep 17 00:00:00 2001 From: dzhwinter Date: Thu, 1 Feb 2018 20:41:54 +0800 Subject: [PATCH 106/106] Fix/vector (#8045) * "clean code" * "clean code" --- paddle/framework/mixed_vector.h | 77 +++++++++++++-------------------- 1 file changed, 29 insertions(+), 48 deletions(-) diff --git a/paddle/framework/mixed_vector.h b/paddle/framework/mixed_vector.h index 0e0e239586..85caac8dcd 100644 --- a/paddle/framework/mixed_vector.h +++ b/paddle/framework/mixed_vector.h @@ -34,18 +34,6 @@ namespace framework { template class Vector : public std::vector { - public: - /* NOTE(dzhwinter): - * Data always store and modified on Host. - * If the data is modified when use cuda_data interface, - * You need to call the CopyFromCUDA explicitly to synchronize data. 
- * - */ - enum class kDataPosition { - kDataOnHost = 0, - kDataOnDevice = 1, - }; - public: using std::vector::vector; @@ -55,11 +43,12 @@ class Vector : public std::vector { virtual ~Vector() { #ifdef PADDLE_WITH_CUDA if (cuda_ptr_ != nullptr) { - memory::Free(place_, static_cast(cuda_ptr_)); + memory::Free(place_, cuda_ptr_); } #endif } + /* Get device vector */ T *cuda_data() { CopyToCUDA(); PADDLE_ENFORCE_NOT_NULL( @@ -67,81 +56,73 @@ class Vector : public std::vector { return static_cast(cuda_ptr_); } + /* Get host vector */ T *data() { return std::vector::data(); } - const T *data() const { return std::vector::data(); } + /* Synchronize host vector to device vector */ void CopyToCUDA(); - + /* Synchronize device vector to host vector */ void CopyFromCUDA(); - + /* Switch device vector location */ void CopyToPeer(platform::Place); private: void *cuda_ptr_ = nullptr; - size_t cuda_size_ = 0; - /*The DataPosition is unused now, - if we want support random access from cpu and cuda, - we need to overload all the vector method */ - - kDataPosition position_ = kDataPosition::kDataOnHost; + size_t cuda_size_ = 0; // device vector numel platform::CUDAPlace place_; }; template void Vector::CopyToCUDA() { #ifdef PADDLE_WITH_CUDA - if (cuda_ptr_ == nullptr) { + if (cuda_size_ < this->size()) { + if (cuda_ptr_ != nullptr) { + memory::Free(place_, cuda_ptr_); + } cuda_ptr_ = memory::Alloc(place_, this->size() * sizeof(T)); } + cuda_size_ = this->size(); platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); - auto *cuda_ctx = pool.GetByPlace(place_); - - memory::Copy(place_, static_cast(cuda_ptr_), platform::CPUPlace(), + auto *ctx = pool.GetByPlace(place_); + memory::Copy(place_, cuda_ptr_, platform::CPUPlace(), static_cast(this->data()), - this->size() * sizeof(T), cuda_ctx->stream()); - cuda_ctx->Wait(); - - cuda_size_ = this->size(); + this->size() * sizeof(T), ctx->stream()); + ctx->Wait(); #endif } template void Vector::CopyFromCUDA() { #ifdef 
PADDLE_WITH_CUDA - platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); - auto *cuda_ctx = pool.GetByPlace(place_); if (cuda_ptr_ == nullptr) { - LOG(WARNING) << "No uncommited cuda data."; + LOG(WARNING) << "No uncommitted cuda data."; return; } this->resize(cuda_size_); + platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); + auto *ctx = pool.GetByPlace(place_); memory::Copy(platform::CPUPlace(), static_cast(this->data()), place_, static_cast(cuda_ptr_), this->size() * sizeof(T), - cuda_ctx->stream()); - cuda_ctx->Wait(); - + ctx->stream()); + ctx->Wait(); #endif } template void Vector::CopyToPeer(platform::Place peer_place) { - if (platform::is_cpu_place(peer_place)) { - return; - } #ifdef PADDLE_WITH_CUDA - auto *cuda_ctx = platform::DeviceContextPool::Instance().GetByPlace(place_); - void *peer_cuda_ptr_ = memory::Alloc( + auto *ctx = platform::DeviceContextPool::Instance().GetByPlace(place_); + void *peer_cuda_ptr = memory::Alloc( boost::get(peer_place), this->size() * sizeof(T)); - memory::Copy(boost::get(peer_place), - static_cast(peer_cuda_ptr_), place_, - static_cast(cuda_ptr_), this->size() * sizeof(T), - cuda_ctx->stream()); - cuda_ctx->Wait(); - memory::Free(place_, static_cast(cuda_ptr_)); + memory::Copy(boost::get(peer_place), peer_cuda_ptr, + place_, cuda_ptr_, this->size() * sizeof(T), ctx->stream()); + ctx->Wait(); + + memory::Free(place_, cuda_ptr_); place_ = boost::get(peer_place); - cuda_ptr_ = peer_cuda_ptr_; + cuda_ptr_ = peer_cuda_ptr; #endif }