From 634ec498781d8e630caa0071424812dca5083d91 Mon Sep 17 00:00:00 2001 From: wangyang59 Date: Thu, 15 Dec 2016 11:35:41 -0800 Subject: [PATCH 01/39] initial gan tutorial --- doc/tutorials/gan/gan.png | Bin 0 -> 33275 bytes doc/tutorials/gan/index_en.md | 19 +++++++++++++++++++ 2 files changed, 19 insertions(+) create mode 100644 doc/tutorials/gan/gan.png create mode 100644 doc/tutorials/gan/index_en.md diff --git a/doc/tutorials/gan/gan.png b/doc/tutorials/gan/gan.png new file mode 100644 index 0000000000000000000000000000000000000000..001ed6cc19e8911f9b10f63211c9658160b3a06e GIT binary patch literal 33275 zcmeEtS5#ANw=TUClrCMVB2_w}2&jliQ9(d@??sRf37t<`q=S?Y5kWzE4?Xm*^csqQ z)C3R$B#?uC|K8_*Uz~BqIOl>v@~)M&)~xR{pE=h%eO*mjDmE$t0s>mCCl6l`5D<3| z5D;T(3Mx~ z=b-XkGvJt#B!(ss8n~HV%R@oLr*=)|(cim1du?_9b0p&!Ze@+VA<`rt^UQ`gAOMcRP3a-?o`(@N()q4!6yp}r6*@sD}o&+RM3cyOt zeSbo9=6jbz|JF2!tVp*XX}tW`X!kGE6?&@oc?Xgn{*c9TWgPmr5iT~%{O8zP5)K8H zIJu{Od@1!>c$uM8GGjU@TtJRH-?wVRy@jJAJzS{RQ%wN$>#0rT*%v( z=?Vc6x>${#>Vc#u19pFP@0?kW6s@$Zj?{-){av_gDQg(jos2b4Nuv6s3r zF?fOfOik;*s^!}l%AFeD+)LO`mFvnW?yM^+z~^MsCJo^N^-IQb+B>ov2sqav@NMeG zeae6{cx9gIqk07wp%!`49dr8Nglw>S&QsXYhje|#uOlc;gb9OHmlDr3<94bP$V`Hh z%b3CtFgpJeMZA(*v9eW+@N4^UPI%fvpaNvw$%D6oWzslVIaR1&*H(|{_ zEF>PcrIehva_RDIzhXvzfOfofkg2M=tPgvxVM8;GZT-@N;P^wyXg5qyc4ovFq zTyQE6f8n<;0?3X31XZ}A4b$(IJAUTtgI?WlU`I2Wn$zS^jf%>jm_~Z9<&&yDmn*&y z=F{Q*EHR0V_3m{u$vf3%u!p%Z!4EDgU^WIvK-Ar{`+H`O{Q=q_gtRD~|0RPkvWhAK za~znQE-29P$9z>=n?|J8c>1*oZU~ku-8jd|jniV`RQBrVW+GJj;;l6k2 z`KunVJY+V1j&NL!Az3f;BFsJKqzRBK#Mu$()kl$QBj)gGBR}Ra_#MRfspN>Y6STYH z`w#7c#};DcM?EC}YD3tRV>YXyO$whNlBvYIr83g>>J`@%&+%*zJ#abR)!O0FSzw?O zQpa`m^L;Nb0y;CY*wgUeM<&>$0B(6cTkNhD`h@cxbZgBtyfYB~co6t+@C)nrphkm{ z>0_E16GJ~2i#H9Q1Cz9H0sN}b+@=Vip#3-YWU_r|^dRj@Uu?~^_@28P|fnm&}#M?|M!;=^7r?gHCEpi?7Fc&2OKm}#v;r57u#?t*~C}EAw9gG#AAEn_j+*mpqrNZrSeqCq(|Uf@SJq5`Vz};e(MD zPI&);mypC+U>2B9={_L8QO*E-WhQ?rSdy)<-*ir7oMs}|My)h0K4+w-@E?n)+m4k? 
zCF4Pgv4G2!a{z2j62kn9Lk8Zpj_HUhN&lz{yjQza^~5O{{j&1BoHMj8&GbJmk+>B6 zFvBuP7<&rdXE#wP^6%`OSR;}T4lPl#*K)!}B`3SbUS-c_J>~j0T%#Y)+XdX;Q|{Dz z7Hf%$Tk_kdmp|U?4~_lA4=SV8>zriq_K-VH9QP}2S6}$n9sEBzSvp|$cnqaS0|2B;GH~3wbB1Q(+#Ov32+LM ztd*x*4&inX376^ktXAo#_)MI3W{F1Cf_2B|Yyh@jn`&ho-!I7yiCAFn=ys^AIjsJi*`@yc>;n8=CcH@d$&-&~umm4kQi!mL`w-iyj6u zs|7Wn2=$FM1toHLpVzMmIa^>WZRH(5IGZZsOh>|xFl3TR<(x$TOd*O1o7PjG8C&^B z4%WSV2r+DX`^%W1XP0Q`e*G>NY0d`(+7OM9KfBP(nxWpDig*lBD?Im_29Cc0mn3UM zTnpf~L#BEf<8Rb8quGH|E|@(Y_o9<86X)XMDS4j7u-P4c+b|*mw8h03kU+)vKeA+vZ0mnf%{3yq$j{kx`ZVd+7trW0Zal;BRCgF3@GoTQVqBtP z&{l6hUOCvlk)U%8^{6gbxq^9l&-3a%JNeoP9;Jp_{JEHQ4K`b1L)EF795+jo52s^; ze%LGdG%2x5-}qHEq?xIgHca`b32i9hpOk7+vh8Zk-RsTXSj^Piq(8O4k;x63D zK+@xR8N+4@k9(wohS853+@Q8ufBkKqzo@8ljlJMV;~MrwN5}s~O(`!TDF47lzPtSY zp7sA1{vXsw+D^R#=#zQwU(wULV$3O>AKh0OzAiz2oZ9~l(xS>4AU}}Exli(MRK1wY z$XTspNa2jGU1!wsBe-__r=09xJLeNCy~WbLQm6aH+B9@KFN)@bbA_J>cE;tv`oHV9 z$uU~yJRm^XUzHJZFweIJ3W;E;rty>;&1%`2u-TuBP~v+>43*J?Z1(D=dMM&+tJU$6{^z@CiX3rVS(nzufcW`C?M=Z>%h(mU zSucgMGAd_Fjq^?YHI4#*u~CH`R#Vjo{yeM85&a9*l16gICP7R)^NVNU*S=qyml(uD zk<=NkF8 zbCIMsbgP_Zdy;mRk(ofw$KrT?wF+%nM%sL{?Q8ix%oA|cJsT>VKo#K+B9K{oGI$yTF6dJS)MylNqX}gWOtlQQ4Q_cF(i(RK}^BLOs)#R^DD7gx* z$y_GkibnSN#7IhR2P7&(YNhSN`%|Z%S2B(d<{C6M)R+o(<8s%}j^yP}=JEzwL?tk$+Z3e?9ImigwuA5>Ld?gF9_T+x=ci&a+vOt}WK zRRM+nm5c&+E6gEde=?LaJg`{T?q~r=8BnFtf>AUJ5KIFpRbO@*u2yTWWYZPkHei>o z0_q!LCaFjSIC^^Q(Lt~k&thiBz0i-T$P7BQ9M*{v2$p^m%n+O)E@-a<+C>oxvV_lX zJw8yalp0nN#b!Bh-cUVv_X`m^CzUUHH7$#k4cFx?*rQ9v0+#{zitU+y6#SNf={2G+ ziz>p`W*$#NTy6l*mv(`(>+t^2QvXOrZbU9gzNE_b!t8+fgY}1>UuwU5dMf&7SKR+l z4`NnGKS{wv@{};lIUJ_(2-O9bjPwlILVUF*-!6#jQb&B-irtwRri;LvGtF&|btc4gLE&v1ei5gO|I^*9z16qI>xvq{#(_O*&swa#%)q*W(Vvis+UoHg6cJhRP;)jDU4 zzq~!8>s3qGN!XKUBUYe1vqvE}%2=7^Jng?E_+k735eoifozo`P>4=DxA?M`2tFzv+ zEr}9?NrRu@b$n1cTw%A4T}Fl?;yz3^rW&X2c

3TkgP49@aJ{wQn#Z7- zOt5WUc6R5Pr24IvKidR|SGQO+HtrU!pxl_NUr$M`Z*@N&-{2Oy;@4;SAs%r={;*!O zfmv23!c--E-e2u?YvmwfbynF583Qx7#OEKM^pC=!k23_@X~?C?Ud5g=xzpw?M|E0P zJyW0u1>N2{6b~X@zM|eqkN#c#fKF%%(7*oM@hd;t?jjhto&HTglU6mUyWA7}HQhVS*sWDQAC_~u@_Ttz$2$YP; z;+?YYLMZQ$=T_y%!y4>^<94kl-yP;=pd6>U`;+VR>{9tTV-#?|A=q}{;R4q=zcj$q z9{WAsn+sp%7(Q|y|9tRWh1#{EfB04TLi)YmGsBmf*OE8ZTEhupM`4v)7Gt-lpT&rJ z_Hnr)?<8;3iNZp1&zfg$n@(vy%o`he4+8g|ezXi9gf*`Z^+G7Ekgcl?s#5h}Rn{!} zcJ~=uhUS(-b6QiQ&grN6b+%5JBZWY& z4gDQPrxG)rJvJzIi~OWd?czpBJClP5Y1i`9f)$C`#j{D)CEoaLND6R{Z`>Kmukx(+ z*Y;>rTg zW29;08#4#b#|ZGpy-C2+f)e;Vm@bm|smAv!lZ8>?0${lz*`o|WP>v52*{hi`)LMrS zOJ>acSBp!-mW0I(wCP4Pkw>wyGEb~Ji!EXiC)o?oSJA#Qu*sI6`9_HY$s0yS>N%Wm z_bkVV?i;0*gMkL+?8U2@Xi#i~g2TIm@miW^+qus{aKhX)4d|71g_WX>;U+w^i0@xU zv_1!^2XMzMv!>$&N=&i+MR({s?$$1{C}XbgBv(|ydgv2V|KYl1Z<{~J=+pk^hqFsa!8n8r z65(UN(nGBQ#k9qu1iPx`{qOHykvQtnrem3-wJRbX5r1SW51V2^h+{M$YG@nH`qJg~ zJ44?rqENz!&yl<+$Rq=onIHH;+d+L3O!z1=0wj84ZOSpaRc zrzl!{LM#N!r}FCjZt}*0Sj5_DGIq!}24Am!Y@Lji z%cJGh3wI7iWH{2RJPTL2Vm`BbF@5Qi+0p;Qdcc$L>wVJYq@cR*|6mI|?avxRe_ok5 z{*;*Qli2^!Zw?Kmg?NV1ncBIxVT#emGs)OZok&Pqlz31_N(uZsgQ-S5YrK!vz<*4= z_T3ex!EO9LaLcEC&I@twh3bTjee*;_ zXoaJ4;PzpX zzgQT({f+k%?HH(R1aH4sx9ml0@P&Y>oLCovR0+7nOc#|N`{128%!Wy*hUUj(W?bD! ze66%`2(bak37b%n2l<=Ms1|iPG_p~p$`xcSN~&8b>)Hs5UG)u7h(-{~4*Jf|L7*dU z^Dp1P)JS=vu^_tqq-;!;Ut^)JT;@F80R6Z4HCO@cG0%f8RVXg(kst?J`8OsdAqO*0 zUW6aZJh@hmuvf8e#DY{qA6;w`(@LA{c5@J!kCU z-_A>YsgA34?~}7PQo~5P&9Rs@;)0#T@n7n*HH^|8R6VFCexK&2uKC{Y|8n~KDbK=~ zM*b2IDWB5*{-+j*dQrQ-ZA(L=_Q_cIrI{CkuFupGZ zwb|tDGl2A6OjI^;3THYM@xT1te9@x+OM}3mKj*Lhrw5gmI0SoGq#roKq~B~=UHHO! 
z<-ZQBF|AHo+**jOkUls3=dbsFG9Pu+kpXYR3XQq z)rHA^9-25ZBf}`(gV-P9p7?bpom0v`?Jr5WB(N(P$q!_^E0e%MOukR2$4!u#C_G!G^S8gq+6Dw9 z;iy1nG_eMMo1^EK!A=u5RYIwCl?qpS|LtCW6bUBw+a*QSM|!CKy;5jB@NGV^&pq{T zL_s}Se-+PIwB=%qBMO-yb$b7Y?wdLW7%Ko&u#Hx|J~#i+xp#-l01}lPE#rVx%ew>l zxwKF)5rd~l*5SWwZ*-H3q=+*uv4bFxUW*`;F1BdNdZh&i`IjlNpAV5|WC!pDb*fcd zKYheI`O`%HHxp~9+kf+!I`WK)jM{#TDIYn#857F0|9AU!H1hSMYewdi)6`hZBgC<) z?)L~Pr&AyF;hlg2P7!(G?99i)INcCZu^^_TD~7mlPyD3k=XCPPxp6f9GBG3HQaKZA zU4^R%J@72eDl|ISYr554?;bSrF+>nZweNBzN}h8X?rjg7HyTKC*fRJEC~M=vum1!R zS{&b8RJh3nr#)q-h0un#4k0|C8sDo1+Mc~wt@aZY$)|yigfqM*QtgU`cHR{f0N&qE zU~Z*sMMZ|cT>XVvjSMb044QS9`UK9{`}N6OW1Kp}g6ySmhmYt=ebxB7^{-%NgOjoB zPb78iln%>ebbx50puBHKLY9>1c7OGnca<0oi8uG9k=(P~jS4lLumKh2FX%7id(?|p zQZ&s>&3k|unb_E;>dlsHch$fugjh4;Vsm2rbi@*z$Xx24Ho;OVC#*L97ho#P0fNx# z*c0#y^)0Etc@@BR+NLx|_*8f=Lpd`U_tmT4hp6SJbcq~AkX)E%*d0#+tih)|?PNbM zMX!}tPV={9l?X3WIMe*7CZ_HA+VzIRON|IbQ#g~;6#bx7sD$u(Z7Zm))jZFvt#Nf> zib2*TF0h(czp;hC!+xc5eYKV!eJ1q)VF!(=J^l(M1Gkg-x^}cRJgC5?awb#NoG`$& zv>Vr-G&alX1FvTY?mDS9vKe1UOSf@MF9G;bI)3`}Xc8QjDFJkAb2ug)rIsgGFk&Zk z)AeL=a6IJs9<2Ut1X4HZR-V<58ssh#BYSvT9Jf58{tnG%b(Nh~WnLvb zY4*OVbORhpO;(wLc2{HTroNb=cQX%NTNXXZ4HDPpjiQE6#%(y>OhNtU^BB{scq6&%>qZS= zSR)Ti)M);u0BhX=ZY1STaW2mShmCE%rBX*`aV4XThm8y2o;^!4*yhx)KCks%|~l z@LWw4K3sSPXMK$$M}poT2bO=OoQgw;OWAhQpE?HmYC9=VE?@C)NiOUI+GXj|jg%-+ zbi+-SBGj?!smK)=NBP38pnAHj1b?{C6`W0Jl5Vs~%J0(S+Y$5bsL;&If~{7-E?4-v zrIz2XALabtq!)d~l~ity`Wrg+NF1=P+;cL$^T%qNE7Ve;q>8kv4p?oWdkFU_Ueuuc zNI7JV>cb@w`&G=BF@zSTW3961rY23)hj{CT?PS{}469O=X3645$6mb7PAFkbLq{HW zM-;czj)gnzeskA3st>Z?daSXqzZ^~t*T{>s-hiN2-=H_;U>6!Yh|PL^KH)htWzL;; zt|rvckyqD&{#pR*CCOsrU4xk~_i#pxU)}DM7h2x2HGIJnDsAR-XK$J7YUh1Z6;X=No3gUMJ6-&XkAS7?(jrZ7sY8$g+DPE_8EU?`{KoQzfug{iTt|`lgeuc{{`Vd)50ZJZ@K%*CpUyM>0=GxE9$%hXX;m4VDX(vRxK7{is0y9q3$h!s zKB*UPC!;Yv=iHiu@z5^jDn(}!1CYFHP8>_;q4moI6ie99orO;_ zVZjIgEXuuodV09jMxnTP6MoQL0#C9PdnZy6OmSdNFWe3!ea)HTo2v-vG!>5-L@5#O zwvXH{=UzQhdv76#^%EWHoD)}{i?yBMW~pP{=n851;e9OSH5I4dG{f^tQps<@Pg5Ol 
z13OxfLqs?$aNG6<>+|+lKFr-E30P$=2{x)mWccauOF@fFwivHn-jTcH#HcMc*`UM4 zFHAxFdXnm{xIhxMyWABJ1Yh5ctryhfuUC@`sCF;Gbw4^ZauD;U;9xy1J1nK{pU#H# zRcL#XkVo-<#mmN(;n&yukKXjGL#A%;Y<8w`Oba&`(UX1*m}Hrt9v#FQgWGQ~t$*BG zfZnP*?eNd3@x zCiN3IVr-71M(a6ideh|$wO#4e)&uC-iA+9S{QB?yW^V8rz9#*?_R82x`gSqu^4I`R zBSkEc(A@pQs`@!^?lM-|d!>jU2nY&+PmA2HNr80psG68w@QBz6zjaSLS_+VSjoq}~X7B|v;Vu?mc7c0&7)pXphY$pfSfK$5Y}BPM& zm5GrRV~@6|kNx-Hr*~{!{68metaMCtvAN~sn}yx$m{F9jZ9QS7h|!pb&0)^{t)1ZZ zax0!`k$J*|`2iF-1#yD|N#41e&W^ti11mj@d;K;dSAI2I11N*D)&^BKt`Ak_7%O?Z zkgMKQKBin!Ikg-|jSWae*jlE>z$5)S4(Ue5!Cb=Jn{aUY;e$l|^Vk~Ln`%8Od7eF! zB|~2&dNyeC3=Fwsz^h(jh<5EhkI*nR)y?|A9~y*`_L6HlyakYx=hN8^R5$ z{^Vvk<{i2xH0V!{y?TeoV@6(v)!C!ZBhQds9>@-tVOD$rix4Kg4 zx}2e%EA|6PLWp7Z)qYC!UX9o9If;`69OSO^40?nVwqI?3J1Yw-$weEiy4PZYo;`kW zxQ87MrH;VYBLk-_0N8yNVW^KH?%ek@R2EMK=FV$hX9g=&EBQI?`MEN8!K8e+P-htR zg~%GM7useNtqA6>olsU>=(D~0Qh^6sK#h$%!Cfl;8>p6-(Ue{sIjHdMSo;t@tvOao zWK65uVQKk;+W~$e70i#6Eshr~|2%5~@13ES)CgQR$u}(d=b5pyC&B@J6;$s2Ei^w& z)#pvEq3_0P`G32RA=4s6RUJ6j`#)ELXKRt{nH{(LI^WMr_*0h2b8h-aRVA)`p&ei0 zDDe>xQE|cMFPI`9W?&SUJzOYWjz#o%B2qM0kglsa0$v)vLVg6fP14}LXFxh#02~`M z^9H@KUU|hm8zNRV&(JncJ~$orP3BtI0%zF1rAibgwyXD|@|@hbp6CiNBzF*Z@dI`! 
zhrX~eZlRJ@AsvE-b%qu^S2bol?de;2{@zdS>0i%ElKw=)k#nAj$hgOI`NdB2dL-Gv zRQ^Q#tibT`VJ$c;@9Z$*gAp-Z)Zl(T31DQwnDmSDq(Q^0r7+7>PD7jJZ!}w1sa2dJ zq<-3d4}X+zWVFJ-BmFbKyd3OuzGOq?!hS$HeiBv&pSg?`BN`rAVH7Hxb$iR9s^NtM zUmtQ-{!9Wk2*5iFn#WI&<884H4k4`Xx2%2M9;>0>7@wcHhP&D}w8BJ`S`@F0MUx1) z0O@ddta?poFNB&*13zrO;U8IrQO~LD86N%ew4MvZt(Z0snz1oj=UkDuu*kwo^4&FT zsUx|Ni)7J@AOxNte<>>MLT+^y#v(9GtK)PjoX=wvzgZ6p2Utns6zO+ReAjK&_ML&G z2lKv{f$Rp#s>0_Zo%3S_9^hGNWp8kgs6>N6?unilQGh)by8Z><(a*r(X?bzlz0m9P zfF*!GJpEJb@35rxYR{MZPFeZ!{%j3xw4Xn#&5v2At0w#Nf+b2O2;Em2XB} zSrt}~^|EG(KYT`TU3X|SV8$LsGOjc}&`8RO6_lm*lwHZmoyjj0*v#1K4IBT3Mbem% zn@ygBH61m{*bkoXz6Z*SI8ibPAZpjG9?6t^r*)BzsZWHd!y}q>sRcVrRIPRSv%snb z?aVHhWZ$5XimGZ=92WF#n~$>QZwYXd9&^XMU@F)oaZ)DPX(MoETJw$4+K`f+0L3~4 z+q;Q_NI`St0w+-~APM>>VQh0D!HsPcZl;z6 zYyA3BK1B!x|E`ijiEo@~J2Z({jUn!WWdvi%)bT3D0_{(ig%y!*VrDyul_kFY?eSG# zCRJoQ1mQWgifXXhrg8$wOaGz@t|$B9eSpUpRaV*e`VIIOtkPC3xGS-I9*M0g?ual2Gc}dEyUH(hYGcS1k84v&^dOS5zMy zb$J(*xIm&rg$|u@_h7xMUQWJFl7fXGn^^;ROojRYkXM7E!ZTT6DcDzsRe}2o#}%P9 zZo?}b!1)26f5NzlqAtXCOm2ly>NhBJco9oyk#VqNYa_L-f9Sxxj~96M6D!F~t!*4| z+YK5u!31RO_!_i+NieQh@<;hjpdSzlw96NspzPBJ`gHi>H#0XG!bMDSK z@2a1*vL{ejY3Yb&0|T30$+twMCpM5LTA zeDvg1MrUa;5xXkUcrfFsIL|RE@G*nYLeaP^%a)h9BTpw2CY~c-(zP*EYQ?~cQlCZ$ z>Vpi14kJj(cCN_e2;@WvriqFW!czFQ@+-RQ*%9Ml=`iJk=$deU2)Aq{`w`+`jbL;SrBlBTyta31;KXCA>DZdO}7VQ0|HdXi2S7R>`{2M12ajb5H# zM#9)C9*P4t1`>8q!XEbbxdLY4bWZWkq~{W)8rnOaM@W(%t76o`MpCM|dC8^|Bt`)U z25!xkn{klf>-y)xZ|it|Nh@L7(miP++Mo8RTpxB4nSp=yi6vU1&J#tS#1hW;5%3f$~XR`c2~Icp!o zl(CQ$?vixgZjU`xM*B{`(M7R+RmBgol7HYLsgN5xM^!Xma~`{;>wUli=NoAk{8&ld z3(-l?RCi@NQl)=Z?z7Dvh&v?zdHX7+|I)Va5$SW0YS2Ws+PMD(cwhMjdq240w#_<+>C|JuVrsiJsvyefTppp5mpG7)18FC^xaPdIRb~fILr2ot zeN#<;6<%hmd>nd!--`_bmRA}_j^L#_2+&4$i7rMy)q=JUi8J{ zlIon}ZT@QyiC960pK}-M^L6OuCC{p+VD|ETh(ff^yHjT^p5wiygOoif(HiFTDE~90 zTII>sqGe%~CO?RVBEm487dyt448NMt>u1vlAfi$88n4dn`_f=}19xI&)8|%wo1z|< zcG9>?iB_X#P>t0>cDH4`18rc(Nxi-eTwlW_}iyo)@7THWR{3Lav%Ki#q1A z1}7b-5MVq~&!9`Y_|bXrP=%V;rPR9o=O3bi%{~_^TS3mr~=~ 
z5q%D=WKCLOw<(gYMX3_YyM@Q}ORB9&A~~*teFD}lKQgR*yQZZ^C_|E4_m3}{s%#;; zVPI}%aDp{{6E&7Xi7vNy)C!YRzL8EKh{h+==I~-e%=+Hda5| z`rw`Zh}GQz^skulE1<)c(z2ne`hp=QDww4!(-ZLzVr2va`^gErm=TXv-HILz9qe?4 z*oAbtFR)x`0fh{}eq-Z)cUPuu(1FCPfr>P*X@f&JzT&Ov&JL!`EIt(%xK0AcxPY8( z;2R(XMwL05R(zCA_PPc3ewsPQT-&Ic*Mr%##B#?@_~mNxXY`1xm)fLswolubLYbwo zJ#KOLQNSe^s~=qpmUNHvba+73wm@fA1~qImIpJR%pp6t z8)hUPIwi60IPV?cazDg^xXqb>4#YBdFh?dKFd8g?I=fqI-cF`;L>}^z4b;qJ;dW!L zUJrb>l{(x7L(jh-7c@J&nu+P4e*Urz)lPp=7QZ-CK^QUzPf|rxLA^Wa^SdmYB z{qDVH@BPE%`m*Mw8{SO&xq4iW$pP)$*>?e661)^*m!cG(m+7%trG1Z+-b}TcEa1=< zb~&P%TOneZms}+4+N5e|_vvOG(TD<^CG0RsRmy+HdD|%Ya5#svcvVJC!uR_l?WYssiEaxgqdc<)!me<8aT2x5 zmvCUB>^}90c1U^Qy(1QKP{%d)d z<8*zGX-AE<|Agi;jatzjBzRd`USL|?a@*?!RB1F@*LHD}LhQ~P?&xJhO0UZR`JL3V zk-?;h3V4)xX|x13CpR{eUb3FND|~|H`hqtQE2kCe8**IlmQ-80 zm6Ql~@|fXmN3Z3vd-~@PuJiZ?%gJC7OCV=lud!P5TC{}H4p<^`c_Lft4<>puaiwk!ZF)N}@tULT14PAse5DJ5F`F5aHRDM#KWjiEt>%}K-IwR6 zjGEsUzc6^gurfB5B*J~Jg{wc|3Y#bW{bQfm+EE#{nWQk&YyI-st9F7;&v%$_i;C4D zLwvJnq@Rk;wWh{Att}tBms9uyVs?0`4=^m) zt{QMuoY82wH`AIN>cTUL*TJ>@F>K#-0a3DuyPUek1qiO)*g36SsA>AeU1WYsUWw(N zlK|tIzrNxqeBJeNE8;M2`~9i)Uj4~Oi$~R)<+vVSA5x)uc~Iy=B^dw8l1hz>;hs;IF*7IAeJnOT-TI`sGDNs6hV(<& z4Nut_!8D3&7el5(>sl*$csN)WRWpysLEImNy786iJ(w+>n;#~6-R)tz7S?}sp>`g>`_GrT8KYmJgVMB zE0whnz4DE<_ebmituD!gq8!`SaClfN7qRzQUt2Hr10qWF@XOUI z|JC!8O8!>@hV3h3_RG=e?)$&M3}uTIE07a)aF#b^0>JcFA%(fS(#B=*)-{TCr z^V?M*f(aDKsA!gt2$<18Bnwu{Fb`JQJ5gMGD=ZDBo~@^_@Ic0L^~IHlC+{pKhhl3( zrIuOXe#Xm5M(cOau0(%TiqxVqRlHa`>d+k?UIzgsH`hne)g{0%-xwmtjmyPdx7ax; z!%;H&T<2=j(g;E%?!`%?NZ=5A(c%J<=JI!RdM}Y9Jb|H-l z*F0?W%cIW^z_h0E(Qz{LXVZ;?_H#3ym!x8FdNvTn7-OU;tq+$J5)=_U^kAQ5^}E{h z>&ZajDOOMu%W}fu;=+5uf~DvBt|Yjl*Lzp(b8rYrM--=vvmwqI4H0p$a0IKYi_4Qs73;pqoIh;FT?}b0Av=gX}4l^#G#EhPT83h zrdR1YzJ$VhR`rVr)8^=&0wR`>qFKAu%I+@bcm{Htc{Z8W;>m`Nx_61@AtX2a{P%jc z7$BuD;=31~4j9x1*#&A4NpaTe4&mRp>cupK3%YiO@vbzN!2f~Bgcmz>;j$|iEN)@r zh6QXoKPGY$%=Uvt*x&n`A?7_&FYb5Eyrq zXROGt+wau~CwdaL<)3J_-`MvJ?aeR5#5?!FIP9f!Ts|46USyJ|-Qx3Y9||RQSs5JI 
zs6+A*%x_at{M`j);OEu&Z=&jFiE|R%6$udCqXz8DlDQhkvdf+)})++*}0dWHS`lat-!>Uxv zFq3`i&8+V`&%zxoVh*2|egsKezi-idTSCdL>T9^8y+FHUnMb#-br9eli*CYef}+kg zzOuRT659>iRqij9!uU6Kf!*tK4anbwZej&L<;;@bcy+rO8<2aL-I&LpyfDr?fAp%y zBRRB9Rrv5Q-9NIBnW5z{=lb=Tj!*4Wn^}g1S8bP9`?=T3iM5olZkyV)AY`?-(0@>grk@Rm47(JJWAWQk7X8ePi zC-08;PrFMnOs|GF6=pO?9pEm$lxB>lfHAG?(y!%b_p8m1HxBz)t5ZXKu6zMw4600Z zAKjKRwTx>NlQ&qt3N9PXn=rx|UdN8$11}ivFek;!+5*RDw4k&N$_!1OkMIdl{-+?7fIs1*I7?mnV}+@}3q?z^p+s)aj@;?g+OG z6{J4pMGXIj(J7&rW(rlTVVpVgk9>4I8?|B-*0mb+Cb;N8HF5`PaPntMq0dheqj z1L(E)dwugMeV94n|JB`l2SxQo`<_jftmNDVKqL!DPQnk7j7U;KgMegc$wbjEcOrE+AKWz?r;q;l@{GE;(^C&UwMH=?K8*A^`1N`%ja>rnz5 zxR*n34Bj`+9Kc&F4oE6jC5$1|1y01`%3DRQKdvN**0jS1*)4Ep!g=Jghh)b+>m`f~ z0eBzG!*N}07}fUe^U6S4CuR~>la7qJ#d*qol}X+yc9Co?K`GYFAhzisLp7$KMB)?? zTFAEdu9m}Ly3Qoc9G`FR1QIl3Zt=e$VO>L1MWY7$pcbRAAl?CKm~XCeX|H^@-6Yp# z&aCkGXUY0swq?v%`l#01a_46gYau}yXNsB9F$%HB$gBdu{AW91Bhz@_v9x7zc(T7HH0%bNJ=b6Q_MkQo`7i&%tv1+VbXxx z#x=Xq{E$bq$rZr@u83(B;l&AEvn|8nd)xhS<_jH4(VQUq!+x5Uv?5W(yz$X^e?z`2Kq+KzSkBsBFZ)&4{ZOTSJukCDaG(usTkwoT0T zWzWL95hUWvYTgU(BA**>W`2tYiTl@O+=B(+w)T`WWi_`1ufSMwS~R8Khw&}dj;+NHwYVr3)n*5cX{z@%Lrb+^1&03LMB~paq(< z8!o%HBy0dbRu+MZh*9&~1_?mLaAjQ%IryQn|Bugdk*XgWLV+AqN7E;8?(oCpSzCTM zFnQl&%S41NVkko5r*S4C?foFOFH{3Q)d*kGX6gOn1yo91jovk^eyeSJ0TQ*mZkOSGhEM`V$X;mNSF{~ zdjfTvE5fqVxX70~<4oFgk`6uSpR&61!P3#{?%vHPHAlhcP_k)dd8GXkaKh80&J*v+pF2y+2?h4!py)D7MD^K|PIAWsiw60766t(V` z7?Yx5D46rf5dWPfUWQ8YihmQ6=6tqHCe=1)h1`WR{Whc5oP;@fC&wVh=e&y9m|}EN z9^qQgs6LP99vd~Ceg8L`fn6q)oUMki{H>)7mRY0JA(*&KC^Y>ow+w8Bj3X>arlTjtVzH!hJFqB_UaX$wJ3CE`Yo9Y8=hdgbL+p?4PrO{JzKZBVlOxpvg(uV2{4WB`T;UQ9M?uoeTca_>f$N`#b_G(D*%p6CR=ctkwhw{%460 z>(#xZtJ^EIa(Rb82%;`C5J#%%7DWMs1S0O6RS&p)G8eEQ{ z<+c@-+3lf54KC+AiQ#;EK{9;ToqOqA6%mF}u2|e(t@R$c`gkttj_%1!SnOKQ(TALs zv9bk{mNq2EOq4sPlfuq#%w={Xb|#eL(J$gKKk1=rYYhQ!{JU5ou`|@K$0!`XJ7>P* zhF+!)pUUziM0e9$y)fk{pTFU1Q~vJws^N!h4y#x;J~*2~i+j!9N995DU-bt2X4v7d zr@<>{@HwkXY$Lg}6M4qRHnIcd0_DL%i<2hZ*OpkX?0j3<&k1vkP;ZH-2Kt;MzhPgz 
z{XlVNczCDi;%_!suR?6tGLnS5+x6%Wbk-;xqY*YYy?*(UV4$9yWfj_T|4~Fin>Sdg zTku^>@iu3`A9;dWZ{1p1&r02r+VGVmQy@Ophu(kxW(F(T1)VYhs!)l2V>~L;D0E3D z&r5wzAs87HvYa=4I%v6i*$ED>mAN8!6$-axM?sf@NuL}$fO|vN`$yFQ`y?w_)m%wH2j_Kzq_CnogBFod7RaFL*k>Lrvb79o z;QT#+`uBZ&KBbdxqfnQLc1-G4!|YqR*;&>RH=lLv@cUuQ84Nt?wXDF@NzH$&>aWebz2BCT9q}gf3&k0Zv=#^0lL#b#OM~NBD}?AcP!l^ z&y}N>@h=fc?1o?FT1LxaQq%3Cr1iz6V)jR7K0BFHbqa;r*hcZKyZzm^N7y$rjk!Bt zGu>m4&dd+C)a6J|>!_39*jwVKT3D8tcj~1w0nG57_-7>?S2%*cC{vw~a&yASIU@h$ znJm5e?`MqMvkbh9O%i)n^j=${LCIOL?Wg)oW9O`EHN97oFtTxMOYRUY;HguuW!TDj z3o-uWD{+KvTC9>mjj=$LYg2g&Y-TO|`dl!Bp;4cBUhMG9aWeC&OKUS81!WVLhSXT)B zZ4yj^;DrwY+eNTuv-7ufvW-`!#P&M23;A#L-I_hzmq_$ZpmyqO za+`atjr_m-706DCRxa=R^M*ux6W+m&C!u4MNlAq$Jyjv+u)3z=2 znRZUhyO8nE7l(NP`VUQ84@(>Wq`P?_u*HlUsa`ZhZ9d#5f*MHk-!Kt#*k~WVakk2C z=l9m`Dn}z{<|y7OQ;$HpE}R&nwCb39OsZSTIf{EYWt-Zf(jQEz`0mVk{KNgFWY+`J z7|3SyF#3&a#{);2y_r%1)UumzGG^j-P59&Ii)UKU&EL8gbEa{(A4FCRMFo{8o^E~u z@fEys%!Rm1{w6f8VjeN%mbv?&hR8GbsA5YAM3+wPq)Fu)<_v~pta#(iClJ!6niv)ju>mXv&kCuq;yzb?CJj8$X0*DOUY>xa zxq4>9Y|^Xx?Y7g|(wZru*R!gMfgu5%5=HO?%gYSnby@2Cp6b*QwAi|&OvAdBb`+> zc*O}%WgzO6&;7yAml=RNT~JB(mXSXAHp4*fEL2Zq7^j7vDu``vA-1Qaphn^5P-znu z(MUdZUo_Hr*v_^7Ggk)A=Cb=&d6_l)r&^g=6N)Tk+0HTaBaYBzmx zS0aIiLd;XU3+mr6aqNW1wI$yPYw2B1V-p3hITLut$b9Z$3xrc_<*G!2`sFq)Z}v}u z%f7{erL}t&Z?~)=Ntuw{Y&@#xr!HrE%JoOib`L@5CYra@AuZtWIr*s1^L;6IOL0_} zTcnU?koO?LCPMYJy_e{_`e(iF`5lWzp<#?~kqju7gCOOSNUJq$u21n|y*E}@>V-eQ z{yFc|GoJeS27r$YFTJ;+N;un3*~Y9K`jz>Hn@NORC3dnCs4VH zU}UKHrHN08!0f?I|4Jt9CJOPEkK09wm8P0W&uMHfrRg1sz2zxz#EQRM0qNQIfPiC@ zuJtQt494NA$14s?xH@Cwlp=d$SDt*=Uo@|I#nFy!zFw;e$?L5hKqX&K$goST`ow6S z`30LNIUQW#wA=0{8?j)dXvh5ae%tAOTP^vi+8Rfghz`1sC-4u?dJ~W0}rZ;Ry zPp_sQ`}BO&gL_t~D{B2n>XX+eK;|t@!aELxPWB}U^inC#n4D`^jEyM*Ai1jLFLLod zGu`tMj-M~V?tW6A-hi6}O|XGU=HaC94jcBxyxm^;<+Fk;%2R`=nMUgc7c6AB*yfd0 z*GQhos3ZOkjX!bys6-oSZ>x0gw@tkwABy37LCbW;e`NsM4lVds1sja)>_MSPBpnlV z?I@Q0xe4JyaVJ@@!yZ*xv@$X!=huwxSCjEedt{qq+n2n>RhNAg`cHloiBpu)a-j9N zFss&x<5Sdt+VIk3Okii;eDE_bAcE4V$W$yn=8N91p4#88`0L#+CIP& 
za-%S&F^#d!vZDPR5Q=@2d-0s;IJ$-Qr7(*FB>4{mZ$GdMkn92wgfAVxTVPOS(oZ#a zudH@N)T@x^NwL7qh6-KYL6^T1Cyyrwk?|WT8mDN~`l1ZYX z{!5SQAk4YE4n#&@FKw57rZ-Ech|aohYCJxO@GjPDIr;Yk9&Ln^D5ad zjF^uj)xZT8pxt`8&weZeaxv2vUj7SDw%iP&wk?f}4kUif4M=`#%-ktFiirl-83%m+ zXyEk{3l)N0E22(^01&a>=EWhOkDq9BpmF<$-;MwbSREe{#z`Nzz)qqXNTeVbsmyyV zS9K+K?0{X_Fla|HlmUUo4y=B6w=q)wZeWkr8dR4++SMJP2J+32w;&y?)QDUI3#h8( zNW?cU`>}Q96Kj$?pgX=<(47@3T>RK+WM*~pzWU~K=vKNl>b}lmDzkfT+|2OE~ z_Kek+ca0xOE>{;A0Fp+|ulrB`AkS{XIKQO*$9yAr_RZB8FHV18a<`tXwFHo)ByoO) z?*Ab({0H1HA3H_pq~-;bS{=CVzN=GBqPNQ=M}@A}ezm^T>wR5cwJlQHix9@i^+)}( z+B;1oj0FT7chvNjGJxwN+IIbSDlaUIf8OF%Ao#+lG`%59#n5(f)m|KJq49#}IZfX- z-fxrYJ#jC+(OBEOzOj(!%cl!a^3pWwbZN!ZUERj}bO8zt69%yFj(_+xGVxO;i)sBi z8%W)QTSeadi8!{Apol_}j?Hpir`NBnv??gP2f{{dmA@M0Oo!-Jb0xYGs^HgK$nr}H zIKtt49anGLbJ>+Kmv|$B(b#PRt|!?0?O{m8q}RGt)cOIG4kw{xTsf1sC9UKKnkA-; zUckPW9LfO%FRsVt^8~V3Gs$MZp&bs)jdD3IE17w};2AKvf`8`Zn{LCXadMH`I{v87 zNi%vUlK1H*uTxT1hKQ5>)Nn@Q-qRDyH^zg5F7V4q1+--pC3dgFt%Ljxz$Wyx; zA_c9C4jhV4c!f5GqBb5F;h2D5zP63{+t46iHO+B;g(Q?*Z8LAQlF9#Mw^+@w%BI)qvJ*b2X+ekF%iDP_PRKw!pmIr0uIj%7I46& z)12K1{^~73rn`Z?~FJL~UV=z$Ytp*!K^AN=&`>`C?z>^E2q< z{Jp><5#`)I(R0apj>;NNr)m7LIPx9y^C>Uc?rIB1*2!kDeuLC^zw?zHA58$`ddLh6 zda`eW&-TXUnz<^4G4do3ifr@bqe+vvV1@F~+&Dzkm<3Ix|IFO#&apzc1ugdEHiaYM zo@ZFEZO4)~f+gQMds!Vucs*&;JETwilguZ9Y|tsAlB#1Ky{Vl}N-_6l{qRHz+65Q} zcimNy?A}glur^=-=648eK2lUMM>c3Q=!CpK*aDkTP9O&w;bS~@%}ksA`1H0N%_fVh zXd-u(3OLpJkAGWyt?Xtwyx^p~l~;EW*$dZ=k(yE%7|v;a0}E#?o?Ool;;79(L=A9-^=7{Z1ViHk9PTV zzmN{!b({&B=PBa5ucl{sP3hOO94FHzH#y$yTo2JYNOzqFM<#uMD15fE!)&zbI(P!d zYOUdxMsx!YV!qy-E|_L;vfGI+sCjYDLQPlKNDG!<`u73SrU22o2v5_6 zu|entYW)^;WN<33<_b7fw5;nGosK^-sqMfd=NhN@BxTl1+|mL?~E!dR2D(%KSaQ zL?*BxlTU0D-Ipa%iB_x|uyrq4hf35=m&C6kk`Z5PP@9m&AILk-bix$cEkaduAr(q9 zj;P{d=ppXor7)ZGA)neb9smG_R>cFxkMTw>DW?lh|wHLkse?33*M{l&nlKO@(bo&A$A7ZnbV~E)9ZfK_Umtb+w`qwN7 zxxTMsU*s7u^l(S?=98GvFO?;1XRr>KNB)3QT zVHY#Ly=PUNy*tZ*3`uRia#Ex2E_cp9<6ix$h`yT219Bdj8^!v;pM!o#t_6M+w~)C~ 
zdJ5C~(=uuw7yhhjS%0h+PPGT^e{3{ZBF$gm(Es`edVR(-42G0LXQg~e+jc{UCp@*ur{^e&5B<#o9H(uhiCIp~ioY#g#KK}PrE~aV<7aUXC4we0>;>ABt`S38K zVR*LfeZPBS^P;y8T6%4#9y*h$OI)d~k30i>w{GaN2n}5i& zytiHFfh~l#mBmtbZ>%v)FZw@9ZwwE6Wq;nskQvX?8uCXroIbuXxi033MDg!m@(=RL^)NvH`WxU7k2$% znN-j9j_KUWw>mrl0*RO4#B=he`!(|IxXMPkQg%v}iV(f(T#rXNK2+@4gV-0hB=7NO zbyhYD(b-ed2L&%N5)4{Vf7{eHenkKNdvKMiQfe5@4WF4eLLX*ZUK5v2;;TG)9FeA5 z#an6A4%r**w>;rAeniTv6*?k%8y7a|{EMaIn~=4Fc@wZ8@Lh(95P@qs9}xdB(D2(S z&MqYC7iG@2Y6dc|@FCVt3^+PLFyZcv6BQ5ynGqWiEXc7=)oL1?RnfB5QtMKD0%c=~8S8?H7T?U_ z4UR0vCqb1mD4^J}N+WF&)OzclG_o~0IusP>Nzaf3Kl&AS8zVCwSRn*u$M{+mam_XOx(Yw*f}j#H|N9Z zVNSdto9=EB+?%5Ot59jRK}Rvuk!H)}3;TdCvHCW9LlC>Jw17UxzHo-GGTO=vcVBjd zqY)jdlQqz~b3)RNxL37HtrILGM_Kk7VMT4DV3@Lw%Dk()9a`!+_X{;QuyX{DN}2BF zSRNBXHPgDan*ZW^jQH}-Y{t1D0HoCz*~b_CSZV?AvCD*CBaHNW=GyjK;|IiD=}~4owH_H-y+{@ z2J-1d-Sbl%47xibe@Gs)4Z}TzH^_>dmq9t%#Ob}79#+jor6{h8p}>dvd-~hA%1^QUagf#!>W(OUS}%NyHk59?Ne%4gq?>@UfiSbBam;P zJi1DW%{}3`1pu7XIBV(og zG(F=ZKEO=HrxTnDN5CO%3(A1R%z;MBRk=xZBtKyy3I!=CJF zEOa$&F{-O_6h{ zt2i1~zh861Cnjfd+q)Oh32xv#&IfzfBqTi=PHCdIyBdJQmUpipA!no8R}#gRqmA7R zQV4x(>1}c4%dlxq!>g(jLToHT<|T7`%b36j&|Al2#3_Q8}}u+Qt!`MSPaKR=?i zirD0dSaNKk17cN-=;Awzu$^U>S6TMrf^u$8a%ne< zFvAd;$$-PTobDp}Y{Gj?%nnY~70CtPJC;j;?rmkZpO!257Ic=@(k{3@0Ea$OlzfAkH!|FS*`D-?ARb+7+< z+*9mJ&97`PT>My^8u-zL9tlOuKV-V^KxZFS7Ae_H?AJj1gx+-nMuy~Hb4L|X_l#WV zqy@Y=9*&#okXUFd_(j9&aPD*F#cpaq{4xOaTkZ}{f#*Vm4!K%g^yuRLym-^s_6CE+ zvhA%B6|I)6b0X4odBG~v0}GxDC#+6AyuX{{sJjrgo}#>}(A1Vo+FJ1f_7~a5yYf=@ z#;+#Dl5g>t?wC-k*pQ_#R6{io=fvq`Wm0eRP=A_SIw>;H(Nf9i2JxC)!Qlz5t4TAc z@FF=QBOo9?`))rUF{eEGbFFWjqfLh6x!dV5+LpS*9Ly=6#c7ZEoBv5~h$RrTtPuJT z++o(T7+qw9HI)VlDv%ar5>hIV?m&)c*=DJ`3D!{=CJdHuHCxh<=mI@vp18mS#V|-UK+nNvs5Y~A3EJXAr)VYNY+2`-k($dYl zB0}3k9TcvPM3_&sr&god{BjN@_WK)hYr9RUROx$h4_{`0aG&Q7{47X$4jIJGn9u=) zUNRi-p#j_90%I;b8}?{8)zsFmvzYAslS~!tecQ@rnd-$>A!Wo;Q$*jy z{EdpChzB*w7~3zSXDolC;W9enj<+IBaxkv%+dLHx_KJNXz?i=g6BGyD2` zjoSgDrhzs97qzgh)HJ3#(A>J=hmqpeim8m69`QBnQ2$>800!kxMb-cM_ww;VF0!Pu 
z@!s~WBs#`SyydH4za`l#lO3D0*RDPr5J`Tx2FD%<+)TaKLb7V5=~}*z=kLMh@54`r zv^s1Wv@vRnc?{iUovLfvk^B@0&X-HADaTayxGkW2n_OV|6#q}-;N9tz2z&BLh-b8g z79)=YYaf=Feh&}b*KjqiU1=@PUjgcLiJEgzI)APBO*cK~#cThBYV?93V@06fclMx1 z+w|z|?TeC}9|>(d#;q*L>_jSp;dAQBQ{WKbTxy&R)iYpKhQ9{O8)UV#e`frMt=1g$ zMO?X)Aa~oQ4ZxI`4KrQNf?4=OxEEIZ`PKerM9`yI;lk5ad2?-x9RhB@YYNQ&2((l8 zuOZs`+=0c^!@np4aPZ#)+H%035_p0d>e{n8eef3&_`e5!`QQJR9mSv>JmrePNsdeI z7)ZkQzt=e+{?FF?cDg5Au2s}*By)CDXsybnrswd-8z(ayp!~V-9ssoCRQrm!juhBY zWR+2Rx2L>Hd6IKh`hUGu_aGPQ_6BEd#!8PJ5#@(KZ0@Y)dHn=9WG?sa0TDyHQ*5 z%yVDOigIwTvT7y4yCkEUr`me=%7T zq#~RkE42Ee-O)(-BKex~F+FyjaHTeSeqHH`UfA)TK2CP(r=9J$jW$#7;UuTx?Q9LV-<&}tm{9!qzz*#?p$>hKj!W3GtF{;2G<}9KoqR^>HqUj@ zYquDE(RZ=46YojaY-`oenR69E9e5b)W+7mM9eRyH)eGVKB{+4<&Pcb>vg+PP&K-yU z#=i7D_jMl1#Ic3@?a5!5XTCtf=2*F8?gD=6!0SEt*N>#~VO`lu)!8EQ%9y_wDE}-^ zB0(y+t=R_s>)~cUagE#6yK}UCFLy_~TlovgpWfK?XJ_S8W37#s>@YNH;8b~@!(ii7|Lru4$F}k!&iuFJW_Mxc5YQVZa z5jorTJbTN8Z6#wshY^?{=DXV?_S$wyu`hwj()`as`F-oDM%cSAx#wxKr*k5N?>Xps zA?~+i(f;UwwHDGEBvlez3LFbWiJ4!?c{C8UvM>k5h^M2PJc^jC%=L!}%H>He*c05b ze%r*f9HC2B{WCQfA@<@3T#4)|$WDaU+v+=p{K2sq_fD!AE=;k+-mO_-}gxESro)@Rbfx3goXJZwA$hiV5tm0 zL>WjRns4o|=0T@F#$3##0IB(?L-=f?-s{YhnZn?X=d-@d3e;d=drfep5S65Cxf`~> zA{y^pVa(_kqpA~ZgedR=kKzS3;xWjBG|~~H^UybwN@f5oKF?5TU-j};3a7o_HN*X@ z;;Jp#vAC*hM7xSeEwvN*y`Xnn`*s?aHgV+(sck#J4zG_GKK+ zu(MT>Y@V1{PYkaAs{KI7)=7euG65Y2KAP6kear4E{`gVSYz(?`59 z1)4u&9&vo_jM=Y)S^*C~ap0jve`y;@2NFl+9uj2&>E%9hST!E|xi20|=N_s)UtH<( zg>#^Q12Dbz>l2Ny!wv3^c7CHzjiGMaUP9T7(}Ghs)8LG}3M{p^e+AyN0@|h!9mfwp zRkHIN{4|V)?Y4b8uo}`iB;Uspl)<1ky|g|@h`%fPad_Fb?gdWNe~RBk&-*=p~Rgc1KE{VXXXVzz3ER0c2MoAsGxh)`Sv^y z$UD$c5LYKeV4(g39DNtsKdJlXwUF_F6yWFPZ6yZ$0~`6Sq4%ei_6F!bRs=AV@8bVf z4WGtNGoRhn->p&w^eQ3o%al?#Bdrq%qXuVfW~167m}ds-+DI|&>#5$MB$^?3iW85a z%Bqvzb)o0n=Y?zWtIDSjK5h9NtKSv~42XXP8fjMKYkxAm#g1Dl-wSuw_G>tO5Ch45X7 zE(v(sa>8Yf4gp|Lg&?r3lj*-WE`PvC`OYvPZHGS?-OIwTY zR+Bxjd#I@k?yuT=UQD}hQPjL4VCQRtml*>L+H1onU+L?nu;lA(-SeOCb9ZV6QFE0> z#B&Tu{^dIRH7f6Telilz-B4(`;ap%_20ydl!HM|4QJwFCxILn9WEP)4YyLp1F%@<+ 
z@9kL8<91%{w)?zP+uGNxHv{t&_WhnO;GfmEn|p>|+d_Oc$FaZUr@O1nBra2B)>K54 zq7HcGBl|=#tA}ky)^lUKLHn%xuF}U9WT!$gr3#DYJ{aANK>b~+lb`mAfZysad#;L( z#VoqDN{WvmX$7xAYd1R)K~tEr3x3bqTj=PL25Y1wzx^m4C(-+H-8mqfA+ocgV&H;m0W0$Esq zTsD|e9Pi-2{M2k_``?nVY=WgE3FRMV6~1aOSz#f5ryN_JFS_xt0Ur&ZG@Tj@eR_2C z)%wkMs79q~oDz-p8wrm(G}oNUerSq5()yyy=S((O_2a?UNss#2jv}ovw~fat{P`c= zud?eFxLqhu?nSvt^-;IN-LobfWn_>?I6360f7SwIy=C1m6@K@{OYg4-YJ&62eo{We$kMGSWFiWBl?r$^FW8~oD z+0eID*7rE{YUKUqKOTSebNGSf?p$_uQk-CBRT$5nW%q9E?GddlO)|jp^Ih{;u z;#?p90G8+ded0C72MT^f+6EG<*)f4xA&XD$U)GLi?LUQCI%azx=^foeCOzumieu$x|Qf&R@bo$T=vFn|D;(ZxH|OqU^3hc8 z^LP;YsFw->t4`tep3A`6FIe}5uk`)wov+|Bq14c3iRUjPchR6yk2Os__UB{vdke6x zS5l>VvB}m$5xoM`_hm-s_GZ?pYCfI`f7gl0xhqBl~;zBY&BeZZVXuG5}uYKFg zoPYPum0O`n#ursHgv%6-x^KdvzqL1cZ)!(^HOsrFC%*+EQlF5b0GF2Y&EI{b%!qY< z9_YvFIvUpd9Ik^tyS(T&^9vI3%+8ZHAC!}G)0I2c@8W6d?YSxRKet;D(w(HkgpKP-~7g}6Lmcj-9{ zou%ftw-Q92P`Ac~i@Z-Tq(yb^b~qJxR#`}Y%8eXtN%y(BrPliE)86l|RtcxctqZ&E zMk1A07v0FOO&-3;XKGtSTg=#n6>v*Jx2q(lhaFdL zzf(1AA--bX4WMR5xk=mBIm>jo``23+T0FGf#!$sZeFfYgf?+$s5aOKQgYu;efeF?z zii9A43VdOZR@rjrN_bwB(RC~dZ`cf&#he)WskF0vxKb}X4!0#>jM~MG zcZvbzqbK16$teHHYoPHmLUj)N2d1AV|@Y;NS6?g>3^p9|KEy)UlHhc*WF?r z)c^za!{wo3>|t%`VIytjW&`{Ki3mRw7Z8>d5E0dXC?+i;CH+X0Pxzs2?O*%;zZa;%?1}*eKx!)5|CB3Pg#I5$9aPK! literal 0 HcmV?d00001 diff --git a/doc/tutorials/gan/index_en.md b/doc/tutorials/gan/index_en.md new file mode 100644 index 0000000000..ce91c80abb --- /dev/null +++ b/doc/tutorials/gan/index_en.md @@ -0,0 +1,19 @@ +# Generative Adversarial Networks (GAN) + +This demo implements GAN training described in the original GAN paper (https://arxiv.org/abs/1406.2661) and DCGAN (https://arxiv.org/abs/1511.06434). + +The high-level structure of GAN is shown in Figure. 1 below. It is composed of two major parts: a generator and a discriminator, both of which are based on neural networks. The generator takes in some kind of noise with a known distribution and transforms it into an image. 
The discriminator takes in an image and determines whether it is artifically generated by the generator or a real image. So the generator and the discriminator are in a competitive game in which generator is trying to generate image to look as real as possible to fool the discriminator, while the discriminator is trying to distinghuish between real and fake images. + +

![](./gan.png)
+
Figure 1. GAN-Model-Structure
+ +If the GAN model is trained to converge to the equillibrium state, the generator will transform the given noise distribution to the distribution of real images, and the discriminator will not be able to distinguish between real and fake images at all. + +The general training procedures are implemented in gan_trainer.py. The neural network configurations are specified in gan_conf.py (for synthetic data) and gan_conf_image.py (for image data). + +In order to run the model, first download the corresponding data by running the shell script in ./data. +Then you can run the command below. The flag -d specifies the training data (cifar, mnist or uniform) and flag --useGpu specifies whether to use gpu for training (0 is cpu, 1 is gpu). + +$python gan_trainer_image.py -d cifar --useGpu 1 + +The generated images will be stored in ./cifar_samples/ From f005d9178e15280554c796aadd041609bdba69ee Mon Sep 17 00:00:00 2001 From: wangyang59 Date: Thu, 15 Dec 2016 15:58:45 -0800 Subject: [PATCH 02/39] gan tutorial v1 --- doc/tutorials/gan/index_en.md | 125 +++++++++++++++++++++++++-- doc/tutorials/gan/mnist_sample.png | Bin 0 -> 28721 bytes doc/tutorials/gan/uniform_sample.png | Bin 0 -> 20609 bytes 3 files changed, 118 insertions(+), 7 deletions(-) create mode 100644 doc/tutorials/gan/mnist_sample.png create mode 100644 doc/tutorials/gan/uniform_sample.png diff --git a/doc/tutorials/gan/index_en.md b/doc/tutorials/gan/index_en.md index ce91c80abb..e3841c4c9f 100644 --- a/doc/tutorials/gan/index_en.md +++ b/doc/tutorials/gan/index_en.md @@ -2,18 +2,129 @@ This demo implements GAN training described in the original GAN paper (https://arxiv.org/abs/1406.2661) and DCGAN (https://arxiv.org/abs/1511.06434). -The high-level structure of GAN is shown in Figure. 1 below. It is composed of two major parts: a generator and a discriminator, both of which are based on neural networks. The generator takes in some kind of noise with a known distribution and transforms it into an image. 
The discriminator takes in an image and determines whether it is artifically generated by the generator or a real image. So the generator and the discriminator are in a competitive game in which generator is trying to generate image to look as real as possible to fool the discriminator, while the discriminator is trying to distinghuish between real and fake images. +The high-level structure of GAN is shown in Figure. 1 below. It is composed of two major parts: a generator and a discriminator, both of which are based on neural networks. The generator takes in some kind of noise with a known distribution and transforms it into an image. The discriminator takes in an image and determines whether it is artificially generated by the generator or a real image. So the generator and the discriminator are in a competitive game in which generator is trying to generate image to look as real as possible to fool the discriminator, while the discriminator is trying to distinguish between real and fake images.
![](./gan.png)
Figure 1. GAN-Model-Structure
-If the GAN model is trained to converge to the equillibrium state, the generator will transform the given noise distribution to the distribution of real images, and the discriminator will not be able to distinguish between real and fake images at all. +The generator and discriminator take turn to be trained using SGD. The objective function of the generator is for its generated images being classified as real by the discriminator, and the objective function of the discriminator is to correctly classify real and fake images. When the GAN model is trained to converge to the equilibrium state, the generator will transform the given noise distribution to the distribution of real images, and the discriminator will not be able to distinguish between real and fake images at all. -The general training procedures are implemented in gan_trainer.py. The neural network configurations are specified in gan_conf.py (for synthetic data) and gan_conf_image.py (for image data). +## Implementation of GAN Model Structure +Since GAN model involves multiple neural networks, it requires to use paddle python API. So the code walk-through below can also partially serve as an introduction to the usage of Paddle Python API. -In order to run the model, first download the corresponding data by running the shell script in ./data. -Then you can run the command below. The flag -d specifies the training data (cifar, mnist or uniform) and flag --useGpu specifies whether to use gpu for training (0 is cpu, 1 is gpu). +There are three networks defined in gan_conf.py, namely generator_training, discriminator_training and generator. The relationship to the model structure we defined above is that discriminator_training is the discriminator, generator is the generator, and the generator_training combined the generator and discriminator since training generator would require the discriminator to provide loss function. 
This relationship is described in the following code +```python +if is_generator_training: + noise = data_layer(name="noise", size=noise_dim) + sample = generator(noise) -$python gan_trainer_image.py -d cifar --useGpu 1 +if is_discriminator_training: + sample = data_layer(name="sample", size=sample_dim) -The generated images will be stored in ./cifar_samples/ +if is_generator_training or is_discriminator_training: + label = data_layer(name="label", size=1) + prob = discriminator(sample) + cost = cross_entropy(input=prob, label=label) + classification_error_evaluator( + input=prob, label=label, name=mode + '_error') + outputs(cost) + +if is_generator: + noise = data_layer(name="noise", size=noise_dim) + outputs(generator(noise)) +``` + +In order to train the networks defined in gan_conf.py, one first needs to initialize a Paddle environment, parse the config, create GradientMachine from the config and create trainer from GradientMachine as done in the code chunk below. +```python +import py_paddle.swig_paddle as api +# init paddle environment +api.initPaddle('--use_gpu=' + use_gpu, '--dot_period=10', + '--log_period=100', '--gpu_id=' + args.gpu_id, + '--save_dir=' + "./%s_params/" % data_source) + +# Parse config +gen_conf = parse_config(conf, "mode=generator_training,data=" + data_source) +dis_conf = parse_config(conf, "mode=discriminator_training,data=" + data_source) +generator_conf = parse_config(conf, "mode=generator,data=" + data_source) + +# Create GradientMachine +dis_training_machine = api.GradientMachine.createFromConfigProto( +dis_conf.model_config) +gen_training_machine = api.GradientMachine.createFromConfigProto( +gen_conf.model_config) +generator_machine = api.GradientMachine.createFromConfigProto( +generator_conf.model_config) + +# Create trainer +dis_trainer = api.Trainer.create(dis_conf, dis_training_machine) +gen_trainer = api.Trainer.create(gen_conf, gen_training_machine) +``` + +In order to balance the strength between generator and 
discriminator, we schedule to train whichever one is performing worse by comparing their loss function value. The loss function value can be calculated by a forward pass through the GradientMachine +```python +def get_training_loss(training_machine, inputs): + outputs = api.Arguments.createArguments(0) + training_machine.forward(inputs, outputs, api.PASS_TEST) + loss = outputs.getSlotValue(0).copyToNumpyMat() + return numpy.mean(loss) +``` + +After training one network, one needs to sync the new parameters to the other networks. The code below demonstrates one example of such use case. +```python +# Train the gen_training +gen_trainer.trainOneDataBatch(batch_size, data_batch_gen) + +# Copy the parameters from gen_training to dis_training and generator +copy_shared_parameters(gen_training_machine, +dis_training_machine) +copy_shared_parameters(gen_training_machine, generator_machine) +``` + + +## A Toy Example +With the infrastructure explained above, we can now walk you through a toy example of generating two dimensional uniform distribution using 10 dimensional Gaussian noise. + +The Gaussian noises are generated using the code below +```python +def get_noise(batch_size, noise_dim): + return numpy.random.normal(size=(batch_size, noise_dim)).astype('float32') +``` + +The real samples (2-D uniform) are generated using the code below +```python +# synthesize 2-D uniform data in gan_trainer.py:114 +def load_uniform_data(): + data = numpy.random.rand(1000000, 2).astype('float32') + return data +``` + +The generator and discriminator network are built using fully-connected layer and batch_norm layer, and are defined in gan_conf.py. + +To train the GAN model, one can use the command below. The flag -d specifies the training data (cifar, mnist or uniform) and flag --useGpu specifies whether to use gpu for training (0 is cpu, 1 is gpu). 
+```bash +$python gan_trainer.py -d uniform --useGpu 1 +``` +The generated samples can be found in ./uniform_samples/ and one example is shown below as Figure 2. One can see that it roughly recovers the 2D uniform distribution. + +
![](./uniform_sample.png)
+
Figure 2. Uniform Sample
+ +## MNIST Example +### Data preparation +To download the MNIST data, one can use the following commands. +```bash +$cd data/ +$./get_mnist_data.sh +``` + +### Model description +Following the DC-Gan paper (https://arxiv.org/abs/1511.06434), we use convolution/convolution-transpose layer in the discriminator/generator network to better deal with images. The details of the network structures are defined in gan_conf_image.py. + +### Training the model +To train the GAN model on mnist data, one can use the following command +```bash +$python gan_trainer.py -d mnist --useGpu 1 +``` +The generated sample images can be found at ./mnist_samples/ and one example is shown below as Figure 3. +
![](./mnist_sample.png)
+
Figure 3. MNIST Sample
diff --git a/doc/tutorials/gan/mnist_sample.png b/doc/tutorials/gan/mnist_sample.png new file mode 100644 index 0000000000000000000000000000000000000000..f9c7bf7ddd7f148eac4fe347e9c38afaa8876760 GIT binary patch literal 28721 zcmXV22RP7s+b$s?gsf~rLXu>Zy_1z>m64q?Lo%{MD3T;8BZO=jWhU8VCp)C9kcjwh z=l#xgz3077=cNC5e$R8S=O3o6sdA8%iFD7NJqOiP6?O6dW4r$&A;90`wEmUu+2gpb zrYNW9`E35ZhY9WFKQdJp-Qx>C%jsx;7%|Q%@HqJKYU)QiV}XJfA8kcHzR{t%R#mAm!H>6P_ut`kLb&&yp~%4eS6Y2RG( z_V)ISt7&V?EtDDDA7Z{lNhs$ab7G~MVsE4KdT3>3Wv;B2meybAwIdf<$xW%0bT*^& ziH0?&WvK}3^Y`u}_Vn`7Ung>ObljwVawd9MOPJi%*;!8KQR8H9rp~@?9aixuDaj9= zeSvu7L{Hv@?R*iCpmtj1P0G~*3f<}FX|Y#%aXnBMBoi=y~eZ_{Iywgq9GIP*Ws3ahAeN}X6Q1&& zm0j(#nXtz5vx7QKI}|-{u1E ztj!J9>(GX$jQadZnjhHHYQ3^HKa_Nfl!B6ymtEX;UFY?s@srm+=06a-z5FdDg)@dbH!k&Z-qlv3ipt95b9YMde0a59$nhgp<#2<5bC50a98DR<>v z`Pk`n>lTZQ?>qhM-dQ)@tR7p@AHThZHta>25{4wiqhx&kKH^pjPvHv>@Ed5q_17;_ zIW;-iG>+g9;l^^yiOtp7*hi1j`hVh=)zf2Irj(SFL=xtrWwvcQU&tMPKH1C@E6aW_ zEKG-wR?xUKO~#=ovn$ze`xtc?LsdSD=bbyprwBOgN<~Y zB-FIDqJG=nUQ7vX)}hnq0@&~TSr8Nuz)g9*zxq=99scppsGp2CUSECa42spNnKoDd zyv`%_^neS!(n0bZsu#0+t&0rv`iI&*iv}Hp8E$?p>q=9j2_4mEr}F;QKu;HO^z2z? 
z9%nN%GdVfAwx_3A9$z~2#@PmcsJ=oj=6KO?hHZ>aRpCBU zyIK>^EJmEDVmNd2Yo<=Br2E{!n=6(1`T6~Mru8*hy+0m2pb9k|Xl$Hmjyp~lVTGfG zXN%t~BqT&}K8u7TAT1-K{thW2fe;O?+yfjtrbuNkovUR|av9xSG^?}S87xxn-v-7G zaVbA(vrf>^uXMlBC%^7pWt1y@XU!o&ZE|t3+MS}4^kuzTw=F-#Cap@+6kN1X{ z5509fAt+d4l%4L5azj4b8#Aqb42+Bk@$r|f11ieOa!ksXarZ<-M8a3m5G)2o$m^F^ zR@$sxm6bziu85)f-5dkd-gM7C85llX*U`~oCOq(Tg)U<1*JPt+7w5~DFV)jOTNH@e zcHT{8j%X0^-SYG*uGx6e|I}u$xoiB>d?wjk&eAe1G4Y6^aBxV7eaV-vUrqXscb6t{Dc|>8 zn`0bkwl0zo5^6v8P}W;|2ggfT%46YclLbf8+1rK1#l@MK_t;4SYh|evH8tB&l_BJe z1XTfe;rWiEs4z6&4`DPX$$t(O&7cjY>DpOXSWJG5G;CS@^=ofnlSP50jZL0WiF#_+ zqsNam6GYPM7KcW+otTqPUJJ>MU|suL>9JTy;(L6bj7;^TN9gJwqgYyWXrm@^2>AH; z2J+48zm!~$KVj-S`}t_DzLdoWlBgIvGX^o6c4qB#$s{2Gf%}!MI;qNx)tuDICa zLY5u{oAg;pNuw-1UD|YQzA^{b8()f#9XrOM_PKrdRqE2(+RdTT`7-B8CQ;i5v9Yc{ z`vg^ZGb17*So%Y?`3PN~rDWo9xt1#_P<=!95SE<(Qj)#AiDzOX^bW(xqF}(H!0F3q zV>FwiNCJN0(zNS|6L|vz29e4|mQB)EZQ$e zb@_7tTgTBl-iHq#&VAq{*)4nA`e?N&%lLVc01dQQ-uMVG2*Hfmv3~X|%Pq^KH90A7A+P%k%6=ZQ%ad@{wY@i?m8ow|*$H z)!|AyZc@&7XGG)V|NK4K7(Mg(tQ2s`gO(`@i6^)pEh<8_7_ax+Xc48phsop+O!m{;IQ6J3+&`#Asz@Wsv3S{N(afi_>GP>h(i}b_4k@HF#P&I!;uJ6B5Yz z`c^erTCh&-k!;fUQ0I;T&~qW5ej?=|bi=J3y%W#ezJy5{gZN5?n|T)z=fm%PxyH0g zXyx})Tk0tK-dxGn=Bsd@Z{FgSmX^kU>&7idg#umq^~(ulS07xzHxi1c(6vtXu!8H{U`*9{7 ze|FUfIziQ#)~2QdBqTuF;!)9zr>0SJbP)|^!VaTV!yHH4FkFZO$Gf#;rh0OAlyM{9 zmyR-F+@OEuM31aYw{fWz=I3|n{GNRvvA2sNN>VOZejM#e{rve_*|fJ@5e+H*KUbEQ zF`i}~)QB($o9z+*j2ipaIVSS;z=pT9=!m6=1V`~cxd(X84CEo-zJ0@GqbGiL5NnxQ z24rtblKP{d>Pr=-2$X46X9;wD?(A8EmbuUA5lxnoPGff!rbAQTAEKtV5KA=t+9CIJ z8IYi;uy8C>M_YTT-_)WJ%CTaUvY`NJmcIv zaoqL&><3kuA(clv55i8`%n)S#8csT-YvjQu)o zGbtQ5LdOEk2r}LlJ-uqeN9!wkTxNnxdV*_PYl7|aqWnvzt`sG_4X^joxQXM(ucHvz zmZgQL!#;ohygjSGMgLUPZh56cW@dVNdTJ`}(W6W~{$Tls&!0;>4CGUm^JdHD8kKNU ztG&HBJm+^X<(j;L!tqUl5OW|B^Zr8^nD?U_x^z502jvjiYgjk_n_bVzTQ$0OJzk4a)i|x8USUCIoZoeJ%ZBe8M zV5ej~OF~9gFHGL(%-l#s;2#~W1vJKb&I{zmy8T%bh$VUbyZ7%wi!ft81_cp@DN@tW zT)gmy;2rTnrmf%orrc`HQ!blo_Q}3~vlKaXvU}YZR58XisR$#OeQA}BQmdh<-3anS 
zmEhi!Y(3dYIB7{kDOkL|cfLGj(@xGP5)>f{!1L*rIV**}VK$E8esYr1!KpEq(6Lvb zy&>0M)csVaRr=YVr$rSc|8h(umWt4BwkTTjAV890Jammf}owY@5#QL_aX~V%(9S6C29|s zsl7da%j0qJDw_`0EIqGppAHRm1O^745)t7G508jAUY~n$xni(0J=23AK$xDO(@uI-n`lW=1L&P2?2q$$>u~+I}2e3)fjS0N_rAP zilf&a*-=JK#`0>lx3>dSywuJg*O~bB_3KxQYuDPWPX!TyxT6jBf~A9U=u5Rw$q~xE z(n`K53Z`+bH?xue?i z*90muf}JF467K`mV=dP+xv(&jXIhPU@@dKi(^DitH74lng2&=W!-o$!SKmu=aG0Z) zfGefzI_sPVJ>XUoDWI4)IEKQHZJE-~KF!N(^QG8sb*B2?+L4@euo%fN!e`FBT^Oze zp0WqG1=1_?+mV)|0=S=Oh{%cak(Irx!E=g%oIlPU<%tKx7k>4f=R@u$Q2sT!P4TG4 z#zy1ZzSUJX21howWXI}%`suoiksmP>FEKb6Up>lBQs_7;9i9>s6T{D}_fnf=BbHk& zido|GOJ#!WL`BX+5e@aBlsiAN^9H8O%^BQRzv~nj>F_qwCw z;@!J@{AD}A`f_q|Vq&PXdz%{^>V)T}TH8_=uN#J#>5a*O0=GrH1=naOUreVPDaW6JA7Qf_37!K_+f-{ z%n~j^*QNiwyS-9uG!OQxATPf#@;+OSUo8f#8&XE=8Evo$hd}}GbZMf7f__)o=E5I-~|+@-N8}uCE3i zUNzmb6fv~0NKxeMnsqbE)z8(RnVK3da}Fen)B}|USg+Z-7ZKqa5Us>cMN3C#ZEgLT zHuU!G+xzxYxUIT*|6R2UPiZz5e)HzdZbi4X1;-f~7<>v)a)68g$hru%3xWq9T||k| ze4$m#Ggiv+pFhbeie9{U@y>jb>TV8X8cHV7dr3hp@h24cV-z_PPG5hVoqau!_#jxy zorSLwW*Y&50A0^H<`0FKtEs7(9l}-MK}v~>5A^nyfR@Y;y`>~4cj$jJ3FKr7$LJzZlHY?4kT!>DdP8flK4kD=X2tZKC&b9;=xAB?rKKe&SJ#!_ zzd=gG+H4LSuL80Xpg1TV)!oGnaRG3txuc`nc`}41f{!G)4s~gm+t<_6)BO4C6XW+_ zLfL~3efA}#TQ#jGty>vh?D*gT)cEg<7c_RC_z*TRVIj`a{Vz*5i&^ADu)H~-V|{)7 zhPQ9d;z=B}OVw0-}~tzdOhzO7hi? 
zTkYeA5BXB`m1SknY})NhLPA4N803%P_ngiyY?Is8`mgnM&!HZPr*9M4@43#j zJ3HDlj)_a#_wW@Bp1t)$k7xAH@^Zkti^4}VcxaaVjzlVJr=u7srb@Qy@#kOG);9Y` zX}AC9g^TP|%x68Rm=sr%jRhV98Gu)uzVW5UQe@UmGKx8&6nN#KQSSEk=Hk55wD9HL z)7Sg-_-JXB4l)Sem6DN(Zme_~C+JKqEK@P4a69JXRFL#kIrZeRV|KuuvScs1X9p;~ zr(~%t$LoU87E`-~H@5FHo=Vf51rmn(@TK&IF{EkfmEm5O2zSa1!G+0P9SC>k23fEG)Qa$U-w{l4V5 zVUDIzidT5fOEg|=AHlwTY%;#&n^{E;#O%Flnwr)kaxLT|`0aMec4bG^R83d*3k?LEot-Jp zq<}(=iSeoxW@dgmR&nt4KutO=OG5Odr=H%UUVEQk6GT!>b5N2WcMv3sScMNdY(sOD zC}nK?1$FiAWWXK0{WZP4zlSS5xYeqB|2=JbrDq*L8e9kX4$N?8syVJZL(37bVP0|T zTdYyeE+?&Iz11$R2bHHlRKg#Bdt6uoIE@_T3Wc&Tt26%S#68D zeedKy9@@QUL?cVDd8u?_rxy2xavP%r(D2*duqh!XSl-z3*<5?beS5ldB+EcgOlX^iz$Ec8x6r(v7?QOp)A{q~ zO{=_Tb6$tW?XAy0e))|y?eERS>LT$vz?Jhnu{Q?tYlns^G1x)v0wR|4s{V$jcFj#r zuB^^pTxxPXwxGibm61QL*}la0ckktP{$~VxDsG~+i!#{&F?##Rr?nT^bnyQaWo`r# zslMJX6wwfqkkIt})WgPlXKzqg$)t7%F%=~xiI2Xh`8f5X&$1v=4Jr|fr@j3;Jd6nl zj{=jccES)77%qWRsHHGNt96kgr(vPxfyPcF^7^s6S$h0QS9#xo0p&|U%4jODyeKQX z$LT9$q_RpLHE59E=KR}7j~)S54`^P5!T|C=1#oGU+vgzWn$^=hvw~-HP=^_f;o>U| zXM>})SrA!3xeKoaySZ7HRaVOQZb8&Asq#)}n1n8X!3Ux6mgE(4^YMlV`lOf_XMO&< zW!_)wy=;1N>%fZDV6O3*0Co(~T;sQM*|&MrMoMkFUmfg8?XoWs_+h=Yv32a?t6c#b zEvUS_T$^XuRun8B80RafHmY&1OaPDl3se||P>Mr@FZ+LeBxb(YyK6%QfZi6vjnXjm=_%PElch?-6=-)8 zyqg6AwxUTe^5J%*815TrymOv0>6A?rnp}d$p+kpEzLk}giN6&fj&2OpOm zk0(P9=&sM=3;%1H_+rOV>%_`*>FX!vImhFzJKw6u`Mi`AWeH>+iA50Zz2_vFVk-en#h+fU+>A>yW!mX_XU6AuaW1MSRDUyFt==H zoOGWvNY@<%ME0xT(5%@noxsoCyh(Bg4$1{p)fasB=s7!X#?lkj;|mL#jdrozqigcSd`kx?!@8ZwngZN9H>9g}sl1cZcqK?ec7Al%gq zw=zk(WnRte|M?r`^bh?~T>W?;jsO&Qh@!#rIwmG2+S*RrTN{+u_sLON{^-dnJ$g^% z)T#1T-j~{CUcc?3y+3>fou{~XH9N0rg7s7|`zN?PYP$wY;nb6{d-}OX33yda#c&7u zPAtsMmU}E-{h|@z36YUh&LC%~)T)Ipu$Dc(1;9lI^kn8ae~|q6V)c&DLsj>Mud_2V zq2>diAfxZ^cx)_BX(mbTH4eKddL9fNMoLmr5)1yKrqlr zjZs}ghXUAvYdj&!y1TmrKrCSV*}95`+4f0jj`9t}f?T%>Zj$<0uOjG6I%hlF>?PvcYfIrRv1UEG~xxI7AgmTce zo;-O1E$iRLOfpm;YdB|OQV&EYrl*OGV^WQBx12ahKI4tWa9`qwEuPxEL#Wh+pCPa6 zFQ}H3`UJkL{7dcZUK(D_oaL6awqq5RR)7GhQ=%>G+}z(F@Re?{on^(q{0Vl-t!9sY 
zU~<*K0GOMR)XmwssiR|;%l;1L8xyCnlR--#|K&*%0M2~k+_@{c`j}|R<;>E_d9WAH zUJYOevNkp`d6t;Ci3{$BPp@|%JY`prO3+v{wB8|uPcwGerKijeHrxD83SWg?HxD1@ z4cUt9y?)~HLnJGGbBPF7CGZN$O)ym#*| zX5vm6Kyv_km72?%Xx=7K* z4Vp`F@Zc>E4;w*dDq32?L}k2sy!QkxFG4veNw~`=4bblJc%UT>l{sI#diA*mPoS;9 zfC9k5wQJV^LJf+w`Dh8)e!yvXq&|fb3kVEkv7hOX*&%Pb0oVqGWNCT%wSaSfW1~u5 zKMmoy?6wmSubr;Om>tw<8XgU&o_&%I1H!_kuSHMX70oX$eiTuCQ0@UP@3r3R%|DAp z`nT}+3_kUj+MVZ#l)Auyq<#MC{bvom%E*YdAy4S$uECjL513auYP zm`)E~ceVx}o0!9sE?e)7<@9Sqr=9d3)k4;15ov|tWmjOpO&yk`k&Q!A?XyX@762L* zw?nXU3=)OYwBul5@NKRekQd4FrFRuHM?eY8IS+9=CUW8g4PxH!a1e?fM4Jy_(z2iQ zst-9BWuy)H=Ye`U4h&@21y~6n>h>ilD#&;TNJ$HG--b-CLxgCvUcoIH6w@em;jPAS zXZ8Oyv@|d&cOml3%zie=`H)0JW1y4zaqfGTAEcrPx!W>_T8bnmKL&1<$2zzfdsl#m1w9rx%&ON#`F`4I4^IyYyr0*i%9{$ z<`@=1qtd0l1;R|%T!qdj>v1%;$-v~(-XWQMQhIv&jEoFec(3K-{3q7|(GL>ut2`dE z{crO~jm8e%SMUFtsng}++j#uw2fO^=!OB+_k@^pgEkTo@#p8T(Ff z(QaC|w@l5<#6NmO_&j)SZVuEMy1Uzldxy|Owm@kfsUKZ3+iTu}HhKJ+B&rsofMwy~ zwb}0dwh!k=Z+|&-8rENU%IEIx+VS2(EAYpxE?b@pPYDejI3L=AJ8-LwRC+|~UnZyQ zAqi-Ge(s1v2F_n>lU8$%Ki&=<9bMh&Mwpf0QnTR$-c!(rH2|Ayr7w9Oh!4!RAL(6FjYE}u>h zKuCac3PAewceDOcNWkpk;tc$I_4FkJH>4R@St&l8$YBZE4L;avFBv{Rcc;%*l;F2e zCn3h*utfn{vdC8vjXo_{w^y&Sn(&;=p=D!Yvj2HVf$GEX@Ovl)&J@Mgy@W;}d!$0^qWTGa~sCjuC+dW{-LhdCA zDD(WqnK-~jUay~bHFr0M(b~}wdb7kpU_T|RNwMw2`iZ$ZWMrOkF(0U)B7_q``OUJB zj$nw!V5mkvr+XHu9kcy=rgrB6oyRCc2&8zzcbqS_UUMZ8H~xY$!$cl}IauH{z(FEn z`H3oq+y5V<+yhk0)RMzR~@ArVJ zk52SwZH=SYg98QEB!o1jlPRB9LQBF~KtD<^F4p6G zI0y3wbpk_CH%nAV=q6OvIr&&&@*|?QRKfxIRnL`lR`GaXtgj8)VEPXYNl8foX7xrz zNhNp4xK6iX#yISn6Oe>!dHtY=KDK6vnzXWztzBgnb6_B=yTn698l0c(l5^tTWSJr- z2gwA?j!V89|7NDxNnGHyh_D#z>V7VFouLynk~O2hK8_>|TFHK%?}7U%UF>Xt5^Bw0 za4S=#fi3E0JsKX|Gtr|>d&|`|8}b<(kD4Tb*rw@@hz1UXK|v(??hHE#0a2Un_upPZFLl9*5gZvy<02tYwf1*4)Vjt%G98rFkH8LahisPhGS|kfkwILBAYDqP6`%DZv4Tm2`eL?H?|+PDzmY% z6&R44hoY1%>#o^mC{(RakVQ1;nwY#phXU0?Z$fBIcvf~(iX%LQMbZskQ9$fEPLRaf zB?E&e1_J0I=x4r;+$YTnz7u2vyw3*^q^LDBD6|Yy z2sEA-B@*DN-Fg3TQ}~zkCgbM@H%XQV`JcUsX{gP%?hjO&yy1oWq>~*B44VC-QZh@P 
zX1&;Ue)rZFq8vLrI}gt^X6n(SM>Xkg%;dR0W+yWuS+8P=Ja7C@v(-D7LU6*ZPFTpZ zW9ZO1(Wt2HO|iGHCvWf2aX#`pFO|KIt>QNEL%Uv%zF85L8+SuPRnUdYRl58>sE9KH zy8!GGjQEk<9WA|)SX4^F+*rM}eE#04G(Ppb{vA}VW{1-Zh0P`5O;eC8Xu%ixXkq@s zBL<8w|JM@Cf2$}W;yg^q{Rf$f!M*8ET;=BF{rTG~u+}V5u$_q^WPY#+0uVoKI7I%; zmwPoYEYJ|hJ;11WCIi)GWd{0_O;V4Hc^%=sfxIR4e}Q)+B3d9LySTcNk&^c17y?4R zd-rbp*NwMlA-1KarrwZ9YqfUO)TF5NJ}C>uW88^YwCI19VRrAUSFc?3TyTiIm%oLf zSw=<%7b$6LcR~R>5b&VM@=B%q6z5k+M`-TWMU2H&x>;wvR{0{jhj^2{Q}p;b8iH@x z+y1U=YrBCbReJQ(vGC9KB{<#y6S&kJDr^4ml-&oBr6N1Uv+RbwdM2q*l$Y>p#A;_2&RuG1Q-nE{_e#pqUZ)Fhtbk zhs@okzsP06d6D{?hHS`gjte6S>f-HR6T#Wp$A6Y`l87Mn2kx}N4P3ecjD*xBfF3)6 zQEHcXe>T;TBM_O&oxUSeYs&fXYo$kC>$wbMd*xqxE{*vsWW7JjN~xrS`TOb9Cqx>w zpApKXrKht3%ce8i8Hi5#FYm*%*Ee9*tZsHQ}Mdhlw`wG`Lqbwvfxo>a`z} z@DI3-bG!QI&)X9(MgG{x%E~%BpOmA5N(zkUA9=rH=IxE4$0Og9?p$+U8bb;}$K0B& zo{deH~YS(GL~* z!WV+#tsjpnE2Ri8)S4b51jcSeyIl1;mlJ|wMs`g9SSgN+*7LJnKYtq<8Upr1aTvwH z!V@sRF_>q6$xsVb*EHo4!;N2>ic;DIp zBC2DJ!fMx`-cwz|1Cu)i;|#InQNNw{044c$#SVkZ7}xoW$7o4;rakfUVMA+XBqILu zk0WiYJ>eAZ>C?A>4qHtT_5(pmO38#aj>q5Cf0BcPgdoqe}13Rlejo5Q#WmGD!m?)0t+Ri!Oqo?1c<^qKuiFofe2Z{(GNI_ zwSN&iVU%>U1&FaDQ_Rv6a~SC4@LwCBn!03S;tlhabg0RThMbg?pO24SO&96Su0M#) zfqDqn2&;h0o7_IQ{#v&CMP=zGr*-%S8vAIJ^r;yhc+@QDrD+) z_w1b3F+w@Ce3ckW4UPNrdT%gSy}i$Be}>4H83Z94jmdZ_ssIX+*FDYbw8g4&-(QeJ7f|YdvpJiQ{Mw|ABtXEhkZ{=n={}p8>(~2oATEMzXF7ab+3-%+ zO?dqHBc}{g<1a|K4SkJi`q@dW<_8BQAS<5z{DnZN>qPYyby>+{p)1YW`{9X9A%+Mi z%>GMpRuUI`h`Dys5%PH`p(-0?TY>3_MgM1Ox=Wkp7c2|A5aFN_Df7hkkbHDSU8kKV&5S zI2cmS!3ZG-9oA)cnRY880kSN#KIb+}R z9qwIe?2*P_Nb-VyVX7rR>cI5CNz%{$4dizCW4hqP*4`+`o73rErR7lPSG2P_uP#68TKDk8I70 z5MVjCY8=E)t^N!N4)#9-#XzrVt?#X)Df{OF0|A8Gybq39248GP&w6sUSRPLe{Xe@` zJtj>zi;yx1nVI$w=jW z4l-kV%%z9n9_@D08L#&vQO8i%(6)!QHP4o8mY&F<&6O)xb{#rb z`Rv|>p|^R>$;i~JC@MnDcvV(r@KPJ%Z6vcqq2JE76Rn`5`YFNycGz|MZahp(974Hb z*CZ`1?Md4-oY1(-dk%l8t+Bv_fBgJ8lEVjy6`%$p{)(u4Ke#>49XfWoi7$o#7$<44 zJo=u$Y>lw=VWlp355SoS^IO~4RNr19{&b#I+S?%gn38b@*2u{_F;nz!r%{s-V%QLgJOjWDCZiYm_E<#+Mr*htr>UuFzFd7> 
z-Q9OVzvGZdLHBx*sB)iSue+d=o0XMSoT-DEPFzA6ywUw_db+yQ^zR4 z$kKcLU$_DnYiKp|_wQd&*0ICK*x6S`A=D#PlA98xgV`gdZvaL&W zPGY-$_ZUOSg;^wc$xpmTi-fYC5VW@@6B%@x@5OxKSeIDySWoiolJHowP6ci&v2|X{Alea?1LCul;)zx72~Nta*5MX6L%U zy(J02CXPIG487OQ2I(ytok%C_OSZys#Fo=^d(s`e2#gO_Dfb)@u#9eyK)n2~s1+!_ zNWnlWGHjVe^905WKCyqRUqFVuogLuCucfHt?>zimT|g&t#JyZ2GyXw!)bpd z1nzaqyPIaKn+iNtsC;o}9c!EFcG#24e-OAyDq!d=cgRbXQbOQt+^~Vy6y+P&q!51EZBMoS2A65M+^^Bu>4d^LP;ZC&B|ti<|8JErG8Un{#2fi(p#P?$VA8wpaR0(@x-L5TugE7E9~=y4HB^HK%@6V#f91? zgB1{6-I%L+m-%QBfc$|J%b)clNa^KM9;FW3SN;j9c9`=o?r`WT=Jj6}W%{z8Vw^=J zL4cDpfGe81rlsZhv14J}YJDY#_af>RBv%2Xf~m~K#Z~($B0AdUs;3$st+dxF$;Ts{ zqwKyN{-0^YV242x1?8H7z3|4Q9C5U}G#!0B%2eN=uR}0L8Ef3+LJR>zwXVJ%TLEQg zXMnhFK)O&#E<6?8Sg2ft{)3^r_QURMfNpYg-KD>gl^-U+iOy z>2|i#AtEHoKn`5K{yUACnbeJ zdqqTIqNW1`wQ}`O7#4oukbpV>y$lzL@DCgmNDwv`)S}o`<>dqP_}@E?*WujG2`h#M z2LrplMGCO+_k2cBShDeY|2&0H zr6mC11L~?m2zl7Od#YMmew%AIz>@H^%E_wSDv zot|BMG)VF6vo&uN9rxuydJ@LgR;>@3XQjF`9qu#FlMXEoS*zvWWatict_@Yb|7&EA z^wU3Ee}x6Ne@X9LW~P>|Yg)GrEKGPQ9`)x(?{zFvtquR)NtC4uI&T>uh=cJ;GwH9~ zFA4qq&u&lNW&bn-U7_vF=o1Ydv(IoLCJZ*?q@X4BJ@ZUn$N1kpqIsq=Qlri(#5bUZ z-Ez$?vb}**#UZ=4>!+?dAcBbM8?E%96hF(pjEvxi4GmDf$Wfb+!iStGEKNjYhzJSu#jYXqg9&jM`%#Dqra!r} z`hEPnc8TNY^Edld8WZ;(CZ$PgwyKk5MfEqA< z3z`h77=c+0q4Hf3t2G)IAgY^y!*#Zyu?}ZpFIxj_AzD~)%2A92aJ~;VQ zf%Hd?82d)MWNIOFP1tM&cD3ef+h{HYz4zX2Oh;_LvaB4now*I60mB^&A?LgvwYOn8 zB(cH5xp?eUbfa;;*74RpqlgV^Hacp7hnrdfnYH&5I+mXg;n8>j*RRLtl(K%THbx_@#y{uZf?Sq7_}I; zo43vp%0YENQ?H#~`}2pAloS96;sP@CF3nuT6wD+W9?9Oz2O3{dAYAyW2P(a!#{#*I zJp2`_0wUPI3IcQrS3yS7z!8ZGx0It<+tOCXq^i7rH|xj~O5?n5-4;I*)-VCi2ei5k z!Ucnqgp_omu7Z!2^v~ib<;&lQ5dHe|2QaNdk#iQ@YlFh-avnBReDWi|Xz6BCBcnq^ z`VU|Zf)T6)`3g-)bcU)(l$>I?h1IS6d07DQPOch-PctiqMHPKn7x(L?U8t_PmN~vZN0C28?4|>cA-BVhI#N)&*be!DyM&xcuLG$Epot-4~o(T!x>JYZT@q z7q1TF+}j+2$Ao;GIW8y4`WSLZY<^pZKK;UuYnb@}76`EQ3T5lYq=gRc@1>rRHmR-IHVvP_ukLELmLx*aGl5zp4k=C=vVv^C{cfpgM zPEJcWhvr2h2^xr}Awr>xi;Ot%V_^YyA_aqhdzdJi)XeOxnwItuv;O*tPSK#y(4x#f zqT7LkgM+vk*h}IRA1~_Y9IC7rNV;>0!|ccT^v~c`!nEQ}V>KT>D5iE9n3(K_udozu 
z&fvuL#fTkiFkf(@Kww>6Tny&Yu+q5arvw%$x<)@!kr3i1=Xc2w`(MpNsuBCQb8g1C zI(i@6G@vu{Y#vYe1S{2#`%X1j; z&?;R@XtHAilaE*Bejuw%*#|UXa6Yi@3=9>?-M^SJDQ>*%vNag;#B$hP01Vps%M#I3M)+6e}w$ycGbZX8>J@nb+9u;j*cI@E7GImr+D_`0!C)UNHD7 zv=FFwNQk0C?{3e76d<6q>)9U;y_JZ8aHGTsn~r74(kQrJzXq}JrL3$B?q(?_WhAn9 zY-)TS^Qv|2j0^#GQn?)fx z(0!ABdhiWjxR;mL_OC`Z{p?FZ@kLda{(o&T8+V14W@n)?HgJHFGE_5zuh~)r*tZR{ z3@2e~>h5)>n-^wT4w6=OZ;_x8%ENvaC@r!ybR-+tNT71XP6PyulwS zv}w=xD^61tT~ZSx$1Fi@sTAh6a3i|x4VXl66zZqO35Vpgb zsrYUMyt5)i6VQwXp!#8j;tohXK4lQmK;VnGCwF`c@Z_iT5}c`g0sq>Nl4s4rh*lR3 zGQ3Xj@`5lfz72kA=(Y*G1}yRl4gs>G*$E z*$$No8Wgl{1RoEQWzYXDG0>;;f{&k<_dndjLR|?#w8ZG#^*#Y1!89F^g}OR<+X1wm zIogEMx?~wY&6sb{Xr84f-dUSF(8yI_&}uZDrWT7&fSga3PWmj&8r@hg?6CU1OE;_d z>}{rVQdlXibepZy+=i@>tAAXMs&#hoIP{?-Vh;I76^XFgUt;8gZRx(^3GpZer3)dU z)%>^Wp%&B(U!eK`m2;pZym8W;L4W$$A*6+9F9S|88@{n!+VCEe;~}Zj48G65Yjwo0 z!w7IT@cD9CP@6#ri9A^HX2N%0hi=&a7nnehy~WG?LXza+s3!6Q0`oPEjT!m*53VX1 z<%;1qCeOYAQxD-%bwCcw95k#HeY_l7Bh z*PEK3PsI=fpW=feLF`RK%)E6Wh*lrJ{0U=Mg@)gnfEZl?jRj@t>FH+?y9&4Wgo{{O z7Z%-sCNQRuQ$oiqGCMCo>F2jgK#&)T7;gxhx6gZ^;ReN*P!1k#6q}4RJG&VU4^kAx zltIdIf|MjA+Wc|KsfXqm$C9>i{ZBeSoMdhT$2!lARh*}>vDc~}soWlDXT**UJoT{y zme*`0-anyoZB3G**KWeZX0JZN za@zj(BWS2wd+v<+yg*mRLQ_UJ9LWsHfq=O^U)L8`!q{k^@u14(!s zBa|1p31tg?iHx70_^ZI&IP3c+Cw89k!(!}{aI)9ePPltXC8^d}NBm$&^&^tLsp7EI z<=~UNKF}YvSMC9zykv{xq5xK|u)<#3Hi$?3I`uYA-bR@S@;-~9Y`zA-AfaOWY~2<| zSZ=dkFDo0`0K?4>M?C7Vdlo4IMZw{;nvXUX1ONM^2&^S!Xu6z~X_X)iAhkJL zb5~^Q-m_rzONDa93}Ib~>|g|Hj&6LdotT|$A*KC zkA&Z4*GP_IF<}t91Ih+ll3UFsD^De`-6^KYlIpR!SR!!3dqmlzQsDySLtYL~!I|9L z%VJ1KP>X2HReY-fjv|!HUBNWF_iCkk+sjohDSRq{kG2n=-;oO>#*$X5W(Gv@-JA-N zj@bTD(%Cioh1!rLL8d_@A%}c%pNEtjH}!>NX%+&tljO)`Q9W22IM&m?t6W1&2C9%u zx{38D<|8<%YL`dg>OrZUMt;lxS2TAMz)>IBC2neDamYWBgCItnJ-9U5)qYT$4FNPP zEQFezEHLm!j8L`hI6uEd1@Zq@_7Hm}+gL=ELaN6W$$><}xDFhid(qLm=-1a*n$`xP zTiCh}((!c#y1)8k@W}jUOOn@ zXq_-A+cTUPAMdfV?IRQ#4fIf_st~|;#rkW1{~t8}{K-7SqL5&PHu&1m&Isj@x&o=> zcim}mIEHax=-?pG6I1lXiSmm-;vB21ITcwX0f3J9c91dM8yIZ`23z%%GU9)qPo0F# 
z`)Lp#!vX9AVt+1qi(m9ONLxm?o#@l$*A<2Cl@%4caYN3B`S-jEM(Gb85hJ|Wh%LKy zO8_z|q86hwXRi0n0m!PSyMWL=2=bj9IN4{+8=eBOF-_&IM$H9ShmW#$QT6R>Z<|Bg zkuB4u27v*JnolIPvN(dhS#0&a{9+Tg5))#S%PQmR%~+KbZh8mwja%(Yi2u0w^Tb3c zmv5>I?KjBlVKC!g1O@~k^>ah(#@Dik?DxQ>mV+GQ40nSpeY0+OH`i3w>}K+I)!Xg# z7E#S-;)i(nDt*e)H-m$N3FTm;aSICzb8x(@3sRhx)e)7EVRO0A(ztbvp%mpC+mxow z*PQ%#?wwR;xm?Z_SPiqoIP))SS90bl5m|Crf->9 z*^SCjmg3kfafFyd9JjJ<#}_P2GP-y7J~Uw`0`EOXJ_G6;-tfnU2INHSA!Z_Miy;3e zWV9H_AAH61^IypOAq_^xoA#jFI8d99n`8p=YVWsP#D6NiH$*H2giW)6kT9r@9t~zW ztYcNX|G;=*|r1HBcHAMFRi}3v;<~SBgRWb zGA69xLGNUREJ?RmvIc)#d$LURk4J}LFkPug>$RWV^27MTm!~iVLKd5t#iMYVs6^-x zK4j+ni*H;UR*Qjlo4a(kDayAE<{t!gBFFe%dwg5wRj{J6avcTH%Q-BfbEx1uTf1Mu zfPt9UMjC8^KN0Ktcu`TM`6wc^zE!$o@h!)%6aVM52!}D+(~;LhN>$R)14L&uT?A%O zjVX4Ob))4A2Z4Fud#J={7~ed4Wn5fYdB@qgpuyrcBxWE>fLRzm^#Q?`r1yQ*fCryx z{B{B1<{A!oG&2v5kjXtjUF*{JA}Z$JN(?rF|uVOk?4>Lse}rZkac7f84)3s9U&uSv}5Z+~Yl7>k}Fj1i4tqk3)Rgp+WO5PA#ZFW<#VbX-`DZ+eHa^2pEWHvm7ua>7KS=Dq}&H8&!O>h2#3)E zai@t54wHwv|35WsV#>-^GI3iNzL<7S9y{f0@us4nC0oi6@K3cv57U~4&508`!mJBz zY3UlPK&n#)K{eyil3m#rmWIJE1I-A5LH}ohh zlL3b|ApQ+fSq?#!5^PeiwgQv~`}K7-gc8|r=Ux(Q@;E#q;_6+G5;lU!HoKdy$dC9H z^r9p;KqpNHI-oiQa)$LkLeZYQAbl3-UF(sWC=)Ad3)qrl7!SQ-)9}%u0TK ze(2x995in2e9K1^w=^SjI=v1+Cql~ZqnI`l_B!))KS^V%tFPa_d-rZ}@f)fAF>QpH zxLv}IAQ5m=0M~85*F5>4(dl{8yRVqKetejK&~n$|mNyJg<_GcsuiI3#qbo$@CW5;< zGY|SNePMzh`@=U#;fV+k)1*}ti;aQVKfelUMI($LMnX{LoVh4q;ed1@E^|*$ij*6K zZ?LpAH|OGV;)8?7{Dh*x*ZHXoO$HV+2YEw@7~BB-%s0To6RwBwDp)x2!jgI+v{>sq z_Xws$bq$SEIY*O>B}3w4ACSKQR)K>=;o2A*A6aDAt{(s+;3#mre3^djG9VU!QXBs} zFP{jdfC@TDq(wgh=KR;HsB&lbXzA$a*sZ;7YO+SH!W#ySOpWYCGT%`d?u6~8o1zKB zq&zmy5Tg=QUGlT$s7tP{g}@CHjV)F>zo+32gY!u{2V_G~-hr8yRtc60%8sA9Sa()H zg%VDZM;fL1P3Bv1bX*Xde(>T& z;%g+%xQ%&XRlE_Vj(iA7NpDY2*JF=BC}F3w&X3=48evy^5i&4Im$gRM{P`E>vM?UF zR1H9MC=FD(cwnPMwgMscx|C{4#TwP!eH2n;b)UvFZeq;Em&Z-KRgr>Z!>qsN55onP z#6G`HY@i|{RG21w8;Fg|<+(NL6Yc{vMhLP2p8yHo%EkySvjrsWU=OgF`b^~7>>za>2L0 z@2HhrcqV!H;VO^}K%`&~;H^Ww;uBAq4{~Gzzj-7lN$G-hYEYBnK>&I1eVvaKCvzLb 
ztd89~MR~vMh5?cPXl~Dw9w&Nw`UhS!=8`gwo|1p(STpC@R4+WlwQ+1PV0=^XabmgRQtgu!dg!KbQg{+)8G z1o(Fj@^c?(l%dj^NxaKavpwjEcJHHE%;HEhqqq4D?tXP?B#N!xGM({Dm3ZZ0AI(0c zo+u|HsS*z9WE0Zs06O~5^XVGHr*8eOmb6muFqviP^-Y;0g{83vpZ~bIk^Mzb_KTu} zFRt(VE7(7w3Wo+noc=P;I4xu!m?WOe6n-~8{~%7-`P}K?w(p_u?yW_qpUrQH9PV!G z*<)bwlsnYtquS$`>wiaF7XpU!=Mt8U*pHfuK<1|PM4swtriR|zV0>fi#+Kq%lgdV;EC|-3FV_V0A-ig9rBpMn*qS*T6QQAFyeCNr_xp zSpfocDz^tC{1cYiEj))Y<@EpvdSD?BJ>?20f|V7_@}RWLQ%zkyS0;uFf$T!W>kH*B zcJvE$DQ9JQwnSq@qRBj_tRh}T^!pA`(N}G4Q7_xjl;H7cCKQYi;I!BbDlmnmp`l?d zg-qP*(6>2eW@d{2ze;3F)+2nzI^Pclzsze)ktU`X$j}8kJC8t-UlFH>9t)q;#N917 zQ&2qY4t=7fIeK^~NxUqI{|V-bxj-&Cxdz#fT_Q$Ycy2!?g+yq>Wbj}4-NeEPw zy%!)K&`ET52D?{VD*|-lNzW9XcGcgJPvE5iyr`gX8ErN1S`6x_l%yn9FMu4VE?it( z?SzIMxu#}X_XW=cA+&NGSCp7x@v4!#!93Fbv?ZMr4O4oYnt5C8%eYa<`R!fd9ZxLH zVEYKkJH&qSi6Z|#SjHXUf{xs=fjQ?${V>H3vL0vUnwWJY1~*`@l3551!fO85c@mAP zF>!~C49muiMQe33!CfgkjHgE{j_xP~<_m~ij>muQt(0rX?EHMX&Q{U2aeOn}I{VLs zYk>w55OO@~cG|m;^2&DTNWne?fFS8La$V^I29$cZS->`E(G3vxh>QGD4fnClLX-fA z9R6!82B<}c&Upf#L}hpuKRq^f6;lz*IVp0iR@ojsGr;Sm1-Vln3jC2H$Lw7~mBV8~ z53K?hdY)KdZZ(#a<@~Rw%J+Kq=tfQ`ci^T-KD2`S z3T##6T8pEiq@*M)tjz=mGcunE5u4I+$cJevvVK zf^a&PNXFvfJfVnG6l~%udpcpD1QW@i)#y-+a$Mx)&yRr-z+!m)7GdE_{;y>fWMF&w z@A^J6DgF+!O9L=#0VdO8fAGrIHg<)?$Cm)ZmF3P+JMFFJp&R}(VPLb!IlAyvm(cTl z1kF4*H;2ioe#|x;Wj>GV*02_3AuKoAB{8ygH5rW&I61Mi?i9b#qX&baiSTgdu&O*=#*FWxQQ^T+8 z-rfYS*WlK85&qEAmscr%s}x6D*v2N>MLqH}L`lpj#;oNJt^}%O_=0bV%viUcHiXGp z!Vs|`;ipxbb)>n_O>6oTBqiUZ4g&ng-Rex<4*wV8lf!j3>2(ls6MK;KXIdq80mvVW zc6b@fjZIIT8bqE?pa?-3S~ub`#nuk6(<*g}E@^A;BmcjP$3l!O(%T3^(*Bd_ zx}&SXy70y6_Ir?j3f#jK47~?;=Vp}+P{RkF_$*pQf%>}!?jRz&g*}i>M&?8m+5B zLzxmhrjHz^12V3(EduV#I4^D(K~oLt0Nw~#9>#MM+($oIE6{TsNlz0QRRHQX-bDa6 z5l6(oil50TnD0roc`d06yzyXY6#S!tgEtlKmtfQN^mruPRdgbmgLJFQFB$vC*S zgG)q7*4crG?dfr|K7CKmvObsi<}nMNgQYhxU|Y$>qbwZl?Z?1~DW_&`ocx_Pas&-y z!;KsSl>V8K9e9CFxh4Qs`7;COG3KxLH&2{ikvS8OAWK41+2fR79H|J8(OIw)(qZdU z4T3e4(KH;ZDl63@fAE8cb*04=2tvTBeGwFB@6Ckj=BVZF+b3?LTG!NQ zE)y0{2s8uzAHF=SE3xH2ulYAl&ym>hsbi4vHZsq`6410_j#uQo)1F=BwTe~CQ?dy^ 
z0K4Z2dN2q2Ab6v24O;Ear*TlOYFTXKncAw?0che@y*n1L4I2pXUQGmUefuIZ@3-&Y zH@U2yQQs{s9pK-cXBE4+I0>=8AYLT}dL{2AAMhB=f$&f9SSA8GK=xKaK|#Ge$n!S* zv3E}|s0rWY0i(wT$R)OKrEBr9Tj+sB}Rk&~Rj zJzW5LUqE2Vg3ES)&LLu$wlbh}jJ>W9o-I&DiN#8>_FkkR>tW zEj~wx{NG^?IN!vQ-r2is(IYLt~B z5G}SpvZTXNPEKwBokqXnkv!%4FS*y33(1^V9O8^JpSxpZ0F|u2_rKE8ec!+BL zqCz$PZniQwZ35%9bQnZ|6`CSM;HHiEJy?>c%XR8RX5~?tje)L4Cs?@j3T(>POW9@~ zv4U}+BB!j(b8htQz}P&(7J&aV?RQqXUoNp1YD<=jQ8ltJP`}_JR4q-G3={#`o+T}_ zdv{d5wKmBC@j#2HPr zW2JVx6JnR3<5Df>ic1_L0c^clC4#lC5b7P|(0U_hiY#x1hf|$dIp{l9gueP^`r7D)89K&E=cQoQ#zLUx?cmWqYCBwLV+B)<4L; zpR4l@QD6a7sgIs{{eAeNzl}ndL>k9m-SqE{&uY{Xj!(Gh^rrg6K7Zuz zDp16K?QbdOc2$+wx7}#H)6*{cH;PFNqRfb?Al#_H$6U+4%kUp2Q_u_*^0Cb1JtZf$ck$uVMAK*{0*DYri?w&Kj#Q^B2&lHUObHf;* zP!^ws<&T~qsPmvpop#bNN;X5AY!Cw>edI&=Z{0bUphLQd*(Teq|DC1e*uE~XJ#Q_?z6`xBIyyQbfiJgb%8?`$IRZ2m ziG&N$b!E_@L!{E<#0>*O>N-n-(9XdW({a2RsTsE3-W5ifV$8!33bLKHA+v!?-&*M2 zI5wA@oLpNAb4W7g15isyECiF6BucjpyU)MoF@!!K(_3zXoQTM}C*#mGPE1VTH!5EG zjOK=>sX6r%Xj;>6NgYnmkceFdtS2Lt?c3yvO8aEXIKhPg0*2$?m&G)%eb-2@I`-iSM#@IpZozSU6Ssl{c4uL9v^ zWMnM9d0K`()pQAO4ZMDVB6xw{LsomopycW|OKkJVXA9M}<3kJ%#d8b`@Lj{j17;CXQ(S@qIy1GyTUx4!v`ryL- z{?vTS2PXRvg6|QfX-0?{Tt6-ihwr8uAa3&-^b{FhHPeJo_JYt60!edsPkDza;FhuN zXy7B4{udGI8$Lcv>PrQ(fS3L3Gn}08U}5QizP__!5+0MHmx06cf3W@V7S@1yOKuqX zLV^D4VQeh@cm{ae;rxt%1&+)k<j9Pz)CddsRok!VqKV^ zW@HM0GLprn(+jEGZL>^dJdyf)u*1@Q zC|T5Cm}AglgS)!iP9CSjVBy3>=9<=vNf$c??JLD=PJ);I8VZ{09mqrL(}GJ_<;2C) zjTHil2o7Q40FuhBt+%lr->PTUAFdB26P_N-34v6=sy|%xS&?Li26cKTUBfJz#Ere{ z6}C*bVkkVGZ+T0%_D#(VR>h=azfeelBqJIG9A4`ob;c$Nq;tW^jXb~`FsIRcArp|Y z8GiFBE%ZXX1nf~4gm)@&eFH=&6Kybx9&sYtW4*OUo7(yhc z0h=JUn{H*=b_DtI!pP01tS9kKwOmca+}ttFRb0& z<)*N{trs|uyx!=Gr+2UiY57H;S(-qdL>1qwR zHeZdbNM9KKJdWD{OdN9hjrxEe0PJwnwE|^& zziP+5UJahwBypxlO%cG+^rX?+3!=P(GAX$6?d4dzZTG2R{R^Fte{Q`gO=PHXSy)CC z!mAJ8I0Ps|(Nr$Ik>O$HK!ZbvKH+dGl2F|2E|5S77Jken`nlq2E*y5_Y|C)U*zMy- zB2wfCv=_t$#wfe=7)NHrjgN$VfwbxJRZb!RbJAI5g?qr!^3qr0II{S9`!CuVO75eL zK+bh;Yz(AeIQiZ8c4QWVGsoskBB4!66oG8G-9tIFIPp6MIQ7gGGdmJ1eu1n+W0QLk 
zi8=8Y-+k67%xf5%U7OpU_MItoA8SHEghnu`{?gjg2nRI`b&7Z=1}nV)@Jt5OG#Mv7 zp^Z>3K$@49jmoa7;n1;z0K2qE9;T6+}crQW}(y?oL7JZX~6}mMV>9o#U`1ctzaV1LxA<#nqV0;lxGC&X-MB>g(S-Z%^ zVOJ~Jt(V8geLAl&-sjxB^ZKnQ%~xe3M#JXwscgRZm!w^ba7wVw2&=pkHmSr*NT3;H z%BYk!&p*)5j(IJ2t~1K;rct50j^JG}<{;jxf(2#Mw`VV2NwT@ZTF>see|S}=*2#D}rTWfcmtJ@xBebsS^;ruw4D>Hd?rQMMj#!HXensEy$3_r0 zz4JD3Y)t9DJrrW1wKLoCS-Hq8E+r+!>MJ(Mm)KabRE13Gxqct=8-~-(VPt|fX|2LL zsyx{$@UMUt3y5_O=$Rmy2h%9gdsWD^+Fo z8}|F_M4sQq+h~jAx6HF!8&_4R>Ef&FouE+h-Y^nw&Cchm>~+{^xgiHvtT!2$4$mU@ z{%$oBWec7C^hcgSAX~}oClS9n(vsC|KWC|lI={k_clHML{+An>-Y zwT;YOmb7~OpVmEm?6@;*oA0<|`h8;U&cG>Zr`VJfru>2eL+G}W^+sk(#oO=uO?5<9Ph>)e`uTO%54)#)r)cFohxW!D}A4Z17l)3KxQ-Q zvcDv@DcR@dIT)OJmm7%+g({E^bS;GpURH*|+=D zY@@=%!$}qKVRneg&+tHhU_q;LJkaYRZC4gCVZr5|hP?w>WvOO^Uqash-#C=Y(MNfoaf^way)RxOu&1{-WFZ9u zF}~A}JtonW*mI3+m_Av$Q$)i0-MvCaii}qC76h?*Mv8^BapKHr$hcYHI52$=mDuMm-`yT9xqvyN9_%xNk_S5yaj-3#TgZ$_D{# z%-{e+TAu*f&tkDMAH>ysjZVhC)*}6prS&>IBEnLQ$c~h-8?lp-knkcS>rE{#R&v~4 z)QVE8y7?^oW8%Y9ey$IBY8<38v@%Hn0Ra@fxaxPHxsRBcnV-tZ5#1y)I~Q`&JxYH7 zcFh7U1F?Re5ZFyM$>GCcbeOAnS@2GmqPwY^i6YrKo2%#yP|4<|S^Rc_2To*0zy13} zn>Sit5)wROahf>a!%Xg{C-yezjBh}9&s%Z+jZIBF2Aw*v5l)pDu3kf@P8J^dgDO*8 zZJ0y_nzzqPVzQ$5BR0PFQk4e1OVjsfoWk4vJFN%QaH0sVw%b)s$v#0lHVpA4ZRZazd@nZ3b(Gm_Te>#JSZgc=@BjoG`a@!eYwKDLqwO>quc zmf_(=JC?tW=S~XT-HO*?bC&CPUP!)BijvbkyCmy^7K!iANu`oETKcd{xaVEu-z`3& zS-HM$f&KdO?qR;+%F+OPvNYPp*YA5g@=uqVyn7zHd82z}$9ND8t|qK45;o-9=Is`!J2wsy1yNX@hL}xuV>qL*9 zY)rRwVUwuFCPh0uC>*wluvzTyNN&1l+GjpBIy96f?6^H?Wzs8K8?~-A%*7xaintx< zs`OKGK64*&-cf^Sr~2^W!;+tG$Yn&f^+fl>3QUJntQUIcGD?>NXdgxi$ibRGqw)N_ zKI3|LPR@0e*H3K8scHQLqekXN-U~kT9B$KP>Yv3#);&#XNKfLD#{R7F+*#;HFEZ}U zE`35xf{fSr<8sBmX*onH0qc3$f)p>IuE7)BwNVa!!%Q%P-gls&>c@9CalP) zKY0>IAo5G>jn-tW{OYR;`dH58Zk0%(pY3=zDUhLhBCW#we7S^#1hq!NMtmwEecRwM zjASV>?d>dEKIiJVz}I|8QJ9Ig4VU?1KPM3pku@VVpINL_S9i8{oyBA3A~gZR_DH_v zi*x4d?lrzNBUDy`wVsduv5%A;A+a&4=WR~mk;M}-NLw+Ax*QZaiNu`5BkNvRvz%`3 z@V|UJpt1UAR`pLa`GqsKTA0c%=Ia|vlQj6qz(ub+@P9xfKDCJPoOa2T#gLGY6j*AK z_V)+gp13O}RhJ0c6?teJBM+Pe`QKuR 
z@mlxw7irhwtJw})hX^M!*Dm_PED*F=oRz6C8RW&I5>kW_;{QTaR+UWH{xMAlFG*xH z?EnMmHCOF-{(I~UVti^f+kUzH*SNP=4mz!j`Z9W8$d&L&IcP}aJWlahV=D79FlPt# z+>hLIl5E8rcN_lIe5+tU;Jw!F`jo@*;g0oK6^2A7yUQBR(rBedfUJs2DumNVaGMzU zO!agKv-dyW^9kstvI%8T`s8}p=h_2NHBPJM&0KO)w2Tl{-)jO9{h1iV&!Y20p7BHZ zci*3d^QKQXuNacxzPaet&3|QRDj+a=1%Hvx)2U96IPIBHzw@{kL*9!~5s3f@uMm6V zL)9H50%V^){fypQB<2w|b^~53Ha0f830E90y;wLU`s<}HrC{Q_cW2Yp%fs2R?4%3m zndu7gN3R%of9g>7sOd4(SB&{ib~PfB^c#lYY8ZCvufO816|SABq2aSvWU4hBxIUDE z@(rw|0vpFX*Kt3$aRm8{Vjwm@xtJ@JqWUOqJSVBQ9r&0Q^k9Jl;lqwmLU$gRW(>@9 z?7BUdB$d7vfBYC!`K$R=uxfTIoBxaJQE8KtJi0G)IXTZbJ^%UuLLt7qynG_8cNgdO z+#sT%D-n`QU(4Yeau5Q9-X)}sog7d$Bv?kf;#NPlP)24#T7Lj(aZq}~wuRTuW1H*) zsvI9JT(Rh9lFzI{QX8~Pqd$IgJZ*NWn$16nl;I5H&P&$egzd<@XILwjt0+&Gic!sz zGFQY|vd?+NgYu>4sn69R~*-De2PLIfG>3 zB=_~!G$(>J{?j@nuq%(}^^#{c)`omDE0DzuRAF<7ByOZL8olj=kE!(BLGkI+K*k1R zL&HXEIsaF~rM3|PQpv4B&C;SqQWA+;q{SJZlD<9TC#iQY-E1{CohsIgs!js*VL|Fkn_CD4&KS|XU zUvxcGtQs!u9QbniLh@5S)%4oh@Na7`#p<7w%JW+@D>09t%^JAh@1ad=n=HLz<(*q1 zTmA-nsGK3HYBI!|iotW;xWe-n#fcohESx$s!v(xZPs&t>zL=`XO>e3~_jM`sv8f5W z$F3!(6?z?fXW8nuv&Ve)h?&>!4BBZO5E~mpd&xH+t+Q;Y(3(8qR!%i5Qik5OR29@S zwP-iQ#CJ zaZJ3x-Oqy;@fBu@o^8(i@i$QZg99!p9# zg}Dw}^s*K{2-sd6_zlhKi2arM zSOB0S^fC*FMnXyH(q$54?9y*M6ifvEde6ta&@t~OHrH(LUt|3NjqYk2EhKxHb0fnqlht1DZ``a z?t|S89*aqFG!6s@2fw@bnURv*=N<|!)a-tGpjjNGHeX5vLcDLTpw~kBiEZ?nKgQ8X zhI}b0QEm!heX6Au2Z&JJ^>9nwZ~r^HOPnUhm#0b6bEC(Hqg(yjeg>;!U&A@I--NKK z89rV5c~4qZe1rqnq(AE>^jd)^FM{mW0}EiV_1bu}LJeBsu-?GHqt{9y(dk9VVAaCY zH~Yz(tAR8$zI-r2AO}5_tjve$fTlMrImai%8qA*7EtkrW9bZwV@ zW^NH|!{ndbY%Qj(!h2HcoJ&}z|P4l=ql1!<$iV_@q z-%Sk#+-_kFD3}dac-}Ze9wdum4vGx<+ckTOhd0e=L-0=cKP<>wqyp*yx=XF`6h6hr zqH>0rNK0QdQ2@wxxS1r{^T=hLPz|O@xQKH}!HZE-rKp$2Yn>_Q2Gm>A_vKf}X`}oN zjvlM)pSi!M$a_wVcc3#z5QvE=KJ!nKyK575gwRn&8vEh!-3&@W8`{Q1>8KPx*Mo_0 zVaEa(pzy=9SW4a5Om|=(v>TG$C)s|EW?-i$pUb^HdVs&&B{xRp86`dnc|Hp1nKopP^+K{>F4LWdpIFZ6YxsP9m_s>KO2Bg>%4sG6 z=T6e<_&W}>ByrkL;Hh|`hsJrvN7FHgbSXQ}VOphKP?ZmI&r*`gUxps}?_+UoV z)=|~SD0OaQw;3CU*s6FbRpT&4>G5MtDp8lRx;hg>!|QZmfWE^HF~mYh-)Qyr^+f>U 
zEnzw&_Ws_i$)WS$!NG~wm$`_&w-$CFbLm>4Hhf?tWiT3cOdKXMDoPu?`#aJFO;w(h z4qob0s@R`(mbaZ2maB6->#GGz!8&S~A=>WN=)v?2y8M+dHt15U#$oS%;j%$4(Rp;Z zzxDMLwLT=_0r6c)kBs`Ww1A)SM)L~J?1TOfSMOVG^(ZVHY_#IPyY7*D6#?=}QzQTIO z)Qxb~8d4@ECT^HdtcqD;p7@mLR-R|r!?ZM9s=%y}AvKhln0Qa6z_??sD~V15AYHhi zZN_pXP*QdiKIk4Kam(V@U1G4v)D&k3#6*aIdLikC#{rxBMntD&*Xhm_A;SyXyR#b>$pH)pYxZ748{cfciWS(K zDCFQ9`*}N6JCtmD-I>35TJ}N82kr+lKA0~b^!;u-Bvo$4>pw`j-{@?<=NuThyHn!q zKu%6zN6A`hM#sMNfIdpt)co(kwwUnW$7vmjI)@S$*YqD6`}=e~4UlnY`fpjKKmH9u znOa%b&uWomOeU~PWa2l}UnD_xUoKlC^4g?Ws!hzL0rn>1_js*T5!y$p;Tf<(wY7bK z^w?OAv?Cf`K+^8@dl=bINd}mEtC?f?yDC1%cawwMTDZ_v!UnhD?L8wRzlaCw#Pw>o zujhCGfhAVJ@tnjAsb!_#==^Amh?zo^<#SVbqoWh3P^*K5SOAyB_jb@zM~4u&WH)1= zAQetLim+H+ z5-zUrHPM z=3SC|_lh+@4!Rz3GP>k@z$B=={rt#XEtasPu|4e1*I#w9&6jj#dlYGrretP@0_4(H z%mijgRRIsb&Jb6Ofl^JkLqC1o{OJ5x{-XMsWtPAp!}+M1x>`r95Gvc zyQrzkoEz)64{XXdFLT@F-ry$}Yz_`!H}~JwAuq>X6{u!2BZm84?$I=`CCLh8tB>T1 zYw)tRxXzA0I?m%po9M~I!xI+JoS2xXs(|e#33Lx2?XZ9ge*x`8gYfC);nKFCzOaCS zIXxf725#J%oRUkwH!hVuQsLbNjpH8+_6^t!y$!6dzWUGY5D0IS4C}$3Rq6WW67Cxd zwfwwQGBbwM4Km=C+Py23_bdmnRsSSFk+^DRn6cO$!1{LiRk&Ef+>qrnz__^zSu zjJ9-S`{Xx)Vu`xDYL=F{01hV_O*-xIVi^E8ik-Y3#xdP%?(x@2PuLuBu75W|gfGLf zH=RqEHZ_<><*=6F@@KZ;VWZF4V9?y2%NYB)4PR0{YWA{zRc;3tgSk^5AY0-VRm7~~ z%4dEnkZ|FI;~JTmEdXMK_kpbfZ<58hd!lMjgKyM^KN&*>+XqMZPS%G$`4?v6z3DbR zjKH(~$P4FUM)Gh@-hOvyzTcg`F7j8&on;Q-;f%i(-C8l*?>C~+fF@Kuf6r_sn)$x` z9FA|Xng3Z2Z7i3Ah2m1U=w&fhd8^sn^6c$>o@7doKf6Wb<^)-Te}Qbtx8cSn z)?TSEnErmGxi~Z?>T25E3xP!+A7j%{11TAw{#jCpEL{%x*z&u!e`f2@AYC;3pHpI> z2gEAvvLz!WRFN@Z7`Ryyb{j9p{O@GQ`XwzAv3q5`bW~p5K=sONYok&4baBaF#}I+F zXWl%>Bo{olRgDpAFXEpGJZI-K(l^S=%KB?3kRr@fPDI>G!Il_sHLP2)2W4fs(5a=Q zwMgtE^--l}Q(SBeWWT>R$8mj9q9sOD0?^|`~>*V;sn%Yc`p2QZ5A zdxZ2-?Vuf{rE;(U!^dmfA3Hrg4Qqo@Tvv~C(|DsK9GS@m5-S$2koe^DJpoziKC+3LGw}uF^oNs2|tE~3%u@Z&Hc9FaIUstvglIo#gX#CAgk&87dCrJ z+7GVd)zn)PVbCz7wHg8P%TOzo_weut=P{E1l=kI|IG0{)cW=6iqe+}vj*iy@Nl7^s z6=n$mhs`fjTOK?Dwa&E@P2D2P8#4KR`>H9mwGMB%`BZ_@22NbYC;mx*Aq29VT*Y^! 
zXEBjtEy@1je4|i6l5(o5k+MMUeE@Jte#5jI@5WGUf@J7Cib+6ANjiV^o<1;5A-$Id z>O#v)truCI|BYXTA))P)v&4S$3K{r5L=fK?(nG4Oq9UiP{3D?(FHbhnU>*i@m}=)} ze`TS3XQT@B3dr75YV%UeQ17 z|GVE|%CNG2kfqYK0Q2R!nTz62Sctpw40!b9-x~a(sm#}WOaa|G{*7JK9N%81069-) zA7?*IFS-LHGH%_*i-+r>mEAoG8tM0`?7x*fxVj)vv3Pi-od2l(KPhOZftr2jC>>v~ zXz@^1)>24E(8Q@Eh+5P|$lUQ!okXV_2G(ZV@j;uH1;|Z~%f}Yy(r%UJyeR^+?1Cja*7K zXfHgWy&%CP==3Wq!OuXlA5xbkn=iKjwbB}VdwYJE>W0}AJ$8>ab>$^@8g(zVh z(B0eHtJkkqW|v*-M?D&oa<%DK&MA0;O$8=PQM(uXDz-j8z)-eZ_o?X(X#cqWUTdhx z{5tMcPL{Zu0+YeXja_cjp(0big`y>P*IjSGbT#pV0`g`>tJOH^$=ob*f6$0X5Tb(} z7`%=}DBM8}7@zTJ;%p6pX!koJ<_o%L?wcZ)Dho?jlCAhoKpyCpOC#libNy)dtl6oy zfkgai5i-ai49`{V27~l+RENE7ExQ(5Jw7;1l3uhQ^A6DfR_G$JrIz`I!bg%fqOUh!UTN`#TGv zl?MrwJ*WI{{}Em;v(L(}>m>Xut)uv|m>Nd;e*{Qx@Df1RXO^wMn{J6nLT4{X<778X zBSCT?I(=d>MFfe+E<3kJhGk6rA6|fJorHsLusE6uvK(Qa>vw#*tDL9*#BaxbDp;X} zl^Uq{XyKnPIT8 zJEZ#F@uoy*$YH;x@F5mN#OUE~np`#>`=@6x-Fg&#ATD0`_n39ioDJy*#j_7YKXP7U zO>K-K>Gce=ecB#Ivf)ItB7r)dTxz(G8emO4oJoyXR7`D z#~DFtQtM4V;_=ci#S-GlBtFyjTs!xrx#`a-)dHnv9=;*5`g!s|Xl)Y@x1d)z7fW|d zE?ZFwRLn2g*&94pxLW)n#TNMVyO%@zDd>e&V^N;bLw7dAD%w}V1?F`{g*yC<(-Lm-{Ynhi z@x$rf?5KK`%Xt$oM2k4vlCy!J*nfysw7iSes%ghvPG6=@V0`urYiZVr;b5dS#I#U| z)rwk6c)Ws>iUQDC^#v;h%vous;dR`Xw@UOiZ@xGUr{=!Juwn={pU7*!tj3)cd*l6p z2c19(^XCp_05v~E&HgAQZc5+8Di#ce_{bS!MM(*KM}VVj;_xOFE76a;?V2S;?r^(_ zjirX=88Y|D1OE_szrW#%jw;M*wzRtI3W&`2bJsRW@L13k02#Xm_ax-b8Z!n^=k~e% z)+w5Z)1~#2f0i`+ld7-!aajI%X|--;)z7vNWRj$sZT+lwkIHKk;8vyY_TL@R6l_@h zoIrt$S-GoC;t{8|i6}Q}F*uw5nwc(_aPRF4XMJX|9N&EtHBn}pLS`C z_4kw8=byT4AMEw7yH}MUsx2$5$`IVDVU+G7)wbnN^SV>*;Zfhbz3;GIh_)QGS`<&K`Ap~wlcD{6lPVj)}p zYR9v?RRvjs+O@c>N;!9WdU})#jKj|2;Gi<=-a=;5fuOq6+Fg^uf`3nOn0o2A zeE+?RVTx$vY5ge(T1$h43mzou?K>4a!pqg|mDn{2O{f;YPiOFyUWhFTTzxGG`h*)2-eNn8PiF!wAfv9V%+$qg4gIU`cidZ8 zkpMKF5rj*5x$|z4d?qF`@$AJ5P7X;bvjT0{BTrWaRH~OjV=XcH0Br+E6kP^viUYW2 z@|^c=n&wYcIa`$T8rUiZiSkgY&HQTs>9fz^TNB3Cc@0DnNUi@Vz1jh`iWG5H&edz{ 
z1+>lWa$qO9a2h%M-iSH(d4a-B&Ddbze}gWCz$=0Mgos%lHG~BODSG`}aXl{GULH8EDLFmQ%x8LAOFyVOCob{|RIA%r63MPZGO+Vz_g{D>iH00qOSfB59Sz(vC{cF@LkLxEHCnTjNiLE|ZblF80SQ zWR$k%)aB`t)_{Jy`O}ly@cTO=ube=JM}`1XVA6Nfg{odj79Ls#@M9hW_zzV_MUQve z0$o7wo@@x*+K4$;y)k0^AEyei1=7zlbv9aD2eC z`Af!?BDhnk$q^5c^3P_XNibM|qn`WRr((_L#g~i>X&69x5P`v1#-mm3{>>RA^|NR9 zGC-n*x}JxP>O<4{VEz>q+sC$)U;rP2St5nxZaKqTPy0aRrStXXHMJ4~Kaf^LXWm z(FEDd&bC?T)oY+9-0#QM#DTn>-O4KO!u`+r@9G}r7tWf7jEbtfMAW)wsY-h;Yk+0P zjrHT>tq&vgISKDF3=nZVU`$v-mvjro`nW;519)bUwq-zBl86y&3z1!uzjyBhP5#ch zaevm!sWnYz*k+rudl~#k!FELdFgy(wCDX;T$z^4#f%FgMRa8_mQr`#xC4jaCW7qpk zNN zJmciX_yu26QAEA!gxn;V7D-6sOjU1(H3{g;j(x)khASm1kRI--s}ytb6C;uCa37v( zHkQDZm#l#qf+dHD-5EmJ50dU`iu9vcD)ef$R{I#M0lsHa{suP6L8AkSaS}sb+f6qB zYYU zNEQP#1w)MOEFoA5_|b3H*~~0OnsE*f_s(?^1=x7C38E;yh;-sC#)1_|4(7|ovVmn) zi`ws2b<&aG3)OoN?VtCwli}M8rOa1NlB(}AmlQs-vpyCpnA*3Qp(N>Ax@o%?);R|g z5JcH8#l-`?8|VG%+71?$b4`D4;Fo{aB{AcKJNN!Er^$_AGbTOkoY=rW1U5+Zy}8H0 z*9XL63eq^CP9t%Zr%T!<>C4agx- z+TXm{;?JP;cLa|Wy2ai1#%+b3?nC&8=)Qui*yLSvM(z_5&Y6-M?))K1q%wYgsUMf;?{VmSAz{+4CD? z($f6{c*|cus?m5V;?WdF3hbT~Yx}7B&g#6)ONyUZgZv+zu!>FvN`7F2ZGwM1FP#sg z3h&QxEoS;>&h7-ph&VNRyqeAU>z%ZWUsWgFj^<1(7=4)dfM_bvoHpK3#6ytIKF;E9 z4&0u;z9tUb!x`6%Cw!_8Kc6kpd_~4mcPA}6n+$YtjhTi7Sc1>3$*q9iZsN2oc#(Dz zp^5qJlnimvSnc}qY6OU~5bZ0(NUY#Mc-z-`-dQ|YTL1aM&Ch2YZo>@y_!b+PP1Bye zDO4e~ben)pZomC?0!;5mMm*SLa_!``i`B$Z-(b@ZS=y!er@lVt!$h-Kt&ScP*CP-H zyclwXCmj0(TR7WzsF+ zGQe!Hj>BTP~9!#G74LF##@EvxVgaJBm@oJ-a1eqsynP6mc^ z>G{$ogG|(?YUzXK#eM3I>2=zD=_*MorV>lV3ke1`~BLfw5jNsNHRBj^!7i?hT{TIH<;IQJZ zH!<2dPZOmYV$XBh@CJeB_G?)_f!6?a{@DCZ(pzo4d zc;(Sn=5r3x>vVK3pdeiG{tg>|+nImEZyF?LW{Peo59mF3o)qJpNSj+xA<`$8_x*m` zOgNY8KAB|9qf%-DGr>Q~W33N4Lk))&5N7e~0OY~UZ2+`XIn`)q=O^$Yqh>{U(Ed*n zP#No5|GcF>Yq~61VS9h@yQDT-pvon|rcCJIJ`3F?#VRF$Ob_LUF& zor-lVa95LndJ7*^jfMOuIX(R$y7GE6H8Rf-($g+aZ0yQD_8;m$KwYT;o4+RfYBzyo07z_E3nJv36j5u>^2r;;pjv}4Yx?o9<4CC2_LM! 
z1{2_IKkKF>;#K1~o{LCXO~@HTH=@OlP80EG%=8{P48UFzzsKDAV`DT^pJQm==rE!I20f2px?d0MLUI9-Tf zuDivEFatrvPqCKPm#7-MXx_E8OdLLzkM^`$sJ*g-fN;{pmeVL17sX}DYTveoEmTBn?WD%R}hwk1;n0VaQ?F)FTBD{CKo6>V;g5By1V6h>Yq z!7$i+iW03u^HqOhtIwI-Y%AhJ8JD)!)T3oltK*n>c8LH;pr^vwx_$Fi|8DdNp?sZc zmr#gZJ~1v-dEVy3&SJ&<%3j<)&#u$!$zHbZKhB*F0ZfHY-h!fL=Fy`7b{5O$R}SJz zpM5_ok)#_gdQt6*+u`hZE4yms41>*Mte}V`xu(WO0qYOjLmrKu*EQ22EBa}c51QNs z-Iro-*-Wdhf*F&+=0#@skcR*eX3Mj;bQd1l-DG34!9b~#$;bCe4F5P`9KDzSIAQn- zad>{~i(~+r7#dA0cIBN97c_d3M79|3_e6;K$__YZ<5W=r?y_k1$#pMP3Ej-^vzd!_ ztoBuU_{s;->OfwYV7KfE!s;^8 zxAkoKV|?7ZI+Dqj?-c&%@VrxaEtVL^872QI8m{(S@~i~YQ;k#!#Y3SYa}9 z8E0y>W;JGmocL@**A-_KnU^-pLt&{Y7_B=0ea~>2D8?nO1Oe0%bMkeZ zszSQP5pXKua5O0B7sQL_EX0z#wxD_KH)mTX>imc1HESWoLd*r8m8)~+37Aj!@)!>* zMcR@n+iLFnWluM2r)4h|x7Kd>;~m9J7d>O17<3>K@aAL8(Pnya z#ua~JP_bcp)ui8yEo7fIcCX#r+vA$i9aWmqjbUs^%gkKaE^v#U#}rotl;#qvMTjT(N@g!?@>cI>H8DiD_sw51jQm z+!bD`yZBM2K`H$KU7~FJg7)B{5Lkr_n4<&PK1Irsl*SWkMp--wz(b|WRFO8f5yhxJ z(p>Md52@ynhdq7$O~wza=M8W(&~>A^L#|>xQl_-j^!`w+p~;Um&}y%FX*hS^KFcB4 z+gONTfNWtfX6E{!b(Ttsc2If2t-2!fiPnQA${0N{X{8Hsl&?b6B3oL`Bg4cKJlG=A z?dEKDGZnKx^~onB#kiF8)mb)e_`j2iKE+(p)8))IJd_{PT5PM*Ce2i~DRRq2oAma{ z)1`b7Q4@SzALdCc3n~|7me$+2^-HJh#pPUMPBEAEJpJ=EX}7KMa|D^?e*TbHT%uY0 z#31iC(JY&y(Oi6x6T!EJ-!-%E8Sgf;arm0we4LW;7(YprAbGxRW6XVZ%3VO`%J80J zK3zBS%GF8sYE!tYS*}&ZN6R(c`>8Stb3z2tv+}mLD6a>%?z{J~j7iDQ)%sNkxWarr&XVuR%bpf1WLg2B_eF>7iY7i7p2GvNz1rHpG@dw6&cg_q7 zV-4JgYNE-vgvu*Xk5A<-DB=_e&)VQJhyBB?V{hu_0!Pn{52J+VVua zAL<6?a~rg=KQRRP6yk=ZlQYca7L6vkW~VcyNKZt%b3!uAcY^Tb%SE*AXp93s%<=p` zxQPF~N7Zq4?B+0(C!hvZD5cokZF5ye-HhLPpaRxyaNz%FQWXRp+O%|uw;V;a%eIxN zo>UIvg0{=uCWXU^1qHkjhTxAyZsh3BW&a2L8u&;gncs3+JOFH^s3TQ>q|Dx7DZkG_ z$OZ&w7^;pHmmNVLYEkt7usGAIS+?+?F4nF3tA71!3~;q758xO9y`}>x%wyD>c1H3E##Pf{0kl5UE0$S8yUw=ULi{9l*9*HT+-sMs4f3A0GZB zaBbh}(-@s9fO3WWyu4Wu`35pM;GRNXL$U_{ckkzR5%A7Xm9lA<7Ek2|g<=R?yUv&L z>60f1%aw-NVfLshxrD|c_u9JxKIAKgXkgxk}g@@aP zl|8`<>I1r+Dz?Pz>XCWjXhq{W$|WE?5k$2yB#T)BCP2sG&S+*yi2&5~%>6LR z?D=O-Gk`q8+uJ+ee&cxykj#D}dtanuMA9K1xOVSU9y_a(9T@9Vz&!p0ts1;` 
zG3WE%>|NRm4c0_LP!jxCRrT)@>jhI#9byg_^?sCCr$#$2^E>a9j`k>2tP_=k%Cf$H z2!(pvX8Jn_at;T5FMw8NU9@E@zr*2H-&Vxv!95&emUhrQwkD{L72#gL>*CEP?g<{9 z*mNU67XGCGdl;w(bWc-rTKirCGQiM;P46Ku1a&mGcccDCu zu8VtXI^u=0j2?7EQ4H(};O8e3b#V$^hmFm7_3J6HfGmv;{P@9?EX<8A`r&N4#e#x@ zl)St=v^4JP>r-7vQGTLFMQ?B~eF8@Q{lY0Y>jjh44gwm~`eZ}UvWl!Mt%Lw@{Ntlh zs4oRPKmFO-20cHs>!`XQ!{7Q$eJDN?-_w@3_-ksc&YAAN@_+Y*i>XnUes}TfJyxu>0qlsBUscw zbgu08WV$4ot-;(cKdr`5QNdG80jXASS z=6Flv_-;D{8nFiu1cgBkg2dx_`C2VG%Kri7m55fK?r$%3!5;4jdowYHTdc zc{@J}{!giy70RF}IxJ?_mEhWw3OiU8F0B5JK<;YE&h!(ghyD=UOnsnI$U^NWYFdxk z>rM(-w*5||0B?Yx$@WTB7pe~5(T@GkiD9Qy^>^4~1Q_|D(V?&mM=XRZsc4w?XCPeoVAjQHtv|jO= z{LoSJP`egrN%3D)@stR78!fWk_oM2m_P~K+k^~&KA~Com$cxsEbvrjKC%vUwS6~_N zSWZi!Wm;%c?)g!rE9E|)gO$6aq06FFe&ZCxB9jpiD5t@6r-$0Frk_3%)>Q}4Grc<# z^I~}*H>pn>2JYFOlgI`zve&5{7dbaj;}f6(sjiis2!J1XbL?imV796gf;y3AsI!Q8 zBzl;~uT_2GcR2*q7(}T130h3PRg#x~4(j>$X~IWBT5L)=-zdN}#c$Ys4JsQ3WF2j1 zSNN&Gmb42#txi=aHQsIJYWq0)tM*3or%zWl=2J6y{isDvpDqo@Rn8RULS1nf?90z@ z-h8>cmFYTM{iEa&{fzC;Y7g`yd$4_dWR_3k2xZqWb(vz;$Tq%dUpT+Q?*bFD<<%?9 z4cMZ1(Ssse-A|_hFg}N8v}_2VVN-ttw0f=>`{_#?f}^{>_ArnUP@FRzciXcw!&|RYaQ zo_@jFI#sIGd}d$6{5#R~RX-tgIIB$=!XiiE_TeyZraKcP(=!rNbuD~>Hk7smZgd)? 
z>kGQnF2CY_TTcPjh4c)V0KXY_TDtQ1a0QwdYC+mW`)8`b!v2w93>tA_+YHxAO%*6ri5k^@m*9dX^%J{}W)1 z#*$CQP_0q|%Wexiql(HeZZu3%j9N6Ysx=CzZ|rUgEJ@PO6x9HOs1u$z-5O=2E2qX7 zxKRrB+~AN9`ZpdF!VcZ*dnHfkXMVkK-H!leQC9I+2Ys?jr3OSd?He7^|EOS>x_-Z+ ze)3=h-t?l@C<5`^2(anL;SaOXS>z!nqQ7k!aE7>`%=vH z*+%JAn}WR)EWM>cwn0(la^_6v4e>5rzXKb}rM@;+2SL9?gKax1?d*?WO!=@iQ{@f& zn_~{ROo)hy4I6_P2U0#Q=sNgTZl%94fRrL!z&b@v&MN;XdUR*hRRdyCw7TOGkML?0 z*52`8aixFah$#Npie(EUfic?%hg|>OOK~RJsvtbD8*+n!EjZ z_wygM=#1K9h;lA0)N&}PeSFXIslD3l=;mc6tv5fWqend|eOAI*ZMTyScgr+ML}rAN z+BVkA_7?4h9yvP;Q;9geU0gHCvxbo8lzCcl@uhp8&jVj?QSxGT^wF9~Evd`lUVfS-Yc5-EZmwi& zmAj|sYtAEH^bHMfJ$gg{_T%$JL{qXFuG822LMB}2Pl-Jq)x64GCs}N}LiJ;C z(ERAYK0ZFai1nW9%mZJIGCK^Pd#{gQSzthi6xK7W3)YCKdsU)kjFmAtCz`{rgT zZeHFWEQ6=G6qbL2v_K;)ObMak@wJPJii%)A_YeJH_$xR8#1el$2XHZ(@*^qnP&I-jn6(1e)dvXAs4PIu`aEnAuofUS6iY{m1a_4*ffhj)JXGf_hz4WMuDP*9O#D zrO$nZg{ArM=Be|xJrv)vv$Hwtr!2&2F9d{WZ!7TMw9Aq;MPL&Hnubo@rlTl9f(g@q(sTwJ{D_cph-UQY1U>*Ubw zn^Sng+?cfzDAmr?sC*;Kl<(MaMe$5a_V?9Qj_svEPpD;^7|7KFelZ$n*s6SSG%&HYA;={a~&@DQHnld7?Y(G=Ns7gdF@0m zbJxK}^twT(y1M$+Fz4rv)42nIG1(~Wv^6hRyCDP>6CwE~HZ~y= z2{IvJVRO4?|xCUU#amU%zg=J-|bXYYh9oG1TaeTyl#s&m&lzoTP=RsY4&w z@MBu4YCA0s30z&j3x*Ka>~&=|14dNKv11tFIM zQoXk5yM~5LK@aGx1SlOG9I~JY8u3;c)X!Irxb!M4?2?Y2UX3P4Y(W8WTwI*Ksp&~= zTr4as2%AflWC3mG!?nYt-oU_o&d9)r^2zMt=P>EO>9){g{_A=izxvs>4L;{RZYcAZ z_Gv$S)qc?0(xL@X`q#lOS4wK?hoGPn!4EtNUyOcAO~tV&KG48xm^s~I{`4s!85tQ( zV8df|b^O2v_rby!+WPvZAwweC4i4c^ zEBxwvZEa3{y?B9M8`bu73wh~AKww}P%=jar>({I8qK_w%a&kH&YJSa3&&(*PsZmOb zi)&u_@D@lXA8T9N!N|avn6_KDZY}OfOG#yQcFGhyZy|#le+TAn>3N6ODurOlBY1?R zmdn3{6qYAO*M!=j&B8<}8Wwbw8kH`(kioFf)4%b$b0s7q!l*B_C}aZord&v>a%X4t ziO-+^YGG*^cJK3={H)7We*Rs*Hrb|mrMq{ran78{R(Ysm+H$_xeY)>SB@~|sP~NX< zUc7MY^kw&R>!o9kSl;K*zn{QkvN;1E-4d_*^eptxM=bxZe6&Zs+)d)Z|EskdQX@p- MuJoOZTbi%_7b3Xb=>Px# literal 0 HcmV?d00001 From 4d1975edfe4d755bcee3a26bf506aaa0421e2e1b Mon Sep 17 00:00:00 2001 From: wangyang59 Date: Thu, 15 Dec 2016 16:03:38 -0800 Subject: [PATCH 03/39] change gan tutorial image size --- 
doc/tutorials/gan/index_en.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/tutorials/gan/index_en.md b/doc/tutorials/gan/index_en.md index e3841c4c9f..00879c6ae3 100644 --- a/doc/tutorials/gan/index_en.md +++ b/doc/tutorials/gan/index_en.md @@ -4,7 +4,7 @@ This demo implements GAN training described in the original GAN paper (https://a The high-level structure of GAN is shown in Figure. 1 below. It is composed of two major parts: a generator and a discriminator, both of which are based on neural networks. The generator takes in some kind of noise with a known distribution and transforms it into an image. The discriminator takes in an image and determines whether it is artificially generated by the generator or a real image. So the generator and the discriminator are in a competitive game in which generator is trying to generate image to look as real as possible to fool the discriminator, while the discriminator is trying to distinguish between real and fake images. -
![](./gan.png)
+
![](./gan.png =300x)
Figure 1. GAN-Model-Structure
The generator and discriminator take turn to be trained using SGD. The objective function of the generator is for its generated images being classified as real by the discriminator, and the objective function of the discriminator is to correctly classify real and fake images. When the GAN model is trained to converge to the equilibrium state, the generator will transform the given noise distribution to the distribution of real images, and the discriminator will not be able to distinguish between real and fake images at all. @@ -106,7 +106,7 @@ $python gan_trainer.py -d uniform --useGpu 1 ``` The generated samples can be found in ./uniform_samples/ and one example is shown below as Figure 2. One can see that it roughly recovers the 2D uniform distribution. -
![](./uniform_sample.png)
+
![](./uniform_sample.png =300x)
Figure 2. Uniform Sample
## MNIST Example From d3f95d64e67fa38bc39c0c5dd555ca04eb29335d Mon Sep 17 00:00:00 2001 From: wangyang59 Date: Thu, 15 Dec 2016 16:06:07 -0800 Subject: [PATCH 04/39] resize image gan tutorial with another method --- doc/tutorials/gan/index_en.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/tutorials/gan/index_en.md b/doc/tutorials/gan/index_en.md index 00879c6ae3..1aa3d3ed8e 100644 --- a/doc/tutorials/gan/index_en.md +++ b/doc/tutorials/gan/index_en.md @@ -4,7 +4,7 @@ This demo implements GAN training described in the original GAN paper (https://a The high-level structure of GAN is shown in Figure. 1 below. It is composed of two major parts: a generator and a discriminator, both of which are based on neural networks. The generator takes in some kind of noise with a known distribution and transforms it into an image. The discriminator takes in an image and determines whether it is artificially generated by the generator or a real image. So the generator and the discriminator are in a competitive game in which generator is trying to generate image to look as real as possible to fool the discriminator, while the discriminator is trying to distinguish between real and fake images. -
![](./gan.png =300x)
+
![](./gan.png | width=300)
Figure 1. GAN-Model-Structure
The generator and discriminator take turn to be trained using SGD. The objective function of the generator is for its generated images being classified as real by the discriminator, and the objective function of the discriminator is to correctly classify real and fake images. When the GAN model is trained to converge to the equilibrium state, the generator will transform the given noise distribution to the distribution of real images, and the discriminator will not be able to distinguish between real and fake images at all. @@ -106,7 +106,7 @@ $python gan_trainer.py -d uniform --useGpu 1 ``` The generated samples can be found in ./uniform_samples/ and one example is shown below as Figure 2. One can see that it roughly recovers the 2D uniform distribution. -
![](./uniform_sample.png =300x)
+
![](./uniform_sample.png | width=300)
Figure 2. Uniform Sample
## MNIST Example From 79856704dbed82f49f279f7721ff10b382dbe6e4 Mon Sep 17 00:00:00 2001 From: wangyang59 Date: Thu, 15 Dec 2016 16:10:28 -0800 Subject: [PATCH 05/39] give up resizing image... --- doc/tutorials/gan/index_en.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/tutorials/gan/index_en.md b/doc/tutorials/gan/index_en.md index 1aa3d3ed8e..e3841c4c9f 100644 --- a/doc/tutorials/gan/index_en.md +++ b/doc/tutorials/gan/index_en.md @@ -4,7 +4,7 @@ This demo implements GAN training described in the original GAN paper (https://a The high-level structure of GAN is shown in Figure. 1 below. It is composed of two major parts: a generator and a discriminator, both of which are based on neural networks. The generator takes in some kind of noise with a known distribution and transforms it into an image. The discriminator takes in an image and determines whether it is artificially generated by the generator or a real image. So the generator and the discriminator are in a competitive game in which generator is trying to generate image to look as real as possible to fool the discriminator, while the discriminator is trying to distinguish between real and fake images. -
![](./gan.png | width=300)
+
![](./gan.png)
Figure 1. GAN-Model-Structure
The generator and discriminator take turn to be trained using SGD. The objective function of the generator is for its generated images being classified as real by the discriminator, and the objective function of the discriminator is to correctly classify real and fake images. When the GAN model is trained to converge to the equilibrium state, the generator will transform the given noise distribution to the distribution of real images, and the discriminator will not be able to distinguish between real and fake images at all. @@ -106,7 +106,7 @@ $python gan_trainer.py -d uniform --useGpu 1 ``` The generated samples can be found in ./uniform_samples/ and one example is shown below as Figure 2. One can see that it roughly recovers the 2D uniform distribution. -
![](./uniform_sample.png | width=300)
+
![](./uniform_sample.png)
Figure 2. Uniform Sample
## MNIST Example From d9596b2680f7adc18f98246a200902fb2f7e95a2 Mon Sep 17 00:00:00 2001 From: wangyang59 Date: Thu, 15 Dec 2016 16:19:28 -0800 Subject: [PATCH 06/39] minor change on gan tutorial --- doc/tutorials/gan/index_en.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/tutorials/gan/index_en.md b/doc/tutorials/gan/index_en.md index e3841c4c9f..dfedc11161 100644 --- a/doc/tutorials/gan/index_en.md +++ b/doc/tutorials/gan/index_en.md @@ -12,7 +12,7 @@ The generator and discriminator take turn to be trained using SGD. The objective ## Implementation of GAN Model Structure Since GAN model involves multiple neural networks, it requires to use paddle python API. So the code walk-through below can also partially serve as an introduction to the usage of Paddle Python API. -There are three networks defined in gan_conf.py, namely generator_training, discriminator_training and generator. The relationship to the model structure we defined above is that discriminator_training is the discriminator, generator is the generator, and the generator_training combined the generator and discriminator since training generator would require the discriminator to provide loss function. This relationship is described in the following code +There are three networks defined in gan_conf.py, namely **generator_training**, **discriminator_training** and **generator**. The relationship to the model structure we defined above is that **discriminator_training** is the discriminator, **generator** is the generator, and the **generator_training** combined the generator and discriminator since training generator would require the discriminator to provide loss function. 
This relationship is described in the following code ```python if is_generator_training: noise = data_layer(name="noise", size=noise_dim) From 0186edec501a131d3a95c889c38cb0bf480177f1 Mon Sep 17 00:00:00 2001 From: wangyang59 Date: Thu, 15 Dec 2016 16:47:43 -0800 Subject: [PATCH 07/39] added a figure reference --- doc/tutorials/gan/index_en.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/tutorials/gan/index_en.md b/doc/tutorials/gan/index_en.md index dfedc11161..5634f18660 100644 --- a/doc/tutorials/gan/index_en.md +++ b/doc/tutorials/gan/index_en.md @@ -5,7 +5,7 @@ This demo implements GAN training described in the original GAN paper (https://a The high-level structure of GAN is shown in Figure. 1 below. It is composed of two major parts: a generator and a discriminator, both of which are based on neural networks. The generator takes in some kind of noise with a known distribution and transforms it into an image. The discriminator takes in an image and determines whether it is artificially generated by the generator or a real image. So the generator and the discriminator are in a competitive game in which generator is trying to generate image to look as real as possible to fool the discriminator, while the discriminator is trying to distinguish between real and fake images.
![](./gan.png)
-
Figure 1. GAN-Model-Structure
+
Figure 1. GAN-Model-Structure Source: ishmaelbelghazi.github.io/ALI/
The generator and discriminator take turn to be trained using SGD. The objective function of the generator is for its generated images being classified as real by the discriminator, and the objective function of the discriminator is to correctly classify real and fake images. When the GAN model is trained to converge to the equilibrium state, the generator will transform the given noise distribution to the distribution of real images, and the discriminator will not be able to distinguish between real and fake images at all. From ea5594d1a1bd8405d3fd51323169a4069328596c Mon Sep 17 00:00:00 2001 From: wangyang59 Date: Fri, 16 Dec 2016 10:59:34 -0800 Subject: [PATCH 08/39] modification of gan tutorial following luotao01 comments --- doc/tutorials/gan/index_en.md | 30 +++++++++++++++++------------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/doc/tutorials/gan/index_en.md b/doc/tutorials/gan/index_en.md index 5634f18660..d1f2f15647 100644 --- a/doc/tutorials/gan/index_en.md +++ b/doc/tutorials/gan/index_en.md @@ -1,18 +1,18 @@ # Generative Adversarial Networks (GAN) -This demo implements GAN training described in the original GAN paper (https://arxiv.org/abs/1406.2661) and DCGAN (https://arxiv.org/abs/1511.06434). +This demo implements GAN training described in the original [GAN paper](https://arxiv.org/abs/1406.2661) and deep convolutional generative adversarial networks [DCGAN paper](https://arxiv.org/abs/1511.06434). The high-level structure of GAN is shown in Figure. 1 below. It is composed of two major parts: a generator and a discriminator, both of which are based on neural networks. The generator takes in some kind of noise with a known distribution and transforms it into an image. The discriminator takes in an image and determines whether it is artificially generated by the generator or a real image. 
So the generator and the discriminator are in a competitive game in which generator is trying to generate image to look as real as possible to fool the discriminator, while the discriminator is trying to distinguish between real and fake images.
![](./gan.png)
-
Figure 1. GAN-Model-Structure Source: ishmaelbelghazi.github.io/ALI/
+
Figure 1. GAN-Model-Structure [Source](https://ishmaelbelghazi.github.io/ALI/)
The generator and discriminator take turn to be trained using SGD. The objective function of the generator is for its generated images being classified as real by the discriminator, and the objective function of the discriminator is to correctly classify real and fake images. When the GAN model is trained to converge to the equilibrium state, the generator will transform the given noise distribution to the distribution of real images, and the discriminator will not be able to distinguish between real and fake images at all. ## Implementation of GAN Model Structure Since GAN model involves multiple neural networks, it requires to use paddle python API. So the code walk-through below can also partially serve as an introduction to the usage of Paddle Python API. -There are three networks defined in gan_conf.py, namely **generator_training**, **discriminator_training** and **generator**. The relationship to the model structure we defined above is that **discriminator_training** is the discriminator, **generator** is the generator, and the **generator_training** combined the generator and discriminator since training generator would require the discriminator to provide loss function. This relationship is described in the following code +There are three networks defined in gan_conf.py, namely **generator_training**, **discriminator_training** and **generator**. The relationship to the model structure we defined above is that **discriminator_training** is the discriminator, **generator** is the generator, and the **generator_training** combined the generator and discriminator since training generator would require the discriminator to provide loss function. 
This relationship is described in the following code: ```python if is_generator_training: noise = data_layer(name="noise", size=noise_dim) @@ -34,7 +34,7 @@ if is_generator: outputs(generator(noise)) ``` -In order to train the networks defined in gan_conf.py, one first needs to initialize a Paddle environment, parse the config, create GradientMachine from the config and create trainer from GradientMachine as done in the code chunk below. +In order to train the networks defined in gan_conf.py, one first needs to initialize a Paddle environment, parse the config, create GradientMachine from the config and create trainer from GradientMachine as done in the code chunk below: ```python import py_paddle.swig_paddle as api # init paddle environment @@ -60,7 +60,7 @@ dis_trainer = api.Trainer.create(dis_conf, dis_training_machine) gen_trainer = api.Trainer.create(gen_conf, gen_training_machine) ``` -In order to balance the strength between generator and discriminator, we schedule to train whichever one is performing worse by comparing their loss function value. The loss function value can be calculated by a forward pass through the GradientMachine +In order to balance the strength between generator and discriminator, we schedule to train whichever one is performing worse by comparing their loss function value. The loss function value can be calculated by a forward pass through the GradientMachine. ```python def get_training_loss(training_machine, inputs): outputs = api.Arguments.createArguments(0) @@ -69,7 +69,7 @@ def get_training_loss(training_machine, inputs): return numpy.mean(loss) ``` -After training one network, one needs to sync the new parameters to the other networks. The code below demonstrates one example of such use case. +After training one network, one needs to sync the new parameters to the other networks. 
The code below demonstrates one example of such use case: ```python # Train the gen_training gen_trainer.trainOneDataBatch(batch_size, data_batch_gen) @@ -84,13 +84,13 @@ copy_shared_parameters(gen_training_machine, generator_machine) ## A Toy Example With the infrastructure explained above, we can now walk you through a toy example of generating two dimensional uniform distribution using 10 dimensional Gaussian noise. -The Gaussian noises are generated using the code below +The Gaussian noises are generated using the code below: ```python def get_noise(batch_size, noise_dim): return numpy.random.normal(size=(batch_size, noise_dim)).astype('float32') ``` -The real samples (2-D uniform) are generated using the code below +The real samples (2-D uniform) are generated using the code below: ```python # synthesize 2-D uniform data in gan_trainer.py:114 def load_uniform_data(): @@ -106,12 +106,16 @@ $python gan_trainer.py -d uniform --useGpu 1 ``` The generated samples can be found in ./uniform_samples/ and one example is shown below as Figure 2. One can see that it roughly recovers the 2D uniform distribution. -
![](./uniform_sample.png)
-
Figure 2. Uniform Sample
+

+ +

+

+ Figure 2. Uniform Sample +

## MNIST Example ### Data preparation -To download the MNIST data, one can use the following commands. +To download the MNIST data, one can use the following commands: ```bash $cd data/ $./get_mnist_data.sh @@ -121,10 +125,10 @@ $./get_mnist_data.sh Following the DC-Gan paper (https://arxiv.org/abs/1511.06434), we use convolution/convolution-transpose layer in the discriminator/generator network to better deal with images. The details of the network structures are defined in gan_conf_image.py. ### Training the model -To train the GAN model on mnist data, one can use the following command +To train the GAN model on mnist data, one can use the following command: ```bash $python gan_trainer.py -d mnist --useGpu 1 ``` The generated sample images can be found at ./mnist_samples/ and one example is shown below as Figure 3.
![](./mnist_sample.png)
-
Figure 2. MNIST Sample
+
Figure 3. MNIST Sample
From 66b6f2a56ed45e964b5b3d39a39dcef050377c16 Mon Sep 17 00:00:00 2001 From: wangyang59 Date: Fri, 16 Dec 2016 11:08:01 -0800 Subject: [PATCH 09/39] change figure size in gan tutorial --- doc/tutorials/gan/index_en.md | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/doc/tutorials/gan/index_en.md b/doc/tutorials/gan/index_en.md index d1f2f15647..47d1d6f8d5 100644 --- a/doc/tutorials/gan/index_en.md +++ b/doc/tutorials/gan/index_en.md @@ -4,8 +4,12 @@ This demo implements GAN training described in the original [GAN paper](https:// The high-level structure of GAN is shown in Figure. 1 below. It is composed of two major parts: a generator and a discriminator, both of which are based on neural networks. The generator takes in some kind of noise with a known distribution and transforms it into an image. The discriminator takes in an image and determines whether it is artificially generated by the generator or a real image. So the generator and the discriminator are in a competitive game in which generator is trying to generate image to look as real as possible to fool the discriminator, while the discriminator is trying to distinguish between real and fake images. -
![](./gan.png)
-
Figure 1. GAN-Model-Structure [Source](https://ishmaelbelghazi.github.io/ALI/)
+

+ +

+

+ Figure 1. GAN-Model-Structure [figure credit](https://ishmaelbelghazi.github.io/ALI/) +

The generator and discriminator take turn to be trained using SGD. The objective function of the generator is for its generated images being classified as real by the discriminator, and the objective function of the discriminator is to correctly classify real and fake images. When the GAN model is trained to converge to the equilibrium state, the generator will transform the given noise distribution to the distribution of real images, and the discriminator will not be able to distinguish between real and fake images at all. @@ -107,7 +111,7 @@ $python gan_trainer.py -d uniform --useGpu 1 The generated samples can be found in ./uniform_samples/ and one example is shown below as Figure 2. One can see that it roughly recovers the 2D uniform distribution.

- +

Figure 2. Uniform Sample @@ -130,5 +134,9 @@ To train the GAN model on mnist data, one can use the following command: $python gan_trainer.py -d mnist --useGpu 1 ``` The generated sample images can be found at ./mnist_samples/ and one example is shown below as Figure 3. -

![](./mnist_sample.png)
-
Figure 3. MNIST Sample
+

+ +

+

+ Figure 3. MNIST Sample +

From 78582ba2e811ffd348b31e97bc6c4751b3f79166 Mon Sep 17 00:00:00 2001 From: wangyang59 Date: Fri, 16 Dec 2016 11:13:31 -0800 Subject: [PATCH 10/39] figure caption url --- doc/tutorials/gan/index_en.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/doc/tutorials/gan/index_en.md b/doc/tutorials/gan/index_en.md index 47d1d6f8d5..99c8d73011 100644 --- a/doc/tutorials/gan/index_en.md +++ b/doc/tutorials/gan/index_en.md @@ -8,7 +8,8 @@ The high-level structure of GAN is shown in Figure. 1 below. It is composed of t

- Figure 1. GAN-Model-Structure [figure credit](https://ishmaelbelghazi.github.io/ALI/) + Figure 1. GAN-Model-Structure + figure credit

The generator and discriminator take turn to be trained using SGD. The objective function of the generator is for its generated images being classified as real by the discriminator, and the objective function of the discriminator is to correctly classify real and fake images. When the GAN model is trained to converge to the equilibrium state, the generator will transform the given noise distribution to the distribution of real images, and the discriminator will not be able to distinguish between real and fake images at all. From c0e687b939904b60232f3f01f7acbba3164dfbd2 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Tue, 20 Dec 2016 17:32:57 +0800 Subject: [PATCH 11/39] Refine Code --- demo/mnist/api_train.py | 12 ++++++++++++ demo/mnist/simple_mnist_network.py | 16 ++++++++++++++++ 2 files changed, 28 insertions(+) create mode 100644 demo/mnist/api_train.py create mode 100644 demo/mnist/simple_mnist_network.py diff --git a/demo/mnist/api_train.py b/demo/mnist/api_train.py new file mode 100644 index 0000000000..6abb5d4e56 --- /dev/null +++ b/demo/mnist/api_train.py @@ -0,0 +1,12 @@ +import py_paddle.swig_paddle as api +from paddle.trainer.config_parser import parse_config + + +def main(): + api.initPaddle("-use_gpu=false", "-trainer_count=4") # use 4 cpu cores + config = parse_config('simple_mnist_network.py', '') + m = api.GradientMachine.createFromConfigProto(config.model_config) + + +if __name__ == '__main__': + main() diff --git a/demo/mnist/simple_mnist_network.py b/demo/mnist/simple_mnist_network.py new file mode 100644 index 0000000000..41f4e51657 --- /dev/null +++ b/demo/mnist/simple_mnist_network.py @@ -0,0 +1,16 @@ +from paddle.trainer_config_helpers import * + +settings(learning_rate=1e-4, learning_method=AdamOptimizer(), batch_size=1000) + +imgs = data_layer(name='pixel', size=784) + +hidden1 = fc_layer(input=imgs, size=200) +hidden2 = fc_layer(input=hidden1, size=200) + +inference = fc_layer(input=hidden2, size=10, act=SoftmaxActivation()) + +cost = 
classification_cost( + input=inference, label=data_layer( + name='label', size=10)) + +outputs(cost) From 8b4cbcfc1847c50228c151a485755202912e7df2 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Tue, 20 Dec 2016 22:01:28 +0800 Subject: [PATCH 12/39] Start doing mnist_train_api --- demo/mnist/api_train.py | 31 ++++++++++++++++++++++++++--- paddle/api/CMakeLists.txt | 1 + paddle/api/Paddle.swig | 3 ++- paddle/api/PaddleAPI.h | 20 +++++++++++++++++++ paddle/api/PaddleAPIPrivate.h | 27 +++++++++++++++++++++++-- paddle/api/Parameter.cpp | 16 +-------------- paddle/api/ParameterUpdater.cpp | 35 +++++++++++++++++++++++++++++++++ 7 files changed, 112 insertions(+), 21 deletions(-) create mode 100644 paddle/api/ParameterUpdater.cpp diff --git a/demo/mnist/api_train.py b/demo/mnist/api_train.py index 6abb5d4e56..5d4ef90f10 100644 --- a/demo/mnist/api_train.py +++ b/demo/mnist/api_train.py @@ -1,11 +1,36 @@ import py_paddle.swig_paddle as api -from paddle.trainer.config_parser import parse_config +import paddle.trainer.config_parser +import numpy as np + + +def init_parameter(network): + assert isinstance(network, api.GradientMachine) + for each_param in network.getParameters(): + assert isinstance(each_param, api.Parameter) + array = each_param.getBuf(api.PARAMETER_VALUE).toNumpyArrayInplace() + assert isinstance(array, np.ndarray) + for i in xrange(len(array)): + array[i] = np.random.uniform(-1.0, 1.0) def main(): api.initPaddle("-use_gpu=false", "-trainer_count=4") # use 4 cpu cores - config = parse_config('simple_mnist_network.py', '') - m = api.GradientMachine.createFromConfigProto(config.model_config) + config = paddle.trainer.config_parser.parse_config( + 'simple_mnist_network.py', '') + + opt_config = api.OptimizationConfig.createFromProto(config.opt_config) + _temp_optimizer_ = api.ParameterOptimizer.create(opt_config) + enable_types = _temp_optimizer_.getParameterTypes() + + m = api.GradientMachine.createFromConfigProto( + config.model_config, api.CREATE_MODE_NORMAL, 
enable_types) + assert isinstance(m, api.GradientMachine) + init_parameter(network=m) + + updater = api.ParameterUpdater.createLocalUpdater(opt_config) + assert isinstance(updater, api.ParameterUpdater) + updater.init(m) + updater.startPass() if __name__ == '__main__': diff --git a/paddle/api/CMakeLists.txt b/paddle/api/CMakeLists.txt index 6ad1d79e59..39fe435565 100644 --- a/paddle/api/CMakeLists.txt +++ b/paddle/api/CMakeLists.txt @@ -5,6 +5,7 @@ set(API_SOURCES Matrix.cpp Parameter.cpp ParameterOptimizer.cpp + ParameterUpdater.cpp SequenceGenerator.cpp Trainer.cpp Util.cpp diff --git a/paddle/api/Paddle.swig b/paddle/api/Paddle.swig index 9194a6371b..b0fa8beb16 100644 --- a/paddle/api/Paddle.swig +++ b/paddle/api/Paddle.swig @@ -174,6 +174,7 @@ namespace std { %newobject Parameter::getConfig; %newobject ParameterOptimizer::create; %newobject ParameterOptimizer::needSpecialTraversal; +%newobject ParameterUpdater::createLocalUpdater; %feature("director") UpdateCallback; %feature("autodoc", 1); // To generate method stub, for code hint in ide @@ -193,4 +194,4 @@ namespace std { %ignore OptimizationConfigPrivate; %ignore ParameterTraverseCallbackPrivate; %include "utils/GlobalConstants.h" -%include "api/PaddleAPI.h" \ No newline at end of file +%include "api/PaddleAPI.h" diff --git a/paddle/api/PaddleAPI.h b/paddle/api/PaddleAPI.h index 84a66719c3..bd413eb1e9 100644 --- a/paddle/api/PaddleAPI.h +++ b/paddle/api/PaddleAPI.h @@ -519,6 +519,7 @@ private: friend class TrainerConfig; friend class ParameterOptimizer; + friend class ParameterUpdater; friend class Trainer; }; @@ -557,6 +558,7 @@ private: ParameterPrivate* m; friend class UpdateCallbackWrapper; friend class GradientMachine; + friend class ParameterUpdater; }; struct ModelConfigPrivate; @@ -772,6 +774,24 @@ private: // Not to use c++ 11 init-list, so we use static var as function default arg. 
static std::vector defaultParamTypes; friend class Trainer; + friend class ParameterUpdater; +}; + +struct ParameterUpdaterPrivate; +class ParameterUpdater { +private: + ParameterUpdater(); + +public: + static ParameterUpdater* createLocalUpdater(OptimizationConfig* config); + ~ParameterUpdater(); + + void init(const GradientMachine& gm); + + void startPass(); + +private: + ParameterUpdaterPrivate* m; }; struct TrainerPrivate; diff --git a/paddle/api/PaddleAPIPrivate.h b/paddle/api/PaddleAPIPrivate.h index d2b56fc41c..905668a62f 100644 --- a/paddle/api/PaddleAPIPrivate.h +++ b/paddle/api/PaddleAPIPrivate.h @@ -11,11 +11,13 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ - +#pragma once +#include +#include "PaddleAPI.h" #include "paddle/gserver/gradientmachines/GradientMachine.h" #include "paddle/trainer/TrainerConfigHelper.h" -#pragma once +#include "paddle/parameter/ParameterUpdaterBase.h" struct GradientMachinePrivate { std::shared_ptr machine; @@ -65,3 +67,24 @@ struct ArgumentsPrivate { return *(std::shared_ptr*)(rawPtr); } }; + +struct ParameterUpdaterPrivate { + std::unique_ptr updater; +}; + +struct ParameterPrivate { + std::shared_ptr sharedPtr; + paddle::Parameter* rawPtr; // rawPtr only used in ParameterUpdater, + // in other situation sharedPtr should + // contains value. + + ParameterPrivate() : sharedPtr(nullptr), rawPtr(nullptr) {} + + paddle::Parameter* getPtr() { + if (sharedPtr) { + return sharedPtr.get(); + } else { + return rawPtr; + } + } +}; diff --git a/paddle/api/Parameter.cpp b/paddle/api/Parameter.cpp index 4eed00a84a..41cf50043c 100644 --- a/paddle/api/Parameter.cpp +++ b/paddle/api/Parameter.cpp @@ -14,21 +14,7 @@ limitations under the License. 
*/ #include "paddle/parameter/Parameter.h" #include "PaddleAPI.h" - -struct ParameterPrivate { - std::shared_ptr sharedPtr; - paddle::Parameter* rawPtr; - - ParameterPrivate() : sharedPtr(nullptr), rawPtr(nullptr) {} - - paddle::Parameter* getPtr() { - if (sharedPtr) { - return sharedPtr.get(); - } else { - return rawPtr; - } - } -}; +#include "PaddleAPIPrivate.h" Parameter::Parameter() : m(new ParameterPrivate()) {} diff --git a/paddle/api/ParameterUpdater.cpp b/paddle/api/ParameterUpdater.cpp new file mode 100644 index 0000000000..af5b746a7c --- /dev/null +++ b/paddle/api/ParameterUpdater.cpp @@ -0,0 +1,35 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "PaddleAPI.h" + +#include "PaddleAPIPrivate.h" +#include "paddle/trainer/ThreadParameterUpdater.h" + +ParameterUpdater::ParameterUpdater() : m(new ParameterUpdaterPrivate()) {} + +ParameterUpdater *ParameterUpdater::createLocalUpdater( + OptimizationConfig *config) { + auto param = new ParameterUpdater(); + param->m->updater.reset(new paddle::SgdThreadUpdater(config->m->getConfig())); + return param; +} + +ParameterUpdater::~ParameterUpdater() { delete m; } + +void ParameterUpdater::init(const GradientMachine &gm) { + m->updater->init(gm.m->machine->getParameters()); +} + +void ParameterUpdater::startPass() { m->updater->startPass(); } From 025e3e94d2b216cc278de103cbef27b851274bf5 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Tue, 20 Dec 2016 23:00:34 +0800 Subject: [PATCH 13/39] Add GradientMachine::start/finish to API --- demo/mnist/api_train.py | 7 ++++++- paddle/api/GradientMachine.cpp | 4 ++++ paddle/api/PaddleAPI.h | 9 +++++++++ paddle/api/ParameterUpdater.cpp | 2 ++ 4 files changed, 21 insertions(+), 1 deletion(-) diff --git a/demo/mnist/api_train.py b/demo/mnist/api_train.py index 5d4ef90f10..b061cfb2b8 100644 --- a/demo/mnist/api_train.py +++ b/demo/mnist/api_train.py @@ -30,7 +30,12 @@ def main(): updater = api.ParameterUpdater.createLocalUpdater(opt_config) assert isinstance(updater, api.ParameterUpdater) updater.init(m) - updater.startPass() + m.start() + + for _ in xrange(100): + updater.startPass() + + m.finish() if __name__ == '__main__': diff --git a/paddle/api/GradientMachine.cpp b/paddle/api/GradientMachine.cpp index 297eaa19bb..2cece21097 100644 --- a/paddle/api/GradientMachine.cpp +++ b/paddle/api/GradientMachine.cpp @@ -64,6 +64,10 @@ GradientMachine* GradientMachine::createByModelConfig( return GradientMachine::createFromPaddleModelPtr(confPtr, mode, types); } +void GradientMachine::start() { m->machine->start(); } + +void GradientMachine::finish() { m->machine->finish(); } + void GradientMachine::forward(const Arguments& inArgs, 
Arguments* outArgs, PassType passType) { diff --git a/paddle/api/PaddleAPI.h b/paddle/api/PaddleAPI.h index bd413eb1e9..c074325091 100644 --- a/paddle/api/PaddleAPI.h +++ b/paddle/api/PaddleAPI.h @@ -716,6 +716,13 @@ public: GradientMatchineCreateMode mode = CREATE_MODE_NORMAL, const std::vector& parameterTypes = defaultParamTypes); + /** + * @brief finish + */ + void finish(); + + void start(); + /** * The forward stage of GradientMachine. * @@ -790,6 +797,8 @@ public: void startPass(); + void finishPass(); + private: ParameterUpdaterPrivate* m; }; diff --git a/paddle/api/ParameterUpdater.cpp b/paddle/api/ParameterUpdater.cpp index af5b746a7c..3b626c0507 100644 --- a/paddle/api/ParameterUpdater.cpp +++ b/paddle/api/ParameterUpdater.cpp @@ -33,3 +33,5 @@ void ParameterUpdater::init(const GradientMachine &gm) { } void ParameterUpdater::startPass() { m->updater->startPass(); } + +void ParameterUpdater::finishPass() {} From 27d87db6a0f937a7fa22b03e3d18844f894698e1 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Wed, 21 Dec 2016 12:54:11 +0800 Subject: [PATCH 14/39] Wait for reading data. 
--- demo/mnist/api_train.py | 2 ++ paddle/api/ParameterUpdater.cpp | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/demo/mnist/api_train.py b/demo/mnist/api_train.py index b061cfb2b8..59043ce6c4 100644 --- a/demo/mnist/api_train.py +++ b/demo/mnist/api_train.py @@ -35,6 +35,8 @@ def main(): for _ in xrange(100): updater.startPass() + updater.finishPass() + m.finish() diff --git a/paddle/api/ParameterUpdater.cpp b/paddle/api/ParameterUpdater.cpp index 3b626c0507..4edec78b4a 100644 --- a/paddle/api/ParameterUpdater.cpp +++ b/paddle/api/ParameterUpdater.cpp @@ -34,4 +34,4 @@ void ParameterUpdater::init(const GradientMachine &gm) { void ParameterUpdater::startPass() { m->updater->startPass(); } -void ParameterUpdater::finishPass() {} +void ParameterUpdater::finishPass() { m->updater->finishPass(); } From 9f5e742b6d4018cf5022a6718d5913f2459cf95e Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Wed, 21 Dec 2016 12:57:19 +0800 Subject: [PATCH 15/39] A tiny fix in PyDataProvider2 * hidden decorator kwargs in DataProvider.__init__ * also add unit test for this. 
--- paddle/gserver/tests/test_PyDataProvider2.py | 2 +- python/paddle/trainer/PyDataProvider2.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/paddle/gserver/tests/test_PyDataProvider2.py b/paddle/gserver/tests/test_PyDataProvider2.py index f7b540013e..2e6225519f 100644 --- a/paddle/gserver/tests/test_PyDataProvider2.py +++ b/paddle/gserver/tests/test_PyDataProvider2.py @@ -17,7 +17,7 @@ import random from paddle.trainer.PyDataProvider2 import * -@provider(input_types=[dense_vector(200, seq_type=SequenceType.NO_SEQUENCE)]) +@provider(slots=[dense_vector(200, seq_type=SequenceType.NO_SEQUENCE)]) def test_dense_no_seq(setting, filename): for i in xrange(200): yield [(float(j - 100) * float(i + 1)) / 200.0 for j in xrange(200)] diff --git a/python/paddle/trainer/PyDataProvider2.py b/python/paddle/trainer/PyDataProvider2.py index de266bb5d3..5ca4bcbca6 100644 --- a/python/paddle/trainer/PyDataProvider2.py +++ b/python/paddle/trainer/PyDataProvider2.py @@ -232,7 +232,7 @@ def provider(input_types=None, check=False, check_fail_continue=False, init_hook=None, - **kwargs): + **outter_kwargs): """ Provider decorator. Use it to make a function into PyDataProvider2 object. 
In this function, user only need to get each sample for some train/test @@ -318,10 +318,10 @@ def provider(input_types=None, self.logger = logging.getLogger("") self.logger.setLevel(logging.INFO) self.input_types = None - if 'slots' in kwargs: + if 'slots' in outter_kwargs: self.logger.warning('setting slots value is deprecated, ' 'please use input_types instead.') - self.slots = kwargs['slots'] + self.slots = outter_kwargs['slots'] self.slots = input_types self.should_shuffle = should_shuffle From 5f6c4af3a544b828fe7c71c98164f9e8b6994f5b Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Wed, 21 Dec 2016 13:27:32 +0800 Subject: [PATCH 16/39] Try to read data in mnist --- demo/mnist/api_train.py | 29 +++++++++++++++++++++++++++++ demo/mnist/mnist_provider.py | 28 +++------------------------- demo/mnist/mnist_util.py | 30 ++++++++++++++++++++++++++++++ 3 files changed, 62 insertions(+), 25 deletions(-) create mode 100644 demo/mnist/mnist_util.py diff --git a/demo/mnist/api_train.py b/demo/mnist/api_train.py index 59043ce6c4..e508af7a0c 100644 --- a/demo/mnist/api_train.py +++ b/demo/mnist/api_train.py @@ -1,6 +1,9 @@ import py_paddle.swig_paddle as api +from py_paddle import DataProviderConverter +import paddle.trainer.PyDataProvider2 as dp import paddle.trainer.config_parser import numpy as np +from mnist_util import read_from_mnist def init_parameter(network): @@ -13,6 +16,22 @@ def init_parameter(network): array[i] = np.random.uniform(-1.0, 1.0) +def generator_to_batch(generator, batch_size): + ret_val = list() + for each_item in generator: + ret_val.append(each_item) + if len(ret_val) == batch_size: + yield ret_val + ret_val = list() + if len(ret_val) != 0: + yield ret_val + + +def input_order_converter(generator): + for each_item in generator: + yield each_item['pixel'], each_item['label'] + + def main(): api.initPaddle("-use_gpu=false", "-trainer_count=4") # use 4 cpu cores config = paddle.trainer.config_parser.parse_config( @@ -30,10 +49,20 @@ def main(): updater = 
api.ParameterUpdater.createLocalUpdater(opt_config) assert isinstance(updater, api.ParameterUpdater) updater.init(m) + + converter = DataProviderConverter( + input_types=[dp.dense_vector(784), dp.integer_value(10)]) + + train_file = './data/raw_data/train' + m.start() for _ in xrange(100): updater.startPass() + train_data_generator = input_order_converter( + read_from_mnist(train_file)) + for data_batch in generator_to_batch(train_data_generator, 128): + inArgs = converter(data_batch) updater.finishPass() diff --git a/demo/mnist/mnist_provider.py b/demo/mnist/mnist_provider.py index 4635833d36..888cfef1e7 100644 --- a/demo/mnist/mnist_provider.py +++ b/demo/mnist/mnist_provider.py @@ -1,5 +1,5 @@ from paddle.trainer.PyDataProvider2 import * -import numpy +from mnist_util import read_from_mnist # Define a py data provider @@ -8,27 +8,5 @@ import numpy 'label': integer_value(10)}, cache=CacheType.CACHE_PASS_IN_MEM) def process(settings, filename): # settings is not used currently. - imgf = filename + "-images-idx3-ubyte" - labelf = filename + "-labels-idx1-ubyte" - f = open(imgf, "rb") - l = open(labelf, "rb") - - f.read(16) - l.read(8) - - # Define number of samples for train/test - if "train" in filename: - n = 60000 - else: - n = 10000 - - images = numpy.fromfile( - f, 'ubyte', count=n * 28 * 28).reshape((n, 28 * 28)).astype('float32') - images = images / 255.0 * 2.0 - 1.0 - labels = numpy.fromfile(l, 'ubyte', count=n).astype("int") - - for i in xrange(n): - yield {"pixel": images[i, :], 'label': labels[i]} - - f.close() - l.close() + for each in read_from_mnist(filename): + yield each diff --git a/demo/mnist/mnist_util.py b/demo/mnist/mnist_util.py new file mode 100644 index 0000000000..3fd88ae7ed --- /dev/null +++ b/demo/mnist/mnist_util.py @@ -0,0 +1,30 @@ +import numpy + +__all__ = ['read_from_mnist'] + + +def read_from_mnist(filename): + imgf = filename + "-images-idx3-ubyte" + labelf = filename + "-labels-idx1-ubyte" + f = open(imgf, "rb") + l = open(labelf, 
"rb") + + f.read(16) + l.read(8) + + # Define number of samples for train/test + if "train" in filename: + n = 60000 + else: + n = 10000 + + images = numpy.fromfile( + f, 'ubyte', count=n * 28 * 28).reshape((n, 28 * 28)).astype('float32') + images = images / 255.0 * 2.0 - 1.0 + labels = numpy.fromfile(l, 'ubyte', count=n).astype("int") + + for i in xrange(n): + yield {"pixel": images[i, :], 'label': labels[i]} + + f.close() + l.close() From 36d1e6178c4e6d563cf1be644d1a828b577b7f28 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Wed, 21 Dec 2016 13:38:31 +0800 Subject: [PATCH 17/39] Use numpy in DenseScanner. --- paddle/py_paddle/dataprovider_converter.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/paddle/py_paddle/dataprovider_converter.py b/paddle/py_paddle/dataprovider_converter.py index edcefba6a8..981d10afda 100644 --- a/paddle/py_paddle/dataprovider_converter.py +++ b/paddle/py_paddle/dataprovider_converter.py @@ -15,6 +15,7 @@ import paddle.trainer.PyDataProvider2 as dp2 import collections import swig_paddle +import numpy __all__ = ['DataProviderConverter'] @@ -35,18 +36,18 @@ class IScanner(object): class DenseScanner(IScanner): def __init__(self, input_type, pos): IScanner.__init__(self, input_type, pos) - self.__mat__ = [] - self.__height__ = 0 + self.__mat__ = None def scan(self, dat): - self.__mat__.extend(dat) - self.__height__ += 1 + if self.__mat__ is None: + self.__mat__ = numpy.array([dat], dtype='float32') + else: + self.__mat__ = numpy.append(self.__mat__, [dat], axis=0) def finish_scan(self, argument): assert isinstance(argument, swig_paddle.Arguments) assert isinstance(self.input_type, dp2.InputType) - m = swig_paddle.Matrix.createDense(self.__mat__, self.__height__, - self.input_type.dim, False) + m = swig_paddle.Matrix.createDenseFromNumpy(self.__mat__, True, False) argument.setSlotValue(self.pos, m) From 20249e8e65aca17abaa9bbee9ab660e3573e21cf Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Wed, 21 Dec 2016 
13:55:44 +0800 Subject: [PATCH 18/39] Try expose ParamUpdater::update --- demo/mnist/api_train.py | 3 +-- paddle/api/PaddleAPI.h | 6 ++++++ paddle/api/ParameterUpdater.cpp | 13 +++++++++++++ 3 files changed, 20 insertions(+), 2 deletions(-) diff --git a/demo/mnist/api_train.py b/demo/mnist/api_train.py index e508af7a0c..ef8b20a48d 100644 --- a/demo/mnist/api_train.py +++ b/demo/mnist/api_train.py @@ -45,7 +45,6 @@ def main(): config.model_config, api.CREATE_MODE_NORMAL, enable_types) assert isinstance(m, api.GradientMachine) init_parameter(network=m) - updater = api.ParameterUpdater.createLocalUpdater(opt_config) assert isinstance(updater, api.ParameterUpdater) updater.init(m) @@ -62,7 +61,7 @@ def main(): train_data_generator = input_order_converter( read_from_mnist(train_file)) for data_batch in generator_to_batch(train_data_generator, 128): - inArgs = converter(data_batch) + trainRole = updater.startBatch(len(data_batch)) updater.finishPass() diff --git a/paddle/api/PaddleAPI.h b/paddle/api/PaddleAPI.h index c074325091..165997ba34 100644 --- a/paddle/api/PaddleAPI.h +++ b/paddle/api/PaddleAPI.h @@ -799,6 +799,12 @@ public: void finishPass(); + PassType startBatch(int64_t batchSize); + + void finishBatch(float cost); + + void update(Parameter* param); + private: ParameterUpdaterPrivate* m; }; diff --git a/paddle/api/ParameterUpdater.cpp b/paddle/api/ParameterUpdater.cpp index 4edec78b4a..e5d07b8178 100644 --- a/paddle/api/ParameterUpdater.cpp +++ b/paddle/api/ParameterUpdater.cpp @@ -35,3 +35,16 @@ void ParameterUpdater::init(const GradientMachine &gm) { void ParameterUpdater::startPass() { m->updater->startPass(); } void ParameterUpdater::finishPass() { m->updater->finishPass(); } + +PassType ParameterUpdater::startBatch(int64_t batchSize) { + return m->updater->startBatch(batchSize); +} + +void ParameterUpdater::finishBatch(float cost) { + m->updater->finishBatch(cost); +} + +void ParameterUpdater::update(Parameter *param) { + auto paddleParam = 
param->m->getPtr(); + m->updater->update(paddleParam); +} From 05ab22c332e615f3c81f4d4b2c9b47f71229c71c Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Wed, 21 Dec 2016 14:22:31 +0800 Subject: [PATCH 19/39] A simplest train file for mnist added. --- demo/mnist/api_train.py | 16 +++++++++++++++- paddle/api/PaddleAPI.h | 2 +- paddle/api/ParameterUpdater.cpp | 4 ++-- 3 files changed, 18 insertions(+), 4 deletions(-) diff --git a/demo/mnist/api_train.py b/demo/mnist/api_train.py index ef8b20a48d..425c5f897a 100644 --- a/demo/mnist/api_train.py +++ b/demo/mnist/api_train.py @@ -58,11 +58,25 @@ def main(): for _ in xrange(100): updater.startPass() + outArgs = api.Arguments.createArguments(0) train_data_generator = input_order_converter( read_from_mnist(train_file)) - for data_batch in generator_to_batch(train_data_generator, 128): + for batch_id, data_batch in enumerate( + generator_to_batch(train_data_generator, 256)): trainRole = updater.startBatch(len(data_batch)) + def update_callback(param): + updater.update(param) + + m.forwardBackward( + converter(data_batch), outArgs, trainRole, update_callback) + + cost_vec = outArgs.getSlotValue(0) + cost_vec = cost_vec.copyToNumpyMat() + cost = cost_vec.sum() / len(data_batch) + print 'Batch id', batch_id, 'with cost=', cost + updater.finishBatch(cost) + updater.finishPass() m.finish() diff --git a/paddle/api/PaddleAPI.h b/paddle/api/PaddleAPI.h index 165997ba34..cc49e6a09d 100644 --- a/paddle/api/PaddleAPI.h +++ b/paddle/api/PaddleAPI.h @@ -799,7 +799,7 @@ public: void finishPass(); - PassType startBatch(int64_t batchSize); + PassType startBatch(size_t batchSize); void finishBatch(float cost); diff --git a/paddle/api/ParameterUpdater.cpp b/paddle/api/ParameterUpdater.cpp index e5d07b8178..fba4762024 100644 --- a/paddle/api/ParameterUpdater.cpp +++ b/paddle/api/ParameterUpdater.cpp @@ -36,8 +36,8 @@ void ParameterUpdater::startPass() { m->updater->startPass(); } void ParameterUpdater::finishPass() { m->updater->finishPass(); } 
-PassType ParameterUpdater::startBatch(int64_t batchSize) { - return m->updater->startBatch(batchSize); +PassType ParameterUpdater::startBatch(size_t batchSize) { + return m->updater->startBatch((int64_t)batchSize); } void ParameterUpdater::finishBatch(float cost) { From 1f4f04427d5f34e48a0a30b9137a882a6f1b571c Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Wed, 21 Dec 2016 12:57:19 +0800 Subject: [PATCH 20/39] A tiny fix in PyDataProvider2 * hidden decorator kwargs in DataProvider.__init__ * also add unit test for this. --- paddle/gserver/tests/test_PyDataProvider2.py | 2 +- python/paddle/trainer/PyDataProvider2.py | 12 ++++++++---- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/paddle/gserver/tests/test_PyDataProvider2.py b/paddle/gserver/tests/test_PyDataProvider2.py index f7b540013e..2e6225519f 100644 --- a/paddle/gserver/tests/test_PyDataProvider2.py +++ b/paddle/gserver/tests/test_PyDataProvider2.py @@ -17,7 +17,7 @@ import random from paddle.trainer.PyDataProvider2 import * -@provider(input_types=[dense_vector(200, seq_type=SequenceType.NO_SEQUENCE)]) +@provider(slots=[dense_vector(200, seq_type=SequenceType.NO_SEQUENCE)]) def test_dense_no_seq(setting, filename): for i in xrange(200): yield [(float(j - 100) * float(i + 1)) / 200.0 for j in xrange(200)] diff --git a/python/paddle/trainer/PyDataProvider2.py b/python/paddle/trainer/PyDataProvider2.py index de266bb5d3..c918fa78ac 100644 --- a/python/paddle/trainer/PyDataProvider2.py +++ b/python/paddle/trainer/PyDataProvider2.py @@ -232,7 +232,7 @@ def provider(input_types=None, check=False, check_fail_continue=False, init_hook=None, - **kwargs): + **outter_kwargs): """ Provider decorator. Use it to make a function into PyDataProvider2 object. 
In this function, user only need to get each sample for some train/test @@ -318,11 +318,15 @@ def provider(input_types=None, self.logger = logging.getLogger("") self.logger.setLevel(logging.INFO) self.input_types = None - if 'slots' in kwargs: + if 'slots' in outter_kwargs: self.logger.warning('setting slots value is deprecated, ' 'please use input_types instead.') - self.slots = kwargs['slots'] - self.slots = input_types + self.slots = outter_kwargs['slots'] + if input_types is not None: + self.slots = input_types + + assert self.slots is not None, \ + "Data Provider's input_types must be set" self.should_shuffle = should_shuffle true_table = [1, 't', 'true', 'on'] From eaba2e2eff6c9bf1bbdff452b4be636ef0b8da9a Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Wed, 21 Dec 2016 21:56:37 +0800 Subject: [PATCH 21/39] Expose Evaluator API --- demo/mnist/api_train.py | 6 +++--- paddle/api/CMakeLists.txt | 10 ++++++++++ paddle/api/Evaluator.cpp | 29 +++++++++++++++++++++++++++++ paddle/api/GradientMachine.cpp | 10 ++++++++++ paddle/api/Paddle.swig | 2 ++ paddle/api/PaddleAPI.h | 27 ++++++++++++++++++++++++++- paddle/api/PaddleAPIPrivate.h | 11 +++++++++-- paddle/api/ParameterUpdater.cpp | 2 +- 8 files changed, 90 insertions(+), 7 deletions(-) create mode 100644 paddle/api/Evaluator.cpp diff --git a/demo/mnist/api_train.py b/demo/mnist/api_train.py index 425c5f897a..52cc13c5a3 100644 --- a/demo/mnist/api_train.py +++ b/demo/mnist/api_train.py @@ -62,14 +62,14 @@ def main(): train_data_generator = input_order_converter( read_from_mnist(train_file)) for batch_id, data_batch in enumerate( - generator_to_batch(train_data_generator, 256)): + generator_to_batch(train_data_generator, 2048)): trainRole = updater.startBatch(len(data_batch)) - def update_callback(param): + def updater_callback(param): updater.update(param) m.forwardBackward( - converter(data_batch), outArgs, trainRole, update_callback) + converter(data_batch), outArgs, trainRole, updater_callback) cost_vec = 
outArgs.getSlotValue(0) cost_vec = cost_vec.copyToNumpyMat() diff --git a/paddle/api/CMakeLists.txt b/paddle/api/CMakeLists.txt index 39fe435565..a7f17e186b 100644 --- a/paddle/api/CMakeLists.txt +++ b/paddle/api/CMakeLists.txt @@ -1,6 +1,7 @@ set(API_SOURCES Arguments.cpp ConfigParser.cpp + Evaluator.cpp GradientMachine.cpp Matrix.cpp Parameter.cpp @@ -63,6 +64,15 @@ install(DIRECTORY ${PROJ_ROOT}/paddle/dist/ add_custom_target(python_api_wheel ALL DEPENDS ${PROJ_ROOT}/paddle/dist/.timestamp) +add_dependencies(python_api_wheel python_swig_sources + paddle_parameter + paddle_math + paddle_utils + paddle_gserver + paddle_pserver + paddle_trainer + paddle_api + paddle_cuda) if(WITH_TESTING) add_subdirectory(test) diff --git a/paddle/api/Evaluator.cpp b/paddle/api/Evaluator.cpp new file mode 100644 index 0000000000..c30e098763 --- /dev/null +++ b/paddle/api/Evaluator.cpp @@ -0,0 +1,29 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ +#include +#include "PaddleAPI.h" +#include "PaddleAPIPrivate.h" + +Evaluator::Evaluator() : m(new EvaluatorPrivate()) {} +Evaluator::~Evaluator() { delete m; } + +void Evaluator::start() { m->rawPtr->start(); } + +void Evaluator::finish() { m->rawPtr->finish(); } + +std::string Evaluator::toString() { + std::ostringstream sout; + m->rawPtr->printStats(sout); + return sout.str(); +} diff --git a/paddle/api/GradientMachine.cpp b/paddle/api/GradientMachine.cpp index 2cece21097..0d1e175296 100644 --- a/paddle/api/GradientMachine.cpp +++ b/paddle/api/GradientMachine.cpp @@ -162,3 +162,13 @@ SequenceGenerator* GradientMachine::asSequenceGenerator( r->setBeamSize(beam_size); return r; } + +Evaluator* GradientMachine::makeEvaluator() { + auto ev = new Evaluator(); + ev->m->rawPtr = m->machine->makeEvaluator(); + return ev; +} + +void GradientMachine::eval(Evaluator* evaluator) { + m->machine->eval(evaluator->m->rawPtr); +} diff --git a/paddle/api/Paddle.swig b/paddle/api/Paddle.swig index b0fa8beb16..7a110a90b8 100644 --- a/paddle/api/Paddle.swig +++ b/paddle/api/Paddle.swig @@ -97,6 +97,7 @@ namespace std { %rename(__setitem__) Vector::set; %rename(__len__) Vector::getSize; %rename(__call__) ParameterTraverseCallback::apply; +%rename(__repr__) Evaluator::toString; %apply (float* INPLACE_ARRAY2, int DIM1, int DIM2) { (float* data, int dim1, int dim2) @@ -167,6 +168,7 @@ namespace std { %newobject GradientMachine::asSequenceGenerator; %newobject GradientMachine::getParameter; %newobject GradientMachine::getLayerOutput; +%newobject GradientMachine::makeEvaluator; %newobject TrainerConfig::createFromTrainerConfigFile; %newobject TrainerConfig::getModelConfig; %newobject TrainerConfig::getOptimizationConfig; diff --git a/paddle/api/PaddleAPI.h b/paddle/api/PaddleAPI.h index cc49e6a09d..413c385146 100644 --- a/paddle/api/PaddleAPI.h +++ b/paddle/api/PaddleAPI.h @@ -685,7 +685,7 @@ private: }; class SequenceGenerator; - +class Evaluator; struct GradientMachinePrivate; class 
GradientMachine { private: @@ -770,6 +770,10 @@ public: size_t max_length = 100UL, size_t beam_size = -1UL); + Evaluator* makeEvaluator(); + + void eval(Evaluator* evaluator); + private: GradientMachinePrivate* m; @@ -809,6 +813,27 @@ private: ParameterUpdaterPrivate* m; }; +struct EvaluatorPrivate; +class Evaluator { +private: + Evaluator(); + DISABLE_COPY_AND_ASSIGN(Evaluator); + +public: + ~Evaluator(); + + void start(); + + void finish(); + + std::string toString(); + +private: + EvaluatorPrivate* m; + + friend class GradientMachine; +}; + struct TrainerPrivate; class Trainer { private: diff --git a/paddle/api/PaddleAPIPrivate.h b/paddle/api/PaddleAPIPrivate.h index 905668a62f..f41352bfec 100644 --- a/paddle/api/PaddleAPIPrivate.h +++ b/paddle/api/PaddleAPIPrivate.h @@ -14,10 +14,10 @@ limitations under the License. */ #pragma once #include #include "PaddleAPI.h" +#include "paddle/gserver/evaluators/Evaluator.h" #include "paddle/gserver/gradientmachines/GradientMachine.h" -#include "paddle/trainer/TrainerConfigHelper.h" - #include "paddle/parameter/ParameterUpdaterBase.h" +#include "paddle/trainer/TrainerConfigHelper.h" struct GradientMachinePrivate { std::shared_ptr machine; @@ -88,3 +88,10 @@ struct ParameterPrivate { } } }; + +struct EvaluatorPrivate { + paddle::Evaluator* rawPtr; + + EvaluatorPrivate() : rawPtr(nullptr) {} + ~EvaluatorPrivate() { delete rawPtr; } +}; diff --git a/paddle/api/ParameterUpdater.cpp b/paddle/api/ParameterUpdater.cpp index fba4762024..91c8392762 100644 --- a/paddle/api/ParameterUpdater.cpp +++ b/paddle/api/ParameterUpdater.cpp @@ -29,7 +29,7 @@ ParameterUpdater *ParameterUpdater::createLocalUpdater( ParameterUpdater::~ParameterUpdater() { delete m; } void ParameterUpdater::init(const GradientMachine &gm) { - m->updater->init(gm.m->machine->getParameters()); + m->updater->init(gm.m->machine->getNonStaticParameters()); } void ParameterUpdater::startPass() { m->updater->startPass(); } From 409a5774c475b67160ea5cdf22b489652da6bff3 
Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Wed, 21 Dec 2016 22:45:42 +0800 Subject: [PATCH 22/39] Complete a very simple mnist demo. --- demo/mnist/api_train.py | 108 ++++++++++++++++++++++++++++++++++++---- 1 file changed, 99 insertions(+), 9 deletions(-) diff --git a/demo/mnist/api_train.py b/demo/mnist/api_train.py index 52cc13c5a3..c1439bd526 100644 --- a/demo/mnist/api_train.py +++ b/demo/mnist/api_train.py @@ -1,8 +1,17 @@ +""" +A very basic example for how to use current Raw SWIG API to train mnist network. + +Current implementation uses Raw SWIG, which means the API call is directly \ +passed to C++ side of Paddle. + +The user api could be simpler and carefully designed. +""" import py_paddle.swig_paddle as api from py_paddle import DataProviderConverter import paddle.trainer.PyDataProvider2 as dp import paddle.trainer.config_parser import numpy as np +import random from mnist_util import read_from_mnist @@ -27,6 +36,18 @@ def generator_to_batch(generator, batch_size): yield ret_val +class BatchPool(object): + def __init__(self, generator, batch_size): + self.data = list(generator) + self.batch_size = batch_size + + def __call__(self): + random.shuffle(self.data) + for offset in xrange(0, len(self.data), self.batch_size): + limit = min(offset + self.batch_size, len(self.data)) + yield self.data[offset:limit] + + def input_order_converter(generator): for each_item in generator: yield each_item['pixel'], each_item['label'] @@ -37,46 +58,115 @@ def main(): config = paddle.trainer.config_parser.parse_config( 'simple_mnist_network.py', '') + # get enable_types for each optimizer. + # enable_types = [value, gradient, momentum, etc] + # For each optimizer(SGD, Adam), GradientMachine should enable different + # buffers. opt_config = api.OptimizationConfig.createFromProto(config.opt_config) _temp_optimizer_ = api.ParameterOptimizer.create(opt_config) enable_types = _temp_optimizer_.getParameterTypes() + # Create Simple Gradient Machine. 
m = api.GradientMachine.createFromConfigProto( config.model_config, api.CREATE_MODE_NORMAL, enable_types) + + # This type check is not useful. Only enable type hint in IDE. + # Such as PyCharm assert isinstance(m, api.GradientMachine) + + # Initialize Parameter by numpy. init_parameter(network=m) + + # Create Local Updater. Local means not run in cluster. + # For a cluster training, here we can change to createRemoteUpdater + # in future. updater = api.ParameterUpdater.createLocalUpdater(opt_config) assert isinstance(updater, api.ParameterUpdater) + + # Initialize ParameterUpdater. updater.init(m) + # DataProvider Converter is a utility convert Python Object to Paddle C++ + # Input. The input format is as same as Paddle's DataProvider. converter = DataProviderConverter( input_types=[dp.dense_vector(784), dp.integer_value(10)]) train_file = './data/raw_data/train' + test_file = './data/raw_data/t10k' + # start gradient machine. + # the gradient machine must be started before invoke forward/backward. + # not just for training, but also for inference. m.start() - for _ in xrange(100): + # evaluator can print error rate, etc. It is a C++ class. + batch_evaluator = m.makeEvaluator() + test_evaluator = m.makeEvaluator() + + # Get Train Data. + # TrainData will stored in a data pool. Currently implementation is not care + # about memory, speed. Just a very naive implementation. + train_data_generator = input_order_converter(read_from_mnist(train_file)) + train_data = BatchPool(train_data_generator, 128) + + # outArgs is Neural Network forward result. Here is not useful, just passed + # to gradient_machine.forward + outArgs = api.Arguments.createArguments(0) + + for pass_id in xrange(2): # we train 2 passes. 
updater.startPass() - outArgs = api.Arguments.createArguments(0) - train_data_generator = input_order_converter( - read_from_mnist(train_file)) - for batch_id, data_batch in enumerate( - generator_to_batch(train_data_generator, 2048)): - trainRole = updater.startBatch(len(data_batch)) + for batch_id, data_batch in enumerate(train_data()): + # data_batch is input images. + # here, for online learning, we could get data_batch from network. + + # Start update one batch. + pass_type = updater.startBatch(len(data_batch)) + + # Start BatchEvaluator. + # batch_evaluator can be used between start/finish. + batch_evaluator.start() + + # A callback when backward. + # It is used for updating weight values vy calculated Gradient. def updater_callback(param): updater.update(param) + # forwardBackward is a shortcut for forward and backward. + # It is sometimes faster than invoke forward/backward separately, + # because in GradientMachine, it may be async. m.forwardBackward( - converter(data_batch), outArgs, trainRole, updater_callback) + converter(data_batch), outArgs, pass_type, updater_callback) + # Get cost. We use numpy to calculate total cost for this batch. cost_vec = outArgs.getSlotValue(0) cost_vec = cost_vec.copyToNumpyMat() cost = cost_vec.sum() / len(data_batch) - print 'Batch id', batch_id, 'with cost=', cost + + # Make evaluator works. + m.eval(batch_evaluator) + + # Print logs. + print 'Pass id', pass_id, 'Batch id', batch_id, 'with cost=', \ + cost, batch_evaluator + + batch_evaluator.finish() + # Finish batch. + # * will clear gradient. + # * ensure all values should be updated. updater.finishBatch(cost) + # testing stage. use test data set to test current network. + test_evaluator.start() + test_data_generator = input_order_converter(read_from_mnist(test_file)) + for data_batch in generator_to_batch(test_data_generator, 128): + # in testing stage, only forward is needed. 
+ m.forward(converter(data_batch), outArgs, api.PASS_TEST) + m.eval(test_evaluator) + + # print error rate for test data set + print 'Pass', pass_id, ' test evaluator: ', test_evaluator + test_evaluator.finish() updater.finishPass() m.finish() From 680dd92bde2e4d6c2173f47d6da3263d827050e8 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Thu, 22 Dec 2016 11:31:31 +0800 Subject: [PATCH 23/39] Add AverageOptimizer, Add save parameter --- demo/mnist/api_train.py | 13 +++++++++++++ demo/mnist/simple_mnist_network.py | 7 ++++++- paddle/api/PaddleAPI.h | 6 ++++++ paddle/api/ParameterUpdater.cpp | 6 ++++++ 4 files changed, 31 insertions(+), 1 deletion(-) diff --git a/demo/mnist/api_train.py b/demo/mnist/api_train.py index c1439bd526..ce75d79beb 100644 --- a/demo/mnist/api_train.py +++ b/demo/mnist/api_train.py @@ -157,6 +157,7 @@ def main(): updater.finishBatch(cost) # testing stage. use test data set to test current network. + updater.apply() test_evaluator.start() test_data_generator = input_order_converter(read_from_mnist(test_file)) for data_batch in generator_to_batch(test_data_generator, 128): @@ -167,6 +168,18 @@ def main(): # print error rate for test data set print 'Pass', pass_id, ' test evaluator: ', test_evaluator test_evaluator.finish() + updater.restore() + + updater.catchUpWith() + params = m.getParameters() + for each_param in params: + assert isinstance(each_param, api.Parameter) + value = each_param.getBuf(api.PARAMETER_VALUE) + value = value.toNumpyArrayInplace() + + # Here, we could save parameter to every where you want + print each_param.getName(), value + updater.finishPass() m.finish() diff --git a/demo/mnist/simple_mnist_network.py b/demo/mnist/simple_mnist_network.py index 41f4e51657..f5d1ea169e 100644 --- a/demo/mnist/simple_mnist_network.py +++ b/demo/mnist/simple_mnist_network.py @@ -1,6 +1,11 @@ from paddle.trainer_config_helpers import * -settings(learning_rate=1e-4, learning_method=AdamOptimizer(), batch_size=1000) +settings( + 
learning_rate=1e-4, + learning_method=AdamOptimizer(), + batch_size=1000, + model_average=ModelAverage(average_window=0.5), + regularization=L2Regularization(rate=0.5)) imgs = data_layer(name='pixel', size=784) diff --git a/paddle/api/PaddleAPI.h b/paddle/api/PaddleAPI.h index 413c385146..d94fd1e52e 100644 --- a/paddle/api/PaddleAPI.h +++ b/paddle/api/PaddleAPI.h @@ -809,6 +809,12 @@ public: void update(Parameter* param); + void restore(); + + void apply(); + + void catchUpWith(); + private: ParameterUpdaterPrivate* m; }; diff --git a/paddle/api/ParameterUpdater.cpp b/paddle/api/ParameterUpdater.cpp index 91c8392762..7cd8ed7e39 100644 --- a/paddle/api/ParameterUpdater.cpp +++ b/paddle/api/ParameterUpdater.cpp @@ -48,3 +48,9 @@ void ParameterUpdater::update(Parameter *param) { auto paddleParam = param->m->getPtr(); m->updater->update(paddleParam); } + +void ParameterUpdater::restore() { m->updater->restore(); } + +void ParameterUpdater::apply() { m->updater->apply(); } + +void ParameterUpdater::catchUpWith() { m->updater->catchUpWith(); } From 5bca268bd1f9fdc01afe52834b486119076b1e8b Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Thu, 22 Dec 2016 14:51:51 +0800 Subject: [PATCH 24/39] Add gitignore --- demo/mnist/.gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/demo/mnist/.gitignore b/demo/mnist/.gitignore index 810910fd5c..8bd9837523 100644 --- a/demo/mnist/.gitignore +++ b/demo/mnist/.gitignore @@ -4,3 +4,4 @@ mnist_vgg_model plot.png train.log *pyc +.ipynb_checkpoints From 59009ba72d54cc35717dbd80d73500f11fbb7852 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Thu, 22 Dec 2016 14:51:51 +0800 Subject: [PATCH 25/39] Always use copy method for numpy. 
* Make this demo support GPU --- demo/mnist/.gitignore | 1 + demo/mnist/api_train.py | 9 ++++----- paddle/api/Paddle.swig | 1 + paddle/api/PaddleAPI.h | 2 ++ paddle/api/Parameter.cpp | 2 ++ 5 files changed, 10 insertions(+), 5 deletions(-) diff --git a/demo/mnist/.gitignore b/demo/mnist/.gitignore index 810910fd5c..8bd9837523 100644 --- a/demo/mnist/.gitignore +++ b/demo/mnist/.gitignore @@ -4,3 +4,4 @@ mnist_vgg_model plot.png train.log *pyc +.ipynb_checkpoints diff --git a/demo/mnist/api_train.py b/demo/mnist/api_train.py index ce75d79beb..7e653246a3 100644 --- a/demo/mnist/api_train.py +++ b/demo/mnist/api_train.py @@ -19,10 +19,9 @@ def init_parameter(network): assert isinstance(network, api.GradientMachine) for each_param in network.getParameters(): assert isinstance(each_param, api.Parameter) - array = each_param.getBuf(api.PARAMETER_VALUE).toNumpyArrayInplace() - assert isinstance(array, np.ndarray) - for i in xrange(len(array)): - array[i] = np.random.uniform(-1.0, 1.0) + array_size = len(each_param) + array = np.random.uniform(-1.0, 1.0, array_size).astype('float32') + each_param.getBuf(api.PARAMETER_VALUE).copyFromNumpyArray(array) def generator_to_batch(generator, batch_size): @@ -175,7 +174,7 @@ def main(): for each_param in params: assert isinstance(each_param, api.Parameter) value = each_param.getBuf(api.PARAMETER_VALUE) - value = value.toNumpyArrayInplace() + value = value.copyToNumpyArray() # Here, we could save parameter to every where you want print each_param.getName(), value diff --git a/paddle/api/Paddle.swig b/paddle/api/Paddle.swig index 7a110a90b8..3365927f9b 100644 --- a/paddle/api/Paddle.swig +++ b/paddle/api/Paddle.swig @@ -96,6 +96,7 @@ namespace std { %rename(__getitem__) Vector::get; %rename(__setitem__) Vector::set; %rename(__len__) Vector::getSize; +%rename(__len__) Parameter::getSize; %rename(__call__) ParameterTraverseCallback::apply; %rename(__repr__) Evaluator::toString; diff --git a/paddle/api/PaddleAPI.h 
b/paddle/api/PaddleAPI.h index d94fd1e52e..d4b057e8a1 100644 --- a/paddle/api/PaddleAPI.h +++ b/paddle/api/PaddleAPI.h @@ -550,6 +550,8 @@ public: ParameterConfig* getConfig(); void setValueUpdated(); + size_t getSize() const; + private: static Parameter* createFromRawPtr(void* ptr); static Parameter* createFromSharedPtr(void* ptr); diff --git a/paddle/api/Parameter.cpp b/paddle/api/Parameter.cpp index 41cf50043c..ddc00d8d1a 100644 --- a/paddle/api/Parameter.cpp +++ b/paddle/api/Parameter.cpp @@ -56,3 +56,5 @@ ParameterConfig* Parameter::getConfig() { size_t Parameter::getID() const { return m->getPtr()->getID(); } void Parameter::setValueUpdated() { m->getPtr()->setValueUpdated(); } + +size_t Parameter::getSize() const { return m->getPtr()->getSize(); } From f06b64fee47c1d807a224049243d2d3dec39fc5c Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Thu, 22 Dec 2016 07:45:18 +0000 Subject: [PATCH 26/39] Test GPU --- demo/mnist/api_train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/demo/mnist/api_train.py b/demo/mnist/api_train.py index ce75d79beb..e5a9075c8e 100644 --- a/demo/mnist/api_train.py +++ b/demo/mnist/api_train.py @@ -54,7 +54,7 @@ def input_order_converter(generator): def main(): - api.initPaddle("-use_gpu=false", "-trainer_count=4") # use 4 cpu cores + api.initPaddle("-use_gpu=true", "-trainer_count=4") # use 4 cpu cores config = paddle.trainer.config_parser.parse_config( 'simple_mnist_network.py', '') From 5a685841317625786d4c37eb79abfd22cec995d6 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Thu, 22 Dec 2016 07:57:04 +0000 Subject: [PATCH 27/39] Test on GPU --- demo/mnist/api_train.py | 17 +++++++---------- paddle/api/Vector.cpp | 2 +- 2 files changed, 8 insertions(+), 11 deletions(-) diff --git a/demo/mnist/api_train.py b/demo/mnist/api_train.py index 129922c30b..48ba61c47d 100644 --- a/demo/mnist/api_train.py +++ b/demo/mnist/api_train.py @@ -53,7 +53,7 @@ def input_order_converter(generator): def main(): - 
api.initPaddle("-use_gpu=true", "-trainer_count=4") # use 4 cpu cores + api.initPaddle("-use_gpu=false", "-trainer_count=4") # use 4 cpu cores config = paddle.trainer.config_parser.parse_config( 'simple_mnist_network.py', '') @@ -106,7 +106,7 @@ def main(): # TrainData will stored in a data pool. Currently implementation is not care # about memory, speed. Just a very naive implementation. train_data_generator = input_order_converter(read_from_mnist(train_file)) - train_data = BatchPool(train_data_generator, 128) + train_data = BatchPool(train_data_generator, 512) # outArgs is Neural Network forward result. Here is not useful, just passed # to gradient_machine.forward @@ -126,16 +126,13 @@ def main(): # batch_evaluator can be used between start/finish. batch_evaluator.start() - # A callback when backward. - # It is used for updating weight values vy calculated Gradient. - def updater_callback(param): - updater.update(param) - # forwardBackward is a shortcut for forward and backward. # It is sometimes faster than invoke forward/backward separately, # because in GradientMachine, it may be async. - m.forwardBackward( - converter(data_batch), outArgs, pass_type, updater_callback) + m.forwardBackward(converter(data_batch), outArgs, pass_type) + + for each_param in m.getParameters(): + updater.update(each_param) # Get cost. We use numpy to calculate total cost for this batch. cost_vec = outArgs.getSlotValue(0) @@ -159,7 +156,7 @@ def main(): updater.apply() test_evaluator.start() test_data_generator = input_order_converter(read_from_mnist(test_file)) - for data_batch in generator_to_batch(test_data_generator, 128): + for data_batch in generator_to_batch(test_data_generator, 512): # in testing stage, only forward is needed. 
m.forward(converter(data_batch), outArgs, api.PASS_TEST) m.eval(test_evaluator) diff --git a/paddle/api/Vector.cpp b/paddle/api/Vector.cpp index 874f2fd044..db8f005929 100644 --- a/paddle/api/Vector.cpp +++ b/paddle/api/Vector.cpp @@ -253,7 +253,7 @@ void Vector::copyToNumpyArray(float** view_m_data, int* dim1) { *view_m_data = new float[*dim1]; if (auto cpuVec = dynamic_cast(m->vec.get())) { std::memcpy(*view_m_data, cpuVec->getData(), sizeof(float) * (*dim1)); - } else if (auto gpuVec = dynamic_cast(m->vec.get())) { + } else if (auto gpuVec = dynamic_cast(m->vec.get())) { hl_memcpy_device2host( *view_m_data, gpuVec->getData(), sizeof(float) * (*dim1)); } else { From 3a802729746468d654c1a0908a7787bc10618f94 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Thu, 22 Dec 2016 16:57:12 +0800 Subject: [PATCH 28/39] Add comments. --- paddle/api/PaddleAPI.h | 50 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 50 insertions(+) diff --git a/paddle/api/PaddleAPI.h b/paddle/api/PaddleAPI.h index d4b057e8a1..0a273f9f6f 100644 --- a/paddle/api/PaddleAPI.h +++ b/paddle/api/PaddleAPI.h @@ -799,22 +799,61 @@ public: static ParameterUpdater* createLocalUpdater(OptimizationConfig* config); ~ParameterUpdater(); + /** + * @brief initialize Parameter Updater by GradientMachine. + * @param gm + */ void init(const GradientMachine& gm); + /** + * @brief begin of a training/testing of one pass. + */ void startPass(); + /** + * @brief end of a traning/testing of one pass. + */ void finishPass(); + /** + * @brief begin of a training/testing of one batch. + * @param data batch's size + * @return PassType, mostly will be training. + */ PassType startBatch(size_t batchSize); + /** + * @brief end of a traning/testing of one batch + * @param cost current batch cost. + */ void finishBatch(float cost); + /** + * @brief update a parameter (by local optimizer or by cluster pserver) + * @param param + */ void update(Parameter* param); + /** + * @brief restore the average parameter. 
+ * @note It is only used in AverageOptimizer. Restore will get the current + * PARAMETER_VALUE back. + */ void restore(); + /** + * @brief apply. Store the average parameter. + * @note It is only used in AverageOptimizer. Apply will store the current + * PARAMETER_VALUE to buffer, calcaualte current Average Parameter, and save + * it to PARAMETER_VALUE. + */ void apply(); + /** + * @brief catchUpWith The Regularization will be delayed in many situations( + * pserver, local sparse). Catch Up means catch the regularization up, apply + * regularization to all params. + */ void catchUpWith(); private: @@ -830,10 +869,21 @@ private: public: ~Evaluator(); + /** + * @brief begin an evaluate stage. + */ void start(); + /** + * @brief end an evaluate stage. + */ void finish(); + /** + * @brief toString will get a evaluate result. + * + * __repr__ method in python + */ std::string toString(); private: From 843b63bb84586c2b861d971865be270b60a87c56 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Wed, 21 Dec 2016 19:26:05 +0800 Subject: [PATCH 29/39] add config_parser in trainer_config_helpers to seperate trainer config --- demo/mnist/api_train.py | 28 ++++++-- python/paddle/trainer/config_parser.py | 70 ++++++++++--------- .../paddle/trainer_config_helpers/__init__.py | 1 + .../trainer_config_helpers/config_parser.py | 38 ++++++++++ 4 files changed, 98 insertions(+), 39 deletions(-) create mode 100644 python/paddle/trainer_config_helpers/config_parser.py diff --git a/demo/mnist/api_train.py b/demo/mnist/api_train.py index 48ba61c47d..8fa286b5f9 100644 --- a/demo/mnist/api_train.py +++ b/demo/mnist/api_train.py @@ -9,11 +9,29 @@ The user api could be simpler and carefully designed. 
import py_paddle.swig_paddle as api from py_paddle import DataProviderConverter import paddle.trainer.PyDataProvider2 as dp -import paddle.trainer.config_parser import numpy as np import random from mnist_util import read_from_mnist +import paddle.trainer_config_helpers.config_parser as config_parser +from paddle.trainer_config_helpers import * + + +def optimizer_config(): + settings( + learning_rate=1e-4, learning_method=AdamOptimizer(), batch_size=1000) + + +def network_config(): + imgs = data_layer(name='pixel', size=784) + hidden1 = fc_layer(input=imgs, size=200) + hidden2 = fc_layer(input=hidden1, size=200) + inference = fc_layer(input=hidden2, size=10, act=SoftmaxActivation()) + cost = classification_cost( + input=inference, label=data_layer( + name='label', size=10)) + outputs(cost) + def init_parameter(network): assert isinstance(network, api.GradientMachine) @@ -54,20 +72,20 @@ def input_order_converter(generator): def main(): api.initPaddle("-use_gpu=false", "-trainer_count=4") # use 4 cpu cores - config = paddle.trainer.config_parser.parse_config( - 'simple_mnist_network.py', '') # get enable_types for each optimizer. # enable_types = [value, gradient, momentum, etc] # For each optimizer(SGD, Adam), GradientMachine should enable different # buffers. - opt_config = api.OptimizationConfig.createFromProto(config.opt_config) + opt_config_proto = config_parser.parse_optimizer_config(optimizer_config) + opt_config = api.OptimizationConfig.createFromProto(opt_config_proto) _temp_optimizer_ = api.ParameterOptimizer.create(opt_config) enable_types = _temp_optimizer_.getParameterTypes() # Create Simple Gradient Machine. + model_config = config_parser.parse_network_config(network_config) m = api.GradientMachine.createFromConfigProto( - config.model_config, api.CREATE_MODE_NORMAL, enable_types) + model_config, api.CREATE_MODE_NORMAL, enable_types) # This type check is not useful. Only enable type hint in IDE. 
# Such as PyCharm diff --git a/python/paddle/trainer/config_parser.py b/python/paddle/trainer/config_parser.py index 2eb7b17a0b..674b5ac58b 100644 --- a/python/paddle/trainer/config_parser.py +++ b/python/paddle/trainer/config_parser.py @@ -3416,8 +3416,35 @@ def register_parse_config_hook(f): _parse_config_hooks.add(f) -def parse_config(config_file, config_arg_str): +def update_g_config(): ''' + Update g_config after execute config_file or config_functions. + ''' + for k, v in settings.iteritems(): + if v is None: + continue + g_config.opt_config.__setattr__(k, v) + + for k, v in trainer_settings.iteritems(): + if v is None: + continue + g_config.__setattr__(k, v) + + for name in g_config.model_config.input_layer_names: + assert name in g_layer_map, \ + 'input name "%s" does not correspond to a layer name' % name + assert (g_layer_map[name].type == "data" or g_layer_map[name].type == "data_trim"), \ + 'The type of input layer "%s" is not "data"' % name + for name in g_config.model_config.output_layer_names: + assert name in g_layer_map, \ + 'input name "%s" does not correspond to a layer name' % name + return g_config + + +def parse_config(trainer_config, config_arg_str): + ''' + @param trainer_config: can be a string of config file name or a function name + with config logic @param config_arg_str: a string of the form var1=val1,var2=val2. It will be passed to config script as a dictionary CONFIG_ARGS ''' @@ -3451,45 +3478,20 @@ def parse_config(config_file, config_arg_str): g_root_submodel.is_recurrent_layer_group = False g_current_submodel = g_root_submodel - # for paddle on spark, need support non-file config. - # you can use parse_config like below: - # - # from paddle.trainer.config_parser import parse_config - # def configs(): - # #your paddle config code, which is same as config file. - # - # config = parse_config(configs, "is_predict=1") - # # then you get config proto object. 
- if hasattr(config_file, '__call__'): - config_file.func_globals.update( + if hasattr(trainer_config, '__call__'): + trainer_config.func_globals.update( make_config_environment("", config_args)) - config_file() + trainer_config() else: - execfile(config_file, make_config_environment(config_file, config_args)) - for k, v in settings.iteritems(): - if v is None: - continue - g_config.opt_config.__setattr__(k, v) - - for k, v in trainer_settings.iteritems(): - if v is None: - continue - g_config.__setattr__(k, v) + execfile(trainer_config, + make_config_environment(trainer_config, config_args)) - for name in g_config.model_config.input_layer_names: - assert name in g_layer_map, \ - 'input name "%s" does not correspond to a layer name' % name - assert (g_layer_map[name].type == "data" or g_layer_map[name].type == "data_trim"), \ - 'The type of input layer "%s" is not "data"' % name - for name in g_config.model_config.output_layer_names: - assert name in g_layer_map, \ - 'input name "%s" does not correspond to a layer name' % name - return g_config + return update_g_config() -def parse_config_and_serialize(config_file, config_arg_str): +def parse_config_and_serialize(trainer_config, config_arg_str): try: - config = parse_config(config_file, config_arg_str) + config = parse_config(trainer_config, config_arg_str) #logger.info(config) return config.SerializeToString() except: diff --git a/python/paddle/trainer_config_helpers/__init__.py b/python/paddle/trainer_config_helpers/__init__.py index a2335768b9..84ed40a036 100644 --- a/python/paddle/trainer_config_helpers/__init__.py +++ b/python/paddle/trainer_config_helpers/__init__.py @@ -20,6 +20,7 @@ from layers import * from networks import * from optimizers import * from attrs import * +from config_parser import * # This will enable operator overload for LayerOutput import math as layer_math diff --git a/python/paddle/trainer_config_helpers/config_parser.py b/python/paddle/trainer_config_helpers/config_parser.py new file 
mode 100644 index 0000000000..4b91b8d282 --- /dev/null +++ b/python/paddle/trainer_config_helpers/config_parser.py @@ -0,0 +1,38 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import paddle.trainer.config_parser as config_parser +''' +This file is a wrapper of formal config_parser. The main idea of this file is to +separete different config logic into different function, such as network configuration + and optimizer configuration. 
+''' + +__all__ = [ + "parse_trainer_config", "parse_network_config", "parse_optimizer_config" +] + + +def parse_trainer_config(trainer_conf, config_arg_str): + return config_parser.parse_config(trainer_conf, config_arg_str) + + +def parse_network_config(network_conf): + config = config_parser.parse_config(network_conf, '') + return config.model_config + + +def parse_optimizer_config(optimizer_conf): + config = config_parser.parse_config(optimizer_conf, '') + return config.opt_config From 763a30fdde211c047c2ba77d2d82cfa4152f0f26 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Thu, 22 Dec 2016 18:22:47 +0800 Subject: [PATCH 30/39] add config_parser_utils --- demo/mnist/api_train.py | 14 ++++--- demo/mnist/simple_mnist_network.py | 21 ---------- .../paddle/trainer_config_helpers/__init__.py | 2 +- .../config_parser_utils.py | 38 +++++++++++++++++++ 4 files changed, 48 insertions(+), 27 deletions(-) delete mode 100644 demo/mnist/simple_mnist_network.py create mode 100644 python/paddle/trainer_config_helpers/config_parser_utils.py diff --git a/demo/mnist/api_train.py b/demo/mnist/api_train.py index 8fa286b5f9..924bd39a50 100644 --- a/demo/mnist/api_train.py +++ b/demo/mnist/api_train.py @@ -12,14 +12,17 @@ import paddle.trainer.PyDataProvider2 as dp import numpy as np import random from mnist_util import read_from_mnist - -import paddle.trainer_config_helpers.config_parser as config_parser +import paddle.trainer_config_helpers.config_parser_utils as config_parser_utils from paddle.trainer_config_helpers import * def optimizer_config(): settings( - learning_rate=1e-4, learning_method=AdamOptimizer(), batch_size=1000) + learning_rate=1e-4, + learning_method=AdamOptimizer(), + batch_size=1000, + model_average=ModelAverage(average_window=0.5), + regularization=L2Regularization(rate=0.5)) def network_config(): @@ -77,13 +80,14 @@ def main(): # enable_types = [value, gradient, momentum, etc] # For each optimizer(SGD, Adam), GradientMachine should enable different # buffers. 
- opt_config_proto = config_parser.parse_optimizer_config(optimizer_config) + opt_config_proto = config_parser_utils.parse_optimizer_config( + optimizer_config) opt_config = api.OptimizationConfig.createFromProto(opt_config_proto) _temp_optimizer_ = api.ParameterOptimizer.create(opt_config) enable_types = _temp_optimizer_.getParameterTypes() # Create Simple Gradient Machine. - model_config = config_parser.parse_network_config(network_config) + model_config = config_parser_utils.parse_network_config(network_config) m = api.GradientMachine.createFromConfigProto( model_config, api.CREATE_MODE_NORMAL, enable_types) diff --git a/demo/mnist/simple_mnist_network.py b/demo/mnist/simple_mnist_network.py deleted file mode 100644 index f5d1ea169e..0000000000 --- a/demo/mnist/simple_mnist_network.py +++ /dev/null @@ -1,21 +0,0 @@ -from paddle.trainer_config_helpers import * - -settings( - learning_rate=1e-4, - learning_method=AdamOptimizer(), - batch_size=1000, - model_average=ModelAverage(average_window=0.5), - regularization=L2Regularization(rate=0.5)) - -imgs = data_layer(name='pixel', size=784) - -hidden1 = fc_layer(input=imgs, size=200) -hidden2 = fc_layer(input=hidden1, size=200) - -inference = fc_layer(input=hidden2, size=10, act=SoftmaxActivation()) - -cost = classification_cost( - input=inference, label=data_layer( - name='label', size=10)) - -outputs(cost) diff --git a/python/paddle/trainer_config_helpers/__init__.py b/python/paddle/trainer_config_helpers/__init__.py index 84ed40a036..ef9859f831 100644 --- a/python/paddle/trainer_config_helpers/__init__.py +++ b/python/paddle/trainer_config_helpers/__init__.py @@ -20,7 +20,7 @@ from layers import * from networks import * from optimizers import * from attrs import * -from config_parser import * +from config_parser_utils import * # This will enable operator overload for LayerOutput import math as layer_math diff --git a/python/paddle/trainer_config_helpers/config_parser_utils.py 
b/python/paddle/trainer_config_helpers/config_parser_utils.py new file mode 100644 index 0000000000..681b177a55 --- /dev/null +++ b/python/paddle/trainer_config_helpers/config_parser_utils.py @@ -0,0 +1,38 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import paddle.trainer.config_parser as config_parser +''' +This file is a wrapper of formal config_parser. The main idea of this file is to +separete different config logic into different function, such as network configuration + and optimizer configuration. 
+''' + +__all__ = [ + "parse_trainer_config", "parse_network_config", "parse_optimizer_config" +] + + +def parse_trainer_config(trainer_conf, config_arg_str): + return config_parser.parse_config(trainer_conf, config_arg_str) + + +def parse_network_config(network_conf, config_arg_str=''): + config = config_parser.parse_config(network_conf, config_arg_str) + return config.model_config + + +def parse_optimizer_config(optimizer_conf, config_arg_str=''): + config = config_parser.parse_config(optimizer_conf, config_arg_str) + return config.opt_config From 9b41b08ef39aaf4f49daaf85a8defd4726642e69 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Thu, 22 Dec 2016 21:24:14 +0800 Subject: [PATCH 31/39] Remove unnecessary import in api_train.py --- demo/mnist/api_train.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/demo/mnist/api_train.py b/demo/mnist/api_train.py index 924bd39a50..f301da382f 100644 --- a/demo/mnist/api_train.py +++ b/demo/mnist/api_train.py @@ -12,7 +12,6 @@ import paddle.trainer.PyDataProvider2 as dp import numpy as np import random from mnist_util import read_from_mnist -import paddle.trainer_config_helpers.config_parser_utils as config_parser_utils from paddle.trainer_config_helpers import * @@ -80,14 +79,13 @@ def main(): # enable_types = [value, gradient, momentum, etc] # For each optimizer(SGD, Adam), GradientMachine should enable different # buffers. - opt_config_proto = config_parser_utils.parse_optimizer_config( - optimizer_config) + opt_config_proto = parse_optimizer_config(optimizer_config) opt_config = api.OptimizationConfig.createFromProto(opt_config_proto) _temp_optimizer_ = api.ParameterOptimizer.create(opt_config) enable_types = _temp_optimizer_.getParameterTypes() # Create Simple Gradient Machine. 
- model_config = config_parser_utils.parse_network_config(network_config) + model_config = parse_network_config(network_config) m = api.GradientMachine.createFromConfigProto( model_config, api.CREATE_MODE_NORMAL, enable_types) From 99e43d1d07aa6dc63dba09141529764c7db83198 Mon Sep 17 00:00:00 2001 From: liaogang Date: Fri, 23 Dec 2016 13:20:42 +0800 Subject: [PATCH 32/39] Add c++11 build python binding package --- paddle/api/paddle_ld_flags.py | 7 +++++-- paddle/setup.py.in | 11 ++++------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/paddle/api/paddle_ld_flags.py b/paddle/api/paddle_ld_flags.py index 7c8206e3fe..b4d27b1cc7 100644 --- a/paddle/api/paddle_ld_flags.py +++ b/paddle/api/paddle_ld_flags.py @@ -141,9 +141,12 @@ try: def c_flag(self): if self.with_coverage: - return ["-fprofile-arcs", "-ftest-coverage", "-O0", "-g"] + return [ + "-fprofile-arcs", "-ftest-coverage", "-O0", "-g", + "-std=c++11" + ] else: - return None + return ["-std=c++11"] except ImportError: class PaddleLDFlag(object): diff --git a/paddle/setup.py.in b/paddle/setup.py.in index b4c38a41b8..464ad63286 100644 --- a/paddle/setup.py.in +++ b/paddle/setup.py.in @@ -30,8 +30,10 @@ is_lin = (system == 'linux') # The extra links will passed from COMAKE # because generate paddle LDFLAGS is too complicated to do in setup.py # it just read COMAKE generated LDFLAGS. +extra_comps = [] extra_links = [] obj = api.paddle_ld_flags.PaddleLDFlag() +extra_comps = obj.c_flag() ldflags = obj.ldflag_str() if ldflags is not None: extra_links.extend(ldflags.split(" ")) @@ -51,20 +53,15 @@ elif is_osx == True: include_dirs = [np.get_include(), "../"] # include numpy and paddle. -extra_c = obj.c_flag() - -attr=dict() -if extra_c is not None: - attr["extra_compile_args"] = extra_c - setup(name="py_paddle", version="@PADDLE_VERSION@", ext_modules=[ Extension('py_paddle._swig_paddle', # Build SWIG Extension. 
['Paddle_wrap.cxx'], + language = "c++", include_dirs = include_dirs, extra_link_args = extra_links, - **attr + extra_compile_args = extra_comps ) ], packages=['py_paddle'], From c8d0791accb7fbceda308756e6271e12e233c063 Mon Sep 17 00:00:00 2001 From: liaogang Date: Fri, 23 Dec 2016 13:21:48 +0800 Subject: [PATCH 33/39] Add common.h and remove DisableCopy and Typedefs --- .../image_classification/index_cn.md | 205 ++++++++++++++++++ .../image_classification/index_en.md | 2 +- paddle/api/PaddleAPI.h | 34 ++- paddle/cuda/include/hl_base.h | 66 +++--- paddle/gserver/dataproviders/DataProvider.h | 2 +- .../gserver/layers/BatchNormalizationLayer.h | 2 + paddle/gserver/layers/GruCompute.h | 2 +- paddle/gserver/layers/LstmCompute.h | 2 +- paddle/gserver/layers/MultinomialSampler.h | 2 +- paddle/math/BaseMatrix.h | 2 +- paddle/math/Matrix.h | 2 +- paddle/math/TensorExpression.h | 2 +- paddle/math/Vector.h | 2 +- paddle/parameter/ParallelParameter.h | 2 +- paddle/parameter/Parameter.h | 2 +- paddle/parameter/ParameterUpdateFunctions.h | 2 +- paddle/pserver/BaseClient.h | 2 +- paddle/pserver/ParameterClient2.h | 2 +- paddle/pserver/ParameterServer2.h | 2 +- paddle/utils/CpuId.h | 2 +- paddle/utils/DisableCopy.h | 23 -- paddle/utils/Locks.h | 2 +- paddle/utils/Util.h | 3 +- paddle/utils/Version.h | 2 +- paddle/utils/{TypeDefs.h => common.h} | 15 +- 25 files changed, 277 insertions(+), 107 deletions(-) create mode 100644 doc/tutorials/image_classification/index_cn.md delete mode 100644 paddle/utils/DisableCopy.h rename paddle/utils/{TypeDefs.h => common.h} (71%) diff --git a/doc/tutorials/image_classification/index_cn.md b/doc/tutorials/image_classification/index_cn.md new file mode 100644 index 0000000000..87f465522a --- /dev/null +++ b/doc/tutorials/image_classification/index_cn.md @@ -0,0 +1,205 @@ +图像分类教程 +========== + +在本教程中,我们将使用CIFAR-10数据集训练一个卷积神经网络,并使用这个神经网络来对图片进行分类。如下图所示,卷积神经网络可以辨识图片中的主体,并给出分类结果。 +
![Image Classification](./image_classification.png)
+ +## 数据准备 +首先下载CIFAR-10数据集。下面是CIFAR-10数据集的官方网址: + + + +我们准备了一个脚本,可以用于从官方网站上下载CIFAR-10数据集,转为jpeg文件并存入特定的目录。使用这个脚本前请确认已经安装了pillow及相关依赖模块。可以参照下面的命令进行安装: + +1. 安装pillow + +```bash +sudo apt-get install libjpeg-dev +pip install pillow +``` + +2. 下载数据集 + +```bash +cd demo/image_classification/data/ +sh download_cifar.sh +``` + +CIFAR-10数据集包含60000张32x32的彩色图片。图片分为10类,每个类包含6000张。其中50000张图片作为训练集,10000张作为测试集。 + +下图展示了所有的图片类别,每个类别中随机抽取了10张图片。 +
![Image Classification](./cifar.png)
+ +脚本运行完成后,我们应当会得到一个名为cifar-out的文件夹,其下子文件夹的结构如下 + + +``` +train +---airplane +---automobile +---bird +---cat +---deer +---dog +---frog +---horse +---ship +---truck +test +---airplane +---automobile +---bird +---cat +---deer +---dog +---frog +---horse +---ship +---truck +``` + +cifar-out下包含`train`和`test`两个文件夹,其中分别包含了CIFAR-10中的训练集和测试集。这两个文件夹下各自有10个子文件夹,每个子文件夹下存储相应分类的图片。将图片按照上述结构存储好之后,我们就可以着手对分类模型进行训练了。 + +## 预处理 +数据下载之后,还需要进行预处理,将数据转换为Paddle的格式。我们可以通过如下命令进行预处理工作: + +``` +cd demo/image_classification/ +sh preprocess.sh +``` + +其中`preprocess.sh` 调用 `./demo/image_classification/preprocess.py` 对图片进行预处理 +```sh +export PYTHONPATH=$PYTHONPATH:../../ +data_dir=./data/cifar-out +python preprocess.py -i $data_dir -s 32 -c 1 +``` + +`./demo/image_classification/preprocess.py` 使用如下参数: + +- `-i` 或 `--input` 给出输入数据所在路径; +- `-s` 或 `--size` 给出图片尺寸; +- `-c` 或 `--color` 标示图片是彩色图或灰度图 + +## 模型训练 +在开始训练之前,我们需要先创建一个模型配置文件。下面我们给出了一个配置示例。**注意**,这里的列出的和`vgg_16_cifar.py`文件稍有差别,因为该文件可适用于预测。 + +```python +from paddle.trainer_config_helpers import * +data_dir='data/cifar-out/batches/' +meta_path=data_dir+'batches.meta' +args = {'meta':meta_path, 'mean_img_size': 32, + 'img_size': 32, 'num_classes': 10, + 'use_jpeg': 1, 'color': "color"} +define_py_data_sources2(train_list=data_dir+"train.list", + test_list=data_dir+'test.list', + module='image_provider', + obj='processData', + args=args) +settings( + batch_size = 128, + learning_rate = 0.1 / 128.0, + learning_method = MomentumOptimizer(0.9), + regularization = L2Regularization(0.0005 * 128)) + +img = data_layer(name='image', size=3*32*32) +lbl = data_layer(name="label", size=10) +# small_vgg is predined in trainer_config_helpers.network +predict = small_vgg(input_image=img, num_channels=3) +outputs(classification_cost(input=predict, label=lbl)) +``` + +在第一行中我们载入用于定义网络的函数。 +```python +from paddle.trainer_config_helpers import * +``` + +之后定义的`define_py_data_sources2`使用Python数据提供器,其中 `args`将在`image_provider.py`进行使用,该文件负责产生图片数据并传递给Paddle系统 + - 
`meta`: 训练集平均值。 + - `mean_img_size`: 平均特征图的高度及宽度。 + - `img_size`:输入图片的高度及宽度。 + - `num_classes`:类别个数。 + - `use_jpeg`:处理过程中数据存储格式。 + - `color`:标示是否为彩色图片。 + + `settings`用于设置训练算法。在下面的例子中,learning rate被设置为0.1除以batch size,而weight decay则为0.0005乘以batch size。 + + ```python +settings( + batch_size = 128, + learning_rate = 0.1 / 128.0, + learning_method = MomentumOptimizer(0.9), + regularization = L2Regularization(0.0005 * 128) +) +``` + +`small_vgg`定义了网络结构。这里我们使用的是一个小的VGG网络。关于VGG卷积神经网络的描述可以参考:[http://www.robots.ox.ac.uk/~vgg/research/very_deep/](http://www.robots.ox.ac.uk/~vgg/research/very_deep/)。 +```python +# small_vgg is predined in trainer_config_helpers.network +predict = small_vgg(input_image=img, num_channels=3) +``` +配置创建完毕后,可以运行脚本train.sh来训练模型。 + +```bash +config=vgg_16_cifar.py +output=./cifar_vgg_model +log=train.log + +paddle train \ +--config=$config \ +--dot_period=10 \ +--log_period=100 \ +--test_all_data_in_one_period=1 \ +--use_gpu=1 \ +--save_dir=$output \ +2>&1 | tee $log + +python -m paddle.utils.plotcurve -i $log > plot.png +``` +- 这里我们使用的是GPU模式进行训练。如果你没有GPU环境,可以设置`use_gpu=0`。 +- `./demo/image_classification/vgg_16_cifar.py`是网络和数据配置文件。各项参数的详细说明可以在命令行参数相关文档中找到。 +- 脚本`plotcurve.py`依赖于python的`matplotlib`模块。因此如果这个脚本运行失败,也许是因为需要安装`matplotlib`。 +在训练完成后,训练及测试误差曲线图会被`plotcurve.py`脚本保存在 `plot.png`中。下面是一个误差曲线图的示例: + +
![Training and testing curves.](./plot.png)
+ +## 预测 +在训练完成后,模型及参数会被保存在路径`./cifar_vgg_model/pass-%05d`下。例如第300个pass的模型会被保存在`./cifar_vgg_model/pass-00299`。 + +要对一个图片的进行分类预测,我们可以使用`predict.sh`,该脚本将输出预测分类的标签: + +``` +sh predict.sh +``` + +predict.sh: +``` +model=cifar_vgg_model/pass-00299/ +image=data/cifar-out/test/airplane/seaplane_s_000978.png +use_gpu=1 +python prediction.py $model $image $use_gpu +``` + +## 练习 +在CUB-200数据集上使用VGG模型训练一个鸟类图片分类模型。相关的鸟类数据集可以从如下地址下载,其中包含了200种鸟类的照片(主要来自北美洲)。 + + + + + + +## 细节探究 +### 卷积神经网络 +卷积神经网络是一种使用卷积层的前向神经网络,很适合构建用于理解图片内容的模型。一个典型的神经网络如下图所示: + +![Convolutional Neural Network](./lenet.png) + +一个卷积神经网络包含如下层: + +- 卷积层:通过卷积操作从图片或特征图中提取特征 +- 池化层:使用max-pooling对特征图下采样 +- 全连接层:使输入层到隐藏层的神经元是全部连接的。 + +卷积神经网络在图片分类上有着惊人的性能,这是因为它发掘出了图片的两类重要信息:局部关联性质和空间不变性质。通过交替使用卷积和池化处理, 卷积神经网络能够很好的表示这两类信息。 + +关于如何定义网络中的层,以及如何在层之间进行连接,请参考Layer文档。 diff --git a/doc/tutorials/image_classification/index_en.md b/doc/tutorials/image_classification/index_en.md index 29cfc99702..60c81a6a53 100644 --- a/doc/tutorials/image_classification/index_en.md +++ b/doc/tutorials/image_classification/index_en.md @@ -147,7 +147,7 @@ for classification. A description of VGG network can be found here [http://www.r # small_vgg is predined in trainer_config_helpers.network predict = small_vgg(input_image=img, num_channels=3) ``` -After writing the config, we can train the model by running the script train.sh. Notice that the following script assumes the you run the script in the `./demo/image_classification` folder. If you run the script in a different folder, you need to change the paths of the scripts and the configuration files accordingly. +After writing the config, we can train the model by running the script train.sh. ```bash config=vgg_16_cifar.py diff --git a/paddle/api/PaddleAPI.h b/paddle/api/PaddleAPI.h index 84a66719c3..5c4c25e770 100644 --- a/paddle/api/PaddleAPI.h +++ b/paddle/api/PaddleAPI.h @@ -20,15 +20,11 @@ limitations under the License. 
*/ #include #include #include "paddle/utils/GlobalConstants.h" -#include "paddle/utils/TypeDefs.h" +#include "paddle/utils/common.h" /// Import PaddlePaddle's enumeration into global namespace. using namespace paddle::enumeration_wrapper; // NOLINT -#define DISABLE_COPY_AND_ASSIGN(classname) \ - classname(const classname& other); \ - classname& operator=(const classname& other) - /** * @brief Initialize paddle. * @@ -102,7 +98,7 @@ const size_t NO_SPARSE_ID = -1UL; struct MatrixPrivate; class Matrix { Matrix(); // User Cannot Create Matrix. - DISABLE_COPY_AND_ASSIGN(Matrix); + DISABLE_COPY(Matrix); static Matrix* createByPaddleMatrixPtr(void* sharedPtr); public: @@ -242,7 +238,7 @@ private: struct VectorPrivate; class Vector { - DISABLE_COPY_AND_ASSIGN(Vector); + DISABLE_COPY(Vector); Vector(); static Vector* createByPaddleVectorPtr(void* ptr); @@ -322,7 +318,7 @@ private: struct IVectorPrivate; class IVector { IVector(); - DISABLE_COPY_AND_ASSIGN(IVector); + DISABLE_COPY(IVector); static IVector* createByPaddleVectorPtr(void* ptr); public: @@ -402,7 +398,7 @@ struct ArgumentsPrivate; class Arguments { private: Arguments(); // Internal Create. 
- DISABLE_COPY_AND_ASSIGN(Arguments); + DISABLE_COPY(Arguments); public: /** @@ -472,7 +468,7 @@ enum GradientMatchineCreateMode { struct ParameterConfigPrivate; class ParameterConfig { - DISABLE_COPY_AND_ASSIGN(ParameterConfig); + DISABLE_COPY(ParameterConfig); ParameterConfig(); /** @@ -502,7 +498,7 @@ private: struct OptimizationConfigPrivate; class OptimizationConfig { - DISABLE_COPY_AND_ASSIGN(OptimizationConfig); + DISABLE_COPY(OptimizationConfig); OptimizationConfig(); public: @@ -526,7 +522,7 @@ struct ParameterPrivate; class Parameter { private: Parameter(); - DISABLE_COPY_AND_ASSIGN(Parameter); + DISABLE_COPY(Parameter); public: virtual ~Parameter(); @@ -568,7 +564,7 @@ struct ModelConfigPrivate; class ModelConfig { private: ModelConfig(); - DISABLE_COPY_AND_ASSIGN(ModelConfig); + DISABLE_COPY(ModelConfig); public: virtual ~ModelConfig(); @@ -589,7 +585,7 @@ struct TrainerConfigPrivate; class TrainerConfig { private: TrainerConfig(); - DISABLE_COPY_AND_ASSIGN(TrainerConfig); + DISABLE_COPY(TrainerConfig); public: virtual ~TrainerConfig(); @@ -629,7 +625,7 @@ public: struct ParameterTraverseCallbackPrivate; class ParameterTraverseCallback { - DISABLE_COPY_AND_ASSIGN(ParameterTraverseCallback); + DISABLE_COPY(ParameterTraverseCallback); ParameterTraverseCallback(); public: @@ -651,7 +647,7 @@ private: */ struct ParameterOptimizerPrivate; class ParameterOptimizer { - DISABLE_COPY_AND_ASSIGN(ParameterOptimizer); + DISABLE_COPY(ParameterOptimizer); ParameterOptimizer(); public: @@ -688,7 +684,7 @@ struct GradientMachinePrivate; class GradientMachine { private: GradientMachine(); - DISABLE_COPY_AND_ASSIGN(GradientMachine); + DISABLE_COPY(GradientMachine); public: virtual ~GradientMachine(); @@ -780,7 +776,7 @@ private: TrainerPrivate* m; Trainer(); Trainer(TrainerConfig* optConfig, GradientMachine* gm); - DISABLE_COPY_AND_ASSIGN(Trainer); + DISABLE_COPY(Trainer); public: virtual ~Trainer(); @@ -846,7 +842,7 @@ public: struct SequenceGeneratorPrivate; class 
SequenceGenerator { - DISABLE_COPY_AND_ASSIGN(SequenceGenerator); + DISABLE_COPY(SequenceGenerator); SequenceGenerator(); public: diff --git a/paddle/cuda/include/hl_base.h b/paddle/cuda/include/hl_base.h index 84c5f2d5c9..5b9884b786 100644 --- a/paddle/cuda/include/hl_base.h +++ b/paddle/cuda/include/hl_base.h @@ -16,7 +16,31 @@ limitations under the License. */ #define HL_BASE_H_ #include -#include "paddle/utils/TypeDefs.h" + +#ifdef PADDLE_TYPE_DOUBLE +#define HL_FLOAT_MAX 3.40282347e+38F +#define HL_FLOAT_MIN 1.17549435e-38F +using real = double; +#else +#define HL_FLOAT_MAX 1.7976931348623157e+308 +#define HL_FLOAT_MIN 2.2250738585072014e-308 +using real = float; +#endif + +/** + * The maximum input value for exp, used to avoid overflow problem. + * currently only used for tanh function. + */ +#define EXP_MAX_INPUT 40.0 + +/** + * @brief DIVUP(x, y) is similar to ceil(x / y). + * @note For CUDA, DIVUP will be used to specify + * the size of blockDim. + */ +#ifndef DIVUP +#define DIVUP(x, y) (((x) + (y)-1) / (y)) +#endif /** * HPPL is an internal high performance parallel computing library @@ -181,46 +205,6 @@ typedef struct { size_t nnz; } _hl_sparse_matrix_s, *hl_sparse_matrix_s; -#ifndef PADDLE_TYPE_DOUBLE -/** - * HPPL data type: real (float or double) - * - * if real == float - * - * HL_FLOAT_MAX: 3.40282347e+38F - * - * HL_FLOAT_MIN: 1.17549435e-38F - */ -#define HL_FLOAT_MAX 3.40282347e+38F -/** - * if real == double - * - * HL_FLOAT_MAX: 1.7976931348623157e+308 - * - * HL_FLOAT_MIN: 2.2250738585072014e-308 - */ -#define HL_FLOAT_MIN 1.17549435e-38F -#else -#define HL_FLOAT_MAX 1.7976931348623157e+308 -#define HL_FLOAT_MIN 2.2250738585072014e-308 -#endif - -/** - * The maximum input value for exp, used to avoid overflow problem. - * - * Currently only used for tanh function. - */ -#define EXP_MAX_INPUT 40.0 - -/** - * @brief DIVUP(x, y) is similar to ceil(x / y). - * @note For CUDA, DIVUP will be used to specify - * the size of blockDim. 
- */ -#ifndef DIVUP -#define DIVUP(x, y) (((x) + (y)-1) / (y)) -#endif - #ifdef __NVCC__ #include "cuda_runtime.h" diff --git a/paddle/gserver/dataproviders/DataProvider.h b/paddle/gserver/dataproviders/DataProvider.h index 9b7f7e36ce..5f031fc7c0 100644 --- a/paddle/gserver/dataproviders/DataProvider.h +++ b/paddle/gserver/dataproviders/DataProvider.h @@ -34,8 +34,8 @@ limitations under the License. */ #include "paddle/utils/Logging.h" #include "paddle/utils/Queue.h" #include "paddle/utils/ThreadLocal.h" -#include "paddle/utils/TypeDefs.h" #include "paddle/utils/Util.h" +#include "paddle/utils/common.h" namespace paddle { /** diff --git a/paddle/gserver/layers/BatchNormalizationLayer.h b/paddle/gserver/layers/BatchNormalizationLayer.h index 052c207732..195acbbfc5 100644 --- a/paddle/gserver/layers/BatchNormalizationLayer.h +++ b/paddle/gserver/layers/BatchNormalizationLayer.h @@ -58,6 +58,8 @@ protected: /// to batch, channels* imagePixels. void shrinkMat(const MatrixPtr& in, MatrixPtr& out); + void onPassEnd() { firstTest_ = true; } + MatrixPtr tmpMat_, tmpGrad_; MatrixPtr expandedIn_, expandedOut_; MatrixPtr expandedInGrad_, expandedOutGrad_, inGrad_; diff --git a/paddle/gserver/layers/GruCompute.h b/paddle/gserver/layers/GruCompute.h index 42c0019319..a56af21317 100644 --- a/paddle/gserver/layers/GruCompute.h +++ b/paddle/gserver/layers/GruCompute.h @@ -16,7 +16,7 @@ limitations under the License. */ #include "ModelConfig.pb.h" #include "hl_gpu.h" -#include "paddle/utils/TypeDefs.h" +#include "paddle/utils/common.h" namespace paddle { diff --git a/paddle/gserver/layers/LstmCompute.h b/paddle/gserver/layers/LstmCompute.h index 140a4c6ecf..0d65b4158e 100644 --- a/paddle/gserver/layers/LstmCompute.h +++ b/paddle/gserver/layers/LstmCompute.h @@ -16,7 +16,7 @@ limitations under the License. 
*/ #include "ModelConfig.pb.h" #include "hl_gpu.h" -#include "paddle/utils/TypeDefs.h" +#include "paddle/utils/common.h" namespace paddle { diff --git a/paddle/gserver/layers/MultinomialSampler.h b/paddle/gserver/layers/MultinomialSampler.h index 677b047029..b48073c80b 100644 --- a/paddle/gserver/layers/MultinomialSampler.h +++ b/paddle/gserver/layers/MultinomialSampler.h @@ -16,7 +16,7 @@ limitations under the License. */ #include #include -#include "paddle/utils/TypeDefs.h" +#include "paddle/utils/common.h" namespace paddle { diff --git a/paddle/math/BaseMatrix.h b/paddle/math/BaseMatrix.h index 2933c20fba..8f9bc9e823 100644 --- a/paddle/math/BaseMatrix.h +++ b/paddle/math/BaseMatrix.h @@ -16,7 +16,7 @@ limitations under the License. */ #include #include #include "TensorExpression.h" -#include "paddle/utils/TypeDefs.h" +#include "paddle/utils/common.h" namespace paddle { diff --git a/paddle/math/Matrix.h b/paddle/math/Matrix.h index 25ce09e346..bda863de38 100644 --- a/paddle/math/Matrix.h +++ b/paddle/math/Matrix.h @@ -27,7 +27,7 @@ limitations under the License. */ #include "MemoryHandle.h" #include "Vector.h" #include "paddle/utils/ThreadLocal.h" -#include "paddle/utils/TypeDefs.h" +#include "paddle/utils/common.h" namespace paddle { diff --git a/paddle/math/TensorExpression.h b/paddle/math/TensorExpression.h index 9bd789e8c5..f3d60e4003 100644 --- a/paddle/math/TensorExpression.h +++ b/paddle/math/TensorExpression.h @@ -17,7 +17,7 @@ limitations under the License. */ #include #include "hl_tensor_ops.h" #include "paddle/utils/Logging.h" -#include "paddle/utils/TypeDefs.h" +#include "paddle/utils/common.h" namespace paddle { diff --git a/paddle/math/Vector.h b/paddle/math/Vector.h index 8a24103bd4..b4347a70f8 100644 --- a/paddle/math/Vector.h +++ b/paddle/math/Vector.h @@ -22,7 +22,7 @@ limitations under the License. 
*/ #include "BaseMatrix.h" #include "MemoryHandle.h" #include "paddle/utils/Thread.h" -#include "paddle/utils/TypeDefs.h" +#include "paddle/utils/common.h" namespace paddle { diff --git a/paddle/parameter/ParallelParameter.h b/paddle/parameter/ParallelParameter.h index 417e386dc7..1ee220d2dc 100644 --- a/paddle/parameter/ParallelParameter.h +++ b/paddle/parameter/ParallelParameter.h @@ -28,7 +28,7 @@ limitations under the License. */ #include "paddle/parameter/ParameterUpdateFunctions.h" #include "paddle/utils/Flags.h" #include "paddle/utils/Locks.h" -#include "paddle/utils/TypeDefs.h" +#include "paddle/utils/common.h" #include "ParameterConfig.pb.h" diff --git a/paddle/parameter/Parameter.h b/paddle/parameter/Parameter.h index 532c6770e5..e05137b315 100644 --- a/paddle/parameter/Parameter.h +++ b/paddle/parameter/Parameter.h @@ -29,8 +29,8 @@ limitations under the License. */ #include "paddle/utils/GlobalConstants.h" #include "paddle/utils/Locks.h" #include "paddle/utils/ThreadLocal.h" -#include "paddle/utils/TypeDefs.h" #include "paddle/utils/Util.h" +#include "paddle/utils/common.h" namespace paddle { diff --git a/paddle/parameter/ParameterUpdateFunctions.h b/paddle/parameter/ParameterUpdateFunctions.h index 2d277e47e7..2cb3798717 100644 --- a/paddle/parameter/ParameterUpdateFunctions.h +++ b/paddle/parameter/ParameterUpdateFunctions.h @@ -15,7 +15,7 @@ limitations under the License. */ #pragma once #include "paddle/math/Vector.h" -#include "paddle/utils/TypeDefs.h" +#include "paddle/utils/common.h" namespace paddle { diff --git a/paddle/pserver/BaseClient.h b/paddle/pserver/BaseClient.h index 262afafbe2..ccf05ae1ca 100644 --- a/paddle/pserver/BaseClient.h +++ b/paddle/pserver/BaseClient.h @@ -18,7 +18,7 @@ limitations under the License. 
*/ #include "paddle/math/Matrix.h" #include "paddle/pserver/ProtoServer.h" #include "paddle/utils/Queue.h" -#include "paddle/utils/TypeDefs.h" +#include "paddle/utils/common.h" namespace paddle { diff --git a/paddle/pserver/ParameterClient2.h b/paddle/pserver/ParameterClient2.h index eed71ccb43..70cfc6d700 100644 --- a/paddle/pserver/ParameterClient2.h +++ b/paddle/pserver/ParameterClient2.h @@ -26,8 +26,8 @@ limitations under the License. */ #include "paddle/utils/Flags.h" #include "paddle/utils/Locks.h" #include "paddle/utils/Queue.h" -#include "paddle/utils/TypeDefs.h" #include "paddle/utils/Util.h" +#include "paddle/utils/common.h" #include "ParameterService.pb.h" diff --git a/paddle/pserver/ParameterServer2.h b/paddle/pserver/ParameterServer2.h index b0cf22e1fb..79d1eb97ff 100644 --- a/paddle/pserver/ParameterServer2.h +++ b/paddle/pserver/ParameterServer2.h @@ -32,7 +32,7 @@ limitations under the License. */ #include "paddle/utils/Locks.h" #include "paddle/utils/Stat.h" #include "paddle/utils/ThreadLocal.h" -#include "paddle/utils/TypeDefs.h" +#include "paddle/utils/common.h" #include "ParameterService.pb.h" diff --git a/paddle/utils/CpuId.h b/paddle/utils/CpuId.h index 7a354da758..1218e8194c 100644 --- a/paddle/utils/CpuId.h +++ b/paddle/utils/CpuId.h @@ -11,7 +11,7 @@ limitations under the License. */ #pragma once -#include "DisableCopy.h" +#include "common.h" namespace paddle { diff --git a/paddle/utils/DisableCopy.h b/paddle/utils/DisableCopy.h deleted file mode 100644 index 41de98bbde..0000000000 --- a/paddle/utils/DisableCopy.h +++ /dev/null @@ -1,23 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#pragma once - -/** - * Disable copy macro. - */ -#define DISABLE_COPY(CLASS_NAME) \ - CLASS_NAME(CLASS_NAME &&) = delete; \ - CLASS_NAME(const CLASS_NAME &other) = delete; \ - CLASS_NAME &operator=(const CLASS_NAME &other) = delete diff --git a/paddle/utils/Locks.h b/paddle/utils/Locks.h index 0f922f3548..a21872e89e 100644 --- a/paddle/utils/Locks.h +++ b/paddle/utils/Locks.h @@ -19,7 +19,7 @@ limitations under the License. */ #include #include -#include "DisableCopy.h" +#include "common.h" namespace paddle { diff --git a/paddle/utils/Util.h b/paddle/utils/Util.h index e5a89070f1..dc15ada586 100644 --- a/paddle/utils/Util.h +++ b/paddle/utils/Util.h @@ -26,12 +26,11 @@ limitations under the License. */ #include #include -#include "DisableCopy.h" #include "Logging.h" #include "TrainerConfig.pb.h" +#include "common.h" #include "Flags.h" -#include "TypeDefs.h" #include "hl_gpu.h" /** diff --git a/paddle/utils/Version.h b/paddle/utils/Version.h index d1a07d9485..aa5df32438 100644 --- a/paddle/utils/Version.h +++ b/paddle/utils/Version.h @@ -15,7 +15,7 @@ limitations under the License. */ #pragma once #include #include -#include "TypeDefs.h" +#include "common.h" namespace paddle { diff --git a/paddle/utils/TypeDefs.h b/paddle/utils/common.h similarity index 71% rename from paddle/utils/TypeDefs.h rename to paddle/utils/common.h index c50a05e82d..3ff0b86947 100644 --- a/paddle/utils/TypeDefs.h +++ b/paddle/utils/common.h @@ -15,12 +15,19 @@ limitations under the License. */ #pragma once namespace paddle { + +/** + * Disable copy macro. 
+ */ +#define DISABLE_COPY(class_name) \ + class_name(class_name &&) = delete; \ + class_name(const class_name &other) = delete; \ + class_name &operator=(const class_name &other) = delete + #ifdef PADDLE_TYPE_DOUBLE -typedef double real; +using real = double; #else -typedef float real; +using real = float; #endif } // namespace paddle - -using paddle::real; From 051d15cf0044e330515ab0a10ca82e0fdb576105 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Fri, 23 Dec 2016 17:44:25 +0800 Subject: [PATCH 34/39] add prefetch and onPassEnd to PaddleApi.h --- paddle/api/GradientMachine.cpp | 8 ++++++++ paddle/api/PaddleAPI.h | 10 ++++++++++ 2 files changed, 18 insertions(+) diff --git a/paddle/api/GradientMachine.cpp b/paddle/api/GradientMachine.cpp index 297eaa19bb..ced2293376 100644 --- a/paddle/api/GradientMachine.cpp +++ b/paddle/api/GradientMachine.cpp @@ -64,6 +64,14 @@ GradientMachine* GradientMachine::createByModelConfig( return GradientMachine::createFromPaddleModelPtr(confPtr, mode, types); } +void GradientMachine::onPassEnd() { m->machine->onPassEnd(); } + +void GradientMachine::prefetch(const Arguments& inArgs) { + auto& in = + m->cast>(inArgs.getInternalArgumentsPtr()); + m->machine->prefetch(in); +} + void GradientMachine::forward(const Arguments& inArgs, Arguments* outArgs, PassType passType) { diff --git a/paddle/api/PaddleAPI.h b/paddle/api/PaddleAPI.h index 84a66719c3..7521ff4c6c 100644 --- a/paddle/api/PaddleAPI.h +++ b/paddle/api/PaddleAPI.h @@ -714,6 +714,16 @@ public: GradientMatchineCreateMode mode = CREATE_MODE_NORMAL, const std::vector& parameterTypes = defaultParamTypes); + /** + * Prefetch row ids of sparse parameter. + */ + void prefetch(const Arguments& inArgs); + + /** + * Do some thing when train pass ended. + */ + void onPassEnd(); + /** * The forward stage of GradientMachine. 
* From 224e5fcc77306705260d8f54f2994706cd8ee0ef Mon Sep 17 00:00:00 2001 From: wangyanfei01 Date: Sun, 25 Dec 2016 11:35:49 +0800 Subject: [PATCH 35/39] fix bug: * gradient_clipping_threshold should be allowed to set with parameter-grain --- python/paddle/trainer_config_helpers/attrs.py | 40 ++++++++++++------- 1 file changed, 25 insertions(+), 15 deletions(-) diff --git a/python/paddle/trainer_config_helpers/attrs.py b/python/paddle/trainer_config_helpers/attrs.py index 59bb18bfca..bf02088346 100644 --- a/python/paddle/trainer_config_helpers/attrs.py +++ b/python/paddle/trainer_config_helpers/attrs.py @@ -19,34 +19,34 @@ __all__ = [ def convert_and_compare(x, Type): - """ - Convert x to be the same type as Type and then convert back to - check whether there is a loss of information - :param x: object to be checked - :param Type: target type to check x over - + """ + Convert x to be the same type as Type and then convert back to + check whether there is a loss of information + :param x: object to be checked + :param Type: target type to check x over + """ return type(x)(Type(x)) == x def is_compatible_with(x, Type): - """ - Check if x has a type compatible with Type - :param x: object to be checked - :param Type: target type to check x over - + """ + Check if x has a type compatible with Type + :param x: object to be checked + :param Type: target type to check x over + """ if type(x) == Type: return True try: if float == Type or int == Type: - # avoid those types that can be converted to float/int but not very - # meaningful and could potentially lead to error - # i.e., str and bool typed value should not be used for initializing float/int variable + # avoid those types that can be converted to float/int but not very + # meaningful and could potentially lead to error + # i.e., str and bool typed value should not be used for initializing float/int variable if not isinstance(x, str) and not isinstance(x, bool): return convert_and_compare(x, Type) elif bool == Type: - 
# should not use string type to initialize bool variable + # should not use string type to initialize bool variable if not isinstance(x, str): return convert_and_compare(x, Type) else: @@ -88,6 +88,10 @@ class ParameterAttribute(object): :type learning_rate: float or None :param momentum: The parameter momentum. None means use global value. :type momentum: float or None + :param gradient_clipping_threshold: gradient clipping threshold. If gradient + value larger than some value, will be + clipped. + :type gradient_clipping_threshold: float :param sparse_update: Enable sparse update for this parameter. It will enable both local and remote sparse update. :type sparse_update: bool @@ -104,6 +108,7 @@ class ParameterAttribute(object): l2_rate=None, learning_rate=None, momentum=None, + gradient_clipping_threshold=None, sparse_update=False): # initialize strategy. if is_static: @@ -152,6 +157,11 @@ class ParameterAttribute(object): self.attr['sparse_update'] = True self.attr['sparse_remote_update'] = True + if gradient_clipping_threshold is not None and \ + is_compatible_with(gradient_clipping_threshold, float): + self.attr['gradient_clipping_threshold'] = \ + gradient_clipping_threshold + def set_default_parameter_name(self, name): """ Set default parameter name. 
If parameter not set, then will use default From 027aaf9ef26ad89b33a2d094cb8196926a911cc2 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Sun, 25 Dec 2016 19:32:36 +0800 Subject: [PATCH 36/39] add cluster train for quick_start --- demo/quick_start/api_predict.sh | 2 +- demo/quick_start/cluster/cluster_train.sh | 44 +++++++++++++++++++++++ demo/quick_start/cluster/env.sh | 28 +++++++++++++++ demo/quick_start/cluster/pserver.sh | 26 ++++++++++++++ paddle/trainer/ThreadParameterUpdater.h | 4 +-- 5 files changed, 101 insertions(+), 3 deletions(-) create mode 100755 demo/quick_start/cluster/cluster_train.sh create mode 100644 demo/quick_start/cluster/env.sh create mode 100755 demo/quick_start/cluster/pserver.sh diff --git a/demo/quick_start/api_predict.sh b/demo/quick_start/api_predict.sh index c90d3b7054..4d9aa9e885 100755 --- a/demo/quick_start/api_predict.sh +++ b/demo/quick_start/api_predict.sh @@ -17,7 +17,7 @@ set -e #Note the default model is pass-00002, you shold make sure the model path #exists or change the mode path. #only test on trainer_config.lr.py -model=output/pass-00001/ +model=output/model/pass-00001/ config=trainer_config.lr.py label=data/labels.list dict=data/dict.txt diff --git a/demo/quick_start/cluster/cluster_train.sh b/demo/quick_start/cluster/cluster_train.sh new file mode 100755 index 0000000000..aac9b89b14 --- /dev/null +++ b/demo/quick_start/cluster/cluster_train.sh @@ -0,0 +1,44 @@ +#!/bin/bash +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +set -e + +# Should run pserver.sh before run this script. +bin_dir=$(cd `dirname $0`; pwd) +home_dir=$(cd "${bin_dir}/.."; pwd) +source "$bin_dir/env.sh" + +model_dir="$bin_dir/output" +log_file="$bin_dir/train.log" + +pushd "$home_dir" +cfg=trainer_config.lr.py +paddle train \ + --config=$cfg \ + --save_dir=${model_dir} \ + --trainer_count=4 \ + --local=0 \ + --log_period=100 \ + --num_passes=15 \ + --use_gpu=false \ + --show_parameter_stats_period=100 \ + --test_all_data_in_one_period=1 \ + --num_gradient_servers=1 \ + --nics=`get_nics` \ + --port=7164 \ + --ports_num=1 \ + --pservers="127.0.0.1" \ + --comment="paddle_trainer" \ + 2>&1 | tee "$log_file" +popd diff --git a/demo/quick_start/cluster/env.sh b/demo/quick_start/cluster/env.sh new file mode 100644 index 0000000000..a404993835 --- /dev/null +++ b/demo/quick_start/cluster/env.sh @@ -0,0 +1,28 @@ +#!/bin/bash +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+set -e + +function get_nics() { + machine=`uname -s` + local nics="" + if [ "$machine" == "Linux" ]; then + nics="lo" + elif [ "$machine" == "Darwin" ]; then + nics="lo0" + else + nics="unsupport" + fi + echo $nics +} diff --git a/demo/quick_start/cluster/pserver.sh b/demo/quick_start/cluster/pserver.sh new file mode 100755 index 0000000000..b187c1d9b9 --- /dev/null +++ b/demo/quick_start/cluster/pserver.sh @@ -0,0 +1,26 @@ +#!/bin/bash +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +set -e +bin_dir=$(cd `dirname $0`; pwd) +source "$bin_dir/env.sh" + +paddle pserver \ + --nics=`get_nics` \ + --port=7164 \ + --ports_num=1 \ + --ports_num_for_sparse=1 \ + --num_gradient_servers=1 \ + --comment="paddle_pserver" \ + 2>&1 | tee 'pserver.log' diff --git a/paddle/trainer/ThreadParameterUpdater.h b/paddle/trainer/ThreadParameterUpdater.h index 880f1f9ddc..bc08a9e9f0 100644 --- a/paddle/trainer/ThreadParameterUpdater.h +++ b/paddle/trainer/ThreadParameterUpdater.h @@ -33,8 +33,8 @@ namespace paddle { because at the current moment, the merging on CPU is happening on the main thread, and the its parameter size can be much larger than the one GPU. Thus, for GPU, the parameter updates happens in updateImpl() function, which - is called by gradient machines as a callback function as a callback function - supplied to backward() and forwardBackward(). 
+ is called by gradient machines as a callback function supplied to backward() + and forwardBackward(). For CPU, the parameter updates happens in separate threads maintained by this class. */ From 685299c3c54bb8fc10c3d38cb26445d899f32c5d Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Mon, 26 Dec 2016 14:38:31 +0800 Subject: [PATCH 37/39] Rename math.py to layer_math.py * Fix #903 --- python/paddle/trainer_config_helpers/__init__.py | 4 +--- .../paddle/trainer_config_helpers/{math.py => layer_math.py} | 0 2 files changed, 1 insertion(+), 3 deletions(-) rename python/paddle/trainer_config_helpers/{math.py => layer_math.py} (100%) diff --git a/python/paddle/trainer_config_helpers/__init__.py b/python/paddle/trainer_config_helpers/__init__.py index a2335768b9..0ff5edf825 100644 --- a/python/paddle/trainer_config_helpers/__init__.py +++ b/python/paddle/trainer_config_helpers/__init__.py @@ -20,6 +20,4 @@ from layers import * from networks import * from optimizers import * from attrs import * - -# This will enable operator overload for LayerOutput -import math as layer_math +import layer_math diff --git a/python/paddle/trainer_config_helpers/math.py b/python/paddle/trainer_config_helpers/layer_math.py similarity index 100% rename from python/paddle/trainer_config_helpers/math.py rename to python/paddle/trainer_config_helpers/layer_math.py From eca45928d5f3f9b1c2219fd71adb72160fee9edf Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Tue, 27 Dec 2016 13:15:27 +0800 Subject: [PATCH 38/39] Fix merge errors. 
--- paddle/api/PaddleAPI.h | 2 +- paddle/utils/common.h | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/paddle/api/PaddleAPI.h b/paddle/api/PaddleAPI.h index e83718448d..09c891871a 100644 --- a/paddle/api/PaddleAPI.h +++ b/paddle/api/PaddleAPI.h @@ -870,7 +870,7 @@ struct EvaluatorPrivate; class Evaluator { private: Evaluator(); - DISABLE_COPY_AND_ASSIGN(Evaluator); + DISABLE_COPY(Evaluator); public: ~Evaluator(); diff --git a/paddle/utils/common.h b/paddle/utils/common.h index 3ff0b86947..202a9d980d 100644 --- a/paddle/utils/common.h +++ b/paddle/utils/common.h @@ -14,8 +14,6 @@ limitations under the License. */ #pragma once -namespace paddle { - /** * Disable copy macro. */ @@ -24,6 +22,8 @@ namespace paddle { class_name(const class_name &other) = delete; \ class_name &operator=(const class_name &other) = delete +namespace paddle { + #ifdef PADDLE_TYPE_DOUBLE using real = double; #else From def00bc1064e5960568666f690773a8e5445f888 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Tue, 27 Dec 2016 16:44:31 +0800 Subject: [PATCH 39/39] Update index_cn.rst --- doc/faq/index_cn.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/faq/index_cn.rst b/doc/faq/index_cn.rst index ea0ef25f00..7d425a05d4 100644 --- a/doc/faq/index_cn.rst +++ b/doc/faq/index_cn.rst @@ -72,7 +72,7 @@ PaddlePaddle支持非常多的优化算法(Optimizer),不同的优化算法需 减少数据载入的耗时 ++++++++++++++++++ -使用 :code:`pydataprovider`时,可以减少缓存池的大小,同时设置内存缓存功能,即可以极大的加速数据载入流程。 +使用\ :code:`pydataprovider`\ 时,可以减少缓存池的大小,同时设置内存缓存功能,即可以极大的加速数据载入流程。 :code:`DataProvider` 缓存池的减小,和之前减小通过减小缓存池来减小内存占用的原理一致。 .. literalinclude:: src/reduce_min_pool_size.py