From 81ff6fa6073a23fc7137d32d2cdee0595fd48af0 Mon Sep 17 00:00:00 2001
From: gkitsasv
Date: Wed, 13 May 2020 16:56:25 +0300
Subject: [PATCH] First Commit

---
 helpers/sh_functions.py    | 110 +++++++++++++++++++++++++++++++++++++
 images/input.jpg           | Bin 0 -> 18962 bytes
 inference.py               |  74 +++++++++++++++++++++++++
 loaders/Illum_loader.py    |  61 ++++++++++++++++++++
 loaders/autoenc_ldr2hdr.py |  80 +++++++++++++++++++++++++++
 5 files changed, 325 insertions(+)
 create mode 100644 helpers/sh_functions.py
 create mode 100644 images/input.jpg
 create mode 100644 inference.py
 create mode 100644 loaders/Illum_loader.py
 create mode 100644 loaders/autoenc_ldr2hdr.py

diff --git a/helpers/sh_functions.py b/helpers/sh_functions.py
new file mode 100644
index 0000000..21b637f
--- /dev/null
+++ b/helpers/sh_functions.py
@@ -0,0 +1,110 @@
+import math
+import numpy as np
+import time
+import torch
+import torchvision
+
+# Convolve the SH coefficients with a low-pass filter in the spatial domain
+def deringing(coeffs, window):
+    deringed_coeffs = torch.zeros_like(coeffs)
+    deringed_coeffs[:, 0] += coeffs[:, 0]
+    deringed_coeffs[:, 1:1 + 3] += \
+        coeffs[:, 1:1 + 3] * math.pow(math.sin(math.pi * 1.0 / window) / (math.pi * 1.0 / window), 4.0)
+    deringed_coeffs[:, 4:4 + 5] += \
+        coeffs[:, 4:4 + 5] * math.pow(math.sin(math.pi * 2.0 / window) / (math.pi * 2.0 / window), 4.0)
+    return deringed_coeffs
+
+# Spherical harmonics functions
+def P(l, m, x):
+    pmm = 1.0
+    if(m>0):
+        somx2 = np.sqrt((1.0-x)*(1.0+x))
+        fact = 1.0
+        for i in range(1,m+1):
+            pmm *= (-fact) * somx2
+            fact += 2.0
+
+    if(l==m):
+        return pmm * np.ones(x.shape)
+
+    pmmp1 = x * (2.0*m+1.0) * pmm
+
+    if(l==m+1):
+        return pmmp1
+
+    pll = np.zeros(x.shape)
+    for ll in range(m+2, l+1):
+        pll = ( (2.0*ll-1.0)*x*pmmp1-(ll+m-1.0)*pmm ) / (ll-m)
+        pmm = pmmp1
+        pmmp1 = pll
+
+    return pll
+
+def factorial(x):
+    if(x == 0):
+        return 1.0
+    return x * factorial(x-1)
+
+def K(l, m):
+    return np.sqrt( ((2 * l + 1) * factorial(l-m)) / (4*np.pi*factorial(l+m)) )
+
+def SH(l, m, theta, phi):
+    sqrt2 = np.sqrt(2.0)
+    if(m==0):
+        if np.isscalar(phi):
+            return K(l,m)*P(l,m,np.cos(theta))
+        else:
+            return K(l,m)*P(l,m,np.cos(theta))*np.ones(phi.shape)
+    elif(m>0):
+        return sqrt2*K(l,m)*np.cos(m*phi)*P(l,m,np.cos(theta))
+    else:
+        return sqrt2*K(l,-m)*np.sin(-m*phi)*P(l,-m,np.cos(theta))
+
+def shEvaluate(theta, phi, lmax):
+    if np.isscalar(theta):
+        coeffsMatrix = np.zeros((1,1,shTerms(lmax)))
+    else:
+        coeffsMatrix = np.zeros((theta.shape[0],phi.shape[0],shTerms(lmax)))
+
+    for l in range(0,lmax+1):
+        for m in range(-l,l+1):
+            index = shIndex(l, m)
+            coeffsMatrix[:,:,index] = SH(l, m, theta, phi)
+    return coeffsMatrix
+
+def getCoefficientsMatrix(xres,lmax=2):
+    yres = int(xres/2)
+    # Set up fast vectorisation
+    x = np.arange(0,xres)
+    y = np.arange(0,yres).reshape(yres,1)
+
+    # Set up polar coordinates
+    latLon = xy2ll(x,y,xres,yres)
+
+    # Compute spherical harmonics. Apply thetaOffset due to EXR spherical coordinates
+    Ylm = shEvaluate(latLon[0], latLon[1], lmax)
+    return Ylm
+
+def shReconstructSignal(coeffs, sh_basis_matrix=None, width=512):
+    if sh_basis_matrix is None:
+        lmax = sh_lmax_from_terms(coeffs.shape[0])
+        sh_basis_matrix = getCoefficientsMatrix(width,lmax)
+    sh_basis_matrix_t = torch.from_numpy(sh_basis_matrix).to(coeffs).float()
+    return (torch.matmul(sh_basis_matrix_t,coeffs)).to(coeffs).float()
+
+def shTerms(lmax):
+    return (lmax + 1) * (lmax + 1)
+
+def sh_lmax_from_terms(terms):
+    return int(np.sqrt(terms)-1)
+
+def shIndex(l, m):
+    return l*l+l+m
+
+def xy2ll(x,y,width,height):
+    def yLocToLat(yLoc, height):
+        return (yLoc / (float(height)/np.pi))
+    def xLocToLon(xLoc, width):
+        return (xLoc / (float(width)/(np.pi * 2)))
+    return np.asarray([yLocToLat(y, height), xLocToLon(x, width)])
+
diff --git a/images/input.jpg b/images/input.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..911952eb97aac033405909d21277b4fa19c668ac
GIT binary patch
literal 18962
[binary JPEG data omitted]
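For illustration, a minimal usage sketch of the SH helpers above; the random tensor is a stand-in for the 9x3 (lmax=2) RGB SH vector that the lighting regression module predicts, and only functions defined in helpers/sh_functions.py are used:

import torch
from helpers.sh_functions import getCoefficientsMatrix, shReconstructSignal, shTerms

lmax = 2
coeffs = torch.rand(shTerms(lmax), 3)                         # placeholder 9x3 RGB SH coefficients
basis = getCoefficientsMatrix(512, lmax)                      # (256, 512, 9) equirectangular SH basis
env_map = shReconstructSignal(coeffs, sh_basis_matrix=basis)  # (256, 512, 3) environment map
print(env_map.shape)                                          # torch.Size([256, 512, 3])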
zQF6r7vW2yfUzAnP69B5vg-#mEl=q4HrB|Pi%Wju8Q##(4Y4|9194tnOIG(WL*&1C( z@G3$`zsk;moI>U-ehF!|?ozx)gcE%^;p9WM>&8rT_s|0?xi*22A)024beXvmyi;RL zw3X2>D)cZxJ)JypRqjwX4UL$ppIU-U-J1YE9jL#dXqEqc%Ml_=U#WXf)qEqYYoX#2 zFH$KCt;pTO(6x@F!bHD;>0dn;H7kLKBv0~e_kc$^!)Mo<2 zH`#{$uRBsKmzqFDCHG@KVV3-UC#g^Z%yd6cB^whi+op9~BL8$V+mqX`A6P^$acH!v z_<}PlorQ=<$W@UwVYw^?Z{>3VM2Hk_UIsSdLerLq)(q!RV|d6zCZ3(6R62}nv-Bg! zwI7bIkLj#Tf=U^hx8e7023AzE-p`CL;Y+if^?nCS-+0lG`xoPE`sT)FyP64XeeGMS zMEJn3*p|G_%f{zHx|AvE|gc$|-?g0AP_xXz3B9U2hb4Tc0hifn3IwnRg zl)F0#3V0~S$=AFyUN1>Fn7A-+Rw4xJ+rI-$l{??c!`B}wS~r4jeHU%U$J?hxU7ty? zv&NgYS!~T58Ryk0TNWtTTBVk1D(h02Eax}rbF78z*)aaht7bxP^f;82NIOfo5aJ1$ z!#`o5-g<$d*SkH*bi^O_waB(&BkMJqUlu#oYzA)KOCstCiLm0oGxVKkHOo7|66m;u z{s3=?-|bg=xwImTI)eytlwY`>)Wh$2dXWzd?*U+^=i+Uq!h$sck_j>5EM`J&I|U=) z$P54p8k(eIk&~9AtM}M$yD-nsOcwa)Qs0aeE7($@bVMu4!7O|dhn~uc57fsm*;COp zbwF$3CLbwohmr!UY-Yyamkah6rpP2`N$L;`dUdRO)3iICF-h~ZJ-w%r$Y(nlHZNu? z9&|Cu(%Aa;26J1GFSECC4-&k0E3ET7Bg#aU!r%VO;3qWMXZhDuk~-S>oJmuwCbt72 zSVHh+`odk1xhuxlhCX!=75Y?|nxn|6!t#}V zl>{k2fg(x%aYsq@5QkQPUem{p4j57Tzlfuo-QH1@rjW|Cbf}px!z<}#whY-n@Dc{V zjs~v=kxh1>fqjJ(DqxJ}KEYu~!~HbQUG|lke$}m`g~FR?xEzFMsi{i+bCrXnUh3#VcFfdM0 z)Hh=RG0fsT7xd8}eoqtA{eG&ZT+y=@6^=>!--Jx56>^pb^q|Wulv5P=kwgp=kpkPWA6%NIhv-nQWi3tw&u2+6D*Vv;C7YV z&l3WUO;j5#^Wl}AT(6b1;-yM`PaEwER@vH&TD+rJEpu-`vNq&FiJc8e?*T8>ssz6) z)u?NzVr>w5pKYB~$9*TG4Z0V-oc+Dm-f zzcZS8G8LE&#BRk=A0*u*hH0&+8PzkV2S6IWvvHV1&nM1hTG+#qOS+M;g0H0uZ$AJ> zZ=A43Cuw*7DDycn16)rba}N5nO=F$df^Z{S3#TSoo*zpP_Qu{aa*LIaL%HZ_Gbu8> zS|*0%r#em&J(BIn-qdG^OYvuKSTaO48x=;k31?;E$GRIyb&ZzZS$#jAHuYAeHTU$a zAxD&3-G^+%^So>(Z^zBD4V@(suu{{aiBdPSTZoMG0p|I`aEj#mkwuI9XB7J_oi2$l zxCGCC#%@upCsbnw#P4M63&U?uW%b@UCaWg|$gE*RJkikmXJD@# z%~@}dRn(^HPQ#kFE*{$0xb%3P7w;ND(-WCx(5hV7rR`05PN9gnT&YdxbKdiZLQB%u z{pqqe_Xi)?8Tl$%BSn9zQ>oqZ5<}hwnZEqnCG3XH zJLU>vVu!TW*(=%P85;5*H>oD}Vn$s8TJ64rb(pM()*=^|JrQY1>!{{xd8QGUe{Y;R zw#6C?J|>^yE2BX*PK{xbH_B=(m(PznDli}q@;obm8}0Pprx!pqK{jG@;&}dTrv$XpZ}Q%T(mn*S{R_&^PFxRKt}N~x znwf>{Ow=sCz7i!zvUOnbBL$%{FGK%k)o=bW5~qe>P97jkjKizYlS6%iO;CVpCZXc8 zEp~nT41H+PIf$Mf`QLW&qj7lM!Sl%vYHDX@6%V3KY-Zg%kuj*a1TNd^$TCE?a+a*9 zWkO)j$z@tH!UJ+qALteW?*>(oJNXs|UYThx2ew+;DbK;>Te2~yd1T3|hrXFaCEPWl zYMNXA_pCMgCp(M*^%*{>{K}B}Isf-#j-h8l<;4Wz)x(=9t_|Lw<_^o9MMsT5Pg>n| zZZGY}3uX)hAdnuX;*K=8}kNAu@*W65} z+Xd(B(ZqQdu)f}COVR`6OhHnWN{m?+TN=NKt51ZuD9DpWcRVnXyQ;43oDdqVsHqZX zOzABu`}TXy;)ld>fBL-tKR_MK+JgTySjd^V!}m9#m}`DDyc&0XjFqUO$<~`#E5Gn0 z>_Tk2RkFm7r!1#QkEEOSQIA|z4TZpp5}2e_pf#v{})pl2JNTzHsXq4h=rb>5CSS!mx{N89H8kO&%{Sq7eU8bGLJCoV<%Y3MXSp@1hsW60T?#sz^ro8!bd?7e_uW@IEGvP`gwAjZ0s^L{L)YgmBj%33t~Ovl?6pPKN0WNEqW>AfI$NPrzIl%_BSqSW@7*V literal 0 HcmV?d00001 diff --git a/inference.py b/inference.py new file mode 100644 index 0000000..1ca982d --- /dev/null +++ b/inference.py @@ -0,0 +1,74 @@ +import argparse +import os +import cv2 +import sys +import numpy as np +import torch +import torchvision +from helpers.sh_functions import * +from loaders.Illum_loader import IlluminationModule, Inference_Data +from loaders.autoenc_ldr2hdr import LDR2HDR +from torch.utils.data import DataLoader + +def parse_arguments(args): + usage_text = ( + "Inference script for Deep Lighting Environment Map Estimation from Spherical Panoramas" + "Usage: python3 inference.py --input_path " + ) + parser = argparse.ArgumentParser(description=usage_text) + parser.add_argument('--input_path', type=str, default='./images/input.jpg', help="Input panorama color image file") + parser.add_argument('--out_path', type=str, default='./output/', help='Output folder for the predicted environment map panorama') + parser.add_argument('-g','--gpu', 
+    parser.add_argument('-g', '--gpu', type=str, default='0', help='GPU id of the device to use. Use -1 for CPU.')
+    parser.add_argument('--chkpnt_path', default='./models/model.pth', type=str, help='Pre-trained checkpoint file for the lighting regression module')
+    parser.add_argument('--ldr2hdr_model', type=str, default='./models/ldr2hdr.pth', help='Pre-trained checkpoint file for the LDR2HDR image translation module')
+    parser.add_argument('--width', type=int, default=512, help='Spherical panorama image width.')
+    parser.add_argument('--deringing', type=int, default=0, help='Enable the low-pass deringing filter for the predicted SH coefficients')
+    parser.add_argument('--dr_window', type=float, default=6.0, help='Window size of the deringing low-pass filter')
+    return parser.parse_known_args(args)
+
+def evaluate(
+    illumination_module: torch.nn.Module,
+    ldr2hdr_module: torch.nn.Module,
+    args: argparse.Namespace,
+    device: torch.device
+):
+    if not os.path.isdir(args.out_path):
+        os.mkdir(args.out_path)
+
+    in_filename, in_file_extension = os.path.splitext(args.input_path)
+    assert in_file_extension in ['.png', '.jpg']
+    inference_data = Inference_Data(args.input_path)
+    out_path = args.out_path + os.path.basename(args.input_path)
+    out_filename, _ = os.path.splitext(out_path)
+    out_path = out_filename + '.exr'
+    dataloader = DataLoader(inference_data, batch_size=1, shuffle=False, num_workers=1)
+    for i, data in enumerate(dataloader):
+        input_img = data.to(device).float()
+        with torch.no_grad():
+            start_time = time.time()
+            right_rgb = ldr2hdr_module(input_img)
+            p_coeffs = illumination_module(right_rgb).view(1, 9, 3).to(device).float()
+            if args.deringing:
+                p_coeffs = deringing(p_coeffs, args.dr_window).to(device).float()
+            elapsed_time = time.time() - start_time
+            print("Elapsed inference time: %2.4fsec" % elapsed_time)
+        pred_env_map = shReconstructSignal(p_coeffs.squeeze(0), width=args.width)
+        cv2.imwrite(out_path, pred_env_map.cpu().detach().numpy())
+
+def main(args):
+    device = torch.device("cuda:" + str(args.gpu) if (torch.cuda.is_available() and int(args.gpu) >= 0) else "cpu")
+    # Load the lighting regression module
+    illumination_module = IlluminationModule(batch_size=1).to(device)
+    illumination_module.load_state_dict(torch.load(args.chkpnt_path, map_location=device))
+    print("Lighting module loaded")
+    # Load the LDR2HDR translation module
+    ldr2hdr_module = LDR2HDR()
+    ldr2hdr_module.load_state_dict(torch.load(args.ldr2hdr_model, map_location=device)['state_dict_G'])
+    ldr2hdr_module = ldr2hdr_module.to(device)
+    print("LDR2HDR module loaded")
+    evaluate(illumination_module, ldr2hdr_module, args, device)
+
+if __name__ == '__main__':
+    args, unknown = parse_arguments(sys.argv)
+    main(args)
\ No newline at end of file
diff --git a/loaders/Illum_loader.py b/loaders/Illum_loader.py
new file mode 100644
index 0000000..1df41c4
--- /dev/null
+++ b/loaders/Illum_loader.py
@@ -0,0 +1,61 @@
+from skimage import io, transform
+import numpy as np
+import cv2
+import math
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from torch.utils.data import Dataset
+from torchvision import transforms, utils
+'''
+    Input: (256,512,3)
+'''
+class IlluminationModule(nn.Module):
+    def __init__(self, batch_size):
+        super().__init__()
+        self.cv_block1 = conv_bn_elu(3, 64, kernel_size=7, stride=2)
+        self.cv_block2 = conv_bn_elu(64, 128, kernel_size=5, stride=2)
+        self.cv_block3 = conv_bn_elu(128, 256, stride=2)
+        self.cv_block4 = conv_bn_elu(256, 256)
+        self.cv_block5 = conv_bn_elu(256, 256, stride=2)
+        self.cv_block6 = conv_bn_elu(256, 256)
+        self.cv_block7 = conv_bn_elu(256, 256, stride=2)
+        self.fc = nn.Linear(256*16*8, 2048)
+        # One-head regression: a single FC head predicts all 27 SH values
+        self.sh_fc = nn.Linear(2048, 27)
+
+    def forward(self, x):
+        x = self.cv_block1(x)
+        x = self.cv_block2(x)
+        x = self.cv_block3(x)
+        x = self.cv_block4(x)
+        x = self.cv_block5(x)
+        x = self.cv_block6(x)
+        x = self.cv_block7(x)
+        x = x.view(-1, 256*8*16)
+        x = F.elu(self.fc(x))
+        return self.sh_fc(x)
+
+def conv_bn_elu(in_, out_, kernel_size=3, stride=1, padding=True):
+    # Conv layer with an ELU activation function
+    pad = kernel_size // 2 if padding else 0
+    return nn.Sequential(
+        nn.Conv2d(in_, out_, kernel_size, stride=stride, padding=pad),
+        nn.ELU(),
+    )
+
+class Inference_Data(Dataset):
+    def __init__(self, img_path):
+        self.input_img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)
+        self.input_img = cv2.resize(self.input_img, (512,256), interpolation=cv2.INTER_CUBIC)
+        self.to_tensor = transforms.ToTensor()
+        self.data_len = 1
+
+    def __getitem__(self, index):
+        return self.to_tensor(self.input_img)
+
+    def __len__(self):
+        return self.data_len
\ No newline at end of file
diff --git a/loaders/autoenc_ldr2hdr.py b/loaders/autoenc_ldr2hdr.py
new file mode 100644
index 0000000..97cfd7a
--- /dev/null
+++ b/loaders/autoenc_ldr2hdr.py
@@ -0,0 +1,80 @@
+# Autoencoder for LDR to HDR image mapping
+
+from torch import nn
+import torch
+from torchvision import models
+import torchvision
+from torch.nn import functional as F
+
+def weights_init(m):
+    classname = m.__class__.__name__
+    if classname.find('Conv') != -1:
+        m.weight.data.normal_(0.0, 0.02)
+    elif classname.find('BatchNorm') != -1:
+        m.weight.data.normal_(1.0, 0.02)
+        m.bias.data.fill_(0)
+
+class LDR2HDR(nn.Module):
+    def __init__(self,
+                 n_filters: int = 64,
+                 n_channel_input: int = 3,
+                 n_channel_output: int = 3
+    ):
+        super(LDR2HDR, self).__init__()
+        self.conv1 = nn.Conv2d(n_channel_input, n_filters, 4, 2, 1)
+        self.conv2 = nn.Conv2d(n_filters, n_filters * 2, 4, 2, 1)
+        self.conv3 = nn.Conv2d(n_filters * 2, n_filters * 4, 4, 2, 1)
+        self.conv4 = nn.Conv2d(n_filters * 4, n_filters * 8, 4, 2, 1)
+        self.conv5 = nn.Conv2d(n_filters * 8, n_filters * 8, 4, 2, 1)
+        self.conv6 = nn.Conv2d(n_filters * 8, n_filters * 8, 4, 2, 1)
+        self.conv7 = nn.Conv2d(n_filters * 8, n_filters * 8, 4, 2, 1)
+        self.conv8 = nn.Conv2d(n_filters * 8, n_filters * 8, 4, 2, 1)
+
+        self.deconv1 = nn.ConvTranspose2d(n_filters * 8, n_filters * 8, 4, 2, 1)
+        self.deconv2 = nn.ConvTranspose2d(n_filters * 8 * 2, n_filters * 8, 4, 2, 1)
+        self.deconv3 = nn.ConvTranspose2d(n_filters * 8 * 2, n_filters * 8, 4, 2, 1)
+        self.deconv4 = nn.ConvTranspose2d(n_filters * 8 * 2, n_filters * 8, 4, 2, 1)
+        self.deconv5 = nn.ConvTranspose2d(n_filters * 8 * 2, n_filters * 4, 4, 2, 1)
+        self.deconv6 = nn.ConvTranspose2d(n_filters * 4 * 2, n_filters * 2, 4, 2, 1)
+        self.deconv7 = nn.ConvTranspose2d(n_filters * 2 * 2, n_filters, 4, 2, 1)
+        self.deconv8 = nn.ConvTranspose2d(n_filters * 2, n_channel_output, 4, 2, 1)
+
+        self.batch_norm = nn.BatchNorm2d(n_filters)
+        self.batch_norm2 = nn.BatchNorm2d(n_filters * 2)
+        self.batch_norm4 = nn.BatchNorm2d(n_filters * 4)
+        self.batch_norm8 = nn.BatchNorm2d(n_filters * 8)
+
+        self.leaky_relu = nn.LeakyReLU(0.2, True)
+        self.relu = nn.ReLU(True)
+
+        self.dropout = nn.Dropout(0.5)
+
+        self.tanh = nn.Tanh()
+
+    def forward(self, input):
+        # Encoder: 4x4 stride-2 convolutions progressively halve the resolution
+        encoder1 = self.conv1(input)
+        encoder2 = self.batch_norm2(self.conv2(self.leaky_relu(encoder1)))
+        encoder3 = self.batch_norm4(self.conv3(self.leaky_relu(encoder2)))
+        encoder4 = self.batch_norm8(self.conv4(self.leaky_relu(encoder3)))
+        encoder5 = self.batch_norm8(self.conv5(self.leaky_relu(encoder4)))
+        encoder6 = self.batch_norm8(self.conv6(self.leaky_relu(encoder5)))
+        encoder7 = self.batch_norm8(self.conv7(self.leaky_relu(encoder6)))
+        encoder8 = self.conv8(self.leaky_relu(encoder7))
+
+        # Decoder with U-Net style skip connections: each upsampled feature map
+        # is concatenated with the encoder feature map of matching resolution
+        decoder1 = self.dropout(self.batch_norm8(self.deconv1(self.relu(encoder8))))
+        decoder1 = torch.cat((decoder1, encoder7), 1)
+        decoder2 = self.dropout(self.batch_norm8(self.deconv2(self.relu(decoder1))))
+        decoder2 = torch.cat((decoder2, encoder6), 1)
+        decoder3 = self.dropout(self.batch_norm8(self.deconv3(self.relu(decoder2))))
+        decoder3 = torch.cat((decoder3, encoder5), 1)
+        decoder4 = self.batch_norm8(self.deconv4(self.relu(decoder3)))
+        decoder4 = torch.cat((decoder4, encoder4), 1)
+        decoder5 = self.batch_norm4(self.deconv5(self.relu(decoder4)))
+        decoder5 = torch.cat((decoder5, encoder3), 1)
+        decoder6 = self.batch_norm2(self.deconv6(self.relu(decoder5)))
+        decoder6 = torch.cat((decoder6, encoder2), 1)
+        decoder7 = self.batch_norm(self.deconv7(self.relu(decoder6)))
+        decoder7 = torch.cat((decoder7, encoder1), 1)
+        decoder8 = self.deconv8(self.relu(decoder7))
+        output = self.tanh(decoder8)
+        return output
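As a quick sanity check of how the two modules chain together, a short shape test with randomly initialized weights (the released ./models checkpoints are not loaded here); the input sizing follows Inference_Data above:

import torch
from loaders.Illum_loader import IlluminationModule
from loaders.autoenc_ldr2hdr import LDR2HDR

illum = IlluminationModule(batch_size=1).eval()
ldr2hdr = LDR2HDR().eval()
x = torch.rand(1, 3, 256, 512)     # one 256x512 RGB panorama, as produced by Inference_Data
with torch.no_grad():
    hdr = ldr2hdr(x)               # (1, 3, 256, 512): eight stride-2 convs down to 1x2, mirrored back up
    sh = illum(hdr).view(1, 9, 3)  # 27 regressed values -> 9 SH coefficients per RGB channel
print(hdr.shape, sh.shape)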