Commit dbb615d: Add files via upload
1 parent 01df657, 27 files changed, +2823 −0 lines

Train_deSpeckNet_DAG_TV.m

Lines changed: 49 additions & 0 deletions
% Author: Adugna Mullissa
%
% Description: This script runs the deSpeckNet training task on
% single-polarization SAR intensity images. The input image and the label
% should be stored in the same folder as this script; the default names
% are Train2 and Test2.
% This script is modified from https://github.com/cszn/DnCNN.

clc;
clear all;

rng('default');

addpath('utilities');
addpath('../../matlab/matlab');
%-------------------------------------------------------------------------
% Configuration
%-------------------------------------------------------------------------
opts.modelName     = 'deSpeckNet';                            % model name
opts.learningRate  = [logspace(-3,-3,35) logspace(-4,-4,35)]; % learning rate schedule (one entry per epoch)
opts.batchSize     = 128;                                     % batch size
opts.gpus          = 1;                                       % set to [] when using the CPU
opts.numSubBatches = 2;

% solver
opts.solver        = 'Adam'; % global solver
opts.derOutputs    = {'objective',100, 'objective0',0, 'objective1',1}; % loss weights: clean L2, TV, noisy-reconstruction L2
opts.backPropDepth = Inf;
%-------------------------------------------------------------------------
% Initialize model
%-------------------------------------------------------------------------
net = deSpeckNet_Init_TV();
%-------------------------------------------------------------------------
% Train
%-------------------------------------------------------------------------
[net, info] = deSpecknet_train_dag_TV(net, ...
    'learningRate', opts.learningRate, ...
    'derOutputs',   opts.derOutputs, ...
    'numSubBatches',opts.numSubBatches, ...
    'backPropDepth',opts.backPropDepth, ...
    'solver',       opts.solver, ...
    'batchSize',    opts.batchSize, ...
    'modelname',    opts.modelName, ...
    'gpus',         opts.gpus);
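
Note that the schedule above trains for 70 epochs (35 epochs at 1e-3 followed by 35 at 1e-4), and the three derOutputs weights correspond to the three losses wired up in deSpeckNet_Init_TV below ('objective' = clean L2, 'objective0' = TV, 'objective1' = noisy-reconstruction L2). A minimal sketch of how one might shorten the schedule and enable the TV term is shown here; the specific values are illustrative assumptions, not the settings used in this repository:

% Illustrative only: shorter schedule and a non-zero TV weight.
% These numbers are assumptions for demonstration, not the authors' settings.
opts.learningRate = [logspace(-3,-3,20) logspace(-4,-4,20)];             % 40 epochs: 1e-3 then 1e-4
opts.derOutputs   = {'objective',100, 'objective0',0.5, 'objective1',1}; % clean L2, TV, noisy L2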

deSpeckNet_Init_TV.m

Lines changed: 176 additions & 0 deletions
function net = deSpeckNet_Init_TV()

% Create the DAGNN object
net = dagnn.DagNN();

% conv + relu
blockNum = 1;
inVar    = 'input';
channel  = 1;                % grayscale (single-polarization intensity) image
dims     = [3,3,channel,64];
pad      = [1,1];
dilate   = [1,1];
stride   = [1,1];
lr       = [1,1];

% FCN clean (estimates the clean image)
[net, inVar1, blockNum] = addConv(net, blockNum, inVar, dims, pad, dilate, stride, lr);
[net, inVar1, blockNum] = addReLU(net, blockNum, inVar1);

for i = 1:15
    % conv + bn + relu
    dims0 = [3,3,64,64];
    [net, inVar1, blockNum] = addConv(net, blockNum, inVar1, dims0, pad, dilate, stride, lr);
    n_ch = dims0(4);
    [net, inVar1, blockNum] = addBnorm(net, blockNum, inVar1, n_ch);
    [net, inVar1, blockNum] = addReLU(net, blockNum, inVar1);
end

% conv
dims1 = [3,3,64,channel];
[net, inVar5, blockNum] = addConv(net, blockNum, inVar1, dims1, pad, dilate, stride, lr);

%__________________________________________________________________________
% FCN noise (estimates the speckle component)

[net, inVar8, blockNum] = addConv(net, blockNum, inVar, dims, pad, dilate, stride, lr);
[net, inVar8, blockNum] = addReLU(net, blockNum, inVar8);

for i = 1:15
    % conv + bn + relu
    [net, inVar8, blockNum] = addConv(net, blockNum, inVar8, dims0, pad, dilate, stride, lr);
    n_ch = dims0(4);
    [net, inVar8, blockNum] = addBnorm(net, blockNum, inVar8, n_ch);
    [net, inVar8, blockNum] = addReLU(net, blockNum, inVar8);
end

% conv
[net, inVar13, blockNum] = addConv(net, blockNum, inVar8, dims1, pad, dilate, stride, lr);

% Multiply the clean and speckle estimates to reconstruct the noisy image
inVarr = {inVar13, inVar5};
[net, inVar30, blockNum] = addMultiply(net, blockNum, inVarr);

% Clean image reconstruction
outputName = 'prediction';
net.renameVar(inVar5, outputName)

%__________________________________________________________________________
% Add loss functions

% loss clean: L2 between the clean estimate and the label
net.addLayer('loss', dagnn.Loss('loss','L2'), {'prediction','label'}, {'objective'}, {});
net.vars(net.getVarIndex('prediction')).precious = 1;

% loss TV: total-variation regularization on the clean estimate
net.addLayer('loss0', dagnn.LossTV('loss','TV'), {'prediction','label'}, {'objective0'}, {});
net.vars(net.getVarIndex('prediction')).precious = 1;

% Final noisy image reconstruction
outputName1 = 'prediction1';
net.renameVar(inVar30, outputName1)

% loss noisy: L2 between the reconstructed noisy image and the input
net.addLayer('loss1', dagnn.Loss('loss','L2'), {'prediction1','input'}, {'objective1'}, {});
net.vars(net.getVarIndex('prediction1')).precious = 1;

end


% Add a multiply layer
function [net, inVar, blockNum] = addMultiply(net, blockNum, inVar)

outVar   = sprintf('mult%d', blockNum);
layerCur = sprintf('mult%d', blockNum);

block = dagnn.Multiply();
net.addLayer(layerCur, block, inVar, {outVar}, {});

inVar    = outVar;
blockNum = blockNum + 1;
end


% Add a ReLU layer
function [net, inVar, blockNum] = addReLU(net, blockNum, inVar)

outVar   = sprintf('relu%d', blockNum);
layerCur = sprintf('relu%d', blockNum);

block = dagnn.ReLU('leak',0);
net.addLayer(layerCur, block, {inVar}, {outVar}, {});

inVar    = outVar;
blockNum = blockNum + 1;
end


% Add a batch-normalization layer
function [net, inVar, blockNum] = addBnorm(net, blockNum, inVar, n_ch)

trainMethod = 'adam';
outVar   = sprintf('bnorm%d', blockNum);
layerCur = sprintf('bnorm%d', blockNum);

params = {[layerCur '_g'], [layerCur '_b'], [layerCur '_m']};
net.addLayer(layerCur, dagnn.BatchNorm('numChannels', n_ch), {inVar}, {outVar}, params);

pidx  = net.getParamIndex({[layerCur '_g'], [layerCur '_b'], [layerCur '_m']});
b_min = 0.025;
net.params(pidx(1)).value        = clipping(sqrt(2/(9*n_ch))*randn(n_ch,1,'single'), b_min);
net.params(pidx(1)).learningRate = 1;
net.params(pidx(1)).weightDecay  = 0;
net.params(pidx(1)).trainMethod  = trainMethod;

net.params(pidx(2)).value        = zeros(n_ch, 1, 'single');
net.params(pidx(2)).learningRate = 1;
net.params(pidx(2)).weightDecay  = 0;
net.params(pidx(2)).trainMethod  = trainMethod;

net.params(pidx(3)).value        = [zeros(n_ch,1,'single'), 0.01*ones(n_ch,1,'single')];
net.params(pidx(3)).learningRate = 1;
net.params(pidx(3)).weightDecay  = 0;
net.params(pidx(3)).trainMethod  = 'average';

inVar    = outVar;
blockNum = blockNum + 1;
end


% Add a conv layer
function [net, inVar, blockNum] = addConv(net, blockNum, inVar, dims, pad, dilate, stride, lr)
opts.cudnnWorkspaceLimit = 1024*1024*1024*2; % 2 GB
convOpts = {'CudnnWorkspaceLimit', opts.cudnnWorkspaceLimit};
trainMethod = 'adam';

outVar   = sprintf('conv%d', blockNum);
layerCur = sprintf('conv%d', blockNum);

convBlock = dagnn.Conv('size', dims, 'pad', pad, 'dilate', dilate, 'stride', stride, ...
    'hasBias', true, 'opts', convOpts);

net.addLayer(layerCur, convBlock, {inVar}, {outVar}, {[layerCur '_f'], [layerCur '_b']});

f  = net.getParamIndex([layerCur '_f']);
sc = sqrt(2/(dims(1)*dims(2)*max(dims(3), dims(4)))); % improved Xavier (He) initialization
net.params(f).value        = sc*randn(dims, 'single');
net.params(f).learningRate = lr(1);
net.params(f).weightDecay  = 1;
net.params(f).trainMethod  = trainMethod;

f = net.getParamIndex([layerCur '_b']);
net.params(f).value        = zeros(dims(4), 1, 'single');
net.params(f).learningRate = lr(2);
net.params(f).weightDecay  = 1;
net.params(f).trainMethod  = trainMethod;

inVar    = outVar;
blockNum = blockNum + 1;
end


% Clip values in (-b, b) away from zero so batch-norm scale factors
% cannot be initialized arbitrarily close to zero
function A = clipping(A, b)
A(A >= 0 & A < b) = b;
A(A < 0 & A > -b) = -b;
end
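
The Multiply layer reconstructs the noisy input as the element-wise product of the clean and speckle estimates, which is matched back to the input by the 'loss1' L2 term (the multiplicative speckle model). After training, the despeckled image can be read from the 'prediction' variable. Below is a minimal inference sketch using the standard MatConvNet DagNN calls; the variable name testImg and the choice to strip the loss layers before evaluation are assumptions for illustration, not code from this repository:

% Minimal sketch (assumptions: a trained net and a single-channel test
% image 'testImg'; loss layers are removed so no label is required).
net.removeLayer('loss');    % clean L2 loss
net.removeLayer('loss0');   % TV loss
net.removeLayer('loss1');   % noisy-reconstruction L2 loss
net.mode = 'test';          % use stored batch-norm moments
net.vars(net.getVarIndex('prediction')).precious = 1;

net.eval({'input', single(testImg)});
despeckled = net.vars(net.getVarIndex('prediction')).value;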
