Skip to content

Commit c48e682

Browse files
committed
update comment
1 parent 58293a4 commit c48e682

7 files changed

+42
-125
lines changed

export_image.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,7 @@ def concat_image(tensor_input, tensor_pred, tensor_target) :
66
# Create Torchvision Transforms Instance
77
to_pil = transforms.Compose([transforms.ToPILImage()])
88

9-
# Convert PyTorch Tensor to Pillow Image
9+
# Convert PyTorch Tensor to Numpy Array
1010
image_input = np.array(to_pil(tensor_input), dtype = "uint8")
1111
image_pred = np.array(to_pil(tensor_pred), dtype = "uint8")
1212
image_target = np.array(to_pil(tensor_target), dtype = "uint8")

loss.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,7 @@ def __init__(self, device, lambda_tv) :
1212
# Initialize Loss Weight
1313
self._lambda_tv_ = lambda_tv
1414

15-
# Initialize Loss
15+
# Create Loss Instance
1616
self._loss_function_ = nn.MSELoss()
1717
self._dg_loss_ = DGLoss(self._loss_function_, self._device_)
1818
self._tv_loss_ = TVLoss()

model.py

+1
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,7 @@ def __init__(self, scale, in_channels, channels, kernel_size, stride, dilation,
88
# Inheritance
99
super(SAR_CAM, self).__init__()
1010

11+
# Create Layer Instance
1112
self._conv_in_ = _conv_(in_channels, channels, kernel_size, stride, dilation, bias)
1213
self._conv_out_ = _conv_(channels, in_channels, kernel_size, stride, dilation, bias)
1314
self._down_ = nn.MaxPool2d(kernel_size = scale, stride = scale)

model_parts.py

+10-91
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@ def __init__(self, in_channels, out_channels, kernel_size, stride, dilation, bia
77
# Inheritance
88
super(_conv_, self).__init__()
99

10-
# Create Instance Layer
10+
# Create Layer Instance
1111
self._conv_ = nn.Conv2d(
1212
in_channels = in_channels,
1313
out_channels = out_channels,
@@ -23,21 +23,12 @@ def forward(self, x) :
2323

2424
return out
2525

26-
def initialize_weights(self) :
27-
for m in self.modules() :
28-
if isinstance(m, nn.Conv2d) :
29-
# Apply Xavier Uniform Initialization
30-
torch.nn.init.xavier_uniform_(m.weight.data)
31-
32-
if m.bias is not None :
33-
m.bias.data.zero_()
34-
3526
class _conv_block_(nn.Module) :
3627
def __init__(self, in_channels, out_channels, kernel_size, stride, dilation, bias) :
3728
# Inheritance
3829
super(_conv_block_, self).__init__()
3930

40-
# Initialize Layer
31+
# Create Layer Instance
4132
self._conv_in_ = _conv_(in_channels, out_channels, kernel_size, stride, dilation, bias)
4233

4334
def forward(self, x) :
@@ -46,21 +37,12 @@ def forward(self, x) :
4637

4738
return out
4839

49-
def initialize_weights(self) :
50-
for m in self.modules() :
51-
if isinstance(m, nn.Conv2d) :
52-
# Apply Xavier Uniform Initialization
53-
torch.nn.init.xavier_uniform_(m.weight.data)
54-
55-
if m.bias is not None :
56-
m.bias.data.zero_()
57-
5840
class _context_block_(nn.Module) :
5941
def __init__(self, in_channels, kernel_size, stride, dilation, bias) :
6042
# Inheritance
6143
super(_context_block_, self).__init__()
6244

63-
# Initialize Layer
45+
# Create Layer Instance
6446
self._conv_in_ = _conv_(in_channels, in_channels, kernel_size, stride, dilation, bias)
6547
self._d_1_ = _residual_channel_attention_block_(in_channels, kernel_size, stride, dilation, bias)
6648
self._d_2_ = _residual_channel_attention_block_(in_channels, kernel_size, stride, dilation * 2, bias)
@@ -76,21 +58,12 @@ def forward(self, x) :
7658

7759
return out
7860

79-
def initialize_weights(self) :
80-
for m in self.modules() :
81-
if isinstance(m, nn.Conv2d) :
82-
# Apply Xavier Uniform Initialization
83-
torch.nn.init.xavier_uniform_(m.weight.data)
84-
85-
if m.bias is not None :
86-
m.bias.data.zero_()
87-
8861
class _channel_attention_module_(nn.Module) :
8962
def __init__(self, in_channels, stride, dilation, bias) :
9063
# Inheritance
9164
super(_channel_attention_module_, self).__init__()
9265

93-
# Initialize Layer
66+
# Create Layer Instance
9467
self._aap_ = nn.AdaptiveAvgPool2d(1)
9568
self._amp_ = nn.AdaptiveMaxPool2d(1)
9669
self._conv_ = nn.Sequential(
@@ -104,21 +77,12 @@ def forward(self, x) :
10477

10578
return out
10679

107-
def initialize_weights(self) :
108-
for m in self.modules() :
109-
if isinstance(m, nn.Conv2d) :
110-
# Apply Xavier Uniform Initialization
111-
torch.nn.init.xavier_uniform_(m.weight.data)
112-
113-
if m.bias is not None :
114-
m.bias.data.zero_()
115-
11680
class _spatial_attention_module_(nn.Module) :
11781
def __init__(self, in_channels, stride, dilation, bias) :
11882
# Inheritance
11983
super(_spatial_attention_module_, self).__init__()
12084

121-
# Initialize Layer
85+
# Create Layer Instance
12286
self._bottleneck_ = _conv_(2, 1, 7, stride, dilation, bias)
12387

12488
def forward(self, x) :
@@ -130,21 +94,12 @@ def forward(self, x) :
13094

13195
return out
13296

133-
def initialize_weights(self) :
134-
for m in self.modules() :
135-
if isinstance(m, nn.Conv2d) :
136-
# Apply Xavier Uniform Initialization
137-
torch.nn.init.xavier_uniform_(m.weight.data)
138-
139-
if m.bias is not None :
140-
m.bias.data.zero_()
141-
14297
class _ResBlock_CBAM_(nn.Module) :
14398
def __init__(self, in_channels, kernel_size, stride, dilation, bias) :
14499
# Inheritance
145100
super(_ResBlock_CBAM_, self).__init__()
146101

147-
# Initialize Layer
102+
# Create Layer Instance
148103
self._conv_in_ = _conv_(in_channels, in_channels, kernel_size, stride, dilation, bias)
149104
self._conv_out_ = _conv_(in_channels, in_channels, kernel_size, stride, dilation, bias)
150105
self._cam_ = _channel_attention_module_(in_channels, stride, dilation, bias)
@@ -158,21 +113,12 @@ def forward(self, x) :
158113

159114
return out
160115

161-
def initialize_weights(self) :
162-
for m in self.modules() :
163-
if isinstance(m, nn.Conv2d) :
164-
# Apply Xavier Uniform Initialization
165-
torch.nn.init.xavier_uniform_(m.weight.data)
166-
167-
if m.bias is not None :
168-
m.bias.data.zero_()
169-
170116
class _residual_channel_attention_block_(nn.Module) :
171117
def __init__(self, in_channels, kernel_size, stride, dilation, bias) :
172118
# Inheritance
173119
super(_residual_channel_attention_block_, self).__init__()
174120

175-
# Initialize Layer
121+
# Create Layer Instance
176122
self._layer_ = _conv_block_(in_channels, in_channels, kernel_size, stride, dilation, bias)
177123
self._conv_ = nn.Sequential(
178124
nn.AdaptiveAvgPool2d(1),
@@ -187,21 +133,12 @@ def forward(self, x) :
187133

188134
return out
189135

190-
def initialize_weights(self) :
191-
for m in self.modules() :
192-
if isinstance(m, nn.Conv2d) :
193-
# Apply Xavier Uniform Initialization
194-
torch.nn.init.xavier_uniform_(m.weight.data)
195-
196-
if m.bias is not None :
197-
m.bias.data.zero_()
198-
199136
class _residual_group_(nn.Module) :
200137
def __init__(self, in_channels, kernel_size, stride, dilation, bias) :
201138
# Inheritance
202139
super(_residual_group_, self).__init__()
203140

204-
# Initialize Layer
141+
# Create Layer Instance
205142
self._cab_1_ = _residual_channel_attention_block_(in_channels, kernel_size, stride, dilation, bias)
206143
self._cab_2_ = _residual_channel_attention_block_(in_channels, kernel_size, stride, dilation, bias)
207144
self._cab_3_ = _residual_channel_attention_block_(in_channels, kernel_size, stride, dilation, bias)
@@ -216,21 +153,12 @@ def forward(self, x) :
216153

217154
return out
218155

219-
def initialize_weights(self) :
220-
for m in self.modules() :
221-
if isinstance(m, nn.Conv2d) :
222-
# Apply Xavier Uniform Initialization
223-
torch.nn.init.xavier_uniform_(m.weight.data)
224-
225-
if m.bias is not None :
226-
m.bias.data.zero_()
227-
228156
class _upsample_(nn.Module) :
229157
def __init__(self, scale, in_channels, kernel_size, stride, dilation, bias) :
230158
# Inheritance
231159
super(_upsample_, self).__init__()
232160

233-
# Initialize Layer
161+
# Create Layer Instance
234162
self._up_ = nn.Sequential(
235163
nn.PixelShuffle(scale),
236164
_conv_block_(in_channels, in_channels, kernel_size, stride, dilation, bias)
@@ -242,13 +170,4 @@ def forward(self, x, skip) :
242170
out = torch.cat((out, skip), dim = 1)
243171
out = self._bottleneck_(out)
244172

245-
return out
246-
247-
def initialize_weights(self) :
248-
for m in self.modules() :
249-
if isinstance(m, nn.Conv2d) :
250-
# Apply Xavier Uniform Initialization
251-
torch.nn.init.xavier_uniform_(m.weight.data)
252-
253-
if m.bias is not None :
254-
m.bias.data.zero_()
173+
return out

model_summary.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,7 @@ def main() :
2727
set_logging()
2828
device = select_device(args.model_name, args.device)
2929

30-
# Initialize Model
30+
# Create Model Instance
3131
model = Model(
3232
scale = 2,
3333
in_channels = 1,

test.py

+13-14
Original file line numberDiff line numberDiff line change
@@ -37,7 +37,7 @@ def main() :
3737
set_logging()
3838
device = select_device(args.model_name, args.device)
3939

40-
# Load Trained Denoising Model
40+
# Create Model Instance
4141
model = Model(
4242
scale = 2,
4343
in_channels = 1,
@@ -49,11 +49,11 @@ def main() :
4949
).to(device)
5050
model.load_state_dict(torch.load(args.weights_dir))
5151

52-
# Initialize Torchvision Transforms
52+
# Create Torchvision Transforms Instance
5353
to_tensor = transforms.ToTensor()
5454
to_pil = transforms.ToPILImage()
5555

56-
# Initialize List for Saving PSNR
56+
# Create List Instance for Saving Metrics
5757
image_name_list, psnr_noisy_list, psnr_denoised_list, ssim_noisy_list, ssim_denoised_list = list(), list(), list(), list(), list()
5858

5959
# Assign Device
@@ -63,33 +63,32 @@ def main() :
6363
model.eval()
6464

6565
with tqdm(total = len(listdir(args.noisy_image_dir))) as pbar :
66-
# Apply Denoising CNN
6766
with torch.no_grad() :
6867
for x in listdir(args.noisy_image_dir) :
69-
# Get the Absolute Image Path
68+
# Get Image Path
7069
clean_image_path = join(args.clean_image_dir, x)
7170
noisy_image_path = join(args.noisy_image_dir, x)
7271

73-
# Get Image
72+
# Load Image
7473
clean_image = pil_image.open(clean_image_path)
7574
noisy_image = pil_image.open(noisy_image_path)
7675

7776
# Convert Pillow Image to PyTorch Tensor
7877
tensor_clean_image = to_tensor(clean_image).unsqueeze(0)
7978
tensor_noisy_image = to_tensor(noisy_image).unsqueeze(0).to(device)
8079

81-
# Get Denoised Image
80+
# Get Prediction
8281
pred = model(tensor_noisy_image)
8382

8483
# Assign Device into CPU
8584
tensor_noisy_image = tensor_noisy_image.detach().cpu()
8685
pred = pred.detach().cpu()
8786

88-
# Get PSNR
87+
# Calculate PSNR
8988
psnr_noisy = calc_psnr(tensor_noisy_image, tensor_clean_image).item()
9089
psnr_denoised = calc_psnr(pred, tensor_clean_image).item()
9190

92-
# Get SSIM
91+
# Calculate SSIM
9392
ssim_noisy = calc_ssim(tensor_noisy_image, tensor_clean_image,size_average = True).item()
9493
ssim_denoised = calc_ssim(pred, tensor_clean_image, size_average = True).item()
9594

@@ -132,22 +131,22 @@ def main() :
132131
cv2.imwrite(f"{args.save_dir}/{x}", stacked_image)
133132

134133
else :
135-
# Save Denoised Image
134+
# Save Image
136135
pred.save(f"{args.save_dir}/{x}")
137136

138-
# Update tqdm
137+
# Update TQDM Bar
139138
pbar.update()
140139

141-
# Initialize Dictionary
140+
# Create Dictionary Instance
142141
d = {"Noisy Image PSNR(dB)" : psnr_noisy_list,
143142
"Noisy Image SSIM" : ssim_noisy_list,
144143
"Denoised Image PSNR(dB)" : psnr_denoised_list,
145144
"Denoised Image SSIM" : ssim_denoised_list}
146145

147-
# Initialize pandas Dataframe
146+
# Create Pandas Dataframe Instance
148147
df = pd.DataFrame(data = d, index = image_name_list)
149148

150-
# Save as .csv
149+
# Save as CSV Format
151150
df.to_csv(f"{args.save_dir}/image_quality_assessment.csv")
152151

153152
if __name__ == "__main__" :

0 commit comments

Comments (0)