@@ -7,7 +7,7 @@ def __init__(self, in_channels, out_channels, kernel_size, stride, dilation, bia
         # Inheritance
         super(_conv_, self).__init__()
 
-        # Create Instance Layer
+        # Create Layer Instance
         self._conv_ = nn.Conv2d(
             in_channels = in_channels,
             out_channels = out_channels,
@@ -23,21 +23,12 @@ def forward(self, x) :
 
         return out
 
-    def initialize_weights(self) :
-        for m in self.modules() :
-            if isinstance(m, nn.Conv2d) :
-                # Apply Xavier Uniform Initialization
-                torch.nn.init.xavier_uniform_(m.weight.data)
-
-                if m.bias is not None :
-                    m.bias.data.zero_()
-
 class _conv_block_(nn.Module) :
     def __init__(self, in_channels, out_channels, kernel_size, stride, dilation, bias) :
         # Inheritance
         super(_conv_block_, self).__init__()
 
-        # Initialize Layer
+        # Create Layer Instance
         self._conv_in_ = _conv_(in_channels, out_channels, kernel_size, stride, dilation, bias)
 
     def forward(self, x) :
@@ -46,21 +37,12 @@ def forward(self, x) :
 
         return out
 
-    def initialize_weights(self) :
-        for m in self.modules() :
-            if isinstance(m, nn.Conv2d) :
-                # Apply Xavier Uniform Initialization
-                torch.nn.init.xavier_uniform_(m.weight.data)
-
-                if m.bias is not None :
-                    m.bias.data.zero_()
-
 class _context_block_(nn.Module) :
     def __init__(self, in_channels, kernel_size, stride, dilation, bias) :
         # Inheritance
         super(_context_block_, self).__init__()
 
-        # Initialize Layer
+        # Create Layer Instance
         self._conv_in_ = _conv_(in_channels, in_channels, kernel_size, stride, dilation, bias)
         self._d_1_ = _residual_channel_attention_block_(in_channels, kernel_size, stride, dilation, bias)
         self._d_2_ = _residual_channel_attention_block_(in_channels, kernel_size, stride, dilation * 2, bias)
@@ -76,21 +58,12 @@ def forward(self, x) :
 
         return out
 
-    def initialize_weights(self) :
-        for m in self.modules() :
-            if isinstance(m, nn.Conv2d) :
-                # Apply Xavier Uniform Initialization
-                torch.nn.init.xavier_uniform_(m.weight.data)
-
-                if m.bias is not None :
-                    m.bias.data.zero_()
-
 class _channel_attention_module_(nn.Module) :
     def __init__(self, in_channels, stride, dilation, bias) :
         # Inheritance
         super(_channel_attention_module_, self).__init__()
 
-        # Initialize Layer
+        # Create Layer Instance
         self._aap_ = nn.AdaptiveAvgPool2d(1)
         self._amp_ = nn.AdaptiveMaxPool2d(1)
         self._conv_ = nn.Sequential(
@@ -104,21 +77,12 @@ def forward(self, x) :
 
         return out
 
-    def initialize_weights(self) :
-        for m in self.modules() :
-            if isinstance(m, nn.Conv2d) :
-                # Apply Xavier Uniform Initialization
-                torch.nn.init.xavier_uniform_(m.weight.data)
-
-                if m.bias is not None :
-                    m.bias.data.zero_()
-
 class _spatial_attention_module_(nn.Module) :
     def __init__(self, in_channels, stride, dilation, bias) :
         # Inheritance
         super(_spatial_attention_module_, self).__init__()
 
-        # Initialize Layer
+        # Create Layer Instance
         self._bottleneck_ = _conv_(2, 1, 7, stride, dilation, bias)
 
     def forward(self, x) :
@@ -130,21 +94,12 @@ def forward(self, x) :
 
         return out
 
-    def initialize_weights(self) :
-        for m in self.modules() :
-            if isinstance(m, nn.Conv2d) :
-                # Apply Xavier Uniform Initialization
-                torch.nn.init.xavier_uniform_(m.weight.data)
-
-                if m.bias is not None :
-                    m.bias.data.zero_()
-
 class _ResBlock_CBAM_(nn.Module) :
     def __init__(self, in_channels, kernel_size, stride, dilation, bias) :
         # Inheritance
         super(_ResBlock_CBAM_, self).__init__()
 
-        # Initialize Layer
+        # Create Layer Instance
         self._conv_in_ = _conv_(in_channels, in_channels, kernel_size, stride, dilation, bias)
         self._conv_out_ = _conv_(in_channels, in_channels, kernel_size, stride, dilation, bias)
         self._cam_ = _channel_attention_module_(in_channels, stride, dilation, bias)
@@ -158,21 +113,12 @@ def forward(self, x) :
 
         return out
 
-    def initialize_weights(self) :
-        for m in self.modules() :
-            if isinstance(m, nn.Conv2d) :
-                # Apply Xavier Uniform Initialization
-                torch.nn.init.xavier_uniform_(m.weight.data)
-
-                if m.bias is not None :
-                    m.bias.data.zero_()
-
 class _residual_channel_attention_block_(nn.Module) :
     def __init__(self, in_channels, kernel_size, stride, dilation, bias) :
         # Inheritance
         super(_residual_channel_attention_block_, self).__init__()
 
-        # Initialize Layer
+        # Create Layer Instance
         self._layer_ = _conv_block_(in_channels, in_channels, kernel_size, stride, dilation, bias)
         self._conv_ = nn.Sequential(
             nn.AdaptiveAvgPool2d(1),
@@ -187,21 +133,12 @@ def forward(self, x) :
 
         return out
 
-    def initialize_weights(self) :
-        for m in self.modules() :
-            if isinstance(m, nn.Conv2d) :
-                # Apply Xavier Uniform Initialization
-                torch.nn.init.xavier_uniform_(m.weight.data)
-
-                if m.bias is not None :
-                    m.bias.data.zero_()
-
 class _residual_group_(nn.Module) :
     def __init__(self, in_channels, kernel_size, stride, dilation, bias) :
         # Inheritance
         super(_residual_group_, self).__init__()
 
-        # Initialize Layer
+        # Create Layer Instance
         self._cab_1_ = _residual_channel_attention_block_(in_channels, kernel_size, stride, dilation, bias)
         self._cab_2_ = _residual_channel_attention_block_(in_channels, kernel_size, stride, dilation, bias)
         self._cab_3_ = _residual_channel_attention_block_(in_channels, kernel_size, stride, dilation, bias)
@@ -216,21 +153,12 @@ def forward(self, x) :
 
         return out
 
-    def initialize_weights(self) :
-        for m in self.modules() :
-            if isinstance(m, nn.Conv2d) :
-                # Apply Xavier Uniform Initialization
-                torch.nn.init.xavier_uniform_(m.weight.data)
-
-                if m.bias is not None :
-                    m.bias.data.zero_()
-
 class _upsample_(nn.Module) :
     def __init__(self, scale, in_channels, kernel_size, stride, dilation, bias) :
         # Inheritance
         super(_upsample_, self).__init__()
 
-        # Initialize Layer
+        # Create Layer Instance
         self._up_ = nn.Sequential(
             nn.PixelShuffle(scale),
             _conv_block_(in_channels, in_channels, kernel_size, stride, dilation, bias)
@@ -242,13 +170,4 @@ def forward(self, x, skip) :
         out = torch.cat((out, skip), dim = 1)
         out = self._bottleneck_(out)
 
-        return out
-
-    def initialize_weights(self) :
-        for m in self.modules() :
-            if isinstance(m, nn.Conv2d) :
-                # Apply Xavier Uniform Initialization
-                torch.nn.init.xavier_uniform_(m.weight.data)
-
-                if m.bias is not None :
-                    m.bias.data.zero_()
+        return out
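
This commit drops the duplicated `initialize_weights` method from every block. If the Xavier uniform initialization is still wanted, one way it could be applied once at the model level is via `Module.apply`; the sketch below is illustrative and not part of this diff (the `net` variable and `_init_conv_weights_` helper are assumptions).

```python
import torch.nn as nn

def _init_conv_weights_(m):
    # Apply Xavier Uniform Initialization to every Conv2d in the model
    if isinstance(m, nn.Conv2d):
        nn.init.xavier_uniform_(m.weight)

        if m.bias is not None:
            nn.init.zeros_(m.bias)

# Hypothetical usage: `net` is any top-level nn.Module built from these blocks
# net.apply(_init_conv_weights_)
```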