
Commit 0c78a6e: Merge branch 'docstrings'

2 parents: 92f3ae4 + 569c238

4 files changed: +82 -5 lines

segmentation_models_pytorch/fpn/model.py (20 additions, 3 deletions)

@@ -4,18 +4,35 @@


 class FPN(EncoderDecoder):
+    """FPN_ is a fully convolutional neural network for image semantic segmentation
+    Args:
+        encoder_name: name of the classification model (without last dense layers) used as feature
+            extractor to build the segmentation model.
+        encoder_weights: one of ``None`` (random initialization), ``imagenet`` (pre-training on ImageNet).
+        decoder_pyramid_channels: number of convolution filters in the Feature Pyramid of FPN_.
+        decoder_segmentation_channels: number of convolution filters in the segmentation head of FPN_.
+        classes: number of output classes (output shape - ``(batch, classes, h, w)``).
+        dropout: spatial dropout rate in range (0, 1).
+        activation: one of [``sigmoid``, ``softmax``, None]
+
+    Returns:
+        ``torch.nn.Module``: **FPN**
+
+    .. _FPN:
+        http://presentations.cocodataset.org/COCO17-Stuff-FAIR.pdf
+
+    """

     def __init__(
             self,
             encoder_name='resnet34',
             encoder_weights='imagenet',
             decoder_pyramid_channels=256,
-            decoder_segmenation_channels=128,
+            decoder_segmentation_channels=128,
             classes=1,
             dropout=0.2,
             activation='sigmoid',
     ):
-
         encoder = get_encoder(
             encoder_name,
             encoder_weights=encoder_weights

@@ -24,7 +41,7 @@ def __init__(
         decoder = FPNDecoder(
             encoder_channels=encoder.out_shapes,
             pyramid_channels=decoder_pyramid_channels,
-            segmentation_channels=decoder_segmenation_channels,
+            segmentation_channels=decoder_segmentation_channels,
             final_channels=classes,
             dropout=dropout,
         )
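
Not part of the commit, but for context: a minimal usage sketch of the ``FPN`` class as documented above. The constructor arguments mirror the ``__init__`` signature in this diff; the module import path follows the file location, and the input size and any top-level re-export are assumptions.

```python
import torch

# Import path mirrors the file location above; a top-level re-export
# (e.g. `from segmentation_models_pytorch import FPN`) may also exist.
from segmentation_models_pytorch.fpn.model import FPN

model = FPN(
    encoder_name='resnet34',
    encoder_weights='imagenet',          # or None for random initialization
    decoder_pyramid_channels=256,
    decoder_segmentation_channels=128,   # spelling corrected by this commit
    classes=1,
    dropout=0.2,
    activation='sigmoid',
)

model.eval()
with torch.no_grad():
    x = torch.randn(1, 3, 256, 256)      # assumed input size, divisible by 32 for resnet34
    mask = model(x)                       # docstring documents output shape (batch, classes, h, w)
print(mask.shape)
```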

segmentation_models_pytorch/linknet/model.py (20 additions, 1 deletion)

@@ -4,6 +4,26 @@


 class Linknet(EncoderDecoder):
+    """Linknet_ is a fully convolutional neural network for fast image semantic segmentation
+
+    Note:
+        This implementation has 4 skip connections by default (the original has 3).
+
+    Args:
+        encoder_name: name of the classification model (without last dense layers) used as feature
+            extractor to build the segmentation model.
+        encoder_weights: one of ``None`` (random initialization), ``imagenet`` (pre-training on ImageNet).
+        decoder_use_batchnorm: if ``True``, a ``BatchNormalisation`` layer is used between the ``Conv2D``
+            and ``Activation`` layers.
+        classes: number of output classes (output shape - ``(batch, classes, h, w)``).
+        activation: one of [``sigmoid``, ``softmax``, None]
+
+    Returns:
+        ``torch.nn.Module``: **Linknet**
+
+    .. _Linknet:
+        https://arxiv.org/pdf/1707.03718.pdf
+    """

     def __init__(
             self,

@@ -13,7 +33,6 @@ def __init__(
             classes=1,
             activation='sigmoid',
     ):
-
         encoder = get_encoder(
             encoder_name,
             encoder_weights=encoder_weights
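
Again not part of the diff, a short instantiation sketch using the argument names from the Linknet docstring; the ``decoder_use_batchnorm`` value shown is illustrative, not a verified default.

```python
# Hypothetical usage based on the docstring added in this commit;
# parameter names come from the Args section, defaults from the diff context.
from segmentation_models_pytorch.linknet.model import Linknet

model = Linknet(
    encoder_name='resnet34',
    encoder_weights='imagenet',   # or None for random initialization
    decoder_use_batchnorm=True,   # BatchNormalisation between Conv2D and Activation
    classes=1,
    activation='sigmoid',
)
```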

segmentation_models_pytorch/pspnet/model.py (22 additions, 1 deletion)

@@ -4,6 +4,28 @@


 class PSPNet(EncoderDecoder):
+    """PSPNet_ is a fully convolutional neural network for image semantic segmentation
+
+    Args:
+        encoder_name: name of the classification model used as feature
+            extractor to build the segmentation model.
+        encoder_weights: one of ``None`` (random initialization), ``imagenet`` (pre-training on ImageNet).
+        psp_in_factor: one of 4, 8 and 16; downsampling rate (backbone depth) at which
+            the PSP module is constructed.
+        psp_out_channels: number of filters in the PSP block.
+        psp_use_batchnorm: if ``True``, a ``BatchNormalisation`` layer is used between the ``Conv2D``
+            and ``Activation`` layers.
+        psp_aux_output: if ``True``, add an auxiliary classification output for encoder training.
+        dropout: spatial dropout rate between 0 and 1.
+        classes: number of output classes (output shape - ``(batch, classes, h, w)``).
+        activation: one of [``sigmoid``, ``softmax``, None]
+
+    Returns:
+        ``torch.nn.Module``: **PSPNet**
+
+    .. _PSPNet:
+        https://arxiv.org/pdf/1612.01105.pdf
+    """

     def __init__(
             self,

@@ -17,7 +39,6 @@ def __init__(
             dropout=0.2,
             activation='softmax',
     ):
-
         encoder = get_encoder(
             encoder_name,
             encoder_weights=encoder_weights
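
As a sketch only: instantiating ``PSPNet`` with the documented arguments. The ``psp_in_factor`` and ``psp_out_channels`` values below are assumptions for illustration, not defaults confirmed by this diff (the diff only shows ``dropout=0.2`` and ``activation='softmax'``).

```python
# Illustrative only: psp_in_factor / psp_out_channels values are assumptions,
# not defaults confirmed by this commit.
from segmentation_models_pytorch.pspnet.model import PSPNet

model = PSPNet(
    encoder_name='resnet34',
    encoder_weights='imagenet',
    psp_in_factor=8,          # one of 4, 8, 16 per the docstring
    psp_out_channels=512,     # number of filters in the PSP block (assumed value)
    dropout=0.2,
    classes=2,
    activation='softmax',
)
```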

segmentation_models_pytorch/unet/model.py (20 additions, 0 deletions)

@@ -4,6 +4,26 @@


 class Unet(EncoderDecoder):
+    """Unet_ is a fully convolutional neural network for image semantic segmentation
+
+    Args:
+        encoder_name: name of the classification model (without last dense layers) used as feature
+            extractor to build the segmentation model.
+        encoder_weights: one of ``None`` (random initialization), ``imagenet`` (pre-training on ImageNet).
+        decoder_channels: list of numbers of ``Conv2D`` filters in the decoder blocks.
+        decoder_use_batchnorm: if ``True``, a ``BatchNormalisation`` layer is used between the ``Conv2D``
+            and ``Activation`` layers.
+        classes: number of output classes (output shape - ``(batch, classes, h, w)``).
+        activation: one of [``sigmoid``, ``softmax``, None]
+        center: if ``True``, add a ``Conv2dReLU`` block on the encoder head (useful for VGG models).
+
+    Returns:
+        ``torch.nn.Module``: **Unet**
+
+    .. _Unet:
+        https://arxiv.org/pdf/1505.04597
+
+    """

     def __init__(
             self,
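
Finally, a hedged sketch for ``Unet``: parameter names come from the docstring above, while the ``decoder_channels`` list is an illustrative value, not the library default.

```python
# Sketch based on the documented Args; decoder_channels values here are an
# example, not defaults confirmed by this commit.
from segmentation_models_pytorch.unet.model import Unet

model = Unet(
    encoder_name='resnet34',
    encoder_weights='imagenet',
    decoder_channels=[256, 128, 64, 32, 16],  # Conv2D filters per decoder block
    decoder_use_batchnorm=True,
    classes=1,
    activation='sigmoid',
    center=False,   # set True to add a Conv2dReLU block on the encoder head (VGG-style encoders)
)
```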
