unet.py
import tensorflow as tf
from tensorflow.keras.layers import (
    Input,
    Conv2D,
    MaxPooling2D,
    Dropout,
    Conv2DTranspose,
    concatenate,
)

def conv_block(inputs=None, n_filters=32, dropout_prob=0, max_pooling=True):
    """
    Convolutional downsampling block

    Arguments:
        inputs -- Input tensor
        n_filters -- Number of filters for the convolutional layers
        dropout_prob -- Dropout probability
        max_pooling -- Use MaxPooling2D to reduce the spatial dimensions of the output volume
    Returns:
        next_layer, skip_connection -- Next layer and skip connection outputs
    """
    conv = Conv2D(n_filters, 3, activation='relu', padding='same',
                  kernel_initializer=tf.keras.initializers.HeNormal())(inputs)
    conv = Conv2D(n_filters, 3, activation='relu', padding='same',
                  kernel_initializer=tf.keras.initializers.HeNormal())(conv)
    # If dropout_prob > 0, add a dropout layer with dropout_prob as its rate
    if dropout_prob > 0:
        conv = Dropout(dropout_prob)(conv)
    # If max_pooling is True, downsample the spatial dimensions with a 2x2 MaxPooling2D
    if max_pooling:
        next_layer = MaxPooling2D(pool_size=(2, 2))(conv)
    else:
        next_layer = conv
    skip_connection = conv
    return next_layer, skip_connection
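
# Example (illustrative, not part of the original file): a quick shape check for conv_block.
# With max_pooling=True, next_layer halves the spatial dimensions while skip_connection
# keeps the full input resolution; both carry n_filters channels.
_example_next, _example_skip = conv_block(Input((96, 128, 3)), n_filters=32)
print(_example_next.shape)  # expected: (None, 48, 64, 32)
print(_example_skip.shape)  # expected: (None, 96, 128, 32)
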
def upsampling_block(expansive_input, contractive_input, n_filters=32):
    """
    Convolutional upsampling block

    Arguments:
        expansive_input -- Input tensor from previous layer
        contractive_input -- Input tensor from previous skip layer
        n_filters -- Number of filters for the convolutional layers
    Returns:
        conv -- Tensor output
    """
    up = Conv2DTranspose(n_filters, 3, strides=(2, 2), padding='same')(expansive_input)
    # Merge the upsampled output and the contractive_input along the channel axis
    merge = concatenate([up, contractive_input], axis=3)
    conv = Conv2D(n_filters, 3, activation='relu', padding='same',
                  kernel_initializer=tf.keras.initializers.HeNormal())(merge)
    conv = Conv2D(n_filters, 3, activation='relu', padding='same',
                  kernel_initializer=tf.keras.initializers.HeNormal())(conv)
    return conv
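
# Example (illustrative, not part of the original file): upsampling_block doubles the
# spatial size of expansive_input with Conv2DTranspose, concatenates the matching skip
# tensor, and applies two convolutions. The shapes below assume a 96x128 U-Net.
_example_up = upsampling_block(Input((48, 64, 64)), Input((96, 128, 32)), n_filters=32)
print(_example_up.shape)  # expected: (None, 96, 128, 32)
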
def unet_model(input_size=(96, 128, 3), n_filters=32, n_classes=23):
    """
    U-Net model

    Arguments:
        input_size -- Input shape
        n_filters -- Number of filters for the convolutional layers
        n_classes -- Number of output classes
    Returns:
        model -- tf.keras.Model
    """
    inputs = Input(input_size)

    # Contracting path (encoding): chain the first output of each conv_block into the
    # next block, doubling the number of filters at each step
    cblock1 = conv_block(inputs, n_filters)
    cblock2 = conv_block(cblock1[0], n_filters * 2)
    cblock3 = conv_block(cblock2[0], n_filters * 4)
    cblock4 = conv_block(cblock3[0], n_filters * 8, dropout_prob=0.3)
    # Bottleneck: include a dropout of 0.3 and skip the max-pooling layer
    cblock5 = conv_block(cblock4[0], n_filters * 16, dropout_prob=0.3, max_pooling=False)

    # Expanding path (decoding): chain the previous decoder output as expansive_input and
    # the corresponding skip connection (the second element of each contracting block,
    # taken before max pooling) as contractive_input, halving the filters at each step
    ublock6 = upsampling_block(cblock5[0], cblock4[1], n_filters * 8)
    ublock7 = upsampling_block(ublock6, cblock3[1], n_filters * 4)
    ublock8 = upsampling_block(ublock7, cblock2[1], n_filters * 2)
    ublock9 = upsampling_block(ublock8, cblock1[1], n_filters)

    conv9 = Conv2D(n_filters, 3, activation='relu', padding='same',
                   kernel_initializer='he_normal')(ublock9)
    # Final 1x1 convolution maps each pixel to n_classes logits (no activation here)
    conv10 = Conv2D(n_classes, 1, padding='same')(conv9)

    model = tf.keras.Model(inputs=inputs, outputs=conv10)
    return model

# Test: build the model for 96x128 RGB inputs and print its architecture
img_height = 96
img_width = 128
num_channels = 3
unet = unet_model((img_height, img_width, num_channels))
unet.summary()
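
# Sketch (illustrative, not part of the original file): since conv10 outputs raw logits
# (no softmax), the model would typically be compiled with a from_logits loss. The
# dataset below is hypothetical; substitute (image, mask) pairs where each mask holds
# integer class labels per pixel.
unet.compile(optimizer='adam',
             loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
             metrics=['accuracy'])
# unet.fit(train_dataset, epochs=40)  # train_dataset: a hypothetical tf.data.Dataset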