Fix up layers unit tests with TF.
fchollet committed Feb 16, 2017
1 parent 36ac91f commit d663fda
Showing 35 changed files with 1,780 additions and 1,628 deletions.
19 changes: 18 additions & 1 deletion keras/applications/vgg19.py
@@ -13,7 +13,7 @@

 from ..models import Model
 from ..layers import Flatten, Dense, Input
-from ..layers import Convolution2D, MaxPooling2D
+from ..layers import Convolution2D, MaxPooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D
 from ..engine.topology import get_source_inputs
 from ..utils.layer_utils import convert_all_kernels_in_model
 from ..utils.data_utils import get_file
@@ -29,6 +29,7 @@

 def VGG19(include_top=True, weights='imagenet',
           input_tensor=None, input_shape=None,
+          pooling=None,
           classes=1000):
     """Instantiate the VGG19 architecture,
     optionally loading weights pre-trained
@@ -56,6 +57,17 @@ def VGG19(include_top=True, weights='imagenet',
             It should have exactly 3 input channels,
             and width and height should be no smaller than 48.
             E.g. `(200, 200, 3)` would be one valid value.
+        pooling: Optional pooling mode for feature extraction
+            when `include_top` is `False`.
+            - `None` means that the output of the model will be
+                the 4D tensor output of the
+                last convolutional layer.
+            - `avg` means that global average pooling
+                will be applied to the output of the
+                last convolutional layer, and thus
+                the output of the model will be a 2D tensor.
+            - `max` means that global max pooling will
+                be applied.
         classes: optional number of classes to classify images
             into, only to be specified if `include_top` is True, and
            if no `weights` argument is specified.
@@ -122,6 +134,11 @@ def VGG19(include_top=True, weights='imagenet',
         x = Dense(4096, activation='relu', name='fc1')(x)
         x = Dense(4096, activation='relu', name='fc2')(x)
         x = Dense(classes, activation='softmax', name='predictions')(x)
+    else:
+        if pooling == 'avg':
+            x = GlobalAveragePooling2D()(x)
+        elif pooling == 'max':
+            x = GlobalMaxPooling2D()(x)

     # Ensure that the model takes into account
     # any potential predecessors of `input_tensor`.
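For context, a minimal usage sketch of the new `pooling` argument (a hypothetical script, not part of the commit; assumes the keras.applications API of this era):

from keras.applications.vgg19 import VGG19

# include_top=False plus pooling='avg' yields 2D (batch, channels)
# features instead of the 4D output of the last conv block.
model = VGG19(include_top=False, weights=None,
              input_shape=(224, 224, 3), pooling='avg')
print(model.output_shape)  # expected: (None, 512)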
2 changes: 1 addition & 1 deletion keras/constraints.py
@@ -154,7 +154,7 @@ def serialize(constraint):
 def deserialize(config):
     return deserialize_keras_object(config,
                                     module_objects=globals(),
-                                    printable_module_name='regularizer')
+                                    printable_module_name='constraint')


 def get(identifier):
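A sketch of what this fix changes in practice (hypothetical identifier, not part of the commit): deserialize_keras_object embeds printable_module_name in its "Unknown ..." error, so an unrecognized constraint name should now be reported as a constraint rather than a regularizer:

from keras import constraints

try:
    constraints.get('not_a_real_constraint')  # hypothetical bad name
except ValueError as e:
    print(e)  # message now says "constraint", not "regularizer"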
48 changes: 31 additions & 17 deletions keras/engine/topology.py
@@ -219,7 +219,7 @@ def __init__(self, **kwargs):
         # These properties will be set upon call of self.build()
         self._trainable_weights = []
         self._non_trainable_weights = []
-        self.constraints = {}  # dict {tensor: constraint instance}
+        self._constraints = {}  # dict {tensor: constraint instance}
         self.built = False

         # These lists will be filled via successive calls
@@ -267,6 +267,14 @@ def __init__(self, **kwargs):
         else:
             self._initial_weights = None

+    @property
+    def constraints(self):
+        return self._constraints
+
+    @constraints.setter
+    def constraints(self, constraints):
+        self._constraints = constraints
+
     @property
     def trainable_weights(self):
         trainable = getattr(self, 'trainable', True)
@@ -522,7 +530,11 @@ def _add_inbound_node(self, input_tensors, output_tensors,
                 node_indices.append(node_index)
                 tensor_indices.append(tensor_index)
             else:
-                raise ValueError('Input tensor is not a Keras tensor:', x)
+                assert len(input_tensors) == 1
+                inbound_layers = []
+                node_indices = []
+                tensor_indices = []
+                break

         # Create node, add it to inbound nodes.
         Node(
@@ -1023,8 +1035,9 @@ def __init__(self, input_shape=None, batch_input_shape=None,
         self._non_trainable_weights = []
         self.inbound_nodes = []
         self.outbound_nodes = []
-        self.constraints = {}
+        self._constraints = {}
         self.sparse = sparse
+        self.supports_masking = False

         if not name:
             prefix = 'input'
@@ -1846,17 +1859,23 @@ def run_internal_graph(self, inputs, masks=None):
                 # call layer
                 if len(computed_data) == 1:
                     computed_tensor, computed_mask = computed_data[0]
-                    output_tensors = _to_list(layer.masked_call(computed_tensor,
-                                                                mask=computed_mask))
+                    if 'mask' in inspect.getargspec(layer.call).args:
+                        output_tensors = _to_list(layer.call(computed_tensor,
+                                                             mask=computed_mask))
+                    else:
+                        output_tensors = _to_list(layer.call(computed_tensor))
                     output_masks = _to_list(layer.compute_mask(computed_tensor,
                                                                computed_mask))
                     computed_tensors = [computed_tensor]
                     computed_masks = [computed_mask]
                 else:
                     computed_tensors = [x[0] for x in computed_data]
                     computed_masks = [x[1] for x in computed_data]
-                    output_tensors = _to_list(layer.masked_call(computed_tensors,
-                                                                mask=computed_masks))
+                    if 'mask' in inspect.getargspec(layer.call).args:
+                        output_tensors = _to_list(layer.call(computed_tensors,
+                                                             mask=computed_masks))
+                    else:
+                        output_tensors = _to_list(layer.call(computed_tensors))
                     output_masks = _to_list(layer.compute_mask(computed_tensors,
                                                                computed_masks))

@@ -2372,20 +2391,15 @@ def _is_all_none(iterable_or_element):
 def _collect_previous_mask(input_tensors):
     # Return the output mask(s) of the previous node.
     input_tensors = _to_list(input_tensors)
-    inbound_layers = []
-    node_indices = []
-    tensor_indices = []
+    masks = []
     for x in input_tensors:
         if hasattr(x, '_keras_history'):
             inbound_layer, node_index, tensor_index = x._keras_history
-            inbound_layers.append(inbound_layer)
-            node_indices.append(node_index)
-            tensor_indices.append(tensor_index)
+            node = inbound_layer.inbound_nodes[node_index]
+            mask = node.output_masks[tensor_index]
+            masks.append(mask)
         else:
-            raise ValueError('Input tensor is not a Keras tensor:', x)
-    nodes = [layer.inbound_nodes[i] for layer, i in zip(inbound_layers,
-                                                        node_indices)]
-    masks = [node.output_masks[i] for node, i in zip(nodes, tensor_indices)]
+            masks.append(None)
     if len(masks) == 1:
         return masks[0]
     return masks
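For context, a minimal standalone sketch (simplified, not Keras code) of the dispatch pattern run_internal_graph now uses: `mask` is only passed to `layer.call` when the call signature accepts it.

import inspect

class PlainLayer(object):
    def call(self, x):
        return x

class MaskedLayer(object):
    def call(self, x, mask=None):
        return x

def apply_layer(layer, x, mask=None):
    # Mirrors the getargspec check above: pass `mask` only if accepted.
    if 'mask' in inspect.getargspec(layer.call).args:
        return layer.call(x, mask=mask)
    return layer.call(x)

print(apply_layer(PlainLayer(), 1))         # 1
print(apply_layer(MaskedLayer(), 1, None))  # 1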
8 changes: 5 additions & 3 deletions keras/layers/advanced_activations.py
@@ -109,10 +109,12 @@ def build(self, input_shape):
                                      constraint=self.alpha_constraint)
         # Set input spec
         axes = {}
-        for i in range(1, len(input_shape)):
-            if i not in self.shared_axes:
-                axes[i] = input_shape[i]
+        if self.shared_axes:
+            for i in range(1, len(input_shape)):
+                if i not in self.shared_axes:
+                    axes[i] = input_shape[i]
         self.input_spec = InputSpec(ndim=len(input_shape), axes=axes)
+        self.built = True

     def call(self, inputs, mask=None):
         pos = K.relu(inputs)
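A standalone sketch of the fixed axes computation (hypothetical shapes, not part of the commit). With `shared_axes=None` the loop is skipped entirely, so PReLU's build() no longer tests membership against None:

input_shape = (None, 8, 8, 16)   # batch, rows, cols, channels
shared_axes = [1, 2]             # share alpha across rows and cols

axes = {}
if shared_axes:
    for i in range(1, len(input_shape)):
        if i not in shared_axes:
            axes[i] = input_shape[i]
print(axes)  # {3: 16} -- only the channel axis is pinned in the InputSpec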
3 changes: 3 additions & 0 deletions keras/layers/convolutional.py
@@ -139,6 +139,7 @@ def build(self, input_shape):
         # Set input spec.
         self.input_spec = InputSpec(ndim=self.rank + 2,
                                     axes={channel_axis: input_dim})
+        self.built = True

     def call(self, inputs):
         if self.rank == 1:
@@ -723,6 +724,7 @@ def build(self, input_shape):
             self.bias = None
         # Set input spec.
         self.input_spec = InputSpec(ndim=4, axes={channel_axis: input_dim})
+        self.built = True

     def call(self, inputs):
         input_shape = K.shape(inputs)
@@ -968,6 +970,7 @@ def build(self, input_shape):
             self.bias = None
         # Set input spec.
         self.input_spec = InputSpec(ndim=4, axes={channel_axis: input_dim})
+        self.built = True

     def call(self, inputs):
         outputs = K.separable_conv2d(
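The repeated `self.built = True` additions enforce the Layer.build contract. A minimal sketch of why the flag matters (simplified stand-in, not the actual Layer implementation):

class MiniLayer(object):
    def __init__(self):
        self.built = False

    def build(self, input_shape):
        # ... create weights here ...
        self.built = True  # the line these hunks add

    def __call__(self, x):
        # __call__ builds lazily, keyed on self.built; a build() that
        # forgets to set the flag would recreate weights on every call.
        if not self.built:
            self.build(getattr(x, 'shape', None))
        return x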
39 changes: 20 additions & 19 deletions keras/layers/convolutional_recurrent.py
@@ -366,31 +366,32 @@ def build(self, input_shape):
                                         regularizer=self.bias_regularizer,
                                         constraint=self.bias_constraint)
             if self.unit_forget_bias:
-                self.bias += K.concatenate(K.zeros((self.filters,)),
+                self.bias += K.concatenate([K.zeros((self.filters,)),
                                            K.ones((self.filters,)),
-                                           K.zeros((self.filters * 2,)))
+                                           K.zeros((self.filters * 2,))])
         else:
             self.bias = None

-        self.kernel_i = self.kernel[:, :, :, :self.units]
-        self.recurrent_kernel_i = self.recurrent_kernel[:, :, :, :self.units]
-        self.kernel_f = self.kernel[:, :, :, self.units: self.units * 2]
-        self.recurrent_kernel_f = self.recurrent_kernel[:, :, :, self.units: self.units * 2]
-        self.kernel_c = self.kernel[:, :, :, self.units * 2: self.units * 3]
-        self.recurrent_kernel_c = self.recurrent_kernel[:, :, :, self.units * 2: self.units * 3]
-        self.kernel_o = self.kernel[:, :, :, self.units * 3:]
-        self.recurrent_kernel_o = self.recurrent_kernel[:, :, :, self.units * 3:]
+        self.kernel_i = self.kernel[:, :, :, :self.filters]
+        self.recurrent_kernel_i = self.recurrent_kernel[:, :, :, :self.filters]
+        self.kernel_f = self.kernel[:, :, :, self.filters: self.filters * 2]
+        self.recurrent_kernel_f = self.recurrent_kernel[:, :, :, self.filters: self.filters * 2]
+        self.kernel_c = self.kernel[:, :, :, self.filters * 2: self.filters * 3]
+        self.recurrent_kernel_c = self.recurrent_kernel[:, :, :, self.filters * 2: self.filters * 3]
+        self.kernel_o = self.kernel[:, :, :, self.filters * 3:]
+        self.recurrent_kernel_o = self.recurrent_kernel[:, :, :, self.filters * 3:]

         if self.use_bias:
-            self.bias_i = self.bias[:self.units]
-            self.bias_f = self.bias[self.units: self.units * 2]
-            self.bias_c = self.bias[self.units * 2: self.units * 3]
-            self.bias_o = self.bias[self.units * 3:]
+            self.bias_i = self.bias[:self.filters]
+            self.bias_f = self.bias[self.filters: self.filters * 2]
+            self.bias_c = self.bias[self.filters * 2: self.filters * 3]
+            self.bias_o = self.bias[self.filters * 3:]
         else:
             self.bias_i = None
             self.bias_f = None
             self.bias_c = None
             self.bias_o = None
+        self.built = True

     def get_initial_states(self, inputs):
         # (samples, timesteps, rows, cols, filters)
@@ -452,7 +453,7 @@ def dropped_inputs():

         if 0 < self.recurrent_dropout < 1:
             ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
-            ones = K.tile(ones, (1, self.units))
+            ones = K.tile(ones, (1, self.filters))

             def dropped_inputs():
                 return K.dropout(ones, self.recurrent_dropout)
@@ -465,7 +466,7 @@ def dropped_inputs():
         return constants

     def input_conv(self, x, w, b=None, padding='valid'):
-        conv_out = K.conv2d(x, w, strides=self.subsample,
+        conv_out = K.conv2d(x, w, strides=self.strides,
                             padding=padding,
                             data_format=self.data_format,
                             dilation_rate=self.dilation_rate)
@@ -524,9 +525,9 @@ def get_config(self):
                   'recurrent_initializer': initializers.serialize(self.recurrent_initializer),
                   'bias_initializer': initializers.serialize(self.bias_initializer),
                   'unit_forget_bias': self.unit_forget_bias,
-                  'kernel_regularizer': regularizers.serialize(kernel_regularizer),
-                  'recurrent_regularizer': regularizers.serialize(recurrent_regularizer),
-                  'bias_regularizer': regularizers.serialize(bias_regularizer),
+                  'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
+                  'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer),
+                  'bias_regularizer': regularizers.serialize(self.bias_regularizer),
                   'kernel_constraint': constraints.serialize(self.kernel_constraint),
                   'recurrent_constraint': constraints.serialize(self.recurrent_constraint),
                   'bias_constraint': constraints.serialize(self.bias_constraint),
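A numeric sketch of the units -> filters fix (NumPy stand-in with hypothetical sizes, not part of the commit): ConvLSTM2D sizes its gates by `filters` (it has no `units` attribute), and the four gate kernels are slices of one kernel concatenated along the last axis:

import numpy as np

filters = 3
kernel = np.zeros((3, 3, 8, filters * 4))  # rows, cols, in_channels, 4 gates
kernel_i = kernel[:, :, :, :filters]
kernel_f = kernel[:, :, :, filters: filters * 2]
kernel_c = kernel[:, :, :, filters * 2: filters * 3]
kernel_o = kernel[:, :, :, filters * 3:]
print(kernel_i.shape)  # (3, 3, 8, 3)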
3 changes: 2 additions & 1 deletion keras/layers/core.py
@@ -818,6 +818,7 @@ def build(self, input_shape):
         else:
             self.bias = None
         self.input_spec = InputSpec(min_ndim=2, axes={-1: input_dim})
+        self.built = True

     def call(self, inputs):
         output = K.dot(inputs, self.kernel)
@@ -872,7 +873,7 @@ def __init__(self, l1=0., l2=0., **kwargs):
         self.supports_masking = True
         self.l1 = l1
         self.l2 = l2
-        self.activity_regularizer = regularizers.L1L2Regularizer(l1=l1, l2=l2)
+        self.activity_regularizer = regularizers.L1L2(l1=l1, l2=l2)

     def get_config(self):
         config = {'l1': self.l1,
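The rename tracks the refactored regularizers module, where the class is `L1L2` (the old `L1L2Regularizer` name is gone). A usage sketch with hypothetical values, not part of the commit:

from keras import regularizers
from keras.layers import ActivityRegularization

reg = regularizers.L1L2(l1=0.01, l2=0.001)     # what the layer now constructs
layer = ActivityRegularization(l1=0.01, l2=0.001)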
1 change: 1 addition & 0 deletions keras/layers/embeddings.py
@@ -96,6 +96,7 @@ def build(self, input_shape):
             name='embeddings',
             regularizer=self.embeddings_regularizer,
             constraint=self.embeddings_constraint)
+        self.built = True

     def compute_mask(self, inputs, mask=None):
         if not self.mask_zero:
27 changes: 18 additions & 9 deletions keras/layers/local.py
@@ -91,6 +91,9 @@ def __init__(self, filters,
         self.kernel_size = conv_utils.normalize_tuple(kernel_size, 1, 'kernel_size')
         self.strides = conv_utils.normalize_tuple(strides, 1, 'strides')
         self.padding = conv_utils.normalize_padding(padding)
+        if self.padding != 'valid':
+            raise ValueError('Invalid border mode for LocallyConnected1D '
+                             '(only "valid" is supported): ' + padding)
         self.data_format = conv_utils.normalize_data_format(data_format)
         self.activation = activations.get(activation)
         self.use_bias = use_bias
@@ -114,7 +117,7 @@ def build(self, input_shape):
                              filters)
         self.kernel = self.add_weight(
             self.kernel_shape,
-            initializer=self.init,
+            initializer=self.kernel_initializer,
             name='kernel',
             regularizer=self.kernel_regularizer,
             constraint=self.kernel_constraint)
@@ -127,7 +130,8 @@ def build(self, input_shape):
                 constraint=self.bias_constraint)
         else:
             self.bias = None
-        self.input_spec = InputSpec(ndim=3, axes={1: input_dim})
+        self.input_spec = InputSpec(ndim=3, axes={2: input_dim})
+        self.built = True

     def compute_output_shape(self, input_shape):
         length = conv_utils.conv_output_length(input_shape[1],
@@ -152,9 +156,7 @@ def call(self, inputs):
         output = K.permute_dimensions(output, (1, 0, 2))

         if self.use_bias:
-            output = K.bias_add(output,
-                                self.bias,
-                                data_format='channels_last')
+            output += K.reshape(self.bias, (1, output_length, filters))
         if self.activation is not None:
             output = self.activation(output)
         return output
@@ -281,6 +283,9 @@ def __init__(self, filters,
         self.kernel_size = conv_utils.normalize_tuple(kernel_size, 2, 'kernel_size')
         self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')
         self.padding = conv_utils.normalize_padding(padding)
+        if self.padding != 'valid':
+            raise ValueError('Invalid border mode for LocallyConnected2D '
+                             '(only "valid" is supported): ' + padding)
         self.data_format = conv_utils.normalize_data_format(data_format)
         self.activation = activations.get(activation)
         self.use_bias = use_bias
@@ -322,7 +327,7 @@ def build(self, input_shape):
             name='kernel',
             regularizer=self.kernel_regularizer,
             constraint=self.kernel_constraint)
-        if self.bias:
+        if self.use_bias:
             self.bias = self.add_weight((output_row, output_col, filters),
                                         initializer=self.bias_initializer,
                                         name='bias',
@@ -334,6 +339,7 @@ def build(self, input_shape):
             self.input_spec = InputSpec(ndim=4, axes={1: input_filter})
         else:
             self.input_spec = InputSpec(ndim=4, axes={-1: input_filter})
+        self.built = True

     def compute_output_shape(self, input_shape):
         if self.data_format == 'channels_first':
@@ -404,9 +410,12 @@ def call(self, inputs):
         output = K.permute_dimensions(output, (2, 0, 1, 3))

         if self.use_bias:
-            output = K.bias_add(output,
-                                self.bias,
-                                data_format=self.data_format)
+            if self.data_format == 'channels_first':
+                output += K.reshape(self.bias,
+                                    (1, filters, self.output_row, self.output_col))
+            elif self.data_format == 'channels_last':
+                output += K.reshape(self.bias,
+                                    (1, self.output_row, self.output_col, filters))
         output = self.activation(output)
         return output
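A numeric sketch of the reshape-based bias add (NumPy stand-in with hypothetical sizes, not part of the commit): locally-connected layers learn one bias per output position and filter, so the bias is reshaped to broadcast over the batch axis only, which a per-channel K.bias_add cannot express:

import numpy as np

output = np.zeros((2, 5, 5, 4))                 # batch, rows, cols, filters
bias = np.arange(5 * 5 * 4, dtype=np.float64)   # per-position, per-filter
output += bias.reshape((1, 5, 5, 4))            # broadcast over batch only
print(output[0, 0, 0])  # [0. 1. 2. 3.]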