Formatting fixes.
fchollet committed Jan 5, 2018
1 parent b197ba9 commit f6eda66
Showing 37 changed files with 130 additions and 53 deletions.
13 changes: 9 additions & 4 deletions keras/activations.py
@@ -1,3 +1,5 @@
"""Built-in activation functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
@@ -38,11 +40,14 @@ def elu(x, alpha=1.0):


def selu(x):
-    """Scaled Exponential Linear Unit. (Klambauer et al., 2017)
+    """Scaled Exponential Linear Unit. (Klambauer et al., 2017).
+
    # Arguments
        x: A tensor or variable to compute the activation function for.
+
    # Returns
        Tensor with the same shape and dtype as `x`.
+
    # Note
        - To be used together with the initialization "lecun_normal".
        - To be used together with the dropout variant "AlphaDropout".
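As an aside for readers of this diff: the note above pairs `selu` with `lecun_normal` and `AlphaDropout`. A minimal usage sketch, assuming the Keras 2 API of this period (the model shape and hyperparameters are illustrative only):

```python
# Illustrative sketch only: a small self-normalizing net in the Keras 2 API.
from keras.models import Sequential
from keras.layers import Dense
from keras.layers.noise import AlphaDropout

model = Sequential()
# selu expects lecun_normal-initialized weights to keep activations normalized.
model.add(Dense(64, activation='selu',
                kernel_initializer='lecun_normal',
                input_shape=(20,)))
# AlphaDropout preserves the self-normalizing property; plain Dropout does not.
model.add(AlphaDropout(0.1))
model.add(Dense(10, activation='softmax'))
model.compile(optimizer='sgd', loss='categorical_crossentropy')
```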
@@ -102,12 +107,12 @@ def get(identifier):
        return deserialize(identifier)
    elif callable(identifier):
        if isinstance(identifier, Layer):
-            warnings.warn((
+            warnings.warn(
                'Do not pass a layer instance (such as {identifier}) as the '
                'activation argument of another layer. Instead, advanced '
                'activation layers should be used just like any other '
-                'layer in a model.'
-            ).format(identifier=identifier.__class__.__name__))
+                'layer in a model.'.format(
+                    identifier=identifier.__class__.__name__))
        return identifier
    else:
        raise ValueError('Could not interpret '
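The warning reconstructed above steers users away from passing an activation-layer instance as the `activation` argument. A minimal sketch of the recommended pattern, assuming the stock Keras 2 advanced-activation layers (the model itself is illustrative):

```python
# Illustrative sketch only: apply an advanced activation as its own layer.
from keras.models import Sequential
from keras.layers import Dense
from keras.layers.advanced_activations import LeakyReLU

model = Sequential()
model.add(Dense(64, input_shape=(20,)))  # note: no `activation` argument here
model.add(LeakyReLU(alpha=0.1))          # the activation is a layer of its own
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='sgd', loss='binary_crossentropy')
```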
75 changes: 40 additions & 35 deletions keras/callbacks.py
@@ -1,3 +1,5 @@
"""Callbacks: utilities called at certain points during model training.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
@@ -815,11 +817,12 @@ class ReduceLROnPlateau(Callback):
    of epochs, the learning rate is reduced.

    # Example
-        ```python
-        reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,
-                                      patience=5, min_lr=0.001)
-        model.fit(X_train, Y_train, callbacks=[reduce_lr])
-        ```
+
+    ```python
+    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,
+                                  patience=5, min_lr=0.001)
+    model.fit(X_train, Y_train, callbacks=[reduce_lr])
+    ```

    # Arguments
        monitor: quantity to be monitored.
@@ -928,10 +931,11 @@ class CSVLogger(Callback):
    including 1D iterables such as np.ndarray.

    # Example
-        ```python
-        csv_logger = CSVLogger('training.log')
-        model.fit(X_train, Y_train, callbacks=[csv_logger])
-        ```
+
+    ```python
+    csv_logger = CSVLogger('training.log')
+    model.fit(X_train, Y_train, callbacks=[csv_logger])
+    ```

    # Arguments
        filename: filename of the csv file, e.g. 'run/log.csv'.
@@ -1020,32 +1024,33 @@ class LambdaCallback(Callback):
        on_train_end: called at the end of model training.

    # Example
-        ```python
-        # Print the batch number at the beginning of every batch.
-        batch_print_callback = LambdaCallback(
-            on_batch_begin=lambda batch,logs: print(batch))
-
-        # Stream the epoch loss to a file in JSON format. The file content
-        # is not well-formed JSON but rather has a JSON object per line.
-        import json
-        json_log = open('loss_log.json', mode='wt', buffering=1)
-        json_logging_callback = LambdaCallback(
-            on_epoch_end=lambda epoch, logs: json_log.write(
-                json.dumps({'epoch': epoch, 'loss': logs['loss']}) + '\n'),
-            on_train_end=lambda logs: json_log.close()
-        )
-
-        # Terminate some processes after having finished model training.
-        processes = ...
-        cleanup_callback = LambdaCallback(
-            on_train_end=lambda logs: [
-                p.terminate() for p in processes if p.is_alive()])
-
-        model.fit(...,
-                  callbacks=[batch_print_callback,
-                             json_logging_callback,
-                             cleanup_callback])
-        ```
+
+    ```python
+    # Print the batch number at the beginning of every batch.
+    batch_print_callback = LambdaCallback(
+        on_batch_begin=lambda batch,logs: print(batch))
+
+    # Stream the epoch loss to a file in JSON format. The file content
+    # is not well-formed JSON but rather has a JSON object per line.
+    import json
+    json_log = open('loss_log.json', mode='wt', buffering=1)
+    json_logging_callback = LambdaCallback(
+        on_epoch_end=lambda epoch, logs: json_log.write(
+            json.dumps({'epoch': epoch, 'loss': logs['loss']}) + '\n'),
+        on_train_end=lambda logs: json_log.close()
+    )
+
+    # Terminate some processes after having finished model training.
+    processes = ...
+    cleanup_callback = LambdaCallback(
+        on_train_end=lambda logs: [
+            p.terminate() for p in processes if p.is_alive()])
+
+    model.fit(...,
+              callbacks=[batch_print_callback,
+                         json_logging_callback,
+                         cleanup_callback])
+    ```
    """

def __init__(self,
2 changes: 2 additions & 0 deletions keras/constraints.py
@@ -1,3 +1,5 @@
"""Constraints: functions that impose constraints on weight values.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
2 changes: 2 additions & 0 deletions keras/datasets/boston_housing.py
@@ -1,3 +1,5 @@
"""Boston housing price regression dataset.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
2 changes: 2 additions & 0 deletions keras/datasets/cifar.py
@@ -1,4 +1,6 @@
# -*- coding: utf-8 -*-
"""Utilities common to CIFAR10 and CIFAR100 datasets.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
2 changes: 2 additions & 0 deletions keras/datasets/cifar10.py
@@ -1,3 +1,5 @@
"""CIFAR10 small images classification dataset.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
2 changes: 2 additions & 0 deletions keras/datasets/cifar100.py
@@ -1,3 +1,5 @@
"""CIFAR100 small images classification dataset.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
2 changes: 2 additions & 0 deletions keras/datasets/fashion_mnist.py
@@ -1,3 +1,5 @@
"""Fashion-MNIST dataset.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
2 changes: 2 additions & 0 deletions keras/datasets/imdb.py
@@ -1,3 +1,5 @@
"""IMDB sentiment classification dataset.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
2 changes: 2 additions & 0 deletions keras/datasets/mnist.py
@@ -1,3 +1,5 @@
"""MNIST handwritten digits dataset.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
3 changes: 2 additions & 1 deletion keras/datasets/reuters.py
@@ -1,11 +1,12 @@
# -*- coding: utf-8 -*-
"""Reuters topic classification dataset.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from ..utils.data_utils import get_file
from ..preprocessing.sequence import _remove_long_seq
-from six.moves import zip
import numpy as np
import json
import warnings
2 changes: 2 additions & 0 deletions keras/engine/topology.py
@@ -1,4 +1,6 @@
# -*- coding: utf-8 -*-
"""Topology-related part of the Keras engine.
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
3 changes: 2 additions & 1 deletion keras/engine/training.py
@@ -1,12 +1,13 @@
# -*- coding: utf-8 -*-
"""Training-related part of the Keras engine.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import warnings
import copy
import numpy as np
import six

from keras.utils import Sequence
from keras.utils import GeneratorEnqueuer
2 changes: 2 additions & 0 deletions keras/initializers.py
@@ -1,3 +1,5 @@
"""Built-in weight initializers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
2 changes: 2 additions & 0 deletions keras/layers/advanced_activations.py
@@ -1,4 +1,6 @@
# -*- coding: utf-8 -*-
"""Advanced activation layers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
2 changes: 2 additions & 0 deletions keras/layers/convolutional.py
@@ -1,4 +1,6 @@
# -*- coding: utf-8 -*-
"""Convolutional layers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
2 changes: 2 additions & 0 deletions keras/layers/convolutional_recurrent.py
@@ -1,4 +1,6 @@
# -*- coding: utf-8 -*-
"""Convolutional-recurrent layers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
2 changes: 2 additions & 0 deletions keras/layers/core.py
@@ -1,4 +1,6 @@
# -*- coding: utf-8 -*-
"""Core Keras layers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
2 changes: 2 additions & 0 deletions keras/layers/cudnn_recurrent.py
@@ -1,3 +1,5 @@
"""Recurrent layers backed by cuDNN.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
2 changes: 2 additions & 0 deletions keras/layers/embeddings.py
@@ -1,3 +1,5 @@
"""Embedding layer.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
2 changes: 2 additions & 0 deletions keras/layers/local.py
@@ -1,4 +1,6 @@
# -*- coding: utf-8 -*-
"""Locally-connected layers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
2 changes: 2 additions & 0 deletions keras/layers/merge.py
@@ -1,3 +1,5 @@
"""Layers that can merge several inputs into one.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
2 changes: 2 additions & 0 deletions keras/layers/noise.py
@@ -1,4 +1,6 @@
# -*- coding: utf-8 -*-
"""Noise regularization layers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
2 changes: 2 additions & 0 deletions keras/layers/normalization.py
@@ -1,4 +1,6 @@
# -*- coding: utf-8 -*-
"""Normalization layers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
2 changes: 2 additions & 0 deletions keras/layers/pooling.py
@@ -1,4 +1,6 @@
# -*- coding: utf-8 -*-
"""Pooling layers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
25 changes: 13 additions & 12 deletions keras/layers/recurrent.py
@@ -1,4 +1,6 @@
# -*- coding: utf-8 -*-
"""Recurrent layers and their base classes.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
@@ -618,8 +620,7 @@ def step(inputs, states):
        return output

    def _standardize_args(self, inputs, initial_state, constants):
-        """Brings the arguments of `__call__` that can contain input tensors to
-        standard format.
+        """Standardize `__call__` to a single list of tensor inputs.

        When running a model loaded from file, the input tensors
        `initial_state` and `constants` can be passed to `RNN.__call__` as part
@@ -775,11 +776,11 @@ class SimpleRNNCell(Layer):
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs.
used for the linear transformation of the inputs
(see [initializers](../initializers.md)).
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix,
used for the linear transformation of the recurrent state.
used for the linear transformation of the recurrent state
(see [initializers](../initializers.md)).
bias_initializer: Initializer for the bias vector
(see [initializers](../initializers.md)).
@@ -933,11 +934,11 @@ class SimpleRNN(RNN):
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs.
used for the linear transformation of the inputs
(see [initializers](../initializers.md)).
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix,
used for the linear transformation of the recurrent state.
used for the linear transformation of the recurrent state
(see [initializers](../initializers.md)).
bias_initializer: Initializer for the bias vector
(see [initializers](../initializers.md)).
@@ -1146,11 +1147,11 @@ class GRUCell(Layer):
            (see [activations](../activations.md)).
        use_bias: Boolean, whether the layer uses a bias vector.
        kernel_initializer: Initializer for the `kernel` weights matrix,
-            used for the linear transformation of the inputs.
+            used for the linear transformation of the inputs
            (see [initializers](../initializers.md)).
        recurrent_initializer: Initializer for the `recurrent_kernel`
            weights matrix,
-            used for the linear transformation of the recurrent state.
+            used for the linear transformation of the recurrent state
            (see [initializers](../initializers.md)).
        bias_initializer: Initializer for the bias vector
            (see [initializers](../initializers.md)).
@@ -1386,11 +1387,11 @@ class GRU(RNN):
            (see [activations](../activations.md)).
        use_bias: Boolean, whether the layer uses a bias vector.
        kernel_initializer: Initializer for the `kernel` weights matrix,
-            used for the linear transformation of the inputs.
+            used for the linear transformation of the inputs
            (see [initializers](../initializers.md)).
        recurrent_initializer: Initializer for the `recurrent_kernel`
            weights matrix,
-            used for the linear transformation of the recurrent state.
+            used for the linear transformation of the recurrent state
            (see [initializers](../initializers.md)).
        bias_initializer: Initializer for the bias vector
            (see [initializers](../initializers.md)).
@@ -1623,11 +1624,11 @@ class LSTMCell(Layer):
            (see [activations](../activations.md)).
        use_bias: Boolean, whether the layer uses a bias vector.
        kernel_initializer: Initializer for the `kernel` weights matrix,
-            used for the linear transformation of the inputs.
+            used for the linear transformation of the inputs
            (see [initializers](../initializers.md)).
        recurrent_initializer: Initializer for the `recurrent_kernel`
            weights matrix,
-            used for the linear transformation of the recurrent state.
+            used for the linear transformation of the recurrent state
            (see [initializers](../initializers.md)).
        bias_initializer: Initializer for the bias vector
            (see [initializers](../initializers.md)).
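Since all five hunks above make the same edit to the `kernel_initializer`/`recurrent_initializer` docs, a brief sketch of what the two arguments control, assuming the Keras 2 string identifiers (the values shown are the documented defaults; the model is illustrative):

```python
# Illustrative sketch only: the two initializers act on different weight matrices.
from keras.models import Sequential
from keras.layers import SimpleRNN

model = Sequential()
model.add(SimpleRNN(32,
                    kernel_initializer='glorot_uniform',  # input-to-hidden kernel
                    recurrent_initializer='orthogonal',   # hidden-to-hidden kernel
                    bias_initializer='zeros',
                    input_shape=(10, 8)))                 # (timesteps, features)
model.compile(optimizer='sgd', loss='mse')
```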
2 changes: 2 additions & 0 deletions keras/layers/wrappers.py
@@ -1,4 +1,6 @@
# -*- coding: utf-8 -*-
"""Layers that augment the functionality of a base layer.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function