binding/python: more doc fixes
mahilleb-msft committed Mar 22, 2017
1 parent 4740743 commit 0265cb5
Showing 15 changed files with 63 additions and 31 deletions.
6 changes: 3 additions & 3 deletions bindings/python/cntk/core.py
@@ -84,7 +84,7 @@ def from_csr(csr_array, device=None, read_only=False, borrow=False):
format.
Args:
- csr_array (scipy.sparse.csr.csr_matrix): SciPy sparse matrix in CSR
+ csr_array (scipy.sparse.csr_matrix): SciPy sparse matrix in CSR
format
device (:class:`~cntk.device.DeviceDescriptor`): device this value
should be put on
@@ -115,7 +115,7 @@ def from_data(data, device=None, read_only=False, borrow=False):
array in CSR format.
Args:
- data (numpy.ndarray or scipy.sparse.csr.csr_matrix): data
+ data (numpy.ndarray or scipy.sparse.csr_matrix): data
device (:class:`~cntk.device.DeviceDescriptor`): device this value
should be put on
read_only (bool, optional): whether the data can be modified or
@@ -397,7 +397,7 @@ def one_hot(batch, num_classes, dtype=None, device=None):
Args:
batch (list of lists of integers): batch input data of indices
- sample_shape (integer or tuple): number of classes or shape of each sample whose trailing axis is one_hot
+ sample_shape (int or tuple): number of classes or shape of each sample whose trailing axis is one_hot
dtype (`np.float32`, `np.float64`, default None): data type
device (:class:`~cntk.device.DeviceDescriptor`, default None): device
this value should be put on
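To make the touched APIs concrete, here is a minimal sketch of the two constructors documented above. It assumes a CNTK 2.0-era install; the toy data is illustrative and not part of this change:

```python
import numpy as np
import scipy.sparse as sparse
from cntk.core import Value

# Wrap a SciPy CSR matrix as a CNTK Value (Value.from_csr above)
csr = sparse.csr_matrix(np.eye(3, dtype=np.float32))
val = Value.from_csr(csr)

# Encode a batch of index lists as one-hot (Value.one_hot above)
batch = [[0, 2], [1]]
onehot = Value.one_hot(batch, num_classes=3)
```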
23 changes: 15 additions & 8 deletions bindings/python/cntk/debugging/debug.py
@@ -18,8 +18,8 @@
n - execute the next node
n <number> - execute the next <number> nodes
- u f - exeucte until forward pass (like 'n' when already in forward pass)
- u b - exeucte until backward pass (like 'n' when already in backward pass)
+ u f - execute until forward pass (like 'n' when already in forward pass)
+ u b - execute until backward pass (like 'n' when already in backward pass)
u name - execute until a node with that name is hit
u <lambda> - execute until the lambda expression is True. Examples:
Until a Times node is hit:
@@ -29,7 +29,7 @@
Until the variance of the input exceeds 1 (np = numpy):
lambda arg, node: np.var(arg) > 1
- c - exeucte until end
+ c - execute until end
p - print input (forward) or root gradients (backward)
d - drop into a pdb shell
q - quit\
@@ -112,18 +112,23 @@ def set_computation_network_track_gap_nans(enable):
For debugging purposes only.
Args:
- enable (Boolean): whether to enable gap nans tracking (with performance impact)
+ enable (bool): whether to enable gap nans tracking (with performance impact)
'''
cntk_py.set_computation_network_track_gap_nans(enable)


def set_computation_network_trace_level(level):
'''
Set trace level to the computation network. Currently supported values:
- 0 : turn off trace
- 1 : output nodes' dimensions and some other static info
- 1000 : output each node's abs sum of elements in its value matrix for every forward/backward
- 1000000 : output each node's full matrix for every forward/backward
+ 0
+   turn off trace
+ 1
+   output nodes' dimensions and some other static info
+ 1000
+   output each node's abs sum of elements in its value matrix for every forward/backward
+ 1000000
+   output each node's full matrix for every forward/backward
Args:
level (int): trace level
@@ -391,6 +396,8 @@ def node_filter(x):
def debug_model(model, in_stream=sys.stdin, out_stream=sys.stdout,
exit_func=sys.exit):
'''
+ debug_model(model, in_stream=sys.stdin, out_stream=sys.stdout, exit_func=sys.exit)
Returns a cloned model that has debug nodes inserted everywhere. When the
graph is evaluated or trained, those nodes will allow to inspect the graph.
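A short, hedged example of the debug_model entry point whose signature line is added above; input_variable and Dense are assumptions from the same era of the API, not part of this diff:

```python
import sys
import cntk as C
from cntk.debugging import debug_model
from cntk.layers import Dense

x = C.input_variable(3)   # assumed era-specific input factory
model = Dense(2)(x)
dbg = debug_model(model, in_stream=sys.stdin, out_stream=sys.stdout)
# Evaluating dbg stops at each inserted debug node and accepts the
# commands listed above (n, u f, u b, c, p, d, q).
```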
2 changes: 1 addition & 1 deletion bindings/python/cntk/internal/sanitize.py
@@ -124,7 +124,7 @@ def sanitize_batch(var, batch, seq_starts=None, device=None):
* a single NumPy array denoting the full minibatch
* a list of NumPy arrays or SciPy sparse CSR matrices each representing a sequence
- * a :class:`~cntk.core.Value` object (e.g. returned by :func:`one_hot`)
+ * a :class:`~cntk.core.Value` object (e.g. returned by :func:`cntk.core.Value.one_hot`)
seq_starts (list of bools or None): if None, every sequence is
treated as a new sequence. Otherwise, it is interpreted as a list of
Booleans one for each sequence in the batch that tell whether a
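For context, a sketch of one of the batch formats sanitize_batch accepts, per the list above. The input variable and data are illustrative:

```python
import numpy as np
import cntk as C
from cntk.internal.sanitize import sanitize_batch

var = C.input_variable(2)
# one NumPy array per sequence; sequences may differ in length
batch = [np.array([[1., 2.], [3., 4.]], dtype=np.float32),
         np.array([[5., 6.]], dtype=np.float32)]
value = sanitize_batch(var, batch)   # returns a cntk.core.Value
```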
13 changes: 8 additions & 5 deletions bindings/python/cntk/io/__init__.py
@@ -13,6 +13,8 @@
import uuid

INFINITELY_REPEAT = cntk_py.MinibatchSource.infinitely_repeat
+ '''int: constant used to specify a minibatch scheduling unit to equal the size of the full data sweep.'''

FULL_DATA_SWEEP = cntk_py.MinibatchSource.full_data_sweep
INFINITE_SAMPLES = cntk_py.MinibatchSource.infinite_samples
DEFAULT_RANDOMIZATION_WINDOW = cntk_py.MinibatchSource.default_randomization_window
@@ -103,8 +105,7 @@ class MinibatchSource(cntk_py.MinibatchSource):
the entire data sweep (as indicated by the MinibatchSource) and parameters
change their values on the sweep-by-sweep basis specified by the schedule.
**Important:**
- Click `here <https://github.com/Microsoft/CNTK/wiki/BrainScript-epochSize-and-Python-epoch_size-in-CNTK>`_ for a full description of this parameter.
- distributed_after (int, defaults to cntk.io.INFINITE_SAMPLES): sample count after which minibatch source becomes distributed
+ Click `here <https://github.com/Microsoft/CNTK/wiki/BrainScript-epochSize-and-Python-epoch_size-in-CNTK>`__ for a full description of this parameter.
multithreaded_deserializer (`bool`, defaults to `None`): using multi threaded deserializer
frame_mode (`bool`, defaults to `False`): Specifies if data should be randomized and returned at the frame
or sequence level. When true , input sequence are split into frames.
@@ -186,7 +187,7 @@ def next_minibatch(self, minibatch_size_in_samples,
minibatch_size_in_samples (int): number of samples to retrieve for
the next minibatch. Must be > 0.
**Important:**
- Click `here <https://github.com/Microsoft/CNTK/wiki/BrainScript-minibatchSize-and-Python-minibatch_size_in_samples-in-CNTK>`_ for a full description of this parameter.
+ Click `here <https://github.com/Microsoft/CNTK/wiki/BrainScript-minibatchSize-and-Python-minibatch_size_in_samples-in-CNTK>`__ for a full description of this parameter.
input_map (dict): mapping of :class:`~cntk.ops.variables.Variable`
to :class:`~cntk.cntk_py.StreamInformation` which will be used to convert the
returned data.
@@ -236,7 +237,7 @@ def restore_from_checkpoint(self, checkpoint):
Restores the MinibatchSource state from the specified checkpoint.
Args:
- checkpoint (:class:`~cntk_py.Dictionary`): checkpoint to restore from
+ checkpoint (:class:`~cntk.cntk_py.Dictionary`): checkpoint to restore from
'''
super(MinibatchSource, self).restore_from_checkpoint(checkpoint)

@@ -325,7 +326,9 @@ class _ReaderConfig(dict):
samples. If no `epoch_size` is provided, this parameter is substituted
by the size of the full data sweep with infinite repeat, in which case the scheduling unit is
the entire data sweep (as indicated by the MinibatchSource) and parameters
- change their values on the sweep-by-sweep basis specified by the schedule. **Important:** `click here <https://github.com/Microsoft/CNTK/wiki/BrainScript-epochSize-and-Python-epoch_size-in-CNTK>`_ for a full description of this parameter.
+ change their values on the sweep-by-sweep basis specified by the schedule.
+ **Important:**
+ Click `here <https://github.com/Microsoft/CNTK/wiki/BrainScript-epochSize-and-Python-epoch_size-in-CNTK>`__ for a full description of this parameter.
distributed_after (int, defaults to `cntk.io.INFINITE_SAMPLES`): sample count after which reader becomes distributed
multithreaded_deserializer (`bool`, defaults to `None`): using multi threaded deserializer
frame_mode (`bool`, defaults to `False`): Specifies if data should be randomized and returned at the frame
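A minimal sketch of driving MinibatchSource with the parameters discussed above. The CTF file name and stream fields are placeholders; CTFDeserializer, StreamDefs and StreamDef are assumed from the same cntk.io module:

```python
from cntk.io import (MinibatchSource, CTFDeserializer, StreamDef, StreamDefs,
                     INFINITELY_REPEAT)

deserializer = CTFDeserializer('train.ctf', StreamDefs(   # placeholder file
    features=StreamDef(field='x', shape=100),
    labels=StreamDef(field='y', shape=10)))
source = MinibatchSource(deserializer, epoch_size=INFINITELY_REPEAT)

mb = source.next_minibatch(64)   # minibatch_size_in_samples, must be > 0
```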
14 changes: 11 additions & 3 deletions bindings/python/cntk/layers/layers.py
@@ -25,7 +25,7 @@ def Dense(shape, activation=default_override_or(identity), init=default_override
bias=default_override_or(True), init_bias=default_override_or(0),
name=''):
'''
- Dense(shape, activation=identity, init=glorot_uniform(),input_rank=None, map_rank=None, bias=True, init_bias=0, name='')
+ Dense(shape, activation=identity, init=glorot_uniform(), input_rank=None, map_rank=None, bias=True, init_bias=0, name='')
Layer factory function to create an instance of a fully-connected linear layer of the form
`activation(input @ W + b)` with weights `W` and bias `b`, and `activation` and `b` being optional.
@@ -114,7 +114,7 @@ def dense(x):

def Embedding(shape=None, init=default_override_or(glorot_uniform()), weights=None, name=''):
'''
- Embedding(shape=None, init=glorot_uniform(), weights=None, name=''):
+ Embedding(shape=None, init=glorot_uniform(), weights=None, name='')
Layer factory function to create a embedding layer.
@@ -263,7 +263,7 @@ def Convolution(filter_shape, # shape of receptive field, e.g. (3,3)
max_temp_mem_size_in_samples=0,
op_name='Convolution', name=''):
'''
- Convolution(filter_shape, num_filters=None, sequential=False, activation=identity, init=glorot_uniform(), pad=False, strides=1, sharing=True, bias=True, init_bias=0, reduction_rank=1, transpose=False, max_temp_mem_size_in_samples=0, op_name='Convolution', name='')
+ Convolution(filter_shape, num_filters=None, sequential=False, activation=identity, init=glorot_uniform(), pad=False, strides=1, sharing=True, bias=True, init_bias=0, reduction_rank=1, transpose_weight=False, max_temp_mem_size_in_samples=0, op_name='Convolution', name='')
Layer factory function to create a convolution layer.
@@ -617,6 +617,8 @@ def ConvolutionTranspose(filter_shape, # shape of receptive field, e.g. (
name=''):

'''
+ ConvolutionTranspose(filter_shape, num_filters, activation=identity, init=glorot_uniform(), pad=False, strides=1, sharing=True, bias=True, init_bias=0, output_shape=None, reduction_rank=1, max_temp_mem_size_in_samples=0, name='')
Layer factory function to create a convolution transpose layer.
This implements a convolution_transpose operation over items arranged on an N-dimensional grid, such as pixels in an image.
@@ -739,6 +741,8 @@ def ConvolutionTranspose1D(filter_shape, # a scalar, e.g., 3
output_shape=None,
name=''):
'''
+ ConvolutionTranspose1D(filter_shape, num_filters, activation=identity, init=glorot_uniform(), pad=False, strides=1, bias=True, init_bias=0, output_shape=None, name='')
Layer factory function to create a 1D convolution transpose layer with optional non-linearity.
Same as `ConvolutionTranspose()` except that filter_shape is verified to be 1-dimensional.
See `ConvolutionTranspose()` for extensive documentation.
@@ -765,6 +769,8 @@ def ConvolutionTranspose2D(filter_shape, # a 2D tuple, e.g., (3,3)
output_shape=None,
name=''):
'''
+ ConvolutionTranspose2D(filter_shape, num_filters, activation=identity, init=glorot_uniform(), pad=False, strides=1, bias=True, init_bias=0, output_shape=None, name='')
Layer factory function to create a 2D convolution transpose layer with optional non-linearity.
Same as `ConvolutionTranspose()` except that filter_shape is verified to be 2-dimensional.
See `ConvolutionTranspose()` for extensive documentation.
@@ -792,6 +798,8 @@ def ConvolutionTranspose3D(filter_shape, # a 3D tuple, e.g., (3,3,3)
output_shape=None,
name=''):
'''
+ ConvolutionTranspose3D(filter_shape, num_filters, activation=identity, init=glorot_uniform(), pad=False, strides=1, bias=True, init_bias=0, output_shape=None, name='')
Layer factory function to create a 3D convolution transpose layer with optional non-linearity.
Same as `ConvolutionTranspose()` except that filter_shape is verified to be 3-dimensional.
See `ConvolutionTranspose()` for extensive documentation.
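To illustrate the factory signatures fixed above, a hedged sketch composing them; Sequential and relu are assumed from the era's API, and all shapes are illustrative:

```python
import cntk as C
from cntk.layers import Convolution, ConvolutionTranspose2D, Dense, Sequential

model = Sequential([
    Convolution((3, 3), num_filters=16, pad=True, activation=C.relu),
    Dense(10)])
z = model(C.input_variable((1, 28, 28)))

# the transpose variant documented above, e.g. for upsampling
up = ConvolutionTranspose2D((3, 3), num_filters=8, strides=2, pad=True)
```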
2 changes: 2 additions & 0 deletions bindings/python/cntk/layers/models/attention.py
@@ -20,6 +20,8 @@ def AttentionModel(attention_dim, attention_span=None, attention_axis=None,
go_backwards=default_override_or(False),
enable_self_stabilization=default_override_or(True), name=''):
'''
+ AttentionModel(attention_dim, attention_span=None, attention_axis=None, init=glorot_uniform(), go_backwards=False, enable_self_stabilization=True, name='')
Layer factory function to create a function object that implements an attention model
as described in Bahdanau, et al., "Neural machine translation by jointly learning to align and translate."
'''
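A brief sketch of the AttentionModel factory documented above; the span and axis values are illustrative, and the call convention in the trailing comment is an assumption, not part of this diff:

```python
from cntk.layers.models.attention import AttentionModel

attention = AttentionModel(attention_dim=128, attention_span=20,
                           attention_axis=-3)
# inside a decoder, roughly: context = attention(encoder_hidden, decoder_h)
```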
4 changes: 4 additions & 0 deletions bindings/python/cntk/layers/sequence.py
@@ -164,6 +164,8 @@ def _sanitize_function(f):
# TODO: allow to say sequential=False, axis=2, length=100, ... something like this
def RecurrenceFrom(step_function, go_backwards=default_override_or(False), return_full_state=False, name=''):
'''
+ RecurrenceFrom(step_function, go_backwards=False, return_full_state=False, name='')
Layer factory function to create a function that runs a cell function recurrently over an input sequence, with initial state.
This layer is very similar to :func:`~cntk.layers.sequence.Recurrence`,
except that the initial state is data dependent, and thus passed to the layer function as a data input
@@ -275,6 +277,8 @@ def recurrence_from_3(h, c, a, x):
# TODO: Can bidirectionality be an option of this? bidirectional=True?
def Recurrence(step_function, go_backwards=default_override_or(False), initial_state=default_override_or(0), return_full_state=False, name=''):
'''
+ Recurrence(step_function, go_backwards=False, initial_state=0, return_full_state=False, name='')
Layer factory function to create a function that runs a step function recurrently over an input sequence.
This implements the typical recurrent model.
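A hedged sketch of the Recurrence factory whose signature line is added above; LSTM is assumed from cntk.layers, and the default sequence axis of input_variable is an era-specific assumption:

```python
import cntk as C
from cntk.layers import LSTM, Recurrence

x = C.input_variable(10)          # sequence axis is the era's default
h = Recurrence(LSTM(20))(x)       # runs the step function over the sequence
h_rev = Recurrence(LSTM(20), go_backwards=True)(x)
```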
3 changes: 2 additions & 1 deletion bindings/python/cntk/losses/__init__.py
@@ -72,7 +72,8 @@ def cosine_distance_with_negative_samples(x, y, shift, num_negative_samples, nam
[[ 1. , 0. , 0.5]]], dtype=float32)
Args:
- x, y: numpy array or any :class:`~cntk.ops.functions.Function` that outputs a tensor
+ x: numpy array or any :class:`~cntk.ops.functions.Function` that outputs a tensor
+ y: numpy array or any :class:`~cntk.ops.functions.Function` that outputs a tensor
shift: non-zero positive integer representing number of shift to generate a negative sample
num_negative_samples: number of negative samples to generate, a non-zero positive integer
name (str, optional): the name of the Function instance in the network
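For context, a minimal sketch of the loss whose argument docs are split above; shapes and parameter values are illustrative:

```python
import cntk as C
from cntk.losses import cosine_distance_with_negative_samples

qry = C.input_variable(4)
doc = C.input_variable(4)
# other documents in the batch serve as negatives for shifted positions
sim = cosine_distance_with_negative_samples(qry, doc, shift=1,
                                            num_negative_samples=2)
```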
4 changes: 3 additions & 1 deletion bindings/python/cntk/metrics/__init__.py
@@ -129,7 +129,9 @@ def edit_distance_error(input_a, input_b, subPen=1, delPen=1, insPen=1, squashIn
Args:
input_a: first input sequence
input_b: second input sequence
- subPen, delPen, insPen: substitution, deletion and insertion penalties
+ subPen: substitution penalty
+ delPen: deletion penalty
+ insPen: insertion penalty
squashInputs: whether to merge sequences of identical samples (in both input sequences). If true and tokensToIgnore contains label '-' then
given first input sequence as s1="a-ab-" and second as s2="-aa--abb" the edit distance will be computed against s1' = "aab" and s2' = "aab".
tokensToIgnore: list of samples to ignore during edit distance evaluation (in both sequences)
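A short sketch of edit_distance_error using the penalties documented above; the inputs are illustrative:

```python
import cntk as C
from cntk.metrics import edit_distance_error

a = C.input_variable(2)
b = C.input_variable(2)
err = edit_distance_error(a, b, subPen=1, delPen=1, insPen=1)
```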
9 changes: 7 additions & 2 deletions bindings/python/cntk/ops/__init__.py
@@ -335,7 +335,10 @@ def roipooling(conv_feature_map, rois, roi_output_shape, name=''):

from cntk.cntk_py import PoolingType_Max, PoolingType_Average
MAX_POOLING = PoolingType_Max
+ '''int: constant used to specify maximum pooling'''

AVG_POOLING = PoolingType_Average
+ '''int: constant used to specify average pooling'''

@typemap
def pooling(operand, pooling_type, pooling_window_shape, strides=(1,), auto_padding=[False],
@@ -364,8 +367,8 @@ def pooling(operand, pooling_type, pooling_window_shape, strides=(1,), auto_padd
auto_padding (default [False,]): automatic padding flags for each input dimension.
lower_pad (default (0,)): precise lower padding for each input dimension
upper_pad (default (0,)): precise upper padding for each input dimension
- ceil_out_dim (default false): ceiling while computing output size
- include_pad(default false): include pad while average pooling
+ ceil_out_dim (default False): ceiling while computing output size
+ include_pad(default False): include pad while average pooling
name (str, optional): the name of the Function instance in the network
Returns:
:class:`~cntk.ops.functions.Function`
@@ -381,6 +384,8 @@ def pooling(operand, pooling_type, pooling_window_shape, strides=(1,), auto_padd


MAX_UNPOOLING = PoolingType_Max
+ '''int: constant used to specify maximum unpooling'''

@typemap
def unpooling(operand, pooling_input, unpooling_type, unpooling_window_shape, strides=(1,), auto_padding=[False],
lower_pad=(0,), upper_pad=(0,), name=''):
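To tie the pooling constants and default fixes together, a hedged sketch of pooling and its inverse; shapes are illustrative:

```python
import cntk as C
from cntk.ops import pooling, unpooling, MAX_POOLING, MAX_UNPOOLING

x = C.input_variable((1, 4, 4))
p = pooling(x, MAX_POOLING, pooling_window_shape=(2, 2), strides=(2, 2))
# unpooling takes the pooled result plus the original pooling input
u = unpooling(p, x, MAX_UNPOOLING, unpooling_window_shape=(2, 2),
              strides=(2, 2))
```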
2 changes: 1 addition & 1 deletion bindings/python/cntk/ops/functions.py
@@ -932,7 +932,7 @@ def replace_placeholder(self, substitution):
Returns:
:class:`Function`: itself
- :raises ExceptionType: when the function has multiple placeholders.
+ :raises Exception: when the function has multiple placeholders.
'''
return super(Function, self).replace_placeholder(substitution)

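A minimal sketch of replace_placeholder, matching the corrected :raises: note above; placeholder_variable is an assumption (this factory was renamed across releases of the period):

```python
import cntk as C

p = C.placeholder_variable()        # name is an era-specific assumption
f = C.sigmoid(p)
f.replace_placeholder(C.input_variable(3))   # raises if several placeholders
```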
2 changes: 1 addition & 1 deletion bindings/python/cntk/ops/variables.py
@@ -289,7 +289,7 @@ class Constant(VariableMixin, TensorOpsMixin, cntk_py.Constant):
A constant value. It can be a scalar, vector, matrix, or tensor
of floating point numbers that cannot be modified.
- A Constant is a :class:`~cntk.ops.Variable` and therefore inherits all its methods.
+ A Constant is a :class:`~cntk.ops.variables.Variable` and therefore inherits all its methods.
Args:
value (`np.ndarray` or `list` or `float` or `int`): Initial value.
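A one-line sketch of the corrected cross-reference: a Constant participates in tensor ops like any Variable. The values are illustrative:

```python
import numpy as np
import cntk as C

c = C.Constant(np.array([1., 2., 3.], dtype=np.float32))
y = c * 2 + 1    # operator overloads come from the inherited mixins
```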
4 changes: 2 additions & 2 deletions bindings/python/cntk/train/training_session.py
@@ -212,7 +212,7 @@ def on_cross_validation_end(self, index, average_error, num_samples, num_minibat
@typemap
def minibatch_size_schedule(schedule, epoch_size=1):
'''
- Create a minibatch size schedule
+ Creates a minibatch size schedule.
Examples:
>>> # Use a fixed value 32 for all minibatches
@@ -232,7 +232,7 @@ def minibatch_size_schedule(schedule, epoch_size=1):
(32, 32, 64, 64, 128, 128)
Args:
- schedule (integer or list): if integer, it this minibatch size will be used for the whole training.
+ schedule (int or list): if integer, this minibatch size will be used for the whole training.
In case of list of integers, the elements are used as the values for ``epoch_size`` samples.
If list contains pair, the second element is used as a value for (``epoch_size`` x first element) samples
epoch_size (int): number of samples as a scheduling unit.
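A hedged sketch of the pair-based schedule semantics described above; the epoch size and values are illustrative, and the sample-count indexing mirrors the docstring's own examples:

```python
from cntk.train.training_session import minibatch_size_schedule

# 32 for the first 2 * 100 samples, then 64 from sample 200 on
schedule = minibatch_size_schedule([(2, 32), (1, 64)], epoch_size=100)
print(schedule[0], schedule[199], schedule[200])   # expected: 32 32 64
```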
4 changes: 2 additions & 2 deletions bindings/python/cntk/utils/__init__.py
@@ -143,7 +143,7 @@ def get_train_eval_criterion(trainer):
Fetch the train evaluation criterion (e.g., classification error) from the last minibatch and copy it to the CPU in case it is on the GPU.
Args:
- trainer (:class:`Trainer`): the trainer used.
+ trainer (:class:`~cntk.train.trainer.Trainer`): the trainer used.
Returns:
the criterion value
'''
@@ -201,7 +201,7 @@ def eval(op, arguments=None, precision=None, device=None, backward_pass=False, e
mainly to explore the operators and for convenient unit testing.
Args:
- op (:class:`Function`): operation to evaluate
+ op (:class:`~cntk.ops.functions.Function`): operation to evaluate
arguments: maps variables to their input data. The
interpretation depends on the input type:
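Finally, a sketch of cntk.utils.eval as documented above; the (forward, backward) return pair is an assumption inferred from the backward_pass parameter, and the op itself is illustrative:

```python
import numpy as np
import cntk as C
from cntk.utils import eval as cntk_eval

x = C.input_variable(2)
op = C.times(x, np.array([[1.], [2.]], dtype=np.float32))
forward, backward = cntk_eval(op, arguments={x: [np.array([[3., 4.]],
                                                 dtype=np.float32)]})
```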