Commit
Fix typos (apple#2018)
* Fix typos

* Fix lambda variable

* Use only ASCII characters

---------

Co-authored-by: Alejandro Gaston Alvarez Franceschi <[email protected]>
alealv and Alejandro Gaston Alvarez Franceschi authored Nov 11, 2023
1 parent b2f7190 commit 6284782
Showing 123 changed files with 226 additions and 225 deletions.
2 changes: 1 addition & 1 deletion coremltools/_deps/__init__.py
@@ -4,7 +4,7 @@
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause

"""
-List of all external dependancies for this package. Imported as
+List of all external dependencies for this package. Imported as
optional includes
"""
import platform as _platform
2 changes: 1 addition & 1 deletion coremltools/converters/_converters_entry.py
@@ -163,7 +163,7 @@ def convert(
in the TF model.
- If ``name`` is specified with ``TensorType`` and ``ImageType``, it
must correspond to a placeholder op in the TF graph. The input names
-in the converted Core ML model can later be modifed using the
+in the converted Core ML model can later be modified using the
``ct.utils.rename_feature`` API.
- If ``dtype`` is not specified, it defaults to the ``dtype`` of the
inputs in the TF model.
4 changes: 2 additions & 2 deletions coremltools/converters/libsvm/_libsvm_converter.py
@@ -77,7 +77,7 @@ def convert(libsvm_model, feature_names, target, input_length, probability):
# input will be a single array
if input_length == "auto":
print(
"[WARNING] Infering an input length of %d. If this is not correct,"
"[WARNING] Inferring an input length of %d. If this is not correct,"
" use the 'input_length' parameter." % inferred_length
)
input_length = inferred_length
@@ -167,7 +167,7 @@ def convert(libsvm_model, feature_names, target, input_length, probability):
else:
svm.rho = libsvm_model.rho[0]

-# set coefficents
+# set coefficients
if svm_type_enum == _svm.C_SVC or svm_type_enum == _svm.NU_SVC:
for _ in range(nr_class - 1):
svm.coefficients.add()
2 changes: 1 addition & 1 deletion coremltools/converters/mil/backend/mil/load.py
@@ -226,7 +226,7 @@ def convert_function(function, parameters, blob_writer, opset):
return pm.Function(inputs=inputs, opset=opset, block_specializations={opset: block})

# Add a classify op to the output.
-# Replaces the original probabilites output (in the containing MIL block)
+# Replaces the original probabilities output (in the containing MIL block)
# with the outputs of the classifier op. Returns the name of the original
# probabilities output variable.
def _add_classify_op(prog, classifier_config):
@@ -10,7 +10,7 @@
def _match_pattern(op):
pow_op, sqrt_op = None, None

-# check the curernt op is pow(2) or sqrt
+# check the current op is pow(2) or sqrt
if op.op_type == "pow" and op.y.val == 2:
pow_op = op
if op.op_type == "sqrt":
2 changes: 1 addition & 1 deletion coremltools/converters/mil/backend/nn/load.py
@@ -157,7 +157,7 @@ def _set_user_inputs(proto, inputs):


def _set_symbolic_inputs(proto, symbolic_inputs):
-# Set symbolic input shapes by -1 infered from graph
+# Set symbolic input shapes by -1 inferred from graph
for input_name, shape in symbolic_inputs.items():
lb = [1 if is_symbolic(d) else d for d in shape]
ub = [-1 if is_symbolic(d) else d for d in shape]
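The hunk above derives per-dimension bounds from a possibly symbolic shape. A standalone sketch of that bound computation (the `is_symbolic` stand-in here is an assumption for illustration, not the real helper):

```python
# Symbolic dims get a lower bound of 1 and an upper bound of -1 (unbounded);
# concrete dims keep their value in both bounds.
def bounds(shape, is_symbolic=lambda d: isinstance(d, str)):  # stand-in predicate
    lb = [1 if is_symbolic(d) else d for d in shape]
    ub = [-1 if is_symbolic(d) else d for d in shape]
    return lb, ub

print(bounds(("batch", 3, 224, 224)))  # ([1, 3, 224, 224], [-1, 3, 224, 224])
```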
4 changes: 2 additions & 2 deletions coremltools/converters/mil/backend/nn/op_mapping.py
@@ -557,7 +557,7 @@ def conv_helper(const_context, builder, op):

if is_conv1d or is_conv2d:
if weights is None and has_bias:
-# weights are dyanmic.
+# weights are dynamic.
# In this case, bias, if present, cannot be part of the conv op
# it needs to be added separately via an add op
out_name += "_without_bias"
@@ -3537,7 +3537,7 @@ def _realloc_list(const_context, builder, ls_var, index_var, value_var, mode):
# (1)
# check if we need to re-initialize the tensorarray:
# it happens when the elem_shape is runtime determined and the runtime shape is not equal to
-# the default shape. Ex: elem_shape is = [i0, 10] (initilized with [1, 10]) and at the runtime we get [2, 10].
+# the default shape. Ex: elem_shape is = [i0, 10] (initialized with [1, 10]) and at the runtime we get [2, 10].

# (2)
# If index_var >= len(ls_var), reallocate the array and copy over existing
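A minimal numpy sketch of the grow-on-write behavior comment (2) describes, assuming a simple doubling strategy (the real `_realloc_list` emits MIL ops rather than operating on arrays):

```python
import numpy as np

def realloc_write(arr, index, value):
    # If the write index is past the end, grow the buffer (doubling here),
    # copy the existing rows over, then write in place.
    if index >= arr.shape[0]:
        new_len = max(index + 1, 2 * arr.shape[0])
        grown = np.zeros((new_len,) + arr.shape[1:], dtype=arr.dtype)
        grown[: arr.shape[0]] = arr
        arr = grown
    arr[index] = value
    return arr

ls = np.zeros((2, 10))
ls = realloc_write(ls, 3, np.ones(10))  # index 3 >= len 2 -> reallocate
assert ls.shape == (4, 10)
```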
@@ -175,7 +175,7 @@ def _remove_layers_from_spec(nn_spec, layers_to_delete):

def _get_disconnected_layers_rec(nn_spec):
"""
-- Iteraters over layers in bottom-up fashion
+- Iterates over layers in bottom-up fashion
- Collect layers if it's output is not being used (marks and does lazy deletion)
- Recursively iterates over NN Spec if layer is Loop or Branch
"""
@@ -245,7 +245,7 @@ def _decrease_input_degree(layer):
and len(else_layers_to_delete) == total_else_layers
):
# If both branches are empty after dead-layer elimination
-# remove branch layer altogehter
+# remove branch layer altogether
layers_to_delete.append(_layer)
_decrease_input_degree(_layer)
continue
@@ -422,7 +422,7 @@ def solve_dp(layers):
For example, if sol_num[10] = 5, this means after index 10, we can at most remove 5 nodes.
sol_bt[i] keeps the first starting point of identity sequence which results in the
optimal solution after index i.
-For example, if sol_num[10] = 12, means that in order to get rid of the maxium number of
+For example, if sol_num[10] = 12, means that in order to get rid of the maximum number of
nodes after 10, the first starting point is index 12.
After construct sol_num and sol_bt by dynamic programming, we backtrack for the optimal
solution using sol_bt.
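A hypothetical backtracking sketch matching the docstring's description of `sol_bt` (the table values and the `seq_end` helper are invented for illustration; they are not the pass's real data structures):

```python
def collect_removals(sol_bt, seq_end):
    # Follow sol_bt from index 0: each entry names the start of the identity
    # sequence chosen by the DP; jump past its end and repeat.
    removals, i = [], 0
    while i < len(sol_bt) and sol_bt[i] is not None:
        start = sol_bt[i]
        removals.append((start, seq_end[start]))
        i = seq_end[start] + 1
    return removals

# Toy tables: identity runs over indices [1, 2] and [4, 4].
seq_end = {1: 2, 4: 4}
sol_bt = [1, 1, 2, 4, 4, None]
print(collect_removals(sol_bt, seq_end))  # [(1, 2), (4, 4)]
```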
@@ -350,7 +350,7 @@ def _test_builder(self, builder, input_shape, expected_layer_num=None):

def test_output_edge_case(self):

-# For now for safety purpose, the node which are output should't be merged
+# For now for safety purpose, the node which are output shouldn't be merged
input_shape = (1, 10, 5)
input_features = [("data", datatypes.Array(*input_shape))]
output_features = [("out", None)]
@@ -374,7 +374,7 @@ def test_output_edge_case(self):

def test_output_edge_case_2(self):

-# For now for safety purpose, the node which are output should't be merged
+# For now for safety purpose, the node which are output shouldn't be merged
input_shape = (1, 10, 5)
input_features = [("data", datatypes.Array(*input_shape))]
output_features = [("out", None)]
@@ -675,7 +675,7 @@ def test_branch_structure(self):
RELU_2
t_0, t_1, t_3 can be merged.
t_4, t_5 can be merged.
-The output shuld be
+The output should be
INPUT
|
.------.
@@ -46,7 +46,7 @@
...
When taking all of the conv/conv_tranpose, transpose/no transpose, and add/sub into account,
-We end up with a total of 8 patterns (2^3). These patterns are paramaterized by the pattern_to_detect
+We end up with a total of 8 patterns (2^3). These patterns are parameterized by the pattern_to_detect
function below.
"""

@@ -95,7 +95,7 @@ def instancenorm_2(x):
y = (x - mean) / pow(variance + epsilon) * gamma + beta
This pattern corresponds to, should be fused as instance_norm.
-All of the following must be satisty:
+All of the following must be satisfy:
1) Input is rank 4 tensor
2) Reduce operates on spatial dimensions axes=[-2, -1], or axes=[-3, -2] (a
channel first to channel last transpose would be inserted in such case)
@@ -131,7 +131,7 @@ def instancenorm_3(x):
Detect InstanceNorm pattern in TensorFlow-Addons.
This pattern corresponds to, should be fused as instance_norm.
-All of the following must be satisty:
+All of the following must be satisfy:
1) Input is rank 4 tensor
2) Reduce operates on spatial dimensions axes=[-2, -1], or axes=[-3, -2] (a
channel first to channel last transpose would be inserted in such case)
@@ -169,7 +169,7 @@ def instancenorm_4(x):
y = x * [gamma * rsqrt(variance + eps)] + (beta - mean * [gamma * rsqrt(variance + eps)])
This pattern corresponds to, should be fused as instance_norm.
-All of the following must be satisty:
+All of the following must be satisfy:
1) Input is rank 4 tensor
2) Reduce operates on spatial dimensions axes=[-2, -1], or axes=[-3, -2] (a
channel first to channel last transpose would be inserted in such case)
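The rearranged form in instancenorm_4 is algebraically the canonical instance norm; a quick numpy check (illustration only, not converter code):

```python
import numpy as np

rng = np.random.default_rng(0)
x = rng.standard_normal((2, 3, 8, 8))        # rank-4 input, NCHW assumed
mean = x.mean(axis=(-2, -1), keepdims=True)  # reduce over spatial dims
var = x.var(axis=(-2, -1), keepdims=True)
gamma, beta, eps = 1.5, 0.25, 1e-5

scale = gamma * (var + eps) ** -0.5          # gamma * rsqrt(variance + eps)
y_rearranged = x * scale + (beta - mean * scale)
y_canonical = gamma * (x - mean) / np.sqrt(var + eps) + beta
assert np.allclose(y_rearranged, y_canonical)
```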
@@ -12,7 +12,7 @@
from ...mil.passes import pass_registry

# IMPORTANT: List of assumptions we are making about the problem
-# 1) The user defined pattern has exactly one root variable, and one final output operation. As such, we will be searching for a singlular
+# 1) The user defined pattern has exactly one root variable, and one final output operation. As such, we will be searching for a singular
# root variable in the larger program, and using that root variable as a starting point for our pattern matching.
# And, we will only match one of the final operations for the larger program.
# 2) The root variable in the larger program, where we start off the pattern matching, must have the same number of child ops as the
@@ -45,7 +45,7 @@ class Pattern:
add_attribute(attribute_name, attribute): Adds an attribute to the pattern object. Can be useful for the user.
Verifies name using the attribute set mentioned above
add_op(op_name, op): Adds an operation to the pattern, as an attribute which can be accessed and as part of the op_set
-op_list(): convers the op_set to a list and returns it to make it easier for the user
+op_list(): converts the op_set to a list and returns it to make it easier for the user
"""

@@ -102,7 +102,7 @@ def _pattern_detected(pattern, program_op, pattern_op, program_root_var, pattern
for i in range(len(program_op.outputs) if pattern_op is not None else 1):
output_same = False

-# ASSUMTION: Assumming that the outputs of an operation are ordered in a particular way
+# ASSUMPTION: Assuming that the outputs of an operation are ordered in a particular way
# So, two identical operations will have the same ordering of outputs.
program_child_op_list = list(program_op.outputs[i].child_ops) if pattern_op is not None else program_root_var.child_ops
pattern_child_op_list = list(pattern_op.outputs[i].child_ops) if pattern_op is not None else pattern_root_var.child_ops
2 changes: 1 addition & 1 deletion coremltools/converters/mil/experimental/passes/readme.md
@@ -582,6 +582,6 @@ def transform_pattern(pattern):
## _**How to Add/Run a Pass**_
-* Write the passs, and save it in a file in the `coreml/coremltools/coremltools/converters/mil/experimental/passes` folder
+* Write the pass, and save it in a file in the `coreml/coremltools/coremltools/converters/mil/experimental/passes` folder
* Add an import line to the `coreml/coremltools/coremltools/converters/mil/mil/passes/init.py` file
* Run the experimental (generic) passes by setting the `ENABLE_EXPERIMENTAL_PASSES` environment variable to 1, which will override the regular passes with the same name
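For example, the flag can be set from Python before importing coremltools (a sketch; setting it in the shell works equally well, and the timing assumption is that pass registration reads the variable at import):

```python
import os

os.environ["ENABLE_EXPERIMENTAL_PASSES"] = "1"  # set before import (assumed)

import coremltools  # experimental passes now override same-named regular passes
```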
2 changes: 1 addition & 1 deletion coremltools/converters/mil/frontend/_utils.py
@@ -35,7 +35,7 @@ def value_at(x: Var, idx: int, name=None, before_op=None):

def _reverse_input_einsum_eq(equation: str) -> str:
"""
-Reverse the input order of the einsum eqaution
+Reverse the input order of the einsum equation
e.g.:
input : "nchw,nwhu->nchu"
returns : "nwhu,nchw->nchu"
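A standalone sketch consistent with the docstring example (a minimal re-implementation for illustration, assuming exactly two comma-separated inputs as in the example):

```python
def reverse_input_einsum_eq(equation: str) -> str:
    # Split "inputs->output", then swap the two comma-separated inputs.
    input_str, output_str = equation.split("->")
    first, second = input_str.split(",")
    return f"{second},{first}->{output_str}"

assert reverse_input_einsum_eq("nchw,nwhu->nchu") == "nwhu,nchw->nchu"
```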
8 changes: 4 additions & 4 deletions coremltools/converters/mil/frontend/milproto/load.py
@@ -176,7 +176,7 @@ def _load_value(context, value_spec):
def _create_var_from_spec(spec):
"""
This helper function is used for creating PyMIL Var/ListVar from the proto spec.
-Mainly used for the contruction of the control flow ops.
+Mainly used for the construction of the control flow ops.
"""
assert isinstance(spec, pm.NamedValueType)
sym_type = proto_to_types(spec.type)
@@ -214,7 +214,7 @@ def _create_nested_blocks(context, op_spec):
for v in input_vars:
context.register_var_with_name(v.name, v)

-# In pymil, the outer_op for a block can only be None if the block is a Functino.
+# In pymil, the outer_op for a block can only be None if the block is a Function.
# As the result, we use a dummy outer_op here for block creation, and set it to
# the legit op later on in _set_outer_op_for_nested_blocks
dummy = mb.const(val=0.)
@@ -286,7 +286,7 @@ def _load_operation(context, op_spec):
# For that case, we directly create a constant variable.

# (ii) Create nested blocks for control flow operations:
-# The Python functinoal input arguments for control flow ops cannot be recovered from milproto -> pymil conversion,
+# The Python functional input arguments for control flow ops cannot be recovered from milproto -> pymil conversion,
# for instance, the _body, _cond for mb.while_loop and _true_fn, _false_fn for mb.cond are not invertible
# Hence, here we directly create the nested blocks from the proto, and set them to mb.while_loop.blocks / mb.cond.blocks.
# Note that, when creating a block, PyMIL required an outer_op, which should be the control flow operation itself. However,
@@ -295,7 +295,7 @@
# for the creation of the block.

# (iii) Create PyMIL operation using inputs / blocks
-# Note that for the control flow cases, we create dummy functional inputs, and use the exisiting block to create the op.
+# Note that for the control flow cases, we create dummy functional inputs, and use the existing block to create the op.

# (iv) Set the outer_op for control flow
# Once the operation is created, we replace the dummy outer_op with the legit one, to make it a valid PyMIL program
@@ -330,7 +330,7 @@ def _validate_and_update_main_output_types(self, prog):
# validation
if get_output_names(self.main_output_types) is None:
# this is the case, where the user did not provide names for the outputs.
-# In this case, the outputs were inferred from the TF graph autmatically.
+# In this case, the outputs were inferred from the TF graph automatically.
# There are two scenarios here: number of inferred outputs equal to 1 or greater than 1
if len(output_vars) == 1:
if len(self.main_output_types) > 1:
8 changes: 4 additions & 4 deletions coremltools/converters/mil/frontend/tensorflow/ops.py
@@ -884,10 +884,10 @@ def Pow(context, node):
def DepthwiseConv2dNative(context, node):
# [kH, kW, C_in, multiplier]
W_hwim = context[node.inputs[1]] # m = multiplier
-# [kH, kW, 1, C_in * multipler]
+# [kH, kW, 1, C_in * multiplier]
shape_hw1o = list(W_hwim.shape[:2]) + [1, W_hwim.shape[2] * W_hwim.shape[3]]
W_hw1o = mb.reshape(x=W_hwim, shape=shape_hw1o)
-# [C_in * multipler, 1, kH, kW]. Note that C_in * multiplier = C_out in
+# [C_in * multiplier, 1, kH, kW]. Note that C_in * multiplier = C_out in
# MIL. C_in / groups = 1 in depthwise conv.
W_o1hw = mb.transpose(x=W_hw1o, perm=[3, 2, 0, 1])
data_format = node.attr.get("data_format", "NHWC")
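The two layout comments above correspond to a reshape plus a transpose; a numpy illustration of the same re-layout (shapes invented for the example):

```python
import numpy as np

kH, kW, C_in, multiplier = 3, 3, 8, 2
W_hwim = np.random.rand(kH, kW, C_in, multiplier)     # TF depthwise layout

# [kH, kW, 1, C_in * multiplier]
W_hw1o = W_hwim.reshape(kH, kW, 1, C_in * multiplier)
# [C_in * multiplier, 1, kH, kW]: C_out first; C_in / groups = 1 for depthwise
W_o1hw = W_hw1o.transpose(3, 2, 0, 1)
assert W_o1hw.shape == (C_in * multiplier, 1, kH, kW)
```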
@@ -1906,7 +1906,7 @@ def MirrorPad(context, node):

pad = pad.val

-# get axix which is non zero
+# get axis which is non zero
non_zero_axis = []
for i in range(len(pad)):
if not all(pad[i] == 0):
@@ -2378,7 +2378,7 @@ def _perform_gather_with_batch_dims(x, indices, batch_dims, gather_func, func_ar
# All results are stacked into a tensor with shape [prod(batch_1, ..., batch_n), *remaning_result_shape]
res = []
if batch_prod.val is None:
raise ValueError("batch dimenstion must be known at compile time")
raise ValueError("batch dimension must be known at compile time")
for i in range(batch_prod.val[0]):
temp_x = mb.gather(x=x_reshape, indices=[i], axis=0)
temp_indices = mb.gather(x=indices_reshape, indices=[i], axis=0)
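A numpy sketch of the per-batch gather-and-stack loop above (gathering along the last axis; shapes invented for the example, not taken from the converter):

```python
import numpy as np

x = np.arange(2 * 3 * 4).reshape(2, 3, 4)      # one leading batch dim
indices = np.array([[2, 0, 1], [1, 1, 3]])     # one index row per batch element

# Gather per batch element, then stack back into a single tensor.
res = np.stack([np.take(x[b], indices[b], axis=-1) for b in range(x.shape[0])])
assert res.shape == (2, 3, 3)
```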
@@ -24,7 +24,7 @@ class tf_lstm_to_core_lstm(AbstractGraphPass):
"""
Try to map TF dialect ops `tf_lstm_block` and `tf_lstm_block_cell` to
`lstm` in the core op set if compatible. They are compatible if all of the
-followings are satisfied:
+following are satisfied:
- If tf_lstm_block: only h output is consumed. tf_lstm_block has 7
sequence outputs: [i, cs, f, o, ci, co, h]. Each of them (e.g., i) has
@@ -31,9 +31,9 @@ def create_custom_selu(self):
def Selu(context, node):
x = context[node.inputs[0]]
alpha = 1.6732631921768188
-lamda = 1.0507010221481323
+lmda = 1.0507010221481323
out_elu = mb.elu(x=x, alpha=alpha)
-out = mb.mul(x=out_elu, y=lamda, name=node.name)
+out = mb.mul(x=out_elu, y=lmda, name=node.name)
context.add(node.name, out)

yield
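A numpy check that SELU is scale * ELU(x, alpha) with the constants above (illustration only, independent of the MIL builder):

```python
import numpy as np

alpha, scale = 1.6732631921768188, 1.0507010221481323
x = np.linspace(-3.0, 3.0, 7)

elu = np.where(x > 0, x, alpha * (np.exp(x) - 1))
selu = scale * elu
expected = np.where(x > 0, scale * x, scale * alpha * (np.exp(x) - 1))
assert np.allclose(selu, expected)
```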
@@ -70,7 +70,7 @@ def type_inference(self):
x_type = self.x.dtype
x_shape = self.x.shape
y_shape = self.y.shape
-# For illustration purpose, assumming getting valid shape
+# For illustration purpose, assuming getting valid shape
# Ideally, should consider transpose_?, ?_is_sparse parameters into consideration
# for computing output shape
return types.tensor(x_type, [x_shape[0], y_shape[1]])
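The returned type follows the usual matmul shape rule; a one-line numpy confirmation (transposes and sparsity ignored, as the comment above notes):

```python
import numpy as np

x, y = np.zeros((4, 5)), np.zeros((5, 7))
assert (x @ y).shape == (x.shape[0], y.shape[1]) == (4, 7)
```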
@@ -159,7 +159,7 @@ def build_model(x, y):
assert layers[-1].custom is not None, "Expecting a custom layer"
assert (
"SparseMatMul" == layers[-1].custom.className
), "Custom Layer class name mis-match"
), "Custom Layer class name mismatch"
assert (
transpose_a == layers[-1].custom.parameters["transpose_x"].boolValue
), "Incorrect parameter value k"
@@ -279,7 +279,7 @@ def build_model(x):
assert layers[-1].custom is not None, "Expecting a custom layer"
assert (
"TopK" == layers[-1].custom.className
), "Custom Layer class name mis-match"
), "Custom Layer class name mismatch"
assert (
k == layers[-1].custom.parameters["k"].intValue
), "Incorrect parameter value k"