Merge pull request keras-team#15543 from kianmeng:fix-typos
PiperOrigin-RevId: 407106182
tensorflower-gardener committed Nov 2, 2021
2 parents a49764c + d67a43a commit 863e385
Showing 41 changed files with 65 additions and 64 deletions.
8 changes: 4 additions & 4 deletions CONTRIBUTING.md
@@ -136,7 +136,7 @@ python3 -m venv venv_dir
```

You can activate the venv with the following command. You should always run the
-tests with the venv activated. You need to activate the venv everytime you open
+tests with the venv activated. You need to activate the venv every time you open
a new shell.

```shell
@@ -185,12 +185,12 @@ in the `BUILD` file.
### Run a single test case

The best way to run a single test case is to comment out the rest of the test
-cases in a file before runing the test file.
+cases in a file before running the test file.

### Run all tests

-You can run all the tests locally by running the following commmand
-in the repo root directory.
+You can run all the tests locally by running the following command in the repo
+root directory.

```
bazel test --test_timeout 300,450,1200,3600 --test_output=errors --keep_going --define=use_fast_cpp_protos=false --build_tests_only --build_tag_filters=-no_oss --test_tag_filters=-no_oss keras/...
2 changes: 1 addition & 1 deletion keras/applications/mobilenet_v2.py
@@ -481,7 +481,7 @@ def _inverted_res_block(inputs, expansion, stride, alpha, filters, block_id):

x = layers.ReLU(6., name=prefix + 'depthwise_relu')(x)

-# Project wiht a pointwise 1x1 convolution.
+# Project with a pointwise 1x1 convolution.
x = layers.Conv2D(
pointwise_filters,
kernel_size=1,
2 changes: 1 addition & 1 deletion keras/benchmarks/BUILD
@@ -27,7 +27,7 @@ filegroup(
# to the regular expression is executed.
# e.g. --benchmarks=".*lstm*." will run all lstm layer related benchmarks.

-# Add all benchmarks related utils here for pip testing dependencis.
+# Add all benchmarks related utils here for pip testing dependencies.
py_library(
name = "keras_benchmark_lib_pip",
srcs_version = "PY3",
@@ -128,7 +128,7 @@ def benchmark_antirectifier_bs_512_gpu_2(self):


class Antirectifier(tf.keras.layers.Layer):
"""Build simple custome layer."""
"""Build simple custom layer."""

def __init__(self, initializer="he_normal", **kwargs):
super(Antirectifier, self).__init__(**kwargs)
4 changes: 2 additions & 2 deletions keras/callbacks.py
@@ -599,7 +599,7 @@ class Callback:
1. You should pack all your callbacks into a single `callbacks.CallbackList`
so they can all be called together.
-2. You will need to manually call all the `on_*` methods at the apropriate
+2. You will need to manually call all the `on_*` methods at the appropriate
locations in your loop. Like this:
```
@@ -2360,7 +2360,7 @@ def _init_profile_batch(self, profile_batch):
of positive integers signify a range of batches to profile.
Raises:
-ValueError: If profile_batch is not an integer or a comma seperated pair
+ValueError: If profile_batch is not an integer or a comma separated pair
of positive integers.
"""
4 changes: 2 additions & 2 deletions keras/distribute/BUILD
@@ -771,7 +771,7 @@ distribute_py_test(
shard_count = 50,
tags = [
"multi_gpu",
"no_oss", # TODO(b/183640564): Reenable
"no_oss", # TODO(b/183640564): Re-enable
"no_rocm",
"nomultivm", # TODO(b/170502145)
"notsan", # TODO(b/184542721)
@@ -795,7 +795,7 @@ distribute_py_test(
shard_count = 21,
tags = [
"multi_gpu",
"no_oss", # TODO(b/183640564): Reenable
"no_oss", # TODO(b/183640564): Re-enable
"no_rocm",
"nomultivm", # TODO(b/170502145)
"notsan", # TODO(b/184542721)
2 changes: 1 addition & 1 deletion keras/engine/BUILD
@@ -516,7 +516,7 @@ tf_py_test(
srcs = ["training_utils_v1_test.py"],
python_version = "PY3",
tags = [
"no_oss", # TODO(b/135021748) reenable
"no_oss", # TODO(b/135021748) re-enable
"notsan",
],
deps = [
6 changes: 3 additions & 3 deletions keras/engine/base_layer.py
@@ -668,7 +668,7 @@ def getter(*args, **kwargs): # pylint: disable=function-redefined
return autocast_variable.create_autocast_variable(variable)
# Also the caching_device does not work with the mixed precision API,
# disable it if it is specified.
-# TODO(b/142020079): Reenable it once the bug is fixed.
+# TODO(b/142020079): Re-enable it once the bug is fixed.
if caching_device is not None:
tf_logging.warning(
'`caching_device` does not work with mixed precision API. Ignoring '
@@ -3410,7 +3410,7 @@ def _apply_name_scope_on_model_declaration(enable):


class BaseRandomLayer(Layer):
"""A layer handle the random nubmer creation and savemodel behavior."""
"""A layer handle the random number creation and savemodel behavior."""

@tf.__internal__.tracking.no_automatic_dependency_tracking
def __init__(self, seed=None, force_generator=False, **kwargs):
@@ -3430,7 +3430,7 @@ def __init__(self, seed=None, force_generator=False, **kwargs):
seed: optional integer, used to create RandomGenerator.
force_generator: boolean, default to False, whether to force the
RandomGenerator to use the code branch of tf.random.Generator.
-**kwargs: other keyward arguements that will be passed to the parent class
+**kwargs: other keyword arguments that will be passed to the parent class
"""
super().__init__(**kwargs)
self._random_generator = backend.RandomGenerator(
2 changes: 1 addition & 1 deletion keras/engine/base_layer_v1.py
@@ -413,7 +413,7 @@ def getter(*args, **kwargs): # pylint: disable=function-redefined
return autocast_variable.create_autocast_variable(variable)
# Also the caching_device does not work with the mixed precision API,
# disable it if it is specified.
-# TODO(b/142020079): Reenable it once the bug is fixed.
+# TODO(b/142020079): Re-enable it once the bug is fixed.
if caching_device is not None:
tf_logging.warning(
'`caching_device` does not work with mixed precision API. Ignoring '
2 changes: 1 addition & 1 deletion keras/engine/data_adapter.py
@@ -74,7 +74,7 @@ def can_handle(x, y=None):
"""Whether the current DataAdapter could handle the input x and y.
Structure wise, x and y can be single object, or list of objects if there
-multiple input/output, or dictionary of objects when the intput/output are
+multiple input/output, or dictionary of objects when the input/output are
named.
Args:
4 changes: 2 additions & 2 deletions keras/engine/functional.py
@@ -715,7 +715,7 @@ def from_config(cls, config, custom_objects=None):
return model
# The config does not contain all the information necessary to revive a
# Functional model. This happens when the user creates subclassed models
-# with a Functional constructor and has overriden the `get_config` method
+# with a Functional constructor and has overridden the `get_config` method
# to return a completely new dictionary.
try:
return cls(**config)
@@ -769,7 +769,7 @@ def _validate_graph_inputs_and_outputs(self):
for x in self.inputs])
input_batch_sizes.discard(None)
if len(input_batch_sizes) > 1:
-logging.warning('Found incompatiable static batch sizes among the '
+logging.warning('Found incompatible static batch sizes among the '
f'inputs. Batch sizes: {sorted(input_batch_sizes)}')

for x in self.outputs:
2 changes: 1 addition & 1 deletion keras/engine/functional_utils_test.py
@@ -138,7 +138,7 @@ def test_build_model_from_intermediate_tensor(self):
loaded_model = models.load_model(output_path)
self.assertEqual(model.summary(), loaded_model.summary())

-# Also make sure the orignal inputs and y can still be used to build model
+# Also make sure the original inputs and y can still be used to build model
new_model = models.Model(inputs, y)
# Make sure no new node is attached to layer2
self.assertLen(layer2.inbound_nodes, 2)
4 changes: 2 additions & 2 deletions keras/engine/training_test.py
@@ -1125,7 +1125,7 @@ def call(self, inputs):
training_module.Model([input1, input2], outputs)
self.assertEqual(
mock_warn.call_args_list[0][0][0],
-'Found incompatiable static batch sizes among the inputs. '
+'Found incompatible static batch sizes among the inputs. '
'Batch sizes: [2, 3]')

@combinations.generate(combinations.combine(mode=['graph', 'eager']))
@@ -3511,7 +3511,7 @@ def call(self, x):

@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def DISABLED_test_add_metric_invalid_aggregation(self):
-# TODO(psv): Reenable test once it is fixed.
+# TODO(psv): Re-enable test once it is fixed.
x = layers_module.Input(shape=(1,))
y = layers_module.Dense(1, kernel_initializer='ones')(x)
model = training_module.Model(x, y)
6 changes: 3 additions & 3 deletions keras/initializers/initializers_v1.py
@@ -77,7 +77,7 @@ class RandomNormal(tf.compat.v1.random_normal_initializer):
Random seed behavior:
Also be aware that if you pass a seed to the TF2 initializer
API it will reuse that same seed for every single initialization
-(unlike the TF1 intializer)
+(unlike the TF1 initializer)
#### Structural Mapping to Native TF2
@@ -192,7 +192,7 @@ class RandomUniform(tf.compat.v1.random_uniform_initializer):
Also be aware that if you pass a seed to the TF2 initializer
API it will reuse that same seed for every single initialization
-(unlike the TF1 intializer)
+(unlike the TF1 initializer)
#### Structural Mapping to Native TF2
@@ -313,7 +313,7 @@ class TruncatedNormal(tf.compat.v1.truncated_normal_initializer):
Random seed behavior:
Also be aware that if you pass a seed to the TF2 initializer
API it will reuse that same seed for every single initialization
-(unlike the TF1 intializer)
+(unlike the TF1 initializer)
#### Structural Mapping to Native TF2
10 changes: 5 additions & 5 deletions keras/integration_test/BUILD
@@ -170,7 +170,7 @@ distribute_py_test(
shard_count = 50,
tags = [
"multi_gpu",
"no_oss", # TODO(b/183640564): Reenable
"no_oss", # TODO(b/183640564): Re-enable
"no_rocm",
"noasan", # TODO(b/184542721)
"nomsan", # TODO(b/184542721)
@@ -216,7 +216,7 @@ distribute_py_test(
shard_count = 50,
tags = [
"multi_gpu",
"no_oss", # TODO(b/183640564): Reenable
"no_oss", # TODO(b/183640564): Re-enable
"no_rocm",
"noasan", # TODO(b/184542721)
"nomsan", # TODO(b/184542721)
@@ -237,7 +237,7 @@ distribute_py_test(
shard_count = 50,
tags = [
"multi_gpu",
"no_oss", # TODO(b/183640564): Reenable
"no_oss", # TODO(b/183640564): Re-enable
"no_rocm",
"noasan", # TODO(b/184542721)
"nomsan", # TODO(b/184542721)
@@ -258,7 +258,7 @@ distribute_py_test(
shard_count = 50,
tags = [
"multi_gpu",
"no_oss", # TODO(b/183640564): Reenable
"no_oss", # TODO(b/183640564): Re-enable
"no_rocm",
"noasan", # TODO(b/184542721)
"nomsan", # TODO(b/184542721)
@@ -278,7 +278,7 @@ distribute_py_test(
python_version = "PY3",
tags = [
"multi_gpu",
"no_oss", # TODO(b/183640564): Reenable
"no_oss", # TODO(b/183640564): Re-enable
"no_rocm",
"noasan", # TODO(b/184542721)
"nomsan", # TODO(b/184542721)
@@ -258,7 +258,7 @@ def serve_fn(raw_features):
self.assertIn(prediction0, ("yes", "no"))

prediction1 = loaded_serving_fn(
tf.constant(["ironman", "ironman", "unkonwn"]))["output_0"]
tf.constant(["ironman", "ironman", "unknown"]))["output_0"]
self.assertIn(prediction1, ("yes", "no"))


2 changes: 1 addition & 1 deletion keras/integration_test/preprocessing_test_utils.py
@@ -66,7 +66,7 @@ def make_preprocessing_model(file_dir):
normalization = preprocessing.Normalization()
normalization.adapt(ds.map(lambda features, labels: features["float_col"]))
float_out = normalization(float_in)
-# Lookup ints by adapting a vocab of interger IDs.
+# Lookup ints by adapting a vocab of integer IDs.
int_lookup = preprocessing.IntegerLookup()
int_lookup.adapt(ds.map(lambda features, labels: features["int_col"]))
int_out = int_lookup(int_in)
2 changes: 1 addition & 1 deletion keras/integration_test/tpu_strategy_test.py
@@ -232,7 +232,7 @@ def serve_fn(raw_features):
self.assertIn(prediction1, ("yes", "no"))

prediction2 = loaded_serving_fn(
tf.constant(["ironman", "ironman", "unkonwn"]))["output_0"]
tf.constant(["ironman", "ironman", "unknown"]))["output_0"]
self.assertIn(prediction2, ("yes", "no"))


2 changes: 1 addition & 1 deletion keras/layers/convolutional.py
@@ -195,7 +195,7 @@ def build(self, input_shape):
self.filters)

# compute_output_shape contains some validation logic for the input shape,
-# and make sure the output shape has all positive dimentions.
+# and make sure the output shape has all positive dimensions.
self.compute_output_shape(input_shape)

self.kernel = self.add_weight(
2 changes: 1 addition & 1 deletion keras/layers/core/core_test.py
@@ -114,7 +114,7 @@ def test_dropout_with_savemodel(self):
predict2 = loaded_model(np.ones((20, 5, 10)))

self.assertAllClose(predict, predict2)
-# Make sure the model droput different value after loading
+# Make sure the model dropout different value after loading
train2 = loaded_model(np.ones((20, 5, 10)), training=True)
self.assertNotAllClose(train, train2)
self.assertIsNotNone(loaded_model.layers[1]._random_generator)
2 changes: 1 addition & 1 deletion keras/layers/core/lambda_layer.py
@@ -215,7 +215,7 @@ def _check_variables(self, created_variables, accessed_variables):
but are not tracked by said layer:
{variable_str}
The layer cannot safely ensure proper Variable reuse across multiple
-calls, and consquently this behavior is disallowed for safety. Lambda
+calls, and consequently this behavior is disallowed for safety. Lambda
layers are not well suited to stateful computation; instead, writing a
subclassed Layer is the recommend way to define layers with
Variables.""").format(
2 changes: 1 addition & 1 deletion keras/layers/core/tf_op_layer.py
@@ -279,7 +279,7 @@ def _check_variables(self, created_variables, accessed_variables):
'The following Variables were created within a Lambda layer '
f'({self.name}) but are not tracked by said layer: {variable_str}\n'
'The layer cannot safely ensure proper Variable reuse '
-'across multiple calls, and consquently this behavior is disallowed '
+'across multiple calls, and consequently this behavior is disallowed '
'for safety reasons. Lambda layers are not well suited for stateful '
'computation; instead, writing a subclassed Layer is the recommend '
'way to define layers with Variables.')
4 changes: 2 additions & 2 deletions keras/layers/normalization/layer_normalization_test.py
@@ -197,7 +197,7 @@ def _test_forward_pass(self, batch_input_shape, axis, fp64_tol=1e-14,
Args:
batch_input_shape: The input shape that will be used to test, including
the batch dimension.
-axis: A list of axises to normalize. Will be passed to the `axis` argument
+axis: A list of axes to normalize. Will be passed to the `axis` argument
of Layerlayer_normalization.
fp64_tol: The relative and absolute tolerance for float64.
fp32_tol: The relative and absolute tolerance for float32.
@@ -255,7 +255,7 @@ def _test_backward_pass(self, batch_input_shape, axis, fp64_tol=1e-5,
Args:
batch_input_shape: The input shape that will be used to test, including
the batch dimension.
-axis: A list of axises to normalize. Will be passed to the `axis` argument
+axis: A list of axes to normalize. Will be passed to the `axis` argument
of Layerlayer_normalization.
fp64_tol: The relative and absolute tolerance for float64.
fp32_tol: The relative and absolute tolerance for float32.
2 changes: 1 addition & 1 deletion keras/layers/preprocessing/discretization.py
@@ -218,7 +218,7 @@ def __init__(self,
if "dtype" not in kwargs or kwargs["dtype"] is None:
kwargs["dtype"] = tf.int64 if output_mode == INT else backend.floatx()
elif output_mode == "int" and not tf.as_dtype(kwargs["dtype"]).is_integer:
-# Compat for when dtype was alwyas floating and ingored by the layer.
+# Compat for when dtype was always floating and ignored by the layer.
kwargs["dtype"] = tf.int64

super().__init__(**kwargs)
2 changes: 1 addition & 1 deletion keras/layers/preprocessing/hashing.py
@@ -168,7 +168,7 @@ def __init__(self,
if 'dtype' not in kwargs or kwargs['dtype'] is None:
kwargs['dtype'] = tf.int64 if output_mode == INT else backend.floatx()
elif output_mode == 'int' and not tf.as_dtype(kwargs['dtype']).is_integer:
-# Compat for when dtype was alwyas floating and ingored by the layer.
+# Compat for when dtype was always floating and ignored by the layer.
kwargs['dtype'] = tf.int64

super().__init__(**kwargs)
9 changes: 5 additions & 4 deletions keras/layers/preprocessing/index_lookup.py
@@ -268,7 +268,7 @@ def __init__(self,
# value of the index_lookup table.
self._default_value = self._oov_start_index()
else:
-# If we hav multiple OOV values, we need to do a further hashing step;
+# If we have multiple OOV values, we need to do a further hashing step;
# to make this easier, we set the OOV value to -1. (This lets us do a
# vectorized add and cast to boolean to determine locations where we
# need to do extra hashing.)
@@ -293,7 +293,7 @@ def __init__(self,
# to be uninitialized as a StaticHashTable cannot be initialized twice.
self.lookup_table = self._uninitialized_lookup_table()

-# Only set up adapt state if we did not recieve a vocab on construction.
+# Only set up adapt state if we did not receive a vocab on construction.
if not self._has_input_vocabulary:
# Add a custom weight handler to return the layers vocab as it's weight.
self._add_trackable(VocabWeightHandler(self), False)
@@ -648,8 +648,9 @@ def _lookup_dense(self, inputs):
"""Lookup table values for a dense Tensor, handling masking and OOV."""
# When executing eagerly and tracing keras.Inputs, do not call lookup. This
# is critical for restoring SavedModel, which will first trace layer.call
-# and then attempt to restore the table. We need the table to be unitialized
-# for the restore to work, but calling the table unitialized would error.
+# and then attempt to restore the table. We need the table to be
+# uninitialized for the restore to work, but calling the table uninitialized
+# would error.
if tf.executing_eagerly() and backend.is_keras_tensor(inputs):
lookups = tf.zeros_like(inputs, dtype=self._value_dtype)
else:
2 changes: 1 addition & 1 deletion keras/layers/preprocessing/preprocessing_stage.py
@@ -123,7 +123,7 @@ class FunctionalPreprocessingStage(functional.Functional,
Args:
inputs: An input tensor (must be created via `tf.keras.Input()`), or a list,
-a dict, or a nested strcture of input tensors.
+a dict, or a nested structure of input tensors.
outputs: An output tensor, or a list, a dict or a nested structure of output
tensors.
name: String, optional. Name of the preprocessing stage.