From d67a43aa8cf2a6618bbd103869c477e449e7428d Mon Sep 17 00:00:00 2001
From: "Kian-Meng, Ang"
Date: Mon, 25 Oct 2021 06:32:00 +0800
Subject: [PATCH] Fix typos

---
 CONTRIBUTING.md                                         | 6 +++---
 keras/applications/mobilenet_v2.py                      | 2 +-
 keras/benchmarks/BUILD                                  | 2 +-
 .../antirectifier_benchmark_test.py                     | 2 +-
 keras/callbacks.py                                      | 4 ++--
 keras/distribute/BUILD                                  | 4 ++--
 keras/engine/BUILD                                      | 2 +-
 keras/engine/base_layer.py                              | 6 +++---
 keras/engine/base_layer_v1.py                           | 2 +-
 keras/engine/data_adapter.py                            | 2 +-
 keras/engine/functional.py                              | 4 ++--
 keras/engine/functional_utils_test.py                   | 2 +-
 keras/engine/training_test.py                           | 4 ++--
 keras/initializers/initializers_v1.py                   | 6 +++---
 keras/integration_test/BUILD                            | 10 +++++-----
 .../parameter_server_keras_preprocessing_test.py        | 2 +-
 keras/integration_test/preprocessing_test_utils.py      | 2 +-
 keras/integration_test/tpu_strategy_test.py             | 2 +-
 keras/layers/convolutional.py                           | 2 +-
 keras/layers/core/core_test.py                          | 2 +-
 keras/layers/core/lambda_layer.py                       | 2 +-
 keras/layers/core/tf_op_layer.py                        | 2 +-
 keras/layers/normalization/layer_normalization_test.py  | 4 ++--
 keras/layers/preprocessing/discretization.py            | 2 +-
 keras/layers/preprocessing/hashing.py                   | 2 +-
 keras/layers/preprocessing/index_lookup.py              | 8 ++++----
 keras/layers/preprocessing/preprocessing_stage.py       | 2 +-
 keras/layers/wrappers.py                                | 2 +-
 keras/losses.py                                         | 2 +-
 keras/metrics.py                                        | 4 ++--
 keras/models.py                                         | 2 +-
 keras/optimizer_experimental/optimizer.py               | 2 +-
 keras/optimizer_v2/optimizer_v2.py                      | 4 ++--
 keras/regularizers.py                                   | 2 +-
 keras/saving/saved_model/layer_serialization.py         | 2 +-
 keras/saving/saved_model/load.py                        | 2 +-
 keras/utils/data_utils.py                               | 2 +-
 keras/utils/generic_utils.py                            | 2 +-
 keras/utils/kpl_test_utils.py                           | 2 +-
 keras/utils/metrics_utils.py                            | 2 +-
 keras/utils/vis_utils.py                                | 4 ++--
 41 files changed, 62 insertions(+), 62 deletions(-)

diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 4804ed46440..9a77674e61c 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -136,7 +136,7 @@ python3 -m venv venv_dir
 ```

 You can activate the venv with the following command. You should always run the
-tests with the venv activated. You need to activate the venv everytime you open
+tests with the venv activated. You need to activate the venv every time you open
 a new shell.

 ```shell
@@ -185,11 +185,11 @@ in the `BUILD` file.
 ### Run a single test case

 The best way to run a single test case is to comment out the rest of the test
-cases in a file before runing the test file.
+cases in a file before running the test file.

 ### Run all tests

-You can run all the tests locally by running the following commmand
+You can run all the tests locally by running the following command
 in the repo root directory.

 ```
diff --git a/keras/applications/mobilenet_v2.py b/keras/applications/mobilenet_v2.py
index 7aa79d299fc..eeacdb0c2de 100644
--- a/keras/applications/mobilenet_v2.py
+++ b/keras/applications/mobilenet_v2.py
@@ -481,7 +481,7 @@ def _inverted_res_block(inputs, expansion, stride, alpha, filters, block_id):

   x = layers.ReLU(6., name=prefix + 'depthwise_relu')(x)

-  # Project wiht a pointwise 1x1 convolution.
+  # Project with a pointwise 1x1 convolution.
   x = layers.Conv2D(
       pointwise_filters,
       kernel_size=1,
diff --git a/keras/benchmarks/BUILD b/keras/benchmarks/BUILD
index a2bb9a0a470..120d70766d9 100644
--- a/keras/benchmarks/BUILD
+++ b/keras/benchmarks/BUILD
@@ -27,7 +27,7 @@ filegroup(
 # to the regular expression is executed.
 # e.g. --benchmarks=".*lstm*." will run all lstm layer related benchmarks.

-# Add all benchmarks related utils here for pip testing dependencis.
+# Add all benchmarks related utils here for pip testing dependencies.
 py_library(
     name = "keras_benchmark_lib_pip",
     srcs_version = "PY3",
diff --git a/keras/benchmarks/keras_examples_benchmarks/antirectifier_benchmark_test.py b/keras/benchmarks/keras_examples_benchmarks/antirectifier_benchmark_test.py
index 07826d33b38..085fd4eb4ac 100644
--- a/keras/benchmarks/keras_examples_benchmarks/antirectifier_benchmark_test.py
+++ b/keras/benchmarks/keras_examples_benchmarks/antirectifier_benchmark_test.py
@@ -128,7 +128,7 @@ def benchmark_antirectifier_bs_512_gpu_2(self):


 class Antirectifier(tf.keras.layers.Layer):
-  """Build simple custome layer."""
+  """Build simple custom layer."""

   def __init__(self, initializer="he_normal", **kwargs):
     super(Antirectifier, self).__init__(**kwargs)
diff --git a/keras/callbacks.py b/keras/callbacks.py
index 6f46a71b2b3..16bee9073ec 100644
--- a/keras/callbacks.py
+++ b/keras/callbacks.py
@@ -599,7 +599,7 @@ class Callback:
   1. You should pack all your callbacks into a single `callbacks.CallbackList`
      so they can all be called together.

-  2. You will need to manually call all the `on_*` methods at the apropriate
+  2. You will need to manually call all the `on_*` methods at the appropriate
      locations in your loop. Like this:

   ```
@@ -2360,7 +2360,7 @@ def _init_profile_batch(self, profile_batch):
       of positive integers signify a range of batches to profile.

     Raises:
-      ValueError: If profile_batch is not an integer or a comma seperated pair
+      ValueError: If profile_batch is not an integer or a comma separated pair
         of positive integers.
     """

diff --git a/keras/distribute/BUILD b/keras/distribute/BUILD
index 765acd69f4e..9f9b80bca23 100644
--- a/keras/distribute/BUILD
+++ b/keras/distribute/BUILD
@@ -771,7 +771,7 @@ distribute_py_test(
     shard_count = 50,
     tags = [
         "multi_gpu",
-        "no_oss",  # TODO(b/183640564): Reenable
+        "no_oss",  # TODO(b/183640564): Re-enable
         "no_rocm",
         "nomultivm",  # TODO(b/170502145)
         "notsan",  # TODO(b/184542721)
@@ -795,7 +795,7 @@ distribute_py_test(
     shard_count = 21,
     tags = [
         "multi_gpu",
-        "no_oss",  # TODO(b/183640564): Reenable
+        "no_oss",  # TODO(b/183640564): Re-enable
         "no_rocm",
         "nomultivm",  # TODO(b/170502145)
         "notsan",  # TODO(b/184542721)
diff --git a/keras/engine/BUILD b/keras/engine/BUILD
index 871b6e22186..4400959a420 100644
--- a/keras/engine/BUILD
+++ b/keras/engine/BUILD
@@ -516,7 +516,7 @@ tf_py_test(
     srcs = ["training_utils_v1_test.py"],
    python_version = "PY3",
     tags = [
-        "no_oss",  # TODO(b/135021748) reenable
+        "no_oss",  # TODO(b/135021748) re-enable
         "notsan",
     ],
     deps = [
diff --git a/keras/engine/base_layer.py b/keras/engine/base_layer.py
index 04278fb96df..b4c30d05de2 100644
--- a/keras/engine/base_layer.py
+++ b/keras/engine/base_layer.py
@@ -668,7 +668,7 @@ def getter(*args, **kwargs):  # pylint: disable=function-redefined
           return autocast_variable.create_autocast_variable(variable)
         # Also the caching_device does not work with the mixed precision API,
         # disable it if it is specified.
-        # TODO(b/142020079): Reenable it once the bug is fixed.
+        # TODO(b/142020079): Re-enable it once the bug is fixed.
         if caching_device is not None:
           tf_logging.warning(
               '`caching_device` does not work with mixed precision API. Ignoring '
@@ -3410,7 +3410,7 @@ def _apply_name_scope_on_model_declaration(enable):


 class BaseRandomLayer(Layer):
-  """A layer handle the random nubmer creation and savemodel behavior."""
+  """A layer handle the random number creation and savemodel behavior."""

   @tf.__internal__.tracking.no_automatic_dependency_tracking
   def __init__(self, seed=None, force_generator=False, **kwargs):
@@ -3430,7 +3430,7 @@ def __init__(self, seed=None, force_generator=False, **kwargs):
       seed: optional integer, used to create RandomGenerator.
       force_generator: boolean, default to False, whether to force the
         RandomGenerator to use the code branch of tf.random.Generator.
-      **kwargs: other keyward arguements that will be passed to the parent class
+      **kwargs: other keyword arguments that will be passed to the parent class
     """
     super().__init__(**kwargs)
     self._random_generator = backend.RandomGenerator(
diff --git a/keras/engine/base_layer_v1.py b/keras/engine/base_layer_v1.py
index fad3da1688c..4c075fc2320 100644
--- a/keras/engine/base_layer_v1.py
+++ b/keras/engine/base_layer_v1.py
@@ -413,7 +413,7 @@ def getter(*args, **kwargs):  # pylint: disable=function-redefined
           return autocast_variable.create_autocast_variable(variable)
         # Also the caching_device does not work with the mixed precision API,
         # disable it if it is specified.
-        # TODO(b/142020079): Reenable it once the bug is fixed.
+        # TODO(b/142020079): Re-enable it once the bug is fixed.
         if caching_device is not None:
           tf_logging.warning(
               '`caching_device` does not work with mixed precision API. Ignoring '
diff --git a/keras/engine/data_adapter.py b/keras/engine/data_adapter.py
index 29c9e98f5a0..20dcca97b9b 100644
--- a/keras/engine/data_adapter.py
+++ b/keras/engine/data_adapter.py
@@ -74,7 +74,7 @@ def can_handle(x, y=None):
     """Whether the current DataAdapter could handle the input x and y.

     Structure wise, x and y can be single object, or list of objects if there
-    multiple input/output, or dictionary of objects when the intput/output are
+    multiple input/output, or dictionary of objects when the input/output are
     named.

     Args:
diff --git a/keras/engine/functional.py b/keras/engine/functional.py
index 8fcf79b070e..d7924f6be09 100644
--- a/keras/engine/functional.py
+++ b/keras/engine/functional.py
@@ -715,7 +715,7 @@ def from_config(cls, config, custom_objects=None):
       return model
     # The config does not contain all the information necessary to revive a
     # Functional model. This happens when the user creates subclassed models
-    # with a Functional constructor and has overriden the `get_config` method
+    # with a Functional constructor and has overridden the `get_config` method
     # to return a completely new dictionary.
     try:
       return cls(**config)
@@ -769,7 +769,7 @@ def _validate_graph_inputs_and_outputs(self):
          for x in self.inputs])
     input_batch_sizes.discard(None)
     if len(input_batch_sizes) > 1:
-      logging.warning('Found incompatiable static batch sizes among the '
+      logging.warning('Found incompatible static batch sizes among the '
                       f'inputs. Batch sizes: {sorted(input_batch_sizes)}')

     for x in self.outputs:
diff --git a/keras/engine/functional_utils_test.py b/keras/engine/functional_utils_test.py
index 802461d2903..c6a5a2ede29 100644
--- a/keras/engine/functional_utils_test.py
+++ b/keras/engine/functional_utils_test.py
@@ -138,7 +138,7 @@ def test_build_model_from_intermediate_tensor(self):
     loaded_model = models.load_model(output_path)
     self.assertEqual(model.summary(), loaded_model.summary())

-    # Also make sure the orignal inputs and y can still be used to build model
+    # Also make sure the original inputs and y can still be used to build model
     new_model = models.Model(inputs, y)
     # Make sure no new node is attached to layer2
     self.assertLen(layer2.inbound_nodes, 2)
diff --git a/keras/engine/training_test.py b/keras/engine/training_test.py
index 6993942be25..1d392d3ab2b 100644
--- a/keras/engine/training_test.py
+++ b/keras/engine/training_test.py
@@ -1125,7 +1125,7 @@ def call(self, inputs):
       training_module.Model([input1, input2], outputs)
     self.assertEqual(
         mock_warn.call_args_list[0][0][0],
-        'Found incompatiable static batch sizes among the inputs. '
+        'Found incompatible static batch sizes among the inputs. '
         'Batch sizes: [2, 3]')

   @combinations.generate(combinations.combine(mode=['graph', 'eager']))
@@ -3511,7 +3511,7 @@ def call(self, x):

   @keras_parameterized.run_all_keras_modes(always_skip_v1=True)
   def DISABLED_test_add_metric_invalid_aggregation(self):
-    # TODO(psv): Reenable test once it is fixed.
+    # TODO(psv): Re-enable test once it is fixed.
     x = layers_module.Input(shape=(1,))
     y = layers_module.Dense(1, kernel_initializer='ones')(x)
     model = training_module.Model(x, y)
diff --git a/keras/initializers/initializers_v1.py b/keras/initializers/initializers_v1.py
index 18df738fe7a..cabfa7b677f 100644
--- a/keras/initializers/initializers_v1.py
+++ b/keras/initializers/initializers_v1.py
@@ -77,7 +77,7 @@ class RandomNormal(tf.compat.v1.random_normal_initializer):
   Random seed behavior:
   Also be aware that if you pass a seed to the TF2 initializer
   API it will reuse that same seed for every single initialization
-  (unlike the TF1 intializer)
+  (unlike the TF1 initializer)

   #### Structural Mapping to Native TF2

@@ -192,7 +192,7 @@ class RandomUniform(tf.compat.v1.random_uniform_initializer):

   Also be aware that if you pass a seed to the TF2 initializer
   API it will reuse that same seed for every single initialization
-  (unlike the TF1 intializer)
+  (unlike the TF1 initializer)

   #### Structural Mapping to Native TF2

@@ -313,7 +313,7 @@ class TruncatedNormal(tf.compat.v1.truncated_normal_initializer):
   Random seed behavior:
   Also be aware that if you pass a seed to the TF2 initializer
   API it will reuse that same seed for every single initialization
-  (unlike the TF1 intializer)
+  (unlike the TF1 initializer)

   #### Structural Mapping to Native TF2

diff --git a/keras/integration_test/BUILD b/keras/integration_test/BUILD
index b21b1afb5d1..9747c1f7076 100644
--- a/keras/integration_test/BUILD
+++ b/keras/integration_test/BUILD
@@ -170,7 +170,7 @@ distribute_py_test(
     shard_count = 50,
     tags = [
         "multi_gpu",
-        "no_oss",  # TODO(b/183640564): Reenable
+        "no_oss",  # TODO(b/183640564): Re-enable
         "no_rocm",
         "noasan",  # TODO(b/184542721)
         "nomsan",  # TODO(b/184542721)
@@ -216,7 +216,7 @@ distribute_py_test(
     shard_count = 50,
     tags = [
         "multi_gpu",
-        "no_oss",  # TODO(b/183640564): Reenable
+        "no_oss",  # TODO(b/183640564): Re-enable
         "no_rocm",
         "noasan",  # TODO(b/184542721)
         "nomsan",  # TODO(b/184542721)
@@ -237,7 +237,7 @@ distribute_py_test(
     shard_count = 50,
     tags = [
         "multi_gpu",
-        "no_oss",  # TODO(b/183640564): Reenable
+        "no_oss",  # TODO(b/183640564): Re-enable
         "no_rocm",
         "noasan",  # TODO(b/184542721)
         "nomsan",  # TODO(b/184542721)
@@ -258,7 +258,7 @@ distribute_py_test(
     shard_count = 50,
     tags = [
         "multi_gpu",
-        "no_oss",  # TODO(b/183640564): Reenable
+        "no_oss",  # TODO(b/183640564): Re-enable
         "no_rocm",
         "noasan",  # TODO(b/184542721)
         "nomsan",  # TODO(b/184542721)
@@ -278,7 +278,7 @@ distribute_py_test(
     python_version = "PY3",
     tags = [
         "multi_gpu",
-        "no_oss",  # TODO(b/183640564): Reenable
+        "no_oss",  # TODO(b/183640564): Re-enable
         "no_rocm",
         "noasan",  # TODO(b/184542721)
         "nomsan",  # TODO(b/184542721)
diff --git a/keras/integration_test/parameter_server_keras_preprocessing_test.py b/keras/integration_test/parameter_server_keras_preprocessing_test.py
index 6c6e613a8f6..1f0b747b7cf 100644
--- a/keras/integration_test/parameter_server_keras_preprocessing_test.py
+++ b/keras/integration_test/parameter_server_keras_preprocessing_test.py
@@ -258,7 +258,7 @@ def serve_fn(raw_features):
     self.assertIn(prediction0, ("yes", "no"))

     prediction1 = loaded_serving_fn(
-        tf.constant(["ironman", "ironman", "unkonwn"]))["output_0"]
+        tf.constant(["ironman", "ironman", "unknown"]))["output_0"]
     self.assertIn(prediction1, ("yes", "no"))


diff --git a/keras/integration_test/preprocessing_test_utils.py b/keras/integration_test/preprocessing_test_utils.py
index c84ed95457a..ace50be2416 100644
--- a/keras/integration_test/preprocessing_test_utils.py
+++ b/keras/integration_test/preprocessing_test_utils.py
@@ -66,7 +66,7 @@ def make_preprocessing_model(file_dir):
   normalization = preprocessing.Normalization()
   normalization.adapt(ds.map(lambda features, labels: features["float_col"]))
   float_out = normalization(float_in)
-  # Lookup ints by adapting a vocab of interger IDs.
+  # Lookup ints by adapting a vocab of integer IDs.
   int_lookup = preprocessing.IntegerLookup()
   int_lookup.adapt(ds.map(lambda features, labels: features["int_col"]))
   int_out = int_lookup(int_in)
diff --git a/keras/integration_test/tpu_strategy_test.py b/keras/integration_test/tpu_strategy_test.py
index 42f7ec7b8dc..ba3c828f148 100644
--- a/keras/integration_test/tpu_strategy_test.py
+++ b/keras/integration_test/tpu_strategy_test.py
@@ -232,7 +232,7 @@ def serve_fn(raw_features):
     self.assertIn(prediction1, ("yes", "no"))

     prediction2 = loaded_serving_fn(
-        tf.constant(["ironman", "ironman", "unkonwn"]))["output_0"]
+        tf.constant(["ironman", "ironman", "unknown"]))["output_0"]
     self.assertIn(prediction2, ("yes", "no"))


diff --git a/keras/layers/convolutional.py b/keras/layers/convolutional.py
index 0cf0b6ba76a..01a66ab4719 100644
--- a/keras/layers/convolutional.py
+++ b/keras/layers/convolutional.py
@@ -195,7 +195,7 @@ def build(self, input_shape):
           self.filters)

     # compute_output_shape contains some validation logic for the input shape,
-    # and make sure the output shape has all positive dimentions.
+    # and make sure the output shape has all positive dimensions.
     self.compute_output_shape(input_shape)

     self.kernel = self.add_weight(
diff --git a/keras/layers/core/core_test.py b/keras/layers/core/core_test.py
index 9f75e63a03b..a8f378ab41d 100644
--- a/keras/layers/core/core_test.py
+++ b/keras/layers/core/core_test.py
@@ -114,7 +114,7 @@ def test_dropout_with_savemodel(self):
     predict2 = loaded_model(np.ones((20, 5, 10)))
     self.assertAllClose(predict, predict2)

-    # Make sure the model droput different value after loading
+    # Make sure the model dropout different value after loading
     train2 = loaded_model(np.ones((20, 5, 10)), training=True)
     self.assertNotAllClose(train, train2)
     self.assertIsNotNone(loaded_model.layers[1]._random_generator)
diff --git a/keras/layers/core/lambda_layer.py b/keras/layers/core/lambda_layer.py
index ea3b58e52fd..0b423073f93 100644
--- a/keras/layers/core/lambda_layer.py
+++ b/keras/layers/core/lambda_layer.py
@@ -215,7 +215,7 @@ def _check_variables(self, created_variables, accessed_variables):
       but are not tracked by said layer: {variable_str}

       The layer cannot safely ensure proper Variable reuse across multiple
-      calls, and consquently this behavior is disallowed for safety. Lambda
+      calls, and consequently this behavior is disallowed for safety. Lambda
       layers are not well suited to stateful computation; instead, writing a
       subclassed Layer is the recommend way to define layers with Variables.""").format(
diff --git a/keras/layers/core/tf_op_layer.py b/keras/layers/core/tf_op_layer.py
index 05e23f5e554..75d1612f9f7 100644
--- a/keras/layers/core/tf_op_layer.py
+++ b/keras/layers/core/tf_op_layer.py
@@ -279,7 +279,7 @@ def _check_variables(self, created_variables, accessed_variables):
           'The following Variables were created within a Lambda layer '
           f'({self.name}) but are not tracked by said layer: {variable_str}\n'
           'The layer cannot safely ensure proper Variable reuse '
-          'across multiple calls, and consquently this behavior is disallowed '
+          'across multiple calls, and consequently this behavior is disallowed '
           'for safety reasons. Lambda layers are not well suited for stateful '
           'computation; instead, writing a subclassed Layer is the recommend '
           'way to define layers with Variables.')
diff --git a/keras/layers/normalization/layer_normalization_test.py b/keras/layers/normalization/layer_normalization_test.py
index 3537ddb6951..b24eb1d70e7 100644
--- a/keras/layers/normalization/layer_normalization_test.py
+++ b/keras/layers/normalization/layer_normalization_test.py
@@ -197,7 +197,7 @@ def _test_forward_pass(self, batch_input_shape, axis, fp64_tol=1e-14,
     Args:
       batch_input_shape: The input shape that will be used to test, including
         the batch dimension.
-      axis: A list of axises to normalize. Will be passed to the `axis` argument
+      axis: A list of axes to normalize. Will be passed to the `axis` argument
         of Layerlayer_normalization.
       fp64_tol: The relative and absolute tolerance for float64.
       fp32_tol: The relative and absolute tolerance for float32.
@@ -255,7 +255,7 @@ def _test_backward_pass(self, batch_input_shape, axis, fp64_tol=1e-5,
     Args:
       batch_input_shape: The input shape that will be used to test, including
         the batch dimension.
-      axis: A list of axises to normalize. Will be passed to the `axis` argument
+      axis: A list of axes to normalize. Will be passed to the `axis` argument
         of Layerlayer_normalization.
       fp64_tol: The relative and absolute tolerance for float64.
       fp32_tol: The relative and absolute tolerance for float32.
diff --git a/keras/layers/preprocessing/discretization.py b/keras/layers/preprocessing/discretization.py
index 6beba3dea36..41cd7482902 100644
--- a/keras/layers/preprocessing/discretization.py
+++ b/keras/layers/preprocessing/discretization.py
@@ -218,7 +218,7 @@ def __init__(self,
     if "dtype" not in kwargs or kwargs["dtype"] is None:
       kwargs["dtype"] = tf.int64 if output_mode == INT else backend.floatx()
     elif output_mode == "int" and not tf.as_dtype(kwargs["dtype"]).is_integer:
-      # Compat for when dtype was alwyas floating and ingored by the layer.
+      # Compat for when dtype was always floating and ignored by the layer.
       kwargs["dtype"] = tf.int64

     super().__init__(**kwargs)
diff --git a/keras/layers/preprocessing/hashing.py b/keras/layers/preprocessing/hashing.py
index 75ffb9a5f14..e7555b36d95 100644
--- a/keras/layers/preprocessing/hashing.py
+++ b/keras/layers/preprocessing/hashing.py
@@ -168,7 +168,7 @@ def __init__(self,
     if 'dtype' not in kwargs or kwargs['dtype'] is None:
       kwargs['dtype'] = tf.int64 if output_mode == INT else backend.floatx()
     elif output_mode == 'int' and not tf.as_dtype(kwargs['dtype']).is_integer:
-      # Compat for when dtype was alwyas floating and ingored by the layer.
+      # Compat for when dtype was always floating and ignored by the layer.
       kwargs['dtype'] = tf.int64

     super().__init__(**kwargs)
diff --git a/keras/layers/preprocessing/index_lookup.py b/keras/layers/preprocessing/index_lookup.py
index 6b53a1779c2..f8bf6368ecd 100644
--- a/keras/layers/preprocessing/index_lookup.py
+++ b/keras/layers/preprocessing/index_lookup.py
@@ -268,7 +268,7 @@ def __init__(self,
         # value of the index_lookup table.
         self._default_value = self._oov_start_index()
       else:
-        # If we hav multiple OOV values, we need to do a further hashing step;
+        # If we have multiple OOV values, we need to do a further hashing step;
         # to make this easier, we set the OOV value to -1. (This lets us do a
         # vectorized add and cast to boolean to determine locations where we
         # need to do extra hashing.)
@@ -293,7 +293,7 @@ def __init__(self,
       # to be uninitialized as a StaticHashTable cannot be initialized twice.
       self.lookup_table = self._uninitialized_lookup_table()

-    # Only set up adapt state if we did not recieve a vocab on construction.
+    # Only set up adapt state if we did not receive a vocab on construction.
     if not self._has_input_vocabulary:
       # Add a custom weight handler to return the layers vocab as it's weight.
       self._add_trackable(VocabWeightHandler(self), False)
@@ -648,8 +648,8 @@ def _lookup_dense(self, inputs):
     """Lookup table values for a dense Tensor, handling masking and OOV."""
     # When executing eagerly and tracing keras.Inputs, do not call lookup. This
     # is critical for restoring SavedModel, which will first trace layer.call
-    # and then attempt to restore the table. We need the table to be unitialized
-    # for the restore to work, but calling the table unitialized would error.
+    # and then attempt to restore the table. We need the table to be uninitialized
+    # for the restore to work, but calling the table uninitialized would error.
     if tf.executing_eagerly() and backend.is_keras_tensor(inputs):
       lookups = tf.zeros_like(inputs, dtype=self._value_dtype)
     else:
diff --git a/keras/layers/preprocessing/preprocessing_stage.py b/keras/layers/preprocessing/preprocessing_stage.py
index 1cfb9cb9b41..2247f13b7aa 100644
--- a/keras/layers/preprocessing/preprocessing_stage.py
+++ b/keras/layers/preprocessing/preprocessing_stage.py
@@ -123,7 +123,7 @@ class FunctionalPreprocessingStage(functional.Functional,

   Args:
     inputs: An input tensor (must be created via `tf.keras.Input()`), or a list,
-      a dict, or a nested strcture of input tensors.
+      a dict, or a nested structure of input tensors.
     outputs: An output tensor, or a list, a dict or a nested structure of
       output tensors.
     name: String, optional. Name of the preprocessing stage.
diff --git a/keras/layers/wrappers.py b/keras/layers/wrappers.py
index 83b7051e74a..63ce7945c9b 100644
--- a/keras/layers/wrappers.py
+++ b/keras/layers/wrappers.py
@@ -593,7 +593,7 @@ def __call__(self, inputs, initial_state=None, constants=None, **kwargs):
     additional_inputs = []
     additional_specs = []
     if initial_state is not None:
-      # Check if `initial_state` can be splitted into half
+      # Check if `initial_state` can be split into half
       num_states = len(initial_state)
       if num_states % 2 > 0:
         raise ValueError(
diff --git a/keras/losses.py b/keras/losses.py
index 535b669c3f4..7c4604c7582 100644
--- a/keras/losses.py
+++ b/keras/losses.py
@@ -1695,7 +1695,7 @@ def _ragged_tensor_categorical_crossentropy(y_true,
   When used by CategoricalCrossentropy() with the default reduction
   (SUM_OVER_BATCH_SIZE), the reduction averages the loss over the
   number of elements independent of the batch. E.g. if the RaggedTensor
-  has 2 batches with [2, 1] values respectivly the resulting loss is
+  has 2 batches with [2, 1] values respectively the resulting loss is
   the sum of the individual loss values divided by 3.
   """
   fn = functools.partial(
diff --git a/keras/metrics.py b/keras/metrics.py
index 25d75974ba8..fb2fc85c6dd 100644
--- a/keras/metrics.py
+++ b/keras/metrics.py
@@ -2146,12 +2146,12 @@ class AUC(Metric):
   Usage with `compile()` API:

   ```python
-  # Reports the AUC of a model outputing a probability.
+  # Reports the AUC of a model outputting a probability.
   model.compile(optimizer='sgd',
                 loss=tf.keras.losses.BinaryCrossentropy(),
                 metrics=[tf.keras.metrics.AUC()])

-  # Reports the AUC of a model outputing a logit.
+  # Reports the AUC of a model outputting a logit.
   model.compile(optimizer='sgd',
                 loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
                 metrics=[tf.keras.metrics.AUC(from_logits=True)])
diff --git a/keras/models.py b/keras/models.py
index ee6fb008b1e..fa99b706ebb 100644
--- a/keras/models.py
+++ b/keras/models.py
@@ -457,7 +457,7 @@ def clone_model(model, input_tensors=None, clone_function=None):
         model, input_tensors=input_tensors, layer_fn=clone_function)


-# "Clone" a subclassed model by reseting all of the attributes.
+# "Clone" a subclassed model by resetting all of the attributes.
 def _in_place_subclassed_model_reset(model):
   """Substitute for model cloning that works for subclassed models.

diff --git a/keras/optimizer_experimental/optimizer.py b/keras/optimizer_experimental/optimizer.py
index ea8dc44e5e7..0d5f0dc255e 100644
--- a/keras/optimizer_experimental/optimizer.py
+++ b/keras/optimizer_experimental/optimizer.py
@@ -278,7 +278,7 @@ def apply_gradients(self, grads_and_vars):
     scope_name = self._name or "optimizer"
     with tf.name_scope(scope_name):
       with tf.init_scope():
-        # Lift variable creation to init scope to avoid enviroment issues.
+        # Lift variable creation to init scope to avoid environment issues.
         self.build(trainable_variables)
       grads = self._clip_gradients(grads)
       grads_and_vars = list(zip(grads, trainable_variables))
diff --git a/keras/optimizer_v2/optimizer_v2.py b/keras/optimizer_v2/optimizer_v2.py
index b25b74490eb..e0e0c189027 100644
--- a/keras/optimizer_v2/optimizer_v2.py
+++ b/keras/optimizer_v2/optimizer_v2.py
@@ -1345,7 +1345,7 @@ def _create_or_restore_slot_variable(
       self, slot_variable_position, slot_name, variable):
     """Returns the slot variable that should have a value restored into it.

-    It is up to the caller to retore the value into the slot variable if a valid
+    It is up to the caller to restore the value into the slot variable if a valid
     slot variable is returned.

     Called when a variable which has an associated slot variable is created or
@@ -1463,7 +1463,7 @@ class RestoredOptimizer(OptimizerV2):
   Holds slot variables and hyperparameters when an optimizer is restored from a
   SavedModel. These variables may be referenced in functions along with ops
   created by the original optimizer, but currently we do not support using the
-  optimizer object iself (e.g. through `apply_gradients`).
+  optimizer object itself (e.g. through `apply_gradients`).
   """
   # TODO(allenl): Make the restored optimizer functional by tracing its apply
   # methods.
diff --git a/keras/regularizers.py b/keras/regularizers.py
index 355050c3e02..3e47d3c7d54 100644
--- a/keras/regularizers.py
+++ b/keras/regularizers.py
@@ -35,7 +35,7 @@ def _check_penalty_number(x):
   if math.isinf(x) or math.isnan(x):
     raise ValueError(
         f'Value: {x} is not a valid regularization penalty number, '
-        'an infinity nubmer or NaN are not valid value')
+        'an infinity number or NaN are not valid value')


 def _none_to_default(inputs, default):
diff --git a/keras/saving/saved_model/layer_serialization.py b/keras/saving/saved_model/layer_serialization.py
index f70086097b2..8e4ab1c4c7d 100644
--- a/keras/saving/saved_model/layer_serialization.py
+++ b/keras/saving/saved_model/layer_serialization.py
@@ -170,7 +170,7 @@ class VocabularySavedModelSaver(LayerSavedModelSaver):
   vocab as part of the config until saving, when we need to clear it to avoid
   initializing a StaticHashTable twice (once when restoring the config and once
   when restoring restoring module resources). After clearing the vocab, we
-  presist a property to the layer indicating it was constructed with a vocab.
+  persist a property to the layer indicating it was constructed with a vocab.
   """

   @property
diff --git a/keras/saving/saved_model/load.py b/keras/saving/saved_model/load.py
index 12b4c964060..87c26775760 100644
--- a/keras/saving/saved_model/load.py
+++ b/keras/saving/saved_model/load.py
@@ -524,7 +524,7 @@ def _revive_layer_or_model_from_config(self, metadata, node_id):
           generic_utils.serialize_keras_class_and_config(
               class_name, config, shared_object_id=shared_object_id))
     except (TypeError, KeyError) as e:
-      # A name conflict has occured. The `class_name` is in the Keras native
+      # A name conflict has occurred. The `class_name` is in the Keras native
       # framework; however, the value in the framework is different from the
       # user's class definition which confuses the KerasObjectLoader.
       builtin_layer = layers_module.get_builtin_layer(class_name)
diff --git a/keras/utils/data_utils.py b/keras/utils/data_utils.py
index 72376fd7c08..647eec20b5a 100644
--- a/keras/utils/data_utils.py
+++ b/keras/utils/data_utils.py
@@ -584,7 +584,7 @@ class SequenceEnqueuer:
         enqueuer.stop()
     ```

-  The `enqueuer.get()` should be an infinite stream of datas.
+  The `enqueuer.get()` should be an infinite stream of data.
   """

   def __init__(self, sequence,
diff --git a/keras/utils/generic_utils.py b/keras/utils/generic_utils.py
index 7ab5bcc3db8..b3481deb40f 100644
--- a/keras/utils/generic_utils.py
+++ b/keras/utils/generic_utils.py
@@ -1038,7 +1038,7 @@ def _estimate_step_duration(self, current, now):
     if current:
       # there are a few special scenarios here:
       # 1) somebody is calling the progress bar without ever supplying step 1
-      # 2) somebody is calling the progress bar and supplies step one mulitple
+      # 2) somebody is calling the progress bar and supplies step one multiple
       # times, e.g. as part of a finalizing call
       # in these cases, we just fall back to the simple calculation
       if self._time_after_first_step is not None and current > 1:
diff --git a/keras/utils/kpl_test_utils.py b/keras/utils/kpl_test_utils.py
index 854e276b763..30232a84227 100644
--- a/keras/utils/kpl_test_utils.py
+++ b/keras/utils/kpl_test_utils.py
@@ -176,5 +176,5 @@ def test_save_load_serving_model(self, model, feature_mapper,
     self.assertIn(prediction0.numpy().decode("UTF-8"), ("yes", "no"))

     prediction1 = loaded_serving_fn(
-        tf.constant(["ironman", "ironman", "unkonwn"]))["output_0"]
+        tf.constant(["ironman", "ironman", "unknown"]))["output_0"]
     self.assertIn(prediction1.numpy().decode("UTF-8"), ("yes", "no"))
diff --git a/keras/utils/metrics_utils.py b/keras/utils/metrics_utils.py
index 9e3685d2b56..42b7494ad0a 100644
--- a/keras/utils/metrics_utils.py
+++ b/keras/utils/metrics_utils.py
@@ -466,7 +466,7 @@ def is_evenly_distributed_thresholds(thresholds):

   We could leverage evenly distributed thresholds to use less memory when
   calculate metrcis like AUC where each individual threshold need to be
-  evaluted.
+  evaluated.

   Args:
     thresholds: A python list or tuple, or 1D numpy array whose value is ranged
diff --git a/keras/utils/vis_utils.py b/keras/utils/vis_utils.py
index 710375e21db..556a09fd059 100644
--- a/keras/utils/vis_utils.py
+++ b/keras/utils/vis_utils.py
@@ -74,7 +74,7 @@ def get_layer_index_bound_by_layer_name(model, layer_names):
     layer_names: unique name of layer of the model, type(str)

   Returns:
-    retun the index value of layer based on its unique name (layer_names)
+    return the index value of layer based on its unique name (layer_names)
   """
   lower_index = []
   upper_index = []
@@ -186,7 +186,7 @@ def model_to_dot(model,
     layer_range = get_layer_index_bound_by_layer_name(model, layer_range)
     if layer_range[0] < 0 or layer_range[1] > len(model.layers):
       raise ValueError('Both values in layer_range should be in range (0, '
-                       f'{len(model.layers)}. Recieved: {layer_range}')
+                       f'{len(model.layers)}. Received: {layer_range}')

   sub_n_first_node = {}
   sub_n_last_node = {}