
Commit

Fix typos (tensorflow#14516)
taehoonlee authored and jhseu committed Nov 17, 2017
1 parent e01949c commit 62a8905
Showing 9 changed files with 13 additions and 13 deletions.
10 changes: 5 additions & 5 deletions tensorflow/contrib/boosted_trees/lib/utils/example.h
@@ -63,7 +63,7 @@ class SparseFloatFeatureColumn {
public:
void Reserve(const int32 size) {
if (!single_dimensional_) {
- mutlidimensional_values.Reserve(size);
+ multidimensional_values.Reserve(size);
}
}

@@ -76,15 +76,15 @@ class SparseFloatFeatureColumn {
DCHECK_EQ(0, feature_idx);
single_value_ = value;
} else {
- mutlidimensional_values.Add(feature_idx, value);
+ multidimensional_values.Add(feature_idx, value);
}
initialized_ = true;
}

void Clear() {
single_dimensional_ = false;
initialized_ = false;
- mutlidimensional_values.Clear();
+ multidimensional_values.Clear();
}

OptionalValue<T> operator[](int feature_idx) const {
@@ -94,15 +94,15 @@ class SparseFloatFeatureColumn {
if (single_dimensional_) {
return OptionalValue<T>(single_value_);
} else {
- return mutlidimensional_values[feature_idx];
+ return multidimensional_values[feature_idx];
}
}

private:
bool single_dimensional_;
bool initialized_;
T single_value_;
- SparseMultidimensionalValues<T> mutlidimensional_values;
+ SparseMultidimensionalValues<T> multidimensional_values;
};

// Holds data for one example and enables lookup by feature column.
2 changes: 1 addition & 1 deletion tensorflow/contrib/lite/nnapi/NeuralNetworksShim.h
@@ -1454,7 +1454,7 @@ inline int ANeuralNetworksModel_finish(ANeuralNetworksModel* model) {
* {@link ANeuralNetworksExecution_setOutputFromMemory} and
* {@link ANeuralNetworksExecution_setOperandValue}.
*
- * To build a model that can accomodate inputs of various sizes, as you may want
+ * To build a model that can accommodate inputs of various sizes, as you may want
* to do for a CNN, set the size of the dimensions that will vary at run time to
* 0. If you do so, provide the full dimensions when calling
* {@link ANeuralNetworksExecution_setInput} or {@link
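The pattern that doc comment describes can be sketched against the public NNAPI C API. This is a hedged illustration only, not part of this commit: the shapes, the operand index, and the helper names AddVariableBatchInput and SetConcreteInput are made up. The dimension that varies is declared as 0 when the operand is added to the model, and the concrete dimensions are supplied through the ANeuralNetworksOperandType passed to ANeuralNetworksExecution_setInput.

#include <android/NeuralNetworks.h>

// Model-build time: the batch dimension is unknown, so it is declared as 0.
void AddVariableBatchInput(ANeuralNetworksModel* model) {
  uint32_t dims[4] = {0, 224, 224, 3};  // batch size varies at run time
  ANeuralNetworksOperandType input_type;
  input_type.type = ANEURALNETWORKS_TENSOR_FLOAT32;
  input_type.dimensionCount = 4;
  input_type.dimensions = dims;
  input_type.scale = 0.0f;
  input_type.zeroPoint = 0;
  ANeuralNetworksModel_addOperand(model, &input_type);
}

// Execution time: the full, concrete dimensions must now accompany the data.
void SetConcreteInput(ANeuralNetworksExecution* execution, const float* data,
                      size_t length_bytes) {
  uint32_t dims[4] = {8, 224, 224, 3};  // concrete batch size for this run
  ANeuralNetworksOperandType input_type;
  input_type.type = ANEURALNETWORKS_TENSOR_FLOAT32;
  input_type.dimensionCount = 4;
  input_type.dimensions = dims;
  input_type.scale = 0.0f;
  input_type.zeroPoint = 0;
  ANeuralNetworksExecution_setInput(execution, /*index=*/0, &input_type, data,
                                    length_bytes);
}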
2 changes: 1 addition & 1 deletion tensorflow/contrib/lite/testing/parse_testdata.cc
@@ -232,7 +232,7 @@ TfLiteStatus CheckOutputs(tflite::Interpreter* interpreter,
// invoke {
// id: xyz
// input: 1,2,1,1,1,2,3,4
- // ouput: 4,5,6
+ // output: 4,5,6
// }
class Invoke : public Message {
public:
2 changes: 1 addition & 1 deletion tensorflow/contrib/lite/testing/test_runner.h
@@ -63,7 +63,7 @@ class TestRunner {
// Run the model.
virtual void Invoke() = 0;

- // Verify that the contents of all ouputs conform to the existing
+ // Verify that the contents of all outputs conform to the existing
// expectations. Return true if there are no expectations or they are all
// satisfied.
virtual bool CheckResults() = 0;
2 changes: 1 addition & 1 deletion tensorflow/contrib/lite/toco/model.h
@@ -129,7 +129,7 @@ enum class AxesOrder {
// The type of the scalars in an array.
// Note that that does not by itself tell whether the values in the array are
// real (are literally interpreted as real numbers) or quantized (only acquire
- // a meaning as real numbers in conjuction with QuantizationParams).
+ // a meaning as real numbers in conjunction with QuantizationParams).
//
// In practice though:
// float values are always real
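The distinction drawn in that comment can be made concrete with a small, hedged sketch (not part of this commit; the struct and function names below are placeholders, not the actual toco types): a quantized value only acquires a real-number meaning through the affine mapping carried by the quantization parameters, conventionally real = scale * (quantized - zero_point).

#include <cstdint>

// Illustrative stand-in for quantization parameters; field names are assumed.
struct ExampleQuantizationParams {
  double scale;
  int64_t zero_point;
};

// real_value = scale * (quantized_value - zero_point)
inline double DequantizeForIllustration(int64_t quantized,
                                        const ExampleQuantizationParams& params) {
  return params.scale * static_cast<double>(quantized - params.zero_point);
}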
2 changes: 1 addition & 1 deletion tensorflow/core/grappler/costs/graph_properties.cc
@@ -634,7 +634,7 @@ Status GraphProperties::InferStatically() {

std::unordered_map<const shape_inference::Dimension*, int> dim_ids;

- // Track shapes globally accross the graph.
+ // Track shapes globally across the graph.
SymbolicShapeManager shape_manager;
bool found_error = false;
for (const Node* const node : graph.nodes()) {
@@ -1146,7 +1146,7 @@ Status ArithmeticOptimizer::SimplifyArithmeticOps(
if (simplified_node != nullptr) {
nodes_to_simplify.PushBack(simplified_node);
}
- // When `node` is simplifed to another node rather than in-place, the
+ // When `node` is simplified to another node rather than in-place, the
// consumers of `node` are already redirected to `simplified_tensor`.
// Re-push the consumers into `nodes_to_simplify` for further
// optimizations.
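The re-enqueueing described in that comment is a standard worklist pattern. A generic, self-contained sketch (not the Grappler implementation; FakeNode and SimplifyUntilFixedPoint are made-up names) looks roughly like this:

#include <deque>
#include <functional>
#include <vector>

struct FakeNode {
  std::vector<FakeNode*> consumers;  // nodes that read this node's output
};

// `try_simplify` returns the replacement node if it rewrote `node`, or
// nullptr if nothing changed.
void SimplifyUntilFixedPoint(
    std::deque<FakeNode*>* nodes_to_simplify,
    const std::function<FakeNode*(FakeNode*)>& try_simplify) {
  while (!nodes_to_simplify->empty()) {
    FakeNode* node = nodes_to_simplify->front();
    nodes_to_simplify->pop_front();
    FakeNode* simplified_node = try_simplify(node);
    if (simplified_node == nullptr) continue;
    nodes_to_simplify->push_back(simplified_node);
    // When `node` was rewritten to a different node, its consumers now read
    // from that new node, so re-enqueue them for further optimization.
    for (FakeNode* consumer : node->consumers) {
      nodes_to_simplify->push_back(consumer);
    }
  }
}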
2 changes: 1 addition & 1 deletion tensorflow/core/kernels/dynamic_partition_op_gpu.cu.cc
@@ -153,7 +153,7 @@ class DynamicPartitionOpGPU : public AsyncOpKernel {
Tensor* partitions_out, Tensor* indices_out,
DoneCallback done) {
int32 M = std::max(N, num_partitions_);
- // indices_in will be made slightly larger to accomodate
+ // indices_in will be made slightly larger to accommodate
// later computations.
OP_REQUIRES_OK_ASYNC(
c, c->allocate_temp(DT_INT32, TensorShape({M}), indices_in), done);
2 changes: 1 addition & 1 deletion tensorflow/core/kernels/slice_op.h
@@ -103,7 +103,7 @@ void SliceUsingEigen(const Device& d, Tensor* out, const Tensor& in,
namespace functor {

// Template parameter NDIM is not neccesary here. The aim of keeping it
- // is to compile struct slice seperately which minimizes the compiling time.
+ // is to compile struct slice separately which minimizes the compiling time.
template <typename Device, typename T, int NDIM>
struct Slice {
void operator()(const Device& d, Tensor* out, const Tensor& in,
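The compile-time motivation in that comment is the usual explicit-instantiation trick: keep the rank as a template parameter, put the expensive definition in an implementation header, and instantiate each rank in its own translation unit so the ranks compile separately and in parallel. The sketch below is a hedged, generic illustration; the SliceSketch name and the file layout are assumptions, not the actual TensorFlow sources.

#include <cstdint>

// slice_sketch.h: declaration only, cheap to include everywhere.
template <typename T, int NDIM>
struct SliceSketch {
  void operator()(T* out, const T* in, int64_t count);
};

// slice_sketch_impl.h: the expensive definition, included only by the
// per-rank .cc files shown below.
template <typename T, int NDIM>
void SliceSketch<T, NDIM>::operator()(T* out, const T* in, int64_t count) {
  for (int64_t i = 0; i < count; ++i) out[i] = in[i];  // stand-in for the real per-rank work
}

// slice_sketch_2.cc compiles only the rank-2 instantiation:
//   #include "slice_sketch_impl.h"
//   template struct SliceSketch<float, 2>;
//
// slice_sketch_3.cc compiles only the rank-3 instantiation, in a separate
// translation unit that can build in parallel:
//   #include "slice_sketch_impl.h"
//   template struct SliceSketch<float, 3>;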
