Renaming size() to numel() - 2/2
Summary:
Codemod generated with clangr shard mode, 50 files per diff,
clangr code(size->numel): diffusion/FBS/browse/master/fbcode/caffe2/caffe2/fb/codemods/TensorMethodRename.cpp

i-am-not-moving-c2-to-c10

Reviewed By: ezyang

Differential Revision: D12833748

fbshipit-source-id: 98dc2d3abc23c177c2c9e457b81499952d4b690c
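
The TensorMethodRename.cpp referenced above lives in fbcode and is not reproduced here. For orientation, a standalone clang LibTooling pass along the following lines would perform the same kind of mechanical rename; this is a hypothetical sketch, not the internal tool, and clang's tooling APIs shift between releases (the style below matches clang 13+):

#include <map>
#include <string>

#include "clang/ASTMatchers/ASTMatchFinder.h"
#include "clang/ASTMatchers/ASTMatchers.h"
#include "clang/Tooling/CommonOptionsParser.h"
#include "clang/Tooling/Refactoring.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Error.h"

using namespace clang;
using namespace clang::ast_matchers;
using namespace clang::tooling;

namespace {
// Rewrites the member-name token of each match, i.e. the "size" in "t.size()".
class RenameSizeCallback : public MatchFinder::MatchCallback {
 public:
  explicit RenameSizeCallback(std::map<std::string, Replacements>& repls)
      : repls_(repls) {}

  void run(const MatchFinder::MatchResult& result) override {
    const auto* member = result.Nodes.getNodeAs<MemberExpr>("member");
    if (!member) return;
    Replacement rep(*result.SourceManager,
                    CharSourceRange::getTokenRange(member->getMemberLoc()),
                    "numel", result.Context->getLangOpts());
    llvm::consumeError(repls_[std::string(rep.getFilePath())].add(rep));
  }

 private:
  std::map<std::string, Replacements>& repls_;
};
}  // namespace

static llvm::cl::OptionCategory Category("tensor-method-rename");

int main(int argc, const char** argv) {
  auto options = CommonOptionsParser::create(argc, argv, Category);
  if (!options) return 1;
  RefactoringTool tool(options->getCompilations(),
                       options->getSourcePathList());

  // Match zero-argument calls to size() whose object is a caffe2::Tensor,
  // so unrelated size() methods (std::vector, std::string, ...) stay intact.
  MatchFinder finder;
  RenameSizeCallback callback(tool.getReplacements());
  finder.addMatcher(
      cxxMemberCallExpr(
          argumentCountIs(0),
          callee(cxxMethodDecl(hasName("size"),
                               ofClass(hasName("::caffe2::Tensor")))),
          callee(memberExpr().bind("member"))),
      &callback);

  return tool.runAndSave(newFrontendActionFactory(&finder).get());
}

Run over a compilation database, a pass like this produces exactly the kind of mechanical edits seen below; clangr's shard mode then splits the output into 50-file chunks for review, which is presumably why this commit is labeled 2/2.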
jerryzh168 authored and facebook-github-bot committed Oct 30, 2018
1 parent c82e8bf commit 91e87c0
Showing 22 changed files with 205 additions and 205 deletions.
8 changes: 4 additions & 4 deletions caffe2/python/pybind_state.h
@@ -120,7 +120,7 @@ class TensorFetcher : public BlobFetcherBase {
FetchedBlob FetchTensor(const Tensor& tensor, bool force_copy) {
#ifdef USE_NUMPY
FetchedBlob result;
- CAFFE_ENFORCE_GE(tensor.size(), 0, "Trying to fetch uninitialized tensor");
+ CAFFE_ENFORCE_GE(tensor.numel(), 0, "Trying to fetch uninitialized tensor");
const int numpy_type = CaffeToNumpyType(tensor.meta());
CAFFE_ENFORCE(
numpy_type != -1,
@@ -147,7 +147,7 @@ class TensorFetcher : public BlobFetcherBase {
if (numpy_type == NPY_OBJECT) {
PyObject** outObj = reinterpret_cast<PyObject**>(outPtr);
auto* str = tensor.template data<std::string>();
- for (int i = 0; i < tensor.size(); ++i) {
+ for (int i = 0; i < tensor.numel(); ++i) {
outObj[i] = PyBytes_FromStringAndSize(str->data(), str->size());
str++;
// cleanup on failure
@@ -207,7 +207,7 @@ class TensorFeeder : public BlobFeederBase {
case NPY_OBJECT: {
PyObject** input = reinterpret_cast<PyObject**>(PyArray_DATA(array));
auto* outPtr = tensor->template mutable_data<std::string>();
- for (int i = 0; i < tensor->size(); ++i) {
+ for (int i = 0; i < tensor->numel(); ++i) {
char* str;
Py_ssize_t strSize;
#if PY_MAJOR_VERSION > 2
@@ -240,7 +240,7 @@ class TensorFeeder : public BlobFeederBase {
break;
default:
context.CopyBytesFromCPU(
- tensor->size() * meta.itemsize(),
+ tensor->numel() * meta.itemsize(),
static_cast<void*>(PyArray_DATA(array)),
tensor->raw_mutable_data(meta));
}
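
The loops and byte counts above depend on numel() meaning the total element count, i.e. the product of all dimension sizes, whereas ATen's Tensor::size(dim) returns a single dimension's extent; the rename removes that ambiguity ahead of the c2/c10 merge. A tiny illustrative stand-in for the semantics (hypothetical free function; the real Tensor caches this count rather than recomputing it):

#include <cstdint>
#include <vector>

// Total element count: the product of all dimension sizes.
int64_t numel(const std::vector<int64_t>& dims) {
  int64_t n = 1;
  for (int64_t d : dims) {
    n *= d;  // e.g. {2, 3, 4} -> 24
  }
  return n;  // empty dims (a scalar) -> 1
}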
2 changes: 1 addition & 1 deletion caffe2/python/pybind_state_dlpack.h
@@ -36,7 +36,7 @@ class DLPackWrapper {
tensor_context.device_type = *device_type_ptr;
tensor_context.device_id = device_option.device_id();

- if (tensor->size() <= 0) {
+ if (tensor->numel() <= 0) {
tensor->Resize(0);
}
if (tensor->meta().id() == TypeIdentifier::uninitialized()) {
4 changes: 2 additions & 2 deletions caffe2/queue/queue_ops.h
@@ -150,7 +150,7 @@ class SafeDequeueBlobsOp final : public Operator<Context> {
if (i == 0) {
out->CopyFrom(in);
} else {
- auto oldSize = out->size();
+ auto oldSize = out->numel();

CAFFE_ENFORCE(
in.ndim() > 0,
@@ -164,7 +164,7 @@ class SafeDequeueBlobsOp final : public Operator<Context> {
auto* dst =
(char*)out->raw_mutable_data() + oldSize * in.meta().itemsize();
context_.template CopyItems<Context, Context>(
- in.meta(), in.size(), in.raw_data(), dst);
+ in.meta(), in.numel(), in.raw_data(), dst);
}
}
}
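
The else branch above appends the dequeued tensor after the oldSize elements already in out, addressing the destination by raw byte offset. A minimal host-side sketch of that append pattern, under the same assumptions the operator makes (contiguous storage, fixed item size); the flat std::vector<char> buffer is a stand-in for the tensor's raw storage:

#include <cstdint>
#include <cstring>
#include <vector>

// Grow the destination, then copy the new elements in after the old ones.
void append_raw(std::vector<char>& out, const void* src,
                int64_t old_numel, int64_t in_numel, size_t itemsize) {
  out.resize(static_cast<size_t>(old_numel + in_numel) * itemsize);
  char* dst = out.data() + old_numel * itemsize;  // same arithmetic as above
  std::memcpy(dst, src, in_numel * itemsize);
}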
6 changes: 3 additions & 3 deletions caffe2/queue/rebatching_queue.cc
@@ -46,19 +46,19 @@ void concat(
}

// Skip empty tensors
- if (input.size() == 0) {
+ if (input.numel() == 0) {
continue;
}

context.CopyItemsToCPU(
input.meta(),
- input.size(),
+ input.numel(),
input.raw_data() /* src */,
destinations[j] /* dst */
);

destinations[j] =
- (char*)destinations[j] + input.size() * input.itemsize();
+ (char*)destinations[j] + input.numel() * input.itemsize();
}
}
}
22 changes: 11 additions & 11 deletions caffe2/sgd/adadelta_op.h
@@ -40,9 +40,9 @@ class AdadeltaOp final : public Operator<Context> {
OP_SINGLE_ARG(float, "decay", decay_, 0.95f) {}

bool RunOnDevice() override {
- CAFFE_ENFORCE(Input(GRAD).size() == Input(MOMENT_GRAD).size());
- CAFFE_ENFORCE(Input(GRAD).size() == Input(MOMENT_DELTA).size());
- CAFFE_ENFORCE(Input(GRAD).size() == Input(PARAM).size());
+ CAFFE_ENFORCE(Input(GRAD).numel() == Input(MOMENT_GRAD).numel());
+ CAFFE_ENFORCE(Input(GRAD).numel() == Input(MOMENT_DELTA).numel());
+ CAFFE_ENFORCE(Input(GRAD).numel() == Input(PARAM).numel());
CAFFE_ENFORCE_GE(epsilon_, 0.0f);
CAFFE_ENFORCE_GT(decay_, 0.0f);
CAFFE_ENFORCE_LT(decay_, 1.0f);
@@ -51,7 +51,7 @@ class AdadeltaOp final : public Operator<Context> {
Output(OUTPUT_MOMENT_GRAD)->ResizeLike(Input(MOMENT_GRAD));
Output(OUTPUT_MOMENT_DELTA)->ResizeLike(Input(MOMENT_DELTA));
AdadeltaUpdate<Context>(
- Input(GRAD).size(),
+ Input(GRAD).numel(),
Input(PARAM).template data<float>(),
Input(GRAD).template data<float>(),
Input(MOMENT_GRAD).template data<float>(),
@@ -84,9 +84,9 @@ class SparseAdadeltaOp final : public Operator<Context> {

bool RunOnDevice() override {
// Enforce shapes
- CAFFE_ENFORCE_EQ(Input(PARAM).size(), Input(MOMENT_GRAD).size());
- CAFFE_ENFORCE_EQ(Input(PARAM).size(), Input(MOMENT_DELTA).size());
- CAFFE_ENFORCE_EQ(Input(LR).size(), 1);
+ CAFFE_ENFORCE_EQ(Input(PARAM).numel(), Input(MOMENT_GRAD).numel());
+ CAFFE_ENFORCE_EQ(Input(PARAM).numel(), Input(MOMENT_DELTA).numel());
+ CAFFE_ENFORCE_EQ(Input(LR).numel(), 1);
CAFFE_ENFORCE_EQ(
Input(PARAM).size_from_dim(1),
Input(GRAD).size_from_dim(Input(INDICES).ndim()));
@@ -114,12 +114,12 @@ class SparseAdadeltaOp final : public Operator<Context> {
auto* momentDeltaOut =
Output(OUTPUT_MOMENT_DELTA)->template mutable_data<float>();

- auto n = Input(INDICES).size();
+ auto n = Input(INDICES).numel();
if (n == 0) {
return true;
}

- auto block_size = Input(GRAD).size() / n;
+ auto block_size = Input(GRAD).numel() / n;
for (int i = 0; i < n; ++i) {
auto idx = indices[i];
if (block_size == 1) {
Expand All @@ -136,7 +136,7 @@ class SparseAdadeltaOp final : public Operator<Context> {

#ifndef NDEBUG
CAFFE_ENFORCE_GE(
- Input(PARAM).size(),
+ Input(PARAM).numel(),
block_size + offsetIdx,
this->debug_def().input(PARAM),
", out of bound, idx:",
Expand All @@ -146,7 +146,7 @@ class SparseAdadeltaOp final : public Operator<Context> {
" and block size:",
block_size);
CAFFE_ENFORCE_GE(
- Input(GRAD).size(),
+ Input(GRAD).numel(),
block_size + offsetI,
this->debug_def().input(GRAD),
", out of bound idx, idx:",
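
Both sparse optimizers in this commit share one addressing scheme: n = Input(INDICES).numel() rows get updated, each block_size = Input(GRAD).numel() / n elements wide, and indices[i] * block_size locates row indices[i] inside the flat parameter tensor. A hypothetical plain-array sketch of that addressing, with a trivial w -= lr * g rule standing in for AdadeltaUpdate:

#include <cstdint>

// grad holds n consecutive blocks; indices[i] names the parameter row that
// block i updates. The bounds test mirrors the debug-only CAFFE_ENFORCE_GE
// checks in the operator above.
void sparse_apply(float* param, int64_t param_numel,
                  const float* grad, int64_t grad_numel,
                  const int64_t* indices, int64_t n, float lr) {
  if (n == 0) return;  // same early return as the operators above
  const int64_t block_size = grad_numel / n;
  for (int64_t i = 0; i < n; ++i) {
    const int64_t offsetI = i * block_size;             // into grad
    const int64_t offsetIdx = indices[i] * block_size;  // into param
    if (offsetIdx + block_size > param_numel) return;   // out-of-bound index
    for (int64_t k = 0; k < block_size; ++k) {
      param[offsetIdx + k] -= lr * grad[offsetI + k];
    }
  }
}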
44 changes: 22 additions & 22 deletions caffe2/sgd/adagrad_op.h
@@ -80,23 +80,23 @@ class AdagradOp final : public Operator<Context> {

bool RunOnDevice() override {
CAFFE_ENFORCE_EQ(
- Input(GRAD).size(),
- Input(MOMENT_1).size(),
+ Input(GRAD).numel(),
+ Input(MOMENT_1).numel(),
"PARAM size: ",
- Input(PARAM).size(),
+ Input(PARAM).numel(),
", GRAD size: ",
- Input(GRAD).size(),
+ Input(GRAD).numel(),
", MOMENT_1 size: ",
- Input(MOMENT_1).size(),
+ Input(MOMENT_1).numel(),
", LR size: ",
- Input(LR).size());
+ Input(LR).numel());

- CAFFE_ENFORCE_EQ(Input(GRAD).size(), Input(PARAM).size());
+ CAFFE_ENFORCE_EQ(Input(GRAD).numel(), Input(PARAM).numel());
Output(OUTPUT_PARAM)->ResizeLike(Input(PARAM));
Output(OUTPUT_MOMENT_1)->ResizeLike(Input(MOMENT_1));
if (OutputSize() == 2) {
adagrad_update<Context>(
- Input(GRAD).size(),
+ Input(GRAD).numel(),
Input(PARAM).template data<T>(),
Input(GRAD).template data<T>(),
Input(MOMENT_1).template data<T>(),
Expand All @@ -109,7 +109,7 @@ class AdagradOp final : public Operator<Context> {
} else if (OutputSize() == 3) {
Output(OUTPUT_EFFECTIVE_LR)->ResizeLike(Input(GRAD));
adagrad_update_output_effective_lr<Context>(
- Input(GRAD).size(),
+ Input(GRAD).numel(),
Input(PARAM).template data<T>(),
Input(GRAD).template data<T>(),
Input(MOMENT_1).template data<T>(),
Expand All @@ -124,7 +124,7 @@ class AdagradOp final : public Operator<Context> {
Output(OUTPUT_EFFECTIVE_LR)->ResizeLike(Input(GRAD));
Output(OUTPUT_UPDATE)->ResizeLike(Input(GRAD));
adagrad_update_output_effective_lr_and_update<Context>(
- Input(GRAD).size(),
+ Input(GRAD).numel(),
Input(PARAM).template data<T>(),
Input(GRAD).template data<T>(),
Input(MOMENT_1).template data<T>(),
@@ -162,8 +162,8 @@ class SparseAdagradOp final : public Operator<Context> {

bool RunOnDevice() override {
// Enforce shapes
- CAFFE_ENFORCE_EQ(Input(PARAM).size(), Input(MOMENT_1).size());
- CAFFE_ENFORCE_EQ(Input(LR).size(), 1);
+ CAFFE_ENFORCE_EQ(Input(PARAM).numel(), Input(MOMENT_1).numel());
+ CAFFE_ENFORCE_EQ(Input(LR).numel(), 1);
CAFFE_ENFORCE_EQ(
Input(PARAM).size_from_dim(1),
Input(GRAD).size_from_dim(Input(INDICES).ndim()));
@@ -182,12 +182,12 @@ class SparseAdagradOp final : public Operator<Context> {
auto* paramOut = Output(OUTPUT_PARAM)->template mutable_data<T>();
auto* momentOut = Output(OUTPUT_MOMENT_1)->template mutable_data<T>();

- auto n = Input(INDICES).size();
+ auto n = Input(INDICES).numel();
if (n == 0) {
return true;
}

- auto block_size = Input(GRAD).size() / n;
+ auto block_size = Input(GRAD).numel() / n;
for (auto i = 0; i < n; ++i) {
auto idx = indices[i];
if (block_size == 1) {
Expand All @@ -200,7 +200,7 @@ class SparseAdagradOp final : public Operator<Context> {

#ifndef NDEBUG
CAFFE_ENFORCE_GE(
- Input(PARAM).size(),
+ Input(PARAM).numel(),
block_size + offsetIdx,
this->debug_def().input(PARAM),
", out of bound, idx:",
Expand All @@ -210,7 +210,7 @@ class SparseAdagradOp final : public Operator<Context> {
" and block size:",
block_size);
CAFFE_ENFORCE_GE(
Input(GRAD).size(),
Input(GRAD).numel(),
block_size + offsetI,
this->debug_def().input(GRAD),
", out of bound idx, idx:",
@@ -250,8 +250,8 @@ class RowWiseSparseAdagradOp final : public Operator<Context> {

bool RunOnDevice() override {
// Enforce shapes
- CAFFE_ENFORCE_EQ(Input(PARAM).sizes()[0], Input(MOMENT_1).size());
- CAFFE_ENFORCE_EQ(Input(LR).size(), 1);
+ CAFFE_ENFORCE_EQ(Input(PARAM).sizes()[0], Input(MOMENT_1).numel());
+ CAFFE_ENFORCE_EQ(Input(LR).numel(), 1);
CAFFE_ENFORCE_EQ(
Input(PARAM).size_from_dim(1),
Input(GRAD).size_from_dim(Input(INDICES).ndim()));
Expand All @@ -270,12 +270,12 @@ class RowWiseSparseAdagradOp final : public Operator<Context> {
auto* paramOut = Output(OUTPUT_PARAM)->template mutable_data<T>();
auto* momentOut = Output(OUTPUT_MOMENT_1)->template mutable_data<T>();

- auto n = Input(INDICES).size();
+ auto n = Input(INDICES).numel();
if (n == 0) {
return true;
}

- auto block_size = Input(GRAD).size() / n;
+ auto block_size = Input(GRAD).numel() / n;

for (auto i = 0; i < n; ++i) {
auto idx = indices[i];
Expand All @@ -289,7 +289,7 @@ class RowWiseSparseAdagradOp final : public Operator<Context> {

#ifndef NDEBUG
CAFFE_ENFORCE_GE(
- Input(PARAM).size(),
+ Input(PARAM).numel(),
block_size + offsetIdx,
this->debug_def().input(PARAM),
", out of bound, idx:",
Expand All @@ -299,7 +299,7 @@ class RowWiseSparseAdagradOp final : public Operator<Context> {
" and block size:",
block_size);
CAFFE_ENFORCE_GE(
- Input(GRAD).size(),
+ Input(GRAD).numel(),
block_size + offsetI,
this->debug_def().input(GRAD),
", out of bound idx, idx:",
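
RowWiseSparseAdagradOp differs from SparseAdagradOp in its first shape check: the moment tensor holds one scalar per parameter row (Input(PARAM).sizes()[0] == Input(MOMENT_1).numel()) instead of one per element. A hedged sketch of the per-row step that layout implies, assuming the textbook Adagrad rule; the real kernel's sign convention and coefficients may differ:

#include <cmath>
#include <cstdint>

// One update for a single row, accumulating a single moment scalar for the
// whole row -- the layout the shape check above enforces.
void rowwise_adagrad_row(float* w, float* moment, const float* g,
                         int64_t block_size, float lr, float epsilon) {
  float sum_sq = 0.0f;
  for (int64_t k = 0; k < block_size; ++k) {
    sum_sq += g[k] * g[k];
  }
  *moment += sum_sq / static_cast<float>(block_size);  // mean squared grad
  const float step = lr / (std::sqrt(*moment) + epsilon);
  for (int64_t k = 0; k < block_size; ++k) {
    w[k] -= step * g[k];
  }
}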
[Diffs for the remaining 16 changed files were not loaded in this view.]
