From 84788c61d79964b5508fba6022ee6f6a5926ccec Mon Sep 17 00:00:00 2001 From: Sergio Guadarrama Date: Thu, 15 May 2014 17:38:03 -0700 Subject: [PATCH 01/14] Added ArgMax Layer Conflicts: src/caffe/proto/caffe.proto --- include/caffe/vision_layers.hpp | 19 ++++++++++ src/caffe/layer_factory.cpp | 2 ++ src/caffe/layers/argmax_layer.cpp | 59 +++++++++++++++++++++++++++++++ src/caffe/proto/caffe.proto | 7 ++++ 4 files changed, 87 insertions(+) create mode 100644 src/caffe/layers/argmax_layer.cpp diff --git a/include/caffe/vision_layers.hpp b/include/caffe/vision_layers.hpp index de99bc3033f..21e3fd0126c 100644 --- a/include/caffe/vision_layers.hpp +++ b/include/caffe/vision_layers.hpp @@ -22,6 +22,25 @@ ConcatLayer Takes at least two blobs and concatenates them along either num or channel dim, outputting the result. */ +template +class ArgMaxLayer : public Layer { + public: + explicit ArgMaxLayer(const LayerParameter& param) + : Layer(param) {} + virtual void SetUp(const vector*>& bottom, + vector*>* top); + + protected: + virtual Dtype Forward_cpu(const vector*>& bottom, + vector*>* top); + // For now ArgMax layer should not be used to compute backward operations. 
+ virtual void Backward_cpu(const vector*>& top, + const bool propagate_down, vector*>* bottom) { + NOT_IMPLEMENTED; + } + bool out_max_val_; +}; + template class ConcatLayer : public Layer { public: diff --git a/src/caffe/layer_factory.cpp b/src/caffe/layer_factory.cpp index 2991c81f559..ae15ba5bb44 100644 --- a/src/caffe/layer_factory.cpp +++ b/src/caffe/layer_factory.cpp @@ -24,6 +24,8 @@ Layer* GetLayer(const LayerParameter& param) { switch (type) { case LayerParameter_LayerType_ACCURACY: return new AccuracyLayer(param); + case LayerParameter_LayerType_ARGMAX: + return new ArgMaxLayer(param); case LayerParameter_LayerType_BNLL: return new BNLLLayer(param); case LayerParameter_LayerType_CONCAT: diff --git a/src/caffe/layers/argmax_layer.cpp b/src/caffe/layers/argmax_layer.cpp new file mode 100644 index 00000000000..b09fd7883e2 --- /dev/null +++ b/src/caffe/layers/argmax_layer.cpp @@ -0,0 +1,59 @@ +// Copyright 2014 BVLC and contributors. + +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" + +using std::max; + +namespace caffe { + +template +void ArgMaxLayer::SetUp(const vector*>& bottom, + vector*>* top) { + CHECK_EQ(bottom.size(), 1) << "ArgMaxLayer Layer takes 1 input."; + CHECK_EQ(top->size(), 1) << "ArgMaxLayer Layer takes 1 output."; + out_max_val_ = this->layer_param_.argmax_param().out_max_val(); + // Produces max_ind and max_val + if (out_max_val_) { + (*top)[0]->Reshape(bottom[0]->num(), 2, 1, 1); + } // Produces only max_ind + else { + (*top)[0]->Reshape(bottom[0]->num(), 1, 1, 1); + } +} + +template +Dtype ArgMaxLayer::Forward_cpu(const vector*>& bottom, + vector*>* top) { + const Dtype* bottom_data = bottom[0]->cpu_data(); + Dtype* top_data = (*top)[0]->mutable_cpu_data(); + int num = bottom[0]->num(); + int dim = bottom[0]->count() / bottom[0]->num(); + for (int i = 0; i < num; ++i) { + // Accuracy + Dtype max_val = -FLT_MAX; + int max_ind = 0; + for (int j = 0; j < dim; ++j) { + if (bottom_data[i * dim + j] > 
max_val) { + max_val = bottom_data[i * dim + j]; + max_ind = j; + } + } + if (out_max_val_) { + top_data[i * 2] = max_ind; + top_data[i * 2 + 1] = max_val; + } + else { + top_data[i] = max_ind; + } + } + return Dtype(0); +} + +INSTANTIATE_CLASS(ArgMaxLayer); + + +} // namespace caffe diff --git a/src/caffe/proto/caffe.proto b/src/caffe/proto/caffe.proto index ce2f25b8db5..8d8b2144e96 100644 --- a/src/caffe/proto/caffe.proto +++ b/src/caffe/proto/caffe.proto @@ -160,6 +160,7 @@ message LayerParameter { SPLIT = 22; TANH = 23; WINDOW_DATA = 24; + ARGMAX = 30; } optional LayerType type = 5; // the layer type from the enum above @@ -186,6 +187,7 @@ message LayerParameter { optional PoolingParameter pooling_param = 19; optional PowerParameter power_param = 21; optional WindowDataParameter window_data_param = 20; + optional ArgMaxLayer argmax_param = 23; // DEPRECATED: The layer parameters specified as a V0LayerParameter. // This should never be used by any code except to upgrade to the new @@ -193,6 +195,11 @@ message LayerParameter { optional V0LayerParameter layer = 1; } +// Message that stores parameters used by ArgMaxLayer +message ArgMaxLayer { + // If true produce pairs (argmax, maxval) + optional bool out_max_val = 1 [default = false]; + // Message that stores parameters used by ConcatLayer message ConcatParameter { // Concat Layer needs to specify the dimension along the concat will happen, From cdebe7a617c83501ebeffc42239f8d89c0d71d7e Mon Sep 17 00:00:00 2001 From: Sergio Date: Thu, 15 May 2014 09:49:36 -0700 Subject: [PATCH 02/14] Added Test for ArgMax Layer --- src/caffe/test/test_argmax_layer.cpp | 113 +++++++++++++++++++++++++++ 1 file changed, 113 insertions(+) create mode 100644 src/caffe/test/test_argmax_layer.cpp diff --git a/src/caffe/test/test_argmax_layer.cpp b/src/caffe/test/test_argmax_layer.cpp new file mode 100644 index 00000000000..c99dc23021f --- /dev/null +++ b/src/caffe/test/test_argmax_layer.cpp @@ -0,0 +1,113 @@ +// Copyright 2014 BVLC and 
contributors. + +#include + +#include "cuda_runtime.h" +#include "gtest/gtest.h" +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/vision_layers.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +#include "caffe/test/test_caffe_main.hpp" + +namespace caffe { + +extern cudaDeviceProp CAFFE_TEST_CUDA_PROP; + +template +class ArgMaxLayerTest : public ::testing::Test { + protected: + ArgMaxLayerTest() + : blob_bottom_(new Blob(20, 10, 1, 1)), + blob_top_(new Blob()) { + Caffe::set_random_seed(1701); + // fill the values + FillerParameter filler_param; + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + blob_bottom_vec_.push_back(blob_bottom_); + blob_top_vec_.push_back(blob_top_); + } + virtual ~ArgMaxLayerTest() { delete blob_bottom_; delete blob_top_; } + Blob* const blob_bottom_; + Blob* const blob_top_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +typedef ::testing::Types Dtypes; +TYPED_TEST_CASE(ArgMaxLayerTest, Dtypes); + + +TYPED_TEST(ArgMaxLayerTest, TestSetup) { + LayerParameter layer_param; + ThresholdLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + EXPECT_EQ(this->blob_top_->num(), this->bottom_top_->num()); + EXPECT_EQ(this->blob_top_->channels(), 1); +} + +TYPED_TEST(ArgMaxLayerTest, TestSetupMaxVal) { + LayerParameter layer_param; + ArgMaxParameter* argmax_param = layer_param.mutable_argmax_param(); + argmax_param->set_out_max_val(true) + ThresholdLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + EXPECT_EQ(this->blob_top_->num(), this->bottom_top_->num()); + EXPECT_EQ(this->blob_top_->channels(), 2); +} + +TYPED_TEST(ArgMaxLayerTest, TestCPU) { + LayerParameter layer_param; + Caffe::set_mode(Caffe::CPU); + ArgMaxLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + // Now, 
check values + const TypeParam* bottom_data = this->blob_bottom_->cpu_data(); + const TypeParam* top_data = this->blob_top_->cpu_data(); + int max_ind; + TypeParam max_val; + int num = this->blob_bottom_->num(); + int dim = this->blob_bottom_->count() / num; + for (int i = 0; i < num; ++i) { + EXPECT_GE(top_data[i], 0); + EXPECT_LE(top_data[i], dim); + max_ind = top_data[i]; + max_val = bottom_data[i * dim + max_ind]; + for (int j = 0; j < dim; ++j) { + EXPECT_LE(bottom_data[i * dim + j], max_val); + } + } +} + +TYPED_TEST(ArgMaxLayerTest, TestCPUMaxVal) { + LayerParameter layer_param; + Caffe::set_mode(Caffe::CPU); + ArgMaxParameter* argmax_param = layer_param.mutable_argmax_param(); + argmax_param->set_out_max_val(true) + ArgMaxLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + // Now, check values + const TypeParam* bottom_data = this->blob_bottom_->cpu_data(); + const TypeParam* top_data = this->blob_top_->cpu_data(); + int max_ind; + TypeParam max_val; + int num = this->blob_bottom_->num(); + int dim = this->blob_bottom_->count() / num; + for (int i = 0; i < num; ++i) { + EXPECT_GE(top_data[i], 0); + EXPECT_LE(top_data[i], dim); + max_ind = top_data[i * 2]; + max_val = top_data[i * 2 + 1]; + EXPECT_EQ(bottom_data[i * dim + max_ind],max_val); + for (int j = 0; j < dim; ++j) { + EXPECT_LE(bottom_data[i * dim + j], max_val); + } + } +} + +} // namespace caffe From 69dbbc2c045dcec996157875ea644c200b9d98c4 Mon Sep 17 00:00:00 2001 From: Sergio Guadarrama Date: Thu, 15 May 2014 17:39:52 -0700 Subject: [PATCH 03/14] Fixed numbers in proto and name of ArgMaxParameter Conflicts: src/caffe/proto/caffe.proto --- src/caffe/proto/caffe.proto | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/caffe/proto/caffe.proto b/src/caffe/proto/caffe.proto index 8d8b2144e96..b5bc26bb744 100644 --- a/src/caffe/proto/caffe.proto +++ 
b/src/caffe/proto/caffe.proto @@ -187,7 +187,8 @@ message LayerParameter { optional PoolingParameter pooling_param = 19; optional PowerParameter power_param = 21; optional WindowDataParameter window_data_param = 20; - optional ArgMaxLayer argmax_param = 23; + optional ArgMaxParameter argmax_param = 23; + // DEPRECATED: The layer parameters specified as a V0LayerParameter. // This should never be used by any code except to upgrade to the new @@ -196,9 +197,10 @@ message LayerParameter { } // Message that stores parameters used by ArgMaxLayer -message ArgMaxLayer { +message ArgMaxParameter { // If true produce pairs (argmax, maxval) optional bool out_max_val = 1 [default = false]; +} // Message that stores parameters used by ConcatLayer message ConcatParameter { From bdcd75e48746be0aab0d4c8b606eeb2aac6c9ae8 Mon Sep 17 00:00:00 2001 From: Sergio Guadarrama Date: Thu, 15 May 2014 17:42:38 -0700 Subject: [PATCH 04/14] Fix types of ArgMax Layers params Conflicts: include/caffe/vision_layers.hpp src/caffe/proto/caffe.proto --- src/caffe/proto/caffe.proto | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/caffe/proto/caffe.proto b/src/caffe/proto/caffe.proto index b5bc26bb744..d73357cb944 100644 --- a/src/caffe/proto/caffe.proto +++ b/src/caffe/proto/caffe.proto @@ -189,7 +189,6 @@ message LayerParameter { optional WindowDataParameter window_data_param = 20; optional ArgMaxParameter argmax_param = 23; - // DEPRECATED: The layer parameters specified as a V0LayerParameter. // This should never be used by any code except to upgrade to the new // LayerParameter specification. 
@@ -197,6 +196,7 @@ message LayerParameter { } // Message that stores parameters used by ArgMaxLayer + message ArgMaxParameter { // If true produce pairs (argmax, maxval) optional bool out_max_val = 1 [default = false]; From d19c18060134b7b409291cf7c9ffe815b1a9b0df Mon Sep 17 00:00:00 2001 From: Sergio Guadarrama Date: Thu, 15 May 2014 16:09:07 -0700 Subject: [PATCH 05/14] Added FLT_MAX to argmax layer --- src/caffe/layers/argmax_layer.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/caffe/layers/argmax_layer.cpp b/src/caffe/layers/argmax_layer.cpp index b09fd7883e2..33ec1d34089 100644 --- a/src/caffe/layers/argmax_layer.cpp +++ b/src/caffe/layers/argmax_layer.cpp @@ -1,12 +1,11 @@ // Copyright 2014 BVLC and contributors. -#include #include +#include #include "caffe/layer.hpp" #include "caffe/vision_layers.hpp" -using std::max; namespace caffe { From aa57dfb84c763d20d46f449290b3d00a7870dc9d Mon Sep 17 00:00:00 2001 From: Sergio Guadarrama Date: Thu, 15 May 2014 16:43:01 -0700 Subject: [PATCH 06/14] Added missing ; --- src/caffe/test/test_argmax_layer.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/caffe/test/test_argmax_layer.cpp b/src/caffe/test/test_argmax_layer.cpp index c99dc23021f..bc3a8ecd1e0 100644 --- a/src/caffe/test/test_argmax_layer.cpp +++ b/src/caffe/test/test_argmax_layer.cpp @@ -52,7 +52,7 @@ TYPED_TEST(ArgMaxLayerTest, TestSetup) { TYPED_TEST(ArgMaxLayerTest, TestSetupMaxVal) { LayerParameter layer_param; ArgMaxParameter* argmax_param = layer_param.mutable_argmax_param(); - argmax_param->set_out_max_val(true) + argmax_param->set_out_max_val(true); ThresholdLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); EXPECT_EQ(this->blob_top_->num(), this->bottom_top_->num()); @@ -87,7 +87,7 @@ TYPED_TEST(ArgMaxLayerTest, TestCPUMaxVal) { LayerParameter layer_param; Caffe::set_mode(Caffe::CPU); ArgMaxParameter* argmax_param = layer_param.mutable_argmax_param(); - 
argmax_param->set_out_max_val(true) + argmax_param->set_out_max_val(true); ArgMaxLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); From d6748cb994e6e2fa082569cc10299f77e29aa60c Mon Sep 17 00:00:00 2001 From: Sergio Guadarrama Date: Thu, 15 May 2014 16:54:21 -0700 Subject: [PATCH 07/14] Fixed name of ArgMaxLayerParameter --- src/caffe/test/test_argmax_layer.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/caffe/test/test_argmax_layer.cpp b/src/caffe/test/test_argmax_layer.cpp index bc3a8ecd1e0..b30e7edbf66 100644 --- a/src/caffe/test/test_argmax_layer.cpp +++ b/src/caffe/test/test_argmax_layer.cpp @@ -51,7 +51,7 @@ TYPED_TEST(ArgMaxLayerTest, TestSetup) { TYPED_TEST(ArgMaxLayerTest, TestSetupMaxVal) { LayerParameter layer_param; - ArgMaxParameter* argmax_param = layer_param.mutable_argmax_param(); + ArgMaxLayerParameter* argmax_param = layer_param.mutable_argmax_param(); argmax_param->set_out_max_val(true); ThresholdLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); @@ -86,7 +86,7 @@ TYPED_TEST(ArgMaxLayerTest, TestCPU) { TYPED_TEST(ArgMaxLayerTest, TestCPUMaxVal) { LayerParameter layer_param; Caffe::set_mode(Caffe::CPU); - ArgMaxParameter* argmax_param = layer_param.mutable_argmax_param(); + ArgMaxLayerParameter* argmax_param = layer_param.mutable_argmax_param(); argmax_param->set_out_max_val(true); ArgMaxLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); From 91ab1f684c8ea03cb1bd0ffe223937dc9e5381cd Mon Sep 17 00:00:00 2001 From: Sergio Guadarrama Date: Thu, 15 May 2014 16:55:45 -0700 Subject: [PATCH 08/14] Fixed name of blob_bottom_ --- src/caffe/test/test_argmax_layer.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/caffe/test/test_argmax_layer.cpp b/src/caffe/test/test_argmax_layer.cpp index b30e7edbf66..0174e8988e1 100644 --- 
a/src/caffe/test/test_argmax_layer.cpp +++ b/src/caffe/test/test_argmax_layer.cpp @@ -45,7 +45,7 @@ TYPED_TEST(ArgMaxLayerTest, TestSetup) { LayerParameter layer_param; ThresholdLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); - EXPECT_EQ(this->blob_top_->num(), this->bottom_top_->num()); + EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_->num()); EXPECT_EQ(this->blob_top_->channels(), 1); } @@ -55,7 +55,7 @@ TYPED_TEST(ArgMaxLayerTest, TestSetupMaxVal) { argmax_param->set_out_max_val(true); ThresholdLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); - EXPECT_EQ(this->blob_top_->num(), this->bottom_top_->num()); + EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_->num()); EXPECT_EQ(this->blob_top_->channels(), 2); } From 766dd362db857171de45299fa75ecb8799f98289 Mon Sep 17 00:00:00 2001 From: Sergio Guadarrama Date: Thu, 15 May 2014 18:01:04 -0700 Subject: [PATCH 09/14] Change ThresholdLayer to ArgMaxLayer in test_argmax --- src/caffe/test/test_argmax_layer.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/caffe/test/test_argmax_layer.cpp b/src/caffe/test/test_argmax_layer.cpp index 0174e8988e1..f0754f0c1d2 100644 --- a/src/caffe/test/test_argmax_layer.cpp +++ b/src/caffe/test/test_argmax_layer.cpp @@ -43,7 +43,7 @@ TYPED_TEST_CASE(ArgMaxLayerTest, Dtypes); TYPED_TEST(ArgMaxLayerTest, TestSetup) { LayerParameter layer_param; - ThresholdLayer layer(layer_param); + ArgMaxLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_->num()); EXPECT_EQ(this->blob_top_->channels(), 1); @@ -53,7 +53,7 @@ TYPED_TEST(ArgMaxLayerTest, TestSetupMaxVal) { LayerParameter layer_param; ArgMaxLayerParameter* argmax_param = layer_param.mutable_argmax_param(); argmax_param->set_out_max_val(true); - ThresholdLayer layer(layer_param); + ArgMaxLayer layer(layer_param); 
layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_->num()); EXPECT_EQ(this->blob_top_->channels(), 2); From f6c2d933c7c281379fd5bab5131a65856a54dec0 Mon Sep 17 00:00:00 2001 From: Sergio Guadarrama Date: Thu, 15 May 2014 18:02:08 -0700 Subject: [PATCH 10/14] Change ArgMaxLayerParam to ArgMaxParam for consistency --- src/caffe/test/test_argmax_layer.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/caffe/test/test_argmax_layer.cpp b/src/caffe/test/test_argmax_layer.cpp index f0754f0c1d2..627dd575904 100644 --- a/src/caffe/test/test_argmax_layer.cpp +++ b/src/caffe/test/test_argmax_layer.cpp @@ -51,7 +51,7 @@ TYPED_TEST(ArgMaxLayerTest, TestSetup) { TYPED_TEST(ArgMaxLayerTest, TestSetupMaxVal) { LayerParameter layer_param; - ArgMaxLayerParameter* argmax_param = layer_param.mutable_argmax_param(); + ArgMaxParameter* argmax_param = layer_param.mutable_argmax_param(); argmax_param->set_out_max_val(true); ArgMaxLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); @@ -86,7 +86,7 @@ TYPED_TEST(ArgMaxLayerTest, TestCPU) { TYPED_TEST(ArgMaxLayerTest, TestCPUMaxVal) { LayerParameter layer_param; Caffe::set_mode(Caffe::CPU); - ArgMaxLayerParameter* argmax_param = layer_param.mutable_argmax_param(); + ArgMaxParameter* argmax_param = layer_param.mutable_argmax_param(); argmax_param->set_out_max_val(true); ArgMaxLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); From a3fbe2d6669756c727a17bb082d3319d00b451f5 Mon Sep 17 00:00:00 2001 From: Sergey Karayev Date: Tue, 20 May 2014 21:24:03 -0700 Subject: [PATCH 11/14] corrected the caffe.proto ids --- src/caffe/proto/caffe.proto | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/src/caffe/proto/caffe.proto b/src/caffe/proto/caffe.proto index d73357cb944..2d9a1aa8519 100644 --- a/src/caffe/proto/caffe.proto +++ b/src/caffe/proto/caffe.proto @@ 
-112,26 +112,29 @@ message SolverState { repeated BlobProto history = 3; // The history for sgd solvers } +// NOTE // Update the next available ID when you add a new LayerParameter field. // -// LayerParameter next available ID: 23 (last added: memory_data_param) +// LayerParameter next available ID: 24 (last added: argmax_param) message LayerParameter { repeated string bottom = 2; // the name of the bottom blobs repeated string top = 3; // the name of the top blobs optional string name = 4; // the layer name + // NOTE // Add new LayerTypes to the enum below in lexicographical order (other than // starting with NONE), starting with the next available ID in the comment // line above the enum. Update the next available ID when you add a new // LayerType. // - // LayerType next available ID: 30 (last added: MEMORY_DATA) + // LayerType next available ID: 31 (last added: ARGMAX) enum LayerType { // "NONE" layer type is 0th enum element so that we don't cause confusion // by defaulting to an existent LayerType (instead, should usually error if // the type is unspecified). NONE = 0; ACCURACY = 1; + ARGMAX = 30; BNLL = 2; CONCAT = 3; CONVOLUTION = 4; @@ -160,7 +163,6 @@ message LayerParameter { SPLIT = 22; TANH = 23; WINDOW_DATA = 24; - ARGMAX = 30; } optional LayerType type = 5; // the layer type from the enum above @@ -173,6 +175,7 @@ message LayerParameter { repeated float weight_decay = 8; // Parameters for particular layer types. + optional ArgMaxParameter argmax_param = 23; optional ConcatParameter concat_param = 9; optional ConvolutionParameter convolution_param = 10; optional DataParameter data_param = 11; @@ -187,7 +190,6 @@ message LayerParameter { optional PoolingParameter pooling_param = 19; optional PowerParameter power_param = 21; optional WindowDataParameter window_data_param = 20; - optional ArgMaxParameter argmax_param = 23; // DEPRECATED: The layer parameters specified as a V0LayerParameter. 
// This should never be used by any code except to upgrade to the new @@ -196,10 +198,9 @@ message LayerParameter { } // Message that stores parameters used by ArgMaxLayer - message ArgMaxParameter { - // If true produce pairs (argmax, maxval) - optional bool out_max_val = 1 [default = false]; + // If true produce pairs (argmax, maxval) + optional bool out_max_val = 1 [default = false]; } // Message that stores parameters used by ConcatLayer From a31dc6537e79f51ff6ad6f9d863af0d8ece8ee7a Mon Sep 17 00:00:00 2001 From: Sergey Karayev Date: Tue, 20 May 2014 21:32:07 -0700 Subject: [PATCH 12/14] Documented ArgMax layer in vision_layers.hpp --- include/caffe/vision_layers.hpp | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/include/caffe/vision_layers.hpp b/include/caffe/vision_layers.hpp index 21e3fd0126c..29daf09f656 100644 --- a/include/caffe/vision_layers.hpp +++ b/include/caffe/vision_layers.hpp @@ -17,10 +17,14 @@ namespace caffe { -/* -ConcatLayer - Takes at least two blobs and concatenates them along either num or - channel dim, outputting the result. +/* ArgmaxLayer + Compute the index of the max value across all (channels x height x width). + [In the future, can take specific dimension.] + Intended for use after a classification layer to produce prediction. + If parameter out_max_val is set to true, then output is a vector of pairs + (max_ind, max_val) for each image. + + NOTE: does not implement Backwards operation. */ template class ArgMaxLayer : public Layer { @@ -33,7 +37,6 @@ class ArgMaxLayer : public Layer { protected: virtual Dtype Forward_cpu(const vector*>& bottom, vector*>* top); - // For now ArgMax layer should not be used to compute backward operations. 
virtual void Backward_cpu(const vector*>& top, const bool propagate_down, vector*>* bottom) { NOT_IMPLEMENTED; @@ -41,6 +44,10 @@ class ArgMaxLayer : public Layer { bool out_max_val_; }; +/* ConcatLayer + Takes at least two blobs and concatenates them along either num or + channel dim, outputting the result. +*/ template class ConcatLayer : public Layer { public: @@ -126,6 +133,8 @@ class EltwiseProductLayer : public Layer { const bool propagate_down, vector*>* bottom); }; +/* FlattenLayer +*/ template class FlattenLayer : public Layer { public: @@ -308,6 +317,8 @@ class MemoryDataLayer : public Layer { int pos_; }; +/* PoolingLayer +*/ template class PoolingLayer : public Layer { public: From 0033f9c607f7f5079f38e569b7949e3073d2fc01 Mon Sep 17 00:00:00 2001 From: Sergey Karayev Date: Tue, 20 May 2014 21:32:19 -0700 Subject: [PATCH 13/14] Fixed lint errors due to ArgmaxLayer --- src/caffe/layers/argmax_layer.cpp | 11 +++++------ src/caffe/test/test_argmax_layer.cpp | 2 +- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/src/caffe/layers/argmax_layer.cpp b/src/caffe/layers/argmax_layer.cpp index 33ec1d34089..e04588d4baa 100644 --- a/src/caffe/layers/argmax_layer.cpp +++ b/src/caffe/layers/argmax_layer.cpp @@ -15,11 +15,11 @@ void ArgMaxLayer::SetUp(const vector*>& bottom, CHECK_EQ(bottom.size(), 1) << "ArgMaxLayer Layer takes 1 input."; CHECK_EQ(top->size(), 1) << "ArgMaxLayer Layer takes 1 output."; out_max_val_ = this->layer_param_.argmax_param().out_max_val(); - // Produces max_ind and max_val - if (out_max_val_) { + if (out_max_val_) { + // Produces max_ind and max_val (*top)[0]->Reshape(bottom[0]->num(), 2, 1, 1); - } // Produces only max_ind - else { + } else { + // Produces only max_ind (*top)[0]->Reshape(bottom[0]->num(), 1, 1, 1); } } @@ -44,8 +44,7 @@ Dtype ArgMaxLayer::Forward_cpu(const vector*>& bottom, if (out_max_val_) { top_data[i * 2] = max_ind; top_data[i * 2 + 1] = max_val; - } - else { + } else { top_data[i] = max_ind; } } diff --git 
a/src/caffe/test/test_argmax_layer.cpp b/src/caffe/test/test_argmax_layer.cpp index 627dd575904..c4150e5abe0 100644 --- a/src/caffe/test/test_argmax_layer.cpp +++ b/src/caffe/test/test_argmax_layer.cpp @@ -103,7 +103,7 @@ TYPED_TEST(ArgMaxLayerTest, TestCPUMaxVal) { EXPECT_LE(top_data[i], dim); max_ind = top_data[i * 2]; max_val = top_data[i * 2 + 1]; - EXPECT_EQ(bottom_data[i * dim + max_ind],max_val); + EXPECT_EQ(bottom_data[i * dim + max_ind], max_val); for (int j = 0; j < dim; ++j) { EXPECT_LE(bottom_data[i * dim + j], max_val); } From 4d52ca7df5283bea4ee0abf95039538387d007c0 Mon Sep 17 00:00:00 2001 From: Sergey Karayev Date: Tue, 20 May 2014 21:48:23 -0700 Subject: [PATCH 14/14] setting canonical random seed --- src/caffe/test/test_argmax_layer.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/caffe/test/test_argmax_layer.cpp b/src/caffe/test/test_argmax_layer.cpp index c4150e5abe0..ab2d75b322c 100644 --- a/src/caffe/test/test_argmax_layer.cpp +++ b/src/caffe/test/test_argmax_layer.cpp @@ -22,7 +22,7 @@ class ArgMaxLayerTest : public ::testing::Test { ArgMaxLayerTest() : blob_bottom_(new Blob(20, 10, 1, 1)), blob_top_(new Blob()) { - Caffe::set_random_seed(1701); + Caffe::set_random_seed(this->seed_); // fill the values FillerParameter filler_param; GaussianFiller filler(filler_param);