diff --git a/include/caffe/layers/clip_layer.hpp b/include/caffe/layers/clip_layer.hpp
new file mode 100644
index 00000000000..2788193e3ec
--- /dev/null
+++ b/include/caffe/layers/clip_layer.hpp
@@ -0,0 +1,75 @@
+#ifndef CAFFE_CLIP_LAYER_HPP_
+#define CAFFE_CLIP_LAYER_HPP_
+
+#include <vector>
+
+#include "caffe/blob.hpp"
+#include "caffe/layer.hpp"
+#include "caffe/proto/caffe.pb.h"
+
+#include "caffe/layers/neuron_layer.hpp"
+
+namespace caffe {
+
+/**
+ * @brief Clip: @f$ y = \max(min, \min(max, x)) @f$.
+ */
+template <typename Dtype>
+class ClipLayer : public NeuronLayer<Dtype> {
+ public:
+  /**
+   * @param param provides ClipParameter clip_param,
+   *     with ClipLayer options:
+   *   - min
+   *   - max
+   */
+  explicit ClipLayer(const LayerParameter& param)
+      : NeuronLayer<Dtype>(param) {}
+
+  virtual inline const char* type() const { return "Clip"; }
+
+ protected:
+  /**
+   * @param bottom input Blob vector (length 1)
+   *   -# @f$ (N \times C \times H \times W) @f$
+   *      the inputs @f$ x @f$
+   * @param top output Blob vector (length 1)
+   *   -# @f$ (N \times C \times H \times W) @f$
+   *      the computed outputs @f$
+   *        y = \max(min, \min(max, x))
+   *      @f$
+   */
+  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
+      const vector<Blob<Dtype>*>& top);
+  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
+      const vector<Blob<Dtype>*>& top);
+
+  /**
+   * @brief Computes the error gradient w.r.t. the clipped inputs.
+   *
+   * @param top output Blob vector (length 1), providing the error gradient
+   *      with respect to the outputs
+   *   -# @f$ (N \times C \times H \times W) @f$
+   *      containing error gradients @f$ \frac{\partial E}{\partial y} @f$
+   *      with respect to computed outputs @f$ y @f$
+   * @param propagate_down see Layer::Backward.
+   * @param bottom input Blob vector (length 1)
+   *   -# @f$ (N \times C \times H \times W) @f$
+   *      the inputs @f$ x @f$; Backward fills their diff with
+   *      gradients @f$
+   *        \frac{\partial E}{\partial x} = \left\{
+   *        \begin{array}{lr}
+   *            0 & \mathrm{if} \; x < min \vee x > max \\
+   *            \frac{\partial E}{\partial y} & \mathrm{if} \; x \ge min \wedge x \le max
+   *        \end{array} \right.
+   *      @f$
+   */
+  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
+      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
+  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
+      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
+};
+
+}  // namespace caffe
+
+#endif  // CAFFE_CLIP_LAYER_HPP_
diff --git a/src/caffe/layer_factory.cpp b/src/caffe/layer_factory.cpp
index 9f9026b1dde..d9984431ace 100644
--- a/src/caffe/layer_factory.cpp
+++ b/src/caffe/layer_factory.cpp
@@ -7,6 +7,7 @@
 
 #include "caffe/layer.hpp"
 #include "caffe/layer_factory.hpp"
+#include "caffe/layers/clip_layer.hpp"
 #include "caffe/layers/conv_layer.hpp"
 #include "caffe/layers/deconv_layer.hpp"
 #include "caffe/layers/lrn_layer.hpp"
diff --git a/src/caffe/layers/clip_layer.cpp b/src/caffe/layers/clip_layer.cpp
new file mode 100644
index 00000000000..76387011fa3
--- /dev/null
+++ b/src/caffe/layers/clip_layer.cpp
@@ -0,0 +1,50 @@
+#include <algorithm>
+#include <vector>
+#include "caffe/layers/clip_layer.hpp"
+
+namespace caffe {
+
+template <typename Dtype>
+void ClipLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
+    const vector<Blob<Dtype>*>& top) {
+  const Dtype* bottom_data = bottom[0]->cpu_data();
+  Dtype* top_data = top[0]->mutable_cpu_data();
+  const int count = bottom[0]->count();
+
+  Dtype min = this->layer_param_.clip_param().min();
+  Dtype max = this->layer_param_.clip_param().max();
+
+  for (int i = 0; i < count; ++i) {
+    top_data[i] = std::max(min, std::min(bottom_data[i], max));
+  }
+}
+
+template <typename Dtype>
+void ClipLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
+    const vector<bool>& propagate_down,
+    const vector<Blob<Dtype>*>& bottom) {
+  if (propagate_down[0]) {
+    const Dtype* bottom_data = bottom[0]->cpu_data();
+    const Dtype* top_diff = top[0]->cpu_diff();
+    Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
+    const int count = bottom[0]->count();
+
+    Dtype min = this->layer_param_.clip_param().min();
+    Dtype max = this->layer_param_.clip_param().max();
+
+    for (int i = 0; i < count; ++i) {
+      bottom_diff[i] = top_diff[i] * (
+          bottom_data[i] >= min && bottom_data[i] <= max);
+    }
+  }
+}
+
+
+#ifdef CPU_ONLY
+STUB_GPU(ClipLayer);
+#endif
+
+INSTANTIATE_CLASS(ClipLayer);
+REGISTER_LAYER_CLASS(Clip);
+
+}  // namespace caffe
diff --git a/src/caffe/layers/clip_layer.cu b/src/caffe/layers/clip_layer.cu
new file mode 100644
index 00000000000..f780447fbcf
--- /dev/null
+++ b/src/caffe/layers/clip_layer.cu
@@ -0,0 +1,66 @@
+#include <vector>
+#include "caffe/layers/clip_layer.hpp"
+#include "caffe/util/math_functions.hpp"
+
+namespace caffe {
+
+__global__ void ClipForward(const int n, const float* in, float* out,
+    float p_min, float p_max) {
+  CUDA_KERNEL_LOOP(index, n) {
+    out[index] = fmaxf(p_min, fminf(in[index], p_max));
+  }
+}
+
+__global__ void ClipForward(const int n, const double* in, double* out,
+    double p_min, double p_max) {
+  CUDA_KERNEL_LOOP(index, n) {
+    out[index] = fmax(p_min, fmin(in[index], p_max));
+  }
+}
+
+template <typename Dtype>
+void ClipLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
+    const vector<Blob<Dtype>*>& top) {
+  const Dtype* bottom_data = bottom[0]->gpu_data();
+  Dtype* top_data = top[0]->mutable_gpu_data();
+  const int count = bottom[0]->count();
+  Dtype p_min = this->layer_param_.clip_param().min();
+  Dtype p_max = this->layer_param_.clip_param().max();
+  // NOLINT_NEXT_LINE(whitespace/operators)
+  ClipForward<<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
+      count, bottom_data, top_data, p_min, p_max);
+  CUDA_POST_KERNEL_CHECK;
+}
+
+template <typename Dtype>
+__global__ void ClipBackward(const int n, const Dtype* in_diff,
+    const Dtype* in_data, Dtype* out_diff, Dtype p_min, Dtype p_max) {
+  CUDA_KERNEL_LOOP(index, n) {
+    out_diff[index] = in_diff[index] * (
+        in_data[index] >= p_min && in_data[index] <= p_max);
+  }
+}
+
+template <typename Dtype>
+void ClipLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
+    const vector<bool>& propagate_down,
+    const vector<Blob<Dtype>*>& bottom) {
+  if (propagate_down[0]) {
+    const Dtype* bottom_data = bottom[0]->gpu_data();
+    const Dtype* top_diff = top[0]->gpu_diff();
+    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
+    const int count = bottom[0]->count();
+    Dtype p_min = this->layer_param_.clip_param().min();
+    Dtype p_max = this->layer_param_.clip_param().max();
+    // NOLINT_NEXT_LINE(whitespace/operators)
+    ClipBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
+        count, top_diff, bottom_data, bottom_diff, p_min, p_max);
+    CUDA_POST_KERNEL_CHECK;
+  }
+}
+
+
+INSTANTIATE_LAYER_GPU_FUNCS(ClipLayer);
+
+
+}  // namespace caffe
diff --git a/src/caffe/proto/caffe.proto b/src/caffe/proto/caffe.proto
index f784aa9600c..5c235c6f87c 100644
--- a/src/caffe/proto/caffe.proto
+++ b/src/caffe/proto/caffe.proto
@@ -322,7 +322,7 @@ message ParamSpec {
 // NOTE
 // Update the next available ID when you add a new LayerParameter field.
 //
-// LayerParameter next available layer-specific ID: 148 (last added: swish_param)
+// LayerParameter next available layer-specific ID: 149 (last added: clip_param)
 message LayerParameter {
   optional string name = 1; // the layer name
   optional string type = 2; // the layer type
@@ -378,6 +378,7 @@ message LayerParameter {
   optional ArgMaxParameter argmax_param = 103;
   optional BatchNormParameter batch_norm_param = 139;
   optional BiasParameter bias_param = 141;
+  optional ClipParameter clip_param = 148;
   optional ConcatParameter concat_param = 104;
   optional ContrastiveLossParameter contrastive_loss_param = 105;
   optional ConvolutionParameter convolution_param = 106;
@@ -505,6 +506,12 @@ message ArgMaxParameter {
   optional int32 axis = 3;
 }
 
+// Message that stores parameters used by ClipLayer
+message ClipParameter {
+  required float min = 1;
+  required float max = 2;
+}
+
 message ConcatParameter {
   // The axis along which to concatenate -- may be negative to index from the
   // end (e.g., -1 for the last axis).  Other axes must have the
diff --git a/src/caffe/test/test_neuron_layer.cpp b/src/caffe/test/test_neuron_layer.cpp
index 83d80fcd895..5865e08e552 100644
--- a/src/caffe/test/test_neuron_layer.cpp
+++ b/src/caffe/test/test_neuron_layer.cpp
@@ -10,6 +10,7 @@
 
 #include "caffe/layers/absval_layer.hpp"
 #include "caffe/layers/bnll_layer.hpp"
+#include "caffe/layers/clip_layer.hpp"
 #include "caffe/layers/dropout_layer.hpp"
 #include "caffe/layers/elu_layer.hpp"
 #include "caffe/layers/exp_layer.hpp"
@@ -206,6 +207,38 @@ TYPED_TEST(NeuronLayerTest, TestAbsGradient) {
       this->blob_top_vec_);
 }
 
+TYPED_TEST(NeuronLayerTest, TestClip) {
+  typedef typename TypeParam::Dtype Dtype;
+  LayerParameter layer_param;
+  CHECK(google::protobuf::TextFormat::ParseFromString(
+      "clip_param { min: -1, max: 2 }", &layer_param));
+  ClipLayer<Dtype> layer(layer_param);
+  layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+  layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
+  // Now, check values
+  const Dtype* bottom_data = this->blob_bottom_->cpu_data();
+  const Dtype* top_data = this->blob_top_->cpu_data();
+  for (int i = 0; i < this->blob_bottom_->count(); ++i) {
+    EXPECT_GE(top_data[i], -1);
+    EXPECT_LE(top_data[i], 2);
+    EXPECT_TRUE(bottom_data[i] > -1 || top_data[i] == -1);
+    EXPECT_TRUE(bottom_data[i] < 2 || top_data[i] == 2);
+    EXPECT_TRUE(!(bottom_data[i] >= -1 && bottom_data[i] <= 2)
+        || top_data[i] == bottom_data[i]);
+  }
+}
+
+TYPED_TEST(NeuronLayerTest, TestClipGradient) {
+  typedef typename TypeParam::Dtype Dtype;
+  LayerParameter layer_param;
+  CHECK(google::protobuf::TextFormat::ParseFromString(
+      "clip_param { min: -1, max: 2 }", &layer_param));
+  ClipLayer<Dtype> layer(layer_param);
+  GradientChecker<Dtype> checker(1e-2, 1e-3);
+  checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_,
+      this->blob_top_vec_);
+}
+
 TYPED_TEST(NeuronLayerTest, TestReLU) {
   typedef typename TypeParam::Dtype Dtype;
   LayerParameter layer_param;
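
Usage note (not part of the patch): to try the layer in a network definition, a minimal prototxt snippet might look like the sketch below. The layer and blob names "clip1" and "data" are illustrative only; the type string "Clip" and the clip_param fields come from the registration and proto changes in this diff.

    layer {
      name: "clip1"
      type: "Clip"
      bottom: "data"
      top: "clip1"
      clip_param {
        min: -1
        max: 2
      }
    }

With these parameters the layer computes y = max(-1, min(2, x)) elementwise, the same bounds exercised by TestClip above.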