Skip to content

Commit

Permalink
Merge pull request BVLC#2972 from jeffdonahue/concat-backward-fix
Browse files Browse the repository at this point in the history
[fix] properly backprop through ConcatLayer with propagate_down set
shelhamer committed Aug 25, 2015
2 parents f1cc905 + 6a7d4d6 commit c54f2c4
Showing 3 changed files with 25 additions and 14 deletions.
13 changes: 7 additions & 6 deletions src/caffe/layers/concat_layer.cpp
Original file line number Diff line number Diff line change
@@ -76,13 +76,14 @@ void ConcatLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
int offset_concat_axis = 0;
const int top_concat_axis = top[0]->shape(concat_axis_);
for (int i = 0; i < bottom.size(); ++i) {
if (!propagate_down[i]) { continue; }
Dtype* bottom_diff = bottom[i]->mutable_cpu_diff();
const int bottom_concat_axis = bottom[i]->shape(concat_axis_);
for (int n = 0; n < num_concats_; ++n) {
caffe_copy(bottom_concat_axis * concat_input_size_, top_diff +
(n * top_concat_axis + offset_concat_axis) * concat_input_size_,
bottom_diff + n * bottom_concat_axis * concat_input_size_);
if (propagate_down[i]) {
Dtype* bottom_diff = bottom[i]->mutable_cpu_diff();
for (int n = 0; n < num_concats_; ++n) {
caffe_copy(bottom_concat_axis * concat_input_size_, top_diff +
(n * top_concat_axis + offset_concat_axis) * concat_input_size_,
bottom_diff + n * bottom_concat_axis * concat_input_size_);
}
}
offset_concat_axis += bottom_concat_axis;
}
17 changes: 9 additions & 8 deletions src/caffe/layers/concat_layer.cu
Original file line number Diff line number Diff line change
@@ -53,15 +53,16 @@ void ConcatLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const int top_concat_axis = top[0]->shape(concat_axis_);
const bool kForward = false;
for (int i = 0; i < bottom.size(); ++i) {
if (!propagate_down[i]) { continue; }
Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
const int bottom_concat_axis = bottom[i]->shape(concat_axis_);
const int bottom_concat_size = bottom_concat_axis * concat_input_size_;
const int nthreads = bottom_concat_size * num_concats_;
Concat<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(
nthreads, top_diff, kForward, num_concats_, concat_input_size_,
top_concat_axis, bottom_concat_axis, offset_concat_axis, bottom_diff);
if (propagate_down[i]) {
Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
const int bottom_concat_size = bottom_concat_axis * concat_input_size_;
const int nthreads = bottom_concat_size * num_concats_;
Concat<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(
nthreads, top_diff, kForward, num_concats_, concat_input_size_,
top_concat_axis, bottom_concat_axis, offset_concat_axis, bottom_diff);
}
offset_concat_axis += bottom_concat_axis;
}
}
9 changes: 9 additions & 0 deletions src/caffe/test/test_concat_layer.cpp
Original file line number Diff line number Diff line change
@@ -173,4 +173,13 @@ TYPED_TEST(ConcatLayerTest, TestGradientChannels) {
this->blob_top_vec_);
}

// Regression test added with the propagate_down fix in this commit:
// checks the gradient of ConcatLayer when backprop is requested for only
// one bottom blob, so Backward_{cpu,gpu} must skip the other bottoms'
// diffs while still advancing offset_concat_axis past them.
TYPED_TEST(ConcatLayerTest, TestGradientChannelsBottomOneOnly) {
typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
ConcatLayer<Dtype> layer(layer_param);
// Loose tolerances (stepsize 1e-2, threshold 1e-2) for numeric gradients.
GradientChecker<Dtype> checker(1e-2, 1e-2);
// The trailing argument (1) presumably restricts the check to bottom blob
// index 1 only, matching the "BottomOneOnly" name — TODO confirm against
// GradientChecker::CheckGradient's check_bottom parameter.
// NOTE(review): blob_bottom_vec_0_ / blob_top_vec_ are fixture members
// defined outside this view; assumed to be the channel-concat setup used
// by the sibling TestGradientChannels test.
checker.CheckGradient(&layer, this->blob_bottom_vec_0_,
this->blob_top_vec_, 1);
}

} // namespace caffe

0 comments on commit c54f2c4

Please sign in to comment.