Skip to content

Commit

Permalink
Fix slice normalization when merging dimensions of size 1
Browse files Browse the repository at this point in the history
Also add more tests.

PiperOrigin-RevId: 479662313
  • Loading branch information
ngzhian authored and xnnpack-bot committed Oct 7, 2022
1 parent 2e21482 commit 9c256f6
Show file tree
Hide file tree
Showing 5 changed files with 165 additions and 33 deletions.
1 change: 1 addition & 0 deletions BUILD.bazel
Original file line number Diff line number Diff line change
Expand Up @@ -11447,6 +11447,7 @@ xnnpack_cc_library(
gcc_copts = xnnpack_gcc_std_copts(),
msvc_copts = xnnpack_msvc_std_copts(),
deps = [
":common",
":math",
":xnnpack_h",
],
Expand Down
60 changes: 47 additions & 13 deletions src/normalization.c
Original file line number Diff line number Diff line change
Expand Up @@ -12,12 +12,12 @@

void xnn_normalize_slice(
const size_t num_dims,
const size_t* offsets,
const size_t* sizes,
const size_t* input_shape,
size_t* normalized_offsets,
size_t* normalized_input_shape,
size_t* normalized_output_shape,
const size_t offsets[XNN_MIN_ELEMENTS(1)],
const size_t sizes[XNN_MIN_ELEMENTS(1)],
const size_t input_shape[XNN_MIN_ELEMENTS(1)],
size_t normalized_offsets[XNN_MIN_ELEMENTS(XNN_MAX_TENSOR_DIMS)],
size_t normalized_input_shape[XNN_MIN_ELEMENTS(XNN_MAX_TENSOR_DIMS)],
size_t normalized_output_shape[XNN_MIN_ELEMENTS(XNN_MAX_TENSOR_DIMS)],
size_t* num_normalized_dims)
{
*num_normalized_dims = num_dims;
Expand All @@ -27,23 +27,48 @@ void xnn_normalize_slice(
normalized_output_shape[i] = 1;
}

size_t output_dims = num_dims;
bool merge_previous_dim = false;
size_t num_sliced_dims = 0;
// First normalization pass will remove all slices of size 1, by merging it to an adjacent inner dimension.
size_t num_size_one = 0;
for (size_t i = 0; i < num_dims; i++) {
const size_t begin = offsets[num_dims - 1 - i];
const size_t offset = offsets[num_dims - 1 - i];
const size_t size = sizes[num_dims - 1 - i];
const size_t input_dim = input_shape[num_dims - 1 - i];

const bool merge_current_dim = (begin == 0 && size == input_dim) || (size == 1);
// If the innermost dimension is size 1, we can't merge it anywhere, so skip it.
if (size == 1 && i != 0) {
normalized_offsets[XNN_MAX_TENSOR_DIMS - 1 - i + 1 + num_size_one] +=
offset * normalized_input_shape[XNN_MAX_TENSOR_DIMS - 1 - i + 1 + num_size_one];
normalized_input_shape[XNN_MAX_TENSOR_DIMS - 1 - i + 1 + num_size_one] *= input_dim;
normalized_output_shape[XNN_MAX_TENSOR_DIMS - 1 - i + 1 + num_size_one] *= size;
num_size_one++;
} else {
normalized_offsets[XNN_MAX_TENSOR_DIMS - 1 - i + num_size_one] = offset;
normalized_input_shape[XNN_MAX_TENSOR_DIMS - 1 - i + num_size_one] = input_dim;
normalized_output_shape[XNN_MAX_TENSOR_DIMS - 1 - i + num_size_one] = size;
}
}

size_t new_num_dims = num_dims - num_size_one;
size_t output_dims = new_num_dims;
bool merge_previous_dim = false;
size_t num_sliced_dims = 0;
for (size_t i = 0; i < new_num_dims; i++) {
const size_t offset = normalized_offsets[XNN_MAX_TENSOR_DIMS - 1 - i];
const size_t size = normalized_output_shape[XNN_MAX_TENSOR_DIMS - 1 - i];
const size_t input_dim = normalized_input_shape[XNN_MAX_TENSOR_DIMS - 1 - i];

const bool merge_current_dim = (offset == 0 && size == input_dim) ;
if (merge_previous_dim) {
normalized_offsets[XNN_MAX_TENSOR_DIMS - 1 - num_sliced_dims] =
begin * normalized_input_shape[XNN_MAX_TENSOR_DIMS - 1 - num_sliced_dims];
offset * normalized_input_shape[XNN_MAX_TENSOR_DIMS - 1 - num_sliced_dims];
normalized_input_shape[XNN_MAX_TENSOR_DIMS - 1 - num_sliced_dims] *= input_dim;
normalized_output_shape[XNN_MAX_TENSOR_DIMS - 1 - num_sliced_dims] *= size;
output_dims -= 1;
if (!merge_current_dim) {
num_sliced_dims += 1;
}
} else {
normalized_offsets[XNN_MAX_TENSOR_DIMS - 1 - num_sliced_dims] = begin;
normalized_offsets[XNN_MAX_TENSOR_DIMS - 1 - num_sliced_dims] = offset;
normalized_input_shape[XNN_MAX_TENSOR_DIMS - 1 - num_sliced_dims] = input_dim;
normalized_output_shape[XNN_MAX_TENSOR_DIMS - 1 - num_sliced_dims] = size;
if (!merge_current_dim) {
Expand All @@ -53,6 +78,15 @@ void xnn_normalize_slice(
}
merge_previous_dim = merge_current_dim;
}

// new_num_dims <= num_dims due to merge of size == 1, so we are left with some extra values at the front of the
// normalized values, set them to default values.
for (size_t i = 0; i < XNN_MAX_TENSOR_DIMS - output_dims; i++) {
normalized_offsets[i] = 0;
normalized_input_shape[i] = 1;
normalized_output_shape[i] = 1;
}

*num_normalized_dims = output_dims;
}

Expand Down
15 changes: 9 additions & 6 deletions src/xnnpack/normalization.h
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,9 @@

#include <stddef.h>

#include <xnnpack.h>
#include <xnnpack/common.h>

#ifdef __cplusplus
extern "C" {
#endif
Expand All @@ -20,12 +23,12 @@ extern "C" {
// output shape { 4, 3 } with offsets { 0, 6 }.
void xnn_normalize_slice(
const size_t num_dims,
const size_t* offsets,
const size_t* sizes,
const size_t* input_shape,
size_t* normalized_offsets,
size_t* normalized_input_shape,
size_t* normalized_output_shape,
const size_t offsets[XNN_MIN_ELEMENTS(1)],
const size_t sizes[XNN_MIN_ELEMENTS(1)],
const size_t input_shape[XNN_MIN_ELEMENTS(1)],
size_t normalized_offsets[XNN_MIN_ELEMENTS(XNN_MAX_TENSOR_DIMS)],
size_t normalized_input_shape[XNN_MIN_ELEMENTS(XNN_MAX_TENSOR_DIMS)],
size_t normalized_output_shape[XNN_MIN_ELEMENTS(XNN_MAX_TENSOR_DIMS)],
size_t* num_normalized_dims);

void xnn_normalize_transpose_permutation(
Expand Down
11 changes: 3 additions & 8 deletions test/slice-normalization-tester.h
Original file line number Diff line number Diff line change
Expand Up @@ -96,15 +96,10 @@ class SliceNormalizationTester {
actual_normalized_output_shape.data(),
&actual_num_normalized_dims);

for (size_t i = 0; i < XNN_MAX_TENSOR_DIMS - num_dims(); i++) {
EXPECT_EQ(expected_offsets()[i], actual_normalized_offsets[i])
<< "at dimension i=" << i;
EXPECT_EQ(expected_input_shape()[i], actual_normalized_input_shape[i])
<< "at dimension i=" << i;
EXPECT_EQ(expected_output_shape()[i], actual_normalized_output_shape[i])
<< "at dimension i=" << i;
}
EXPECT_EQ(expected_num_normalized_dims_, actual_num_normalized_dims);
EXPECT_EQ(expected_offsets(), actual_normalized_offsets);
EXPECT_EQ(expected_input_shape(), actual_normalized_input_shape);
EXPECT_EQ(expected_output_shape(), actual_normalized_output_shape);
}

private:
Expand Down
111 changes: 105 additions & 6 deletions test/slice-normalization.cc
Original file line number Diff line number Diff line change
Expand Up @@ -46,10 +46,10 @@ TEST(SLICE_NORMALIZATION_TEST, no_normalization_3d) {
SliceNormalizationTester()
.input_shape({3, 4, 5})
.offsets({0, 1, 0})
.sizes({1, 2, 3})
.sizes({2, 2, 3})
.expected_offsets({0, 1, 0})
.expected_input_shape({3, 4, 5})
.expected_output_shape({1, 2, 3})
.expected_output_shape({2, 2, 3})
.Test();
}

Expand All @@ -59,7 +59,7 @@ TEST(SLICE_NORMALIZATION_TEST, normalize_2d) {
.offsets({1, 0})
.sizes({2, 4})
.expected_offsets({4})
.expected_input_shape({12})
.expected_input_shape({20})
.expected_output_shape({8})
.Test();
}
Expand Down Expand Up @@ -97,23 +97,122 @@ TEST(SLICE_NORMALIZATION_TEST, normalize_6d) {
.Test();
}

TEST(SLICE_NORMALIZATION_TEST, normalize_1d_remove_size_1) {
SliceNormalizationTester()
.input_shape({3})
.offsets({1})
.sizes({1})
.expected_offsets({1})
.expected_input_shape({3})
.expected_output_shape({1})
.Test();
}

TEST(SLICE_NORMALIZATION_TEST, normalize_2d_remove_size_1) {
SliceNormalizationTester()
.input_shape({3, 4})
.offsets({1, 2})
.sizes({1, 2})
.expected_offsets({6})
.expected_input_shape({12})
.expected_output_shape({2})
.Test();
}

TEST(SLICE_NORMALIZATION_TEST, normalize_2d_remove_size_1_offset_0) {
SliceNormalizationTester()
.input_shape({3, 3})
.offsets({0, 0})
.sizes({1, 2})
.expected_offsets({0})
.expected_input_shape({9})
.expected_output_shape({2})
.Test();
}

TEST(SLICE_NORMALIZATION_TEST, normalize_2d_remove_size_1_offset_1) {
SliceNormalizationTester()
.input_shape({3, 3})
.offsets({1, 0})
.sizes({1, 3})
.expected_offsets({3})
.expected_input_shape({9})
.expected_output_shape({3})
.Test();
}

TEST(SLICE_NORMALIZATION_TEST, normalize_2d_remove_size_1_multiple) {
SliceNormalizationTester()
.input_shape({3, 4})
.offsets({1, 2})
.sizes({1, 1})
.expected_offsets({6})
.expected_input_shape({12})
.expected_output_shape({1})
.Test();
}

TEST(SLICE_NORMALIZATION_TEST, normalize_3d_remove_size_1) {
SliceNormalizationTester()
.input_shape({3, 4, 5})
.offsets({1, 1, 1})
.sizes({2, 1, 2})
.expected_offsets({1, 5})
.expected_offsets({1, 6})
.expected_input_shape({3, 20})
.expected_output_shape({2, 2})
.Test();
}

TEST(SLICE_NORMALIZATION_TEST, normalize_3d_remove_size_1_outer) {
SliceNormalizationTester()
.input_shape({3, 4, 5})
.offsets({1, 2, 1})
.sizes({1, 2, 1})
.expected_offsets({1 * 4 + 2, 1})
.expected_input_shape({12, 5})
.expected_output_shape({2, 1})
.Test();
}

TEST(SLICE_NORMALIZATION_TEST, normalize_4d_remove_size_1) {
SliceNormalizationTester()
.input_shape({3, 4, 5, 6})
.offsets({2, 1, 3, 4})
.sizes({1, 2, 1, 2})
.expected_offsets({2 * 4 + 1, 3 * 6 + 4})
.expected_input_shape({12, 30})
.expected_output_shape({2, 2})
.Test();
}

TEST(SLICE_NORMALIZATION_TEST, normalize_4d_remove_size_1_full_slice) {
SliceNormalizationTester()
.input_shape({3, 4, 5, 6})
.offsets({2, 3, 0, 4})
.sizes({1, 1, 5, 2})
.expected_offsets({2 * 4 * 5 + 3 * 5, 4})
.expected_input_shape({3 * 4 * 5, 6})
.expected_output_shape({5, 2})
.Test();
}

TEST(SLICE_NORMALIZATION_TEST, normalize_4d_remove_size_1_contiguous) {
SliceNormalizationTester()
.input_shape({3, 4, 5, 6})
.offsets({1, 2, 3, 4})
.sizes({2, 1, 1, 2})
.expected_offsets({1, 2 * 5 * 6 + 3 * 6 + 4})
.expected_input_shape({3, 120})
.expected_output_shape({2, 2})
.Test();
}

TEST(SLICE_NORMALIZATION_TEST, normalize_6d_remove_size_1) {
SliceNormalizationTester()
.input_shape({3, 4, 5, 6, 7, 8})
.offsets({1, 1, 1, 1, 1, 1})
.offsets({1, 2, 3, 4, 5, 6})
.sizes({2, 1, 1, 1, 1, 2})
.expected_offsets({1, 1680})
.expected_offsets({1, (2 * 5 * 6 * 7 * 8) + (3 * 6 * 7 * 8) + (4 * 7 * 8) + (5 * 8) + 6})
.expected_input_shape({3, 6720})
.expected_output_shape({2, 2})
.Test();
Expand Down

0 comments on commit 9c256f6

Please sign in to comment.