Remove the canonicalize pattern for folding a pad op into the following conv op.

This essentially rolls back cl/305641881. The pattern can hurt performance
because the resulting operation cannot be fully tiled in the Linalg
transformation. Not every pipeline wants this fold, so remove it from the
canonicalization patterns.
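
For reference, this is roughly the rewrite the removed pattern performed, condensed from the deleted f32 test case below; %input and %filter are placeholder names, and the dimension_numbers and group-count attributes are elided:

// Before: an explicit xla_hlo.pad feeds the convolution.
%zero = xla_hlo.constant dense<0.000000e+00> : tensor<f32>
%padded = "xla_hlo.pad"(%input, %zero) {
  edge_padding_high = dense<[0, 3, 3, 0]> : tensor<4xi64>,
  edge_padding_low = dense<[0, 3, 3, 0]> : tensor<4xi64>,
  interior_padding = dense<0> : tensor<4xi64>
} : (tensor<1x32x32x3xf32>, tensor<f32>) -> tensor<1x38x38x3xf32>
%out = "xla_hlo.convolution"(%padded, %filter) {
  // ... dimension_numbers and group counts elided ...
  padding = dense<0> : tensor<2x2xi64>,
  window_strides = dense<2> : tensor<2xi64>
} : (tensor<1x38x38x3xf32>, tensor<7x7x3x64xf32>) -> tensor<1x16x16x64xf32>

// After: the pad was absorbed into the conv's padding attribute
// (edge padding of 3 on each side of both spatial dims).
%out = "xla_hlo.convolution"(%input, %filter) {
  // ... same elided attributes ...
  padding = dense<3> : tensor<2x2xi64>,
  window_strides = dense<2> : tensor<2xi64>
} : (tensor<1x32x32x3xf32>, tensor<7x7x3x64xf32>) -> tensor<1x16x16x64xf32>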

PiperOrigin-RevId: 317302072
Change-Id: I19aa64e14eecccfd738ad3f775f3670974bc68f9
hanhanW authored and tensorflower-gardener committed Jun 19, 2020
1 parent 968ea60 commit 0c7e61d
Showing 4 changed files with 0 additions and 174 deletions.
56 changes: 0 additions & 56 deletions tensorflow/compiler/mlir/xla/ir/hlo_ops.cc
@@ -106,53 +106,6 @@ DenseIntElementsAttr BuildSliceLimits(DenseIntElementsAttr start_indices,
return GetI64ElementsAttr(slice_limits, builder);
}

// Returns the padding value of the given position. If padding_attr is a
// nullptr, returns 0.
static int64_t GetPaddingValue(DenseIntElementsAttr padding_attr,
ArrayRef<uint64_t> index) {
if (!padding_attr) return 0;
return padding_attr.getValue<int64_t>(index);
}

static bool IsOnlyPaddingSpatialDims(Value lhs,
ConvDimensionNumbers dimension_numbers,
DenseIntElementsAttr edge_padding_low,
DenseIntElementsAttr edge_padding_high) {
const int64_t batch_dim = dimension_numbers.input_batch_dimension().getInt();
const int64_t feature_dim =
dimension_numbers.input_feature_dimension().getInt();
if (edge_padding_low.getValue<int64_t>(batch_dim) ||
edge_padding_high.getValue<int64_t>(batch_dim))
return false;
if (edge_padding_low.getValue<int64_t>(feature_dim) ||
edge_padding_high.getValue<int64_t>(feature_dim))
return false;
return true;
}

DenseIntElementsAttr BuildConvPaddingAttrs(
DenseIntElementsAttr edge_padding_low,
DenseIntElementsAttr edge_padding_high, DenseIntElementsAttr padding_attr,
ConvDimensionNumbers dimension_numbers, Builder* builder) {
SmallVector<int64_t, 4> padding_low, padding_high;
for (const auto& dim : dimension_numbers.input_spatial_dimensions()) {
unsigned i = dim.getZExtValue();
padding_low.push_back(edge_padding_low.getValue<int64_t>(i));
padding_high.push_back(edge_padding_high.getValue<int64_t>(i));
}

int rank = padding_low.size();
SmallVector<int64_t, 8> padding;
for (unsigned i = 0, e = rank; i < e; ++i) {
padding.push_back(GetPaddingValue(padding_attr, {i, 0}) + padding_low[i]);
padding.push_back(GetPaddingValue(padding_attr, {i, 1}) + padding_high[i]);
}
// padding_attr.getType() doesn't work because it is an optional attribute,
// which can be a nullptr.
auto type = RankedTensorType::get({rank, 2}, builder->getIntegerType(64));
return DenseIntElementsAttr::get(type, padding);
}

#include "tensorflow/compiler/mlir/xla/transforms/generated_canonicalize.inc"
} // namespace

@@ -2153,14 +2106,5 @@ LogicalResult deriveShapeFromFirstOperand(
return success();
}

//===----------------------------------------------------------------------===//
// ConvOp
//===----------------------------------------------------------------------===//

void ConvOp::getCanonicalizationPatterns(OwningRewritePatternList& results,
MLIRContext* context) {
results.insert<FoldPadIntoConv>(context);
}

} // namespace xla_hlo
} // namespace mlir
2 changes: 0 additions & 2 deletions tensorflow/compiler/mlir/xla/ir/hlo_ops.td
@@ -929,8 +929,6 @@ def HLO_ConvOp : HLO_Op<"convolution", [NoSideEffect]>, BASE_HLO_ConvOp {
);

let results = (outs HLO_Tensor);

let hasCanonicalizer = 1;
}

def HLO_CopyOp: HLO_Op<"copy", [NoSideEffect, SameOperandsAndResultType]>, BASE_HLO_CopyOp {
65 changes: 0 additions & 65 deletions tensorflow/compiler/mlir/xla/tests/canonicalize.mlir
@@ -415,71 +415,6 @@ func @fold_copy(%arg : tensor<1x4xf32>) -> tensor<1x4xf32> {
return %0 : tensor<1x4xf32>
}

// CHECK-LABEL: func @fold_pad_into_conv_f32
func @fold_pad_into_conv_f32(%arg0 : tensor<1x32x32x3xf32>,
%arg1 : tensor<7x7x3x64xf32>)
-> tensor<1x16x16x64xf32> {
// CHECK-NOT: xla_hlo.pad
// CHECK: xla_hlo.convolution
// CHECK-SAME: padding = dense<3> : tensor<2x2xi64>
%0 = xla_hlo.constant dense<0.000000e+00> : tensor<f32>
%1 = "xla_hlo.pad"(%arg0, %0) {
edge_padding_high = dense<[0, 3, 3, 0]> : tensor<4xi64>,
edge_padding_low = dense<[0, 3, 3, 0]> : tensor<4xi64>,
interior_padding = dense<0> : tensor<4xi64>
} : (tensor<1x32x32x3xf32>, tensor<f32>) -> tensor<1x38x38x3xf32>
%2 = "xla_hlo.convolution"(%1, %arg1) {
batch_group_count = 1 : i64,
dimension_numbers = {
input_batch_dimension = 0 : i64,
input_feature_dimension = 3 : i64,
input_spatial_dimensions = dense<[1, 2]> : tensor<2xi64>,
kernel_input_feature_dimension = 2 : i64,
kernel_output_feature_dimension = 3 : i64,
kernel_spatial_dimensions = dense<[0, 1]> : tensor<2xi64>,
output_batch_dimension = 0 : i64,
output_feature_dimension = 3 : i64,
output_spatial_dimensions = dense<[1, 2]> : tensor<2xi64>
},
feature_group_count = 1 : i64,
padding = dense<0> : tensor<2x2xi64>,
window_strides = dense<2> : tensor<2xi64>
} : (tensor<1x38x38x3xf32>, tensor<7x7x3x64xf32>) -> tensor<1x16x16x64xf32>
return %2 : tensor<1x16x16x64xf32>
}

// CHECK-LABEL: func @fold_pad_into_conv_i32
func @fold_pad_into_conv_i32(%arg0 : tensor<1x32x32x3xi32>,
%arg1 : tensor<7x7x3x64xi32>)
-> tensor<1x16x16x64xi32> {
// CHECK-NOT: xla_hlo.pad
// CHECK: xla_hlo.convolution
// CHECK-SAME: padding = dense<3> : tensor<2x2xi64>
%0 = xla_hlo.constant dense<0> : tensor<i32>
%1 = "xla_hlo.pad"(%arg0, %0) {
edge_padding_high = dense<[0, 3, 3, 0]> : tensor<4xi64>,
edge_padding_low = dense<[0, 3, 3, 0]> : tensor<4xi64>,
interior_padding = dense<0> : tensor<4xi64>
} : (tensor<1x32x32x3xi32>, tensor<i32>) -> tensor<1x38x38x3xi32>
%2 = "xla_hlo.convolution"(%1, %arg1) {
batch_group_count = 1 : i64,
dimension_numbers = {
input_batch_dimension = 0 : i64,
input_feature_dimension = 3 : i64,
input_spatial_dimensions = dense<[1, 2]> : tensor<2xi64>,
kernel_input_feature_dimension = 2 : i64,
kernel_output_feature_dimension = 3 : i64,
kernel_spatial_dimensions = dense<[0, 1]> : tensor<2xi64>,
output_batch_dimension = 0 : i64,
output_feature_dimension = 3 : i64,
output_spatial_dimensions = dense<[1, 2]> : tensor<2xi64>
},
feature_group_count = 1 : i64,
window_strides = dense<2> : tensor<2xi64>
} : (tensor<1x38x38x3xi32>, tensor<7x7x3x64xi32>) -> tensor<1x16x16x64xi32>
return %2 : tensor<1x16x16x64xi32>
}

// CHECK-LABEL: func @dynamic_reshape_not_actually_dynamic
func @dynamic_reshape_not_actually_dynamic(%arg0: tensor<4xf32>, %shape: tensor<2xindex>) -> tensor<4x1xf32> {
// CHECK: xla_hlo.reshape
51 changes: 0 additions & 51 deletions tensorflow/compiler/mlir/xla/transforms/canonicalize.td
@@ -28,54 +28,3 @@ def UnaryEinsumToEinsum : Pat<
(HLO_UnaryEinsumOp $operand, $equation),
(HLO_EinsumOp (HLO_ConstOp (GetScalarOfType<1> $operand)),
$operand, (UnaryToBinaryEinsumEq $equation))>;

//===----------------------------------------------------------------------===//
// Conv op patterns.
//===----------------------------------------------------------------------===//

def IsZero : Attr<CPred<
"($_self.isa<DenseFPElementsAttr>() &&"
"$_self.cast<DenseFPElementsAttr>().isSplat() &&"
"$_self.cast<DenseFPElementsAttr>().getSplatValue<FloatAttr>()"
".getValue().isZero()) ||"
"($_self.isa<DenseIntElementsAttr>() &&"
"$_self.cast<DenseIntElementsAttr>().isSplat() &&"
"$_self.cast<DenseIntElementsAttr>().getSplatValue<IntegerAttr>()"
".getInt() == 0)">>;

def IsOnlyPaddingSpatialDims
: Constraint<CPred<"IsOnlyPaddingSpatialDims($0, $1, $2, $3)">>;

def BuildConvPaddingAttrs : NativeCodeCall<
"BuildConvPaddingAttrs($0, $1, $2, $3, &$_builder)">;

def FoldPadIntoConv : Pat<
(HLO_ConvOp
(HLO_PadOp $lhs,
(HLO_ConstOp IsZero:$padding_value),
$edge_padding_low,
$edge_padding_high,
IsZero:$interior_padding),
$rhs,
$window_strides,
$padding,
$lhs_dilation,
$rhs_dilation,
$dimension_numbers,
$feature_group_count,
$batch_group_count,
$precision_config),
(HLO_ConvOp
$lhs,
$rhs,
$window_strides,
(BuildConvPaddingAttrs $edge_padding_low, $edge_padding_high, $padding,
$dimension_numbers),
$lhs_dilation,
$rhs_dilation,
$dimension_numbers,
$feature_group_count,
$batch_group_count,
$precision_config),
[(IsOnlyPaddingSpatialDims $lhs, $dimension_numbers, $edge_padding_low,
$edge_padding_high)]>;
