Skip to content

Commit f67171a

Browse files
[mlir][Linalg] Make depthwise convolution naming scheme consistent.
Names should be consistent across all operations; otherwise painful bugs will surface. Reviewed By: rsuderman. Differential Revision: https://reviews.llvm.org/D113762
1 parent c3e3c76 commit f67171a

File tree

9 files changed

+83
-83
lines changed

9 files changed

+83
-83
lines changed

mlir/include/mlir/Dialect/Linalg/IR/LinalgNamedStructuredOps.yaml

+13-13
Original file line numberDiff line numberDiff line change
@@ -69,7 +69,7 @@ metadata: !LinalgOpMetadata
6969
name: matmul_unsigned
7070
cpp_class_name: MatmulUnsignedOp
7171
doc: |-
72-
Performs a unsigned matrix multiplication of two 2D inputs.
72+
Performs an unsigned matrix multiplication of two 2D inputs.
7373
7474
Numeric casting is performed on the operands to the inner multiply, promoting
7575
them to the same data type as the accumulator/output.
@@ -1384,14 +1384,14 @@ structured_op: !LinalgStructuredOpConfig
13841384
is_unsigned_cast: false
13851385
--- !LinalgOpConfig
13861386
metadata: !LinalgOpMetadata
1387-
name: depthwise_conv1D_nw
1388-
cpp_class_name: DepthwiseConv1DNwOp
1387+
name: depthwise_conv_1d_nwc_wc
1388+
cpp_class_name: DepthwiseConv1DNwcWcOp
13891389
doc: |-
13901390
Performs depth-wise 1-D convolution.
13911391
13921392
Numeric casting is performed on the operands to the inner multiply, promoting
13931393
them to the same data type as the accumulator/output. Multiplier is set to 1
1394-
which is a special case for most dpethwise convolutions.
1394+
which is a special case for most depthwise convolutions.
13951395
implements:
13961396
- LinalgConvolutionOpInterface
13971397
structured_op: !LinalgStructuredOpConfig
@@ -1461,14 +1461,14 @@ structured_op: !LinalgStructuredOpConfig
14611461
is_unsigned_cast: false
14621462
--- !LinalgOpConfig
14631463
metadata: !LinalgOpMetadata
1464-
name: depthwise_conv2D_nhw
1465-
cpp_class_name: DepthwiseConv2DNhwOp
1464+
name: depthwise_conv_2d_nhwc_hwc
1465+
cpp_class_name: DepthwiseConv2DNhwcHwcOp
14661466
doc: |-
14671467
Performs depth-wise 2-D convolution.
14681468
14691469
Numeric casting is performed on the operands to the inner multiply, promoting
14701470
them to the same data type as the accumulator/output. Multiplier is set to 1
1471-
which is a special case for most dpethwise convolutions.
1471+
which is a special case for most depthwise convolutions.
14721472
implements:
14731473
- LinalgConvolutionOpInterface
14741474
structured_op: !LinalgStructuredOpConfig
@@ -1544,8 +1544,8 @@ structured_op: !LinalgStructuredOpConfig
15441544
is_unsigned_cast: false
15451545
--- !LinalgOpConfig
15461546
metadata: !LinalgOpMetadata
1547-
name: depthwise_conv2D_nhw_q
1548-
cpp_class_name: DepthwiseConv2DNhwQOp
1547+
name: depthwise_conv_2d_nhwc_hwc_q
1548+
cpp_class_name: DepthwiseConv2DNhwcHwcQOp
15491549
doc: |-
15501550
Performs depth-wise 2-D convolution.
15511551
@@ -1660,8 +1660,8 @@ structured_op: !LinalgStructuredOpConfig
16601660
is_unsigned_cast: false
16611661
--- !LinalgOpConfig
16621662
metadata: !LinalgOpMetadata
1663-
name: depthwise_conv2D_nhwc
1664-
cpp_class_name: DepthwiseConv2DNhwcOp
1663+
name: depthwise_conv_2d_nhwc_hwcm
1664+
cpp_class_name: DepthwiseConv2DNhwcHwcmOp
16651665
doc: |-
16661666
Performs depth-wise 2-D convolution.
16671667
@@ -1746,8 +1746,8 @@ structured_op: !LinalgStructuredOpConfig
17461746
is_unsigned_cast: false
17471747
--- !LinalgOpConfig
17481748
metadata: !LinalgOpMetadata
1749-
name: depthwise_conv2D_nhwc_q
1750-
cpp_class_name: DepthwiseConv2DNhwcQOp
1749+
name: depthwise_conv_2d_nhwc_hwcm_q
1750+
cpp_class_name: DepthwiseConv2DNhwcHwcmQOp
17511751
doc: |-
17521752
Performs depth-wise 2-D convolution.
17531753

mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp

+2-2
Original file line numberDiff line numberDiff line change
@@ -1230,7 +1230,7 @@ class DepthwiseConvConverter
12301230
loc, resultTy.getShape(), resultETy);
12311231
if (!isQuantized) {
12321232
Value conv = rewriter
1233-
.create<linalg::DepthwiseConv2DNhwcOp>(
1233+
.create<linalg::DepthwiseConv2DNhwcHwcmOp>(
12341234
loc, linalgConvTy, ValueRange{input, weight},
12351235
ValueRange{zeroTensor}, strideAttr, dilationAttr)
12361236
.getResult(0);
@@ -1254,7 +1254,7 @@ class DepthwiseConvConverter
12541254
auto kZpVal = rewriter.create<arith::ConstantOp>(loc, kZp);
12551255
Value conv =
12561256
rewriter
1257-
.create<linalg::DepthwiseConv2DNhwcQOp>(
1257+
.create<linalg::DepthwiseConv2DNhwcHwcmQOp>(
12581258
loc, linalgConvTy, ValueRange{input, weight, iZpVal, kZpVal},
12591259
ValueRange{zeroTensor}, strideAttr, dilationAttr)
12601260
.getResult(0);

mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp

+10-10
Original file line numberDiff line numberDiff line change
@@ -3037,16 +3037,16 @@ LogicalResult matchAndReplaceDepthwiseConv(Operation *operation, Value input,
30373037
loc, newInitTy, init, collapsedInitDims);
30383038

30393039
Value newConv;
3040-
if (isa<DepthwiseConv2DNhwcOp>(operation)) {
3040+
if (isa<DepthwiseConv2DNhwcHwcmOp>(operation)) {
30413041
newConv = rewriter
3042-
.create<DepthwiseConv2DNhwOp>(
3042+
.create<DepthwiseConv2DNhwcHwcOp>(
30433043
loc, newInitTy, ValueRange{input, collapsedKernel},
30443044
ValueRange{collapsedInit}, stride, dilation)
30453045
.getResult(0);
3046-
} else if (isa<DepthwiseConv2DNhwcQOp>(operation)) {
3046+
} else if (isa<DepthwiseConv2DNhwcHwcmQOp>(operation)) {
30473047
newConv =
30483048
rewriter
3049-
.create<DepthwiseConv2DNhwQOp>(
3049+
.create<DepthwiseConv2DNhwcHwcQOp>(
30503050
loc, newInitTy, ValueRange{input, collapsedKernel, iZp, kZp},
30513051
ValueRange{collapsedInit}, stride, dilation)
30523052
.getResult(0);
@@ -3062,10 +3062,10 @@ LogicalResult matchAndReplaceDepthwiseConv(Operation *operation, Value input,
30623062
}
30633063

30643064
struct SimplifyDepthwiseConvOp
3065-
: public OpRewritePattern<DepthwiseConv2DNhwcOp> {
3066-
using OpRewritePattern<DepthwiseConv2DNhwcOp>::OpRewritePattern;
3065+
: public OpRewritePattern<DepthwiseConv2DNhwcHwcmOp> {
3066+
using OpRewritePattern<DepthwiseConv2DNhwcHwcmOp>::OpRewritePattern;
30673067

3068-
LogicalResult matchAndRewrite(DepthwiseConv2DNhwcOp op,
3068+
LogicalResult matchAndRewrite(DepthwiseConv2DNhwcHwcmOp op,
30693069
PatternRewriter &rewriter) const override {
30703070
Operation *operation = op.getOperation();
30713071
Value input = op.getInputOperand(0)->get();
@@ -3082,10 +3082,10 @@ struct SimplifyDepthwiseConvOp
30823082
};
30833083

30843084
struct SimplifyDepthwiseConvQOp
3085-
: public OpRewritePattern<DepthwiseConv2DNhwcQOp> {
3086-
using OpRewritePattern<DepthwiseConv2DNhwcQOp>::OpRewritePattern;
3085+
: public OpRewritePattern<DepthwiseConv2DNhwcHwcmQOp> {
3086+
using OpRewritePattern<DepthwiseConv2DNhwcHwcmQOp>::OpRewritePattern;
30873087

3088-
LogicalResult matchAndRewrite(DepthwiseConv2DNhwcQOp op,
3088+
LogicalResult matchAndRewrite(DepthwiseConv2DNhwcHwcmQOp op,
30893089
PatternRewriter &rewriter) const override {
30903090
Operation *operation = op.getOperation();
30913091
Value input = op.getInputOperand(0)->get();

mlir/python/mlir/dialects/linalg/opdsl/ops/core_named_ops.py

+7-7
Original file line numberDiff line numberDiff line change
@@ -310,7 +310,7 @@ def conv_3d_ndhwc_dhwcf(
310310
]) * cast(U, K[D.kd, D.kh, D.kw, D.c, D.f])
311311

312312
@linalg_structured_op
313-
def depthwise_conv1D_nw(
313+
def depthwise_conv_1d_nwc_wc(
314314
I=TensorDef(T1, S.N, S.OW * S.SW + S.KW * S.DW, S.IC),
315315
K=TensorDef(T2, S.KW, S.IC),
316316
O=TensorDef(U, S.N, S.OW, S.IC, output=True),
@@ -320,7 +320,7 @@ def depthwise_conv1D_nw(
320320
321321
Numeric casting is performed on the operands to the inner multiply, promoting
322322
them to the same data type as the accumulator/output. Multiplier is set to 1
323-
which is a special case for most dpethwise convolutions.
323+
which is a special case for most depthwise convolutions.
324324
"""
325325
implements(ConvolutionOpInterface)
326326
domain(D.n, D.ow, D.ic, D.kw)
@@ -329,7 +329,7 @@ def depthwise_conv1D_nw(
329329
cast(U, K[D.kw, D.ic])
330330

331331
@linalg_structured_op
332-
def depthwise_conv2D_nhw(
332+
def depthwise_conv_2d_nhwc_hwc(
333333
I=TensorDef(T1, S.N, S.OH * S.SH + S.KH * S.DH, S.OW * S.SW + S.KW * S.DW, S.IC),
334334
K=TensorDef(T2, S.KH, S.KW, S.IC),
335335
O=TensorDef(U, S.N, S.OH, S.OW, S.IC, output=True),
@@ -339,7 +339,7 @@ def depthwise_conv2D_nhw(
339339
340340
Numeric casting is performed on the operands to the inner multiply, promoting
341341
them to the same data type as the accumulator/output. Multiplier is set to 1
342-
which is a special case for most dpethwise convolutions.
342+
which is a special case for most depthwise convolutions.
343343
"""
344344
implements(ConvolutionOpInterface)
345345
domain(D.n, D.oh, D.ow, D.ic, D.kh, D.kw)
@@ -348,7 +348,7 @@ def depthwise_conv2D_nhw(
348348
D.ic]) * cast(U, K[D.kh, D.kw, D.ic])
349349

350350
@linalg_structured_op
351-
def depthwise_conv2D_nhw_q(
351+
def depthwise_conv_2d_nhwc_hwc_q(
352352
I=TensorDef(T1, S.N, S.OH * S.SH + S.KH * S.DH, S.OW * S.SW + S.KW * S.DW, S.IC),
353353
K=TensorDef(T2, S.KH, S.KW, S.IC),
354354
IZp=ScalarDef(I32),
@@ -369,7 +369,7 @@ def depthwise_conv2D_nhw_q(
369369
(cast(U, K[D.kh, D.kw, D.ic]) - cast(U, KZp)))
370370

371371
@linalg_structured_op
372-
def depthwise_conv2D_nhwc(
372+
def depthwise_conv_2d_nhwc_hwcm(
373373
I=TensorDef(T1, S.N, S.OH * S.SH + S.KH * S.DH, S.OW * S.SW + S.KW * S.DW, S.IC),
374374
K=TensorDef(T2, S.KH, S.KW, S.IC, S.CM),
375375
O=TensorDef(U, S.N, S.OH, S.OW, S.IC, S.CM, output=True),
@@ -387,7 +387,7 @@ def depthwise_conv2D_nhwc(
387387
D.ic]) * cast(U, K[D.kh, D.kw, D.ic, D.cm])
388388

389389
@linalg_structured_op
390-
def depthwise_conv2D_nhwc_q(
390+
def depthwise_conv_2d_nhwc_hwcm_q(
391391
I=TensorDef(T1, S.N, S.OH * S.SH + S.KH * S.DH, S.OW * S.SW + S.KW * S.DW, S.IC),
392392
K=TensorDef(T2, S.KH, S.KW, S.IC, S.CM),
393393
IZp=ScalarDef(I32),

mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir

+4-4
Original file line numberDiff line numberDiff line change
@@ -1592,7 +1592,7 @@ func @depthwise_conv(%arg0 : tensor<1x7x5x3xf32>, %arg1 : tensor<3x1x3x11xf32>,
15921592
// CHECK: [[CST0:%.+]] = arith.constant 0
15931593
// CHECK: [[FILL:%.+]] = linalg.fill([[CST0]], [[INIT]])
15941594
// CHECK: [[OUT:%.+]] = linalg.init_tensor [1, 5, 5, 33]
1595-
// CHECK: [[DEPTH:%.+]] = linalg.depthwise_conv2D_nhwc {dilations = dense<1> : tensor<2xi64>, strides = dense<1> : tensor<2xi64>} ins(%arg0, %arg1 : tensor<1x7x5x3xf32>, tensor<3x1x3x11xf32>) outs([[FILL]] : tensor<1x5x5x3x11xf32>)
1595+
// CHECK: [[DEPTH:%.+]] = linalg.depthwise_conv_2d_nhwc_hwcm {dilations = dense<1> : tensor<2xi64>, strides = dense<1> : tensor<2xi64>} ins(%arg0, %arg1 : tensor<1x7x5x3xf32>, tensor<3x1x3x11xf32>) outs([[FILL]] : tensor<1x5x5x3x11xf32>)
15961596
// CHECK: [[COLLAPSED:%.+]] = linalg.tensor_collapse_shape [[DEPTH]] {{\[}}[0], [1], [2], [3, 4]]
15971597
// CHECK: [[BIAS:%.+]] = linalg.generic {indexing_maps = [#[[$MAP0]], #[[$MAP1]], #[[$MAP1]]], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%arg2, [[COLLAPSED]] : tensor<33xf32>, tensor<1x5x5x33xf32>) outs([[OUT]] : tensor<1x5x5x33xf32>) {
15981598
// CHECK: ^bb0(%arg3: f32, %arg4: f32, %arg5: f32): // no predecessors
@@ -1614,7 +1614,7 @@ func @depthwise_conv_strides(%arg0 : tensor<1x11x9x3xf32>, %arg1 : tensor<3x1x3x
16141614
// CHECK: [[CST0:%.+]] = arith.constant 0
16151615
// CHECK: [[FILL:%.+]] = linalg.fill([[CST0]], [[INIT]])
16161616
// CHECK: [[OUT:%.+]] = linalg.init_tensor [1, 5, 5, 33]
1617-
// CHECK: [[DEPTH:%.+]] = linalg.depthwise_conv2D_nhwc {dilations = dense<1> : tensor<2xi64>, strides = dense<2> : tensor<2xi64>} ins(%arg0, %arg1 : tensor<1x11x9x3xf32>, tensor<3x1x3x11xf32>) outs([[FILL]] : tensor<1x5x5x3x11xf32>)
1617+
// CHECK: [[DEPTH:%.+]] = linalg.depthwise_conv_2d_nhwc_hwcm {dilations = dense<1> : tensor<2xi64>, strides = dense<2> : tensor<2xi64>} ins(%arg0, %arg1 : tensor<1x11x9x3xf32>, tensor<3x1x3x11xf32>) outs([[FILL]] : tensor<1x5x5x3x11xf32>)
16181618
// CHECK: [[COLLAPSED:%.+]] = linalg.tensor_collapse_shape [[DEPTH]] {{\[}}[0], [1], [2], [3, 4]]
16191619
// CHECK: [[BIAS:%.+]] = linalg.generic {indexing_maps = [#[[$MAP0]], #[[$MAP1]], #[[$MAP1]]], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%arg2, [[COLLAPSED]] : tensor<33xf32>, tensor<1x5x5x33xf32>) outs([[OUT]] : tensor<1x5x5x33xf32>) {
16201620
// CHECK: ^bb0(%arg3: f32, %arg4: f32, %arg5: f32): // no predecessors
@@ -1642,7 +1642,7 @@ func @depthwise_conv_quant(%arg0 : tensor<1x12x12x4xi8>, %arg1 : tensor<3x3x4x12
16421642
// CHECK: [[OUT:%.+]] = linalg.init_tensor [1, 12, 12, 512]
16431643
// CHECK: [[C128:%.+]] = arith.constant -128
16441644
// CHECK: [[C42:%.+]] = arith.constant 42
1645-
// CHECK: [[DEPTH:%.+]] = linalg.depthwise_conv2D_nhwc_q {dilations = dense<1> : tensor<2xi64>, strides = dense<1> : tensor<2xi64>} ins([[PAD]], %arg1, [[C128]], [[C42]] : tensor<1x14x14x4xi8>, tensor<3x3x4x128xi8>, i32, i32) outs([[FILL]] : tensor<1x12x12x4x128xi32>)
1645+
// CHECK: [[DEPTH:%.+]] = linalg.depthwise_conv_2d_nhwc_hwcm_q {dilations = dense<1> : tensor<2xi64>, strides = dense<1> : tensor<2xi64>} ins([[PAD]], %arg1, [[C128]], [[C42]] : tensor<1x14x14x4xi8>, tensor<3x3x4x128xi8>, i32, i32) outs([[FILL]] : tensor<1x12x12x4x128xi32>)
16461646
// CHECK: [[COLLAPSED:%.+]] = linalg.tensor_collapse_shape [[DEPTH]] {{\[}}[0], [1], [2], [3, 4]]
16471647
// CHECK: [[BIAS:%.+]] = linalg.generic {indexing_maps = [#[[$MAP0]], #[[$MAP1]], #[[$MAP1]]], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%arg2, [[COLLAPSED]] : tensor<512xi32>, tensor<1x12x12x512xi32>) outs([[OUT]] : tensor<1x12x12x512xi32>) {
16481648
// CHECK: ^bb0(%arg3: i32, %arg4: i32, %arg5: i32): // no predecessors
@@ -1666,7 +1666,7 @@ func @depthwise_conv_quant_dilations(%arg0 : tensor<1x14x14x4xi8>, %arg1 : tenso
16661666
// CHECK: [[OUT:%.+]] = linalg.init_tensor [1, 10, 10, 512]
16671667
// CHECK: [[C128:%.+]] = arith.constant -128
16681668
// CHECK: [[C42:%.+]] = arith.constant 42
1669-
// CHECK: [[DEPTH:%.+]] = linalg.depthwise_conv2D_nhwc_q {dilations = dense<2> : tensor<2xi64>, strides = dense<1> : tensor<2xi64>} ins(%arg0, %arg1, [[C128]], [[C42]] : tensor<1x14x14x4xi8>, tensor<3x3x4x128xi8>, i32, i32) outs([[FILL]] : tensor<1x10x10x4x128xi32>)
1669+
// CHECK: [[DEPTH:%.+]] = linalg.depthwise_conv_2d_nhwc_hwcm_q {dilations = dense<2> : tensor<2xi64>, strides = dense<1> : tensor<2xi64>} ins(%arg0, %arg1, [[C128]], [[C42]] : tensor<1x14x14x4xi8>, tensor<3x3x4x128xi8>, i32, i32) outs([[FILL]] : tensor<1x10x10x4x128xi32>)
16701670
// CHECK: [[COLLAPSED:%.+]] = linalg.tensor_collapse_shape [[DEPTH]] {{\[}}[0], [1], [2], [3, 4]]
16711671
// CHECK: [[BIAS:%.+]] = linalg.generic {indexing_maps = [#[[$MAP0]], #[[$MAP1]], #[[$MAP1]]], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%arg2, [[COLLAPSED]] : tensor<512xi32>, tensor<1x10x10x512xi32>) outs([[OUT]] : tensor<1x10x10x512xi32>) {
16721672
// CHECK: ^bb0(%arg3: i32, %arg4: i32, %arg5: i32): // no predecessors

mlir/test/Dialect/Linalg/canonicalize.mlir

+4-4
Original file line numberDiff line numberDiff line change
@@ -1095,9 +1095,9 @@ func @dim_of_tiled_loop_result_no_canonicalize(%arg0: tensor<?x?xf32>, %arg1: te
10951095
func @depthwise_conv(%arg0: tensor<?x?x?x?xf32>, %arg1: tensor<?x?x?x1xf32>, %arg2: tensor<?x?x?x?x1xf32>) -> tensor<?x?x?x?x1xf32> {
10961096
// CHECK-DAG: %[[KERNEL:.+]] = linalg.tensor_collapse_shape %arg1 {{\[\[}}0], [1], [2, 3]]
10971097
// CHECK-DAG: %[[INIT:.+]] = linalg.tensor_collapse_shape %arg2 {{\[\[}}0], [1], [2], [3, 4]]
1098-
// CHECK-DAG: %[[CONV:.+]] = linalg.depthwise_conv2D_nhw {dilations = dense<1> : tensor<2xi64>, strides = dense<2> : tensor<2xi64>} ins(%arg0, %[[KERNEL]] : tensor<?x?x?x?xf32>, tensor<?x?x?xf32>) outs(%[[INIT]] : tensor<?x?x?x?xf32>)
1098+
// CHECK-DAG: %[[CONV:.+]] = linalg.depthwise_conv_2d_nhwc_hwc {dilations = dense<1> : tensor<2xi64>, strides = dense<2> : tensor<2xi64>} ins(%arg0, %[[KERNEL]] : tensor<?x?x?x?xf32>, tensor<?x?x?xf32>) outs(%[[INIT]] : tensor<?x?x?x?xf32>)
10991099
// CHECK: %[[OUT:.+]] = linalg.tensor_expand_shape %[[CONV]] {{\[\[}}0], [1], [2], [3, 4]]
1100-
%0 = linalg.depthwise_conv2D_nhwc {dilations = dense<1> : tensor<2xi64>, strides = dense<2> : tensor<2xi64>} ins(%arg0, %arg1 : tensor<?x?x?x?xf32>, tensor<?x?x?x1xf32>) outs(%arg2 : tensor<?x?x?x?x1xf32>) -> tensor<?x?x?x?x1xf32>
1100+
%0 = linalg.depthwise_conv_2d_nhwc_hwcm {dilations = dense<1> : tensor<2xi64>, strides = dense<2> : tensor<2xi64>} ins(%arg0, %arg1 : tensor<?x?x?x?xf32>, tensor<?x?x?x1xf32>) outs(%arg2 : tensor<?x?x?x?x1xf32>) -> tensor<?x?x?x?x1xf32>
11011101
return %0 : tensor<?x?x?x?x1xf32>
11021102
}
11031103

@@ -1108,8 +1108,8 @@ func @depthwise_conv(%arg0: tensor<?x?x?x?xf32>, %arg1: tensor<?x?x?x1xf32>, %ar
11081108
func @depthwise_conv_q(%arg0: tensor<?x?x?x?xi8>, %arg1: tensor<?x?x?x1xi8>, %arg2: tensor<?x?x?x?x1xi32>, %arg3 : i32, %arg4 : i32) -> tensor<?x?x?x?x1xi32> {
11091109
// CHECK-DAG: %[[KERNEL:.+]] = linalg.tensor_collapse_shape %arg1 {{\[\[}}0], [1], [2, 3]]
11101110
// CHECK-DAG: %[[INIT:.+]] = linalg.tensor_collapse_shape %arg2 {{\[\[}}0], [1], [2], [3, 4]]
1111-
// CHECK-DAG: %[[CONV:.+]] = linalg.depthwise_conv2D_nhw_q {dilations = dense<1> : tensor<2xi64>, strides = dense<2> : tensor<2xi64>} ins(%arg0, %[[KERNEL]], %arg3, %arg4 : tensor<?x?x?x?xi8>, tensor<?x?x?xi8>, i32, i32) outs(%[[INIT]] : tensor<?x?x?x?xi32>)
1111+
// CHECK-DAG: %[[CONV:.+]] = linalg.depthwise_conv_2d_nhwc_hwc_q {dilations = dense<1> : tensor<2xi64>, strides = dense<2> : tensor<2xi64>} ins(%arg0, %[[KERNEL]], %arg3, %arg4 : tensor<?x?x?x?xi8>, tensor<?x?x?xi8>, i32, i32) outs(%[[INIT]] : tensor<?x?x?x?xi32>)
11121112
// CHECK: %[[OUT:.+]] = linalg.tensor_expand_shape %[[CONV]] {{\[\[}}0], [1], [2], [3, 4]]
1113-
%0 = linalg.depthwise_conv2D_nhwc_q {dilations = dense<1> : tensor<2xi64>, strides = dense<2> : tensor<2xi64>} ins(%arg0, %arg1, %arg3, %arg4 : tensor<?x?x?x?xi8>, tensor<?x?x?x1xi8>, i32, i32) outs(%arg2 : tensor<?x?x?x?x1xi32>) -> tensor<?x?x?x?x1xi32>
1113+
%0 = linalg.depthwise_conv_2d_nhwc_hwcm_q {dilations = dense<1> : tensor<2xi64>, strides = dense<2> : tensor<2xi64>} ins(%arg0, %arg1, %arg3, %arg4 : tensor<?x?x?x?xi8>, tensor<?x?x?x1xi8>, i32, i32) outs(%arg2 : tensor<?x?x?x?x1xi32>) -> tensor<?x?x?x?x1xi32>
11141114
return %0 : tensor<?x?x?x?x1xi32>
11151115
}

mlir/test/Dialect/Linalg/generalize-named-ops.mlir

+9-9
Original file line numberDiff line numberDiff line change
@@ -49,8 +49,8 @@ func @generalize_matmul_tensor(%A : tensor<16x8xf32>, %B: tensor<8x32xf32>, %C:
4949

5050
// -----
5151

52-
func @depthwise_conv2D_nhwc(%input: memref<2x4x5x2xf32>, %filter: memref<2x2x2x3xf32>, %output: memref<2x3x4x2x3xf32>) {
53-
linalg.depthwise_conv2D_nhwc
52+
func @depthwise_conv_2d_nhwc_hwcm(%input: memref<2x4x5x2xf32>, %filter: memref<2x2x2x3xf32>, %output: memref<2x3x4x2x3xf32>) {
53+
linalg.depthwise_conv_2d_nhwc_hwcm
5454
{ dilations = dense<1> : tensor<2xi64>, strides = dense<1> : tensor<2xi64> }
5555
ins(%input, %filter : memref<2x4x5x2xf32>, memref<2x2x2x3xf32>)
5656
outs(%output : memref<2x3x4x2x3xf32>)
@@ -61,7 +61,7 @@ func @depthwise_conv2D_nhwc(%input: memref<2x4x5x2xf32>, %filter: memref<2x2x2x3
6161
// CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d5, d6, d3, d4)>
6262
// CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d0, d1, d2, d3, d4)>
6363

64-
// CHECK: func @depthwise_conv2D_nhwc
64+
// CHECK: func @depthwise_conv_2d_nhwc_hwcm
6565

6666
// CHECK: linalg.generic
6767
// CHECK-SAME: indexing_maps = [#[[MAP0]], #[[MAP1]], #[[MAP2]]]
@@ -76,8 +76,8 @@ func @depthwise_conv2D_nhwc(%input: memref<2x4x5x2xf32>, %filter: memref<2x2x2x3
7676

7777
// -----
7878

79-
func @depthwise_conv2D_nhwc(%input: memref<2x4x5x2xf32>, %filter: memref<2x2x2x3xf32>, %output: memref<2x2x3x2x3xf32>) {
80-
linalg.depthwise_conv2D_nhwc
79+
func @depthwise_conv_2d_nhwc_hwcm(%input: memref<2x4x5x2xf32>, %filter: memref<2x2x2x3xf32>, %output: memref<2x2x3x2x3xf32>) {
80+
linalg.depthwise_conv_2d_nhwc_hwcm
8181
{ dilations = dense<2> : tensor<2xi64>, strides = dense<1> : tensor<2xi64> }
8282
ins(%input, %filter : memref<2x4x5x2xf32>, memref<2x2x2x3xf32>)
8383
outs(%output : memref<2x2x3x2x3xf32>)
@@ -88,7 +88,7 @@ func @depthwise_conv2D_nhwc(%input: memref<2x4x5x2xf32>, %filter: memref<2x2x2x3
8888
// CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d5, d6, d3, d4)>
8989
// CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d0, d1, d2, d3, d4)>
9090

91-
// CHECK: func @depthwise_conv2D_nhwc
91+
// CHECK: func @depthwise_conv_2d_nhwc_hwcm
9292

9393
// CHECK: linalg.generic
9494
// CHECK-SAME: indexing_maps = [#[[MAP0]], #[[MAP1]], #[[MAP2]]]
@@ -103,8 +103,8 @@ func @depthwise_conv2D_nhwc(%input: memref<2x4x5x2xf32>, %filter: memref<2x2x2x3
103103

104104
// -----
105105

106-
func @depthwise_conv2D_nhw(%input: memref<1x113x113x96xf32>, %filter: memref<3x3x96xf32>, %output: memref<1x56x56x96xf32>) {
107-
linalg.depthwise_conv2D_nhw {dilations = dense<1> : vector<2xi64>, strides = dense<2> : vector<2xi64>}
106+
func @depthwise_conv_2d_nhwc_hwc(%input: memref<1x113x113x96xf32>, %filter: memref<3x3x96xf32>, %output: memref<1x56x56x96xf32>) {
107+
linalg.depthwise_conv_2d_nhwc_hwc {dilations = dense<1> : vector<2xi64>, strides = dense<2> : vector<2xi64>}
108108
ins(%input, %filter: memref<1x113x113x96xf32>, memref<3x3x96xf32>)
109109
outs(%output: memref<1x56x56x96xf32>)
110110
return
@@ -114,7 +114,7 @@ func @depthwise_conv2D_nhw(%input: memref<1x113x113x96xf32>, %filter: memref<3x3
114114
// CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d4, d5, d3)>
115115
// CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2, d3)>
116116

117-
// CHECK: func @depthwise_conv2D_nhw
117+
// CHECK: func @depthwise_conv_2d_nhwc_hwc
118118

119119
// CHECK: linalg.generic
120120
// CHECK-SAME: indexing_maps = [#[[MAP0]], #[[MAP1]], #[[MAP2]]]

0 commit comments

Comments
 (0)