
Commit 01d696e

[mlir] rename the "packing" flag of linalg.pad_tensor to "nofold"
The discussion in https://reviews.llvm.org/D110425 demonstrated that "packing" may be a confusing term to describe the behavior of this op in the presence of the attribute. Instead, indicate the intended effect: preventing the folder from being applied.

Reviewed By: nicolasvasilache, silvas

Differential Revision: https://reviews.llvm.org/D111046
1 parent 24688f8 commit 01d696e
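In practice, the flag's only effect is on folding and canonicalization. A minimal sketch of the renamed spelling (adapted from Example 4 in the op documentation below; `%arg0` and `%cst` are placeholders):

```mlir
%cst = constant 0.000000e+00 : f32

// Without `nofold`, this zero-padding op folds away: the source and result
// types are both tensor<2x3xf32>, so the folder returns the source.
%0 = linalg.pad_tensor %arg0 low[0, 0] high[0, 0] {
^bb0(%arg1: index, %arg2: index):
  linalg.yield %cst : f32
} : tensor<2x3xf32> to tensor<2x3xf32>

// With `nofold` (previously spelled `packing`), the op survives
// canonicalization, guaranteeing a fresh copy of the tensor.
%1 = linalg.pad_tensor %arg0 nofold low[0, 0] high[0, 0] {
^bb0(%arg1: index, %arg2: index):
  linalg.yield %cst : f32
} : tensor<2x3xf32> to tensor<2x3xf32>
```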

File tree: 6 files changed, +49 −50 lines

mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.td
Lines changed: 15 additions & 16 deletions

@@ -146,9 +146,9 @@ def Linalg_PadTensorOp : Linalg_Op<"pad_tensor",
     * low: A list contains the padding along the start of each
       dimension, i.e `low`.
     * high: A list contains the padding along the end of each
-      dimension, i.e. `high`.
-    * packing: whether the padding operation is guaranteed to create a new
-      tensor suitable for packing, i.e. a copy.
+      dimension, i.e. `high`.
+    * nofold: indicates that the operation should not be folded when source and
+      result types are equal.
 
     The result tensor dimensions are `low` + `dim` + `high` along that
     dimension. The number of elements of `low` and `high` must match
@@ -161,10 +161,9 @@ def Linalg_PadTensorOp : Linalg_Op<"pad_tensor",
     the rank of the `source` tensor. The value `yield`-ed by the
     region is used as the value of the view at the given position.
 
-    If `packing` is indicated, the padding is guaranteed to produce a new
-    tensor, e.g., to use for packing or promotion to faster memory. Such
-    operations are not optimized away even when the source type has the same
-    static shape.
+    If `nofold` is set, the padding operation will not be folded away even
+    if the source type and the padded type have the same static shape. This can
+    be used, e.g., for packing or promotion to faster memory.
 
     Example 1:
 
@@ -199,9 +198,9 @@ def Linalg_PadTensorOp : Linalg_Op<"pad_tensor",
     Example 4:
 
     ```mlir
-      // Force a padded value to be always exist with `packing`.
+      // Force a padded value to be always exist with `nofold`.
       %pad_value = ... : f32
-      %0 = linalg.pad_tensor %arg0 packing low[0, 0] high[0, 0] {
+      %0 = linalg.pad_tensor %arg0 nofold low[0, 0] high[0, 0] {
       ^bb0(%arg1: index, %arg2: index):
         linalg.yield %pad_value : f32
       } : tensor<2x3xf32> to tensor<2x3xf32>
@@ -214,7 +213,7 @@ def Linalg_PadTensorOp : Linalg_Op<"pad_tensor",
     Variadic<Index>:$high,
     I64ArrayAttr:$static_low,
     I64ArrayAttr:$static_high,
-    UnitAttr:$packing);
+    UnitAttr:$nofold);
 
   let regions = (region SizedRegion<1>:$region);
 
@@ -223,7 +222,7 @@ def Linalg_PadTensorOp : Linalg_Op<"pad_tensor",
  // TODO: Remove custom<InferType> when AllTypesMatch supports opt. operands.
  let assemblyFormat = [{
    $source
-    (`packing` $packing^)?
+    (`nofold` $nofold^)?
    `low` `` custom<OperandsOrIntegersSizesList>($low, $static_low)
    `high` `` custom<OperandsOrIntegersSizesList>($high, $static_high)
    $region attr-dict `:` type($source) `to` type($result)
@@ -260,15 +259,15 @@ def Linalg_PadTensorOp : Linalg_Op<"pad_tensor",
    // "high" padding (i.e. it adds trailing padding values until the desired
    // size is met).
    static linalg::PadTensorOp createPadHighOp(
-        Type type, Value source, Value pad, bool packing, Location loc,
+        Type type, Value source, Value pad, bool nofold, Location loc,
        OpBuilder & builder);
 
    // Return a PadTensorOp that pads `source to `type` size with `pad` value.
    // I.e., a block will be created and the `pad` value will be yielded
    // directly. If the type passed is nullptr, it is inferred.
    static linalg::PadTensorOp createPadScalarOp(
        Type type, Value source, Value pad, ArrayRef<OpFoldResult> low,
-        ArrayRef<OpFoldResult> high, bool packing, Location loc,
+        ArrayRef<OpFoldResult> high, bool nofold, Location loc,
        OpBuilder & builder);
 
    // Return the pad value if it is a constant. Return null value otherwise.
@@ -313,17 +312,17 @@ def Linalg_PadTensorOp : Linalg_Op<"pad_tensor",
    // Build a PadTensorOp with mixed static and dynamic entries.
    OpBuilder<(ins "Value":$source, "ArrayRef<int64_t>":$staticLow,
      "ArrayRef<int64_t>":$staticHigh, "ValueRange":$low, "ValueRange":$high,
-      CArg<"bool", "false">:$packing,
+      CArg<"bool", "false">:$nofold,
      CArg<"ArrayRef<NamedAttribute>", "{}">:$attrs)>,
    // Build a PadTensorOp with all dynamic entries.
    OpBuilder<(ins "Value":$source, "ValueRange":$low, "ValueRange":$high,
-      CArg<"bool", "false">:$packing,
+      CArg<"bool", "false">:$nofold,
      CArg<"ArrayRef<NamedAttribute>", "{}">:$attrs)>,
    // Build a PadTensorOp with mixed static and dynamic entries and custom
    // result type. If the type passed is nullptr, it is inferred.
    OpBuilder<(ins "Type":$resultType, "Value":$source,
      "ArrayRef<OpFoldResult>":$low, "ArrayRef<OpFoldResult>":$high,
-      CArg<"bool", "false">:$packing,
+      CArg<"bool", "false">:$nofold,
      CArg<"ArrayRef<NamedAttribute>", "{}">:$attrs)>,
  ];
 
mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
Lines changed: 2 additions & 2 deletions

@@ -87,7 +87,7 @@ static mlir::Value applyPad(Location loc, Value input, ArrayRef<int64_t> pad,
 
   return linalg::PadTensorOp::createPadScalarOp(
              RankedTensorType::get(paddedShape, inputETy), input, padValue,
-             lowIndices, highIndices, /*packing=*/false, loc, rewriter)
+             lowIndices, highIndices, /*nofold=*/false, loc, rewriter)
       .result();
 }
 
@@ -2349,7 +2349,7 @@ class PadConverter : public OpRewritePattern<tosa::PadOp> {
 
     auto newPadOp = linalg::PadTensorOp::createPadScalarOp(
         padOp.getType(), input, constant, lowValues, highValues,
-        /*packing=*/false, loc, rewriter);
+        /*nofold=*/false, loc, rewriter);
 
     rewriter.replaceOp(padOp, newPadOp.getResult());
     return success();

mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
Lines changed: 17 additions & 17 deletions

@@ -1085,28 +1085,28 @@ RankedTensorType PadTensorOp::inferResultType(RankedTensorType sourceType,
 void PadTensorOp::build(OpBuilder &b, OperationState &result, Value source,
                         ArrayRef<int64_t> staticLow,
                         ArrayRef<int64_t> staticHigh, ValueRange low,
-                        ValueRange high, bool packing,
+                        ValueRange high, bool nofold,
                         ArrayRef<NamedAttribute> attrs) {
   auto sourceType = source.getType().cast<RankedTensorType>();
   auto resultType = inferResultType(sourceType, staticLow, staticHigh);
   build(b, result, resultType, source, low, high, b.getI64ArrayAttr(staticLow),
-        b.getI64ArrayAttr(staticHigh), packing ? b.getUnitAttr() : UnitAttr());
+        b.getI64ArrayAttr(staticHigh), nofold ? b.getUnitAttr() : UnitAttr());
   result.addAttributes(attrs);
 }
 
 void PadTensorOp::build(OpBuilder &b, OperationState &result, Value source,
-                        ValueRange low, ValueRange high, bool packing,
+                        ValueRange low, ValueRange high, bool nofold,
                         ArrayRef<NamedAttribute> attrs) {
   auto sourceType = source.getType().cast<RankedTensorType>();
   unsigned rank = sourceType.getRank();
   SmallVector<int64_t, 4> staticVector(rank, ShapedType::kDynamicSize);
-  build(b, result, source, staticVector, staticVector, low, high, packing,
+  build(b, result, source, staticVector, staticVector, low, high, nofold,
        attrs);
 }
 
 void PadTensorOp::build(OpBuilder &b, OperationState &result, Type resultType,
                         Value source, ArrayRef<OpFoldResult> low,
-                        ArrayRef<OpFoldResult> high, bool packing,
+                        ArrayRef<OpFoldResult> high, bool nofold,
                         ArrayRef<NamedAttribute> attrs) {
   assert(resultType.isa<RankedTensorType>());
   auto sourceType = source.getType().cast<RankedTensorType>();
@@ -1129,17 +1129,17 @@ void PadTensorOp::build(OpBuilder &b, OperationState &result, Type resultType,
   }
   build(b, result, resultType, source, dynamicLow, dynamicHigh,
         b.getI64ArrayAttr(staticLow), b.getI64ArrayAttr(staticHigh),
-        packing ? b.getUnitAttr() : UnitAttr());
+        nofold ? b.getUnitAttr() : UnitAttr());
   result.addAttributes(attrs);
 }
 
 PadTensorOp PadTensorOp::createPadScalarOp(Type type, Value source, Value pad,
                                            ArrayRef<OpFoldResult> low,
                                            ArrayRef<OpFoldResult> high,
-                                           bool packing, Location loc,
+                                           bool nofold, Location loc,
                                            OpBuilder &builder) {
-  auto padTensorOp = builder.create<linalg::PadTensorOp>(loc, type, source, low,
-                                                         high, packing);
+  auto padTensorOp =
+      builder.create<linalg::PadTensorOp>(loc, type, source, low, high, nofold);
   int rank = padTensorOp.getResultType().getRank();
   SmallVector<Type, 4> blockArgTypes;
   blockArgTypes.assign(rank, builder.getIndexType());
@@ -1153,7 +1153,7 @@ PadTensorOp PadTensorOp::createPadScalarOp(Type type, Value source, Value pad,
 }
 
 PadTensorOp PadTensorOp::createPadHighOp(Type type, Value source, Value pad,
-                                         bool packing, Location loc,
+                                         bool nofold, Location loc,
                                          OpBuilder &builder) {
   SmallVector<OpFoldResult, 4> low, high;
   auto rankedTensorType = type.cast<RankedTensorType>();
@@ -1167,7 +1167,7 @@ PadTensorOp PadTensorOp::createPadHighOp(Type type, Value source, Value pad,
     high.push_back(highValue);
     low.push_back(builder.createOrFold<ConstantIndexOp>(loc, 0));
   }
-  return PadTensorOp::createPadScalarOp(type, source, pad, low, high, packing,
+  return PadTensorOp::createPadScalarOp(type, source, pad, low, high, nofold,
                                         loc, builder);
 }
 
@@ -1440,16 +1440,16 @@ Operation *PadTensorOp::getTiledImplementation(OpBuilder &b, ValueRange dest,
 }
 
 namespace {
-// Folds linalg.pad_tensor when padding is static zeros and packing is not
-// requested.
+// Folds linalg.pad_tensor when padding is static zeros and the attribute
+// doesn't request otherwise.
 struct FoldStaticZeroPadding : public OpRewritePattern<PadTensorOp> {
   using OpRewritePattern<PadTensorOp>::OpRewritePattern;
 
   LogicalResult matchAndRewrite(PadTensorOp padTensorOp,
                                 PatternRewriter &rewriter) const override {
     if (!padTensorOp.hasZeroLowPad() || !padTensorOp.hasZeroHighPad())
       return failure();
-    if (padTensorOp.packing())
+    if (padTensorOp.nofold())
       return failure();
     rewriter.replaceOpWithNewOp<tensor::CastOp>(
         padTensorOp, padTensorOp.result().getType(), padTensorOp.source());
@@ -1481,7 +1481,7 @@ struct FoldSourceTensorCast : public OpRewritePattern<PadTensorOp> {
     auto newOp = rewriter.create<PadTensorOp>(
         padTensorOp->getLoc(), newResultType, padTensorOp.source(),
         padTensorOp.low(), padTensorOp.high(), padTensorOp.static_low(),
-        padTensorOp.static_high(), padTensorOp.packing());
+        padTensorOp.static_high(), padTensorOp.nofold());
     BlockAndValueMapping mapper;
     padTensorOp.getRegion().cloneInto(&newOp.getRegion(), mapper);
 
@@ -1513,7 +1513,7 @@ struct FoldTargetTensorCast : public OpRewritePattern<PadTensorOp> {
         padTensorOp.getLoc(), tensorCastOp.dest().getType(),
         padTensorOp.source(), padTensorOp.low(), padTensorOp.high(),
         padTensorOp.static_low(), padTensorOp.static_high(),
-        padTensorOp.packing());
+        padTensorOp.nofold());
     replacementOp.region().takeBody(padTensorOp.region());
 
     rewriter.replaceOp(padTensorOp, replacementOp.result());
@@ -1555,7 +1555,7 @@ Value PadTensorOp::getConstantPaddingValue() {
 
 OpFoldResult PadTensorOp::fold(ArrayRef<Attribute>) {
   if (getResultType().hasStaticShape() && getResultType() == getSourceType() &&
-      !packing())
+      !nofold())
     return source();
   return {};
 }
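Taken together, the folder and the `FoldStaticZeroPadding` pattern mean that a zero-padding op without `nofold` reduces to its source, or to a `tensor.cast` when only the static shape differs. A sketch of the rewrite, modeled on the `@pad_static_zero_cast` test that appears in the canonicalize.mlir hunks below (`%arg0` and `%pad_value` are placeholders):

```mlir
// Before canonicalization: zero padding, no `nofold`, statically known
// result shape.
%0 = linalg.pad_tensor %arg0 low[0, 0, 0] high[0, 0, 0] {
^bb0(%i: index, %j: index, %k: index):
  linalg.yield %pad_value : f32
} : tensor<?x?x?xf32> to tensor<2x3x4xf32>

// After FoldStaticZeroPadding: the pad is replaced by a cast that merely
// injects the static shape.
%0 = tensor.cast %arg0 : tensor<?x?x?xf32> to tensor<2x3x4xf32>
```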

mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
Lines changed: 1 addition & 1 deletion

@@ -182,7 +182,7 @@ static LogicalResult padOperandToSmallestStaticBoundingBox(
       staticSizes, getElementTypeOrSelf(opOperand->get()));
   result = linalg::PadTensorOp::createPadHighOp(
       staticTensorType, opOperand->get(), paddingValue.getValue(),
-      /*packing=*/true, opToPad->getLoc(), rewriter);
+      /*nofold=*/true, opToPad->getLoc(), rewriter);
   return success();
 }
 

mlir/test/Dialect/Linalg/canonicalize.mlir
Lines changed: 6 additions & 6 deletions

@@ -630,14 +630,14 @@ func @pad_tensor_same_static_shape(%arg0: tensor<5x6xf32>, %a: index)
 
 // -----
 
-// CHECK-LABEL: func @pad_tensor_packing_same_static_shape(
+// CHECK-LABEL: func @pad_tensor_nofold_same_static_shape(
 // CHECK-SAME:    %[[ARG0:.*]]: tensor<5x6xf32>
 // CHECK:         %[[PAD:.*]] = linalg.pad_tensor
 // CHECK:         return %[[PAD]]
-func @pad_tensor_packing_same_static_shape(%arg0: tensor<5x6xf32>, %a: index)
+func @pad_tensor_nofold_same_static_shape(%arg0: tensor<5x6xf32>, %a: index)
     -> tensor<5x6xf32> {
   %cst = constant 0.000000e+00 : f32
-  %0 = linalg.pad_tensor %arg0 packing low[%a, 0] high[0, %a] {
+  %0 = linalg.pad_tensor %arg0 nofold low[%a, 0] high[0, %a] {
   ^bb0(%arg1: index, %arg2: index):
     linalg.yield %cst : f32
   } : tensor<5x6xf32> to tensor<5x6xf32>
@@ -937,13 +937,13 @@ func @pad_static_zero_cast(%arg0: tensor<?x?x?xf32>, %pad_value: f32) -> tensor<
 
 // -----
 
-// CHECK-LABEL: func @pad_packing_static_zero(
+// CHECK-LABEL: func @pad_nofold_static_zero(
 // CHECK-SAME:    %[[ARG0:.*]]: tensor<?x?x?xf32>
 // CHECK:         %[[PAD:.*]] = linalg.pad_tensor
 // CHECK:         return %[[PAD]]
-func @pad_packing_static_zero(%arg0: tensor<?x?x?xf32>, %pad_value: f32) -> tensor<2x3x4xf32> {
+func @pad_nofold_static_zero(%arg0: tensor<?x?x?xf32>, %pad_value: f32) -> tensor<2x3x4xf32> {
   %c0 = constant 0 : index
-  %0 = linalg.pad_tensor %arg0 packing low[0, %c0, 0] high[0, 0, %c0] {
+  %0 = linalg.pad_tensor %arg0 nofold low[0, %c0, 0] high[0, 0, %c0] {
   ^bb0(%arg1: index, %arg2: index, %arg3: index):
     linalg.yield %pad_value : f32
   } : tensor<?x?x?xf32> to tensor<2x3x4xf32>

mlir/test/Dialect/Linalg/tile-and-pad-tensors.mlir
Lines changed: 8 additions & 8 deletions

@@ -20,11 +20,11 @@ func @matmul_tensors(
 // CHECK-NOT: linalg.matmul {{.*}} tensor<?x?xi8>
 
 // Padding injects static information.
-// CHECK: %[[pA:.*]] = linalg.pad_tensor %[[sTA]] packing low[%[[C0]], %[[C0]]] high[%{{.*}}, %{{.*}}]
+// CHECK: %[[pA:.*]] = linalg.pad_tensor %[[sTA]] nofold low[%[[C0]], %[[C0]]] high[%{{.*}}, %{{.*}}]
 // CHECK:   : tensor<?x?xi8> to tensor<2x4xi8>
-// CHECK: %[[pB:.*]] = linalg.pad_tensor %[[sTB]] packing low[%[[C0]], %[[C0]]] high[%{{.*}}, %{{.*}}]
+// CHECK: %[[pB:.*]] = linalg.pad_tensor %[[sTB]] nofold low[%[[C0]], %[[C0]]] high[%{{.*}}, %{{.*}}]
 // CHECK:   : tensor<?x?xi8> to tensor<4x3xi8>
-// CHECK: %[[pC:.*]] = linalg.pad_tensor %[[sTC]] packing low[%[[C0]], %[[C0]]] high[%{{.*}}, %{{.*}}]
+// CHECK: %[[pC:.*]] = linalg.pad_tensor %[[sTC]] nofold low[%[[C0]], %[[C0]]] high[%{{.*}}, %{{.*}}]
 // CHECK:   : tensor<?x?xi32> to tensor<2x3xi32>
 // CHECK: %[[pD:.*]] = linalg.matmul ins(%[[pA]], %[[pB]] : tensor<2x4xi8>, tensor<4x3xi8>)
 // CHECK-SAME:   outs(%[[pC]] : tensor<2x3xi32>) -> tensor<2x3xi32>
@@ -55,7 +55,7 @@ func @generic_scalar_and_tensor(
 // CHECK: %[[sTC:.*]] = tensor.extract_slice %[[TC2]][{{.*}}] : tensor<?x?x?xf32> to tensor<?x?x?xf32>
 
 // Padding injects static information.
-// CHECK: %[[pC:.*]] = linalg.pad_tensor %[[sTC]] packing low[%[[C0]], %[[C0]], %[[C0]]] high[%{{.*}}, %{{.*}}, %{{.*}}]
+// CHECK: %[[pC:.*]] = linalg.pad_tensor %[[sTC]] nofold low[%[[C0]], %[[C0]], %[[C0]]] high[%{{.*}}, %{{.*}}, %{{.*}}]
 // CHECK:   : tensor<?x?x?xf32> to tensor<2x3x4xf32>
 // CHECK: %[[pD:.*]] = linalg.generic
 // CHECK-SAME:   ins(%[[VAL]] : f32) outs(%[[pC]] : tensor<2x3x4xf32>)
@@ -108,9 +108,9 @@ func @matmul_partially_padded_tensors(
 // CHECK-1DIM-TILE: %[[sTA:.*]] = tensor.extract_slice %[[TA]][{{.*}}] : tensor<?x8xi8> to tensor<?x8xi8>
 // CHECK-1DIM-TILE: %[[sTB:.*]] = tensor.extract_slice %[[TB]][{{.*}}] : tensor<8x?xi8> to tensor<8x?xi8>
 // CHECK-1DIM-TILE: %[[sTC:.*]] = tensor.extract_slice %[[TC1]][{{.*}}] : tensor<?x?xi32> to tensor<?x?xi32>
-// CHECK-1DIM-TILE: %[[pA:.*]] = linalg.pad_tensor %[[sTA]] packing low[%[[C0]], %[[C0]]] high[%{{.*}}, %{{.*}}]
+// CHECK-1DIM-TILE: %[[pA:.*]] = linalg.pad_tensor %[[sTA]] nofold low[%[[C0]], %[[C0]]] high[%{{.*}}, %{{.*}}]
 // CHECK-1DIM-TILE:   : tensor<?x8xi8> to tensor<2x8xi8>
-// CHECK-1DIM-TILE: %[[pB:.*]] = linalg.pad_tensor %[[sTB]] packing low[%[[C0]], %[[C0]]] high[%{{.*}}, %{{.*}}]
+// CHECK-1DIM-TILE: %[[pB:.*]] = linalg.pad_tensor %[[sTB]] nofold low[%[[C0]], %[[C0]]] high[%{{.*}}, %{{.*}}]
 // CHECK-1DIM-TILE:   : tensor<8x?xi8> to tensor<8x3xi8>
 // CHECK-1DIM-TILE: %[[pD:.*]] = linalg.matmul ins(%[[pA]], %[[pB]] : tensor<2x8xi8>, tensor<8x3xi8>)
 // CHECK-1DIM-TILE:   outs(%[[sTC]] : tensor<?x?xi32>) -> tensor<?x?xi32>
@@ -122,7 +122,7 @@ func @matmul_partially_padded_tensors(
 func @pad_to_same_static_size(%arg0: tensor<2x3x4xf32>, %arg1: f32) -> tensor<2x3x4xf32> {
   // CHECK: %[[c0:.*]] = constant 0 : index
   // CHECK-NOT: scf.for
-  // CHECK: linalg.pad_tensor %{{.*}} packing low[%[[c0]], %[[c0]], %[[c0]]] high[%[[c0]], %[[c0]], %[[c0]]]
+  // CHECK: linalg.pad_tensor %{{.*}} nofold low[%[[c0]], %[[c0]], %[[c0]]] high[%[[c0]], %[[c0]], %[[c0]]]
   // CHECK:   tensor<2x3x4xf32> to tensor<2x3x4xf32>
   %0 = linalg.generic {
     indexing_maps = [affine_map<(d0, d1, d2) -> ()>,
@@ -140,7 +140,7 @@ func @pad_to_same_static_size(%arg0: tensor<2x3x4xf32>, %arg1: f32) -> tensor<2x
 func @pad_static_divisible_size(%arg0: tensor<4x6x8xf32>, %arg1: f32) -> tensor<4x6x8xf32> {
   // CHECK: %[[c0:.*]] = constant 0 : index
   // CHECK-COUNT-3: scf.for
-  // CHECK: linalg.pad_tensor %{{.*}} packing low[%[[c0]], %[[c0]], %[[c0]]] high[%[[c0]], %[[c0]], %[[c0]]]
+  // CHECK: linalg.pad_tensor %{{.*}} nofold low[%[[c0]], %[[c0]], %[[c0]]] high[%[[c0]], %[[c0]], %[[c0]]]
   // CHECK:   tensor<2x3x4xf32> to tensor<2x3x4xf32>
   %0 = linalg.generic {
     indexing_maps = [affine_map<(d0, d1, d2) -> ()>,
