Skip to content

Commit fd0c6f5

Browse files
committed
[mlir] Move linalg::PadTensorOp to tensor::PadOp.
RFC: https://llvm.discourse.group/t/rfc-move-linalg-padtensorop-to-tensor-padop/5785 Differential Revision: https://reviews.llvm.org/D117892
1 parent 4710750 commit fd0c6f5

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

53 files changed

+1639
-1450
lines changed

mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.td

-202
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,6 @@ include "mlir/Interfaces/ControlFlowInterfaces.td"
1818
include "mlir/Interfaces/InferTypeOpInterface.td"
1919
include "mlir/Interfaces/LoopLikeInterface.td"
2020
include "mlir/Interfaces/SideEffectInterfaces.td"
21-
include "mlir/Interfaces/TilingInterface.td"
2221
include "mlir/Interfaces/ViewLikeInterface.td"
2322

2423
// Base class for Linalg dialect ops that do not correspond to library calls.
@@ -130,207 +129,6 @@ def Linalg_InitTensorOp : Linalg_Op<"init_tensor",
130129
let hasCanonicalizer = 1;
131130
}
132131

133-
def Linalg_PadTensorOp : Linalg_Op<"pad_tensor",
134-
[AttrSizedOperandSegments, NoSideEffect,
135-
DeclareOpInterfaceMethods<ReifyRankedShapedTypeOpInterface>,
136-
DeclareOpInterfaceMethods<TilingInterface,
137-
["getDestinationOperands", "getLoopIteratorTypes", "getIterationDomain",
138-
"getTiledImplementation"]>]> {
139-
let summary = "tensor pad operation";
140-
let description = [{
141-
`linalg.pad_tensor` is an operation that pads the `source` tensor
142-
with given `low` and `high` padding config.
143-
144-
The PadTensor operation supports the following arguments:
145-
146-
* source: the "base" tensor on which to pad.
147-
* low: A list containing the padding along the start of each
148-
dimension, i.e `low`.
149-
* high: A list containing the padding along the end of each
150-
dimension, i.e. `high`.
151-
* nofold: indicates that the operation should not be folded when source and
152-
result types are equal.
153-
154-
The result tensor dimensions are `low` + `dim` + `high` along that
155-
dimension. The number of elements of `low` and `high` must match
156-
the rank of the input tensor. They can be either a constant or a
157-
dynamic value.
158-
159-
The region of the `pad_tensor` operation returns the value to use
160-
for the padding. The arguments of the region represent the index
161-
of the source being accessed. There should be as many arguments as
162-
the rank of the `source` tensor. The value `yield`-ed by the
163-
region is used as the value of the view at the given position.
164-
165-
If `nofold` is set, the padding operation will not be folded away even
166-
if the source type and the padded type have the same static shape. This can
167-
be used, e.g., for packing or promotion to faster memory.
168-
169-
Example 1:
170-
171-
```mlir
172-
%pad_value = ... : f32
173-
%0 = linalg.pad_tensor %arg0 low[1, 2] high[2, 3] {
174-
^bb0(%arg0 : index, %arg1 : index):
175-
linalg.yield %pad_value : f32
176-
} : tensor<?x?xf32> to tensor<?x?xf32>
177-
```
178-
179-
Example 2:
180-
181-
```mlir
182-
%pad_value = ... : f32
183-
%0 = linalg.pad_tensor %arg0 low[2, %arg1, 3, 3] high[3, 3, %arg1, 2] {
184-
^bb0(%arg2: index, %arg3: index, %arg4: index, %arg5: index):
185-
linalg.yield %pad_value : f32
186-
} : tensor<1x2x2x?xf32> to tensor<6x?x?x?xf32>
187-
```
188-
189-
Example 3:
190-
191-
```mlir
192-
%pad_value = ... : f32
193-
%0 = linalg.pad_tensor %arg0 low[0, 0] high[%ub0, %ub1] {
194-
^bb0(%arg1: index, %arg2: index):
195-
linalg.yield %pad_value : f32
196-
} : tensor<2x3xf32> to tensor<?x?xf32>
197-
```
198-
199-
Example 4:
200-
201-
```mlir
202-
// Force a padded value to always exist with `nofold`.
203-
%pad_value = ... : f32
204-
%0 = linalg.pad_tensor %arg0 nofold low[0, 0] high[0, 0] {
205-
^bb0(%arg1: index, %arg2: index):
206-
linalg.yield %pad_value : f32
207-
} : tensor<2x3xf32> to tensor<2x3xf32>
208-
```
209-
}];
210-
211-
let arguments = (ins
212-
AnyTensor:$source,
213-
Variadic<Index>:$low,
214-
Variadic<Index>:$high,
215-
I64ArrayAttr:$static_low,
216-
I64ArrayAttr:$static_high,
217-
UnitAttr:$nofold);
218-
219-
let regions = (region SizedRegion<1>:$region);
220-
221-
let results = (outs AnyTensor:$result);
222-
223-
// TODO: Remove custom<InferType> when AllTypesMatch supports opt. operands.
224-
let assemblyFormat = [{
225-
$source
226-
(`nofold` $nofold^)?
227-
`low` `` custom<OperandsOrIntegersSizesList>($low, $static_low)
228-
`high` `` custom<OperandsOrIntegersSizesList>($high, $static_high)
229-
$region attr-dict `:` type($source) `to` type($result)
230-
}];
231-
232-
let extraClassDeclaration = [{
233-
static StringRef getStaticLowAttrName() {
234-
return "static_low";
235-
}
236-
237-
static StringRef getStaticHighAttrName() {
238-
return "static_high";
239-
}
240-
241-
RankedTensorType getSourceType() {
242-
return source().getType().cast<RankedTensorType>();
243-
}
244-
RankedTensorType getResultType() {
245-
return getResult().getType().cast<RankedTensorType>();
246-
}
247-
248-
// Infer the shape of the result tensor given the type of the source tensor
249-
// and paddings. Known result dimensions that cannot necessarily be inferred
250-
// from low/high padding sizes can be optionally specified. Those will be
251-
// considered when computing the result type.
252-
static RankedTensorType inferResultType(
253-
RankedTensorType sourceType,
254-
ArrayRef<int64_t> staticLow,
255-
ArrayRef<int64_t> staticHigh,
256-
ArrayRef<int64_t> resultShape = {});
257-
258-
// Return a PadTensorOp that pads `source` to `type` size where the static
259-
// sizes are assumed to be greater than the dynamic sizes. The op performs
260-
// "high" padding (i.e. it adds trailing padding values until the desired
261-
// size is met).
262-
static linalg::PadTensorOp createPadHighOp(
263-
Type type, Value source, Value pad, bool nofold, Location loc,
264-
OpBuilder & builder);
265-
266-
// Return a PadTensorOp that pads `source` to `type` size with `pad` value.
267-
// I.e., a block will be created and the `pad` value will be yielded
268-
// directly. If the type passed is nullptr, it is inferred.
269-
static linalg::PadTensorOp createPadScalarOp(
270-
Type type, Value source, Value pad, ArrayRef<OpFoldResult> low,
271-
ArrayRef<OpFoldResult> high, bool nofold, Location loc,
272-
OpBuilder & builder);
273-
274-
// Return the pad value if it is a constant. Return null value otherwise.
275-
Value getConstantPaddingValue();
276-
277-
// Return a vector of all the static or dynamic values (low/high padding) of
278-
// the op.
279-
inline SmallVector<OpFoldResult> getMixedPadImpl(ArrayAttr staticAttrs,
280-
ValueRange values) {
281-
SmallVector<OpFoldResult> res;
282-
unsigned numDynamic = 0;
283-
unsigned count = staticAttrs.size();
284-
for (unsigned idx = 0; idx < count; ++idx) {
285-
if (ShapedType::isDynamic(staticAttrs[idx].cast<IntegerAttr>().getInt()))
286-
res.push_back(values[numDynamic++]);
287-
else
288-
res.push_back(staticAttrs[idx]);
289-
}
290-
return res;
291-
}
292-
SmallVector<OpFoldResult> getMixedLowPad() {
293-
return getMixedPadImpl(static_low(), low());
294-
}
295-
SmallVector<OpFoldResult> getMixedHighPad() {
296-
return getMixedPadImpl(static_high(), high());
297-
}
298-
// Return true if low padding is guaranteed to be 0.
299-
bool hasZeroLowPad() {
300-
return llvm::all_of(getMixedLowPad(), [](OpFoldResult ofr) {
301-
return getConstantIntValue(ofr) == static_cast<int64_t>(0);
302-
});
303-
}
304-
// Return true if high padding is guaranteed to be 0.
305-
bool hasZeroHighPad() {
306-
return llvm::all_of(getMixedHighPad(), [](OpFoldResult ofr) {
307-
return getConstantIntValue(ofr) == static_cast<int64_t>(0);
308-
});
309-
}
310-
}];
311-
312-
let builders = [
313-
// Build a PadTensorOp with mixed static and dynamic entries.
314-
OpBuilder<(ins "Value":$source, "ArrayRef<int64_t>":$staticLow,
315-
"ArrayRef<int64_t>":$staticHigh, "ValueRange":$low, "ValueRange":$high,
316-
CArg<"bool", "false">:$nofold,
317-
CArg<"ArrayRef<NamedAttribute>", "{}">:$attrs)>,
318-
// Build a PadTensorOp with all dynamic entries.
319-
OpBuilder<(ins "Value":$source, "ValueRange":$low, "ValueRange":$high,
320-
CArg<"bool", "false">:$nofold,
321-
CArg<"ArrayRef<NamedAttribute>", "{}">:$attrs)>,
322-
// Build a PadTensorOp with mixed static and dynamic entries and custom
323-
// result type. If the type passed is nullptr, it is inferred.
324-
OpBuilder<(ins "Type":$resultType, "Value":$source,
325-
"ArrayRef<OpFoldResult>":$low, "ArrayRef<OpFoldResult>":$high,
326-
CArg<"bool", "false">:$nofold,
327-
CArg<"ArrayRef<NamedAttribute>", "{}">:$attrs)>,
328-
];
329-
330-
let hasCanonicalizer = 1;
331-
let hasFolder = 1;
332-
}
333-
334132
def Linalg_YieldOp : Linalg_Op<"yield", [NoSideEffect, ReturnLike, Terminator]>,
335133
Arguments<(ins Variadic<AnyType>:$values)> {
336134
let summary = "Linalg yield operation";

mlir/include/mlir/Dialect/Linalg/Transforms/HoistPadding.h

+7-4
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
//===- HoistPadding.h - Hoisting transformation for PadTensorOp -*- C++ -*-===//
1+
//===- HoistPadding.h - Hoisting for tensor::PadOp -*- C++ --------------*-===//
22
//
33
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
44
// See https://llvm.org/LICENSE.txt for license information.
@@ -14,8 +14,11 @@
1414
namespace mlir {
1515
class Value;
1616

17+
namespace tensor {
18+
class PadOp;
19+
} // namespace tensor
20+
1721
namespace linalg {
18-
class PadTensorOp;
1922

2023
/// Mechanically hoist padding operations on tensors by `numLoops` into a new,
2124
/// generally larger tensor. This achieves packing of multiple padding ops into
@@ -59,8 +62,8 @@ class PadTensorOp;
5962
/// }
6063
/// }
6164
/// ```
62-
FailureOr<Value> hoistPaddingOnTensors(PadTensorOp opToHoist, int numLoops,
63-
PadTensorOp &hoistedOp);
65+
FailureOr<Value> hoistPaddingOnTensors(tensor::PadOp opToHoist, int numLoops,
66+
tensor::PadOp &hoistedOp);
6467

6568
} // namespace linalg
6669
} // namespace mlir

mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h

+18-18
Original file line numberDiff line numberDiff line change
@@ -1132,18 +1132,18 @@ void populateLinalgDistributeTiledLoopPattern(
11321132
// Op-specific patterns.
11331133
//===----------------------------------------------------------------------===//
11341134

1135-
/// PadTensorOp is not canonicalized away yet, so we provide a transformation to
1136-
/// `linalg.generic`.
1137-
struct PadTensorOpTransformationPattern : public OpRewritePattern<PadTensorOp> {
1138-
using OpRewritePattern<PadTensorOp>::OpRewritePattern;
1135+
/// tensor::PadOp is not canonicalized away yet, so we provide a transformation
1136+
/// to `linalg.generic`.
1137+
struct PadOpTransformationPattern : public OpRewritePattern<tensor::PadOp> {
1138+
using OpRewritePattern<tensor::PadOp>::OpRewritePattern;
11391139

1140-
LogicalResult matchAndRewrite(PadTensorOp padOp,
1140+
LogicalResult matchAndRewrite(tensor::PadOp padOp,
11411141
PatternRewriter &rewriter) const override;
11421142
};
11431143

11441144
/// Pad the operands of `opToPad` to a static bounding box. Use `paddingFunc`
11451145
/// and `nofoldFunc` to set the padding value and the nofold attribute of the
1146-
/// introduced PadTensorOps, respectively. Update `paddedOp` to the cloned
1146+
/// introduced tensor::PadOps, respectively. Update `paddedOp` to the cloned
11471147
/// statically shaped operation and return the extracted dynamically shaped
11481148
/// results. If padding fails, return failure.
11491149
FailureOr<SmallVector<Value>>
@@ -1153,23 +1153,23 @@ rewriteAsPaddedOp(OpBuilder &b, LinalgOp opToPad,
11531153
LinalgOp &paddedOp);
11541154

11551155
using OptimizeCopyFn =
1156-
std::function<LogicalResult(PatternRewriter &, PadTensorOp, Value)>;
1156+
std::function<LogicalResult(PatternRewriter &, tensor::PadOp, Value)>;
11571157

1158-
/// Rewrite a PadTensorOp into a sequence of InitTensorOp, FillOp and
1158+
/// Rewrite a tensor::PadOp into a sequence of InitTensorOp, FillOp and
11591159
/// InsertSliceOp. For now, only constant padding values are supported.
11601160
/// `OptimizeCopyFn` can be used to customize copying step optimization.
1161-
struct GeneralizePadTensorOpPattern : public OpRewritePattern<PadTensorOp> {
1162-
GeneralizePadTensorOpPattern(MLIRContext *context,
1163-
OptimizeCopyFn optimizeCopyFn = nullptr,
1164-
PatternBenefit benefit = 1)
1165-
: OpRewritePattern<PadTensorOp>(context, benefit),
1161+
struct GeneralizePadOpPattern : public OpRewritePattern<tensor::PadOp> {
1162+
GeneralizePadOpPattern(MLIRContext *context,
1163+
OptimizeCopyFn optimizeCopyFn = nullptr,
1164+
PatternBenefit benefit = 1)
1165+
: OpRewritePattern<tensor::PadOp>(context, benefit),
11661166
optimizeCopyFn(std::move(optimizeCopyFn)) {}
1167-
LogicalResult matchAndRewrite(PadTensorOp padOp,
1167+
LogicalResult matchAndRewrite(tensor::PadOp padOp,
11681168
PatternRewriter &rewriter) const override;
11691169

11701170
protected:
11711171
OptimizeCopyFn optimizeCopyFn;
1172-
Value createFillOrGenerateOp(PatternRewriter &rewriter, PadTensorOp padOp,
1172+
Value createFillOrGenerateOp(PatternRewriter &rewriter, tensor::PadOp padOp,
11731173
Value dest,
11741174
const SmallVector<Value> &dynSizes) const;
11751175
};
@@ -1179,9 +1179,9 @@ struct GeneralizePadTensorOpPattern : public OpRewritePattern<PadTensorOp> {
11791179
/// are used to encode a certain ordering of pattern application. To avoid
11801180
/// scattering magic constants throughout the code base, the patterns must be
11811181
/// added with this function. `baseBenefit` can be used to offset the benefit
1182-
/// of all PadTensorOp vectorization patterns by a certain value.
1183-
void populatePadTensorOpVectorizationPatterns(RewritePatternSet &patterns,
1184-
PatternBenefit baseBenefit = 1);
1182+
/// of all tensor::PadOp vectorization patterns by a certain value.
1183+
void populatePadOpVectorizationPatterns(RewritePatternSet &patterns,
1184+
PatternBenefit baseBenefit = 1);
11851185

11861186
/// Match and rewrite for the pattern:
11871187
/// ```

mlir/include/mlir/Dialect/Linalg/Utils/Utils.h

+3-3
Original file line numberDiff line numberDiff line change
@@ -107,12 +107,12 @@ tensor::ExtractSliceOp makeComposedExtractSliceOp(
107107
OpBuilder &b, Location loc, Value source, ArrayRef<OpFoldResult> offsets,
108108
ArrayRef<OpFoldResult> sizes, ArrayRef<OpFoldResult> strides);
109109

110-
/// Create a PadTensorOp that pads `source` to the size of the statically sized
111-
/// `type` whose static sizes are assumed to be greater than the dynamic
110+
/// Create a tensor::PadOp that pads `source` to the size of the statically
111+
/// sized `type` whose static sizes are assumed to be greater than the dynamic
112112
/// `source` size. The padding introduces trailing `pad` values until the target
113113
/// size is met. If `source` is defined by one or more LinalgOps that have been
114114
/// padded with the same value and sizes, return their padded result instead of
115-
/// creating a PadTensorOp.
115+
/// creating a tensor::PadOp.
116116
///
117117
/// Example:
118118
/// ```

mlir/include/mlir/Dialect/Tensor/IR/Tensor.h

+1
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,7 @@
1919
#include "mlir/Interfaces/ControlFlowInterfaces.h"
2020
#include "mlir/Interfaces/InferTypeOpInterface.h"
2121
#include "mlir/Interfaces/SideEffectInterfaces.h"
22+
#include "mlir/Interfaces/TilingInterface.h"
2223
#include "mlir/Interfaces/ViewLikeInterface.h"
2324

2425
//===----------------------------------------------------------------------===//

0 commit comments

Comments
 (0)