[Mlir-commits] [mlir] 42444d0 - [mlir][Linalg] NFC: Verify tiling on linalg.generic operation on tensors.
llvmlistbot at llvm.org
Thu Jan 14 16:17:31 PST 2021
Author: MaheshRavishankar
Date: 2021-01-14T16:17:08-08:00
New Revision: 42444d0cf0c9cf92f89acf16f11f3b7242d81619
URL: https://github.com/llvm/llvm-project/commit/42444d0cf0c9cf92f89acf16f11f3b7242d81619
DIFF: https://github.com/llvm/llvm-project/commit/42444d0cf0c9cf92f89acf16f11f3b7242d81619.diff
LOG: [mlir][Linalg] NFC: Verify tiling on linalg.generic operation on tensors.
With the recent changes to linalg-on-tensors semantics, the tiling
transformation works out-of-the-box for generic operations. Add a test to
verify that, along with some minor refactoring.
Differential Revision: https://reviews.llvm.org/D93077
Added:
Modified:
mlir/include/mlir/IR/AffineMap.h
mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
mlir/test/Dialect/Linalg/tile-tensors.mlir
Removed:
################################################################################
diff --git a/mlir/include/mlir/IR/AffineMap.h b/mlir/include/mlir/IR/AffineMap.h
index f1f267ff0fc2..4e50a9599767 100644
--- a/mlir/include/mlir/IR/AffineMap.h
+++ b/mlir/include/mlir/IR/AffineMap.h
@@ -327,6 +327,21 @@ AffineMap inversePermutation(AffineMap map);
/// ```
AffineMap concatAffineMaps(ArrayRef<AffineMap> maps);
+/// Returns the map that results from projecting out the dimensions specified in
+/// `projectedDimensions`. The projected dimensions are set to 0.
+///
+/// Example:
+/// 1) map                  : affine_map<(d0, d1, d2) -> (d0, d1)>
+///    projected_dimensions : {2}
+///    result               : affine_map<(d0, d1) -> (d0, d1)>
+///
+/// 2) map                  : affine_map<(d0, d1) -> (d0 + d1)>
+///    projected_dimensions : {1}
+///    result               : affine_map<(d0) -> (d0)>
+///
+/// 3) map                  : affine_map<(d0, d1, d2) -> (d0, d1)>
+///    projected_dimensions : {1}
+///    result               : affine_map<(d0, d1) -> (d0, 0)>
AffineMap getProjectedMap(AffineMap map,
ArrayRef<unsigned> projectedDimensions);
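
As a worked illustration of example 2 above, a minimal C++ sketch
(illustrative only, not part of this commit; assumes an MLIRContext is in
scope and `using namespace mlir`):

  MLIRContext ctx;
  AffineExpr d0 = getAffineDimExpr(0, &ctx);
  AffineExpr d1 = getAffineDimExpr(1, &ctx);
  // map : (d0, d1) -> (d0 + d1)
  AffineMap map =
      AffineMap::get(/*dimCount=*/2, /*symbolCount=*/0, {d0 + d1}, &ctx);
  // Project out dimension 1: d1 is replaced by 0 and the dimension is
  // dropped, yielding (d0) -> (d0).
  AffineMap projected = getProjectedMap(map, /*projectedDimensions=*/{1});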
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
index f323d2e50435..eb8c9bb6a6fc 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
@@ -221,9 +221,8 @@ static bool isTiled(AffineMap map, ValueRange tileSizes) {
static SmallVector<Value, 4>
makeTiledShapes(OpBuilder &b, Location loc, LinalgOp linalgOp,
- ValueRange operands, AffineMap map, ValueRange ivs,
+ ArrayRef<Value> tiledOperands, AffineMap map, ValueRange ivs,
ValueRange tileSizes, ValueRange allShapeSizes) {
- assert(operands.size() == linalgOp.getShapedOperands().size());
assert(ivs.size() == static_cast<size_t>(llvm::count_if(
llvm::make_range(tileSizes.begin(), tileSizes.end()),
[](Value v) { return !isZero(v); })) &&
@@ -243,11 +242,9 @@ makeTiledShapes(OpBuilder &b, Location loc, LinalgOp linalgOp,
subShapeSizes.push_back(size - std_constant_index(1));
}
- auto *op = linalgOp.getOperation();
-
SmallVector<Value, 4> res;
- res.reserve(op->getNumOperands());
- for (auto en : llvm::enumerate(operands)) {
+ res.reserve(tiledOperands.size());
+ for (auto en : llvm::enumerate(tiledOperands)) {
Value shapedOp = en.value();
ShapedType shapedType = shapedOp.getType().cast<ShapedType>();
unsigned rank = shapedType.getRank();
@@ -342,6 +339,7 @@ tileLinalgOpImpl(OpBuilder &b, LinalgOp op, ValueRange tileSizes,
LoopIndexToRangeIndexMap loopIndexToRangeIndex;
std::tie(loopRanges, loopIndexToRangeIndex) = makeTiledLoopRanges(
b, op.getLoc(), shapeSizesToLoopsMap, allShapeSizes, tileSizes);
+
SmallVector<Attribute, 4> iteratorTypes;
for (auto attr :
enumerate(op.iterator_types().cast<ArrayAttr>().getValue())) {
@@ -574,10 +572,10 @@ void mlir::linalg::populateLinalgTilingCanonicalizationPatterns(
static void insertTilingPatterns(OwningRewritePatternList &patterns,
const LinalgTilingOptions &options,
MLIRContext *ctx) {
- RewritePatternList<
+ RewritePatternList<GenericOp, IndexedGenericOp,
#define GET_OP_LIST
#include "mlir/Dialect/Linalg/IR/LinalgStructuredOps.cpp.inc"
- >::insert(patterns, options, ctx);
+ >::insert(patterns, options, ctx);
}
static void applyTilingToLoopPatterns(LinalgTilingLoopType loopType,
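
For context: RewritePatternList is a small variadic helper local to
Tiling.cpp that registers one tiling pattern per listed op type, so naming
GenericOp and IndexedGenericOp in the template argument list is what
enables tiling for them. A rough sketch of its shape (the actual helper
also threads a LinalgMarker through; omitted here for brevity):

  template <typename... OpTypes>
  class RewritePatternList;

  template <>
  class RewritePatternList<> {
  public:
    static void insert(OwningRewritePatternList &patterns,
                       const LinalgTilingOptions &options,
                       MLIRContext *ctx) {}
  };

  template <typename OpTy, typename... OpTypes>
  class RewritePatternList<OpTy, OpTypes...> {
  public:
    static void insert(OwningRewritePatternList &patterns,
                       const LinalgTilingOptions &options,
                       MLIRContext *ctx) {
      // Register a tiling pattern for OpTy, then recurse on the rest.
      patterns.insert<LinalgTilingPattern<OpTy>>(ctx, options);
      RewritePatternList<OpTypes...>::insert(patterns, options, ctx);
    }
  };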
diff --git a/mlir/test/Dialect/Linalg/tile-tensors.mlir b/mlir/test/Dialect/Linalg/tile-tensors.mlir
index 787ea8d2b395..f52d7fefa8be 100644
--- a/mlir/test/Dialect/Linalg/tile-tensors.mlir
+++ b/mlir/test/Dialect/Linalg/tile-tensors.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -linalg-tile="linalg-tile-sizes=2,3,4" | FileCheck %s
+// RUN: mlir-opt %s -linalg-tile="linalg-tile-sizes=2,3,4" -split-input-file | FileCheck %s
// CHECK-LABEL: func @matmul_tensors(
// CHECK-SAME: %[[TA:[0-9a-z]+]]: tensor<?x?xf32>
@@ -26,3 +26,97 @@ func @matmul_tensors(
// CHECK: return %[[TD0]] : tensor<?x?xf32>
return %0 : tensor<?x?xf32>
}
+
+// -----
+
+func @generic_op_tensors(
+ %arg0 : tensor<?x?x?xf32>, %arg1 : tensor<?x?x?xf32>) -> tensor<?x?x?xf32> {
+ %c0 = constant 0 : index
+ %c1 = constant 1 : index
+ %c2 = constant 2 : index
+ %0 = dim %arg0, %c0 : tensor<?x?x?xf32>
+ %1 = dim %arg0, %c1 : tensor<?x?x?xf32>
+ %2 = dim %arg0, %c2 : tensor<?x?x?xf32>
+ %3 = linalg.init_tensor [%0, %1, %2] : tensor<?x?x?xf32>
+ %4 = linalg.generic
+ {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1, d2)>,
+ affine_map<(d0, d1, d2) -> (d0, d2, d1)>,
+ affine_map<(d0, d1, d2) -> (d2, d1, d0)>],
+ iterator_types = ["parallel", "parallel", "parallel"]}
+ ins(%arg0, %arg1 : tensor<?x?x?xf32>, tensor<?x?x?xf32>)
+ outs(%3 : tensor<?x?x?xf32>) {
+ ^bb0(%arg2 : f32, %arg3: f32, %arg4: f32):
+ %5 = addf %arg2, %arg3 : f32
+ linalg.yield %5 : f32
+ } -> tensor<?x?x?xf32>
+ return %4 : tensor<?x?x?xf32>
+}
+
+// CHECK-LABEL: func @generic_op_tensors
+// CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]+]]: tensor<?x?x?xf32>
+// CHECK-SAME: %[[ARG1:[a-zA-Z0-9_]+]]: tensor<?x?x?xf32>
+// CHECK: %[[INIT:.+]] = linalg.init_tensor
+// CHECK: %[[TD0:.+]] = scf.for %{{.+}} to %{{.+}} step %{{.+}} iter_args(%[[TC0:.+]] = %[[INIT]]) -> (tensor<?x?x?xf32>) {
+// CHECK: %[[TD1:.+]] = scf.for %{{.+}} to %{{.+}} step %{{.+}} iter_args(%[[TC1:.+]] = %[[TC0]]) -> (tensor<?x?x?xf32>) {
+// CHECK: %[[TD2:.+]] = scf.for %{{.+}} to %{{.+}} step %{{.+}} iter_args(%[[TC2:.+]] = %[[TC1]]) -> (tensor<?x?x?xf32>) {
+// CHECK: %[[STARG0:.+]] = subtensor %[[ARG0]][{{.+}}] : tensor<?x?x?xf32> to tensor<?x?x?xf32>
+// CHECK: %[[STARG1:.+]] = subtensor %[[ARG1]][{{.+}}] : tensor<?x?x?xf32> to tensor<?x?x?xf32>
+// CHECK: %[[STARG2:.+]] = subtensor %[[TC2]][{{.+}}] : tensor<?x?x?xf32> to tensor<?x?x?xf32>
+// CHECK: %[[STRETURN:.+]] = linalg.generic
+// CHECK-SAME: ins(%[[STARG0]], %[[STARG1]] : tensor<?x?x?xf32>, tensor<?x?x?xf32>)
+// CHECK-SAME: outs(%[[STARG2]] : tensor<?x?x?xf32>)
+// CHECK: %[[TD:.+]] = subtensor_insert %[[STRETURN]] into %[[TC2]]
+// CHECK: scf.yield %[[TD]]
+// CHECK: }
+// CHECK: scf.yield %[[TD2]]
+// CHECK: }
+// CHECK: scf.yield %[[TD1]]
+// CHECK: }
+// CHECK: return %[[TD0]]
+
+// -----
+
+func @indexed_generic_op_tensors(
+ %arg0 : tensor<?x?x?xf32>, %arg1 : tensor<?x?x?xf32>) -> tensor<?x?x?xf32> {
+ %c0 = constant 0 : index
+ %c1 = constant 1 : index
+ %c2 = constant 2 : index
+ %0 = dim %arg0, %c0 : tensor<?x?x?xf32>
+ %1 = dim %arg0, %c1 : tensor<?x?x?xf32>
+ %2 = dim %arg0, %c2 : tensor<?x?x?xf32>
+ %3 = linalg.init_tensor [%0, %1, %2] : tensor<?x?x?xf32>
+ %4 = linalg.indexed_generic
+ {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1, d2)>,
+ affine_map<(d0, d1, d2) -> (d0, d2, d1)>,
+ affine_map<(d0, d1, d2) -> (d2, d1, d0)>],
+ iterator_types = ["parallel", "parallel", "parallel"]}
+ ins(%arg0, %arg1 : tensor<?x?x?xf32>, tensor<?x?x?xf32>)
+ outs(%3 : tensor<?x?x?xf32>) {
+ ^bb0(%arg2 : index, %arg3 : index, %arg4 : index, %arg5 : f32, %arg6: f32, %arg7: f32):
+ %5 = addf %arg5, %arg6 : f32
+ linalg.yield %5 : f32
+ } -> tensor<?x?x?xf32>
+ return %4 : tensor<?x?x?xf32>
+}
+
+// CHECK-LABEL: func @indexed_generic_op_tensors
+// CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]+]]: tensor<?x?x?xf32>
+// CHECK-SAME: %[[ARG1:[a-zA-Z0-9_]+]]: tensor<?x?x?xf32>
+// CHECK: %[[INIT:.+]] = linalg.init_tensor
+// CHECK: %[[TD0:.+]] = scf.for %{{.+}} to %{{.+}} step %{{.+}} iter_args(%[[TC0:.+]] = %[[INIT]]) -> (tensor<?x?x?xf32>) {
+// CHECK: %[[TD1:.+]] = scf.for %{{.+}} to %{{.+}} step %{{.+}} iter_args(%[[TC1:.+]] = %[[TC0]]) -> (tensor<?x?x?xf32>) {
+// CHECK: %[[TD2:.+]] = scf.for %{{.+}} to %{{.+}} step %{{.+}} iter_args(%[[TC2:.+]] = %[[TC1]]) -> (tensor<?x?x?xf32>) {
+// CHECK: %[[STARG0:.+]] = subtensor %[[ARG0]][{{.+}}] : tensor<?x?x?xf32> to tensor<?x?x?xf32>
+// CHECK: %[[STARG1:.+]] = subtensor %[[ARG1]][{{.+}}] : tensor<?x?x?xf32> to tensor<?x?x?xf32>
+// CHECK: %[[STARG2:.+]] = subtensor %[[TC2]][{{.+}}] : tensor<?x?x?xf32> to tensor<?x?x?xf32>
+// CHECK: %[[STRETURN:.+]] = linalg.indexed_generic
+// CHECK-SAME: ins(%[[STARG0]], %[[STARG1]] : tensor<?x?x?xf32>, tensor<?x?x?xf32>)
+// CHECK-SAME: outs(%[[STARG2]] : tensor<?x?x?xf32>)
+// CHECK: %[[TD:.+]] = subtensor_insert %[[STRETURN]] into %[[TC2]]
+// CHECK: scf.yield %[[TD]]
+// CHECK: }
+// CHECK: scf.yield %[[TD2]]
+// CHECK: }
+// CHECK: scf.yield %[[TD1]]
+// CHECK: }
+// CHECK: return %[[TD0]]