[Mlir-commits] [mlir] 8b78c50 - [mlir] Fix incorrect indexing of subview in DimOp folding.
Nicolas Vasilache
llvmlistbot at llvm.org
Fri May 15 10:52:15 PDT 2020
Author: Nicolas Vasilache
Date: 2020-05-15T13:50:40-04:00
New Revision: 8b78c50e82db0a21c0c9e3ca9635625f29889ea6
URL: https://github.com/llvm/llvm-project/commit/8b78c50e82db0a21c0c9e3ca9635625f29889ea6
DIFF: https://github.com/llvm/llvm-project/commit/8b78c50e82db0a21c0c9e3ca9635625f29889ea6.diff
LOG: [mlir] Fix incorrect indexing of subview in DimOp folding.
DimOp folding was using bare accesses to the underlying SubViewOp operands:
the dim index was used directly to index the dynamic-size operand list, which
only holds entries for the dynamic dimensions. This is incorrect whenever some
of the subview sizes are static, and is fixed in this revision.
Differential Revision: https://reviews.llvm.org/D80017
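To make the failure mode concrete, here is a minimal sketch mirroring the test
case added below (%0, %arg4, and #map3 are as defined in that test): a subview
whose first size is static (17) and whose second size is dynamic. The
dynamic-size operand list contains only %arg4, so the old fold, which computed
*(sizes.begin() + getIndex()) for dim index 1, indexed past the single dynamic
operand instead of returning %arg4:

  %sv2 = subview %0[0, 0][17, %arg4][1, 1]
      : memref<?x?xf32> to memref<17x?xf32, #map3>
  // Must fold to %arg4: dim index 1 corresponds to dynamic-size
  // operand 0, because size 0 (17) is static and has no operand.
  %u2 = dim %sv2, 1 : memref<17x?xf32, #map3>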
Added:
Modified:
mlir/lib/Dialect/StandardOps/IR/Ops.cpp
mlir/test/Transforms/canonicalize.mlir
Removed:
################################################################################
diff --git a/mlir/lib/Dialect/StandardOps/IR/Ops.cpp b/mlir/lib/Dialect/StandardOps/IR/Ops.cpp
index 9cd97c3b337e..7fc598a36421 100644
--- a/mlir/lib/Dialect/StandardOps/IR/Ops.cpp
+++ b/mlir/lib/Dialect/StandardOps/IR/Ops.cpp
@@ -1315,21 +1315,17 @@ static LogicalResult verify(DimOp op) {
OpFoldResult DimOp::fold(ArrayRef<Attribute> operands) {
// Constant fold dim when the size along the index referred to is a constant.
auto opType = memrefOrTensor().getType();
- int64_t dimSize = ShapedType::kDynamicSize;
- if (auto tensorType = opType.dyn_cast<RankedTensorType>())
- dimSize = tensorType.getShape()[getIndex()];
- else if (auto memrefType = opType.dyn_cast<MemRefType>())
- dimSize = memrefType.getShape()[getIndex()];
-
- if (!ShapedType::isDynamic(dimSize))
- return IntegerAttr::get(IndexType::get(getContext()), dimSize);
+ if (auto shapedType = opType.dyn_cast<ShapedType>())
+ if (!shapedType.isDynamicDim(getIndex()))
+ return IntegerAttr::get(IndexType::get(getContext()),
+ shapedType.getShape()[getIndex()]);
// Fold dim to the size argument for an AllocOp/ViewOp/SubViewOp.
auto memrefType = opType.dyn_cast<MemRefType>();
if (!memrefType)
return {};
- // The size at getIndex() is now a dynamic size of a memref.
+ // The size at getIndex() is now known to be a dynamic size of a memref.
auto memref = memrefOrTensor().getDefiningOp();
if (auto alloc = dyn_cast_or_null<AllocOp>(memref))
return *(alloc.getDynamicSizes().begin() +
@@ -1339,11 +1335,10 @@ OpFoldResult DimOp::fold(ArrayRef<Attribute> operands) {
if (auto view = dyn_cast_or_null<ViewOp>(memref))
return *(view.getDynamicSizes().begin() +
memrefType.getDynamicDimIndex(getIndex()));
- // The subview op here is expected to have rank dynamic sizes now.
if (auto subview = dyn_cast_or_null<SubViewOp>(memref)) {
- auto sizes = subview.sizes();
- if (!sizes.empty())
- return *(sizes.begin() + getIndex());
+ assert(subview.isDynamicSize(getIndex()) &&
+ "Expected dynamic subview size");
+ return subview.getDynamicSize(getIndex());
}
/// dim(memrefcast) -> dim
diff --git a/mlir/test/Transforms/canonicalize.mlir b/mlir/test/Transforms/canonicalize.mlir
index eb768e1e3b4b..b17cade291a5 100644
--- a/mlir/test/Transforms/canonicalize.mlir
+++ b/mlir/test/Transforms/canonicalize.mlir
@@ -429,8 +429,13 @@ func @dyn_shape_fold(%L : index, %M : index) -> (memref<? x ? x i32>, memref<? x
#map1 = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)>
#map2 = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s2 + d1 * s1 + d2 + s0)>
+#map3 = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
-// CHECK-LABEL: func @dim_op_fold(%arg0: index, %arg1: index, %arg2: index,
+// CHECK-LABEL: func @dim_op_fold(
+// CHECK-SAME: %[[ARG0:[a-z0-9]*]]: index
+// CHECK-SAME: %[[ARG1:[a-z0-9]*]]: index
+// CHECK-SAME: %[[ARG2:[a-z0-9]*]]: index
+// CHECK-SAME: %[[BUF:[a-z0-9]*]]: memref<?xi8>
func @dim_op_fold(%arg0: index, %arg1: index, %arg2: index, %BUF: memref<?xi8>, %M : index, %N : index, %K : index) {
// CHECK-SAME: [[M:arg[0-9]+]]: index
// CHECK-SAME: [[N:arg[0-9]+]]: index
@@ -452,11 +457,20 @@ func @dim_op_fold(%arg0: index, %arg1: index, %arg2: index, %BUF: memref<?xi8>,
affine.for %arg5 = %l to %u {
"foo"() : () -> ()
}
+ %sv2 = subview %0[0, 0][17, %arg4][1, 1] : memref<?x?xf32> to memref<17x?xf32, #map3>
+ %l2 = dim %v, 1 : memref<?x?xf32>
+ %u2 = dim %sv2, 1 : memref<17x?xf32, #map3>
+ scf.for %arg5 = %l2 to %u2 step %c1 {
+ "foo"() : () -> ()
+ }
}
}
- // CHECK-NEXT: affine.for %arg7 = 0 to %arg2 {
- // CHECK-NEXT: affine.for %arg8 = 0 to %arg0 {
- // CHECK-NEXT: affine.for %arg9 = %arg0 to %arg0 {
+ // CHECK: affine.for %[[I:.*]] = 0 to %[[ARG2]] {
+ // CHECK-NEXT: affine.for %[[J:.*]] = 0 to %[[ARG0]] {
+ // CHECK-NEXT: affine.for %[[K:.*]] = %[[ARG0]] to %[[ARG0]] {
+ // CHECK-NEXT: "foo"() : () -> ()
+ // CHECK-NEXT: }
+ // CHECK-NEXT: scf.for %[[KK:.*]] = %[[ARG0]] to %[[J]] step %{{.*}} {
// CHECK-NEXT: "foo"() : () -> ()
// CHECK-NEXT: }
// CHECK-NEXT: }
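For reference, the remapping the fix relies on can be sketched as follows.
This is a hypothetical standalone helper, not the actual MLIR API; the real
logic lives in MemRefType::getDynamicDimIndex (used for the AllocOp/ViewOp
cases) and SubViewOp::isDynamicSize / getDynamicSize (used for the SubViewOp
case). A dim index is translated to a position in the dynamic-size operand
list by counting the dynamic dimensions that precede it:

  #include "llvm/ADT/ArrayRef.h"
  #include "mlir/IR/StandardTypes.h" // ShapedType, circa May 2020 layout

  // Hypothetical sketch: map a result-dimension index to its position in
  // the dynamic-size operand list. Only dynamic dimensions contribute an
  // operand, so static entries before `dimIndex` are skipped.
  static unsigned dynamicOperandIndex(llvm::ArrayRef<int64_t> shape,
                                      unsigned dimIndex) {
    unsigned pos = 0;
    for (unsigned i = 0; i < dimIndex; ++i)
      if (mlir::ShapedType::isDynamic(shape[i]))
        ++pos;
    return pos;
  }

For the subview in the test above, the result shape is [17, dynamic]; with
dimIndex 1 this yields operand position 0, i.e. %arg4, whereas the old code
read operand position 1.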