[Mlir-commits] [mlir] dfa96cf - [mlir][tensor] Fix ReifyRankedShapedTypeOpInterface impl. of reshape ops
Matthias Springer
llvmlistbot at llvm.org
Thu Aug 24 03:32:28 PDT 2023
Author: Matthias Springer
Date: 2023-08-24T12:23:10+02:00
New Revision: dfa96cfd7c2b86cb2379cb79e4259b8febf359ed
URL: https://github.com/llvm/llvm-project/commit/dfa96cfd7c2b86cb2379cb79e4259b8febf359ed
DIFF: https://github.com/llvm/llvm-project/commit/dfa96cfd7c2b86cb2379cb79e4259b8febf359ed.diff
LOG: [mlir][tensor] Fix ReifyRankedShapedTypeOpInterface impl. of reshape ops
`reifyResultShapes` should return an `Attribute` if and only if the respective dimension is static.
This fixes #64256.
Differential Revision: https://reviews.llvm.org/D158166
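
For readers less familiar with the interface, here is a minimal sketch of the contract described in the log message. The helper name `reifyDim` and its exact shape are my own simplification for illustration, not code from this commit:

#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinTypes.h"

using namespace mlir;

// Illustrative only (hypothetical helper, not part of this patch): report one
// extent of `src` the way reifyResultShapes implementations are expected to:
// as an IndexAttr iff the dimension is static, as an SSA Value otherwise.
static OpFoldResult reifyDim(OpBuilder &builder, Location loc, Value src,
                             int64_t dim) {
  auto type = cast<RankedTensorType>(src.getType());
  if (!type.isDynamicDim(dim)) {
    // Static dimension: return Attribute.
    return builder.getIndexAttr(type.getDimSize(dim));
  }
  // Dynamic dimension: materialize a tensor.dim op and return its Value.
  return builder.create<tensor::DimOp>(loc, src, dim).getResult();
}

In the patch below, the static case keeps returning builder.getIndexAttr(...), while the dynamic case switches from affine::makeComposedFoldedAffineApply to affine::makeComposedAffineApply(...)->getResult(0) so that a Value is always produced.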
Added:
Modified:
mlir/lib/Dialect/Tensor/IR/TensorInferTypeOpInterfaceImpl.cpp
mlir/test/Dialect/Tensor/resolve-shaped-type-result-dims.mlir
Removed:
################################################################################
diff --git a/mlir/lib/Dialect/Tensor/IR/TensorInferTypeOpInterfaceImpl.cpp b/mlir/lib/Dialect/Tensor/IR/TensorInferTypeOpInterfaceImpl.cpp
index c9e71a820dae8b..c1358e18a5b230 100644
--- a/mlir/lib/Dialect/Tensor/IR/TensorInferTypeOpInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Tensor/IR/TensorInferTypeOpInterfaceImpl.cpp
@@ -42,6 +42,7 @@ static OpFoldResult getCollapsedOutputDimFromInputShape(
OpBuilder &builder, Location loc, int64_t dimIndex, Value src,
ArrayRef<int64_t> dstStaticShape, ArrayRef<AffineMap> reassociationMap) {
if (!ShapedType::isDynamic(dstStaticShape[dimIndex])) {
+ // Static dimension: return Attribute.
return builder.getIndexAttr(dstStaticShape[dimIndex]);
}
AffineMap map = reassociationMap[dimIndex];
@@ -55,9 +56,12 @@ static OpFoldResult getCollapsedOutputDimFromInputShape(
AffineExpr currExpr = builder.getAffineSymbolExpr(dim - startPos);
expr = (expr ? expr * currExpr : currExpr);
}
- return affine::makeComposedFoldedAffineApply(
- builder, loc, AffineMap::get(0, endPos - startPos + 1, expr),
- dynamicDims);
+
+ // Dynamic dimension: return Value.
+ return affine::makeComposedAffineApply(
+ builder, loc, AffineMap::get(0, endPos - startPos + 1, expr),
+ dynamicDims)
+ ->getResult(0);
}
/// Given the `src` of a collapsing reshape op and its reassociation maps,
@@ -79,6 +83,7 @@ static OpFoldResult getExpandedOutputDimFromInputShape(
ArrayRef<int64_t> dstStaticShape, ArrayRef<AffineMap> reassociation,
llvm::DenseMap<int64_t, int64_t> &expandedDimToCollapsedDim) {
if (!ShapedType::isDynamic(dstStaticShape[dimIndex])) {
+ // Static dimension: return Attribute.
return builder.getIndexAttr(dstStaticShape[dimIndex]);
}
unsigned sourceDimPos = expandedDimToCollapsedDim[dimIndex];
@@ -104,11 +109,15 @@ static OpFoldResult getExpandedOutputDimFromInputShape(
}
OpFoldResult sourceDim =
builder.create<tensor::DimOp>(loc, src, sourceDimPos).getResult();
- return affine::makeComposedFoldedAffineApply(
- builder, loc,
- AffineMap::get(
- 0, 1, builder.getAffineSymbolExpr(0).floorDiv(linearizedStaticDim)),
- sourceDim);
+
+ // Dynamic dimension: return Value.
+ return affine::makeComposedAffineApply(
+ builder, loc,
+ AffineMap::get(
+ 0, 1,
+ builder.getAffineSymbolExpr(0).floorDiv(linearizedStaticDim)),
+ sourceDim)
+ ->getResult(0);
}
/// Given the `src` of an expanding reshape op, the reassociation maps and the
diff --git a/mlir/test/Dialect/Tensor/resolve-shaped-type-result-dims.mlir b/mlir/test/Dialect/Tensor/resolve-shaped-type-result-dims.mlir
index f031d935c43e71..1676c146773775 100644
--- a/mlir/test/Dialect/Tensor/resolve-shaped-type-result-dims.mlir
+++ b/mlir/test/Dialect/Tensor/resolve-shaped-type-result-dims.mlir
@@ -142,3 +142,21 @@ func.func @extract_slice_rank_reduced_6(%arg0 : tensor<?x?x?xf32>, %arg1 : index
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9_]+]]: index
// CHECK-SAME: %[[ARG2:[a-zA-Z0-9_]+]]: index
// CHECK: return %[[ARG1]], %[[ARG2]]
+
+// -----
+
+func.func @collapse_shape() -> index {
+ %c0 = arith.constant 0 : index
+ %c7 = arith.constant 7 : index
+ %c1_i16 = arith.constant 1 : i16
+ %generated = tensor.generate %c7 {
+ ^bb0(%arg3: index, %arg4: index):
+ tensor.yield %c1_i16 : i16
+ } : tensor<?x22xi16>
+ %collapsed = tensor.collapse_shape %generated [[0, 1]] : tensor<?x22xi16> into tensor<?xi16>
+ %d0 = tensor.dim %collapsed, %c0 : tensor<?xi16>
+ return %d0 : index
+}
+// CHECK-LABEL: func @collapse_shape(
+// CHECK: %[[c154:.*]] = arith.constant 154 : index
+// CHECK: return %[[c154]]
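
A note on the change itself, as I read the diff: affine::makeComposedFoldedAffineApply returns an OpFoldResult and may fold the computation down to a constant Attribute even when the reified dimension is dynamic in the result type, which appears to be the contract violation the log message describes. affine::makeComposedAffineApply materializes an affine.apply op, so taking its result always yields an SSA Value. A hedged sketch of the two call patterns (hypothetical helper, paraphrased call sites rather than the exact code):

#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/IR/Builders.h"

using namespace mlir;

// Illustrative helper, not from the patch; `map` and `operands` stand in for
// the expressions built by the reshape reification code.
static OpFoldResult reifyDynamicExtent(OpBuilder &builder, Location loc,
                                       AffineMap map,
                                       ArrayRef<OpFoldResult> operands) {
  // Before the fix: may constant-fold and hand back an Attribute, even though
  // the corresponding result dimension is dynamic.
  //   return affine::makeComposedFoldedAffineApply(builder, loc, map, operands);

  // After the fix: always materialize an affine.apply op and return its Value.
  return affine::makeComposedAffineApply(builder, loc, map, operands)
      ->getResult(0);
}

Constant extents are still recovered downstream, as the new CHECK lines for @collapse_shape show: after resolution and folding, the dynamic size of the collapsed tensor becomes arith.constant 154 (7 * 22).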