[Mlir-commits] [mlir] 73c0333 - [mlir][tensor][bufferize] Support 0-d collapse_shape with offset
Matthias Springer
llvmlistbot at llvm.org
Fri Apr 1 06:40:30 PDT 2022
Author: Matthias Springer
Date: 2022-04-01T22:30:37+09:00
New Revision: 73c0333deefc4e1d5981a4b76cc9df79a2877727
URL: https://github.com/llvm/llvm-project/commit/73c0333deefc4e1d5981a4b76cc9df79a2877727
DIFF: https://github.com/llvm/llvm-project/commit/73c0333deefc4e1d5981a4b76cc9df79a2877727.diff
LOG: [mlir][tensor][bufferize] Support 0-d collapse_shape with offset
Differential Revision: https://reviews.llvm.org/D122901
Added:
Modified:
mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp
mlir/test/Dialect/Tensor/bufferize.mlir
Removed:
################################################################################
diff --git a/mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp b/mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp
index 6f86270e5adda..01d8da85ce962 100644
--- a/mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp
@@ -115,12 +115,27 @@ struct CollapseShapeOpInterface
if (tensorResultType.getRank() == 0) {
// 0-d collapses must go through a
different op builder.
auto bufferType = buffer.getType().cast<MemRefType>();
- // Assume identity layout: No offset.
- assert(bufferType.getLayout().isIdentity() &&
- "non-zero offset for 0-d collapse not supported");
- MemRefLayoutAttrInterface layout;
- auto resultType = MemRefType::get({}, tensorResultType.getElementType(),
- layout, bufferType.getMemorySpace());
+ MemRefType resultType;
+
+ if (bufferType.getLayout().isIdentity()) {
+ // Standard layout: result type has no offset.
+ MemRefLayoutAttrInterface layout;
+ resultType = MemRefType::get({}, tensorResultType.getElementType(),
+ layout, bufferType.getMemorySpace());
+ } else {
+ // Source memref has a layout map: result type has the same offset as
+ // the source type.
+ SmallVector<int64_t> strides;
+ int64_t offset;
+ if (failed(getStridesAndOffset(bufferType, strides, offset)))
+ return failure();
+ AffineMap resultLayout =
+ makeStridedLinearLayoutMap({}, offset, op->getContext());
+ resultType =
+ MemRefType::get({}, tensorResultType.getElementType(), resultLayout,
+ bufferType.getMemorySpaceAsInt());
+ }
+
replaceOpWithNewBufferizedOp<memref::CollapseShapeOp>(
rewriter, op, resultType, buffer, collapseShapeOp.reassociation());
return success();
diff --git a/mlir/test/Dialect/Tensor/bufferize.mlir b/mlir/test/Dialect/Tensor/bufferize.mlir
index 5fa8e3f8a2a46..2178d9d3fa4fc 100644
--- a/mlir/test/Dialect/Tensor/bufferize.mlir
+++ b/mlir/test/Dialect/Tensor/bufferize.mlir
@@ -3,6 +3,8 @@
// CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
// CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0, d1)[s0] -> (d0 * 20 + s0 + d1)>
// CHECK-DAG: #[[$MAP2:.*]] = affine_map<(d0, d1, d2, d3)[s0] -> (d0 * 140 + d1 * 20 + d2 * 5 + d3 + s0)>
+// CHECK-DAG: #[[$MAP3:.*]] = affine_map<(d0) -> (d0 + 1)>
+// CHECK-DAG: #[[$MAP4:.*]] = affine_map<() -> (1)>
// CHECK-LABEL: func @dim(
// CHECK-SAME: %[[TENSOR:.*]]: tensor<f32>,
@@ -361,3 +363,12 @@ func @tensor.collapse_shape_to_scalar(%t1: tensor<1x1x1xf32>) -> tensor<f32> {
// CHECK: return %[[r]]
return %0 : tensor<f32>
}
+
+// CHECK-LABEL: func @tensor.collapse_shape_of_slice(
+func @tensor.collapse_shape_of_slice(%arg0: tensor<2xi32>) -> tensor<i32> {
+ // CHECK: memref.subview %{{.*}}[1] [1] [1] : memref<2xi32> to memref<1xi32, #[[$MAP3]]>
+ %0 = tensor.extract_slice %arg0[1] [1] [1] : tensor<2xi32> to tensor<1xi32>
+ // CHECK: memref.collapse_shape %{{.*}} [] : memref<1xi32, #[[$MAP3]]> into memref<i32, #[[$MAP4]]>
+ %1 = tensor.collapse_shape %0 [] : tensor<1xi32> into tensor<i32>
+ return %1 : tensor<i32>
+}
More information about the Mlir-commits
mailing list