[Mlir-commits] [mlir] [MLIR] Fix canonicalization pattern for 'shape.shape_of' (PR #134234)

Alaa Ali llvmlistbot at llvm.org
Thu Apr 3 13:52:24 PDT 2025


https://github.com/alaa-ali updated https://github.com/llvm/llvm-project/pull/134234

From 75de3afe3720c7c4f1c2ae4f484dfa9b9467925a Mon Sep 17 00:00:00 2001
From: Alaa Ali <alaaali at ah-alaaali-l.dhcp.mathworks.com>
Date: Thu, 3 Apr 2025 07:27:25 -0400
Subject: [PATCH 1/2] Fix canonicalization pattern for shape.shape_of

---
 mlir/lib/Dialect/Shape/IR/Shape.cpp       | 18 ++++++++++---
 mlir/test/Dialect/Shape/canonicalize.mlir | 33 +++++++++++++++++++++--
 2 files changed, 46 insertions(+), 5 deletions(-)

diff --git a/mlir/lib/Dialect/Shape/IR/Shape.cpp b/mlir/lib/Dialect/Shape/IR/Shape.cpp
index 10ba808cd26c2..b8eac7c86797b 100644
--- a/mlir/lib/Dialect/Shape/IR/Shape.cpp
+++ b/mlir/lib/Dialect/Shape/IR/Shape.cpp
@@ -1734,10 +1734,22 @@ struct ShapeOfFromReshape : public OpRewritePattern<shape::ShapeOfOp> {
     // Operand 'shape' of 'tensor.reshape' may now be used as the result of
     // 'shape.shape_of'. While its type is guaranteed to be compatible in well-
     // formed IR, it may not be identical (dynamically vs statically shaped),
-    // in which case it needs to be cast first.
+    // in which case it needs to be cast first using 'tensor.cast'.
+    // Additionally, its element type may differ from the result's (i32 vs
+    // index) even when the shaped types are compatible, in which case it
+    // needs to be cast first using 'arith.index_cast'.
+    // Note: the result of 'shape.shape_of' must be a shape or an extent tensor.
     Value shape = tensorReshapeOp.getShape();
-    if (op.getType() != shape.getType())
-      shape = rewriter.create<tensor::CastOp>(op.getLoc(), op.getType(), shape);
+
+    auto opTensorType = llvm::dyn_cast<RankedTensorType>(op.getType());
+    auto shapeTensorType = llvm::dyn_cast<RankedTensorType>(shape.getType());
+
+    if (op.getType() != shape.getType()) {
+      if (opTensorType.getElementType() == shapeTensorType.getElementType())
+        shape = rewriter.create<tensor::CastOp>(op.getLoc(), op.getType(), shape);
+      else if (!isExtentTensorType(shape.getType()))
+        shape = rewriter.create<arith::IndexCastOp>(op.getLoc(), op.getType(), shape);
+    }
 
     rewriter.replaceOp(op, shape);
     return success();
diff --git a/mlir/test/Dialect/Shape/canonicalize.mlir b/mlir/test/Dialect/Shape/canonicalize.mlir
index cf439c9c1b854..9b25468b3ab1e 100644
--- a/mlir/test/Dialect/Shape/canonicalize.mlir
+++ b/mlir/test/Dialect/Shape/canonicalize.mlir
@@ -1389,10 +1389,25 @@ func.func @shape_of_from_reshape(%arg0: tensor<*xf32>, %arg1: tensor<?xindex>) -
 
 // -----
 
-// CHECK-LABEL: func @shape_of_from_reshape_compatible_types
+// Check an element type cast from i32 to index, with static shapes on both sides.
+// CHECK-LABEL: func @shape_of_from_reshape_compatible_types1
+// CHECK-SAME: %[[INPUT:.*]]: tensor<?x1xf32>
+// CHECK-SAME: %[[SHAPE:.*]]: tensor<3xi32>
+func.func @shape_of_from_reshape_compatible_types1(%arg0: tensor<?x1xf32>, %arg1: tensor<3xi32>) -> tensor<3xindex> {
+  // CHECK: %[[CAST_SHAPE:.*]] = arith.index_cast %[[SHAPE]] : tensor<3xi32> to tensor<3xindex>
+  // CHECK: return %[[CAST_SHAPE]] : tensor<3xindex>
+  %0 = tensor.reshape %arg0(%arg1) : (tensor<?x1xf32>, tensor<3xi32>) -> tensor<?x1x1xf32>
+  %1 = shape.shape_of %0 : tensor<?x1x1xf32> -> tensor<3xindex>
+  return %1 : tensor<3xindex>
+}
+
+// -----
+
+// Check matching element types, with a cast from a static shape to a dynamic shape.
+// CHECK-LABEL: func @shape_of_from_reshape_compatible_types2
 // CHECK-SAME: %[[INPUT:.*]]: tensor<*xf32>
 // CHECK-SAME: %[[SHAPE:.*]]: tensor<5xindex>
-func.func @shape_of_from_reshape_compatible_types(%arg0: tensor<*xf32>, %arg1: tensor<5xindex>) -> tensor<?xindex> {
+func.func @shape_of_from_reshape_compatible_types2(%arg0: tensor<*xf32>, %arg1: tensor<5xindex>) -> tensor<?xindex> {
   // CHECK: %[[CAST_SHAPE:.*]] = tensor.cast %[[SHAPE]] : tensor<5xindex> to tensor<?xindex>
   // CHECK: return %[[CAST_SHAPE]] : tensor<?xindex>
   %0 = tensor.reshape %arg0(%arg1) : (tensor<*xf32>, tensor<5xindex>) -> tensor<*xf32>
@@ -1402,6 +1417,20 @@ func.func @shape_of_from_reshape_compatible_types(%arg0: tensor<*xf32>, %arg1: t
 
 // -----
 
+// Check matching element types, with a cast from a dynamic shape to a static shape.
+// CHECK-LABEL: func @shape_of_from_reshape_compatible_types3
+// CHECK-SAME: %[[INPUT:.*]]: tensor<*xf32>
+// CHECK-SAME: %[[SHAPE:.*]]: tensor<?xindex>
+func.func @shape_of_from_reshape_compatible_types3(%arg0: tensor<*xf32>, %arg1: tensor<?xindex>) -> tensor<5xindex> {
+  // CHECK: %[[CAST_SHAPE:.*]] = tensor.cast %[[SHAPE]] : tensor<?xindex> to tensor<5xindex>
+  // CHECK: return %[[CAST_SHAPE]] : tensor<5xindex>
+  %0 = tensor.reshape %arg0(%arg1) : (tensor<*xf32>, tensor<?xindex>) -> tensor<*xf32>
+  %1 = shape.shape_of %0 : tensor<*xf32> -> tensor<5xindex>
+  return %1 : tensor<5xindex>
+}
+
+// -----
+
 // CHECK-LABEL: func @shape_of_from_reshape_nofold
 // CHECK-SAME: %[[INPUT:.*]]: tensor<*xf32>
 // CHECK-SAME: %[[SHAPE:.*]]: tensor<?xindex>
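
The effect of PATCH 1 on IR shaped like the new test cases, sketched for readers following along (function and value names are hypothetical, and this assumes the canonicalizer also erases the now-unused 'tensor.reshape'; it is not output copied from a real run):

  // Input: the shape operand carries i32 extents, the result needs index.
  func.func @example(%t: tensor<?x1xf32>, %shape: tensor<3xi32>) -> tensor<3xindex> {
    %r = tensor.reshape %t(%shape) : (tensor<?x1xf32>, tensor<3xi32>) -> tensor<?x1x1xf32>
    %s = shape.shape_of %r : tensor<?x1x1xf32> -> tensor<3xindex>
    return %s : tensor<3xindex>
  }

  // After canonicalization: the shape operand replaces the 'shape.shape_of'
  // result directly. Here the shaped types match but the element types
  // differ, so the pattern inserts an 'arith.index_cast'; when the element
  // types match and only the static/dynamic shape differs, it inserts a
  // 'tensor.cast' instead.
  func.func @example(%t: tensor<?x1xf32>, %shape: tensor<3xi32>) -> tensor<3xindex> {
    %s = arith.index_cast %shape : tensor<3xi32> to tensor<3xindex>
    return %s : tensor<3xindex>
  }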

From 394735f79035ae8586521302b1b89fc99462d26d Mon Sep 17 00:00:00 2001
From: Alaa Ali <alaaali at ah-alaaali-l.dhcp.mathworks.com>
Date: Thu, 3 Apr 2025 08:34:15 -0400
Subject: [PATCH 2/2] Add null check for dyn_cast results

---
 mlir/lib/Dialect/Shape/IR/Shape.cpp | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/mlir/lib/Dialect/Shape/IR/Shape.cpp b/mlir/lib/Dialect/Shape/IR/Shape.cpp
index b8eac7c86797b..f9302256eefe2 100644
--- a/mlir/lib/Dialect/Shape/IR/Shape.cpp
+++ b/mlir/lib/Dialect/Shape/IR/Shape.cpp
@@ -1741,11 +1741,13 @@ struct ShapeOfFromReshape : public OpRewritePattern<shape::ShapeOfOp> {
     // Note: the result of 'shape.shape_of' must be a shape or an extent tensor.
     Value shape = tensorReshapeOp.getShape();
 
-    auto opTensorType = llvm::dyn_cast<RankedTensorType>(op.getType());
-    auto shapeTensorType = llvm::dyn_cast<RankedTensorType>(shape.getType());
+    auto opTensorTy = llvm::dyn_cast<RankedTensorType>(op.getType());
+    auto shapeTensorTy = llvm::dyn_cast<RankedTensorType>(shape.getType());
+    if (!opTensorTy || !shapeTensorTy)
+      return failure();
 
     if (op.getType() != shape.getType()) {
-      if (opTensorType.getElementType() == shapeTensorType.getElementType())
+      if (opTensorTy.getElementType() == shapeTensorTy.getElementType())
         shape = rewriter.create<tensor::CastOp>(op.getLoc(), op.getType(), shape);
       else if (!isExtentTensorType(shape.getType()))
         shape = rewriter.create<arith::IndexCastOp>(op.getLoc(), op.getType(), shape);
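
Why the PATCH 2 guard matters: 'shape.shape_of' is not required to produce a ranked extent tensor; it may also produce a '!shape.shape'. A minimal hypothetical sketch (not taken from the test suite) of IR that would make the unguarded PATCH 1 code call getElementType() on a null RankedTensorType:

  %0 = tensor.reshape %arg0(%arg1) : (tensor<*xf32>, tensor<?xindex>) -> tensor<*xf32>
  %1 = shape.shape_of %0 : tensor<*xf32> -> !shape.shape

With the dyn_cast check in place, the pattern returns failure() on such IR and leaves it untouched.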


