[Mlir-commits] [mlir] [mlir][tensor] Restrict the verifier for tensor.pack/tensor.unpack (PR #113108)

Andrzej WarzyƄski llvmlistbot at llvm.org
Tue Oct 22 14:40:22 PDT 2024


https://github.com/banach-space updated https://github.com/llvm/llvm-project/pull/113108

From 714250b66e97a3cc1b8c725d7aad56c10064c226 Mon Sep 17 00:00:00 2001
From: Andrzej Warzynski <andrzej.warzynski at arm.com>
Date: Sun, 20 Oct 2024 16:51:34 -0700
Subject: [PATCH 1/3] [mlir][tensor] Restrict the verifier for
 tensor.pack/tensor.unpack

Restricts the verifier for tensor.pack and tensor.unpack Ops so that the
following is no longer allowed:

```mlir
  %c8 = arith.constant 8 : index
  %0 = tensor.pack %input inner_dims_pos = [0, 1] inner_tiles = [8, %c8] into %output : tensor<?x?xf32> -> tensor<?x?x8x8xf32>
```

Specifically, in line with other Tensor Ops, require:
  * a dynamic dimension for each dynamic tile size (SSA value),
  * a static dimension for each static tile size (attribute).

In the example above, a static dimension (8) is mixed with a dynamic tile
size (%c8).
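
For reference, the forms below remain valid under the stricter rule - each
dynamic tile size pairs with a dynamic result dimension and each static tile
size with a matching static one. This is a minimal sketch, not part of the
patch; %output_static and %output_dyn are hypothetical destination tensors
with the shown types:

```mlir
  %c8 = arith.constant 8 : index
  // Static tile sizes paired with static result dimensions.
  %1 = tensor.pack %input inner_dims_pos = [0, 1] inner_tiles = [8, 8] into %output_static : tensor<?x?xf32> -> tensor<?x?x8x8xf32>
  // Dynamic tile size (%c8) paired with a dynamic result dimension.
  %2 = tensor.pack %input inner_dims_pos = [0, 1] inner_tiles = [8, %c8] into %output_dyn : tensor<?x?xf32> -> tensor<?x?x8x?xf32>
```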

Note that this change mostly deletes existing code - it simplifies the
logic in the verifier.

For more context:
  * https://discourse.llvm.org/t/tensor-ops-with-dynamic-sizes-which-behaviour-is-more-correct
---
 mlir/lib/Dialect/Tensor/IR/TensorOps.cpp | 21 +++++--------
 mlir/test/Dialect/Tensor/invalid.mlir    | 38 ++++++++++++++++++++++++
 2 files changed, 45 insertions(+), 14 deletions(-)

diff --git a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
index 4d6c5965c4fcc3..60a04152848d88 100644
--- a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
+++ b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
@@ -3865,22 +3865,15 @@ static LogicalResult commonVerifierPackAndUnPackOp(OpTy packOrUnPack) {
           llvm::zip(packedType.getShape().take_back(mixedTiles.size()),
                     mixedTiles),
           [](std::tuple<int64_t, OpFoldResult> it) {
-            std::optional<int64_t> constTileSize =
-                getConstantIntValue(std::get<1>(it));
             int64_t shape = std::get<0>(it);
-            if (!constTileSize) {
-              // If specified tile size is dynamic, output shape should
-              // be dynamic too.
-              return ShapedType::isDynamic(shape);
+            if (Attribute attr =
+                    llvm::dyn_cast_if_present<Attribute>(std::get<1>(it))) {
+              if (IntegerAttr intAttr = dyn_cast_or_null<IntegerAttr>(attr)) {
+                int64_t staticTileSize = intAttr.getValue().getSExtValue();
+                return shape == staticTileSize;
+              }
             }
-            if (ShapedType::isDynamic(shape)) {
-              // For the shape being dynamic when tile size is
-              // specified, return true. In canonical form a constant
-              // tile size should lead to constant shape of the tiled
-              // dimension, but not needed for verification.
-              return true;
-            }
-            return shape == constTileSize.value();
+            return ShapedType::isDynamic(shape);
           })) {
     return op->emitError("mismatch in inner tile sizes specified and shaped of "
                          "tiled dimension in the packed type");
diff --git a/mlir/test/Dialect/Tensor/invalid.mlir b/mlir/test/Dialect/Tensor/invalid.mlir
index 921d7f9f1fefc3..be470ce2af9b31 100644
--- a/mlir/test/Dialect/Tensor/invalid.mlir
+++ b/mlir/test/Dialect/Tensor/invalid.mlir
@@ -755,9 +755,47 @@ func.func @pack_mismatch_inner_tile_size_and_output_shape(
 
 // -----
 
+func.func @pack_dynamic_inner_tile_size_and_static_output_shape(
+  %input : tensor<?x?xf32>, %output : tensor<?x?x8x8xf32>) -> tensor<?x?x8x8xf32> {
+  %c8 = arith.constant 8 : index
+  // expected-error at +1 {{mismatch in inner tile sizes specified and shaped of tiled dimension in the packed type}}
+  %0 = tensor.pack %input inner_dims_pos = [0, 1] inner_tiles = [8, %c8] into %output : tensor<?x?xf32> -> tensor<?x?x8x8xf32>
+  return %0 : tensor<?x?x8x8xf32>
+}
+
+// -----
+
+func.func @pack_static_inner_tile_size_and_dynamic_output_shape(
+  %input : tensor<?x?xf32>, %output : tensor<?x?x8x?xf32>) -> tensor<?x?x8x?xf32> {
+  // expected-error at +1 {{mismatch in inner tile sizes specified and shaped of tiled dimension in the packed type}}
+  %0 = tensor.pack %input inner_dims_pos = [0, 1] inner_tiles = [8, 8] into %output : tensor<?x?xf32> -> tensor<?x?x8x?xf32>
+  return %0 : tensor<?x?x8x?xf32>
+}
+
+// -----
+
 func.func @unpack_mismatch_inner_tile_size_and_output_shape(
   %input : tensor<?x?x8x8xf32>, %output : tensor<?x?xf32>) -> tensor<?x?xf32> {
   // expected-error at +1 {{mismatch in inner tile sizes specified and shaped of tiled dimension in the packed type}}
   %0 = tensor.unpack %input inner_dims_pos = [0, 1] inner_tiles = [8, 4] into %output : tensor<?x?x8x8xf32> -> tensor<?x?xf32>
   return %0 : tensor<?x?xf32>
 }
+
+// -----
+
+func.func @unpack_dynamic_inner_tile_size_and_static_output_shape(
+  %input : tensor<?x?x8x4xf32>, %output : tensor<?x?xf32>) -> tensor<?x?xf32> {
+  %c8 = arith.constant 8 : index
+  // expected-error at +1 {{mismatch in inner tile sizes specified and shaped of tiled dimension in the packed type}}
+  %0 = tensor.unpack %input inner_dims_pos = [0, 1] inner_tiles = [%c8, 4] into %output : tensor<?x?x8x4xf32> -> tensor<?x?xf32>
+  return %0 : tensor<?x?xf32>
+}
+
+// -----
+
+func.func @unpack_static_inner_tile_size_and_dynamic_output_shape(
+  %input : tensor<?x?x?x4xf32>, %output : tensor<?x?xf32>) -> tensor<?x?xf32> {
+  // expected-error at +1 {{mismatch in inner tile sizes specified and shaped of tiled dimension in the packed type}}
+  %0 = tensor.unpack %input inner_dims_pos = [0, 1] inner_tiles = [8, 4] into %output : tensor<?x?x?x4xf32> -> tensor<?x?xf32>
+  return %0 : tensor<?x?xf32>
+}

From 73554ee08601235a7d9ec70e1ea81db6f3dfa3c4 Mon Sep 17 00:00:00 2001
From: Andrzej Warzynski <andrzej.warzynski at arm.com>
Date: Sun, 20 Oct 2024 21:16:07 -0700
Subject: [PATCH 2/3] fixup! [mlir][tensor] Restrict the verifier for
 tensor.pack/tensor.unpack

Update tests
---
 .../Dialect/Linalg/transform-lower-pack.mlir  |  8 ++++----
 mlir/test/Dialect/Tensor/fold-empty-op.mlir   | 20 +++++++++----------
 2 files changed, 13 insertions(+), 15 deletions(-)

diff --git a/mlir/test/Dialect/Linalg/transform-lower-pack.mlir b/mlir/test/Dialect/Linalg/transform-lower-pack.mlir
index 48bf1c151de8f5..7aadf190695630 100644
--- a/mlir/test/Dialect/Linalg/transform-lower-pack.mlir
+++ b/mlir/test/Dialect/Linalg/transform-lower-pack.mlir
@@ -586,7 +586,7 @@ module attributes {transform.with_named_sequence} {
 
 // Check that we can lower unpack "as unpad" with dynamic dims.
 // CHECK-LABEL: func.func @unpack_as_pad_dynamic(
-// CHECK-SAME: %[[ARG0:.*]]: tensor<1x1x1x1x?x?x?x?xf32>, %[[ARG1:.*]]: tensor<?x?x?x?xf32>
+// CHECK-SAME: %[[ARG0:.*]]: tensor<1x1x1x1x136x64x16x16xf32>, %[[ARG1:.*]]: tensor<?x?x?x?xf32>
 //      CHECK-DAG:  %[[C0:.*]] = arith.constant 0 : index
 //      CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
 //      CHECK-DAG:  %[[C2:.*]] = arith.constant 2 : index
@@ -602,10 +602,10 @@ module attributes {transform.with_named_sequence} {
 // CHECK-SAME:   [1, 1, 1, 1, %[[DIM0]], %[[DIM1]], %[[DIM2]], %[[DIM3]]]
 // strides multiplers.
 // CHECK-SAME:   [1, 1, 1, 1, 1, 1, 1, 1]
-// CHECK-SAME:   : tensor<1x1x1x1x?x?x?x?xf32> to tensor<?x?x?x?xf32>
-func.func @unpack_as_pad_dynamic(%arg0: tensor<1x1x1x1x?x?x?x?xf32>, %arg1: tensor<?x?x?x?xf32>) -> tensor<?x?x?x?xf32> {
+// CHECK-SAME:   :  tensor<1x1x1x1x136x64x16x16xf32> to tensor<?x?x?x?xf32>
+func.func @unpack_as_pad_dynamic(%arg0: tensor<1x1x1x1x136x64x16x16xf32>, %arg1: tensor<?x?x?x?xf32>) -> tensor<?x?x?x?xf32> {
   %pack = tensor.unpack %arg0 inner_dims_pos = [0, 1, 2, 3] inner_tiles = [136, 64, 16, 16] into %arg1
-    : tensor<1x1x1x1x?x?x?x?xf32> -> tensor<?x?x?x?xf32>
+    : tensor<1x1x1x1x136x64x16x16xf32> -> tensor<?x?x?x?xf32>
   return %pack : tensor<?x?x?x?xf32>
 }
 
diff --git a/mlir/test/Dialect/Tensor/fold-empty-op.mlir b/mlir/test/Dialect/Tensor/fold-empty-op.mlir
index 5beb8c250aa105..65ceb4ff3e3df4 100644
--- a/mlir/test/Dialect/Tensor/fold-empty-op.mlir
+++ b/mlir/test/Dialect/Tensor/fold-empty-op.mlir
@@ -77,20 +77,20 @@ func.func @pack_empty(%arg0: tensor<8x8x32x32xf32>) -> tensor<8x8x32x32xf32> {
 // CHECK-NOT:    tensor.pack
 // CHECK:        return %[[T]] : tensor<8x8x32x32xf32>
 
-func.func @pack_empty_dynamic(%arg0: tensor<?x?x?x?xf32>, %dim0: index, %dim1: index) -> tensor<?x?x?x?xf32> {
+func.func @pack_empty_dynamic(%arg0: tensor<?x?x32x32xf32>, %dim0: index, %dim1: index) -> tensor<?x?x32x32xf32> {
   %empty_unpacked = tensor.empty(%dim0, %dim1) : tensor<?x?xf32>
   %packed = tensor.pack %empty_unpacked
     inner_dims_pos = [0, 1] inner_tiles = [32, 32]
-    into %arg0 : tensor<?x?xf32> -> tensor<?x?x?x?xf32>
-  return %packed : tensor<?x?x?x?xf32>
+    into %arg0 : tensor<?x?xf32> -> tensor<?x?x32x32xf32>
+  return %packed : tensor<?x?x32x32xf32>
 }
 
 // CHECK-LABEL: func.func @pack_empty_dynamic(
-// CHECK-SAME:   %[[T:.+]]: tensor<?x?x?x?xf32>,
+// CHECK-SAME:   %[[T:.+]]: tensor<?x?x32x32xf32>,
 // CHECK-SAME:   %[[DIM0:[a-zA-Z0-9_]+]]: index,
 // CHECK-SAME:   %[[DIM1:[a-zA-Z0-9_]+]]: index
 // CHECK-NOT:    tensor.pack
-// CHECK:        return %[[T]] : tensor<?x?x?x?xf32>
+// CHECK:        return %[[T]] : tensor<?x?x32x32xf32>
 
 func.func @unpack_empty(%arg0: tensor<256x256xf32>) -> tensor<256x256xf32> {
   %empty_packed = tensor.empty() : tensor<8x8x32x32xf32>
@@ -105,20 +105,18 @@ func.func @unpack_empty(%arg0: tensor<256x256xf32>) -> tensor<256x256xf32> {
 // CHECK-NOT:    tensor.unpack
 // CHECK:        return %[[T]] : tensor<256x256xf32>
 
-func.func @unpack_empty_dynamic(%arg0: tensor<?x?xf32>, %dim0: index, %dim1: index, %dim2: index, %dim3: index) -> tensor<?x?xf32> {
-  %empty_packed = tensor.empty(%dim0, %dim1, %dim2, %dim3) : tensor<?x?x?x?xf32>
+func.func @unpack_empty_dynamic(%arg0: tensor<?x?xf32>, %dim0: index, %dim1: index) -> tensor<?x?xf32> {
+  %empty_packed = tensor.empty(%dim0, %dim1) : tensor<?x?x32x32xf32>
   %unpacked = tensor.unpack %empty_packed
     inner_dims_pos = [0, 1] inner_tiles = [32, 32]
-    into %arg0 : tensor<?x?x?x?xf32> -> tensor<?x?xf32>
+    into %arg0 : tensor<?x?x32x32xf32> -> tensor<?x?xf32>
   return %unpacked : tensor<?x?xf32>
 }
 
 // CHECK-LABEL: func.func @unpack_empty_dynamic(
 // CHECK-SAME:   %[[T:.+]]: tensor<?x?xf32>,
 // CHECK-SAME:   %[[DIM0:[a-zA-Z0-9_]+]]: index,
-// CHECK-SAME:   %[[DIM1:[a-zA-Z0-9_]+]]: index,
-// CHECK-SAME:   %[[DIM2:[a-zA-Z0-9_]+]]: index,
-// CHECK-SAME:   %[[DIM3:[a-zA-Z0-9_]+]]: index
+// CHECK-SAME:   %[[DIM1:[a-zA-Z0-9_]+]]: index
 // CHECK-NOT:    tensor.unpack
 // CHECK:        return %[[T]] : tensor<?x?xf32>
 

From 558a29b89b4c6addaa494ef9066b754e4137a382 Mon Sep 17 00:00:00 2001
From: Andrzej Warzynski <andrzej.warzynski at arm.com>
Date: Tue, 22 Oct 2024 14:40:01 -0700
Subject: [PATCH 3/3] fixup! fixup! [mlir][tensor] Restrict the verifier for
 tensor.pack/tensor.unpack

Remove redundant if
---
 mlir/lib/Dialect/Tensor/IR/TensorOps.cpp | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
index 60a04152848d88..603e86ca3d7668 100644
--- a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
+++ b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
@@ -3868,10 +3868,9 @@ static LogicalResult commonVerifierPackAndUnPackOp(OpTy packOrUnPack) {
             int64_t shape = std::get<0>(it);
             if (Attribute attr =
                     llvm::dyn_cast_if_present<Attribute>(std::get<1>(it))) {
-              if (IntegerAttr intAttr = dyn_cast_or_null<IntegerAttr>(attr)) {
-                int64_t staticTileSize = intAttr.getValue().getSExtValue();
-                return shape == staticTileSize;
-              }
+              IntegerAttr intAttr = dyn_cast_or_null<IntegerAttr>(attr);
+              int64_t staticTileSize = intAttr.getValue().getSExtValue();
+              return shape == staticTileSize;
             }
             return ShapedType::isDynamic(shape);
           })) {


