[Mlir-commits] [mlir] 435debe - [mlir][test] NFC - Fix some worst offenders "capture by SSA name" tests
Nicolas Vasilache
llvmlistbot at llvm.org
Fri Sep 30 08:24:23 PDT 2022
Author: Nicolas Vasilache
Date: 2022-09-30T08:24:13-07:00
New Revision: 435debea69831ee1fd4ba15d023e50baf7533e75
URL: https://github.com/llvm/llvm-project/commit/435debea69831ee1fd4ba15d023e50baf7533e75
DIFF: https://github.com/llvm/llvm-project/commit/435debea69831ee1fd4ba15d023e50baf7533e75.diff
LOG: [mlir][test] NFC - Fix some worst offenders "capture by SSA name" tests
Many tests still depend on specific names of SSA values (!!).
This commit is a best effort cleanup that will set the stage for adding some pretty SSA result names.
Added:
Modified:
mlir/test/Conversion/AffineToStandard/lower-affine.mlir
mlir/test/Conversion/VectorToSCF/vector-to-scf.mlir
mlir/test/Dialect/Affine/SuperVectorize/vectorize_1d.mlir
mlir/test/Dialect/Affine/affine-data-copy.mlir
mlir/test/Dialect/Affine/affine-loop-invariant-code-motion.mlir
mlir/test/Dialect/Affine/affine-loop-normalize.mlir
mlir/test/Dialect/Affine/canonicalize.mlir
mlir/test/Dialect/Affine/dma-generate.mlir
mlir/test/Dialect/Affine/scalrep.mlir
mlir/test/Dialect/MemRef/canonicalize.mlir
mlir/test/Dialect/MemRef/subview.mlir
mlir/test/Dialect/Vector/vector-transfer-full-partial-split.mlir
mlir/test/Dialect/Vector/vector-warp-distribute.mlir
mlir/test/IR/core-ops.mlir
mlir/test/IR/memory-ops.mlir
mlir/test/Transforms/canonicalize.mlir
mlir/test/Transforms/cse.mlir
mlir/test/Transforms/loop-fusion-transformation.mlir
mlir/test/Transforms/loop-fusion.mlir
mlir/test/Transforms/loop-invariant-code-motion.mlir
mlir/test/Transforms/normalize-memrefs.mlir
mlir/test/Transforms/pipeline-data-transfer.mlir
Removed:
################################################################################
diff --git a/mlir/test/Conversion/AffineToStandard/lower-affine.mlir b/mlir/test/Conversion/AffineToStandard/lower-affine.mlir
index 31b3e10973e96..5fc8d010bcd30 100644
--- a/mlir/test/Conversion/AffineToStandard/lower-affine.mlir
+++ b/mlir/test/Conversion/AffineToStandard/lower-affine.mlir
@@ -578,12 +578,12 @@ func.func @affine_store(%arg0 : index) {
affine.for %i0 = 0 to 10 {
affine.store %1, %0[%i0 - symbol(%arg0) + 7] : memref<10xf32>
}
-// CHECK: %c-1 = arith.constant -1 : index
-// CHECK-NEXT: %[[a:.*]] = arith.muli %arg0, %c-1 : index
+// CHECK: %[[cm1:.*]] = arith.constant -1 : index
+// CHECK-NEXT: %[[a:.*]] = arith.muli %{{.*}}, %[[cm1]] : index
// CHECK-NEXT: %[[b:.*]] = arith.addi %{{.*}}, %[[a]] : index
-// CHECK-NEXT: %c7 = arith.constant 7 : index
-// CHECK-NEXT: %[[c:.*]] = arith.addi %[[b]], %c7 : index
-// CHECK-NEXT: store %cst, %0[%[[c]]] : memref<10xf32>
+// CHECK-NEXT: %[[c7:.*]] = arith.constant 7 : index
+// CHECK-NEXT: %[[c:.*]] = arith.addi %[[b]], %[[c7]] : index
+// CHECK-NEXT: store %{{.*}}, %{{.*}}[%[[c]]] : memref<10xf32>
return
}
@@ -620,11 +620,11 @@ func.func @affine_dma_start(%arg0 : index) {
affine.dma_start %0[%i0 + 7], %1[%arg0 + 11], %2[%c0], %c64
: memref<100xf32>, memref<100xf32, 2>, memref<1xi32>
}
-// CHECK: %c7 = arith.constant 7 : index
-// CHECK-NEXT: %[[a:.*]] = arith.addi %{{.*}}, %c7 : index
-// CHECK-NEXT: %c11 = arith.constant 11 : index
-// CHECK-NEXT: %[[b:.*]] = arith.addi %arg0, %c11 : index
-// CHECK-NEXT: dma_start %0[%[[a]]], %1[%[[b]]], %c64, %2[%c0] : memref<100xf32>, memref<100xf32, 2>, memref<1xi32>
+// CHECK: %[[c7:.*]] = arith.constant 7 : index
+// CHECK-NEXT: %[[a:.*]] = arith.addi %{{.*}}, %[[c7]] : index
+// CHECK-NEXT: %[[c11:.*]] = arith.constant 11 : index
+// CHECK-NEXT: %[[b:.*]] = arith.addi %{{.*}}, %[[c11]] : index
+// CHECK-NEXT: dma_start %{{.*}}[%[[a]]], %{{.*}}[%[[b]]], %{{.*}}, %{{.*}}[%{{.*}}] : memref<100xf32>, memref<100xf32, 2>, memref<1xi32>
return
}
@@ -636,9 +636,9 @@ func.func @affine_dma_wait(%arg0 : index) {
affine.dma_wait %2[%i0 + %arg0 + 17], %c64 : memref<1xi32>
}
// CHECK: %[[a:.*]] = arith.addi %{{.*}}, %arg0 : index
-// CHECK-NEXT: %c17 = arith.constant 17 : index
-// CHECK-NEXT: %[[b:.*]] = arith.addi %[[a]], %c17 : index
-// CHECK-NEXT: dma_wait %0[%[[b]]], %c64 : memref<1xi32>
+// CHECK-NEXT: %[[c17:.*]] = arith.constant 17 : index
+// CHECK-NEXT: %[[b:.*]] = arith.addi %[[a]], %[[c17]] : index
+// CHECK-NEXT: dma_wait %{{.*}}[%[[b]]], %{{.*}} : memref<1xi32>
return
}
diff --git a/mlir/test/Conversion/VectorToSCF/vector-to-scf.mlir b/mlir/test/Conversion/VectorToSCF/vector-to-scf.mlir
index 38500d9d3579d..e7b72eb1364a8 100644
--- a/mlir/test/Conversion/VectorToSCF/vector-to-scf.mlir
+++ b/mlir/test/Conversion/VectorToSCF/vector-to-scf.mlir
@@ -70,7 +70,7 @@ func.func @materialize_read_1d_partially_specialized(%dyn1 : index, %dyn2 : inde
}
}
}
- // CHECK: %[[tensor:[0-9]+]] = memref.alloc
+ // CHECK: %[[tensor:[0-9a-zA-Z_]+]] = memref.alloc
// CHECK-NOT: {{.*}} memref.dim %[[tensor]], %c0
// CHECK-NOT: {{.*}} memref.dim %[[tensor]], %c3
return
diff --git a/mlir/test/Dialect/Affine/SuperVectorize/vectorize_1d.mlir b/mlir/test/Dialect/Affine/SuperVectorize/vectorize_1d.mlir
index bc77a3fc2e9f2..b2e46ada75fbb 100644
--- a/mlir/test/Dialect/Affine/SuperVectorize/vectorize_1d.mlir
+++ b/mlir/test/Dialect/Affine/SuperVectorize/vectorize_1d.mlir
@@ -1,16 +1,16 @@
// RUN: mlir-opt %s -affine-super-vectorize="virtual-vector-size=128 test-fastest-varying=0" -split-input-file | FileCheck %s
-// CHECK-DAG: #[[$map_id1:map[0-9]+]] = affine_map<(d0) -> (d0)>
-// CHECK-DAG: #[[$map_proj_d0d1_0:map[0-9]+]] = affine_map<(d0, d1) -> (0)>
+// CHECK-DAG: #[[$map_id1:map[0-9a-zA-Z_]+]] = affine_map<(d0) -> (d0)>
+// CHECK-DAG: #[[$map_proj_d0d1_0:map[0-9a-zA-Z_]+]] = affine_map<(d0, d1) -> (0)>
// CHECK-LABEL: func @vec1d_1
func.func @vec1d_1(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
-// CHECK-DAG: [[ARG_M:%[0-9]+]] = memref.dim %{{.*}}, %[[C0]] : memref<?x?xf32>
-// CHECK-DAG: [[ARG_N:%[0-9]+]] = memref.dim %{{.*}}, %[[C1]] : memref<?x?xf32>
-// CHECK-DAG: [[ARG_P:%[0-9]+]] = memref.dim %{{.*}}, %[[C2]] : memref<?x?x?xf32>
+// CHECK-DAG: [[ARG_M:%[0-9a-zA-Z_]+]] = memref.dim %{{.*}}, %[[C0]] : memref<?x?xf32>
+// CHECK-DAG: [[ARG_N:%[0-9a-zA-Z_]+]] = memref.dim %{{.*}}, %[[C1]] : memref<?x?xf32>
+// CHECK-DAG: [[ARG_P:%[0-9a-zA-Z_]+]] = memref.dim %{{.*}}, %[[C2]] : memref<?x?x?xf32>
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%c2 = arith.constant 2 : index
@@ -36,9 +36,9 @@ func.func @vec1d_2(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
-// CHECK-DAG: [[ARG_M:%[0-9]+]] = memref.dim %{{.*}}, %[[C0]] : memref<?x?xf32>
-// CHECK-DAG: [[ARG_N:%[0-9]+]] = memref.dim %{{.*}}, %[[C1]] : memref<?x?xf32>
-// CHECK-DAG: [[ARG_P:%[0-9]+]] = memref.dim %{{.*}}, %[[C2]] : memref<?x?x?xf32>
+// CHECK-DAG: [[ARG_M:%[0-9a-zA-Z_]+]] = memref.dim %{{.*}}, %[[C0]] : memref<?x?xf32>
+// CHECK-DAG: [[ARG_N:%[0-9a-zA-Z_]+]] = memref.dim %{{.*}}, %[[C1]] : memref<?x?xf32>
+// CHECK-DAG: [[ARG_P:%[0-9a-zA-Z_]+]] = memref.dim %{{.*}}, %[[C2]] : memref<?x?x?xf32>
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%c2 = arith.constant 2 : index
@@ -62,9 +62,9 @@ func.func @vec1d_3(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
-// CHECK-DAG: [[ARG_M:%[0-9]+]] = memref.dim %arg0, %[[C0]] : memref<?x?xf32>
-// CHECK-DAG: [[ARG_N:%[0-9]+]] = memref.dim %arg0, %[[C1]] : memref<?x?xf32>
-// CHECK-DAG: [[ARG_P:%[0-9]+]] = memref.dim %arg1, %[[C2]] : memref<?x?x?xf32>
+// CHECK-DAG: [[ARG_M:%[0-9a-zA-Z_]+]] = memref.dim %arg0, %[[C0]] : memref<?x?xf32>
+// CHECK-DAG: [[ARG_N:%[0-9a-zA-Z_]+]] = memref.dim %arg0, %[[C1]] : memref<?x?xf32>
+// CHECK-DAG: [[ARG_P:%[0-9a-zA-Z_]+]] = memref.dim %arg1, %[[C2]] : memref<?x?x?xf32>
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%c2 = arith.constant 2 : index
@@ -72,10 +72,10 @@ func.func @vec1d_3(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
%N = memref.dim %A, %c1 : memref<?x?xf32>
%P = memref.dim %B, %c2 : memref<?x?x?xf32>
-// CHECK:for [[IV8:%[arg0-9]+]] = 0 to [[ARG_M]] step 128
-// CHECK-NEXT: for [[IV9:%[arg0-9]*]] = 0 to [[ARG_N]] {
-// CHECK-NEXT: %[[APP9_0:[0-9]+]] = affine.apply {{.*}}([[IV9]], [[IV8]])
-// CHECK-NEXT: %[[APP9_1:[0-9]+]] = affine.apply {{.*}}([[IV9]], [[IV8]])
+// CHECK:for [[IV8:%[0-9a-zA-Z_]+]] = 0 to [[ARG_M]] step 128
+// CHECK-NEXT: for [[IV9:%[0-9a-zA-Z_]*]] = 0 to [[ARG_N]] {
+// CHECK-NEXT: %[[APP9_0:[0-9a-zA-Z_]+]] = affine.apply {{.*}}([[IV9]], [[IV8]])
+// CHECK-NEXT: %[[APP9_1:[0-9a-zA-Z_]+]] = affine.apply {{.*}}([[IV9]], [[IV8]])
// CHECK-NEXT: %[[CST:.*]] = arith.constant 0.0{{.*}}: f32
// CHECK-NEXT: {{.*}} = vector.transfer_read %{{.*}}[%[[APP9_0]], %[[APP9_1]]], %[[CST]] : memref<?x?xf32>, vector<128xf32>
affine.for %i8 = 0 to %M { // vectorized
@@ -167,8 +167,8 @@ func.func @vec_constant_with_two_users(%M : index, %N : index) -> (f32, f32) {
// CHECK-LABEL: func @vec_block_arg
func.func @vec_block_arg(%A : memref<32x512xi32>) {
- // CHECK: affine.for %[[IV0:[arg0-9]+]] = 0 to 512 step 128 {
- // CHECK-NEXT: affine.for %[[IV1:[arg0-9]+]] = 0 to 32 {
+ // CHECK: affine.for %[[IV0:[0-9a-zA-Z_]+]] = 0 to 512 step 128 {
+ // CHECK-NEXT: affine.for %[[IV1:[0-9a-zA-Z_]+]] = 0 to 32 {
// CHECK-NEXT: %[[BROADCAST:.*]] = vector.broadcast %[[IV1]] : index to vector<128xindex>
// CHECK-NEXT: %[[CAST:.*]] = arith.index_cast %[[BROADCAST]] : vector<128xindex> to vector<128xi32>
// CHECK-NEXT: vector.transfer_write %[[CAST]], {{.*}}[%[[IV1]], %[[IV0]]] : vector<128xi32>, memref<32x512xi32>
@@ -183,17 +183,17 @@ func.func @vec_block_arg(%A : memref<32x512xi32>) {
// -----
-// CHECK-DAG: #[[$map0:map[0-9]+]] = affine_map<(d0, d1, d2) -> (d0 * 2 + d1 - 1)>
-// CHECK-DAG: #[[$map1:map[0-9]+]] = affine_map<(d0, d1, d2) -> (d2)>
+// CHECK-DAG: #[[$map0:map[0-9a-zA-Z_]+]] = affine_map<(d0, d1, d2) -> (d0 * 2 + d1 - 1)>
+// CHECK-DAG: #[[$map1:map[0-9a-zA-Z_]+]] = affine_map<(d0, d1, d2) -> (d2)>
// CHECK-LABEL: func @vec_block_arg_2
func.func @vec_block_arg_2(%A : memref<?x512xindex>) {
%c0 = arith.constant 0 : index
%N = memref.dim %A, %c0 : memref<?x512xindex>
- // CHECK: affine.for %[[IV0:[arg0-9]+]] = 0 to %{{.*}} {
+ // CHECK: affine.for %[[IV0:[0-9a-zA-Z_]+]] = 0 to %{{.*}} {
// CHECK-NEXT: %[[BROADCAST1:.*]] = vector.broadcast %[[IV0]] : index to vector<128xindex>
- // CHECK-NEXT: affine.for %[[IV1:[arg0-9]+]] = 0 to 512 step 128 {
+ // CHECK-NEXT: affine.for %[[IV1:[0-9a-zA-Z_]+]] = 0 to 512 step 128 {
// CHECK-NOT: vector.broadcast %[[IV1]]
- // CHECK: affine.for %[[IV2:[arg0-9]+]] = 0 to 2 {
+ // CHECK: affine.for %[[IV2:[0-9a-zA-Z_]+]] = 0 to 2 {
// CHECK-NEXT: %[[BROADCAST2:.*]] = vector.broadcast %[[IV2]] : index to vector<128xindex>
// CHECK-NEXT: %[[INDEX1:.*]] = affine.apply #[[$map0]](%[[IV0]], %[[IV2]], %[[IV1]])
// CHECK-NEXT: %[[INDEX2:.*]] = affine.apply #[[$map1]](%[[IV0]], %[[IV2]], %[[IV1]])
@@ -220,9 +220,9 @@ func.func @vec_rejected_1(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
-// CHECK-DAG: [[ARG_M:%[0-9]+]] = memref.dim %{{.*}}, %[[C0]] : memref<?x?xf32>
-// CHECK-DAG: [[ARG_N:%[0-9]+]] = memref.dim %{{.*}}, %[[C1]] : memref<?x?xf32>
-// CHECK-DAG: [[ARG_P:%[0-9]+]] = memref.dim %{{.*}}, %[[C2]] : memref<?x?x?xf32>
+// CHECK-DAG: [[ARG_M:%[0-9a-zA-Z_]+]] = memref.dim %{{.*}}, %[[C0]] : memref<?x?xf32>
+// CHECK-DAG: [[ARG_N:%[0-9a-zA-Z_]+]] = memref.dim %{{.*}}, %[[C1]] : memref<?x?xf32>
+// CHECK-DAG: [[ARG_P:%[0-9a-zA-Z_]+]] = memref.dim %{{.*}}, %[[C2]] : memref<?x?x?xf32>
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%c2 = arith.constant 2 : index
@@ -244,9 +244,9 @@ func.func @vec_rejected_2(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
-// CHECK-DAG: [[ARG_M:%[0-9]+]] = memref.dim %{{.*}}, %[[C0]] : memref<?x?xf32>
-// CHECK-DAG: [[ARG_N:%[0-9]+]] = memref.dim %{{.*}}, %[[C1]] : memref<?x?xf32>
-// CHECK-DAG: [[ARG_P:%[0-9]+]] = memref.dim %{{.*}}, %[[C2]] : memref<?x?x?xf32>
+// CHECK-DAG: [[ARG_M:%[0-9a-zA-Z_]+]] = memref.dim %{{.*}}, %[[C0]] : memref<?x?xf32>
+// CHECK-DAG: [[ARG_N:%[0-9a-zA-Z_]+]] = memref.dim %{{.*}}, %[[C1]] : memref<?x?xf32>
+// CHECK-DAG: [[ARG_P:%[0-9a-zA-Z_]+]] = memref.dim %{{.*}}, %[[C2]] : memref<?x?x?xf32>
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%c2 = arith.constant 2 : index
@@ -254,7 +254,7 @@ func.func @vec_rejected_2(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
%N = memref.dim %A, %c1 : memref<?x?xf32>
%P = memref.dim %B, %c2 : memref<?x?x?xf32>
-// CHECK: affine.for %{{.*}}{{[0-9]*}} = 0 to [[ARG_M]] {
+// CHECK: affine.for %{{.*}}{{[0-9a-zA-Z_]*}} = 0 to [[ARG_M]] {
affine.for %i2 = 0 to %M { // not vectorized, would vectorize with --test-fastest-varying=1
%a2 = affine.load %A[%i2, %c0] : memref<?x?xf32>
}
@@ -268,9 +268,9 @@ func.func @vec_rejected_3(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
-// CHECK-DAG: [[ARG_M:%[0-9]+]] = memref.dim %{{.*}}, %[[C0]] : memref<?x?xf32>
-// CHECK-DAG: [[ARG_N:%[0-9]+]] = memref.dim %{{.*}}, %[[C1]] : memref<?x?xf32>
-// CHECK-DAG: [[ARG_P:%[0-9]+]] = memref.dim %{{.*}}, %[[C2]] : memref<?x?x?xf32>
+// CHECK-DAG: [[ARG_M:%[0-9a-zA-Z_]+]] = memref.dim %{{.*}}, %[[C0]] : memref<?x?xf32>
+// CHECK-DAG: [[ARG_N:%[0-9a-zA-Z_]+]] = memref.dim %{{.*}}, %[[C1]] : memref<?x?xf32>
+// CHECK-DAG: [[ARG_P:%[0-9a-zA-Z_]+]] = memref.dim %{{.*}}, %[[C2]] : memref<?x?x?xf32>
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%c2 = arith.constant 2 : index
@@ -278,8 +278,8 @@ func.func @vec_rejected_3(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
%N = memref.dim %A, %c1 : memref<?x?xf32>
%P = memref.dim %B, %c2 : memref<?x?x?xf32>
-// CHECK:for [[IV4:%[arg0-9]+]] = 0 to [[ARG_M]] step 128 {
-// CHECK-NEXT: for [[IV5:%[arg0-9]*]] = 0 to [[ARG_N]] {
+// CHECK:for [[IV4:%[0-9a-zA-Z_]+]] = 0 to [[ARG_M]] step 128 {
+// CHECK-NEXT: for [[IV5:%[0-9a-zA-Z_]*]] = 0 to [[ARG_N]] {
// CHECK-NEXT: %{{.*}} = arith.constant 0.0{{.*}}: f32
// CHECK-NEXT: {{.*}} = vector.transfer_read %{{.*}}[%{{.*}}, %{{.*}}], %{{[a-zA-Z0-9_]*}} : memref<?x?xf32>, vector<128xf32>
affine.for %i4 = 0 to %M { // vectorized
@@ -297,9 +297,9 @@ func.func @vec_rejected_4(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
-// CHECK-DAG: [[ARG_M:%[0-9]+]] = memref.dim %{{.*}}, %[[C0]] : memref<?x?xf32>
-// CHECK-DAG: [[ARG_N:%[0-9]+]] = memref.dim %{{.*}}, %[[C1]] : memref<?x?xf32>
-// CHECK-DAG: [[ARG_P:%[0-9]+]] = memref.dim %{{.*}}, %[[C2]] : memref<?x?x?xf32>
+// CHECK-DAG: [[ARG_M:%[0-9a-zA-Z_]+]] = memref.dim %{{.*}}, %[[C0]] : memref<?x?xf32>
+// CHECK-DAG: [[ARG_N:%[0-9a-zA-Z_]+]] = memref.dim %{{.*}}, %[[C1]] : memref<?x?xf32>
+// CHECK-DAG: [[ARG_P:%[0-9a-zA-Z_]+]] = memref.dim %{{.*}}, %[[C2]] : memref<?x?x?xf32>
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%c2 = arith.constant 2 : index
@@ -307,8 +307,8 @@ func.func @vec_rejected_4(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
%N = memref.dim %A, %c1 : memref<?x?xf32>
%P = memref.dim %B, %c2 : memref<?x?x?xf32>
-// CHECK: for [[IV6:%[arg0-9]*]] = 0 to [[ARG_M]] {
-// CHECK-NEXT: for [[IV7:%[arg0-9]*]] = 0 to [[ARG_N]] {
+// CHECK: for [[IV6:%[0-9a-zA-Z_]*]] = 0 to [[ARG_M]] {
+// CHECK-NEXT: for [[IV7:%[0-9a-zA-Z_]*]] = 0 to [[ARG_N]] {
affine.for %i6 = 0 to %M { // not vectorized, would vectorize with --test-fastest-varying=1
affine.for %i7 = 0 to %N { // not vectorized, can never vectorize
%a7 = affine.load %A[%i6 + %i7, %i6] : memref<?x?xf32>
@@ -324,9 +324,9 @@ func.func @vec_rejected_5(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
-// CHECK-DAG: [[ARG_M:%[0-9]+]] = memref.dim %{{.*}}, %[[C0]] : memref<?x?xf32>
-// CHECK-DAG: [[ARG_N:%[0-9]+]] = memref.dim %{{.*}}, %[[C1]] : memref<?x?xf32>
-// CHECK-DAG: [[ARG_P:%[0-9]+]] = memref.dim %{{.*}}, %[[C2]] : memref<?x?x?xf32>
+// CHECK-DAG: [[ARG_M:%[0-9a-zA-Z_]+]] = memref.dim %{{.*}}, %[[C0]] : memref<?x?xf32>
+// CHECK-DAG: [[ARG_N:%[0-9a-zA-Z_]+]] = memref.dim %{{.*}}, %[[C1]] : memref<?x?xf32>
+// CHECK-DAG: [[ARG_P:%[0-9a-zA-Z_]+]] = memref.dim %{{.*}}, %[[C2]] : memref<?x?x?xf32>
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%c2 = arith.constant 2 : index
@@ -334,8 +334,8 @@ func.func @vec_rejected_5(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
%N = memref.dim %A, %c1 : memref<?x?xf32>
%P = memref.dim %B, %c2 : memref<?x?x?xf32>
-// CHECK: for [[IV10:%[arg0-9]*]] = 0 to %{{[0-9]*}} {
-// CHECK: for [[IV11:%[arg0-9]*]] = 0 to %{{[0-9]*}} {
+// CHECK: for [[IV10:%[0-9a-zA-Z_]*]] = 0 to %{{[0-9a-zA-Z_]*}} {
+// CHECK: for [[IV11:%[0-9a-zA-Z_]*]] = 0 to %{{[0-9a-zA-Z_]*}} {
affine.for %i10 = 0 to %M { // not vectorized, need per load transposes
affine.for %i11 = 0 to %N { // not vectorized, need per load transposes
%a11 = affine.load %A[%i10, %i11] : memref<?x?xf32>
@@ -352,9 +352,9 @@ func.func @vec_rejected_6(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
-// CHECK-DAG: [[ARG_M:%[0-9]+]] = memref.dim %{{.*}}, %[[C0]] : memref<?x?xf32>
-// CHECK-DAG: [[ARG_N:%[0-9]+]] = memref.dim %{{.*}}, %[[C1]] : memref<?x?xf32>
-// CHECK-DAG: [[ARG_P:%[0-9]+]] = memref.dim %{{.*}}, %[[C2]] : memref<?x?x?xf32>
+// CHECK-DAG: [[ARG_M:%[0-9a-zA-Z_]+]] = memref.dim %{{.*}}, %[[C0]] : memref<?x?xf32>
+// CHECK-DAG: [[ARG_N:%[0-9a-zA-Z_]+]] = memref.dim %{{.*}}, %[[C1]] : memref<?x?xf32>
+// CHECK-DAG: [[ARG_P:%[0-9a-zA-Z_]+]] = memref.dim %{{.*}}, %[[C2]] : memref<?x?x?xf32>
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%c2 = arith.constant 2 : index
@@ -362,9 +362,9 @@ func.func @vec_rejected_6(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
%N = memref.dim %A, %c1 : memref<?x?xf32>
%P = memref.dim %B, %c2 : memref<?x?x?xf32>
-// CHECK: for [[IV12:%[arg0-9]*]] = 0 to %{{[0-9]*}} {
-// CHECK: for [[IV13:%[arg0-9]*]] = 0 to %{{[0-9]*}} {
-// CHECK: for [[IV14:%[arg0-9]+]] = 0 to [[ARG_P]] step 128
+// CHECK: for [[IV12:%[0-9a-zA-Z_]*]] = 0 to %{{[0-9a-zA-Z_]*}} {
+// CHECK: for [[IV13:%[0-9a-zA-Z_]*]] = 0 to %{{[0-9a-zA-Z_]*}} {
+// CHECK: for [[IV14:%[0-9a-zA-Z_]+]] = 0 to [[ARG_P]] step 128
affine.for %i12 = 0 to %M { // not vectorized, can never vectorize
affine.for %i13 = 0 to %N { // not vectorized, can never vectorize
affine.for %i14 = 0 to %P { // vectorized
@@ -382,9 +382,9 @@ func.func @vec_rejected_7(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
-// CHECK-DAG: [[ARG_M:%[0-9]+]] = memref.dim %{{.*}}, %[[C0]] : memref<?x?xf32>
-// CHECK-DAG: [[ARG_N:%[0-9]+]] = memref.dim %{{.*}}, %[[C1]] : memref<?x?xf32>
-// CHECK-DAG: [[ARG_P:%[0-9]+]] = memref.dim %{{.*}}, %[[C2]] : memref<?x?x?xf32>
+// CHECK-DAG: [[ARG_M:%[0-9a-zA-Z_]+]] = memref.dim %{{.*}}, %[[C0]] : memref<?x?xf32>
+// CHECK-DAG: [[ARG_N:%[0-9a-zA-Z_]+]] = memref.dim %{{.*}}, %[[C1]] : memref<?x?xf32>
+// CHECK-DAG: [[ARG_P:%[0-9a-zA-Z_]+]] = memref.dim %{{.*}}, %[[C2]] : memref<?x?x?xf32>
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%c2 = arith.constant 2 : index
@@ -392,7 +392,7 @@ func.func @vec_rejected_7(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
%N = memref.dim %A, %c1 : memref<?x?xf32>
%P = memref.dim %B, %c2 : memref<?x?x?xf32>
-// CHECK: affine.for %{{.*}}{{[0-9]*}} = 0 to %{{[0-9]*}} {
+// CHECK: affine.for %{{.*}}{{[0-9a-zA-Z_]*}} = 0 to %{{[0-9a-zA-Z_]*}} {
affine.for %i16 = 0 to %M { // not vectorized, can't vectorize a vector load
%a16 = memref.alloc(%M) : memref<?xvector<2xf32>>
%l16 = affine.load %a16[%i16] : memref<?xvector<2xf32>>
@@ -402,17 +402,17 @@ func.func @vec_rejected_7(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
// -----
-// CHECK-DAG: #[[$map_id1:map[0-9]+]] = affine_map<(d0) -> (d0)>
-// CHECK-DAG: #[[$map_proj_d0d1_0:map[0-9]+]] = affine_map<(d0, d1) -> (0)>
+// CHECK-DAG: #[[$map_id1:map[0-9a-zA-Z_]+]] = affine_map<(d0) -> (d0)>
+// CHECK-DAG: #[[$map_proj_d0d1_0:map[0-9a-zA-Z_]+]] = affine_map<(d0, d1) -> (0)>
// CHECK-LABEL: func @vec_rejected_8
func.func @vec_rejected_8(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
-// CHECK-DAG: [[ARG_M:%[0-9]+]] = memref.dim %{{.*}}, %[[C0]] : memref<?x?xf32>
-// CHECK-DAG: [[ARG_N:%[0-9]+]] = memref.dim %{{.*}}, %[[C1]] : memref<?x?xf32>
-// CHECK-DAG: [[ARG_P:%[0-9]+]] = memref.dim %{{.*}}, %[[C2]] : memref<?x?x?xf32>
+// CHECK-DAG: [[ARG_M:%[0-9a-zA-Z_]+]] = memref.dim %{{.*}}, %[[C0]] : memref<?x?xf32>
+// CHECK-DAG: [[ARG_N:%[0-9a-zA-Z_]+]] = memref.dim %{{.*}}, %[[C1]] : memref<?x?xf32>
+// CHECK-DAG: [[ARG_P:%[0-9a-zA-Z_]+]] = memref.dim %{{.*}}, %[[C2]] : memref<?x?x?xf32>
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%c2 = arith.constant 2 : index
@@ -420,7 +420,7 @@ func.func @vec_rejected_8(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
%N = memref.dim %A, %c1 : memref<?x?xf32>
%P = memref.dim %B, %c2 : memref<?x?x?xf32>
-// CHECK: affine.for %{{.*}}{{[0-9]*}} = 0 to %{{[0-9]*}} {
+// CHECK: affine.for %{{.*}}{{[0-9a-zA-Z_]*}} = 0 to %{{[0-9a-zA-Z_]*}} {
// CHECK: for [[IV18:%[a-zA-Z0-9]+]] = 0 to [[ARG_M]] step 128
// CHECK: %{{.*}} = affine.apply #[[$map_id1]](%{{.*}})
// CHECK: %{{.*}} = affine.apply #[[$map_id1]](%{{.*}})
@@ -436,17 +436,17 @@ func.func @vec_rejected_8(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
// -----
-// CHECK-DAG: #[[$map_id1:map[0-9]+]] = affine_map<(d0) -> (d0)>
-// CHECK-DAG: #[[$map_proj_d0d1_0:map[0-9]+]] = affine_map<(d0, d1) -> (0)>
+// CHECK-DAG: #[[$map_id1:map[0-9a-zA-Z_]+]] = affine_map<(d0) -> (d0)>
+// CHECK-DAG: #[[$map_proj_d0d1_0:map[0-9a-zA-Z_]+]] = affine_map<(d0, d1) -> (0)>
// CHECK-LABEL: func @vec_rejected_9
func.func @vec_rejected_9(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
-// CHECK-DAG: [[ARG_M:%[0-9]+]] = memref.dim %{{.*}}, %[[C0]] : memref<?x?xf32>
-// CHECK-DAG: [[ARG_N:%[0-9]+]] = memref.dim %{{.*}}, %[[C1]] : memref<?x?xf32>
-// CHECK-DAG: [[ARG_P:%[0-9]+]] = memref.dim %{{.*}}, %[[C2]] : memref<?x?x?xf32>
+// CHECK-DAG: [[ARG_M:%[0-9a-zA-Z_]+]] = memref.dim %{{.*}}, %[[C0]] : memref<?x?xf32>
+// CHECK-DAG: [[ARG_N:%[0-9a-zA-Z_]+]] = memref.dim %{{.*}}, %[[C1]] : memref<?x?xf32>
+// CHECK-DAG: [[ARG_P:%[0-9a-zA-Z_]+]] = memref.dim %{{.*}}, %[[C2]] : memref<?x?x?xf32>
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%c2 = arith.constant 2 : index
@@ -454,7 +454,7 @@ func.func @vec_rejected_9(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
%N = memref.dim %A, %c1 : memref<?x?xf32>
%P = memref.dim %B, %c2 : memref<?x?x?xf32>
-// CHECK: affine.for %{{.*}}{{[0-9]*}} = 0 to %{{[0-9]*}} {
+// CHECK: affine.for %{{.*}}{{[0-9a-zA-Z_]*}} = 0 to %{{[0-9a-zA-Z_]*}} {
// CHECK: for [[IV18:%[a-zA-Z0-9]+]] = 0 to [[ARG_M]] step 128
// CHECK: %{{.*}} = affine.apply #[[$map_id1]](%{{.*}})
// CHECK-NEXT: %{{.*}} = affine.apply #[[$map_id1]](%{{.*}})
@@ -477,9 +477,9 @@ func.func @vec_rejected_10(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
-// CHECK-DAG: [[ARG_M:%[0-9]+]] = memref.dim %{{.*}}, %[[C0]] : memref<?x?xf32>
-// CHECK-DAG: [[ARG_N:%[0-9]+]] = memref.dim %{{.*}}, %[[C1]] : memref<?x?xf32>
-// CHECK-DAG: [[ARG_P:%[0-9]+]] = memref.dim %{{.*}}, %[[C2]] : memref<?x?x?xf32>
+// CHECK-DAG: [[ARG_M:%[0-9a-zA-Z_]+]] = memref.dim %{{.*}}, %[[C0]] : memref<?x?xf32>
+// CHECK-DAG: [[ARG_N:%[0-9a-zA-Z_]+]] = memref.dim %{{.*}}, %[[C1]] : memref<?x?xf32>
+// CHECK-DAG: [[ARG_P:%[0-9a-zA-Z_]+]] = memref.dim %{{.*}}, %[[C2]] : memref<?x?x?xf32>
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%c2 = arith.constant 2 : index
@@ -487,7 +487,7 @@ func.func @vec_rejected_10(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
%N = memref.dim %A, %c1 : memref<?x?xf32>
%P = memref.dim %B, %c2 : memref<?x?x?xf32>
-// CHECK: affine.for %{{.*}}{{[0-9]*}} = 0 to %{{[0-9]*}} {
+// CHECK: affine.for %{{.*}}{{[0-9a-zA-Z_]*}} = 0 to %{{[0-9a-zA-Z_]*}} {
affine.for %i15 = 0 to %M { // not vectorized due to condition below
affine.if #set0(%i15) {
%a15 = affine.load %A[%c0, %c0] : memref<?x?xf32>
@@ -503,9 +503,9 @@ func.func @vec_rejected_11(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
- // CHECK-DAG: [[ARG_M:%[0-9]+]] = memref.dim %{{.*}}, %[[C0]] : memref<?x?xf32>
- // CHECK-DAG: [[ARG_N:%[0-9]+]] = memref.dim %{{.*}}, %[[C1]] : memref<?x?xf32>
- // CHECK-DAG: [[ARG_P:%[0-9]+]] = memref.dim %{{.*}}, %[[C2]] : memref<?x?x?xf32>
+ // CHECK-DAG: [[ARG_M:%[0-9a-zA-Z_]+]] = memref.dim %{{.*}}, %[[C0]] : memref<?x?xf32>
+ // CHECK-DAG: [[ARG_N:%[0-9a-zA-Z_]+]] = memref.dim %{{.*}}, %[[C1]] : memref<?x?xf32>
+ // CHECK-DAG: [[ARG_P:%[0-9a-zA-Z_]+]] = memref.dim %{{.*}}, %[[C2]] : memref<?x?x?xf32>
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%c2 = arith.constant 2 : index
@@ -513,8 +513,8 @@ func.func @vec_rejected_11(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
%N = memref.dim %A, %c1 : memref<?x?xf32>
%P = memref.dim %B, %c2 : memref<?x?x?xf32>
- // CHECK: for [[IV10:%[arg0-9]*]] = 0 to %{{[0-9]*}} {
- // CHECK: for [[IV11:%[arg0-9]*]] = 0 to %{{[0-9]*}} {
+ // CHECK: for [[IV10:%[0-9a-zA-Z_]*]] = 0 to %{{[0-9a-zA-Z_]*}} {
+ // CHECK: for [[IV11:%[0-9a-zA-Z_]*]] = 0 to %{{[0-9a-zA-Z_]*}} {
// This is similar to vec_rejected_5, but the order of indices is different.
affine.for %i10 = 0 to %M { // not vectorized
affine.for %i11 = 0 to %N { // not vectorized
diff --git a/mlir/test/Dialect/Affine/affine-data-copy.mlir b/mlir/test/Dialect/Affine/affine-data-copy.mlir
index b845f9db960c2..31e5538cc576d 100644
--- a/mlir/test/Dialect/Affine/affine-data-copy.mlir
+++ b/mlir/test/Dialect/Affine/affine-data-copy.mlir
@@ -18,8 +18,8 @@
#ub = affine_map<(d0) -> (d0 + 128)>
// Map used to index the buffer while computing.
-// CHECK-DAG: [[$MAP_IDENTITY:map[0-9]+]] = affine_map<(d0) -> (d0)>
-// CHECK-DAG: [[$MAP_PLUS_128:map[0-9]+]] = affine_map<(d0) -> (d0 + 128)>
+// CHECK-DAG: [[$MAP_IDENTITY:map[0-9a-zA-Z_]+]] = affine_map<(d0) -> (d0)>
+// CHECK-DAG: [[$MAP_PLUS_128:map[0-9a-zA-Z_]+]] = affine_map<(d0) -> (d0 + 128)>
// CHECK-LABEL: func @matmul
// FILTER-LABEL: func @matmul
@@ -49,7 +49,7 @@ func.func @matmul(%A: memref<4096x4096xf32>, %B: memref<4096x4096xf32>, %C: memr
// CHECK: affine.for %[[I:.*]] = 0 to 4096 step 128 {
// CHECK: affine.for %[[J:.*]] = 0 to 4096 step 128 {
-// CHECK: [[BUFC:%[0-9]+]] = memref.alloc() : memref<128x128xf32>
+// CHECK: [[BUFC:%[0-9a-zA-Z_]+]] = memref.alloc() : memref<128x128xf32>
// The result matrix's copy gets hoisted out.
// Result matrix copy-in.
// CHECK: affine.for %[[II:.*]] = #[[$MAP_IDENTITY]](%{{.*}}) to #[[$MAP_PLUS_128]](%{{.*}}) {
@@ -61,7 +61,7 @@ func.func @matmul(%A: memref<4096x4096xf32>, %B: memref<4096x4096xf32>, %C: memr
// LHS matrix copy-in.
// CHECK: affine.for %[[K:.*]] = 0 to 4096 step 128 {
-// CHECK: [[BUFA:%[0-9]+]] = memref.alloc() : memref<128x128xf32>
+// CHECK: [[BUFA:%[0-9a-zA-Z_]+]] = memref.alloc() : memref<128x128xf32>
// CHECK: affine.for %[[II:.*]] = #[[$MAP_IDENTITY]](%{{.*}}) to #[[$MAP_PLUS_128]](%{{.*}}) {
// CHECK: affine.for %[[KK:.*]] = #[[$MAP_IDENTITY]](%{{.*}}) to #[[$MAP_PLUS_128]](%{{.*}}) {
// CHECK: affine.load %{{.*}}[%{{.*}}, %{{.*}}] : memref<4096x4096xf32>
@@ -70,7 +70,7 @@ func.func @matmul(%A: memref<4096x4096xf32>, %B: memref<4096x4096xf32>, %C: memr
// CHECK: }
// RHS matrix copy-in.
-// CHECK: [[BUFB:%[0-9]+]] = memref.alloc() : memref<128x128xf32>
+// CHECK: [[BUFB:%[0-9a-zA-Z_]+]] = memref.alloc() : memref<128x128xf32>
// CHECK: affine.for %[[KK:.*]] = #[[$MAP_IDENTITY]](%{{.*}}) to #[[$MAP_PLUS_128]](%{{.*}}) {
// CHECK: affine.for %[[JJ:.*]] = #[[$MAP_IDENTITY]](%{{.*}}) to #[[$MAP_PLUS_128]](%{{.*}}) {
// CHECK: affine.load %{{.*}}[%{{.*}}, %{{.*}}] : memref<4096x4096xf32>
@@ -200,9 +200,9 @@ func.func @single_elt_buffers(%arg0: memref<1024x1024xf32>, %arg1: memref<1024x1
#map_ub = affine_map<(d0) -> (4096, d0 + 100)>
-// CHECK-DAG: [[$MAP_IDENTITY:map[0-9]+]] = affine_map<(d0) -> (d0)>
-// CHECK-DAG: [[$MAP_MIN_UB1:map[0-9]+]] = affine_map<(d0) -> (d0 + 100, 4096)>
-// CHECK-DAG: [[$MAP_MIN_UB2:map[0-9]+]] = affine_map<(d0) -> (4096, d0 + 100)>
+// CHECK-DAG: [[$MAP_IDENTITY:map[0-9a-zA-Z_]+]] = affine_map<(d0) -> (d0)>
+// CHECK-DAG: [[$MAP_MIN_UB1:map[0-9a-zA-Z_]+]] = affine_map<(d0) -> (d0 + 100, 4096)>
+// CHECK-DAG: [[$MAP_MIN_UB2:map[0-9a-zA-Z_]+]] = affine_map<(d0) -> (4096, d0 + 100)>
// CHECK-LABEL: func @min_upper_bound
func.func @min_upper_bound(%A: memref<4096xf32>) -> memref<4096xf32> {
@@ -246,8 +246,8 @@ func.func @min_upper_bound(%A: memref<4096xf32>) -> memref<4096xf32> {
// CHECK-DAG: #[[$UB:.*]] = affine_map<()[s0, s1] -> (s0 * 512 + 512, s1 * 6 + 6)>
// CHECK-LABEL: max_lower_bound(%{{.*}}: memref<2048x516xf64>,
-// CHECK-SAME: [[i:arg[0-9]+]]
-// CHECK-SAME: [[j:arg[0-9]+]]
+// CHECK-SAME: [[i:arg[0-9a-zA-Z_]+]]
+// CHECK-SAME: [[j:arg[0-9a-zA-Z_]+]]
func.func @max_lower_bound(%M: memref<2048x516xf64>, %i : index, %j : index) {
affine.for %ii = 0 to 2048 {
affine.for %jj = max #lb()[%i, %j] to min #ub()[%i, %j] {
diff --git a/mlir/test/Dialect/Affine/affine-loop-invariant-code-motion.mlir b/mlir/test/Dialect/Affine/affine-loop-invariant-code-motion.mlir
index 31c86e4405208..9036d6b0227de 100644
--- a/mlir/test/Dialect/Affine/affine-loop-invariant-code-motion.mlir
+++ b/mlir/test/Dialect/Affine/affine-loop-invariant-code-motion.mlir
@@ -12,14 +12,14 @@ func.func @nested_loops_both_having_invariant_code() {
}
}
- // CHECK: %0 = memref.alloc() : memref<10xf32>
- // CHECK-NEXT: %cst = arith.constant 7.000000e+00 : f32
- // CHECK-NEXT: %cst_0 = arith.constant 8.000000e+00 : f32
- // CHECK-NEXT: %1 = arith.addf %cst, %cst_0 : f32
- // CHECK-NEXT: affine.for %arg0 = 0 to 10 {
+ // CHECK: memref.alloc() : memref<10xf32>
+ // CHECK-NEXT: %[[cst:.*]] = arith.constant 7.000000e+00 : f32
+ // CHECK-NEXT: %[[cst_0:.*]] = arith.constant 8.000000e+00 : f32
+ // CHECK-NEXT: arith.addf %[[cst]], %[[cst_0]] : f32
+ // CHECK-NEXT: affine.for %{{.*}} = 0 to 10 {
// CHECK-NEXT: }
// CHECK-NEXT: affine.for %{{.*}} = 0 to 10 {
- // CHECK-NEXT: affine.store %1, %0[%arg0] : memref<10xf32>
+ // CHECK-NEXT: affine.store
return
}
@@ -37,13 +37,13 @@ func.func @store_affine_apply() -> memref<10xf32> {
affine.store %cf7, %m[%t0] : memref<10xf32>
}
return %m : memref<10xf32>
-// CHECK: %cst = arith.constant 7.000000e+00 : f32
-// CHECK-NEXT: %0 = memref.alloc() : memref<10xf32>
-// CHECK-NEXT: affine.for %arg0 = 0 to 10 {
-// CHECK-NEXT: %1 = affine.apply #map{{[0-9]*}}(%arg0)
-// CHECK-NEXT: affine.store %cst, %0[%1] : memref<10xf32>
+// CHECK: %[[cst:.*]] = arith.constant 7.000000e+00 : f32
+// CHECK-NEXT: %[[VAR_0:.*]] = memref.alloc() : memref<10xf32>
+// CHECK-NEXT: affine.for %{{.*}} = 0 to 10 {
+// CHECK-NEXT: affine.apply
+// CHECK-NEXT: affine.store %[[cst]]
// CHECK-NEXT: }
-// CHECK-NEXT: return %0 : memref<10xf32>
+// CHECK-NEXT: return %[[VAR_0]] : memref<10xf32>
}
// -----
@@ -59,10 +59,10 @@ func.func @nested_loops_code_invariant_to_both() {
}
}
- // CHECK: %0 = memref.alloc() : memref<10xf32>
- // CHECK-NEXT: %cst = arith.constant 7.000000e+00 : f32
- // CHECK-NEXT: %cst_0 = arith.constant 8.000000e+00 : f32
- // CHECK-NEXT: %1 = arith.addf %cst, %cst_0 : f32
+ // CHECK: memref.alloc() : memref<10xf32>
+ // CHECK-NEXT: %[[cst:.*]] = arith.constant 7.000000e+00 : f32
+ // CHECK-NEXT: %[[cst_0:.*]] = arith.constant 8.000000e+00 : f32
+ // CHECK-NEXT: arith.addf %[[cst]], %[[cst_0]] : f32
return
}
@@ -98,21 +98,21 @@ func.func @nested_loops_inner_loops_invariant_to_outermost_loop(%m : memref<10xi
func.func @single_loop_nothing_invariant() {
%m1 = memref.alloc() : memref<10xf32>
- %m2 = memref.alloc() : memref<10xf32>
+ %m2 = memref.alloc() : memref<11xf32>
affine.for %arg0 = 0 to 10 {
%v0 = affine.load %m1[%arg0] : memref<10xf32>
- %v1 = affine.load %m2[%arg0] : memref<10xf32>
+ %v1 = affine.load %m2[%arg0] : memref<11xf32>
%v2 = arith.addf %v0, %v1 : f32
affine.store %v2, %m1[%arg0] : memref<10xf32>
}
- // CHECK: %0 = memref.alloc() : memref<10xf32>
- // CHECK-NEXT: %1 = memref.alloc() : memref<10xf32>
- // CHECK-NEXT: affine.for %arg0 = 0 to 10 {
- // CHECK-NEXT: %2 = affine.load %0[%arg0] : memref<10xf32>
- // CHECK-NEXT: %3 = affine.load %1[%arg0] : memref<10xf32>
- // CHECK-NEXT: %4 = arith.addf %2, %3 : f32
- // CHECK-NEXT: affine.store %4, %0[%arg0] : memref<10xf32>
+ // CHECK: memref.alloc() : memref<10xf32>
+ // CHECK-NEXT: memref.alloc() : memref<11xf32>
+ // CHECK-NEXT: affine.for %{{.*}} = 0 to 10 {
+ // CHECK-NEXT: affine.load %{{.*}} : memref<10xf32>
+ // CHECK-NEXT: affine.load %{{.*}} : memref<11xf32>
+ // CHECK-NEXT: arith.addf
+ // CHECK-NEXT: affine.store %{{.*}} : memref<10xf32>
return
}
@@ -132,13 +132,13 @@ func.func @invariant_code_inside_affine_if() {
}
}
- // CHECK: %0 = memref.alloc() : memref<10xf32>
- // CHECK-NEXT: %cst = arith.constant 8.000000e+00 : f32
- // CHECK-NEXT: affine.for %arg0 = 0 to 10 {
- // CHECK-NEXT: %1 = affine.apply #map{{[0-9]*}}(%arg0)
- // CHECK-NEXT: affine.if #set(%arg0, %1) {
- // CHECK-NEXT: %2 = arith.addf %cst, %cst : f32
- // CHECK-NEXT: affine.store %2, %0[%arg0] : memref<10xf32>
+ // CHECK: memref.alloc() : memref<10xf32>
+ // CHECK-NEXT: %[[cst:.*]] = arith.constant 8.000000e+00 : f32
+ // CHECK-NEXT: affine.for %{{.*}} = 0 to 10 {
+  // CHECK-NEXT: affine.apply #map{{[0-9]*}}(%{{.*}})
+ // CHECK-NEXT: affine.if
+ // CHECK-NEXT: arith.addf %[[cst]], %[[cst]] : f32
+ // CHECK-NEXT: affine.store
// CHECK-NEXT: }
@@ -155,22 +155,22 @@ func.func @dependent_stores() {
affine.for %arg0 = 0 to 10 {
%v0 = arith.addf %cf7, %cf8 : f32
affine.for %arg1 = 0 to 10 {
- %v1 = arith.addf %cf7, %cf7 : f32
+ %v1 = arith.mulf %cf7, %cf7 : f32
affine.store %v1, %m[%arg1] : memref<10xf32>
affine.store %v0, %m[%arg0] : memref<10xf32>
}
}
- // CHECK: %0 = memref.alloc() : memref<10xf32>
- // CHECK-NEXT: %cst = arith.constant 7.000000e+00 : f32
- // CHECK-NEXT: %cst_0 = arith.constant 8.000000e+00 : f32
- // CHECK-NEXT: %1 = arith.addf %cst, %cst_0 : f32
- // CHECK-NEXT: %2 = arith.addf %cst, %cst : f32
- // CHECK-NEXT: affine.for %arg0 = 0 to 10 {
+ // CHECK: memref.alloc() : memref<10xf32>
+ // CHECK-NEXT: %[[cst:.*]] = arith.constant 7.000000e+00 : f32
+ // CHECK-NEXT: %[[cst_0:.*]] = arith.constant 8.000000e+00 : f32
+ // CHECK-NEXT: arith.addf %[[cst]], %[[cst_0]] : f32
+ // CHECK-NEXT: %[[mul:.*]] = arith.mulf %[[cst]], %[[cst]] : f32
+ // CHECK-NEXT: affine.for %{{.*}} = 0 to 10 {
- // CHECK-NEXT: affine.for %arg1 = 0 to 10 {
- // CHECK-NEXT: affine.store %2, %0[%arg1] : memref<10xf32>
- // CHECK-NEXT: affine.store %1, %0[%arg0] : memref<10xf32>
+ // CHECK-NEXT: affine.for %{{.*}} = 0 to 10 {
+ // CHECK-NEXT: affine.store %[[mul]]
+ // CHECK-NEXT: affine.store
return
}
@@ -185,21 +185,21 @@ func.func @independent_stores() {
affine.for %arg0 = 0 to 10 {
%v0 = arith.addf %cf7, %cf8 : f32
affine.for %arg1 = 0 to 10 {
- %v1 = arith.addf %cf7, %cf7 : f32
+ %v1 = arith.mulf %cf7, %cf7 : f32
affine.store %v0, %m[%arg0] : memref<10xf32>
affine.store %v1, %m[%arg1] : memref<10xf32>
}
}
- // CHECK: %0 = memref.alloc() : memref<10xf32>
- // CHECK-NEXT: %cst = arith.constant 7.000000e+00 : f32
- // CHECK-NEXT: %cst_0 = arith.constant 8.000000e+00 : f32
- // CHECK-NEXT: %1 = arith.addf %cst, %cst_0 : f32
- // CHECK-NEXT: %2 = arith.addf %cst, %cst : f32
- // CHECK-NEXT: affine.for %arg0 = 0 to 10 {
- // CHECK-NEXT: affine.for %arg1 = 0 to 10 {
- // CHECK-NEXT: affine.store %1, %0[%arg0] : memref<10xf32>
- // CHECK-NEXT: affine.store %2, %0[%arg1] : memref<10xf32>
+ // CHECK: memref.alloc() : memref<10xf32>
+ // CHECK-NEXT: %[[cst:.*]] = arith.constant 7.000000e+00 : f32
+ // CHECK-NEXT: %[[cst_0:.*]] = arith.constant 8.000000e+00 : f32
+ // CHECK-NEXT: %[[add:.*]] = arith.addf %[[cst]], %[[cst_0]] : f32
+ // CHECK-NEXT: %[[mul:.*]] = arith.mulf %[[cst]], %[[cst]] : f32
+ // CHECK-NEXT: affine.for %{{.*}} = 0 to 10 {
+ // CHECK-NEXT: affine.for %{{.*}} = 0 to 10 {
+ // CHECK-NEXT: affine.store %[[add]]
+ // CHECK-NEXT: affine.store %[[mul]]
// CHECK-NEXT: }
return
@@ -221,15 +221,15 @@ func.func @load_dependent_store() {
}
}
- // CHECK: %0 = memref.alloc() : memref<10xf32>
- // CHECK-NEXT: %cst = arith.constant 7.000000e+00 : f32
- // CHECK-NEXT: %cst_0 = arith.constant 8.000000e+00 : f32
- // CHECK-NEXT: %1 = arith.addf %cst, %cst_0 : f32
- // CHECK-NEXT: %2 = arith.addf %cst, %cst : f32
- // CHECK-NEXT: affine.for %arg0 = 0 to 10 {
- // CHECK-NEXT: affine.for %arg1 = 0 to 10 {
- // CHECK-NEXT: affine.store %1, %0[%arg1] : memref<10xf32>
- // CHECK-NEXT: %3 = affine.load %0[%arg0] : memref<10xf32>
+ // CHECK: memref.alloc() : memref<10xf32>
+ // CHECK-NEXT: %[[cst:.*]] = arith.constant 7.000000e+00 : f32
+ // CHECK-NEXT: %[[cst_0:.*]] = arith.constant 8.000000e+00 : f32
+ // CHECK-NEXT: arith.addf %[[cst]], %[[cst_0]] : f32
+ // CHECK-NEXT: arith.addf %[[cst]], %[[cst]] : f32
+ // CHECK-NEXT: affine.for %{{.*}} = 0 to 10 {
+ // CHECK-NEXT: affine.for
+ // CHECK-NEXT: affine.store
+ // CHECK-NEXT: affine.load
return
}
@@ -250,13 +250,13 @@ func.func @load_after_load() {
}
}
- // CHECK: %0 = memref.alloc() : memref<10xf32>
- // CHECK-NEXT: %cst = arith.constant 7.000000e+00 : f32
- // CHECK-NEXT: %cst_0 = arith.constant 8.000000e+00 : f32
- // CHECK-NEXT: %1 = arith.addf %cst, %cst_0 : f32
- // CHECK-NEXT: %2 = arith.addf %cst, %cst : f32
- // CHECK-NEXT: affine.for %arg0 = 0 to 10 {
- // CHECK-NEXT: %3 = affine.load %0[%arg0] : memref<10xf32>
+ // CHECK: memref.alloc() : memref<10xf32>
+ // CHECK-NEXT: %[[cst:.*]] = arith.constant 7.000000e+00 : f32
+ // CHECK-NEXT: %[[cst_0:.*]] = arith.constant 8.000000e+00 : f32
+ // CHECK-NEXT: arith.addf %[[cst]], %[[cst_0]] : f32
+ // CHECK-NEXT: arith.addf %[[cst]], %[[cst]] : f32
+ // CHECK-NEXT: affine.for %{{.*}} = 0 to 10 {
+ // CHECK-NEXT: affine.load
// CHECK-NEXT: }
// CHECK-NEXT: affine.for %{{.*}} = 0 to 10 {
// CHECK-NEXT: %{{.*}} = affine.load %{{.*}}[%{{.*}}] : memref<10xf32>
@@ -279,14 +279,14 @@ func.func @invariant_affine_if() {
}
}
- // CHECK: %0 = memref.alloc() : memref<10xf32>
- // CHECK-NEXT: %cst = arith.constant 8.000000e+00 : f32
- // CHECK-NEXT: affine.for %arg0 = 0 to 10 {
+ // CHECK: memref.alloc() : memref<10xf32>
+ // CHECK-NEXT: %[[cst:.*]] = arith.constant 8.000000e+00 : f32
+ // CHECK-NEXT: affine.for %{{.*}} = 0 to 10 {
// CHECK-NEXT: }
// CHECK-NEXT: affine.for %{{.*}} = 0 to 10 {
- // CHECK-NEXT: affine.if #set(%arg0, %arg0) {
- // CHECK-NEXT: %1 = arith.addf %cst, %cst : f32
- // CHECK-NEXT: affine.store %1, %0[%arg0] : memref<10xf32>
+ // CHECK-NEXT: affine.if
+ // CHECK-NEXT: arith.addf %[[cst]], %[[cst]] : f32
+ // CHECK-NEXT: affine.store
// CHECK-NEXT: }
@@ -308,13 +308,13 @@ func.func @invariant_affine_if2() {
}
}
- // CHECK: %0 = memref.alloc() : memref<10xf32>
- // CHECK-NEXT: %cst = arith.constant 8.000000e+00 : f32
- // CHECK-NEXT: affine.for %arg0 = 0 to 10 {
- // CHECK-NEXT: affine.for %arg1 = 0 to 10 {
- // CHECK-NEXT: affine.if #set(%arg0, %arg0) {
- // CHECK-NEXT: %1 = arith.addf %cst, %cst : f32
- // CHECK-NEXT: affine.store %1, %0[%arg1] : memref<10xf32>
+ // CHECK: memref.alloc() : memref<10xf32>
+ // CHECK-NEXT: %[[cst:.*]] = arith.constant 8.000000e+00 : f32
+ // CHECK-NEXT: affine.for %{{.*}} = 0 to 10 {
+ // CHECK-NEXT: affine.for %{{.*}} = 0 to 10 {
+ // CHECK-NEXT: affine.if
+ // CHECK-NEXT: arith.addf %[[cst]], %[[cst]] : f32
+ // CHECK-NEXT: affine.store
// CHECK-NEXT: }
// CHECK-NEXT: }
@@ -339,15 +339,15 @@ func.func @invariant_affine_nested_if() {
}
}
- // CHECK: %0 = memref.alloc() : memref<10xf32>
- // CHECK-NEXT: %cst = arith.constant 8.000000e+00 : f32
- // CHECK-NEXT: affine.for %arg0 = 0 to 10 {
- // CHECK-NEXT: affine.for %arg1 = 0 to 10 {
- // CHECK-NEXT: affine.if #set(%arg0, %arg0) {
- // CHECK-NEXT: %1 = arith.addf %cst, %cst : f32
- // CHECK-NEXT: affine.store %1, %0[%arg0] : memref<10xf32>
- // CHECK-NEXT: affine.if #set(%arg0, %arg0) {
- // CHECK-NEXT: affine.store %1, %0[%arg1] : memref<10xf32>
+ // CHECK: memref.alloc() : memref<10xf32>
+ // CHECK-NEXT: %[[cst:.*]] = arith.constant 8.000000e+00 : f32
+ // CHECK-NEXT: affine.for %[[arg0:.*]] = 0 to 10 {
+ // CHECK-NEXT: affine.for %[[arg1:.*]] = 0 to 10 {
+ // CHECK-NEXT: affine.if
+ // CHECK-NEXT: arith.addf %[[cst]], %[[cst]] : f32
+ // CHECK-NEXT: affine.store {{.*}}[%[[arg0]]] : memref<10xf32>
+ // CHECK-NEXT: affine.if
+ // CHECK-NEXT: affine.store {{.*}}[%[[arg1]]] : memref<10xf32>
// CHECK-NEXT: }
// CHECK-NEXT: }
// CHECK-NEXT: }
@@ -375,17 +375,17 @@ func.func @invariant_affine_nested_if_else() {
}
}
- // CHECK: %0 = memref.alloc() : memref<10xf32>
- // CHECK-NEXT: %cst = arith.constant 8.000000e+00 : f32
- // CHECK-NEXT: affine.for %arg0 = 0 to 10 {
- // CHECK-NEXT: affine.for %arg1 = 0 to 10 {
- // CHECK-NEXT: affine.if #set(%arg0, %arg0) {
- // CHECK-NEXT: %1 = arith.addf %cst, %cst : f32
- // CHECK-NEXT: affine.store %1, %0[%arg0] : memref<10xf32>
- // CHECK-NEXT: affine.if #set(%arg0, %arg0) {
- // CHECK-NEXT: affine.store %1, %0[%arg0] : memref<10xf32>
+ // CHECK: memref.alloc() : memref<10xf32>
+ // CHECK-NEXT: %[[cst:.*]] = arith.constant 8.000000e+00 : f32
+ // CHECK-NEXT: affine.for %[[arg0:.*]] = 0 to 10 {
+ // CHECK-NEXT: affine.for %[[arg1:.*]] = 0 to 10 {
+ // CHECK-NEXT: affine.if
+ // CHECK-NEXT: arith.addf %[[cst]], %[[cst]] : f32
+ // CHECK-NEXT: affine.store {{.*}}[%[[arg0]]] : memref<10xf32>
+ // CHECK-NEXT: affine.if
+ // CHECK-NEXT: affine.store {{.*}}[%[[arg0]]] : memref<10xf32>
// CHECK-NEXT: } else {
- // CHECK-NEXT: affine.store %1, %0[%arg1] : memref<10xf32>
+ // CHECK-NEXT: affine.store {{.*}}[%[[arg1]]] : memref<10xf32>
// CHECK-NEXT: }
// CHECK-NEXT: }
// CHECK-NEXT: }
@@ -414,19 +414,19 @@ func.func @invariant_affine_nested_if_else2() {
}
}
- // CHECK: %0 = memref.alloc() : memref<10xf32>
- // CHECK-NEXT: %1 = memref.alloc() : memref<10xf32>
- // CHECK-NEXT: %cst = arith.constant 8.000000e+00 : f32
- // CHECK-NEXT: affine.for %arg0 = 0 to 10 {
+ // CHECK: memref.alloc() : memref<10xf32>
+ // CHECK-NEXT: memref.alloc() : memref<10xf32>
+ // CHECK-NEXT: %[[cst:.*]] = arith.constant 8.000000e+00 : f32
+ // CHECK-NEXT: affine.for %[[arg0:.*]] = 0 to 10 {
// CHECK-NEXT: }
// CHECK-NEXT: affine.for %{{.*}} = 0 to 10 {
- // CHECK-NEXT: affine.if #set(%arg0, %arg0) {
- // CHECK-NEXT: %2 = arith.addf %cst, %cst : f32
- // CHECK-NEXT: %3 = affine.load %0[%arg0] : memref<10xf32>
- // CHECK-NEXT: affine.if #set(%arg0, %arg0) {
- // CHECK-NEXT: affine.store %2, %1[%arg0] : memref<10xf32>
+ // CHECK-NEXT: affine.if
+ // CHECK-NEXT: arith.addf %[[cst]], %[[cst]] : f32
+ // CHECK-NEXT: affine.load {{.*}}[%[[arg0]]] : memref<10xf32>
+ // CHECK-NEXT: affine.if
+ // CHECK-NEXT: affine.store {{.*}}[%[[arg0]]] : memref<10xf32>
// CHECK-NEXT: } else {
- // CHECK-NEXT: %4 = affine.load %0[%arg0] : memref<10xf32>
+ // CHECK-NEXT: affine.load {{.*}}[%[[arg0]]] : memref<10xf32>
// CHECK-NEXT: }
// CHECK-NEXT: }
@@ -451,16 +451,16 @@ func.func @invariant_affine_nested_if2() {
}
}
- // CHECK: %0 = memref.alloc() : memref<10xf32>
- // CHECK-NEXT: %cst = arith.constant 8.000000e+00 : f32
- // CHECK-NEXT: affine.for %arg0 = 0 to 10 {
- // CHECK-NEXT: }
+ // CHECK: memref.alloc() : memref<10xf32>
+ // CHECK-NEXT: %[[cst:.*]] = arith.constant 8.000000e+00 : f32
// CHECK-NEXT: affine.for %{{.*}} = 0 to 10 {
- // CHECK-NEXT: affine.if #set(%arg0, %arg0) {
- // CHECK-NEXT: %1 = arith.addf %cst, %cst : f32
- // CHECK-NEXT: %2 = affine.load %0[%arg0] : memref<10xf32>
- // CHECK-NEXT: affine.if #set(%arg0, %arg0) {
- // CHECK-NEXT: %3 = affine.load %0[%arg0] : memref<10xf32>
+ // CHECK-NEXT: }
+ // CHECK-NEXT: affine.for %[[arg0:.*]] = 0 to 10 {
+ // CHECK-NEXT: affine.if
+ // CHECK-NEXT: arith.addf %[[cst]], %[[cst]] : f32
+ // CHECK-NEXT: affine.load {{.*}}[%[[arg0]]] : memref<10xf32>
+ // CHECK-NEXT: affine.if
+ // CHECK-NEXT: affine.load {{.*}}[%[[arg0]]] : memref<10xf32>
// CHECK-NEXT: }
// CHECK-NEXT: }
@@ -485,15 +485,15 @@ func.func @invariant_affine_for_inside_affine_if() {
}
}
- // CHECK: %0 = memref.alloc() : memref<10xf32>
- // CHECK-NEXT: %cst = arith.constant 8.000000e+00 : f32
- // CHECK-NEXT: affine.for %arg0 = 0 to 10 {
- // CHECK-NEXT: affine.for %arg1 = 0 to 10 {
- // CHECK-NEXT: affine.if #set(%arg0, %arg0) {
- // CHECK-NEXT: %1 = arith.addf %cst, %cst : f32
- // CHECK-NEXT: affine.store %1, %0[%arg0] : memref<10xf32>
- // CHECK-NEXT: affine.for %arg2 = 0 to 10 {
- // CHECK-NEXT: affine.store %1, %0[%arg2] : memref<10xf32>
+ // CHECK: memref.alloc() : memref<10xf32>
+ // CHECK-NEXT: %[[cst:.*]] = arith.constant 8.000000e+00 : f32
+ // CHECK-NEXT: affine.for %[[arg0:.*]] = 0 to 10 {
+ // CHECK-NEXT: affine.for %[[arg1:.*]] = 0 to 10 {
+ // CHECK-NEXT: affine.if
+ // CHECK-NEXT: arith.addf %[[cst]], %[[cst]] : f32
+ // CHECK-NEXT: affine.store {{.*}}[%[[arg0]]] : memref<10xf32>
+ // CHECK-NEXT: affine.for %[[arg2:.*]] = 0 to 10 {
+ // CHECK-NEXT: affine.store {{.*}}[%[[arg2]]] : memref<10xf32>
// CHECK-NEXT: }
// CHECK-NEXT: }
// CHECK-NEXT: }
@@ -513,12 +513,12 @@ func.func @invariant_constant_and_load() {
affine.store %v, %m[%arg0] : memref<100xf32>
}
- // CHECK: %0 = memref.alloc() : memref<100xf32>
- // CHECK-NEXT: %1 = memref.alloc() : memref<100xf32>
- // CHECK-NEXT: %c0 = arith.constant 0 : index
- // CHECK-NEXT: %2 = affine.load %1[%c0] : memref<100xf32>
- // CHECK-NEXT: affine.for %arg0 = 0 to 5 {
- // CHECK-NEXT: affine.store %2, %0[%arg0] : memref<100xf32>
+ // CHECK: memref.alloc() : memref<100xf32>
+ // CHECK-NEXT: memref.alloc() : memref<100xf32>
+ // CHECK-NEXT: arith.constant 0 : index
+ // CHECK-NEXT: affine.load
+ // CHECK-NEXT: affine.for %{{.*}} = 0 to 5 {
+ // CHECK-NEXT: affine.store
return
@@ -537,13 +537,13 @@ func.func @nested_load_store_same_memref() {
}
}
- // CHECK: %0 = memref.alloc() : memref<10xf32>
- // CHECK-NEXT: %cst = arith.constant 8.000000e+00 : f32
- // CHECK-NEXT: %c0 = arith.constant 0 : index
- // CHECK-NEXT: affine.for %arg0 = 0 to 10 {
- // CHECK-NEXT: %1 = affine.load %0[%c0] : memref<10xf32>
- // CHECK-NEXT: affine.for %arg1 = 0 to 10 {
- // CHECK-NEXT: affine.store %cst, %0[%arg1] : memref<10xf32>
+ // CHECK: memref.alloc() : memref<10xf32>
+ // CHECK-NEXT: %[[cst:.*]] = arith.constant 8.000000e+00 : f32
+ // CHECK-NEXT: arith.constant 0 : index
+ // CHECK-NEXT: affine.for %{{.*}} = 0 to 10 {
+ // CHECK-NEXT: affine.load
+ // CHECK-NEXT: affine.for
+ // CHECK-NEXT: affine.store %[[cst]]
return
@@ -562,14 +562,14 @@ func.func @nested_load_store_same_memref2() {
}
}
- // CHECK: %0 = memref.alloc() : memref<10xf32>
- // CHECK-NEXT: %cst = arith.constant 8.000000e+00 : f32
- // CHECK-NEXT: %c0 = arith.constant 0 : index
- // CHECK-NEXT: affine.for %arg0 = 0 to 10 {
+ // CHECK: memref.alloc() : memref<10xf32>
+ // CHECK-NEXT: %[[cst:.*]] = arith.constant 8.000000e+00 : f32
+ // CHECK-NEXT: arith.constant 0 : index
+ // CHECK-NEXT: affine.for %{{.*}} = 0 to 10 {
// CHECK-NEXT: }
// CHECK-NEXT: affine.for %{{.*}} = 0 to 10 {
- // CHECK-NEXT: affine.store %cst, %0[%c0] : memref<10xf32>
- // CHECK-NEXT: %1 = affine.load %0[%arg0] : memref<10xf32>
+ // CHECK-NEXT: affine.store %[[cst]]
+ // CHECK-NEXT: affine.load
return
diff --git a/mlir/test/Dialect/Affine/affine-loop-normalize.mlir b/mlir/test/Dialect/Affine/affine-loop-normalize.mlir
index e33b036dea417..692b1bec023de 100644
--- a/mlir/test/Dialect/Affine/affine-loop-normalize.mlir
+++ b/mlir/test/Dialect/Affine/affine-loop-normalize.mlir
@@ -168,9 +168,9 @@ func.func @loop_with_multiple_upper_bounds(%arg0: memref<?x?xf32>, %arg1 : index
// CHECK-NEXT: %{{.*}} = affine.load %[[ARG0]][%[[IIIV]], %[[KKIV]]] : memref<1024x1024xf32>
// CHECK-NEXT: %{{.*}} = affine.load %[[ARG1]][%[[KKIV]], %[[JJIV]]] : memref<1024x1024xf32>
// CHECK-NEXT: %{{.*}} = affine.load %[[ARG2]][%[[IIIV]], %[[JJIV]]] : memref<1024x1024xf32>
-// CHECK-NEXT: %{{.*}} = arith.mulf %9, %10 : f32
-// CHECK-NEXT: %{{.*}} = arith.addf %11, %12 : f32
-// CHECK-NEXT: affine.store %{{.*}}, %[[ARG2]][%6, %7] : memref<1024x1024xf32>
+// CHECK-NEXT: %{{.*}} = arith.mulf
+// CHECK-NEXT: %{{.*}} = arith.addf
+// CHECK-NEXT: affine.store %{{.*}}, %[[ARG2]]{{.*}} : memref<1024x1024xf32>
// CHECK-NEXT: }
// CHECK-NEXT: }
// CHECK-NEXT: }
diff --git a/mlir/test/Dialect/Affine/canonicalize.mlir b/mlir/test/Dialect/Affine/canonicalize.mlir
index 4d13bee23d0c5..031be52a06eec 100644
--- a/mlir/test/Dialect/Affine/canonicalize.mlir
+++ b/mlir/test/Dialect/Affine/canonicalize.mlir
@@ -17,7 +17,7 @@ func.func @compose_affine_maps_1dto2d_no_symbols() {
%x1_1 = affine.apply affine_map<(d0, d1) -> (d1)> (%x0, %x0)
// CHECK: %[[I0A:.*]] = affine.apply #[[$MAP0]](%{{.*}})
- // CHECK-NEXT: %[[V0:.*]] = memref.load %0[%[[I0A]], %[[I0A]]]
+ // CHECK-NEXT: %[[V0:.*]] = memref.load %{{.*}}[%[[I0A]], %[[I0A]]]
%v0 = memref.load %0[%x1_0, %x1_1] : memref<4x4xf32>
// Test store[%y, %y]
@@ -26,20 +26,20 @@ func.func @compose_affine_maps_1dto2d_no_symbols() {
%y1_1 = affine.apply affine_map<(d0, d1) -> (d1)> (%y0, %y0)
// CHECK-NEXT: %[[I1A:.*]] = affine.apply #[[$MAP1]](%{{.*}})
- // CHECK-NEXT: memref.store %[[V0]], %0[%[[I1A]], %[[I1A]]]
+ // CHECK-NEXT: memref.store %[[V0]], %{{.*}}[%[[I1A]], %[[I1A]]]
memref.store %v0, %0[%y1_0, %y1_1] : memref<4x4xf32>
// Test store[%x, %y]
%xy_0 = affine.apply affine_map<(d0, d1) -> (d0)> (%x0, %y0)
%xy_1 = affine.apply affine_map<(d0, d1) -> (d1)> (%x0, %y0)
- // CHECK-NEXT: memref.store %[[V0]], %0[%[[I0A]], %[[I1A]]]
+ // CHECK-NEXT: memref.store %[[V0]], %{{.*}}[%[[I0A]], %[[I1A]]]
memref.store %v0, %0[%xy_0, %xy_1] : memref<4x4xf32>
// Test store[%y, %x]
%yx_0 = affine.apply affine_map<(d0, d1) -> (d0)> (%y0, %x0)
%yx_1 = affine.apply affine_map<(d0, d1) -> (d1)> (%y0, %x0)
- // CHECK-NEXT: memref.store %[[V0]], %0[%[[I1A]], %[[I0A]]]
+ // CHECK-NEXT: memref.store %[[V0]], %{{.*}}[%[[I1A]], %[[I0A]]]
memref.store %v0, %0[%yx_0, %yx_1] : memref<4x4xf32>
}
return
diff --git a/mlir/test/Dialect/Affine/dma-generate.mlir b/mlir/test/Dialect/Affine/dma-generate.mlir
index 6c4f8a2235adb..5e43ceb1c2ed2 100644
--- a/mlir/test/Dialect/Affine/dma-generate.mlir
+++ b/mlir/test/Dialect/Affine/dma-generate.mlir
@@ -58,13 +58,13 @@ func.func @loop_nest_1d() {
// CHECK-LABEL: func @loop_nest_high_d
// CHECK: %{{.*}} = arith.constant 16384 : index
-// CHECK-DAG: [[BUFB:%[0-9]+]] = memref.alloc() : memref<512x32xf32, 2>
-// CHECK-DAG: [[BUFA:%[0-9]+]] = memref.alloc() : memref<512x32xf32, 2>
-// CHECK-DAG: [[BUFC:%[0-9]+]] = memref.alloc() : memref<512x32xf32, 2>
-// CHECK-DAG: [[TAGB:%[0-9]+]] = memref.alloc() : memref<1xi32>
-// CHECK-DAG: [[TAGA:%[0-9]+]] = memref.alloc() : memref<1xi32>
-// CHECK-DAG: [[TAGC:%[0-9]+]] = memref.alloc() : memref<1xi32>
-// CHECK-DAG: [[TAGC_W:%[0-9]+]] = memref.alloc() : memref<1xi32>
+// CHECK-DAG: [[BUFB:%[0-9a-zA-Z_]+]] = memref.alloc() : memref<512x32xf32, 2>
+// CHECK-DAG: [[BUFA:%[0-9a-zA-Z_]+]] = memref.alloc() : memref<512x32xf32, 2>
+// CHECK-DAG: [[BUFC:%[0-9a-zA-Z_]+]] = memref.alloc() : memref<512x32xf32, 2>
+// CHECK-DAG: [[TAGB:%[0-9a-zA-Z_]+]] = memref.alloc() : memref<1xi32>
+// CHECK-DAG: [[TAGA:%[0-9a-zA-Z_]+]] = memref.alloc() : memref<1xi32>
+// CHECK-DAG: [[TAGC:%[0-9a-zA-Z_]+]] = memref.alloc() : memref<1xi32>
+// CHECK-DAG: [[TAGC_W:%[0-9a-zA-Z_]+]] = memref.alloc() : memref<1xi32>
// INCOMING DMA for B
// CHECK-DAG: affine.dma_start %{{.*}}[%{{.*}}, %{{.*}}], [[BUFB]][%{{.*}}, %{{.*}}], [[TAGB]][%{{.*}}], %{{.*}} : memref<512x32xf32>, memref<512x32xf32, 2>, memref<1xi32>
// CHECK-DAG: affine.dma_wait [[TAGB]][%{{.*}}], %{{.*}} : memref<1xi32>
@@ -419,9 +419,9 @@ func.func @dma_mixed_loop_blocks() {
}
return
}
-// CHECK-DAG: [[MEM:%[0-9]+]] = memref.alloc() : memref<256x256xvector<8xf32>>
-// CHECK-DAG: [[BUF:%[0-9]+]] = memref.alloc() : memref<256x256xvector<8xf32>, 2>
-// CHECK-DAG: [[TAG:%[0-9]+]] = memref.alloc() : memref<1xi32>
+// CHECK-DAG: [[MEM:%[0-9a-zA-Z_]+]] = memref.alloc() : memref<256x256xvector<8xf32>>
+// CHECK-DAG: [[BUF:%[0-9a-zA-Z_]+]] = memref.alloc() : memref<256x256xvector<8xf32>, 2>
+// CHECK-DAG: [[TAG:%[0-9a-zA-Z_]+]] = memref.alloc() : memref<1xi32>
// CHECK: affine.dma_start [[MEM]][%{{.*}}, %{{.*}}], [[BUF]][%{{.*}}, %{{.*}}], [[TAG]][%{{.*}}], %{{.*}} : memref<256x256xvector<8xf32>>, memref<256x256xvector<8xf32>, 2>, memref<1xi32>
// CHECK-NEXT: affine.dma_wait [[TAG]][%{{.*}}], %{{.*}} : memref<1xi32>
// CHECK-NEXT: affine.for %{{.*}} = 0 to 256 {
@@ -441,10 +441,10 @@ func.func @relative_loop_bounds(%arg0: memref<1027xf32>) {
}
return
}
-// CHECK: [[BUF:%[0-9]+]] = memref.alloc() : memref<1027xf32, 2>
-// CHECK-NEXT: [[MEM:%[0-9]+]] = memref.alloc() : memref<1xi32>
+// CHECK: [[BUF:%[0-9a-zA-Z_]+]] = memref.alloc() : memref<1027xf32, 2>
+// CHECK-NEXT: [[MEM:%[0-9a-zA-Z_]+]] = memref.alloc() : memref<1xi32>
// CHECK-NEXT: affine.for %{{.*}} = 0 to 1024 {
-// CHECK-NEXT: affine.for %[[I2:.*]] = {{#map[0-9]+}}(%{{.*}}) to {{#map[0-9]+}}(%{{.*}}) {
+// CHECK-NEXT: affine.for %[[I2:.*]] = {{#map[0-9a-zA-Z_]+}}(%{{.*}}) to {{#map[0-9a-zA-Z_]+}}(%{{.*}}) {
// CHECK: affine.store %{{.*}}, [[BUF]][%[[I2]]] : memref<1027xf32, 2>
// CHECK-NEXT: }
// CHECK-NEXT: }
@@ -508,7 +508,7 @@ func.func @test_analysis_util(%arg0: memref<4x4x16x1xf32>, %arg1: memref<144x9xf
return %arg1, %arg2 : memref<144x9xf32>, memref<2xf32>
}
// CHECK: affine.for %{{.*}} = 0 to 9 step 3 {
-// CHECK: [[BUF:%[0-9]+]] = memref.alloc() : memref<2xf32, 2>
+// CHECK: [[BUF:%[0-9a-zA-Z_]+]] = memref.alloc() : memref<2xf32, 2>
// CHECK: affine.dma_start %{{.*}}[%{{.*}} floordiv 8], [[BUF]]
// CHECK: affine.dma_wait %{{.*}}[%{{.*}}], %{{.*}} : memref<1xi32>
// CHECK: affine.for %{{.*}} =
@@ -551,7 +551,7 @@ func.func @test_memref_bounds(%arg0: memref<4x4x16x1xvector<8x128xf32>>, %arg1:
func.func @load_store_same_memref(%arg0: memref<256x1024xf32>) {
// FAST-MEM-16KB: affine.for %{{.*}} = 0 to 256 step 4
affine.for %i0 = 0 to 256 step 4 {
- // FAST-MEM-16KB: [[BUF:%[0-9]+]] = memref.alloc() : memref<4x1024xf32, 2>
+ // FAST-MEM-16KB: [[BUF:%[0-9a-zA-Z_]+]] = memref.alloc() : memref<4x1024xf32, 2>
// FAST-MEM-16KB: affine.dma_start %{{.*}}
// FAST-MEM-16KB-NEXT: affine.dma_wait
// FAST-MEM-16KB: affine.for %{{.*}}
@@ -614,9 +614,9 @@ func.func @simple_matmul(%arg0: memref<8x8xvector<64xf32>>, %arg1: memref<8x8xve
// FAST-MEM-16KB: affine.dma_wait
// FAST-MEM-16KB: affine.dma_start %{{.*}}
// FAST-MEM-16KB: affine.dma_wait
-// FAST-MEM-16KB: affine.for %{{.*}} = #map{{[0-9]+}}(%{{.*}}) to #map{{[0-9]+}}(%{{.*}}) {
-// FAST-MEM-16KB-NEXT: affine.for %{{.*}} = #map{{[0-9]+}}(%{{.*}}) to #map{{[0-9]+}}(%{{.*}}) {
-// FAST-MEM-16KB-NEXT: affine.for %{{.*}} = #map{{[0-9]+}}(%{{.*}}) to #map{{[0-9]+}}(%{{.*}}) {
+// FAST-MEM-16KB: affine.for %{{.*}} = #map{{[0-9a-zA-Z_]+}}(%{{.*}}) to #map{{[0-9a-zA-Z_]+}}(%{{.*}}) {
+// FAST-MEM-16KB-NEXT: affine.for %{{.*}} = #map{{[0-9a-zA-Z_]+}}(%{{.*}}) to #map{{[0-9a-zA-Z_]+}}(%{{.*}}) {
+// FAST-MEM-16KB-NEXT: affine.for %{{.*}} = #map{{[0-9a-zA-Z_]+}}(%{{.*}}) to #map{{[0-9a-zA-Z_]+}}(%{{.*}}) {
// FAST-MEM-16KB: }
// FAST-MEM-16KB: }
// FAST-MEM-16KB: }
diff --git a/mlir/test/Dialect/Affine/scalrep.mlir b/mlir/test/Dialect/Affine/scalrep.mlir
index 867b3f68029cf..30ee888c9139f 100644
--- a/mlir/test/Dialect/Affine/scalrep.mlir
+++ b/mlir/test/Dialect/Affine/scalrep.mlir
@@ -185,7 +185,7 @@ func.func @store_load_no_fwd() {
affine.store %cf7, %m[%i0] : memref<10xf32>
affine.for %i1 = 0 to 10 {
affine.for %i2 = 0 to 10 {
- // CHECK: affine.load %{{[0-9]+}}
+ // CHECK: affine.load
%v0 = affine.load %m[%i2] : memref<10xf32>
%v1 = arith.addf %v0, %v0 : f32
}
diff --git a/mlir/test/Dialect/MemRef/canonicalize.mlir b/mlir/test/Dialect/MemRef/canonicalize.mlir
index fc52e9c7b750d..f20838a470175 100644
--- a/mlir/test/Dialect/MemRef/canonicalize.mlir
+++ b/mlir/test/Dialect/MemRef/canonicalize.mlir
@@ -263,12 +263,12 @@ func.func @dim_of_memref_reshape_i32(%arg0: memref<*xf32>, %arg1: memref<?xi32>)
// CHECK-LABEL: func @alloc_const_fold
func.func @alloc_const_fold() -> memref<?xf32> {
- // CHECK-NEXT: %0 = memref.alloc() : memref<4xf32>
+ // CHECK-NEXT: memref.alloc() : memref<4xf32>
%c4 = arith.constant 4 : index
%a = memref.alloc(%c4) : memref<?xf32>
- // CHECK-NEXT: %1 = memref.cast %0 : memref<4xf32> to memref<?xf32>
- // CHECK-NEXT: return %1 : memref<?xf32>
+ // CHECK-NEXT: memref.cast %{{.*}} : memref<4xf32> to memref<?xf32>
+ // CHECK-NEXT: return %{{.*}} : memref<?xf32>
return %a : memref<?xf32>
}
@@ -276,12 +276,12 @@ func.func @alloc_const_fold() -> memref<?xf32> {
// CHECK-LABEL: func @alloc_alignment_const_fold
func.func @alloc_alignment_const_fold() -> memref<?xf32> {
- // CHECK-NEXT: %0 = memref.alloc() {alignment = 4096 : i64} : memref<4xf32>
+ // CHECK-NEXT: memref.alloc() {alignment = 4096 : i64} : memref<4xf32>
%c4 = arith.constant 4 : index
%a = memref.alloc(%c4) {alignment = 4096 : i64} : memref<?xf32>
- // CHECK-NEXT: %1 = memref.cast %0 : memref<4xf32> to memref<?xf32>
- // CHECK-NEXT: return %1 : memref<?xf32>
+ // CHECK-NEXT: memref.cast %{{.*}} : memref<4xf32> to memref<?xf32>
+ // CHECK-NEXT: return %{{.*}} : memref<?xf32>
return %a : memref<?xf32>
}
@@ -825,7 +825,7 @@ func.func @canonicalize_rank_reduced_subview(%arg0 : memref<8x?xf32>,
// CHECK: %[[SUBVIEW:.+]] = memref.subview %[[ARG0]][0, 0] [1, %[[ARG1]]] [1, 1]
// CHECK-SAME: memref<8x?xf32> to memref<?xf32, strided<[1], offset: ?>>
-// ----
+// -----
// CHECK-LABEL: func @memref_realloc_dead
// CHECK-SAME: %[[SRC:[0-9a-z]+]]: memref<2xf32>
diff --git a/mlir/test/Dialect/MemRef/subview.mlir b/mlir/test/Dialect/MemRef/subview.mlir
index 3e3367e4bff9c..48af627b327fd 100644
--- a/mlir/test/Dialect/MemRef/subview.mlir
+++ b/mlir/test/Dialect/MemRef/subview.mlir
@@ -8,11 +8,13 @@
// CHECK-LABEL: func @memref_subview(%arg0
func.func @memref_subview(%arg0 : index, %arg1 : index, %arg2 : index) {
+ // CHECK-DAG: %[[c0:.*]] = arith.constant 0 : index
+ // CHECK-DAG: %[[c1:.*]] = arith.constant 1 : index
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%0 = memref.alloc() : memref<8x16x4xf32, strided<[64, 4, 1], offset: 0>>
- // CHECK: subview %0[%c0, %c0, %c0] [%arg0, %arg1, %arg2] [%c1, %c1, %c1] :
+ // CHECK: subview %{{.*}}[%[[c0]], %[[c0]], %[[c0]]] [%{{.*}}, %{{.*}}, %{{.*}}] [%[[c1]], %[[c1]], %[[c1]]] :
// CHECK-SAME: memref<8x16x4xf32, strided<[64, 4, 1]>>
// CHECK-SAME: to memref<?x?x?xf32, strided<[?, ?, ?], offset: ?>>
%1 = memref.subview %0[%c0, %c0, %c0][%arg0, %arg1, %arg2][%c1, %c1, %c1]
@@ -20,7 +22,7 @@ func.func @memref_subview(%arg0 : index, %arg1 : index, %arg2 : index) {
memref<?x?x?xf32, strided<[?, ?, ?], offset: ?>>
%2 = memref.alloc()[%arg2] : memref<64xf32, affine_map<(d0)[s0] -> (d0 + s0)>>
- // CHECK: memref.subview %2[%c1] [%arg0] [%c1] :
+ // CHECK: memref.subview %{{.*}}[%[[c1]]] [%{{.*}}] [%[[c1]]] :
// CHECK-SAME: memref<64xf32, #[[$BASE_MAP1]]>
// CHECK-SAME: to memref<?xf32, #[[$SUBVIEW_MAP1]]>
%3 = memref.subview %2[%c1][%arg0][%c1]
@@ -28,14 +30,14 @@ func.func @memref_subview(%arg0 : index, %arg1 : index, %arg2 : index) {
memref<?xf32, affine_map<(d0)[s0, s1] -> (d0 * s1 + s0)>>
%4 = memref.alloc() : memref<64x22xf32, strided<[22, 1]>>
- // CHECK: memref.subview %4[%c0, %c1] [%arg0, %arg1] [%c1, %c0] :
+ // CHECK: memref.subview %{{.*}}[%[[c0]], %[[c1]]] [%{{.*}}, %{{.*}}] [%[[c1]], %[[c0]]] :
// CHECK-SAME: memref<64x22xf32, strided<[22, 1]>>
// CHECK-SAME: to memref<?x?xf32, strided<[?, ?], offset: ?>>
%5 = memref.subview %4[%c0, %c1][%arg0, %arg1][%c1, %c0]
: memref<64x22xf32, strided<[22, 1], offset: 0>> to
memref<?x?xf32, strided<[?, ?], offset: ?>>
- // CHECK: memref.subview %0[0, 2, 0] [4, 4, 4] [1, 1, 1] :
+ // CHECK: memref.subview %{{.*}}[0, 2, 0] [4, 4, 4] [1, 1, 1] :
// CHECK-SAME: memref<8x16x4xf32, strided<[64, 4, 1]>>
// CHECK-SAME: to memref<4x4x4xf32, strided<[64, 4, 1], offset: 8>>
%6 = memref.subview %0[0, 2, 0][4, 4, 4][1, 1, 1]
@@ -63,29 +65,29 @@ func.func @memref_subview(%arg0 : index, %arg1 : index, %arg2 : index) {
: memref<16x4xf32> to memref<4x4xf32, strided<[8, 2], offset: ?>>
%12 = memref.alloc() : memref<1x9x1x4x1xf32, strided<[36, 36, 4, 4, 1]>>
- // CHECK: memref.subview %12[%arg1, %arg1, %arg1, %arg1, %arg1]
- // CHECK-SAME: [1, 9, 1, 4, 1] [%arg2, %arg2, %arg2, %arg2, %arg2] :
+ // CHECK: memref.subview
+ // CHECK-SAME: [1, 9, 1, 4, 1]
// CHECK-SAME: memref<1x9x1x4x1xf32, strided<[36, 36, 4, 4, 1]>> to memref<9x4xf32, strided<[?, ?], offset: ?>>
%13 = memref.subview %12[%arg1, %arg1, %arg1, %arg1, %arg1][1, 9, 1, 4, 1][%arg2, %arg2, %arg2, %arg2, %arg2] : memref<1x9x1x4x1xf32, strided<[36, 36, 4, 4, 1], offset: 0>> to memref<9x4xf32, strided<[?, ?], offset: ?>>
- // CHECK: memref.subview %12[%arg1, %arg1, %arg1, %arg1, %arg1]
- // CHECK-SAME: [1, 9, 1, 4, 1] [%arg2, %arg2, %arg2, %arg2, %arg2] :
+ // CHECK: memref.subview
+ // CHECK-SAME: [1, 9, 1, 4, 1]
// CHECK-SAME: memref<1x9x1x4x1xf32, strided<[36, 36, 4, 4, 1]>> to memref<1x9x4xf32, strided<[?, ?, ?], offset: ?>>
%14 = memref.subview %12[%arg1, %arg1, %arg1, %arg1, %arg1][1, 9, 1, 4, 1][%arg2, %arg2, %arg2, %arg2, %arg2] : memref<1x9x1x4x1xf32, strided<[36, 36, 4, 4, 1], offset: 0>> to memref<1x9x4xf32, strided<[?, ?, ?], offset: ?>>
%15 = memref.alloc(%arg1, %arg2)[%c0, %c1, %arg1, %arg0, %arg0, %arg2, %arg2] : memref<1x?x5x1x?x1xf32, strided<[?, ?, ?, ?, ?, ?], offset: ?>>
- // CHECK: memref.subview %15[0, 0, 0, 0, 0, 0] [1, %arg1, 5, 1, %arg2, 1] [1, 1, 1, 1, 1, 1] :
+ // CHECK: memref.subview %{{.*}}[0, 0, 0, 0, 0, 0] [1, %{{.*}}, 5, 1, %{{.*}}, 1] [1, 1, 1, 1, 1, 1] :
// CHECK-SAME: memref<1x?x5x1x?x1xf32, strided<[?, ?, ?, ?, ?, ?], offset: ?>> to memref<?x5x?xf32, strided<[?, ?, ?], offset: ?>>
%16 = memref.subview %15[0, 0, 0, 0, 0, 0][1, %arg1, 5, 1, %arg2, 1][1, 1, 1, 1, 1, 1] : memref<1x?x5x1x?x1xf32, strided<[?, ?, ?, ?, ?, ?], offset: ?>> to memref<?x5x?xf32, strided<[?, ?, ?], offset: ?>>
- // CHECK: memref.subview %15[%arg1, %arg1, %arg1, %arg1, %arg1, %arg1] [1, %arg1, 5, 1, %arg2, 1] [1, 1, 1, 1, 1, 1] :
+ // CHECK: memref.subview %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}] [1, %{{.*}}, 5, 1, %{{.*}}, 1] [1, 1, 1, 1, 1, 1] :
// CHECK-SAME: memref<1x?x5x1x?x1xf32, strided<[?, ?, ?, ?, ?, ?], offset: ?>> to memref<?x5x?x1xf32, strided<[?, ?, ?, ?], offset: ?>>
%17 = memref.subview %15[%arg1, %arg1, %arg1, %arg1, %arg1, %arg1][1, %arg1, 5, 1, %arg2, 1][1, 1, 1, 1, 1, 1] : memref<1x?x5x1x?x1xf32, strided<[?, ?, ?, ?, ?, ?], offset: ?>> to memref<?x5x?x1xf32, strided<[?, ?, ?, ?], offset: ?>>
%18 = memref.alloc() : memref<1x8xf32>
- // CHECK: memref.subview %18[0, 0] [1, 8] [1, 1] : memref<1x8xf32> to memref<8xf32>
+ // CHECK: memref.subview %{{.*}}[0, 0] [1, 8] [1, 1] : memref<1x8xf32> to memref<8xf32>
%19 = memref.subview %18[0, 0][1, 8][1, 1] : memref<1x8xf32> to memref<8xf32>
%20 = memref.alloc() : memref<8x16x4xf32>
- // CHECK: memref.subview %20[0, 0, 0] [1, 16, 4] [1, 1, 1] : memref<8x16x4xf32> to memref<16x4xf32>
+ // CHECK: memref.subview %{{.*}}[0, 0, 0] [1, 16, 4] [1, 1, 1] : memref<8x16x4xf32> to memref<16x4xf32>
%21 = memref.subview %20[0, 0, 0][1, 16, 4][1, 1, 1] : memref<8x16x4xf32> to memref<16x4xf32>
%22 = memref.subview %20[3, 4, 2][1, 6, 3][1, 1, 1] : memref<8x16x4xf32> to memref<6x3xf32, strided<[4, 1], offset: 210>>
diff --git a/mlir/test/Dialect/Vector/vector-transfer-full-partial-split.mlir b/mlir/test/Dialect/Vector/vector-transfer-full-partial-split.mlir
index 611f1bfbea6c1..a973aec80b692 100644
--- a/mlir/test/Dialect/Vector/vector-transfer-full-partial-split.mlir
+++ b/mlir/test/Dialect/Vector/vector-transfer-full-partial-split.mlir
@@ -10,14 +10,14 @@
// LINALG-DAG: #[[$bounds_map_8:.*]] = affine_map<(d0, d1, d2) -> (d0 - d1, 8)>
// CHECK-LABEL: split_vector_transfer_read_2d(
-// CHECK-SAME: %[[A:[a-zA-Z0-9]*]]: memref
-// CHECK-SAME: %[[i:[a-zA-Z0-9]*]]: index
-// CHECK-SAME: %[[j:[a-zA-Z0-9]*]]: index
+// CHECK-SAME: %[[A:[a-zA-Z0-9_]*]]: memref
+// CHECK-SAME: %[[i:[a-zA-Z0-9_]*]]: index
+// CHECK-SAME: %[[j:[a-zA-Z0-9_]*]]: index
// LINALG-LABEL: split_vector_transfer_read_2d(
-// LINALG-SAME: %[[A:[a-zA-Z0-9]*]]: memref
-// LINALG-SAME: %[[i:[a-zA-Z0-9]*]]: index
-// LINALG-SAME: %[[j:[a-zA-Z0-9]*]]: index
+// LINALG-SAME: %[[A:[a-zA-Z0-9_]*]]: memref
+// LINALG-SAME: %[[i:[a-zA-Z0-9_]*]]: index
+// LINALG-SAME: %[[j:[a-zA-Z0-9_]*]]: index
func.func @split_vector_transfer_read_2d(%A: memref<?x8xf32>, %i: index, %j: index) -> vector<4x8xf32> {
%c0 = arith.constant 0 : index
%f0 = arith.constant 0.0 : f32
@@ -94,14 +94,14 @@ func.func @split_vector_transfer_read_2d(%A: memref<?x8xf32>, %i: index, %j: ind
}
// CHECK-LABEL: split_vector_transfer_read_strided_2d(
-// CHECK-SAME: %[[A:[a-zA-Z0-9]*]]: memref
-// CHECK-SAME: %[[i:[a-zA-Z0-9]*]]: index
-// CHECK-SAME: %[[j:[a-zA-Z0-9]*]]: index
+// CHECK-SAME: %[[A:[a-zA-Z0-9_]*]]: memref
+// CHECK-SAME: %[[i:[a-zA-Z0-9_]*]]: index
+// CHECK-SAME: %[[j:[a-zA-Z0-9_]*]]: index
// LINALG-LABEL: split_vector_transfer_read_strided_2d(
-// LINALG-SAME: %[[A:[a-zA-Z0-9]*]]: memref
-// LINALG-SAME: %[[i:[a-zA-Z0-9]*]]: index
-// LINALG-SAME: %[[j:[a-zA-Z0-9]*]]: index
+// LINALG-SAME: %[[A:[a-zA-Z0-9_]*]]: memref
+// LINALG-SAME: %[[i:[a-zA-Z0-9_]*]]: index
+// LINALG-SAME: %[[j:[a-zA-Z0-9_]*]]: index
func.func @split_vector_transfer_read_strided_2d(
%A: memref<7x8xf32, strided<[?, 1], offset: ?>>,
%i: index, %j: index) -> vector<4x8xf32> {
@@ -416,9 +416,9 @@ func.func private @fake_side_effecting_fun(%0: vector<2x2xf32>) -> ()
func.func @transfer_read_within_scf_for(%A : memref<?x?xf32>, %lb : index, %ub : index, %step : index) {
%c0 = arith.constant 0 : index
%f0 = arith.constant 0.0 : f32
- // CHECK: alloca
+ // CHECK: memref.alloca
// CHECK: scf.for
- // CHECK-NOT: alloca
+ // CHECK-NOT: memref.alloca
scf.for %i = %lb to %ub step %step {
%0 = vector.transfer_read %A[%c0, %c0], %f0 : memref<?x?xf32>, vector<2x2xf32>
func.call @fake_side_effecting_fun(%0) : (vector<2x2xf32>) -> ()
diff --git a/mlir/test/Dialect/Vector/vector-warp-distribute.mlir b/mlir/test/Dialect/Vector/vector-warp-distribute.mlir
index 3bd047a6d08e9..3978d94f377b4 100644
--- a/mlir/test/Dialect/Vector/vector-warp-distribute.mlir
+++ b/mlir/test/Dialect/Vector/vector-warp-distribute.mlir
@@ -73,7 +73,7 @@ func.func @rewrite_warp_op_to_scf_if(%laneid: index,
// CHECK-D: vector.yield %{{.*}}, %{{.*}} : vector<64xf32>, vector<32xf32>
// CHECK-D-DAG: vector.transfer_write %[[R]]#1, %{{.*}}[%{{.*}}] {in_bounds = [true]} : vector<1xf32>, memref<128xf32
// CHECK-D-DAG: %[[ID1:.*]] = affine.apply #[[MAP1]]()[%{{.*}}]
-// CHECK-D-DAG: vector.transfer_write %[[R]]#0, %2[%[[ID1]]] {in_bounds = [true]} : vector<2xf32>, memref<128xf32
+// CHECK-D-DAG: vector.transfer_write %[[R]]#0, %{{.*}}[%[[ID1]]] {in_bounds = [true]} : vector<2xf32>, memref<128xf32
// CHECK-DIST-AND-PROP-NOT: vector.warp_execute_on_lane_0
// CHECK-DIST-AND-PROP: vector.transfer_read {{.*}} vector<1xf32>
diff --git a/mlir/test/IR/core-ops.mlir b/mlir/test/IR/core-ops.mlir
index 210612bec7a3f..7f661dc180a98 100644
--- a/mlir/test/IR/core-ops.mlir
+++ b/mlir/test/IR/core-ops.mlir
@@ -234,16 +234,16 @@ func.func @calls(%arg0: i32) {
// CHECK-LABEL: func @memref_cast(%arg0
func.func @memref_cast(%arg0: memref<4xf32>, %arg1 : memref<?xf32>, %arg2 : memref<64x16x4xf32, strided<[64, 4, 1], offset: 0>>) {
- // CHECK: %0 = memref.cast %arg0 : memref<4xf32> to memref<?xf32>
+ // CHECK: memref.cast %{{.*}} : memref<4xf32> to memref<?xf32>
%0 = memref.cast %arg0 : memref<4xf32> to memref<?xf32>
- // CHECK: %1 = memref.cast %arg1 : memref<?xf32> to memref<4xf32>
+ // CHECK: memref.cast %{{.*}} : memref<?xf32> to memref<4xf32>
%1 = memref.cast %arg1 : memref<?xf32> to memref<4xf32>
- // CHECK: {{%.*}} = memref.cast %arg2 : memref<64x16x4xf32, strided<[64, 4, 1]>> to memref<64x16x4xf32, strided<[?, ?, ?], offset: ?>>
+ // CHECK: memref.cast %{{.*}} : memref<64x16x4xf32, strided<[64, 4, 1]>> to memref<64x16x4xf32, strided<[?, ?, ?], offset: ?>>
%2 = memref.cast %arg2 : memref<64x16x4xf32, strided<[64, 4, 1], offset: 0>> to memref<64x16x4xf32, strided<[?, ?, ?], offset: ?>>
- // CHECK: {{%.*}} = memref.cast {{%.*}} : memref<64x16x4xf32, strided<[?, ?, ?], offset: ?>> to memref<64x16x4xf32, strided<[64, 4, 1]>>
+ // CHECK: memref.cast {{%.*}} : memref<64x16x4xf32, strided<[?, ?, ?], offset: ?>> to memref<64x16x4xf32, strided<[64, 4, 1]>>
%3 = memref.cast %2 : memref<64x16x4xf32, strided<[?, ?, ?], offset: ?>> to memref<64x16x4xf32, strided<[64, 4, 1], offset: 0>>
// CHECK: memref.cast %{{.*}} : memref<4xf32> to memref<*xf32>
@@ -263,15 +263,15 @@ func.func private @unranked_memref_roundtrip(memref<*xf32, 4>)
func.func @memref_view(%arg0 : index, %arg1 : index, %arg2 : index) {
%0 = memref.alloc() : memref<2048xi8>
// Test two dynamic sizes and dynamic offset.
- // CHECK: %{{.*}} = memref.view %0[%arg2][%arg0, %arg1] : memref<2048xi8> to memref<?x?xf32>
+ // CHECK: memref.view {{.*}} : memref<2048xi8> to memref<?x?xf32>
%1 = memref.view %0[%arg2][%arg0, %arg1] : memref<2048xi8> to memref<?x?xf32>
// Test one dynamic size and dynamic offset.
- // CHECK: %{{.*}} = memref.view %0[%arg2][%arg1] : memref<2048xi8> to memref<4x?xf32>
+ // CHECK: memref.view {{.*}} : memref<2048xi8> to memref<4x?xf32>
%3 = memref.view %0[%arg2][%arg1] : memref<2048xi8> to memref<4x?xf32>
// Test static sizes and static offset.
- // CHECK: %{{.*}} = memref.view %0[{{.*}}][] : memref<2048xi8> to memref<64x4xf32>
+ // CHECK: memref.view {{.*}} : memref<2048xi8> to memref<64x4xf32>
%c0 = arith.constant 0: index
%5 = memref.view %0[%c0][] : memref<2048xi8> to memref<64x4xf32>
return
diff --git a/mlir/test/IR/memory-ops.mlir b/mlir/test/IR/memory-ops.mlir
index 8fd32914769a8..fbbf36d6bc210 100644
--- a/mlir/test/IR/memory-ops.mlir
+++ b/mlir/test/IR/memory-ops.mlir
@@ -6,27 +6,27 @@
func.func @alloc() {
^bb0:
// Test simple alloc.
- // CHECK: %0 = memref.alloc() : memref<1024x64xf32, 1>
+ // CHECK: %{{.*}} = memref.alloc() : memref<1024x64xf32, 1>
%0 = memref.alloc() : memref<1024x64xf32, affine_map<(d0, d1) -> (d0, d1)>, 1>
%c0 = "arith.constant"() {value = 0: index} : () -> index
%c1 = "arith.constant"() {value = 1: index} : () -> index
// Test alloc with dynamic dimensions.
- // CHECK: %1 = memref.alloc(%c0, %c1) : memref<?x?xf32, 1>
+ // CHECK: %{{.*}} = memref.alloc(%{{.*}}, %{{.*}}) : memref<?x?xf32, 1>
%1 = memref.alloc(%c0, %c1) : memref<?x?xf32, affine_map<(d0, d1) -> (d0, d1)>, 1>
// Test alloc with no dynamic dimensions and one symbol.
- // CHECK: %2 = memref.alloc()[%c0] : memref<2x4xf32, #map, 1>
+ // CHECK: %{{.*}} = memref.alloc()[%{{.*}}] : memref<2x4xf32, #map, 1>
%2 = memref.alloc()[%c0] : memref<2x4xf32, affine_map<(d0, d1)[s0] -> ((d0 + s0), d1)>, 1>
// Test alloc with dynamic dimensions and one symbol.
- // CHECK: %3 = memref.alloc(%c1)[%c0] : memref<2x?xf32, #map, 1>
+ // CHECK: %{{.*}} = memref.alloc(%{{.*}})[%{{.*}}] : memref<2x?xf32, #map, 1>
%3 = memref.alloc(%c1)[%c0] : memref<2x?xf32, affine_map<(d0, d1)[s0] -> (d0 + s0, d1)>, 1>
// Alloc with no mappings.
// b/116054838 Parser crash while parsing ill-formed AllocOp
- // CHECK: %4 = memref.alloc() : memref<2xi32>
+ // CHECK: %{{.*}} = memref.alloc() : memref<2xi32>
%4 = memref.alloc() : memref<2 x i32>
// CHECK: return
@@ -37,26 +37,26 @@ func.func @alloc() {
func.func @alloca() {
^bb0:
// Test simple alloc.
- // CHECK: %0 = memref.alloca() : memref<1024x64xf32, 1>
+ // CHECK: %{{.*}} = memref.alloca() : memref<1024x64xf32, 1>
%0 = memref.alloca() : memref<1024x64xf32, affine_map<(d0, d1) -> (d0, d1)>, 1>
%c0 = "arith.constant"() {value = 0: index} : () -> index
%c1 = "arith.constant"() {value = 1: index} : () -> index
// Test alloca with dynamic dimensions.
- // CHECK: %1 = memref.alloca(%c0, %c1) : memref<?x?xf32, 1>
+ // CHECK: %{{.*}} = memref.alloca(%{{.*}}, %{{.*}}) : memref<?x?xf32, 1>
%1 = memref.alloca(%c0, %c1) : memref<?x?xf32, affine_map<(d0, d1) -> (d0, d1)>, 1>
// Test alloca with no dynamic dimensions and one symbol.
- // CHECK: %2 = memref.alloca()[%c0] : memref<2x4xf32, #map, 1>
+ // CHECK: %{{.*}} = memref.alloca()[%{{.*}}] : memref<2x4xf32, #map, 1>
%2 = memref.alloca()[%c0] : memref<2x4xf32, affine_map<(d0, d1)[s0] -> ((d0 + s0), d1)>, 1>
// Test alloca with dynamic dimensions and one symbol.
- // CHECK: %3 = memref.alloca(%c1)[%c0] : memref<2x?xf32, #map, 1>
+ // CHECK: %{{.*}} = memref.alloca(%{{.*}})[%{{.*}}] : memref<2x?xf32, #map, 1>
%3 = memref.alloca(%c1)[%c0] : memref<2x?xf32, affine_map<(d0, d1)[s0] -> (d0 + s0, d1)>, 1>
// Alloca with no mappings, but with alignment.
- // CHECK: %4 = memref.alloca() {alignment = 64 : i64} : memref<2xi32>
+ // CHECK: %{{.*}} = memref.alloca() {alignment = 64 : i64} : memref<2xi32>
%4 = memref.alloca() {alignment = 64} : memref<2 x i32>
return
@@ -65,10 +65,10 @@ func.func @alloca() {
// CHECK-LABEL: func @dealloc() {
func.func @dealloc() {
^bb0:
- // CHECK: %0 = memref.alloc() : memref<1024x64xf32>
+ // CHECK: %{{.*}} = memref.alloc() : memref<1024x64xf32>
%0 = memref.alloc() : memref<1024x64xf32, affine_map<(d0, d1) -> (d0, d1)>, 0>
- // CHECK: memref.dealloc %0 : memref<1024x64xf32>
+ // CHECK: memref.dealloc %{{.*}} : memref<1024x64xf32>
memref.dealloc %0 : memref<1024x64xf32, affine_map<(d0, d1) -> (d0, d1)>, 0>
return
}
@@ -76,16 +76,16 @@ func.func @dealloc() {
// CHECK-LABEL: func @load_store
func.func @load_store() {
^bb0:
- // CHECK: %0 = memref.alloc() : memref<1024x64xf32, 1>
+ // CHECK: %{{.*}} = memref.alloc() : memref<1024x64xf32, 1>
%0 = memref.alloc() : memref<1024x64xf32, affine_map<(d0, d1) -> (d0, d1)>, 1>
%1 = arith.constant 0 : index
%2 = arith.constant 1 : index
- // CHECK: %1 = memref.load %0[%c0, %c1] : memref<1024x64xf32, 1>
+ // CHECK: %{{.*}} = memref.load %{{.*}}[%{{.*}}, %{{.*}}] : memref<1024x64xf32, 1>
%3 = memref.load %0[%1, %2] : memref<1024x64xf32, affine_map<(d0, d1) -> (d0, d1)>, 1>
- // CHECK: memref.store %1, %0[%c0, %c1] : memref<1024x64xf32, 1>
+ // CHECK: memref.store %{{.*}}, %{{.*}}[%{{.*}}, %{{.*}}] : memref<1024x64xf32, 1>
memref.store %3, %0[%1, %2] : memref<1024x64xf32, affine_map<(d0, d1) -> (d0, d1)>, 1>
return
@@ -105,14 +105,14 @@ func.func @dma_ops() {
memref.dma_start %A[%c0], %Ah[%c0], %num_elements, %tag[%c0] : memref<256 x f32>, memref<256 x f32, 1>, memref<1 x f32>
memref.dma_wait %tag[%c0], %num_elements : memref<1 x f32>
- // CHECK: dma_start %0[%c0], %1[%c0], %c256, %2[%c0] : memref<256xf32>, memref<256xf32, 1>, memref<1xf32>
- // CHECK-NEXT: dma_wait %2[%c0], %c256 : memref<1xf32>
+ // CHECK: dma_start %{{.*}}[%{{.*}}], %{{.*}}[%{{.*}}], %{{.*}}, %{{.*}}[%{{.*}}] : memref<256xf32>, memref<256xf32, 1>, memref<1xf32>
+ // CHECK-NEXT: dma_wait %{{.*}}[%{{.*}}], %{{.*}} : memref<1xf32>
// DMA with strides
memref.dma_start %A[%c0], %Ah[%c0], %num_elements, %tag[%c0], %stride, %elt_per_stride : memref<256 x f32>, memref<256 x f32, 1>, memref<1 x f32>
memref.dma_wait %tag[%c0], %num_elements : memref<1 x f32>
- // CHECK-NEXT: dma_start %0[%c0], %1[%c0], %c256, %2[%c0], %c32, %c16 : memref<256xf32>, memref<256xf32, 1>, memref<1xf32>
- // CHECK-NEXT: dma_wait %2[%c0], %c256 : memref<1xf32>
+ // CHECK-NEXT: dma_start %{{.*}}[%{{.*}}], %{{.*}}[%{{.*}}], %{{.*}}, %{{.*}}[%{{.*}}], %{{.*}}, %{{.*}} : memref<256xf32>, memref<256xf32, 1>, memref<1xf32>
+ // CHECK-NEXT: dma_wait %{{.*}}[%{{.*}}], %{{.*}} : memref<1xf32>
return
}
diff --git a/mlir/test/Transforms/canonicalize.mlir b/mlir/test/Transforms/canonicalize.mlir
index 827ca899139d2..20538cd6262d3 100644
--- a/mlir/test/Transforms/canonicalize.mlir
+++ b/mlir/test/Transforms/canonicalize.mlir
@@ -464,8 +464,8 @@ func.func @dyn_shape_fold(%L : index, %M : index) -> (memref<4 x ? x 8 x ? x ? x
affine.for %i = 0 to %L {
// CHECK-NEXT: affine.for
affine.for %j = 0 to 10 {
- // CHECK-NEXT: memref.load %0[%arg2, %arg3] : memref<?x1024xf32>
- // CHECK-NEXT: memref.store %{{.*}}, %1[%c0, %c0, %arg2, %arg3, %c0] : memref<4x1024x8x512x?xf32>
+ // CHECK-NEXT: memref.load %{{.*}}[%arg2, %arg3] : memref<?x1024xf32>
+ // CHECK-NEXT: memref.store %{{.*}}, %{{.*}}[%c0, %c0, %arg2, %arg3, %c0] : memref<4x1024x8x512x?xf32>
%v = memref.load %a[%i, %j] : memref<?x?xf32>
memref.store %v, %b[%zero, %zero, %i, %j, %zero] : memref<4x?x8x?x?xf32>
}
diff --git a/mlir/test/Transforms/cse.mlir b/mlir/test/Transforms/cse.mlir
index 4fec94105b41f..7a8de218bdd82 100644
--- a/mlir/test/Transforms/cse.mlir
+++ b/mlir/test/Transforms/cse.mlir
@@ -5,62 +5,62 @@
// CHECK-LABEL: @simple_constant
func.func @simple_constant() -> (i32, i32) {
- // CHECK-NEXT: %c1_i32 = arith.constant 1 : i32
+ // CHECK-NEXT: %[[VAR_c1_i32:.*]] = arith.constant 1 : i32
%0 = arith.constant 1 : i32
- // CHECK-NEXT: return %c1_i32, %c1_i32 : i32, i32
+ // CHECK-NEXT: return %[[VAR_c1_i32]], %[[VAR_c1_i32]] : i32, i32
%1 = arith.constant 1 : i32
return %0, %1 : i32, i32
}
// CHECK-LABEL: @basic
func.func @basic() -> (index, index) {
- // CHECK: %c0 = arith.constant 0 : index
+ // CHECK: %[[VAR_c0:[0-9a-zA-Z_]+]] = arith.constant 0 : index
%c0 = arith.constant 0 : index
%c1 = arith.constant 0 : index
- // CHECK-NEXT: %0 = affine.apply #[[$MAP]](%c0)
+ // CHECK-NEXT: %[[VAR_0:[0-9a-zA-Z_]+]] = affine.apply #[[$MAP]](%[[VAR_c0]])
%0 = affine.apply #map0(%c0)
%1 = affine.apply #map0(%c1)
- // CHECK-NEXT: return %0, %0 : index, index
+ // CHECK-NEXT: return %[[VAR_0]], %[[VAR_0]] : index, index
return %0, %1 : index, index
}
// CHECK-LABEL: @many
func.func @many(f32, f32) -> (f32) {
^bb0(%a : f32, %b : f32):
- // CHECK-NEXT: %0 = arith.addf %arg0, %arg1 : f32
+ // CHECK-NEXT: %[[VAR_0:[0-9a-zA-Z_]+]] = arith.addf %{{.*}}, %{{.*}} : f32
%c = arith.addf %a, %b : f32
%d = arith.addf %a, %b : f32
%e = arith.addf %a, %b : f32
%f = arith.addf %a, %b : f32
- // CHECK-NEXT: %1 = arith.addf %0, %0 : f32
+ // CHECK-NEXT: %[[VAR_1:[0-9a-zA-Z_]+]] = arith.addf %[[VAR_0]], %[[VAR_0]] : f32
%g = arith.addf %c, %d : f32
%h = arith.addf %e, %f : f32
%i = arith.addf %c, %e : f32
- // CHECK-NEXT: %2 = arith.addf %1, %1 : f32
+ // CHECK-NEXT: %[[VAR_2:[0-9a-zA-Z_]+]] = arith.addf %[[VAR_1]], %[[VAR_1]] : f32
%j = arith.addf %g, %h : f32
%k = arith.addf %h, %i : f32
- // CHECK-NEXT: %3 = arith.addf %2, %2 : f32
+ // CHECK-NEXT: %[[VAR_3:[0-9a-zA-Z_]+]] = arith.addf %[[VAR_2]], %[[VAR_2]] : f32
%l = arith.addf %j, %k : f32
- // CHECK-NEXT: return %3 : f32
+ // CHECK-NEXT: return %[[VAR_3]] : f32
return %l : f32
}
 /// Check that operations are not eliminated if they have different operands.
 // CHECK-LABEL: @different_ops
 func.func @different_ops() -> (i32, i32) {
- // CHECK: %c0_i32 = arith.constant 0 : i32
- // CHECK: %c1_i32 = arith.constant 1 : i32
+ // CHECK: %[[VAR_c0_i32:[0-9a-zA-Z_]+]] = arith.constant 0 : i32
+ // CHECK: %[[VAR_c1_i32:[0-9a-zA-Z_]+]] = arith.constant 1 : i32
%0 = arith.constant 0 : i32
%1 = arith.constant 1 : i32
- // CHECK-NEXT: return %c0_i32, %c1_i32 : i32, i32
+ // CHECK-NEXT: return %[[VAR_c0_i32]], %[[VAR_c1_i32]] : i32, i32
return %0, %1 : i32, i32
}
@@ -68,12 +68,12 @@ func.func @different_ops() -> (i32, i32) {
/// types.
 // CHECK-LABEL: @different_results
 func.func @different_results(%arg0: tensor<*xf32>) -> (tensor<?x?xf32>, tensor<4x?xf32>) {
- // CHECK: %0 = tensor.cast %arg0 : tensor<*xf32> to tensor<?x?xf32>
- // CHECK-NEXT: %1 = tensor.cast %arg0 : tensor<*xf32> to tensor<4x?xf32>
+ // CHECK: %[[VAR_0:[0-9a-zA-Z_]+]] = tensor.cast %{{.*}} : tensor<*xf32> to tensor<?x?xf32>
+ // CHECK-NEXT: %[[VAR_1:[0-9a-zA-Z_]+]] = tensor.cast %{{.*}} : tensor<*xf32> to tensor<4x?xf32>
%0 = tensor.cast %arg0 : tensor<*xf32> to tensor<?x?xf32>
%1 = tensor.cast %arg0 : tensor<*xf32> to tensor<4x?xf32>
- // CHECK-NEXT: return %0, %1 : tensor<?x?xf32>, tensor<4x?xf32>
+ // CHECK-NEXT: return %[[VAR_0]], %[[VAR_1]] : tensor<?x?xf32>, tensor<4x?xf32>
return %0, %1 : tensor<?x?xf32>, tensor<4x?xf32>
}
@@ -81,28 +81,28 @@ func.func @different_results(%arg0: tensor<*xf32>) -> (tensor<?x?xf32>, tensor<4
 // CHECK-LABEL: @different_attributes
 func.func @different_attributes(index, index) -> (i1, i1, i1) {
^bb0(%a : index, %b : index):
- // CHECK: %0 = arith.cmpi slt, %arg0, %arg1 : index
+ // CHECK: %[[VAR_0:[0-9a-zA-Z_]+]] = arith.cmpi slt, %{{.*}}, %{{.*}} : index
%0 = arith.cmpi slt, %a, %b : index
- // CHECK-NEXT: %1 = arith.cmpi ne, %arg0, %arg1 : index
+ // CHECK-NEXT: %[[VAR_1:[0-9a-zA-Z_]+]] = arith.cmpi ne, %{{.*}}, %{{.*}} : index
/// Predicate 1 means inequality comparison.
%1 = arith.cmpi ne, %a, %b : index
%2 = "arith.cmpi"(%a, %b) {predicate = 1} : (index, index) -> i1
- // CHECK-NEXT: return %0, %1, %1 : i1, i1, i1
+ // CHECK-NEXT: return %[[VAR_0]], %[[VAR_1]], %[[VAR_1]] : i1, i1, i1
return %0, %1, %2 : i1, i1, i1
}
/// Check that operations with side effects are not eliminated.
// CHECK-LABEL: @side_effect
func.func @side_effect() -> (memref<2x1xf32>, memref<2x1xf32>) {
- // CHECK: %0 = memref.alloc() : memref<2x1xf32>
+ // CHECK: %[[VAR_0:[0-9a-zA-Z_]+]] = memref.alloc() : memref<2x1xf32>
%0 = memref.alloc() : memref<2x1xf32>
- // CHECK-NEXT: %1 = memref.alloc() : memref<2x1xf32>
+ // CHECK-NEXT: %[[VAR_1:[0-9a-zA-Z_]+]] = memref.alloc() : memref<2x1xf32>
%1 = memref.alloc() : memref<2x1xf32>
- // CHECK-NEXT: return %0, %1 : memref<2x1xf32>, memref<2x1xf32>
+ // CHECK-NEXT: return %[[VAR_0]], %[[VAR_1]] : memref<2x1xf32>, memref<2x1xf32>
return %0, %1 : memref<2x1xf32>, memref<2x1xf32>
}
@@ -110,12 +110,12 @@ func.func @side_effect() -> (memref<2x1xf32>, memref<2x1xf32>) {
/// tree.
// CHECK-LABEL: @down_propagate_for
func.func @down_propagate_for() {
- // CHECK: %c1_i32 = arith.constant 1 : i32
+ // CHECK: %[[VAR_c1_i32:[0-9a-zA-Z_]+]] = arith.constant 1 : i32
%0 = arith.constant 1 : i32
// CHECK-NEXT: affine.for {{.*}} = 0 to 4 {
affine.for %i = 0 to 4 {
- // CHECK-NEXT: "foo"(%c1_i32, %c1_i32) : (i32, i32) -> ()
+ // CHECK-NEXT: "foo"(%[[VAR_c1_i32]], %[[VAR_c1_i32]]) : (i32, i32) -> ()
%1 = arith.constant 1 : i32
"foo"(%0, %1) : (i32, i32) -> ()
}
@@ -124,17 +124,17 @@ func.func @down_propagate_for() {
// CHECK-LABEL: @down_propagate
func.func @down_propagate() -> i32 {
- // CHECK-NEXT: %c1_i32 = arith.constant 1 : i32
+ // CHECK-NEXT: %[[VAR_c1_i32:[0-9a-zA-Z_]+]] = arith.constant 1 : i32
%0 = arith.constant 1 : i32
- // CHECK-NEXT: %true = arith.constant true
+ // CHECK-NEXT: %[[VAR_true:[0-9a-zA-Z_]+]] = arith.constant true
%cond = arith.constant true
- // CHECK-NEXT: cf.cond_br %true, ^bb1, ^bb2(%c1_i32 : i32)
+ // CHECK-NEXT: cf.cond_br %[[VAR_true]], ^bb1, ^bb2(%[[VAR_c1_i32]] : i32)
cf.cond_br %cond, ^bb1, ^bb2(%0 : i32)
^bb1: // CHECK: ^bb1:
- // CHECK-NEXT: cf.br ^bb2(%c1_i32 : i32)
+ // CHECK-NEXT: cf.br ^bb2(%[[VAR_c1_i32]] : i32)
%1 = arith.constant 1 : i32
cf.br ^bb2(%1 : i32)
@@ -147,44 +147,44 @@ func.func @down_propagate() -> i32 {
func.func @up_propagate_for() -> i32 {
// CHECK: affine.for {{.*}} = 0 to 4 {
affine.for %i = 0 to 4 {
- // CHECK-NEXT: %c1_i32_0 = arith.constant 1 : i32
- // CHECK-NEXT: "foo"(%c1_i32_0) : (i32) -> ()
+ // CHECK-NEXT: %[[VAR_c1_i32_0:[0-9a-zA-Z_]+]] = arith.constant 1 : i32
+ // CHECK-NEXT: "foo"(%[[VAR_c1_i32_0]]) : (i32) -> ()
%0 = arith.constant 1 : i32
"foo"(%0) : (i32) -> ()
}
- // CHECK: %c1_i32 = arith.constant 1 : i32
- // CHECK-NEXT: return %c1_i32 : i32
+ // CHECK: %[[VAR_c1_i32:[0-9a-zA-Z_]+]] = arith.constant 1 : i32
+ // CHECK-NEXT: return %[[VAR_c1_i32]] : i32
%1 = arith.constant 1 : i32
return %1 : i32
}
// CHECK-LABEL: func @up_propagate
func.func @up_propagate() -> i32 {
- // CHECK-NEXT: %c0_i32 = arith.constant 0 : i32
+ // CHECK-NEXT: %[[VAR_c0_i32:[0-9a-zA-Z_]+]] = arith.constant 0 : i32
%0 = arith.constant 0 : i32
- // CHECK-NEXT: %true = arith.constant true
+ // CHECK-NEXT: %[[VAR_true:[0-9a-zA-Z_]+]] = arith.constant true
%cond = arith.constant true
- // CHECK-NEXT: cf.cond_br %true, ^bb1, ^bb2(%c0_i32 : i32)
+ // CHECK-NEXT: cf.cond_br %[[VAR_true]], ^bb1, ^bb2(%[[VAR_c0_i32]] : i32)
cf.cond_br %cond, ^bb1, ^bb2(%0 : i32)
^bb1: // CHECK: ^bb1:
- // CHECK-NEXT: %c1_i32 = arith.constant 1 : i32
+ // CHECK-NEXT: %[[VAR_c1_i32:[0-9a-zA-Z_]+]] = arith.constant 1 : i32
%1 = arith.constant 1 : i32
- // CHECK-NEXT: cf.br ^bb2(%c1_i32 : i32)
+ // CHECK-NEXT: cf.br ^bb2(%[[VAR_c1_i32]] : i32)
cf.br ^bb2(%1 : i32)
^bb2(%arg : i32): // CHECK: ^bb2
- // CHECK-NEXT: %c1_i32_0 = arith.constant 1 : i32
+ // CHECK-NEXT: %[[VAR_c1_i32_0:[0-9a-zA-Z_]+]] = arith.constant 1 : i32
%2 = arith.constant 1 : i32
- // CHECK-NEXT: %1 = arith.addi %0, %c1_i32_0 : i32
+ // CHECK-NEXT: %[[VAR_1:[0-9a-zA-Z_]+]] = arith.addi %{{.*}}, %[[VAR_c1_i32_0]] : i32
%add = arith.addi %arg, %2 : i32
- // CHECK-NEXT: return %1 : i32
+ // CHECK-NEXT: return %[[VAR_1]] : i32
return %add : i32
}
@@ -192,10 +192,10 @@ func.func @up_propagate() -> i32 {
/// an operation region.
// CHECK-LABEL: func @up_propagate_region
func.func @up_propagate_region() -> i32 {
- // CHECK-NEXT: %0 = "foo.region"
+ // CHECK-NEXT: {{.*}} "foo.region"
%0 = "foo.region"() ({
- // CHECK-NEXT: %c0_i32 = arith.constant 0 : i32
- // CHECK-NEXT: %true = arith.constant true
+ // CHECK-NEXT: %[[VAR_c0_i32:[0-9a-zA-Z_]+]] = arith.constant 0 : i32
+ // CHECK-NEXT: %[[VAR_true:[0-9a-zA-Z_]+]] = arith.constant true
// CHECK-NEXT: cf.cond_br
%1 = arith.constant 0 : i32
@@ -203,16 +203,16 @@ func.func @up_propagate_region() -> i32 {
cf.cond_br %true, ^bb1, ^bb2(%1 : i32)
^bb1: // CHECK: ^bb1:
- // CHECK-NEXT: %c1_i32 = arith.constant 1 : i32
+ // CHECK-NEXT: %[[VAR_c1_i32:[0-9a-zA-Z_]+]] = arith.constant 1 : i32
// CHECK-NEXT: cf.br
%c1_i32 = arith.constant 1 : i32
cf.br ^bb2(%c1_i32 : i32)
- ^bb2(%arg : i32): // CHECK: ^bb2(%1: i32):
- // CHECK-NEXT: %c1_i32_0 = arith.constant 1 : i32
- // CHECK-NEXT: %2 = arith.addi %1, %c1_i32_0 : i32
- // CHECK-NEXT: "foo.yield"(%2) : (i32) -> ()
+ ^bb2(%arg : i32): // CHECK: ^bb2(%[[VAR_1:.*]]: i32):
+ // CHECK-NEXT: %[[VAR_c1_i32_0:[0-9a-zA-Z_]+]] = arith.constant 1 : i32
+ // CHECK-NEXT: %[[VAR_2:[0-9a-zA-Z_]+]] = arith.addi %[[VAR_1]], %[[VAR_c1_i32_0]] : i32
+ // CHECK-NEXT: "foo.yield"(%[[VAR_2]]) : (i32) -> ()
%c1_i32_0 = arith.constant 1 : i32
%2 = arith.addi %arg, %c1_i32_0 : i32
@@ -252,7 +252,7 @@ func.func @nested_isolated() -> i32 {
func.func @use_before_def() {
// CHECK-NEXT: test.graph_region
test.graph_region {
- // CHECK-NEXT: arith.addi %c1_i32, %c1_i32_0
+ // CHECK-NEXT: arith.addi
%0 = arith.addi %1, %2 : i32
// CHECK-NEXT: arith.constant 1
@@ -260,7 +260,7 @@ func.func @use_before_def() {
%1 = arith.constant 1 : i32
%2 = arith.constant 1 : i32
- // CHECK-NEXT: "foo.yield"(%0) : (i32) -> ()
+ // CHECK-NEXT: "foo.yield"(%{{.*}}) : (i32) -> ()
"foo.yield"(%0) : (i32) -> ()
}
return
diff --git a/mlir/test/Transforms/loop-fusion-transformation.mlir b/mlir/test/Transforms/loop-fusion-transformation.mlir
index 8e36bdedb55e7..c8e0918e6bfd6 100644
--- a/mlir/test/Transforms/loop-fusion-transformation.mlir
+++ b/mlir/test/Transforms/loop-fusion-transformation.mlir
@@ -14,7 +14,7 @@ func.func @slice_depth1_loop_nest() {
// CHECK: affine.for %[[IV0:.*]] = 0 to 5 {
// CHECK-NEXT: affine.store %{{.*}}, %{{.*}}[%[[IV0]]] : memref<100xf32>
// CHECK-NEXT: affine.load %{{.*}}[%[[IV0]]] : memref<100xf32>
- // CHECK-NEXT: "prevent.dce"(%1) : (f32) -> ()
+ // CHECK-NEXT: "prevent.dce"(%{{.*}}) : (f32) -> ()
// CHECK-NEXT: }
// CHECK-NEXT: return
return
diff --git a/mlir/test/Transforms/loop-fusion.mlir b/mlir/test/Transforms/loop-fusion.mlir
index 7f40ea4490a39..82c23db4a0c2e 100644
--- a/mlir/test/Transforms/loop-fusion.mlir
+++ b/mlir/test/Transforms/loop-fusion.mlir
@@ -73,9 +73,9 @@ func.func @should_fuse_reduction_to_pointwise() {
// -----
-// CHECK-DAG: [[$MAP_SHIFT_MINUS_ONE_R1:#map[0-9]+]] = affine_map<(d0) -> (d0 - 1)>
-// CHECK-DAG: [[$MAP_SHIFT_D0_BY_ONE:#map[0-9]+]] = affine_map<(d0, d1) -> (d0 + 1)>
-// CHECK-DAG: [[$MAP_SHIFT_D1_BY_ONE:#map[0-9]+]] = affine_map<(d0, d1) -> (d1 + 1)>
+// CHECK-DAG: [[$MAP_SHIFT_MINUS_ONE_R1:#map[0-9a-zA-Z_]+]] = affine_map<(d0) -> (d0 - 1)>
+// CHECK-DAG: [[$MAP_SHIFT_D0_BY_ONE:#map[0-9a-zA-Z_]+]] = affine_map<(d0, d1) -> (d0 + 1)>
+// CHECK-DAG: [[$MAP_SHIFT_D1_BY_ONE:#map[0-9a-zA-Z_]+]] = affine_map<(d0, d1) -> (d1 + 1)>
// CHECK-LABEL: func @should_fuse_loop_nests_with_shifts() {
func.func @should_fuse_loop_nests_with_shifts() {
@@ -140,8 +140,8 @@ func.func @should_fuse_loop_nest() {
}
}
// Expecting private memref for '%a' first, then private memref for '%b'.
- // CHECK-DAG: [[NEWA:%[0-9]+]] = memref.alloc() : memref<1x1xf32>
- // CHECK-DAG: [[NEWB:%[0-9]+]] = memref.alloc() : memref<1x1xf32>
+ // CHECK-DAG: [[NEWA:%[0-9a-zA-Z_]+]] = memref.alloc() : memref<1x1xf32>
+ // CHECK-DAG: [[NEWB:%[0-9a-zA-Z_]+]] = memref.alloc() : memref<1x1xf32>
// CHECK: affine.for %{{.*}} = 0 to 10 {
// CHECK-NEXT: affine.for %{{.*}} = 0 to 10 {
// CHECK-NEXT: affine.store %{{.*}}, [[NEWA]][0, 0] : memref<1x1xf32>
@@ -211,8 +211,8 @@ func.func @should_fuse_all_loops() {
// Should fuse first and second loops into third.
// Expecting private memref for '%a' first, then private memref for '%b'.
- // CHECK-DAG: [[NEWA:%[0-9]+]] = memref.alloc() : memref<1xf32>
- // CHECK-DAG: [[NEWB:%[0-9]+]] = memref.alloc() : memref<1xf32>
+ // CHECK-DAG: [[NEWA:%[0-9a-zA-Z_]+]] = memref.alloc() : memref<1xf32>
+ // CHECK-DAG: [[NEWB:%[0-9a-zA-Z_]+]] = memref.alloc() : memref<1xf32>
// CHECK: affine.for %{{.*}} = 0 to 10 {
// CHECK-NEXT: affine.store %{{.*}}, [[NEWA]][0] : memref<1xf32>
// CHECK-NEXT: affine.store %{{.*}}, [[NEWB]][0] : memref<1xf32>
@@ -618,9 +618,9 @@ func.func @permute_and_fuse() {
// -----
-// CHECK-DAG: [[$MAP0:#map[0-9]+]] = affine_map<(d0, d1) -> (d0 * 4 + d1)>
-// CHECK-DAG: [[$MAP1:#map[0-9]+]] = affine_map<(d0) -> (d0 floordiv 4)>
-// CHECK-DAG: [[$MAP2:#map[0-9]+]] = affine_map<(d0) -> (d0 mod 4)>
+// CHECK-DAG: [[$MAP0:#map[0-9a-zA-Z_]+]] = affine_map<(d0, d1) -> (d0 * 4 + d1)>
+// CHECK-DAG: [[$MAP1:#map[0-9a-zA-Z_]+]] = affine_map<(d0) -> (d0 floordiv 4)>
+// CHECK-DAG: [[$MAP2:#map[0-9a-zA-Z_]+]] = affine_map<(d0) -> (d0 mod 4)>
// Reshape from a 64 x f32 to 16 x 4 x f32.
// CHECK-LABEL: func @fuse_reshape_64_16_4
@@ -648,9 +648,9 @@ func.func @fuse_reshape_64_16_4(%in : memref<64xf32>) {
}
// -----
-// CHECK-DAG: [[$MAP0:#map[0-9]+]] = affine_map<(d0) -> (d0 floordiv 4)>
-// CHECK-DAG: [[$MAP1:#map[0-9]+]] = affine_map<(d0) -> (d0 mod 4)>
-// CHECK-DAG: [[$MAP2:#map[0-9]+]] = affine_map<(d0, d1) -> (d0 * 4 + d1)>
+// CHECK-DAG: [[$MAP0:#map[0-9a-zA-Z_]+]] = affine_map<(d0) -> (d0 floordiv 4)>
+// CHECK-DAG: [[$MAP1:#map[0-9a-zA-Z_]+]] = affine_map<(d0) -> (d0 mod 4)>
+// CHECK-DAG: [[$MAP2:#map[0-9a-zA-Z_]+]] = affine_map<(d0, d1) -> (d0 * 4 + d1)>
// Reshape a 16x4xf32 to 64xf32.
// CHECK-LABEL: func @fuse_reshape_16_4_64
@@ -735,18 +735,18 @@ func.func @R6_to_R2_reshape_square() -> memref<64x9xi32> {
// Everything above is fused to a single 2-d loop nest, and the 6-d tensor %in
// is eliminated if -memref-dataflow-opt is also supplied.
//
-// CHECK-DAG: [[$MAP0:#map[0-9]+]] = affine_map<(d0, d1) -> ((d0 * 9 + d1) floordiv 288)>
-// CHECK-DAG: [[$MAP1:#map[0-9]+]] = affine_map<(d0, d1) -> (((d0 * 9 + d1) mod 288) floordiv 144)>
-// CHECK-DAG: [[$MAP2:#map[0-9]+]] = affine_map<(d0, d1) -> (((d0 * 9 + d1) mod 144) floordiv 48)>
-// CHECK-DAG: [[$MAP3:#map[0-9]+]] = affine_map<(d0, d1) -> (((d0 * 9 + d1) mod 48) floordiv 16)>
-// CHECK-DAG: [[$MAP4:#map[0-9]+]] = affine_map<(d0, d1) -> ((d0 * 9 + d1) mod 16)>
-// CHECK-DAG: [[$MAP11:#map[0-9]+]] = affine_map<(d0, d1) -> (d0 * 9 + d1)>
-// CHECK-DAG: [[$MAP12:#map[0-9]+]] = affine_map<(d0) -> (d0 floordiv 288)>
-// CHECK-DAG: [[$MAP13:#map[0-9]+]] = affine_map<(d0) -> ((d0 mod 288) floordiv 144)>
-// CHECK-DAG: [[$MAP14:#map[0-9]+]] = affine_map<(d0) -> ((d0 mod 144) floordiv 48)>
-// CHECK-DAG: [[$MAP15:#map[0-9]+]] = affine_map<(d0) -> ((d0 mod 48) floordiv 16)>
-// CHECK-DAG: [[$MAP16:#map[0-9]+]] = affine_map<(d0) -> (d0 mod 16)>
-// CHECK-DAG: [[$MAP17:#map[0-9]+]] = affine_map<(d0) -> (0)>
+// CHECK-DAG: [[$MAP0:#map[0-9a-zA-Z_]+]] = affine_map<(d0, d1) -> ((d0 * 9 + d1) floordiv 288)>
+// CHECK-DAG: [[$MAP1:#map[0-9a-zA-Z_]+]] = affine_map<(d0, d1) -> (((d0 * 9 + d1) mod 288) floordiv 144)>
+// CHECK-DAG: [[$MAP2:#map[0-9a-zA-Z_]+]] = affine_map<(d0, d1) -> (((d0 * 9 + d1) mod 144) floordiv 48)>
+// CHECK-DAG: [[$MAP3:#map[0-9a-zA-Z_]+]] = affine_map<(d0, d1) -> (((d0 * 9 + d1) mod 48) floordiv 16)>
+// CHECK-DAG: [[$MAP4:#map[0-9a-zA-Z_]+]] = affine_map<(d0, d1) -> ((d0 * 9 + d1) mod 16)>
+// CHECK-DAG: [[$MAP11:#map[0-9a-zA-Z_]+]] = affine_map<(d0, d1) -> (d0 * 9 + d1)>
+// CHECK-DAG: [[$MAP12:#map[0-9a-zA-Z_]+]] = affine_map<(d0) -> (d0 floordiv 288)>
+// CHECK-DAG: [[$MAP13:#map[0-9a-zA-Z_]+]] = affine_map<(d0) -> ((d0 mod 288) floordiv 144)>
+// CHECK-DAG: [[$MAP14:#map[0-9a-zA-Z_]+]] = affine_map<(d0) -> ((d0 mod 144) floordiv 48)>
+// CHECK-DAG: [[$MAP15:#map[0-9a-zA-Z_]+]] = affine_map<(d0) -> ((d0 mod 48) floordiv 16)>
+// CHECK-DAG: [[$MAP16:#map[0-9a-zA-Z_]+]] = affine_map<(d0) -> (d0 mod 16)>
+// CHECK-DAG: [[$MAP17:#map[0-9a-zA-Z_]+]] = affine_map<(d0) -> (0)>
//
// CHECK-LABEL: func @R6_to_R2_reshape
@@ -1324,9 +1324,9 @@ func.func @R3_to_R2_reshape() {
}
return
}
-// CHECK-DAG: [[$MAP0:#map[0-9]+]] = affine_map<(d0, d1) -> ((d0 * 3 + d1) floordiv 48)>
-// CHECK-DAG: [[$MAP1:#map[0-9]+]] = affine_map<(d0, d1) -> (d0 * 3 + d1)>
-// CHECK-DAG: [[$MAP2:#map[0-9]+]] = affine_map<(d0) -> (d0 floordiv 48)>
+// CHECK-DAG: [[$MAP0:#map[0-9a-zA-Z_]+]] = affine_map<(d0, d1) -> ((d0 * 3 + d1) floordiv 48)>
+// CHECK-DAG: [[$MAP1:#map[0-9a-zA-Z_]+]] = affine_map<(d0, d1) -> (d0 * 3 + d1)>
+// CHECK-DAG: [[$MAP2:#map[0-9a-zA-Z_]+]] = affine_map<(d0) -> (d0 floordiv 48)>
// CHECK-LABEL: func @R3_to_R2_reshape()
// CHECK-DAG: memref.alloc() : memref<1x1x1xi32>
diff --git a/mlir/test/Transforms/loop-invariant-code-motion.mlir b/mlir/test/Transforms/loop-invariant-code-motion.mlir
index bcd8d7c9babd6..6ea6cdf8f2757 100644
--- a/mlir/test/Transforms/loop-invariant-code-motion.mlir
+++ b/mlir/test/Transforms/loop-invariant-code-motion.mlir
@@ -13,7 +13,7 @@ func.func @nested_loops_both_having_invariant_code() {
}
}
- // CHECK: %0 = memref.alloc() : memref<10xf32>
+ // CHECK: memref.alloc() : memref<10xf32>
// CHECK-NEXT: %[[CST0:.*]] = arith.constant 7.000000e+00 : f32
// CHECK-NEXT: %[[CST1:.*]] = arith.constant 8.000000e+00 : f32
// CHECK-NEXT: %[[ADD0:.*]] = arith.addf %[[CST0]], %[[CST1]] : f32
@@ -38,10 +38,10 @@ func.func @nested_loops_code_invariant_to_both() {
}
}
- // CHECK: %0 = memref.alloc() : memref<10xf32>
- // CHECK-NEXT: %cst = arith.constant 7.000000e+00 : f32
- // CHECK-NEXT: %cst_0 = arith.constant 8.000000e+00 : f32
- // CHECK-NEXT: %1 = arith.addf %cst, %cst_0 : f32
+ // CHECK: memref.alloc() : memref<10xf32>
+ // CHECK-NEXT: arith.constant 7.000000e+00 : f32
+ // CHECK-NEXT: arith.constant 8.000000e+00 : f32
+ // CHECK-NEXT: arith.addf
return
}
@@ -58,13 +58,13 @@ func.func @single_loop_nothing_invariant() {
affine.store %v2, %m1[%arg0] : memref<10xf32>
}
- // CHECK: %0 = memref.alloc() : memref<10xf32>
- // CHECK-NEXT: %1 = memref.alloc() : memref<10xf32>
- // CHECK-NEXT: affine.for %arg0 = 0 to 10 {
- // CHECK-NEXT: %2 = affine.load %0[%arg0] : memref<10xf32>
- // CHECK-NEXT: %3 = affine.load %1[%arg0] : memref<10xf32>
- // CHECK-NEXT: %4 = arith.addf %2, %3 : f32
- // CHECK-NEXT: affine.store %4, %0[%arg0] : memref<10xf32>
+ // CHECK: memref.alloc() : memref<10xf32>
+ // CHECK-NEXT: memref.alloc() : memref<10xf32>
+ // CHECK-NEXT: affine.for
+ // CHECK-NEXT: affine.load
+ // CHECK-NEXT: affine.load
+ // CHECK-NEXT: arith.addf
+ // CHECK-NEXT: affine.store
return
}
@@ -84,13 +84,13 @@ func.func @invariant_code_inside_affine_if() {
}
}
- // CHECK: %0 = memref.alloc() : memref<10xf32>
- // CHECK-NEXT: %cst = arith.constant 8.000000e+00 : f32
- // CHECK-NEXT: affine.for %arg0 = 0 to 10 {
- // CHECK-NEXT: %1 = affine.apply #map(%arg0)
- // CHECK-NEXT: affine.if #set(%arg0, %1) {
- // CHECK-NEXT: %2 = arith.addf %cst, %cst : f32
- // CHECK-NEXT: affine.store %2, %0[%arg0] : memref<10xf32>
+ // CHECK: memref.alloc() : memref<10xf32>
+ // CHECK-NEXT: arith.constant 8.000000e+00 : f32
+ // CHECK-NEXT: affine.for
+ // CHECK-NEXT: affine.apply
+ // CHECK-NEXT: affine.if
+ // CHECK-NEXT: arith.addf
+ // CHECK-NEXT: affine.store
// CHECK-NEXT: }
@@ -110,7 +110,7 @@ func.func @invariant_affine_if() {
}
}
- // CHECK: %0 = memref.alloc() : memref<10xf32>
+ // CHECK: memref.alloc() : memref<10xf32>
// CHECK-NEXT: %[[CST:.*]] = arith.constant 8.000000e+00 : f32
// CHECK-NEXT: affine.for %[[ARG:.*]] = 0 to 10 {
// CHECK-NEXT: }
@@ -234,10 +234,10 @@ func.func @invariant_loop_dialect() {
}
}
- // CHECK: %0 = memref.alloc() : memref<10xf32>
- // CHECK-NEXT: %cst = arith.constant 7.000000e+00 : f32
- // CHECK-NEXT: %cst_0 = arith.constant 8.000000e+00 : f32
- // CHECK-NEXT: %1 = arith.addf %cst, %cst_0 : f32
+ // CHECK: memref.alloc() : memref<10xf32>
+ // CHECK-NEXT: arith.constant 7.000000e+00 : f32
+ // CHECK-NEXT: arith.constant 8.000000e+00 : f32
+ // CHECK-NEXT: arith.addf
return
}
@@ -255,7 +255,7 @@ func.func @variant_loop_dialect() {
}
}
- // CHECK: %0 = memref.alloc() : memref<10xf32>
+ // CHECK: memref.alloc() : memref<10xf32>
// CHECK-NEXT: scf.for
// CHECK-NEXT: scf.for
// CHECK-NEXT: arith.addi
@@ -277,14 +277,14 @@ func.func @parallel_loop_with_invariant() {
}
// CHECK-LABEL: func @parallel_loop_with_invariant
- // CHECK: %c0 = arith.constant 0 : index
- // CHECK-NEXT: %c10 = arith.constant 10 : index
- // CHECK-NEXT: %c1 = arith.constant 1 : index
- // CHECK-NEXT: %c7_i32 = arith.constant 7 : i32
- // CHECK-NEXT: %c8_i32 = arith.constant 8 : i32
- // CHECK-NEXT: arith.addi %c7_i32, %c8_i32 : i32
- // CHECK-NEXT: scf.parallel (%arg0, %arg1) = (%c0, %c0) to (%c10, %c10) step (%c1, %c1)
- // CHECK-NEXT: arith.addi %arg0, %arg1 : index
+ // CHECK: arith.constant 0 : index
+ // CHECK-NEXT: arith.constant 10 : index
+ // CHECK-NEXT: arith.constant 1 : index
+ // CHECK-NEXT: arith.constant 7 : i32
+ // CHECK-NEXT: arith.constant 8 : i32
+ // CHECK-NEXT: arith.addi
+ // CHECK-NEXT: scf.parallel (%[[A:.*]],{{.*}}) =
+ // CHECK-NEXT: arith.addi %[[A]]
// CHECK-NEXT: yield
// CHECK-NEXT: }
// CHECK-NEXT: return
diff --git a/mlir/test/Transforms/normalize-memrefs.mlir b/mlir/test/Transforms/normalize-memrefs.mlir
index 243d7b6306fac..f3d0a12ac72df 100644
--- a/mlir/test/Transforms/normalize-memrefs.mlir
+++ b/mlir/test/Transforms/normalize-memrefs.mlir
@@ -17,9 +17,9 @@ func.func @permute() {
}
// The old memref alloc should disappear.
// CHECK-NOT: memref<64x256xf32>
-// CHECK: [[MEM:%[0-9]+]] = memref.alloc() : memref<256x64xf32>
-// CHECK-NEXT: affine.for %[[I:arg[0-9]+]] = 0 to 64 {
-// CHECK-NEXT: affine.for %[[J:arg[0-9]+]] = 0 to 256 {
+// CHECK: [[MEM:%[0-9a-zA-Z_]+]] = memref.alloc() : memref<256x64xf32>
+// CHECK-NEXT: affine.for %[[I:arg[0-9a-zA-Z_]+]] = 0 to 64 {
+// CHECK-NEXT: affine.for %[[J:arg[0-9a-zA-Z_]+]] = 0 to 256 {
// CHECK-NEXT: affine.load [[MEM]][%[[J]], %[[I]]] : memref<256x64xf32>
// CHECK-NEXT: "prevent.dce"
// CHECK-NEXT: }
@@ -45,11 +45,11 @@ func.func @shift(%idx : index) {
func.func @high_dim_permute() {
// CHECK-NOT: memref<64x128x256xf32,
%A = memref.alloc() : memref<64x128x256xf32, affine_map<(d0, d1, d2) -> (d2, d0, d1)>>
- // CHECK: %[[I:arg[0-9]+]]
+ // CHECK: %[[I:arg[0-9a-zA-Z_]+]]
affine.for %i = 0 to 64 {
- // CHECK: %[[J:arg[0-9]+]]
+ // CHECK: %[[J:arg[0-9a-zA-Z_]+]]
affine.for %j = 0 to 128 {
- // CHECK: %[[K:arg[0-9]+]]
+ // CHECK: %[[K:arg[0-9a-zA-Z_]+]]
affine.for %k = 0 to 256 {
%1 = affine.load %A[%i, %j, %k] : memref<64x128x256xf32, affine_map<(d0, d1, d2) -> (d2, d0, d1)>>
// CHECK: %{{.*}} = affine.load %{{.*}}[%[[K]], %[[I]], %[[J]]] : memref<256x64x128xf32>
@@ -132,7 +132,7 @@ func.func @semi_affine_layout_map(%s0: index, %s1: index) {
%A = memref.alloc()[%s0, %s1] : memref<256x1024xf32, affine_map<(d0, d1)[s0, s1] -> (d0*s0 + d1*s1)>>
affine.for %i = 0 to 256 {
affine.for %j = 0 to 1024 {
- // CHECK: memref<256x1024xf32, #map{{[0-9]+}}>
+ // CHECK: memref<256x1024xf32, #map{{[0-9a-zA-Z_]+}}>
affine.load %A[%i, %j] : memref<256x1024xf32, affine_map<(d0, d1)[s0, s1] -> (d0*s0 + d1*s1)>>
}
}
@@ -152,7 +152,7 @@ func.func @alignment() {
// Test case 1: Check normalization for multiple memrefs in a function argument list.
// CHECK-LABEL: func @multiple_argument_type
-// CHECK-SAME: (%[[A:arg[0-9]+]]: memref<4x4xf64>, %[[B:arg[0-9]+]]: f64, %[[C:arg[0-9]+]]: memref<2x4xf64>, %[[D:arg[0-9]+]]: memref<24xf64>) -> f64
+// CHECK-SAME: (%[[A:arg[0-9a-zA-Z_]+]]: memref<4x4xf64>, %[[B:arg[0-9a-zA-Z_]+]]: f64, %[[C:arg[0-9a-zA-Z_]+]]: memref<2x4xf64>, %[[D:arg[0-9a-zA-Z_]+]]: memref<24xf64>) -> f64
func.func @multiple_argument_type(%A: memref<16xf64, #tile>, %B: f64, %C: memref<8xf64, #tile>, %D: memref<24xf64>) -> f64 {
%a = affine.load %A[0] : memref<16xf64, #tile>
%p = arith.mulf %a, %a : f64
@@ -161,15 +161,15 @@ func.func @multiple_argument_type(%A: memref<16xf64, #tile>, %B: f64, %C: memref
return %B : f64
}
-// CHECK: %[[a:[0-9]+]] = affine.load %[[A]][0, 0] : memref<4x4xf64>
-// CHECK: %[[p:[0-9]+]] = arith.mulf %[[a]], %[[a]] : f64
+// CHECK: %[[a:[0-9a-zA-Z_]+]] = affine.load %[[A]][0, 0] : memref<4x4xf64>
+// CHECK: %[[p:[0-9a-zA-Z_]+]] = arith.mulf %[[a]], %[[a]] : f64
// CHECK: affine.store %[[p]], %[[A]][2, 2] : memref<4x4xf64>
// CHECK: call @single_argument_type(%[[C]]) : (memref<2x4xf64>) -> ()
// CHECK: return %[[B]] : f64
// Test case 2: Check normalization for single memref argument in a function.
// CHECK-LABEL: func @single_argument_type
-// CHECK-SAME: (%[[C:arg[0-9]+]]: memref<2x4xf64>)
+// CHECK-SAME: (%[[C:arg[0-9a-zA-Z_]+]]: memref<2x4xf64>)
func.func @single_argument_type(%C : memref<8xf64, #tile>) {
%a = memref.alloc(): memref<8xf64, #tile>
%b = memref.alloc(): memref<16xf64, #tile>
@@ -181,17 +181,17 @@ func.func @single_argument_type(%C : memref<8xf64, #tile>) {
return
}
-// CHECK: %[[a:[0-9]+]] = memref.alloc() : memref<2x4xf64>
-// CHECK: %[[b:[0-9]+]] = memref.alloc() : memref<4x4xf64>
+// CHECK: %[[a:[0-9a-zA-Z_]+]] = memref.alloc() : memref<2x4xf64>
+// CHECK: %[[b:[0-9a-zA-Z_]+]] = memref.alloc() : memref<4x4xf64>
// CHECK: %cst = arith.constant 2.300000e+01 : f64
-// CHECK: %[[e:[0-9]+]] = memref.alloc() : memref<24xf64>
+// CHECK: %[[e:[0-9a-zA-Z_]+]] = memref.alloc() : memref<24xf64>
// CHECK: call @single_argument_type(%[[a]]) : (memref<2x4xf64>) -> ()
// CHECK: call @single_argument_type(%[[C]]) : (memref<2x4xf64>) -> ()
// CHECK: call @multiple_argument_type(%[[b]], %cst, %[[a]], %[[e]]) : (memref<4x4xf64>, f64, memref<2x4xf64>, memref<24xf64>) -> f64
// Test case 3: Check function returning any other type except memref.
// CHECK-LABEL: func @non_memref_ret
-// CHECK-SAME: (%[[C:arg[0-9]+]]: memref<2x4xf64>) -> i1
+// CHECK-SAME: (%[[C:arg[0-9a-zA-Z_]+]]: memref<2x4xf64>) -> i1
func.func @non_memref_ret(%A: memref<8xf64, #tile>) -> i1 {
%d = arith.constant 1 : i1
return %d : i1
@@ -201,7 +201,7 @@ func.func @non_memref_ret(%A: memref<8xf64, #tile>) -> i1 {
// Test case 4: Check successful memref normalization in case of inter/intra-recursive calls.
// CHECK-LABEL: func @ret_multiple_argument_type
-// CHECK-SAME: (%[[A:arg[0-9]+]]: memref<4x4xf64>, %[[B:arg[0-9]+]]: f64, %[[C:arg[0-9]+]]: memref<2x4xf64>) -> (memref<2x4xf64>, f64)
+// CHECK-SAME: (%[[A:arg[0-9a-zA-Z_]+]]: memref<4x4xf64>, %[[B:arg[0-9a-zA-Z_]+]]: f64, %[[C:arg[0-9a-zA-Z_]+]]: memref<2x4xf64>) -> (memref<2x4xf64>, f64)
func.func @ret_multiple_argument_type(%A: memref<16xf64, #tile>, %B: f64, %C: memref<8xf64, #tile>) -> (memref<8xf64, #tile>, f64) {
%a = affine.load %A[0] : memref<16xf64, #tile>
%p = arith.mulf %a, %a : f64
@@ -214,18 +214,18 @@ func.func @ret_multiple_argument_type(%A: memref<16xf64, #tile>, %B: f64, %C: me
return %C, %p: memref<8xf64, #tile>, f64
}
-// CHECK: %[[a:[0-9]+]] = affine.load %[[A]][0, 0] : memref<4x4xf64>
-// CHECK: %[[p:[0-9]+]] = arith.mulf %[[a]], %[[a]] : f64
+// CHECK: %[[a:[0-9a-zA-Z_]+]] = affine.load %[[A]][0, 0] : memref<4x4xf64>
+// CHECK: %[[p:[0-9a-zA-Z_]+]] = arith.mulf %[[a]], %[[a]] : f64
// CHECK: %true = arith.constant true
// CHECK: cf.cond_br %true, ^bb1, ^bb2
// CHECK: ^bb1: // pred: ^bb0
-// CHECK: %[[res:[0-9]+]]:2 = call @ret_single_argument_type(%[[C]]) : (memref<2x4xf64>) -> (memref<4x4xf64>, memref<2x4xf64>)
+// CHECK: %[[res:[0-9a-zA-Z_]+]]:2 = call @ret_single_argument_type(%[[C]]) : (memref<2x4xf64>) -> (memref<4x4xf64>, memref<2x4xf64>)
// CHECK: return %[[res]]#1, %[[p]] : memref<2x4xf64>, f64
// CHECK: ^bb2: // pred: ^bb0
// CHECK: return %{{.*}}, %{{.*}} : memref<2x4xf64>, f64
// CHECK-LABEL: func @ret_single_argument_type
-// CHECK-SAME: (%[[C:arg[0-9]+]]: memref<2x4xf64>) -> (memref<4x4xf64>, memref<2x4xf64>)
+// CHECK-SAME: (%[[C:arg[0-9a-zA-Z_]+]]: memref<2x4xf64>) -> (memref<4x4xf64>, memref<2x4xf64>)
func.func @ret_single_argument_type(%C: memref<8xf64, #tile>) -> (memref<16xf64, #tile>, memref<8xf64, #tile>){
%a = memref.alloc() : memref<8xf64, #tile>
%b = memref.alloc() : memref<16xf64, #tile>
@@ -237,18 +237,18 @@ func.func @ret_single_argument_type(%C: memref<8xf64, #tile>) -> (memref<16xf64,
return %b, %a: memref<16xf64, #tile>, memref<8xf64, #tile>
}
-// CHECK: %[[a:[0-9]+]] = memref.alloc() : memref<2x4xf64>
-// CHECK: %[[b:[0-9]+]] = memref.alloc() : memref<4x4xf64>
+// CHECK: %[[a:[0-9a-zA-Z_]+]] = memref.alloc() : memref<2x4xf64>
+// CHECK: %[[b:[0-9a-zA-Z_]+]] = memref.alloc() : memref<4x4xf64>
// CHECK: %cst = arith.constant 2.300000e+01 : f64
-// CHECK: %[[resA:[0-9]+]]:2 = call @ret_single_argument_type(%[[a]]) : (memref<2x4xf64>) -> (memref<4x4xf64>, memref<2x4xf64>)
-// CHECK: %[[resB:[0-9]+]]:2 = call @ret_single_argument_type(%[[C]]) : (memref<2x4xf64>) -> (memref<4x4xf64>, memref<2x4xf64>)
-// CHECK: %[[resC:[0-9]+]]:2 = call @ret_multiple_argument_type(%[[b]], %cst, %[[a]]) : (memref<4x4xf64>, f64, memref<2x4xf64>) -> (memref<2x4xf64>, f64)
-// CHECK: %[[resD:[0-9]+]]:2 = call @ret_single_argument_type(%[[resC]]#0) : (memref<2x4xf64>) -> (memref<4x4xf64>, memref<2x4xf64>)
+// CHECK: %[[resA:[0-9a-zA-Z_]+]]:2 = call @ret_single_argument_type(%[[a]]) : (memref<2x4xf64>) -> (memref<4x4xf64>, memref<2x4xf64>)
+// CHECK: %[[resB:[0-9a-zA-Z_]+]]:2 = call @ret_single_argument_type(%[[C]]) : (memref<2x4xf64>) -> (memref<4x4xf64>, memref<2x4xf64>)
+// CHECK: %[[resC:[0-9a-zA-Z_]+]]:2 = call @ret_multiple_argument_type(%[[b]], %cst, %[[a]]) : (memref<4x4xf64>, f64, memref<2x4xf64>) -> (memref<2x4xf64>, f64)
+// CHECK: %[[resD:[0-9a-zA-Z_]+]]:2 = call @ret_single_argument_type(%[[resC]]#0) : (memref<2x4xf64>) -> (memref<4x4xf64>, memref<2x4xf64>)
// CHECK: return %{{.*}}, %{{.*}} : memref<4x4xf64>, memref<2x4xf64>
// Test case set #5: To check normalization in a chain of interconnected functions.
// CHECK-LABEL: func @func_A
-// CHECK-SAME: (%[[A:arg[0-9]+]]: memref<2x4xf64>)
+// CHECK-SAME: (%[[A:arg[0-9a-zA-Z_]+]]: memref<2x4xf64>)
func.func @func_A(%A: memref<8xf64, #tile>) {
call @func_B(%A) : (memref<8xf64, #tile>) -> ()
return
@@ -256,7 +256,7 @@ func.func @func_A(%A: memref<8xf64, #tile>) {
// CHECK: call @func_B(%[[A]]) : (memref<2x4xf64>) -> ()
// CHECK-LABEL: func @func_B
-// CHECK-SAME: (%[[A:arg[0-9]+]]: memref<2x4xf64>)
+// CHECK-SAME: (%[[A:arg[0-9a-zA-Z_]+]]: memref<2x4xf64>)
func.func @func_B(%A: memref<8xf64, #tile>) {
call @func_C(%A) : (memref<8xf64, #tile>) -> ()
return
@@ -264,31 +264,31 @@ func.func @func_B(%A: memref<8xf64, #tile>) {
// CHECK: call @func_C(%[[A]]) : (memref<2x4xf64>) -> ()
// CHECK-LABEL: func @func_C
-// CHECK-SAME: (%[[A:arg[0-9]+]]: memref<2x4xf64>)
+// CHECK-SAME: (%[[A:arg[0-9a-zA-Z_]+]]: memref<2x4xf64>)
func.func @func_C(%A: memref<8xf64, #tile>) {
return
}
// Test case set #6: Checking if no normalization takes place in a scenario: A -> B -> C and B has an unsupported type.
// CHECK-LABEL: func @some_func_A
-// CHECK-SAME: (%[[A:arg[0-9]+]]: memref<8xf64, #map{{[0-9]+}}>)
+// CHECK-SAME: (%[[A:arg[0-9a-zA-Z_]+]]: memref<8xf64, #map{{[0-9a-zA-Z_]+}}>)
func.func @some_func_A(%A: memref<8xf64, #tile>) {
call @some_func_B(%A) : (memref<8xf64, #tile>) -> ()
return
}
-// CHECK: call @some_func_B(%[[A]]) : (memref<8xf64, #map{{[0-9]+}}>) -> ()
+// CHECK: call @some_func_B(%[[A]]) : (memref<8xf64, #map{{[0-9a-zA-Z_]+}}>) -> ()
// CHECK-LABEL: func @some_func_B
-// CHECK-SAME: (%[[A:arg[0-9]+]]: memref<8xf64, #map{{[0-9]+}}>)
+// CHECK-SAME: (%[[A:arg[0-9a-zA-Z_]+]]: memref<8xf64, #map{{[0-9a-zA-Z_]+}}>)
func.func @some_func_B(%A: memref<8xf64, #tile>) {
"test.test"(%A) : (memref<8xf64, #tile>) -> ()
call @some_func_C(%A) : (memref<8xf64, #tile>) -> ()
return
}
-// CHECK: call @some_func_C(%[[A]]) : (memref<8xf64, #map{{[0-9]+}}>) -> ()
+// CHECK: call @some_func_C(%[[A]]) : (memref<8xf64, #map{{[0-9a-zA-Z_]+}}>) -> ()
// CHECK-LABEL: func @some_func_C
-// CHECK-SAME: (%[[A:arg[0-9]+]]: memref<8xf64, #map{{[0-9]+}}>)
+// CHECK-SAME: (%[[A:arg[0-9a-zA-Z_]+]]: memref<8xf64, #map{{[0-9a-zA-Z_]+}}>)
func.func @some_func_C(%A: memref<8xf64, #tile>) {
return
}
@@ -308,16 +308,16 @@ func.func @simply_call_external() {
call @external_func_A(%a) : (memref<16xf64, #tile>) -> ()
return
}
-// CHECK: %[[a:[0-9]+]] = memref.alloc() : memref<4x4xf64>
+// CHECK: %[[a:[0-9a-zA-Z_]+]] = memref.alloc() : memref<4x4xf64>
// CHECK: call @external_func_A(%[[a]]) : (memref<4x4xf64>) -> ()
// CHECK-LABEL: func @use_value_of_external
-// CHECK-SAME: (%[[A:arg[0-9]+]]: memref<4x4xf64>, %[[B:arg[0-9]+]]: f64) -> memref<2x4xf64>
+// CHECK-SAME: (%[[A:arg[0-9a-zA-Z_]+]]: memref<4x4xf64>, %[[B:arg[0-9a-zA-Z_]+]]: f64) -> memref<2x4xf64>
func.func @use_value_of_external(%A: memref<16xf64, #tile>, %B: f64) -> (memref<8xf64, #tile>) {
%res = call @external_func_B(%A, %B) : (memref<16xf64, #tile>, f64) -> (memref<8xf64, #tile>)
return %res : memref<8xf64, #tile>
}
-// CHECK: %[[res:[0-9]+]] = call @external_func_B(%[[A]], %[[B]]) : (memref<4x4xf64>, f64) -> memref<2x4xf64>
+// CHECK: %[[res:[0-9a-zA-Z_]+]] = call @external_func_B(%[[A]], %[[B]]) : (memref<4x4xf64>, f64) -> memref<2x4xf64>
// CHECK: return %{{.*}} : memref<2x4xf64>
// CHECK-LABEL: func @affine_parallel_norm
diff --git a/mlir/test/Transforms/pipeline-data-transfer.mlir b/mlir/test/Transforms/pipeline-data-transfer.mlir
index d11da4a7a248e..78e47ac3cd119 100644
--- a/mlir/test/Transforms/pipeline-data-transfer.mlir
+++ b/mlir/test/Transforms/pipeline-data-transfer.mlir
@@ -2,8 +2,8 @@
// -----
-// CHECK-DAG: [[$MOD_2:#map[0-9]+]] = affine_map<(d0) -> (d0 mod 2)>
-// CHECK-DAG: [[$MAP_MINUS_1:#map[0-9]+]] = affine_map<(d0) -> (d0 - 1)>
+// CHECK-DAG: [[$MOD_2:#map[0-9a-zA-Z_]+]] = affine_map<(d0) -> (d0 mod 2)>
+// CHECK-DAG: [[$MAP_MINUS_1:#map[0-9a-zA-Z_]+]] = affine_map<(d0) -> (d0 - 1)>
// CHECK-LABEL: func @loop_nest_dma() {
func.func @loop_nest_dma() {
@@ -64,8 +64,8 @@ func.func @loop_nest_dma() {
// -----
-// CHECK-DAG: [[$FLOOR_MOD_2:#map[0-9]+]] = affine_map<(d0) -> ((d0 floordiv 4) mod 2)>
-// CHECK-DAG: [[$REMAP_SHIFT_MINUS_4:#map[0-9]+]] = affine_map<(d0) -> (d0 - 4)>
+// CHECK-DAG: [[$FLOOR_MOD_2:#map[0-9a-zA-Z_]+]] = affine_map<(d0) -> ((d0 floordiv 4) mod 2)>
+// CHECK-DAG: [[$REMAP_SHIFT_MINUS_4:#map[0-9a-zA-Z_]+]] = affine_map<(d0) -> (d0 - 4)>
// CHECK-LABEL: @loop_step
func.func @loop_step(%arg0: memref<512xf32>,
@@ -84,8 +84,8 @@ func.func @loop_step(%arg0: memref<512xf32>,
}
return
}
-// CHECK: [[BUF:%[0-9]+]] = memref.alloc() : memref<2x4xf32, 1>
-// CHECK: [[TAG:%[0-9]+]] = memref.alloc() : memref<2x1xi32>
+// CHECK: [[BUF:%[0-9a-zA-Z_]+]] = memref.alloc() : memref<2x4xf32, 1>
+// CHECK: [[TAG:%[0-9a-zA-Z_]+]] = memref.alloc() : memref<2x1xi32>
// CHECK-NEXT: affine.dma_start %{{.*}}[%{{.*}}], %{{.*}}[(%{{.*}} floordiv 4) mod 2, 0], [[TAG]][(%{{.*}} floordiv 4) mod 2, 0], %{{.*}} : memref<512xf32>, memref<2x4xf32, 1>, memref<2x1xi32>
// CHECK-NEXT: affine.for %{{.*}} = 4 to 512 step 4 {
// CHECK-NEXT: affine.dma_start %{{.*}}[%{{.*}}], %{{.*}}[(%{{.*}} floordiv 4) mod 2, 0], [[TAG]][(%{{.*}} floordiv 4) mod 2, 0], %{{.*}} : memref<512xf32>, memref<2x4xf32, 1>, memref<2x1xi32>
@@ -94,7 +94,7 @@ func.func @loop_step(%arg0: memref<512xf32>,
// CHECK: affine.dma_wait [[TAG]][(%{{.*}} floordiv 4) mod 2, 0], %{{.*}} : memref<2x1xi32>
// CHECK-NEXT: "compute"(%{{.*}}) : (index) -> ()
// CHECK-NEXT: }
-// CHECK-NEXT: [[SHIFTED:%[0-9]+]] = affine.apply [[$REMAP_SHIFT_MINUS_4]](%{{.*}})
+// CHECK-NEXT: [[SHIFTED:%[0-9a-zA-Z_]+]] = affine.apply [[$REMAP_SHIFT_MINUS_4]](%{{.*}})
// CHECK-NEXT: %{{.*}} = affine.apply [[$FLOOR_MOD_2]]([[SHIFTED]])
// CHECK: affine.dma_wait [[TAG]][(%{{.*}} floordiv 4) mod 2, 0], %{{.*}} : memref<2x1xi32>
// CHECK-NEXT: "compute"(%{{.*}}) : (index) -> ()
@@ -118,8 +118,8 @@ func.func @loop_dma_nested(%arg0: memref<512x32xvector<8xf32>>, %arg1: memref<51
%4 = memref.alloc() : memref<2xi32>
%5 = memref.alloc() : memref<2xi32>
// Prologue for DMA overlap on arg2.
- // CHECK-DAG: [[BUF_ARG2:%[0-9]+]] = memref.alloc() : memref<2x64x4xvector<8xf32>, 2>
- // CHECK-DAG: [[TAG_ARG2:%[0-9]+]] = memref.alloc() : memref<2x2xi32>
+ // CHECK-DAG: [[BUF_ARG2:%[0-9a-zA-Z_]+]] = memref.alloc() : memref<2x64x4xvector<8xf32>, 2>
+ // CHECK-DAG: [[TAG_ARG2:%[0-9a-zA-Z_]+]] = memref.alloc() : memref<2x2xi32>
// CHECK: affine.dma_start %{{.*}}[
// CHECK: affine.for %{{.*}} = 1 to 8 {
affine.for %i0 = 0 to 8 {
@@ -130,10 +130,10 @@ func.func @loop_dma_nested(%arg0: memref<512x32xvector<8xf32>>, %arg1: memref<51
// CHECK: affine.dma_start %{{.*}}[
// CHECK: affine.dma_wait [[TAG_ARG2]]
// Prologue for DMA overlap on arg0, arg1 nested within i0
- // CHECK: [[BUF_ARG0:%[0-9]+]] = memref.alloc() : memref<2x64x4xvector<8xf32>, 2>
- // CHECK: [[BUF_ARG1:%[0-9]+]] = memref.alloc() : memref<2x64x4xvector<8xf32>, 2>
- // CHECK: [[TAG_ARG0:%[0-9]+]] = memref.alloc() : memref<2x2xi32>
- // CHECK: [[TAG_ARG1:%[0-9]+]] = memref.alloc() : memref<2x2xi32>
+ // CHECK: [[BUF_ARG0:%[0-9a-zA-Z_]+]] = memref.alloc() : memref<2x64x4xvector<8xf32>, 2>
+ // CHECK: [[BUF_ARG1:%[0-9a-zA-Z_]+]] = memref.alloc() : memref<2x64x4xvector<8xf32>, 2>
+ // CHECK: [[TAG_ARG0:%[0-9a-zA-Z_]+]] = memref.alloc() : memref<2x2xi32>
+ // CHECK: [[TAG_ARG1:%[0-9a-zA-Z_]+]] = memref.alloc() : memref<2x2xi32>
// CHECK: affine.dma_start %{{.*}}[
// CHECK: affine.dma_start %{{.*}}[
// CHECK-NEXT: affine.for %{{.*}} = 1 to 8 {
@@ -164,10 +164,10 @@ func.func @loop_dma_nested(%arg0: memref<512x32xvector<8xf32>>, %arg1: memref<51
// epilogue for DMA overlap on %arg2
// CHECK: affine.dma_wait [[TAG_ARG2]]
// Within the epilogue for arg2's DMA, we have the DMAs on %arg1, %arg2 nested.
- // CHECK: [[BUF_ARG0_NESTED:%[0-9]+]] = memref.alloc() : memref<2x64x4xvector<8xf32>, 2>
- // CHECK: [[BUF_ARG1_NESTED:%[0-9]+]] = memref.alloc() : memref<2x64x4xvector<8xf32>, 2>
- // CHECK: [[TAG_ARG0_NESTED:%[0-9]+]] = memref.alloc() : memref<2x2xi32>
- // CHECK: [[TAG_ARG1_NESTED:%[0-9]+]] = memref.alloc() : memref<2x2xi32>
+ // CHECK: [[BUF_ARG0_NESTED:%[0-9a-zA-Z_]+]] = memref.alloc() : memref<2x64x4xvector<8xf32>, 2>
+ // CHECK: [[BUF_ARG1_NESTED:%[0-9a-zA-Z_]+]] = memref.alloc() : memref<2x64x4xvector<8xf32>, 2>
+ // CHECK: [[TAG_ARG0_NESTED:%[0-9a-zA-Z_]+]] = memref.alloc() : memref<2x2xi32>
+ // CHECK: [[TAG_ARG1_NESTED:%[0-9a-zA-Z_]+]] = memref.alloc() : memref<2x2xi32>
// CHECK: affine.dma_start %{{.*}}[
// CHECK: affine.dma_start %{{.*}}[
// CHECK: affine.for %{{.*}} = 1 to 8 {
@@ -256,7 +256,7 @@ func.func @escaping_use(%arg0: memref<512 x 32 x f32>) {
memref.dealloc %tag : memref<1 x i32>
memref.dealloc %Av : memref<32 x 32 x f32, 2>
return
-// CHECK: "foo"(%{{[0-9]+}}) : (memref<32x32xf32, 2>) -> ()
+// CHECK: "foo"(%{{[0-9a-zA-Z_]+}}) : (memref<32x32xf32, 2>) -> ()
// CHECK: }
// CHECK: return
}
@@ -284,7 +284,7 @@ func.func @escaping_tag(%arg0: memref<512 x 32 x f32>) {
memref.dealloc %tag : memref<1 x i32>
memref.dealloc %Av : memref<32 x 32 x f32, 2>
return
-// CHECK: "foo"(%{{[0-9]+}}) : (memref<1xi32>) -> ()
+// CHECK: "foo"(%{{[0-9a-zA-Z_]+}}) : (memref<1xi32>) -> ()
// CHECK: }
// CHECK: return
}
@@ -313,7 +313,7 @@ func.func @live_out_use(%arg0: memref<512 x 32 x f32>) -> f32 {
memref.dealloc %tag : memref<1 x i32>
memref.dealloc %Av : memref<32 x 32 x f32, 2>
return %v : f32
-// CHECK: affine.load %{{[0-9]+}}[%{{.*}}, %{{.*}}] : memref<32x32xf32, 2>
+// CHECK: affine.load %{{[0-9a-zA-Z_]+}}[%{{.*}}, %{{.*}}] : memref<32x32xf32, 2>
// CHECK: return
}
@@ -376,5 +376,5 @@ func.func @escaping_and_indexed_use_mix() {
// CHECK-NEXT: affine.dma_start %{{.*}}[%{{.*}}], %{{.*}}[%{{.*}}], %{{.*}}[%{{.*}}], %{{.*}}
// CHECK-NEXT: affine.dma_wait %{{.*}}[%{{.*}}], %{{.*}} : memref<1xf32>
// CHECK-NEXT: "compute"(%{{.*}}) : (memref<32xf32, 1>) -> ()
-// CHECK-NEXT: [[VAL:%[0-9]+]] = affine.load %{{.*}}[%{{.*}}] : memref<32xf32, 1>
+// CHECK-NEXT: [[VAL:%[0-9a-zA-Z_]+]] = affine.load %{{.*}}[%{{.*}}] : memref<32xf32, 1>
// CHECK-NEXT: "foo"([[VAL]]) : (f32) -> ()
More information about the Mlir-commits
mailing list