[Mlir-commits] [mlir] [mlir][bufferization] Switch tests to new deallocation pass pipeline (PR #66471)

llvmlistbot at llvm.org llvmlistbot at llvm.org
Fri Sep 15 01:47:55 PDT 2023


llvmbot wrote:


<!--LLVM PR SUMMARY COMMENT-->

@llvm/pr-subscribers-mlir-core
            
<details>
<summary>Changes</summary>
Use the new ownership-based deallocation pass pipeline in the regression and integration tests. Some one-shot bufferization tests exercised both One-Shot Bufferize and the deallocation pass at the same time; the deallocation pass was removed from those tests because it is already thoroughly tested on its own.
--

Patch is 32.88 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/66471.diff

12 Files Affected:

- (modified) mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-allow-return-allocs.mlir (+3-8) 
- (modified) mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-compat.mlir (+1-1) 
- (modified) mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-out-params.mlir (+3-11) 
- (modified) mlir/test/Dialect/SCF/one-shot-bufferize.mlir (+24-80) 
- (modified) mlir/test/Integration/Dialect/Linalg/CPU/test-collapse-tensor.mlir (+2-2) 
- (modified) mlir/test/Integration/Dialect/Linalg/CPU/test-elementwise.mlir (+4-4) 
- (modified) mlir/test/Integration/Dialect/Linalg/CPU/test-expand-tensor.mlir (+2-2) 
- (modified) mlir/test/Integration/Dialect/Linalg/CPU/test-padtensor.mlir (+3-2) 
- (modified) mlir/test/Integration/Dialect/Linalg/CPU/test-subtensor-insert-multiple-uses.mlir (+4-2) 
- (modified) mlir/test/Integration/Dialect/Linalg/CPU/test-subtensor-insert.mlir (+4-2) 
- (modified) mlir/test/Integration/Dialect/Linalg/CPU/test-tensor-e2e.mlir (+2-2) 
- (modified) mlir/test/Integration/Dialect/Linalg/CPU/test-tensor-matmul.mlir (+2-2) 


<pre>
diff --git a/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-allow-return-allocs.mlir b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-allow-return-allocs.mlir
index ab9360e45c65ff3..bccff8ef8d65aaa 100644
--- a/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-allow-return-allocs.mlir
+++ b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-allow-return-allocs.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -one-shot-bufferize=&quot;allow-return-allocs allow-unknown-ops&quot; -buffer-deallocation -canonicalize -split-input-file | FileCheck %s
+// RUN: mlir-opt %s -one-shot-bufferize=&quot;allow-return-allocs allow-unknown-ops&quot; -canonicalize -split-input-file | FileCheck %s
 
 // Run fuzzer with different seeds.
 // RUN: mlir-opt %s -one-shot-bufferize=&quot;allow-return-allocs test-analysis-only analysis-fuzzer-seed=23&quot; -split-input-file -o /dev/null
@@ -14,20 +14,17 @@ func.func @buffer_not_deallocated(%t : tensor&lt;?xf32&gt;, %c : i1) -&gt; tensor&lt;?xf32&gt;
     // CHECK: %[[some_op:.*]] = &quot;test.some_op&quot;
     // CHECK: %[[alloc:.*]] = memref.alloc(%[[some_op]])
     // CHECK: %[[casted:.*]] = memref.cast %[[alloc]]
-    // CHECK-NOT: dealloc
     // CHECK: scf.yield %[[casted]]
     %sz = &quot;test.some_op&quot;() : () -&gt; (index)
     %0 = bufferization.alloc_tensor(%sz) : tensor&lt;?xf32&gt;
     scf.yield %0 : tensor&lt;?xf32&gt;
   } else {
   // CHECK: } else {
-    // CHECK: %[[cloned:.*]] = bufferization.clone %[[m]]
-    // CHECK: scf.yield %[[cloned]]
+    // CHECK: scf.yield %[[m]]
     scf.yield %t : tensor&lt;?xf32&gt;
   }
   // CHECK: }
   // CHECK: %[[r_tensor:.*]] = bufferization.to_tensor %[[r]]
-  // CHECK: memref.dealloc %[[r]]
   // CHECK: return %[[r_tensor]]
   return %r : tensor&lt;?xf32&gt;
 }
@@ -42,8 +39,7 @@ func.func @write_to_alloc_tensor_or_readonly_tensor(%arg0: tensor&lt;i32&gt;,
 {
   // CHECK: %[[arg0_m:.*]] = bufferization.to_memref %[[arg0]]
   // CHECK: %[[r:.*]] = scf.if {{.*}} {
-  // CHECK:   %[[clone:.*]] = bufferization.clone %[[arg0_m]]
-  // CHECK:   scf.yield %[[clone]]
+  // CHECK:   scf.yield %[[arg0_m]]
   // CHECK: } else {
   // CHECK:   %[[alloc:.*]] = memref.alloc
   // CHECK:   memref.store %{{.*}}, %[[alloc]]
@@ -51,7 +47,6 @@ func.func @write_to_alloc_tensor_or_readonly_tensor(%arg0: tensor&lt;i32&gt;,
   // CHECK:   scf.yield %[[casted]]
   // CHECK: }
   // CHECK: %[[r_t:.*]] = bufferization.to_tensor %[[r]]
-  // CHECK: memref.dealloc %[[r]]
   // CHECK: return %[[r_t]]
   %3 = scf.if %cond -&gt; (tensor&lt;i32&gt;) {
     scf.yield %arg0 : tensor&lt;i32&gt;
diff --git a/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-compat.mlir b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-compat.mlir
index 06c79d450cea705..9ebd60f50328e14 100644
--- a/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-compat.mlir
+++ b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-compat.mlir
@@ -5,7 +5,7 @@
 
 // RUN: mlir-opt %s \
 // RUN:     -one-shot-bufferize=&quot;allow-unknown-ops create-deallocs=0&quot; \
-// RUN:     -buffer-deallocation | \
+// RUN:     -buffer-deallocation-pipeline | \
 // RUN: FileCheck %s --check-prefix=CHECK-BUFFERDEALLOC
 
 // CHECK-NODEALLOC-LABEL: func @out_of_place_bufferization
diff --git a/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-out-params.mlir b/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-out-params.mlir
index 5d876e935df1af6..4e4340c9db8acea 100644
--- a/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-out-params.mlir
+++ b/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-out-params.mlir
@@ -1,6 +1,6 @@
-// RUN: mlir-opt %s -one-shot-bufferize=&quot;bufferize-function-boundaries allow-return-allocs function-boundary-type-conversion=fully-dynamic-layout-map&quot; -drop-equivalent-buffer-results -buffer-results-to-out-params -buffer-deallocation -split-input-file | FileCheck %s
-// RUN: mlir-opt %s -one-shot-bufferize=&quot;bufferize-function-boundaries allow-return-allocs function-boundary-type-conversion=identity-layout-map&quot; -drop-equivalent-buffer-results -buffer-results-to-out-params -buffer-deallocation -split-input-file | FileCheck %s --check-prefix=CHECK-NO-LAYOUT
-// RUN: mlir-opt %s -one-shot-bufferize=&quot;bufferize-function-boundaries allow-return-allocs function-boundary-type-conversion=infer-layout-map&quot; -drop-equivalent-buffer-results -buffer-deallocation -split-input-file | FileCheck %s --check-prefix=CHECK-BASELINE
+// RUN: mlir-opt %s -one-shot-bufferize=&quot;bufferize-function-boundaries allow-return-allocs function-boundary-type-conversion=fully-dynamic-layout-map&quot; -drop-equivalent-buffer-results -buffer-results-to-out-params -split-input-file | FileCheck %s
+// RUN: mlir-opt %s -one-shot-bufferize=&quot;bufferize-function-boundaries allow-return-allocs function-boundary-type-conversion=identity-layout-map&quot; -drop-equivalent-buffer-results -buffer-results-to-out-params -split-input-file | FileCheck %s --check-prefix=CHECK-NO-LAYOUT
+// RUN: mlir-opt %s -one-shot-bufferize=&quot;bufferize-function-boundaries allow-return-allocs function-boundary-type-conversion=infer-layout-map&quot; -drop-equivalent-buffer-results -split-input-file | FileCheck %s --check-prefix=CHECK-BASELINE
 
 // Note: function-boundary-type-conversion=infer-layout-map with
 // promote-buffer-results-to-out-params is an unsupported combination.
@@ -18,7 +18,6 @@
 //       CHECK:   memref.store %{{.*}}, %[[alloc]]
 //       CHECK:   %[[casted:.*]] = memref.cast %[[alloc]]
 //       CHECK:   memref.copy %[[casted]], %[[arg1]]
-//       CHECK:   memref.dealloc %[[alloc]]
 //       CHECK:   return
 //       CHECK: }
 
@@ -29,7 +28,6 @@
 //       CHECK-NO-LAYOUT:   memref.copy %[[arg0]], %[[alloc]]
 //       CHECK-NO-LAYOUT:   memref.store {{.*}}, %[[alloc]]
 //       CHECK-NO-LAYOUT:   memref.copy %[[alloc]], %[[arg1]]
-//       CHECK-NO-LAYOUT:   memref.dealloc %[[alloc]]
 
 // CHECK-BASELINE-LABEL: func @callee(
 //  CHECK-BASELINE-SAME:     %[[arg0:.*]]: memref&lt;5xf32, strided&lt;[?], offset: ?&gt;&gt;) -&gt; memref&lt;5xf32&gt; {
@@ -53,7 +51,6 @@ func.func @callee(%t: tensor&lt;5xf32&gt;) -&gt; (tensor&lt;5xf32&gt;, tensor&lt;5xf32&gt;) {
 // CHECK:   call @callee(%[[arg0]], %[[casted]])
 // CHECK:   %[[l1:.*]] = memref.load %[[arg0]]
 // CHECK:   %[[l2:.*]] = memref.load %[[casted]]
-// CHECK:   memref.dealloc %[[alloc]]
 // CHECK:   return %[[l1]], %[[l2]]
 // CHECK: }
 
@@ -78,7 +75,6 @@ func.func @main(%t: tensor&lt;5xf32&gt;) -&gt; (f32, f32) {
 //       CHECK:   %[[subview:.*]] = memref.subview %[[alloc]]{{.*}} : memref&lt;10x20xf32&gt; to memref&lt;2x5xf32, strided&lt;[20, 1], offset: ?&gt;&gt;
 //       CHECK:   %[[casted:.*]] = memref.cast %[[subview]]
 //       CHECK:   memref.copy %[[casted]], %[[r]]
-//       CHECK:   memref.dealloc %[[alloc]]
 
 // CHECK-NO-LAYOUT-LABEL: func @callee(
 //  CHECK-NO-LAYOUT-SAME:              %{{.*}}: index,
@@ -90,9 +86,7 @@ func.func @main(%t: tensor&lt;5xf32&gt;) -&gt; (f32, f32) {
 // value and function signature.
 //       CHECK-NO-LAYOUT:   %[[alloc2:.*]] = memref.alloc() : memref&lt;2x5xf32&gt;
 //       CHECK-NO-LAYOUT:   memref.copy %[[subview]], %[[alloc2]]
-//       CHECK-NO-LAYOUT:   memref.dealloc %[[alloc]]
 //       CHECK-NO-LAYOUT:   memref.copy %[[alloc2]], %[[r]]
-//       CHECK-NO-LAYOUT:   memref.dealloc %[[alloc2]]
 
 // CHECK-BASELINE-LABEL: func @callee(
 //  CHECK-BASELINE-SAME:     %{{.*}}: index) -&gt; memref&lt;2x5xf32, strided&lt;[20, 1], offset: ?&gt;&gt; {
@@ -110,13 +104,11 @@ func.func @callee(%idx: index) -&gt; tensor&lt;2x5xf32&gt; {
 // CHECK:   %[[casted:.*]] = memref.cast %[[alloc]] : memref&lt;2x5xf32&gt; to memref&lt;2x5xf32, strided&lt;[?, ?], offset: ?&gt;&gt;
 // CHECK:   call @callee(%{{.*}}, %[[casted]])
 // CHECK:   memref.load %[[casted]]
-// CHECK:   memref.dealloc %[[alloc]]
 
 // CHECK-NO-LAYOUT: func @main(
 // CHECK-NO-LAYOUT:   %[[alloc:.*]] = memref.alloc() : memref&lt;2x5xf32&gt;
 // CHECK-NO-LAYOUT:   call @callee(%{{.*}}, %[[alloc]])
 // CHECK-NO-LAYOUT:   memref.load %[[alloc]]
-// CHECK-NO-LAYOUT:   memref.dealloc
 
 // CHECK-BASELINE: func @main(
 // CHECK-BASELINE:   %[[call:.*]] = call @callee
diff --git a/mlir/test/Dialect/SCF/one-shot-bufferize.mlir b/mlir/test/Dialect/SCF/one-shot-bufferize.mlir
index b01592dcad74488..a8c488461c74edd 100644
--- a/mlir/test/Dialect/SCF/one-shot-bufferize.mlir
+++ b/mlir/test/Dialect/SCF/one-shot-bufferize.mlir
@@ -1,5 +1,4 @@
-// RUN: mlir-opt %s -allow-unregistered-dialect -one-shot-bufferize=&quot;allow-return-allocs bufferize-function-boundaries&quot; -cse -canonicalize -drop-equivalent-buffer-results -buffer-deallocation -split-input-file | FileCheck %s
-// RUN: mlir-opt %s -allow-unregistered-dialect -one-shot-bufferize=&quot;allow-return-allocs bufferize-function-boundaries&quot; -drop-equivalent-buffer-results -split-input-file | FileCheck %s --check-prefix=CHECK-NO-DEALLOC-PASS
+// RUN: mlir-opt %s -allow-unregistered-dialect -one-shot-bufferize=&quot;allow-return-allocs bufferize-function-boundaries&quot; -cse -canonicalize -drop-equivalent-buffer-results -split-input-file | FileCheck %s
 
 // Run fuzzer with different seeds.
 // RUN: mlir-opt %s -allow-unregistered-dialect -one-shot-bufferize=&quot;allow-return-allocs test-analysis-only analysis-fuzzer-seed=23 bufferize-function-boundaries&quot; -split-input-file -o /dev/null
@@ -7,7 +6,7 @@
 // RUN: mlir-opt %s -allow-unregistered-dialect -one-shot-bufferize=&quot;allow-return-allocs test-analysis-only analysis-fuzzer-seed=91 bufferize-function-boundaries&quot; -split-input-file -o /dev/null
 
 // Test bufferization using memref types that have no layout map.
-// RUN: mlir-opt %s -allow-unregistered-dialect -one-shot-bufferize=&quot;allow-return-allocs unknown-type-conversion=identity-layout-map function-boundary-type-conversion=identity-layout-map bufferize-function-boundaries&quot; -buffer-deallocation -split-input-file -o /dev/null
+// RUN: mlir-opt %s -allow-unregistered-dialect -one-shot-bufferize=&quot;allow-return-allocs unknown-type-conversion=identity-layout-map function-boundary-type-conversion=identity-layout-map bufferize-function-boundaries&quot; -split-input-file -o /dev/null
 
 // CHECK-LABEL: func @scf_for_yield_only(
 //  CHECK-SAME:   %[[A:[a-zA-Z0-9]*]]: memref&lt;?xf32, strided&lt;[?], offset: ?&gt;&gt;,
@@ -52,8 +51,7 @@ func.func @scf_for_is_reading(%A : tensor&lt;?xf32&gt;, %B : tensor&lt;?xf32&gt;,
 
   // CHECK: %[[alloc:.*]] = memref.alloc
   // CHECK: memref.copy %[[A]], %[[alloc]]
-  // CHECK: %[[clone:.*]] = bufferization.clone %[[alloc]]
-  // CHECK: scf.for {{.*}} iter_args(%{{.*}} = %[[clone]])
+  // CHECK: scf.for {{.*}} iter_args(%{{.*}} = %[[alloc]])
   %0 = scf.for %iv = %lb to %ub step %c1 iter_args(%1 = %A) -&gt; tensor&lt;?xf32&gt; {
     %r = linalg.fill ins(%cst : f32) outs(%1 : tensor&lt;?xf32&gt;) -&gt; tensor&lt;?xf32&gt;
     scf.yield %B : tensor&lt;?xf32&gt;
@@ -235,7 +233,6 @@ func.func @scf_if_non_equiv_yields(
 // CHECK-LABEL: func @scf_execute_region_yield_non_equivalent(
 //       CHECK:   %[[alloc:.*]] = memref.alloc(%{{.*}})
 //       CHECK:   %[[r:.*]] = memref.load %[[alloc]][%{{.*}}]
-//       CHECK:   memref.dealloc %[[alloc]]
 //       CHECK:   return %[[r]]
 func.func @scf_execute_region_yield_non_equivalent(%i: index, %j: index) -&gt; f32 {
   %r = scf.execute_region -&gt; (tensor&lt;?xf32&gt;) {
@@ -256,16 +253,11 @@ func.func @scf_execute_region_yield_non_equivalent(%i: index, %j: index) -&gt; f32
 //  CHECK-SAME:     %[[t:.*]]: memref&lt;?xf32
 //       CHECK:   %[[alloc:.*]] = memref.alloc(%{{.*}})
 //       CHECK:   memref.copy %[[t]], %[[alloc]]
-//       CHECK:   %[[cloned:.*]] = bufferization.clone %[[t]]
-//       CHECK:   %[[for:.*]] = scf.for {{.*}} iter_args(%[[iter:.*]] = %[[cloned]])
-//   CHECK-DAG:     memref.dealloc %[[iter]]
+//       CHECK:   %[[for:.*]] = scf.for {{.*}} iter_args(%[[iter:.*]] = %[[t]])
 //   CHECK-DAG:     %[[alloc2:.*]] = memref.alloc(%{{.*}})
 //       CHECK:     memref.copy %[[alloc]], %[[alloc2]]
 //       CHECK:     %[[alloc2_casted:.*]] = memref.cast %[[alloc2]]
-//       CHECK:     %[[cloned2:.*]] = bufferization.clone %[[alloc2_casted]]
-//       CHECK:     memref.dealloc %[[alloc2]]
-//       CHECK:     scf.yield %[[cloned2]]
-//       CHECK:   memref.dealloc %[[alloc]]
+//       CHECK:     scf.yield %[[alloc2_casted]]
 //       CHECK:   return %[[for]]
 func.func @scf_for_yield_non_equivalent(
     %t: tensor&lt;?xf32&gt;, %lb : index, %ub : index, %step : index) -&gt; tensor&lt;?xf32&gt; {
@@ -284,19 +276,14 @@ func.func @scf_for_yield_non_equivalent(
 
 // CHECK-LABEL: func @scf_for_yield_allocation(
 //  CHECK-SAME:     %[[t:.*]]: memref&lt;?xf32
-//       CHECK:   %[[cloned:.*]] = bufferization.clone %[[t]]
-//       CHECK:   %[[for:.*]] = scf.for {{.*}} iter_args(%[[iter:.*]] = %[[cloned]])
+//       CHECK:   %[[for:.*]] = scf.for {{.*}} iter_args(%[[iter:.*]] = %[[t]])
 // This alloc is for the bufferization.alloc_tensor.
 //   CHECK-DAG:     %[[alloc2:.*]] = memref.alloc(%{{.*}})
-//   CHECK-DAG:     memref.dealloc %[[iter]]
 // This alloc is for the scf.yield.
 //       CHECK:     %[[alloc3:.*]] = memref.alloc(%{{.*}})
 //       CHECK:     memref.copy %[[alloc2]], %[[alloc3]]
-//       CHECK:     memref.dealloc %[[alloc2]]
 //       CHECK:     %[[casted3:.*]] = memref.cast %[[alloc3]]
-//       CHECK:     %[[cloned3:.*]] = bufferization.clone %[[casted3]]
-//       CHECK:     memref.dealloc %[[alloc3]]
-//       CHECK:     scf.yield %[[cloned3]]
+//       CHECK:     scf.yield %[[casted3]]
 //       CHECK:   return %[[for]]
 func.func @scf_for_yield_allocation(%t: tensor&lt;?xf32&gt;, %lb : index, %ub : index,
                                %step : index) -&gt; tensor&lt;?xf32&gt; {
@@ -320,9 +307,7 @@ func.func @scf_for_swapping_yields(
     %C : tensor&lt;4xf32&gt;, %lb : index, %ub : index, %step : index)
   -&gt; (f32, f32)
 {
-//   CHECK-DAG:   %[[clone1:.*]] = bufferization.clone %[[A]]
-//   CHECK-DAG:   %[[clone2:.*]] = bufferization.clone %[[B]]
-//       CHECK:   %[[for:.*]]:2 = scf.for {{.*}} iter_args(%[[iter1:.*]] = %[[clone1]], %[[iter2:.*]] = %[[clone2]])
+//       CHECK:   %[[for:.*]]:2 = scf.for {{.*}} iter_args(%[[iter1:.*]] = %[[A]], %[[iter2:.*]] = %[[B]])
   %r0:2 = scf.for %i = %lb to %ub step %step iter_args(%tA = %A, %tB = %B)
       -&gt; (tensor&lt;?xf32&gt;, tensor&lt;?xf32&gt;)
   {
@@ -335,25 +320,17 @@ func.func @scf_for_swapping_yields(
 
 //       CHECK:     %[[alloc2:.*]] = memref.alloc(%{{.*}})
 //       CHECK:     memref.copy %[[iter2]], %[[alloc2]]
-//       CHECK:     memref.dealloc %[[iter2]]
 //       CHECK:     %[[alloc1:.*]] = memref.alloc(%{{.*}})
 //       CHECK:     memref.copy %[[iter1]], %[[alloc1]]
-//       CHECK:     memref.dealloc %[[iter1]]
 //       CHECK:     %[[casted2:.*]] = memref.cast %[[alloc2]]
 //       CHECK:     %[[casted1:.*]] = memref.cast %[[alloc1]]
-//       CHECK:     %[[cloned1:.*]] = bufferization.clone %[[casted1]]
-//       CHECK:     memref.dealloc %[[alloc1]]
-//       CHECK:     %[[cloned2:.*]] = bufferization.clone %[[casted2]]
-//       CHECK:     memref.dealloc %[[alloc2]]
-//       CHECK:     scf.yield %[[cloned2]], %[[cloned1]]
+//       CHECK:     scf.yield %[[casted2]], %[[casted1]]
     // Yield tensors in different order.
     scf.yield %ttB, %ttA : tensor&lt;?xf32&gt;, tensor&lt;?xf32&gt;
   }
 
 //       CHECK:     %[[r0:.*]] = memref.load %[[for]]#0
-//       CHECK:     memref.dealloc %[[for]]#0
 //       CHECK:     %[[r1:.*]] = memref.load %[[for]]#1
-//       CHECK:     memref.dealloc %[[for]]#1
   %f0 = tensor.extract %r0#0[%step] : tensor&lt;?xf32&gt;
   %f1 = tensor.extract %r0#1[%step] : tensor&lt;?xf32&gt;
 //       CHECK:     return %[[r0]], %[[r1]]
@@ -399,23 +376,15 @@ func.func @scf_while_non_equiv_condition(%arg0: tensor&lt;5xi1&gt;,
                                          %idx: index)
   -&gt; (tensor&lt;5xi1&gt;, tensor&lt;5xi1&gt;)
 {
-  // CHECK: %[[clone1:.*]] = bufferization.clone %[[arg1]]
-  // CHECK: %[[clone0:.*]] = bufferization.clone %[[arg0]]
-  // CHECK: %[[loop:.*]]:2 = scf.while (%[[w0:.*]] = %[[clone0]], %[[w1:.*]] = %[[clone1]]) {{.*}} {
+  // CHECK: %[[loop:.*]]:2 = scf.while (%[[w0:.*]] = %[[arg0]], %[[w1:.*]] = %[[arg1]]) {{.*}} {
   %r0, %r1 = scf.while (%w0 = %arg0, %w1 = %arg1)
       : (tensor&lt;5xi1&gt;, tensor&lt;5xi1&gt;) -&gt; (tensor&lt;5xi1&gt;, tensor&lt;5xi1&gt;) {
     // CHECK: %[[condition:.*]] = memref.load %[[w0]]
     // CHECK: %[[a1:.*]] = memref.alloc() {{.*}} : memref&lt;5xi1&gt;
     // CHECK: memref.copy %[[w1]], %[[a1]]
-    // CHECK: memref.dealloc %[[w1]]
     // CHECK: %[[a0:.*]] = memref.alloc() {{.*}} : memref&lt;5xi1&gt;
     // CHECK: memref.copy %[[w0]], %[[a0]]
-    // CHECK: memref.dealloc %[[w0]]
-    // CHECK: %[[cloned1:.*]] = bufferization.clone %[[a1]]
-    // CHECK: memref.dealloc %[[a1]]
-    // CHECK: %[[cloned0:.*]] = bufferization.clone %[[a0]]
-    // CHECK: memref.dealloc %[[a0]]
-    // CHECK: scf.condition(%[[condition]]) %[[cloned1]], %[[cloned0]]
+    // CHECK: scf.condition(%[[condition]]) %[[a1]], %[[a0]]
     %condition = tensor.extract %w0[%idx] : tensor&lt;5xi1&gt;
     scf.condition(%condition) %w1, %w0 : tensor&lt;5xi1&gt;, tensor&lt;5xi1&gt;
   } do {
@@ -425,11 +394,7 @@ func.func @scf_while_non_equiv_condition(%arg0: tensor&lt;5xi1&gt;,
     // CHECK: memref.store %{{.*}}, %[[b0]]
     // CHECK: %[[casted0:.*]] = memref.cast %[[b0]] : memref&lt;5xi1&gt; to memref&lt;5xi1, strided{{.*}}&gt;
     // CHECK: %[[casted1:.*]] = memref.cast %[[b1]] : memref&lt;5xi1&gt; to memref&lt;5xi1, strided{{.*}}&gt;
-    // CHECK: %[[cloned2:.*]] = bufferization.clone %[[casted1]]
-    // CHECK: memref.dealloc %[[b1]]
-    // CHECK: %[[cloned3:.*]] = bufferization.clone %[[casted0]]
-    // CHECK: memref.dealloc %[[b0]]
-    // CHECK: scf.yield %[[cloned3]], %[[cloned2]]
+    // CHECK: scf.yield %[[casted0]], %[[casted1]]
     // CHECK: }
     %pos = &quot;dummy.some_op&quot;() : () -&gt; (index)
     %val = &quot;dummy.another_op&quot;() : () -&gt; (i1)
@@ -452,23 +417,15 @@ func.func @scf_while_non_equiv_condition_and_body(%arg0: tensor&lt;5xi1&gt;,
                                                   %idx: index)
   -&gt; (tensor&lt;5xi1&gt;, tensor&lt;5xi1&gt;)
 {
-  // CHECK-DAG: %[[clone1:.*]] = bufferization.clone ...
<truncated>
</pre>
</details>


https://github.com/llvm/llvm-project/pull/66471


More information about the Mlir-commits mailing list