[Mlir-commits] [mlir] 3d51010 - Revert "[mlir][bufferization] Switch tests to new deallocation pass pipeline (#66471)"

Martin Erhart llvmlistbot at llvm.org
Fri Sep 15 02:20:15 PDT 2023


Author: Martin Erhart
Date: 2023-09-15T09:19:54Z
New Revision: 3d51010a3350660160981c6b8e624dcc87c208a3

URL: https://github.com/llvm/llvm-project/commit/3d51010a3350660160981c6b8e624dcc87c208a3
DIFF: https://github.com/llvm/llvm-project/commit/3d51010a3350660160981c6b8e624dcc87c208a3.diff

LOG: Revert "[mlir][bufferization] Switch tests to new deallocation pass pipeline (#66471)"

This reverts commit ea42b49f1069ec647cbc67e6360026bc4e5d9a60.

Some GPU integration tests are failing; I did not observe these failures
locally. Reverting until I have a fix.
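
For context, a minimal sketch of the RUN-line change this revert undoes
(illustrative only; "test.mlir" is a placeholder, and each test's exact
pass list is shown in the diff below):

    # New ownership-based deallocation pipeline (what #66471 switched the tests to):
    mlir-opt test.mlir -one-shot-bufferize="allow-return-allocs" \
        -buffer-deallocation-pipeline

    # Old pass, restored by this revert. It materializes bufferization.clone
    # and memref.dealloc ops, which is why the CHECK lines below change:
    mlir-opt test.mlir -one-shot-bufferize="allow-return-allocs" \
        -buffer-deallocation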

Added: 
    

Modified: 
    mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-allow-return-allocs.mlir
    mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-compat.mlir
    mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-out-params.mlir
    mlir/test/Dialect/SCF/one-shot-bufferize.mlir
    mlir/test/Integration/Dialect/Linalg/CPU/test-collapse-tensor.mlir
    mlir/test/Integration/Dialect/Linalg/CPU/test-elementwise.mlir
    mlir/test/Integration/Dialect/Linalg/CPU/test-expand-tensor.mlir
    mlir/test/Integration/Dialect/Linalg/CPU/test-padtensor.mlir
    mlir/test/Integration/Dialect/Linalg/CPU/test-subtensor-insert-multiple-uses.mlir
    mlir/test/Integration/Dialect/Linalg/CPU/test-subtensor-insert.mlir
    mlir/test/Integration/Dialect/Linalg/CPU/test-tensor-e2e.mlir
    mlir/test/Integration/Dialect/Linalg/CPU/test-tensor-matmul.mlir

Removed: 
    


################################################################################
diff --git a/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-allow-return-allocs.mlir b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-allow-return-allocs.mlir
index bccff8ef8d65aaa..ab9360e45c65ff3 100644
--- a/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-allow-return-allocs.mlir
+++ b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-allow-return-allocs.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -one-shot-bufferize="allow-return-allocs allow-unknown-ops" -canonicalize -split-input-file | FileCheck %s
+// RUN: mlir-opt %s -one-shot-bufferize="allow-return-allocs allow-unknown-ops" -buffer-deallocation -canonicalize -split-input-file | FileCheck %s
 
 // Run fuzzer with different seeds.
 // RUN: mlir-opt %s -one-shot-bufferize="allow-return-allocs test-analysis-only analysis-fuzzer-seed=23" -split-input-file -o /dev/null
@@ -14,17 +14,20 @@ func.func @buffer_not_deallocated(%t : tensor<?xf32>, %c : i1) -> tensor<?xf32>
     // CHECK: %[[some_op:.*]] = "test.some_op"
     // CHECK: %[[alloc:.*]] = memref.alloc(%[[some_op]])
     // CHECK: %[[casted:.*]] = memref.cast %[[alloc]]
+    // CHECK-NOT: dealloc
     // CHECK: scf.yield %[[casted]]
     %sz = "test.some_op"() : () -> (index)
     %0 = bufferization.alloc_tensor(%sz) : tensor<?xf32>
     scf.yield %0 : tensor<?xf32>
   } else {
   // CHECK: } else {
-    // CHECK: scf.yield %[[m]]
+    // CHECK: %[[cloned:.*]] = bufferization.clone %[[m]]
+    // CHECK: scf.yield %[[cloned]]
     scf.yield %t : tensor<?xf32>
   }
   // CHECK: }
   // CHECK: %[[r_tensor:.*]] = bufferization.to_tensor %[[r]]
+  // CHECK: memref.dealloc %[[r]]
   // CHECK: return %[[r_tensor]]
   return %r : tensor<?xf32>
 }
@@ -39,7 +42,8 @@ func.func @write_to_alloc_tensor_or_readonly_tensor(%arg0: tensor<i32>,
 {
   // CHECK: %[[arg0_m:.*]] = bufferization.to_memref %[[arg0]]
   // CHECK: %[[r:.*]] = scf.if {{.*}} {
-  // CHECK:   scf.yield %[[arg0_m]]
+  // CHECK:   %[[clone:.*]] = bufferization.clone %[[arg0_m]]
+  // CHECK:   scf.yield %[[clone]]
   // CHECK: } else {
   // CHECK:   %[[alloc:.*]] = memref.alloc
   // CHECK:   memref.store %{{.*}}, %[[alloc]]
@@ -47,6 +51,7 @@ func.func @write_to_alloc_tensor_or_readonly_tensor(%arg0: tensor<i32>,
   // CHECK:   scf.yield %[[casted]]
   // CHECK: }
   // CHECK: %[[r_t:.*]] = bufferization.to_tensor %[[r]]
+  // CHECK: memref.dealloc %[[r]]
   // CHECK: return %[[r_t]]
   %3 = scf.if %cond -> (tensor<i32>) {
     scf.yield %arg0 : tensor<i32>

diff --git a/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-compat.mlir b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-compat.mlir
index 9ebd60f50328e14..06c79d450cea705 100644
--- a/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-compat.mlir
+++ b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-compat.mlir
@@ -5,7 +5,7 @@
 
 // RUN: mlir-opt %s \
 // RUN:     -one-shot-bufferize="allow-unknown-ops create-deallocs=0" \
-// RUN:     -buffer-deallocation-pipeline | \
+// RUN:     -buffer-deallocation | \
 // RUN: FileCheck %s --check-prefix=CHECK-BUFFERDEALLOC
 
 // CHECK-NODEALLOC-LABEL: func @out_of_place_bufferization

diff --git a/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-out-params.mlir b/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-out-params.mlir
index 4e4340c9db8acea..5d876e935df1af6 100644
--- a/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-out-params.mlir
+++ b/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-out-params.mlir
@@ -1,6 +1,6 @@
-// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries allow-return-allocs function-boundary-type-conversion=fully-dynamic-layout-map" -drop-equivalent-buffer-results -buffer-results-to-out-params -split-input-file | FileCheck %s
-// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries allow-return-allocs function-boundary-type-conversion=identity-layout-map" -drop-equivalent-buffer-results -buffer-results-to-out-params -split-input-file | FileCheck %s --check-prefix=CHECK-NO-LAYOUT
-// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries allow-return-allocs function-boundary-type-conversion=infer-layout-map" -drop-equivalent-buffer-results -split-input-file | FileCheck %s --check-prefix=CHECK-BASELINE
+// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries allow-return-allocs function-boundary-type-conversion=fully-dynamic-layout-map" -drop-equivalent-buffer-results -buffer-results-to-out-params -buffer-deallocation -split-input-file | FileCheck %s
+// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries allow-return-allocs function-boundary-type-conversion=identity-layout-map" -drop-equivalent-buffer-results -buffer-results-to-out-params -buffer-deallocation -split-input-file | FileCheck %s --check-prefix=CHECK-NO-LAYOUT
+// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries allow-return-allocs function-boundary-type-conversion=infer-layout-map" -drop-equivalent-buffer-results -buffer-deallocation -split-input-file | FileCheck %s --check-prefix=CHECK-BASELINE
 
 // Note: function-boundary-type-conversion=infer-layout-map with
 // promote-buffer-results-to-out-params is an unsupported combination.
@@ -18,6 +18,7 @@
 //       CHECK:   memref.store %{{.*}}, %[[alloc]]
 //       CHECK:   %[[casted:.*]] = memref.cast %[[alloc]]
 //       CHECK:   memref.copy %[[casted]], %[[arg1]]
+//       CHECK:   memref.dealloc %[[alloc]]
 //       CHECK:   return
 //       CHECK: }
 
@@ -28,6 +29,7 @@
 //       CHECK-NO-LAYOUT:   memref.copy %[[arg0]], %[[alloc]]
 //       CHECK-NO-LAYOUT:   memref.store {{.*}}, %[[alloc]]
 //       CHECK-NO-LAYOUT:   memref.copy %[[alloc]], %[[arg1]]
+//       CHECK-NO-LAYOUT:   memref.dealloc %[[alloc]]
 
 // CHECK-BASELINE-LABEL: func @callee(
 //  CHECK-BASELINE-SAME:     %[[arg0:.*]]: memref<5xf32, strided<[?], offset: ?>>) -> memref<5xf32> {
@@ -51,6 +53,7 @@ func.func @callee(%t: tensor<5xf32>) -> (tensor<5xf32>, tensor<5xf32>) {
 // CHECK:   call @callee(%[[arg0]], %[[casted]])
 // CHECK:   %[[l1:.*]] = memref.load %[[arg0]]
 // CHECK:   %[[l2:.*]] = memref.load %[[casted]]
+// CHECK:   memref.dealloc %[[alloc]]
 // CHECK:   return %[[l1]], %[[l2]]
 // CHECK: }
 
@@ -75,6 +78,7 @@ func.func @main(%t: tensor<5xf32>) -> (f32, f32) {
 //       CHECK:   %[[subview:.*]] = memref.subview %[[alloc]]{{.*}} : memref<10x20xf32> to memref<2x5xf32, strided<[20, 1], offset: ?>>
 //       CHECK:   %[[casted:.*]] = memref.cast %[[subview]]
 //       CHECK:   memref.copy %[[casted]], %[[r]]
+//       CHECK:   memref.dealloc %[[alloc]]
 
 // CHECK-NO-LAYOUT-LABEL: func @callee(
 //  CHECK-NO-LAYOUT-SAME:              %{{.*}}: index,
@@ -86,7 +90,9 @@ func.func @main(%t: tensor<5xf32>) -> (f32, f32) {
 // value and function signature.
 //       CHECK-NO-LAYOUT:   %[[alloc2:.*]] = memref.alloc() : memref<2x5xf32>
 //       CHECK-NO-LAYOUT:   memref.copy %[[subview]], %[[alloc2]]
+//       CHECK-NO-LAYOUT:   memref.dealloc %[[alloc]]
 //       CHECK-NO-LAYOUT:   memref.copy %[[alloc2]], %[[r]]
+//       CHECK-NO-LAYOUT:   memref.dealloc %[[alloc2]]
 
 // CHECK-BASELINE-LABEL: func @callee(
 //  CHECK-BASELINE-SAME:     %{{.*}}: index) -> memref<2x5xf32, strided<[20, 1], offset: ?>> {
@@ -104,11 +110,13 @@ func.func @callee(%idx: index) -> tensor<2x5xf32> {
 // CHECK:   %[[casted:.*]] = memref.cast %[[alloc]] : memref<2x5xf32> to memref<2x5xf32, strided<[?, ?], offset: ?>>
 // CHECK:   call @callee(%{{.*}}, %[[casted]])
 // CHECK:   memref.load %[[casted]]
+// CHECK:   memref.dealloc %[[alloc]]
 
 // CHECK-NO-LAYOUT: func @main(
 // CHECK-NO-LAYOUT:   %[[alloc:.*]] = memref.alloc() : memref<2x5xf32>
 // CHECK-NO-LAYOUT:   call @callee(%{{.*}}, %[[alloc]])
 // CHECK-NO-LAYOUT:   memref.load %[[alloc]]
+// CHECK-NO-LAYOUT:   memref.dealloc
 
 // CHECK-BASELINE: func @main(
 // CHECK-BASELINE:   %[[call:.*]] = call @callee

diff --git a/mlir/test/Dialect/SCF/one-shot-bufferize.mlir b/mlir/test/Dialect/SCF/one-shot-bufferize.mlir
index a8c488461c74edd..b01592dcad74488 100644
--- a/mlir/test/Dialect/SCF/one-shot-bufferize.mlir
+++ b/mlir/test/Dialect/SCF/one-shot-bufferize.mlir
@@ -1,4 +1,5 @@
-// RUN: mlir-opt %s -allow-unregistered-dialect -one-shot-bufferize="allow-return-allocs bufferize-function-boundaries" -cse -canonicalize -drop-equivalent-buffer-results -split-input-file | FileCheck %s
+// RUN: mlir-opt %s -allow-unregistered-dialect -one-shot-bufferize="allow-return-allocs bufferize-function-boundaries" -cse -canonicalize -drop-equivalent-buffer-results -buffer-deallocation -split-input-file | FileCheck %s
+// RUN: mlir-opt %s -allow-unregistered-dialect -one-shot-bufferize="allow-return-allocs bufferize-function-boundaries" -drop-equivalent-buffer-results -split-input-file | FileCheck %s --check-prefix=CHECK-NO-DEALLOC-PASS
 
 // Run fuzzer with different seeds.
 // RUN: mlir-opt %s -allow-unregistered-dialect -one-shot-bufferize="allow-return-allocs test-analysis-only analysis-fuzzer-seed=23 bufferize-function-boundaries" -split-input-file -o /dev/null
@@ -6,7 +7,7 @@
 // RUN: mlir-opt %s -allow-unregistered-dialect -one-shot-bufferize="allow-return-allocs test-analysis-only analysis-fuzzer-seed=91 bufferize-function-boundaries" -split-input-file -o /dev/null
 
 // Test bufferization using memref types that have no layout map.
-// RUN: mlir-opt %s -allow-unregistered-dialect -one-shot-bufferize="allow-return-allocs unknown-type-conversion=identity-layout-map function-boundary-type-conversion=identity-layout-map bufferize-function-boundaries" -split-input-file -o /dev/null
+// RUN: mlir-opt %s -allow-unregistered-dialect -one-shot-bufferize="allow-return-allocs unknown-type-conversion=identity-layout-map function-boundary-type-conversion=identity-layout-map bufferize-function-boundaries" -buffer-deallocation -split-input-file -o /dev/null
 
 // CHECK-LABEL: func @scf_for_yield_only(
 //  CHECK-SAME:   %[[A:[a-zA-Z0-9]*]]: memref<?xf32, strided<[?], offset: ?>>,
@@ -51,7 +52,8 @@ func.func @scf_for_is_reading(%A : tensor<?xf32>, %B : tensor<?xf32>,
 
   // CHECK: %[[alloc:.*]] = memref.alloc
   // CHECK: memref.copy %[[A]], %[[alloc]]
-  // CHECK: scf.for {{.*}} iter_args(%{{.*}} = %[[alloc]])
+  // CHECK: %[[clone:.*]] = bufferization.clone %[[alloc]]
+  // CHECK: scf.for {{.*}} iter_args(%{{.*}} = %[[clone]])
   %0 = scf.for %iv = %lb to %ub step %c1 iter_args(%1 = %A) -> tensor<?xf32> {
     %r = linalg.fill ins(%cst : f32) outs(%1 : tensor<?xf32>) -> tensor<?xf32>
     scf.yield %B : tensor<?xf32>
@@ -233,6 +235,7 @@ func.func @scf_if_non_equiv_yields(
 // CHECK-LABEL: func @scf_execute_region_yield_non_equivalent(
 //       CHECK:   %[[alloc:.*]] = memref.alloc(%{{.*}})
 //       CHECK:   %[[r:.*]] = memref.load %[[alloc]][%{{.*}}]
+//       CHECK:   memref.dealloc %[[alloc]]
 //       CHECK:   return %[[r]]
 func.func @scf_execute_region_yield_non_equivalent(%i: index, %j: index) -> f32 {
   %r = scf.execute_region -> (tensor<?xf32>) {
@@ -253,11 +256,16 @@ func.func @scf_execute_region_yield_non_equivalent(%i: index, %j: index) -> f32
 //  CHECK-SAME:     %[[t:.*]]: memref<?xf32
 //       CHECK:   %[[alloc:.*]] = memref.alloc(%{{.*}})
 //       CHECK:   memref.copy %[[t]], %[[alloc]]
-//       CHECK:   %[[for:.*]] = scf.for {{.*}} iter_args(%[[iter:.*]] = %[[t]])
+//       CHECK:   %[[cloned:.*]] = bufferization.clone %[[t]]
+//       CHECK:   %[[for:.*]] = scf.for {{.*}} iter_args(%[[iter:.*]] = %[[cloned]])
+//   CHECK-DAG:     memref.dealloc %[[iter]]
 //   CHECK-DAG:     %[[alloc2:.*]] = memref.alloc(%{{.*}})
 //       CHECK:     memref.copy %[[alloc]], %[[alloc2]]
 //       CHECK:     %[[alloc2_casted:.*]] = memref.cast %[[alloc2]]
-//       CHECK:     scf.yield %[[alloc2_casted]]
+//       CHECK:     %[[cloned2:.*]] = bufferization.clone %[[alloc2_casted]]
+//       CHECK:     memref.dealloc %[[alloc2]]
+//       CHECK:     scf.yield %[[cloned2]]
+//       CHECK:   memref.dealloc %[[alloc]]
 //       CHECK:   return %[[for]]
 func.func @scf_for_yield_non_equivalent(
     %t: tensor<?xf32>, %lb : index, %ub : index, %step : index) -> tensor<?xf32> {
@@ -276,14 +284,19 @@ func.func @scf_for_yield_non_equivalent(
 
 // CHECK-LABEL: func @scf_for_yield_allocation(
 //  CHECK-SAME:     %[[t:.*]]: memref<?xf32
-//       CHECK:   %[[for:.*]] = scf.for {{.*}} iter_args(%[[iter:.*]] = %[[t]])
+//       CHECK:   %[[cloned:.*]] = bufferization.clone %[[t]]
+//       CHECK:   %[[for:.*]] = scf.for {{.*}} iter_args(%[[iter:.*]] = %[[cloned]])
 // This alloc is for the bufferization.alloc_tensor.
 //   CHECK-DAG:     %[[alloc2:.*]] = memref.alloc(%{{.*}})
+//   CHECK-DAG:     memref.dealloc %[[iter]]
 // This alloc is for the scf.yield.
 //       CHECK:     %[[alloc3:.*]] = memref.alloc(%{{.*}})
 //       CHECK:     memref.copy %[[alloc2]], %[[alloc3]]
+//       CHECK:     memref.dealloc %[[alloc2]]
 //       CHECK:     %[[casted3:.*]] = memref.cast %[[alloc3]]
-//       CHECK:     scf.yield %[[casted3]]
+//       CHECK:     %[[cloned3:.*]] = bufferization.clone %[[casted3]]
+//       CHECK:     memref.dealloc %[[alloc3]]
+//       CHECK:     scf.yield %[[cloned3]]
 //       CHECK:   return %[[for]]
 func.func @scf_for_yield_allocation(%t: tensor<?xf32>, %lb : index, %ub : index,
                                %step : index) -> tensor<?xf32> {
@@ -307,7 +320,9 @@ func.func @scf_for_swapping_yields(
     %C : tensor<4xf32>, %lb : index, %ub : index, %step : index)
   -> (f32, f32)
 {
-//       CHECK:   %[[for:.*]]:2 = scf.for {{.*}} iter_args(%[[iter1:.*]] = %[[A]], %[[iter2:.*]] = %[[B]])
+//   CHECK-DAG:   %[[clone1:.*]] = bufferization.clone %[[A]]
+//   CHECK-DAG:   %[[clone2:.*]] = bufferization.clone %[[B]]
+//       CHECK:   %[[for:.*]]:2 = scf.for {{.*}} iter_args(%[[iter1:.*]] = %[[clone1]], %[[iter2:.*]] = %[[clone2]])
   %r0:2 = scf.for %i = %lb to %ub step %step iter_args(%tA = %A, %tB = %B)
       -> (tensor<?xf32>, tensor<?xf32>)
   {
@@ -320,17 +335,25 @@ func.func @scf_for_swapping_yields(
 
 //       CHECK:     %[[alloc2:.*]] = memref.alloc(%{{.*}})
 //       CHECK:     memref.copy %[[iter2]], %[[alloc2]]
+//       CHECK:     memref.dealloc %[[iter2]]
 //       CHECK:     %[[alloc1:.*]] = memref.alloc(%{{.*}})
 //       CHECK:     memref.copy %[[iter1]], %[[alloc1]]
+//       CHECK:     memref.dealloc %[[iter1]]
 //       CHECK:     %[[casted2:.*]] = memref.cast %[[alloc2]]
 //       CHECK:     %[[casted1:.*]] = memref.cast %[[alloc1]]
-//       CHECK:     scf.yield %[[casted2]], %[[casted1]]
+//       CHECK:     %[[cloned1:.*]] = bufferization.clone %[[casted1]]
+//       CHECK:     memref.dealloc %[[alloc1]]
+//       CHECK:     %[[cloned2:.*]] = bufferization.clone %[[casted2]]
+//       CHECK:     memref.dealloc %[[alloc2]]
+//       CHECK:     scf.yield %[[cloned2]], %[[cloned1]]
    // Yield tensors in different order.
     scf.yield %ttB, %ttA : tensor<?xf32>, tensor<?xf32>
   }
 
 //       CHECK:     %[[r0:.*]] = memref.load %[[for]]#0
+//       CHECK:     memref.dealloc %[[for]]#0
 //       CHECK:     %[[r1:.*]] = memref.load %[[for]]#1
+//       CHECK:     memref.dealloc %[[for]]#1
   %f0 = tensor.extract %r0#0[%step] : tensor<?xf32>
   %f1 = tensor.extract %r0#1[%step] : tensor<?xf32>
 //       CHECK:     return %[[r0]], %[[r1]]
@@ -376,15 +399,23 @@ func.func @scf_while_non_equiv_condition(%arg0: tensor<5xi1>,
                                          %idx: index)
   -> (tensor<5xi1>, tensor<5xi1>)
 {
-  // CHECK: %[[loop:.*]]:2 = scf.while (%[[w0:.*]] = %[[arg0]], %[[w1:.*]] = %[[arg1]]) {{.*}} {
+  // CHECK: %[[clone1:.*]] = bufferization.clone %[[arg1]]
+  // CHECK: %[[clone0:.*]] = bufferization.clone %[[arg0]]
+  // CHECK: %[[loop:.*]]:2 = scf.while (%[[w0:.*]] = %[[clone0]], %[[w1:.*]] = %[[clone1]]) {{.*}} {
   %r0, %r1 = scf.while (%w0 = %arg0, %w1 = %arg1)
       : (tensor<5xi1>, tensor<5xi1>) -> (tensor<5xi1>, tensor<5xi1>) {
     // CHECK: %[[condition:.*]] = memref.load %[[w0]]
     // CHECK: %[[a1:.*]] = memref.alloc() {{.*}} : memref<5xi1>
     // CHECK: memref.copy %[[w1]], %[[a1]]
+    // CHECK: memref.dealloc %[[w1]]
     // CHECK: %[[a0:.*]] = memref.alloc() {{.*}} : memref<5xi1>
     // CHECK: memref.copy %[[w0]], %[[a0]]
-    // CHECK: scf.condition(%[[condition]]) %[[a1]], %[[a0]]
+    // CHECK: memref.dealloc %[[w0]]
+    // CHECK: %[[cloned1:.*]] = bufferization.clone %[[a1]]
+    // CHECK: memref.dealloc %[[a1]]
+    // CHECK: %[[cloned0:.*]] = bufferization.clone %[[a0]]
+    // CHECK: memref.dealloc %[[a0]]
+    // CHECK: scf.condition(%[[condition]]) %[[cloned1]], %[[cloned0]]
     %condition = tensor.extract %w0[%idx] : tensor<5xi1>
     scf.condition(%condition) %w1, %w0 : tensor<5xi1>, tensor<5xi1>
   } do {
@@ -394,7 +425,11 @@ func.func @scf_while_non_equiv_condition(%arg0: tensor<5xi1>,
     // CHECK: memref.store %{{.*}}, %[[b0]]
     // CHECK: %[[casted0:.*]] = memref.cast %[[b0]] : memref<5xi1> to memref<5xi1, strided{{.*}}>
     // CHECK: %[[casted1:.*]] = memref.cast %[[b1]] : memref<5xi1> to memref<5xi1, strided{{.*}}>
-    // CHECK: scf.yield %[[casted0]], %[[casted1]]
+    // CHECK: %[[cloned2:.*]] = bufferization.clone %[[casted1]]
+    // CHECK: memref.dealloc %[[b1]]
+    // CHECK: %[[cloned3:.*]] = bufferization.clone %[[casted0]]
+    // CHECK: memref.dealloc %[[b0]]
+    // CHECK: scf.yield %[[cloned3]], %[[cloned2]]
     // CHECK: }
     %pos = "dummy.some_op"() : () -> (index)
     %val = "dummy.another_op"() : () -> (i1)
@@ -417,15 +452,23 @@ func.func @scf_while_non_equiv_condition_and_body(%arg0: tensor<5xi1>,
                                                   %idx: index)
   -> (tensor<5xi1>, tensor<5xi1>)
 {
-  // CHECK: %[[loop:.*]]:2 = scf.while (%[[w0:.*]] = %[[arg0]], %[[w1:.*]] = %[[arg1]]) {{.*}} {
+  // CHECK-DAG: %[[clone1:.*]] = bufferization.clone %[[arg1]]
+  // CHECK-DAG: %[[clone0:.*]] = bufferization.clone %[[arg0]]
+  // CHECK: %[[loop:.*]]:2 = scf.while (%[[w0:.*]] = %[[clone0]], %[[w1:.*]] = %[[clone1]]) {{.*}} {
   %r0, %r1 = scf.while (%w0 = %arg0, %w1 = %arg1)
       : (tensor<5xi1>, tensor<5xi1>) -> (tensor<5xi1>, tensor<5xi1>) {
     // CHECK: %[[condition:.*]] = memref.load %[[w0]]
     // CHECK: %[[a1:.*]] = memref.alloc() {{.*}} : memref<5xi1>
     // CHECK: memref.copy %[[w1]], %[[a1]]
+    // CHECK: memref.dealloc %[[w1]]
     // CHECK: %[[a0:.*]] = memref.alloc() {{.*}} : memref<5xi1>
     // CHECK: memref.copy %[[w0]], %[[a0]]
-    // CHECK: scf.condition(%[[condition]]) %[[a1]], %[[a0]]
+    // CHECK: memref.dealloc %[[w0]]
+    // CHECK: %[[cloned1:.*]] = bufferization.clone %[[a1]]
+    // CHECK: memref.dealloc %[[a1]]
+    // CHECK: %[[cloned0:.*]] = bufferization.clone %[[a0]]
+    // CHECK: memref.dealloc %[[a0]]
+    // CHECK: scf.condition(%[[condition]]) %[[cloned1]], %[[cloned0]]
     %condition = tensor.extract %w0[%idx] : tensor<5xi1>
     scf.condition(%condition) %w1, %w0 : tensor<5xi1>, tensor<5xi1>
   } do {
@@ -435,7 +478,11 @@ func.func @scf_while_non_equiv_condition_and_body(%arg0: tensor<5xi1>,
     // CHECK: memref.store %{{.*}}, %[[b0]]
     // CHECK: %[[casted1:.*]] = memref.cast %[[b1]]
     // CHECK: %[[casted0:.*]] = memref.cast %[[b0]]
-    // CHECK: scf.yield %[[casted1]], %[[casted0]]
+    // CHECK: %[[cloned1:.*]] = bufferization.clone %[[casted1]]
+    // CHECK: memref.dealloc %[[b1]]
+    // CHECK: %[[cloned0:.*]] = bufferization.clone %[[casted0]]
+    // CHECK: memref.dealloc %[[b0]]
+    // CHECK: scf.yield %[[cloned1]], %[[cloned0]]
     // CHECK: }
     %pos = "dummy.some_op"() : () -> (index)
     %val = "dummy.another_op"() : () -> (i1)
@@ -451,7 +498,9 @@ func.func @scf_while_non_equiv_condition_and_body(%arg0: tensor<5xi1>,
 
 // CHECK-LABEL: func @scf_while_iter_arg_result_mismatch(
 //  CHECK-SAME:     %[[arg0:.*]]: memref<5xi1, strided{{.*}}>, %[[arg1:.*]]: memref<5xi1, strided{{.*}}>
-//       CHECK:   scf.while (%[[arg3:.*]] = %[[arg1]]) : (memref<5xi1, strided{{.*}}) -> () {
+//       CHECK:   %[[clone:.*]] = bufferization.clone %[[arg1]]
+//       CHECK:   scf.while (%[[arg3:.*]] = %[[clone]]) : (memref<5xi1, strided{{.*}}) -> () {
+//   CHECK-DAG:     memref.dealloc %[[arg3]]
 //   CHECK-DAG:     %[[load:.*]] = memref.load %[[arg0]]
 //       CHECK:     scf.condition(%[[load]])
 //       CHECK:   } do {
@@ -459,7 +508,9 @@ func.func @scf_while_non_equiv_condition_and_body(%arg0: tensor<5xi1>,
 //       CHECK:     memref.copy %[[arg0]], %[[alloc2]]
 //       CHECK:     memref.store %{{.*}}, %[[alloc2]]
 //       CHECK:     %[[casted:.*]] = memref.cast %[[alloc2]] : memref<5xi1> to memref<5xi1, strided{{.*}}>
-//       CHECK:     scf.yield %[[casted]]
+//       CHECK:     %[[cloned:.*]] = bufferization.clone %[[casted]]
+//       CHECK:     memref.dealloc %[[alloc2]]
+//       CHECK:     scf.yield %[[cloned]]
 //       CHECK:   }
 func.func @scf_while_iter_arg_result_mismatch(%arg0: tensor<5xi1>,
                                               %arg1: tensor<5xi1>,
@@ -558,6 +609,7 @@ func.func @parallel_insert_slice_with_conflict(
 
   // CHECK: %[[load:.*]] = memref.load %[[arg2]]
   // CHECK: %[[load2:.*]] = memref.load %[[alloc1]]
+  // CHECK: memref.dealloc %[[alloc1]]
   %f = tensor.extract %arg2[%c0] : tensor<?xf32>
   %f2 = tensor.extract %2[%c0] : tensor<?xf32>
 
@@ -669,12 +721,15 @@ func.func @scf_if_memory_space(%c: i1, %f: f32, %cst: f32) -> (f32, f32)
   %filled = linalg.fill ins(%cst : f32) outs(%alloc : tensor<5xf32>) -> tensor<5xf32>
   // CHECK: scf.if %{{.*}} -> (memref<5xf32, 1>) {
   %1 = scf.if %c -> tensor<5xf32> {
-    // CHECK: scf.yield %[[alloc]]
+    // CHECK: %[[cloned:.*]] = bufferization.clone %[[alloc]]
+    // CHECK: scf.yield %[[cloned]]
     scf.yield %filled : tensor<5xf32>
   } else {
     // CHECK: %[[alloc2:.*]] = memref.alloc() {{.*}} : memref<5xf32, 1>
     // CHECK: memref.store %{{.*}}, %[[alloc2]]
-    // CHECK: scf.yield %[[alloc2]]
+    // CHECK: %[[cloned2:.*]] = bufferization.clone %[[alloc2]]
+    // CHECK: memref.dealloc %[[alloc2]]
+    // CHECK: scf.yield %[[cloned2]]
     %2 = tensor.insert %f into %filled[%c0] : tensor<5xf32>
     scf.yield %2 : tensor<5xf32>
   }
@@ -689,6 +744,7 @@ func.func @scf_if_memory_space(%c: i1, %f: f32, %cst: f32) -> (f32, f32)
 // CHECK: memref.alloc() {{.*}} : memref<5xf32, 1>
 // CHECK: memref.store
 // CHECK: memref.load
+// CHECK: memref.dealloc
 func.func @scf_execute_region_memory_space(%f: f32) -> f32 {
   %c0 = arith.constant 0 : index
   %0 = scf.execute_region -> tensor<5xf32> {
@@ -899,24 +955,24 @@ func.func @regression_cast_in_loop() -> tensor<2xindex> {
 // This test does not compute anything meaningful but it tests that
 // bufferizesToMemoryWrite is correctly propagated through regions.
 
-// CHECK-LABEL: func @elide_copy_of_non_writing_scf_if(
+// CHECK-NO-DEALLOC-PASS-LABEL: func @elide_copy_of_non_writing_scf_if(
 func.func @elide_copy_of_non_writing_scf_if(%c: i1, %p1: index, %p2: index, %f: f32)
   -> (tensor<10xf32>, f32)
 {
   %r = scf.if %c -> tensor<10xf32> {
-    // CHECK: memref.alloc
+    // CHECK-NO-DEALLOC-PASS: memref.alloc
     %t1 = bufferization.alloc_tensor() : tensor<10xf32>
     scf.yield %t1 : tensor<10xf32>
   } else {
-    // CHECK: memref.alloc
+    // CHECK-NO-DEALLOC-PASS: memref.alloc
     %t2 = bufferization.alloc_tensor() : tensor<10xf32>
     scf.yield %t2 : tensor<10xf32>
   }
 
   // No copy should be inserted because %r does not bufferize to a memory write.
   // I.e., %r does not have defined contents and the copy can be elided.
-  // CHECK-NOT: memref.alloc
-  // CHECK-NOT: memref.copy
+  // CHECK-NO-DEALLOC-PASS-NOT: memref.alloc
+  // CHECK-NO-DEALLOC-PASS-NOT: memref.copy
   %r2 = tensor.insert %f into %r[%p1] : tensor<10xf32>
   %r3 = tensor.extract %r[%p2] : tensor<10xf32>
   return %r2, %r3 : tensor<10xf32>, f32

diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/test-collapse-tensor.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/test-collapse-tensor.mlir
index 53cdcc57ec1b185..1580778167321ed 100644
--- a/mlir/test/Integration/Dialect/Linalg/CPU/test-collapse-tensor.mlir
+++ b/mlir/test/Integration/Dialect/Linalg/CPU/test-collapse-tensor.mlir
@@ -1,7 +1,7 @@
 // RUN: mlir-opt %s -linalg-bufferize \
 // RUN: -arith-bufferize -tensor-bufferize -func-bufferize \
-// RUN: -finalizing-bufferize -buffer-deallocation-pipeline \
-// RUN: -convert-scf-to-cf -expand-strided-metadata -lower-affine -convert-cf-to-llvm -convert-arith-to-llvm \
+// RUN: -finalizing-bufferize -buffer-deallocation  \
+// RUN: -expand-strided-metadata -lower-affine \
 // RUN: -finalize-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | \
 // RUN: mlir-cpu-runner -e main -entry-point-result=void \
 // RUN:   -shared-libs=%mlir_runner_utils \

diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/test-elementwise.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/test-elementwise.mlir
index 57b504222c241ab..8331757000ba487 100644
--- a/mlir/test/Integration/Dialect/Linalg/CPU/test-elementwise.mlir
+++ b/mlir/test/Integration/Dialect/Linalg/CPU/test-elementwise.mlir
@@ -1,8 +1,8 @@
 // RUN: mlir-opt %s -convert-elementwise-to-linalg \
-// RUN: -arith-bufferize -linalg-bufferize -tensor-bufferize -func-bufferize \
-// RUN: -canonicalize -buffer-deallocation-pipeline -convert-linalg-to-loops \
-// RUN: -convert-scf-to-cf -convert-arith-to-llvm -convert-cf-to-llvm --finalize-memref-to-llvm \
-// RUN: -convert-func-to-llvm -reconcile-unrealized-casts | \
+// RUN: -arith-bufferize -linalg-bufferize -tensor-bufferize \
+// RUN: -func-bufferize -buffer-deallocation -convert-linalg-to-loops \
+// RUN:  --finalize-memref-to-llvm -convert-func-to-llvm \
+// RUN: -reconcile-unrealized-casts | \
 // RUN: mlir-cpu-runner -e main -entry-point-result=void \
 // RUN:   -shared-libs=%mlir_runner_utils \
 // RUN: | FileCheck %s

diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/test-expand-tensor.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/test-expand-tensor.mlir
index 7dddeb145262173..b7084a30654ab62 100644
--- a/mlir/test/Integration/Dialect/Linalg/CPU/test-expand-tensor.mlir
+++ b/mlir/test/Integration/Dialect/Linalg/CPU/test-expand-tensor.mlir
@@ -1,7 +1,7 @@
 // RUN: mlir-opt %s -linalg-bufferize \
 // RUN: -arith-bufferize -tensor-bufferize -func-bufferize \
-// RUN: -finalizing-bufferize -buffer-deallocation-pipeline \
-// RUN: -convert-scf-to-cf -expand-strided-metadata -lower-affine -convert-cf-to-llvm -convert-arith-to-llvm \
+// RUN: -finalizing-bufferize -buffer-deallocation  \
+// RUN: -expand-strided-metadata -lower-affine \
 // RUN: -finalize-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | \
 // RUN: mlir-cpu-runner -e main -entry-point-result=void \
 // RUN:   -shared-libs=%mlir_runner_utils \

diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/test-padtensor.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/test-padtensor.mlir
index c9bc964a124ff45..967c4d58e71b4f2 100644
--- a/mlir/test/Integration/Dialect/Linalg/CPU/test-padtensor.mlir
+++ b/mlir/test/Integration/Dialect/Linalg/CPU/test-padtensor.mlir
@@ -1,9 +1,8 @@
 // RUN: mlir-opt %s -test-linalg-transform-patterns=test-linalg-to-vector-patterns \
 // RUN: -empty-tensor-to-alloc-tensor -linalg-bufferize -arith-bufferize \
 // RUN: -bufferization-bufferize -tensor-bufferize -func-bufferize \
-// RUN: -finalizing-bufferize -buffer-deallocation-pipeline \
-// RUN: -convert-linalg-to-loops -convert-scf-to-cf -expand-strided-metadata \
-// RUN: -lower-affine -convert-arith-to-llvm -finalize-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | \
+// RUN: -finalizing-bufferize -buffer-deallocation \
+// RUN: -convert-linalg-to-loops -convert-scf-to-cf  -expand-strided-metadata -lower-affine -convert-arith-to-llvm -finalize-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | \
 // RUN: mlir-cpu-runner -e main -entry-point-result=void \
 // RUN:   -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils \
 // RUN: | FileCheck %s

diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/test-subtensor-insert-multiple-uses.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/test-subtensor-insert-multiple-uses.mlir
index d749fca36a5ba52..18dd8bf183cb90a 100644
--- a/mlir/test/Integration/Dialect/Linalg/CPU/test-subtensor-insert-multiple-uses.mlir
+++ b/mlir/test/Integration/Dialect/Linalg/CPU/test-subtensor-insert-multiple-uses.mlir
@@ -1,9 +1,7 @@
 // RUN: mlir-opt %s -linalg-bufferize \
 // RUN: -arith-bufferize -tensor-bufferize -func-bufferize \
-// RUN: -finalizing-bufferize -buffer-deallocation-pipeline \
-// RUN: -convert-linalg-to-loops -convert-scf-to-cf -expand-strided-metadata  \
-// RUN: -lower-affine -convert-arith-to-llvm --finalize-memref-to-llvm \
-// RUN: -convert-func-to-llvm -reconcile-unrealized-casts | \
+// RUN: -finalizing-bufferize -buffer-deallocation \
+// RUN: -convert-linalg-to-loops -convert-scf-to-cf  -expand-strided-metadata -lower-affine -convert-arith-to-llvm --finalize-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | \
 // RUN: mlir-cpu-runner -e main -entry-point-result=void \
 // RUN:   -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils \
 // RUN: | FileCheck %s

diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/test-subtensor-insert.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/test-subtensor-insert.mlir
index f3d416d4809794c..8002db794a40172 100644
--- a/mlir/test/Integration/Dialect/Linalg/CPU/test-subtensor-insert.mlir
+++ b/mlir/test/Integration/Dialect/Linalg/CPU/test-subtensor-insert.mlir
@@ -1,9 +1,7 @@
 // RUN: mlir-opt %s -linalg-bufferize \
 // RUN: -arith-bufferize -tensor-bufferize -func-bufferize \
-// RUN: -finalizing-bufferize -buffer-deallocation-pipeline \
-// RUN: -convert-linalg-to-loops -convert-scf-to-cf -expand-strided-metadata \
-// RUN: -lower-affine -convert-arith-to-llvm --finalize-memref-to-llvm \
-// RUN: -convert-func-to-llvm -reconcile-unrealized-casts | \
+// RUN: -finalizing-bufferize -buffer-deallocation \
+// RUN: -convert-linalg-to-loops -convert-scf-to-cf  -expand-strided-metadata -lower-affine -convert-arith-to-llvm --finalize-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | \
 // RUN: mlir-cpu-runner -e main -entry-point-result=void \
 // RUN:   -shared-libs=%mlir_runner_utils \
 // RUN: | FileCheck %s

diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/test-tensor-e2e.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/test-tensor-e2e.mlir
index 6be2c18060f3777..715d62137f342c6 100644
--- a/mlir/test/Integration/Dialect/Linalg/CPU/test-tensor-e2e.mlir
+++ b/mlir/test/Integration/Dialect/Linalg/CPU/test-tensor-e2e.mlir
@@ -1,6 +1,6 @@
 // RUN: mlir-opt %s -arith-bufferize -linalg-bufferize \
-// RUN: -tensor-bufferize -func-bufferize -finalizing-bufferize -buffer-deallocation-pipeline -convert-linalg-to-loops \
-// RUN: -convert-arith-to-llvm -convert-scf-to-cf -convert-cf-to-llvm --finalize-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | \
+// RUN: -tensor-bufferize -func-bufferize -finalizing-bufferize -buffer-deallocation -convert-linalg-to-loops \
+// RUN:  --finalize-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | \
 // RUN: mlir-cpu-runner -e main -entry-point-result=void \
 // RUN:   -shared-libs=%mlir_runner_utils \
 // RUN: | FileCheck %s

diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/test-tensor-matmul.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/test-tensor-matmul.mlir
index 0d5db35079aceee..e1b2e247d0619dd 100644
--- a/mlir/test/Integration/Dialect/Linalg/CPU/test-tensor-matmul.mlir
+++ b/mlir/test/Integration/Dialect/Linalg/CPU/test-tensor-matmul.mlir
@@ -1,7 +1,7 @@
 // UNSUPPORTED: asan
 // RUN: mlir-opt %s -test-transform-dialect-erase-schedule -linalg-bufferize -arith-bufferize \
-// RUN: -tensor-bufferize -func-bufferize -finalizing-bufferize -buffer-deallocation-pipeline -convert-linalg-to-loops -convert-scf-to-cf \
-// RUN: -expand-strided-metadata -lower-affine -convert-arith-to-llvm -convert-scf-to-cf --finalize-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | \
+// RUN: -tensor-bufferize -func-bufferize -finalizing-bufferize -buffer-deallocation -convert-linalg-to-loops -convert-scf-to-cf \
+// RUN:  -expand-strided-metadata -lower-affine -convert-arith-to-llvm -convert-scf-to-cf --finalize-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | \
 // RUN: mlir-cpu-runner -e main -entry-point-result=void \
 // RUN:   -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils \
 // RUN: | FileCheck %s
