[Mlir-commits] [mlir] 5452fa6 - [MLIR] Added test operations to replace linalg dependency for

Alexander Bosch llvmlistbot at llvm.org
Tue Nov 3 03:26:44 PST 2020


Author: Alexander Bosch
Date: 2020-11-03T12:18:49+01:00
New Revision: 5452fa6a599b197b75ad5d017c0498612fa600b4

URL: https://github.com/llvm/llvm-project/commit/5452fa6a599b197b75ad5d017c0498612fa600b4
DIFF: https://github.com/llvm/llvm-project/commit/5452fa6a599b197b75ad5d017c0498612fa600b4.diff

LOG: [MLIR] Added test operations to replace linalg dependency for
BufferizeTests.

Summary:
Added test operations to replace the LinalgDialect dependency in tests
which use the buffer-deallocation, buffer-hoisting,
buffer-loop-hoisting, promote-buffers-to-stack,
buffer-placement-preparation-allowed-memref-results and
buffer-placement-preparation passes. Adapted the corresponding test
cases and TestBufferPlacement.cpp.

Differential Revision: https://reviews.llvm.org/D90037
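
In essence, the patch rewrites each elementwise linalg.generic (together with
its #map0 affine map) as the new test.buffer_based operation, and each
linalg.copy as test.copy. A rough before/after sketch of the pattern, adapted
from the tests below (the exact op syntax is the one added to TestOps.td):

  // Before:
  %0 = alloc() : memref<2xf32>
  linalg.generic {
    indexing_maps = [#map0, #map0],
    iterator_types = ["parallel"]}
      ins(%arg0: memref<2xf32>)
     outs(%0: memref<2xf32>) {
  ^bb0(%gen1_arg0: f32, %gen1_arg1: f32):
    %tmp1 = exp %gen1_arg0 : f32
    linalg.yield %tmp1 : f32
  }
  "linalg.copy"(%0, %arg1) : (memref<2xf32>, memref<2xf32>) -> ()

  // After:
  %0 = alloc() : memref<2xf32>
  test.buffer_based in(%arg0: memref<2xf32>) out(%0: memref<2xf32>)
  test.copy(%0, %arg1) : (memref<2xf32>, memref<2xf32>)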

Added: 
    

Modified: 
    mlir/test/Transforms/buffer-deallocation.mlir
    mlir/test/Transforms/buffer-hoisting.mlir
    mlir/test/Transforms/buffer-loop-hoisting.mlir
    mlir/test/Transforms/finalizing-bufferize-allowed-memref-results.mlir
    mlir/test/Transforms/finalizing-bufferize.mlir
    mlir/test/Transforms/promote-buffers-to-stack.mlir
    mlir/test/lib/Dialect/Test/TestDialect.h
    mlir/test/lib/Dialect/Test/TestOps.td
    mlir/test/lib/Transforms/TestFinalizingBufferize.cpp

Removed: 
    


################################################################################
diff  --git a/mlir/test/Transforms/buffer-deallocation.mlir b/mlir/test/Transforms/buffer-deallocation.mlir
index 709ee4f27663..079988103947 100644
--- a/mlir/test/Transforms/buffer-deallocation.mlir
+++ b/mlir/test/Transforms/buffer-deallocation.mlir
@@ -15,8 +15,6 @@
 // Since bb1 does not contain an adequate alloc and the alloc in bb2 is not
 // moved to bb0, we need to insert allocs and copies.
 
-#map0 = affine_map<(d0) -> (d0)>
-
 // CHECK-LABEL: func @condBranch
 func @condBranch(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) {
   cond_br %arg0, ^bb1, ^bb2
@@ -24,18 +22,10 @@ func @condBranch(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) {
   br ^bb3(%arg1 : memref<2xf32>)
 ^bb2:
   %0 = alloc() : memref<2xf32>
-  linalg.generic {
-    indexing_maps = [#map0, #map0],
-    iterator_types = ["parallel"]}
-      ins(%arg1: memref<2xf32>)
-     outs(%0: memref<2xf32>) {
-  ^bb0(%gen1_arg0: f32, %gen1_arg1: f32):
-    %tmp1 = exp %gen1_arg0 : f32
-    linalg.yield %tmp1 : f32
-  }
+  test.buffer_based in(%arg1: memref<2xf32>) out(%0: memref<2xf32>)
   br ^bb3(%0 : memref<2xf32>)
 ^bb3(%1: memref<2xf32>):
-  "linalg.copy"(%1, %arg2) : (memref<2xf32>, memref<2xf32>) -> ()
+  test.copy(%1, %arg2) : (memref<2xf32>, memref<2xf32>)
   return
 }
 
@@ -44,12 +34,12 @@ func @condBranch(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) {
 // CHECK-NEXT: linalg.copy
 // CHECK-NEXT: br ^bb3(%[[ALLOC0]]
 //      CHECK: %[[ALLOC1:.*]] = alloc()
-// CHECK-NEXT: linalg.generic
+// CHECK-NEXT: test.buffer_based
 //      CHECK: %[[ALLOC2:.*]] = alloc()
 // CHECK-NEXT: linalg.copy
 // CHECK-NEXT: dealloc %[[ALLOC1]]
 // CHECK-NEXT: br ^bb3(%[[ALLOC2]]
-//      CHECK: linalg.copy
+//      CHECK: test.copy
 // CHECK-NEXT: dealloc
 // CHECK-NEXT: return
 
@@ -68,8 +58,6 @@ func @condBranch(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) {
 // appropriate shape dimensions. The copy buffer deallocation will be applied
 // to %2 in block bb3.
 
-#map0 = affine_map<(d0) -> (d0)>
-
 // CHECK-LABEL: func @condBranchDynamicType
 func @condBranchDynamicType(
   %arg0: i1,
@@ -81,18 +69,10 @@ func @condBranchDynamicType(
   br ^bb3(%arg1 : memref<?xf32>)
 ^bb2(%0: index):
   %1 = alloc(%0) : memref<?xf32>
-  linalg.generic {
-    indexing_maps = [#map0, #map0],
-    iterator_types = ["parallel"]}
-    ins(%arg1: memref<?xf32>)
-   outs(%1: memref<?xf32>) {
-  ^bb0(%gen1_arg0: f32, %gen1_arg1: f32):
-    %tmp1 = exp %gen1_arg0 : f32
-    linalg.yield %tmp1 : f32
-  }
+  test.buffer_based in(%arg1: memref<?xf32>) out(%1: memref<?xf32>)
   br ^bb3(%1 : memref<?xf32>)
 ^bb3(%2: memref<?xf32>):
-  "linalg.copy"(%2, %arg2) : (memref<?xf32>, memref<?xf32>) -> ()
+  test.copy(%2, %arg2) : (memref<?xf32>, memref<?xf32>)
   return
 }
 
@@ -103,14 +83,14 @@ func @condBranchDynamicType(
 // CHECK-NEXT: br ^bb3(%[[ALLOC0]]
 //      CHECK: ^bb2(%[[IDX:.*]]:{{.*}})
 // CHECK-NEXT: %[[ALLOC1:.*]] = alloc(%[[IDX]])
-// CHECK-NEXT: linalg.generic
+// CHECK-NEXT: test.buffer_based
 //      CHECK: %[[DIM1:.*]] = dim %[[ALLOC1]]
 // CHECK-NEXT: %[[ALLOC2:.*]] = alloc(%[[DIM1]])
 // CHECK-NEXT: linalg.copy(%[[ALLOC1]], %[[ALLOC2]])
 // CHECK-NEXT: dealloc %[[ALLOC1]]
 // CHECK-NEXT: br ^bb3
 // CHECK-NEXT: ^bb3(%[[ALLOC3:.*]]:{{.*}})
-//      CHECK: linalg.copy(%[[ALLOC3]],
+//      CHECK: test.copy(%[[ALLOC3]],
 // CHECK-NEXT: dealloc %[[ALLOC3]]
 // CHECK-NEXT: return
 
@@ -136,8 +116,6 @@ func @condBranchDynamicType(
 // buffer deallocations will be applied to %2 in block bb5 and to %3 in block
 // bb6. Furthermore, there should be no copy inserted for %4.
 
-#map0 = affine_map<(d0) -> (d0)>
-
 // CHECK-LABEL: func @condBranchDynamicTypeNested
 func @condBranchDynamicTypeNested(
   %arg0: i1,
@@ -149,15 +127,7 @@ func @condBranchDynamicTypeNested(
   br ^bb6(%arg1 : memref<?xf32>)
 ^bb2(%0: index):
   %1 = alloc(%0) : memref<?xf32>
-  linalg.generic {
-    indexing_maps = [#map0, #map0],
-    iterator_types = ["parallel"]}
-    ins(%arg1: memref<?xf32>)
-   outs(%1: memref<?xf32>) {
-  ^bb0(%gen1_arg0: f32, %gen1_arg1: f32):
-    %tmp1 = exp %gen1_arg0 : f32
-    linalg.yield %tmp1 : f32
-  }
+  test.buffer_based in(%arg1: memref<?xf32>) out(%1: memref<?xf32>)
   cond_br %arg0, ^bb3, ^bb4
 ^bb3:
   br ^bb5(%1 : memref<?xf32>)
@@ -168,7 +138,7 @@ func @condBranchDynamicTypeNested(
 ^bb6(%3: memref<?xf32>):
   br ^bb7(%3 : memref<?xf32>)
 ^bb7(%4: memref<?xf32>):
-  "linalg.copy"(%4, %arg2) : (memref<?xf32>, memref<?xf32>) -> ()
+  test.copy(%4, %arg2) : (memref<?xf32>, memref<?xf32>)
   return
 }
 
@@ -180,7 +150,7 @@ func @condBranchDynamicTypeNested(
 // CHECK-NEXT: br ^bb6
 //      CHECK: ^bb2(%[[IDX:.*]]:{{.*}})
 // CHECK-NEXT: %[[ALLOC1:.*]] = alloc(%[[IDX]])
-// CHECK-NEXT: linalg.generic
+// CHECK-NEXT: test.buffer_based
 //      CHECK: cond_br
 //      CHECK: ^bb3:
 // CHECK-NEXT: br ^bb5(%[[ALLOC1]]{{.*}})
@@ -195,7 +165,7 @@ func @condBranchDynamicTypeNested(
 // CHECK-NEXT: ^bb6(%[[ALLOC4:.*]]:{{.*}})
 // CHECK-NEXT: br ^bb7(%[[ALLOC4]]{{.*}})
 // CHECK-NEXT: ^bb7(%[[ALLOC5:.*]]:{{.*}})
-//      CHECK: linalg.copy(%[[ALLOC5]],
+//      CHECK: test.copy(%[[ALLOC5]],
 // CHECK-NEXT: dealloc %[[ALLOC4]]
 // CHECK-NEXT: return
 
@@ -226,25 +196,15 @@ func @emptyUsesValue(%arg0: memref<4xf32>) {
 // exit block after CopyOp since %1 is an alias for %0 and %arg1. Furthermore,
 // we have to insert a copy and an alloc in the beginning of the function.
 
-#map0 = affine_map<(d0) -> (d0)>
-
 // CHECK-LABEL: func @criticalEdge
 func @criticalEdge(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) {
   cond_br %arg0, ^bb1, ^bb2(%arg1 : memref<2xf32>)
 ^bb1:
   %0 = alloc() : memref<2xf32>
-  linalg.generic {
-    indexing_maps = [#map0, #map0],
-    iterator_types = ["parallel"]}
-    ins(%arg1: memref<2xf32>)
-   outs(%0: memref<2xf32>) {
-  ^bb0(%gen1_arg0: f32, %gen1_arg1: f32):
-    %tmp1 = exp %gen1_arg0 : f32
-    linalg.yield %tmp1 : f32
-  }
+  test.buffer_based in(%arg1: memref<2xf32>) out(%0: memref<2xf32>)
   br ^bb2(%0 : memref<2xf32>)
 ^bb2(%1: memref<2xf32>):
-  "linalg.copy"(%1, %arg2) : (memref<2xf32>, memref<2xf32>) -> ()
+  test.copy(%1, %arg2) : (memref<2xf32>, memref<2xf32>)
   return
 }
 
@@ -252,11 +212,11 @@ func @criticalEdge(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) {
 // CHECK-NEXT: linalg.copy
 // CHECK-NEXT: cond_br
 //      CHECK: %[[ALLOC1:.*]] = alloc()
-// CHECK-NEXT: linalg.generic
+// CHECK-NEXT: test.buffer_based
 //      CHECK: %[[ALLOC2:.*]] = alloc()
 // CHECK-NEXT: linalg.copy
 // CHECK-NEXT: dealloc %[[ALLOC1]]
-//      CHECK: linalg.copy
+//      CHECK: test.copy
 // CHECK-NEXT: dealloc
 // CHECK-NEXT: return
 
@@ -271,25 +231,15 @@ func @criticalEdge(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) {
 // BufferDeallocation expected behavior: It only inserts a DeallocOp at the
 // exit block after CopyOp since %1 is an alias for %0 and %arg1.
 
-#map0 = affine_map<(d0) -> (d0)>
-
 // CHECK-LABEL: func @invCriticalEdge
 func @invCriticalEdge(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) {
   %0 = alloc() : memref<2xf32>
-  linalg.generic {
-    indexing_maps = [#map0, #map0],
-    iterator_types = ["parallel"]}
-    ins(%arg1: memref<2xf32>)
-   outs(%0: memref<2xf32>) {
-  ^bb0(%gen1_arg0: f32, %gen1_arg1: f32):
-    %tmp1 = exp %gen1_arg0 : f32
-    linalg.yield %tmp1 : f32
-  }
+  test.buffer_based in(%arg1: memref<2xf32>) out(%0: memref<2xf32>)
   cond_br %arg0, ^bb1, ^bb2(%arg1 : memref<2xf32>)
 ^bb1:
   br ^bb2(%0 : memref<2xf32>)
 ^bb2(%1: memref<2xf32>):
-  "linalg.copy"(%1, %arg2) : (memref<2xf32>, memref<2xf32>) -> ()
+  test.copy(%1, %arg2) : (memref<2xf32>, memref<2xf32>)
   return
 }
 
@@ -306,23 +256,13 @@ func @invCriticalEdge(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) {
 //    bb3 <- Initial position of the second AllocOp
 // BufferDeallocation expected behavior: It only inserts two missing
 // DeallocOps in the exit block. %5 is an alias for %0. Therefore, the
-// DeallocOp for %0 should occur after the last GenericOp. The Dealloc for %7
-// should happen after the CopyOp.
-
-#map0 = affine_map<(d0) -> (d0)>
+// DeallocOp for %0 should occur after the last BufferBasedOp. The Dealloc for
+// %7 should happen after CopyOp.
 
 // CHECK-LABEL: func @ifElse
 func @ifElse(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) {
   %0 = alloc() : memref<2xf32>
-  linalg.generic {
-    indexing_maps = [#map0, #map0],
-    iterator_types = ["parallel"]}
-    ins(%arg1: memref<2xf32>)
-   outs(%0: memref<2xf32>) {
-  ^bb0(%gen1_arg0: f32, %gen1_arg1: f32):
-    %tmp1 = exp %gen1_arg0 : f32
-    linalg.yield %tmp1 : f32
-  }
+  test.buffer_based in(%arg1: memref<2xf32>) out(%0: memref<2xf32>)
   cond_br %arg0,
     ^bb1(%arg1, %0 : memref<2xf32>, memref<2xf32>),
     ^bb2(%0, %arg1 : memref<2xf32>, memref<2xf32>)
@@ -332,25 +272,17 @@ func @ifElse(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) {
   br ^bb3(%3, %4 : memref<2xf32>, memref<2xf32>)
 ^bb3(%5: memref<2xf32>, %6: memref<2xf32>):
   %7 = alloc() : memref<2xf32>
-  linalg.generic {
-    indexing_maps = [#map0, #map0],
-    iterator_types = ["parallel"]}
-    ins(%5: memref<2xf32>)
-   outs(%7: memref<2xf32>) {
-  ^bb0(%gen2_arg0: f32, %gen2_arg1: f32):
-    %tmp2 = exp %gen2_arg0 : f32
-    linalg.yield %tmp2 : f32
-  }
-  "linalg.copy"(%7, %arg2) : (memref<2xf32>, memref<2xf32>) -> ()
+  test.buffer_based in(%5: memref<2xf32>) out(%7: memref<2xf32>)
+  test.copy(%7, %arg2) : (memref<2xf32>, memref<2xf32>)
   return
 }
 
 // CHECK-NEXT: %[[FIRST_ALLOC:.*]] = alloc()
-// CHECK-NEXT: linalg.generic
+// CHECK-NEXT: test.buffer_based
 //      CHECK: %[[SECOND_ALLOC:.*]] = alloc()
-// CHECK-NEXT: linalg.generic
+// CHECK-NEXT: test.buffer_based
 //      CHECK: dealloc %[[FIRST_ALLOC]]
-//      CHECK: linalg.copy
+//      CHECK: test.copy
 // CHECK-NEXT: dealloc %[[SECOND_ALLOC]]
 // CHECK-NEXT: return
 
@@ -365,20 +297,10 @@ func @ifElse(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) {
 // BufferDeallocation expected behavior: It only inserts a missing DeallocOp
 // in the exit block since %5 or %6 are the latest aliases of %0.
 
-#map0 = affine_map<(d0) -> (d0)>
-
 // CHECK-LABEL: func @ifElseNoUsers
 func @ifElseNoUsers(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) {
   %0 = alloc() : memref<2xf32>
-  linalg.generic {
-    indexing_maps = [#map0, #map0],
-    iterator_types = ["parallel"]}
-    ins(%arg1: memref<2xf32>)
-   outs(%0: memref<2xf32>) {
-  ^bb0(%gen1_arg0: f32, %gen1_arg1: f32):
-    %tmp1 = exp %gen1_arg0 : f32
-    linalg.yield %tmp1 : f32
-  }
+  test.buffer_based in(%arg1: memref<2xf32>) out(%0: memref<2xf32>)
   cond_br %arg0,
     ^bb1(%arg1, %0 : memref<2xf32>, memref<2xf32>),
     ^bb2(%0, %arg1 : memref<2xf32>, memref<2xf32>)
@@ -387,12 +309,12 @@ func @ifElseNoUsers(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) {
 ^bb2(%3: memref<2xf32>, %4: memref<2xf32>):
   br ^bb3(%3, %4 : memref<2xf32>, memref<2xf32>)
 ^bb3(%5: memref<2xf32>, %6: memref<2xf32>):
-  "linalg.copy"(%arg1, %arg2) : (memref<2xf32>, memref<2xf32>) -> ()
+  test.copy(%arg1, %arg2) : (memref<2xf32>, memref<2xf32>)
   return
 }
 
 // CHECK-NEXT: %[[FIRST_ALLOC:.*]] = alloc()
-//      CHECK: linalg.copy
+//      CHECK: test.copy
 // CHECK-NEXT: dealloc %[[FIRST_ALLOC]]
 // CHECK-NEXT: return
 
@@ -410,20 +332,10 @@ func @ifElseNoUsers(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) {
 // BufferDeallocation expected behavior: Two missing DeallocOps should be
 // inserted in the exit block.
 
-#map0 = affine_map<(d0) -> (d0)>
-
 // CHECK-LABEL: func @ifElseNested
 func @ifElseNested(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) {
   %0 = alloc() : memref<2xf32>
-  linalg.generic {
-    indexing_maps = [#map0, #map0],
-    iterator_types = ["parallel"]}
-    ins(%arg1: memref<2xf32>)
-   outs(%0: memref<2xf32>) {
-  ^bb0(%gen1_arg0: f32, %gen1_arg1: f32):
-    %tmp1 = exp %gen1_arg0 : f32
-    linalg.yield %tmp1 : f32
-  }
+  test.buffer_based in(%arg1: memref<2xf32>) out(%0: memref<2xf32>)
   cond_br %arg0,
     ^bb1(%arg1, %0 : memref<2xf32>, memref<2xf32>),
     ^bb2(%0, %arg1 : memref<2xf32>, memref<2xf32>)
@@ -437,25 +349,17 @@ func @ifElseNested(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) {
   br ^bb5(%3, %6 : memref<2xf32>, memref<2xf32>)
 ^bb5(%7: memref<2xf32>, %8: memref<2xf32>):
   %9 = alloc() : memref<2xf32>
-  linalg.generic {
-    indexing_maps = [#map0, #map0],
-    iterator_types = ["parallel"]}
-    ins(%7: memref<2xf32>)
-   outs(%9: memref<2xf32>) {
-  ^bb0(%gen2_arg0: f32, %gen2_arg1: f32):
-    %tmp2 = exp %gen2_arg0 : f32
-    linalg.yield %tmp2 : f32
-  }
-  "linalg.copy"(%9, %arg2) : (memref<2xf32>, memref<2xf32>) -> ()
+  test.buffer_based in(%7: memref<2xf32>) out(%9: memref<2xf32>)
+  test.copy(%9, %arg2) : (memref<2xf32>, memref<2xf32>)
   return
 }
 
 // CHECK-NEXT: %[[FIRST_ALLOC:.*]] = alloc()
-// CHECK-NEXT: linalg.generic
+// CHECK-NEXT: test.buffer_based
 //      CHECK: %[[SECOND_ALLOC:.*]] = alloc()
-// CHECK-NEXT: linalg.generic
+// CHECK-NEXT: test.buffer_based
 //      CHECK: dealloc %[[FIRST_ALLOC]]
-//      CHECK: linalg.copy
+//      CHECK: test.copy
 // CHECK-NEXT: dealloc %[[SECOND_ALLOC]]
 // CHECK-NEXT: return
 
@@ -463,41 +367,22 @@ func @ifElseNested(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) {
 
 // Test Case: Dead operations in a single block.
 // BufferDeallocation expected behavior: It only inserts the two missing
-// DeallocOps after the last GenericOp.
-
-#map0 = affine_map<(d0) -> (d0)>
+// DeallocOps after the last BufferBasedOp.
 
 // CHECK-LABEL: func @redundantOperations
 func @redundantOperations(%arg0: memref<2xf32>) {
   %0 = alloc() : memref<2xf32>
-  linalg.generic {
-    indexing_maps = [#map0, #map0],
-    iterator_types = ["parallel"]}
-    ins(%arg0: memref<2xf32>)
-   outs(%0: memref<2xf32>) {
-  ^bb0(%gen1_arg0: f32, %gen1_arg1: f32):
-    %tmp1 = exp %gen1_arg0 : f32
-    linalg.yield %tmp1 : f32
-  }
+  test.buffer_based in(%arg0: memref<2xf32>) out(%0: memref<2xf32>)
   %1 = alloc() : memref<2xf32>
-  linalg.generic {
-    indexing_maps = [#map0, #map0],
-    iterator_types = ["parallel"]}
-    ins(%0: memref<2xf32>)
-   outs(%1: memref<2xf32>) {
-  ^bb0(%gen2_arg0: f32, %gen2_arg1: f32):
-    %tmp2 = exp %gen2_arg0 : f32
-    linalg.yield %tmp2 : f32
-  }
+  test.buffer_based in(%0: memref<2xf32>) out(%1: memref<2xf32>)
   return
 }
 
 //      CHECK: (%[[ARG0:.*]]: {{.*}})
 // CHECK-NEXT: %[[FIRST_ALLOC:.*]] = alloc()
-// CHECK-NEXT: linalg.generic {{.*}} ins(%[[ARG0]]{{.*}}outs(%[[FIRST_ALLOC]]
+// CHECK-NEXT: test.buffer_based in(%[[ARG0]]{{.*}}out(%[[FIRST_ALLOC]]
 //      CHECK: %[[SECOND_ALLOC:.*]] = alloc()
-// CHECK-NEXT: linalg.generic {{.*}} ins
-// CHECK-SAME: (%[[FIRST_ALLOC]]{{.*}}outs(%[[SECOND_ALLOC]]
+// CHECK-NEXT: test.buffer_based in(%[[FIRST_ALLOC]]{{.*}}out(%[[SECOND_ALLOC]]
 //      CHECK: dealloc
 // CHECK-NEXT: dealloc
 // CHECK-NEXT: return
@@ -515,8 +400,6 @@ func @redundantOperations(%arg0: memref<2xf32>) {
 // inserted in the respective block of the allocs. The copy is freed in the exit
 // block.
 
-#map0 = affine_map<(d0) -> (d0)>
-
 // CHECK-LABEL: func @moving_alloc_and_inserting_missing_dealloc
 func @moving_alloc_and_inserting_missing_dealloc(
   %cond: i1,
@@ -525,30 +408,14 @@ func @moving_alloc_and_inserting_missing_dealloc(
   cond_br %cond, ^bb1, ^bb2
 ^bb1:
   %0 = alloc() : memref<2xf32>
-  linalg.generic {
-    indexing_maps = [#map0, #map0],
-    iterator_types = ["parallel"]}
-    ins(%arg0: memref<2xf32>)
-   outs(%0: memref<2xf32>) {
-  ^bb0(%gen1_arg0: f32, %gen1_arg1: f32):
-    %tmp1 = exp %gen1_arg0 : f32
-    linalg.yield %tmp1 : f32
-  }
+  test.buffer_based in(%arg0: memref<2xf32>) out(%0: memref<2xf32>)
   br ^exit(%0 : memref<2xf32>)
 ^bb2:
   %1 = alloc() : memref<2xf32>
-  linalg.generic {
-    indexing_maps = [#map0, #map0],
-    iterator_types = ["parallel"]}
-    ins(%arg0: memref<2xf32>)
-   outs(%1: memref<2xf32>) {
-  ^bb0(%gen2_arg0: f32, %gen2_arg1: f32):
-    %tmp2 = exp %gen2_arg0 : f32
-    linalg.yield %tmp2 : f32
-  }
+  test.buffer_based in(%arg0: memref<2xf32>) out(%1: memref<2xf32>)
   br ^exit(%1 : memref<2xf32>)
 ^exit(%arg2: memref<2xf32>):
-  "linalg.copy"(%arg2, %arg1) : (memref<2xf32>, memref<2xf32>) -> ()
+  test.copy(%arg2, %arg1) : (memref<2xf32>, memref<2xf32>)
   return
 }
 
@@ -556,20 +423,20 @@ func @moving_alloc_and_inserting_missing_dealloc(
 //      CHECK: ^bb1
 //      CHECK: ^bb1
 //      CHECK: %[[ALLOC0:.*]] = alloc()
-// CHECK-NEXT: linalg.generic
+// CHECK-NEXT: test.buffer_based
 //      CHECK: %[[ALLOC1:.*]] = alloc()
 // CHECK-NEXT: linalg.copy
 // CHECK-NEXT: dealloc %[[ALLOC0]]
 // CHECK-NEXT: br ^bb3(%[[ALLOC1]]
 // CHECK-NEXT: ^bb2
 // CHECK-NEXT: %[[ALLOC2:.*]] = alloc()
-// CHECK-NEXT: linalg.generic
+// CHECK-NEXT: test.buffer_based
 //      CHECK: %[[ALLOC3:.*]] = alloc()
 // CHECK-NEXT: linalg.copy
 // CHECK-NEXT: dealloc %[[ALLOC2]]
 // CHECK-NEXT: br ^bb3(%[[ALLOC3]]
 // CHECK-NEXT: ^bb3(%[[ALLOC4:.*]]:{{.*}})
-//      CHECK: linalg.copy
+//      CHECK: test.copy
 // CHECK-NEXT: dealloc %[[ALLOC4]]
 // CHECK-NEXT: return
 
@@ -585,8 +452,6 @@ func @moving_alloc_and_inserting_missing_dealloc(
 // BufferDeallocation expected behavior: The existing DeallocOp should be
 // moved to exit block.
 
-#map0 = affine_map<(d0) -> (d0)>
-
 // CHECK-LABEL: func @moving_invalid_dealloc_op_complex
 func @moving_invalid_dealloc_op_complex(
   %cond: i1,
@@ -597,25 +462,17 @@ func @moving_invalid_dealloc_op_complex(
 ^bb1:
   br ^exit(%arg0 : memref<2xf32>)
 ^bb2:
-  linalg.generic {
-    indexing_maps = [#map0, #map0],
-    iterator_types = ["parallel"]}
-    ins(%arg0: memref<2xf32>)
-   outs(%1: memref<2xf32>) {
-  ^bb0(%gen1_arg0: f32, %gen1_arg1: f32):
-    %tmp1 = exp %gen1_arg0 : f32
-    linalg.yield %tmp1 : f32
-  }
+  test.buffer_based in(%arg0: memref<2xf32>) out(%1: memref<2xf32>)
   dealloc %1 : memref<2xf32>
   br ^exit(%1 : memref<2xf32>)
 ^exit(%arg2: memref<2xf32>):
-  "linalg.copy"(%arg2, %arg1) : (memref<2xf32>, memref<2xf32>) -> ()
+  test.copy(%arg2, %arg1) : (memref<2xf32>, memref<2xf32>)
   return
 }
 
 // CHECK-NEXT: %[[ALLOC0:.*]] = alloc()
 // CHECK-NEXT: cond_br
-//      CHECK: linalg.copy
+//      CHECK: test.copy
 // CHECK-NEXT: dealloc %[[ALLOC0]]
 // CHECK-NEXT: return
 
@@ -623,28 +480,18 @@ func @moving_invalid_dealloc_op_complex(
 
 // Test Case: Inserting missing DeallocOp in a single block.
 
-#map0 = affine_map<(d0) -> (d0)>
-
 // CHECK-LABEL: func @inserting_missing_dealloc_simple
 func @inserting_missing_dealloc_simple(
   %arg0 : memref<2xf32>,
   %arg1: memref<2xf32>) {
   %0 = alloc() : memref<2xf32>
-  linalg.generic {
-    indexing_maps = [#map0, #map0],
-    iterator_types = ["parallel"]}
-    ins(%arg0: memref<2xf32>)
-   outs(%0: memref<2xf32>) {
-  ^bb0(%gen1_arg0: f32, %gen1_arg1: f32):
-    %tmp1 = exp %gen1_arg0 : f32
-    linalg.yield %tmp1 : f32
-  }
-  "linalg.copy"(%0, %arg1) : (memref<2xf32>, memref<2xf32>) -> ()
+  test.buffer_based in(%arg0: memref<2xf32>) out(%0: memref<2xf32>)
+  test.copy(%0, %arg1) : (memref<2xf32>, memref<2xf32>)
   return
 }
 
 // CHECK-NEXT: %[[ALLOC0:.*]] = alloc()
-//      CHECK: linalg.copy
+//      CHECK: test.copy
 // CHECK-NEXT: dealloc %[[ALLOC0]]
 
 // -----
@@ -652,39 +499,27 @@ func @inserting_missing_dealloc_simple(
 // Test Case: Moving invalid DeallocOp (there is a user after deallocation) in a
 // single block.
 
-#map0 = affine_map<(d0) -> (d0)>
-
 // CHECK-LABEL: func @moving_invalid_dealloc_op
 func @moving_invalid_dealloc_op(%arg0 : memref<2xf32>, %arg1: memref<2xf32>) {
   %0 = alloc() : memref<2xf32>
-  linalg.generic {
-    indexing_maps = [#map0, #map0],
-    iterator_types = ["parallel"]}
-    ins(%arg0: memref<2xf32>)
-   outs(%0: memref<2xf32>) {
-  ^bb0(%gen1_arg0: f32, %gen1_arg1: f32):
-    %tmp1 = exp %gen1_arg0 : f32
-    linalg.yield %tmp1 : f32
-  }
+  test.buffer_based in(%arg0: memref<2xf32>) out(%0: memref<2xf32>)
   dealloc %0 : memref<2xf32>
-  "linalg.copy"(%0, %arg1) : (memref<2xf32>, memref<2xf32>) -> ()
+  test.copy(%0, %arg1) : (memref<2xf32>, memref<2xf32>)
   return
 }
 
 // CHECK-NEXT: %[[ALLOC0:.*]] = alloc()
-//      CHECK: linalg.copy
+//      CHECK: test.copy
 // CHECK-NEXT: dealloc %[[ALLOC0]]
 
 // -----
 
-// Test Case: Nested regions - This test defines a GenericOp inside the region
-// of another GenericOp.
-// BufferDeallocation expected behavior: The AllocOp of inner GenericOp should
-// remain inside the region of outer GenericOp and it should insert the missing
-// DeallocOp in the same region. The missing DeallocOp should be inserted after
-// Linalg.Copy.
-
-#map0 = affine_map<(d0) -> (d0)>
+// Test Case: Nested regions - This test defines a BufferBasedOp inside the
+// region of a RegionBufferBasedOp.
+// BufferDeallocation expected behavior: The AllocOp for the BufferBasedOp
+// should remain inside the region of the RegionBufferBasedOp and it should insert
+// the missing DeallocOp in the same region. The missing DeallocOp should be
+// inserted after CopyOp.
 
 // CHECK-LABEL: func @nested_regions_and_cond_branch
 func @nested_regions_and_cond_branch(
@@ -696,28 +531,16 @@ func @nested_regions_and_cond_branch(
   br ^bb3(%arg1 : memref<2xf32>)
 ^bb2:
   %0 = alloc() : memref<2xf32>
-  linalg.generic {
-    indexing_maps = [#map0, #map0],
-    iterator_types = ["parallel"]}
-    ins(%arg1: memref<2xf32>)
-   outs(%0: memref<2xf32>) {
+  test.region_buffer_based in(%arg1: memref<2xf32>) out(%0: memref<2xf32>) {
   ^bb0(%gen1_arg0: f32, %gen1_arg1: f32):
     %1 = alloc() : memref<2xf32>
-    linalg.generic {
-      indexing_maps = [#map0, #map0],
-      iterator_types = ["parallel"]}
-      ins(%arg1: memref<2xf32>)
-     outs(%1: memref<2xf32>) {
-    ^bb0(%gen2_arg0: f32, %gen2_arg1: f32):
-      %tmp2 = exp %gen2_arg0 : f32
-      linalg.yield %tmp2 : f32
-    }
+    test.buffer_based in(%arg1: memref<2xf32>) out(%1: memref<2xf32>)
     %tmp1 = exp %gen1_arg0 : f32
-    linalg.yield %tmp1 : f32
+    test.region_yield %tmp1 : f32
   }
   br ^bb3(%0 : memref<2xf32>)
 ^bb3(%1: memref<2xf32>):
-  "linalg.copy"(%1, %arg2) : (memref<2xf32>, memref<2xf32>) -> ()
+  test.copy(%1, %arg2) : (memref<2xf32>, memref<2xf32>)
   return
 }
 //      CHECK: (%[[cond:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %{{.*}}: {{.*}})
@@ -726,16 +549,16 @@ func @nested_regions_and_cond_branch(
 // CHECK-NEXT:   linalg.copy(%[[ARG1]], %[[ALLOC0]])
 //      CHECK: ^[[BB2]]:
 //      CHECK:   %[[ALLOC1:.*]] = alloc()
-// CHECK-NEXT:   linalg.generic {{{.*}}} ins(%[[ARG1]]{{.*}}outs(%[[ALLOC1]]
+// CHECK-NEXT:   test.region_buffer_based in(%[[ARG1]]{{.*}}out(%[[ALLOC1]]
 //      CHECK:     %[[ALLOC2:.*]] = alloc()
-// CHECK-NEXT:     linalg.generic {{{.*}}} ins(%[[ARG1]]{{.*}}outs(%[[ALLOC2]]
+// CHECK-NEXT:     test.buffer_based in(%[[ARG1]]{{.*}}out(%[[ALLOC2]]
 //      CHECK:     dealloc %[[ALLOC2]]
 // CHECK-NEXT:     %{{.*}} = exp
 //      CHECK:   %[[ALLOC3:.*]] = alloc()
 // CHECK-NEXT:   linalg.copy(%[[ALLOC1]], %[[ALLOC3]])
 // CHECK-NEXT:   dealloc %[[ALLOC1]]
 //      CHECK:  ^[[BB3:.*]]({{.*}}):
-//      CHECK:  linalg.copy
+//      CHECK:  test.copy
 // CHECK-NEXT:  dealloc
 
 // -----
@@ -743,9 +566,7 @@ func @nested_regions_and_cond_branch(
 // Test Case: buffer deallocation escaping
 // BufferDeallocation expected behavior: It must not dealloc %arg1 and %x
 // since they are operands of return operation and should escape from
-// deallocating. It should dealloc %y after linalg.copy.
-
-#map0 = affine_map<(d0) -> (d0)>
+// deallocating. It should dealloc %y after CopyOp.
 
 // CHECK-LABEL: func @memref_in_function_results
 func @memref_in_function_results(
@@ -754,21 +575,15 @@ func @memref_in_function_results(
   %arg2: memref<5xf32>) -> (memref<10xf32>, memref<15xf32>) {
   %x = alloc() : memref<15xf32>
   %y = alloc() : memref<5xf32>
-  linalg.generic {indexing_maps = [#map0, #map0], iterator_types = ["parallel"]}
-      ins(%arg0: memref<5xf32>)
-     outs(%y: memref<5xf32>) {
-  ^bb0(%arg3: f32, %arg4: f32):
-    %2 = exp %arg3 : f32
-    linalg.yield %2 : f32
-  }
-  linalg.copy(%y, %arg2) : memref<5xf32>, memref<5xf32>
+  test.buffer_based in(%arg0: memref<5xf32>) out(%y: memref<5xf32>)
+  test.copy(%y, %arg2) : (memref<5xf32>, memref<5xf32>)
   return %arg1, %x : memref<10xf32>, memref<15xf32>
 }
 //      CHECK: (%[[ARG0:.*]]: memref<5xf32>, %[[ARG1:.*]]: memref<10xf32>,
 // CHECK-SAME: %[[RESULT:.*]]: memref<5xf32>)
 //      CHECK: %[[X:.*]] = alloc()
 //      CHECK: %[[Y:.*]] = alloc()
-//      CHECK: linalg.copy
+//      CHECK: test.copy
 //      CHECK: dealloc %[[Y]]
 //      CHECK: return %[[ARG1]], %[[X]]
 
@@ -877,21 +692,19 @@ func @subview(%arg0 : index, %arg1 : index, %arg2 : memref<?x?xf32>) {
   %1 = subview %0[%arg0, %arg1][%arg0, %arg1][%arg0, %arg1] :
     memref<64x4xf32, offset: 0, strides: [4, 1]>
   to memref<?x?xf32, offset: ?, strides: [?, ?]>
-  "linalg.copy"(%1, %arg2) :
-    (memref<?x?xf32, offset: ?, strides: [?, ?]>, memref<?x?xf32>) -> ()
+  test.copy(%1, %arg2) :
+    (memref<?x?xf32, offset: ?, strides: [?, ?]>, memref<?x?xf32>)
   return
 }
 
 // CHECK-NEXT: %[[ALLOC:.*]] = alloc()
 // CHECK-NEXT: subview
-// CHECK-NEXT: linalg.copy
+// CHECK-NEXT: test.copy
 // CHECK-NEXT: dealloc %[[ALLOC]]
 // CHECK-NEXT: return
 
 // -----
 
-#map0 = affine_map<(d0) -> (d0)>
-
 // Test Case: In the presence of AllocaOps only the AllocOps have to be freed.
 // Therefore, all allocas are not handled.
 
@@ -902,18 +715,10 @@ func @condBranchAlloca(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) {
   br ^bb3(%arg1 : memref<2xf32>)
 ^bb2:
   %0 = alloca() : memref<2xf32>
-  linalg.generic {
-    indexing_maps = [#map0, #map0],
-    iterator_types = ["parallel"]}
-    ins(%arg1: memref<2xf32>)
-   outs(%0: memref<2xf32>) {
-  ^bb0(%gen1_arg0: f32, %gen1_arg1: f32):
-    %tmp1 = exp %gen1_arg0 : f32
-    linalg.yield %tmp1 : f32
-  }
+  test.buffer_based in(%arg1: memref<2xf32>) out(%0: memref<2xf32>)
   br ^bb3(%0 : memref<2xf32>)
 ^bb3(%1: memref<2xf32>):
-  "linalg.copy"(%1, %arg2) : (memref<2xf32>, memref<2xf32>) -> ()
+  test.copy(%1, %arg2) : (memref<2xf32>, memref<2xf32>)
   return
 }
 
@@ -921,13 +726,11 @@ func @condBranchAlloca(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) {
 //      CHECK: %[[ALLOCA:.*]] = alloca()
 //      CHECK: br ^bb3(%[[ALLOCA:.*]])
 // CHECK-NEXT: ^bb3
-// CHECK-NEXT: linalg.copy
+// CHECK-NEXT: test.copy
 // CHECK-NEXT: return
 
 // -----
 
-#map0 = affine_map<(d0) -> (d0)>
-
 // Test Case: In the presence of AllocaOps only the AllocOps have to be freed.
 // Therefore, all allocas are not handled. In this case, only alloc %0 has a
 // dealloc.
@@ -935,15 +738,7 @@ func @condBranchAlloca(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) {
 // CHECK-LABEL: func @ifElseAlloca
 func @ifElseAlloca(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) {
   %0 = alloc() : memref<2xf32>
-  linalg.generic {
-    indexing_maps = [#map0, #map0],
-    iterator_types = ["parallel"]}
-    ins(%arg1: memref<2xf32>)
-   outs(%0: memref<2xf32>) {
-  ^bb0(%gen1_arg0: f32, %gen1_arg1: f32):
-    %tmp1 = exp %gen1_arg0 : f32
-    linalg.yield %tmp1 : f32
-  }
+  test.buffer_based in(%arg1: memref<2xf32>) out(%0: memref<2xf32>)
   cond_br %arg0,
     ^bb1(%arg1, %0 : memref<2xf32>, memref<2xf32>),
     ^bb2(%0, %arg1 : memref<2xf32>, memref<2xf32>)
@@ -953,46 +748,28 @@ func @ifElseAlloca(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) {
   br ^bb3(%3, %4 : memref<2xf32>, memref<2xf32>)
 ^bb3(%5: memref<2xf32>, %6: memref<2xf32>):
   %7 = alloca() : memref<2xf32>
-  linalg.generic {
-    indexing_maps = [#map0, #map0],
-    iterator_types = ["parallel"]}
-    ins(%5: memref<2xf32>)
-   outs(%7: memref<2xf32>) {
-  ^bb0(%gen2_arg0: f32, %gen2_arg1: f32):
-    %tmp2 = exp %gen2_arg0 : f32
-    linalg.yield %tmp2 : f32
-  }
-  "linalg.copy"(%7, %arg2) : (memref<2xf32>, memref<2xf32>) -> ()
+  test.buffer_based in(%5: memref<2xf32>) out(%7: memref<2xf32>)
+  test.copy(%7, %arg2) : (memref<2xf32>, memref<2xf32>)
   return
 }
 
 // CHECK-NEXT: %[[ALLOC:.*]] = alloc()
-// CHECK-NEXT: linalg.generic
+// CHECK-NEXT: test.buffer_based
 //      CHECK: %[[ALLOCA:.*]] = alloca()
-// CHECK-NEXT: linalg.generic
+// CHECK-NEXT: test.buffer_based
 //      CHECK: dealloc %[[ALLOC]]
-//      CHECK: linalg.copy
+//      CHECK: test.copy
 // CHECK-NEXT: return
 
 // -----
 
-#map0 = affine_map<(d0) -> (d0)>
-
 // CHECK-LABEL: func @ifElseNestedAlloca
 func @ifElseNestedAlloca(
   %arg0: i1,
   %arg1: memref<2xf32>,
   %arg2: memref<2xf32>) {
   %0 = alloca() : memref<2xf32>
-  linalg.generic {
-    indexing_maps = [#map0, #map0],
-    iterator_types = ["parallel"]}
-    ins(%arg1: memref<2xf32>)
-   outs(%0: memref<2xf32>) {
-  ^bb0(%gen1_arg0: f32, %gen1_arg1: f32):
-    %tmp1 = exp %gen1_arg0 : f32
-    linalg.yield %tmp1 : f32
-  }
+  test.buffer_based in(%arg1: memref<2xf32>) out(%0: memref<2xf32>)
   cond_br %arg0,
     ^bb1(%arg1, %0 : memref<2xf32>, memref<2xf32>),
     ^bb2(%0, %arg1 : memref<2xf32>, memref<2xf32>)
@@ -1006,31 +783,21 @@ func @ifElseNestedAlloca(
   br ^bb5(%3, %6 : memref<2xf32>, memref<2xf32>)
 ^bb5(%7: memref<2xf32>, %8: memref<2xf32>):
   %9 = alloc() : memref<2xf32>
-  linalg.generic {
-    indexing_maps = [#map0, #map0],
-    iterator_types = ["parallel"]}
-    ins(%7: memref<2xf32>)
-   outs(%9: memref<2xf32>) {
-  ^bb0(%gen2_arg0: f32, %gen2_arg1: f32):
-    %tmp2 = exp %gen2_arg0 : f32
-    linalg.yield %tmp2 : f32
-  }
-  "linalg.copy"(%9, %arg2) : (memref<2xf32>, memref<2xf32>) -> ()
+  test.buffer_based in(%7: memref<2xf32>) out(%9: memref<2xf32>)
+  test.copy(%9, %arg2) : (memref<2xf32>, memref<2xf32>)
   return
 }
 
 // CHECK-NEXT: %[[ALLOCA:.*]] = alloca()
-// CHECK-NEXT: linalg.generic
+// CHECK-NEXT: test.buffer_based
 //      CHECK: %[[ALLOC:.*]] = alloc()
-// CHECK-NEXT: linalg.generic
-//      CHECK: linalg.copy
+// CHECK-NEXT: test.buffer_based
+//      CHECK: test.copy
 // CHECK-NEXT: dealloc %[[ALLOC]]
 // CHECK-NEXT: return
 
 // -----
 
-#map0 = affine_map<(d0) -> (d0)>
-
 // CHECK-LABEL: func @nestedRegionsAndCondBranchAlloca
 func @nestedRegionsAndCondBranchAlloca(
   %arg0: i1,
@@ -1041,28 +808,16 @@ func @nestedRegionsAndCondBranchAlloca(
   br ^bb3(%arg1 : memref<2xf32>)
 ^bb2:
   %0 = alloc() : memref<2xf32>
-  linalg.generic {
-    indexing_maps = [#map0, #map0],
-    iterator_types = ["parallel"]}
-    ins(%arg1: memref<2xf32>)
-   outs(%0: memref<2xf32>) {
+  test.region_buffer_based in(%arg1: memref<2xf32>) out(%0: memref<2xf32>) {
   ^bb0(%gen1_arg0: f32, %gen1_arg1: f32):
     %1 = alloca() : memref<2xf32>
-    linalg.generic {
-      indexing_maps = [#map0, #map0],
-      iterator_types = ["parallel"]}
-    ins(%arg1: memref<2xf32>)
-   outs(%1: memref<2xf32>) {
-    ^bb0(%gen2_arg0: f32, %gen2_arg1: f32):
-      %tmp2 = exp %gen2_arg0 : f32
-      linalg.yield %tmp2 : f32
-    }
+    test.buffer_based in(%arg1: memref<2xf32>) out(%1: memref<2xf32>)
     %tmp1 = exp %gen1_arg0 : f32
-    linalg.yield %tmp1 : f32
+    test.region_yield %tmp1 : f32
   }
   br ^bb3(%0 : memref<2xf32>)
 ^bb3(%1: memref<2xf32>):
-  "linalg.copy"(%1, %arg2) : (memref<2xf32>, memref<2xf32>) -> ()
+  test.copy(%1, %arg2) : (memref<2xf32>, memref<2xf32>)
   return
 }
 //      CHECK: (%[[cond:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %{{.*}}: {{.*}})
@@ -1072,15 +827,15 @@ func @nestedRegionsAndCondBranchAlloca(
 // CHECK-NEXT: linalg.copy
 //      CHECK: ^[[BB2]]:
 //      CHECK:   %[[ALLOC1:.*]] = alloc()
-// CHECK-NEXT:   linalg.generic {{{.*}}} ins(%[[ARG1]]{{.*}}outs(%[[ALLOC1]]
+// CHECK-NEXT:   test.region_buffer_based in(%[[ARG1]]{{.*}}out(%[[ALLOC1]]
 //      CHECK:     %[[ALLOCA:.*]] = alloca()
-// CHECK-NEXT:     linalg.generic {{{.*}}} ins(%[[ARG1]]{{.*}}outs(%[[ALLOCA]]
+// CHECK-NEXT:     test.buffer_based in(%[[ARG1]]{{.*}}out(%[[ALLOCA]]
 //      CHECK:     %{{.*}} = exp
 //      CHECK:  %[[ALLOC2:.*]] = alloc()
 // CHECK-NEXT:  linalg.copy
 // CHECK-NEXT:  dealloc %[[ALLOC1]]
 //      CHECK:  ^[[BB3:.*]]({{.*}}):
-//      CHECK:  linalg.copy
+//      CHECK:  test.copy
 // CHECK-NEXT:  dealloc
 
 // -----
@@ -1127,7 +882,7 @@ func @loop_alloc(
     %3 = alloc() : memref<2xf32>
     scf.yield %3 : memref<2xf32>
   }
-  "linalg.copy"(%1, %res) : (memref<2xf32>, memref<2xf32>) -> ()
+  test.copy(%1, %res) : (memref<2xf32>, memref<2xf32>)
   return
 }
 
@@ -1145,7 +900,7 @@ func @loop_alloc(
 //      CHECK:    dealloc %[[ALLOC3]]
 //      CHECK:    scf.yield %[[ALLOC4]]
 //      CHECK: }
-//      CHECK: linalg.copy(%[[ALLOC2]], %arg4)
+//      CHECK: test.copy(%[[ALLOC2]], %arg4)
 // CHECK-NEXT: dealloc %[[ALLOC2]]
 
 // -----
@@ -1174,7 +929,7 @@ func @loop_nested_if_no_alloc(
     }
     scf.yield %3 : memref<2xf32>
   }
-  "linalg.copy"(%1, %res) : (memref<2xf32>, memref<2xf32>) -> ()
+  test.copy(%1, %res) : (memref<2xf32>, memref<2xf32>)
   return
 }
 
@@ -1184,7 +939,7 @@ func @loop_nested_if_no_alloc(
 //      CHECK: scf.yield %[[ALLOC0]]
 //      CHECK: scf.yield %[[IALLOC]]
 //      CHECK: scf.yield %[[ALLOC2]]
-//      CHECK: linalg.copy(%[[ALLOC1]], %arg4)
+//      CHECK: test.copy(%[[ALLOC1]], %arg4)
 //      CHECK: dealloc %[[ALLOC0]]
 
 // -----
@@ -1279,7 +1034,7 @@ func @loop_nested_alloc(
     }
     scf.yield %2 : memref<2xf32>
   }
-  "linalg.copy"(%1, %res) : (memref<2xf32>, memref<2xf32>) -> ()
+  test.copy(%1, %res) : (memref<2xf32>, memref<2xf32>)
   return
 }
 
@@ -1287,16 +1042,19 @@ func @loop_nested_alloc(
 // CHECK-NEXT: dealloc %[[ALLOC0]]
 // CHECK-NEXT: %[[ALLOC1:.*]] = alloc()
 // CHECK-NEXT: linalg.copy(%arg3, %[[ALLOC1]])
-// CHECK-NEXT: %[[VAL_7:.*]] = scf.for {{.*}} iter_args(%[[IALLOC0:.*]] = %[[ALLOC1]])
+// CHECK-NEXT: %[[VAL_7:.*]] = scf.for {{.*}} iter_args
+// CHECK-SAME: (%[[IALLOC0:.*]] = %[[ALLOC1]])
 //      CHECK: %[[ALLOC2:.*]] = alloc()
 // CHECK-NEXT: linalg.copy(%[[IALLOC0]], %[[ALLOC2]])
 // CHECK-NEXT: dealloc %[[IALLOC0]]
-// CHECK-NEXT: %[[ALLOC3:.*]] = scf.for {{.*}} iter_args(%[[IALLOC1:.*]] = %[[ALLOC2]])
+// CHECK-NEXT: %[[ALLOC3:.*]] = scf.for {{.*}} iter_args
+// CHECK-SAME: (%[[IALLOC1:.*]] = %[[ALLOC2]])
 //      CHECK: %[[ALLOC5:.*]] = alloc()
 // CHECK-NEXT: linalg.copy(%[[IALLOC1]], %[[ALLOC5]])
 // CHECK-NEXT: dealloc %[[IALLOC1]]
 
-//      CHECK: %[[ALLOC6:.*]] = scf.for {{.*}} iter_args(%[[IALLOC2:.*]] = %[[ALLOC5]])
+//      CHECK: %[[ALLOC6:.*]] = scf.for {{.*}} iter_args
+// CHECK-SAME: (%[[IALLOC2:.*]] = %[[ALLOC5]])
 //      CHECK: %[[ALLOC8:.*]] = alloc()
 // CHECK-NEXT: dealloc %[[ALLOC8]]
 //      CHECK: %[[ALLOC9:.*]] = scf.if
@@ -1327,7 +1085,7 @@ func @loop_nested_alloc(
 // CHECK-NEXT: dealloc %[[ALLOC3]]
 // CHECK-NEXT: scf.yield %[[ALLOC4]]
 
-//      CHECK: linalg.copy(%[[VAL_7]], %arg4)
+//      CHECK: test.copy(%[[VAL_7]], %arg4)
 // CHECK-NEXT: dealloc %[[VAL_7]]
 
 // -----
@@ -1359,7 +1117,7 @@ func @loop_dynalloc(
   br ^loopHeader(%inc, %alloc1 : i32, memref<?xf32>)
 
 ^exit(%buff3 : memref<?xf32>):
-  "linalg.copy"(%buff3, %arg3) : (memref<?xf32>, memref<?xf32>) -> ()
+  test.copy(%buff3, %arg3) : (memref<?xf32>, memref<?xf32>)
   return
 }
 
@@ -1393,7 +1151,7 @@ func @do_loop_alloc(
     ^exit(%buff : memref<2xf32>)
 
 ^exit(%buff3 : memref<2xf32>):
-  "linalg.copy"(%buff3, %arg3) : (memref<2xf32>, memref<2xf32>) -> ()
+  test.copy(%buff3, %arg3) : (memref<2xf32>, memref<2xf32>)
   return
 }
 
@@ -1401,7 +1159,11 @@ func @do_loop_alloc(
 
 // -----
 
-func @assumingOp(%arg0: !shape.witness, %arg2: memref<2xf32>, %arg3: memref<2xf32>) {
+// CHECK-LABEL: func @assumingOp(
+func @assumingOp(
+  %arg0: !shape.witness,
+  %arg2: memref<2xf32>,
+  %arg3: memref<2xf32>) {
   // Confirm the alloc will be dealloc'ed in the block.
   %1 = shape.assuming %arg0 -> memref<2xf32> {
      %0 = alloc() : memref<2xf32>
@@ -1412,28 +1174,22 @@ func @assumingOp(%arg0: !shape.witness, %arg2: memref<2xf32>, %arg3: memref<2xf3
     %2 = alloc() : memref<2xf32>
     shape.assuming_yield %2 : memref<2xf32>
   }
-  "linalg.copy"(%3, %arg3) : (memref<2xf32>, memref<2xf32>) -> ()
+  test.copy(%3, %arg3) : (memref<2xf32>, memref<2xf32>)
   return
 }
 
-// CHECK-LABEL: func @assumingOp(
-// CHECK-SAME:     %[[ARG0:.*]]: !shape.witness,
-// CHECK-SAME:     %[[ARG1:.*]]: memref<2xf32>,
-// CHECK-SAME:     %[[ARG2:.*]]: memref<2xf32>) {
-// CHECK:        %[[UNUSED_RESULT:.*]] = shape.assuming %[[ARG0]] -> (memref<2xf32>) {
-// CHECK:         %[[ALLOC0:.*]] = alloc() : memref<2xf32>
-// CHECK:         dealloc %[[ALLOC0]] : memref<2xf32>
-// CHECK:         shape.assuming_yield %[[ARG1]] : memref<2xf32>
-// CHECK:        }
-// CHECK:        %[[ASSUMING_RESULT:.*]] = shape.assuming %[[ARG0]] -> (memref<2xf32>) {
-// CHECK:         %[[TMP_ALLOC:.*]] = alloc() : memref<2xf32>
-// CHECK:         %[[RETURNING_ALLOC:.*]] = alloc() : memref<2xf32>
-// CHECK:         linalg.copy(%[[TMP_ALLOC]], %[[RETURNING_ALLOC]]) : memref<2xf32>, memref<2xf32>
-// CHECK:         dealloc %[[TMP_ALLOC]] : memref<2xf32>
-// CHECK:         shape.assuming_yield %[[RETURNING_ALLOC]] : memref<2xf32>
-// CHECK:        }
-// CHECK:        linalg.copy(%[[ASSUMING_RESULT:.*]], %[[ARG2]]) : memref<2xf32>, memref<2xf32>
-// CHECK:        dealloc %[[ASSUMING_RESULT]] : memref<2xf32>
-// CHECK:        return
-// CHECK:       }
-
+// CHECK-SAME: %[[ARG0:.*]]: !shape.witness,
+// CHECK-SAME: %[[ARG1:.*]]: {{.*}},
+// CHECK-SAME: %[[ARG2:.*]]: {{.*}}
+//      CHECK: %[[UNUSED_RESULT:.*]] = shape.assuming %[[ARG0]]
+// CHECK-NEXT:    %[[ALLOC0:.*]] = alloc()
+// CHECK-NEXT:    dealloc %[[ALLOC0]]
+// CHECK-NEXT:    shape.assuming_yield %[[ARG1]]
+//      CHECK: %[[ASSUMING_RESULT:.*]] = shape.assuming %[[ARG0]]
+// CHECK-NEXT:    %[[TMP_ALLOC:.*]] = alloc()
+// CHECK-NEXT:    %[[RETURNING_ALLOC:.*]] = alloc()
+// CHECK-NEXT:    linalg.copy(%[[TMP_ALLOC]], %[[RETURNING_ALLOC]])
+// CHECK-NEXT:    dealloc %[[TMP_ALLOC]]
+// CHECK-NEXT:    shape.assuming_yield %[[RETURNING_ALLOC]]
+//      CHECK: test.copy(%[[ASSUMING_RESULT:.*]], %[[ARG2]])
+// CHECK-NEXT: dealloc %[[ASSUMING_RESULT]]

diff  --git a/mlir/test/Transforms/buffer-hoisting.mlir b/mlir/test/Transforms/buffer-hoisting.mlir
index bce5f4370ed3..2112e5cf9dc3 100644
--- a/mlir/test/Transforms/buffer-hoisting.mlir
+++ b/mlir/test/Transforms/buffer-hoisting.mlir
@@ -12,8 +12,6 @@
 // BufferHoisting expected behavior: It should move the existing AllocOp to
 // the entry block.
 
-#map0 = affine_map<(d0) -> (d0)>
-
 // CHECK-LABEL: func @condBranch
 func @condBranch(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) {
   cond_br %arg0, ^bb1, ^bb2
@@ -21,16 +19,10 @@ func @condBranch(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) {
   br ^bb3(%arg1 : memref<2xf32>)
 ^bb2:
   %0 = alloc() : memref<2xf32>
-  linalg.generic {indexing_maps = [#map0, #map0], iterator_types = ["parallel"]}
-      ins(%arg1: memref<2xf32>)
-     outs(%0: memref<2xf32>) {
-  ^bb0(%gen1_arg0: f32, %gen1_arg1: f32):
-    %tmp1 = exp %gen1_arg0 : f32
-    linalg.yield %tmp1 : f32
-  }
+  test.buffer_based in(%arg1: memref<2xf32>) out(%0: memref<2xf32>)
   br ^bb3(%0 : memref<2xf32>)
 ^bb3(%1: memref<2xf32>):
-  "linalg.copy"(%1, %arg2) : (memref<2xf32>, memref<2xf32>) -> ()
+  test.copy(%1, %arg2) : (memref<2xf32>, memref<2xf32>)
   return
 }
 
@@ -49,8 +41,6 @@ func @condBranch(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) {
 // to any other block since the alloc has a dynamic dependency to block argument
 // %0 in bb2.
 
-#map0 = affine_map<(d0) -> (d0)>
-
 // CHECK-LABEL: func @condBranchDynamicType
 func @condBranchDynamicType(
   %arg0: i1,
@@ -62,16 +52,10 @@ func @condBranchDynamicType(
   br ^bb3(%arg1 : memref<?xf32>)
 ^bb2(%0: index):
   %1 = alloc(%0) : memref<?xf32>
-  linalg.generic {indexing_maps = [#map0, #map0], iterator_types = ["parallel"]}
-      ins(%arg1: memref<?xf32>)
-     outs(%1: memref<?xf32>) {
-  ^bb0(%gen1_arg0: f32, %gen1_arg1: f32):
-    %tmp1 = exp %gen1_arg0 : f32
-    linalg.yield %tmp1 : f32
-  }
+  test.buffer_based in(%arg1: memref<?xf32>) out(%1: memref<?xf32>)
   br ^bb3(%1 : memref<?xf32>)
 ^bb3(%2: memref<?xf32>):
-  "linalg.copy"(%2, %arg2) : (memref<?xf32>, memref<?xf32>) -> ()
+  test.copy(%2, %arg2) : (memref<?xf32>, memref<?xf32>)
   return
 }
 
@@ -79,7 +63,7 @@ func @condBranchDynamicType(
 //      CHECK: ^bb2
 //      CHECK: ^bb2(%[[IDX:.*]]:{{.*}})
 // CHECK-NEXT: %[[ALLOC0:.*]] = alloc(%[[IDX]])
-// CHECK-NEXT: linalg.generic
+// CHECK-NEXT: test.buffer_based
 
 // -----
 
@@ -99,8 +83,6 @@ func @condBranchDynamicType(
 // to any other block since the alloc has a dynamic dependency to block argument
 // %0 in bb2.
 
-#map0 = affine_map<(d0) -> (d0)>
-
 // CHECK-LABEL: func @condBranchDynamicTypeNested
 func @condBranchDynamicTypeNested(
   %arg0: i1,
@@ -112,13 +94,7 @@ func @condBranchDynamicTypeNested(
   br ^bb6(%arg1 : memref<?xf32>)
 ^bb2(%0: index):
   %1 = alloc(%0) : memref<?xf32>
-  linalg.generic {indexing_maps = [#map0, #map0], iterator_types = ["parallel"]}
-      ins(%arg1: memref<?xf32>)
-     outs(%1: memref<?xf32>) {
-  ^bb0(%gen1_arg0: f32, %gen1_arg1: f32):
-    %tmp1 = exp %gen1_arg0 : f32
-    linalg.yield %tmp1 : f32
-  }
+  test.buffer_based in(%arg1: memref<?xf32>) out(%1: memref<?xf32>)
   cond_br %arg0, ^bb3, ^bb4
 ^bb3:
   br ^bb5(%1 : memref<?xf32>)
@@ -129,7 +105,7 @@ func @condBranchDynamicTypeNested(
 ^bb6(%3: memref<?xf32>):
   br ^bb7(%3 : memref<?xf32>)
 ^bb7(%4: memref<?xf32>):
-  "linalg.copy"(%4, %arg2) : (memref<?xf32>, memref<?xf32>) -> ()
+  test.copy(%4, %arg2) : (memref<?xf32>, memref<?xf32>)
   return
 }
 
@@ -137,7 +113,7 @@ func @condBranchDynamicTypeNested(
 //      CHECK: ^bb2
 //      CHECK: ^bb2(%[[IDX:.*]]:{{.*}})
 // CHECK-NEXT: %[[ALLOC0:.*]] = alloc(%[[IDX]])
-// CHECK-NEXT: linalg.generic
+// CHECK-NEXT: test.buffer_based
 
 // -----
 
@@ -150,23 +126,15 @@ func @condBranchDynamicTypeNested(
 // BufferHoisting expected behavior: It should move the existing AllocOp to
 // the entry block.
 
-#map0 = affine_map<(d0) -> (d0)>
-
 // CHECK-LABEL: func @criticalEdge
 func @criticalEdge(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) {
   cond_br %arg0, ^bb1, ^bb2(%arg1 : memref<2xf32>)
 ^bb1:
   %0 = alloc() : memref<2xf32>
-  linalg.generic {indexing_maps = [#map0, #map0], iterator_types = ["parallel"]}
-      ins(%arg1: memref<2xf32>)
-     outs(%0: memref<2xf32>) {
-  ^bb0(%gen1_arg0: f32, %gen1_arg1: f32):
-    %tmp1 = exp %gen1_arg0 : f32
-    linalg.yield %tmp1 : f32
-  }
+  test.buffer_based in(%arg1: memref<2xf32>) out(%0: memref<2xf32>)
   br ^bb2(%0 : memref<2xf32>)
 ^bb2(%1: memref<2xf32>):
-  "linalg.copy"(%1, %arg2) : (memref<2xf32>, memref<2xf32>) -> ()
+  test.copy(%1, %arg2) : (memref<2xf32>, memref<2xf32>)
   return
 }
 
@@ -183,18 +151,10 @@ func @criticalEdge(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) {
 //    bb3 <- Initial position of the second AllocOp
 // BufferHoisting expected behavior: It shouldn't move the AllocOps.
 
-#map0 = affine_map<(d0) -> (d0)>
-
 // CHECK-LABEL: func @ifElse
 func @ifElse(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) {
   %0 = alloc() : memref<2xf32>
-  linalg.generic {indexing_maps = [#map0, #map0], iterator_types = ["parallel"]}
-      ins(%arg1: memref<2xf32>)
-     outs(%0: memref<2xf32>) {
-  ^bb0(%gen1_arg0: f32, %gen1_arg1: f32):
-    %tmp1 = exp %gen1_arg0 : f32
-    linalg.yield %tmp1 : f32
-  }
+  test.buffer_based in(%arg1: memref<2xf32>) out(%0: memref<2xf32>)
   cond_br %arg0,
     ^bb1(%arg1, %0 : memref<2xf32>, memref<2xf32>),
     ^bb2(%0, %arg1 : memref<2xf32>, memref<2xf32>)
@@ -204,25 +164,19 @@ func @ifElse(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) {
   br ^bb3(%3, %4 : memref<2xf32>, memref<2xf32>)
 ^bb3(%5: memref<2xf32>, %6: memref<2xf32>):
   %7 = alloc() : memref<2xf32>
-  linalg.generic {indexing_maps = [#map0, #map0], iterator_types = ["parallel"]}
-      ins(%7: memref<2xf32>)
-     outs(%7: memref<2xf32>) {
-  ^bb0(%gen2_arg0: f32, %gen2_arg1: f32):
-    %tmp2 = exp %gen2_arg0 : f32
-    linalg.yield %tmp2 : f32
-  }
-  "linalg.copy"(%7, %arg2) : (memref<2xf32>, memref<2xf32>) -> ()
+  test.buffer_based in(%7: memref<2xf32>) out(%7: memref<2xf32>)
+  test.copy(%7, %arg2) : (memref<2xf32>, memref<2xf32>)
   return
 }
 
 // CHECK-NEXT: %[[ALLOC0:.*]] = alloc()
-// CHECK-NEXT: linalg.generic
+// CHECK-NEXT: test.buffer_based
 //      CHECK: br ^bb3
 //      CHECK: br ^bb3
 // CHECK-NEXT: ^bb3
 //      CHECK: %[[ALLOC1:.*]] = alloc()
-// CHECK-NEXT: linalg.generic
-//      CHECK: linalg.copy(%[[ALLOC1]]
+// CHECK-NEXT: test.buffer_based
+//      CHECK: test.copy(%[[ALLOC1]]
 // CHECK-NEXT: return
 
 // -----
@@ -235,18 +189,10 @@ func @ifElse(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) {
 //    bb3
 // BufferHoisting expected behavior: It shouldn't move the AllocOp.
 
-#map0 = affine_map<(d0) -> (d0)>
-
 // CHECK-LABEL: func @ifElseNoUsers
 func @ifElseNoUsers(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) {
   %0 = alloc() : memref<2xf32>
-  linalg.generic {indexing_maps = [#map0, #map0], iterator_types = ["parallel"]}
-      ins(%arg1: memref<2xf32>)
-     outs(%0: memref<2xf32>) {
-  ^bb0(%gen1_arg0: f32, %gen1_arg1: f32):
-    %tmp1 = exp %gen1_arg0 : f32
-    linalg.yield %tmp1 : f32
-  }
+  test.buffer_based in(%arg1: memref<2xf32>) out(%0: memref<2xf32>)
   cond_br %arg0,
     ^bb1(%arg1, %0 : memref<2xf32>, memref<2xf32>),
     ^bb2(%0, %arg1 : memref<2xf32>, memref<2xf32>)
@@ -255,12 +201,12 @@ func @ifElseNoUsers(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) {
 ^bb2(%3: memref<2xf32>, %4: memref<2xf32>):
   br ^bb3(%3, %4 : memref<2xf32>, memref<2xf32>)
 ^bb3(%5: memref<2xf32>, %6: memref<2xf32>):
-  "linalg.copy"(%arg1, %arg2) : (memref<2xf32>, memref<2xf32>) -> ()
+  test.copy(%arg1, %arg2) : (memref<2xf32>, memref<2xf32>)
   return
 }
 
 // CHECK-NEXT: %[[ALLOC0:.*]] = alloc()
-// CHECK-NEXT: linalg.generic
+// CHECK-NEXT: test.buffer_based
 
 // -----
 
@@ -275,18 +221,10 @@ func @ifElseNoUsers(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) {
 //       bb5 <- Initial position of the second AllocOp
 // BufferHoisting expected behavior: AllocOps shouldn't be moved.
 
-#map0 = affine_map<(d0) -> (d0)>
-
 // CHECK-LABEL: func @ifElseNested
 func @ifElseNested(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) {
   %0 = alloc() : memref<2xf32>
-  linalg.generic {indexing_maps = [#map0, #map0], iterator_types = ["parallel"]}
-      ins(%arg1: memref<2xf32>)
-     outs(%0: memref<2xf32>) {
-  ^bb0(%gen1_arg0: f32, %gen1_arg1: f32):
-    %tmp1 = exp %gen1_arg0 : f32
-    linalg.yield %tmp1 : f32
-  }
+  test.buffer_based in(%arg1: memref<2xf32>) out(%0: memref<2xf32>)
   cond_br %arg0,
     ^bb1(%arg1, %0 : memref<2xf32>, memref<2xf32>),
     ^bb2(%0, %arg1 : memref<2xf32>, memref<2xf32>)
@@ -300,58 +238,38 @@ func @ifElseNested(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) {
   br ^bb5(%3, %6 : memref<2xf32>, memref<2xf32>)
 ^bb5(%7: memref<2xf32>, %8: memref<2xf32>):
   %9 = alloc() : memref<2xf32>
-  linalg.generic {indexing_maps = [#map0, #map0], iterator_types = ["parallel"]}
-      ins(%7: memref<2xf32>)
-     outs(%9: memref<2xf32>) {
-  ^bb0(%gen2_arg0: f32, %gen2_arg1: f32):
-    %tmp2 = exp %gen2_arg0 : f32
-    linalg.yield %tmp2 : f32
-  }
-  "linalg.copy"(%9, %arg2) : (memref<2xf32>, memref<2xf32>) -> ()
+  test.buffer_based in(%7: memref<2xf32>) out(%9: memref<2xf32>)
+  test.copy(%9, %arg2) : (memref<2xf32>, memref<2xf32>)
   return
 }
 
 // CHECK-NEXT: %[[ALLOC0:.*]] = alloc()
-// CHECK-NEXT: linalg.generic
+// CHECK-NEXT: test.buffer_based
 //      CHECK: br ^bb5
 //      CHECK: br ^bb5
 //      CHECK: br ^bb5
 // CHECK-NEXT: ^bb5
 //      CHECK: %[[ALLOC1:.*]] = alloc()
-// CHECK-NEXT: linalg.generic
+// CHECK-NEXT: test.buffer_based
 
 // -----
 
 // Test Case: Dead operations in a single block.
 // BufferHoisting expected behavior: It shouldn't move the AllocOps.
 
-#map0 = affine_map<(d0) -> (d0)>
-
 // CHECK-LABEL: func @redundantOperations
 func @redundantOperations(%arg0: memref<2xf32>) {
   %0 = alloc() : memref<2xf32>
-  linalg.generic {indexing_maps = [#map0, #map0], iterator_types = ["parallel"]}
-      ins(%arg0: memref<2xf32>)
-     outs(%0: memref<2xf32>) {
-  ^bb0(%gen1_arg0: f32, %gen1_arg1: f32):
-    %tmp1 = exp %gen1_arg0 : f32
-    linalg.yield %tmp1 : f32
-  }
+  test.buffer_based in(%arg0: memref<2xf32>) out(%0: memref<2xf32>)
   %1 = alloc() : memref<2xf32>
-  linalg.generic {indexing_maps = [#map0, #map0], iterator_types = ["parallel"]}
-      ins(%0: memref<2xf32>)
-     outs(%1: memref<2xf32>) {
-  ^bb0(%gen2_arg0: f32, %gen2_arg1: f32):
-    %tmp2 = exp %gen2_arg0 : f32
-    linalg.yield %tmp2 : f32
-  }
+  test.buffer_based in(%0: memref<2xf32>) out(%1: memref<2xf32>)
   return
 }
 
 // CHECK-NEXT: %[[ALLOC0:.*]] = alloc()
-// CHECK-NEXT: linalg.generic
+// CHECK-NEXT: test.buffer_based
 //      CHECK: %[[ALLOC1:.*]] = alloc()
-// CHECK-NEXT: linalg.generic
+// CHECK-NEXT: test.buffer_based
 
 // -----
 
@@ -364,8 +282,6 @@ func @redundantOperations(%arg0: memref<2xf32>) {
 // BufferHoisting expected behavior: Both AllocOps should be moved to the
 // entry block.
 
-#map0 = affine_map<(d0) -> (d0)>
-
 // CHECK-LABEL: func @moving_alloc_and_inserting_missing_dealloc
 func @moving_alloc_and_inserting_missing_dealloc(
   %cond: i1,
@@ -374,26 +290,14 @@ func @moving_alloc_and_inserting_missing_dealloc(
   cond_br %cond, ^bb1, ^bb2
 ^bb1:
   %0 = alloc() : memref<2xf32>
-  linalg.generic {indexing_maps = [#map0, #map0], iterator_types = ["parallel"]}
-      ins(%arg0: memref<2xf32>)
-     outs(%0: memref<2xf32>) {
-  ^bb0(%gen1_arg0: f32, %gen1_arg1: f32):
-    %tmp1 = exp %gen1_arg0 : f32
-    linalg.yield %tmp1 : f32
-  }
+  test.buffer_based in(%arg0: memref<2xf32>) out(%0: memref<2xf32>)
   br ^exit(%0 : memref<2xf32>)
 ^bb2:
   %1 = alloc() : memref<2xf32>
-  linalg.generic {indexing_maps = [#map0, #map0], iterator_types = ["parallel"]}
-      ins(%arg0: memref<2xf32>)
-     outs(%1: memref<2xf32>) {
-  ^bb0(%gen2_arg0: f32, %gen2_arg1: f32):
-    %tmp2 = exp %gen2_arg0 : f32
-    linalg.yield %tmp2 : f32
-  }
+  test.buffer_based in(%arg0: memref<2xf32>) out(%1: memref<2xf32>)
   br ^exit(%1 : memref<2xf32>)
 ^exit(%arg2: memref<2xf32>):
-  "linalg.copy"(%arg2, %arg1) : (memref<2xf32>, memref<2xf32>) -> ()
+  test.copy(%arg2, %arg1) : (memref<2xf32>, memref<2xf32>)
   return
 }
 
@@ -413,8 +317,6 @@ func @moving_alloc_and_inserting_missing_dealloc(
 // BufferHoisting expected behavior: It should move the AllocOp to the entry
 // block.
 
-#map0 = affine_map<(d0) -> (d0)>
-
 // CHECK-LABEL: func @moving_invalid_dealloc_op_complex
 func @moving_invalid_dealloc_op_complex(
   %cond: i1,
@@ -425,17 +327,11 @@ func @moving_invalid_dealloc_op_complex(
   br ^exit(%arg0 : memref<2xf32>)
 ^bb2:
   %1 = alloc() : memref<2xf32>
-  linalg.generic {indexing_maps = [#map0, #map0], iterator_types = ["parallel"]}
-      ins(%arg0: memref<2xf32>)
-     outs(%1: memref<2xf32>) {
-  ^bb0(%gen1_arg0: f32, %gen1_arg1: f32):
-    %tmp1 = exp %gen1_arg0 : f32
-    linalg.yield %tmp1 : f32
-  }
+  test.buffer_based in(%arg0: memref<2xf32>) out(%1: memref<2xf32>)
   dealloc %1 : memref<2xf32>
   br ^exit(%1 : memref<2xf32>)
 ^exit(%arg2: memref<2xf32>):
-  "linalg.copy"(%arg2, %arg1) : (memref<2xf32>, memref<2xf32>) -> ()
+  test.copy(%arg2, %arg1) : (memref<2xf32>, memref<2xf32>)
   return
 }
 
@@ -444,13 +340,11 @@ func @moving_invalid_dealloc_op_complex(
 
 // -----
 
-// Test Case: Nested regions - This test defines a GenericOp inside the region
-// of another GenericOp.
-// BufferHoisting expected behavior: The AllocOp of inner GenericOp should
-// remain inside the region of outer GenericOp. The AllocOp of the outer
-// GenericOp should be moved to the entry block.
-
-#map0 = affine_map<(d0) -> (d0)>
+// Test Case: Nested regions - This test defines a BufferBasedOp inside the
+// region of a RegionBufferBasedOp.
+// BufferHoisting expected behavior: The AllocOp for the BufferBasedOp should
+// remain inside the region of the RegionBufferBasedOp. The AllocOp of the
+// RegionBufferBasedOp should be moved to the entry block.
 
 // CHECK-LABEL: func @nested_regions_and_cond_branch
 func @nested_regions_and_cond_branch(
@@ -462,35 +356,23 @@ func @nested_regions_and_cond_branch(
   br ^bb3(%arg1 : memref<2xf32>)
 ^bb2:
   %0 = alloc() : memref<2xf32>
-  linalg.generic {
-    indexing_maps = [#map0, #map0],
-    iterator_types = ["parallel"]}
-      ins(%arg1: memref<2xf32>)
-     outs(%0: memref<2xf32>) {
+  test.region_buffer_based in(%arg1: memref<2xf32>) out(%0: memref<2xf32>) {
   ^bb0(%gen1_arg0: f32, %gen1_arg1: f32):
     %1 = alloc() : memref<2xf32>
-    linalg.generic {
-      indexing_maps = [#map0, #map0],
-      iterator_types = ["parallel"]}
-        ins(%arg1: memref<2xf32>)
-      outs(%1: memref<2xf32>) {
-    ^bb0(%gen2_arg0: f32, %gen2_arg1: f32):
-      %tmp2 = exp %gen2_arg0 : f32
-      linalg.yield %tmp2 : f32
-    }
+    test.buffer_based in(%arg1: memref<2xf32>) out(%1: memref<2xf32>)
     %tmp1 = exp %gen1_arg0 : f32
-    linalg.yield %tmp1 : f32
+    test.region_yield %tmp1 : f32
   }
   br ^bb3(%0 : memref<2xf32>)
 ^bb3(%1: memref<2xf32>):
-  "linalg.copy"(%1, %arg2) : (memref<2xf32>, memref<2xf32>) -> ()
+  test.copy(%1, %arg2) : (memref<2xf32>, memref<2xf32>)
   return
 }
 // CHECK-NEXT:   %[[ALLOC0:.*]] = alloc()
 // CHECK-NEXT:   cond_br
-//      CHECK:   linalg.generic
+//      CHECK:   test.region_buffer_based
 //      CHECK:     %[[ALLOC1:.*]] = alloc()
-// CHECK-NEXT:     linalg.generic
+// CHECK-NEXT:     test.buffer_based
 
 // -----
 
@@ -630,8 +512,6 @@ func @inner_region_control_flow_div(
 
 // -----
 
-#map0 = affine_map<(d0) -> (d0)>
-
 // Test Case: Alloca operations shouldn't be moved.
 
 // CHECK-LABEL: func @condBranchAlloca
@@ -641,16 +521,10 @@ func @condBranchAlloca(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) {
   br ^bb3(%arg1 : memref<2xf32>)
 ^bb2:
   %0 = alloca() : memref<2xf32>
-  linalg.generic {indexing_maps = [#map0, #map0], iterator_types = ["parallel"]}
-      ins(%arg1: memref<2xf32>)
-     outs(%0: memref<2xf32>) {
-  ^bb0(%gen1_arg0: f32, %gen1_arg1: f32):
-    %tmp1 = exp %gen1_arg0 : f32
-    linalg.yield %tmp1 : f32
-  }
+  test.buffer_based in(%arg1: memref<2xf32>) out(%0: memref<2xf32>)
   br ^bb3(%0 : memref<2xf32>)
 ^bb3(%1: memref<2xf32>):
-  "linalg.copy"(%1, %arg2) : (memref<2xf32>, memref<2xf32>) -> ()
+  test.copy(%1, %arg2) : (memref<2xf32>, memref<2xf32>)
   return
 }
 
@@ -658,12 +532,10 @@ func @condBranchAlloca(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) {
 //      CHECK: ^bb2
 //      CHECK: ^bb2
 // CHECK-NEXT: %[[ALLOCA:.*]] = alloca()
-// CHECK-NEXT: linalg.generic
+// CHECK-NEXT: test.buffer_based
 
 // -----
 
-#map0 = affine_map<(d0) -> (d0)>
-
 // Test Case: Alloca operations shouldn't be moved. The alloc operation also
 // shouldn't be moved analogously to the ifElseNested test.
 
@@ -673,13 +545,7 @@ func @ifElseNestedAlloca(
   %arg1: memref<2xf32>,
   %arg2: memref<2xf32>) {
   %0 = alloca() : memref<2xf32>
-  linalg.generic {indexing_maps = [#map0, #map0], iterator_types = ["parallel"]}
-      ins(%arg1: memref<2xf32>)
-     outs(%0: memref<2xf32>) {
-  ^bb0(%gen1_arg0: f32, %gen1_arg1: f32):
-    %tmp1 = exp %gen1_arg0 : f32
-    linalg.yield %tmp1 : f32
-  }
+  test.buffer_based in(%arg1: memref<2xf32>) out(%0: memref<2xf32>)
   cond_br %arg0,
     ^bb1(%arg1, %0 : memref<2xf32>, memref<2xf32>),
     ^bb2(%0, %arg1 : memref<2xf32>, memref<2xf32>)
@@ -693,30 +559,22 @@ func @ifElseNestedAlloca(
   br ^bb5(%3, %6 : memref<2xf32>, memref<2xf32>)
 ^bb5(%7: memref<2xf32>, %8: memref<2xf32>):
   %9 = alloc() : memref<2xf32>
-  linalg.generic {indexing_maps = [#map0, #map0], iterator_types = ["parallel"]}
-      ins(%7: memref<2xf32>)
-     outs(%9: memref<2xf32>) {
-  ^bb0(%gen2_arg0: f32, %gen2_arg1: f32):
-    %tmp2 = exp %gen2_arg0 : f32
-    linalg.yield %tmp2 : f32
-  }
-  "linalg.copy"(%9, %arg2) : (memref<2xf32>, memref<2xf32>) -> ()
+  test.buffer_based in(%7: memref<2xf32>) out(%9: memref<2xf32>)
+  test.copy(%9, %arg2) : (memref<2xf32>, memref<2xf32>)
   return
 }
 
 // CHECK-NEXT: %[[ALLOCA:.*]] = alloca()
-// CHECK-NEXT: linalg.generic
+// CHECK-NEXT: test.buffer_based
 //      CHECK: ^bb5
 //      CHECK: ^bb5
 //      CHECK: ^bb5
 // CHECK-NEXT: ^bb5
 // CHECK-NEXT: %[[ALLOC:.*]] = alloc()
-// CHECK-NEXT: linalg.generic
+// CHECK-NEXT: test.buffer_based
 
 // -----
 
-#map0 = affine_map<(d0) -> (d0)>
-
 // Test Case: Alloca operations shouldn't be moved. The alloc operation should
 // be moved in the beginning analogous to the nestedRegionsAndCondBranch test.
 
@@ -730,35 +588,23 @@ func @nestedRegionsAndCondBranchAlloca(
   br ^bb3(%arg1 : memref<2xf32>)
 ^bb2:
   %0 = alloc() : memref<2xf32>
-  linalg.generic {
-    indexing_maps = [#map0, #map0],
-    iterator_types = ["parallel"]}
-      ins(%arg1: memref<2xf32>)
-     outs(%0: memref<2xf32>) {
+  test.region_buffer_based in(%arg1: memref<2xf32>) out(%0: memref<2xf32>) {
   ^bb0(%gen1_arg0: f32, %gen1_arg1: f32):
     %1 = alloca() : memref<2xf32>
-    linalg.generic {
-      indexing_maps = [#map0, #map0],
-      iterator_types = ["parallel"]}
-        ins(%arg1: memref<2xf32>)
-      outs(%1: memref<2xf32>) {
-    ^bb0(%gen2_arg0: f32, %gen2_arg1: f32):
-      %tmp2 = exp %gen2_arg0 : f32
-      linalg.yield %tmp2 : f32
-    }
+    test.buffer_based in(%arg1: memref<2xf32>) out(%1: memref<2xf32>)
     %tmp1 = exp %gen1_arg0 : f32
-    linalg.yield %tmp1 : f32
+    test.region_yield %tmp1 : f32
   }
   br ^bb3(%0 : memref<2xf32>)
 ^bb3(%1: memref<2xf32>):
-  "linalg.copy"(%1, %arg2) : (memref<2xf32>, memref<2xf32>) -> ()
+  test.copy(%1, %arg2) : (memref<2xf32>, memref<2xf32>)
   return
 }
 // CHECK-NEXT:   %[[ALLOC:.*]] = alloc()
 // CHECK-NEXT:   cond_br
-//      CHECK:   linalg.generic
+//      CHECK:   test.region_buffer_based
 //      CHECK:     %[[ALLOCA:.*]] = alloca()
-// CHECK-NEXT:     linalg.generic
+// CHECK-NEXT:     test.buffer_based
 
 // -----
 
@@ -779,7 +625,7 @@ func @loop_alloc(
     %3 = alloc() : memref<2xf32>
     scf.yield %3 : memref<2xf32>
   }
-  "linalg.copy"(%1, %res) : (memref<2xf32>, memref<2xf32>) -> ()
+  test.copy(%1, %res) : (memref<2xf32>, memref<2xf32>)
   return
 }
 
@@ -852,7 +698,7 @@ func @loop_nested_alloc(
     }
     scf.yield %2 : memref<2xf32>
   }
-  "linalg.copy"(%1, %res) : (memref<2xf32>, memref<2xf32>) -> ()
+  test.copy(%1, %res) : (memref<2xf32>, memref<2xf32>)
   return
 }
 
@@ -893,7 +739,7 @@ func @loop_nested_alloc_dyn_dependency(
     }
     scf.yield %0 : memref<?xf32>
   }
-  "linalg.copy"(%1, %res) : (memref<?xf32>, memref<?xf32>) -> ()
+  test.copy(%1, %res) : (memref<?xf32>, memref<?xf32>)
   return
 }
 

diff  --git a/mlir/test/Transforms/buffer-loop-hoisting.mlir b/mlir/test/Transforms/buffer-loop-hoisting.mlir
index bcb44efb5fea..4b4313f9978e 100644
--- a/mlir/test/Transforms/buffer-loop-hoisting.mlir
+++ b/mlir/test/Transforms/buffer-loop-hoisting.mlir
@@ -11,8 +11,6 @@
 //    bb3
 // BufferLoopHoisting expected behavior: It should not move the AllocOp.
 
-#map0 = affine_map<(d0) -> (d0)>
-
 // CHECK-LABEL: func @condBranch
 func @condBranch(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) {
   cond_br %arg0, ^bb1, ^bb2
@@ -20,16 +18,10 @@ func @condBranch(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) {
   br ^bb3(%arg1 : memref<2xf32>)
 ^bb2:
   %0 = alloc() : memref<2xf32>
-  linalg.generic {indexing_maps = [#map0, #map0], iterator_types = ["parallel"]}
-    ins(%arg1: memref<2xf32>)
-    outs(%0: memref<2xf32>) {
-  ^bb0(%gen1_arg0: f32, %gen1_arg1: f32):
-    %tmp1 = exp %gen1_arg0 : f32
-    linalg.yield %tmp1 : f32
-  }
+  test.buffer_based in(%arg1: memref<2xf32>) out(%0: memref<2xf32>)
   br ^bb3(%0 : memref<2xf32>)
 ^bb3(%1: memref<2xf32>):
-  "linalg.copy"(%1, %arg2) : (memref<2xf32>, memref<2xf32>) -> ()
+  test.copy(%1, %arg2) : (memref<2xf32>, memref<2xf32>)
   return
 }
 
@@ -48,8 +40,6 @@ func @condBranch(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) {
 // to any other block since the alloc has a dynamic dependency to block argument
 // %0 in bb2.
 
-#map0 = affine_map<(d0) -> (d0)>
-
 // CHECK-LABEL: func @condBranchDynamicType
 func @condBranchDynamicType(
   %arg0: i1,
@@ -61,16 +51,10 @@ func @condBranchDynamicType(
   br ^bb3(%arg1 : memref<?xf32>)
 ^bb2(%0: index):
   %1 = alloc(%0) : memref<?xf32>
-  linalg.generic {indexing_maps = [#map0, #map0], iterator_types = ["parallel"]}
-    ins(%arg1: memref<?xf32>)
-    outs(%1: memref<?xf32>) {
-  ^bb0(%gen1_arg0: f32, %gen1_arg1: f32):
-    %tmp1 = exp %gen1_arg0 : f32
-    linalg.yield %tmp1 : f32
-  }
+  test.buffer_based in(%arg1: memref<?xf32>) out(%1: memref<?xf32>)
   br ^bb3(%1 : memref<?xf32>)
 ^bb3(%2: memref<?xf32>):
-  "linalg.copy"(%2, %arg2) : (memref<?xf32>, memref<?xf32>) -> ()
+  test.copy(%2, %arg2) : (memref<?xf32>, memref<?xf32>)
   return
 }
 
@@ -78,17 +62,15 @@ func @condBranchDynamicType(
 //      CHECK: ^bb2
 //      CHECK: ^bb2(%[[IDX:.*]]:{{.*}})
 // CHECK-NEXT: %[[ALLOC0:.*]] = alloc(%[[IDX]])
-// CHECK-NEXT: linalg.generic
+// CHECK-NEXT: test.buffer_based
 
 // -----
 
-// Test Case: Nested regions - This test defines a GenericOp inside the region
-// of another GenericOp.
-// BufferLoopHoisting expected behavior: The AllocOp of inner GenericOp should
-// remain inside the region of outer GenericOp. The AllocOp of the outer
-// GenericOp should not be moved during this pass.
-
-#map0 = affine_map<(d0) -> (d0)>
+// Test Case: Nested regions - This test defines a BufferBasedOp inside the
+// region of a RegionBufferBasedOp.
+// BufferLoopHoisting expected behavior: The AllocOp for the BufferBasedOp
+// should remain inside the region of the RegionBufferBasedOp. The AllocOp of
+// the RegionBufferBasedOp should not be moved during this pass.
 
 // CHECK-LABEL: func @nested_regions_and_cond_branch
 func @nested_regions_and_cond_branch(
@@ -100,35 +82,23 @@ func @nested_regions_and_cond_branch(
   br ^bb3(%arg1 : memref<2xf32>)
 ^bb2:
   %0 = alloc() : memref<2xf32>
-  linalg.generic {
-    indexing_maps = [#map0, #map0],
-    iterator_types = ["parallel"]}
-    ins(%arg1: memref<2xf32>)
-    outs(%0: memref<2xf32>) {
+  test.region_buffer_based in(%arg1: memref<2xf32>) out(%0: memref<2xf32>) {
   ^bb0(%gen1_arg0: f32, %gen1_arg1: f32):
     %1 = alloc() : memref<2xf32>
-    linalg.generic {
-      indexing_maps = [#map0, #map0],
-      iterator_types = ["parallel"]}
-      ins(%arg1: memref<2xf32>)
-      outs(%1: memref<2xf32>) {
-    ^bb0(%gen2_arg0: f32, %gen2_arg1: f32):
-      %tmp2 = exp %gen2_arg0 : f32
-      linalg.yield %tmp2 : f32
-    }
+    test.buffer_based in(%arg1: memref<2xf32>) out(%1: memref<2xf32>)
     %tmp1 = exp %gen1_arg0 : f32
-    linalg.yield %tmp1 : f32
+    test.region_yield %tmp1 : f32
   }
   br ^bb3(%0 : memref<2xf32>)
 ^bb3(%1: memref<2xf32>):
-  "linalg.copy"(%1, %arg2) : (memref<2xf32>, memref<2xf32>) -> ()
+  test.copy(%1, %arg2) : (memref<2xf32>, memref<2xf32>)
   return
 }
 // CHECK-NEXT:   cond_br
 //      CHECK:   %[[ALLOC0:.*]] = alloc()
-//      CHECK:   linalg.generic
+//      CHECK:   test.region_buffer_based
 //      CHECK:     %[[ALLOC1:.*]] = alloc()
-// CHECK-NEXT:     linalg.generic
+// CHECK-NEXT:     test.buffer_based
 
 // -----
 
@@ -175,7 +145,7 @@ func @loop_alloc(
     %3 = alloc() : memref<2xf32>
     scf.yield %3 : memref<2xf32>
   }
-  "linalg.copy"(%1, %res) : (memref<2xf32>, memref<2xf32>) -> ()
+  test.copy(%1, %res) : (memref<2xf32>, memref<2xf32>)
   return
 }
 
@@ -251,7 +221,7 @@ func @loop_nested_alloc(
     }
     scf.yield %2 : memref<2xf32>
   }
-  "linalg.copy"(%1, %res) : (memref<2xf32>, memref<2xf32>) -> ()
+  test.copy(%1, %res) : (memref<2xf32>, memref<2xf32>)
   return
 }
 
@@ -297,7 +267,7 @@ func @loop_nested_alloc_dyn_dependency(
     }
     scf.yield %0 : memref<?xf32>
   }
-  "linalg.copy"(%1, %res) : (memref<?xf32>, memref<?xf32>) -> ()
+  test.copy(%1, %res) : (memref<?xf32>, memref<?xf32>)
   return
 }
 
@@ -323,7 +293,7 @@ func @hoist_one_loop(
       %2 = alloc() : memref<2xf32>
       scf.yield %0 : memref<2xf32>
   }
-  "linalg.copy"(%1, %res) : (memref<2xf32>, memref<2xf32>) -> ()
+  test.copy(%1, %res) : (memref<2xf32>, memref<2xf32>)
   return
 }
 
@@ -345,7 +315,7 @@ func @no_hoist_one_loop(
       %1 = alloc() : memref<2xf32>
       scf.yield %1 : memref<2xf32>
   }
-  "linalg.copy"(%0, %res) : (memref<2xf32>, memref<2xf32>) -> ()
+  test.copy(%0, %res) : (memref<2xf32>, memref<2xf32>)
   return
 }
 
@@ -371,7 +341,7 @@ func @hoist_multiple_loop(
     }
     scf.yield %0 : memref<2xf32>
   }
-  "linalg.copy"(%1, %res) : (memref<2xf32>, memref<2xf32>) -> ()
+  test.copy(%1, %res) : (memref<2xf32>, memref<2xf32>)
   return
 }
 
@@ -399,7 +369,7 @@ func @no_hoist_one_loop_conditional(
       }
     scf.yield %2 : memref<2xf32>
   }
-  "linalg.copy"(%0, %res) : (memref<2xf32>, memref<2xf32>) -> ()
+  test.copy(%0, %res) : (memref<2xf32>, memref<2xf32>)
   return
 }
 
@@ -430,7 +400,7 @@ func @hoist_one_loop_conditional(
   {
     scf.yield %0 : memref<2xf32>
   }
-  "linalg.copy"(%2, %res) : (memref<2xf32>, memref<2xf32>) -> ()
+  test.copy(%2, %res) : (memref<2xf32>, memref<2xf32>)
   return
 }
 
@@ -453,7 +423,7 @@ func @no_hoist_one_loop_dependency(
       %2 = alloc(%i) : memref<?xf32>
       scf.yield %0 : memref<2xf32>
   }
-  "linalg.copy"(%1, %res) : (memref<2xf32>, memref<2xf32>) -> ()
+  test.copy(%1, %res) : (memref<2xf32>, memref<2xf32>)
   return
 }
 
@@ -480,7 +450,7 @@ func @partial_hoist_multiple_loop_dependency(
     }
     scf.yield %0 : memref<2xf32>
   }
-  "linalg.copy"(%1, %res) : (memref<2xf32>, memref<2xf32>) -> ()
+  test.copy(%1, %res) : (memref<2xf32>, memref<2xf32>)
   return
 }
 

diff  --git a/mlir/test/Transforms/finalizing-bufferize-allowed-memref-results.mlir b/mlir/test/Transforms/finalizing-bufferize-allowed-memref-results.mlir
index 149060156bb1..220a597506b3 100644
--- a/mlir/test/Transforms/finalizing-bufferize-allowed-memref-results.mlir
+++ b/mlir/test/Transforms/finalizing-bufferize-allowed-memref-results.mlir
@@ -1,9 +1,9 @@
 // RUN: mlir-opt -test-finalizing-bufferize-with-allowed-memref-results -split-input-file %s | FileCheck %s
 
-// Since allowMemrefEscaping is on for Buffer Placement in this test pass, all
-// tensor typed function results are converted to memref and remain as function
-// results. All memref typed function results will escape from the deallocation
-// phase of Buffer Placement.
+// Since allowMemrefEscaping is active for Bufferization in this test pass,
+// all tensor typed function results are converted to memref and remain as
+// function results. All memref typed function results will escape from the
+// deallocation phase of Bufferization.
 
 // CHECK-LABEL: func @void_function_signature_conversion
 func @void_function_signature_conversion(%arg0: tensor<4x8xf32>) {
@@ -13,17 +13,28 @@ func @void_function_signature_conversion(%arg0: tensor<4x8xf32>) {
 
 // -----
 
-#map0 = affine_map<(d0) -> (d0)>
-
 // CHECK-LABEL: func @complex_signature_conversion
-func @complex_signature_conversion(%arg0: tensor<5xf32>, %arg1: memref<10xf32>, %arg2: i1, %arg3: f16) -> (i1, tensor<5xf32>, memref<10xf32>, memref<15xf32>, f16) {
+func @complex_signature_conversion(
+  %arg0: tensor<5xf32>,
+  %arg1: memref<10xf32>,
+  %arg2: i1, %arg3: f16) -> (
+    i1,
+    tensor<5xf32>,
+    memref<10xf32>,
+    memref<15xf32>,
+    f16) {
   %0 = alloc() : memref<15xf32>
-  return %arg2, %arg0, %arg1, %0, %arg3 : i1, tensor<5xf32>, memref<10xf32>, memref<15xf32>, f16
+  %1 = test.tensor_based in(%arg0 : tensor<5xf32>) -> tensor<5xf32>
+  return %arg2, %1, %arg1, %0, %arg3 :
+   i1, tensor<5xf32>, memref<10xf32>, memref<15xf32>, f16
 }
-//      CHECK: (%[[ARG0:.*]]: memref<5xf32>, %[[ARG1:.*]]: memref<10xf32>, %[[ARG2:.*]]: i1, %[[ARG3:.*]]: f16)
+//      CHECK: (%[[ARG0:.*]]: memref<5xf32>, %[[ARG1:.*]]: memref<10xf32>,
+// CHECK-SAME: %[[ARG2:.*]]: i1, %[[ARG3:.*]]: f16)
 // CHECK-SAME: (i1, memref<5xf32>, memref<10xf32>, memref<15xf32>, f16)
 //      CHECK: %[[FIRST_ALLOC:.*]] = alloc()
-//      CHECK: return %[[ARG2]], %[[ARG0]], %[[ARG1]], %[[FIRST_ALLOC]], %[[ARG3]]
+//      CHECK: %[[TENSOR_ALLOC:.*]] = alloc()
+//      CHECK: return %[[ARG2]], %[[TENSOR_ALLOC]], %[[ARG1]], %[[FIRST_ALLOC]],
+// CHECK-SAME: %[[ARG3]]
 
 // -----
 
@@ -103,11 +114,12 @@ func @caller(%arg0: tensor<5xf32>) -> tensor<5xf32> {
 // -----
 
 // Test case: Testing BufferizeCallOpConverter to see if it matches with the
-// signature of the new signature of the callee function when there are tuple typed
-// args and results. BufferizeTypeConverter is set to flatten tuple typed
+// new signature of the callee function when there are tuple
+// typed args and results. BufferizeTypeConverter is set to flatten tuple typed
 // arguments. The tuple typed values should be decomposed and composed using
 // get_tuple_element and make_tuple operations of test dialect. Tensor types are
-// converted to Memref. Memref typed function results remain as function results.
+// converted to Memref. Memref typed function results remain as function
+// results.
 
 // CHECK-LABEL: func @callee
 func @callee(%arg0: tuple<tensor<2xf32>,i1, tensor<5xf32>>) -> (tuple<tensor<2xf32>,i1, tensor<5xf32>>){
@@ -150,8 +162,8 @@ func @caller(%arg0: tuple<tensor<2xf32>,i1, tensor<5xf32>>) -> tuple<tensor<2xf3
 // -----
 
 // Test case: Testing BufferizeFuncOpConverter and
-// BufferizeReturnOpConverter to see if the return operation matches with
-// the new function signature when there are tuple typed args and results.
+// BufferizeReturnOpConverter to see if the return operation matches with the
+// new function signature when there are tuple typed args and results.
 // BufferizeTypeConverter is set to flatten tuple typed arguments. The tuple
 // typed values should be decomposed and composed using get_tuple_element and
 // make_tuple operations of test dialect. Tensor types are converted to Memref.
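
To make the escaping behavior described at the top of this test file concrete,
here is a schematic before/after sketch (illustrative only, not part of the
patch; the function name is made up) for the
-test-finalizing-bufferize-with-allowed-memref-results pass:

// Input: a tensor-based test op whose result is returned.
func @sketch(%arg0: tensor<5xf32>) -> tensor<5xf32> {
  %0 = test.tensor_based in(%arg0 : tensor<5xf32>) -> tensor<5xf32>
  return %0 : tensor<5xf32>
}

// Expected shape of the output: the tensor argument and result become memrefs,
// the tensor-based op becomes an alloc plus a buffer-based op, and the
// returned buffer escapes (no copy into an out-argument, no dealloc).
func @sketch(%arg0: memref<5xf32>) -> memref<5xf32> {
  %0 = alloc() : memref<5xf32>
  test.buffer_based in(%arg0 : memref<5xf32>) out(%0 : memref<5xf32>)
  return %0 : memref<5xf32>
}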

diff  --git a/mlir/test/Transforms/finalizing-bufferize.mlir b/mlir/test/Transforms/finalizing-bufferize.mlir
index ad6bdb145a9f..83436bc56911 100644
--- a/mlir/test/Transforms/finalizing-bufferize.mlir
+++ b/mlir/test/Transforms/finalizing-bufferize.mlir
@@ -8,21 +8,23 @@ func @func_signature_conversion(%arg0: tensor<4x8xf32>) {
 
 // -----
 
-// Only tensor typed function result should be converted to memref and move to the
-// function arguments list. The other memref function results remain as function
-// results.
-
-#map0 = affine_map<(d0) -> (d0)>
+// Only the tensor typed function result should be converted to memref and
+// moved to the function arguments list. The other memref function results
+// remain as function results.
 
 // CHECK-LABEL: func @memref_in_function_results
-func @memref_in_function_results(%arg0: tensor<5xf32>, %arg1: memref<10xf32>) -> (tensor<5xf32>, memref<10xf32>, memref<15xf32>) {
+func @memref_in_function_results(%arg0: tensor<5xf32>, %arg1: memref<10xf32>)
+                            -> (tensor<5xf32>, memref<10xf32>, memref<15xf32>) {
   %0 = alloc() : memref<15xf32>
-  return %arg0, %arg1, %0 : tensor<5xf32>, memref<10xf32>, memref<15xf32>
+  %1 = test.tensor_based in(%arg0 : tensor<5xf32>) -> tensor<5xf32>
+  return %1, %arg1, %0 : tensor<5xf32>, memref<10xf32>, memref<15xf32>
 }
-//      CHECK: (%[[ARG0:.*]]: memref<5xf32>, %[[ARG1:.*]]: memref<10xf32>, %[[RESULT:.*]]: memref<5xf32>)
+//      CHECK: (%[[ARG0:.*]]: memref<5xf32>, %[[ARG1:.*]]: memref<10xf32>,
+// CHECK-SAME: %[[RESULT:.*]]: memref<5xf32>)
 // CHECK-SAME: (memref<10xf32>, memref<15xf32>)
 //      CHECK: %[[FIRST_ALLOC:.*]] = alloc()
-//      CHECK: linalg.copy(%[[ARG0]], %[[RESULT]])
+//      CHECK: %[[TENSOR_ALLOC:.*]] = alloc()
+//      CHECK: test.copy(%[[TENSOR_ALLOC]], %[[RESULT]])
 //      CHECK: return %[[ARG1]], %[[FIRST_ALLOC]]
 
 // -----
@@ -45,29 +47,38 @@ func @no_signature_conversion_is_needed(%arg0: i1, %arg1: f16) -> (i1, f16){
 // -----
 
 // CHECK-LABEL: func @complex_signature_conversion
-func @complex_signature_conversion(%arg0: tensor<4x8xf32>, %arg1: i1, %arg2: tensor<5x5xf64>,%arg3: f16) -> (i1, tensor<5x5xf64>, f16, tensor<4x8xf32>) {
-    return %arg1, %arg2, %arg3, %arg0 : i1, tensor<5x5xf64>, f16, tensor<4x8xf32>
+func @complex_signature_conversion(%arg0: tensor<4x8xf32>, %arg1: i1,
+                                   %arg2: tensor<5x5xf64>,%arg3: f16) ->
+                                   (i1, tensor<5x5xf64>, f16, tensor<4x8xf32>) {
+    return %arg1, %arg2, %arg3, %arg0 : i1, tensor<5x5xf64>, f16,
+           tensor<4x8xf32>
 }
-//      CHECK: (%[[ARG0:.*]]: memref<4x8xf32>, %[[ARG1:.*]]: i1, %[[ARG2:.*]]: memref<5x5xf64>, %[[ARG3:.*]]: f16,
-// CHECK-SAME: %[[RESULT1:.*]]: memref<5x5xf64>, %[[RESULT2:.*]]: memref<4x8xf32>) -> (i1, f16) {
-// CHECK-NEXT: linalg.copy(%[[ARG2]], %[[RESULT1]])
-// CHECK-NEXT: linalg.copy(%[[ARG0]], %[[RESULT2]])
+//      CHECK: (%[[ARG0:.*]]: memref<4x8xf32>, %[[ARG1:.*]]: i1
+// CHECK-SAME: %[[ARG2:.*]]: memref<5x5xf64>, %[[ARG3:.*]]: f16
+// CHECK-SAME: %[[RESULT1:.*]]: memref<5x5xf64>
+// CHECK-SAME: %[[RESULT2:.*]]: memref<4x8xf32>) -> (i1, f16) {
+// CHECK-NEXT: test.copy(%[[ARG2]], %[[RESULT1]])
+// CHECK-NEXT: test.copy(%[[ARG0]], %[[RESULT2]])
 // CHECK-NEXT: return %[[ARG1]], %[[ARG3]]
 
 // -----
 
 // CHECK-LABEL: func @non_void_to_void_return_op_converter
-func @non_void_to_void_return_op_converter(%arg0: tensor<4x8xf32>) -> tensor<4x8xf32> {
+func @non_void_to_void_return_op_converter(%arg0: tensor<4x8xf32>)
+                                           -> tensor<4x8xf32> {
   return %arg0 : tensor<4x8xf32>
 }
-//      CHECK: (%[[ARG0:.*]]: [[TYPE:.*]]<[[RANK:.*]]>, %[[RESULT:.*]]: [[TYPE]]<[[RANK]]>) {
-// CHECK-NEXT: linalg.copy(%[[ARG0]], %[[RESULT]])
+//      CHECK: (%[[ARG0:.*]]: [[TYPE:.*]]<[[RANK:.*]]>,
+// CHECK-SAME: %[[RESULT:.*]]: [[TYPE]]<[[RANK]]>) {
+// CHECK-NEXT: test.copy(%[[ARG0]], %[[RESULT]])
 // CHECK-NEXT: return
 
 // -----
 
 // CHECK-LABEL: func @func_and_block_signature_conversion
-func @func_and_block_signature_conversion(%arg0 : tensor<2xf32>, %cond : i1, %arg1: tensor<4x4xf32>) -> tensor<4x4xf32>{
+func @func_and_block_signature_conversion(%arg0 : tensor<2xf32>, %cond : i1,
+                                          %arg1: tensor<4x4xf32>)
+                                          -> tensor<4x4xf32>{
     cond_br %cond, ^bb1, ^bb2
   ^bb1:
     br ^exit(%arg0 : tensor<2xf32>)
@@ -76,15 +87,83 @@ func @func_and_block_signature_conversion(%arg0 : tensor<2xf32>, %cond : i1, %ar
   ^exit(%arg2: tensor<2xf32>):
     return %arg1 : tensor<4x4xf32>
 }
-//      CHECK: (%[[ARG0:.*]]: [[ARG0_TYPE:.*]], %[[COND:.*]]: i1, %[[ARG1:.*]]: [[ARG1_TYPE:.*]], %[[RESULT:.*]]: [[RESULT_TYPE:.*]]) {
+//      CHECK: (%[[ARG0:.*]]: [[ARG0_TYPE:.*]], %[[COND:.*]]: i1,
+// CHECK-SAME: %[[ARG1:.*]]: [[ARG1_TYPE:.*]],
+// CHECK-SAME: %[[RESULT:.*]]: [[RESULT_TYPE:.*]]) {
 //      CHECK: br ^[[EXIT_BLOCK:.*]](%[[ARG0]] : [[ARG0_TYPE]])
 //      CHECK: br ^[[EXIT_BLOCK]](%[[ARG0]] : [[ARG0_TYPE]])
 //      CHECK: ^[[EXIT_BLOCK]](%{{.*}}: [[ARG0_TYPE]])
-// CHECK-NEXT: linalg.copy(%[[ARG1]], %[[RESULT]])
+// CHECK-NEXT: test.copy(%[[ARG1]], %[[RESULT]])
 // CHECK-NEXT: return
 
 // -----
 
+// Test Case: Simple case for checking if BufferizePlacer creates AllocOps
+//            right before TensorBasedOp.
+
+// CHECK-LABEL: func @compute_allocs_position_simple
+func @compute_allocs_position_simple(%cond: i1, %arg0: tensor<2xf32>)
+                                     -> tensor<2xf32>{
+    %0 = test.tensor_based in(%arg0 : tensor<2xf32>) -> tensor<2xf32>
+    %1 = test.tensor_based in(%0 : tensor<2xf32>) -> tensor<2xf32>
+    return %1 : tensor<2xf32>
+}
+//      CHECK: (%{{.*}}: {{.*}}, %[[ARG0:.*]]: memref<2xf32>,
+// CHECK-NEXT: %[[FIRST_ALLOC:.*]] = alloc()
+// CHECK-NEXT: test.buffer_based in(%[[ARG0]]{{.*}} out(%[[FIRST_ALLOC]]
+//      CHECK: %[[SECOND_ALLOC:.*]] = alloc()
+// CHECK-NEXT: test.buffer_based in(%[[FIRST_ALLOC]]{{.*}} out(%[[SECOND_ALLOC]]
+
+// -----
+
+// Test Case: if-else case for checking if BufferizePlacer creates AllocOps
+//            right before TensorBasedOp.
+
+// CHECK-LABEL: func @compute_allocs_position
+func @compute_allocs_position(%cond: i1, %arg0: tensor<2xf32>) -> tensor<2xf32>{
+    %0 = test.tensor_based in(%arg0 : tensor<2xf32>) -> tensor<2xf32>
+    %1 = test.tensor_based in(%0 : tensor<2xf32>) -> tensor<2xf32>
+    cond_br %cond, ^bb1(%arg0, %0: tensor<2xf32>, tensor<2xf32>),
+                   ^bb2(%0, %arg0: tensor<2xf32>, tensor<2xf32>)
+  ^bb1(%arg1 : tensor<2xf32>, %arg2 : tensor<2xf32>):
+    %2 = test.tensor_based in(%arg0 : tensor<2xf32>) -> tensor<2xf32>
+    %3 = test.tensor_based in(%2 : tensor<2xf32>) -> tensor<2xf32>
+    br ^exit(%arg1, %arg2 : tensor<2xf32>, tensor<2xf32>)
+  ^bb2(%arg3 : tensor<2xf32>, %arg4 : tensor<2xf32>):
+    %4 = test.tensor_based in(%arg0 : tensor<2xf32>) -> tensor<2xf32>
+    %5 = test.tensor_based in(%4 : tensor<2xf32>) -> tensor<2xf32>
+    br ^exit(%arg3, %arg4 : tensor<2xf32>, tensor<2xf32>)
+  ^exit(%arg5 : tensor<2xf32>, %arg6 : tensor<2xf32>):
+    %6 = test.tensor_based in(%arg0 : tensor<2xf32>)  -> tensor<2xf32>
+    %7 = test.tensor_based in(%6 : tensor<2xf32>) -> tensor<2xf32>
+    return %7 : tensor<2xf32>
+}
+//      CHECK: (%{{.*}}: {{.*}}, %[[ARG0:.*]]: memref<2xf32>,
+// CHECK-NEXT: %[[ALLOC0:.*]] = alloc()
+// CHECK-NEXT: test.buffer_based in(%[[ARG0]]{{.*}} out(%[[ALLOC0]]
+//      CHECK: %[[ALLOC1:.*]] = alloc()
+// CHECK-NEXT: test.buffer_based in(%[[ALLOC0]]{{.*}} out(%[[ALLOC1]]
+//      CHECK: cond_br %{{.*}}, ^[[BB0:.*]]({{.*}}), ^[[BB1:.*]](
+// CHECK-NEXT: ^[[BB0]]
+// CHECK-NEXT: %[[ALLOC2:.*]] = alloc()
+// CHECK-NEXT: test.buffer_based in(%[[ARG0]]{{.*}} out(%[[ALLOC2]]
+//      CHECK: %[[ALLOC3:.*]] = alloc()
+// CHECK-NEXT: test.buffer_based in(%[[ALLOC2]]{{.*}} out(%[[ALLOC3]]
+//      CHECK: br ^[[EXIT:.*]]({{.*}})
+// CHECK-NEXT: ^[[BB1]]
+// CHECK-NEXT: %[[ALLOC4:.*]] = alloc()
+// CHECK-NEXT: test.buffer_based in(%[[ARG0]]{{.*}} out(%[[ALLOC4]]
+//      CHECK: %[[ALLOC5:.*]] = alloc()
+// CHECK-NEXT: test.buffer_based in(%[[ALLOC4]]{{.*}} out(%[[ALLOC5]]
+//      CHECK: br ^[[EXIT]]
+// CHECK-NEXT: ^[[EXIT]]
+// CHECK-NEXT: %[[ALLOC6:.*]] = alloc()
+// CHECK-NEXT: test.buffer_based in(%[[ARG0]]{{.*}} out(%[[ALLOC6]]
+//      CHECK: %[[ALLOC7:.*]] = alloc()
+// CHECK-NEXT: test.buffer_based in(%[[ALLOC6]]{{.*}} out(%[[ALLOC7]]
+
+// -----
+
 // Test case: Checking BufferizeCallOpConverter and
 // BufferizeFuncOpConverter and BufferizeReturnOpConverter all
 // together. The signature of `callee` after signature conversion would be:
@@ -94,14 +173,16 @@ func @func_and_block_signature_conversion(%arg0 : tensor<2xf32>, %cond : i1, %ar
 // The operands and results of caller and return operations must be matched
 // respectively.
 
-#map0 = affine_map<(d0) -> (d0)>
-
 // CHECK-LABEL: func @callee
-func @callee(%arg0: tensor<5xf32>) -> tensor<5xf32> {
-  return %arg0 : tensor<5xf32>
+func @callee(%arg1: tensor<5xf32>) -> tensor<5xf32> {
+  %0 = test.tensor_based in(%arg1 : tensor<5xf32>) -> tensor<5xf32>
+  return %0 : tensor<5xf32>
 }
-// CHECK: (%[[CALLEE_ARG:.*]]: memref<5xf32>, %[[CALLEE_RESULT:.*]]: memref<5xf32>)
-// CHECK: linalg.copy(%[[CALLEE_ARG]], %[[CALLEE_RESULT]])
+// CHECK: (%[[CALLEE_ARG:.*]]: memref<5xf32>,
+// CHECK-SAME: %[[CALLEE_RESULT:.*]]: memref<5xf32>)
+// CHECK: %[[ALLOC:.*]] = alloc()
+// CHECK: test.buffer_based
+// CHECK: test.copy(%[[ALLOC]], %[[CALLEE_RESULT]])
 // CHECK: return
 
 // CHECK-LABEL: func @caller
@@ -110,12 +191,13 @@ func @caller(%arg0: tensor<5xf32>) -> tensor<5xf32> {
   %y = call @callee(%x) : (tensor<5xf32>) -> tensor<5xf32>
   return %y : tensor<5xf32>
 }
-// CHECK: (%[[CALLER_ARG:.*]]: memref<5xf32>, %[[CALLER_RESULT:.*]]: memref<5xf32>)
+// CHECK: (%[[CALLER_ARG:.*]]: memref<5xf32>,
+// CHECK-SAME: %[[CALLER_RESULT:.*]]: memref<5xf32>)
 // CHECK: %[[FIRST_ALLOC:.*]] = alloc()
 // CHECK: call @callee(%[[CALLER_ARG]], %[[FIRST_ALLOC]])
 // CHECK: %[[SECOND_ALLOC:.*]] = alloc()
 // CHECK: call @callee(%[[FIRST_ALLOC]], %[[SECOND_ALLOC]])
-// CHECK: linalg.copy(%[[SECOND_ALLOC]], %[[CALLER_RESULT]])
+// CHECK: test.copy(%[[SECOND_ALLOC]], %[[CALLER_RESULT]])
 // CHECK: return
 
 // -----
@@ -127,37 +209,39 @@ func @caller(%arg0: tensor<5xf32>) -> tensor<5xf32> {
 
 // func @callee(%arg0: memref<5xf32>,%arg1: memref<5xf32>)-> memref<2xf32>
 
-// where %arg0 is the input and %arg1 is the output buffer and the original memref
-// type result remain as the function result. Then, the rewriter should match the
-// caller's signature with the callee. Thus, two buffers will be allocated instead
-// of %x0 and %y0 and they are passed to the callers' operands list as the output
-// buffers. %x1 and %y1 remain as callers' results.
-
+// where %arg0 is the input and %arg1 is the output buffer, and the original
+// memref type result remains as the function result. Then, the rewriter should
+// match the caller's signature with the callee's. Thus, two buffers will be
+// allocated instead of %x0 and %y0 and passed to the caller's operand list as
+// the output buffers. %x1 and %y1 remain as the caller's results.
 
 // CHECK-LABEL: func @callee
 func @callee(%arg1: tensor<5xf32>) -> (tensor<5xf32>, memref<2xf32>) {
   %buff = alloc() : memref<2xf32>
   return %arg1, %buff : tensor<5xf32>, memref<2xf32>
 }
-//      CHECK: (%[[CALLEE_ARG:.*]]: memref<5xf32>, %[[CALLEE_RESULT:.*]]: memref<5xf32>)
+//      CHECK: (%[[CALLEE_ARG:.*]]: memref<5xf32>,
+// CHECK-SAME: %[[CALLEE_RESULT:.*]]: memref<5xf32>)
 // CHECK-SAME: memref<2xf32>
 //      CHECK: %[[ALLOC:.*]] = alloc()
-//      CHECK: linalg.copy(%[[CALLEE_ARG]], %[[CALLEE_RESULT]])
+//      CHECK: test.copy(%[[CALLEE_ARG]], %[[CALLEE_RESULT]])
 //      CHECK: return %[[ALLOC]]
 
-
 // CHECK-LABEL: func @caller
 func @caller(%arg0: tensor<5xf32>) -> tensor<5xf32> {
-  %x0, %x1 = call @callee(%arg0) : (tensor<5xf32>) -> (tensor<5xf32>, memref<2xf32>)
-  %y0, %y1 = call @callee(%x0) : (tensor<5xf32>) -> (tensor<5xf32>, memref<2xf32>)
+  %x0, %x1 = call @callee(%arg0) : (tensor<5xf32>)
+                                   -> (tensor<5xf32>, memref<2xf32>)
+  %y0, %y1 = call @callee(%x0) : (tensor<5xf32>)
+                                 -> (tensor<5xf32>, memref<2xf32>)
   return %y0 : tensor<5xf32>
 }
-// CHECK: (%[[CALLER_ARG:.*]]: memref<5xf32>, %[[CALLER_RESULT:.*]]: memref<5xf32>)
+// CHECK: (%[[CALLER_ARG:.*]]: memref<5xf32>,
+// CHECK-SAME: %[[CALLER_RESULT:.*]]: memref<5xf32>)
 // CHECK: %[[X0:.*]] = alloc()
 // CHECK: %[[X1:.*]] = call @callee(%[[CALLER_ARG]], %[[X0]])
 // CHECK: %[[Y0:.*]] = alloc()
 // CHECK: %[[Y1:.*]] = call @callee(%[[X0]], %[[Y0]])
-// CHECK: linalg.copy(%[[Y0]], %[[CALLER_RESULT]])
+// CHECK: test.copy(%[[Y0]], %[[CALLER_RESULT]])
 // CHECK: return
 
 // -----
@@ -171,82 +255,125 @@ func @func_with_unranked_arg(%arg0: tensor<*xf32>) {
 // -----
 
 // Test case: Testing BufferizeCallOpConverter to see if it matches with the
-// signature of the new signature of the callee function when there are tuple typed
-// args and results. BufferizeTypeConverter is set to flatten tuple typed
-// arguments. The tuple typed values should be decomposed and composed using
-// get_tuple_element and make_tuple operations of test dialect. Tensor types are
-// converted to Memref. Memref typed function results are appended to the function
-// arguments list.
+// new signature of the callee function when there are tuple
+// typed args and results. BufferizeTypeConverter is set to flatten tuple
+// typed arguments. The tuple typed values should be decomposed and composed
+// using get_tuple_element and make_tuple operations of test dialect. Tensor
+// types are converted to Memref. Memref typed function results are appended to
+// the function arguments list.
 
 // CHECK-LABEL: func @callee
-func @callee(%arg0: tuple<tensor<2xf32>,i1, tensor<5xf32>>) -> (tuple<tensor<2xf32>,i1, tensor<5xf32>>){
+func @callee(%arg0: tuple<tensor<2xf32>,i1, tensor<5xf32>>)
+             -> (tuple<tensor<2xf32>,i1, tensor<5xf32>>){
   return %arg0 : tuple<tensor<2xf32>,i1, tensor<5xf32>>
 }
-// CHECK-SAME: (%[[ARG0:.*]]: memref<2xf32>, %[[ARG1:.*]]: i1, %[[ARG2:.*]]: memref<5xf32>, %[[RESULT0:.*]]: memref<2xf32>, %[[RESULT1:.*]]: memref<5xf32>)
-// CHECK-SAME: i1
-// CHECK-NEXT: %[[TUPLE:.*]] = "test.make_tuple"(%[[ARG0]], %[[ARG1]], %[[ARG2]])
-// CHECK-NEXT: %[[FIRST_ELEM:.*]] = "test.get_tuple_element"(%[[TUPLE]]) {index = 0 : i32}
-// CHECK-NEXT: %[[SECOND_ELEM:.*]] = "test.get_tuple_element"(%[[TUPLE]]) {index = 1 : i32}
-// CHECK-NEXT: %[[THIRD_ELEM:.*]]  = "test.get_tuple_element"(%[[TUPLE]]) {index = 2 : i32}
-// CHECK-NEXT: linalg.copy(%[[FIRST_ELEM]], %[[RESULT0]])
-// CHECK-NEXT: linalg.copy(%[[THIRD_ELEM]], %[[RESULT1]])
+// CHECK-SAME: (%[[ARG0:.*]]: memref<2xf32>, %[[ARG1:.*]]: i1,
+// CHECK-SAME: %[[ARG2:.*]]: memref<5xf32>, %[[RESULT0:.*]]: memref<2xf32>,
+// CHECK-SAME: %[[RESULT1:.*]]: memref<5xf32>) -> i1
+// CHECK-NEXT: %[[TUPLE:.*]] = "test.make_tuple"(%[[ARG0]], %[[ARG1]],
+// CHECK-SAME: %[[ARG2]])
+// CHECK-NEXT: %[[FIRST_ELEM:.*]] = "test.get_tuple_element"(%[[TUPLE]])
+// CHECK-SAME: {index = 0 : i32}
+// CHECK-NEXT: %[[SECOND_ELEM:.*]] = "test.get_tuple_element"(%[[TUPLE]])
+// CHECK-SAME: {index = 1 : i32}
+// CHECK-NEXT: %[[THIRD_ELEM:.*]]  = "test.get_tuple_element"(%[[TUPLE]])
+// CHECK-SAME: {index = 2 : i32}
+// CHECK-NEXT: test.copy(%[[FIRST_ELEM]], %[[RESULT0]])
+// CHECK-NEXT: test.copy(%[[THIRD_ELEM]], %[[RESULT1]])
 // CHECK-NEXT: return %[[SECOND_ELEM]]
 
-
 // CHECK-LABEL: func @caller
-func @caller(%arg0: tuple<tensor<2xf32>,i1, tensor<5xf32>>) -> tuple<tensor<2xf32>,i1, tensor<5xf32>>{
-  %x0 = call @callee(%arg0) : (tuple<tensor<2xf32>,i1, tensor<5xf32>>) -> (tuple<tensor<2xf32>,i1, tensor<5xf32>>)
-  %y0 = call @callee(%x0) : (tuple<tensor<2xf32>,i1, tensor<5xf32>>) -> (tuple<tensor<2xf32>,i1, tensor<5xf32>>)
+func @caller(%arg0: tuple<tensor<2xf32>,i1, tensor<5xf32>>)
+             -> tuple<tensor<2xf32>,i1, tensor<5xf32>>{
+  %x0 = call @callee(%arg0) : (tuple<tensor<2xf32>,i1, tensor<5xf32>>)
+                              -> (tuple<tensor<2xf32>,i1, tensor<5xf32>>)
+  %y0 = call @callee(%x0) : (tuple<tensor<2xf32>,i1, tensor<5xf32>>)
+                            -> (tuple<tensor<2xf32>,i1, tensor<5xf32>>)
   return %y0 : tuple<tensor<2xf32>,i1, tensor<5xf32>>
 }
-// CHECK-SAME: (%[[ARG0:.*]]: memref<2xf32>, %[[ARG1:.*]]: i1, %[[ARG2:.*]]: memref<5xf32>, %[[RESULT0:.*]]: memref<2xf32>, %[[RESULT1:.*]]: memref<5xf32>)
-// CHECK-SAME: i1
-// CHECK-NEXT: %[[TUPLE:.*]] = "test.make_tuple"(%[[ARG0]], %[[ARG1]], %[[ARG2]])
-// CHECK-NEXT: %[[FIRST_ELEM:.*]] = "test.get_tuple_element"(%[[TUPLE]]) {index = 0 : i32}
-// CHECK-NEXT: %[[SECOND_ELEM:.*]] = "test.get_tuple_element"(%[[TUPLE]]) {index = 1 : i32}
-// CHECK-NEXT: %[[THIRD_ELEM:.*]]  = "test.get_tuple_element"(%[[TUPLE]]) {index = 2 : i32}
+// CHECK-SAME: (%[[ARG0:.*]]: memref<2xf32>, %[[ARG1:.*]]: i1,
+// CHECK-SAME: %[[ARG2:.*]]: memref<5xf32>, %[[RESULT0:.*]]: memref<2xf32>,
+// CHECK-SAME: %[[RESULT1:.*]]: memref<5xf32>) -> i1
+// CHECK-NEXT: %[[TUPLE:.*]] = "test.make_tuple"(%[[ARG0]], %[[ARG1]],
+// CHECK-SAME: %[[ARG2]])
+// CHECK-NEXT: %[[FIRST_ELEM:.*]] = "test.get_tuple_element"(%[[TUPLE]])
+// CHECK-SAME: {index = 0 : i32}
+// CHECK-NEXT: %[[SECOND_ELEM:.*]] = "test.get_tuple_element"(%[[TUPLE]])
+// CHECK-SAME: {index = 1 : i32}
+// CHECK-NEXT: %[[THIRD_ELEM:.*]]  = "test.get_tuple_element"(%[[TUPLE]])
+// CHECK-SAME: {index = 2 : i32}
 // CHECK-NEXT: %[[FIRST_ALLOC:.*]] = alloc()
 // CHECK-NEXT: %[[SECOND_ALLOC:.*]] = alloc()
-// CHECK-NEXT: %[[CALLEE_RESULT:.*]] = call @callee(%[[FIRST_ELEM]], %[[SECOND_ELEM]], %[[THIRD_ELEM]], %[[FIRST_ALLOC]], %[[SECOND_ALLOC]])
-// CHECK-SAME: (memref<2xf32>, i1, memref<5xf32>, memref<2xf32>, memref<5xf32>) -> i1
-// CHECK-NEXT: %[[TUPLE:.*]] = "test.make_tuple"(%[[FIRST_ALLOC]], %[[CALLEE_RESULT]], %[[SECOND_ALLOC]])
-// CHECK-NEXT: %[[FIRST_ELEM:.*]] = "test.get_tuple_element"(%[[TUPLE]]) {index = 0 : i32}
-// CHECK-NEXT: %[[SECOND_ELEM:.*]] = "test.get_tuple_element"(%[[TUPLE]]) {index = 1 : i32}
-// CHECK-NEXT: %[[THIRD_ELEM:.*]]  = "test.get_tuple_element"(%[[TUPLE]]) {index = 2 : i32}
+// CHECK-NEXT: %[[CALLEE_RESULT:.*]] = call @callee(%[[FIRST_ELEM]],
+// CHECK-SAME: %[[SECOND_ELEM]], %[[THIRD_ELEM]], %[[FIRST_ALLOC]],
+// CHECK-SAME: %[[SECOND_ALLOC]])
+// CHECK-SAME: (memref<2xf32>, i1,
+// CHECK-SAME: memref<5xf32>, memref<2xf32>, memref<5xf32>) -> i1
+// CHECK-NEXT: %[[TUPLE:.*]] = "test.make_tuple"(%[[FIRST_ALLOC]],
+// CHECK-SAME: %[[CALLEE_RESULT]], %[[SECOND_ALLOC]])
+// CHECK-NEXT: %[[FIRST_ELEM:.*]] = "test.get_tuple_element"(%[[TUPLE]])
+// CHECK-SAME: {index = 0 : i32}
+// CHECK-NEXT: %[[SECOND_ELEM:.*]] = "test.get_tuple_element"(%[[TUPLE]])
+// CHECK-SAME: {index = 1 : i32}
+// CHECK-NEXT: %[[THIRD_ELEM:.*]]  = "test.get_tuple_element"(%[[TUPLE]])
+// CHECK-SAME: {index = 2 : i32}
 // CHECK-NEXT: %[[FIRST_ALLOC:.*]] = alloc()
 // CHECK-NEXT: %[[SECOND_ALLOC:.*]] = alloc()
-// CHECK-NEXT: %[[CALLEE_RESULT:.*]] = call @callee(%[[FIRST_ELEM]], %[[SECOND_ELEM]], %[[THIRD_ELEM]], %[[FIRST_ALLOC]], %[[SECOND_ALLOC]])
-// CHECK-SAME: (memref<2xf32>, i1, memref<5xf32>, memref<2xf32>, memref<5xf32>) -> i1
-// CHECK-NEXT: %[[TUPLE:.*]] = "test.make_tuple"(%[[FIRST_ALLOC]], %[[CALLEE_RESULT]], %[[SECOND_ALLOC]])
-// CHECK-NEXT: %[[FIRST_ELEM:.*]] = "test.get_tuple_element"(%[[TUPLE]]) {index = 0 : i32}
-// CHECK-NEXT: %[[SECOND_ELEM:.*]] = "test.get_tuple_element"(%[[TUPLE]]) {index = 1 : i32}
-// CHECK-NEXT: %[[THIRD_ELEM:.*]]  = "test.get_tuple_element"(%[[TUPLE]]) {index = 2 : i32}
-// CHECK-NEXT: linalg.copy(%[[FIRST_ELEM]], %[[RESULT0]])
-// CHECK-NEXT: linalg.copy(%[[THIRD_ELEM]], %[[RESULT1]])
+// CHECK-NEXT: %[[CALLEE_RESULT:.*]] = call @callee(%[[FIRST_ELEM]],
+// CHECK-SAME: %[[SECOND_ELEM]], %[[THIRD_ELEM]], %[[FIRST_ALLOC]],
+// CHECK-SAME: %[[SECOND_ALLOC]])
+// CHECK-SAME: (memref<2xf32>, i1, memref<5xf32>, memref<2xf32>, memref<5xf32>)
+// CHECK-SAME: i1
+// CHECK-NEXT: %[[TUPLE:.*]] = "test.make_tuple"(%[[FIRST_ALLOC]],
+// CHECK-SAME: %[[CALLEE_RESULT]], %[[SECOND_ALLOC]])
+// CHECK-NEXT: %[[FIRST_ELEM:.*]] = "test.get_tuple_element"(%[[TUPLE]])
+// CHECK-SAME: {index = 0 : i32}
+// CHECK-NEXT: %[[SECOND_ELEM:.*]] = "test.get_tuple_element"(%[[TUPLE]])
+// CHECK-SAME: {index = 1 : i32}
+// CHECK-NEXT: %[[THIRD_ELEM:.*]]  = "test.get_tuple_element"(%[[TUPLE]])
+// CHECK-SAME: {index = 2 : i32}
+// CHECK-NEXT: test.copy(%[[FIRST_ELEM]], %[[RESULT0]])
+// CHECK-NEXT: test.copy(%[[THIRD_ELEM]], %[[RESULT1]])
 // CHECK-NEXT: return %[[SECOND_ELEM]]
 
 // -----
 
-// Test case: Testing BufferizeFuncOpConverter and
-// BufferizeReturnOpConverter to see if the return operation matches with
-// the new function signature when there are tuple typed args and results.
-// BufferizeTypeConverter is set to flatten tuple typed arguments. The tuple
-// typed values should be decomposed and composed using get_tuple_element and
-// make_tuple operations of test dialect. Tensor types are converted to Memref.
-// Memref typed function results are appended to the function arguments list.
+// Test case: Testing BufferizeFuncOpConverter and BufferizeReturnOpConverter
+// to see if the return operation matches with the new function signature when
+// there are tuple typed args and results. BufferizeTypeConverter is set to
+// flatten tuple typed arguments. The tuple typed values should be decomposed
+// and composed using get_tuple_element and make_tuple operations of test
+// dialect. Tensor types are converted to Memref. Memref typed function results
+// are appended to the function arguments list.
 
 // CHECK-LABEL: func @decompose_tuple_typed_function_args_and_results
-func @decompose_tuple_typed_function_args_and_results(%arg0: tuple<i1,f32>, %arg1: tensor<10xf32>, %arg2: tuple<i1, tensor<5xf32>>) -> (tuple<i1, tensor<5xf32>>, tensor<10xf32>, tuple<i1,f32>){
-  return %arg2, %arg1, %arg0 : tuple<i1, tensor<5xf32>>, tensor<10xf32>, tuple<i1,f32>
+func @decompose_tuple_typed_function_args_and_results(%arg0: tuple<i1,f32>,
+                                                      %arg1: tensor<10xf32>,
+                                                      %arg2: tuple<i1,
+                                                             tensor<5xf32>>)
+                                                      -> (tuple<i1,
+                                                                tensor<5xf32>>,
+                                                      tensor<10xf32>,
+                                                            tuple<i1,f32>){
+  return %arg2, %arg1, %arg0 : tuple<i1, tensor<5xf32>>, tensor<10xf32>,
+                                    tuple<i1,f32>
 }
-// CHECK-SAME: %[[ARG0:.*]]: i1, %[[ARG1:.*]]: f32, %[[ARG2:.*]]: memref<10xf32>, %[[ARG3:.*]]: i1, %[[ARG4:.*]]: memref<5xf32>, %[[RESULT0:.*]]: memref<5xf32>, %[[RESULT1:.*]]: memref<10xf32>
+// CHECK-SAME: %[[ARG0:.*]]: i1, %[[ARG1:.*]]: f32,
+// CHECK-SAME: %[[ARG2:.*]]: memref<10xf32>, %[[ARG3:.*]]: i1,
+// CHECK-SAME: %[[ARG4:.*]]: memref<5xf32>, %[[RESULT0:.*]]: memref<5xf32>,
+// CHECK-SAME: %[[RESULT1:.*]]: memref<10xf32>
 // CHECK-SAME: (i1, i1, f32)
 // CHECK-NEXT: %[[FIRST_TUPLE:.*]] = "test.make_tuple"(%[[ARG0]], %[[ARG1]])
 // CHECK-NEXT: %[[SECOND_TUPLE:.*]] = "test.make_tuple"(%[[ARG3]], %[[ARG4]])
-// CHECK-NEXT: %[[SECOND_TUPLE_FIRST_ELEM:.*]]  = "test.get_tuple_element"(%[[SECOND_TUPLE]]) {index = 0 : i32}
-// CHECK-NEXT: %[[SECOND_TUPLE_SECOND_ELEM:.*]] = "test.get_tuple_element"(%[[SECOND_TUPLE]]) {index = 1 : i32}
-// CHECK-NEXT: %[[FIRST_TUPLE_FIRST_ELEM:.*]] = "test.get_tuple_element"(%[[FIRST_TUPLE]]) {index = 0 : i32}
-// CHECK-NEXT: %[[FIRST_TUPLE_SECOND_ELEM:.*]] = "test.get_tuple_element"(%[[FIRST_TUPLE]]) {index = 1 : i32}
-// CHECK-NEXT: linalg.copy(%[[SECOND_TUPLE_SECOND_ELEM]], %[[RESULT0]])
-// CHECK-NEXT: linalg.copy(%[[ARG2]], %[[RESULT1]])
-// CHECK-NEXT: return %[[SECOND_TUPLE_FIRST_ELEM]], %[[FIRST_TUPLE_FIRST_ELEM]], %[[FIRST_TUPLE_SECOND_ELEM]]
+// CHECK-NEXT: %[[SECOND_TUPLE_FIRST_ELEM:.*]]  = "test.get_tuple_element"
+// CHECK-SAME: (%[[SECOND_TUPLE]]) {index = 0 : i32}
+// CHECK-NEXT: %[[SECOND_TUPLE_SECOND_ELEM:.*]] = "test.get_tuple_element"
+// CHECK-SAME: (%[[SECOND_TUPLE]]) {index = 1 : i32}
+// CHECK-NEXT: %[[FIRST_TUPLE_FIRST_ELEM:.*]] = "test.get_tuple_element"
+// CHECK-SAME: (%[[FIRST_TUPLE]]) {index = 0 : i32}
+// CHECK-NEXT: %[[FIRST_TUPLE_SECOND_ELEM:.*]] = "test.get_tuple_element"
+// CHECK-SAME: (%[[FIRST_TUPLE]]) {index = 1 : i32}
+// CHECK-NEXT: test.copy(%[[SECOND_TUPLE_SECOND_ELEM]], %[[RESULT0]])
+// CHECK-NEXT: test.copy(%[[ARG2]], %[[RESULT1]])
+// CHECK-NEXT: return %[[SECOND_TUPLE_FIRST_ELEM]], %[[FIRST_TUPLE_FIRST_ELEM]],
+// CHECK-SAME: %[[FIRST_TUPLE_SECOND_ELEM]]

diff  --git a/mlir/test/Transforms/promote-buffers-to-stack.mlir b/mlir/test/Transforms/promote-buffers-to-stack.mlir
index 2f195943b35a..33f7c1a1a302 100644
--- a/mlir/test/Transforms/promote-buffers-to-stack.mlir
+++ b/mlir/test/Transforms/promote-buffers-to-stack.mlir
@@ -21,23 +21,17 @@ func @condBranch(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) {
   br ^bb3(%arg1 : memref<2xf32>)
 ^bb2:
   %0 = alloc() : memref<2xf32>
-  linalg.generic {indexing_maps = [#map0, #map0], iterator_types = ["parallel"]}
-    ins(%arg1: memref<2xf32>)
-   outs(%0: memref<2xf32>) {
-  ^bb0(%gen1_arg0: f32, %gen1_arg1: f32):
-    %tmp1 = exp %gen1_arg0 : f32
-    linalg.yield %tmp1 : f32
-  }
+  test.buffer_based in(%arg1: memref<2xf32>) out(%0: memref<2xf32>)
   br ^bb3(%0 : memref<2xf32>)
 ^bb3(%1: memref<2xf32>):
-  "linalg.copy"(%1, %arg2) : (memref<2xf32>, memref<2xf32>) -> ()
+  test.copy(%1, %arg2) : (memref<2xf32>, memref<2xf32>)
   return
 }
 
 // CHECK-NEXT: cond_br {{.*}}
 //      CHECK: ^bb2
 // CHECK-NEXT: %[[ALLOCA:.*]] = alloca()
-//      CHECK: linalg.copy
+//      CHECK: test.copy
 // CHECK-NEXT: return
 
 // -----
@@ -64,16 +58,10 @@ func @condBranchDynamicType(
   br ^bb3(%arg1 : memref<?xf32>)
 ^bb2(%0: index):
   %1 = alloc(%0) : memref<?xf32>
-  linalg.generic {indexing_maps = [#map0, #map0], iterator_types = ["parallel"]}
-    ins(%arg1: memref<?xf32>)
-   outs(%1: memref<?xf32>) {
-  ^bb0(%gen1_arg0: f32, %gen1_arg1: f32):
-    %tmp1 = exp %gen1_arg0 : f32
-    linalg.yield %tmp1 : f32
-  }
+  test.buffer_based in(%arg1: memref<?xf32>) out(%1: memref<?xf32>)
   br ^bb3(%1 : memref<?xf32>)
 ^bb3(%2: memref<?xf32>):
-  "linalg.copy"(%2, %arg2) : (memref<?xf32>, memref<?xf32>) -> ()
+  test.copy(%2, %arg2) : (memref<?xf32>, memref<?xf32>)
   return
 }
 
@@ -81,10 +69,10 @@ func @condBranchDynamicType(
 //      CHECK: ^bb2
 //      CHECK: ^bb2(%[[IDX:.*]]:{{.*}})
 // CHECK-NEXT: %[[ALLOC0:.*]] = alloc(%[[IDX]])
-// CHECK-NEXT: linalg.generic
+// CHECK-NEXT: test.buffer_based
 //      CHECK: br ^bb3
 // CHECK-NEXT: ^bb3(%[[ALLOC0:.*]]:{{.*}})
-//      CHECK: linalg.copy(%[[ALLOC0]],
+//      CHECK: test.copy(%[[ALLOC0]],
 // CHECK-NEXT: return
 
 // -----
@@ -119,23 +107,17 @@ func @criticalEdge(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) {
   cond_br %arg0, ^bb1, ^bb2(%arg1 : memref<2xf32>)
 ^bb1:
   %0 = alloc() : memref<2xf32>
-  linalg.generic {indexing_maps = [#map0, #map0], iterator_types = ["parallel"]}
-    ins(%arg1: memref<2xf32>)
-   outs(%0: memref<2xf32>) {
-  ^bb0(%gen1_arg0: f32, %gen1_arg1: f32):
-    %tmp1 = exp %gen1_arg0 : f32
-    linalg.yield %tmp1 : f32
-  }
+  test.buffer_based in(%arg1: memref<2xf32>) out(%0: memref<2xf32>)
   br ^bb2(%0 : memref<2xf32>)
 ^bb2(%1: memref<2xf32>):
-  "linalg.copy"(%1, %arg2) : (memref<2xf32>, memref<2xf32>) -> ()
+  test.copy(%1, %arg2) : (memref<2xf32>, memref<2xf32>)
   return
 }
 
 // CHECK-NEXT: cond_br {{.*}}
 //      CHECK: ^bb1
 // CHECK-NEXT: %[[ALLOCA:.*]] = alloca()
-//      CHECK: linalg.copy
+//      CHECK: test.copy
 // CHECK-NEXT: return
 
 // -----
@@ -153,24 +135,18 @@ func @criticalEdge(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) {
 // CHECK-LABEL: func @invCriticalEdge
 func @invCriticalEdge(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) {
   %0 = alloc() : memref<2xf32>
-  linalg.generic {indexing_maps = [#map0, #map0], iterator_types = ["parallel"]}
-    ins(%arg1: memref<2xf32>)
-   outs(%0: memref<2xf32>) {
-  ^bb0(%gen1_arg0: f32, %gen1_arg1: f32):
-    %tmp1 = exp %gen1_arg0 : f32
-    linalg.yield %tmp1 : f32
-  }
+  test.buffer_based in(%arg1: memref<2xf32>) out(%0: memref<2xf32>)
   cond_br %arg0, ^bb1, ^bb2(%arg1 : memref<2xf32>)
 ^bb1:
   br ^bb2(%0 : memref<2xf32>)
 ^bb2(%1: memref<2xf32>):
-  "linalg.copy"(%1, %arg2) : (memref<2xf32>, memref<2xf32>) -> ()
+  test.copy(%1, %arg2) : (memref<2xf32>, memref<2xf32>)
   return
 }
 
 // CHECK-NEXT: %[[ALLOCA:.*]] = alloca()
 //      CHECK: cond_br
-//      CHECK: linalg.copy
+//      CHECK: test.copy
 // CHECK-NEXT: return
 
 // -----
@@ -188,13 +164,7 @@ func @invCriticalEdge(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) {
 // CHECK-LABEL: func @ifElse
 func @ifElse(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) {
   %0 = alloc() : memref<2xf32>
-  linalg.generic {indexing_maps = [#map0, #map0], iterator_types = ["parallel"]}
-    ins(%arg1: memref<2xf32>)
-   outs(%0: memref<2xf32>) {
-  ^bb0(%gen1_arg0: f32, %gen1_arg1: f32):
-    %tmp1 = exp %gen1_arg0 : f32
-    linalg.yield %tmp1 : f32
-  }
+  test.buffer_based in(%arg1: memref<2xf32>) out(%0: memref<2xf32>)
   cond_br %arg0,
     ^bb1(%arg1, %0 : memref<2xf32>, memref<2xf32>),
     ^bb2(%0, %arg1 : memref<2xf32>, memref<2xf32>)
@@ -204,22 +174,16 @@ func @ifElse(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) {
   br ^bb3(%3, %4 : memref<2xf32>, memref<2xf32>)
 ^bb3(%5: memref<2xf32>, %6: memref<2xf32>):
   %7 = alloc() : memref<2xf32>
-  linalg.generic {indexing_maps = [#map0, #map0], iterator_types = ["parallel"]}
-    ins(%5: memref<2xf32>)
-   outs(%7: memref<2xf32>) {
-  ^bb0(%gen2_arg0: f32, %gen2_arg1: f32):
-    %tmp2 = exp %gen2_arg0 : f32
-    linalg.yield %tmp2 : f32
-  }
-  "linalg.copy"(%7, %arg2) : (memref<2xf32>, memref<2xf32>) -> ()
+  test.buffer_based in(%5: memref<2xf32>) out(%7: memref<2xf32>)
+  test.copy(%7, %arg2) : (memref<2xf32>, memref<2xf32>)
   return
 }
 
 // CHECK-NEXT: %[[ALLOCA0:.*]] = alloca()
-// CHECK-NEXT: linalg.generic
+// CHECK-NEXT: test.buffer_based
 //      CHECK: %[[ALLOCA1:.*]] = alloca()
-//      CHECK: linalg.generic
-//      CHECK: linalg.copy(%[[ALLOCA1]]
+//      CHECK: test.buffer_based
+//      CHECK: test.copy(%[[ALLOCA1]]
 // CHECK-NEXT: return
 
 // -----
@@ -237,13 +201,7 @@ func @ifElse(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) {
 // CHECK-LABEL: func @ifElseNoUsers
 func @ifElseNoUsers(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) {
   %0 = alloc() : memref<2xf32>
-  linalg.generic {indexing_maps = [#map0, #map0], iterator_types = ["parallel"]}
-    ins(%arg1: memref<2xf32>)
-   outs(%0: memref<2xf32>) {
-  ^bb0(%gen1_arg0: f32, %gen1_arg1: f32):
-    %tmp1 = exp %gen1_arg0 : f32
-    linalg.yield %tmp1 : f32
-  }
+  test.buffer_based in(%arg1: memref<2xf32>) out(%0: memref<2xf32>)
   cond_br %arg0,
     ^bb1(%arg1, %0 : memref<2xf32>, memref<2xf32>),
     ^bb2(%0, %arg1 : memref<2xf32>, memref<2xf32>)
@@ -252,7 +210,7 @@ func @ifElseNoUsers(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) {
 ^bb2(%3: memref<2xf32>, %4: memref<2xf32>):
   br ^bb3(%3, %4 : memref<2xf32>, memref<2xf32>)
 ^bb3(%5: memref<2xf32>, %6: memref<2xf32>):
-  "linalg.copy"(%arg1, %arg2) : (memref<2xf32>, memref<2xf32>) -> ()
+  test.copy(%arg1, %arg2) : (memref<2xf32>, memref<2xf32>)
   return
 }
 
@@ -278,13 +236,7 @@ func @ifElseNoUsers(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) {
 // CHECK-LABEL: func @ifElseNested
 func @ifElseNested(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) {
   %0 = alloc() : memref<2xf32>
-  linalg.generic {indexing_maps = [#map0, #map0], iterator_types = ["parallel"]}
-    ins(%arg1: memref<2xf32>)
-   outs(%0: memref<2xf32>) {
-  ^bb0(%gen1_arg0: f32, %gen1_arg1: f32):
-    %tmp1 = exp %gen1_arg0 : f32
-    linalg.yield %tmp1 : f32
-  }
+  test.buffer_based in(%arg1: memref<2xf32>) out(%0: memref<2xf32>)
   cond_br %arg0,
     ^bb1(%arg1, %0 : memref<2xf32>, memref<2xf32>),
     ^bb2(%0, %arg1 : memref<2xf32>, memref<2xf32>)
@@ -298,22 +250,16 @@ func @ifElseNested(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) {
   br ^bb5(%3, %6 : memref<2xf32>, memref<2xf32>)
 ^bb5(%7: memref<2xf32>, %8: memref<2xf32>):
   %9 = alloc() : memref<2xf32>
-  linalg.generic {indexing_maps = [#map0, #map0], iterator_types = ["parallel"]}
-    ins(%7: memref<2xf32>)
-   outs(%9: memref<2xf32>) {
-  ^bb0(%gen2_arg0: f32, %gen2_arg1: f32):
-    %tmp2 = exp %gen2_arg0 : f32
-    linalg.yield %tmp2 : f32
-  }
-  "linalg.copy"(%9, %arg2) : (memref<2xf32>, memref<2xf32>) -> ()
+  test.buffer_based in(%7: memref<2xf32>) out(%9: memref<2xf32>)
+  test.copy(%9, %arg2) : (memref<2xf32>, memref<2xf32>)
   return
 }
 
 // CHECK-NEXT: %[[ALLOCA0:.*]] = alloca()
-// CHECK-NEXT: linalg.generic
+// CHECK-NEXT: test.buffer_based
 //      CHECK: %[[ALLOCA1:.*]] = alloca()
-//      CHECK: linalg.generic
-//      CHECK: linalg.copy(%[[ALLOCA1]]
+//      CHECK: test.buffer_based
+//      CHECK: test.copy(%[[ALLOCA1]]
 // CHECK-NEXT: return
 
 // -----
@@ -327,29 +273,17 @@ func @ifElseNested(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) {
 // CHECK-LABEL: func @redundantOperations
 func @redundantOperations(%arg0: memref<2xf32>) {
   %0 = alloc() : memref<2xf32>
-  linalg.generic {indexing_maps = [#map0, #map0], iterator_types = ["parallel"]}
-    ins(%arg0: memref<2xf32>)
-   outs(%0: memref<2xf32>) {
-  ^bb0(%gen1_arg0: f32, %gen1_arg1: f32):
-    %tmp1 = exp %gen1_arg0 : f32
-    linalg.yield %tmp1 : f32
-  }
+  test.buffer_based in(%arg0: memref<2xf32>) out(%0: memref<2xf32>)
   %1 = alloc() : memref<2xf32>
-  linalg.generic {indexing_maps = [#map0, #map0], iterator_types = ["parallel"]}
-    ins(%0: memref<2xf32>)
-   outs(%1: memref<2xf32>) {
-  ^bb0(%gen2_arg0: f32, %gen2_arg1: f32):
-    %tmp2 = exp %gen2_arg0 : f32
-    linalg.yield %tmp2 : f32
-  }
+  test.buffer_based in(%0: memref<2xf32>) out(%1: memref<2xf32>)
   return
 }
 
 //      CHECK: (%[[ARG0:.*]]: {{.*}})
 // CHECK-NEXT: %[[ALLOCA0:.*]] = alloca()
-// CHECK-NEXT: linalg.generic {{{.*}}} ins(%[[ARG0]]{{.*}} outs(%[[ALLOCA0]]
+// CHECK-NEXT: test.buffer_based in(%[[ARG0]]{{.*}} out(%[[ALLOCA0]]
 //      CHECK: %[[ALLOCA1:.*]] = alloca()
-// CHECK-NEXT: linalg.generic {{{.*}}} ins(%[[ALLOCA0]]{{.*}} outs(%[[ALLOCA1]]
+// CHECK-NEXT: test.buffer_based in(%[[ALLOCA0]]{{.*}} out(%[[ALLOCA1]]
 //      CHECK: return
 
 // -----
@@ -373,26 +307,14 @@ func @moving_alloc_and_inserting_missing_dealloc(
   cond_br %cond, ^bb1, ^bb2
 ^bb1:
   %0 = alloc() : memref<2xf32>
-  linalg.generic {indexing_maps = [#map0, #map0], iterator_types = ["parallel"]}
-    ins(%arg0: memref<2xf32>)
-   outs(%0: memref<2xf32>) {
-  ^bb0(%gen1_arg0: f32, %gen1_arg1: f32):
-    %tmp1 = exp %gen1_arg0 : f32
-    linalg.yield %tmp1 : f32
-  }
+  test.buffer_based in(%arg0: memref<2xf32>) out(%0: memref<2xf32>)
   br ^exit(%0 : memref<2xf32>)
 ^bb2:
   %1 = alloc() : memref<2xf32>
-  linalg.generic {indexing_maps = [#map0, #map0], iterator_types = ["parallel"]}
-    ins(%arg0: memref<2xf32>)
-   outs(%1: memref<2xf32>) {
-  ^bb0(%gen2_arg0: f32, %gen2_arg1: f32):
-    %tmp2 = exp %gen2_arg0 : f32
-    linalg.yield %tmp2 : f32
-  }
+  test.buffer_based in(%arg0: memref<2xf32>) out(%1: memref<2xf32>)
   br ^exit(%1 : memref<2xf32>)
 ^exit(%arg2: memref<2xf32>):
-  "linalg.copy"(%arg2, %arg1) : (memref<2xf32>, memref<2xf32>) -> ()
+  test.copy(%arg2, %arg1) : (memref<2xf32>, memref<2xf32>)
   return
 }
 
@@ -401,13 +323,13 @@ func @moving_alloc_and_inserting_missing_dealloc(
 // CHECK-NEXT: %{{.*}} = alloca()
 //      CHECK: ^bb2
 // CHECK-NEXT: %{{.*}} = alloca()
-//      CHECK: linalg.copy
+//      CHECK: test.copy
 // CHECK-NEXT: return
 
 // -----
 
-// Test Case: Nested regions - This test defines a GenericOp inside the region
-// of another GenericOp.
+// Test Case: Nested regions - This test defines a BufferBasedOp inside the
+// region of a RegionBufferBasedOp.
 // PromoteBuffersToStack expected behavior: The AllocOps are converted into
 // allocas.
 
@@ -423,28 +345,16 @@ func @nested_regions_and_cond_branch(
   br ^bb3(%arg1 : memref<2xf32>)
 ^bb2:
   %0 = alloc() : memref<2xf32>
-  linalg.generic {
-    indexing_maps = [#map0, #map0],
-    iterator_types = ["parallel"]}
-    ins(%arg1: memref<2xf32>)
-   outs(%0: memref<2xf32>) {
+  test.region_buffer_based in(%arg1: memref<2xf32>) out(%0: memref<2xf32>) {
   ^bb0(%gen1_arg0: f32, %gen1_arg1: f32):
     %1 = alloc() : memref<2xf32>
-    linalg.generic {
-      indexing_maps = [#map0, #map0],
-      iterator_types = ["parallel"]}
-      ins(%arg1: memref<2xf32>)
-    outs(%1: memref<2xf32>) {
-    ^bb0(%gen2_arg0: f32, %gen2_arg1: f32):
-      %tmp2 = exp %gen2_arg0 : f32
-      linalg.yield %tmp2 : f32
-    }
+    test.buffer_based in(%arg1: memref<2xf32>) out(%1: memref<2xf32>)
     %tmp1 = exp %gen1_arg0 : f32
-    linalg.yield %tmp1 : f32
+    test.region_yield %tmp1 : f32
   }
   br ^bb3(%0 : memref<2xf32>)
 ^bb3(%1: memref<2xf32>):
-  "linalg.copy"(%1, %arg2) : (memref<2xf32>, memref<2xf32>) -> ()
+  test.copy(%1, %arg2) : (memref<2xf32>, memref<2xf32>)
   return
 }
 
@@ -470,21 +380,15 @@ func @memref_in_function_results(
   %arg2: memref<5xf32>) -> (memref<10xf32>, memref<15xf32>) {
   %x = alloc() : memref<15xf32>
   %y = alloc() : memref<5xf32>
-  linalg.generic {indexing_maps = [#map0, #map0], iterator_types = ["parallel"]}
-    ins(%arg0: memref<5xf32>)
-   outs(%y: memref<5xf32>) {
-  ^bb0(%arg3: f32, %arg4: f32):
-    %2 = exp %arg3 : f32
-    linalg.yield %2 : f32
-  }
-  linalg.copy(%y, %arg2) : memref<5xf32>, memref<5xf32>
+  test.buffer_based in(%arg0: memref<5xf32>) out(%y: memref<5xf32>)
+  test.copy(%y, %arg2) : (memref<5xf32>, memref<5xf32>)
   return %arg1, %x : memref<10xf32>, memref<15xf32>
 }
 //      CHECK: (%[[ARG0:.*]]: memref<5xf32>, %[[ARG1:.*]]: memref<10xf32>,
 // CHECK-SAME: %[[RESULT:.*]]: memref<5xf32>)
 //      CHECK: %[[ALLOC:.*]] = alloc()
 //      CHECK: %[[ALLOCA:.*]] = alloca()
-//      CHECK: linalg.copy
+//      CHECK: test.copy
 //      CHECK: return %[[ARG1]], %[[ALLOC]]
 
 // -----
@@ -566,7 +470,7 @@ func @loop_alloc(
     %3 = alloc() : memref<2xf32>
     scf.yield %3 : memref<2xf32>
   }
-  "linalg.copy"(%1, %res) : (memref<2xf32>, memref<2xf32>) -> ()
+  test.copy(%1, %res) : (memref<2xf32>, memref<2xf32>)
   return
 }
 
@@ -600,7 +504,7 @@ func @loop_nested_if_no_alloc(
     }
     scf.yield %3 : memref<2xf32>
   }
-  "linalg.copy"(%1, %res) : (memref<2xf32>, memref<2xf32>) -> ()
+  test.copy(%1, %res) : (memref<2xf32>, memref<2xf32>)
   return
 }
 
@@ -610,7 +514,7 @@ func @loop_nested_if_no_alloc(
 //      CHECK: scf.yield %[[ALLOCA0]]
 //      CHECK: scf.yield %[[IALLOCA]]
 //      CHECK: scf.yield %[[ALLOCA2]]
-//      CHECK: linalg.copy(%[[ALLOCA1]], %arg4)
+//      CHECK: test.copy(%[[ALLOCA1]], %arg4)
 
 // -----
 
@@ -656,9 +560,9 @@ func @loop_nested_if_alloc(
 // CHECK-LABEL: func @large_buffer_allocation
 func @large_buffer_allocation(%arg0: memref<2048xf32>) {
   %0 = alloc() : memref<2048xf32>
-  "linalg.copy"(%0, %arg0) : (memref<2048xf32>, memref<2048xf32>) -> ()
+  test.copy(%0, %arg0) : (memref<2048xf32>, memref<2048xf32>)
   return
 }
 
 // CHECK-NEXT: %[[ALLOC:.*]] = alloc()
-// CHECK-NEXT: linalg.copy
+// CHECK-NEXT: test.copy

diff  --git a/mlir/test/lib/Dialect/Test/TestDialect.h b/mlir/test/lib/Dialect/Test/TestDialect.h
index e44b5610c33b..7eb189b515ca 100644
--- a/mlir/test/lib/Dialect/Test/TestDialect.h
+++ b/mlir/test/lib/Dialect/Test/TestDialect.h
@@ -24,6 +24,7 @@
 #include "mlir/IR/SymbolTable.h"
 #include "mlir/Interfaces/CallInterfaces.h"
 #include "mlir/Interfaces/ControlFlowInterfaces.h"
+#include "mlir/Interfaces/CopyOpInterface.h"
 #include "mlir/Interfaces/DerivedAttributeOpInterface.h"
 #include "mlir/Interfaces/InferTypeOpInterface.h"
 #include "mlir/Interfaces/SideEffectInterfaces.h"

diff  --git a/mlir/test/lib/Dialect/Test/TestOps.td b/mlir/test/lib/Dialect/Test/TestOps.td
index 144f418c730f..41a21b5c376f 100644
--- a/mlir/test/lib/Dialect/Test/TestOps.td
+++ b/mlir/test/lib/Dialect/Test/TestOps.td
@@ -13,9 +13,9 @@ include "mlir/IR/OpBase.td"
 include "mlir/IR/OpAsmInterface.td"
 include "mlir/IR/RegionKindInterface.td"
 include "mlir/IR/SymbolInterfaces.td"
-include "mlir/Interfaces/SideEffectInterfaces.td"
 include "mlir/Interfaces/CallInterfaces.td"
 include "mlir/Interfaces/ControlFlowInterfaces.td"
+include "mlir/Interfaces/CopyOpInterface.td"
 include "mlir/Interfaces/InferTypeOpInterface.td"
 include "mlir/Interfaces/SideEffectInterfaces.td"
 
@@ -1693,6 +1693,81 @@ def SideEffectOp : TEST_Op<"side_effect_op",
   let results = (outs AnyType:$result);
 }
 
+//===----------------------------------------------------------------------===//
+// Test CopyOpInterface
+//===----------------------------------------------------------------------===//
+
+def CopyOp : TEST_Op<"copy", [CopyOpInterface]> {
+  let description = [{
+    Represents a copy operation.
+  }];
+  let arguments = (ins Res<AnyMemRef, "", [MemRead]>:$source,
+                   Res<AnyMemRef, "", [MemWrite]>:$target);
+  let assemblyFormat = [{
+    `(` $source `,` $target `)` `:` `(` type($source) `,` type($target) `)`
+     attr-dict
+  }];
+  let extraClassDeclaration = [{
+    Value getSource() { return source(); }
+    Value getTarget() { return target(); }
+  }];
+}
+
+//===----------------------------------------------------------------------===//
+// Test Buffer/Tensor
+//===----------------------------------------------------------------------===//
+
+def RegionYieldOp : TEST_Op<"region_yield",
+      [NoSideEffect, ReturnLike, Terminator]> {
+  let description = [{
+    This operation is used inside a region and yields a value of the
+    corresponding type to its parent operation.
+  }];
+  let arguments = (ins AnyType:$result);
+  let assemblyFormat = [{
+    $result `:` type($result) attr-dict
+  }];
+  let builders = [OpBuilderDAG<(ins),
+    [{ build($_builder, $_state, {}); }]>
+  ];
+}
+
+class BufferBasedOpBase<string mnemonic, list<OpTrait> traits>
+    : TEST_Op<mnemonic, traits> {
+  let description = [{
+    A buffer-based operation that uses memrefs as input and output.
+  }];
+  let arguments = (ins AnyMemRef:$input, AnyMemRef:$output);
+}
+
+def BufferBasedOp : BufferBasedOpBase<"buffer_based", []> {
+  let assemblyFormat = [{
+    `in` `(` $input`:` type($input) `)` `out` `(` $output`:` type($output) `)`
+    attr-dict
+  }];
+}
+
+def RegionBufferBasedOp : BufferBasedOpBase<"region_buffer_based",
+      [SingleBlockImplicitTerminator<"RegionYieldOp">]> {
+  let regions = (region AnyRegion:$region);
+  let assemblyFormat = [{
+    `in` `(` $input`:` type($input) `)` `out` `(` $output`:` type($output) `)`
+    $region attr-dict
+  }];
+}
+
+def TensorBasedOp : TEST_Op<"tensor_based", []> {
+  let description = [{
+    A tensor-based operation that takes a tensor as input and produces a
+    tensor result.
+  }];
+  let arguments = (ins AnyRankedTensor:$input);
+  let results = (outs AnyRankedTensor:$result);
+  let assemblyFormat = [{
+    `in` `(` $input`:` type($input) `)` `->` type($result) attr-dict
+  }];
+}
+
 //===----------------------------------------------------------------------===//
 // Test RegionBranchOpInterface
 //===----------------------------------------------------------------------===//

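For reference, the test operations added to TestOps.td above print as follows
(an illustrative sketch only; shapes and SSA names are placeholders, and the
region_buffer_based and tensor_based forms are derived from the assembly
formats above rather than copied from a particular test):

  %0 = alloc() : memref<2xf32>
  test.buffer_based in(%arg1: memref<2xf32>) out(%0: memref<2xf32>)
  test.region_buffer_based in(%arg1: memref<2xf32>) out(%0: memref<2xf32>) {
  ^bb0(%in: f32, %out: f32):
    %e = exp %in : f32
    test.region_yield %e : f32
  }
  test.copy(%0, %arg2) : (memref<2xf32>, memref<2xf32>)
  %t = test.tensor_based in(%arg0: tensor<2xf32>) -> tensor<2xf32>
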
diff  --git a/mlir/test/lib/Transforms/TestFinalizingBufferize.cpp b/mlir/test/lib/Transforms/TestFinalizingBufferize.cpp
index 9d9c0a43f0ae..b7d48c66cae6 100644
--- a/mlir/test/lib/Transforms/TestFinalizingBufferize.cpp
+++ b/mlir/test/lib/Transforms/TestFinalizingBufferize.cpp
@@ -13,7 +13,7 @@
 
 #include "TestDialect.h"
 #include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h"
-#include "mlir/Dialect/Linalg/IR/LinalgOps.h"
+#include "mlir/IR/BlockAndValueMapping.h"
 #include "mlir/IR/Function.h"
 #include "mlir/IR/Operation.h"
 #include "mlir/Pass/Pass.h"
@@ -47,9 +47,47 @@ struct TestFinalizingBufferizePass
     : mlir::PassWrapper<TestFinalizingBufferizePass<allowMemrefFunctionResults>,
                         OperationPass<ModuleOp>> {
 
+  /// Converts tensor-based test operations into buffer-based ones during
+  /// bufferization.
+  class TensorBasedOpConverter
+      : public BufferizeOpConversionPattern<mlir::TensorBasedOp> {
+  public:
+    using BufferizeOpConversionPattern<
+        mlir::TensorBasedOp>::BufferizeOpConversionPattern;
+
+    LogicalResult
+    matchAndRewrite(mlir::TensorBasedOp op, ArrayRef<Value> operands,
+                    ConversionPatternRewriter &rewriter) const final {
+      mlir::TensorBasedOpAdaptor adaptor(
+          operands, op.getOperation()->getAttrDictionary());
+
+      // The input needs to be turned into a buffer first. Until then, bail out.
+      if (!adaptor.input().getType().isa<MemRefType>())
+        return failure();
+
+      Location loc = op.getLoc();
+
+      // Update the result type to a memref type.
+      auto type = op.getResult().getType().cast<ShapedType>();
+      if (!type.hasStaticShape())
+        return rewriter.notifyMatchFailure(
+            op, "dynamic shapes not currently supported");
+      auto memrefType = MemRefType::get(type.getShape(), type.getElementType());
+      Value newOutputBuffer = rewriter.create<AllocOp>(loc, memrefType);
+
+      // Generate a new test operation that works on buffers.
+      rewriter.create<mlir::BufferBasedOp>(loc,
+                                           /*input=*/adaptor.input(),
+                                           /*output=*/newOutputBuffer);
+
+      // Replace the results of the old op with the new output buffers.
+      rewriter.replaceOp(op, newOutputBuffer);
+      return success();
+    }
+  };
+
   void getDependentDialects(DialectRegistry &registry) const override {
     registry.insert<TestDialect>();
-    registry.insert<linalg::LinalgDialect>();
   }
 
   void runOnOperation() override {
@@ -59,12 +97,17 @@ struct TestFinalizingBufferizePass
 
     // Mark all Standard operations legal.
     target.addLegalDialect<StandardOpsDialect>();
-    target.addLegalOp<linalg::CopyOp>();
     target.addLegalOp<MakeTupleOp>();
     target.addLegalOp<GetTupleElementOp>();
     target.addLegalOp<ModuleOp>();
     target.addLegalOp<ModuleTerminatorOp>();
 
+    // Mark all Test operations illegal as long as they work on tensors.
+    auto isLegalOperation = [&](Operation *op) {
+      return converter.isLegal(op);
+    };
+    target.addDynamicallyLegalDialect<TestDialect>(isLegalOperation);
+
     // Mark Standard Return operations illegal as long as one operand is tensor.
     target.addDynamicallyLegalOp<mlir::ReturnOp>([&](mlir::ReturnOp returnOp) {
       return converter.isLegal(returnOp.getOperandTypes());
@@ -118,8 +161,10 @@ struct TestFinalizingBufferizePass
 
     OwningRewritePatternList patterns;
     populateWithBufferizeOpConversionPatterns<mlir::ReturnOp, mlir::ReturnOp,
-                                              linalg::CopyOp>(
+                                              mlir::CopyOp>(
         &context, converter, patterns);
+    patterns.insert<TensorBasedOpConverter>(&context, converter);
+
     if (failed(applyFullConversion(this->getOperation(), target,
                                    std::move(patterns))))
       this->signalPassFailure();

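To illustrate what the TensorBasedOpConverter above does (a sketch under the
assumption that the operand has already been bufferized to a memref; not taken
from the test suite): a tensor-based op such as

  %result = test.tensor_based in(%arg0: tensor<4xf32>) -> tensor<4xf32>

is rewritten into an allocation plus the buffer-based form, and the old result
is replaced by the newly allocated output buffer:

  %0 = alloc() : memref<4xf32>
  test.buffer_based in(%arg0: memref<4xf32>) out(%0: memref<4xf32>)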