[Mlir-commits] [mlir] [mlir][bufferization] Fix OneShotBufferize when `defaultMemorySpaceFn` is used (PR #91524)

Christopher Bate llvmlistbot at llvm.org
Mon May 20 09:24:56 PDT 2024


================
@@ -0,0 +1,133 @@
+// RUN: mlir-opt %s -one-shot-bufferize="use-encoding-for-memory-space" -split-input-file | FileCheck %s
+
+// TODO: move to tensor dialect tests
+func.func @from_elements(%fill: f32, %f: f32, %idx: index) -> tensor<3xf32, 1> {
+  %t = tensor.from_elements %fill, %fill, %fill : tensor<3xf32, 1>
+  %i = tensor.insert %f into %t[%idx] : tensor<3xf32, 1>
+  return %i : tensor<3xf32, 1>
+}
+
+// CHECK-LABEL: @from_elements
+//  CHECK-SAME: (%[[arg0:.+]]: f32, %[[arg1:.+]]: f32, %[[arg2:.+]]: index) -> tensor<3xf32, 1 : i64>
+//       CHECK:     %[[alloc:.+]] = memref.alloc() {{.*}} : memref<3xf32, 1>
+//       CHECK-DAG:     %[[c0:.+]] = arith.constant 0 : index
+//       CHECK-DAG:     %[[c1:.+]] = arith.constant 1 : index
+//       CHECK-DAG:     %[[c2:.+]] = arith.constant 2 : index
+//       CHECK:     memref.store %[[arg0]], %[[alloc]][%[[c0]]] : memref<3xf32, 1>
+//       CHECK:     memref.store %[[arg0]], %[[alloc]][%[[c1]]] : memref<3xf32, 1>
+//       CHECK:     memref.store %[[arg0]], %[[alloc]][%[[c2]]] : memref<3xf32, 1>
+//       CHECK:     memref.store %[[arg1]], %[[alloc]][%[[arg2]]] : memref<3xf32, 1>
+//       CHECK:     %[[v0:.+]] = bufferization.to_tensor %[[alloc]] : memref<3xf32, 1> -> tensor<3xf32, 1 : i64>
+//       CHECK:     return %[[v0]] : tensor<3xf32, 1 : i64>
+
+// -----
+
+func.func @alloc_tesor_with_space_no_encoding() -> tensor<128xf32> {
+  %0 = bufferization.alloc_tensor() {memory_space = 1 : i64} : tensor<128xf32>
+  return %0 : tensor<128xf32>
+}
+
+// CHECK-LABEL: @alloc_tesor_with_space_no_encoding
+//  CHECK-SAME: () -> tensor<128xf32> {
+//       CHECK:     %[[alloc:.+]] = memref.alloc() {alignment = 64 : i64} : memref<128xf32, 1>
+//       CHECK:     %[[v0:.+]] = bufferization.to_tensor %[[alloc]] : memref<128xf32, 1> -> tensor<128xf32>
+//       CHECK:     return %[[v0]] : tensor<128xf32>
+
+// -----
+
+func.func @alloc_tesor_with_space_and_cast() -> tensor<128xf32, 1> {
+  %0 = bufferization.alloc_tensor() {memory_space = 1 : i64} : tensor<128xf32>
+  %1 = tensor.cast %0 : tensor<128xf32> to tensor<128xf32, 1>
+  return %1 : tensor<128xf32, 1>
+}
+
+// CHECK-LABEL: @alloc_tesor_with_space_and_cast
+//  CHECK-SAME: () -> tensor<128xf32, 1 : i64> {
+//       CHECK:     %[[alloc:.+]] = memref.alloc() {alignment = 64 : i64} : memref<128xf32, 1>
+//       CHECK:     %[[v0:.+]] = bufferization.to_tensor %[[alloc]] : memref<128xf32, 1> -> tensor<128xf32, 1 : i64>
+//       CHECK:     return %[[v0]] : tensor<128xf32, 1 : i64>
+
+// -----
+
+func.func @alloc_tesor_with_space_with_encoding() -> tensor<128xf32, 1 : i64> {
+  %0 = bufferization.alloc_tensor() {memory_space = 1 : i64} : tensor<128xf32, 1 : i64>
+  return %0 : tensor<128xf32, 1 : i64>
+}
+
+// CHECK-LABEL: @alloc_tesor_with_space_with_encoding
+//  CHECK-SAME: () -> tensor<128xf32, 1 : i64> {
+//       CHECK:     %[[alloc:.+]] = memref.alloc() {alignment = 64 : i64} : memref<128xf32, 1>
+//       CHECK:     %[[v0:.+]] = bufferization.to_tensor %[[alloc]] : memref<128xf32, 1> -> tensor<128xf32, 1 : i64>
+//       CHECK:     return %[[v0]] : tensor<128xf32, 1 : i64>
+
+// -----
+
+func.func @alloc_tesor_copy_from_default_space(%arg0: tensor<128xf32>) -> tensor<128xf32> {
+  %0 = bufferization.alloc_tensor() copy(%arg0) {memory_space = 1 : i64} : tensor<128xf32>
+  return %0 : tensor<128xf32>
+}
+
+// CHECK-LABEL: @alloc_tesor_copy_from_default_space
+//  CHECK-SAME: (%[[arg0:.+]]: tensor<128xf32>) -> tensor<128xf32> {
+//       CHECK:     %[[v0:.+]] = bufferization.to_memref %[[arg0]] : tensor<128xf32> -> memref<128xf32, strided<[?], offset: ?>>
+//       CHECK:     %[[alloc:.+]] = memref.alloc() {alignment = 64 : i64} : memref<128xf32, 1>
+//       CHECK:     memref.copy %[[v0]], %[[alloc]] : memref<128xf32, strided<[?], offset: ?>> to memref<128xf32, 1>
+//       CHECK:     %[[v1:.+]] = bufferization.to_tensor %[[alloc]] : memref<128xf32, 1> -> tensor<128xf32>
+//       CHECK:     return %[[v1]] : tensor<128xf32>
+
+// -----
+
+func.func @alloc_tesor_copy_from_non_default_space(%arg0: tensor<128xf32, 1>) -> tensor<128xf32, 2> {
+  %0 = bufferization.alloc_tensor() copy(%arg0) {memory_space = 2 : i64} : tensor<128xf32, 1>
----------------
christopherbate wrote:

Since having a `copy` generates a `memref.copy`, the meaning here is to copy data between memory spaces. However, `bufferization.alloc_tensor` only prints one type for both the `copy` operand and the result, so we will also need to update the ASM format for `bufferization.alloc_tensor`. That will remove the need for the `tensor.cast`.

https://github.com/llvm/llvm-project/pull/91524


More information about the Mlir-commits mailing list