[Mlir-commits] [mlir] [MLIR] NFC. Move leftover memref op test cases out of test/IR (PR #115583)

llvmlistbot at llvm.org
Fri Nov 8 21:25:08 PST 2024


llvmbot wrote:



@llvm/pr-subscribers-mlir

Author: Uday Bondhugula (bondhugula)

Changes:

Move the memref dialect ops' test cases out of test/IR/ and into
test/Dialect/MemRef/. It was also surprising not to find test cases for
ops like memref.view under test/Dialect/MemRef/.
NFC.
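
For context, the moved files are parse/print roundtrip tests: mlir-opt reparses its own output and FileCheck matches the reprinted form. A minimal sketch in the style of the moved memref.view cases (the %size/%offset names here are illustrative, not from the patch):

```mlir
// CHECK-LABEL: func @view_sketch
func.func @view_sketch(%size: index, %offset: index) {
  %buf = memref.alloc() : memref<2048xi8>
  // One dynamic size and a dynamic offset into the byte buffer.
  // CHECK: memref.view {{.*}} : memref<2048xi8> to memref<4x?xf32>
  %v = memref.view %buf[%offset][%size] : memref<2048xi8> to memref<4x?xf32>
  return
}
```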


---
Full diff: https://github.com/llvm/llvm-project/pull/115583.diff


3 Files Affected:

- (modified) mlir/test/Dialect/MemRef/ops.mlir (+198) 
- (modified) mlir/test/IR/core-ops.mlir (-81) 
- (removed) mlir/test/IR/memory-ops.mlir (-118) 


``````````diff
diff --git a/mlir/test/Dialect/MemRef/ops.mlir b/mlir/test/Dialect/MemRef/ops.mlir
index f616f6795bf9dc..7038a6ff744e4e 100644
--- a/mlir/test/Dialect/MemRef/ops.mlir
+++ b/mlir/test/Dialect/MemRef/ops.mlir
@@ -1,6 +1,123 @@
 // RUN: mlir-opt %s | mlir-opt | FileCheck %s
 // RUN: mlir-opt %s --mlir-print-op-generic | mlir-opt | FileCheck %s
 
+// CHECK: #[[$MAP:.*]] = affine_map<(d0, d1)[s0] -> (d0 + s0, d1)>
+
+// CHECK-LABEL: func @alloc() {
+func.func @alloc() {
+^bb0:
+  // Test simple alloc.
+  // CHECK: %{{.*}} = memref.alloc() : memref<1024x64xf32, 1>
+  %0 = memref.alloc() : memref<1024x64xf32, affine_map<(d0, d1) -> (d0, d1)>, 1>
+
+  %c0 = "arith.constant"() {value = 0: index} : () -> index
+  %c1 = "arith.constant"() {value = 1: index} : () -> index
+
+  // Test alloc with dynamic dimensions.
+  // CHECK: %{{.*}} = memref.alloc(%{{.*}}, %{{.*}}) : memref<?x?xf32, 1>
+  %1 = memref.alloc(%c0, %c1) : memref<?x?xf32, affine_map<(d0, d1) -> (d0, d1)>, 1>
+
+  // Test alloc with no dynamic dimensions and one symbol.
+  // CHECK: %{{.*}} = memref.alloc()[%{{.*}}] : memref<2x4xf32, #[[$MAP]], 1>
+  %2 = memref.alloc()[%c0] : memref<2x4xf32, affine_map<(d0, d1)[s0] -> ((d0 + s0), d1)>, 1>
+
+  // Test alloc with dynamic dimensions and one symbol.
+  // CHECK: %{{.*}} = memref.alloc(%{{.*}})[%{{.*}}] : memref<2x?xf32, #[[$MAP]], 1>
+  %3 = memref.alloc(%c1)[%c0] : memref<2x?xf32, affine_map<(d0, d1)[s0] -> (d0 + s0, d1)>, 1>
+
+  // Alloc with no mappings.
+  // b/116054838 Parser crash while parsing ill-formed AllocOp
+  // CHECK: %{{.*}} = memref.alloc() : memref<2xi32>
+  %4 = memref.alloc() : memref<2 x i32>
+
+  // CHECK:   return
+  return
+}
+
+// CHECK-LABEL: func @alloca() {
+func.func @alloca() {
+^bb0:
+  // Test simple alloca.
+  // CHECK: %{{.*}} = memref.alloca() : memref<1024x64xf32, 1>
+  %0 = memref.alloca() : memref<1024x64xf32, affine_map<(d0, d1) -> (d0, d1)>, 1>
+
+  %c0 = "arith.constant"() {value = 0: index} : () -> index
+  %c1 = "arith.constant"() {value = 1: index} : () -> index
+
+  // Test alloca with dynamic dimensions.
+  // CHECK: %{{.*}} = memref.alloca(%{{.*}}, %{{.*}}) : memref<?x?xf32, 1>
+  %1 = memref.alloca(%c0, %c1) : memref<?x?xf32, affine_map<(d0, d1) -> (d0, d1)>, 1>
+
+  // Test alloca with no dynamic dimensions and one symbol.
+  // CHECK: %{{.*}} = memref.alloca()[%{{.*}}] : memref<2x4xf32, #[[$MAP]], 1>
+  %2 = memref.alloca()[%c0] : memref<2x4xf32, affine_map<(d0, d1)[s0] -> ((d0 + s0), d1)>, 1>
+
+  // Test alloca with dynamic dimensions and one symbol.
+  // CHECK: %{{.*}} = memref.alloca(%{{.*}})[%{{.*}}] : memref<2x?xf32, #[[$MAP]], 1>
+  %3 = memref.alloca(%c1)[%c0] : memref<2x?xf32, affine_map<(d0, d1)[s0] -> (d0 + s0, d1)>, 1>
+
+  // Alloca with no mappings, but with alignment.
+  // CHECK: %{{.*}} = memref.alloca() {alignment = 64 : i64} : memref<2xi32>
+  %4 = memref.alloca() {alignment = 64} : memref<2 x i32>
+
+  return
+}
+
+// CHECK-LABEL: func @dealloc() {
+func.func @dealloc() {
+^bb0:
+  // CHECK: %{{.*}} = memref.alloc() : memref<1024x64xf32>
+  %0 = memref.alloc() : memref<1024x64xf32, affine_map<(d0, d1) -> (d0, d1)>, 0>
+
+  // CHECK: memref.dealloc %{{.*}} : memref<1024x64xf32>
+  memref.dealloc %0 : memref<1024x64xf32, affine_map<(d0, d1) -> (d0, d1)>, 0>
+  return
+}
+
+// CHECK-LABEL: func @load_store
+func.func @load_store() {
+^bb0:
+  // CHECK: %{{.*}} = memref.alloc() : memref<1024x64xf32, 1>
+  %0 = memref.alloc() : memref<1024x64xf32, affine_map<(d0, d1) -> (d0, d1)>, 1>
+
+  %1 = arith.constant 0 : index
+  %2 = arith.constant 1 : index
+
+  // CHECK: %{{.*}} = memref.load %{{.*}}[%{{.*}}, %{{.*}}] : memref<1024x64xf32, 1>
+  %3 = memref.load %0[%1, %2] : memref<1024x64xf32, affine_map<(d0, d1) -> (d0, d1)>, 1>
+
+  // CHECK: memref.store %{{.*}}, %{{.*}}[%{{.*}}, %{{.*}}] : memref<1024x64xf32, 1>
+  memref.store %3, %0[%1, %2] : memref<1024x64xf32, affine_map<(d0, d1) -> (d0, d1)>, 1>
+
+  return
+}
+
+// CHECK-LABEL: func @dma_ops()
+func.func @dma_ops() {
+  %c0 = arith.constant 0 : index
+  %stride = arith.constant 32 : index
+  %elt_per_stride = arith.constant 16 : index
+
+  %A = memref.alloc() : memref<256 x f32, affine_map<(d0) -> (d0)>, 0>
+  %Ah = memref.alloc() : memref<256 x f32, affine_map<(d0) -> (d0)>, 1>
+  %tag = memref.alloc() : memref<1 x f32>
+
+  %num_elements = arith.constant 256 : index
+
+  memref.dma_start %A[%c0], %Ah[%c0], %num_elements, %tag[%c0] : memref<256 x f32>, memref<256 x f32, 1>, memref<1 x f32>
+  memref.dma_wait %tag[%c0], %num_elements : memref<1 x f32>
+  // CHECK: dma_start %{{.*}}[%{{.*}}], %{{.*}}[%{{.*}}], %{{.*}}, %{{.*}}[%{{.*}}] : memref<256xf32>, memref<256xf32, 1>, memref<1xf32>
+  // CHECK-NEXT:  dma_wait %{{.*}}[%{{.*}}], %{{.*}} : memref<1xf32>
+
+  // DMA with strides
+  memref.dma_start %A[%c0], %Ah[%c0], %num_elements, %tag[%c0], %stride, %elt_per_stride : memref<256 x f32>, memref<256 x f32, 1>, memref<1 x f32>
+  memref.dma_wait %tag[%c0], %num_elements : memref<1 x f32>
+  // CHECK-NEXT:  dma_start %{{.*}}[%{{.*}}], %{{.*}}[%{{.*}}], %{{.*}}, %{{.*}}[%{{.*}}], %{{.*}}, %{{.*}} : memref<256xf32>, memref<256xf32, 1>, memref<1xf32>
+  // CHECK-NEXT:  dma_wait %{{.*}}[%{{.*}}], %{{.*}} : memref<1xf32>
+
+  return
+}
+
 // CHECK-LABEL: func @memref_reinterpret_cast
 func.func @memref_reinterpret_cast(%in: memref<?xf32>)
     -> memref<10x?xf32, strided<[?, 1], offset: ?>> {
@@ -90,6 +207,87 @@ func.func @memref_alloca_scope() {
   return
 }
 
+// CHECK-LABEL: func @memref_cast(%arg0
+func.func @memref_cast(%arg0: memref<4xf32>, %arg1 : memref<?xf32>, %arg2 : memref<64x16x4xf32, strided<[64, 4, 1], offset: 0>>) {
+  // CHECK: memref.cast %{{.*}} : memref<4xf32> to memref<?xf32>
+  %0 = memref.cast %arg0 : memref<4xf32> to memref<?xf32>
+
+  // CHECK: memref.cast %{{.*}} : memref<?xf32> to memref<4xf32>
+  %1 = memref.cast %arg1 : memref<?xf32> to memref<4xf32>
+
+  // CHECK: memref.cast %{{.*}} : memref<64x16x4xf32, strided<[64, 4, 1]>> to memref<64x16x4xf32, strided<[?, ?, ?], offset: ?>>
+  %2 = memref.cast %arg2 : memref<64x16x4xf32, strided<[64, 4, 1], offset: 0>> to memref<64x16x4xf32, strided<[?, ?, ?], offset: ?>>
+
+  // CHECK: memref.cast {{%.*}} : memref<64x16x4xf32, strided<[?, ?, ?], offset: ?>> to memref<64x16x4xf32, strided<[64, 4, 1]>>
+  %3 = memref.cast %2 : memref<64x16x4xf32, strided<[?, ?, ?], offset: ?>> to memref<64x16x4xf32, strided<[64, 4, 1], offset: 0>>
+
+  // CHECK: memref.cast %{{.*}} : memref<4xf32> to memref<*xf32>
+  %4 = memref.cast %1 : memref<4xf32> to memref<*xf32>
+
+  // CHECK: memref.cast %{{.*}} : memref<*xf32> to memref<4xf32>
+  %5 = memref.cast %4 : memref<*xf32> to memref<4xf32>
+  return
+}
+
+// Check that unranked memrefs with non-default memory space roundtrip
+// properly.
+// CHECK-LABEL: @unranked_memref_roundtrip(memref<*xf32, 4>)
+func.func private @unranked_memref_roundtrip(memref<*xf32, 4>)
+
+// CHECK-LABEL: func @load_store_prefetch
+func.func @load_store_prefetch(memref<4x4xi32>, index) {
+^bb0(%0: memref<4x4xi32>, %1: index):
+  // CHECK: %0 = memref.load %arg0[%arg1, %arg1] : memref<4x4xi32>
+  %2 = "memref.load"(%0, %1, %1) : (memref<4x4xi32>, index, index)->i32
+
+  // CHECK: %{{.*}} = memref.load %arg0[%arg1, %arg1] : memref<4x4xi32>
+  %3 = memref.load %0[%1, %1] : memref<4x4xi32>
+
+  // CHECK: memref.prefetch %arg0[%arg1, %arg1], write, locality<1>, data : memref<4x4xi32>
+  memref.prefetch %0[%1, %1], write, locality<1>, data : memref<4x4xi32>
+
+  // CHECK: memref.prefetch %arg0[%arg1, %arg1], read, locality<3>, instr : memref<4x4xi32>
+  memref.prefetch %0[%1, %1], read, locality<3>, instr : memref<4x4xi32>
+
+  return
+}
+
+// Test with zero-dimensional operands using no index in load/store.
+// CHECK-LABEL: func @zero_dim_no_idx
+func.func @zero_dim_no_idx(%arg0 : memref<i32>, %arg1 : memref<i32>, %arg2 : memref<i32>) {
+  %0 = memref.load %arg0[] : memref<i32>
+  memref.store %0, %arg1[] : memref<i32>
+  return
+  // CHECK: %0 = memref.load %{{.*}}[] : memref<i32>
+  // CHECK: memref.store %{{.*}}, %{{.*}}[] : memref<i32>
+}
+
+// CHECK-LABEL: func @memref_view(%arg0
+func.func @memref_view(%arg0 : index, %arg1 : index, %arg2 : index) {
+  %0 = memref.alloc() : memref<2048xi8>
+  // Test two dynamic sizes and dynamic offset.
+  // CHECK: memref.view {{.*}} : memref<2048xi8> to memref<?x?xf32>
+  %1 = memref.view %0[%arg2][%arg0, %arg1] : memref<2048xi8> to memref<?x?xf32>
+
+  // Test one dynamic size and dynamic offset.
+  // CHECK: memref.view {{.*}} : memref<2048xi8> to memref<4x?xf32>
+  %3 = memref.view %0[%arg2][%arg1] : memref<2048xi8> to memref<4x?xf32>
+
+  // Test static sizes and static offset.
+  // CHECK: memref.view {{.*}} : memref<2048xi8> to memref<64x4xf32>
+  %c0 = arith.constant 0: index
+  %5 = memref.view %0[%c0][] : memref<2048xi8> to memref<64x4xf32>
+  return
+}
+
+// CHECK-LABEL: func @assume_alignment
+// CHECK-SAME: %[[MEMREF:.*]]: memref<4x4xf16>
+func.func @assume_alignment(%0: memref<4x4xf16>) {
+  // CHECK: memref.assume_alignment %[[MEMREF]], 16 : memref<4x4xf16>
+  memref.assume_alignment %0, 16 : memref<4x4xf16>
+  return
+}
+
 // CHECK-LABEL: func @expand_collapse_shape_static
 func.func @expand_collapse_shape_static(
     %arg0: memref<3x4x5xf32>,
diff --git a/mlir/test/IR/core-ops.mlir b/mlir/test/IR/core-ops.mlir
index 9858bcd1c05e7b..5d1583111541ca 100644
--- a/mlir/test/IR/core-ops.mlir
+++ b/mlir/test/IR/core-ops.mlir
@@ -171,34 +171,6 @@ func.func @affine_apply() {
   return
 }
 
-// CHECK-LABEL: func @load_store_prefetch
-func.func @load_store_prefetch(memref<4x4xi32>, index) {
-^bb0(%0: memref<4x4xi32>, %1: index):
-  // CHECK: %0 = memref.load %arg0[%arg1, %arg1] : memref<4x4xi32>
-  %2 = "memref.load"(%0, %1, %1) : (memref<4x4xi32>, index, index)->i32
-
-  // CHECK: %{{.*}} = memref.load %arg0[%arg1, %arg1] : memref<4x4xi32>
-  %3 = memref.load %0[%1, %1] : memref<4x4xi32>
-
-  // CHECK: memref.prefetch %arg0[%arg1, %arg1], write, locality<1>, data : memref<4x4xi32>
-  memref.prefetch %0[%1, %1], write, locality<1>, data : memref<4x4xi32>
-
-  // CHECK: memref.prefetch %arg0[%arg1, %arg1], read, locality<3>, instr : memref<4x4xi32>
-  memref.prefetch %0[%1, %1], read, locality<3>, instr : memref<4x4xi32>
-
-  return
-}
-
-// Test with zero-dimensional operands using no index in load/store.
-// CHECK-LABEL: func @zero_dim_no_idx
-func.func @zero_dim_no_idx(%arg0 : memref<i32>, %arg1 : memref<i32>, %arg2 : memref<i32>) {
-  %0 = memref.load %arg0[] : memref<i32>
-  memref.store %0, %arg1[] : memref<i32>
-  return
-  // CHECK: %0 = memref.load %{{.*}}[] : memref<i32>
-  // CHECK: memref.store %{{.*}}, %{{.*}}[] : memref<i32>
-}
-
 // CHECK-LABEL: func @return_op(%arg0: i32) -> i32 {
 func.func @return_op(%a : i32) -> i32 {
   // CHECK: return %arg0 : i32
@@ -232,51 +204,6 @@ func.func @calls(%arg0: i32) {
   return
 }
 
-// CHECK-LABEL: func @memref_cast(%arg0
-func.func @memref_cast(%arg0: memref<4xf32>, %arg1 : memref<?xf32>, %arg2 : memref<64x16x4xf32, strided<[64, 4, 1], offset: 0>>) {
-  // CHECK: memref.cast %{{.*}} : memref<4xf32> to memref<?xf32>
-  %0 = memref.cast %arg0 : memref<4xf32> to memref<?xf32>
-
-  // CHECK: memref.cast %{{.*}} : memref<?xf32> to memref<4xf32>
-  %1 = memref.cast %arg1 : memref<?xf32> to memref<4xf32>
-
-  // CHECK: memref.cast %{{.*}} : memref<64x16x4xf32, strided<[64, 4, 1]>> to memref<64x16x4xf32, strided<[?, ?, ?], offset: ?>>
-  %2 = memref.cast %arg2 : memref<64x16x4xf32, strided<[64, 4, 1], offset: 0>> to memref<64x16x4xf32, strided<[?, ?, ?], offset: ?>>
-
-  // CHECK: memref.cast {{%.*}} : memref<64x16x4xf32, strided<[?, ?, ?], offset: ?>> to memref<64x16x4xf32, strided<[64, 4, 1]>>
-  %3 = memref.cast %2 : memref<64x16x4xf32, strided<[?, ?, ?], offset: ?>> to memref<64x16x4xf32, strided<[64, 4, 1], offset: 0>>
-
-  // CHECK: memref.cast %{{.*}} : memref<4xf32> to memref<*xf32>
-  %4 = memref.cast %1 : memref<4xf32> to memref<*xf32>
-
-  // CHECK: memref.cast %{{.*}} : memref<*xf32> to memref<4xf32>
-  %5 = memref.cast %4 : memref<*xf32> to memref<4xf32>
-  return
-}
-
-// Check that unranked memrefs with non-default memory space roundtrip
-// properly.
-// CHECK-LABEL: @unranked_memref_roundtrip(memref<*xf32, 4>)
-func.func private @unranked_memref_roundtrip(memref<*xf32, 4>)
-
-// CHECK-LABEL: func @memref_view(%arg0
-func.func @memref_view(%arg0 : index, %arg1 : index, %arg2 : index) {
-  %0 = memref.alloc() : memref<2048xi8>
-  // Test two dynamic sizes and dynamic offset.
-  // CHECK: memref.view {{.*}} : memref<2048xi8> to memref<?x?xf32>
-  %1 = memref.view %0[%arg2][%arg0, %arg1] : memref<2048xi8> to memref<?x?xf32>
-
-  // Test one dynamic size and dynamic offset.
-  // CHECK: memref.view {{.*}} : memref<2048xi8> to memref<4x?xf32>
-  %3 = memref.view %0[%arg2][%arg1] : memref<2048xi8> to memref<4x?xf32>
-
-  // Test static sizes and static offset.
-  // CHECK: memref.view {{.*}} : memref<2048xi8> to memref<64x4xf32>
-  %c0 = arith.constant 0: index
-  %5 = memref.view %0[%c0][] : memref<2048xi8> to memref<64x4xf32>
-  return
-}
-
 // CHECK-LABEL: func @test_dimop
 // CHECK-SAME: %[[ARG:.*]]: tensor<4x4x?xf32>
 func.func @test_dimop(%arg0: tensor<4x4x?xf32>) {
@@ -288,11 +215,3 @@ func.func @test_dimop(%arg0: tensor<4x4x?xf32>) {
   %1 = affine.apply affine_map<(d0) -> (d0)>(%0)
   return
 }
-
-// CHECK-LABEL: func @assume_alignment
-// CHECK-SAME: %[[MEMREF:.*]]: memref<4x4xf16>
-func.func @assume_alignment(%0: memref<4x4xf16>) {
-  // CHECK: memref.assume_alignment %[[MEMREF]], 16 : memref<4x4xf16>
-  memref.assume_alignment %0, 16 : memref<4x4xf16>
-  return
-}
diff --git a/mlir/test/IR/memory-ops.mlir b/mlir/test/IR/memory-ops.mlir
deleted file mode 100644
index c1cfc3bfa0dbf7..00000000000000
--- a/mlir/test/IR/memory-ops.mlir
+++ /dev/null
@@ -1,118 +0,0 @@
-// RUN: mlir-opt %s | FileCheck %s
-
-// CHECK: #[[$MAP:.*]] = affine_map<(d0, d1)[s0] -> (d0 + s0, d1)>
-
-// CHECK-LABEL: func @alloc() {
-func.func @alloc() {
-^bb0:
-  // Test simple alloc.
-  // CHECK: %{{.*}} = memref.alloc() : memref<1024x64xf32, 1>
-  %0 = memref.alloc() : memref<1024x64xf32, affine_map<(d0, d1) -> (d0, d1)>, 1>
-
-  %c0 = "arith.constant"() {value = 0: index} : () -> index
-  %c1 = "arith.constant"() {value = 1: index} : () -> index
-
-  // Test alloc with dynamic dimensions.
-  // CHECK: %{{.*}} = memref.alloc(%{{.*}}, %{{.*}}) : memref<?x?xf32, 1>
-  %1 = memref.alloc(%c0, %c1) : memref<?x?xf32, affine_map<(d0, d1) -> (d0, d1)>, 1>
-
-  // Test alloc with no dynamic dimensions and one symbol.
-  // CHECK: %{{.*}} = memref.alloc()[%{{.*}}] : memref<2x4xf32, #[[$MAP]], 1>
-  %2 = memref.alloc()[%c0] : memref<2x4xf32, affine_map<(d0, d1)[s0] -> ((d0 + s0), d1)>, 1>
-
-  // Test alloc with dynamic dimensions and one symbol.
-  // CHECK: %{{.*}} = memref.alloc(%{{.*}})[%{{.*}}] : memref<2x?xf32, #[[$MAP]], 1>
-  %3 = memref.alloc(%c1)[%c0] : memref<2x?xf32, affine_map<(d0, d1)[s0] -> (d0 + s0, d1)>, 1>
-
-  // Alloc with no mappings.
-  // b/116054838 Parser crash while parsing ill-formed AllocOp
-  // CHECK: %{{.*}} = memref.alloc() : memref<2xi32>
-  %4 = memref.alloc() : memref<2 x i32>
-
-  // CHECK:   return
-  return
-}
-
-// CHECK-LABEL: func @alloca() {
-func.func @alloca() {
-^bb0:
-  // Test simple alloc.
-  // CHECK: %{{.*}} = memref.alloca() : memref<1024x64xf32, 1>
-  %0 = memref.alloca() : memref<1024x64xf32, affine_map<(d0, d1) -> (d0, d1)>, 1>
-
-  %c0 = "arith.constant"() {value = 0: index} : () -> index
-  %c1 = "arith.constant"() {value = 1: index} : () -> index
-
-  // Test alloca with dynamic dimensions.
-  // CHECK: %{{.*}} = memref.alloca(%{{.*}}, %{{.*}}) : memref<?x?xf32, 1>
-  %1 = memref.alloca(%c0, %c1) : memref<?x?xf32, affine_map<(d0, d1) -> (d0, d1)>, 1>
-
-  // Test alloca with no dynamic dimensions and one symbol.
-  // CHECK: %{{.*}} = memref.alloca()[%{{.*}}] : memref<2x4xf32, #[[$MAP]], 1>
-  %2 = memref.alloca()[%c0] : memref<2x4xf32, affine_map<(d0, d1)[s0] -> ((d0 + s0), d1)>, 1>
-
-  // Test alloca with dynamic dimensions and one symbol.
-  // CHECK: %{{.*}} = memref.alloca(%{{.*}})[%{{.*}}] : memref<2x?xf32, #[[$MAP]], 1>
-  %3 = memref.alloca(%c1)[%c0] : memref<2x?xf32, affine_map<(d0, d1)[s0] -> (d0 + s0, d1)>, 1>
-
-  // Alloca with no mappings, but with alignment.
-  // CHECK: %{{.*}} = memref.alloca() {alignment = 64 : i64} : memref<2xi32>
-  %4 = memref.alloca() {alignment = 64} : memref<2 x i32>
-
-  return
-}
-
-// CHECK-LABEL: func @dealloc() {
-func.func @dealloc() {
-^bb0:
-  // CHECK: %{{.*}} = memref.alloc() : memref<1024x64xf32>
-  %0 = memref.alloc() : memref<1024x64xf32, affine_map<(d0, d1) -> (d0, d1)>, 0>
-
-  // CHECK: memref.dealloc %{{.*}} : memref<1024x64xf32>
-  memref.dealloc %0 : memref<1024x64xf32, affine_map<(d0, d1) -> (d0, d1)>, 0>
-  return
-}
-
-// CHECK-LABEL: func @load_store
-func.func @load_store() {
-^bb0:
-  // CHECK: %{{.*}} = memref.alloc() : memref<1024x64xf32, 1>
-  %0 = memref.alloc() : memref<1024x64xf32, affine_map<(d0, d1) -> (d0, d1)>, 1>
-
-  %1 = arith.constant 0 : index
-  %2 = arith.constant 1 : index
-
-  // CHECK: %{{.*}} = memref.load %{{.*}}[%{{.*}}, %{{.*}}] : memref<1024x64xf32, 1>
-  %3 = memref.load %0[%1, %2] : memref<1024x64xf32, affine_map<(d0, d1) -> (d0, d1)>, 1>
-
-  // CHECK: memref.store %{{.*}}, %{{.*}}[%{{.*}}, %{{.*}}] : memref<1024x64xf32, 1>
-  memref.store %3, %0[%1, %2] : memref<1024x64xf32, affine_map<(d0, d1) -> (d0, d1)>, 1>
-
-  return
-}
-
-// CHECK-LABEL: func @dma_ops()
-func.func @dma_ops() {
-  %c0 = arith.constant 0 : index
-  %stride = arith.constant 32 : index
-  %elt_per_stride = arith.constant 16 : index
-
-  %A = memref.alloc() : memref<256 x f32, affine_map<(d0) -> (d0)>, 0>
-  %Ah = memref.alloc() : memref<256 x f32, affine_map<(d0) -> (d0)>, 1>
-  %tag = memref.alloc() : memref<1 x f32>
-
-  %num_elements = arith.constant 256 : index
-
-  memref.dma_start %A[%c0], %Ah[%c0], %num_elements, %tag[%c0] : memref<256 x f32>, memref<256 x f32, 1>, memref<1 x f32>
-  memref.dma_wait %tag[%c0], %num_elements : memref<1 x f32>
-  // CHECK: dma_start %{{.*}}[%{{.*}}], %{{.*}}[%{{.*}}], %{{.*}}, %{{.*}}[%{{.*}}] : memref<256xf32>, memref<256xf32, 1>, memref<1xf32>
-  // CHECK-NEXT:  dma_wait %{{.*}}[%{{.*}}], %{{.*}} : memref<1xf32>
-
-  // DMA with strides
-  memref.dma_start %A[%c0], %Ah[%c0], %num_elements, %tag[%c0], %stride, %elt_per_stride : memref<256 x f32>, memref<256 x f32, 1>, memref<1 x f32>
-  memref.dma_wait %tag[%c0], %num_elements : memref<1 x f32>
-  // CHECK-NEXT:  dma_start %{{.*}}[%{{.*}}], %{{.*}}[%{{.*}}], %{{.*}}, %{{.*}}[%{{.*}}], %{{.*}}, %{{.*}} : memref<256xf32>, memref<256xf32, 1>, memref<1xf32>
-  // CHECK-NEXT:  dma_wait %{{.*}}[%{{.*}}], %{{.*}} : memref<1xf32>
-
-  return
-}

``````````
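
Aside on the RUN lines in ops.mlir: the first pipeline roundtrips the pretty-printed form, while the second adds --mlir-print-op-generic so the generic printer's output is reparsed as well. A small sketch of the two syntaxes, using memref.dealloc since it carries no attributes (generic spellings of attribute-bearing ops can vary across MLIR versions):

```mlir
// Pretty (custom assembly) form:
memref.dealloc %buf : memref<2xi32>

// Generic form, as produced under --mlir-print-op-generic:
"memref.dealloc"(%buf) : (memref<2xi32>) -> ()
```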



https://github.com/llvm/llvm-project/pull/115583

