[Mlir-commits] [mlir] [mlir][affine] Skip point-wise copy for scalar memref (PR #129183)
llvmlistbot at llvm.org
Thu Feb 27 21:08:25 PST 2025
llvmbot wrote:
@llvm/pr-subscribers-mlir-affine
Author: Kai Sasaki (Lewuathe)
Point-wise copy generation does not handle scalar (zero-rank) memrefs: with no dimension to iterate over, no enclosing affine.for is generated, which leads to a segmentation fault. We can skip point-wise copy generation for scalar memrefs instead.
Fixes https://github.com/llvm/llvm-project/issues/61167
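For illustration only (not part of the patch), this is what the zero-rank case looks like: a scalar memref is loaded and stored with an empty index list, so there is no induction variable for a point-wise copy loop to use.

```mlir
// Illustrative sketch: a zero-rank (scalar) memref has no dimensions, so
// its accesses take an empty index list and there is no loop dimension
// around which generatePointWiseCopy could build a copy nest.
%m = memref.alloc() : memref<f32>
%v = affine.load %m[] : memref<f32>
affine.store %v, %m[] : memref<f32>
```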
---
Full diff: https://github.com/llvm/llvm-project/pull/129183.diff
2 Files Affected:
- (modified) mlir/lib/Dialect/Affine/Utils/LoopUtils.cpp (+3)
- (modified) mlir/test/Dialect/Affine/affine-data-copy.mlir (+49)
``````````diff
diff --git a/mlir/lib/Dialect/Affine/Utils/LoopUtils.cpp b/mlir/lib/Dialect/Affine/Utils/LoopUtils.cpp
index 6833d6583c27a..4b9c872bff1ec 100644
--- a/mlir/lib/Dialect/Affine/Utils/LoopUtils.cpp
+++ b/mlir/lib/Dialect/Affine/Utils/LoopUtils.cpp
@@ -2131,6 +2131,9 @@ static LogicalResult generateCopy(
if (!copyOptions.generateDma) {
// Point-wise copy generation.
+ // Cannot generate a point-wise copy for a zero-rank (scalar) memref: there is no loop to iterate over.
+ if (rank == 0)
+ return failure();
auto copyNest =
generatePointWiseCopy(loc, memref, fastMemRef, lbMaps,
/*lbOperands=*/regionSymbols, ubMaps,
diff --git a/mlir/test/Dialect/Affine/affine-data-copy.mlir b/mlir/test/Dialect/Affine/affine-data-copy.mlir
index 5615acae5ecc4..5e97f97f8c251 100644
--- a/mlir/test/Dialect/Affine/affine-data-copy.mlir
+++ b/mlir/test/Dialect/Affine/affine-data-copy.mlir
@@ -354,3 +354,52 @@ func.func @arbitrary_memory_space() {
}
return
}
+
+
+// CHECK-LABEL: func @scalar_memref_copy_without_dma
+func.func @scalar_memref_copy_without_dma() {
+ %false = arith.constant false
+ %4 = memref.alloc() {alignment = 128 : i64} : memref<i1>
+ affine.store %false, %4[] : memref<i1>
+
+ // CHECK: %[[FALSE:.*]] = arith.constant false
+ // CHECK: %[[MEMREF:.*]] = memref.alloc() {alignment = 128 : i64} : memref<i1>
+ // CHECK: affine.store %[[FALSE]], %[[MEMREF]][] : memref<i1>
+ return
+}
+
+// CHECK-LABEL: func @scalar_memref_copy_in_loop
+func.func @scalar_memref_copy_in_loop(%3:memref<480xi1>) {
+ %false = arith.constant false
+ %4 = memref.alloc() {alignment = 128 : i64} : memref<i1>
+ affine.store %false, %4[] : memref<i1>
+ %5 = memref.alloc() {alignment = 128 : i64} : memref<i1>
+ memref.copy %4, %5 : memref<i1> to memref<i1>
+ affine.for %arg0 = 0 to 480 {
+ %11 = affine.load %3[%arg0] : memref<480xi1>
+ %12 = affine.load %5[] : memref<i1>
+ %13 = arith.cmpi slt, %11, %12 : i1
+ %14 = arith.select %13, %11, %12 : i1
+ affine.store %14, %5[] : memref<i1>
+ }
+
+ // CHECK: %[[FALSE:.*]] = arith.constant false
+ // CHECK: %[[MEMREF:.*]] = memref.alloc() {alignment = 128 : i64} : memref<i1>
+ // CHECK: affine.store %[[FALSE]], %[[MEMREF]][] : memref<i1>
+ // CHECK: %[[TARGET:.*]] = memref.alloc() {alignment = 128 : i64} : memref<i1>
+ // CHECK: memref.copy %alloc, %[[TARGET]] : memref<i1> to memref<i1>
+ // CHECK: %[[FAST_MEMREF:.*]] = memref.alloc() : memref<480xi1>
+ // CHECK: affine.for %{{.*}} = 0 to 480 {
+ // CHECK: %{{.*}} = affine.load %arg0[%{{.*}}] : memref<480xi1>
+ // CHECK: affine.store %{{.*}}, %[[FAST_MEMREF]][%{{.*}}] : memref<480xi1>
+ // CHECK: }
+ // CHECK: affine.for %arg1 = 0 to 480 {
+ // CHECK: %[[L0:.*]] = affine.load %[[FAST_MEMREF]][%arg1] : memref<480xi1>
+ // CHECK: %[[L1:.*]] = affine.load %[[TARGET]][] : memref<i1>
+ // CHECK: %[[CMPI:.*]] = arith.cmpi slt, %[[L0]], %[[L1]] : i1
+ // CHECK: %[[SELECT:.*]] = arith.select %[[CMPI]], %[[L0]], %[[L1]] : i1
+ // CHECK: affine.store %[[SELECT]], %[[TARGET]][] : memref<i1>
+ // CHECK: }
+ // CHECK: memref.dealloc %[[FAST_MEMREF]] : memref<480xi1>
+ return
+}
\ No newline at end of file
``````````
https://github.com/llvm/llvm-project/pull/129183