[llvm-branch-commits] [mlir] [mlir][LLVM] erase call mappings in forgetMapping() (PR #84955)

Tom Eccles via llvm-branch-commits llvm-branch-commits at lists.llvm.org
Mon Mar 18 10:34:24 PDT 2024


https://github.com/tblah updated https://github.com/llvm/llvm-project/pull/84955

From 0b2f5fee61d170b0a2197fd5da92f0e84b3b14f4 Mon Sep 17 00:00:00 2001
From: Tom Eccles <tom.eccles at arm.com>
Date: Wed, 21 Feb 2024 14:22:39 +0000
Subject: [PATCH 1/3] [mlir][LLVM] erase call mappings in forgetMapping()

It looks like the mappings for call instructions were overlooked here and
never erased. This fixes a crash in the OpenMP to LLVM IR translation when
a region containing call operations is inlined multiple times.
---
 mlir/lib/Target/LLVMIR/ModuleTranslation.cpp | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp b/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
index c00628a420a000..995544238e4a3c 100644
--- a/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
+++ b/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
@@ -716,6 +716,8 @@ void ModuleTranslation::forgetMapping(Region &region) {
           branchMapping.erase(&op);
         if (isa<LLVM::GlobalOp>(op))
           globalsMapping.erase(&op);
+        if (isa<LLVM::CallOp>(op))
+          callMapping.erase(&op);
         llvm::append_range(
             toProcess,
             llvm::map_range(op.getRegions(), [](Region &r) { return &r; }));
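
For readers less familiar with ModuleTranslation, the hazard described in the
commit message can be illustrated with the minimal, self-contained C++ sketch
below. All names in it are hypothetical stand-ins rather than the real MLIR
API; it only models the idea that a cache keyed by operation pointers must be
erased when a region's translation is discarded, otherwise re-translating the
same operations (as happens when a reduction combiner region is inlined more
than once) can observe stale entries.

#include <cassert>
#include <map>
#include <memory>
#include <string>

struct MLIROp { int id; };              // stand-in for mlir::Operation
struct LLVMInst { std::string text; };  // stand-in for an llvm::Instruction

// Stand-in for the callMapping cache kept by the translation.
std::map<const MLIROp *, LLVMInst *> callMapping;

// Translate one call op: create an LLVM instruction and cache it.
std::unique_ptr<LLVMInst> translateCall(const MLIROp &op) {
  auto inst = std::make_unique<LLVMInst>(LLVMInst{"call void @test_call()"});
  callMapping[&op] = inst.get();
  return inst;
}

// Discard a finished translation of the region containing `op`. Without the
// erase (the analogue of what this patch adds), the cache would keep a
// dangling pointer to the destroyed instruction.
void forgetMapping(const MLIROp &op) { callMapping.erase(&op); }

int main() {
  MLIROp op{0};
  {
    auto inst = translateCall(op); // first inlining of the combiner region
    forgetMapping(op);             // translation of that copy is discarded
  }                                // the LLVM instruction is destroyed here
  assert(callMapping.count(&op) == 0); // no stale entry survives
  auto inst2 = translateCall(op);      // second inlining starts from clean state
  (void)inst2;
  return 0;
}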

From d6955d43534d23aaf58c10090c1a5a2167018796 Mon Sep 17 00:00:00 2001
From: Tom Eccles <tom.eccles at arm.com>
Date: Fri, 15 Mar 2024 14:05:53 +0000
Subject: [PATCH 2/3] Add test

---
 .../Target/LLVMIR/openmp-reduction-call.mlir  | 47 +++++++++++++++++++
 1 file changed, 47 insertions(+)
 create mode 100644 mlir/test/Target/LLVMIR/openmp-reduction-call.mlir

diff --git a/mlir/test/Target/LLVMIR/openmp-reduction-call.mlir b/mlir/test/Target/LLVMIR/openmp-reduction-call.mlir
new file mode 100644
index 00000000000000..60419a85f66aa2
--- /dev/null
+++ b/mlir/test/Target/LLVMIR/openmp-reduction-call.mlir
@@ -0,0 +1,47 @@
+// RUN: mlir-translate -mlir-to-llvmir %s | FileCheck %s
+
+// Test that we don't crash when there is a call operation in the combiner
+
+omp.reduction.declare @add_f32 : f32
+init {
+^bb0(%arg: f32):
+  %0 = llvm.mlir.constant(0.0 : f32) : f32
+  omp.yield (%0 : f32)
+}
+combiner {
+^bb1(%arg0: f32, %arg1: f32):
+// test this call here:
+  llvm.call @test_call() : () -> ()
+  %1 = llvm.fadd %arg0, %arg1 : f32
+  omp.yield (%1 : f32)
+}
+atomic {
+^bb2(%arg2: !llvm.ptr, %arg3: !llvm.ptr):
+  %2 = llvm.load %arg3 : !llvm.ptr -> f32
+  llvm.atomicrmw fadd %arg2, %2 monotonic : !llvm.ptr, f32
+  omp.yield
+}
+
+llvm.func @simple_reduction(%lb : i64, %ub : i64, %step : i64) {
+  %c1 = llvm.mlir.constant(1 : i32) : i32
+  %0 = llvm.alloca %c1 x i32 : (i32) -> !llvm.ptr
+  omp.parallel {
+    omp.wsloop reduction(@add_f32 %0 -> %prv : !llvm.ptr)
+    for (%iv) : i64 = (%lb) to (%ub) step (%step) {
+      %1 = llvm.mlir.constant(2.0 : f32) : f32
+      %2 = llvm.load %prv : !llvm.ptr -> f32
+      %3 = llvm.fadd %1, %2 : f32
+      llvm.store %3, %prv : f32, !llvm.ptr
+      omp.yield
+    }
+    omp.terminator
+  }
+  llvm.return
+}
+
+llvm.func @test_call() -> ()
+
+// The call to the troublesome function will have been inlined twice: once
+// into main and once into the outlined function.
+// CHECK: call void @test_call()
+// CHECK: call void @test_call()

From 02550e13fc8e9d3e653b8e689a4479cfdd0bfcc8 Mon Sep 17 00:00:00 2001
From: Tom Eccles <tom.eccles at arm.com>
Date: Mon, 18 Mar 2024 17:16:55 +0000
Subject: [PATCH 3/3] Simplify test

---
 .../Target/LLVMIR/openmp-reduction-call.mlir  | 20 +++++--------------
 1 file changed, 5 insertions(+), 15 deletions(-)

diff --git a/mlir/test/Target/LLVMIR/openmp-reduction-call.mlir b/mlir/test/Target/LLVMIR/openmp-reduction-call.mlir
index 60419a85f66aa2..9aa9ac7e4b5a52 100644
--- a/mlir/test/Target/LLVMIR/openmp-reduction-call.mlir
+++ b/mlir/test/Target/LLVMIR/openmp-reduction-call.mlir
@@ -15,25 +15,15 @@ combiner {
   %1 = llvm.fadd %arg0, %arg1 : f32
   omp.yield (%1 : f32)
 }
-atomic {
-^bb2(%arg2: !llvm.ptr, %arg3: !llvm.ptr):
-  %2 = llvm.load %arg3 : !llvm.ptr -> f32
-  llvm.atomicrmw fadd %arg2, %2 monotonic : !llvm.ptr, f32
-  omp.yield
-}
 
 llvm.func @simple_reduction(%lb : i64, %ub : i64, %step : i64) {
   %c1 = llvm.mlir.constant(1 : i32) : i32
   %0 = llvm.alloca %c1 x i32 : (i32) -> !llvm.ptr
-  omp.parallel {
-    omp.wsloop reduction(@add_f32 %0 -> %prv : !llvm.ptr)
-    for (%iv) : i64 = (%lb) to (%ub) step (%step) {
-      %1 = llvm.mlir.constant(2.0 : f32) : f32
-      %2 = llvm.load %prv : !llvm.ptr -> f32
-      %3 = llvm.fadd %1, %2 : f32
-      llvm.store %3, %prv : f32, !llvm.ptr
-      omp.yield
-    }
+  omp.parallel reduction(@add_f32 %0 -> %prv : !llvm.ptr) {
+    %1 = llvm.mlir.constant(2.0 : f32) : f32
+    %2 = llvm.load %prv : !llvm.ptr -> f32
+    %3 = llvm.fadd %1, %2 : f32
+    llvm.store %3, %prv : f32, !llvm.ptr
     omp.terminator
   }
   llvm.return


