[Mlir-commits] [llvm] [mlir] [Flang][OpenMP][Taskloop] Translation support for taskloop construct (PR #166903)

Tom Eccles llvmlistbot at llvm.org
Mon Nov 10 09:37:16 PST 2025


================
@@ -0,0 +1,151 @@
+// RUN: mlir-translate -mlir-to-llvmir %s | FileCheck %s
+
+omp.private {type = private} @_QFtestEi_private_i32 : i32
+
+omp.private {type = firstprivate} @_QFtestEa_firstprivate_i32 : i32 copy {
+^bb0(%arg0: !llvm.ptr, %arg1: !llvm.ptr):
+  %0 = llvm.load %arg0 : !llvm.ptr -> i32
+  llvm.store %0, %arg1 : i32, !llvm.ptr
+  omp.yield(%arg1 : !llvm.ptr)
+}
+
+
+llvm.func @_QPtest() {
+  %0 = llvm.mlir.constant(1 : i64) : i64
+  %1 = llvm.alloca %0 x i32 {bindc_name = "i"} : (i64) -> !llvm.ptr
+  %3 = llvm.alloca %0 x i32 {bindc_name = "a"} : (i64) -> !llvm.ptr
+  %6 = llvm.mlir.constant(20 : i32) : i32
+  llvm.store %6, %3 : i32, !llvm.ptr
+  %7 = llvm.mlir.constant(1 : i32) : i32
+  %8 = llvm.mlir.constant(5 : i32) : i32
+  %9 = llvm.mlir.constant(1 : i32) : i32
+  omp.taskloop private(@_QFtestEa_firstprivate_i32 %3 -> %arg0, @_QFtestEi_private_i32 %1 -> %arg1 : !llvm.ptr, !llvm.ptr) {
+    omp.loop_nest (%arg2) : i32 = (%7) to (%8) inclusive step (%9) {
+      llvm.store %arg2, %arg1 : i32, !llvm.ptr
+      %10 = llvm.load %arg0 : !llvm.ptr -> i32
+      %11 = llvm.mlir.constant(1 : i32) : i32
+      %12 = llvm.add %10, %11 : i32
+      llvm.store %12, %arg0 : i32, !llvm.ptr
+      omp.yield
+    }
+  }
+  llvm.return
+}
+
+// CHECK:  %struct.kmp_task_info = type { ptr, ptr, i32, ptr, ptr, i64, i64, i64 }
+
+// CHECK-LABEL:  define void @_QPtest() {
+// CHECK:           %[[STRUCTARG:.*]] = alloca { ptr }, align 8
+// CHECK:           %[[VAL1:.*]] = alloca i32, i64 1, align 4
+// CHECK:           %[[VAL_X:.*]] = alloca i32, i64 1, align 4
+// CHECK:           store i32 20, ptr %[[VAL_X]], align 4
+// CHECK:           br label %entry
+
+// CHECK:         entry:
+// CHECK:           br label %omp.private.init
+
+// CHECK:         omp.private.init:                                 ; preds = %entry
+// CHECK:           %[[OMP_TASK_CONTEXT_PTR:.*]] = tail call ptr @malloc(i64 ptrtoint (ptr getelementptr ({ i32 }, ptr null, i32 1) to i64))
+// CHECK:           %[[PRIV_GEP:.*]] = getelementptr { i32 }, ptr %[[OMP_TASK_CONTEXT_PTR]], i32 0, i32 0
+// CHECK:           br label %omp.private.copy
+
+// CHECK:         omp.private.copy:
+// CHECK:           br label %omp.private.copy1
+
+// CHECK:         omp.private.copy1:
+// CHECK:           %[[LOAD_X:.*]] = load i32, ptr %[[VAL_X]], align 4
+// CHECK:           store i32 %[[LOAD_X]], ptr %[[PRIV_GEP]], align 4
+// CHECK:           br label %omp.taskloop.start
+
+// CHECK:         omp.taskloop.start:
+// CHECK:           br label %codeRepl
+
+// CHECK:         codeRepl:
+// CHECK:           %[[GEP_OMP_TASK_CONTEXT_PTR:.*]] = getelementptr { ptr }, ptr %[[STRUCTARG]], i32 0, i32 0
+// CHECK:           store ptr %[[OMP_TASK_CONTEXT_PTR]], ptr %[[GEP_OMP_TASK_CONTEXT_PTR]], align 8
+// CHECK:           %[[GTID:.*]] = call i32 @__kmpc_global_thread_num(ptr @1)
+// CHECK:           call void @__kmpc_taskgroup(ptr @1, i32 %[[GTID]])
+// CHECK:           %[[TASK_PTR:.*]] = call ptr @__kmpc_omp_task_alloc(ptr @1, i32 %[[GTID]], i32 1, i64 64, i64 8, ptr @_QPtest..omp_par)
+// CHECK:           %[[LB_GEP:.*]] = getelementptr inbounds nuw %struct.kmp_task_info, ptr %[[TASK_PTR]], i32 0, i32 5
+// CHECK:           store i32 1, ptr %[[LB_GEP]], align 4
+// CHECK:           %[[UB_GEP:.*]] = getelementptr inbounds nuw %struct.kmp_task_info, ptr %[[TASK_PTR]], i32 0, i32 6
+// CHECK:           store i32 5, ptr %[[UB_GEP]], align 4
+// CHECK:           %[[STEP_GEP:.*]] = getelementptr inbounds nuw %struct.kmp_task_info, ptr %[[TASK_PTR]], i32 0, i32 7
+// CHECK:           store i64 1, ptr %[[STEP_GEP]], align 4
+// CHECK:           %[[LOAD_STEP:.*]] = load i64, ptr %[[STEP_GEP]], align 4
+// CHECK:           %10 = load ptr, ptr %[[TASK_PTR]], align 8
+// CHECK:           call void @llvm.memcpy.p0.p0.i64(ptr align 1 %10, ptr align 1 %[[STRUCTARG]], i64 8, i1 false)
+// CHECK:           call void @__kmpc_taskloop(ptr @1, i32 %[[GTID]], ptr %[[TASK_PTR]], i32 1, ptr %[[LB_GEP]], ptr %[[UB_GEP]], i64 %[[LOAD_STEP]], i32 1, i32 0, i64 0, ptr null)
+// CHECK:           call void @__kmpc_end_taskgroup(ptr @1, i32 %[[GTID]])
+// CHECK:           br label %taskloop.exit
+
+// CHECK:           taskloop.exit:
+// CHECK:             tail call void @free(ptr %[[OMP_TASK_CONTEXT_PTR]])
----------------
tblah wrote:

A note to whoever implements nogroup:

The location of this free is valid because end_taskgroup waits until all generated tasks are complete before returning.

If end_taskgroup is not called, some other mechanism will have to be used to ensure that this free is not called until every generated task has completed execution.

https://github.com/llvm/llvm-project/pull/166903


More information about the Mlir-commits mailing list