[Mlir-commits] [mlir] bda723f - [NFC][OpenMP][MLIR] Add MLIR test for lowering parallel if (#71788)

llvmlistbot at llvm.org
Thu Nov 23 04:29:59 PST 2023


Author: Dominik Adamski
Date: 2023-11-23T13:29:55+01:00
New Revision: bda723f5badde4d4a7ece0e07380bee58394e1d9

URL: https://github.com/llvm/llvm-project/commit/bda723f5badde4d4a7ece0e07380bee58394e1d9
DIFF: https://github.com/llvm/llvm-project/commit/bda723f5badde4d4a7ece0e07380bee58394e1d9.diff

LOG: [NFC][OpenMP][MLIR] Add MLIR test for lowering parallel if (#71788)

Add a test for the clause omp target parallel if(parallel : cond).

The test checks whether the corresponding MLIR construct is correctly lowered to LLVM IR.
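
For context, a rough source-level rendering of the new test case, written here
as C with OpenMP pragmas purely for illustration (the test itself is MLIR, most
likely produced from Fortran given the fir.bindc_name attributes; the names
parallel_if, ifcond, and d mirror the MLIR in the diff below):

    #include <stdint.h>

    /* Illustrative only: roughly the source-level construct the MLIR test
     * encodes. The parallel region nested in the target region is guarded by
     * the if(parallel: ...) clause; when ifcond is zero, the region is
     * expected to execute serialized on the device. */
    void parallel_if(int32_t ifcond) {
      int32_t d;
    #pragma omp target parallel if(parallel: ifcond != 0) map(from: d)
      {
        d = 10;
      }
    }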

Added: 
    

Modified: 
    mlir/test/Target/LLVMIR/omptarget-parallel-llvm.mlir

Removed: 
    


################################################################################
diff  --git a/mlir/test/Target/LLVMIR/omptarget-parallel-llvm.mlir b/mlir/test/Target/LLVMIR/omptarget-parallel-llvm.mlir
index 2628e42d533b50e..c99f2954d761386 100644
--- a/mlir/test/Target/LLVMIR/omptarget-parallel-llvm.mlir
+++ b/mlir/test/Target/LLVMIR/omptarget-parallel-llvm.mlir
@@ -32,31 +32,54 @@ module attributes {dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry<"dlti.alloca_memo
     }
   llvm.return
   }
+
+  llvm.func @parallel_if(%arg0: !llvm.ptr {fir.bindc_name = "ifcond"}) {
+    %0 = llvm.mlir.constant(1 : i64) : i64
+    %1 = llvm.alloca %0 x i32 {bindc_name = "d"} : (i64) -> !llvm.ptr
+    %2 = omp.map_info var_ptr(%1 : !llvm.ptr, i32) map_clauses(from) capture(ByRef) -> !llvm.ptr {name = "d"}
+    %3 = omp.map_info var_ptr(%arg0 : !llvm.ptr, i32) map_clauses(implicit, exit_release_or_enter_alloc) capture(ByCopy) -> !llvm.ptr {name = "ifcond"}
+    omp.target map_entries(%2 -> %arg1, %3 -> %arg2 : !llvm.ptr, !llvm.ptr) {
+    ^bb0(%arg1: !llvm.ptr, %arg2: !llvm.ptr):
+      %4 = llvm.mlir.constant(10 : i32) : i32
+      %5 = llvm.load %arg2 : !llvm.ptr -> i32
+      %6 = llvm.mlir.constant(0 : i64) : i32
+      %7 = llvm.icmp "ne" %5, %6 : i32
+      omp.parallel if(%7 : i1) {
+        llvm.store %4, %arg1 : i32, !llvm.ptr
+        omp.terminator
+      }
+      omp.terminator
+    }
+    llvm.return
+  }
 }
 
-// CHECK: define weak_odr protected amdgpu_kernel void [[FUNC0:@.*]](
-// CHECK-SAME: ptr [[TMP:%.*]], ptr [[TMP0:.*]]) {
-// CHECK:         [[TMP1:%.*]] = alloca [1 x ptr], align 8, addrspace(5)
-// CHECK:         [[TMP2:%.*]] = addrspacecast ptr addrspace(5) [[TMP1]] to ptr
-// CHECK:         [[STRUCTARG:%.*]] = alloca { ptr }, align 8, addrspace(5)
-// CHECK:         [[STRUCTARG_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[STRUCTARG]] to ptr
-// CHECK:         [[TMP3:%.*]] = alloca ptr, align 8, addrspace(5)
-// CHECK:         [[TMP4:%.*]] = addrspacecast ptr addrspace(5) [[TMP3]] to ptr
-// CHECK:         store ptr [[TMP0]], ptr [[TMP4]], align 8
-// CHECK:         [[TMP5:%.*]] = call i32 @__kmpc_target_init(ptr addrspacecast (ptr addrspace(1) [[KERNEL_ENV:@.*]] to ptr), ptr [[TMP]])
-// CHECK:         [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP5]], -1
-// CHECK:         br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
-// CHECK:         [[TMP6:%.*]] = load ptr, ptr [[TMP4]], align 8
-// CHECK:         [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(ptr addrspacecast (ptr addrspace(1) @[[GLOB1:[0-9]+]] to ptr))
-// CHECK:         [[GEP_:%.*]] = getelementptr { ptr }, ptr addrspace(5) [[STRUCTARG]], i32 0, i32 0
-// CHECK:         store ptr [[TMP6]], ptr addrspace(5) [[GEP_]], align 8
-// CHECK:         [[TMP7:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0
-// CHECK:         store ptr [[STRUCTARG_ASCAST]], ptr [[TMP7]], align 8
-// CHECK:         call void @__kmpc_parallel_51(ptr addrspacecast (ptr addrspace(1) @[[GLOB1]] to ptr), i32 [[OMP_GLOBAL_THREAD_NUM]], i32 1, i32 -1, i32 -1, ptr [[FUNC1:@.*]], ptr null, ptr [[TMP2]], i64 1)
+// CHECK: define weak_odr protected amdgpu_kernel void @[[FUNC0:.*]](
+// CHECK-SAME: ptr %[[TMP:.*]], ptr %[[TMP0:.*]]) {
+// CHECK:         %[[TMP1:.*]] = alloca [1 x ptr], align 8, addrspace(5)
+// CHECK:         %[[TMP2:.*]] = addrspacecast ptr addrspace(5) %[[TMP1]] to ptr
+// CHECK:         %[[STRUCTARG:.*]] = alloca { ptr }, align 8, addrspace(5)
+// CHECK:         %[[STRUCTARG_ASCAST:.*]] = addrspacecast ptr addrspace(5) %[[STRUCTARG]] to ptr
+// CHECK:         %[[TMP3:.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK:         %[[TMP4:.*]] = addrspacecast ptr addrspace(5) %[[TMP3]] to ptr
+// CHECK:         store ptr %[[TMP0]], ptr %[[TMP4]], align 8
+// CHECK:         %[[TMP5:.*]] = call i32 @__kmpc_target_init(ptr addrspacecast (ptr addrspace(1) @{{.*}} to ptr), ptr %[[TMP]])
+// CHECK:         %[[EXEC_USER_CODE:.*]] = icmp eq i32 %[[TMP5]], -1
+// CHECK:         br i1 %[[EXEC_USER_CODE]], label %[[USER_CODE_ENTRY:.*]], label %[[WORKER_EXIT:.*]]
+// CHECK:         %[[TMP6:.*]] = load ptr, ptr %[[TMP4]], align 8
+// CHECK:         %[[OMP_GLOBAL_THREAD_NUM:.*]] = call i32 @__kmpc_global_thread_num(ptr addrspacecast (ptr addrspace(1) @[[GLOB1:[0-9]+]] to ptr))
+// CHECK:         %[[GEP_:.*]] = getelementptr { ptr }, ptr addrspace(5) %[[STRUCTARG]], i32 0, i32 0
+// CHECK:         store ptr %[[TMP6]], ptr addrspace(5) %[[GEP_]], align 8
+// CHECK:         %[[TMP7:.*]] = getelementptr inbounds [1 x ptr], ptr %[[TMP2]], i64 0, i64 0
+// CHECK:         store ptr %[[STRUCTARG_ASCAST]], ptr %[[TMP7]], align 8
+// CHECK:         call void @__kmpc_parallel_51(ptr addrspacecast (ptr addrspace(1) @[[GLOB1]] to ptr), i32 %[[OMP_GLOBAL_THREAD_NUM]], i32 1, i32 -1, i32 -1, ptr @[[FUNC1:.*]], ptr null, ptr %[[TMP2]], i64 1)
 // CHECK:         call void @__kmpc_target_deinit()
 
-// CHECK: define internal void [[FUNC1]](
-// CHECK-SAME: ptr noalias noundef [[TID_ADDR_ASCAST:%.*]], ptr noalias noundef [[ZERO_ADDR_ASCAST:%.*]], ptr [[TMP0:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK: define internal void @[[FUNC1]](
+// CHECK-SAME: ptr noalias noundef {{.*}}, ptr noalias noundef {{.*}}, ptr {{.*}}) #{{[0-9]+}} {
+
+// Test that the num_threads OpenMP clause for the target region is correctly lowered
+// and passed as a parameter to the kmpc_parallel_51 function.
 
 // CHECK: define weak_odr protected amdgpu_kernel void [[FUNC_NUM_THREADS0:@.*]](
 // CHECK-NOT:     call void @__kmpc_push_num_threads(
@@ -64,3 +87,24 @@ module attributes {dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry<"dlti.alloca_memo
 // CHECK-SAME:  ptr addrspace(1) @[[NUM_THREADS_GLOB:[0-9]+]] to ptr),
 // CHECK-SAME:  i32 [[NUM_THREADS_TMP0:%.*]], i32 1, i32 156,
 // CHECK-SAME:  i32 -1,  ptr [[FUNC_NUM_THREADS1:@.*]], ptr null, ptr [[NUM_THREADS_TMP1:%.*]], i64 1)
+
+// One of the arguments of the kmpc_parallel_51 function is responsible for handling the if clause
+// of the omp parallel construct for the target region. If this argument is nonzero,
+// then kmpc_parallel_51 launches multiple threads for the parallel region.
+//
+// This test checks that the MLIR expression:
+//      %7 = llvm.icmp "ne" %5, %6 : i32
+//      omp.parallel if(%7 : i1)
+// is correctly lowered to LLVM IR and that the if-condition variable
+// is passed as a parameter to the kmpc_parallel_51 function.
+
+// CHECK: define weak_odr protected amdgpu_kernel void @{{.*}}(
+// CHECK-SAME: ptr {{.*}}, ptr {{.*}}, ptr %[[IFCOND_ARG2:.*]]) {
+// CHECK:         store ptr %[[IFCOND_ARG2]], ptr %[[IFCOND_TMP1:.*]], align 8
+// CHECK:         %[[IFCOND_TMP2:.*]] = load i32, ptr %[[IFCOND_TMP1]], align 4
+// CHECK:         %[[IFCOND_TMP3:.*]] = icmp ne i32 %[[IFCOND_TMP2]], 0
+// CHECK:         %[[IFCOND_TMP4:.*]] = sext i1 %[[IFCOND_TMP3]] to i32
+// CHECK:         call void @__kmpc_parallel_51(ptr addrspacecast (
+// CHECK-SAME:  ptr addrspace(1) {{.*}} to ptr),
+// CHECK-SAME:  i32 {{.*}}, i32 %[[IFCOND_TMP4]], i32 -1,
+// CHECK-SAME:  i32 -1,  ptr {{.*}}, ptr null, ptr {{.*}}, i64 1)
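
Read together, the num_threads check and the if-clause check above pin down two
integer arguments of __kmpc_parallel_51. Below is a hedged C sketch of the
call's shape, inferred only from the call sites exercised in this test; the
parameter names are assumptions, and the authoritative declaration lives in the
LLVM OpenMP device runtime:

    #include <stdint.h>

    /* Hypothetical prototype reconstructed from the call sites checked above;
     * the parameter names are illustrative and not taken from the runtime
     * sources. The third argument carries the result of the if clause
     * (nonzero requests a real parallel launch) and the fourth carries the
     * num_threads value, with -1 meaning "not specified". */
    void __kmpc_parallel_51(void *ident, int32_t global_tid,
                            int32_t if_expr, int32_t num_threads,
                            int32_t proc_bind, void *outlined_fn,
                            void *wrapper_fn, void **args, int64_t nargs);

As the CHECK lines show, the i1 result of the icmp is sign-extended to i32
before the call, so a true condition reaches the runtime as a nonzero value.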


        

