[llvm] c70c30d - [OpenMP][NFC] Precommit change to hide_mem_transfer_latency test flags

via llvm-commits llvm-commits at lists.llvm.org
Wed Jun 9 13:16:53 PDT 2021


Author: Joseph Huber
Date: 2021-06-09T16:16:37-04:00
New Revision: c70c30d6da0a0d46c45ca6b77d760216ddd420a7

URL: https://github.com/llvm/llvm-project/commit/c70c30d6da0a0d46c45ca6b77d760216ddd420a7
DIFF: https://github.com/llvm/llvm-project/commit/c70c30d6da0a0d46c45ca6b77d760216ddd420a7.diff

LOG: [OpenMP][NFC] Precommit change to hide_mem_transfer_latency test flags
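
For context, the only functional change here is to the UTC_ARGS recorded in the test header: dropping -p (--preserve-names) and --scrub-attributes means utils/update_test_checks.py rewrites literal IR value names such as %handle into FileCheck captures like [[HANDLE:%.*]], which is what produces the large mechanical diff below. A minimal sketch of the regeneration command, assuming an in-tree build whose opt binary sits at build/bin/opt (the path is illustrative, not taken from the commit):

    # Regenerate the CHECK lines with the new UTC_ARGS.
    # The opt binary path and working directory are assumptions.
    python llvm/utils/update_test_checks.py --function-signature \
        --opt-binary=build/bin/opt \
        llvm/test/Transforms/OpenMP/hide_mem_transfer_latency.ll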

Added: 
    

Modified: 
    llvm/test/Transforms/OpenMP/hide_mem_transfer_latency.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/Transforms/OpenMP/hide_mem_transfer_latency.ll b/llvm/test/Transforms/OpenMP/hide_mem_transfer_latency.ll
index a1464f20c5fcc..dc8b57a20da3d 100644
--- a/llvm/test/Transforms/OpenMP/hide_mem_transfer_latency.ll
+++ b/llvm/test/Transforms/OpenMP/hide_mem_transfer_latency.ll
@@ -1,4 +1,4 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: -p --function-signature --scrub-attributes
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature
 ; RUN: opt -S -passes=openmp-opt-cgscc -aa-pipeline=basic-aa -openmp-hide-memory-transfer-latency < %s | FileCheck %s
 target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
 
@@ -43,53 +43,53 @@ target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16
 define dso_local double @heavyComputation1() {
 ; CHECK-LABEL: define {{[^@]+}}@heavyComputation1() {
 ; CHECK-NEXT:  entry:
-
-; CHECK-NEXT:    %handle = alloca %struct.__tgt_async_info, align 8
-
-; CHECK-NEXT:    %a = alloca double, align 8
-; CHECK-NEXT:    %.offload_baseptrs = alloca [1 x i8*], align 8
-; CHECK-NEXT:    %.offload_ptrs = alloca [1 x i8*], align 8
-; CHECK-NEXT:    %.offload_baseptrs4 = alloca [1 x i8*], align 8
-; CHECK-NEXT:    %.offload_ptrs5 = alloca [1 x i8*], align 8
-; CHECK-NEXT:    %0 = bitcast double* %a to i8*
-; CHECK-NEXT:    %call = tail call i32 (...) @rand()
-; CHECK-NEXT:    %rem = srem i32 %call, 777
-; CHECK-NEXT:    %conv = sitofp i32 %rem to double
-; CHECK-NEXT:    store double %conv, double* %a, align 8
-; CHECK-NEXT:    %call1 = tail call i32 (...) @rand()
-; CHECK-NEXT:    %1 = getelementptr inbounds [1 x i8*], [1 x i8*]* %.offload_baseptrs, i64 0, i64 0
-; CHECK-NEXT:    %2 = bitcast [1 x i8*]* %.offload_baseptrs to double**
-; CHECK-NEXT:    store double* %a, double** %2, align 8
-; CHECK-NEXT:    %3 = getelementptr inbounds [1 x i8*], [1 x i8*]* %.offload_ptrs, i64 0, i64 0
-; CHECK-NEXT:    %4 = bitcast [1 x i8*]* %.offload_ptrs to double**
-; CHECK-NEXT:    store double* %a, double** %4, align 8
-
-; CHECK-NEXT:    call void @__tgt_target_data_begin_mapper_issue(%struct.ident_t* @0, i64 -1, i32 1, i8** %1, i8** %3, i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.1, i64 0, i64 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i64 0, i64 0), i8** null, i8** null, %struct.__tgt_async_info* %handle)
-
-; CHECK-NEXT:    %5 = bitcast double* %a to i64*
-
-; CHECK-NEXT:    call void @__tgt_target_data_begin_mapper_wait(i64 -1, %struct.__tgt_async_info* %handle)
-
-; CHECK-NEXT:    %6 = load i64, i64* %5, align 8
-; CHECK-NEXT:    %7 = getelementptr inbounds [1 x i8*], [1 x i8*]* %.offload_baseptrs4, i64 0, i64 0
-; CHECK-NEXT:    %8 = bitcast [1 x i8*]* %.offload_baseptrs4 to i64*
-; CHECK-NEXT:    store i64 %6, i64* %8, align 8
-; CHECK-NEXT:    %9 = getelementptr inbounds [1 x i8*], [1 x i8*]* %.offload_ptrs5, i64 0, i64 0
-; CHECK-NEXT:    %10 = bitcast [1 x i8*]* %.offload_ptrs5 to i64*
-; CHECK-NEXT:    store i64 %6, i64* %10, align 8
-; CHECK-NEXT:    %11 = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @0, i64 -1, i8* nonnull @.__omp_offloading_heavyComputation1.region_id, i32 1, i8** nonnull %7, i8** nonnull %9, i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.1, i64 0, i64 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.2, i64 0, i64 0), i8** null, i8** null, i32 0, i32 0)
-; CHECK-NEXT:    %.not = icmp eq i32 %11, 0
-; CHECK-NEXT:    br i1 %.not, label %omp_offload.cont, label %omp_offload.failed
+; CHECK-NEXT:    [[HANDLE:%.*]] = alloca [[STRUCT___TGT_ASYNC_INFO:%.*]], align 8
+; CHECK-NEXT:    [[A:%.*]] = alloca double, align 8
+; CHECK-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8
+; CHECK-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8
+; CHECK-NEXT:    [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [1 x i8*], align 8
+; CHECK-NEXT:    [[DOTOFFLOAD_PTRS5:%.*]] = alloca [1 x i8*], align 8
+; CHECK-NEXT:    [[TMP0:%.*]] = bitcast double* [[A]] to i8*
+; CHECK-NEXT:    [[CALL:%.*]] = tail call i32 (...) @rand()
+; CHECK-NEXT:    [[REM:%.*]] = srem i32 [[CALL]], 777
+; CHECK-NEXT:    [[CONV:%.*]] = sitofp i32 [[REM]] to double
+; CHECK-NEXT:    store double [[CONV]], double* [[A]], align 8
+; CHECK-NEXT:    [[CALL1:%.*]] = tail call i32 (...) @rand()
+; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i64 0, i64 0
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]] to double**
+; CHECK-NEXT:    store double* [[A]], double** [[TMP2]], align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i64 0, i64 0
+; CHECK-NEXT:    [[TMP4:%.*]] = bitcast [1 x i8*]* [[DOTOFFLOAD_PTRS]] to double**
+; CHECK-NEXT:    store double* [[A]], double** [[TMP4]], align 8
+; CHECK-NEXT:    call void @__tgt_target_data_begin_mapper_issue(%struct.ident_t* @[[GLOB0:[0-9]+]], i64 -1, i32 1, i8** [[TMP1]], i8** [[TMP3]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.1, i64 0, i64 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i64 0, i64 0), i8** null, i8** null, %struct.__tgt_async_info* [[HANDLE]])
+; CHECK-NEXT:    [[TMP5:%.*]] = bitcast double* [[A]] to i64*
+; CHECK-NEXT:    call void @__tgt_target_data_begin_mapper_wait(i64 -1, %struct.__tgt_async_info* [[HANDLE]])
+; CHECK-NEXT:    [[TMP6:%.*]] = load i64, i64* [[TMP5]], align 8
+; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i64 0, i64 0
+; CHECK-NEXT:    [[TMP8:%.*]] = bitcast [1 x i8*]* [[DOTOFFLOAD_BASEPTRS4]] to i64*
+; CHECK-NEXT:    store i64 [[TMP6]], i64* [[TMP8]], align 8
+; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS5]], i64 0, i64 0
+; CHECK-NEXT:    [[TMP10:%.*]] = bitcast [1 x i8*]* [[DOTOFFLOAD_PTRS5]] to i64*
+; CHECK-NEXT:    store i64 [[TMP6]], i64* [[TMP10]], align 8
+; CHECK-NEXT:    [[TMP11:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB0]], i64 -1, i8* nonnull @.__omp_offloading_heavyComputation1.region_id, i32 1, i8** nonnull [[TMP7]], i8** nonnull [[TMP9]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.1, i64 0, i64 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.2, i64 0, i64 0), i8** null, i8** null, i32 0, i32 0)
+; CHECK-NEXT:    [[DOTNOT:%.*]] = icmp eq i32 [[TMP11]], 0
+; CHECK-NEXT:    br i1 [[DOTNOT]], label [[OMP_OFFLOAD_CONT:%.*]], label [[OMP_OFFLOAD_FAILED:%.*]]
 ; CHECK:       omp_offload.failed:
-; CHECK-NEXT:    call void @heavyComputation1FallBack(i64 %6)
-; CHECK-NEXT:    br label %omp_offload.cont
+; CHECK-NEXT:    call void @heavyComputation1FallBack(i64 [[TMP6]])
+; CHECK-NEXT:    br label [[OMP_OFFLOAD_CONT]]
 ; CHECK:       omp_offload.cont:
-; CHECK-NEXT:    %conv2 = sitofp i32 %call1 to double
-; CHECK-NEXT:    call void @__tgt_target_data_end_mapper(%struct.ident_t* @0, i64 -1, i32 1, i8** nonnull %1, i8** nonnull %3, i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.1, i64 0, i64 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i64 0, i64 0), i8** null, i8** null)
-; CHECK-NEXT:    %12 = load double, double* %a, align 8
-; CHECK-NEXT:    %add = fadd double %12, %conv2
-; CHECK-NEXT:    ret double %add
+; CHECK-NEXT:    [[CONV2:%.*]] = sitofp i32 [[CALL1]] to double
+; CHECK-NEXT:    call void @__tgt_target_data_end_mapper(%struct.ident_t* @[[GLOB0]], i64 -1, i32 1, i8** nonnull [[TMP1]], i8** nonnull [[TMP3]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.1, i64 0, i64 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i64 0, i64 0), i8** null, i8** null)
+; CHECK-NEXT:    [[TMP12:%.*]] = load double, double* [[A]], align 8
+; CHECK-NEXT:    [[ADD:%.*]] = fadd double [[TMP12]], [[CONV2]]
+; CHECK-NEXT:    ret double [[ADD]]
 ;
+
+
+
+
+
+
 entry:
   %a = alloca double, align 8
   %.offload_baseptrs = alloca [1 x i8*], align 8
@@ -141,6 +141,11 @@ omp_offload.cont:                                 ; preds = %omp_offload.failed,
 }
 
 define internal void @heavyComputation1FallBack(i64 %a) {
+; CHECK-LABEL: define {{[^@]+}}@heavyComputation1FallBack
+; CHECK-SAME: (i64 [[A:%.*]]) {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    ret void
+;
 entry:
   ; Fallback for offloading function heavyComputation1.
   ret void
@@ -163,62 +168,63 @@ entry:
 ;  return random;
 ;}
 define dso_local i32 @heavyComputation2(double* %a, i32 %size) {
-; CHECK-LABEL: define {{[^@]+}}@heavyComputation2(double* %a, i32 %size) {
+; CHECK-LABEL: define {{[^@]+}}@heavyComputation2
+; CHECK-SAME: (double* [[A:%.*]], i32 [[SIZE:%.*]]) {
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    %size.addr = alloca i32, align 4
-; CHECK-NEXT:    %.offload_baseptrs = alloca [2 x i8*], align 8
-; CHECK-NEXT:    %.offload_ptrs = alloca [2 x i8*], align 8
-; CHECK-NEXT:    %.offload_sizes = alloca [2 x i64], align 8
-; CHECK-NEXT:    %.offload_baseptrs2 = alloca [2 x i8*], align 8
-; CHECK-NEXT:    %.offload_ptrs3 = alloca [2 x i8*], align 8
-; CHECK-NEXT:    store i32 %size, i32* %size.addr, align 4
-; CHECK-NEXT:    %call = tail call i32 (...) @rand()
-; CHECK-NEXT:    %conv = zext i32 %size to i64
-; CHECK-NEXT:    %0 = shl nuw nsw i64 %conv, 3
-; CHECK-NEXT:    %1 = getelementptr inbounds [2 x i8*], [2 x i8*]* %.offload_baseptrs, i64 0, i64 0
-; CHECK-NEXT:    %2 = bitcast [2 x i8*]* %.offload_baseptrs to double**
-; CHECK-NEXT:    store double* %a, double** %2, align 8
-; CHECK-NEXT:    %3 = getelementptr inbounds [2 x i8*], [2 x i8*]* %.offload_ptrs, i64 0, i64 0
-; CHECK-NEXT:    %4 = bitcast [2 x i8*]* %.offload_ptrs to double**
-; CHECK-NEXT:    store double* %a, double** %4, align 8
-; CHECK-NEXT:    %5 = getelementptr inbounds [2 x i64], [2 x i64]* %.offload_sizes, i64 0, i64 0
-; CHECK-NEXT:    store i64 %0, i64* %5, align 8
-; CHECK-NEXT:    %6 = getelementptr inbounds [2 x i8*], [2 x i8*]* %.offload_baseptrs, i64 0, i64 1
-; CHECK-NEXT:    %7 = bitcast i8** %6 to i32**
-; CHECK-NEXT:    store i32* %size.addr, i32** %7, align 8
-; CHECK-NEXT:    %8 = getelementptr inbounds [2 x i8*], [2 x i8*]* %.offload_ptrs, i64 0, i64 1
-; CHECK-NEXT:    %9 = bitcast i8** %8 to i32**
-; CHECK-NEXT:    store i32* %size.addr, i32** %9, align 8
-; CHECK-NEXT:    %10 = getelementptr inbounds [2 x i64], [2 x i64]* %.offload_sizes, i64 0, i64 1
-; CHECK-NEXT:    store i64 4, i64* %10, align 8
-
-; CHECK-NEXT:    call void @__tgt_target_data_begin_mapper(%struct.ident_t* @0, i64 -1, i32 2, i8** nonnull %1, i8** nonnull %3, i64* nonnull %5, i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.3, i64 0, i64 0), i8** null, i8** null)
-
-; CHECK-NEXT:    %11 = load i32, i32* %size.addr, align 4
-; CHECK-NEXT:    %size.casted = zext i32 %11 to i64
-; CHECK-NEXT:    %12 = getelementptr inbounds [2 x i8*], [2 x i8*]* %.offload_baseptrs2, i64 0, i64 0
-; CHECK-NEXT:    %13 = bitcast [2 x i8*]* %.offload_baseptrs2 to i64*
-; CHECK-NEXT:    store i64 %size.casted, i64* %13, align 8
-; CHECK-NEXT:    %14 = getelementptr inbounds [2 x i8*], [2 x i8*]* %.offload_ptrs3, i64 0, i64 0
-; CHECK-NEXT:    %15 = bitcast [2 x i8*]* %.offload_ptrs3 to i64*
-; CHECK-NEXT:    store i64 %size.casted, i64* %15, align 8
-; CHECK-NEXT:    %16 = getelementptr inbounds [2 x i8*], [2 x i8*]* %.offload_baseptrs2, i64 0, i64 1
-; CHECK-NEXT:    %17 = bitcast i8** %16 to double**
-; CHECK-NEXT:    store double* %a, double** %17, align 8
-; CHECK-NEXT:    %18 = getelementptr inbounds [2 x i8*], [2 x i8*]* %.offload_ptrs3, i64 0, i64 1
-; CHECK-NEXT:    %19 = bitcast i8** %18 to double**
-; CHECK-NEXT:    store double* %a, double** %19, align 8
-; CHECK-NEXT:    %20 = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @0, i64 -1, i8* nonnull @.__omp_offloading_heavyComputation2.region_id, i32 2, i8** nonnull %12, i8** nonnull %14, i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.2, i64 0, i64 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.4, i64 0, i64 0), i8** null, i8** null, i32 0, i32 0)
-; CHECK-NEXT:    %.not = icmp eq i32 %20, 0
-; CHECK-NEXT:    br i1 %.not, label %omp_offload.cont, label %omp_offload.failed
+; CHECK-NEXT:    [[SIZE_ADDR:%.*]] = alloca i32, align 4
+; CHECK-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [2 x i8*], align 8
+; CHECK-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [2 x i8*], align 8
+; CHECK-NEXT:    [[DOTOFFLOAD_SIZES:%.*]] = alloca [2 x i64], align 8
+; CHECK-NEXT:    [[DOTOFFLOAD_BASEPTRS2:%.*]] = alloca [2 x i8*], align 8
+; CHECK-NEXT:    [[DOTOFFLOAD_PTRS3:%.*]] = alloca [2 x i8*], align 8
+; CHECK-NEXT:    store i32 [[SIZE]], i32* [[SIZE_ADDR]], align 4
+; CHECK-NEXT:    [[CALL:%.*]] = tail call i32 (...) @rand()
+; CHECK-NEXT:    [[CONV:%.*]] = zext i32 [[SIZE]] to i64
+; CHECK-NEXT:    [[TMP0:%.*]] = shl nuw nsw i64 [[CONV]], 3
+; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i64 0, i64 0
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]] to double**
+; CHECK-NEXT:    store double* [[A]], double** [[TMP2]], align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i64 0, i64 0
+; CHECK-NEXT:    [[TMP4:%.*]] = bitcast [2 x i8*]* [[DOTOFFLOAD_PTRS]] to double**
+; CHECK-NEXT:    store double* [[A]], double** [[TMP4]], align 8
+; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [2 x i64], [2 x i64]* [[DOTOFFLOAD_SIZES]], i64 0, i64 0
+; CHECK-NEXT:    store i64 [[TMP0]], i64* [[TMP5]], align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i64 0, i64 1
+; CHECK-NEXT:    [[TMP7:%.*]] = bitcast i8** [[TMP6]] to i32**
+; CHECK-NEXT:    store i32* [[SIZE_ADDR]], i32** [[TMP7]], align 8
+; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i64 0, i64 1
+; CHECK-NEXT:    [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i32**
+; CHECK-NEXT:    store i32* [[SIZE_ADDR]], i32** [[TMP9]], align 8
+; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [2 x i64], [2 x i64]* [[DOTOFFLOAD_SIZES]], i64 0, i64 1
+; CHECK-NEXT:    store i64 4, i64* [[TMP10]], align 8
+; CHECK-NEXT:    call void @__tgt_target_data_begin_mapper(%struct.ident_t* @[[GLOB0]], i64 -1, i32 2, i8** nonnull [[TMP1]], i8** nonnull [[TMP3]], i64* nonnull [[TMP5]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.3, i64 0, i64 0), i8** null, i8** null)
+; CHECK-NEXT:    [[TMP11:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4
+; CHECK-NEXT:    [[SIZE_CASTED:%.*]] = zext i32 [[TMP11]] to i64
+; CHECK-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS2]], i64 0, i64 0
+; CHECK-NEXT:    [[TMP13:%.*]] = bitcast [2 x i8*]* [[DOTOFFLOAD_BASEPTRS2]] to i64*
+; CHECK-NEXT:    store i64 [[SIZE_CASTED]], i64* [[TMP13]], align 8
+; CHECK-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS3]], i64 0, i64 0
+; CHECK-NEXT:    [[TMP15:%.*]] = bitcast [2 x i8*]* [[DOTOFFLOAD_PTRS3]] to i64*
+; CHECK-NEXT:    store i64 [[SIZE_CASTED]], i64* [[TMP15]], align 8
+; CHECK-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS2]], i64 0, i64 1
+; CHECK-NEXT:    [[TMP17:%.*]] = bitcast i8** [[TMP16]] to double**
+; CHECK-NEXT:    store double* [[A]], double** [[TMP17]], align 8
+; CHECK-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS3]], i64 0, i64 1
+; CHECK-NEXT:    [[TMP19:%.*]] = bitcast i8** [[TMP18]] to double**
+; CHECK-NEXT:    store double* [[A]], double** [[TMP19]], align 8
+; CHECK-NEXT:    [[TMP20:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB0]], i64 -1, i8* nonnull @.__omp_offloading_heavyComputation2.region_id, i32 2, i8** nonnull [[TMP12]], i8** nonnull [[TMP14]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.2, i64 0, i64 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.4, i64 0, i64 0), i8** null, i8** null, i32 0, i32 0)
+; CHECK-NEXT:    [[DOTNOT:%.*]] = icmp eq i32 [[TMP20]], 0
+; CHECK-NEXT:    br i1 [[DOTNOT]], label [[OMP_OFFLOAD_CONT:%.*]], label [[OMP_OFFLOAD_FAILED:%.*]]
 ; CHECK:       omp_offload.failed:
-; CHECK-NEXT:    call void @heavyComputation2FallBack(i64 %size.casted, double* %a)
-; CHECK-NEXT:    br label %omp_offload.cont
+; CHECK-NEXT:    call void @heavyComputation2FallBack(i64 [[SIZE_CASTED]], double* [[A]])
+; CHECK-NEXT:    br label [[OMP_OFFLOAD_CONT]]
 ; CHECK:       omp_offload.cont:
-; CHECK-NEXT:    %rem = srem i32 %call, 7
-; CHECK-NEXT:    call void @__tgt_target_data_end_mapper(%struct.ident_t* @0, i64 -1, i32 2, i8** nonnull %1, i8** nonnull %3, i64* nonnull %5, i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.3, i64 0, i64 0), i8** null, i8** null)
-; CHECK-NEXT:    ret i32 %rem
+; CHECK-NEXT:    [[REM:%.*]] = srem i32 [[CALL]], 7
+; CHECK-NEXT:    call void @__tgt_target_data_end_mapper(%struct.ident_t* @[[GLOB0]], i64 -1, i32 2, i8** nonnull [[TMP1]], i8** nonnull [[TMP3]], i64* nonnull [[TMP5]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.3, i64 0, i64 0), i8** null, i8** null)
+; CHECK-NEXT:    ret i32 [[REM]]
 ;
+
+
 entry:
   %size.addr = alloca i32, align 4
   %.offload_baseptrs = alloca [2 x i8*], align 8
@@ -281,6 +287,11 @@ omp_offload.cont:                                 ; preds = %omp_offload.failed,
 }
 
 define internal void @heavyComputation2FallBack(i64 %size, double* %a) {
+; CHECK-LABEL: define {{[^@]+}}@heavyComputation2FallBack
+; CHECK-SAME: (i64 [[SIZE:%.*]], double* [[A:%.*]]) {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    ret void
+;
 entry:
   ; Fallback for offloading function heavyComputation2.
   ret void
@@ -303,62 +314,63 @@ entry:
 ;  return random;
 ;}
 define dso_local i32 @heavyComputation3(double* noalias %a, i32 %size) {
-; CHECK-LABEL: define {{[^@]+}}@heavyComputation3(double* noalias %a, i32 %size) {
+; CHECK-LABEL: define {{[^@]+}}@heavyComputation3
+; CHECK-SAME: (double* noalias [[A:%.*]], i32 [[SIZE:%.*]]) {
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    %size.addr = alloca i32, align 4
-; CHECK-NEXT:    %.offload_baseptrs = alloca [2 x i8*], align 8
-; CHECK-NEXT:    %.offload_ptrs = alloca [2 x i8*], align 8
-; CHECK-NEXT:    %.offload_sizes = alloca [2 x i64], align 8
-; CHECK-NEXT:    %.offload_baseptrs2 = alloca [2 x i8*], align 8
-; CHECK-NEXT:    %.offload_ptrs3 = alloca [2 x i8*], align 8
-; CHECK-NEXT:    store i32 %size, i32* %size.addr, align 4
-; CHECK-NEXT:    %call = tail call i32 (...) @rand()
-; CHECK-NEXT:    %conv = zext i32 %size to i64
-; CHECK-NEXT:    %0 = shl nuw nsw i64 %conv, 3
-; CHECK-NEXT:    %1 = getelementptr inbounds [2 x i8*], [2 x i8*]* %.offload_baseptrs, i64 0, i64 0
-; CHECK-NEXT:    %2 = bitcast [2 x i8*]* %.offload_baseptrs to double**
-; CHECK-NEXT:    store double* %a, double** %2, align 8
-; CHECK-NEXT:    %3 = getelementptr inbounds [2 x i8*], [2 x i8*]* %.offload_ptrs, i64 0, i64 0
-; CHECK-NEXT:    %4 = bitcast [2 x i8*]* %.offload_ptrs to double**
-; CHECK-NEXT:    store double* %a, double** %4, align 8
-; CHECK-NEXT:    %5 = getelementptr inbounds [2 x i64], [2 x i64]* %.offload_sizes, i64 0, i64 0
-; CHECK-NEXT:    store i64 %0, i64* %5, align 8
-; CHECK-NEXT:    %6 = getelementptr inbounds [2 x i8*], [2 x i8*]* %.offload_baseptrs, i64 0, i64 1
-; CHECK-NEXT:    %7 = bitcast i8** %6 to i32**
-; CHECK-NEXT:    store i32* %size.addr, i32** %7, align 8
-; CHECK-NEXT:    %8 = getelementptr inbounds [2 x i8*], [2 x i8*]* %.offload_ptrs, i64 0, i64 1
-; CHECK-NEXT:    %9 = bitcast i8** %8 to i32**
-; CHECK-NEXT:    store i32* %size.addr, i32** %9, align 8
-; CHECK-NEXT:    %10 = getelementptr inbounds [2 x i64], [2 x i64]* %.offload_sizes, i64 0, i64 1
-; CHECK-NEXT:    store i64 4, i64* %10, align 8
-
-; CHECK-NEXT:    call void @__tgt_target_data_begin_mapper(%struct.ident_t* @0, i64 -1, i32 2, i8** nonnull %1, i8** nonnull %3, i64* nonnull %5, i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.3, i64 0, i64 0), i8** null, i8** null)
-
-; CHECK-NEXT:    %11 = load i32, i32* %size.addr, align 4
-; CHECK-NEXT:    %size.casted = zext i32 %11 to i64
-; CHECK-NEXT:    %12 = getelementptr inbounds [2 x i8*], [2 x i8*]* %.offload_baseptrs2, i64 0, i64 0
-; CHECK-NEXT:    %13 = bitcast [2 x i8*]* %.offload_baseptrs2 to i64*
-; CHECK-NEXT:    store i64 %size.casted, i64* %13, align 8
-; CHECK-NEXT:    %14 = getelementptr inbounds [2 x i8*], [2 x i8*]* %.offload_ptrs3, i64 0, i64 0
-; CHECK-NEXT:    %15 = bitcast [2 x i8*]* %.offload_ptrs3 to i64*
-; CHECK-NEXT:    store i64 %size.casted, i64* %15, align 8
-; CHECK-NEXT:    %16 = getelementptr inbounds [2 x i8*], [2 x i8*]* %.offload_baseptrs2, i64 0, i64 1
-; CHECK-NEXT:    %17 = bitcast i8** %16 to double**
-; CHECK-NEXT:    store double* %a, double** %17, align 8
-; CHECK-NEXT:    %18 = getelementptr inbounds [2 x i8*], [2 x i8*]* %.offload_ptrs3, i64 0, i64 1
-; CHECK-NEXT:    %19 = bitcast i8** %18 to double**
-; CHECK-NEXT:    store double* %a, double** %19, align 8
-; CHECK-NEXT:    %20 = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @0, i64 -1, i8* nonnull @.__omp_offloading_heavyComputation3.region_id, i32 2, i8** nonnull %12, i8** nonnull %14, i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.2, i64 0, i64 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.4, i64 0, i64 0), i8** null, i8** null, i32 0, i32 0)
-; CHECK-NEXT:    %.not = icmp eq i32 %20, 0
-; CHECK-NEXT:    br i1 %.not, label %omp_offload.cont, label %omp_offload.failed
+; CHECK-NEXT:    [[SIZE_ADDR:%.*]] = alloca i32, align 4
+; CHECK-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [2 x i8*], align 8
+; CHECK-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [2 x i8*], align 8
+; CHECK-NEXT:    [[DOTOFFLOAD_SIZES:%.*]] = alloca [2 x i64], align 8
+; CHECK-NEXT:    [[DOTOFFLOAD_BASEPTRS2:%.*]] = alloca [2 x i8*], align 8
+; CHECK-NEXT:    [[DOTOFFLOAD_PTRS3:%.*]] = alloca [2 x i8*], align 8
+; CHECK-NEXT:    store i32 [[SIZE]], i32* [[SIZE_ADDR]], align 4
+; CHECK-NEXT:    [[CALL:%.*]] = tail call i32 (...) @rand()
+; CHECK-NEXT:    [[CONV:%.*]] = zext i32 [[SIZE]] to i64
+; CHECK-NEXT:    [[TMP0:%.*]] = shl nuw nsw i64 [[CONV]], 3
+; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i64 0, i64 0
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]] to double**
+; CHECK-NEXT:    store double* [[A]], double** [[TMP2]], align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i64 0, i64 0
+; CHECK-NEXT:    [[TMP4:%.*]] = bitcast [2 x i8*]* [[DOTOFFLOAD_PTRS]] to double**
+; CHECK-NEXT:    store double* [[A]], double** [[TMP4]], align 8
+; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [2 x i64], [2 x i64]* [[DOTOFFLOAD_SIZES]], i64 0, i64 0
+; CHECK-NEXT:    store i64 [[TMP0]], i64* [[TMP5]], align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i64 0, i64 1
+; CHECK-NEXT:    [[TMP7:%.*]] = bitcast i8** [[TMP6]] to i32**
+; CHECK-NEXT:    store i32* [[SIZE_ADDR]], i32** [[TMP7]], align 8
+; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i64 0, i64 1
+; CHECK-NEXT:    [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i32**
+; CHECK-NEXT:    store i32* [[SIZE_ADDR]], i32** [[TMP9]], align 8
+; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [2 x i64], [2 x i64]* [[DOTOFFLOAD_SIZES]], i64 0, i64 1
+; CHECK-NEXT:    store i64 4, i64* [[TMP10]], align 8
+; CHECK-NEXT:    call void @__tgt_target_data_begin_mapper(%struct.ident_t* @[[GLOB0]], i64 -1, i32 2, i8** nonnull [[TMP1]], i8** nonnull [[TMP3]], i64* nonnull [[TMP5]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.3, i64 0, i64 0), i8** null, i8** null)
+; CHECK-NEXT:    [[TMP11:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4
+; CHECK-NEXT:    [[SIZE_CASTED:%.*]] = zext i32 [[TMP11]] to i64
+; CHECK-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS2]], i64 0, i64 0
+; CHECK-NEXT:    [[TMP13:%.*]] = bitcast [2 x i8*]* [[DOTOFFLOAD_BASEPTRS2]] to i64*
+; CHECK-NEXT:    store i64 [[SIZE_CASTED]], i64* [[TMP13]], align 8
+; CHECK-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS3]], i64 0, i64 0
+; CHECK-NEXT:    [[TMP15:%.*]] = bitcast [2 x i8*]* [[DOTOFFLOAD_PTRS3]] to i64*
+; CHECK-NEXT:    store i64 [[SIZE_CASTED]], i64* [[TMP15]], align 8
+; CHECK-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS2]], i64 0, i64 1
+; CHECK-NEXT:    [[TMP17:%.*]] = bitcast i8** [[TMP16]] to double**
+; CHECK-NEXT:    store double* [[A]], double** [[TMP17]], align 8
+; CHECK-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS3]], i64 0, i64 1
+; CHECK-NEXT:    [[TMP19:%.*]] = bitcast i8** [[TMP18]] to double**
+; CHECK-NEXT:    store double* [[A]], double** [[TMP19]], align 8
+; CHECK-NEXT:    [[TMP20:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB0]], i64 -1, i8* nonnull @.__omp_offloading_heavyComputation3.region_id, i32 2, i8** nonnull [[TMP12]], i8** nonnull [[TMP14]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.2, i64 0, i64 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.4, i64 0, i64 0), i8** null, i8** null, i32 0, i32 0)
+; CHECK-NEXT:    [[DOTNOT:%.*]] = icmp eq i32 [[TMP20]], 0
+; CHECK-NEXT:    br i1 [[DOTNOT]], label [[OMP_OFFLOAD_CONT:%.*]], label [[OMP_OFFLOAD_FAILED:%.*]]
 ; CHECK:       omp_offload.failed:
-; CHECK-NEXT:    call void @heavyComputation3FallBack(i64 %size.casted, double* %a)
-; CHECK-NEXT:    br label %omp_offload.cont
+; CHECK-NEXT:    call void @heavyComputation3FallBack(i64 [[SIZE_CASTED]], double* [[A]])
+; CHECK-NEXT:    br label [[OMP_OFFLOAD_CONT]]
 ; CHECK:       omp_offload.cont:
-; CHECK-NEXT:    %rem = srem i32 %call, 7
-; CHECK-NEXT:    call void @__tgt_target_data_end_mapper(%struct.ident_t* @0, i64 -1, i32 2, i8** nonnull %1, i8** nonnull %3, i64* nonnull %5, i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.3, i64 0, i64 0), i8** null, i8** null)
-; CHECK-NEXT:    ret i32 %rem
+; CHECK-NEXT:    [[REM:%.*]] = srem i32 [[CALL]], 7
+; CHECK-NEXT:    call void @__tgt_target_data_end_mapper(%struct.ident_t* @[[GLOB0]], i64 -1, i32 2, i8** nonnull [[TMP1]], i8** nonnull [[TMP3]], i64* nonnull [[TMP5]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.3, i64 0, i64 0), i8** null, i8** null)
+; CHECK-NEXT:    ret i32 [[REM]]
 ;
+
+
 entry:
   %size.addr = alloca i32, align 4
   %.offload_baseptrs = alloca [2 x i8*], align 8
@@ -422,6 +434,11 @@ omp_offload.cont:                                 ; preds = %omp_offload.failed,
 }
 
 define internal void @heavyComputation3FallBack(i64 %size, double* %a) {
+; CHECK-LABEL: define {{[^@]+}}@heavyComputation3FallBack
+; CHECK-SAME: (i64 [[SIZE:%.*]], double* [[A:%.*]]) {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    ret void
+;
 entry:
   ; Fallback for offloading function heavyComputation3.
   ret void
@@ -441,35 +458,36 @@ entry:
 ;  return random;
 ;}
 define dso_local i32 @dataTransferOnly1(double* noalias %a, i32 %size) {
-; CHECK-LABEL: define {{[^@]+}}@dataTransferOnly1(double* noalias %a, i32 %size) {
+; CHECK-LABEL: define {{[^@]+}}@dataTransferOnly1
+; CHECK-SAME: (double* noalias [[A:%.*]], i32 [[SIZE:%.*]]) {
 ; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[HANDLE:%.*]] = alloca [[STRUCT___TGT_ASYNC_INFO:%.*]], align 8
+; CHECK-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8
+; CHECK-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8
+; CHECK-NEXT:    [[DOTOFFLOAD_SIZES:%.*]] = alloca [1 x i64], align 8
+; CHECK-NEXT:    [[CALL:%.*]] = tail call i32 (...) @rand()
+; CHECK-NEXT:    [[CONV:%.*]] = zext i32 [[SIZE]] to i64
+; CHECK-NEXT:    [[TMP0:%.*]] = shl nuw nsw i64 [[CONV]], 3
+; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i64 0, i64 0
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]] to double**
+; CHECK-NEXT:    store double* [[A]], double** [[TMP2]], align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i64 0, i64 0
+; CHECK-NEXT:    [[TMP4:%.*]] = bitcast [1 x i8*]* [[DOTOFFLOAD_PTRS]] to double**
+; CHECK-NEXT:    store double* [[A]], double** [[TMP4]], align 8
+; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x i64], [1 x i64]* [[DOTOFFLOAD_SIZES]], i64 0, i64 0
+; CHECK-NEXT:    store i64 [[TMP0]], i64* [[TMP5]], align 8
+; CHECK-NEXT:    call void @__tgt_target_data_begin_mapper_issue(%struct.ident_t* @[[GLOB0]], i64 -1, i32 1, i8** [[TMP1]], i8** [[TMP3]], i64* [[TMP5]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.5, i64 0, i64 0), i8** null, i8** null, %struct.__tgt_async_info* [[HANDLE]])
+; CHECK-NEXT:    [[REM:%.*]] = urem i32 [[CALL]], [[SIZE]]
+; CHECK-NEXT:    call void @__tgt_target_data_begin_mapper_wait(i64 -1, %struct.__tgt_async_info* [[HANDLE]])
+; CHECK-NEXT:    call void @__tgt_target_data_end_mapper(%struct.ident_t* @[[GLOB0]], i64 -1, i32 1, i8** nonnull [[TMP1]], i8** nonnull [[TMP3]], i64* nonnull [[TMP5]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.5, i64 0, i64 0), i8** null, i8** null)
+; CHECK-NEXT:    ret i32 [[REM]]
+;
 
-; CHECK-NEXT:    %handle = alloca %struct.__tgt_async_info, align 8
 
-; CHECK-NEXT:    %.offload_baseptrs = alloca [1 x i8*], align 8
-; CHECK-NEXT:    %.offload_ptrs = alloca [1 x i8*], align 8
-; CHECK-NEXT:    %.offload_sizes = alloca [1 x i64], align 8
-; CHECK-NEXT:    %call = tail call i32 (...) @rand()
-; CHECK-NEXT:    %conv = zext i32 %size to i64
-; CHECK-NEXT:    %0 = shl nuw nsw i64 %conv, 3
-; CHECK-NEXT:    %1 = getelementptr inbounds [1 x i8*], [1 x i8*]* %.offload_baseptrs, i64 0, i64 0
-; CHECK-NEXT:    %2 = bitcast [1 x i8*]* %.offload_baseptrs to double**
-; CHECK-NEXT:    store double* %a, double** %2, align 8
-; CHECK-NEXT:    %3 = getelementptr inbounds [1 x i8*], [1 x i8*]* %.offload_ptrs, i64 0, i64 0
-; CHECK-NEXT:    %4 = bitcast [1 x i8*]* %.offload_ptrs to double**
-; CHECK-NEXT:    store double* %a, double** %4, align 8
-; CHECK-NEXT:    %5 = getelementptr inbounds [1 x i64], [1 x i64]* %.offload_sizes, i64 0, i64 0
-; CHECK-NEXT:    store i64 %0, i64* %5, align 8
 
-; CHECK-NEXT:    call void @__tgt_target_data_begin_mapper_issue(%struct.ident_t* @0, i64 -1, i32 1, i8** %1, i8** %3, i64* %5, i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.5, i64 0, i64 0), i8** null, i8** null, %struct.__tgt_async_info* %handle)
 
-; CHECK-NEXT:    %rem = urem i32 %call, %size
 
-; CHECK-NEXT:    call void @__tgt_target_data_begin_mapper_wait(i64 -1, %struct.__tgt_async_info* %handle)
 
-; CHECK-NEXT:    call void @__tgt_target_data_end_mapper(%struct.ident_t* @0, i64 -1, i32 1, i8** nonnull %1, i8** nonnull %3, i64* nonnull %5, i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.5, i64 0, i64 0), i8** null, i8** null)
-; CHECK-NEXT:    ret i32 %rem
-;
 entry:
   %.offload_baseptrs = alloca [1 x i8*], align 8
   %.offload_ptrs = alloca [1 x i8*], align 8



