r261779 - [CUDA] Mark all CUDA device-side function defs, decls, and calls as convergent.
Justin Lebar via cfe-commits
cfe-commits at lists.llvm.org
Wed Feb 24 13:55:11 PST 2016
Author: jlebar
Date: Wed Feb 24 15:55:11 2016
New Revision: 261779
URL: http://llvm.org/viewvc/llvm-project?rev=261779&view=rev
Log:
[CUDA] Mark all CUDA device-side function defs, decls, and calls as convergent.
Summary:
This is important for e.g. the following case:
  void sync() { __syncthreads(); }

  void foo() {
    do_something();
    sync();
    do_something_else();
  }
Without this change, if the optimizer does not inline sync() (which it
won't, because __syncthreads is also marked noduplicate, for now anyway),
it is free to perform optimizations around the call to sync() that it
would not be allowed to perform around __syncthreads() itself, because
sync() is not marked convergent.
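To make the hazard concrete, here is a hedged sketch of one transform that
the convergent attribute rules out; the divergent_cond parameter, the loop,
and the comments are invented for illustration and are not part of this
patch:

  // Hedged illustration only; divergent_cond, n, and the loop are invented
  // for this example.
  __device__ void do_something();                // opaque to the optimizer
  __device__ void sync() { __syncthreads(); }    // not inlined (noduplicate)

  __device__ void foo(bool divergent_cond, int n) {
    for (int i = 0; i < n; ++i) {
      if (divergent_cond)   // may differ between threads of the same block
        do_something();
      sync();               // every thread must reach this barrier together
    }
    // If sync() is not marked convergent, loop unswitching may hoist the
    // divergent_cond test out of the loop, creating two loop copies that
    // each contain their own barrier call. Threads with different values of
    // divergent_cond would then reach different barrier call sites, which
    // __syncthreads() does not allow. The convergent attribute forbids
    // adding such new control-flow dependencies around the call.
  }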
Similarly, we need a notion of convergent calls, since in the case when
we can't statically determine a call's target(s), we need to know
whether it's safe to perform optimizations around the call.
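As a hedged illustration of the indirect-call case (apply,
barrier_then_work, plain_work, and kernel are made-up names, not from this
patch):

  // Hedged illustration; all names here are invented for the example.
  __device__ void barrier_then_work() { __syncthreads(); }
  __device__ void plain_work() {}

  __device__ void apply(void (*op)()) {
    // The optimizer cannot tell which function op points to, so it cannot
    // prove the callee is free of convergent operations such as
    // __syncthreads(). Marking the call site itself convergent (as this
    // patch now does for all device-side calls) keeps barrier-unsafe
    // transformations from being applied around it.
    op();
  }

  __global__ void kernel(bool pick) {
    apply(pick ? &barrier_then_work : &plain_work);
  }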
This change is conservative; the optimizer will remove these attributes
where it safely can (see r260318 and r260319).
Reviewers: majnemer
Subscribers: cfe-commits, jhen, echristo, tra
Differential Revision: http://reviews.llvm.org/D17056
Added:
cfe/trunk/test/CodeGenCUDA/convergent.cu
Modified:
cfe/trunk/lib/CodeGen/CGCall.cpp
cfe/trunk/test/CodeGenCUDA/device-var-init.cu
Modified: cfe/trunk/lib/CodeGen/CGCall.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/CodeGen/CGCall.cpp?rev=261779&r1=261778&r2=261779&view=diff
==============================================================================
--- cfe/trunk/lib/CodeGen/CGCall.cpp (original)
+++ cfe/trunk/lib/CodeGen/CGCall.cpp Wed Feb 24 15:55:11 2016
@@ -1595,6 +1595,14 @@ void CodeGenModule::ConstructAttributeLi
}
}
+ if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) {
+ // Conservatively, mark all functions and calls in CUDA as convergent
+ // (meaning, they may call an intrinsically convergent op, such as
+ // __syncthreads(), and so can't have certain optimizations applied around
+ // them). LLVM will remove this attribute where it safely can.
+ FuncAttrs.addAttribute(llvm::Attribute::Convergent);
+ }
+
ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);
QualType RetTy = FI.getReturnType();
Added: cfe/trunk/test/CodeGenCUDA/convergent.cu
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGenCUDA/convergent.cu?rev=261779&view=auto
==============================================================================
--- cfe/trunk/test/CodeGenCUDA/convergent.cu (added)
+++ cfe/trunk/test/CodeGenCUDA/convergent.cu Wed Feb 24 15:55:11 2016
@@ -0,0 +1,39 @@
+// REQUIRES: x86-registered-target
+// REQUIRES: nvptx-registered-target
+
+// RUN: %clang_cc1 -fcuda-is-device -triple nvptx-nvidia-cuda -emit-llvm \
+// RUN: -disable-llvm-passes -o - %s | FileCheck -check-prefix DEVICE %s
+
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-llvm \
+// RUN: -disable-llvm-passes -o - %s | \
+// RUN: FileCheck -check-prefix HOST %s
+
+#include "Inputs/cuda.h"
+
+// DEVICE: Function Attrs:
+// DEVICE-SAME: convergent
+// DEVICE-NEXT: define void @_Z3foov
+__device__ void foo() {}
+
+// HOST: Function Attrs:
+// HOST-NOT: convergent
+// HOST-NEXT: define void @_Z3barv
+// DEVICE: Function Attrs:
+// DEVICE-SAME: convergent
+// DEVICE-NEXT: define void @_Z3barv
+__host__ __device__ void baz();
+__host__ __device__ void bar() {
+ // DEVICE: call void @_Z3bazv() [[CALL_ATTR:#[0-9]+]]
+ baz();
+}
+
+// DEVICE: declare void @_Z3bazv() [[BAZ_ATTR:#[0-9]+]]
+// DEVICE: attributes [[BAZ_ATTR]] = {
+// DEVICE-SAME: convergent
+// DEVICE-SAME: }
+// DEVICE: attributes [[CALL_ATTR]] = { convergent }
+
+// HOST: declare void @_Z3bazv() [[BAZ_ATTR:#[0-9]+]]
+// HOST: attributes [[BAZ_ATTR]] = {
+// HOST-NOT: convergent
+// HOST-SAME: }
Modified: cfe/trunk/test/CodeGenCUDA/device-var-init.cu
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGenCUDA/device-var-init.cu?rev=261779&r1=261778&r2=261779&view=diff
==============================================================================
--- cfe/trunk/test/CodeGenCUDA/device-var-init.cu (original)
+++ cfe/trunk/test/CodeGenCUDA/device-var-init.cu Wed Feb 24 15:55:11 2016
@@ -382,7 +382,7 @@ __device__ void df() {
// CHECK: call void @_ZN4NETCC1IJEEEDpT_(%struct.NETC* %netc)
// CHECK: call void @_ZN7EC_I_ECC1Ev(%struct.EC_I_EC* %ec_i_ec)
// CHECK: call void @_ZN8EC_I_EC1C1Ev(%struct.EC_I_EC1* %ec_i_ec1)
-// CHECK: call void @_ZN5T_V_TC1Ev(%struct.T_V_T* %t_v_t) #3
+// CHECK: call void @_ZN5T_V_TC1Ev(%struct.T_V_T* %t_v_t)
// CHECK: call void @_ZN7T_B_NECC1Ev(%struct.T_B_NEC* %t_b_nec)
// CHECK: call void @_ZN7T_F_NECC1Ev(%struct.T_F_NEC* %t_f_nec)
// CHECK: call void @_ZN8T_FA_NECC1Ev(%struct.T_FA_NEC* %t_fa_nec)