[llvm] r343657 - AMDGPU: Always run AMDGPUAlwaysInline
Matt Arsenault via llvm-commits
llvm-commits at lists.llvm.org
Tue Oct 2 19:47:26 PDT 2018
Author: arsenm
Date: Tue Oct 2 19:47:25 2018
New Revision: 343657
URL: http://llvm.org/viewvc/llvm-project?rev=343657&view=rev
Log:
AMDGPU: Always run AMDGPUAlwaysInline
Even if calls are enabled, it still needs to be run
to force inlining of functions that use LDS.
Added:
llvm/trunk/test/CodeGen/AMDGPU/force-alwaysinline-lds-global-address-codegen.ll
Modified:
llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp?rev=343657&r1=343656&r2=343657&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp Tue Oct 2 19:47:25 2018
@@ -605,18 +605,15 @@ void AMDGPUPassConfig::addIRPasses() {
addPass(createAtomicExpandPass());
addPass(createAMDGPULowerIntrinsicsPass());
- if (TM.getTargetTriple().getArch() == Triple::r600 ||
- !EnableAMDGPUFunctionCalls) {
- // Function calls are not supported, so make sure we inline everything.
- addPass(createAMDGPUAlwaysInlinePass());
- addPass(createAlwaysInlinerLegacyPass());
- // We need to add the barrier noop pass, otherwise adding the function
- // inlining pass will cause all of the PassConfigs passes to be run
- // one function at a time, which means if we have a module with two
- // functions, then we will generate code for the first function
- // without ever running any passes on the second.
- addPass(createBarrierNoopPass());
- }
+ // Function calls are not supported, so make sure we inline everything.
+ addPass(createAMDGPUAlwaysInlinePass());
+ addPass(createAlwaysInlinerLegacyPass());
+ // We need to add the barrier noop pass, otherwise adding the function
+ // inlining pass will cause all of the PassConfigs passes to be run
+ // one function at a time, which means if we have a module with two
+ // functions, then we will generate code for the first function
+ // without ever running any passes on the second.
+ addPass(createBarrierNoopPass());
if (TM.getTargetTriple().getArch() == Triple::amdgcn) {
// TODO: May want to move later or split into an early and late one.
Added: llvm/trunk/test/CodeGen/AMDGPU/force-alwaysinline-lds-global-address-codegen.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/force-alwaysinline-lds-global-address-codegen.ll?rev=343657&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/force-alwaysinline-lds-global-address-codegen.ll (added)
+++ llvm/trunk/test/CodeGen/AMDGPU/force-alwaysinline-lds-global-address-codegen.ll Tue Oct 2 19:47:25 2018
@@ -0,0 +1,21 @@
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -amdgpu-function-calls -amdgpu-stress-function-calls < %s | FileCheck -check-prefix=GCN %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -amdgpu-stress-function-calls < %s | FileCheck -check-prefix=GCN %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa < %s | FileCheck -check-prefix=GCN %s
+
+ at lds0 = addrspace(3) global i32 undef, align 4
+
+; GCN-NOT: load_lds_simple
+
+define internal i32 @load_lds_simple() {
+ %load = load i32, i32 addrspace(3)* @lds0, align 4
+ ret i32 %load
+}
+
+; GCN-LABEL: {{^}}kernel:
+; GCN: v_mov_b32_e32 [[ADDR:v[0-9]+]], 0
+; GCN: ds_read_b32 v{{[0-9]+}}, [[ADDR]]
+define amdgpu_kernel void @kernel(i32 addrspace(1)* %out) {
+ %call = call i32 @load_lds_simple()
+ store i32 %call, i32 addrspace(1)* %out
+ ret void
+}
More information about the llvm-commits
mailing list