[llvm] r361649 - AMDGPU: Boost inline threshold with addrspacecasted alloca arguments

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Fri May 24 09:52:35 PDT 2019


Author: arsenm
Date: Fri May 24 09:52:35 2019
New Revision: 361649

URL: http://llvm.org/viewvc/llvm-project?rev=361649&view=rev
Log:
AMDGPU: Boost inline threshold with addrspacecasted alloca arguments

The threshold computation was skipping GetUnderlyingObject for
non-private addresses, but an alloca can also be found through an
addrspacecast if the pointer argument is flat.
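
For illustration, the shape of IR this now catches is an alloca whose
address escapes through an addrspacecast before the call. A minimal
sketch (not taken from this commit; @callee is a placeholder name):

  declare void @callee([64 x float]*)

  define amdgpu_kernel void @caller() {
    %buf = alloca [64 x float], align 4, addrspace(5)
    %flat = addrspacecast [64 x float] addrspace(5)* %buf to [64 x float]*
    call void @callee([64 x float]* %flat)
    ret void
  }

GetUnderlyingObject looks through the addrspacecast, so the alloca is
still found and the threshold boost applies even though the argument's
address space is flat rather than private.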

Added:
    llvm/trunk/test/Transforms/Inline/AMDGPU/amdgpu-inline-alloca-argument.ll
Modified:
    llvm/trunk/lib/Target/AMDGPU/AMDGPUInline.cpp

Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUInline.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUInline.cpp?rev=361649&r1=361648&r2=361649&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPUInline.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPUInline.cpp Fri May 24 09:52:35 2019
@@ -123,10 +123,11 @@ unsigned AMDGPUInliner::getInlineThresho
   uint64_t AllocaSize = 0;
   SmallPtrSet<const AllocaInst *, 8> AIVisited;
   for (Value *PtrArg : CS.args()) {
-    Type *Ty = PtrArg->getType();
-    if (!Ty->isPointerTy() ||
-        Ty->getPointerAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS)
+    PointerType *Ty = dyn_cast<PointerType>(PtrArg->getType());
+    if (!Ty || (Ty->getAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS &&
+                Ty->getAddressSpace() != AMDGPUAS::FLAT_ADDRESS))
       continue;
+
     PtrArg = GetUnderlyingObject(PtrArg, DL);
     if (const AllocaInst *AI = dyn_cast<AllocaInst>(PtrArg)) {
       if (!AI->isStaticAlloca() || !AIVisited.insert(AI).second)

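Read in full, the check after this change amounts to the following
self-contained sketch. This is an approximation: everything past the
visible context above, including the ArgAllocaCutoff/ArgAllocaCost
handling, is assumed from the surrounding file rather than shown in
this hunk.

  // Sketch only: the loop header mirrors the visible hunk; the size
  // accumulation and the ArgAllocaCutoff/ArgAllocaCost names are
  // assumptions about the surrounding file, not part of this diff.
  static unsigned argAllocaBoostSketch(CallSite CS, const DataLayout &DL) {
    uint64_t AllocaSize = 0;
    SmallPtrSet<const AllocaInst *, 8> AIVisited;
    for (Value *PtrArg : CS.args()) {
      // After this change: accept flat pointers too, not just private ones.
      PointerType *Ty = dyn_cast<PointerType>(PtrArg->getType());
      if (!Ty || (Ty->getAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS &&
                  Ty->getAddressSpace() != AMDGPUAS::FLAT_ADDRESS))
        continue;

      // GetUnderlyingObject strips addrspacecasts (and GEPs/bitcasts),
      // so a flat pointer argument can still resolve to an alloca.
      PtrArg = GetUnderlyingObject(PtrArg, DL);
      if (const AllocaInst *AI = dyn_cast<AllocaInst>(PtrArg)) {
        if (!AI->isStaticAlloca() || !AIVisited.insert(AI).second)
          continue;
        AllocaSize += DL.getTypeAllocSize(AI->getAllocatedType());
        if (AllocaSize > ArgAllocaCutoff) // too much stack to promote anyway
          return 0;
      }
    }
    return AllocaSize ? ArgAllocaCost : 0;
  }
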
Added: llvm/trunk/test/Transforms/Inline/AMDGPU/amdgpu-inline-alloca-argument.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/Inline/AMDGPU/amdgpu-inline-alloca-argument.ll?rev=361649&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/Inline/AMDGPU/amdgpu-inline-alloca-argument.ll (added)
+++ llvm/trunk/test/Transforms/Inline/AMDGPU/amdgpu-inline-alloca-argument.ll Fri May 24 09:52:35 2019
@@ -0,0 +1,70 @@
+; RUN: opt -mtriple=amdgcn--amdhsa -S -amdgpu-inline -inline-threshold=0 < %s | FileCheck %s
+
+target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
+
+define void @use_flat_ptr_arg(float* nocapture %p) {
+entry:
+  %tmp1 = load float, float* %p, align 4
+  %div = fdiv float 1.000000e+00, %tmp1
+  %add0 = fadd float %div, 1.0
+  %add1 = fadd float %add0, 1.0
+  %add2 = fadd float %add1, 1.0
+  %add3 = fadd float %add2, 1.0
+  %add4 = fadd float %add3, 1.0
+  %add5 = fadd float %add4, 1.0
+  %add6 = fadd float %add5, 1.0
+  %add7 = fadd float %add6, 1.0
+  %add8 = fadd float %add7, 1.0
+  %add9 = fadd float %add8, 1.0
+  %add10 = fadd float %add9, 1.0
+  store float %add10, float* %p, align 4
+  ret void
+}
+
+define void @use_private_ptr_arg(float addrspace(5)* nocapture %p) {
+entry:
+  %tmp1 = load float, float addrspace(5)* %p, align 4
+  %div = fdiv float 1.000000e+00, %tmp1
+  %add0 = fadd float %div, 1.0
+  %add1 = fadd float %add0, 1.0
+  %add2 = fadd float %add1, 1.0
+  %add3 = fadd float %add2, 1.0
+  %add4 = fadd float %add3, 1.0
+  %add5 = fadd float %add4, 1.0
+  %add6 = fadd float %add5, 1.0
+  %add7 = fadd float %add6, 1.0
+  %add8 = fadd float %add7, 1.0
+  %add9 = fadd float %add8, 1.0
+  %add10 = fadd float %add9, 1.0
+  store float %add10, float addrspace(5)* %p, align 4
+  ret void
+}
+
+; Test that the inline threshold is boosted if called with an
+; addrspacecasted alloca.
+; CHECK-LABEL: @test_inliner_flat_ptr(
+; CHECK: call i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NOT: call
+; CHECK-NOT: call
+define amdgpu_kernel void @test_inliner_flat_ptr(float addrspace(1)* nocapture %a, i32 %n) {
+entry:
+  %pvt_arr = alloca [64 x float], align 4, addrspace(5)
+  %tid = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %arrayidx = getelementptr inbounds float, float addrspace(1)* %a, i32 %tid
+  %tmp2 = load float, float addrspace(1)* %arrayidx, align 4
+  %add = add i32 %tid, 1
+  %arrayidx2 = getelementptr inbounds float, float addrspace(1)* %a, i32 %add
+  %tmp5 = load float, float addrspace(1)* %arrayidx2, align 4
+  %or = or i32 %tid, %n
+  %arrayidx5 = getelementptr inbounds [64 x float], [64 x float] addrspace(5)* %pvt_arr, i32 0, i32 %or
+  %arrayidx7 = getelementptr inbounds [64 x float], [64 x float] addrspace(5)* %pvt_arr, i32 0, i32 %or
+  %to.flat = addrspacecast float addrspace(5)* %arrayidx7 to float*
+  call void @use_private_ptr_arg(float addrspace(5)* %arrayidx7)
+  call void @use_flat_ptr_arg(float* %to.flat)
+  ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x() #1
+
+attributes #0 = { noinline }
+attributes #1 = { nounwind readnone }

More information about the llvm-commits mailing list