[llvm] r361202 - AMDGPU: Force skip branches over calls

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Mon May 20 15:04:42 PDT 2019


Author: arsenm
Date: Mon May 20 15:04:42 2019
New Revision: 361202

URL: http://llvm.org/viewvc/llvm-project?rev=361202&view=rev
Log:
AMDGPU: Force skip branches over calls

Unfortunately, the way SIInsertSkips works is backwards: the skip branch
is nominally an optimization, but in some cases it is required for
correctness. r338235 added handling of special cases where skipping is
mandatory to avoid side effects when no lanes are active. It
conservatively handled inline asm correctly, but the same logic needs to
apply to calls.
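
The enforced pattern looks like the following (illustrative GCN assembly;
the register operands and block label are made up, but the instruction
sequence matches the CHECK lines in the test added below):

    s_and_saveexec_b64 s[4:5], vcc  ; mask off lanes not taking the branch
    ; mask branch BB0_2
    s_cbranch_execz BB0_2           ; forced skip: never reach the call
                                    ; sequence with EXEC == 0
    ...
    s_swappc_b64 s[30:31], s[6:7]   ; the call itself
  BB0_2: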

Usually the call sequence is larger than the skip threshold anyway,
although the way the instruction count is computed is really broken, so
I'm not sure anything was likely to hit this in practice.
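
For reference, the decision SIInsertSkips makes is roughly the following
(a simplified sketch, not the in-tree code; instructionsBetween is a
stand-in for the pass's own walk over the blocks between the branch and
its target):

    bool shouldSkip(const MachineBasicBlock &From,
                    const MachineBasicBlock &To) {
      unsigned NumInstr = 0;
      for (const MachineInstr &MI : instructionsBetween(From, To)) {
        // Skipping is mandatory if MI could have side effects when
        // EXEC == 0; after this patch that covers calls as well as
        // inline asm.
        if (TII->hasUnwantedEffectsWhenEXECEmpty(MI))
          return true;

        // Otherwise skip only when the region is long enough for the
        // branch to pay for itself. This count is the part described as
        // broken above: it is a rough instruction count, not an accurate
        // size estimate.
        if (++NumInstr >= SkipThreshold)
          return true;
      }
      return false;
    }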

Added:
    llvm/trunk/test/CodeGen/AMDGPU/call-skip.ll
Modified:
    llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.cpp

Modified: llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.cpp?rev=361202&r1=361201&r2=361202&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.cpp Mon May 20 15:04:42 2019
@@ -2486,7 +2486,7 @@ bool SIInstrInfo::hasUnwantedEffectsWhen
       Opcode == AMDGPU::DS_ORDERED_COUNT)
     return true;
 
-  if (MI.isInlineAsm())
+  if (MI.isCall() || MI.isInlineAsm())
     return true; // conservative assumption
 
   // These are like SALU instructions in terms of effects, so it's questionable

Added: llvm/trunk/test/CodeGen/AMDGPU/call-skip.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/call-skip.ll?rev=361202&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/call-skip.ll (added)
+++ llvm/trunk/test/CodeGen/AMDGPU/call-skip.ll Mon May 20 15:04:42 2019
@@ -0,0 +1,67 @@
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii < %s | FileCheck -enable-var-scope -check-prefix=GCN %s
+
+; A call should be skipped if all lanes are zero, since we don't know
+; what side effects should be avoided inside the call.
+define hidden void @func() #1 {
+  ret void
+}
+
+; GCN-LABEL: {{^}}if_call:
+; GCN: s_and_saveexec_b64
+; GCN-NEXT: ; mask branch [[END:BB[0-9]+_[0-9]+]]
+; GCN-NEXT: s_cbranch_execz [[END]]
+; GCN: s_swappc_b64
+; GCN: [[END]]:
+define void @if_call(i32 %flag) #0 {
+  %cc = icmp eq i32 %flag, 0
+  br i1 %cc, label %call, label %end
+
+call:
+  call void @func()
+  br label %end
+
+end:
+  ret void
+}
+
+; GCN-LABEL: {{^}}if_asm:
+; GCN: s_and_saveexec_b64
+; GCN-NEXT: ; mask branch [[END:BB[0-9]+_[0-9]+]]
+; GCN-NEXT: s_cbranch_execz [[END]]
+; GCN: ; sample asm
+; GCN: [[END]]:
+define void @if_asm(i32 %flag) #0 {
+  %cc = icmp eq i32 %flag, 0
+  br i1 %cc, label %call, label %end
+
+call:
+  call void asm sideeffect "; sample asm", ""()
+  br label %end
+
+end:
+  ret void
+}
+
+; GCN-LABEL: {{^}}if_call_kernel:
+; GCN: s_and_saveexec_b64
+; GCN-NEXT: ; mask branch [[END:BB[0-9]+_[0-9]+]]
+; GCN-NEXT: s_cbranch_execz [[END]]
+; GCN: s_swappc_b64
+define amdgpu_kernel void @if_call_kernel() #0 {
+  %id = call i32 @llvm.amdgcn.workitem.id.x()
+  %cc = icmp eq i32 %id, 0
+  br i1 %cc, label %call, label %end
+
+call:
+  call void @func()
+  br label %end
+
+end:
+  ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x() #2
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind noinline }
+attributes #2 = { nounwind readnone speculatable }
