[llvm] r322119 - [AMDGPU] Fixed incorrect uniform branch condition

Tim Renouf via llvm-commits llvm-commits at lists.llvm.org
Tue Jan 9 13:34:43 PST 2018


Author: tpr
Date: Tue Jan  9 13:34:43 2018
New Revision: 322119

URL: http://llvm.org/viewvc/llvm-project?rev=322119&view=rev
Log:
[AMDGPU] Fixed incorrect uniform branch condition

Summary:
I hit a case where multiple nested uniform ifs resulted in code that did
v_cmp comparisons, combined the results with s_and_b64, s_or_b64 and
s_xor_b64, and then used the resulting mask in s_cbranch_vccnz without
first ensuring that the bits for inactive lanes were clear.
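
For illustration, the problematic pattern looked roughly like this (a
hand-written sketch, not taken from the failing case; the registers and
branch target are invented). v_cmp writes 0 to the result bits for
inactive lanes, but negating the mask with s_xor_b64 sets those bits:

    v_cmp_lt_f32_e64 s[0:1], v0, v1  ; inactive-lane bits are 0
    s_xor_b64 s[0:1], s[0:1], -1     ; negation sets inactive-lane bits to 1
    s_mov_b64 vcc, s[0:1]
    s_cbranch_vccnz BB0_1            ; may be taken even if every active
                                     ; lane computed false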

There was already code for inserting an "s_and_b64 vcc, exec, vcc" to
clear bits for inactive lanes in the case that the branch is instruction
selected as s_cbranch_scc1 and is then changed to s_cbranch_vccnz in
SIFixSGPRCopies. I have added the same code into
AMDGPUDAGToDAGISel::SelectBRCOND for the case that the branch is
instruction selected directly as s_cbranch_vccnz.

This de-optimizes the code in some cases where the s_and is not needed
because vcc is already the result of a v_cmp, or of multiple v_cmp
instructions combined by s_and/s_or, all of which leave the bits for
inactive lanes clear. We should add a pass to re-optimize those cases.
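
For example, in a sketch like the following (again with invented
registers), the bits for inactive lanes are already known to be 0, so
the newly inserted s_and is redundant:

    v_cmp_lt_f32_e64 s[0:1], v0, v1  ; v_cmp zeroes inactive-lane bits
    v_cmp_gt_f32_e64 s[2:3], v2, v3  ; likewise
    s_and_b64 s[0:1], s[0:1], s[2:3] ; 0 & 0 = 0, so still clean
    s_and_b64 vcc, exec, s[0:1]      ; inserted by this patch, but redundant
    s_cbranch_vccnz BB0_1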

Reviewers: arsenm, kzhuravl

Subscribers: wdng, yaxunl, t-tye, llvm-commits, dstuttard, timcorringham, nhaehnle

Differential Revision: https://reviews.llvm.org/D41292

Added:
    llvm/trunk/test/CodeGen/AMDGPU/scalar-branch-missing-and-exec.ll
Modified:
    llvm/trunk/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
    llvm/trunk/test/CodeGen/AMDGPU/branch-relaxation.ll
    llvm/trunk/test/CodeGen/AMDGPU/cf-loop-on-constant.ll
    llvm/trunk/test/CodeGen/AMDGPU/nested-loop-conditions.ll
    llvm/trunk/test/CodeGen/AMDGPU/select-opt.ll
    llvm/trunk/test/CodeGen/AMDGPU/skip-if-dead.ll
    llvm/trunk/test/CodeGen/AMDGPU/smrd-vccz-bug.ll
    llvm/trunk/test/CodeGen/AMDGPU/uniform-cfg.ll

Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp?rev=322119&r1=322118&r2=322119&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp Tue Jan  9 13:34:43 2018
@@ -1674,6 +1674,26 @@ void AMDGPUDAGToDAGISel::SelectBRCOND(SD
   unsigned CondReg = UseSCCBr ? AMDGPU::SCC : AMDGPU::VCC;
   SDLoc SL(N);
 
+  if (!UseSCCBr) {
+    // This is the case that we are selecting to S_CBRANCH_VCCNZ.  We have not
+    // analyzed what generates the vcc value, so we do not know whether vcc
+    // bits for disabled lanes are 0.  Thus we need to mask out bits for
+    // disabled lanes.
+    //
+    // In the case that we select S_CBRANCH_SCC1 and it gets changed to
+    // S_CBRANCH_VCCNZ in SIFixSGPRCopies, the S_AND is instead inserted by
+    // SIInstrInfo::moveToVALU, which SIFixSGPRCopies calls.
+    //
+    // We could add an analysis here of what generates the vcc value and omit
+    // the S_AND when it is unnecessary. But it would be better to add a
+    // separate pass after SIFixSGPRCopies that removes unnecessary S_ANDs,
+    // so that it catches both cases.
+    Cond = SDValue(CurDAG->getMachineNode(AMDGPU::S_AND_B64, SL, MVT::i1,
+                               CurDAG->getRegister(AMDGPU::EXEC, MVT::i1),
+                               Cond),
+                   0);
+  }
+
   SDValue VCC = CurDAG->getCopyToReg(N->getOperand(0), SL, CondReg, Cond);
   CurDAG->SelectNodeTo(N, BrOp, MVT::Other,
                        N->getOperand(2), // Basic Block

Modified: llvm/trunk/test/CodeGen/AMDGPU/branch-relaxation.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/branch-relaxation.ll?rev=322119&r1=322118&r2=322119&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/branch-relaxation.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/branch-relaxation.ll Tue Jan  9 13:34:43 2018
@@ -100,7 +100,8 @@ bb3:
 ; GCN-LABEL: {{^}}uniform_conditional_min_long_forward_vcnd_branch:
 ; GCN: s_load_dword [[CND:s[0-9]+]]
 ; GCN-DAG: v_mov_b32_e32 [[V_CND:v[0-9]+]], [[CND]]
-; GCN-DAG: v_cmp_eq_f32_e64 vcc, [[CND]], 0
+; GCN-DAG: v_cmp_eq_f32_e64 [[UNMASKED:s\[[0-9]+:[0-9]+\]]], [[CND]], 0
+; GCN-DAG: s_and_b64 vcc, exec, [[UNMASKED]]
 ; GCN: s_cbranch_vccz [[LONGBB:BB[0-9]+_[0-9]+]]
 
 ; GCN-NEXT: [[LONG_JUMP:BB[0-9]+_[0-9]+]]: ; %bb0
@@ -500,8 +501,7 @@ ret:
 ; GCN: s_setpc_b64
 
 ; GCN: [[LONG_BR_DEST0]]
-; GCN: v_cmp_ne_u32_e32
-; GCN-NEXT: s_cbranch_vccz
+; GCN: s_cbranch_vccz
 ; GCN: s_setpc_b64
 
 ; GCN: s_endpgm
@@ -520,6 +520,11 @@ bb9:
   br i1 %tmp12, label %bb19, label %bb14
 
 bb13:                                             ; preds = %bb
+  call void asm sideeffect
+  "v_nop_e64
+   v_nop_e64
+   v_nop_e64
+   v_nop_e64", ""() #0
   br i1 %tmp6, label %bb19, label %bb14
 
 bb14:                                             ; preds = %bb13, %bb9

Modified: llvm/trunk/test/CodeGen/AMDGPU/cf-loop-on-constant.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/cf-loop-on-constant.ll?rev=322119&r1=322118&r2=322119&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/cf-loop-on-constant.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/cf-loop-on-constant.ll Tue Jan  9 13:34:43 2018
@@ -95,7 +95,7 @@ for.body:
 
 ; GCN-LABEL: {{^}}loop_arg_0:
 ; GCN: v_and_b32_e32 v{{[0-9]+}}, 1, v{{[0-9]+}}
-; GCN: v_cmp_eq_u32_e32 vcc, 1,
+; GCN: v_cmp_eq_u32{{[^,]*}}, 1,
 
 ; GCN: [[LOOPBB:BB[0-9]+_[0-9]+]]
 ; GCN: s_add_i32 s{{[0-9]+}}, s{{[0-9]+}}, 0x80

Modified: llvm/trunk/test/CodeGen/AMDGPU/nested-loop-conditions.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/nested-loop-conditions.ll?rev=322119&r1=322118&r2=322119&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/nested-loop-conditions.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/nested-loop-conditions.ll Tue Jan  9 13:34:43 2018
@@ -63,8 +63,7 @@
 ; GCN-NEXT: s_cbranch_scc1
 
 ; FIXME: Should fold to unconditional branch?
-; GCN: s_mov_b64 vcc, -1
-; GCN-NEXT: ; implicit-def
+; GCN: ; implicit-def
 ; GCN: s_cbranch_vccz
 
 ; GCN: ds_read_b32

Added: llvm/trunk/test/CodeGen/AMDGPU/scalar-branch-missing-and-exec.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/scalar-branch-missing-and-exec.ll?rev=322119&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/scalar-branch-missing-and-exec.ll (added)
+++ llvm/trunk/test/CodeGen/AMDGPU/scalar-branch-missing-and-exec.ll Tue Jan  9 13:34:43 2018
@@ -0,0 +1,54 @@
+; RUN: llc -march=amdgcn -mcpu=gfx600 -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -march=amdgcn -mcpu=gfx700 -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -march=amdgcn -mcpu=gfx800 -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck %s
+
+; This checks for a bug where uniform control flow could result in multiple
+; v_cmp results being combined with s_and_b64, s_or_b64 and s_xor_b64, and
+; the resulting mask being used in s_cbranch_vccnz without first ensuring
+; that the mask has bits clear for inactive lanes.
+; The problematic op is s_xor_b64 since, unlike the other two, it can
+; actually set bits for inactive lanes.
+;
+; The check for an s_xor_b64 just verifies that this test still exercises
+; what it is supposed to exercise. If the s_xor_b64 disappears due to some
+; other change, that does not necessarily mean the bug has reappeared.
+;
+; The check for "s_and_b64 vcc, exec, something" verifies that the bug is fixed.
+
+; CHECK: {{^}}main:
+; CHECK: s_xor_b64
+; CHECK: s_and_b64 vcc, exec,
+
+define amdgpu_cs void @main(i32 inreg %arg) {
+.entry:
+  %tmp44 = load volatile <2 x float>, <2 x float> addrspace(1)* undef
+  %tmp16 = load volatile float, float addrspace(1)* undef
+  %tmp22 = load volatile float, float addrspace(1)* undef
+  %tmp25 = load volatile float, float addrspace(1)* undef
+  %tmp31 = fcmp olt float %tmp16, 0x3FA99999A0000000
+  br i1 %tmp31, label %bb, label %.exit.thread
+
+bb:                                               ; preds = %.entry
+  %tmp42 = fcmp olt float %tmp25, 0x3FA99999A0000000
+  br i1 %tmp42, label %bb43, label %.exit.thread
+
+bb43:
+  %tmp46 = fcmp olt <2 x float> %tmp44, <float 0x3FA99999A0000000, float 0x3FA99999A0000000>
+  %tmp47 = extractelement <2 x i1> %tmp46, i32 0
+  %tmp48 = extractelement <2 x i1> %tmp46, i32 1
+  %tmp49 = and i1 %tmp47, %tmp48
+  br i1 %tmp49, label %bb50, label %.exit.thread
+
+bb50:
+  %tmp53 = fcmp olt float %tmp22, 0x3FA99999A0000000
+  br i1 %tmp53, label %.exit3.i, label %.exit.thread
+
+.exit3.i:
+  store volatile i32 0, i32 addrspace(1)* undef
+  br label %.exit.thread
+
+.exit.thread:
+  ret void
+}
+

Modified: llvm/trunk/test/CodeGen/AMDGPU/select-opt.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/select-opt.ll?rev=322119&r1=322118&r2=322119&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/select-opt.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/select-opt.ll Tue Jan  9 13:34:43 2018
@@ -134,8 +134,8 @@ define amdgpu_kernel void @opt_select_i6
 }
 
 ; GCN-LABEL: {{^}}regression:
-; GCN: v_cmp_neq_f32_e64 vcc
-; GCN: v_cmp_neq_f32_e64 vcc, s{{[0-9]+}}, 0
+; GCN: v_cmp_neq_f32_e64
+; GCN: v_cmp_neq_f32_e64 {{[^,]*}}, s{{[0-9]+}}, 0
 ; GCN: v_cmp_ne_u32_e32 vcc, 0, v{{[0-9]+}}
 
 define amdgpu_kernel void @regression(float addrspace(1)* %out, float %c0, float %c1) #0 {

Modified: llvm/trunk/test/CodeGen/AMDGPU/skip-if-dead.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/skip-if-dead.ll?rev=322119&r1=322118&r2=322119&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/skip-if-dead.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/skip-if-dead.ll Tue Jan  9 13:34:43 2018
@@ -267,7 +267,7 @@ exit:
 
 ; CHECK: [[PHIBB]]:
 ; CHECK: v_cmp_eq_f32_e32 vcc, 0, [[PHIREG]]
-; CHECK-NEXT: s_cbranch_vccz [[ENDBB:BB[0-9]+_[0-9]+]]
+; CHECK: s_cbranch_vccz [[ENDBB:BB[0-9]+_[0-9]+]]
 
 ; CHECK: ; %bb10
 ; CHECK: v_mov_b32_e32 v{{[0-9]+}}, 9
@@ -302,14 +302,14 @@ end:
 
 ; CHECK-LABEL: {{^}}no_skip_no_successors:
 ; CHECK: v_cmp_nge_f32
-; CHECK-NEXT: s_cbranch_vccz [[SKIPKILL:BB[0-9]+_[0-9]+]]
+; CHECK: s_cbranch_vccz [[SKIPKILL:BB[0-9]+_[0-9]+]]
 
 ; CHECK: ; %bb6
 ; CHECK: s_mov_b64 exec, 0
 
 ; CHECK: [[SKIPKILL]]:
 ; CHECK: v_cmp_nge_f32_e32 vcc
-; CHECK-NEXT: %bb.3: ; %bb5
+; CHECK: %bb.3: ; %bb5
 ; CHECK-NEXT: .Lfunc_end{{[0-9]+}}
 define amdgpu_ps void @no_skip_no_successors(float inreg %arg, float inreg %arg1) #0 {
 bb:

Modified: llvm/trunk/test/CodeGen/AMDGPU/smrd-vccz-bug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/smrd-vccz-bug.ll?rev=322119&r1=322118&r2=322119&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/smrd-vccz-bug.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/smrd-vccz-bug.ll Tue Jan  9 13:34:43 2018
@@ -4,7 +4,7 @@
 
 ; GCN-FUNC: {{^}}vccz_workaround:
 ; GCN: s_load_dword s{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], 0x0
-; GCN: v_cmp_neq_f32_e64 vcc, s{{[0-9]+}}, 0{{$}}
+; GCN: v_cmp_neq_f32_e64 {{[^,]*}}, s{{[0-9]+}}, 0{{$}}
 ; VCCZ-BUG: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; VCCZ-BUG: s_mov_b64 vcc, vcc
 ; NOVCCZ-BUG-NOT: s_mov_b64 vcc, vcc

Modified: llvm/trunk/test/CodeGen/AMDGPU/uniform-cfg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/uniform-cfg.ll?rev=322119&r1=322118&r2=322119&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/uniform-cfg.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/uniform-cfg.ll Tue Jan  9 13:34:43 2018
@@ -251,7 +251,7 @@ ENDIF:
 ; GCN: s_load_dword [[COND:s[0-9]+]]
 ; GCN: s_cmp_lt_i32 [[COND]], 1
 ; GCN: s_cbranch_scc1 [[EXIT:[A-Za-z0-9_]+]]
-; GCN: v_cmp_gt_i32_e64 vcc, [[COND]], 0{{$}}
+; GCN: v_cmp_gt_i32_e64 {{[^,]*}}, [[COND]], 0{{$}}
 ; GCN: s_cbranch_vccz [[BODY:[A-Za-z0-9_]+]]
 ; GCN: {{^}}[[EXIT]]:
 ; GCN: s_endpgm
