[llvm] 37c452a - AMDGPU/GlobalISel: Adjust branch target when lowering loop intrinsic

Matt Arsenault via llvm-commits <llvm-commits at lists.llvm.org>
Tue Feb 18 06:35:55 PST 2020


Author: Matt Arsenault
Date: 2020-02-18T06:35:40-08:00
New Revision: 37c452a2895071dac1782668bfcd884951ec2aa5

URL: https://github.com/llvm/llvm-project/commit/37c452a2895071dac1782668bfcd884951ec2aa5
DIFF: https://github.com/llvm/llvm-project/commit/37c452a2895071dac1782668bfcd884951ec2aa5.diff

LOG: AMDGPU/GlobalISel: Adjust branch target when lowering loop intrinsic

This needs to steal the branch target from the trailing unconditional
branch, like the other control flow intrinsics already do. Previously
SI_LOOP was always given the conditional branch's target, so when an
unconditional G_BR followed, SI_LOOP and the G_BR each ended up with
the other's intended destination.
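
For context, here is a minimal standalone model of the target swap this
change introduces. This is plain C++ rather than the LLVM API; Block,
CondBr, UncondBr, and stealBranchTarget are illustrative names, not
anything from the tree:

#include <cassert>
#include <string>

struct Block { std::string Name; };       // stands in for MachineBasicBlock

struct CondBr   { Block *Target; };       // models G_BRCOND %cond, %Target
struct UncondBr { Block *Target; };       // models G_BR %Target

// SI_LOOP takes over ("steals") the unconditional branch's destination,
// and the unconditional branch is retargeted to the conditional branch's
// old destination. With no trailing G_BR, behavior is unchanged.
Block *stealBranchTarget(CondBr &BrCond, UncondBr *Br) {
  Block *LoopTarget = BrCond.Target;
  if (Br) {
    LoopTarget = Br->Target;     // SI_LOOP branches back to the loop header
    Br->Target = BrCond.Target;  // the trailing branch now exits the loop
  }
  return LoopTarget;
}

int main() {
  Block Header{"bb1"}, Exit{"bb9"};
  CondBr BrCond{&Exit};          // G_BRCOND %break, %bb9
  UncondBr Br{&Header};          // G_BR %bb1
  Block *SILoopTarget = stealBranchTarget(BrCond, &Br);
  assert(SILoopTarget == &Header); // SI_LOOP loops while lanes remain active
  assert(Br.Target == &Exit);      // fallthrough path leaves the loop
  return 0;
}

This mirrors the guarded swap in the hunk below: BrTarget defaults to
BrCond's target and is only replaced when a trailing unconditional
branch exists.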

Added: 
    

Modified: 
    llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
    llvm/test/CodeGen/AMDGPU/GlobalISel/divergent-control-flow.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index 53ce268d5e4a..8443b1515cf8 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -3575,11 +3575,18 @@ bool AMDGPULegalizerInfo::legalizeIntrinsic(MachineInstr &MI,
 
       B.setInstr(*BrCond);
 
-      // FIXME: Need to adjust branch targets based on unconditional branch.
+      MachineBasicBlock *BrTarget = BrCond->getOperand(1).getMBB();
+      if (Br)
+        BrTarget = Br->getOperand(0).getMBB();
+
       Register Reg = MI.getOperand(2).getReg();
       B.buildInstr(AMDGPU::SI_LOOP)
         .addUse(Reg)
-        .addMBB(BrCond->getOperand(1).getMBB());
+        .addMBB(BrTarget);
+
+      if (Br)
+        Br->getOperand(0).setMBB(BrCond->getOperand(1).getMBB());
+
       MI.eraseFromParent();
       BrCond->eraseFromParent();
       MRI.setRegClass(Reg, TRI->getWaveMaskRegClass());

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergent-control-flow.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergent-control-flow.ll
index 9c8fc50f947d..34aca1e80f59 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergent-control-flow.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergent-control-flow.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -global-isel -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s | FileCheck %s
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck %s
 
 ; Make sure the branch targets are correct after lowering llvm.amdgcn.if
 
@@ -198,3 +198,54 @@ bb11:
 bb12:
   ret void
 }
+
+define amdgpu_kernel void @break_loop(i32 %arg) {
+; CHECK-LABEL: break_loop:
+; CHECK:       ; %bb.0: ; %bb
+; CHECK-NEXT:    s_load_dword s2, s[4:5], 0x0
+; CHECK-NEXT:    s_mov_b64 s[0:1], 0
+; CHECK-NEXT:    ; implicit-def: $vgpr1
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    v_subrev_u32_e32 v0, s2, v0
+; CHECK-NEXT:  BB5_1: ; %bb1
+; CHECK-NEXT:    ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    v_add_u32_e32 v1, 1, v1
+; CHECK-NEXT:    v_cmp_le_i32_e32 vcc, 0, v1
+; CHECK-NEXT:    v_cmp_ne_u32_e64 s[2:3], 0, 1
+; CHECK-NEXT:    s_cbranch_vccnz BB5_3
+; CHECK-NEXT:  ; %bb.2: ; %bb4
+; CHECK-NEXT:    ; in Loop: Header=BB5_1 Depth=1
+; CHECK-NEXT:    global_load_dword v2, v[0:1], off
+; CHECK-NEXT:    v_cmp_ne_u32_e64 s[2:3], 0, 1
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_cmp_lt_i32_e32 vcc, v0, v2
+; CHECK-NEXT:    s_xor_b64 s[2:3], vcc, s[2:3]
+; CHECK-NEXT:  BB5_3: ; %Flow
+; CHECK-NEXT:    ; in Loop: Header=BB5_1 Depth=1
+; CHECK-NEXT:    s_and_b64 s[2:3], exec, s[2:3]
+; CHECK-NEXT:    s_or_b64 s[0:1], s[2:3], s[0:1]
+; CHECK-NEXT:    s_andn2_b64 exec, exec, s[0:1]
+; CHECK-NEXT:    s_cbranch_execnz BB5_1
+; CHECK-NEXT:  ; %bb.4: ; %bb9
+; CHECK-NEXT:    s_endpgm
+bb:
+  %id = call i32 @llvm.amdgcn.workitem.id.x()
+  %tmp = sub i32 %id, %arg
+  br label %bb1
+
+bb1:
+  %lsr.iv = phi i32 [ undef, %bb ], [ %lsr.iv.next, %bb4 ]
+  %lsr.iv.next = add i32 %lsr.iv, 1
+  %cmp0 = icmp slt i32 %lsr.iv.next, 0
+  br i1 %cmp0, label %bb4, label %bb9
+
+bb4:
+  %load = load volatile i32, i32 addrspace(1)* undef, align 4
+  %cmp1 = icmp slt i32 %tmp, %load
+  br i1 %cmp1, label %bb1, label %bb9
+
+bb9:
+  ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x()
