[llvm] [AMDGPU][SIPreEmitPeephole] mustRetainExeczBranch: use BranchProbability and TargetSchedModel (PR #109818)
Juan Manuel Martinez Caamaño via llvm-commits
llvm-commits at lists.llvm.org
Wed Sep 25 07:59:14 PDT 2024
https://github.com/jmmartinez updated https://github.com/llvm/llvm-project/pull/109818
>From d02e468dd0144903bad89ea81ada14f7b8b82c5a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Juan=20Manuel=20Martinez=20Caama=C3=B1o?= <juamarti at amd.com>
Date: Fri, 20 Sep 2024 14:24:37 +0200
Subject: [PATCH] [AMDGPU][SIPreEmitPeephole] mustRetainExeczBranch: use
BranchProbability and TargetSchedModel
Remove s_cbranch_execnz branches if the transformation is
profitable according to BranchProbability and TargetSchedModel.
---
llvm/lib/Target/AMDGPU/SIPreEmitPeephole.cpp | 111 +++++-
.../AMDGPU/GlobalISel/fp64-atomics-gfx90a.ll | 18 +-
.../AMDGPU/GlobalISel/mul-known-bits.i64.ll | 12 +-
.../AMDGPU/amdgpu-demote-scc-branches.ll | 9 +-
.../atomic_optimizations_local_pointer.ll | 318 ++++++------------
llvm/test/CodeGen/AMDGPU/atomicrmw-expand.ll | 6 +-
.../CodeGen/AMDGPU/branch-condition-and.ll | 46 ++-
.../dagcombine-v1i8-extractvecelt-crash.ll | 3 +-
llvm/test/CodeGen/AMDGPU/else.ll | 1 -
.../CodeGen/AMDGPU/flat-atomicrmw-fadd.ll | 9 +-
llvm/test/CodeGen/AMDGPU/fptoi.i128.ll | 6 +-
.../CodeGen/AMDGPU/indirect-addressing-si.ll | 3 +-
.../AMDGPU/insert-skips-flat-vmem-ds.mir | 4 +-
.../CodeGen/AMDGPU/insert-skips-gfx10.mir | 20 +-
.../CodeGen/AMDGPU/insert-skips-gfx12.mir | 40 +--
.../insert_waitcnt_for_precise_memory.ll | 18 +-
.../CodeGen/AMDGPU/llvm.amdgcn.reduce.umax.ll | 36 +-
.../CodeGen/AMDGPU/llvm.amdgcn.reduce.umin.ll | 36 +-
.../CodeGen/AMDGPU/local-atomicrmw-fadd.ll | 101 ++----
llvm/test/CodeGen/AMDGPU/ret_jump.ll | 1 -
llvm/test/CodeGen/AMDGPU/sgpr-control-flow.ll | 6 +-
llvm/test/CodeGen/AMDGPU/si-annotate-cf.ll | 6 +-
.../si-unify-exit-return-unreachable.ll | 6 +-
.../CodeGen/AMDGPU/uniform-phi-with-undef.ll | 3 +-
.../AMDGPU/unstructured-cfg-def-use-issue.ll | 9 +-
...dgpu_generated_funcs.ll.generated.expected | 3 +-
26 files changed, 349 insertions(+), 482 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIPreEmitPeephole.cpp b/llvm/lib/Target/AMDGPU/SIPreEmitPeephole.cpp
index 1334029544f999..7c4ccaaff497b5 100644
--- a/llvm/lib/Target/AMDGPU/SIPreEmitPeephole.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPreEmitPeephole.cpp
@@ -15,6 +15,8 @@
#include "GCNSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/TargetSchedule.h"
+#include "llvm/Support/BranchProbability.h"
using namespace llvm;
@@ -41,7 +43,8 @@ class SIPreEmitPeephole : public MachineFunctionPass {
MachineBasicBlock *&TrueMBB,
MachineBasicBlock *&FalseMBB,
SmallVectorImpl<MachineOperand> &Cond);
- bool mustRetainExeczBranch(const MachineBasicBlock &From,
+ bool mustRetainExeczBranch(const MachineBasicBlock &Head,
+ const MachineBasicBlock &From,
const MachineBasicBlock &To) const;
bool removeExeczBranch(MachineInstr &MI, MachineBasicBlock &SrcMBB);
@@ -304,11 +307,95 @@ bool SIPreEmitPeephole::getBlockDestinations(
return true;
}
-bool SIPreEmitPeephole::mustRetainExeczBranch(
- const MachineBasicBlock &From, const MachineBasicBlock &To) const {
+namespace {
+class CostModelBase {
+public:
+ virtual bool isProfitable(const MachineInstr &MI) = 0;
+ virtual ~CostModelBase() = default;
+ static std::unique_ptr<CostModelBase> Create(const MachineBasicBlock &MBB,
+ const MachineBasicBlock &,
+ const SIInstrInfo &TII);
+};
+
+class TrivialCostModel : public CostModelBase {
+ friend CostModelBase;
+
unsigned NumInstr = 0;
- const MachineFunction *MF = From.getParent();
+ const SIInstrInfo &TII;
+
+ TrivialCostModel(const SIInstrInfo &TII) : TII(TII) {}
+
+public:
+ bool isProfitable(const MachineInstr &MI) override {
+ ++NumInstr;
+ if (NumInstr >= SkipThreshold)
+ return false;
+ // These instructions are potentially expensive even if EXEC = 0.
+ if (TII.isSMRD(MI) || TII.isVMEM(MI) || TII.isFLAT(MI) || TII.isDS(MI) ||
+ TII.isWaitcnt(MI.getOpcode()))
+ return false;
+ return true;
+ }
+ ~TrivialCostModel() override = default;
+};
+
+class BranchWeightCostModel : public CostModelBase {
+ friend CostModelBase;
+ BranchProbability BranchProb;
+ const TargetSchedModel &SchedModel;
+ uint64_t BranchCost;
+ uint64_t ThenCyclesCost = 0;
+
+ BranchWeightCostModel(const MachineInstr &Branch, const BranchProbability &BP,
+ const TargetSchedModel &SchedModel)
+ : BranchProb(BP), SchedModel(SchedModel) {
+ assert(!BP.isUnknown());
+ BranchCost = SchedModel.computeInstrLatency(&Branch, false);
+ }
+
+public:
+ bool isProfitable(const MachineInstr &MI) override {
+ ThenCyclesCost += SchedModel.computeInstrLatency(&MI, false);
+
+ // Consider `P = N/D` to be the probability of execnz being true
+ // The transformation is profitable if always executing the 'then' block
+ // is cheaper than executing sometimes 'then' and always
+ // executing s_cbranch_execnz:
+ // * ThenCost <= P*ThenCost + BranchCost
+ // * (1-P) * ThenCost <= BranchCost
+ // * (D-N)/D * ThenCost <= BranchCost
+ uint64_t Numerator = BranchProb.getNumerator();
+ uint64_t Denominator = BranchProb.getDenominator();
+ return (Denominator - Numerator) * ThenCyclesCost <=
+ Denominator * BranchCost;
+ }
+ ~BranchWeightCostModel() override = default;
+};
+
+std::unique_ptr<CostModelBase>
+CostModelBase::Create(const MachineBasicBlock &Head,
+ const MachineBasicBlock &Succ, const SIInstrInfo &TII) {
+ const auto *FromIt = find(Head.successors(), &Succ);
+ assert(FromIt != Head.succ_end());
+
+ BranchProbability ExecNZProb = Head.getSuccProbability(FromIt);
+ const auto &SchedModel = TII.getSchedModel();
+ if (!ExecNZProb.isUnknown()) {
+ return std::unique_ptr<CostModelBase>(new BranchWeightCostModel(
+ *Head.getFirstTerminator(), ExecNZProb, SchedModel));
+ }
+
+ return std::unique_ptr<CostModelBase>(new TrivialCostModel(TII));
+}
+
+bool SIPreEmitPeephole::mustRetainExeczBranch(
+ const MachineBasicBlock &Head, const MachineBasicBlock &From,
+ const MachineBasicBlock &To) const {
+
+ auto CostModel = CostModelBase::Create(Head, From, *TII);
+
+ const MachineFunction *MF = From.getParent();
for (MachineFunction::const_iterator MBBI(&From), ToI(&To), End = MF->end();
MBBI != End && MBBI != ToI; ++MBBI) {
const MachineBasicBlock &MBB = *MBBI;
@@ -326,19 +413,14 @@ bool SIPreEmitPeephole::mustRetainExeczBranch(
if (TII->hasUnwantedEffectsWhenEXECEmpty(MI))
return true;
- // These instructions are potentially expensive even if EXEC = 0.
- if (TII->isSMRD(MI) || TII->isVMEM(MI) || TII->isFLAT(MI) ||
- TII->isDS(MI) || TII->isWaitcnt(MI.getOpcode()))
- return true;
-
- ++NumInstr;
- if (NumInstr >= SkipThreshold)
+ if (!CostModel->isProfitable(MI))
return true;
}
}
return false;
}
+} // namespace
// Returns true if the skip branch instruction is removed.
bool SIPreEmitPeephole::removeExeczBranch(MachineInstr &MI,
@@ -351,8 +433,11 @@ bool SIPreEmitPeephole::removeExeczBranch(MachineInstr &MI,
return false;
// Consider only the forward branches.
- if ((SrcMBB.getNumber() >= TrueMBB->getNumber()) ||
- mustRetainExeczBranch(*FalseMBB, *TrueMBB))
+ if (SrcMBB.getNumber() >= TrueMBB->getNumber())
+ return false;
+
+ // Consider only when it is legal and profitable
+ if (mustRetainExeczBranch(SrcMBB, *FalseMBB, *TrueMBB))
return false;
LLVM_DEBUG(dbgs() << "Removing the execz branch: " << MI);
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/fp64-atomics-gfx90a.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/fp64-atomics-gfx90a.ll
index eb39ca2d7daa7f..45a45d125a5ea0 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/fp64-atomics-gfx90a.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/fp64-atomics-gfx90a.ll
@@ -1726,7 +1726,6 @@ define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat(ptr addrspace(3) %ptr
; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v0, s4, v0
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GFX90A-NEXT: s_cbranch_execz .LBB59_2
; GFX90A-NEXT: ; %bb.1:
; GFX90A-NEXT: s_load_dword s2, s[2:3], 0x24
; GFX90A-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
@@ -1736,7 +1735,7 @@ define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat(ptr addrspace(3) %ptr
; GFX90A-NEXT: v_mov_b32_e32 v2, s2
; GFX90A-NEXT: ds_add_f64 v2, v[0:1]
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: .LBB59_2:
+; GFX90A-NEXT: ; %bb.2:
; GFX90A-NEXT: s_endpgm
;
; GFX940-LABEL: local_atomic_fadd_f64_noret_pat:
@@ -1747,7 +1746,6 @@ define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat(ptr addrspace(3) %ptr
; GFX940-NEXT: v_mbcnt_hi_u32_b32 v0, s4, v0
; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GFX940-NEXT: s_cbranch_execz .LBB59_2
; GFX940-NEXT: ; %bb.1:
; GFX940-NEXT: s_load_dword s2, s[2:3], 0x24
; GFX940-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
@@ -1757,7 +1755,7 @@ define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat(ptr addrspace(3) %ptr
; GFX940-NEXT: v_mov_b32_e32 v2, s2
; GFX940-NEXT: ds_add_f64 v2, v[0:1]
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
-; GFX940-NEXT: .LBB59_2:
+; GFX940-NEXT: ; %bb.2:
; GFX940-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(3) %ptr, double 4.0 seq_cst, !amdgpu.no.fine.grained.memory !0
@@ -1773,7 +1771,6 @@ define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat_flush(ptr addrspace(3
; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v0, s4, v0
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GFX90A-NEXT: s_cbranch_execz .LBB60_2
; GFX90A-NEXT: ; %bb.1:
; GFX90A-NEXT: s_load_dword s2, s[2:3], 0x24
; GFX90A-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
@@ -1783,7 +1780,7 @@ define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat_flush(ptr addrspace(3
; GFX90A-NEXT: v_mov_b32_e32 v2, s2
; GFX90A-NEXT: ds_add_f64 v2, v[0:1]
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: .LBB60_2:
+; GFX90A-NEXT: ; %bb.2:
; GFX90A-NEXT: s_endpgm
;
; GFX940-LABEL: local_atomic_fadd_f64_noret_pat_flush:
@@ -1794,7 +1791,6 @@ define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat_flush(ptr addrspace(3
; GFX940-NEXT: v_mbcnt_hi_u32_b32 v0, s4, v0
; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GFX940-NEXT: s_cbranch_execz .LBB60_2
; GFX940-NEXT: ; %bb.1:
; GFX940-NEXT: s_load_dword s2, s[2:3], 0x24
; GFX940-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
@@ -1804,7 +1800,7 @@ define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat_flush(ptr addrspace(3
; GFX940-NEXT: v_mov_b32_e32 v2, s2
; GFX940-NEXT: ds_add_f64 v2, v[0:1]
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
-; GFX940-NEXT: .LBB60_2:
+; GFX940-NEXT: ; %bb.2:
; GFX940-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(3) %ptr, double 4.0 seq_cst, !amdgpu.no.fine.grained.memory !0
@@ -1820,7 +1816,6 @@ define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat_flush_safe(ptr addrsp
; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v0, s4, v0
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GFX90A-NEXT: s_cbranch_execz .LBB61_2
; GFX90A-NEXT: ; %bb.1:
; GFX90A-NEXT: s_load_dword s2, s[2:3], 0x24
; GFX90A-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
@@ -1830,7 +1825,7 @@ define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat_flush_safe(ptr addrsp
; GFX90A-NEXT: v_mov_b32_e32 v2, s2
; GFX90A-NEXT: ds_add_f64 v2, v[0:1]
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: .LBB61_2:
+; GFX90A-NEXT: ; %bb.2:
; GFX90A-NEXT: s_endpgm
;
; GFX940-LABEL: local_atomic_fadd_f64_noret_pat_flush_safe:
@@ -1841,7 +1836,6 @@ define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat_flush_safe(ptr addrsp
; GFX940-NEXT: v_mbcnt_hi_u32_b32 v0, s4, v0
; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GFX940-NEXT: s_cbranch_execz .LBB61_2
; GFX940-NEXT: ; %bb.1:
; GFX940-NEXT: s_load_dword s2, s[2:3], 0x24
; GFX940-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
@@ -1851,7 +1845,7 @@ define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat_flush_safe(ptr addrsp
; GFX940-NEXT: v_mov_b32_e32 v2, s2
; GFX940-NEXT: ds_add_f64 v2, v[0:1]
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
-; GFX940-NEXT: .LBB61_2:
+; GFX940-NEXT: ; %bb.2:
; GFX940-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(3) %ptr, double 4.0 seq_cst, !amdgpu.no.fine.grained.memory !0
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/mul-known-bits.i64.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/mul-known-bits.i64.ll
index 489f46d1237a36..cd656075efaf95 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/mul-known-bits.i64.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/mul-known-bits.i64.ll
@@ -526,21 +526,19 @@ define amdgpu_kernel void @v_mul64_masked_before_and_in_branch(ptr addrspace(1)
; GFX10-NEXT: v_cmp_ge_u64_e32 vcc_lo, 0, v[2:3]
; GFX10-NEXT: s_and_saveexec_b32 s0, vcc_lo
; GFX10-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX10-NEXT: s_cbranch_execz .LBB10_2
; GFX10-NEXT: ; %bb.1: ; %else
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: v_mad_u64_u32 v[0:1], s1, v2, v4, 0
; GFX10-NEXT: v_mad_u64_u32 v[1:2], s1, v2, v5, v[1:2]
; GFX10-NEXT: ; implicit-def: $vgpr2_vgpr3
; GFX10-NEXT: ; implicit-def: $vgpr4_vgpr5
-; GFX10-NEXT: .LBB10_2: ; %Flow
+; GFX10-NEXT: ; %bb.2: ; %Flow
; GFX10-NEXT: s_andn2_saveexec_b32 s0, s0
-; GFX10-NEXT: s_cbranch_execz .LBB10_4
; GFX10-NEXT: ; %bb.3: ; %if
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: v_mul_lo_u32 v1, v2, v5
; GFX10-NEXT: v_mov_b32_e32 v0, 0
-; GFX10-NEXT: .LBB10_4: ; %endif
+; GFX10-NEXT: ; %bb.4: ; %endif
; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX10-NEXT: v_mov_b32_e32 v2, 0
; GFX10-NEXT: global_store_dwordx2 v2, v[0:1], s[4:5]
@@ -563,7 +561,6 @@ define amdgpu_kernel void @v_mul64_masked_before_and_in_branch(ptr addrspace(1)
; GFX11-NEXT: s_waitcnt vmcnt(1)
; GFX11-NEXT: v_cmpx_ge_u64_e32 0, v[2:3]
; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execz .LBB10_2
; GFX11-NEXT: ; %bb.1: ; %else
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: v_mad_u64_u32 v[0:1], null, v2, v4, 0
@@ -572,14 +569,13 @@ define amdgpu_kernel void @v_mul64_masked_before_and_in_branch(ptr addrspace(1)
; GFX11-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX11-NEXT: v_mov_b32_e32 v1, v3
; GFX11-NEXT: ; implicit-def: $vgpr2_vgpr3
-; GFX11-NEXT: .LBB10_2: ; %Flow
+; GFX11-NEXT: ; %bb.2: ; %Flow
; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB10_4
; GFX11-NEXT: ; %bb.3: ; %if
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: v_mul_lo_u32 v1, v2, v5
; GFX11-NEXT: v_mov_b32_e32 v0, 0
-; GFX11-NEXT: .LBB10_4: ; %endif
+; GFX11-NEXT: ; %bb.4: ; %endif
; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX11-NEXT: v_mov_b32_e32 v2, 0
; GFX11-NEXT: global_store_b64 v2, v[0:1], s[4:5]
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-demote-scc-branches.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-demote-scc-branches.ll
index 9319f0d3f5d40f..1c5b2cefc96f36 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-demote-scc-branches.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-demote-scc-branches.ll
@@ -292,7 +292,6 @@ define void @divergent_br_profitable(i32 noundef inreg %value, ptr addrspace(8)
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_lt_i32_e32 vcc, 0, v0
; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GFX9-NEXT: s_cbranch_execz .LBB5_2
; GFX9-NEXT: ; %bb.1: ; %if.then
; GFX9-NEXT: s_mov_b32 s11, s18
; GFX9-NEXT: s_mov_b32 s10, s17
@@ -301,7 +300,7 @@ define void @divergent_br_profitable(i32 noundef inreg %value, ptr addrspace(8)
; GFX9-NEXT: v_mov_b32_e32 v0, s6
; GFX9-NEXT: v_mov_b32_e32 v1, s19
; GFX9-NEXT: buffer_store_dword v0, v1, s[8:11], 0 offen
-; GFX9-NEXT: .LBB5_2: ; %if.end
+; GFX9-NEXT: ; %bb.2: ; %if.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -311,7 +310,6 @@ define void @divergent_br_profitable(i32 noundef inreg %value, ptr addrspace(8)
; GFX1010-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX1010-NEXT: v_cmp_lt_i32_e32 vcc_lo, 0, v0
; GFX1010-NEXT: s_and_saveexec_b32 s4, vcc_lo
-; GFX1010-NEXT: s_cbranch_execz .LBB5_2
; GFX1010-NEXT: ; %bb.1: ; %if.then
; GFX1010-NEXT: v_mov_b32_e32 v0, s6
; GFX1010-NEXT: v_mov_b32_e32 v1, s19
@@ -320,7 +318,7 @@ define void @divergent_br_profitable(i32 noundef inreg %value, ptr addrspace(8)
; GFX1010-NEXT: s_mov_b32 s9, s16
; GFX1010-NEXT: s_mov_b32 s8, s7
; GFX1010-NEXT: buffer_store_dword v0, v1, s[8:11], 0 offen
-; GFX1010-NEXT: .LBB5_2: ; %if.end
+; GFX1010-NEXT: ; %bb.2: ; %if.end
; GFX1010-NEXT: s_waitcnt_depctr 0xffe3
; GFX1010-NEXT: s_or_b32 exec_lo, exec_lo, s4
; GFX1010-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -331,7 +329,6 @@ define void @divergent_br_profitable(i32 noundef inreg %value, ptr addrspace(8)
; GFX1030-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX1030-NEXT: s_mov_b32 s4, exec_lo
; GFX1030-NEXT: v_cmpx_lt_i32_e32 0, v0
-; GFX1030-NEXT: s_cbranch_execz .LBB5_2
; GFX1030-NEXT: ; %bb.1: ; %if.then
; GFX1030-NEXT: v_mov_b32_e32 v0, s6
; GFX1030-NEXT: v_mov_b32_e32 v1, s19
@@ -340,7 +337,7 @@ define void @divergent_br_profitable(i32 noundef inreg %value, ptr addrspace(8)
; GFX1030-NEXT: s_mov_b32 s9, s16
; GFX1030-NEXT: s_mov_b32 s8, s7
; GFX1030-NEXT: buffer_store_dword v0, v1, s[8:11], 0 offen
-; GFX1030-NEXT: .LBB5_2: ; %if.end
+; GFX1030-NEXT: ; %bb.2: ; %if.end
; GFX1030-NEXT: s_or_b32 exec_lo, exec_lo, s4
; GFX1030-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX1030-NEXT: s_setpc_b64 s[30:31]
diff --git a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_local_pointer.ll b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_local_pointer.ll
index ce90fbed813103..714679c7df8f9f 100644
--- a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_local_pointer.ll
+++ b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_local_pointer.ll
@@ -58,7 +58,6 @@ define amdgpu_kernel void @add_i32_constant(ptr addrspace(1) %out) {
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX8-NEXT: ; implicit-def: $vgpr1
; GFX8-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX8-NEXT: s_cbranch_execz .LBB0_2
; GFX8-NEXT: ; %bb.1:
; GFX8-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
; GFX8-NEXT: s_mul_i32 s4, s4, 5
@@ -67,7 +66,7 @@ define amdgpu_kernel void @add_i32_constant(ptr addrspace(1) %out) {
; GFX8-NEXT: s_mov_b32 m0, -1
; GFX8-NEXT: ds_add_rtn_u32 v1, v1, v2
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: .LBB0_2:
+; GFX8-NEXT: ; %bb.2:
; GFX8-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8-NEXT: v_readfirstlane_b32 s4, v1
@@ -86,7 +85,6 @@ define amdgpu_kernel void @add_i32_constant(ptr addrspace(1) %out) {
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX9-NEXT: ; implicit-def: $vgpr1
; GFX9-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX9-NEXT: s_cbranch_execz .LBB0_2
; GFX9-NEXT: ; %bb.1:
; GFX9-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
; GFX9-NEXT: s_mul_i32 s4, s4, 5
@@ -94,7 +92,7 @@ define amdgpu_kernel void @add_i32_constant(ptr addrspace(1) %out) {
; GFX9-NEXT: v_mov_b32_e32 v2, s4
; GFX9-NEXT: ds_add_rtn_u32 v1, v1, v2
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: .LBB0_2:
+; GFX9-NEXT: ; %bb.2:
; GFX9-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9-NEXT: v_readfirstlane_b32 s4, v1
@@ -274,7 +272,6 @@ define amdgpu_kernel void @add_i32_uniform(ptr addrspace(1) %out, i32 %additive)
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX8-NEXT: ; implicit-def: $vgpr1
; GFX8-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX8-NEXT: s_cbranch_execz .LBB1_2
; GFX8-NEXT: ; %bb.1:
; GFX8-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
@@ -284,7 +281,7 @@ define amdgpu_kernel void @add_i32_uniform(ptr addrspace(1) %out, i32 %additive)
; GFX8-NEXT: s_mov_b32 m0, -1
; GFX8-NEXT: ds_add_rtn_u32 v1, v1, v2
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: .LBB1_2:
+; GFX8-NEXT: ; %bb.2:
; GFX8-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
@@ -305,7 +302,6 @@ define amdgpu_kernel void @add_i32_uniform(ptr addrspace(1) %out, i32 %additive)
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX9-NEXT: ; implicit-def: $vgpr1
; GFX9-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX9-NEXT: s_cbranch_execz .LBB1_2
; GFX9-NEXT: ; %bb.1:
; GFX9-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
@@ -314,7 +310,7 @@ define amdgpu_kernel void @add_i32_uniform(ptr addrspace(1) %out, i32 %additive)
; GFX9-NEXT: v_mov_b32_e32 v2, s4
; GFX9-NEXT: ds_add_rtn_u32 v1, v1, v2
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: .LBB1_2:
+; GFX9-NEXT: ; %bb.2:
; GFX9-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
@@ -528,14 +524,13 @@ define amdgpu_kernel void @add_i32_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: ; implicit-def: $vgpr0
; GFX8_ITERATIVE-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX8_ITERATIVE-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX8_ITERATIVE-NEXT: s_cbranch_execz .LBB2_4
; GFX8_ITERATIVE-NEXT: ; %bb.3:
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v2, s4
; GFX8_ITERATIVE-NEXT: s_mov_b32 m0, -1
; GFX8_ITERATIVE-NEXT: ds_add_rtn_u32 v0, v0, v2
; GFX8_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_ITERATIVE-NEXT: .LBB2_4:
+; GFX8_ITERATIVE-NEXT: ; %bb.4:
; GFX8_ITERATIVE-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8_ITERATIVE-NEXT: v_readfirstlane_b32 s4, v0
@@ -569,13 +564,12 @@ define amdgpu_kernel void @add_i32_varying(ptr addrspace(1) %out) {
; GFX9_ITERATIVE-NEXT: ; implicit-def: $vgpr0
; GFX9_ITERATIVE-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX9_ITERATIVE-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX9_ITERATIVE-NEXT: s_cbranch_execz .LBB2_4
; GFX9_ITERATIVE-NEXT: ; %bb.3:
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v2, s4
; GFX9_ITERATIVE-NEXT: ds_add_rtn_u32 v0, v0, v2
; GFX9_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_ITERATIVE-NEXT: .LBB2_4:
+; GFX9_ITERATIVE-NEXT: ; %bb.4:
; GFX9_ITERATIVE-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9_ITERATIVE-NEXT: v_readfirstlane_b32 s4, v0
@@ -802,13 +796,12 @@ define amdgpu_kernel void @add_i32_varying(ptr addrspace(1) %out) {
; GFX8_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
; GFX8_DPP-NEXT: ; implicit-def: $vgpr0
; GFX8_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX8_DPP-NEXT: s_cbranch_execz .LBB2_2
; GFX8_DPP-NEXT: ; %bb.1:
; GFX8_DPP-NEXT: v_mov_b32_e32 v0, s4
; GFX8_DPP-NEXT: s_mov_b32 m0, -1
; GFX8_DPP-NEXT: ds_add_rtn_u32 v0, v3, v0
; GFX8_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_DPP-NEXT: .LBB2_2:
+; GFX8_DPP-NEXT: ; %bb.2:
; GFX8_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8_DPP-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8_DPP-NEXT: v_readfirstlane_b32 s4, v0
@@ -847,12 +840,11 @@ define amdgpu_kernel void @add_i32_varying(ptr addrspace(1) %out) {
; GFX9_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
; GFX9_DPP-NEXT: ; implicit-def: $vgpr0
; GFX9_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX9_DPP-NEXT: s_cbranch_execz .LBB2_2
; GFX9_DPP-NEXT: ; %bb.1:
; GFX9_DPP-NEXT: v_mov_b32_e32 v0, s4
; GFX9_DPP-NEXT: ds_add_rtn_u32 v0, v3, v0
; GFX9_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_DPP-NEXT: .LBB2_2:
+; GFX9_DPP-NEXT: ; %bb.2:
; GFX9_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9_DPP-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9_DPP-NEXT: v_readfirstlane_b32 s4, v0
@@ -1127,14 +1119,13 @@ define amdgpu_kernel void @add_i32_varying_nouse() {
; GFX8_ITERATIVE-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX8_ITERATIVE-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX8_ITERATIVE-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX8_ITERATIVE-NEXT: s_cbranch_execz .LBB3_4
; GFX8_ITERATIVE-NEXT: ; %bb.3:
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v1, s2
; GFX8_ITERATIVE-NEXT: s_mov_b32 m0, -1
; GFX8_ITERATIVE-NEXT: ds_add_u32 v0, v1
; GFX8_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_ITERATIVE-NEXT: .LBB3_4:
+; GFX8_ITERATIVE-NEXT: ; %bb.4:
; GFX8_ITERATIVE-NEXT: s_endpgm
;
; GFX9_ITERATIVE-LABEL: add_i32_varying_nouse:
@@ -1156,13 +1147,12 @@ define amdgpu_kernel void @add_i32_varying_nouse() {
; GFX9_ITERATIVE-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX9_ITERATIVE-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX9_ITERATIVE-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX9_ITERATIVE-NEXT: s_cbranch_execz .LBB3_4
; GFX9_ITERATIVE-NEXT: ; %bb.3:
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v1, s2
; GFX9_ITERATIVE-NEXT: ds_add_u32 v0, v1
; GFX9_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_ITERATIVE-NEXT: .LBB3_4:
+; GFX9_ITERATIVE-NEXT: ; %bb.4:
; GFX9_ITERATIVE-NEXT: s_endpgm
;
; GFX1064_ITERATIVE-LABEL: add_i32_varying_nouse:
@@ -1318,13 +1308,12 @@ define amdgpu_kernel void @add_i32_varying_nouse() {
; GFX8_DPP-NEXT: s_mov_b32 s0, s2
; GFX8_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3
; GFX8_DPP-NEXT: s_and_saveexec_b64 s[2:3], vcc
-; GFX8_DPP-NEXT: s_cbranch_execz .LBB3_2
; GFX8_DPP-NEXT: ; %bb.1:
; GFX8_DPP-NEXT: v_mov_b32_e32 v0, s0
; GFX8_DPP-NEXT: s_mov_b32 m0, -1
; GFX8_DPP-NEXT: ds_add_u32 v2, v0
; GFX8_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_DPP-NEXT: .LBB3_2:
+; GFX8_DPP-NEXT: ; %bb.2:
; GFX8_DPP-NEXT: s_endpgm
;
; GFX9_DPP-LABEL: add_i32_varying_nouse:
@@ -1351,12 +1340,11 @@ define amdgpu_kernel void @add_i32_varying_nouse() {
; GFX9_DPP-NEXT: s_mov_b32 s0, s2
; GFX9_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3
; GFX9_DPP-NEXT: s_and_saveexec_b64 s[2:3], vcc
-; GFX9_DPP-NEXT: s_cbranch_execz .LBB3_2
; GFX9_DPP-NEXT: ; %bb.1:
; GFX9_DPP-NEXT: v_mov_b32_e32 v0, s0
; GFX9_DPP-NEXT: ds_add_u32 v2, v0
; GFX9_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_DPP-NEXT: .LBB3_2:
+; GFX9_DPP-NEXT: ; %bb.2:
; GFX9_DPP-NEXT: s_endpgm
;
; GFX1064_DPP-LABEL: add_i32_varying_nouse:
@@ -1528,7 +1516,6 @@ define amdgpu_kernel void @add_i64_constant(ptr addrspace(1) %out) {
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 0, v2
; GFX8-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX8-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX8-NEXT: s_cbranch_execz .LBB4_2
; GFX8-NEXT: ; %bb.1:
; GFX8-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
; GFX8-NEXT: s_mul_i32 s4, s4, 5
@@ -1537,7 +1524,7 @@ define amdgpu_kernel void @add_i64_constant(ptr addrspace(1) %out) {
; GFX8-NEXT: s_mov_b32 m0, -1
; GFX8-NEXT: ds_add_rtn_u64 v[0:1], v1, v[0:1]
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: .LBB4_2:
+; GFX8-NEXT: ; %bb.2:
; GFX8-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8-NEXT: v_readfirstlane_b32 s2, v1
@@ -1560,7 +1547,6 @@ define amdgpu_kernel void @add_i64_constant(ptr addrspace(1) %out) {
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v2
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX9-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX9-NEXT: s_cbranch_execz .LBB4_2
; GFX9-NEXT: ; %bb.1:
; GFX9-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
; GFX9-NEXT: s_mul_i32 s4, s4, 5
@@ -1568,7 +1554,7 @@ define amdgpu_kernel void @add_i64_constant(ptr addrspace(1) %out) {
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: ds_add_rtn_u64 v[0:1], v1, v[0:1]
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: .LBB4_2:
+; GFX9-NEXT: ; %bb.2:
; GFX9-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9-NEXT: v_readfirstlane_b32 s2, v1
@@ -1806,7 +1792,6 @@ define amdgpu_kernel void @add_i64_uniform(ptr addrspace(1) %out, i64 %additive)
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v2
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX9-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX9-NEXT: s_cbranch_execz .LBB5_2
; GFX9-NEXT: ; %bb.1:
; GFX9-NEXT: s_bcnt1_i32_b64 s2, s[8:9]
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
@@ -1819,7 +1804,7 @@ define amdgpu_kernel void @add_i64_uniform(ptr addrspace(1) %out, i64 %additive)
; GFX9-NEXT: v_mov_b32_e32 v3, 0
; GFX9-NEXT: ds_add_rtn_u64 v[0:1], v3, v[0:1]
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: .LBB5_2:
+; GFX9-NEXT: ; %bb.2:
; GFX9-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9-NEXT: v_readfirstlane_b32 s0, v1
; GFX9-NEXT: v_readfirstlane_b32 s1, v0
@@ -2071,7 +2056,6 @@ define amdgpu_kernel void @add_i64_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: ; implicit-def: $vgpr3_vgpr4
; GFX8_ITERATIVE-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX8_ITERATIVE-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
-; GFX8_ITERATIVE-NEXT: s_cbranch_execz .LBB6_4
; GFX8_ITERATIVE-NEXT: ; %bb.3:
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v4, s1
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
@@ -2079,7 +2063,7 @@ define amdgpu_kernel void @add_i64_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: s_mov_b32 m0, -1
; GFX8_ITERATIVE-NEXT: ds_add_rtn_u64 v[3:4], v0, v[3:4]
; GFX8_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_ITERATIVE-NEXT: .LBB6_4:
+; GFX8_ITERATIVE-NEXT: ; %bb.4:
; GFX8_ITERATIVE-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX8_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8_ITERATIVE-NEXT: v_readfirstlane_b32 s4, v4
@@ -2120,14 +2104,13 @@ define amdgpu_kernel void @add_i64_varying(ptr addrspace(1) %out) {
; GFX9_ITERATIVE-NEXT: ; implicit-def: $vgpr3_vgpr4
; GFX9_ITERATIVE-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX9_ITERATIVE-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
-; GFX9_ITERATIVE-NEXT: s_cbranch_execz .LBB6_4
; GFX9_ITERATIVE-NEXT: ; %bb.3:
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v4, s1
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v3, s0
; GFX9_ITERATIVE-NEXT: ds_add_rtn_u64 v[3:4], v0, v[3:4]
; GFX9_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_ITERATIVE-NEXT: .LBB6_4:
+; GFX9_ITERATIVE-NEXT: ; %bb.4:
; GFX9_ITERATIVE-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9_ITERATIVE-NEXT: v_readfirstlane_b32 s4, v4
@@ -2423,14 +2406,13 @@ define amdgpu_kernel void @add_i64_varying(ptr addrspace(1) %out) {
; GFX8_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v7
; GFX8_DPP-NEXT: ; implicit-def: $vgpr7_vgpr8
; GFX8_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX8_DPP-NEXT: s_cbranch_execz .LBB6_2
; GFX8_DPP-NEXT: ; %bb.1:
; GFX8_DPP-NEXT: v_mov_b32_e32 v8, s5
; GFX8_DPP-NEXT: v_mov_b32_e32 v7, s4
; GFX8_DPP-NEXT: s_mov_b32 m0, -1
; GFX8_DPP-NEXT: ds_add_rtn_u64 v[7:8], v9, v[7:8]
; GFX8_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_DPP-NEXT: .LBB6_2:
+; GFX8_DPP-NEXT: ; %bb.2:
; GFX8_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8_DPP-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8_DPP-NEXT: v_readfirstlane_b32 s4, v8
@@ -2510,13 +2492,12 @@ define amdgpu_kernel void @add_i64_varying(ptr addrspace(1) %out) {
; GFX9_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v7
; GFX9_DPP-NEXT: ; implicit-def: $vgpr7_vgpr8
; GFX9_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX9_DPP-NEXT: s_cbranch_execz .LBB6_2
; GFX9_DPP-NEXT: ; %bb.1:
; GFX9_DPP-NEXT: v_mov_b32_e32 v8, s5
; GFX9_DPP-NEXT: v_mov_b32_e32 v7, s4
; GFX9_DPP-NEXT: ds_add_rtn_u64 v[7:8], v9, v[7:8]
; GFX9_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_DPP-NEXT: .LBB6_2:
+; GFX9_DPP-NEXT: ; %bb.2:
; GFX9_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9_DPP-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9_DPP-NEXT: v_readfirstlane_b32 s4, v8
@@ -2966,7 +2947,6 @@ define amdgpu_kernel void @add_i64_varying_nouse() {
; GFX8_ITERATIVE-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX8_ITERATIVE-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GFX8_ITERATIVE-NEXT: s_xor_b64 s[2:3], exec, s[2:3]
-; GFX8_ITERATIVE-NEXT: s_cbranch_execz .LBB7_4
; GFX8_ITERATIVE-NEXT: ; %bb.3:
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v0, s0
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v2, 0
@@ -2974,7 +2954,7 @@ define amdgpu_kernel void @add_i64_varying_nouse() {
; GFX8_ITERATIVE-NEXT: s_mov_b32 m0, -1
; GFX8_ITERATIVE-NEXT: ds_add_u64 v2, v[0:1]
; GFX8_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_ITERATIVE-NEXT: .LBB7_4:
+; GFX8_ITERATIVE-NEXT: ; %bb.4:
; GFX8_ITERATIVE-NEXT: s_endpgm
;
; GFX9_ITERATIVE-LABEL: add_i64_varying_nouse:
@@ -2999,14 +2979,13 @@ define amdgpu_kernel void @add_i64_varying_nouse() {
; GFX9_ITERATIVE-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX9_ITERATIVE-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GFX9_ITERATIVE-NEXT: s_xor_b64 s[2:3], exec, s[2:3]
-; GFX9_ITERATIVE-NEXT: s_cbranch_execz .LBB7_4
; GFX9_ITERATIVE-NEXT: ; %bb.3:
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v0, s0
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v2, 0
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v1, s1
; GFX9_ITERATIVE-NEXT: ds_add_u64 v2, v[0:1]
; GFX9_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_ITERATIVE-NEXT: .LBB7_4:
+; GFX9_ITERATIVE-NEXT: ; %bb.4:
; GFX9_ITERATIVE-NEXT: s_endpgm
;
; GFX1064_ITERATIVE-LABEL: add_i64_varying_nouse:
@@ -3214,14 +3193,13 @@ define amdgpu_kernel void @add_i64_varying_nouse() {
; GFX8_DPP-NEXT: s_mov_b64 s[0:1], s[2:3]
; GFX8_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v8
; GFX8_DPP-NEXT: s_and_saveexec_b64 s[2:3], vcc
-; GFX8_DPP-NEXT: s_cbranch_execz .LBB7_2
; GFX8_DPP-NEXT: ; %bb.1:
; GFX8_DPP-NEXT: v_mov_b32_e32 v9, s1
; GFX8_DPP-NEXT: v_mov_b32_e32 v8, s0
; GFX8_DPP-NEXT: s_mov_b32 m0, -1
; GFX8_DPP-NEXT: ds_add_u64 v7, v[8:9]
; GFX8_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_DPP-NEXT: .LBB7_2:
+; GFX8_DPP-NEXT: ; %bb.2:
; GFX8_DPP-NEXT: s_endpgm
;
; GFX9_DPP-LABEL: add_i64_varying_nouse:
@@ -3283,13 +3261,12 @@ define amdgpu_kernel void @add_i64_varying_nouse() {
; GFX9_DPP-NEXT: s_mov_b64 s[0:1], s[2:3]
; GFX9_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v8
; GFX9_DPP-NEXT: s_and_saveexec_b64 s[2:3], vcc
-; GFX9_DPP-NEXT: s_cbranch_execz .LBB7_2
; GFX9_DPP-NEXT: ; %bb.1:
; GFX9_DPP-NEXT: v_mov_b32_e32 v9, s1
; GFX9_DPP-NEXT: v_mov_b32_e32 v8, s0
; GFX9_DPP-NEXT: ds_add_u64 v7, v[8:9]
; GFX9_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_DPP-NEXT: .LBB7_2:
+; GFX9_DPP-NEXT: ; %bb.2:
; GFX9_DPP-NEXT: s_endpgm
;
; GFX1064_DPP-LABEL: add_i64_varying_nouse:
@@ -3558,7 +3535,6 @@ define amdgpu_kernel void @sub_i32_constant(ptr addrspace(1) %out) {
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX8-NEXT: ; implicit-def: $vgpr1
; GFX8-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX8-NEXT: s_cbranch_execz .LBB8_2
; GFX8-NEXT: ; %bb.1:
; GFX8-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
; GFX8-NEXT: s_mul_i32 s4, s4, 5
@@ -3567,7 +3543,7 @@ define amdgpu_kernel void @sub_i32_constant(ptr addrspace(1) %out) {
; GFX8-NEXT: s_mov_b32 m0, -1
; GFX8-NEXT: ds_sub_rtn_u32 v1, v1, v2
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: .LBB8_2:
+; GFX8-NEXT: ; %bb.2:
; GFX8-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8-NEXT: v_readfirstlane_b32 s4, v1
@@ -3587,7 +3563,6 @@ define amdgpu_kernel void @sub_i32_constant(ptr addrspace(1) %out) {
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX9-NEXT: ; implicit-def: $vgpr1
; GFX9-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX9-NEXT: s_cbranch_execz .LBB8_2
; GFX9-NEXT: ; %bb.1:
; GFX9-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
; GFX9-NEXT: s_mul_i32 s4, s4, 5
@@ -3595,7 +3570,7 @@ define amdgpu_kernel void @sub_i32_constant(ptr addrspace(1) %out) {
; GFX9-NEXT: v_mov_b32_e32 v2, s4
; GFX9-NEXT: ds_sub_rtn_u32 v1, v1, v2
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: .LBB8_2:
+; GFX9-NEXT: ; %bb.2:
; GFX9-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9-NEXT: v_readfirstlane_b32 s4, v1
@@ -3780,7 +3755,6 @@ define amdgpu_kernel void @sub_i32_uniform(ptr addrspace(1) %out, i32 %subitive)
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX8-NEXT: ; implicit-def: $vgpr1
; GFX8-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX8-NEXT: s_cbranch_execz .LBB9_2
; GFX8-NEXT: ; %bb.1:
; GFX8-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
@@ -3790,7 +3764,7 @@ define amdgpu_kernel void @sub_i32_uniform(ptr addrspace(1) %out, i32 %subitive)
; GFX8-NEXT: s_mov_b32 m0, -1
; GFX8-NEXT: ds_sub_rtn_u32 v1, v1, v2
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: .LBB9_2:
+; GFX8-NEXT: ; %bb.2:
; GFX8-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
@@ -3811,7 +3785,6 @@ define amdgpu_kernel void @sub_i32_uniform(ptr addrspace(1) %out, i32 %subitive)
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX9-NEXT: ; implicit-def: $vgpr1
; GFX9-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX9-NEXT: s_cbranch_execz .LBB9_2
; GFX9-NEXT: ; %bb.1:
; GFX9-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
@@ -3820,7 +3793,7 @@ define amdgpu_kernel void @sub_i32_uniform(ptr addrspace(1) %out, i32 %subitive)
; GFX9-NEXT: v_mov_b32_e32 v2, s4
; GFX9-NEXT: ds_sub_rtn_u32 v1, v1, v2
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: .LBB9_2:
+; GFX9-NEXT: ; %bb.2:
; GFX9-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
@@ -4036,14 +4009,13 @@ define amdgpu_kernel void @sub_i32_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: ; implicit-def: $vgpr0
; GFX8_ITERATIVE-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX8_ITERATIVE-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX8_ITERATIVE-NEXT: s_cbranch_execz .LBB10_4
; GFX8_ITERATIVE-NEXT: ; %bb.3:
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v2, s4
; GFX8_ITERATIVE-NEXT: s_mov_b32 m0, -1
; GFX8_ITERATIVE-NEXT: ds_sub_rtn_u32 v0, v0, v2
; GFX8_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_ITERATIVE-NEXT: .LBB10_4:
+; GFX8_ITERATIVE-NEXT: ; %bb.4:
; GFX8_ITERATIVE-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8_ITERATIVE-NEXT: v_readfirstlane_b32 s4, v0
@@ -4077,13 +4049,12 @@ define amdgpu_kernel void @sub_i32_varying(ptr addrspace(1) %out) {
; GFX9_ITERATIVE-NEXT: ; implicit-def: $vgpr0
; GFX9_ITERATIVE-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX9_ITERATIVE-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX9_ITERATIVE-NEXT: s_cbranch_execz .LBB10_4
; GFX9_ITERATIVE-NEXT: ; %bb.3:
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v2, s4
; GFX9_ITERATIVE-NEXT: ds_sub_rtn_u32 v0, v0, v2
; GFX9_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_ITERATIVE-NEXT: .LBB10_4:
+; GFX9_ITERATIVE-NEXT: ; %bb.4:
; GFX9_ITERATIVE-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9_ITERATIVE-NEXT: v_readfirstlane_b32 s4, v0
@@ -4310,13 +4281,12 @@ define amdgpu_kernel void @sub_i32_varying(ptr addrspace(1) %out) {
; GFX8_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
; GFX8_DPP-NEXT: ; implicit-def: $vgpr0
; GFX8_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX8_DPP-NEXT: s_cbranch_execz .LBB10_2
; GFX8_DPP-NEXT: ; %bb.1:
; GFX8_DPP-NEXT: v_mov_b32_e32 v0, s4
; GFX8_DPP-NEXT: s_mov_b32 m0, -1
; GFX8_DPP-NEXT: ds_sub_rtn_u32 v0, v3, v0
; GFX8_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_DPP-NEXT: .LBB10_2:
+; GFX8_DPP-NEXT: ; %bb.2:
; GFX8_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8_DPP-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8_DPP-NEXT: v_readfirstlane_b32 s4, v0
@@ -4355,12 +4325,11 @@ define amdgpu_kernel void @sub_i32_varying(ptr addrspace(1) %out) {
; GFX9_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
; GFX9_DPP-NEXT: ; implicit-def: $vgpr0
; GFX9_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX9_DPP-NEXT: s_cbranch_execz .LBB10_2
; GFX9_DPP-NEXT: ; %bb.1:
; GFX9_DPP-NEXT: v_mov_b32_e32 v0, s4
; GFX9_DPP-NEXT: ds_sub_rtn_u32 v0, v3, v0
; GFX9_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_DPP-NEXT: .LBB10_2:
+; GFX9_DPP-NEXT: ; %bb.2:
; GFX9_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9_DPP-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9_DPP-NEXT: v_readfirstlane_b32 s4, v0
@@ -4635,14 +4604,13 @@ define amdgpu_kernel void @sub_i32_varying_nouse() {
; GFX8_ITERATIVE-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX8_ITERATIVE-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX8_ITERATIVE-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX8_ITERATIVE-NEXT: s_cbranch_execz .LBB11_4
; GFX8_ITERATIVE-NEXT: ; %bb.3:
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v1, s2
; GFX8_ITERATIVE-NEXT: s_mov_b32 m0, -1
; GFX8_ITERATIVE-NEXT: ds_sub_u32 v0, v1
; GFX8_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_ITERATIVE-NEXT: .LBB11_4:
+; GFX8_ITERATIVE-NEXT: ; %bb.4:
; GFX8_ITERATIVE-NEXT: s_endpgm
;
; GFX9_ITERATIVE-LABEL: sub_i32_varying_nouse:
@@ -4664,13 +4632,12 @@ define amdgpu_kernel void @sub_i32_varying_nouse() {
; GFX9_ITERATIVE-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX9_ITERATIVE-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX9_ITERATIVE-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX9_ITERATIVE-NEXT: s_cbranch_execz .LBB11_4
; GFX9_ITERATIVE-NEXT: ; %bb.3:
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v1, s2
; GFX9_ITERATIVE-NEXT: ds_sub_u32 v0, v1
; GFX9_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_ITERATIVE-NEXT: .LBB11_4:
+; GFX9_ITERATIVE-NEXT: ; %bb.4:
; GFX9_ITERATIVE-NEXT: s_endpgm
;
; GFX1064_ITERATIVE-LABEL: sub_i32_varying_nouse:
@@ -4826,13 +4793,12 @@ define amdgpu_kernel void @sub_i32_varying_nouse() {
; GFX8_DPP-NEXT: s_mov_b32 s0, s2
; GFX8_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3
; GFX8_DPP-NEXT: s_and_saveexec_b64 s[2:3], vcc
-; GFX8_DPP-NEXT: s_cbranch_execz .LBB11_2
; GFX8_DPP-NEXT: ; %bb.1:
; GFX8_DPP-NEXT: v_mov_b32_e32 v0, s0
; GFX8_DPP-NEXT: s_mov_b32 m0, -1
; GFX8_DPP-NEXT: ds_sub_u32 v2, v0
; GFX8_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_DPP-NEXT: .LBB11_2:
+; GFX8_DPP-NEXT: ; %bb.2:
; GFX8_DPP-NEXT: s_endpgm
;
; GFX9_DPP-LABEL: sub_i32_varying_nouse:
@@ -4859,12 +4825,11 @@ define amdgpu_kernel void @sub_i32_varying_nouse() {
; GFX9_DPP-NEXT: s_mov_b32 s0, s2
; GFX9_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3
; GFX9_DPP-NEXT: s_and_saveexec_b64 s[2:3], vcc
-; GFX9_DPP-NEXT: s_cbranch_execz .LBB11_2
; GFX9_DPP-NEXT: ; %bb.1:
; GFX9_DPP-NEXT: v_mov_b32_e32 v0, s0
; GFX9_DPP-NEXT: ds_sub_u32 v2, v0
; GFX9_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_DPP-NEXT: .LBB11_2:
+; GFX9_DPP-NEXT: ; %bb.2:
; GFX9_DPP-NEXT: s_endpgm
;
; GFX1064_DPP-LABEL: sub_i32_varying_nouse:
@@ -5036,7 +5001,6 @@ define amdgpu_kernel void @sub_i64_constant(ptr addrspace(1) %out) {
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 0, v2
; GFX8-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX8-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX8-NEXT: s_cbranch_execz .LBB12_2
; GFX8-NEXT: ; %bb.1:
; GFX8-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
; GFX8-NEXT: s_mul_i32 s4, s4, 5
@@ -5045,7 +5009,7 @@ define amdgpu_kernel void @sub_i64_constant(ptr addrspace(1) %out) {
; GFX8-NEXT: s_mov_b32 m0, -1
; GFX8-NEXT: ds_sub_rtn_u64 v[0:1], v1, v[0:1]
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: .LBB12_2:
+; GFX8-NEXT: ; %bb.2:
; GFX8-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8-NEXT: v_readfirstlane_b32 s4, v1
@@ -5069,7 +5033,6 @@ define amdgpu_kernel void @sub_i64_constant(ptr addrspace(1) %out) {
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v2
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX9-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX9-NEXT: s_cbranch_execz .LBB12_2
; GFX9-NEXT: ; %bb.1:
; GFX9-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
; GFX9-NEXT: s_mul_i32 s4, s4, 5
@@ -5077,7 +5040,7 @@ define amdgpu_kernel void @sub_i64_constant(ptr addrspace(1) %out) {
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: ds_sub_rtn_u64 v[0:1], v1, v[0:1]
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: .LBB12_2:
+; GFX9-NEXT: ; %bb.2:
; GFX9-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9-NEXT: v_readfirstlane_b32 s4, v1
@@ -5329,7 +5292,6 @@ define amdgpu_kernel void @sub_i64_uniform(ptr addrspace(1) %out, i64 %subitive)
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v2
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX9-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX9-NEXT: s_cbranch_execz .LBB13_2
; GFX9-NEXT: ; %bb.1:
; GFX9-NEXT: s_bcnt1_i32_b64 s2, s[8:9]
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
@@ -5342,7 +5304,7 @@ define amdgpu_kernel void @sub_i64_uniform(ptr addrspace(1) %out, i64 %subitive)
; GFX9-NEXT: v_mov_b32_e32 v3, 0
; GFX9-NEXT: ds_sub_rtn_u64 v[0:1], v3, v[0:1]
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: .LBB13_2:
+; GFX9-NEXT: ; %bb.2:
; GFX9-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mad_u64_u32 v[3:4], s[0:1], s6, v2, 0
@@ -5605,7 +5567,6 @@ define amdgpu_kernel void @sub_i64_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: ; implicit-def: $vgpr3_vgpr4
; GFX8_ITERATIVE-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX8_ITERATIVE-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
-; GFX8_ITERATIVE-NEXT: s_cbranch_execz .LBB14_4
; GFX8_ITERATIVE-NEXT: ; %bb.3:
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v4, s1
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
@@ -5613,7 +5574,7 @@ define amdgpu_kernel void @sub_i64_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: s_mov_b32 m0, -1
; GFX8_ITERATIVE-NEXT: ds_sub_rtn_u64 v[3:4], v0, v[3:4]
; GFX8_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_ITERATIVE-NEXT: .LBB14_4:
+; GFX8_ITERATIVE-NEXT: ; %bb.4:
; GFX8_ITERATIVE-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX8_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8_ITERATIVE-NEXT: v_readfirstlane_b32 s4, v4
@@ -5654,14 +5615,13 @@ define amdgpu_kernel void @sub_i64_varying(ptr addrspace(1) %out) {
; GFX9_ITERATIVE-NEXT: ; implicit-def: $vgpr3_vgpr4
; GFX9_ITERATIVE-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX9_ITERATIVE-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
-; GFX9_ITERATIVE-NEXT: s_cbranch_execz .LBB14_4
; GFX9_ITERATIVE-NEXT: ; %bb.3:
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v4, s1
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v3, s0
; GFX9_ITERATIVE-NEXT: ds_sub_rtn_u64 v[3:4], v0, v[3:4]
; GFX9_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_ITERATIVE-NEXT: .LBB14_4:
+; GFX9_ITERATIVE-NEXT: ; %bb.4:
; GFX9_ITERATIVE-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9_ITERATIVE-NEXT: v_readfirstlane_b32 s4, v4
@@ -5957,14 +5917,13 @@ define amdgpu_kernel void @sub_i64_varying(ptr addrspace(1) %out) {
; GFX8_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v7
; GFX8_DPP-NEXT: ; implicit-def: $vgpr7_vgpr8
; GFX8_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX8_DPP-NEXT: s_cbranch_execz .LBB14_2
; GFX8_DPP-NEXT: ; %bb.1:
; GFX8_DPP-NEXT: v_mov_b32_e32 v8, s5
; GFX8_DPP-NEXT: v_mov_b32_e32 v7, s4
; GFX8_DPP-NEXT: s_mov_b32 m0, -1
; GFX8_DPP-NEXT: ds_sub_rtn_u64 v[7:8], v9, v[7:8]
; GFX8_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_DPP-NEXT: .LBB14_2:
+; GFX8_DPP-NEXT: ; %bb.2:
; GFX8_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8_DPP-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8_DPP-NEXT: v_readfirstlane_b32 s4, v8
@@ -6044,13 +6003,12 @@ define amdgpu_kernel void @sub_i64_varying(ptr addrspace(1) %out) {
; GFX9_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v7
; GFX9_DPP-NEXT: ; implicit-def: $vgpr7_vgpr8
; GFX9_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX9_DPP-NEXT: s_cbranch_execz .LBB14_2
; GFX9_DPP-NEXT: ; %bb.1:
; GFX9_DPP-NEXT: v_mov_b32_e32 v8, s5
; GFX9_DPP-NEXT: v_mov_b32_e32 v7, s4
; GFX9_DPP-NEXT: ds_sub_rtn_u64 v[7:8], v9, v[7:8]
; GFX9_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_DPP-NEXT: .LBB14_2:
+; GFX9_DPP-NEXT: ; %bb.2:
; GFX9_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9_DPP-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9_DPP-NEXT: v_readfirstlane_b32 s4, v8
@@ -6508,14 +6466,13 @@ define amdgpu_kernel void @and_i32_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: ; implicit-def: $vgpr0
; GFX8_ITERATIVE-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX8_ITERATIVE-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX8_ITERATIVE-NEXT: s_cbranch_execz .LBB15_4
; GFX8_ITERATIVE-NEXT: ; %bb.3:
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v2, s4
; GFX8_ITERATIVE-NEXT: s_mov_b32 m0, -1
; GFX8_ITERATIVE-NEXT: ds_and_rtn_b32 v0, v0, v2
; GFX8_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_ITERATIVE-NEXT: .LBB15_4:
+; GFX8_ITERATIVE-NEXT: ; %bb.4:
; GFX8_ITERATIVE-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8_ITERATIVE-NEXT: v_readfirstlane_b32 s4, v0
@@ -6549,13 +6506,12 @@ define amdgpu_kernel void @and_i32_varying(ptr addrspace(1) %out) {
; GFX9_ITERATIVE-NEXT: ; implicit-def: $vgpr0
; GFX9_ITERATIVE-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX9_ITERATIVE-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX9_ITERATIVE-NEXT: s_cbranch_execz .LBB15_4
; GFX9_ITERATIVE-NEXT: ; %bb.3:
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v2, s4
; GFX9_ITERATIVE-NEXT: ds_and_rtn_b32 v0, v0, v2
; GFX9_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_ITERATIVE-NEXT: .LBB15_4:
+; GFX9_ITERATIVE-NEXT: ; %bb.4:
; GFX9_ITERATIVE-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9_ITERATIVE-NEXT: v_readfirstlane_b32 s4, v0
@@ -6781,14 +6737,13 @@ define amdgpu_kernel void @and_i32_varying(ptr addrspace(1) %out) {
; GFX8_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3
; GFX8_DPP-NEXT: ; implicit-def: $vgpr0
; GFX8_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX8_DPP-NEXT: s_cbranch_execz .LBB15_2
; GFX8_DPP-NEXT: ; %bb.1:
; GFX8_DPP-NEXT: v_mov_b32_e32 v0, 0
; GFX8_DPP-NEXT: v_mov_b32_e32 v3, s4
; GFX8_DPP-NEXT: s_mov_b32 m0, -1
; GFX8_DPP-NEXT: ds_and_rtn_b32 v0, v0, v3
; GFX8_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_DPP-NEXT: .LBB15_2:
+; GFX8_DPP-NEXT: ; %bb.2:
; GFX8_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8_DPP-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8_DPP-NEXT: v_readfirstlane_b32 s4, v0
@@ -6826,13 +6781,12 @@ define amdgpu_kernel void @and_i32_varying(ptr addrspace(1) %out) {
; GFX9_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3
; GFX9_DPP-NEXT: ; implicit-def: $vgpr0
; GFX9_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX9_DPP-NEXT: s_cbranch_execz .LBB15_2
; GFX9_DPP-NEXT: ; %bb.1:
; GFX9_DPP-NEXT: v_mov_b32_e32 v0, 0
; GFX9_DPP-NEXT: v_mov_b32_e32 v3, s4
; GFX9_DPP-NEXT: ds_and_rtn_b32 v0, v0, v3
; GFX9_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_DPP-NEXT: .LBB15_2:
+; GFX9_DPP-NEXT: ; %bb.2:
; GFX9_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9_DPP-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9_DPP-NEXT: v_readfirstlane_b32 s4, v0
@@ -7132,7 +7086,6 @@ define amdgpu_kernel void @and_i64_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: ; implicit-def: $vgpr3_vgpr4
; GFX8_ITERATIVE-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX8_ITERATIVE-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
-; GFX8_ITERATIVE-NEXT: s_cbranch_execz .LBB16_4
; GFX8_ITERATIVE-NEXT: ; %bb.3:
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v4, s1
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
@@ -7140,7 +7093,7 @@ define amdgpu_kernel void @and_i64_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: s_mov_b32 m0, -1
; GFX8_ITERATIVE-NEXT: ds_and_rtn_b64 v[3:4], v0, v[3:4]
; GFX8_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_ITERATIVE-NEXT: .LBB16_4:
+; GFX8_ITERATIVE-NEXT: ; %bb.4:
; GFX8_ITERATIVE-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX8_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8_ITERATIVE-NEXT: v_readfirstlane_b32 s4, v4
@@ -7179,14 +7132,13 @@ define amdgpu_kernel void @and_i64_varying(ptr addrspace(1) %out) {
; GFX9_ITERATIVE-NEXT: ; implicit-def: $vgpr3_vgpr4
; GFX9_ITERATIVE-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX9_ITERATIVE-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
-; GFX9_ITERATIVE-NEXT: s_cbranch_execz .LBB16_4
; GFX9_ITERATIVE-NEXT: ; %bb.3:
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v4, s1
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v3, s0
; GFX9_ITERATIVE-NEXT: ds_and_rtn_b64 v[3:4], v0, v[3:4]
; GFX9_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_ITERATIVE-NEXT: .LBB16_4:
+; GFX9_ITERATIVE-NEXT: ; %bb.4:
; GFX9_ITERATIVE-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9_ITERATIVE-NEXT: v_readfirstlane_b32 s4, v4
@@ -7443,14 +7395,13 @@ define amdgpu_kernel void @and_i64_varying(ptr addrspace(1) %out) {
; GFX8_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v5
; GFX8_DPP-NEXT: ; implicit-def: $vgpr5_vgpr6
; GFX8_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX8_DPP-NEXT: s_cbranch_execz .LBB16_2
; GFX8_DPP-NEXT: ; %bb.1:
; GFX8_DPP-NEXT: v_mov_b32_e32 v6, s5
; GFX8_DPP-NEXT: v_mov_b32_e32 v5, s4
; GFX8_DPP-NEXT: s_mov_b32 m0, -1
; GFX8_DPP-NEXT: ds_and_rtn_b64 v[5:6], v7, v[5:6]
; GFX8_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_DPP-NEXT: .LBB16_2:
+; GFX8_DPP-NEXT: ; %bb.2:
; GFX8_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8_DPP-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x24
; GFX8_DPP-NEXT: v_readfirstlane_b32 s0, v6
@@ -7498,13 +7449,12 @@ define amdgpu_kernel void @and_i64_varying(ptr addrspace(1) %out) {
; GFX9_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v5
; GFX9_DPP-NEXT: ; implicit-def: $vgpr5_vgpr6
; GFX9_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX9_DPP-NEXT: s_cbranch_execz .LBB16_2
; GFX9_DPP-NEXT: ; %bb.1:
; GFX9_DPP-NEXT: v_mov_b32_e32 v6, s5
; GFX9_DPP-NEXT: v_mov_b32_e32 v5, s4
; GFX9_DPP-NEXT: ds_and_rtn_b64 v[5:6], v7, v[5:6]
; GFX9_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_DPP-NEXT: .LBB16_2:
+; GFX9_DPP-NEXT: ; %bb.2:
; GFX9_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9_DPP-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x24
; GFX9_DPP-NEXT: v_readfirstlane_b32 s0, v6
@@ -7885,14 +7835,13 @@ define amdgpu_kernel void @or_i32_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: ; implicit-def: $vgpr0
; GFX8_ITERATIVE-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX8_ITERATIVE-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX8_ITERATIVE-NEXT: s_cbranch_execz .LBB17_4
; GFX8_ITERATIVE-NEXT: ; %bb.3:
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v2, s4
; GFX8_ITERATIVE-NEXT: s_mov_b32 m0, -1
; GFX8_ITERATIVE-NEXT: ds_or_rtn_b32 v0, v0, v2
; GFX8_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_ITERATIVE-NEXT: .LBB17_4:
+; GFX8_ITERATIVE-NEXT: ; %bb.4:
; GFX8_ITERATIVE-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8_ITERATIVE-NEXT: v_readfirstlane_b32 s4, v0
@@ -7926,13 +7875,12 @@ define amdgpu_kernel void @or_i32_varying(ptr addrspace(1) %out) {
; GFX9_ITERATIVE-NEXT: ; implicit-def: $vgpr0
; GFX9_ITERATIVE-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX9_ITERATIVE-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX9_ITERATIVE-NEXT: s_cbranch_execz .LBB17_4
; GFX9_ITERATIVE-NEXT: ; %bb.3:
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v2, s4
; GFX9_ITERATIVE-NEXT: ds_or_rtn_b32 v0, v0, v2
; GFX9_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_ITERATIVE-NEXT: .LBB17_4:
+; GFX9_ITERATIVE-NEXT: ; %bb.4:
; GFX9_ITERATIVE-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9_ITERATIVE-NEXT: v_readfirstlane_b32 s4, v0
@@ -8159,13 +8107,12 @@ define amdgpu_kernel void @or_i32_varying(ptr addrspace(1) %out) {
; GFX8_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
; GFX8_DPP-NEXT: ; implicit-def: $vgpr0
; GFX8_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX8_DPP-NEXT: s_cbranch_execz .LBB17_2
; GFX8_DPP-NEXT: ; %bb.1:
; GFX8_DPP-NEXT: v_mov_b32_e32 v0, s4
; GFX8_DPP-NEXT: s_mov_b32 m0, -1
; GFX8_DPP-NEXT: ds_or_rtn_b32 v0, v3, v0
; GFX8_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_DPP-NEXT: .LBB17_2:
+; GFX8_DPP-NEXT: ; %bb.2:
; GFX8_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8_DPP-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8_DPP-NEXT: v_readfirstlane_b32 s4, v0
@@ -8204,12 +8151,11 @@ define amdgpu_kernel void @or_i32_varying(ptr addrspace(1) %out) {
; GFX9_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
; GFX9_DPP-NEXT: ; implicit-def: $vgpr0
; GFX9_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX9_DPP-NEXT: s_cbranch_execz .LBB17_2
; GFX9_DPP-NEXT: ; %bb.1:
; GFX9_DPP-NEXT: v_mov_b32_e32 v0, s4
; GFX9_DPP-NEXT: ds_or_rtn_b32 v0, v3, v0
; GFX9_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_DPP-NEXT: .LBB17_2:
+; GFX9_DPP-NEXT: ; %bb.2:
; GFX9_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9_DPP-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9_DPP-NEXT: v_readfirstlane_b32 s4, v0
@@ -8509,7 +8455,6 @@ define amdgpu_kernel void @or_i64_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: ; implicit-def: $vgpr3_vgpr4
; GFX8_ITERATIVE-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX8_ITERATIVE-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
-; GFX8_ITERATIVE-NEXT: s_cbranch_execz .LBB18_4
; GFX8_ITERATIVE-NEXT: ; %bb.3:
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v4, s1
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
@@ -8517,7 +8462,7 @@ define amdgpu_kernel void @or_i64_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: s_mov_b32 m0, -1
; GFX8_ITERATIVE-NEXT: ds_or_rtn_b64 v[3:4], v0, v[3:4]
; GFX8_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_ITERATIVE-NEXT: .LBB18_4:
+; GFX8_ITERATIVE-NEXT: ; %bb.4:
; GFX8_ITERATIVE-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX8_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8_ITERATIVE-NEXT: v_readfirstlane_b32 s4, v4
@@ -8556,14 +8501,13 @@ define amdgpu_kernel void @or_i64_varying(ptr addrspace(1) %out) {
; GFX9_ITERATIVE-NEXT: ; implicit-def: $vgpr3_vgpr4
; GFX9_ITERATIVE-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX9_ITERATIVE-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
-; GFX9_ITERATIVE-NEXT: s_cbranch_execz .LBB18_4
; GFX9_ITERATIVE-NEXT: ; %bb.3:
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v4, s1
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v3, s0
; GFX9_ITERATIVE-NEXT: ds_or_rtn_b64 v[3:4], v0, v[3:4]
; GFX9_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_ITERATIVE-NEXT: .LBB18_4:
+; GFX9_ITERATIVE-NEXT: ; %bb.4:
; GFX9_ITERATIVE-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9_ITERATIVE-NEXT: v_readfirstlane_b32 s4, v4
@@ -8820,14 +8764,13 @@ define amdgpu_kernel void @or_i64_varying(ptr addrspace(1) %out) {
; GFX8_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v5
; GFX8_DPP-NEXT: ; implicit-def: $vgpr5_vgpr6
; GFX8_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX8_DPP-NEXT: s_cbranch_execz .LBB18_2
; GFX8_DPP-NEXT: ; %bb.1:
; GFX8_DPP-NEXT: v_mov_b32_e32 v6, s5
; GFX8_DPP-NEXT: v_mov_b32_e32 v5, s4
; GFX8_DPP-NEXT: s_mov_b32 m0, -1
; GFX8_DPP-NEXT: ds_or_rtn_b64 v[5:6], v7, v[5:6]
; GFX8_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_DPP-NEXT: .LBB18_2:
+; GFX8_DPP-NEXT: ; %bb.2:
; GFX8_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8_DPP-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8_DPP-NEXT: v_readfirstlane_b32 s4, v6
@@ -8876,13 +8819,12 @@ define amdgpu_kernel void @or_i64_varying(ptr addrspace(1) %out) {
; GFX9_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v5
; GFX9_DPP-NEXT: ; implicit-def: $vgpr5_vgpr6
; GFX9_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX9_DPP-NEXT: s_cbranch_execz .LBB18_2
; GFX9_DPP-NEXT: ; %bb.1:
; GFX9_DPP-NEXT: v_mov_b32_e32 v6, s5
; GFX9_DPP-NEXT: v_mov_b32_e32 v5, s4
; GFX9_DPP-NEXT: ds_or_rtn_b64 v[5:6], v7, v[5:6]
; GFX9_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_DPP-NEXT: .LBB18_2:
+; GFX9_DPP-NEXT: ; %bb.2:
; GFX9_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9_DPP-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9_DPP-NEXT: v_readfirstlane_b32 s4, v6
@@ -9264,14 +9206,13 @@ define amdgpu_kernel void @xor_i32_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: ; implicit-def: $vgpr0
; GFX8_ITERATIVE-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX8_ITERATIVE-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX8_ITERATIVE-NEXT: s_cbranch_execz .LBB19_4
; GFX8_ITERATIVE-NEXT: ; %bb.3:
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v2, s4
; GFX8_ITERATIVE-NEXT: s_mov_b32 m0, -1
; GFX8_ITERATIVE-NEXT: ds_xor_rtn_b32 v0, v0, v2
; GFX8_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_ITERATIVE-NEXT: .LBB19_4:
+; GFX8_ITERATIVE-NEXT: ; %bb.4:
; GFX8_ITERATIVE-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8_ITERATIVE-NEXT: v_readfirstlane_b32 s4, v0
@@ -9305,13 +9246,12 @@ define amdgpu_kernel void @xor_i32_varying(ptr addrspace(1) %out) {
; GFX9_ITERATIVE-NEXT: ; implicit-def: $vgpr0
; GFX9_ITERATIVE-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX9_ITERATIVE-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX9_ITERATIVE-NEXT: s_cbranch_execz .LBB19_4
; GFX9_ITERATIVE-NEXT: ; %bb.3:
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v2, s4
; GFX9_ITERATIVE-NEXT: ds_xor_rtn_b32 v0, v0, v2
; GFX9_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_ITERATIVE-NEXT: .LBB19_4:
+; GFX9_ITERATIVE-NEXT: ; %bb.4:
; GFX9_ITERATIVE-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9_ITERATIVE-NEXT: v_readfirstlane_b32 s4, v0
@@ -9538,13 +9478,12 @@ define amdgpu_kernel void @xor_i32_varying(ptr addrspace(1) %out) {
; GFX8_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
; GFX8_DPP-NEXT: ; implicit-def: $vgpr0
; GFX8_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX8_DPP-NEXT: s_cbranch_execz .LBB19_2
; GFX8_DPP-NEXT: ; %bb.1:
; GFX8_DPP-NEXT: v_mov_b32_e32 v0, s4
; GFX8_DPP-NEXT: s_mov_b32 m0, -1
; GFX8_DPP-NEXT: ds_xor_rtn_b32 v0, v3, v0
; GFX8_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_DPP-NEXT: .LBB19_2:
+; GFX8_DPP-NEXT: ; %bb.2:
; GFX8_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8_DPP-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8_DPP-NEXT: v_readfirstlane_b32 s4, v0
@@ -9583,12 +9522,11 @@ define amdgpu_kernel void @xor_i32_varying(ptr addrspace(1) %out) {
; GFX9_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
; GFX9_DPP-NEXT: ; implicit-def: $vgpr0
; GFX9_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX9_DPP-NEXT: s_cbranch_execz .LBB19_2
; GFX9_DPP-NEXT: ; %bb.1:
; GFX9_DPP-NEXT: v_mov_b32_e32 v0, s4
; GFX9_DPP-NEXT: ds_xor_rtn_b32 v0, v3, v0
; GFX9_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_DPP-NEXT: .LBB19_2:
+; GFX9_DPP-NEXT: ; %bb.2:
; GFX9_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9_DPP-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9_DPP-NEXT: v_readfirstlane_b32 s4, v0
@@ -9888,7 +9826,6 @@ define amdgpu_kernel void @xor_i64_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: ; implicit-def: $vgpr3_vgpr4
; GFX8_ITERATIVE-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX8_ITERATIVE-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
-; GFX8_ITERATIVE-NEXT: s_cbranch_execz .LBB20_4
; GFX8_ITERATIVE-NEXT: ; %bb.3:
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v4, s1
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
@@ -9896,7 +9833,7 @@ define amdgpu_kernel void @xor_i64_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: s_mov_b32 m0, -1
; GFX8_ITERATIVE-NEXT: ds_xor_rtn_b64 v[3:4], v0, v[3:4]
; GFX8_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_ITERATIVE-NEXT: .LBB20_4:
+; GFX8_ITERATIVE-NEXT: ; %bb.4:
; GFX8_ITERATIVE-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX8_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8_ITERATIVE-NEXT: v_readfirstlane_b32 s4, v4
@@ -9935,14 +9872,13 @@ define amdgpu_kernel void @xor_i64_varying(ptr addrspace(1) %out) {
; GFX9_ITERATIVE-NEXT: ; implicit-def: $vgpr3_vgpr4
; GFX9_ITERATIVE-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX9_ITERATIVE-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
-; GFX9_ITERATIVE-NEXT: s_cbranch_execz .LBB20_4
; GFX9_ITERATIVE-NEXT: ; %bb.3:
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v4, s1
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v3, s0
; GFX9_ITERATIVE-NEXT: ds_xor_rtn_b64 v[3:4], v0, v[3:4]
; GFX9_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_ITERATIVE-NEXT: .LBB20_4:
+; GFX9_ITERATIVE-NEXT: ; %bb.4:
; GFX9_ITERATIVE-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9_ITERATIVE-NEXT: v_readfirstlane_b32 s4, v4
@@ -10199,14 +10135,13 @@ define amdgpu_kernel void @xor_i64_varying(ptr addrspace(1) %out) {
; GFX8_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v5
; GFX8_DPP-NEXT: ; implicit-def: $vgpr5_vgpr6
; GFX8_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX8_DPP-NEXT: s_cbranch_execz .LBB20_2
; GFX8_DPP-NEXT: ; %bb.1:
; GFX8_DPP-NEXT: v_mov_b32_e32 v6, s5
; GFX8_DPP-NEXT: v_mov_b32_e32 v5, s4
; GFX8_DPP-NEXT: s_mov_b32 m0, -1
; GFX8_DPP-NEXT: ds_xor_rtn_b64 v[5:6], v7, v[5:6]
; GFX8_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_DPP-NEXT: .LBB20_2:
+; GFX8_DPP-NEXT: ; %bb.2:
; GFX8_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8_DPP-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8_DPP-NEXT: v_readfirstlane_b32 s4, v6
@@ -10255,13 +10190,12 @@ define amdgpu_kernel void @xor_i64_varying(ptr addrspace(1) %out) {
; GFX9_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v5
; GFX9_DPP-NEXT: ; implicit-def: $vgpr5_vgpr6
; GFX9_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX9_DPP-NEXT: s_cbranch_execz .LBB20_2
; GFX9_DPP-NEXT: ; %bb.1:
; GFX9_DPP-NEXT: v_mov_b32_e32 v6, s5
; GFX9_DPP-NEXT: v_mov_b32_e32 v5, s4
; GFX9_DPP-NEXT: ds_xor_rtn_b64 v[5:6], v7, v[5:6]
; GFX9_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_DPP-NEXT: .LBB20_2:
+; GFX9_DPP-NEXT: ; %bb.2:
; GFX9_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9_DPP-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9_DPP-NEXT: v_readfirstlane_b32 s4, v6
@@ -10643,14 +10577,13 @@ define amdgpu_kernel void @max_i32_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: ; implicit-def: $vgpr0
; GFX8_ITERATIVE-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX8_ITERATIVE-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX8_ITERATIVE-NEXT: s_cbranch_execz .LBB21_4
; GFX8_ITERATIVE-NEXT: ; %bb.3:
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v2, s4
; GFX8_ITERATIVE-NEXT: s_mov_b32 m0, -1
; GFX8_ITERATIVE-NEXT: ds_max_rtn_i32 v0, v0, v2
; GFX8_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_ITERATIVE-NEXT: .LBB21_4:
+; GFX8_ITERATIVE-NEXT: ; %bb.4:
; GFX8_ITERATIVE-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8_ITERATIVE-NEXT: v_readfirstlane_b32 s4, v0
@@ -10684,13 +10617,12 @@ define amdgpu_kernel void @max_i32_varying(ptr addrspace(1) %out) {
; GFX9_ITERATIVE-NEXT: ; implicit-def: $vgpr0
; GFX9_ITERATIVE-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX9_ITERATIVE-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX9_ITERATIVE-NEXT: s_cbranch_execz .LBB21_4
; GFX9_ITERATIVE-NEXT: ; %bb.3:
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v2, s4
; GFX9_ITERATIVE-NEXT: ds_max_rtn_i32 v0, v0, v2
; GFX9_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_ITERATIVE-NEXT: .LBB21_4:
+; GFX9_ITERATIVE-NEXT: ; %bb.4:
; GFX9_ITERATIVE-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9_ITERATIVE-NEXT: v_readfirstlane_b32 s4, v0
@@ -10916,14 +10848,13 @@ define amdgpu_kernel void @max_i32_varying(ptr addrspace(1) %out) {
; GFX8_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3
; GFX8_DPP-NEXT: ; implicit-def: $vgpr0
; GFX8_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX8_DPP-NEXT: s_cbranch_execz .LBB21_2
; GFX8_DPP-NEXT: ; %bb.1:
; GFX8_DPP-NEXT: v_mov_b32_e32 v0, 0
; GFX8_DPP-NEXT: v_mov_b32_e32 v3, s4
; GFX8_DPP-NEXT: s_mov_b32 m0, -1
; GFX8_DPP-NEXT: ds_max_rtn_i32 v0, v0, v3
; GFX8_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_DPP-NEXT: .LBB21_2:
+; GFX8_DPP-NEXT: ; %bb.2:
; GFX8_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8_DPP-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8_DPP-NEXT: v_readfirstlane_b32 s4, v0
@@ -10961,13 +10892,12 @@ define amdgpu_kernel void @max_i32_varying(ptr addrspace(1) %out) {
; GFX9_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3
; GFX9_DPP-NEXT: ; implicit-def: $vgpr0
; GFX9_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX9_DPP-NEXT: s_cbranch_execz .LBB21_2
; GFX9_DPP-NEXT: ; %bb.1:
; GFX9_DPP-NEXT: v_mov_b32_e32 v0, 0
; GFX9_DPP-NEXT: v_mov_b32_e32 v3, s4
; GFX9_DPP-NEXT: ds_max_rtn_i32 v0, v0, v3
; GFX9_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_DPP-NEXT: .LBB21_2:
+; GFX9_DPP-NEXT: ; %bb.2:
; GFX9_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9_DPP-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9_DPP-NEXT: v_readfirstlane_b32 s4, v0
@@ -11234,7 +11164,6 @@ define amdgpu_kernel void @max_i64_constant(ptr addrspace(1) %out) {
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX8-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX8-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX8-NEXT: s_cbranch_execz .LBB22_2
; GFX8-NEXT: ; %bb.1:
; GFX8-NEXT: v_mov_b32_e32 v0, 5
; GFX8-NEXT: v_mov_b32_e32 v1, 0
@@ -11242,7 +11171,7 @@ define amdgpu_kernel void @max_i64_constant(ptr addrspace(1) %out) {
; GFX8-NEXT: s_mov_b32 m0, -1
; GFX8-NEXT: ds_max_rtn_i64 v[0:1], v2, v[0:1]
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: .LBB22_2:
+; GFX8-NEXT: ; %bb.2:
; GFX8-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8-NEXT: v_readfirstlane_b32 s4, v0
; GFX8-NEXT: v_bfrev_b32_e32 v0, 1
@@ -11268,14 +11197,13 @@ define amdgpu_kernel void @max_i64_constant(ptr addrspace(1) %out) {
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX9-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX9-NEXT: s_cbranch_execz .LBB22_2
; GFX9-NEXT: ; %bb.1:
; GFX9-NEXT: v_mov_b32_e32 v0, 5
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: ds_max_rtn_i64 v[0:1], v2, v[0:1]
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: .LBB22_2:
+; GFX9-NEXT: ; %bb.2:
; GFX9-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9-NEXT: v_readfirstlane_b32 s4, v0
; GFX9-NEXT: v_bfrev_b32_e32 v0, 1
@@ -11522,7 +11450,6 @@ define amdgpu_kernel void @max_i64_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: ; implicit-def: $vgpr3_vgpr4
; GFX8_ITERATIVE-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX8_ITERATIVE-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
-; GFX8_ITERATIVE-NEXT: s_cbranch_execz .LBB23_4
; GFX8_ITERATIVE-NEXT: ; %bb.3:
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v4, s1
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
@@ -11530,7 +11457,7 @@ define amdgpu_kernel void @max_i64_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: s_mov_b32 m0, -1
; GFX8_ITERATIVE-NEXT: ds_max_rtn_i64 v[3:4], v0, v[3:4]
; GFX8_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_ITERATIVE-NEXT: .LBB23_4:
+; GFX8_ITERATIVE-NEXT: ; %bb.4:
; GFX8_ITERATIVE-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX8_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8_ITERATIVE-NEXT: v_readfirstlane_b32 s5, v4
@@ -11578,14 +11505,13 @@ define amdgpu_kernel void @max_i64_varying(ptr addrspace(1) %out) {
; GFX9_ITERATIVE-NEXT: ; implicit-def: $vgpr3_vgpr4
; GFX9_ITERATIVE-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX9_ITERATIVE-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
-; GFX9_ITERATIVE-NEXT: s_cbranch_execz .LBB23_4
; GFX9_ITERATIVE-NEXT: ; %bb.3:
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v4, s1
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v3, s0
; GFX9_ITERATIVE-NEXT: ds_max_rtn_i64 v[3:4], v0, v[3:4]
; GFX9_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_ITERATIVE-NEXT: .LBB23_4:
+; GFX9_ITERATIVE-NEXT: ; %bb.4:
; GFX9_ITERATIVE-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9_ITERATIVE-NEXT: v_readfirstlane_b32 s5, v4
@@ -11907,14 +11833,13 @@ define amdgpu_kernel void @max_i64_varying(ptr addrspace(1) %out) {
; GFX8_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v7
; GFX8_DPP-NEXT: ; implicit-def: $vgpr7_vgpr8
; GFX8_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX8_DPP-NEXT: s_cbranch_execz .LBB23_2
; GFX8_DPP-NEXT: ; %bb.1:
; GFX8_DPP-NEXT: v_mov_b32_e32 v8, s5
; GFX8_DPP-NEXT: v_mov_b32_e32 v7, s4
; GFX8_DPP-NEXT: s_mov_b32 m0, -1
; GFX8_DPP-NEXT: ds_max_rtn_i64 v[7:8], v9, v[7:8]
; GFX8_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_DPP-NEXT: .LBB23_2:
+; GFX8_DPP-NEXT: ; %bb.2:
; GFX8_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8_DPP-NEXT: v_readfirstlane_b32 s5, v8
; GFX8_DPP-NEXT: v_readfirstlane_b32 s4, v7
@@ -12002,13 +11927,12 @@ define amdgpu_kernel void @max_i64_varying(ptr addrspace(1) %out) {
; GFX9_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v7
; GFX9_DPP-NEXT: ; implicit-def: $vgpr7_vgpr8
; GFX9_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX9_DPP-NEXT: s_cbranch_execz .LBB23_2
; GFX9_DPP-NEXT: ; %bb.1:
; GFX9_DPP-NEXT: v_mov_b32_e32 v8, s5
; GFX9_DPP-NEXT: v_mov_b32_e32 v7, s4
; GFX9_DPP-NEXT: ds_max_rtn_i64 v[7:8], v9, v[7:8]
; GFX9_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_DPP-NEXT: .LBB23_2:
+; GFX9_DPP-NEXT: ; %bb.2:
; GFX9_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9_DPP-NEXT: v_readfirstlane_b32 s5, v8
; GFX9_DPP-NEXT: v_readfirstlane_b32 s4, v7
@@ -12514,14 +12438,13 @@ define amdgpu_kernel void @min_i32_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: ; implicit-def: $vgpr0
; GFX8_ITERATIVE-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX8_ITERATIVE-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX8_ITERATIVE-NEXT: s_cbranch_execz .LBB24_4
; GFX8_ITERATIVE-NEXT: ; %bb.3:
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v2, s4
; GFX8_ITERATIVE-NEXT: s_mov_b32 m0, -1
; GFX8_ITERATIVE-NEXT: ds_min_rtn_i32 v0, v0, v2
; GFX8_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_ITERATIVE-NEXT: .LBB24_4:
+; GFX8_ITERATIVE-NEXT: ; %bb.4:
; GFX8_ITERATIVE-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8_ITERATIVE-NEXT: v_readfirstlane_b32 s4, v0
@@ -12555,13 +12478,12 @@ define amdgpu_kernel void @min_i32_varying(ptr addrspace(1) %out) {
; GFX9_ITERATIVE-NEXT: ; implicit-def: $vgpr0
; GFX9_ITERATIVE-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX9_ITERATIVE-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX9_ITERATIVE-NEXT: s_cbranch_execz .LBB24_4
; GFX9_ITERATIVE-NEXT: ; %bb.3:
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v2, s4
; GFX9_ITERATIVE-NEXT: ds_min_rtn_i32 v0, v0, v2
; GFX9_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_ITERATIVE-NEXT: .LBB24_4:
+; GFX9_ITERATIVE-NEXT: ; %bb.4:
; GFX9_ITERATIVE-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9_ITERATIVE-NEXT: v_readfirstlane_b32 s4, v0
@@ -12787,14 +12709,13 @@ define amdgpu_kernel void @min_i32_varying(ptr addrspace(1) %out) {
; GFX8_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3
; GFX8_DPP-NEXT: ; implicit-def: $vgpr0
; GFX8_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX8_DPP-NEXT: s_cbranch_execz .LBB24_2
; GFX8_DPP-NEXT: ; %bb.1:
; GFX8_DPP-NEXT: v_mov_b32_e32 v0, 0
; GFX8_DPP-NEXT: v_mov_b32_e32 v3, s4
; GFX8_DPP-NEXT: s_mov_b32 m0, -1
; GFX8_DPP-NEXT: ds_min_rtn_i32 v0, v0, v3
; GFX8_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_DPP-NEXT: .LBB24_2:
+; GFX8_DPP-NEXT: ; %bb.2:
; GFX8_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8_DPP-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8_DPP-NEXT: v_readfirstlane_b32 s4, v0
@@ -12832,13 +12753,12 @@ define amdgpu_kernel void @min_i32_varying(ptr addrspace(1) %out) {
; GFX9_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3
; GFX9_DPP-NEXT: ; implicit-def: $vgpr0
; GFX9_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX9_DPP-NEXT: s_cbranch_execz .LBB24_2
; GFX9_DPP-NEXT: ; %bb.1:
; GFX9_DPP-NEXT: v_mov_b32_e32 v0, 0
; GFX9_DPP-NEXT: v_mov_b32_e32 v3, s4
; GFX9_DPP-NEXT: ds_min_rtn_i32 v0, v0, v3
; GFX9_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_DPP-NEXT: .LBB24_2:
+; GFX9_DPP-NEXT: ; %bb.2:
; GFX9_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9_DPP-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9_DPP-NEXT: v_readfirstlane_b32 s4, v0
@@ -13105,7 +13025,6 @@ define amdgpu_kernel void @min_i64_constant(ptr addrspace(1) %out) {
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX8-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX8-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX8-NEXT: s_cbranch_execz .LBB25_2
; GFX8-NEXT: ; %bb.1:
; GFX8-NEXT: v_mov_b32_e32 v0, 5
; GFX8-NEXT: v_mov_b32_e32 v1, 0
@@ -13113,7 +13032,7 @@ define amdgpu_kernel void @min_i64_constant(ptr addrspace(1) %out) {
; GFX8-NEXT: s_mov_b32 m0, -1
; GFX8-NEXT: ds_min_rtn_i64 v[0:1], v2, v[0:1]
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: .LBB25_2:
+; GFX8-NEXT: ; %bb.2:
; GFX8-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8-NEXT: v_readfirstlane_b32 s4, v0
; GFX8-NEXT: v_bfrev_b32_e32 v0, -2
@@ -13139,14 +13058,13 @@ define amdgpu_kernel void @min_i64_constant(ptr addrspace(1) %out) {
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX9-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX9-NEXT: s_cbranch_execz .LBB25_2
; GFX9-NEXT: ; %bb.1:
; GFX9-NEXT: v_mov_b32_e32 v0, 5
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: ds_min_rtn_i64 v[0:1], v2, v[0:1]
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: .LBB25_2:
+; GFX9-NEXT: ; %bb.2:
; GFX9-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9-NEXT: v_readfirstlane_b32 s4, v0
; GFX9-NEXT: v_bfrev_b32_e32 v0, -2
@@ -13393,7 +13311,6 @@ define amdgpu_kernel void @min_i64_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: ; implicit-def: $vgpr3_vgpr4
; GFX8_ITERATIVE-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX8_ITERATIVE-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
-; GFX8_ITERATIVE-NEXT: s_cbranch_execz .LBB26_4
; GFX8_ITERATIVE-NEXT: ; %bb.3:
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v4, s1
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
@@ -13401,7 +13318,7 @@ define amdgpu_kernel void @min_i64_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: s_mov_b32 m0, -1
; GFX8_ITERATIVE-NEXT: ds_min_rtn_i64 v[3:4], v0, v[3:4]
; GFX8_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_ITERATIVE-NEXT: .LBB26_4:
+; GFX8_ITERATIVE-NEXT: ; %bb.4:
; GFX8_ITERATIVE-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX8_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8_ITERATIVE-NEXT: v_readfirstlane_b32 s5, v4
@@ -13449,14 +13366,13 @@ define amdgpu_kernel void @min_i64_varying(ptr addrspace(1) %out) {
; GFX9_ITERATIVE-NEXT: ; implicit-def: $vgpr3_vgpr4
; GFX9_ITERATIVE-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX9_ITERATIVE-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
-; GFX9_ITERATIVE-NEXT: s_cbranch_execz .LBB26_4
; GFX9_ITERATIVE-NEXT: ; %bb.3:
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v4, s1
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v3, s0
; GFX9_ITERATIVE-NEXT: ds_min_rtn_i64 v[3:4], v0, v[3:4]
; GFX9_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_ITERATIVE-NEXT: .LBB26_4:
+; GFX9_ITERATIVE-NEXT: ; %bb.4:
; GFX9_ITERATIVE-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9_ITERATIVE-NEXT: v_readfirstlane_b32 s5, v4
@@ -13777,14 +13693,13 @@ define amdgpu_kernel void @min_i64_varying(ptr addrspace(1) %out) {
; GFX8_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v7
; GFX8_DPP-NEXT: ; implicit-def: $vgpr7_vgpr8
; GFX8_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX8_DPP-NEXT: s_cbranch_execz .LBB26_2
; GFX8_DPP-NEXT: ; %bb.1:
; GFX8_DPP-NEXT: v_mov_b32_e32 v8, s5
; GFX8_DPP-NEXT: v_mov_b32_e32 v7, s4
; GFX8_DPP-NEXT: s_mov_b32 m0, -1
; GFX8_DPP-NEXT: ds_min_rtn_i64 v[7:8], v9, v[7:8]
; GFX8_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_DPP-NEXT: .LBB26_2:
+; GFX8_DPP-NEXT: ; %bb.2:
; GFX8_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8_DPP-NEXT: v_readfirstlane_b32 s1, v8
; GFX8_DPP-NEXT: v_readfirstlane_b32 s0, v7
@@ -13870,13 +13785,12 @@ define amdgpu_kernel void @min_i64_varying(ptr addrspace(1) %out) {
; GFX9_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v7
; GFX9_DPP-NEXT: ; implicit-def: $vgpr7_vgpr8
; GFX9_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX9_DPP-NEXT: s_cbranch_execz .LBB26_2
; GFX9_DPP-NEXT: ; %bb.1:
; GFX9_DPP-NEXT: v_mov_b32_e32 v8, s5
; GFX9_DPP-NEXT: v_mov_b32_e32 v7, s4
; GFX9_DPP-NEXT: ds_min_rtn_i64 v[7:8], v9, v[7:8]
; GFX9_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_DPP-NEXT: .LBB26_2:
+; GFX9_DPP-NEXT: ; %bb.2:
; GFX9_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9_DPP-NEXT: v_readfirstlane_b32 s1, v8
; GFX9_DPP-NEXT: v_readfirstlane_b32 s0, v7
@@ -14377,14 +14291,13 @@ define amdgpu_kernel void @umax_i32_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: ; implicit-def: $vgpr0
; GFX8_ITERATIVE-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX8_ITERATIVE-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX8_ITERATIVE-NEXT: s_cbranch_execz .LBB27_4
; GFX8_ITERATIVE-NEXT: ; %bb.3:
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v2, s4
; GFX8_ITERATIVE-NEXT: s_mov_b32 m0, -1
; GFX8_ITERATIVE-NEXT: ds_max_rtn_u32 v0, v0, v2
; GFX8_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_ITERATIVE-NEXT: .LBB27_4:
+; GFX8_ITERATIVE-NEXT: ; %bb.4:
; GFX8_ITERATIVE-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8_ITERATIVE-NEXT: v_readfirstlane_b32 s4, v0
@@ -14418,13 +14331,12 @@ define amdgpu_kernel void @umax_i32_varying(ptr addrspace(1) %out) {
; GFX9_ITERATIVE-NEXT: ; implicit-def: $vgpr0
; GFX9_ITERATIVE-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX9_ITERATIVE-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX9_ITERATIVE-NEXT: s_cbranch_execz .LBB27_4
; GFX9_ITERATIVE-NEXT: ; %bb.3:
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v2, s4
; GFX9_ITERATIVE-NEXT: ds_max_rtn_u32 v0, v0, v2
; GFX9_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_ITERATIVE-NEXT: .LBB27_4:
+; GFX9_ITERATIVE-NEXT: ; %bb.4:
; GFX9_ITERATIVE-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9_ITERATIVE-NEXT: v_readfirstlane_b32 s4, v0
@@ -14651,13 +14563,12 @@ define amdgpu_kernel void @umax_i32_varying(ptr addrspace(1) %out) {
; GFX8_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
; GFX8_DPP-NEXT: ; implicit-def: $vgpr0
; GFX8_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX8_DPP-NEXT: s_cbranch_execz .LBB27_2
; GFX8_DPP-NEXT: ; %bb.1:
; GFX8_DPP-NEXT: v_mov_b32_e32 v0, s4
; GFX8_DPP-NEXT: s_mov_b32 m0, -1
; GFX8_DPP-NEXT: ds_max_rtn_u32 v0, v3, v0
; GFX8_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_DPP-NEXT: .LBB27_2:
+; GFX8_DPP-NEXT: ; %bb.2:
; GFX8_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8_DPP-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8_DPP-NEXT: v_readfirstlane_b32 s4, v0
@@ -14696,12 +14607,11 @@ define amdgpu_kernel void @umax_i32_varying(ptr addrspace(1) %out) {
; GFX9_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
; GFX9_DPP-NEXT: ; implicit-def: $vgpr0
; GFX9_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX9_DPP-NEXT: s_cbranch_execz .LBB27_2
; GFX9_DPP-NEXT: ; %bb.1:
; GFX9_DPP-NEXT: v_mov_b32_e32 v0, s4
; GFX9_DPP-NEXT: ds_max_rtn_u32 v0, v3, v0
; GFX9_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_DPP-NEXT: .LBB27_2:
+; GFX9_DPP-NEXT: ; %bb.2:
; GFX9_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9_DPP-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9_DPP-NEXT: v_readfirstlane_b32 s4, v0
@@ -14967,7 +14877,6 @@ define amdgpu_kernel void @umax_i64_constant(ptr addrspace(1) %out) {
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX8-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX8-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX8-NEXT: s_cbranch_execz .LBB28_2
; GFX8-NEXT: ; %bb.1:
; GFX8-NEXT: v_mov_b32_e32 v0, 5
; GFX8-NEXT: v_mov_b32_e32 v1, 0
@@ -14975,7 +14884,7 @@ define amdgpu_kernel void @umax_i64_constant(ptr addrspace(1) %out) {
; GFX8-NEXT: s_mov_b32 m0, -1
; GFX8-NEXT: ds_max_rtn_u64 v[0:1], v2, v[0:1]
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: .LBB28_2:
+; GFX8-NEXT: ; %bb.2:
; GFX8-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8-NEXT: v_readfirstlane_b32 s5, v1
@@ -15000,14 +14909,13 @@ define amdgpu_kernel void @umax_i64_constant(ptr addrspace(1) %out) {
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX9-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX9-NEXT: s_cbranch_execz .LBB28_2
; GFX9-NEXT: ; %bb.1:
; GFX9-NEXT: v_mov_b32_e32 v0, 5
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: ds_max_rtn_u64 v[0:1], v2, v[0:1]
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: .LBB28_2:
+; GFX9-NEXT: ; %bb.2:
; GFX9-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9-NEXT: v_readfirstlane_b32 s5, v1
@@ -15251,7 +15159,6 @@ define amdgpu_kernel void @umax_i64_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: ; implicit-def: $vgpr3_vgpr4
; GFX8_ITERATIVE-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX8_ITERATIVE-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
-; GFX8_ITERATIVE-NEXT: s_cbranch_execz .LBB29_4
; GFX8_ITERATIVE-NEXT: ; %bb.3:
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v4, s1
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
@@ -15259,7 +15166,7 @@ define amdgpu_kernel void @umax_i64_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: s_mov_b32 m0, -1
; GFX8_ITERATIVE-NEXT: ds_max_rtn_u64 v[3:4], v0, v[3:4]
; GFX8_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_ITERATIVE-NEXT: .LBB29_4:
+; GFX8_ITERATIVE-NEXT: ; %bb.4:
; GFX8_ITERATIVE-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX8_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8_ITERATIVE-NEXT: v_readfirstlane_b32 s5, v4
@@ -15306,14 +15213,13 @@ define amdgpu_kernel void @umax_i64_varying(ptr addrspace(1) %out) {
; GFX9_ITERATIVE-NEXT: ; implicit-def: $vgpr3_vgpr4
; GFX9_ITERATIVE-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX9_ITERATIVE-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
-; GFX9_ITERATIVE-NEXT: s_cbranch_execz .LBB29_4
; GFX9_ITERATIVE-NEXT: ; %bb.3:
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v4, s1
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v3, s0
; GFX9_ITERATIVE-NEXT: ds_max_rtn_u64 v[3:4], v0, v[3:4]
; GFX9_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_ITERATIVE-NEXT: .LBB29_4:
+; GFX9_ITERATIVE-NEXT: ; %bb.4:
; GFX9_ITERATIVE-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9_ITERATIVE-NEXT: v_readfirstlane_b32 s5, v4
@@ -15627,14 +15533,13 @@ define amdgpu_kernel void @umax_i64_varying(ptr addrspace(1) %out) {
; GFX8_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v7
; GFX8_DPP-NEXT: ; implicit-def: $vgpr7_vgpr8
; GFX8_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX8_DPP-NEXT: s_cbranch_execz .LBB29_2
; GFX8_DPP-NEXT: ; %bb.1:
; GFX8_DPP-NEXT: v_mov_b32_e32 v8, s5
; GFX8_DPP-NEXT: v_mov_b32_e32 v7, s4
; GFX8_DPP-NEXT: s_mov_b32 m0, -1
; GFX8_DPP-NEXT: ds_max_rtn_u64 v[7:8], v9, v[7:8]
; GFX8_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_DPP-NEXT: .LBB29_2:
+; GFX8_DPP-NEXT: ; %bb.2:
; GFX8_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8_DPP-NEXT: v_readfirstlane_b32 s5, v8
; GFX8_DPP-NEXT: v_readfirstlane_b32 s4, v7
@@ -15718,13 +15623,12 @@ define amdgpu_kernel void @umax_i64_varying(ptr addrspace(1) %out) {
; GFX9_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v7
; GFX9_DPP-NEXT: ; implicit-def: $vgpr7_vgpr8
; GFX9_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX9_DPP-NEXT: s_cbranch_execz .LBB29_2
; GFX9_DPP-NEXT: ; %bb.1:
; GFX9_DPP-NEXT: v_mov_b32_e32 v8, s5
; GFX9_DPP-NEXT: v_mov_b32_e32 v7, s4
; GFX9_DPP-NEXT: ds_max_rtn_u64 v[7:8], v9, v[7:8]
; GFX9_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_DPP-NEXT: .LBB29_2:
+; GFX9_DPP-NEXT: ; %bb.2:
; GFX9_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9_DPP-NEXT: v_readfirstlane_b32 s5, v8
; GFX9_DPP-NEXT: v_readfirstlane_b32 s4, v7
@@ -16223,14 +16127,13 @@ define amdgpu_kernel void @umin_i32_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: ; implicit-def: $vgpr0
; GFX8_ITERATIVE-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX8_ITERATIVE-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX8_ITERATIVE-NEXT: s_cbranch_execz .LBB30_4
; GFX8_ITERATIVE-NEXT: ; %bb.3:
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v2, s4
; GFX8_ITERATIVE-NEXT: s_mov_b32 m0, -1
; GFX8_ITERATIVE-NEXT: ds_min_rtn_u32 v0, v0, v2
; GFX8_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_ITERATIVE-NEXT: .LBB30_4:
+; GFX8_ITERATIVE-NEXT: ; %bb.4:
; GFX8_ITERATIVE-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8_ITERATIVE-NEXT: v_readfirstlane_b32 s4, v0
@@ -16264,13 +16167,12 @@ define amdgpu_kernel void @umin_i32_varying(ptr addrspace(1) %out) {
; GFX9_ITERATIVE-NEXT: ; implicit-def: $vgpr0
; GFX9_ITERATIVE-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX9_ITERATIVE-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX9_ITERATIVE-NEXT: s_cbranch_execz .LBB30_4
; GFX9_ITERATIVE-NEXT: ; %bb.3:
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v2, s4
; GFX9_ITERATIVE-NEXT: ds_min_rtn_u32 v0, v0, v2
; GFX9_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_ITERATIVE-NEXT: .LBB30_4:
+; GFX9_ITERATIVE-NEXT: ; %bb.4:
; GFX9_ITERATIVE-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9_ITERATIVE-NEXT: v_readfirstlane_b32 s4, v0
@@ -16496,14 +16398,13 @@ define amdgpu_kernel void @umin_i32_varying(ptr addrspace(1) %out) {
; GFX8_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3
; GFX8_DPP-NEXT: ; implicit-def: $vgpr0
; GFX8_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX8_DPP-NEXT: s_cbranch_execz .LBB30_2
; GFX8_DPP-NEXT: ; %bb.1:
; GFX8_DPP-NEXT: v_mov_b32_e32 v0, 0
; GFX8_DPP-NEXT: v_mov_b32_e32 v3, s4
; GFX8_DPP-NEXT: s_mov_b32 m0, -1
; GFX8_DPP-NEXT: ds_min_rtn_u32 v0, v0, v3
; GFX8_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_DPP-NEXT: .LBB30_2:
+; GFX8_DPP-NEXT: ; %bb.2:
; GFX8_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8_DPP-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8_DPP-NEXT: v_readfirstlane_b32 s4, v0
@@ -16541,13 +16442,12 @@ define amdgpu_kernel void @umin_i32_varying(ptr addrspace(1) %out) {
; GFX9_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3
; GFX9_DPP-NEXT: ; implicit-def: $vgpr0
; GFX9_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX9_DPP-NEXT: s_cbranch_execz .LBB30_2
; GFX9_DPP-NEXT: ; %bb.1:
; GFX9_DPP-NEXT: v_mov_b32_e32 v0, 0
; GFX9_DPP-NEXT: v_mov_b32_e32 v3, s4
; GFX9_DPP-NEXT: ds_min_rtn_u32 v0, v0, v3
; GFX9_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_DPP-NEXT: .LBB30_2:
+; GFX9_DPP-NEXT: ; %bb.2:
; GFX9_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9_DPP-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9_DPP-NEXT: v_readfirstlane_b32 s4, v0
@@ -16813,7 +16713,6 @@ define amdgpu_kernel void @umin_i64_constant(ptr addrspace(1) %out) {
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX8-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX8-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX8-NEXT: s_cbranch_execz .LBB31_2
; GFX8-NEXT: ; %bb.1:
; GFX8-NEXT: v_mov_b32_e32 v0, 5
; GFX8-NEXT: v_mov_b32_e32 v1, 0
@@ -16821,7 +16720,7 @@ define amdgpu_kernel void @umin_i64_constant(ptr addrspace(1) %out) {
; GFX8-NEXT: s_mov_b32 m0, -1
; GFX8-NEXT: ds_min_rtn_u64 v[0:1], v2, v[0:1]
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: .LBB31_2:
+; GFX8-NEXT: ; %bb.2:
; GFX8-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8-NEXT: v_readfirstlane_b32 s5, v1
@@ -16846,14 +16745,13 @@ define amdgpu_kernel void @umin_i64_constant(ptr addrspace(1) %out) {
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX9-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX9-NEXT: s_cbranch_execz .LBB31_2
; GFX9-NEXT: ; %bb.1:
; GFX9-NEXT: v_mov_b32_e32 v0, 5
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: ds_min_rtn_u64 v[0:1], v2, v[0:1]
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: .LBB31_2:
+; GFX9-NEXT: ; %bb.2:
; GFX9-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9-NEXT: v_readfirstlane_b32 s5, v1
@@ -17097,7 +16995,6 @@ define amdgpu_kernel void @umin_i64_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: ; implicit-def: $vgpr3_vgpr4
; GFX8_ITERATIVE-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX8_ITERATIVE-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
-; GFX8_ITERATIVE-NEXT: s_cbranch_execz .LBB32_4
; GFX8_ITERATIVE-NEXT: ; %bb.3:
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v4, s1
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
@@ -17105,7 +17002,7 @@ define amdgpu_kernel void @umin_i64_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: s_mov_b32 m0, -1
; GFX8_ITERATIVE-NEXT: ds_min_rtn_u64 v[3:4], v0, v[3:4]
; GFX8_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_ITERATIVE-NEXT: .LBB32_4:
+; GFX8_ITERATIVE-NEXT: ; %bb.4:
; GFX8_ITERATIVE-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX8_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8_ITERATIVE-NEXT: v_readfirstlane_b32 s5, v4
@@ -17152,14 +17049,13 @@ define amdgpu_kernel void @umin_i64_varying(ptr addrspace(1) %out) {
; GFX9_ITERATIVE-NEXT: ; implicit-def: $vgpr3_vgpr4
; GFX9_ITERATIVE-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX9_ITERATIVE-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
-; GFX9_ITERATIVE-NEXT: s_cbranch_execz .LBB32_4
; GFX9_ITERATIVE-NEXT: ; %bb.3:
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v4, s1
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v3, s0
; GFX9_ITERATIVE-NEXT: ds_min_rtn_u64 v[3:4], v0, v[3:4]
; GFX9_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_ITERATIVE-NEXT: .LBB32_4:
+; GFX9_ITERATIVE-NEXT: ; %bb.4:
; GFX9_ITERATIVE-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9_ITERATIVE-NEXT: v_readfirstlane_b32 s5, v4
@@ -17473,14 +17369,13 @@ define amdgpu_kernel void @umin_i64_varying(ptr addrspace(1) %out) {
; GFX8_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v7
; GFX8_DPP-NEXT: ; implicit-def: $vgpr7_vgpr8
; GFX8_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX8_DPP-NEXT: s_cbranch_execz .LBB32_2
; GFX8_DPP-NEXT: ; %bb.1:
; GFX8_DPP-NEXT: v_mov_b32_e32 v8, s5
; GFX8_DPP-NEXT: v_mov_b32_e32 v7, s4
; GFX8_DPP-NEXT: s_mov_b32 m0, -1
; GFX8_DPP-NEXT: ds_min_rtn_u64 v[7:8], v9, v[7:8]
; GFX8_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_DPP-NEXT: .LBB32_2:
+; GFX8_DPP-NEXT: ; %bb.2:
; GFX8_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8_DPP-NEXT: v_readfirstlane_b32 s1, v8
; GFX8_DPP-NEXT: v_readfirstlane_b32 s0, v7
@@ -17563,13 +17458,12 @@ define amdgpu_kernel void @umin_i64_varying(ptr addrspace(1) %out) {
; GFX9_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v7
; GFX9_DPP-NEXT: ; implicit-def: $vgpr7_vgpr8
; GFX9_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX9_DPP-NEXT: s_cbranch_execz .LBB32_2
; GFX9_DPP-NEXT: ; %bb.1:
; GFX9_DPP-NEXT: v_mov_b32_e32 v8, s5
; GFX9_DPP-NEXT: v_mov_b32_e32 v7, s4
; GFX9_DPP-NEXT: ds_min_rtn_u64 v[7:8], v9, v[7:8]
; GFX9_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_DPP-NEXT: .LBB32_2:
+; GFX9_DPP-NEXT: ; %bb.2:
; GFX9_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9_DPP-NEXT: v_readfirstlane_b32 s1, v8
; GFX9_DPP-NEXT: v_readfirstlane_b32 s0, v7
diff --git a/llvm/test/CodeGen/AMDGPU/atomicrmw-expand.ll b/llvm/test/CodeGen/AMDGPU/atomicrmw-expand.ll
index 4f0bc512565d13..edec0eb79bca54 100644
--- a/llvm/test/CodeGen/AMDGPU/atomicrmw-expand.ll
+++ b/llvm/test/CodeGen/AMDGPU/atomicrmw-expand.ll
@@ -68,13 +68,12 @@ define float @syncscope_system(ptr %addr, float %val) #0 {
; GFX90A-NEXT: ; implicit-def: $vgpr2
; GFX90A-NEXT: .LBB0_6: ; %Flow2
; GFX90A-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
-; GFX90A-NEXT: s_cbranch_execz .LBB0_8
; GFX90A-NEXT: ; %bb.7: ; %atomicrmw.shared
; GFX90A-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
; GFX90A-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc
; GFX90A-NEXT: ds_add_rtn_f32 v3, v0, v2
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: .LBB0_8: ; %atomicrmw.phi
+; GFX90A-NEXT: ; %bb.8: ; %atomicrmw.phi
; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX90A-NEXT: v_mov_b32_e32 v0, v3
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -174,14 +173,13 @@ define float @syncscope_workgroup_rtn(ptr %addr, float %val) #0 {
; GFX90A-NEXT: ; implicit-def: $vgpr2
; GFX90A-NEXT: .LBB1_6: ; %Flow2
; GFX90A-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
-; GFX90A-NEXT: s_cbranch_execz .LBB1_8
; GFX90A-NEXT: ; %bb.7: ; %atomicrmw.shared
; GFX90A-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
; GFX90A-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: ds_add_rtn_f32 v3, v0, v2
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: .LBB1_8: ; %atomicrmw.phi
+; GFX90A-NEXT: ; %bb.8: ; %atomicrmw.phi
; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v0, v3
diff --git a/llvm/test/CodeGen/AMDGPU/branch-condition-and.ll b/llvm/test/CodeGen/AMDGPU/branch-condition-and.ll
index cc05129b1b2af6..e32cb494f7702a 100644
--- a/llvm/test/CodeGen/AMDGPU/branch-condition-and.ll
+++ b/llvm/test/CodeGen/AMDGPU/branch-condition-and.ll
@@ -1,5 +1,6 @@
-; RUN: llc -mtriple=amdgcn -verify-machineinstrs -simplifycfg-require-and-preserve-domtree=1 < %s | FileCheck -check-prefix=GCN %s
-; RUN: llc -mtriple=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs -simplifycfg-require-and-preserve-domtree=1 < %s | FileCheck -check-prefix=GCN %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=amdgcn -verify-machineinstrs -simplifycfg-require-and-preserve-domtree=1 < %s | FileCheck -check-prefix=GCN-NO-FLAT %s
+; RUN: llc -mtriple=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs -simplifycfg-require-and-preserve-domtree=1 < %s | FileCheck -check-prefix=GCN-FLAT %s
; This used to crash because during intermediate control flow lowering, there
; was a sequence
@@ -9,20 +10,35 @@
; s_mov_b64_term exec, s[2:3]
; that was not treated correctly.
;
-; GCN-LABEL: {{^}}ham:
-; GCN-DAG: v_cmp_lt_f32_e64 [[OTHERCC:s\[[0-9]+:[0-9]+\]]],
-; GCN-DAG: v_cmp_lt_f32_e32 vcc,
-; GCN: s_and_b64 [[AND:s\[[0-9]+:[0-9]+\]]], vcc, [[OTHERCC]]
-; GCN: s_and_saveexec_b64 [[SAVED:s\[[0-9]+:[0-9]+\]]], [[AND]]
-; GCN-NEXT: s_cbranch_execz .LBB0_{{[0-9]+}}
-
-; GCN-NEXT: ; %bb.{{[0-9]+}}: ; %bb4
-; GCN: ds_write_b32
-
-; GCN: .LBB0_{{[0-9]+}}: ; %UnifiedReturnBlock
-; GCN-NEXT: s_endpgm
-; GCN-NEXT: .Lfunc_end
define amdgpu_ps void @ham(float %arg, float %arg1) #0 {
+; GCN-NO-FLAT-LABEL: ham:
+; GCN-NO-FLAT: ; %bb.0: ; %bb
+; GCN-NO-FLAT-NEXT: v_cmp_lt_f32_e32 vcc, 0, v0
+; GCN-NO-FLAT-NEXT: v_cmp_lt_f32_e64 s[0:1], 0, v1
+; GCN-NO-FLAT-NEXT: s_and_b64 s[0:1], vcc, s[0:1]
+; GCN-NO-FLAT-NEXT: s_and_saveexec_b64 s[2:3], s[0:1]
+; GCN-NO-FLAT-NEXT: s_cbranch_execz .LBB0_2
+; GCN-NO-FLAT-NEXT: ; %bb.1: ; %bb4
+; GCN-NO-FLAT-NEXT: v_mov_b32_e32 v0, 4
+; GCN-NO-FLAT-NEXT: s_mov_b32 m0, -1
+; GCN-NO-FLAT-NEXT: ds_write_b32 v0, v0
+; GCN-NO-FLAT-NEXT: ; divergent unreachable
+; GCN-NO-FLAT-NEXT: .LBB0_2: ; %UnifiedReturnBlock
+; GCN-NO-FLAT-NEXT: s_endpgm
+;
+; GCN-FLAT-LABEL: ham:
+; GCN-FLAT: ; %bb.0: ; %bb
+; GCN-FLAT-NEXT: v_cmp_lt_f32_e32 vcc, 0, v0
+; GCN-FLAT-NEXT: v_cmp_lt_f32_e64 s[0:1], 0, v1
+; GCN-FLAT-NEXT: s_and_b64 s[0:1], vcc, s[0:1]
+; GCN-FLAT-NEXT: s_and_saveexec_b64 s[2:3], s[0:1]
+; GCN-FLAT-NEXT: ; %bb.1: ; %bb4
+; GCN-FLAT-NEXT: v_mov_b32_e32 v0, 4
+; GCN-FLAT-NEXT: s_mov_b32 m0, -1
+; GCN-FLAT-NEXT: ds_write_b32 v0, v0
+; GCN-FLAT-NEXT: ; divergent unreachable
+; GCN-FLAT-NEXT: ; %bb.2: ; %UnifiedReturnBlock
+; GCN-FLAT-NEXT: s_endpgm
bb:
%tmp = fcmp ogt float %arg, 0.000000e+00
%tmp2 = fcmp ogt float %arg1, 0.000000e+00
diff --git a/llvm/test/CodeGen/AMDGPU/dagcombine-v1i8-extractvecelt-crash.ll b/llvm/test/CodeGen/AMDGPU/dagcombine-v1i8-extractvecelt-crash.ll
index eecc91239c7283..6f4b15ad473ca6 100644
--- a/llvm/test/CodeGen/AMDGPU/dagcombine-v1i8-extractvecelt-crash.ll
+++ b/llvm/test/CodeGen/AMDGPU/dagcombine-v1i8-extractvecelt-crash.ll
@@ -9,11 +9,10 @@ define void @wombat(i1 %cond, ptr addrspace(5) %addr) {
; CHECK-NEXT: v_and_b32_e32 v0, 1, v0
; CHECK-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
; CHECK-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; CHECK-NEXT: s_cbranch_execz .LBB0_2
; CHECK-NEXT: ; %bb.1: ; %then
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: v_mov_b32_e32 v2, 0
-; CHECK-NEXT: .LBB0_2: ; %end
+; CHECK-NEXT: ; %bb.2: ; %end
; CHECK-NEXT: s_or_b64 exec, exec, s[4:5]
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: buffer_store_byte v2, v1, s[0:3], 0 offen
diff --git a/llvm/test/CodeGen/AMDGPU/else.ll b/llvm/test/CodeGen/AMDGPU/else.ll
index 655c5cd184a1ed..d3d4b860f9ac7f 100644
--- a/llvm/test/CodeGen/AMDGPU/else.ll
+++ b/llvm/test/CodeGen/AMDGPU/else.ll
@@ -30,7 +30,6 @@ end:
; CHECK-NEXT: s_and_b64 exec, exec, [[INIT_EXEC]]
; CHECK-NEXT: s_and_b64 [[AND_INIT:s\[[0-9]+:[0-9]+\]]], exec, [[DST]]
; CHECK-NEXT: s_xor_b64 exec, exec, [[AND_INIT]]
-; CHECK-NEXT: s_cbranch_execz
define amdgpu_ps void @else_execfix_leave_wqm(i32 %z, float %v) #0 {
main_body:
%cc = icmp sgt i32 %z, 5
diff --git a/llvm/test/CodeGen/AMDGPU/flat-atomicrmw-fadd.ll b/llvm/test/CodeGen/AMDGPU/flat-atomicrmw-fadd.ll
index 1ae1204e3cde18..ecd020dd1a42ca 100644
--- a/llvm/test/CodeGen/AMDGPU/flat-atomicrmw-fadd.ll
+++ b/llvm/test/CodeGen/AMDGPU/flat-atomicrmw-fadd.ll
@@ -108,13 +108,12 @@ define float @flat_agent_atomic_fadd_ret_f32__amdgpu_no_fine_grained_memory__amd
; GFX90A-NEXT: ; implicit-def: $vgpr2
; GFX90A-NEXT: .LBB0_6: ; %Flow2
; GFX90A-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
-; GFX90A-NEXT: s_cbranch_execz .LBB0_8
; GFX90A-NEXT: ; %bb.7: ; %atomicrmw.shared
; GFX90A-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
; GFX90A-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc
; GFX90A-NEXT: ds_add_rtn_f32 v3, v0, v2
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: .LBB0_8: ; %atomicrmw.phi
+; GFX90A-NEXT: ; %bb.8: ; %atomicrmw.phi
; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX90A-NEXT: v_mov_b32_e32 v0, v3
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -2428,13 +2427,12 @@ define float @flat_agent_atomic_fadd_ret_f32__ftz__amdgpu_no_fine_grained_memory
; GFX90A-NEXT: ; implicit-def: $vgpr2
; GFX90A-NEXT: .LBB12_6: ; %Flow2
; GFX90A-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
-; GFX90A-NEXT: s_cbranch_execz .LBB12_8
; GFX90A-NEXT: ; %bb.7: ; %atomicrmw.shared
; GFX90A-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
; GFX90A-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc
; GFX90A-NEXT: ds_add_rtn_f32 v3, v0, v2
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: .LBB12_8: ; %atomicrmw.phi
+; GFX90A-NEXT: ; %bb.8: ; %atomicrmw.phi
; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX90A-NEXT: v_mov_b32_e32 v0, v3
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -5099,13 +5097,12 @@ define float @flat_agent_atomic_fadd_ret_f32__amdgpu_no_fine_grained_memory_amdg
; GFX90A-NEXT: ; implicit-def: $vgpr2
; GFX90A-NEXT: .LBB26_6: ; %Flow2
; GFX90A-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
-; GFX90A-NEXT: s_cbranch_execz .LBB26_8
; GFX90A-NEXT: ; %bb.7: ; %atomicrmw.shared
; GFX90A-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
; GFX90A-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc
; GFX90A-NEXT: ds_add_rtn_f32 v3, v0, v2
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: .LBB26_8: ; %atomicrmw.phi
+; GFX90A-NEXT: ; %bb.8: ; %atomicrmw.phi
; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX90A-NEXT: v_mov_b32_e32 v0, v3
; GFX90A-NEXT: s_waitcnt vmcnt(0)
diff --git a/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll b/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll
index 667a3f398c08a2..6d00da2a0a9fac 100644
--- a/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll
+++ b/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll
@@ -1744,7 +1744,6 @@ define i128 @fptosi_bf16_to_i128(bfloat %x) {
; GISEL-NEXT: ; implicit-def: $vgpr9
; GISEL-NEXT: .LBB6_4: ; %Flow
; GISEL-NEXT: s_andn2_saveexec_b64 s[6:7], s[16:17]
-; GISEL-NEXT: s_cbranch_execz .LBB6_6
; GISEL-NEXT: ; %bb.5: ; %fp-to-i-if-then12
; GISEL-NEXT: v_sub_co_u32_e32 v3, vcc, 0x86, v5
; GISEL-NEXT: v_subrev_u32_e32 v2, 64, v3
@@ -1758,7 +1757,7 @@ define i128 @fptosi_bf16_to_i128(bfloat %x) {
; GISEL-NEXT: v_ashrrev_i32_e32 v2, 31, v1
; GISEL-NEXT: v_mul_i32_i24_e32 v0, v0, v9
; GISEL-NEXT: v_mov_b32_e32 v3, v2
-; GISEL-NEXT: .LBB6_6: ; %Flow1
+; GISEL-NEXT: ; %bb.6: ; %Flow1
; GISEL-NEXT: s_or_b64 exec, exec, s[6:7]
; GISEL-NEXT: .LBB6_7: ; %Flow2
; GISEL-NEXT: s_andn2_saveexec_b64 s[6:7], s[14:15]
@@ -2095,7 +2094,6 @@ define i128 @fptoui_bf16_to_i128(bfloat %x) {
; GISEL-NEXT: ; implicit-def: $vgpr9
; GISEL-NEXT: .LBB7_4: ; %Flow
; GISEL-NEXT: s_andn2_saveexec_b64 s[6:7], s[16:17]
-; GISEL-NEXT: s_cbranch_execz .LBB7_6
; GISEL-NEXT: ; %bb.5: ; %fp-to-i-if-then12
; GISEL-NEXT: v_sub_co_u32_e32 v3, vcc, 0x86, v5
; GISEL-NEXT: v_subrev_u32_e32 v2, 64, v3
@@ -2109,7 +2107,7 @@ define i128 @fptoui_bf16_to_i128(bfloat %x) {
; GISEL-NEXT: v_ashrrev_i32_e32 v2, 31, v1
; GISEL-NEXT: v_mul_i32_i24_e32 v0, v0, v9
; GISEL-NEXT: v_mov_b32_e32 v3, v2
-; GISEL-NEXT: .LBB7_6: ; %Flow1
+; GISEL-NEXT: ; %bb.6: ; %Flow1
; GISEL-NEXT: s_or_b64 exec, exec, s[6:7]
; GISEL-NEXT: .LBB7_7: ; %Flow2
; GISEL-NEXT: s_andn2_saveexec_b64 s[6:7], s[14:15]
diff --git a/llvm/test/CodeGen/AMDGPU/indirect-addressing-si.ll b/llvm/test/CodeGen/AMDGPU/indirect-addressing-si.ll
index 60946956547a7c..da19e09a88545c 100644
--- a/llvm/test/CodeGen/AMDGPU/indirect-addressing-si.ll
+++ b/llvm/test/CodeGen/AMDGPU/indirect-addressing-si.ll
@@ -5812,11 +5812,10 @@ define amdgpu_kernel void @insert_vgpr_offset_multiple_in_block(ptr addrspace(1)
; GENERIC-NEXT: s_waitcnt vmcnt(0)
; GENERIC-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GENERIC-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GENERIC-NEXT: s_cbranch_execz .LBB17_2
; GENERIC-NEXT: ; %bb.1: ; %bb1
; GENERIC-NEXT: buffer_store_dword v1, off, s[20:23], 0
; GENERIC-NEXT: s_waitcnt vmcnt(0)
-; GENERIC-NEXT: .LBB17_2: ; %bb2
+; GENERIC-NEXT: ; %bb.2: ; %bb2
; GENERIC-NEXT: s_endpgm
;
; NOOPT-LABEL: insert_vgpr_offset_multiple_in_block:
diff --git a/llvm/test/CodeGen/AMDGPU/insert-skips-flat-vmem-ds.mir b/llvm/test/CodeGen/AMDGPU/insert-skips-flat-vmem-ds.mir
index da38929fab9907..432355397c75a1 100644
--- a/llvm/test/CodeGen/AMDGPU/insert-skips-flat-vmem-ds.mir
+++ b/llvm/test/CodeGen/AMDGPU/insert-skips-flat-vmem-ds.mir
@@ -69,9 +69,7 @@ name: skip_execz_ds
body: |
; CHECK-LABEL: name: skip_execz_ds
; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: S_CBRANCH_EXECZ %bb.2, implicit $exec
+ ; CHECK-NEXT: successors: %bb.1(0x40000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors: %bb.2(0x80000000)
diff --git a/llvm/test/CodeGen/AMDGPU/insert-skips-gfx10.mir b/llvm/test/CodeGen/AMDGPU/insert-skips-gfx10.mir
index b4ed3cafbacb5f..a2f01ce9f371e0 100644
--- a/llvm/test/CodeGen/AMDGPU/insert-skips-gfx10.mir
+++ b/llvm/test/CodeGen/AMDGPU/insert-skips-gfx10.mir
@@ -6,9 +6,7 @@ name: skip_waitcnt_vscnt
body: |
; CHECK-LABEL: name: skip_waitcnt_vscnt
; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: S_CBRANCH_EXECZ %bb.2, implicit $exec
+ ; CHECK-NEXT: successors: %bb.1(0x40000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors: %bb.2(0x80000000)
@@ -36,9 +34,7 @@ name: skip_waitcnt_expcnt
body: |
; CHECK-LABEL: name: skip_waitcnt_expcnt
; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: S_CBRANCH_EXECZ %bb.2, implicit $exec
+ ; CHECK-NEXT: successors: %bb.1(0x40000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors: %bb.2(0x80000000)
@@ -66,9 +62,7 @@ name: skip_waitcnt_vmcnt
body: |
; CHECK-LABEL: name: skip_waitcnt_vmcnt
; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: S_CBRANCH_EXECZ %bb.2, implicit $exec
+ ; CHECK-NEXT: successors: %bb.1(0x40000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors: %bb.2(0x80000000)
@@ -96,9 +90,7 @@ name: skip_waitcnt_lgkmcnt
body: |
; CHECK-LABEL: name: skip_waitcnt_lgkmcnt
; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: S_CBRANCH_EXECZ %bb.2, implicit $exec
+ ; CHECK-NEXT: successors: %bb.1(0x40000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors: %bb.2(0x80000000)
@@ -126,9 +118,7 @@ name: skip_wait_idle
body: |
; CHECK-LABEL: name: skip_wait_idle
; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: S_CBRANCH_EXECZ %bb.2, implicit $exec
+ ; CHECK-NEXT: successors: %bb.1(0x40000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors: %bb.2(0x80000000)
diff --git a/llvm/test/CodeGen/AMDGPU/insert-skips-gfx12.mir b/llvm/test/CodeGen/AMDGPU/insert-skips-gfx12.mir
index 2d092974ac566f..eabea70987d459 100644
--- a/llvm/test/CodeGen/AMDGPU/insert-skips-gfx12.mir
+++ b/llvm/test/CodeGen/AMDGPU/insert-skips-gfx12.mir
@@ -6,9 +6,7 @@ name: skip_wait_loadcnt
body: |
; CHECK-LABEL: name: skip_wait_loadcnt
; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: S_CBRANCH_EXECZ %bb.2, implicit $exec
+ ; CHECK-NEXT: successors: %bb.1(0x40000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors: %bb.2(0x80000000)
@@ -36,9 +34,7 @@ name: skip_wait_loadcnt_dscnt
body: |
; CHECK-LABEL: name: skip_wait_loadcnt_dscnt
; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: S_CBRANCH_EXECZ %bb.2, implicit $exec
+ ; CHECK-NEXT: successors: %bb.1(0x40000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors: %bb.2(0x80000000)
@@ -66,9 +62,7 @@ name: skip_wait_storecnt
body: |
; CHECK-LABEL: name: skip_wait_storecnt
; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: S_CBRANCH_EXECZ %bb.2, implicit $exec
+ ; CHECK-NEXT: successors: %bb.1(0x40000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors: %bb.2(0x80000000)
@@ -96,9 +90,7 @@ name: skip_wait_storecnt_dscnt
body: |
; CHECK-LABEL: name: skip_wait_storecnt_dscnt
; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: S_CBRANCH_EXECZ %bb.2, implicit $exec
+ ; CHECK-NEXT: successors: %bb.1(0x40000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors: %bb.2(0x80000000)
@@ -126,9 +118,7 @@ name: skip_wait_samplecnt
body: |
; CHECK-LABEL: name: skip_wait_samplecnt
; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: S_CBRANCH_EXECZ %bb.2, implicit $exec
+ ; CHECK-NEXT: successors: %bb.1(0x40000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors: %bb.2(0x80000000)
@@ -156,9 +146,7 @@ name: skip_wait_bvhcnt
body: |
; CHECK-LABEL: name: skip_wait_bvhcnt
; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: S_CBRANCH_EXECZ %bb.2, implicit $exec
+ ; CHECK-NEXT: successors: %bb.1(0x40000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors: %bb.2(0x80000000)
@@ -186,9 +174,7 @@ name: skip_wait_expcnt
body: |
; CHECK-LABEL: name: skip_wait_expcnt
; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: S_CBRANCH_EXECZ %bb.2, implicit $exec
+ ; CHECK-NEXT: successors: %bb.1(0x40000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors: %bb.2(0x80000000)
@@ -216,9 +202,7 @@ name: skip_wait_dscnt
body: |
; CHECK-LABEL: name: skip_wait_dscnt
; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: S_CBRANCH_EXECZ %bb.2, implicit $exec
+ ; CHECK-NEXT: successors: %bb.1(0x40000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors: %bb.2(0x80000000)
@@ -246,9 +230,7 @@ name: skip_wait_kmcnt
body: |
; CHECK-LABEL: name: skip_wait_kmcnt
; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: S_CBRANCH_EXECZ %bb.2, implicit $exec
+ ; CHECK-NEXT: successors: %bb.1(0x40000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors: %bb.2(0x80000000)
@@ -276,9 +258,7 @@ name: skip_wait_idle
body: |
; CHECK-LABEL: name: skip_wait_idle
; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: S_CBRANCH_EXECZ %bb.2, implicit $exec
+ ; CHECK-NEXT: successors: %bb.1(0x40000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors: %bb.2(0x80000000)
diff --git a/llvm/test/CodeGen/AMDGPU/insert_waitcnt_for_precise_memory.ll b/llvm/test/CodeGen/AMDGPU/insert_waitcnt_for_precise_memory.ll
index 0045082eedb0a3..b38785ca178614 100644
--- a/llvm/test/CodeGen/AMDGPU/insert_waitcnt_for_precise_memory.ll
+++ b/llvm/test/CodeGen/AMDGPU/insert_waitcnt_for_precise_memory.ll
@@ -698,7 +698,6 @@ define amdgpu_kernel void @atomic_add_local(ptr addrspace(3) %local) {
; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, s1, v0
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GFX9-NEXT: s_cbranch_execz .LBB5_2
; GFX9-NEXT: ; %bb.1:
; GFX9-NEXT: s_load_dword s2, s[2:3], 0x24
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
@@ -708,7 +707,7 @@ define amdgpu_kernel void @atomic_add_local(ptr addrspace(3) %local) {
; GFX9-NEXT: v_mov_b32_e32 v0, s2
; GFX9-NEXT: ds_add_u32 v0, v1
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: .LBB5_2:
+; GFX9-NEXT: ; %bb.2:
; GFX9-NEXT: s_endpgm
;
; GFX90A-LABEL: atomic_add_local:
@@ -718,7 +717,6 @@ define amdgpu_kernel void @atomic_add_local(ptr addrspace(3) %local) {
; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v0, s1, v0
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GFX90A-NEXT: s_cbranch_execz .LBB5_2
; GFX90A-NEXT: ; %bb.1:
; GFX90A-NEXT: s_load_dword s2, s[2:3], 0x24
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
@@ -728,7 +726,7 @@ define amdgpu_kernel void @atomic_add_local(ptr addrspace(3) %local) {
; GFX90A-NEXT: v_mov_b32_e32 v0, s2
; GFX90A-NEXT: ds_add_u32 v0, v1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: .LBB5_2:
+; GFX90A-NEXT: ; %bb.2:
; GFX90A-NEXT: s_endpgm
;
; GFX10-LABEL: atomic_add_local:
@@ -758,7 +756,6 @@ define amdgpu_kernel void @atomic_add_local(ptr addrspace(3) %local) {
; GFX9-FLATSCR-NEXT: v_mbcnt_hi_u32_b32 v0, s1, v0
; GFX9-FLATSCR-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX9-FLATSCR-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GFX9-FLATSCR-NEXT: s_cbranch_execz .LBB5_2
; GFX9-FLATSCR-NEXT: ; %bb.1:
; GFX9-FLATSCR-NEXT: s_load_dword s2, s[2:3], 0x24
; GFX9-FLATSCR-NEXT: s_waitcnt lgkmcnt(0)
@@ -768,7 +765,7 @@ define amdgpu_kernel void @atomic_add_local(ptr addrspace(3) %local) {
; GFX9-FLATSCR-NEXT: v_mov_b32_e32 v0, s2
; GFX9-FLATSCR-NEXT: ds_add_u32 v0, v1
; GFX9-FLATSCR-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-FLATSCR-NEXT: .LBB5_2:
+; GFX9-FLATSCR-NEXT: ; %bb.2:
; GFX9-FLATSCR-NEXT: s_endpgm
;
; GFX11-LABEL: atomic_add_local:
@@ -900,7 +897,6 @@ define amdgpu_kernel void @atomic_add_ret_local(ptr addrspace(1) %out, ptr addrs
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX9-NEXT: ; implicit-def: $vgpr1
; GFX9-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX9-NEXT: s_cbranch_execz .LBB7_2
; GFX9-NEXT: ; %bb.1:
; GFX9-NEXT: s_load_dword s6, s[2:3], 0x2c
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
@@ -910,7 +906,7 @@ define amdgpu_kernel void @atomic_add_ret_local(ptr addrspace(1) %out, ptr addrs
; GFX9-NEXT: v_mov_b32_e32 v1, s6
; GFX9-NEXT: ds_add_rtn_u32 v1, v1, v2
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: .LBB7_2:
+; GFX9-NEXT: ; %bb.2:
; GFX9-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
@@ -929,7 +925,6 @@ define amdgpu_kernel void @atomic_add_ret_local(ptr addrspace(1) %out, ptr addrs
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX90A-NEXT: ; implicit-def: $vgpr1
; GFX90A-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX90A-NEXT: s_cbranch_execz .LBB7_2
; GFX90A-NEXT: ; %bb.1:
; GFX90A-NEXT: s_load_dword s6, s[2:3], 0x2c
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
@@ -939,7 +934,7 @@ define amdgpu_kernel void @atomic_add_ret_local(ptr addrspace(1) %out, ptr addrs
; GFX90A-NEXT: v_mov_b32_e32 v1, s6
; GFX90A-NEXT: ds_add_rtn_u32 v1, v1, v2
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: .LBB7_2:
+; GFX90A-NEXT: ; %bb.2:
; GFX90A-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX90A-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
@@ -988,7 +983,6 @@ define amdgpu_kernel void @atomic_add_ret_local(ptr addrspace(1) %out, ptr addrs
; GFX9-FLATSCR-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX9-FLATSCR-NEXT: ; implicit-def: $vgpr1
; GFX9-FLATSCR-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX9-FLATSCR-NEXT: s_cbranch_execz .LBB7_2
; GFX9-FLATSCR-NEXT: ; %bb.1:
; GFX9-FLATSCR-NEXT: s_load_dword s6, s[2:3], 0x2c
; GFX9-FLATSCR-NEXT: s_waitcnt lgkmcnt(0)
@@ -998,7 +992,7 @@ define amdgpu_kernel void @atomic_add_ret_local(ptr addrspace(1) %out, ptr addrs
; GFX9-FLATSCR-NEXT: v_mov_b32_e32 v1, s6
; GFX9-FLATSCR-NEXT: ds_add_rtn_u32 v1, v1, v2
; GFX9-FLATSCR-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-FLATSCR-NEXT: .LBB7_2:
+; GFX9-FLATSCR-NEXT: ; %bb.2:
; GFX9-FLATSCR-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9-FLATSCR-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9-FLATSCR-NEXT: s_waitcnt lgkmcnt(0)
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umax.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umax.ll
index 7371d498a70706..31ed8aa420c179 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umax.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umax.ll
@@ -539,11 +539,10 @@ define amdgpu_kernel void @divergent_cfg(ptr addrspace(1) %out, i32 %in) {
; GFX8DAGISEL-NEXT: ; implicit-def: $sgpr4
; GFX8DAGISEL-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX8DAGISEL-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX8DAGISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX8DAGISEL-NEXT: ; %bb.1: ; %else
; GFX8DAGISEL-NEXT: s_load_dword s4, s[2:3], 0x2c
; GFX8DAGISEL-NEXT: ; implicit-def: $vgpr0
-; GFX8DAGISEL-NEXT: .LBB4_2: ; %Flow
+; GFX8DAGISEL-NEXT: ; %bb.2: ; %Flow
; GFX8DAGISEL-NEXT: s_or_saveexec_b64 s[0:1], s[0:1]
; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, s4
@@ -576,13 +575,12 @@ define amdgpu_kernel void @divergent_cfg(ptr addrspace(1) %out, i32 %in) {
; GFX8GISEL-NEXT: ; implicit-def: $sgpr6
; GFX8GISEL-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX8GISEL-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX8GISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX8GISEL-NEXT: ; %bb.1: ; %else
; GFX8GISEL-NEXT: s_load_dword s4, s[2:3], 0x2c
; GFX8GISEL-NEXT: ; implicit-def: $vgpr0
; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX8GISEL-NEXT: s_mov_b32 s6, s4
-; GFX8GISEL-NEXT: .LBB4_2: ; %Flow
+; GFX8GISEL-NEXT: ; %bb.2: ; %Flow
; GFX8GISEL-NEXT: s_andn2_saveexec_b64 s[0:1], s[0:1]
; GFX8GISEL-NEXT: s_cbranch_execz .LBB4_5
; GFX8GISEL-NEXT: ; %bb.3: ; %if
@@ -611,11 +609,10 @@ define amdgpu_kernel void @divergent_cfg(ptr addrspace(1) %out, i32 %in) {
; GFX9DAGISEL-NEXT: ; implicit-def: $sgpr4
; GFX9DAGISEL-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX9DAGISEL-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX9DAGISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX9DAGISEL-NEXT: ; %bb.1: ; %else
; GFX9DAGISEL-NEXT: s_load_dword s4, s[2:3], 0x2c
; GFX9DAGISEL-NEXT: ; implicit-def: $vgpr0
-; GFX9DAGISEL-NEXT: .LBB4_2: ; %Flow
+; GFX9DAGISEL-NEXT: ; %bb.2: ; %Flow
; GFX9DAGISEL-NEXT: s_or_saveexec_b64 s[0:1], s[0:1]
; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX9DAGISEL-NEXT: v_mov_b32_e32 v1, s4
@@ -647,13 +644,12 @@ define amdgpu_kernel void @divergent_cfg(ptr addrspace(1) %out, i32 %in) {
; GFX9GISEL-NEXT: ; implicit-def: $sgpr6
; GFX9GISEL-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX9GISEL-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX9GISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX9GISEL-NEXT: ; %bb.1: ; %else
; GFX9GISEL-NEXT: s_load_dword s4, s[2:3], 0x2c
; GFX9GISEL-NEXT: ; implicit-def: $vgpr0
; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX9GISEL-NEXT: s_mov_b32 s6, s4
-; GFX9GISEL-NEXT: .LBB4_2: ; %Flow
+; GFX9GISEL-NEXT: ; %bb.2: ; %Flow
; GFX9GISEL-NEXT: s_andn2_saveexec_b64 s[0:1], s[0:1]
; GFX9GISEL-NEXT: s_cbranch_execz .LBB4_5
; GFX9GISEL-NEXT: ; %bb.3: ; %if
@@ -681,11 +677,10 @@ define amdgpu_kernel void @divergent_cfg(ptr addrspace(1) %out, i32 %in) {
; GFX1064DAGISEL-NEXT: ; implicit-def: $sgpr4
; GFX1064DAGISEL-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX1064DAGISEL-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX1064DAGISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX1064DAGISEL-NEXT: ; %bb.1: ; %else
; GFX1064DAGISEL-NEXT: s_load_dword s4, s[2:3], 0x2c
; GFX1064DAGISEL-NEXT: ; implicit-def: $vgpr0
-; GFX1064DAGISEL-NEXT: .LBB4_2: ; %Flow
+; GFX1064DAGISEL-NEXT: ; %bb.2: ; %Flow
; GFX1064DAGISEL-NEXT: s_or_saveexec_b64 s[0:1], s[0:1]
; GFX1064DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v1, s4
@@ -717,13 +712,12 @@ define amdgpu_kernel void @divergent_cfg(ptr addrspace(1) %out, i32 %in) {
; GFX1064GISEL-NEXT: ; implicit-def: $sgpr6
; GFX1064GISEL-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX1064GISEL-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX1064GISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX1064GISEL-NEXT: ; %bb.1: ; %else
; GFX1064GISEL-NEXT: s_load_dword s4, s[2:3], 0x2c
; GFX1064GISEL-NEXT: ; implicit-def: $vgpr0
; GFX1064GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1064GISEL-NEXT: s_mov_b32 s6, s4
-; GFX1064GISEL-NEXT: .LBB4_2: ; %Flow
+; GFX1064GISEL-NEXT: ; %bb.2: ; %Flow
; GFX1064GISEL-NEXT: s_andn2_saveexec_b64 s[0:1], s[0:1]
; GFX1064GISEL-NEXT: s_cbranch_execz .LBB4_5
; GFX1064GISEL-NEXT: ; %bb.3: ; %if
@@ -751,11 +745,10 @@ define amdgpu_kernel void @divergent_cfg(ptr addrspace(1) %out, i32 %in) {
; GFX1032DAGISEL-NEXT: ; implicit-def: $sgpr1
; GFX1032DAGISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo
; GFX1032DAGISEL-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX1032DAGISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX1032DAGISEL-NEXT: ; %bb.1: ; %else
; GFX1032DAGISEL-NEXT: s_load_dword s1, s[2:3], 0x2c
; GFX1032DAGISEL-NEXT: ; implicit-def: $vgpr0
-; GFX1032DAGISEL-NEXT: .LBB4_2: ; %Flow
+; GFX1032DAGISEL-NEXT: ; %bb.2: ; %Flow
; GFX1032DAGISEL-NEXT: s_or_saveexec_b32 s0, s0
; GFX1032DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v1, s1
@@ -787,13 +780,12 @@ define amdgpu_kernel void @divergent_cfg(ptr addrspace(1) %out, i32 %in) {
; GFX1032GISEL-NEXT: ; implicit-def: $sgpr0
; GFX1032GISEL-NEXT: s_and_saveexec_b32 s1, vcc_lo
; GFX1032GISEL-NEXT: s_xor_b32 s1, exec_lo, s1
-; GFX1032GISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX1032GISEL-NEXT: ; %bb.1: ; %else
; GFX1032GISEL-NEXT: s_load_dword s0, s[2:3], 0x2c
; GFX1032GISEL-NEXT: ; implicit-def: $vgpr0
; GFX1032GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1032GISEL-NEXT: s_mov_b32 s0, s0
-; GFX1032GISEL-NEXT: .LBB4_2: ; %Flow
+; GFX1032GISEL-NEXT: ; %bb.2: ; %Flow
; GFX1032GISEL-NEXT: s_andn2_saveexec_b32 s1, s1
; GFX1032GISEL-NEXT: s_cbranch_execz .LBB4_5
; GFX1032GISEL-NEXT: ; %bb.3: ; %if
@@ -823,11 +815,10 @@ define amdgpu_kernel void @divergent_cfg(ptr addrspace(1) %out, i32 %in) {
; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1164DAGISEL-NEXT: v_cmpx_lt_u32_e32 15, v0
; GFX1164DAGISEL-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX1164DAGISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX1164DAGISEL-NEXT: ; %bb.1: ; %else
; GFX1164DAGISEL-NEXT: s_load_b32 s4, s[2:3], 0x2c
; GFX1164DAGISEL-NEXT: ; implicit-def: $vgpr0
-; GFX1164DAGISEL-NEXT: .LBB4_2: ; %Flow
+; GFX1164DAGISEL-NEXT: ; %bb.2: ; %Flow
; GFX1164DAGISEL-NEXT: s_or_saveexec_b64 s[0:1], s[0:1]
; GFX1164DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v1, s4
@@ -864,13 +855,12 @@ define amdgpu_kernel void @divergent_cfg(ptr addrspace(1) %out, i32 %in) {
; GFX1164GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1164GISEL-NEXT: v_cmpx_le_u32_e32 16, v0
; GFX1164GISEL-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX1164GISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX1164GISEL-NEXT: ; %bb.1: ; %else
; GFX1164GISEL-NEXT: s_load_b32 s4, s[2:3], 0x2c
; GFX1164GISEL-NEXT: ; implicit-def: $vgpr0
; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1164GISEL-NEXT: s_mov_b32 s6, s4
-; GFX1164GISEL-NEXT: .LBB4_2: ; %Flow
+; GFX1164GISEL-NEXT: ; %bb.2: ; %Flow
; GFX1164GISEL-NEXT: s_and_not1_saveexec_b64 s[0:1], s[0:1]
; GFX1164GISEL-NEXT: s_cbranch_execz .LBB4_5
; GFX1164GISEL-NEXT: ; %bb.3: ; %if
@@ -903,11 +893,10 @@ define amdgpu_kernel void @divergent_cfg(ptr addrspace(1) %out, i32 %in) {
; GFX1132DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1132DAGISEL-NEXT: v_cmpx_lt_u32_e32 15, v0
; GFX1132DAGISEL-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX1132DAGISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX1132DAGISEL-NEXT: ; %bb.1: ; %else
; GFX1132DAGISEL-NEXT: s_load_b32 s1, s[2:3], 0x2c
; GFX1132DAGISEL-NEXT: ; implicit-def: $vgpr0
-; GFX1132DAGISEL-NEXT: .LBB4_2: ; %Flow
+; GFX1132DAGISEL-NEXT: ; %bb.2: ; %Flow
; GFX1132DAGISEL-NEXT: s_or_saveexec_b32 s0, s0
; GFX1132DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1132DAGISEL-NEXT: v_mov_b32_e32 v1, s1
@@ -944,13 +933,12 @@ define amdgpu_kernel void @divergent_cfg(ptr addrspace(1) %out, i32 %in) {
; GFX1132GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1132GISEL-NEXT: v_cmpx_le_u32_e32 16, v0
; GFX1132GISEL-NEXT: s_xor_b32 s1, exec_lo, s1
-; GFX1132GISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX1132GISEL-NEXT: ; %bb.1: ; %else
; GFX1132GISEL-NEXT: s_load_b32 s0, s[2:3], 0x2c
; GFX1132GISEL-NEXT: ; implicit-def: $vgpr0
; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1132GISEL-NEXT: s_mov_b32 s0, s0
-; GFX1132GISEL-NEXT: .LBB4_2: ; %Flow
+; GFX1132GISEL-NEXT: ; %bb.2: ; %Flow
; GFX1132GISEL-NEXT: s_and_not1_saveexec_b32 s1, s1
; GFX1132GISEL-NEXT: s_cbranch_execz .LBB4_5
; GFX1132GISEL-NEXT: ; %bb.3: ; %if
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umin.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umin.ll
index 60af21524a04a1..5939e26898f8e9 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umin.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umin.ll
@@ -540,11 +540,10 @@ define amdgpu_kernel void @divergent_cfg(ptr addrspace(1) %out, i32 %in) {
; GFX8DAGISEL-NEXT: ; implicit-def: $sgpr4
; GFX8DAGISEL-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX8DAGISEL-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX8DAGISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX8DAGISEL-NEXT: ; %bb.1: ; %else
; GFX8DAGISEL-NEXT: s_load_dword s4, s[2:3], 0x2c
; GFX8DAGISEL-NEXT: ; implicit-def: $vgpr0
-; GFX8DAGISEL-NEXT: .LBB4_2: ; %Flow
+; GFX8DAGISEL-NEXT: ; %bb.2: ; %Flow
; GFX8DAGISEL-NEXT: s_or_saveexec_b64 s[0:1], s[0:1]
; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, s4
@@ -577,13 +576,12 @@ define amdgpu_kernel void @divergent_cfg(ptr addrspace(1) %out, i32 %in) {
; GFX8GISEL-NEXT: ; implicit-def: $sgpr6
; GFX8GISEL-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX8GISEL-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX8GISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX8GISEL-NEXT: ; %bb.1: ; %else
; GFX8GISEL-NEXT: s_load_dword s4, s[2:3], 0x2c
; GFX8GISEL-NEXT: ; implicit-def: $vgpr0
; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX8GISEL-NEXT: s_mov_b32 s6, s4
-; GFX8GISEL-NEXT: .LBB4_2: ; %Flow
+; GFX8GISEL-NEXT: ; %bb.2: ; %Flow
; GFX8GISEL-NEXT: s_andn2_saveexec_b64 s[0:1], s[0:1]
; GFX8GISEL-NEXT: s_cbranch_execz .LBB4_5
; GFX8GISEL-NEXT: ; %bb.3: ; %if
@@ -612,11 +610,10 @@ define amdgpu_kernel void @divergent_cfg(ptr addrspace(1) %out, i32 %in) {
; GFX9DAGISEL-NEXT: ; implicit-def: $sgpr4
; GFX9DAGISEL-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX9DAGISEL-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX9DAGISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX9DAGISEL-NEXT: ; %bb.1: ; %else
; GFX9DAGISEL-NEXT: s_load_dword s4, s[2:3], 0x2c
; GFX9DAGISEL-NEXT: ; implicit-def: $vgpr0
-; GFX9DAGISEL-NEXT: .LBB4_2: ; %Flow
+; GFX9DAGISEL-NEXT: ; %bb.2: ; %Flow
; GFX9DAGISEL-NEXT: s_or_saveexec_b64 s[0:1], s[0:1]
; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX9DAGISEL-NEXT: v_mov_b32_e32 v1, s4
@@ -648,13 +645,12 @@ define amdgpu_kernel void @divergent_cfg(ptr addrspace(1) %out, i32 %in) {
; GFX9GISEL-NEXT: ; implicit-def: $sgpr6
; GFX9GISEL-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX9GISEL-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX9GISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX9GISEL-NEXT: ; %bb.1: ; %else
; GFX9GISEL-NEXT: s_load_dword s4, s[2:3], 0x2c
; GFX9GISEL-NEXT: ; implicit-def: $vgpr0
; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX9GISEL-NEXT: s_mov_b32 s6, s4
-; GFX9GISEL-NEXT: .LBB4_2: ; %Flow
+; GFX9GISEL-NEXT: ; %bb.2: ; %Flow
; GFX9GISEL-NEXT: s_andn2_saveexec_b64 s[0:1], s[0:1]
; GFX9GISEL-NEXT: s_cbranch_execz .LBB4_5
; GFX9GISEL-NEXT: ; %bb.3: ; %if
@@ -682,11 +678,10 @@ define amdgpu_kernel void @divergent_cfg(ptr addrspace(1) %out, i32 %in) {
; GFX1064DAGISEL-NEXT: ; implicit-def: $sgpr4
; GFX1064DAGISEL-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX1064DAGISEL-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX1064DAGISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX1064DAGISEL-NEXT: ; %bb.1: ; %else
; GFX1064DAGISEL-NEXT: s_load_dword s4, s[2:3], 0x2c
; GFX1064DAGISEL-NEXT: ; implicit-def: $vgpr0
-; GFX1064DAGISEL-NEXT: .LBB4_2: ; %Flow
+; GFX1064DAGISEL-NEXT: ; %bb.2: ; %Flow
; GFX1064DAGISEL-NEXT: s_or_saveexec_b64 s[0:1], s[0:1]
; GFX1064DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v1, s4
@@ -718,13 +713,12 @@ define amdgpu_kernel void @divergent_cfg(ptr addrspace(1) %out, i32 %in) {
; GFX1064GISEL-NEXT: ; implicit-def: $sgpr6
; GFX1064GISEL-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX1064GISEL-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX1064GISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX1064GISEL-NEXT: ; %bb.1: ; %else
; GFX1064GISEL-NEXT: s_load_dword s4, s[2:3], 0x2c
; GFX1064GISEL-NEXT: ; implicit-def: $vgpr0
; GFX1064GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1064GISEL-NEXT: s_mov_b32 s6, s4
-; GFX1064GISEL-NEXT: .LBB4_2: ; %Flow
+; GFX1064GISEL-NEXT: ; %bb.2: ; %Flow
; GFX1064GISEL-NEXT: s_andn2_saveexec_b64 s[0:1], s[0:1]
; GFX1064GISEL-NEXT: s_cbranch_execz .LBB4_5
; GFX1064GISEL-NEXT: ; %bb.3: ; %if
@@ -752,11 +746,10 @@ define amdgpu_kernel void @divergent_cfg(ptr addrspace(1) %out, i32 %in) {
; GFX1032DAGISEL-NEXT: ; implicit-def: $sgpr1
; GFX1032DAGISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo
; GFX1032DAGISEL-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX1032DAGISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX1032DAGISEL-NEXT: ; %bb.1: ; %else
; GFX1032DAGISEL-NEXT: s_load_dword s1, s[2:3], 0x2c
; GFX1032DAGISEL-NEXT: ; implicit-def: $vgpr0
-; GFX1032DAGISEL-NEXT: .LBB4_2: ; %Flow
+; GFX1032DAGISEL-NEXT: ; %bb.2: ; %Flow
; GFX1032DAGISEL-NEXT: s_or_saveexec_b32 s0, s0
; GFX1032DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v1, s1
@@ -788,13 +781,12 @@ define amdgpu_kernel void @divergent_cfg(ptr addrspace(1) %out, i32 %in) {
; GFX1032GISEL-NEXT: ; implicit-def: $sgpr0
; GFX1032GISEL-NEXT: s_and_saveexec_b32 s1, vcc_lo
; GFX1032GISEL-NEXT: s_xor_b32 s1, exec_lo, s1
-; GFX1032GISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX1032GISEL-NEXT: ; %bb.1: ; %else
; GFX1032GISEL-NEXT: s_load_dword s0, s[2:3], 0x2c
; GFX1032GISEL-NEXT: ; implicit-def: $vgpr0
; GFX1032GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1032GISEL-NEXT: s_mov_b32 s0, s0
-; GFX1032GISEL-NEXT: .LBB4_2: ; %Flow
+; GFX1032GISEL-NEXT: ; %bb.2: ; %Flow
; GFX1032GISEL-NEXT: s_andn2_saveexec_b32 s1, s1
; GFX1032GISEL-NEXT: s_cbranch_execz .LBB4_5
; GFX1032GISEL-NEXT: ; %bb.3: ; %if
@@ -824,11 +816,10 @@ define amdgpu_kernel void @divergent_cfg(ptr addrspace(1) %out, i32 %in) {
; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1164DAGISEL-NEXT: v_cmpx_lt_u32_e32 15, v0
; GFX1164DAGISEL-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX1164DAGISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX1164DAGISEL-NEXT: ; %bb.1: ; %else
; GFX1164DAGISEL-NEXT: s_load_b32 s4, s[2:3], 0x2c
; GFX1164DAGISEL-NEXT: ; implicit-def: $vgpr0
-; GFX1164DAGISEL-NEXT: .LBB4_2: ; %Flow
+; GFX1164DAGISEL-NEXT: ; %bb.2: ; %Flow
; GFX1164DAGISEL-NEXT: s_or_saveexec_b64 s[0:1], s[0:1]
; GFX1164DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v1, s4
@@ -865,13 +856,12 @@ define amdgpu_kernel void @divergent_cfg(ptr addrspace(1) %out, i32 %in) {
; GFX1164GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1164GISEL-NEXT: v_cmpx_le_u32_e32 16, v0
; GFX1164GISEL-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX1164GISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX1164GISEL-NEXT: ; %bb.1: ; %else
; GFX1164GISEL-NEXT: s_load_b32 s4, s[2:3], 0x2c
; GFX1164GISEL-NEXT: ; implicit-def: $vgpr0
; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1164GISEL-NEXT: s_mov_b32 s6, s4
-; GFX1164GISEL-NEXT: .LBB4_2: ; %Flow
+; GFX1164GISEL-NEXT: ; %bb.2: ; %Flow
; GFX1164GISEL-NEXT: s_and_not1_saveexec_b64 s[0:1], s[0:1]
; GFX1164GISEL-NEXT: s_cbranch_execz .LBB4_5
; GFX1164GISEL-NEXT: ; %bb.3: ; %if
@@ -904,11 +894,10 @@ define amdgpu_kernel void @divergent_cfg(ptr addrspace(1) %out, i32 %in) {
; GFX1132DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1132DAGISEL-NEXT: v_cmpx_lt_u32_e32 15, v0
; GFX1132DAGISEL-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX1132DAGISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX1132DAGISEL-NEXT: ; %bb.1: ; %else
; GFX1132DAGISEL-NEXT: s_load_b32 s1, s[2:3], 0x2c
; GFX1132DAGISEL-NEXT: ; implicit-def: $vgpr0
-; GFX1132DAGISEL-NEXT: .LBB4_2: ; %Flow
+; GFX1132DAGISEL-NEXT: ; %bb.2: ; %Flow
; GFX1132DAGISEL-NEXT: s_or_saveexec_b32 s0, s0
; GFX1132DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1132DAGISEL-NEXT: v_mov_b32_e32 v1, s1
@@ -945,13 +934,12 @@ define amdgpu_kernel void @divergent_cfg(ptr addrspace(1) %out, i32 %in) {
; GFX1132GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1132GISEL-NEXT: v_cmpx_le_u32_e32 16, v0
; GFX1132GISEL-NEXT: s_xor_b32 s1, exec_lo, s1
-; GFX1132GISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX1132GISEL-NEXT: ; %bb.1: ; %else
; GFX1132GISEL-NEXT: s_load_b32 s0, s[2:3], 0x2c
; GFX1132GISEL-NEXT: ; implicit-def: $vgpr0
; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1132GISEL-NEXT: s_mov_b32 s0, s0
-; GFX1132GISEL-NEXT: .LBB4_2: ; %Flow
+; GFX1132GISEL-NEXT: ; %bb.2: ; %Flow
; GFX1132GISEL-NEXT: s_and_not1_saveexec_b32 s1, s1
; GFX1132GISEL-NEXT: s_cbranch_execz .LBB4_5
; GFX1132GISEL-NEXT: ; %bb.3: ; %if
diff --git a/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fadd.ll b/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fadd.ll
index 295ae94902da73..7dd8357642bbe7 100644
--- a/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fadd.ll
+++ b/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fadd.ll
@@ -7149,7 +7149,6 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX940-NEXT: s_add_i32 s5, s5, 4
; GFX940-NEXT: ; implicit-def: $vgpr1
; GFX940-NEXT: s_and_saveexec_b64 s[6:7], vcc
-; GFX940-NEXT: s_cbranch_execz .LBB28_2
; GFX940-NEXT: ; %bb.1:
; GFX940-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
; GFX940-NEXT: s_lshl_b32 s8, s5, 3
@@ -7158,7 +7157,7 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX940-NEXT: v_mov_b32_e32 v2, s8
; GFX940-NEXT: ds_add_rtn_f32 v1, v2, v1
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
-; GFX940-NEXT: .LBB28_2:
+; GFX940-NEXT: ; %bb.2:
; GFX940-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX940-NEXT: s_mov_b64 s[8:9], exec
; GFX940-NEXT: v_readfirstlane_b32 s10, v1
@@ -7166,7 +7165,6 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX940-NEXT: v_mbcnt_hi_u32_b32 v1, s9, v1
; GFX940-NEXT: v_cmp_eq_u32_e64 s[0:1], 0, v1
; GFX940-NEXT: s_and_saveexec_b64 s[6:7], s[0:1]
-; GFX940-NEXT: s_cbranch_execz .LBB28_4
; GFX940-NEXT: ; %bb.3:
; GFX940-NEXT: s_bcnt1_i32_b64 s0, s[8:9]
; GFX940-NEXT: v_cvt_f32_ubyte0_e32 v1, s0
@@ -7175,7 +7173,7 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX940-NEXT: v_mov_b32_e32 v2, s0
; GFX940-NEXT: ds_add_f32 v2, v1
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
-; GFX940-NEXT: .LBB28_4:
+; GFX940-NEXT: ; %bb.4:
; GFX940-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX940-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
; GFX940-NEXT: v_mul_f32_e32 v0, 0x42280000, v0
@@ -7204,12 +7202,11 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX940-NEXT: ; implicit-def: $vgpr2
; GFX940-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX940-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX940-NEXT: s_cbranch_execz .LBB28_8
; GFX940-NEXT: ; %bb.7:
; GFX940-NEXT: v_mov_b32_e32 v2, s4
; GFX940-NEXT: ds_add_rtn_f32 v2, v2, v1
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
-; GFX940-NEXT: .LBB28_8:
+; GFX940-NEXT: ; %bb.8:
; GFX940-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX940-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x0
; GFX940-NEXT: v_readfirstlane_b32 s2, v2
@@ -7411,7 +7408,6 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX90A-NEXT: s_add_i32 s5, s5, 4
; GFX90A-NEXT: ; implicit-def: $vgpr1
; GFX90A-NEXT: s_and_saveexec_b64 s[6:7], vcc
-; GFX90A-NEXT: s_cbranch_execz .LBB28_2
; GFX90A-NEXT: ; %bb.1:
; GFX90A-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
; GFX90A-NEXT: s_lshl_b32 s8, s5, 3
@@ -7420,7 +7416,7 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX90A-NEXT: v_mov_b32_e32 v2, s8
; GFX90A-NEXT: ds_add_rtn_f32 v1, v2, v1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: .LBB28_2:
+; GFX90A-NEXT: ; %bb.2:
; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX90A-NEXT: s_mov_b64 s[8:9], exec
; GFX90A-NEXT: v_readfirstlane_b32 s10, v1
@@ -7428,7 +7424,6 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v1, s9, v1
; GFX90A-NEXT: v_cmp_eq_u32_e64 s[0:1], 0, v1
; GFX90A-NEXT: s_and_saveexec_b64 s[6:7], s[0:1]
-; GFX90A-NEXT: s_cbranch_execz .LBB28_4
; GFX90A-NEXT: ; %bb.3:
; GFX90A-NEXT: s_bcnt1_i32_b64 s0, s[8:9]
; GFX90A-NEXT: v_cvt_f32_ubyte0_e32 v1, s0
@@ -7437,7 +7432,7 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX90A-NEXT: v_mov_b32_e32 v2, s0
; GFX90A-NEXT: ds_add_f32 v2, v1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: .LBB28_4:
+; GFX90A-NEXT: ; %bb.4:
; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX90A-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
; GFX90A-NEXT: v_mul_f32_e32 v0, 0x42280000, v0
@@ -7466,12 +7461,11 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX90A-NEXT: ; implicit-def: $vgpr2
; GFX90A-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX90A-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX90A-NEXT: s_cbranch_execz .LBB28_8
; GFX90A-NEXT: ; %bb.7:
; GFX90A-NEXT: v_mov_b32_e32 v2, s4
; GFX90A-NEXT: ds_add_rtn_f32 v2, v2, v1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: .LBB28_8:
+; GFX90A-NEXT: ; %bb.8:
; GFX90A-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX90A-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x0
; GFX90A-NEXT: v_readfirstlane_b32 s2, v2
@@ -7494,7 +7488,6 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX908-NEXT: s_add_i32 s5, s5, 4
; GFX908-NEXT: ; implicit-def: $vgpr1
; GFX908-NEXT: s_and_saveexec_b64 s[6:7], vcc
-; GFX908-NEXT: s_cbranch_execz .LBB28_2
; GFX908-NEXT: ; %bb.1:
; GFX908-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
; GFX908-NEXT: s_lshl_b32 s8, s5, 3
@@ -7503,7 +7496,7 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX908-NEXT: v_mov_b32_e32 v2, s8
; GFX908-NEXT: ds_add_rtn_f32 v1, v2, v1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: .LBB28_2:
+; GFX908-NEXT: ; %bb.2:
; GFX908-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX908-NEXT: s_mov_b64 s[8:9], exec
; GFX908-NEXT: v_readfirstlane_b32 s10, v1
@@ -7511,7 +7504,6 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX908-NEXT: v_mbcnt_hi_u32_b32 v1, s9, v1
; GFX908-NEXT: v_cmp_eq_u32_e64 s[0:1], 0, v1
; GFX908-NEXT: s_and_saveexec_b64 s[6:7], s[0:1]
-; GFX908-NEXT: s_cbranch_execz .LBB28_4
; GFX908-NEXT: ; %bb.3:
; GFX908-NEXT: s_bcnt1_i32_b64 s0, s[8:9]
; GFX908-NEXT: v_cvt_f32_ubyte0_e32 v1, s0
@@ -7520,7 +7512,7 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX908-NEXT: v_mov_b32_e32 v2, s0
; GFX908-NEXT: ds_add_f32 v2, v1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: .LBB28_4:
+; GFX908-NEXT: ; %bb.4:
; GFX908-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX908-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
; GFX908-NEXT: v_mul_f32_e32 v0, 0x42280000, v0
@@ -7549,12 +7541,11 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX908-NEXT: ; implicit-def: $vgpr2
; GFX908-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX908-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX908-NEXT: s_cbranch_execz .LBB28_8
; GFX908-NEXT: ; %bb.7:
; GFX908-NEXT: v_mov_b32_e32 v2, s4
; GFX908-NEXT: ds_add_rtn_f32 v2, v2, v1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: .LBB28_8:
+; GFX908-NEXT: ; %bb.8:
; GFX908-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX908-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x0
; GFX908-NEXT: v_readfirstlane_b32 s2, v2
@@ -7578,7 +7569,6 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX8-NEXT: ; implicit-def: $vgpr1
; GFX8-NEXT: s_mov_b32 m0, -1
; GFX8-NEXT: s_and_saveexec_b64 s[6:7], vcc
-; GFX8-NEXT: s_cbranch_execz .LBB28_2
; GFX8-NEXT: ; %bb.1:
; GFX8-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
; GFX8-NEXT: s_lshl_b32 s8, s5, 3
@@ -7587,7 +7577,7 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX8-NEXT: v_mov_b32_e32 v2, s8
; GFX8-NEXT: ds_add_rtn_f32 v1, v2, v1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: .LBB28_2:
+; GFX8-NEXT: ; %bb.2:
; GFX8-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX8-NEXT: s_mov_b64 s[8:9], exec
; GFX8-NEXT: v_readfirstlane_b32 s10, v1
@@ -7595,7 +7585,6 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX8-NEXT: v_mbcnt_hi_u32_b32 v1, s9, v1
; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], 0, v1
; GFX8-NEXT: s_and_saveexec_b64 s[6:7], s[0:1]
-; GFX8-NEXT: s_cbranch_execz .LBB28_4
; GFX8-NEXT: ; %bb.3:
; GFX8-NEXT: s_bcnt1_i32_b64 s0, s[8:9]
; GFX8-NEXT: v_cvt_f32_ubyte0_e32 v1, s0
@@ -7604,7 +7593,7 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX8-NEXT: v_mov_b32_e32 v2, s0
; GFX8-NEXT: ds_add_f32 v2, v1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: .LBB28_4:
+; GFX8-NEXT: ; %bb.4:
; GFX8-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX8-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
; GFX8-NEXT: v_mul_f32_e32 v0, 0x42280000, v0
@@ -7633,13 +7622,12 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX8-NEXT: ; implicit-def: $vgpr2
; GFX8-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX8-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX8-NEXT: s_cbranch_execz .LBB28_8
; GFX8-NEXT: ; %bb.7:
; GFX8-NEXT: v_mov_b32_e32 v2, s4
; GFX8-NEXT: s_mov_b32 m0, -1
; GFX8-NEXT: ds_add_rtn_f32 v2, v2, v1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: .LBB28_8:
+; GFX8-NEXT: ; %bb.8:
; GFX8-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x0
; GFX8-NEXT: v_readfirstlane_b32 s2, v2
@@ -7923,7 +7911,6 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: s_add_co_i32 s1, s5, 4
; GFX12-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX12-NEXT: s_cbranch_execz .LBB29_2
; GFX12-NEXT: ; %bb.1:
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_bcnt1_i32_b32 s5, s6
@@ -7933,7 +7920,7 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: v_dual_mov_b32 v2, s5 :: v_dual_mul_f32 v1, 0x42280000, v1
; GFX12-NEXT: ds_add_rtn_f32 v1, v2, v1
-; GFX12-NEXT: .LBB29_2:
+; GFX12-NEXT: ; %bb.2:
; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-NEXT: s_mov_b32 s7, exec_lo
@@ -7944,7 +7931,6 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX12-NEXT: s_mov_b32 s6, exec_lo
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX12-NEXT: v_cmpx_eq_u32_e32 0, v2
-; GFX12-NEXT: s_cbranch_execz .LBB29_4
; GFX12-NEXT: ; %bb.3:
; GFX12-NEXT: s_bcnt1_i32_b32 s0, s7
; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
@@ -7953,8 +7939,7 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mul_f32 v1, 0x42280000, v1
; GFX12-NEXT: ds_add_f32 v2, v1
-; GFX12-NEXT: .LBB29_4:
-; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: ; %bb.4:
; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s6
; GFX12-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
; GFX12-NEXT: s_mov_b32 s1, exec_lo
@@ -7988,11 +7973,10 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX12-NEXT: s_and_saveexec_b32 s1, vcc_lo
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_xor_b32 s1, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execz .LBB29_8
; GFX12-NEXT: ; %bb.7:
; GFX12-NEXT: v_dual_mov_b32 v1, s4 :: v_dual_mov_b32 v2, s0
; GFX12-NEXT: ds_add_rtn_f32 v1, v1, v2
-; GFX12-NEXT: .LBB29_8:
+; GFX12-NEXT: ; %bb.8:
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
; GFX12-NEXT: s_load_b64 s[0:1], s[2:3], 0x0
@@ -8018,7 +8002,6 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX940-NEXT: s_add_i32 s5, s5, 4
; GFX940-NEXT: ; implicit-def: $vgpr1
; GFX940-NEXT: s_and_saveexec_b64 s[6:7], vcc
-; GFX940-NEXT: s_cbranch_execz .LBB29_2
; GFX940-NEXT: ; %bb.1:
; GFX940-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
; GFX940-NEXT: s_lshl_b32 s8, s5, 3
@@ -8026,7 +8009,7 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX940-NEXT: v_mul_f32_e32 v1, 0x42280000, v1
; GFX940-NEXT: v_mov_b32_e32 v2, s8
; GFX940-NEXT: ds_add_rtn_f32 v1, v2, v1
-; GFX940-NEXT: .LBB29_2:
+; GFX940-NEXT: ; %bb.2:
; GFX940-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX940-NEXT: s_mov_b64 s[8:9], exec
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
@@ -8035,7 +8018,6 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX940-NEXT: v_mbcnt_hi_u32_b32 v1, s9, v1
; GFX940-NEXT: v_cmp_eq_u32_e64 s[0:1], 0, v1
; GFX940-NEXT: s_and_saveexec_b64 s[6:7], s[0:1]
-; GFX940-NEXT: s_cbranch_execz .LBB29_4
; GFX940-NEXT: ; %bb.3:
; GFX940-NEXT: s_bcnt1_i32_b64 s0, s[8:9]
; GFX940-NEXT: v_cvt_f32_ubyte0_e32 v1, s0
@@ -8043,7 +8025,7 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX940-NEXT: v_mul_f32_e32 v1, 0x42280000, v1
; GFX940-NEXT: v_mov_b32_e32 v2, s0
; GFX940-NEXT: ds_add_f32 v2, v1
-; GFX940-NEXT: .LBB29_4:
+; GFX940-NEXT: ; %bb.4:
; GFX940-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX940-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
; GFX940-NEXT: v_mul_f32_e32 v0, 0x42280000, v0
@@ -8072,11 +8054,10 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX940-NEXT: ; implicit-def: $vgpr2
; GFX940-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX940-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX940-NEXT: s_cbranch_execz .LBB29_8
; GFX940-NEXT: ; %bb.7:
; GFX940-NEXT: v_mov_b32_e32 v2, s4
; GFX940-NEXT: ds_add_rtn_f32 v2, v2, v1
-; GFX940-NEXT: .LBB29_8:
+; GFX940-NEXT: ; %bb.8:
; GFX940-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX940-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x0
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
@@ -8100,7 +8081,6 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_add_i32 s1, s5, 4
; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_cbranch_execz .LBB29_2
; GFX11-NEXT: ; %bb.1:
; GFX11-NEXT: s_bcnt1_i32_b32 s5, s6
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
@@ -8109,7 +8089,7 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v2, s5 :: v_dual_mul_f32 v1, 0x42280000, v1
; GFX11-NEXT: ds_add_rtn_f32 v1, v2, v1
-; GFX11-NEXT: .LBB29_2:
+; GFX11-NEXT: ; %bb.2:
; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
; GFX11-NEXT: s_mov_b32 s7, exec_lo
@@ -8118,7 +8098,6 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX11-NEXT: v_mbcnt_lo_u32_b32 v2, s7, 0
; GFX11-NEXT: s_mov_b32 s6, exec_lo
; GFX11-NEXT: v_cmpx_eq_u32_e32 0, v2
-; GFX11-NEXT: s_cbranch_execz .LBB29_4
; GFX11-NEXT: ; %bb.3:
; GFX11-NEXT: s_bcnt1_i32_b32 s0, s7
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
@@ -8127,7 +8106,7 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mul_f32 v1, 0x42280000, v1
; GFX11-NEXT: ds_add_f32 v2, v1
-; GFX11-NEXT: .LBB29_4:
+; GFX11-NEXT: ; %bb.4:
; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s6
; GFX11-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
; GFX11-NEXT: v_bfrev_b32_e32 v1, 1
@@ -8159,11 +8138,10 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX11-NEXT: ; implicit-def: $vgpr2
; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execz .LBB29_8
; GFX11-NEXT: ; %bb.7:
; GFX11-NEXT: v_mov_b32_e32 v2, s4
; GFX11-NEXT: ds_add_rtn_f32 v2, v2, v1
-; GFX11-NEXT: .LBB29_8:
+; GFX11-NEXT: ; %bb.8:
; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX11-NEXT: s_load_b64 s[0:1], s[2:3], 0x0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
@@ -8186,7 +8164,6 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_add_i32 s1, s5, 4
; GFX10-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX10-NEXT: s_cbranch_execz .LBB29_2
; GFX10-NEXT: ; %bb.1:
; GFX10-NEXT: s_bcnt1_i32_b32 s5, s6
; GFX10-NEXT: v_cvt_f32_ubyte0_e32 v1, s5
@@ -8194,7 +8171,7 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX10-NEXT: v_mov_b32_e32 v2, s5
; GFX10-NEXT: v_mul_f32_e32 v1, 0x42280000, v1
; GFX10-NEXT: ds_add_rtn_f32 v1, v2, v1
-; GFX10-NEXT: .LBB29_2:
+; GFX10-NEXT: ; %bb.2:
; GFX10-NEXT: s_waitcnt_depctr 0xffe3
; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX10-NEXT: s_mov_b32 s7, exec_lo
@@ -8203,7 +8180,6 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX10-NEXT: v_mbcnt_lo_u32_b32 v2, s7, 0
; GFX10-NEXT: v_cmp_eq_u32_e64 s0, 0, v2
; GFX10-NEXT: s_and_saveexec_b32 s6, s0
-; GFX10-NEXT: s_cbranch_execz .LBB29_4
; GFX10-NEXT: ; %bb.3:
; GFX10-NEXT: s_bcnt1_i32_b32 s0, s7
; GFX10-NEXT: v_cvt_f32_ubyte0_e32 v1, s0
@@ -8211,7 +8187,7 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX10-NEXT: v_mov_b32_e32 v2, s0
; GFX10-NEXT: v_mul_f32_e32 v1, 0x42280000, v1
; GFX10-NEXT: ds_add_f32 v2, v1
-; GFX10-NEXT: .LBB29_4:
+; GFX10-NEXT: ; %bb.4:
; GFX10-NEXT: s_waitcnt_depctr 0xffe3
; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s6
; GFX10-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
@@ -8238,11 +8214,10 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX10-NEXT: ; implicit-def: $vgpr2
; GFX10-NEXT: s_and_saveexec_b32 s0, vcc_lo
; GFX10-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX10-NEXT: s_cbranch_execz .LBB29_8
; GFX10-NEXT: ; %bb.7:
; GFX10-NEXT: v_mov_b32_e32 v2, s4
; GFX10-NEXT: ds_add_rtn_f32 v2, v2, v1
-; GFX10-NEXT: .LBB29_8:
+; GFX10-NEXT: ; %bb.8:
; GFX10-NEXT: s_waitcnt_depctr 0xffe3
; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX10-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x0
@@ -8251,7 +8226,6 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX10-NEXT: v_mov_b32_e32 v1, 0
; GFX10-NEXT: v_add_f32_e32 v0, s2, v0
; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, s2, vcc_lo
-; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-NEXT: global_store_dword v1, v0, s[0:1]
; GFX10-NEXT: s_endpgm
;
@@ -8266,7 +8240,6 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX90A-NEXT: s_add_i32 s5, s5, 4
; GFX90A-NEXT: ; implicit-def: $vgpr1
; GFX90A-NEXT: s_and_saveexec_b64 s[6:7], vcc
-; GFX90A-NEXT: s_cbranch_execz .LBB29_2
; GFX90A-NEXT: ; %bb.1:
; GFX90A-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
; GFX90A-NEXT: s_lshl_b32 s8, s5, 3
@@ -8274,7 +8247,7 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX90A-NEXT: v_mul_f32_e32 v1, 0x42280000, v1
; GFX90A-NEXT: v_mov_b32_e32 v2, s8
; GFX90A-NEXT: ds_add_rtn_f32 v1, v2, v1
-; GFX90A-NEXT: .LBB29_2:
+; GFX90A-NEXT: ; %bb.2:
; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX90A-NEXT: s_mov_b64 s[8:9], exec
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
@@ -8283,7 +8256,6 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v1, s9, v1
; GFX90A-NEXT: v_cmp_eq_u32_e64 s[0:1], 0, v1
; GFX90A-NEXT: s_and_saveexec_b64 s[6:7], s[0:1]
-; GFX90A-NEXT: s_cbranch_execz .LBB29_4
; GFX90A-NEXT: ; %bb.3:
; GFX90A-NEXT: s_bcnt1_i32_b64 s0, s[8:9]
; GFX90A-NEXT: v_cvt_f32_ubyte0_e32 v1, s0
@@ -8291,7 +8263,7 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX90A-NEXT: v_mul_f32_e32 v1, 0x42280000, v1
; GFX90A-NEXT: v_mov_b32_e32 v2, s0
; GFX90A-NEXT: ds_add_f32 v2, v1
-; GFX90A-NEXT: .LBB29_4:
+; GFX90A-NEXT: ; %bb.4:
; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX90A-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
; GFX90A-NEXT: v_mul_f32_e32 v0, 0x42280000, v0
@@ -8320,11 +8292,10 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX90A-NEXT: ; implicit-def: $vgpr2
; GFX90A-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX90A-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX90A-NEXT: s_cbranch_execz .LBB29_8
; GFX90A-NEXT: ; %bb.7:
; GFX90A-NEXT: v_mov_b32_e32 v2, s4
; GFX90A-NEXT: ds_add_rtn_f32 v2, v2, v1
-; GFX90A-NEXT: .LBB29_8:
+; GFX90A-NEXT: ; %bb.8:
; GFX90A-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX90A-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
@@ -8347,7 +8318,6 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX908-NEXT: s_add_i32 s5, s5, 4
; GFX908-NEXT: ; implicit-def: $vgpr1
; GFX908-NEXT: s_and_saveexec_b64 s[6:7], vcc
-; GFX908-NEXT: s_cbranch_execz .LBB29_2
; GFX908-NEXT: ; %bb.1:
; GFX908-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
; GFX908-NEXT: s_lshl_b32 s8, s5, 3
@@ -8355,7 +8325,7 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX908-NEXT: v_mul_f32_e32 v1, 0x42280000, v1
; GFX908-NEXT: v_mov_b32_e32 v2, s8
; GFX908-NEXT: ds_add_rtn_f32 v1, v2, v1
-; GFX908-NEXT: .LBB29_2:
+; GFX908-NEXT: ; %bb.2:
; GFX908-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX908-NEXT: s_mov_b64 s[8:9], exec
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
@@ -8364,7 +8334,6 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX908-NEXT: v_mbcnt_hi_u32_b32 v1, s9, v1
; GFX908-NEXT: v_cmp_eq_u32_e64 s[0:1], 0, v1
; GFX908-NEXT: s_and_saveexec_b64 s[6:7], s[0:1]
-; GFX908-NEXT: s_cbranch_execz .LBB29_4
; GFX908-NEXT: ; %bb.3:
; GFX908-NEXT: s_bcnt1_i32_b64 s0, s[8:9]
; GFX908-NEXT: v_cvt_f32_ubyte0_e32 v1, s0
@@ -8372,7 +8341,7 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX908-NEXT: v_mul_f32_e32 v1, 0x42280000, v1
; GFX908-NEXT: v_mov_b32_e32 v2, s0
; GFX908-NEXT: ds_add_f32 v2, v1
-; GFX908-NEXT: .LBB29_4:
+; GFX908-NEXT: ; %bb.4:
; GFX908-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX908-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
; GFX908-NEXT: v_mul_f32_e32 v0, 0x42280000, v0
@@ -8401,11 +8370,10 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX908-NEXT: ; implicit-def: $vgpr2
; GFX908-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX908-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX908-NEXT: s_cbranch_execz .LBB29_8
; GFX908-NEXT: ; %bb.7:
; GFX908-NEXT: v_mov_b32_e32 v2, s4
; GFX908-NEXT: ds_add_rtn_f32 v2, v2, v1
-; GFX908-NEXT: .LBB29_8:
+; GFX908-NEXT: ; %bb.8:
; GFX908-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX908-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x0
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
@@ -8429,7 +8397,6 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX8-NEXT: ; implicit-def: $vgpr1
; GFX8-NEXT: s_mov_b32 m0, -1
; GFX8-NEXT: s_and_saveexec_b64 s[6:7], vcc
-; GFX8-NEXT: s_cbranch_execz .LBB29_2
; GFX8-NEXT: ; %bb.1:
; GFX8-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
; GFX8-NEXT: s_lshl_b32 s8, s5, 3
@@ -8437,7 +8404,7 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX8-NEXT: v_mul_f32_e32 v1, 0x42280000, v1
; GFX8-NEXT: v_mov_b32_e32 v2, s8
; GFX8-NEXT: ds_add_rtn_f32 v1, v2, v1
-; GFX8-NEXT: .LBB29_2:
+; GFX8-NEXT: ; %bb.2:
; GFX8-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX8-NEXT: s_mov_b64 s[8:9], exec
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
@@ -8446,7 +8413,6 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX8-NEXT: v_mbcnt_hi_u32_b32 v1, s9, v1
; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], 0, v1
; GFX8-NEXT: s_and_saveexec_b64 s[6:7], s[0:1]
-; GFX8-NEXT: s_cbranch_execz .LBB29_4
; GFX8-NEXT: ; %bb.3:
; GFX8-NEXT: s_bcnt1_i32_b64 s0, s[8:9]
; GFX8-NEXT: v_cvt_f32_ubyte0_e32 v1, s0
@@ -8454,7 +8420,7 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX8-NEXT: v_mul_f32_e32 v1, 0x42280000, v1
; GFX8-NEXT: v_mov_b32_e32 v2, s0
; GFX8-NEXT: ds_add_f32 v2, v1
-; GFX8-NEXT: .LBB29_4:
+; GFX8-NEXT: ; %bb.4:
; GFX8-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX8-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
; GFX8-NEXT: v_mul_f32_e32 v0, 0x42280000, v0
@@ -8483,12 +8449,11 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX8-NEXT: ; implicit-def: $vgpr2
; GFX8-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX8-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX8-NEXT: s_cbranch_execz .LBB29_8
; GFX8-NEXT: ; %bb.7:
; GFX8-NEXT: v_mov_b32_e32 v2, s4
; GFX8-NEXT: s_mov_b32 m0, -1
; GFX8-NEXT: ds_add_rtn_f32 v2, v2, v1
-; GFX8-NEXT: .LBB29_8:
+; GFX8-NEXT: ; %bb.8:
; GFX8-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x0
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
diff --git a/llvm/test/CodeGen/AMDGPU/ret_jump.ll b/llvm/test/CodeGen/AMDGPU/ret_jump.ll
index ad38d78ddb2ff1..66a55d9eb128c6 100644
--- a/llvm/test/CodeGen/AMDGPU/ret_jump.ll
+++ b/llvm/test/CodeGen/AMDGPU/ret_jump.ll
@@ -65,7 +65,6 @@ ret.bb: ; preds = %else, %main_body
; GCN: .LBB{{[0-9]+_[0-9]+}}: ; %else
; GCN: s_and_saveexec_b64 [[SAVE_EXEC:s\[[0-9]+:[0-9]+\]]], vcc
-; GCN-NEXT: s_cbranch_execz .LBB1_{{[0-9]+}}
; GCN-NEXT: ; %unreachable.bb
; GCN: ds_write_b32
diff --git a/llvm/test/CodeGen/AMDGPU/sgpr-control-flow.ll b/llvm/test/CodeGen/AMDGPU/sgpr-control-flow.ll
index 0630cca7c099b8..01aa5a42ca5be8 100644
--- a/llvm/test/CodeGen/AMDGPU/sgpr-control-flow.ll
+++ b/llvm/test/CodeGen/AMDGPU/sgpr-control-flow.ll
@@ -114,20 +114,18 @@ define amdgpu_kernel void @sgpr_if_else_valu_br(ptr addrspace(1) %out, float %a,
; SI-NEXT: v_cmp_lg_f32_e32 vcc, 0, v0
; SI-NEXT: s_and_saveexec_b64 s[0:1], vcc
; SI-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; SI-NEXT: s_cbranch_execz .LBB2_2
; SI-NEXT: ; %bb.1: ; %else
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_add_i32 s8, s6, s7
-; SI-NEXT: .LBB2_2: ; %Flow
+; SI-NEXT: ; %bb.2: ; %Flow
; SI-NEXT: s_or_saveexec_b64 s[0:1], s[0:1]
; SI-NEXT: v_mov_b32_e32 v0, s8
; SI-NEXT: s_xor_b64 exec, exec, s[0:1]
-; SI-NEXT: s_cbranch_execz .LBB2_4
; SI-NEXT: ; %bb.3: ; %if
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_add_i32 s4, s4, s5
; SI-NEXT: v_mov_b32_e32 v0, s4
-; SI-NEXT: .LBB2_4: ; %endif
+; SI-NEXT: ; %bb.4: ; %endif
; SI-NEXT: s_or_b64 exec, exec, s[0:1]
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
diff --git a/llvm/test/CodeGen/AMDGPU/si-annotate-cf.ll b/llvm/test/CodeGen/AMDGPU/si-annotate-cf.ll
index 9f3596359a6625..1ad1bd09c1f204 100644
--- a/llvm/test/CodeGen/AMDGPU/si-annotate-cf.ll
+++ b/llvm/test/CodeGen/AMDGPU/si-annotate-cf.ll
@@ -74,14 +74,13 @@ define amdgpu_kernel void @phi_cond_outside_loop(i32 %b) {
; SI-NEXT: s_mov_b64 s[0:1], 0
; SI-NEXT: s_mov_b64 s[4:5], 0
; SI-NEXT: s_and_saveexec_b64 s[6:7], vcc
-; SI-NEXT: s_cbranch_execz .LBB1_2
; SI-NEXT: ; %bb.1: ; %else
; SI-NEXT: s_load_dword s2, s[2:3], 0x9
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_cmp_eq_u32 s2, 0
; SI-NEXT: s_cselect_b64 s[2:3], -1, 0
; SI-NEXT: s_and_b64 s[4:5], s[2:3], exec
-; SI-NEXT: .LBB1_2: ; %endif
+; SI-NEXT: ; %bb.2: ; %endif
; SI-NEXT: s_or_b64 exec, exec, s[6:7]
; SI-NEXT: .LBB1_3: ; %loop
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
@@ -99,14 +98,13 @@ define amdgpu_kernel void @phi_cond_outside_loop(i32 %b) {
; FLAT-NEXT: s_mov_b64 s[0:1], 0
; FLAT-NEXT: s_mov_b64 s[4:5], 0
; FLAT-NEXT: s_and_saveexec_b64 s[6:7], vcc
-; FLAT-NEXT: s_cbranch_execz .LBB1_2
; FLAT-NEXT: ; %bb.1: ; %else
; FLAT-NEXT: s_load_dword s2, s[2:3], 0x24
; FLAT-NEXT: s_waitcnt lgkmcnt(0)
; FLAT-NEXT: s_cmp_eq_u32 s2, 0
; FLAT-NEXT: s_cselect_b64 s[2:3], -1, 0
; FLAT-NEXT: s_and_b64 s[4:5], s[2:3], exec
-; FLAT-NEXT: .LBB1_2: ; %endif
+; FLAT-NEXT: ; %bb.2: ; %endif
; FLAT-NEXT: s_or_b64 exec, exec, s[6:7]
; FLAT-NEXT: .LBB1_3: ; %loop
; FLAT-NEXT: ; =>This Inner Loop Header: Depth=1
diff --git a/llvm/test/CodeGen/AMDGPU/si-unify-exit-return-unreachable.ll b/llvm/test/CodeGen/AMDGPU/si-unify-exit-return-unreachable.ll
index 1eef7b967f6d99..41e270c311ba3c 100644
--- a/llvm/test/CodeGen/AMDGPU/si-unify-exit-return-unreachable.ll
+++ b/llvm/test/CodeGen/AMDGPU/si-unify-exit-return-unreachable.ll
@@ -149,12 +149,14 @@ define void @my_func(i32 %0) {
; GCN-NEXT: s_mov_b64 s[6:7], 0
; GCN-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GCN-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; GCN-NEXT: s_cbranch_execz .LBB0_12
; GCN-NEXT: ; %bb.11: ; %LeafBlock5
; GCN-NEXT: s_mov_b64 s[6:7], exec
; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 2, v0
; GCN-NEXT: s_and_b64 s[8:9], vcc, exec
-; GCN-NEXT: ; %bb.12: ; %Flow13
+; GCN-NEXT: .LBB0_12: ; %Flow13
; GCN-NEXT: s_andn2_saveexec_b64 s[10:11], s[4:5]
+; GCN-NEXT: s_cbranch_execz .LBB0_14
; GCN-NEXT: ; %bb.13: ; %LeafBlock3
; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GCN-NEXT: v_cmp_ne_u32_e64 s[4:5], 0, v0
@@ -164,7 +166,7 @@ define void @my_func(i32 %0) {
; GCN-NEXT: s_and_b64 s[12:13], vcc, exec
; GCN-NEXT: s_or_b64 s[6:7], s[6:7], s[4:5]
; GCN-NEXT: s_or_b64 s[8:9], s[8:9], s[12:13]
-; GCN-NEXT: ; %bb.14: ; %Flow14
+; GCN-NEXT: .LBB0_14: ; %Flow14
; GCN-NEXT: s_or_b64 exec, exec, s[10:11]
; GCN-NEXT: s_mov_b64 s[4:5], 0
; GCN-NEXT: s_and_saveexec_b64 s[10:11], s[8:9]
diff --git a/llvm/test/CodeGen/AMDGPU/uniform-phi-with-undef.ll b/llvm/test/CodeGen/AMDGPU/uniform-phi-with-undef.ll
index 64d4a0cf785013..d79245780c8bb0 100644
--- a/llvm/test/CodeGen/AMDGPU/uniform-phi-with-undef.ll
+++ b/llvm/test/CodeGen/AMDGPU/uniform-phi-with-undef.ll
@@ -15,7 +15,6 @@ define amdgpu_ps float @uniform_phi_with_undef(float inreg %c, float %v, i32 %x,
; GCN-NEXT: s_mov_b32 s1, exec_lo
; GCN-NEXT: s_and_b32 s2, s1, s2
; GCN-NEXT: s_mov_b32 exec_lo, s2
-; GCN-NEXT: s_cbranch_execz .LBB0_2
; GCN-NEXT: ; %bb.1: ; %if
; GCN-NEXT: s_mov_b32 s2, 0x40400000
; GCN-NEXT: v_div_scale_f32 v1, s3, s2, s2, v0
@@ -30,7 +29,7 @@ define amdgpu_ps float @uniform_phi_with_undef(float inreg %c, float %v, i32 %x,
; GCN-NEXT: v_fma_f32 v1, -v1, v4, v3
; GCN-NEXT: v_div_fmas_f32 v1, v1, v2, v4
; GCN-NEXT: v_div_fixup_f32 v0, v1, s2, v0
-; GCN-NEXT: .LBB0_2: ; %end
+; GCN-NEXT: ; %bb.2: ; %end
; GCN-NEXT: s_or_b32 exec_lo, exec_lo, s1
; GCN-NEXT: v_add_f32_e64 v0, v0, s0
; GCN-NEXT: ; return to shader part epilog
diff --git a/llvm/test/CodeGen/AMDGPU/unstructured-cfg-def-use-issue.ll b/llvm/test/CodeGen/AMDGPU/unstructured-cfg-def-use-issue.ll
index a5e1506114f2d0..8842535308adec 100644
--- a/llvm/test/CodeGen/AMDGPU/unstructured-cfg-def-use-issue.ll
+++ b/llvm/test/CodeGen/AMDGPU/unstructured-cfg-def-use-issue.ll
@@ -359,23 +359,21 @@ define hidden void @blam() {
; GCN-NEXT: ; in Loop: Header=BB1_2 Depth=1
; GCN-NEXT: s_mov_b64 s[8:9], s[52:53]
; GCN-NEXT: s_and_saveexec_b64 s[6:7], s[42:43]
-; GCN-NEXT: s_cbranch_execz .LBB1_7
; GCN-NEXT: ; %bb.6: ; %bb16
; GCN-NEXT: ; in Loop: Header=BB1_2 Depth=1
; GCN-NEXT: buffer_store_dword v44, off, s[0:3], 0
; GCN-NEXT: s_or_b64 s[8:9], s[52:53], exec
-; GCN-NEXT: .LBB1_7: ; %Flow3
+; GCN-NEXT: ; %bb.7: ; %Flow3
; GCN-NEXT: ; in Loop: Header=BB1_2 Depth=1
; GCN-NEXT: s_or_b64 exec, exec, s[6:7]
; GCN-NEXT: s_mov_b64 s[6:7], 0
; GCN-NEXT: s_and_saveexec_b64 s[10:11], s[8:9]
; GCN-NEXT: s_xor_b64 s[8:9], exec, s[10:11]
-; GCN-NEXT: s_cbranch_execz .LBB1_9
; GCN-NEXT: ; %bb.8: ; %bb17
; GCN-NEXT: ; in Loop: Header=BB1_2 Depth=1
; GCN-NEXT: s_mov_b64 s[6:7], exec
; GCN-NEXT: buffer_store_dword v43, off, s[0:3], 0
-; GCN-NEXT: .LBB1_9: ; %Flow4
+; GCN-NEXT: ; %bb.9: ; %Flow4
; GCN-NEXT: ; in Loop: Header=BB1_2 Depth=1
; GCN-NEXT: s_or_b64 exec, exec, s[8:9]
; GCN-NEXT: s_and_b64 s[6:7], s[6:7], exec
@@ -401,12 +399,11 @@ define hidden void @blam() {
; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
; GCN-NEXT: s_mov_b64 s[10:11], s[6:7]
; GCN-NEXT: s_and_saveexec_b64 s[12:13], vcc
-; GCN-NEXT: s_cbranch_execz .LBB1_15
; GCN-NEXT: ; %bb.14: ; %bb10
; GCN-NEXT: ; in Loop: Header=BB1_2 Depth=1
; GCN-NEXT: buffer_store_dword v44, off, s[0:3], 0
; GCN-NEXT: s_or_b64 s[10:11], s[6:7], exec
-; GCN-NEXT: .LBB1_15: ; %Flow6
+; GCN-NEXT: ; %bb.15: ; %Flow6
; GCN-NEXT: ; in Loop: Header=BB1_2 Depth=1
; GCN-NEXT: s_or_b64 exec, exec, s[12:13]
; GCN-NEXT: s_andn2_b64 s[4:5], s[4:5], exec
diff --git a/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/amdgpu_generated_funcs.ll.generated.expected b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/amdgpu_generated_funcs.ll.generated.expected
index d1500e002d7e92..2b0b22e8b1ba97 100644
--- a/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/amdgpu_generated_funcs.ll.generated.expected
+++ b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/amdgpu_generated_funcs.ll.generated.expected
@@ -94,11 +94,10 @@ attributes #0 = { noredzone nounwind ssp uwtable "frame-pointer"="all" }
; CHECK-NEXT: buffer_store_dword v3, off, s[0:3], s33 offset:16
; CHECK-NEXT: .LBB0_2: ; %Flow
; CHECK-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
-; CHECK-NEXT: s_cbranch_execz .LBB0_4
; CHECK-NEXT: ; %bb.3:
; CHECK-NEXT: v_mov_b32_e32 v0, 1
; CHECK-NEXT: buffer_store_dword v0, off, s[0:3], s33 offset:12
-; CHECK-NEXT: .LBB0_4:
+; CHECK-NEXT: ; %bb.4:
; CHECK-NEXT: s_or_b64 exec, exec, s[4:5]
; CHECK-NEXT: v_mov_b32_e32 v0, 0
; CHECK-NEXT: s_addk_i32 s32, 0xfa00
More information about the llvm-commits
mailing list