[llvm] [AMDGPU][SIPreEmitPeephole] mustRetainExeczBranch: estimate ThenBlock cost using MachineTraceMetrics (PR #111117)
Juan Manuel Martinez Caamaño via llvm-commits
llvm-commits at lists.llvm.org
Tue Feb 11 07:42:54 PST 2025
https://github.com/jmmartinez updated https://github.com/llvm/llvm-project/pull/111117
>From 54796a96815e4e96cb0235905c13bf1d48d088b3 Mon Sep 17 00:00:00 2001
From: Juan Manuel Martinez Caamaño <juamarti at amd.com>
Date: Tue, 11 Feb 2025 14:41:36 +0100
Subject: [PATCH] [AMDGPU][SIPreEmitPeephole] mustRetainExeczBranch: estimate
 ThenBlock cost using MachineTraceMetrics
---
llvm/lib/Target/AMDGPU/SIPreEmitPeephole.cpp | 110 +++++++++---------
...vergence-divergent-i1-used-outside-loop.ll | 9 +-
.../GlobalISel/divergence-structurizer.ll | 12 +-
.../GlobalISel/llvm.amdgcn.ballot.i32.ll | 12 +-
.../GlobalISel/llvm.amdgcn.ballot.i64.ll | 6 +-
.../AMDGPU/GlobalISel/vni8-across-blocks.ll | 15 +--
.../atomic_optimizations_pixelshader.ll | 18 +--
llvm/test/CodeGen/AMDGPU/atomicrmw-expand.ll | 9 +-
.../CodeGen/AMDGPU/branch-condition-and.ll | 3 +-
.../AMDGPU/cgp-addressing-modes-flat.ll | 12 +-
llvm/test/CodeGen/AMDGPU/collapse-endcf.ll | 3 +-
llvm/test/CodeGen/AMDGPU/cse-convergent.ll | 3 +-
llvm/test/CodeGen/AMDGPU/fptoi.i128.ll | 18 ++-
.../CodeGen/AMDGPU/insert-skips-gfx10.mir | 4 +-
llvm/test/CodeGen/AMDGPU/itofp.i128.bf.ll | 6 +-
llvm/test/CodeGen/AMDGPU/itofp.i128.ll | 54 ++++++---
llvm/test/CodeGen/AMDGPU/llc-pipeline.ll | 8 ++
.../CodeGen/AMDGPU/llvm.amdgcn.ballot.i32.ll | 6 +-
.../CodeGen/AMDGPU/llvm.amdgcn.ballot.i64.ll | 3 +-
.../AMDGPU/llvm.amdgcn.init.whole.wave-w32.ll | 35 ++++--
.../AMDGPU/llvm.amdgcn.init.whole.wave-w64.ll | 7 +-
.../AMDGPU/llvm.amdgcn.inverse.ballot.i64.ll | 3 +-
.../CodeGen/AMDGPU/llvm.amdgcn.softwqm.ll | 3 +-
.../CodeGen/AMDGPU/local-atomicrmw-fadd.ll | 6 +-
llvm/test/CodeGen/AMDGPU/ret_jump.ll | 1 +
.../AMDGPU/subreg-coalescer-undef-use.ll | 3 +-
.../test/CodeGen/AMDGPU/vni8-across-blocks.ll | 15 +--
llvm/test/CodeGen/AMDGPU/wave32.ll | 6 +-
llvm/test/CodeGen/AMDGPU/while-break.ll | 3 +-
.../test/CodeGen/AMDGPU/wwm-reserved-spill.ll | 3 +-
llvm/test/CodeGen/AMDGPU/wwm-reserved.ll | 6 +-
31 files changed, 226 insertions(+), 176 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIPreEmitPeephole.cpp b/llvm/lib/Target/AMDGPU/SIPreEmitPeephole.cpp
index 2bb70c138a50c4..f3c3d87838d793 100644
--- a/llvm/lib/Target/AMDGPU/SIPreEmitPeephole.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPreEmitPeephole.cpp
@@ -15,7 +15,9 @@
#include "GCNSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineTraceMetrics.h"
#include "llvm/CodeGen/TargetSchedule.h"
+#include "llvm/InitializePasses.h"
#include "llvm/Support/BranchProbability.h"
using namespace llvm;
@@ -29,6 +31,10 @@ class SIPreEmitPeephole : public MachineFunctionPass {
const SIInstrInfo *TII = nullptr;
const SIRegisterInfo *TRI = nullptr;
+ // Trace metrics analysis result, used to estimate the number of cycles it
+ // takes to execute a block.
+ MachineTraceMetrics::Ensemble *Traces;
+
bool optimizeVccBranch(MachineInstr &MI) const;
bool optimizeSetGPR(MachineInstr &First, MachineInstr &MI) const;
bool getBlockDestinations(MachineBasicBlock &SrcMBB,
@@ -37,9 +43,14 @@ class SIPreEmitPeephole : public MachineFunctionPass {
SmallVectorImpl<MachineOperand> &Cond);
bool mustRetainExeczBranch(const MachineInstr &Branch,
const MachineBasicBlock &From,
- const MachineBasicBlock &To) const;
+ const MachineBasicBlock &To);
bool removeExeczBranch(MachineInstr &MI, MachineBasicBlock &SrcMBB);
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.addRequired<MachineTraceMetricsWrapperPass>();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
public:
static char ID;
@@ -52,8 +63,11 @@ class SIPreEmitPeephole : public MachineFunctionPass {
} // End anonymous namespace.
-INITIALIZE_PASS(SIPreEmitPeephole, DEBUG_TYPE,
- "SI peephole optimizations", false, false)
+INITIALIZE_PASS_BEGIN(SIPreEmitPeephole, DEBUG_TYPE,
+ "SI peephole optimizations", false, false)
+INITIALIZE_PASS_DEPENDENCY(MachineTraceMetricsWrapperPass)
+INITIALIZE_PASS_END(SIPreEmitPeephole, DEBUG_TYPE, "SI peephole optimizations",
+ false, false)
char SIPreEmitPeephole::ID = 0;
@@ -299,58 +313,23 @@ bool SIPreEmitPeephole::getBlockDestinations(
return true;
}
-namespace {
-class BranchWeightCostModel {
- const SIInstrInfo &TII;
- const TargetSchedModel &SchedModel;
- BranchProbability BranchProb;
- static constexpr uint64_t BranchNotTakenCost = 1;
- uint64_t BranchTakenCost;
- uint64_t ThenCyclesCost = 0;
+bool SIPreEmitPeephole::mustRetainExeczBranch(const MachineInstr &Branch,
+ const MachineBasicBlock &From,
+ const MachineBasicBlock &To) {
+ assert(is_contained(Branch.getParent()->successors(), &From));
-public:
- BranchWeightCostModel(const SIInstrInfo &TII, const MachineInstr &Branch,
- const MachineBasicBlock &Succ)
- : TII(TII), SchedModel(TII.getSchedModel()) {
- const MachineBasicBlock &Head = *Branch.getParent();
- const auto *FromIt = find(Head.successors(), &Succ);
- assert(FromIt != Head.succ_end());
-
- BranchProb = Head.getSuccProbability(FromIt);
- if (BranchProb.isUnknown())
- BranchProb = BranchProbability::getZero();
- BranchTakenCost = SchedModel.computeInstrLatency(&Branch);
- }
+ const MachineBasicBlock &Head = *Branch.getParent();
+ const auto *FromIt = find(Head.successors(), &From);
+ assert(FromIt != Head.succ_end());
- bool isProfitable(const MachineInstr &MI) {
- if (TII.isWaitcnt(MI.getOpcode()))
- return false;
+ auto BranchProb = Head.getSuccProbability(FromIt);
+ if (BranchProb.isUnknown())
+ return true;
- ThenCyclesCost += SchedModel.computeInstrLatency(&MI);
-
- // Consider `P = N/D` to be the probability of execz being false (skipping
- // the then-block) The transformation is profitable if always executing the
- // 'then' block is cheaper than executing sometimes 'then' and always
- // executing s_cbranch_execz:
- // * ThenCost <= P*ThenCost + (1-P)*BranchTakenCost + P*BranchNotTakenCost
- // * (1-P) * ThenCost <= (1-P)*BranchTakenCost + P*BranchNotTakenCost
- // * (D-N)/D * ThenCost <= (D-N)/D * BranchTakenCost + N/D *
- // BranchNotTakenCost
- uint64_t Numerator = BranchProb.getNumerator();
- uint64_t Denominator = BranchProb.getDenominator();
- return (Denominator - Numerator) * ThenCyclesCost <=
- ((Denominator - Numerator) * BranchTakenCost +
- Numerator * BranchNotTakenCost);
- }
-};
+ const MachineFunction *MF = From.getParent();
-bool SIPreEmitPeephole::mustRetainExeczBranch(
- const MachineInstr &Branch, const MachineBasicBlock &From,
- const MachineBasicBlock &To) const {
- assert(is_contained(Branch.getParent()->successors(), &From));
- BranchWeightCostModel CostModel{*TII, Branch, From};
+ SmallVector<const MachineBasicBlock *> ThenBlocks;
- const MachineFunction *MF = From.getParent();
for (MachineFunction::const_iterator MBBI(&From), ToI(&To), End = MF->end();
MBBI != End && MBBI != ToI; ++MBBI) {
const MachineBasicBlock &MBB = *MBBI;
@@ -372,14 +351,37 @@ bool SIPreEmitPeephole::mustRetainExeczBranch(
if (TII->hasUnwantedEffectsWhenEXECEmpty(MI))
return true;
- if (!CostModel.isProfitable(MI))
+ if (TII->isWaitcnt(MI.getOpcode()))
return true;
}
+ ThenBlocks.push_back(&MBB);
}
- return false;
+ MachineTraceMetrics::Trace Trace = Traces->getTrace(&Head);
+ const MCSchedClassDesc *BranchSchedClassDesc =
+ TII->getSchedModel().getMCSchedModel()->getSchedClassDesc(
+ Branch.getDesc().getSchedClass());
+ unsigned ResourceThenWithoutBranch =
+ Trace.getResourceLength(ThenBlocks, {}, {BranchSchedClassDesc});
+ unsigned ResourceThenWithBranch = Trace.getResourceLength(ThenBlocks, {}, {});
+ unsigned ResourceElseWithBranch = Trace.getResourceLength({}, {}, {});
+
+  // Consider `P = N/D` to be the probability of execz being false (i.e. of
+  // the 'then' block being executed). The transformation is profitable if
+  // always executing the 'then' block is cheaper than sometimes executing
+  // 'then' and always executing s_cbranch_execz:
+  // * ThenCost <= P*BranchThenCost + (1-P)*BranchElseCost
+  // * D * ThenCost <= N * BranchThenCost + (D - N) * BranchElseCost
+  // For the resource length to be equivalent to the number of cycles needed
+  // to execute the block, we assume no data dependencies between the
+  // instructions. This may not hold and should be refined.
+ uint64_t Numerator = BranchProb.getNumerator();
+ uint64_t Denominator = BranchProb.getDenominator();
+ bool IsProfitable = Denominator * ResourceThenWithoutBranch <=
+ Numerator * ResourceThenWithBranch +
+ (Denominator - Numerator) * ResourceElseWithBranch;
+ return !IsProfitable;
}
-} // namespace
// Returns true if the skip branch instruction is removed.
bool SIPreEmitPeephole::removeExeczBranch(MachineInstr &MI,
@@ -414,6 +416,8 @@ bool SIPreEmitPeephole::runOnMachineFunction(MachineFunction &MF) {
const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
TII = ST.getInstrInfo();
TRI = &TII->getRegisterInfo();
+ Traces = getAnalysis<MachineTraceMetricsWrapperPass>().getMTM().getEnsemble(
+ llvm::MachineTraceStrategy::TS_MinInstrCount);
bool Changed = false;
MF.RenumberBlocks();
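
[Editor's note: as a standalone illustration of the profitability test the patch
introduces above, here is a minimal sketch of the inequality once P = N/D is
multiplied through by the denominator. It is not part of the patch; the function
name and the plain-integer interface are assumptions for exposition.]

  #include <cstdint>

  // Returns true when removing the s_cbranch_execz skip branch is expected
  // to be profitable, i.e. when always executing the 'then' blocks costs no
  // more on average than keeping the branch:
  //   ThenCost <= P*BranchThenCost + (1-P)*BranchElseCost, with P = N/D.
  // Costs stand in for resource lengths (estimated cycles) as computed by
  // MachineTraceMetrics in the patch.
  static bool removalIsProfitable(uint64_t N, uint64_t D, uint64_t ThenCost,
                                  uint64_t BranchThenCost,
                                  uint64_t BranchElseCost) {
    // Multiply through by D to stay in integer arithmetic:
    return D * ThenCost <= N * BranchThenCost + (D - N) * BranchElseCost;
  }

For example, with N/D = 1/2, ThenCost = 4, BranchThenCost = 5 (then plus the
not-taken branch) and BranchElseCost = 1 (the taken branch alone), 2*4 <= 1*5 + 1*1
fails, so the branch is kept; a cheaper then-block with ThenCost = 3 gives
2*3 <= 6 and the branch is removed.
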
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-used-outside-loop.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-used-outside-loop.ll
index 91e16d91ddd15a..f530fb22754e42 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-used-outside-loop.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-used-outside-loop.ll
@@ -249,11 +249,10 @@ define void @divergent_i1_xor_used_outside_loop_larger_loop_body(i32 %num.elts,
; GFX10-NEXT: .LBB3_6: ; %Flow1
; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_and_saveexec_b32 s4, s6
-; GFX10-NEXT: s_cbranch_execz .LBB3_8
; GFX10-NEXT: ; %bb.7: ; %block.after.loop
; GFX10-NEXT: v_mov_b32_e32 v0, 5
; GFX10-NEXT: flat_store_dword v[3:4], v0
-; GFX10-NEXT: .LBB3_8: ; %exit
+; GFX10-NEXT: ; %bb.8: ; %exit
; GFX10-NEXT: s_waitcnt_depctr 0xffe3
; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
@@ -340,10 +339,9 @@ define void @divergent_i1_icmp_used_outside_loop(i32 %v0, i32 %v1, ptr addrspace
; GFX10-NEXT: .LBB4_6: ; %cond.block.1
; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s5
; GFX10-NEXT: s_and_saveexec_b32 s4, s6
-; GFX10-NEXT: s_cbranch_execz .LBB4_8
; GFX10-NEXT: ; %bb.7: ; %if.block.1
; GFX10-NEXT: global_store_dword v[6:7], v4, off
-; GFX10-NEXT: .LBB4_8: ; %exit
+; GFX10-NEXT: ; %bb.8: ; %exit
; GFX10-NEXT: s_waitcnt_depctr 0xffe3
; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_setpc_b64 s[30:31]
@@ -534,11 +532,10 @@ define amdgpu_cs void @loop_with_1break(ptr addrspace(1) %x, ptr addrspace(1) %a
; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX10-NEXT: s_and_saveexec_b32 s0, s1
; GFX10-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX10-NEXT: s_cbranch_execz .LBB6_6
; GFX10-NEXT: ; %bb.5: ; %break.body
; GFX10-NEXT: v_mov_b32_e32 v0, 10
; GFX10-NEXT: global_store_dword v[4:5], v0, off
-; GFX10-NEXT: .LBB6_6: ; %exit
+; GFX10-NEXT: ; %bb.6: ; %exit
; GFX10-NEXT: s_endpgm
entry:
br label %A
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-structurizer.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-structurizer.ll
index 1698f84eea5185..cb7f11f19e05af 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-structurizer.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-structurizer.ll
@@ -10,12 +10,13 @@ define amdgpu_ps void @divergent_i1_phi_if_then(ptr addrspace(1) %out, i32 %tid,
; GFX10-NEXT: v_cmp_le_u32_e64 s0, 6, v2
; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v3
; GFX10-NEXT: s_and_saveexec_b32 s1, vcc_lo
+; GFX10-NEXT: s_cbranch_execz .LBB0_2
; GFX10-NEXT: ; %bb.1: ; %B
; GFX10-NEXT: v_cmp_gt_u32_e32 vcc_lo, 1, v2
; GFX10-NEXT: s_andn2_b32 s0, s0, exec_lo
; GFX10-NEXT: s_and_b32 s2, exec_lo, vcc_lo
; GFX10-NEXT: s_or_b32 s0, s0, s2
-; GFX10-NEXT: ; %bb.2: ; %exit
+; GFX10-NEXT: .LBB0_2: ; %exit
; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s1
; GFX10-NEXT: v_cndmask_b32_e64 v2, 0, -1, s0
; GFX10-NEXT: v_add_nc_u32_e32 v2, 2, v2
@@ -46,20 +47,22 @@ define amdgpu_ps void @divergent_i1_phi_if_else(ptr addrspace(1) %out, i32 %tid,
; GFX10-NEXT: v_cmp_ne_u32_e64 s0, 0, s0
; GFX10-NEXT: s_and_saveexec_b32 s1, vcc_lo
; GFX10-NEXT: s_xor_b32 s1, exec_lo, s1
+; GFX10-NEXT: s_cbranch_execz .LBB1_2
; GFX10-NEXT: ; %bb.1: ; %B
; GFX10-NEXT: v_cmp_gt_u32_e32 vcc_lo, 2, v2
; GFX10-NEXT: s_andn2_b32 s0, s0, exec_lo
; GFX10-NEXT: ; implicit-def: $vgpr2
; GFX10-NEXT: s_and_b32 s2, exec_lo, vcc_lo
; GFX10-NEXT: s_or_b32 s0, s0, s2
-; GFX10-NEXT: ; %bb.2: ; %Flow
+; GFX10-NEXT: .LBB1_2: ; %Flow
; GFX10-NEXT: s_andn2_saveexec_b32 s1, s1
+; GFX10-NEXT: s_cbranch_execz .LBB1_4
; GFX10-NEXT: ; %bb.3: ; %A
; GFX10-NEXT: v_cmp_le_u32_e32 vcc_lo, 1, v2
; GFX10-NEXT: s_andn2_b32 s0, s0, exec_lo
; GFX10-NEXT: s_and_b32 s2, exec_lo, vcc_lo
; GFX10-NEXT: s_or_b32 s0, s0, s2
-; GFX10-NEXT: ; %bb.4: ; %exit
+; GFX10-NEXT: .LBB1_4: ; %exit
; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s1
; GFX10-NEXT: v_cndmask_b32_e64 v2, 0, -1, s0
; GFX10-NEXT: v_add_nc_u32_e32 v2, 2, v2
@@ -437,11 +440,10 @@ define amdgpu_cs void @loop_with_div_break_with_body(ptr addrspace(1) %x, ptr ad
; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX10-NEXT: s_and_saveexec_b32 s0, s1
; GFX10-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX10-NEXT: s_cbranch_execz .LBB5_6
; GFX10-NEXT: ; %bb.5: ; %break.body
; GFX10-NEXT: v_mov_b32_e32 v0, 10
; GFX10-NEXT: global_store_dword v[4:5], v0, off
-; GFX10-NEXT: .LBB5_6: ; %exit
+; GFX10-NEXT: ; %bb.6: ; %exit
; GFX10-NEXT: s_endpgm
entry:
br label %A
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ballot.i32.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ballot.i32.ll
index 927a31d3992b06..4e301a15b5e13d 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ballot.i32.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ballot.i32.ll
@@ -433,20 +433,22 @@ define amdgpu_ps void @non_cst_non_compare_input(ptr addrspace(1) %out, i32 %tid
; GFX10-NEXT: v_cmp_ne_u32_e64 s0, 0, s0
; GFX10-NEXT: s_and_saveexec_b32 s1, vcc_lo
; GFX10-NEXT: s_xor_b32 s1, exec_lo, s1
+; GFX10-NEXT: s_cbranch_execz .LBB20_2
; GFX10-NEXT: ; %bb.1: ; %B
; GFX10-NEXT: v_cmp_gt_u32_e32 vcc_lo, 2, v2
; GFX10-NEXT: s_andn2_b32 s0, s0, exec_lo
; GFX10-NEXT: ; implicit-def: $vgpr2
; GFX10-NEXT: s_and_b32 s2, exec_lo, vcc_lo
; GFX10-NEXT: s_or_b32 s0, s0, s2
-; GFX10-NEXT: ; %bb.2: ; %Flow
+; GFX10-NEXT: .LBB20_2: ; %Flow
; GFX10-NEXT: s_andn2_saveexec_b32 s1, s1
+; GFX10-NEXT: s_cbranch_execz .LBB20_4
; GFX10-NEXT: ; %bb.3: ; %A
; GFX10-NEXT: v_cmp_le_u32_e32 vcc_lo, 1, v2
; GFX10-NEXT: s_andn2_b32 s0, s0, exec_lo
; GFX10-NEXT: s_and_b32 s2, exec_lo, vcc_lo
; GFX10-NEXT: s_or_b32 s0, s0, s2
-; GFX10-NEXT: ; %bb.4: ; %exit
+; GFX10-NEXT: .LBB20_4: ; %exit
; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s1
; GFX10-NEXT: s_and_b32 s0, s0, exec_lo
; GFX10-NEXT: v_mov_b32_e32 v2, s0
@@ -460,20 +462,22 @@ define amdgpu_ps void @non_cst_non_compare_input(ptr addrspace(1) %out, i32 %tid
; GFX11-NEXT: v_cmp_ne_u32_e64 s0, 0, s0
; GFX11-NEXT: v_cmpx_ne_u32_e32 0, v3
; GFX11-NEXT: s_xor_b32 s1, exec_lo, s1
+; GFX11-NEXT: s_cbranch_execz .LBB20_2
; GFX11-NEXT: ; %bb.1: ; %B
; GFX11-NEXT: v_cmp_gt_u32_e32 vcc_lo, 2, v2
; GFX11-NEXT: s_and_not1_b32 s0, s0, exec_lo
; GFX11-NEXT: ; implicit-def: $vgpr2
; GFX11-NEXT: s_and_b32 s2, exec_lo, vcc_lo
; GFX11-NEXT: s_or_b32 s0, s0, s2
-; GFX11-NEXT: ; %bb.2: ; %Flow
+; GFX11-NEXT: .LBB20_2: ; %Flow
; GFX11-NEXT: s_and_not1_saveexec_b32 s1, s1
+; GFX11-NEXT: s_cbranch_execz .LBB20_4
; GFX11-NEXT: ; %bb.3: ; %A
; GFX11-NEXT: v_cmp_le_u32_e32 vcc_lo, 1, v2
; GFX11-NEXT: s_and_not1_b32 s0, s0, exec_lo
; GFX11-NEXT: s_and_b32 s2, exec_lo, vcc_lo
; GFX11-NEXT: s_or_b32 s0, s0, s2
-; GFX11-NEXT: ; %bb.4: ; %exit
+; GFX11-NEXT: .LBB20_4: ; %exit
; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
; GFX11-NEXT: s_and_b32 s0, s0, exec_lo
; GFX11-NEXT: v_mov_b32_e32 v2, s0
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ballot.i64.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ballot.i64.ll
index 0bbb40b8db43ab..8f908c19ff7064 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ballot.i64.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ballot.i64.ll
@@ -436,20 +436,22 @@ define amdgpu_ps void @non_cst_non_compare_input(ptr addrspace(1) %out, i32 %tid
; CHECK-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, s0
; CHECK-NEXT: s_and_saveexec_b64 s[2:3], vcc
; CHECK-NEXT: s_xor_b64 s[2:3], exec, s[2:3]
+; CHECK-NEXT: s_cbranch_execz .LBB20_2
; CHECK-NEXT: ; %bb.1: ; %B
; CHECK-NEXT: v_cmp_gt_u32_e32 vcc, 2, v2
; CHECK-NEXT: s_andn2_b64 s[0:1], s[0:1], exec
; CHECK-NEXT: s_and_b64 s[4:5], exec, vcc
; CHECK-NEXT: s_or_b64 s[0:1], s[0:1], s[4:5]
; CHECK-NEXT: ; implicit-def: $vgpr2
-; CHECK-NEXT: ; %bb.2: ; %Flow
+; CHECK-NEXT: .LBB20_2: ; %Flow
; CHECK-NEXT: s_andn2_saveexec_b64 s[2:3], s[2:3]
+; CHECK-NEXT: s_cbranch_execz .LBB20_4
; CHECK-NEXT: ; %bb.3: ; %A
; CHECK-NEXT: v_cmp_le_u32_e32 vcc, 1, v2
; CHECK-NEXT: s_andn2_b64 s[0:1], s[0:1], exec
; CHECK-NEXT: s_and_b64 s[4:5], exec, vcc
; CHECK-NEXT: s_or_b64 s[0:1], s[0:1], s[4:5]
-; CHECK-NEXT: ; %bb.4: ; %exit
+; CHECK-NEXT: .LBB20_4: ; %exit
; CHECK-NEXT: s_or_b64 exec, exec, s[2:3]
; CHECK-NEXT: s_and_b64 s[0:1], s[0:1], exec
; CHECK-NEXT: v_mov_b32_e32 v3, s1
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/vni8-across-blocks.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/vni8-across-blocks.ll
index 9c2fabce4bcdeb..d7936469c40092 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/vni8-across-blocks.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/vni8-across-blocks.ll
@@ -68,10 +68,9 @@ define amdgpu_kernel void @v4i8_liveout(ptr addrspace(1) %src1, ptr addrspace(1)
; GFX906-NEXT: s_waitcnt lgkmcnt(0)
; GFX906-NEXT: global_load_dword v1, v2, s[0:1]
; GFX906-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX906-NEXT: s_cbranch_execz .LBB1_2
; GFX906-NEXT: ; %bb.1: ; %bb.1
; GFX906-NEXT: global_load_dword v1, v2, s[2:3]
-; GFX906-NEXT: .LBB1_2: ; %bb.2
+; GFX906-NEXT: ; %bb.2: ; %bb.2
; GFX906-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX906-NEXT: v_mov_b32_e32 v0, 0
; GFX906-NEXT: s_waitcnt vmcnt(0)
@@ -149,10 +148,9 @@ define amdgpu_kernel void @v8i8_liveout(ptr addrspace(1) %src1, ptr addrspace(1)
; GFX906-NEXT: s_waitcnt lgkmcnt(0)
; GFX906-NEXT: global_load_dwordx2 v[1:2], v3, s[0:1]
; GFX906-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX906-NEXT: s_cbranch_execz .LBB3_2
; GFX906-NEXT: ; %bb.1: ; %bb.1
; GFX906-NEXT: global_load_dwordx2 v[1:2], v3, s[2:3]
-; GFX906-NEXT: .LBB3_2: ; %bb.2
+; GFX906-NEXT: ; %bb.2: ; %bb.2
; GFX906-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX906-NEXT: v_mov_b32_e32 v0, 0
; GFX906-NEXT: s_waitcnt vmcnt(0)
@@ -185,10 +183,9 @@ define amdgpu_kernel void @v16i8_liveout(ptr addrspace(1) %src1, ptr addrspace(1
; GFX906-NEXT: s_waitcnt lgkmcnt(0)
; GFX906-NEXT: global_load_dwordx4 v[1:4], v5, s[0:1]
; GFX906-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX906-NEXT: s_cbranch_execz .LBB4_2
; GFX906-NEXT: ; %bb.1: ; %bb.1
; GFX906-NEXT: global_load_dwordx4 v[1:4], v5, s[2:3]
-; GFX906-NEXT: .LBB4_2: ; %bb.2
+; GFX906-NEXT: ; %bb.2: ; %bb.2
; GFX906-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX906-NEXT: v_mov_b32_e32 v0, 0
; GFX906-NEXT: s_waitcnt vmcnt(0)
@@ -222,11 +219,10 @@ define amdgpu_kernel void @v32i8_liveout(ptr addrspace(1) %src1, ptr addrspace(1
; GFX906-NEXT: global_load_dwordx4 v[1:4], v9, s[0:1]
; GFX906-NEXT: global_load_dwordx4 v[5:8], v9, s[0:1] offset:16
; GFX906-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX906-NEXT: s_cbranch_execz .LBB5_2
; GFX906-NEXT: ; %bb.1: ; %bb.1
; GFX906-NEXT: global_load_dwordx4 v[1:4], v9, s[2:3]
; GFX906-NEXT: global_load_dwordx4 v[5:8], v9, s[2:3] offset:16
-; GFX906-NEXT: .LBB5_2: ; %bb.2
+; GFX906-NEXT: ; %bb.2: ; %bb.2
; GFX906-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX906-NEXT: v_mov_b32_e32 v0, 0
; GFX906-NEXT: s_waitcnt vmcnt(1)
@@ -547,11 +543,10 @@ define amdgpu_kernel void @v8i8_multi_block(ptr addrspace(1) %src1, ptr addrspac
; GFX906-NEXT: global_load_dwordx2 v[1:2], v5, s[10:11]
; GFX906-NEXT: v_cmp_gt_u32_e32 vcc, 7, v0
; GFX906-NEXT: s_and_saveexec_b64 s[2:3], vcc
-; GFX906-NEXT: s_cbranch_execz .LBB9_3
; GFX906-NEXT: ; %bb.2: ; %bb.2
; GFX906-NEXT: v_mov_b32_e32 v0, 0
; GFX906-NEXT: global_store_dwordx2 v0, v[3:4], s[12:13]
-; GFX906-NEXT: .LBB9_3: ; %Flow
+; GFX906-NEXT: ; %bb.3: ; %Flow
; GFX906-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX906-NEXT: .LBB9_4: ; %bb.3
; GFX906-NEXT: s_or_b64 exec, exec, s[0:1]
diff --git a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_pixelshader.ll b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_pixelshader.ll
index 4ae08a0375c8c3..40320b2dc0fa3c 100644
--- a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_pixelshader.ll
+++ b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_pixelshader.ll
@@ -294,11 +294,10 @@ define amdgpu_ps void @add_i32_varying(ptr addrspace(8) inreg %out, ptr addrspac
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3
; GFX8-NEXT: ; implicit-def: $vgpr0
; GFX8-NEXT: s_and_saveexec_b64 s[10:11], vcc
-; GFX8-NEXT: s_cbranch_execz .LBB1_3
; GFX8-NEXT: ; %bb.2:
; GFX8-NEXT: v_mov_b32_e32 v0, s12
; GFX8-NEXT: buffer_atomic_add v0, off, s[4:7], 0 glc
-; GFX8-NEXT: .LBB1_3:
+; GFX8-NEXT: ; %bb.3:
; GFX8-NEXT: s_or_b64 exec, exec, s[10:11]
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_readfirstlane_b32 s4, v0
@@ -349,11 +348,10 @@ define amdgpu_ps void @add_i32_varying(ptr addrspace(8) inreg %out, ptr addrspac
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3
; GFX9-NEXT: ; implicit-def: $vgpr0
; GFX9-NEXT: s_and_saveexec_b64 s[10:11], vcc
-; GFX9-NEXT: s_cbranch_execz .LBB1_3
; GFX9-NEXT: ; %bb.2:
; GFX9-NEXT: v_mov_b32_e32 v0, s12
; GFX9-NEXT: buffer_atomic_add v0, off, s[4:7], 0 glc
-; GFX9-NEXT: .LBB1_3:
+; GFX9-NEXT: ; %bb.3:
; GFX9-NEXT: s_or_b64 exec, exec, s[10:11]
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_readfirstlane_b32 s4, v0
@@ -408,11 +406,10 @@ define amdgpu_ps void @add_i32_varying(ptr addrspace(8) inreg %out, ptr addrspac
; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX1064-NEXT: ; implicit-def: $vgpr0
; GFX1064-NEXT: s_and_saveexec_b64 s[10:11], vcc
-; GFX1064-NEXT: s_cbranch_execz .LBB1_3
; GFX1064-NEXT: ; %bb.2:
; GFX1064-NEXT: v_mov_b32_e32 v0, s12
; GFX1064-NEXT: buffer_atomic_add v0, off, s[4:7], 0 glc
-; GFX1064-NEXT: .LBB1_3:
+; GFX1064-NEXT: ; %bb.3:
; GFX1064-NEXT: s_waitcnt_depctr 0xffe3
; GFX1064-NEXT: s_or_b64 exec, exec, s[10:11]
; GFX1064-NEXT: s_waitcnt vmcnt(0)
@@ -458,11 +455,10 @@ define amdgpu_ps void @add_i32_varying(ptr addrspace(8) inreg %out, ptr addrspac
; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032-NEXT: ; implicit-def: $vgpr0
; GFX1032-NEXT: s_and_saveexec_b32 s9, vcc_lo
-; GFX1032-NEXT: s_cbranch_execz .LBB1_3
; GFX1032-NEXT: ; %bb.2:
; GFX1032-NEXT: v_mov_b32_e32 v0, s11
; GFX1032-NEXT: buffer_atomic_add v0, off, s[4:7], 0 glc
-; GFX1032-NEXT: .LBB1_3:
+; GFX1032-NEXT: ; %bb.3:
; GFX1032-NEXT: s_waitcnt_depctr 0xffe3
; GFX1032-NEXT: s_or_b32 exec_lo, exec_lo, s9
; GFX1032-NEXT: s_waitcnt vmcnt(0)
@@ -527,11 +523,10 @@ define amdgpu_ps void @add_i32_varying(ptr addrspace(8) inreg %out, ptr addrspac
; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX1164-NEXT: ; implicit-def: $vgpr0
; GFX1164-NEXT: s_and_saveexec_b64 s[10:11], vcc
-; GFX1164-NEXT: s_cbranch_execz .LBB1_3
; GFX1164-NEXT: ; %bb.2:
; GFX1164-NEXT: v_mov_b32_e32 v0, s12
; GFX1164-NEXT: buffer_atomic_add_u32 v0, off, s[4:7], 0 glc
-; GFX1164-NEXT: .LBB1_3:
+; GFX1164-NEXT: ; %bb.3:
; GFX1164-NEXT: s_or_b64 exec, exec, s[10:11]
; GFX1164-NEXT: s_waitcnt vmcnt(0)
; GFX1164-NEXT: v_readfirstlane_b32 s4, v0
@@ -585,11 +580,10 @@ define amdgpu_ps void @add_i32_varying(ptr addrspace(8) inreg %out, ptr addrspac
; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1132-NEXT: ; implicit-def: $vgpr0
; GFX1132-NEXT: s_and_saveexec_b32 s9, vcc_lo
-; GFX1132-NEXT: s_cbranch_execz .LBB1_3
; GFX1132-NEXT: ; %bb.2:
; GFX1132-NEXT: v_mov_b32_e32 v0, s11
; GFX1132-NEXT: buffer_atomic_add_u32 v0, off, s[4:7], 0 glc
-; GFX1132-NEXT: .LBB1_3:
+; GFX1132-NEXT: ; %bb.3:
; GFX1132-NEXT: s_or_b32 exec_lo, exec_lo, s9
; GFX1132-NEXT: s_waitcnt vmcnt(0)
; GFX1132-NEXT: v_readfirstlane_b32 s4, v0
diff --git a/llvm/test/CodeGen/AMDGPU/atomicrmw-expand.ll b/llvm/test/CodeGen/AMDGPU/atomicrmw-expand.ll
index f5c9b1a79b4764..691ca1d93a17c8 100644
--- a/llvm/test/CodeGen/AMDGPU/atomicrmw-expand.ll
+++ b/llvm/test/CodeGen/AMDGPU/atomicrmw-expand.ll
@@ -153,12 +153,11 @@ define float @syncscope_workgroup_rtn(ptr %addr, float %val) #0 {
; GFX90A-NEXT: ; implicit-def: $vgpr3
; GFX90A-NEXT: s_and_saveexec_b64 s[6:7], vcc
; GFX90A-NEXT: s_xor_b64 s[6:7], exec, s[6:7]
-; GFX90A-NEXT: s_cbranch_execz .LBB1_3
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.global
; GFX90A-NEXT: global_atomic_add_f32 v3, v[0:1], v2, off glc
; GFX90A-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX90A-NEXT: ; implicit-def: $vgpr2
-; GFX90A-NEXT: .LBB1_3: ; %Flow
+; GFX90A-NEXT: ; %bb.3: ; %Flow
; GFX90A-NEXT: s_andn2_saveexec_b64 s[6:7], s[6:7]
; GFX90A-NEXT: s_cbranch_execz .LBB1_5
; GFX90A-NEXT: ; %bb.4: ; %atomicrmw.private
@@ -240,12 +239,11 @@ define void @syncscope_workgroup_nortn(ptr %addr, float %val) #0 {
; GFX908-NEXT: v_cmp_ne_u32_e32 vcc, s7, v1
; GFX908-NEXT: s_and_saveexec_b64 s[6:7], vcc
; GFX908-NEXT: s_xor_b64 s[6:7], exec, s[6:7]
-; GFX908-NEXT: s_cbranch_execz .LBB2_5
; GFX908-NEXT: ; %bb.4: ; %atomicrmw.global
; GFX908-NEXT: global_atomic_add_f32 v[0:1], v2, off
; GFX908-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX908-NEXT: ; implicit-def: $vgpr2
-; GFX908-NEXT: .LBB2_5: ; %Flow
+; GFX908-NEXT: ; %bb.5: ; %Flow
; GFX908-NEXT: s_andn2_saveexec_b64 s[6:7], s[6:7]
; GFX908-NEXT: s_cbranch_execz .LBB2_7
; GFX908-NEXT: ; %bb.6: ; %atomicrmw.private
@@ -290,12 +288,11 @@ define void @syncscope_workgroup_nortn(ptr %addr, float %val) #0 {
; GFX90A-NEXT: v_cmp_ne_u32_e32 vcc, s7, v1
; GFX90A-NEXT: s_and_saveexec_b64 s[6:7], vcc
; GFX90A-NEXT: s_xor_b64 s[6:7], exec, s[6:7]
-; GFX90A-NEXT: s_cbranch_execz .LBB2_5
; GFX90A-NEXT: ; %bb.4: ; %atomicrmw.global
; GFX90A-NEXT: global_atomic_add_f32 v[0:1], v2, off
; GFX90A-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX90A-NEXT: ; implicit-def: $vgpr2
-; GFX90A-NEXT: .LBB2_5: ; %Flow
+; GFX90A-NEXT: ; %bb.5: ; %Flow
; GFX90A-NEXT: s_andn2_saveexec_b64 s[6:7], s[6:7]
; GFX90A-NEXT: s_cbranch_execz .LBB2_7
; GFX90A-NEXT: ; %bb.6: ; %atomicrmw.private
diff --git a/llvm/test/CodeGen/AMDGPU/branch-condition-and.ll b/llvm/test/CodeGen/AMDGPU/branch-condition-and.ll
index d23d7a7c8e0c80..6efc9f2ae77b82 100644
--- a/llvm/test/CodeGen/AMDGPU/branch-condition-and.ll
+++ b/llvm/test/CodeGen/AMDGPU/branch-condition-and.ll
@@ -17,12 +17,13 @@ define amdgpu_ps void @ham(float %arg, float %arg1) #0 {
; GCN-NEXT: v_cmp_lt_f32_e64 s[0:1], 0, v1
; GCN-NEXT: s_and_b64 s[0:1], vcc, s[0:1]
; GCN-NEXT: s_and_saveexec_b64 s[2:3], s[0:1]
+; GCN-NEXT: s_cbranch_execz .LBB0_2
; GCN-NEXT: ; %bb.1: ; %bb4
; GCN-NEXT: v_mov_b32_e32 v0, 4
; GCN-NEXT: s_mov_b32 m0, -1
; GCN-NEXT: ds_write_b32 v0, v0
; GCN-NEXT: ; divergent unreachable
-; GCN-NEXT: ; %bb.2: ; %UnifiedReturnBlock
+; GCN-NEXT: .LBB0_2: ; %UnifiedReturnBlock
; GCN-NEXT: s_endpgm
bb:
%tmp = fcmp ogt float %arg, 0.000000e+00
diff --git a/llvm/test/CodeGen/AMDGPU/cgp-addressing-modes-flat.ll b/llvm/test/CodeGen/AMDGPU/cgp-addressing-modes-flat.ll
index fdae1696a5a492..3b378c323fa0c4 100644
--- a/llvm/test/CodeGen/AMDGPU/cgp-addressing-modes-flat.ll
+++ b/llvm/test/CodeGen/AMDGPU/cgp-addressing-modes-flat.ll
@@ -116,10 +116,9 @@ define void @test_sinkable_flat_small_offset_i32(ptr %out, ptr %in, i32 %cond) {
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
; GFX9-NEXT: v_mov_b32_e32 v4, 0
; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GFX9-NEXT: s_cbranch_execz .LBB0_2
; GFX9-NEXT: ; %bb.1: ; %if
; GFX9-NEXT: flat_load_dword v4, v[2:3] offset:28
-; GFX9-NEXT: .LBB0_2: ; %endif
+; GFX9-NEXT: ; %bb.2: ; %endif
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, 0x3d0000, v0
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
@@ -273,10 +272,9 @@ define void @test_sink_noop_addrspacecast_flat_to_global_i32(ptr %out, ptr %in,
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
; GFX9-NEXT: v_mov_b32_e32 v4, 0
; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GFX9-NEXT: s_cbranch_execz .LBB1_2
; GFX9-NEXT: ; %bb.1: ; %if
; GFX9-NEXT: global_load_dword v4, v[2:3], off offset:28
-; GFX9-NEXT: .LBB1_2: ; %endif
+; GFX9-NEXT: ; %bb.2: ; %endif
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, 0x3d0000, v0
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
@@ -386,10 +384,9 @@ define void @test_sink_noop_addrspacecast_flat_to_constant_i32(ptr %out, ptr %in
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
; GFX9-NEXT: v_mov_b32_e32 v4, 0
; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GFX9-NEXT: s_cbranch_execz .LBB2_2
; GFX9-NEXT: ; %bb.1: ; %if
; GFX9-NEXT: global_load_dword v4, v[2:3], off offset:28
-; GFX9-NEXT: .LBB2_2: ; %endif
+; GFX9-NEXT: ; %bb.2: ; %endif
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, 0x3d0000, v0
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
@@ -555,10 +552,9 @@ define void @test_sink_flat_small_max_flat_offset(ptr %out, ptr %in) #1 {
; GFX9-NEXT: v_mov_b32_e32 v4, 0
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v5
; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GFX9-NEXT: s_cbranch_execz .LBB3_2
; GFX9-NEXT: ; %bb.1: ; %if
; GFX9-NEXT: flat_load_sbyte v4, v[2:3] offset:4095
-; GFX9-NEXT: .LBB3_2: ; %endif
+; GFX9-NEXT: ; %bb.2: ; %endif
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, 0x1000, v0
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
diff --git a/llvm/test/CodeGen/AMDGPU/collapse-endcf.ll b/llvm/test/CodeGen/AMDGPU/collapse-endcf.ll
index 50c9c0cb64ccd6..d87c9e5fc3a816 100644
--- a/llvm/test/CodeGen/AMDGPU/collapse-endcf.ll
+++ b/llvm/test/CodeGen/AMDGPU/collapse-endcf.ll
@@ -639,11 +639,10 @@ define amdgpu_kernel void @nested_if_else_if(ptr addrspace(1) nocapture %arg) {
; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 2, v0
; GCN-NEXT: buffer_store_dword v3, v[1:2], s[0:3], 0 addr64 offset:4
; GCN-NEXT: s_and_saveexec_b64 s[6:7], vcc
-; GCN-NEXT: s_cbranch_execz .LBB3_7
; GCN-NEXT: ; %bb.6: ; %bb.inner.then
; GCN-NEXT: v_mov_b32_e32 v0, 2
; GCN-NEXT: buffer_store_dword v0, v[1:2], s[0:3], 0 addr64 offset:8
-; GCN-NEXT: .LBB3_7: ; %Flow1
+; GCN-NEXT: ; %bb.7: ; %Flow1
; GCN-NEXT: s_or_b64 exec, exec, s[6:7]
; GCN-NEXT: .LBB3_8: ; %bb.outer.end
; GCN-NEXT: s_or_b64 exec, exec, s[4:5]
diff --git a/llvm/test/CodeGen/AMDGPU/cse-convergent.ll b/llvm/test/CodeGen/AMDGPU/cse-convergent.ll
index 7aca63d34f51bf..52f1ed7e991169 100644
--- a/llvm/test/CodeGen/AMDGPU/cse-convergent.ll
+++ b/llvm/test/CodeGen/AMDGPU/cse-convergent.ll
@@ -19,6 +19,7 @@ define i32 @test(i32 %val, i32 %cond) {
; GCN-NEXT: v_mov_b32_e32 v4, v2
; GCN-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v1
; GCN-NEXT: s_and_saveexec_b32 s4, vcc_lo
+; GCN-NEXT: s_cbranch_execz .LBB0_2
; GCN-NEXT: ; %bb.1: ; %if
; GCN-NEXT: s_or_saveexec_b32 s5, -1
; GCN-NEXT: v_mov_b32_e32 v2, 0
@@ -26,7 +27,7 @@ define i32 @test(i32 %val, i32 %cond) {
; GCN-NEXT: v_mov_b32_dpp v2, v3 row_xmask:1 row_mask:0xf bank_mask:0xf
; GCN-NEXT: s_mov_b32 exec_lo, s5
; GCN-NEXT: v_mov_b32_e32 v5, v2
-; GCN-NEXT: ; %bb.2: ; %end
+; GCN-NEXT: .LBB0_2: ; %end
; GCN-NEXT: s_or_b32 exec_lo, exec_lo, s4
; GCN-NEXT: v_add_nc_u32_e32 v0, v4, v5
; GCN-NEXT: s_xor_saveexec_b32 s4, -1
diff --git a/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll b/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll
index 3e6b812c12d7f6..f1a7fe19f38ee2 100644
--- a/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll
+++ b/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll
@@ -114,6 +114,7 @@ define i128 @fptosi_f64_to_i128(double %x) {
; SDAG-NEXT: s_or_b64 exec, exec, s[12:13]
; SDAG-NEXT: .LBB0_7: ; %Flow2
; SDAG-NEXT: s_andn2_saveexec_b64 s[4:5], s[10:11]
+; SDAG-NEXT: s_cbranch_execz .LBB0_9
; SDAG-NEXT: ; %bb.8: ; %fp-to-i-if-then5
; SDAG-NEXT: v_bfrev_b32_e32 v0, 1
; SDAG-NEXT: v_bfrev_b32_e32 v1, -2
@@ -121,7 +122,7 @@ define i128 @fptosi_f64_to_i128(double %x) {
; SDAG-NEXT: v_cndmask_b32_e32 v3, v0, v1, vcc
; SDAG-NEXT: v_mov_b32_e32 v0, v2
; SDAG-NEXT: v_mov_b32_e32 v1, v2
-; SDAG-NEXT: ; %bb.9: ; %Flow3
+; SDAG-NEXT: .LBB0_9: ; %Flow3
; SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
; SDAG-NEXT: .LBB0_10: ; %fp-to-i-cleanup
; SDAG-NEXT: s_or_b64 exec, exec, s[8:9]
@@ -480,6 +481,7 @@ define i128 @fptoui_f64_to_i128(double %x) {
; SDAG-NEXT: s_or_b64 exec, exec, s[12:13]
; SDAG-NEXT: .LBB1_7: ; %Flow2
; SDAG-NEXT: s_andn2_saveexec_b64 s[4:5], s[10:11]
+; SDAG-NEXT: s_cbranch_execz .LBB1_9
; SDAG-NEXT: ; %bb.8: ; %fp-to-i-if-then5
; SDAG-NEXT: v_bfrev_b32_e32 v0, 1
; SDAG-NEXT: v_bfrev_b32_e32 v1, -2
@@ -487,7 +489,7 @@ define i128 @fptoui_f64_to_i128(double %x) {
; SDAG-NEXT: v_cndmask_b32_e32 v3, v0, v1, vcc
; SDAG-NEXT: v_mov_b32_e32 v0, v2
; SDAG-NEXT: v_mov_b32_e32 v1, v2
-; SDAG-NEXT: ; %bb.9: ; %Flow3
+; SDAG-NEXT: .LBB1_9: ; %Flow3
; SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
; SDAG-NEXT: .LBB1_10: ; %fp-to-i-cleanup
; SDAG-NEXT: s_or_b64 exec, exec, s[8:9]
@@ -839,6 +841,7 @@ define i128 @fptosi_f32_to_i128(float %x) {
; SDAG-NEXT: s_or_b64 exec, exec, s[6:7]
; SDAG-NEXT: .LBB2_7: ; %Flow2
; SDAG-NEXT: s_andn2_saveexec_b64 s[4:5], s[10:11]
+; SDAG-NEXT: s_cbranch_execz .LBB2_9
; SDAG-NEXT: ; %bb.8: ; %fp-to-i-if-then5
; SDAG-NEXT: v_bfrev_b32_e32 v0, 1
; SDAG-NEXT: v_bfrev_b32_e32 v1, -2
@@ -846,7 +849,7 @@ define i128 @fptosi_f32_to_i128(float %x) {
; SDAG-NEXT: v_cndmask_b32_e32 v3, v0, v1, vcc
; SDAG-NEXT: v_mov_b32_e32 v0, v2
; SDAG-NEXT: v_mov_b32_e32 v1, v2
-; SDAG-NEXT: ; %bb.9: ; %Flow3
+; SDAG-NEXT: .LBB2_9: ; %Flow3
; SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
; SDAG-NEXT: .LBB2_10: ; %fp-to-i-cleanup
; SDAG-NEXT: s_or_b64 exec, exec, s[8:9]
@@ -1193,6 +1196,7 @@ define i128 @fptoui_f32_to_i128(float %x) {
; SDAG-NEXT: s_or_b64 exec, exec, s[6:7]
; SDAG-NEXT: .LBB3_7: ; %Flow2
; SDAG-NEXT: s_andn2_saveexec_b64 s[4:5], s[10:11]
+; SDAG-NEXT: s_cbranch_execz .LBB3_9
; SDAG-NEXT: ; %bb.8: ; %fp-to-i-if-then5
; SDAG-NEXT: v_bfrev_b32_e32 v0, 1
; SDAG-NEXT: v_bfrev_b32_e32 v1, -2
@@ -1200,7 +1204,7 @@ define i128 @fptoui_f32_to_i128(float %x) {
; SDAG-NEXT: v_cndmask_b32_e32 v3, v0, v1, vcc
; SDAG-NEXT: v_mov_b32_e32 v0, v2
; SDAG-NEXT: v_mov_b32_e32 v1, v2
-; SDAG-NEXT: ; %bb.9: ; %Flow3
+; SDAG-NEXT: .LBB3_9: ; %Flow3
; SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
; SDAG-NEXT: .LBB3_10: ; %fp-to-i-cleanup
; SDAG-NEXT: s_or_b64 exec, exec, s[8:9]
@@ -1572,6 +1576,7 @@ define i128 @fptosi_bf16_to_i128(bfloat %x) {
; SDAG-NEXT: s_or_b64 exec, exec, s[6:7]
; SDAG-NEXT: .LBB6_7: ; %Flow2
; SDAG-NEXT: s_andn2_saveexec_b64 s[4:5], s[10:11]
+; SDAG-NEXT: s_cbranch_execz .LBB6_9
; SDAG-NEXT: ; %bb.8: ; %fp-to-i-if-then5
; SDAG-NEXT: v_bfrev_b32_e32 v0, 1
; SDAG-NEXT: v_bfrev_b32_e32 v1, -2
@@ -1579,7 +1584,7 @@ define i128 @fptosi_bf16_to_i128(bfloat %x) {
; SDAG-NEXT: v_cndmask_b32_e32 v3, v0, v1, vcc
; SDAG-NEXT: v_mov_b32_e32 v0, v2
; SDAG-NEXT: v_mov_b32_e32 v1, v2
-; SDAG-NEXT: ; %bb.9: ; %Flow3
+; SDAG-NEXT: .LBB6_9: ; %Flow3
; SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
; SDAG-NEXT: .LBB6_10: ; %fp-to-i-cleanup
; SDAG-NEXT: s_or_b64 exec, exec, s[8:9]
@@ -1920,6 +1925,7 @@ define i128 @fptoui_bf16_to_i128(bfloat %x) {
; SDAG-NEXT: s_or_b64 exec, exec, s[6:7]
; SDAG-NEXT: .LBB7_7: ; %Flow2
; SDAG-NEXT: s_andn2_saveexec_b64 s[4:5], s[10:11]
+; SDAG-NEXT: s_cbranch_execz .LBB7_9
; SDAG-NEXT: ; %bb.8: ; %fp-to-i-if-then5
; SDAG-NEXT: v_bfrev_b32_e32 v0, 1
; SDAG-NEXT: v_bfrev_b32_e32 v1, -2
@@ -1927,7 +1933,7 @@ define i128 @fptoui_bf16_to_i128(bfloat %x) {
; SDAG-NEXT: v_cndmask_b32_e32 v3, v0, v1, vcc
; SDAG-NEXT: v_mov_b32_e32 v0, v2
; SDAG-NEXT: v_mov_b32_e32 v1, v2
-; SDAG-NEXT: ; %bb.9: ; %Flow3
+; SDAG-NEXT: .LBB7_9: ; %Flow3
; SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
; SDAG-NEXT: .LBB7_10: ; %fp-to-i-cleanup
; SDAG-NEXT: s_or_b64 exec, exec, s[8:9]
diff --git a/llvm/test/CodeGen/AMDGPU/insert-skips-gfx10.mir b/llvm/test/CodeGen/AMDGPU/insert-skips-gfx10.mir
index 0bf74d96e134e6..b37254636ed856 100644
--- a/llvm/test/CodeGen/AMDGPU/insert-skips-gfx10.mir
+++ b/llvm/test/CodeGen/AMDGPU/insert-skips-gfx10.mir
@@ -156,9 +156,7 @@ name: skip_bvh
body: |
; CHECK-LABEL: name: skip_bvh
; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: S_CBRANCH_EXECZ %bb.2, implicit $exec
+ ; CHECK-NEXT: successors: %bb.1(0x40000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors: %bb.2(0x80000000)
diff --git a/llvm/test/CodeGen/AMDGPU/itofp.i128.bf.ll b/llvm/test/CodeGen/AMDGPU/itofp.i128.bf.ll
index 3e6de324924579..9994ad711921a5 100644
--- a/llvm/test/CodeGen/AMDGPU/itofp.i128.bf.ll
+++ b/llvm/test/CodeGen/AMDGPU/itofp.i128.bf.ll
@@ -43,6 +43,7 @@ define bfloat @sitofp_i128_to_bf16(i128 %x) {
; GCN-NEXT: v_cmp_gt_i32_e32 vcc, 25, v6
; GCN-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GCN-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; GCN-NEXT: s_cbranch_execz .LBB0_3
; GCN-NEXT: ; %bb.2: ; %itofp-if-else
; GCN-NEXT: v_add_u32_e32 v4, 0xffffff98, v7
; GCN-NEXT: v_lshlrev_b64 v[0:1], v4, v[0:1]
@@ -52,7 +53,7 @@ define bfloat @sitofp_i128_to_bf16(i128 %x) {
; GCN-NEXT: ; implicit-def: $vgpr0_vgpr1
; GCN-NEXT: ; implicit-def: $vgpr7
; GCN-NEXT: ; implicit-def: $vgpr4_vgpr5
-; GCN-NEXT: ; %bb.3: ; %Flow3
+; GCN-NEXT: .LBB0_3: ; %Flow3
; GCN-NEXT: s_andn2_saveexec_b64 s[8:9], s[4:5]
; GCN-NEXT: s_cbranch_execz .LBB0_13
; GCN-NEXT: ; %bb.4: ; %NodeBlock
@@ -172,6 +173,7 @@ define bfloat @uitofp_i128_to_bf16(i128 %x) {
; GCN-NEXT: ; implicit-def: $vgpr7
; GCN-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GCN-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; GCN-NEXT: s_cbranch_execz .LBB1_3
; GCN-NEXT: ; %bb.2: ; %itofp-if-else
; GCN-NEXT: v_add_u32_e32 v2, 0xffffff98, v6
; GCN-NEXT: v_lshlrev_b64 v[0:1], v2, v[0:1]
@@ -181,7 +183,7 @@ define bfloat @uitofp_i128_to_bf16(i128 %x) {
; GCN-NEXT: ; implicit-def: $vgpr0_vgpr1
; GCN-NEXT: ; implicit-def: $vgpr6
; GCN-NEXT: ; implicit-def: $vgpr2_vgpr3
-; GCN-NEXT: ; %bb.3: ; %Flow3
+; GCN-NEXT: .LBB1_3: ; %Flow3
; GCN-NEXT: s_andn2_saveexec_b64 s[8:9], s[4:5]
; GCN-NEXT: s_cbranch_execz .LBB1_13
; GCN-NEXT: ; %bb.4: ; %NodeBlock
diff --git a/llvm/test/CodeGen/AMDGPU/itofp.i128.ll b/llvm/test/CodeGen/AMDGPU/itofp.i128.ll
index f372a54894604c..b4abc8ffeb5d43 100644
--- a/llvm/test/CodeGen/AMDGPU/itofp.i128.ll
+++ b/llvm/test/CodeGen/AMDGPU/itofp.i128.ll
@@ -39,6 +39,7 @@ define float @sitofp_i128_to_f32(i128 %x) {
; SDAG-NEXT: v_cmp_gt_i32_e32 vcc, 25, v6
; SDAG-NEXT: s_and_saveexec_b64 s[4:5], vcc
; SDAG-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; SDAG-NEXT: s_cbranch_execz .LBB0_3
; SDAG-NEXT: ; %bb.2: ; %itofp-if-else
; SDAG-NEXT: v_add_u32_e32 v4, 0xffffff98, v7
; SDAG-NEXT: v_lshlrev_b64 v[0:1], v4, v[0:1]
@@ -48,7 +49,7 @@ define float @sitofp_i128_to_f32(i128 %x) {
; SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
; SDAG-NEXT: ; implicit-def: $vgpr7
; SDAG-NEXT: ; implicit-def: $vgpr4_vgpr5
-; SDAG-NEXT: ; %bb.3: ; %Flow3
+; SDAG-NEXT: .LBB0_3: ; %Flow3
; SDAG-NEXT: s_andn2_saveexec_b64 s[8:9], s[4:5]
; SDAG-NEXT: s_cbranch_execz .LBB0_13
; SDAG-NEXT: ; %bb.4: ; %NodeBlock
@@ -167,6 +168,7 @@ define float @sitofp_i128_to_f32(i128 %x) {
; GISEL-NEXT: ; implicit-def: $vgpr4
; GISEL-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GISEL-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; GISEL-NEXT: s_cbranch_execz .LBB0_3
; GISEL-NEXT: ; %bb.2: ; %itofp-if-else
; GISEL-NEXT: v_add_u32_e32 v2, 0xffffff98, v5
; GISEL-NEXT: v_lshlrev_b64 v[0:1], v2, v[0:1]
@@ -176,7 +178,7 @@ define float @sitofp_i128_to_f32(i128 %x) {
; GISEL-NEXT: ; implicit-def: $vgpr0
; GISEL-NEXT: ; implicit-def: $vgpr5
; GISEL-NEXT: ; implicit-def: $vgpr2
-; GISEL-NEXT: ; %bb.3: ; %Flow3
+; GISEL-NEXT: .LBB0_3: ; %Flow3
; GISEL-NEXT: s_andn2_saveexec_b64 s[8:9], s[4:5]
; GISEL-NEXT: s_cbranch_execz .LBB0_13
; GISEL-NEXT: ; %bb.4: ; %NodeBlock
@@ -294,6 +296,7 @@ define float @uitofp_i128_to_f32(i128 %x) {
; SDAG-NEXT: ; implicit-def: $vgpr7
; SDAG-NEXT: s_and_saveexec_b64 s[4:5], vcc
; SDAG-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; SDAG-NEXT: s_cbranch_execz .LBB1_3
; SDAG-NEXT: ; %bb.2: ; %itofp-if-else
; SDAG-NEXT: v_add_u32_e32 v2, 0xffffff98, v6
; SDAG-NEXT: v_lshlrev_b64 v[0:1], v2, v[0:1]
@@ -303,7 +306,7 @@ define float @uitofp_i128_to_f32(i128 %x) {
; SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
; SDAG-NEXT: ; implicit-def: $vgpr6
; SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
-; SDAG-NEXT: ; %bb.3: ; %Flow3
+; SDAG-NEXT: .LBB1_3: ; %Flow3
; SDAG-NEXT: s_andn2_saveexec_b64 s[8:9], s[4:5]
; SDAG-NEXT: s_cbranch_execz .LBB1_13
; SDAG-NEXT: ; %bb.4: ; %NodeBlock
@@ -412,6 +415,7 @@ define float @uitofp_i128_to_f32(i128 %x) {
; GISEL-NEXT: ; implicit-def: $vgpr4
; GISEL-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GISEL-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; GISEL-NEXT: s_cbranch_execz .LBB1_3
; GISEL-NEXT: ; %bb.2: ; %itofp-if-else
; GISEL-NEXT: v_add_u32_e32 v2, 0xffffff98, v5
; GISEL-NEXT: v_lshlrev_b64 v[0:1], v2, v[0:1]
@@ -421,7 +425,7 @@ define float @uitofp_i128_to_f32(i128 %x) {
; GISEL-NEXT: ; implicit-def: $vgpr0
; GISEL-NEXT: ; implicit-def: $vgpr5
; GISEL-NEXT: ; implicit-def: $vgpr2
-; GISEL-NEXT: ; %bb.3: ; %Flow3
+; GISEL-NEXT: .LBB1_3: ; %Flow3
; GISEL-NEXT: s_andn2_saveexec_b64 s[8:9], s[4:5]
; GISEL-NEXT: s_cbranch_execz .LBB1_13
; GISEL-NEXT: ; %bb.4: ; %NodeBlock
@@ -551,6 +555,7 @@ define double @sitofp_i128_to_f64(i128 %x) {
; SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
; SDAG-NEXT: s_and_saveexec_b64 s[4:5], vcc
; SDAG-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; SDAG-NEXT: s_cbranch_execz .LBB2_3
; SDAG-NEXT: ; %bb.2: ; %itofp-if-else
; SDAG-NEXT: v_add_u32_e32 v6, 0xffffffb5, v9
; SDAG-NEXT: v_lshlrev_b64 v[0:1], v6, v[4:5]
@@ -561,7 +566,7 @@ define double @sitofp_i128_to_f64(i128 %x) {
; SDAG-NEXT: ; implicit-def: $vgpr6_vgpr7
; SDAG-NEXT: ; implicit-def: $vgpr4_vgpr5
; SDAG-NEXT: ; implicit-def: $vgpr9
-; SDAG-NEXT: ; %bb.3: ; %Flow3
+; SDAG-NEXT: .LBB2_3: ; %Flow3
; SDAG-NEXT: s_andn2_saveexec_b64 s[8:9], s[4:5]
; SDAG-NEXT: s_cbranch_execz .LBB2_13
; SDAG-NEXT: ; %bb.4: ; %NodeBlock
@@ -620,12 +625,13 @@ define double @sitofp_i128_to_f64(i128 %x) {
; SDAG-NEXT: s_or_b64 exec, exec, s[12:13]
; SDAG-NEXT: .LBB2_8: ; %Flow2
; SDAG-NEXT: s_andn2_saveexec_b64 s[4:5], s[10:11]
+; SDAG-NEXT: s_cbranch_execz .LBB2_10
; SDAG-NEXT: ; %bb.9: ; %itofp-sw-bb
; SDAG-NEXT: v_lshlrev_b64 v[6:7], 1, v[6:7]
; SDAG-NEXT: v_lshrrev_b32_e32 v0, 31, v5
; SDAG-NEXT: v_lshlrev_b64 v[4:5], 1, v[4:5]
; SDAG-NEXT: v_or_b32_e32 v6, v6, v0
-; SDAG-NEXT: ; %bb.10: ; %itofp-sw-epilog
+; SDAG-NEXT: .LBB2_10: ; %itofp-sw-epilog
; SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
; SDAG-NEXT: v_lshrrev_b32_e32 v0, 2, v4
; SDAG-NEXT: v_and_or_b32 v0, v0, 1, v4
@@ -638,12 +644,13 @@ define double @sitofp_i128_to_f64(i128 %x) {
; SDAG-NEXT: v_and_b32_e32 v1, 0x800000, v5
; SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 0, v1
; SDAG-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; SDAG-NEXT: s_cbranch_execz .LBB2_12
; SDAG-NEXT: ; %bb.11: ; %itofp-if-then20
; SDAG-NEXT: v_lshrrev_b64 v[0:1], 3, v[4:5]
; SDAG-NEXT: v_lshlrev_b32_e32 v2, 29, v6
; SDAG-NEXT: v_or_b32_e32 v10, v1, v2
; SDAG-NEXT: v_mov_b32_e32 v2, v8
-; SDAG-NEXT: ; %bb.12: ; %Flow
+; SDAG-NEXT: .LBB2_12: ; %Flow
; SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
; SDAG-NEXT: .LBB2_13: ; %Flow4
; SDAG-NEXT: s_or_b64 exec, exec, s[8:9]
@@ -697,6 +704,7 @@ define double @sitofp_i128_to_f64(i128 %x) {
; GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
; GISEL-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GISEL-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; GISEL-NEXT: s_cbranch_execz .LBB2_3
; GISEL-NEXT: ; %bb.2: ; %itofp-if-else
; GISEL-NEXT: v_add_u32_e32 v4, 0xffffffb5, v9
; GISEL-NEXT: v_lshlrev_b64 v[0:1], v4, v[2:3]
@@ -706,7 +714,7 @@ define double @sitofp_i128_to_f64(i128 %x) {
; GISEL-NEXT: ; implicit-def: $vgpr8
; GISEL-NEXT: ; implicit-def: $vgpr2
; GISEL-NEXT: ; implicit-def: $vgpr9
-; GISEL-NEXT: ; %bb.3: ; %Flow3
+; GISEL-NEXT: .LBB2_3: ; %Flow3
; GISEL-NEXT: s_andn2_saveexec_b64 s[8:9], s[4:5]
; GISEL-NEXT: s_cbranch_execz .LBB2_13
; GISEL-NEXT: ; %bb.4: ; %NodeBlock
@@ -789,11 +797,12 @@ define double @sitofp_i128_to_f64(i128 %x) {
; GISEL-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[9:10]
; GISEL-NEXT: v_lshl_or_b32 v10, v4, 30, v1
; GISEL-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GISEL-NEXT: s_cbranch_execz .LBB2_12
; GISEL-NEXT: ; %bb.11: ; %itofp-if-then20
; GISEL-NEXT: v_lshrrev_b64 v[0:1], 3, v[2:3]
; GISEL-NEXT: v_mov_b32_e32 v7, v8
; GISEL-NEXT: v_lshl_or_b32 v10, v4, 29, v1
-; GISEL-NEXT: ; %bb.12: ; %Flow
+; GISEL-NEXT: .LBB2_12: ; %Flow
; GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
; GISEL-NEXT: .LBB2_13: ; %Flow4
; GISEL-NEXT: s_or_b64 exec, exec, s[8:9]
@@ -840,6 +849,7 @@ define double @uitofp_i128_to_f64(i128 %x) {
; SDAG-NEXT: ; implicit-def: $vgpr4_vgpr5
; SDAG-NEXT: s_and_saveexec_b64 s[4:5], vcc
; SDAG-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; SDAG-NEXT: s_cbranch_execz .LBB3_3
; SDAG-NEXT: ; %bb.2: ; %itofp-if-else
; SDAG-NEXT: v_add_u32_e32 v2, 0xffffffb5, v8
; SDAG-NEXT: v_lshlrev_b64 v[0:1], v2, v[0:1]
@@ -850,7 +860,7 @@ define double @uitofp_i128_to_f64(i128 %x) {
; SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
; SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
; SDAG-NEXT: ; implicit-def: $vgpr8
-; SDAG-NEXT: ; %bb.3: ; %Flow3
+; SDAG-NEXT: .LBB3_3: ; %Flow3
; SDAG-NEXT: s_andn2_saveexec_b64 s[8:9], s[4:5]
; SDAG-NEXT: s_cbranch_execz .LBB3_13
; SDAG-NEXT: ; %bb.4: ; %NodeBlock
@@ -909,12 +919,13 @@ define double @uitofp_i128_to_f64(i128 %x) {
; SDAG-NEXT: s_or_b64 exec, exec, s[12:13]
; SDAG-NEXT: .LBB3_8: ; %Flow2
; SDAG-NEXT: s_andn2_saveexec_b64 s[4:5], s[10:11]
+; SDAG-NEXT: s_cbranch_execz .LBB3_10
; SDAG-NEXT: ; %bb.9: ; %itofp-sw-bb
; SDAG-NEXT: v_lshlrev_b64 v[2:3], 1, v[2:3]
; SDAG-NEXT: v_lshrrev_b32_e32 v3, 31, v1
; SDAG-NEXT: v_lshlrev_b64 v[0:1], 1, v[0:1]
; SDAG-NEXT: v_or_b32_e32 v2, v2, v3
-; SDAG-NEXT: ; %bb.10: ; %itofp-sw-epilog
+; SDAG-NEXT: .LBB3_10: ; %itofp-sw-epilog
; SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
; SDAG-NEXT: v_lshrrev_b32_e32 v3, 2, v0
; SDAG-NEXT: v_and_or_b32 v0, v3, 1, v0
@@ -926,11 +937,12 @@ define double @uitofp_i128_to_f64(i128 %x) {
; SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 0, v3
; SDAG-NEXT: v_alignbit_b32 v9, v2, v1, 2
; SDAG-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; SDAG-NEXT: s_cbranch_execz .LBB3_12
; SDAG-NEXT: ; %bb.11: ; %itofp-if-then20
; SDAG-NEXT: v_lshrrev_b64 v[4:5], 3, v[0:1]
; SDAG-NEXT: v_alignbit_b32 v9, v2, v1, 3
; SDAG-NEXT: v_mov_b32_e32 v6, v7
-; SDAG-NEXT: ; %bb.12: ; %Flow
+; SDAG-NEXT: .LBB3_12: ; %Flow
; SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
; SDAG-NEXT: .LBB3_13: ; %Flow4
; SDAG-NEXT: s_or_b64 exec, exec, s[8:9]
@@ -973,6 +985,7 @@ define double @uitofp_i128_to_f64(i128 %x) {
; GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
; GISEL-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GISEL-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; GISEL-NEXT: s_cbranch_execz .LBB3_3
; GISEL-NEXT: ; %bb.2: ; %itofp-if-else
; GISEL-NEXT: v_add_u32_e32 v2, 0xffffffb5, v8
; GISEL-NEXT: v_lshlrev_b64 v[0:1], v2, v[0:1]
@@ -982,7 +995,7 @@ define double @uitofp_i128_to_f64(i128 %x) {
; GISEL-NEXT: ; implicit-def: $vgpr7
; GISEL-NEXT: ; implicit-def: $vgpr0
; GISEL-NEXT: ; implicit-def: $vgpr8
-; GISEL-NEXT: ; %bb.3: ; %Flow3
+; GISEL-NEXT: .LBB3_3: ; %Flow3
; GISEL-NEXT: s_andn2_saveexec_b64 s[8:9], s[4:5]
; GISEL-NEXT: s_cbranch_execz .LBB3_13
; GISEL-NEXT: ; %bb.4: ; %NodeBlock
@@ -1069,13 +1082,14 @@ define double @uitofp_i128_to_f64(i128 %x) {
; GISEL-NEXT: v_lshrrev_b32_e32 v5, 2, v1
; GISEL-NEXT: v_or_b32_e32 v9, v5, v8
; GISEL-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GISEL-NEXT: s_cbranch_execz .LBB3_12
; GISEL-NEXT: ; %bb.11: ; %itofp-if-then20
; GISEL-NEXT: v_lshlrev_b64 v[2:3], 29, v[2:3]
; GISEL-NEXT: v_lshrrev_b64 v[4:5], 3, v[0:1]
; GISEL-NEXT: v_lshrrev_b32_e32 v0, 3, v1
; GISEL-NEXT: v_or_b32_e32 v9, v0, v2
; GISEL-NEXT: v_mov_b32_e32 v6, v7
-; GISEL-NEXT: ; %bb.12: ; %Flow
+; GISEL-NEXT: .LBB3_12: ; %Flow
; GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
; GISEL-NEXT: .LBB3_13: ; %Flow4
; GISEL-NEXT: s_or_b64 exec, exec, s[8:9]
@@ -1129,6 +1143,7 @@ define half @sitofp_i128_to_f16(i128 %x) {
; SDAG-NEXT: v_cmp_gt_i32_e32 vcc, 25, v6
; SDAG-NEXT: s_and_saveexec_b64 s[4:5], vcc
; SDAG-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; SDAG-NEXT: s_cbranch_execz .LBB4_3
; SDAG-NEXT: ; %bb.2: ; %itofp-if-else
; SDAG-NEXT: v_add_u32_e32 v4, 0xffffff98, v7
; SDAG-NEXT: v_lshlrev_b64 v[0:1], v4, v[0:1]
@@ -1138,7 +1153,7 @@ define half @sitofp_i128_to_f16(i128 %x) {
; SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
; SDAG-NEXT: ; implicit-def: $vgpr7
; SDAG-NEXT: ; implicit-def: $vgpr4_vgpr5
-; SDAG-NEXT: ; %bb.3: ; %Flow3
+; SDAG-NEXT: .LBB4_3: ; %Flow3
; SDAG-NEXT: s_andn2_saveexec_b64 s[8:9], s[4:5]
; SDAG-NEXT: s_cbranch_execz .LBB4_13
; SDAG-NEXT: ; %bb.4: ; %NodeBlock
@@ -1258,6 +1273,7 @@ define half @sitofp_i128_to_f16(i128 %x) {
; GISEL-NEXT: ; implicit-def: $vgpr4
; GISEL-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GISEL-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; GISEL-NEXT: s_cbranch_execz .LBB4_3
; GISEL-NEXT: ; %bb.2: ; %itofp-if-else
; GISEL-NEXT: v_add_u32_e32 v2, 0xffffff98, v5
; GISEL-NEXT: v_lshlrev_b64 v[0:1], v2, v[0:1]
@@ -1267,7 +1283,7 @@ define half @sitofp_i128_to_f16(i128 %x) {
; GISEL-NEXT: ; implicit-def: $vgpr0
; GISEL-NEXT: ; implicit-def: $vgpr5
; GISEL-NEXT: ; implicit-def: $vgpr2
-; GISEL-NEXT: ; %bb.3: ; %Flow3
+; GISEL-NEXT: .LBB4_3: ; %Flow3
; GISEL-NEXT: s_andn2_saveexec_b64 s[8:9], s[4:5]
; GISEL-NEXT: s_cbranch_execz .LBB4_13
; GISEL-NEXT: ; %bb.4: ; %NodeBlock
@@ -1386,6 +1402,7 @@ define half @uitofp_i128_to_f16(i128 %x) {
; SDAG-NEXT: ; implicit-def: $vgpr7
; SDAG-NEXT: s_and_saveexec_b64 s[4:5], vcc
; SDAG-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; SDAG-NEXT: s_cbranch_execz .LBB5_3
; SDAG-NEXT: ; %bb.2: ; %itofp-if-else
; SDAG-NEXT: v_add_u32_e32 v2, 0xffffff98, v6
; SDAG-NEXT: v_lshlrev_b64 v[0:1], v2, v[0:1]
@@ -1395,7 +1412,7 @@ define half @uitofp_i128_to_f16(i128 %x) {
; SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
; SDAG-NEXT: ; implicit-def: $vgpr6
; SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
-; SDAG-NEXT: ; %bb.3: ; %Flow3
+; SDAG-NEXT: .LBB5_3: ; %Flow3
; SDAG-NEXT: s_andn2_saveexec_b64 s[8:9], s[4:5]
; SDAG-NEXT: s_cbranch_execz .LBB5_13
; SDAG-NEXT: ; %bb.4: ; %NodeBlock
@@ -1505,6 +1522,7 @@ define half @uitofp_i128_to_f16(i128 %x) {
; GISEL-NEXT: ; implicit-def: $vgpr4
; GISEL-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GISEL-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; GISEL-NEXT: s_cbranch_execz .LBB5_3
; GISEL-NEXT: ; %bb.2: ; %itofp-if-else
; GISEL-NEXT: v_add_u32_e32 v2, 0xffffff98, v5
; GISEL-NEXT: v_lshlrev_b64 v[0:1], v2, v[0:1]
@@ -1514,7 +1532,7 @@ define half @uitofp_i128_to_f16(i128 %x) {
; GISEL-NEXT: ; implicit-def: $vgpr0
; GISEL-NEXT: ; implicit-def: $vgpr5
; GISEL-NEXT: ; implicit-def: $vgpr2
-; GISEL-NEXT: ; %bb.3: ; %Flow3
+; GISEL-NEXT: .LBB5_3: ; %Flow3
; GISEL-NEXT: s_andn2_saveexec_b64 s[8:9], s[4:5]
; GISEL-NEXT: s_cbranch_execz .LBB5_13
; GISEL-NEXT: ; %bb.4: ; %NodeBlock
diff --git a/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll b/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll
index 893b9fa6fb40d0..0792ec6fb1579f 100644
--- a/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll
+++ b/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll
@@ -425,6 +425,8 @@
; GCN-O1-NEXT: Insert required mode register values
; GCN-O1-NEXT: SI Insert Hard Clauses
; GCN-O1-NEXT: SI Final Branch Preparation
+; GCN-O1-NEXT: Machine Natural Loop Construction
+; GCN-O1-NEXT: Machine Trace Metrics
; GCN-O1-NEXT: SI peephole optimizations
; GCN-O1-NEXT: Post RA hazard recognizer
; GCN-O1-NEXT: AMDGPU Insert waits for SGPR read hazards
@@ -738,6 +740,8 @@
; GCN-O1-OPTS-NEXT: Insert required mode register values
; GCN-O1-OPTS-NEXT: SI Insert Hard Clauses
; GCN-O1-OPTS-NEXT: SI Final Branch Preparation
+; GCN-O1-OPTS-NEXT: Machine Natural Loop Construction
+; GCN-O1-OPTS-NEXT: Machine Trace Metrics
; GCN-O1-OPTS-NEXT: SI peephole optimizations
; GCN-O1-OPTS-NEXT: Post RA hazard recognizer
; GCN-O1-OPTS-NEXT: AMDGPU Insert waits for SGPR read hazards
@@ -1057,6 +1061,8 @@
; GCN-O2-NEXT: Insert required mode register values
; GCN-O2-NEXT: SI Insert Hard Clauses
; GCN-O2-NEXT: SI Final Branch Preparation
+; GCN-O2-NEXT: Machine Natural Loop Construction
+; GCN-O2-NEXT: Machine Trace Metrics
; GCN-O2-NEXT: SI peephole optimizations
; GCN-O2-NEXT: Post RA hazard recognizer
; GCN-O2-NEXT: AMDGPU Insert waits for SGPR read hazards
@@ -1389,6 +1395,8 @@
; GCN-O3-NEXT: Insert required mode register values
; GCN-O3-NEXT: SI Insert Hard Clauses
; GCN-O3-NEXT: SI Final Branch Preparation
+; GCN-O3-NEXT: Machine Natural Loop Construction
+; GCN-O3-NEXT: Machine Trace Metrics
; GCN-O3-NEXT: SI peephole optimizations
; GCN-O3-NEXT: Post RA hazard recognizer
; GCN-O3-NEXT: AMDGPU Insert waits for SGPR read hazards
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ballot.i32.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ballot.i32.ll
index d26f0df49b0a83..d4ac2473485e5e 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ballot.i32.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ballot.i32.ll
@@ -538,12 +538,13 @@ define amdgpu_ps void @non_cst_non_compare_input(ptr addrspace(1) %out, i32 %tid
; GFX10-NEXT: s_and_b32 s0, vcc_lo, exec_lo
; GFX10-NEXT: ; %bb.2: ; %Flow
; GFX10-NEXT: s_andn2_saveexec_b32 s1, s1
+; GFX10-NEXT: s_cbranch_execz .LBB24_4
; GFX10-NEXT: ; %bb.3: ; %A
; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2
; GFX10-NEXT: s_andn2_b32 s0, s0, exec_lo
; GFX10-NEXT: s_and_b32 s2, vcc_lo, exec_lo
; GFX10-NEXT: s_or_b32 s0, s0, s2
-; GFX10-NEXT: ; %bb.4: ; %exit
+; GFX10-NEXT: .LBB24_4: ; %exit
; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s1
; GFX10-NEXT: v_cndmask_b32_e64 v2, 0, 1, s0
; GFX10-NEXT: v_cmp_ne_u32_e64 s0, 0, v2
@@ -563,12 +564,13 @@ define amdgpu_ps void @non_cst_non_compare_input(ptr addrspace(1) %out, i32 %tid
; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
; GFX11-NEXT: ; %bb.2: ; %Flow
; GFX11-NEXT: s_and_not1_saveexec_b32 s1, s1
+; GFX11-NEXT: s_cbranch_execz .LBB24_4
; GFX11-NEXT: ; %bb.3: ; %A
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2
; GFX11-NEXT: s_and_not1_b32 s0, s0, exec_lo
; GFX11-NEXT: s_and_b32 s2, vcc_lo, exec_lo
; GFX11-NEXT: s_or_b32 s0, s0, s2
-; GFX11-NEXT: ; %bb.4: ; %exit
+; GFX11-NEXT: .LBB24_4: ; %exit
; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
; GFX11-NEXT: v_cndmask_b32_e64 v2, 0, 1, s0
; GFX11-NEXT: v_cmp_ne_u32_e64 s0, 0, v2
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ballot.i64.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ballot.i64.ll
index c7597e98a6d583..bcd8fb53fa4b62 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ballot.i64.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ballot.i64.ll
@@ -527,12 +527,13 @@ define amdgpu_ps void @non_cst_non_compare_input(ptr addrspace(1) %out, i32 %tid
; CHECK-NEXT: ; implicit-def: $vgpr2
; CHECK-NEXT: ; %bb.2: ; %Flow
; CHECK-NEXT: s_andn2_saveexec_b64 s[2:3], s[2:3]
+; CHECK-NEXT: s_cbranch_execz .LBB24_4
; CHECK-NEXT: ; %bb.3: ; %A
; CHECK-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
; CHECK-NEXT: s_andn2_b64 s[0:1], s[0:1], exec
; CHECK-NEXT: s_and_b64 s[4:5], vcc, exec
; CHECK-NEXT: s_or_b64 s[0:1], s[0:1], s[4:5]
-; CHECK-NEXT: ; %bb.4: ; %exit
+; CHECK-NEXT: .LBB24_4: ; %exit
; CHECK-NEXT: s_or_b64 exec, exec, s[2:3]
; CHECK-NEXT: v_cndmask_b32_e64 v2, 0, 1, s[0:1]
; CHECK-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v2
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.init.whole.wave-w32.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.init.whole.wave-w32.ll
index 1bdaa4c98127d0..edd5a13f29b34b 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.init.whole.wave-w32.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.init.whole.wave-w32.ll
@@ -117,6 +117,7 @@ define amdgpu_cs_chain void @wwm_in_shader(<3 x i32> inreg %sgpr, ptr inreg %cal
; GISEL12-NEXT: s_mov_b32 s7, s4
; GISEL12-NEXT: s_wait_alu 0xfffe
; GISEL12-NEXT: s_and_saveexec_b32 s3, s8
+; GISEL12-NEXT: s_cbranch_execz .LBB1_2
; GISEL12-NEXT: ; %bb.1: ; %shader
; GISEL12-NEXT: s_or_saveexec_b32 s4, -1
; GISEL12-NEXT: s_wait_alu 0xfffe
@@ -128,7 +129,8 @@ define amdgpu_cs_chain void @wwm_in_shader(<3 x i32> inreg %sgpr, ptr inreg %cal
; GISEL12-NEXT: s_mov_b32 exec_lo, s4
; GISEL12-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GISEL12-NEXT: v_dual_mov_b32 v11, v0 :: v_dual_add_nc_u32 v10, 42, v10
-; GISEL12-NEXT: ; %bb.2: ; %tail
+; GISEL12-NEXT: .LBB1_2: ; %tail
+; GISEL12-NEXT: s_wait_alu 0xfffe
; GISEL12-NEXT: s_or_b32 exec_lo, exec_lo, s3
; GISEL12-NEXT: s_mov_b32 exec_lo, s5
; GISEL12-NEXT: s_setpc_b64 s[6:7]
@@ -146,6 +148,7 @@ define amdgpu_cs_chain void @wwm_in_shader(<3 x i32> inreg %sgpr, ptr inreg %cal
; DAGISEL12-NEXT: s_mov_b32 s6, s3
; DAGISEL12-NEXT: s_wait_alu 0xfffe
; DAGISEL12-NEXT: s_and_saveexec_b32 s3, s8
+; DAGISEL12-NEXT: s_cbranch_execz .LBB1_2
; DAGISEL12-NEXT: ; %bb.1: ; %shader
; DAGISEL12-NEXT: s_or_saveexec_b32 s4, -1
; DAGISEL12-NEXT: s_wait_alu 0xfffe
@@ -154,7 +157,8 @@ define amdgpu_cs_chain void @wwm_in_shader(<3 x i32> inreg %sgpr, ptr inreg %cal
; DAGISEL12-NEXT: v_cmp_ne_u32_e64 s8, 0, v0
; DAGISEL12-NEXT: s_mov_b32 exec_lo, s4
; DAGISEL12-NEXT: v_dual_mov_b32 v11, s8 :: v_dual_add_nc_u32 v10, 42, v10
-; DAGISEL12-NEXT: ; %bb.2: ; %tail
+; DAGISEL12-NEXT: .LBB1_2: ; %tail
+; DAGISEL12-NEXT: s_wait_alu 0xfffe
; DAGISEL12-NEXT: s_or_b32 exec_lo, exec_lo, s3
; DAGISEL12-NEXT: s_mov_b32 exec_lo, s5
; DAGISEL12-NEXT: s_setpc_b64 s[6:7]
@@ -168,6 +172,7 @@ define amdgpu_cs_chain void @wwm_in_shader(<3 x i32> inreg %sgpr, ptr inreg %cal
; GISEL10-NEXT: s_mov_b32 s6, s3
; GISEL10-NEXT: s_mov_b32 s7, s4
; GISEL10-NEXT: s_and_saveexec_b32 s3, s8
+; GISEL10-NEXT: s_cbranch_execz .LBB1_2
; GISEL10-NEXT: ; %bb.1: ; %shader
; GISEL10-NEXT: s_or_saveexec_b32 s4, -1
; GISEL10-NEXT: v_cndmask_b32_e64 v0, 0x47, v10, s4
@@ -176,7 +181,7 @@ define amdgpu_cs_chain void @wwm_in_shader(<3 x i32> inreg %sgpr, ptr inreg %cal
; GISEL10-NEXT: s_mov_b32 exec_lo, s4
; GISEL10-NEXT: v_add_nc_u32_e32 v10, 42, v10
; GISEL10-NEXT: v_mov_b32_e32 v11, v0
-; GISEL10-NEXT: ; %bb.2: ; %tail
+; GISEL10-NEXT: .LBB1_2: ; %tail
; GISEL10-NEXT: s_or_b32 exec_lo, exec_lo, s3
; GISEL10-NEXT: s_mov_b32 exec_lo, s5
; GISEL10-NEXT: s_setpc_b64 s[6:7]
@@ -190,6 +195,7 @@ define amdgpu_cs_chain void @wwm_in_shader(<3 x i32> inreg %sgpr, ptr inreg %cal
; DAGISEL10-NEXT: s_mov_b32 s7, s4
; DAGISEL10-NEXT: s_mov_b32 s6, s3
; DAGISEL10-NEXT: s_and_saveexec_b32 s3, s8
+; DAGISEL10-NEXT: s_cbranch_execz .LBB1_2
; DAGISEL10-NEXT: ; %bb.1: ; %shader
; DAGISEL10-NEXT: s_or_saveexec_b32 s4, -1
; DAGISEL10-NEXT: v_cndmask_b32_e64 v0, 0x47, v10, s4
@@ -197,7 +203,7 @@ define amdgpu_cs_chain void @wwm_in_shader(<3 x i32> inreg %sgpr, ptr inreg %cal
; DAGISEL10-NEXT: s_mov_b32 exec_lo, s4
; DAGISEL10-NEXT: v_add_nc_u32_e32 v10, 42, v10
; DAGISEL10-NEXT: v_mov_b32_e32 v11, s8
-; DAGISEL10-NEXT: ; %bb.2: ; %tail
+; DAGISEL10-NEXT: .LBB1_2: ; %tail
; DAGISEL10-NEXT: s_or_b32 exec_lo, exec_lo, s3
; DAGISEL10-NEXT: s_mov_b32 exec_lo, s5
; DAGISEL10-NEXT: s_setpc_b64 s[6:7]
@@ -237,6 +243,7 @@ define amdgpu_cs_chain void @phi_whole_struct(<3 x i32> inreg %sgpr, ptr inreg %
; GISEL12-NEXT: s_mov_b32 s7, s4
; GISEL12-NEXT: s_wait_alu 0xfffe
; GISEL12-NEXT: s_and_saveexec_b32 s3, s8
+; GISEL12-NEXT: s_cbranch_execz .LBB2_2
; GISEL12-NEXT: ; %bb.1: ; %shader
; GISEL12-NEXT: s_or_saveexec_b32 s4, -1
; GISEL12-NEXT: s_wait_alu 0xfffe
@@ -248,7 +255,8 @@ define amdgpu_cs_chain void @phi_whole_struct(<3 x i32> inreg %sgpr, ptr inreg %
; GISEL12-NEXT: s_mov_b32 exec_lo, s4
; GISEL12-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GISEL12-NEXT: v_dual_mov_b32 v11, v0 :: v_dual_add_nc_u32 v10, 42, v12
-; GISEL12-NEXT: ; %bb.2: ; %tail
+; GISEL12-NEXT: .LBB2_2: ; %tail
+; GISEL12-NEXT: s_wait_alu 0xfffe
; GISEL12-NEXT: s_or_b32 exec_lo, exec_lo, s3
; GISEL12-NEXT: s_mov_b32 exec_lo, s5
; GISEL12-NEXT: s_setpc_b64 s[6:7]
@@ -265,6 +273,7 @@ define amdgpu_cs_chain void @phi_whole_struct(<3 x i32> inreg %sgpr, ptr inreg %
; DAGISEL12-NEXT: s_mov_b32 s6, s3
; DAGISEL12-NEXT: s_wait_alu 0xfffe
; DAGISEL12-NEXT: s_and_saveexec_b32 s3, s8
+; DAGISEL12-NEXT: s_cbranch_execz .LBB2_2
; DAGISEL12-NEXT: ; %bb.1: ; %shader
; DAGISEL12-NEXT: s_or_saveexec_b32 s4, -1
; DAGISEL12-NEXT: s_wait_alu 0xfffe
@@ -273,7 +282,8 @@ define amdgpu_cs_chain void @phi_whole_struct(<3 x i32> inreg %sgpr, ptr inreg %
; DAGISEL12-NEXT: v_cmp_ne_u32_e64 s8, 0, v0
; DAGISEL12-NEXT: s_mov_b32 exec_lo, s4
; DAGISEL12-NEXT: v_dual_mov_b32 v11, s8 :: v_dual_add_nc_u32 v10, 42, v12
-; DAGISEL12-NEXT: ; %bb.2: ; %tail
+; DAGISEL12-NEXT: .LBB2_2: ; %tail
+; DAGISEL12-NEXT: s_wait_alu 0xfffe
; DAGISEL12-NEXT: s_or_b32 exec_lo, exec_lo, s3
; DAGISEL12-NEXT: s_mov_b32 exec_lo, s5
; DAGISEL12-NEXT: s_setpc_b64 s[6:7]
@@ -285,6 +295,7 @@ define amdgpu_cs_chain void @phi_whole_struct(<3 x i32> inreg %sgpr, ptr inreg %
; GISEL10-NEXT: s_mov_b32 s6, s3
; GISEL10-NEXT: s_mov_b32 s7, s4
; GISEL10-NEXT: s_and_saveexec_b32 s3, s8
+; GISEL10-NEXT: s_cbranch_execz .LBB2_2
; GISEL10-NEXT: ; %bb.1: ; %shader
; GISEL10-NEXT: s_or_saveexec_b32 s4, -1
; GISEL10-NEXT: v_cndmask_b32_e64 v0, 0x47, v12, s4
@@ -293,7 +304,7 @@ define amdgpu_cs_chain void @phi_whole_struct(<3 x i32> inreg %sgpr, ptr inreg %
; GISEL10-NEXT: s_mov_b32 exec_lo, s4
; GISEL10-NEXT: v_add_nc_u32_e32 v10, 42, v12
; GISEL10-NEXT: v_mov_b32_e32 v11, v0
-; GISEL10-NEXT: ; %bb.2: ; %tail
+; GISEL10-NEXT: .LBB2_2: ; %tail
; GISEL10-NEXT: s_or_b32 exec_lo, exec_lo, s3
; GISEL10-NEXT: s_mov_b32 exec_lo, s5
; GISEL10-NEXT: s_setpc_b64 s[6:7]
@@ -305,6 +316,7 @@ define amdgpu_cs_chain void @phi_whole_struct(<3 x i32> inreg %sgpr, ptr inreg %
; DAGISEL10-NEXT: s_mov_b32 s7, s4
; DAGISEL10-NEXT: s_mov_b32 s6, s3
; DAGISEL10-NEXT: s_and_saveexec_b32 s3, s8
+; DAGISEL10-NEXT: s_cbranch_execz .LBB2_2
; DAGISEL10-NEXT: ; %bb.1: ; %shader
; DAGISEL10-NEXT: s_or_saveexec_b32 s4, -1
; DAGISEL10-NEXT: v_cndmask_b32_e64 v0, 0x47, v12, s4
@@ -312,7 +324,7 @@ define amdgpu_cs_chain void @phi_whole_struct(<3 x i32> inreg %sgpr, ptr inreg %
; DAGISEL10-NEXT: s_mov_b32 exec_lo, s4
; DAGISEL10-NEXT: v_add_nc_u32_e32 v10, 42, v12
; DAGISEL10-NEXT: v_mov_b32_e32 v11, s8
-; DAGISEL10-NEXT: ; %bb.2: ; %tail
+; DAGISEL10-NEXT: .LBB2_2: ; %tail
; DAGISEL10-NEXT: s_or_b32 exec_lo, exec_lo, s3
; DAGISEL10-NEXT: s_mov_b32 exec_lo, s5
; DAGISEL10-NEXT: s_setpc_b64 s[6:7]
@@ -387,6 +399,7 @@ define amdgpu_cs_chain void @control_flow(<3 x i32> inreg %sgpr, ptr inreg %call
; GISEL12-NEXT: v_cmpx_lt_i32_e64 v12, v13
; GISEL12-NEXT: s_wait_alu 0xfffe
; GISEL12-NEXT: s_xor_b32 s3, exec_lo, s3
+; GISEL12-NEXT: s_cbranch_execz .LBB3_6
; GISEL12-NEXT: ; %bb.5: ; %tail.else
; GISEL12-NEXT: s_or_saveexec_b32 s4, -1
; GISEL12-NEXT: v_mov_b32_e32 v0, 15
@@ -394,7 +407,8 @@ define amdgpu_cs_chain void @control_flow(<3 x i32> inreg %sgpr, ptr inreg %call
; GISEL12-NEXT: s_mov_b32 exec_lo, s4
; GISEL12-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GISEL12-NEXT: v_mov_b32_e32 v8, v0
-; GISEL12-NEXT: ; %bb.6: ; %Flow
+; GISEL12-NEXT: .LBB3_6: ; %Flow
+; GISEL12-NEXT: s_wait_alu 0xfffe
; GISEL12-NEXT: s_and_not1_saveexec_b32 s3, s3
; GISEL12-NEXT: ; %bb.7: ; %tail.then
; GISEL12-NEXT: s_mov_b32 s4, 44
@@ -495,12 +509,13 @@ define amdgpu_cs_chain void @control_flow(<3 x i32> inreg %sgpr, ptr inreg %call
; GISEL10-NEXT: ; implicit-def: $vgpr8
; GISEL10-NEXT: v_cmpx_lt_i32_e64 v12, v13
; GISEL10-NEXT: s_xor_b32 s3, exec_lo, s3
+; GISEL10-NEXT: s_cbranch_execz .LBB3_6
; GISEL10-NEXT: ; %bb.5: ; %tail.else
; GISEL10-NEXT: s_or_saveexec_b32 s4, -1
; GISEL10-NEXT: v_mov_b32_e32 v0, 15
; GISEL10-NEXT: s_mov_b32 exec_lo, s4
; GISEL10-NEXT: v_mov_b32_e32 v8, v0
-; GISEL10-NEXT: ; %bb.6: ; %Flow
+; GISEL10-NEXT: .LBB3_6: ; %Flow
; GISEL10-NEXT: s_andn2_saveexec_b32 s3, s3
; GISEL10-NEXT: ; %bb.7: ; %tail.then
; GISEL10-NEXT: s_mov_b32 s4, 44
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.init.whole.wave-w64.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.init.whole.wave-w64.ll
index e0a5d397bded4d..46e680d212ac70 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.init.whole.wave-w64.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.init.whole.wave-w64.ll
@@ -57,6 +57,7 @@ define amdgpu_cs_chain void @basic(<3 x i32> inreg %sgpr, ptr inreg %callee, i64
; DAGISEL12-NEXT: s_mov_b32 s4, s3
; DAGISEL12-NEXT: s_wait_alu 0xfffe
; DAGISEL12-NEXT: s_and_saveexec_b64 s[8:9], s[10:11]
+; DAGISEL12-NEXT: s_cbranch_execz .LBB0_2
; DAGISEL12-NEXT: ; %bb.1: ; %shader
; DAGISEL12-NEXT: s_or_saveexec_b64 s[10:11], -1
; DAGISEL12-NEXT: s_wait_alu 0xfffe
@@ -68,7 +69,8 @@ define amdgpu_cs_chain void @basic(<3 x i32> inreg %sgpr, ptr inreg %callee, i64
; DAGISEL12-NEXT: v_add_nc_u32_e32 v10, 42, v13
; DAGISEL12-NEXT: s_delay_alu instid0(VALU_DEP_3)
; DAGISEL12-NEXT: v_mov_b32_e32 v12, s13
-; DAGISEL12-NEXT: ; %bb.2: ; %tail
+; DAGISEL12-NEXT: .LBB0_2: ; %tail
+; DAGISEL12-NEXT: s_wait_alu 0xfffe
; DAGISEL12-NEXT: s_or_b64 exec, exec, s[8:9]
; DAGISEL12-NEXT: s_mov_b64 exec, s[6:7]
; DAGISEL12-NEXT: s_setpc_b64 s[4:5]
@@ -107,6 +109,7 @@ define amdgpu_cs_chain void @basic(<3 x i32> inreg %sgpr, ptr inreg %callee, i64
; DAGISEL10-NEXT: s_mov_b32 s5, s4
; DAGISEL10-NEXT: s_mov_b32 s4, s3
; DAGISEL10-NEXT: s_and_saveexec_b64 s[8:9], s[10:11]
+; DAGISEL10-NEXT: s_cbranch_execz .LBB0_2
; DAGISEL10-NEXT: ; %bb.1: ; %shader
; DAGISEL10-NEXT: s_or_saveexec_b64 s[10:11], -1
; DAGISEL10-NEXT: v_cndmask_b32_e64 v0, 0x47, v13, s[10:11]
@@ -115,7 +118,7 @@ define amdgpu_cs_chain void @basic(<3 x i32> inreg %sgpr, ptr inreg %callee, i64
; DAGISEL10-NEXT: v_mov_b32_e32 v11, s12
; DAGISEL10-NEXT: v_add_nc_u32_e32 v10, 42, v13
; DAGISEL10-NEXT: v_mov_b32_e32 v12, s13
-; DAGISEL10-NEXT: ; %bb.2: ; %tail
+; DAGISEL10-NEXT: .LBB0_2: ; %tail
; DAGISEL10-NEXT: s_or_b64 exec, exec, s[8:9]
; DAGISEL10-NEXT: s_mov_b64 exec, s[6:7]
; DAGISEL10-NEXT: s_setpc_b64 s[4:5]
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.inverse.ballot.i64.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.inverse.ballot.i64.ll
index 7283ec88a90d83..bad129630569f8 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.inverse.ballot.i64.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.inverse.ballot.i64.ll
@@ -210,12 +210,13 @@ define amdgpu_cs void @inverse_ballot_branch(i64 inreg %s0_1, i64 inreg %s2, ptr
; SDAG-NEXT: v_mov_b32_e32 v2, s0
; SDAG-NEXT: s_xor_b64 s[4:5], s[2:3], -1
; SDAG-NEXT: s_and_saveexec_b64 s[2:3], s[4:5]
+; SDAG-NEXT: s_cbranch_execz .LBB6_2
; SDAG-NEXT: ; %bb.1: ; %if
; SDAG-NEXT: s_add_u32 s0, s0, 1
; SDAG-NEXT: s_addc_u32 s1, s1, 0
; SDAG-NEXT: v_mov_b32_e32 v3, s1
; SDAG-NEXT: v_mov_b32_e32 v2, s0
-; SDAG-NEXT: ; %bb.2: ; %endif
+; SDAG-NEXT: .LBB6_2: ; %endif
; SDAG-NEXT: s_or_b64 exec, exec, s[2:3]
; SDAG-NEXT: global_store_b64 v[0:1], v[2:3], off
; SDAG-NEXT: s_endpgm
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.softwqm.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.softwqm.ll
index 5fb50d7e8589a7..9765d35dc50d83 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.softwqm.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.softwqm.ll
@@ -178,10 +178,9 @@ define amdgpu_ps float @test_control_flow_0(<8 x i32> inreg %rsrc, <4 x i32> inr
; CHECK-NEXT: v_cmp_ne_u32_e32 vcc, 0, v1
; CHECK-NEXT: s_and_saveexec_b64 s[0:1], vcc
; CHECK-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; CHECK-NEXT: s_cbranch_execz .LBB6_2
; CHECK-NEXT: ; %bb.1: ; %ELSE
; CHECK-NEXT: buffer_store_dword v2, v0, s[0:3], 0 idxen
-; CHECK-NEXT: .LBB6_2: ; %Flow
+; CHECK-NEXT: ; %bb.2: ; %Flow
; CHECK-NEXT: s_andn2_saveexec_b64 s[0:1], s[0:1]
; CHECK-NEXT: s_cbranch_execz .LBB6_4
; CHECK-NEXT: ; %bb.3: ; %IF
diff --git a/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fadd.ll b/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fadd.ll
index 5fd6deff0fbbb7..c2e2ce2b444814 100644
--- a/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fadd.ll
+++ b/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fadd.ll
@@ -7949,10 +7949,11 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX12-NEXT: s_and_saveexec_b32 s1, vcc_lo
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_xor_b32 s1, exec_lo, s1
+; GFX12-NEXT: s_cbranch_execz .LBB29_8
; GFX12-NEXT: ; %bb.7:
; GFX12-NEXT: v_dual_mov_b32 v1, s2 :: v_dual_mov_b32 v2, s0
; GFX12-NEXT: ds_add_rtn_f32 v1, v1, v2
-; GFX12-NEXT: ; %bb.8:
+; GFX12-NEXT: .LBB29_8:
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
; GFX12-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
@@ -8434,11 +8435,12 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX8-NEXT: ; implicit-def: $vgpr2
; GFX8-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX8-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
+; GFX8-NEXT: s_cbranch_execz .LBB29_8
; GFX8-NEXT: ; %bb.7:
; GFX8-NEXT: v_mov_b32_e32 v2, s2
; GFX8-NEXT: s_mov_b32 m0, -1
; GFX8-NEXT: ds_add_rtn_f32 v2, v2, v1
-; GFX8-NEXT: ; %bb.8:
+; GFX8-NEXT: .LBB29_8:
; GFX8-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
diff --git a/llvm/test/CodeGen/AMDGPU/ret_jump.ll b/llvm/test/CodeGen/AMDGPU/ret_jump.ll
index 66a55d9eb128c6..a49ac210b84431 100644
--- a/llvm/test/CodeGen/AMDGPU/ret_jump.ll
+++ b/llvm/test/CodeGen/AMDGPU/ret_jump.ll
@@ -65,6 +65,7 @@ ret.bb: ; preds = %else, %main_body
; GCN: .LBB{{[0-9]+_[0-9]+}}: ; %else
; GCN: s_and_saveexec_b64 [[SAVE_EXEC:s\[[0-9]+:[0-9]+\]]], vcc
+; GCN-NEXT: s_cbranch_execz [[ELSE:.LBB[0-9]+_[0-9]+]]
; GCN-NEXT: ; %unreachable.bb
; GCN: ds_write_b32
diff --git a/llvm/test/CodeGen/AMDGPU/subreg-coalescer-undef-use.ll b/llvm/test/CodeGen/AMDGPU/subreg-coalescer-undef-use.ll
index d4329aec2021c0..36899f8ab6c51d 100644
--- a/llvm/test/CodeGen/AMDGPU/subreg-coalescer-undef-use.ll
+++ b/llvm/test/CodeGen/AMDGPU/subreg-coalescer-undef-use.ll
@@ -20,13 +20,14 @@ define amdgpu_kernel void @foobar(float %a0, float %a1, ptr addrspace(1) %out) #
; CHECK-NEXT: v_mov_b32_e32 v2, s6
; CHECK-NEXT: v_mov_b32_e32 v3, s7
; CHECK-NEXT: s_and_saveexec_b64 s[6:7], vcc
+; CHECK-NEXT: s_cbranch_execz .LBB0_2
; CHECK-NEXT: ; %bb.1: ; %ift
; CHECK-NEXT: s_mov_b32 s4, s5
; CHECK-NEXT: v_mov_b32_e32 v0, s4
; CHECK-NEXT: v_mov_b32_e32 v1, s5
; CHECK-NEXT: v_mov_b32_e32 v2, s6
; CHECK-NEXT: v_mov_b32_e32 v3, s7
-; CHECK-NEXT: ; %bb.2: ; %ife
+; CHECK-NEXT: .LBB0_2: ; %ife
; CHECK-NEXT: s_or_b64 exec, exec, s[6:7]
; CHECK-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xb
; CHECK-NEXT: s_mov_b32 s3, 0xf000
diff --git a/llvm/test/CodeGen/AMDGPU/vni8-across-blocks.ll b/llvm/test/CodeGen/AMDGPU/vni8-across-blocks.ll
index b5e4bcd049c42a..43cb79d8c95c58 100644
--- a/llvm/test/CodeGen/AMDGPU/vni8-across-blocks.ll
+++ b/llvm/test/CodeGen/AMDGPU/vni8-across-blocks.ll
@@ -60,10 +60,9 @@ define amdgpu_kernel void @v4i8_liveout(ptr addrspace(1) %src1, ptr addrspace(1)
; GFX906-NEXT: s_waitcnt lgkmcnt(0)
; GFX906-NEXT: global_load_dword v2, v3, s[0:1]
; GFX906-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX906-NEXT: s_cbranch_execz .LBB1_2
; GFX906-NEXT: ; %bb.1: ; %bb.1
; GFX906-NEXT: global_load_dword v2, v3, s[2:3]
-; GFX906-NEXT: .LBB1_2: ; %bb.2
+; GFX906-NEXT: ; %bb.2: ; %bb.2
; GFX906-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX906-NEXT: s_waitcnt vmcnt(0)
; GFX906-NEXT: global_store_dword v1, v2, s[6:7]
@@ -136,10 +135,9 @@ define amdgpu_kernel void @v8i8_liveout(ptr addrspace(1) %src1, ptr addrspace(1)
; GFX906-NEXT: s_waitcnt lgkmcnt(0)
; GFX906-NEXT: global_load_dwordx2 v[1:2], v4, s[0:1]
; GFX906-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX906-NEXT: s_cbranch_execz .LBB3_2
; GFX906-NEXT: ; %bb.1: ; %bb.1
; GFX906-NEXT: global_load_dwordx2 v[1:2], v4, s[2:3]
-; GFX906-NEXT: .LBB3_2: ; %bb.2
+; GFX906-NEXT: ; %bb.2: ; %bb.2
; GFX906-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX906-NEXT: s_waitcnt vmcnt(0)
; GFX906-NEXT: global_store_dwordx2 v3, v[1:2], s[6:7]
@@ -172,10 +170,9 @@ define amdgpu_kernel void @v16i8_liveout(ptr addrspace(1) %src1, ptr addrspace(1
; GFX906-NEXT: s_waitcnt lgkmcnt(0)
; GFX906-NEXT: global_load_dwordx4 v[1:4], v6, s[0:1]
; GFX906-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX906-NEXT: s_cbranch_execz .LBB4_2
; GFX906-NEXT: ; %bb.1: ; %bb.1
; GFX906-NEXT: global_load_dwordx4 v[1:4], v6, s[2:3]
-; GFX906-NEXT: .LBB4_2: ; %bb.2
+; GFX906-NEXT: ; %bb.2: ; %bb.2
; GFX906-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX906-NEXT: s_waitcnt vmcnt(0)
; GFX906-NEXT: global_store_dwordx4 v5, v[1:4], s[6:7]
@@ -209,11 +206,10 @@ define amdgpu_kernel void @v32i8_liveout(ptr addrspace(1) %src1, ptr addrspace(1
; GFX906-NEXT: global_load_dwordx4 v[1:4], v10, s[0:1] offset:16
; GFX906-NEXT: global_load_dwordx4 v[5:8], v10, s[0:1]
; GFX906-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX906-NEXT: s_cbranch_execz .LBB5_2
; GFX906-NEXT: ; %bb.1: ; %bb.1
; GFX906-NEXT: global_load_dwordx4 v[1:4], v10, s[2:3] offset:16
; GFX906-NEXT: global_load_dwordx4 v[5:8], v10, s[2:3]
-; GFX906-NEXT: .LBB5_2: ; %bb.2
+; GFX906-NEXT: ; %bb.2: ; %bb.2
; GFX906-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX906-NEXT: s_waitcnt vmcnt(1)
; GFX906-NEXT: global_store_dwordx4 v9, v[1:4], s[6:7] offset:16
@@ -646,11 +642,10 @@ define amdgpu_kernel void @v8i8_multi_block(ptr addrspace(1) %src1, ptr addrspac
; GFX906-NEXT: global_load_dwordx2 v[1:2], v6, s[10:11]
; GFX906-NEXT: v_cmp_gt_u32_e32 vcc, 7, v0
; GFX906-NEXT: s_and_saveexec_b64 s[2:3], vcc
-; GFX906-NEXT: s_cbranch_execz .LBB11_3
; GFX906-NEXT: ; %bb.2: ; %bb.2
; GFX906-NEXT: v_mov_b32_e32 v0, 0
; GFX906-NEXT: global_store_dwordx2 v0, v[3:4], s[12:13]
-; GFX906-NEXT: .LBB11_3: ; %Flow
+; GFX906-NEXT: ; %bb.3: ; %Flow
; GFX906-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX906-NEXT: .LBB11_4: ; %bb.3
; GFX906-NEXT: s_or_b64 exec, exec, s[0:1]
diff --git a/llvm/test/CodeGen/AMDGPU/wave32.ll b/llvm/test/CodeGen/AMDGPU/wave32.ll
index e49dd9eab30f3f..0d87258ca9a36f 100644
--- a/llvm/test/CodeGen/AMDGPU/wave32.ll
+++ b/llvm/test/CodeGen/AMDGPU/wave32.ll
@@ -396,13 +396,14 @@ define amdgpu_kernel void @test_loop_with_if(ptr addrspace(1) %arg) #0 {
; GFX1032-NEXT: ; implicit-def: $vgpr4
; GFX1032-NEXT: s_and_saveexec_b32 s5, s4
; GFX1032-NEXT: s_xor_b32 s4, exec_lo, s5
+; GFX1032-NEXT: s_cbranch_execz .LBB10_6
; GFX1032-NEXT: ; %bb.5: ; %bb11
; GFX1032-NEXT: ; in Loop: Header=BB10_2 Depth=1
; GFX1032-NEXT: v_lshrrev_b32_e32 v4, 31, v1
; GFX1032-NEXT: s_andn2_b32 s3, s3, exec_lo
; GFX1032-NEXT: v_add_nc_u32_e32 v4, v1, v4
; GFX1032-NEXT: v_ashrrev_i32_e32 v4, 1, v4
-; GFX1032-NEXT: ; %bb.6: ; %Flow1
+; GFX1032-NEXT: .LBB10_6: ; %Flow1
; GFX1032-NEXT: ; in Loop: Header=BB10_2 Depth=1
; GFX1032-NEXT: s_or_b32 exec_lo, exec_lo, s4
; GFX1032-NEXT: s_and_saveexec_b32 s4, s3
@@ -458,13 +459,14 @@ define amdgpu_kernel void @test_loop_with_if(ptr addrspace(1) %arg) #0 {
; GFX1064-NEXT: ; implicit-def: $vgpr4
; GFX1064-NEXT: s_and_saveexec_b64 s[8:9], s[6:7]
; GFX1064-NEXT: s_xor_b64 s[6:7], exec, s[8:9]
+; GFX1064-NEXT: s_cbranch_execz .LBB10_6
; GFX1064-NEXT: ; %bb.5: ; %bb11
; GFX1064-NEXT: ; in Loop: Header=BB10_2 Depth=1
; GFX1064-NEXT: v_lshrrev_b32_e32 v4, 31, v1
; GFX1064-NEXT: s_andn2_b64 s[4:5], s[4:5], exec
; GFX1064-NEXT: v_add_nc_u32_e32 v4, v1, v4
; GFX1064-NEXT: v_ashrrev_i32_e32 v4, 1, v4
-; GFX1064-NEXT: ; %bb.6: ; %Flow1
+; GFX1064-NEXT: .LBB10_6: ; %Flow1
; GFX1064-NEXT: ; in Loop: Header=BB10_2 Depth=1
; GFX1064-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX1064-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
diff --git a/llvm/test/CodeGen/AMDGPU/while-break.ll b/llvm/test/CodeGen/AMDGPU/while-break.ll
index 9bb8a2f9f0282c..ffab26f079dac0 100644
--- a/llvm/test/CodeGen/AMDGPU/while-break.ll
+++ b/llvm/test/CodeGen/AMDGPU/while-break.ll
@@ -103,13 +103,14 @@ define amdgpu_ps float @while_break2(i32 %z, float %v, i32 %x, i32 %y) #0 {
; GCN-NEXT: ; %bb.4: ; %Flow
; GCN-NEXT: ; in Loop: Header=BB1_2 Depth=1
; GCN-NEXT: s_andn2_saveexec_b32 s3, s3
+; GCN-NEXT: s_cbranch_execz .LBB1_6
; GCN-NEXT: ; %bb.5: ; %else
; GCN-NEXT: ; in Loop: Header=BB1_2 Depth=1
; GCN-NEXT: v_cmp_lt_i32_e32 vcc_lo, s1, v3
; GCN-NEXT: s_andn2_b32 s2, s2, exec_lo
; GCN-NEXT: s_and_b32 s4, vcc_lo, exec_lo
; GCN-NEXT: s_or_b32 s2, s2, s4
-; GCN-NEXT: ; %bb.6: ; %Flow1
+; GCN-NEXT: .LBB1_6: ; %Flow1
; GCN-NEXT: ; in Loop: Header=BB1_2 Depth=1
; GCN-NEXT: s_or_b32 exec_lo, exec_lo, s3
; GCN-NEXT: s_mov_b32 s3, -1
diff --git a/llvm/test/CodeGen/AMDGPU/wwm-reserved-spill.ll b/llvm/test/CodeGen/AMDGPU/wwm-reserved-spill.ll
index af7d16968488a1..5b81b2640454dd 100644
--- a/llvm/test/CodeGen/AMDGPU/wwm-reserved-spill.ll
+++ b/llvm/test/CodeGen/AMDGPU/wwm-reserved-spill.ll
@@ -264,6 +264,7 @@ define amdgpu_gfx void @strict_wwm_cfg(ptr addrspace(8) inreg %tmp14, i32 %arg)
; GFX9-O3-NEXT: v_mov_b32_e32 v3, v1
; GFX9-O3-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX9-O3-NEXT: s_and_saveexec_b64 s[34:35], vcc
+; GFX9-O3-NEXT: s_cbranch_execz .LBB1_2
; GFX9-O3-NEXT: ; %bb.1: ; %if
; GFX9-O3-NEXT: s_or_saveexec_b64 s[36:37], -1
; GFX9-O3-NEXT: v_mov_b32_e32 v1, 0
@@ -273,7 +274,7 @@ define amdgpu_gfx void @strict_wwm_cfg(ptr addrspace(8) inreg %tmp14, i32 %arg)
; GFX9-O3-NEXT: v_add_u32_e32 v1, v2, v1
; GFX9-O3-NEXT: s_mov_b64 exec, s[36:37]
; GFX9-O3-NEXT: v_mov_b32_e32 v5, v1
-; GFX9-O3-NEXT: ; %bb.2: ; %merge
+; GFX9-O3-NEXT: .LBB1_2: ; %merge
; GFX9-O3-NEXT: s_or_b64 exec, exec, s[34:35]
; GFX9-O3-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5
; GFX9-O3-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
diff --git a/llvm/test/CodeGen/AMDGPU/wwm-reserved.ll b/llvm/test/CodeGen/AMDGPU/wwm-reserved.ll
index ddc50b7d495047..d8e306a5317aa3 100644
--- a/llvm/test/CodeGen/AMDGPU/wwm-reserved.ll
+++ b/llvm/test/CodeGen/AMDGPU/wwm-reserved.ll
@@ -230,6 +230,7 @@ define amdgpu_cs void @cfg(ptr addrspace(8) inreg %tmp14, i32 %arg) {
; GFX9-O3-NEXT: v_mov_b32_e32 v3, v1
; GFX9-O3-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX9-O3-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX9-O3-NEXT: s_cbranch_execz .LBB1_2
; GFX9-O3-NEXT: ; %bb.1: ; %if
; GFX9-O3-NEXT: s_or_saveexec_b64 s[6:7], -1
; GFX9-O3-NEXT: v_mov_b32_e32 v1, 0
@@ -239,7 +240,7 @@ define amdgpu_cs void @cfg(ptr addrspace(8) inreg %tmp14, i32 %arg) {
; GFX9-O3-NEXT: v_add_u32_e32 v1, v2, v1
; GFX9-O3-NEXT: s_mov_b64 exec, s[6:7]
; GFX9-O3-NEXT: v_mov_b32_e32 v5, v1
-; GFX9-O3-NEXT: ; %bb.2: ; %merge
+; GFX9-O3-NEXT: .LBB1_2: ; %merge
; GFX9-O3-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-O3-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5
; GFX9-O3-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
@@ -1082,6 +1083,7 @@ define amdgpu_cs void @strict_wwm_cfg(ptr addrspace(8) inreg %tmp14, i32 %arg) {
; GFX9-O3-NEXT: v_mov_b32_e32 v3, v1
; GFX9-O3-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX9-O3-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX9-O3-NEXT: s_cbranch_execz .LBB8_2
; GFX9-O3-NEXT: ; %bb.1: ; %if
; GFX9-O3-NEXT: s_or_saveexec_b64 s[6:7], -1
; GFX9-O3-NEXT: v_mov_b32_e32 v1, 0
@@ -1091,7 +1093,7 @@ define amdgpu_cs void @strict_wwm_cfg(ptr addrspace(8) inreg %tmp14, i32 %arg) {
; GFX9-O3-NEXT: v_add_u32_e32 v1, v2, v1
; GFX9-O3-NEXT: s_mov_b64 exec, s[6:7]
; GFX9-O3-NEXT: v_mov_b32_e32 v5, v1
-; GFX9-O3-NEXT: ; %bb.2: ; %merge
+; GFX9-O3-NEXT: .LBB8_2: ; %merge
; GFX9-O3-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-O3-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5
; GFX9-O3-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc