[llvm] [AMDGPU][SIPreEmitPeephole] mustRetainExeczBranch: use BranchProbability and TargetSchedmodel (PR #109818)
Juan Manuel Martinez Caamaño via llvm-commits
llvm-commits at lists.llvm.org
Wed Sep 25 01:26:17 PDT 2024
https://github.com/jmmartinez updated https://github.com/llvm/llvm-project/pull/109818
>From 535a3ab0550f56f2c9621f9ee1d4e88ea72dc8b7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Juan=20Manuel=20Martinez=20Caama=C3=B1o?= <juamarti at amd.com>
Date: Mon, 23 Sep 2024 16:38:37 +0200
Subject: [PATCH 1/3] [AMDGPU][StructurizeCFG] Maintain branch MD_prof metadata
---
llvm/lib/Transforms/Scalar/StructurizeCFG.cpp | 83 +++++++++++++++----
.../structurizer-keep-perf-md.ll | 8 +-
2 files changed, 71 insertions(+), 20 deletions(-)
diff --git a/llvm/lib/Transforms/Scalar/StructurizeCFG.cpp b/llvm/lib/Transforms/Scalar/StructurizeCFG.cpp
index aca8225cebb3fd..d0de3d29233f11 100644
--- a/llvm/lib/Transforms/Scalar/StructurizeCFG.cpp
+++ b/llvm/lib/Transforms/Scalar/StructurizeCFG.cpp
@@ -30,6 +30,7 @@
#include "llvm/IR/Metadata.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/PatternMatch.h"
+#include "llvm/IR/ProfDataUtils.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
@@ -85,7 +86,43 @@ using PhiMap = MapVector<PHINode *, BBValueVector>;
using BB2BBVecMap = MapVector<BasicBlock *, BBVector>;
using BBPhiMap = DenseMap<BasicBlock *, PhiMap>;
-using BBPredicates = DenseMap<BasicBlock *, Value *>;
+
+using MaybeCondBranchWeights = std::optional<class CondBranchWeights>;
+
+class CondBranchWeights {
+ uint32_t TrueWeight;
+ uint32_t FalseWeight;
+
+ CondBranchWeights(uint32_t T, uint32_t F) : TrueWeight(T), FalseWeight(F) {}
+
+public:
+ static MaybeCondBranchWeights tryParse(const BranchInst &Br) {
+ assert(Br.isConditional());
+
+ uint64_t T, F;
+ if (!extractBranchWeights(Br, T, F))
+ return std::nullopt;
+
+ return CondBranchWeights(T, F);
+ }
+
+ static void setMetadata(BranchInst &Br,
+ const MaybeCondBranchWeights &Weights) {
+ assert(Br.isConditional());
+ if (!Weights)
+ return;
+ uint32_t Arr[] = {Weights->TrueWeight, Weights->FalseWeight};
+ setBranchWeights(Br, Arr, false);
+ }
+
+ CondBranchWeights invert() const {
+ return CondBranchWeights{FalseWeight, TrueWeight};
+ }
+};
+
+using ValueWeightPair = std::pair<Value *, MaybeCondBranchWeights>;
+
+using BBPredicates = DenseMap<BasicBlock *, ValueWeightPair>;
using PredMap = DenseMap<BasicBlock *, BBPredicates>;
using BB2BBMap = DenseMap<BasicBlock *, BasicBlock *>;
@@ -271,7 +308,7 @@ class StructurizeCFG {
void analyzeLoops(RegionNode *N);
- Value *buildCondition(BranchInst *Term, unsigned Idx, bool Invert);
+ ValueWeightPair buildCondition(BranchInst *Term, unsigned Idx, bool Invert);
void gatherPredicates(RegionNode *N);
@@ -449,16 +486,22 @@ void StructurizeCFG::analyzeLoops(RegionNode *N) {
}
/// Build the condition for one edge
-Value *StructurizeCFG::buildCondition(BranchInst *Term, unsigned Idx,
- bool Invert) {
+ValueWeightPair StructurizeCFG::buildCondition(BranchInst *Term, unsigned Idx,
+ bool Invert) {
Value *Cond = Invert ? BoolFalse : BoolTrue;
+ MaybeCondBranchWeights Weights = std::nullopt;
+
if (Term->isConditional()) {
Cond = Term->getCondition();
+ Weights = CondBranchWeights::tryParse(*Term);
- if (Idx != (unsigned)Invert)
+ if (Idx != (unsigned)Invert) {
Cond = invertCondition(Cond);
+ if (Weights)
+ Weights = Weights->invert();
+ }
}
- return Cond;
+ return {Cond, Weights};
}
/// Analyze the predecessors of each block and build up predicates
@@ -490,8 +533,8 @@ void StructurizeCFG::gatherPredicates(RegionNode *N) {
if (Visited.count(Other) && !Loops.count(Other) &&
!Pred.count(Other) && !Pred.count(P)) {
- Pred[Other] = BoolFalse;
- Pred[P] = BoolTrue;
+ Pred[Other] = {BoolFalse, std::nullopt};
+ Pred[P] = {BoolTrue, std::nullopt};
continue;
}
}
@@ -512,9 +555,9 @@ void StructurizeCFG::gatherPredicates(RegionNode *N) {
BasicBlock *Entry = R->getEntry();
if (Visited.count(Entry))
- Pred[Entry] = BoolTrue;
+ Pred[Entry] = {BoolTrue, std::nullopt};
else
- LPred[Entry] = BoolFalse;
+ LPred[Entry] = {BoolFalse, std::nullopt};
}
}
}
@@ -578,12 +621,14 @@ void StructurizeCFG::insertConditions(bool Loops) {
Dominator.addBlock(Parent);
Value *ParentValue = nullptr;
- for (std::pair<BasicBlock *, Value *> BBAndPred : Preds) {
+ MaybeCondBranchWeights ParentWeights = std::nullopt;
+ for (std::pair<BasicBlock *, ValueWeightPair> BBAndPred : Preds) {
BasicBlock *BB = BBAndPred.first;
- Value *Pred = BBAndPred.second;
+ Value *Pred = BBAndPred.second.first;
if (BB == Parent) {
ParentValue = Pred;
+ ParentWeights = BBAndPred.second.second;
break;
}
PhiInserter.AddAvailableValue(BB, Pred);
@@ -592,6 +637,7 @@ void StructurizeCFG::insertConditions(bool Loops) {
if (ParentValue) {
Term->setCondition(ParentValue);
+ CondBranchWeights::setMetadata(*Term, ParentWeights);
} else {
if (!Dominator.resultIsRememberedBlock())
PhiInserter.AddAvailableValue(Dominator.result(), Default);
@@ -607,7 +653,7 @@ void StructurizeCFG::simplifyConditions() {
for (auto &I : concat<PredMap::value_type>(Predicates, LoopPreds)) {
auto &Preds = I.second;
for (auto &J : Preds) {
- auto &Cond = J.second;
+ auto &Cond = J.second.first;
Instruction *Inverted;
if (match(Cond, m_Not(m_OneUse(m_Instruction(Inverted)))) &&
!Cond->use_empty()) {
@@ -904,9 +950,10 @@ void StructurizeCFG::setPrevNode(BasicBlock *BB) {
/// Does BB dominate all the predicates of Node?
bool StructurizeCFG::dominatesPredicates(BasicBlock *BB, RegionNode *Node) {
BBPredicates &Preds = Predicates[Node->getEntry()];
- return llvm::all_of(Preds, [&](std::pair<BasicBlock *, Value *> Pred) {
- return DT->dominates(BB, Pred.first);
- });
+ return llvm::all_of(Preds,
+ [&](std::pair<BasicBlock *, ValueWeightPair> Pred) {
+ return DT->dominates(BB, Pred.first);
+ });
}
/// Can we predict that this node will always be called?
@@ -918,9 +965,9 @@ bool StructurizeCFG::isPredictableTrue(RegionNode *Node) {
if (!PrevNode)
return true;
- for (std::pair<BasicBlock*, Value*> Pred : Preds) {
+ for (std::pair<BasicBlock *, ValueWeightPair> Pred : Preds) {
BasicBlock *BB = Pred.first;
- Value *V = Pred.second;
+ Value *V = Pred.second.first;
if (V != BoolTrue)
return false;
diff --git a/llvm/test/Transforms/StructurizeCFG/structurizer-keep-perf-md.ll b/llvm/test/Transforms/StructurizeCFG/structurizer-keep-perf-md.ll
index 862c50c6183f16..cdf5ca569701be 100644
--- a/llvm/test/Transforms/StructurizeCFG/structurizer-keep-perf-md.ll
+++ b/llvm/test/Transforms/StructurizeCFG/structurizer-keep-perf-md.ll
@@ -5,7 +5,7 @@ define amdgpu_ps i32 @if_else(i32 %0) {
; OPT-LABEL: define amdgpu_ps i32 @if_else(
; OPT-SAME: i32 [[TMP0:%.*]]) {
; OPT-NEXT: [[C:%.*]] = icmp ne i32 [[TMP0]], 0
-; OPT-NEXT: br i1 [[C]], label %[[FALSE:.*]], label %[[FLOW:.*]]
+; OPT-NEXT: br i1 [[C]], label %[[FALSE:.*]], label %[[FLOW:.*]], !prof [[PROF0:![0-9]+]]
; OPT: [[FLOW]]:
; OPT-NEXT: [[TMP2:%.*]] = phi i32 [ 33, %[[FALSE]] ], [ undef, [[TMP1:%.*]] ]
; OPT-NEXT: [[TMP3:%.*]] = phi i1 [ false, %[[FALSE]] ], [ true, [[TMP1]] ]
@@ -40,7 +40,7 @@ define amdgpu_ps void @loop_if_break(i32 %n) {
; OPT: [[LOOP]]:
; OPT-NEXT: [[I:%.*]] = phi i32 [ [[N]], %[[ENTRY]] ], [ [[TMP0:%.*]], %[[FLOW:.*]] ]
; OPT-NEXT: [[C:%.*]] = icmp ugt i32 [[I]], 0
-; OPT-NEXT: br i1 [[C]], label %[[LOOP_BODY:.*]], label %[[FLOW]]
+; OPT-NEXT: br i1 [[C]], label %[[LOOP_BODY:.*]], label %[[FLOW]], !prof [[PROF1:![0-9]+]]
; OPT: [[LOOP_BODY]]:
; OPT-NEXT: [[I_NEXT:%.*]] = sub i32 [[I]], 1
; OPT-NEXT: br label %[[FLOW]]
@@ -70,3 +70,7 @@ exit: ; preds = %loop
attributes #0 = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
!0 = !{!"branch_weights", i32 1000, i32 1}
+;.
+; OPT: [[PROF0]] = !{!"branch_weights", i32 1, i32 1000}
+; OPT: [[PROF1]] = !{!"branch_weights", i32 1000, i32 1}
+;.
>From 954e02e29984c47fb4f495dbf765ace87fd1b220 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Juan=20Manuel=20Martinez=20Caama=C3=B1o?= <juamarti at amd.com>
Date: Wed, 28 Aug 2024 17:30:54 +0200
Subject: [PATCH 2/3] [AMDGPU][SIPreEmitPeephole] pre-commit tests:
mustRetainExeczBranch: use a cost model
---
.../AMDGPU/amdgpu-demote-scc-branches.ll | 265 ++++++++++++++++++
1 file changed, 265 insertions(+)
create mode 100644 llvm/test/CodeGen/AMDGPU/amdgpu-demote-scc-branches.ll
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-demote-scc-branches.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-demote-scc-branches.ll
new file mode 100644
index 00000000000000..33865c04b3fe92
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-demote-scc-branches.ll
@@ -0,0 +1,265 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1010 < %s | FileCheck -check-prefixes=GFX10,GFX1010 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1030 < %s | FileCheck -check-prefixes=GFX10,GFX1030 %s
+
+define void @convergent_cmp_no_metadata(i32 noundef inreg %value, ptr addrspace(8) nocapture writeonly inreg %res, i32 noundef inreg %v_offset, i32 noundef inreg %0, i32 noundef inreg %flag) {
+; GFX10-LABEL: convergent_cmp_no_metadata:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_cmp_lt_i32 s21, 1
+; GFX10-NEXT: s_cbranch_scc1 .LBB0_2
+; GFX10-NEXT: ; %bb.1: ; %if.then
+; GFX10-NEXT: v_mov_b32_e32 v0, s6
+; GFX10-NEXT: v_mov_b32_e32 v1, s19
+; GFX10-NEXT: s_mov_b32 s11, s18
+; GFX10-NEXT: s_mov_b32 s10, s17
+; GFX10-NEXT: s_mov_b32 s9, s16
+; GFX10-NEXT: s_mov_b32 s8, s7
+; GFX10-NEXT: buffer_store_dword v0, v1, s[8:11], 0 offen
+; GFX10-NEXT: .LBB0_2: ; %if.end
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %cmp = icmp sgt i32 %flag, 0
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then:
+ tail call void @llvm.amdgcn.raw.ptr.buffer.store.i32(i32 %value, ptr addrspace(8) %res, i32 %v_offset, i32 0, i32 0)
+ br label %if.end
+
+if.end:
+ call void @llvm.amdgcn.s.waitcnt(i32 0)
+ ret void
+}
+
+define void @convergent_cmp_unprofitable(i32 noundef inreg %value, ptr addrspace(8) nocapture writeonly inreg %res, i32 noundef inreg %v_offset, i32 noundef inreg %0, i32 noundef inreg %flag) {
+; GFX10-LABEL: convergent_cmp_unprofitable:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_cmp_lt_i32 s21, 1
+; GFX10-NEXT: s_cbranch_scc1 .LBB1_2
+; GFX10-NEXT: ; %bb.1: ; %if.then
+; GFX10-NEXT: v_mov_b32_e32 v0, s6
+; GFX10-NEXT: v_mov_b32_e32 v1, s19
+; GFX10-NEXT: s_mov_b32 s11, s18
+; GFX10-NEXT: s_mov_b32 s10, s17
+; GFX10-NEXT: s_mov_b32 s9, s16
+; GFX10-NEXT: s_mov_b32 s8, s7
+; GFX10-NEXT: buffer_store_dword v0, v1, s[8:11], 0 offen
+; GFX10-NEXT: .LBB1_2: ; %if.end
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %cmp = icmp sgt i32 %flag, 0
+ br i1 %cmp, label %if.then, label %if.end, !prof !0
+
+if.then:
+ tail call void @llvm.amdgcn.raw.ptr.buffer.store.i32(i32 %value, ptr addrspace(8) %res, i32 %v_offset, i32 0, i32 0)
+ br label %if.end
+
+if.end:
+ call void @llvm.amdgcn.s.waitcnt(i32 0)
+ ret void
+}
+
+define void @convergent_cmp_profitable(i32 noundef inreg %value, ptr addrspace(8) nocapture writeonly inreg %res, i32 noundef inreg %v_offset, i32 noundef inreg %0, i32 noundef inreg %flag) {
+; GFX10-LABEL: convergent_cmp_profitable:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_cmp_lt_i32 s21, 1
+; GFX10-NEXT: s_cbranch_scc1 .LBB2_2
+; GFX10-NEXT: ; %bb.1: ; %if.then
+; GFX10-NEXT: v_mov_b32_e32 v0, s6
+; GFX10-NEXT: v_mov_b32_e32 v1, s19
+; GFX10-NEXT: s_mov_b32 s11, s18
+; GFX10-NEXT: s_mov_b32 s10, s17
+; GFX10-NEXT: s_mov_b32 s9, s16
+; GFX10-NEXT: s_mov_b32 s8, s7
+; GFX10-NEXT: buffer_store_dword v0, v1, s[8:11], 0 offen
+; GFX10-NEXT: .LBB2_2: ; %if.end
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %cmp = icmp sgt i32 %flag, 0
+ br i1 %cmp, label %if.then, label %if.end, !prof !1
+
+if.then:
+ tail call void @llvm.amdgcn.raw.ptr.buffer.store.i32(i32 %value, ptr addrspace(8) %res, i32 %v_offset, i32 0, i32 0)
+ br label %if.end
+
+if.end:
+ call void @llvm.amdgcn.s.waitcnt(i32 0)
+ ret void
+}
+
+define void @divergent_cmp_no_metadata(i32 noundef inreg %value, ptr addrspace(8) nocapture writeonly inreg %res, i32 noundef inreg %v_offset, i32 noundef inreg %0, i32 noundef inreg %flag) {
+; GFX1010-LABEL: divergent_cmp_no_metadata:
+; GFX1010: ; %bb.0: ; %entry
+; GFX1010-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1010-NEXT: v_and_b32_e32 v0, 0x3ff, v31
+; GFX1010-NEXT: v_cmp_gt_i32_e32 vcc_lo, s21, v0
+; GFX1010-NEXT: s_and_saveexec_b32 s4, vcc_lo
+; GFX1010-NEXT: s_cbranch_execz .LBB3_2
+; GFX1010-NEXT: ; %bb.1: ; %if.then
+; GFX1010-NEXT: v_mov_b32_e32 v0, s6
+; GFX1010-NEXT: v_mov_b32_e32 v1, s19
+; GFX1010-NEXT: s_mov_b32 s11, s18
+; GFX1010-NEXT: s_mov_b32 s10, s17
+; GFX1010-NEXT: s_mov_b32 s9, s16
+; GFX1010-NEXT: s_mov_b32 s8, s7
+; GFX1010-NEXT: buffer_store_dword v0, v1, s[8:11], 0 offen
+; GFX1010-NEXT: .LBB3_2: ; %if.end
+; GFX1010-NEXT: s_waitcnt_depctr 0xffe3
+; GFX1010-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX1010-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1010-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1030-LABEL: divergent_cmp_no_metadata:
+; GFX1030: ; %bb.0: ; %entry
+; GFX1030-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1030-NEXT: v_and_b32_e32 v0, 0x3ff, v31
+; GFX1030-NEXT: s_mov_b32 s4, exec_lo
+; GFX1030-NEXT: v_cmpx_gt_i32_e64 s21, v0
+; GFX1030-NEXT: s_cbranch_execz .LBB3_2
+; GFX1030-NEXT: ; %bb.1: ; %if.then
+; GFX1030-NEXT: v_mov_b32_e32 v0, s6
+; GFX1030-NEXT: v_mov_b32_e32 v1, s19
+; GFX1030-NEXT: s_mov_b32 s11, s18
+; GFX1030-NEXT: s_mov_b32 s10, s17
+; GFX1030-NEXT: s_mov_b32 s9, s16
+; GFX1030-NEXT: s_mov_b32 s8, s7
+; GFX1030-NEXT: buffer_store_dword v0, v1, s[8:11], 0 offen
+; GFX1030-NEXT: .LBB3_2: ; %if.end
+; GFX1030-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX1030-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1030-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %id = call i32 @llvm.amdgcn.workitem.id.x()
+ %cmp = icmp sgt i32 %flag, %id
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then:
+ tail call void @llvm.amdgcn.raw.ptr.buffer.store.i32(i32 %value, ptr addrspace(8) %res, i32 %v_offset, i32 0, i32 0)
+ br label %if.end
+
+if.end:
+ call void @llvm.amdgcn.s.waitcnt(i32 0)
+ ret void
+}
+
+define void @divergent_cmp_unprofitable(i32 noundef inreg %value, ptr addrspace(8) nocapture writeonly inreg %res, i32 noundef inreg %v_offset, i32 noundef inreg %0, i32 noundef inreg %flag) {
+; GFX1010-LABEL: divergent_cmp_unprofitable:
+; GFX1010: ; %bb.0: ; %entry
+; GFX1010-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1010-NEXT: v_and_b32_e32 v0, 0x3ff, v31
+; GFX1010-NEXT: v_cmp_gt_i32_e32 vcc_lo, s21, v0
+; GFX1010-NEXT: s_and_saveexec_b32 s4, vcc_lo
+; GFX1010-NEXT: s_cbranch_execz .LBB4_2
+; GFX1010-NEXT: ; %bb.1: ; %if.then
+; GFX1010-NEXT: v_mov_b32_e32 v0, s6
+; GFX1010-NEXT: v_mov_b32_e32 v1, s19
+; GFX1010-NEXT: s_mov_b32 s11, s18
+; GFX1010-NEXT: s_mov_b32 s10, s17
+; GFX1010-NEXT: s_mov_b32 s9, s16
+; GFX1010-NEXT: s_mov_b32 s8, s7
+; GFX1010-NEXT: buffer_store_dword v0, v1, s[8:11], 0 offen
+; GFX1010-NEXT: .LBB4_2: ; %if.end
+; GFX1010-NEXT: s_waitcnt_depctr 0xffe3
+; GFX1010-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX1010-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1010-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1030-LABEL: divergent_cmp_unprofitable:
+; GFX1030: ; %bb.0: ; %entry
+; GFX1030-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1030-NEXT: v_and_b32_e32 v0, 0x3ff, v31
+; GFX1030-NEXT: s_mov_b32 s4, exec_lo
+; GFX1030-NEXT: v_cmpx_gt_i32_e64 s21, v0
+; GFX1030-NEXT: s_cbranch_execz .LBB4_2
+; GFX1030-NEXT: ; %bb.1: ; %if.then
+; GFX1030-NEXT: v_mov_b32_e32 v0, s6
+; GFX1030-NEXT: v_mov_b32_e32 v1, s19
+; GFX1030-NEXT: s_mov_b32 s11, s18
+; GFX1030-NEXT: s_mov_b32 s10, s17
+; GFX1030-NEXT: s_mov_b32 s9, s16
+; GFX1030-NEXT: s_mov_b32 s8, s7
+; GFX1030-NEXT: buffer_store_dword v0, v1, s[8:11], 0 offen
+; GFX1030-NEXT: .LBB4_2: ; %if.end
+; GFX1030-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX1030-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1030-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %id = call i32 @llvm.amdgcn.workitem.id.x()
+ %cmp = icmp sgt i32 %flag, %id
+ br i1 %cmp, label %if.then, label %if.end, !prof !0
+
+if.then:
+ tail call void @llvm.amdgcn.raw.ptr.buffer.store.i32(i32 %value, ptr addrspace(8) %res, i32 %v_offset, i32 0, i32 0)
+ br label %if.end
+
+if.end:
+ call void @llvm.amdgcn.s.waitcnt(i32 0)
+ ret void
+}
+
+define void @divergent_cmp_profitable(i32 noundef inreg %value, ptr addrspace(8) nocapture writeonly inreg %res, i32 noundef inreg %v_offset, i32 noundef inreg %0, i32 noundef inreg %flag) {
+; GFX1010-LABEL: divergent_cmp_profitable:
+; GFX1010: ; %bb.0: ; %entry
+; GFX1010-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1010-NEXT: v_and_b32_e32 v0, 0x3ff, v31
+; GFX1010-NEXT: v_cmp_gt_i32_e32 vcc_lo, s21, v0
+; GFX1010-NEXT: s_and_saveexec_b32 s4, vcc_lo
+; GFX1010-NEXT: s_cbranch_execz .LBB5_2
+; GFX1010-NEXT: ; %bb.1: ; %if.then
+; GFX1010-NEXT: v_mov_b32_e32 v0, s6
+; GFX1010-NEXT: v_mov_b32_e32 v1, s19
+; GFX1010-NEXT: s_mov_b32 s11, s18
+; GFX1010-NEXT: s_mov_b32 s10, s17
+; GFX1010-NEXT: s_mov_b32 s9, s16
+; GFX1010-NEXT: s_mov_b32 s8, s7
+; GFX1010-NEXT: buffer_store_dword v0, v1, s[8:11], 0 offen
+; GFX1010-NEXT: .LBB5_2: ; %if.end
+; GFX1010-NEXT: s_waitcnt_depctr 0xffe3
+; GFX1010-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX1010-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1010-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1030-LABEL: divergent_cmp_profitable:
+; GFX1030: ; %bb.0: ; %entry
+; GFX1030-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1030-NEXT: v_and_b32_e32 v0, 0x3ff, v31
+; GFX1030-NEXT: s_mov_b32 s4, exec_lo
+; GFX1030-NEXT: v_cmpx_gt_i32_e64 s21, v0
+; GFX1030-NEXT: s_cbranch_execz .LBB5_2
+; GFX1030-NEXT: ; %bb.1: ; %if.then
+; GFX1030-NEXT: v_mov_b32_e32 v0, s6
+; GFX1030-NEXT: v_mov_b32_e32 v1, s19
+; GFX1030-NEXT: s_mov_b32 s11, s18
+; GFX1030-NEXT: s_mov_b32 s10, s17
+; GFX1030-NEXT: s_mov_b32 s9, s16
+; GFX1030-NEXT: s_mov_b32 s8, s7
+; GFX1030-NEXT: buffer_store_dword v0, v1, s[8:11], 0 offen
+; GFX1030-NEXT: .LBB5_2: ; %if.end
+; GFX1030-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX1030-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1030-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %id = call i32 @llvm.amdgcn.workitem.id.x()
+ %cmp = icmp sgt i32 %flag, %id
+ br i1 %cmp, label %if.then, label %if.end, !prof !1
+
+if.then:
+ tail call void @llvm.amdgcn.raw.ptr.buffer.store.i32(i32 %value, ptr addrspace(8) %res, i32 %v_offset, i32 0, i32 0)
+ br label %if.end
+
+if.end:
+ call void @llvm.amdgcn.s.waitcnt(i32 0)
+ ret void
+}
+
+declare void @llvm.amdgcn.raw.ptr.buffer.store.i32(i32, ptr addrspace(8) nocapture writeonly, i32, i32, i32 immarg)
+declare void @llvm.amdgcn.s.waitcnt(i32)
+declare i32 @llvm.amdgcn.workitem.id.x()
+
+!0 = !{!"branch_weights", i32 1000, i32 1000}
+!1 = !{!"branch_weights", i32 2000, i32 1}
>From 08677c534a4f6ff3178c44f08ba4a6b135b68a2a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Juan=20Manuel=20Martinez=20Caama=C3=B1o?= <juamarti at amd.com>
Date: Fri, 20 Sep 2024 14:24:37 +0200
Subject: [PATCH 3/3] [AMDGPU][SIPreEmitPeephole] mustRetainExeczBranch: use
BranchProbability and TargetSchedModel
Remove s_cbranch_execnz branches if the transformation is
profitable according to BranchProbability and TargetSchedModel.
---
llvm/lib/Target/AMDGPU/SIPreEmitPeephole.cpp | 111 +++++-
.../AMDGPU/GlobalISel/fp64-atomics-gfx90a.ll | 18 +-
.../AMDGPU/GlobalISel/mul-known-bits.i64.ll | 12 +-
.../AMDGPU/amdgpu-demote-scc-branches.ll | 6 +-
.../atomic_optimizations_local_pointer.ll | 324 ++++++------------
llvm/test/CodeGen/AMDGPU/atomicrmw-expand.ll | 6 +-
.../CodeGen/AMDGPU/branch-condition-and.ll | 46 ++-
.../dagcombine-v1i8-extractvecelt-crash.ll | 3 +-
llvm/test/CodeGen/AMDGPU/else.ll | 1 -
.../CodeGen/AMDGPU/flat-atomicrmw-fadd.ll | 9 +-
llvm/test/CodeGen/AMDGPU/fptoi.i128.ll | 12 +-
.../AMDGPU/insert-skips-flat-vmem-ds.mir | 4 +-
.../CodeGen/AMDGPU/insert-skips-gfx10.mir | 20 +-
.../CodeGen/AMDGPU/insert-skips-gfx12.mir | 40 +--
.../insert_waitcnt_for_precise_memory.ll | 18 +-
.../CodeGen/AMDGPU/llvm.amdgcn.reduce.umax.ll | 36 +-
.../CodeGen/AMDGPU/llvm.amdgcn.reduce.umin.ll | 36 +-
.../CodeGen/AMDGPU/local-atomicrmw-fadd.ll | 101 ++----
llvm/test/CodeGen/AMDGPU/ret_jump.ll | 1 -
llvm/test/CodeGen/AMDGPU/sgpr-control-flow.ll | 6 +-
llvm/test/CodeGen/AMDGPU/si-annotate-cf.ll | 6 +-
.../CodeGen/AMDGPU/uniform-phi-with-undef.ll | 3 +-
22 files changed, 343 insertions(+), 476 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIPreEmitPeephole.cpp b/llvm/lib/Target/AMDGPU/SIPreEmitPeephole.cpp
index 1334029544f999..8b54bf07cd4fa3 100644
--- a/llvm/lib/Target/AMDGPU/SIPreEmitPeephole.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPreEmitPeephole.cpp
@@ -15,6 +15,8 @@
#include "GCNSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/TargetSchedule.h"
+#include "llvm/Support/BranchProbability.h"
using namespace llvm;
@@ -41,7 +43,8 @@ class SIPreEmitPeephole : public MachineFunctionPass {
MachineBasicBlock *&TrueMBB,
MachineBasicBlock *&FalseMBB,
SmallVectorImpl<MachineOperand> &Cond);
- bool mustRetainExeczBranch(const MachineBasicBlock &From,
+ bool mustRetainExeczBranch(const MachineBasicBlock &Head,
+ const MachineBasicBlock &From,
const MachineBasicBlock &To) const;
bool removeExeczBranch(MachineInstr &MI, MachineBasicBlock &SrcMBB);
@@ -304,11 +307,95 @@ bool SIPreEmitPeephole::getBlockDestinations(
return true;
}
-bool SIPreEmitPeephole::mustRetainExeczBranch(
- const MachineBasicBlock &From, const MachineBasicBlock &To) const {
+namespace {
+class CostModelBase {
+public:
+ virtual bool isProfitable(const MachineInstr &MI) = 0;
+ virtual ~CostModelBase() = default;
+ static std::unique_ptr<CostModelBase> Create(const MachineBasicBlock &MBB,
+ const MachineBasicBlock &,
+ const SIInstrInfo &TII);
+};
+
+class TrivialCostModel : public CostModelBase {
+ friend CostModelBase;
+
unsigned NumInstr = 0;
- const MachineFunction *MF = From.getParent();
+ const SIInstrInfo &TII;
+
+ TrivialCostModel(const SIInstrInfo &TII) : TII(TII) {}
+
+public:
+ bool isProfitable(const MachineInstr &MI) override {
+ ++NumInstr;
+ if (NumInstr >= SkipThreshold)
+ return false;
+ // These instructions are potentially expensive even if EXEC = 0.
+ if (TII.isSMRD(MI) || TII.isVMEM(MI) || TII.isFLAT(MI) || TII.isDS(MI) ||
+ TII.isWaitcnt(MI.getOpcode()))
+ return false;
+ return true;
+ }
+ ~TrivialCostModel() override = default;
+};
+
+class BranchWeightCostModel : public CostModelBase {
+ friend CostModelBase;
+
+ unsigned long ExecNZBranchCost;
+ unsigned long UnconditionalBranchCost;
+ unsigned long N;
+ unsigned long D;
+ unsigned long ThenCyclesCost = 0;
+ const TargetSchedModel &SchedModel;
+
+ BranchWeightCostModel(const BranchProbability &BP,
+ const TargetSchedModel &SchedModel)
+ : SchedModel(SchedModel) {
+ assert(!BP.isUnknown());
+ assert(SchedModel.hasInstrSchedModel());
+ ExecNZBranchCost = SchedModel.computeInstrLatency(AMDGPU::S_CBRANCH_EXECZ);
+ UnconditionalBranchCost = SchedModel.computeInstrLatency(AMDGPU::S_BRANCH);
+ N = BP.getNumerator();
+ D = BP.getDenominator();
+ }
+public:
+ bool isProfitable(const MachineInstr &MI) override {
+ ThenCyclesCost += SchedModel.computeInstrLatency(&MI, false);
+
+ // Consider `P = N/D` to be the probability of execnz being true
+ // The transformation is profitable if always executing the 'then' block
+ // is cheaper than executing sometimes 'then', s_branch and always
+ // executing s_cbranch_execnz
+ return (D - N) * ThenCyclesCost <=
+ D * ExecNZBranchCost + (D - N) * UnconditionalBranchCost;
+ }
+ ~BranchWeightCostModel() override = default;
+};
+
+std::unique_ptr<CostModelBase>
+CostModelBase::Create(const MachineBasicBlock &Head,
+ const MachineBasicBlock &Succ, const SIInstrInfo &TII) {
+ const auto *FromIt = find(Head.successors(), &Succ);
+ assert(FromIt != Head.succ_end());
+ BranchProbability ExecNZProb = Head.getSuccProbability(FromIt);
+
+ auto &SchedModel = TII.getSchedModel();
+ if (SchedModel.hasInstrSchedModel() && !ExecNZProb.isUnknown())
+ return std::unique_ptr<CostModelBase>(
+ new BranchWeightCostModel(ExecNZProb, SchedModel));
+
+ return std::unique_ptr<CostModelBase>(new TrivialCostModel(TII));
+}
+
+bool SIPreEmitPeephole::mustRetainExeczBranch(
+ const MachineBasicBlock &Head, const MachineBasicBlock &From,
+ const MachineBasicBlock &To) const {
+
+ auto CostModel = CostModelBase::Create(Head, From, *TII);
+
+ const MachineFunction *MF = From.getParent();
for (MachineFunction::const_iterator MBBI(&From), ToI(&To), End = MF->end();
MBBI != End && MBBI != ToI; ++MBBI) {
const MachineBasicBlock &MBB = *MBBI;
@@ -326,19 +413,14 @@ bool SIPreEmitPeephole::mustRetainExeczBranch(
if (TII->hasUnwantedEffectsWhenEXECEmpty(MI))
return true;
- // These instructions are potentially expensive even if EXEC = 0.
- if (TII->isSMRD(MI) || TII->isVMEM(MI) || TII->isFLAT(MI) ||
- TII->isDS(MI) || TII->isWaitcnt(MI.getOpcode()))
- return true;
-
- ++NumInstr;
- if (NumInstr >= SkipThreshold)
+ if (!CostModel->isProfitable(MI))
return true;
}
}
return false;
}
+} // namespace
// Returns true if the skip branch instruction is removed.
bool SIPreEmitPeephole::removeExeczBranch(MachineInstr &MI,
@@ -351,8 +433,11 @@ bool SIPreEmitPeephole::removeExeczBranch(MachineInstr &MI,
return false;
// Consider only the forward branches.
- if ((SrcMBB.getNumber() >= TrueMBB->getNumber()) ||
- mustRetainExeczBranch(*FalseMBB, *TrueMBB))
+ if (SrcMBB.getNumber() >= TrueMBB->getNumber())
+ return false;
+
+ // Consider only when it is legal and profitable
+ if (mustRetainExeczBranch(SrcMBB, *FalseMBB, *TrueMBB))
return false;
LLVM_DEBUG(dbgs() << "Removing the execz branch: " << MI);
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/fp64-atomics-gfx90a.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/fp64-atomics-gfx90a.ll
index eb39ca2d7daa7f..45a45d125a5ea0 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/fp64-atomics-gfx90a.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/fp64-atomics-gfx90a.ll
@@ -1726,7 +1726,6 @@ define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat(ptr addrspace(3) %ptr
; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v0, s4, v0
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GFX90A-NEXT: s_cbranch_execz .LBB59_2
; GFX90A-NEXT: ; %bb.1:
; GFX90A-NEXT: s_load_dword s2, s[2:3], 0x24
; GFX90A-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
@@ -1736,7 +1735,7 @@ define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat(ptr addrspace(3) %ptr
; GFX90A-NEXT: v_mov_b32_e32 v2, s2
; GFX90A-NEXT: ds_add_f64 v2, v[0:1]
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: .LBB59_2:
+; GFX90A-NEXT: ; %bb.2:
; GFX90A-NEXT: s_endpgm
;
; GFX940-LABEL: local_atomic_fadd_f64_noret_pat:
@@ -1747,7 +1746,6 @@ define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat(ptr addrspace(3) %ptr
; GFX940-NEXT: v_mbcnt_hi_u32_b32 v0, s4, v0
; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GFX940-NEXT: s_cbranch_execz .LBB59_2
; GFX940-NEXT: ; %bb.1:
; GFX940-NEXT: s_load_dword s2, s[2:3], 0x24
; GFX940-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
@@ -1757,7 +1755,7 @@ define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat(ptr addrspace(3) %ptr
; GFX940-NEXT: v_mov_b32_e32 v2, s2
; GFX940-NEXT: ds_add_f64 v2, v[0:1]
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
-; GFX940-NEXT: .LBB59_2:
+; GFX940-NEXT: ; %bb.2:
; GFX940-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(3) %ptr, double 4.0 seq_cst, !amdgpu.no.fine.grained.memory !0
@@ -1773,7 +1771,6 @@ define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat_flush(ptr addrspace(3
; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v0, s4, v0
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GFX90A-NEXT: s_cbranch_execz .LBB60_2
; GFX90A-NEXT: ; %bb.1:
; GFX90A-NEXT: s_load_dword s2, s[2:3], 0x24
; GFX90A-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
@@ -1783,7 +1780,7 @@ define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat_flush(ptr addrspace(3
; GFX90A-NEXT: v_mov_b32_e32 v2, s2
; GFX90A-NEXT: ds_add_f64 v2, v[0:1]
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: .LBB60_2:
+; GFX90A-NEXT: ; %bb.2:
; GFX90A-NEXT: s_endpgm
;
; GFX940-LABEL: local_atomic_fadd_f64_noret_pat_flush:
@@ -1794,7 +1791,6 @@ define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat_flush(ptr addrspace(3
; GFX940-NEXT: v_mbcnt_hi_u32_b32 v0, s4, v0
; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GFX940-NEXT: s_cbranch_execz .LBB60_2
; GFX940-NEXT: ; %bb.1:
; GFX940-NEXT: s_load_dword s2, s[2:3], 0x24
; GFX940-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
@@ -1804,7 +1800,7 @@ define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat_flush(ptr addrspace(3
; GFX940-NEXT: v_mov_b32_e32 v2, s2
; GFX940-NEXT: ds_add_f64 v2, v[0:1]
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
-; GFX940-NEXT: .LBB60_2:
+; GFX940-NEXT: ; %bb.2:
; GFX940-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(3) %ptr, double 4.0 seq_cst, !amdgpu.no.fine.grained.memory !0
@@ -1820,7 +1816,6 @@ define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat_flush_safe(ptr addrsp
; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v0, s4, v0
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GFX90A-NEXT: s_cbranch_execz .LBB61_2
; GFX90A-NEXT: ; %bb.1:
; GFX90A-NEXT: s_load_dword s2, s[2:3], 0x24
; GFX90A-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
@@ -1830,7 +1825,7 @@ define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat_flush_safe(ptr addrsp
; GFX90A-NEXT: v_mov_b32_e32 v2, s2
; GFX90A-NEXT: ds_add_f64 v2, v[0:1]
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: .LBB61_2:
+; GFX90A-NEXT: ; %bb.2:
; GFX90A-NEXT: s_endpgm
;
; GFX940-LABEL: local_atomic_fadd_f64_noret_pat_flush_safe:
@@ -1841,7 +1836,6 @@ define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat_flush_safe(ptr addrsp
; GFX940-NEXT: v_mbcnt_hi_u32_b32 v0, s4, v0
; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GFX940-NEXT: s_cbranch_execz .LBB61_2
; GFX940-NEXT: ; %bb.1:
; GFX940-NEXT: s_load_dword s2, s[2:3], 0x24
; GFX940-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
@@ -1851,7 +1845,7 @@ define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat_flush_safe(ptr addrsp
; GFX940-NEXT: v_mov_b32_e32 v2, s2
; GFX940-NEXT: ds_add_f64 v2, v[0:1]
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
-; GFX940-NEXT: .LBB61_2:
+; GFX940-NEXT: ; %bb.2:
; GFX940-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(3) %ptr, double 4.0 seq_cst, !amdgpu.no.fine.grained.memory !0
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/mul-known-bits.i64.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/mul-known-bits.i64.ll
index 489f46d1237a36..cd656075efaf95 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/mul-known-bits.i64.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/mul-known-bits.i64.ll
@@ -526,21 +526,19 @@ define amdgpu_kernel void @v_mul64_masked_before_and_in_branch(ptr addrspace(1)
; GFX10-NEXT: v_cmp_ge_u64_e32 vcc_lo, 0, v[2:3]
; GFX10-NEXT: s_and_saveexec_b32 s0, vcc_lo
; GFX10-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX10-NEXT: s_cbranch_execz .LBB10_2
; GFX10-NEXT: ; %bb.1: ; %else
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: v_mad_u64_u32 v[0:1], s1, v2, v4, 0
; GFX10-NEXT: v_mad_u64_u32 v[1:2], s1, v2, v5, v[1:2]
; GFX10-NEXT: ; implicit-def: $vgpr2_vgpr3
; GFX10-NEXT: ; implicit-def: $vgpr4_vgpr5
-; GFX10-NEXT: .LBB10_2: ; %Flow
+; GFX10-NEXT: ; %bb.2: ; %Flow
; GFX10-NEXT: s_andn2_saveexec_b32 s0, s0
-; GFX10-NEXT: s_cbranch_execz .LBB10_4
; GFX10-NEXT: ; %bb.3: ; %if
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: v_mul_lo_u32 v1, v2, v5
; GFX10-NEXT: v_mov_b32_e32 v0, 0
-; GFX10-NEXT: .LBB10_4: ; %endif
+; GFX10-NEXT: ; %bb.4: ; %endif
; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX10-NEXT: v_mov_b32_e32 v2, 0
; GFX10-NEXT: global_store_dwordx2 v2, v[0:1], s[4:5]
@@ -563,7 +561,6 @@ define amdgpu_kernel void @v_mul64_masked_before_and_in_branch(ptr addrspace(1)
; GFX11-NEXT: s_waitcnt vmcnt(1)
; GFX11-NEXT: v_cmpx_ge_u64_e32 0, v[2:3]
; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execz .LBB10_2
; GFX11-NEXT: ; %bb.1: ; %else
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: v_mad_u64_u32 v[0:1], null, v2, v4, 0
@@ -572,14 +569,13 @@ define amdgpu_kernel void @v_mul64_masked_before_and_in_branch(ptr addrspace(1)
; GFX11-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX11-NEXT: v_mov_b32_e32 v1, v3
; GFX11-NEXT: ; implicit-def: $vgpr2_vgpr3
-; GFX11-NEXT: .LBB10_2: ; %Flow
+; GFX11-NEXT: ; %bb.2: ; %Flow
; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB10_4
; GFX11-NEXT: ; %bb.3: ; %if
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: v_mul_lo_u32 v1, v2, v5
; GFX11-NEXT: v_mov_b32_e32 v0, 0
-; GFX11-NEXT: .LBB10_4: ; %endif
+; GFX11-NEXT: ; %bb.4: ; %endif
; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX11-NEXT: v_mov_b32_e32 v2, 0
; GFX11-NEXT: global_store_b64 v2, v[0:1], s[4:5]
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-demote-scc-branches.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-demote-scc-branches.ll
index 33865c04b3fe92..5387ea0281684b 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-demote-scc-branches.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-demote-scc-branches.ll
@@ -209,7 +209,6 @@ define void @divergent_cmp_profitable(i32 noundef inreg %value, ptr addrspace(8)
; GFX1010-NEXT: v_and_b32_e32 v0, 0x3ff, v31
; GFX1010-NEXT: v_cmp_gt_i32_e32 vcc_lo, s21, v0
; GFX1010-NEXT: s_and_saveexec_b32 s4, vcc_lo
-; GFX1010-NEXT: s_cbranch_execz .LBB5_2
; GFX1010-NEXT: ; %bb.1: ; %if.then
; GFX1010-NEXT: v_mov_b32_e32 v0, s6
; GFX1010-NEXT: v_mov_b32_e32 v1, s19
@@ -218,7 +217,7 @@ define void @divergent_cmp_profitable(i32 noundef inreg %value, ptr addrspace(8)
; GFX1010-NEXT: s_mov_b32 s9, s16
; GFX1010-NEXT: s_mov_b32 s8, s7
; GFX1010-NEXT: buffer_store_dword v0, v1, s[8:11], 0 offen
-; GFX1010-NEXT: .LBB5_2: ; %if.end
+; GFX1010-NEXT: ; %bb.2: ; %if.end
; GFX1010-NEXT: s_waitcnt_depctr 0xffe3
; GFX1010-NEXT: s_or_b32 exec_lo, exec_lo, s4
; GFX1010-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -230,7 +229,6 @@ define void @divergent_cmp_profitable(i32 noundef inreg %value, ptr addrspace(8)
; GFX1030-NEXT: v_and_b32_e32 v0, 0x3ff, v31
; GFX1030-NEXT: s_mov_b32 s4, exec_lo
; GFX1030-NEXT: v_cmpx_gt_i32_e64 s21, v0
-; GFX1030-NEXT: s_cbranch_execz .LBB5_2
; GFX1030-NEXT: ; %bb.1: ; %if.then
; GFX1030-NEXT: v_mov_b32_e32 v0, s6
; GFX1030-NEXT: v_mov_b32_e32 v1, s19
@@ -239,7 +237,7 @@ define void @divergent_cmp_profitable(i32 noundef inreg %value, ptr addrspace(8)
; GFX1030-NEXT: s_mov_b32 s9, s16
; GFX1030-NEXT: s_mov_b32 s8, s7
; GFX1030-NEXT: buffer_store_dword v0, v1, s[8:11], 0 offen
-; GFX1030-NEXT: .LBB5_2: ; %if.end
+; GFX1030-NEXT: ; %bb.2: ; %if.end
; GFX1030-NEXT: s_or_b32 exec_lo, exec_lo, s4
; GFX1030-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX1030-NEXT: s_setpc_b64 s[30:31]
diff --git a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_local_pointer.ll b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_local_pointer.ll
index ce90fbed813103..2fcfc72022219f 100644
--- a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_local_pointer.ll
+++ b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_local_pointer.ll
@@ -58,7 +58,6 @@ define amdgpu_kernel void @add_i32_constant(ptr addrspace(1) %out) {
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX8-NEXT: ; implicit-def: $vgpr1
; GFX8-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX8-NEXT: s_cbranch_execz .LBB0_2
; GFX8-NEXT: ; %bb.1:
; GFX8-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
; GFX8-NEXT: s_mul_i32 s4, s4, 5
@@ -67,7 +66,7 @@ define amdgpu_kernel void @add_i32_constant(ptr addrspace(1) %out) {
; GFX8-NEXT: s_mov_b32 m0, -1
; GFX8-NEXT: ds_add_rtn_u32 v1, v1, v2
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: .LBB0_2:
+; GFX8-NEXT: ; %bb.2:
; GFX8-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8-NEXT: v_readfirstlane_b32 s4, v1
@@ -86,7 +85,6 @@ define amdgpu_kernel void @add_i32_constant(ptr addrspace(1) %out) {
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX9-NEXT: ; implicit-def: $vgpr1
; GFX9-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX9-NEXT: s_cbranch_execz .LBB0_2
; GFX9-NEXT: ; %bb.1:
; GFX9-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
; GFX9-NEXT: s_mul_i32 s4, s4, 5
@@ -94,7 +92,7 @@ define amdgpu_kernel void @add_i32_constant(ptr addrspace(1) %out) {
; GFX9-NEXT: v_mov_b32_e32 v2, s4
; GFX9-NEXT: ds_add_rtn_u32 v1, v1, v2
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: .LBB0_2:
+; GFX9-NEXT: ; %bb.2:
; GFX9-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9-NEXT: v_readfirstlane_b32 s4, v1
@@ -274,7 +272,6 @@ define amdgpu_kernel void @add_i32_uniform(ptr addrspace(1) %out, i32 %additive)
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX8-NEXT: ; implicit-def: $vgpr1
; GFX8-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX8-NEXT: s_cbranch_execz .LBB1_2
; GFX8-NEXT: ; %bb.1:
; GFX8-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
@@ -284,7 +281,7 @@ define amdgpu_kernel void @add_i32_uniform(ptr addrspace(1) %out, i32 %additive)
; GFX8-NEXT: s_mov_b32 m0, -1
; GFX8-NEXT: ds_add_rtn_u32 v1, v1, v2
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: .LBB1_2:
+; GFX8-NEXT: ; %bb.2:
; GFX8-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
@@ -305,7 +302,6 @@ define amdgpu_kernel void @add_i32_uniform(ptr addrspace(1) %out, i32 %additive)
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX9-NEXT: ; implicit-def: $vgpr1
; GFX9-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX9-NEXT: s_cbranch_execz .LBB1_2
; GFX9-NEXT: ; %bb.1:
; GFX9-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
@@ -314,7 +310,7 @@ define amdgpu_kernel void @add_i32_uniform(ptr addrspace(1) %out, i32 %additive)
; GFX9-NEXT: v_mov_b32_e32 v2, s4
; GFX9-NEXT: ds_add_rtn_u32 v1, v1, v2
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: .LBB1_2:
+; GFX9-NEXT: ; %bb.2:
; GFX9-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
@@ -528,14 +524,13 @@ define amdgpu_kernel void @add_i32_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: ; implicit-def: $vgpr0
; GFX8_ITERATIVE-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX8_ITERATIVE-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX8_ITERATIVE-NEXT: s_cbranch_execz .LBB2_4
; GFX8_ITERATIVE-NEXT: ; %bb.3:
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v2, s4
; GFX8_ITERATIVE-NEXT: s_mov_b32 m0, -1
; GFX8_ITERATIVE-NEXT: ds_add_rtn_u32 v0, v0, v2
; GFX8_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_ITERATIVE-NEXT: .LBB2_4:
+; GFX8_ITERATIVE-NEXT: ; %bb.4:
; GFX8_ITERATIVE-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8_ITERATIVE-NEXT: v_readfirstlane_b32 s4, v0
@@ -569,13 +564,12 @@ define amdgpu_kernel void @add_i32_varying(ptr addrspace(1) %out) {
; GFX9_ITERATIVE-NEXT: ; implicit-def: $vgpr0
; GFX9_ITERATIVE-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX9_ITERATIVE-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX9_ITERATIVE-NEXT: s_cbranch_execz .LBB2_4
; GFX9_ITERATIVE-NEXT: ; %bb.3:
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v2, s4
; GFX9_ITERATIVE-NEXT: ds_add_rtn_u32 v0, v0, v2
; GFX9_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_ITERATIVE-NEXT: .LBB2_4:
+; GFX9_ITERATIVE-NEXT: ; %bb.4:
; GFX9_ITERATIVE-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9_ITERATIVE-NEXT: v_readfirstlane_b32 s4, v0
@@ -802,13 +796,12 @@ define amdgpu_kernel void @add_i32_varying(ptr addrspace(1) %out) {
; GFX8_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
; GFX8_DPP-NEXT: ; implicit-def: $vgpr0
; GFX8_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX8_DPP-NEXT: s_cbranch_execz .LBB2_2
; GFX8_DPP-NEXT: ; %bb.1:
; GFX8_DPP-NEXT: v_mov_b32_e32 v0, s4
; GFX8_DPP-NEXT: s_mov_b32 m0, -1
; GFX8_DPP-NEXT: ds_add_rtn_u32 v0, v3, v0
; GFX8_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_DPP-NEXT: .LBB2_2:
+; GFX8_DPP-NEXT: ; %bb.2:
; GFX8_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8_DPP-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8_DPP-NEXT: v_readfirstlane_b32 s4, v0
@@ -847,12 +840,11 @@ define amdgpu_kernel void @add_i32_varying(ptr addrspace(1) %out) {
; GFX9_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
; GFX9_DPP-NEXT: ; implicit-def: $vgpr0
; GFX9_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX9_DPP-NEXT: s_cbranch_execz .LBB2_2
; GFX9_DPP-NEXT: ; %bb.1:
; GFX9_DPP-NEXT: v_mov_b32_e32 v0, s4
; GFX9_DPP-NEXT: ds_add_rtn_u32 v0, v3, v0
; GFX9_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_DPP-NEXT: .LBB2_2:
+; GFX9_DPP-NEXT: ; %bb.2:
; GFX9_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9_DPP-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9_DPP-NEXT: v_readfirstlane_b32 s4, v0
@@ -1127,14 +1119,13 @@ define amdgpu_kernel void @add_i32_varying_nouse() {
; GFX8_ITERATIVE-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX8_ITERATIVE-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX8_ITERATIVE-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX8_ITERATIVE-NEXT: s_cbranch_execz .LBB3_4
; GFX8_ITERATIVE-NEXT: ; %bb.3:
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v1, s2
; GFX8_ITERATIVE-NEXT: s_mov_b32 m0, -1
; GFX8_ITERATIVE-NEXT: ds_add_u32 v0, v1
; GFX8_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_ITERATIVE-NEXT: .LBB3_4:
+; GFX8_ITERATIVE-NEXT: ; %bb.4:
; GFX8_ITERATIVE-NEXT: s_endpgm
;
; GFX9_ITERATIVE-LABEL: add_i32_varying_nouse:
@@ -1156,13 +1147,12 @@ define amdgpu_kernel void @add_i32_varying_nouse() {
; GFX9_ITERATIVE-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX9_ITERATIVE-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX9_ITERATIVE-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX9_ITERATIVE-NEXT: s_cbranch_execz .LBB3_4
; GFX9_ITERATIVE-NEXT: ; %bb.3:
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v1, s2
; GFX9_ITERATIVE-NEXT: ds_add_u32 v0, v1
; GFX9_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_ITERATIVE-NEXT: .LBB3_4:
+; GFX9_ITERATIVE-NEXT: ; %bb.4:
; GFX9_ITERATIVE-NEXT: s_endpgm
;
; GFX1064_ITERATIVE-LABEL: add_i32_varying_nouse:
@@ -1318,13 +1308,12 @@ define amdgpu_kernel void @add_i32_varying_nouse() {
; GFX8_DPP-NEXT: s_mov_b32 s0, s2
; GFX8_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3
; GFX8_DPP-NEXT: s_and_saveexec_b64 s[2:3], vcc
-; GFX8_DPP-NEXT: s_cbranch_execz .LBB3_2
; GFX8_DPP-NEXT: ; %bb.1:
; GFX8_DPP-NEXT: v_mov_b32_e32 v0, s0
; GFX8_DPP-NEXT: s_mov_b32 m0, -1
; GFX8_DPP-NEXT: ds_add_u32 v2, v0
; GFX8_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_DPP-NEXT: .LBB3_2:
+; GFX8_DPP-NEXT: ; %bb.2:
; GFX8_DPP-NEXT: s_endpgm
;
; GFX9_DPP-LABEL: add_i32_varying_nouse:
@@ -1351,12 +1340,11 @@ define amdgpu_kernel void @add_i32_varying_nouse() {
; GFX9_DPP-NEXT: s_mov_b32 s0, s2
; GFX9_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3
; GFX9_DPP-NEXT: s_and_saveexec_b64 s[2:3], vcc
-; GFX9_DPP-NEXT: s_cbranch_execz .LBB3_2
; GFX9_DPP-NEXT: ; %bb.1:
; GFX9_DPP-NEXT: v_mov_b32_e32 v0, s0
; GFX9_DPP-NEXT: ds_add_u32 v2, v0
; GFX9_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_DPP-NEXT: .LBB3_2:
+; GFX9_DPP-NEXT: ; %bb.2:
; GFX9_DPP-NEXT: s_endpgm
;
; GFX1064_DPP-LABEL: add_i32_varying_nouse:
@@ -1528,7 +1516,6 @@ define amdgpu_kernel void @add_i64_constant(ptr addrspace(1) %out) {
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 0, v2
; GFX8-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX8-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX8-NEXT: s_cbranch_execz .LBB4_2
; GFX8-NEXT: ; %bb.1:
; GFX8-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
; GFX8-NEXT: s_mul_i32 s4, s4, 5
@@ -1537,7 +1524,7 @@ define amdgpu_kernel void @add_i64_constant(ptr addrspace(1) %out) {
; GFX8-NEXT: s_mov_b32 m0, -1
; GFX8-NEXT: ds_add_rtn_u64 v[0:1], v1, v[0:1]
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: .LBB4_2:
+; GFX8-NEXT: ; %bb.2:
; GFX8-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8-NEXT: v_readfirstlane_b32 s2, v1
@@ -1560,7 +1547,6 @@ define amdgpu_kernel void @add_i64_constant(ptr addrspace(1) %out) {
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v2
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX9-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX9-NEXT: s_cbranch_execz .LBB4_2
; GFX9-NEXT: ; %bb.1:
; GFX9-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
; GFX9-NEXT: s_mul_i32 s4, s4, 5
@@ -1568,7 +1554,7 @@ define amdgpu_kernel void @add_i64_constant(ptr addrspace(1) %out) {
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: ds_add_rtn_u64 v[0:1], v1, v[0:1]
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: .LBB4_2:
+; GFX9-NEXT: ; %bb.2:
; GFX9-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9-NEXT: v_readfirstlane_b32 s2, v1
@@ -1768,7 +1754,6 @@ define amdgpu_kernel void @add_i64_uniform(ptr addrspace(1) %out, i64 %additive)
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 0, v2
; GFX8-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX8-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GFX8-NEXT: s_cbranch_execz .LBB5_2
; GFX8-NEXT: ; %bb.1:
; GFX8-NEXT: s_bcnt1_i32_b64 s8, s[6:7]
; GFX8-NEXT: v_mov_b32_e32 v0, s8
@@ -1780,7 +1765,7 @@ define amdgpu_kernel void @add_i64_uniform(ptr addrspace(1) %out, i64 %additive)
; GFX8-NEXT: s_mov_b32 m0, -1
; GFX8-NEXT: ds_add_rtn_u64 v[0:1], v3, v[0:1]
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: .LBB5_2:
+; GFX8-NEXT: ; %bb.2:
; GFX8-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX8-NEXT: v_readfirstlane_b32 s4, v1
; GFX8-NEXT: v_readfirstlane_b32 s5, v0
@@ -1806,7 +1791,6 @@ define amdgpu_kernel void @add_i64_uniform(ptr addrspace(1) %out, i64 %additive)
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v2
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX9-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX9-NEXT: s_cbranch_execz .LBB5_2
; GFX9-NEXT: ; %bb.1:
; GFX9-NEXT: s_bcnt1_i32_b64 s2, s[8:9]
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
@@ -1819,7 +1803,7 @@ define amdgpu_kernel void @add_i64_uniform(ptr addrspace(1) %out, i64 %additive)
; GFX9-NEXT: v_mov_b32_e32 v3, 0
; GFX9-NEXT: ds_add_rtn_u64 v[0:1], v3, v[0:1]
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: .LBB5_2:
+; GFX9-NEXT: ; %bb.2:
; GFX9-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9-NEXT: v_readfirstlane_b32 s0, v1
; GFX9-NEXT: v_readfirstlane_b32 s1, v0
@@ -2071,7 +2055,6 @@ define amdgpu_kernel void @add_i64_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: ; implicit-def: $vgpr3_vgpr4
; GFX8_ITERATIVE-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX8_ITERATIVE-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
-; GFX8_ITERATIVE-NEXT: s_cbranch_execz .LBB6_4
; GFX8_ITERATIVE-NEXT: ; %bb.3:
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v4, s1
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
@@ -2079,7 +2062,7 @@ define amdgpu_kernel void @add_i64_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: s_mov_b32 m0, -1
; GFX8_ITERATIVE-NEXT: ds_add_rtn_u64 v[3:4], v0, v[3:4]
; GFX8_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_ITERATIVE-NEXT: .LBB6_4:
+; GFX8_ITERATIVE-NEXT: ; %bb.4:
; GFX8_ITERATIVE-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX8_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8_ITERATIVE-NEXT: v_readfirstlane_b32 s4, v4
@@ -2120,14 +2103,13 @@ define amdgpu_kernel void @add_i64_varying(ptr addrspace(1) %out) {
; GFX9_ITERATIVE-NEXT: ; implicit-def: $vgpr3_vgpr4
; GFX9_ITERATIVE-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX9_ITERATIVE-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
-; GFX9_ITERATIVE-NEXT: s_cbranch_execz .LBB6_4
; GFX9_ITERATIVE-NEXT: ; %bb.3:
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v4, s1
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v3, s0
; GFX9_ITERATIVE-NEXT: ds_add_rtn_u64 v[3:4], v0, v[3:4]
; GFX9_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_ITERATIVE-NEXT: .LBB6_4:
+; GFX9_ITERATIVE-NEXT: ; %bb.4:
; GFX9_ITERATIVE-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9_ITERATIVE-NEXT: v_readfirstlane_b32 s4, v4
@@ -2423,14 +2405,13 @@ define amdgpu_kernel void @add_i64_varying(ptr addrspace(1) %out) {
; GFX8_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v7
; GFX8_DPP-NEXT: ; implicit-def: $vgpr7_vgpr8
; GFX8_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX8_DPP-NEXT: s_cbranch_execz .LBB6_2
; GFX8_DPP-NEXT: ; %bb.1:
; GFX8_DPP-NEXT: v_mov_b32_e32 v8, s5
; GFX8_DPP-NEXT: v_mov_b32_e32 v7, s4
; GFX8_DPP-NEXT: s_mov_b32 m0, -1
; GFX8_DPP-NEXT: ds_add_rtn_u64 v[7:8], v9, v[7:8]
; GFX8_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_DPP-NEXT: .LBB6_2:
+; GFX8_DPP-NEXT: ; %bb.2:
; GFX8_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8_DPP-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8_DPP-NEXT: v_readfirstlane_b32 s4, v8
@@ -2510,13 +2491,12 @@ define amdgpu_kernel void @add_i64_varying(ptr addrspace(1) %out) {
; GFX9_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v7
; GFX9_DPP-NEXT: ; implicit-def: $vgpr7_vgpr8
; GFX9_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX9_DPP-NEXT: s_cbranch_execz .LBB6_2
; GFX9_DPP-NEXT: ; %bb.1:
; GFX9_DPP-NEXT: v_mov_b32_e32 v8, s5
; GFX9_DPP-NEXT: v_mov_b32_e32 v7, s4
; GFX9_DPP-NEXT: ds_add_rtn_u64 v[7:8], v9, v[7:8]
; GFX9_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_DPP-NEXT: .LBB6_2:
+; GFX9_DPP-NEXT: ; %bb.2:
; GFX9_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9_DPP-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9_DPP-NEXT: v_readfirstlane_b32 s4, v8
@@ -2966,7 +2946,6 @@ define amdgpu_kernel void @add_i64_varying_nouse() {
; GFX8_ITERATIVE-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX8_ITERATIVE-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GFX8_ITERATIVE-NEXT: s_xor_b64 s[2:3], exec, s[2:3]
-; GFX8_ITERATIVE-NEXT: s_cbranch_execz .LBB7_4
; GFX8_ITERATIVE-NEXT: ; %bb.3:
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v0, s0
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v2, 0
@@ -2974,7 +2953,7 @@ define amdgpu_kernel void @add_i64_varying_nouse() {
; GFX8_ITERATIVE-NEXT: s_mov_b32 m0, -1
; GFX8_ITERATIVE-NEXT: ds_add_u64 v2, v[0:1]
; GFX8_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_ITERATIVE-NEXT: .LBB7_4:
+; GFX8_ITERATIVE-NEXT: ; %bb.4:
; GFX8_ITERATIVE-NEXT: s_endpgm
;
; GFX9_ITERATIVE-LABEL: add_i64_varying_nouse:
@@ -2999,14 +2978,13 @@ define amdgpu_kernel void @add_i64_varying_nouse() {
; GFX9_ITERATIVE-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX9_ITERATIVE-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GFX9_ITERATIVE-NEXT: s_xor_b64 s[2:3], exec, s[2:3]
-; GFX9_ITERATIVE-NEXT: s_cbranch_execz .LBB7_4
; GFX9_ITERATIVE-NEXT: ; %bb.3:
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v0, s0
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v2, 0
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v1, s1
; GFX9_ITERATIVE-NEXT: ds_add_u64 v2, v[0:1]
; GFX9_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_ITERATIVE-NEXT: .LBB7_4:
+; GFX9_ITERATIVE-NEXT: ; %bb.4:
; GFX9_ITERATIVE-NEXT: s_endpgm
;
; GFX1064_ITERATIVE-LABEL: add_i64_varying_nouse:
@@ -3214,14 +3192,13 @@ define amdgpu_kernel void @add_i64_varying_nouse() {
; GFX8_DPP-NEXT: s_mov_b64 s[0:1], s[2:3]
; GFX8_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v8
; GFX8_DPP-NEXT: s_and_saveexec_b64 s[2:3], vcc
-; GFX8_DPP-NEXT: s_cbranch_execz .LBB7_2
; GFX8_DPP-NEXT: ; %bb.1:
; GFX8_DPP-NEXT: v_mov_b32_e32 v9, s1
; GFX8_DPP-NEXT: v_mov_b32_e32 v8, s0
; GFX8_DPP-NEXT: s_mov_b32 m0, -1
; GFX8_DPP-NEXT: ds_add_u64 v7, v[8:9]
; GFX8_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_DPP-NEXT: .LBB7_2:
+; GFX8_DPP-NEXT: ; %bb.2:
; GFX8_DPP-NEXT: s_endpgm
;
; GFX9_DPP-LABEL: add_i64_varying_nouse:
@@ -3283,13 +3260,12 @@ define amdgpu_kernel void @add_i64_varying_nouse() {
; GFX9_DPP-NEXT: s_mov_b64 s[0:1], s[2:3]
; GFX9_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v8
; GFX9_DPP-NEXT: s_and_saveexec_b64 s[2:3], vcc
-; GFX9_DPP-NEXT: s_cbranch_execz .LBB7_2
; GFX9_DPP-NEXT: ; %bb.1:
; GFX9_DPP-NEXT: v_mov_b32_e32 v9, s1
; GFX9_DPP-NEXT: v_mov_b32_e32 v8, s0
; GFX9_DPP-NEXT: ds_add_u64 v7, v[8:9]
; GFX9_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_DPP-NEXT: .LBB7_2:
+; GFX9_DPP-NEXT: ; %bb.2:
; GFX9_DPP-NEXT: s_endpgm
;
; GFX1064_DPP-LABEL: add_i64_varying_nouse:
@@ -3558,7 +3534,6 @@ define amdgpu_kernel void @sub_i32_constant(ptr addrspace(1) %out) {
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX8-NEXT: ; implicit-def: $vgpr1
; GFX8-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX8-NEXT: s_cbranch_execz .LBB8_2
; GFX8-NEXT: ; %bb.1:
; GFX8-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
; GFX8-NEXT: s_mul_i32 s4, s4, 5
@@ -3567,7 +3542,7 @@ define amdgpu_kernel void @sub_i32_constant(ptr addrspace(1) %out) {
; GFX8-NEXT: s_mov_b32 m0, -1
; GFX8-NEXT: ds_sub_rtn_u32 v1, v1, v2
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: .LBB8_2:
+; GFX8-NEXT: ; %bb.2:
; GFX8-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8-NEXT: v_readfirstlane_b32 s4, v1
@@ -3587,7 +3562,6 @@ define amdgpu_kernel void @sub_i32_constant(ptr addrspace(1) %out) {
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX9-NEXT: ; implicit-def: $vgpr1
; GFX9-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX9-NEXT: s_cbranch_execz .LBB8_2
; GFX9-NEXT: ; %bb.1:
; GFX9-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
; GFX9-NEXT: s_mul_i32 s4, s4, 5
@@ -3595,7 +3569,7 @@ define amdgpu_kernel void @sub_i32_constant(ptr addrspace(1) %out) {
; GFX9-NEXT: v_mov_b32_e32 v2, s4
; GFX9-NEXT: ds_sub_rtn_u32 v1, v1, v2
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: .LBB8_2:
+; GFX9-NEXT: ; %bb.2:
; GFX9-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9-NEXT: v_readfirstlane_b32 s4, v1
@@ -3780,7 +3754,6 @@ define amdgpu_kernel void @sub_i32_uniform(ptr addrspace(1) %out, i32 %subitive)
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX8-NEXT: ; implicit-def: $vgpr1
; GFX8-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX8-NEXT: s_cbranch_execz .LBB9_2
; GFX8-NEXT: ; %bb.1:
; GFX8-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
@@ -3790,7 +3763,7 @@ define amdgpu_kernel void @sub_i32_uniform(ptr addrspace(1) %out, i32 %subitive)
; GFX8-NEXT: s_mov_b32 m0, -1
; GFX8-NEXT: ds_sub_rtn_u32 v1, v1, v2
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: .LBB9_2:
+; GFX8-NEXT: ; %bb.2:
; GFX8-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
@@ -3811,7 +3784,6 @@ define amdgpu_kernel void @sub_i32_uniform(ptr addrspace(1) %out, i32 %subitive)
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX9-NEXT: ; implicit-def: $vgpr1
; GFX9-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX9-NEXT: s_cbranch_execz .LBB9_2
; GFX9-NEXT: ; %bb.1:
; GFX9-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
@@ -3820,7 +3792,7 @@ define amdgpu_kernel void @sub_i32_uniform(ptr addrspace(1) %out, i32 %subitive)
; GFX9-NEXT: v_mov_b32_e32 v2, s4
; GFX9-NEXT: ds_sub_rtn_u32 v1, v1, v2
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: .LBB9_2:
+; GFX9-NEXT: ; %bb.2:
; GFX9-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
@@ -4036,14 +4008,13 @@ define amdgpu_kernel void @sub_i32_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: ; implicit-def: $vgpr0
; GFX8_ITERATIVE-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX8_ITERATIVE-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX8_ITERATIVE-NEXT: s_cbranch_execz .LBB10_4
; GFX8_ITERATIVE-NEXT: ; %bb.3:
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v2, s4
; GFX8_ITERATIVE-NEXT: s_mov_b32 m0, -1
; GFX8_ITERATIVE-NEXT: ds_sub_rtn_u32 v0, v0, v2
; GFX8_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_ITERATIVE-NEXT: .LBB10_4:
+; GFX8_ITERATIVE-NEXT: ; %bb.4:
; GFX8_ITERATIVE-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8_ITERATIVE-NEXT: v_readfirstlane_b32 s4, v0
@@ -4077,13 +4048,12 @@ define amdgpu_kernel void @sub_i32_varying(ptr addrspace(1) %out) {
; GFX9_ITERATIVE-NEXT: ; implicit-def: $vgpr0
; GFX9_ITERATIVE-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX9_ITERATIVE-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX9_ITERATIVE-NEXT: s_cbranch_execz .LBB10_4
; GFX9_ITERATIVE-NEXT: ; %bb.3:
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v2, s4
; GFX9_ITERATIVE-NEXT: ds_sub_rtn_u32 v0, v0, v2
; GFX9_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_ITERATIVE-NEXT: .LBB10_4:
+; GFX9_ITERATIVE-NEXT: ; %bb.4:
; GFX9_ITERATIVE-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9_ITERATIVE-NEXT: v_readfirstlane_b32 s4, v0
@@ -4310,13 +4280,12 @@ define amdgpu_kernel void @sub_i32_varying(ptr addrspace(1) %out) {
; GFX8_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
; GFX8_DPP-NEXT: ; implicit-def: $vgpr0
; GFX8_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX8_DPP-NEXT: s_cbranch_execz .LBB10_2
; GFX8_DPP-NEXT: ; %bb.1:
; GFX8_DPP-NEXT: v_mov_b32_e32 v0, s4
; GFX8_DPP-NEXT: s_mov_b32 m0, -1
; GFX8_DPP-NEXT: ds_sub_rtn_u32 v0, v3, v0
; GFX8_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_DPP-NEXT: .LBB10_2:
+; GFX8_DPP-NEXT: ; %bb.2:
; GFX8_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8_DPP-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8_DPP-NEXT: v_readfirstlane_b32 s4, v0
@@ -4355,12 +4324,11 @@ define amdgpu_kernel void @sub_i32_varying(ptr addrspace(1) %out) {
; GFX9_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
; GFX9_DPP-NEXT: ; implicit-def: $vgpr0
; GFX9_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX9_DPP-NEXT: s_cbranch_execz .LBB10_2
; GFX9_DPP-NEXT: ; %bb.1:
; GFX9_DPP-NEXT: v_mov_b32_e32 v0, s4
; GFX9_DPP-NEXT: ds_sub_rtn_u32 v0, v3, v0
; GFX9_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_DPP-NEXT: .LBB10_2:
+; GFX9_DPP-NEXT: ; %bb.2:
; GFX9_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9_DPP-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9_DPP-NEXT: v_readfirstlane_b32 s4, v0
@@ -4635,14 +4603,13 @@ define amdgpu_kernel void @sub_i32_varying_nouse() {
; GFX8_ITERATIVE-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX8_ITERATIVE-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX8_ITERATIVE-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX8_ITERATIVE-NEXT: s_cbranch_execz .LBB11_4
; GFX8_ITERATIVE-NEXT: ; %bb.3:
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v1, s2
; GFX8_ITERATIVE-NEXT: s_mov_b32 m0, -1
; GFX8_ITERATIVE-NEXT: ds_sub_u32 v0, v1
; GFX8_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_ITERATIVE-NEXT: .LBB11_4:
+; GFX8_ITERATIVE-NEXT: ; %bb.4:
; GFX8_ITERATIVE-NEXT: s_endpgm
;
; GFX9_ITERATIVE-LABEL: sub_i32_varying_nouse:
@@ -4664,13 +4631,12 @@ define amdgpu_kernel void @sub_i32_varying_nouse() {
; GFX9_ITERATIVE-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX9_ITERATIVE-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX9_ITERATIVE-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX9_ITERATIVE-NEXT: s_cbranch_execz .LBB11_4
; GFX9_ITERATIVE-NEXT: ; %bb.3:
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v1, s2
; GFX9_ITERATIVE-NEXT: ds_sub_u32 v0, v1
; GFX9_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_ITERATIVE-NEXT: .LBB11_4:
+; GFX9_ITERATIVE-NEXT: ; %bb.4:
; GFX9_ITERATIVE-NEXT: s_endpgm
;
; GFX1064_ITERATIVE-LABEL: sub_i32_varying_nouse:
@@ -4826,13 +4792,12 @@ define amdgpu_kernel void @sub_i32_varying_nouse() {
; GFX8_DPP-NEXT: s_mov_b32 s0, s2
; GFX8_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3
; GFX8_DPP-NEXT: s_and_saveexec_b64 s[2:3], vcc
-; GFX8_DPP-NEXT: s_cbranch_execz .LBB11_2
; GFX8_DPP-NEXT: ; %bb.1:
; GFX8_DPP-NEXT: v_mov_b32_e32 v0, s0
; GFX8_DPP-NEXT: s_mov_b32 m0, -1
; GFX8_DPP-NEXT: ds_sub_u32 v2, v0
; GFX8_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_DPP-NEXT: .LBB11_2:
+; GFX8_DPP-NEXT: ; %bb.2:
; GFX8_DPP-NEXT: s_endpgm
;
; GFX9_DPP-LABEL: sub_i32_varying_nouse:
@@ -4859,12 +4824,11 @@ define amdgpu_kernel void @sub_i32_varying_nouse() {
; GFX9_DPP-NEXT: s_mov_b32 s0, s2
; GFX9_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3
; GFX9_DPP-NEXT: s_and_saveexec_b64 s[2:3], vcc
-; GFX9_DPP-NEXT: s_cbranch_execz .LBB11_2
; GFX9_DPP-NEXT: ; %bb.1:
; GFX9_DPP-NEXT: v_mov_b32_e32 v0, s0
; GFX9_DPP-NEXT: ds_sub_u32 v2, v0
; GFX9_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_DPP-NEXT: .LBB11_2:
+; GFX9_DPP-NEXT: ; %bb.2:
; GFX9_DPP-NEXT: s_endpgm
;
; GFX1064_DPP-LABEL: sub_i32_varying_nouse:
@@ -5036,7 +5000,6 @@ define amdgpu_kernel void @sub_i64_constant(ptr addrspace(1) %out) {
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 0, v2
; GFX8-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX8-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX8-NEXT: s_cbranch_execz .LBB12_2
; GFX8-NEXT: ; %bb.1:
; GFX8-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
; GFX8-NEXT: s_mul_i32 s4, s4, 5
@@ -5045,7 +5008,7 @@ define amdgpu_kernel void @sub_i64_constant(ptr addrspace(1) %out) {
; GFX8-NEXT: s_mov_b32 m0, -1
; GFX8-NEXT: ds_sub_rtn_u64 v[0:1], v1, v[0:1]
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: .LBB12_2:
+; GFX8-NEXT: ; %bb.2:
; GFX8-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8-NEXT: v_readfirstlane_b32 s4, v1
@@ -5069,7 +5032,6 @@ define amdgpu_kernel void @sub_i64_constant(ptr addrspace(1) %out) {
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v2
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX9-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX9-NEXT: s_cbranch_execz .LBB12_2
; GFX9-NEXT: ; %bb.1:
; GFX9-NEXT: s_bcnt1_i32_b64 s4, s[4:5]
; GFX9-NEXT: s_mul_i32 s4, s4, 5
@@ -5077,7 +5039,7 @@ define amdgpu_kernel void @sub_i64_constant(ptr addrspace(1) %out) {
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: ds_sub_rtn_u64 v[0:1], v1, v[0:1]
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: .LBB12_2:
+; GFX9-NEXT: ; %bb.2:
; GFX9-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9-NEXT: v_readfirstlane_b32 s4, v1
@@ -5290,7 +5252,6 @@ define amdgpu_kernel void @sub_i64_uniform(ptr addrspace(1) %out, i64 %subitive)
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 0, v2
; GFX8-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX8-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GFX8-NEXT: s_cbranch_execz .LBB13_2
; GFX8-NEXT: ; %bb.1:
; GFX8-NEXT: s_bcnt1_i32_b64 s8, s[6:7]
; GFX8-NEXT: v_mov_b32_e32 v0, s8
@@ -5302,7 +5263,7 @@ define amdgpu_kernel void @sub_i64_uniform(ptr addrspace(1) %out, i64 %subitive)
; GFX8-NEXT: s_mov_b32 m0, -1
; GFX8-NEXT: ds_sub_rtn_u64 v[0:1], v3, v[0:1]
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: .LBB13_2:
+; GFX8-NEXT: ; %bb.2:
; GFX8-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: s_mov_b32 s4, s0
@@ -5329,7 +5290,6 @@ define amdgpu_kernel void @sub_i64_uniform(ptr addrspace(1) %out, i64 %subitive)
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v2
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX9-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX9-NEXT: s_cbranch_execz .LBB13_2
; GFX9-NEXT: ; %bb.1:
; GFX9-NEXT: s_bcnt1_i32_b64 s2, s[8:9]
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
@@ -5342,7 +5302,7 @@ define amdgpu_kernel void @sub_i64_uniform(ptr addrspace(1) %out, i64 %subitive)
; GFX9-NEXT: v_mov_b32_e32 v3, 0
; GFX9-NEXT: ds_sub_rtn_u64 v[0:1], v3, v[0:1]
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: .LBB13_2:
+; GFX9-NEXT: ; %bb.2:
; GFX9-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mad_u64_u32 v[3:4], s[0:1], s6, v2, 0
@@ -5605,7 +5565,6 @@ define amdgpu_kernel void @sub_i64_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: ; implicit-def: $vgpr3_vgpr4
; GFX8_ITERATIVE-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX8_ITERATIVE-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
-; GFX8_ITERATIVE-NEXT: s_cbranch_execz .LBB14_4
; GFX8_ITERATIVE-NEXT: ; %bb.3:
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v4, s1
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
@@ -5613,7 +5572,7 @@ define amdgpu_kernel void @sub_i64_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: s_mov_b32 m0, -1
; GFX8_ITERATIVE-NEXT: ds_sub_rtn_u64 v[3:4], v0, v[3:4]
; GFX8_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_ITERATIVE-NEXT: .LBB14_4:
+; GFX8_ITERATIVE-NEXT: ; %bb.4:
; GFX8_ITERATIVE-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX8_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8_ITERATIVE-NEXT: v_readfirstlane_b32 s4, v4
@@ -5654,14 +5613,13 @@ define amdgpu_kernel void @sub_i64_varying(ptr addrspace(1) %out) {
; GFX9_ITERATIVE-NEXT: ; implicit-def: $vgpr3_vgpr4
; GFX9_ITERATIVE-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX9_ITERATIVE-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
-; GFX9_ITERATIVE-NEXT: s_cbranch_execz .LBB14_4
; GFX9_ITERATIVE-NEXT: ; %bb.3:
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v4, s1
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v3, s0
; GFX9_ITERATIVE-NEXT: ds_sub_rtn_u64 v[3:4], v0, v[3:4]
; GFX9_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_ITERATIVE-NEXT: .LBB14_4:
+; GFX9_ITERATIVE-NEXT: ; %bb.4:
; GFX9_ITERATIVE-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9_ITERATIVE-NEXT: v_readfirstlane_b32 s4, v4
@@ -5957,14 +5915,13 @@ define amdgpu_kernel void @sub_i64_varying(ptr addrspace(1) %out) {
; GFX8_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v7
; GFX8_DPP-NEXT: ; implicit-def: $vgpr7_vgpr8
; GFX8_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX8_DPP-NEXT: s_cbranch_execz .LBB14_2
; GFX8_DPP-NEXT: ; %bb.1:
; GFX8_DPP-NEXT: v_mov_b32_e32 v8, s5
; GFX8_DPP-NEXT: v_mov_b32_e32 v7, s4
; GFX8_DPP-NEXT: s_mov_b32 m0, -1
; GFX8_DPP-NEXT: ds_sub_rtn_u64 v[7:8], v9, v[7:8]
; GFX8_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_DPP-NEXT: .LBB14_2:
+; GFX8_DPP-NEXT: ; %bb.2:
; GFX8_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8_DPP-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8_DPP-NEXT: v_readfirstlane_b32 s4, v8
@@ -6044,13 +6001,12 @@ define amdgpu_kernel void @sub_i64_varying(ptr addrspace(1) %out) {
; GFX9_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v7
; GFX9_DPP-NEXT: ; implicit-def: $vgpr7_vgpr8
; GFX9_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX9_DPP-NEXT: s_cbranch_execz .LBB14_2
; GFX9_DPP-NEXT: ; %bb.1:
; GFX9_DPP-NEXT: v_mov_b32_e32 v8, s5
; GFX9_DPP-NEXT: v_mov_b32_e32 v7, s4
; GFX9_DPP-NEXT: ds_sub_rtn_u64 v[7:8], v9, v[7:8]
; GFX9_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_DPP-NEXT: .LBB14_2:
+; GFX9_DPP-NEXT: ; %bb.2:
; GFX9_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9_DPP-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9_DPP-NEXT: v_readfirstlane_b32 s4, v8
@@ -6508,14 +6464,13 @@ define amdgpu_kernel void @and_i32_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: ; implicit-def: $vgpr0
; GFX8_ITERATIVE-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX8_ITERATIVE-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX8_ITERATIVE-NEXT: s_cbranch_execz .LBB15_4
; GFX8_ITERATIVE-NEXT: ; %bb.3:
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v2, s4
; GFX8_ITERATIVE-NEXT: s_mov_b32 m0, -1
; GFX8_ITERATIVE-NEXT: ds_and_rtn_b32 v0, v0, v2
; GFX8_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_ITERATIVE-NEXT: .LBB15_4:
+; GFX8_ITERATIVE-NEXT: ; %bb.4:
; GFX8_ITERATIVE-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8_ITERATIVE-NEXT: v_readfirstlane_b32 s4, v0
@@ -6549,13 +6504,12 @@ define amdgpu_kernel void @and_i32_varying(ptr addrspace(1) %out) {
; GFX9_ITERATIVE-NEXT: ; implicit-def: $vgpr0
; GFX9_ITERATIVE-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX9_ITERATIVE-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX9_ITERATIVE-NEXT: s_cbranch_execz .LBB15_4
; GFX9_ITERATIVE-NEXT: ; %bb.3:
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v2, s4
; GFX9_ITERATIVE-NEXT: ds_and_rtn_b32 v0, v0, v2
; GFX9_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_ITERATIVE-NEXT: .LBB15_4:
+; GFX9_ITERATIVE-NEXT: ; %bb.4:
; GFX9_ITERATIVE-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9_ITERATIVE-NEXT: v_readfirstlane_b32 s4, v0
@@ -6781,14 +6735,13 @@ define amdgpu_kernel void @and_i32_varying(ptr addrspace(1) %out) {
; GFX8_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3
; GFX8_DPP-NEXT: ; implicit-def: $vgpr0
; GFX8_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX8_DPP-NEXT: s_cbranch_execz .LBB15_2
; GFX8_DPP-NEXT: ; %bb.1:
; GFX8_DPP-NEXT: v_mov_b32_e32 v0, 0
; GFX8_DPP-NEXT: v_mov_b32_e32 v3, s4
; GFX8_DPP-NEXT: s_mov_b32 m0, -1
; GFX8_DPP-NEXT: ds_and_rtn_b32 v0, v0, v3
; GFX8_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_DPP-NEXT: .LBB15_2:
+; GFX8_DPP-NEXT: ; %bb.2:
; GFX8_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8_DPP-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8_DPP-NEXT: v_readfirstlane_b32 s4, v0
@@ -6826,13 +6779,12 @@ define amdgpu_kernel void @and_i32_varying(ptr addrspace(1) %out) {
; GFX9_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3
; GFX9_DPP-NEXT: ; implicit-def: $vgpr0
; GFX9_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX9_DPP-NEXT: s_cbranch_execz .LBB15_2
; GFX9_DPP-NEXT: ; %bb.1:
; GFX9_DPP-NEXT: v_mov_b32_e32 v0, 0
; GFX9_DPP-NEXT: v_mov_b32_e32 v3, s4
; GFX9_DPP-NEXT: ds_and_rtn_b32 v0, v0, v3
; GFX9_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_DPP-NEXT: .LBB15_2:
+; GFX9_DPP-NEXT: ; %bb.2:
; GFX9_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9_DPP-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9_DPP-NEXT: v_readfirstlane_b32 s4, v0
@@ -7132,7 +7084,6 @@ define amdgpu_kernel void @and_i64_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: ; implicit-def: $vgpr3_vgpr4
; GFX8_ITERATIVE-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX8_ITERATIVE-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
-; GFX8_ITERATIVE-NEXT: s_cbranch_execz .LBB16_4
; GFX8_ITERATIVE-NEXT: ; %bb.3:
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v4, s1
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
@@ -7140,7 +7091,7 @@ define amdgpu_kernel void @and_i64_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: s_mov_b32 m0, -1
; GFX8_ITERATIVE-NEXT: ds_and_rtn_b64 v[3:4], v0, v[3:4]
; GFX8_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_ITERATIVE-NEXT: .LBB16_4:
+; GFX8_ITERATIVE-NEXT: ; %bb.4:
; GFX8_ITERATIVE-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX8_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8_ITERATIVE-NEXT: v_readfirstlane_b32 s4, v4
@@ -7179,14 +7130,13 @@ define amdgpu_kernel void @and_i64_varying(ptr addrspace(1) %out) {
; GFX9_ITERATIVE-NEXT: ; implicit-def: $vgpr3_vgpr4
; GFX9_ITERATIVE-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX9_ITERATIVE-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
-; GFX9_ITERATIVE-NEXT: s_cbranch_execz .LBB16_4
; GFX9_ITERATIVE-NEXT: ; %bb.3:
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v4, s1
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v3, s0
; GFX9_ITERATIVE-NEXT: ds_and_rtn_b64 v[3:4], v0, v[3:4]
; GFX9_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_ITERATIVE-NEXT: .LBB16_4:
+; GFX9_ITERATIVE-NEXT: ; %bb.4:
; GFX9_ITERATIVE-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9_ITERATIVE-NEXT: v_readfirstlane_b32 s4, v4
@@ -7443,14 +7393,13 @@ define amdgpu_kernel void @and_i64_varying(ptr addrspace(1) %out) {
; GFX8_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v5
; GFX8_DPP-NEXT: ; implicit-def: $vgpr5_vgpr6
; GFX8_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX8_DPP-NEXT: s_cbranch_execz .LBB16_2
; GFX8_DPP-NEXT: ; %bb.1:
; GFX8_DPP-NEXT: v_mov_b32_e32 v6, s5
; GFX8_DPP-NEXT: v_mov_b32_e32 v5, s4
; GFX8_DPP-NEXT: s_mov_b32 m0, -1
; GFX8_DPP-NEXT: ds_and_rtn_b64 v[5:6], v7, v[5:6]
; GFX8_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_DPP-NEXT: .LBB16_2:
+; GFX8_DPP-NEXT: ; %bb.2:
; GFX8_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8_DPP-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x24
; GFX8_DPP-NEXT: v_readfirstlane_b32 s0, v6
@@ -7498,13 +7447,12 @@ define amdgpu_kernel void @and_i64_varying(ptr addrspace(1) %out) {
; GFX9_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v5
; GFX9_DPP-NEXT: ; implicit-def: $vgpr5_vgpr6
; GFX9_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX9_DPP-NEXT: s_cbranch_execz .LBB16_2
; GFX9_DPP-NEXT: ; %bb.1:
; GFX9_DPP-NEXT: v_mov_b32_e32 v6, s5
; GFX9_DPP-NEXT: v_mov_b32_e32 v5, s4
; GFX9_DPP-NEXT: ds_and_rtn_b64 v[5:6], v7, v[5:6]
; GFX9_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_DPP-NEXT: .LBB16_2:
+; GFX9_DPP-NEXT: ; %bb.2:
; GFX9_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9_DPP-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x24
; GFX9_DPP-NEXT: v_readfirstlane_b32 s0, v6
@@ -7885,14 +7833,13 @@ define amdgpu_kernel void @or_i32_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: ; implicit-def: $vgpr0
; GFX8_ITERATIVE-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX8_ITERATIVE-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX8_ITERATIVE-NEXT: s_cbranch_execz .LBB17_4
; GFX8_ITERATIVE-NEXT: ; %bb.3:
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v2, s4
; GFX8_ITERATIVE-NEXT: s_mov_b32 m0, -1
; GFX8_ITERATIVE-NEXT: ds_or_rtn_b32 v0, v0, v2
; GFX8_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_ITERATIVE-NEXT: .LBB17_4:
+; GFX8_ITERATIVE-NEXT: ; %bb.4:
; GFX8_ITERATIVE-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8_ITERATIVE-NEXT: v_readfirstlane_b32 s4, v0
@@ -7926,13 +7873,12 @@ define amdgpu_kernel void @or_i32_varying(ptr addrspace(1) %out) {
; GFX9_ITERATIVE-NEXT: ; implicit-def: $vgpr0
; GFX9_ITERATIVE-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX9_ITERATIVE-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX9_ITERATIVE-NEXT: s_cbranch_execz .LBB17_4
; GFX9_ITERATIVE-NEXT: ; %bb.3:
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v2, s4
; GFX9_ITERATIVE-NEXT: ds_or_rtn_b32 v0, v0, v2
; GFX9_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_ITERATIVE-NEXT: .LBB17_4:
+; GFX9_ITERATIVE-NEXT: ; %bb.4:
; GFX9_ITERATIVE-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9_ITERATIVE-NEXT: v_readfirstlane_b32 s4, v0
@@ -8159,13 +8105,12 @@ define amdgpu_kernel void @or_i32_varying(ptr addrspace(1) %out) {
; GFX8_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
; GFX8_DPP-NEXT: ; implicit-def: $vgpr0
; GFX8_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX8_DPP-NEXT: s_cbranch_execz .LBB17_2
; GFX8_DPP-NEXT: ; %bb.1:
; GFX8_DPP-NEXT: v_mov_b32_e32 v0, s4
; GFX8_DPP-NEXT: s_mov_b32 m0, -1
; GFX8_DPP-NEXT: ds_or_rtn_b32 v0, v3, v0
; GFX8_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_DPP-NEXT: .LBB17_2:
+; GFX8_DPP-NEXT: ; %bb.2:
; GFX8_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8_DPP-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8_DPP-NEXT: v_readfirstlane_b32 s4, v0
@@ -8204,12 +8149,11 @@ define amdgpu_kernel void @or_i32_varying(ptr addrspace(1) %out) {
; GFX9_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
; GFX9_DPP-NEXT: ; implicit-def: $vgpr0
; GFX9_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX9_DPP-NEXT: s_cbranch_execz .LBB17_2
; GFX9_DPP-NEXT: ; %bb.1:
; GFX9_DPP-NEXT: v_mov_b32_e32 v0, s4
; GFX9_DPP-NEXT: ds_or_rtn_b32 v0, v3, v0
; GFX9_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_DPP-NEXT: .LBB17_2:
+; GFX9_DPP-NEXT: ; %bb.2:
; GFX9_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9_DPP-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9_DPP-NEXT: v_readfirstlane_b32 s4, v0
@@ -8509,7 +8453,6 @@ define amdgpu_kernel void @or_i64_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: ; implicit-def: $vgpr3_vgpr4
; GFX8_ITERATIVE-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX8_ITERATIVE-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
-; GFX8_ITERATIVE-NEXT: s_cbranch_execz .LBB18_4
; GFX8_ITERATIVE-NEXT: ; %bb.3:
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v4, s1
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
@@ -8517,7 +8460,7 @@ define amdgpu_kernel void @or_i64_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: s_mov_b32 m0, -1
; GFX8_ITERATIVE-NEXT: ds_or_rtn_b64 v[3:4], v0, v[3:4]
; GFX8_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_ITERATIVE-NEXT: .LBB18_4:
+; GFX8_ITERATIVE-NEXT: ; %bb.4:
; GFX8_ITERATIVE-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX8_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8_ITERATIVE-NEXT: v_readfirstlane_b32 s4, v4
@@ -8556,14 +8499,13 @@ define amdgpu_kernel void @or_i64_varying(ptr addrspace(1) %out) {
; GFX9_ITERATIVE-NEXT: ; implicit-def: $vgpr3_vgpr4
; GFX9_ITERATIVE-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX9_ITERATIVE-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
-; GFX9_ITERATIVE-NEXT: s_cbranch_execz .LBB18_4
; GFX9_ITERATIVE-NEXT: ; %bb.3:
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v4, s1
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v3, s0
; GFX9_ITERATIVE-NEXT: ds_or_rtn_b64 v[3:4], v0, v[3:4]
; GFX9_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_ITERATIVE-NEXT: .LBB18_4:
+; GFX9_ITERATIVE-NEXT: ; %bb.4:
; GFX9_ITERATIVE-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9_ITERATIVE-NEXT: v_readfirstlane_b32 s4, v4
@@ -8820,14 +8762,13 @@ define amdgpu_kernel void @or_i64_varying(ptr addrspace(1) %out) {
; GFX8_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v5
; GFX8_DPP-NEXT: ; implicit-def: $vgpr5_vgpr6
; GFX8_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX8_DPP-NEXT: s_cbranch_execz .LBB18_2
; GFX8_DPP-NEXT: ; %bb.1:
; GFX8_DPP-NEXT: v_mov_b32_e32 v6, s5
; GFX8_DPP-NEXT: v_mov_b32_e32 v5, s4
; GFX8_DPP-NEXT: s_mov_b32 m0, -1
; GFX8_DPP-NEXT: ds_or_rtn_b64 v[5:6], v7, v[5:6]
; GFX8_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_DPP-NEXT: .LBB18_2:
+; GFX8_DPP-NEXT: ; %bb.2:
; GFX8_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8_DPP-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8_DPP-NEXT: v_readfirstlane_b32 s4, v6
@@ -8876,13 +8817,12 @@ define amdgpu_kernel void @or_i64_varying(ptr addrspace(1) %out) {
; GFX9_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v5
; GFX9_DPP-NEXT: ; implicit-def: $vgpr5_vgpr6
; GFX9_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX9_DPP-NEXT: s_cbranch_execz .LBB18_2
; GFX9_DPP-NEXT: ; %bb.1:
; GFX9_DPP-NEXT: v_mov_b32_e32 v6, s5
; GFX9_DPP-NEXT: v_mov_b32_e32 v5, s4
; GFX9_DPP-NEXT: ds_or_rtn_b64 v[5:6], v7, v[5:6]
; GFX9_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_DPP-NEXT: .LBB18_2:
+; GFX9_DPP-NEXT: ; %bb.2:
; GFX9_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9_DPP-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9_DPP-NEXT: v_readfirstlane_b32 s4, v6
@@ -9264,14 +9204,13 @@ define amdgpu_kernel void @xor_i32_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: ; implicit-def: $vgpr0
; GFX8_ITERATIVE-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX8_ITERATIVE-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX8_ITERATIVE-NEXT: s_cbranch_execz .LBB19_4
; GFX8_ITERATIVE-NEXT: ; %bb.3:
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v2, s4
; GFX8_ITERATIVE-NEXT: s_mov_b32 m0, -1
; GFX8_ITERATIVE-NEXT: ds_xor_rtn_b32 v0, v0, v2
; GFX8_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_ITERATIVE-NEXT: .LBB19_4:
+; GFX8_ITERATIVE-NEXT: ; %bb.4:
; GFX8_ITERATIVE-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8_ITERATIVE-NEXT: v_readfirstlane_b32 s4, v0
@@ -9305,13 +9244,12 @@ define amdgpu_kernel void @xor_i32_varying(ptr addrspace(1) %out) {
; GFX9_ITERATIVE-NEXT: ; implicit-def: $vgpr0
; GFX9_ITERATIVE-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX9_ITERATIVE-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX9_ITERATIVE-NEXT: s_cbranch_execz .LBB19_4
; GFX9_ITERATIVE-NEXT: ; %bb.3:
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v2, s4
; GFX9_ITERATIVE-NEXT: ds_xor_rtn_b32 v0, v0, v2
; GFX9_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_ITERATIVE-NEXT: .LBB19_4:
+; GFX9_ITERATIVE-NEXT: ; %bb.4:
; GFX9_ITERATIVE-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9_ITERATIVE-NEXT: v_readfirstlane_b32 s4, v0
@@ -9538,13 +9476,12 @@ define amdgpu_kernel void @xor_i32_varying(ptr addrspace(1) %out) {
; GFX8_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
; GFX8_DPP-NEXT: ; implicit-def: $vgpr0
; GFX8_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX8_DPP-NEXT: s_cbranch_execz .LBB19_2
; GFX8_DPP-NEXT: ; %bb.1:
; GFX8_DPP-NEXT: v_mov_b32_e32 v0, s4
; GFX8_DPP-NEXT: s_mov_b32 m0, -1
; GFX8_DPP-NEXT: ds_xor_rtn_b32 v0, v3, v0
; GFX8_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_DPP-NEXT: .LBB19_2:
+; GFX8_DPP-NEXT: ; %bb.2:
; GFX8_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8_DPP-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8_DPP-NEXT: v_readfirstlane_b32 s4, v0
@@ -9583,12 +9520,11 @@ define amdgpu_kernel void @xor_i32_varying(ptr addrspace(1) %out) {
; GFX9_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
; GFX9_DPP-NEXT: ; implicit-def: $vgpr0
; GFX9_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX9_DPP-NEXT: s_cbranch_execz .LBB19_2
; GFX9_DPP-NEXT: ; %bb.1:
; GFX9_DPP-NEXT: v_mov_b32_e32 v0, s4
; GFX9_DPP-NEXT: ds_xor_rtn_b32 v0, v3, v0
; GFX9_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_DPP-NEXT: .LBB19_2:
+; GFX9_DPP-NEXT: ; %bb.2:
; GFX9_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9_DPP-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9_DPP-NEXT: v_readfirstlane_b32 s4, v0
@@ -9888,7 +9824,6 @@ define amdgpu_kernel void @xor_i64_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: ; implicit-def: $vgpr3_vgpr4
; GFX8_ITERATIVE-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX8_ITERATIVE-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
-; GFX8_ITERATIVE-NEXT: s_cbranch_execz .LBB20_4
; GFX8_ITERATIVE-NEXT: ; %bb.3:
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v4, s1
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
@@ -9896,7 +9831,7 @@ define amdgpu_kernel void @xor_i64_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: s_mov_b32 m0, -1
; GFX8_ITERATIVE-NEXT: ds_xor_rtn_b64 v[3:4], v0, v[3:4]
; GFX8_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_ITERATIVE-NEXT: .LBB20_4:
+; GFX8_ITERATIVE-NEXT: ; %bb.4:
; GFX8_ITERATIVE-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX8_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8_ITERATIVE-NEXT: v_readfirstlane_b32 s4, v4
@@ -9935,14 +9870,13 @@ define amdgpu_kernel void @xor_i64_varying(ptr addrspace(1) %out) {
; GFX9_ITERATIVE-NEXT: ; implicit-def: $vgpr3_vgpr4
; GFX9_ITERATIVE-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX9_ITERATIVE-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
-; GFX9_ITERATIVE-NEXT: s_cbranch_execz .LBB20_4
; GFX9_ITERATIVE-NEXT: ; %bb.3:
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v4, s1
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v3, s0
; GFX9_ITERATIVE-NEXT: ds_xor_rtn_b64 v[3:4], v0, v[3:4]
; GFX9_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_ITERATIVE-NEXT: .LBB20_4:
+; GFX9_ITERATIVE-NEXT: ; %bb.4:
; GFX9_ITERATIVE-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9_ITERATIVE-NEXT: v_readfirstlane_b32 s4, v4
@@ -10199,14 +10133,13 @@ define amdgpu_kernel void @xor_i64_varying(ptr addrspace(1) %out) {
; GFX8_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v5
; GFX8_DPP-NEXT: ; implicit-def: $vgpr5_vgpr6
; GFX8_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX8_DPP-NEXT: s_cbranch_execz .LBB20_2
; GFX8_DPP-NEXT: ; %bb.1:
; GFX8_DPP-NEXT: v_mov_b32_e32 v6, s5
; GFX8_DPP-NEXT: v_mov_b32_e32 v5, s4
; GFX8_DPP-NEXT: s_mov_b32 m0, -1
; GFX8_DPP-NEXT: ds_xor_rtn_b64 v[5:6], v7, v[5:6]
; GFX8_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_DPP-NEXT: .LBB20_2:
+; GFX8_DPP-NEXT: ; %bb.2:
; GFX8_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8_DPP-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8_DPP-NEXT: v_readfirstlane_b32 s4, v6
@@ -10255,13 +10188,12 @@ define amdgpu_kernel void @xor_i64_varying(ptr addrspace(1) %out) {
; GFX9_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v5
; GFX9_DPP-NEXT: ; implicit-def: $vgpr5_vgpr6
; GFX9_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX9_DPP-NEXT: s_cbranch_execz .LBB20_2
; GFX9_DPP-NEXT: ; %bb.1:
; GFX9_DPP-NEXT: v_mov_b32_e32 v6, s5
; GFX9_DPP-NEXT: v_mov_b32_e32 v5, s4
; GFX9_DPP-NEXT: ds_xor_rtn_b64 v[5:6], v7, v[5:6]
; GFX9_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_DPP-NEXT: .LBB20_2:
+; GFX9_DPP-NEXT: ; %bb.2:
; GFX9_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9_DPP-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9_DPP-NEXT: v_readfirstlane_b32 s4, v6
@@ -10643,14 +10575,13 @@ define amdgpu_kernel void @max_i32_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: ; implicit-def: $vgpr0
; GFX8_ITERATIVE-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX8_ITERATIVE-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX8_ITERATIVE-NEXT: s_cbranch_execz .LBB21_4
; GFX8_ITERATIVE-NEXT: ; %bb.3:
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v2, s4
; GFX8_ITERATIVE-NEXT: s_mov_b32 m0, -1
; GFX8_ITERATIVE-NEXT: ds_max_rtn_i32 v0, v0, v2
; GFX8_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_ITERATIVE-NEXT: .LBB21_4:
+; GFX8_ITERATIVE-NEXT: ; %bb.4:
; GFX8_ITERATIVE-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8_ITERATIVE-NEXT: v_readfirstlane_b32 s4, v0
@@ -10684,13 +10615,12 @@ define amdgpu_kernel void @max_i32_varying(ptr addrspace(1) %out) {
; GFX9_ITERATIVE-NEXT: ; implicit-def: $vgpr0
; GFX9_ITERATIVE-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX9_ITERATIVE-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX9_ITERATIVE-NEXT: s_cbranch_execz .LBB21_4
; GFX9_ITERATIVE-NEXT: ; %bb.3:
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v2, s4
; GFX9_ITERATIVE-NEXT: ds_max_rtn_i32 v0, v0, v2
; GFX9_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_ITERATIVE-NEXT: .LBB21_4:
+; GFX9_ITERATIVE-NEXT: ; %bb.4:
; GFX9_ITERATIVE-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9_ITERATIVE-NEXT: v_readfirstlane_b32 s4, v0
@@ -10916,14 +10846,13 @@ define amdgpu_kernel void @max_i32_varying(ptr addrspace(1) %out) {
; GFX8_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3
; GFX8_DPP-NEXT: ; implicit-def: $vgpr0
; GFX8_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX8_DPP-NEXT: s_cbranch_execz .LBB21_2
; GFX8_DPP-NEXT: ; %bb.1:
; GFX8_DPP-NEXT: v_mov_b32_e32 v0, 0
; GFX8_DPP-NEXT: v_mov_b32_e32 v3, s4
; GFX8_DPP-NEXT: s_mov_b32 m0, -1
; GFX8_DPP-NEXT: ds_max_rtn_i32 v0, v0, v3
; GFX8_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_DPP-NEXT: .LBB21_2:
+; GFX8_DPP-NEXT: ; %bb.2:
; GFX8_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8_DPP-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8_DPP-NEXT: v_readfirstlane_b32 s4, v0
@@ -10961,13 +10890,12 @@ define amdgpu_kernel void @max_i32_varying(ptr addrspace(1) %out) {
; GFX9_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3
; GFX9_DPP-NEXT: ; implicit-def: $vgpr0
; GFX9_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX9_DPP-NEXT: s_cbranch_execz .LBB21_2
; GFX9_DPP-NEXT: ; %bb.1:
; GFX9_DPP-NEXT: v_mov_b32_e32 v0, 0
; GFX9_DPP-NEXT: v_mov_b32_e32 v3, s4
; GFX9_DPP-NEXT: ds_max_rtn_i32 v0, v0, v3
; GFX9_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_DPP-NEXT: .LBB21_2:
+; GFX9_DPP-NEXT: ; %bb.2:
; GFX9_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9_DPP-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9_DPP-NEXT: v_readfirstlane_b32 s4, v0
@@ -11234,7 +11162,6 @@ define amdgpu_kernel void @max_i64_constant(ptr addrspace(1) %out) {
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX8-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX8-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX8-NEXT: s_cbranch_execz .LBB22_2
; GFX8-NEXT: ; %bb.1:
; GFX8-NEXT: v_mov_b32_e32 v0, 5
; GFX8-NEXT: v_mov_b32_e32 v1, 0
@@ -11242,7 +11169,7 @@ define amdgpu_kernel void @max_i64_constant(ptr addrspace(1) %out) {
; GFX8-NEXT: s_mov_b32 m0, -1
; GFX8-NEXT: ds_max_rtn_i64 v[0:1], v2, v[0:1]
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: .LBB22_2:
+; GFX8-NEXT: ; %bb.2:
; GFX8-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8-NEXT: v_readfirstlane_b32 s4, v0
; GFX8-NEXT: v_bfrev_b32_e32 v0, 1
@@ -11268,14 +11195,13 @@ define amdgpu_kernel void @max_i64_constant(ptr addrspace(1) %out) {
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX9-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX9-NEXT: s_cbranch_execz .LBB22_2
; GFX9-NEXT: ; %bb.1:
; GFX9-NEXT: v_mov_b32_e32 v0, 5
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: ds_max_rtn_i64 v[0:1], v2, v[0:1]
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: .LBB22_2:
+; GFX9-NEXT: ; %bb.2:
; GFX9-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9-NEXT: v_readfirstlane_b32 s4, v0
; GFX9-NEXT: v_bfrev_b32_e32 v0, 1
@@ -11522,7 +11448,6 @@ define amdgpu_kernel void @max_i64_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: ; implicit-def: $vgpr3_vgpr4
; GFX8_ITERATIVE-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX8_ITERATIVE-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
-; GFX8_ITERATIVE-NEXT: s_cbranch_execz .LBB23_4
; GFX8_ITERATIVE-NEXT: ; %bb.3:
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v4, s1
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
@@ -11530,7 +11455,7 @@ define amdgpu_kernel void @max_i64_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: s_mov_b32 m0, -1
; GFX8_ITERATIVE-NEXT: ds_max_rtn_i64 v[3:4], v0, v[3:4]
; GFX8_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_ITERATIVE-NEXT: .LBB23_4:
+; GFX8_ITERATIVE-NEXT: ; %bb.4:
; GFX8_ITERATIVE-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX8_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8_ITERATIVE-NEXT: v_readfirstlane_b32 s5, v4
@@ -11578,14 +11503,13 @@ define amdgpu_kernel void @max_i64_varying(ptr addrspace(1) %out) {
; GFX9_ITERATIVE-NEXT: ; implicit-def: $vgpr3_vgpr4
; GFX9_ITERATIVE-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX9_ITERATIVE-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
-; GFX9_ITERATIVE-NEXT: s_cbranch_execz .LBB23_4
; GFX9_ITERATIVE-NEXT: ; %bb.3:
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v4, s1
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v3, s0
; GFX9_ITERATIVE-NEXT: ds_max_rtn_i64 v[3:4], v0, v[3:4]
; GFX9_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_ITERATIVE-NEXT: .LBB23_4:
+; GFX9_ITERATIVE-NEXT: ; %bb.4:
; GFX9_ITERATIVE-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9_ITERATIVE-NEXT: v_readfirstlane_b32 s5, v4
@@ -11907,14 +11831,13 @@ define amdgpu_kernel void @max_i64_varying(ptr addrspace(1) %out) {
; GFX8_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v7
; GFX8_DPP-NEXT: ; implicit-def: $vgpr7_vgpr8
; GFX8_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX8_DPP-NEXT: s_cbranch_execz .LBB23_2
; GFX8_DPP-NEXT: ; %bb.1:
; GFX8_DPP-NEXT: v_mov_b32_e32 v8, s5
; GFX8_DPP-NEXT: v_mov_b32_e32 v7, s4
; GFX8_DPP-NEXT: s_mov_b32 m0, -1
; GFX8_DPP-NEXT: ds_max_rtn_i64 v[7:8], v9, v[7:8]
; GFX8_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_DPP-NEXT: .LBB23_2:
+; GFX8_DPP-NEXT: ; %bb.2:
; GFX8_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8_DPP-NEXT: v_readfirstlane_b32 s5, v8
; GFX8_DPP-NEXT: v_readfirstlane_b32 s4, v7
@@ -12002,13 +11925,12 @@ define amdgpu_kernel void @max_i64_varying(ptr addrspace(1) %out) {
; GFX9_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v7
; GFX9_DPP-NEXT: ; implicit-def: $vgpr7_vgpr8
; GFX9_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX9_DPP-NEXT: s_cbranch_execz .LBB23_2
; GFX9_DPP-NEXT: ; %bb.1:
; GFX9_DPP-NEXT: v_mov_b32_e32 v8, s5
; GFX9_DPP-NEXT: v_mov_b32_e32 v7, s4
; GFX9_DPP-NEXT: ds_max_rtn_i64 v[7:8], v9, v[7:8]
; GFX9_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_DPP-NEXT: .LBB23_2:
+; GFX9_DPP-NEXT: ; %bb.2:
; GFX9_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9_DPP-NEXT: v_readfirstlane_b32 s5, v8
; GFX9_DPP-NEXT: v_readfirstlane_b32 s4, v7
@@ -12514,14 +12436,13 @@ define amdgpu_kernel void @min_i32_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: ; implicit-def: $vgpr0
; GFX8_ITERATIVE-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX8_ITERATIVE-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX8_ITERATIVE-NEXT: s_cbranch_execz .LBB24_4
; GFX8_ITERATIVE-NEXT: ; %bb.3:
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v2, s4
; GFX8_ITERATIVE-NEXT: s_mov_b32 m0, -1
; GFX8_ITERATIVE-NEXT: ds_min_rtn_i32 v0, v0, v2
; GFX8_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_ITERATIVE-NEXT: .LBB24_4:
+; GFX8_ITERATIVE-NEXT: ; %bb.4:
; GFX8_ITERATIVE-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8_ITERATIVE-NEXT: v_readfirstlane_b32 s4, v0
@@ -12555,13 +12476,12 @@ define amdgpu_kernel void @min_i32_varying(ptr addrspace(1) %out) {
; GFX9_ITERATIVE-NEXT: ; implicit-def: $vgpr0
; GFX9_ITERATIVE-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX9_ITERATIVE-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX9_ITERATIVE-NEXT: s_cbranch_execz .LBB24_4
; GFX9_ITERATIVE-NEXT: ; %bb.3:
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v2, s4
; GFX9_ITERATIVE-NEXT: ds_min_rtn_i32 v0, v0, v2
; GFX9_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_ITERATIVE-NEXT: .LBB24_4:
+; GFX9_ITERATIVE-NEXT: ; %bb.4:
; GFX9_ITERATIVE-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9_ITERATIVE-NEXT: v_readfirstlane_b32 s4, v0
@@ -12787,14 +12707,13 @@ define amdgpu_kernel void @min_i32_varying(ptr addrspace(1) %out) {
; GFX8_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3
; GFX8_DPP-NEXT: ; implicit-def: $vgpr0
; GFX8_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX8_DPP-NEXT: s_cbranch_execz .LBB24_2
; GFX8_DPP-NEXT: ; %bb.1:
; GFX8_DPP-NEXT: v_mov_b32_e32 v0, 0
; GFX8_DPP-NEXT: v_mov_b32_e32 v3, s4
; GFX8_DPP-NEXT: s_mov_b32 m0, -1
; GFX8_DPP-NEXT: ds_min_rtn_i32 v0, v0, v3
; GFX8_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_DPP-NEXT: .LBB24_2:
+; GFX8_DPP-NEXT: ; %bb.2:
; GFX8_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8_DPP-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8_DPP-NEXT: v_readfirstlane_b32 s4, v0
@@ -12832,13 +12751,12 @@ define amdgpu_kernel void @min_i32_varying(ptr addrspace(1) %out) {
; GFX9_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3
; GFX9_DPP-NEXT: ; implicit-def: $vgpr0
; GFX9_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX9_DPP-NEXT: s_cbranch_execz .LBB24_2
; GFX9_DPP-NEXT: ; %bb.1:
; GFX9_DPP-NEXT: v_mov_b32_e32 v0, 0
; GFX9_DPP-NEXT: v_mov_b32_e32 v3, s4
; GFX9_DPP-NEXT: ds_min_rtn_i32 v0, v0, v3
; GFX9_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_DPP-NEXT: .LBB24_2:
+; GFX9_DPP-NEXT: ; %bb.2:
; GFX9_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9_DPP-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9_DPP-NEXT: v_readfirstlane_b32 s4, v0
@@ -13105,7 +13023,6 @@ define amdgpu_kernel void @min_i64_constant(ptr addrspace(1) %out) {
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX8-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX8-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX8-NEXT: s_cbranch_execz .LBB25_2
; GFX8-NEXT: ; %bb.1:
; GFX8-NEXT: v_mov_b32_e32 v0, 5
; GFX8-NEXT: v_mov_b32_e32 v1, 0
@@ -13113,7 +13030,7 @@ define amdgpu_kernel void @min_i64_constant(ptr addrspace(1) %out) {
; GFX8-NEXT: s_mov_b32 m0, -1
; GFX8-NEXT: ds_min_rtn_i64 v[0:1], v2, v[0:1]
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: .LBB25_2:
+; GFX8-NEXT: ; %bb.2:
; GFX8-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8-NEXT: v_readfirstlane_b32 s4, v0
; GFX8-NEXT: v_bfrev_b32_e32 v0, -2
@@ -13139,14 +13056,13 @@ define amdgpu_kernel void @min_i64_constant(ptr addrspace(1) %out) {
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX9-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX9-NEXT: s_cbranch_execz .LBB25_2
; GFX9-NEXT: ; %bb.1:
; GFX9-NEXT: v_mov_b32_e32 v0, 5
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: ds_min_rtn_i64 v[0:1], v2, v[0:1]
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: .LBB25_2:
+; GFX9-NEXT: ; %bb.2:
; GFX9-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9-NEXT: v_readfirstlane_b32 s4, v0
; GFX9-NEXT: v_bfrev_b32_e32 v0, -2
@@ -13393,7 +13309,6 @@ define amdgpu_kernel void @min_i64_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: ; implicit-def: $vgpr3_vgpr4
; GFX8_ITERATIVE-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX8_ITERATIVE-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
-; GFX8_ITERATIVE-NEXT: s_cbranch_execz .LBB26_4
; GFX8_ITERATIVE-NEXT: ; %bb.3:
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v4, s1
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
@@ -13401,7 +13316,7 @@ define amdgpu_kernel void @min_i64_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: s_mov_b32 m0, -1
; GFX8_ITERATIVE-NEXT: ds_min_rtn_i64 v[3:4], v0, v[3:4]
; GFX8_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_ITERATIVE-NEXT: .LBB26_4:
+; GFX8_ITERATIVE-NEXT: ; %bb.4:
; GFX8_ITERATIVE-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX8_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8_ITERATIVE-NEXT: v_readfirstlane_b32 s5, v4
@@ -13449,14 +13364,13 @@ define amdgpu_kernel void @min_i64_varying(ptr addrspace(1) %out) {
; GFX9_ITERATIVE-NEXT: ; implicit-def: $vgpr3_vgpr4
; GFX9_ITERATIVE-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX9_ITERATIVE-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
-; GFX9_ITERATIVE-NEXT: s_cbranch_execz .LBB26_4
; GFX9_ITERATIVE-NEXT: ; %bb.3:
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v4, s1
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v3, s0
; GFX9_ITERATIVE-NEXT: ds_min_rtn_i64 v[3:4], v0, v[3:4]
; GFX9_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_ITERATIVE-NEXT: .LBB26_4:
+; GFX9_ITERATIVE-NEXT: ; %bb.4:
; GFX9_ITERATIVE-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9_ITERATIVE-NEXT: v_readfirstlane_b32 s5, v4
@@ -13777,14 +13691,13 @@ define amdgpu_kernel void @min_i64_varying(ptr addrspace(1) %out) {
; GFX8_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v7
; GFX8_DPP-NEXT: ; implicit-def: $vgpr7_vgpr8
; GFX8_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX8_DPP-NEXT: s_cbranch_execz .LBB26_2
; GFX8_DPP-NEXT: ; %bb.1:
; GFX8_DPP-NEXT: v_mov_b32_e32 v8, s5
; GFX8_DPP-NEXT: v_mov_b32_e32 v7, s4
; GFX8_DPP-NEXT: s_mov_b32 m0, -1
; GFX8_DPP-NEXT: ds_min_rtn_i64 v[7:8], v9, v[7:8]
; GFX8_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_DPP-NEXT: .LBB26_2:
+; GFX8_DPP-NEXT: ; %bb.2:
; GFX8_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8_DPP-NEXT: v_readfirstlane_b32 s1, v8
; GFX8_DPP-NEXT: v_readfirstlane_b32 s0, v7
@@ -13870,13 +13783,12 @@ define amdgpu_kernel void @min_i64_varying(ptr addrspace(1) %out) {
; GFX9_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v7
; GFX9_DPP-NEXT: ; implicit-def: $vgpr7_vgpr8
; GFX9_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX9_DPP-NEXT: s_cbranch_execz .LBB26_2
; GFX9_DPP-NEXT: ; %bb.1:
; GFX9_DPP-NEXT: v_mov_b32_e32 v8, s5
; GFX9_DPP-NEXT: v_mov_b32_e32 v7, s4
; GFX9_DPP-NEXT: ds_min_rtn_i64 v[7:8], v9, v[7:8]
; GFX9_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_DPP-NEXT: .LBB26_2:
+; GFX9_DPP-NEXT: ; %bb.2:
; GFX9_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9_DPP-NEXT: v_readfirstlane_b32 s1, v8
; GFX9_DPP-NEXT: v_readfirstlane_b32 s0, v7
@@ -14377,14 +14289,13 @@ define amdgpu_kernel void @umax_i32_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: ; implicit-def: $vgpr0
; GFX8_ITERATIVE-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX8_ITERATIVE-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX8_ITERATIVE-NEXT: s_cbranch_execz .LBB27_4
; GFX8_ITERATIVE-NEXT: ; %bb.3:
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v2, s4
; GFX8_ITERATIVE-NEXT: s_mov_b32 m0, -1
; GFX8_ITERATIVE-NEXT: ds_max_rtn_u32 v0, v0, v2
; GFX8_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_ITERATIVE-NEXT: .LBB27_4:
+; GFX8_ITERATIVE-NEXT: ; %bb.4:
; GFX8_ITERATIVE-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8_ITERATIVE-NEXT: v_readfirstlane_b32 s4, v0
@@ -14418,13 +14329,12 @@ define amdgpu_kernel void @umax_i32_varying(ptr addrspace(1) %out) {
; GFX9_ITERATIVE-NEXT: ; implicit-def: $vgpr0
; GFX9_ITERATIVE-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX9_ITERATIVE-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX9_ITERATIVE-NEXT: s_cbranch_execz .LBB27_4
; GFX9_ITERATIVE-NEXT: ; %bb.3:
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v2, s4
; GFX9_ITERATIVE-NEXT: ds_max_rtn_u32 v0, v0, v2
; GFX9_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_ITERATIVE-NEXT: .LBB27_4:
+; GFX9_ITERATIVE-NEXT: ; %bb.4:
; GFX9_ITERATIVE-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9_ITERATIVE-NEXT: v_readfirstlane_b32 s4, v0
@@ -14651,13 +14561,12 @@ define amdgpu_kernel void @umax_i32_varying(ptr addrspace(1) %out) {
; GFX8_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
; GFX8_DPP-NEXT: ; implicit-def: $vgpr0
; GFX8_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX8_DPP-NEXT: s_cbranch_execz .LBB27_2
; GFX8_DPP-NEXT: ; %bb.1:
; GFX8_DPP-NEXT: v_mov_b32_e32 v0, s4
; GFX8_DPP-NEXT: s_mov_b32 m0, -1
; GFX8_DPP-NEXT: ds_max_rtn_u32 v0, v3, v0
; GFX8_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_DPP-NEXT: .LBB27_2:
+; GFX8_DPP-NEXT: ; %bb.2:
; GFX8_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8_DPP-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8_DPP-NEXT: v_readfirstlane_b32 s4, v0
@@ -14696,12 +14605,11 @@ define amdgpu_kernel void @umax_i32_varying(ptr addrspace(1) %out) {
; GFX9_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
; GFX9_DPP-NEXT: ; implicit-def: $vgpr0
; GFX9_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX9_DPP-NEXT: s_cbranch_execz .LBB27_2
; GFX9_DPP-NEXT: ; %bb.1:
; GFX9_DPP-NEXT: v_mov_b32_e32 v0, s4
; GFX9_DPP-NEXT: ds_max_rtn_u32 v0, v3, v0
; GFX9_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_DPP-NEXT: .LBB27_2:
+; GFX9_DPP-NEXT: ; %bb.2:
; GFX9_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9_DPP-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9_DPP-NEXT: v_readfirstlane_b32 s4, v0
@@ -14967,7 +14875,6 @@ define amdgpu_kernel void @umax_i64_constant(ptr addrspace(1) %out) {
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX8-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX8-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX8-NEXT: s_cbranch_execz .LBB28_2
; GFX8-NEXT: ; %bb.1:
; GFX8-NEXT: v_mov_b32_e32 v0, 5
; GFX8-NEXT: v_mov_b32_e32 v1, 0
@@ -14975,7 +14882,7 @@ define amdgpu_kernel void @umax_i64_constant(ptr addrspace(1) %out) {
; GFX8-NEXT: s_mov_b32 m0, -1
; GFX8-NEXT: ds_max_rtn_u64 v[0:1], v2, v[0:1]
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: .LBB28_2:
+; GFX8-NEXT: ; %bb.2:
; GFX8-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8-NEXT: v_readfirstlane_b32 s5, v1
@@ -15000,14 +14907,13 @@ define amdgpu_kernel void @umax_i64_constant(ptr addrspace(1) %out) {
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX9-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX9-NEXT: s_cbranch_execz .LBB28_2
; GFX9-NEXT: ; %bb.1:
; GFX9-NEXT: v_mov_b32_e32 v0, 5
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: ds_max_rtn_u64 v[0:1], v2, v[0:1]
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: .LBB28_2:
+; GFX9-NEXT: ; %bb.2:
; GFX9-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9-NEXT: v_readfirstlane_b32 s5, v1
@@ -15251,7 +15157,6 @@ define amdgpu_kernel void @umax_i64_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: ; implicit-def: $vgpr3_vgpr4
; GFX8_ITERATIVE-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX8_ITERATIVE-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
-; GFX8_ITERATIVE-NEXT: s_cbranch_execz .LBB29_4
; GFX8_ITERATIVE-NEXT: ; %bb.3:
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v4, s1
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
@@ -15259,7 +15164,7 @@ define amdgpu_kernel void @umax_i64_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: s_mov_b32 m0, -1
; GFX8_ITERATIVE-NEXT: ds_max_rtn_u64 v[3:4], v0, v[3:4]
; GFX8_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_ITERATIVE-NEXT: .LBB29_4:
+; GFX8_ITERATIVE-NEXT: ; %bb.4:
; GFX8_ITERATIVE-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX8_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8_ITERATIVE-NEXT: v_readfirstlane_b32 s5, v4
@@ -15306,14 +15211,13 @@ define amdgpu_kernel void @umax_i64_varying(ptr addrspace(1) %out) {
; GFX9_ITERATIVE-NEXT: ; implicit-def: $vgpr3_vgpr4
; GFX9_ITERATIVE-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX9_ITERATIVE-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
-; GFX9_ITERATIVE-NEXT: s_cbranch_execz .LBB29_4
; GFX9_ITERATIVE-NEXT: ; %bb.3:
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v4, s1
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v3, s0
; GFX9_ITERATIVE-NEXT: ds_max_rtn_u64 v[3:4], v0, v[3:4]
; GFX9_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_ITERATIVE-NEXT: .LBB29_4:
+; GFX9_ITERATIVE-NEXT: ; %bb.4:
; GFX9_ITERATIVE-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9_ITERATIVE-NEXT: v_readfirstlane_b32 s5, v4
@@ -15627,14 +15531,13 @@ define amdgpu_kernel void @umax_i64_varying(ptr addrspace(1) %out) {
; GFX8_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v7
; GFX8_DPP-NEXT: ; implicit-def: $vgpr7_vgpr8
; GFX8_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX8_DPP-NEXT: s_cbranch_execz .LBB29_2
; GFX8_DPP-NEXT: ; %bb.1:
; GFX8_DPP-NEXT: v_mov_b32_e32 v8, s5
; GFX8_DPP-NEXT: v_mov_b32_e32 v7, s4
; GFX8_DPP-NEXT: s_mov_b32 m0, -1
; GFX8_DPP-NEXT: ds_max_rtn_u64 v[7:8], v9, v[7:8]
; GFX8_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_DPP-NEXT: .LBB29_2:
+; GFX8_DPP-NEXT: ; %bb.2:
; GFX8_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8_DPP-NEXT: v_readfirstlane_b32 s5, v8
; GFX8_DPP-NEXT: v_readfirstlane_b32 s4, v7
@@ -15718,13 +15621,12 @@ define amdgpu_kernel void @umax_i64_varying(ptr addrspace(1) %out) {
; GFX9_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v7
; GFX9_DPP-NEXT: ; implicit-def: $vgpr7_vgpr8
; GFX9_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX9_DPP-NEXT: s_cbranch_execz .LBB29_2
; GFX9_DPP-NEXT: ; %bb.1:
; GFX9_DPP-NEXT: v_mov_b32_e32 v8, s5
; GFX9_DPP-NEXT: v_mov_b32_e32 v7, s4
; GFX9_DPP-NEXT: ds_max_rtn_u64 v[7:8], v9, v[7:8]
; GFX9_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_DPP-NEXT: .LBB29_2:
+; GFX9_DPP-NEXT: ; %bb.2:
; GFX9_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9_DPP-NEXT: v_readfirstlane_b32 s5, v8
; GFX9_DPP-NEXT: v_readfirstlane_b32 s4, v7
@@ -16223,14 +16125,13 @@ define amdgpu_kernel void @umin_i32_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: ; implicit-def: $vgpr0
; GFX8_ITERATIVE-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX8_ITERATIVE-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX8_ITERATIVE-NEXT: s_cbranch_execz .LBB30_4
; GFX8_ITERATIVE-NEXT: ; %bb.3:
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v2, s4
; GFX8_ITERATIVE-NEXT: s_mov_b32 m0, -1
; GFX8_ITERATIVE-NEXT: ds_min_rtn_u32 v0, v0, v2
; GFX8_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_ITERATIVE-NEXT: .LBB30_4:
+; GFX8_ITERATIVE-NEXT: ; %bb.4:
; GFX8_ITERATIVE-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8_ITERATIVE-NEXT: v_readfirstlane_b32 s4, v0
@@ -16264,13 +16165,12 @@ define amdgpu_kernel void @umin_i32_varying(ptr addrspace(1) %out) {
; GFX9_ITERATIVE-NEXT: ; implicit-def: $vgpr0
; GFX9_ITERATIVE-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX9_ITERATIVE-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX9_ITERATIVE-NEXT: s_cbranch_execz .LBB30_4
; GFX9_ITERATIVE-NEXT: ; %bb.3:
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v2, s4
; GFX9_ITERATIVE-NEXT: ds_min_rtn_u32 v0, v0, v2
; GFX9_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_ITERATIVE-NEXT: .LBB30_4:
+; GFX9_ITERATIVE-NEXT: ; %bb.4:
; GFX9_ITERATIVE-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9_ITERATIVE-NEXT: v_readfirstlane_b32 s4, v0
@@ -16496,14 +16396,13 @@ define amdgpu_kernel void @umin_i32_varying(ptr addrspace(1) %out) {
; GFX8_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3
; GFX8_DPP-NEXT: ; implicit-def: $vgpr0
; GFX8_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX8_DPP-NEXT: s_cbranch_execz .LBB30_2
; GFX8_DPP-NEXT: ; %bb.1:
; GFX8_DPP-NEXT: v_mov_b32_e32 v0, 0
; GFX8_DPP-NEXT: v_mov_b32_e32 v3, s4
; GFX8_DPP-NEXT: s_mov_b32 m0, -1
; GFX8_DPP-NEXT: ds_min_rtn_u32 v0, v0, v3
; GFX8_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_DPP-NEXT: .LBB30_2:
+; GFX8_DPP-NEXT: ; %bb.2:
; GFX8_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8_DPP-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8_DPP-NEXT: v_readfirstlane_b32 s4, v0
@@ -16541,13 +16440,12 @@ define amdgpu_kernel void @umin_i32_varying(ptr addrspace(1) %out) {
; GFX9_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3
; GFX9_DPP-NEXT: ; implicit-def: $vgpr0
; GFX9_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX9_DPP-NEXT: s_cbranch_execz .LBB30_2
; GFX9_DPP-NEXT: ; %bb.1:
; GFX9_DPP-NEXT: v_mov_b32_e32 v0, 0
; GFX9_DPP-NEXT: v_mov_b32_e32 v3, s4
; GFX9_DPP-NEXT: ds_min_rtn_u32 v0, v0, v3
; GFX9_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_DPP-NEXT: .LBB30_2:
+; GFX9_DPP-NEXT: ; %bb.2:
; GFX9_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9_DPP-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9_DPP-NEXT: v_readfirstlane_b32 s4, v0
@@ -16813,7 +16711,6 @@ define amdgpu_kernel void @umin_i64_constant(ptr addrspace(1) %out) {
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX8-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX8-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX8-NEXT: s_cbranch_execz .LBB31_2
; GFX8-NEXT: ; %bb.1:
; GFX8-NEXT: v_mov_b32_e32 v0, 5
; GFX8-NEXT: v_mov_b32_e32 v1, 0
@@ -16821,7 +16718,7 @@ define amdgpu_kernel void @umin_i64_constant(ptr addrspace(1) %out) {
; GFX8-NEXT: s_mov_b32 m0, -1
; GFX8-NEXT: ds_min_rtn_u64 v[0:1], v2, v[0:1]
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: .LBB31_2:
+; GFX8-NEXT: ; %bb.2:
; GFX8-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8-NEXT: v_readfirstlane_b32 s5, v1
@@ -16846,14 +16743,13 @@ define amdgpu_kernel void @umin_i64_constant(ptr addrspace(1) %out) {
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX9-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX9-NEXT: s_cbranch_execz .LBB31_2
; GFX9-NEXT: ; %bb.1:
; GFX9-NEXT: v_mov_b32_e32 v0, 5
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: ds_min_rtn_u64 v[0:1], v2, v[0:1]
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: .LBB31_2:
+; GFX9-NEXT: ; %bb.2:
; GFX9-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9-NEXT: v_readfirstlane_b32 s5, v1
@@ -17097,7 +16993,6 @@ define amdgpu_kernel void @umin_i64_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: ; implicit-def: $vgpr3_vgpr4
; GFX8_ITERATIVE-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX8_ITERATIVE-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
-; GFX8_ITERATIVE-NEXT: s_cbranch_execz .LBB32_4
; GFX8_ITERATIVE-NEXT: ; %bb.3:
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v4, s1
; GFX8_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
@@ -17105,7 +17000,7 @@ define amdgpu_kernel void @umin_i64_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: s_mov_b32 m0, -1
; GFX8_ITERATIVE-NEXT: ds_min_rtn_u64 v[3:4], v0, v[3:4]
; GFX8_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_ITERATIVE-NEXT: .LBB32_4:
+; GFX8_ITERATIVE-NEXT: ; %bb.4:
; GFX8_ITERATIVE-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX8_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX8_ITERATIVE-NEXT: v_readfirstlane_b32 s5, v4
@@ -17152,14 +17047,13 @@ define amdgpu_kernel void @umin_i64_varying(ptr addrspace(1) %out) {
; GFX9_ITERATIVE-NEXT: ; implicit-def: $vgpr3_vgpr4
; GFX9_ITERATIVE-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX9_ITERATIVE-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
-; GFX9_ITERATIVE-NEXT: s_cbranch_execz .LBB32_4
; GFX9_ITERATIVE-NEXT: ; %bb.3:
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v4, s1
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v0, 0
; GFX9_ITERATIVE-NEXT: v_mov_b32_e32 v3, s0
; GFX9_ITERATIVE-NEXT: ds_min_rtn_u64 v[3:4], v0, v[3:4]
; GFX9_ITERATIVE-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_ITERATIVE-NEXT: .LBB32_4:
+; GFX9_ITERATIVE-NEXT: ; %bb.4:
; GFX9_ITERATIVE-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9_ITERATIVE-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9_ITERATIVE-NEXT: v_readfirstlane_b32 s5, v4
@@ -17473,14 +17367,13 @@ define amdgpu_kernel void @umin_i64_varying(ptr addrspace(1) %out) {
; GFX8_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v7
; GFX8_DPP-NEXT: ; implicit-def: $vgpr7_vgpr8
; GFX8_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX8_DPP-NEXT: s_cbranch_execz .LBB32_2
; GFX8_DPP-NEXT: ; %bb.1:
; GFX8_DPP-NEXT: v_mov_b32_e32 v8, s5
; GFX8_DPP-NEXT: v_mov_b32_e32 v7, s4
; GFX8_DPP-NEXT: s_mov_b32 m0, -1
; GFX8_DPP-NEXT: ds_min_rtn_u64 v[7:8], v9, v[7:8]
; GFX8_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8_DPP-NEXT: .LBB32_2:
+; GFX8_DPP-NEXT: ; %bb.2:
; GFX8_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8_DPP-NEXT: v_readfirstlane_b32 s1, v8
; GFX8_DPP-NEXT: v_readfirstlane_b32 s0, v7
@@ -17563,13 +17456,12 @@ define amdgpu_kernel void @umin_i64_varying(ptr addrspace(1) %out) {
; GFX9_DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v7
; GFX9_DPP-NEXT: ; implicit-def: $vgpr7_vgpr8
; GFX9_DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX9_DPP-NEXT: s_cbranch_execz .LBB32_2
; GFX9_DPP-NEXT: ; %bb.1:
; GFX9_DPP-NEXT: v_mov_b32_e32 v8, s5
; GFX9_DPP-NEXT: v_mov_b32_e32 v7, s4
; GFX9_DPP-NEXT: ds_min_rtn_u64 v[7:8], v9, v[7:8]
; GFX9_DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9_DPP-NEXT: .LBB32_2:
+; GFX9_DPP-NEXT: ; %bb.2:
; GFX9_DPP-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9_DPP-NEXT: v_readfirstlane_b32 s1, v8
; GFX9_DPP-NEXT: v_readfirstlane_b32 s0, v7
diff --git a/llvm/test/CodeGen/AMDGPU/atomicrmw-expand.ll b/llvm/test/CodeGen/AMDGPU/atomicrmw-expand.ll
index 4f0bc512565d13..edec0eb79bca54 100644
--- a/llvm/test/CodeGen/AMDGPU/atomicrmw-expand.ll
+++ b/llvm/test/CodeGen/AMDGPU/atomicrmw-expand.ll
@@ -68,13 +68,12 @@ define float @syncscope_system(ptr %addr, float %val) #0 {
; GFX90A-NEXT: ; implicit-def: $vgpr2
; GFX90A-NEXT: .LBB0_6: ; %Flow2
; GFX90A-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
-; GFX90A-NEXT: s_cbranch_execz .LBB0_8
; GFX90A-NEXT: ; %bb.7: ; %atomicrmw.shared
; GFX90A-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
; GFX90A-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc
; GFX90A-NEXT: ds_add_rtn_f32 v3, v0, v2
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: .LBB0_8: ; %atomicrmw.phi
+; GFX90A-NEXT: ; %bb.8: ; %atomicrmw.phi
; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX90A-NEXT: v_mov_b32_e32 v0, v3
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -174,14 +173,13 @@ define float @syncscope_workgroup_rtn(ptr %addr, float %val) #0 {
; GFX90A-NEXT: ; implicit-def: $vgpr2
; GFX90A-NEXT: .LBB1_6: ; %Flow2
; GFX90A-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
-; GFX90A-NEXT: s_cbranch_execz .LBB1_8
; GFX90A-NEXT: ; %bb.7: ; %atomicrmw.shared
; GFX90A-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
; GFX90A-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: ds_add_rtn_f32 v3, v0, v2
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: .LBB1_8: ; %atomicrmw.phi
+; GFX90A-NEXT: ; %bb.8: ; %atomicrmw.phi
; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v0, v3
diff --git a/llvm/test/CodeGen/AMDGPU/branch-condition-and.ll b/llvm/test/CodeGen/AMDGPU/branch-condition-and.ll
index cc05129b1b2af6..e32cb494f7702a 100644
--- a/llvm/test/CodeGen/AMDGPU/branch-condition-and.ll
+++ b/llvm/test/CodeGen/AMDGPU/branch-condition-and.ll
@@ -1,5 +1,6 @@
-; RUN: llc -mtriple=amdgcn -verify-machineinstrs -simplifycfg-require-and-preserve-domtree=1 < %s | FileCheck -check-prefix=GCN %s
-; RUN: llc -mtriple=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs -simplifycfg-require-and-preserve-domtree=1 < %s | FileCheck -check-prefix=GCN %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=amdgcn -verify-machineinstrs -simplifycfg-require-and-preserve-domtree=1 < %s | FileCheck -check-prefix=GCN-NO-FLAT %s
+; RUN: llc -mtriple=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs -simplifycfg-require-and-preserve-domtree=1 < %s | FileCheck -check-prefix=GCN-FLAT %s
; This used to crash because during intermediate control flow lowering, there
; was a sequence
@@ -9,20 +10,35 @@
; s_mov_b64_term exec, s[2:3]
; that was not treated correctly.
;
-; GCN-LABEL: {{^}}ham:
-; GCN-DAG: v_cmp_lt_f32_e64 [[OTHERCC:s\[[0-9]+:[0-9]+\]]],
-; GCN-DAG: v_cmp_lt_f32_e32 vcc,
-; GCN: s_and_b64 [[AND:s\[[0-9]+:[0-9]+\]]], vcc, [[OTHERCC]]
-; GCN: s_and_saveexec_b64 [[SAVED:s\[[0-9]+:[0-9]+\]]], [[AND]]
-; GCN-NEXT: s_cbranch_execz .LBB0_{{[0-9]+}}
-
-; GCN-NEXT: ; %bb.{{[0-9]+}}: ; %bb4
-; GCN: ds_write_b32
-
-; GCN: .LBB0_{{[0-9]+}}: ; %UnifiedReturnBlock
-; GCN-NEXT: s_endpgm
-; GCN-NEXT: .Lfunc_end
define amdgpu_ps void @ham(float %arg, float %arg1) #0 {
+; GCN-NO-FLAT-LABEL: ham:
+; GCN-NO-FLAT: ; %bb.0: ; %bb
+; GCN-NO-FLAT-NEXT: v_cmp_lt_f32_e32 vcc, 0, v0
+; GCN-NO-FLAT-NEXT: v_cmp_lt_f32_e64 s[0:1], 0, v1
+; GCN-NO-FLAT-NEXT: s_and_b64 s[0:1], vcc, s[0:1]
+; GCN-NO-FLAT-NEXT: s_and_saveexec_b64 s[2:3], s[0:1]
+; GCN-NO-FLAT-NEXT: s_cbranch_execz .LBB0_2
+; GCN-NO-FLAT-NEXT: ; %bb.1: ; %bb4
+; GCN-NO-FLAT-NEXT: v_mov_b32_e32 v0, 4
+; GCN-NO-FLAT-NEXT: s_mov_b32 m0, -1
+; GCN-NO-FLAT-NEXT: ds_write_b32 v0, v0
+; GCN-NO-FLAT-NEXT: ; divergent unreachable
+; GCN-NO-FLAT-NEXT: .LBB0_2: ; %UnifiedReturnBlock
+; GCN-NO-FLAT-NEXT: s_endpgm
+;
+; GCN-FLAT-LABEL: ham:
+; GCN-FLAT: ; %bb.0: ; %bb
+; GCN-FLAT-NEXT: v_cmp_lt_f32_e32 vcc, 0, v0
+; GCN-FLAT-NEXT: v_cmp_lt_f32_e64 s[0:1], 0, v1
+; GCN-FLAT-NEXT: s_and_b64 s[0:1], vcc, s[0:1]
+; GCN-FLAT-NEXT: s_and_saveexec_b64 s[2:3], s[0:1]
+; GCN-FLAT-NEXT: ; %bb.1: ; %bb4
+; GCN-FLAT-NEXT: v_mov_b32_e32 v0, 4
+; GCN-FLAT-NEXT: s_mov_b32 m0, -1
+; GCN-FLAT-NEXT: ds_write_b32 v0, v0
+; GCN-FLAT-NEXT: ; divergent unreachable
+; GCN-FLAT-NEXT: ; %bb.2: ; %UnifiedReturnBlock
+; GCN-FLAT-NEXT: s_endpgm
bb:
%tmp = fcmp ogt float %arg, 0.000000e+00
%tmp2 = fcmp ogt float %arg1, 0.000000e+00
diff --git a/llvm/test/CodeGen/AMDGPU/dagcombine-v1i8-extractvecelt-crash.ll b/llvm/test/CodeGen/AMDGPU/dagcombine-v1i8-extractvecelt-crash.ll
index eecc91239c7283..6f4b15ad473ca6 100644
--- a/llvm/test/CodeGen/AMDGPU/dagcombine-v1i8-extractvecelt-crash.ll
+++ b/llvm/test/CodeGen/AMDGPU/dagcombine-v1i8-extractvecelt-crash.ll
@@ -9,11 +9,10 @@ define void @wombat(i1 %cond, ptr addrspace(5) %addr) {
; CHECK-NEXT: v_and_b32_e32 v0, 1, v0
; CHECK-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
; CHECK-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; CHECK-NEXT: s_cbranch_execz .LBB0_2
; CHECK-NEXT: ; %bb.1: ; %then
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: v_mov_b32_e32 v2, 0
-; CHECK-NEXT: .LBB0_2: ; %end
+; CHECK-NEXT: ; %bb.2: ; %end
; CHECK-NEXT: s_or_b64 exec, exec, s[4:5]
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: buffer_store_byte v2, v1, s[0:3], 0 offen
diff --git a/llvm/test/CodeGen/AMDGPU/else.ll b/llvm/test/CodeGen/AMDGPU/else.ll
index 655c5cd184a1ed..d3d4b860f9ac7f 100644
--- a/llvm/test/CodeGen/AMDGPU/else.ll
+++ b/llvm/test/CodeGen/AMDGPU/else.ll
@@ -30,7 +30,6 @@ end:
; CHECK-NEXT: s_and_b64 exec, exec, [[INIT_EXEC]]
; CHECK-NEXT: s_and_b64 [[AND_INIT:s\[[0-9]+:[0-9]+\]]], exec, [[DST]]
; CHECK-NEXT: s_xor_b64 exec, exec, [[AND_INIT]]
-; CHECK-NEXT: s_cbranch_execz
define amdgpu_ps void @else_execfix_leave_wqm(i32 %z, float %v) #0 {
main_body:
%cc = icmp sgt i32 %z, 5
diff --git a/llvm/test/CodeGen/AMDGPU/flat-atomicrmw-fadd.ll b/llvm/test/CodeGen/AMDGPU/flat-atomicrmw-fadd.ll
index 1ae1204e3cde18..ecd020dd1a42ca 100644
--- a/llvm/test/CodeGen/AMDGPU/flat-atomicrmw-fadd.ll
+++ b/llvm/test/CodeGen/AMDGPU/flat-atomicrmw-fadd.ll
@@ -108,13 +108,12 @@ define float @flat_agent_atomic_fadd_ret_f32__amdgpu_no_fine_grained_memory__amd
; GFX90A-NEXT: ; implicit-def: $vgpr2
; GFX90A-NEXT: .LBB0_6: ; %Flow2
; GFX90A-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
-; GFX90A-NEXT: s_cbranch_execz .LBB0_8
; GFX90A-NEXT: ; %bb.7: ; %atomicrmw.shared
; GFX90A-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
; GFX90A-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc
; GFX90A-NEXT: ds_add_rtn_f32 v3, v0, v2
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: .LBB0_8: ; %atomicrmw.phi
+; GFX90A-NEXT: ; %bb.8: ; %atomicrmw.phi
; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX90A-NEXT: v_mov_b32_e32 v0, v3
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -2428,13 +2427,12 @@ define float @flat_agent_atomic_fadd_ret_f32__ftz__amdgpu_no_fine_grained_memory
; GFX90A-NEXT: ; implicit-def: $vgpr2
; GFX90A-NEXT: .LBB12_6: ; %Flow2
; GFX90A-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
-; GFX90A-NEXT: s_cbranch_execz .LBB12_8
; GFX90A-NEXT: ; %bb.7: ; %atomicrmw.shared
; GFX90A-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
; GFX90A-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc
; GFX90A-NEXT: ds_add_rtn_f32 v3, v0, v2
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: .LBB12_8: ; %atomicrmw.phi
+; GFX90A-NEXT: ; %bb.8: ; %atomicrmw.phi
; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX90A-NEXT: v_mov_b32_e32 v0, v3
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -5099,13 +5097,12 @@ define float @flat_agent_atomic_fadd_ret_f32__amdgpu_no_fine_grained_memory_amdg
; GFX90A-NEXT: ; implicit-def: $vgpr2
; GFX90A-NEXT: .LBB26_6: ; %Flow2
; GFX90A-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
-; GFX90A-NEXT: s_cbranch_execz .LBB26_8
; GFX90A-NEXT: ; %bb.7: ; %atomicrmw.shared
; GFX90A-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
; GFX90A-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc
; GFX90A-NEXT: ds_add_rtn_f32 v3, v0, v2
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: .LBB26_8: ; %atomicrmw.phi
+; GFX90A-NEXT: ; %bb.8: ; %atomicrmw.phi
; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX90A-NEXT: v_mov_b32_e32 v0, v3
; GFX90A-NEXT: s_waitcnt vmcnt(0)
diff --git a/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll b/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll
index 667a3f398c08a2..9c768ae9cf50de 100644
--- a/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll
+++ b/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll
@@ -835,7 +835,6 @@ define i128 @fptosi_f32_to_i128(float %x) {
; SDAG-NEXT: ; implicit-def: $vgpr6_vgpr7
; SDAG-NEXT: .LBB2_4: ; %Flow
; SDAG-NEXT: s_andn2_saveexec_b64 s[6:7], s[12:13]
-; SDAG-NEXT: s_cbranch_execz .LBB2_6
; SDAG-NEXT: ; %bb.5: ; %fp-to-i-if-then12
; SDAG-NEXT: v_sub_u32_e32 v2, 0x96, v5
; SDAG-NEXT: v_lshrrev_b64 v[0:1], v2, v[6:7]
@@ -849,7 +848,7 @@ define i128 @fptosi_f32_to_i128(float %x) {
; SDAG-NEXT: v_mov_b32_e32 v1, v5
; SDAG-NEXT: v_mad_i64_i32 v[2:3], s[4:5], v9, v3, v[1:2]
; SDAG-NEXT: v_mov_b32_e32 v1, v4
-; SDAG-NEXT: .LBB2_6: ; %Flow1
+; SDAG-NEXT: ; %bb.6: ; %Flow1
; SDAG-NEXT: s_or_b64 exec, exec, s[6:7]
; SDAG-NEXT: .LBB2_7: ; %Flow2
; SDAG-NEXT: s_andn2_saveexec_b64 s[4:5], s[10:11]
@@ -1193,7 +1192,6 @@ define i128 @fptoui_f32_to_i128(float %x) {
; SDAG-NEXT: ; implicit-def: $vgpr6_vgpr7
; SDAG-NEXT: .LBB3_4: ; %Flow
; SDAG-NEXT: s_andn2_saveexec_b64 s[6:7], s[12:13]
-; SDAG-NEXT: s_cbranch_execz .LBB3_6
; SDAG-NEXT: ; %bb.5: ; %fp-to-i-if-then12
; SDAG-NEXT: v_sub_u32_e32 v2, 0x96, v5
; SDAG-NEXT: v_lshrrev_b64 v[0:1], v2, v[6:7]
@@ -1207,7 +1205,7 @@ define i128 @fptoui_f32_to_i128(float %x) {
; SDAG-NEXT: v_mov_b32_e32 v1, v5
; SDAG-NEXT: v_mad_i64_i32 v[2:3], s[4:5], v9, v3, v[1:2]
; SDAG-NEXT: v_mov_b32_e32 v1, v4
-; SDAG-NEXT: .LBB3_6: ; %Flow1
+; SDAG-NEXT: ; %bb.6: ; %Flow1
; SDAG-NEXT: s_or_b64 exec, exec, s[6:7]
; SDAG-NEXT: .LBB3_7: ; %Flow2
; SDAG-NEXT: s_andn2_saveexec_b64 s[4:5], s[10:11]
@@ -1744,7 +1742,6 @@ define i128 @fptosi_bf16_to_i128(bfloat %x) {
; GISEL-NEXT: ; implicit-def: $vgpr9
; GISEL-NEXT: .LBB6_4: ; %Flow
; GISEL-NEXT: s_andn2_saveexec_b64 s[6:7], s[16:17]
-; GISEL-NEXT: s_cbranch_execz .LBB6_6
; GISEL-NEXT: ; %bb.5: ; %fp-to-i-if-then12
; GISEL-NEXT: v_sub_co_u32_e32 v3, vcc, 0x86, v5
; GISEL-NEXT: v_subrev_u32_e32 v2, 64, v3
@@ -1758,7 +1755,7 @@ define i128 @fptosi_bf16_to_i128(bfloat %x) {
; GISEL-NEXT: v_ashrrev_i32_e32 v2, 31, v1
; GISEL-NEXT: v_mul_i32_i24_e32 v0, v0, v9
; GISEL-NEXT: v_mov_b32_e32 v3, v2
-; GISEL-NEXT: .LBB6_6: ; %Flow1
+; GISEL-NEXT: ; %bb.6: ; %Flow1
; GISEL-NEXT: s_or_b64 exec, exec, s[6:7]
; GISEL-NEXT: .LBB6_7: ; %Flow2
; GISEL-NEXT: s_andn2_saveexec_b64 s[6:7], s[14:15]
@@ -2095,7 +2092,6 @@ define i128 @fptoui_bf16_to_i128(bfloat %x) {
; GISEL-NEXT: ; implicit-def: $vgpr9
; GISEL-NEXT: .LBB7_4: ; %Flow
; GISEL-NEXT: s_andn2_saveexec_b64 s[6:7], s[16:17]
-; GISEL-NEXT: s_cbranch_execz .LBB7_6
; GISEL-NEXT: ; %bb.5: ; %fp-to-i-if-then12
; GISEL-NEXT: v_sub_co_u32_e32 v3, vcc, 0x86, v5
; GISEL-NEXT: v_subrev_u32_e32 v2, 64, v3
@@ -2109,7 +2105,7 @@ define i128 @fptoui_bf16_to_i128(bfloat %x) {
; GISEL-NEXT: v_ashrrev_i32_e32 v2, 31, v1
; GISEL-NEXT: v_mul_i32_i24_e32 v0, v0, v9
; GISEL-NEXT: v_mov_b32_e32 v3, v2
-; GISEL-NEXT: .LBB7_6: ; %Flow1
+; GISEL-NEXT: ; %bb.6: ; %Flow1
; GISEL-NEXT: s_or_b64 exec, exec, s[6:7]
; GISEL-NEXT: .LBB7_7: ; %Flow2
; GISEL-NEXT: s_andn2_saveexec_b64 s[6:7], s[14:15]
diff --git a/llvm/test/CodeGen/AMDGPU/insert-skips-flat-vmem-ds.mir b/llvm/test/CodeGen/AMDGPU/insert-skips-flat-vmem-ds.mir
index da38929fab9907..432355397c75a1 100644
--- a/llvm/test/CodeGen/AMDGPU/insert-skips-flat-vmem-ds.mir
+++ b/llvm/test/CodeGen/AMDGPU/insert-skips-flat-vmem-ds.mir
@@ -69,9 +69,7 @@ name: skip_execz_ds
body: |
; CHECK-LABEL: name: skip_execz_ds
; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: S_CBRANCH_EXECZ %bb.2, implicit $exec
+ ; CHECK-NEXT: successors: %bb.1(0x40000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors: %bb.2(0x80000000)
diff --git a/llvm/test/CodeGen/AMDGPU/insert-skips-gfx10.mir b/llvm/test/CodeGen/AMDGPU/insert-skips-gfx10.mir
index b4ed3cafbacb5f..a2f01ce9f371e0 100644
--- a/llvm/test/CodeGen/AMDGPU/insert-skips-gfx10.mir
+++ b/llvm/test/CodeGen/AMDGPU/insert-skips-gfx10.mir
@@ -6,9 +6,7 @@ name: skip_waitcnt_vscnt
body: |
; CHECK-LABEL: name: skip_waitcnt_vscnt
; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: S_CBRANCH_EXECZ %bb.2, implicit $exec
+ ; CHECK-NEXT: successors: %bb.1(0x40000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors: %bb.2(0x80000000)
@@ -36,9 +34,7 @@ name: skip_waitcnt_expcnt
body: |
; CHECK-LABEL: name: skip_waitcnt_expcnt
; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: S_CBRANCH_EXECZ %bb.2, implicit $exec
+ ; CHECK-NEXT: successors: %bb.1(0x40000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors: %bb.2(0x80000000)
@@ -66,9 +62,7 @@ name: skip_waitcnt_vmcnt
body: |
; CHECK-LABEL: name: skip_waitcnt_vmcnt
; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: S_CBRANCH_EXECZ %bb.2, implicit $exec
+ ; CHECK-NEXT: successors: %bb.1(0x40000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors: %bb.2(0x80000000)
@@ -96,9 +90,7 @@ name: skip_waitcnt_lgkmcnt
body: |
; CHECK-LABEL: name: skip_waitcnt_lgkmcnt
; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: S_CBRANCH_EXECZ %bb.2, implicit $exec
+ ; CHECK-NEXT: successors: %bb.1(0x40000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors: %bb.2(0x80000000)
@@ -126,9 +118,7 @@ name: skip_wait_idle
body: |
; CHECK-LABEL: name: skip_wait_idle
; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: S_CBRANCH_EXECZ %bb.2, implicit $exec
+ ; CHECK-NEXT: successors: %bb.1(0x40000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors: %bb.2(0x80000000)
diff --git a/llvm/test/CodeGen/AMDGPU/insert-skips-gfx12.mir b/llvm/test/CodeGen/AMDGPU/insert-skips-gfx12.mir
index 2d092974ac566f..eabea70987d459 100644
--- a/llvm/test/CodeGen/AMDGPU/insert-skips-gfx12.mir
+++ b/llvm/test/CodeGen/AMDGPU/insert-skips-gfx12.mir
@@ -6,9 +6,7 @@ name: skip_wait_loadcnt
body: |
; CHECK-LABEL: name: skip_wait_loadcnt
; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: S_CBRANCH_EXECZ %bb.2, implicit $exec
+ ; CHECK-NEXT: successors: %bb.1(0x40000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors: %bb.2(0x80000000)
@@ -36,9 +34,7 @@ name: skip_wait_loadcnt_dscnt
body: |
; CHECK-LABEL: name: skip_wait_loadcnt_dscnt
; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: S_CBRANCH_EXECZ %bb.2, implicit $exec
+ ; CHECK-NEXT: successors: %bb.1(0x40000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors: %bb.2(0x80000000)
@@ -66,9 +62,7 @@ name: skip_wait_storecnt
body: |
; CHECK-LABEL: name: skip_wait_storecnt
; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: S_CBRANCH_EXECZ %bb.2, implicit $exec
+ ; CHECK-NEXT: successors: %bb.1(0x40000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors: %bb.2(0x80000000)
@@ -96,9 +90,7 @@ name: skip_wait_storecnt_dscnt
body: |
; CHECK-LABEL: name: skip_wait_storecnt_dscnt
; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: S_CBRANCH_EXECZ %bb.2, implicit $exec
+ ; CHECK-NEXT: successors: %bb.1(0x40000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors: %bb.2(0x80000000)
@@ -126,9 +118,7 @@ name: skip_wait_samplecnt
body: |
; CHECK-LABEL: name: skip_wait_samplecnt
; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: S_CBRANCH_EXECZ %bb.2, implicit $exec
+ ; CHECK-NEXT: successors: %bb.1(0x40000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors: %bb.2(0x80000000)
@@ -156,9 +146,7 @@ name: skip_wait_bvhcnt
body: |
; CHECK-LABEL: name: skip_wait_bvhcnt
; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: S_CBRANCH_EXECZ %bb.2, implicit $exec
+ ; CHECK-NEXT: successors: %bb.1(0x40000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors: %bb.2(0x80000000)
@@ -186,9 +174,7 @@ name: skip_wait_expcnt
body: |
; CHECK-LABEL: name: skip_wait_expcnt
; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: S_CBRANCH_EXECZ %bb.2, implicit $exec
+ ; CHECK-NEXT: successors: %bb.1(0x40000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors: %bb.2(0x80000000)
@@ -216,9 +202,7 @@ name: skip_wait_dscnt
body: |
; CHECK-LABEL: name: skip_wait_dscnt
; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: S_CBRANCH_EXECZ %bb.2, implicit $exec
+ ; CHECK-NEXT: successors: %bb.1(0x40000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors: %bb.2(0x80000000)
@@ -246,9 +230,7 @@ name: skip_wait_kmcnt
body: |
; CHECK-LABEL: name: skip_wait_kmcnt
; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: S_CBRANCH_EXECZ %bb.2, implicit $exec
+ ; CHECK-NEXT: successors: %bb.1(0x40000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors: %bb.2(0x80000000)
@@ -276,9 +258,7 @@ name: skip_wait_idle
body: |
; CHECK-LABEL: name: skip_wait_idle
; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: S_CBRANCH_EXECZ %bb.2, implicit $exec
+ ; CHECK-NEXT: successors: %bb.1(0x40000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors: %bb.2(0x80000000)
diff --git a/llvm/test/CodeGen/AMDGPU/insert_waitcnt_for_precise_memory.ll b/llvm/test/CodeGen/AMDGPU/insert_waitcnt_for_precise_memory.ll
index 0045082eedb0a3..b38785ca178614 100644
--- a/llvm/test/CodeGen/AMDGPU/insert_waitcnt_for_precise_memory.ll
+++ b/llvm/test/CodeGen/AMDGPU/insert_waitcnt_for_precise_memory.ll
@@ -698,7 +698,6 @@ define amdgpu_kernel void @atomic_add_local(ptr addrspace(3) %local) {
; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, s1, v0
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GFX9-NEXT: s_cbranch_execz .LBB5_2
; GFX9-NEXT: ; %bb.1:
; GFX9-NEXT: s_load_dword s2, s[2:3], 0x24
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
@@ -708,7 +707,7 @@ define amdgpu_kernel void @atomic_add_local(ptr addrspace(3) %local) {
; GFX9-NEXT: v_mov_b32_e32 v0, s2
; GFX9-NEXT: ds_add_u32 v0, v1
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: .LBB5_2:
+; GFX9-NEXT: ; %bb.2:
; GFX9-NEXT: s_endpgm
;
; GFX90A-LABEL: atomic_add_local:
@@ -718,7 +717,6 @@ define amdgpu_kernel void @atomic_add_local(ptr addrspace(3) %local) {
; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v0, s1, v0
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GFX90A-NEXT: s_cbranch_execz .LBB5_2
; GFX90A-NEXT: ; %bb.1:
; GFX90A-NEXT: s_load_dword s2, s[2:3], 0x24
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
@@ -728,7 +726,7 @@ define amdgpu_kernel void @atomic_add_local(ptr addrspace(3) %local) {
; GFX90A-NEXT: v_mov_b32_e32 v0, s2
; GFX90A-NEXT: ds_add_u32 v0, v1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: .LBB5_2:
+; GFX90A-NEXT: ; %bb.2:
; GFX90A-NEXT: s_endpgm
;
; GFX10-LABEL: atomic_add_local:
@@ -758,7 +756,6 @@ define amdgpu_kernel void @atomic_add_local(ptr addrspace(3) %local) {
; GFX9-FLATSCR-NEXT: v_mbcnt_hi_u32_b32 v0, s1, v0
; GFX9-FLATSCR-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX9-FLATSCR-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GFX9-FLATSCR-NEXT: s_cbranch_execz .LBB5_2
; GFX9-FLATSCR-NEXT: ; %bb.1:
; GFX9-FLATSCR-NEXT: s_load_dword s2, s[2:3], 0x24
; GFX9-FLATSCR-NEXT: s_waitcnt lgkmcnt(0)
@@ -768,7 +765,7 @@ define amdgpu_kernel void @atomic_add_local(ptr addrspace(3) %local) {
; GFX9-FLATSCR-NEXT: v_mov_b32_e32 v0, s2
; GFX9-FLATSCR-NEXT: ds_add_u32 v0, v1
; GFX9-FLATSCR-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-FLATSCR-NEXT: .LBB5_2:
+; GFX9-FLATSCR-NEXT: ; %bb.2:
; GFX9-FLATSCR-NEXT: s_endpgm
;
; GFX11-LABEL: atomic_add_local:
@@ -900,7 +897,6 @@ define amdgpu_kernel void @atomic_add_ret_local(ptr addrspace(1) %out, ptr addrs
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX9-NEXT: ; implicit-def: $vgpr1
; GFX9-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX9-NEXT: s_cbranch_execz .LBB7_2
; GFX9-NEXT: ; %bb.1:
; GFX9-NEXT: s_load_dword s6, s[2:3], 0x2c
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
@@ -910,7 +906,7 @@ define amdgpu_kernel void @atomic_add_ret_local(ptr addrspace(1) %out, ptr addrs
; GFX9-NEXT: v_mov_b32_e32 v1, s6
; GFX9-NEXT: ds_add_rtn_u32 v1, v1, v2
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: .LBB7_2:
+; GFX9-NEXT: ; %bb.2:
; GFX9-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
@@ -929,7 +925,6 @@ define amdgpu_kernel void @atomic_add_ret_local(ptr addrspace(1) %out, ptr addrs
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX90A-NEXT: ; implicit-def: $vgpr1
; GFX90A-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX90A-NEXT: s_cbranch_execz .LBB7_2
; GFX90A-NEXT: ; %bb.1:
; GFX90A-NEXT: s_load_dword s6, s[2:3], 0x2c
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
@@ -939,7 +934,7 @@ define amdgpu_kernel void @atomic_add_ret_local(ptr addrspace(1) %out, ptr addrs
; GFX90A-NEXT: v_mov_b32_e32 v1, s6
; GFX90A-NEXT: ds_add_rtn_u32 v1, v1, v2
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: .LBB7_2:
+; GFX90A-NEXT: ; %bb.2:
; GFX90A-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX90A-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
@@ -988,7 +983,6 @@ define amdgpu_kernel void @atomic_add_ret_local(ptr addrspace(1) %out, ptr addrs
; GFX9-FLATSCR-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX9-FLATSCR-NEXT: ; implicit-def: $vgpr1
; GFX9-FLATSCR-NEXT: s_and_saveexec_b64 s[0:1], vcc
-; GFX9-FLATSCR-NEXT: s_cbranch_execz .LBB7_2
; GFX9-FLATSCR-NEXT: ; %bb.1:
; GFX9-FLATSCR-NEXT: s_load_dword s6, s[2:3], 0x2c
; GFX9-FLATSCR-NEXT: s_waitcnt lgkmcnt(0)
@@ -998,7 +992,7 @@ define amdgpu_kernel void @atomic_add_ret_local(ptr addrspace(1) %out, ptr addrs
; GFX9-FLATSCR-NEXT: v_mov_b32_e32 v1, s6
; GFX9-FLATSCR-NEXT: ds_add_rtn_u32 v1, v1, v2
; GFX9-FLATSCR-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-FLATSCR-NEXT: .LBB7_2:
+; GFX9-FLATSCR-NEXT: ; %bb.2:
; GFX9-FLATSCR-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9-FLATSCR-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9-FLATSCR-NEXT: s_waitcnt lgkmcnt(0)
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umax.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umax.ll
index 7371d498a70706..31ed8aa420c179 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umax.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umax.ll
@@ -539,11 +539,10 @@ define amdgpu_kernel void @divergent_cfg(ptr addrspace(1) %out, i32 %in) {
; GFX8DAGISEL-NEXT: ; implicit-def: $sgpr4
; GFX8DAGISEL-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX8DAGISEL-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX8DAGISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX8DAGISEL-NEXT: ; %bb.1: ; %else
; GFX8DAGISEL-NEXT: s_load_dword s4, s[2:3], 0x2c
; GFX8DAGISEL-NEXT: ; implicit-def: $vgpr0
-; GFX8DAGISEL-NEXT: .LBB4_2: ; %Flow
+; GFX8DAGISEL-NEXT: ; %bb.2: ; %Flow
; GFX8DAGISEL-NEXT: s_or_saveexec_b64 s[0:1], s[0:1]
; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, s4
@@ -576,13 +575,12 @@ define amdgpu_kernel void @divergent_cfg(ptr addrspace(1) %out, i32 %in) {
; GFX8GISEL-NEXT: ; implicit-def: $sgpr6
; GFX8GISEL-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX8GISEL-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX8GISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX8GISEL-NEXT: ; %bb.1: ; %else
; GFX8GISEL-NEXT: s_load_dword s4, s[2:3], 0x2c
; GFX8GISEL-NEXT: ; implicit-def: $vgpr0
; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX8GISEL-NEXT: s_mov_b32 s6, s4
-; GFX8GISEL-NEXT: .LBB4_2: ; %Flow
+; GFX8GISEL-NEXT: ; %bb.2: ; %Flow
; GFX8GISEL-NEXT: s_andn2_saveexec_b64 s[0:1], s[0:1]
; GFX8GISEL-NEXT: s_cbranch_execz .LBB4_5
; GFX8GISEL-NEXT: ; %bb.3: ; %if
@@ -611,11 +609,10 @@ define amdgpu_kernel void @divergent_cfg(ptr addrspace(1) %out, i32 %in) {
; GFX9DAGISEL-NEXT: ; implicit-def: $sgpr4
; GFX9DAGISEL-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX9DAGISEL-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX9DAGISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX9DAGISEL-NEXT: ; %bb.1: ; %else
; GFX9DAGISEL-NEXT: s_load_dword s4, s[2:3], 0x2c
; GFX9DAGISEL-NEXT: ; implicit-def: $vgpr0
-; GFX9DAGISEL-NEXT: .LBB4_2: ; %Flow
+; GFX9DAGISEL-NEXT: ; %bb.2: ; %Flow
; GFX9DAGISEL-NEXT: s_or_saveexec_b64 s[0:1], s[0:1]
; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX9DAGISEL-NEXT: v_mov_b32_e32 v1, s4
@@ -647,13 +644,12 @@ define amdgpu_kernel void @divergent_cfg(ptr addrspace(1) %out, i32 %in) {
; GFX9GISEL-NEXT: ; implicit-def: $sgpr6
; GFX9GISEL-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX9GISEL-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX9GISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX9GISEL-NEXT: ; %bb.1: ; %else
; GFX9GISEL-NEXT: s_load_dword s4, s[2:3], 0x2c
; GFX9GISEL-NEXT: ; implicit-def: $vgpr0
; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX9GISEL-NEXT: s_mov_b32 s6, s4
-; GFX9GISEL-NEXT: .LBB4_2: ; %Flow
+; GFX9GISEL-NEXT: ; %bb.2: ; %Flow
; GFX9GISEL-NEXT: s_andn2_saveexec_b64 s[0:1], s[0:1]
; GFX9GISEL-NEXT: s_cbranch_execz .LBB4_5
; GFX9GISEL-NEXT: ; %bb.3: ; %if
@@ -681,11 +677,10 @@ define amdgpu_kernel void @divergent_cfg(ptr addrspace(1) %out, i32 %in) {
; GFX1064DAGISEL-NEXT: ; implicit-def: $sgpr4
; GFX1064DAGISEL-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX1064DAGISEL-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX1064DAGISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX1064DAGISEL-NEXT: ; %bb.1: ; %else
; GFX1064DAGISEL-NEXT: s_load_dword s4, s[2:3], 0x2c
; GFX1064DAGISEL-NEXT: ; implicit-def: $vgpr0
-; GFX1064DAGISEL-NEXT: .LBB4_2: ; %Flow
+; GFX1064DAGISEL-NEXT: ; %bb.2: ; %Flow
; GFX1064DAGISEL-NEXT: s_or_saveexec_b64 s[0:1], s[0:1]
; GFX1064DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v1, s4
@@ -717,13 +712,12 @@ define amdgpu_kernel void @divergent_cfg(ptr addrspace(1) %out, i32 %in) {
; GFX1064GISEL-NEXT: ; implicit-def: $sgpr6
; GFX1064GISEL-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX1064GISEL-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX1064GISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX1064GISEL-NEXT: ; %bb.1: ; %else
; GFX1064GISEL-NEXT: s_load_dword s4, s[2:3], 0x2c
; GFX1064GISEL-NEXT: ; implicit-def: $vgpr0
; GFX1064GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1064GISEL-NEXT: s_mov_b32 s6, s4
-; GFX1064GISEL-NEXT: .LBB4_2: ; %Flow
+; GFX1064GISEL-NEXT: ; %bb.2: ; %Flow
; GFX1064GISEL-NEXT: s_andn2_saveexec_b64 s[0:1], s[0:1]
; GFX1064GISEL-NEXT: s_cbranch_execz .LBB4_5
; GFX1064GISEL-NEXT: ; %bb.3: ; %if
@@ -751,11 +745,10 @@ define amdgpu_kernel void @divergent_cfg(ptr addrspace(1) %out, i32 %in) {
; GFX1032DAGISEL-NEXT: ; implicit-def: $sgpr1
; GFX1032DAGISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo
; GFX1032DAGISEL-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX1032DAGISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX1032DAGISEL-NEXT: ; %bb.1: ; %else
; GFX1032DAGISEL-NEXT: s_load_dword s1, s[2:3], 0x2c
; GFX1032DAGISEL-NEXT: ; implicit-def: $vgpr0
-; GFX1032DAGISEL-NEXT: .LBB4_2: ; %Flow
+; GFX1032DAGISEL-NEXT: ; %bb.2: ; %Flow
; GFX1032DAGISEL-NEXT: s_or_saveexec_b32 s0, s0
; GFX1032DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v1, s1
@@ -787,13 +780,12 @@ define amdgpu_kernel void @divergent_cfg(ptr addrspace(1) %out, i32 %in) {
; GFX1032GISEL-NEXT: ; implicit-def: $sgpr0
; GFX1032GISEL-NEXT: s_and_saveexec_b32 s1, vcc_lo
; GFX1032GISEL-NEXT: s_xor_b32 s1, exec_lo, s1
-; GFX1032GISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX1032GISEL-NEXT: ; %bb.1: ; %else
; GFX1032GISEL-NEXT: s_load_dword s0, s[2:3], 0x2c
; GFX1032GISEL-NEXT: ; implicit-def: $vgpr0
; GFX1032GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1032GISEL-NEXT: s_mov_b32 s0, s0
-; GFX1032GISEL-NEXT: .LBB4_2: ; %Flow
+; GFX1032GISEL-NEXT: ; %bb.2: ; %Flow
; GFX1032GISEL-NEXT: s_andn2_saveexec_b32 s1, s1
; GFX1032GISEL-NEXT: s_cbranch_execz .LBB4_5
; GFX1032GISEL-NEXT: ; %bb.3: ; %if
@@ -823,11 +815,10 @@ define amdgpu_kernel void @divergent_cfg(ptr addrspace(1) %out, i32 %in) {
; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1164DAGISEL-NEXT: v_cmpx_lt_u32_e32 15, v0
; GFX1164DAGISEL-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX1164DAGISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX1164DAGISEL-NEXT: ; %bb.1: ; %else
; GFX1164DAGISEL-NEXT: s_load_b32 s4, s[2:3], 0x2c
; GFX1164DAGISEL-NEXT: ; implicit-def: $vgpr0
-; GFX1164DAGISEL-NEXT: .LBB4_2: ; %Flow
+; GFX1164DAGISEL-NEXT: ; %bb.2: ; %Flow
; GFX1164DAGISEL-NEXT: s_or_saveexec_b64 s[0:1], s[0:1]
; GFX1164DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v1, s4
@@ -864,13 +855,12 @@ define amdgpu_kernel void @divergent_cfg(ptr addrspace(1) %out, i32 %in) {
; GFX1164GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1164GISEL-NEXT: v_cmpx_le_u32_e32 16, v0
; GFX1164GISEL-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX1164GISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX1164GISEL-NEXT: ; %bb.1: ; %else
; GFX1164GISEL-NEXT: s_load_b32 s4, s[2:3], 0x2c
; GFX1164GISEL-NEXT: ; implicit-def: $vgpr0
; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1164GISEL-NEXT: s_mov_b32 s6, s4
-; GFX1164GISEL-NEXT: .LBB4_2: ; %Flow
+; GFX1164GISEL-NEXT: ; %bb.2: ; %Flow
; GFX1164GISEL-NEXT: s_and_not1_saveexec_b64 s[0:1], s[0:1]
; GFX1164GISEL-NEXT: s_cbranch_execz .LBB4_5
; GFX1164GISEL-NEXT: ; %bb.3: ; %if
@@ -903,11 +893,10 @@ define amdgpu_kernel void @divergent_cfg(ptr addrspace(1) %out, i32 %in) {
; GFX1132DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1132DAGISEL-NEXT: v_cmpx_lt_u32_e32 15, v0
; GFX1132DAGISEL-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX1132DAGISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX1132DAGISEL-NEXT: ; %bb.1: ; %else
; GFX1132DAGISEL-NEXT: s_load_b32 s1, s[2:3], 0x2c
; GFX1132DAGISEL-NEXT: ; implicit-def: $vgpr0
-; GFX1132DAGISEL-NEXT: .LBB4_2: ; %Flow
+; GFX1132DAGISEL-NEXT: ; %bb.2: ; %Flow
; GFX1132DAGISEL-NEXT: s_or_saveexec_b32 s0, s0
; GFX1132DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1132DAGISEL-NEXT: v_mov_b32_e32 v1, s1
@@ -944,13 +933,12 @@ define amdgpu_kernel void @divergent_cfg(ptr addrspace(1) %out, i32 %in) {
; GFX1132GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1132GISEL-NEXT: v_cmpx_le_u32_e32 16, v0
; GFX1132GISEL-NEXT: s_xor_b32 s1, exec_lo, s1
-; GFX1132GISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX1132GISEL-NEXT: ; %bb.1: ; %else
; GFX1132GISEL-NEXT: s_load_b32 s0, s[2:3], 0x2c
; GFX1132GISEL-NEXT: ; implicit-def: $vgpr0
; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1132GISEL-NEXT: s_mov_b32 s0, s0
-; GFX1132GISEL-NEXT: .LBB4_2: ; %Flow
+; GFX1132GISEL-NEXT: ; %bb.2: ; %Flow
; GFX1132GISEL-NEXT: s_and_not1_saveexec_b32 s1, s1
; GFX1132GISEL-NEXT: s_cbranch_execz .LBB4_5
; GFX1132GISEL-NEXT: ; %bb.3: ; %if
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umin.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umin.ll
index 60af21524a04a1..5939e26898f8e9 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umin.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umin.ll
@@ -540,11 +540,10 @@ define amdgpu_kernel void @divergent_cfg(ptr addrspace(1) %out, i32 %in) {
; GFX8DAGISEL-NEXT: ; implicit-def: $sgpr4
; GFX8DAGISEL-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX8DAGISEL-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX8DAGISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX8DAGISEL-NEXT: ; %bb.1: ; %else
; GFX8DAGISEL-NEXT: s_load_dword s4, s[2:3], 0x2c
; GFX8DAGISEL-NEXT: ; implicit-def: $vgpr0
-; GFX8DAGISEL-NEXT: .LBB4_2: ; %Flow
+; GFX8DAGISEL-NEXT: ; %bb.2: ; %Flow
; GFX8DAGISEL-NEXT: s_or_saveexec_b64 s[0:1], s[0:1]
; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, s4
@@ -577,13 +576,12 @@ define amdgpu_kernel void @divergent_cfg(ptr addrspace(1) %out, i32 %in) {
; GFX8GISEL-NEXT: ; implicit-def: $sgpr6
; GFX8GISEL-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX8GISEL-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX8GISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX8GISEL-NEXT: ; %bb.1: ; %else
; GFX8GISEL-NEXT: s_load_dword s4, s[2:3], 0x2c
; GFX8GISEL-NEXT: ; implicit-def: $vgpr0
; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX8GISEL-NEXT: s_mov_b32 s6, s4
-; GFX8GISEL-NEXT: .LBB4_2: ; %Flow
+; GFX8GISEL-NEXT: ; %bb.2: ; %Flow
; GFX8GISEL-NEXT: s_andn2_saveexec_b64 s[0:1], s[0:1]
; GFX8GISEL-NEXT: s_cbranch_execz .LBB4_5
; GFX8GISEL-NEXT: ; %bb.3: ; %if
@@ -612,11 +610,10 @@ define amdgpu_kernel void @divergent_cfg(ptr addrspace(1) %out, i32 %in) {
; GFX9DAGISEL-NEXT: ; implicit-def: $sgpr4
; GFX9DAGISEL-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX9DAGISEL-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX9DAGISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX9DAGISEL-NEXT: ; %bb.1: ; %else
; GFX9DAGISEL-NEXT: s_load_dword s4, s[2:3], 0x2c
; GFX9DAGISEL-NEXT: ; implicit-def: $vgpr0
-; GFX9DAGISEL-NEXT: .LBB4_2: ; %Flow
+; GFX9DAGISEL-NEXT: ; %bb.2: ; %Flow
; GFX9DAGISEL-NEXT: s_or_saveexec_b64 s[0:1], s[0:1]
; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX9DAGISEL-NEXT: v_mov_b32_e32 v1, s4
@@ -648,13 +645,12 @@ define amdgpu_kernel void @divergent_cfg(ptr addrspace(1) %out, i32 %in) {
; GFX9GISEL-NEXT: ; implicit-def: $sgpr6
; GFX9GISEL-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX9GISEL-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX9GISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX9GISEL-NEXT: ; %bb.1: ; %else
; GFX9GISEL-NEXT: s_load_dword s4, s[2:3], 0x2c
; GFX9GISEL-NEXT: ; implicit-def: $vgpr0
; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX9GISEL-NEXT: s_mov_b32 s6, s4
-; GFX9GISEL-NEXT: .LBB4_2: ; %Flow
+; GFX9GISEL-NEXT: ; %bb.2: ; %Flow
; GFX9GISEL-NEXT: s_andn2_saveexec_b64 s[0:1], s[0:1]
; GFX9GISEL-NEXT: s_cbranch_execz .LBB4_5
; GFX9GISEL-NEXT: ; %bb.3: ; %if
@@ -682,11 +678,10 @@ define amdgpu_kernel void @divergent_cfg(ptr addrspace(1) %out, i32 %in) {
; GFX1064DAGISEL-NEXT: ; implicit-def: $sgpr4
; GFX1064DAGISEL-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX1064DAGISEL-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX1064DAGISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX1064DAGISEL-NEXT: ; %bb.1: ; %else
; GFX1064DAGISEL-NEXT: s_load_dword s4, s[2:3], 0x2c
; GFX1064DAGISEL-NEXT: ; implicit-def: $vgpr0
-; GFX1064DAGISEL-NEXT: .LBB4_2: ; %Flow
+; GFX1064DAGISEL-NEXT: ; %bb.2: ; %Flow
; GFX1064DAGISEL-NEXT: s_or_saveexec_b64 s[0:1], s[0:1]
; GFX1064DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v1, s4
@@ -718,13 +713,12 @@ define amdgpu_kernel void @divergent_cfg(ptr addrspace(1) %out, i32 %in) {
; GFX1064GISEL-NEXT: ; implicit-def: $sgpr6
; GFX1064GISEL-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX1064GISEL-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX1064GISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX1064GISEL-NEXT: ; %bb.1: ; %else
; GFX1064GISEL-NEXT: s_load_dword s4, s[2:3], 0x2c
; GFX1064GISEL-NEXT: ; implicit-def: $vgpr0
; GFX1064GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1064GISEL-NEXT: s_mov_b32 s6, s4
-; GFX1064GISEL-NEXT: .LBB4_2: ; %Flow
+; GFX1064GISEL-NEXT: ; %bb.2: ; %Flow
; GFX1064GISEL-NEXT: s_andn2_saveexec_b64 s[0:1], s[0:1]
; GFX1064GISEL-NEXT: s_cbranch_execz .LBB4_5
; GFX1064GISEL-NEXT: ; %bb.3: ; %if
@@ -752,11 +746,10 @@ define amdgpu_kernel void @divergent_cfg(ptr addrspace(1) %out, i32 %in) {
; GFX1032DAGISEL-NEXT: ; implicit-def: $sgpr1
; GFX1032DAGISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo
; GFX1032DAGISEL-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX1032DAGISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX1032DAGISEL-NEXT: ; %bb.1: ; %else
; GFX1032DAGISEL-NEXT: s_load_dword s1, s[2:3], 0x2c
; GFX1032DAGISEL-NEXT: ; implicit-def: $vgpr0
-; GFX1032DAGISEL-NEXT: .LBB4_2: ; %Flow
+; GFX1032DAGISEL-NEXT: ; %bb.2: ; %Flow
; GFX1032DAGISEL-NEXT: s_or_saveexec_b32 s0, s0
; GFX1032DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v1, s1
@@ -788,13 +781,12 @@ define amdgpu_kernel void @divergent_cfg(ptr addrspace(1) %out, i32 %in) {
; GFX1032GISEL-NEXT: ; implicit-def: $sgpr0
; GFX1032GISEL-NEXT: s_and_saveexec_b32 s1, vcc_lo
; GFX1032GISEL-NEXT: s_xor_b32 s1, exec_lo, s1
-; GFX1032GISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX1032GISEL-NEXT: ; %bb.1: ; %else
; GFX1032GISEL-NEXT: s_load_dword s0, s[2:3], 0x2c
; GFX1032GISEL-NEXT: ; implicit-def: $vgpr0
; GFX1032GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1032GISEL-NEXT: s_mov_b32 s0, s0
-; GFX1032GISEL-NEXT: .LBB4_2: ; %Flow
+; GFX1032GISEL-NEXT: ; %bb.2: ; %Flow
; GFX1032GISEL-NEXT: s_andn2_saveexec_b32 s1, s1
; GFX1032GISEL-NEXT: s_cbranch_execz .LBB4_5
; GFX1032GISEL-NEXT: ; %bb.3: ; %if
@@ -824,11 +816,10 @@ define amdgpu_kernel void @divergent_cfg(ptr addrspace(1) %out, i32 %in) {
; GFX1164DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1164DAGISEL-NEXT: v_cmpx_lt_u32_e32 15, v0
; GFX1164DAGISEL-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX1164DAGISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX1164DAGISEL-NEXT: ; %bb.1: ; %else
; GFX1164DAGISEL-NEXT: s_load_b32 s4, s[2:3], 0x2c
; GFX1164DAGISEL-NEXT: ; implicit-def: $vgpr0
-; GFX1164DAGISEL-NEXT: .LBB4_2: ; %Flow
+; GFX1164DAGISEL-NEXT: ; %bb.2: ; %Flow
; GFX1164DAGISEL-NEXT: s_or_saveexec_b64 s[0:1], s[0:1]
; GFX1164DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v1, s4
@@ -865,13 +856,12 @@ define amdgpu_kernel void @divergent_cfg(ptr addrspace(1) %out, i32 %in) {
; GFX1164GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1164GISEL-NEXT: v_cmpx_le_u32_e32 16, v0
; GFX1164GISEL-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX1164GISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX1164GISEL-NEXT: ; %bb.1: ; %else
; GFX1164GISEL-NEXT: s_load_b32 s4, s[2:3], 0x2c
; GFX1164GISEL-NEXT: ; implicit-def: $vgpr0
; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1164GISEL-NEXT: s_mov_b32 s6, s4
-; GFX1164GISEL-NEXT: .LBB4_2: ; %Flow
+; GFX1164GISEL-NEXT: ; %bb.2: ; %Flow
; GFX1164GISEL-NEXT: s_and_not1_saveexec_b64 s[0:1], s[0:1]
; GFX1164GISEL-NEXT: s_cbranch_execz .LBB4_5
; GFX1164GISEL-NEXT: ; %bb.3: ; %if
@@ -904,11 +894,10 @@ define amdgpu_kernel void @divergent_cfg(ptr addrspace(1) %out, i32 %in) {
; GFX1132DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1132DAGISEL-NEXT: v_cmpx_lt_u32_e32 15, v0
; GFX1132DAGISEL-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX1132DAGISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX1132DAGISEL-NEXT: ; %bb.1: ; %else
; GFX1132DAGISEL-NEXT: s_load_b32 s1, s[2:3], 0x2c
; GFX1132DAGISEL-NEXT: ; implicit-def: $vgpr0
-; GFX1132DAGISEL-NEXT: .LBB4_2: ; %Flow
+; GFX1132DAGISEL-NEXT: ; %bb.2: ; %Flow
; GFX1132DAGISEL-NEXT: s_or_saveexec_b32 s0, s0
; GFX1132DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1132DAGISEL-NEXT: v_mov_b32_e32 v1, s1
@@ -945,13 +934,12 @@ define amdgpu_kernel void @divergent_cfg(ptr addrspace(1) %out, i32 %in) {
; GFX1132GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1132GISEL-NEXT: v_cmpx_le_u32_e32 16, v0
; GFX1132GISEL-NEXT: s_xor_b32 s1, exec_lo, s1
-; GFX1132GISEL-NEXT: s_cbranch_execz .LBB4_2
; GFX1132GISEL-NEXT: ; %bb.1: ; %else
; GFX1132GISEL-NEXT: s_load_b32 s0, s[2:3], 0x2c
; GFX1132GISEL-NEXT: ; implicit-def: $vgpr0
; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX1132GISEL-NEXT: s_mov_b32 s0, s0
-; GFX1132GISEL-NEXT: .LBB4_2: ; %Flow
+; GFX1132GISEL-NEXT: ; %bb.2: ; %Flow
; GFX1132GISEL-NEXT: s_and_not1_saveexec_b32 s1, s1
; GFX1132GISEL-NEXT: s_cbranch_execz .LBB4_5
; GFX1132GISEL-NEXT: ; %bb.3: ; %if
diff --git a/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fadd.ll b/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fadd.ll
index 295ae94902da73..7dd8357642bbe7 100644
--- a/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fadd.ll
+++ b/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fadd.ll
@@ -7149,7 +7149,6 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX940-NEXT: s_add_i32 s5, s5, 4
; GFX940-NEXT: ; implicit-def: $vgpr1
; GFX940-NEXT: s_and_saveexec_b64 s[6:7], vcc
-; GFX940-NEXT: s_cbranch_execz .LBB28_2
; GFX940-NEXT: ; %bb.1:
; GFX940-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
; GFX940-NEXT: s_lshl_b32 s8, s5, 3
@@ -7158,7 +7157,7 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX940-NEXT: v_mov_b32_e32 v2, s8
; GFX940-NEXT: ds_add_rtn_f32 v1, v2, v1
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
-; GFX940-NEXT: .LBB28_2:
+; GFX940-NEXT: ; %bb.2:
; GFX940-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX940-NEXT: s_mov_b64 s[8:9], exec
; GFX940-NEXT: v_readfirstlane_b32 s10, v1
@@ -7166,7 +7165,6 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX940-NEXT: v_mbcnt_hi_u32_b32 v1, s9, v1
; GFX940-NEXT: v_cmp_eq_u32_e64 s[0:1], 0, v1
; GFX940-NEXT: s_and_saveexec_b64 s[6:7], s[0:1]
-; GFX940-NEXT: s_cbranch_execz .LBB28_4
; GFX940-NEXT: ; %bb.3:
; GFX940-NEXT: s_bcnt1_i32_b64 s0, s[8:9]
; GFX940-NEXT: v_cvt_f32_ubyte0_e32 v1, s0
@@ -7175,7 +7173,7 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX940-NEXT: v_mov_b32_e32 v2, s0
; GFX940-NEXT: ds_add_f32 v2, v1
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
-; GFX940-NEXT: .LBB28_4:
+; GFX940-NEXT: ; %bb.4:
; GFX940-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX940-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
; GFX940-NEXT: v_mul_f32_e32 v0, 0x42280000, v0
@@ -7204,12 +7202,11 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX940-NEXT: ; implicit-def: $vgpr2
; GFX940-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX940-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX940-NEXT: s_cbranch_execz .LBB28_8
; GFX940-NEXT: ; %bb.7:
; GFX940-NEXT: v_mov_b32_e32 v2, s4
; GFX940-NEXT: ds_add_rtn_f32 v2, v2, v1
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
-; GFX940-NEXT: .LBB28_8:
+; GFX940-NEXT: ; %bb.8:
; GFX940-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX940-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x0
; GFX940-NEXT: v_readfirstlane_b32 s2, v2
@@ -7411,7 +7408,6 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX90A-NEXT: s_add_i32 s5, s5, 4
; GFX90A-NEXT: ; implicit-def: $vgpr1
; GFX90A-NEXT: s_and_saveexec_b64 s[6:7], vcc
-; GFX90A-NEXT: s_cbranch_execz .LBB28_2
; GFX90A-NEXT: ; %bb.1:
; GFX90A-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
; GFX90A-NEXT: s_lshl_b32 s8, s5, 3
@@ -7420,7 +7416,7 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX90A-NEXT: v_mov_b32_e32 v2, s8
; GFX90A-NEXT: ds_add_rtn_f32 v1, v2, v1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: .LBB28_2:
+; GFX90A-NEXT: ; %bb.2:
; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX90A-NEXT: s_mov_b64 s[8:9], exec
; GFX90A-NEXT: v_readfirstlane_b32 s10, v1
@@ -7428,7 +7424,6 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v1, s9, v1
; GFX90A-NEXT: v_cmp_eq_u32_e64 s[0:1], 0, v1
; GFX90A-NEXT: s_and_saveexec_b64 s[6:7], s[0:1]
-; GFX90A-NEXT: s_cbranch_execz .LBB28_4
; GFX90A-NEXT: ; %bb.3:
; GFX90A-NEXT: s_bcnt1_i32_b64 s0, s[8:9]
; GFX90A-NEXT: v_cvt_f32_ubyte0_e32 v1, s0
@@ -7437,7 +7432,7 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX90A-NEXT: v_mov_b32_e32 v2, s0
; GFX90A-NEXT: ds_add_f32 v2, v1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: .LBB28_4:
+; GFX90A-NEXT: ; %bb.4:
; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX90A-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
; GFX90A-NEXT: v_mul_f32_e32 v0, 0x42280000, v0
@@ -7466,12 +7461,11 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX90A-NEXT: ; implicit-def: $vgpr2
; GFX90A-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX90A-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX90A-NEXT: s_cbranch_execz .LBB28_8
; GFX90A-NEXT: ; %bb.7:
; GFX90A-NEXT: v_mov_b32_e32 v2, s4
; GFX90A-NEXT: ds_add_rtn_f32 v2, v2, v1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: .LBB28_8:
+; GFX90A-NEXT: ; %bb.8:
; GFX90A-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX90A-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x0
; GFX90A-NEXT: v_readfirstlane_b32 s2, v2
@@ -7494,7 +7488,6 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX908-NEXT: s_add_i32 s5, s5, 4
; GFX908-NEXT: ; implicit-def: $vgpr1
; GFX908-NEXT: s_and_saveexec_b64 s[6:7], vcc
-; GFX908-NEXT: s_cbranch_execz .LBB28_2
; GFX908-NEXT: ; %bb.1:
; GFX908-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
; GFX908-NEXT: s_lshl_b32 s8, s5, 3
@@ -7503,7 +7496,7 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX908-NEXT: v_mov_b32_e32 v2, s8
; GFX908-NEXT: ds_add_rtn_f32 v1, v2, v1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: .LBB28_2:
+; GFX908-NEXT: ; %bb.2:
; GFX908-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX908-NEXT: s_mov_b64 s[8:9], exec
; GFX908-NEXT: v_readfirstlane_b32 s10, v1
@@ -7511,7 +7504,6 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX908-NEXT: v_mbcnt_hi_u32_b32 v1, s9, v1
; GFX908-NEXT: v_cmp_eq_u32_e64 s[0:1], 0, v1
; GFX908-NEXT: s_and_saveexec_b64 s[6:7], s[0:1]
-; GFX908-NEXT: s_cbranch_execz .LBB28_4
; GFX908-NEXT: ; %bb.3:
; GFX908-NEXT: s_bcnt1_i32_b64 s0, s[8:9]
; GFX908-NEXT: v_cvt_f32_ubyte0_e32 v1, s0
@@ -7520,7 +7512,7 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX908-NEXT: v_mov_b32_e32 v2, s0
; GFX908-NEXT: ds_add_f32 v2, v1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: .LBB28_4:
+; GFX908-NEXT: ; %bb.4:
; GFX908-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX908-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
; GFX908-NEXT: v_mul_f32_e32 v0, 0x42280000, v0
@@ -7549,12 +7541,11 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX908-NEXT: ; implicit-def: $vgpr2
; GFX908-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX908-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX908-NEXT: s_cbranch_execz .LBB28_8
; GFX908-NEXT: ; %bb.7:
; GFX908-NEXT: v_mov_b32_e32 v2, s4
; GFX908-NEXT: ds_add_rtn_f32 v2, v2, v1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: .LBB28_8:
+; GFX908-NEXT: ; %bb.8:
; GFX908-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX908-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x0
; GFX908-NEXT: v_readfirstlane_b32 s2, v2
@@ -7578,7 +7569,6 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX8-NEXT: ; implicit-def: $vgpr1
; GFX8-NEXT: s_mov_b32 m0, -1
; GFX8-NEXT: s_and_saveexec_b64 s[6:7], vcc
-; GFX8-NEXT: s_cbranch_execz .LBB28_2
; GFX8-NEXT: ; %bb.1:
; GFX8-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
; GFX8-NEXT: s_lshl_b32 s8, s5, 3
@@ -7587,7 +7577,7 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX8-NEXT: v_mov_b32_e32 v2, s8
; GFX8-NEXT: ds_add_rtn_f32 v1, v2, v1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: .LBB28_2:
+; GFX8-NEXT: ; %bb.2:
; GFX8-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX8-NEXT: s_mov_b64 s[8:9], exec
; GFX8-NEXT: v_readfirstlane_b32 s10, v1
@@ -7595,7 +7585,6 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX8-NEXT: v_mbcnt_hi_u32_b32 v1, s9, v1
; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], 0, v1
; GFX8-NEXT: s_and_saveexec_b64 s[6:7], s[0:1]
-; GFX8-NEXT: s_cbranch_execz .LBB28_4
; GFX8-NEXT: ; %bb.3:
; GFX8-NEXT: s_bcnt1_i32_b64 s0, s[8:9]
; GFX8-NEXT: v_cvt_f32_ubyte0_e32 v1, s0
@@ -7604,7 +7593,7 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX8-NEXT: v_mov_b32_e32 v2, s0
; GFX8-NEXT: ds_add_f32 v2, v1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: .LBB28_4:
+; GFX8-NEXT: ; %bb.4:
; GFX8-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX8-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
; GFX8-NEXT: v_mul_f32_e32 v0, 0x42280000, v0
@@ -7633,13 +7622,12 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX8-NEXT: ; implicit-def: $vgpr2
; GFX8-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX8-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX8-NEXT: s_cbranch_execz .LBB28_8
; GFX8-NEXT: ; %bb.7:
; GFX8-NEXT: v_mov_b32_e32 v2, s4
; GFX8-NEXT: s_mov_b32 m0, -1
; GFX8-NEXT: ds_add_rtn_f32 v2, v2, v1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: .LBB28_8:
+; GFX8-NEXT: ; %bb.8:
; GFX8-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x0
; GFX8-NEXT: v_readfirstlane_b32 s2, v2
@@ -7923,7 +7911,6 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: s_add_co_i32 s1, s5, 4
; GFX12-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX12-NEXT: s_cbranch_execz .LBB29_2
; GFX12-NEXT: ; %bb.1:
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_bcnt1_i32_b32 s5, s6
@@ -7933,7 +7920,7 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: v_dual_mov_b32 v2, s5 :: v_dual_mul_f32 v1, 0x42280000, v1
; GFX12-NEXT: ds_add_rtn_f32 v1, v2, v1
-; GFX12-NEXT: .LBB29_2:
+; GFX12-NEXT: ; %bb.2:
; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-NEXT: s_mov_b32 s7, exec_lo
@@ -7944,7 +7931,6 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX12-NEXT: s_mov_b32 s6, exec_lo
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX12-NEXT: v_cmpx_eq_u32_e32 0, v2
-; GFX12-NEXT: s_cbranch_execz .LBB29_4
; GFX12-NEXT: ; %bb.3:
; GFX12-NEXT: s_bcnt1_i32_b32 s0, s7
; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
@@ -7953,8 +7939,7 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mul_f32 v1, 0x42280000, v1
; GFX12-NEXT: ds_add_f32 v2, v1
-; GFX12-NEXT: .LBB29_4:
-; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: ; %bb.4:
; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s6
; GFX12-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
; GFX12-NEXT: s_mov_b32 s1, exec_lo
@@ -7988,11 +7973,10 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX12-NEXT: s_and_saveexec_b32 s1, vcc_lo
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_xor_b32 s1, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execz .LBB29_8
; GFX12-NEXT: ; %bb.7:
; GFX12-NEXT: v_dual_mov_b32 v1, s4 :: v_dual_mov_b32 v2, s0
; GFX12-NEXT: ds_add_rtn_f32 v1, v1, v2
-; GFX12-NEXT: .LBB29_8:
+; GFX12-NEXT: ; %bb.8:
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
; GFX12-NEXT: s_load_b64 s[0:1], s[2:3], 0x0
@@ -8018,7 +8002,6 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX940-NEXT: s_add_i32 s5, s5, 4
; GFX940-NEXT: ; implicit-def: $vgpr1
; GFX940-NEXT: s_and_saveexec_b64 s[6:7], vcc
-; GFX940-NEXT: s_cbranch_execz .LBB29_2
; GFX940-NEXT: ; %bb.1:
; GFX940-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
; GFX940-NEXT: s_lshl_b32 s8, s5, 3
@@ -8026,7 +8009,7 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX940-NEXT: v_mul_f32_e32 v1, 0x42280000, v1
; GFX940-NEXT: v_mov_b32_e32 v2, s8
; GFX940-NEXT: ds_add_rtn_f32 v1, v2, v1
-; GFX940-NEXT: .LBB29_2:
+; GFX940-NEXT: ; %bb.2:
; GFX940-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX940-NEXT: s_mov_b64 s[8:9], exec
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
@@ -8035,7 +8018,6 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX940-NEXT: v_mbcnt_hi_u32_b32 v1, s9, v1
; GFX940-NEXT: v_cmp_eq_u32_e64 s[0:1], 0, v1
; GFX940-NEXT: s_and_saveexec_b64 s[6:7], s[0:1]
-; GFX940-NEXT: s_cbranch_execz .LBB29_4
; GFX940-NEXT: ; %bb.3:
; GFX940-NEXT: s_bcnt1_i32_b64 s0, s[8:9]
; GFX940-NEXT: v_cvt_f32_ubyte0_e32 v1, s0
@@ -8043,7 +8025,7 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX940-NEXT: v_mul_f32_e32 v1, 0x42280000, v1
; GFX940-NEXT: v_mov_b32_e32 v2, s0
; GFX940-NEXT: ds_add_f32 v2, v1
-; GFX940-NEXT: .LBB29_4:
+; GFX940-NEXT: ; %bb.4:
; GFX940-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX940-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
; GFX940-NEXT: v_mul_f32_e32 v0, 0x42280000, v0
@@ -8072,11 +8054,10 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX940-NEXT: ; implicit-def: $vgpr2
; GFX940-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX940-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX940-NEXT: s_cbranch_execz .LBB29_8
; GFX940-NEXT: ; %bb.7:
; GFX940-NEXT: v_mov_b32_e32 v2, s4
; GFX940-NEXT: ds_add_rtn_f32 v2, v2, v1
-; GFX940-NEXT: .LBB29_8:
+; GFX940-NEXT: ; %bb.8:
; GFX940-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX940-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x0
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
@@ -8100,7 +8081,6 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_add_i32 s1, s5, 4
; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_cbranch_execz .LBB29_2
; GFX11-NEXT: ; %bb.1:
; GFX11-NEXT: s_bcnt1_i32_b32 s5, s6
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
@@ -8109,7 +8089,7 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v2, s5 :: v_dual_mul_f32 v1, 0x42280000, v1
; GFX11-NEXT: ds_add_rtn_f32 v1, v2, v1
-; GFX11-NEXT: .LBB29_2:
+; GFX11-NEXT: ; %bb.2:
; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
; GFX11-NEXT: s_mov_b32 s7, exec_lo
@@ -8118,7 +8098,6 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX11-NEXT: v_mbcnt_lo_u32_b32 v2, s7, 0
; GFX11-NEXT: s_mov_b32 s6, exec_lo
; GFX11-NEXT: v_cmpx_eq_u32_e32 0, v2
-; GFX11-NEXT: s_cbranch_execz .LBB29_4
; GFX11-NEXT: ; %bb.3:
; GFX11-NEXT: s_bcnt1_i32_b32 s0, s7
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
@@ -8127,7 +8106,7 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mul_f32 v1, 0x42280000, v1
; GFX11-NEXT: ds_add_f32 v2, v1
-; GFX11-NEXT: .LBB29_4:
+; GFX11-NEXT: ; %bb.4:
; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s6
; GFX11-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
; GFX11-NEXT: v_bfrev_b32_e32 v1, 1
@@ -8159,11 +8138,10 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX11-NEXT: ; implicit-def: $vgpr2
; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execz .LBB29_8
; GFX11-NEXT: ; %bb.7:
; GFX11-NEXT: v_mov_b32_e32 v2, s4
; GFX11-NEXT: ds_add_rtn_f32 v2, v2, v1
-; GFX11-NEXT: .LBB29_8:
+; GFX11-NEXT: ; %bb.8:
; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX11-NEXT: s_load_b64 s[0:1], s[2:3], 0x0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
@@ -8186,7 +8164,6 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_add_i32 s1, s5, 4
; GFX10-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX10-NEXT: s_cbranch_execz .LBB29_2
; GFX10-NEXT: ; %bb.1:
; GFX10-NEXT: s_bcnt1_i32_b32 s5, s6
; GFX10-NEXT: v_cvt_f32_ubyte0_e32 v1, s5
@@ -8194,7 +8171,7 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX10-NEXT: v_mov_b32_e32 v2, s5
; GFX10-NEXT: v_mul_f32_e32 v1, 0x42280000, v1
; GFX10-NEXT: ds_add_rtn_f32 v1, v2, v1
-; GFX10-NEXT: .LBB29_2:
+; GFX10-NEXT: ; %bb.2:
; GFX10-NEXT: s_waitcnt_depctr 0xffe3
; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX10-NEXT: s_mov_b32 s7, exec_lo
@@ -8203,7 +8180,6 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX10-NEXT: v_mbcnt_lo_u32_b32 v2, s7, 0
; GFX10-NEXT: v_cmp_eq_u32_e64 s0, 0, v2
; GFX10-NEXT: s_and_saveexec_b32 s6, s0
-; GFX10-NEXT: s_cbranch_execz .LBB29_4
; GFX10-NEXT: ; %bb.3:
; GFX10-NEXT: s_bcnt1_i32_b32 s0, s7
; GFX10-NEXT: v_cvt_f32_ubyte0_e32 v1, s0
@@ -8211,7 +8187,7 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX10-NEXT: v_mov_b32_e32 v2, s0
; GFX10-NEXT: v_mul_f32_e32 v1, 0x42280000, v1
; GFX10-NEXT: ds_add_f32 v2, v1
-; GFX10-NEXT: .LBB29_4:
+; GFX10-NEXT: ; %bb.4:
; GFX10-NEXT: s_waitcnt_depctr 0xffe3
; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s6
; GFX10-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
@@ -8238,11 +8214,10 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX10-NEXT: ; implicit-def: $vgpr2
; GFX10-NEXT: s_and_saveexec_b32 s0, vcc_lo
; GFX10-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX10-NEXT: s_cbranch_execz .LBB29_8
; GFX10-NEXT: ; %bb.7:
; GFX10-NEXT: v_mov_b32_e32 v2, s4
; GFX10-NEXT: ds_add_rtn_f32 v2, v2, v1
-; GFX10-NEXT: .LBB29_8:
+; GFX10-NEXT: ; %bb.8:
; GFX10-NEXT: s_waitcnt_depctr 0xffe3
; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX10-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x0
@@ -8251,7 +8226,6 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX10-NEXT: v_mov_b32_e32 v1, 0
; GFX10-NEXT: v_add_f32_e32 v0, s2, v0
; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, s2, vcc_lo
-; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-NEXT: global_store_dword v1, v0, s[0:1]
; GFX10-NEXT: s_endpgm
;
@@ -8266,7 +8240,6 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX90A-NEXT: s_add_i32 s5, s5, 4
; GFX90A-NEXT: ; implicit-def: $vgpr1
; GFX90A-NEXT: s_and_saveexec_b64 s[6:7], vcc
-; GFX90A-NEXT: s_cbranch_execz .LBB29_2
; GFX90A-NEXT: ; %bb.1:
; GFX90A-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
; GFX90A-NEXT: s_lshl_b32 s8, s5, 3
@@ -8274,7 +8247,7 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX90A-NEXT: v_mul_f32_e32 v1, 0x42280000, v1
; GFX90A-NEXT: v_mov_b32_e32 v2, s8
; GFX90A-NEXT: ds_add_rtn_f32 v1, v2, v1
-; GFX90A-NEXT: .LBB29_2:
+; GFX90A-NEXT: ; %bb.2:
; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX90A-NEXT: s_mov_b64 s[8:9], exec
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
@@ -8283,7 +8256,6 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v1, s9, v1
; GFX90A-NEXT: v_cmp_eq_u32_e64 s[0:1], 0, v1
; GFX90A-NEXT: s_and_saveexec_b64 s[6:7], s[0:1]
-; GFX90A-NEXT: s_cbranch_execz .LBB29_4
; GFX90A-NEXT: ; %bb.3:
; GFX90A-NEXT: s_bcnt1_i32_b64 s0, s[8:9]
; GFX90A-NEXT: v_cvt_f32_ubyte0_e32 v1, s0
@@ -8291,7 +8263,7 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX90A-NEXT: v_mul_f32_e32 v1, 0x42280000, v1
; GFX90A-NEXT: v_mov_b32_e32 v2, s0
; GFX90A-NEXT: ds_add_f32 v2, v1
-; GFX90A-NEXT: .LBB29_4:
+; GFX90A-NEXT: ; %bb.4:
; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX90A-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
; GFX90A-NEXT: v_mul_f32_e32 v0, 0x42280000, v0
@@ -8320,11 +8292,10 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX90A-NEXT: ; implicit-def: $vgpr2
; GFX90A-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX90A-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX90A-NEXT: s_cbranch_execz .LBB29_8
; GFX90A-NEXT: ; %bb.7:
; GFX90A-NEXT: v_mov_b32_e32 v2, s4
; GFX90A-NEXT: ds_add_rtn_f32 v2, v2, v1
-; GFX90A-NEXT: .LBB29_8:
+; GFX90A-NEXT: ; %bb.8:
; GFX90A-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX90A-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
@@ -8347,7 +8318,6 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX908-NEXT: s_add_i32 s5, s5, 4
; GFX908-NEXT: ; implicit-def: $vgpr1
; GFX908-NEXT: s_and_saveexec_b64 s[6:7], vcc
-; GFX908-NEXT: s_cbranch_execz .LBB29_2
; GFX908-NEXT: ; %bb.1:
; GFX908-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
; GFX908-NEXT: s_lshl_b32 s8, s5, 3
@@ -8355,7 +8325,7 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX908-NEXT: v_mul_f32_e32 v1, 0x42280000, v1
; GFX908-NEXT: v_mov_b32_e32 v2, s8
; GFX908-NEXT: ds_add_rtn_f32 v1, v2, v1
-; GFX908-NEXT: .LBB29_2:
+; GFX908-NEXT: ; %bb.2:
; GFX908-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX908-NEXT: s_mov_b64 s[8:9], exec
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
@@ -8364,7 +8334,6 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX908-NEXT: v_mbcnt_hi_u32_b32 v1, s9, v1
; GFX908-NEXT: v_cmp_eq_u32_e64 s[0:1], 0, v1
; GFX908-NEXT: s_and_saveexec_b64 s[6:7], s[0:1]
-; GFX908-NEXT: s_cbranch_execz .LBB29_4
; GFX908-NEXT: ; %bb.3:
; GFX908-NEXT: s_bcnt1_i32_b64 s0, s[8:9]
; GFX908-NEXT: v_cvt_f32_ubyte0_e32 v1, s0
@@ -8372,7 +8341,7 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX908-NEXT: v_mul_f32_e32 v1, 0x42280000, v1
; GFX908-NEXT: v_mov_b32_e32 v2, s0
; GFX908-NEXT: ds_add_f32 v2, v1
-; GFX908-NEXT: .LBB29_4:
+; GFX908-NEXT: ; %bb.4:
; GFX908-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX908-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
; GFX908-NEXT: v_mul_f32_e32 v0, 0x42280000, v0
@@ -8401,11 +8370,10 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX908-NEXT: ; implicit-def: $vgpr2
; GFX908-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX908-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX908-NEXT: s_cbranch_execz .LBB29_8
; GFX908-NEXT: ; %bb.7:
; GFX908-NEXT: v_mov_b32_e32 v2, s4
; GFX908-NEXT: ds_add_rtn_f32 v2, v2, v1
-; GFX908-NEXT: .LBB29_8:
+; GFX908-NEXT: ; %bb.8:
; GFX908-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX908-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x0
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
@@ -8429,7 +8397,6 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX8-NEXT: ; implicit-def: $vgpr1
; GFX8-NEXT: s_mov_b32 m0, -1
; GFX8-NEXT: s_and_saveexec_b64 s[6:7], vcc
-; GFX8-NEXT: s_cbranch_execz .LBB29_2
; GFX8-NEXT: ; %bb.1:
; GFX8-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
; GFX8-NEXT: s_lshl_b32 s8, s5, 3
@@ -8437,7 +8404,7 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX8-NEXT: v_mul_f32_e32 v1, 0x42280000, v1
; GFX8-NEXT: v_mov_b32_e32 v2, s8
; GFX8-NEXT: ds_add_rtn_f32 v1, v2, v1
-; GFX8-NEXT: .LBB29_2:
+; GFX8-NEXT: ; %bb.2:
; GFX8-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX8-NEXT: s_mov_b64 s[8:9], exec
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
@@ -8446,7 +8413,6 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX8-NEXT: v_mbcnt_hi_u32_b32 v1, s9, v1
; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], 0, v1
; GFX8-NEXT: s_and_saveexec_b64 s[6:7], s[0:1]
-; GFX8-NEXT: s_cbranch_execz .LBB29_4
; GFX8-NEXT: ; %bb.3:
; GFX8-NEXT: s_bcnt1_i32_b64 s0, s[8:9]
; GFX8-NEXT: v_cvt_f32_ubyte0_e32 v1, s0
@@ -8454,7 +8420,7 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX8-NEXT: v_mul_f32_e32 v1, 0x42280000, v1
; GFX8-NEXT: v_mov_b32_e32 v2, s0
; GFX8-NEXT: ds_add_f32 v2, v1
-; GFX8-NEXT: .LBB29_4:
+; GFX8-NEXT: ; %bb.4:
; GFX8-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX8-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
; GFX8-NEXT: v_mul_f32_e32 v0, 0x42280000, v0
@@ -8483,12 +8449,11 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX8-NEXT: ; implicit-def: $vgpr2
; GFX8-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX8-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; GFX8-NEXT: s_cbranch_execz .LBB29_8
; GFX8-NEXT: ; %bb.7:
; GFX8-NEXT: v_mov_b32_e32 v2, s4
; GFX8-NEXT: s_mov_b32 m0, -1
; GFX8-NEXT: ds_add_rtn_f32 v2, v2, v1
-; GFX8-NEXT: .LBB29_8:
+; GFX8-NEXT: ; %bb.8:
; GFX8-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX8-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x0
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
diff --git a/llvm/test/CodeGen/AMDGPU/ret_jump.ll b/llvm/test/CodeGen/AMDGPU/ret_jump.ll
index ad38d78ddb2ff1..66a55d9eb128c6 100644
--- a/llvm/test/CodeGen/AMDGPU/ret_jump.ll
+++ b/llvm/test/CodeGen/AMDGPU/ret_jump.ll
@@ -65,7 +65,6 @@ ret.bb: ; preds = %else, %main_body
; GCN: .LBB{{[0-9]+_[0-9]+}}: ; %else
; GCN: s_and_saveexec_b64 [[SAVE_EXEC:s\[[0-9]+:[0-9]+\]]], vcc
-; GCN-NEXT: s_cbranch_execz .LBB1_{{[0-9]+}}
; GCN-NEXT: ; %unreachable.bb
; GCN: ds_write_b32
diff --git a/llvm/test/CodeGen/AMDGPU/sgpr-control-flow.ll b/llvm/test/CodeGen/AMDGPU/sgpr-control-flow.ll
index 0630cca7c099b8..01aa5a42ca5be8 100644
--- a/llvm/test/CodeGen/AMDGPU/sgpr-control-flow.ll
+++ b/llvm/test/CodeGen/AMDGPU/sgpr-control-flow.ll
@@ -114,20 +114,18 @@ define amdgpu_kernel void @sgpr_if_else_valu_br(ptr addrspace(1) %out, float %a,
; SI-NEXT: v_cmp_lg_f32_e32 vcc, 0, v0
; SI-NEXT: s_and_saveexec_b64 s[0:1], vcc
; SI-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
-; SI-NEXT: s_cbranch_execz .LBB2_2
; SI-NEXT: ; %bb.1: ; %else
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_add_i32 s8, s6, s7
-; SI-NEXT: .LBB2_2: ; %Flow
+; SI-NEXT: ; %bb.2: ; %Flow
; SI-NEXT: s_or_saveexec_b64 s[0:1], s[0:1]
; SI-NEXT: v_mov_b32_e32 v0, s8
; SI-NEXT: s_xor_b64 exec, exec, s[0:1]
-; SI-NEXT: s_cbranch_execz .LBB2_4
; SI-NEXT: ; %bb.3: ; %if
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_add_i32 s4, s4, s5
; SI-NEXT: v_mov_b32_e32 v0, s4
-; SI-NEXT: .LBB2_4: ; %endif
+; SI-NEXT: ; %bb.4: ; %endif
; SI-NEXT: s_or_b64 exec, exec, s[0:1]
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
diff --git a/llvm/test/CodeGen/AMDGPU/si-annotate-cf.ll b/llvm/test/CodeGen/AMDGPU/si-annotate-cf.ll
index 9f3596359a6625..1ad1bd09c1f204 100644
--- a/llvm/test/CodeGen/AMDGPU/si-annotate-cf.ll
+++ b/llvm/test/CodeGen/AMDGPU/si-annotate-cf.ll
@@ -74,14 +74,13 @@ define amdgpu_kernel void @phi_cond_outside_loop(i32 %b) {
; SI-NEXT: s_mov_b64 s[0:1], 0
; SI-NEXT: s_mov_b64 s[4:5], 0
; SI-NEXT: s_and_saveexec_b64 s[6:7], vcc
-; SI-NEXT: s_cbranch_execz .LBB1_2
; SI-NEXT: ; %bb.1: ; %else
; SI-NEXT: s_load_dword s2, s[2:3], 0x9
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_cmp_eq_u32 s2, 0
; SI-NEXT: s_cselect_b64 s[2:3], -1, 0
; SI-NEXT: s_and_b64 s[4:5], s[2:3], exec
-; SI-NEXT: .LBB1_2: ; %endif
+; SI-NEXT: ; %bb.2: ; %endif
; SI-NEXT: s_or_b64 exec, exec, s[6:7]
; SI-NEXT: .LBB1_3: ; %loop
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
@@ -99,14 +98,13 @@ define amdgpu_kernel void @phi_cond_outside_loop(i32 %b) {
; FLAT-NEXT: s_mov_b64 s[0:1], 0
; FLAT-NEXT: s_mov_b64 s[4:5], 0
; FLAT-NEXT: s_and_saveexec_b64 s[6:7], vcc
-; FLAT-NEXT: s_cbranch_execz .LBB1_2
; FLAT-NEXT: ; %bb.1: ; %else
; FLAT-NEXT: s_load_dword s2, s[2:3], 0x24
; FLAT-NEXT: s_waitcnt lgkmcnt(0)
; FLAT-NEXT: s_cmp_eq_u32 s2, 0
; FLAT-NEXT: s_cselect_b64 s[2:3], -1, 0
; FLAT-NEXT: s_and_b64 s[4:5], s[2:3], exec
-; FLAT-NEXT: .LBB1_2: ; %endif
+; FLAT-NEXT: ; %bb.2: ; %endif
; FLAT-NEXT: s_or_b64 exec, exec, s[6:7]
; FLAT-NEXT: .LBB1_3: ; %loop
; FLAT-NEXT: ; =>This Inner Loop Header: Depth=1
diff --git a/llvm/test/CodeGen/AMDGPU/uniform-phi-with-undef.ll b/llvm/test/CodeGen/AMDGPU/uniform-phi-with-undef.ll
index 64d4a0cf785013..d79245780c8bb0 100644
--- a/llvm/test/CodeGen/AMDGPU/uniform-phi-with-undef.ll
+++ b/llvm/test/CodeGen/AMDGPU/uniform-phi-with-undef.ll
@@ -15,7 +15,6 @@ define amdgpu_ps float @uniform_phi_with_undef(float inreg %c, float %v, i32 %x,
; GCN-NEXT: s_mov_b32 s1, exec_lo
; GCN-NEXT: s_and_b32 s2, s1, s2
; GCN-NEXT: s_mov_b32 exec_lo, s2
-; GCN-NEXT: s_cbranch_execz .LBB0_2
; GCN-NEXT: ; %bb.1: ; %if
; GCN-NEXT: s_mov_b32 s2, 0x40400000
; GCN-NEXT: v_div_scale_f32 v1, s3, s2, s2, v0
@@ -30,7 +29,7 @@ define amdgpu_ps float @uniform_phi_with_undef(float inreg %c, float %v, i32 %x,
; GCN-NEXT: v_fma_f32 v1, -v1, v4, v3
; GCN-NEXT: v_div_fmas_f32 v1, v1, v2, v4
; GCN-NEXT: v_div_fixup_f32 v0, v1, s2, v0
-; GCN-NEXT: .LBB0_2: ; %end
+; GCN-NEXT: ; %bb.2: ; %end
; GCN-NEXT: s_or_b32 exec_lo, exec_lo, s1
; GCN-NEXT: v_add_f32_e64 v0, v0, s0
; GCN-NEXT: ; return to shader part epilog
More information about the llvm-commits
mailing list